author     Mark Brown <broonie@kernel.org>  2015-09-16 20:44:25 +0100
committer  Mark Brown <broonie@kernel.org>  2015-09-16 20:44:25 +0100
commit     92243b6fc8fcb16bf401b055f7a0ba79f70a4115 (patch)
tree       08e9fa205efb9bfab23b6ea59fc0a3eaa3fed82b /drivers
parent     a394d635193b641f2c86ead5ada5b115d57c51f8 (diff)
parent     6ff33f3902c3b1c5d0db6b1e2c70b6d76fba357f (diff)
download   linux-92243b6fc8fcb16bf401b055f7a0ba79f70a4115.tar.gz
           linux-92243b6fc8fcb16bf401b055f7a0ba79f70a4115.tar.bz2
           linux-92243b6fc8fcb16bf401b055f7a0ba79f70a4115.zip
Merge tag 'v4.3-rc1' into spi-fix-doc
Linux 4.3-rc1
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 4
-rw-r--r--  drivers/Makefile | 4
-rw-r--r--  drivers/acpi/Kconfig | 20
-rw-r--r--  drivers/acpi/Makefile | 8
-rw-r--r--  drivers/acpi/ac.c | 4
-rw-r--r--  drivers/acpi/acpi_apd.c | 1
-rw-r--r--  drivers/acpi/acpi_ipmi.c | 4
-rw-r--r--  drivers/acpi/acpi_lpss.c | 46
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 5
-rw-r--r--  drivers/acpi/acpi_pad.c | 4
-rw-r--r--  drivers/acpi/acpi_pnp.c | 3
-rw-r--r--  drivers/acpi/acpi_processor.c | 2
-rw-r--r--  drivers/acpi/acpi_video.c | 4
-rw-r--r--  drivers/acpi/acpica/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 26
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 8
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 20
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 22
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 28
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 9
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 13
-rw-r--r--  drivers/acpi/acpica/acobject.h | 1
-rw-r--r--  drivers/acpi/acpica/acparser.h | 4
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 14
-rw-r--r--  drivers/acpi/acpica/acutils.h | 25
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 4
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsdebug.c | 231
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 20
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 35
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 31
-rw-r--r--  drivers/acpi/acpica/dswload.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 22
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 8
-rw-r--r--  drivers/acpi/acpica/excreate.c | 1
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 324
-rw-r--r--  drivers/acpi/acpica/exdump.c | 5
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 16
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 15
-rw-r--r--  drivers/acpi/acpica/nseval.c | 4
-rw-r--r--  drivers/acpi/acpica/nsload.c | 16
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 275
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 42
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 19
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 8
-rw-r--r--  drivers/acpi/acpica/psargs.c | 26
-rw-r--r--  drivers/acpi/acpica/psloop.c | 32
-rw-r--r--  drivers/acpi/acpica/psobject.c | 17
-rw-r--r--  drivers/acpi/acpica/psparse.c | 14
-rw-r--r--  drivers/acpi/acpica/psutils.c | 8
-rw-r--r--  drivers/acpi/acpica/psxface.c | 123
-rw-r--r--  drivers/acpi/acpica/rscreate.c | 3
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 6
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 15
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 40
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 73
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 93
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 31
-rw-r--r--  drivers/acpi/acpica/utdelete.c | 3
-rw-r--r--  drivers/acpi/acpica/utfileio.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 3
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 4
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 380
-rw-r--r--  drivers/acpi/acpica/utstring.c | 342
-rw-r--r--  drivers/acpi/acpica/utxface.c | 12
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 11
-rw-r--r--  drivers/acpi/apei/apei-base.c | 4
-rw-r--r--  drivers/acpi/apei/einj.c | 4
-rw-r--r--  drivers/acpi/apei/erst-dbg.c | 4
-rw-r--r--  drivers/acpi/apei/erst.c | 4
-rw-r--r--  drivers/acpi/apei/ghes.c | 4
-rw-r--r--  drivers/acpi/apei/hest.c | 4
-rw-r--r--  drivers/acpi/battery.c | 4
-rw-r--r--  drivers/acpi/blacklist.c | 4
-rw-r--r--  drivers/acpi/bus.c | 408
-rw-r--r--  drivers/acpi/button.c | 4
-rw-r--r--  drivers/acpi/cm_sbs.c | 4
-rw-r--r--  drivers/acpi/container.c | 4
-rw-r--r--  drivers/acpi/debugfs.c | 2
-rw-r--r--  drivers/acpi/device_pm.c | 14
-rw-r--r--  drivers/acpi/device_sysfs.c | 521
-rw-r--r--  drivers/acpi/dock.c | 4
-rw-r--r--  drivers/acpi/ec.c | 86
-rw-r--r--  drivers/acpi/fan.c | 4
-rw-r--r--  drivers/acpi/hed.c | 4
-rw-r--r--  drivers/acpi/internal.h | 16
-rw-r--r--  drivers/acpi/nfit.c | 205
-rw-r--r--  drivers/acpi/nfit.h | 35
-rw-r--r--  drivers/acpi/numa.c | 4
-rw-r--r--  drivers/acpi/osl.c | 57
-rw-r--r--  drivers/acpi/pci_irq.c | 21
-rw-r--r--  drivers/acpi/pci_link.c | 20
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/pci_slot.c | 4
-rw-r--r--  drivers/acpi/power.c | 19
-rw-r--r--  drivers/acpi/processor_driver.c | 92
-rw-r--r--  drivers/acpi/processor_idle.c | 4
-rw-r--r--  drivers/acpi/processor_perflib.c | 10
-rw-r--r--  drivers/acpi/processor_thermal.c | 4
-rw-r--r--  drivers/acpi/processor_throttling.c | 4
-rw-r--r--  drivers/acpi/property.c | 5
-rw-r--r--  drivers/acpi/resource.c | 190
-rw-r--r--  drivers/acpi/sbs.c | 4
-rw-r--r--  drivers/acpi/scan.c | 836
-rw-r--r--  drivers/acpi/sysfs.c | 133
-rw-r--r--  drivers/acpi/tables.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 16
-rw-r--r--  drivers/acpi/utils.c | 4
-rw-r--r--  drivers/acpi/video_detect.c | 16
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 13
-rw-r--r--  drivers/ata/ahci_brcmstb.c | 6
-rw-r--r--  drivers/ata/ahci_platform.c | 9
-rw-r--r--  drivers/ata/libata-core.c | 48
-rw-r--r--  drivers/ata/libata-eh.c | 105
-rw-r--r--  drivers/ata/libata-pmp.c | 7
-rw-r--r--  drivers/ata/libata-scsi.c | 24
-rw-r--r--  drivers/ata/libata-transport.c | 2
-rw-r--r--  drivers/ata/libata.h | 6
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 15
-rw-r--r--  drivers/ata/pata_jmicron.c | 12
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 3
-rw-r--r--  drivers/ata/sata_rcar.c | 4
-rw-r--r--  drivers/ata/sata_sx4.c | 16
-rw-r--r--  drivers/auxdisplay/ks0108.c | 97
-rw-r--r--  drivers/base/Makefile | 1
-rw-r--r--  drivers/base/base.h | 3
-rw-r--r--  drivers/base/core.c | 95
-rw-r--r--  drivers/base/cpu.c | 2
-rw-r--r--  drivers/base/dd.c | 28
-rw-r--r--  drivers/base/devres.c | 4
-rw-r--r--  drivers/base/firmware_class.c | 18
-rw-r--r--  drivers/base/node.c | 10
-rw-r--r--  drivers/base/platform-msi.c | 282
-rw-r--r--  drivers/base/platform.c | 8
-rw-r--r--  drivers/base/power/clock_ops.c | 4
-rw-r--r--  drivers/base/power/domain.c | 437
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/opp.c | 1035
-rw-r--r--  drivers/base/power/power.h | 2
-rw-r--r--  drivers/base/power/qos.c | 37
-rw-r--r--  drivers/base/power/sysfs.c | 11
-rw-r--r--  drivers/base/power/wakeirq.c | 12
-rw-r--r--  drivers/base/power/wakeup.c | 31
-rw-r--r--  drivers/base/property.c | 105
-rw-r--r--  drivers/base/regmap/internal.h | 12
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 19
-rw-r--r--  drivers/base/regmap/regcache.c | 2
-rw-r--r--  drivers/base/regmap/regmap-ac97.c | 41
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 99
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 90
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 4
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 52
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 41
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 78
-rw-r--r--  drivers/base/regmap/regmap.c | 368
-rw-r--r--  drivers/bcma/Kconfig | 2
-rw-r--r--  drivers/bcma/bcma_private.h | 1
-rw-r--r--  drivers/bcma/driver_gpio.c | 92
-rw-r--r--  drivers/bcma/main.c | 36
-rw-r--r--  drivers/block/aoe/aoeblk.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 10
-rw-r--r--  drivers/block/aoe/aoedev.c | 2
-rw-r--r--  drivers/block/brd.c | 23
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 4
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 19
-rw-r--r--  drivers/block/drbd/drbd_int.h | 12
-rw-r--r--  drivers/block/drbd/drbd_main.c | 1
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 4
-rw-r--r--  drivers/block/drbd/drbd_req.c | 47
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 44
-rw-r--r--  drivers/block/floppy.c | 7
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 8
-rw-r--r--  drivers/block/nbd.c | 364
-rw-r--r--  drivers/block/null_blk.c | 20
-rw-r--r--  drivers/block/nvme-core.c | 284
-rw-r--r--  drivers/block/pktcdvd.c | 59
-rw-r--r--  drivers/block/ps3vram.c | 5
-rw-r--r--  drivers/block/rbd.c | 77
-rw-r--r--  drivers/block/rsxx/dev.c | 11
-rw-r--r--  drivers/block/skd_main.c | 2
-rw-r--r--  drivers/block/umem.c | 6
-rw-r--r--  drivers/block/virtio_blk.c | 6
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 8
-rw-r--r--  drivers/block/xen-blkfront.c | 289
-rw-r--r--  drivers/block/zram/zram_drv.c | 45
-rw-r--r--  drivers/block/zram/zram_drv.h | 1
-rw-r--r--  drivers/bluetooth/Kconfig | 18
-rw-r--r--  drivers/bluetooth/Makefile | 2
-rw-r--r--  drivers/bluetooth/bfusb.c | 2
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.c | 19
-rw-r--r--  drivers/bluetooth/btintel.c | 82
-rw-r--r--  drivers/bluetooth/btintel.h | 19
-rw-r--r--  drivers/bluetooth/btmrvl_drv.h | 6
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 7
-rw-r--r--  drivers/bluetooth/btqca.c | 392
-rw-r--r--  drivers/bluetooth/btqca.h | 135
-rw-r--r--  drivers/bluetooth/btusb.c | 101
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 6
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 326
-rw-r--r--  drivers/bluetooth/hci_h4.c | 9
-rw-r--r--  drivers/bluetooth/hci_h5.c | 2
-rw-r--r--  drivers/bluetooth/hci_intel.c | 856
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 14
-rw-r--r--  drivers/bluetooth/hci_qca.c | 969
-rw-r--r--  drivers/bluetooth/hci_uart.h | 13
-rw-r--r--  drivers/bus/mips_cdmm.c | 14
-rw-r--r--  drivers/bus/vexpress-config.c | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 4
-rw-r--r--  drivers/char/hw_random/core.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 46
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 10
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 597
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h | 10
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 14
-rw-r--r--  drivers/char/misc.c | 17
-rw-r--r--  drivers/char/nvram.c | 2
-rw-r--r--  drivers/char/toshiba.c | 2
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 3
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 8
-rw-r--r--  drivers/char/xillybus/xillybus_pcie.c | 10
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/clk/at91/clk-h32mx.c | 4
-rw-r--r--  drivers/clk/at91/clk-main.c | 11
-rw-r--r--  drivers/clk/at91/clk-master.c | 15
-rw-r--r--  drivers/clk/at91/clk-peripheral.c | 6
-rw-r--r--  drivers/clk/at91/clk-pll.c | 8
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 40
-rw-r--r--  drivers/clk/at91/clk-slow.c | 16
-rw-r--r--  drivers/clk/at91/clk-smd.c | 7
-rw-r--r--  drivers/clk/at91/clk-system.c | 8
-rw-r--r--  drivers/clk/at91/clk-usb.c | 47
-rw-r--r--  drivers/clk/at91/clk-utmi.c | 8
-rw-r--r--  drivers/clk/at91/pmc.c | 1
-rw-r--r--  drivers/clk/at91/pmc.h | 124
-rw-r--r--  drivers/clk/bcm/clk-iproc-asiu.c | 6
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 13
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 53
-rw-r--r--  drivers/clk/berlin/berlin2-pll.c | 4
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 1
-rw-r--r--  drivers/clk/clk-bcm2835.c | 5
-rw-r--r--  drivers/clk/clk-cdce706.c | 3
-rw-r--r--  drivers/clk/clk-cdce925.c | 1
-rw-r--r--  drivers/clk/clk-clps711x.c | 1
-rw-r--r--  drivers/clk/clk-composite.c | 61
-rw-r--r--  drivers/clk/clk-divider.c | 28
-rw-r--r--  drivers/clk/clk-efm32gg.c | 1
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 5
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 8
-rw-r--r--  drivers/clk/clk-gate.c | 4
-rw-r--r--  drivers/clk/clk-gpio-gate.c | 207
-rw-r--r--  drivers/clk/clk-gpio.c | 325
-rw-r--r--  drivers/clk/clk-highbank.c | 1
-rw-r--r--  drivers/clk/clk-moxart.c | 1
-rw-r--r--  drivers/clk/clk-mux.c | 7
-rw-r--r--  drivers/clk/clk-nomadik.c | 3
-rw-r--r--  drivers/clk/clk-palmas.c | 1
-rw-r--r--  drivers/clk/clk-rk808.c | 1
-rw-r--r--  drivers/clk/clk-s2mps11.c | 32
-rw-r--r--  drivers/clk/clk-si5351.c | 22
-rw-r--r--  drivers/clk/clk-si570.c | 1
-rw-r--r--  drivers/clk/clk-stm32f4.c | 7
-rw-r--r--  drivers/clk/clk-twl6040.c | 13
-rw-r--r--  drivers/clk/clk-u300.c | 2
-rw-r--r--  drivers/clk/clk-wm831x.c | 1
-rw-r--r--  drivers/clk/clk-xgene.c | 28
-rw-r--r--  drivers/clk/clk.c | 346
-rw-r--r--  drivers/clk/h8300/clk-div.c | 4
-rw-r--r--  drivers/clk/h8300/clk-h8s2678.c | 20
-rw-r--r--  drivers/clk/hisilicon/Kconfig | 2
-rw-r--r--  drivers/clk/hisilicon/Makefile | 2
-rw-r--r--  drivers/clk/hisilicon/clk-hi3620.c | 41
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220-stub.c | 276
-rw-r--r--  drivers/clk/hisilicon/clk-hip04.c | 2
-rw-r--r--  drivers/clk/hisilicon/clk.c | 14
-rw-r--r--  drivers/clk/hisilicon/clkgate-separated.c | 2
-rw-r--r--  drivers/clk/imx/Makefile | 1
-rw-r--r--  drivers/clk/imx/clk-imx1.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx21.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx31.c | 3
-rw-r--r--  drivers/clk/imx/clk-imx35.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 7
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 432
-rw-r--r--  drivers/clk/imx/clk-pfd.c | 1
-rw-r--r--  drivers/clk/imx/clk-pllv1.c | 1
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 1
-rw-r--r--  drivers/clk/ingenic/cgu.c | 1
-rw-r--r--  drivers/clk/keystone/gate.c | 1
-rw-r--r--  drivers/clk/keystone/pll.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8135.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 51
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 9
-rw-r--r--  drivers/clk/mediatek/clk-pll.c | 39
-rw-r--r--  drivers/clk/meson/clk-cpu.c | 1
-rw-r--r--  drivers/clk/meson/clkc.c | 1
-rw-r--r--  drivers/clk/mmp/clk-apbc.c | 1
-rw-r--r--  drivers/clk/mmp/clk-apmu.c | 1
-rw-r--r--  drivers/clk/mmp/clk-gate.c | 3
-rw-r--r--  drivers/clk/mmp/clk-mix.c | 71
-rw-r--r--  drivers/clk/mmp/clk.c | 3
-rw-r--r--  drivers/clk/mvebu/clk-cpu.c | 9
-rw-r--r--  drivers/clk/mvebu/common.c | 2
-rw-r--r--  drivers/clk/mxs/clk-div.c | 1
-rw-r--r--  drivers/clk/mxs/clk-frac.c | 1
-rw-r--r--  drivers/clk/mxs/clk-imx23.c | 3
-rw-r--r--  drivers/clk/mxs/clk-imx28.c | 2
-rw-r--r--  drivers/clk/mxs/clk-pll.c | 1
-rw-r--r--  drivers/clk/mxs/clk-ref.c | 1
-rw-r--r--  drivers/clk/mxs/clk.h | 3
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-cgu.c | 1
-rw-r--r--  drivers/clk/pistachio/clk-pistachio.c | 19
-rw-r--r--  drivers/clk/pistachio/clk-pll.c | 81
-rw-r--r--  drivers/clk/pistachio/clk.c | 1
-rw-r--r--  drivers/clk/pistachio/clk.h | 14
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 2
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 2
-rw-r--r--  drivers/clk/pxa/clk-pxa3xx.c | 4
-rw-r--r--  drivers/clk/qcom/clk-branch.c | 2
-rw-r--r--  drivers/clk/qcom/clk-pll.c | 93
-rw-r--r--  drivers/clk/qcom/clk-pll.h | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg.c | 63
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 106
-rw-r--r--  drivers/clk/qcom/common.c | 5
-rw-r--r--  drivers/clk/qcom/gcc-apq8084.c | 13
-rw-r--r--  drivers/clk/qcom/gcc-ipq806x.c | 10
-rw-r--r--  drivers/clk/qcom/gcc-msm8660.c | 8
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 26
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 12
-rw-r--r--  drivers/clk/qcom/gcc-msm8974.c | 5
-rw-r--r--  drivers/clk/qcom/lcc-ipq806x.c | 6
-rw-r--r--  drivers/clk/qcom/lcc-msm8960.c | 8
-rw-r--r--  drivers/clk/qcom/mmcc-apq8084.c | 20
-rw-r--r--  drivers/clk/qcom/mmcc-msm8960.c | 27
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 16
-rw-r--r--  drivers/clk/rockchip/Makefile | 2
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-inverter.c | 116
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 9
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 100
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 18
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 16
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c | 881
-rw-r--r--  drivers/clk/rockchip/clk.c | 7
-rw-r--r--  drivers/clk/rockchip/clk.h | 82
-rw-r--r--  drivers/clk/samsung/clk-cpu.c | 7
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 3
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos3250.c | 34
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 54
-rw-r--r--  drivers/clk/samsung/clk-exynos4415.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 33
-rw-r--r--  drivers/clk/samsung/clk-exynos5260.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5410.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 3
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5440.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos7.c | 2
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 20
-rw-r--r--  drivers/clk/samsung/clk-s3c2410-dclk.c | 6
-rw-r--r--  drivers/clk/samsung/clk-s3c2410.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c2412.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c64xx.c | 3
-rw-r--r--  drivers/clk/samsung/clk-s5pv210-audss.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s5pv210.c | 4
-rw-r--r--  drivers/clk/samsung/clk.c | 4
-rw-r--r--  drivers/clk/samsung/clk.h | 3
-rw-r--r--  drivers/clk/shmobile/clk-div6.c | 8
-rw-r--r--  drivers/clk/shmobile/clk-emev2.c | 6
-rw-r--r--  drivers/clk/shmobile/clk-mstp.c | 87
-rw-r--r--  drivers/clk/shmobile/clk-r8a73a4.c | 2
-rw-r--r--  drivers/clk/shmobile/clk-r8a7740.c | 2
-rw-r--r--  drivers/clk/shmobile/clk-r8a7778.c | 4
-rw-r--r--  drivers/clk/shmobile/clk-r8a7779.c | 4
-rw-r--r--  drivers/clk/shmobile/clk-rcar-gen2.c | 4
-rw-r--r--  drivers/clk/shmobile/clk-rz.c | 3
-rw-r--r--  drivers/clk/shmobile/clk-sh73a0.c | 2
-rw-r--r--  drivers/clk/sirf/clk-atlas6.c | 1
-rw-r--r--  drivers/clk/sirf/clk-atlas7.c | 25
-rw-r--r--  drivers/clk/sirf/clk-common.c | 14
-rw-r--r--  drivers/clk/sirf/clk-prima2.c | 1
-rw-r--r--  drivers/clk/socfpga/clk-gate-a10.c | 3
-rw-r--r--  drivers/clk/socfpga/clk-gate.c | 5
-rw-r--r--  drivers/clk/socfpga/clk-periph-a10.c | 3
-rw-r--r--  drivers/clk/socfpga/clk-periph.c | 23
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c | 1
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 3
-rw-r--r--  drivers/clk/socfpga/clk.h | 3
-rw-r--r--  drivers/clk/spear/clk-aux-synth.c | 2
-rw-r--r--  drivers/clk/spear/clk-frac-synth.c | 2
-rw-r--r--  drivers/clk/spear/clk-gpt-synth.c | 2
-rw-r--r--  drivers/clk/spear/clk-vco-pll.c | 4
-rw-r--r--  drivers/clk/spear/clk.c | 2
-rw-r--r--  drivers/clk/spear/clk.h | 2
-rw-r--r--  drivers/clk/spear/spear1310_clock.c | 3
-rw-r--r--  drivers/clk/spear/spear1340_clock.c | 3
-rw-r--r--  drivers/clk/spear/spear3xx_clock.c | 2
-rw-r--r--  drivers/clk/spear/spear6xx_clock.c | 3
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 17
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 31
-rw-r--r--  drivers/clk/st/clkgen-mux.c | 103
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 11
-rw-r--r--  drivers/clk/sunxi/Makefile | 1
-rw-r--r--  drivers/clk/sunxi/clk-a20-gmac.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-factors.c | 39
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 3
-rw-r--r--  drivers/clk/sunxi/clk-simple-gates.c | 158
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-ar100.c | 36
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-core.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c | 3
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 228
-rw-r--r--  drivers/clk/sunxi/clk-usb.c | 3
-rw-r--r--  drivers/clk/tegra/Makefile | 3
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 1757
-rw-r--r--  drivers/clk/tegra/clk-dfll.h | 54
-rw-r--r--  drivers/clk/tegra/clk-divider.c | 1
-rw-r--r--  drivers/clk/tegra/clk-emc.c | 36
-rw-r--r--  drivers/clk/tegra/clk-periph-gate.c | 1
-rw-r--r--  drivers/clk/tegra/clk-periph.c | 1
-rw-r--r--  drivers/clk/tegra/clk-pll-out.c | 1
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 20
-rw-r--r--  drivers/clk/tegra/clk-super.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-audio.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-fixed.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-pmc.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-super-gen4.c | 5
-rw-r--r--  drivers/clk/tegra/clk-tegra114.c | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 166
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 83
-rw-r--r--  drivers/clk/tegra/clk-tegra20.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 1
-rw-r--r--  drivers/clk/tegra/clk.c | 40
-rw-r--r--  drivers/clk/tegra/clk.h | 3
-rw-r--r--  drivers/clk/tegra/cvb.c | 140
-rw-r--r--  drivers/clk/tegra/cvb.h | 67
-rw-r--r--  drivers/clk/ti/Makefile | 19
-rw-r--r--  drivers/clk/ti/apll.c | 11
-rw-r--r--  drivers/clk/ti/autoidle.c | 115
-rw-r--r--  drivers/clk/ti/clk-2xxx.c | 4
-rw-r--r--  drivers/clk/ti/clk-33xx.c | 3
-rw-r--r--  drivers/clk/ti/clk-3xxx-legacy.c | 1
-rw-r--r--  drivers/clk/ti/clk-3xxx.c | 235
-rw-r--r--  drivers/clk/ti/clk-43xx.c | 4
-rw-r--r--  drivers/clk/ti/clk-44xx.c | 2
-rw-r--r--  drivers/clk/ti/clk-54xx.c | 2
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 3
-rw-r--r--  drivers/clk/ti/clk-814x.c | 33
-rw-r--r--  drivers/clk/ti/clk-816x.c | 4
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 1
-rw-r--r--  drivers/clk/ti/clk.c | 154
-rw-r--r--  drivers/clk/ti/clkt_dflt.c | 316
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 370
-rw-r--r--  drivers/clk/ti/clkt_iclk.c | 101
-rw-r--r--  drivers/clk/ti/clock.h | 105
-rw-r--r--  drivers/clk/ti/clockdomain.c | 83
-rw-r--r--  drivers/clk/ti/composite.c | 4
-rw-r--r--  drivers/clk/ti/divider.c | 8
-rw-r--r--  drivers/clk/ti/dpll.c | 9
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 817
-rw-r--r--  drivers/clk/ti/dpll44xx.c | 227
-rw-r--r--  drivers/clk/ti/fapll.c | 4
-rw-r--r--  drivers/clk/ti/fixed-factor.c | 2
-rw-r--r--  drivers/clk/ti/gate.c | 6
-rw-r--r--  drivers/clk/ti/interface.c | 2
-rw-r--r--  drivers/clk/ti/mux.c | 6
-rw-r--r--  drivers/clk/ux500/Makefile | 1
-rw-r--r--  drivers/clk/ux500/abx500-clk.c | 1
-rw-r--r--  drivers/clk/ux500/clk-prcmu.c | 16
-rw-r--r--  drivers/clk/ux500/clk-sysctrl.c | 2
-rw-r--r--  drivers/clk/ux500/clk.h | 3
-rw-r--r--  drivers/clk/ux500/u8500_clk.c | 526
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 165
-rw-r--r--  drivers/clk/ux500/u8540_clk.c | 198
-rw-r--r--  drivers/clk/ux500/u9540_clk.c | 5
-rw-r--r--  drivers/clk/versatile/clk-icst.c | 5
-rw-r--r--  drivers/clk/versatile/clk-impd1.c | 1
-rw-r--r--  drivers/clk/versatile/clk-realview.c | 5
-rw-r--r--  drivers/clk/versatile/clk-sp810.c | 83
-rw-r--r--  drivers/clk/versatile/clk-versatile.c | 4
-rw-r--r--  drivers/clk/zte/Makefile | 2
-rw-r--r--  drivers/clk/zte/clk-zx296702.c | 126
-rw-r--r--  drivers/clk/zte/clk.c (renamed from drivers/clk/zte/clk-pll.c) | 141
-rw-r--r--  drivers/clk/zte/clk.h | 9
-rw-r--r--  drivers/clk/zynq/Makefile | 2
-rw-r--r--  drivers/clk/zynq/clkc.c | 1
-rw-r--r--  drivers/clocksource/Kconfig | 14
-rw-r--r--  drivers/clocksource/Makefile | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 52
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 37
-rw-r--r--  drivers/clocksource/asm9260_timer.c | 64
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 16
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 17
-rw-r--r--  drivers/clocksource/cadence_ttc_timer.c | 60
-rw-r--r--  drivers/clocksource/clksrc_st_lpc.c | 131
-rw-r--r--  drivers/clocksource/clps711x-timer.c | 6
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c | 24
-rw-r--r--  drivers/clocksource/dummy_timer.c | 10
-rw-r--r--  drivers/clocksource/dw_apb_timer.c | 146
-rw-r--r--  drivers/clocksource/em_sti.c | 39
-rw-r--r--  drivers/clocksource/exynos_mct.c | 101
-rw-r--r--  drivers/clocksource/fsl_ftm_timer.c | 35
-rw-r--r--  drivers/clocksource/h8300_timer8.c | 51
-rw-r--r--  drivers/clocksource/i8253.c | 77
-rw-r--r--  drivers/clocksource/meson6_timer.c | 50
-rw-r--r--  drivers/clocksource/metag_generic.c | 20
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 72
-rw-r--r--  drivers/clocksource/moxart_timer.c | 49
-rw-r--r--  drivers/clocksource/mtk_timer.c | 32
-rw-r--r--  drivers/clocksource/mxs_timer.c | 80
-rw-r--r--  drivers/clocksource/nomadik-mtu.c | 58
-rw-r--r--  drivers/clocksource/pxa_timer.c | 39
-rw-r--r--  drivers/clocksource/qcom-timer.c | 24
-rw-r--r--  drivers/clocksource/rockchip_timer.c | 32
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 41
-rw-r--r--  drivers/clocksource/sh_cmt.c | 71
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 42
-rw-r--r--  drivers/clocksource/sh_tmu.c | 64
-rw-r--r--  drivers/clocksource/sun4i_timer.c | 41
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 93
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 45
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 53
-rw-r--r--  drivers/clocksource/time-efm32.c | 66
-rw-r--r--  drivers/clocksource/time-orion.c | 46
-rw-r--r--  drivers/clocksource/time-pistachio.c | 217
-rw-r--r--  drivers/clocksource/timer-atlas7.c | 19
-rw-r--r--  drivers/clocksource/timer-atmel-pit.c | 45
-rw-r--r--  drivers/clocksource/timer-atmel-st.c | 69
-rw-r--r--  drivers/clocksource/timer-digicolor.c | 41
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 77
-rw-r--r--  drivers/clocksource/timer-integrator-ap.c | 58
-rw-r--r--  drivers/clocksource/timer-keystone.c | 44
-rw-r--r--  drivers/clocksource/timer-prima2.c | 34
-rw-r--r--  drivers/clocksource/timer-sp804.c | 54
-rw-r--r--  drivers/clocksource/timer-stm32.c | 30
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 45
-rw-r--r--  drivers/clocksource/timer-u300.c | 155
-rw-r--r--  drivers/clocksource/vf_pit_timer.c | 27
-rw-r--r--  drivers/clocksource/vt8500_timer.c | 29
-rw-r--r--  drivers/clocksource/zevio-timer.c | 44
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 70
-rw-r--r--  drivers/cpufreq/Makefile | 8
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 93
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 88
-rw-r--r--  drivers/cpufreq/cpufreq.c | 506
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 25
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 196
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 40
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 67
-rw-r--r--  drivers/cpufreq/cpufreq_opp.c | 4
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 2
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 237
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.h | 89
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c | 236
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c | 210
-rw-r--r--  drivers/cpufreq/freq_table.c | 24
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c | 20
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 61
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/mt8173-cpufreq.c | 527
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 4
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 5
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 199
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 4
-rw-r--r--  drivers/cpufreq/sfi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 9
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c | 214
-rw-r--r--  drivers/cpufreq/tegra20-cpufreq.c (renamed from drivers/cpufreq/tegra-cpufreq.c) | 0
-rw-r--r--  drivers/cpuidle/coupled.c | 30
-rw-r--r--  drivers/cpuidle/cpuidle-calxeda.c | 15
-rw-r--r--  drivers/cpuidle/cpuidle.c | 17
-rw-r--r--  drivers/cpuidle/cpuidle.h | 13
-rw-r--r--  drivers/cpuidle/driver.c | 4
-rw-r--r--  drivers/crypto/Kconfig | 17
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/bfin_crc.c | 3
-rw-r--r--  drivers/crypto/caam/Kconfig | 10
-rw-r--r--  drivers/crypto/caam/caamalg.c | 2701
-rw-r--r--  drivers/crypto/caam/caamhash.c | 76
-rw-r--r--  drivers/crypto/caam/caamrng.c | 26
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/ctrl.c | 154
-rw-r--r--  drivers/crypto/caam/desc.h | 23
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 5
-rw-r--r--  drivers/crypto/caam/jr.c | 30
-rw-r--r--  drivers/crypto/caam/regs.h | 64
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 25
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 2
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 313
-rw-r--r--  drivers/crypto/marvell/cesa.c | 1
-rw-r--r--  drivers/crypto/nx/Kconfig | 17
-rw-r--r--  drivers/crypto/nx/Makefile | 8
-rw-r--r--  drivers/crypto/nx/nx-842-crypto.c | 580
-rw-r--r--  drivers/crypto/nx/nx-842-platform.c | 84
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 42
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 139
-rw-r--r--  drivers/crypto/nx/nx-842.c | 554
-rw-r--r--  drivers/crypto/nx/nx-842.h | 65
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 157
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 28
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 81
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 70
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 70
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 72
-rw-r--r--  drivers/crypto/nx/nx.c | 29
-rw-r--r--  drivers/crypto/nx/nx.h | 23
-rw-r--r--  drivers/crypto/omap-aes.c | 86
-rw-r--r--  drivers/crypto/omap-des.c | 3
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 677
-rw-r--r--  drivers/crypto/qat/Kconfig | 15
-rw-r--r--  drivers/crypto/qat/Makefile | 1
-rw-r--r--  drivers/crypto/qat/qat_common/.gitignore | 1
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 46
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_engine.c | 42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 290
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 5
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 9
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_common.h | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 53
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 286
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c (renamed from drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c) | 37
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 104
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 438
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 146
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 309
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 5
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 16
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 112
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 344
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 652
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 26
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 14
-rw-r--r--  drivers/crypto/qat/qat_common/qat_rsakey.asn1 | 5
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 27
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/Makefile | 5
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 145
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 38
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 12
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 97
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 9
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 139
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/Makefile | 5
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 172
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 68
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 393
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.h (renamed from drivers/crypto/qat/qat_dh895xcc/qat_admin.c) | 70
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | 258
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/sahara.c | 48
-rw-r--r--  drivers/crypto/sunxi-ss/Makefile | 2
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 542
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 425
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 492
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 201
-rw-r--r--  drivers/crypto/talitos.c | 620
-rw-r--r--  drivers/crypto/talitos.h | 8
-rw-r--r--  drivers/crypto/vmx/aes.c | 3
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 3
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 11
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 34
-rw-r--r--  drivers/crypto/vmx/ghash.c | 4
-rw-r--r--  drivers/crypto/vmx/ghashp8-ppc.pl | 6
-rw-r--r--  drivers/crypto/vmx/ppc-xlate.pl | 1
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 170
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.h | 70
-rw-r--r--  drivers/dma/Kconfig | 601
-rw-r--r--  drivers/dma/Makefile | 87
-rw-r--r--  drivers/dma/amba-pl08x.c | 192
-rw-r--r--  drivers/dma/at_hdmac.c | 259
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 9
-rw-r--r--  drivers/dma/at_xdmac.c | 209
-rw-r--r--  drivers/dma/coh901318.c | 2
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 691
-rw-r--r--  drivers/dma/dma-jz4780.c | 124
-rw-r--r--  drivers/dma/dmaengine.c | 4
-rw-r--r--  drivers/dma/dw/Kconfig | 6
-rw-r--r--  drivers/dma/dw/core.c | 2
-rw-r--r--  drivers/dma/edma.c | 2
-rw-r--r--  drivers/dma/hsu/hsu.c | 39
-rw-r--r--  drivers/dma/hsu/hsu.h | 1
-rw-r--r--  drivers/dma/idma64.c | 710
-rw-r--r--  drivers/dma/idma64.h | 233
-rw-r--r--  drivers/dma/imx-dma.c | 25
-rw-r--r--  drivers/dma/imx-sdma.c | 254
-rw-r--r--  drivers/dma/ioat/Makefile | 2
-rw-r--r--  drivers/dma/ioat/dca.c | 374
-rw-r--r--  drivers/dma/ioat/dma.c | 1655
-rw-r--r--  drivers/dma/ioat/dma.h | 353
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 916
-rw-r--r--  drivers/dma/ioat/dma_v2.h | 175
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 1717
-rw-r--r--  drivers/dma/ioat/hw.h | 16
-rw-r--r--  drivers/dma/ioat/init.c | 1314
-rw-r--r--  drivers/dma/ioat/pci.c | 258
-rw-r--r--  drivers/dma/ioat/prep.c | 715
-rw-r--r--  drivers/dma/ioat/sysfs.c | 135
-rw-r--r--  drivers/dma/iop-adma.c | 9
-rw-r--r--  drivers/dma/ipu/ipu_irq.c | 64
-rw-r--r--  drivers/dma/k3dma.c | 3
-rw-r--r--  drivers/dma/lpc18xx-dmamux.c | 183
-rw-r--r--  drivers/dma/mic_x100_dma.h | 2
-rw-r--r--  drivers/dma/mmp_pdma.c | 3
-rw-r--r--  drivers/dma/mmp_tdma.c | 3
-rw-r--r--  drivers/dma/mv_xor.c | 78
-rw-r--r--  drivers/dma/pch_dma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 6
-rw-r--r--  drivers/dma/pxa_dma.c | 15
-rw-r--r--  drivers/dma/sh/Kconfig | 24
-rw-r--r--  drivers/dma/sh/Makefile | 4
-rw-r--r--  drivers/dma/sirf-dma.c | 3
-rw-r--r--  drivers/dma/ste_dma40.c | 2
-rw-r--r--  drivers/dma/sun4i-dma.c | 1288
-rw-r--r--  drivers/dma/sun6i-dma.c | 2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 63
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 41
-rw-r--r--  drivers/dma/timb_dma.c | 4
-rw-r--r--  drivers/dma/virt-dma.c | 19
-rw-r--r--  drivers/dma/virt-dma.h | 13
-rw-r--r--  drivers/dma/xgene-dma.c | 69
-rw-r--r--  drivers/dma/zx296702_dma.c | 951
-rw-r--r--  drivers/edac/Kconfig | 10
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/mce_amd.c | 3
-rw-r--r--  drivers/edac/mce_amd_inj.c | 375
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 72
-rw-r--r--  drivers/edac/xgene_edac.c | 1
-rw-r--r--  drivers/extcon/extcon-arizona.c | 101
-rw-r--r--  drivers/extcon/extcon-gpio.c | 18
-rw-r--r--  drivers/extcon/extcon-max77693.c | 94
-rw-r--r--  drivers/extcon/extcon-max77843.c | 75
-rw-r--r--  drivers/extcon/extcon-palmas.c | 129
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 1
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 1
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 1
-rw-r--r--  drivers/extcon/extcon.c | 101
-rw-r--r--  drivers/firmware/Kconfig | 3
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 2
-rw-r--r--  drivers/firmware/efi/Kconfig | 2
-rw-r--r--  drivers/firmware/efi/cper.c | 15
-rw-r--r--  drivers/firmware/efi/efi.c | 5
-rw-r--r--  drivers/firmware/psci.c | 382
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 10
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/devres.c | 18
-rw-r--r--  drivers/gpio/gpio-74xx-mmio.c | 2
-rw-r--r--  drivers/gpio/gpio-adp5588.c | 10
-rw-r--r--  drivers/gpio/gpio-altera.c | 4
-rw-r--r--  drivers/gpio/gpio-ath79.c | 204
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 17
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 320
-rw-r--r--  drivers/gpio/gpio-davinci.c | 22
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 2
-rw-r--r--  drivers/gpio/gpio-em.c | 35
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 8
-rw-r--r--  drivers/gpio/gpio-etraxfs.c | 329
-rw-r--r--  drivers/gpio/gpio-generic.c | 41
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 23
-rw-r--r--  drivers/gpio/gpio-max732x.c | 12
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 4
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 121
-rw-r--r--  drivers/gpio/gpio-msm-v2.c | 23
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 8
-rw-r--r--  drivers/gpio/gpio-mxc.c | 9
-rw-r--r--  drivers/gpio/gpio-mxs.c | 2
-rw-r--r--  drivers/gpio/gpio-omap.c | 159
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 4
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 14
-rw-r--r--  drivers/gpio/gpio-pch.c | 4
-rw-r--r--  drivers/gpio/gpio-pxa.c | 8
-rw-r--r--  drivers/gpio/gpio-rcar.c | 24
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 6
-rw-r--r--  drivers/gpio/gpio-sta2x11.c | 2
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 10
-rw-r--r--  drivers/gpio/gpio-tegra.c | 9
-rw-r--r--  drivers/gpio/gpio-timberdale.c | 12
-rw-r--r--  drivers/gpio/gpio-tz1090.c | 4
-rw-r--r--  drivers/gpio/gpio-vf610.c | 11
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 4
-rw-r--r--  drivers/gpio/gpio-xlp.c | 2
-rw-r--r--  drivers/gpio/gpio-zx.c | 324
-rw-r--r--  drivers/gpio/gpio-zynq.c | 13
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 2
-rw-r--r--  drivers/gpio/gpiolib-of.c | 34
-rw-r--r--  drivers/gpio/gpiolib.c | 97
-rw-r--r--  drivers/gpu/drm/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 229
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 269
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 670
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 543
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 838
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 282
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 213
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 580
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 128
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 169
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 187
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 305
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 181
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h | 182
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 857
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 245
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_regs.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 103
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 99
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h | 398
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 39
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 1246
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h | 1282
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h | 6080
-rw-r--r--  drivers/gpu/drm/amd/include/atom-bits.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-bits.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/atom-names.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-names.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/atom-types.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-types.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h (renamed from drivers/gpu/drm/amd/amdgpu/atombios.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 624
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_linux.h | 135
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/pptable.h (renamed from drivers/gpu/drm/amd/amdgpu/pptable.h) | 6
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h | 417
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 424
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 134
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 81
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 33
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 121
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 48
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 16
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 230
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 36
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 4
-rw-r--r--  drivers/gpu/drm/bridge/dw_hdmi.c | 387
-rw-r--r--  drivers/gpu/drm/bridge/dw_hdmi.h | 8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c (renamed from drivers/gpu/drm/bridge/ptn3460.c) | 0
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c (renamed from drivers/gpu/drm/bridge/ps8622.c) | 0
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 41
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 15
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 97
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 139
-rw-r--r--  drivers/gpu/drm/drm_context.c | 51
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 239
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 75
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 99
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 30
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 19
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 63
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 379
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 115
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 334
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 2
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 59
-rw-r--r--  drivers/gpu/drm/drm_of.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 45
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 113
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 147
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 123
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 186
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 86
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 286
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 111
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 224
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 134
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 138
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 174
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 164
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.h | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 129
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 182
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 154
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 445
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 56
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 124
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1021
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 236
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Makefile | 7
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 210
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h | 19
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 404
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 197
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c | 23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c | 43
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h | 33
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 261
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h | 17
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 182
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 48
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 342
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 91
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 324
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 898
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 102
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 787
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 775
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 307
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 102
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 560
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 181
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 249
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 232
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1117
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4293
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 530
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 163
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 540
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 110
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 245
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 446
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 508
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 895
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 74
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 335
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 94
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 839
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 408
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 95
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 115
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 100
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 5
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c | 22
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 41
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 25
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 221
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 33
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 206
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 58
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 43
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 211
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 92
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 44
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 270
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 216
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (renamed from drivers/gpu/drm/msm/dsi/dsi_phy.c) | 413
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 89
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 150
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 166
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 42
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 33
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 22
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 17
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 79
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 28
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 101
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 1437
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 52
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 57
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 38
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 24
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 139
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 243
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 43
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 18
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 93
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 59
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 363
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 113
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 28
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_format.c | 46
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.c | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 95
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 34
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 199
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/client.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/device.h | 73
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/ioctl.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/notify.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/object.h | 70
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/os.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/client.h | 65
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/debug.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 274
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h | 62
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h | 51
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/engine.h | 81
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/enum.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h | 62
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/handle.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/memory.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/mm.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h | 53
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/object.h | 261
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h | 22
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/option.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/parent.h | 58
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/pci.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/printk.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h | 28
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | 139
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/device.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | 75
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 160
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 118
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h | 63
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h | 50
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h | 38
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h | 44
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 70
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 139
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 151
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 54
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h | 37
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 78
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h | 106
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | 83
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 221
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 195
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 84
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 123
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 151
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 211
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 199
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/device.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/notify.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c | 200
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/Kbuild | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/client.c | 188
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engctx.c | 239
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engine.c | 154
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/enum.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | 379
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/handle.c | 221
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 395
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/memory.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/namedb.c | 199
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/object.c | 400
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/oproxy.c | 200
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/option.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/parent.c | 159
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/printk.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 208
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/Kbuild | 2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c2923
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c204
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c131
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c427
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c478
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c1686
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c295
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c371
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c325
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c114
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c301
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h127
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c244
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c)26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c247
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c139
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c1310
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c536
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c265
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c)41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c186
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c1667
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h231
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c127
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c)59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c139
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c399
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c)83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c)96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c131
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c133
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c163
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c195
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c292
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c345
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c415
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c285
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c220
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c481
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c924
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c1037
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c293
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c323
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c638
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h170
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c208
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c335
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c533
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c327
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c143
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c135
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c196
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c1577
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h127
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c227
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c349
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c223
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c1213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c824
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c567
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c220
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c331
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c218
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c218
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c590
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c877
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c84
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c406
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c248
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c909
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c126
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c154
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c152
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c188
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c224
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c192
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c133
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c205
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c287
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c620
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c92
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c187
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_family.h)76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c326
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c347
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c282
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c173
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c294
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c125
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c197
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c121
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c351
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c342
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c304
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c176
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c507
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c)50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c374
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c181
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c181
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c742
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c245
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c119
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c301
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c234
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c247
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c234
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c195
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c182
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c305
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c122
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c158
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c40
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c26
-rw-r--r--drivers/gpu/drm/panel/Kconfig16
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c298
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c (renamed from drivers/gpu/drm/panel/panel-ld9040.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c (renamed from drivers/gpu/drm/panel/panel-s6e8aa0.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c99
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c66
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c40
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c336
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c91
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c392
-rw-r--r--drivers/gpu/drm/radeon/ni.c25
-rw-r--r--drivers/gpu/drm/radeon/r600.c155
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c220
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c109
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c40
-rw-r--r--drivers/gpu/drm/radeon/si.c336
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c79
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c318
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h88
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/Makefile7
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c141
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c (renamed from drivers/gpu/drm/sti/sti_drm_crtc.c)211
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c243
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h5
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c251
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.h18
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c (renamed from drivers/gpu/drm/sti/sti_drm_drv.c)147
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h (renamed from drivers/gpu/drm/sti/sti_drm_drv.h)6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c536
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c27
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c482
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c213
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h131
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c72
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h27
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c122
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h71
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c54
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c72
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h19
-rw-r--r--drivers/gpu/drm/tegra/dc.c300
-rw-r--r--drivers/gpu/drm/tegra/dc.h24
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c63
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h2
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/drm.h10
-rw-r--r--drivers/gpu/drm/tegra/dsi.c126
-rw-r--r--drivers/gpu/drm/tegra/dsi.h4
-rw-r--r--drivers/gpu/drm/tegra/fb.c35
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c78
-rw-r--r--drivers/gpu/drm/tegra/output.c20
-rw-r--r--drivers/gpu/drm/tegra/rgb.c49
-rw-r--r--drivers/gpu/drm/tegra/sor.c1606
-rw-r--r--drivers/gpu/drm/tegra/sor.h298
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c41
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/includeCheck.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h110
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h2071
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h457
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h1487
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h99
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h50
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1204
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h1633
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h (renamed from drivers/gpu/drm/vmwgfx/svga_escape.h)2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h (renamed from drivers/gpu/drm/vmwgfx/svga_overlay.h)10
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h (renamed from drivers/gpu/drm/vmwgfx/svga_reg.h)664
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h46
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h2627
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h912
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c1294
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h209
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c1303
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c786
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c662
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c508
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h337
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1939
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c575
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1654
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h194
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c212
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c277
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c556
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c500
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c555
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1266
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c315
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/gpu/host1x/mipi.c253
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c95
-rw-r--r--drivers/gpu/vga/vgaarb.c142
-rw-r--r--drivers/hid/Kconfig7
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-chicony.c26
-rw-r--r--drivers/hid/hid-core.c37
-rw-r--r--drivers/hid/hid-cp2112.c109
-rw-r--r--drivers/hid/hid-gembird.c116
-rw-r--r--drivers/hid/hid-ids.h23
-rw-r--r--drivers/hid/hid-input.c12
-rw-r--r--drivers/hid/hid-lenovo.c59
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c15
-rw-r--r--drivers/hid/hid-picolcd_backlight.c3
-rw-r--r--drivers/hid/hid-picolcd_cir.c3
-rw-r--r--drivers/hid/hid-picolcd_lcd.c3
-rw-r--r--drivers/hid/hid-rmi.c163
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/hid-sony.c22
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c28
-rw-r--r--drivers/hid/usbhid/hid-core.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c9
-rw-r--r--drivers/hid/wacom.h7
-rw-r--r--drivers/hid/wacom_sys.c361
-rw-r--r--drivers/hid/wacom_wac.c507
-rw-r--r--drivers/hid/wacom_wac.h15
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hv/channel.c4
-rw-r--r--drivers/hv/channel_mgmt.c34
-rw-r--r--drivers/hv/hv.c152
-rw-r--r--drivers/hv/hv_balloon.c26
-rw-r--r--drivers/hv/hv_fcopy.c21
-rw-r--r--drivers/hv/hv_kvp.c3
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h16
-rw-r--r--drivers/hv/ring_buffer.c14
-rw-r--r--drivers/hv/vmbus_drv.c353
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c18
-rw-r--r--drivers/hwmon/f71882fg.c176
-rw-r--r--drivers/hwmon/fam15h_power.c36
-rw-r--r--drivers/hwmon/g762.c2
-rw-r--r--drivers/hwmon/it87.c43
-rw-r--r--drivers/hwmon/lm70.c34
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/nct7802.c319
-rw-r--r--drivers/hwmon/nct7904.c58
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig20
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/adm1275.c358
-rw-r--r--drivers/hwmon/pmbus/lm25066.c7
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c482
-rw-r--r--drivers/hwmon/pmbus/max20751.c64
-rw-r--r--drivers/hwmon/pmbus/max34440.c9
-rw-r--r--drivers/hwmon/pmbus/max8688.c19
-rw-r--r--drivers/hwmon/pmbus/pmbus.c5
-rw-r--r--drivers/hwmon/pmbus/pmbus.h448
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c31
-rw-r--r--drivers/hwmon/pmbus/zl6100.c11
-rw-r--r--drivers/hwmon/sht15.c20
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h7
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c13
-rw-r--r--drivers/i2c/busses/Kconfig20
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-cadence.c69
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c4
-rw-r--r--drivers/i2c/busses/i2c-emev2.c332
-rw-r--r--drivers/i2c/busses/i2c-i801.c120
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c15
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c513
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c21
-rw-r--r--drivers/i2c/busses/i2c-omap.c611
-rw-r--r--drivers/i2c/busses/i2c-parport.c25
-rw-r--r--drivers/i2c/busses/i2c-parport.h8
-rw-r--r--drivers/i2c/busses/i2c-pxa.c112
-rw-r--r--drivers/i2c/busses/i2c-tegra.c52
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c10
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c74
-rw-r--r--drivers/i2c/i2c-core.c269
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c7
-rw-r--r--drivers/i2c/muxes/Kconfig11
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c290
-rw-r--r--drivers/idle/intel_idle.c72
-rw-r--r--drivers/iio/accel/Kconfig26
-rw-r--r--drivers/iio/accel/bma180.c1
-rw-r--r--drivers/iio/accel/bmc150-accel.c245
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/mma8452.c225
-rw-r--r--drivers/iio/accel/mma9551_core.c35
-rw-r--r--drivers/iio/accel/mma9551_core.h6
-rw-r--r--drivers/iio/accel/mma9553.c83
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c6
-rw-r--r--drivers/iio/accel/st_accel_i2c.c6
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/accel/stk8312.c429
-rw-r--r--drivers/iio/accel/stk8ba50.c369
-rw-r--r--drivers/iio/adc/Kconfig55
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/berlin2-adc.c22
-rw-r--r--drivers/iio/adc/cc10001_adc.c26
-rw-r--r--drivers/iio/adc/mcp320x.c18
-rw-r--r--drivers/iio/adc/mcp3422.c1
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/ti-adc081c.c1
-rw-r--r--drivers/iio/adc/twl4030-madc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c81
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c11
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c52
-rw-r--r--drivers/iio/dac/ad5064.c1
-rw-r--r--drivers/iio/dac/ad5380.c1
-rw-r--r--drivers/iio/dac/ad5446.c1
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/dac/max5821.c1
-rw-r--r--drivers/iio/frequency/adf4350.c1
-rw-r--r--drivers/iio/gyro/Kconfig3
-rw-r--r--drivers/iio/gyro/adis16136.c6
-rw-r--r--drivers/iio/gyro/adis16260.c137
-rw-r--r--drivers/iio/gyro/itg3200_core.c1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c3
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c1
-rw-r--r--drivers/iio/humidity/dht11.c65
-rw-r--r--drivers/iio/humidity/si7005.c1
-rw-r--r--drivers/iio/imu/adis16400_core.c46
-rw-r--r--drivers/iio/imu/adis16480.c39
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c25
-rw-r--r--drivers/iio/imu/kmx61.c8
-rw-r--r--drivers/iio/industrialio-buffer.c35
-rw-r--r--drivers/iio/industrialio-core.c33
-rw-r--r--drivers/iio/industrialio-event.c8
-rw-r--r--drivers/iio/industrialio-trigger.c27
-rw-r--r--drivers/iio/industrialio-triggered-buffer.c12
-rw-r--r--drivers/iio/light/Kconfig36
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/acpi-als.c18
-rw-r--r--drivers/iio/light/apds9300.c1
-rw-r--r--drivers/iio/light/bh1750.c1
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3232.c2
-rw-r--r--drivers/iio/light/cm3323.c21
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c2
-rw-r--r--drivers/iio/light/hid-sensor-prox.c3
-rw-r--r--drivers/iio/light/isl29125.c13
-rw-r--r--drivers/iio/light/jsa1212.c1
-rw-r--r--drivers/iio/light/ltr501.c3
-rw-r--r--drivers/iio/light/opt3001.c804
-rw-r--r--drivers/iio/light/pa12203001.c483
-rw-r--r--drivers/iio/light/rpr0521.c615
-rw-r--r--drivers/iio/light/stk3310.c82
-rw-r--r--drivers/iio/light/tcs3414.c3
-rw-r--r--drivers/iio/light/tcs3472.c1
-rw-r--r--drivers/iio/light/tsl4531.c1
-rw-r--r--drivers/iio/light/vcnl4000.c1
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c105
-rw-r--r--drivers/iio/magnetometer/mmc35240.c80
-rw-r--r--drivers/iio/magnetometer/st_magn.h3
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c7
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c98
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c6
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c1
-rw-r--r--drivers/iio/pressure/Kconfig6
-rw-r--r--drivers/iio/pressure/ms5611.h16
-rw-r--r--drivers/iio/pressure/ms5611_core.c82
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c6
-rw-r--r--drivers/iio/pressure/ms5611_spi.c6
-rw-r--r--drivers/iio/pressure/st_pressure_core.c3
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c1
-rw-r--r--drivers/iio/proximity/sx9500.c28
-rw-r--r--drivers/iio/temperature/mlx90614.c21
-rw-r--r--drivers/iio/temperature/tmp006.c8
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile3
-rw-r--r--drivers/infiniband/core/agent.c4
-rw-r--r--drivers/infiniband/core/cache.c773
-rw-r--r--drivers/infiniband/core/cm.c276
-rw-r--r--drivers/infiniband/core/cma.c657
-rw-r--r--drivers/infiniband/core/core_priv.h54
-rw-r--r--drivers/infiniband/core/device.c335
-rw-r--r--drivers/infiniband/core/iwpm_msg.c33
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.h28
-rw-r--r--drivers/infiniband/core/mad.c75
-rw-r--r--drivers/infiniband/core/mad_priv.h1
-rw-r--r--drivers/infiniband/core/multicast.c15
-rw-r--r--drivers/infiniband/core/netlink.c55
-rw-r--r--drivers/infiniband/core/opa_smi.h4
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c728
-rw-r--r--drivers/infiniband/core/sa_query.c523
-rw-r--r--drivers/infiniband/core/smi.c37
-rw-r--r--drivers/infiniband/core/smi.h4
-rw-r--r--drivers/infiniband/core/sysfs.c53
-rw-r--r--drivers/infiniband/core/ucm.c13
-rw-r--r--drivers/infiniband/core/ucma.c151
-rw-r--r--drivers/infiniband/core/user_mad.c6
-rw-r--r--drivers/infiniband/core/uverbs.h16
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c147
-rw-r--r--drivers/infiniband/core/uverbs_main.c448
-rw-r--r--drivers/infiniband/core/verbs.c198
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c82
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c8
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c57
-rw-r--r--drivers/infiniband/hw/mlx4/main.c924
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c15
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h40
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c11
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c5
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c30
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h14
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c124
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h54
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c58
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c292
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h55
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c109
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h68
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h147
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h30
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c257
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c25
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c91
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h206
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c38
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c482
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c339
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c92
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c301
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h25
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c93
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/evdev.c13
-rw-r--r--drivers/input/ff-core.c5
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/input-leds.c16
-rw-r--r--drivers/input/input.c10
-rw-r--r--drivers/input/joydev.c11
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/joystick/zhenhua.c13
-rw-r--r--drivers/input/keyboard/Kconfig17
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/cap11xx.c145
-rw-r--r--drivers/input/keyboard/gpio_keys.c9
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c7
-rw-r--r--drivers/input/keyboard/imx_keypad.c2
-rw-r--r--drivers/input/keyboard/lm8333.c1
-rw-r--r--drivers/input/keyboard/matrix_keypad.c6
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c1
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c1
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c10
-rw-r--r--drivers/input/keyboard/qt1070.c1
-rw-r--r--drivers/input/keyboard/qt2160.c1
-rw-r--r--drivers/input/keyboard/samsung-keypad.c6
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c227
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c63
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c1
-rw-r--r--drivers/input/misc/Kconfig29
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ab8500-ponkey.c1
-rw-r--r--drivers/input/misc/adxl34x-i2c.c1
-rw-r--r--drivers/input/misc/arizona-haptics.c26
-rw-r--r--drivers/input/misc/axp20x-pek.c1
-rw-r--r--drivers/input/misc/bma150.c8
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c1
-rw-r--r--drivers/input/misc/drv260x.c9
-rw-r--r--drivers/input/misc/drv2665.c5
-rw-r--r--drivers/input/misc/drv2667.c7
-rw-r--r--drivers/input/misc/gp2ap002a00f.c2
-rw-r--r--drivers/input/misc/kxtj9.c1
-rw-r--r--drivers/input/misc/max77693-haptic.c92
-rw-r--r--drivers/input/misc/max77843-haptic.c358
-rw-r--r--drivers/input/misc/max8997_haptic.c3
-rw-r--r--drivers/input/misc/mpu3050.c1
-rw-r--r--drivers/input/misc/pcf8574_keypad.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c268
-rw-r--r--drivers/input/misc/pwm-beeper.c1
-rw-r--r--drivers/input/misc/rb532_button.c1
-rw-r--r--drivers/input/misc/regulator-haptic.c1
-rw-r--r--drivers/input/misc/sparcspkr.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/misc/uinput.c6
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/Makefile2
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/bcm5974.c165
-rw-r--r--drivers/input/mouse/cyapa.c183
-rw-r--r--drivers/input/mouse/cyapa.h157
-rw-r--r--drivers/input/mouse/cyapa_gen3.c15
-rw-r--r--drivers/input/mouse/cyapa_gen5.c1255
-rw-r--r--drivers/input/mouse/cyapa_gen6.c749
-rw-r--r--drivers/input/mouse/elan_i2c_core.c44
-rw-r--r--drivers/input/mouse/elantech.c35
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/mouse/sentelic.c14
-rw-r--r--drivers/input/mouse/synaptics.c18
-rw-r--r--drivers/input/mouse/synaptics_i2c.c1
-rw-r--r--drivers/input/serio/ambakmi.c8
-rw-r--r--drivers/input/serio/i8042.c45
-rw-r--r--drivers/input/serio/i8042.h13
-rw-r--r--drivers/input/serio/serio.c5
-rw-r--r--drivers/input/touchscreen/Kconfig45
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c1
-rw-r--r--drivers/input/touchscreen/ads7846.c3
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c240
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c1
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c1
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c1
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c386
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c1
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c2
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c188
-rw-r--r--drivers/input/touchscreen/goodix.c38
-rw-r--r--drivers/input/touchscreen/ili210x.c5
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c523
-rw-r--r--drivers/input/touchscreen/max11801_ts.c1
-rw-r--r--drivers/input/touchscreen/mms114.c2
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c68
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c147
-rw-r--r--drivers/input/touchscreen/st1232.c1
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c8
-rw-r--r--drivers/input/touchscreen/sur40.c1
-rw-r--r--drivers/input/touchscreen/tsc2005.c264
-rw-r--r--drivers/input/touchscreen/tsc2007.c1
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c1
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c49
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c13
-rw-r--r--drivers/input/touchscreen/zforce_ts.c98
-rw-r--r--drivers/iommu/Kconfig5
-rw-r--r--drivers/iommu/amd_iommu.c119
-rw-r--r--drivers/iommu/amd_iommu_init.c12
-rw-r--r--drivers/iommu/amd_iommu_v2.c28
-rw-r--r--drivers/iommu/arm-smmu-v3.c126
-rw-r--r--drivers/iommu/arm-smmu.c45
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/fsl_pamu.c26
-rw-r--r--drivers/iommu/intel-iommu.c745
-rw-r--r--drivers/iommu/intel_irq_remapping.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c143
-rw-r--r--drivers/iommu/io-pgtable.c5
-rw-r--r--drivers/iommu/io-pgtable.h14
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/iommu/ipmmu-vmsa.c19
-rw-r--r--drivers/iommu/irq_remapping.c2
-rw-r--r--drivers/iommu/msm_iommu.c4
-rw-r--r--drivers/iommu/of_iommu.c8
-rw-r--r--drivers/iommu/omap-iommu-debug.c132
-rw-r--r--drivers/iommu/omap-iommu.c198
-rw-r--r--drivers/iommu/omap-iommu.h79
-rw-r--r--drivers/iommu/omap-iopgtable.h27
-rw-r--r--drivers/iommu/tegra-smmu.c306
-rw-r--r--drivers/irqchip/Kconfig10
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/exynos-combiner.c20
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c5
-rw-r--r--drivers/irqchip/irq-atmel-aic.c4
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c4
-rw-r--r--drivers/irqchip/irq-bcm2835.c111
-rw-r--r--drivers/irqchip/irq-bcm2836.c275
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c7
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c76
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c10
-rw-r--r--drivers/irqchip/irq-clps711x.c3
-rw-r--r--drivers/irqchip/irq-crossbar.c7
-rw-r--r--drivers/irqchip/irq-digicolor.c3
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c56
-rw-r--r--drivers/irqchip/irq-gic-v2m.c52
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c140
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c93
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c251
-rw-r--r--drivers/irqchip/irq-gic-v3.c93
-rw-r--r--drivers/irqchip/irq-gic.c228
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-i8259.c384
-rw-r--r--drivers/irqchip/irq-imgpdc.c11
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c278
-rw-r--r--drivers/irqchip/irq-ingenic.c3
-rw-r--r--drivers/irqchip/irq-keystone.c6
-rw-r--r--drivers/irqchip/irq-metag-ext.c9
-rw-r--r--drivers/irqchip/irq-metag.c3
-rw-r--r--drivers/irqchip/irq-mips-cpu.c3
-rw-r--r--drivers/irqchip/irq-mips-gic.c174
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/irqchip/irq-moxart.c3
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c3
-rw-r--r--drivers/irqchip/irq-mxs.c3
-rw-r--r--drivers/irqchip/irq-nvic.c3
-rw-r--r--drivers/irqchip/irq-omap-intc.c38
-rw-r--r--drivers/irqchip/irq-or1k-pic.c3
-rw-r--r--drivers/irqchip/irq-orion.c9
-rw-r--r--drivers/irqchip/irq-renesas-h8300h.c2
-rw-r--r--drivers/irqchip/irq-renesas-h8s.c2
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c16
-rw-r--r--drivers/irqchip/irq-s3c24xx.c11
-rw-r--r--drivers/irqchip/irq-sa11x0.c1
-rw-r--r--drivers/irqchip/irq-sirfsoc.c50
-rw-r--r--drivers/irqchip/irq-sun4i.c3
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c7
-rw-r--r--drivers/irqchip/irq-tb10x.c9
-rw-r--r--drivers/irqchip/irq-tegra.c3
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c10
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c3
-rw-r--r--drivers/irqchip/irq-vic.c7
-rw-r--r--drivers/irqchip/irq-vt8500.c9
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c3
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c3
-rw-r--r--drivers/irqchip/irq-zevio.c3
-rw-r--r--drivers/irqchip/irqchip.h11
-rw-r--r--drivers/irqchip/spear-shirq.c9
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c35
-rw-r--r--drivers/isdn/icn/icn.h2
-rw-r--r--drivers/isdn/mISDN/dsp_audio.c22
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c2
-rw-r--r--drivers/leds/Kconfig38
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c7
-rw-r--r--drivers/leds/leds-fsg.c52
-rw-r--r--drivers/leds/leds-lm3530.c1
-rw-r--r--drivers/leds/leds-lm355x.c1
-rw-r--r--drivers/leds/leds-lm3642.c1
-rw-r--r--drivers/leds/leds-lp5521.c11
-rw-r--r--drivers/leds/leds-lp5523.c11
-rw-r--r--drivers/leds/leds-lp5562.c11
-rw-r--r--drivers/leds/leds-lp55xx-common.c13
-rw-r--r--drivers/leds/leds-lp55xx-common.h4
-rw-r--r--drivers/leds/leds-lp8501.c11
-rw-r--r--drivers/leds/leds-lp8860.c4
-rw-r--r--drivers/leds/leds-max77693.c1
-rw-r--r--drivers/leds/leds-ns2.c169
-rw-r--r--drivers/leds/leds-pca955x.c1
-rw-r--r--drivers/leds/leds-pca963x.c2
-rw-r--r--drivers/leds/leds-powernv.c345
-rw-r--r--drivers/leds/leds-syscon.c4
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c4
-rw-r--r--drivers/leds/trigger/Kconfig2
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/macintosh/windfarm.h4
-rw-r--r--drivers/macintosh/windfarm_core.c47
-rw-r--r--drivers/mailbox/Kconfig1
-rw-r--r--drivers/mailbox/arm_mhu.c4
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c1
-rw-r--r--drivers/mailbox/mailbox.c27
-rw-r--r--drivers/mailbox/pcc.c8
-rw-r--r--drivers/md/Kconfig6
-rw-r--r--drivers/md/bcache/bcache.h18
-rw-r--r--drivers/md/bcache/btree.c10
-rw-r--r--drivers/md/bcache/closure.h5
-rw-r--r--drivers/md/bcache/io.c100
-rw-r--r--drivers/md/bcache/journal.c14
-rw-r--r--drivers/md/bcache/movinggc.c8
-rw-r--r--drivers/md/bcache/request.c57
-rw-r--r--drivers/md/bcache/super.c48
-rw-r--r--drivers/md/bcache/util.h5
-rw-r--r--drivers/md/bcache/writeback.c14
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-bio-prison.c6
-rw-r--r--drivers/md/dm-bufio.c26
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c114
-rw-r--r--drivers/md/dm-cache-target.c121
-rw-r--r--drivers/md/dm-crypt.c30
-rw-r--r--drivers/md/dm-delay.c16
-rw-r--r--drivers/md/dm-era-target.c15
-rw-r--r--drivers/md/dm-flakey.c24
-rw-r--r--drivers/md/dm-io.c8
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-linear.c23
-rw-r--r--drivers/md/dm-log-writes.c38
-rw-r--r--drivers/md/dm-mpath.c27
-rw-r--r--drivers/md/dm-raid.c19
-rw-r--r--drivers/md/dm-raid1.c32
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c41
-rw-r--r--drivers/md/dm-stats.c14
-rw-r--r--drivers/md/dm-stripe.c31
-rw-r--r--drivers/md/dm-table.c21
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c214
-rw-r--r--drivers/md/dm-verity.c42
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c158
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c45
-rw-r--r--drivers/md/md-cluster.c171
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c162
-rw-r--r--drivers/md/md.h12
-rw-r--r--drivers/md/multipath.c33
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c8
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c45
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c15
-rw-r--r--drivers/md/raid0.c133
-rw-r--r--drivers/md/raid0.h2
-rw-r--r--drivers/md/raid1.c164
-rw-r--r--drivers/md/raid1.h5
-rw-r--r--drivers/md/raid10.c220
-rw-r--r--drivers/md/raid10.h6
-rw-r--r--drivers/md/raid5.c325
-rw-r--r--drivers/md/raid5.h8
-rw-r--r--drivers/media/common/saa7146/saa7146_hlp.c9
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c167
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h34
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h410
-rw-r--r--drivers/media/dvb-core/dvb_math.h25
-rw-r--r--drivers/media/dvb-core/dvb_net.c2
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.h135
-rw-r--r--drivers/media/dvb-core/dvbdev.h116
-rw-r--r--drivers/media/dvb-frontends/Kconfig34
-rw-r--r--drivers/media/dvb-frontends/Makefile4
-rw-r--r--drivers/media/dvb-frontends/a8293.c168
-rw-r--r--drivers/media/dvb-frontends/a8293.h22
-rw-r--r--drivers/media/dvb-frontends/af9033.c1
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c548
-rw-r--r--drivers/media/dvb-frontends/ascot2e.h58
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c1
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c2
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c2727
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.h65
-rw-r--r--drivers/media/dvb-frontends/cxd2841er_priv.h43
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c50
-rw-r--r--drivers/media/dvb-frontends/horus3a.c430
-rw-r--r--drivers/media/dvb-frontends/horus3a.h58
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c189
-rw-r--r--drivers/media/dvb-frontends/lnbh25.h56
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c1
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c1
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c1
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c1
-rw-r--r--drivers/media/dvb-frontends/s921.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c1
-rw-r--r--drivers/media/dvb-frontends/sp2.c1
-rw-r--r--drivers/media/dvb-frontends/stv0367.c17
-rw-r--r--drivers/media/dvb-frontends/tda10071.c825
-rw-r--r--drivers/media/dvb-frontends/tda10071.h63
-rw-r--r--drivers/media/dvb-frontends/tda10071_priv.h20
-rw-r--r--drivers/media/dvb-frontends/ts2020.c1
-rw-r--r--drivers/media/i2c/Kconfig15
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7170.c1
-rw-r--r--drivers/media/i2c/adv7175.c1
-rw-r--r--drivers/media/i2c/adv7180.c12
-rw-r--r--drivers/media/i2c/adv7343.c8
-rw-r--r--drivers/media/i2c/adv7393.c7
-rw-r--r--drivers/media/i2c/adv7511.c3
-rw-r--r--drivers/media/i2c/adv7604.c486
-rw-r--r--drivers/media/i2c/adv7842.c28
-rw-r--r--drivers/media/i2c/ak881x.c8
-rw-r--r--drivers/media/i2c/bt819.c12
-rw-r--r--drivers/media/i2c/bt856.c1
-rw-r--r--drivers/media/i2c/bt866.c1
-rw-r--r--drivers/media/i2c/cs5345.c8
-rw-r--r--drivers/media/i2c/cs53l32a.c1
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c1
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/ks0127.c1
-rw-r--r--drivers/media/i2c/m52790.c1
-rw-r--r--drivers/media/i2c/msp3400-driver.c1
-rw-r--r--drivers/media/i2c/mt9v011.c1
-rw-r--r--drivers/media/i2c/mt9v032.c2
-rw-r--r--drivers/media/i2c/ov2659.c4
-rw-r--r--drivers/media/i2c/ov7640.c1
-rw-r--r--drivers/media/i2c/ov7670.c1
-rw-r--r--drivers/media/i2c/ov9650.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c1
-rw-r--r--drivers/media/i2c/s5k6a3.c1
-rw-r--r--drivers/media/i2c/saa6588.c5
-rw-r--r--drivers/media/i2c/saa6752hs.c1
-rw-r--r--drivers/media/i2c/saa7110.c12
-rw-r--r--drivers/media/i2c/saa7115.c1
-rw-r--r--drivers/media/i2c/saa7127.c1
-rw-r--r--drivers/media/i2c/saa717x.c8
-rw-r--r--drivers/media/i2c/saa7185.c1
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c8
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c35
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c1
-rw-r--r--drivers/media/i2c/sr030pc30.c15
-rw-r--r--drivers/media/i2c/tc358743.c1979
-rw-r--r--drivers/media/i2c/tc358743_regs.h681
-rw-r--r--drivers/media/i2c/tda7432.c8
-rw-r--r--drivers/media/i2c/tda9840.c1
-rw-r--r--drivers/media/i2c/tea6415c.c1
-rw-r--r--drivers/media/i2c/tea6420.c1
-rw-r--r--drivers/media/i2c/ths7303.c1
-rw-r--r--drivers/media/i2c/tlv320aic23b.c7
-rw-r--r--drivers/media/i2c/tvaudio.c1
-rw-r--r--drivers/media/i2c/tvp514x.c11
-rw-r--r--drivers/media/i2c/tvp5150.c1
-rw-r--r--drivers/media/i2c/tvp7002.c7
-rw-r--r--drivers/media/i2c/tw9903.c1
-rw-r--r--drivers/media/i2c/tw9906.c1
-rw-r--r--drivers/media/i2c/upd64031a.c1
-rw-r--r--drivers/media/i2c/upd64083.c1
-rw-r--r--drivers/media/i2c/vp27smpx.c1
-rw-r--r--drivers/media/i2c/vpx3220.c8
-rw-r--r--drivers/media/i2c/wm8739.c8
-rw-r--r--drivers/media/i2c/wm8775.c1
-rw-r--r--drivers/media/media-entity.c6
-rw-r--r--drivers/media/pci/Kconfig7
-rw-r--r--drivers/media/pci/Makefile3
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c5
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c21
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h2
-rw-r--r--drivers/media/pci/cobalt/Kconfig4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c11
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c11
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c7
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c15
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c9
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig12
-rw-r--r--drivers/media/pci/netup_unidvb/Makefile9
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb.h133
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_ci.c248
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c1001
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c381
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c252
-rw-r--r--drivers/media/pci/smipcie/Kconfig1
-rw-r--r--drivers/media/pci/smipcie/Makefile3
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c232
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c (renamed from drivers/media/pci/smipcie/smipcie.c)14
-rw-r--r--drivers/media/pci/smipcie/smipcie.h19
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c18
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c13
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h26
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/ttpci/ttpci-eeprom.c9
-rw-r--r--drivers/media/pci/tw68/tw68-core.c21
-rw-r--r--drivers/media/pci/tw68/tw68.h16
-rw-r--r--drivers/media/pci/zoran/zoran.h7
-rw-r--r--drivers/media/pci/zoran/zoran_card.c11
-rw-r--r--drivers/media/pci/zoran/zoran_device.c18
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c344
-rw-r--r--drivers/media/platform/Kconfig27
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c16
-rw-r--r--drivers/media/platform/coda/Makefile2
-rw-r--r--drivers/media/platform/coda/coda-bit.c147
-rw-r--r--drivers/media/platform/coda/coda-common.c338
-rw-r--r--drivers/media/platform/coda/coda-gdi.c150
-rw-r--r--drivers/media/platform/coda/coda.h15
-rw-r--r--drivers/media/platform/coda/coda_regs.h10
-rw-r--r--drivers/media/platform/coda/trace.h89
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/fsl-viu.c160
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap/omap_vout.c71
-rw-r--r--drivers/media/platform/omap3isp/isp.c144
-rw-r--r--drivers/media/platform/omap3isp/isp.h7
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c9
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h132
-rw-r--r--drivers/media/platform/rcar_jpu.c1794
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c9
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c11
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c10
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c1
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c12
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c1
-rw-r--r--drivers/media/platform/sh_veu.c10
-rw-r--r--drivers/media/platform/sh_vou.c817
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c105
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c16
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c48
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c12
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c76
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Kconfig28
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Makefile9
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c265
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h64
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1236
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h288
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c271
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h26
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c244
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h20
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c15
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c11
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c85
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h5
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c1
-rw-r--r--drivers/media/radio/saa7706h.c17
-rw-r--r--drivers/media/radio/tef6862.c1
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c5
-rw-r--r--drivers/media/rc/Kconfig26
-rw-r--r--drivers/media/rc/ir-lirc-codec.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c116
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c122
-rw-r--r--drivers/media/rc/keymaps/rc-lirc.c2
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c132
-rw-r--r--drivers/media/rc/nuvoton-cir.c127
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h36
-rw-r--r--drivers/media/rc/rc-ir-raw.c141
-rw-r--r--drivers/media/rc/rc-loopback.c36
-rw-r--r--drivers/media/rc/rc-main.c81
-rw-r--r--drivers/media/tuners/Kconfig2
-rw-r--r--drivers/media/tuners/e4000.c1
-rw-r--r--drivers/media/tuners/fc2580.c1
-rw-r--r--drivers/media/tuners/it913x.c1
-rw-r--r--drivers/media/tuners/m88rs6000t.c1
-rw-r--r--drivers/media/tuners/si2157.c1
-rw-r--r--drivers/media/tuners/tda18212.c1
-rw-r--r--drivers/media/tuners/tua9001.c1
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c21
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/go7007/s2250-board.c1
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k83a.c2
-rw-r--r--drivers/media/usb/gspca/sn9c2028.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c5
-rw-r--r--drivers/media/usb/stk1160/stk1160-reg.h34
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c219
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160.h4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c9
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c71
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c246
-rw-r--r--drivers/media/usb/usbvision/usbvision.h10
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/Makefile3
-rw-r--r--drivers/media/v4l2-core/tuner-core.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c15
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c98
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c21
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c18
-rw-r--r--drivers/media/v4l2-core/v4l2-trace.c11
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c53
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c207
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c91
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c148
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c90
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/fsl_ifc.c43
-rw-r--r--drivers/memory/omap-gpmc.c19
-rw-r--r--drivers/memory/pl172.c301
-rw-r--r--drivers/memory/tegra/Makefile1
-rw-r--r--drivers/memory/tegra/mc.c8
-rw-r--r--drivers/memory/tegra/mc.h4
-rw-r--r--drivers/memory/tegra/tegra114.c19
-rw-r--r--drivers/memory/tegra/tegra124-emc.c42
-rw-r--r--drivers/memory/tegra/tegra124.c33
-rw-r--r--drivers/memory/tegra/tegra210.c1080
-rw-r--r--drivers/memory/tegra/tegra30.c19
-rw-r--r--drivers/message/fusion/mptctl.c9
-rw-r--r--drivers/mfd/88pm800.c1
-rw-r--r--drivers/mfd/88pm805.c1
-rw-r--r--drivers/mfd/88pm860x-core.c5
-rw-r--r--drivers/mfd/Kconfig49
-rw-r--r--drivers/mfd/Makefile9
-rw-r--r--drivers/mfd/aat2870-core.c1
-rw-r--r--drivers/mfd/ab3100-core.c1
-rw-r--r--drivers/mfd/ab8500-core.c4
-rw-r--r--drivers/mfd/adp5520.c1
-rw-r--r--drivers/mfd/arizona-core.c156
-rw-r--r--drivers/mfd/arizona-i2c.c9
-rw-r--r--drivers/mfd/arizona-irq.c16
-rw-r--r--drivers/mfd/arizona.h5
-rw-r--r--drivers/mfd/as3711.c1
-rw-r--r--drivers/mfd/as3722.c1
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/atmel-hlcdc.c55
-rw-r--r--drivers/mfd/axp20x.c112
-rw-r--r--drivers/mfd/bcm590xx.c1
-rw-r--r--drivers/mfd/cros_ec_i2c.c1
-rw-r--r--drivers/mfd/cros_ec_spi.c7
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/da9052-i2c.c1
-rw-r--r--drivers/mfd/da9055-i2c.c1
-rw-r--r--drivers/mfd/da9062-core.c533
-rw-r--r--drivers/mfd/da9063-i2c.c1
-rw-r--r--drivers/mfd/da9063-irq.c4
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/ezx-pcap.c11
-rw-r--r--drivers/mfd/htc-egpio.c8
-rw-r--r--drivers/mfd/htc-i2cpld.c6
-rw-r--r--drivers/mfd/intel-lpss-acpi.c84
-rw-r--r--drivers/mfd/intel-lpss-pci.c113
-rw-r--r--drivers/mfd/intel-lpss.c524
-rw-r--r--drivers/mfd/intel-lpss.h62
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c32
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c3
-rw-r--r--drivers/mfd/ipaq-micro.c38
-rw-r--r--drivers/mfd/jz4740-adc.c9
-rw-r--r--drivers/mfd/kempld-core.c16
-rw-r--r--drivers/mfd/lm3533-core.c1
-rw-r--r--drivers/mfd/lp3943.c1
-rw-r--r--drivers/mfd/lp8788-irq.c5
-rw-r--r--drivers/mfd/lp8788.c1
-rw-r--r--drivers/mfd/lpc_ich.c32
-rw-r--r--drivers/mfd/max14577.c1
-rw-r--r--drivers/mfd/max77686.c1
-rw-r--r--drivers/mfd/max77693.c32
-rw-r--r--drivers/mfd/max77843.c20
-rw-r--r--drivers/mfd/max8907.c1
-rw-r--r--drivers/mfd/max8925-core.c5
-rw-r--r--drivers/mfd/max8925-i2c.c1
-rw-r--r--drivers/mfd/max8997-irq.c20
-rw-r--r--drivers/mfd/max8997.c1
-rw-r--r--drivers/mfd/max8998-irq.c14
-rw-r--r--drivers/mfd/max8998.c1
-rw-r--r--drivers/mfd/mc13xxx-i2c.c1
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/mt6397-core.c61
-rw-r--r--drivers/mfd/palmas.c1
-rw-r--r--drivers/mfd/pm8921-core.c52
-rw-r--r--drivers/mfd/qcom_rpm.c1
-rw-r--r--drivers/mfd/rc5t583-irq.c4
-rw-r--r--drivers/mfd/rc5t583.c1
-rw-r--r--drivers/mfd/retu-mfd.c1
-rw-r--r--drivers/mfd/rt5033.c1
-rw-r--r--drivers/mfd/sec-core.c1
-rw-r--r--drivers/mfd/si476x-i2c.c1
-rw-r--r--drivers/mfd/smsc-ece1099.c1
-rw-r--r--drivers/mfd/stmpe-i2c.c3
-rw-r--r--drivers/mfd/stmpe-spi.c17
-rw-r--r--drivers/mfd/stmpe.c7
-rw-r--r--drivers/mfd/stw481x.c1
-rw-r--r--drivers/mfd/t7l66xb.c18
-rw-r--r--drivers/mfd/tc3589x.c8
-rw-r--r--drivers/mfd/tc6393xb.c13
-rw-r--r--drivers/mfd/tps6507x.c1
-rw-r--r--drivers/mfd/tps65090.c1
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/tps6586x.c12
-rw-r--r--drivers/mfd/tps65910.c1
-rw-r--r--drivers/mfd/tps65912-i2c.c1
-rw-r--r--drivers/mfd/tps65912-irq.c8
-rw-r--r--drivers/mfd/tps80031.c1
-rw-r--r--drivers/mfd/twl-core.c9
-rw-r--r--drivers/mfd/twl4030-irq.c11
-rw-r--r--drivers/mfd/twl6030-irq.c15
-rw-r--r--drivers/mfd/twl6040.c3
-rw-r--r--drivers/mfd/ucb1x00-core.c6
-rw-r--r--drivers/mfd/wm5102-tables.c57
-rw-r--r--drivers/mfd/wm5110-tables.c36
-rw-r--r--drivers/mfd/wm831x-i2c.c1
-rw-r--r--drivers/mfd/wm831x-irq.c7
-rw-r--r--drivers/mfd/wm8350-i2c.c1
-rw-r--r--drivers/mfd/wm8350-irq.c8
-rw-r--r--drivers/mfd/wm8400-core.c1
-rw-r--r--drivers/mfd/wm8994-core.c9
-rw-r--r--drivers/mfd/wm8994-irq.c9
-rw-r--r--drivers/mfd/wm8994-regmap.c6
-rw-r--r--drivers/mfd/wm8997-tables.c10
-rw-r--r--drivers/mfd/wm8998-tables.c1594
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c1
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/bh1770glc.c1
-rw-r--r--drivers/misc/bmp085-i2c.c1
-rw-r--r--drivers/misc/cxl/Kconfig7
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c69
-rw-r--r--drivers/misc/cxl/context.c36
-rw-r--r--drivers/misc/cxl/cxl.h94
-rw-r--r--drivers/misc/cxl/debugfs.c2
-rw-r--r--drivers/misc/cxl/file.c27
-rw-r--r--drivers/misc/cxl/irq.c56
-rw-r--r--drivers/misc/cxl/main.c3
-rw-r--r--drivers/misc/cxl/native.c119
-rw-r--r--drivers/misc/cxl/pci.c611
-rw-r--r--drivers/misc/cxl/sysfs.c33
-rw-r--r--drivers/misc/cxl/trace.h10
-rw-r--r--drivers/misc/cxl/vphb.c37
-rw-r--r--drivers/misc/ds1682.c12
-rw-r--r--drivers/misc/eeprom/Kconfig13
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c41
-rw-r--r--drivers/misc/eeprom/eeprom.c5
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c14
-rw-r--r--drivers/misc/eeprom/max6875.c7
-rw-r--r--drivers/misc/eeprom/sunxi_sid.c156
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/isl29003.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c1
-rw-r--r--drivers/misc/mei/Makefile2
-rw-r--r--drivers/misc/mei/bus-fixup.c306
-rw-r--r--drivers/misc/mei/bus.c1012
-rw-r--r--drivers/misc/mei/client.c333
-rw-r--r--drivers/misc/mei/client.h8
-rw-r--r--drivers/misc/mei/debugfs.c6
-rw-r--r--drivers/misc/mei/hbm.c330
-rw-r--r--drivers/misc/mei/hbm.h3
-rw-r--r--drivers/misc/mei/hw-me-regs.h27
-rw-r--r--drivers/misc/mei/hw-me.c499
-rw-r--r--drivers/misc/mei/hw-me.h8
-rw-r--r--drivers/misc/mei/hw.h134
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/interrupt.c27
-rw-r--r--drivers/misc/mei/main.c98
-rw-r--r--drivers/misc/mei/mei_dev.h47
-rw-r--r--drivers/misc/mei/nfc.c414
-rw-r--r--drivers/misc/mei/pci-me.c32
-rw-r--r--drivers/misc/mei/wd.c1
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c15
-rw-r--r--drivers/misc/qcom-coincell.c152
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sram.c8
-rw-r--r--drivers/misc/ti-st/st_kim.c105
-rw-r--r--drivers/misc/ti-st/st_ll.c17
-rw-r--r--drivers/misc/tsl2550.c1
-rw-r--r--drivers/misc/vmw_balloon.c170
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c7
-rw-r--r--drivers/mmc/card/block.c19
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/core/core.c46
-rw-r--r--drivers/mmc/core/host.c42
-rw-r--r--drivers/mmc/host/Kconfig9
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c1
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c3
-rw-r--r--drivers/mmc/host/dw_mmc.c284
-rw-r--r--drivers/mmc/host/omap.c9
-rw-r--r--drivers/mmc/host/omap_hsmmc.c358
-rw-r--r--drivers/mmc/host/pxamci.c200
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c320
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h5
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c4
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c191
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c6
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-sirf.c4
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c139
-rw-r--r--drivers/mmc/host/sdhci.h10
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c15
-rw-r--r--drivers/mtd/devices/m25p80.c18
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/maps/nettel.c13
-rw-r--r--drivers/mtd/maps/physmap_of.c6
-rw-r--r--drivers/mtd/mtd_blkdevs.c10
-rw-r--r--drivers/mtd/nand/Kconfig13
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.h4
-rw-r--r--drivers/mtd/nand/davinci_nand.c42
-rw-r--r--drivers/mtd/nand/denali_pci.c43
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c258
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c28
-rw-r--r--drivers/mtd/nand/omap_elm.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c61
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c88
-rw-r--r--drivers/mtd/onenand/generic.c2
-rw-r--r--drivers/mtd/spi-nor/Kconfig14
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c265
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c482
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c79
-rw-r--r--drivers/mtd/tests/oobtest.c18
-rw-r--r--drivers/net/Kconfig19
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c95
-rw-r--r--drivers/net/bonding/bond_netlink.c17
-rw-r--r--drivers/net/bonding/bond_options.c20
-rw-r--r--drivers/net/bonding/bond_sysfs.c20
-rw-r--r--drivers/net/caif/caif_hsi.c2
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/caif/caif_spi.c2
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c6
-rw-r--r--drivers/net/can/c_can/c_can.c10
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c7
-rw-r--r--drivers/net/can/flexcan.c9
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/rcar_can.c16
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c17
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c31
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c96
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c28
-rw-r--r--drivers/net/can/usb/usb_8dev.c6
-rw-r--r--drivers/net/can/vcan.c3
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/bcm_sf2.c45
-rw-r--r--drivers/net/dsa/bcm_sf2.h4
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c1
-rw-r--r--drivers/net/dsa/mv88e6131.c1
-rw-r--r--drivers/net/dsa/mv88e6171.c12
-rw-r--r--drivers/net/dsa/mv88e6352.c115
-rw-r--r--drivers/net/dsa/mv88e6xxx.c999
-rw-r--r--drivers/net/dsa/mv88e6xxx.h91
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/3com/3c59x.c44
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c13
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c8
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h1
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h16
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c317
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig10
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c20
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c102
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h71
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h204
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c254
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c587
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c325
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c358
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c212
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h37
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c36
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h21
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c103
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h5
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c113
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c131
-rw-r--r--drivers/net/ethernet/cadence/macb.h36
-rw-r--r--drivers/net/ethernet/cavium/Kconfig5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h105
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c198
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c198
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c592
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c161
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h58
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c170
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c803
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c94
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c346
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h197
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h21
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c113
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c146
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c277
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h44
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h28
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_resource.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c33
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h18
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c69
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h16
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c260
-rw-r--r--drivers/net/ethernet/ethoc.c7
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c37
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h20
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c149
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c611
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h80
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c354
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c145
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h18
-rw-r--r--drivers/net/ethernet/intel/e100.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c89
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h74
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h72
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c407
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c151
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c784
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h1938
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c259
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h85
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c91
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h67
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c380
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h3155
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c199
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h58
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h81
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h61
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c350
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c51
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c38
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c109
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c30
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c138
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c72
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c91
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c279
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c75
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h73
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c182
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c116
-rw-r--r--drivers/net/ethernet/jme.c8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c31
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h172
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c371
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c973
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h1090
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c1295
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h207
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h127
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h405
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c1826
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h227
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1349
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1568
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h80
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/neterion/s2io.c26
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h19
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c177
-rw-r--r--drivers/net/ethernet/renesas/ravb.h5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c174
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.c213
-rw-r--r--drivers/net/ethernet/rocker/rocker.h2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c757
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c59
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c71
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c28
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3429
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h5
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/selftest.c14
-rw-r--r--drivers/net/ethernet/sfc/siena.c7
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c32
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c54
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c59
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c83
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c95
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c142
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h9
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/synopsys/Kconfig27
-rw-r--r--drivers/net/ethernet/synopsys/Makefile5
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c3019
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c188
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c468
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h9
-rw-r--r--drivers/net/fjes/Makefile30
-rw-r--r--drivers/net/fjes/fjes.h77
-rw-r--r--drivers/net/fjes/fjes_ethtool.c137
-rw-r--r--drivers/net/fjes/fjes_hw.c1125
-rw-r--r--drivers/net/fjes/fjes_hw.h334
-rw-r--r--drivers/net/fjes/fjes_main.c1383
-rw-r--r--drivers/net/fjes/fjes_regs.h142
-rw-r--r--drivers/net/geneve.c753
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/bpqether.c1
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/hyperv/hyperv_net.h33
-rw-r--r--drivers/net/hyperv/netvsc.c43
-rw-r--r--drivers/net/hyperv/netvsc_drv.c166
-rw-r--r--drivers/net/hyperv/rndis_filter.c37
-rw-r--r--drivers/net/ieee802154/at86rf230.c56
-rw-r--r--drivers/net/ieee802154/cc2520.c2
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/ifb.c207
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c45
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macvlan.c1
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/nlmon.c2
-rw-r--r--drivers/net/ntb_netdev.c86
-rw-r--r--drivers/net/phy/Kconfig27
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/aquantia.c201
-rw-r--r--drivers/net/phy/dp83640.c10
-rw-r--r--drivers/net/phy/dp83867.c8
-rw-r--r--drivers/net/phy/fixed_phy.c115
-rw-r--r--drivers/net/phy/marvell.c53
-rw-r--r--drivers/net/phy/mdio-octeon.c136
-rw-r--r--drivers/net/phy/mdio_bus.c21
-rw-r--r--drivers/net/phy/microchip.c148
-rw-r--r--drivers/net/phy/phy.c39
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/realtek.c14
-rw-r--r--drivers/net/phy/smsc.c31
-rw-r--r--drivers/net/phy/spi_ks8995.c22
-rw-r--r--drivers/net/phy/teranetics.c135
-rw-r--r--drivers/net/ppp/ppp_generic.c93
-rw-r--r--drivers/net/rionet.c4
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c63
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/usb/lan78xx.c3494
-rw-r--r--drivers/net/usb/lan78xx.h1069
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c278
-rw-r--r--drivers/net/usb/usbnet.c46
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/virtio_net.c37
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/vrf.c710
-rw-r--r--drivers/net/vxlan.c730
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h65
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c30
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c66
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h89
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c49
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c90
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h135
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c376
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c236
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h173
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c208
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h72
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c169
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1350
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1024
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c156
-rw-r--r--drivers/net/wireless/ath/debug.c2
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/boot_loader.h61
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c244
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c51
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c16
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c165
-rw-r--r--drivers/net/wireless/ath/wil6210/ioctl.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c198
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c127
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c98
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c383
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h64
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c132
-rw-r--r--drivers/net/wireless/b43/lo.c4
-rw-r--r--drivers/net/wireless/b43/lo.h2
-rw-r--r--drivers/net/wireless/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c216
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c56
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h21
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c51
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c109
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c25
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c111
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-data.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c72
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h125
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c44
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c74
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c751
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h150
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h386
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h86
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c389
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c78
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h154
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c114
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c160
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c43
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c324
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c46
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tdls.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c16
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tof.c304
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tof.h94
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c96
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c13
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c22
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h13
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c158
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c488
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c150
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c7
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c34
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c63
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.h2
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig12
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c130
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c39
-rw-r--r--drivers/net/wireless/mwifiex/decl.h3
-rw-r--r--drivers/net/wireless/mwifiex/fw.h95
-rw-r--r--drivers/net/wireless/mwifiex/ie.c3
-rw-r--r--drivers/net/wireless/mwifiex/init.c10
-rw-r--r--drivers/net/wireless/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c63
-rw-r--r--drivers/net/wireless/mwifiex/main.h40
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c14
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h45
-rw-r--r--drivers/net/wireless/mwifiex/scan.c157
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c207
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h77
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c90
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c207
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c80
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c22
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c7
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c15
-rw-r--r--drivers/net/wireless/mwifiex/usb.c24
-rw-r--r--drivers/net/wireless/mwifiex/usb.h3
-rw-r--r--drivers/net/wireless/mwifiex/util.c75
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c156
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h8
-rw-r--r--drivers/net/wireless/mwl8k.c49
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c5
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c5
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c5
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c8
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c18
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.h21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c110
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c105
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.h22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/fw.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/fw.h21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/fw.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/fw.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/reg.h1
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h25
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c27
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h138
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c230
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h12
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c59
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c23
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c56
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h11
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/init.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c69
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h5
-rw-r--r--drivers/net/xen-netback/common.h28
-rw-r--r--drivers/net/xen-netback/interface.c16
-rw-r--r--drivers/net/xen-netback/netback.c201
-rw-r--r--drivers/net/xen-netback/xenbus.c13
-rw-r--r--drivers/net/xen-netfront.c27
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c3
-rw-r--r--drivers/nfc/nxp-nci/i2c.c10
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig19
-rw-r--r--drivers/nfc/s3fwrn5/Makefile11
-rw-r--r--drivers/nfc/s3fwrn5/core.c219
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c511
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h111
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c306
-rw-r--r--drivers/nfc/s3fwrn5/nci.c165
-rw-r--r--drivers/nfc/s3fwrn5/nci.h89
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h99
-rw-r--r--drivers/nfc/st-nci/Kconfig11
-rw-r--r--drivers/nfc/st-nci/Makefile3
-rw-r--r--drivers/nfc/st-nci/i2c.c23
-rw-r--r--drivers/nfc/st-nci/ndlc.c7
-rw-r--r--drivers/nfc/st-nci/spi.c392
-rw-r--r--drivers/nfc/st-nci/st-nci_se.c8
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c11
-rw-r--r--drivers/nfc/trf7970a.c6
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c39
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h3
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c325
-rw-r--r--drivers/nvdimm/Kconfig23
-rw-r--r--drivers/nvdimm/Makefile5
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c55
-rw-r--r--drivers/nvdimm/btt.h3
-rw-r--r--drivers/nvdimm/btt_devs.c215
-rw-r--r--drivers/nvdimm/bus.c11
-rw-r--r--drivers/nvdimm/claim.c201
-rw-r--r--drivers/nvdimm/dimm_devs.c5
-rw-r--r--drivers/nvdimm/e820.c87
-rw-r--r--drivers/nvdimm/namespace_devs.c89
-rw-r--r--drivers/nvdimm/nd-core.h9
-rw-r--r--drivers/nvdimm/nd.h67
-rw-r--r--drivers/nvdimm/pfn.h35
-rw-r--r--drivers/nvdimm/pfn_devs.c337
-rw-r--r--drivers/nvdimm/pmem.c247
-rw-r--r--drivers/nvdimm/region.c2
-rw-r--r--drivers/nvdimm/region_devs.c25
-rw-r--r--drivers/nvmem/Kconfig39
-rw-r--r--drivers/nvmem/Makefile12
-rw-r--r--drivers/nvmem/core.c1083
-rw-r--r--drivers/nvmem/qfprom.c85
-rw-r--r--drivers/nvmem/sunxi_sid.c171
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c22
-rw-r--r--drivers/of/of_mdio.c30
-rw-r--r--drivers/of/platform.c10
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/parisc/ccio-dma.c13
-rw-r--r--drivers/parisc/dino.c3
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c8
-rw-r--r--drivers/parisc/sba_iommu.c9
-rw-r--r--drivers/parport/share.c11
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c84
-rw-r--r--drivers/pci/ats.c131
-rw-r--r--drivers/pci/host/Kconfig8
-rw-r--r--drivers/pci/host/pci-dra7xx.c122
-rw-r--r--drivers/pci/host/pci-host-generic.c52
-rw-r--r--drivers/pci/host/pci-imx6.c12
-rw-r--r--drivers/pci/host/pci-keystone-dw.c23
-rw-r--r--drivers/pci/host/pci-keystone.c13
-rw-r--r--drivers/pci/host/pci-mvebu.c1
-rw-r--r--drivers/pci/host/pci-tegra.c1
-rw-r--r--drivers/pci/host/pci-xgene-msi.c57
-rw-r--r--drivers/pci/host/pci-xgene.c13
-rw-r--r--drivers/pci/host/pcie-designware.c21
-rw-r--r--drivers/pci/host/pcie-iproc.c60
-rw-r--r--drivers/pci/host/pcie-iproc.h4
-rw-r--r--drivers/pci/host/pcie-rcar.c1
-rw-r--r--drivers/pci/host/pcie-spear13xx.c3
-rw-r--r--drivers/pci/host/pcie-xilinx.c42
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c122
-rw-r--r--drivers/pci/hotplug/pciehp.h14
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c48
-rw-r--r--drivers/pci/msi.c128
-rw-r--r--drivers/pci/of.c30
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c28
-rw-r--r--drivers/pci/pci.c28
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pci/probe.c149
-rw-r--r--drivers/pci/quirks.c211
-rw-r--r--drivers/pci/slot.c29
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c17
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c14
-rw-r--r--drivers/pcmcia/sa11xx_base.c17
-rw-r--r--drivers/pcmcia/soc_common.h1
-rw-r--r--drivers/perf/Kconfig15
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_pmu.c921
-rw-r--r--drivers/phy/Kconfig15
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-armada375-usb2.c3
-rw-r--r--drivers/phy/phy-bcm-kona-usb2.c2
-rw-r--r--drivers/phy/phy-berlin-sata.c2
-rw-r--r--drivers/phy/phy-berlin-usb.c17
-rw-r--r--drivers/phy/phy-brcmstb-sata.c2
-rw-r--r--drivers/phy/phy-dm816x-usb.c2
-rw-r--r--drivers/phy/phy-exynos-dp-video.c2
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c2
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c2
-rw-r--r--drivers/phy/phy-exynos5250-sata.c2
-rw-r--r--drivers/phy/phy-hix5hd2-sata.c2
-rw-r--r--drivers/phy/phy-lpc18xx-usb-otg.c143
-rw-r--r--drivers/phy/phy-miphy28lp.c3
-rw-r--r--drivers/phy/phy-miphy365x.c2
-rw-r--r--drivers/phy/phy-mvebu-sata.c2
-rw-r--r--drivers/phy/phy-omap-usb2.c2
-rw-r--r--drivers/phy/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/phy-qcom-ipq806x-sata.c2
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h2
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-14nm.c3
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-20nm.c3
-rw-r--r--drivers/phy/phy-qcom-ufs.c2
-rw-r--r--drivers/phy/phy-rcar-gen2.c2
-rw-r--r--drivers/phy/phy-rockchip-usb.c3
-rw-r--r--drivers/phy/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/phy-spear1310-miphy.c2
-rw-r--r--drivers/phy/phy-spear1340-miphy.c2
-rw-r--r--drivers/phy/phy-stih41x-usb.c2
-rw-r--r--drivers/phy/phy-sun4i-usb.c424
-rw-r--r--drivers/phy/phy-sun9i-usb.c2
-rw-r--r--drivers/phy/phy-ti-pipe3.c219
-rw-r--r--drivers/phy/phy-tusb1210.c32
-rw-r--r--drivers/phy/ulpi_phy.h2
-rw-r--r--drivers/pinctrl/Kconfig7
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c7
-rw-r--r--drivers/pinctrl/core.c3
-rw-r--r--drivers/pinctrl/devicetree.c8
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c322
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c68
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c76
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c120
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h5
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c21
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c24
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c30
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c241
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h4
-rw-r--r--drivers/pinctrl/pinconf.c88
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf60x.c8
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c15
-rw-r--r--drivers/pinctrl/pinctrl-at91.c51
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c7
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c378
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c58
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c71
-rw-r--r--drivers/pinctrl/pinctrl-single.c17
-rw-r--r--drivers/pinctrl/pinctrl-st.c6
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c19
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c4
-rw-r--r--drivers/pinctrl/pinmux.c3
-rw-r--r--drivers/pinctrl/qcom/Kconfig20
-rw-r--r--drivers/pinctrl/qcom/Makefile3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c19
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c122
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c376
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c791
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c882
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c18
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c90
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c34
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/sh-pfc/core.c52
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c21
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c19
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c30
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c86
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h7
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c525
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c5
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c62
-rw-r--r--drivers/pinctrl/uniphier/Kconfig32
-rw-r--r--drivers/pinctrl/uniphier/Makefile8
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c886
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c1274
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c1554
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c1351
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c794
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-proxstream2.c1269
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c684
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier.h217
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c4
-rw-r--r--drivers/platform/x86/Kconfig5
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c1
-rw-r--r--drivers/platform/x86/acerhdf.c9
-rw-r--r--drivers/platform/x86/asus-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c171
-rw-r--r--drivers/platform/x86/hp-wireless.c7
-rw-r--r--drivers/platform/x86/ideapad-laptop.c14
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c9
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c92
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/platform/x86/surfacepro3_button.c216
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c805
-rw-r--r--drivers/pnp/manager.c2
-rw-r--r--drivers/pnp/system.c35
-rw-r--r--drivers/power/Kconfig17
-rw-r--r--drivers/power/avs/Kconfig2
-rw-r--r--drivers/power/avs/rockchip-io-domain.c59
-rw-r--r--drivers/power/bq2415x_charger.c143
-rw-r--r--drivers/power/bq24190_charger.c4
-rw-r--r--drivers/power/bq24735-charger.c52
-rw-r--r--drivers/power/bq27x00_battery.c123
-rw-r--r--drivers/power/charger-manager.c2
-rw-r--r--drivers/power/ds2780_battery.c20
-rw-r--r--drivers/power/ds2781_battery.c8
-rw-r--r--drivers/power/ltc2941-battery-gauge.c54
-rw-r--r--drivers/power/max77693_charger.c1
-rw-r--r--drivers/power/olpc_battery.c7
-rw-r--r--drivers/power/pm2301_charger.c1
-rw-r--r--drivers/power/power_supply_core.c2
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c26
-rw-r--r--drivers/power/reset/zx-reboot.c80
-rw-r--r--drivers/power/rt5033_battery.c2
-rw-r--r--drivers/power/rt9455_charger.c16
-rw-r--r--drivers/power/rx51_battery.c2
-rw-r--r--drivers/power/twl4030_charger.c598
-rw-r--r--drivers/powercap/intel_rapl.c8
-rw-r--r--drivers/pwm/Kconfig19
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/core.c49
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c5
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c2
-rw-r--r--drivers/pwm/pwm-atmel.c6
-rw-r--r--drivers/pwm/pwm-bcm-kona.c54
-rw-r--r--drivers/pwm/pwm-crc.c143
-rw-r--r--drivers/pwm/pwm-ep93xx.c4
-rw-r--r--drivers/pwm/pwm-imx.c5
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c465
-rw-r--r--drivers/pwm/pwm-mxs.c4
-rw-r--r--drivers/pwm/pwm-pca9685.c90
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c2
-rw-r--r--drivers/pwm/pwm-rockchip.c2
-rw-r--r--drivers/pwm/pwm-tegra.c6
-rw-r--r--drivers/pwm/pwm-tiecap.c10
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/sysfs.c29
-rw-r--r--drivers/ras/Kconfig37
-rw-r--r--drivers/regulator/88pm800.c236
-rw-r--r--drivers/regulator/Kconfig43
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/act8865-regulator.c1
-rw-r--r--drivers/regulator/ad5398.c1
-rw-r--r--drivers/regulator/axp20x-regulator.c1
-rw-r--r--drivers/regulator/core.c168
-rw-r--r--drivers/regulator/da9062-regulator.c1
-rw-r--r--drivers/regulator/da9210-regulator.c76
-rw-r--r--drivers/regulator/da9211-regulator.c41
-rw-r--r--drivers/regulator/da9211-regulator.h18
-rw-r--r--drivers/regulator/fan53555.c1
-rw-r--r--drivers/regulator/isl6271a-regulator.c1
-rw-r--r--drivers/regulator/isl9305.c2
-rw-r--r--drivers/regulator/lp3971.c1
-rw-r--r--drivers/regulator/lp3972.c1
-rw-r--r--drivers/regulator/lp872x.c17
-rw-r--r--drivers/regulator/ltc3589.c4
-rw-r--r--drivers/regulator/max1586.c1
-rw-r--r--drivers/regulator/max77693.c173
-rw-r--r--drivers/regulator/max77843.c201
-rw-r--r--drivers/regulator/max8660.c1
-rw-r--r--drivers/regulator/max8973-regulator.c83
-rw-r--r--drivers/regulator/mt6311-regulator.c179
-rw-r--r--drivers/regulator/mt6311-regulator.h65
-rw-r--r--drivers/regulator/of_regulator.c3
-rw-r--r--drivers/regulator/pbias-regulator.c5
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/pwm-regulator.c160
-rw-r--r--drivers/regulator/qcom_smd-regulator.c350
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c203
-rw-r--r--drivers/regulator/rk808-regulator.c212
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/regulator/tps51632-regulator.c1
-rw-r--r--drivers/regulator/tps62360-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c1
-rw-r--r--drivers/regulator/tps6586x-regulator.c4
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/reset-ath79.c129
-rw-r--r--drivers/reset/reset-lpc18xx.c258
-rw-r--r--drivers/reset/reset-socfpga.c19
-rw-r--r--drivers/reset/reset-zynq.c155
-rw-r--r--drivers/reset/sti/reset-stih407.c4
-rw-r--r--drivers/reset/sti/reset-stih415.c4
-rw-r--r--drivers/reset/sti/reset-stih416.c4
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c33
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-88pm80x.c28
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab8500.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-armada38x.c31
-rw-r--r--drivers/rtc/rtc-as3722.c4
-rw-r--r--drivers/rtc/rtc-at91rm9200.c43
-rw-r--r--drivers/rtc/rtc-at91sam9.c45
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c3
-rw-r--r--drivers/rtc/rtc-cmos.c125
-rw-r--r--drivers/rtc/rtc-coh901331.c1
-rw-r--r--drivers/rtc/rtc-core.h19
-rw-r--r--drivers/rtc/rtc-da9063.c392
-rw-r--r--drivers/rtc/rtc-dev.c1
-rw-r--r--drivers/rtc/rtc-ds1305.c18
-rw-r--r--drivers/rtc/rtc-ds1307.c120
-rw-r--r--drivers/rtc/rtc-ds1343.c12
-rw-r--r--drivers/rtc/rtc-ds1374.c12
-rw-r--r--drivers/rtc/rtc-ds1511.c42
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1685.c22
-rw-r--r--drivers/rtc/rtc-ds1742.c4
-rw-r--r--drivers/rtc/rtc-ds3232.c8
-rw-r--r--drivers/rtc/rtc-fm3130.c1
-rw-r--r--drivers/rtc/rtc-gemini.c5
-rw-r--r--drivers/rtc/rtc-hym8563.c1
-rw-r--r--drivers/rtc/rtc-isl12022.c8
-rw-r--r--drivers/rtc/rtc-isl12057.c2
-rw-r--r--drivers/rtc/rtc-lpc24xx.c310
-rw-r--r--drivers/rtc/rtc-m48t59.c18
-rw-r--r--drivers/rtc/rtc-max8997.c1
-rw-r--r--drivers/rtc/rtc-moxart.c1
-rw-r--r--drivers/rtc/rtc-mpc5121.c1
-rw-r--r--drivers/rtc/rtc-mt6397.c31
-rw-r--r--drivers/rtc/rtc-mv.c1
-rw-r--r--drivers/rtc/rtc-mxc.c60
-rw-r--r--drivers/rtc/rtc-omap.c33
-rw-r--r--drivers/rtc/rtc-opal.c10
-rw-r--r--drivers/rtc/rtc-pcf2123.c8
-rw-r--r--drivers/rtc/rtc-pcf2127.c35
-rw-r--r--drivers/rtc/rtc-pcf85063.c1
-rw-r--r--drivers/rtc/rtc-pcf8523.c1
-rw-r--r--drivers/rtc/rtc-pcf8563.c1
-rw-r--r--drivers/rtc/rtc-pcf8583.c1
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-pxa.c55
-rw-r--r--drivers/rtc/rtc-rp5c01.c4
-rw-r--r--drivers/rtc/rtc-rx8025.c277
-rw-r--r--drivers/rtc/rtc-rx8581.c1
-rw-r--r--drivers/rtc/rtc-s3c.c28
-rw-r--r--drivers/rtc/rtc-s5m.c11
-rw-r--r--drivers/rtc/rtc-sa1100.c139
-rw-r--r--drivers/rtc/rtc-sa1100.h23
-rw-r--r--drivers/rtc/rtc-sirfsoc.c107
-rw-r--r--drivers/rtc/rtc-snvs.c132
-rw-r--r--drivers/rtc/rtc-st-lpc.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/rtc/rtc-sysfs.c72
-rw-r--r--drivers/rtc/rtc-tx4939.c6
-rw-r--r--drivers/rtc/rtc-vt8500.c1
-rw-r--r--drivers/rtc/rtc-zynqmp.c279
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c36
-rw-r--r--drivers/s390/block/dasd_alias.c9
-rw-r--r--drivers/s390/block/dasd_eckd.c333
-rw-r--r--drivers/s390/block/dasd_eckd.h11
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/dcssblk.c28
-rw-r--r--drivers/s390/block/xpram.c5
-rw-r--r--drivers/s390/char/con3270.c4
-rw-r--r--drivers/s390/char/ctrlchar.c16
-rw-r--r--drivers/s390/char/ctrlchar.h12
-rw-r--r--drivers/s390/char/diag_ftp.c4
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/sclp.c6
-rw-r--r--drivers/s390/char/sclp_cmd.c18
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/char/sclp_vt220.c52
-rw-r--r--drivers/s390/char/tty3270.c4
-rw-r--r--drivers/s390/cio/chsc.c165
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c3
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c17
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c5
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c62
-rw-r--r--drivers/s390/scsi/zfcp_fc.c8
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c28
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c14
-rw-r--r--drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile)0
-rw-r--r--drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c)0
-rw-r--r--drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c)0
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c8
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c5
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c7
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c24
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c66
-rw-r--r--drivers/scsi/cxlflash/Kconfig11
-rw-r--r--drivers/scsi/cxlflash/Makefile2
-rw-r--r--drivers/scsi/cxlflash/common.h208
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c266
-rw-r--r--drivers/scsi/cxlflash/main.c2494
-rw-r--r--drivers/scsi/cxlflash/main.h108
-rw-r--r--drivers/scsi/cxlflash/sislite.h472
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2084
-rw-r--r--drivers/scsi/cxlflash/superpipe.h147
-rw-r--r--drivers/scsi/cxlflash/vlun.c1243
-rw-r--r--drivers/scsi/cxlflash/vlun.h86
-rw-r--r--drivers/scsi/device_handler/Kconfig2
-rw-r--r--drivers/scsi/device_handler/Makefile1
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c621
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c31
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c58
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c55
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c80
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c8
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/hpsa.c301
-rw-r--r--drivers/scsi/hpsa.h16
-rw-r--r--drivers/scsi/hpsa_cmd.h10
-rw-r--r--drivers/scsi/hptiop.c97
-rw-r--r--drivers/scsi/hptiop.h6
-rw-r--r--drivers/scsi/ipr.c51
-rw-r--r--drivers/scsi/ipr.h18
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c21
-rw-r--r--drivers/scsi/libiscsi.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c7
-rw-r--r--drivers/scsi/megaraid.c140
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c544
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c95
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c22
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h41
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c38
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c605
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c12
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h52
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h4
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c326
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h57
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c343
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c22
-rw-r--r--drivers/scsi/mvsas/mv_init.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h12
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c112
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h5
-rw-r--r--drivers/scsi/qla2xxx/Kconfig4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c108
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h35
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c352
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c135
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c70
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c87
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c165
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c902
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h72
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c47
-rw-r--r--drivers/scsi/scsi_common.c109
-rw-r--r--drivers/scsi/scsi_debug.c158
-rw-r--r--drivers/scsi/scsi_dh.c437
-rw-r--r--drivers/scsi/scsi_error.c127
-rw-r--r--drivers/scsi/scsi_lib.c23
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/scsi_priv.h9
-rw-r--r--drivers/scsi/scsi_sysfs.c12
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c13
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c4
-rw-r--r--drivers/scsi/scsi_transport_srp.c3
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/scsi/st.c85
-rw-r--r--drivers/scsi/storvsc_drv.c224
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/scsi/xen-scsifront.c10
-rw-r--r--drivers/sh/intc/chip.c6
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/virq.c29
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/dove/Makefile1
-rw-r--r--drivers/soc/dove/pmu.c412
-rw-r--r--drivers/soc/mediatek/Kconfig19
-rw-r--r--drivers/soc/mediatek/Makefile2
-rw-r--r--drivers/soc/mediatek/mtk-infracfg.c91
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c1
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c488
-rw-r--r--drivers/soc/qcom/Kconfig31
-rw-r--r--drivers/soc/qcom/Makefile3
-rw-r--r--drivers/soc/qcom/smd-rpm.c244
-rw-r--r--drivers/soc/qcom/smd.c1327
-rw-r--r--drivers/soc/qcom/smem.c769
-rw-r--r--drivers/soc/tegra/Makefile6
-rw-r--r--drivers/soc/tegra/common.c2
-rw-r--r--drivers/soc/tegra/fuse/Makefile2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c257
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c175
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c232
-rw-r--r--drivers/soc/tegra/fuse/fuse.h95
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra114.c22
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra124.c26
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra20.c28
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c184
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra30.c48
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c76
-rw-r--r--drivers/soc/tegra/pmc.c125
-rw-r--r--drivers/spi/Kconfig22
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835.c38
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c13
-rw-r--r--drivers/spi/spi-bitbang-txrx.h4
-rw-r--r--drivers/spi/spi-davinci.c50
-rw-r--r--drivers/spi/spi-dw-mmio.c3
-rw-r--r--drivers/spi/spi-dw.c4
-rw-r--r--drivers/spi/spi-dw.h35
-rw-r--r--drivers/spi/spi-fsl-espi.c89
-rw-r--r--drivers/spi/spi-fsl-lib.c19
-rw-r--r--drivers/spi/spi-fsl-lib.h3
-rw-r--r--drivers/spi/spi-fsl-spi.c43
-rw-r--r--drivers/spi/spi-img-spfi.c75
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-mpc512x-psc.c70
-rw-r--r--drivers/spi/spi-mt65xx.c726
-rw-r--r--drivers/spi/spi-omap2-mcspi.c10
-rw-r--r--drivers/spi/spi-orion.c54
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c1
-rw-r--r--drivers/spi/spi-pxa2xx.c66
-rw-r--r--drivers/spi/spi-pxa2xx.h5
-rw-r--r--drivers/spi/spi-rockchip.c1
-rw-r--r--drivers/spi/spi-rspi.c19
-rw-r--r--drivers/spi/spi-s3c24xx.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c20
-rw-r--r--drivers/spi/spi-ti-qspi.c34
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/spi/spi-xilinx.c20
-rw-r--r--drivers/spi/spi-xlp.c456
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spi.c235
-rw-r--r--drivers/spi/spidev.c9
-rw-r--r--drivers/spmi/Kconfig2
-rw-r--r--drivers/spmi/spmi-pmic-arb.c30
-rw-r--r--drivers/spmi/spmi.c22
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/Kconfig3
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/ashmem.c11
-rw-r--r--drivers/staging/android/ion/ion.c26
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c8
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c5
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c16
-rw-r--r--drivers/staging/android/ion/ion_test.c3
-rw-r--r--drivers/staging/android/sync.h10
-rw-r--r--drivers/staging/android/timed_gpio.c4
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/board/armadillo800eva.c2
-rw-r--r--drivers/staging/board/board.c36
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c1
-rw-r--r--drivers/staging/comedi/Kconfig2
-rw-r--r--drivers/staging/comedi/comedi_compat32.c3
-rw-r--r--drivers/staging/comedi/comedi_fops.c37
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c93
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c154
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c35
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c60
-rw-r--r--drivers/staging/comedi/drivers/addi_tcw.h63
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c16
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c6
-rw-r--r--drivers/staging/comedi/drivers/dac02.c6
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c4
-rw-r--r--drivers/staging/comedi/drivers/das16.c3
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c41
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c6
-rw-r--r--drivers/staging/comedi/drivers/fl512.c6
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1
-rw-r--r--drivers/staging/comedi/drivers/me4000.c1000
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c27
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c2
-rw-r--r--drivers/staging/comedi/drivers/s626.c6
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c10
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c214
-rw-r--r--drivers/staging/comedi/range.c13
-rw-r--r--drivers/staging/dgap/dgap.c45
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h6
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h16
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c76
-rw-r--r--drivers/staging/fbtft/Kconfig9
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c350
-rw-r--r--drivers/staging/fbtft/fbtft-core.c17
-rw-r--r--drivers/staging/fbtft/fbtft.h57
-rw-r--r--drivers/staging/fbtft/fbtft_device.c31
-rw-r--r--drivers/staging/fbtft/flexfb.c258
-rw-r--r--drivers/staging/fsl-mc/README.txt364
-rw-r--r--drivers/staging/fsl-mc/TODO28
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h19
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c31
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c24
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c1
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h4
-rw-r--r--drivers/staging/gdm72xx/usb_ids.h6
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c122
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c1
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c1
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c2
-rw-r--r--drivers/staging/iio/iio_simple_dummy.h1
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c4
-rw-r--r--drivers/staging/iio/light/isl29018.c1
-rw-r--r--drivers/staging/iio/light/isl29028.c1
-rw-r--r--drivers/staging/iio/meter/ade7854.h4
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c7
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h18
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h28
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c265
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h146
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c58
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h143
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c11
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h90
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h119
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h313
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h133
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h18
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h2
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h17
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h22
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c3
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c17
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c1
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c149
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c361
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c12
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.h33
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_capa.c40
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h17
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c9
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c27
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c2
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c18
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c19
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c374
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c17
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c24
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c34
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c14
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c18
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre103
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c20
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c10
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c2
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c63
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c2
-rw-r--r--drivers/staging/media/mn88472/mn88472.c1
-rw-r--r--drivers/staging/media/mn88473/mn88473.c1
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/staging/media/omap4iss/TODO1
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c73
-rw-r--r--drivers/staging/most/Documentation/ABI/sysfs-class-most.txt181
-rw-r--r--drivers/staging/most/Documentation/driver_usage.txt180
-rw-r--r--drivers/staging/most/Kconfig30
-rw-r--r--drivers/staging/most/Makefile8
-rw-r--r--drivers/staging/most/TODO8
-rw-r--r--drivers/staging/most/aim-cdev/Kconfig12
-rw-r--r--drivers/staging/most/aim-cdev/Makefile4
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c528
-rw-r--r--drivers/staging/most/aim-network/Kconfig13
-rw-r--r--drivers/staging/most/aim-network/Makefile4
-rw-r--r--drivers/staging/most/aim-network/networking.c567
-rw-r--r--drivers/staging/most/aim-network/networking.h23
-rw-r--r--drivers/staging/most/aim-sound/Kconfig13
-rw-r--r--drivers/staging/most/aim-sound/Makefile4
-rw-r--r--drivers/staging/most/aim-sound/sound.c758
-rw-r--r--drivers/staging/most/aim-v4l2/Kconfig12
-rw-r--r--drivers/staging/most/aim-v4l2/Makefile6
-rw-r--r--drivers/staging/most/aim-v4l2/video.c635
-rw-r--r--drivers/staging/most/hdm-dim2/Kconfig16
-rw-r--r--drivers/staging/most/hdm-dim2/Makefile5
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_errors.h67
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.c919
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h124
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c964
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.h26
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_reg.h176
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.c116
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.h39
-rw-r--r--drivers/staging/most/hdm-i2c/Kconfig12
-rw-r--r--drivers/staging/most/hdm-i2c/Makefile3
-rw-r--r--drivers/staging/most/hdm-i2c/hdm_i2c.c451
-rw-r--r--drivers/staging/most/hdm-usb/Kconfig14
-rw-r--r--drivers/staging/most/hdm-usb/Makefile4
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c1454
-rw-r--r--drivers/staging/most/mostcore/Kconfig13
-rw-r--r--drivers/staging/most/mostcore/Makefile3
-rw-r--r--drivers/staging/most/mostcore/core.c1932
-rw-r--r--drivers/staging/most/mostcore/mostcore.h316
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c1
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h4
-rw-r--r--drivers/staging/netlogic/platform_net.c2
-rw-r--r--drivers/staging/netlogic/xlr_net.h2
-rw-r--r--drivers/staging/nvec/nvec.h19
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h2
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c7
-rw-r--r--drivers/staging/octeon/ethernet-rx.c133
-rw-r--r--drivers/staging/octeon/ethernet-tx.c29
-rw-r--r--drivers/staging/octeon/ethernet-util.h22
-rw-r--r--drivers/staging/octeon/ethernet.c8
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h22
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/ozwpan/Kconfig9
-rw-r--r--drivers/staging/ozwpan/Makefile16
-rw-r--r--drivers/staging/ozwpan/README25
-rw-r--r--drivers/staging/ozwpan/TODO14
-rw-r--r--drivers/staging/ozwpan/ozappif.h36
-rw-r--r--drivers/staging/ozwpan/ozcdev.c554
-rw-r--r--drivers/staging/ozwpan/ozcdev.h17
-rw-r--r--drivers/staging/ozwpan/ozdbg.h54
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c252
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.h65
-rw-r--r--drivers/staging/ozwpan/ozhcd.c2301
-rw-r--r--drivers/staging/ozwpan/ozhcd.h15
-rw-r--r--drivers/staging/ozwpan/ozmain.c71
-rw-r--r--drivers/staging/ozwpan/ozpd.c886
-rw-r--r--drivers/staging/ozwpan/ozpd.h134
-rw-r--r--drivers/staging/ozwpan/ozproto.c813
-rw-r--r--drivers/staging/ozwpan/ozproto.h62
-rw-r--r--drivers/staging/ozwpan/ozprotocol.h375
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.c54
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.h19
-rw-r--r--drivers/staging/ozwpan/ozusbif.h43
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c263
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.h32
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c471
-rw-r--r--drivers/staging/panel/panel.c12
-rw-r--r--drivers/staging/rdma/Kconfig31
-rw-r--r--drivers/staging/rdma/Makefile4
-rw-r--r--drivers/staging/rdma/amso1100/Kbuild (renamed from drivers/infiniband/hw/amso1100/Kbuild)0
-rw-r--r--drivers/staging/rdma/amso1100/Kconfig (renamed from drivers/infiniband/hw/amso1100/Kconfig)0
-rw-r--r--drivers/staging/rdma/amso1100/TODO4
-rw-r--r--drivers/staging/rdma/amso1100/c2.c (renamed from drivers/infiniband/hw/amso1100/c2.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2.h (renamed from drivers/infiniband/hw/amso1100/c2.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.c (renamed from drivers/infiniband/hw/amso1100/c2_ae.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.h (renamed from drivers/infiniband/hw/amso1100/c2_ae.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_alloc.c (renamed from drivers/infiniband/hw/amso1100/c2_alloc.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_cm.c (renamed from drivers/infiniband/hw/amso1100/c2_cm.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_cq.c (renamed from drivers/infiniband/hw/amso1100/c2_cq.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_intr.c (renamed from drivers/infiniband/hw/amso1100/c2_intr.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_mm.c (renamed from drivers/infiniband/hw/amso1100/c2_mm.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.c (renamed from drivers/infiniband/hw/amso1100/c2_mq.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.h (renamed from drivers/infiniband/hw/amso1100/c2_mq.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_pd.c (renamed from drivers/infiniband/hw/amso1100/c2_pd.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.c (renamed from drivers/infiniband/hw/amso1100/c2_provider.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.h (renamed from drivers/infiniband/hw/amso1100/c2_provider.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_qp.c (renamed from drivers/infiniband/hw/amso1100/c2_qp.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_rnic.c (renamed from drivers/infiniband/hw/amso1100/c2_rnic.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_status.h (renamed from drivers/infiniband/hw/amso1100/c2_status.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_user.h (renamed from drivers/infiniband/hw/amso1100/c2_user.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.c (renamed from drivers/infiniband/hw/amso1100/c2_vq.c)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.h (renamed from drivers/infiniband/hw/amso1100/c2_vq.h)0
-rw-r--r--drivers/staging/rdma/amso1100/c2_wr.h (renamed from drivers/infiniband/hw/amso1100/c2_wr.h)0
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig37
-rw-r--r--drivers/staging/rdma/hfi1/Makefile19
-rw-r--r--drivers/staging/rdma/hfi1/TODO6
-rw-r--r--drivers/staging/rdma/hfi1/chip.c10798
-rw-r--r--drivers/staging/rdma/hfi1/chip.h1035
-rw-r--r--drivers/staging/rdma/hfi1/chip_registers.h1292
-rw-r--r--drivers/staging/rdma/hfi1/common.h415
-rw-r--r--drivers/staging/rdma/hfi1/cq.c558
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.c899
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.h78
-rw-r--r--drivers/staging/rdma/hfi1/device.c142
-rw-r--r--drivers/staging/rdma/hfi1/device.h61
-rw-r--r--drivers/staging/rdma/hfi1/diag.c1873
-rw-r--r--drivers/staging/rdma/hfi1/dma.c186
-rw-r--r--drivers/staging/rdma/hfi1/driver.c1241
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c475
-rw-r--r--drivers/staging/rdma/hfi1/eprom.h55
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c2140
-rw-r--r--drivers/staging/rdma/hfi1/firmware.c1620
-rw-r--r--drivers/staging/rdma/hfi1/hfi.h1821
-rw-r--r--drivers/staging/rdma/hfi1/init.c1722
-rw-r--r--drivers/staging/rdma/hfi1/intr.c207
-rw-r--r--drivers/staging/rdma/hfi1/iowait.h186
-rw-r--r--drivers/staging/rdma/hfi1/keys.c411
-rw-r--r--drivers/staging/rdma/hfi1/mad.c4257
-rw-r--r--drivers/staging/rdma/hfi1/mad.h325
-rw-r--r--drivers/staging/rdma/hfi1/mmap.c192
-rw-r--r--drivers/staging/rdma/hfi1/mr.c551
-rw-r--r--drivers/staging/rdma/hfi1/opa_compat.h129
-rw-r--r--drivers/staging/rdma/hfi1/pcie.c1253
-rw-r--r--drivers/staging/rdma/hfi1/pio.c1771
-rw-r--r--drivers/staging/rdma/hfi1/pio.h224
-rw-r--r--drivers/staging/rdma/hfi1/pio_copy.c858
-rw-r--r--drivers/staging/rdma/hfi1/platform_config.h286
-rw-r--r--drivers/staging/rdma/hfi1/qp.c1687
-rw-r--r--drivers/staging/rdma/hfi1/qp.h235
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.c546
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.h222
-rw-r--r--drivers/staging/rdma/hfi1/rc.c2426
-rw-r--r--drivers/staging/rdma/hfi1/ruc.c948
-rw-r--r--drivers/staging/rdma/hfi1/sdma.c2962
-rw-r--r--drivers/staging/rdma/hfi1/sdma.h1123
-rw-r--r--drivers/staging/rdma/hfi1/srq.c397
-rw-r--r--drivers/staging/rdma/hfi1/sysfs.c739
-rw-r--r--drivers/staging/rdma/hfi1/trace.c221
-rw-r--r--drivers/staging/rdma/hfi1/trace.h1409
-rw-r--r--drivers/staging/rdma/hfi1/twsi.c518
-rw-r--r--drivers/staging/rdma/hfi1/twsi.h68
-rw-r--r--drivers/staging/rdma/hfi1/uc.c585
-rw-r--r--drivers/staging/rdma/hfi1/ud.c885
-rw-r--r--drivers/staging/rdma/hfi1/user_pages.c156
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c1444
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.h89
-rw-r--r--drivers/staging/rdma/hfi1/verbs.c2143
-rw-r--r--drivers/staging/rdma/hfi1/verbs.h1151
-rw-r--r--drivers/staging/rdma/hfi1/verbs_mcast.c385
-rw-r--r--drivers/staging/rdma/ipath/Kconfig (renamed from drivers/infiniband/hw/ipath/Kconfig)4
-rw-r--r--drivers/staging/rdma/ipath/Makefile (renamed from drivers/infiniband/hw/ipath/Makefile)0
-rw-r--r--drivers/staging/rdma/ipath/TODO5
-rw-r--r--drivers/staging/rdma/ipath/ipath_common.h (renamed from drivers/infiniband/hw/ipath/ipath_common.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_cq.c (renamed from drivers/infiniband/hw/ipath/ipath_cq.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_debug.h (renamed from drivers/infiniband/hw/ipath/ipath_debug.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_diag.c (renamed from drivers/infiniband/hw/ipath/ipath_diag.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_dma.c (renamed from drivers/infiniband/hw/ipath/ipath_dma.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_driver.c (renamed from drivers/infiniband/hw/ipath/ipath_driver.c)6
-rw-r--r--drivers/staging/rdma/ipath/ipath_eeprom.c (renamed from drivers/infiniband/hw/ipath/ipath_eeprom.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c (renamed from drivers/infiniband/hw/ipath/ipath_file_ops.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_fs.c (renamed from drivers/infiniband/hw/ipath/ipath_fs.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_iba6110.c (renamed from drivers/infiniband/hw/ipath/ipath_iba6110.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_init_chip.c (renamed from drivers/infiniband/hw/ipath/ipath_init_chip.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_intr.c (renamed from drivers/infiniband/hw/ipath/ipath_intr.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_kernel.h (renamed from drivers/infiniband/hw/ipath/ipath_kernel.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_keys.c (renamed from drivers/infiniband/hw/ipath/ipath_keys.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_mad.c (renamed from drivers/infiniband/hw/ipath/ipath_mad.c)5
-rw-r--r--drivers/staging/rdma/ipath/ipath_mmap.c (renamed from drivers/infiniband/hw/ipath/ipath_mmap.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_mr.c (renamed from drivers/infiniband/hw/ipath/ipath_mr.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_qp.c (renamed from drivers/infiniband/hw/ipath/ipath_qp.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_rc.c (renamed from drivers/infiniband/hw/ipath/ipath_rc.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_registers.h (renamed from drivers/infiniband/hw/ipath/ipath_registers.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_ruc.c (renamed from drivers/infiniband/hw/ipath/ipath_ruc.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_sdma.c (renamed from drivers/infiniband/hw/ipath/ipath_sdma.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_srq.c (renamed from drivers/infiniband/hw/ipath/ipath_srq.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_stats.c (renamed from drivers/infiniband/hw/ipath/ipath_stats.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_sysfs.c (renamed from drivers/infiniband/hw/ipath/ipath_sysfs.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_uc.c (renamed from drivers/infiniband/hw/ipath/ipath_uc.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_ud.c (renamed from drivers/infiniband/hw/ipath/ipath_ud.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_pages.c (renamed from drivers/infiniband/hw/ipath/ipath_user_pages.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.c (renamed from drivers/infiniband/hw/ipath/ipath_user_sdma.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.h (renamed from drivers/infiniband/hw/ipath/ipath_user_sdma.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.c (renamed from drivers/infiniband/hw/ipath/ipath_verbs.c)5
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.h (renamed from drivers/infiniband/hw/ipath/ipath_verbs.h)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs_mcast.c (renamed from drivers/infiniband/hw/ipath/ipath_verbs_mcast.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_ppc64.c (renamed from drivers/infiniband/hw/ipath/ipath_wc_ppc64.c)0
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_x86_64.c (renamed from drivers/infiniband/hw/ipath/ipath_wc_x86_64.c)0
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c43
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c44
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c36
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c27
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c21
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c33
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c99
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h84
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h16
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h54
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h1
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h19
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c16
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c79
-rw-r--r--drivers/staging/rtl8192e/dot11d.c39
-rw-r--r--drivers/staging/rtl8192e/dot11d.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h46
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c192
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c440
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h58
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c448
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h66
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c125
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c615
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h112
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c472
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h44
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c130
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c53
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c132
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h439
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h8
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c91
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h345
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c17
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c16
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c4
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h11
-rw-r--r--drivers/staging/rtl8192u/r8192U.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c14
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c8
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h36
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h8
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c15
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h55
-rw-r--r--drivers/staging/rtl8712/ieee80211.c25
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c28
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h18
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h28
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c47
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c35
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h42
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c24
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c2
-rw-r--r--drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c2
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c4
-rw-r--r--drivers/staging/rts5208/ms.c5
-rw-r--r--drivers/staging/rts5208/sd.c21
-rw-r--r--drivers/staging/slicoss/slicoss.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h12
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c170
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h11
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c75
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_help.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h4
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c244
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c164
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.h52
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c261
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h11
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h18
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c389
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h31
-rw-r--r--drivers/staging/sm750fb/sm750.c160
-rw-r--r--drivers/staging/sm750fb/sm750.h72
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c381
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h4
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c55
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h4
-rw-r--r--drivers/staging/sm750fb/sm750_help.h28
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c414
-rw-r--r--drivers/staging/sm750fb/sm750_hw.h48
-rw-r--r--drivers/staging/sm7xxfb/Kconfig13
-rw-r--r--drivers/staging/sm7xxfb/Makefile1
-rw-r--r--drivers/staging/sm7xxfb/TODO12
-rw-r--r--drivers/staging/speakup/buffers.c3
-rw-r--r--drivers/staging/speakup/i18n.c3
-rw-r--r--drivers/staging/speakup/i18n.h12
-rw-r--r--drivers/staging/speakup/keyhelp.c2
-rw-r--r--drivers/staging/speakup/kobjects.c3
-rw-r--r--drivers/staging/speakup/main.c15
-rw-r--r--drivers/staging/speakup/selection.c3
-rw-r--r--drivers/staging/speakup/serialio.c10
-rw-r--r--drivers/staging/speakup/speakup.h68
-rw-r--r--drivers/staging/speakup/speakup_acnt.h8
-rw-r--r--drivers/staging/speakup/speakup_decpc.c6
-rw-r--r--drivers/staging/speakup/speakup_dtlk.h52
-rw-r--r--drivers/staging/speakup/speakup_soft.c1
-rw-r--r--drivers/staging/speakup/thread.c3
-rw-r--r--drivers/staging/speakup/varhandlers.c3
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c1
-rw-r--r--drivers/staging/unisys/Kconfig2
-rw-r--r--drivers/staging/unisys/include/channel_guid.h24
-rw-r--r--drivers/staging/unisys/include/visorbus.h5
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h9
-rw-r--r--drivers/staging/unisys/visorbus/controlvmcompletionstatus.h9
-rw-r--r--drivers/staging/unisys/visorbus/iovmcall_gnuc.h9
-rw-r--r--drivers/staging/unisys/visorbus/periodic_work.c9
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h9
-rw-r--r--drivers/staging/unisys/visorbus/vbusdeviceinfo.h9
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c109
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h9
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c59
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c39
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h9
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c1122
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c18
-rw-r--r--drivers/staging/vme/devices/vme_user.c168
-rw-r--r--drivers/staging/vt6655/baseband.c6
-rw-r--r--drivers/staging/vt6655/card.c15
-rw-r--r--drivers/staging/vt6655/desc.h146
-rw-r--r--drivers/staging/vt6655/device.h13
-rw-r--r--drivers/staging/vt6655/device_cfg.h15
-rw-r--r--drivers/staging/vt6655/device_main.c153
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/mac.c18
-rw-r--r--drivers/staging/vt6655/power.c16
-rw-r--r--drivers/staging/vt6655/rf.c532
-rw-r--r--drivers/staging/vt6655/rf.h24
-rw-r--r--drivers/staging/vt6655/rxtx.c23
-rw-r--r--drivers/staging/vt6655/rxtx.h4
-rw-r--r--drivers/staging/vt6655/upc.h36
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/staging/vt6656/rxtx.c7
-rw-r--r--drivers/staging/wilc1000/Kconfig28
-rw-r--r--drivers/staging/wilc1000/Makefile8
-rw-r--r--drivers/staging/wilc1000/coreconfigsimulator.h17
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c225
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h37
-rw-r--r--drivers/staging/wilc1000/fifo_buffer.c133
-rw-r--r--drivers/staging/wilc1000/fifo_buffer.h26
-rw-r--r--drivers/staging/wilc1000/host_interface.c1433
-rw-r--r--drivers/staging/wilc1000/host_interface.h115
-rw-r--r--drivers/staging/wilc1000/linux_mon.c25
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c248
-rw-r--r--drivers/staging/wilc1000/linux_wlan_common.h4
-rw-r--r--drivers/staging/wilc1000/linux_wlan_sdio.c1
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c30
-rw-r--r--drivers/staging/wilc1000/wilc_exported_buf.c13
-rw-r--r--drivers/staging/wilc1000/wilc_log.h47
-rw-r--r--drivers/staging/wilc1000/wilc_memory.c46
-rw-r--r--drivers/staging/wilc1000/wilc_memory.h173
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.c28
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.h35
-rw-r--r--drivers/staging/wilc1000/wilc_osconfig.h9
-rw-r--r--drivers/staging/wilc1000/wilc_oswrapper.h12
-rw-r--r--drivers/staging/wilc1000/wilc_platform.h8
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c288
-rw-r--r--drivers/staging/wilc1000/wilc_sleep.c18
-rw-r--r--drivers/staging/wilc1000/wilc_sleep.h20
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c15
-rw-r--r--drivers/staging/wilc1000/wilc_strutils.c80
-rw-r--r--drivers/staging/wilc1000/wilc_strutils.h134
-rw-r--r--drivers/staging/wilc1000/wilc_timer.c45
-rw-r--r--drivers/staging/wilc1000/wilc_timer.h129
-rw-r--r--drivers/staging/wilc1000/wilc_type.h34
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c321
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h2
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.c951
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h16
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c53
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c16
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h6
-rw-r--r--drivers/staging/xgifb/Makefile2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c12
-rw-r--r--drivers/staging/xgifb/vb_init.h4
-rw-r--r--drivers/staging/xgifb/vb_setmode.h34
-rw-r--r--drivers/staging/xgifb/vb_util.c42
-rw-r--r--drivers/staging/xgifb/vb_util.h44
-rw-r--r--drivers/target/iscsi/iscsi_target.c138
-rw-r--r--drivers/target/iscsi/iscsi_target.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c34
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c123
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c41
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c38
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c39
-rw-r--r--drivers/target/loopback/tcm_loop.c22
-rw-r--r--drivers/target/target_core_configfs.c49
-rw-r--r--drivers/target/target_core_device.c11
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_hba.c15
-rw-r--r--drivers/target/target_core_iblock.c21
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_rd.c45
-rw-r--r--drivers/target/target_core_sbc.c49
-rw-r--r--drivers/target/target_core_spc.c108
-rw-r--r--drivers/target/target_core_tpg.c17
-rw-r--r--drivers/target/target_core_transport.c507
-rw-r--r--drivers/target/target_core_user.c14
-rw-r--r--drivers/target/target_core_xcopy.c6
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/cpu_cooling.c73
-rw-r--r--drivers/thermal/db8500_thermal.c7
-rw-r--r--drivers/thermal/dove_thermal.c2
-rw-r--r--drivers/thermal/fair_share.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c5
-rw-r--r--drivers/thermal/hisi_thermal.c5
-rw-r--r--drivers/thermal/imx_thermal.c27
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c10
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.h8
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel_pch_thermal.c283
-rw-r--r--drivers/thermal/intel_powerclamp.c7
-rw-r--r--drivers/thermal/intel_quark_dts_thermal.c13
-rw-r--r--drivers/thermal/intel_soc_dts_iosf.c8
-rw-r--r--drivers/thermal/kirkwood_thermal.c2
-rw-r--r--drivers/thermal/of-thermal.c14
-rw-r--r--drivers/thermal/power_allocator.c51
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c2
-rw-r--r--drivers/thermal/rcar_thermal.c7
-rw-r--r--drivers/thermal/rockchip_thermal.c10
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c28
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c7
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/tegra_soctherm.c4
-rw-r--r--drivers/thermal/thermal_core.c110
-rw-r--r--drivers/thermal/thermal_hwmon.c10
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c10
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c10
-rw-r--r--drivers/tty/hvc/hvc_xen.c18
-rw-r--r--drivers/tty/hvc/hvsi.c46
-rw-r--r--drivers/tty/mips_ejtag_fdc.c9
-rw-r--r--drivers/tty/n_gsm.c2
-rw-r--r--drivers/tty/n_tty.c31
-rw-r--r--drivers/tty/pty.c8
-rw-r--r--drivers/tty/serial/8250/8250.h17
-rw-r--r--drivers/tty/serial/8250/8250_core.c2880
-rw-r--r--drivers/tty/serial/8250/8250_dw.c27
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c172
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c147
-rw-r--r--drivers/tty/serial/8250/8250_pci.c164
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c11
-rw-r--r--drivers/tty/serial/8250/8250_port.c2912
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c1
-rw-r--r--drivers/tty/serial/8250/Makefile5
-rw-r--r--drivers/tty/serial/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/atmel_serial.c484
-rw-r--r--drivers/tty/serial/etraxfs-uart.c61
-rw-r--r--drivers/tty/serial/imx.c218
-rw-r--r--drivers/tty/serial/lantiq.c8
-rw-r--r--drivers/tty/serial/men_z135_uart.c10
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/mxs-auart.c58
-rw-r--r--drivers/tty/serial/samsung.c48
-rw-r--r--drivers/tty/serial/samsung.h1
-rw-r--r--drivers/tty/serial/sc16is7xx.c139
-rw-r--r--drivers/tty/serial/serial_core.c28
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c126
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h5
-rw-r--r--drivers/tty/serial/sn_console.c32
-rw-r--r--drivers/tty/serial/stm32-usart.c2
-rw-r--r--drivers/tty/serial/suncore.c11
-rw-r--r--drivers/tty/serial/sunhv.c13
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/sysrq.c11
-rw-r--r--drivers/tty/tty_buffer.c12
-rw-r--r--drivers/tty/tty_io.c108
-rw-r--r--drivers/tty/tty_ioctl.c11
-rw-r--r--drivers/tty/tty_ldisc.c15
-rw-r--r--drivers/tty/vt/selection.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/Kconfig2
-rw-r--r--drivers/uio/uio.c1
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c14
-rw-r--r--drivers/usb/atm/cxacru.c7
-rw-r--r--drivers/usb/chipidea/bits.h12
-rw-r--r--drivers/usb/chipidea/ci.h9
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c17
-rw-r--r--drivers/usb/chipidea/core.c141
-rw-r--r--drivers/usb/chipidea/debug.c7
-rw-r--r--drivers/usb/chipidea/host.c33
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/chipidea/otg_fsm.c1
-rw-r--r--drivers/usb/chipidea/udc.c30
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c12
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/class/usblp.c66
-rw-r--r--drivers/usb/common/common.c56
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/devio.c19
-rw-r--r--drivers/usb/core/driver.c1
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd.c26
-rw-r--r--drivers/usb/core/hub.c72
-rw-r--r--drivers/usb/core/otg_whitelist.h2
-rw-r--r--drivers/usb/core/sysfs.c31
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c57
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/gadget.c15
-rw-r--r--drivers/usb/dwc2/hcd.c55
-rw-r--r--drivers/usb/dwc2/hcd.h5
-rw-r--r--drivers/usb/dwc2/hcd_queue.c49
-rw-r--r--drivers/usb/dwc3/Kconfig7
-rw-r--r--drivers/usb/dwc3/Makefile2
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c2
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c75
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c26
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c4
-rw-r--r--drivers/usb/dwc3/dwc3-st.c4
-rw-r--r--drivers/usb/dwc3/ep0.c96
-rw-r--r--drivers/usb/dwc3/gadget.c57
-rw-r--r--drivers/usb/gadget/composite.c52
-rw-r--r--drivers/usb/gadget/config.c56
-rw-r--r--drivers/usb/gadget/configfs.c31
-rw-r--r--drivers/usb/gadget/epautoconf.c282
-rw-r--r--drivers/usb/gadget/function/f_acm.c1
-rw-r--r--drivers/usb/gadget/function/f_ecm.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c14
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_loopback.c5
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c156
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h6
-rw-r--r--drivers/usb/gadget/function/f_midi.c8
-rw-r--r--drivers/usb/gadget/function/f_ncm.c5
-rw-r--r--drivers/usb/gadget/function/f_obex.c22
-rw-r--r--drivers/usb/gadget/function/f_printer.c17
-rw-r--r--drivers/usb/gadget/function/f_serial.c1
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c6
-rw-r--r--drivers/usb/gadget/function/f_uac2.c35
-rw-r--r--drivers/usb/gadget/function/f_uvc.c7
-rw-r--r--drivers/usb/gadget/function/storage_common.h2
-rw-r--r--drivers/usb/gadget/function/u_ether.h4
-rw-r--r--drivers/usb/gadget/function/u_uac1.h2
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c41
-rw-r--r--drivers/usb/gadget/legacy/audio.c41
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c35
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c10
-rw-r--r--drivers/usb/gadget/legacy/ether.c36
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c32
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c8
-rw-r--r--drivers/usb/gadget/legacy/hid.c37
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c41
-rw-r--r--drivers/usb/gadget/legacy/multi.c43
-rw-r--r--drivers/usb/gadget/legacy/ncm.c34
-rw-r--r--drivers/usb/gadget/legacy/nokia.c105
-rw-r--r--drivers/usb/gadget/legacy/printer.c51
-rw-r--r--drivers/usb/gadget/legacy/serial.c38
-rw-r--r--drivers/usb/gadget/legacy/zero.c41
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c88
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c39
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c16
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c29
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c11
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c95
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c32
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c11
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c13
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c11
-rw-r--r--drivers/usb/gadget/udc/gadget_chips.h55
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c38
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c11
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c32
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c13
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c9
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c11
-rw-r--r--drivers/usb/gadget/udc/net2272.c15
-rw-r--r--drivers/usb/gadget/udc/net2280.c95
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c22
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c52
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c30
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h40
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c10
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c15
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c10
-rw-r--r--drivers/usb/gadget/udc/udc-core.c105
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c9
-rw-r--r--drivers/usb/host/Kconfig17
-rw-r--r--drivers/usb/host/bcma-hcd.c128
-rw-r--r--drivers/usb/host/ehci-fsl.c53
-rw-r--r--drivers/usb/host/ehci-fsl.h1
-rw-r--r--drivers/usb/host/ehci-hub.c7
-rw-r--r--drivers/usb/host/ehci-platform.c13
-rw-r--r--drivers/usb/host/ehci-st.c7
-rw-r--r--drivers/usb/host/ehci-sysfs.c8
-rw-r--r--drivers/usb/host/ehci.h12
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c29
-rw-r--r--drivers/usb/host/ohci-at91.c179
-rw-r--r--drivers/usb/host/ohci-q.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c7
-rw-r--r--drivers/usb/host/u132-hcd.c35
-rw-r--r--drivers/usb/host/xhci-dbg.c4
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c57
-rw-r--r--drivers/usb/host/xhci-ring.c306
-rw-r--r--drivers/usb/host/xhci.c16
-rw-r--r--drivers/usb/host/xhci.h14
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c15
-rw-r--r--drivers/usb/misc/ftdi-elan.c12
-rw-r--r--drivers/usb/misc/usbtest.c7
-rw-r--r--drivers/usb/musb/Kconfig51
-rw-r--r--drivers/usb/musb/Makefile1
-rw-r--r--drivers/usb/musb/musb_cppi41.c6
-rw-r--r--drivers/usb/musb/musb_dsps.c6
-rw-r--r--drivers/usb/musb/musb_gadget.c87
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/musb/sunxi.c756
-rw-r--r--drivers/usb/phy/Kconfig14
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-generic.c6
-rw-r--r--drivers/usb/phy/phy-keystone.c6
-rw-r--r--drivers/usb/phy/phy-msm-usb.c67
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c9
-rw-r--r--drivers/usb/phy/phy-omap-otg.c22
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c436
-rw-r--r--drivers/usb/phy/phy-tahvo.c27
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c68
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h10
-rw-r--r--drivers/usb/serial/io_ti.c279
-rw-r--r--drivers/usb/serial/mos7720.c253
-rw-r--r--drivers/usb/serial/mxuport.c10
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/pl2303.c35
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/symbolserial.c24
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c2
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/vfio/vfio.c91
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/vhost.c65
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/lp855x_bl.c23
-rw-r--r--drivers/video/backlight/lp8788_bl.c3
-rw-r--r--drivers/video/backlight/pm8941-wled.c (renamed from drivers/leds/leds-pm8941-wled.c)60
-rw-r--r--drivers/video/backlight/sky81452-backlight.c26
-rw-r--r--drivers/video/backlight/tosa_bl.c1
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig18
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/arkfb.c36
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c3
-rw-r--r--drivers/video/fbdev/aty/atyfb.h5
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c109
-rw-r--r--drivers/video/fbdev/core/fbmon.c4
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c2
-rw-r--r--drivers/video/fbdev/core/modedb.c2
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c30
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c46
-rw-r--r--drivers/video/fbdev/i740fb.c35
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c33
-rw-r--r--drivers/video/fbdev/ocfb.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-opa362.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pxa168fb.c14
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/pxafb.c1
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c3
-rw-r--r--drivers/video/fbdev/s3c-fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c35
-rw-r--r--drivers/video/fbdev/sa1100fb.c1
-rw-r--r--drivers/video/fbdev/simplefb.c1
-rw-r--r--drivers/video/fbdev/sm712.h (renamed from drivers/staging/sm7xxfb/sm7xx.h)39
-rw-r--r--drivers/video/fbdev/sm712fb.c (renamed from drivers/staging/sm7xxfb/sm7xxfb.c)71
-rw-r--r--drivers/video/fbdev/ssd1307fb.c6
-rw-r--r--drivers/video/fbdev/stifb.c41
-rw-r--r--drivers/video/fbdev/udlfb.c10
-rw-r--r--drivers/video/fbdev/vfb.c17
-rw-r--r--drivers/video/fbdev/vt8623fb.c31
-rw-r--r--drivers/video/fbdev/xen-fbfront.c20
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/virtio/virtio_balloon.c16
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/virtio/virtio_mmio.c10
-rw-r--r--drivers/w1/masters/ds2482.c1
-rw-r--r--drivers/w1/masters/matrox_w1.c16
-rw-r--r--drivers/watchdog/Kconfig25
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c7
-rw-r--r--drivers/watchdog/at91sam9_wdt.c22
-rw-r--r--drivers/watchdog/at91sam9_wdt.h2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c1
-rw-r--r--drivers/watchdog/booke_wdt.c4
-rw-r--r--drivers/watchdog/coh901327_wdt.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/da9055_wdt.c1
-rw-r--r--drivers/watchdog/da9062_wdt.c1
-rw-r--r--drivers/watchdog/da9063_wdt.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/digicolor_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/gpio_wdt.c65
-rw-r--r--drivers/watchdog/iTCO_wdt.c82
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/imgpdc_wdt.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/ks8695_wdt.c9
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c340
-rw-r--r--drivers/watchdog/mena21_wdt.c1
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c156
-rw-r--r--drivers/watchdog/mtk_wdt.c39
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/qcom-wdt.c1
-rw-r--r--drivers/watchdog/retu_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c1
-rw-r--r--drivers/watchdog/sama5d4_wdt.c280
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/st_lpc_wdt.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tegra_wdt.c1
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/watchdog/via_wdt.c1
-rw-r--r--drivers/watchdog/wm831x_wdt.c1
-rw-r--r--drivers/watchdog/wm8350_wdt.c1
-rw-r--r--drivers/xen/Kconfig11
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--drivers/xen/biomerge.c6
-rw-r--r--drivers/xen/events/events_base.c12
-rw-r--r--drivers/xen/events/events_fifo.c4
-rw-r--r--drivers/xen/gntalloc.c5
-rw-r--r--drivers/xen/gntdev.c4
-rw-r--r--drivers/xen/manage.c2
-rw-r--r--drivers/xen/preempt.c2
-rw-r--r--drivers/xen/privcmd.c48
-rw-r--r--drivers/xen/swiotlb-xen.c22
-rw-r--r--drivers/xen/sys-hypervisor.c136
-rw-r--r--drivers/xen/tmem.c24
-rw-r--r--drivers/xen/xen-acpi-processor.c16
-rw-r--r--drivers/xen/xenbus/xenbus_client.c6
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c16
-rw-r--r--drivers/xen/xenfs/Makefile1
-rw-r--r--drivers/xen/xenfs/super.c3
-rw-r--r--drivers/xen/xenfs/xenfs.h1
-rw-r--r--drivers/xen/xenfs/xensyms.c152
-rw-r--r--drivers/xen/xlate_mmu.c18
5390 files changed, 390614 insertions, 149924 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6e973b8e3a3b..46b4a8e0f859 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig"
source "drivers/mcb/Kconfig"
+source "drivers/perf/Kconfig"
+
source "drivers/ras/Kconfig"
source "drivers/thunderbolt/Kconfig"
@@ -184,4 +186,6 @@ source "drivers/android/Kconfig"
source "drivers/nvdimm/Kconfig"
+source "drivers/nvmem/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b64b49f6e01b..b250b36b54f2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,7 +11,7 @@ obj-y += bus/
obj-$(CONFIG_GENERIC_PHY) += phy/
# GPIO must come after pinctrl as gpios may need to mux pins etc
-obj-y += pinctrl/
+obj-$(CONFIG_PINCTRL) += pinctrl/
obj-y += gpio/
obj-y += pwm/
obj-$(CONFIG_PCI) += pci/
@@ -161,7 +161,9 @@ obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
+obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_NVMEM) += nvmem/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 114cf48085ab..5d1015c26ff4 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -189,17 +189,24 @@ config ACPI_DOCK
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
-config ACPI_PROCESSOR
- tristate "Processor"
+config ACPI_CPU_FREQ_PSS
+ bool
select THERMAL
+
+config ACPI_PROCESSOR_IDLE
+ bool
select CPU_IDLE
+
+config ACPI_PROCESSOR
+ tristate "Processor"
depends on X86 || IA64
+ select ACPI_PROCESSOR_IDLE
+ select ACPI_CPU_FREQ_PSS
default y
help
- This driver installs ACPI as the idle handler for Linux and uses
- ACPI C2 and C3 processor states to save power on systems that
- support it. It is required by several flavors of cpufreq
- performance-state drivers.
+ This driver adds support for the ACPI Processor package. It is required
+ by several flavors of cpufreq performance-state, thermal, throttling and
+ idle drivers.
To compile this driver as a module, choose M here:
the module will be called processor.
@@ -410,6 +417,7 @@ config ACPI_NFIT
tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
depends on PHYS_ADDR_T_64BIT
depends on BLK_DEV
+ depends on ARCH_HAS_MMIO_FLUSH
select LIBNVDIMM
help
Infrastructure to probe ACPI 6 compliant platforms for
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 8321430d7f24..b5e7cd8a9c71 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += nvs.o
# Power management related files
acpi-y += wakeup.o
acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
-acpi-y += device_pm.o
+acpi-y += device_sysfs.o device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -80,8 +80,10 @@ obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
# processor has its own "processor." module_param namespace
-processor-y := processor_driver.o processor_throttling.o
-processor-y += processor_idle.o processor_thermal.o
+processor-y := processor_driver.o
+processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o
+processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \
+ processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 9b5354a2cd08..f71b756b05c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 3984ea96e5f7..a450e7af877c 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -16,7 +16,6 @@
#include <linux/clkdev.h>
#include <linux/acpi.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/pm.h>
#include "internal.h"
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index ac0f52f6df2b..f77956c3fd45 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 569ee090343f..f51bd0d0bc17 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -11,7 +11,6 @@
*/
#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
@@ -60,6 +59,7 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
+#define LPSS_NO_D3_DELAY BIT(5)
struct lpss_private_data;
@@ -156,6 +156,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
};
+static const struct lpss_device_desc bsw_pwm_dev_desc = {
+ .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+};
+
static const struct lpss_device_desc byt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.clk_con_id = "baudclk",
@@ -163,6 +167,14 @@ static const struct lpss_device_desc byt_uart_dev_desc = {
.setup = lpss_uart_setup,
};
+static const struct lpss_device_desc bsw_uart_dev_desc = {
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+ | LPSS_NO_D3_DELAY,
+ .clk_con_id = "baudclk",
+ .prv_offset = 0x800,
+ .setup = lpss_uart_setup,
+};
+
static const struct lpss_device_desc byt_spi_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
@@ -178,8 +190,15 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
.setup = byt_i2c_setup,
};
+static const struct lpss_device_desc bsw_i2c_dev_desc = {
+ .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+ .prv_offset = 0x800,
+ .setup = byt_i2c_setup,
+};
+
static struct lpss_device_desc bsw_spi_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+ | LPSS_NO_D3_DELAY,
.prv_offset = 0x400,
.setup = lpss_deassert_reset,
};
@@ -214,11 +233,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33FC", },
/* Braswell LPSS devices */
- { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
- { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+ { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+ { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
- { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+ { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
+ /* Broadwell LPSS devices */
{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -352,13 +372,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
pdata->mmio_size = resource_size(rentry->res);
pdata->mmio_base = ioremap(rentry->res->start,
pdata->mmio_size);
- if (!pdata->mmio_base)
- goto err_out;
break;
}
acpi_dev_free_resource_list(&resource_list);
+ if (!pdata->mmio_base) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
@@ -555,9 +578,14 @@ static void acpi_lpss_restore_ctx(struct device *dev,
* The following delay is needed or the subsequent write operations may
* fail. The LPSS devices are actually PCI devices and the PCI spec
* expects 10ms delay before the device can be accessed after D3 to D0
- * transition.
+ * transition. However, some platforms like BSW do not need this delay.
*/
- msleep(10);
+ unsigned int delay = 10; /* default 10ms delay */
+
+ if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
+ delay = 0;
+
+ msleep(delay);
for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
unsigned long offset = i * sizeof(u32);
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index ee28f4d15625..6b0d3ef7309c 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -16,11 +16,6 @@
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
* ACPI based HotPlug driver that supports Memory Hotplug
* This driver fields notifications from firmware for memory add
* and remove operations and alerts the VM of the affected memory
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 00b39802d7ec..ae307ff36acb 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index ff6d8adc9cda..c58940b231d6 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -19,8 +19,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
/* floppy */
{"PNP0700"},
- /* ipmi_si */
- {"IPI0001"},
/* tpm_inf_pnp */
{"IFX0101"}, /* Infineon TPMs */
{"IFX0102"}, /* Infineon TPMs */
@@ -153,6 +151,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
{"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */
{"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */
+ {"ALI5123"}, /* ALi Fast Infrared Controller */
{"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */
{"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 92a5f738e370..985b8a83184e 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -485,7 +485,7 @@ static const struct acpi_device_id processor_device_ids[] = {
{ }
};
-static struct acpi_scan_handler __refdata processor_handler = {
+static struct acpi_scan_handler processor_handler = {
.ids = processor_device_ids,
.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8c2fe2f2f9fd..5778e8e4313a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index c1a963581dc0..fedcc16b56cc 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -11,6 +11,7 @@ obj-y += acpi.o
acpi-y := \
dsargs.o \
dscontrol.o \
+ dsdebug.o \
dsfield.o \
dsinit.o \
dsmethod.o \
@@ -164,6 +165,7 @@ acpi-y += \
utmath.o \
utmisc.o \
utmutex.o \
+ utnonansi.o \
utobject.o \
utosi.o \
utownerid.o \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 43685dd36c77..eb2e926d8218 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -67,9 +67,6 @@ struct acpi_db_execute_walk {
};
#define PARAM_LIST(pl) pl
-#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose)
-#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\
- acpi_os_printf PARAM_LIST(fp);}
#define EX_NO_SINGLE_STEP 1
#define EX_SINGLE_STEP 2
@@ -77,10 +74,6 @@ struct acpi_db_execute_walk {
/*
* dbxface - external debugger interfaces
*/
-acpi_status acpi_db_initialize(void);
-
-void acpi_db_terminate(void);
-
acpi_status
acpi_db_single_step(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, u32 op_type);
@@ -102,6 +95,8 @@ void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
acpi_status acpi_db_sleep(char *object_arg);
+void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg);
+
void acpi_db_display_locks(void);
void acpi_db_display_resources(char *object_arg);
@@ -262,6 +257,23 @@ char *acpi_db_get_next_token(char *string,
char **next, acpi_object_type * return_type);
/*
+ * dbobject
+ */
+void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc);
+
+void
+acpi_db_display_internal_object(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_arguments(struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_locals(struct acpi_walk_state *walk_state);
+
+void
+acpi_db_dump_method_info(acpi_status status,
+ struct acpi_walk_state *walk_state);
+
+/*
* dbstats - Generation and display of ACPI table statistics
*/
void acpi_db_generate_statistics(union acpi_parse_object *root, u8 is_method);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 408f04bcaab4..7094dc89eb81 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -354,4 +354,12 @@ acpi_status
acpi_ds_result_push(union acpi_operand_object *object,
struct acpi_walk_state *walk_state);
+/*
+ * dsdebug - parser debugging routines
+ */
+void
+acpi_ds_dump_method_stack(acpi_status status,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
#endif /* _ACDISPAT_H_ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 53f96a370762..09f37b516808 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -58,11 +58,12 @@ ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
#if (!ACPI_REDUCED_HARDWARE)
ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -235,6 +236,10 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+/* Maximum number of While() loop iterations before forced abort */
+
+ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
+
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -290,8 +295,6 @@ ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
/*****************************************************************************
*
@@ -309,9 +312,10 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
@@ -346,8 +350,8 @@ ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
/*
* Statistic globals
*/
-ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
-ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 7ac98000b46b..e820ed8f173f 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -131,6 +131,28 @@ void
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
u32 level, u32 index);
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+ u8 begin, u8 *aml, char *pathname);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index bc600969c6a1..6f708267ad8c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -174,8 +174,12 @@ struct acpi_namespace_node {
*/
#ifdef ACPI_LARGE_NAMESPACE_NODE
union acpi_parse_object *op;
+ void *method_locals;
+ void *method_args;
u32 value;
u32 length;
+ u8 arg_count;
+
#endif
};
@@ -209,11 +213,9 @@ struct acpi_table_list {
#define ACPI_ROOT_ORIGIN_ALLOCATED (1)
#define ACPI_ROOT_ALLOW_RESIZE (2)
-/* Predefined (fixed) table indexes */
+/* Predefined table indexes */
-#define ACPI_TABLE_INDEX_DSDT (0)
-#define ACPI_TABLE_INDEX_FACS (1)
-#define ACPI_TABLE_INDEX_X_FACS (2)
+#define ACPI_INVALID_TABLE_INDEX (0xFFFFFFFF)
struct acpi_find_context {
char *search_for;
@@ -404,6 +406,13 @@ struct acpi_simple_repair_info {
#define ACPI_NUM_RTYPES 5 /* Number of actual object types */
+/* Info for running the _REG methods */
+
+struct acpi_reg_walk_info {
+ acpi_adr_space_type space_id;
+ u32 reg_run_count;
+};
+
/*****************************************************************************
*
* Event typedefs and structs
@@ -715,7 +724,7 @@ union acpi_parse_value {
union acpi_parse_object *arg; /* arguments and contained ops */
};
-#ifdef ACPI_DISASSEMBLER
+#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
#define ACPI_DISASM_ONLY_MEMBERS(a) a;
#else
#define ACPI_DISASM_ONLY_MEMBERS(a)
@@ -726,7 +735,7 @@ union acpi_parse_value {
u8 descriptor_type; /* To differentiate various internal objs */\
u8 flags; /* Type of Op */\
u16 aml_opcode; /* AML opcode */\
- u32 aml_offset; /* Offset of declaration in AML */\
+ u8 *aml; /* Address of declaration in AML */\
union acpi_parse_object *next; /* Next op */\
struct acpi_namespace_node *node; /* For use by interpreter */\
union acpi_parse_value value; /* Value or args associated with the opcode */\
@@ -1103,6 +1112,9 @@ struct acpi_db_method_info {
* Index of the current thread among all those created.
*/
char init_args;
+#ifdef ACPI_DEBUGGER
+ acpi_object_type arg_types[4];
+#endif
char *arguments[4];
char num_threads_str[11];
char id_of_thread_str[11];
@@ -1119,6 +1131,10 @@ struct acpi_integrity_info {
#define ACPI_DB_CONSOLE_OUTPUT 0x02
#define ACPI_DB_DUPLICATE_OUTPUT 0x03
+struct acpi_object_info {
+ u32 types[ACPI_TOTAL_TYPES];
+};
+
/*****************************************************************************
*
* Debug
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c240bdf824f2..e85366ceb15a 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -220,6 +220,15 @@
#define ACPI_MUL_32(a) _ACPI_MUL(a, 5)
#define ACPI_MOD_32(a) _ACPI_MOD(a, 32)
+/* Test for ASCII character */
+
+#define ACPI_IS_ASCII(c) ((c) < 0x80)
+
+/* Signed integers */
+
+#define ACPI_SIGN_POSITIVE 0
+#define ACPI_SIGN_NEGATIVE 1
+
/*
* Rounding macros (Power of two boundaries only)
*/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 0dd088290d80..ea0d9076d408 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -272,17 +272,20 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
*/
u32 acpi_ns_opens_scope(acpi_object_type type);
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
- acpi_size size, char *name_buffer);
-
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+ char *full_path, u32 path_size, u8 no_trailing);
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+ u8 no_trailing);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
- struct acpi_buffer *buffer);
+ struct acpi_buffer *buffer, u8 no_trailing);
u8
acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index c81d98d09cac..0bd02c4a5f75 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -176,6 +176,7 @@ struct acpi_object_method {
u8 param_count;
u8 sync_level;
union acpi_operand_object *mutex;
+ union acpi_operand_object *node;
u8 *aml_start;
union {
acpi_internal_method implementation;
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 0cdd2fce493a..6021ccfb0b1c 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -225,11 +225,11 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *root);
/*
* psutils - parser utilities
*/
-union acpi_parse_object *acpi_ps_create_scope_op(void);
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml);
void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode);
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode);
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml);
void acpi_ps_free_op(union acpi_parse_object *op);
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 44997ca02ae2..f9992dced1f9 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -85,7 +85,7 @@ struct acpi_walk_state {
u8 namespace_override; /* Override existing objects */
u8 result_size; /* Total elements for the result stack */
u8 result_count; /* Current number of occupied elements of result stack */
- u32 aml_offset;
+ u8 *aml;
u32 arg_types;
u32 method_breakpoint; /* For single stepping */
u32 user_breakpoint; /* User AML breakpoint */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7e0b6f1bec9c..f7731f260c31 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -154,14 +154,20 @@ void acpi_tb_check_dsdt_header(void);
struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
void
-acpi_tb_install_table_with_override(u32 table_index,
- struct acpi_table_desc *new_table_desc,
- u8 override);
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+ u8 override, u32 *table_index);
acpi_status
acpi_tb_install_fixed_table(acpi_physical_address address,
- char *signature, u32 table_index);
+ char *signature, u32 *table_index);
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
+u8 acpi_is_valid_signature(char *signature);
+
+/*
+ * tbxfload
+ */
+acpi_status acpi_tb_load_namespace(void);
+
#endif /* __ACTABLES_H__ */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 6de0d3573037..fb2aa5066f3f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -167,6 +167,17 @@ struct acpi_pkg_info {
#define DB_QWORD_DISPLAY 8
/*
+ * utnonansi - Non-ANSI C library functions
+ */
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+
+/*
* utglobal - Global data structures and procedures
*/
acpi_status acpi_ut_init_globals(void);
@@ -205,8 +216,6 @@ acpi_status acpi_ut_hardware_initialize(void);
void acpi_ut_subsystem_shutdown(void);
-#define ACPI_IS_ASCII(c) ((c) < 0x80)
-
/*
* utcopy - Object construction and conversion interfaces
*/
@@ -508,7 +517,7 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
u8 acpi_ut_is_pci_root_bridge(char *id);
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
#endif
@@ -567,16 +576,6 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
/*
* utstring - String and character utilities
*/
-void acpi_ut_strupr(char *src_string);
-
-#ifdef ACPI_ASL_COMPILER
-void acpi_ut_strlwr(char *src_string);
-
-int acpi_ut_stricmp(char *string1, char *string2);
-#endif
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-
void acpi_ut_print_string(char *string, u16 max_length);
#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 3e6989738e85..e2ab59e39162 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -86,7 +86,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
/* Allocate a new parser op to be the root of the parsed tree */
- op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -129,7 +129,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
/* Evaluate the deferred arguments */
- op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+ op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 39da9da62bbf..435fc16e2f83 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -212,7 +212,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
*/
control_state->control.loop_count++;
if (control_state->control.loop_count >
- ACPI_MAX_LOOP_ITERATIONS) {
+ acpi_gbl_max_loop_iterations) {
status = AE_AML_INFINITE_LOOP;
break;
}
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
new file mode 100644
index 000000000000..309556efc553
--- /dev/null
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+ *
+ * Module Name: dsdebug - Parser/Interpreter interface - debugging
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#ifdef ACPI_DISASSEMBLER
+#include "acdisasm.h"
+#endif
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsdebug")
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+ const char *message);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_print_node_pathname
+ *
+ * PARAMETERS: node - Object
+ * message - Prefix message
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ * Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+ const char *message)
+{
+ struct acpi_buffer buffer;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ds_print_node_pathname);
+
+ if (!node) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[NULL NAME]"));
+ return_VOID;
+ }
+
+ /* Convert handle to full pathname and print it (with supplied message) */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+ status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
+ if (ACPI_SUCCESS(status)) {
+ if (message) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "%s ",
+ message));
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[%s] (Node %p)",
+ (char *)buffer.pointer, node));
+ ACPI_FREE(buffer.pointer);
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_dump_method_stack
+ *
+ * PARAMETERS: status - Method execution status
+ * walk_state - Current state of the parse tree walk
+ * op - Executing parse op
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Called when a method has been aborted because of an error.
+ * Dumps the method execution stack.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_dump_method_stack(acpi_status status,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ union acpi_parse_object *next;
+ struct acpi_thread_state *thread;
+ struct acpi_walk_state *next_walk_state;
+ struct acpi_namespace_node *previous_method = NULL;
+ union acpi_operand_object *method_desc;
+
+ ACPI_FUNCTION_TRACE(ds_dump_method_stack);
+
+ /* Ignore control codes, they are not errors */
+
+ if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ return_VOID;
+ }
+
+ /* We may be executing a deferred opcode */
+
+ if (walk_state->deferred_node) {
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "Executing subtree for Buffer/Package/Region\n"));
+ return_VOID;
+ }
+
+ /*
+ * If there is no Thread, we are not actually executing a method.
+ * This can happen when the iASL compiler calls the interpreter
+ * to perform constant folding.
+ */
+ thread = walk_state->thread;
+ if (!thread) {
+ return_VOID;
+ }
+
+ /* Display exception and method name */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "\n**** Exception %s during execution of method ",
+ acpi_format_exception(status)));
+ acpi_ds_print_node_pathname(walk_state->method_node, NULL);
+
+ /* Display stack of executing methods */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+ "\n\nMethod Execution Stack:\n"));
+ next_walk_state = thread->walk_state_list;
+
+ /* Walk list of linked walk states */
+
+ while (next_walk_state) {
+ method_desc = next_walk_state->method_desc;
+ if (method_desc) {
+ acpi_ex_stop_trace_method((struct acpi_namespace_node *)
+ method_desc->method.node,
+ method_desc, walk_state);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ " Method [%4.4s] executing: ",
+ acpi_ut_get_node_name(next_walk_state->
+ method_node)));
+
+ /* First method is the currently executing method */
+
+ if (next_walk_state == walk_state) {
+ if (op) {
+
+ /* Display currently executing ASL statement */
+
+ next = op->common.next;
+ op->common.next = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+ acpi_dm_disassemble(next_walk_state, op,
+ ACPI_UINT32_MAX);
+#endif
+ op->common.next = next;
+ }
+ } else {
+ /*
+ * This method has called another method
+ * NOTE: the method call parse subtree is already deleted at this
+ * point, so we cannot disassemble the method invocation.
+ */
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+ "Call to method "));
+ acpi_ds_print_node_pathname(previous_method, NULL);
+ }
+
+ previous_method = next_walk_state->method_node;
+ next_walk_state = next_walk_state->next;
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "\n"));
+ }
+
+ return_VOID;
+}
+
+#else
+void
+acpi_ds_dump_method_stack(acpi_status status,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ return;
+}
+
+#endif
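
acpi_ds_dump_method_stack() above returns early when the status belongs to the control class, since Break/Continue/Return are signalled through the exception mechanism but are not errors. A minimal standalone sketch of that class test follows; the mask and class values mirror ACPICA's actypes.h encoding but should be treated as assumptions here:

    #include <stdio.h>

    typedef unsigned int acpi_status;

    #define AE_CODE_MASK     0xF000  /* exception class mask (assumed, per actypes.h) */
    #define AE_CODE_CONTROL  0x4000  /* class used for Break/Continue/Return codes */

    /* Control codes signal AML flow changes, not failures */
    static int is_control_code(acpi_status status)
    {
        return (status & AE_CODE_MASK) == AE_CODE_CONTROL;
    }

    int main(void)
    {
        printf("0x4001 control? %d\n", is_control_code(0x4001)); /* 1: control class */
        printf("0x3001 control? %d\n", is_control_code(0x3001)); /* 0: AML error class */
        return 0;
    }
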
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 95779e8ec3bb..920f1b199bc6 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -237,12 +237,22 @@ acpi_ds_initialize_objects(u32 table_index,
return_ACPI_STATUS(status);
}
+ /* DSDT is always the first AML table */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "\nInitializing Namespace objects:\n"));
+ }
+
+ /* Summary of objects initialized */
+
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
- "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
- table->signature, owner_id, info.object_count,
- info.device_count, info.op_region_count,
- info.method_count, info.serial_method_count,
+ "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, "
+ "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
+ table->signature, table->oem_table_id, owner_id,
+ info.object_count, info.device_count,
+ info.op_region_count, info.method_count,
+ info.serial_method_count,
info.non_serial_method_count,
info.serialized_method_count));
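
The DSDT check added above relies on ACPI_COMPARE_NAME, which compares a 4-byte table signature that carries no null terminator. A hedged stand-in using memcmp (the real macro compares the four bytes as one 32-bit quantity; memcmp is the portable equivalent assumed here):

    #include <stdio.h>
    #include <string.h>

    static int compare_name(const char *a, const char *b)
    {
        return memcmp(a, b, 4) == 0;   /* signatures are exactly 4 bytes, no null */
    }

    int main(void)
    {
        const char sig[4] = { 'D', 'S', 'D', 'T' };
        printf("is DSDT: %d\n", compare_name(sig, "DSDT"));  /* 1 */
        printf("is SSDT: %d\n", compare_name(sig, "SSDT"));  /* 0 */
        return 0;
    }
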
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 85bb951430d9..bc32f3194afe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -46,11 +46,9 @@
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
-#ifdef ACPI_DISASSEMBLER
-#include "acdisasm.h"
-#endif
#include "acparser.h"
#include "amlcode.h"
+#include "acdebug.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
@@ -103,7 +101,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
/* Create/Init a root op for the method parse tree */
- op = acpi_ps_alloc_op(AML_METHOD_OP);
+ op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -205,7 +203,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Called on method error. Invoke the global exception handler if
- * present, dump the method data if the disassembler is configured
+ * present, dump the method data if the debugger is configured
*
* Note: Allows the exception handler to change the status code
*
@@ -214,6 +212,8 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
{
+ u32 aml_offset;
+
ACPI_FUNCTION_ENTRY();
/* Ignore AE_OK and control exception codes */
@@ -234,26 +234,30 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
* Handler can map the exception code to anything it wants, including
* AE_OK, in which case the executing method will not be aborted.
*/
+ aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+ walk_state->parser_state.
+ aml_start);
+
status = acpi_gbl_exception_handler(status,
walk_state->method_node ?
walk_state->method_node->
name.integer : 0,
walk_state->opcode,
- walk_state->aml_offset,
- NULL);
+ aml_offset, NULL);
acpi_ex_enter_interpreter();
}
acpi_ds_clear_implicit_return(walk_state);
-#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
+ acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
- /* Display method locals/args if disassembler is present */
+ /* Display method locals/args if debugger is present */
- acpi_dm_dump_method_info(status, walk_state, walk_state->op);
- }
+#ifdef ACPI_DEBUGGER
+ acpi_db_dump_method_info(status, walk_state);
#endif
+ }
return (status);
}
@@ -328,6 +332,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
return_ACPI_STATUS(AE_NULL_ENTRY);
}
+ acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
+
/* Prevent wraparound of thread count */
if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
@@ -574,9 +580,7 @@ cleanup:
/* On error, we must terminate the method properly */
acpi_ds_terminate_control_method(obj_desc, next_walk_state);
- if (next_walk_state) {
- acpi_ds_delete_walk_state(next_walk_state);
- }
+ acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
@@ -826,5 +830,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
}
}
+ acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
+ method.node, method_desc, walk_state);
+
return_VOID;
}
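
With walk_state->aml_offset replaced by a raw AML pointer, acpi_ds_method_error() now derives the offset on demand via ACPI_PTR_DIFF. A small sketch of that derivation using a local PTR_DIFF stand-in (buffer and positions are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Rough stand-in for ACPICA's ACPI_PTR_DIFF() */
    #define PTR_DIFF(a, b) ((size_t)((uint8_t *)(a) - (uint8_t *)(b)))

    int main(void)
    {
        uint8_t aml_table[64] = { 0 };
        uint8_t *aml_start = aml_table;       /* start of the AML byte stream */
        uint8_t *aml = aml_table + 37;        /* current parse position */

        /* Same derivation the exception-handler path now performs on demand */
        uint32_t aml_offset = (uint32_t)PTR_DIFF(aml, aml_start);
        printf("offset = %u\n", aml_offset);  /* prints 37 */
        return 0;
    }
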
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index ea0cc4e08f80..81d7b9863e32 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -480,8 +480,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
union acpi_operand_object **operand;
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
- u32 table_index;
struct acpi_table_header *table;
+ u32 table_index;
ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
@@ -504,6 +504,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
+ operand = &walk_state->operands[0];
+
/*
* Resolve the Signature string, oem_id string,
* and oem_table_id string operands
@@ -511,32 +513,34 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
- operand = &walk_state->operands[0];
-
/* Find the ACPI table */
status = acpi_tb_find_table(operand[0]->string.pointer,
operand[1]->string.pointer,
operand[2]->string.pointer, &table_index);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (status == AE_NOT_FOUND) {
+ ACPI_ERROR((AE_INFO,
+ "ACPI Table [%4.4s] OEM:(%s, %s) not found in RSDT/XSDT",
+ operand[0]->string.pointer,
+ operand[1]->string.pointer,
+ operand[2]->string.pointer));
+ }
+ goto cleanup;
}
- acpi_ut_remove_reference(operand[0]);
- acpi_ut_remove_reference(operand[1]);
- acpi_ut_remove_reference(operand[2]);
-
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
- return_ACPI_STATUS(AE_NOT_EXIST);
+ status = AE_NOT_EXIST;
+ goto cleanup;
}
obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
@@ -551,6 +555,11 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+cleanup:
+ acpi_ut_remove_reference(operand[0]);
+ acpi_ut_remove_reference(operand[1]);
+ acpi_ut_remove_reference(operand[2]);
+
return_ACPI_STATUS(status);
}
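
The dsopcode.c change above converts early returns into jumps to a single cleanup label, so the three resolved operands are dereferenced on every exit path rather than only on success. A compact model of the pattern, with all names hypothetical:

    #include <stdio.h>

    struct obj { int refs; };

    static void remove_reference(struct obj *o)
    {
        if (o)
            o->refs--;                  /* drop one reference */
    }

    static int eval_table_region(struct obj *operand[3], int fail_step)
    {
        int status = 0;

        if (fail_step == 1) {           /* e.g. operand resolution failed */
            status = -1;
            goto cleanup;
        }
        if (fail_step == 2) {           /* e.g. table not found in RSDT/XSDT */
            status = -2;
            goto cleanup;
        }
        /* ... success path would attach the region object here ... */

    cleanup:
        /* References are dropped exactly once, on success and on every error */
        remove_reference(operand[0]);
        remove_reference(operand[1]);
        remove_reference(operand[2]);
        return status;
    }

    int main(void)
    {
        struct obj a = { 1 }, b = { 1 }, c = { 1 };
        struct obj *ops[3] = { &a, &b, &c };

        printf("status %d, refs %d %d %d\n",
               eval_table_region(ops, 2), a.refs, b.refs, c.refs);
        return 0;
    }
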
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 845ff44919c3..097188a6b1c1 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -388,7 +388,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* Create a new op */
- op = acpi_ps_alloc_op(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index fcaa30c611fb..e2c08cd79aca 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -335,7 +335,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* Create a new op */
- op = acpi_ps_alloc_op(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 2ba28a63fb68..5ee79a16fe33 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -626,9 +626,17 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id)
{
acpi_status status;
+ struct acpi_reg_walk_info info;
ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+ info.space_id = space_id;
+ info.reg_run_count = 0;
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+ " Running _REG methods for SpaceId %s\n",
+ acpi_ut_get_region_name(info.space_id)));
+
/*
* Run all _REG methods for all Operation Regions for this space ID. This
* is a separate walk in order to handle any interdependencies between
@@ -637,7 +645,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
*/
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
- NULL, &space_id, NULL);
+ NULL, &info, NULL);
/* Special case for EC: handle "orphan" _REG methods with no region */
@@ -645,6 +653,11 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_ev_orphan_ec_reg_method(node);
}
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+ " Executed %u _REG methods for SpaceId %s\n",
+ info.reg_run_count,
+ acpi_ut_get_region_name(info.space_id)));
+
return_ACPI_STATUS(status);
}
@@ -664,10 +677,10 @@ acpi_ev_reg_run(acpi_handle obj_handle,
{
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node;
- acpi_adr_space_type space_id;
acpi_status status;
+ struct acpi_reg_walk_info *info;
- space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+ info = ACPI_CAST_PTR(struct acpi_reg_walk_info, context);
/* Convert and validate the device handle */
@@ -696,13 +709,14 @@ acpi_ev_reg_run(acpi_handle obj_handle,
/* Object is a Region */
- if (obj_desc->region.space_id != space_id) {
+ if (obj_desc->region.space_id != info->space_id) {
/* This region is for a different address space, just ignore it */
return (AE_OK);
}
+ info->reg_run_count++;
status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
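
The evregion.c change replaces the bare space_id context with struct acpi_reg_walk_info, letting the walk callback both filter by address space and count the _REG methods it runs. A minimal analogue of that context-struct pattern (all names here are illustrative, not the ACPICA API):

    #include <stdio.h>

    struct reg_walk_info {
        int space_id;
        unsigned reg_run_count;
    };

    static int reg_run(int region_space_id, void *context)
    {
        struct reg_walk_info *info = context;

        if (region_space_id != info->space_id)
            return 0;               /* different address space: ignore */

        info->reg_run_count++;      /* matched: would run _REG here */
        return 0;
    }

    int main(void)
    {
        struct reg_walk_info info = { 3, 0 };
        int regions[] = { 0, 3, 3, 1 };
        unsigned i;

        for (i = 0; i < 4; i++)
            reg_run(regions[i], &info);

        printf("Executed %u _REG methods\n", info.reg_run_count);
        return 0;
    }
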
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 24a4c5c2b124..b540913c11ac 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -162,14 +162,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ex_load_table_op);
- /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
-
- if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
- (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
- (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
- return_ACPI_STATUS(AE_AML_STRING_LIMIT);
- }
-
/* Find the ACPI table in the RSDT/XSDT */
status = acpi_tb_find_table(operand[0]->string.pointer,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index aaeea4840aaa..ccb7219bdcee 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -486,6 +486,7 @@ acpi_ex_create_method(u8 * aml_start,
obj_desc->method.aml_start = aml_start;
obj_desc->method.aml_length = aml_length;
+ obj_desc->method.node = operand[0];
/*
* Disassemble the method flags. Split off the arg_count, Serialized
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 815442bbd051..de92458236f5 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -43,11 +43,21 @@
#include <acpi/acpi.h>
#include "accommon.h"
+#include "acnamesp.h"
#include "acinterp.h"
+#include "acparser.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exdebug")
+static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
+
+/* Local prototypes */
+
+#ifdef ACPI_DEBUG_OUTPUT
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
+#endif
+
#ifndef ACPI_NO_ERROR_MESSAGES
/*******************************************************************************
*
@@ -70,6 +80,7 @@ ACPI_MODULE_NAME("exdebug")
* enabled if necessary.
*
******************************************************************************/
+
void
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
u32 level, u32 index)
@@ -308,3 +319,316 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
return_VOID;
}
#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_interpreter_trace_enabled
+ *
+ * PARAMETERS: name - Whether the method name should be matched;
+ * this should be checked before starting
+ * the tracer
+ *
+ * RETURN: TRUE if interpreter trace is enabled.
+ *
+ * DESCRIPTION: Check whether interpreter trace is enabled
+ *
+ ******************************************************************************/
+
+static u8 acpi_ex_interpreter_trace_enabled(char *name)
+{
+
+ /* Check if tracing is enabled */
+
+ if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check if tracing is filtered:
+ *
+ * 1. If the tracer is started, acpi_gbl_trace_method_object should have
+ * been filled by the trace starter
+ * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
+ * matched if it is specified
+ * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
+ * not be cleared by the trace stopper during the first match
+ */
+ if (acpi_gbl_trace_method_object) {
+ return (TRUE);
+ }
+ if (name &&
+ (acpi_gbl_trace_method_name &&
+ strcmp(acpi_gbl_trace_method_name, name))) {
+ return (FALSE);
+ }
+ if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
+ !acpi_gbl_trace_method_name) {
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_get_trace_event_name
+ *
+ * PARAMETERS: type - Trace event type
+ *
+ * RETURN: Trace event name.
+ *
+ * DESCRIPTION: Used to obtain the full trace event name.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DEBUG_OUTPUT
+
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
+{
+ switch (type) {
+ case ACPI_TRACE_AML_METHOD:
+
+ return "Method";
+
+ case ACPI_TRACE_AML_OPCODE:
+
+ return "Opcode";
+
+ case ACPI_TRACE_AML_REGION:
+
+ return "Region";
+
+ default:
+
+ return "";
+ }
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_trace_point
+ *
+ * PARAMETERS: type - Trace event type
+ * begin - TRUE if before execution
+ * aml - Executed AML address
+ * pathname - Object path
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Internal interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+ u8 begin, u8 *aml, char *pathname)
+{
+
+ ACPI_FUNCTION_NAME(ex_trace_point);
+
+ if (pathname) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+ "%s %s [0x%p:%s] execution.\n",
+ acpi_ex_get_trace_event_name(type),
+ begin ? "Begin" : "End", aml, pathname));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+ "%s %s [0x%p] execution.\n",
+ acpi_ex_get_trace_event_name(type),
+ begin ? "Begin" : "End", aml));
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_start_trace_method
+ *
+ * PARAMETERS: method_node - Node of the method
+ * obj_desc - The method object
+ * walk_state - current state, NULL if not yet executing
+ * a method.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ char *pathname = NULL;
+ u8 enabled = FALSE;
+
+ ACPI_FUNCTION_NAME(ex_start_trace_method);
+
+ if (method_node) {
+ pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ enabled = acpi_ex_interpreter_trace_enabled(pathname);
+ if (enabled && !acpi_gbl_trace_method_object) {
+ acpi_gbl_trace_method_object = obj_desc;
+ acpi_gbl_original_dbg_level = acpi_dbg_level;
+ acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+ acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
+ acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
+
+ if (acpi_gbl_trace_dbg_level) {
+ acpi_dbg_level = acpi_gbl_trace_dbg_level;
+ }
+ if (acpi_gbl_trace_dbg_layer) {
+ acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+ }
+ }
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit:
+ if (enabled) {
+ ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
+ obj_desc ? obj_desc->method.aml_start : NULL,
+ pathname);
+ }
+ if (pathname) {
+ ACPI_FREE(pathname);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_stop_trace_method
+ *
+ * PARAMETERS: method_node - Node of the method
+ * obj_desc - The method object
+ * walk_state - current state, NULL if not yet executing
+ * a method.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Stop control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ char *pathname = NULL;
+ u8 enabled;
+
+ ACPI_FUNCTION_NAME(ex_stop_trace_method);
+
+ if (method_node) {
+ pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit_path;
+ }
+
+ enabled = acpi_ex_interpreter_trace_enabled(NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ if (enabled) {
+ ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE,
+ obj_desc ? obj_desc->method.aml_start : NULL,
+ pathname);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit_path;
+ }
+
+ /* Check whether the tracer should be stopped */
+
+ if (acpi_gbl_trace_method_object == obj_desc) {
+
+ /* Disable further tracing if type is one-shot */
+
+ if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) {
+ acpi_gbl_trace_method_name = NULL;
+ }
+
+ acpi_dbg_level = acpi_gbl_original_dbg_level;
+ acpi_dbg_layer = acpi_gbl_original_dbg_layer;
+ acpi_gbl_trace_method_object = NULL;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit_path:
+ if (pathname) {
+ ACPI_FREE(pathname);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_start_trace_opcode
+ *
+ * PARAMETERS: op - The parser opcode object
+ * walk_state - current state, NULL if not yet executing
+ * a method.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Start opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_NAME(ex_start_trace_opcode);
+
+ if (acpi_ex_interpreter_trace_enabled(NULL) &&
+ (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+ ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE,
+ op->common.aml, op->common.aml_op_name);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_stop_trace_opcode
+ *
+ * PARAMETERS: op - The parser opcode object
+ * walk_state - current state, NULL if not yet executing
+ * a method.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Stop opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_NAME(ex_stop_trace_opcode);
+
+ if (acpi_ex_interpreter_trace_enabled(NULL) &&
+ (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+ ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE,
+ op->common.aml, op->common.aml_op_name);
+ }
+}
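
The filtering in acpi_ex_interpreter_trace_enabled() is easy to misread, so here is a condensed userspace model of the same decision order: tracer already started wins, then the optional name filter, then the oneshot check. Flag values and the method name below are assumptions, not the ACPICA definitions:

    #include <stdio.h>
    #include <string.h>

    #define TRACE_ENABLED  0x01
    #define TRACE_ONESHOT  0x02

    static unsigned trace_flags = TRACE_ENABLED | TRACE_ONESHOT;
    static const char *trace_method_name = "\\_SB.PCI0._INI"; /* hypothetical filter */
    static void *trace_method_object;    /* non-NULL once the tracer has started */

    static int trace_enabled(const char *name)
    {
        if (!(trace_flags & TRACE_ENABLED))
            return 0;
        if (trace_method_object)         /* tracer already running: keep tracing */
            return 1;
        if (name && trace_method_name && strcmp(trace_method_name, name))
            return 0;                    /* name filter did not match */
        if ((trace_flags & TRACE_ONESHOT) && !trace_method_name)
            return 0;                    /* oneshot filter already consumed */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", trace_enabled("\\_SB.PCI0._INI")); /* 1: matches filter */
        printf("%d\n", trace_enabled("\\_GPE._L01"));      /* 0: filtered out */
        return 0;
    }
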
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 401e7edcd419..d836f888bb16 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -995,9 +995,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
acpi_os_printf(" %p ", obj_desc->reference.node);
- status =
- acpi_ns_handle_to_pathname(obj_desc->reference.node,
- &ret_buf);
+ status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
+ &ret_buf, TRUE);
if (ACPI_FAILURE(status)) {
acpi_os_printf(" Could not convert name to pathname\n");
} else {
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c7e3b929aa85..1b372ef69308 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -126,7 +126,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
if (!source_desc) {
ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
node->name.ascii, node));
- return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
}
/*
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index b6b7f3af29e4..7b109128b035 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -337,8 +337,9 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
acpi_object_type * return_type,
union acpi_operand_object **return_desc)
{
- union acpi_operand_object *obj_desc = (void *)operand;
- struct acpi_namespace_node *node;
+ union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
+ struct acpi_namespace_node *node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, operand);
acpi_object_type type;
acpi_status status;
@@ -355,9 +356,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
case ACPI_DESC_TYPE_NAMED:
type = ((struct acpi_namespace_node *)obj_desc)->type;
- obj_desc =
- acpi_ns_get_attached_object((struct acpi_namespace_node *)
- obj_desc);
+ obj_desc = acpi_ns_get_attached_object(node);
/* If we had an Alias node, use the attached object for type info */
@@ -368,6 +367,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
acpi_namespace_node *)
obj_desc);
}
+
+ if (!obj_desc) {
+ ACPI_ERROR((AE_INFO,
+ "[%4.4s] Node is unresolved or uninitialized",
+ acpi_ut_get_node_name(node)));
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
+ }
break;
default:
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 52dfd0d050fa..d62a61612b3f 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -160,19 +160,8 @@ acpi_set_firmware_waking_vectors(acpi_physical_address physical_address,
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (acpi_gbl_reduced_hardware) {
- return_ACPI_STATUS (AE_OK);
- }
-
- if (acpi_gbl_facs32) {
- (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
- physical_address,
- physical_address64);
- }
- if (acpi_gbl_facs64) {
- (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
+ if (acpi_gbl_FACS) {
+ (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS,
physical_address,
physical_address64);
}
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 80670cb32b5a..7eba578d36f3 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -274,6 +274,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
acpi_ex_exit_interpreter();
if (ACPI_FAILURE(status)) {
+ info->return_object = NULL;
goto cleanup;
}
@@ -464,7 +465,8 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
status = acpi_ns_evaluate(info);
- ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
+ "Executed module-level code at %p\n",
method_obj->method.aml_start));
/* Delete a possible implicit return value (in slack mode) */
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index bd6cd4a81316..14ab83668207 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -111,7 +111,21 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
if (ACPI_SUCCESS(status)) {
acpi_tb_set_table_loaded_flag(table_index, TRUE);
} else {
- (void)acpi_tb_release_owner_id(table_index);
+ /*
+ * On error, delete any namespace objects created by this table.
+ * We cannot initialize these objects, so delete them. There are
+ * a couple of especially bad cases:
+ * AE_ALREADY_EXISTS - namespace collision.
+ * AE_NOT_FOUND - the target of a Scope operator does not
+ * exist. This target of Scope must already exist in the
+ * namespace, as per the ACPI specification.
+ */
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
+ tables[table_index].owner_id);
+ acpi_tb_release_owner_id(table_index);
+
+ return_ACPI_STATUS(status);
}
unlock:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d293d9748036..8934b4eddb73 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -51,73 +51,6 @@ ACPI_MODULE_NAME("nsnames")
/*******************************************************************************
*
- * FUNCTION: acpi_ns_build_external_path
- *
- * PARAMETERS: node - NS node whose pathname is needed
- * size - Size of the pathname
- * *name_buffer - Where to return the pathname
- *
- * RETURN: Status
- * Places the pathname into the name_buffer, in external format
- * (name segments separated by path separators)
- *
- * DESCRIPTION: Generate a full pathaname
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
- acpi_size size, char *name_buffer)
-{
- acpi_size index;
- struct acpi_namespace_node *parent_node;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Special case for root */
-
- index = size - 1;
- if (index < ACPI_NAME_SIZE) {
- name_buffer[0] = AML_ROOT_PREFIX;
- name_buffer[1] = 0;
- return (AE_OK);
- }
-
- /* Store terminator byte, then build name backwards */
-
- parent_node = node;
- name_buffer[index] = 0;
-
- while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
- index -= ACPI_NAME_SIZE;
-
- /* Put the name into the buffer */
-
- ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
- parent_node = parent_node->parent;
-
- /* Prefix name with the path separator */
-
- index--;
- name_buffer[index] = ACPI_PATH_SEPARATOR;
- }
-
- /* Overwrite final separator with the root prefix character */
-
- name_buffer[index] = AML_ROOT_PREFIX;
-
- if (index != 0) {
- ACPI_ERROR((AE_INFO,
- "Could not construct external pathname; index=%u, size=%u, Path=%s",
- (u32) index, (u32) size, &name_buffer[size]));
-
- return (AE_BAD_PARAMETER);
- }
-
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_get_external_pathname
*
* PARAMETERS: node - Namespace node whose pathname is needed
@@ -130,37 +63,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* for error and debug statements.
*
******************************************************************************/
-
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
- acpi_status status;
char *name_buffer;
- acpi_size size;
ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
- /* Calculate required buffer size based on depth below root */
-
- size = acpi_ns_get_pathname_length(node);
- if (!size) {
- return_PTR(NULL);
- }
-
- /* Allocate a buffer to be returned to caller */
-
- name_buffer = ACPI_ALLOCATE_ZEROED(size);
- if (!name_buffer) {
- ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
- return_PTR(NULL);
- }
-
- /* Build the path in the allocated buffer */
-
- status = acpi_ns_build_external_path(node, size, name_buffer);
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(name_buffer);
- return_PTR(NULL);
- }
+ name_buffer = acpi_ns_get_normalized_pathname(node, FALSE);
return_PTR(name_buffer);
}
@@ -180,33 +89,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
{
acpi_size size;
- struct acpi_namespace_node *next_node;
ACPI_FUNCTION_ENTRY();
- /*
- * Compute length of pathname as 5 * number of name segments.
- * Go back up the parent tree to the root
- */
- size = 0;
- next_node = node;
+ size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
- while (next_node && (next_node != acpi_gbl_root_node)) {
- if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO,
- "Invalid Namespace Node (%p) while traversing namespace",
- next_node));
- return (0);
- }
- size += ACPI_PATH_SEGMENT_LENGTH;
- next_node = next_node->parent;
- }
-
- if (!size) {
- size = 1; /* Root node case */
- }
-
- return (size + 1); /* +1 for null string terminator */
+ return (size);
}
/*******************************************************************************
@@ -216,6 +104,8 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
* PARAMETERS: target_handle - Handle of named object whose name is
* to be found
* buffer - Where the pathname is returned
+ * no_trailing - Remove trailing '_' from each name
+ * segment
*
* RETURN: Status, Buffer is filled with pathname if status is AE_OK
*
@@ -225,7 +115,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
- struct acpi_buffer * buffer)
+ struct acpi_buffer * buffer, u8 no_trailing)
{
acpi_status status;
struct acpi_namespace_node *node;
@@ -240,7 +130,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
/* Determine size required for the caller buffer */
- required_size = acpi_ns_get_pathname_length(node);
+ required_size =
+ acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
if (!required_size) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -254,8 +145,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
/* Build the path in the caller buffer */
- status =
- acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ (void)acpi_ns_build_normalized_path(node, buffer->pointer,
+ required_size, no_trailing);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -264,3 +155,149 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
(char *)buffer->pointer, (u32) required_size));
return_ACPI_STATUS(AE_OK);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_normalized_path
+ *
+ * PARAMETERS: node - Namespace node
+ * full_path - Where the path name is returned
+ * path_size - Size of returned path name buffer
+ * no_trailing - Remove trailing '_' from each name segment
+ *
+ * RETURN: 1 if the AML path is empty; otherwise the length of the
+ * pathname + 1, which means 'FullPath' contains a trailing
+ * null.
+ *
+ * DESCRIPTION: Build and return a full namespace pathname.
+ * Note that if the size of 'FullPath' isn't large enough to
+ * contain the namespace node's path name, the actual required
+ * buffer length is returned, which will be greater than
+ * 'PathSize'. Callers can therefore check the return value
+ * to determine the required buffer size of 'FullPath'.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+ char *full_path, u32 path_size, u8 no_trailing)
+{
+ u32 length = 0, i;
+ char name[ACPI_NAME_SIZE];
+ u8 do_no_trailing;
+ char c, *left, *right;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node);
+
+#define ACPI_PATH_PUT8(path, size, byte, length) \
+ do { \
+ if ((length) < (size)) \
+ { \
+ (path)[(length)] = (byte); \
+ } \
+ (length)++; \
+ } while (0)
+
+ /*
+ * Make sure the path_size is correct, so that we don't need to
+ * validate both full_path and path_size.
+ */
+ if (!full_path) {
+ path_size = 0;
+ }
+
+ if (!node) {
+ goto build_trailing_null;
+ }
+
+ next_node = node;
+ while (next_node && next_node != acpi_gbl_root_node) {
+ if (next_node != node) {
+ ACPI_PATH_PUT8(full_path, path_size,
+ AML_DUAL_NAME_PREFIX, length);
+ }
+ ACPI_MOVE_32_TO_32(name, &next_node->name);
+ do_no_trailing = no_trailing;
+ for (i = 0; i < 4; i++) {
+ c = name[4 - i - 1];
+ if (do_no_trailing && c != '_') {
+ do_no_trailing = FALSE;
+ }
+ if (!do_no_trailing) {
+ ACPI_PATH_PUT8(full_path, path_size, c, length);
+ }
+ }
+ next_node = next_node->parent;
+ }
+ ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length);
+
+ /* Reverse the path string */
+
+ if (length <= path_size) {
+ left = full_path;
+ right = full_path + length - 1;
+ while (left < right) {
+ c = *left;
+ *left++ = *right;
+ *right-- = c;
+ }
+ }
+
+ /* Append the trailing null */
+
+build_trailing_null:
+ ACPI_PATH_PUT8(full_path, path_size, '\0', length);
+
+#undef ACPI_PATH_PUT8
+
+ return_UINT32(length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_get_normalized_pathname
+ *
+ * PARAMETERS: node - Namespace node whose pathname is needed
+ * no_trailing - Remove trailing '_' from each name segment
+ *
+ * RETURN: Pointer to storage containing the fully qualified name of
+ * the node, in external format (name segments separated by path
+ * separators).
+ *
+ * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
+ * for error and debug statements. All trailing '_' will be
+ * removed from the full pathname if 'NoTrailing' is specified.
+ *
+ ******************************************************************************/
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+ u8 no_trailing)
+{
+ char *name_buffer;
+ acpi_size size;
+
+ ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node);
+
+ /* Calculate required buffer size based on depth below root */
+
+ size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
+ if (!size) {
+ return_PTR(NULL);
+ }
+
+ /* Allocate a buffer to be returned to caller */
+
+ name_buffer = ACPI_ALLOCATE_ZEROED(size);
+ if (!name_buffer) {
+ ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
+ return_PTR(NULL);
+ }
+
+ /* Build the path in the allocated buffer */
+
+ (void)acpi_ns_build_normalized_path(node, name_buffer, size,
+ no_trailing);
+
+ return_PTR(name_buffer);
+}
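
acpi_ns_get_normalized_pathname() uses the measure-then-fill convention: call acpi_ns_build_normalized_path() once with a NULL buffer to learn the required size, allocate, then call again to fill. A sketch of the same convention using snprintf() as a stand-in; note one difference, assumed here, that snprintf() returns the length excluding the trailing null while the ACPICA helper's return value includes it:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Pass 1: NULL buffer, size 0 -> returns required length (sans null) */
        int needed = snprintf(NULL, 0, "\\%s.%s", "_SB_", "PCI0");
        if (needed < 0)
            return 1;

        /* Allocate needed + 1 for the trailing null, then pass 2 fills it */
        char *path = malloc((size_t)needed + 1);
        if (!path)
            return 1;
        snprintf(path, (size_t)needed + 1, "\\%s.%s", "_SB_", "PCI0");

        printf("%s (%d bytes + null)\n", path, needed);
        free(path);
        return 0;
    }
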
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 57a4cfe547e4..3736d43b18b9 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -70,7 +70,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
{
union acpi_parse_object *parse_root;
acpi_status status;
- u32 aml_length;
+ u32 aml_length;
u8 *aml_start;
struct acpi_walk_state *walk_state;
struct acpi_table_header *table;
@@ -78,6 +78,20 @@ acpi_ns_one_complete_parse(u32 pass_number,
ACPI_FUNCTION_TRACE(ns_one_complete_parse);
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Table must consist of at least a complete header */
+
+ if (table->length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_BAD_HEADER);
+ }
+
+ aml_start = (u8 *)table + sizeof(struct acpi_table_header);
+ aml_length = table->length - sizeof(struct acpi_table_header);
+
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -85,7 +99,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Create and init a Root Node */
- parse_root = acpi_ps_create_scope_op();
+ parse_root = acpi_ps_create_scope_op(aml_start);
if (!parse_root) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -98,23 +112,12 @@ acpi_ns_one_complete_parse(u32 pass_number,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- status = acpi_get_table_by_index(table_index, &table);
+ status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
+ aml_start, aml_length, NULL,
+ (u8)pass_number);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
- acpi_ps_free_op(parse_root);
- return_ACPI_STATUS(status);
- }
-
- /* Table must consist of at least a complete header */
-
- if (table->length < sizeof(struct acpi_table_header)) {
- status = AE_BAD_HEADER;
- } else {
- aml_start = (u8 *) table + sizeof(struct acpi_table_header);
- aml_length = table->length - sizeof(struct acpi_table_header);
- status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
- aml_start, aml_length, NULL,
- (u8) pass_number);
+ goto cleanup;
}
/* Found OSDT table, enable the namespace override feature */
@@ -124,11 +127,6 @@ acpi_ns_one_complete_parse(u32 pass_number,
walk_state->namespace_override = TRUE;
}
- if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(walk_state);
- goto cleanup;
- }
-
/* start_node is the default location to load the table */
if (start_node && start_node != acpi_gbl_root_node) {
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 8d8104b8bd28..de325ae04ce1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -83,7 +83,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_ns_handle_to_pathname(node, &buffer);
+ status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
if (ACPI_SUCCESS(status)) {
if (message) {
acpi_os_printf("%s ", message);
@@ -596,6 +596,23 @@ void acpi_ns_terminate(void)
ACPI_FUNCTION_TRACE(ns_terminate);
+#ifdef ACPI_EXEC_APP
+ {
+ union acpi_operand_object *prev;
+ union acpi_operand_object *next;
+
+ /* Delete any module-level code blocks */
+
+ next = acpi_gbl_module_code_list;
+ while (next) {
+ prev = next;
+ next = next->method.mutex;
+ prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
+ acpi_ut_remove_reference(prev);
+ }
+ }
+#endif
+
/*
* Free the entire namespace -- all nodes and all objects
* attached to the nodes
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 9ff643b9553f..4b4d2f43d406 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -172,11 +172,15 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
- if (name_type == ACPI_FULL_PATHNAME) {
+ if (name_type == ACPI_FULL_PATHNAME ||
+ name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
/* Get the full pathname (From the namespace root) */
- status = acpi_ns_handle_to_pathname(handle, buffer);
+ status = acpi_ns_handle_to_pathname(handle, buffer,
+ name_type ==
+ ACPI_FULL_PATHNAME ? FALSE :
+ TRUE);
return (status);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 6d038770577b..29d8b7b01dca 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -287,7 +287,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
"Control Method - %p Desc %p Path=%p\n", node,
method_desc, path));
- name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start);
if (!name_op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -484,7 +484,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state)
{
- u32 aml_offset;
+ u8 *aml;
union acpi_parse_object *field;
union acpi_parse_object *arg = NULL;
u16 opcode;
@@ -498,8 +498,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
ACPI_FUNCTION_TRACE(ps_get_next_field);
- aml_offset =
- (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+ aml = parser_state->aml;
/* Determine field type */
@@ -536,13 +535,11 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/* Allocate a new field op */
- field = acpi_ps_alloc_op(opcode);
+ field = acpi_ps_alloc_op(opcode, aml);
if (!field) {
return_PTR(NULL);
}
- field->common.aml_offset = aml_offset;
-
/* Decode the field type */
switch (opcode) {
@@ -604,6 +601,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
* Argument for Connection operator can be either a Buffer
* (resource descriptor), or a name_string.
*/
+ aml = parser_state->aml;
if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
parser_state->aml++;
@@ -616,7 +614,8 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/* Non-empty list */
- arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ arg =
+ acpi_ps_alloc_op(AML_INT_BYTELIST_OP, aml);
if (!arg) {
acpi_ps_free_op(field);
return_PTR(NULL);
@@ -665,7 +664,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
parser_state->aml = pkg_end;
} else {
- arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, aml);
if (!arg) {
acpi_ps_free_op(field);
return_PTR(NULL);
@@ -730,7 +729,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
/* Constants, strings, and namestrings are all the same size */
- arg = acpi_ps_alloc_op(AML_BYTE_OP);
+ arg = acpi_ps_alloc_op(AML_BYTE_OP, parser_state->aml);
if (!arg) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -777,7 +776,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
/* Non-empty list */
- arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP,
+ parser_state->aml);
if (!arg) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -807,7 +807,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
/* null_name or name_string */
- arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ arg =
+ acpi_ps_alloc_op(AML_INT_NAMEPATH_OP,
+ parser_state->aml);
if (!arg) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 90437227d790..03ac8c9a67ab 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -51,6 +51,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
+#include "acinterp.h"
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
@@ -125,10 +126,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
*/
while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
&& !walk_state->arg_count) {
- walk_state->aml_offset =
- (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
- walk_state->parser_state.
- aml_start);
+ walk_state->aml = walk_state->parser_state.aml;
status =
acpi_ps_get_next_arg(walk_state,
@@ -140,7 +138,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
}
if (arg) {
- arg->common.aml_offset = walk_state->aml_offset;
acpi_ps_append_arg(op, arg);
}
@@ -324,6 +321,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
union acpi_operand_object *method_obj;
struct acpi_namespace_node *parent_node;
+ ACPI_FUNCTION_TRACE(ps_link_module_code);
+
/* Get the tail of the list */
prev = next = acpi_gbl_module_code_list;
@@ -343,9 +342,13 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!method_obj) {
- return;
+ return_VOID;
}
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Create/Link new code block: %p\n",
+ method_obj));
+
if (parent_op->common.node) {
parent_node = parent_op->common.node;
} else {
@@ -370,8 +373,14 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
prev->method.mutex = method_obj;
}
} else {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Appending to existing code block: %p\n",
+ prev));
+
prev->method.aml_length += aml_length;
}
+
+ return_VOID;
}
/*******************************************************************************
@@ -494,16 +503,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
continue;
}
- op->common.aml_offset = walk_state->aml_offset;
-
- if (walk_state->op_info) {
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
- (u32) op->common.aml_opcode,
- walk_state->op_info->name, op,
- parser_state->aml,
- op->common.aml_offset));
- }
+ acpi_ex_start_trace_opcode(op, walk_state);
}
/*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 2f5ddd806c58..e54bc2aa7a88 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -66,12 +66,11 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
{
+ u32 aml_offset;
ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
- walk_state->aml_offset =
- (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
- walk_state->parser_state.aml_start);
+ walk_state->aml = walk_state->parser_state.aml;
walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
/*
@@ -98,10 +97,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
/* The opcode is unrecognized. Complain and skip unknown opcodes */
if (walk_state->pass_number == 2) {
+ aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+ walk_state->
+ parser_state.aml_start);
+
ACPI_ERROR((AE_INFO,
"Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
walk_state->opcode,
- (u32)(walk_state->aml_offset +
+ (u32)(aml_offset +
sizeof(struct acpi_table_header))));
ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
@@ -115,14 +118,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
acpi_os_printf
("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
walk_state->opcode,
- (u32)(walk_state->aml_offset +
+ (u32)(aml_offset +
sizeof(struct acpi_table_header)));
/* Dump the context surrounding the invalid opcode */
acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
aml - 16), 48, DB_BYTE_DISPLAY,
- (walk_state->aml_offset +
+ (aml_offset +
sizeof(struct acpi_table_header) -
16));
acpi_os_printf(" */\n");
@@ -294,7 +297,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
/* Create Op structure and append to parent's argument list */
walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
- op = acpi_ps_alloc_op(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode, aml_op_start);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index a555f7f7b9a2..98001d7f6f80 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -147,6 +147,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
return_ACPI_STATUS(AE_OK); /* OK for now */
}
+ acpi_ex_stop_trace_opcode(op, walk_state);
+
/* Delete this op and the subtree below it if asked to */
if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
@@ -185,7 +187,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
* op must be replaced by a placeholder return op
*/
replacement_op =
- acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+ op->common.aml);
if (!replacement_op) {
status = AE_NO_MEMORY;
}
@@ -209,7 +212,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP)) {
replacement_op =
- acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+ op->common.aml);
if (!replacement_op) {
status = AE_NO_MEMORY;
}
@@ -224,7 +228,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
AML_VAR_PACKAGE_OP)) {
replacement_op =
acpi_ps_alloc_op(op->common.
- aml_opcode);
+ aml_opcode,
+ op->common.aml);
if (!replacement_op) {
status = AE_NO_MEMORY;
} else {
@@ -240,7 +245,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
default:
replacement_op =
- acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+ acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+ op->common.aml);
if (!replacement_op) {
status = AE_NO_MEMORY;
}
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 32440912023a..183cc1efbc51 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("psutils")
* DESCRIPTION: Create a Scope and associated namepath op with the root name
*
******************************************************************************/
-union acpi_parse_object *acpi_ps_create_scope_op(void)
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml)
{
union acpi_parse_object *scope_op;
- scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
+ scope_op = acpi_ps_alloc_op(AML_SCOPE_OP, aml);
if (!scope_op) {
return (NULL);
}
@@ -103,6 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
* FUNCTION: acpi_ps_alloc_op
*
* PARAMETERS: opcode - Opcode that will be stored in the new Op
+ * aml - Address of the opcode
*
* RETURN: Pointer to the new Op, null on failure
*
@@ -112,7 +113,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
*
******************************************************************************/
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
{
union acpi_parse_object *op;
const struct acpi_opcode_info *op_info;
@@ -149,6 +150,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
if (op) {
acpi_ps_init_op(op, opcode);
+ op->common.aml = aml;
op->common.flags = flags;
}
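
Taken together, the psargs/psloop/psutils changes above replace the computed aml_offset with the raw AML address captured at parse time. A minimal sketch of the new allocator contract follows; the caller and the single-byte decode are illustrative, not part of the patch:

	/* Sketch only: every Op now records the address of its opcode. */
	union acpi_parse_object *parse_one(struct acpi_parse_state *ps)
	{
		u8 *aml = ps->aml;		/* capture before consuming bytes */
		u16 opcode = ACPI_GET8(aml);	/* simplified one-byte decode */

		return (acpi_ps_alloc_op(opcode, aml));
	}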
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 841a5ea06094..4254805dd319 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -47,15 +47,12 @@
#include "acdispat.h"
#include "acinterp.h"
#include "actables.h"
+#include "acnamesp.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
@@ -76,7 +73,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
******************************************************************************/
acpi_status
-acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
+acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
{
acpi_status status;
@@ -85,108 +82,14 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
return (status);
}
- /* TBDs: Validate name, allow full path or just nameseg */
-
- acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
+ acpi_gbl_trace_method_name = name;
acpi_gbl_trace_flags = flags;
-
- if (debug_level) {
- acpi_gbl_trace_dbg_level = debug_level;
- }
- if (debug_layer) {
- acpi_gbl_trace_dbg_layer = debug_layer;
- }
+ acpi_gbl_trace_dbg_level = debug_level;
+ acpi_gbl_trace_dbg_layer = debug_layer;
+ status = AE_OK;
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_start_trace
- *
- * PARAMETERS: info - Method info struct
- *
- * RETURN: None
- *
- * DESCRIPTION: Start control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_ENTRY();
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return;
- }
-
- if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
- goto exit;
- }
-
- acpi_gbl_original_dbg_level = acpi_dbg_level;
- acpi_gbl_original_dbg_layer = acpi_dbg_layer;
-
- acpi_dbg_level = 0x00FFFFFF;
- acpi_dbg_layer = ACPI_UINT32_MAX;
-
- if (acpi_gbl_trace_dbg_level) {
- acpi_dbg_level = acpi_gbl_trace_dbg_level;
- }
- if (acpi_gbl_trace_dbg_layer) {
- acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
- }
-
-exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_stop_trace
- *
- * PARAMETERS: info - Method info struct
- *
- * RETURN: None
- *
- * DESCRIPTION: Stop control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_ENTRY();
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return;
- }
-
- if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
- goto exit;
- }
-
- /* Disable further tracing if type is one-shot */
-
- if (acpi_gbl_trace_flags & 1) {
- acpi_gbl_trace_method_name = 0;
- acpi_gbl_trace_dbg_level = 0;
- acpi_gbl_trace_dbg_layer = 0;
- }
-
- acpi_dbg_level = acpi_gbl_original_dbg_level;
- acpi_dbg_layer = acpi_gbl_original_dbg_layer;
-
-exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (status);
}
/*******************************************************************************
@@ -212,7 +115,7 @@ exit:
*
******************************************************************************/
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
{
acpi_status status;
union acpi_parse_object *op;
@@ -243,10 +146,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
*/
acpi_ps_update_parameter_list(info, REF_INCREMENT);
- /* Begin tracing if requested */
-
- acpi_ps_start_trace(info);
-
/*
* Execute the method. Performs parse simultaneously
*/
@@ -256,7 +155,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* Create and init a Root Node */
- op = acpi_ps_create_scope_op();
+ op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start);
if (!op) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -326,10 +225,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
cleanup:
acpi_ps_delete_parse_tree(op);
- /* End optional tracing */
-
- acpi_ps_stop_trace(info);
-
/* Take away the extra reference that we gave the parameters above */
acpi_ps_update_parameter_list(info, REF_DECREMENT);
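
With the per-method start/stop helpers folded into the interpreter, acpi_debug_trace() reduces to storing the (now string-valued) method name plus the level/layer/flags globals. A hedged usage sketch; the level/layer values are illustrative, and the flag bit is assumed to keep the bit-0 "one-shot" meaning seen in the removed acpi_ps_stop_trace():

	acpi_status status;

	/* Illustrative: request a one-shot execution trace of one method.
	 * The name may now be a full path, not only a 4-char nameseg. */
	status = acpi_debug_trace("\\_SB.PCI0._INI",
				  0x00FFFFFF,		/* debug_level: all */
				  ACPI_UINT32_MAX,	/* debug_layer: all */
				  1);			/* bit 0 assumed: one-shot */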
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 3fa829e96c2a..a5344428f3ae 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -348,7 +348,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
status =
acpi_ns_handle_to_pathname((acpi_handle)
node,
- &path_buffer);
+ &path_buffer,
+ FALSE);
/* +1 to include null terminator */
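
acpi_ns_handle_to_pathname() gains a third argument in this series (see the nsnames.c entry in the diffstat). Judging by the call sites visible here, the flag selects between padded and trimmed pathname forms; the parameter name below is taken from the matching acnamesp.h change and should be treated as an assumption:

	/* Hedged sketch: TRUE is assumed to request name segments with their
	 * trailing '_' padding trimmed ("\_SB_.PCI0" -> "\_SB.PCI0"). */
	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
	status = acpi_ns_handle_to_pathname(handle, &buffer,
					    TRUE);	/* no_trailing (assumed) */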
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6253001b6375..455a0700db39 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -345,7 +345,7 @@ void acpi_tb_parse_fadt(u32 table_index)
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
- ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
+ ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
/* If Hardware Reduced flag is set, there is no FACS */
@@ -354,13 +354,13 @@ void acpi_tb_parse_fadt(u32 table_index)
acpi_tb_install_fixed_table((acpi_physical_address)
acpi_gbl_FADT.facs,
ACPI_SIG_FACS,
- ACPI_TABLE_INDEX_FACS);
+ &acpi_gbl_facs_index);
}
if (acpi_gbl_FADT.Xfacs) {
acpi_tb_install_fixed_table((acpi_physical_address)
acpi_gbl_FADT.Xfacs,
ACPI_SIG_FACS,
- ACPI_TABLE_INDEX_X_FACS);
+ &acpi_gbl_xfacs_index);
}
}
}
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 119c84ad9833..405529d49a1a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -68,12 +68,25 @@ acpi_status
acpi_tb_find_table(char *signature,
char *oem_id, char *oem_table_id, u32 *table_index)
{
- u32 i;
acpi_status status;
struct acpi_table_header header;
+ u32 i;
ACPI_FUNCTION_TRACE(tb_find_table);
+ /* Validate the input table signature */
+
+ if (!acpi_is_valid_signature(signature)) {
+ return_ACPI_STATUS(AE_BAD_SIGNATURE);
+ }
+
+ /* Don't allow the OEM strings to be too long */
+
+ if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) ||
+ (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) {
+ return_ACPI_STATUS(AE_AML_STRING_LIMIT);
+ }
+
/* Normalize the input strings */
memset(&header, 0, sizeof(struct acpi_table_header));
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 15ea98e0068d..6319b42420c6 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -100,9 +100,9 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
*
* FUNCTION: acpi_tb_install_table_with_override
*
- * PARAMETERS: table_index - Index into root table array
- * new_table_desc - New table descriptor to install
+ * PARAMETERS: new_table_desc - New table descriptor to install
* override - Whether override should be performed
+ * table_index - Where the table index is returned
*
* RETURN: None
*
@@ -114,12 +114,14 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
******************************************************************************/
void
-acpi_tb_install_table_with_override(u32 table_index,
- struct acpi_table_desc *new_table_desc,
- u8 override)
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+ u8 override, u32 *table_index)
{
+ u32 i;
+ acpi_status status;
- if (table_index >= acpi_gbl_root_table_list.current_table_count) {
+ status = acpi_tb_get_next_table_descriptor(&i, NULL);
+ if (ACPI_FAILURE(status)) {
return;
}
@@ -134,8 +136,7 @@ acpi_tb_install_table_with_override(u32 table_index,
acpi_tb_override_table(new_table_desc);
}
- acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
- tables[table_index],
+ acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i],
new_table_desc->address,
new_table_desc->flags,
new_table_desc->pointer);
@@ -143,9 +144,13 @@ acpi_tb_install_table_with_override(u32 table_index,
acpi_tb_print_table_header(new_table_desc->address,
new_table_desc->pointer);
+ /* This synchronizes acpi_gbl_dsdt_index */
+
+ *table_index = i;
+
/* Set the global integer width (based upon revision of the DSDT) */
- if (table_index == ACPI_TABLE_INDEX_DSDT) {
+ if (i == acpi_gbl_dsdt_index) {
acpi_ut_set_integer_width(new_table_desc->pointer->revision);
}
}
@@ -157,7 +162,7 @@ acpi_tb_install_table_with_override(u32 table_index,
* PARAMETERS: address - Physical address of DSDT or FACS
* signature - Table signature, NULL if no need to
* match
- * table_index - Index into root table array
+ * table_index - Where the table index is returned
*
* RETURN: Status
*
@@ -168,7 +173,7 @@ acpi_tb_install_table_with_override(u32 table_index,
acpi_status
acpi_tb_install_fixed_table(acpi_physical_address address,
- char *signature, u32 table_index)
+ char *signature, u32 *table_index)
{
struct acpi_table_desc new_table_desc;
acpi_status status;
@@ -200,7 +205,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
goto release_and_exit;
}
- acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE);
+ /* Add the table to the global root table list */
+
+ acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index);
release_and_exit:
@@ -355,13 +362,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Add the table to the global root table list */
- status = acpi_tb_get_next_table_descriptor(&i, NULL);
- if (ACPI_FAILURE(status)) {
- goto release_and_exit;
- }
-
- *table_index = i;
- acpi_tb_install_table_with_override(i, &new_table_desc, override);
+ acpi_tb_install_table_with_override(&new_table_desc, override,
+ table_index);
release_and_exit:
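
With slot allocation moved inside acpi_tb_install_table_with_override(), the fixed-table and standard-table paths now share one pattern. A condensed sketch; the local variable names are hypothetical:

	/* Sketch: install a table, then learn which root-table slot it took. */
	u32 index;

	acpi_tb_install_table_with_override(&new_table_desc, TRUE, &index);

	/* DSDT handling compares against a global index instead of the
	 * removed ACPI_TABLE_INDEX_DSDT constant. */
	if (index == acpi_gbl_dsdt_index) {
		acpi_ut_set_integer_width(new_table_desc.pointer->revision);
	}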
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 568ac0e4a3c6..4337990127cc 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -68,28 +68,27 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
acpi_status acpi_tb_initialize_facs(void)
{
+ struct acpi_table_facs *facs;
/* If Hardware Reduced flag is set, there is no FACS */
if (acpi_gbl_reduced_hardware) {
acpi_gbl_FACS = NULL;
return (AE_OK);
- }
-
- (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_table_header,
- &acpi_gbl_facs32));
- (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_table_header,
- &acpi_gbl_facs64));
-
- if (acpi_gbl_facs64
- && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) {
- acpi_gbl_FACS = acpi_gbl_facs64;
- } else if (acpi_gbl_facs32) {
- acpi_gbl_FACS = acpi_gbl_facs32;
+ } else if (acpi_gbl_FADT.Xfacs &&
+ (!acpi_gbl_FADT.facs
+ || !acpi_gbl_use32_bit_facs_addresses)) {
+ (void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ acpi_gbl_FACS = facs;
+ } else if (acpi_gbl_FADT.facs) {
+ (void)acpi_get_table_by_index(acpi_gbl_facs_index,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ acpi_gbl_FACS = facs;
}
/* If there is no FACS, just continue. There was already an error msg */
@@ -192,7 +191,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
acpi_tb_uninstall_table(table_desc);
acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT],
+ tables[acpi_gbl_dsdt_index],
ACPI_PTR_TO_PHYSADDR(new_table),
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
new_table);
@@ -369,13 +368,6 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
table_entry_size);
table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
- /*
- * First three entries in the table array are reserved for the DSDT
- * and 32bit/64bit FACS, which are not actually present in the
- * RSDT/XSDT - they come from the FADT
- */
- acpi_gbl_root_table_list.current_table_count = 3;
-
/* Initialize the root table array from the RSDT/XSDT */
for (i = 0; i < table_count; i++) {
@@ -412,3 +404,36 @@ next_table:
return_ACPI_STATUS(AE_OK);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_is_valid_signature
+ *
+ * PARAMETERS: signature - Sig string to be validated
+ *
+ * RETURN: TRUE if signature is correct length and has valid characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_signature(char *signature)
+{
+ u32 i;
+
+ /* Validate the signature length */
+
+ if (strlen(signature) != ACPI_NAME_SIZE) {
+ return (FALSE);
+ }
+
+ /* Validate each character in the signature */
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_acpi_char(signature[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
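
The new validator gives acpi_tb_find_table() (tbfind.c above) a cheap rejection path before the root table is scanned. A minimal caller sketch, mirroring that check with a literal signature:

	/* Illustrative guard in the style of the new tbfind.c check: */
	if (!acpi_is_valid_signature("SSDT")) {
		return_ACPI_STATUS(AE_BAD_SIGNATURE);
	}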
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 9682d40ca6ff..55ee14ca9418 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -51,9 +51,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxfload")
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
/*******************************************************************************
*
* FUNCTION: acpi_load_tables
@@ -65,7 +62,6 @@ static acpi_status acpi_tb_load_namespace(void);
* DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
*
******************************************************************************/
-
acpi_status __init acpi_load_tables(void)
{
acpi_status status;
@@ -75,6 +71,13 @@ acpi_status __init acpi_load_tables(void)
/* Load the namespace from the tables */
status = acpi_tb_load_namespace();
+
+ /* Don't let single failures abort the load */
+
+ if (status == AE_CTRL_TERMINATE) {
+ status = AE_OK;
+ }
+
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While loading namespace from ACPI tables"));
@@ -97,11 +100,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
* the RSDT/XSDT.
*
******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
+acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
struct acpi_table_header *new_dsdt;
+ struct acpi_table_desc *table;
+ u32 tables_loaded = 0;
+ u32 tables_failed = 0;
ACPI_FUNCTION_TRACE(tb_load_namespace);
@@ -111,15 +117,11 @@ static acpi_status acpi_tb_load_namespace(void)
* Load the namespace. The DSDT is required, but any SSDT and
* PSDT tables are optional. Verify the DSDT.
*/
+ table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
+
if (!acpi_gbl_root_table_list.current_table_count ||
- !ACPI_COMPARE_NAME(&
- (acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT].signature),
- ACPI_SIG_DSDT)
- ||
- ACPI_FAILURE(acpi_tb_validate_table
- (&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]))) {
+ !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+ ACPI_FAILURE(acpi_tb_validate_table(table))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
}
@@ -130,8 +132,7 @@ static acpi_status acpi_tb_load_namespace(void)
* array can change dynamically as tables are loaded at run-time. Note:
* .Pointer field is not validated until after call to acpi_tb_validate_table.
*/
- acpi_gbl_DSDT =
- acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
+ acpi_gbl_DSDT = table->pointer;
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,7 +141,7 @@ static acpi_status acpi_tb_load_namespace(void)
* the DSDT.
*/
if (acpi_gbl_copy_dsdt_locally) {
- new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index);
if (new_dsdt) {
acpi_gbl_DSDT = new_dsdt;
}
@@ -157,41 +158,65 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load and parse tables */
- status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+ status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed"));
+ tables_failed++;
+ } else {
+ tables_loaded++;
}
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+ table = &acpi_gbl_root_table_list.tables[i];
+
if (!acpi_gbl_root_table_list.tables[i].address ||
- (!ACPI_COMPARE_NAME
- (&(acpi_gbl_root_table_list.tables[i].signature),
- ACPI_SIG_SSDT)
- &&
- !ACPI_COMPARE_NAME(&
- (acpi_gbl_root_table_list.tables[i].
- signature), ACPI_SIG_PSDT)
- &&
- !ACPI_COMPARE_NAME(&
- (acpi_gbl_root_table_list.tables[i].
- signature), ACPI_SIG_OSDT))
- ||
- ACPI_FAILURE(acpi_tb_validate_table
- (&acpi_gbl_root_table_list.tables[i]))) {
+ (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
+ && !ACPI_COMPARE_NAME(table->signature.ascii,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAME(table->signature.ascii,
+ ACPI_SIG_OSDT))
+ || ACPI_FAILURE(acpi_tb_validate_table(table))) {
continue;
}
/* Ignore errors while loading tables, get as many as possible */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- (void)acpi_ns_load_table(i, acpi_gbl_root_node);
+ status = acpi_ns_load_table(i, acpi_gbl_root_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "(%4.4s:%8.8s) while loading table",
+ table->signature.ascii,
+ table->pointer->oem_table_id));
+ tables_failed++;
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n",
+ table->signature.ascii,
+ table->pointer->oem_table_id));
+ } else {
+ tables_loaded++;
+ }
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
}
- ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
+ if (!tables_failed) {
+ ACPI_INFO((AE_INFO,
+ "%u ACPI AML tables successfully acquired and loaded",
+ tables_loaded));
+ } else {
+ ACPI_ERROR((AE_INFO,
+ "%u table load failures, %u successful",
+ tables_failed, tables_loaded));
+
+ /* Indicate at least one failure */
+
+ status = AE_CTRL_TERMINATE;
+ }
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
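
The return-code convention introduced here is easy to miss: AE_CTRL_TERMINATE now means "at least one table failed, but the namespace may still be usable", and acpi_load_tables() maps it back to AE_OK. A hypothetical consumer of the convention:

	status = acpi_tb_load_namespace();
	if (status == AE_CTRL_TERMINATE) {	/* partial failure */
		status = AE_OK;			/* keep the loaded tables */
	} else if (status == AE_NO_ACPI_TABLES) {
		return_ACPI_STATUS(status);	/* DSDT missing: nothing loaded */
	}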
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index cd02693841db..4146229103c8 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -45,6 +45,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
@@ -560,8 +561,37 @@ acpi_ut_ptr_exit(u32 line_number,
}
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_trace_point
+ *
+ * PARAMETERS: type - Trace event type
+ * begin - TRUE if before execution
+ * aml - Executed AML address
+ * pathname - Object path
+ * pointer - Pointer to the related object
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ acpi_ex_trace_point(type, begin, aml, pathname);
+
+#ifdef ACPI_USE_SYSTEM_TRACER
+ acpi_os_trace_point(type, begin, aml, pathname);
#endif
+}
+ACPI_EXPORT_SYMBOL(acpi_trace_point)
+#endif
#ifdef ACPI_APPLICATION
/*******************************************************************************
*
@@ -575,7 +605,6 @@ acpi_ut_ptr_exit(u32 line_number,
* DESCRIPTION: Print error message to the console, used by applications.
*
******************************************************************************/
-
void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
{
va_list args;
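
acpi_trace_point() becomes the single funnel for interpreter trace events, dispatching to the interpreter tracer and, when ACPI_USE_SYSTEM_TRACER is defined, to the host. A hedged sketch of a begin/end pair; the event-type name follows the acpi_trace_event_type enum added elsewhere in this series, and method_aml_start stands for the method's AML start address — both are assumptions here:

	/* Illustrative begin/end events around one method execution. */
	acpi_trace_point(ACPI_TRACE_AML_METHOD, TRUE,	/* begin */
			 method_aml_start, "\\_SB.PCI0._INI");
	/* ... method body is interpreted ... */
	acpi_trace_point(ACPI_TRACE_AML_METHOD, FALSE,	/* end */
			 method_aml_start, "\\_SB.PCI0._INI");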
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 71fce389fd48..1638312e3d8f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -209,6 +209,9 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
acpi_ut_delete_object_desc(object->method.mutex);
object->method.mutex = NULL;
}
+ if (object->method.node) {
+ object->method.node = NULL;
+ }
break;
case ACPI_TYPE_REGION:
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 857af824337b..75a94f52b4be 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -312,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
/* Get the entire file */
fprintf(stderr,
- "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n",
+ "Reading ACPI table from file %12s - Length %.8u (0x%06X)\n",
filename, file_size, file_size);
status = acpi_ut_read_table(file, table, &table_length);
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index e402e07b4846..28ab3a1d5ec1 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -204,11 +204,10 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_acpi_hardware_present = TRUE;
acpi_gbl_last_owner_id_index = 0;
acpi_gbl_next_owner_id_offset = 0;
- acpi_gbl_trace_dbg_level = 0;
- acpi_gbl_trace_dbg_layer = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
acpi_gbl_reg_methods_executed = FALSE;
+ acpi_gbl_max_loop_iterations = 0xFFFF;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 71b66537f826..bd4443bdcbad 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -75,7 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
return (FALSE);
}
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_aml_table
@@ -376,7 +376,7 @@ acpi_ut_display_init_pathname(u8 type,
/* Get the full pathname to the node */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
if (ACPI_FAILURE(status)) {
return;
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
new file mode 100644
index 000000000000..1d5f6b17b766
--- /dev/null
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -0,0 +1,380 @@
+/*******************************************************************************
+ *
+ * Module Name: utnonansi - Non-ansi C library functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utnonansi")
+
+/*
+ * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
+ * version of strtoul.
+ */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert a string to lowercase
+ *
+ ******************************************************************************/
+void acpi_ut_strlwr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, lowercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)tolower((int)*string);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert a string to uppercase
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, uppercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)toupper((int)*string);
+ }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_stricmp (stricmp)
+ *
+ * PARAMETERS: string1 - first string to compare
+ * string2 - second string to compare
+ *
+ * RETURN: int that signifies string relationship. Zero means strings
+ * are equal.
+ *
+ * DESCRIPTION: Case-insensitive string compare. Implementation of the
+ * non-ANSI stricmp function.
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+ int c1;
+ int c2;
+
+ do {
+ c1 = tolower((int)*string1);
+ c2 = tolower((int)*string2);
+
+ string1++;
+ string2++;
+ }
+ while ((c1 == c2) && (c1));
+
+ return (c1 - c2);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul64
+ *
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or ACPI_ANY_BASE;
+ *                            ACPI_ANY_BASE means 'on behalf of to_integer'
+ * ret_integer - Where the converted integer is returned
+ *
+ * RETURN: Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ * 32-bit or 64-bit conversion, depending on the current mode
+ * of the interpreter.
+ *
+ * NOTE: Does not support Octal strings, not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+{
+ u32 this_digit = 0;
+ u64 return_value = 0;
+ u64 quotient;
+ u64 dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
+
+	ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
+
+ switch (base) {
+ case ACPI_ANY_BASE:
+ case 16:
+
+ break;
+
+ default:
+
+ /* Invalid Base */
+
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!string) {
+ goto error_exit;
+ }
+
+ /* Skip over any white space in the buffer */
+
+ while ((*string) && (isspace((int)*string) || *string == '\t')) {
+ string++;
+ }
+
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
+ if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
+ sign_of0x = 1;
+ base = 16;
+
+ /* Skip over the leading '0x' */
+ string += 2;
+ } else {
+ base = 10;
+ }
+ }
+
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || isspace((int)*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
+ }
+
+ /*
+ * Perform a 32-bit or 64-bit conversion, depending upon the current
+ * execution mode of the interpreter
+ */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+ /* Main loop: convert the string to a 32- or 64-bit integer */
+
+ while (*string) {
+ if (isdigit((int)*string)) {
+
+ /* Convert ASCII 0-9 to Decimal value */
+
+ this_digit = ((u8)*string) - '0';
+ } else if (base == 10) {
+
+ /* Digit is out of range; possible in to_integer case only */
+
+ term = 1;
+ } else {
+ this_digit = (u8)toupper((int)*string);
+ if (isxdigit((int)this_digit)) {
+
+ /* Convert ASCII Hex char to value */
+
+ this_digit = this_digit - 'A' + 10;
+ } else {
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+			 * This is the to_integer operation case. There are no
+			 * restrictions for string-to-integer conversion; see the
+			 * ACPI spec.
+ */
+ goto error_exit;
+ }
+
+ /* Divide the digit into the correct position */
+
+ (void)acpi_ut_short_divide((dividend - (u64)this_digit),
+ base, &quotient, NULL);
+
+ if (return_value > quotient) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ }
+
+ return_value *= base;
+ return_value += this_digit;
+ string++;
+ }
+
+ /* All done, normal exit */
+
+all_done:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(return_value)));
+
+ *ret_integer = return_value;
+ return_ACPI_STATUS(AE_OK);
+
+error_exit:
+ /* Base was set/validated above */
+
+ if (base == 10) {
+ return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+ } else {
+ return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+ }
+}
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (strlen(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcpy(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((strlen(dest) + strlen(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcat(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+ if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ strncat(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
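
Note the inverted return convention of the relocated safe-string helpers: TRUE means "the operation would overflow", so results must be checked rather than assumed. A short usage sketch; the buffer size and the error code chosen are hypothetical:

	char path[32];

	if (acpi_ut_safe_strcpy(path, sizeof(path), "\\_SB")) {
		return (AE_BUFFER_OVERFLOW);	/* TRUE == would not fit */
	}
	if (acpi_ut_safe_strcat(path, sizeof(path), ".PCI0")) {
		return (AE_BUFFER_OVERFLOW);
	}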
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 8f3c883dfe0e..4ddd105d9741 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -48,286 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utstring")
-/*
- * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
- * version of strtoul.
- */
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strlwr (strlwr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to lowercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-void acpi_ut_strlwr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, lowercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)tolower((int)*string);
- }
-
- return;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ut_stricmp (stricmp)
- *
- * PARAMETERS: string1 - first string to compare
- * string2 - second string to compare
- *
- * RETURN: int that signifies string relationship. Zero means strings
- * are equal.
- *
- * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
- * strings with no case sensitivity)
- *
- ******************************************************************************/
-
-int acpi_ut_stricmp(char *string1, char *string2)
-{
- int c1;
- int c2;
-
- do {
- c1 = tolower((int)*string1);
- c2 = tolower((int)*string2);
-
- string1++;
- string2++;
- }
- while ((c1 == c2) && (c1));
-
- return (c1 - c2);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strupr (strupr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, uppercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)toupper((int)*string);
- }
-
- return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strtoul64
- *
- * PARAMETERS: string - Null terminated string
- * base - Radix of the string: 16 or ACPI_ANY_BASE;
- * ACPI_ANY_BASE means 'in behalf of to_integer'
- * ret_integer - Where the converted integer is returned
- *
- * RETURN: Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the current mode
- * of the interpreter.
- * NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
-{
- u32 this_digit = 0;
- u64 return_value = 0;
- u64 quotient;
- u64 dividend;
- u32 to_integer_op = (base == ACPI_ANY_BASE);
- u32 mode32 = (acpi_gbl_integer_byte_width == 4);
- u8 valid_digits = 0;
- u8 sign_of0x = 0;
- u8 term = 0;
-
- ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
-
- switch (base) {
- case ACPI_ANY_BASE:
- case 16:
-
- break;
-
- default:
-
- /* Invalid Base */
-
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- if (!string) {
- goto error_exit;
- }
-
- /* Skip over any white space in the buffer */
-
- while ((*string) && (isspace((int)*string) || *string == '\t')) {
- string++;
- }
-
- if (to_integer_op) {
- /*
- * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
- * We need to determine if it is decimal or hexadecimal.
- */
- if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
- sign_of0x = 1;
- base = 16;
-
- /* Skip over the leading '0x' */
- string += 2;
- } else {
- base = 10;
- }
- }
-
- /* Any string left? Check that '0x' is not followed by white space. */
-
- if (!(*string) || isspace((int)*string) || *string == '\t') {
- if (to_integer_op) {
- goto error_exit;
- } else {
- goto all_done;
- }
- }
-
- /*
- * Perform a 32-bit or 64-bit conversion, depending upon the current
- * execution mode of the interpreter
- */
- dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
- /* Main loop: convert the string to a 32- or 64-bit integer */
-
- while (*string) {
- if (isdigit((int)*string)) {
-
- /* Convert ASCII 0-9 to Decimal value */
-
- this_digit = ((u8)*string) - '0';
- } else if (base == 10) {
-
- /* Digit is out of range; possible in to_integer case only */
-
- term = 1;
- } else {
- this_digit = (u8)toupper((int)*string);
- if (isxdigit((int)this_digit)) {
-
- /* Convert ASCII Hex char to value */
-
- this_digit = this_digit - 'A' + 10;
- } else {
- term = 1;
- }
- }
-
- if (term) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- } else if ((valid_digits == 0) && (this_digit == 0)
- && !sign_of0x) {
-
- /* Skip zeros */
- string++;
- continue;
- }
-
- valid_digits++;
-
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
- /*
- * This is to_integer operation case.
- * No any restrictions for string-to-integer conversion,
- * see ACPI spec.
- */
- goto error_exit;
- }
-
- /* Divide the digit into the correct position */
-
- (void)acpi_ut_short_divide((dividend - (u64)this_digit),
- base, &quotient, NULL);
-
- if (return_value > quotient) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- }
-
- return_value *= base;
- return_value += this_digit;
- string++;
- }
-
- /* All done, normal exit */
-
-all_done:
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(return_value)));
-
- *ret_integer = return_value;
- return_ACPI_STATUS(AE_OK);
-
-error_exit:
- /* Base was set/validated above */
-
- if (base == 10) {
- return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
- } else {
- return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
- }
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_print_string
@@ -342,7 +62,6 @@ error_exit:
* sequences.
*
******************************************************************************/
-
void acpi_ut_print_string(char *string, u16 max_length)
{
u32 i;
@@ -584,64 +303,3 @@ void ut_convert_backslashes(char *pathname)
}
}
#endif
-
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
- *
- * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
- * functions. This is the size of the Destination buffer.
- *
- * RETURN: TRUE if the operation would overflow the destination buffer.
- *
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- * the result of the operation will not overflow the output string
- * buffer.
- *
- * NOTE: These functions are typically only helpful for processing
- * user input and command lines. For most ACPICA code, the
- * required buffer length is precisely calculated before buffer
- * allocation, so the use of these functions is unnecessary.
- *
- ******************************************************************************/
-
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
-{
-
- if (strlen(source) >= dest_size) {
- return (TRUE);
- }
-
- strcpy(dest, source);
- return (FALSE);
-}
-
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
-
- if ((strlen(dest) + strlen(source)) >= dest_size) {
- return (TRUE);
- }
-
- strcat(dest, source);
- return (FALSE);
-}
-
-u8
-acpi_ut_safe_strncat(char *dest,
- acpi_size dest_size,
- char *source, acpi_size max_transfer_length)
-{
- acpi_size actual_transfer_length;
-
- actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
-
- if ((strlen(dest) + actual_transfer_length) >= dest_size) {
- return (TRUE);
- }
-
- strncat(dest, source, max_transfer_length);
- return (FALSE);
-}
-#endif
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 51cf52d52243..4f332815db00 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -92,13 +92,6 @@ acpi_status __init acpi_terminate(void)
acpi_ut_mutex_terminate();
-#ifdef ACPI_DEBUGGER
-
- /* Shut down the debugger */
-
- acpi_db_terminate();
-#endif
-
/* Now we can shutdown the OS-dependent layer */
status = acpi_os_terminate();
@@ -517,7 +510,8 @@ acpi_decode_pld_buffer(u8 *in_buffer,
/* Parameter validation */
- if (!in_buffer || !return_buffer || (length < 16)) {
+ if (!in_buffer || !return_buffer
+ || (length < ACPI_PLD_REV1_BUFFER_SIZE)) {
return (AE_BAD_PARAMETER);
}
@@ -567,7 +561,7 @@ acpi_decode_pld_buffer(u8 *in_buffer,
pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
pld_info->order = ACPI_PLD_GET_ORDER(&dword);
- if (length >= ACPI_PLD_BUFFER_SIZE) {
+ if (length >= ACPI_PLD_REV2_BUFFER_SIZE) {
/* Fifth 32-bit DWord (Revision 2 of _PLD) */
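
The named sizes replace the magic numbers 16 and 20 (their assumed values, per the _PLD buffer layout). A sketch of the resulting revision-aware validation:

	/* Accept a Revision-1 buffer; decode Revision-2 fields when present.
	 * REV1/REV2 sizes are assumed to be 16 and 20 bytes respectively. */
	if (length < ACPI_PLD_REV1_BUFFER_SIZE) {
		return (AE_BAD_PARAMETER);
	}
	if (length >= ACPI_PLD_REV2_BUFFER_SIZE) {
		/* fifth 32-bit DWord is available */
	}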
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 42a32a66ef22..a7137ec28447 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -124,17 +124,6 @@ acpi_status __init acpi_initialize_subsystem(void)
return_ACPI_STATUS(status);
}
- /* If configured, initialize the AML debugger */
-
-#ifdef ACPI_DEBUGGER
- status = acpi_db_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Debugger initialization"));
- return_ACPI_STATUS(status);
- }
-#endif
-
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index a85ac07f3da3..a2c8d7adb6eb 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -24,10 +24,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index a095d4f858da..0431883653be 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 04ab5c9d3ced..6330f557a2c8 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 3670bbab57a3..6682c5daf742 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 2bfd53cbfe80..23981ac1c6c2 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -23,10 +23,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 06e9b411a0a2..20b3fcf4007c 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -21,10 +21,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b3628cc01a53..b719ab3090bb 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -18,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 278dc4be992a..96809cd99ace 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 513e7230e3d0..46506e7687cd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -423,6 +419,406 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
acpi_evaluate_ost(handle, type, ost_code, NULL);
}
+static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_device *device = data;
+
+ device->driver->ops.notify(device, event);
+}
+
+static void acpi_device_notify_fixed(void *data)
+{
+ struct acpi_device *device = data;
+
+ /* Fixed hardware devices have no handles */
+ acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
+}
+
+static u32 acpi_device_fixed_event(void *data)
+{
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static int acpi_device_install_notify_handler(struct acpi_device *device)
+{
+ acpi_status status;
+
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_device_fixed_event,
+ device);
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_device_fixed_event,
+ device);
+ else
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_device_notify,
+ device);
+
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ return 0;
+}
+
+static void acpi_device_remove_notify_handler(struct acpi_device *device)
+{
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_device_fixed_event);
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_device_fixed_event);
+ else
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_device_notify);
+}
+
+/* --------------------------------------------------------------------------
+ Device Matching
+ -------------------------------------------------------------------------- */
+
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+ const struct device *dev)
+{
+ struct mutex *physical_node_lock = &adev->physical_node_lock;
+
+ mutex_lock(physical_node_lock);
+ if (list_empty(&adev->physical_node_list)) {
+ adev = NULL;
+ } else {
+ const struct acpi_device_physical_node *node;
+
+ node = list_first_entry(&adev->physical_node_list,
+ struct acpi_device_physical_node, node);
+ if (node->dev != dev)
+ adev = NULL;
+ }
+ mutex_unlock(physical_node_lock);
+ return adev;
+}
+
+/**
+ * acpi_device_is_first_physical_node - Is given @dev the first physical node
+ * @adev: ACPI companion device
+ * @dev: Physical device to check
+ *
+ * Check whether the given @dev is the first physical device attached to the
+ * ACPI companion device. This distinction is needed in some cases
+ * where the same companion device is shared between many physical devices.
+ *
+ * Note that the caller must provide a valid @adev pointer.
+ */
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+ const struct device *dev)
+{
+ return !!acpi_primary_dev_companion(adev, dev);
+}
+
+/*
+ * acpi_companion_match() - Can we match via ACPI companion device
+ * @dev: Device in question
+ *
+ * Check if the given device has an ACPI companion and if that companion has
+ * a valid list of PNP IDs, and if the device is the first (primary) physical
+ * device associated with it. Return the companion pointer if that's the case
+ * or NULL otherwise.
+ *
+ * If multiple physical devices are attached to a single ACPI companion, we need
+ * to be careful. The usage scenario for this kind of relationship is that all
+ * of the physical devices in question use resources provided by the ACPI
+ * companion. A typical case is an MFD device where all the sub-devices share
+ * the parent's ACPI companion. In such cases we can only allow the primary
+ * (first) physical device to be matched with the help of the companion's PNP
+ * IDs.
+ *
+ * Additional physical devices sharing the ACPI companion can still use
+ * resources available from it but they will be matched normally using functions
+ * provided by their bus types (and analogously for their modalias).
+ */
+struct acpi_device *acpi_companion_match(const struct device *dev)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return NULL;
+
+ if (list_empty(&adev->pnp.ids))
+ return NULL;
+
+ return acpi_primary_dev_companion(adev, dev);
+}
+
+/**
+ * acpi_of_match_device - Match device object using the "compatible" property.
+ * @adev: ACPI device object to match.
+ * @of_match_table: List of device IDs to match against.
+ *
+ * If @adev has ACPI_DT_NAMESPACE_HID in its list of identifiers and a _DSD
+ * object with the "compatible" property, use that property to match against
+ * the given list of identifiers.
+ */
+static bool acpi_of_match_device(struct acpi_device *adev,
+ const struct of_device_id *of_match_table)
+{
+ const union acpi_object *of_compatible, *obj;
+ int i, nval;
+
+ if (!adev)
+ return false;
+
+ of_compatible = adev->data.of_compatible;
+ if (!of_match_table || !of_compatible)
+ return false;
+
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+ nval = of_compatible->package.count;
+ obj = of_compatible->package.elements;
+ } else { /* Must be ACPI_TYPE_STRING. */
+ nval = 1;
+ obj = of_compatible;
+ }
+ /* Now we can look for the driver DT compatible strings */
+ for (i = 0; i < nval; i++, obj++) {
+ const struct of_device_id *id;
+
+ for (id = of_match_table; id->compatible[0]; id++)
+ if (!strcasecmp(obj->string.pointer, id->compatible))
+ return true;
+ }
+
+ return false;
+}
+
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+ struct acpi_hardware_id *hwid)
+{
+ int i, msk, byte_shift;
+ char buf[3];
+
+ if (!id->cls)
+ return false;
+
+ /* Apply the class-code bitmask before checking each class-code byte */
+ for (i = 1; i <= 3; i++) {
+ byte_shift = 8 * (3 - i);
+ msk = (id->cls_msk >> byte_shift) & 0xFF;
+ if (!msk)
+ continue;
+
+ sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+ if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+ return false;
+ }
+ return true;
+}
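A hedged sketch of how this _CLS matching is consumed: a driver fills in the .cls/.cls_msk fields of its ID table, typically via the ACPI_DEVICE_CLASS() helper from <linux/acpi.h>. The class code below is the PCI AHCI class; the table name is hypothetical, not part of this patch.

static const struct acpi_device_id sketch_ahci_ids[] = {
        /* match any device whose _CLS reports base class 01, sub-class 06,
         * programming interface 01 (AHCI); full 24-bit mask */
        { ACPI_DEVICE_CLASS(0x010601, 0xffffff) },
        { }
};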
+
+static const struct acpi_device_id *__acpi_match_device(
+ struct acpi_device *device,
+ const struct acpi_device_id *ids,
+ const struct of_device_id *of_ids)
+{
+ const struct acpi_device_id *id;
+ struct acpi_hardware_id *hwid;
+
+ /*
+ * If the device is not present, it is unnecessary to load a device
+ * driver for it.
+ */
+ if (!device || !device->status.present)
+ return NULL;
+
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
+ /* First, check the ACPI/PNP IDs provided by the caller. */
+ for (id = ids; id->id[0] || id->cls; id++) {
+ if (id->id[0] && !strcmp((char *) id->id, hwid->id))
+ return id;
+ else if (id->cls && __acpi_match_device_cls(id, hwid))
+ return id;
+ }
+
+ /*
+ * Next, check ACPI_DT_NAMESPACE_HID and try to match the
+ * "compatible" property if found.
+ *
+ * The id returned below is not valid, but the only caller
+ * that passes non-NULL of_ids here merely checks whether the
+ * return value is NULL.
+ */
+ if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
+ && acpi_of_match_device(device, of_ids))
+ return id;
+ }
+ return NULL;
+}
+
+/**
+ * acpi_match_device - Match a struct device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id objects to match against.
+ * @dev: The device structure to match.
+ *
+ * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
+ * object for that handle and use that object to match against a given list of
+ * device IDs.
+ *
+ * Return a pointer to the first matching ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+ const struct device *dev)
+{
+ return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_match_device);
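A usage sketch (names are hypothetical, assuming only the acpi_match_device() signature above): a platform driver's probe path can recover which ID it was matched through.

#include <linux/acpi.h>
#include <linux/platform_device.h>

static const struct acpi_device_id sketch_ids[] = {
        { "ABCD0001", 0 },      /* hypothetical _HID */
        { }
};

static int sketch_probe(struct platform_device *pdev)
{
        const struct acpi_device_id *id;

        /* NULL if the device has no (primary) ACPI companion or no match */
        id = acpi_match_device(sketch_ids, &pdev->dev);
        if (!id)
                return -ENODEV;

        /* id->driver_data can carry per-ID variant information */
        return 0;
}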
+
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids)
+{
+ return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+}
+EXPORT_SYMBOL(acpi_match_device_ids);
+
+bool acpi_driver_match_device(struct device *dev,
+ const struct device_driver *drv)
+{
+ if (!drv->acpi_match_table)
+ return acpi_of_match_device(ACPI_COMPANION(dev),
+ drv->of_match_table);
+
+ return !!__acpi_match_device(acpi_companion_match(dev),
+ drv->acpi_match_table, drv->of_match_table);
+}
+EXPORT_SYMBOL_GPL(acpi_driver_match_device);
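For reference, this helper is what bus ->match() implementations delegate to; a minimal sketch of such a caller, mirroring what the platform bus does (the function name is hypothetical):

static int sketch_bus_match(struct device *dev, struct device_driver *drv)
{
        /* Falls back to OF-style "compatible" matching when the driver
         * has no acpi_match_table, as implemented above. */
        return acpi_driver_match_device(dev, drv);
}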
+
+/* --------------------------------------------------------------------------
+ ACPI Driver Management
+ -------------------------------------------------------------------------- */
+
+/**
+ * acpi_bus_register_driver - register a driver with the ACPI bus
+ * @driver: driver being registered
+ *
+ * Registers a driver with the ACPI bus. Searches the namespace for all
+ * devices that match the driver's criteria and binds to them. Returns zero
+ * for success or a negative error status on failure.
+ */
+int acpi_bus_register_driver(struct acpi_driver *driver)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ driver->drv.name = driver->name;
+ driver->drv.bus = &acpi_bus_type;
+ driver->drv.owner = driver->owner;
+
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(acpi_bus_register_driver);
+
+/**
+ * acpi_bus_unregister_driver - unregister a driver from the ACPI bus
+ * @driver: driver to unregister
+ *
+ * Unregisters a driver from the ACPI bus. The driver core unbinds it from
+ * all devices it was bound to.
+ */
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(acpi_bus_unregister_driver);
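Putting the two entry points together, a minimal legacy-style ACPI driver module might look like the sketch below (all names hypothetical); the .add/.remove callbacks are the ones invoked by acpi_device_probe() and acpi_device_remove() further down.

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id sketch_drv_ids[] = {
        { "ABCD0001", 0 },      /* hypothetical _HID */
        { }
};

static int sketch_add(struct acpi_device *adev)
{
        return 0;       /* accept the device */
}

static int sketch_del(struct acpi_device *adev)
{
        return 0;
}

static struct acpi_driver sketch_driver = {
        .name = "sketch",
        .class = "sketch",
        .ids = sketch_drv_ids,
        .ops = {
                .add = sketch_add,
                .remove = sketch_del,
        },
};

static int __init sketch_init(void)
{
        return acpi_bus_register_driver(&sketch_driver);
}
module_init(sketch_init);

static void __exit sketch_exit(void)
{
        acpi_bus_unregister_driver(&sketch_driver);
}
module_exit(sketch_exit);

MODULE_LICENSE("GPL");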
+
+/* --------------------------------------------------------------------------
+ ACPI Bus operations
+ -------------------------------------------------------------------------- */
+
+static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+
+ return acpi_dev->flags.match_driver
+ && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+}
+
+static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
+}
+
+static int acpi_device_probe(struct device *dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+ int ret;
+
+ if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
+ return -EINVAL;
+
+ if (!acpi_drv->ops.add)
+ return -ENOSYS;
+
+ ret = acpi_drv->ops.add(acpi_dev);
+ if (ret)
+ return ret;
+
+ acpi_dev->driver = acpi_drv;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Driver [%s] successfully bound to device [%s]\n",
+ acpi_drv->name, acpi_dev->pnp.bus_id));
+
+ if (acpi_drv->ops.notify) {
+ ret = acpi_device_install_notify_handler(acpi_dev);
+ if (ret) {
+ if (acpi_drv->ops.remove)
+ acpi_drv->ops.remove(acpi_dev);
+
+ acpi_dev->driver = NULL;
+ acpi_dev->driver_data = NULL;
+ return ret;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
+ acpi_drv->name, acpi_dev->pnp.bus_id));
+ get_device(dev);
+ return 0;
+}
+
+static int acpi_device_remove(struct device * dev)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+ if (acpi_drv) {
+ if (acpi_drv->ops.notify)
+ acpi_device_remove_notify_handler(acpi_dev);
+ if (acpi_drv->ops.remove)
+ acpi_drv->ops.remove(acpi_dev);
+ }
+ acpi_dev->driver = NULL;
+ acpi_dev->driver_data = NULL;
+
+ put_device(dev);
+ return 0;
+}
+
+struct bus_type acpi_bus_type = {
+ .name = "acpi",
+ .match = acpi_bus_match,
+ .probe = acpi_device_probe,
+ .remove = acpi_device_remove,
+ .uevent = acpi_device_uevent,
+};
+
/* --------------------------------------------------------------------------
Initialization/Cleanup
-------------------------------------------------------------------------- */
@@ -661,7 +1057,9 @@ static int __init acpi_bus_init(void)
*/
acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
- return 0;
+ result = bus_register(&acpi_bus_type);
+ if (!result)
+ return 0;
/* Mimic structured exception handling */
error1:
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5d1832a588..5c3b0918d5fd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 6c9ee68e46fb..d0918d421f90 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -11,10 +11,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index c8ead9f97375..12c240903c18 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/acpi.h>
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 6b1919f6bd82..68bb305b977f 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -7,6 +7,8 @@
#include <linux/debugfs.h>
#include <linux/acpi.h>
+#include "internal.h"
+
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..4806b7f856c4 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -231,7 +227,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
dev_warn(&device->dev, "Failed to change power state to %s\n",
acpi_power_state_string(state));
} else {
- device->power.state = state;
+ device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,
@@ -1123,6 +1119,14 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
if (dev->pm_domain)
return -EEXIST;
+ /*
+ * If the companion is shared by multiple physical devices, attach
+ * the power domain only to the first of them, so that power
+ * management is not done twice.
+ */
+ if (!acpi_device_is_first_physical_node(adev, dev))
+ return -EBUSY;
+
acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev->pm_domain = &acpi_general_pm_domain;
if (power_on) {
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
new file mode 100644
index 000000000000..4ab4582e586b
--- /dev/null
+++ b/drivers/acpi/device_sysfs.c
@@ -0,0 +1,521 @@
+/*
+ * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
+ *
+ * Copyright (C) 2015, Intel Corp.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/nls.h>
+
+#include "internal.h"
+
+/**
+ * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Creates the hid/cid(s) string needed for modalias and uevent, e.g. on a
+ * device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
+ *
+ * Return: the length of the string on success, 0 if there is no _HID and no
+ * _CID, -EINVAL on an output error, or -ENOMEM if the output was truncated.
+ */
+static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int size)
+{
+ int len;
+ int count;
+ struct acpi_hardware_id *id;
+
+ /*
+ * Since ACPI_DT_NAMESPACE_HID is omitted from the modalias below,
+ * return 0 if it is the only ACPI/PNP ID in the device's list.
+ */
+ count = 0;
+ list_for_each_entry(id, &acpi_dev->pnp.ids, list)
+ if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+ count++;
+
+ if (!count)
+ return 0;
+
+ len = snprintf(modalias, size, "acpi:");
+ if (len <= 0)
+ return len;
+
+ size -= len;
+
+ list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+ if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+ continue;
+
+ count = snprintf(&modalias[len], size, "%s:", id->id);
+ if (count < 0)
+ return -EINVAL;
+
+ if (count >= size)
+ return -ENOMEM;
+
+ len += count;
+ size -= count;
+ }
+ modalias[len] = '\0';
+ return len;
+}
+
+/**
+ * create_of_modalias - Creates DT compatible string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Expose DT compatible modalias as of:NnameTCcompatible. This function should
+ * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
+ * ACPI/PNP IDs.
+ */
+static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int size)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ const union acpi_object *of_compatible, *obj;
+ int len, count;
+ int i, nval;
+ char *c;
+
+ acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ /* DT strings are all in lower case */
+ for (c = buf.pointer; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ ACPI_FREE(buf.pointer);
+
+ if (len <= 0)
+ return len;
+
+ of_compatible = acpi_dev->data.of_compatible;
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+ nval = of_compatible->package.count;
+ obj = of_compatible->package.elements;
+ } else { /* Must be ACPI_TYPE_STRING. */
+ nval = 1;
+ obj = of_compatible;
+ }
+ for (i = 0; i < nval; i++, obj++) {
+ count = snprintf(&modalias[len], size, "C%s",
+ obj->string.pointer);
+ if (count < 0)
+ return -EINVAL;
+
+ if (count >= size)
+ return -ENOMEM;
+
+ len += count;
+ size -= count;
+ }
+ modalias[len] = '\0';
+ return len;
+}
+
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+ struct kobj_uevent_env *env)
+{
+ int len;
+
+ if (!adev)
+ return -ENODEV;
+
+ if (list_empty(&adev->pnp.ids))
+ return 0;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len < 0)
+ return len;
+
+ env->buflen += len;
+ if (!adev->data.of_compatible)
+ return 0;
+
+ if (len > 0 && add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len < 0)
+ return len;
+
+ env->buflen += len;
+
+ return 0;
+}
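As an illustration (the IDs and the compatible string are invented), a device carrying both regular ACPI/PNP IDs and a _DSD "compatible" entry would end up with two MODALIAS variables in its uevent, roughly:

/*
 * MODALIAS=acpi:IBM0001:ACPI0001:
 * MODALIAS=of:NdevTCacme,frobnicator
 *
 * The first line is produced by create_pnp_modalias(), the second by
 * create_of_modalias(); "dev" stands for the lower-cased last segment
 * of the device's ACPI name.
 */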
+
+/**
+ * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
+ *
+ * Create the uevent modalias field for ACPI-enumerated devices.
+ *
+ * Other buses do not support ACPI HIDs & CIDs, so expose them here; e.g. for
+ * a device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+{
+ return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
+
+static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
+{
+ int len, count;
+
+ if (!adev)
+ return -ENODEV;
+
+ if (list_empty(&adev->pnp.ids))
+ return 0;
+
+ len = create_pnp_modalias(adev, buf, size - 1);
+ if (len < 0) {
+ return len;
+ } else if (len > 0) {
+ buf[len++] = '\n';
+ size -= len;
+ }
+ if (!adev->data.of_compatible)
+ return len;
+
+ count = create_of_modalias(adev, buf + len, size - 1);
+ if (count < 0) {
+ return count;
+ } else if (count > 0) {
+ len += count;
+ buf[len++] = '\n';
+ }
+
+ return len;
+}
+
+/**
+ * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Create the modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Other buses do not support ACPI HIDs & CIDs, so expose them here; e.g. for
+ * a device with hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+ return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
+
+static ssize_t acpi_device_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
+static ssize_t real_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+ int state;
+ int ret;
+
+ ret = acpi_device_get_power(adev, &state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
+static ssize_t
+acpi_eject_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi_device = to_acpi_device(d);
+ acpi_object_type not_used;
+ acpi_status status;
+
+ if (!count || buf[0] != '1')
+ return -EINVAL;
+
+ if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+ && !acpi_device->driver)
+ return -ENODEV;
+
+ status = acpi_get_type(acpi_device->handle, &not_used);
+ if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+ return -ENODEV;
+
+ get_device(&acpi_device->dev);
+ status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
+ if (ACPI_SUCCESS(status))
+ return count;
+
+ put_device(&acpi_device->dev);
+ acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
+}
+
+static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+
+static ssize_t acpi_device_hid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
+}
+static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+
+static ssize_t acpi_device_uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
+}
+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+
+static ssize_t acpi_device_adr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "0x%08x\n",
+ (unsigned int)(acpi_dev->pnp.bus_address));
+}
+static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+
+static ssize_t acpi_device_path_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
+ int result;
+
+ result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
+ if (result)
+ goto end;
+
+ result = sprintf(buf, "%s\n", (char*)path.pointer);
+ kfree(path.pointer);
+end:
+ return result;
+}
+static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+
+/* sysfs file that shows description text from the ACPI _STR method */
+static ssize_t description_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ int result;
+
+ if (acpi_dev->pnp.str_obj == NULL)
+ return 0;
+
+ /*
+ * The _STR object contains a Unicode identifier for a device.
+ * We need to convert to utf-8 so it can be displayed.
+ */
+ result = utf16s_to_utf8s(
+ (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
+ acpi_dev->pnp.str_obj->buffer.length,
+ UTF16_LITTLE_ENDIAN, buf,
+ PAGE_SIZE);
+
+ buf[result++] = '\n';
+
+ return result;
+}
+static DEVICE_ATTR(description, 0444, description_show, NULL);
+
+static ssize_t acpi_device_sun_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long sun;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%llu\n", sun);
+}
+static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%llu\n", sta);
+}
+static DEVICE_ATTR_RO(status);
+
+/**
+ * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+int acpi_device_setup_files(struct acpi_device *dev)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ int result = 0;
+
+ /*
+ * Devices enumerated from the FADT don't have a "path" attribute
+ */
+ if (dev->handle) {
+ result = device_create_file(&dev->dev, &dev_attr_path);
+ if (result)
+ goto end;
+ }
+
+ if (!list_empty(&dev->pnp.ids)) {
+ result = device_create_file(&dev->dev, &dev_attr_hid);
+ if (result)
+ goto end;
+
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if (result)
+ goto end;
+ }
+
+ /*
+ * If the device has _STR, a 'description' file is created
+ */
+ if (acpi_has_method(dev->handle, "_STR")) {
+ status = acpi_evaluate_object(dev->handle, "_STR",
+ NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ buffer.pointer = NULL;
+ dev->pnp.str_obj = buffer.pointer;
+ result = device_create_file(&dev->dev, &dev_attr_description);
+ if (result)
+ goto end;
+ }
+
+ if (dev->pnp.type.bus_address)
+ result = device_create_file(&dev->dev, &dev_attr_adr);
+ if (dev->pnp.unique_id)
+ result = device_create_file(&dev->dev, &dev_attr_uid);
+
+ if (acpi_has_method(dev->handle, "_SUN")) {
+ result = device_create_file(&dev->dev, &dev_attr_sun);
+ if (result)
+ goto end;
+ }
+
+ if (acpi_has_method(dev->handle, "_STA")) {
+ result = device_create_file(&dev->dev, &dev_attr_status);
+ if (result)
+ goto end;
+ }
+
+ /*
+ * If the device has _EJ0, an 'eject' file is created; it is used to
+ * trigger the hot-removal function from userland.
+ */
+ if (acpi_has_method(dev->handle, "_EJ0")) {
+ result = device_create_file(&dev->dev, &dev_attr_eject);
+ if (result)
+ return result;
+ }
+
+ if (dev->flags.power_manageable) {
+ result = device_create_file(&dev->dev, &dev_attr_power_state);
+ if (result)
+ return result;
+
+ if (dev->power.flags.power_resources)
+ result = device_create_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
+end:
+ return result;
+}
+
+/**
+ * acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+void acpi_device_remove_files(struct acpi_device *dev)
+{
+ if (dev->flags.power_manageable) {
+ device_remove_file(&dev->dev, &dev_attr_power_state);
+ if (dev->power.flags.power_resources)
+ device_remove_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
+ /*
+ * If the device has _STR, remove the 'description' file
+ */
+ if (acpi_has_method(dev->handle, "_STR")) {
+ kfree(dev->pnp.str_obj);
+ device_remove_file(&dev->dev, &dev_attr_description);
+ }
+ /*
+ * If the device has _EJ0, remove the 'eject' file.
+ */
+ if (acpi_has_method(dev->handle, "_EJ0"))
+ device_remove_file(&dev->dev, &dev_attr_eject);
+
+ if (acpi_has_method(dev->handle, "_SUN"))
+ device_remove_file(&dev->dev, &dev_attr_sun);
+
+ if (dev->pnp.unique_id)
+ device_remove_file(&dev->dev, &dev_attr_uid);
+ if (dev->pnp.type.bus_address)
+ device_remove_file(&dev->dev, &dev_attr_adr);
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+ device_remove_file(&dev->dev, &dev_attr_hid);
+ if (acpi_has_method(dev->handle, "_STA"))
+ device_remove_file(&dev->dev, &dev_attr_status);
+ if (dev->handle)
+ device_remove_file(&dev->dev, &dev_attr_path);
+}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a688aa243f6c..e8e128dede29 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 9d4761d2f6b7..2614a839c60d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -22,10 +22,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -165,8 +161,16 @@ struct transaction {
u8 flags;
};
+struct acpi_ec_query {
+ struct transaction transaction;
+ struct work_struct work;
+ struct acpi_ec_query_handler *handler;
+};
+
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
+static void acpi_ec_event_handler(struct work_struct *work);
+static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
@@ -978,60 +982,90 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
-static void acpi_ec_run(void *cxt)
+static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
{
- struct acpi_ec_query_handler *handler = cxt;
+ struct acpi_ec_query *q;
+ struct transaction *t;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return NULL;
+ INIT_WORK(&q->work, acpi_ec_event_processor);
+ t = &q->transaction;
+ t->command = ACPI_EC_COMMAND_QUERY;
+ t->rdata = pval;
+ t->rlen = 1;
+ return q;
+}
+
+static void acpi_ec_delete_query(struct acpi_ec_query *q)
+{
+ if (q) {
+ if (q->handler)
+ acpi_ec_put_query_handler(q->handler);
+ kfree(q);
+ }
+}
+
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+ struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+ struct acpi_ec_query_handler *handler = q->handler;
- if (!handler)
- return;
ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- acpi_ec_put_query_handler(handler);
+ acpi_ec_delete_query(q);
}
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int result;
- acpi_status status;
struct acpi_ec_query_handler *handler;
- struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
- .wdata = NULL, .rdata = &value,
- .wlen = 0, .rlen = 1};
+ struct acpi_ec_query *q;
+
+ q = acpi_ec_create_query(&value);
+ if (!q)
+ return -ENOMEM;
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (thus clearing the interrupt source).
*/
- result = acpi_ec_transaction(ec, &t);
- if (result)
- return result;
- if (data)
- *data = value;
+ result = acpi_ec_transaction(ec, &q->transaction);
if (!value)
- return -ENODATA;
+ result = -ENODATA;
+ if (result)
+ goto err_exit;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- /* have custom handler for this bit */
- handler = acpi_ec_get_query_handler(handler);
+ q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
- handler->query_bit);
- status = acpi_os_execute((handler->func) ?
- OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
- acpi_ec_run, handler);
- if (ACPI_FAILURE(status))
+ q->handler->query_bit);
+ /*
+ * It is reported that _Qxx are evaluated in parallel
+ * on Windows:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=94411
+ */
+ if (!schedule_work(&q->work))
result = -EBUSY;
break;
}
}
mutex_unlock(&ec->mutex);
+
+err_exit:
+ if (result && q)
+ acpi_ec_delete_query(q);
+ if (data)
+ *data = value;
return result;
}
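For context, consumers attach to these queries through acpi_ec_add_query_handler(), exported earlier in this file; a hedged sketch follows (the query number and callback are invented, not part of this patch):

static int sketch_qxx_handler(void *data)
{
        /* runs from the per-query work item scheduled above */
        return 0;
}

static int sketch_hook_ec(void)
{
        /* first_ec is the EC instance exported by this file */
        return acpi_ec_add_query_handler(first_ec, 0x50, NULL,
                                         sketch_qxx_handler, NULL);
}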
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index bea0bbaafa97..e297a480e135 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a322710b5ba4..5c67a6d8f803 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4683a96932b9..9e426210c2a8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -13,9 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef _ACPI_INTERNAL_H_
@@ -70,7 +67,7 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
#ifdef CONFIG_DEBUG_FS
extern struct dentry *acpi_debugfs_dir;
-int acpi_debugfs_init(void);
+void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
@@ -93,10 +90,21 @@ int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *));
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta);
+int acpi_device_setup_files(struct acpi_device *dev);
+void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+ const struct device *dev);
+
+/* --------------------------------------------------------------------------
+ Device Matching and Notification
+ -------------------------------------------------------------------------- */
+struct acpi_device *acpi_companion_match(const struct device *dev);
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+ struct kobj_uevent_env *env);
/* --------------------------------------------------------------------------
Power Resource
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa178c8d..c1b8d03e262e 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,7 +18,9 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
+#include <linux/pmem.h>
#include <linux/io.h>
+#include <asm/cacheflush.h>
#include "nfit.h"
/*
@@ -305,6 +307,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
return true;
}
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_flush_address *flush)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+ GFP_KERNEL);
+
+ if (!nfit_flush)
+ return false;
+ INIT_LIST_HEAD(&nfit_flush->list);
+ nfit_flush->flush = flush;
+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+ flush->device_handle, flush->hint_count);
+ return true;
+}
+
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
const void *end)
{
@@ -338,7 +357,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
return err;
break;
case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
- dev_dbg(dev, "%s: flush\n", __func__);
+ if (!add_flush(acpi_desc, table))
+ return err;
break;
case ACPI_NFIT_TYPE_SMBIOS:
dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +409,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
+ struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
@@ -442,6 +463,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_bdw = nfit_idt->idt;
break;
}
+
+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+ if (nfit_flush->flush->device_handle !=
+ nfit_memdev->memdev->device_handle)
+ continue;
+ nfit_mem->nfit_flush = nfit_flush;
+ break;
+ }
break;
}
@@ -674,11 +703,11 @@ static ssize_t flags_show(struct device *dev,
u16 flags = to_nfit_memdev(dev)->flags;
return sprintf(buf, "%s%s%s%s%s\n",
- flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
- flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
- flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
- flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
- flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+ flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+ flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+ flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+ flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+ flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -736,9 +765,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
- unsigned long long sta;
- int i, rc = -ENODEV;
- acpi_status status;
+ int i;
nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
adev = to_acpi_dev(acpi_desc);
@@ -753,25 +780,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return force_enable_dimms ? 0 : -ENODEV;
}
- status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta);
- if (status == AE_NOT_FOUND) {
- dev_dbg(dev, "%s missing _STA, assuming enabled...\n",
- dev_name(&adev_dimm->dev));
- rc = 0;
- } else if (ACPI_FAILURE(status))
- dev_err(dev, "%s failed to retrieve_STA, disabling...\n",
- dev_name(&adev_dimm->dev));
- else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0)
- dev_info(dev, "%s disabled by firmware\n",
- dev_name(&adev_dimm->dev));
- else
- rc = 0;
-
for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
set_bit(i, &nfit_mem->dsm_mask);
- return force_enable_dimms ? 0 : rc;
+ return 0;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
@@ -821,12 +834,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
- dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+ dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
nvdimm_name(nvdimm),
- mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
- mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
- mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
- mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+ mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+ mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
}
@@ -840,6 +853,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
struct acpi_device *adev;
int i;
+ nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -978,7 +992,25 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+ if (nfit_blk->nvdimm_flush) {
+ /*
+ * The first wmb() is needed to 'sfence' all previous writes
+ * such that they are architecturally visible for the platform
+ * buffer flush. Note that we've already arranged for pmem
+ * writes to avoid the cache via arch_memcpy_to_pmem(). The
+ * final wmb() ensures ordering for the NVDIMM flush write.
+ */
+ wmb();
+ writeq(1, nfit_blk->nvdimm_flush);
+ wmb();
+ } else {
+ wmb_pmem();
+ }
+}
+
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -986,7 +1018,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readq(mmio->base + offset);
+ return readl(mmio->addr.base + offset);
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
@@ -1011,8 +1043,11 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- writeq(cmd, mmio->base + offset);
- /* FIXME: conditionally perform read-back if mandated by firmware */
+ writeq(cmd, mmio->addr.base + offset);
+ wmb_blk(nfit_blk);
+
+ if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+ readq(mmio->addr.base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1061,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
+ lane * mmio->size;
- /* TODO: non-temporal access, flush hints, cache management etc... */
write_blk_ctl(nfit_blk, lane, dpa, len, rw);
while (len) {
unsigned int c;
@@ -1045,13 +1079,24 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
}
if (rw)
- memcpy(mmio->aperture + offset, iobuf + copied, c);
- else
- memcpy(iobuf + copied, mmio->aperture + offset, c);
+ memcpy_to_pmem(mmio->addr.aperture + offset,
+ iobuf + copied, c);
+ else {
+ if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+ mmio_flush_range((void __force *)
+ mmio->addr.aperture + offset, c);
+
+ memcpy_from_pmem(iobuf + copied,
+ mmio->addr.aperture + offset, c);
+ }
copied += c;
len -= c;
}
+
+ if (rw)
+ wmb_blk(nfit_blk);
+
rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
return rc;
}
@@ -1090,7 +1135,10 @@ static void nfit_spa_mapping_release(struct kref *kref)
WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
- iounmap(spa_map->iomem);
+ if (spa_map->type == SPA_MAP_APERTURE)
+ memunmap((void __force *)spa_map->addr.aperture);
+ else
+ iounmap(spa_map->addr.base);
release_mem_region(spa->address, spa->length);
list_del(&spa_map->list);
kfree(spa_map);
@@ -1124,7 +1172,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
resource_size_t start = spa->address;
resource_size_t n = spa->length;
@@ -1136,7 +1184,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
spa_map = find_spa_mapping(acpi_desc, spa);
if (spa_map) {
kref_get(&spa_map->kref);
- return spa_map->iomem;
+ return spa_map->addr.base;
}
spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
@@ -1152,13 +1200,19 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
if (!res)
goto err_mem;
- /* TODO: cacheability based on the spa type */
- spa_map->iomem = ioremap_nocache(start, n);
- if (!spa_map->iomem)
+ spa_map->type = type;
+ if (type == SPA_MAP_APERTURE)
+ spa_map->addr.aperture = (void __pmem *)memremap(start, n,
+ ARCH_MEMREMAP_PMEM);
+ else
+ spa_map->addr.base = ioremap_nocache(start, n);
+
+ if (!spa_map->addr.base)
goto err_map;
list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
- return spa_map->iomem;
+ return spa_map->addr.base;
err_map:
release_mem_region(start, n);
@@ -1171,6 +1225,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
* nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
* @acpi_desc: NFIT descriptor that provided the spa table entry
* @spa: spa table to map
+ * @type: aperture or control region
*
* In the case where block-data-window apertures and
* dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1235,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
* unbound.
*/
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
void __iomem *iomem;
mutex_lock(&acpi_desc->spa_map_mutex);
- iomem = __nfit_spa_map(acpi_desc, spa);
+ iomem = __nfit_spa_map(acpi_desc, spa, type);
mutex_unlock(&acpi_desc->spa_map_mutex);
return iomem;
@@ -1206,12 +1261,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
return 0;
}
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+ struct nd_cmd_dimm_flags flags;
+ int rc;
+
+ memset(&flags, 0, sizeof(flags));
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+ sizeof(flags));
+
+ if (rc >= 0 && flags.status == 0) {
+ nfit_blk->dimm_flags = flags.flags;
+ } else if (rc == -ENOTTY) {
+ /* fall back to a conservative default */
+ nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
+ rc = 0;
+ } else {
+ rc = -ENXIO;
+ }
+
+ return rc;
+}
+
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
struct device *dev)
{
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+ struct nfit_flush *nfit_flush;
struct nfit_blk_mmio *mmio;
struct nfit_blk *nfit_blk;
struct nfit_mem *nfit_mem;
@@ -1223,8 +1301,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
nfit_mem ? "" : " nfit_mem",
- nfit_mem->dcr ? "" : " dcr",
- nfit_mem->bdw ? "" : " bdw");
+ (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+ (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
return -ENXIO;
}
@@ -1237,8 +1315,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
/* map block aperture memory */
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
- if (!mmio->base) {
+ mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+ SPA_MAP_APERTURE);
+ if (!mmio->addr.base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
return -ENOMEM;
@@ -1259,8 +1338,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
mmio = &nfit_blk->mmio[DCR];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
- if (!mmio->base) {
+ mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+ SPA_MAP_CONTROL);
+ if (!mmio->addr.base) {
dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
nvdimm_name(nvdimm));
return -ENOMEM;
@@ -1277,6 +1357,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
return rc;
}
+ rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+ if (rc < 0) {
+ dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ nfit_flush = nfit_mem->nfit_flush;
+ if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+ nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+ nfit_flush->flush->hint_address[0], 8);
+ if (!nfit_blk->nvdimm_flush)
+ return -ENOMEM;
+ }
+
+ if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+
if (mmio->line_size == 0)
return 0;
@@ -1309,7 +1407,7 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
for (i = 0; i < 2; i++) {
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
- if (mmio->base)
+ if (mmio->addr.base)
nfit_spa_unmap(acpi_desc, mmio->spa);
}
nd_blk_region_set_provider_data(ndbr, NULL);
@@ -1459,6 +1557,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
INIT_LIST_HEAD(&acpi_desc->dcrs);
INIT_LIST_HEAD(&acpi_desc->bdws);
INIT_LIST_HEAD(&acpi_desc->idts);
+ INIT_LIST_HEAD(&acpi_desc->flushes);
INIT_LIST_HEAD(&acpi_desc->memdevs);
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 81f2e8c5a79c..7e740156b9c2 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -40,6 +40,11 @@ enum nfit_uuids {
NFIT_UUID_MAX,
};
+enum {
+ ND_BLK_READ_FLUSH = 1,
+ ND_BLK_DCR_LATCH = 2,
+};
+
struct nfit_spa {
struct acpi_nfit_system_address *spa;
struct list_head list;
@@ -60,6 +65,11 @@ struct nfit_idt {
struct list_head list;
};
+struct nfit_flush {
+ struct acpi_nfit_flush_address *flush;
+ struct list_head list;
+};
+
struct nfit_memdev {
struct acpi_nfit_memory_map *memdev;
struct list_head list;
@@ -77,6 +87,7 @@ struct nfit_mem {
struct acpi_nfit_system_address *spa_bdw;
struct acpi_nfit_interleave *idt_dcr;
struct acpi_nfit_interleave *idt_bdw;
+ struct nfit_flush *nfit_flush;
struct list_head list;
struct acpi_device *adev;
unsigned long dsm_mask;
@@ -88,6 +99,7 @@ struct acpi_nfit_desc {
struct mutex spa_map_mutex;
struct list_head spa_maps;
struct list_head memdevs;
+ struct list_head flushes;
struct list_head dimms;
struct list_head spas;
struct list_head dcrs;
@@ -96,6 +108,7 @@ struct acpi_nfit_desc {
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
unsigned long dimm_dsm_force_en;
+ unsigned long bus_dsm_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
};
@@ -105,12 +118,16 @@ enum nd_blk_mmio_selector {
DCR,
};
+struct nd_blk_addr {
+ union {
+ void __iomem *base;
+ void __pmem *aperture;
+ };
+};
+
struct nfit_blk {
struct nfit_blk_mmio {
- union {
- void __iomem *base;
- void *aperture;
- };
+ struct nd_blk_addr addr;
u64 size;
u64 base_offset;
u32 line_size;
@@ -123,6 +140,13 @@ struct nfit_blk {
u64 bdw_offset; /* post interleave offset */
u64 stat_offset;
u64 cmd_offset;
+ void __iomem *nvdimm_flush;
+ u32 dimm_flags;
+};
+
+enum spa_map_type {
+ SPA_MAP_CONTROL,
+ SPA_MAP_APERTURE,
};
struct nfit_spa_mapping {
@@ -130,7 +154,8 @@ struct nfit_spa_mapping {
struct acpi_nfit_system_address *spa;
struct list_head list;
struct kref kref;
- void __iomem *iomem;
+ enum spa_map_type type;
+ struct nd_blk_addr addr;
};
static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index acaa3b4ea504..72b6e9ef0ae9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c262e4acd68d..739a4a6b3b9b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
@@ -47,6 +43,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include "internal.h"
@@ -83,6 +80,7 @@ static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
+static bool acpi_os_initialized;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -175,10 +173,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
if (!addr || !length)
return;
- acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+ /* Resources are never freed */
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@@ -207,7 +209,10 @@ static void __init acpi_reserve_resources(void)
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+ return 0;
}
+fs_initcall_sync(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
@@ -940,21 +945,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
EXPORT_SYMBOL(acpi_os_write_port);
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
- return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
- u64 l, h;
- l = readl(addr);
- h = readl(addr+4);
- return l | (h << 32);
-}
-#endif
-
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
@@ -987,7 +977,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
*(u32 *) value = readl(virt_addr);
break;
case 64:
- *(u64 *) value = read64(virt_addr);
+ *(u64 *) value = readq(virt_addr);
break;
default:
BUG();
@@ -1001,19 +991,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
return AE_OK;
}
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
- writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
- writel(val, addr);
- writel(val>>32, addr+4);
-}
-#endif
-
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
@@ -1042,7 +1019,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
writel(value, virt_addr);
break;
case 64:
- write64(value, virt_addr);
+ writeq(value, virt_addr);
break;
default:
BUG();
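The open-coded read64()/write64() helpers removed above became redundant once the <asm-generic/io-64-nonatomic-lo-hi.h> include was added earlier in this file's diff: on 32-bit builds it supplies readq()/writeq() as two 32-bit accesses, low word first, roughly equivalent to this sketch:

static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
        u32 low = readl(addr);
        u32 high = readl(addr + 4);

        return low + ((u64)high << 32);
}

static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}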
@@ -1309,6 +1286,9 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
long jiffies;
int ret = 0;
+ if (!acpi_os_initialized)
+ return AE_OK;
+
if (!sem || (units < 1))
return AE_BAD_PARAMETER;
@@ -1348,6 +1328,9 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
struct semaphore *sem = (struct semaphore *)handle;
+ if (!acpi_os_initialized)
+ return AE_OK;
+
if (!sem || (units < 1))
return AE_BAD_PARAMETER;
@@ -1856,13 +1839,13 @@ acpi_status __init acpi_os_initialize(void)
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
}
+ acpi_os_initialized = true;
return AE_OK;
}
acpi_status __init acpi_os_initialize1(void)
{
- acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 304eccb0ae5c..6da0f9beab19 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -412,7 +408,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
- if (dev->irq_managed && dev->irq > 0)
+ if (pci_has_managed_irq(dev))
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -457,8 +453,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
- dev->irq = rc;
- dev->irq_managed = 1;
+ pci_set_managed_irq(dev, rc);
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -481,16 +476,8 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin || !dev->irq_managed || dev->irq <= 0)
- return;
-
- /* Keep IOAPIC pin configuration when suspending */
- if (dev->dev.power.is_prepared)
+ if (!pin || !pci_has_managed_irq(dev))
return;
-#ifdef CONFIG_PM
- if (dev->dev.power.runtime_status == RPM_SUSPENDING)
- return;
-#endif
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
@@ -511,6 +498,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- dev->irq_managed = 0;
+ pci_reset_managed_irq(dev);
}
}
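The managed-IRQ state is now hidden behind pci_has_managed_irq()/pci_set_managed_irq()/pci_reset_managed_irq() instead of open-coded dev->irq and dev->irq_managed accesses. The helpers themselves live outside this diff (in include/linux/pci.h in this kernel); presumably they are thin inlines along these lines, reconstructed here for illustration only:

/* Illustrative reconstruction, not part of this diff. */
static inline bool pci_has_managed_irq(struct pci_dev *pdev)
{
        return pdev->irq_managed && pdev->irq > 0;
}

static inline void pci_set_managed_irq(struct pci_dev *pdev, u32 irq)
{
        pdev->irq = irq;
        pdev->irq_managed = 1;
}

static inline void pci_reset_managed_irq(struct pci_dev *pdev)
{
        pdev->irq = 0;
        pdev->irq_managed = 0;
}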
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cfd7581cc19f..3b4ea98e3ea0 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* TBD:
@@ -826,6 +822,22 @@ void acpi_penalize_isa_irq(int irq, int active)
}
/*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
+ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
+ * PCI IRQs.
+ */
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
+}
+
+/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
* Useful for telling ACPI how not to interfere with your ISA sound card.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1b5569c092c6..393706a5261b 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 139d9e479370..7188e53b6b7c 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -20,10 +20,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 93eac53b5110..fcd4ce6f78d5 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -1,8 +1,10 @@
/*
- * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $)
+ * drivers/acpi/power.c - ACPI Power Resources management.
*
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 - 2015 Intel Corp.
+ * Author: Andy Grover <andrew.grover@intel.com>
+ * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -16,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -27,10 +25,11 @@
* ACPI power-managed devices may be controlled in two ways:
* 1. via "Device Specific (D-State) Control"
* 2. via "Power Resource Control".
- * This module is used to manage devices relying on Power Resource Control.
+ * The code below deals with ACPI Power Resources control.
*
- * An ACPI "power resource object" describes a software controllable power
- * plane, clock plane, or other resource used by a power managed device.
+ * An ACPI "power resource object" represents a software controllable power
+ * plane, clock plane, or other resource depended on by a device.
+ *
* A device may rely on multiple power resources, and a power resource
* may be shared by multiple devices.
*/
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d9f71581b79b..51e658f21e95 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -159,38 +155,28 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __refdata acpi_cpu_notifier = {
+static struct notifier_block acpi_cpu_notifier = {
.notifier_call = acpi_cpu_soft_notify,
};
-static int __acpi_processor_start(struct acpi_device *device)
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
+static int acpi_pss_perf_init(struct acpi_processor *pr,
+ struct acpi_device *device)
{
- struct acpi_processor *pr = acpi_driver_data(device);
- acpi_status status;
int result = 0;
- if (!pr)
- return -ENODEV;
-
- if (pr->flags.need_hotplug_init)
- return 0;
-
-#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr, 0);
-#endif
+
acpi_processor_get_throttling_info(pr);
if (pr->flags.throttling)
pr->flags.limit = 1;
- if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
- acpi_processor_power_init(pr);
-
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
if (IS_ERR(pr->cdev)) {
result = PTR_ERR(pr->cdev);
- goto err_power_exit;
+ return result;
}
dev_dbg(&device->dev, "registered as cooling_device%d\n",
@@ -204,6 +190,7 @@ static int __acpi_processor_start(struct acpi_device *device)
"Failed to create sysfs link 'thermal_cooling'\n");
goto err_thermal_unregister;
}
+
result = sysfs_create_link(&pr->cdev->device.kobj,
&device->dev.kobj,
"device");
@@ -213,17 +200,61 @@ static int __acpi_processor_start(struct acpi_device *device)
goto err_remove_sysfs_thermal;
}
- status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
- acpi_processor_notify, device);
- if (ACPI_SUCCESS(status))
- return 0;
-
sysfs_remove_link(&pr->cdev->device.kobj, "device");
err_remove_sysfs_thermal:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
thermal_cooling_device_unregister(pr->cdev);
- err_power_exit:
+
+ return result;
+}
+
+static void acpi_pss_perf_exit(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ if (pr->cdev) {
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(pr->cdev);
+ pr->cdev = NULL;
+ }
+}
+#else
+static inline int acpi_pss_perf_init(struct acpi_processor *pr,
+ struct acpi_device *device)
+{
+ return 0;
+}
+
+static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
+ struct acpi_device *device) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
+static int __acpi_processor_start(struct acpi_device *device)
+{
+ struct acpi_processor *pr = acpi_driver_data(device);
+ acpi_status status;
+ int result = 0;
+
+ if (!pr)
+ return -ENODEV;
+
+ if (pr->flags.need_hotplug_init)
+ return 0;
+
+ if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr);
+
+ result = acpi_pss_perf_init(pr, device);
+ if (result)
+ goto err_power_exit;
+
+ status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_processor_notify, device);
+ if (ACPI_SUCCESS(status))
+ return 0;
+
+err_power_exit:
acpi_processor_power_exit(pr);
return result;
}
@@ -252,15 +283,10 @@ static int acpi_processor_stop(struct device *dev)
pr = acpi_driver_data(device);
if (!pr)
return 0;
-
acpi_processor_power_exit(pr);
- if (pr->cdev) {
- sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
- sysfs_remove_link(&pr->cdev->device.kobj, "device");
- thermal_cooling_device_unregister(pr->cdev);
- pr->cdev = NULL;
- }
+ acpi_pss_perf_exit(pr, device);
+
return 0;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d540f42c9232..175c86bee3a9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -21,10 +21,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cfc8aba72f86..bb01dea39fdc 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,10 +20,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
*/
#include <linux/kernel.h>
@@ -87,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
if (ignore_ppc)
return 0;
- if (event != CPUFREQ_INCOMPATIBLE)
+ if (event != CPUFREQ_ADJUST)
return 0;
mutex_lock(&performance_mutex);
@@ -784,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
EXPORT_SYMBOL(acpi_processor_register_performance);
-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
- *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
{
struct acpi_processor *pr;
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e003663b2f8e..1fed84a092c2 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 84243c32e29c..f170d746336d 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 7836e2e980f4..6d99450549c5 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -528,13 +528,14 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
if (!val)
return obj->package.count;
- else if (nval <= 0)
- return -EINVAL;
if (nval > obj->package.count)
return -EOVERFLOW;
+ else if (nval <= 0)
+ return -EINVAL;
items = obj->package.elements;
+
switch (proptype) {
case DEV_PROP_U8:
ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 10561ce16ed1..15d22db05054 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -26,7 +22,6 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
-#include <linux/list.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
@@ -194,6 +189,7 @@ static bool acpi_decode_space(struct resource_win *win,
u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
bool wp = addr->info.mem.write_protect;
u64 len = attr->address_length;
+ u64 start, end, offset = 0;
struct resource *res = &win->res;
/*
@@ -205,9 +201,6 @@ static bool acpi_decode_space(struct resource_win *win,
pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
addr->min_address_fixed, addr->max_address_fixed, len);
- res->start = attr->minimum;
- res->end = attr->maximum;
-
/*
* For bridges that translate addresses across the bridge,
* translation_offset is the offset that must be added to the
@@ -215,12 +208,22 @@ static bool acpi_decode_space(struct resource_win *win,
* primary side. Non-bridge devices must list 0 for all Address
* Translation offset bits.
*/
- if (addr->producer_consumer == ACPI_PRODUCER) {
- res->start += attr->translation_offset;
- res->end += attr->translation_offset;
- } else if (attr->translation_offset) {
+ if (addr->producer_consumer == ACPI_PRODUCER)
+ offset = attr->translation_offset;
+ else if (attr->translation_offset)
pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
attr->translation_offset);
+ start = attr->minimum + offset;
+ end = attr->maximum + offset;
+
+ win->offset = offset;
+ res->start = start;
+ res->end = end;
+ if (sizeof(resource_size_t) < sizeof(u64) &&
+ (offset != win->offset || start != res->start || end != res->end)) {
+ pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
+ attr->minimum, attr->maximum);
+ return false;
}
switch (addr->resource_type) {
@@ -237,8 +240,6 @@ static bool acpi_decode_space(struct resource_win *win,
return false;
}
- win->offset = attr->translation_offset;
-
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
@@ -622,164 +623,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
- struct list_head node;
- u64 start;
- u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
- char *desc)
-{
- unsigned int length = end - start + 1;
- struct resource *res;
-
- res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
- request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (!res)
- return -EIO;
-
- res->flags &= ~flags;
- return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
- unsigned long flags, char *desc,
- struct list_head *head)
-{
- struct reserved_region *reg;
- int error;
-
- reg = kmalloc(sizeof(*reg), GFP_KERNEL);
- if (!reg)
- return -ENOMEM;
-
- error = request_range(start, end, space_id, flags, desc);
- if (error) {
- kfree(reg);
- return error;
- }
-
- reg->start = start;
- reg->end = end;
- list_add_tail(&reg->node, head);
- return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it. If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem. It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
- unsigned long flags, char *desc)
-{
- struct list_head *regions;
- struct reserved_region *reg;
- u64 end = start + length - 1;
- int ret = 0, error = 0;
-
- if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- regions = &reserved_io_regions;
- else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- regions = &reserved_mem_regions;
- else
- return -EINVAL;
-
- if (list_empty(regions))
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- list_for_each_entry(reg, regions, node)
- if (reg->start == end + 1) {
- /* The new region can be prepended to this one. */
- ret = request_range(start, end, space_id, flags, desc);
- if (!ret)
- reg->start = start;
-
- return ret;
- } else if (reg->start > end) {
- /* No overlap. Add the new region here and get out. */
- return add_region_before(start, end, space_id, flags,
- desc, &reg->node);
- } else if (reg->end == start - 1) {
- goto combine;
- } else if (reg->end >= start) {
- goto overlap;
- }
-
- /* The new region goes after the last existing one. */
- return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
- /*
- * The new region overlaps an existing one.
- *
- * The head part of the new region immediately preceding the existing
- * overlapping one can be combined with it right away.
- */
- if (reg->start > start) {
- error = request_range(start, reg->start - 1, space_id, flags, desc);
- if (error)
- ret = error;
- else
- reg->start = start;
- }
-
- combine:
- /*
- * The new region is adjacent to an existing one. If it extends beyond
- * that region all the way to the next one, it is possible to combine
- * all three of them.
- */
- while (reg->end < end) {
- struct reserved_region *next = NULL;
- u64 a = reg->end + 1, b = end;
-
- if (!list_is_last(&reg->node, regions)) {
- next = list_next_entry(reg, node);
- if (next->start <= end)
- b = next->start - 1;
- }
- error = request_range(a, b, space_id, flags, desc);
- if (!error) {
- if (next && next->start == b + 1) {
- reg->end = next->end;
- list_del(&next->node);
- kfree(next);
- } else {
- reg->end = end;
- break;
- }
- } else if (next) {
- if (!ret)
- ret = error;
-
- reg = next;
- } else {
- break;
- }
- }
-
- return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 01504c819e8f..cb3dedb1beae 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2649a068671d..01136b879038 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -115,264 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
return 0;
}
-/**
- * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Creates hid/cid(s) string needed for modalias and uevent
- * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
- * char *modalias: "acpi:IBM0001:ACPI0001"
- * Return: 0: no _HID and no _CID
- * -EINVAL: output error
- * -ENOMEM: output is truncated
-*/
-static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
- int size)
-{
- int len;
- int count;
- struct acpi_hardware_id *id;
-
- /*
- * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
- * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
- * device's list.
- */
- count = 0;
- list_for_each_entry(id, &acpi_dev->pnp.ids, list)
- if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
- count++;
-
- if (!count)
- return 0;
-
- len = snprintf(modalias, size, "acpi:");
- if (len <= 0)
- return len;
-
- size -= len;
-
- list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
- if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
- continue;
-
- count = snprintf(&modalias[len], size, "%s:", id->id);
- if (count < 0)
- return -EINVAL;
-
- if (count >= size)
- return -ENOMEM;
-
- len += count;
- size -= count;
- }
- modalias[len] = '\0';
- return len;
-}
-
-/**
- * create_of_modalias - Creates DT compatible string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Expose DT compatible modalias as of:NnameTCcompatible. This function should
- * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
- * ACPI/PNP IDs.
- */
-static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
- int size)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- const union acpi_object *of_compatible, *obj;
- int len, count;
- int i, nval;
- char *c;
-
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
- /* DT strings are all in lower case */
- for (c = buf.pointer; *c != '\0'; c++)
- *c = tolower(*c);
-
- len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
- ACPI_FREE(buf.pointer);
-
- if (len <= 0)
- return len;
-
- of_compatible = acpi_dev->data.of_compatible;
- if (of_compatible->type == ACPI_TYPE_PACKAGE) {
- nval = of_compatible->package.count;
- obj = of_compatible->package.elements;
- } else { /* Must be ACPI_TYPE_STRING. */
- nval = 1;
- obj = of_compatible;
- }
- for (i = 0; i < nval; i++, obj++) {
- count = snprintf(&modalias[len], size, "C%s",
- obj->string.pointer);
- if (count < 0)
- return -EINVAL;
-
- if (count >= size)
- return -ENOMEM;
-
- len += count;
- size -= count;
- }
- modalias[len] = '\0';
- return len;
-}
-
-/*
- * acpi_companion_match() - Can we match via ACPI companion device
- * @dev: Device in question
- *
- * Check if the given device has an ACPI companion and if that companion has
- * a valid list of PNP IDs, and if the device is the first (primary) physical
- * device associated with it. Return the companion pointer if that's the case
- * or NULL otherwise.
- *
- * If multiple physical devices are attached to a single ACPI companion, we need
- * to be careful. The usage scenario for this kind of relationship is that all
- * of the physical devices in question use resources provided by the ACPI
- * companion. A typical case is an MFD device where all the sub-devices share
- * the parent's ACPI companion. In such cases we can only allow the primary
- * (first) physical device to be matched with the help of the companion's PNP
- * IDs.
- *
- * Additional physical devices sharing the ACPI companion can still use
- * resources available from it but they will be matched normally using functions
- * provided by their bus types (and analogously for their modalias).
- */
-static struct acpi_device *acpi_companion_match(const struct device *dev)
-{
- struct acpi_device *adev;
- struct mutex *physical_node_lock;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return NULL;
-
- if (list_empty(&adev->pnp.ids))
- return NULL;
-
- physical_node_lock = &adev->physical_node_lock;
- mutex_lock(physical_node_lock);
- if (list_empty(&adev->physical_node_list)) {
- adev = NULL;
- } else {
- const struct acpi_device_physical_node *node;
-
- node = list_first_entry(&adev->physical_node_list,
- struct acpi_device_physical_node, node);
- if (node->dev != dev)
- adev = NULL;
- }
- mutex_unlock(physical_node_lock);
-
- return adev;
-}
-
-static int __acpi_device_uevent_modalias(struct acpi_device *adev,
- struct kobj_uevent_env *env)
-{
- int len;
-
- if (!adev)
- return -ENODEV;
-
- if (list_empty(&adev->pnp.ids))
- return 0;
-
- if (add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
-
- return 0;
-}
-
-/*
- * Creates uevent modalias field for ACPI enumerated devices.
- * Because the other buses does not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
-{
- return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
-}
-EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
-
-static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
-{
- int len, count;
-
- if (!adev)
- return -ENODEV;
-
- if (list_empty(&adev->pnp.ids))
- return 0;
-
- len = create_pnp_modalias(adev, buf, size - 1);
- if (len < 0) {
- return len;
- } else if (len > 0) {
- buf[len++] = '\n';
- size -= len;
- }
- if (!adev->data.of_compatible)
- return len;
-
- count = create_of_modalias(adev, buf + len, size - 1);
- if (count < 0) {
- return count;
- } else if (count > 0) {
- len += count;
- buf[len++] = '\n';
- }
-
- return len;
-}
-
-/*
- * Creates modalias sysfs attribute for ACPI enumerated devices.
- * Because the other buses does not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_modalias(struct device *dev, char *buf, int size)
-{
- return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
-}
-EXPORT_SYMBOL_GPL(acpi_device_modalias);
-
-static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
- return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
-}
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
{
struct acpi_device_physical_node *pn;
@@ -701,397 +443,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
unlock_device_hotplug();
}
-static ssize_t real_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct acpi_device *adev = to_acpi_device(dev);
- int state;
- int ret;
-
- ret = acpi_device_get_power(adev, &state);
- if (ret)
- return ret;
-
- return sprintf(buf, "%s\n", acpi_power_state_string(state));
-}
-
-static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
-
-static ssize_t power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct acpi_device *adev = to_acpi_device(dev);
-
- return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
-}
-
-static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
-
-static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct acpi_device *acpi_device = to_acpi_device(d);
- acpi_object_type not_used;
- acpi_status status;
-
- if (!count || buf[0] != '1')
- return -EINVAL;
-
- if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
- && !acpi_device->driver)
- return -ENODEV;
-
- status = acpi_get_type(acpi_device->handle, &not_used);
- if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
- return -ENODEV;
-
- get_device(&acpi_device->dev);
- status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
- if (ACPI_SUCCESS(status))
- return count;
-
- put_device(&acpi_device->dev);
- acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
- return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-}
-
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
-
-static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
-
- return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
-}
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
-
-static ssize_t acpi_device_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
-
- return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
-}
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
-
-static ssize_t acpi_device_adr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
-
- return sprintf(buf, "0x%08x\n",
- (unsigned int)(acpi_dev->pnp.bus_address));
-}
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
-
-static ssize_t
-acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
- int result;
-
- result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
- if (result)
- goto end;
-
- result = sprintf(buf, "%s\n", (char*)path.pointer);
- kfree(path.pointer);
-end:
- return result;
-}
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
-
-/* sysfs file that shows description text from the ACPI _STR method */
-static ssize_t description_show(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- int result;
-
- if (acpi_dev->pnp.str_obj == NULL)
- return 0;
-
- /*
- * The _STR object contains a Unicode identifier for a device.
- * We need to convert to utf-8 so it can be displayed.
- */
- result = utf16s_to_utf8s(
- (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
- acpi_dev->pnp.str_obj->buffer.length,
- UTF16_LITTLE_ENDIAN, buf,
- PAGE_SIZE);
-
- buf[result++] = '\n';
-
- return result;
-}
-static DEVICE_ATTR(description, 0444, description_show, NULL);
-
-static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- acpi_status status;
- unsigned long long sun;
-
- status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return sprintf(buf, "%llu\n", sun);
-}
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
-
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- acpi_status status;
- unsigned long long sta;
-
- status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return sprintf(buf, "%llu\n", sta);
-}
-static DEVICE_ATTR_RO(status);
-
-static int acpi_device_setup_files(struct acpi_device *dev)
-{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- int result = 0;
-
- /*
- * Devices gotten from FADT don't have a "path" attribute
- */
- if (dev->handle) {
- result = device_create_file(&dev->dev, &dev_attr_path);
- if (result)
- goto end;
- }
-
- if (!list_empty(&dev->pnp.ids)) {
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
-
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
- }
-
- /*
- * If device has _STR, 'description' file is created
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- status = acpi_evaluate_object(dev->handle, "_STR",
- NULL, &buffer);
- if (ACPI_FAILURE(status))
- buffer.pointer = NULL;
- dev->pnp.str_obj = buffer.pointer;
- result = device_create_file(&dev->dev, &dev_attr_description);
- if (result)
- goto end;
- }
-
- if (dev->pnp.type.bus_address)
- result = device_create_file(&dev->dev, &dev_attr_adr);
- if (dev->pnp.unique_id)
- result = device_create_file(&dev->dev, &dev_attr_uid);
-
- if (acpi_has_method(dev->handle, "_SUN")) {
- result = device_create_file(&dev->dev, &dev_attr_sun);
- if (result)
- goto end;
- }
-
- if (acpi_has_method(dev->handle, "_STA")) {
- result = device_create_file(&dev->dev, &dev_attr_status);
- if (result)
- goto end;
- }
-
- /*
- * If device has _EJ0, 'eject' file is created that is used to trigger
- * hot-removal function from userland.
- */
- if (acpi_has_method(dev->handle, "_EJ0")) {
- result = device_create_file(&dev->dev, &dev_attr_eject);
- if (result)
- return result;
- }
-
- if (dev->flags.power_manageable) {
- result = device_create_file(&dev->dev, &dev_attr_power_state);
- if (result)
- return result;
-
- if (dev->power.flags.power_resources)
- result = device_create_file(&dev->dev,
- &dev_attr_real_power_state);
- }
-
-end:
- return result;
-}
-
-static void acpi_device_remove_files(struct acpi_device *dev)
-{
- if (dev->flags.power_manageable) {
- device_remove_file(&dev->dev, &dev_attr_power_state);
- if (dev->power.flags.power_resources)
- device_remove_file(&dev->dev,
- &dev_attr_real_power_state);
- }
-
- /*
- * If device has _STR, remove 'description' file
- */
- if (acpi_has_method(dev->handle, "_STR")) {
- kfree(dev->pnp.str_obj);
- device_remove_file(&dev->dev, &dev_attr_description);
- }
- /*
- * If device has _EJ0, remove 'eject' file.
- */
- if (acpi_has_method(dev->handle, "_EJ0"))
- device_remove_file(&dev->dev, &dev_attr_eject);
-
- if (acpi_has_method(dev->handle, "_SUN"))
- device_remove_file(&dev->dev, &dev_attr_sun);
-
- if (dev->pnp.unique_id)
- device_remove_file(&dev->dev, &dev_attr_uid);
- if (dev->pnp.type.bus_address)
- device_remove_file(&dev->dev, &dev_attr_adr);
- device_remove_file(&dev->dev, &dev_attr_modalias);
- device_remove_file(&dev->dev, &dev_attr_hid);
- if (acpi_has_method(dev->handle, "_STA"))
- device_remove_file(&dev->dev, &dev_attr_status);
- if (dev->handle)
- device_remove_file(&dev->dev, &dev_attr_path);
-}
-/* --------------------------------------------------------------------------
- ACPI Bus operations
- -------------------------------------------------------------------------- */
-
-/**
- * acpi_of_match_device - Match device object using the "compatible" property.
- * @adev: ACPI device object to match.
- * @of_match_table: List of device IDs to match against.
- *
- * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
- * identifiers and a _DSD object with the "compatible" property, use that
- * property to match against the given list of identifiers.
- */
-static bool acpi_of_match_device(struct acpi_device *adev,
- const struct of_device_id *of_match_table)
-{
- const union acpi_object *of_compatible, *obj;
- int i, nval;
-
- if (!adev)
- return false;
-
- of_compatible = adev->data.of_compatible;
- if (!of_match_table || !of_compatible)
- return false;
-
- if (of_compatible->type == ACPI_TYPE_PACKAGE) {
- nval = of_compatible->package.count;
- obj = of_compatible->package.elements;
- } else { /* Must be ACPI_TYPE_STRING. */
- nval = 1;
- obj = of_compatible;
- }
- /* Now we can look for the driver DT compatible strings */
- for (i = 0; i < nval; i++, obj++) {
- const struct of_device_id *id;
-
- for (id = of_match_table; id->compatible[0]; id++)
- if (!strcasecmp(obj->string.pointer, id->compatible))
- return true;
- }
-
- return false;
-}
-
-static const struct acpi_device_id *__acpi_match_device(
- struct acpi_device *device,
- const struct acpi_device_id *ids,
- const struct of_device_id *of_ids)
-{
- const struct acpi_device_id *id;
- struct acpi_hardware_id *hwid;
-
- /*
- * If the device is not present, it is unnecessary to load device
- * driver for it.
- */
- if (!device || !device->status.present)
- return NULL;
-
- list_for_each_entry(hwid, &device->pnp.ids, list) {
- /* First, check the ACPI/PNP IDs provided by the caller. */
- for (id = ids; id->id[0]; id++)
- if (!strcmp((char *) id->id, hwid->id))
- return id;
-
- /*
- * Next, check ACPI_DT_NAMESPACE_HID and try to match the
- * "compatible" property if found.
- *
- * The id returned by the below is not valid, but the only
- * caller passing non-NULL of_ids here is only interested in
- * whether or not the return value is NULL.
- */
- if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
- && acpi_of_match_device(device, of_ids))
- return id;
- }
- return NULL;
-}
-
-/**
- * acpi_match_device - Match a struct device against a given list of ACPI IDs
- * @ids: Array of struct acpi_device_id object to match against.
- * @dev: The device structure to match.
- *
- * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
- * object for that handle and use that object to match against a given list of
- * device IDs.
- *
- * Return a pointer to the first matching ID on success or %NULL on failure.
- */
-const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
- const struct device *dev)
-{
- return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
-}
-EXPORT_SYMBOL_GPL(acpi_match_device);
-
-int acpi_match_device_ids(struct acpi_device *device,
- const struct acpi_device_id *ids)
-{
- return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
-}
-EXPORT_SYMBOL(acpi_match_device_ids);
-
-bool acpi_driver_match_device(struct device *dev,
- const struct device_driver *drv)
-{
- if (!drv->acpi_match_table)
- return acpi_of_match_device(ACPI_COMPANION(dev),
- drv->of_match_table);
-
- return !!__acpi_match_device(acpi_companion_match(dev),
- drv->acpi_match_table, drv->of_match_table);
-}
-EXPORT_SYMBOL_GPL(acpi_driver_match_device);
-
static void acpi_free_power_resources_lists(struct acpi_device *device)
{
int i;
@@ -1118,144 +469,6 @@ static void acpi_device_release(struct device *dev)
kfree(acpi_dev);
}
-static int acpi_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = to_acpi_driver(drv);
-
- return acpi_dev->flags.match_driver
- && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
-}
-
-static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
-}
-
-static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
-{
- struct acpi_device *device = data;
-
- device->driver->ops.notify(device, event);
-}
-
-static void acpi_device_notify_fixed(void *data)
-{
- struct acpi_device *device = data;
-
- /* Fixed hardware devices have no handles */
- acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
-}
-
-static u32 acpi_device_fixed_event(void *data)
-{
- acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
- return ACPI_INTERRUPT_HANDLED;
-}
-
-static int acpi_device_install_notify_handler(struct acpi_device *device)
-{
- acpi_status status;
-
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
- status =
- acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
- acpi_device_fixed_event,
- device);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
- status =
- acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
- acpi_device_fixed_event,
- device);
- else
- status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY,
- acpi_device_notify,
- device);
-
- if (ACPI_FAILURE(status))
- return -EINVAL;
- return 0;
-}
-
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
-{
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
- acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
- acpi_device_fixed_event);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
- acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
- acpi_device_fixed_event);
- else
- acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
- acpi_device_notify);
-}
-
-static int acpi_device_probe(struct device *dev)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
- int ret;
-
- if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
- return -EINVAL;
-
- if (!acpi_drv->ops.add)
- return -ENOSYS;
-
- ret = acpi_drv->ops.add(acpi_dev);
- if (ret)
- return ret;
-
- acpi_dev->driver = acpi_drv;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Driver [%s] successfully bound to device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
-
- if (acpi_drv->ops.notify) {
- ret = acpi_device_install_notify_handler(acpi_dev);
- if (ret) {
- if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev);
-
- acpi_dev->driver = NULL;
- acpi_dev->driver_data = NULL;
- return ret;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
- get_device(dev);
- return 0;
-}
-
-static int acpi_device_remove(struct device * dev)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
-
- if (acpi_drv) {
- if (acpi_drv->ops.notify)
- acpi_device_remove_notify_handler(acpi_dev);
- if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev);
- }
- acpi_dev->driver = NULL;
- acpi_dev->driver_data = NULL;
-
- put_device(dev);
- return 0;
-}
-
-struct bus_type acpi_bus_type = {
- .name = "acpi",
- .match = acpi_bus_match,
- .probe = acpi_device_probe,
- .remove = acpi_device_remove,
- .uevent = acpi_device_uevent,
-};
-
static void acpi_device_del(struct acpi_device *device)
{
mutex_lock(&acpi_device_lock);
@@ -1503,47 +716,6 @@ struct acpi_device *acpi_get_next_child(struct device *dev,
}
/* --------------------------------------------------------------------------
- Driver Management
- -------------------------------------------------------------------------- */
-/**
- * acpi_bus_register_driver - register a driver with the ACPI bus
- * @driver: driver being registered
- *
- * Registers a driver with the ACPI bus. Searches the namespace for all
- * devices that match the driver's criteria and binds. Returns zero for
- * success or a negative error status for failure.
- */
-int acpi_bus_register_driver(struct acpi_driver *driver)
-{
- int ret;
-
- if (acpi_disabled)
- return -ENODEV;
- driver->drv.name = driver->name;
- driver->drv.bus = &acpi_bus_type;
- driver->drv.owner = driver->owner;
-
- ret = driver_register(&driver->drv);
- return ret;
-}
-
-EXPORT_SYMBOL(acpi_bus_register_driver);
-
-/**
- * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
- * @driver: driver to unregister
- *
- * Unregisters a driver with the ACPI bus. Searches the namespace for all
- * devices that match the driver's criteria and unbinds.
- */
-void acpi_bus_unregister_driver(struct acpi_driver *driver)
-{
- driver_unregister(&driver->drv);
-}
-
-EXPORT_SYMBOL(acpi_bus_unregister_driver);
-
-/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
@@ -2101,6 +1273,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_UID)
pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
+ if (info->valid & ACPI_VALID_CLS)
+ acpi_add_id(pnp, info->class_code.string);
kfree(info);
@@ -2716,12 +1890,6 @@ int __init acpi_scan_init(void)
{
int result;
- result = bus_register(&acpi_bus_type);
- if (result) {
- /* We don't want to quit even if we failed to add suspend/resume */
- printk(KERN_ERR PREFIX "Could not register bus type\n");
- }
-
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0876d77b3206..40a42655227c 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -69,6 +69,8 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_INIT),
ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
ACPI_DEBUG_INIT(ACPI_LV_INFO),
+ ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
+ ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_PARSE),
@@ -162,55 +164,116 @@ static const struct kernel_param_ops param_ops_debug_level = {
module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
-static char trace_method_name[6];
-module_param_string(trace_method_name, trace_method_name, 6, 0644);
-static unsigned int trace_debug_layer;
-module_param(trace_debug_layer, uint, 0644);
-static unsigned int trace_debug_level;
-module_param(trace_debug_level, uint, 0644);
+static char trace_method_name[1024];
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
+int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
{
- int result = 0;
+ u32 saved_flags = 0;
+ bool is_abs_path = true;
- if (!strncmp(val, "enable", sizeof("enable") - 1)) {
- result = acpi_debug_trace(trace_method_name, trace_debug_level,
- trace_debug_layer, 0);
- if (result)
- result = -EBUSY;
- goto exit;
- }
+ if (*val != '\\')
+ is_abs_path = false;
- if (!strncmp(val, "disable", sizeof("disable") - 1)) {
- int name = 0;
- result = acpi_debug_trace((char *)&name, trace_debug_level,
- trace_debug_layer, 0);
- if (result)
- result = -EBUSY;
- goto exit;
+ if ((is_abs_path && strlen(val) > 1023) ||
+ (!is_abs_path && strlen(val) > 1022)) {
+ pr_err("%s: string parameter too long\n", kp->name);
+ return -ENOSPC;
}
- if (!strncmp(val, "1", 1)) {
- result = acpi_debug_trace(trace_method_name, trace_debug_level,
- trace_debug_layer, 1);
- if (result)
- result = -EBUSY;
- goto exit;
+ /*
+ * It's not safe to update acpi_gbl_trace_method_name without
+ * having the tracer stopped, so we save the original tracer
+ * state and disable it.
+ */
+ saved_flags = acpi_gbl_trace_flags;
+ (void)acpi_debug_trace(NULL,
+ acpi_gbl_trace_dbg_level,
+ acpi_gbl_trace_dbg_layer,
+ 0);
+
+ /* This is a hack. We can't kmalloc in early boot. */
+ if (is_abs_path)
+ strcpy(trace_method_name, val);
+ else {
+ trace_method_name[0] = '\\';
+ strcpy(trace_method_name+1, val);
}
- result = -EINVAL;
-exit:
- return result;
+ /* Restore the original tracer state */
+ (void)acpi_debug_trace(trace_method_name,
+ acpi_gbl_trace_dbg_level,
+ acpi_gbl_trace_dbg_layer,
+ saved_flags);
+
+ return 0;
+}
+
+static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
+{
+ return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
+}
+
+static const struct kernel_param_ops param_ops_trace_method = {
+ .set = param_set_trace_method_name,
+ .get = param_get_trace_method_name,
+};
+
+static const struct kernel_param_ops param_ops_trace_attrib = {
+ .set = param_set_uint,
+ .get = param_get_uint,
+};
+
+module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
+module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
+module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+ acpi_status status;
+ const char *method = trace_method_name;
+ u32 flags = 0;
+
+/* So "xxx-once" comparison should go prior than "xxx" comparison */
+#define acpi_compare_param(val, key) \
+ strncmp((val), (key), sizeof(key) - 1)
+
+ if (!acpi_compare_param(val, "enable")) {
+ method = NULL;
+ flags = ACPI_TRACE_ENABLED;
+ } else if (!acpi_compare_param(val, "disable"))
+ method = NULL;
+ else if (!acpi_compare_param(val, "method-once"))
+ flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
+ else if (!acpi_compare_param(val, "method"))
+ flags = ACPI_TRACE_ENABLED;
+ else if (!acpi_compare_param(val, "opcode-once"))
+ flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
+ else if (!acpi_compare_param(val, "opcode"))
+ flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
+ else
+ return -EINVAL;
+
+ status = acpi_debug_trace(method,
+ acpi_gbl_trace_dbg_level,
+ acpi_gbl_trace_dbg_layer,
+ flags);
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
+
+ return 0;
}
static int param_get_trace_state(char *buffer, struct kernel_param *kp)
{
- if (!acpi_gbl_trace_method_name)
+ if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable");
else {
- if (acpi_gbl_trace_flags & 1)
- return sprintf(buffer, "1");
- else
+ if (acpi_gbl_trace_method_name) {
+ if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
+ return sprintf(buffer, "method-once");
+ else
+ return sprintf(buffer, "method");
+ } else
return sprintf(buffer, "enable");
}
return 0;
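With the parameters rewired through module_param_cb(), method tracing can be driven from sysfs at runtime. A hedged userspace sketch follows; the file names derive from the parameter names above, while the method path and the assumption that a trace_state parameter is registered the same way are illustrative:

#include <stdio.h>

/* Illustrative only: arm a one-shot AML trace for a single method. */
static int write_param(const char *param, const char *val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/acpi/parameters/%s", param);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        write_param("trace_method_name", "\\_SB.PCI0._INI"); /* example method */
        write_param("trace_debug_level", "0xffffffff");
        write_param("trace_state", "method-once");
        return 0;
}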
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2e19189da0ee..17a6fa01a338 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6d4e44ea74ac..30d8518b25fb 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This driver fully implements the ACPI thermal policy as described in the
@@ -529,8 +525,7 @@ static void acpi_thermal_check(void *data)
/* sys I/F for generic thermal sysfs support */
-static int thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
{
struct acpi_thermal *tz = thermal->devdata;
int result;
@@ -637,7 +632,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
}
static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct acpi_thermal *tz = thermal->devdata;
int i;
@@ -690,7 +685,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
}
static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temperature) {
+ int *temperature)
+{
struct acpi_thermal *tz = thermal->devdata;
if (tz->trips.critical.flags.valid) {
@@ -713,8 +709,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
return -EINVAL;
if (type == THERMAL_TRIP_ACTIVE) {
- unsigned long trip_temp;
- unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ int trip_temp;
+ int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
tz->temperature, tz->kelvin_offset);
if (thermal_get_trip_temp(thermal, trip, &trip_temp))
return -EINVAL;
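The thermal hunks change the temperature type from unsigned long to plain int millicelsius, so negative readings are representable. ACPI hardware reports tenths of a Kelvin, and DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET() boils down to a subtract-and-scale; a runnable sketch of the arithmetic, using the conventional 2732 deci-Kelvin offset:

#include <stdio.h>

/* ACPI _TMP returns deci-Kelvin; the thermal core wants millicelsius. */
static int deci_kelvin_to_millicelsius(int temp_dk, int offset_dk)
{
        return (temp_dk - offset_dk) * 100;
}

int main(void)
{
        /* 3032 dK with a 2732 dK offset -> 30000 mC, i.e. 30.0 C */
        printf("%d\n", deci_kelvin_to_millicelsius(3032, 2732));
        return 0;
}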
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 67c548ad3764..475c9079bf85 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
+/* This uses a workqueue to avoid various lock ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+ if (acpi_video_get_backlight_type() != acpi_backlight_video)
+ acpi_video_unregister_backlight();
+}
+
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED &&
- acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ val == BACKLIGHT_REGISTERED)
+ schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6607f3c6ace1..a39e85f9efa9 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2834,7 +2834,7 @@ static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-static struct vm_operations_struct binder_vm_ops = {
+static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
.close = binder_vma_close,
.fault = binder_vm_fault,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6d17a3b65ef7..15e40ee62a94 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
config ATA_ACPI
bool "ATA ACPI Support"
- depends on ACPI && PCI
+ depends on ACPI
default y
help
This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7e62751abfac..a46660204e3a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -351,6 +351,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* JMicron 362B and 362C have an AHCI function with IDE class code */
{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
+ /* May need to update quirk_jmicron_async_suspend() for additions */
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@@ -1451,18 +1452,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
- /*
- * The JMicron chip 361/363 contains one SATA controller and one
- * PATA controller,for powering on these both controllers, we must
- * follow the sequence one by one, otherwise one of them can not be
- * powered on successfully, so here we disable the async suspend
- * method for these chips.
- */
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
- device_disable_async_suspend(&pdev->dev);
-
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
+#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
+#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
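
A note on the IS_ENABLED() change above: IS_ENABLED() only understands Kconfig symbols, which the build system defines to 1 (or to <name>_MODULE). __BIG_ENDIAN comes from the byteorder headers, where it is defined to 4321 when it is defined at all, so IS_ENABLED(__BIG_ENDIAN) always evaluated to 0 and the native-endian path was dead code on big-endian MIPS. A hedged sketch of the corrected test:

#include <linux/io.h>
#include <linux/kconfig.h>

static inline u32 demo_readreg(void __iomem *addr)
{
	/* CONFIG_CPU_BIG_ENDIAN is a genuine Kconfig symbol, so this
	 * folds to a compile-time constant either way */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(addr);	/* native (big) endian */
	return readl_relaxed(addr);		/* little-endian access */
}
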
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 614c78f510f0..1befb114c384 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
+ .acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
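
For context on the new match table: PCI_CLASS_STORAGE_SATA_AHCI is the 24-bit class code 0x010601 and the 0xffffff mask makes the comparison exact, so the driver binds to any ACPI device whose _CLS object advertises the AHCI class, even when there is no PCI parent. A sketch of a table mixing _HID and _CLS matching; the "DEMO0001" ID is made up:

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci_ids.h>

static const struct acpi_device_id demo_acpi_match[] = {
	{ "DEMO0001", 0 },	/* hypothetical _HID entry */
	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
	{ },
};
MODULE_DEVICE_TABLE(acpi, demo_acpi_match);
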
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..b79cb10e289e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
@@ -2478,6 +2460,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+ dev->max_sectors);
+
if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -4146,6 +4132,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ /*
+ * Causes silent data corruption with higher max sects.
+ * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+ */
+ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4174,9 +4166,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4222,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4237,6 +4230,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4501,7 +4499,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* On some disks, this command causes spin-up, so we need a longer timeout */
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -4754,6 +4753,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
/**
* ata_qc_new_init - Request an available ATA command, and initialize it
* @dev: Device from whom we request an available command structure
+ * @tag: tag
*
* LOCKING:
* None.
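
Worth knowing when reading the blacklist hunks above: the model and revision strings are shell-style glob patterns matched via glob_match(), which is why appending the underscore in "Micron_M5[15]0_*" tightens the entry to the intended M510/M550 model strings. An illustrative use of the matcher:

#include <linux/glob.h>

static bool demo_blacklisted(const char *model)
{
	/* '[15]' matches one character, '1' or '5'; '*' matches any tail */
	return glob_match("Micron_M5[15]0_*", model);
}
/* demo_blacklisted("Micron_M550_MTFDDAK256MAY") returns true */
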
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
-/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
+ } else if (vendor == 0x11ab && devid == 0x4140) {
+ /* Marvell 4140 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* port 4 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 4)
+ link->flags |= ATA_LFLAG_DISABLED;
+ }
}
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
@@ -2568,7 +2551,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(args->id) &&
+ !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
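
On the READ CAPACITY(16) hunk above: the 0x80 OR-ed into rbuf[14] is the LBPME (Logical Block Provisioning Management Enabled) bit, so withholding it for ATA_HORKAGE_NOTRIM devices makes the SCSI layer treat the disk as fully provisioned and never issue discards. The decision, factored out as a hedged sketch:

#include <linux/libata.h>

static bool demo_advertise_trim(const u16 *id, unsigned int horkage)
{
	/* advertise thin provisioning only when the drive claims TRIM
	 * support and the NOTRIM quirk is not set */
	return ata_id_has_trim(id) && !(horkage & ATA_HORKAGE_NOTRIM);
}
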
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
+ else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ mode = "forced_unsupported";
else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index a9b0c820f2eb..80fe0f6fed29 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -834,7 +834,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
return -ENOMEM;
}
- acdev->clk = clk_get(&pdev->dev, NULL);
+ acdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(acdev->clk)) {
dev_warn(&pdev->dev, "Clock not found\n");
return PTR_ERR(acdev->clk);
@@ -843,9 +843,8 @@ static int arasan_cf_probe(struct platform_device *pdev)
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
- ret = -ENOMEM;
dev_warn(&pdev->dev, "alloc host fail\n");
- goto free_clk;
+ return -ENOMEM;
}
ap = host->ports[0];
@@ -894,7 +893,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
ret = cf_init(acdev);
if (ret)
- goto free_clk;
+ return ret;
cf_card_detect(acdev, 0);
@@ -904,8 +903,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
return 0;
cf_exit(acdev);
-free_clk:
- clk_put(acdev->clk);
+
return ret;
}
@@ -916,7 +914,6 @@ static int arasan_cf_remove(struct platform_device *pdev)
ata_host_detach(host);
cf_exit(acdev);
- clk_put(acdev->clk);
return 0;
}
@@ -968,7 +965,7 @@ static struct platform_driver arasan_cf_driver = {
module_platform_driver(arasan_cf_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
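
The probe-path simplification above comes straight from switching clk_get() to devm_clk_get(): once the clock is a managed resource, every clk_put() and the free_clk error label become redundant, because devres releases the clock when the device unbinds. The pattern in isolation (demo_probe is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* no clk_put() on any error path or in .remove(): the devres
	 * core drops the reference when the device unbinds */
	return 0;
}
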
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 47e418b8c8ba..4d1a5d2c4287 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -143,18 +143,6 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
- /*
- * The JMicron chip 361/363 contains one SATA controller and one
- * PATA controller,for powering on these both controllers, we must
- * follow the sequence one by one, otherwise one of them can not be
- * powered on successfully, so here we disable the async suspend
- * method for these chips.
- */
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
- (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
- pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
- device_disable_async_suspend(&pdev->dev);
-
return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 6d08446b877c..12fe0f3bb7e9 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -27,12 +27,11 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
-#include <asm/gpio.h>
-
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index d49a5193b7de..8804127b108c 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -861,10 +861,6 @@ MODULE_DEVICE_TABLE(of, sata_rcar_match);
static const struct platform_device_id sata_rcar_id_table[] = {
{ "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
{ "sata-r8a7779", RCAR_GEN1_SATA },
- { "sata-r8a7790", RCAR_GEN2_SATA },
- { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
- { "sata-r8a7791", RCAR_GEN2_SATA },
- { "sata-r8a7793", RCAR_GEN2_SATA },
{ },
};
MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 /* ECC initialization. */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 5b93852392b8..816de9eaac26 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -90,17 +92,19 @@ void ks0108_displaystate(unsigned char state)
void ks0108_startline(unsigned char startline)
{
- ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7));
+ ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) |
+ bit(7));
}
void ks0108_address(unsigned char address)
{
- ks0108_writedata(min(address,(unsigned char)63) | bit(6));
+ ks0108_writedata(min_t(unsigned char, address, 63) | bit(6));
}
void ks0108_page(unsigned char page)
{
- ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7));
+ ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) |
+ bit(5) | bit(7));
}
EXPORT_SYMBOL_GPL(ks0108_writedata);
@@ -121,52 +125,71 @@ unsigned char ks0108_isinited(void)
}
EXPORT_SYMBOL_GPL(ks0108_isinited);
-/*
- * Module Init & Exit
- */
-
-static int __init ks0108_init(void)
+static void ks0108_parport_attach(struct parport *port)
{
- int result;
- int ret = -EINVAL;
-
- ks0108_parport = parport_find_base(ks0108_port);
- if (ks0108_parport == NULL) {
- printk(KERN_ERR KS0108_NAME ": ERROR: "
- "parport didn't find %i port\n", ks0108_port);
- goto none;
- }
-
- ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
- NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
- if (ks0108_pardevice == NULL) {
- printk(KERN_ERR KS0108_NAME ": ERROR: "
- "parport didn't register new device\n");
- goto none;
+ struct pardev_cb ks0108_cb;
+
+ if (port->base != ks0108_port)
+ return;
+
+ memset(&ks0108_cb, 0, sizeof(ks0108_cb));
+ ks0108_cb.flags = PARPORT_DEV_EXCL;
+ ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME,
+ &ks0108_cb, 0);
+ if (!ks0108_pardevice) {
+ pr_err("ERROR: parport didn't register new device\n");
+ return;
}
-
- result = parport_claim(ks0108_pardevice);
- if (result != 0) {
- printk(KERN_ERR KS0108_NAME ": ERROR: "
- "can't claim %i parport, maybe in use\n", ks0108_port);
- ret = result;
- goto registered;
+ if (parport_claim(ks0108_pardevice)) {
+ pr_err("could not claim access to parport %i. Aborting.\n",
+ ks0108_port);
+ goto err_unreg_device;
}
+ ks0108_parport = port;
ks0108_inited = 1;
- return 0;
+ return;
-registered:
+err_unreg_device:
parport_unregister_device(ks0108_pardevice);
-
-none:
- return ret;
+ ks0108_pardevice = NULL;
}
-static void __exit ks0108_exit(void)
+static void ks0108_parport_detach(struct parport *port)
{
+ if (port->base != ks0108_port)
+ return;
+
+ if (!ks0108_pardevice) {
+ pr_err("%s: already unregistered.\n", KS0108_NAME);
+ return;
+ }
+
parport_release(ks0108_pardevice);
parport_unregister_device(ks0108_pardevice);
+ ks0108_pardevice = NULL;
+ ks0108_parport = NULL;
+}
+
+/*
+ * Module Init & Exit
+ */
+
+static struct parport_driver ks0108_parport_driver = {
+ .name = "ks0108",
+ .match_port = ks0108_parport_attach,
+ .detach = ks0108_parport_detach,
+ .devmodel = true,
+};
+
+static int __init ks0108_init(void)
+{
+ return parport_register_driver(&ks0108_parport_driver);
+}
+
+static void __exit ks0108_exit(void)
+{
+ parport_unregister_driver(&ks0108_parport_driver);
}
module_init(ks0108_init);
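
The conversion above replaces the one-shot parport_find_base() at module init with the parport device model: parport_register_driver() arranges for match_port() to run against every port known now or hotplugged later, and detach() undoes the work per port. A skeletal driver in the same style, with demo_* names that are illustrative:

#include <linux/module.h>
#include <linux/parport.h>

static void demo_attach(struct parport *port)
{
	/* claim the port here if it is the one this driver wants */
}

static void demo_detach(struct parport *port)
{
	/* undo whatever demo_attach() set up for this port */
}

static struct parport_driver demo_driver = {
	.name		= "demo",
	.match_port	= demo_attach,
	.detach		= demo_detach,
	.devmodel	= true,
};

static int __init demo_init(void)
{
	return parport_register_driver(&demo_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	parport_unregister_driver(&demo_driver);
}
module_exit(demo_exit);
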
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 527d291706e8..6b2a84e7f2be 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index fd3347d9f153..1782f3aa386e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,7 +63,7 @@ struct driver_private {
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
- * @device - pointer back to the struct class that this structure is
+ * @device - pointer back to the struct device that this structure is
* associated with.
*
* Nothing outside of the driver core should ever touch these fields.
@@ -134,6 +134,7 @@ extern int devres_release_all(struct device *dev);
/* /sys/devices directory */
extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dafae6d2f7ac..334ec7ef1960 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -534,6 +534,52 @@ static DEVICE_ATTR_RO(dev);
struct kset *devices_kset;
/**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s before %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s after %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&dev->kobj.entry, &devices_kset->list);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
* device_create_file - create sysfs attribute file for device.
* @dev: device.
* @attr: device attribute descriptor.
@@ -662,6 +708,9 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ INIT_LIST_HEAD(&dev->msi_list);
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1252,6 +1301,19 @@ void device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_unregister);
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *p;
+
+ if (n) {
+ p = to_device_private_parent(n);
+ dev = p->device;
+ }
+ return dev;
+}
+
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -1341,6 +1403,36 @@ int device_for_each_child(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child);
/**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ if (!parent->p)
+ return 0;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @match: Callback function to check device
@@ -1923,12 +2015,15 @@ int device_move(struct device *dev, struct device *new_parent,
break;
case DPM_ORDER_DEV_AFTER_PARENT:
device_pm_move_after(dev, new_parent);
+ devices_kset_move_after(dev, new_parent);
break;
case DPM_ORDER_PARENT_BEFORE_DEV:
device_pm_move_before(new_parent, dev);
+ devices_kset_move_before(new_parent, dev);
break;
case DPM_ORDER_DEV_LAST:
device_pm_move_last(dev);
+ devices_kset_move_last(dev);
break;
}
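
device_for_each_child_reverse(), added above, mirrors device_for_each_child() but walks the children backwards via klist_prev(), the natural order for teardown where later-registered children may depend on earlier ones. A hedged usage sketch:

#include <linux/device.h>

static int demo_quiesce_child(struct device *dev, void *data)
{
	dev_dbg(dev, "quiescing\n");
	return 0;	/* a non-zero return stops the walk early */
}

static void demo_quiesce_children(struct device *parent)
{
	/* children are visited in reverse registration order */
	device_for_each_child_reverse(parent, NULL, demo_quiesce_child);
}
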
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 78720e706176..91bbb1959d8d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -41,7 +41,7 @@ static void change_cpu_under_node(struct cpu *cpu,
cpu->node_id = to_nid;
}
-static int __ref cpu_subsys_online(struct device *dev)
+static int cpu_subsys_online(struct device *dev)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a638bbb1a27a..be0eb4639128 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -304,6 +304,14 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ /*
+ * Ensure devices are listed in devices_kset in the correct order.
+ * It's important to move the device to the end of devices_kset
+ * before calling .probe, because probing can recurse and a parent
+ * device must always come first
+ */
+ devices_kset_move_last(dev);
+
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -399,6 +407,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
*
* This function must be called with @dev lock held. When called for a
* USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
*/
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
@@ -410,10 +420,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
pm_request_idle(dev);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+
return ret;
}
@@ -507,11 +523,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
device_lock(dev);
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
dev_dbg(dev, "async probe completed\n");
pm_request_idle(dev);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+
device_unlock(dev);
put_device(dev);
@@ -541,6 +563,9 @@ static int __device_attach(struct device *dev, bool allow_async)
.want_async = false,
};
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
ret = bus_for_each_drv(dev->bus, NULL, &data,
__device_attach_driver);
if (!ret && allow_async && data.have_async) {
@@ -557,6 +582,9 @@ static int __device_attach(struct device *dev, bool allow_async)
} else {
pm_request_idle(dev);
}
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
}
out_unlock:
device_unlock(dev);
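
All of the pm_runtime_get_sync(dev->parent) calls added above enforce one rule: never probe a child while its parent (typically the bus controller it hangs off) is runtime-suspended. The bracket in isolation, with do_something() standing in for the probe work:

#include <linux/pm_runtime.h>

static int demo_with_parent_awake(struct device *dev,
				  int (*do_something)(struct device *))
{
	int ret;

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);  /* resume and pin it */

	ret = do_something(dev);  /* parent guaranteed powered here */

	if (dev->parent)
		pm_runtime_put(dev->parent);  /* allow it to suspend again */
	return ret;
}
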
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c8a53d1e019f..875464690117 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
- new_dr = NULL;
+ new_res = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
- devres_free(new_dr);
+ devres_free(new_res);
return dr->data;
}
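
The one-liner above fixes an API mix-up: devres_free() expects the data pointer returned by devres_alloc(), not the internal struct devres wrapper, so passing new_dr corrupted memory whenever a matching resource already existed. For reference, a hedged sketch of devres_get() from the caller's side (demo_* names are made up):

#include <linux/device.h>
#include <linux/slab.h>

static void demo_release(struct device *dev, void *res)
{
	/* nothing extra to tear down for a plain buffer */
}

static void *demo_get_shared_buf(struct device *dev)
{
	void *new = devres_alloc(demo_release, 64, GFP_KERNEL);

	if (!new)
		return NULL;
	/* if an equivalent resource is already attached, devres_get()
	 * frees 'new' and returns the existing data pointer; otherwise
	 * it attaches 'new' and returns it */
	return devres_get(dev, new, NULL, NULL);
}
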
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9c4288362a8e..8524450e75bd 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -443,7 +443,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
return -ENOMEM;
fwn->name = kstrdup_const(name, GFP_KERNEL);
if (!fwn->name) {
- kfree(fwn);
+ devres_free(fwn);
return -ENOMEM;
}
@@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev)
kfree(fw_priv);
}
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_priv->buf)
+ err = do_firmware_uevent(fw_priv, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+
static struct class firmware_class = {
.name = "firmware",
.class_attrs = firmware_class_attrs,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 31df474d72f4..560751bad294 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -392,6 +392,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int page_nid;
+ /*
+ * A memory block can have several absent sections from its start;
+ * skip the pfn range that falls in an absent section.
+ */
+ if (!pfn_present(pfn)) {
+ pfn = round_down(pfn + PAGES_PER_SECTION,
+ PAGES_PER_SECTION) - 1;
+ continue;
+ }
+
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
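
The rounding trick above jumps to the last pfn of the current (absent) section so that the loop's pfn++ resumes exactly at the first pfn of the next section. Worked through with an assumed PAGES_PER_SECTION of 32768:

/* assume PAGES_PER_SECTION == 32768 and pfn == 40000 (in section 1) */
pfn = round_down(40000 + 32768, 32768) - 1;  /* round_down(72768) == 65536 */
/* pfn is now 65535; the loop's pfn++ lands on 65536, the first pfn of
 * section 2, skipping the remainder of the absent section */
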
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
new file mode 100644
index 000000000000..1857a5dd0816
--- /dev/null
+++ b/drivers/base/platform-msi.c
@@ -0,0 +1,282 @@
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT 24
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+ irq_write_msi_msg_t write_msg;
+ int devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+ u32 devid;
+
+ devid = desc->platform.msi_priv_data->devid;
+
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ struct irq_data *data;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ info->chip, info->chip_data);
+
+ /*
+ * Save the MSI descriptor in handler_data so that the
+ * irq_write_msi_msg callback can retrieve it (and the
+ * associated device).
+ */
+ data = irq_domain_get_irq_data(domain, virq);
+ data->handler_data = arg->desc;
+
+ return 0;
+}
+#else
+#define platform_msi_set_desc NULL
+#define platform_msi_init NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ BUG_ON(!ops);
+
+ if (ops->msi_init == NULL)
+ ops->msi_init = platform_msi_init;
+ if (ops->set_desc == NULL)
+ ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+ struct platform_msi_priv_data *priv_data;
+
+ priv_data = desc->platform.msi_priv_data;
+
+ priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_mask)
+ chip->irq_mask = irq_chip_mask_parent;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = irq_chip_unmask_parent;
+ if (!chip->irq_eoi)
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_set_affinity)
+ chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = platform_msi_write_msg;
+}
+
+static void platform_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+ struct platform_msi_priv_data *data)
+
+{
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ struct msi_desc *desc;
+
+ desc = alloc_msi_entry(dev);
+ if (!desc)
+ break;
+
+ desc->platform.msi_priv_data = data;
+ desc->platform.msi_index = i;
+ desc->nvec_used = 1;
+
+ list_add_tail(&desc->list, dev_to_msi_list(dev));
+ }
+
+ if (i != nvec) {
+ /* Clean up the mess */
+ platform_msi_free_descs(dev);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @np: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ platform_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ platform_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(np, info, parent);
+ if (domain)
+ domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+
+ return domain;
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct platform_msi_priv_data *priv_data;
+ int err;
+
+ /*
+ * Limit the number of interrupts to 256 per device. Should we
+ * need to bump this up, DEV_ID_SHIFT should be adjusted
+ * accordingly (which would impact the max number of MSI
+ * capable devices).
+ */
+ if (!dev->msi_domain || !write_msi_msg || !nvec ||
+ nvec > (1 << (32 - DEV_ID_SHIFT)))
+ return -EINVAL;
+
+ if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ dev_err(dev, "Incompatible msi_domain, giving up\n");
+ return -EINVAL;
+ }
+
+ /* Already had a helping of MSI? Greed... */
+ if (!list_empty(dev_to_msi_list(dev)))
+ return -EBUSY;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
+ 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ if (priv_data->devid < 0) {
+ err = priv_data->devid;
+ goto out_free_data;
+ }
+
+ priv_data->write_msg = write_msi_msg;
+
+ err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ if (err)
+ goto out_free_id;
+
+ err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ if (err)
+ goto out_free_desc;
+
+ return 0;
+
+out_free_desc:
+ platform_msi_free_descs(dev);
+out_free_id:
+ ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
+out_free_data:
+ kfree(priv_data);
+
+ return err;
+}
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+ struct msi_desc *desc;
+
+ desc = first_msi_entry(dev);
+ if (desc) {
+ struct platform_msi_priv_data *data;
+
+ data = desc->platform.msi_priv_data;
+
+ ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ kfree(data);
+ }
+
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ platform_msi_free_descs(dev);
+}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 063f0ab15259..f80aaaf9f610 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev)
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}
@@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev)
for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index acef9f9f759a..652b5a367c1f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -38,7 +38,7 @@ struct pm_clock_entry {
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
*/
-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
{
int ret;
@@ -50,8 +50,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
dev_err(dev, "%s: failed to enable clk %p, error %d\n",
__func__, ce->clk, ret);
}
-
- return ret;
}
/**
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cdd547bd67df..16550c63d611 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
* This file is released under the GPLv2.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
#include <linux/suspend.h>
#include <linux/export.h>
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
({ \
type (*__routine)(struct device *__d); \
@@ -111,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
stop_latency_ns, "stop");
}
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
+ bool timed)
{
+ if (!timed)
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+
return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
start_latency_ns, "start");
}
@@ -133,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
-static void genpd_acquire_lock(struct generic_pm_domain *genpd)
-{
- DEFINE_WAIT(wait);
-
- mutex_lock(&genpd->lock);
- /*
- * Wait for the domain to transition into either the active,
- * or the power off state.
- */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status == GPD_STATE_ACTIVE
- || genpd->status == GPD_STATE_POWER_OFF)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-}
-
-static void genpd_release_lock(struct generic_pm_domain *genpd)
-{
- mutex_unlock(&genpd->lock);
-}
-
-static void genpd_set_active(struct generic_pm_domain *genpd)
-{
- if (genpd->resume_count == 0)
- genpd->status = GPD_STATE_ACTIVE;
-}
-
static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
s64 usecs64;
@@ -241,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
}
/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
@@ -248,35 +232,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
* resume a device belonging to it.
*/
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct gpd_link *link;
- DEFINE_WAIT(wait);
int ret = 0;
- /* If the domain's master is being waited for, we have to wait too. */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status != GPD_STATE_WAIT_MASTER)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-
if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
return 0;
- if (genpd->status != GPD_STATE_POWER_OFF) {
- genpd_set_active(genpd);
- return 0;
- }
-
if (genpd->cpuidle_data) {
cpuidle_pause_and_lock();
genpd->cpuidle_data->idle_state->disabled = true;
@@ -291,20 +254,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->master);
- genpd->status = GPD_STATE_WAIT_MASTER;
-
- mutex_unlock(&genpd->lock);
ret = pm_genpd_poweron(link->master);
-
- mutex_lock(&genpd->lock);
-
- /*
- * The "wait for parent" status is guaranteed not to change
- * while the master is powering on.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- wake_up_all(&genpd->status_wait_queue);
if (ret) {
genpd_sd_counter_dec(link->master);
goto err;
@@ -316,13 +267,16 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
goto err;
out:
- genpd_set_active(genpd);
-
+ genpd->status = GPD_STATE_ACTIVE;
return 0;
err:
- list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+ list_for_each_entry_continue_reverse(link,
+ &genpd->slave_links,
+ slave_node) {
genpd_sd_counter_dec(link->master);
+ genpd_queue_power_off_work(link->master);
+ }
return ret;
}
@@ -353,20 +307,18 @@ int pm_genpd_name_poweron(const char *domain_name)
return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
-static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-}
-
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
save_state_latency_ns, "state save");
}
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_restore_dev(struct generic_pm_domain *genpd,
+ struct device *dev, bool timed)
{
+ if (!timed)
+ return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
+
return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
restore_state_latency_ns,
"state restore");
@@ -413,133 +365,30 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @pdd: Domain data of the device to save the state of.
- * @genpd: PM domain the device belongs to.
- */
-static int __pm_genpd_save_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
- int ret = 0;
-
- if (gpd_data->need_restore > 0)
- return 0;
-
- /*
- * If the value of the need_restore flag is still unknown at this point,
- * we trust that pm_genpd_poweroff() has verified that the device is
- * already runtime PM suspended.
- */
- if (gpd_data->need_restore < 0) {
- gpd_data->need_restore = 1;
- return 0;
- }
-
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
- ret = genpd_save_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-
- if (!ret)
- gpd_data->need_restore = 1;
-
- return ret;
-}
-
-/**
- * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @pdd: Domain data of the device to restore the state of.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
- int need_restore = gpd_data->need_restore;
-
- gpd_data->need_restore = 0;
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
-
- /*
- * Call genpd_restore_dev() for recently added devices too (need_restore
- * is negative then).
- */
- if (need_restore)
- genpd_restore_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-}
-
-/**
- * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
- * @genpd: PM domain to check.
- *
- * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
- * a "power off" operation, which means that a "power on" has occured in the
- * meantime, or if its resume_count field is different from zero, which means
- * that one of its devices has been resumed in the meantime.
- */
-static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
-{
- return genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
-}
-
-/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
- queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
* pm_genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
*
* If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, run the runtime suspend callbacks provided by all of
- * the @genpd's devices' drivers and remove power from @genpd.
+ * have been powered down, remove power from @genpd.
*/
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
- unsigned int not_suspended;
- int ret = 0;
+ unsigned int not_suspended = 0;
- start:
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
- * (2) The domain is waiting for its master to power up.
- * (3) One of the domain's devices is being resumed right now.
- * (4) System suspend is in progress.
+ * (2) System suspend is in progress.
*/
if (genpd->status == GPD_STATE_POWER_OFF
- || genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->resume_count > 0 || genpd->prepared_count > 0)
+ || genpd->prepared_count > 0)
return 0;
if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
- not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -557,41 +406,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (not_suspended > genpd->in_progress)
return -EBUSY;
- if (genpd->poweroff_task) {
- /*
- * Another instance of pm_genpd_poweroff() is executing
- * callbacks, so tell it to start over and return.
- */
- genpd->status = GPD_STATE_REPEAT;
- return 0;
- }
-
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
- genpd->status = GPD_STATE_BUSY;
- genpd->poweroff_task = current;
-
- list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
- ret = atomic_read(&genpd->sd_count) == 0 ?
- __pm_genpd_save_device(pdd, genpd) : -EBUSY;
-
- if (genpd_abort_poweroff(genpd))
- goto out;
-
- if (ret) {
- genpd_set_active(genpd);
- goto out;
- }
-
- if (genpd->status == GPD_STATE_REPEAT) {
- genpd->poweroff_task = NULL;
- goto start;
- }
- }
-
if (genpd->cpuidle_data) {
/*
* If cpuidle_data is set, cpuidle should turn the domain off
@@ -604,14 +423,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
cpuidle_pause_and_lock();
genpd->cpuidle_data->idle_state->disabled = false;
cpuidle_resume_and_unlock();
- goto out;
+ return 0;
}
if (genpd->power_off) {
- if (atomic_read(&genpd->sd_count) > 0) {
- ret = -EBUSY;
- goto out;
- }
+ int ret;
+
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
@@ -622,10 +441,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
* happen very often).
*/
ret = genpd_power_off(genpd, true);
- if (ret == -EBUSY) {
- genpd_set_active(genpd);
- goto out;
- }
+ if (ret)
+ return ret;
}
genpd->status = GPD_STATE_POWER_OFF;
@@ -635,10 +452,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_queue_power_off_work(link->master);
}
- out:
- genpd->poweroff_task = NULL;
- wake_up_all(&genpd->status_wait_queue);
- return ret;
+ return 0;
}
/**
@@ -651,9 +465,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
pm_genpd_poweroff(genpd);
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
}
/**
@@ -667,7 +481,6 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data;
bool (*stop_ok)(struct device *__dev);
int ret;
@@ -681,10 +494,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (stop_ok && !stop_ok(dev))
return -EBUSY;
- ret = genpd_stop_dev(genpd, dev);
+ ret = genpd_save_dev(genpd, dev);
if (ret)
return ret;
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ genpd_restore_dev(genpd, dev, true);
+ return ret;
+ }
+
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -693,16 +512,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
-
- /*
- * If we have an unknown state of the need_restore flag, it means none
- * of the runtime PM callbacks has been invoked yet. Let's update the
- * flag to reflect that the current state is active.
- */
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- if (gpd_data->need_restore < 0)
- gpd_data->need_restore = 0;
-
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
@@ -722,8 +531,8 @@ static int pm_genpd_runtime_suspend(struct device *dev)
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
- DEFINE_WAIT(wait);
int ret;
+ bool timed = true;
dev_dbg(dev, "%s()\n", __func__);
@@ -732,39 +541,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
return -EINVAL;
/* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe)
- return genpd_start_dev_no_timing(genpd, dev);
+ if (dev->power.irq_safe) {
+ timed = false;
+ goto out;
+ }
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
- if (ret) {
- mutex_unlock(&genpd->lock);
- return ret;
- }
- genpd->status = GPD_STATE_BUSY;
- genpd->resume_count++;
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- /*
- * If current is the powering off task, we have been called
- * reentrantly from one of the device callbacks, so we should
- * not wait.
- */
- if (!genpd->poweroff_task || genpd->poweroff_task == current)
- break;
- mutex_unlock(&genpd->lock);
+ mutex_unlock(&genpd->lock);
- schedule();
+ if (ret)
+ return ret;
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
- __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
- genpd->resume_count--;
- genpd_set_active(genpd);
- wake_up_all(&genpd->status_wait_queue);
- mutex_unlock(&genpd->lock);
+ out:
+ genpd_start_dev(genpd, dev, timed);
+ genpd_restore_dev(genpd, dev, timed);
return 0;
}
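
The reworked runtime paths above fix the ordering contract: suspend saves
device context before stopping the device and rolls the save back if the stop
fails, while resume starts the device before restoring context. A condensed,
hedged sketch of that contract, using hypothetical foo_* callbacks rather than
the real genpd dev_ops:

/*
 * Illustrative only: foo_save/foo_stop/foo_start/foo_restore are
 * hypothetical stand-ins for the genpd device callbacks.
 */
static int foo_runtime_suspend(struct device *dev)
{
	int ret;

	ret = foo_save(dev);		/* save context while still running */
	if (ret)
		return ret;

	ret = foo_stop(dev);		/* then stop the device */
	if (ret)
		foo_restore(dev);	/* undo the save if the stop failed */

	return ret;
}

static int foo_runtime_resume(struct device *dev)
{
	foo_start(dev);			/* start before touching the hardware */
	foo_restore(dev);		/* then restore the saved context */
	return 0;
}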
@@ -880,7 +671,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
{
struct gpd_link *link;
- if (genpd->status != GPD_STATE_POWER_OFF)
+ if (genpd->status == GPD_STATE_ACTIVE)
return;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
@@ -957,14 +748,14 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count++ == 0) {
genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
}
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
if (genpd->suspend_power_off) {
pm_runtime_put_noidle(dev);
@@ -1099,7 +890,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
- return genpd_start_dev(genpd, dev);
+ return genpd_start_dev(genpd, dev, true);
}
/**
@@ -1227,7 +1018,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
}
/**
@@ -1321,7 +1112,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
- return genpd_start_dev(genpd, dev);
+ return genpd_start_dev(genpd, dev, true);
}
/**
@@ -1437,7 +1228,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->td = *td;
gpd_data->base.dev = dev;
- gpd_data->need_restore = -1;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
@@ -1499,7 +1289,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1516,7 +1306,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1560,7 +1350,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1575,14 +1365,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1603,17 +1393,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
- start:
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
-
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
@@ -1641,7 +1423,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
out:
mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
return ret;
}
@@ -1689,8 +1471,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- start:
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
+
+ if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+ goto out;
+ }
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave != subdomain)
@@ -1698,13 +1486,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
-
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
@@ -1717,7 +1498,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
- genpd_release_lock(genpd);
+out:
+ mutex_unlock(&genpd->lock);
return ret;
}
@@ -1741,7 +1523,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
if (IS_ERR_OR_NULL(genpd) || state < 0)
return -EINVAL;
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->cpuidle_data) {
ret = -EEXIST;
@@ -1772,7 +1554,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
genpd_recalc_cpu_exit_latency(genpd);
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
return ret;
err:
@@ -1809,7 +1591,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
cpuidle_data = genpd->cpuidle_data;
if (!cpuidle_data) {
@@ -1827,7 +1609,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
kfree(cpuidle_data);
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
return ret;
}
@@ -1909,9 +1691,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->in_progress = 0;
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
- init_waitqueue_head(&genpd->status_wait_queue);
- genpd->poweroff_task = NULL;
- genpd->resume_count = 0;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
@@ -1949,6 +1728,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
+EXPORT_SYMBOL_GPL(pm_genpd_init);
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
@@ -2122,7 +1902,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
- * @dev: Device to attach.
+ * @dev: Device to detach.
* @power_off: Currently not used
*
* Try to locate a corresponding generic PM domain, which the device was
@@ -2131,6 +1911,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
+ unsigned int i;
int ret = 0;
pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +1920,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
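
The loop above replaces the old unbounded retry with exponential backoff: the
delay doubles on each pass (1, 2, 4, ... ms) until it would reach
GENPD_RETRY_MAX_MS. A minimal self-contained sketch of the same idiom,
assuming a hypothetical try_remove() operation and a hypothetical
MY_RETRY_MAX_MS cap:

#include <linux/delay.h>
#include <linux/sched.h>

#define MY_RETRY_MAX_MS	250	/* assumed cap, analogous to GENPD_RETRY_MAX_MS */

static int retry_with_backoff(int (*try_remove)(void))
{
	unsigned int i;
	int ret = -EAGAIN;

	for (i = 1; i < MY_RETRY_MAX_MS; i <<= 1) {
		ret = try_remove();
		if (ret != -EAGAIN)
			break;

		mdelay(i);		/* back off: 1, 2, 4, ... ms */
		cond_resched();		/* let other tasks run between tries */
	}

	return ret;
}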
@@ -2177,12 +1960,16 @@ static void genpd_dev_pm_sync(struct device *dev)
* Both generic and legacy Samsung-specific DT bindings are supported to keep
* backwards compatibility with existing DTBs.
*
- * Returns 0 on successfully attached PM domain or negative error code.
+ * Returns 0 on successfully attached PM domain or negative error code. Note
+ * that if a power-domain exists for the device, but it cannot be found or
+ * turned on, then -EPROBE_DEFER is returned so that the device is not
+ * probed now and probing is retried later.
*/
int genpd_dev_pm_attach(struct device *dev)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
+ unsigned int i;
int ret;
if (!dev->of_node)
@@ -2213,15 +2000,17 @@ int genpd_dev_pm_attach(struct device *dev)
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
of_node_put(dev->of_node);
- return PTR_ERR(pd);
+ return -EPROBE_DEFER;
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_add_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
@@ -2229,14 +2018,15 @@ int genpd_dev_pm_attach(struct device *dev)
dev_err(dev, "failed to add to PM domain %s: %d",
pd->name, ret);
of_node_put(dev->of_node);
- return ret;
+ goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- pm_genpd_poweron(pd);
+ ret = pm_genpd_poweron(pd);
- return 0;
+out:
+ return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
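
With the change above, a consumer that probes before its PM domain can be
found or powered on sees -EPROBE_DEFER and is re-probed by the driver core
later. A hedged sketch of a platform driver relying on that contract;
foo_hw_init() is a hypothetical helper:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* May return -EPROBE_DEFER while the domain is not yet usable */
	ret = genpd_dev_pm_attach(&pdev->dev);
	if (ret)
		return ret;	/* -EPROBE_DEFER triggers a later re-probe */

	return foo_hw_init(pdev);
}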
@@ -2284,9 +2074,6 @@ static int pm_genpd_summary_one(struct seq_file *s,
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_WAIT_MASTER] = "wait-master",
- [GPD_STATE_BUSY] = "busy",
- [GPD_STATE_REPEAT] = "off-in-progress",
[GPD_STATE_POWER_OFF] = "off"
};
struct pm_domain_data *pm_data;
@@ -2300,7 +2087,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
/*
* Modifications on the list require holding locks on both
@@ -2335,8 +2122,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, " domain status slaves\n");
- seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "domain status slaves\n");
+ seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 30b7bbfdc558..1710c26ba097 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_suspended_if_enabled(dev))
+ if (pm_runtime_status_suspended(dev))
goto Complete;
pm_runtime_enable(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 677fb2843553..28cd75c535b0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -51,10 +52,17 @@
* order.
* @dynamic: not-created from static DT entries.
 * @available: true/false - marks whether this OPP is available or not
+ * @turbo: true if turbo (boost) OPP
* @rate: Frequency in hertz
- * @u_volt: Nominal voltage in microvolts corresponding to this OPP
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ * frequency from any other OPP's frequency.
* @dev_opp: points back to the device_opp struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
+ * @np: OPP's device node.
*
* This structure stores the OPP information for a given device.
*/
@@ -63,11 +71,34 @@ struct dev_pm_opp {
bool available;
bool dynamic;
+ bool turbo;
unsigned long rate;
+
unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+ unsigned long clock_latency_ns;
struct device_opp *dev_opp;
struct rcu_head rcu_head;
+
+ struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node: list node
+ * @dev: device to which the struct object belongs
+ * @rcu_head: RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+ struct list_head node;
+ const struct device *dev;
+ struct rcu_head rcu_head;
};
/**
@@ -77,10 +108,12 @@ struct dev_pm_opp {
* list.
* RCU usage: nodes are not modified in the list of device_opp,
* however addition is possible and is secured by dev_opp_list_lock
- * @dev: device pointer
* @srcu_head: notifier head to notify the OPP availability changes.
* @rcu_head: RCU callback head used for deferred freeing
+ * @dev_list: list of devices that share these OPPs
* @opp_list: list of opps
+ * @np: struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
@@ -93,10 +126,15 @@ struct dev_pm_opp {
struct device_opp {
struct list_head node;
- struct device *dev;
struct srcu_notifier_head srcu_head;
struct rcu_head rcu_head;
+ struct list_head dev_list;
struct list_head opp_list;
+
+ struct device_node *np;
+ unsigned long clock_latency_ns_max;
+ bool shared_opp;
+ struct dev_pm_opp *suspend_opp;
};
/*
@@ -110,12 +148,44 @@ static DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
- rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&dev_opp_list_lock), \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
+static struct device_list_opp *_find_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ list_for_each_entry(list_dev, &dev_opp->dev_list, node)
+ if (list_dev->dev == dev)
+ return list_dev;
+
+ return NULL;
+}
+
+static struct device_opp *_managed_opp(const struct device_node *np)
+{
+ struct device_opp *dev_opp;
+
+ list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
+ if (dev_opp->np == np) {
+ /*
+ * Multiple devices can point to the same OPP table and
+ * so will have the same node pointer, np.
+ *
+ * But the OPPs will be considered shared only if the
+ * OPP table contains an "opp-shared" property.
+ */
+ return dev_opp->shared_opp ? dev_opp : NULL;
+ }
+ }
+
+ return NULL;
+}
+
/**
* _find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -132,21 +202,18 @@ do { \
*/
static struct device_opp *_find_device_opp(struct device *dev)
{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+ struct device_opp *dev_opp;
- if (unlikely(IS_ERR_OR_NULL(dev))) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
- if (tmp_dev_opp->dev == dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
+ list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
+ if (_find_list_dev(dev, dev_opp))
+ return dev_opp;
- return dev_opp;
+ return ERR_PTR(-ENODEV);
}
/**
@@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
v = tmp_opp->u_volt;
@@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
f = tmp_opp->rate;
@@ -214,6 +281,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
+ * @opp: opp for which turbo mode is being verified
+ *
+ * Turbo OPPs are not for normal use, and can be enabled (under certain
+ * conditions) for short durations of time to finish high-throughput work
+ * quickly. Running on them for longer times may overheat the chip.
+ *
+ * Return: true if opp is turbo opp, else false.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that an opp fetched via the
+ * opp_find_freq_{exact,ceil,floor} functions is valid only while the RCU
+ * read lock is held; both that pointer and this function must be used
+ * within the same read-side section, prior to rcu_read_unlock().
+ */
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp *tmp_opp;
+
+ opp_rcu_lockdep_assert();
+
+ tmp_opp = rcu_dereference(opp);
+ if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return false;
+ }
+
+ return tmp_opp->turbo;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
+
+/**
+ * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max clock latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ struct device_opp *dev_opp;
+ unsigned long clock_latency_ns;
+
+ rcu_read_lock();
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp))
+ clock_latency_ns = 0;
+ else
+ clock_latency_ns = dev_opp->clock_latency_ns_max;
+
+ rcu_read_unlock();
+ return clock_latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+
+/**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This is because the returned opp pointer remains valid
+ * for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ opp_rcu_lockdep_assert();
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+ !dev_opp->suspend_opp->available)
+ return NULL;
+
+ return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+
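
The locking rules documented above mean a caller must bracket the lookup and
any accessor calls in a single RCU read-side section. A hedged usage sketch:

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static unsigned long foo_suspend_freq(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	rcu_read_lock();
	opp = dev_pm_opp_get_suspend_opp(dev);
	if (opp)
		freq = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();	/* opp must not be dereferenced after this */

	return freq;
}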
+/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
@@ -407,18 +562,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+/* List-dev Helpers */
+static void _kfree_list_dev_rcu(struct rcu_head *head)
+{
+ struct device_list_opp *list_dev;
+
+ list_dev = container_of(head, struct device_list_opp, rcu_head);
+ kfree_rcu(list_dev, rcu_head);
+}
+
+static void _remove_list_dev(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ list_del(&list_dev->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
+ _kfree_list_dev_rcu);
+}
+
+static struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
+ if (!list_dev)
+ return NULL;
+
+ /* Initialize list-dev */
+ list_dev->dev = dev;
+ list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+
+ return list_dev;
+}
+
/**
- * _add_device_opp() - Allocate a new device OPP table
+ * _add_device_opp() - Find device OPP table or allocate a new one
* @dev: device for which we do this operation
*
- * New device node which uses OPPs - used when multiple devices with OPP tables
- * are maintained.
+ * It tries to find an existing table first; if it couldn't find one, it
+ * allocates a new OPP table and returns that.
*
* Return: valid device_opp pointer if success, else NULL.
*/
static struct device_opp *_add_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
+ struct device_list_opp *list_dev;
+
+ /* Check for existing list for 'dev' first */
+ dev_opp = _find_device_opp(dev);
+ if (!IS_ERR(dev_opp))
+ return dev_opp;
/*
* Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +622,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
if (!dev_opp)
return NULL;
- dev_opp->dev = dev;
+ INIT_LIST_HEAD(&dev_opp->dev_list);
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ kfree(dev_opp);
+ return NULL;
+ }
+
srcu_init_notifier_head(&dev_opp->srcu_head);
INIT_LIST_HEAD(&dev_opp->opp_list);
@@ -438,6 +639,185 @@ static struct device_opp *_add_device_opp(struct device *dev)
}
/**
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head: RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
+{
+ struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+
+ kfree_rcu(device_opp, rcu_head);
+}
+
+/**
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
+ *
+ * Removes/frees device OPP table if it doesn't contain any OPPs.
+ */
+static void _remove_device_opp(struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ if (!list_empty(&dev_opp->opp_list))
+ return;
+
+ list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+ node);
+
+ _remove_list_dev(list_dev, dev_opp);
+
+ /* dev_list must be empty now */
+ WARN_ON(!list_empty(&dev_opp->dev_list));
+
+ list_del_rcu(&dev_opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ _kfree_device_rcu);
+}
+
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head: RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
+{
+ struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+
+ kfree_rcu(opp, rcu_head);
+}
+
+/**
+ * _opp_remove() - Remove an OPP from a table definition
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp: pointer to the OPP to remove
+ * @notify: OPP_EVENT_REMOVE notification should be sent or not
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp, bool notify)
+{
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ if (notify)
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ list_del_rcu(&opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ _remove_device_opp(dev_opp);
+}
+
+/**
+ * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * @dev: device for which we do this operation
+ * @freq: OPP to remove with matching 'freq'
+ *
+ * This function removes an opp from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ struct device_opp *dev_opp;
+ bool found = false;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp))
+ goto unlock;
+
+ list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ if (opp->rate == freq) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
+ __func__, freq);
+ goto unlock;
+ }
+
+ _opp_remove(dev_opp, opp, true);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+
+static struct dev_pm_opp *_allocate_opp(struct device *dev,
+ struct device_opp **dev_opp)
+{
+ struct dev_pm_opp *opp;
+
+ /* allocate new OPP node */
+ opp = kzalloc(sizeof(*opp), GFP_KERNEL);
+ if (!opp)
+ return NULL;
+
+ INIT_LIST_HEAD(&opp->node);
+
+ *dev_opp = _add_device_opp(dev);
+ if (!*dev_opp) {
+ kfree(opp);
+ return NULL;
+ }
+
+ return opp;
+}
+
+static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+ struct device_opp *dev_opp)
+{
+ struct dev_pm_opp *opp;
+ struct list_head *head = &dev_opp->opp_list;
+
+ /*
+ * Insert new OPP in order of increasing frequency and discard if
+ * already present.
+ *
+ * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * loop, don't replace it with head otherwise it will become an infinite
+ * loop.
+ */
+ list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ if (new_opp->rate > opp->rate) {
+ head = &opp->node;
+ continue;
+ }
+
+ if (new_opp->rate < opp->rate)
+ break;
+
+ /* Duplicate OPPs */
+ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+ __func__, opp->rate, opp->u_volt, opp->available,
+ new_opp->rate, new_opp->u_volt, new_opp->available);
+
+ return opp->available && new_opp->u_volt == opp->u_volt ?
+ 0 : -EEXIST;
+ }
+
+ new_opp->dev_opp = dev_opp;
+ list_add_rcu(&new_opp->node, head);
+
+ return 0;
+}
+
+/**
* _opp_add_dynamic() - Allocate a dynamic OPP.
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
@@ -467,64 +847,29 @@ static struct device_opp *_add_device_opp(struct device *dev)
static int _opp_add_dynamic(struct device *dev, unsigned long freq,
long u_volt, bool dynamic)
{
- struct device_opp *dev_opp = NULL;
- struct dev_pm_opp *opp, *new_opp;
- struct list_head *head;
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp;
int ret;
- /* allocate new OPP node */
- new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp)
- return -ENOMEM;
-
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
/* populate the opp table */
new_opp->rate = freq;
new_opp->u_volt = u_volt;
new_opp->available = true;
new_opp->dynamic = dynamic;
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
- ret = -ENOMEM;
- goto free_opp;
- }
-
- head = &dev_opp->opp_list;
- goto list_add;
- }
-
- /*
- * Insert new OPP in order of increasing frequency
- * and discard if already present
- */
- head = &dev_opp->opp_list;
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
- if (new_opp->rate <= opp->rate)
- break;
- else
- head = &opp->node;
- }
-
- /* Duplicate OPPs ? */
- if (new_opp->rate == opp->rate) {
- ret = opp->available && new_opp->u_volt == opp->u_volt ?
- 0 : -EEXIST;
-
- dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->u_volt, opp->available,
- new_opp->rate, new_opp->u_volt, new_opp->available);
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
goto free_opp;
- }
-list_add:
- new_opp->dev_opp = dev_opp;
- list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
/*
@@ -535,20 +880,52 @@ list_add:
return 0;
free_opp:
+ _opp_remove(dev_opp, new_opp, false);
+unlock:
mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
return ret;
}
+/* TODO: Support multiple regulators */
+static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
+{
+ u32 microvolt[3] = {0};
+ int count, ret;
+
+ count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ if (!count)
+ return 0;
+
+ /* There can be one or three elements here */
+ if (count != 1 && count != 3) {
+ dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
+ count);
+ if (ret) {
+ dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ opp->u_volt = microvolt[0];
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+
+ return 0;
+}
+
/**
- * dev_pm_opp_add() - Add an OPP table from a table definitions
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
+ * @np: device node
*
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
+ * This function adds an opp definition to the opp list and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -562,108 +939,119 @@ free_opp:
 * -EEXIST	Freq is the same but volt is different OR
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
*/
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- return _opp_add_dynamic(dev, freq, u_volt, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
-/**
- * _kfree_opp_rcu() - Free OPP RCU handler
- * @head: RCU head
- */
-static void _kfree_opp_rcu(struct rcu_head *head)
-{
- struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
- kfree_rcu(opp, rcu_head);
-}
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
-/**
- * _kfree_device_rcu() - Free device_opp RCU handler
- * @head: RCU head
- */
-static void _kfree_device_rcu(struct rcu_head *head)
-{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
+ }
- kfree_rcu(device_opp, rcu_head);
-}
+ /*
+ * Rate is defined as an unsigned long in the clk API, so cast it
+ * explicitly to that type. This must be revisited once rate is
+ * guaranteed to be 64 bit in the clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_get_microvolt(new_opp, dev);
+ if (ret)
+ goto free_opp;
+
+ if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
+ new_opp->u_amp = val;
+
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (dev_opp->suspend_opp)
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, dev_opp->suspend_opp->rate,
+ new_opp->rate);
+ else
+ dev_opp->suspend_opp = new_opp;
+ }
+
+ if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+ dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
-/**
- * _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @opp: pointer to the OPP to remove
- *
- * This function removes an opp definition from the opp list.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * It is assumed that the caller holds required mutex for an RCU updater
- * strategy.
- */
-static void _opp_remove(struct device_opp *dev_opp,
- struct dev_pm_opp *opp)
-{
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
- list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
- if (list_empty(&dev_opp->opp_list)) {
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
- _kfree_device_rcu);
- }
+free_opp:
+ _opp_remove(dev_opp, new_opp, false);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
- * @freq: OPP to remove with matching 'freq'
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
*
- * This function removes an opp from the opp list.
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq is the same but volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
*/
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
- struct dev_pm_opp *opp;
- struct device_opp *dev_opp;
- bool found = false;
-
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
-
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- goto unlock;
-
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
- if (opp->rate == freq) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
- __func__, freq);
- goto unlock;
- }
-
- _opp_remove(dev_opp, opp);
-unlock:
- mutex_unlock(&dev_opp_list_lock);
+ return _opp_add_dynamic(dev, freq, u_volt, true);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
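
For reference, a hedged sketch of a driver registering dynamic OPPs with the
API above; the frequencies and voltages are invented examples:

static int foo_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 900000);	/* 500 MHz @ 0.9 V */
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1100000);	/* 1 GHz @ 1.1 V */
	if (ret)
		dev_pm_opp_remove(dev, 500000000);	/* unwind the first OPP */

	return ret;
}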
/**
* _opp_set_availability() - helper to set the availability of an opp
@@ -825,28 +1213,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
#ifdef CONFIG_OF
/**
- * of_init_opp_table() - Initialize opp table from device tree
+ * of_free_opp_table() - Free OPP table entries created from static DT entries
* @dev: device pointer used to lookup device OPPs.
*
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
*/
-int of_init_opp_table(struct device *dev)
+void of_free_opp_table(struct device *dev)
+{
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *opp, *tmp;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Check for existing list for 'dev' */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
+ goto unlock;
+ }
+
+ /* Find if dev_opp manages a single device */
+ if (list_is_singular(&dev_opp->dev_list)) {
+ /* Free static OPPs */
+ list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ if (!opp->dynamic)
+ _opp_remove(dev_opp, opp, true);
+ }
+ } else {
+ _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ }
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_free_opp_table);
+
+void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ of_free_opp_table(cpu_dev);
+ }
+}
+EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
+
+/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
+static struct device_node *
+_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
+{
+ struct device_node *opp_np;
+
+ opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
+ if (!opp_np) {
+ dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
+ __func__, prop->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return opp_np;
+}
+
+/* Returns opp descriptor node for a device. Caller must do of_node_put() */
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+ const struct property *prop;
+
+ prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+ if (!prop)
+ return ERR_PTR(-ENODEV);
+ if (!prop->value)
+ return ERR_PTR(-ENODATA);
+
+ /*
+ * TODO: Support for multiple OPP tables.
+ *
+ * There should be only ONE phandle present in "operating-points-v2"
+ * property.
+ */
+ if (prop->length != sizeof(__be32)) {
+ dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+ const struct property *prop)
+{
+ struct device_node *opp_np, *np;
+ struct device_opp *dev_opp;
+ int ret = 0, count = 0;
+
+ if (!prop->value)
+ return -ENODATA;
+
+ /* Get opp node */
+ opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+ if (IS_ERR(opp_np))
+ return PTR_ERR(opp_np);
+
+ dev_opp = _managed_opp(opp_np);
+ if (dev_opp) {
+ /* OPPs are already managed */
+ if (!_add_list_dev(dev, dev_opp))
+ ret = -ENOMEM;
+ goto put_opp_np;
+ }
+
+ /* We have opp-list node now, iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+	/* There should be one or more OPPs defined */
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto put_opp_np;
+ }
+
+ dev_opp = _find_device_opp(dev);
+ if (WARN_ON(IS_ERR(dev_opp))) {
+ ret = PTR_ERR(dev_opp);
+ goto free_table;
+ }
+
+ dev_opp->np = opp_np;
+ dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+ of_node_put(opp_np);
+ return 0;
+
+free_table:
+ of_free_opp_table(dev);
+put_opp_np:
+ of_node_put(opp_np);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
@@ -881,47 +1420,177 @@ int of_init_opp_table(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq is the same but volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
*/
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
{
+ const struct property *prop;
+
+ /*
+	 * OPPs have two versions of bindings now. The older one is deprecated;
+	 * try the new binding first.
+ */
+ prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+ if (!prop) {
+ /*
+ * Try old-deprecated bindings for backward compatibility with
+ * older dtbs.
+ */
+ return _of_init_opp_table_v1(dev);
+ }
+
+ return _of_init_opp_table_v2(dev, prop);
+}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
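
A hedged sketch of the intended pairing: of_init_opp_table() selects the v2
table when "operating-points-v2" is present and otherwise falls back to the
deprecated binding, and of_free_opp_table() undoes it on the error or remove
path. foo_start() is a hypothetical driver-setup helper:

static int foo_init(struct device *dev)
{
	int ret;

	ret = of_init_opp_table(dev);
	if (ret)
		return ret;

	ret = foo_start(dev);
	if (ret)
		of_free_opp_table(dev);	/* drop the static OPPs again */

	return ret;
}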
+
+int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = of_init_opp_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ of_cpumask_free_opp_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_list_opp *list_dev;
struct device_opp *dev_opp;
- struct dev_pm_opp *opp, *tmp;
+ struct device *dev;
+ int cpu, ret = 0;
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
+ rcu_read_lock();
+
+ dev_opp = _find_device_opp(cpu_dev);
if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
- if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
- IS_ERR_OR_NULL(dev) ?
- "Invalid device" : dev_name(dev),
- error);
- return;
+ ret = -EINVAL;
+ goto out_rcu_read_unlock;
}
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
- if (!opp->dynamic)
- _opp_remove(dev_opp, opp);
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
}
+out_rcu_read_unlock:
+ rcu_read_unlock();
- mutex_unlock(&dev_opp_list_lock);
+ return 0;
}
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should already be set to the mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (IS_ERR(np)) {
+ dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
+ PTR_ERR(np));
+ return -ENOENT;
+ }
+
+	/* Are the OPPs shared? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (IS_ERR(tmp_np)) {
+ dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
+ __func__, PTR_ERR(tmp_np));
+ ret = PTR_ERR(tmp_np);
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
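
Taken together, the helpers above let a cpufreq-style driver discover which
CPUs share the policy CPU's OPP table (v2 bindings only) and then populate
the tables for all of them. A hedged sketch with abbreviated error handling:

static int foo_cpufreq_init(struct device *cpu_dev, cpumask_var_t cpus)
{
	int ret;

	/* The helper expects cpu_dev's own bit to be set already */
	cpumask_set_cpu(cpu_dev->id, cpus);

	ret = of_get_cpus_sharing_opps(cpu_dev, cpus);
	if (ret == -ENOENT)
		pr_debug("no operating-points-v2 table, assuming per-CPU OPPs\n");

	return of_cpumask_init_opp_table(cpus);
}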
#endif
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f1a5d95e7b20..998fa6b23084 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
#else /* CONFIG_PM */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index e56d538d039e..7f3646e459cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+ int ret;
+
+ if (!dev->power.set_latency_tolerance)
+ return -EINVAL;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ ret = pm_qos_sysfs_add_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ pm_qos_sysfs_remove_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ /* Remove the request from user space now */
+ pm_runtime_get_sync(dev);
+ dev_pm_qos_update_user_latency_tolerance(dev,
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
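
A driver opts into this interface by filling in power.set_latency_tolerance
before exposing the sysfs knob. A hedged sketch; foo_apply_ltr() and
FOO_LTR_MAX are hypothetical:

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
		foo_apply_ltr(dev, FOO_LTR_MAX);	/* no constraint */
	else
		foo_apply_ltr(dev, val);
}

static int foo_probe(struct device *dev)
{
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
	return dev_pm_qos_expose_latency_tolerance(dev);
}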
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d2be3f9c211c..a7b46798c81d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 7470004ca810..eb6e67451dec 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- dev->power.wakeirq = wirq;
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
err = device_wakeup_attach_irq(dev, wirq);
- if (err)
- return err;
+ if (!err)
+ dev->power.wakeirq = wirq;
- return 0;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return err;
}
/**
@@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
return;
spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- device_wakeup_detach_irq(dev);
if (wirq->dedicated_irq)
free_irq(wirq->irq, wirq);
kfree(wirq);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 40f71603378c..51f15bc15774 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -281,32 +281,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
* Attach a device wakeirq to the wakeup source so the device
* wake IRQ can be configured automatically for suspend and
* resume.
+ *
+ * Call this with the device's power.lock held.
*/
int device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
- int ret = 0;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
if (!ws) {
dev_err(dev, "forgot to call call device_init_wakeup?\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
- if (ws->wakeirq) {
- ret = -EEXIST;
- goto unlock;
- }
+ if (ws->wakeirq)
+ return -EEXIST;
ws->wakeirq = wakeirq;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
-
- return ret;
+ return 0;
}
/**
@@ -314,20 +307,16 @@ unlock:
* @dev: Device to handle
*
* Removes a device wakeirq from the wakeup source.
+ *
+ * Call this with the device's power.lock held.
*/
void device_wakeup_detach_irq(struct device *dev)
{
struct wakeup_source *ws;
- spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
- if (!ws)
- goto unlock;
-
- ws->wakeirq = NULL;
-
-unlock:
- spin_unlock_irq(&dev->power.lock);
+ if (ws)
+ ws->wakeirq = NULL;
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f3f6d167f3f1..2d75366c61e0 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -16,6 +16,8 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
/**
* device_add_property_set - Add a collection of properties to a device object.
@@ -27,9 +29,10 @@
*/
void device_add_property_set(struct device *dev, struct property_set *pset)
{
- if (pset)
- pset->fwnode.type = FWNODE_PDATA;
+ if (!pset)
+ return;
+ pset->fwnode.type = FWNODE_PDATA;
set_secondary_fwnode(dev, &pset->fwnode);
}
EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -153,6 +156,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval)
@@ -177,6 +181,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u16_array(struct device *dev, const char *propname,
u16 *val, size_t nval)
@@ -201,6 +206,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u32_array(struct device *dev, const char *propname,
u32 *val, size_t nval)
@@ -225,6 +231,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u64_array(struct device *dev, const char *propname,
u64 *val, size_t nval)
@@ -249,6 +256,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string_array(struct device *dev, const char *propname,
const char **val, size_t nval)
@@ -270,6 +278,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property type is not a string.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string(struct device *dev, const char *propname,
const char **val)
@@ -291,9 +300,11 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
else if (is_acpi_node(_fwnode_)) \
_ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
- else \
+ else if (is_pset(_fwnode_)) \
_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
+ else \
+ _ret_ = -ENXIO; \
_ret_; \
})
@@ -431,9 +442,10 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
else if (is_acpi_node(fwnode))
return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
DEV_PROP_STRING, val, nval);
-
- return pset_prop_read_array(to_pset(fwnode), propname,
- DEV_PROP_STRING, val, nval);
+ else if (is_pset(fwnode))
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, nval);
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -461,7 +473,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
DEV_PROP_STRING, val, 1);
- return -ENXIO;
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, 1);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
@@ -533,3 +546,81 @@ bool device_dma_is_coherent(struct device *dev)
return coherent;
}
EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ const char *pm;
+ int err, i;
+
+ err = device_property_read_string(dev, "phy-mode", &pm);
+ if (err < 0)
+ err = device_property_read_string(dev,
+ "phy-connection-type", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+ const char *name, char *addr,
+ int alen)
+{
+ int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+ if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+ return addr;
+ return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+ char *res;
+
+ res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ if (res)
+ return res;
+
+ res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ if (res)
+ return res;
+
+ return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
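
Taken together, the two helpers let an ethernet driver stay firmware-agnostic at probe time. A minimal sketch, assuming a hypothetical platform driver (foo_eth_probe and the random-MAC fallback policy are invented; the helper signatures are from this patch):

	#include <linux/etherdevice.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int foo_eth_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		u8 addr[ETH_ALEN];
		int phy_mode;

		phy_mode = device_get_phy_mode(dev);	/* index usable with phy_modes() */
		if (phy_mode < 0)
			return phy_mode;

		if (!device_get_mac_address(dev, addr, ETH_ALEN))
			eth_random_addr(addr);		/* no valid MAC in firmware */

		/* ... register the netdev using addr and phy_mode ... */
		return 0;
	}
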
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849fc6d3..cc557886ab23 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,14 +136,20 @@ struct regmap {
/* if set, the HW registers are known to match map->reg_defaults */
bool no_sync_defaults;
- struct reg_default *patch;
+ struct reg_sequence *patch;
int patch_regs;
- /* if set, converts bulk rw to single rw */
- bool use_single_rw;
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+ /* if set, converts bulk write to single write */
+ bool use_single_write;
/* if set, the device supports multi write mode */
bool can_multi_write;
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
};
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
- if (!present) {
- kfree(blk);
- return -ENOMEM;
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
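
The new guard only reallocates the cache_present bitmap when the register count actually crosses a BITS_TO_LONGS boundary, and zeroes just the newly added longs. A compile-time illustration of when that happens, assuming a 64-bit build (BITS_PER_LONG == 64):

	static inline void rbtree_present_example(void)
	{
		BUILD_BUG_ON(BITS_TO_LONGS(40) != 1);	/* 40 regs -> one long */
		BUILD_BUG_ON(BITS_TO_LONGS(60) != 1);	/* grow to 60: same long, bitmap reused */
		BUILD_BUG_ON(BITS_TO_LONGS(65) != 2);	/* cross 64: krealloc + memset of the tail */
	}
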
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b9862d741a56..6f8a13ec32a4 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -729,7 +729,7 @@ int regcache_sync_block(struct regmap *map, void *block,
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map) && !map->use_single_rw)
+ if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index 8d304e2a943d..c03ebfd4c731 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -78,37 +78,24 @@ static const struct regmap_bus ac97_regmap_bus = {
.reg_read = regmap_ac97_reg_read,
};
-/**
- * regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
-/**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
MODULE_LICENSE("GPL v2");
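
The rename to __regmap_init_ac97()/__devm_regmap_init_ac97() (and the matching conversions of the i2c, mmio, spi and spmi buses below) exists so the public names can become header macros that mint one lockdep class per call site, letting lockdep distinguish the locks of different regmaps. A sketch of the assumed wrapper shape; the real macro lives in include/linux/regmap.h, which is not part of this diff:

	/* Assumed shape, for illustration only. */
	#define regmap_init_ac97(ac97, config)				\
	({								\
		static struct lock_class_key _key;			\
		__regmap_init_ac97(ac97, config, &_key,			\
				   KBUILD_BASENAME ":" #config);	\
	})
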
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5799a0b9e6cc..f42f2bac6466 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -469,6 +469,87 @@ static const struct file_operations regmap_access_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_cache_only_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_only);
+ ssize_t result;
+ bool was_enabled, require_sync = false;
+ int err;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_only;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0) {
+ map->unlock(map->lock_arg);
+ return result;
+ }
+
+ if (map->cache_only && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_only && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+ require_sync = true;
+ }
+
+ map->unlock(map->lock_arg);
+
+ if (require_sync) {
+ err = regcache_sync(map);
+ if (err)
+ dev_err(map->dev, "Failed to sync cache %d\n", err);
+ }
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_bypass);
+ ssize_t result;
+ bool was_enabled;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_bypass;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0)
+ goto out;
+
+ if (map->cache_bypass && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_bypass && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+ }
+
+out:
+ map->unlock(map->lock_arg);
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_bypass_write_file,
+};
+
void regmap_debugfs_init(struct regmap *map, const char *name)
{
struct rb_node *next;
@@ -518,10 +599,11 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
if (map->max_register || regmap_readable(map, 0)) {
umode_t registers_mode;
- if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
- registers_mode = 0600;
- else
- registers_mode = 0400;
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+ registers_mode = 0600;
+#else
+ registers_mode = 0400;
+#endif
debugfs_create_file("registers", registers_mode, map->debugfs,
map, &regmap_map_fops);
@@ -530,12 +612,13 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}
if (map->cache_type) {
- debugfs_create_bool("cache_only", 0400, map->debugfs,
- &map->cache_only);
+ debugfs_create_file("cache_only", 0600, map->debugfs,
+ &map->cache_only, &regmap_cache_only_fops);
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
&map->cache_dirty);
- debugfs_create_bool("cache_bypass", 0400, map->debugfs,
- &map->cache_bypass);
+ debugfs_create_file("cache_bypass", 0600, map->debugfs,
+ &map->cache_bypass,
+ &regmap_cache_bypass_fops);
}
next = rb_first(&map->range_tree);
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 4b76e33110a2..1a8ec3b2b601 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -209,11 +209,60 @@ static struct regmap_bus regmap_i2c = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 1)
+ return -EINVAL;
+ if (count >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ --count;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg_size != 1 || val_size < 1)
+ return -EINVAL;
+ if (val_size >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+ if (ret == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
return &regmap_i2c;
+ else if (config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
@@ -233,47 +282,34 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
return ERR_PTR(-ENOTSUPP);
}
-/**
- * regmap_init_i2c(): Initialise register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
-/**
- * devm_regmap_init_i2c(): Initialise managed register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 2597600a5d26..38d1f72d869c 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -209,7 +209,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* Read in the statuses, using a single bulk read if possible
* in order to reduce the I/O overheads.
*/
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
data->irq_reg_stride == 1) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
@@ -398,7 +398,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
d->irq_reg_stride == 1) {
d->status_reg_buf = kmalloc(map->format.val_bytes *
chip->num_regs, GFP_KERNEL);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 04a329a377e9..426a57e41ac7 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -296,20 +296,11 @@ err_free:
return ERR_PTR(ret);
}
-/**
- * regmap_init_mmio_clk(): Initialise register map with register clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -317,25 +308,17 @@ struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return regmap_init(dev, &regmap_mmio, ctx, config);
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
-
-/**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -343,8 +326,9 @@ struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 53d1148e80a0..edd9a839d004 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,37 +113,24 @@ static struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
-/**
- * regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spi);
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
-/**
- * devm_regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The map will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index d7026dc33388..7e58f6560399 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -91,36 +91,25 @@ static struct regmap_bus regmap_spmi_base = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
-/**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
static int regmap_spmi_ext_read(void *context,
const void *reg, size_t reg_size,
@@ -222,35 +211,24 @@ static struct regmap_bus regmap_spmi_ext = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
-/**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04f2621..afaf56200674 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -34,7 +35,7 @@
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
- bool *change);
+ bool *change, bool force_write);
static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val);
@@ -93,6 +94,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
bool regmap_readable(struct regmap *map, unsigned int reg)
{
+ if (!map->reg_read)
+ return false;
+
if (map->max_register && reg > map->max_register)
return false;
@@ -515,22 +519,12 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
-/**
- * regmap_init(): Initialise register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions.
- */
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config)
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap *map;
int ret = -EINVAL;
@@ -556,10 +550,14 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
+ lockdep_set_class_and_name(&map->spinlock,
+ lock_key, lock_name);
} else {
mutex_init(&map->mutex);
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
+ lockdep_set_class_and_name(&map->mutex,
+ lock_key, lock_name);
}
map->lock_arg = map;
}
@@ -573,8 +571,13 @@ struct regmap *regmap_init(struct device *dev,
map->reg_stride = config->reg_stride;
else
map->reg_stride = 1;
- map->use_single_rw = config->use_single_rw;
- map->can_multi_write = config->can_multi_write;
+ map->use_single_read = config->use_single_rw || !bus || !bus->read;
+ map->use_single_write = config->use_single_rw || !bus || !bus->write;
+ map->can_multi_write = config->can_multi_write && bus && bus->write;
+ if (bus) {
+ map->max_raw_read = bus->max_raw_read;
+ map->max_raw_write = bus->max_raw_write;
+ }
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -763,7 +766,7 @@ struct regmap *regmap_init(struct device *dev,
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
goto err_map;
- map->use_single_rw = true;
+ map->use_single_write = true;
}
if (!map->format.format_write &&
@@ -899,30 +902,19 @@ err_map:
err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(regmap_init);
+EXPORT_SYMBOL_GPL(__regmap_init);
static void devm_regmap_release(struct device *dev, void *res)
{
regmap_exit(*(struct regmap **)res);
}
-/**
- * devm_regmap_init(): Initialise managed register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions. The
- * map will be automatically freed by the device management code.
- */
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap **ptr, *regmap;
@@ -930,7 +922,8 @@ struct regmap *devm_regmap_init(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- regmap = regmap_init(dev, bus, bus_context, config);
+ regmap = __regmap_init(dev, bus, bus_context, config,
+ lock_key, lock_name);
if (!IS_ERR(regmap)) {
*ptr = regmap;
devres_add(dev, ptr);
@@ -940,7 +933,7 @@ struct regmap *devm_regmap_init(struct device *dev,
return regmap;
}
-EXPORT_SYMBOL_GPL(devm_regmap_init);
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
static void regmap_field_init(struct regmap_field *rm_field,
struct regmap *regmap, struct reg_field reg_field)
@@ -1178,7 +1171,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
ret = _regmap_update_bits(map, range->selector_reg,
range->selector_mask,
win_page << range->selector_shift,
- &page_chg);
+ &page_chg, false);
map->work_buf = orig_work_buf;
@@ -1382,10 +1375,33 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
*/
bool regmap_can_raw_write(struct regmap *map)
{
- return map->bus && map->format.format_val && map->format.format_reg;
+ return map->bus && map->bus->write && map->format.format_val &&
+ map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+ return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+ return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1555,6 +1571,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
+ if (map->max_raw_write && map->max_raw_write < val_len)
+ return -E2BIG;
map->lock(map->lock_arg);
@@ -1624,6 +1642,18 @@ int regmap_fields_write(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_write);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_write_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
/**
* regmap_fields_update_bits(): Perform a read/modify/write cycle
* on the register field
@@ -1669,6 +1699,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
+ size_t total_size = val_bytes * val_count;
if (map->bus && !map->format.parse_inplace)
return -EINVAL;
@@ -1677,9 +1708,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
/*
* Some devices don't support bulk write, for
- * them we have a series of single write operations.
+ * them we have a series of single write operations in the first two if
+ * blocks.
+ *
+ * The first if block is used for memory-mapped I/O; it does not allow
+ * val_bytes of 3, for example.
+ * The second one is used for buses which do not have this limitation
+ * and can write arbitrary value lengths.
*/
- if (!map->bus || map->use_single_rw) {
+ if (!map->bus) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1711,6 +1748,38 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
}
out:
map->unlock(map->lock_arg);
+ } else if (map->use_single_write ||
+ (map->max_raw_write && map->max_raw_write < total_size)) {
+ int chunk_stride = map->reg_stride;
+ size_t chunk_size = val_bytes;
+ size_t chunk_count = val_count;
+
+ if (!map->use_single_write) {
+ chunk_size = map->max_raw_write;
+ if (chunk_size % val_bytes)
+ chunk_size -= chunk_size % val_bytes;
+ chunk_count = total_size / chunk_size;
+ chunk_stride *= chunk_size / val_bytes;
+ }
+
+ map->lock(map->lock_arg);
+ /* Write as many bytes as possible with chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_write(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ chunk_size);
+ if (ret)
+ break;
+ }
+
+ /* Write remaining bytes */
+ if (!ret && chunk_size * i < total_size) {
+ ret = _regmap_raw_write(map, reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ total_size - i * chunk_size);
+ }
+ map->unlock(map->lock_arg);
} else {
void *wval;
@@ -1740,10 +1809,10 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
*
* the (register,newvalue) pairs in regs have not been formatted, but
* they are all in the same page and have been changed to being page
- * relative. The page register has been written if that was neccessary.
+ * relative. The page register has been written if that was necessary.
*/
static int _regmap_raw_multi_reg_write(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
size_t num_regs)
{
int ret;
@@ -1768,8 +1837,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
u8 = buf;
for (i = 0; i < num_regs; i++) {
- int reg = regs[i].reg;
- int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
@@ -1800,17 +1869,19 @@ static unsigned int _regmap_register_page(struct regmap *map,
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
- struct reg_default *regs,
+ struct reg_sequence *regs,
size_t num_regs)
{
int ret;
int i, n;
- struct reg_default *base;
+ struct reg_sequence *base;
unsigned int this_page = 0;
+ unsigned int page_change = 0;
/*
* the set of registers is not necessarily in order, but
* since the order of write must be preserved this algorithm
- * chops the set each time the page changes
+ * chops the set each time the page changes. This also applies
+ * if there is a delay required at any point in the sequence.
*/
base = regs;
for (i = 0, n = 0; i < num_regs; i++, n++) {
@@ -1826,16 +1897,48 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
this_page = win_page;
if (win_page != this_page) {
this_page = win_page;
+ page_change = 1;
+ }
+ }
+
+ /* If we have both a page change and a delay, make sure to
+ * write the regs and apply the delay before we change the
+ * page.
+ */
+
+ if (page_change || regs[i].delay_us) {
+
+ /* For situations where the first write requires
+ * a delay we need to make sure we don't call
+ * raw_multi_reg_write with n=0. This can't
+ * occur with page breaks, as we never write on
+ * the first iteration.
+ */
+ if (regs[i].delay_us && i == 0)
+ n = 1;
+
ret = _regmap_raw_multi_reg_write(map, base, n);
if (ret != 0)
return ret;
+
+ if (regs[i].delay_us)
+ udelay(regs[i].delay_us);
+
base += n;
n = 0;
- }
- ret = _regmap_select_page(map, &base[n].reg, range, 1);
- if (ret != 0)
- return ret;
+
+ if (page_change) {
+ ret = _regmap_select_page(map,
+ &base[n].reg,
+ range, 1);
+ if (ret != 0)
+ return ret;
+
+ page_change = 0;
+ }
+
}
+
}
if (n > 0)
return _regmap_raw_multi_reg_write(map, base, n);
@@ -1843,7 +1946,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
}
static int _regmap_multi_reg_write(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
size_t num_regs)
{
int i;
@@ -1854,6 +1957,9 @@ static int _regmap_multi_reg_write(struct regmap *map,
ret = _regmap_write(map, regs[i].reg, regs[i].def);
if (ret != 0)
return ret;
+
+ if (regs[i].delay_us)
+ udelay(regs[i].delay_us);
}
return 0;
}
@@ -1893,10 +1999,14 @@ static int _regmap_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
unsigned int reg = regs[i].reg;
struct regmap_range_node *range;
+
+ /* Coalesce all the writes between page breaks or delays
+ * in the sequence
+ */
range = _regmap_range_lookup(map, reg);
- if (range) {
- size_t len = sizeof(struct reg_default)*num_regs;
- struct reg_default *base = kmemdup(regs, len,
+ if (range || regs[i].delay_us) {
+ size_t len = sizeof(struct reg_sequence)*num_regs;
+ struct reg_sequence *base = kmemdup(regs, len,
GFP_KERNEL);
if (!base)
return -ENOMEM;
@@ -1929,7 +2039,7 @@ static int _regmap_multi_reg_write(struct regmap *map,
* A value of zero will be returned on success, a negative errno will be
* returned in error cases.
*/
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
int ret;
@@ -1962,7 +2072,7 @@ EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
* be returned in error cases.
*/
int regmap_multi_reg_write_bypassed(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
int num_regs)
{
int ret;
@@ -2050,7 +2160,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
/*
* Some buses or devices flag reads by setting the high bits in the
- * register addresss; since it's always the high bits for all
+ * register address; since it's always the high bits for all
* current formats we can do this here rather than in
* formatting. This may break if we get interesting formats.
*/
@@ -2097,8 +2207,6 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
- WARN_ON(!map->reg_read);
-
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
@@ -2179,11 +2287,22 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
+ if (val_count == 0)
+ return -EINVAL;
map->lock(map->lock_arg);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
+ if (!map->bus->read) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ if (map->max_raw_read && map->max_raw_read < val_len) {
+ ret = -E2BIG;
+ goto out;
+ }
+
/* Physical block read if there's no cache involved */
ret = _regmap_raw_read(map, reg, val, val_len);
@@ -2292,20 +2411,51 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
* Some devices do not support bulk read, for
* them we have a series of single read operations.
*/
- if (map->use_single_rw) {
- for (i = 0; i < val_count; i++) {
- ret = regmap_raw_read(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
- if (ret != 0)
- return ret;
- }
- } else {
+ size_t total_size = val_bytes * val_count;
+
+ if (!map->use_single_read &&
+ (!map->max_raw_read || map->max_raw_read > total_size)) {
ret = regmap_raw_read(map, reg, val,
val_bytes * val_count);
if (ret != 0)
return ret;
+ } else {
+ /*
+ * Some devices do not support bulk read or do not
+ * support large bulk reads; for them we have a series
+ * of read operations.
+ */
+ int chunk_stride = map->reg_stride;
+ size_t chunk_size = val_bytes;
+ size_t chunk_count = val_count;
+
+ if (!map->use_single_read) {
+ chunk_size = map->max_raw_read;
+ if (chunk_size % val_bytes)
+ chunk_size -= chunk_size % val_bytes;
+ chunk_count = total_size / chunk_size;
+ chunk_stride *= chunk_size / val_bytes;
+ }
+
+ /* Read bytes that fit into a multiple of chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+ ret = regmap_raw_read(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ chunk_size);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Read remaining bytes */
+ if (chunk_size * i < total_size) {
+ ret = regmap_raw_read(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ total_size - i * chunk_size);
+ if (ret != 0)
+ return ret;
+ }
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
@@ -2317,7 +2467,34 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
&ival);
if (ret != 0)
return ret;
- map->format.format_val(val + (i * val_bytes), ival, 0);
+
+ if (map->format.format_val) {
+ map->format.format_val(val + (i * val_bytes), ival, 0);
+ } else {
+ /* Devices providing read and write
+ * operations can use the bulk I/O
+ * functions if they define a val_bytes;
+ * we assume that the values are native
+ * endian.
+ */
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+
+ switch (map->format.val_bytes) {
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
}
}
@@ -2327,7 +2504,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
- bool *change)
+ bool *change, bool force_write)
{
int ret;
unsigned int tmp, orig;
@@ -2339,7 +2516,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
tmp = orig & ~mask;
tmp |= val & mask;
- if (tmp != orig) {
+ if (force_write || (tmp != orig)) {
ret = _regmap_write(map, reg, tmp);
if (change)
*change = true;
@@ -2367,7 +2544,7 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
map->unlock(map->lock_arg);
return ret;
@@ -2375,6 +2552,29 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_update_bits);
/**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
+
+/**
* regmap_update_bits_async: Perform a read/modify/write cycle on the register
* map asynchronously
*
@@ -2398,7 +2598,7 @@ int regmap_update_bits_async(struct regmap *map, unsigned int reg,
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, NULL);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
map->async = false;
@@ -2427,7 +2627,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, change);
+ ret = _regmap_update_bits(map, reg, mask, val, change, false);
map->unlock(map->lock_arg);
return ret;
}
@@ -2460,7 +2660,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, change);
+ ret = _regmap_update_bits(map, reg, mask, val, change, false);
map->async = false;
@@ -2552,10 +2752,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* The caller must ensure that this function cannot be called
* concurrently with either itself or regcache_sync().
*/
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
- struct reg_default *p;
+ struct reg_sequence *p;
int ret;
bool bypass;
@@ -2564,7 +2764,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
return 0;
p = krealloc(map->patch,
- sizeof(struct reg_default) * (map->patch_regs + num_regs),
+ sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
GFP_KERNEL);
if (p) {
memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
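
The reg_default -> reg_sequence conversion is what carries the new delay_us handling: init sequences and register patches can now interleave waits without helper code in every driver. A caller sketch with an invented FOO register map (regmap_multi_reg_write() and regmap_write_bits() are the interfaces added or changed above):

	#define FOO_RESET	0x00	/* hypothetical registers */
	#define FOO_PLLCTL	0x01
	#define FOO_IRQ_ACK	0x02
	#define FOO_IRQ_ALL	0xff

	static int foo_hw_init(struct regmap *map)
	{
		static const struct reg_sequence foo_init_seq[] = {
			{ FOO_RESET,  0x01, 100 },	/* write, then udelay(100) */
			{ FOO_PLLCTL, 0x22 },
		};
		int err;

		err = regmap_multi_reg_write(map, foo_init_seq,
					     ARRAY_SIZE(foo_init_seq));
		if (err)
			return err;

		/* regmap_write_bits() writes even when the value is unchanged,
		 * e.g. for write-to-ack status registers. */
		return regmap_write_bits(map, FOO_IRQ_ACK, FOO_IRQ_ALL,
					 FOO_IRQ_ALL);
	}
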
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index be5fffb6da24..023d448ed3fa 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA && GPIOLIB
- select IRQ_DOMAIN if BCMA_HOST_SOC
+ select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 15f2b2e242ea..38f156745d53 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f6018e7cd4c..504899a72966 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -8,10 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
@@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
}
#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
- struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
- if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
- return irq_find_mapping(cc->irq_domain, gpio);
- else
- return -EINVAL;
-}
static void bcma_gpio_irq_unmask(struct irq_data *d)
{
- struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
@@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d)
static void bcma_gpio_irq_mask(struct irq_data *d)
{
- struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = {
static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
{
struct bcma_drv_cc *cc = dev_id;
+ struct gpio_chip *gc = &cc->gpio;
u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
if (!irqs)
return IRQ_NONE;
- for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
- generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+ for_each_set_bit(gpio, &irqs, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
}
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
- int gpio, hwirq, err;
+ int hwirq, err;
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return 0;
- cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
- &irq_domain_simple_ops, cc);
- if (!cc->irq_domain) {
- err = -ENODEV;
- goto err_irq_domain;
- }
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_create_mapping(cc->irq_domain, gpio);
-
- irq_set_chip_data(irq, cc);
- irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
- handle_simple_irq);
- }
-
hwirq = bcma_core_irq(cc->core, 0);
err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
cc);
if (err)
- goto err_req_irq;
+ return err;
bcma_chipco_gpio_intmask(cc, ~0, 0);
bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
- return 0;
-
-err_req_irq:
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_find_mapping(cc->irq_domain, gpio);
-
- irq_dispose_mapping(irq);
+ err = gpiochip_irqchip_add(chip,
+ &bcma_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (err) {
+ free_irq(hwirq, cc);
+ return err;
}
- irq_domain_remove(cc->irq_domain);
-err_irq_domain:
- return err;
+
+ return 0;
}
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
- struct gpio_chip *chip = &cc->gpio;
- int gpio;
-
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return;
bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
free_irq(bcma_core_irq(cc->core, 0), cc);
- for (gpio = 0; gpio < chip->ngpio; gpio++) {
- int irq = irq_find_mapping(cc->irq_domain, gpio);
-
- irq_dispose_mapping(irq);
- }
- irq_domain_remove(cc->irq_domain);
}
#else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
return 0;
}
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
}
#endif
@@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
- chip->to_irq = bcma_gpio_to_irq;
-#endif
+ chip->owner = THIS_MODULE;
+ chip->dev = bcma_bus_get_host_dev(bus);
#if IS_BUILTIN(CONFIG_OF)
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->of_node = cc->core->dev.of_node;
@@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
else
chip->base = -1;
- err = bcma_gpio_irq_domain_init(cc);
+ err = gpiochip_add(chip);
if (err)
return err;
- err = gpiochip_add(chip);
+ err = bcma_gpio_irq_init(cc);
if (err) {
- bcma_gpio_irq_domain_exit(cc);
+ gpiochip_remove(chip);
return err;
}
@@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
- bcma_gpio_irq_domain_exit(cc);
+ bcma_gpio_irq_exit(cc);
gpiochip_remove(&cc->gpio);
return 0;
}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9635f1033ce5..24882c18fcbe 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,11 +7,14 @@
#include "bcma_private.h"
#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
+#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
@@ -268,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ if (bus->host_pci)
+ return &bus->host_pci->dev;
+ else
+ return NULL;
+ case BCMA_HOSTTYPE_SOC:
+ if (bus->host_pdev)
+ return &bus->host_pdev->dev;
+ else
+ return NULL;
+ case BCMA_HOSTTYPE_SDIO:
+ if (bus->host_sdio)
+ return &bus->host_sdio->dev;
+ else
+ return NULL;
+ }
+ return NULL;
+}
+
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
@@ -387,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
+ struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
@@ -409,6 +435,16 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
+ dev = bcma_bus_get_host_dev(bus);
+ /* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
+ * of_default_bus_match_table is exported or in some other way
+ * accessible. This is just a temporary workaround.
+ */
+ if (IS_BUILTIN(CONFIG_BCMA) && dev) {
+ of_platform_populate(dev->of_node, of_default_bus_match_table,
+ NULL, dev);
+ }
+
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
if (bcma_is_core_needed_early(core->id.id))
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 46c282fff104..dd73e1ff1759 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -395,7 +395,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- blk_queue_max_hw_sectors(q, 1024);
+ blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 422b7d84f686..ad80c85e0857 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1110,7 +1110,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
d->ip.rq = NULL;
do {
bio = rq->bio;
- bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
+ bok = !fastfail && !bio->bi_error;
} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1172,7 +1172,7 @@ ktiocomplete(struct frame *f)
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
noskb: if (buf)
- clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ buf->bio->bi_error = -EIO;
goto out;
}
@@ -1185,7 +1185,7 @@ noskb: if (buf)
"aoe: runt data size in read from",
(long) d->aoemajor, d->aoeminor,
skb->len, n);
- clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ buf->bio->bi_error = -EIO;
break;
}
if (n > f->iter.bi_size) {
@@ -1193,7 +1193,7 @@ noskb: if (buf)
"aoe: too-large data size in read from",
(long) d->aoemajor, d->aoeminor,
n, f->iter.bi_size);
- clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ buf->bio->bi_error = -EIO;
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1695,7 +1695,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
if (buf == NULL)
return;
buf->iter.bi_size = 0;
- clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ buf->bio->bi_error = -EIO;
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index e774c50b6842..ffd1947500c6 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
if (rq == NULL)
return;
while ((bio = d->ip.nxbio)) {
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
d->ip.nxbio = bio->bi_next;
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 64ab4951e9d6..b9794aeeb878 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -331,14 +331,12 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
- int err = -EIO;
sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
- goto out;
+ goto io_error;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- err = 0;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
@@ -349,15 +347,20 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
+ int err;
+
err = brd_do_bvec(brd, bvec.bv_page, len,
bvec.bv_offset, rw, sector);
if (err)
- break;
+ goto io_error;
sector += len >> SECTOR_SHIFT;
}
out:
- bio_endio(bio, err);
+ bio_endio(bio);
+ return;
+io_error:
+ bio_io_error(bio);
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
@@ -371,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, unsigned long *pfn, long size)
+ void __pmem **kaddr, unsigned long *pfn)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
@@ -381,13 +384,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
page = brd_insert_page(brd, sector);
if (!page)
return -ENOSPC;
- *kaddr = page_address(page);
+ *kaddr = (void __pmem *)page_address(page);
*pfn = page_to_pfn(page);
- /*
- * TODO: If size > PAGE_SIZE, we could look to see if the next page in
- * the file happens to be mapped to the next page of physical RAM.
- */
return PAGE_SIZE;
}
#else
@@ -500,7 +499,7 @@ static struct brd_device *brd_alloc(int i)
blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
- brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
brd->brd_queue->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
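
The aoe and brd hunks above and the drbd conversions below all track the same block-layer change: bi_end_io callbacks lose their error argument, completion status moves into bio->bi_error, and bio_io_error(bio) is the shorthand for "set -EIO and complete". A completion-handler sketch under the new convention, with the foo_request context invented:

	struct foo_request {
		struct device *dev;
		struct completion done;
	};

	static void foo_endio(struct bio *bio)
	{
		struct foo_request *req = bio->bi_private;

		if (bio->bi_error)	/* 0 on success, negative errno on failure */
			dev_err(req->dev, "bio failed: %d\n", bio->bi_error);

		complete(&req->done);
		bio_put(bio);
	}
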
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 1318e3217cb0..b3868e7a1ffd 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -175,11 +175,11 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
device->md_io.submit_jif = jiffies;
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
else
submit_bio(rw, bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
- if (bio_flagged(bio, BIO_UPTODATE))
+ if (!bio->bi_error)
err = device->md_io.error;
out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 434c77dcc99e..e5e0f19ceda0 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -941,36 +941,27 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref)
}
/* bv_page may be a copy, or may be the original */
-static void drbd_bm_endio(struct bio *bio, int error)
+static void drbd_bm_endio(struct bio *bio)
{
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?!
- * do we want to WARN() on this? */
- if (!error && !uptodate)
- error = -EIO;
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
- if (error) {
+ if (bio->bi_error) {
/* ctx error will hold the completed-last non-zero error code,
* in case error codes differ. */
- ctx->error = error;
+ ctx->error = bio->bi_error;
bm_set_page_io_err(b->bm_pages[idx]);
/* Not identical to on disk version of it.
* Is BM_PAGE_IO_ERROR enough? */
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
- error, idx);
+ bio->bi_error, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
@@ -1031,7 +1022,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
} else {
submit_bio(rw, bio);
/* this should not count as user activity and cause the
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index efd19c2da9c2..015c6e91b756 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1450,7 +1450,6 @@ extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1481,9 +1480,9 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
/* bi_end_io handlers */
-extern void drbd_md_endio(struct bio *bio, int error);
-extern void drbd_peer_request_endio(struct bio *bio, int error);
-extern void drbd_request_endio(struct bio *bio, int error);
+extern void drbd_md_endio(struct bio *bio);
+extern void drbd_peer_request_endio(struct bio *bio);
+extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
@@ -1604,12 +1603,13 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
__release(local);
if (!bio->bi_bdev) {
drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
- bio_endio(bio, -ENODEV);
+ bio->bi_error = -ENODEV;
+ bio_endio(bio);
return;
}
if (drbd_insert_fault(device, fault_type))
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
else
generic_make_request(bio);
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a1518539b858..74d97f4bac34 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2774,7 +2774,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
- blk_queue_merge_bvec(q, drbd_merge_bvec);
q->queue_lock = &resource->req_lock;
device->md_io.page = alloc_page(GFP_KERNEL);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 74df8cfad414..e80cbefbc2b5 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1156,14 +1156,14 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
/* For now, don't allow more than one activity log extent worth of data
* to be discarded in one go. We may need to rework drbd_al_begin_io()
* to allow for even larger discard ranges */
- q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS;
+ blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
/* REALLY? Is stacking secdiscard "legal"? */
if (blk_queue_secdiscard(b))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
} else {
- q->limits.max_discard_sectors = 0;
+ blk_queue_max_discard_sectors(q, 0);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3907202fb9d9..211592682169 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -201,7 +201,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
- bio_endio(m->bio, m->error);
+ m->bio->bi_error = m->error;
+ bio_endio(m->bio);
dec_ap_bio(device);
}
@@ -1153,12 +1154,12 @@ drbd_submit_req_private_bio(struct drbd_request *req)
rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
else
generic_make_request(bio);
put_ldev(device);
} else
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
}
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
@@ -1191,7 +1192,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
drbd_err(device, "could not kmalloc() req\n");
- bio_endio(bio, -ENOMEM);
+ bio->bi_error = -ENOMEM;
+ bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
req->start_jif = start_jif;
@@ -1497,6 +1499,8 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned long start_jif;
+ blk_queue_split(q, &bio, q->bio_split);
+
start_jif = jiffies;
/*
@@ -1508,41 +1512,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
__drbd_make_request(device, bio, start_jif);
}
-/* This is called by bio_add_page().
- *
- * q->max_hw_sectors and other global limits are already enforced there.
- *
- * We need to call down to our lower level device,
- * in case it has special restrictions.
- *
- * We also may need to enforce configured max-bio-bvecs limits.
- *
- * As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset, so no need to ask lower levels.
- */
-int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
-{
- struct drbd_device *device = (struct drbd_device *) q->queuedata;
- unsigned int bio_size = bvm->bi_size;
- int limit = DRBD_MAX_BIO_SIZE;
- int backing_limit;
-
- if (bio_size && get_ldev(device)) {
- unsigned int max_hw_sectors = queue_max_hw_sectors(q);
- struct request_queue * const b =
- device->ldev->backing_bdev->bd_disk->queue;
- if (b->merge_bvec_fn) {
- bvm->bi_bdev = device->ldev->backing_bdev;
- backing_limit = b->merge_bvec_fn(b, bvm, bvec);
- limit = min(limit, backing_limit);
- }
- put_ldev(device);
- if ((limit >> 9) > max_hw_sectors)
- limit = max_hw_sectors << 9;
- }
- return limit;
-}
-
void request_timer_fn(unsigned long data)
{
struct drbd_device *device = (struct drbd_device *) data;
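With merge_bvec_fn gone, drivers no longer bound bio growth at bio_add_page() time; instead, a make_request function accepts arbitrarily large bios and splits them against the queue limits on entry, exactly as the drbd_make_request() hunk above now does. A sketch under that assumption (my_submit is hypothetical; q->bio_split is the per-queue bio_set that blk_alloc_queue() sets up):

void my_submit(void *priv, struct bio *bio);	/* hypothetical */

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	/* may replace bio with the first fragment and requeue the rest */
	blk_queue_split(q, &bio, q->bio_split);

	my_submit(q->queuedata, bio);
}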
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d0fae55d871d..5578c1477ba6 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,12 +65,12 @@ rwlock_t global_state_lock;
/* used for synchronous meta data and bitmap IO
* submitted by drbd_md_sync_page_io()
*/
-void drbd_md_endio(struct bio *bio, int error)
+void drbd_md_endio(struct bio *bio)
{
struct drbd_device *device;
device = bio->bi_private;
- device->md_io.error = error;
+ device->md_io.error = bio->bi_error;
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
@@ -170,31 +170,20 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
-void drbd_peer_request_endio(struct bio *bio, int error)
+void drbd_peer_request_endio(struct bio *bio)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
int is_discard = !!(bio->bi_rw & REQ_DISCARD);
- if (error && __ratelimit(&drbd_ratelimit_state))
+ if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
is_write ? (is_discard ? "discard" : "write")
- : "read", error,
+ : "read", bio->bi_error,
(unsigned long long)peer_req->i.sector);
- if (!error && !uptodate) {
- if (__ratelimit(&drbd_ratelimit_state))
- drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
- is_write ? "write" : "read",
- (unsigned long long)peer_req->i.sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
- if (error)
+ if (bio->bi_error)
set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
@@ -208,24 +197,13 @@ void drbd_peer_request_endio(struct bio *bio, int error)
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
-void drbd_request_endio(struct bio *bio, int error)
+void drbd_request_endio(struct bio *bio)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
struct drbd_device *device = req->device;
struct bio_and_error m;
enum drbd_req_event what;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
- if (!error && !uptodate) {
- drbd_warn(device, "p %s: setting error to -EIO\n",
- bio_data_dir(bio) == WRITE ? "write" : "read");
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
-
/* If this request was aborted locally before,
* but now was completed "successfully",
@@ -259,14 +237,14 @@ void drbd_request_endio(struct bio *bio, int error)
if (__ratelimit(&drbd_ratelimit_state))
drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
- if (!error)
+ if (!bio->bi_error)
panic("possible random memory corruption caused by delayed completion of aborted local request\n");
}
/* to avoid recursion in __req_mod */
- if (unlikely(error)) {
+ if (unlikely(bio->bi_error)) {
if (bio->bi_rw & REQ_DISCARD)
- what = (error == -EOPNOTSUPP)
+ what = (bio->bi_error == -EOPNOTSUPP)
? DISCARD_COMPLETED_NOTSUPP
: DISCARD_COMPLETED_WITH_ERROR;
else
@@ -279,7 +257,7 @@ void drbd_request_endio(struct bio *bio, int error)
what = COMPLETED_OK;
bio_put(req->private_bio);
- req->private_bio = ERR_PTR(error);
+ req->private_bio = ERR_PTR(bio->bi_error);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
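The drbd endio handlers above keep their rate-limited error reporting while switching to the argument-less signature. A self-contained sketch of that pattern; my_endio and my_rs are hypothetical, while DEFINE_RATELIMIT_STATE() and __ratelimit() are the stock kernel helpers:

#include <linux/bio.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);	/* 10 msgs per 5 s */

static void my_endio(struct bio *bio)
{
	if (bio->bi_error && __ratelimit(&my_rs))
		pr_err("I/O error %d near sector %llu\n", bio->bi_error,
		       (unsigned long long)bio->bi_iter.bi_sector);
	bio_put(bio);
}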
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a08cda955285..331363e7de0f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3771,13 +3771,14 @@ struct rb0_cbdata {
struct completion complete;
};
-static void floppy_rb0_cb(struct bio *bio, int err)
+static void floppy_rb0_cb(struct bio *bio)
{
struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
int drive = cbdata->drive;
- if (err) {
- pr_info("floppy: error %d while reading block 0\n", err);
+ if (bio->bi_error) {
+ pr_info("floppy: error %d while reading block 0\n",
+ bio->bi_error);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
}
complete(&cbdata->complete);
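floppy_rb0_cb() above is an instance of the standard synchronous-read idiom: submit one bio, latch the status from the (now error-less) endio callback into private data, and wait on a completion. A sketch of the whole round trip under the v4.3 API; every my_-prefixed name is hypothetical:

#include <linux/bio.h>
#include <linux/completion.h>

struct my_sync_io {
	struct completion done;
	int error;
};

static void my_sync_endio(struct bio *bio)
{
	struct my_sync_io *io = bio->bi_private;

	io->error = bio->bi_error;	/* latch status before waking waiter */
	complete(&io->done);
}

static int my_read_sector(struct block_device *bdev, sector_t sect,
			  struct page *page)
{
	struct my_sync_io io;
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	if (!bio)
		return -ENOMEM;
	init_completion(&io.done);
	io.error = 0;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sect;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_private = &io;
	bio->bi_end_io = my_sync_endio;
	submit_bio(READ, bio);
	wait_for_completion(&io.done);
	bio_put(bio);
	return io.error;
}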
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7a4c9d7f721..f9889b6bc02c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -675,7 +675,7 @@ static void loop_config_discard(struct loop_device *lo)
lo->lo_encrypt_key_size) {
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
- q->limits.max_discard_sectors = 0;
+ blk_queue_max_discard_sectors(q, 0);
q->limits.discard_zeroes_data = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
return;
@@ -683,7 +683,7 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- q->limits.max_discard_sectors = UINT_MAX >> 9;
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
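The loop hunks above replace direct writes to q->limits.max_discard_sectors with the blk_queue_max_discard_sectors() accessor, the same substitution made throughout this series. A sketch of enabling and disabling discard through the accessor; the 512-byte granularity is an illustrative value, not loop's:

static void my_config_discard(struct request_queue *q, bool enable)
{
	if (!enable) {
		blk_queue_max_discard_sectors(q, 0);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		return;
	}
	q->limits.discard_granularity = 512;
	q->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}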
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a2ef09e6704..f504232c1ee7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3756,6 +3756,14 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+ /*
+ * For flush requests, request_idx starts at the end of the
+ * tag space. Since we don't support FLUSH/FUA, simply return
+ * 0 as there's nothing to be done.
+ */
+ if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+ return 0;
+
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)
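The mtip32xx guard above relies on flush requests carrying a request_idx past the driver's real tag space, so per-command setup can simply be skipped for them. A sketch of that shape of blk-mq .init_request callback; MY_MAX_SLOTS and my_alloc_dma are hypothetical stand-ins:

#define MY_MAX_SLOTS 256	/* hypothetical real tag-space size */
int my_alloc_dma(void *driver_data, void *pdu);	/* hypothetical */

static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int request_idx,
			   unsigned int numa_node)
{
	/* indexes at or past the tag space belong to flush requests */
	if (request_idx >= MY_MAX_SLOTS)
		return 0;

	return my_alloc_dma(data, blk_mq_rq_to_pdu(rq));
}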
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0e385d8e9b86..293495a75d3d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -33,6 +33,7 @@
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
+#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <asm/types.h>
@@ -40,8 +41,7 @@
#include <linux/nbd.h>
struct nbd_device {
- int flags;
- int harderror; /* Code of hard error */
+ u32 flags;
struct socket * sock; /* If == NULL, device is not ready, yet */
int magic;
@@ -56,11 +56,24 @@ struct nbd_device {
struct gendisk *disk;
int blksize;
loff_t bytesize;
- pid_t pid; /* pid of nbd-client, if attached */
int xmit_timeout;
- int disconnect; /* a disconnect has been requested by user */
+ bool disconnect; /* a disconnect has been requested by user */
+
+ struct timer_list timeout_timer;
+ struct task_struct *task_recv;
+ struct task_struct *task_send;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbg_dir;
+#endif
};
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static struct dentry *nbd_dbg_dir;
+#endif
+
+#define nbd_name(nbd) ((nbd)->disk->disk_name)
+
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
@@ -113,26 +126,36 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
/*
* Forcibly shutdown the socket causing all listeners to error
*/
-static void sock_shutdown(struct nbd_device *nbd, int lock)
+static void sock_shutdown(struct nbd_device *nbd)
{
- if (lock)
- mutex_lock(&nbd->tx_lock);
- if (nbd->sock) {
- dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
- nbd->sock = NULL;
- }
- if (lock)
- mutex_unlock(&nbd->tx_lock);
+ if (!nbd->sock)
+ return;
+
+ dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+ kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+ nbd->sock = NULL;
+ del_timer_sync(&nbd->timeout_timer);
}
static void nbd_xmit_timeout(unsigned long arg)
{
- struct task_struct *task = (struct task_struct *)arg;
+ struct nbd_device *nbd = (struct nbd_device *)arg;
+ struct task_struct *task;
+
+ if (list_empty(&nbd->queue_head))
+ return;
+
+ nbd->disconnect = true;
+
+ task = READ_ONCE(nbd->task_recv);
+ if (task)
+ force_sig(SIGKILL, task);
- printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
- task->comm, task->pid);
- force_sig(SIGKILL, task);
+ task = READ_ONCE(nbd->task_send);
+ if (task)
+ force_sig(SIGKILL, nbd->task_send);
+
+ dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}
/*
@@ -171,33 +194,12 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (send) {
- struct timer_list ti;
-
- if (nbd->xmit_timeout) {
- init_timer(&ti);
- ti.function = nbd_xmit_timeout;
- ti.data = (unsigned long)current;
- ti.expires = jiffies + nbd->xmit_timeout;
- add_timer(&ti);
- }
+ if (send)
result = kernel_sendmsg(sock, &msg, &iov, 1, size);
- if (nbd->xmit_timeout)
- del_timer_sync(&ti);
- } else
+ else
result = kernel_recvmsg(sock, &msg, &iov, 1, size,
msg.msg_flags);
- if (signal_pending(current)) {
- siginfo_t info;
- printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
- task_pid_nr(current), current->comm,
- dequeue_signal_lock(current, &current->blocked, &info));
- result = -EINTR;
- sock_shutdown(nbd, !send);
- break;
- }
-
if (result <= 0) {
if (result == 0)
result = -EPIPE; /* short read */
@@ -210,6 +212,9 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
sigprocmask(SIG_SETMASK, &oldset, NULL);
tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ if (!send && nbd->xmit_timeout)
+ mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
return result;
}
@@ -333,26 +338,24 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
- goto harderror;
+ return ERR_PTR(result);
}
if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
(unsigned long)ntohl(reply.magic));
- result = -EPROTO;
- goto harderror;
+ return ERR_PTR(-EPROTO);
}
req = nbd_find_request(nbd, *(struct request **)reply.handle);
if (IS_ERR(req)) {
result = PTR_ERR(req);
if (result != -ENOENT)
- goto harderror;
+ return ERR_PTR(result);
dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
reply.handle);
- result = -EBADR;
- goto harderror;
+ return ERR_PTR(-EBADR);
}
if (ntohl(reply.error)) {
@@ -380,18 +383,15 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
}
}
return req;
-harderror:
- nbd->harderror = result;
- return NULL;
}
static ssize_t pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
+ struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
- return sprintf(buf, "%ld\n",
- (long) ((struct nbd_device *)disk->private_data)->pid);
+ return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
static struct device_attribute pid_attr = {
@@ -399,7 +399,7 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_do_it(struct nbd_device *nbd)
+static int nbd_thread_recv(struct nbd_device *nbd)
{
struct request *req;
int ret;
@@ -407,20 +407,43 @@ static int nbd_do_it(struct nbd_device *nbd)
BUG_ON(nbd->magic != NBD_MAGIC);
sk_set_memalloc(nbd->sock->sk);
- nbd->pid = task_pid_nr(current);
+
+ nbd->task_recv = current;
+
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- nbd->pid = 0;
+ nbd->task_recv = NULL;
return ret;
}
- while ((req = nbd_read_stat(nbd)) != NULL)
+ while (1) {
+ req = nbd_read_stat(nbd);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ break;
+ }
+
nbd_end_request(nbd, req);
+ }
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- nbd->pid = 0;
- return 0;
+
+ nbd->task_recv = NULL;
+
+ if (signal_pending(current)) {
+ siginfo_t info;
+
+ ret = dequeue_signal_lock(current, &current->blocked, &info);
+ dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+ task_pid_nr(current), current->comm, ret);
+ mutex_lock(&nbd->tx_lock);
+ sock_shutdown(nbd);
+ mutex_unlock(&nbd->tx_lock);
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
}
static void nbd_clear_que(struct nbd_device *nbd)
@@ -455,6 +478,7 @@ static void nbd_clear_que(struct nbd_device *nbd)
req->errors++;
nbd_end_request(nbd, req);
}
+ dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
@@ -482,6 +506,9 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
nbd->active_req = req;
+ if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
+ mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
if (nbd_send_req(nbd, req) != 0) {
dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
req->errors++;
@@ -503,11 +530,13 @@ error_out:
nbd_end_request(nbd, req);
}
-static int nbd_thread(void *data)
+static int nbd_thread_send(void *data)
{
struct nbd_device *nbd = data;
struct request *req;
+ nbd->task_send = current;
+
set_user_nice(current, MIN_NICE);
while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
/* wait for something to do */
@@ -515,6 +544,20 @@ static int nbd_thread(void *data)
kthread_should_stop() ||
!list_empty(&nbd->waiting_queue));
+ if (signal_pending(current)) {
+ siginfo_t info;
+ int ret;
+
+ ret = dequeue_signal_lock(current, &current->blocked,
+ &info);
+ dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+ task_pid_nr(current), current->comm, ret);
+ mutex_lock(&nbd->tx_lock);
+ sock_shutdown(nbd);
+ mutex_unlock(&nbd->tx_lock);
+ break;
+ }
+
/* extract request */
if (list_empty(&nbd->waiting_queue))
continue;
@@ -528,6 +571,9 @@ static int nbd_thread(void *data)
/* handle request */
nbd_handle_req(nbd, req);
}
+
+ nbd->task_send = NULL;
+
return 0;
}
@@ -538,7 +584,7 @@ static int nbd_thread(void *data)
* { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
*/
-static void do_nbd_request(struct request_queue *q)
+static void nbd_request_handler(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct request *req;
@@ -574,6 +620,9 @@ static void do_nbd_request(struct request_queue *q)
}
}
+static int nbd_dev_dbg_init(struct nbd_device *nbd);
+static void nbd_dev_dbg_close(struct nbd_device *nbd);
+
/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
@@ -597,7 +646,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock)
return -EINVAL;
- nbd->disconnect = 1;
+ nbd->disconnect = true;
nbd_send_req(nbd, &sreq);
return 0;
@@ -625,7 +674,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->sock = sock;
if (max_part > 0)
bdev->bd_invalidated = 1;
- nbd->disconnect = 0; /* we're connected now */
+ nbd->disconnect = false; /* we're connected now */
return 0;
}
return -EINVAL;
@@ -648,6 +697,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_TIMEOUT:
nbd->xmit_timeout = arg * HZ;
+ if (arg)
+ mod_timer(&nbd->timeout_timer,
+ jiffies + nbd->xmit_timeout);
+ else
+ del_timer_sync(&nbd->timeout_timer);
+
return 0;
case NBD_SET_FLAGS:
@@ -666,7 +721,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
struct socket *sock;
int error;
- if (nbd->pid)
+ if (nbd->task_recv)
return -EBUSY;
if (!nbd->sock)
return -EINVAL;
@@ -683,24 +738,24 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
else
blk_queue_flush(nbd->disk->queue, 0);
- thread = kthread_run(nbd_thread, nbd, "%s",
- nbd->disk->disk_name);
+ thread = kthread_run(nbd_thread_send, nbd, "%s",
+ nbd_name(nbd));
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
return PTR_ERR(thread);
}
- error = nbd_do_it(nbd);
+ nbd_dev_dbg_init(nbd);
+ error = nbd_thread_recv(nbd);
+ nbd_dev_dbg_close(nbd);
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
- if (error)
- return error;
- sock_shutdown(nbd, 0);
+
+ sock_shutdown(nbd);
sock = nbd->sock;
nbd->sock = NULL;
nbd_clear_que(nbd);
- dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
kill_bdev(bdev);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
set_device_ro(bdev, false);
@@ -714,7 +769,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
blkdev_reread_part(bdev);
if (nbd->disconnect) /* user requested, ignore socket errors */
return 0;
- return nbd->harderror;
+ return error;
}
case NBD_CLEAR_QUE:
@@ -758,6 +813,161 @@ static const struct block_device_operations nbd_fops =
.ioctl = nbd_ioctl,
};
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
+{
+ struct nbd_device *nbd = s->private;
+
+ if (nbd->task_recv)
+ seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+ if (nbd->task_send)
+ seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
+
+ return 0;
+}
+
+static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nbd_dbg_tasks_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_tasks_ops = {
+ .open = nbd_dbg_tasks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
+{
+ struct nbd_device *nbd = s->private;
+ u32 flags = nbd->flags;
+
+ seq_printf(s, "Hex: 0x%08x\n\n", flags);
+
+ seq_puts(s, "Known flags:\n");
+
+ if (flags & NBD_FLAG_HAS_FLAGS)
+ seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
+ if (flags & NBD_FLAG_READ_ONLY)
+ seq_puts(s, "NBD_FLAG_READ_ONLY\n");
+ if (flags & NBD_FLAG_SEND_FLUSH)
+ seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+ if (flags & NBD_FLAG_SEND_TRIM)
+ seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
+
+ return 0;
+}
+
+static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nbd_dbg_flags_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_flags_ops = {
+ .open = nbd_dbg_flags_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+ struct dentry *dir;
+ struct dentry *f;
+
+ dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
+ if (IS_ERR_OR_NULL(dir)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
+ nbd_name(nbd), PTR_ERR(dir));
+ return PTR_ERR(dir);
+ }
+ nbd->dbg_dir = dir;
+
+ f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ if (IS_ERR_OR_NULL(f)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
+ PTR_ERR(f));
+ return PTR_ERR(f);
+ }
+
+ f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+ if (IS_ERR_OR_NULL(f)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
+ PTR_ERR(f));
+ return PTR_ERR(f);
+ }
+
+ f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+ if (IS_ERR_OR_NULL(f)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
+ PTR_ERR(f));
+ return PTR_ERR(f);
+ }
+
+ f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ if (IS_ERR_OR_NULL(f)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
+ PTR_ERR(f));
+ return PTR_ERR(f);
+ }
+
+ f = debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+ if (IS_ERR_OR_NULL(f)) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
+ PTR_ERR(f));
+ return PTR_ERR(f);
+ }
+
+ return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+ debugfs_remove_recursive(nbd->dbg_dir);
+}
+
+static int nbd_dbg_init(void)
+{
+ struct dentry *dbg_dir;
+
+ dbg_dir = debugfs_create_dir("nbd", NULL);
+ if (IS_ERR(dbg_dir))
+ return PTR_ERR(dbg_dir);
+
+ nbd_dbg_dir = dbg_dir;
+
+ return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+ debugfs_remove_recursive(nbd_dbg_dir);
+}
+
+#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+ return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+}
+
+static int nbd_dbg_init(void)
+{
+ return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+}
+
+#endif
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
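The nbd debugfs code added above is built entirely from the single_open() seq_file pattern: one read-only file per attribute inside a per-device directory. A stripped-down sketch of the same pattern; the my_-prefixed names are hypothetical, the debugfs and seq_file calls are stock:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_dev { int state; };	/* hypothetical device struct */

static int my_state_show(struct seq_file *s, void *unused)
{
	struct my_dev *dev = s->private;

	seq_printf(s, "state: %d\n", dev->state);
	return 0;
}

static int my_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_state_show, inode->i_private);
}

static const struct file_operations my_state_ops = {
	.open		= my_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* usage: dir = debugfs_create_dir(name, parent_dir);          */
/*        debugfs_create_file("state", 0444, dir, dev, &my_state_ops); */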
@@ -811,7 +1021,7 @@ static int __init nbd_init(void)
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
+ disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
if (!disk->queue) {
put_disk(disk);
goto out;
@@ -822,7 +1032,7 @@ static int __init nbd_init(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512;
- disk->queue->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;
@@ -835,6 +1045,8 @@ static int __init nbd_init(void)
printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+ nbd_dbg_init();
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
@@ -842,6 +1054,9 @@ static int __init nbd_init(void)
spin_lock_init(&nbd_dev[i].queue_lock);
INIT_LIST_HEAD(&nbd_dev[i].queue_head);
mutex_init(&nbd_dev[i].tx_lock);
+ init_timer(&nbd_dev[i].timeout_timer);
+ nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
+ nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
init_waitqueue_head(&nbd_dev[i].active_wq);
init_waitqueue_head(&nbd_dev[i].waiting_wq);
nbd_dev[i].blksize = 1024;
@@ -868,6 +1083,9 @@ out:
static void __exit nbd_cleanup(void)
{
int i;
+
+ nbd_dbg_close();
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = 0;
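The nbd timeout rework above replaces an on-stack timer armed around each send with one long-lived per-device watchdog that is re-armed on activity and torn down with del_timer_sync(). A sketch of that lifecycle using the pre-4.15 timer API (function plus unsigned long data); the my_-prefixed names are hypothetical:

#include <linux/timer.h>

struct my_dev {
	struct timer_list timeout_timer;
	unsigned long xmit_timeout;	/* in jiffies; 0 = disabled */
};

static void my_timeout_fn(unsigned long arg)
{
	struct my_dev *dev = (struct my_dev *)arg;
	/* timer (softirq) context: set flags or signal, never sleep */
}

static void my_dev_init(struct my_dev *dev)
{
	init_timer(&dev->timeout_timer);
	dev->timeout_timer.function = my_timeout_fn;
	dev->timeout_timer.data = (unsigned long)dev;
}

static void my_touch(struct my_dev *dev)	/* call on each I/O */
{
	if (dev->xmit_timeout)
		mod_timer(&dev->timeout_timer,
			  jiffies + dev->xmit_timeout);
}

static void my_dev_stop(struct my_dev *dev)
{
	del_timer_sync(&dev->timeout_timer);	/* waits for a running handler */
}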
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..17269a3b85f2 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -222,7 +222,7 @@ static void end_cmd(struct nullb_cmd *cmd)
blk_end_request_all(cmd->rq, 0);
break;
case NULL_Q_BIO:
- bio_endio(cmd->bio, 0);
+ bio_endio(cmd->bio);
break;
}
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
while ((entry = llist_del_all(&cq->list)) != NULL) {
entry = llist_reverse_order(entry);
do {
+ struct request_queue *q = NULL;
+
cmd = container_of(entry, struct nullb_cmd, ll_list);
entry = entry->next;
+ if (cmd->rq)
+ q = cmd->rq->q;
end_cmd(cmd);
- if (cmd->rq) {
- struct request_queue *q = cmd->rq->q;
-
- if (!q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock(q->queue_lock);
- }
+ if (q && !q->mq_ops && blk_queue_stopped(q)) {
+ spin_lock(q->queue_lock);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock(q->queue_lock);
}
} while (entry);
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d1d6141920d3..b97fc3fe0916 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -72,6 +72,10 @@ module_param(nvme_char_major, int, 0);
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0644);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
@@ -103,6 +107,7 @@ struct nvme_queue {
char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
+ struct nvme_command __iomem *sq_cmds_io;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
dma_addr_t sq_dma_addr;
@@ -379,27 +384,28 @@ static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
*
* Safe to use from interrupt context
*/
-static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
{
u16 tail = nvmeq->sq_tail;
- memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (nvmeq->sq_cmds_io)
+ memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+ else
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+
if (++tail == nvmeq->q_depth)
tail = 0;
writel(tail, nvmeq->q_db);
nvmeq->sq_tail = tail;
-
- return 0;
}
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
unsigned long flags;
- int ret;
spin_lock_irqsave(&nvmeq->q_lock, flags);
- ret = __nvme_submit_cmd(nvmeq, cmd);
+ __nvme_submit_cmd(nvmeq, cmd);
spin_unlock_irqrestore(&nvmeq->q_lock, flags);
- return ret;
}
static __le64 **iod_list(struct nvme_iod *iod)
@@ -730,18 +736,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
struct nvme_iod *iod)
{
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
- memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
- cmnd->rw.command_id = req->tag;
+ memcpy(&cmnd, req->cmd, sizeof(cmnd));
+ cmnd.rw.command_id = req->tag;
if (req->nr_phys_segments) {
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
}
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
/*
@@ -754,45 +758,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
{
struct nvme_dsm_range *range =
(struct nvme_dsm_range *)iod_list(iod)[0];
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
range->cattr = cpu_to_le32(0);
range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.command_id = req->tag;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
- cmnd->dsm.nr = 0;
- cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.dsm.opcode = nvme_cmd_dsm;
+ cmnd.dsm.command_id = req->tag;
+ cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+ cmnd.dsm.nr = 0;
+ cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+ struct nvme_command cmnd;
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.command_id = cmdid;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.common.opcode = nvme_cmd_flush;
+ cmnd.common.command_id = cmdid;
+ cmnd.common.nsid = cpu_to_le32(ns->ns_id);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
}
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct nvme_ns *ns)
{
struct request *req = iod_get_private(iod);
- struct nvme_command *cmnd;
+ struct nvme_command cmnd;
u16 control = 0;
u32 dsmgmt = 0;
@@ -804,19 +804,16 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
- memset(cmnd, 0, sizeof(*cmnd));
+ memset(&cmnd, 0, sizeof(cmnd));
+ cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd.rw.command_id = req->tag;
+ cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
- if (blk_integrity_rq(req)) {
- cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+ if (ns->ms) {
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +822,21 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
+ cmnd.rw.reftag = cpu_to_le32(
nvme_block_nr(ns, blk_rq_pos(req)));
break;
}
- } else if (ns->ms)
- control |= NVME_RW_PRINFO_PRACT;
+ if (blk_integrity_rq(req))
+ cmnd.rw.metadata =
+ cpu_to_le64(sg_dma_address(iod->meta_sg));
+ else
+ control |= NVME_RW_PRINFO_PRACT;
+ }
- cmnd->rw.control = cpu_to_le16(control);
- cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ cmnd.rw.control = cpu_to_le16(control);
+ cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ __nvme_submit_cmd(nvmeq, &cmnd);
return 0;
}
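The nvme hunks above all make the same move: build the 64-byte command on the stack instead of in-place in the submission queue, then let one helper copy it in and ring the doorbell, using memcpy_toio() when the SQ lives in controller memory. A sketch of a caller under that scheme (not the driver's exact code; field names follow the 4.3 driver, my_submit_read is hypothetical):

static void my_submit_read(struct nvme_queue *nvmeq, struct nvme_ns *ns,
			   struct request *req, u64 prp1)
{
	struct nvme_command cmnd;

	memset(&cmnd, 0, sizeof(cmnd));
	cmnd.rw.opcode = nvme_cmd_read;
	cmnd.rw.command_id = req->tag;
	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd.rw.prp1 = cpu_to_le64(prp1);
	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	__nvme_submit_cmd(nvmeq, &cmnd);	/* picks CMB vs. host SQ copy */
}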
@@ -1080,7 +1079,8 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
c.common.command_id = req->tag;
blk_mq_free_request(req);
- return __nvme_submit_cmd(nvmeq, &c);
+ __nvme_submit_cmd(nvmeq, &c);
+ return 0;
}
static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1103,7 +1103,8 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
cmd->common.command_id = req->tag;
- return nvme_submit_cmd(nvmeq, cmd);
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1315,12 +1316,7 @@ static void nvme_abort_req(struct request *req)
dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
nvmeq->qid);
- if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
- dev_warn(nvmeq->q_dmadev,
- "Could not abort I/O %d QID %d",
- req->tag, nvmeq->qid);
- blk_mq_free_request(abort_req);
- }
+ nvme_submit_cmd(dev->queues[0], &cmd);
}
static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1374,7 +1370,8 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ if (nvmeq->sq_cmds)
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
kfree(nvmeq);
}
@@ -1447,6 +1444,47 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
spin_unlock_irq(&nvmeq->q_lock);
}
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+ int entry_size)
+{
+ int q_depth = dev->q_depth;
+ unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+
+ if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+ u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+ mem_per_q = round_down(mem_per_q, dev->page_size);
+ q_depth = div_u64(mem_per_q, entry_size);
+
+ /*
+ * Ensure the reduced q_depth is above some threshold where it
+ * would be better to map queues in system memory with the
+ * original depth
+ */
+ if (q_depth < 64)
+ return -ENOMEM;
+ }
+
+ return q_depth;
+}
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ int qid, int depth)
+{
+ if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ unsigned offset = (qid - 1) *
+ roundup(SQ_SIZE(depth), dev->page_size);
+ nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_cmds_io = dev->cmb + offset;
+ } else {
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth)
{
@@ -1459,9 +1497,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
if (!nvmeq->cqes)
goto free_nvmeq;
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- if (!nvmeq->sq_cmds)
+ if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
goto free_cqdma;
nvmeq->q_dmadev = dev->dev;
@@ -1696,6 +1732,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
page_shift = dev_page_max;
}
+ dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+ NVME_CAP_NSSRC(cap) : 0;
+
+ if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
+ writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+
result = nvme_disable_ctrl(dev, cap);
if (result < 0)
return result;
@@ -1856,6 +1898,15 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
return status;
}
+static int nvme_subsys_reset(struct nvme_dev *dev)
+{
+ if (!dev->subsystem)
+ return -ENOTTY;
+
+ writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
+ return 0;
+}
+
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long arg)
{
@@ -1935,7 +1986,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_zeroes_data = 0;
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
- ns->queue->limits.max_discard_sectors = 0xffffffff;
+ blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -1989,7 +2040,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
!ns->ext)
nvme_init_integrity(ns);
- if (ns->ms && !blk_get_integrity(disk))
+ if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
set_capacity(disk, 0);
else
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2020,7 +2071,10 @@ static int nvme_kthread(void *data)
spin_lock(&dev_list_lock);
list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
- if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
+ u32 csts = readl(&dev->bar->csts);
+
+ if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
+ csts & NVME_CSTS_CFS) {
if (work_busy(&dev->reset_work))
continue;
list_del_init(&dev->node);
@@ -2067,7 +2121,6 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
- queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
ns->dev = dev;
ns->queue->queuedata = ns;
@@ -2081,12 +2134,16 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
list_add_tail(&ns->list, &dev->namespaces);
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- if (dev->max_hw_sectors)
+ if (dev->max_hw_sectors) {
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+ blk_queue_max_segments(ns->queue,
+ ((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+ }
if (dev->stripe_size)
blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
if (dev->vwc & NVME_CTRL_VWC_PRESENT)
blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
disk->major = nvme_major;
disk->first_minor = 0;
@@ -2108,8 +2165,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_disk;
add_disk(ns->disk);
- if (ns->ms)
- revalidate_disk(ns->disk);
+ if (ns->ms) {
+ struct block_device *bd = bdget_disk(ns->disk, 0);
+ if (!bd)
+ return;
+ if (blkdev_get(bd, FMODE_READ, NULL)) {
+ bdput(bd);
+ return;
+ }
+ blkdev_reread_part(bd);
+ blkdev_put(bd, FMODE_READ);
+ }
return;
out_free_disk:
kfree(disk);
@@ -2150,6 +2216,58 @@ static int set_queue_count(struct nvme_dev *dev, int count)
return min(result & 0xffff, result >> 16) + 1;
}
+static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 szu, size, offset;
+ u32 cmbloc;
+ resource_size_t bar_size;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ void __iomem *cmb;
+ dma_addr_t dma_addr;
+
+ if (!use_cmb_sqes)
+ return NULL;
+
+ dev->cmbsz = readl(&dev->bar->cmbsz);
+ if (!(NVME_CMB_SZ(dev->cmbsz)))
+ return NULL;
+
+ cmbloc = readl(&dev->bar->cmbloc);
+
+ szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+ size = szu * NVME_CMB_SZ(dev->cmbsz);
+ offset = szu * NVME_CMB_OFST(cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+
+ if (offset > bar_size)
+ return NULL;
+
+ /*
+ * Controllers may support a CMB size larger than their BAR,
+ * for example, due to being behind a bridge. Reduce the CMB to
+ * the reported size of the BAR
+ */
+ if (size > bar_size - offset)
+ size = bar_size - offset;
+
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ cmb = ioremap_wc(dma_addr, size);
+ if (!cmb)
+ return NULL;
+
+ dev->cmb_dma_addr = dma_addr;
+ dev->cmb_size = size;
+ return cmb;
+}
+
+static inline void nvme_release_cmb(struct nvme_dev *dev)
+{
+ if (dev->cmb) {
+ iounmap(dev->cmb);
+ dev->cmb = NULL;
+ }
+}
+
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2168,6 +2286,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
+ if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ result = nvme_cmb_qdepth(dev, nr_io_queues,
+ sizeof(struct nvme_command));
+ if (result > 0)
+ dev->q_depth = result;
+ else
+ nvme_release_cmb(dev);
+ }
+
size = db_bar_size(dev, nr_io_queues);
if (size > 8192) {
iounmap(dev->bar);
@@ -2335,7 +2462,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
int res;
- unsigned nn;
struct nvme_id_ctrl *ctrl;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
@@ -2345,7 +2471,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return -EIO;
}
- nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc;
@@ -2431,6 +2556,8 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+ dev->cmb = nvme_map_cmb(dev);
return 0;
@@ -2811,6 +2938,8 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
case NVME_IOCTL_RESET:
dev_warn(dev->dev, "resetting controller\n");
return nvme_reset(dev);
+ case NVME_IOCTL_SUBSYS_RESET:
+ return nvme_subsys_reset(dev);
default:
return -ENOTTY;
}
@@ -3136,6 +3265,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove_admin(dev);
device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
nvme_free_queues(dev, 0);
+ nvme_release_cmb(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
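The size and offset arithmetic inside nvme_map_cmb() above follows the spec: both CMBSZ.SZ and CMBLOC.OFST are expressed in units of 2^(12 + 4*SZU) bytes, and the window must be clamped to the underlying BAR. A sketch of just that calculation pulled out as a helper, using the macro names from the 4.3 headers:

static u64 my_cmb_bytes(u32 cmbsz, u32 cmbloc, resource_size_t bar_size,
			u64 *offset_out)
{
	u64 szu = 1ULL << (12 + 4 * NVME_CMB_SZU(cmbsz));
	u64 size = szu * NVME_CMB_SZ(cmbsz);
	u64 offset = szu * NVME_CMB_OFST(cmbloc);

	if (offset > bar_size)
		return 0;			/* nothing mappable */
	if (size > bar_size - offset)
		size = bar_size - offset;	/* clamp to the BAR */
	*offset_out = offset;
	return size;				/* pass to ioremap_wc() */
}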
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4c20c228184c..7be2375db7f2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -977,7 +977,7 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
}
}
-static void pkt_end_io_read(struct bio *bio, int err)
+static void pkt_end_io_read(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
@@ -985,9 +985,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_iter.bi_sector, err);
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
- if (err)
+ if (bio->bi_error)
atomic_inc(&pkt->io_errors);
if (atomic_dec_and_test(&pkt->io_wait)) {
atomic_inc(&pkt->run_sm);
@@ -996,13 +996,13 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static void pkt_end_io_packet_write(struct bio *bio, int err)
+static void pkt_end_io_packet_write(struct bio *bio)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
pd->stats.pkt_ended++;
@@ -1340,22 +1340,22 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_queue_bio(pd, pkt->w_bio);
}
-static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
+static void pkt_finish_packet(struct packet_data *pkt, int error)
{
struct bio *bio;
- if (!uptodate)
+ if (error)
pkt->cache_valid = 0;
/* Finish all bios corresponding to this packet */
- while ((bio = bio_list_pop(&pkt->orig_bios)))
- bio_endio(bio, uptodate ? 0 : -EIO);
+ while ((bio = bio_list_pop(&pkt->orig_bios))) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ }
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
- int uptodate;
-
pkt_dbg(2, pd, "pkt %d\n", pkt->id);
for (;;) {
@@ -1384,7 +1384,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
if (atomic_read(&pkt->io_wait) > 0)
return;
- if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
+ if (!pkt->w_bio->bi_error) {
pkt_set_state(pkt, PACKET_FINISHED_STATE);
} else {
pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1401,8 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
break;
case PACKET_FINISHED_STATE:
- uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
- pkt_finish_packet(pkt, uptodate);
+ pkt_finish_packet(pkt, pkt->w_bio->bi_error);
return;
default:
@@ -2332,13 +2331,14 @@ static void pkt_close(struct gendisk *disk, fmode_t mode)
}
-static void pkt_end_io_read_cloned(struct bio *bio, int err)
+static void pkt_end_io_read_cloned(struct bio *bio)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
+ psd->bio->bi_error = bio->bi_error;
bio_put(bio);
- bio_endio(psd->bio, err);
+ bio_endio(psd->bio);
mempool_free(psd, psd_pool);
pkt_bio_finished(pd);
}
@@ -2447,6 +2447,10 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
char b[BDEVNAME_SIZE];
struct bio *split;
+ blk_queue_bounce(q, &bio);
+
+ blk_queue_split(q, &bio, q->bio_split);
+
pd = q->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n",
@@ -2477,8 +2481,6 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
goto end_io;
}
- blk_queue_bounce(q, &bio);
-
do {
sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
@@ -2504,26 +2506,6 @@ end_io:
-static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
- struct bio_vec *bvec)
-{
- struct pktcdvd_device *pd = q->queuedata;
- sector_t zone = get_zone(bmd->bi_sector, pd);
- int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
- int remaining = (pd->settings.size << 9) - used;
- int remaining2;
-
- /*
- * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
- * boundary, pkt_make_request() will split the bio.
- */
- remaining2 = PAGE_SIZE - bmd->bi_size;
- remaining = max(remaining, remaining2);
-
- BUG_ON(remaining < 0);
- return remaining;
-}
-
static void pkt_init_queue(struct pktcdvd_device *pd)
{
struct request_queue *q = pd->disk->queue;
@@ -2531,7 +2513,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
blk_queue_make_request(q, pkt_make_request);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
- blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
}
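pkt_end_io_read_cloned() above shows how stacked drivers complete a parent bio under the v4.3 API: the clone's bi_error must be copied to the parent explicitly, since bio_endio() no longer carries the status. A minimal sketch of that propagation; struct my_stacked and my_clone_endio are hypothetical:

#include <linux/bio.h>
#include <linux/slab.h>

struct my_stacked {
	struct bio *parent;
};

static void my_clone_endio(struct bio *clone)
{
	struct my_stacked *st = clone->bi_private;
	struct bio *parent = st->parent;

	parent->bi_error = clone->bi_error;	/* propagate status upward */
	bio_put(clone);
	bio_endio(parent);
	kfree(st);
}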
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b1612eb16172..d89fcac59515 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -593,7 +593,8 @@ out:
next = bio_list_peek(&priv->list);
spin_unlock_irq(&priv->lock);
- bio_endio(bio, error);
+ bio->bi_error = error;
+ bio_endio(bio);
return next;
}
@@ -605,6 +606,8 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
+ blk_queue_split(q, &bio, q->bio_split);
+
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
bio_list_add(&priv->list, bio);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..d93a0372b37b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+ dout("%s: obj %p\n", __func__, obj_request);
+
+ if (obj_request_img_data_test(obj_request))
+ rbd_osd_copyup_callback(obj_request);
+ else
+ obj_request_done_set(obj_request);
+}
+
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_discard_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
+ rbd_osd_call_callback(obj_request);
+ break;
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
}
static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
+ dout("%s: obj %p\n", __func__, obj_request);
+
rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
obj_request->type == OBJ_REQUEST_NODATA);
rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
if (!obj_request->result)
obj_request->xferred = obj_request->length;
- /* Finish up with the normal image object callback */
-
- rbd_img_obj_callback(obj_request);
+ obj_request_done_set(obj_request);
}
static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/* All set, send it off. */
- orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
@@ -3462,52 +3474,6 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_MQ_RQ_QUEUE_OK;
}
-/*
- * a queue callback. Makes sure that we don't create a bio that spans across
- * multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone_range()
- */
-static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
- struct bio_vec *bvec)
-{
- struct rbd_device *rbd_dev = q->queuedata;
- sector_t sector_offset;
- sector_t sectors_per_obj;
- sector_t obj_sector_offset;
- int ret;
-
- /*
- * Find how far into its rbd object the partition-relative
- * bio start sector is to offset relative to the enclosing
- * device.
- */
- sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
- sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
- obj_sector_offset = sector_offset & (sectors_per_obj - 1);
-
- /*
- * Compute the number of bytes from that offset to the end
- * of the object. Account for what's already used by the bio.
- */
- ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
- if (ret > bmd->bi_size)
- ret -= bmd->bi_size;
- else
- ret = 0;
-
- /*
- * Don't send back more than was asked for. And if the bio
- * was empty, let the whole thing through because: "Note
- * that a block device *must* allow a single page to be
- * added to an empty bio."
- */
- rbd_assert(bvec->bv_len <= PAGE_SIZE);
- if (ret > (int) bvec->bv_len || !bmd->bi_size)
- ret = (int) bvec->bv_len;
-
- return ret;
-}
-
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk = rbd_dev->disk;
@@ -3803,10 +3769,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = segment_size;
q->limits.discard_alignment = segment_size;
- q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
+ blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
q->limits.discard_zeroes_data = 1;
- blk_queue_merge_bvec(q, rbd_merge_bvec);
disk->queue = q;
q->queuedata = rbd_dev;
@@ -4708,7 +4673,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
}
ret = rbd_dev_v2_snap_context(rbd_dev);
- dout("rbd_dev_v2_snap_context returned %d\n", ret);
+ if (ret && first_time) {
+ kfree(rbd_dev->header.object_prefix);
+ rbd_dev->header.object_prefix = NULL;
+ }
return ret;
}
@@ -5189,7 +5157,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
out_err:
if (parent) {
rbd_dev_unparent(rbd_dev);
- kfree(rbd_dev->header_name);
rbd_dev_destroy(parent);
} else {
rbd_put_client(rbdc);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index ac8c62cb4875..3163e4cdc2cc 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -137,7 +137,10 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
if (!card->eeh_state && card->gendisk)
disk_stats_complete(card, meta->bio, meta->start_time);
- bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
+ if (atomic_read(&meta->error))
+ bio_io_error(meta->bio);
+ else
+ bio_endio(meta->bio);
kmem_cache_free(bio_meta_pool, meta);
}
}
@@ -148,6 +151,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
struct rsxx_bio_meta *bio_meta;
int st = -EINVAL;
+ blk_queue_split(q, &bio, q->bio_split);
+
might_sleep();
if (!card)
@@ -199,7 +204,9 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
- bio_endio(bio, st);
+ if (st)
+ bio->bi_error = st;
+ bio_endio(bio);
}
/*----------------- Device Setup -------------------*/
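The rbd and rsxx hunks above reflect the same 4.3 block-layer API change that recurs throughout this merge: bio_endio() no longer takes an error argument, so drivers record the status in bio->bi_error before completing, and bio-based drivers call blk_queue_split() at the top of their make_request function now that the core splits oversized bios itself (which is also what makes rbd_merge_bvec and the blk_queue_merge_bvec hook removable). A minimal sketch of the new completion pattern, assuming a hypothetical process_bio() helper:

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		/* Let the core split bios that exceed the queue limits. */
		blk_queue_split(q, &bio, q->bio_split);

		if (process_bio(bio) < 0)
			bio->bi_error = -EIO;	/* status lives in the bio now */
		bio_endio(bio);			/* no error argument any more */
	}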
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 1e46eb2305c0..586f9168ffa4 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -4422,7 +4422,7 @@ static int skd_cons_disk(struct skd_device *skdev)
/* DISCARD Flag initialization. */
q->limits.discard_granularity = 8192;
q->limits.discard_alignment = 0;
- q->limits.max_discard_sectors = UINT_MAX >> 9;
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4cf81b5bf0f7..04d65790a886 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -456,7 +456,7 @@ static void process_page(unsigned long data)
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (control & DMASCR_HARD_ERROR) {
/* error */
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
dev_printk(KERN_WARNING, &card->dev->dev,
"I/O error on sector %d/%d\n",
le32_to_cpu(desc->local_addr)>>9,
@@ -505,7 +505,7 @@ static void process_page(unsigned long data)
return_bio = bio->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, 0);
+ bio_endio(bio);
}
}
@@ -531,6 +531,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size);
+ blk_queue_split(q, &bio, q->bio_split);
+
spin_lock_irq(&card->lock);
*card->biotail = bio;
bio->bi_next = NULL;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d4d05f064d39..e93899cc6f60 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -478,8 +478,7 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
struct virtio_blk_config, wce,
&writeback);
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
return writeback;
}
@@ -657,6 +656,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
vblk->disk->driverfs_dev = &vdev->dev;
+ vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
/* configure queue flush support */
@@ -840,7 +840,7 @@ static unsigned int features_legacy[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_TOPOLOGY,
+ VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..6a685aec6994 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
return;
}
- if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+ if (work_busy(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}
@@ -1078,9 +1078,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
/*
* bio callback.
*/
-static void end_block_io_op(struct bio *bio, int error)
+static void end_block_io_op(struct bio *bio)
{
- __end_block_io_op(bio->bi_private, error);
+ __end_block_io_op(bio->bi_private, bio->bi_error);
bio_put(bio);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..0823a96902f8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -37,6 +37,7 @@
#include <linux/interrupt.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
@@ -82,7 +83,6 @@ struct blk_shadow {
struct split_bio {
struct bio *bio;
atomic_t pending;
- int err;
};
static DEFINE_MUTEX(blkfront_mutex);
@@ -148,6 +148,7 @@ struct blkfront_info
unsigned int feature_persistent:1;
unsigned int max_indirect_segments;
int is_ready;
+ struct blk_mq_tag_set tag_set;
};
static unsigned int nr_minors;
@@ -179,6 +180,7 @@ static DEFINE_SPINLOCK(minor_lock);
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info)
{
@@ -247,7 +249,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
struct blkfront_info *info)
{
struct grant *gnt_list_entry;
- unsigned long buffer_mfn;
+ unsigned long buffer_gfn;
BUG_ON(list_empty(&info->grants));
gnt_list_entry = list_first_entry(&info->grants, struct grant,
@@ -266,10 +268,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
BUG_ON(!pfn);
gnt_list_entry->pfn = pfn;
}
- buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
- buffer_mfn, 0);
+ buffer_gfn, 0);
return gnt_list_entry;
}
@@ -616,54 +618,41 @@ static inline bool blkif_request_flush_invalid(struct request *req,
!(info->feature_flush & REQ_FUA)));
}
-/*
- * do_blkif_request
- * read a block; request is in a request queue
- */
-static void do_blkif_request(struct request_queue *rq)
+static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
{
- struct blkfront_info *info = NULL;
- struct request *req;
- int queued;
-
- pr_debug("Entered do_blkif_request\n");
+ struct blkfront_info *info = qd->rq->rq_disk->private_data;
- queued = 0;
-
- while ((req = blk_peek_request(rq)) != NULL) {
- info = req->rq_disk->private_data;
-
- if (RING_FULL(&info->ring))
- goto wait;
+ blk_mq_start_request(qd->rq);
+ spin_lock_irq(&info->io_lock);
+ if (RING_FULL(&info->ring))
+ goto out_busy;
- blk_start_request(req);
+ if (blkif_request_flush_invalid(qd->rq, info))
+ goto out_err;
- if (blkif_request_flush_invalid(req, info)) {
- __blk_end_request_all(req, -EOPNOTSUPP);
- continue;
- }
+ if (blkif_queue_request(qd->rq))
+ goto out_busy;
- pr_debug("do_blk_req %p: cmd %p, sec %lx, "
- "(%u/%u) [%s]\n",
- req, req->cmd, (unsigned long)blk_rq_pos(req),
- blk_rq_cur_sectors(req), blk_rq_sectors(req),
- rq_data_dir(req) ? "write" : "read");
-
- if (blkif_queue_request(req)) {
- blk_requeue_request(rq, req);
-wait:
- /* Avoid pointless unplugs. */
- blk_stop_queue(rq);
- break;
- }
+ flush_requests(info);
+ spin_unlock_irq(&info->io_lock);
+ return BLK_MQ_RQ_QUEUE_OK;
- queued++;
- }
+out_err:
+ spin_unlock_irq(&info->io_lock);
+ return BLK_MQ_RQ_QUEUE_ERROR;
- if (queued != 0)
- flush_requests(info);
+out_busy:
+ spin_unlock_irq(&info->io_lock);
+ blk_mq_stop_hw_queue(hctx);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
+static struct blk_mq_ops blkfront_mq_ops = {
+ .queue_rq = blkif_queue_rq,
+ .map_queue = blk_mq_map_queue,
+};
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
unsigned int physical_sector_size,
unsigned int segments)
@@ -671,9 +660,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
- rq = blk_init_queue(do_blkif_request, &info->io_lock);
- if (rq == NULL)
+ memset(&info->tag_set, 0, sizeof(info->tag_set));
+ info->tag_set.ops = &blkfront_mq_ops;
+ info->tag_set.nr_hw_queues = 1;
+ info->tag_set.queue_depth = BLK_RING_SIZE(info);
+ info->tag_set.numa_node = NUMA_NO_NODE;
+ info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ info->tag_set.cmd_size = 0;
+ info->tag_set.driver_data = info;
+
+ if (blk_mq_alloc_tag_set(&info->tag_set))
+ return -1;
+ rq = blk_mq_init_queue(&info->tag_set);
+ if (IS_ERR(rq)) {
+ blk_mq_free_tag_set(&info->tag_set);
return -1;
+ }
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
@@ -901,19 +903,15 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors;
- unsigned long flags;
if (info->rq == NULL)
return;
- spin_lock_irqsave(&info->io_lock, flags);
-
/* No more blkif_request(). */
- blk_stop_queue(info->rq);
+ blk_mq_stop_hw_queues(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work(&info->work);
@@ -925,20 +923,18 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
xlbd_release_minors(minor, nr_minors);
blk_cleanup_queue(info->rq);
+ blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;
put_disk(info->gd);
info->gd = NULL;
}
+/* Must be called with io_lock held */
static void kick_pending_request_queues(struct blkfront_info *info)
{
- if (!RING_FULL(&info->ring)) {
- /* Re-enable calldowns. */
- blk_start_queue(info->rq);
- /* Kick things off immediately. */
- do_blkif_request(info->rq);
- }
+ if (!RING_FULL(&info->ring))
+ blk_mq_start_stopped_hw_queues(info->rq, true);
}
static void blkif_restart_queue(struct work_struct *work)
@@ -963,7 +959,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
if (info->rq)
- blk_stop_queue(info->rq);
+ blk_mq_stop_hw_queues(info->rq);
/* Remove all persistent grants */
if (!list_empty(&info->grants)) {
@@ -1128,8 +1124,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
- list_add(&indirect_page->lru, &info->indirect_pages);
+ if (!info->feature_persistent) {
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
@@ -1144,7 +1142,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
- int error;
spin_lock_irqsave(&info->io_lock, flags);
@@ -1185,37 +1182,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}
- error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
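+			/* Under blk-mq the per-request status travels in
+			 * req->errors and is picked up when the request is
+			 * completed via blk_mq_complete_request() below.
+			 */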
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ req->errors = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
- __blk_end_request_all(req, error);
+ blk_mq_complete_request(req);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ req->errors = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ req->errors = -EOPNOTSUPP;
}
- if (unlikely(error)) {
- if (error == -EOPNOTSUPP)
- error = 0;
+ if (unlikely(req->errors)) {
+ if (req->errors == -EOPNOTSUPP)
+ req->errors = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -1226,7 +1223,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- __blk_end_request_all(req, error);
+ blk_mq_complete_request(req);
break;
default:
BUG();
@@ -1478,16 +1475,14 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0;
}
-static void split_bio_end(struct bio *bio, int error)
+static void split_bio_end(struct bio *bio)
{
struct split_bio *split_bio = bio->bi_private;
- if (error)
- split_bio->err = error;
-
if (atomic_dec_and_test(&split_bio->pending)) {
split_bio->bio->bi_phys_segments = 0;
- bio_endio(split_bio->bio, split_bio->err);
+ split_bio->bio->bi_error = bio->bi_error;
+ bio_endio(split_bio->bio);
kfree(split_bio);
}
bio_put(bio);
@@ -1519,7 +1514,7 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
- rc = blkfront_setup_indirect(info);
+ rc = blkfront_gather_backend_features(info);
if (rc) {
kfree(copy);
return rc;
@@ -1555,28 +1550,6 @@ static int blkif_recover(struct blkfront_info *info)
kfree(copy);
- /*
- * Empty the queue, this is important because we might have
- * requests in the queue with more segments than what we
- * can handle now.
- */
- spin_lock_irq(&info->io_lock);
- while ((req = blk_fetch_request(info->rq)) != NULL) {
- if (req->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
- list_add(&req->queuelist, &requests);
- continue;
- }
- merge_bio.head = req->bio;
- merge_bio.tail = req->biotail;
- bio_list_merge(&bio_list, &merge_bio);
- req->bio = NULL;
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
- pr_alert("diskcache flush request found!\n");
- __blk_end_request_all(req, 0);
- }
- spin_unlock_irq(&info->io_lock);
-
xenbus_switch_state(info->xbdev, XenbusStateConnected);
spin_lock_irq(&info->io_lock);
@@ -1591,9 +1564,10 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_requeue_request(info->rq, req);
+ blk_mq_requeue_request(req);
}
spin_unlock_irq(&info->io_lock);
+ blk_mq_kick_requeue_list(info->rq);
while ((bio = bio_list_pop(&bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
@@ -1720,20 +1694,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_info *info)
{
- unsigned int indirect_segments, segs;
+ unsigned int segs;
int err, i;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err) {
- info->max_indirect_segments = 0;
+ if (info->max_indirect_segments == 0)
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
- } else {
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ else
segs = info->max_indirect_segments;
- }
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
if (err)
@@ -1797,6 +1764,68 @@ out_of_memory:
}
/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+ int err;
+ int barrier, flush, discard, persistent;
+ unsigned int indirect_segments;
+
+ info->feature_flush = 0;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier,
+ NULL);
+
+ /*
+ * If there's no "feature-barrier" defined, then it means
+ * we're dealing with a very old backend which writes
+ * synchronously; nothing to do.
+ *
+ * If there are barriers, then we use flush.
+ */
+ if (!err && barrier)
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
+ /*
+ * And if "feature-flush-cache" is present, use that in
+ * preference to barriers.
+ */
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush,
+ NULL);
+
+ if (!err && flush)
+ info->feature_flush = REQ_FLUSH;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard,
+ NULL);
+
+ if (!err && discard)
+ blkfront_setup_discard(info);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u", &indirect_segments,
+ NULL);
+ if (err)
+ info->max_indirect_segments = 0;
+ else
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
+
+ return blkfront_setup_indirect(info);
+}
+
+/*
 * Invoked when the backend is finally 'ready' (and has told us
* the details about the physical device - #sectors, size, etc).
*/
@@ -1807,7 +1836,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int physical_sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1892,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err != 1)
physical_sector_size = sector_size;
- info->feature_flush = 0;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
-
- /*
- * If there's no "feature-barrier" defined, then it means
- * we're dealing with a very old backend which writes
- * synchronously; nothing to do.
- *
- * If there are barriers, then we use flush.
- */
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
- /*
- * And if there is "feature-flush-cache" use that above
- * barriers.
- */
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
-
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
-
- if (!err && discard)
- blkfront_setup_discard(info);
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
-
- err = blkfront_setup_indirect(info);
+ err = blkfront_gather_backend_features(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
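The blkfront conversion above follows the standard blk-mq bring-up sequence: populate a struct blk_mq_tag_set, allocate it, then build the request queue from it, freeing the tag set again if queue creation fails. A condensed sketch of that sequence against the 4.3 API (my_mq_ops, my_queue_rq and the queue depth are illustrative):

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,		/* hypothetical handler */
		.map_queue	= blk_mq_map_queue,
	};

	memset(&set, 0, sizeof(set));
	set.ops		 = &my_mq_ops;
	set.nr_hw_queues = 1;
	set.queue_depth	 = 64;
	set.numa_node	 = NUMA_NO_NODE;
	set.flags	 = BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(&set))
		return -ENOMEM;
	rq = blk_mq_init_queue(&set);		/* returns ERR_PTR() on failure */
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&set);
		return PTR_ERR(rq);
	}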
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..9fa15bb9d118 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -388,7 +388,6 @@ static ssize_t comp_algorithm_store(struct device *dev,
static ssize_t compact_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- unsigned long nr_migrated;
struct zram *zram = dev_to_zram(dev);
struct zram_meta *meta;
@@ -399,8 +398,7 @@ static ssize_t compact_store(struct device *dev,
}
meta = zram->meta;
- nr_migrated = zs_compact(meta->mem_pool);
- atomic64_add(nr_migrated, &zram->stats.num_migrated);
+ zs_compact(meta->mem_pool);
up_read(&zram->init_lock);
return len;
@@ -428,26 +426,31 @@ static ssize_t mm_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ struct zs_pool_stats pool_stats;
u64 orig_size, mem_used = 0;
long max_used;
ssize_t ret;
+ memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
+
down_read(&zram->init_lock);
- if (init_done(zram))
+ if (init_done(zram)) {
mem_used = zs_get_total_pages(zram->meta->mem_pool);
+ zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+ }
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
+ "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.zero_pages),
- (u64)atomic64_read(&zram->stats.num_migrated));
+ pool_stats.pages_compacted);
up_read(&zram->init_lock);
return ret;
@@ -496,10 +499,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
- char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@@ -512,7 +514,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
goto out_error;
}
- snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
@@ -621,7 +622,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
uncmem = user_mem;
if (!uncmem) {
- pr_info("Unable to allocate temp memory\n");
+ pr_err("Unable to allocate temp memory\n");
ret = -ENOMEM;
goto out_cleanup;
}
@@ -718,7 +719,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
- pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen);
ret = -ENOMEM;
goto out;
@@ -850,7 +851,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
zram_bio_discard(zram, index, offset, bio);
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -883,8 +884,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
update_position(&index, &offset, &bvec);
}
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
out:
@@ -901,6 +901,8 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
if (unlikely(!zram_meta_get(zram)))
goto error;
+ blk_queue_split(queue, &bio, queue->bio_split);
+
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
@@ -1031,13 +1033,13 @@ static ssize_t disksize_store(struct device *dev,
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+ meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
comp = zcomp_create(zram->compressor, zram->max_comp_streams);
if (IS_ERR(comp)) {
- pr_info("Cannot initialise %s compressing backend\n",
+ pr_err("Cannot initialise %s compressing backend\n",
zram->compressor);
err = PTR_ERR(comp);
goto out_free_meta;
@@ -1215,7 +1217,7 @@ static int zram_add(void)
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
- pr_warn("Error allocating disk structure for device %d\n",
+ pr_err("Error allocating disk structure for device %d\n",
device_id);
ret = -ENOMEM;
goto out_free_queue;
@@ -1244,7 +1246,7 @@ static int zram_add(void)
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
- zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
* zram_bio_discard() will clear all logical blocks if logical block
* size is identical with physical block size(PAGE_SIZE). But if it is
@@ -1264,7 +1266,8 @@ static int zram_add(void)
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
if (ret < 0) {
- pr_warn("Error creating sysfs group");
+ pr_err("Error creating sysfs group for device %d\n",
+ device_id);
goto out_free_disk;
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
@@ -1404,13 +1407,13 @@ static int __init zram_init(void)
ret = class_register(&zram_control_class);
if (ret) {
- pr_warn("Unable to register zram-control class\n");
+ pr_err("Unable to register zram-control class\n");
return ret;
}
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
- pr_warn("Unable to get major number\n");
+ pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
return -EBUSY;
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6dbe2df506bf..8e92339686d7 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -78,7 +78,6 @@ struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
atomic64_t num_writes; /* --do-- */
- atomic64_t num_migrated; /* no. of migrated object */
atomic64_t failed_reads; /* can happen when memory is too low */
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2e777071e1dc..0bd88c942a52 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -13,6 +13,10 @@ config BT_RTL
tristate
select FW_LOADER
+config BT_QCA
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
@@ -132,6 +136,7 @@ config BT_HCIUART_3WIRE
config BT_HCIUART_INTEL
bool "Intel protocol support"
depends on BT_HCIUART
+ select BT_HCIUART_H4
select BT_INTEL
help
The Intel protocol support enables Bluetooth HCI over serial
@@ -150,6 +155,19 @@ config BT_HCIUART_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIUART_QCA
+ bool "Qualcomm Atheros protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_QCA
+ help
+	  The Qualcomm Atheros protocol supports the HCI In-Band Sleep feature
+	  over the serial port interface (H4) between the controller and host.
+	  This protocol is required for UART clock control on QCA Bluetooth
+	  devices.
+
+ Say Y here to compile support for QCA protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f40e194e7080..07c9cf381e5a 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
+obj-$(CONFIG_BT_QCA) += btqca.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
@@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index fcfb72e9e0ee..a5c4d0584389 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7aab65427d38..a00bb82eb7c6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
- };
+ }
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..02ed816a18f9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,6 +34,7 @@
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
@@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
* with waiting for configuration state.
+ *
+ * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+ * with waiting for configuration state.
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
BT_INFO("%s: BCM: Using default device address (%pMR)",
hdev->name, &bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@ static const struct {
u16 subver;
const char *name;
} bcm_uart_subver_table[] = {
+ { 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
@@ -472,12 +478,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
/* Read Verbose Config Version Info */
skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
- get_unaligned_le16(skb->data + 5));
- kfree_skb(skb);
+ if (!IS_ERR(skb)) {
+ BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+ get_unaligned_le16(skb->data + 5));
+ kfree_skb(skb);
+ }
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 828f2f8d1568..048423fd83bf 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -89,7 +89,89 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct sk_buff *skb;
+ u8 type = 0x00;
+
+ BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reset after hardware error failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len != 13) {
+ BT_ERR("%s: Exception info size mismatch", hdev->name);
+ kfree_skb(skb);
+ return;
+ }
+
+ BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+ const char *variant;
+
+ switch (ver->fw_variant) {
+ case 0x06:
+ variant = "Bootloader";
+ break;
+ case 0x23:
+ variant = "Firmware";
+ break;
+ default:
+ return;
+ }
+
+ BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+ variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+ ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+ const void *param)
+{
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
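+
+		/* An HCI command carries at most 255 parameter bytes; one
+		 * byte holds the fragment type, so the payload is capped at
+		 * 252 (e.g. 600 bytes go out as 252 + 252 + 96).
+		 */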
+
+ cmd_param[0] = fragment_type;
+ memcpy(cmd_param + 1, param, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+ cmd_param, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ param += fragment_len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("intel/ibt-11-5.sfi");
+MODULE_FIRMWARE("intel/ibt-11-5.ddc");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 4bda6ab34f60..b278d14758d5 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,6 +73,11 @@ struct intel_secure_send_result {
int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+ const void *param);
#else
@@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
return -EOPNOTSUPP;
}
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+ u32 plen, const void *param)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 086f0ec89580..27a9aac25583 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -95,10 +95,10 @@ struct btmrvl_private {
struct btmrvl_device btmrvl_dev;
struct btmrvl_adapter *adapter;
struct btmrvl_thread main_thread;
- int (*hw_host_to_card) (struct btmrvl_private *priv,
+ int (*hw_host_to_card)(struct btmrvl_private *priv,
u8 *payload, u16 nb);
- int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
- int (*hw_process_int_status) (struct btmrvl_private *priv);
+ int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+ int (*hw_process_int_status)(struct btmrvl_private *priv);
void (*firmware_dump)(struct btmrvl_private *priv);
spinlock_t driver_lock; /* spinlock used by driver */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b9a811900f6a..b9978a7ba0cc 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
}
}
- sdio_release_host(card->func);
-
/*
* winner or not, with this test the FW synchronizes when the
* module can continue its initialization
@@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
return -ETIMEDOUT;
}
+ sdio_release_host(card->func);
+
return 0;
done:
@@ -1376,8 +1376,7 @@ done:
/* fw_dump_data will be freed in the device coredump release function
after 5 min */
- dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data,
- fw_dump_len, GFP_KERNEL);
+ dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644
index 000000000000..4a6208168850
--- /dev/null
+++ b/drivers/bluetooth/btqca.c
@@ -0,0 +1,392 @@
+/*
+ * Bluetooth support for Qualcomm Atheros chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ struct rome_version *ver;
+ char cmd;
+ int err = 0;
+
+ BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+ cmd = EDL_PATCH_VER_REQ_CMD;
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+ &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+ err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+ BT_ERR("%s: Version size mismatch len %d", hdev->name,
+ skb->len);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl || !edl->data) {
+ BT_ERR("%s: TLV with no header or no data", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_APP_VER_RES_EVT) {
+ BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+ edl->cresp, edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+ ver = (struct rome_version *)(edl->data);
+
+ BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+ BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+ BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+ BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The ROME chipset version is derived from the SoC and ROM
+	 * versions: the upper 2 bytes come from the SoC ID and the
+	 * lower 2 bytes from the ROM version.
+ */
+ *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+ (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
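+
+	/* Example with illustrative values: soc_id 0x00000044 and
+	 * rome_ver 0x0302 combine to 0x00440302, which later selects
+	 * firmware files such as qca/rampatch_00440302.bin.
+	 */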
+
+out:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+ const struct firmware *fw)
+{
+ const u8 *data;
+ u32 type_len;
+ u16 tag_id, tag_len;
+ int idx, length;
+ struct tlv_type_hdr *tlv;
+ struct tlv_type_patch *tlv_patch;
+ struct tlv_type_nvm *tlv_nvm;
+
+ tlv = (struct tlv_type_hdr *)fw->data;
+
+ type_len = le32_to_cpu(tlv->type_len);
+ length = (type_len >> 8) & 0x00ffffff;
+
+ BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+ BT_DBG("Length\t\t : %d bytes", length);
+
+ switch (config->type) {
+ case TLV_TYPE_PATCH:
+ tlv_patch = (struct tlv_type_patch *)tlv->data;
+ BT_DBG("Total Length\t\t : %d bytes",
+ le32_to_cpu(tlv_patch->total_size));
+ BT_DBG("Patch Data Length\t : %d bytes",
+ le32_to_cpu(tlv_patch->data_length));
+ BT_DBG("Signing Format Version : 0x%x",
+ tlv_patch->format_version);
+ BT_DBG("Signature Algorithm\t : 0x%x",
+ tlv_patch->signature);
+ BT_DBG("Reserved\t\t : 0x%x",
+ le16_to_cpu(tlv_patch->reserved1));
+ BT_DBG("Product ID\t\t : 0x%04x",
+ le16_to_cpu(tlv_patch->product_id));
+ BT_DBG("Rom Build Version\t : 0x%04x",
+ le16_to_cpu(tlv_patch->rom_build));
+ BT_DBG("Patch Version\t\t : 0x%04x",
+ le16_to_cpu(tlv_patch->patch_version));
+ BT_DBG("Reserved\t\t : 0x%x",
+ le16_to_cpu(tlv_patch->reserved2));
+ BT_DBG("Patch Entry Address\t : 0x%x",
+ le32_to_cpu(tlv_patch->entry));
+ break;
+
+ case TLV_TYPE_NVM:
+ idx = 0;
+ data = tlv->data;
+ while (idx < length) {
+ tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+ tag_id = le16_to_cpu(tlv_nvm->tag_id);
+ tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+ /* Update NVM tags as needed */
+ switch (tag_id) {
+ case EDL_TAG_ID_HCI:
+ /* HCI transport layer parameters
+				 * enabling software in-band sleep
+				 * on the controller side.
+ */
+ tlv_nvm->data[0] |= 0x80;
+
+ /* UART Baud Rate */
+ tlv_nvm->data[2] = config->user_baud_rate;
+
+ break;
+
+ case EDL_TAG_ID_DEEP_SLEEP:
+ /* Sleep enable mask
+				 * enabling the deep sleep feature on the controller.
+ */
+ tlv_nvm->data[0] |= 0x01;
+
+ break;
+ }
+
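+			/* Skip tag_id, tag_len, the two reserved __le32
+			 * words (8 bytes) and the tag payload itself.
+			 */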
+ idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+ }
+ break;
+
+ default:
+ BT_ERR("Unknown TLV type %d", config->type);
+ break;
+ }
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+ const u8 *data)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ struct tlv_seg_resp *tlv_resp;
+ u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+ int err = 0;
+
+ BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+ cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+ cmd[1] = seg_size;
+ memcpy(cmd + 2, data, seg_size);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+ HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+ BT_ERR("%s: TLV response size mismatch", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl || !edl->data) {
+ BT_ERR("%s: TLV with no header or no data", hdev->name);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+ BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+ hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+ err = -EIO;
+ }
+
+out:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+ const struct firmware *fw)
+{
+ const u8 *buffer, *data;
+ int total_segment, remain_size;
+ int ret, i;
+
+ if (!fw || !fw->data)
+ return -EINVAL;
+
+ total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+ remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+ BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+ hdev->name, total_segment, remain_size, fw->size);
+
+ data = fw->data;
+ for (i = 0; i < total_segment; i++) {
+ buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+ ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+ buffer);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ if (remain_size) {
+ buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+ ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+ buffer);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+ struct rome_config *config)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+ config->fwname, ret);
+ return ret;
+ }
+
+ rome_tlv_check_data(config, fw);
+
+ ret = rome_tlv_download_request(hdev, fw);
+ if (ret) {
+ BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+ config->fwname, ret);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 cmd[9];
+ int err;
+
+ cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+ cmd[1] = 0x02; /* TAG ID */
+ cmd[2] = sizeof(bdaddr_t); /* size */
+ memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+ skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+ HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+ u32 rome_ver = 0;
+ struct rome_config config;
+ int err;
+
+ BT_DBG("%s: ROME setup on UART", hdev->name);
+
+ config.user_baud_rate = baudrate;
+
+ /* Get ROME version information */
+ err = rome_patch_ver_req(hdev, &rome_ver);
+ if (err < 0 || rome_ver == 0) {
+ BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+ return err;
+ }
+
+ BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+ /* Download rampatch file */
+ config.type = TLV_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+ rome_ver);
+ err = rome_download_firmware(hdev, &config);
+ if (err < 0) {
+ BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+ return err;
+ }
+
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+ rome_ver);
+ err = rome_download_firmware(hdev, &config);
+ if (err < 0) {
+ BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+ return err;
+ }
+
+ /* Perform HCI reset */
+ err = rome_reset(hdev);
+ if (err < 0) {
+ BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+ return err;
+ }
+
+ BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644
index 000000000000..65e994b96c47
--- /dev/null
+++ b/drivers/bluetooth/btqca.h
@@ -0,0 +1,135 @@
+/*
+ * Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define EDL_PATCH_CMD_OPCODE (0xFC00)
+#define EDL_NVM_ACCESS_OPCODE (0xFC0B)
+#define EDL_PATCH_CMD_LEN (1)
+#define EDL_PATCH_VER_REQ_CMD (0x19)
+#define EDL_PATCH_TLV_REQ_CMD (0x1E)
+#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+#define MAX_SIZE_PER_TLV_SEGMENT (243)
+
+#define EDL_CMD_REQ_RES_EVT (0x00)
+#define EDL_PATCH_VER_RES_EVT (0x19)
+#define EDL_APP_VER_RES_EVT (0x02)
+#define EDL_TVL_DNLD_RES_EVT (0x04)
+#define EDL_CMD_EXE_STATUS_EVT (0x00)
+#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
+#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
+
+#define EDL_TAG_ID_HCI (17)
+#define EDL_TAG_ID_DEEP_SLEEP (27)
+
+enum qca_bardrate {
+ QCA_BAUDRATE_115200 = 0,
+ QCA_BAUDRATE_57600,
+ QCA_BAUDRATE_38400,
+ QCA_BAUDRATE_19200,
+ QCA_BAUDRATE_9600,
+ QCA_BAUDRATE_230400,
+ QCA_BAUDRATE_250000,
+ QCA_BAUDRATE_460800,
+ QCA_BAUDRATE_500000,
+ QCA_BAUDRATE_720000,
+ QCA_BAUDRATE_921600,
+ QCA_BAUDRATE_1000000,
+ QCA_BAUDRATE_1250000,
+ QCA_BAUDRATE_2000000,
+ QCA_BAUDRATE_3000000,
+ QCA_BAUDRATE_4000000,
+ QCA_BAUDRATE_1600000,
+ QCA_BAUDRATE_3200000,
+ QCA_BAUDRATE_3500000,
+ QCA_BAUDRATE_AUTO = 0xFE,
+ QCA_BAUDRATE_RESERVED
+};
+
+enum rome_tlv_type {
+ TLV_TYPE_PATCH = 1,
+ TLV_TYPE_NVM
+};
+
+struct rome_config {
+ u8 type;
+ char fwname[64];
+ uint8_t user_baud_rate;
+};
+
+struct edl_event_hdr {
+ __u8 cresp;
+ __u8 rtype;
+ __u8 data[0];
+} __packed;
+
+struct rome_version {
+ __le32 product_id;
+ __le16 patch_ver;
+ __le16 rome_ver;
+ __le32 soc_id;
+} __packed;
+
+struct tlv_seg_resp {
+ __u8 result;
+} __packed;
+
+struct tlv_type_patch {
+ __le32 total_size;
+ __le32 data_length;
+ __u8 format_version;
+ __u8 signature;
+ __le16 reserved1;
+ __le16 product_id;
+ __le16 rom_build;
+ __le16 patch_version;
+ __le16 reserved2;
+ __le32 entry;
+} __packed;
+
+struct tlv_type_nvm {
+ __le16 tag_id;
+ __le16 tag_len;
+ __le32 reserve1;
+ __le32 reserve2;
+ __u8 data[0];
+} __packed;
+
+struct tlv_type_hdr {
+ __le32 type_len;
+ __u8 data[0];
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
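These helpers are intended to be driven from a UART protocol driver (the hci_qca.c line discipline added by the same series). A purely illustrative call sequence during controller setup, with the wrapper name and address handling assumed:

	/* Illustrative sketch only: example_qca_setup() is not a real API. */
	static int example_qca_setup(struct hci_dev *hdev, const bdaddr_t *bdaddr)
	{
		int err;

		/* Download the rampatch and NVM files, then issue HCI reset. */
		err = qca_uart_setup_rome(hdev, QCA_BAUDRATE_115200);
		if (err)
			return err;

		/* Optionally program a device address afterwards. */
		return qca_set_bdaddr_rome(hdev, bdaddr);
	}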
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b4cf8d9c9dac..b6aceaf82aa8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth AMP device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+ /* Generic Bluetooth USB interface */
+ { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
/* Apple-specific (Broadcom) devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_APPLE },
@@ -319,6 +322,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+ /* Silicon Wave based devices */
+ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
{ } /* Terminating entry */
};
@@ -1575,7 +1581,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
/* fw_patch_num indicates the version of patch the device currently
* have. If there is no patch data in the device, it is always 0x00.
- * So, if it is other than 0x00, no need to patch the deivce again.
+ * So, if it is other than 0x00, no need to patch the device again.
*/
if (ver->fw_patch_num) {
BT_INFO("%s: Intel device is already patched. patch num: %02x",
@@ -1878,51 +1884,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return -EILSEQ;
}
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
- u32 plen, const void *param)
-{
- while (plen > 0) {
- struct sk_buff *skb;
- u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
- cmd_param[0] = fragment_type;
- memcpy(cmd_param + 1, param, fragment_len);
-
- skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
- cmd_param, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
-
- plen -= fragment_len;
- param += fragment_len;
- }
-
- return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
- struct intel_version *ver)
-{
- const char *variant;
-
- switch (ver->fw_variant) {
- case 0x06:
- variant = "Bootloader";
- break;
- case 0x23:
- variant = "Firmware";
- break;
- default:
- return;
- }
-
- BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
- variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
- ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1945,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- btusb_intel_version_info(hdev, ver);
+ btintel_version_info(hdev, ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2065,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Start the firmware download transaction with the Init fragment
* represented by the 128 bytes of CSS header.
*/
- err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
if (err < 0) {
BT_ERR("%s: Failed to send firmware header (%d)",
hdev->name, err);
@@ -2114,7 +2075,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Send the 256 bytes of public key information from the firmware
* as the PKey fragment.
*/
- err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
if (err < 0) {
BT_ERR("%s: Failed to send firmware public key (%d)",
hdev->name, err);
@@ -2124,7 +2085,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Send the 256 bytes of signature information from the firmware
* as the Sign fragment.
*/
- err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
if (err < 0) {
BT_ERR("%s: Failed to send firmware signature (%d)",
hdev->name, err);
@@ -2139,7 +2100,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
frag_len += sizeof(*cmd) + cmd->plen;
- /* The paramter length of the secure send command requires
+ /* The parameter length of the secure send command requires
 * a 4 byte alignment. It so happens that the firmware file
* contains proper Intel_NOP commands to align the fragments
* as needed.
@@ -2148,8 +2109,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 * firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
- err = btusb_intel_secure_send(hdev, 0x01, frag_len,
- fw_ptr);
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
if (err < 0) {
BT_ERR("%s: Failed to send firmware data (%d)",
hdev->name, err);
@@ -2291,39 +2251,6 @@ done:
return 0;
}
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
- struct sk_buff *skb;
- u8 type = 0x00;
-
- BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Reset after hardware error failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return;
- }
- kfree_skb(skb);
-
- skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return;
- }
-
- if (skb->len != 13) {
- BT_ERR("%s: Exception info size mismatch", hdev->name);
- kfree_skb(skb);
- return;
- }
-
- BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
- kfree_skb(skb);
-}
-
static int btusb_shutdown_intel(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -2783,7 +2710,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL_NEW) {
hdev->send = btusb_send_frame_intel;
hdev->setup = btusb_setup_intel_new;
- hdev->hw_error = btusb_hw_error_intel;
+ hdev->hw_error = btintel_hw_error;
hdev->set_bdaddr = btintel_set_bdaddr;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 78e10f0c65b2..84135c54ed2e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
int i;
printk(KERN_INFO "Bluetooth: Nokia control data =");
- for (i = 0; i < skb->len; i++) {
+ for (i = 0; i < skb->len; i++)
printk(" %02x", skb->data[i]);
- }
+
printk("\n");
/* transition to active state */
@@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
return -EILSEQ;
- };
+ }
nsh.zero = 0;
nsh.len = skb->len;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 23523e140a9a..835bfab88ef5 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -25,6 +25,12 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -32,11 +38,37 @@
#include "btbcm.h"
#include "hci_uart.h"
+struct bcm_device {
+ struct list_head list;
+
+ struct platform_device *pdev;
+
+ const char *name;
+ struct gpio_desc *device_wakeup;
+ struct gpio_desc *shutdown;
+
+ struct clk *clk;
+ bool clk_enabled;
+
+ u32 init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+ struct hci_uart *hu;
+ bool is_suspended; /* suspend/resume flag */
+#endif
+};
+
struct bcm_data {
- struct sk_buff *rx_skb;
- struct sk_buff_head txq;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+
+ struct bcm_device *dev;
};
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_lock);
+static LIST_HEAD(bcm_device_list);
+
static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
return 0;
}
+/* bcm_device_exists should be protected by bcm_device_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+ struct list_head *p;
+
+ list_for_each(p, &bcm_device_list) {
+ struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+ if (device == dev)
+ return true;
+ }
+
+ return false;
+}
+
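+/* Power the chip up or down: the reference clock is enabled before the
+ * shutdown and device-wakeup lines are raised, and gated again only
+ * after both lines have been dropped.
+ */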
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+ clk_enable(dev->clk);
+
+ gpiod_set_value(dev->shutdown, powered);
+ gpiod_set_value(dev->device_wakeup, powered);
+
+ if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+ clk_disable(dev->clk);
+
+ dev->clk_enabled = powered;
+
+ return 0;
+}
+
static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
+ struct list_head *p;
BT_DBG("hu %p", hu);
@@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu)
skb_queue_head_init(&bcm->txq);
hu->priv = bcm;
+
+ spin_lock(&bcm_device_lock);
+ list_for_each(p, &bcm_device_list) {
+ struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+ /* Retrieve saved bcm_device based on parent of the
+ * platform device (saved during device probe) and
+ * parent of tty device used by hci_uart
+ */
+ if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+ bcm->dev = dev;
+ hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+ dev->hu = hu;
+#endif
+ break;
+ }
+ }
+
+ if (bcm->dev)
+ bcm_gpio_set_power(bcm->dev, true);
+
+ spin_unlock(&bcm_device_lock);
+
return 0;
}
@@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ /* Protect bcm->dev against removal of the device or driver */
+ spin_lock(&bcm_device_lock);
+ if (bcm_device_exists(bcm->dev)) {
+ bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+ bcm->dev->hu = NULL;
+#endif
+ }
+ spin_unlock(&bcm_device_lock);
+
skb_queue_purge(&bcm->txq);
kfree_skb(bcm->rx_skb);
kfree(bcm);
@@ -232,6 +330,204 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
return skb_dequeue(&bcm->txq);
}
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
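+/* bcm_device_lock protects bdev->hu against a concurrent bcm_close(),
+ * which clears the pointer under the same lock.
+ */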
+static int bcm_suspend(struct device *dev)
+{
+ struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+ BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+ spin_lock(&bcm_device_lock);
+
+ if (!bdev->hu)
+ goto unlock;
+
+ if (!bdev->is_suspended) {
+ hci_uart_set_flow_control(bdev->hu, true);
+
+ /* Once this callback returns, driver suspends BT via GPIO */
+ bdev->is_suspended = true;
+ }
+
+ /* Suspend the device */
+ if (bdev->device_wakeup) {
+ gpiod_set_value(bdev->device_wakeup, false);
+ BT_DBG("suspend, delaying 15 ms");
+ mdelay(15);
+ }
+
+unlock:
+ spin_unlock(&bcm_device_lock);
+
+ return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+ struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+ BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+ spin_lock(&bcm_device_lock);
+
+ if (!bdev->hu)
+ goto unlock;
+
+ if (bdev->device_wakeup) {
+ gpiod_set_value(bdev->device_wakeup, true);
+ BT_DBG("resume, delaying 15 ms");
+ mdelay(15);
+ }
+
+ /* When this callback executes, the device has woken up already */
+ if (bdev->is_suspended) {
+ bdev->is_suspended = false;
+
+ hci_uart_set_flow_control(bdev->hu, false);
+ }
+
+unlock:
+ spin_unlock(&bcm_device_lock);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
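+/* Tie the GPIO connection names used below to the ACPI GpioIo
+ * resources: index 0 in _CRS is the device-wakeup line, index 1 the
+ * shutdown line.
+ */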
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+ { "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+ { "shutdown-gpios", &shutdown_gpios, 1 },
+ { },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+ struct bcm_device *dev = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_uart_serialbus *sb;
+
+ sb = &ares->data.uart_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+ dev->init_speed = sb->default_baud_rate;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+ struct platform_device *pdev = dev->pdev;
+ const struct acpi_device_id *id;
+ struct acpi_device *adev;
+ LIST_HEAD(resources);
+ int ret;
+
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Retrieve GPIO data */
+ dev->name = dev_name(&pdev->dev);
+ ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+ acpi_bcm_default_gpios);
+ if (ret)
+ return ret;
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+ dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+ "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->device_wakeup))
+ return PTR_ERR(dev->device_wakeup);
+
+ dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->shutdown))
+ return PTR_ERR(dev->shutdown);
+
+ /* Make sure at least one of the GPIOs is defined and that
+ * a name is specified for this instance
+ */
+ if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+ dev_err(&pdev->dev, "invalid platform data\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve UART ACPI info */
+ adev = ACPI_COMPANION(&dev->pdev->dev);
+ if (!adev)
+ return 0;
+
+ acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+ return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+ struct bcm_device *dev;
+ struct acpi_device_id *pdata = pdev->dev.platform_data;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->pdev = pdev;
+
+ if (ACPI_HANDLE(&pdev->dev)) {
+ ret = bcm_acpi_probe(dev);
+ if (ret)
+ return ret;
+ } else if (pdata) {
+ dev->name = pdata->id;
+ } else {
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+ /* Place this instance on the device list */
+ spin_lock(&bcm_device_lock);
+ list_add_tail(&dev->list, &bcm_device_list);
+ spin_unlock(&bcm_device_lock);
+
+ bcm_gpio_set_power(dev, false);
+
+ return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+ struct bcm_device *dev = platform_get_drvdata(pdev);
+
+ spin_lock(&bcm_device_lock);
+ list_del(&dev->list);
+ spin_unlock(&bcm_device_lock);
+
+ acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+ dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+ return 0;
+}
+
static const struct hci_uart_proto bcm_proto = {
.id = HCI_UART_BCM,
.name = "BCM",
@@ -247,12 +543,38 @@ static const struct hci_uart_proto bcm_proto = {
.dequeue = bcm_dequeue,
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+ { "BCM2E39", 0 },
+ { "BCM2E67", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+ .probe = bcm_probe,
+ .remove = bcm_remove,
+ .driver = {
+ .name = "hci_bcm",
+ .acpi_match_table = ACPI_PTR(bcm_acpi_match),
+ .pm = &bcm_pm_ops,
+ },
+};
+
int __init bcm_init(void)
{
+ platform_driver_register(&bcm_driver);
+
return hci_uart_register_proto(&bcm_proto);
}
int __exit bcm_deinit(void)
{
+ platform_driver_unregister(&bcm_driver);
+
return hci_uart_unregister_proto(&bcm_proto);
}
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 57faddc53645..eec3f28e4bb9 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -223,8 +223,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
switch ((&pkts[i])->lsize) {
case 0:
/* No variable data length */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
+ dlen = 0;
break;
case 1:
/* Single octet variable length */
@@ -252,6 +251,12 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-EILSEQ);
}
+
+ if (!dlen) {
+ /* No more data, complete frame */
+ (&pkts[i])->recv(hdev, skb);
+ skb = NULL;
+ }
} else {
/* Complete frame */
(&pkts[i])->recv(hdev, skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 3455cecc9ecf..b35b238a0380 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -75,7 +75,7 @@ struct h5 {
size_t rx_pending; /* Expecting more bytes */
u8 rx_ack; /* Last ack number received */
- int (*rx_func) (struct hci_uart *hu, u8 c);
+ int (*rx_func)(struct hci_uart *hu, u8 c);
struct timer_list timer; /* Retransmission timer */
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5dd07bf05236..cf07d1121956 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -24,8 +24,864 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER 0
+#define STATE_DOWNLOADING 1
+#define STATE_FIRMWARE_LOADED 2
+#define STATE_FIRMWARE_FAILED 3
+#define STATE_BOOTING 4
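+
+/* The STATE_* values are bit numbers in intel_data.flags; the same
+ * bits double as wait_on_bit()/wake_up_bit() wait addresses.
+ */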
+
+struct intel_device {
+ struct list_head list;
+ struct platform_device *pdev;
+ struct gpio_desc *reset;
+};
+
+static LIST_HEAD(intel_device_list);
+static DEFINE_SPINLOCK(intel_device_list_lock);
+
+struct intel_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ unsigned long flags;
+};
+
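+/* Map a standard UART baud rate to the encoding expected by the vendor
+ * Set UART Baudrate command (opcode 0xfc06, see intel_set_baudrate());
+ * 0xff marks an unsupported rate.
+ */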
+static u8 intel_convert_speed(unsigned int speed)
+{
+ switch (speed) {
+ case 9600:
+ return 0x00;
+ case 19200:
+ return 0x01;
+ case 38400:
+ return 0x02;
+ case 57600:
+ return 0x03;
+ case 115200:
+ return 0x04;
+ case 230400:
+ return 0x05;
+ case 460800:
+ return 0x06;
+ case 921600:
+ return 0x07;
+ case 1843200:
+ return 0x08;
+ case 3250000:
+ return 0x09;
+ case 2000000:
+ return 0x0a;
+ case 3000000:
+ return 0x0b;
+ default:
+ return 0xff;
+ }
+}
+
+static int intel_wait_booting(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+ int err;
+
+ err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(1000));
+
+ if (err == 1) {
+ BT_ERR("%s: Device boot interrupted", hu->hdev->name);
+ return -EINTR;
+ }
+
+ if (err) {
+ BT_ERR("%s: Device boot timeout", hu->hdev->name);
+ return -ETIMEDOUT;
+ }
+
+ return err;
+}
+
+static int intel_set_power(struct hci_uart *hu, bool powered)
+{
+ struct list_head *p;
+ int err = -ENODEV;
+
+ spin_lock(&intel_device_list_lock);
+
+ list_for_each(p, &intel_device_list) {
+ struct intel_device *idev = list_entry(p, struct intel_device,
+ list);
+
+ /* tty device and pdev device should share the same parent
+ * which is the UART port.
+ */
+ if (hu->tty->dev->parent != idev->pdev->dev.parent)
+ continue;
+
+ if (!idev->reset) {
+ err = -ENOTSUPP;
+ break;
+ }
+
+ BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
+ hu, dev_name(&idev->pdev->dev), powered);
+
+ gpiod_set_value(idev->reset, powered);
+ }
+
+ spin_unlock(&intel_device_list_lock);
+
+ return err;
+}
+
+static int intel_open(struct hci_uart *hu)
+{
+ struct intel_data *intel;
+
+ BT_DBG("hu %p", hu);
+
+ intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+ if (!intel)
+ return -ENOMEM;
+
+ skb_queue_head_init(&intel->txq);
+
+ hu->priv = intel;
+
+ if (!intel_set_power(hu, true))
+ set_bit(STATE_BOOTING, &intel->flags);
+
+ return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ intel_set_power(hu, false);
+
+ skb_queue_purge(&intel->txq);
+ kfree_skb(intel->rx_skb);
+ kfree(intel);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&intel->txq);
+
+ return 0;
+}
+
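+/* Fabricate an HCI Command Complete event for @opcode and feed it to
+ * the core. Used for vendor commands that the bootloader never
+ * acknowledges, so command flow control does not stall (see
+ * intel_dequeue()).
+ */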
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+ struct sk_buff *skb;
+ struct hci_event_hdr *hdr;
+ struct hci_ev_cmd_complete *evt;
+
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->evt = HCI_EV_CMD_COMPLETE;
+ hdr->plen = sizeof(*evt) + 1;
+
+ evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt->ncmd = 0x01;
+ evt->opcode = cpu_to_le16(opcode);
+
+ *skb_put(skb, 1) = 0x00;
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ struct intel_data *intel = hu->priv;
+ struct hci_dev *hdev = hu->hdev;
+ u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
+ struct sk_buff *skb;
+ int err;
+
+ /* This can be the first command sent to the chip, check
+ * that the controller is ready.
+ */
+ err = intel_wait_booting(hu);
+
+ clear_bit(STATE_BOOTING, &intel->flags);
+
+ /* In case of timeout, try to continue anyway */
+ if (err && err != -ETIMEDOUT)
+ return err;
+
+ BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+
+ speed_cmd[3] = intel_convert_speed(speed);
+ if (speed_cmd[3] == 0xff) {
+ BT_ERR("%s: Unsupported speed", hdev->name);
+ return -EINVAL;
+ }
+
+ /* Device will not accept speed change if Intel version has not been
+ * previously requested.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
+ if (!skb) {
+ BT_ERR("%s: Failed to allocate memory for baudrate packet",
+ hdev->name);
+ return -ENOMEM;
+ }
+
+ memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+ hci_uart_set_flow_control(hu, true);
+
+ skb_queue_tail(&intel->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ /* wait 100ms to change baudrate on controller side */
+ msleep(100);
+
+ hci_uart_set_baudrate(hu, speed);
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+ static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x08, 0x04, 0x00 };
+ struct intel_data *intel = hu->priv;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+ struct intel_version *ver;
+ struct intel_boot_params *params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ u32 frag_len;
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ unsigned int init_speed, oper_speed;
+ int speed_change = 0;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+ calltime = ktime_get();
+
+ if (hu->init_speed)
+ init_speed = hu->init_speed;
+ else
+ init_speed = hu->proto->init_speed;
+
+ if (hu->oper_speed)
+ oper_speed = hu->oper_speed;
+ else
+ oper_speed = hu->proto->oper_speed;
+
+ if (oper_speed && init_speed && oper_speed != init_speed)
+ speed_change = 1;
+
+ /* Check that the controller is ready */
+ err = intel_wait_booting(hu);
+
+ clear_bit(STATE_BOOTING, &intel->flags);
+
+ /* In case of timeout, try to continue anyway */
+ if (err && err != -ETIMEDOUT)
+ return err;
+
+ set_bit(STATE_BOOTLOADER, &intel->flags);
+
+ /* Read the Intel version information to determine if the device
+ * is in bootloader mode or if it already has operational firmware
+ * loaded.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel version information failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*ver)) {
+ BT_ERR("%s: Intel version event size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ ver = (struct intel_version *)skb->data;
+ if (ver->status) {
+ BT_ERR("%s: Intel version command failure (%02x)",
+ hdev->name, ver->status);
+ err = -bt_to_errno(ver->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only this single value is accepted.
+ */
+ if (ver->hw_platform != 0x37) {
+ BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+ hdev->name, ver->hw_platform);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+ * supported by this firmware loading method. This check has been
+ * put in place to ensure correct forward compatibility options
+ * when newer hardware variants come along.
+ */
+ if (ver->hw_variant != 0x0b) {
+ BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+ hdev->name, ver->hw_variant);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ btintel_version_info(hdev, ver);
+
+ /* The firmware variant determines if the device is in bootloader
+ * mode or is running operational firmware. The value 0x06 identifies
+ * the bootloader and the value 0x23 identifies the operational
+ * firmware.
+ *
+ * When the operational firmware is already present, then only
+ * the check for valid Bluetooth device address is needed. This
+ * determines if the device will be added as configured or
+ * unconfigured controller.
+ *
+ * It is not possible to use the Secure Boot Parameters in this
+ * case since that command is only available in bootloader mode.
+ */
+ if (ver->fw_variant == 0x23) {
+ kfree_skb(skb);
+ clear_bit(STATE_BOOTLOADER, &intel->flags);
+ btintel_check_bdaddr(hdev);
+ return 0;
+ }
+
+ /* If the device is not in bootloader mode, then the only possible
+ * choice is to return an error and abort the device initialization.
+ */
+ if (ver->fw_variant != 0x06) {
+ BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+ hdev->name, ver->fw_variant);
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ kfree_skb(skb);
+
+ /* Read the secure boot parameters to identify the operating
+ * details of the bootloader.
+ */
+ skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*params)) {
+ BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ params = (struct intel_boot_params *)skb->data;
+ if (params->status) {
+ BT_ERR("%s: Intel boot parameters command failure (%02x)",
+ hdev->name, params->status);
+ err = -bt_to_errno(params->status);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Device revision is %u", hdev->name,
+ le16_to_cpu(params->dev_revid));
+
+ BT_INFO("%s: Secure boot is %s", hdev->name,
+ params->secure_boot ? "enabled" : "disabled");
+
+ BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
+
+ /* It is required that every single firmware fragment is acknowledged
+ * with a command complete event. If the boot parameters indicate
+ * that this bootloader does not send them, then abort the setup.
+ */
+ if (params->limited_cce != 0x00) {
+ BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+ hdev->name, params->limited_cce);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* If the OTP has no valid Bluetooth device address, then there will
+ * also be no valid address for the operational firmware.
+ */
+ if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ BT_INFO("%s: No device address configured", hdev->name);
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
+
+ /* With this Intel bootloader only the hardware variant and device
+ * revision information are used to select the right firmware.
+ *
+ * Currently this bootloader support is limited to hardware variant
+ * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+ */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+ le16_to_cpu(params->dev_revid));
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ BT_ERR("%s: Failed to load Intel firmware file (%d)",
+ hdev->name, err);
+ kfree_skb(skb);
+ return err;
+ }
+
+ BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+ kfree_skb(skb);
+
+ if (fw->size < 644) {
+ BT_ERR("%s: Invalid size of firmware file (%zu)",
+ hdev->name, fw->size);
+ err = -EBADF;
+ goto done;
+ }
+
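+ /* Layout of the firmware file as consumed below: bytes 0-127 hold
+ * the CSS header, 128-383 the public key, 388-643 the signature
+ * (bytes 384-387 are skipped) and the command stream starts at
+ * offset 644, which is why anything smaller was rejected above.
+ */
+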
+ set_bit(STATE_DOWNLOADING, &intel->flags);
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware header (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware public key (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of signature information from the firmware
+ * as the Sign fragment.
+ */
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware signature (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr = fw->data + 644;
+ frag_len = 0;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+ frag_len += sizeof(*cmd) + cmd->plen;
+
+ BT_DBG("%s: patching %td/%zu", hdev->name,
+ (fw_ptr - fw->data), fw->size);
+
+ /* The parameter length of the secure send command requires
+ * a 4 byte alignment. It happens so that the firmware file
+ * contains proper Intel_NOP commands to align the fragments
+ * as needed.
+ *
+ * Send the set of commands with 4 byte alignment from the
+ * firmware data buffer as a single Data fragment.
+ */
+ if (frag_len % 4)
+ continue;
+
+ /* Send each command from the firmware data buffer as
+ * a single Data fragment.
+ */
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+ if (err < 0) {
+ BT_ERR("%s: Failed to send firmware data (%d)",
+ hdev->name, err);
+ goto done;
+ }
+
+ fw_ptr += frag_len;
+ frag_len = 0;
+ }
+
+ set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+ BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+ /* Before switching the device into operational mode and with that
+ * booting the loaded firmware, wait for the bootloader notification
+ * that all fragments have been successfully received.
+ *
+ * When the event processing receives the notification, then the
+ * STATE_DOWNLOADING flag will be cleared.
+ *
+ * Firmware loading should not take longer than 5 seconds;
+ * if it does, time out and fail the setup of this device.
+ */
+ err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(5000));
+ if (err == 1) {
+ BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ err = -EINTR;
+ goto done;
+ }
+
+ if (err) {
+ BT_ERR("%s: Firmware loading timeout", hdev->name);
+ err = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+ BT_ERR("%s: Firmware loading failed", hdev->name);
+ err = -ENOEXEC;
+ goto done;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+ release_firmware(fw);
+
+ if (err < 0)
+ return err;
+
+ /* We need to restore the default speed before Intel reset */
+ if (speed_change) {
+ err = intel_set_baudrate(hu, init_speed);
+ if (err)
+ return err;
+ }
+
+ calltime = ktime_get();
+
+ set_bit(STATE_BOOTING, &intel->flags);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+
+ /* The bootloader will not indicate when the device is ready. This
+ * is done by the operational firmware sending a bootup notification.
+ *
+ * Booting into operational firmware should not take longer than
+ * 1 second. However if that happens, then just fail the setup
+ * since something went wrong.
+ */
+ BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+ err = intel_wait_booting(hu);
+ if (err)
+ return err;
+
+ clear_bit(STATE_BOOTING, &intel->flags);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+ BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+ kfree_skb(skb);
+
+ if (speed_change) {
+ err = intel_set_baudrate(hu, oper_speed);
+ if (err)
+ return err;
+ }
+
+ BT_INFO("%s: Setup complete", hdev->name);
+
+ clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+ return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct intel_data *intel = hu->priv;
+ struct hci_event_hdr *hdr;
+
+ if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
+ !test_bit(STATE_BOOTING, &intel->flags))
+ goto recv;
+
+ hdr = (void *)skb->data;
+
+ /* When the firmware loading completes the device sends
+ * out a vendor specific event indicating the result of
+ * the firmware loading.
+ */
+ if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+ skb->data[2] == 0x06) {
+ if (skb->data[3] != 0x00)
+ set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+ if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+ test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+ }
+
+ /* When switching to the operational firmware the device
+ * sends a vendor specific event indicating that the bootup
+ * completed.
+ */
+ } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+ skb->data[2] == 0x02) {
+ if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+ smp_mb__after_atomic();
+ wake_up_bit(&intel->flags, STATE_BOOTING);
+ }
+ }
+recv:
+ return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct intel_data *intel = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel_recv_pkts,
+ ARRAY_SIZE(intel_recv_pkts));
+ if (IS_ERR(intel->rx_skb)) {
+ int err = PTR_ERR(intel->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ intel->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct intel_data *intel = hu->priv;
+
+ BT_DBG("hu %p skb %p", hu, skb);
+
+ skb_queue_tail(&intel->txq, skb);
+
+ return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+ struct intel_data *intel = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&intel->txq);
+ if (!skb)
+ return skb;
+
+ if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+ (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+ struct hci_command_hdr *cmd = (void *)skb->data;
+ __u16 opcode = le16_to_cpu(cmd->opcode);
+
+ /* When the 0xfc01 command is issued to boot into
+ * the operational firmware, it will actually not
+ * send a command complete event. To keep the flow
+ * control working inject that event here.
+ */
+ if (opcode == 0xfc01)
+ inject_cmd_complete(hu->hdev, opcode);
+ }
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+ .id = HCI_UART_INTEL,
+ .name = "Intel",
+ .init_speed = 115200,
+ .oper_speed = 3000000,
+ .open = intel_open,
+ .close = intel_close,
+ .flush = intel_flush,
+ .setup = intel_setup,
+ .set_baudrate = intel_set_baudrate,
+ .recv = intel_recv,
+ .enqueue = intel_enqueue,
+ .dequeue = intel_dequeue,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id intel_acpi_match[] = {
+ { "INT33E1", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
+
+static int intel_acpi_probe(struct intel_device *idev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static int intel_acpi_probe(struct intel_device *idev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int intel_probe(struct platform_device *pdev)
+{
+ struct intel_device *idev;
+
+ idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->pdev = pdev;
+
+ if (ACPI_HANDLE(&pdev->dev)) {
+ int err = intel_acpi_probe(idev);
+ if (err)
+ return err;
+ } else {
+ return -ENODEV;
+ }
+
+ idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(idev->reset)) {
+ dev_err(&pdev->dev, "Unable to retrieve gpio\n");
+ return PTR_ERR(idev->reset);
+ }
+
+ platform_set_drvdata(pdev, idev);
+
+ /* Place this instance on the device list */
+ spin_lock(&intel_device_list_lock);
+ list_add_tail(&idev->list, &intel_device_list);
+ spin_unlock(&intel_device_list_lock);
+
+ dev_info(&pdev->dev, "registered.\n");
+
+ return 0;
+}
+
+static int intel_remove(struct platform_device *pdev)
+{
+ struct intel_device *idev = platform_get_drvdata(pdev);
+
+ spin_lock(&intel_device_list_lock);
+ list_del(&idev->list);
+ spin_unlock(&intel_device_list_lock);
+
+ dev_info(&pdev->dev, "unregistered.\n");
+
+ return 0;
+}
+
+static struct platform_driver intel_driver = {
+ .probe = intel_probe,
+ .remove = intel_remove,
+ .driver = {
+ .name = "hci_intel",
+ .acpi_match_table = ACPI_PTR(intel_acpi_match),
+ },
+};
+
+int __init intel_init(void)
+{
+ platform_driver_register(&intel_driver);
+
+ return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+ platform_driver_unregister(&intel_driver);
+
+ return hci_uart_unregister_proto(&intel_proto);
+}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 177dd69fdd95..0d5a05a7c1fd 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -770,7 +770,7 @@ static int __init hci_uart_init(void)
/* Register the tty discipline */
- memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+ memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
hci_uart_ldisc.name = "n_hci";
hci_uart_ldisc.open = hci_uart_tty_open;
@@ -804,9 +804,15 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_init();
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+ intel_init();
+#endif
#ifdef CONFIG_BT_HCIUART_BCM
bcm_init();
#endif
+#ifdef CONFIG_BT_HCIUART_QCA
+ qca_init();
+#endif
return 0;
}
@@ -830,9 +836,15 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_3WIRE
h5_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+ intel_deinit();
+#endif
#ifdef CONFIG_BT_HCIUART_BCM
bcm_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_QCA
+ qca_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
new file mode 100644
index 000000000000..6b9b91267959
--- /dev/null
+++ b/drivers/bluetooth/hci_qca.c
@@ -0,0 +1,969 @@
+/*
+ * Bluetooth Software UART Qualcomm protocol
+ *
+ * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ * protocol extension to H4.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ * Acknowledgements:
+ * This file is based on hci_ll.c, which was...
+ * Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ * which was in turn based on hci_h4.c, which was written
+ * by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND 0xFE
+#define HCI_IBS_WAKE_IND 0xFD
+#define HCI_IBS_WAKE_ACK 0xFC
+#define HCI_MAX_IBS_SIZE 10
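+
+/* A side transmits only while awake: it sends HCI_IBS_WAKE_IND and
+ * waits for HCI_IBS_WAKE_ACK before pushing data, and announces sleep
+ * with HCI_IBS_SLEEP_IND, as implemented by the state machines below.
+ */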
+
+/* Controller states */
+#define STATE_IN_BAND_SLEEP_ENABLED 1
+
+#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
+#define IBS_TX_IDLE_TIMEOUT_MS 2000
+#define BAUDRATE_SETTLE_TIMEOUT_MS 300
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states {
+ HCI_IBS_TX_ASLEEP,
+ HCI_IBS_TX_WAKING,
+ HCI_IBS_TX_AWAKE,
+};
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states {
+ HCI_IBS_RX_ASLEEP,
+ HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote {
+ HCI_IBS_VOTE_STATS_UPDATE,
+ HCI_IBS_TX_VOTE_CLOCK_ON,
+ HCI_IBS_TX_VOTE_CLOCK_OFF,
+ HCI_IBS_RX_VOTE_CLOCK_ON,
+ HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
+struct qca_data {
+ struct hci_uart *hu;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
+ spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
+ u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
+ u8 rx_ibs_state; /* HCI_IBS receive side power state */
+ u32 tx_vote; /* Clock must be on for TX */
+ u32 rx_vote; /* Clock must be on for RX */
+ struct timer_list tx_idle_timer;
+ u32 tx_idle_delay;
+ struct timer_list wake_retrans_timer;
+ u32 wake_retrans;
+ struct workqueue_struct *workqueue;
+ struct work_struct ws_awake_rx;
+ struct work_struct ws_awake_device;
+ struct work_struct ws_rx_vote_off;
+ struct work_struct ws_tx_vote_off;
+ unsigned long flags;
+
+ /* For debugging purpose */
+ u64 ibs_sent_wacks;
+ u64 ibs_sent_slps;
+ u64 ibs_sent_wakes;
+ u64 ibs_recv_wacks;
+ u64 ibs_recv_slps;
+ u64 ibs_recv_wakes;
+ u64 vote_last_jif;
+ u32 vote_on_ms;
+ u32 vote_off_ms;
+ u64 tx_votes_on;
+ u64 rx_votes_on;
+ u64 tx_votes_off;
+ u64 rx_votes_off;
+ u64 votes_on;
+ u64 votes_off;
+};
+
+static void __serial_clock_on(struct tty_struct *tty)
+{
+ /* TODO: Some chipsets require the host to gate the UART clock to
+ * save power. Add platform-specific code to enable the UART
+ * clock here if needed.
+ */
+}
+
+static void __serial_clock_off(struct tty_struct *tty)
+{
+ /* TODO: Some chipsets require the host to gate the UART clock to
+ * save power. Add platform-specific code to disable the UART
+ * clock here if needed.
+ */
+}
+
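+/* The serial clock stays voted on while either the tx or the rx side
+ * wants it; the counters updated here only feed the debugfs statistics.
+ */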
+/* serial_clock_vote needs to be called with the ibs lock held */
+static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+ unsigned int diff;
+
+ bool old_vote = (qca->tx_vote | qca->rx_vote);
+ bool new_vote;
+
+ switch (vote) {
+ case HCI_IBS_VOTE_STATS_UPDATE:
+ diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+ if (old_vote)
+ qca->vote_off_ms += diff;
+ else
+ qca->vote_on_ms += diff;
+ return;
+
+ case HCI_IBS_TX_VOTE_CLOCK_ON:
+ qca->tx_vote = true;
+ qca->tx_votes_on++;
+ new_vote = true;
+ break;
+
+ case HCI_IBS_RX_VOTE_CLOCK_ON:
+ qca->rx_vote = true;
+ qca->rx_votes_on++;
+ new_vote = true;
+ break;
+
+ case HCI_IBS_TX_VOTE_CLOCK_OFF:
+ qca->tx_vote = false;
+ qca->tx_votes_off++;
+ new_vote = qca->rx_vote | qca->tx_vote;
+ break;
+
+ case HCI_IBS_RX_VOTE_CLOCK_OFF:
+ qca->rx_vote = false;
+ qca->rx_votes_off++;
+ new_vote = qca->rx_vote | qca->tx_vote;
+ break;
+
+ default:
+ BT_ERR("Voting irregularity");
+ return;
+ }
+
+ if (new_vote != old_vote) {
+ if (new_vote)
+ __serial_clock_on(hu->tty);
+ else
+ __serial_clock_off(hu->tty);
+
+ BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
+ vote? "true" : "false");
+
+ diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+ if (new_vote) {
+ qca->votes_on++;
+ qca->vote_off_ms += diff;
+ } else {
+ qca->votes_off++;
+ qca->vote_on_ms += diff;
+ }
+ qca->vote_last_jif = jiffies;
+ }
+}
+
+/* Builds and queues an HCI_IBS command packet; the caller triggers the
+ * actual transmit. These are very simple packets with only 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+ int err = 0;
+ struct sk_buff *skb = NULL;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+ skb = bt_skb_alloc(1, GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("Failed to allocate memory for HCI_IBS packet");
+ return -ENOMEM;
+ }
+
+ /* Assign HCI_IBS type */
+ *skb_put(skb, 1) = cmd;
+
+ skb_queue_tail(&qca->txq, skb);
+
+ return err;
+}
+
+static void qca_wq_awake_device(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_awake_device);
+ struct hci_uart *hu = qca->hu;
+ unsigned long retrans_delay;
+
+ BT_DBG("hu %p wq awake device", hu);
+
+ /* Vote for serial clock */
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+
+ spin_lock(&qca->hci_ibs_lock);
+
+ /* Send wake indication to device */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
+ BT_ERR("Failed to send WAKE to device");
+
+ qca->ibs_sent_wakes++;
+
+ /* Start retransmit timer */
+ retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+ mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+
+ spin_unlock(&qca->hci_ibs_lock);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_awake_rx(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_awake_rx);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p wq awake rx", hu);
+
+ serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+
+ spin_lock(&qca->hci_ibs_lock);
+ qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+
+ /* Always acknowledge device wake up;
+ * sending an IBS message doesn't count as TX ON.
+ */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
+ BT_ERR("Failed to acknowledge device wake up");
+
+ qca->ibs_sent_wacks++;
+
+ spin_unlock(&qca->hci_ibs_lock);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_rx_vote_off);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p rx clock vote off", hu);
+
+ serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+}
+
+static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ws_tx_vote_off);
+ struct hci_uart *hu = qca->hu;
+
+ BT_DBG("hu %p tx clock vote off", hu);
+
+ /* Run HCI tx handling unlocked */
+ hci_uart_tx_wakeup(hu);
+
+ /* Now that message queued to tty driver, vote for tty clocks off.
+ * It is up to the tty driver to pend the clocks off until tx done.
+ */
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+ struct hci_uart *hu = (struct hci_uart *)arg;
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+
+ BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ /* TX_IDLE, go to SLEEP */
+ if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+ BT_ERR("Failed to send SLEEP to device");
+ break;
+ }
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->ibs_sent_slps++;
+ queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ case HCI_IBS_TX_WAKING:
+ /* Fall through */
+
+ default:
+ BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+ struct hci_uart *hu = (struct hci_uart *)arg;
+ struct qca_data *qca = hu->priv;
+ unsigned long flags, retrans_delay;
+ unsigned long retransmit = 0;
+
+ BT_DBG("hu %p wake retransmit timeout in %d state",
+ hu, qca->tx_ibs_state);
+
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+ flags, SINGLE_DEPTH_NESTING);
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_WAKING:
+ /* No WAKE_ACK, retransmit WAKE */
+ retransmit = 1;
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+ BT_ERR("Failed to acknowledge device wake up");
+ break;
+ }
+ qca->ibs_sent_wakes++;
+ retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+ mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ case HCI_IBS_TX_AWAKE:
+ /* Fall through */
+
+ default:
+ BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ if (retransmit)
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+ struct qca_data *qca;
+
+ BT_DBG("hu %p qca_open", hu);
+
+ qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+ if (!qca)
+ return -ENOMEM;
+
+ skb_queue_head_init(&qca->txq);
+ skb_queue_head_init(&qca->tx_wait_q);
+ spin_lock_init(&qca->hci_ibs_lock);
+ qca->workqueue = create_singlethread_workqueue("qca_wq");
+ if (!qca->workqueue) {
+ BT_ERR("QCA Workqueue not initialized properly");
+ kfree(qca);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+ INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+ INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+ INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+ qca->hu = hu;
+
+ /* Assume we start with both sides asleep -- extra wakes OK */
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+ qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+ /* clocks actually on, but we start votes off */
+ qca->tx_vote = false;
+ qca->rx_vote = false;
+ qca->flags = 0;
+
+ qca->ibs_sent_wacks = 0;
+ qca->ibs_sent_slps = 0;
+ qca->ibs_sent_wakes = 0;
+ qca->ibs_recv_wacks = 0;
+ qca->ibs_recv_slps = 0;
+ qca->ibs_recv_wakes = 0;
+ qca->vote_last_jif = jiffies;
+ qca->vote_on_ms = 0;
+ qca->vote_off_ms = 0;
+ qca->votes_on = 0;
+ qca->votes_off = 0;
+ qca->tx_votes_on = 0;
+ qca->tx_votes_off = 0;
+ qca->rx_votes_on = 0;
+ qca->rx_votes_off = 0;
+
+ hu->priv = qca;
+
+ init_timer(&qca->wake_retrans_timer);
+ qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+ qca->wake_retrans_timer.data = (u_long)hu;
+ qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+ init_timer(&qca->tx_idle_timer);
+ qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+ qca->tx_idle_timer.data = (u_long)hu;
+ qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+ BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+ qca->tx_idle_delay, qca->wake_retrans);
+
+ return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct dentry *ibs_dir;
+ umode_t mode;
+
+ if (!hdev->debugfs)
+ return;
+
+ ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+ /* read only */
+ mode = S_IRUGO;
+ debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+ debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+ debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+ &qca->ibs_sent_slps);
+ debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+ &qca->ibs_sent_wakes);
+ debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+ &qca->ibs_sent_wacks);
+ debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+ &qca->ibs_recv_slps);
+ debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+ &qca->ibs_recv_wakes);
+ debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+ &qca->ibs_recv_wacks);
+ debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+ debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+ debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+ debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+ debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+ debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+ debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+ debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+ debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+ debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+ /* read/write */
+ mode = S_IRUGO | S_IWUSR;
+ debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+ debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+ &qca->tx_idle_delay);
+}
+
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca flush", hu);
+
+ skb_queue_purge(&qca->tx_wait_q);
+ skb_queue_purge(&qca->txq);
+
+ return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca close", hu);
+
+ serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+ skb_queue_purge(&qca->tx_wait_q);
+ skb_queue_purge(&qca->txq);
+ del_timer(&qca->tx_idle_timer);
+ del_timer(&qca->wake_retrans_timer);
+ destroy_workqueue(qca->workqueue);
+ qca->hu = NULL;
+
+ kfree_skb(qca->rx_skb);
+
+ hu->priv = NULL;
+
+ kfree(qca);
+
+ return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+ unsigned long flags;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p want to wake up", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_wakes++;
+
+ switch (qca->rx_ibs_state) {
+ case HCI_IBS_RX_ASLEEP:
+ /* Make sure the clock is on - it may have been voted off while
+ * asleep, so wake the rx clock from the workqueue.
+ */
+ queue_work(qca->workqueue, &qca->ws_awake_rx);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+ return;
+
+ case HCI_IBS_RX_AWAKE:
+ /* Always acknowledge device wake up;
+ * sending an IBS message doesn't count as TX ON.
+ */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+ BT_ERR("Failed to acknowledge device wake up");
+ break;
+ }
+ qca->ibs_sent_wacks++;
+ break;
+
+ default:
+ /* Any other state is illegal */
+ BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+ qca->rx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+ unsigned long flags;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p want to sleep", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_slps++;
+
+ switch (qca->rx_ibs_state) {
+ case HCI_IBS_RX_AWAKE:
+ /* Update state */
+ qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+ /* Vote off rx clock under workqueue */
+ queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+ break;
+
+ case HCI_IBS_RX_ASLEEP:
+ /* Fall through */
+
+ default:
+ /* Any other state is illegal */
+ BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+ qca->rx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+ unsigned long flags, idle_delay;
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb = NULL;
+
+ BT_DBG("hu %p woke up", hu);
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ qca->ibs_recv_wacks++;
+
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ /* Expect one if we send 2 WAKEs */
+ BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+ qca->tx_ibs_state);
+ break;
+
+ case HCI_IBS_TX_WAKING:
+ /* Send pending packets */
+ while ((skb = skb_dequeue(&qca->tx_wait_q)))
+ skb_queue_tail(&qca->txq, skb);
+
+ /* Switch timers and change state to HCI_IBS_TX_AWAKE */
+ del_timer(&qca->wake_retrans_timer);
+ idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+ mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+ qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ /* Fall through */
+
+ default:
+ BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+ qca->tx_ibs_state);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ /* Actually send the packets */
+ hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmission (padding, crc, etc); may be called
+ * from two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ unsigned long flags = 0, idle_delay;
+ struct qca_data *qca = hu->priv;
+
+ BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+ qca->tx_ibs_state);
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ /* Don't go to sleep in the middle of a patch download, or when
+ * Out-Of-Band (GPIO-controlled) sleep is selected.
+ */
+ if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+ skb_queue_tail(&qca->txq, skb);
+ return 0;
+ }
+
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+ /* Act according to current state */
+ switch (qca->tx_ibs_state) {
+ case HCI_IBS_TX_AWAKE:
+ BT_DBG("Device awake, sending normally");
+ skb_queue_tail(&qca->txq, skb);
+ idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+ mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+ break;
+
+ case HCI_IBS_TX_ASLEEP:
+ BT_DBG("Device asleep, waking up and queueing packet");
+ /* Save packet for later */
+ skb_queue_tail(&qca->tx_wait_q, skb);
+
+ qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+ /* Schedule a work queue to wake up device */
+ queue_work(qca->workqueue, &qca->ws_awake_device);
+ break;
+
+ case HCI_IBS_TX_WAKING:
+ BT_DBG("Device waking up, queueing packet");
+ /* Transient state; just keep packet for later */
+ skb_queue_tail(&qca->tx_wait_q, skb);
+ break;
+
+ default:
+ BT_ERR("Illegal tx state: %d (losing packet)",
+ qca->tx_ibs_state);
+ kfree_skb(skb);
+ break;
+ }
+
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+ return 0;
+}
+
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+ device_want_to_sleep(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+ device_want_to_wakeup(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+ device_woke_up(hu);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+#define QCA_IBS_SLEEP_IND_EVENT \
+ .type = HCI_IBS_SLEEP_IND, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+ .type = HCI_IBS_WAKE_IND, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+ .type = HCI_IBS_WAKE_ACK, \
+ .hlen = 0, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MAX_IBS_SIZE
+
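+/* The IBS indications above are bare single-byte messages (hlen 0, no
+ * length field), so h4_recv_buf() dispatches them on the type byte
+ * alone.
+ */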
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
+ { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
+ { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct qca_data *qca = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+ if (IS_ERR(qca->rx_skb)) {
+ int err = PTR_ERR(qca->rx_skb);
+ BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ qca->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+
+ return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+ switch (speed) {
+ case 9600:
+ return QCA_BAUDRATE_9600;
+ case 19200:
+ return QCA_BAUDRATE_19200;
+ case 38400:
+ return QCA_BAUDRATE_38400;
+ case 57600:
+ return QCA_BAUDRATE_57600;
+ case 115200:
+ return QCA_BAUDRATE_115200;
+ case 230400:
+ return QCA_BAUDRATE_230400;
+ case 460800:
+ return QCA_BAUDRATE_460800;
+ case 500000:
+ return QCA_BAUDRATE_500000;
+ case 921600:
+ return QCA_BAUDRATE_921600;
+ case 1000000:
+ return QCA_BAUDRATE_1000000;
+ case 2000000:
+ return QCA_BAUDRATE_2000000;
+ case 3000000:
+ return QCA_BAUDRATE_3000000;
+ case 3500000:
+ return QCA_BAUDRATE_3500000;
+ default:
+ return QCA_BAUDRATE_115200;
+ }
+}
+
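+/* The raw packet built below is the vendor Change Baudrate command:
+ * packet type 0x01 (HCI command), opcode 0xfc48, one parameter byte
+ * that is filled in with the encoded rate.
+ */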
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+ u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+ if (baudrate > QCA_BAUDRATE_3000000)
+ return -EINVAL;
+
+ cmd[4] = baudrate;
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("Failed to allocate memory for baudrate packet");
+ return -ENOMEM;
+ }
+
+ /* Assign commands to change baudrate and packet type. */
+ memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ /* Wait 300 ms for the controller to switch to the new baud rate.
+ * Once it has processed this HCI command, the host can resume
+ * communication with the controller at the new rate.
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ struct qca_data *qca = hu->priv;
+ unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ int ret;
+
+ BT_INFO("%s: ROME setup", hdev->name);
+
+ /* Patch downloading has to be done without IBS mode */
+ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+ /* Setup initial baudrate */
+ speed = 0;
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+
+ if (speed)
+ hci_uart_set_baudrate(hu, speed);
+
+ /* Setup user speed if needed */
+ speed = 0;
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+
+ if (speed) {
+ qca_baudrate = qca_get_baudrate_value(speed);
+
+ BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+ ret = qca_set_baudrate(hdev, qca_baudrate);
+ if (ret) {
+ BT_ERR("%s: Failed to change the baud rate (%d)",
+ hdev->name, ret);
+ return ret;
+ }
+ hci_uart_set_baudrate(hu, speed);
+ }
+
+ /* Setup patch / NVM configurations */
+ ret = qca_uart_setup_rome(hdev, qca_baudrate);
+ if (!ret) {
+ set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ qca_debugfs_init(hdev);
+ }
+
+ /* Setup bdaddr */
+ hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+ .id = HCI_UART_QCA,
+ .name = "QCA",
+ .init_speed = 115200,
+ .oper_speed = 3000000,
+ .open = qca_open,
+ .close = qca_close,
+ .flush = qca_flush,
+ .setup = qca_setup,
+ .recv = qca_recv,
+ .enqueue = qca_enqueue,
+ .dequeue = qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+ return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+ return hci_uart_unregister_proto(&qca_proto);
+}
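For context, the line discipline core is expected to call these from its own module init/exit paths, guarded by the new Kconfig symbol; a sketch assuming the existing hci_ldisc pattern:

#ifdef CONFIG_BT_HCIUART_QCA
	qca_init();	/* from hci_uart_init(), mirroring the other protos */
#endif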
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index ce9c670956f5..495b9ef52bb0 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 8
+#define HCI_UART_MAX_PROTO 9
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -45,6 +45,7 @@
#define HCI_UART_ATH3K 5
#define HCI_UART_INTEL 6
#define HCI_UART_BCM 7
+#define HCI_UART_QCA 8
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -167,7 +168,17 @@ int h5_init(void);
int h5_deinit(void);
#endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
#ifdef CONFIG_BT_HCIUART_BCM
int bcm_init(void);
int bcm_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index ab3bde16ecb4..1c543effe062 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -332,6 +332,18 @@ static phys_addr_t mips_cdmm_cur_base(void)
}
/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to choose a suitable value when none is configured by the bootloader.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+ return 0;
+}
+
+/**
* mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
* @bus: Pointer to bus information for current CPU.
* IS_ERR(bus) is checked, so no need for caller to check.
@@ -368,7 +380,7 @@ static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
if (!bus->phys)
bus->phys = mips_cdmm_cur_base();
/* Otherwise, ask platform code for suggestions */
- if (!bus->phys && mips_cdmm_phys_base)
+ if (!bus->phys)
bus->phys = mips_cdmm_phys_base();
/* Otherwise, copy what other CPUs have done */
if (!bus->phys)
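Since mips_cdmm_phys_base() is declared weak, platform code can supply its own copy; a hypothetical override (the address is illustrative only, not from the patch):

phys_addr_t mips_cdmm_phys_base(void)
{
	/* Hypothetical platform-specific CDMM base, used when the
	 * bootloader leaves the region unconfigured. */
	return 0x1fc10000;
}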
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index a64763b6b5fd..6575c0fe6a4e 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -107,7 +107,7 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
if (!res)
return ERR_PTR(-ENOMEM);
- regmap = bridge->ops->regmap_init(dev, bridge->context);
+ regmap = (bridge->ops->regmap_init)(dev, bridge->context);
if (IS_ERR(regmap)) {
devres_free(res);
return regmap;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6dea3f6917b..1341a94cc779 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
- phys_addr_t *mappable_base, unsigned long *mappable_end)
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, u64 *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+ if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed");
hwrng_fill = NULL;
}
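The old test only caught ERR_PTR(-ENOMEM), but kthread_run() can return other error pointers (e.g. -EINTR), which would then be stored and later dereferenced. The general pattern the fix adopts, as a standalone sketch (some_fn is a placeholder):

	struct task_struct *t = kthread_run(some_fn, NULL, "worker");

	if (IS_ERR(t)) {		/* catches every error pointer */
		pr_err("worker creation failed: %ld\n", PTR_ERR(t));
		t = NULL;
	}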
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 61e71616689b..feafdab734ae 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -694,7 +694,7 @@ static int bt_size(void)
return sizeof(struct si_sm_data);
}
-struct si_sm_handlers bt_smi_handlers = {
+const struct si_sm_handlers bt_smi_handlers = {
.init_data = bt_init_data,
.start_transaction = bt_start_transaction,
.get_result = bt_get_result,
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 8c25f596808a..1da61af7f576 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -540,7 +540,7 @@ static void kcs_cleanup(struct si_sm_data *kcs)
{
}
-struct si_sm_handlers kcs_smi_handlers = {
+const struct si_sm_handlers kcs_smi_handlers = {
.init_data = init_kcs_data,
.start_transaction = start_kcs_transaction,
.get_result = get_kcs_result,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index bf75f6361773..e3536da05c88 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -342,7 +342,7 @@ struct ipmi_smi {
 * an unpreemptible region to use this. You must fetch the
* value into a local variable and make sure it is not NULL.
*/
- struct ipmi_smi_handlers *handlers;
+ const struct ipmi_smi_handlers *handlers;
void *send_info;
#ifdef CONFIG_PROC_FS
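A minimal sketch of the access pattern the comment above prescribes (hypothetical caller, not part of the patch):

	const struct ipmi_smi_handlers *handlers = intf->handlers;

	if (handlers)			/* may go NULL during shutdown */
		handlers->poll(intf->send_info);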
@@ -744,7 +744,13 @@ static void deliver_response(struct ipmi_recv_msg *msg)
ipmi_inc_stat(intf, unhandled_local_responses);
}
ipmi_free_recv_msg(msg);
- } else {
+ } else if (!oops_in_progress) {
+ /*
+ * In the panic context, calling the receive handler is of
+ * little use and risks a deadlock, so simply skip it in
+ * that case.
+ */
+
ipmi_user_t user = msg->user;
user->handler->ipmi_recv_hndl(msg, user->handler_data);
}
@@ -1015,7 +1021,7 @@ int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
int rv = 0;
ipmi_smi_t intf;
- struct ipmi_smi_handlers *handlers;
+ const struct ipmi_smi_handlers *handlers;
mutex_lock(&ipmi_interfaces_mutex);
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
@@ -1501,7 +1507,7 @@ static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
}
-static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
int run_to_completion = intf->run_to_completion;
@@ -2747,7 +2753,7 @@ void ipmi_poll_interface(ipmi_user_t user)
}
EXPORT_SYMBOL(ipmi_poll_interface);
-int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
@@ -3959,6 +3965,10 @@ free_msg:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ /*
+ * We can get an asynchronous event or receive message in addition
+ * to commands we send.
+ */
if (msg == intf->curr_msg)
intf->curr_msg = NULL;
if (!run_to_completion)
@@ -4015,7 +4025,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
unsigned int *waiting_msgs)
{
struct ipmi_recv_msg *msg;
- struct ipmi_smi_handlers *handlers;
+ const struct ipmi_smi_handlers *handlers;
if (intf->in_shutdown)
return;
@@ -4082,7 +4092,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
ipmi_inc_stat(intf,
retransmitted_ipmb_commands);
- smi_send(intf, intf->handlers, smi_msg, 0);
+ smi_send(intf, handlers, smi_msg, 0);
} else
ipmi_free_smi_msg(smi_msg);
@@ -4291,6 +4301,9 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
0, 1); /* Don't retry, and don't wait. */
if (rv)
atomic_sub(2, &panic_done_count);
+ else if (intf->handlers->flush_messages)
+ intf->handlers->flush_messages(intf->send_info);
+
while (atomic_read(&panic_done_count) != 0)
ipmi_poll(intf);
}
@@ -4364,9 +4377,7 @@ static void send_panic_events(char *str)
/* Interface is not ready. */
continue;
- intf->run_to_completion = 1;
/* Send the event announcing the panic. */
- intf->handlers->set_run_to_completion(intf->send_info, 1);
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
@@ -4506,6 +4517,23 @@ static int panic_event(struct notifier_block *this,
/* Interface is not ready. */
continue;
+ /*
+ * If we were interrupted while locking xmit_msgs_lock or
+ * waiting_rcv_msgs_lock, the corresponding list may be
+ * corrupted. In this case, drop the items on the list
+ * for safety.
+ */
+ if (!spin_trylock(&intf->xmit_msgs_lock)) {
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ } else
+ spin_unlock(&intf->xmit_msgs_lock);
+
+ if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+ else
+ spin_unlock(&intf->waiting_rcv_msgs_lock);
+
intf->run_to_completion = 1;
intf->handlers->set_run_to_completion(intf->send_info, 1);
}
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 9b409c0f14f7..6e658aa114f1 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -143,8 +143,15 @@ static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
pr_devel("%s: -> %d (size %lld)\n", __func__,
rc, rc == 0 ? size : 0);
if (rc) {
+ /* If we got here via polling and the response was not yet ready */
+ if (rc == OPAL_EMPTY) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return 0;
+ }
+
+ smi->cur_msg = NULL;
spin_unlock_irqrestore(&smi->msg_lock, flags);
- ipmi_free_smi_msg(msg);
+ send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
return 0;
}
@@ -300,7 +307,6 @@ static const struct of_device_id ipmi_powernv_match[] = {
static struct platform_driver powernv_ipmi_driver = {
.driver = {
.name = "ipmi-powernv",
- .owner = THIS_MODULE,
.of_match_table = ipmi_powernv_match,
},
.probe = ipmi_powernv_probe,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 8a45e92ff60c..654f6f36a071 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -64,7 +64,6 @@
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/pnp.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -164,7 +163,7 @@ struct smi_info {
int intf_num;
ipmi_smi_t intf;
struct si_sm_data *si_sm;
- struct si_sm_handlers *handlers;
+ const struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
struct ipmi_smi_msg *waiting_msg;
@@ -263,9 +262,21 @@ struct smi_info {
bool supports_event_msg_buff;
/*
- * Can we clear the global enables receive irq bit?
+ * Can we disable the receive irq bit in the global
+ * enables? There are currently two forms of brokenness: some
+ * systems cannot disable the bit (which is technically within
+ * the spec but a bad idea) and some systems have the bit
+ * forced to zero even though interrupts work (which is
+ * clearly outside the spec). The next bool tells which form
+ * of brokenness is present.
*/
- bool cannot_clear_recv_irq_bit;
+ bool cannot_disable_irq;
+
+ /*
+ * Some systems are broken and cannot set the irq enable
+ * bit, even if they support interrupts.
+ */
+ bool irq_enable_broken;
/*
* Did we get an attention that we did not handle?
@@ -309,9 +320,6 @@ static int num_force_kipmid;
#ifdef CONFIG_PCI
static bool pci_registered;
#endif
-#ifdef CONFIG_ACPI
-static bool pnp_registered;
-#endif
#ifdef CONFIG_PARISC
static bool parisc_registered;
#endif
@@ -558,13 +566,14 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base,
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
- if ((smi_info->irq && !smi_info->interrupt_disabled) ||
- smi_info->cannot_clear_recv_irq_bit)
+ if (((smi_info->irq && !smi_info->interrupt_disabled) ||
+ smi_info->cannot_disable_irq) &&
+ !smi_info->irq_enable_broken)
enables |= IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
- smi_info->irq && !smi_info->interrupt_disabled)
-
+ smi_info->irq && !smi_info->interrupt_disabled &&
+ !smi_info->irq_enable_broken)
enables |= IPMI_BMC_EVT_MSG_INTR;
*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
@@ -928,33 +937,36 @@ static void check_start_timer_thread(struct smi_info *smi_info)
}
}
+static void flush_messages(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+
+ /*
+ * Currently, this function is called only in run-to-completion
+ * mode. This means we are single-threaded, no need for locks.
+ */
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
+ }
+}
+
static void sender(void *send_info,
struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
- enum si_sm_result result;
unsigned long flags;
debug_timestamp("Enqueue");
if (smi_info->run_to_completion) {
/*
- * If we are running to completion, start it and run
- * transactions until everything is clear.
+ * If we are running to completion, start it. Upper
+ * layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
-
- /*
- * Run to completion means we are single-threaded, no
- * need for locks.
- */
-
- result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
- udelay(SI_SHORT_TIMEOUT_USEC);
- result = smi_event_handler(smi_info,
- SI_SHORT_TIMEOUT_USEC);
- }
return;
}
@@ -975,17 +987,10 @@ static void sender(void *send_info,
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
struct smi_info *smi_info = send_info;
- enum si_sm_result result;
smi_info->run_to_completion = i_run_to_completion;
- if (i_run_to_completion) {
- result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
- udelay(SI_SHORT_TIMEOUT_USEC);
- result = smi_event_handler(smi_info,
- SI_SHORT_TIMEOUT_USEC);
- }
- }
+ if (i_run_to_completion)
+ flush_messages(smi_info);
}
/*
@@ -1258,7 +1263,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
atomic_set(&smi_info->req_events, 0);
}
-static struct ipmi_smi_handlers handlers = {
+static const struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.get_smi_info = get_smi_info,
@@ -1267,6 +1272,7 @@ static struct ipmi_smi_handlers handlers = {
.set_need_watch = set_need_watch,
.set_maintenance_mode = set_maintenance_mode,
.set_run_to_completion = set_run_to_completion,
+ .flush_messages = flush_messages,
.poll = poll,
};
@@ -1283,14 +1289,14 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSIZE 1
#ifdef CONFIG_ACPI
-static bool si_tryacpi = 1;
+static bool si_tryacpi = true;
#endif
#ifdef CONFIG_DMI
-static bool si_trydmi = 1;
+static bool si_trydmi = true;
#endif
-static bool si_tryplatform = 1;
+static bool si_tryplatform = true;
#ifdef CONFIG_PCI
-static bool si_trypci = 1;
+static bool si_trypci = true;
#endif
static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
static char *si_type[SI_MAX_PARMS];
@@ -1446,14 +1452,14 @@ static int std_irq_setup(struct smi_info *info)
return rv;
}
-static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return inb(addr + (offset * io->regspacing));
}
-static void port_outb(struct si_sm_io *io, unsigned int offset,
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
@@ -1461,14 +1467,14 @@ static void port_outb(struct si_sm_io *io, unsigned int offset,
outb(b, addr + (offset * io->regspacing));
}
-static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
-static void port_outw(struct si_sm_io *io, unsigned int offset,
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
@@ -1476,14 +1482,14 @@ static void port_outw(struct si_sm_io *io, unsigned int offset,
outw(b << io->regshift, addr + (offset * io->regspacing));
}
-static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
-static void port_outl(struct si_sm_io *io, unsigned int offset,
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
@@ -1556,49 +1562,52 @@ static int port_setup(struct smi_info *info)
return 0;
}
-static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+ unsigned int offset)
{
return readb((io->addr)+(offset * io->regspacing));
}
-static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
- unsigned char b)
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
{
writeb(b, (io->addr)+(offset * io->regspacing));
}
-static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+ unsigned int offset)
{
return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
-static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
- unsigned char b)
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
{
writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
-static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+ unsigned int offset)
{
return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
-static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
- unsigned char b)
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
{
writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#ifdef readq
-static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
{
return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
& 0xff;
}
-static void mem_outq(struct si_sm_io *io, unsigned int offset,
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
@@ -2233,134 +2242,6 @@ static void spmi_find_bmc(void)
try_init_spmi(spmi);
}
}
-
-static int ipmi_pnp_probe(struct pnp_dev *dev,
- const struct pnp_device_id *dev_id)
-{
- struct acpi_device *acpi_dev;
- struct smi_info *info;
- struct resource *res, *res_second;
- acpi_handle handle;
- acpi_status status;
- unsigned long long tmp;
- int rv = -EINVAL;
-
- acpi_dev = pnp_acpi_device(dev);
- if (!acpi_dev)
- return -ENODEV;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_ACPI;
- printk(KERN_INFO PFX "probing via ACPI\n");
-
- handle = acpi_dev->handle;
- info->addr_info.acpi_info.acpi_handle = handle;
-
- /* _IFT tells us the interface type: KCS, BT, etc */
- status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
- goto err_free;
- }
-
- switch (tmp) {
- case 1:
- info->si_type = SI_KCS;
- break;
- case 2:
- info->si_type = SI_SMIC;
- break;
- case 3:
- info->si_type = SI_BT;
- break;
- case 4: /* SSIF, just ignore */
- rv = -ENODEV;
- goto err_free;
- default:
- dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
- goto err_free;
- }
-
- res = pnp_get_resource(dev, IORESOURCE_IO, 0);
- if (res) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
- if (res) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
- }
- if (!res) {
- dev_err(&dev->dev, "no I/O or memory address\n");
- goto err_free;
- }
- info->io.addr_data = res->start;
-
- info->io.regspacing = DEFAULT_REGSPACING;
- res_second = pnp_get_resource(dev,
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
- IORESOURCE_IO : IORESOURCE_MEM,
- 1);
- if (res_second) {
- if (res_second->start > info->io.addr_data)
- info->io.regspacing = res_second->start - info->io.addr_data;
- }
- info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = 0;
-
- /* If _GPE exists, use it; otherwise use standard interrupts */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_SUCCESS(status)) {
- info->irq = tmp;
- info->irq_setup = acpi_gpe_irq_setup;
- } else if (pnp_irq_valid(dev, 0)) {
- info->irq = pnp_irq(dev, 0);
- info->irq_setup = std_irq_setup;
- }
-
- info->dev = &dev->dev;
- pnp_set_drvdata(dev, info);
-
- dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
- res, info->io.regsize, info->io.regspacing,
- info->irq);
-
- rv = add_smi(info);
- if (rv)
- kfree(info);
-
- return rv;
-
-err_free:
- kfree(info);
- return rv;
-}
-
-static void ipmi_pnp_remove(struct pnp_dev *dev)
-{
- struct smi_info *info = pnp_get_drvdata(dev);
-
- cleanup_one_si(info);
-}
-
-static const struct pnp_device_id pnp_dev_table[] = {
- {"IPI0001", 0},
- {"", 0},
-};
-
-static struct pnp_driver ipmi_pnp_driver = {
- .name = DEVICE_NAME,
- .probe = ipmi_pnp_probe,
- .remove = ipmi_pnp_remove,
- .id_table = pnp_dev_table,
-};
-
-MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
#endif
#ifdef CONFIG_DMI
@@ -2654,7 +2535,7 @@ static void ipmi_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id ipmi_pci_devices[] = {
+static const struct pci_device_id ipmi_pci_devices[] = {
{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
{ 0, }
@@ -2669,10 +2550,19 @@ static struct pci_driver ipmi_pci_driver = {
};
#endif /* CONFIG_PCI */
-static const struct of_device_id ipmi_match[];
-static int ipmi_probe(struct platform_device *dev)
-{
#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
+
+static int of_ipmi_probe(struct platform_device *dev)
+{
const struct of_device_id *match;
struct smi_info *info;
struct resource resource;
@@ -2683,9 +2573,9 @@ static int ipmi_probe(struct platform_device *dev)
dev_info(&dev->dev, "probing via device tree\n");
- match = of_match_device(ipmi_match, &dev->dev);
+ match = of_match_device(of_ipmi_match, &dev->dev);
if (!match)
- return -EINVAL;
+ return -ENODEV;
if (!of_device_is_available(np))
return -EINVAL;
@@ -2754,33 +2644,160 @@ static int ipmi_probe(struct platform_device *dev)
kfree(info);
return ret;
}
-#endif
return 0;
}
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
-static int ipmi_remove(struct platform_device *dev)
+#ifdef CONFIG_ACPI
+static int acpi_ipmi_probe(struct platform_device *dev)
{
-#ifdef CONFIG_OF
- cleanup_one_si(dev_get_drvdata(&dev->dev));
+ struct smi_info *info;
+ struct resource *res, *res_second;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ int rv = -EINVAL;
+
+ handle = ACPI_HANDLE(&dev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ info = smi_info_alloc();
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = SI_ACPI;
+ dev_info(&dev->dev, PFX "probing via ACPI\n");
+
+ info->addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
+ goto err_free;
+ }
+
+ switch (tmp) {
+ case 1:
+ info->si_type = SI_KCS;
+ break;
+ case 2:
+ info->si_type = SI_SMIC;
+ break;
+ case 3:
+ info->si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ rv = -ENODEV;
+ goto err_free;
+ default:
+ dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
+ goto err_free;
+ }
+
+ res = platform_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
+ dev_err(&dev->dev, "no I/O or memory address\n");
+ goto err_free;
+ }
+ info->io.addr_data = res->start;
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ res_second = platform_get_resource(dev,
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing =
+ res_second->start - info->io.addr_data;
+ }
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ info->irq = tmp;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else {
+ int irq = platform_get_irq(dev, 0);
+
+ if (irq > 0) {
+ info->irq = irq;
+ info->irq_setup = std_irq_setup;
+ }
+ }
+
+ info->dev = &dev->dev;
+ platform_set_drvdata(dev, info);
+
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ rv = add_smi(info);
+ if (rv)
+ kfree(info);
+
+ return rv;
+
+err_free:
+ kfree(info);
+ return rv;
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
#endif
- return 0;
+
+static int ipmi_probe(struct platform_device *dev)
+{
+ if (of_ipmi_probe(dev) == 0)
+ return 0;
+
+ return acpi_ipmi_probe(dev);
}
-static const struct of_device_id ipmi_match[] =
+static int ipmi_remove(struct platform_device *dev)
{
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
-};
+ struct smi_info *info = dev_get_drvdata(&dev->dev);
+
+ cleanup_one_si(info);
+ return 0;
+}
static struct platform_driver ipmi_driver = {
.driver = {
.name = DEVICE_NAME,
- .of_match_table = ipmi_match,
+ .of_match_table = of_ipmi_match,
+ .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
},
.probe = ipmi_probe,
.remove = ipmi_remove,
@@ -2905,12 +2922,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
return rv;
}
-/*
- * Some BMCs do not support clearing the receive irq bit in the global
- * enables (even if they don't support interrupts on the BMC). Check
- * for this and handle it properly.
- */
-static void check_clr_rcv_irq(struct smi_info *smi_info)
+static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
unsigned char msg[3];
unsigned char *resp;
@@ -2918,12 +2930,8 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
int rv;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!resp) {
- printk(KERN_WARNING PFX "Out of memory allocating response for"
- " global enables command, cannot check recv irq bit"
- " handling.\n");
- return;
- }
+ if (!resp)
+ return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
@@ -2931,9 +2939,9 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from get"
- " global enables command, cannot check recv irq bit"
- " handling.\n");
+ dev_warn(smi_info->dev,
+ "Error getting response from get global enables command: %d\n",
+ rv);
goto out;
}
@@ -2944,27 +2952,44 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING PFX "Invalid return from get global"
- " enables command, cannot check recv irq bit"
- " handling.\n");
+ dev_warn(smi_info->dev,
+ "Invalid return from get global enables command: %ld %x %x %x\n",
+ resp_len, resp[0], resp[1], resp[2]);
rv = -EINVAL;
goto out;
+ } else {
+ *enables = resp[3];
}
- if ((resp[3] & IPMI_BMC_RCV_MSG_INTR) == 0)
- /* Already clear, should work ok. */
- goto out;
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Returns 1 if the command completes with an error (nonzero
+ * completion code); transport failures return a negative errno.
+ */
+static int set_global_enables(struct smi_info *smi_info, u8 enables)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = resp[3] & ~IPMI_BMC_RCV_MSG_INTR;
+ msg[2] = enables;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from set"
- " global enables command, cannot check recv irq bit"
- " handling.\n");
+ dev_warn(smi_info->dev,
+ "Error getting response from set global enables command: %d\n",
+ rv);
goto out;
}
@@ -2974,25 +2999,93 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING PFX "Invalid return from get global"
- " enables command, cannot check recv irq bit"
- " handling.\n");
+ dev_warn(smi_info->dev,
+ "Invalid return from set global enables command: %ld %x %x\n",
+ resp_len, resp[0], resp[1]);
rv = -EINVAL;
goto out;
}
- if (resp[2] != 0) {
+ if (resp[2] != 0)
+ rv = 1;
+
+out:
+ kfree(resp);
+ return rv;
+}
+
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC). Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
+ /* Already clear, should work ok. */
+ return;
+
+ enables &= ~IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->dev,
+ "Cannot check clearing the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
/*
* An error when setting the event buffer bit means
* clearing the bit is not supported.
*/
- printk(KERN_WARNING PFX "The BMC does not support clearing"
- " the recv irq bit, compensating, but the BMC needs to"
- " be fixed.\n");
- smi_info->cannot_clear_recv_irq_bit = true;
+ dev_warn(smi_info->dev,
+ "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ }
+}
+
+/*
+ * Some BMCs do not support setting the interrupt bits in the global
+ * enables even if they support interrupts. Clearly bad, but we can
+ * compensate.
+ */
+static void check_set_rcv_irq(struct smi_info *smi_info)
+{
+ u8 enables = 0;
+ int rv;
+
+ if (!smi_info->irq)
+ return;
+
+ rv = get_global_enables(smi_info, &enables);
+ if (!rv) {
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+ rv = set_global_enables(smi_info, enables);
+ }
+
+ if (rv < 0) {
+ dev_err(smi_info->dev,
+ "Cannot check setting the rcv irq: %d\n", rv);
+ return;
+ }
+
+ if (rv) {
+ /*
+ * An error when setting the event buffer bit means
+ * setting the bit is not supported.
+ */
+ dev_warn(smi_info->dev,
+ "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
+ smi_info->cannot_disable_irq = true;
+ smi_info->irq_enable_broken = true;
}
- out:
- kfree(resp);
}
static int try_enable_event_buffer(struct smi_info *smi_info)
@@ -3313,6 +3406,12 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
setup_dell_poweredge_bt_xaction_handler(smi_info);
}
+static void check_for_broken_irqs(struct smi_info *smi_info)
+{
+ check_clr_rcv_irq(smi_info);
+ check_set_rcv_irq(smi_info);
+}
+
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->thread != NULL)
@@ -3321,7 +3420,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
del_timer_sync(&smi_info->si_timer);
}
-static struct ipmi_default_vals
+static const struct ipmi_default_vals
{
int type;
int port;
@@ -3490,10 +3589,9 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
- check_clr_rcv_irq(new_smi);
-
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
+ check_for_broken_irqs(new_smi);
new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
@@ -3692,13 +3790,6 @@ static int init_ipmi_si(void)
}
#endif
-#ifdef CONFIG_ACPI
- if (si_tryacpi) {
- pnp_register_driver(&ipmi_pnp_driver);
- pnp_registered = true;
- }
-#endif
-
#ifdef CONFIG_DMI
if (si_trydmi)
dmi_find_bmc();
@@ -3850,10 +3941,6 @@ static void cleanup_ipmi_si(void)
if (pci_registered)
pci_unregister_driver(&ipmi_pci_driver);
#endif
-#ifdef CONFIG_ACPI
- if (pnp_registered)
- pnp_unregister_driver(&ipmi_pnp_driver);
-#endif
#ifdef CONFIG_PARISC
if (parisc_registered)
unregister_parisc_driver(&ipmi_parisc_driver);
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index df89f73475fb..a705027c0493 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -46,8 +46,8 @@ struct si_sm_data;
* this interface.
*/
struct si_sm_io {
- unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
- void (*outputb)(struct si_sm_io *io,
+ unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset);
+ void (*outputb)(const struct si_sm_io *io,
unsigned int offset,
unsigned char b);
@@ -135,7 +135,7 @@ struct si_sm_handlers {
};
/* Current state machines that we can use. */
-extern struct si_sm_handlers kcs_smi_handlers;
-extern struct si_sm_handlers smic_smi_handlers;
-extern struct si_sm_handlers bt_smi_handlers;
+extern const struct si_sm_handlers kcs_smi_handlers;
+extern const struct si_sm_handlers smic_smi_handlers;
+extern const struct si_sm_handlers bt_smi_handlers;
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index c8e77afa8b96..8f7c73ff58f2 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -589,7 +589,7 @@ static int smic_size(void)
return sizeof(struct si_sm_data);
}
-struct si_sm_handlers smic_smi_handlers = {
+const struct si_sm_handlers smic_smi_handlers = {
.init_data = init_smic_data,
.start_transaction = start_smic_transaction,
.get_result = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 207689c444a8..877205d22046 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1136,6 +1136,10 @@ module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs,
"The default IPMB slave address for the controller.");
+static bool alerts_broken;
+module_param(alerts_broken, bool, 0);
+MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller.");
+
/*
* Bit 0 enables message debugging, bit 1 enables state debugging, and
* bit 2 enables timing debugging. This is an array indexed by
@@ -1154,11 +1158,11 @@ static int use_thread;
module_param(use_thread, int, 0);
MODULE_PARM_DESC(use_thread, "Use the thread interface.");
-static bool ssif_tryacpi = 1;
+static bool ssif_tryacpi = true;
module_param_named(tryacpi, ssif_tryacpi, bool, 0);
MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
-static bool ssif_trydmi = 1;
+static bool ssif_trydmi = true;
module_param_named(trydmi, ssif_trydmi, bool, 0);
MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
@@ -1582,6 +1586,10 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
}
+ /* Some systems don't behave well if you enable alerts. */
+ if (alerts_broken)
+ goto found;
+
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
@@ -1787,7 +1795,7 @@ skip_addr:
}
#ifdef CONFIG_ACPI
-static struct acpi_device_id ssif_acpi_match[] = {
+static const struct acpi_device_id ssif_acpi_match[] = {
{ "IPI0001", 0 },
{ },
};
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index fdb0f9b3fe45..8069b361b8dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -243,17 +243,15 @@ int misc_register(struct miscdevice * misc)
* @misc: device to unregister
*
* Unregister a miscellaneous device that was previously
- * successfully registered with misc_register(). Success
- * is indicated by a zero return, a negative errno code
- * indicates an error.
+ * successfully registered with misc_register().
*/
-int misc_deregister(struct miscdevice *misc)
+void misc_deregister(struct miscdevice *misc)
{
int i = DYNAMIC_MINORS - misc->minor - 1;
if (WARN_ON(list_empty(&misc->list)))
- return -EINVAL;
+ return;
mutex_lock(&misc_mtx);
list_del(&misc->list);
@@ -261,7 +259,6 @@ int misc_deregister(struct miscdevice *misc)
if (i < DYNAMIC_MINORS && i >= 0)
clear_bit(i, misc_minors);
mutex_unlock(&misc_mtx);
- return 0;
}
EXPORT_SYMBOL(misc_register);
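With misc_deregister() now returning void, callers simply drop their error handling; a usage sketch with a hypothetical device:

	static struct miscdevice my_misc;	/* assume registered earlier */

	misc_deregister(&my_misc);		/* nothing left to check */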
@@ -281,10 +278,9 @@ static char *misc_devnode(struct device *dev, umode_t *mode)
static int __init misc_init(void)
{
int err;
+ struct proc_dir_entry *ret;
-#ifdef CONFIG_PROC_FS
- proc_create("misc", 0, NULL, &misc_proc_fops);
-#endif
+ ret = proc_create("misc", 0, NULL, &misc_proc_fops);
misc_class = class_create(THIS_MODULE, "misc");
err = PTR_ERR(misc_class);
if (IS_ERR(misc_class))
@@ -300,7 +296,8 @@ fail_printk:
printk("unable to get major %d for misc devices\n", MISC_MAJOR);
class_destroy(misc_class);
fail_remove:
- remove_proc_entry("misc", NULL);
+ if (ret)
+ remove_proc_entry("misc", NULL);
return err;
}
subsys_initcall(misc_init);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 9df78e2cc45d..97c2d8d433d6 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -702,7 +702,7 @@ static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
seq_printf(seq, "%ds%s\n", nvram[10],
nvram[10] < 8 ? ", no memory test" : "");
- vmode = (nvram[14] << 8) || nvram[15];
+ vmode = (nvram[14] << 8) | nvram[15];
seq_printf(seq,
"Video mode : %s colors, %d columns, %s %s monitor\n",
colors[vmode & 7],
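The old expression used logical OR, which collapses the whole mode word to 0 or 1; a worked illustration:

	/* With nvram[14] = 0x01 and nvram[15] = 0x20:
	 *   (0x01 << 8) || 0x20  ==  1       (boolean result - wrong)
	 *   (0x01 << 8) |  0x20  ==  0x0120  (16-bit video mode - right)
	 */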
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 014c9d90d297..f5a45d887a37 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -430,7 +430,7 @@ static int tosh_probe(void)
int i,major,minor,day,year,month,flag;
unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
SMMRegisters regs;
- void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
+ void __iomem *bios = ioremap(0xf0000, 0x10000);
if (!bios)
return -ENOMEM;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 283f00a7f036..1082d4bb016a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev);
- chip->cdev.owner = chip->pdev->driver->owner;
cdev_init(&chip->cdev, &tpm_fops);
+ chip->cdev.owner = chip->pdev->driver->owner;
+ chip->cdev.kobj.parent = &chip->dev.kobj;
return chip;
}
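The reorder matters because cdev_init() zeroes the cdev before installing the fops, so an owner assigned beforehand was silently wiped; roughly what that helper does (abridged sketch of the char-dev core):

void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);	/* clears .owner too */
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}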
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 44f9d20c19ac..1267322595da 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
return -ENODEV;
}
+ /* At least some versions of AMI BIOS have a bug where the TPM2
+ * table carries a zero address for the control area, in which
+ * case we must fail.
+ */
+ if (!buf->control_area_pa) {
+ dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
+ return -EINVAL;
+ }
+
if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
dev_err(dev, "TPM2 ACPI table has wrong size");
return -EINVAL;
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index d8266bc2ae35..9418300214e9 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -193,14 +193,16 @@ static int xilly_probe(struct pci_dev *pdev,
}
/*
- * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
- * is the right thing. But some unclever PCIe drivers report it's OK
- * when the hardware drops those 64-bit PCIe packets. So trust
- * nobody and use 32 bits DMA addressing in any case.
+ * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
+ * even when the PCIe driver claims that a 64-bit mask is OK. On the
+ * other hand, on some architectures, 64-bit addressing is mandatory.
+ * So fall back to the 64-bit mask only when the alternative is failing outright.
*/
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
endpoint->dma_using_dac = 0;
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ endpoint->dma_using_dac = 1;
} else {
dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
return -ENODEV;
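On later kernels the same fallback is usually spelled with dma_set_mask_and_coherent(); an equivalent sketch, assuming the same pdev and endpoint:

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		endpoint->dma_using_dac = 0;
	else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		endpoint->dma_using_dac = 1;
	else
		return -ENODEV;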
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index c4cf075a2320..d08b3e5985be 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
obj-$(CONFIG_COMMON_CLK) += clk-composite.o
obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o
-obj-$(CONFIG_COMMON_CLK) += clk-gpio-gate.o
+obj-$(CONFIG_COMMON_CLK) += clk-gpio.o
ifeq ($(CONFIG_OF), y)
obj-$(CONFIG_COMMON_CLK) += clk-conf.o
endif
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 152dcb3f7b5f..61566bcefa53 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
h32mxclk->pmc = pmc;
clk = clk_register(NULL, &h32mxclk->hw);
- if (!clk)
+ if (!clk) {
+ kfree(h32mxclk);
return;
+ }
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c2400456a044..fd7247deabdc 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
ret = request_irq(osc->irq, clk_main_osc_irq_handler,
IRQF_TRIGGER_HIGH, name, osc);
- if (ret)
+ if (ret) {
+ kfree(osc);
return ERR_PTR(ret);
+ }
if (bypass)
pmc_write(pmc, AT91_CKGR_MOR,
@@ -612,17 +614,12 @@ void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
int num_parents;
unsigned int irq;
const char *name = np->name;
- int i;
num_parents = of_clk_get_parent_count(np);
if (num_parents <= 0 || num_parents > 2)
return;
- for (i = 0; i < num_parents; ++i) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
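of_clk_parent_fill() replaces the open-coded loops being removed in this series; its semantics, sketched from the common clk framework helper:

/* Fills parents[] from of_clk_get_parent_name(np, i), stopping at the
 * first missing parent, and returns the number of entries written. */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size);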
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index f98eafe9b12d..620ea323356b 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
ret = request_irq(master->irq, clk_master_irq_handler,
IRQF_TRIGGER_HIGH, "clk-master", master);
- if (ret)
+ if (ret) {
+ kfree(master);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &master->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(master->irq, master);
kfree(master);
+ }
return clk;
}
@@ -218,7 +222,6 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
{
struct clk *clk;
int num_parents;
- int i;
unsigned int irq;
const char *parent_names[MASTER_SOURCE_MAX];
const char *name = np->name;
@@ -228,11 +231,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
return;
- for (i = 0; i < num_parents; ++i) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index df2c1afa52b4..e4d7b574f1ea 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -134,7 +134,7 @@ at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
{
- struct clk *parent;
+ struct clk_hw *parent;
unsigned long parent_rate;
int shift = 0;
@@ -142,8 +142,8 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
return;
if (periph->range.max) {
- parent = clk_get_parent_by_index(periph->hw.clk, 0);
- parent_rate = __clk_get_rate(parent);
+ parent = clk_hw_get_parent_by_index(&periph->hw, 0);
+ parent_rate = clk_hw_get_rate(parent);
if (!parent_rate)
return;
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index cbbe40377ad6..18b60f4895a6 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -346,12 +346,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
id ? "clk-pllb" : "clk-plla", pll);
- if (ret)
+ if (ret) {
+ kfree(pll);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(pll->irq, pll);
kfree(pll);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 8c86c0f7847a..14b270b85fec 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -54,46 +54,47 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
return parent_rate >> pres;
}
-static long clk_programmable_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw)
+static int clk_programmable_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk *parent = NULL;
+ struct clk_hw *parent;
long best_rate = -EINVAL;
unsigned long parent_rate;
unsigned long tmp_rate;
int shift;
int i;
- for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
- parent = clk_get_parent_by_index(hw->clk, i);
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
for (shift = 0; shift < PROG_PRES_MASK; shift++) {
tmp_rate = parent_rate >> shift;
- if (tmp_rate <= rate)
+ if (tmp_rate <= req->rate)
break;
}
- if (tmp_rate > rate)
+ if (tmp_rate > req->rate)
continue;
- if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
+ if (best_rate < 0 ||
+ (req->rate - tmp_rate) < (req->rate - best_rate)) {
best_rate = tmp_rate;
- *best_parent_rate = parent_rate;
- *best_parent_hw = __clk_get_hw(parent);
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
}
if (!best_rate)
break;
}
- return best_rate;
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+ return 0;
}
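The new determine_rate contract bundles all in/out parameters into a single request; the fields this code touches, as defined by the clk framework at this point:

struct clk_rate_request {
	unsigned long rate;		/* in: target, out: chosen rate */
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long best_parent_rate;	/* out: chosen parent rate */
	struct clk_hw *best_parent_hw;	/* out: chosen parent */
};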
static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
@@ -230,7 +231,6 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
{
int num;
u32 id;
- int i;
struct clk *clk;
int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
@@ -241,11 +241,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
return;
- for (i = 0; i < num_parents; ++i) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
num = of_get_child_count(np);
if (!num || num > (PROG_ID_MAX + 1))
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 98a84a865fe1..d0d5076a9b94 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -10,8 +10,10 @@
*
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
#include <linux/of.h>
@@ -371,17 +373,12 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
const char *parent_names[2];
int num_parents;
const char *name = np->name;
- int i;
num_parents = of_clk_get_parent_count(np);
if (num_parents <= 0 || num_parents > 2)
return;
- for (i = 0; i < num_parents; ++i) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
@@ -449,17 +446,12 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
const char *parent_names[2];
int num_parents;
const char *name = np->name;
- int i;
num_parents = of_clk_get_parent_count(np);
if (num_parents != 2)
return;
- for (i = 0; i < num_parents; ++i) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index 3817ea865ca2..a7f8501cfa05 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -145,7 +145,6 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
struct at91_pmc *pmc)
{
struct clk *clk;
- int i;
int num_parents;
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
@@ -154,11 +153,7 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
return;
- for (i = 0; i < num_parents; i++) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index a76d03fd577b..58008b3e8bc1 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
ret = request_irq(sys->irq, clk_system_irq_handler,
IRQF_TRIGGER_HIGH, name, sys);
- if (ret)
+ if (ret) {
+ kfree(sys);
return ERR_PTR(ret);
+ }
}
clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(sys->irq, sys);
kfree(sys);
+ }
return clk;
}
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0cbd2b1ff59..8ab8502778a2 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -56,47 +56,43 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
}
-static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw)
+static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk *parent = NULL;
+ struct clk_hw *parent;
long best_rate = -EINVAL;
unsigned long tmp_rate;
int best_diff = -1;
int tmp_diff;
int i;
- for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
int div;
- parent = clk_get_parent_by_index(hw->clk, i);
+ parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
unsigned long tmp_parent_rate;
- tmp_parent_rate = rate * div;
- tmp_parent_rate = __clk_round_rate(parent,
+ tmp_parent_rate = req->rate * div;
+ tmp_parent_rate = clk_hw_round_rate(parent,
tmp_parent_rate);
tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
- if (tmp_rate < rate)
- tmp_diff = rate - tmp_rate;
+ if (tmp_rate < req->rate)
+ tmp_diff = req->rate - tmp_rate;
else
- tmp_diff = tmp_rate - rate;
+ tmp_diff = tmp_rate - req->rate;
if (best_diff < 0 || best_diff > tmp_diff) {
best_rate = tmp_rate;
best_diff = tmp_diff;
- *best_parent_rate = tmp_parent_rate;
- *best_parent_hw = __clk_get_hw(parent);
+ req->best_parent_rate = tmp_parent_rate;
+ req->best_parent_hw = parent;
}
- if (!best_diff || tmp_rate < rate)
+ if (!best_diff || tmp_rate < req->rate)
break;
}
@@ -104,7 +100,11 @@ static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
break;
}
- return best_rate;
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+ return 0;
}
static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -273,7 +273,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
- struct clk *parent = __clk_get_parent(hw->clk);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long bestrate = 0;
int bestdiff = -1;
unsigned long tmprate;
@@ -287,7 +287,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
tmp_parent_rate = rate * usb->divisors[i];
- tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
+ tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
if (tmprate < rate)
tmpdiff = rate - tmprate;
@@ -373,7 +373,6 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
struct at91_pmc *pmc)
{
struct clk *clk;
- int i;
int num_parents;
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
@@ -382,11 +381,7 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
return;
- for (i = 0; i < num_parents; i++) {
- parent_names[i] = of_clk_get_parent_name(np, i);
- if (!parent_names[i])
- return;
- }
+ of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ae3263bc1476..30dd697b1668 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
ret = request_irq(utmi->irq, clk_utmi_irq_handler,
IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
- if (ret)
+ if (ret) {
+ kfree(utmi);
return ERR_PTR(ret);
+ }
clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ free_irq(utmi->irq, utmi);
kfree(utmi);
+ }
return clk;
}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 39be2be82b0a..d1844f1f3729 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -125,7 +125,6 @@ static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_and_handler(virq, &pmc_irq,
handle_level_irq);
- set_irq_flags(virq, IRQF_VALID);
irq_set_chip_data(virq, pmc);
return 0;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index eb8e5dc9076d..8b87771c69b2 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -59,71 +59,63 @@ static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
-extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_sama5d3_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-#if defined(CONFIG_HAVE_AT91_UTMI)
-extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
- struct at91_pmc *pmc);
-#endif
-
-#if defined(CONFIG_HAVE_AT91_USB_CLK)
-extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-#endif
-
-#if defined(CONFIG_HAVE_AT91_SMD)
-extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
- struct at91_pmc *pmc);
-#endif
-
-#if defined(CONFIG_HAVE_AT91_H32MX)
-extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
- struct at91_pmc *pmc);
-#endif
+void of_at91sam9260_clk_slow_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+void of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+void of_sama5d4_clk_h32mx_setup(struct device_node *np,
+ struct at91_pmc *pmc);
#endif /* __PMC_H_ */
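
The pmc.h rewrite above drops three redundancies: the extern keyword (function declarations are extern by default), the __init annotation (section placement is decided where the function is defined, so repeating it in the prototype adds nothing), and the CONFIG_HAVE_AT91_* guards (an unused declaration is harmless when the corresponding code is compiled out). Before and after, for a made-up of_foo_clk_setup():

/* before: redundant storage-class and section keywords on a prototype */
extern void __init of_foo_clk_setup(struct device_node *np,
				    struct at91_pmc *pmc);

/* after: an equivalent, shorter declaration */
void of_foo_clk_setup(struct device_node *np, struct at91_pmc *pmc);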
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e19c09cd9645..f630e1bbdcfe 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node,
struct iproc_asiu_clk *asiu_clk;
const char *clk_name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node,
err_clk_register:
for (i = 0; i < num_clks; i++)
- kfree(asiu->clks[i].name);
+ clk_unregister(asiu->clk_data.clks[i]);
iounmap(asiu->gate_base);
err_iomap_gate:
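
The asiu fix above removes an allocation that was never needed: of_property_read_string_index() does not copy the string into a caller buffer, it makes clk_name point at the string already stored in the device tree blob, so there is nothing to allocate or free. The call in isolation (node and the loop index i are assumed from the surrounding code):

	const char *clk_name;
	int ret;

	/* On success, clk_name points into the DT blob: no kzalloc, no kfree. */
	ret = of_property_read_string_index(node, "clock-output-names",
					    i, &clk_name);
	if (ret)
		return ret;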
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 46fb84bc2674..2dda4e8295a9 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
val = readl(pll->pll_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
- ndiv = ndiv_int << ctrl->ndiv_int.shift;
+ ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
bit_mask(ctrl->ndiv_frac.width);
if (ndiv_frac != 0)
- ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;
+ ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
+ ndiv_frac;
}
val = readl(pll->pll_base + ctrl->pdiv.offset);
@@ -655,10 +656,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
memset(&init, 0, sizeof(init));
parent_name = node->name;
- clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
- if (WARN_ON(!clk_name))
- goto err_clk_register;
-
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
if (WARN_ON(ret))
@@ -690,10 +687,8 @@ void __init iproc_pll_clk_setup(struct device_node *node,
return;
err_clk_register:
- for (i = 0; i < num_clks; i++) {
- kfree(pll->clks[i].name);
+ for (i = 0; i < num_clks; i++)
clk_unregister(pll->clk_data.clks[i]);
- }
err_pll_register:
if (pll->asiu_base)
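
The two (u64) casts above fix an integer-promotion bug: ndiv_int is 32 bits wide, so ndiv_int << shift is evaluated as a 32-bit shift and the high bits are discarded before the result is widened into the 64-bit ndiv. Casting the operand first makes the shift itself 64-bit. A standalone illustration with made-up values:

	u32 ndiv_int = 0x80;			/* 2^7, example value */
	u32 shift = 28;

	u64 wrong = ndiv_int << shift;		/* 32-bit shift: 2^35 mod 2^32 == 0 */
	u64 right = (u64)ndiv_int << shift;	/* 64-bit shift: 0x800000000 kept */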
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 79a98506c433..3a15347b4233 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -1010,25 +1011,23 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
struct bcm_clk_div *div = &bcm_clk->u.peri->div;
if (!divider_exists(div))
- return __clk_get_rate(hw->clk);
+ return clk_hw_get_rate(hw);
/* Quietly avoid a zero rate */
return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
rate ? rate : 1, *parent_rate, NULL);
}
-static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate, struct clk_hw **best_parent)
+static int kona_peri_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct clk *clk = hw->clk;
- struct clk *current_parent;
+ struct clk_hw *current_parent;
unsigned long parent_rate;
unsigned long best_delta;
unsigned long best_rate;
u32 parent_count;
+ long rate;
u32 which;
/*
@@ -1037,18 +1036,25 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
*/
WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
parent_count = (u32)bcm_clk->init_data.num_parents;
- if (parent_count < 2)
- return kona_peri_clk_round_rate(hw, rate, best_parent_rate);
+ if (parent_count < 2) {
+ rate = kona_peri_clk_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+ return 0;
+ }
/* Unless we can do better, stick with current parent */
- current_parent = clk_get_parent(clk);
- parent_rate = __clk_get_rate(current_parent);
- best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
- best_delta = abs(best_rate - rate);
+ current_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(current_parent);
+ best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
+ best_delta = abs(best_rate - req->rate);
/* Check whether any other parent clock can produce a better result */
for (which = 0; which < parent_count; which++) {
- struct clk *parent = clk_get_parent_by_index(clk, which);
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
unsigned long delta;
unsigned long other_rate;
@@ -1057,18 +1063,20 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
continue;
/* We don't support CLK_SET_RATE_PARENT */
- parent_rate = __clk_get_rate(parent);
- other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
- delta = abs(other_rate - rate);
+ parent_rate = clk_hw_get_rate(parent);
+ other_rate = kona_peri_clk_round_rate(hw, req->rate,
+ &parent_rate);
+ delta = abs(other_rate - req->rate);
if (delta < best_delta) {
best_delta = delta;
best_rate = other_rate;
- *best_parent = __clk_get_hw(parent);
- *best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ req->best_parent_rate = parent_rate;
}
}
- return best_rate;
+ req->rate = best_rate;
+ return 0;
}
static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
@@ -1130,7 +1138,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (parent_rate > (unsigned long)LONG_MAX)
return -EINVAL;
- if (rate == __clk_get_rate(hw->clk))
+ if (rate == clk_hw_get_rate(hw))
return 0;
if (!divider_exists(div))
@@ -1249,6 +1257,7 @@ bool __init kona_ccu_init(struct ccu_data *ccu)
unsigned long flags;
unsigned int which;
struct clk **clks = ccu->clk_data.clks;
+ struct kona_clk *kona_clks = ccu->kona_clks;
bool success = true;
flags = ccu_lock(ccu);
@@ -1259,7 +1268,7 @@ bool __init kona_ccu_init(struct ccu_data *ccu)
if (!clks[which])
continue;
- bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
+ bcm_clk = &kona_clks[which];
success &= __kona_clk_init(bcm_clk);
}
diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c
index f4b8d324b083..1c2294d3ba85 100644
--- a/drivers/clk/berlin/berlin2-pll.c
+++ b/drivers/clk/berlin/berlin2-pll.c
@@ -61,7 +61,7 @@ berlin2_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK;
rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK;
if (rfdiv == 0) {
- pr_warn("%s has zero rfdiv\n", __clk_get_name(hw->clk));
+ pr_warn("%s has zero rfdiv\n", clk_hw_get_name(hw));
rfdiv = 1;
}
@@ -70,7 +70,7 @@ berlin2_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
vcodiv = map->vcodiv[vcodivsel];
if (vcodiv == 0) {
pr_warn("%s has zero vcodiv (index %d)\n",
- __clk_get_name(hw->clk), vcodivsel);
+ clk_hw_get_name(hw), vcodivsel);
vcodiv = 1;
}
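
The berlin2-pll hunks belong to the same provider-API migration seen across this merge: clock providers stop reaching through hw->clk into the consumer API and use the clk_hw_* accessors added in drivers/clk/clk.c further down. A sketch of a debug helper using only the new accessors, with the old spellings noted alongside:

#include <linux/clk-provider.h>

static void my_clk_debug(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw); /* was __clk_get_parent(hw->clk) */
	unsigned int n = clk_hw_get_num_parents(hw);   /* was __clk_get_num_parents(hw->clk) */

	pr_debug("%s: rate %lu, flags %#lx, %u parents, parent %s\n",
		 clk_hw_get_name(hw),		/* was __clk_get_name(hw->clk) */
		 clk_hw_get_rate(hw),		/* was __clk_get_rate(hw->clk) */
		 clk_hw_get_flags(hw),		/* was __clk_get_flags(hw->clk) */
		 n, parent ? clk_hw_get_name(parent) : "none");
}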
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index e619285c6def..3bcd42fbb55e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
-#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 6b950ca8b711..dd295e498309 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -32,11 +32,6 @@ void __init bcm2835_init_clocks(void)
struct clk *clk;
int ret;
- clk = clk_register_fixed_rate(NULL, "sys_pclk", NULL, CLK_IS_ROOT,
- 250000000);
- if (IS_ERR(clk))
- pr_err("sys_pclk not registered\n");
-
clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
126000000);
if (IS_ERR(clk))
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index f01164fada5d..01877f64eff6 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -309,7 +310,7 @@ static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
if (!mul)
div = CDCE706_DIVIDER_DIVIDER_MAX;
- if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_diff = rate;
unsigned long best_div = 0;
struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 85fafb41e6ca..089bf88ffa8d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -10,6 +10,7 @@
* Copyright (C) 2014, Topic Embedded Products
* Licenced under GPL
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 715eec1a9902..ff4ef4f1df62 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -9,7 +9,6 @@
* (at your option) any later version.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 616f5aef3c26..4735de0660cc 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -55,78 +55,77 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
-static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p)
+static int clk_composite_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
- struct clk *parent;
+ struct clk_hw *parent;
unsigned long parent_rate;
long tmp_rate, best_rate = 0;
unsigned long rate_diff;
unsigned long best_rate_diff = ULONG_MAX;
+ long rate;
int i;
if (rate_hw && rate_ops && rate_ops->determine_rate) {
__clk_hw_set_clk(rate_hw, hw);
- return rate_ops->determine_rate(rate_hw, rate, min_rate,
- max_rate,
- best_parent_rate,
- best_parent_p);
+ return rate_ops->determine_rate(rate_hw, req);
} else if (rate_hw && rate_ops && rate_ops->round_rate &&
mux_hw && mux_ops && mux_ops->set_parent) {
- *best_parent_p = NULL;
+ req->best_parent_hw = NULL;
- if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
- parent = clk_get_parent(mux_hw->clk);
- *best_parent_p = __clk_get_hw(parent);
- *best_parent_rate = __clk_get_rate(parent);
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
+ parent = clk_hw_get_parent(mux_hw);
+ req->best_parent_hw = parent;
+ req->best_parent_rate = clk_hw_get_rate(parent);
- return rate_ops->round_rate(rate_hw, rate,
- best_parent_rate);
+ rate = rate_ops->round_rate(rate_hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+ return 0;
}
- for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
- parent = clk_get_parent_by_index(mux_hw->clk, i);
+ for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
+ parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
- tmp_rate = rate_ops->round_rate(rate_hw, rate,
+ tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
&parent_rate);
if (tmp_rate < 0)
continue;
- rate_diff = abs(rate - tmp_rate);
+ rate_diff = abs(req->rate - tmp_rate);
- if (!rate_diff || !*best_parent_p
+ if (!rate_diff || !req->best_parent_hw
|| best_rate_diff > rate_diff) {
- *best_parent_p = __clk_get_hw(parent);
- *best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ req->best_parent_rate = parent_rate;
best_rate_diff = rate_diff;
best_rate = tmp_rate;
}
if (!rate_diff)
- return rate;
+ return 0;
}
- return best_rate;
+ req->rate = best_rate;
+ return 0;
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
__clk_hw_set_clk(mux_hw, hw);
- return mux_ops->determine_rate(mux_hw, rate, min_rate,
- max_rate, best_parent_rate,
- best_parent_p);
+ return mux_ops->determine_rate(mux_hw, req);
} else {
pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
- return 0;
+ return -EINVAL;
}
}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 706b5783c360..f24d0a19ae70 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -78,12 +78,14 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
}
static unsigned int _get_div(const struct clk_div_table *table,
- unsigned int val, unsigned long flags)
+ unsigned int val, unsigned long flags, u8 width)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return val;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << val;
+ if (flags & CLK_DIVIDER_MAX_AT_ZERO)
+ return val ? val : div_mask(width) + 1;
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -101,12 +103,14 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
}
static unsigned int _get_val(const struct clk_div_table *table,
- unsigned int div, unsigned long flags)
+ unsigned int div, unsigned long flags, u8 width)
{
if (flags & CLK_DIVIDER_ONE_BASED)
return div;
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return __ffs(div);
+ if (flags & CLK_DIVIDER_MAX_AT_ZERO)
+ return (div == div_mask(width) + 1) ? 0 : div;
if (table)
return _get_table_val(table, div);
return div - 1;
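
CLK_DIVIDER_MAX_AT_ZERO, introduced above, models dividers where a register value of zero selects the largest division factor instead of bypass. Since div_mask(width) is (1 << width) - 1, _get_div() and _get_val() stay inverses of each other; worked by hand for a 4-bit field:

/* width == 4: div_mask(width) == 0xf, so the largest divider is 0xf + 1 == 16 */
_get_div(NULL, 0, CLK_DIVIDER_MAX_AT_ZERO, 4);	/* -> 16: raw 0 selects max */
_get_div(NULL, 5, CLK_DIVIDER_MAX_AT_ZERO, 4);	/* ->  5: nonzero taken as-is */
_get_val(NULL, 16, CLK_DIVIDER_MAX_AT_ZERO, 4);	/* ->  0: max encodes back to 0 */
_get_val(NULL,  5, CLK_DIVIDER_MAX_AT_ZERO, 4);	/* ->  5: round trip */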
@@ -117,13 +121,14 @@ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
const struct clk_div_table *table,
unsigned long flags)
{
+ struct clk_divider *divider = to_clk_divider(hw);
unsigned int div;
- div = _get_div(table, val, flags);
+ div = _get_div(table, val, flags, divider->width);
if (!div) {
WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return parent_rate;
}
@@ -285,7 +290,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
maxdiv = _get_maxdiv(table, width, flags);
- if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
bestdiv = _div_round(table, parent_rate, rate, flags);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
@@ -311,7 +316,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
rate * i);
now = DIV_ROUND_UP(parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
@@ -323,7 +328,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!bestdiv) {
bestdiv = _get_maxdiv(table, width, flags);
- *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
}
return bestdiv;
@@ -351,7 +356,8 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= div_mask(divider->width);
- bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+ bestdiv = _get_div(divider->table, bestdiv, divider->flags,
+ divider->width);
return DIV_ROUND_UP(*prate, bestdiv);
}
@@ -370,7 +376,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
if (!_is_valid_div(table, div, flags))
return -EINVAL;
- value = _get_val(table, div, flags);
+ value = _get_val(table, div, flags, width);
return min_t(unsigned int, value, div_mask(width));
}
@@ -389,6 +395,8 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider->width) << (divider->shift + 16);
@@ -401,6 +409,8 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
return 0;
}
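
The __acquire()/__release() calls added here (and in the fractional-divider, gate, mux and clk.c hunks below) compile to nothing; they exist so that sparse's context checker sees a balanced lock count on the path where the optional spinlock pointer is NULL. The pattern in isolation:

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);	/* sparse: pretend we took it */

	/* ... register update goes here ... */

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);	/* sparse: pretend we dropped it */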
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 73a8d0ff530c..bac4553f04b8 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -6,7 +6,6 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fccabe497f6e..83de57aeceea 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -41,12 +41,11 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
- if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
best_parent = (rate / fix->mult) * fix->div;
- *prate = __clk_round_rate(__clk_get_parent(hw->clk),
- best_parent);
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
return (*prate / fix->div) * fix->mult;
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 140eb5844dc4..e85f856b8592 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -27,11 +27,15 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
+ else
+ __acquire(fd->lock);
val = clk_readl(fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
+ else
+ __release(fd->lock);
m = (val & fd->mmask) >> fd->mshift;
n = (val & fd->nmask) >> fd->nshift;
@@ -80,6 +84,8 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
+ else
+ __acquire(fd->lock);
val = clk_readl(fd->reg);
val &= ~(fd->mmask | fd->nmask);
@@ -88,6 +94,8 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
+ else
+ __release(fd->lock);
return 0;
}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 551dd0672794..de0b322f5f58 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -52,6 +52,8 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
+ else
+ __acquire(gate->lock);
if (gate->flags & CLK_GATE_HIWORD_MASK) {
reg = BIT(gate->bit_idx + 16);
@@ -70,6 +72,8 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
+ else
+ __release(gate->lock);
}
static int clk_gate_enable(struct clk_hw *hw)
diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c
deleted file mode 100644
index f564e624fb93..000000000000
--- a/drivers/clk/clk-gpio-gate.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Gpio gated clock implementation
- */
-
-#include <linux/clk-provider.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
-#include <linux/err.h>
-#include <linux/device.h>
-
-/**
- * DOC: basic gpio gated clock which can be enabled and disabled
- * with gpio output
- * Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gpio
- * rate - inherits rate from parent. No clk_set_rate support
- * parent - fixed parent. No clk_set_parent support
- */
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-static int clk_gpio_gate_enable(struct clk_hw *hw)
-{
- struct clk_gpio *clk = to_clk_gpio(hw);
-
- gpiod_set_value(clk->gpiod, 1);
-
- return 0;
-}
-
-static void clk_gpio_gate_disable(struct clk_hw *hw)
-{
- struct clk_gpio *clk = to_clk_gpio(hw);
-
- gpiod_set_value(clk->gpiod, 0);
-}
-
-static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
-{
- struct clk_gpio *clk = to_clk_gpio(hw);
-
- return gpiod_get_value(clk->gpiod);
-}
-
-const struct clk_ops clk_gpio_gate_ops = {
- .enable = clk_gpio_gate_enable,
- .disable = clk_gpio_gate_disable,
- .is_enabled = clk_gpio_gate_is_enabled,
-};
-EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
-
-/**
- * clk_register_gpio - register a gpip clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
- * @flags: clock flags
- */
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
- unsigned long flags)
-{
- struct clk_gpio *clk_gpio = NULL;
- struct clk *clk = ERR_PTR(-EINVAL);
- struct clk_init_data init = { NULL };
- unsigned long gpio_flags;
- int err;
-
- if (active_low)
- gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
- else
- gpio_flags = GPIOF_OUT_INIT_LOW;
-
- if (dev)
- err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
- else
- err = gpio_request_one(gpio, gpio_flags, name);
-
- if (err) {
- pr_err("%s: %s: Error requesting clock control gpio %u\n",
- __func__, name, gpio);
- return ERR_PTR(err);
- }
-
- if (dev)
- clk_gpio = devm_kzalloc(dev, sizeof(struct clk_gpio),
- GFP_KERNEL);
- else
- clk_gpio = kzalloc(sizeof(struct clk_gpio), GFP_KERNEL);
-
- if (!clk_gpio) {
- clk = ERR_PTR(-ENOMEM);
- goto clk_register_gpio_gate_err;
- }
-
- init.name = name;
- init.ops = &clk_gpio_gate_ops;
- init.flags = flags | CLK_IS_BASIC;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
- clk_gpio->gpiod = gpio_to_desc(gpio);
- clk_gpio->hw.init = &init;
-
- clk = clk_register(dev, &clk_gpio->hw);
-
- if (!IS_ERR(clk))
- return clk;
-
- if (!dev)
- kfree(clk_gpio);
-
-clk_register_gpio_gate_err:
- if (!dev)
- gpio_free(gpio);
-
- return clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
-
-#ifdef CONFIG_OF
-/**
- * The clk_register_gpio_gate has to be delayed, because the EPROBE_DEFER
- * can not be handled properly at of_clk_init() call time.
- */
-
-struct clk_gpio_gate_delayed_register_data {
- struct device_node *node;
- struct mutex lock;
- struct clk *clk;
-};
-
-static struct clk *of_clk_gpio_gate_delayed_register_get(
- struct of_phandle_args *clkspec,
- void *_data)
-{
- struct clk_gpio_gate_delayed_register_data *data = _data;
- struct clk *clk;
- const char *clk_name = data->node->name;
- const char *parent_name;
- int gpio;
- enum of_gpio_flags of_flags;
-
- mutex_lock(&data->lock);
-
- if (data->clk) {
- mutex_unlock(&data->lock);
- return data->clk;
- }
-
- gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0,
- &of_flags);
- if (gpio < 0) {
- mutex_unlock(&data->lock);
- if (gpio != -EPROBE_DEFER)
- pr_err("%s: %s: Can't get 'enable-gpios' DT property\n",
- __func__, clk_name);
- return ERR_PTR(gpio);
- }
-
- parent_name = of_clk_get_parent_name(data->node, 0);
-
- clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpio,
- of_flags & OF_GPIO_ACTIVE_LOW, 0);
- if (IS_ERR(clk)) {
- mutex_unlock(&data->lock);
- return clk;
- }
-
- data->clk = clk;
- mutex_unlock(&data->lock);
-
- return clk;
-}
-
-/**
- * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock
- */
-static void __init of_gpio_gate_clk_setup(struct device_node *node)
-{
- struct clk_gpio_gate_delayed_register_data *data;
-
- data = kzalloc(sizeof(struct clk_gpio_gate_delayed_register_data),
- GFP_KERNEL);
- if (!data)
- return;
-
- data->node = node;
- mutex_init(&data->lock);
-
- of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data);
-}
-CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
-#endif
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
new file mode 100644
index 000000000000..10819e248414
--- /dev/null
+++ b/drivers/clk/clk-gpio.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors:
+ * Jyri Sarha <jsarha@ti.com>
+ * Sergej Sawazki <ce3a@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gpio controlled clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+
+/**
+ * DOC: basic gpio gated clock which can be enabled and disabled
+ * with gpio output
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - inherits rate from parent. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
+static int clk_gpio_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value(clk->gpiod, 1);
+
+ return 0;
+}
+
+static void clk_gpio_gate_disable(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value(clk->gpiod, 0);
+}
+
+static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ return gpiod_get_value(clk->gpiod);
+}
+
+const struct clk_ops clk_gpio_gate_ops = {
+ .enable = clk_gpio_gate_enable,
+ .disable = clk_gpio_gate_disable,
+ .is_enabled = clk_gpio_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
+
+/**
+ * DOC: basic clock multiplexer which can be controlled with a gpio output
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * rate - rate is only affected by parent switching. No clk_set_rate support
+ * parent - parent is adjustable through clk_set_parent
+ */
+
+static u8 clk_gpio_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ return gpiod_get_value(clk->gpiod);
+}
+
+static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value(clk->gpiod, index);
+
+ return 0;
+}
+
+const struct clk_ops clk_gpio_mux_ops = {
+ .get_parent = clk_gpio_mux_get_parent,
+ .set_parent = clk_gpio_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
+
+static struct clk *clk_register_gpio(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags,
+ const struct clk_ops *clk_gpio_ops)
+{
+ struct clk_gpio *clk_gpio;
+ struct clk *clk;
+ struct clk_init_data init = {};
+ unsigned long gpio_flags;
+ int err;
+
+ if (dev)
+ clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
+ else
+ clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL);
+
+ if (!clk_gpio)
+ return ERR_PTR(-ENOMEM);
+
+ if (active_low)
+ gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
+ else
+ gpio_flags = GPIOF_OUT_INIT_LOW;
+
+ if (dev)
+ err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
+ else
+ err = gpio_request_one(gpio, gpio_flags, name);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ pr_err("%s: %s: Error requesting clock control gpio %u\n",
+ __func__, name, gpio);
+ if (!dev)
+ kfree(clk_gpio);
+
+ return ERR_PTR(err);
+ }
+
+ init.name = name;
+ init.ops = clk_gpio_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk_gpio->gpiod = gpio_to_desc(gpio);
+ clk_gpio->hw.init = &init;
+
+ if (dev)
+ clk = devm_clk_register(dev, &clk_gpio->hw);
+ else
+ clk = clk_register(NULL, &clk_gpio->hw);
+
+ if (!IS_ERR(clk))
+ return clk;
+
+ if (!dev) {
+ gpiod_put(clk_gpio->gpiod);
+ kfree(clk_gpio);
+ }
+
+ return clk;
+}
+
+/**
+ * clk_register_gpio_gate - register a gpio clock gate with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @gpio: gpio number to gate this clock
+ * @active_low: true if gpio should be set to 0 to enable clock
+ * @flags: clock flags
+ */
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned gpio, bool active_low,
+ unsigned long flags)
+{
+ return clk_register_gpio(dev, name,
+ (parent_name ? &parent_name : NULL),
+ (parent_name ? 1 : 0), gpio, active_low, flags,
+ &clk_gpio_gate_ops);
+}
+EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
+
+/**
+ * clk_register_gpio_mux - register a gpio clock mux with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_names: names of this clock's parents
+ * @num_parents: number of parents listed in @parent_names
+ * @gpio: gpio number to gate this clock
+ * @active_low: true if gpio should be set to 0 to enable clock
+ * @flags: clock flags
+ */
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags)
+{
+ if (num_parents != 2) {
+ pr_err("mux-clock %s must have 2 parents\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_register_gpio(dev, name, parent_names, num_parents,
+ gpio, active_low, flags, &clk_gpio_mux_ops);
+}
+EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
+
+#ifdef CONFIG_OF
+/**
+ * clk_register_get() has to be delayed, because -EPROBE_DEFER
+ * can not be handled properly at of_clk_init() call time.
+ */
+
+struct clk_gpio_delayed_register_data {
+ const char *gpio_name;
+ struct device_node *node;
+ struct mutex lock;
+ struct clk *clk;
+ struct clk *(*clk_register_get)(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned gpio, bool active_low);
+};
+
+static struct clk *of_clk_gpio_delayed_register_get(
+ struct of_phandle_args *clkspec, void *_data)
+{
+ struct clk_gpio_delayed_register_data *data = _data;
+ struct clk *clk;
+ const char **parent_names;
+ int i, num_parents;
+ int gpio;
+ enum of_gpio_flags of_flags;
+
+ mutex_lock(&data->lock);
+
+ if (data->clk) {
+ mutex_unlock(&data->lock);
+ return data->clk;
+ }
+
+ gpio = of_get_named_gpio_flags(data->node, data->gpio_name, 0,
+ &of_flags);
+ if (gpio < 0) {
+ mutex_unlock(&data->lock);
+ if (gpio == -EPROBE_DEFER)
+ pr_debug("%s: %s: GPIOs not yet available, retry later\n",
+ data->node->name, __func__);
+ else
+ pr_err("%s: %s: Can't get '%s' DT property\n",
+ data->node->name, __func__,
+ data->gpio_name);
+ return ERR_PTR(gpio);
+ }
+
+ num_parents = of_clk_get_parent_count(data->node);
+
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+ if (!parent_names) {
+ clk = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(data->node, i);
+
+ clk = data->clk_register_get(data->node->name, parent_names,
+ num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
+ if (IS_ERR(clk))
+ goto out;
+
+ data->clk = clk;
+out:
+ mutex_unlock(&data->lock);
+ kfree(parent_names);
+
+ return clk;
+}
+
+static struct clk *of_clk_gpio_gate_delayed_register_get(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned gpio, bool active_low)
+{
+ return clk_register_gpio_gate(NULL, name, parent_names[0],
+ gpio, active_low, 0);
+}
+
+static struct clk *of_clk_gpio_mux_delayed_register_get(const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low)
+{
+ return clk_register_gpio_mux(NULL, name, parent_names, num_parents,
+ gpio, active_low, 0);
+}
+
+static void __init of_gpio_clk_setup(struct device_node *node,
+ const char *gpio_name,
+ struct clk *(*clk_register_get)(const char *name,
+ const char * const *parent_names,
+ u8 num_parents,
+ unsigned gpio, bool active_low))
+{
+ struct clk_gpio_delayed_register_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->node = node;
+ data->gpio_name = gpio_name;
+ data->clk_register_get = clk_register_get;
+ mutex_init(&data->lock);
+
+ of_clk_add_provider(node, of_clk_gpio_delayed_register_get, data);
+}
+
+static void __init of_gpio_gate_clk_setup(struct device_node *node)
+{
+ of_gpio_clk_setup(node, "enable-gpios",
+ of_clk_gpio_gate_delayed_register_get);
+}
+CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
+
+void __init of_gpio_mux_clk_setup(struct device_node *node)
+{
+ of_gpio_clk_setup(node, "select-gpios",
+ of_clk_gpio_mux_delayed_register_get);
+}
+CLK_OF_DECLARE(gpio_mux_clk, "gpio-mux-clock", of_gpio_mux_clk_setup);
+#endif
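
The new clk-gpio.c folds the old gpio-gate implementation together with a gpio-controlled mux; because the select line value is used directly as the parent index, clk_gpio_mux_get_parent() can return gpiod_get_value() as-is, and exactly two parents are enforced. A hypothetical board-level use (clock names and GPIO number invented for illustration):

static const char * const osc_parents[] = { "osc-25m", "osc-27m" };

static int __init board_osc_mux_init(void)
{
	/* One GPIO line selects between two crystal oscillators. */
	struct clk *mux = clk_register_gpio_mux(NULL, "osc-mux", osc_parents,
						ARRAY_SIZE(osc_parents),
						42 /* gpio */, false, 0);

	return PTR_ERR_OR_ZERO(mux);
}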
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 2e7e9d9798cb..be3a21abb185 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
index 5181b89c3cb2..f37f719643ec 100644
--- a/drivers/clk/clk-moxart.c
+++ b/drivers/clk/clk-moxart.c
@@ -10,6 +10,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 6066a01b20ea..7129c86a79db 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -10,7 +10,6 @@
* Simple multiplexer clock implementation
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -32,7 +31,7 @@
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 val;
/*
@@ -85,6 +84,8 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
if (mux->flags & CLK_MUX_HIWORD_MASK) {
val = mux->mask << (mux->shift + 16);
@@ -97,6 +98,8 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
return 0;
}
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index c9487179f25f..e4d8a991c58f 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -8,8 +8,7 @@
#define pr_fmt(fmt) "Nomadik SRC clocks: " fmt
#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 45a535ab48aa..8e3039f0c3f9 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -18,7 +18,6 @@
*/
#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/palmas.h>
#include <linux/module.h>
diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c
index 83902b9cd49e..0fee2f4ca258 100644
--- a/drivers/clk/clk-rk808.c
+++ b/drivers/clk/clk-rk808.c
@@ -15,7 +15,6 @@
* more details.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b13a303d3f8..d266299dfdb1 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -58,21 +58,17 @@ static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
static int s2mps11_clk_prepare(struct clk_hw *hw)
{
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
- int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
+ return regmap_update_bits(s2mps11->iodev->regmap_pmic,
s2mps11->reg,
s2mps11->mask, s2mps11->mask);
-
- return ret;
}
static void s2mps11_clk_unprepare(struct clk_hw *hw)
{
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
- int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
+ regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
s2mps11->mask, ~s2mps11->mask);
}
@@ -186,15 +182,15 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
struct clk_init_data *clks_init;
int i, ret = 0;
- s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
- S2MPS11_CLKS_NUM, GFP_KERNEL);
+ s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+ sizeof(*s2mps11_clk), GFP_KERNEL);
if (!s2mps11_clks)
return -ENOMEM;
s2mps11_clk = s2mps11_clks;
- clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) *
- S2MPS11_CLKS_NUM, GFP_KERNEL);
+ clk_table = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+ sizeof(struct clk *), GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -246,7 +242,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
s2mps11_name(s2mps11_clk), NULL);
if (!s2mps11_clk->lookup) {
ret = -ENOMEM;
- goto err_lup;
+ goto err_reg;
}
}
@@ -265,16 +261,10 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s2mps11_clks);
return ret;
-err_lup:
- devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+
err_reg:
- while (s2mps11_clk > s2mps11_clks) {
- if (s2mps11_clk->lookup) {
- clkdev_drop(s2mps11_clk->lookup);
- devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
- }
- s2mps11_clk--;
- }
+ while (--i >= 0)
+ clkdev_drop(s2mps11_clks[i].lookup);
return ret;
}
@@ -322,7 +312,7 @@ static int __init s2mps11_clk_init(void)
}
subsys_initcall(s2mps11_clk_init);
-static void __init s2mps11_clk_cleanup(void)
+static void __exit s2mps11_clk_cleanup(void)
{
platform_driver_unregister(&s2mps11_clk_driver);
}
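
The rewritten s2mps11 error path leans on the loop counter: if iteration i failed, only lookups 0 through i - 1 exist, so walking i back down drops exactly those, and the devm-registered clocks need no manual unregister. The idiom in isolation (i must be a signed int; init_one() and undo_one() are placeholders):

	for (i = 0; i < n; i++) {
		ret = init_one(&objs[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)		/* undo successes, newest first */
		undo_one(&objs[i]);
	return ret;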
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index e39e1e680b3c..5596c0aac22f 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -439,7 +439,7 @@ static unsigned long si5351_pll_recalc_rate(struct clk_hw *hw,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk),
+ __func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
parent_rate, (unsigned long)rate);
@@ -497,7 +497,7 @@ static long si5351_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk), a, b, c,
+ __func__, clk_hw_get_name(hw), a, b, c,
*parent_rate, rate);
return rate;
@@ -521,7 +521,7 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk),
+ __func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
parent_rate, rate);
@@ -632,7 +632,7 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk),
+ __func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
m, parent_rate, (unsigned long)rate);
@@ -663,7 +663,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
divby4 = 1;
/* multisync can set pll */
- if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
/*
* find largest integer divider for max
* vco frequency and given target rate
@@ -745,7 +745,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk), a, b, c, divby4,
+ __func__, clk_hw_get_name(hw), a, b, c, divby4,
*parent_rate, rate);
return rate;
@@ -777,7 +777,7 @@ static int si5351_msynth_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk),
+ __func__, clk_hw_get_name(hw),
hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
divby4, parent_rate, rate);
@@ -1013,7 +1013,7 @@ static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
rate = SI5351_CLKOUT_MIN_FREQ;
/* request frequency if multisync master */
- if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
/* use r divider for frequencies below 1MHz */
rdiv = SI5351_OUTPUT_CLK_DIV_1;
while (rate < SI5351_MULTISYNTH_MIN_FREQ &&
@@ -1042,7 +1042,7 @@ static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+ __func__, clk_hw_get_name(hw), (1 << rdiv),
*parent_rate, rate);
return rate;
@@ -1093,7 +1093,7 @@ static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
- __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+ __func__, clk_hw_get_name(hw), (1 << rdiv),
parent_rate, rate);
return 0;
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 20a5aec98b1a..cf478aa9fa5d 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -19,6 +19,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index b9b12a742970..fd89e771107e 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -175,11 +175,10 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
mult = 2;
- if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent = rate / mult;
- *prate =
- __clk_round_rate(__clk_get_parent(hw->clk), best_parent);
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
return *prate * mult;
@@ -268,7 +267,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
memcpy(table, stm32f42xx_gate_map, sizeof(table));
/* only bits set in table can be used as indices */
- if (WARN_ON(secondary > 8 * sizeof(table) ||
+ if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
0 == (table[BIT_ULL_WORD(secondary)] &
BIT_ULL_MASK(secondary))))
return -EINVAL;
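
Besides naming the constant, the stm32f4 change fixes an off-by-one: the map holds BITS_PER_BYTE * sizeof(table) bit positions numbered from zero, so an index equal to that count is already past the end and must be rejected with >=, not >. The general shape (table and index stand in for the driver's variables):

	size_t nbits = BITS_PER_BYTE * sizeof(table);	/* valid indices: 0 .. nbits - 1 */

	if (index >= nbits)	/* index == nbits is one past the end */
		return -EINVAL;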
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 4a755135bcd3..8e5ed649a098 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -20,7 +20,6 @@
*
*/
-#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -91,7 +90,7 @@ static int twl6040_clk_probe(struct platform_device *pdev)
clkdata->twl6040 = twl6040;
clkdata->mcpdm_fclk.init = &wm831x_clkout_init;
- clkdata->clk = clk_register(&pdev->dev, &clkdata->mcpdm_fclk);
+ clkdata->clk = devm_clk_register(&pdev->dev, &clkdata->mcpdm_fclk);
if (IS_ERR(clkdata->clk))
return PTR_ERR(clkdata->clk);
@@ -100,21 +99,11 @@ static int twl6040_clk_probe(struct platform_device *pdev)
return 0;
}
-static int twl6040_clk_remove(struct platform_device *pdev)
-{
- struct twl6040_clk *clkdata = platform_get_drvdata(pdev);
-
- clk_unregister(clkdata->clk);
-
- return 0;
-}
-
static struct platform_driver twl6040_clk_driver = {
.driver = {
.name = "twl6040-clk",
},
.probe = twl6040_clk_probe,
- .remove = twl6040_clk_remove,
};
module_platform_driver(twl6040_clk_driver);
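
With devm_clk_register() above, the clock's lifetime is tied to the platform device and it is unregistered automatically on driver detach, which is why the .remove callback could be deleted outright. A minimal probe following the same pattern (struct my_clk and my_clk_init are assumed for the sketch):

static int my_clk_probe(struct platform_device *pdev)
{
	struct my_clk *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw.init = &my_clk_init;	/* assumed struct clk_init_data */

	/* Unregistered by the device core on unbind, so no .remove. */
	priv->clk = devm_clk_register(&pdev->dev, &priv->hw);

	return PTR_ERR_OR_ZERO(priv->clk);
}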
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 18bf5e576b93..95d1742dac30 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -5,8 +5,8 @@
* Author: Linus Walleij <linus.walleij@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index ef67719f4e52..43f9d15255f4 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -12,7 +12,6 @@
*
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/module.h>
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index f26b3ac36b27..96a6190acac2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -60,7 +60,6 @@ enum xgene_pll_type {
struct xgene_clk_pll {
struct clk_hw hw;
- const char *name;
void __iomem *reg;
spinlock_t *lock;
u32 pll_offset;
@@ -75,7 +74,7 @@ static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
u32 data;
data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
- pr_debug("%s pll %s\n", pllclk->name,
+ pr_debug("%s pll %s\n", clk_hw_get_name(hw),
data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
@@ -113,7 +112,7 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
fref = parent_rate / nref;
fvco = fref * nfb;
}
- pr_debug("%s pll recalc rate %ld parent %ld\n", pllclk->name,
+ pr_debug("%s pll recalc rate %ld parent %ld\n", clk_hw_get_name(hw),
fvco / nout, parent_rate);
return fvco / nout;
@@ -146,7 +145,6 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
- apmclk->name = name;
apmclk->reg = reg;
apmclk->lock = lock;
apmclk->pll_offset = pll_offset;
@@ -210,7 +208,6 @@ struct xgene_dev_parameters {
struct xgene_clk {
struct clk_hw hw;
- const char *name;
spinlock_t *lock;
struct xgene_dev_parameters param;
};
@@ -228,7 +225,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
- pr_debug("%s clock enabled\n", pclk->name);
+ pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
reg = __pa(pclk->param.csr_reg);
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
@@ -237,7 +234,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
- pclk->name, &reg,
+ clk_hw_get_name(hw), &reg,
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
@@ -248,7 +245,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
- pclk->name, &reg,
+ clk_hw_get_name(hw), &reg,
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
@@ -269,7 +266,7 @@ static void xgene_clk_disable(struct clk_hw *hw)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
- pr_debug("%s clock disabled\n", pclk->name);
+ pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
/* First put the CSR in reset */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_csr_offset);
@@ -295,10 +292,10 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
u32 data = 0;
if (pclk->param.csr_reg != NULL) {
- pr_debug("%s clock checking\n", pclk->name);
+ pr_debug("%s clock checking\n", clk_hw_get_name(hw));
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
- pr_debug("%s clock is %s\n", pclk->name,
+ pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
data & pclk->param.reg_clk_mask ? "enabled" :
"disabled");
}
@@ -321,11 +318,13 @@ static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
data &= (1 << pclk->param.reg_divider_width) - 1;
pr_debug("%s clock recalc rate %ld parent %ld\n",
- pclk->name, parent_rate / data, parent_rate);
+ clk_hw_get_name(hw),
+ parent_rate / data, parent_rate);
+
return parent_rate / data;
} else {
pr_debug("%s clock recalc rate %ld parent %ld\n",
- pclk->name, parent_rate, parent_rate);
+ clk_hw_get_name(hw), parent_rate, parent_rate);
return parent_rate;
}
}
@@ -357,7 +356,7 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- pr_debug("%s clock set rate %ld\n", pclk->name,
+ pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
parent_rate / divider_save);
} else {
divider_save = 1;
@@ -419,7 +418,6 @@ static struct clk *xgene_register_clk(struct device *dev,
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
- apmclk->name = name;
apmclk->lock = lock;
apmclk->hw.init = &init;
apmclk->param = *parameters;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ddb4b541016f..43e2c3ad6c31 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -9,6 +9,7 @@
* Standard functionality for the common clock API. See Documentation/clk.txt
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
@@ -56,8 +57,11 @@ struct clk_core {
struct clk_core *new_parent;
struct clk_core *new_child;
unsigned long flags;
+ bool orphan;
unsigned int enable_count;
unsigned int prepare_count;
+ unsigned long min_rate;
+ unsigned long max_rate;
unsigned long accuracy;
int phase;
struct hlist_head children;
@@ -111,12 +115,14 @@ static void clk_prepare_unlock(void)
}
static unsigned long clk_enable_lock(void)
+ __acquires(enable_lock)
{
unsigned long flags;
if (!spin_trylock_irqsave(&enable_lock, flags)) {
if (enable_owner == current) {
enable_refcnt++;
+ __acquire(enable_lock);
return flags;
}
spin_lock_irqsave(&enable_lock, flags);
@@ -129,12 +135,15 @@ static unsigned long clk_enable_lock(void)
}
static void clk_enable_unlock(unsigned long flags)
+ __releases(enable_lock)
{
WARN_ON_ONCE(enable_owner != current);
WARN_ON_ONCE(enable_refcnt == 0);
- if (--enable_refcnt)
+ if (--enable_refcnt) {
+ __release(enable_lock);
return;
+ }
enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, flags);
}
@@ -269,27 +278,29 @@ const char *__clk_get_name(struct clk *clk)
}
EXPORT_SYMBOL_GPL(__clk_get_name);
+const char *clk_hw_get_name(const struct clk_hw *hw)
+{
+ return hw->core->name;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_name);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);
-u8 __clk_get_num_parents(struct clk *clk)
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
- return !clk ? 0 : clk->core->num_parents;
+ return hw->core->num_parents;
}
-EXPORT_SYMBOL_GPL(__clk_get_num_parents);
+EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
-struct clk *__clk_get_parent(struct clk *clk)
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
- if (!clk)
- return NULL;
-
- /* TODO: Create a per-user clk and change callers to call clk_put */
- return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
+ return hw->core->parent ? hw->core->parent->hw : NULL;
}
-EXPORT_SYMBOL_GPL(__clk_get_parent);
+EXPORT_SYMBOL_GPL(clk_hw_get_parent);
static struct clk_core *__clk_lookup_subtree(const char *name,
struct clk_core *core)
@@ -348,18 +359,16 @@ static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
return core->parents[index];
}
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+struct clk_hw *
+clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
struct clk_core *parent;
- if (!clk)
- return NULL;
-
- parent = clk_core_get_parent_by_index(clk->core, index);
+ parent = clk_core_get_parent_by_index(hw->core, index);
- return !parent ? NULL : parent->hw->clk;
+ return !parent ? NULL : parent->hw;
}
-EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
@@ -387,14 +396,11 @@ out:
return ret;
}
-unsigned long __clk_get_rate(struct clk *clk)
+unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
- if (!clk)
- return 0;
-
- return clk_core_get_rate_nolock(clk->core);
+ return clk_core_get_rate_nolock(hw->core);
}
-EXPORT_SYMBOL_GPL(__clk_get_rate);
+EXPORT_SYMBOL_GPL(clk_hw_get_rate);
static unsigned long __clk_get_accuracy(struct clk_core *core)
{
@@ -410,12 +416,15 @@ unsigned long __clk_get_flags(struct clk *clk)
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
-bool __clk_is_prepared(struct clk *clk)
+unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
- if (!clk)
- return false;
+ return hw->core->flags;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_flags);
- return clk_core_is_prepared(clk->core);
+bool clk_hw_is_prepared(const struct clk_hw *hw)
+{
+ return clk_core_is_prepared(hw->core);
}
bool __clk_is_enabled(struct clk *clk)
@@ -436,28 +445,31 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
-static long
-clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p,
+static int
+clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
unsigned long flags)
{
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
- int i, num_parents;
- unsigned long parent_rate, best = 0;
+ int i, num_parents, ret;
+ unsigned long best = 0;
+ struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
if (core->flags & CLK_SET_RATE_NO_REPARENT) {
parent = core->parent;
- if (core->flags & CLK_SET_RATE_PARENT)
- best = __clk_determine_rate(parent ? parent->hw : NULL,
- rate, min_rate, max_rate);
- else if (parent)
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ ret = __clk_determine_rate(parent ? parent->hw : NULL,
+ &parent_req);
+ if (ret)
+ return ret;
+
+ best = parent_req.rate;
+ } else if (parent) {
best = clk_core_get_rate_nolock(parent);
- else
+ } else {
best = clk_core_get_rate_nolock(core);
+ }
+
goto out;
}
@@ -467,24 +479,33 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
- if (core->flags & CLK_SET_RATE_PARENT)
- parent_rate = __clk_determine_rate(parent->hw, rate,
- min_rate,
- max_rate);
- else
- parent_rate = clk_core_get_rate_nolock(parent);
- if (mux_is_better_rate(rate, parent_rate, best, flags)) {
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ parent_req = *req;
+ ret = __clk_determine_rate(parent->hw, &parent_req);
+ if (ret)
+ continue;
+ } else {
+ parent_req.rate = clk_core_get_rate_nolock(parent);
+ }
+
+ if (mux_is_better_rate(req->rate, parent_req.rate,
+ best, flags)) {
best_parent = parent;
- best = parent_rate;
+ best = parent_req.rate;
}
}
+ if (!best_parent)
+ return -EINVAL;
+
out:
if (best_parent)
- *best_parent_p = best_parent->hw;
- *best_parent_rate = best;
+ req->best_parent_hw = best_parent->hw;
+ req->best_parent_rate = best;
+ req->rate = best;
- return best;
+ return 0;
}
struct clk *__clk_lookup(const char *name)
@@ -500,8 +521,8 @@ static void clk_core_get_boundaries(struct clk_core *core,
{
struct clk *clk_user;
- *min_rate = 0;
- *max_rate = ULONG_MAX;
+ *min_rate = core->min_rate;
+ *max_rate = core->max_rate;
hlist_for_each_entry(clk_user, &core->clks, clks_node)
*min_rate = max(*min_rate, clk_user->min_rate);
@@ -510,33 +531,30 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ unsigned long max_rate)
+{
+ hw->core->min_rate = min_rate;
+ hw->core->max_rate = max_rate;
+}
+EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
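
clk_hw_set_rate_range() lets a provider record hardware limits once; clk_core_get_boundaries() then folds them in alongside the per-consumer constraints on every rate request. A sketch of how a driver might publish such limits at registration time, assuming hypothetical bounds and a hypothetical my_clk_register() helper:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical limits for an illustrative PLL. */
#define MY_PLL_MIN_RATE	 10000000UL	/*  10 MHz */
#define MY_PLL_MAX_RATE	800000000UL	/* 800 MHz */

static int my_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;

	clk = devm_clk_register(dev, hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * Hardware bounds: every later round/set request is clamped to
	 * [min, max] before the rate is propagated.
	 */
	clk_hw_set_rate_range(hw, MY_PLL_MIN_RATE, MY_PLL_MAX_RATE);
	return 0;
}
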
+
/*
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
*/
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p)
+int __clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
- best_parent_rate,
- best_parent_p, 0);
+ return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p)
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
- best_parent_rate,
- best_parent_p,
- CLK_MUX_ROUND_CLOSEST);
+ return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
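
The conversion above replaces five loose parameters with a single struct clk_rate_request that carries the target rate, the caller's min/max bounds, and the chosen parent in and out of ->determine_rate(), which now returns an int error code. A sketch of a converted callback for a hypothetical fixed divide-by-2 clock (my_div2_determine_rate is illustrative, not part of this patch):

#include <linux/clk-provider.h>
#include <linux/errno.h>

static int my_div2_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned long parent_rate = parent ? clk_hw_get_rate(parent) : 0;

	/* Report what the hardware would actually produce... */
	req->rate = parent_rate / 2;
	req->best_parent_hw = parent;
	req->best_parent_rate = parent_rate;

	/* ...and return an error instead of violating the caller's bounds. */
	if (req->rate < req->min_rate || req->rate > req->max_rate)
		return -EINVAL;

	return 0;	/* success: the result lives in *req, not the return */
}
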
@@ -759,14 +777,11 @@ int clk_enable(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_enable);
-static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate)
+static int clk_core_round_rate_nolock(struct clk_core *core,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = 0;
struct clk_core *parent;
- struct clk_hw *parent_hw;
+ long rate;
lockdep_assert_held(&prepare_lock);
@@ -774,21 +789,30 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
return 0;
parent = core->parent;
- if (parent)
- parent_rate = parent->rate;
+ if (parent) {
+ req->best_parent_hw = parent->hw;
+ req->best_parent_rate = parent->rate;
+ } else {
+ req->best_parent_hw = NULL;
+ req->best_parent_rate = 0;
+ }
if (core->ops->determine_rate) {
- parent_hw = parent ? parent->hw : NULL;
- return core->ops->determine_rate(core->hw, rate,
- min_rate, max_rate,
- &parent_rate, &parent_hw);
- } else if (core->ops->round_rate)
- return core->ops->round_rate(core->hw, rate, &parent_rate);
- else if (core->flags & CLK_SET_RATE_PARENT)
- return clk_core_round_rate_nolock(core->parent, rate, min_rate,
- max_rate);
- else
- return core->rate;
+ return core->ops->determine_rate(core->hw, req);
+ } else if (core->ops->round_rate) {
+ rate = core->ops->round_rate(core->hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+ } else if (core->flags & CLK_SET_RATE_PARENT) {
+ return clk_core_round_rate_nolock(parent, req);
+ } else {
+ req->rate = core->rate;
+ }
+
+ return 0;
}
/**
@@ -800,38 +824,32 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
*
* Useful for clk_ops such as .set_rate and .determine_rate.
*/
-unsigned long __clk_determine_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate)
+int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
- if (!hw)
+ if (!hw) {
+ req->rate = 0;
return 0;
+ }
- return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+ return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
- unsigned long min_rate;
- unsigned long max_rate;
+ int ret;
+ struct clk_rate_request req;
- if (!clk)
- return 0;
+ clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
+ req.rate = rate;
- clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+ ret = clk_core_round_rate_nolock(hw->core, &req);
+ if (ret)
+ return 0;
- return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+ return req.rate;
}
-EXPORT_SYMBOL_GPL(__clk_round_rate);
+EXPORT_SYMBOL_GPL(clk_hw_round_rate);
/**
* clk_round_rate - round the given rate for a clk
@@ -844,16 +862,24 @@ EXPORT_SYMBOL_GPL(__clk_round_rate);
*/
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- unsigned long ret;
+ struct clk_rate_request req;
+ int ret;
if (!clk)
return 0;
clk_prepare_lock();
- ret = __clk_round_rate(clk, rate);
+
+ clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
+ req.rate = rate;
+
+ ret = clk_core_round_rate_nolock(clk->core, &req);
clk_prepare_unlock();
- return ret;
+ if (ret)
+ return ret;
+
+ return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
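
Because clk_core_round_rate_nolock() can now fail, clk_round_rate() propagates the negative errno instead of returning a bogus rate, so consumers should check the sign before using the result. A minimal consumer sketch (my_pick_rate is illustrative):

#include <linux/clk.h>

static int my_pick_rate(struct clk *clk, unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	/* A negative value is now an errno, not a usable rate. */
	if (rounded < 0)
		return rounded;

	return clk_set_rate(clk, rounded);
}
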
@@ -1064,18 +1090,40 @@ static int clk_fetch_parent_index(struct clk_core *core,
return -EINVAL;
}
+/*
+ * Update the orphan status of @core and all its children.
+ */
+static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
+{
+ struct clk_core *child;
+
+ core->orphan = is_orphan;
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ clk_core_update_orphan_status(child, is_orphan);
+}
+
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
+ bool was_orphan = core->orphan;
+
hlist_del(&core->child_node);
if (new_parent) {
+ bool becomes_orphan = new_parent->orphan;
+
/* avoid duplicate POST_RATE_CHANGE notifications */
if (new_parent->new_child == core)
new_parent->new_child = NULL;
hlist_add_head(&core->child_node, &new_parent->children);
+
+ if (was_orphan != becomes_orphan)
+ clk_core_update_orphan_status(core, becomes_orphan);
} else {
hlist_add_head(&core->child_node, &clk_orphan_list);
+ if (!was_orphan)
+ clk_core_update_orphan_status(core, true);
}
core->parent = new_parent;
@@ -1160,14 +1208,8 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+ __clk_set_parent_after(core, old_parent, parent);
- if (core->prepare_count) {
- flags = clk_enable_lock();
- clk_core_disable(core);
- clk_core_disable(parent);
- clk_enable_unlock(flags);
- clk_core_unprepare(parent);
- }
return ret;
}
@@ -1249,7 +1291,6 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
{
struct clk_core *top = core;
struct clk_core *old_parent, *parent;
- struct clk_hw *parent_hw;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
unsigned long min_rate;
@@ -1270,20 +1311,29 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
/* find the closest rate and parent clk/rate */
if (core->ops->determine_rate) {
- parent_hw = parent ? parent->hw : NULL;
- ret = core->ops->determine_rate(core->hw, rate,
- min_rate,
- max_rate,
- &best_parent_rate,
- &parent_hw);
+ struct clk_rate_request req;
+
+ req.rate = rate;
+ req.min_rate = min_rate;
+ req.max_rate = max_rate;
+ if (parent) {
+ req.best_parent_hw = parent->hw;
+ req.best_parent_rate = parent->rate;
+ } else {
+ req.best_parent_hw = NULL;
+ req.best_parent_rate = 0;
+ }
+
+ ret = core->ops->determine_rate(core->hw, &req);
if (ret < 0)
return NULL;
- new_rate = ret;
- parent = parent_hw ? parent_hw->core : NULL;
+ best_parent_rate = req.best_parent_rate;
+ new_rate = req.rate;
+ parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
} else if (core->ops->round_rate) {
ret = core->ops->round_rate(core->hw, rate,
- &best_parent_rate);
+ &best_parent_rate);
if (ret < 0)
return NULL;
@@ -1592,8 +1642,12 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
+ if (!clk)
+ return NULL;
+
clk_prepare_lock();
- parent = __clk_get_parent(clk);
+ /* TODO: Create a per-user clk and change callers to call clk_put */
+ parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
clk_prepare_unlock();
return parent;
@@ -2324,13 +2378,17 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
- if (core->parent)
+ if (core->parent) {
hlist_add_head(&core->child_node,
&core->parent->children);
- else if (core->flags & CLK_IS_ROOT)
+ core->orphan = core->parent->orphan;
+ } else if (core->flags & CLK_IS_ROOT) {
hlist_add_head(&core->child_node, &clk_root_list);
- else
+ core->orphan = false;
+ } else {
hlist_add_head(&core->child_node, &clk_orphan_list);
+ core->orphan = true;
+ }
/*
* Set clk's accuracy. The preferred method is to use
@@ -2479,6 +2537,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
core->hw = hw;
core->flags = hw->init->flags;
core->num_parents = hw->init->num_parents;
+ core->min_rate = 0;
+ core->max_rate = ULONG_MAX;
hw->core = core;
/* allocate local copy in case parent_names is __initdata */
@@ -3054,8 +3114,6 @@ struct clock_provider {
struct list_head node;
};
-static LIST_HEAD(clk_provider_list);
-
/*
* This function looks for a parent clock. If there is one, then it
* checks that the provider for this parent clock was initialized, in
@@ -3106,14 +3164,24 @@ void __init of_clk_init(const struct of_device_id *matches)
struct clock_provider *clk_provider, *next;
bool is_init_done;
bool force = false;
+ LIST_HEAD(clk_provider_list);
if (!matches)
matches = &__clk_of_table;
/* First prepare the list of the clocks providers */
for_each_matching_node_and_match(np, matches, &match) {
- struct clock_provider *parent =
- kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
+ struct clock_provider *parent;
+
+ parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ if (!parent) {
+ list_for_each_entry_safe(clk_provider, next,
+ &clk_provider_list, node) {
+ list_del(&clk_provider->node);
+ kfree(clk_provider);
+ }
+ return;
+ }
parent->clk_init_cb = match->data;
parent->np = np;
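
The of_clk_init() hunk above adds an unwind path: if one kzalloc() fails part-way through building the provider list, everything queued so far is freed instead of leaked. The same idiom in isolation, sketched with an illustrative struct item and build_list() (not from this patch):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int payload;
};

/* @head must already be initialized (e.g. with LIST_HEAD) by the caller. */
static int build_list(struct list_head *head, int count)
{
	struct item *it, *next;
	int i;

	for (i = 0; i < count; i++) {
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto err_free;	/* unwind everything added so far */
		it->payload = i;
		list_add_tail(&it->node, head);
	}
	return 0;

err_free:
	/* _safe variant: each node is freed while we walk the list */
	list_for_each_entry_safe(it, next, head, node) {
		list_del(&it->node);
		kfree(it);
	}
	return -ENOMEM;
}
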
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index 56f9eba91b83..1dd5d14d5dbe 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -4,8 +4,6 @@
* Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -15,7 +13,7 @@ static DEFINE_SPINLOCK(clklock);
static void __init h8300_div_clk_setup(struct device_node *node)
{
- unsigned int num_parents;
+ int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 4701b093e497..2a38eb4a2552 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -4,8 +4,6 @@
* Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/device.h>
@@ -28,7 +26,7 @@ static unsigned long pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clock *pll_clock = to_pll_clock(hw);
- int mul = 1 << (ctrl_inb((unsigned long)pll_clock->pllcr) & 3);
+ int mul = 1 << (readb(pll_clock->pllcr) & 3);
return parent_rate * mul;
}
@@ -65,13 +63,13 @@ static int pll_set_rate(struct clk_hw *hw, unsigned long rate,
pll = ((rate / parent_rate) / 2) & 0x03;
spin_lock_irqsave(&clklock, flags);
- val = ctrl_inb((unsigned long)pll_clock->sckcr);
+ val = readb(pll_clock->sckcr);
val |= 0x08;
- ctrl_outb(val, (unsigned long)pll_clock->sckcr);
- val = ctrl_inb((unsigned long)pll_clock->pllcr);
+ writeb(val, pll_clock->sckcr);
+ val = readb(pll_clock->pllcr);
val &= ~0x03;
val |= pll;
- ctrl_outb(val, (unsigned long)pll_clock->pllcr);
+ writeb(val, pll_clock->pllcr);
spin_unlock_irqrestore(&clklock, flags);
return 0;
}
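
ctrl_inb()/ctrl_outb() were h8300-private accessors taking raw integer addresses; readb()/writeb() are the portable MMIO accessors and operate directly on the __iomem pointers that of_iomap() returns, which removes the casts. The same read-modify-write shape in isolation, with a hypothetical register and spinlock (the demo_* names are illustrative):

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_reglock);

/* Hypothetical: set @bits in the byte register at @reg under the lock. */
static void demo_set_bits(void __iomem *reg, u8 bits)
{
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&demo_reglock, flags);
	val = readb(reg);		/* MMIO read via __iomem pointer */
	val |= bits;
	writeb(val, reg);		/* note operand order: value, address */
	spin_unlock_irqrestore(&demo_reglock, flags);
}
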
@@ -84,7 +82,7 @@ static const struct clk_ops pll_ops = {
static void __init h8s2678_pll_clk_setup(struct device_node *node)
{
- unsigned int num_parents;
+ int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -98,11 +96,9 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
}
- pll_clock = kzalloc(sizeof(struct pll_clock), GFP_KERNEL);
- if (!pll_clock) {
- pr_err("%s: failed to alloc memory", clk_name);
+ pll_clock = kzalloc(sizeof(*pll_clock), GFP_KERNEL);
+ if (!pll_clock)
return;
- }
pll_clock->sckcr = of_iomap(node, 0);
if (pll_clock->sckcr == NULL) {
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index b4165ba75d9f..2c16807341dc 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,6 +1,6 @@
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
- depends on ARCH_HISI || COMPILE_TEST
+ depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX
default ARCH_HISI
help
Build the Hisilicon Hi6220 clock driver based on the common clock framework.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 48f0116a032a..4a1001a11f04 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,4 +7,4 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
-obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
+obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o clk-hi6220-stub.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 715d34a5ef9b..7d03fe17d66f 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -25,13 +25,11 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/hi3620-clock.h>
@@ -294,34 +292,29 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
}
}
-static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p)
+static int mmc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_mmc *mclk = to_mmc(hw);
- unsigned long best = 0;
- if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
- rate = 13000000;
- best = 26000000;
- } else if (rate <= 26000000) {
- rate = 25000000;
- best = 180000000;
- } else if (rate <= 52000000) {
- rate = 50000000;
- best = 360000000;
- } else if (rate <= 100000000) {
- rate = 100000000;
- best = 720000000;
+ if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
+ req->rate = 13000000;
+ req->best_parent_rate = 26000000;
+ } else if (req->rate <= 26000000) {
+ req->rate = 25000000;
+ req->best_parent_rate = 180000000;
+ } else if (req->rate <= 52000000) {
+ req->rate = 50000000;
+ req->best_parent_rate = 360000000;
+ } else if (req->rate <= 100000000) {
+ req->rate = 100000000;
+ req->best_parent_rate = 720000000;
} else {
/* max is 180M */
- rate = 180000000;
- best = 1440000000;
+ req->rate = 180000000;
+ req->best_parent_rate = 1440000000;
}
- *best_parent_rate = best;
- return rate;
+	return 0;
}
static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len)
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
new file mode 100644
index 000000000000..2c4add11c1ca
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -0,0 +1,276 @@
+/*
+ * Hi6220 stub clock driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* Stub clocks id */
+#define HI6220_STUB_ACPU0 0
+#define HI6220_STUB_ACPU1 1
+#define HI6220_STUB_GPU 2
+#define HI6220_STUB_DDR 5
+
+/* Mailbox message */
+#define HI6220_MBOX_MSG_LEN 8
+
+#define HI6220_MBOX_FREQ 0xA
+#define HI6220_MBOX_CMD_SET 0x3
+#define HI6220_MBOX_OBJ_AP 0x0
+
+/* CPU dynamic frequency scaling */
+#define ACPU_DFS_FREQ_MAX 0x1724
+#define ACPU_DFS_CUR_FREQ 0x17CC
+#define ACPU_DFS_FLAG 0x1B30
+#define ACPU_DFS_FREQ_REQ 0x1B34
+#define ACPU_DFS_FREQ_LMT 0x1B38
+#define ACPU_DFS_LOCK_FLAG 0xAEAEAEAE
+
+#define to_stub_clk(hw) container_of(hw, struct hi6220_stub_clk, hw)
+
+struct hi6220_stub_clk {
+ u32 id;
+
+ struct device *dev;
+ struct clk_hw hw;
+
+ struct regmap *dfs_map;
+ struct mbox_client cl;
+ struct mbox_chan *mbox;
+};
+
+struct hi6220_mbox_msg {
+ unsigned char type;
+ unsigned char cmd;
+ unsigned char obj;
+ unsigned char src;
+ unsigned char para[4];
+};
+
+union hi6220_mbox_data {
+ unsigned int data[HI6220_MBOX_MSG_LEN];
+ struct hi6220_mbox_msg msg;
+};
+
+static unsigned int hi6220_acpu_get_freq(struct hi6220_stub_clk *stub_clk)
+{
+ unsigned int freq;
+
+ regmap_read(stub_clk->dfs_map, ACPU_DFS_CUR_FREQ, &freq);
+ return freq;
+}
+
+static int hi6220_acpu_set_freq(struct hi6220_stub_clk *stub_clk,
+ unsigned int freq)
+{
+ union hi6220_mbox_data data;
+
+ /* set the frequency in sram */
+ regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, freq);
+
+	/* compose the mailbox message */
+ data.msg.type = HI6220_MBOX_FREQ;
+ data.msg.cmd = HI6220_MBOX_CMD_SET;
+ data.msg.obj = HI6220_MBOX_OBJ_AP;
+ data.msg.src = HI6220_MBOX_OBJ_AP;
+
+ mbox_send_message(stub_clk->mbox, &data);
+ return 0;
+}
+
+static int hi6220_acpu_round_freq(struct hi6220_stub_clk *stub_clk,
+ unsigned int freq)
+{
+ unsigned int limit_flag, limit_freq = UINT_MAX;
+ unsigned int max_freq;
+
+ /* check the constrained frequency */
+ regmap_read(stub_clk->dfs_map, ACPU_DFS_FLAG, &limit_flag);
+ if (limit_flag == ACPU_DFS_LOCK_FLAG)
+ regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, &limit_freq);
+
+ /* check the supported maximum frequency */
+ regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_MAX, &max_freq);
+
+ /* calculate the real maximum frequency */
+ max_freq = min(max_freq, limit_freq);
+
+ if (WARN_ON(freq > max_freq))
+ freq = max_freq;
+
+ return freq;
+}
+
+static unsigned long hi6220_stub_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 rate = 0;
+ struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+
+ switch (stub_clk->id) {
+ case HI6220_STUB_ACPU0:
+ rate = hi6220_acpu_get_freq(stub_clk);
+
+ /* convert from kHz to Hz */
+ rate *= 1000;
+ break;
+
+ default:
+		dev_err(stub_clk->dev, "%s: unsupported clock id %d\n",
+ __func__, stub_clk->id);
+ break;
+ }
+
+ return rate;
+}
+
+static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+ unsigned long new_rate = rate / 1000; /* kHz */
+ int ret = 0;
+
+ switch (stub_clk->id) {
+ case HI6220_STUB_ACPU0:
+ ret = hi6220_acpu_set_freq(stub_clk, new_rate);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ default:
+		dev_err(stub_clk->dev, "%s: unsupported clock id %d\n",
+ __func__, stub_clk->id);
+ break;
+ }
+
+ pr_debug("%s: set rate=%ldkHz\n", __func__, new_rate);
+ return ret;
+}
+
+static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+ unsigned long new_rate = rate / 1000; /* kHz */
+
+ switch (stub_clk->id) {
+ case HI6220_STUB_ACPU0:
+ new_rate = hi6220_acpu_round_freq(stub_clk, new_rate);
+
+ /* convert from kHz to Hz */
+ new_rate *= 1000;
+ break;
+
+ default:
+		dev_err(stub_clk->dev, "%s: unsupported clock id %d\n",
+ __func__, stub_clk->id);
+ break;
+ }
+
+ return new_rate;
+}
+
+static const struct clk_ops hi6220_stub_clk_ops = {
+ .recalc_rate = hi6220_stub_clk_recalc_rate,
+ .round_rate = hi6220_stub_clk_round_rate,
+ .set_rate = hi6220_stub_clk_set_rate,
+};
+
+static int hi6220_stub_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_init_data init;
+ struct hi6220_stub_clk *stub_clk;
+ struct clk *clk;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ stub_clk = devm_kzalloc(dev, sizeof(*stub_clk), GFP_KERNEL);
+ if (!stub_clk)
+ return -ENOMEM;
+
+ stub_clk->dfs_map = syscon_regmap_lookup_by_phandle(np,
+ "hisilicon,hi6220-clk-sram");
+ if (IS_ERR(stub_clk->dfs_map)) {
+ dev_err(dev, "failed to get sram regmap\n");
+ return PTR_ERR(stub_clk->dfs_map);
+ }
+
+ stub_clk->hw.init = &init;
+ stub_clk->dev = dev;
+ stub_clk->id = HI6220_STUB_ACPU0;
+
+ /* Use mailbox client with blocking mode */
+ stub_clk->cl.dev = dev;
+ stub_clk->cl.tx_done = NULL;
+ stub_clk->cl.tx_block = true;
+ stub_clk->cl.tx_tout = 500;
+ stub_clk->cl.knows_txdone = false;
+
+ /* Allocate mailbox channel */
+ stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0);
+ if (IS_ERR(stub_clk->mbox)) {
+ dev_err(dev, "failed get mailbox channel\n");
+ return PTR_ERR(stub_clk->mbox);
+	}
+
+ init.name = "acpu0";
+ init.ops = &hi6220_stub_clk_ops;
+ init.num_parents = 0;
+ init.flags = CLK_IS_ROOT;
+
+ clk = devm_clk_register(dev, &stub_clk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (ret) {
+ dev_err(dev, "failed to register OF clock provider\n");
+ return ret;
+ }
+
+ /* initialize buffer to zero */
+ regmap_write(stub_clk->dfs_map, ACPU_DFS_FLAG, 0x0);
+ regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, 0x0);
+ regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, 0x0);
+
+ dev_dbg(dev, "Registered clock '%s'\n", init.name);
+ return 0;
+}
+
+static const struct of_device_id hi6220_stub_clk_of_match[] = {
+ { .compatible = "hisilicon,hi6220-stub-clk", },
+ {}
+};
+
+static struct platform_driver hi6220_stub_clk_driver = {
+ .driver = {
+ .name = "hi6220-stub-clk",
+ .of_match_table = hi6220_stub_clk_of_match,
+ },
+ .probe = hi6220_stub_clk_probe,
+};
+
+static int __init hi6220_stub_clk_init(void)
+{
+ return platform_driver_register(&hi6220_stub_clk_driver);
+}
+subsys_initcall(hi6220_stub_clk_init);
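
Once the stub clock is registered with of_clk_add_provider(), consumers drive it through the ordinary clk API and the mailbox exchange stays hidden behind ->set_rate(). A minimal consumer sketch, assuming a hypothetical device whose DT node references the stub clock (demo_scale_cpu is illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_scale_cpu(struct device *dev, unsigned long hz)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);	/* first clock in the DT node */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ends up in hi6220_stub_clk_set_rate() and the mailbox message. */
	ret = clk_set_rate(clk, hz);
	if (ret)
		dev_err(dev, "failed to set rate to %lu Hz: %d\n", hz, ret);

	return ret;
}
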
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
index 132b57a0ce09..8ca967308343 100644
--- a/drivers/clk/hisilicon/clk-hip04.c
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -24,13 +24,11 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/hip04-clock.h>
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index c90a89739b03..9f8e76676553 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -24,15 +24,14 @@
*/
#include <linux/kernel.h>
-#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include "clk.h"
@@ -45,14 +44,9 @@ struct hisi_clock_data __init *hisi_clk_init(struct device_node *np,
struct clk **clk_table;
void __iomem *base;
- if (np) {
- base = of_iomap(np, 0);
- if (!base) {
- pr_err("failed to map Hisilicon clock registers\n");
- goto err;
- }
- } else {
- pr_err("failed to find Hisilicon clock node in DTS\n");
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
goto err;
}
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index b03d5a7246f9..a47812f56a17 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -25,10 +25,8 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 75fae169ce8f..1ada68abb158 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -22,5 +22,6 @@ obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
+obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o
obj-$(CONFIG_SOC_VF610) += clk-vf610.o
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c
index c2647fa19f28..99cf802fa51f 100644
--- a/drivers/clk/imx/clk-imx1.c
+++ b/drivers/clk/imx/clk-imx1.c
@@ -15,7 +15,6 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index dba987e3b89f..e63188eb08ac 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -9,7 +9,6 @@
* of the License, or (at your option) any later version.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index fe66c40b7be2..1f8383475bb3 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -147,7 +147,8 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[rtc_gate], NULL, "imx21-rtc");
+ clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
+ clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
clk_register_clkdev(clk[epit1_gate], "epit", NULL);
clk_register_clkdev(clk[epit2_gate], "epit", NULL);
clk_register_clkdev(clk[nfc], NULL, "imx27-nand.0");
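
Splitting the rtc clkdev entry into "ref" and "ipg" lookups means the imx21-rtc driver can fetch the 32 kHz reference and the bus clock separately by connection id. A sketch of the consumer side (demo_rtc_get_clocks is illustrative; the device-name match is handled by clkdev):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_rtc_get_clocks(struct device *dev)
{
	struct clk *ref, *ipg;

	ref = devm_clk_get(dev, "ref");	/* 32 kHz reference (ckil) */
	if (IS_ERR(ref))
		return PTR_ERR(ref);

	ipg = devm_clk_get(dev, "ipg");	/* bus clock (rtc gate) */
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	return clk_prepare_enable(ipg);
}
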
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index 69138ba3dec7..8623cd4e49fd 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -66,7 +66,7 @@ static const char *std_sel[] = {"ppll", "arm"};
static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
enum mx35_clks {
- ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+ ckih, ckil, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
@@ -107,6 +107,7 @@ int __init mx35_clocks_init(void)
}
clk[ckih] = imx_clk_fixed("ckih", 24000000);
+ clk[ckil] = imx_clk_fixed("ckih", 32768);
clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
@@ -258,6 +259,9 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+ /* i.mx35 has the i.mx21 type rtc */
+ clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
+ clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index d046f8e43de8..b2c1c047dc94 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc);
clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
+ clk[IMX6QDL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
+ clk[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
+ clk[IMX6QDL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
@@ -494,6 +497,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
}
+ clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000);
+ if (clk_on_imx6dl())
+ clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]);
+
clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
new file mode 100644
index 000000000000..aaa36650695f
--- /dev/null
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <dt-bindings/clock/imx6ul-clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16)
+#define CCDR 0x4
+
+static const char *pll_bypass_src_sels[] = { "osc", "dummy", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *ca7_secondary_sels[] = { "pll2_pfd2_396m", "pll2_bus", };
+static const char *step_sels[] = { "osc", "ca7_secondary_sel", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = { "periph", "axi_alt_sel", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *bch_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *gpmi_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd0_720m", };
+static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *sai_sels[] = { "pll3_pfd2_508m", "pll5_video_div", "pll4_audio_div", };
+static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
+static const char *sim_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
+static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
+static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+static const char *qspi1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
+static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", "dummy", "dummy", };
+static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", "dummy", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *perclk_sels[] = { "ipg", "osc", };
+static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static struct clk *clks[IMX6UL_CLK_END];
+static struct clk_onecell_data clk_data;
+
+static int const clks_init_on[] __initconst = {
+ IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2, IMX6UL_CLK_AIPSTZ3,
+ IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
+ IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
+};
+
+static struct clk_div_table clk_enet_ref_table[] = {
+ { .val = 0, .div = 20, },
+ { .val = 1, .div = 10, },
+ { .val = 2, .div = 5, },
+ { .val = 3, .div = 4, },
+ { }
+};
+
+static struct clk_div_table post_div_table[] = {
+ { .val = 2, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 4, },
+ { }
+};
+
+static struct clk_div_table video_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 1, },
+ { .val = 3, .div = 4, },
+ { }
+};
+
+static u32 share_count_asrc;
+static u32 share_count_audio;
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+
+static void __init imx6ul_clocks_init(struct device_node *ccm_node)
+{
+ struct device_node *np;
+ void __iomem *base;
+ int i;
+
+ clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+
+ clks[IMX6UL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
+ clks[IMX6UL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc");
+
+ /* ipp_di clock is external input */
+ clks[IMX6UL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0");
+ clks[IMX6UL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+ clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
+ clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
+ clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
+ clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
+ clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
+ clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
+ clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+
+ clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6UL_CLK_CSI_SEL] = imx_clk_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT);
+
+ /* Do not bypass PLLs initially */
+ clk_set_parent(clks[IMX6UL_PLL1_BYPASS], clks[IMX6UL_CLK_PLL1]);
+ clk_set_parent(clks[IMX6UL_PLL2_BYPASS], clks[IMX6UL_CLK_PLL2]);
+ clk_set_parent(clks[IMX6UL_PLL3_BYPASS], clks[IMX6UL_CLK_PLL3]);
+ clk_set_parent(clks[IMX6UL_PLL4_BYPASS], clks[IMX6UL_CLK_PLL4]);
+ clk_set_parent(clks[IMX6UL_PLL5_BYPASS], clks[IMX6UL_CLK_PLL5]);
+ clk_set_parent(clks[IMX6UL_PLL6_BYPASS], clks[IMX6UL_CLK_PLL6]);
+ clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]);
+
+ clks[IMX6UL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
+ clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+ clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
+ clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
+ clks[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
+ clks[IMX6UL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
+ clks[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
+
+ /*
+	 * Bit 20 is reserved and read-only; we gate on it only to:
+	 * - make usbphy clk_enable/disable a no-op, and
+	 * - keep the refcount balanced across usbphy clk_enable/disable,
+	 *   since the clk framework may need to enable/disable usbphy's parent
+ */
+ clks[IMX6UL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
+ clks[IMX6UL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+ /*
+ * usbphy*_gate needs to be on after system boots up, and software
+ * never needs to control it anymore.
+ */
+ clks[IMX6UL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
+ clks[IMX6UL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
+
+ /* name parent_name reg idx */
+ clks[IMX6UL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clks[IMX6UL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+ clks[IMX6UL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
+ clks[IMX6UL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3);
+ clks[IMX6UL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
+ clks[IMX6UL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
+ clks[IMX6UL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
+ clks[IMX6UL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
+
+ clks[IMX6UL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
+ base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
+ clks[IMX6UL_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0,
+ base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
+
+ clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20);
+ clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
+ clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
+
+ clks[IMX6UL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6UL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+ clks[IMX6UL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6UL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ /* name parent_name mult div */
+ clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+ clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clks[IMX6UL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels));
+ clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+ clks[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
+ clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+ clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+ clks[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
+ clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+ clks[IMX6UL_CLK_GPMI_SEL] = imx_clk_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels));
+ clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
+ clks[IMX6UL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6UL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
+ clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
+ clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+ clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
+ clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
+ clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+ clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+ clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+ clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+
+ clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
+ clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
+
+ clks[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clks[IMX6UL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clks[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7);
+ clks[IMX6UL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "qspi1_sel", 1, 7);
+
+ clks[IMX6UL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+ clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+ clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6UL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clks[IMX6UL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
+ clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
+ clks[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
+ clks[IMX6UL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6UL_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
+ clks[IMX6UL_CLK_GPMI_PODF] = imx_clk_divider("gpmi_podf", "gpmi_sel", base + 0x24, 22, 3);
+ clks[IMX6UL_CLK_BCH_PODF] = imx_clk_divider("bch_podf", "bch_sel", base + 0x24, 19, 3);
+ clks[IMX6UL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
+ clks[IMX6UL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
+ clks[IMX6UL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6);
+ clks[IMX6UL_CLK_SAI3_PRED] = imx_clk_divider("sai3_pred", "sai3_sel", base + 0x28, 22, 3);
+ clks[IMX6UL_CLK_SAI3_PODF] = imx_clk_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6);
+ clks[IMX6UL_CLK_SAI1_PRED] = imx_clk_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3);
+ clks[IMX6UL_CLK_SAI1_PODF] = imx_clk_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6);
+ clks[IMX6UL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
+ clks[IMX6UL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
+ clks[IMX6UL_CLK_SAI2_PRED] = imx_clk_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3);
+ clks[IMX6UL_CLK_SAI2_PODF] = imx_clk_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6);
+ clks[IMX6UL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clks[IMX6UL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+ clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3);
+ clks[IMX6UL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
+ clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
+ clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
+
+ clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clks[IMX6UL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
+ clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
+
+ /* CCGR0 */
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
+ clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
+ clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
+ clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
+ clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
+ clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
+ clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
+ clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+ clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
+ clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24);
+ clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x68, 26);
+ clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
+ clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+ clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+
+ /* CCGR1 */
+ clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
+ clks[IMX6UL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2);
+ clks[IMX6UL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4);
+ clks[IMX6UL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6);
+ clks[IMX6UL_CLK_ADC2] = imx_clk_gate2("adc2", "ipg", base + 0x6c, 8);
+ clks[IMX6UL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10);
+ clks[IMX6UL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10);
+ clks[IMX6UL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12);
+ clks[IMX6UL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14);
+ clks[IMX6UL_CLK_ADC1] = imx_clk_gate2("adc1", "ipg", base + 0x6c, 16);
+ clks[IMX6UL_CLK_GPT1_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
+ clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
+ clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
+	clks[IMX6UL_CLK_UART4_SERIAL]	= imx_clk_gate2("uart4_serial",	"uart_podf",	base + 0x6c,	24);
+
+ /* CCGR2 */
+ clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2);
+ clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
+ clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
+ clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
+ clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
+ clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
+ clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
+
+ /* CCGR3 */
+ clks[IMX6UL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2);
+ clks[IMX6UL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
+ clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4);
+ clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4);
+ clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
+ clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
+ clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
+ clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
+ clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
+ clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
+ clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28);
+
+ /* CCGR4 */
+ clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12);
+ clks[IMX6UL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
+ clks[IMX6UL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
+ clks[IMX6UL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
+ clks[IMX6UL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22);
+ clks[IMX6UL_CLK_GPMI_BCH_APB] = imx_clk_gate2("gpmi_bch_apb", "bch_podf", base + 0x78, 24);
+ clks[IMX6UL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "gpmi_podf", base + 0x78, 26);
+ clks[IMX6UL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc_podf", base + 0x78, 28);
+ clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30);
+
+ /* CCGR5 */
+ clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
+ clks[IMX6UL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+ clks[IMX6UL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6UL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6UL_CLK_SAI3] = imx_clk_gate2_shared("sai3", "sai3_podf", base + 0x7c, 22, &share_count_sai3);
+ clks[IMX6UL_CLK_SAI3_IPG] = imx_clk_gate2_shared("sai3_ipg", "ipg", base + 0x7c, 22, &share_count_sai3);
+ clks[IMX6UL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24);
+ clks[IMX6UL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24);
+ clks[IMX6UL_CLK_UART7_IPG] = imx_clk_gate2("uart7_ipg", "ipg", base + 0x7c, 26);
+ clks[IMX6UL_CLK_UART7_SERIAL] = imx_clk_gate2("uart7_serial", "uart_podf", base + 0x7c, 26);
+ clks[IMX6UL_CLK_SAI1] = imx_clk_gate2_shared("sai1", "sai1_podf", base + 0x7c, 28, &share_count_sai1);
+ clks[IMX6UL_CLK_SAI1_IPG] = imx_clk_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1);
+ clks[IMX6UL_CLK_SAI2] = imx_clk_gate2_shared("sai2", "sai2_podf", base + 0x7c, 30, &share_count_sai2);
+ clks[IMX6UL_CLK_SAI2_IPG] = imx_clk_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2);
+
+ /* CCGR6 */
+ clks[IMX6UL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+ clks[IMX6UL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
+ clks[IMX6UL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
+ clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6);
+ clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8);
+ clks[IMX6UL_CLK_EIM] = imx_clk_gate2("eim", "eim_slow_podf", base + 0x80, 10);
+ clks[IMX6UL_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16);
+ clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14);
+ clks[IMX6UL_CLK_UART8_SERIAL] = imx_clk_gate2("uart8_serial", "uart_podf", base + 0x80, 14);
+ clks[IMX6UL_CLK_WDOG3] = imx_clk_gate2("wdog3", "ipg", base + 0x80, 20);
+ clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24);
+ clks[IMX6UL_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26);
+ clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28);
+	clks[IMX6UL_CLK_PWM7]		= imx_clk_gate2("pwm7",		"perclk",	base + 0x80,	30);
+
+	/* mask the MMDC CH0 handshake with the CCM */
+ writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
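+	/* report any registration failures before exposing the provider */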
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i]))
+ pr_err("i.MX6UL clk %d: register failed with %ld\n", i, PTR_ERR(clks[i]));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+	/* source perclk from the OSC */
+ clk_set_parent(clks[IMX6UL_CLK_PERCLK_SEL], clks[IMX6UL_CLK_OSC]);
+
+ clk_set_rate(clks[IMX6UL_CLK_ENET_REF], 50000000);
+ clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
+ clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
+
+ /* keep all the clks on just for bringup */
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
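+	/* the MXS USB PHY driver expects the PHY gate clocks to be running */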
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clk_prepare_enable(clks[IMX6UL_CLK_USBPHY1_GATE]);
+ clk_prepare_enable(clks[IMX6UL_CLK_USBPHY2_GATE]);
+ }
+
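+	/* default parents: CAN <- pll3_60m, SIM <- pll3_usb_otg, ENFC <- pll2_pfd2 */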
+ clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]);
+ clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+
+ clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]);
+}
+
+CLK_OF_DECLARE(imx6ul, "fsl,imx6ul-ccm", imx6ul_clocks_init);
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 0b0f6f66ec56..04a3e78ea1bc 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -10,7 +10,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index c34ad8a611dd..8564e4342c7d 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -1,4 +1,3 @@
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index f0d15fb9d783..6addf8f58b97 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -10,7 +10,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index b936cdd1a13c..7cfb7b2a2ed6 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -16,6 +16,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 86f1e362eafb..aed5af23895b 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -10,7 +10,6 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 4a375ead70e9..3f553d0ae0b5 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -10,7 +10,6 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -309,8 +308,7 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
return;
}
- parents[0] = of_clk_get_parent_name(node, 0);
- parents[1] = of_clk_get_parent_name(node, 1);
+ of_clk_parent_fill(node, parents, 2);
if (!parents[0] || !parents[1]) {
pr_err("%s: missing parent clocks\n", __func__);
return;
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 6b6780b1e9c5..11e25c992948 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -16,9 +16,10 @@
#define __DRV_CLK_GATE_H
#include <linux/regmap.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+struct clk;
+
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 08b4b849b491..07c21e44b4b3 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 4b9e04cdf7e8..90eff85f4285 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -700,6 +701,22 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static struct clk_onecell_data *mt8173_top_clk_data __initdata;
+static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
+
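+/*
+ * Enable clocks that must never be gated. This is deferred until both
+ * the topckgen and apmixedsys providers have registered their clocks.
+ */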
+static void __init mtk_clk_enable_critical(void)
+{
+ if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
+ return;
+
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
+ clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
+}
+
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
@@ -712,19 +729,19 @@ static void __init mtk_topckgen_init(struct device_node *node)
return;
}
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8173_clk_lock, clk_data);
- clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
-
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
+
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
@@ -779,8 +796,9 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
#define CON0_MT8173_RST_BAR BIT(24)
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, \
- _tuner_reg, _pcw_reg, _pcw_shift) { \
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
.id = _id, \
.name = _name, \
.reg = _reg, \
@@ -795,14 +813,31 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
.tuner_reg = _tuner_reg, \
.pcw_reg = _pcw_reg, \
.pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
}
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
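+/* each entry gives the highest output rate served by that post-divider */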
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8173_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 702000000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
static const struct mtk_pll_data plls[] = {
PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0),
PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
- PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table),
PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0),
PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0),
PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0),
@@ -818,13 +853,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
return;
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+ mtk_clk_enable_critical();
}
CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9dda9d8ad10b..c5cbecb3d218 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -17,9 +17,10 @@
#include <linux/regmap.h>
#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+struct clk;
+
#define MAX_MUX_GATE_BIT 31
#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1)
@@ -134,6 +135,11 @@ struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
#define HAVE_RST_BAR BIT(0)
+struct mtk_pll_div_table {
+ u32 div;
+ unsigned long freq;
+};
+
struct mtk_pll_data {
int id;
const char *name;
@@ -150,6 +156,7 @@ struct mtk_pll_data {
int pcwbits;
uint32_t pcw_reg;
int pcw_shift;
+ const struct mtk_pll_div_table *div_table;
};
void __init mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 44409e98c52f..622e7b6c62b4 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -90,20 +90,23 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
int postdiv)
{
- u32 con1, pd, val;
+ u32 con1, val;
int pll_en;
- /* set postdiv */
- pd = readl(pll->pd_addr);
- pd &= ~(POSTDIV_MASK << pll->data->pd_shift);
- pd |= (ffs(postdiv) - 1) << pll->data->pd_shift;
- writel(pd, pll->pd_addr);
-
pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
- /* set pcw */
- val = readl(pll->pcw_addr);
+ /* set postdiv */
+ val = readl(pll->pd_addr);
+ val &= ~(POSTDIV_MASK << pll->data->pd_shift);
+ val |= (ffs(postdiv) - 1) << pll->data->pd_shift;
+
+	/* postdiv and pcw need to be set at the same time if they share a register */
+ if (pll->pd_addr != pll->pcw_addr) {
+ writel(val, pll->pd_addr);
+ val = readl(pll->pcw_addr);
+ }
+ /* set pcw */
val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1,
pll->data->pcw_shift);
val |= pcw << pll->data->pcw_shift;
@@ -135,16 +138,28 @@ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin)
{
unsigned long fmin = 1000 * MHZ;
+ const struct mtk_pll_div_table *div_table = pll->data->div_table;
u64 _pcw;
u32 val;
if (freq > pll->data->fmax)
freq = pll->data->fmax;
- for (val = 0; val < 4; val++) {
+ if (div_table) {
+ if (freq > div_table[0].freq)
+ freq = div_table[0].freq;
+
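+		/* walk until the next entry's ceiling drops below the requested rate */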
+ for (val = 0; div_table[val + 1].freq != 0; val++) {
+ if (freq > div_table[val + 1].freq)
+ break;
+ }
*postdiv = 1 << val;
- if (freq * *postdiv >= fmin)
- break;
+ } else {
+ for (val = 0; val < 5; val++) {
+ *postdiv = 1 << val;
+ if ((u64)freq * *postdiv >= fmin)
+ break;
+ }
}
	/* _pcw = freq * postdiv / fin * 2^pcwbits */
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
index 71ad493b94df..f7c30ea54ca8 100644
--- a/drivers/clk/meson/clk-cpu.c
+++ b/drivers/clk/meson/clk-cpu.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#define MESON_CPU_CLK_CNTL1 0x00
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index b8c511c5e7a7..c83ae1367abc 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -15,7 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c
index 09d41c717c52..4c717db05f2d 100644
--- a/drivers/clk/mmp/clk-apbc.c
+++ b/drivers/clk/mmp/clk-apbc.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c
index cdcf2d7f321e..47b5542ce50f 100644
--- a/drivers/clk/mmp/clk-apmu.c
+++ b/drivers/clk/mmp/clk-apmu.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index adbd9d64ded2..d20cd3431ac2 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -27,7 +27,6 @@
static int mmp_clk_gate_enable(struct clk_hw *hw)
{
struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
- struct clk *clk = hw->clk;
unsigned long flags = 0;
unsigned long rate;
u32 tmp;
@@ -44,7 +43,7 @@ static int mmp_clk_gate_enable(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
- rate = __clk_get_rate(clk);
+ rate = clk_hw_get_rate(hw);
		/* Need to delay 2 clock cycles. */
udelay(2000000/rate);
}
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index de6a873175d2..c554833cffc5 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -63,7 +63,7 @@ static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
- int num_parents = __clk_get_num_parents(mix->hw.clk);
+ int num_parents = clk_hw_get_num_parents(&mix->hw);
int i;
if (mix->mux_flags & CLK_MUX_INDEX_BIT)
@@ -113,15 +113,15 @@ static void _filter_clk_table(struct mmp_clk_mix *mix,
{
int i;
struct mmp_clk_mix_clk_table *item;
- struct clk *parent, *clk;
+ struct clk_hw *parent, *hw;
unsigned long parent_rate;
- clk = mix->hw.clk;
+ hw = &mix->hw;
for (i = 0; i < table_size; i++) {
item = &table[i];
- parent = clk_get_parent_by_index(clk, item->parent_index);
- parent_rate = __clk_get_rate(parent);
+ parent = clk_hw_get_parent_by_index(hw, item->parent_index);
+ parent_rate = clk_hw_get_rate(parent);
if (parent_rate % item->rate) {
item->valid = 0;
} else {
@@ -181,7 +181,7 @@ static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
if (timeout == 0) {
pr_err("%s:%s cannot do frequency change\n",
- __func__, __clk_get_name(mix->hw.clk));
+ __func__, clk_hw_get_name(&mix->hw));
ret = -EBUSY;
goto error;
}
@@ -201,27 +201,22 @@ error:
return ret;
}
-static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk)
+static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct mmp_clk_mix *mix = to_clk_mix(hw);
struct mmp_clk_mix_clk_table *item;
- struct clk *parent, *parent_best, *mix_clk;
+ struct clk_hw *parent, *parent_best;
unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
unsigned long gap, gap_best;
u32 div_val_max;
unsigned int div;
int i, j;
- mix_clk = hw->clk;
- parent = NULL;
mix_rate_best = 0;
parent_rate_best = 0;
- gap_best = rate;
+ gap_best = ULONG_MAX;
parent_best = NULL;
if (mix->table) {
@@ -229,11 +224,11 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
item = &mix->table[i];
if (item->valid == 0)
continue;
- parent = clk_get_parent_by_index(mix_clk,
+ parent = clk_hw_get_parent_by_index(hw,
item->parent_index);
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
mix_rate = parent_rate / item->divisor;
- gap = abs(mix_rate - rate);
+ gap = abs(mix_rate - req->rate);
if (parent_best == NULL || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
@@ -244,14 +239,14 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
}
}
} else {
- for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
- parent = clk_get_parent_by_index(mix_clk, i);
- parent_rate = __clk_get_rate(parent);
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ parent_rate = clk_hw_get_rate(parent);
div_val_max = _get_maxdiv(mix);
for (j = 0; j < div_val_max; j++) {
div = _get_div(mix, j);
mix_rate = parent_rate / div;
- gap = abs(mix_rate - rate);
+ gap = abs(mix_rate - req->rate);
if (parent_best == NULL || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
@@ -265,10 +260,14 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
}
found:
- *best_parent_rate = parent_rate_best;
- *best_parent_clk = __clk_get_hw(parent_best);
+ if (!parent_best)
+ return -EINVAL;
+
+ req->best_parent_rate = parent_rate_best;
+ req->best_parent_hw = parent_best;
+ req->rate = mix_rate_best;
- return mix_rate_best;
+ return 0;
}
static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
@@ -381,20 +380,19 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct mmp_clk_mix_clk_table *item;
unsigned long parent_rate;
unsigned int best_divisor;
- struct clk *mix_clk, *parent;
+ struct clk_hw *parent;
int i;
best_divisor = best_parent_rate / rate;
- mix_clk = hw->clk;
if (mix->table) {
for (i = 0; i < mix->table_size; i++) {
item = &mix->table[i];
if (item->valid == 0)
continue;
- parent = clk_get_parent_by_index(mix_clk,
+ parent = clk_hw_get_parent_by_index(hw,
item->parent_index);
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
if (parent_rate == best_parent_rate
&& item->divisor == best_divisor)
break;
@@ -407,13 +405,13 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
else
return -EINVAL;
} else {
- for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
- parent = clk_get_parent_by_index(mix_clk, i);
- parent_rate = __clk_get_rate(parent);
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ parent_rate = clk_hw_get_rate(parent);
if (parent_rate == best_parent_rate)
break;
}
- if (i < __clk_get_num_parents(mix_clk))
+ if (i < clk_hw_get_num_parents(hw))
return _set_rate(mix, _get_mux_val(mix, i),
_get_div_val(mix, best_divisor), 1, 1);
else
@@ -468,20 +466,20 @@ struct clk *mmp_clk_register_mix(struct device *dev,
memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
if (config->table) {
table_bytes = sizeof(*config->table) * config->table_size;
- mix->table = kzalloc(table_bytes, GFP_KERNEL);
+ mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
if (!mix->table) {
pr_err("%s:%s: could not allocate mmp mix table\n",
__func__, name);
kfree(mix);
return ERR_PTR(-ENOMEM);
}
- memcpy(mix->table, config->table, table_bytes);
mix->table_size = config->table_size;
}
if (config->mux_table) {
table_bytes = sizeof(u32) * num_parents;
- mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
+ mix->mux_table = kmemdup(config->mux_table, table_bytes,
+ GFP_KERNEL);
if (!mix->mux_table) {
pr_err("%s:%s: could not allocate mmp mix mux-table\n",
__func__, name);
@@ -489,7 +487,6 @@ struct clk *mmp_clk_register_mix(struct device *dev,
kfree(mix);
return ERR_PTR(-ENOMEM);
}
- memcpy(mix->mux_table, config->mux_table, table_bytes);
}
mix->div_flags = config->div_flags;
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index cf038ef54c59..61893fe73251 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -1,7 +1,6 @@
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 3821a88077ea..5837eb8a212f 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -10,7 +10,8 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
@@ -120,7 +121,7 @@ static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
if (!cpuclk->pmu_dfs)
return -ENODEV;
- cur_rate = __clk_get_rate(hwclk->clk);
+ cur_rate = clk_hw_get_rate(hwclk);
reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
@@ -196,7 +197,6 @@ static void __init of_cpu_clk_setup(struct device_node *node)
for_each_node_by_type(dn, "cpu") {
struct clk_init_data init;
struct clk *clk;
- struct clk *parent_clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
int cpu, err;
@@ -208,9 +208,8 @@ static void __init of_cpu_clk_setup(struct device_node *node)
goto bail_out;
sprintf(clk_name, "cpu%d", cpu);
- parent_clk = of_clk_get(node, 0);
- cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+ cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
cpuclk[cpu].clk_name = clk_name;
cpuclk[cpu].cpu = cpu;
cpuclk[cpu].reg_base = clock_complex_base;
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 15b370ff3748..4a22429cd7a2 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -13,8 +13,8 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 90e1da93877e..049ee27d5a22 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -9,7 +9,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index e6aa6b567d68..73f0240569ac 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -9,7 +9,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 32216f9b7f03..f01876af6bb8 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -9,9 +9,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk/mxs.h>
-#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index a68670868baa..6b572b759f9a 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -9,9 +9,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
index fadae41833ec..d4ca79a868e0 100644
--- a/drivers/clk/mxs/clk-pll.c
+++ b/drivers/clk/mxs/clk-pll.c
@@ -9,7 +9,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 4adeed6c2f94..495f99b7965e 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -9,7 +9,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index f07d821dd75d..a4590956d2a2 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -12,7 +12,8 @@
#ifndef __MXS_CLK_H
#define __MXS_CLK_H
-#include <linux/clk.h>
+struct clk;
+
#include <linux/clk-provider.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81e9e1c788f4..e0a3cb8970ab 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -8,7 +8,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/kernel.h>
diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
index 8c0fe8828f99..c4ceb5eaf46c 100644
--- a/drivers/clk/pistachio/clk-pistachio.c
+++ b/drivers/clk/pistachio/clk-pistachio.c
@@ -159,9 +159,15 @@ PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
"wifi_pll_mux", "bt_pll_mux" };
static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
-static unsigned int pistachio_critical_clks[] __initdata = {
- CLK_MIPS,
- CLK_PERIPH_SYS,
+static unsigned int pistachio_critical_clks_core[] __initdata = {
+ CLK_MIPS
+};
+
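+/* peripheral clocks that are force-enabled once the periph provider registers */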
+static unsigned int pistachio_critical_clks_sys[] __initdata = {
+ PERIPH_CLK_SYS,
+ PERIPH_CLK_SYS_BUS,
+ PERIPH_CLK_DDR,
+ PERIPH_CLK_ROM,
};
static void __init pistachio_clk_init(struct device_node *np)
@@ -193,8 +199,8 @@ static void __init pistachio_clk_init(struct device_node *np)
pistachio_clk_register_provider(p);
- pistachio_clk_force_enable(p, pistachio_critical_clks,
- ARRAY_SIZE(pistachio_critical_clks));
+ pistachio_clk_force_enable(p, pistachio_critical_clks_core,
+ ARRAY_SIZE(pistachio_critical_clks_core));
}
CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
@@ -261,6 +267,9 @@ static void __init pistachio_clk_periph_init(struct device_node *np)
ARRAY_SIZE(pistachio_periph_gates));
pistachio_clk_register_provider(p);
+
+ pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
+ ARRAY_SIZE(pistachio_critical_clks_sys));
}
CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
pistachio_clk_periph_init);
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index e17dada0dd21..7e8daab9025b 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -65,6 +65,12 @@
#define MIN_OUTPUT_FRAC 12000000UL
#define MAX_OUTPUT_FRAC 1600000000UL
+/* Fractional PLL operating modes */
+enum pll_mode {
+ PLL_MODE_FRAC,
+ PLL_MODE_INT,
+};
+
struct pistachio_clk_pll {
struct clk_hw hw;
void __iomem *base;
@@ -88,12 +94,10 @@ static inline void pll_lock(struct pistachio_clk_pll *pll)
cpu_relax();
}
-static inline u32 do_div_round_closest(u64 dividend, u32 divisor)
+static inline u64 do_div_round_closest(u64 dividend, u64 divisor)
{
dividend += divisor / 2;
- do_div(dividend, divisor);
-
- return dividend;
+ return div64_u64(dividend, divisor);
}
static inline struct pistachio_clk_pll *to_pistachio_pll(struct clk_hw *hw)
@@ -101,6 +105,29 @@ static inline struct pistachio_clk_pll *to_pistachio_pll(struct clk_hw *hw)
return container_of(hw, struct pistachio_clk_pll, hw);
}
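+/* DSMPD set in CTRL3 powers down the delta-sigma modulator, i.e. integer mode */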
+static inline enum pll_mode pll_frac_get_mode(struct clk_hw *hw)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3) & PLL_FRAC_CTRL3_DSMPD;
+ return val ? PLL_MODE_INT : PLL_MODE_FRAC;
+}
+
+static inline void pll_frac_set_mode(struct clk_hw *hw, enum pll_mode mode)
+{
+ struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+ u32 val;
+
+ val = pll_readl(pll, PLL_CTRL3);
+ if (mode == PLL_MODE_INT)
+ val |= PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_DACPD;
+ else
+ val &= ~(PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_DACPD);
+
+ pll_writel(pll, val, PLL_CTRL3);
+}
+
static struct pistachio_pll_rate_table *
pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
unsigned long fout)
@@ -136,8 +163,7 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw)
u32 val;
val = pll_readl(pll, PLL_CTRL3);
- val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
- PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+ val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
pll_writel(pll, val, PLL_CTRL3);
@@ -173,8 +199,8 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
struct pistachio_pll_rate_table *params;
int enabled = pll_gf40lp_frac_is_enabled(hw);
- u32 val, vco, old_postdiv1, old_postdiv2;
- const char *name = __clk_get_name(hw->clk);
+ u64 val, vco, old_postdiv1, old_postdiv2;
+ const char *name = clk_hw_get_name(hw);
if (rate < MIN_OUTPUT_FRAC || rate > MAX_OUTPUT_FRAC)
return -EINVAL;
@@ -183,17 +209,21 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
if (!params || !params->refdiv)
return -EINVAL;
- vco = params->fref * params->fbdiv / params->refdiv;
+	/* VCO = fref * (fbdiv + frac / 2^24) / refdiv */
+ vco = params->fref;
+ vco *= (params->fbdiv << 24) + params->frac;
+ vco = div64_u64(vco, params->refdiv << 24);
+
if (vco < MIN_VCO_FRAC_FRAC || vco > MAX_VCO_FRAC_FRAC)
- pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
+ pr_warn("%s: VCO %llu is out of range %lu..%lu\n", name, vco,
MIN_VCO_FRAC_FRAC, MAX_VCO_FRAC_FRAC);
- val = params->fref / params->refdiv;
+ val = div64_u64(params->fref, params->refdiv);
if (val < MIN_PFD)
- pr_warn("%s: PFD %u is too low (min %lu)\n",
+ pr_warn("%s: PFD %llu is too low (min %lu)\n",
name, val, MIN_PFD);
if (val > vco / 16)
- pr_warn("%s: PFD %u is too high (max %u)\n",
+ pr_warn("%s: PFD %llu is too high (max %llu)\n",
name, val, vco / 16);
val = pll_readl(pll, PLL_CTRL1);
@@ -227,6 +257,12 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
(params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT);
pll_writel(pll, val, PLL_CTRL2);
+ /* set operating mode */
+ if (params->frac)
+ pll_frac_set_mode(hw, PLL_MODE_FRAC);
+ else
+ pll_frac_set_mode(hw, PLL_MODE_INT);
+
if (enabled)
pll_lock(pll);
@@ -237,8 +273,7 @@ static unsigned long pll_gf40lp_frac_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
- u32 val, prediv, fbdiv, frac, postdiv1, postdiv2;
- u64 rate = parent_rate;
+ u64 val, prediv, fbdiv, frac, postdiv1, postdiv2, rate;
val = pll_readl(pll, PLL_CTRL1);
prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
@@ -251,7 +286,13 @@ static unsigned long pll_gf40lp_frac_recalc_rate(struct clk_hw *hw,
PLL_FRAC_CTRL2_POSTDIV2_MASK;
frac = (val >> PLL_FRAC_CTRL2_FRAC_SHIFT) & PLL_FRAC_CTRL2_FRAC_MASK;
- rate *= (fbdiv << 24) + frac;
+ /* get operating mode (int/frac) and calculate rate accordingly */
+ rate = parent_rate;
+ if (pll_frac_get_mode(hw) == PLL_MODE_FRAC)
+ rate *= (fbdiv << 24) + frac;
+ else
+ rate *= (fbdiv << 24);
+
rate = do_div_round_closest(rate, (prediv * postdiv1 * postdiv2) << 24);
return rate;
@@ -279,7 +320,7 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw)
u32 val;
val = pll_readl(pll, PLL_CTRL1);
- val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
+ val &= ~(PLL_INT_CTRL1_PD |
PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
pll_writel(pll, val, PLL_CTRL1);
@@ -316,7 +357,7 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
struct pistachio_pll_rate_table *params;
int enabled = pll_gf40lp_laint_is_enabled(hw);
u32 val, vco, old_postdiv1, old_postdiv2;
- const char *name = __clk_get_name(hw->clk);
+ const char *name = clk_hw_get_name(hw);
if (rate < MIN_OUTPUT_LA || rate > MAX_OUTPUT_LA)
return -EINVAL;
@@ -325,12 +366,12 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
if (!params || !params->refdiv)
return -EINVAL;
- vco = params->fref * params->fbdiv / params->refdiv;
+ vco = div_u64(params->fref * params->fbdiv, params->refdiv);
if (vco < MIN_VCO_LA || vco > MAX_VCO_LA)
pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
MIN_VCO_LA, MAX_VCO_LA);
- val = params->fref / params->refdiv;
+ val = div_u64(params->fref, params->refdiv);
if (val < MIN_PFD)
pr_warn("%s: PFD %u is too low (min %lu)\n",
name, val, MIN_PFD);
diff --git a/drivers/clk/pistachio/clk.c b/drivers/clk/pistachio/clk.c
index 85faa83e1bd7..698cad4f509e 100644
--- a/drivers/clk/pistachio/clk.c
+++ b/drivers/clk/pistachio/clk.c
@@ -6,6 +6,7 @@
* version 2, as published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
index 52fabbc24624..8d45178dbde3 100644
--- a/drivers/clk/pistachio/clk.h
+++ b/drivers/clk/pistachio/clk.h
@@ -95,13 +95,13 @@ struct pistachio_fixed_factor {
}
struct pistachio_pll_rate_table {
- unsigned long fref;
- unsigned long fout;
- unsigned int refdiv;
- unsigned int fbdiv;
- unsigned int postdiv1;
- unsigned int postdiv2;
- unsigned int frac;
+ unsigned long long fref;
+ unsigned long long fout;
+ unsigned long long refdiv;
+ unsigned long long fbdiv;
+ unsigned long long postdiv1;
+ unsigned long long postdiv2;
+ unsigned long long frac;
};
enum pistachio_pll_type {
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6cd88d963a7f..542e45ef5087 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -79,7 +79,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
clks[3] / 1000000, (clks[3] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 9a31b77eed23..5b82d30baf9f 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -80,7 +80,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
pr_info("System bus clock: %ld.%02ldMHz\n",
clks[4] / 1000000, (clks[4] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
bool pxa27x_is_ppll_disabled(void)
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..4af4eed5f89f 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -78,7 +78,7 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
pr_info("System bus clock: %ld.%02ldMHz\n",
clks[4] / 1000000, (clks[4] % 1000000) / 10000);
}
- return (unsigned int)clks[0];
+ return (unsigned int)clks[0] / KHz;
}
static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
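+/* gate bits 0..31 live in CKENA, bits 32..63 in CKENB */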
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 6b4d2bcb1a53..26f7af315066 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -75,7 +75,7 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling,
bool (check_halt)(const struct clk_branch *, bool))
{
bool voted = br->halt_check & BRANCH_VOTED;
- const char *name = __clk_get_name(br->clkr.hw.clk);
+ const char *name = clk_hw_get_name(&br->clkr.hw);
/* Skip checking halt bit if the clock is in hardware gated mode */
if (clk_branch_in_hwcg_mode(br))
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 245d5063a385..5b940d629045 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -135,19 +135,19 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
return NULL;
}
-static long
-clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int
+clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
const struct pll_freq_tbl *f;
- f = find_freq(pll->freq_tbl, rate);
+ f = find_freq(pll->freq_tbl, req->rate);
if (!f)
- return clk_pll_recalc_rate(hw, *p_rate);
+ req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate);
+ else
+ req->rate = f->freq;
- return f->freq;
+ return 0;
}
static int
@@ -194,7 +194,7 @@ static int wait_for_pll(struct clk_pll *pll)
u32 val;
int count;
int ret;
- const char *name = __clk_get_name(pll->clkr.hw.clk);
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
/* Wait for pll to enable. */
for (count = 200; count > 0; count--) {
@@ -213,7 +213,7 @@ static int wait_for_pll(struct clk_pll *pll)
static int clk_pll_vote_enable(struct clk_hw *hw)
{
int ret;
- struct clk_pll *p = to_clk_pll(__clk_get_hw(__clk_get_parent(hw->clk)));
+ struct clk_pll *p = to_clk_pll(clk_hw_get_parent(hw));
ret = clk_enable_regmap(hw);
if (ret)
@@ -292,3 +292,78 @@ void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
clk_pll_set_fsm_mode(pll, regmap, 0);
}
EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
+
+static int clk_pll_sr2_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ int ret;
+ u32 mode;
+
+ ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
+ if (ret)
+ return ret;
+
+ /* Disable PLL bypass mode. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_BYPASSNL,
+ PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll(pll);
+ if (ret)
+ return ret;
+
+ /* Enable PLL output. */
+ return regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
+ PLL_OUTCTRL);
+}
+
+static int
+clk_pll_sr2_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ const struct pll_freq_tbl *f;
+ bool enabled;
+ u32 mode;
+ u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N;
+
+ f = find_freq(pll->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
+ enabled = (mode & enable_mask) == enable_mask;
+
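+	/* the PLL must be disabled while L/M/N are reprogrammed */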
+ if (enabled)
+ clk_pll_disable(hw);
+
+ regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l);
+ regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, f->m);
+ regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, f->n);
+
+ if (enabled)
+ clk_pll_sr2_enable(hw);
+
+ return 0;
+}
+
+const struct clk_ops clk_pll_sr2_ops = {
+ .enable = clk_pll_sr2_enable,
+ .disable = clk_pll_disable,
+ .set_rate = clk_pll_sr2_set_rate,
+ .recalc_rate = clk_pll_recalc_rate,
+ .determine_rate = clk_pll_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pll_sr2_ops);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index c9c0cda306d0..ffd0c63bddbc 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -62,6 +62,7 @@ struct clk_pll {
extern const struct clk_ops clk_pll_ops;
extern const struct clk_ops clk_pll_vote_ops;
+extern const struct clk_ops clk_pll_sr2_ops;
#define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 7b3d62674203..bccedc4b5756 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -45,7 +45,7 @@ static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns)
static u8 clk_rcg_get_parent(struct clk_hw *hw)
{
struct clk_rcg *rcg = to_clk_rcg(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 ns;
int i, ret;
@@ -59,7 +59,7 @@ static u8 clk_rcg_get_parent(struct clk_hw *hw)
err:
pr_debug("%s: Clock %s has invalid parent, using default.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return 0;
}
@@ -72,7 +72,7 @@ static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 ns, reg;
int bank;
int i, ret;
@@ -95,7 +95,7 @@ static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
err:
pr_debug("%s: Clock %s has invalid parent, using default.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return 0;
}
@@ -404,14 +404,12 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, pre_div);
}
-static long _freq_tbl_determine_rate(struct clk_hw *hw,
- const struct freq_tbl *f, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p_hw,
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+ struct clk_rate_request *req,
const struct parent_map *parent_map)
{
- unsigned long clk_flags;
- struct clk *p;
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
int index;
f = qcom_find_freq(f, rate);
@@ -422,8 +420,8 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
if (index < 0)
return index;
- clk_flags = __clk_get_flags(hw->clk);
- p = clk_get_parent_by_index(hw->clk, index);
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = rate * f->pre_div;
if (f->n) {
@@ -433,27 +431,26 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
rate = tmp;
}
} else {
- rate = __clk_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
- *p_hw = __clk_get_hw(p);
- *p_rate = rate;
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
- return f->freq;
+ return 0;
}
-static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int clk_rcg_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg *rcg = to_clk_rcg(hw);
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
- max_rate, p_rate, p, rcg->s.parent_map);
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req,
+ rcg->s.parent_map);
}
-static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int clk_dyn_rcg_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
u32 reg;
@@ -464,24 +461,22 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
bank = reg_to_bank(rcg, reg);
s = &rcg->s[bank];
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
- max_rate, p_rate, p, s->parent_map);
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map);
}
-static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p_hw)
+static int clk_rcg_bypass_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg *rcg = to_clk_rcg(hw);
const struct freq_tbl *f = rcg->freq_tbl;
- struct clk *p;
+ struct clk_hw *p;
int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src);
- p = clk_get_parent_by_index(hw->clk, index);
- *p_hw = __clk_get_hw(p);
- *p_rate = __clk_round_rate(p, rate);
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_round_rate(p, req->rate);
+ req->rate = req->best_parent_rate;
- return *p_rate;
+ return 0;
}
static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b95d17fbb8d7..9aec1761fd29 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -63,7 +63,7 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 cfg;
int i, ret;
@@ -80,7 +80,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
err:
pr_debug("%s: Clock %s has invalid parent, using default.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return 0;
}
@@ -89,7 +89,7 @@ static int update_config(struct clk_rcg2 *rcg)
int count, ret;
u32 cmd;
struct clk_hw *hw = &rcg->clkr.hw;
- const char *name = __clk_get_name(hw->clk);
+ const char *name = clk_hw_get_name(hw);
ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
CMD_UPDATE, CMD_UPDATE);
@@ -176,12 +176,11 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, hid_div);
}
-static long _freq_tbl_determine_rate(struct clk_hw *hw,
- const struct freq_tbl *f, unsigned long rate,
- unsigned long *p_rate, struct clk_hw **p_hw)
+static int _freq_tbl_determine_rate(struct clk_hw *hw,
+ const struct freq_tbl *f, struct clk_rate_request *req)
{
- unsigned long clk_flags;
- struct clk *p;
+ unsigned long clk_flags, rate = req->rate;
+ struct clk_hw *p;
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int index;
@@ -193,8 +192,8 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
if (index < 0)
return index;
- clk_flags = __clk_get_flags(hw->clk);
- p = clk_get_parent_by_index(hw->clk, index);
+ clk_flags = clk_hw_get_flags(hw);
+ p = clk_hw_get_parent_by_index(hw, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
rate /= 2;
@@ -208,21 +207,21 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
rate = tmp;
}
} else {
- rate = __clk_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
- *p_hw = __clk_get_hw(p);
- *p_rate = rate;
+ req->best_parent_hw = p;
+ req->best_parent_rate = rate;
+ req->rate = f->freq;
- return f->freq;
+ return 0;
}
-static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
@@ -374,35 +373,33 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
-static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac;
int delta = 100000;
- s64 src_rate = *p_rate;
s64 request;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
/* Force the correct parent */
- *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, index));
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
- if (src_rate == 810000000)
+ if (req->best_parent_rate == 810000000)
frac = frac_table_810m;
else
frac = frac_table_675m;
for (; frac->num; frac++) {
- request = rate;
+ request = req->rate;
request *= frac->den;
request = div_s64(request, frac->num);
- if ((src_rate < (request - delta)) ||
- (src_rate > (request + delta)))
+ if ((req->best_parent_rate < (request - delta)) ||
+ (req->best_parent_rate > (request + delta)))
continue;
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
@@ -410,8 +407,10 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
hid_div >>= CFG_SRC_DIV_SHIFT;
hid_div &= mask;
- return calc_rate(src_rate, frac->num, frac->den, !!frac->den,
- hid_div);
+ req->rate = calc_rate(req->best_parent_rate,
+ frac->num, frac->den,
+ !!frac->den, hid_div);
+ return 0;
}
return -EINVAL;
@@ -428,28 +427,28 @@ const struct clk_ops clk_edp_pixel_ops = {
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
-static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate, unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p_hw)
+static int clk_byte_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f = rcg->freq_tbl;
int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
unsigned long parent_rate, div;
u32 mask = BIT(rcg->hid_width) - 1;
- struct clk *p;
+ struct clk_hw *p;
- if (rate == 0)
+ if (req->rate == 0)
return -EINVAL;
- p = clk_get_parent_by_index(hw->clk, index);
- *p_hw = __clk_get_hw(p);
- *p_rate = parent_rate = __clk_round_rate(p, rate);
+ req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
- div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
div = min_t(u32, div, mask);
- return calc_rate(parent_rate, 0, 0, 0, div);
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
}
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -494,10 +493,8 @@ static const struct frac_entry frac_table_pixel[] = {
{ }
};
-static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *p_rate, struct clk_hw **p)
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
unsigned long request, src_rate;
@@ -505,20 +502,20 @@ static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
- struct clk *parent = clk_get_parent_by_index(hw->clk, index);
- *p = __clk_get_hw(parent);
+ req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
for (; frac->num; frac++) {
- request = (rate * frac->den) / frac->num;
+ request = (req->rate * frac->den) / frac->num;
- src_rate = __clk_round_rate(parent, request);
+ src_rate = clk_hw_round_rate(req->best_parent_hw, request);
if ((src_rate < (request - delta)) ||
(src_rate > (request + delta)))
continue;
- *p_rate = src_rate;
- return (src_rate * frac->num) / frac->den;
+ req->best_parent_rate = src_rate;
+ req->rate = (src_rate * frac->num) / frac->den;
+ return 0;
}
return -EINVAL;
@@ -530,19 +527,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
struct freq_tbl f = *rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- unsigned long request, src_rate;
+ unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
u32 hid_div;
- int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
- struct clk *parent = clk_get_parent_by_index(hw->clk, index);
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
- src_rate = __clk_round_rate(parent, request);
- if ((src_rate < (request - delta)) ||
- (src_rate > (request + delta)))
+ if ((parent_rate < (request - delta)) ||
+ (parent_rate > (request + delta)))
continue;
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
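
Editor's note: the clk-rcg2.c hunks above all follow one pattern — the .determine_rate callbacks move from the old long-returning prototype with min/max and out-pointer arguments to the clk_rate_request based prototype the clk core introduced in this release, returning 0 or a negative errno and reporting results through the request structure. A minimal sketch of the new callback shape, assuming a plain integer divider (my_div_determine_rate and MY_MAX_DIV are made-up names, not part of this patch):

	#include <linux/clk-provider.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	#define MY_MAX_DIV	16	/* made-up divider limit */

	static int my_div_determine_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
	{
		struct clk_hw *parent = clk_hw_get_parent(hw);
		unsigned long parent_rate, div;

		if (!parent || req->rate == 0)
			return -EINVAL;

		parent_rate = clk_hw_get_rate(parent);
		if (!parent_rate)
			return -EINVAL;

		/* pick the smallest divider that does not overshoot the request */
		div = DIV_ROUND_UP(parent_rate, req->rate);
		div = min_t(unsigned long, div, MY_MAX_DIV);

		/* results go into the request instead of the return value */
		req->best_parent_hw = parent;
		req->best_parent_rate = parent_rate;
		req->rate = parent_rate / div;

		return 0;
	}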
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index f7101e330b1d..2dedceefd21d 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -12,6 +12,7 @@
*/
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
@@ -45,7 +46,7 @@ EXPORT_SYMBOL_GPL(qcom_find_freq);
int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
{
- int i, num_parents = __clk_get_num_parents(hw->clk);
+ int i, num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++)
if (src == map[i].src)
@@ -144,3 +145,5 @@ void qcom_cc_remove(struct platform_device *pdev)
reset_controller_unregister(platform_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(qcom_cc_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 54a756b90a37..3563019b8e3c 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -48,7 +48,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_GPLL0, 1 }
};
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
"xo",
"gpll0_vote",
};
@@ -59,7 +59,7 @@ static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
{ P_GPLL4, 5 }
};
-static const char *gcc_xo_gpll0_gpll4[] = {
+static const char * const gcc_xo_gpll0_gpll4[] = {
"xo",
"gpll0_vote",
"gpll4_vote",
@@ -70,7 +70,7 @@ static const struct parent_map gcc_xo_sata_asic0_map[] = {
{ P_SATA_ASIC0_CLK, 2 }
};
-static const char *gcc_xo_sata_asic0[] = {
+static const char * const gcc_xo_sata_asic0[] = {
"xo",
"sata_asic0_clk",
};
@@ -80,7 +80,7 @@ static const struct parent_map gcc_xo_sata_rx_map[] = {
{ P_SATA_RX_CLK, 2}
};
-static const char *gcc_xo_sata_rx[] = {
+static const char * const gcc_xo_sata_rx[] = {
"xo",
"sata_rx_clk",
};
@@ -90,7 +90,7 @@ static const struct parent_map gcc_xo_pcie_map[] = {
{ P_PCIE_0_1_PIPE_CLK, 2 }
};
-static const char *gcc_xo_pcie[] = {
+static const char * const gcc_xo_pcie[] = {
"xo",
"pcie_pipe",
};
@@ -100,7 +100,7 @@ static const struct parent_map gcc_xo_pcie_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char *gcc_xo_pcie_sleep[] = {
+static const char * const gcc_xo_pcie_sleep[] = {
"xo",
"sleep_clk_src",
};
@@ -2105,6 +2105,7 @@ static struct clk_branch gcc_ce1_clk = {
"ce1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
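
Editor's note: the repeated const char * to const char * const conversions in this and the following qcom clock drivers make the parent-name tables immutable end to end, matching the constified parent_names member of struct clk_init_data and allowing the arrays to be placed in read-only data. The difference in one line (illustrative only):

	static const char *names_rw[] = { "xo" };	/* array slots may still be reassigned */
	static const char * const names_ro[] = { "xo" };	/* fully const, can live in .rodata */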
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 563969942a1d..40e480220cd3 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -188,7 +188,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
"pxo",
"pll8_vote",
};
@@ -199,7 +199,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
"pxo",
"pll8_vote",
"cxo",
@@ -215,7 +215,7 @@ static const struct parent_map gcc_pxo_pll3_sata_map[] = {
{ P_PLL3, 6 }
};
-static const char *gcc_pxo_pll3[] = {
+static const char * const gcc_pxo_pll3[] = {
"pxo",
"pll3",
};
@@ -226,7 +226,7 @@ static const struct parent_map gcc_pxo_pll8_pll0[] = {
{ P_PLL0, 2 }
};
-static const char *gcc_pxo_pll8_pll0_map[] = {
+static const char * const gcc_pxo_pll8_pll0_map[] = {
"pxo",
"pll8_vote",
"pll0_vote",
@@ -240,7 +240,7 @@ static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
{ P_PLL18, 1 }
};
-static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+static const char * const gcc_pxo_pll8_pll14_pll18_pll0[] = {
"pxo",
"pll8_vote",
"pll0_vote",
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index fc6b12da5b30..b02826ed770a 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -70,7 +70,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
"pxo",
"pll8_vote",
};
@@ -81,7 +81,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
"pxo",
"pll8_vote",
"cxo",
@@ -1917,7 +1917,7 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
}
};
-static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
@@ -1984,7 +1984,7 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
}
};
-static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index c66f7bc2ae87..22a4e1e732c0 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -51,7 +51,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_GPLL0, 1 },
};
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
"xo",
"gpll0_vote",
};
@@ -62,7 +62,7 @@ static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
{ P_BIMC, 2 },
};
-static const char *gcc_xo_gpll0_bimc[] = {
+static const char * const gcc_xo_gpll0_bimc[] = {
"xo",
"gpll0_vote",
"bimc_pll_vote",
@@ -75,7 +75,7 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
{ P_GPLL2_AUX, 2 },
};
-static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
+static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = {
"xo",
"gpll0_vote",
"gpll1_vote",
@@ -88,7 +88,7 @@ static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char *gcc_xo_gpll0_gpll2[] = {
+static const char * const gcc_xo_gpll0_gpll2[] = {
"xo",
"gpll0_vote",
"gpll2_vote",
@@ -99,7 +99,7 @@ static const struct parent_map gcc_xo_gpll0a_map[] = {
{ P_GPLL0_AUX, 2 },
};
-static const char *gcc_xo_gpll0a[] = {
+static const char * const gcc_xo_gpll0a[] = {
"xo",
"gpll0_vote",
};
@@ -111,7 +111,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
{ P_SLEEP_CLK, 6 },
};
-static const char *gcc_xo_gpll0_gpll1a_sleep[] = {
+static const char * const gcc_xo_gpll0_gpll1a_sleep[] = {
"xo",
"gpll0_vote",
"gpll1_vote",
@@ -124,7 +124,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
{ P_GPLL1_AUX, 2 },
};
-static const char *gcc_xo_gpll0_gpll1a[] = {
+static const char * const gcc_xo_gpll0_gpll1a[] = {
"xo",
"gpll0_vote",
"gpll1_vote",
@@ -135,7 +135,7 @@ static const struct parent_map gcc_xo_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 2 },
};
-static const char *gcc_xo_dsibyte[] = {
+static const char * const gcc_xo_dsibyte[] = {
"xo",
"dsi0pllbyte",
};
@@ -146,7 +146,7 @@ static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 1 },
};
-static const char *gcc_xo_gpll0a_dsibyte[] = {
+static const char * const gcc_xo_gpll0a_dsibyte[] = {
"xo",
"gpll0_vote",
"dsi0pllbyte",
@@ -158,7 +158,7 @@ static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 2 },
};
-static const char *gcc_xo_gpll0_dsiphy[] = {
+static const char * const gcc_xo_gpll0_dsiphy[] = {
"xo",
"gpll0_vote",
"dsi0pll",
@@ -170,7 +170,7 @@ static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 1 },
};
-static const char *gcc_xo_gpll0a_dsiphy[] = {
+static const char * const gcc_xo_gpll0a_dsiphy[] = {
"xo",
"gpll0_vote",
"dsi0pll",
@@ -183,7 +183,7 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char *gcc_xo_gpll0a_gpll1_gpll2[] = {
+static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
"xo",
"gpll0_vote",
"gpll1_vote",
@@ -2278,7 +2278,7 @@ static struct clk_branch gcc_prng_ahb_clk = {
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x45004,
- .enable_mask = BIT(0),
+ .enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
.parent_names = (const char *[]){
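
Editor's note: besides the constification, the gcc-msm8916 hunk carries one functional fix. gcc_prng_ahb_clk is a voted branch (BRANCH_HALT_VOTED), so enabling it means setting this clock's assigned bit in a shared voting register rather than toggling a private branch register — and that bit is 8, not 0. A hedged sketch of the voting write (vote_branch_on is a hypothetical helper, not the driver's actual code path):

	#include <linux/bitops.h>
	#include <linux/regmap.h>

	/* hypothetical helper: assert this clock's vote bit in the shared register */
	static int vote_branch_on(struct regmap *map, u32 vote_reg, unsigned int bit)
	{
		/* BIT(8) == 0x100: set only our assigned bit, leaving other votes intact */
		return regmap_update_bits(map, vote_reg, BIT(bit), BIT(bit));
	}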
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb6a4f9fa107..aa294b1bad34 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -125,7 +125,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
"pxo",
"pll8_vote",
};
@@ -136,7 +136,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
"pxo",
"pll8_vote",
"cxo",
@@ -148,7 +148,7 @@ static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
{ P_PLL3, 6 }
};
-static const char *gcc_pxo_pll8_pll3[] = {
+static const char * const gcc_pxo_pll8_pll3[] = {
"pxo",
"pll8_vote",
"pll3",
@@ -2085,7 +2085,7 @@ static struct clk_rcg usb_hsic_xcvr_fs_src = {
}
};
-static const char *usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
+static const char * const usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
static struct clk_branch usb_hsic_xcvr_fs_clk = {
.halt_reg = 0x2fc8,
@@ -2181,7 +2181,7 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
}
};
-static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
@@ -2248,7 +2248,7 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
}
};
-static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index c39d09874e74..2bcf87538f9d 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -44,7 +44,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_GPLL0, 1 }
};
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
"xo",
"gpll0_vote",
};
@@ -55,7 +55,7 @@ static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
{ P_GPLL4, 5 }
};
-static const char *gcc_xo_gpll0_gpll4[] = {
+static const char * const gcc_xo_gpll0_gpll4[] = {
"xo",
"gpll0_vote",
"gpll4_vote",
@@ -1783,6 +1783,7 @@ static struct clk_branch gcc_ce1_clk = {
"ce1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
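
Editor's note: the two gcc_ce1_clk hunks (apq8084 earlier and msm8974 here) add CLK_SET_RATE_PARENT so that a rate request on the fixed 1:1 branch is forwarded to the rate-settable ce1_clk_src RCG instead of being rejected at the branch. From a consumer's point of view, sketched with a made-up "core" con_id:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int crypto_set_core_rate(struct device *dev)
	{
		/* "core" is a hypothetical con_id for illustration */
		struct clk *ce_clk = devm_clk_get(dev, "core");

		if (IS_ERR(ce_clk))
			return PTR_ERR(ce_clk);

		/* with CLK_SET_RATE_PARENT the branch forwards this to its RCG */
		return clk_set_rate(ce_clk, 100000000);
	}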
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 47f0ac16d149..93ad42b14366 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -71,7 +71,7 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char *lcc_pxo_pll4[] = {
+static const char * const lcc_pxo_pll4[] = {
"pxo",
"pll4_vote",
};
@@ -146,7 +146,7 @@ static struct clk_rcg mi2s_osr_src = {
},
};
-static const char *lcc_mi2s_parents[] = {
+static const char * const lcc_mi2s_parents[] = {
"mi2s_osr_src",
};
@@ -340,7 +340,7 @@ static struct clk_rcg spdif_src = {
},
};
-static const char *lcc_spdif_parents[] = {
+static const char * const lcc_spdif_parents[] = {
"spdif_src",
};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index d0df9d5fc3af..ecb96c284675 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -57,7 +57,7 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char *lcc_pxo_pll4[] = {
+static const char * const lcc_pxo_pll4[] = {
"pxo",
"pll4_vote",
};
@@ -127,7 +127,7 @@ static struct clk_rcg mi2s_osr_src = {
},
};
-static const char *lcc_mi2s_parents[] = {
+static const char * const lcc_mi2s_parents[] = {
"mi2s_osr_src",
};
@@ -233,7 +233,7 @@ static struct clk_rcg prefix##_osr_src = { \
}, \
}; \
\
-static const char *lcc_##prefix##_parents[] = { \
+static const char * const lcc_##prefix##_parents[] = { \
#prefix "_osr_src", \
}; \
\
@@ -445,7 +445,7 @@ static struct clk_rcg slimbus_src = {
},
};
-static const char *lcc_slimbus_parents[] = {
+static const char * const lcc_slimbus_parents[] = {
"slimbus_src",
};
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1b17df2cb0af..f0ee6bde11af 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -53,7 +53,7 @@ static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
{ P_GPLL0, 5 }
};
-static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -69,7 +69,7 @@ static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
{ P_DSI1PLL, 3 }
};
-static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"xo",
"mmpll0_vote",
"hdmipll",
@@ -86,7 +86,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
{ P_MMPLL2, 3 }
};
-static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -102,7 +102,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
{ P_MMPLL3, 3 }
};
-static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -119,7 +119,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char *mmcc_xo_dsi_hdmi_edp[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp[] = {
"xo",
"edp_link_clk",
"hdmipll",
@@ -137,7 +137,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
"xo",
"edp_link_clk",
"hdmipll",
@@ -155,7 +155,7 @@ static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL_BYTE, 2 }
};
-static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"xo",
"edp_link_clk",
"hdmipll",
@@ -172,7 +172,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll0_map[] = {
{ P_MMPLL4, 3 }
};
-static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll0[] = {
"xo",
"mmpll0",
"mmpll1",
@@ -189,7 +189,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
{ P_GPLL1, 4 }
};
-static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll1_0[] = {
"xo",
"mmpll0",
"mmpll1",
@@ -208,7 +208,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
{ P_MMSLEEP, 6 }
};
-static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
"xo",
"mmpll0",
"mmpll1",
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 9711bca9cc06..bad02aebf959 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -50,7 +51,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
{ P_PLL2, 1 }
};
-static const char *mmcc_pxo_pll8_pll2[] = {
+static const char * const mmcc_pxo_pll8_pll2[] = {
"pxo",
"pll8_vote",
"pll2",
@@ -63,7 +64,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
{ P_PLL3, 3 }
};
-static const char *mmcc_pxo_pll8_pll2_pll15[] = {
+static const char * const mmcc_pxo_pll8_pll2_pll15[] = {
"pxo",
"pll8_vote",
"pll2",
@@ -77,7 +78,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
{ P_PLL15, 3 }
};
-static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+static const char * const mmcc_pxo_pll8_pll2_pll3[] = {
"pxo",
"pll8_vote",
"pll2",
@@ -508,8 +509,7 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index)
int ret = 0;
u32 val;
struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
- struct clk *clk = hw->clk;
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
/*
* These clocks select three inputs via two muxes. One mux selects
@@ -520,7 +520,8 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index)
* needs to be on at what time.
*/
for (i = 0; i < num_parents; i++) {
- ret = clk_prepare_enable(clk_get_parent_by_index(clk, i));
+ struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+ ret = clk_prepare_enable(p->clk);
if (ret)
goto err;
}
@@ -548,8 +549,10 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index)
udelay(1);
err:
- for (i--; i >= 0; i--)
- clk_disable_unprepare(clk_get_parent_by_index(clk, i));
+ for (i--; i >= 0; i--) {
+ struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+ clk_disable_unprepare(p->clk);
+ }
return ret;
}
@@ -579,7 +582,7 @@ static const struct clk_ops clk_ops_pix_rdi = {
.determine_rate = __clk_mux_determine_rate,
};
-static const char *pix_rdi_parents[] = {
+static const char * const pix_rdi_parents[] = {
"csi0_clk",
"csi1_clk",
"csi2_clk",
@@ -709,7 +712,7 @@ static struct clk_rcg csiphytimer_src = {
},
};
-static const char *csixphy_timer_src[] = { "csiphytimer_src" };
+static const char * const csixphy_timer_src[] = { "csiphytimer_src" };
static struct clk_branch csiphy0_timer_clk = {
.halt_reg = 0x01e8,
@@ -1385,7 +1388,7 @@ static const struct parent_map mmcc_pxo_hdmi_map[] = {
{ P_HDMI_PLL, 3 }
};
-static const char *mmcc_pxo_hdmi[] = {
+static const char * const mmcc_pxo_hdmi[] = {
"pxo",
"hdmi_pll",
};
@@ -1428,7 +1431,7 @@ static struct clk_rcg tv_src = {
},
};
-static const char *tv_src_name[] = { "tv_src" };
+static const char * const tv_src_name[] = { "tv_src" };
static struct clk_branch tv_enc_clk = {
.halt_reg = 0x01d4,
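
Editor's note: the mmcc-msm8960 changes are part of the same provider-API migration seen earlier — the struct clk based helpers (__clk_get_num_parents, clk_get_parent_by_index, __clk_get_name) give way to the clk_hw based equivalents, so provider code no longer reaches through hw->clk. A small sketch of the new helpers (dump_parents is illustrative, not from the patch):

	#include <linux/clk-provider.h>
	#include <linux/printk.h>

	/* walk a provider clock's parents using only clk_hw helpers */
	static void dump_parents(struct clk_hw *hw)
	{
		unsigned int i, n = clk_hw_get_num_parents(hw);

		for (i = 0; i < n; i++) {
			struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);

			if (p)
				pr_info("%s <- %s @ %lu Hz\n", clk_hw_get_name(hw),
					clk_hw_get_name(p), clk_hw_get_rate(p));
		}
	}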
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 07f4cc159ad3..0987bf443e1f 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -56,7 +56,7 @@ static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
{ P_GPLL0, 5 }
};
-static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -72,7 +72,7 @@ static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
{ P_DSI1PLL, 3 }
};
-static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"xo",
"mmpll0_vote",
"hdmipll",
@@ -89,7 +89,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
{ P_MMPLL2, 3 }
};
-static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -105,7 +105,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
{ P_MMPLL3, 3 }
};
-static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -121,7 +121,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_gpll1_0_map[] = {
{ P_GPLL1, 4 }
};
-static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
+static const char * const mmcc_xo_mmpll0_1_gpll1_0[] = {
"xo",
"mmpll0_vote",
"mmpll1_vote",
@@ -138,7 +138,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char *mmcc_xo_dsi_hdmi_edp[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp[] = {
"xo",
"edp_link_clk",
"hdmipll",
@@ -156,7 +156,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL, 2 }
};
-static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
"xo",
"edp_link_clk",
"hdmipll",
@@ -174,7 +174,7 @@ static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
{ P_DSI1PLL_BYTE, 2 }
};
-static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"xo",
"edp_link_clk",
"hdmipll",
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 2714097f90db..b27edd6c8183 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,8 +6,10 @@ obj-y += clk-rockchip.o
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
+obj-y += clk-inverter.o
obj-y += clk-mmc-phase.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-y += clk-rk3188.o
obj-y += clk-rk3288.o
+obj-y += clk-rk3368.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index fb7721bd37e6..330870a6d8bf 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -35,6 +35,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk.h"
diff --git a/drivers/clk/rockchip/clk-inverter.c b/drivers/clk/rockchip/clk-inverter.c
new file mode 100644
index 000000000000..7cbf43beb3c6
--- /dev/null
+++ b/drivers/clk/rockchip/clk-inverter.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include "clk.h"
+
+struct rockchip_inv_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ int shift;
+ int flags;
+ spinlock_t *lock;
+};
+
+#define to_inv_clock(_hw) container_of(_hw, struct rockchip_inv_clock, hw)
+
+#define INVERTER_MASK 0x1
+
+static int rockchip_inv_get_phase(struct clk_hw *hw)
+{
+ struct rockchip_inv_clock *inv_clock = to_inv_clock(hw);
+ u32 val;
+
+ val = readl(inv_clock->reg) >> inv_clock->shift;
+ val &= INVERTER_MASK;
+ return val ? 180 : 0;
+}
+
+static int rockchip_inv_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct rockchip_inv_clock *inv_clock = to_inv_clock(hw);
+ u32 val;
+
+ if (degrees % 180 == 0) {
+ val = !!degrees;
+ } else {
+ pr_err("%s: unsupported phase %d for %s\n",
+ __func__, degrees, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ if (inv_clock->flags & ROCKCHIP_INVERTER_HIWORD_MASK) {
+ writel(HIWORD_UPDATE(val, INVERTER_MASK, inv_clock->shift),
+ inv_clock->reg);
+ } else {
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(inv_clock->lock, flags);
+
+ reg = readl(inv_clock->reg);
+ reg &= ~BIT(inv_clock->shift);
+ reg |= val;
+ writel(reg, inv_clock->reg);
+
+ spin_unlock_irqrestore(inv_clock->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_inv_clk_ops = {
+ .get_phase = rockchip_inv_get_phase,
+ .set_phase = rockchip_inv_set_phase,
+};
+
+struct clk *rockchip_clk_register_inverter(const char *name,
+ const char *const *parent_names, u8 num_parents,
+ void __iomem *reg, int shift, int flags,
+ spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct rockchip_inv_clock *inv_clock;
+ struct clk *clk;
+
+ inv_clock = kmalloc(sizeof(*inv_clock), GFP_KERNEL);
+ if (!inv_clock)
+ return NULL;
+
+ init.name = name;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.ops = &rockchip_inv_clk_ops;
+
+ inv_clock->hw.init = &init;
+ inv_clock->reg = reg;
+ inv_clock->shift = shift;
+ inv_clock->flags = flags;
+ inv_clock->lock = lock;
+
+ clk = clk_register(NULL, &inv_clock->hw);
+ if (IS_ERR(clk))
+ goto err_free;
+
+ return clk;
+
+err_free:
+ kfree(inv_clock);
+ return NULL;
+}
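
Editor's note: the new clk-inverter.c models a clock-polarity inverter as a phase clock that only knows 0 and 180 degrees, which lets consumers drive it through the generic phase API rather than a Rockchip-specific interface. A consumer-side sketch, assuming a hypothetical pixel-clock handle:

	#include <linux/clk.h>

	/* hypothetical consumer: flip a pixel clock's polarity if not already flipped */
	static int invert_pclk(struct clk *pclk)
	{
		/* the inverter accepts only 0 and 180 degrees */
		if (clk_get_phase(pclk) == 180)
			return 0;

		return clk_set_phase(pclk, 180);
	}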
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index e9f8df324e7c..9b613426e968 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -14,7 +14,10 @@
*/
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include "clk.h"
struct rockchip_mmc_clock {
@@ -105,7 +108,7 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
- __clk_get_name(hw->clk), degrees, delay_num,
+ clk_hw_get_name(hw), degrees, delay_num,
mmc_clock->reg, raw_value>>(mmc_clock->shift),
rockchip_mmc_get_phase(hw)
);
@@ -131,6 +134,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
if (!mmc_clock)
return NULL;
+ init.name = name;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
@@ -139,9 +143,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->reg = reg;
mmc_clock->shift = shift;
- if (name)
- init.name = name;
-
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
goto err_free;
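
Editor's note: both the inverter and the mmc-phase code write their registers via HIWORD_UPDATE, the Rockchip hiword-mask scheme in which the upper 16 bits of a write act as a write-enable mask for the lower 16, so masked updates need no read-modify-write cycle or lock. The helper's shape, as defined in the Rockchip clk driver's clk.h (reproduced here for reference):

	#define HIWORD_UPDATE(val, mask, shift) \
			((val) << (shift) | (mask) << ((shift) + 16))

	/*
	 * e.g. setting the 1-bit inverter flag at bit 7 of a CLKSEL register:
	 * HIWORD_UPDATE(1, 0x1, 7) == 0x00800080 -- bit 23 enables the write,
	 * bit 7 carries the value, and all other bits are left untouched.
	 */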
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 76027261f7ed..7737a1df1e4b 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk.h"
@@ -121,8 +120,8 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
#define RK3066_PLLCON0_NR_SHIFT 8
#define RK3066_PLLCON1_NF_MASK 0x1fff
#define RK3066_PLLCON1_NF_SHIFT 0
-#define RK3066_PLLCON2_BWADJ_MASK 0xfff
-#define RK3066_PLLCON2_BWADJ_SHIFT 0
+#define RK3066_PLLCON2_NB_MASK 0xfff
+#define RK3066_PLLCON2_NB_SHIFT 0
#define RK3066_PLLCON3_RESET (1 << 5)
#define RK3066_PLLCON3_PWRDOWN (1 << 1)
#define RK3066_PLLCON3_BYPASS (1 << 0)
@@ -137,7 +136,7 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
if (pllcon & RK3066_PLLCON3_BYPASS) {
pr_debug("%s: pll %s is bypassed\n", __func__,
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return prate;
}
@@ -175,13 +174,13 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
}
pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
- __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+ __func__, clk_hw_get_name(hw), old_rate, drate, prate);
/* Get required rate settings from table */
rate = rockchip_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -208,8 +207,8 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
RK3066_PLLCON1_NF_SHIFT),
pll->reg_base + RK3066_PLLCON(1));
- writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
- RK3066_PLLCON2_BWADJ_SHIFT),
+ writel_relaxed(HIWORD_UPDATE(rate->nb - 1, RK3066_PLLCON2_NB_MASK,
+ RK3066_PLLCON2_NB_SHIFT),
pll->reg_base + RK3066_PLLCON(2));
/* leave reset and wait the reset_delay */
@@ -262,14 +261,14 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
- unsigned int nf, nr, no, bwadj;
+ unsigned int nf, nr, no, nb;
unsigned long drate;
u32 pllcon;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
return;
- drate = __clk_get_rate(hw->clk);
+ drate = clk_hw_get_rate(hw);
rate = rockchip_get_pll_settings(pll, drate);
/* when no rate setting for the current rate, rely on clk_set_rate */
@@ -284,25 +283,25 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
- bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
+ nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT) & RK3066_PLLCON2_NB_MASK) + 1;
- pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
- __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
- rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
+ pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n",
+ __func__, clk_hw_get_name(hw), drate, rate->nr, nr,
+ rate->no, no, rate->nf, nf, rate->nb, nb);
if (rate->nr != nr || rate->no != no || rate->nf != nf
- || rate->bwadj != bwadj) {
- struct clk *parent = __clk_get_parent(hw->clk);
+ || rate->nb != nb) {
+ struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long prate;
if (!parent) {
pr_warn("%s: parent of %s not available\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return;
}
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
- __func__, __clk_get_name(hw->clk));
- prate = __clk_get_rate(parent);
+ __func__, clk_hw_get_name(hw));
+ prate = clk_hw_get_rate(parent);
rockchip_rk3066_pll_set_rate(hw, drate, prate);
}
}
@@ -354,6 +353,35 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
if (!pll)
return ERR_PTR(-ENOMEM);
+ /* create the mux on top of the real pll */
+ pll->pll_mux_ops = &clk_mux_ops;
+ pll_mux = &pll->pll_mux;
+ pll_mux->reg = base + mode_offset;
+ pll_mux->shift = mode_shift;
+ pll_mux->mask = PLL_MODE_MASK;
+ pll_mux->flags = 0;
+ pll_mux->lock = lock;
+ pll_mux->hw.init = &init;
+
+ if (pll_type == pll_rk3066)
+ pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+ /* the actual muxing is xin24m, pll-output, xin32k */
+ pll_parents[0] = parent_names[0];
+ pll_parents[1] = pll_name;
+ pll_parents[2] = parent_names[1];
+
+ init.name = name;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.ops = pll->pll_mux_ops;
+ init.parent_names = pll_parents;
+ init.num_parents = ARRAY_SIZE(pll_parents);
+
+ mux_clk = clk_register(NULL, &pll_mux->hw);
+ if (IS_ERR(mux_clk))
+ goto err_mux;
+
+ /* now create the actual pll */
init.name = pll_name;
/* keep all plls untouched for now */
@@ -399,47 +427,19 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
pll->flags = clk_pll_flags;
pll->lock = lock;
- /* create the mux on top of the real pll */
- pll->pll_mux_ops = &clk_mux_ops;
- pll_mux = &pll->pll_mux;
- pll_mux->reg = base + mode_offset;
- pll_mux->shift = mode_shift;
- pll_mux->mask = PLL_MODE_MASK;
- pll_mux->flags = 0;
- pll_mux->lock = lock;
- pll_mux->hw.init = &init;
-
- if (pll_type == pll_rk3066)
- pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
pll_clk = clk_register(NULL, &pll->hw);
if (IS_ERR(pll_clk)) {
pr_err("%s: failed to register pll clock %s : %ld\n",
__func__, name, PTR_ERR(pll_clk));
- mux_clk = pll_clk;
goto err_pll;
}
- /* the actual muxing is xin24m, pll-output, xin32k */
- pll_parents[0] = parent_names[0];
- pll_parents[1] = pll_name;
- pll_parents[2] = parent_names[1];
-
- init.name = name;
- init.flags = CLK_SET_RATE_PARENT;
- init.ops = pll->pll_mux_ops;
- init.parent_names = pll_parents;
- init.num_parents = ARRAY_SIZE(pll_parents);
-
- mux_clk = clk_register(NULL, &pll_mux->hw);
- if (IS_ERR(mux_clk))
- goto err_mux;
-
return mux_clk;
-err_mux:
- clk_unregister(pll_clk);
err_pll:
+ clk_unregister(mux_clk);
+ mux_clk = pll_clk;
+err_mux:
kfree(pll);
return mux_clk;
}
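
Editor's note: the clk-pll.c reordering registers the mode mux before the PLL itself, which lets the error path unwind strictly in reverse — a failed PLL registration first unregisters the already-registered mux, then falls through to free the pll structure. The general register-then-unwind pattern, sketched with hypothetical names:

	#include <linux/clk-provider.h>
	#include <linux/err.h>

	/* register two clocks; on failure, undo in reverse order */
	static struct clk *register_pair(struct clk_hw *first_hw,
					 struct clk_hw *second_hw)
	{
		struct clk *first, *second;

		first = clk_register(NULL, first_hw);
		if (IS_ERR(first))
			return first;

		second = clk_register(NULL, second_hw);
		if (IS_ERR(second)) {
			clk_unregister(first);	/* unwind the earlier registration */
			return second;
		}

		return first;
	}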
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index e4f9d472f1ff..ed02bbc7b11f 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -201,7 +202,7 @@ PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
PNAME(mux_aclk_cpu_p) = { "apll", "gpll" };
PNAME(mux_sclk_cif0_p) = { "cif0_pre", "xin24m" };
PNAME(mux_sclk_i2s0_p) = { "i2s0_pre", "i2s0_frac", "xin12m" };
-PNAME(mux_sclk_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
+PNAME(mux_sclk_spdif_p) = { "spdif_pre", "spdif_frac", "xin12m" };
PNAME(mux_sclk_uart0_p) = { "uart0_pre", "uart0_frac", "xin24m" };
PNAME(mux_sclk_uart1_p) = { "uart1_pre", "uart1_frac", "xin24m" };
PNAME(mux_sclk_uart2_p) = { "uart2_pre", "uart2_frac", "xin24m" };
@@ -235,6 +236,7 @@ static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
#define MFLAGS CLK_MUX_HIWORD_MASK
#define DFLAGS CLK_DIVIDER_HIWORD_MASK
#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
/* 2 ^ (val + 1) */
static struct clk_div_table div_core_peri_t[] = {
@@ -310,6 +312,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclkin_cif0", "ext_cif0", 0,
RK2928_CLKGATE_CON(3), 3, GFLAGS),
+ INVERTER(0, "pclk_cif0", "pclkin_cif0",
+ RK2928_CLKSEL_CON(30), 8, IFLAGS),
/*
* the 480m are generated inside the usb block from these clocks,
@@ -334,8 +338,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
RK2928_CLKSEL_CON(23), 0,
RK2928_CLKGATE_CON(2), 7, GFLAGS),
- MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
+ MUX(0, "sclk_hsadc_out", mux_sclk_hsadc_p, 0,
RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
+ INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+ RK2928_CLKSEL_CON(22), 7, IFLAGS),
COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
@@ -344,10 +350,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 13, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+ COMPOSITE_FRAC(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(9), 0,
RK2928_CLKGATE_CON(0), 14, GFLAGS),
- MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+ MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
/*
@@ -557,6 +563,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "pclkin_cif1", "ext_cif1", 0,
RK2928_CLKGATE_CON(3), 4, GFLAGS),
+ INVERTER(0, "pclk_cif1", "pclkin_cif1",
+ RK2928_CLKSEL_CON(30), 12, IFLAGS),
COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 5, DFLAGS,
@@ -809,7 +817,7 @@ static void __init rk3188_clk_init(struct device_node *np)
rate = pll->rate_table;
while (rate->rate > 0) {
- rate->bwadj = 0;
+ rate->nb = 1;
rate++;
}
}
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 4f817ed9e6ee..9040878e3e2b 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -84,7 +84,7 @@ static struct rockchip_pll_rate_table rk3288_pll_rates[] = {
RK3066_PLL_RATE( 742500000, 8, 495, 2),
RK3066_PLL_RATE( 696000000, 1, 58, 2),
RK3066_PLL_RATE( 600000000, 1, 50, 2),
- RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
+ RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1),
RK3066_PLL_RATE( 552000000, 1, 46, 2),
RK3066_PLL_RATE( 504000000, 1, 84, 4),
RK3066_PLL_RATE( 500000000, 3, 125, 2),
@@ -189,7 +189,7 @@ PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" };
PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" };
-PNAME(mux_cif_out_p) = { "cif_src", "xin24m" };
+PNAME(mux_vip_out_p) = { "vip_src", "xin24m" };
PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
@@ -223,6 +223,7 @@ static struct clk_div_table div_hclk_cpu_t[] = {
#define MFLAGS CLK_MUX_HIWORD_MASK
#define DFLAGS CLK_DIVIDER_HIWORD_MASK
#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
/*
@@ -434,7 +435,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(26), 8, 1, MFLAGS,
RK3288_CLKGATE_CON(3), 7, GFLAGS),
- COMPOSITE_NOGATE(0, "sclk_vip_out", mux_cif_out_p, 0,
+ COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS),
DIV(0, "pclk_pd_alive", "gpll", 0,
@@ -578,7 +579,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 5, GFLAGS),
- MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
+ MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 3, GFLAGS),
@@ -592,8 +593,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK3288_CLKGATE_CON(2), 6, GFLAGS),
- MUX(SCLK_HSADC, "sclk_hsadc_out", mux_hsadcout_p, 0,
+ MUX(0, "sclk_hsadc_out", mux_hsadcout_p, 0,
RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+ INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+ RK3288_CLKSEL_CON(22), 7, IFLAGS),
GATE(0, "jtag", "ext_jtag", 0,
RK3288_CLKGATE_CON(4), 14, GFLAGS),
@@ -768,13 +771,16 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
*/
GATE(0, "pclk_vip_in", "ext_vip", 0, RK3288_CLKGATE_CON(16), 0, GFLAGS),
+ INVERTER(0, "pclk_vip", "pclk_vip_in", RK3288_CLKSEL_CON(29), 4, IFLAGS),
GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
+ INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS),
};
static const char *const rk3288_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
"hclk_peri",
+ "pclk_pd_pmu",
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
new file mode 100644
index 000000000000..9c5d61e698ef
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/rk3368-cru.h>
+#include "clk.h"
+
+#define RK3368_GRF_SOC_STATUS0 0x480
+
+enum rk3368_plls {
+ apllb, aplll, dpll, cpll, gpll, npll,
+};
+
+static struct rockchip_pll_rate_table rk3368_pll_rates[] = {
+ RK3066_PLL_RATE(2208000000, 1, 92, 1),
+ RK3066_PLL_RATE(2184000000, 1, 91, 1),
+ RK3066_PLL_RATE(2160000000, 1, 90, 1),
+ RK3066_PLL_RATE(2136000000, 1, 89, 1),
+ RK3066_PLL_RATE(2112000000, 1, 88, 1),
+ RK3066_PLL_RATE(2088000000, 1, 87, 1),
+ RK3066_PLL_RATE(2064000000, 1, 86, 1),
+ RK3066_PLL_RATE(2040000000, 1, 85, 1),
+ RK3066_PLL_RATE(2016000000, 1, 84, 1),
+ RK3066_PLL_RATE(1992000000, 1, 83, 1),
+ RK3066_PLL_RATE(1968000000, 1, 82, 1),
+ RK3066_PLL_RATE(1944000000, 1, 81, 1),
+ RK3066_PLL_RATE(1920000000, 1, 80, 1),
+ RK3066_PLL_RATE(1896000000, 1, 79, 1),
+ RK3066_PLL_RATE(1872000000, 1, 78, 1),
+ RK3066_PLL_RATE(1848000000, 1, 77, 1),
+ RK3066_PLL_RATE(1824000000, 1, 76, 1),
+ RK3066_PLL_RATE(1800000000, 1, 75, 1),
+ RK3066_PLL_RATE(1776000000, 1, 74, 1),
+ RK3066_PLL_RATE(1752000000, 1, 73, 1),
+ RK3066_PLL_RATE(1728000000, 1, 72, 1),
+ RK3066_PLL_RATE(1704000000, 1, 71, 1),
+ RK3066_PLL_RATE(1680000000, 1, 70, 1),
+ RK3066_PLL_RATE(1656000000, 1, 69, 1),
+ RK3066_PLL_RATE(1632000000, 1, 68, 1),
+ RK3066_PLL_RATE(1608000000, 1, 67, 1),
+ RK3066_PLL_RATE(1560000000, 1, 65, 1),
+ RK3066_PLL_RATE(1512000000, 1, 63, 1),
+ RK3066_PLL_RATE(1488000000, 1, 62, 1),
+ RK3066_PLL_RATE(1464000000, 1, 61, 1),
+ RK3066_PLL_RATE(1440000000, 1, 60, 1),
+ RK3066_PLL_RATE(1416000000, 1, 59, 1),
+ RK3066_PLL_RATE(1392000000, 1, 58, 1),
+ RK3066_PLL_RATE(1368000000, 1, 57, 1),
+ RK3066_PLL_RATE(1344000000, 1, 56, 1),
+ RK3066_PLL_RATE(1320000000, 1, 55, 1),
+ RK3066_PLL_RATE(1296000000, 1, 54, 1),
+ RK3066_PLL_RATE(1272000000, 1, 53, 1),
+ RK3066_PLL_RATE(1248000000, 1, 52, 1),
+ RK3066_PLL_RATE(1224000000, 1, 51, 1),
+ RK3066_PLL_RATE(1200000000, 1, 50, 1),
+ RK3066_PLL_RATE(1176000000, 1, 49, 1),
+ RK3066_PLL_RATE(1128000000, 1, 47, 1),
+ RK3066_PLL_RATE(1104000000, 1, 46, 1),
+ RK3066_PLL_RATE(1008000000, 1, 84, 2),
+ RK3066_PLL_RATE( 912000000, 1, 76, 2),
+ RK3066_PLL_RATE( 888000000, 1, 74, 2),
+ RK3066_PLL_RATE( 816000000, 1, 68, 2),
+ RK3066_PLL_RATE( 792000000, 1, 66, 2),
+ RK3066_PLL_RATE( 696000000, 1, 58, 2),
+ RK3066_PLL_RATE( 672000000, 1, 56, 2),
+ RK3066_PLL_RATE( 648000000, 1, 54, 2),
+ RK3066_PLL_RATE( 624000000, 1, 52, 2),
+ RK3066_PLL_RATE( 600000000, 1, 50, 2),
+ RK3066_PLL_RATE( 576000000, 1, 48, 2),
+ RK3066_PLL_RATE( 552000000, 1, 46, 2),
+ RK3066_PLL_RATE( 528000000, 1, 88, 4),
+ RK3066_PLL_RATE( 504000000, 1, 84, 4),
+ RK3066_PLL_RATE( 480000000, 1, 80, 4),
+ RK3066_PLL_RATE( 456000000, 1, 76, 4),
+ RK3066_PLL_RATE( 408000000, 1, 68, 4),
+ RK3066_PLL_RATE( 312000000, 1, 52, 4),
+ RK3066_PLL_RATE( 252000000, 1, 84, 8),
+ RK3066_PLL_RATE( 216000000, 1, 72, 8),
+ RK3066_PLL_RATE( 126000000, 2, 84, 8),
+ RK3066_PLL_RATE( 48000000, 2, 32, 8),
+ { /* sentinel */ },
+};
+
+PNAME(mux_pll_p) = { "xin24m", "xin32k" };
+PNAME(mux_armclkb_p) = { "apllb_core", "gpllb_core" };
+PNAME(mux_armclkl_p) = { "aplll_core", "gplll_core" };
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_cs_src_p) = { "apllb_cs", "aplll_cs", "gpll_cs"};
+PNAME(mux_aclk_bus_src_p) = { "cpll_aclk_bus", "gpll_aclk_bus" };
+
+PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
+PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_usb_p) = { "cpll", "gpll", "usbphy_480m" };
+PNAME(mux_pll_src_cpll_gpll_usb_usb_p) = { "cpll", "gpll", "usbphy_480m",
+ "usbphy_480m" };
+PNAME(mux_pll_src_cpll_gpll_usb_npll_p) = { "cpll", "gpll", "usbphy_480m",
+ "npll" };
+PNAME(mux_pll_src_cpll_gpll_npll_npll_p) = { "cpll", "gpll", "npll", "npll" };
+PNAME(mux_pll_src_cpll_gpll_npll_usb_p) = { "cpll", "gpll", "npll",
+ "usbphy_480m" };
+
+PNAME(mux_i2s_8ch_pre_p) = { "i2s_8ch_src", "i2s_8ch_frac",
+ "ext_i2s", "xin12m" };
+PNAME(mux_i2s_8ch_clkout_p) = { "i2s_8ch_pre", "xin12m" };
+PNAME(mux_i2s_2ch_p) = { "i2s_2ch_src", "i2s_2ch_frac",
+ "dummy", "xin12m" };
+PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac",
+ "ext_i2s", "xin12m" };
+PNAME(mux_edp_24m_p) = { "dummy", "xin24m" };
+PNAME(mux_vip_out_p) = { "vip_src", "xin24m" };
+PNAME(mux_usbphy480m_p) = { "usbotg_out", "xin24m" };
+PNAME(mux_hsic_usbphy480m_p) = { "usbotg_out", "dummy" };
+PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy_480m" };
+PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "uart2_src", "xin24m" };
+PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" };
+PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" };
+PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
+PNAME(mux_mmc_src_p) = { "cpll", "gpll", "usbphy_480m", "xin24m" };
+
+static struct rockchip_pll_clock rk3368_pll_clks[] __initdata = {
+ [apllb] = PLL(pll_rk3066, PLL_APLLB, "apllb", mux_pll_p, 0, RK3368_PLL_CON(0),
+ RK3368_PLL_CON(3), 8, 1, 0, rk3368_pll_rates),
+ [aplll] = PLL(pll_rk3066, PLL_APLLL, "aplll", mux_pll_p, 0, RK3368_PLL_CON(4),
+ RK3368_PLL_CON(7), 8, 0, 0, rk3368_pll_rates),
+ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3368_PLL_CON(8),
+ RK3368_PLL_CON(11), 8, 2, 0, NULL),
+ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3368_PLL_CON(12),
+ RK3368_PLL_CON(15), 8, 3, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3368_PLL_CON(16),
+ RK3368_PLL_CON(19), 8, 4, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+ [npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3368_PLL_CON(20),
+ RK3368_PLL_CON(23), 8, 5, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+};
+
+static struct clk_div_table div_ddrphy_t[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { /* sentinel */ },
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
+
+static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
+ .core_reg = RK3368_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_shift = 15,
+};
+
+static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+ .core_reg = RK3368_CLKSEL_CON(2),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_shift = 7,
+};
+
+#define RK3368_DIV_ACLKM_MASK 0x1f
+#define RK3368_DIV_ACLKM_SHIFT 8
+#define RK3368_DIV_ATCLK_MASK 0x1f
+#define RK3368_DIV_ATCLK_SHIFT 0
+#define RK3368_DIV_PCLK_DBG_MASK 0x1f
+#define RK3368_DIV_PCLK_DBG_SHIFT 8
+
+#define RK3368_CLKSEL0(_offs, _aclkm) \
+ { \
+ .reg = RK3288_CLKSEL_CON(0 + _offs), \
+ .val = HIWORD_UPDATE(_aclkm, RK3368_DIV_ACLKM_MASK, \
+ RK3368_DIV_ACLKM_SHIFT), \
+ }
+#define RK3368_CLKSEL1(_offs, _atclk, _pdbg) \
+ { \
+ .reg = RK3288_CLKSEL_CON(1 + _offs), \
+ .val = HIWORD_UPDATE(_atclk, RK3368_DIV_ATCLK_MASK, \
+ RK3368_DIV_ATCLK_SHIFT) | \
+ HIWORD_UPDATE(_pdbg, RK3368_DIV_PCLK_DBG_MASK, \
+ RK3368_DIV_PCLK_DBG_SHIFT), \
+ }
+
+/* cluster_b: aclkm in clksel0, rest in clksel1 */
+#define RK3368_CPUCLKB_RATE(_prate, _aclkm, _atclk, _pdbg) \
+ { \
+ .prate = _prate, \
+ .divs = { \
+ RK3368_CLKSEL0(0, _aclkm), \
+ RK3368_CLKSEL1(0, _atclk, _pdbg), \
+ }, \
+ }
+
+/* cluster_l: aclkm in clksel2, rest in clksel3 */
+#define RK3368_CPUCLKL_RATE(_prate, _aclkm, _atclk, _pdbg) \
+ { \
+ .prate = _prate, \
+ .divs = { \
+ RK3368_CLKSEL0(2, _aclkm), \
+ RK3368_CLKSEL1(2, _atclk, _pdbg), \
+ }, \
+ }
+
+static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
+ RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
+ RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
+ RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
+ RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
+ RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
+ RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
+ RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
+ RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+};
+
+static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
+ RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
+ RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
+ RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
+ RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
+ RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
+ RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
+ RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
+ RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
+ RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+};
+
+static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
+
+ GATE(0, "apllb_core", "apllb", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpllb_core", "gpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 1, GFLAGS),
+
+ GATE(0, "aplll_core", "aplll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "gplll_core", "gpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 5, GFLAGS),
+
+ DIV(0, "aclkm_core_b", "armclkb", 0,
+ RK3368_CLKSEL_CON(0), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+ DIV(0, "atclk_core_b", "armclkb", 0,
+ RK3368_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+ DIV(0, "pclk_dbg_b", "armclkb", 0,
+ RK3368_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+
+ DIV(0, "aclkm_core_l", "armclkl", 0,
+ RK3368_CLKSEL_CON(2), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+ DIV(0, "atclk_core_l", "armclkl", 0,
+ RK3368_CLKSEL_CON(3), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+ DIV(0, "pclk_dbg_l", "armclkl", 0,
+ RK3368_CLKSEL_CON(3), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+
+ GATE(0, "apllb_cs", "apllb", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(0, "aplll_cs", "aplll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(0, "gpll_cs", "gpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOGATE(0, "sclk_cs_pre", mux_cs_src_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(0, "clkin_trace", "sclk_cs_pre", CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(4), 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(0), 13, GFLAGS),
+
+ COMPOSITE(0, "aclk_cci_pre", mux_pll_src_cpll_gpll_usb_npll_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(0), 12, GFLAGS),
+ GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3368_CLKGATE_CON(7), 10, GFLAGS),
+
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", 0,
+ RK3368_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
+
+ GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(6), 15, GFLAGS),
+
+ GATE(0, "gpll_aclk_bus", "gpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "cpll_aclk_bus", "cpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE_NOGATE(0, "aclk_bus_src", mux_aclk_bus_src_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(8), 7, 1, MFLAGS, 0, 5, DFLAGS),
+
+ GATE(ACLK_BUS, "aclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_BUS, "pclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(8), 12, 3, DFLAGS,
+ RK3368_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(8), 8, 2, DFLAGS,
+ RK3368_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NOMUX(0, "sclk_crypto", "aclk_bus_src", 0,
+ RK3368_CLKSEL_CON(10), 14, 2, DFLAGS,
+ RK3368_CLKGATE_CON(7), 2, GFLAGS),
+
+ COMPOSITE(0, "fclk_mcu_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(1), 3, GFLAGS),
+ /*
+	 * stclk_mcu is listed as a child of fclk_mcu_src in diagram 5,
+	 * but stclk_mcu has an additional divider of its own in diagram 2
+ */
+ COMPOSITE_NOMUX(0, "stclk_mcu", "fclk_mcu_src", 0,
+ RK3368_CLKSEL_CON(12), 8, 3, DFLAGS,
+ RK3368_CLKGATE_CON(13), 13, GFLAGS),
+
+ COMPOSITE(0, "i2s_8ch_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(27), 12, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_FRAC(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(28), 0,
+ RK3368_CLKGATE_CON(6), 2, GFLAGS),
+ MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(27), 8, 2, MFLAGS),
+ COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "i2s_8ch_clkout", mux_i2s_8ch_clkout_p, 0,
+ RK3368_CLKSEL_CON(27), 15, 1, MFLAGS,
+ RK3368_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(SCLK_I2S_8CH, "sclk_i2s_8ch", "i2s_8ch_pre", CLK_SET_RATE_PARENT,
+ RK3368_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE(0, "spdif_8ch_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(31), 12, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(32), 0,
+ RK3368_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+ RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
+ RK3368_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(53), 12, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(5), 13, GFLAGS),
+ COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(54), 0,
+ RK3368_CLKGATE_CON(5), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
+ RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
+ RK3368_CLKGATE_CON(5), 15, GFLAGS),
+
+ COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3368_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(0, "sclk_hsadc_tsp", "ext_hsadc_tsp", 0,
+ RK3368_CLKGATE_CON(13), 7, GFLAGS),
+
+ MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(35), 12, 1, MFLAGS),
+ COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
+ RK3368_CLKSEL_CON(37), 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(2), 4, GFLAGS),
+ MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(37), 8, 1, MFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 6, GFLAGS),
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 7, GFLAGS),
+
+ /*
+ * We introduce a virtual node, hclk_video_pre_v, to split one clock
+ * struct with a gate and a fixed divider into two nodes in software.
+ */
+ GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+ RK3368_CLKGATE_CON(4), 8, GFLAGS),
+
+ COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ RK3368_CLKSEL_CON(17), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(0, "sclk_hevc_core_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ RK3368_CLKSEL_CON(17), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(5), 2, GFLAGS),
+
+ COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(19), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 0, GFLAGS),
+ DIV(0, "hclk_vio", "aclk_vio0", 0,
+ RK3368_CLKSEL_CON(21), 0, 5, DFLAGS),
+
+ COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_usb_p, 0,
+ RK3368_CLKSEL_CON(18), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 3, GFLAGS),
+ COMPOSITE(SCLK_RGA, "sclk_rga", mux_pll_src_cpll_gpll_usb_p, 0,
+ RK3368_CLKSEL_CON(18), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 4, GFLAGS),
+
+ COMPOSITE(DCLK_VOP, "dclk_vop", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3368_CLKSEL_CON(20), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3368_CLKGATE_CON(4), 1, GFLAGS),
+
+ GATE(SCLK_VOP0_PWM, "sclk_vop0_pwm", "xin24m", 0,
+ RK3368_CLKGATE_CON(4), 2, GFLAGS),
+
+ COMPOSITE(SCLK_ISP, "sclk_isp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+ RK3368_CLKSEL_CON(22), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3368_CLKGATE_CON(4), 9, GFLAGS),
+
+ GATE(0, "pclk_isp_in", "ext_isp", 0,
+ RK3368_CLKGATE_CON(17), 2, GFLAGS),
+ INVERTER(PCLK_ISP, "pclk_isp", "pclk_isp_in",
+ RK3368_CLKSEL_CON(21), 6, IFLAGS),
+
+ GATE(0, "pclk_vip_in", "ext_vip", 0,
+ RK3368_CLKGATE_CON(16), 13, GFLAGS),
+ INVERTER(PCLK_VIP, "pclk_vip", "pclk_vip_in",
+ RK3368_CLKSEL_CON(21), 13, IFLAGS),
+
+ GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
+ RK3368_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
+ RK3368_CLKGATE_CON(5), 12, GFLAGS),
+
+ COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
+ RK3368_CLKGATE_CON(4), 5, GFLAGS),
+ COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
+ RK3368_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS),
+
+ COMPOSITE_NODIV(SCLK_EDP_24M, "sclk_edp_24m", mux_edp_24m_p, 0,
+ RK3368_CLKSEL_CON(23), 8, 1, MFLAGS,
+ RK3368_CLKGATE_CON(5), 4, GFLAGS),
+ COMPOSITE(SCLK_EDP, "sclk_edp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+ RK3368_CLKSEL_CON(23), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3368_CLKGATE_CON(5), 3, GFLAGS),
+
+ COMPOSITE(SCLK_HDCP, "sclk_hdcp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+ RK3368_CLKSEL_CON(55), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3368_CLKGATE_CON(5), 5, GFLAGS),
+
+ DIV(0, "pclk_pd_alive", "gpll", 0,
+ RK3368_CLKSEL_CON(10), 8, 5, DFLAGS),
+
+ /* sclk_timer has a gate in the sgrf */
+
+ COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(10), 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(SCLK_PVTM_PMU, "sclk_pvtm_pmu", "xin24m", 0,
+ RK3368_CLKGATE_CON(7), 3, GFLAGS),
+ COMPOSITE(0, "sclk_gpu_core_src", mux_pll_src_cpll_gpll_usb_npll_p, 0,
+ RK3368_CLKSEL_CON(14), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(4), 11, GFLAGS),
+ MUX(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(14), 14, 1, MFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_gpu_mem_pre", "aclk_gpu_src", 0,
+ RK3368_CLKSEL_CON(14), 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(5), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_gpu_cfg_pre", "aclk_gpu_src", 0,
+ RK3368_CLKSEL_CON(16), 8, 5, DFLAGS,
+ RK3368_CLKGATE_CON(5), 9, GFLAGS),
+ GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0,
+ RK3368_CLKGATE_CON(7), 11, GFLAGS),
+
+ COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(9), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+ RK3368_CLKSEL_CON(9), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK3368_CLKGATE_CON(3), 3, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKSEL_CON(9), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK3368_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(3), 1, GFLAGS),
+
+ GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+
+ COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(45), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(SCLK_SPI1, "sclk_spi1", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(45), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3368_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE(SCLK_SPI2, "sclk_spi2", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(46), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3368_CLKGATE_CON(3), 9, GFLAGS),
+
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+ RK3368_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE(SCLK_SDIO0, "sclk_sdio0", mux_mmc_src_p, 0,
+ RK3368_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(7), 13, GFLAGS),
+ COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0,
+ RK3368_CLKSEL_CON(51), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(7), 15, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3368_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3368_SDMMC_CON1, 0),
+
+ MMC(SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3368_SDIO0_CON0, 1),
+ MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3368_SDIO0_CON1, 0),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3368_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3368_EMMC_CON1, 0),
+
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(8), 1, GFLAGS),
+
+ /* pmu_grf_soc_con0[6] allows selecting between xin32k and pvtm_pmu */
+ GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
+ RK3368_CLKGATE_CON(8), 4, GFLAGS),
+
+ /* pmu_grf_soc_con0[6] allows selecting between xin32k and pvtm_pmu */
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
+ RK3368_CLKSEL_CON(25), 0, 6, DFLAGS,
+ RK3368_CLKGATE_CON(3), 5, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+ RK3368_CLKSEL_CON(25), 8, 8, DFLAGS,
+ RK3368_CLKGATE_CON(3), 6, GFLAGS),
+
+ COMPOSITE(SCLK_NANDC0, "sclk_nandc0", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(47), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(7), 8, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(6), 7, GFLAGS),
+
+ COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gpll_usb_usb_p, 0,
+ RK3368_CLKSEL_CON(33), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(34), 0,
+ RK3368_CLKGATE_CON(2), 1, GFLAGS),
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(33), 8, 2, MFLAGS),
+
+ COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
+ RK3368_CLKSEL_CON(35), 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(36), 0,
+ RK3368_CLKGATE_CON(2), 3, GFLAGS),
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(35), 8, 2, MFLAGS),
+
+ COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
+ RK3368_CLKSEL_CON(39), 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(40), 0,
+ RK3368_CLKGATE_CON(2), 7, GFLAGS),
+ MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(39), 8, 2, MFLAGS),
+
+ COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
+ RK3368_CLKSEL_CON(41), 0, 7, DFLAGS,
+ RK3368_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(42), 0,
+ RK3368_CLKGATE_CON(2), 9, GFLAGS),
+ MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(41), 8, 2, MFLAGS),
+
+ COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
+ RK3368_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3368_CLKGATE_CON(3), 4, GFLAGS),
+ MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(43), 8, 1, MFLAGS),
+ GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
+ RK3368_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
+ RK3368_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
+ RK3368_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
+ RK3368_CLKGATE_CON(7), 5, GFLAGS),
+
+ GATE(0, "jtag", "ext_jtag", 0,
+ RK3368_CLKGATE_CON(7), 0, GFLAGS),
+
+ COMPOSITE_NODIV(0, "hsic_usbphy_480m", mux_hsic_usbphy480m_p, 0,
+ RK3368_CLKSEL_CON(26), 8, 2, MFLAGS,
+ RK3368_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
+ RK3368_CLKSEL_CON(26), 12, 2, MFLAGS,
+ RK3368_CLKGATE_CON(8), 7, GFLAGS),
+ GATE(SCLK_HSICPHY12M, "sclk_hsicphy12m", "xin12m", 0,
+ RK3368_CLKGATE_CON(8), 6, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ /* aclk_cci_pre gates */
+ GATE(0, "aclk_core_niu_cpup", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(0, "aclk_core_niu_cci", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 3, GFLAGS),
+ GATE(0, "aclk_cci400", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(0, "aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(0, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 0, GFLAGS),
+
+ /* aclkm_core_* gates */
+ GATE(0, "aclk_adb400s_pd_core_b", "aclkm_core_b", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(0, "aclk_adb400s_pd_core_l", "aclkm_core_l", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 0, GFLAGS),
+
+ /* armclk* gates */
+ GATE(0, "sclk_dbg_pd_core_b", "armclkb", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(0, "sclk_dbg_pd_core_l", "armclkl", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 1, GFLAGS),
+
+ /* sclk_cs_pre gates */
+ GATE(0, "sclk_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(0, "pclk_core_niu_sdbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(0, "hclk_core_niu_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 5, GFLAGS),
+
+ /* aclk_bus gates */
+ GATE(0, "aclk_strc_sys", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 12, GFLAGS),
+ GATE(ACLK_DMAC_BUS, "aclk_dmac_bus", "aclk_bus", 0, RK3368_CLKGATE_CON(12), 11, GFLAGS),
+ GATE(0, "sclk_intmem1", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(0, "sclk_intmem0", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(0, "aclk_gic400", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 9, GFLAGS),
+
+ /* sclk_ddr gates */
+ GATE(0, "nclk_ddrupctl", "sclk_ddr", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 2, GFLAGS),
+
+ /* clk_hsadc_tsp is part of diagram2 */
+
+ /* fclk_mcu_src gates */
+ GATE(0, "hclk_noc_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(0, "fclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(0, "hclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 11, GFLAGS),
+
+ /* hclk_cpu gates */
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 10, GFLAGS),
+ GATE(HCLK_ROM, "hclk_rom", "hclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 8, GFLAGS),
+ GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 7, GFLAGS),
+ GATE(HCLK_TSP, "hclk_tsp", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(MCLK_CRYPTO, "mclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 3, GFLAGS),
+
+ /* pclk_cpu gates */
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 14, GFLAGS),
+ GATE(PCLK_DDRUPCTL, "pclk_ddrupctl", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 13, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS),
+
+ /*
+ * video clk gates
+ * aclk_video(_pre) can actually select between parents of aclk_vdpu
+ * and aclk_vepu by setting bit GRF_SOC_CON0[7].
+ */
+ GATE(ACLK_VIDEO, "aclk_video", "aclk_vdpu", 0, RK3368_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(SCLK_HEVC_CABAC, "sclk_hevc_cabac", "sclk_hevc_cabac_src", 0, RK3368_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(SCLK_HEVC_CORE, "sclk_hevc_core", "sclk_hevc_core_src", 0, RK3368_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(HCLK_VIDEO, "hclk_video", "hclk_video_pre", 0, RK3368_CLKGATE_CON(15), 1, GFLAGS),
+
+ /* aclk_rga_pre gates */
+ GATE(ACLK_VIO1_NOC, "aclk_vio1_noc", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 10, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(17), 10, GFLAGS),
+
+ /* aclk_vio0 gates */
+ GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(ACLK_VIO0_NOC, "aclk_vio0_noc", "aclk_vio0", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 9, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(ACLK_VOP_IEP, "aclk_vop_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 2, GFLAGS),
+
+ /* sclk_isp gates */
+ GATE(HCLK_ISP, "hclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(16), 14, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(17), 0, GFLAGS),
+
+ /* hclk_vio gates */
+ GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(HCLK_VIO_NOC, "hclk_vio_noc", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 3, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(HCLK_VIO_HDCPMMU, "hclk_hdcpmmu", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 12, GFLAGS),
+ GATE(HCLK_VIO_H2P, "hclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 7, GFLAGS),
+
+ /*
+ * pclk_vio gates
+ * pclk_vio comes from exactly the same source as hclk_vio
+ */
+ GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 11, GFLAGS),
+ GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 9, GFLAGS),
+ GATE(PCLK_VIO_H2P, "pclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 3, GFLAGS),
+
+ /* ext_vip gates in diagram3 */
+
+ /* gpu gates */
+ GATE(SCLK_GPU_CORE, "sclk_gpu_core", "sclk_gpu_core_src", 0, RK3368_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(ACLK_GPU_MEM, "aclk_gpu_mem", "aclk_gpu_mem_pre", 0, RK3368_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(ACLK_GPU_CFG, "aclk_gpu_cfg", "aclk_gpu_cfg_pre", 0, RK3368_CLKGATE_CON(18), 0, GFLAGS),
+
+ /* aclk_peri gates */
+ GATE(ACLK_DMAC_PERI, "aclk_dmac_peri", "aclk_peri", 0, RK3368_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 15, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 13, GFLAGS),
+ GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 8, GFLAGS),
+ GATE(ACLK_PERI_MMU, "aclk_peri_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(21), 4, GFLAGS),
+
+ /* hclk_peri gates */
+ GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 11, GFLAGS),
+ GATE(0, "hclk_mmc_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 10, GFLAGS),
+ GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 9, GFLAGS),
+ GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 7, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(HCLK_HOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 3, GFLAGS),
+ GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 2, GFLAGS),
+ GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 1, GFLAGS),
+ GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 3, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 0, GFLAGS),
+
+ /* pclk_peri gates */
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 15, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 14, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 13, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 12, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 10, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 8, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 6, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 5, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 14, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
+
+ /* pclk_pd_alive gates */
+ GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS),
+
+ /*
+ * pclk_vio gates
+ * pclk_vio comes from exactly the same source as hclk_vio
+ */
+ GATE(0, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+
+ /* pclk_pd_pmu gates */
+ GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+
+ /* timer gates */
+ GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
+ GATE(0, "sclk_timer14", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 10, GFLAGS),
+ GATE(0, "sclk_timer13", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 9, GFLAGS),
+ GATE(0, "sclk_timer12", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 8, GFLAGS),
+ GATE(0, "sclk_timer11", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 7, GFLAGS),
+ GATE(0, "sclk_timer10", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 6, GFLAGS),
+ GATE(0, "sclk_timer05", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 5, GFLAGS),
+ GATE(0, "sclk_timer04", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 4, GFLAGS),
+ GATE(0, "sclk_timer03", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 3, GFLAGS),
+ GATE(0, "sclk_timer02", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 2, GFLAGS),
+ GATE(0, "sclk_timer01", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 1, GFLAGS),
+ GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS),
+};
+
+static void __init rk3368_clk_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+ struct clk *clk;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+ /* xin12m is created by a cru-internal divider */
+ clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock xin12m: %ld\n",
+ __func__, PTR_ERR(clk));
+
+ /* ddrphy_div4 is created by a cru-internal divider */
+ clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock xin12m: %ld\n",
+ __func__, PTR_ERR(clk));
+
+ clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
+ "hclk_video_pre_v", 0, 1, 4);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
+ __func__, PTR_ERR(clk));
+
+ /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
+ clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+ __func__, PTR_ERR(clk));
+ else
+ rockchip_clk_add_lookup(clk, PCLK_WDT);
+
+ rockchip_clk_register_plls(rk3368_pll_clks,
+ ARRAY_SIZE(rk3368_pll_clks),
+ RK3368_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(rk3368_clk_branches,
+ ARRAY_SIZE(rk3368_clk_branches));
+
+ rockchip_clk_register_armclk(ARMCLKB, "armclkb",
+ mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
+ &rk3368_cpuclkb_data, rk3368_cpuclkb_rates,
+ ARRAY_SIZE(rk3368_cpuclkb_rates));
+
+ rockchip_clk_register_armclk(ARMCLKL, "armclkl",
+ mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
+ &rk3368_cpuclkl_data, rk3368_cpuclkl_rates,
+ ARRAY_SIZE(rk3368_cpuclkl_rates));
+
+ rockchip_register_softrst(np, 15, reg_base + RK3368_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(RK3368_GLB_SRST_FST);
+}
+CLK_OF_DECLARE(rk3368_cru, "rockchip,rk3368-cru", rk3368_clk_init);
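+
+/*
+ * Illustrative sketch only, not part of the driver: every CRU register
+ * the branch macros above touch uses the Rockchip hiword-mask scheme
+ * (HIWORD_UPDATE in clk.h). The upper 16 bits name the low-word bits
+ * the hardware should latch, so a field can be written without a
+ * read-modify-write cycle. The mux field below is hypothetical.
+ */
+static void __maybe_unused example_set_mux(void __iomem *reg_base)
+{
+ /* write parent index 2 into a 2-bit mux field at bit 8 */
+ writel(HIWORD_UPDATE(2, 0x3, 8), reg_base + RK3368_CLKSEL_CON(53));
+}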
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 052b94db0ff9..24938815655f 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -277,6 +277,13 @@ void __init rockchip_clk_register_branches(
list->div_shift
);
break;
+ case branch_inverter:
+ clk = rockchip_clk_register_inverter(
+ list->name, list->parent_names,
+ list->num_parents,
+ reg_base + list->muxdiv_offset,
+ list->div_shift, list->div_flags, &clk_lock);
+ break;
}
/* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 6b092673048a..dc8ecb2673b7 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -24,29 +24,29 @@
#define CLK_ROCKCHIP_CLK_H
#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
+
+struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
/* register positions shared by RK2928, RK3066 and RK3188 */
-#define RK2928_PLL_CON(x) (x * 0x4)
+#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
-#define RK2928_CLKSEL_CON(x) (x * 0x4 + 0x44)
-#define RK2928_CLKGATE_CON(x) (x * 0x4 + 0xd0)
+#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
+#define RK2928_CLKGATE_CON(x) ((x) * 0x4 + 0xd0)
#define RK2928_GLB_SRST_FST 0x100
#define RK2928_GLB_SRST_SND 0x104
-#define RK2928_SOFTRST_CON(x) (x * 0x4 + 0x110)
+#define RK2928_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
#define RK2928_MISC_CON 0x134
#define RK3288_PLL_CON(x) RK2928_PLL_CON(x)
#define RK3288_MODE_CON 0x50
-#define RK3288_CLKSEL_CON(x) (x * 0x4 + 0x60)
-#define RK3288_CLKGATE_CON(x) (x * 0x4 + 0x160)
+#define RK3288_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
+#define RK3288_CLKGATE_CON(x) ((x) * 0x4 + 0x160)
#define RK3288_GLB_SRST_FST 0x1b0
#define RK3288_GLB_SRST_SND 0x1b4
-#define RK3288_SOFTRST_CON(x) (x * 0x4 + 0x1b8)
+#define RK3288_SOFTRST_CON(x) ((x) * 0x4 + 0x1b8)
#define RK3288_MISC_CON 0x1e8
#define RK3288_SDMMC_CON0 0x200
#define RK3288_SDMMC_CON1 0x204
@@ -57,6 +57,22 @@
#define RK3288_EMMC_CON0 0x218
#define RK3288_EMMC_CON1 0x21c
+#define RK3368_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3368_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3368_CLKGATE_CON(x) ((x) * 0x4 + 0x200)
+#define RK3368_GLB_SRST_FST 0x280
+#define RK3368_GLB_SRST_SND 0x284
+#define RK3368_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define RK3368_MISC_CON 0x380
+#define RK3368_SDMMC_CON0 0x400
+#define RK3368_SDMMC_CON1 0x404
+#define RK3368_SDIO0_CON0 0x408
+#define RK3368_SDIO0_CON1 0x40c
+#define RK3368_SDIO1_CON0 0x410
+#define RK3368_SDIO1_CON1 0x414
+#define RK3368_EMMC_CON0 0x418
+#define RK3368_EMMC_CON1 0x41c
+
enum rockchip_pll_type {
pll_rk3066,
};
@@ -67,16 +83,16 @@ enum rockchip_pll_type {
.nr = _nr, \
.nf = _nf, \
.no = _no, \
- .bwadj = (_nf >> 1), \
+ .nb = ((_nf) < 2) ? 1 : (_nf) >> 1, \
}
-#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw) \
+#define RK3066_PLL_RATE_NB(_rate, _nr, _nf, _no, _nb) \
{ \
.rate = _rate##U, \
.nr = _nr, \
.nf = _nf, \
.no = _no, \
- .bwadj = _bw, \
+ .nb = _nb, \
}
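+
+/*
+ * Annotation, not part of this patch: the new .nb initializer guards
+ * against nf < 2, where the old (_nf >> 1) produced an invalid 0:
+ *	nf = 1  -> nb = 1
+ *	nf = 32 -> nb = 16
+ */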
struct rockchip_pll_rate_table {
@@ -84,7 +100,7 @@ struct rockchip_pll_rate_table {
unsigned int nr;
unsigned int nf;
unsigned int no;
- unsigned int bwadj;
+ unsigned int nb;
};
/**
@@ -182,6 +198,13 @@ struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg, int shift);
+#define ROCKCHIP_INVERTER_HIWORD_MASK BIT(0)
+
+struct clk *rockchip_clk_register_inverter(const char *name,
+ const char *const *parent_names, u8 num_parents,
+ void __iomem *reg, int shift, int flags,
+ spinlock_t *lock);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
@@ -191,6 +214,7 @@ enum rockchip_clk_branch_type {
branch_fraction_divider,
branch_gate,
branch_mmc,
+ branch_inverter,
};
struct rockchip_clk_branch {
@@ -308,6 +332,26 @@ struct rockchip_clk_branch {
.gate_offset = -1, \
}
+#define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms, \
+ mw, mf, ds, dw, df, dt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_composite, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .div_table = dt, \
+ .gate_offset = -1, \
+ }
+
#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\
{ \
.id = _id, \
@@ -394,6 +438,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define INVERTER(_id, cname, pname, io, is, if) \
+ { \
+ .id = _id, \
+ .branch_type = branch_inverter, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = io, \
+ .div_shift = is, \
+ .div_flags = if, \
+ }
+
void rockchip_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks);
struct regmap *rockchip_clk_get_grf(void);
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 3a1fe07cfe9e..7c1e1f58e2da 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,9 @@
*/
#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include "clk-cpu.h"
#define E4210_SRC_CPU 0x0
@@ -97,8 +100,8 @@ static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
unsigned long drate, unsigned long *prate)
{
- struct clk *parent = __clk_get_parent(hw->clk);
- *prate = __clk_round_rate(parent, drate);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ *prate = clk_hw_round_rate(parent, drate);
return *prate;
}
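+
+/*
+ * Annotation, not part of this patch: the accessor migration repeated
+ * across this merge, e.g.
+ *	__clk_round_rate(__clk_get_parent(hw->clk), drate)
+ * becomes
+ *	clk_hw_round_rate(clk_hw_get_parent(hw), drate)
+ * so provider code stays on struct clk_hw and never dereferences the
+ * consumer-side struct clk.
+ */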
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 454b02ae486a..4e9584d79089 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -9,8 +9,9 @@
* Common Clock Framework support for Audio Subsystem Clock Controller.
*/
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 03a52228b6d1..7cd02ff37a1f 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -9,8 +9,8 @@
* Clock driver for Exynos clock output
*/
+#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 538de66a759e..fdd41b17a24f 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -8,8 +8,6 @@
* Common Clock Framework support for Exynos3250 SoC.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -19,6 +17,7 @@
#include <dt-bindings/clock/exynos3250.h>
#include "clk.h"
+#include "clk-cpu.h"
#include "clk-pll.h"
#define SRC_LEFTBUS 0x4200
@@ -319,8 +318,10 @@ static struct samsung_mux_clock mux_clks[] __initdata = {
MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
SRC_CPU, 24, 1),
MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
- MUX(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1),
- MUX(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+ MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
};
static struct samsung_div_clock div_clks[] __initdata = {
@@ -772,6 +773,26 @@ static struct samsung_cmu_info cmu_info __initdata = {
.nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs),
};
+#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \
+ (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
+ ((corem) << 4))
+#define E3250_CPU_DIV1(hpm, copy) \
+ (((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
+ { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
+ { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
+ { 0 },
+};
+
static void __init exynos3250_cmu_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
@@ -780,6 +801,11 @@ static void __init exynos3250_cmu_init(struct device_node *np)
if (!ctx)
return;
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p[0], mout_core_p[1], 0x14200,
+ e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
+ CLK_CPU_HAS_DIV1);
+
exynos3_core_down_clock(ctx->reg_base);
}
CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
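+
+/*
+ * Worked example, annotation only: the 1 GHz row above packs
+ * E3250_CPU_DIV0(1, 7, 4, 1)
+ *	= (1 << 24) | (7 << 20) | (4 << 16) | (1 << 4)
+ *	= 0x01740010
+ * covering the APLL, PCLK_DBG, ATB and COREM divider fields.
+ */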
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index cae2c048488d..7f370d3e0983 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -11,8 +11,8 @@
*/
#include <dt-bindings/clock/exynos4.h>
+#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -86,6 +86,7 @@
#define DIV_PERIL4 0xc560
#define DIV_PERIL5 0xc564
#define E4X12_DIV_CAM1 0xc568
+#define E4X12_GATE_BUS_FSYS1 0xc744
#define GATE_SCLK_CAM 0xc820
#define GATE_IP_CAM 0xc920
#define GATE_IP_TV 0xc924
@@ -1097,6 +1098,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
0),
GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
0),
+ GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
@@ -1396,6 +1398,45 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
{ 0 },
};
+static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
+ { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+ { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+ { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+ { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+ { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
+ { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
+ { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+ { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+ { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+ { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
+ { 0 },
+};
+
+#define E4412_CPU_DIV1(cores, hpm, copy) \
+ (((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
+ { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(7, 0, 6), },
+ { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(6, 0, 6), },
+ { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(6, 0, 5), },
+ { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(5, 0, 5), },
+ { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4412_CPU_DIV1(5, 0, 4), },
+ { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4412_CPU_DIV1(4, 0, 4), },
+ { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(4, 0, 3), },
+ { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(3, 0, 3), },
+ { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(3, 0, 3), },
+ { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+ { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+ { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+ { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+ { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4412_CPU_DIV1(0, 0, 3), },
+ { 0 },
+};
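+
+/*
+ * Worked example, annotation only: E4412_CPU_DIV1(7, 0, 6) from the
+ * 1.5 GHz row packs (7 << 8) | (0 << 4) | 6 = 0x706, so the 4412
+ * layout adds a CORES field on top of the 4210 HPM/COPY fields.
+ */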
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1489,6 +1530,17 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ if (of_machine_is_compatible("samsung,exynos4412")) {
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ } else {
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ }
}
samsung_clk_register_alias(ctx, exynos4_aliases,
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 6c78b09c829f..92c39f6efec8 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -9,8 +9,6 @@
* Common Clock Framework support for Exynos4415 SoC.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 70ec3d2608a1..55b83c7ef878 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -11,14 +11,13 @@
*/
#include <dt-bindings/clock/exynos5250.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include "clk.h"
+#include "clk-cpu.h"
#define APLL_LOCK 0x0
#define APLL_CON0 0x100
@@ -748,6 +747,32 @@ static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
VPLL_LOCK, VPLL_CON0, NULL),
};
+#define E5250_CPU_DIV0(apll, pclk_dbg, atb, periph, acp, cpud) \
+ ((((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
+ ((periph) << 12) | ((acp) << 8) | ((cpud) << 4)))
+#define E5250_CPU_DIV1(hpm, copy) \
+ (((hpm) << 4) | (copy))
+
+static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = {
+ { 1700000, E5250_CPU_DIV0(5, 3, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+ { 1600000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+ { 1500000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+ { 1400000, E5250_CPU_DIV0(4, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+ { 1300000, E5250_CPU_DIV0(3, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+ { 1200000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+ { 1100000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+ { 1000000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 900000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 800000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 700000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 600000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 500000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 400000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 300000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 200000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+ { 0 },
+};
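+
+/*
+ * Worked example, annotation only: the 1.7 GHz row packs
+ * E5250_CPU_DIV0(5, 3, 7, 7, 7, 3)
+ *	= (5 << 24) | (3 << 20) | (7 << 16) | (7 << 12) | (7 << 8) | (3 << 4)
+ *	= 0x05377730
+ */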
+
static const struct of_device_id ext_clk_match[] __initconst = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ },
@@ -797,6 +822,10 @@ static void __init exynos5250_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5250_div_clks));
samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_cpu_p[0], mout_cpu_p[1], 0x200,
+ exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d),
+ CLK_CPU_HAS_DIV1);
/*
* Enable arm clock down (in idle) and set arm divider
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 06f96eb7cf93..d1a29f6c1084 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -9,8 +9,6 @@
* Common Clock Framework support for Exynos5260 SoC.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 231475bc2b99..d5d5dcabc4a9 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -11,8 +11,6 @@
#include <dt-bindings/clock/exynos5410.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index a1d731ca8f48..389af3c15ec4 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -11,8 +11,7 @@
*/
#include <dt-bindings/clock/exynos5420.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 39c95649d3d0..cee062c588de 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -9,8 +9,6 @@
 * Common Clock Framework support for Exynos5433 SoC.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 979e81389cdd..590813871ffe 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -10,8 +10,6 @@
*/
#include <dt-bindings/clock/exynos5440.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 03d36e847b78..8524e667097e 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -8,8 +8,6 @@
*
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index bebc61b5fce1..b7dd396100d8 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -12,6 +12,8 @@
#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clkdev.h>
#include "clk.h"
#include "clk-pll.h"
@@ -180,7 +182,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -288,7 +290,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -403,7 +405,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -455,7 +457,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
pr_err("%s: could not lock PLL %s\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return -EFAULT;
}
@@ -554,7 +556,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -614,7 +616,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
pr_err("%s: could not lock PLL %s\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return -EFAULT;
}
@@ -772,7 +774,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -1013,7 +1015,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
@@ -1111,7 +1113,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
rate = samsung_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, __clk_get_name(hw->clk));
+ drate, clk_hw_get_name(hw));
return -EINVAL;
}
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index e56df5064889..e9eb935d7616 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -8,6 +8,10 @@
* Common Clock Framework support for s3c24xx external clock output.
*/
+#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "clk.h"
@@ -57,7 +61,7 @@ struct s3c24xx_clkout {
static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
{
struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 val;
val = readl_relaxed(S3C24XX_MISCCR) >> clkout->shift;
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 5d2f03461bc5..0945a8852299 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -8,8 +8,6 @@
* Common Clock Framework support for S3C2410 and following SoCs.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 2ceedaf8ce18..44d6a9f4f5b2 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -8,8 +8,6 @@
* Common Clock Framework support for S3C2412 and S3C2413.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 0c3c182b902a..2c0a1ea3c80c 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -8,8 +8,6 @@
* Common Clock Framework support for S3C2443 and following SoCs.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 0f590e5550cb..d325ed1e196b 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -8,8 +8,7 @@
* Common Clock Framework support for all S3C64xx SoCs.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index de4455b75e8a..eefb84b22566 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -13,8 +13,8 @@
* Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
*/
-#include <linux/clkdev.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index cf7e8fa7b624..759aaf342bea 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -11,8 +11,6 @@
* Common Clock Framework support for all S5PC110/S5PV210 SoCs.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -828,6 +826,8 @@ static void __init __s5pv210_clk_init(struct device_node *np,
s5pv210_clk_sleep_init();
+ samsung_clk_of_add_provider(np, ctx);
+
pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
"\tmout_epll = %ld, mout_vpll = %ld\n",
is_s5p6442 ? "S5P6442" : "S5PV210",
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 0117238391d6..f38a6c49f744 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -11,6 +11,10 @@
* clock framework for Samsung platforms.
*/
+#include <linux/slab.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index b775fc29caa5..aa872d2c5105 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -13,10 +13,11 @@
#ifndef __SAMSUNG_CLK_H
#define __SAMSUNG_CLK_H
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include "clk-pll.h"
+struct clk;
+
/**
* struct samsung_clk_provider: information about clock provider
* @reg_base: virtual address for the register base.
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 036a692c7219..b4c8d6746f68 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -11,12 +11,12 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#define CPG_DIV6_CKSTP BIT(8)
#define CPG_DIV6_DIV(d) ((d) & 0x3f)
@@ -133,13 +133,13 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
(BIT(clock->src_width) - 1);
- for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
}
pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
- __func__, __clk_get_name(hw->clk), hw_index);
+ __func__, clk_hw_get_name(hw), hw_index);
return 0;
}
@@ -149,7 +149,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
u8 hw_index;
u32 mask;
- if (index >= __clk_get_num_parents(hw->clk))
+ if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c
index 5b60beb7d0eb..a91825471c79 100644
--- a/drivers/clk/shmobile/clk-emev2.c
+++ b/drivers/clk/shmobile/clk-emev2.c
@@ -28,6 +28,8 @@
#define USIBU1_RSTCTRL 0x0ac
#define USIBU2_RSTCTRL 0x0b0
#define USIBU3_RSTCTRL 0x0b4
+#define IIC0_RSTCTRL 0x0dc
+#define IIC1_RSTCTRL 0x0e0
#define STI_RSTCTRL 0x124
#define STI_CLKSEL 0x688
@@ -66,6 +68,10 @@ static void __init emev2_smu_init(void)
emev2_smu_write(2, USIBU1_RSTCTRL);
emev2_smu_write(2, USIBU2_RSTCTRL);
emev2_smu_write(2, USIBU3_RSTCTRL);
+
+ /* deassert reset for IIC0->IIC1 */
+ emev2_smu_write(1, IIC0_RSTCTRL);
+ emev2_smu_write(1, IIC1_RSTCTRL);
}
static void __init emev2_smu_clkdiv_init(struct device_node *np)
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2d2fe773ac81..b1df7b2f1e97 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -2,6 +2,7 @@
* R-Car MSTP clocks
*
* Copyright (C) 2013 Ideas On Board SPRL
+ * Copyright (C) 2015 Glider bvba
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
@@ -10,11 +11,16 @@
* the Free Software Foundation; version 2 of the License.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
#include <linux/spinlock.h>
/*
@@ -236,3 +242,84 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
}
CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args clkspec;
+ struct clk *clk;
+ int i = 0;
+ int error;
+
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+ &clkspec)) {
+ if (of_device_is_compatible(clkspec.np,
+ "renesas,cpg-mstp-clocks"))
+ goto found;
+
+ of_node_put(clkspec.np);
+ i++;
+ }
+
+ return 0;
+
+found:
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ error = pm_clk_create(dev);
+ if (error) {
+ dev_err(dev, "pm_clk_create failed %d\n", error);
+ goto fail_put;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, error);
+ goto fail_destroy;
+ }
+
+ return 0;
+
+fail_destroy:
+ pm_clk_destroy(dev);
+fail_put:
+ clk_put(clk);
+ return error;
+}
+
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+ if (!list_empty(&dev->power.subsys_data->clock_list))
+ pm_clk_destroy(dev);
+}
+
+void __init cpg_mstp_add_clk_domain(struct device_node *np)
+{
+ struct generic_pm_domain *pd;
+ u32 ncells;
+
+ if (of_property_read_u32(np, "#power-domain-cells", &ncells)) {
+ pr_warn("%s lacks #power-domain-cells\n", np->full_name);
+ return;
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return;
+
+ pd->name = np->name;
+
+ pd->flags = GENPD_FLAG_PM_CLK;
+ pm_genpd_init(pd, &simple_qos_governor, false);
+ pd->attach_dev = cpg_mstp_attach_dev;
+ pd->detach_dev = cpg_mstp_detach_dev;
+
+ of_genpd_add_provider_simple(np, pd);
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
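+
+/*
+ * Consumer-side sketch, hypothetical driver (assumes
+ * <linux/platform_device.h> and <linux/pm_runtime.h>): once a device
+ * sits in this clock domain, runtime PM gates its MSTP clock through
+ * the pm_clk list that cpg_mstp_attach_dev() populated.
+ */
+static int foo_probe(struct platform_device *pdev)
+{
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev); /* MSTP clock ungated here */
+ /* ... device setup ... */
+ pm_runtime_put(&pdev->dev); /* clock may gate again when idle */
+ return 0;
+}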
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
index 29b9a0b0012a..9326204bed9d 100644
--- a/drivers/clk/shmobile/clk-r8a73a4.c
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -9,10 +9,10 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/shmobile/clk-r8a7740.c b/drivers/clk/shmobile/clk-r8a7740.c
index 1e2eaae21e01..1e6b1da58065 100644
--- a/drivers/clk/shmobile/clk-r8a7740.c
+++ b/drivers/clk/shmobile/clk-r8a7740.c
@@ -9,10 +9,10 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/shmobile/clk-r8a7778.c
index cb33b57274bf..87c1d2f2fb57 100644
--- a/drivers/clk/shmobile/clk-r8a7778.c
+++ b/drivers/clk/shmobile/clk-r8a7778.c
@@ -9,9 +9,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
struct r8a7778_cpg {
struct clk_onecell_data data;
@@ -124,6 +124,8 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
}
of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+ cpg_mstp_add_clk_domain(np);
}
CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/shmobile/clk-r8a7779.c
index 652ecacb6daf..92275c5f2c60 100644
--- a/drivers/clk/shmobile/clk-r8a7779.c
+++ b/drivers/clk/shmobile/clk-r8a7779.c
@@ -11,12 +11,12 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r8a7779-clock.h>
@@ -168,6 +168,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
}
of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+ cpg_mstp_add_clk_domain(np);
}
CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks",
r8a7779_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index acfb6d7dbd6b..745496f7ee9c 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -11,13 +11,13 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
struct rcar_gen2_cpg {
@@ -415,6 +415,8 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
}
of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+ cpg_mstp_add_clk_domain(np);
}
CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
rcar_gen2_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c
index 7e68e8630962..9766e3cb595f 100644
--- a/drivers/clk/shmobile/clk-rz.c
+++ b/drivers/clk/shmobile/clk-rz.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -99,5 +100,7 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
}
of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+ cpg_mstp_add_clk_domain(np);
}
CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-sh73a0.c b/drivers/clk/shmobile/clk-sh73a0.c
index cd529cfe412f..8966f8bbfd72 100644
--- a/drivers/clk/shmobile/clk-sh73a0.c
+++ b/drivers/clk/shmobile/clk-sh73a0.c
@@ -9,12 +9,12 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/clk/shmobile.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
struct sh73a0_cpg {
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index d63b76ca60c3..c5eaa9d16247 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index db8ab691dbf6..a98e21fe773a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -358,6 +358,7 @@ static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) {
rate = fin;
rate *= 1 << 24;
+ do_div(rate, nr);
do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth)
+ (ssmod << ssdepth)));
} else {
@@ -465,6 +466,9 @@ static struct clk_pll clk_sys3pll = {
* double resolution mode:fout = fin * finc / 2^29
* normal mode:fout = fin * finc / 2^28
*/
+#define DTO_RESL_DOUBLE (1ULL << 29)
+#define DTO_RESL_NORMAL (1ULL << 28)
+
static int dto_clk_is_enabled(struct clk_hw *hw)
{
struct clk_dto *clk = to_dtoclk(hw);
@@ -509,9 +513,9 @@ static unsigned long dto_clk_recalc_rate(struct clk_hw *hw,
rate *= finc;
if (droff & BIT(0))
/* Double resolution off */
- do_div(rate, 1 << 28);
+ do_div(rate, DTO_RESL_NORMAL);
else
- do_div(rate, 1 << 29);
+ do_div(rate, DTO_RESL_DOUBLE);
return rate;
}
@@ -519,11 +523,11 @@ static unsigned long dto_clk_recalc_rate(struct clk_hw *hw,
static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- u64 dividend = rate * (1 << 29);
+ u64 dividend = rate * DTO_RESL_DOUBLE;
do_div(dividend, *parent_rate);
dividend *= *parent_rate;
- do_div(dividend, 1 << 29);
+ do_div(dividend, DTO_RESL_DOUBLE);
return dividend;
}
@@ -531,7 +535,7 @@ static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u64 dividend = rate * (1 << 29);
+ u64 dividend = rate * DTO_RESL_DOUBLE;
struct clk_dto *clk = to_dtoclk(hw);
do_div(dividend, parent_rate);
@@ -1161,7 +1165,7 @@ static struct atlas7_unit_init_data unit_list[] __initdata = {
{ 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock },
{ 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock },
{ 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock },
- { 125, "thcpum_cpudiv4", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
+ { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
{ 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock },
{ 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock },
{ 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock },
@@ -1174,9 +1178,13 @@ static struct atlas7_unit_init_data unit_list[] __initdata = {
{ 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock },
{ 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock },
{ 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock },
+ { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, &leaf0_gate_lock },
+ { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, &leaf0_gate_lock },
+ { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, &leaf0_gate_lock },
+ { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, &leaf0_gate_lock },
};
-static struct clk *atlas7_clks[ARRAY_SIZE(unit_list)];
+static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)];
static int unit_clk_is_enabled(struct clk_hw *hw)
{
@@ -1609,6 +1617,7 @@ static void __init atlas7_clk_init(struct device_node *np)
sirfsoc_clk_vbase + mux->mux_offset,
mux->shift, mux->width,
mux->mux_flags, NULL);
+ atlas7_clks[ARRAY_SIZE(unit_list) + i] = clk;
BUG_ON(!clk);
}
@@ -1620,7 +1629,7 @@ static void __init atlas7_clk_init(struct device_node *np)
}
clk_data.clks = atlas7_clks;
- clk_data.clk_num = ARRAY_SIZE(unit_list);
+ clk_data.clk_num = ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list);
ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
BUG_ON(ret);
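
Two fixes in the atlas7 hunks are easy to miss. The spread-spectrum branch of pll_clk_recalc_rate() gains the previously missing division by nr, and the DTO_RESL_* macros are not just cosmetic: on a 32-bit build the old `rate * (1 << 29)` multiplied two 32-bit values and could overflow before the product was widened to u64, whereas `1ULL << 29` forces a 64-bit multiplication; do_div() then divides the u64 in place by a 32-bit divisor. A standalone sketch of the DTO rate math (userspace division standing in for do_div(); the fin/finc values are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define DTO_RESL_DOUBLE (1ULL << 29)
        #define DTO_RESL_NORMAL (1ULL << 28)

        int main(void)
        {
                uint64_t fin = 26000000;        /* input rate, Hz */
                uint64_t finc = 1ULL << 29;     /* frequency increment word */
                int double_resolution = 1;

                /* fout = fin * finc / 2^29 (double resolution) or / 2^28;
                 * the 64-bit operands keep the product from overflowing */
                uint64_t fout = fin * finc /
                        (double_resolution ? DTO_RESL_DOUBLE : DTO_RESL_NORMAL);

                printf("fout = %llu Hz\n", (unsigned long long)fout);
                return 0;
        }
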
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 9fc285d784d3..77e1e2491689 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -7,6 +7,8 @@
* Licensed under GPLv2 or later.
*/
+#include <linux/clk.h>
+
#define KHZ 1000
#define MHZ (KHZ * KHZ)
@@ -165,10 +167,10 @@ static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* SiRF SoC has not cpu clock control,
* So bypass to it's parent pll.
*/
- struct clk *parent_clk = clk_get_parent(hw->clk);
- struct clk *pll_parent_clk = clk_get_parent(parent_clk);
- unsigned long pll_parent_rate = clk_get_rate(pll_parent_clk);
- return pll_clk_round_rate(__clk_get_hw(parent_clk), rate, &pll_parent_rate);
+ struct clk_hw *parent_clk = clk_hw_get_parent(hw);
+ struct clk_hw *pll_parent_clk = clk_hw_get_parent(parent_clk);
+ unsigned long pll_parent_rate = clk_hw_get_rate(pll_parent_clk);
+ return pll_clk_round_rate(parent_clk, rate, &pll_parent_rate);
}
static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
@@ -178,8 +180,8 @@ static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
* SiRF SoC has not cpu clock control,
* So return the parent pll rate.
*/
- struct clk *parent_clk = clk_get_parent(hw->clk);
- return __clk_get_rate(parent_clk);
+ struct clk_hw *parent_clk = clk_hw_get_parent(hw);
+ return clk_hw_get_rate(parent_clk);
}
static struct clk_ops std_pll_ops = {
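
The clk-common.c hunks are part of a tree-wide conversion: provider callbacks now use the clk_hw_*() accessors introduced in this cycle instead of reaching for the consumer API through hw->clk. A minimal sketch of the provider-side idiom, where the recalc callback itself is illustrative:

        #include <linux/clk-provider.h>

        static unsigned long bypass_recalc_rate(struct clk_hw *hw,
                                                unsigned long parent_rate)
        {
                /* stay inside the provider API: no clk_get_parent(hw->clk) */
                struct clk_hw *parent = clk_hw_get_parent(hw);

                return parent ? clk_hw_get_rate(parent) : parent_rate;
        }
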
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index 6968e2ebcd8a..f92c40264342 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 83c6780ff4b2..1cebf253e8fd 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -13,6 +13,7 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -38,7 +39,7 @@ static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk,
div = socfpgaclk->fixed_div;
else if (socfpgaclk->div_reg) {
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
- val &= div_mask(socfpgaclk->width);
+ val &= GENMASK(socfpgaclk->width - 1, 0);
div = (1 << val);
}
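
GENMASK(width - 1, 0) is bit-for-bit equivalent to the private div_mask(width) helper that this series removes from socfpga's clk.h, but it comes from a shared header instead of being redefined per driver. A standalone check of the equivalence (userspace restatement of the kernel macro; the loop stops below BITS_PER_LONG to avoid undefined shifts):

        #include <assert.h>

        #define BITS_PER_LONG (8 * sizeof(unsigned long))
        #define GENMASK(h, l) \
                (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
        #define div_mask(width) ((1UL << (width)) - 1)

        int main(void)
        {
                for (unsigned int w = 1; w < BITS_PER_LONG; w++)
                        assert(GENMASK(w - 1, 0) == div_mask(w));
                return 0;
        }
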
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 82449cd76fd7..aa7a6e6a15b6 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -15,8 +15,7 @@
* Based from clk-highbank.c
*
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -106,7 +105,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
div = socfpgaclk->fixed_div;
else if (socfpgaclk->div_reg) {
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
- val &= div_mask(socfpgaclk->width);
+ val &= GENMASK(socfpgaclk->width - 1, 0);
/* Check for GPIO_DB_CLK by its offset */
if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
div = val + 1;
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
index 9d0181b5a6a4..1f397cb72e89 100644
--- a/drivers/clk/socfpga/clk-periph-a10.c
+++ b/drivers/clk/socfpga/clk-periph-a10.c
@@ -13,6 +13,7 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -37,7 +38,7 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
div = socfpgaclk->fixed_div;
} else if (socfpgaclk->div_reg) {
div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
- div &= div_mask(socfpgaclk->width);
+ div &= GENMASK(socfpgaclk->width - 1, 0);
div += 1;
} else {
div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
index 83aeaa219d14..52c883ea7706 100644
--- a/drivers/clk/socfpga/clk-periph.c
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -15,8 +15,7 @@
* Based from clk-highbank.c
*
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -36,7 +35,7 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
} else {
if (socfpgaclk->div_reg) {
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
- val &= div_mask(socfpgaclk->width);
+ val &= GENMASK(socfpgaclk->width - 1, 0);
parent_rate /= (val + 1);
}
div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
@@ -45,8 +44,17 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
+static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+{
+ u32 clk_src;
+
+ clk_src = readl(clk_mgr_base_addr + CLKMGR_DBCTRL);
+ return clk_src & 0x1;
+}
+
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
};
static __init void __socfpga_periph_init(struct device_node *node,
@@ -56,7 +64,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
const char *clk_name = node->name;
- const char *parent_name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
int rc;
u32 fixed_div;
@@ -90,9 +98,10 @@ static __init void __socfpga_periph_init(struct device_node *node,
init.name = clk_name;
init.ops = ops;
init.flags = 0;
- parent_name = of_clk_get_parent_name(node, 0);
- init.parent_names = &parent_name;
- init.num_parents = 1;
+
+ init.num_parents = of_clk_parent_fill(node, parent_name,
+ SOCFPGA_MAX_PARENTS);
+ init.parent_names = parent_name;
periph_clk->hw.hw.init = &init;
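
of_clk_parent_fill(), a helper added in this merge window, replaces the open-coded of_clk_get_parent_name() loops throughout this series: it fills a caller-supplied array and returns the number of parents found, which feeds num_parents directly. A sketch of the idiom, where MY_MAX_PARENTS and the surrounding setup are illustrative:

        #include <linux/clk-provider.h>
        #include <linux/of.h>

        #define MY_MAX_PARENTS 5

        static void __init example_clk_setup(struct device_node *np)
        {
                const char *parent_names[MY_MAX_PARENTS];
                struct clk_init_data init = { };

                /* one call replaces the of_clk_get_parent_name() loop */
                init.num_parents = of_clk_parent_fill(np, parent_names,
                                                      MY_MAX_PARENTS);
                init.parent_names = parent_names;
                /* ... then register the clock with &init as usual */
        }
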
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 1178b11babca..402d630bd531 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -13,6 +13,7 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 8f26b5234947..c7f463172e4b 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -15,8 +15,7 @@
* Based from clk-highbank.c
*
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
index 603973ab7e29..814c7247bf73 100644
--- a/drivers/clk/socfpga/clk.h
+++ b/drivers/clk/socfpga/clk.h
@@ -18,16 +18,15 @@
#define __SOCFPGA_CLK_H
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
/* Clock Manager offsets */
#define CLKMGR_CTRL 0x0
#define CLKMGR_BYPASS 0x4
+#define CLKMGR_DBCTRL 0x10
#define CLKMGR_L4SRC 0x70
#define CLKMGR_PERPLL_SRC 0xAC
#define SOCFPGA_MAX_PARENTS 5
-#define div_mask(width) ((1 << (width)) - 1)
#define streq(a, b) (strcmp((a), (b)) == 0)
#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index bdfb4421c643..f271c350ef94 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index dffd4ce6c8b5..58d678b5b40a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1afc18c4effc..1a722e99e76e 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 1b9b65bca51e..dc21ca4601aa 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -87,7 +87,7 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
struct clk_pll *pll = to_clk_pll(hw);
unsigned long prev_rate, vco_prev_rate, rate = 0;
unsigned long vco_parent_rate =
- __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+ clk_hw_get_rate(clk_hw_get_parent(clk_hw_get_parent(hw)));
if (!prate) {
pr_err("%s: prate is must for pll clk\n", __func__);
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 628b6d5ed3d9..157fe099ea6a 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 931737677dfa..9834944f08b1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
* Clock framework definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 4daa5977793a..009bd1410cfa 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,14 +4,13 @@
* SPEAr1310 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 5a5c6648308d..9c7abfd951ba 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,14 +4,13 @@
* SPEAr1340 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index bb5f387774e2..404a55edd613 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr3xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 4f649c9cb094..e24f85cd4300 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,14 +2,13 @@
* SPEAr6xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 657ca14ba709..bd355ee33766 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -5,6 +5,7 @@
* Author: Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
* License terms: GNU General Public License (GPL), version 2 */
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -44,7 +45,7 @@ static int flexgen_enable(struct clk_hw *hw)
clk_gate_ops.enable(fgate_hw);
- pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk));
+ pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw));
return 0;
}
@@ -58,7 +59,7 @@ static void flexgen_disable(struct clk_hw *hw)
clk_gate_ops.disable(fgate_hw);
- pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk));
+ pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw));
}
static int flexgen_is_enabled(struct clk_hw *hw)
@@ -108,7 +109,7 @@ static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
/* Round div according to exact prate and wished rate */
div = clk_best_div(*prate, rate);
- if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
*prate = rate * div;
return rate;
}
@@ -190,7 +191,7 @@ static struct clk *clk_register_flexgen(const char *name,
init.name = name;
init.ops = &flexgen_ops;
- init.flags = CLK_IS_BASIC | flexgen_flags;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -243,7 +244,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents, i;
+ int nparents;
nparents = of_clk_get_parent_count(np);
if (WARN_ON(nparents <= 0))
@@ -253,10 +254,8 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
if (!parents)
return NULL;
- for (i = 0; i < nparents; i++)
- parents[i] = of_clk_get_parent_name(np, i);
+ *num_parents = of_clk_parent_fill(np, parents, nparents);
- *num_parents = nparents;
return parents;
}
@@ -303,6 +302,8 @@ static void __init st_of_flexgen_setup(struct device_node *np)
if (!rlock)
goto err;
+ spin_lock_init(rlock);
+
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
const char *clk_name;
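
The flexgen hunk adding spin_lock_init() fixes a real bug: rlock is allocated dynamically, and a zero-filled spinlock is not a validly initialized one (with CONFIG_DEBUG_SPINLOCK the lock carries magic fields that must be set up). Statically allocated locks use DEFINE_SPINLOCK(); dynamically allocated ones must call spin_lock_init(). A sketch of the two patterns, with illustrative names:

        #include <linux/slab.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(static_lock);   /* static: set up at build time */

        struct my_state {
                spinlock_t lock;
        };

        static struct my_state *my_state_alloc(void)
        {
                struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

                if (st)
                        spin_lock_init(&st->lock); /* zeroed != initialized */
                return st;
        }
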
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index e94197f04b0b..83ccf142ff2a 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clkgen.h"
@@ -340,7 +341,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
CLKGEN_FIELD(0x30c, 0xf, 20),
CLKGEN_FIELD(0x310, 0xf, 20) },
.lockstatus_present = true,
- .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+ .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
.powerup_polarity = 1,
.standby_polarity = 1,
.pll_ops = &st_quadfs_pll_c32_ops,
@@ -489,7 +490,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
u32 npda = CLKGEN_READ(pll, npda);
- return !!npda;
+ return pll->data->powerup_polarity ? !npda : !!npda;
}
static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -512,7 +513,7 @@ static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
params.ndiv = CLKGEN_READ(pll, ndiv);
if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
pr_err("%s:%s error calculating rate\n",
- __clk_get_name(hw->clk), __func__);
+ clk_hw_get_name(hw), __func__);
pll->ndiv = params.ndiv;
@@ -557,7 +558,7 @@ static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate
clk_fs660c32_vco_get_rate(*prate, &params, &rate);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
- __func__, __clk_get_name(hw->clk),
+ __func__, clk_hw_get_name(hw),
rate, (unsigned int)params.sdiv,
(unsigned int)params.mdiv,
(unsigned int)params.pe, (unsigned int)params.nsdiv);
@@ -580,7 +581,7 @@ static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
- __func__, __clk_get_name(hw->clk),
+ __func__, clk_hw_get_name(hw),
hwrate, (unsigned int)params.ndiv);
if (!hwrate)
@@ -635,7 +636,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
init.name = name;
init.ops = quadfs->pll_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -744,7 +745,7 @@ static int quadfs_fsynth_enable(struct clk_hw *hw)
struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
unsigned long flags = 0;
- pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+ pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
quadfs_fsynth_program_rate(fs);
@@ -769,12 +770,12 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
unsigned long flags = 0;
- pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+ pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
if (fs->lock)
spin_lock_irqsave(fs->lock, flags);
- CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+ CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
if (fs->lock)
spin_unlock_irqrestore(fs->lock, flags);
@@ -786,7 +787,7 @@ static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
pr_debug("%s: %s enable bit = 0x%x\n",
- __func__, __clk_get_name(hw->clk), nsb);
+ __func__, clk_hw_get_name(hw), nsb);
return fs->data->standby_polarity ? !nsb : !!nsb;
}
@@ -945,10 +946,10 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
if (clk_fs_get_rate(parent_rate, &params, &rate)) {
pr_err("%s:%s error calculating rate\n",
- __clk_get_name(hw->clk), __func__);
+ clk_hw_get_name(hw), __func__);
}
- pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
return rate;
}
@@ -961,7 +962,7 @@ static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
rate = quadfs_find_best_rate(hw, rate, *prate, &params);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
- __func__, __clk_get_name(hw->clk),
+ __func__, clk_hw_get_name(hw),
rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
(unsigned int)params.pe, (unsigned int)params.nsdiv);
@@ -1082,10 +1083,6 @@ static const struct of_device_id quadfs_of_match[] = {
.compatible = "st,stih407-quadfs660-D",
.data = &st_fs660c32_D_407
},
- {
- .compatible = "st,stih407-quadfs660-D",
- .data = (void *)&st_fs660c32_D_407
- },
{}
};
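
The clkgen-fsyn changes are behavioral fixes: the stih407 PLL lock-status field moves from offset 0x2A0 to 0x2f0, quadfs_pll_is_enabled() now honors powerup_polarity, the fsynth disable path writes the standby polarity value instead of its inverse (the old code wrote the active level, so the channel was never actually put in standby), and a duplicated "st,stih407-quadfs660-D" match entry is dropped. A sketch of the polarity-aware status test these fixes rely on, with illustrative names:

        #include <linux/types.h>

        static int example_is_enabled(u32 status_bit, bool inverted_polarity)
        {
                /* with inverted polarity, 0 in the register means "running" */
                return inverted_polarity ? !status_bit : !!status_bit;
        }
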
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4fbe6e099587..4f7f6c00b219 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
static DEFINE_SPINLOCK(clkgena_divmux_lock);
@@ -24,20 +25,17 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents, i;
+ int nparents;
nparents = of_clk_get_parent_count(np);
if (WARN_ON(nparents <= 0))
return ERR_PTR(-EINVAL);
- parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL);
+ parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
if (!parents)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < nparents; i++)
- parents[i] = of_clk_get_parent_name(np, i);
-
- *num_parents = nparents;
+ *num_parents = of_clk_parent_fill(np, parents, nparents);
return parents;
}
@@ -141,7 +139,7 @@ static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
if ((s8)genamux->muxsel < 0) {
pr_debug("%s: %s: Invalid parent, setting to default.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
genamux->muxsel = 0;
}
@@ -215,7 +213,7 @@ static const struct clk_ops clkgena_divmux_ops = {
/**
* clk_register_genamux - register a genamux clock with the clock framework
*/
-static struct clk *clk_register_genamux(const char *name,
+static struct clk * __init clk_register_genamux(const char *name,
const char **parent_names, u8 num_parents,
void __iomem *reg,
const struct clkgena_divmux_data *muxdata,
@@ -237,7 +235,7 @@ static struct clk *clk_register_genamux(const char *name,
init.name = name;
init.ops = &clkgena_divmux_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -369,11 +367,10 @@ static const struct of_device_id clkgena_divmux_of_match[] = {
{}
};
-static void __iomem * __init clkgen_get_register_base(
- struct device_node *np)
+static void __iomem * __init clkgen_get_register_base(struct device_node *np)
{
struct device_node *pnode;
- void __iomem *reg = NULL;
+ void __iomem *reg;
pnode = of_get_parent(np);
if (!pnode)
@@ -398,7 +395,7 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np)
if (WARN_ON(!match))
return;
- data = (struct clkgena_divmux_data *)match->data;
+ data = match->data;
reg = clkgen_get_register_base(np);
if (!reg)
@@ -406,18 +403,18 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np)
parents = clkgen_mux_get_parents(np, &num_parents);
if (IS_ERR(parents))
- return;
+ goto err_parents;
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
- goto err;
+ goto err_alloc;
clk_data->clk_num = data->num_outputs;
- clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
GFP_KERNEL);
if (!clk_data->clks)
- goto err;
+ goto err_alloc_clks;
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
@@ -447,11 +444,13 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
return;
err:
- if (clk_data)
- kfree(clk_data->clks);
-
+ kfree(clk_data->clks);
+err_alloc_clks:
kfree(clk_data);
+err_alloc:
kfree(parents);
+err_parents:
+ iounmap(reg);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
@@ -491,7 +490,7 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
void __iomem *reg;
const char *parent_name, *clk_name;
struct clk *clk;
- struct clkgena_prediv_data *data;
+ const struct clkgena_prediv_data *data;
match = of_match_node(clkgena_prediv_of_match, np);
if (!match) {
@@ -499,7 +498,7 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
return;
}
- data = (struct clkgena_prediv_data *)match->data;
+ data = match->data;
reg = clkgen_get_register_base(np);
if (!reg)
@@ -507,17 +506,18 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
- return;
+ goto err;
if (of_property_read_string_index(np, "clock-output-names",
0, &clk_name))
- return;
+ goto err;
- clk = clk_register_divider_table(NULL, clk_name, parent_name, 0,
+ clk = clk_register_divider_table(NULL, clk_name, parent_name,
+ CLK_GET_RATE_NOCACHE,
reg + data->offset, data->shift, 1,
0, data->table, NULL);
if (IS_ERR(clk))
- return;
+ goto err;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
pr_debug("%s: parent %s rate %u\n",
@@ -526,6 +526,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
(unsigned int)clk_get_rate(clk));
return;
+err:
+ iounmap(reg);
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
@@ -582,7 +584,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
};
static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
- .shift = 1,
+ .shift = 0,
.width = 2,
};
@@ -629,7 +631,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np)
void __iomem *reg;
const char **parents;
int num_parents;
- struct clkgen_mux_data *data;
+ const struct clkgen_mux_data *data;
match = of_match_node(mux_of_match, np);
if (!match) {
@@ -637,7 +639,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np)
return;
}
- data = (struct clkgen_mux_data *)match->data;
+ data = match->data;
reg = of_iomap(np, 0);
if (!reg) {
@@ -649,7 +651,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np)
if (IS_ERR(parents)) {
pr_err("%s: Failed to get parents (%ld)\n",
__func__, PTR_ERR(parents));
- return;
+ goto err_parents;
}
clk = clk_register_mux(NULL, np->name, parents, num_parents,
@@ -665,12 +667,14 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np)
__clk_get_name(clk_get_parent(clk)),
(unsigned int)clk_get_rate(clk));
+ kfree(parents);
of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
err:
kfree(parents);
-
- return;
+err_parents:
+ iounmap(reg);
}
CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
@@ -706,12 +710,12 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
const char **parents;
int num_parents, i;
struct clk_onecell_data *clk_data;
- struct clkgen_vcc_data *data;
+ const struct clkgen_vcc_data *data;
match = of_match_node(vcc_of_match, np);
if (WARN_ON(!match))
return;
- data = (struct clkgen_vcc_data *)match->data;
+ data = match->data;
reg = of_iomap(np, 0);
if (!reg)
@@ -719,18 +723,18 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
parents = clkgen_mux_get_parents(np, &num_parents);
if (IS_ERR(parents))
- return;
+ goto err_parents;
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
- goto err;
+ goto err_alloc;
clk_data->clk_num = VCC_MAX_CHANNELS;
- clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+ clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
GFP_KERNEL);
if (!clk_data->clks)
- goto err;
+ goto err_alloc_clks;
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
@@ -749,21 +753,21 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
if (*clk_name == '\0')
continue;
- gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- break;
+ goto err;
- div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div) {
kfree(gate);
- break;
+ goto err;
}
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux) {
kfree(gate);
kfree(div);
- break;
+ goto err;
}
gate->reg = reg + VCC_GATE_OFFSET;
@@ -786,7 +790,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
&mux->hw, &clk_mux_ops,
&div->hw, &clk_divider_ops,
&gate->hw, &clk_gate_ops,
- data->clk_flags);
+ data->clk_flags |
+ CLK_GET_RATE_NOCACHE);
if (IS_ERR(clk)) {
kfree(gate);
kfree(div);
@@ -821,10 +826,12 @@ err:
kfree(container_of(composite->mux_hw, struct clk_mux, hw));
}
- if (clk_data)
- kfree(clk_data->clks);
-
+ kfree(clk_data->clks);
+err_alloc_clks:
kfree(clk_data);
+err_alloc:
kfree(parents);
+err_parents:
+ iounmap(reg);
}
CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);
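
Most of the clkgen-mux.c rework is error-path hygiene: allocation-size arithmetic moves to kcalloc() (which checks the multiplication for overflow), match->data is kept const instead of being cast away, and each failure now unwinds through ordered labels so that every allocation and the ioremapped region are released. A minimal sketch of the layered goto unwind, with illustrative resource names:

        #include <linux/clk-provider.h>
        #include <linux/errno.h>
        #include <linux/io.h>
        #include <linux/slab.h>

        static int example_setup(void __iomem *reg)  /* caller mapped reg */
        {
                struct clk_onecell_data *clk_data;

                clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
                if (!clk_data)
                        goto err_alloc;

                clk_data->clks = kcalloc(16, sizeof(struct clk *), GFP_KERNEL);
                if (!clk_data->clks)
                        goto err_alloc_clks;

                return 0;       /* success: resources stay registered */

        err_alloc_clks:
                kfree(clk_data);
        err_alloc:
                iounmap(reg);
                return -ENOMEM;
        }
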
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 106532207213..47a38a994cac 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clkgen.h"
@@ -291,7 +292,7 @@ static unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv;
rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv));
- pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
return rate;
@@ -316,7 +317,7 @@ static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
/* Note: input is divided by 1000 to avoid overflow */
rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000;
- pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
return rate;
}
@@ -338,7 +339,7 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
/* Note: input is divided to avoid overflow */
rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000;
- pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
return rate;
}
@@ -365,7 +366,7 @@ static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
/* Note: input is divided by 1000 to avoid overflow */
rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000;
- pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
return rate;
}
@@ -406,7 +407,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
init.name = clk_name;
init.ops = pll_data->ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 058f273d6154..f5a35b82cc1a 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -6,6 +6,7 @@ obj-y += clk-sunxi.o clk-factors.o
obj-y += clk-a10-hosc.o
obj-y += clk-a20-gmac.o
obj-y += clk-mod0.o
+obj-y += clk-simple-gates.o
obj-y += clk-sun8i-mbus.o
obj-y += clk-sun9i-core.o
obj-y += clk-sun9i-mmc.o
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 0dcf4f205fb8..1611b036421c 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -80,9 +80,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
goto free_mux;
/* gmac clock requires exactly 2 parents */
- parents[0] = of_clk_get_parent_name(node, 0);
- parents[1] = of_clk_get_parent_name(node, 1);
- if (!parents[0] || !parents[1])
+ if (of_clk_parent_fill(node, parents, 2) != 2)
goto free_gate;
reg = of_iomap(node, 0);
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 8c20190a3e9f..59428dbd607a 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -79,41 +79,42 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
return rate;
}
-static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_p)
+static int clk_factors_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+ struct clk_hw *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
/* find the parent that can help provide the fastest rate <= rate */
- num_parents = __clk_get_num_parents(clk);
+ num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
- parent = clk_get_parent_by_index(clk, i);
+ parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
- if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
- parent_rate = __clk_round_rate(parent, rate);
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ parent_rate = clk_hw_round_rate(parent, req->rate);
else
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
- child_rate = clk_factors_round_rate(hw, rate, &parent_rate);
+ child_rate = clk_factors_round_rate(hw, req->rate,
+ &parent_rate);
- if (child_rate <= rate && child_rate > best_child_rate) {
+ if (child_rate <= req->rate && child_rate > best_child_rate) {
best_parent = parent;
best = parent_rate;
best_child_rate = child_rate;
}
}
- if (best_parent)
- *best_parent_p = __clk_get_hw(best_parent);
- *best_parent_rate = best;
+ if (!best_parent)
+ return -EINVAL;
- return best_child_rate;
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best;
+ req->rate = best_child_rate;
+
+ return 0;
}
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -174,9 +175,7 @@ struct clk *sunxi_factors_register(struct device_node *node,
int i = 0;
/* if we have a mux, we will have >1 parents */
- while (i < FACTORS_MAX_PARENTS &&
- (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
- i++;
+ i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
/*
* some factor clocks, such as pll5 and pll6, may have multiple
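
The determine_rate conversion visible above is the clk framework API change in this release: instead of returning a rate as a long through a pile of output pointers, the callback receives a struct clk_rate_request (carrying rate, min_rate, max_rate, best_parent_rate and best_parent_hw), fills it in, and returns 0 or a negative errno, so "no usable parent" becomes a clean -EINVAL rather than a negative rate. A skeleton of the new-style callback, where the parent-selection policy is a placeholder:

        #include <linux/clk-provider.h>
        #include <linux/kernel.h>

        static int example_determine_rate(struct clk_hw *hw,
                                          struct clk_rate_request *req)
        {
                struct clk_hw *parent = clk_hw_get_parent_by_index(hw, 0);

                if (!parent)
                        return -EINVAL; /* errno instead of a negative rate */

                req->best_parent_hw = parent;
                req->best_parent_rate = clk_hw_get_rate(parent);
                req->rate = min(req->rate, req->max_rate); /* honor the window */

                return 0;
        }
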
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 9d028aec58e5..d167e1efb927 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -14,10 +14,11 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "clk-factors.h"
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
new file mode 100644
index 000000000000..6ce91180da1b
--- /dev/null
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(gates_lock);
+
+static void __init sunxi_simple_gates_setup(struct device_node *node,
+ const int protected[],
+ int nprotected)
+{
+ struct clk_onecell_data *clk_data;
+ const char *clk_parent, *clk_name;
+ struct property *prop;
+ struct resource res;
+ void __iomem *clk_reg;
+ void __iomem *reg;
+ const __be32 *p;
+ int number, i = 0, j;
+ u8 clk_bit;
+ u32 index;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg))
+ return;
+
+ clk_parent = of_clk_get_parent_name(node, 0);
+
+ clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err_unmap;
+
+ number = of_property_count_u32_elems(node, "clock-indices");
+ of_property_read_u32_index(node, "clock-indices", number - 1, &number);
+
+ clk_data->clks = kcalloc(number + 1, sizeof(struct clk *), GFP_KERNEL);
+ if (!clk_data->clks)
+ goto err_free_data;
+
+ of_property_for_each_u32(node, "clock-indices", prop, p, index) {
+ of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name);
+
+ clk_reg = reg + 4 * (index / 32);
+ clk_bit = index % 32;
+
+ clk_data->clks[index] = clk_register_gate(NULL, clk_name,
+ clk_parent, 0,
+ clk_reg,
+ clk_bit,
+ 0, &gates_lock);
+ i++;
+
+ if (IS_ERR(clk_data->clks[index])) {
+ WARN_ON(true);
+ continue;
+ }
+
+ for (j = 0; j < nprotected; j++)
+ if (protected[j] == index)
+ clk_prepare_enable(clk_data->clks[index]);
+
+ }
+
+ clk_data->clk_num = number + 1;
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ return;
+
+err_free_data:
+ kfree(clk_data);
+err_unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+}
+
+static void __init sunxi_simple_gates_init(struct device_node *node)
+{
+ sunxi_simple_gates_setup(node, NULL, 0);
+}
+
+CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun4i_a10_axi, "allwinner,sun4i-a10-axi-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a10s_apb0, "allwinner,sun5i-a10s-apb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a10s_apb1, "allwinner,sun5i-a10s-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a13_apb0, "allwinner,sun5i-a13-apb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a13_apb1, "allwinner,sun5i-a13-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_apb1, "allwinner,sun6i-a31-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_apb2, "allwinner,sun6i-a31-apb2-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun7i_a20_apb0, "allwinner,sun7i-a20-apb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun7i_a20_apb1, "allwinner,sun7i-a20-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_ahb1, "allwinner,sun8i-a23-ahb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_apb1, "allwinner,sun8i-a23-apb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb2, "allwinner,sun9i-a80-ahb2-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-gates-clk",
+ sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-gates-clk",
+ sunxi_simple_gates_init);
+
+static const int sun4i_a10_ahb_critical_clocks[] __initconst = {
+ 14, /* ahb_sdram */
+};
+
+static void __init sun4i_a10_ahb_init(struct device_node *node)
+{
+ sunxi_simple_gates_setup(node, sun4i_a10_ahb_critical_clocks,
+ ARRAY_SIZE(sun4i_a10_ahb_critical_clocks));
+}
+CLK_OF_DECLARE(sun4i_a10_ahb, "allwinner,sun4i-a10-ahb-gates-clk",
+ sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun5i_a10s_ahb, "allwinner,sun5i-a10s-ahb-gates-clk",
+ sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun5i_a13_ahb, "allwinner,sun5i-a13-ahb-gates-clk",
+ sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun7i_a20_ahb, "allwinner,sun7i-a20-ahb-gates-clk",
+ sun4i_a10_ahb_init);
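
The new clk-simple-gates.c driver replaces the per-SoC bitmask tables deleted from clk-sunxi.c further down. Rather than a compiled-in mask, it derives the gate list from the DT "clock-indices" property, sizes the clks array from the last (highest) index, registers one gate per index at bit (index % 32) of register word (index / 32), and keeps a short table of protected indices (ahb_sdram, bit 14) enabled at boot. A sketch of the "clock-indices" walk it builds on; everything but the of_* helpers and the property name is illustrative:

        #include <linux/of.h>

        static u32 example_highest_index(struct device_node *node)
        {
                struct property *prop;
                const __be32 *p;
                u32 index, highest = 0;

                of_property_for_each_u32(node, "clock-indices", prop, p, index)
                        if (index > highest)
                                highest = index;

                /* the driver reads the last element directly instead, which
                 * assumes the binding lists indices in ascending order */
                return highest;
        }
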
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 63cf149195ae..806fd019c05d 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -44,28 +44,25 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
return (parent_rate >> shift) / (div + 1);
}
-static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk)
+static int ar100_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int nparents = __clk_get_num_parents(hw->clk);
+ int nparents = clk_hw_get_num_parents(hw);
long best_rate = -EINVAL;
int i;
- *best_parent_clk = NULL;
+ req->best_parent_hw = NULL;
for (i = 0; i < nparents; i++) {
unsigned long parent_rate;
unsigned long tmp_rate;
- struct clk *parent;
+ struct clk_hw *parent;
unsigned long div;
int shift;
- parent = clk_get_parent_by_index(hw->clk, i);
- parent_rate = __clk_get_rate(parent);
- div = DIV_ROUND_UP(parent_rate, rate);
+ parent = clk_hw_get_parent_by_index(hw, i);
+ parent_rate = clk_hw_get_rate(parent);
+ div = DIV_ROUND_UP(parent_rate, req->rate);
/*
* The AR100 clk contains 2 divisors:
@@ -101,14 +98,19 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
continue;
tmp_rate = (parent_rate >> shift) / div;
- if (!*best_parent_clk || tmp_rate > best_rate) {
- *best_parent_clk = __clk_get_hw(parent);
- *best_parent_rate = parent_rate;
+ if (!req->best_parent_hw || tmp_rate > best_rate) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = parent_rate;
best_rate = tmp_rate;
}
}
- return best_rate;
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+
+ return 0;
}
static int ar100_set_parent(struct clk_hw *hw, u8 index)
@@ -180,7 +182,6 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
struct resource *r;
struct clk *clk;
int nparents;
- int i;
ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
if (!ar100)
@@ -195,8 +196,7 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
if (nparents > SUN6I_AR100_MAX_PARENTS)
nparents = SUN6I_AR100_MAX_PARENTS;
- for (i = 0; i < nparents; i++)
- parents[i] = of_clk_get_parent_name(np, i);
+ of_clk_parent_fill(np, parents, nparents);
of_property_read_string(np, "clock-output-names", &clk_name);
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 14cd026064bf..bf117a636d23 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -14,8 +14,8 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of_address.h>
#include "clk-factors.h"
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 887f4ea161bb..6c4c98324d3c 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -14,8 +14,8 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/log2.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 710c273648d7..3436a948b796 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -14,14 +14,15 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#define SUN9I_MMC_WIDTH 4
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9a82f17d2d73..413070d07b3f 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -14,11 +14,13 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
@@ -118,42 +120,42 @@ static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
return (parent_rate / calcm) >> calcp;
}
-static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_clk)
+static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+ struct clk_hw *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
/* find the parent that can help provide the fastest rate <= rate */
- num_parents = __clk_get_num_parents(clk);
+ num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
- parent = clk_get_parent_by_index(clk, i);
+ parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
- if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
- parent_rate = __clk_round_rate(parent, rate);
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+ parent_rate = clk_hw_round_rate(parent, req->rate);
else
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_hw_get_rate(parent);
- child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+ child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
parent_rate);
- if (child_rate <= rate && child_rate > best_child_rate) {
+ if (child_rate <= req->rate && child_rate > best_child_rate) {
best_parent = parent;
best = parent_rate;
best_child_rate = child_rate;
}
}
- if (best_parent)
- *best_parent_clk = __clk_get_hw(best_parent);
- *best_parent_rate = best;
+ if (!best_parent)
+ return -EINVAL;
- return best_child_rate;
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best;
+ req->rate = best_child_rate;
+
+ return 0;
}
static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -195,17 +197,14 @@ static void __init sun6i_ahb1_clk_setup(struct device_node *node)
const char *clk_name = node->name;
const char *parents[SUN6I_AHB1_MAX_PARENTS];
void __iomem *reg;
- int i = 0;
+ int i;
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg))
return;
/* we have a mux, we will have >1 parents */
- while (i < SUN6I_AHB1_MAX_PARENTS &&
- (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
- i++;
-
+ i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
of_property_read_string(node, "clock-output-names", &clk_name);
ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
@@ -786,14 +785,11 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
const char *clk_name = node->name;
const char *parents[SUNXI_MAX_PARENTS];
void __iomem *reg;
- int i = 0;
+ int i;
reg = of_iomap(node, 0);
- while (i < SUNXI_MAX_PARENTS &&
- (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
- i++;
-
+ i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
of_property_read_string(node, "clock-output-names", &clk_name);
clk = clk_register_mux(NULL, clk_name, parents, i,
@@ -900,150 +896,6 @@ struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
};
-static const struct gates_data sun4i_axi_gates_data __initconst = {
- .mask = {1},
-};
-
-static const struct gates_data sun4i_ahb_gates_data __initconst = {
- .mask = {0x7F77FFF, 0x14FB3F},
-};
-
-static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = {
- .mask = {0x147667e7, 0x185915},
-};
-
-static const struct gates_data sun5i_a13_ahb_gates_data __initconst = {
- .mask = {0x107067e7, 0x185111},
-};
-
-static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = {
- .mask = {0xEDFE7F62, 0x794F931},
-};
-
-static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
- .mask = { 0x12f77fff, 0x16ff3f },
-};
-
-static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
- .mask = {0x25386742, 0x2505111},
-};
-
-static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
- .mask = {0xF5F12B},
-};
-
-static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
- .mask = {0x1E20003},
-};
-
-static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
- .mask = {0x9B7},
-};
-
-static const struct gates_data sun4i_apb0_gates_data __initconst = {
- .mask = {0x4EF},
-};
-
-static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = {
- .mask = {0x469},
-};
-
-static const struct gates_data sun5i_a13_apb0_gates_data __initconst = {
- .mask = {0x61},
-};
-
-static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
- .mask = { 0x4ff },
-};
-
-static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
- .mask = {0xEB822},
-};
-
-static const struct gates_data sun4i_apb1_gates_data __initconst = {
- .mask = {0xFF00F7},
-};
-
-static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = {
- .mask = {0xf0007},
-};
-
-static const struct gates_data sun5i_a13_apb1_gates_data __initconst = {
- .mask = {0xa0007},
-};
-
-static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
- .mask = {0x3031},
-};
-
-static const struct gates_data sun8i_a23_apb1_gates_data __initconst = {
- .mask = {0x3021},
-};
-
-static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
- .mask = {0x3F000F},
-};
-
-static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
- .mask = { 0xff80ff },
-};
-
-static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
- .mask = {0x3F001F},
-};
-
-static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
- .mask = {0x1F0007},
-};
-
-static void __init sunxi_gates_clk_setup(struct device_node *node,
- struct gates_data *data)
-{
- struct clk_onecell_data *clk_data;
- const char *clk_parent;
- const char *clk_name;
- void __iomem *reg;
- int qty;
- int i = 0;
- int j = 0;
-
- reg = of_iomap(node, 0);
-
- clk_parent = of_clk_get_parent_name(node, 0);
-
- /* Worst-case size approximation and memory allocation */
- qty = find_last_bit(data->mask, SUNXI_GATES_MAX_SIZE);
- clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
- if (!clk_data)
- return;
- clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL);
- if (!clk_data->clks) {
- kfree(clk_data);
- return;
- }
-
- for_each_set_bit(i, data->mask, SUNXI_GATES_MAX_SIZE) {
- of_property_read_string_index(node, "clock-output-names",
- j, &clk_name);
-
- clk_data->clks[i] = clk_register_gate(NULL, clk_name,
- clk_parent, 0,
- reg + 4 * (i/32), i % 32,
- 0, &clk_lock);
- WARN_ON(IS_ERR(clk_data->clks[i]));
- clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
-
- j++;
- }
-
- /* Adjust to the real max */
- clk_data->clk_num = i;
-
- of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-}
-
-
-
/**
* sunxi_divs_clk_setup() helper data
*/
@@ -1281,34 +1133,6 @@ static const struct of_device_id clk_mux_match[] __initconst = {
{}
};
-/* Matches for gate clocks */
-static const struct of_device_id clk_gates_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,},
- {.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
- {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
- {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
- {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
- {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
- {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
- {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
- {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
- {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
- {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
- {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
- {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
- {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
- {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
- {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
- {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
- {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
- {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
- {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
- {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
- {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
- {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
- {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
- {}
-};
static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
void *function)
@@ -1340,9 +1164,6 @@ static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
/* Register mux clocks */
of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
- /* Register gate clocks */
- of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
-
	/* Protect the clocks that need to stay on */
for (i = 0; i < nclocks; i++) {
struct clk *clk = clk_get(NULL, clocks[i]);
@@ -1354,7 +1175,6 @@ static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
static const char *sun4i_a10_critical_clocks[] __initdata = {
"pll5_ddr",
- "ahb_sdram",
};
static void __init sun4i_a10_init_clocks(struct device_node *node)
@@ -1367,7 +1187,6 @@ CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks)
static const char *sun5i_critical_clocks[] __initdata = {
"cpu",
"pll5_ddr",
- "ahb_sdram",
};
static void __init sun5i_init_clocks(struct device_node *node)
@@ -1391,6 +1210,7 @@ static void __init sun6i_init_clocks(struct device_node *node)
CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
static void __init sun9i_init_clocks(struct device_node *node)
{
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 3a25f9588e67..1a72cd672839 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -14,11 +14,12 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index aec862ba7a17..826c325dc2e8 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -1,5 +1,6 @@
obj-y += clk.o
obj-y += clk-audio-sync.o
+obj-y += clk-dfll.o
obj-y += clk-divider.o
obj-y += clk-periph.o
obj-y += clk-periph-gate.o
@@ -16,4 +17,6 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
+obj-y += cvb.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
new file mode 100644
index 000000000000..c2ff859ee0e8
--- /dev/null
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -0,0 +1,1757 @@
+/*
+ * clk-dfll.c - Tegra DFLL clock source common code
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * This library is for the DVCO and DFLL IP blocks on the Tegra124
+ * SoC. These IP blocks together are also known at NVIDIA as
+ * "CL-DVFS". To try to avoid confusion, this code refers to them
+ * collectively as the "DFLL."
+ *
+ * The DFLL is a root clocksource which tolerates some amount of
+ * supply voltage noise. Tegra124 uses it to clock the fast CPU
+ * complex when the target CPU speed is above a particular rate. The
+ * DFLL can be operated in either open-loop mode or closed-loop mode.
+ * In open-loop mode, the DFLL generates an output clock appropriate
+ * to the supply voltage. In closed-loop mode, when configured with a
+ * target frequency, the DFLL minimizes supply voltage while
+ * delivering an average frequency equal to the target.
+ *
+ * Devices clocked by the DFLL must be able to tolerate frequency
+ * variation. In the case of the CPU, it's important to note that the
+ * CPU cycle time will vary. This has implications for
+ * performance-measurement code and any code that relies on the CPU
+ * cycle time to delay for a certain length of time.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+
+#include "clk-dfll.h"
+
+/*
+ * DFLL control registers - access via dfll_{readl,writel}
+ */
+
+/* DFLL_CTRL: DFLL control register */
+#define DFLL_CTRL 0x00
+#define DFLL_CTRL_MODE_MASK 0x03
+
+/* DFLL_CONFIG: DFLL sample rate control */
+#define DFLL_CONFIG 0x04
+#define DFLL_CONFIG_DIV_MASK 0xff
+#define DFLL_CONFIG_DIV_PRESCALE 32
+
+/* DFLL_PARAMS: tuning coefficients for closed loop integrator */
+#define DFLL_PARAMS 0x08
+#define DFLL_PARAMS_CG_SCALE (0x1 << 24)
+#define DFLL_PARAMS_FORCE_MODE_SHIFT 22
+#define DFLL_PARAMS_FORCE_MODE_MASK (0x3 << DFLL_PARAMS_FORCE_MODE_SHIFT)
+#define DFLL_PARAMS_CF_PARAM_SHIFT 16
+#define DFLL_PARAMS_CF_PARAM_MASK (0x3f << DFLL_PARAMS_CF_PARAM_SHIFT)
+#define DFLL_PARAMS_CI_PARAM_SHIFT 8
+#define DFLL_PARAMS_CI_PARAM_MASK (0x7 << DFLL_PARAMS_CI_PARAM_SHIFT)
+#define DFLL_PARAMS_CG_PARAM_SHIFT 0
+#define DFLL_PARAMS_CG_PARAM_MASK (0xff << DFLL_PARAMS_CG_PARAM_SHIFT)
+
+/* DFLL_TUNE0: delay line configuration register 0 */
+#define DFLL_TUNE0 0x0c
+
+/* DFLL_TUNE1: delay line configuration register 1 */
+#define DFLL_TUNE1 0x10
+
+/* DFLL_FREQ_REQ: target DFLL frequency control */
+#define DFLL_FREQ_REQ 0x14
+#define DFLL_FREQ_REQ_FORCE_ENABLE (0x1 << 28)
+#define DFLL_FREQ_REQ_FORCE_SHIFT 16
+#define DFLL_FREQ_REQ_FORCE_MASK (0xfff << DFLL_FREQ_REQ_FORCE_SHIFT)
+#define FORCE_MAX 2047
+#define FORCE_MIN -2048
+#define DFLL_FREQ_REQ_SCALE_SHIFT 8
+#define DFLL_FREQ_REQ_SCALE_MASK (0xff << DFLL_FREQ_REQ_SCALE_SHIFT)
+#define DFLL_FREQ_REQ_SCALE_MAX 256
+#define DFLL_FREQ_REQ_FREQ_VALID (0x1 << 7)
+#define DFLL_FREQ_REQ_MULT_SHIFT 0
+#define DFLL_FREQ_REQ_MULT_MASK		(0x7f << DFLL_FREQ_REQ_MULT_SHIFT)
+#define FREQ_MAX 127
+
+/* DFLL_DROOP_CTRL: droop prevention control */
+#define DFLL_DROOP_CTRL 0x1c
+
+/* DFLL_OUTPUT_CFG: closed loop mode control registers */
+/* NOTE: access via dfll_i2c_{readl,writel} */
+#define DFLL_OUTPUT_CFG 0x20
+#define DFLL_OUTPUT_CFG_I2C_ENABLE (0x1 << 30)
+#define OUT_MASK 0x3f
+#define DFLL_OUTPUT_CFG_SAFE_SHIFT 24
+#define DFLL_OUTPUT_CFG_SAFE_MASK \
+ (OUT_MASK << DFLL_OUTPUT_CFG_SAFE_SHIFT)
+#define DFLL_OUTPUT_CFG_MAX_SHIFT 16
+#define DFLL_OUTPUT_CFG_MAX_MASK \
+ (OUT_MASK << DFLL_OUTPUT_CFG_MAX_SHIFT)
+#define DFLL_OUTPUT_CFG_MIN_SHIFT 8
+#define DFLL_OUTPUT_CFG_MIN_MASK \
+ (OUT_MASK << DFLL_OUTPUT_CFG_MIN_SHIFT)
+#define DFLL_OUTPUT_CFG_PWM_DELTA (0x1 << 7)
+#define DFLL_OUTPUT_CFG_PWM_ENABLE (0x1 << 6)
+#define DFLL_OUTPUT_CFG_PWM_DIV_SHIFT 0
+#define DFLL_OUTPUT_CFG_PWM_DIV_MASK \
+ (OUT_MASK << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+
+/* DFLL_OUTPUT_FORCE: closed loop mode voltage forcing control */
+#define DFLL_OUTPUT_FORCE 0x24
+#define DFLL_OUTPUT_FORCE_ENABLE (0x1 << 6)
+#define DFLL_OUTPUT_FORCE_VALUE_SHIFT 0
+#define DFLL_OUTPUT_FORCE_VALUE_MASK \
+ (OUT_MASK << DFLL_OUTPUT_FORCE_VALUE_SHIFT)
+
+/* DFLL_MONITOR_CTRL: internal monitor data source control */
+#define DFLL_MONITOR_CTRL 0x28
+#define DFLL_MONITOR_CTRL_FREQ 6
+
+/* DFLL_MONITOR_DATA: internal monitor data output */
+#define DFLL_MONITOR_DATA 0x2c
+#define DFLL_MONITOR_DATA_NEW_MASK (0x1 << 16)
+#define DFLL_MONITOR_DATA_VAL_SHIFT 0
+#define DFLL_MONITOR_DATA_VAL_MASK (0xFFFF << DFLL_MONITOR_DATA_VAL_SHIFT)
+
+/*
+ * I2C output control registers - access via dfll_i2c_{readl,writel}
+ */
+
+/* DFLL_I2C_CFG: I2C controller configuration register */
+#define DFLL_I2C_CFG 0x40
+#define DFLL_I2C_CFG_ARB_ENABLE (0x1 << 20)
+#define DFLL_I2C_CFG_HS_CODE_SHIFT 16
+#define DFLL_I2C_CFG_HS_CODE_MASK (0x7 << DFLL_I2C_CFG_HS_CODE_SHIFT)
+#define DFLL_I2C_CFG_PACKET_ENABLE (0x1 << 15)
+#define DFLL_I2C_CFG_SIZE_SHIFT 12
+#define DFLL_I2C_CFG_SIZE_MASK (0x7 << DFLL_I2C_CFG_SIZE_SHIFT)
+#define DFLL_I2C_CFG_SLAVE_ADDR_10 (0x1 << 10)
+#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT 1
+#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT 0
+
+/* DFLL_I2C_VDD_REG_ADDR: PMIC I2C address for closed loop mode */
+#define DFLL_I2C_VDD_REG_ADDR 0x44
+
+/* DFLL_I2C_STS: I2C controller status */
+#define DFLL_I2C_STS 0x48
+#define DFLL_I2C_STS_I2C_LAST_SHIFT 1
+#define DFLL_I2C_STS_I2C_REQ_PENDING 0x1
+
+/* DFLL_INTR_STS: DFLL interrupt status register */
+#define DFLL_INTR_STS 0x5c
+
+/* DFLL_INTR_EN: DFLL interrupt enable register */
+#define DFLL_INTR_EN 0x60
+#define DFLL_INTR_MIN_MASK 0x1
+#define DFLL_INTR_MAX_MASK 0x2
+
+/*
+ * Integrated I2C controller registers - relative to td->i2c_controller_base
+ */
+
+/* DFLL_I2C_CLK_DIVISOR: I2C controller clock divisor */
+#define DFLL_I2C_CLK_DIVISOR 0x6c
+#define DFLL_I2C_CLK_DIVISOR_MASK 0xffff
+#define DFLL_I2C_CLK_DIVISOR_FS_SHIFT 16
+#define DFLL_I2C_CLK_DIVISOR_HS_SHIFT 0
+#define DFLL_I2C_CLK_DIVISOR_PREDIV 8
+#define DFLL_I2C_CLK_DIVISOR_HSMODE_PREDIV 12
+
+/*
+ * Other constants
+ */
+
+/* MAX_DFLL_VOLTAGES: number of LUT entries in the DFLL IP block */
+#define MAX_DFLL_VOLTAGES 33
+
+/*
+ * REF_CLK_CYC_PER_DVCO_SAMPLE: the number of ref_clk cycles over which the
+ * hardware integrates the DVCO counter - used for debug rate monitoring and
+ * droop control
+ */
+#define REF_CLK_CYC_PER_DVCO_SAMPLE 4
+
+/*
+ * REF_CLOCK_RATE: the DFLL reference clock rate currently supported by this
+ * driver, in Hz
+ */
+#define REF_CLOCK_RATE 51000000UL
+
+#define DVCO_RATE_TO_MULT(rate, ref_rate) ((rate) / ((ref_rate) / 2))
+#define MULT_TO_DVCO_RATE(mult, ref_rate) ((mult) * ((ref_rate) / 2))
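+
+/*
+ * Illustrative arithmetic (example values, not part of the original
+ * driver): with the 51 MHz reference clock supported by this driver,
+ * one MULT step corresponds to ref_rate / 2 = 25.5 MHz of DVCO rate:
+ *
+ *	DVCO_RATE_TO_MULT(1020000000, REF_CLOCK_RATE) == 40
+ *	MULT_TO_DVCO_RATE(40, REF_CLOCK_RATE) == 1020000000
+ */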
+
+/**
+ * enum dfll_ctrl_mode - DFLL hardware operating mode
+ * @DFLL_UNINITIALIZED: (uninitialized state - not in hardware bitfield)
+ * @DFLL_DISABLED: DFLL not generating an output clock
+ * @DFLL_OPEN_LOOP: DVCO running, but DFLL not adjusting voltage
+ * @DFLL_CLOSED_LOOP: DVCO running, and DFLL adjusting voltage to match
+ * the requested rate
+ *
+ * The integer corresponding to the last three states, minus one, is
+ * written to the DFLL hardware to change operating modes.
+ */
+enum dfll_ctrl_mode {
+ DFLL_UNINITIALIZED = 0,
+ DFLL_DISABLED = 1,
+ DFLL_OPEN_LOOP = 2,
+ DFLL_CLOSED_LOOP = 3,
+};
+
+/**
+ * enum dfll_tune_range - voltage range that the driver believes it's in
+ * @DFLL_TUNE_UNINITIALIZED: DFLL tuning not yet programmed
+ * @DFLL_TUNE_LOW: DFLL in the low-voltage range (or open-loop mode)
+ *
+ * Some DFLL tuning parameters may need to change depending on the
+ * DVCO's voltage; these states represent the ranges that the driver
+ * supports. These are software states; these values are never
+ * written into registers.
+ */
+enum dfll_tune_range {
+ DFLL_TUNE_UNINITIALIZED = 0,
+ DFLL_TUNE_LOW = 1,
+};
+
+/**
+ * struct dfll_rate_req - target DFLL rate request data
+ * @rate: target frequency, after the postscaling
+ * @dvco_target_rate: target DVCO frequency, before the postscaling
+ * @lut_index: LUT index of the voltage at which dvco_target_rate will be reached
+ * @mult_bits: value to program to the MULT bits of the DFLL_FREQ_REQ register
+ * @scale_bits: value to program to the SCALE bits of the DFLL_FREQ_REQ register
+ */
+struct dfll_rate_req {
+ unsigned long rate;
+ unsigned long dvco_target_rate;
+ int lut_index;
+ u8 mult_bits;
+ u8 scale_bits;
+};
+
+struct tegra_dfll {
+ struct device *dev;
+ struct tegra_dfll_soc_data *soc;
+
+ void __iomem *base;
+ void __iomem *i2c_base;
+ void __iomem *i2c_controller_base;
+ void __iomem *lut_base;
+
+ struct regulator *vdd_reg;
+ struct clk *soc_clk;
+ struct clk *ref_clk;
+ struct clk *i2c_clk;
+ struct clk *dfll_clk;
+ struct reset_control *dvco_rst;
+ unsigned long ref_rate;
+ unsigned long i2c_clk_rate;
+ unsigned long dvco_rate_min;
+
+ enum dfll_ctrl_mode mode;
+ enum dfll_tune_range tune_range;
+ struct dentry *debugfs_dir;
+ struct clk_hw dfll_clk_hw;
+ const char *output_clock_name;
+ struct dfll_rate_req last_req;
+ unsigned long last_unrounded_rate;
+
+ /* Parameters from DT */
+ u32 droop_ctrl;
+ u32 sample_rate;
+ u32 force_mode;
+ u32 cf;
+ u32 ci;
+ u32 cg;
+ bool cg_scale;
+
+ /* I2C interface parameters */
+ u32 i2c_fs_rate;
+ u32 i2c_reg;
+ u32 i2c_slave_addr;
+
+ /* i2c_lut array entries are regulator framework selectors */
+ unsigned i2c_lut[MAX_DFLL_VOLTAGES];
+ int i2c_lut_size;
+ u8 lut_min, lut_max, lut_safe;
+};
+
+#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
+
+/* mode_name: map numeric DFLL modes to names for friendly console messages */
+static const char * const mode_name[] = {
+ [DFLL_UNINITIALIZED] = "uninitialized",
+ [DFLL_DISABLED] = "disabled",
+ [DFLL_OPEN_LOOP] = "open_loop",
+ [DFLL_CLOSED_LOOP] = "closed_loop",
+};
+
+/*
+ * Register accessors
+ */
+
+static inline u32 dfll_readl(struct tegra_dfll *td, u32 offs)
+{
+ return __raw_readl(td->base + offs);
+}
+
+static inline void dfll_writel(struct tegra_dfll *td, u32 val, u32 offs)
+{
+ WARN_ON(offs >= DFLL_I2C_CFG);
+ __raw_writel(val, td->base + offs);
+}
+
+static inline void dfll_wmb(struct tegra_dfll *td)
+{
+ dfll_readl(td, DFLL_CTRL);
+}
+
+/* I2C output control registers - for addresses above DFLL_I2C_CFG */
+
+static inline u32 dfll_i2c_readl(struct tegra_dfll *td, u32 offs)
+{
+ return __raw_readl(td->i2c_base + offs);
+}
+
+static inline void dfll_i2c_writel(struct tegra_dfll *td, u32 val, u32 offs)
+{
+ __raw_writel(val, td->i2c_base + offs);
+}
+
+static inline void dfll_i2c_wmb(struct tegra_dfll *td)
+{
+ dfll_i2c_readl(td, DFLL_I2C_CFG);
+}
+
+/**
+ * dfll_is_running - is the DFLL currently generating a clock?
+ * @td: DFLL instance
+ *
+ * If the DFLL is currently generating an output clock signal, return
+ * true; otherwise return false.
+ */
+static bool dfll_is_running(struct tegra_dfll *td)
+{
+ return td->mode >= DFLL_OPEN_LOOP;
+}
+
+/*
+ * Runtime PM suspend/resume callbacks
+ */
+
+/**
+ * tegra_dfll_runtime_resume - enable all clocks needed by the DFLL
+ * @dev: DFLL device *
+ *
+ * Enable all clocks needed by the DFLL. Assumes that clk_prepare()
+ * has already been called on all the clocks.
+ *
+ * XXX Should also handle context restore when returning from off.
+ */
+int tegra_dfll_runtime_resume(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(td->ref_clk);
+ if (ret) {
+ dev_err(dev, "could not enable ref clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(td->soc_clk);
+ if (ret) {
+ dev_err(dev, "could not enable register clock: %d\n", ret);
+ clk_disable(td->ref_clk);
+ return ret;
+ }
+
+ ret = clk_enable(td->i2c_clk);
+ if (ret) {
+ dev_err(dev, "could not enable i2c clock: %d\n", ret);
+ clk_disable(td->soc_clk);
+ clk_disable(td->ref_clk);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_runtime_resume);
+
+/**
+ * tegra_dfll_runtime_suspend - disable all clocks needed by the DFLL
+ * @dev: DFLL device *
+ *
+ * Disable all clocks needed by the DFLL. Assumes that other code
+ * will later call clk_unprepare().
+ */
+int tegra_dfll_runtime_suspend(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
+ clk_disable(td->ref_clk);
+ clk_disable(td->soc_clk);
+ clk_disable(td->i2c_clk);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_runtime_suspend);
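+
+/*
+ * A minimal wiring sketch (hypothetical; the real hookup lives in the
+ * per-SoC shim driver) showing how these callbacks would typically be
+ * installed:
+ *
+ *	static const struct dev_pm_ops tegra_dfll_pm_ops = {
+ *		SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
+ *				   tegra_dfll_runtime_resume, NULL)
+ *	};
+ */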
+
+/*
+ * DFLL tuning operations (per-voltage-range tuning settings)
+ */
+
+/**
+ * dfll_tune_low - tune the DFLL and CPU to settings valid for any voltage
+ * @td: DFLL instance
+ *
+ * Tune the DFLL oscillator parameters and the CPU clock shaper for
+ * the low-voltage range. These settings are valid for any voltage,
+ * but may not be optimal.
+ */
+static void dfll_tune_low(struct tegra_dfll *td)
+{
+ td->tune_range = DFLL_TUNE_LOW;
+
+ dfll_writel(td, td->soc->tune0_low, DFLL_TUNE0);
+ dfll_writel(td, td->soc->tune1, DFLL_TUNE1);
+ dfll_wmb(td);
+
+ if (td->soc->set_clock_trimmers_low)
+ td->soc->set_clock_trimmers_low();
+}
+
+/*
+ * Output clock scaler helpers
+ */
+
+/**
+ * dfll_scale_dvco_rate - calculate scaled rate from the DVCO rate
+ * @scale_bits: clock scaler value (bits in the DFLL_FREQ_REQ_SCALE field)
+ * @dvco_rate: the DVCO rate
+ *
+ * Apply the same scaling formula that the DFLL hardware uses to scale
+ * the DVCO rate.
+ */
+static unsigned long dfll_scale_dvco_rate(int scale_bits,
+ unsigned long dvco_rate)
+{
+ return (u64)dvco_rate * (scale_bits + 1) / DFLL_FREQ_REQ_SCALE_MAX;
+}
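+
+/*
+ * Worked example (illustrative): the hardware scales the DVCO rate by
+ * (scale_bits + 1) / 256, so scale_bits == 127 halves the rate:
+ *
+ *	dfll_scale_dvco_rate(127, 1020000000) == 510000000
+ *	dfll_scale_dvco_rate(255, 1020000000) == 1020000000
+ */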
+
+/*
+ * Monitor control
+ */
+
+/**
+ * dfll_calc_monitored_rate - convert DFLL_MONITOR_DATA_VAL rate into real freq
+ * @monitor_data: value read from the DFLL_MONITOR_DATA_VAL bitfield
+ * @ref_rate: DFLL reference clock rate
+ *
+ * Convert @monitor_data from DFLL_MONITOR_DATA_VAL units into cycles
+ * per second. Returns the converted value.
+ */
+static u64 dfll_calc_monitored_rate(u32 monitor_data,
+ unsigned long ref_rate)
+{
+ return monitor_data * (ref_rate / REF_CLK_CYC_PER_DVCO_SAMPLE);
+}
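+
+/*
+ * Illustrative numbers: at the 51 MHz reference rate, one monitor count
+ * equals 51000000 / 4 = 12.75 MHz, so a DFLL_MONITOR_DATA_VAL reading
+ * of 80 corresponds to 80 * 12750000 = 1.02 GHz.
+ */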
+
+/**
+ * dfll_read_monitor_rate - return the DFLL's output rate from internal monitor
+ * @td: DFLL instance
+ *
+ * If the DFLL is enabled, return the last rate reported by the DFLL's
+ * internal monitoring hardware. This works in both open-loop and
+ * closed-loop mode, and takes the output scaler setting into account.
+ * Assumes that the monitor was programmed to monitor frequency before
+ * the sample period started. If the driver believes that the DFLL is
+ * currently uninitialized or disabled, it will return 0, since
+ * otherwise the DFLL monitor data register will return the last
+ * measured rate from when the DFLL was active.
+ */
+static u64 dfll_read_monitor_rate(struct tegra_dfll *td)
+{
+ u32 v, s;
+ u64 pre_scaler_rate, post_scaler_rate;
+
+ if (!dfll_is_running(td))
+ return 0;
+
+ v = dfll_readl(td, DFLL_MONITOR_DATA);
+ v = (v & DFLL_MONITOR_DATA_VAL_MASK) >> DFLL_MONITOR_DATA_VAL_SHIFT;
+ pre_scaler_rate = dfll_calc_monitored_rate(v, td->ref_rate);
+
+ s = dfll_readl(td, DFLL_FREQ_REQ);
+ s = (s & DFLL_FREQ_REQ_SCALE_MASK) >> DFLL_FREQ_REQ_SCALE_SHIFT;
+ post_scaler_rate = dfll_scale_dvco_rate(s, pre_scaler_rate);
+
+ return post_scaler_rate;
+}
+
+/*
+ * DFLL mode switching
+ */
+
+/**
+ * dfll_set_mode - change the DFLL control mode
+ * @td: DFLL instance
+ * @mode: DFLL control mode (see enum dfll_ctrl_mode)
+ *
+ * Change the DFLL's operating mode among the disabled, open-loop,
+ * and closed-loop states.
+ */
+static void dfll_set_mode(struct tegra_dfll *td,
+ enum dfll_ctrl_mode mode)
+{
+ td->mode = mode;
+ dfll_writel(td, mode - 1, DFLL_CTRL);
+ dfll_wmb(td);
+}
+
+/*
+ * DFLL-to-I2C controller interface
+ */
+
+/**
+ * dfll_i2c_set_output_enabled - enable/disable I2C PMIC voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the I2C voltage requests
+ *
+ * Set the master enable control for I2C control value updates. If disabled,
+ * then I2C control messages are inhibited, regardless of the DFLL mode.
+ */
+static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ u32 val;
+
+ val = dfll_i2c_readl(td, DFLL_OUTPUT_CFG);
+
+ if (enable)
+ val |= DFLL_OUTPUT_CFG_I2C_ENABLE;
+ else
+ val &= ~DFLL_OUTPUT_CFG_I2C_ENABLE;
+
+ dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_i2c_wmb(td);
+
+ return 0;
+}
+
+/**
+ * dfll_load_i2c_lut - load the I2C voltage lookup table
+ * @td: DFLL instance
+ *
+ * Load the voltage-to-PMIC register value lookup table into the DFLL
+ * IP block memory. Look-up tables can be loaded at any time.
+ */
+static void dfll_load_i2c_lut(struct tegra_dfll *td)
+{
+ int i, lut_index;
+ u32 val;
+
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+ if (i < td->lut_min)
+ lut_index = td->lut_min;
+ else if (i > td->lut_max)
+ lut_index = td->lut_max;
+ else
+ lut_index = i;
+
+ val = regulator_list_hardware_vsel(td->vdd_reg,
+ td->i2c_lut[lut_index]);
+ __raw_writel(val, td->lut_base + i * 4);
+ }
+
+ dfll_i2c_wmb(td);
+}
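+
+/*
+ * Clamping example (hypothetical lut_min/lut_max values): with
+ * lut_min == 2 and lut_max == 30, LUT RAM entries 0-1 are written with
+ * the selector for index 2 and entries 31-32 with the selector for
+ * index 30, so the hardware can never request a voltage outside the
+ * allowed window.
+ */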
+
+/**
+ * dfll_init_i2c_if - set up the DFLL's DFLL-I2C interface
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization, program the DFLL-I2C interface
+ * with the PMIC slave address, vdd register offset, and transfer mode.
+ * This data is used by the DFLL to automatically construct I2C
+ * voltage-set commands, which are then passed to the DFLL's internal
+ * I2C controller.
+ */
+static void dfll_init_i2c_if(struct tegra_dfll *td)
+{
+ u32 val;
+
+ if (td->i2c_slave_addr > 0x7f) {
+ val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT;
+ val |= DFLL_I2C_CFG_SLAVE_ADDR_10;
+ } else {
+ val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT;
+ }
+ val |= DFLL_I2C_CFG_SIZE_MASK;
+ val |= DFLL_I2C_CFG_ARB_ENABLE;
+ dfll_i2c_writel(td, val, DFLL_I2C_CFG);
+
+ dfll_i2c_writel(td, td->i2c_reg, DFLL_I2C_VDD_REG_ADDR);
+
+ val = DIV_ROUND_UP(td->i2c_clk_rate, td->i2c_fs_rate * 8);
+ BUG_ON(!val || (val > DFLL_I2C_CLK_DIVISOR_MASK));
+ val = (val - 1) << DFLL_I2C_CLK_DIVISOR_FS_SHIFT;
+
+ /* default hs divisor just in case */
+ val |= 1 << DFLL_I2C_CLK_DIVISOR_HS_SHIFT;
+ __raw_writel(val, td->i2c_controller_base + DFLL_I2C_CLK_DIVISOR);
+ dfll_i2c_wmb(td);
+}
+
+/**
+ * dfll_init_out_if - prepare DFLL-to-PMIC interface
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization or resume from context loss,
+ * disable the I2C command output to the PMIC, set safe voltage and
+ * output limits, and disable and clear limit interrupts.
+ */
+static void dfll_init_out_if(struct tegra_dfll *td)
+{
+ u32 val;
+
+ td->lut_min = 0;
+ td->lut_max = td->i2c_lut_size - 1;
+ td->lut_safe = td->lut_min + 1;
+
+ dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
+ val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
+ (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+ (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+ dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_i2c_wmb(td);
+
+ dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
+ dfll_i2c_writel(td, 0, DFLL_INTR_EN);
+ dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
+ DFLL_INTR_STS);
+
+ dfll_load_i2c_lut(td);
+ dfll_init_i2c_if(td);
+}
+
+/*
+ * Set/get the DFLL's targeted output clock rate
+ */
+
+/**
+ * find_lut_index_for_rate - determine I2C LUT index for given DFLL rate
+ * @td: DFLL instance
+ * @rate: clock rate
+ *
+ * Determines the index of an I2C LUT entry for a voltage that approximately
+ * produces the given DFLL clock rate. This is used when forcing a value
+ * to the integrator during rate changes. Returns -ENOENT if a suitable
+ * LUT index is not found.
+ */
+static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
+{
+ struct dev_pm_opp *opp;
+ int i, uv;
+
+ opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ uv = dev_pm_opp_get_voltage(opp);
+
+ for (i = 0; i < td->i2c_lut_size; i++) {
+ if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * dfll_calculate_rate_request - calculate DFLL parameters for a given rate
+ * @td: DFLL instance
+ * @req: DFLL-rate-request structure
+ * @rate: the desired DFLL rate
+ *
+ * Populate the scale_bits and mult_bits fields of the DFLL-rate-request
+ * record @req, based on the target rate @rate. Returns 0 upon success,
+ * or -EINVAL if @rate is too high or too low for the DFLL to generate.
+ */
+static int dfll_calculate_rate_request(struct tegra_dfll *td,
+ struct dfll_rate_req *req,
+ unsigned long rate)
+{
+ u32 val;
+
+ /*
+	 * If the requested rate is below the minimum DVCO rate, activate the scaler.
+ * In the future the DVCO minimum voltage should be selected based on
+ * chip temperature and the actual minimum rate should be calibrated
+ * at runtime.
+ */
+ req->scale_bits = DFLL_FREQ_REQ_SCALE_MAX - 1;
+ if (rate < td->dvco_rate_min) {
+ int scale;
+
+ scale = DIV_ROUND_CLOSEST(rate / 1000 * DFLL_FREQ_REQ_SCALE_MAX,
+ td->dvco_rate_min / 1000);
+ if (!scale) {
+ dev_err(td->dev, "%s: Rate %lu is too low\n",
+ __func__, rate);
+ return -EINVAL;
+ }
+ req->scale_bits = scale - 1;
+ rate = td->dvco_rate_min;
+ }
+
+ /* Convert requested rate into frequency request and scale settings */
+ val = DVCO_RATE_TO_MULT(rate, td->ref_rate);
+ if (val > FREQ_MAX) {
+ dev_err(td->dev, "%s: Rate %lu is above dfll range\n",
+ __func__, rate);
+ return -EINVAL;
+ }
+ req->mult_bits = val;
+ req->dvco_target_rate = MULT_TO_DVCO_RATE(req->mult_bits, td->ref_rate);
+ req->rate = dfll_scale_dvco_rate(req->scale_bits,
+ req->dvco_target_rate);
+ req->lut_index = find_lut_index_for_rate(td, req->dvco_target_rate);
+ if (req->lut_index < 0)
+ return req->lut_index;
+
+ return 0;
+}
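+
+/*
+ * Worked example (hypothetical dvco_rate_min of 612 MHz): a request for
+ * 408 MHz falls below the DVCO minimum, so the output scaler is used:
+ *
+ *	scale = DIV_ROUND_CLOSEST(408000 * 256, 612000) == 171
+ *	req->scale_bits == 170, rate is raised to 612 MHz
+ *	req->mult_bits == 612000000 / 25500000 == 24
+ *	req->rate == 612000000 * 171 / 256 == 408796875 (~408.8 MHz)
+ */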
+
+/**
+ * dfll_set_frequency_request - start the frequency change operation
+ * @td: DFLL instance
+ * @req: rate request structure
+ *
+ * Tell the DFLL to try to change its output frequency to the
+ * frequency represented by @req. The DFLL must be in closed-loop mode.
+ */
+static void dfll_set_frequency_request(struct tegra_dfll *td,
+ struct dfll_rate_req *req)
+{
+ u32 val = 0;
+ int force_val;
+	int coef = 128; /* FIXME: td->cg_scale? */
+
+ force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
+ force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
+
+ val |= req->mult_bits << DFLL_FREQ_REQ_MULT_SHIFT;
+ val |= req->scale_bits << DFLL_FREQ_REQ_SCALE_SHIFT;
+ val |= ((u32)force_val << DFLL_FREQ_REQ_FORCE_SHIFT) &
+ DFLL_FREQ_REQ_FORCE_MASK;
+ val |= DFLL_FREQ_REQ_FREQ_VALID | DFLL_FREQ_REQ_FORCE_ENABLE;
+
+ dfll_writel(td, val, DFLL_FREQ_REQ);
+ dfll_wmb(td);
+}
+
+/**
+ * dfll_request_rate - set the next rate for the DFLL to tune to
+ * @td: DFLL instance
+ * @rate: clock rate to target
+ *
+ * Convert the requested clock rate @rate into the DFLL control logic
+ * settings. In closed-loop mode, the new settings take effect
+ * immediately, adjusting the DFLL output rate; otherwise they are
+ * saved until the next switch to closed-loop mode. Returns 0 upon success,
+ * -EPERM if the DFLL driver has not yet been initialized, or -EINVAL
+ * if @rate is outside the DFLL's tunable range.
+ */
+static int dfll_request_rate(struct tegra_dfll *td, unsigned long rate)
+{
+ int ret;
+ struct dfll_rate_req req;
+
+ if (td->mode == DFLL_UNINITIALIZED) {
+ dev_err(td->dev, "%s: Cannot set DFLL rate in %s mode\n",
+ __func__, mode_name[td->mode]);
+ return -EPERM;
+ }
+
+ ret = dfll_calculate_rate_request(td, &req, rate);
+ if (ret)
+ return ret;
+
+ td->last_unrounded_rate = rate;
+ td->last_req = req;
+
+ if (td->mode == DFLL_CLOSED_LOOP)
+ dfll_set_frequency_request(td, &td->last_req);
+
+ return 0;
+}
+
+/*
+ * DFLL enable/disable & open-loop <-> closed-loop transitions
+ */
+
+/**
+ * dfll_disable - switch from open-loop mode to disabled mode
+ * @td: DFLL instance
+ *
+ * Switch from OPEN_LOOP state to DISABLED state. Returns 0 upon success
+ * or -EINVAL if the DFLL is not currently in open-loop mode.
+ */
+static int dfll_disable(struct tegra_dfll *td)
+{
+ if (td->mode != DFLL_OPEN_LOOP) {
+ dev_err(td->dev, "cannot disable DFLL in %s mode\n",
+ mode_name[td->mode]);
+ return -EINVAL;
+ }
+
+ dfll_set_mode(td, DFLL_DISABLED);
+ pm_runtime_put_sync(td->dev);
+
+ return 0;
+}
+
+/**
+ * dfll_enable - switch a disabled DFLL to open-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from DISABLED state to OPEN_LOOP state. Returns 0 upon success
+ * or -EPERM if the DFLL is not currently disabled.
+ */
+static int dfll_enable(struct tegra_dfll *td)
+{
+ if (td->mode != DFLL_DISABLED) {
+ dev_err(td->dev, "cannot enable DFLL in %s mode\n",
+ mode_name[td->mode]);
+ return -EPERM;
+ }
+
+ pm_runtime_get_sync(td->dev);
+ dfll_set_mode(td, DFLL_OPEN_LOOP);
+
+ return 0;
+}
+
+/**
+ * dfll_set_open_loop_config - prepare to switch to open-loop mode
+ * @td: DFLL instance
+ *
+ * Prepare to switch the DFLL to open-loop mode. This switches the
+ * DFLL to the low-voltage tuning range, ensures that I2C output
+ * forcing is disabled, and disables the output clock rate scaler.
+ * The DFLL's low-voltage tuning range parameters must be
+ * characterized to keep the downstream device stable at any DVCO
+ * input voltage. No return value.
+ */
+static void dfll_set_open_loop_config(struct tegra_dfll *td)
+{
+ u32 val;
+
+ /* always tune low (safe) in open loop */
+ if (td->tune_range != DFLL_TUNE_LOW)
+ dfll_tune_low(td);
+
+ val = dfll_readl(td, DFLL_FREQ_REQ);
+ val |= DFLL_FREQ_REQ_SCALE_MASK;
+ val &= ~DFLL_FREQ_REQ_FORCE_ENABLE;
+ dfll_writel(td, val, DFLL_FREQ_REQ);
+ dfll_wmb(td);
+}
+
+/**
+ * dfll_lock - switch from open-loop to closed-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from OPEN_LOOP state to CLOSED_LOOP state. Returns 0 upon success,
+ * -EINVAL if the DFLL's target rate hasn't been set yet, or -EPERM if the
+ * DFLL is not currently in open-loop mode.
+ */
+static int dfll_lock(struct tegra_dfll *td)
+{
+ struct dfll_rate_req *req = &td->last_req;
+
+ switch (td->mode) {
+ case DFLL_CLOSED_LOOP:
+ return 0;
+
+ case DFLL_OPEN_LOOP:
+ if (req->rate == 0) {
+ dev_err(td->dev, "%s: Cannot lock DFLL at rate 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dfll_i2c_set_output_enabled(td, true);
+ dfll_set_mode(td, DFLL_CLOSED_LOOP);
+ dfll_set_frequency_request(td, req);
+ return 0;
+
+ default:
+ BUG_ON(td->mode > DFLL_CLOSED_LOOP);
+ dev_err(td->dev, "%s: Cannot lock DFLL in %s mode\n",
+ __func__, mode_name[td->mode]);
+ return -EPERM;
+ }
+}
+
+/**
+ * dfll_unlock - switch from closed-loop to open-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from CLOSED_LOOP state to OPEN_LOOP state. Returns 0 upon success
+ * (or if the DFLL was already in open-loop mode), or -EPERM if the DFLL
+ * is currently disabled or uninitialized.
+ */
+static int dfll_unlock(struct tegra_dfll *td)
+{
+ switch (td->mode) {
+ case DFLL_CLOSED_LOOP:
+ dfll_set_open_loop_config(td);
+ dfll_set_mode(td, DFLL_OPEN_LOOP);
+ dfll_i2c_set_output_enabled(td, false);
+ return 0;
+
+ case DFLL_OPEN_LOOP:
+ return 0;
+
+ default:
+ BUG_ON(td->mode > DFLL_CLOSED_LOOP);
+ dev_err(td->dev, "%s: Cannot unlock DFLL in %s mode\n",
+ __func__, mode_name[td->mode]);
+ return -EPERM;
+ }
+}
+
+/*
+ * Clock framework integration
+ *
+ * When the DFLL is being controlled by the CCF, always enter closed loop
+ * mode when the clk is enabled. This requires that a DFLL rate request
+ * has been set beforehand, which implies that a clk_set_rate() call is
+ * always required before a clk_enable().
+ */
+
+static int dfll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+ return dfll_is_running(td);
+}
+
+static int dfll_clk_enable(struct clk_hw *hw)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+ int ret;
+
+ ret = dfll_enable(td);
+ if (ret)
+ return ret;
+
+ ret = dfll_lock(td);
+ if (ret)
+ dfll_disable(td);
+
+ return ret;
+}
+
+static void dfll_clk_disable(struct clk_hw *hw)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+ int ret;
+
+ ret = dfll_unlock(td);
+ if (!ret)
+ dfll_disable(td);
+}
+
+static unsigned long dfll_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+ return td->last_unrounded_rate;
+}
+
+static long dfll_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+ struct dfll_rate_req req;
+ int ret;
+
+ ret = dfll_calculate_rate_request(td, &req, rate);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't return the rounded rate, since it doesn't really matter as
+ * the output rate will be voltage controlled anyway, and cpufreq
+ * freaks out if any rounding happens.
+ */
+ return rate;
+}
+
+static int dfll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+ return dfll_request_rate(td, rate);
+}
+
+static const struct clk_ops dfll_clk_ops = {
+ .is_enabled = dfll_clk_is_enabled,
+ .enable = dfll_clk_enable,
+ .disable = dfll_clk_disable,
+ .recalc_rate = dfll_clk_recalc_rate,
+ .round_rate = dfll_clk_round_rate,
+ .set_rate = dfll_clk_set_rate,
+};
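+
+/*
+ * Illustrative consumer sequence (per the constraint described above,
+ * a rate request must precede enabling the clock):
+ *
+ *	clk_set_rate(dfll_clk, 1020000000);
+ *	clk_prepare_enable(dfll_clk);
+ */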
+
+static struct clk_init_data dfll_clk_init_data = {
+ .flags = CLK_IS_ROOT,
+ .ops = &dfll_clk_ops,
+ .num_parents = 0,
+};
+
+/**
+ * dfll_register_clk - register the DFLL output clock with the clock framework
+ * @td: DFLL instance
+ *
+ * Register the DFLL's output clock with the Linux clock framework and register
+ * the DFLL driver as an OF clock provider. Returns 0 upon success or -EINVAL
+ * or -ENOMEM upon failure.
+ */
+static int dfll_register_clk(struct tegra_dfll *td)
+{
+ int ret;
+
+ dfll_clk_init_data.name = td->output_clock_name;
+ td->dfll_clk_hw.init = &dfll_clk_init_data;
+
+ td->dfll_clk = clk_register(td->dev, &td->dfll_clk_hw);
+ if (IS_ERR(td->dfll_clk)) {
+ dev_err(td->dev, "DFLL clock registration error\n");
+ return -EINVAL;
+ }
+
+ ret = of_clk_add_provider(td->dev->of_node, of_clk_src_simple_get,
+ td->dfll_clk);
+ if (ret) {
+ dev_err(td->dev, "of_clk_add_provider() failed\n");
+
+ clk_unregister(td->dfll_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dfll_unregister_clk - unregister the DFLL output clock
+ * @td: DFLL instance
+ *
+ * Unregister the DFLL's output clock from the Linux clock framework
+ * and from clkdev. No return value.
+ */
+static void dfll_unregister_clk(struct tegra_dfll *td)
+{
+ of_clk_del_provider(td->dev->of_node);
+ clk_unregister(td->dfll_clk);
+ td->dfll_clk = NULL;
+}
+
+/*
+ * Debugfs interface
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int attr_enable_get(void *data, u64 *val)
+{
+ struct tegra_dfll *td = data;
+
+ *val = dfll_is_running(td);
+
+ return 0;
+}
+static int attr_enable_set(void *data, u64 val)
+{
+ struct tegra_dfll *td = data;
+
+ return val ? dfll_enable(td) : dfll_disable(td);
+}
+DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
+ "%llu\n");
+
+static int attr_lock_get(void *data, u64 *val)
+{
+ struct tegra_dfll *td = data;
+
+ *val = (td->mode == DFLL_CLOSED_LOOP);
+
+ return 0;
+}
+static int attr_lock_set(void *data, u64 val)
+{
+ struct tegra_dfll *td = data;
+
+ return val ? dfll_lock(td) : dfll_unlock(td);
+}
+DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set,
+ "%llu\n");
+
+static int attr_rate_get(void *data, u64 *val)
+{
+ struct tegra_dfll *td = data;
+
+ *val = dfll_read_monitor_rate(td);
+
+ return 0;
+}
+
+static int attr_rate_set(void *data, u64 val)
+{
+ struct tegra_dfll *td = data;
+
+ return dfll_request_rate(td, val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
+
+static int attr_registers_show(struct seq_file *s, void *data)
+{
+ u32 val, offs;
+ struct tegra_dfll *td = s->private;
+
+ seq_puts(s, "CONTROL REGISTERS:\n");
+ for (offs = 0; offs <= DFLL_MONITOR_DATA; offs += 4) {
+ if (offs == DFLL_OUTPUT_CFG)
+ val = dfll_i2c_readl(td, offs);
+ else
+ val = dfll_readl(td, offs);
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs, val);
+ }
+
+ seq_puts(s, "\nI2C and INTR REGISTERS:\n");
+ for (offs = DFLL_I2C_CFG; offs <= DFLL_I2C_STS; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ dfll_i2c_readl(td, offs));
+ for (offs = DFLL_INTR_STS; offs <= DFLL_INTR_EN; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ dfll_i2c_readl(td, offs));
+
+ seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+ offs = DFLL_I2C_CLK_DIVISOR;
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ __raw_readl(td->i2c_controller_base + offs));
+
+ seq_puts(s, "\nLUT:\n");
+ for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ __raw_readl(td->lut_base + offs));
+
+ return 0;
+}
+
+static int attr_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, attr_registers_show, inode->i_private);
+}
+
+static const struct file_operations attr_registers_fops = {
+ .open = attr_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dfll_debug_init(struct tegra_dfll *td)
+{
+ int ret;
+
+ if (!td || (td->mode == DFLL_UNINITIALIZED))
+ return 0;
+
+ td->debugfs_dir = debugfs_create_dir("tegra_dfll_fcpu", NULL);
+ if (!td->debugfs_dir)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+
+ if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR,
+ td->debugfs_dir, td, &enable_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("lock", S_IRUGO,
+ td->debugfs_dir, td, &lock_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("rate", S_IRUGO,
+ td->debugfs_dir, td, &rate_fops))
+ goto err_out;
+
+ if (!debugfs_create_file("registers", S_IRUGO,
+ td->debugfs_dir, td, &attr_registers_fops))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(td->debugfs_dir);
+ return ret;
+}
+
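+/*
+ * Example interaction from userspace (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/tegra_dfll_fcpu/enable
+ *	cat /sys/kernel/debug/tegra_dfll_fcpu/rate
+ */
+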
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * DFLL initialization
+ */
+
+/**
+ * dfll_set_default_params - program non-output related DFLL parameters
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization or resume from context loss,
+ * program parameters for the closed loop integrator, DVCO tuning,
+ * voltage droop control and monitor control.
+ */
+static void dfll_set_default_params(struct tegra_dfll *td)
+{
+ u32 val;
+
+	val = DIV_ROUND_UP(td->ref_rate,
+			   td->sample_rate * DFLL_CONFIG_DIV_PRESCALE);
+ BUG_ON(val > DFLL_CONFIG_DIV_MASK);
+ dfll_writel(td, val, DFLL_CONFIG);
+
+ val = (td->force_mode << DFLL_PARAMS_FORCE_MODE_SHIFT) |
+ (td->cf << DFLL_PARAMS_CF_PARAM_SHIFT) |
+ (td->ci << DFLL_PARAMS_CI_PARAM_SHIFT) |
+ (td->cg << DFLL_PARAMS_CG_PARAM_SHIFT) |
+ (td->cg_scale ? DFLL_PARAMS_CG_SCALE : 0);
+ dfll_writel(td, val, DFLL_PARAMS);
+
+ dfll_tune_low(td);
+ dfll_writel(td, td->droop_ctrl, DFLL_DROOP_CTRL);
+ dfll_writel(td, DFLL_MONITOR_CTRL_FREQ, DFLL_MONITOR_CTRL);
+}
+
+/**
+ * dfll_init_clks - clk_get() the DFLL source clocks
+ * @td: DFLL instance
+ *
+ * Call clk_get() on the DFLL source clocks and save the pointers for later
+ * use. Returns 0 upon success, or the error from devm_clk_get() if one
+ * or more of the clocks couldn't be looked up.
+ */
+static int dfll_init_clks(struct tegra_dfll *td)
+{
+ td->ref_clk = devm_clk_get(td->dev, "ref");
+ if (IS_ERR(td->ref_clk)) {
+ dev_err(td->dev, "missing ref clock\n");
+ return PTR_ERR(td->ref_clk);
+ }
+
+ td->soc_clk = devm_clk_get(td->dev, "soc");
+ if (IS_ERR(td->soc_clk)) {
+ dev_err(td->dev, "missing soc clock\n");
+ return PTR_ERR(td->soc_clk);
+ }
+
+ td->i2c_clk = devm_clk_get(td->dev, "i2c");
+ if (IS_ERR(td->i2c_clk)) {
+ dev_err(td->dev, "missing i2c clock\n");
+ return PTR_ERR(td->i2c_clk);
+ }
+ td->i2c_clk_rate = clk_get_rate(td->i2c_clk);
+
+ return 0;
+}
+
+/**
+ * dfll_init - Prepare the DFLL IP block for use
+ * @td: DFLL instance
+ *
+ * Do everything necessary to prepare the DFLL IP block for use. The
+ * DFLL will be left in DISABLED state. Called by dfll_probe().
+ * Returns 0 upon success, or passes along the error from whatever
+ * function returned it.
+ */
+static int dfll_init(struct tegra_dfll *td)
+{
+ int ret;
+
+ td->ref_rate = clk_get_rate(td->ref_clk);
+ if (td->ref_rate != REF_CLOCK_RATE) {
+ dev_err(td->dev, "unexpected ref clk rate %lu, expecting %lu",
+ td->ref_rate, REF_CLOCK_RATE);
+ return -EINVAL;
+ }
+
+ reset_control_deassert(td->dvco_rst);
+
+ ret = clk_prepare(td->ref_clk);
+ if (ret) {
+ dev_err(td->dev, "failed to prepare ref_clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare(td->soc_clk);
+ if (ret) {
+ dev_err(td->dev, "failed to prepare soc_clk\n");
+ goto di_err1;
+ }
+
+ ret = clk_prepare(td->i2c_clk);
+ if (ret) {
+ dev_err(td->dev, "failed to prepare i2c_clk\n");
+ goto di_err2;
+ }
+
+ td->last_unrounded_rate = 0;
+
+ pm_runtime_enable(td->dev);
+ pm_runtime_get_sync(td->dev);
+
+ dfll_set_mode(td, DFLL_DISABLED);
+ dfll_set_default_params(td);
+
+ if (td->soc->init_clock_trimmers)
+ td->soc->init_clock_trimmers();
+
+ dfll_set_open_loop_config(td);
+
+ dfll_init_out_if(td);
+
+ pm_runtime_put_sync(td->dev);
+
+ return 0;
+
+di_err2:
+ clk_unprepare(td->soc_clk);
+di_err1:
+ clk_unprepare(td->ref_clk);
+
+ reset_control_assert(td->dvco_rst);
+
+ return ret;
+}
+
+/*
+ * DT data fetch
+ */
+
+/*
+ * Find a PMIC voltage register-to-voltage mapping for the given voltage.
+ * An exact voltage match is required.
+ */
+static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
+{
+ int i, n_voltages, reg_uV;
+
+ n_voltages = regulator_count_voltages(td->vdd_reg);
+ for (i = 0; i < n_voltages; i++) {
+ reg_uV = regulator_list_voltage(td->vdd_reg, i);
+ if (reg_uV < 0)
+ break;
+
+ if (uV == reg_uV)
+ return i;
+ }
+
+ dev_err(td->dev, "no voltage map entry for %d uV\n", uV);
+ return -EINVAL;
+}
+
+/*
+ * Find a PMIC voltage register-to-voltage mapping for the given voltage,
+ * rounding up to the closest supported voltage.
+ */
+static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
+{
+ int i, n_voltages, reg_uV;
+
+ n_voltages = regulator_count_voltages(td->vdd_reg);
+ for (i = 0; i < n_voltages; i++) {
+ reg_uV = regulator_list_voltage(td->vdd_reg, i);
+ if (reg_uV < 0)
+ break;
+
+ if (uV <= reg_uV)
+ return i;
+ }
+
+ dev_err(td->dev, "no voltage map entry rounding to %d uV\n", uV);
+ return -EINVAL;
+}
+
+/**
+ * dfll_build_i2c_lut - build the I2C voltage register lookup table
+ * @td: DFLL instance
+ *
+ * The DFLL hardware has a 33-entry look-up table RAM that must be filled with
+ * PMIC voltage register values that span the entire DFLL operating range.
+ * This function builds the look-up table based on the OPP table provided by
+ * the SoC-specific platform driver (td->soc->dev) and the PMIC
+ * register-to-voltage mapping queried from the regulator framework.
+ *
+ * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ */
+static int dfll_build_i2c_lut(struct tegra_dfll *td)
+{
+ int ret = -EINVAL;
+ int j, v, v_max, v_opp;
+ int selector;
+ unsigned long rate;
+ struct dev_pm_opp *opp;
+ int lut;
+
+ rcu_read_lock();
+
+ rate = ULONG_MAX;
+ opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+ goto out;
+ }
+ v_max = dev_pm_opp_get_voltage(opp);
+
+ v = td->soc->min_millivolts * 1000;
+ lut = find_vdd_map_entry_exact(td, v);
+ if (lut < 0)
+ goto out;
+ td->i2c_lut[0] = lut;
+
+ for (j = 1, rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+ if (IS_ERR(opp))
+ break;
+ v_opp = dev_pm_opp_get_voltage(opp);
+
+ if (v_opp <= td->soc->min_millivolts * 1000)
+ td->dvco_rate_min = dev_pm_opp_get_freq(opp);
+
+ for (;;) {
+ v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+ if (v >= v_opp)
+ break;
+
+ selector = find_vdd_map_entry_min(td, v);
+ if (selector < 0)
+ goto out;
+ if (selector != td->i2c_lut[j - 1])
+ td->i2c_lut[j++] = selector;
+ }
+
+ v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
+ selector = find_vdd_map_entry_exact(td, v);
+ if (selector < 0)
+ goto out;
+ if (selector != td->i2c_lut[j - 1])
+ td->i2c_lut[j++] = selector;
+
+ if (v >= v_max)
+ break;
+ }
+ td->i2c_lut_size = j;
+
+ if (!td->dvco_rate_min)
+ dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
+ td->soc->min_millivolts);
+ else
+ ret = 0;
+
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
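+
+/*
+ * Illustrative outcome (hypothetical regulator with 10 mV steps): for a
+ * 900 mV minimum and a 1260 mV maximum OPP voltage, the loop above
+ * spreads at most 33 distinct selectors between the two endpoints,
+ * skipping duplicates, so i2c_lut_size may end up smaller than
+ * MAX_DFLL_VOLTAGES.
+ */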
+
+/**
+ * read_dt_param - helper function for reading required parameters from the DT
+ * @td: DFLL instance
+ * @param: DT property name
+ * @dest: output pointer for the value read
+ *
+ * Read a required numeric parameter from the DFLL device node, or complain
+ * if the property doesn't exist. Returns a boolean indicating success for
+ * easy chaining of multiple calls to this function.
+ */
+static bool read_dt_param(struct tegra_dfll *td, const char *param, u32 *dest)
+{
+ int err = of_property_read_u32(td->dev->of_node, param, dest);
+
+ if (err < 0) {
+ dev_err(td->dev, "failed to read DT parameter %s: %d\n",
+ param, err);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * dfll_fetch_i2c_params - query PMIC I2C params from DT & regulator subsystem
+ * @td: DFLL instance
+ *
+ * Read all the parameters required for operation in I2C mode. The parameters
+ * can originate from the device tree or the regulator subsystem.
+ * Returns 0 on success or -err on failure.
+ */
+static int dfll_fetch_i2c_params(struct tegra_dfll *td)
+{
+ struct regmap *regmap;
+ struct device *i2c_dev;
+ struct i2c_client *i2c_client;
+ int vsel_reg, vsel_mask;
+ int ret;
+
+ if (!read_dt_param(td, "nvidia,i2c-fs-rate", &td->i2c_fs_rate))
+ return -EINVAL;
+
+ regmap = regulator_get_regmap(td->vdd_reg);
+ i2c_dev = regmap_get_device(regmap);
+ i2c_client = to_i2c_client(i2c_dev);
+
+ td->i2c_slave_addr = i2c_client->addr;
+
+ ret = regulator_get_hardware_vsel_register(td->vdd_reg,
+ &vsel_reg,
+ &vsel_mask);
+ if (ret < 0) {
+ dev_err(td->dev,
+ "regulator unsuitable for DFLL I2C operation\n");
+ return -EINVAL;
+ }
+ td->i2c_reg = vsel_reg;
+
+ ret = dfll_build_i2c_lut(td);
+ if (ret) {
+ dev_err(td->dev, "couldn't build I2C LUT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dfll_fetch_common_params - read DFLL parameters from the device tree
+ * @td: DFLL instance
+ *
+ * Read all the DT parameters that are common to both I2C and PWM operation.
+ * Returns 0 on success or -EINVAL on any failure.
+ */
+static int dfll_fetch_common_params(struct tegra_dfll *td)
+{
+ bool ok = true;
+
+ ok &= read_dt_param(td, "nvidia,droop-ctrl", &td->droop_ctrl);
+ ok &= read_dt_param(td, "nvidia,sample-rate", &td->sample_rate);
+ ok &= read_dt_param(td, "nvidia,force-mode", &td->force_mode);
+ ok &= read_dt_param(td, "nvidia,cf", &td->cf);
+ ok &= read_dt_param(td, "nvidia,ci", &td->ci);
+ ok &= read_dt_param(td, "nvidia,cg", &td->cg);
+ td->cg_scale = of_property_read_bool(td->dev->of_node,
+ "nvidia,cg-scale");
+
+ if (of_property_read_string(td->dev->of_node, "clock-output-names",
+ &td->output_clock_name)) {
+ dev_err(td->dev, "missing clock-output-names property\n");
+ ok = false;
+ }
+
+ return ok ? 0 : -EINVAL;
+}
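+
+/*
+ * Illustrative DT fragment consumed by the two fetch functions above
+ * (property values are examples only):
+ *
+ *	dfll@70110000 {
+ *		compatible = "nvidia,tegra124-dfll";
+ *		nvidia,sample-rate = <12500>;
+ *		nvidia,droop-ctrl = <0x00000f00>;
+ *		nvidia,force-mode = <1>;
+ *		nvidia,cf = <10>;
+ *		nvidia,ci = <0>;
+ *		nvidia,cg = <2>;
+ *		nvidia,i2c-fs-rate = <400000>;
+ *		clock-output-names = "dfllCPU_out";
+ *	};
+ */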
+
+/*
+ * API exported to per-SoC platform drivers
+ */
+
+/**
+ * tegra_dfll_register - probe a Tegra DFLL device
+ * @pdev: DFLL platform_device *
+ * @soc: Per-SoC integration and characterization data for this DFLL instance
+ *
+ * Probe and initialize a DFLL device instance. Intended to be called
+ * by a SoC-specific shim driver that passes in per-SoC integration
+ * and configuration data via @soc. Returns 0 on success or -err on failure.
+ */
+int tegra_dfll_register(struct platform_device *pdev,
+ struct tegra_dfll_soc_data *soc)
+{
+ struct resource *mem;
+ struct tegra_dfll *td;
+ int ret;
+
+ if (!soc) {
+ dev_err(&pdev->dev, "no tegra_dfll_soc_data provided\n");
+ return -EINVAL;
+ }
+
+ td = devm_kzalloc(&pdev->dev, sizeof(*td), GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
+ td->dev = &pdev->dev;
+ platform_set_drvdata(pdev, td);
+
+ td->soc = soc;
+
+ td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+ if (IS_ERR(td->vdd_reg)) {
+ dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+ return PTR_ERR(td->vdd_reg);
+ }
+
+ td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
+ if (IS_ERR(td->dvco_rst)) {
+ dev_err(td->dev, "couldn't get dvco reset\n");
+ return PTR_ERR(td->dvco_rst);
+ }
+
+ ret = dfll_fetch_common_params(td);
+ if (ret) {
+ dev_err(td->dev, "couldn't parse device tree parameters\n");
+ return ret;
+ }
+
+ ret = dfll_fetch_i2c_params(td);
+ if (ret)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(td->dev, "no control register resource\n");
+ return -ENODEV;
+ }
+
+ td->base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+ if (!td->base) {
+ dev_err(td->dev, "couldn't ioremap DFLL control registers\n");
+ return -ENODEV;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ dev_err(td->dev, "no i2c_base resource\n");
+ return -ENODEV;
+ }
+
+ td->i2c_base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+ if (!td->i2c_base) {
+ dev_err(td->dev, "couldn't ioremap i2c_base resource\n");
+ return -ENODEV;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!mem) {
+ dev_err(td->dev, "no i2c_controller_base resource\n");
+ return -ENODEV;
+ }
+
+ td->i2c_controller_base = devm_ioremap(td->dev, mem->start,
+ resource_size(mem));
+ if (!td->i2c_controller_base) {
+ dev_err(td->dev,
+ "couldn't ioremap i2c_controller_base resource\n");
+ return -ENODEV;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!mem) {
+ dev_err(td->dev, "no lut_base resource\n");
+ return -ENODEV;
+ }
+
+ td->lut_base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+ if (!td->lut_base) {
+ dev_err(td->dev,
+ "couldn't ioremap lut_base resource\n");
+ return -ENODEV;
+ }
+
+ ret = dfll_init_clks(td);
+ if (ret) {
+ dev_err(&pdev->dev, "DFLL clock init error\n");
+ return ret;
+ }
+
+ /* Enable the clocks and set the device up */
+ ret = dfll_init(td);
+ if (ret)
+ return ret;
+
+ ret = dfll_register_clk(td);
+ if (ret) {
+ dev_err(&pdev->dev, "DFLL clk registration failed\n");
+ return ret;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ dfll_debug_init(td);
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_register);
+
+/**
+ * tegra_dfll_unregister - release all of the DFLL driver resources for a device
+ * @pdev: DFLL platform_device *
+ *
+ * Unbind this driver from the DFLL hardware device represented by
+ * @pdev. The DFLL must be disabled for this to succeed. Returns 0
+ * upon success or -EBUSY if the DFLL is still active.
+ */
+int tegra_dfll_unregister(struct platform_device *pdev)
+{
+ struct tegra_dfll *td = platform_get_drvdata(pdev);
+
+ /* Try to prevent removal while the DFLL is active */
+ if (td->mode != DFLL_DISABLED) {
+ dev_err(&pdev->dev,
+ "must disable DFLL before removing driver\n");
+ return -EBUSY;
+ }
+
+ debugfs_remove_recursive(td->debugfs_dir);
+
+ dfll_unregister_clk(td);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_unprepare(td->ref_clk);
+ clk_unprepare(td->soc_clk);
+ clk_unprepare(td->i2c_clk);
+
+ reset_control_assert(td->dvco_rst);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_unregister);
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
new file mode 100644
index 000000000000..2e4c0772a5dc
--- /dev/null
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -0,0 +1,54 @@
+/*
+ * clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
+ * Copyright (C) 2013 NVIDIA Corporation. All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DRIVERS_CLK_TEGRA_CLK_DFLL_H
+#define __DRIVERS_CLK_TEGRA_CLK_DFLL_H
+
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+/**
+ * struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
+ * @dev: struct device * that holds the OPP table for the DFLL
+ * @min_millivolts: minimum voltage (in mV) that the DFLL can operate
+ * @tune0_low: DFLL tuning register 0 (low voltage range)
+ * @tune0_high: DFLL tuning register 0 (high voltage range)
+ * @tune1: DFLL tuning register 1
+ * @init_clock_trimmers: fn ptr to initialize the clock trimmers
+ * @set_clock_trimmers_high: fn ptr to tune clock trimmers for high voltage
+ * @set_clock_trimmers_low: fn ptr to tune clock trimmers for low voltage
+ */
+struct tegra_dfll_soc_data {
+ struct device *dev;
+ unsigned int min_millivolts;
+ u32 tune0_low;
+ u32 tune0_high;
+ u32 tune1;
+ void (*init_clock_trimmers)(void);
+ void (*set_clock_trimmers_high)(void);
+ void (*set_clock_trimmers_low)(void);
+};
+
+int tegra_dfll_register(struct platform_device *pdev,
+ struct tegra_dfll_soc_data *soc);
+int tegra_dfll_unregister(struct platform_device *pdev);
+int tegra_dfll_runtime_suspend(struct device *dev);
+int tegra_dfll_runtime_resume(struct device *dev);
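+
+/*
+ * Usage sketch (hypothetical per-SoC shim driver; names are examples):
+ *
+ *	static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
+ *	{
+ *		...
+ *		return tegra_dfll_register(pdev, &tegra124_dfll_soc);
+ *	}
+ */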
+
+#endif /* __DRIVERS_CLK_TEGRA_CLK_DFLL_H */
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 59a5714dfe18..48c83efda4cf 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -19,7 +19,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
-#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 7649685c86bc..138a94b99b5b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -103,7 +103,7 @@ static unsigned long emc_recalc_rate(struct clk_hw *hw,
* CCF wrongly assumes that the parent won't change during set_rate,
* so get the parent rate explicitly.
*/
- parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+ parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;
@@ -116,11 +116,7 @@ static unsigned long emc_recalc_rate(struct clk_hw *hw,
* safer since things have EMC rate floors. Also don't touch parent_rate
* since we don't want the CCF to play with our parent clocks.
*/
-static long emc_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long min_rate,
- unsigned long max_rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw)
+static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct tegra_clk_emc *tegra;
u8 ram_code = tegra_read_ram_code();
@@ -135,22 +131,28 @@ static long emc_determine_rate(struct clk_hw *hw, unsigned long rate,
timing = tegra->timings + i;
- if (timing->rate > max_rate) {
+ if (timing->rate > req->max_rate) {
i = min(i, 1);
- return tegra->timings[i - 1].rate;
+ req->rate = tegra->timings[i - 1].rate;
+ return 0;
}
- if (timing->rate < min_rate)
+ if (timing->rate < req->min_rate)
continue;
- if (timing->rate >= rate)
- return timing->rate;
+ if (timing->rate >= req->rate) {
+ req->rate = timing->rate;
+ return 0;
+ }
}
- if (timing)
- return timing->rate;
+ if (timing) {
+ req->rate = timing->rate;
+ return 0;
+ }
- return __clk_get_rate(hw->clk);
+ req->rate = clk_hw_get_rate(hw);
+ return 0;
}
static u8 emc_get_parent(struct clk_hw *hw)
@@ -312,7 +314,7 @@ static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
tegra = container_of(hw, struct tegra_clk_emc, hw);
- if (__clk_get_rate(hw->clk) == rate)
+ if (clk_hw_get_rate(hw) == rate)
return 0;
/*
@@ -525,8 +527,8 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
if (IS_ERR(clk))
return clk;
- tegra->prev_parent = clk_get_parent_by_index(
- tegra->hw.clk, emc_get_parent(&tegra->hw));
+ tegra->prev_parent = clk_hw_get_parent_by_index(
+ &tegra->hw, emc_get_parent(&tegra->hw))->clk;
tegra->changing_timing = false;
/* Allow debugging tools to see the EMC clock */
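
The emc_determine_rate() change above tracks this cycle's common-clock-framework switch from a determine_rate callback that returns a rate (long) to one that fills in and returns status for a struct clk_rate_request. As a point of reference, the request structure merged in v4.3 looks roughly like this (see include/linux/clk-provider.h for the authoritative definition):

struct clk_rate_request {
	unsigned long rate;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long best_parent_rate;
	struct clk_hw *best_parent_hw;
};

A callback now clamps req->rate into [req->min_rate, req->max_rate] and returns 0, or a negative errno if no usable rate exists.
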
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index 0aa8830ae7cc..d28d6e95020f 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -14,7 +14,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index d84ae49d0e05..ec5b6113b012 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -14,7 +14,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
index 3598987a451d..257cae0c1488 100644
--- a/drivers/clk/tegra/clk-pll-out.c
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
-#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 05c6d08a6695..d6d4ecb88e94 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -18,8 +18,8 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include "clk.h"
@@ -264,7 +264,7 @@ static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
}
pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
- __clk_get_name(pll->hw.clk));
+ clk_hw_get_name(&pll->hw));
return -1;
}
@@ -595,7 +595,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (pll->params->flags & TEGRA_PLL_FIXED) {
if (rate != pll->params->fixed_rate) {
pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
- __func__, __clk_get_name(hw->clk),
+ __func__, clk_hw_get_name(hw),
pll->params->fixed_rate, rate);
return -EINVAL;
}
@@ -605,7 +605,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
_calc_rate(hw, &cfg, rate, parent_rate)) {
pr_err("%s: Failed to set %s rate %lu\n", __func__,
- __clk_get_name(hw->clk), rate);
+ clk_hw_get_name(hw), rate);
WARN_ON(1);
return -EINVAL;
}
@@ -634,7 +634,7 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
/* PLLM is used for memory; we do not change rate */
if (pll->params->flags & TEGRA_PLLM)
- return __clk_get_rate(hw->clk);
+ return clk_hw_get_rate(hw);
if (_get_table_rate(hw, &cfg, rate, *prate) &&
_calc_rate(hw, &cfg, rate, *prate))
@@ -663,7 +663,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
if (_get_table_rate(hw, &sel, pll->params->fixed_rate,
parent_rate)) {
pr_err("Clock %s has unknown fixed frequency\n",
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
BUG();
}
return pll->params->fixed_rate;
@@ -1577,7 +1577,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
if (!pll_params->pdiv_tohw)
return ERR_PTR(-EINVAL);
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_get_rate(parent);
pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
@@ -1674,7 +1674,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
return ERR_PTR(-EINVAL);
}
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_get_rate(parent);
pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
@@ -1715,7 +1715,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
return ERR_PTR(-EINVAL);
}
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_get_rate(parent);
pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
@@ -1848,7 +1848,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
val &= ~PLLSS_REF_SRC_SEL_MASK;
pll_writel_base(val, pll);
- parent_rate = __clk_get_rate(parent);
+ parent_rate = clk_get_rate(parent);
pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 2fd924d38606..131d1b5085e2 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
-#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
index 5c38aab2c5b8..11e3ad7ad7a3 100644
--- a/drivers/clk/tegra/clk-tegra-audio.c
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 605676d368eb..da0b5941c89f 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 46af9244ba74..cb6ab830941d 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
index 08b21c1ee867..91377abfefa1 100644
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index feb3201c85ce..5b1d723932c5 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -44,7 +43,9 @@ static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
"pll_p", "pll_p_out4", "unused",
- "unused", "pll_x" };
+ "unused", "pll_x", "unused", "unused",
+ "unused", "unused", "unused", "unused",
+ "dfllCPU_out" };
static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
"pll_p", "pll_p_out4", "unused",
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 8237d16b4075..db5871519bf5 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -15,9 +15,7 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
new file mode 100644
index 000000000000..61253330c12b
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -0,0 +1,166 @@
+/*
+ * Tegra124 DFLL FCPU clock source driver
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include "clk.h"
+#include "clk-dfll.h"
+#include "cvb.h"
+
+/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long cpu_max_freq_table[] = {
+ [0] = 2014500000UL,
+ [1] = 2320500000UL,
+ [2] = 2116500000UL,
+ [3] = 2524500000UL,
+};
+
+static const struct cvb_table tegra124_cpu_cvb_tables[] = {
+ {
+ .speedo_id = -1,
+ .process_id = -1,
+ .min_millivolts = 900,
+ .max_millivolts = 1260,
+ .alignment = {
+ .step_uv = 10000, /* 10mV */
+ },
+ .speedo_scale = 100,
+ .voltage_scale = 1000,
+ .cvb_table = {
+ {204000000UL, {1112619, -29295, 402} },
+ {306000000UL, {1150460, -30585, 402} },
+ {408000000UL, {1190122, -31865, 402} },
+ {510000000UL, {1231606, -33155, 402} },
+ {612000000UL, {1274912, -34435, 402} },
+ {714000000UL, {1320040, -35725, 402} },
+ {816000000UL, {1366990, -37005, 402} },
+ {918000000UL, {1415762, -38295, 402} },
+ {1020000000UL, {1466355, -39575, 402} },
+ {1122000000UL, {1518771, -40865, 402} },
+ {1224000000UL, {1573009, -42145, 402} },
+ {1326000000UL, {1629068, -43435, 402} },
+ {1428000000UL, {1686950, -44715, 402} },
+ {1530000000UL, {1746653, -46005, 402} },
+ {1632000000UL, {1808179, -47285, 402} },
+ {1734000000UL, {1871526, -48575, 402} },
+ {1836000000UL, {1936696, -49855, 402} },
+ {1938000000UL, {2003687, -51145, 402} },
+ {2014500000UL, {2054787, -52095, 402} },
+ {2116500000UL, {2124957, -53385, 402} },
+ {2218500000UL, {2196950, -54665, 402} },
+ {2320500000UL, {2270765, -55955, 402} },
+ {2422500000UL, {2346401, -57235, 402} },
+ {2524500000UL, {2437299, -58535, 402} },
+ {0, { 0, 0, 0} },
+ },
+ .cpu_dfll_data = {
+ .tune0_low = 0x005020ff,
+ .tune0_high = 0x005040ff,
+ .tune1 = 0x00000060,
+ }
+ },
+};
+
+static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
+{
+ int process_id, speedo_id, speedo_value;
+ struct tegra_dfll_soc_data *soc;
+ const struct cvb_table *cvb;
+
+ process_id = tegra_sku_info.cpu_process_id;
+ speedo_id = tegra_sku_info.cpu_speedo_id;
+ speedo_value = tegra_sku_info.cpu_speedo_value;
+
+ if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+ dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
+ speedo_id);
+ return -ENODEV;
+ }
+
+ soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
+ if (!soc)
+ return -ENOMEM;
+
+ soc->dev = get_cpu_device(0);
+ if (!soc->dev) {
+ dev_err(&pdev->dev, "no CPU0 device\n");
+ return -ENODEV;
+ }
+
+ cvb = tegra_cvb_build_opp_table(tegra124_cpu_cvb_tables,
+ ARRAY_SIZE(tegra124_cpu_cvb_tables),
+ process_id, speedo_id, speedo_value,
+ cpu_max_freq_table[speedo_id],
+ soc->dev);
+ if (IS_ERR(cvb)) {
+ dev_err(&pdev->dev, "couldn't build OPP table: %ld\n",
+ PTR_ERR(cvb));
+ return PTR_ERR(cvb);
+ }
+
+ soc->min_millivolts = cvb->min_millivolts;
+ soc->tune0_low = cvb->cpu_dfll_data.tune0_low;
+ soc->tune0_high = cvb->cpu_dfll_data.tune0_high;
+ soc->tune1 = cvb->cpu_dfll_data.tune1;
+
+ return tegra_dfll_register(pdev, soc);
+}
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+ { .compatible = "nvidia,tegra124-dfll", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra124_dfll_fcpu_of_match);
+
+static const struct dev_pm_ops tegra124_dfll_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
+ tegra_dfll_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra124_dfll_fcpu_driver = {
+ .probe = tegra124_dfll_fcpu_probe,
+ .remove = tegra_dfll_unregister,
+ .driver = {
+ .name = "tegra124-dfll",
+ .of_match_table = tegra124_dfll_fcpu_of_match,
+ .pm = &tegra124_dfll_pm_ops,
+ },
+};
+
+static int __init tegra124_dfll_fcpu_init(void)
+{
+ return platform_driver_register(&tegra124_dfll_fcpu_driver);
+}
+module_init(tegra124_dfll_fcpu_init);
+
+static void __exit tegra124_dfll_fcpu_exit(void)
+{
+ platform_driver_unregister(&tegra124_dfll_fcpu_driver);
+}
+module_exit(tegra124_dfll_fcpu_exit);
+
+MODULE_DESCRIPTION("Tegra124 DFLL clock source driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Aleksandr Frid <afrid@nvidia.com>");
+MODULE_AUTHOR("Paul Walmsley <pwalmsley@nvidia.com>");
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e8cca3eac007..824d75883d2b 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
@@ -24,6 +23,7 @@
#include <linux/export.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra124-car.h>
+#include <dt-bindings/reset/tegra124-car.h>
#include "clk.h"
#include "clk-id.h"
@@ -39,6 +39,9 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
+#define RST_DFLL_DVCO 0x2f4
+#define DVFS_DFLL_RESET_SHIFT 0
+
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
#define PLLC_MISC2 0x88
@@ -94,6 +97,8 @@
#define PMC_PLLM_WB0_OVERRIDE 0x1dc
#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
+#define CCLKG_BURST_POLICY 0x368
+
#define UTMIP_PLL_CFG2 0x488
#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
@@ -126,6 +131,8 @@
#ifdef CONFIG_PM_SLEEP
static struct cpu_clk_suspend_context {
u32 clk_csite_src;
+ u32 cclkg_burst;
+ u32 cclkg_divider;
} tegra124_cpu_clk_sctx;
#endif
@@ -1319,12 +1326,22 @@ static void tegra124_cpu_clock_suspend(void)
tegra124_cpu_clk_sctx.clk_csite_src =
readl(clk_base + CLK_SOURCE_CSITE);
writel(3 << 30, clk_base + CLK_SOURCE_CSITE);
+
+ tegra124_cpu_clk_sctx.cclkg_burst =
+ readl(clk_base + CCLKG_BURST_POLICY);
+ tegra124_cpu_clk_sctx.cclkg_divider =
+ readl(clk_base + CCLKG_BURST_POLICY + 4);
}
static void tegra124_cpu_clock_resume(void)
{
writel(tegra124_cpu_clk_sctx.clk_csite_src,
clk_base + CLK_SOURCE_CSITE);
+
+ writel(tegra124_cpu_clk_sctx.cclkg_burst,
+ clk_base + CCLKG_BURST_POLICY);
+ writel(tegra124_cpu_clk_sctx.cclkg_divider,
+ clk_base + CCLKG_BURST_POLICY + 4);
}
#endif
@@ -1415,6 +1432,68 @@ static void __init tegra124_clock_apply_init_table(void)
}
/**
+ * tegra124_car_barrier - wait for pending writes to the CAR to complete
+ *
+ * Wait for any outstanding writes to the CAR MMIO space from this CPU
+ * to complete before continuing execution. No return value.
+ */
+static void tegra124_car_barrier(void)
+{
+ readl_relaxed(clk_base + RST_DFLL_DVCO);
+}
+
+/**
+ * tegra124_clock_assert_dfll_dvco_reset - assert the DFLL's DVCO reset
+ *
+ * Assert the reset line of the DFLL's DVCO. No return value.
+ */
+static void tegra124_clock_assert_dfll_dvco_reset(void)
+{
+ u32 v;
+
+ v = readl_relaxed(clk_base + RST_DFLL_DVCO);
+ v |= (1 << DVFS_DFLL_RESET_SHIFT);
+ writel_relaxed(v, clk_base + RST_DFLL_DVCO);
+ tegra124_car_barrier();
+}
+
+/**
+ * tegra124_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
+ *
+ * Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
+ * operate. No return value.
+ */
+static void tegra124_clock_deassert_dfll_dvco_reset(void)
+{
+ u32 v;
+
+ v = readl_relaxed(clk_base + RST_DFLL_DVCO);
+ v &= ~(1 << DVFS_DFLL_RESET_SHIFT);
+ writel_relaxed(v, clk_base + RST_DFLL_DVCO);
+ tegra124_car_barrier();
+}
+
+static int tegra124_reset_assert(unsigned long id)
+{
+ if (id == TEGRA124_RST_DFLL_DVCO)
+ tegra124_clock_assert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra124_reset_deassert(unsigned long id)
+{
+ if (id == TEGRA124_RST_DFLL_DVCO)
+ tegra124_clock_deassert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
*
* Program an initial clock rate and enable or disable clocks needed
@@ -1499,6 +1578,8 @@ static void __init tegra124_132_clock_init_post(struct device_node *np)
{
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
&pll_x_params);
+ tegra_init_special_resets(1, tegra124_reset_assert,
+ tegra124_reset_deassert);
tegra_add_of_provider(np);
clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 41272dcc9e22..bf004f0e4f65 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -15,7 +15,6 @@
*/
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 0af3e834dd24..fad561a5896b 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 41cd87c67be6..2a3a4fe803d6 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -14,6 +14,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
@@ -49,7 +50,6 @@
#define RST_DEVICES_L 0x004
#define RST_DEVICES_H 0x008
#define RST_DEVICES_U 0x00C
-#define RST_DFLL_DVCO 0x2F4
#define RST_DEVICES_V 0x358
#define RST_DEVICES_W 0x35C
#define RST_DEVICES_X 0x28C
@@ -79,6 +79,11 @@ static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;
+/* Handlers for SoC-specific reset lines */
+static int (*special_reset_assert)(unsigned long);
+static int (*special_reset_deassert)(unsigned long);
+static unsigned int num_special_reset;
+
static struct tegra_clk_periph_regs periph_regs[] = {
[0] = {
.enb_reg = CLK_OUT_ENB_L,
@@ -152,19 +157,29 @@ static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
*/
tegra_read_chipid();
- writel_relaxed(BIT(id % 32),
- clk_base + periph_regs[id / 32].rst_set_reg);
+ if (id < periph_banks * 32) {
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_set_reg);
+ return 0;
+ } else if (id < periph_banks * 32 + num_special_reset) {
+ return special_reset_assert(id);
+ }
- return 0;
+ return -EINVAL;
}
static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- writel_relaxed(BIT(id % 32),
- clk_base + periph_regs[id / 32].rst_clr_reg);
+ if (id < periph_banks * 32) {
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_clr_reg);
+ return 0;
+ } else if (id < periph_banks * 32 + num_special_reset) {
+ return special_reset_deassert(id);
+ }
- return 0;
+ return -EINVAL;
}
struct tegra_clk_periph_regs *get_reg_bank(int clkid)
@@ -286,10 +301,19 @@ void __init tegra_add_of_provider(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
rst_ctlr.of_node = np;
- rst_ctlr.nr_resets = periph_banks * 32;
+ rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
reset_controller_register(&rst_ctlr);
}
+void __init tegra_init_special_resets(unsigned int num,
+ int (*assert)(unsigned long),
+ int (*deassert)(unsigned long))
+{
+ num_special_reset = num;
+ special_reset_assert = assert;
+ special_reset_deassert = deassert;
+}
+
void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
int i;
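
The tegra_clk_rst_assert()/tegra_clk_rst_deassert() dispatch above splits the reset-controller ID space in two: IDs below periph_banks * 32 map onto the per-bank peripheral reset set/clear registers, and the num_special_reset IDs immediately after are routed to the handlers installed via tegra_init_special_resets(). A sketch of the resulting layout, assuming a hypothetical six peripheral banks:

/*
 * id 0 .. 191                   -> periph_regs[id / 32], bit (id % 32)
 * id 192                        -> special_reset_assert(192); on Tegra124
 *                                  this is TEGRA124_RST_DFLL_DVCO (see
 *                                  clk-tegra124.c above)
 * id >= 192 + num_special_reset -> -EINVAL
 */
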
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 75ddc8ff8bd4..0621887e06f7 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -591,6 +591,9 @@ struct tegra_devclk {
char *con_id;
};
+void tegra_init_special_resets(unsigned int num, int (*assert)(unsigned long),
+ int (*deassert)(unsigned long));
+
void tegra_init_from_table(struct tegra_clk_init_table *tbl,
struct clk *clks[], int clk_max);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
new file mode 100644
index 000000000000..0204e0861134
--- /dev/null
+++ b/drivers/clk/tegra/cvb.c
@@ -0,0 +1,140 @@
+/*
+ * Utility functions for parsing Tegra CVB voltage tables
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pm_opp.h>
+
+#include "cvb.h"
+
+/* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) */
+static inline int get_cvb_voltage(int speedo, int s_scale,
+ const struct cvb_coefficients *cvb)
+{
+ int mv;
+
+ /* apply only speedo scale: output mv = cvb_mv * v_scale */
+ mv = DIV_ROUND_CLOSEST(cvb->c2 * speedo, s_scale);
+ mv = DIV_ROUND_CLOSEST((mv + cvb->c1) * speedo, s_scale) + cvb->c0;
+ return mv;
+}
+
+static int round_cvb_voltage(int mv, int v_scale,
+ const struct rail_alignment *align)
+{
+ /* combined: apply voltage scale and round to cvb alignment step */
+ int uv;
+ int step = (align->step_uv ? : 1000) * v_scale;
+ int offset = align->offset_uv * v_scale;
+
+ uv = max(mv * 1000, offset) - offset;
+ uv = DIV_ROUND_UP(uv, step) * align->step_uv + align->offset_uv;
+ return uv / 1000;
+}
+
+enum {
+ DOWN,
+ UP
+};
+
+static int round_voltage(int mv, const struct rail_alignment *align, int up)
+{
+ if (align->step_uv) {
+ int uv;
+
+ uv = max(mv * 1000, align->offset_uv) - align->offset_uv;
+ uv = (uv + (up ? align->step_uv - 1 : 0)) / align->step_uv;
+ return (uv * align->step_uv + align->offset_uv) / 1000;
+ }
+ return mv;
+}
+
+static int build_opp_table(const struct cvb_table *d,
+ int speedo_value,
+ unsigned long max_freq,
+ struct device *opp_dev)
+{
+ int i, ret, dfll_mv, min_mv, max_mv;
+ const struct cvb_table_freq_entry *table = NULL;
+ const struct rail_alignment *align = &d->alignment;
+
+ min_mv = round_voltage(d->min_millivolts, align, UP);
+ max_mv = round_voltage(d->max_millivolts, align, DOWN);
+
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ table = &d->cvb_table[i];
+ if (!table->freq || (table->freq > max_freq))
+ break;
+
+ /*
+ * FIXME after clk_round_rate/clk_determine_rate prototypes
+ * have been updated
+ */
+ if (table->freq & (1<<31))
+ continue;
+
+ dfll_mv = get_cvb_voltage(
+ speedo_value, d->speedo_scale, &table->coefficients);
+ dfll_mv = round_cvb_voltage(dfll_mv, d->voltage_scale, align);
+ dfll_mv = clamp(dfll_mv, min_mv, max_mv);
+
+ ret = dev_pm_opp_add(opp_dev, table->freq, dfll_mv * 1000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tegra_cvb_build_opp_table - build OPP table from Tegra CVB tables
+ * @cvb_tables: array of CVB tables
+ * @sz: number of entries in the @cvb_tables array
+ * @process_id: process id of the HW module
+ * @speedo_id: speedo id of the HW module
+ * @speedo_value: speedo value of the HW module
+ * @max_rate: highest safe clock rate
+ * @opp_dev: the struct device * for which the OPP table is built
+ *
+ * On Tegra, a CVB table encodes the relationship between operating voltage
+ * and safe maximal frequency for a given module (e.g. GPU or CPU). This
+ * function calculates the optimal voltage-frequency operating points
+ * for the given arguments and exports them via the OPP library for the
+ * given @opp_dev. Returns a pointer to the struct cvb_table that matched
+ * or an ERR_PTR on failure.
+ */
+const struct cvb_table *tegra_cvb_build_opp_table(
+ const struct cvb_table *cvb_tables,
+ size_t sz, int process_id,
+ int speedo_id, int speedo_value,
+ unsigned long max_rate,
+ struct device *opp_dev)
+{
+ int i, ret;
+
+ for (i = 0; i < sz; i++) {
+ const struct cvb_table *d = &cvb_tables[i];
+
+ if (d->speedo_id != -1 && d->speedo_id != speedo_id)
+ continue;
+ if (d->process_id != -1 && d->process_id != process_id)
+ continue;
+
+ ret = build_opp_table(d, speedo_value, max_rate, opp_dev);
+ return ret ? ERR_PTR(ret) : d;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
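
To make the fixed-point arithmetic in get_cvb_voltage() and round_cvb_voltage() concrete, here is a worked example against the first Tegra124 entry earlier in this series ({204 MHz, {1112619, -29295, 402}}, speedo_scale = 100, voltage_scale = 1000, 10 mV alignment steps) with a purely hypothetical speedo value of 2100:

/* mv accumulates in units of 1/voltage_scale millivolts */
mv = DIV_ROUND_CLOSEST(402 * 2100, 100);		/* 8442 */
mv = DIV_ROUND_CLOSEST((8442 - 29295) * 2100, 100)
	+ 1112619;					/* 674706, i.e. 674.706 mV */
/*
 * round_cvb_voltage() then rounds up to the 10 mV rail step
 * (680 mV), and build_opp_table() clamps the result into
 * [min_millivolts, max_millivolts] = [900, 1260], giving 900 mV
 * for this low-frequency entry.
 */
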
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
new file mode 100644
index 000000000000..f62cdc4f4234
--- /dev/null
+++ b/drivers/clk/tegra/cvb.h
@@ -0,0 +1,67 @@
+/*
+ * Utility functions for parsing Tegra CVB voltage tables
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_TEGRA_CVB_H
+#define __DRIVERS_CLK_TEGRA_CVB_H
+
+#include <linux/types.h>
+
+struct device;
+
+#define MAX_DVFS_FREQS 40
+
+struct rail_alignment {
+ int offset_uv;
+ int step_uv;
+};
+
+struct cvb_coefficients {
+ int c0;
+ int c1;
+ int c2;
+};
+
+struct cvb_table_freq_entry {
+ unsigned long freq;
+ struct cvb_coefficients coefficients;
+};
+
+struct cvb_cpu_dfll_data {
+ u32 tune0_low;
+ u32 tune0_high;
+ u32 tune1;
+};
+
+struct cvb_table {
+ int speedo_id;
+ int process_id;
+
+ int min_millivolts;
+ int max_millivolts;
+ struct rail_alignment alignment;
+
+ int speedo_scale;
+ int voltage_scale;
+ struct cvb_table_freq_entry cvb_table[MAX_DVFS_FREQS];
+ struct cvb_cpu_dfll_data cpu_dfll_data;
+};
+
+const struct cvb_table *tegra_cvb_build_opp_table(
+ const struct cvb_table *cvb_tables,
+ size_t sz, int process_id,
+ int speedo_id, int speedo_value,
+ unsigned long max_rate,
+ struct device *opp_dev);
+
+#endif
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 105ffd0f5e79..d4ac96087ccd 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,16 +1,19 @@
obj-y += clk.o autoidle.o clockdomain.o
clk-common = dpll.o composite.o divider.o gate.o \
- fixed-factor.o mux.o apll.o
-obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
-obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-816x.o
+ fixed-factor.o mux.o apll.o \
+ clkt_dpll.o clkt_iclk.o clkt_dflt.o
+obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o
+obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o
obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \
- clk-3xxx.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
-obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
+ clk-3xxx.o dpll3xxx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o \
+ dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o \
+ dpll3xxx.o dpll44xx.o
obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
- clk-dra7-atl.o
-obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
+ clk-dra7-atl.o dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
ifdef CONFIG_ATAGS
obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 49baf3831546..f3eab6e79027 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -27,6 +28,8 @@
#include <linux/clk/ti.h>
#include <linux/delay.h>
+#include "clock.h"
+
#define APLL_FORCE_LOCK 0x1
#define APLL_AUTO_IDLE 0x2
#define MAX_APLL_WAIT_TRIES 1000000
@@ -47,7 +50,7 @@ static int dra7_apll_enable(struct clk_hw *hw)
if (!ad)
return -EINVAL;
- clk_name = __clk_get_name(clk->hw.clk);
+ clk_name = clk_hw_get_name(&clk->hw);
state <<= __ffs(ad->idlest_mask);
@@ -170,7 +173,6 @@ static void __init of_dra7_apll_setup(struct device_node *node)
struct clk_hw_omap *clk_hw = NULL;
struct clk_init_data *init = NULL;
const char **parent_names = NULL;
- int i;
ad = kzalloc(sizeof(*ad), GFP_KERNEL);
clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
@@ -195,8 +197,7 @@ static void __init of_dra7_apll_setup(struct device_node *node)
if (!parent_names)
goto cleanup;
- for (i = 0; i < init->num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
+ of_clk_parent_fill(node, parent_names, init->num_parents);
init->parent_names = parent_names;
@@ -272,7 +273,7 @@ static int omap2_apll_enable(struct clk_hw *hw)
if (i == MAX_APLL_WAIT_TRIES) {
pr_warn("%s failed to transition to locked\n",
- __clk_get_name(clk->hw.clk));
+ clk_hw_get_name(&clk->hw));
return -EBUSY;
}
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index e75c64c9e81c..345af43465f0 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -22,6 +22,8 @@
#include <linux/of_address.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
struct clk_ti_autoidle {
void __iomem *reg;
u8 shift;
@@ -33,8 +35,41 @@ struct clk_ti_autoidle {
#define AUTOIDLE_LOW 0x1
static LIST_HEAD(autoidle_clks);
+static LIST_HEAD(clk_hw_omap_clocks);
+
+/**
+ * omap2_clk_deny_idle - disable autoidle on an OMAP clock
+ * @clk: struct clk * to disable autoidle for
+ *
+ * Disable autoidle on an OMAP clock.
+ */
+int omap2_clk_deny_idle(struct clk *clk)
+{
+ struct clk_hw_omap *c;
-static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
+ c = to_clk_hw_omap(__clk_get_hw(clk));
+ if (c->ops && c->ops->deny_idle)
+ c->ops->deny_idle(c);
+ return 0;
+}
+
+/**
+ * omap2_clk_allow_idle - enable autoidle on an OMAP clock
+ * @clk: struct clk * to enable autoidle for
+ *
+ * Enable autoidle on an OMAP clock.
+ */
+int omap2_clk_allow_idle(struct clk *clk)
+{
+ struct clk_hw_omap *c;
+
+ c = to_clk_hw_omap(__clk_get_hw(clk));
+ if (c->ops && c->ops->allow_idle)
+ c->ops->allow_idle(c);
+ return 0;
+}
+
+static void _allow_autoidle(struct clk_ti_autoidle *clk)
{
u32 val;
@@ -48,7 +83,7 @@ static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
ti_clk_ll_ops->clk_writel(val, clk->reg);
}
-static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
+static void _deny_autoidle(struct clk_ti_autoidle *clk)
{
u32 val;
@@ -63,31 +98,31 @@ static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
}
/**
- * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks
+ * _clk_generic_allow_autoidle_all - enable autoidle for all clocks
*
* Enables hardware autoidle for all registered DT clocks, which have
* the feature.
*/
-void of_ti_clk_allow_autoidle_all(void)
+static void _clk_generic_allow_autoidle_all(void)
{
struct clk_ti_autoidle *c;
list_for_each_entry(c, &autoidle_clks, node)
- ti_allow_autoidle(c);
+ _allow_autoidle(c);
}
/**
- * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks
+ * _clk_generic_deny_autoidle_all - disable autoidle for all clocks
*
* Disables hardware autoidle for all registered DT clocks, which have
* the feature.
*/
-void of_ti_clk_deny_autoidle_all(void)
+static void _clk_generic_deny_autoidle_all(void)
{
struct clk_ti_autoidle *c;
list_for_each_entry(c, &autoidle_clks, node)
- ti_deny_autoidle(c);
+ _deny_autoidle(c);
}
/**
@@ -131,3 +166,67 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
return 0;
}
+
+/**
+ * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
+ * @hw: struct clk_hw * to initialize
+ *
+ * Add the OMAP clock @hw to the internal list of OMAP clocks. Used
+ * temporarily for autoidle handling, until this support can be
+ * integrated into the common clock framework code in some way. No
+ * return value.
+ */
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
+{
+ struct clk_hw_omap *c;
+
+ if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
+ return;
+
+ c = to_clk_hw_omap(hw);
+ list_add(&c->node, &clk_hw_omap_clocks);
+}
+
+/**
+ * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Enable clock autoidle on all OMAP clocks that have allow_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
+ */
+int omap2_clk_enable_autoidle_all(void)
+{
+ struct clk_hw_omap *c;
+
+ list_for_each_entry(c, &clk_hw_omap_clocks, node)
+ if (c->ops && c->ops->allow_idle)
+ c->ops->allow_idle(c);
+
+ _clk_generic_allow_autoidle_all();
+
+ return 0;
+}
+
+/**
+ * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Disable clock autoidle on all OMAP clocks that have deny_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
+ */
+int omap2_clk_disable_autoidle_all(void)
+{
+ struct clk_hw_omap *c;
+
+ list_for_each_entry(c, &clk_hw_omap_clocks, node)
+ if (c->ops && c->ops->deny_idle)
+ c->ops->deny_idle(c);
+
+ _clk_generic_deny_autoidle_all();
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
index c808ab3d2bb2..657c4fe07a95 100644
--- a/drivers/clk/ti/clk-2xxx.c
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -16,9 +16,11 @@
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
static struct ti_dt_clk omap2xxx_clks[] = {
DT_CLK(NULL, "func_32k_ck", "func_32k_ck"),
DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"),
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 028b33783d38..ef2ec64fe547 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -16,9 +16,12 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
static struct ti_dt_clk am33xx_clks[] = {
DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
index 0b61548d569b..0fbf8a917955 100644
--- a/drivers/clk/ti/clk-3xxx-legacy.c
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 757636d166cf..676ee8f6d813 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -16,9 +16,220 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
+/*
+ * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define DPLL5_FREQ_FOR_USBHOST 120000000
+
+#define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
+#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
+#define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
+
+#define OMAP34XX_CM_IDLEST_VAL 1
+
+/*
+ * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported
+ * in the same register at a bit offset of 0x8. The EN_ACK for ICK is
+ * at an offset of 4 from ICK enable bit.
+ */
+#define AM35XX_IPSS_ICK_MASK 0xF
+#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4
+#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8
+#define AM35XX_IPSS_CLK_IDLEST_VAL 0
+
+#define AM35XX_ST_IPSS_SHIFT 5
+
+/**
+ * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
+ * from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_ssi_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
+ * target IDLEST bits. For our purposes, we are concerned with the
+ * target IDLEST bits, which exist at a different bit position than
+ * the *CLKEN bit position for these modules (DSS and USBHOST); the
+ * default find_idlest code assumes that they are at the same
+ * position. No return value.
+ */
+static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ /* USBHOST_IDLE has same shift */
+ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
+ * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The interface clocks on the AM35xx IPSS reflect the clock idle status
+ * in the enable register itself, at a bit offset of 4 from the enable
+ * bit. A value of 1 indicates that the clock is enabled.
+ */
+static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ *idlest_reg = (__force void __iomem *)(clk->enable_reg);
+ *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;
+ *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL;
+}
+
+/**
+ * am35xx_clk_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+ * @other_bit: u8 * to return the companion clock bit shift in
+ *
+ * Some clocks don't have companion clocks. For example, modules with
+ * only an interface clock (such as HECC) don't have a companion
+ * clock. Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active. Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts. No return value.
+ */
+static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg,
+ u8 *other_bit)
+{
+ *other_reg = (__force void __iomem *)(clk->enable_reg);
+ if (clk->enable_bit & AM35XX_IPSS_ICK_MASK)
+ *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
+ else
+ *other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
+ .find_idlest = am35xx_clk_find_idlest,
+ .find_companion = am35xx_clk_find_companion,
+};
+
+/**
+ * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The IPSS target CM_IDLEST bit is at a different shift from the
+ * CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg
+ * and @idlest_bit. No return value.
+ */
+static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = AM35XX_ST_IPSS_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = am35xx_clk_ipss_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
@@ -324,6 +535,30 @@ enum {
OMAP3_SOC_OMAP3630,
};
+/**
+ * omap3_clk_lock_dpll5 - locks DPLL5
+ *
+ * Locks DPLL5 to a pre-defined frequency. This is required for proper
+ * operation of USB.
+ */
+void __init omap3_clk_lock_dpll5(void)
+{
+ struct clk *dpll5_clk;
+ struct clk *dpll5_m2_clk;
+
+ dpll5_clk = clk_get(NULL, "dpll5_ck");
+ clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+ clk_prepare_enable(dpll5_clk);
+
+ /* Program dpll5_m2_clk divider for no division */
+ dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+ clk_prepare_enable(dpll5_m2_clk);
+ clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+
+ clk_disable_unprepare(dpll5_m2_clk);
+ clk_disable_unprepare(dpll5_clk);
+}
+
static int __init omap3xxx_dt_clk_init(int soc_type)
{
if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 3795fce8a830..097fc90bf19a 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -16,9 +16,12 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
@@ -71,6 +74,7 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ DT_CLK(NULL, "dpll_clksel_mac_clk", "dpll_clksel_mac_clk"),
DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 581db7711f51..7a8b51b35f9f 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -16,6 +16,8 @@
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
/*
* OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
* "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 96c69a335975..59ce2fa2c104 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -17,6 +17,8 @@
#include <linux/io.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
#define OMAP5_DPLL_ABE_DEFFREQ 98304000
/*
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 63b8323df918..9b5b289e6334 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,11 +16,12 @@
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
#define DRA7_DPLL_ABE_DEFFREQ 180633600
#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
#define DRA7_DPLL_USB_DEFFREQ 960000000
-
static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
new file mode 100644
index 000000000000..e172920798ea
--- /dev/null
+++ b/drivers/clk/ti/clk-814x.c
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_dt_clk dm814_clks[] = {
+ DT_CLK(NULL, "devosc_ck", "devosc_ck"),
+ DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+ DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+ DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+ DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+ DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+ DT_CLK(NULL, "timer_sys_ck", "devosc_ck"),
+ DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+ DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ { .node_name = NULL },
+};
+
+int __init dm814x_dt_clk_init(void)
+{
+ ti_dt_clocks_register(dm814_clks);
+ omap2_clk_disable_autoidle_all();
+ omap2_clk_enable_init_clocks(NULL, 0);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 9451e651a1ff..1dfad0c712cd 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -14,6 +14,8 @@
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
static struct ti_dt_clk dm816x_clks[] = {
DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
@@ -42,7 +44,7 @@ static const char *enable_init_clks[] = {
"ddr_pll_clk3",
};
-int __init ti81xx_dt_clk_init(void)
+int __init dm816x_dt_clk_init(void)
{
ti_dt_clocks_register(dm816x_clks);
omap2_clk_disable_autoidle_all();
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 19e543a32e2b..2e14dfb588f4 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 64bb5e8a3b8c..b5bcd77e8d0f 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -15,12 +15,15 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
+#include <linux/regmap.h>
+#include <linux/bootmem.h>
#include "clock.h"
@@ -30,6 +33,63 @@
struct ti_clk_ll_ops *ti_clk_ll_ops;
static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
+static struct ti_clk_features ti_clk_features;
+
+struct clk_iomap {
+ struct regmap *regmap;
+ void __iomem *mem;
+};
+
+static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS];
+
+static void clk_memmap_writel(u32 val, void __iomem *reg)
+{
+ struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+ struct clk_iomap *io = clk_memmaps[r->index];
+
+ if (io->regmap)
+ regmap_write(io->regmap, r->offset, val);
+ else
+ writel_relaxed(val, io->mem + r->offset);
+}
+
+static u32 clk_memmap_readl(void __iomem *reg)
+{
+ u32 val;
+ struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+ struct clk_iomap *io = clk_memmaps[r->index];
+
+ if (io->regmap)
+ regmap_read(io->regmap, r->offset, &val);
+ else
+ val = readl_relaxed(io->mem + r->offset);
+
+ return val;
+}
+
+/**
+ * ti_clk_setup_ll_ops - setup low level clock operations
+ * @ops: low level clock ops descriptor
+ *
+ * Sets up low level clock operations for TI clock driver. This is used
+ * to provide various callbacks for the clock driver towards platform
+ * specific code. Returns 0 on success, -EBUSY if ll_ops have been
+ * registered already.
+ */
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
+{
+ if (ti_clk_ll_ops) {
+ pr_err("Attempt to register ll_ops multiple times.\n");
+ return -EBUSY;
+ }
+
+ ti_clk_ll_ops = ops;
+ ops->clk_readl = clk_memmap_readl;
+ ops->clk_writel = clk_memmap_writel;
+
+ return 0;
+}
+
/**
* ti_dt_clocks_register - register DT alias clocks during boot
* @oclks: list of clocks to register
@@ -134,32 +194,67 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
reg->offset = val;
- return (void __iomem *)tmp;
+ return (__force void __iomem *)tmp;
}
/**
- * ti_dt_clk_init_provider - init master clock provider
+ * omap2_clk_provider_init - init master clock provider
* @parent: master node
* @index: internal index for clk_reg_ops
+ * @syscon: syscon regmap pointer for accessing clock registers
+ * @mem: iomem pointer for the clock provider memory area, only used if
+ * syscon is not provided
*
* Initializes a master clock IP block. This basically sets up the
* mapping from clocks node to the memory map index. All the clocks
* are then initialized through the common of_clk_init call, and the
* clocks will access their memory maps based on the node layout.
+ * Returns 0 on success.
*/
-void ti_dt_clk_init_provider(struct device_node *parent, int index)
+int __init omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem)
{
struct device_node *clocks;
+ struct clk_iomap *io;
/* get clocks for this parent */
clocks = of_get_child_by_name(parent, "clocks");
if (!clocks) {
pr_err("%s missing 'clocks' child node.\n", parent->name);
- return;
+ return -EINVAL;
}
/* add clocks node info */
clocks_node_ptr[index] = clocks;
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ return -ENOMEM;
+
+ io->regmap = syscon;
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
+
+ return 0;
+}
+
+/**
+ * omap2_clk_legacy_provider_init - initialize a legacy clock provider
+ * @index: index for the clock provider
+ * @mem: iomem pointer for the clock provider memory area
+ *
+ * Initializes a legacy clock provider memory mapping.
+ */
+void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
+{
+ struct clk_iomap *io;
+
+ io = memblock_virt_alloc(sizeof(*io), 0);
+
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
}
/**
@@ -244,11 +339,11 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
if (!IS_ERR(clk)) {
setup->clk = clk;
if (setup->clkdm_name) {
- if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+ clk_hw = __clk_get_hw(clk);
+ if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
pr_warn("can't setup clkdm for basic clk %s\n",
setup->name);
} else {
- clk_hw = __clk_get_hw(clk);
to_clk_hw_omap(clk_hw)->clkdm_name =
setup->clkdm_name;
omap2_init_clk_clkdm(clk_hw);
@@ -311,3 +406,50 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
return 0;
}
#endif
+
+/**
+ * ti_clk_setup_features - setup clock features flags
+ * @features: features definition to use
+ *
+ * Initializes the clock driver features flags based on platform
+ * provided data. No return value.
+ */
+void __init ti_clk_setup_features(struct ti_clk_features *features)
+{
+ memcpy(&ti_clk_features, features, sizeof(*features));
+}
+
+/**
+ * ti_clk_get_features - get clock driver features flags
+ *
+ * Get TI clock driver features description. Returns a pointer
+ * to the current feature setup.
+ */
+const struct ti_clk_features *ti_clk_get_features(void)
+{
+ return &ti_clk_features;
+}
+
+/**
+ * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
+ * @clk_names: ptr to an array of strings of clock names to enable
+ * @num_clocks: number of clock names in @clk_names
+ *
+ * Prepare and enable a list of clocks, named by @clk_names. No
+ * return value. XXX Deprecated; only needed until these clocks are
+ * properly claimed and enabled by the drivers or core code that uses
+ * them. XXX What code disables & calls clk_put on these clocks?
+ */
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+{
+ struct clk *init_clk;
+ int i;
+
+ for (i = 0; i < num_clocks; i++) {
+ init_clk = clk_get(NULL, clk_names[i]);
+ if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
+ clk_names[i]))
+ continue;
+ clk_prepare_enable(init_clk);
+ }
+}
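
One subtlety worth spelling out: the clk_memmap_writel()/clk_memmap_readl() helpers at the top of this file never treat their void __iomem * argument as a real address. Instead, ti_clk_get_reg_addr() packs a provider index and register offset into the pointer-sized value, and the helpers recover them by reinterpreting that storage as a struct clk_omap_reg. A sketch of the convention, with an illustrative field layout (the authoritative definition lives in include/linux/clk/ti.h):

struct clk_omap_reg {
	u16 offset;	/* register offset within the provider's map */
	u16 index;	/* which clk_memmaps[] entry to dispatch to */
};

/* unpack in place, as the helpers above do */
struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
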
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
new file mode 100644
index 000000000000..90d7d8a21c49
--- /dev/null
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -0,0 +1,316 @@
+/*
+ * Default clock type
+ *
+ * Copyright (C) 2005-2008, 2015 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#include "clock.h"
+
+/*
+ * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
+ * for a module to indicate that it is no longer in idle
+ */
+#define MAX_MODULE_ENABLE_WAIT 100000
+
+/*
+ * CM module register offsets, used for calculating the companion
+ * register addresses.
+ */
+#define CM_FCLKEN 0x0000
+#define CM_ICLKEN 0x0010
+
+/**
+ * _wait_idlest_generic - wait for a module to leave the idle state
+ * @clk: module clock to wait for (needed for register offsets)
+ * @reg: virtual address of module IDLEST register
+ * @mask: value to mask against to determine if the module is active
+ * @idlest: idle state indicator (0 or 1) for the clock
+ * @name: name of the clock (for printk)
+ *
+ * Wait for a module to leave idle, where its idle-status register is
+ * not inside the CM module. Returns 1 if the module left idle
+ * promptly, or 0 if the module did not leave idle before the timeout
+ * elapsed. XXX Deprecated - should be moved into drivers for the
+ * individual IP block that the IDLEST register exists in.
+ */
+static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
+ u32 mask, u8 idlest, const char *name)
+{
+ int i = 0, ena = 0;
+
+ ena = (idlest) ? 0 : mask;
+
+ /* Wait until module enters enabled state */
+ for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) {
+ if ((ti_clk_ll_ops->clk_readl(reg) & mask) == ena)
+ break;
+ udelay(1);
+ }
+
+ if (i < MAX_MODULE_ENABLE_WAIT)
+ pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
+ name, i);
+ else
+ pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
+ name, MAX_MODULE_ENABLE_WAIT);
+
+ return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
+}
+
+/**
+ * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
+ * @clk: struct clk * belonging to the module
+ *
+ * If the necessary clocks for the OMAP hardware IP block that
+ * corresponds to clock @clk are enabled, then wait for the module to
+ * indicate readiness (i.e., to leave IDLE). This code does not
+ * belong in the clock code and will be moved in the medium term to
+ * module-dependent code. No return value.
+ */
+static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
+{
+ void __iomem *companion_reg, *idlest_reg;
+ u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
+ s16 prcm_mod;
+ int r;
+
+ /* Not all modules have multiple clocks that their IDLEST depends on */
+ if (clk->ops->find_companion) {
+ clk->ops->find_companion(clk, &companion_reg, &other_bit);
+ if (!(ti_clk_ll_ops->clk_readl(companion_reg) &
+ (1 << other_bit)))
+ return;
+ }
+
+ clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
+ r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod,
+ &idlest_reg_id);
+ if (r) {
+ /* IDLEST register not in the CM module */
+ _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
+ idlest_val, clk_hw_get_name(&clk->hw));
+ } else {
+ ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
+ idlest_bit);
+ }
+}
+
+/**
+ * omap2_clk_dflt_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+ * @other_bit: u8 ** to return the companion clock bit shift in
+ *
+ * Note: We don't need special code here for INVERT_ENABLE for the
+ * time being since INVERT_ENABLE only applies to clocks enabled by
+ * CM_CLKEN_PLL.
+ *
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
+ * just a matter of XORing the bits.
+ *
+ * Some clocks don't have companion clocks. For example, modules with
+ * only an interface clock (such as MAILBOXES) don't have a companion
+ * clock. Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active. Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts. No return value.
+ */
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg, u8 *other_bit)
+{
+ u32 r;
+
+ /*
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
+ * it's just a matter of XORing the bits.
+ */
+ r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
+
+ *other_reg = (__force void __iomem *)r;
+ *other_bit = clk->enable_bit;
+}
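+
+/*
+ * The XOR above relies on the fixed CM register layout: CM_FCLKEN ^
+ * CM_ICLKEN = 0x10, so e.g. a CM_FCLKEN1 address (offset 0x00) maps to
+ * the matching CM_ICLKEN1 address (offset 0x10), and vice versa.
+ */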
+
+/**
+ * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
+ * @clk: struct clk * to find IDLEST info for
+ * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
+ * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
+ * @idlest_val: u8 * to return the idle status indicator
+ *
+ * Return the CM_IDLEST register address and bit shift corresponding
+ * to the module that "owns" this clock. This default code assumes
+ * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
+ * the IDLEST register address ID corresponds to the CM_*CLKEN
+ * register address ID (e.g., that CM_FCLKEN2 corresponds to
+ * CM_IDLEST2). This is not true for all modules. No return value.
+ */
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg, u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = clk->enable_bit;
+
+ /*
+ * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
+ * 34xx reverses this, just to keep us on our toes
+ * AM35xx uses both, depending on the module.
+ */
+ *idlest_val = ti_clk_get_features()->cm_idlest_val;
+}
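+
+/*
+ * The address arithmetic above maps any CM_*CLKEN offset to its
+ * CM_IDLEST counterpart: (reg & ~0xf0) | 0x20 turns offsets 0x00/0x10
+ * (CM_{F,I}CLKEN1) into 0x20 (CM_IDLEST1), and 0x04/0x14 into 0x24
+ * (CM_IDLEST2).
+ */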
+
+/**
+ * omap2_dflt_clk_enable - enable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to enable
+ *
+ * Enable the clock @hw in the hardware. We first call into the OMAP
+ * clockdomain code to "enable" the corresponding clockdomain if this
+ * is the first enabled user of the clockdomain. Then program the
+ * hardware to enable the clock. Then wait for the IP block that uses
+ * this clock to leave idle (if applicable). Returns the error value
+ * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
+ * if @hw has a null clock enable_reg, or zero upon success.
+ */
+int omap2_dflt_clk_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ u32 v;
+ int ret = 0;
+ bool clkdm_control;
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL)
+ clkdm_control = false;
+ else
+ clkdm_control = true;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (clkdm_control && clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, ret);
+ return ret;
+ }
+ }
+
+ if (unlikely(!clk->enable_reg)) {
+ pr_err("%s: %s missing enable_reg\n", __func__,
+ clk_hw_get_name(hw));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* FIXME should not have INVERT_ENABLE bit here */
+ v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+ if (clk->flags & INVERT_ENABLE)
+ v &= ~(1 << clk->enable_bit);
+ else
+ v |= (1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
+ v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */
+
+ if (clk->ops && clk->ops->find_idlest)
+ _omap2_module_wait_ready(clk);
+
+ return 0;
+
+err:
+ if (clkdm_control && clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+ return ret;
+}
+
+/**
+ * omap2_dflt_clk_disable - disable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to disable
+ *
+ * Disable the clock @hw in the hardware, and call into the OMAP
+ * clockdomain code to "disable" the corresponding clockdomain if all
+ * clocks/hwmods in that clockdomain are now disabled. No return
+ * value.
+ */
+void omap2_dflt_clk_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ u32 v;
+
+ clk = to_clk_hw_omap(hw);
+ if (!clk->enable_reg) {
+ /*
+ * 'independent' here refers to a clock which is not
+ * controlled by its parent.
+ */
+ pr_err("%s: independent clock %s has no enable_reg\n",
+ __func__, clk_hw_get_name(hw));
+ return;
+ }
+
+ v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+ if (clk->flags & INVERT_ENABLE)
+ v |= (1 << clk->enable_bit);
+ else
+ v &= ~(1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
+ /* No OCP barrier needed here since it is a disable operation */
+
+ if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) &&
+ clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/**
+ * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
+ * @hw: struct clk_hw * to check
+ *
+ * Return 1 if the clock represented by @hw is enabled in the
+ * hardware, or 0 otherwise. Intended for use in the struct
+ * clk_ops.is_enabled function pointer.
+ */
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+
+ if (clk->flags & INVERT_ENABLE)
+ v ^= BIT(clk->enable_bit);
+
+ v &= BIT(clk->enable_bit);
+
+ return v ? 1 : 0;
+}
+
+const struct clk_hw_omap_ops clkhwops_wait = {
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
new file mode 100644
index 000000000000..9023ca9caf84
--- /dev/null
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -0,0 +1,370 @@
+/*
+ * OMAP2/3/4 DPLL clock functions
+ *
+ * Copyright (C) 2005-2008 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include <asm/div64.h>
+
+#include "clock.h"
+
+/* DPLL rate rounding: minimum DPLL multiplier, divider values */
+#define DPLL_MIN_MULTIPLIER 2
+#define DPLL_MIN_DIVIDER 1
+
+/* Possible error results from _dpll_test_mult */
+#define DPLL_MULT_UNDERFLOW -1
+
+/*
+ * Scale factor to mitigate roundoff errors in DPLL rate rounding.
+ * The higher the scale factor, the greater the risk of arithmetic overflow,
+ * but the closer the rounded rate is to the target rate. DPLL_SCALE_FACTOR
+ * must be a power of DPLL_SCALE_BASE.
+ */
+#define DPLL_SCALE_FACTOR 64
+#define DPLL_SCALE_BASE 2
+#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \
+ (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
+
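+/*
+ * Worked example: with DPLL_SCALE_BASE = 2 and DPLL_SCALE_FACTOR = 64,
+ * DPLL_ROUNDING_VAL is (2 / 2) * (64 / 2) = 32, i.e. half the scale
+ * factor, so unscaling a prescaled m in _dpll_test_mult() rounds
+ * half-up: m = m / 64, plus one if m % 64 >= 32.
+ */
+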
+/*
+ * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
+ * From device data manual section 4.3 "DPLL and DLL Specifications".
+ */
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000
+
+/* _dpll_test_fint() return codes */
+#define DPLL_FINT_UNDERFLOW -1
+#define DPLL_FINT_INVALID -2
+
+/* Private functions */
+
+/*
+ * _dpll_test_fint - test whether an Fint value is valid for the DPLL
+ * @clk: DPLL struct clk to test
+ * @n: divider value (N) to test
+ *
+ * Tests whether a particular divider @n will result in a valid DPLL
+ * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
+ * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate
+ * (assuming that it is counting N upwards), or -2 if the enclosing loop
+ * should skip to the next iteration (again assuming N is increasing).
+ */
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
+{
+ struct dpll_data *dd;
+ long fint, fint_min, fint_max;
+ int ret = 0;
+
+ dd = clk->dpll_data;
+
+ /* DPLL divider must result in a valid jitter correction val */
+ fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n;
+
+ if (dd->flags & DPLL_J_TYPE) {
+ fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
+ fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
+ } else {
+ fint_min = ti_clk_get_features()->fint_min;
+ fint_max = ti_clk_get_features()->fint_max;
+ }
+
+ if (!fint_min || !fint_max) {
+ WARN(1, "No fint limits available!\n");
+ return DPLL_FINT_INVALID;
+ }
+
+ if (fint < fint_min) {
+ pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
+ n);
+ dd->max_divider = n;
+ ret = DPLL_FINT_UNDERFLOW;
+ } else if (fint > fint_max) {
+ pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
+ n);
+ dd->min_divider = n;
+ ret = DPLL_FINT_INVALID;
+ } else if (fint > ti_clk_get_features()->fint_band1_max &&
+ fint < ti_clk_get_features()->fint_band2_min) {
+ pr_debug("rejecting n=%d due to Fint failure\n", n);
+ ret = DPLL_FINT_INVALID;
+ }
+
+ return ret;
+}
+
+static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
+ unsigned int m, unsigned int n)
+{
+ unsigned long long num;
+
+ num = (unsigned long long)parent_rate * m;
+ do_div(num, n);
+ return num;
+}
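+
+/*
+ * Example (illustrative values): with parent_rate = 19200000 (19.2 MHz),
+ * m = 50 and n = 2, _dpll_compute_new_rate() returns
+ * 19200000 * 50 / 2 = 480000000 (480 MHz).
+ */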
+
+/*
+ * _dpll_test_mult - test a DPLL multiplier value
+ * @m: pointer to the DPLL m (multiplier) value under test
+ * @n: current DPLL n (divider) value under test
+ * @new_rate: pointer to storage for the resulting rounded rate
+ * @target_rate: the desired DPLL rate
+ * @parent_rate: the DPLL's parent clock rate
+ *
+ * This code tests a DPLL multiplier value, ensuring that the
+ * resulting rate will not be higher than the target_rate, and that
+ * the multiplier value itself is valid for the DPLL. Initially, the
+ * integer pointed to by the m argument should be prescaled by
+ * multiplying by DPLL_SCALE_FACTOR. The code will replace this with
+ * a non-scaled m upon return. This non-scaled m will result in a
+ * new_rate as close as possible to target_rate (but not greater than
+ * target_rate) given the current (parent_rate, n, prescaled m)
+ * triple. Returns DPLL_MULT_UNDERFLOW in the event that the
+ * non-scaled m attempted to underflow, which can allow the calling
+ * function to bail out early; or 0 upon success.
+ */
+static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
+ unsigned long target_rate,
+ unsigned long parent_rate)
+{
+ int r = 0, carry = 0;
+
+ /* Unscale m and round if necessary */
+ if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
+ carry = 1;
+ *m = (*m / DPLL_SCALE_FACTOR) + carry;
+
+ /*
+ * The new rate must be <= the target rate to avoid programming
+ * a rate that is impossible for the hardware to handle
+ */
+ *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+ if (*new_rate > target_rate) {
+ (*m)--;
+ *new_rate = 0;
+ }
+
+ /* Guard against m underflow */
+ if (*m < DPLL_MIN_MULTIPLIER) {
+ *m = DPLL_MIN_MULTIPLIER;
+ *new_rate = 0;
+ r = DPLL_MULT_UNDERFLOW;
+ }
+
+ if (*new_rate == 0)
+ *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+
+ return r;
+}
+
+/**
+ * _omap2_dpll_is_in_bypass - check whether a DPLL is in bypass mode
+ * @v: bitfield value of the DPLL enable
+ *
+ * Checks given DPLL enable bitfield to see whether the DPLL is in bypass
+ * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise.
+ */
+static int _omap2_dpll_is_in_bypass(u32 v)
+{
+ u8 mask, val;
+
+ mask = ti_clk_get_features()->dpll_bypass_vals;
+
+ /*
+ * Each set bit in the mask corresponds to a valid bypass value
+ * equal to that bit's position. Walk the set bits in the mask
+ * and compare each position against the given register value.
+ */
+ while (mask) {
+ val = __ffs(mask);
+ mask ^= (1 << val);
+ if (v == val)
+ return 1;
+ }
+
+ return 0;
+}
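+
+/*
+ * For example, a dpll_bypass_vals mask of (1 << 5) | (1 << 6) = 0x60,
+ * as used when low-power bypass is 0x5 and fast-relock bypass is 0x6,
+ * makes _omap2_dpll_is_in_bypass() return 1 for v == 5 or v == 6.
+ */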
+
+/* Public functions */
+u8 omap2_init_dpll_parent(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+ v &= dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* Reparent the struct clk in case the dpll is in bypass */
+ if (_omap2_dpll_is_in_bypass(v))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
+ * @clk: struct clk * of a DPLL
+ *
+ * DPLLs can be locked or bypassed - basically, enabled or disabled.
+ * When locked, the DPLL output depends on the M and N values. When
+ * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock
+ * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
+ * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
+ * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
+ * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
+ * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
+ * if the clock @clk is not a DPLL.
+ */
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
+{
+ long long dpll_clk;
+ u32 dpll_mult, dpll_div, v;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return 0;
+
+ /* Return bypass rate if DPLL is bypassed */
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+ v &= dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ if (_omap2_dpll_is_in_bypass(v))
+ return clk_get_rate(dd->clk_bypass);
+
+ v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
+ dpll_mult = v & dd->mult_mask;
+ dpll_mult >>= __ffs(dd->mult_mask);
+ dpll_div = v & dd->div1_mask;
+ dpll_div >>= __ffs(dd->div1_mask);
+
+ dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
+ do_div(dpll_clk, dpll_div + 1);
+
+ return dpll_clk;
+}
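+
+/*
+ * Locked-rate example (illustrative values): with a 19.2 MHz clk_ref,
+ * dpll_mult = 125 and dpll_div = 2, the CLKOUT rate is
+ * 19200000 * 125 / (2 + 1) = 800000000 (800 MHz).
+ */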
+
+/* DPLL rate rounding code */
+
+/**
+ * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * @hw: struct clk_hw * for a DPLL
+ * @target_rate: desired DPLL clock rate
+ * @parent_rate: pointer to the parent clock rate (unused here)
+ *
+ * Given a DPLL and a desired target rate, round the target rate to a
+ * possible, programmable rate for this DPLL. Attempts to select the
+ * minimum possible n. Stores the computed (m, n) in the DPLL's
+ * dpll_data structure so set_rate() will not need to call this
+ * (expensive) function again. Returns ~0 if the target rate cannot
+ * be rounded, or the rounded rate upon success.
+ */
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int m, n, r, scaled_max_m;
+ int min_delta_m = INT_MAX, min_delta_n = INT_MAX;
+ unsigned long scaled_rt_rp;
+ unsigned long new_rate = 0;
+ struct dpll_data *dd;
+ unsigned long ref_rate;
+ long delta;
+ long prev_min_delta = LONG_MAX;
+ const char *clk_name;
+
+ if (!clk || !clk->dpll_data)
+ return ~0;
+
+ dd = clk->dpll_data;
+
+ ref_rate = clk_get_rate(dd->clk_ref);
+ clk_name = clk_hw_get_name(hw);
+ pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
+ clk_name, target_rate);
+
+ scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
+
+ dd->last_rounded_rate = 0;
+
+ for (n = dd->min_divider; n <= dd->max_divider; n++) {
+ /* Is the (input clk, divider) pair valid for the DPLL? */
+ r = _dpll_test_fint(clk, n);
+ if (r == DPLL_FINT_UNDERFLOW)
+ break;
+ else if (r == DPLL_FINT_INVALID)
+ continue;
+
+ /* Compute the scaled DPLL multiplier, based on the divider */
+ m = scaled_rt_rp * n;
+
+ /*
+ * Since we're counting n up, a m overflow means we
+ * can bail out completely (since as n increases in
+ * the next iteration, there's no way that m can
+ * increase beyond the current m)
+ */
+ if (m > scaled_max_m)
+ break;
+
+ r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ ref_rate);
+
+ /* m can't be set low enough for this n - try with a larger n */
+ if (r == DPLL_MULT_UNDERFLOW)
+ continue;
+
+ /* skip rates above our target rate */
+ delta = target_rate - new_rate;
+ if (delta < 0)
+ continue;
+
+ if (delta < prev_min_delta) {
+ prev_min_delta = delta;
+ min_delta_m = m;
+ min_delta_n = n;
+ }
+
+ pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
+ clk_name, m, n, new_rate);
+
+ if (delta == 0)
+ break;
+ }
+
+ if (prev_min_delta == LONG_MAX) {
+ pr_debug("clock: %s: cannot round to rate %lu\n",
+ clk_name, target_rate);
+ return ~0;
+ }
+
+ dd->last_rounded_m = min_delta_m;
+ dd->last_rounded_n = min_delta_n;
+ dd->last_rounded_rate = target_rate - prev_min_delta;
+
+ return dd->last_rounded_rate;
+}
diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c
new file mode 100644
index 000000000000..38c36908cf88
--- /dev/null
+++ b/drivers/clk/ti/clkt_iclk.c
@@ -0,0 +1,101 @@
+/*
+ * OMAP2/3 interface clock control
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* Register offsets */
+#define OMAP24XX_CM_FCLKEN2 0x04
+#define CM_AUTOIDLE 0x30
+#define CM_ICLKEN 0x10
+#define CM_IDLEST 0x20
+
+#define OMAP24XX_CM_IDLEST_VAL 0
+
+/* Private functions */
+
+/* XXX Set the CM_AUTOIDLE bit to let the interface clock autoidle */
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
+{
+ u32 v;
+ void __iomem *r;
+
+ r = (__force void __iomem *)
+ ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+
+ v = ti_clk_ll_ops->clk_readl(r);
+ v |= (1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, r);
+}
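+
+/*
+ * The XOR above relies on the fixed CM register layout: CM_AUTOIDLE ^
+ * CM_ICLKEN = 0x20, so e.g. a CM_ICLKEN1 address (offset 0x10) maps to
+ * the matching CM_AUTOIDLE1 address (offset 0x30), and CM_ICLKEN2
+ * (0x14) to CM_AUTOIDLE2 (0x34).
+ */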
+
+/* XXX Clear the CM_AUTOIDLE bit to keep the interface clock from autoidling */
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
+{
+ u32 v;
+ void __iomem *r;
+
+ r = (__force void __iomem *)
+ ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+
+ v = ti_clk_ll_ops->clk_readl(r);
+ v &= ~(1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, r);
+}
+
+/**
+ * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+ * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
+ * passes back the correct CM_IDLEST register address for I2CHS
+ * modules. No return value.
+ */
+static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ u32 r;
+
+ r = ((__force u32)clk->enable_reg ^ (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST));
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = clk->enable_bit;
+ *idlest_val = OMAP24XX_CM_IDLEST_VAL;
+}
+
+/* Public data */
+
+const struct clk_hw_omap_ops clkhwops_iclk = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+};
+
+const struct clk_hw_omap_ops clkhwops_iclk_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/* 2430 I2CHS has non-standard IDLEST register */
+const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = {
+ .find_idlest = omap2430_clk_i2chs_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 404158d2d7f8..90f3f472ae1c 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -154,6 +154,35 @@ struct ti_clk_dpll {
u8 recal_st_bit;
};
+/* Composite clock component types */
+enum {
+ CLK_COMPONENT_TYPE_GATE = 0,
+ CLK_COMPONENT_TYPE_DIVIDER,
+ CLK_COMPONENT_TYPE_MUX,
+ CLK_COMPONENT_TYPE_MAX,
+};
+
+/**
+ * struct ti_dt_clk - OMAP DT clock alias declarations
+ * @lk: clock lookup definition
+ * @node_name: clock DT node to map to
+ */
+struct ti_dt_clk {
+ struct clk_lookup lk;
+ char *node_name;
+};
+
+#define DT_CLK(dev, con, name) \
+ { \
+ .lk = { \
+ .dev_id = dev, \
+ .con_id = con, \
+ }, \
+ .node_name = name, \
+ }
+
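+/*
+ * Example alias table (node and clock names are illustrative):
+ *
+ *	static struct ti_dt_clk omap_clks[] = {
+ *		DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
+ *		{ .node_name = NULL },
+ *	};
+ *
+ * registered with ti_dt_clocks_register(omap_clks).
+ */
+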
+typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
+
struct clk *ti_clk_register_gate(struct ti_clk *setup);
struct clk *ti_clk_register_interface(struct ti_clk *setup);
struct clk *ti_clk_register_mux(struct ti_clk *setup);
@@ -169,4 +198,80 @@ void ti_clk_patch_legacy_clks(struct ti_clk **patch);
struct clk *ti_clk_register_clk(struct ti_clk *setup);
int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
+void ti_dt_clocks_register(struct ti_dt_clk *oclks);
+int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+ ti_of_clk_init_cb_t func);
+int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
+int of_ti_clk_autoidle_setup(struct device_node *node);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
+
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+
+extern const struct clk_ops ti_clk_divider_ops;
+extern const struct clk_ops ti_clk_mux_ops;
+
+int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg,
+ u8 *other_bit);
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit, u8 *idlest_val);
+
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
+
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index);
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index);
+void omap3_clk_lock_dpll5(void);
+
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate);
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+
+extern struct ti_clk_ll_ops *ti_clk_ll_ops;
+
#endif
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index b82ef07f3403..b9bc3b8df659 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -15,15 +15,94 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
+/**
+ * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being enabled
+ *
+ * Increment the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 1, the clockdomain will be "enabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_enable() as
+ * their enable function pointer. Passes along the return value of
+ * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
+ * clockdomain, or 0 if clock framework-based clockdomain control is
+ * not implemented.
+ */
+int omap2_clkops_enable_clkdm(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ int ret = 0;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ if (unlikely(clk->enable_reg))
+ pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
+ clk_hw_get_name(hw));
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw), clk->clkdm_name, ret);
+
+ return ret;
+}
+
+/**
+ * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being disabled
+ *
+ * Decrement the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 0, the clockdomain will be "disabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
+ * disable function pointer. No return value.
+ */
+void omap2_clkops_disable_clkdm(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ clk_hw_get_name(hw));
+ return;
+ }
+
+ if (unlikely(clk->enable_reg))
+ pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
+ clk_hw_get_name(hw));
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, clk_hw_get_name(hw));
+ return;
+ }
+
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
static void __init of_ti_clockdomain_setup(struct device_node *node)
{
struct clk *clk;
@@ -41,12 +120,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
__func__, node->full_name, i, PTR_ERR(clk));
continue;
}
- if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+ clk_hw = __clk_get_hw(clk);
+ if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
continue;
}
- clk_hw = __clk_get_hw(clk);
to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
omap2_init_clk_clkdm(clk_hw);
}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 96f83cedb4b3..dbef218fe5ec 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -276,7 +276,6 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
int num_parents;
const char **parent_names;
struct component_clk *clk;
- int i;
num_parents = of_clk_get_parent_count(node);
@@ -289,8 +288,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
if (!parent_names)
return -ENOMEM;
- for (i = 0; i < num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
+ of_clk_parent_fill(node, parent_names, num_parents);
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk) {
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ff5f117950a9..5b1726829e6d 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -109,7 +109,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
if (!div) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return parent_rate;
}
@@ -155,7 +155,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
maxdiv = _get_maxdiv(divider);
- if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
bestdiv = DIV_ROUND_UP(parent_rate, rate);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
@@ -181,7 +181,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
MULT_ROUND_UP(rate, i));
now = DIV_ROUND_UP(parent_rate, i);
if (now <= rate && now > best) {
@@ -194,7 +194,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!bestdiv) {
bestdiv = _get_maxdiv(divider);
*best_parent_rate =
- __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ clk_hw_round_rate(clk_hw_get_parent(hw), 1);
}
return bestdiv;
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 2aacf7a3bcae..5519b386edc0 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -162,7 +163,7 @@ static void __init _register_dpll(struct clk_hw *hw,
clk = clk_register(NULL, &clk_hw->hw);
if (!IS_ERR(clk)) {
- omap2_init_clk_hw_omap_clocks(clk);
+ omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(clk_hw->hw.init->parent_names);
kfree(clk_hw->hw.init);
@@ -319,7 +320,7 @@ static void _register_dpll_x2(struct device_node *node,
if (IS_ERR(clk)) {
kfree(clk_hw);
} else {
- omap2_init_clk_hw_omap_clocks(clk);
+ omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
}
@@ -341,7 +342,6 @@ static void __init of_ti_dpll_setup(struct device_node *node,
struct clk_init_data *init = NULL;
const char **parent_names = NULL;
struct dpll_data *dd = NULL;
- int i;
u8 dpll_mode = 0;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
@@ -370,8 +370,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
if (!parent_names)
goto cleanup;
- for (i = 0; i < init->num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
+ of_clk_parent_fill(node, parent_names, init->num_parents);
init->parent_names = parent_names;
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
new file mode 100644
index 000000000000..f4dec00fb684
--- /dev/null
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -0,0 +1,817 @@
+/*
+ * OMAP3/4 - specific DPLL control functions
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * Testing and integration fixes by Jouni Högander
+ *
+ * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
+ * Menon
+ *
+ * Parts of this code are based on code written by
+ * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
+#define DPLL_AUTOIDLE_DISABLE 0x0
+#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
+
+#define MAX_DPLL_WAIT_TRIES 1000000
+
+#define OMAP3XXX_EN_DPLL_LOCKED 0x7
+
+/* Forward declarations */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
+
+/* Private functions */
+
+/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
+static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+ v &= ~dd->enable_mask;
+ v |= clken_bits << __ffs(dd->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+}
+
+/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
+static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
+{
+ const struct dpll_data *dd;
+ int i = 0;
+ int ret = -EINVAL;
+ const char *clk_name;
+
+ dd = clk->dpll_data;
+ clk_name = clk_hw_get_name(&clk->hw);
+
+ state <<= __ffs(dd->idlest_mask);
+
+ while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask)
+ != state) && i < MAX_DPLL_WAIT_TRIES) {
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_DPLL_WAIT_TRIES) {
+ pr_err("clock: %s failed transition to '%s'\n",
+ clk_name, (state) ? "locked" : "bypassed");
+ } else {
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk_name, (state) ? "locked" : "bypassed", i);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* From 3430 TRM ES2 4.7.6.2 */
+static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
+{
+ unsigned long fint;
+ u16 f = 0;
+
+ fint = clk_get_rate(clk->dpll_data->clk_ref) / n;
+
+ pr_debug("clock: fint is %lu\n", fint);
+
+ if (fint >= 750000 && fint <= 1000000)
+ f = 0x3;
+ else if (fint > 1000000 && fint <= 1250000)
+ f = 0x4;
+ else if (fint > 1250000 && fint <= 1500000)
+ f = 0x5;
+ else if (fint > 1500000 && fint <= 1750000)
+ f = 0x6;
+ else if (fint > 1750000 && fint <= 2100000)
+ f = 0x7;
+ else if (fint > 7500000 && fint <= 10000000)
+ f = 0xB;
+ else if (fint > 10000000 && fint <= 12500000)
+ f = 0xC;
+ else if (fint > 12500000 && fint <= 15000000)
+ f = 0xD;
+ else if (fint > 15000000 && fint <= 17500000)
+ f = 0xE;
+ else if (fint > 17500000 && fint <= 21000000)
+ f = 0xF;
+ else
+ pr_debug("clock: unknown freqsel setting for %d\n", n);
+
+ return f;
+}
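+
+/*
+ * Freqsel example (illustrative values): with a 19.2 MHz reference and
+ * n = 12, fint = 19200000 / 12 = 1600000, which falls in the
+ * 1.5 MHz < fint <= 1.75 MHz band above, so the computed freqsel is 0x6.
+ */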
+
+/*
+ * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
+ * readiness before returning. Will save and restore the DPLL's
+ * autoidle state across the enable, per the CDP code. If the DPLL
+ * locked successfully, return 0; if the DPLL did not lock in the time
+ * allotted, or DPLL3 was passed in, return -EINVAL.
+ */
+static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u8 ai;
+ u8 state = 1;
+ int r = 0;
+
+ pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+ dd = clk->dpll_data;
+ state <<= __ffs(dd->idlest_mask);
+
+ /* Check if already locked */
+ if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) ==
+ state)
+ goto done;
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ if (ai)
+ omap3_dpll_deny_idle(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+ r = _omap3_wait_dpll_status(clk, 1);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+done:
+ return r;
+}
+
+/*
+ * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power bypass mode. In
+ * bypass mode, the DPLL's rate is set equal to its parent clock's
+ * rate. Waits for the DPLL to report readiness before returning.
+ * Will save and restore the DPLL's autoidle state across the enable,
+ * per the CDP code. If the DPLL entered bypass mode successfully,
+ * return 0; if the DPLL did not enter bypass in the time allotted, or
+ * DPLL3 was passed in, or the DPLL does not support low-power bypass,
+ * return -EINVAL.
+ */
+static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
+{
+ int r;
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
+ return -EINVAL;
+
+ pr_debug("clock: configuring DPLL %s for low-power bypass\n",
+ clk_hw_get_name(&clk->hw));
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
+
+ r = _omap3_wait_dpll_status(clk, 0);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+ return r;
+}
+
+/*
+ * _omap3_noncore_dpll_stop - instruct a DPLL to stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. Will save and
+ * restore the DPLL's autoidle state across the stop, per the CDP
+ * code. If DPLL3 was passed in, or the DPLL does not support
+ * low-power stop, return -EINVAL; otherwise, return 0.
+ */
+static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
+{
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
+ return -EINVAL;
+
+ pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+ return 0;
+}
+
+/**
+ * _lookup_dco - Lookup DCO used by j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @dco: digital control oscillator selector
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
+{
+ unsigned long fint, clkinp; /* watch out for overflow */
+
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+ fint = (clkinp / n) * m;
+
+ if (fint < 1000000000)
+ *dco = 2;
+ else
+ *dco = 4;
+}
+
+/**
+ * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @sd_div: target sigma-delta divider
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
+{
+ unsigned long clkinp, sd; /* watch out for overflow */
+ int mod1, mod2;
+
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+
+ /*
+ * target sigma-delta to near 250MHz
+ * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
+ */
+ clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
+ mod1 = (clkinp * m) % (250 * n);
+ sd = (clkinp * m) / (250 * n);
+ mod2 = sd % 10;
+ sd /= 10;
+
+ if (mod1 || mod2)
+ sd++;
+ *sd_div = sd;
+}
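+
+/*
+ * Sigma-delta example (illustrative values): with clkinp = 38.4 MHz,
+ * m = 25 and n = 2, clkinp /= 100000 gives 384, sd = (384 * 25) /
+ * (250 * 2) = 19, then sd /= 10 gives 1 with nonzero remainders, so
+ * sd is bumped to 2: ceil((25 / 2) * (38.4 / 250)) = 2.
+ */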
+
+/*
+ * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
+ * @clk: struct clk * of DPLL to set
+ * @freqsel: FREQSEL value to set
+ *
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
+ */
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
+{
+ struct dpll_data *dd = clk->dpll_data;
+ u8 dco, sd_div;
+ u32 v;
+
+ /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
+ _omap3_noncore_dpll_bypass(clk);
+
+ /*
+ * Set jitter correction. Jitter correction applicable for OMAP343X
+ * only since freqsel field is no longer present on other devices.
+ */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+ v &= ~dd->freqsel_mask;
+ v |= freqsel << __ffs(dd->freqsel_mask);
+ ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+ }
+
+ /* Set DPLL multiplier, divider */
+ v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
+
+ /* Handle Duty Cycle Correction */
+ if (dd->dcc_mask) {
+ if (dd->last_rounded_rate >= dd->dcc_rate)
+ v |= dd->dcc_mask; /* Enable DCC */
+ else
+ v &= ~dd->dcc_mask; /* Disable DCC */
+ }
+
+ v &= ~(dd->mult_mask | dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+
+ /* Configure dco and sd_div for dplls that have these fields */
+ if (dd->dco_mask) {
+ _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
+ v &= ~(dd->dco_mask);
+ v |= dco << __ffs(dd->dco_mask);
+ }
+ if (dd->sddiv_mask) {
+ _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+ dd->last_rounded_n);
+ v &= ~(dd->sddiv_mask);
+ v |= sd_div << __ffs(dd->sddiv_mask);
+ }
+
+ ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg);
+
+ /* Set 4X multiplier and low-power mode */
+ if (dd->m4xen_mask || dd->lpmode_mask) {
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+
+ if (dd->m4xen_mask) {
+ if (dd->last_rounded_m4xen)
+ v |= dd->m4xen_mask;
+ else
+ v &= ~dd->m4xen_mask;
+ }
+
+ if (dd->lpmode_mask) {
+ if (dd->last_rounded_lpmode)
+ v |= dd->lpmode_mask;
+ else
+ v &= ~dd->lpmode_mask;
+ }
+
+ ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+ }
+
+ /* We let the clock framework set the other output dividers later */
+
+ /* REVISIT: Set ramp-up delay? */
+
+ _omap3_noncore_dpll_lock(clk);
+
+ return 0;
+}
+
+/* Public functions */
+
+/**
+ * omap3_dpll_recalc - recalculate DPLL rate
+ * @hw: struct clk_hw * of the DPLL to recalculate
+ * @parent_rate: rate of the DPLL's parent clock
+ *
+ * Recalculate and propagate the DPLL rate.
+ */
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ return omap2_get_dpll_rate(clk);
+}
+
+/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
+
+/**
+ * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
+ * @hw: pointer to the struct clk_hw of the DPLL to enable
+ *
+ * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
+ * The choice of modes depends on the DPLL's programmed rate: if it is
+ * the same as the DPLL's parent clock, it will enter bypass;
+ * otherwise, it will enter lock. This code will wait for the DPLL to
+ * indicate readiness before returning, unless the DPLL takes too long
+ * to enter the target state. Intended to be used as the struct clk's
+ * enable function. If DPLL3 was passed in, or the DPLL does not
+ * support low-power stop, or if the DPLL took too long to enter
+ * bypass or lock, return -EINVAL; otherwise, return 0.
+ */
+int omap3_noncore_dpll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int r;
+ struct dpll_data *dd;
+ struct clk_hw *parent;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk->clkdm) {
+ r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (r) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, r);
+ return r;
+ }
+ }
+
+ parent = clk_hw_get_parent(hw);
+
+ if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+ WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
+ r = _omap3_noncore_dpll_bypass(clk);
+ } else {
+ WARN_ON(parent != __clk_get_hw(dd->clk_ref));
+ r = _omap3_noncore_dpll_lock(clk);
+ }
+
+ return r;
+}
+
+/**
+ * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
+ * @hw: pointer to the struct clk_hw of the DPLL to disable
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. This function is
+ * intended for use in struct clkops. No return value.
+ */
+void omap3_noncore_dpll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ _omap3_noncore_dpll_stop(clk);
+ if (clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/* Non-CORE DPLL rate set code */
+
+/**
+ * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired target rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success or a negative error value on failure.
+ */
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+
+ if (!req->rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ } else {
+ req->rate = omap2_dpll_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ }
+
+ req->best_parent_rate = req->rate;
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
+ * @hw: pointer to the clock to set parent for
+ * @index: parent index to select
+ *
+ * Sets parent for a DPLL clock. This sets the DPLL into bypass or
+ * locked mode. Returns 0 on success or a negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int ret;
+
+ if (!hw)
+ return -EINVAL;
+
+ if (index)
+ ret = _omap3_noncore_dpll_bypass(clk);
+ else
+ ret = _omap3_noncore_dpll_lock(clk);
+
+ return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
+ * @hw: pointer to the clock to set rate for
+ * @rate: target rate for the clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Sets the rate for a DPLL clock. First checks that the clock's parent
+ * is the reference clock (the rate can't be changed while the DPLL is
+ * in bypass mode), then proceeds with the rate change. Returns 0 on
+ * success or a negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u16 freqsel = 0;
+ int ret;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref))
+ return -EINVAL;
+
+ if (dd->last_rounded_rate == 0)
+ return -EINVAL;
+
+ /* Freqsel is available only on OMAP343X devices */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+ freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
+ WARN_ON(!freqsel);
+ }
+
+ pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
+ clk_hw_get_name(hw), rate);
+
+ ret = omap3_noncore_dpll_program(clk, freqsel);
+
+ return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
+ * @hw: pointer to the clock to set rate and parent for
+ * @rate: target rate for the DPLL
+ * @parent_rate: clock rate of the DPLL parent
+ * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
+ *
+ * Sets rate and parent for a DPLL clock. If the new parent is the
+ * bypass clock, only the parent is selected. Otherwise a rate change
+ * is carried out, which effectively also changes the parent, as the
+ * DPLL is put into locked mode. Returns 0 on success or a negative
+ * error value otherwise.
+ */
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ int ret;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ /*
+ * clk-ref is at index 0: in that case we only need to set the
+ * rate, as the parent changes automatically as part of the lock
+ * sequence. For clk-bypass we only need to change the parent.
+ */
+ if (index)
+ ret = omap3_noncore_dpll_set_parent(hw, index);
+ else
+ ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+
+ return ret;
+}
+
+/* DPLL autoidle read/set code */
+
+/**
+ * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
+ * @clk: struct clk * of the DPLL to read
+ *
+ * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
+ * -EINVAL if passed a null pointer or if the struct clk does not
+ * appear to refer to a DPLL.
+ */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_reg)
+ return -EINVAL;
+
+ v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+ v &= dd->autoidle_mask;
+ v >>= __ffs(dd->autoidle_mask);
+
+ return v;
+}
+
+/**
+ * omap3_dpll_allow_idle - enable DPLL autoidle bits
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Enable DPLL automatic idle control. This automatic idle mode
+ * switching takes effect only when the DPLL is locked, at least on
+ * OMAP3430. The DPLL will enter low-power stop when its downstream
+ * clocks are gated. No return value.
+ */
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_reg)
+ return;
+
+ /*
+ * REVISIT: CORE DPLL can optionally enter low-power bypass
+ * by writing 0x5 instead of 0x1. Add some mechanism to
+ * optionally enter this mode.
+ */
+ v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
+ ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
+}
+
+/**
+ * omap3_dpll_deny_idle - prevent DPLL from automatically idling
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Disable DPLL automatic idle control. No return value.
+ */
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_reg)
+ return;
+
+ v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
+ ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
+}
+
+/* Clock control for DPLL outputs */
+
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+ struct clk_hw_omap *pclk = NULL;
+
+ /* Walk up the parents of clk, looking for a DPLL */
+ do {
+ do {
+ hw = clk_hw_get_parent(hw);
+ } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
+ if (!hw)
+ break;
+ pclk = to_clk_hw_omap(hw);
+ } while (pclk && !pclk->dpll_data);
+
+ /* clk does not have a DPLL as a parent? error in the clock data */
+ if (!pclk) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @hw: DPLL output struct clk_hw
+ * @parent_rate: rate of the parent clock
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ const struct dpll_data *dd;
+ unsigned long rate;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!parent_rate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
+ dd = pclk->dpll_data;
+
+ WARN_ON(!dd->enable_mask);
+
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+ if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
+ rate = parent_rate;
+ else
+ rate = parent_rate * 2;
+ return rate;
+}
+
+/* OMAP3/4 non-CORE DPLL clkops */
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
+ .allow_idle = omap3_dpll_allow_idle,
+ .deny_idle = omap3_dpll_deny_idle,
+};
+
+/**
+ * omap3_dpll4_set_rate - set rate for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Check whether the current SoC supports the per-dpll reprogram
+ * operation, and do the rate change if it does. Returns -EINVAL
+ * if not supported, 0 for success, and potential error codes from the
+ * clock rate change.
+ */
+int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * According to the 12-5 CDP code from TI, "Limitation 2.5"
+ * on 3430ES1 prevents us from changing DPLL multipliers or dividers
+ * on DPLL4.
+ */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+ pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+ return -EINVAL;
+ }
+
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
+
+/**
+ * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ * @index: parent index, 0 - reference clock, 1 - bypass clock
+ *
+ * Check whether the current SoC supports the per-dpll reprogram
+ * operation, and do the rate + parent change if it does. Returns
+ * -EINVAL if not supported, 0 for success, and potential error codes
+ * from the clock rate change.
+ */
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+ pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+ return -EINVAL;
+ }
+
+ return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
+ index);
+}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
new file mode 100644
index 000000000000..660d7436ac24
--- /dev/null
+++ b/drivers/clk/ti/dpll44xx.c
@@ -0,0 +1,227 @@
+/*
+ * OMAP4-specific DPLL control functions
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can be supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX 1000000
+#define OMAP4_DPLL_LP_FOUT_MAX 100000000
+
+/*
+ * Bitfield declarations
+ */
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK BIT(8)
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK BIT(10)
+#define OMAP4430_DPLL_REGM4XEN_MASK BIT(11)
+
+/* Static rate multiplier for OMAP4 REGM4XEN clocks */
+#define OMAP4430_REGM4XEN_MULT 4
+
+static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
+{
+ u32 v;
+ u32 mask;
+
+ if (!clk || !clk->clksel_reg)
+ return;
+
+ mask = clk->flags & CLOCK_CLKOUTX2 ?
+ OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+ OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+ v = ti_clk_ll_ops->clk_readl(clk->clksel_reg);
+ /* Clear the bit to allow gatectrl */
+ v &= ~mask;
+ ti_clk_ll_ops->clk_writel(v, clk->clksel_reg);
+}
+
+static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
+{
+ u32 v;
+ u32 mask;
+
+ if (!clk || !clk->clksel_reg)
+ return;
+
+ mask = clk->flags & CLOCK_CLKOUTX2 ?
+ OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+ OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+ v = ti_clk_ll_ops->clk_readl(clk->clksel_reg);
+ /* Set the bit to deny gatectrl */
+ v |= mask;
+ ti_clk_ll_ops->clk_writel(v, clk->clksel_reg);
+}
+
+const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
+ .allow_idle = omap4_dpllmx_allow_gatectrl,
+ .deny_idle = omap4_dpllmx_deny_gatectrl,
+};
+
+/**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates whether low-power mode can be enabled based upon the most
+ * recently computed multiplier and divider values. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+ long fint, fout;
+
+ fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fout = fint * dd->last_rounded_m;
+
+ if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+ dd->last_rounded_lpmode = 1;
+ else
+ dd->last_rounded_lpmode = 0;
+}
+
+/**
+ * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
+ * @clk: struct clk * of the DPLL to compute the rate for
+ *
+ * Compute the output rate for the OMAP4 DPLL represented by @clk.
+ * Takes the REGM4XEN bit into consideration, which is needed for the
+ * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers)
+ * upon success, or 0 upon error.
+ */
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+ unsigned long rate;
+ struct dpll_data *dd;
+
+ if (!clk || !clk->dpll_data)
+ return 0;
+
+ dd = clk->dpll_data;
+
+ rate = omap2_get_dpll_rate(clk);
+
+ /* regm4xen adds a multiplier of 4 to DPLL calculations */
+ v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+ if (v & OMAP4430_DPLL_REGM4XEN_MASK)
+ rate *= OMAP4430_REGM4XEN_MULT;
+
+ return rate;
+}
+
+/**
+ * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
+ * @clk: struct clk * of the DPLL to round a rate for
+ * @target_rate: the desired rate of the DPLL
+ *
+ * Compute the rate that would be programmed into the DPLL hardware
+ * for @clk if set_rate() were to be provided with the rate
+ * @target_rate. Takes the REGM4XEN bit into consideration, which is
+ * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
+ * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
+ * ~0 if an error occurred in omap2_dpll_round_rate().
+ */
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ long r;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate, NULL);
+ if (r != ~0)
+ goto out;
+
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+ NULL);
+ if (r == ~0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+
+out:
+ omap4_dpll_lpmode_recalc(dd);
+
+ return dd->last_rounded_rate;
+}
+
+/**
+ * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success and a negative error value otherwise.
+ */
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+
+ if (!req->rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ } else {
+ req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ }
+
+ req->best_parent_rate = req->rate;
+
+ return 0;
+}
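Two decisions in dpll44xx.c are worth working through: the REGM4XEN retry,
which rounds target_rate / 4 and scales the result back up, and the
low-power criteria FINT < 1 MHz, FOUT < 100 MHz. A userspace sketch under
the assumption of a toy round_rate() that can synthesize anything up to
1 GHz (the real omap2_dpll_round_rate() searches M/N combinations):

/* Sketch of the REGM4XEN fallback and LP-mode check; values illustrative. */
#include <stdio.h>

#define LP_FINT_MAX     1000000UL  /* mirrors OMAP4_DPLL_LP_FINT_MAX */
#define LP_FOUT_MAX   100000000UL  /* mirrors OMAP4_DPLL_LP_FOUT_MAX */
#define REGM4XEN_MULT 4UL

/* toy stand-in: pretend the DPLL can only synthesize rates up to 1 GHz */
static unsigned long round_rate(unsigned long target)
{
        return target <= 1000000000UL ? target : ~0UL;
}

static unsigned long regm4xen_round(unsigned long target, int *m4xen)
{
        unsigned long r = round_rate(target); /* first try without the 4x */

        *m4xen = 0;
        if (r != ~0UL)
                return r;

        /* enabling REGM4XEN == rounding target/4, then scaling back up */
        r = round_rate(target / REGM4XEN_MULT);
        if (r == ~0UL)
                return r;
        *m4xen = 1;
        return r * REGM4XEN_MULT;
}

int main(void)
{
        int m4xen;
        unsigned long fint = 750000, fout = 98304000;

        /* 1.96608 GHz is only reachable with the 4x multiplier enabled */
        printf("rounded: %lu (m4xen=%d)\n",
               regm4xen_round(1966080000UL, &m4xen), m4xen);
        printf("lpmode: %d\n", fint < LP_FINT_MAX && fout < LP_FOUT_MAX);
        return 0;
}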
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 730aa62454a2..f4b2e9888bdf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -9,6 +9,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -558,8 +559,7 @@ static void __init ti_fapll_setup(struct device_node *node)
goto free;
}
- parent_name[0] = of_clk_get_parent_name(node, 0);
- parent_name[1] = of_clk_get_parent_name(node, 1);
+ of_clk_parent_fill(node, parent_name, 2);
init->parent_names = parent_name;
fd->clk_ref = of_clk_get(node, 0);
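of_clk_parent_fill() is a helper that replaces open-coded per-index
of_clk_get_parent_name() loops like the pair removed here (the mux.c hunk
further down gets the same conversion). A userspace mock of its contract,
with a hypothetical name getter standing in for the OF lookup:

/* Mock of the of_clk_parent_fill() contract; the getter is hypothetical. */
#include <stdio.h>

/* stand-in for of_clk_get_parent_name(node, i) */
static const char *get_parent_name(int i)
{
        static const char *names[] = { "sys_clkin", "bypass_clkin" };
        return (i >= 0 && i < 2) ? names[i] : NULL;
}

/* fill up to size parent names, return how many slots were written */
static unsigned int parent_fill(const char **parents, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++)
                parents[i] = get_parent_name(i);
        return i;
}

int main(void)
{
        const char *parent_name[2];
        unsigned int n = parent_fill(parent_name, 2);

        printf("%u parents: %s, %s\n", n, parent_name[0], parent_name[1]);
        return 0;
}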
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
index c2c8a287408c..3cd406768909 100644
--- a/drivers/clk/ti/fixed-factor.c
+++ b/drivers/clk/ti/fixed-factor.c
@@ -22,6 +22,8 @@
#include <linux/of_address.h>
#include <linux/clk/ti.h>
+#include "clock.h"
+
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 0c6fdfcd5f93..5429d3534363 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -62,7 +62,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
* (Any other value different from the Read value) to the
* corresponding CM_CLKSEL register will refresh the dividers.
*/
-static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
{
struct clk_divider *parent;
struct clk_hw *parent_hw;
@@ -70,10 +70,10 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
int ret;
/* Clear PWRDN bit of HSDIVIDER */
- ret = omap2_dflt_clk_enable(clk);
+ ret = omap2_dflt_clk_enable(hw);
/* Parent is the x2 node, get parent of parent for the m2 div */
- parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+ parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw));
parent = to_clk_divider(parent_hw);
/* Restore the dividers */
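This hunk belongs to the wider move onto the struct clk_hw provider API
(clk_hw_get_parent(), clk_hw_get_name(), clk_hw_get_num_parents()), so that
providers no longer reach through hw->clk; the interface.c, mux.c, and
ux500 hunks below follow the same pattern. A stripped-down mock of the
two-level parent walk, with hypothetical types:

/* Mock of walking two parent levels on a clk_hw-like tree. */
#include <stdio.h>

struct hw {
        const char *name;
        struct hw *parent;
};

static struct hw *hw_get_parent(struct hw *hw)
{
        return hw ? hw->parent : NULL;
}

int main(void)
{
        struct hw m2_div = { "dpll4_m2_div", NULL };
        struct hw x2     = { "dpll4_x2", &m2_div };
        struct hw gate   = { "gate", &x2 };

        /* parent is the x2 node; parent of parent is the m2 divider */
        struct hw *div = hw_get_parent(hw_get_parent(&gate));

        printf("%s\n", div->name); /* dpll4_m2_div */
        return 0;
}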
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index c76230d8dd04..e505e6f8228d 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -63,7 +63,7 @@ static struct clk *_register_interface(struct device *dev, const char *name,
if (IS_ERR(clk))
kfree(clk_hw);
else
- omap2_init_clk_hw_omap_clocks(clk);
+ omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
return clk;
}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 5cdeed538b08..69f08a1d047d 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -31,7 +31,7 @@
static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
- int num_parents = __clk_get_num_parents(hw->clk);
+ int num_parents = clk_hw_get_num_parents(hw);
u32 val;
/*
@@ -190,7 +190,6 @@ static void of_mux_clk_setup(struct device_node *node)
void __iomem *reg;
int num_parents;
const char **parent_names;
- int i;
u8 clk_mux_flags = 0;
u32 mask = 0;
u32 shift = 0;
@@ -205,8 +204,7 @@ static void of_mux_clk_setup(struct device_node *node)
if (!parent_names)
goto cleanup;
- for (i = 0; i < num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
+ of_clk_parent_fill(node, parent_names, num_parents);
reg = ti_clk_get_reg_addr(node, 0);
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index 521483f0ba33..f3baef29859c 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -9,7 +9,6 @@ obj-y += clk-sysctrl.o
# Clock definitions
obj-y += u8500_of_clk.o
-obj-y += u8500_clk.o
obj-y += u9540_clk.o
obj-y += u8540_clk.o
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 3e5e05101302..222425d08ab6 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index bf63c96acb1a..7f343821f4e4 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -43,7 +43,7 @@ static void clk_prcmu_unprepare(struct clk_hw *hw)
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
else
clk->is_prepared = 0;
}
@@ -101,11 +101,11 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw)
if (!clk->opp_requested) {
err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
- (char *)__clk_get_name(hw->clk),
+ (char *)clk_hw_get_name(hw),
100);
if (err) {
pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return err;
}
clk->opp_requested = 1;
@@ -114,7 +114,7 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw)
err = prcmu_request_clock(clk->cg_sel, true);
if (err) {
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
- (char *)__clk_get_name(hw->clk));
+ (char *)clk_hw_get_name(hw));
clk->opp_requested = 0;
return err;
}
@@ -129,13 +129,13 @@ static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
if (prcmu_request_clock(clk->cg_sel, false)) {
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return;
}
if (clk->opp_requested) {
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
- (char *)__clk_get_name(hw->clk));
+ (char *)clk_hw_get_name(hw));
clk->opp_requested = 0;
}
@@ -151,7 +151,7 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
err = prcmu_request_ape_opp_100_voltage(true);
if (err) {
pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
return err;
}
clk->opp_requested = 1;
@@ -174,7 +174,7 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
if (prcmu_request_clock(clk->cg_sel, false)) {
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- __clk_get_name(hw->clk));
+ clk_hw_get_name(hw));
return;
}
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index e364c9d4aa60..266ddea630d2 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -52,7 +52,7 @@ static void clk_sysctrl_unprepare(struct clk_hw *hw)
struct clk_sysctrl *clk = to_clk_sysctrl(hw);
if (ab8500_sysctrl_clear(clk->reg_sel[0], clk->reg_mask[0]))
dev_err(clk->dev, "clk_sysctrl: %s fail to clear %s.\n",
- __func__, __clk_get_name(hw->clk));
+ __func__, clk_hw_get_name(hw));
}
static unsigned long clk_sysctrl_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h
index a2bb92d85ee0..b42485da704e 100644
--- a/drivers/clk/ux500/clk.h
+++ b/drivers/clk/ux500/clk.h
@@ -10,10 +10,11 @@
#ifndef __UX500_CLK_H
#define __UX500_CLK_H
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/types.h>
+struct clk;
+
struct clk *clk_reg_prcc_pclk(const char *name,
const char *parent_name,
resource_size_t phy_base,
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
deleted file mode 100644
index 4626b97b7d83..000000000000
--- a/drivers/clk/ux500/u8500_clk.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/*
- * Clock definitions for u8500 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
-#include "clk.h"
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base)
-{
- struct prcmu_fw_version *fw_version;
- const char *sgaclk_parent = NULL;
- struct clk *clk;
-
- /* Clock sources */
- clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc0_pll", NULL);
-
- clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc1_pll", NULL);
-
- clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "ddr_pll", NULL);
-
- /* FIXME: Add sys, ulp and int clocks here. */
-
- clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
- 32768);
- clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
-
- /* PRCMU clocks */
- fw_version = prcmu_get_fw_version();
- if (fw_version != NULL) {
- switch (fw_version->project) {
- case PRCMU_FW_PROJECT_U8500_C2:
- case PRCMU_FW_PROJECT_U8520:
- case PRCMU_FW_PROJECT_U8420:
- sgaclk_parent = "soc0_pll";
- break;
- default:
- break;
- }
- }
-
- if (sgaclk_parent)
- clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
- PRCMU_SGACLK, 0);
- else
- clk = clk_reg_prcmu_gate("sgclk", NULL,
- PRCMU_SGACLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "mali");
-
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "UART");
-
- clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "MSP02");
-
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "MSP1");
-
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "I2C");
-
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "slim");
-
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH1");
-
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH2");
-
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH3");
-
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH5");
-
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH6");
-
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "PERIPH7");
-
- clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "lcd");
- clk_register_clkdev(clk, "lcd", "mcde");
-
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "bml");
-
- clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "hdmi");
- clk_register_clkdev(clk, "hdmi", "mcde");
-
- clk = clk_reg_prcmu_scalable("apeatclk", NULL, PRCMU_APEATCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "apeat");
-
- clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "apetrace");
-
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "mcde");
- clk_register_clkdev(clk, "mcde", "mcde");
- clk_register_clkdev(clk, "dsisys", "dsilink.0");
- clk_register_clkdev(clk, "dsisys", "dsilink.1");
- clk_register_clkdev(clk, "dsisys", "dsilink.2");
-
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
- CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "ipi2");
-
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
- CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "dsialt");
-
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "dma40.0");
-
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "b2r2");
- clk_register_clkdev(clk, NULL, "b2r2_core");
- clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
-
- clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "tv");
- clk_register_clkdev(clk, "tv", "mcde");
-
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "SSP");
-
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "rngclk");
-
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "uicc");
-
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
- clk_register_clkdev(clk, NULL, "mtu0");
- clk_register_clkdev(clk, NULL, "mtu1");
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
- 100000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdmmc");
-
- clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
- PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs2", "mcde");
- clk_register_clkdev(clk, "dsihs2", "dsilink.2");
-
-
- clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
- PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs0", "mcde");
- clk_register_clkdev(clk, "dsihs0", "dsilink.0");
-
- clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
- PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs1", "mcde");
- clk_register_clkdev(clk, "dsihs1", "dsilink.1");
-
- clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
- PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilp0", "dsilink.0");
- clk_register_clkdev(clk, "dsilp0", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
- PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilp1", "dsilink.1");
- clk_register_clkdev(clk, "dsilp1", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
- PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilp2", "dsilink.2");
- clk_register_clkdev(clk, "dsilp2", "mcde");
-
- clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "armss", NULL);
-
- clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
- CLK_IGNORE_UNUSED, 1, 2);
- clk_register_clkdev(clk, NULL, "smp_twd");
-
- /*
- * FIXME: Add special handled PRCMU clocks here:
- * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl.
- * 2. ab9540_clkout1yuv, see clkout0yuv
- */
-
- /* PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart0");
-
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart1");
-
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp0");
- clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
-
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp1");
- clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi0");
-
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
-
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
- BIT(7), 0);
- clk_register_clkdev(clk, NULL, "spi3");
-
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
- BIT(8), 0);
- clk_register_clkdev(clk, "apb_pclk", "slimbus0");
-
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
- BIT(9), 0);
- clk_register_clkdev(clk, NULL, "gpio.0");
- clk_register_clkdev(clk, NULL, "gpio.1");
- clk_register_clkdev(clk, NULL, "gpioblock0");
-
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
- BIT(10), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
-
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
- BIT(11), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp3");
- clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
-
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
-
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "spi2");
-
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "spi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pwl");
-
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi4");
-
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp2");
- clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
-
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi3");
-
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "spi0");
-
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
- BIT(9), 0);
- clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
- BIT(10), 0);
- clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
- BIT(11), 0);
- clk_register_clkdev(clk, NULL, "gpio.6");
- clk_register_clkdev(clk, NULL, "gpio.7");
- clk_register_clkdev(clk, NULL, "gpioblock1");
-
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
- BIT(12), 0);
-
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
- BIT(0), 0);
- clk_register_clkdev(clk, "fsmc", NULL);
- clk_register_clkdev(clk, NULL, "smsc911x.0");
-
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp0");
-
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp1");
-
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
-
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi2");
-
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "ske");
- clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
-
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart2");
-
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi5");
-
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "gpio.2");
- clk_register_clkdev(clk, NULL, "gpio.3");
- clk_register_clkdev(clk, NULL, "gpio.4");
- clk_register_clkdev(clk, NULL, "gpio.5");
- clk_register_clkdev(clk, NULL, "gpioblock2");
-
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
- BIT(0), 0);
- clk_register_clkdev(clk, "usb", "musb-ux500.0");
-
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "gpio.8");
- clk_register_clkdev(clk, NULL, "gpioblock3");
-
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "rng");
-
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "cryp0");
- clk_register_clkdev(clk, NULL, "cryp1");
-
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "hash0");
-
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pka");
-
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
- BIT(4), 0);
- clk_register_clkdev(clk, NULL, "hash1");
-
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
- BIT(5), 0);
- clk_register_clkdev(clk, NULL, "cfgreg");
-
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu0");
-
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu1");
-
- /* PRCC K-clocks
- *
- * FIXME: Some drivers require PERIPH[n] to be automatically enabled
- * by enabling just the K-clock, even if it is not a valid parent to
- * the K-clock. Until drivers get fixed we might need some kind of
- * "parent muxed join".
- */
-
- /* Periph1 */
- clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart0");
-
- clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart1");
-
- clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.1");
-
- clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp0");
- clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
-
- clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp1");
- clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
-
- clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
- clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi0");
-
- clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.2");
-
- clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "slimbus0");
-
- clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.4");
-
- clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp3");
- clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
-
- /* Periph2 */
- clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.3");
-
- clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
- clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi4");
-
- clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp2");
- clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
-
- clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
- clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi1");
-
- clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi3");
-
- /* Note that rate is received from parent. */
- clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- clkrst2_base, BIT(6),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
- clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- clkrst2_base, BIT(7),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
-
- /* Periph3 */
- clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp0");
-
- clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp1");
-
- clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.0");
-
- clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
- clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi2");
-
- clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ske");
- clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
-
- clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart2");
-
- clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi5");
-
- /* Periph6 */
- clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
- clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "rng");
-}
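With the non-DT board path gone, the surviving u8500_of_clk.c init below
reads the five CLKRST base addresses from the clock node's reg entries via
of_address_to_resource() instead of taking them as function arguments.
Because CLKRST4 does not exist, the reg index and the peripheral number
diverge, which is what the CLKRST*_INDEX enum encodes; note that the error
path below prints i + 1, which names the wrong controller for indices 3
and 4. A throwaway sketch of the mapping:

/* DT reg index -> CLKRST peripheral number; CLKRST4 has no controller. */
#include <stdio.h>

static const int clkrst_periph[] = { 1, 2, 3, 5, 6 };

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                printf("reg index %d -> CLKRST%d\n", i, clkrst_periph[i]);
        return 0;
}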
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index e319ef912dc6..271c09644652 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -8,8 +8,7 @@
*/
#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
@@ -54,14 +53,25 @@ static const struct of_device_id u8500_clk_of_match[] = {
{ },
};
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base)
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+ CLKRST1_INDEX = 0,
+ CLKRST2_INDEX,
+ CLKRST3_INDEX,
+ CLKRST5_INDEX,
+ CLKRST6_INDEX,
+ CLKRST_MAX,
+};
+
+void u8500_clk_init(void)
{
struct prcmu_fw_version *fw_version;
struct device_node *np = NULL;
struct device_node *child = NULL;
const char *sgaclk_parent = NULL;
struct clk *clk, *rtc_clk, *twd_clk;
+ u32 bases[CLKRST_MAX];
+ int i;
if (of_have_populated_dt())
np = of_find_matching_node(NULL, u8500_clk_of_match);
@@ -69,6 +79,15 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
pr_err("Either DT or U8500 Clock node not found\n");
return;
}
+ for (i = 0; i < ARRAY_SIZE(bases); i++) {
+ struct resource r;
+
+ if (of_address_to_resource(np, i, &r))
+ /* Not much choice but to continue */
+ pr_err("failed to get CLKRST %d base address\n",
+ i + 1);
+ bases[i] = r.start;
+ }
/* Clock sources */
clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
@@ -246,179 +265,179 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
*/
/* PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
BIT(0), 0);
PRCC_PCLK_STORE(clk, 1, 0);
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
BIT(1), 0);
PRCC_PCLK_STORE(clk, 1, 1);
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
BIT(2), 0);
PRCC_PCLK_STORE(clk, 1, 2);
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
BIT(3), 0);
PRCC_PCLK_STORE(clk, 1, 3);
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
BIT(4), 0);
PRCC_PCLK_STORE(clk, 1, 4);
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
BIT(5), 0);
PRCC_PCLK_STORE(clk, 1, 5);
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
BIT(6), 0);
PRCC_PCLK_STORE(clk, 1, 6);
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
BIT(7), 0);
PRCC_PCLK_STORE(clk, 1, 7);
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
BIT(8), 0);
PRCC_PCLK_STORE(clk, 1, 8);
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
BIT(9), 0);
PRCC_PCLK_STORE(clk, 1, 9);
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
BIT(10), 0);
PRCC_PCLK_STORE(clk, 1, 10);
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
BIT(11), 0);
PRCC_PCLK_STORE(clk, 1, 11);
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
BIT(0), 0);
PRCC_PCLK_STORE(clk, 2, 0);
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
BIT(1), 0);
PRCC_PCLK_STORE(clk, 2, 1);
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
BIT(2), 0);
PRCC_PCLK_STORE(clk, 2, 2);
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
BIT(3), 0);
PRCC_PCLK_STORE(clk, 2, 3);
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
BIT(4), 0);
PRCC_PCLK_STORE(clk, 2, 4);
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
BIT(5), 0);
PRCC_PCLK_STORE(clk, 2, 5);
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
BIT(6), 0);
PRCC_PCLK_STORE(clk, 2, 6);
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
BIT(7), 0);
PRCC_PCLK_STORE(clk, 2, 7);
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
BIT(8), 0);
PRCC_PCLK_STORE(clk, 2, 8);
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
BIT(9), 0);
PRCC_PCLK_STORE(clk, 2, 9);
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
BIT(10), 0);
PRCC_PCLK_STORE(clk, 2, 10);
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
BIT(11), 0);
PRCC_PCLK_STORE(clk, 2, 11);
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
BIT(12), 0);
PRCC_PCLK_STORE(clk, 2, 12);
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
BIT(0), 0);
PRCC_PCLK_STORE(clk, 3, 0);
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
BIT(1), 0);
PRCC_PCLK_STORE(clk, 3, 1);
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
BIT(2), 0);
PRCC_PCLK_STORE(clk, 3, 2);
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
BIT(3), 0);
PRCC_PCLK_STORE(clk, 3, 3);
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
BIT(4), 0);
PRCC_PCLK_STORE(clk, 3, 4);
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
BIT(5), 0);
PRCC_PCLK_STORE(clk, 3, 5);
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
BIT(6), 0);
PRCC_PCLK_STORE(clk, 3, 6);
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
BIT(7), 0);
PRCC_PCLK_STORE(clk, 3, 7);
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
BIT(8), 0);
PRCC_PCLK_STORE(clk, 3, 8);
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+ clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
BIT(0), 0);
PRCC_PCLK_STORE(clk, 5, 0);
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+ clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
BIT(1), 0);
PRCC_PCLK_STORE(clk, 5, 1);
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
BIT(0), 0);
PRCC_PCLK_STORE(clk, 6, 0);
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
BIT(1), 0);
PRCC_PCLK_STORE(clk, 6, 1);
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
BIT(2), 0);
PRCC_PCLK_STORE(clk, 6, 2);
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
BIT(3), 0);
PRCC_PCLK_STORE(clk, 6, 3);
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
BIT(4), 0);
PRCC_PCLK_STORE(clk, 6, 4);
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
BIT(5), 0);
PRCC_PCLK_STORE(clk, 6, 5);
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
BIT(6), 0);
PRCC_PCLK_STORE(clk, 6, 6);
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
BIT(7), 0);
PRCC_PCLK_STORE(clk, 6, 7);
@@ -432,109 +451,109 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
/* Periph1 */
clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 0);
clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 1);
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 2);
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 3);
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 4);
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
- clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 5);
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 6);
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 8);
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 9);
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 1, 10);
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 2, 0);
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
- clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 2, 2);
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 2, 3);
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
- clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 2, 4);
clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 2, 5);
/* Note that rate is received from parent. */
clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- clkrst2_base, BIT(6),
+ bases[CLKRST2_INDEX], BIT(6),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
PRCC_KCLK_STORE(clk, 2, 6);
clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- clkrst2_base, BIT(7),
+ bases[CLKRST2_INDEX], BIT(7),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
PRCC_KCLK_STORE(clk, 2, 7);
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 1);
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 2);
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 3);
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
- clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 4);
clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 5);
clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 6);
clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 3, 7);
/* Periph6 */
clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
- clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
PRCC_KCLK_STORE(clk, 6, 0);
for_each_child_of_node(np, child) {
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index 20c8add90d11..d7bcb7a86615 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -7,17 +7,51 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base)
+static const struct of_device_id u8540_clk_of_match[] = {
+ { .compatible = "stericsson,u8540-clks", },
+ { }
+};
+
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+ CLKRST1_INDEX = 0,
+ CLKRST2_INDEX,
+ CLKRST3_INDEX,
+ CLKRST5_INDEX,
+ CLKRST6_INDEX,
+ CLKRST_MAX,
+};
+
+void u8540_clk_init(void)
{
struct clk *clk;
+ struct device_node *np = NULL;
+ u32 bases[CLKRST_MAX];
+ int i;
+
+ if (of_have_populated_dt())
+ np = of_find_matching_node(NULL, u8540_clk_of_match);
+ if (!np) {
+ pr_err("Either DT or U8540 Clock node not found\n");
+ return;
+ }
+ for (i = 0; i < ARRAY_SIZE(bases); i++) {
+ struct resource r;
+
+ if (of_address_to_resource(np, i, &r))
+ /* Not much choice but to continue */
+ pr_err("failed to get CLKRST %d base address\n",
+ i + 1);
+ bases[i] = r.start;
+ }
/* Clock sources. */
/* Fixed ClockGen */
@@ -219,151 +253,151 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
/* PRCC P-clocks */
/* Peripheral 1 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "uart0");
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
BIT(1), 0);
clk_register_clkdev(clk, "apb_pclk", "uart1");
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "msp0");
clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.0");
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "msp1");
clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.1");
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi0");
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
BIT(7), 0);
clk_register_clkdev(clk, NULL, "spi3");
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
BIT(8), 0);
clk_register_clkdev(clk, "apb_pclk", "slimbus0");
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
BIT(9), 0);
clk_register_clkdev(clk, NULL, "gpio.0");
clk_register_clkdev(clk, NULL, "gpio.1");
clk_register_clkdev(clk, NULL, "gpioblock0");
clk_register_clkdev(clk, "apb_pclk", "ab85xx-codec.0");
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
BIT(10), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+ clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
BIT(11), 0);
clk_register_clkdev(clk, "apb_pclk", "msp3");
clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.3");
/* Peripheral 2 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
BIT(1), 0);
clk_register_clkdev(clk, NULL, "spi2");
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
BIT(2), 0);
clk_register_clkdev(clk, NULL, "spi1");
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
BIT(3), 0);
clk_register_clkdev(clk, NULL, "pwl");
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi4");
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "msp2");
clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.2");
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
BIT(8), 0);
clk_register_clkdev(clk, NULL, "spi0");
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
BIT(9), 0);
clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
BIT(10), 0);
clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
BIT(11), 0);
clk_register_clkdev(clk, NULL, "gpio.6");
clk_register_clkdev(clk, NULL, "gpio.7");
clk_register_clkdev(clk, NULL, "gpioblock1");
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+ clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
BIT(12), 0);
clk_register_clkdev(clk, "msp4-pclk", "ab85xx-codec.0");
/* Peripheral 3 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
BIT(0), 0);
clk_register_clkdev(clk, NULL, "fsmc");
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
BIT(1), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp0");
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp1");
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi2");
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "ske");
clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "uart2");
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi5");
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
BIT(8), 0);
clk_register_clkdev(clk, NULL, "gpio.2");
clk_register_clkdev(clk, NULL, "gpio.3");
@@ -371,64 +405,64 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
clk_register_clkdev(clk, NULL, "gpio.5");
clk_register_clkdev(clk, NULL, "gpioblock2");
- clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", bases[CLKRST3_INDEX],
BIT(9), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.5");
- clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", bases[CLKRST3_INDEX],
BIT(10), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.6");
- clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", bases[CLKRST3_INDEX],
BIT(11), 0);
clk_register_clkdev(clk, "apb_pclk", "uart3");
- clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", clkrst3_base,
+ clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", bases[CLKRST3_INDEX],
BIT(12), 0);
clk_register_clkdev(clk, "apb_pclk", "uart4");
/* Peripheral 5 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+ clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
BIT(0), 0);
clk_register_clkdev(clk, "usb", "musb-ux500.0");
clk_register_clkdev(clk, "usbclk", "ab-iddet.0");
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+ clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
BIT(1), 0);
clk_register_clkdev(clk, NULL, "gpio.8");
clk_register_clkdev(clk, NULL, "gpioblock3");
/* Peripheral 6 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "rng");
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
BIT(1), 0);
clk_register_clkdev(clk, NULL, "cryp0");
clk_register_clkdev(clk, NULL, "cryp1");
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
BIT(2), 0);
clk_register_clkdev(clk, NULL, "hash0");
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
BIT(3), 0);
clk_register_clkdev(clk, NULL, "pka");
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
BIT(4), 0);
clk_register_clkdev(clk, NULL, "db8540-hash1");
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
BIT(5), 0);
clk_register_clkdev(clk, NULL, "cfgreg");
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "mtu0");
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+ clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "mtu1");
@@ -442,138 +476,138 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
/* Peripheral 1 : PRCC K-clocks */
clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart0");
clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart1");
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.1");
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp0");
clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.0");
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp1");
clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmchclk",
- clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi0");
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.2");
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "slimbus0");
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.4");
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+ bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp3");
clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.3");
/* Peripheral 2 : PRCC K-clocks */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_pwl_kclk", "rtc32k",
- clkrst2_base, BIT(1), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "pwl");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmchclk",
- clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi4");
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp2");
clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmchclk",
- clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi1");
clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi3");
clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- clkrst2_base, BIT(6),
+ bases[CLKRST2_INDEX], BIT(6),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
clk_register_clkdev(clk, "hsir_hsirxclk", "ste_hsi.0");
clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- clkrst2_base, BIT(7),
+ bases[CLKRST2_INDEX], BIT(7),
CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
clk_register_clkdev(clk, "hsit_hsitxclk", "ste_hsi.0");
/* Should only be 9540, but might be added for 85xx as well */
clk = clk_reg_prcc_kclk("p2_msp4_kclk", "msp02clk",
- clkrst2_base, BIT(9), CLK_SET_RATE_GATE);
+ bases[CLKRST2_INDEX], BIT(9), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp4");
clk_register_clkdev(clk, "msp4", "ab85xx-codec.0");
/* Peripheral 3 : PRCC K-clocks */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp0");
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp1");
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmchclk",
- clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi2");
clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ske");
clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart2");
clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdi5");
clk = clk_reg_prcc_kclk("p3_i2c5_kclk", "i2cclk",
- clkrst3_base, BIT(8), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(8), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.5");
clk = clk_reg_prcc_kclk("p3_i2c6_kclk", "i2cclk",
- clkrst3_base, BIT(9), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(9), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.6");
clk = clk_reg_prcc_kclk("p3_uart3_kclk", "uartclk",
- clkrst3_base, BIT(10), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(10), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart3");
clk = clk_reg_prcc_kclk("p3_uart4_kclk", "uartclk",
- clkrst3_base, BIT(11), CLK_SET_RATE_GATE);
+ bases[CLKRST3_INDEX], BIT(11), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "uart4");
/* Peripheral 6 : PRCC K-clocks */
clk = clk_reg_prcc_kclk("p6_rng_kclk", "rngclk",
- clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+ bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "rng");
}
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
index 44794782e7e0..2138a4c8cbca 100644
--- a/drivers/clk/ux500/u9540_clk.c
+++ b/drivers/clk/ux500/u9540_clk.c
@@ -7,15 +7,12 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
- u32 clkrst5_base, u32 clkrst6_base)
+void u9540_clk_init(void)
{
/* register clocks here */
}
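
The u8540 hunks above are a mechanical conversion: the five physical base addresses previously passed as u32 arguments are replaced by entries of an ioremapped bases[] array, and u9540_clk_init() loses its parameters the same way. A minimal sketch of that pattern follows; the enum ordering, the address table, and the error handling are assumptions for illustration, not the driver's actual code.

#include <linux/errno.h>
#include <linux/io.h>

enum { CLKRST1_INDEX, CLKRST2_INDEX, CLKRST3_INDEX,
       CLKRST5_INDEX, CLKRST6_INDEX, CLKRST_MAX };

static void __iomem *bases[CLKRST_MAX];

static int __init map_clkrst_bases(const phys_addr_t *phys, size_t size)
{
	int i;

	for (i = 0; i < CLKRST_MAX; i++) {
		bases[i] = ioremap(phys[i], size);	/* one PRCC block each */
		if (!bases[i])
			return -ENOMEM;	/* unmapping earlier entries elided */
	}
	return 0;
}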
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index bc96f103bd7c..a3893ea2199d 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -13,8 +13,9 @@
* ICST clock code from the ARM tree should probably be merged into this
* file.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 1cc1330dc570..65c842a21c62 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -7,7 +7,6 @@
* published by the Free Software Foundation.
*/
#include <linux/clk-provider.h>
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index c8b523117fb7..86f70997d59d 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -6,7 +6,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -33,13 +32,13 @@ static const struct icst_params realview_oscvco_params = {
.idx2s = icst307_idx2s,
};
-static const struct clk_icst_desc __initdata realview_osc0_desc = {
+static const struct clk_icst_desc realview_osc0_desc __initconst = {
.params = &realview_oscvco_params,
.vco_offset = REALVIEW_SYS_OSC0_OFFSET,
.lock_offset = REALVIEW_SYS_LOCK_OFFSET,
};
-static const struct clk_icst_desc __initdata realview_osc4_desc = {
+static const struct clk_icst_desc realview_osc4_desc __initconst = {
.params = &realview_oscvco_params,
.vco_offset = REALVIEW_SYS_OSC4_OFFSET,
.lock_offset = REALVIEW_SYS_LOCK_OFFSET,
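
The __initdata to __initconst swaps in this file and in clk-versatile.c further down are section-annotation fixes rather than behavioural changes: a const init-only object belongs in .init.rodata, and combining const with __initdata can provoke section-type conflicts. Two made-up lines illustrate the rule:

static const int demo_lut[4] __initconst = { 1, 2, 4, 8 };	/* .init.rodata */
static int demo_scratch[4] __initdata;				/* .init.data */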
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index a96dd8e53fdb..a1cdef6b0f90 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -12,7 +12,8 @@
*/
#include <linux/amba/sp810.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -32,12 +33,9 @@ struct clk_sp810_timerclken {
struct clk_sp810 {
struct device_node *node;
- int refclk_index, timclk_index;
void __iomem *base;
spinlock_t lock;
struct clk_sp810_timerclken timerclken[4];
- struct clk *refclk;
- struct clk *timclk;
};
static u8 clk_sp810_timerclken_get_parent(struct clk_hw *hw)
@@ -70,55 +68,7 @@ static int clk_sp810_timerclken_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-/*
- * FIXME - setting the parent every time .prepare is invoked is inefficient.
- * This is better handled by a dedicated clock tree configuration mechanism at
- * init-time. Revisit this later when such a mechanism exists
- */
-static int clk_sp810_timerclken_prepare(struct clk_hw *hw)
-{
- struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
- struct clk_sp810 *sp810 = timerclken->sp810;
- struct clk *old_parent = __clk_get_parent(hw->clk);
- struct clk *new_parent;
-
- if (!sp810->refclk)
- sp810->refclk = of_clk_get(sp810->node, sp810->refclk_index);
-
- if (!sp810->timclk)
- sp810->timclk = of_clk_get(sp810->node, sp810->timclk_index);
-
- if (WARN_ON(IS_ERR(sp810->refclk) || IS_ERR(sp810->timclk)))
- return -ENOENT;
-
- /* Select fastest parent */
- if (clk_get_rate(sp810->refclk) > clk_get_rate(sp810->timclk))
- new_parent = sp810->refclk;
- else
- new_parent = sp810->timclk;
-
- /* Switch the parent if necessary */
- if (old_parent != new_parent) {
- clk_prepare(new_parent);
- clk_set_parent(hw->clk, new_parent);
- clk_unprepare(old_parent);
- }
-
- return 0;
-}
-
-static void clk_sp810_timerclken_unprepare(struct clk_hw *hw)
-{
- struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
- struct clk_sp810 *sp810 = timerclken->sp810;
-
- clk_put(sp810->timclk);
- clk_put(sp810->refclk);
-}
-
static const struct clk_ops clk_sp810_timerclken_ops = {
- .prepare = clk_sp810_timerclken_prepare,
- .unprepare = clk_sp810_timerclken_unprepare,
.get_parent = clk_sp810_timerclken_get_parent,
.set_parent = clk_sp810_timerclken_set_parent,
};
@@ -128,8 +78,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
{
struct clk_sp810 *sp810 = data;
- if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
- ARRAY_SIZE(sp810->timerclken)))
+ if (WARN_ON(clkspec->args_count != 1 ||
+ clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
return NULL;
return sp810->timerclken[clkspec->args[0]].clk;
@@ -139,24 +89,18 @@ static void __init clk_sp810_of_setup(struct device_node *node)
{
struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL);
const char *parent_names[2];
+ int num = ARRAY_SIZE(parent_names);
char name[12];
struct clk_init_data init;
int i;
+ bool deprecated;
if (!sp810) {
pr_err("Failed to allocate memory for SP810!\n");
return;
}
- sp810->refclk_index = of_property_match_string(node, "clock-names",
- "refclk");
- parent_names[0] = of_clk_get_parent_name(node, sp810->refclk_index);
-
- sp810->timclk_index = of_property_match_string(node, "clock-names",
- "timclk");
- parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index);
-
- if (!parent_names[0] || !parent_names[1]) {
+ if (of_clk_parent_fill(node, parent_names, num) != num) {
pr_warn("Failed to obtain parent clocks for SP810!\n");
return;
}
@@ -169,7 +113,9 @@ static void __init clk_sp810_of_setup(struct device_node *node)
init.ops = &clk_sp810_timerclken_ops;
init.flags = CLK_IS_BASIC;
init.parent_names = parent_names;
- init.num_parents = ARRAY_SIZE(parent_names);
+ init.num_parents = num;
+
+ deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
@@ -178,6 +124,15 @@ static void __init clk_sp810_of_setup(struct device_node *node)
sp810->timerclken[i].channel = i;
sp810->timerclken[i].hw.init = &init;
+ /*
+ * If DT isn't setting the parent, force it to be
+ * the 1 MHz clock without going through the framework.
+ * We do this before clk_register() so that it can determine
+	 * the parent and set up the tree properly.
+ */
+ if (deprecated)
+ init.ops->set_parent(&sp810->timerclken[i].hw, 1);
+
sp810->timerclken[i].clk = clk_register(NULL,
&sp810->timerclken[i].hw);
WARN_ON(IS_ERR(sp810->timerclken[i].clk));
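
Two separate fixes land in this file. The WARN_ON change is an off-by-one: clkspec->args[0] indexes timerclken[4], so the valid range is 0..3 and the old '>' test let index 4 through. The parent lookup is then collapsed onto of_clk_parent_fill(), which copies parent names from the "clocks" phandles in order and returns how many it found. A hedged sketch of that helper's contract (demo_get_parents() is invented):

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/of.h>

static int demo_get_parents(struct device_node *np, const char **parents,
			    unsigned int n)
{
	/* Fewer names than requested means a parent is missing in DT. */
	if (of_clk_parent_fill(np, parents, n) != n)
		return -EINVAL;
	return 0;
}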
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index 7a4f8635bd1e..a89a927567e0 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
#include <linux/clk-provider.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -35,7 +33,7 @@ static const struct icst_params cp_auxosc_params = {
.idx2s = icst525_idx2s,
};
-static const struct clk_icst_desc __initdata cm_auxosc_desc = {
+static const struct clk_icst_desc cm_auxosc_desc __initconst = {
.params = &cp_auxosc_params,
.vco_offset = 0x1c,
.lock_offset = INTEGRATOR_HDR_LOCK_OFFSET,
diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile
index 95b707c18108..74005aa322a2 100644
--- a/drivers/clk/zte/Makefile
+++ b/drivers/clk/zte/Makefile
@@ -1,2 +1,2 @@
-obj-y := clk-pll.o
+obj-y := clk.o
obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o
diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c
index 929d033594af..ebd20d852e73 100644
--- a/drivers/clk/zte/clk-zx296702.c
+++ b/drivers/clk/zte/clk-zx296702.c
@@ -36,10 +36,21 @@ static struct clk_onecell_data lsp1clk_data;
#define CLK_MUX1 (topcrm_base + 0x8c)
#define CLK_SDMMC1 (lsp0crpm_base + 0x0c)
+#define CLK_GPIO (lsp0crpm_base + 0x2c)
+#define CLK_SPDIF0 (lsp0crpm_base + 0x10)
+#define SPDIF0_DIV (lsp0crpm_base + 0x14)
+#define CLK_I2S0 (lsp0crpm_base + 0x18)
+#define I2S0_DIV (lsp0crpm_base + 0x1c)
+#define CLK_I2S1 (lsp0crpm_base + 0x20)
+#define I2S1_DIV (lsp0crpm_base + 0x24)
+#define CLK_I2S2 (lsp0crpm_base + 0x34)
+#define I2S2_DIV (lsp0crpm_base + 0x38)
#define CLK_UART0 (lsp1crpm_base + 0x20)
#define CLK_UART1 (lsp1crpm_base + 0x24)
#define CLK_SDMMC0 (lsp1crpm_base + 0x2c)
+#define CLK_SPDIF1 (lsp1crpm_base + 0x30)
+#define SPDIF1_DIV (lsp1crpm_base + 0x34)
static const struct zx_pll_config pll_a9_config[] = {
{ .rate = 700000000, .cfg0 = 0x800405d1, .cfg1 = 0x04555555 },
@@ -72,104 +83,119 @@ static const struct clk_div_table sec_wclk_divider[] = {
{ /* sentinel */ }
};
-static const char * matrix_aclk_sel[] = {
+static const char * const matrix_aclk_sel[] = {
"pll_mm0_198M",
"osc",
"clk_148M5",
"pll_lsp_104M",
};
-static const char * a9_wclk_sel[] = {
+static const char * const a9_wclk_sel[] = {
"pll_a9",
"osc",
"clk_500",
"clk_250",
};
-static const char * a9_as1_aclk_sel[] = {
+static const char * const a9_as1_aclk_sel[] = {
"clk_250",
"osc",
"pll_mm0_396M",
"pll_mac_333M",
};
-static const char * a9_trace_clkin_sel[] = {
+static const char * const a9_trace_clkin_sel[] = {
"clk_74M25",
"pll_mm1_108M",
"clk_125",
"clk_148M5",
};
-static const char * decppu_aclk_sel[] = {
+static const char * const decppu_aclk_sel[] = {
"clk_250",
"pll_mm0_198M",
"pll_lsp_104M",
"pll_audio_294M912",
};
-static const char * vou_main_wclk_sel[] = {
+static const char * const vou_main_wclk_sel[] = {
"clk_148M5",
"clk_74M25",
"clk_27",
"pll_mm1_54M",
};
-static const char * vou_scaler_wclk_sel[] = {
+static const char * const vou_scaler_wclk_sel[] = {
"clk_250",
"pll_mac_333M",
"pll_audio_294M912",
"pll_mm0_198M",
};
-static const char * r2d_wclk_sel[] = {
+static const char * const r2d_wclk_sel[] = {
"pll_audio_294M912",
"pll_mac_333M",
"pll_a9_350M",
"pll_mm0_396M",
};
-static const char * ddr_wclk_sel[] = {
+static const char * const ddr_wclk_sel[] = {
"pll_mac_333M",
"pll_ddr_266M",
"pll_audio_294M912",
"pll_mm0_198M",
};
-static const char * nand_wclk_sel[] = {
+static const char * const nand_wclk_sel[] = {
"pll_lsp_104M",
"osc",
};
-static const char * lsp_26_wclk_sel[] = {
+static const char * const lsp_26_wclk_sel[] = {
"pll_lsp_26M",
"osc",
};
-static const char * vl0_sel[] = {
+static const char * const vl0_sel[] = {
"vou_main_channel_div",
"vou_aux_channel_div",
};
-static const char * hdmi_sel[] = {
+static const char * const hdmi_sel[] = {
"vou_main_channel_wclk",
"vou_aux_channel_wclk",
};
-static const char * sdmmc0_wclk_sel[] = {
+static const char * const sdmmc0_wclk_sel[] = {
"lsp1_104M_wclk",
"lsp1_26M_wclk",
};
-static const char * sdmmc1_wclk_sel[] = {
+static const char * const sdmmc1_wclk_sel[] = {
"lsp0_104M_wclk",
"lsp0_26M_wclk",
};
-static const char * uart_wclk_sel[] = {
+static const char * const uart_wclk_sel[] = {
"lsp1_104M_wclk",
"lsp1_26M_wclk",
};
+static const char * const spdif0_wclk_sel[] = {
+ "lsp0_104M_wclk",
+ "lsp0_26M_wclk",
+};
+
+static const char * const spdif1_wclk_sel[] = {
+ "lsp1_104M_wclk",
+ "lsp1_26M_wclk",
+};
+
+static const char * const i2s_wclk_sel[] = {
+ "lsp0_104M_wclk",
+ "lsp0_26M_wclk",
+};
+
static inline struct clk *zx_divtbl(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width,
const struct clk_div_table *table)
@@ -185,7 +211,7 @@ static inline struct clk *zx_div(const char *name, const char *parent,
reg, shift, width, 0, &reg_lock);
}
-static inline struct clk *zx_mux(const char *name, const char **parents,
+static inline struct clk *zx_mux(const char *name, const char * const *parents,
int num_parents, void __iomem *reg, u8 shift, u8 width)
{
return clk_register_mux(NULL, name, parents, num_parents,
@@ -196,7 +222,7 @@ static inline struct clk *zx_gate(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
return clk_register_gate(NULL, name, parent, CLK_IGNORE_UNUSED,
- reg, shift, 0, &reg_lock);
+ reg, shift, CLK_SET_RATE_PARENT, &reg_lock);
}
static void __init zx296702_top_clocks_init(struct device_node *np)
@@ -585,7 +611,57 @@ static void __init zx296702_lsp0_clocks_init(struct device_node *np)
clk[ZX296702_SDMMC1_WCLK] =
zx_gate("sdmmc1_wclk", "sdmmc1_wclk_div", CLK_SDMMC1, 1);
clk[ZX296702_SDMMC1_PCLK] =
- zx_gate("sdmmc1_pclk", "lsp1_apb_pclk", CLK_SDMMC1, 0);
+ zx_gate("sdmmc1_pclk", "lsp0_apb_pclk", CLK_SDMMC1, 0);
+
+ clk[ZX296702_GPIO_CLK] =
+ zx_gate("gpio_clk", "lsp0_apb_pclk", CLK_GPIO, 0);
+
+ /* SPDIF */
+ clk[ZX296702_SPDIF0_WCLK_MUX] =
+ zx_mux("spdif0_wclk_mux", spdif0_wclk_sel,
+ ARRAY_SIZE(spdif0_wclk_sel), CLK_SPDIF0, 4, 1);
+ clk[ZX296702_SPDIF0_WCLK] =
+ zx_gate("spdif0_wclk", "spdif0_wclk_mux", CLK_SPDIF0, 1);
+ clk[ZX296702_SPDIF0_PCLK] =
+ zx_gate("spdif0_pclk", "lsp0_apb_pclk", CLK_SPDIF0, 0);
+
+ clk[ZX296702_SPDIF0_DIV] =
+ clk_register_zx_audio("spdif0_div", "spdif0_wclk", 0,
+ SPDIF0_DIV);
+
+ /* I2S */
+ clk[ZX296702_I2S0_WCLK_MUX] =
+ zx_mux("i2s0_wclk_mux", i2s_wclk_sel,
+ ARRAY_SIZE(i2s_wclk_sel), CLK_I2S0, 4, 1);
+ clk[ZX296702_I2S0_WCLK] =
+ zx_gate("i2s0_wclk", "i2s0_wclk_mux", CLK_I2S0, 1);
+ clk[ZX296702_I2S0_PCLK] =
+ zx_gate("i2s0_pclk", "lsp0_apb_pclk", CLK_I2S0, 0);
+
+ clk[ZX296702_I2S0_DIV] =
+ clk_register_zx_audio("i2s0_div", "i2s0_wclk", 0, I2S0_DIV);
+
+ clk[ZX296702_I2S1_WCLK_MUX] =
+ zx_mux("i2s1_wclk_mux", i2s_wclk_sel,
+ ARRAY_SIZE(i2s_wclk_sel), CLK_I2S1, 4, 1);
+ clk[ZX296702_I2S1_WCLK] =
+ zx_gate("i2s1_wclk", "i2s1_wclk_mux", CLK_I2S1, 1);
+ clk[ZX296702_I2S1_PCLK] =
+ zx_gate("i2s1_pclk", "lsp0_apb_pclk", CLK_I2S1, 0);
+
+ clk[ZX296702_I2S1_DIV] =
+ clk_register_zx_audio("i2s1_div", "i2s1_wclk", 0, I2S1_DIV);
+
+ clk[ZX296702_I2S2_WCLK_MUX] =
+ zx_mux("i2s2_wclk_mux", i2s_wclk_sel,
+ ARRAY_SIZE(i2s_wclk_sel), CLK_I2S2, 4, 1);
+ clk[ZX296702_I2S2_WCLK] =
+ zx_gate("i2s2_wclk", "i2s2_wclk_mux", CLK_I2S2, 1);
+ clk[ZX296702_I2S2_PCLK] =
+ zx_gate("i2s2_pclk", "lsp0_apb_pclk", CLK_I2S2, 0);
+
+ clk[ZX296702_I2S2_DIV] =
+ clk_register_zx_audio("i2s2_div", "i2s2_wclk", 0, I2S2_DIV);
for (i = 0; i < ARRAY_SIZE(lsp0clk); i++) {
if (IS_ERR(clk[i])) {
@@ -641,6 +717,18 @@ static void __init zx296702_lsp1_clocks_init(struct device_node *np)
clk[ZX296702_SDMMC0_PCLK] =
zx_gate("sdmmc0_pclk", "lsp1_apb_pclk", CLK_SDMMC0, 0);
+ clk[ZX296702_SPDIF1_WCLK_MUX] =
+ zx_mux("spdif1_wclk_mux", spdif1_wclk_sel,
+ ARRAY_SIZE(spdif1_wclk_sel), CLK_SPDIF1, 4, 1);
+ clk[ZX296702_SPDIF1_WCLK] =
+ zx_gate("spdif1_wclk", "spdif1_wclk_mux", CLK_SPDIF1, 1);
+ clk[ZX296702_SPDIF1_PCLK] =
+ zx_gate("spdif1_pclk", "lsp1_apb_pclk", CLK_SPDIF1, 0);
+
+ clk[ZX296702_SPDIF1_DIV] =
+ clk_register_zx_audio("spdif1_div", "spdif1_wclk", 0,
+ SPDIF1_DIV);
+
for (i = 0; i < ARRAY_SIZE(lsp1clk); i++) {
if (IS_ERR(clk[i])) {
pr_err("zx296702 clk %d: register failed with %ld\n",
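
The selector tables above gain a second const: with 'const char * const', both the strings and the pointer slots are immutable, so the whole table can live in rodata, matching the pointer type zx_mux() now forwards to clk_register_mux(). A one-line illustration with invented names:

static const char * const demo_sel[] = { "pll_a", "osc" };
/* demo_sel[0] = "other";  -- now rejected at compile time */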
diff --git a/drivers/clk/zte/clk-pll.c b/drivers/clk/zte/clk.c
index c3b221ae6cd7..7c73c538c43d 100644
--- a/drivers/clk/zte/clk-pll.c
+++ b/drivers/clk/zte/clk.c
@@ -13,10 +13,12 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <asm/div64.h>
#include "clk.h"
#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw)
+#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw)
#define CFG0_CFG1_OFFSET 4
#define LOCK_FLAG BIT(30)
@@ -141,8 +143,9 @@ static const struct clk_ops zx_pll_ops = {
};
struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg_base,
- const struct zx_pll_config *lookup_table, int count, spinlock_t *lock)
+ unsigned long flags, void __iomem *reg_base,
+ const struct zx_pll_config *lookup_table,
+ int count, spinlock_t *lock)
{
struct clk_zx_pll *zx_pll;
struct clk *clk;
@@ -170,3 +173,137 @@ struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
return clk;
}
+
+#define BPAR 1000000
+static u32 calc_reg(u32 parent_rate, u32 rate)
+{
+ u32 sel, integ, fra_div, tmp;
+ u64 tmp64 = (u64)parent_rate * BPAR;
+
+ do_div(tmp64, rate);
+ integ = (u32)tmp64 / BPAR;
+ integ = integ >> 1;
+
+ tmp = (u32)tmp64 % BPAR;
+ sel = tmp / BPAR;
+
+ tmp = tmp % BPAR;
+ fra_div = tmp * 0xff / BPAR;
+ tmp = (sel << 24) | (integ << 16) | (0xff << 8) | fra_div;
+
+	/* Set the I2S integer divider to 1. This bit is reserved for
+	 * SPDIF and does no harm there.
+	 */
+ tmp |= BIT(28);
+ return tmp;
+}
+
+static u32 calc_rate(u32 reg, u32 parent_rate)
+{
+ u32 sel, integ, fra_div, tmp;
+ u64 tmp64 = (u64)parent_rate * BPAR;
+
+ tmp = reg;
+ sel = (tmp >> 24) & BIT(0);
+ integ = (tmp >> 16) & 0xff;
+ fra_div = tmp & 0xff;
+
+ tmp = fra_div * BPAR;
+ tmp = tmp / 0xff;
+ tmp += sel * BPAR;
+ tmp += 2 * integ * BPAR;
+ do_div(tmp64, tmp);
+
+ return (u32)tmp64;
+}
+
+static unsigned long zx_audio_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+ u32 reg;
+
+ reg = readl_relaxed(zx_audio->reg_base);
+ return calc_rate(reg, parent_rate);
+}
+
+static long zx_audio_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 reg;
+
+ if (rate * 2 > *prate)
+ return -EINVAL;
+
+ reg = calc_reg(*prate, rate);
+ return calc_rate(reg, *prate);
+}
+
+static int zx_audio_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+ u32 reg;
+
+ reg = calc_reg(parent_rate, rate);
+ writel_relaxed(reg, zx_audio->reg_base);
+
+ return 0;
+}
+
+#define ZX_AUDIO_EN BIT(25)
+static int zx_audio_enable(struct clk_hw *hw)
+{
+ struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+ u32 reg;
+
+ reg = readl_relaxed(zx_audio->reg_base);
+ writel_relaxed(reg & ~ZX_AUDIO_EN, zx_audio->reg_base);
+ return 0;
+}
+
+static void zx_audio_disable(struct clk_hw *hw)
+{
+ struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+ u32 reg;
+
+ reg = readl_relaxed(zx_audio->reg_base);
+ writel_relaxed(reg | ZX_AUDIO_EN, zx_audio->reg_base);
+}
+
+static const struct clk_ops zx_audio_ops = {
+ .recalc_rate = zx_audio_recalc_rate,
+ .round_rate = zx_audio_round_rate,
+ .set_rate = zx_audio_set_rate,
+ .enable = zx_audio_enable,
+ .disable = zx_audio_disable,
+};
+
+struct clk *clk_register_zx_audio(const char *name,
+ const char * const parent_name,
+ unsigned long flags,
+ void __iomem *reg_base)
+{
+ struct clk_zx_audio *zx_audio;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ zx_audio = kzalloc(sizeof(*zx_audio), GFP_KERNEL);
+ if (!zx_audio)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &zx_audio_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ zx_audio->reg_base = reg_base;
+ zx_audio->hw.init = &init;
+
+ clk = clk_register(NULL, &zx_audio->hw);
+ if (IS_ERR(clk))
+ kfree(zx_audio);
+
+ return clk;
+}
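
The register model implied by calc_reg()/calc_rate() above is rate = parent / (2 * integ + sel + fra_div / 255), with the arithmetic carried in millionths (BPAR). A standalone decode sketch, with do_div() reduced to plain 64-bit division and an assumed 49.152 MHz parent purely for the worked example:

#include <stdint.h>
#include <stdio.h>

static uint32_t zx_audio_decode(uint32_t reg, uint32_t parent_rate)
{
	uint32_t sel   = (reg >> 24) & 1;
	uint32_t integ = (reg >> 16) & 0xff;
	uint32_t fra   = reg & 0xff;
	/* denominator in millionths: 2*integ + sel + fra/255 */
	uint64_t den = 2ULL * integ * 1000000 +
		       (uint64_t)sel * 1000000 +
		       (uint64_t)fra * 1000000 / 255;

	return den ? (uint32_t)((uint64_t)parent_rate * 1000000 / den) : 0;
}

int main(void)
{
	/* 0x1010ff00 is what calc_reg() emits for a /32 divider:
	 * integ=16, sel=0, fra_div=0, plus BIT(28) and the 0xff<<8 field.
	 */
	printf("%u\n", zx_audio_decode(0x1010ff00, 49152000)); /* 1536000 */
	return 0;
}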
diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h
index 0914a82d0535..65ae08b818d3 100644
--- a/drivers/clk/zte/clk.h
+++ b/drivers/clk/zte/clk.h
@@ -29,4 +29,13 @@ struct clk_zx_pll {
struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg_base,
const struct zx_pll_config *lookup_table, int count, spinlock_t *lock);
+
+struct clk_zx_audio {
+ struct clk_hw hw;
+ void __iomem *reg_base;
+};
+
+struct clk *clk_register_zx_audio(const char *name,
+ const char * const parent_name,
+ unsigned long flags, void __iomem *reg_base);
#endif
diff --git a/drivers/clk/zynq/Makefile b/drivers/clk/zynq/Makefile
index 156d923f4fa9..0afc2e7cc5c1 100644
--- a/drivers/clk/zynq/Makefile
+++ b/drivers/clk/zynq/Makefile
@@ -1,3 +1,3 @@
# Zynq clock specific Makefile
-obj-$(CONFIG_ARCH_ZYNQ) += clkc.o pll.o
+obj-y += clkc.o pll.o
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index de614384bb44..38a65c3e62fc 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -19,6 +19,7 @@
*/
#include <linux/clk/zynq.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4e57730e0be4..a7726db13abb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -111,6 +111,10 @@ config CLKSRC_LPC32XX
select CLKSRC_MMIO
select CLKSRC_OF
+config CLKSRC_PISTACHIO
+ bool
+ select CLKSRC_OF
+
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
@@ -277,7 +281,7 @@ config CLKSRC_MIPS_GIC
config CLKSRC_PXA
def_bool y if ARCH_PXA || ARCH_SA1100
- select CLKSRC_OF if USE_OF
+ select CLKSRC_OF if OF
help
This enables OST0 support available on PXA and SA-11x0
platforms.
@@ -293,4 +297,12 @@ config CLKSRC_IMX_GPT
depends on ARM && CLKDEV_LOOKUP
select CLKSRC_MMIO
+config CLKSRC_ST_LPC
+ bool
+ depends on ARCH_STI
+ select CLKSRC_OF if OF
+ help
+	  Enable this option to use the Low Power Controller timer
+	  as a clocksource.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f228354961ca..5c00863c3e33 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
+obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
@@ -60,3 +61,4 @@ obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
+obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 0aa135ddbf80..d6e3e49399dd 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -181,44 +181,36 @@ static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
-static __always_inline void timer_set_mode(const int access, int mode,
- struct clock_event_device *clk)
+static __always_inline int timer_shutdown(const int access,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
- switch (mode) {
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- break;
- default:
- break;
- }
+
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+
+ return 0;
}
-static void arch_timer_set_mode_virt(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
+ return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}
-static void arch_timer_set_mode_phys(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
+ return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
+ return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}
-static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
+ return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
static __always_inline void set_next_event(const int access, unsigned long evt,
@@ -273,11 +265,11 @@ static void __arch_timer_setup(unsigned type,
clk->cpumask = cpumask_of(smp_processor_id());
if (arch_timer_use_virtual) {
clk->irq = arch_timer_ppi[VIRT_PPI];
- clk->set_mode = arch_timer_set_mode_virt;
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
} else {
clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
- clk->set_mode = arch_timer_set_mode_phys;
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
}
} else {
@@ -286,17 +278,17 @@ static void __arch_timer_setup(unsigned type,
clk->rating = 400;
clk->cpumask = cpu_all_mask;
if (arch_timer_mem_use_virtual) {
- clk->set_mode = arch_timer_set_mode_virt_mem;
+ clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
clk->set_next_event =
arch_timer_set_next_event_virt_mem;
} else {
- clk->set_mode = arch_timer_set_mode_phys_mem;
+ clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
}
- clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+ clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
@@ -506,7 +498,7 @@ static void arch_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
}
- clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+ clk->set_state_shutdown(clk);
}
static int arch_timer_cpu_notify(struct notifier_block *self,
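
This is the template for the remaining clockevents conversions in this merge: the single ->set_mode(mode, evt) multiplexer becomes discrete set_state_shutdown/set_state_periodic/set_state_oneshot/tick_resume hooks that return 0 on success. A skeletal shape under invented hardware helpers (stop_hw() and the comparator write are placeholders, not a real API):

#include <linux/clockchips.h>

static void stop_hw(void)
{
	/* placeholder: mask the interrupt and halt the counter */
}

static int demo_shutdown(struct clock_event_device *evt)
{
	stop_hw();
	return 0;
}

static int demo_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	/* placeholder: program comparator = counter + delta */
	return 0;
}

static struct clock_event_device demo_evt = {
	.name			= "demo",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= demo_shutdown,
	.set_state_oneshot	= demo_shutdown, /* nothing to arm yet */
	.tick_resume		= demo_shutdown,
	.set_next_event		= demo_next_event,
};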
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index e6833771a716..29ea50ac366a 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -107,26 +107,21 @@ static void gt_compare_set(unsigned long delta, int periodic)
writel(ctrl, gt_base + GT_CONTROL);
}
-static void gt_clockevent_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int gt_clockevent_shutdown(struct clock_event_device *evt)
{
unsigned long ctrl;
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl = readl(gt_base + GT_CONTROL);
- ctrl &= ~(GT_CONTROL_COMP_ENABLE |
- GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC);
- writel(ctrl, gt_base + GT_CONTROL);
- break;
- default:
- break;
- }
+ ctrl = readl(gt_base + GT_CONTROL);
+ ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
+ GT_CONTROL_AUTO_INC);
+ writel(ctrl, gt_base + GT_CONTROL);
+ return 0;
+}
+
+static int gt_clockevent_set_periodic(struct clock_event_device *evt)
+{
+ gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
+ return 0;
}
static int gt_clockevent_set_next_event(unsigned long evt,
@@ -155,7 +150,7 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
* the Global Timer flag _after_ having incremented
* the Comparator register value to a higher value.
*/
- if (evt->mode == CLOCK_EVT_MODE_ONESHOT)
+ if (clockevent_state_oneshot(evt))
gt_compare_set(ULONG_MAX, 0);
writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
@@ -171,7 +166,9 @@ static int gt_clockevents_init(struct clock_event_device *clk)
clk->name = "arm_global_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERCPU;
- clk->set_mode = gt_clockevent_set_mode;
+ clk->set_state_shutdown = gt_clockevent_shutdown;
+ clk->set_state_periodic = gt_clockevent_set_periodic;
+ clk->set_state_oneshot = gt_clockevent_shutdown;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
@@ -184,7 +181,7 @@ static int gt_clockevents_init(struct clock_event_device *clk)
static void gt_clockevents_stop(struct clock_event_device *clk)
{
- gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+ gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
}
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 4c2ba59897e8..217438d39eb3 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -120,38 +120,52 @@ static int asm9260_timer_set_next_event(unsigned long delta,
return 0;
}
-static void asm9260_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static inline void __asm9260_timer_shutdown(struct clock_event_device *evt)
{
/* stop timer0 */
writel_relaxed(BM_C0_EN, priv.base + HW_TCR + CLR_REG);
+}
+
+static int asm9260_timer_shutdown(struct clock_event_device *evt)
+{
+ __asm9260_timer_shutdown(evt);
+ return 0;
+}
+
+static int asm9260_timer_set_oneshot(struct clock_event_device *evt)
+{
+ __asm9260_timer_shutdown(evt);
+
+ /* enable reset and stop on match */
+ writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+ priv.base + HW_MCR + SET_REG);
+ return 0;
+}
+
+static int asm9260_timer_set_periodic(struct clock_event_device *evt)
+{
+ __asm9260_timer_shutdown(evt);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* disable reset and stop on match */
- writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
- priv.base + HW_MCR + CLR_REG);
- /* configure match count for TC0 */
- writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
- /* enable TC0 */
- writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* enable reset and stop on match */
- writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
- priv.base + HW_MCR + SET_REG);
- break;
- default:
- break;
- }
+ /* disable reset and stop on match */
+ writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+ priv.base + HW_MCR + CLR_REG);
+ /* configure match count for TC0 */
+ writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
+ /* enable TC0 */
+ writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
+ return 0;
}
static struct clock_event_device event_dev = {
- .name = DRIVER_NAME,
- .rating = 200,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = asm9260_timer_set_next_event,
- .set_mode = asm9260_timer_set_mode,
+ .name = DRIVER_NAME,
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = asm9260_timer_set_next_event,
+ .set_state_shutdown = asm9260_timer_shutdown,
+ .set_state_periodic = asm9260_timer_set_periodic,
+ .set_state_oneshot = asm9260_timer_set_oneshot,
+ .tick_resume = asm9260_timer_shutdown,
};
static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 26ed331b1aad..6f2822928963 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -54,21 +54,6 @@ static u64 notrace bcm2835_sched_read(void)
return readl_relaxed(system_clock);
}
-static void bcm2835_time_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt_dev)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- default:
- WARN(1, "%s: unhandled event mode %d\n", __func__, mode);
- break;
- }
-}
-
static int bcm2835_time_set_next_event(unsigned long event,
struct clock_event_device *evt_dev)
{
@@ -129,7 +114,6 @@ static void __init bcm2835_timer_init(struct device_node *node)
timer->evt.name = node->name;
timer->evt.rating = 300;
timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
- timer->evt.set_mode = bcm2835_time_set_mode;
timer->evt.set_next_event = bcm2835_time_set_next_event;
timer->evt.cpumask = cpumask_of(0);
timer->act.name = node->name;
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index f1e33d08dd83..e717e87df9bc 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -127,25 +127,18 @@ static int kona_timer_set_next_event(unsigned long clc,
return 0;
}
-static void kona_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *unused)
+static int kona_timer_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- /* by default mode is one shot don't do any thing */
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- kona_timer_disable_and_clear(timers.tmr_regs);
- }
+ kona_timer_disable_and_clear(timers.tmr_regs);
+ return 0;
}
static struct clock_event_device kona_clockevent_timer = {
.name = "timer 1",
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = kona_timer_set_next_event,
- .set_mode = kona_timer_set_mode
+ .set_state_shutdown = kona_timer_shutdown,
+ .tick_resume = kona_timer_shutdown,
};
static void __init kona_timer_clockevents_init(void)
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 510c8a1d37b3..9be6018bd2b8 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -16,7 +16,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
@@ -191,40 +190,42 @@ static int ttc_set_next_event(unsigned long cycles,
}
/**
- * ttc_set_mode - Sets the mode of timer
+ * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer
*
- * @mode: Mode to be set
* @evt: Address of clock event instance
**/
-static void ttc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int ttc_shutdown(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
struct ttc_timer *timer = &ttce->ttc;
u32 ctrl_reg;
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
- PRESCALE * HZ));
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl_reg = readl_relaxed(timer->base_addr +
- TTC_CNT_CNTRL_OFFSET);
- ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
- writel_relaxed(ctrl_reg,
- timer->base_addr + TTC_CNT_CNTRL_OFFSET);
- break;
- case CLOCK_EVT_MODE_RESUME:
- ctrl_reg = readl_relaxed(timer->base_addr +
- TTC_CNT_CNTRL_OFFSET);
- ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
- writel_relaxed(ctrl_reg,
- timer->base_addr + TTC_CNT_CNTRL_OFFSET);
- break;
- }
+ ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ return 0;
+}
+
+static int ttc_set_periodic(struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+
+ ttc_set_interval(timer,
+ DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ));
+ return 0;
+}
+
+static int ttc_resume(struct clock_event_device *evt)
+{
+ struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+ struct ttc_timer *timer = &ttce->ttc;
+ u32 ctrl_reg;
+
+ ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+ return 0;
}
static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
@@ -430,7 +431,10 @@ static void __init ttc_setup_clockevent(struct clk *clk,
ttcce->ce.name = "ttc_clockevent";
ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ttcce->ce.set_next_event = ttc_set_next_event;
- ttcce->ce.set_mode = ttc_set_mode;
+ ttcce->ce.set_state_shutdown = ttc_shutdown;
+ ttcce->ce.set_state_periodic = ttc_set_periodic;
+ ttcce->ce.set_state_oneshot = ttc_shutdown;
+ ttcce->ce.tick_resume = ttc_resume;
ttcce->ce.rating = 200;
ttcce->ce.irq = irq;
ttcce->ce.cpumask = cpu_possible_mask;
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
new file mode 100644
index 000000000000..65ec4674416d
--- /dev/null
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -0,0 +1,131 @@
+/*
+ * Clocksource using the Low Power Timer found in the Low Power Controller (LPC)
+ *
+ * Copyright (C) 2015 STMicroelectronics – All Rights Reserved
+ *
+ * Author(s): Francesco Virlinzi <francesco.virlinzi@st.com>
+ * Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mfd/st-lpc.h>
+
+/* Low Power Timer */
+#define LPC_LPT_LSB_OFF 0x400
+#define LPC_LPT_MSB_OFF 0x404
+#define LPC_LPT_START_OFF 0x408
+
+static struct st_clksrc_ddata {
+ struct clk *clk;
+ void __iomem *base;
+} ddata;
+
+static void __init st_clksrc_reset(void)
+{
+ writel_relaxed(0, ddata.base + LPC_LPT_START_OFF);
+ writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF);
+ writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF);
+ writel_relaxed(1, ddata.base + LPC_LPT_START_OFF);
+}
+
+static u64 notrace st_clksrc_sched_clock_read(void)
+{
+ return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF);
+}
+
+static int __init st_clksrc_init(void)
+{
+ unsigned long rate;
+ int ret;
+
+ st_clksrc_reset();
+
+ rate = clk_get_rate(ddata.clk);
+
+ sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
+
+ ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+ "clksrc-st-lpc", rate, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("clksrc-st-lpc: Failed to register clocksource\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init st_clksrc_setup_clk(struct device_node *np)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("clksrc-st-lpc: Failed to get LPC clock\n");
+ return PTR_ERR(clk);
+ }
+
+ if (clk_prepare_enable(clk)) {
+ pr_err("clksrc-st-lpc: Failed to enable LPC clock\n");
+ return -EINVAL;
+ }
+
+ if (!clk_get_rate(clk)) {
+ pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n");
+ clk_disable_unprepare(clk);
+ return -EINVAL;
+ }
+
+ ddata.clk = clk;
+
+ return 0;
+}
+
+static void __init st_clksrc_of_register(struct device_node *np)
+{
+ int ret;
+ uint32_t mode;
+
+ ret = of_property_read_u32(np, "st,lpc-mode", &mode);
+ if (ret) {
+ pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
+ return;
+ }
+
+	/* The LPC can run either as a clocksource or in RTC or WDT mode */
+ if (mode != ST_LPC_MODE_CLKSRC)
+ return;
+
+ ddata.base = of_iomap(np, 0);
+ if (!ddata.base) {
+ pr_err("clksrc-st-lpc: Unable to map iomem\n");
+ return;
+ }
+
+ if (st_clksrc_setup_clk(np)) {
+ iounmap(ddata.base);
+ return;
+ }
+
+ if (st_clksrc_init()) {
+ clk_disable_unprepare(ddata.clk);
+ clk_put(ddata.clk);
+ iounmap(ddata.base);
+ return;
+ }
+
+ pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
+ clk_get_rate(ddata.clk));
+}
+CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
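
A quick wrap-around check for the 32-bit LPT counter registered above; the clock rate is an assumption for illustration only (the driver reads the real one via clk_get_rate()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 46875;			/* assumed LPC clock, Hz */
	uint64_t wrap = (1ULL << 32) / rate;	/* seconds until wrap */

	/* ~91625 s (about 25 hours); declaring the counter as 32 bits
	 * wide to sched_clock_register() lets the core extend it past
	 * this point.
	 */
	printf("LPT wraps every %llu s\n", (unsigned long long)wrap);
	return 0;
}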
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index d83ec1f2fddc..cdd86e3525bb 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -61,11 +61,6 @@ static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
unsigned int irq)
{
@@ -91,7 +86,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
clkevt->name = "clps711x-clockevent";
clkevt->rating = 300;
clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
- clkevt->set_mode = clps711x_clockevent_set_mode;
clkevt->cpumask = cpumask_of(0);
clockevents_config_and_register(clkevt, HZ, 0, 0);
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index db2105290898..9a7e37cf56b0 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -42,7 +42,6 @@ MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
* 256 128 .125 512.000
*/
-static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
static struct cs5535_mfgpt_timer *cs5535_event_clock;
/* Selected from the table above */
@@ -77,15 +76,17 @@ static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}
-static void mfgpt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int mfgpt_shutdown(struct clock_event_device *evt)
{
disable_timer(cs5535_event_clock);
+ return 0;
+}
- if (mode == CLOCK_EVT_MODE_PERIODIC)
- start_timer(cs5535_event_clock, MFGPT_PERIODIC);
-
- cs5535_tick_mode = mode;
+static int mfgpt_set_periodic(struct clock_event_device *evt)
+{
+ disable_timer(cs5535_event_clock);
+ start_timer(cs5535_event_clock, MFGPT_PERIODIC);
+ return 0;
}
static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
@@ -97,7 +98,10 @@ static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
static struct clock_event_device cs5535_clockevent = {
.name = DRV_NAME,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = mfgpt_set_mode,
+ .set_state_shutdown = mfgpt_shutdown,
+ .set_state_periodic = mfgpt_set_periodic,
+ .set_state_oneshot = mfgpt_shutdown,
+ .tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
};
@@ -113,7 +117,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
+ if (clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
@@ -121,7 +125,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Restart the clock in periodic mode */
- if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
+ if (clockevent_state_periodic(&cs5535_clockevent))
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
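
The cs5535 hunks drop the driver-local mode cache in favour of asking the core directly via clockevent_state_shutdown()/clockevent_state_periodic(). A sketch of that interrupt-handler pattern, with ack_and_stop()/rearm() standing in for the MFGPT register writes:

#include <linux/clockchips.h>
#include <linux/interrupt.h>

static void ack_and_stop(void)
{
	/* placeholder: clear the event and stop the counter */
}

static void rearm(void)
{
	/* placeholder: restart the counter for the next period */
}

static irqreturn_t demo_tick(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	ack_and_stop();

	if (clockevent_state_shutdown(evt))	/* late or spurious tick */
		return IRQ_HANDLED;

	if (clockevent_state_periodic(evt))
		rearm();

	evt->event_handler(evt);
	return IRQ_HANDLED;
}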
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 31990600fcff..776b6c86dcd5 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,15 +16,6 @@
static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
-static void dummy_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /*
- * Core clockevents code will call this when exchanging timer devices.
- * We don't need to do anything here.
- */
-}
-
static void dummy_timer_setup(void)
{
int cpu = smp_processor_id();
@@ -35,7 +26,6 @@ static void dummy_timer_setup(void)
CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DUMMY;
evt->rating = 100;
- evt->set_mode = dummy_timer_set_mode;
evt->cpumask = cpumask_of(cpu);
clockevents_register_device(evt);
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 35a88097af3c..c76c75006ea6 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -110,71 +110,87 @@ static void apbt_enable_int(struct dw_apb_timer *timer)
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
-static void apbt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int apbt_shutdown(struct clock_event_device *evt)
{
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
unsigned long ctrl;
- unsigned long period;
+
+ pr_debug("%s CPU %d state=shutdown\n", __func__,
+ cpumask_first(evt->cpumask));
+
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ return 0;
+}
+
+static int apbt_set_oneshot(struct clock_event_device *evt)
+{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+ unsigned long ctrl;
- pr_debug("%s CPU %d mode=%d\n", __func__,
- cpumask_first(evt->cpumask),
- mode);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
- ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
- ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- /*
- * DW APB p. 46, have to disable timer before load counter,
- * may cause sync problem.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- udelay(1);
- pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
- apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
- /*
- * set free running mode, this mode will let timer reload max
- * timeout which will give time (3min on 25MHz clock) to rearm
- * the next event, therefore emulate the one-shot mode.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
-
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- /* write again to set free running mode */
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-
- /*
- * DW APB p. 46, load counter with all 1s before starting free
- * running mode.
- */
- apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
- ctrl &= ~APBTMR_CONTROL_INT;
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- apbt_enable_int(&dw_ced->timer);
- break;
- }
+ pr_debug("%s CPU %d state=oneshot\n", __func__,
+ cpumask_first(evt->cpumask));
+
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ /*
+ * set free running mode, this mode will let timer reload max
+ * timeout which will give time (3min on 25MHz clock) to rearm
+ * the next event, therefore emulate the one-shot mode.
+ */
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
+
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ /* write again to set free running mode */
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+
+ /*
+ * DW APB p. 46, load counter with all 1s before starting free
+ * running mode.
+ */
+ apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
+ ctrl &= ~APBTMR_CONTROL_INT;
+ ctrl |= APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ return 0;
+}
+
+static int apbt_set_periodic(struct clock_event_device *evt)
+{
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+ unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
+ unsigned long ctrl;
+
+ pr_debug("%s CPU %d state=periodic\n", __func__,
+ cpumask_first(evt->cpumask));
+
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ /*
+ * DW APB p. 46, have to disable timer before load counter,
+ * may cause sync problem.
+ */
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ udelay(1);
+ pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
+ apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
+ ctrl |= APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ return 0;
+}
+
+static int apbt_resume(struct clock_event_device *evt)
+{
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+
+ pr_debug("%s CPU %d state=resume\n", __func__,
+ cpumask_first(evt->cpumask));
+
+ apbt_enable_int(&dw_ced->timer);
+ return 0;
}
static int apbt_next_event(unsigned long delta,
@@ -232,8 +248,12 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
&dw_ced->ced);
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.cpumask = cpumask_of(cpu);
- dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- dw_ced->ced.set_mode = apbt_set_mode;
+ dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
+ dw_ced->ced.set_state_shutdown = apbt_shutdown;
+ dw_ced->ced.set_state_periodic = apbt_set_periodic;
+ dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
+ dw_ced->ced.tick_resume = apbt_resume;
dw_ced->ced.set_next_event = apbt_next_event;
dw_ced->ced.irq = dw_ced->timer.irq;
dw_ced->ced.rating = rating;
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index dc3c6ee04aaa..7a97a34dba70 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -251,33 +251,21 @@ static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
return container_of(ced, struct em_sti_priv, ced);
}
-static void em_sti_clock_event_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+static int em_sti_clock_event_shutdown(struct clock_event_device *ced)
{
struct em_sti_priv *p = ced_to_em_sti(ced);
+ em_sti_stop(p, USER_CLOCKEVENT);
+ return 0;
+}
- /* deal with old setting first */
- switch (ced->mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- em_sti_stop(p, USER_CLOCKEVENT);
- break;
- default:
- break;
- }
+static int em_sti_clock_event_set_oneshot(struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&p->pdev->dev, "used for oneshot clock events\n");
- em_sti_start(p, USER_CLOCKEVENT);
- clockevents_config(&p->ced, p->rate);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- em_sti_stop(p, USER_CLOCKEVENT);
- break;
- default:
- break;
- }
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+ em_sti_start(p, USER_CLOCKEVENT);
+ clockevents_config(&p->ced, p->rate);
+ return 0;
}
static int em_sti_clock_event_next(unsigned long delta,
@@ -303,11 +291,12 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = em_sti_clock_event_next;
- ced->set_mode = em_sti_clock_event_mode;
+ ced->set_state_shutdown = em_sti_clock_event_shutdown;
+ ced->set_state_oneshot = em_sti_clock_event_set_oneshot;
dev_info(&p->pdev->dev, "used for clock events\n");
- /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+ /* Register with dummy 1 Hz value, gets updated in ->set_state_oneshot() */
clockevents_config_and_register(ced, 1, 2, 0xffffffff);
}
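
The dummy-rate registration above is worth a note: clockevents_config_and_register() takes the tick frequency plus the minimum and maximum delta in ticks, and clockevents_config() may be called again later to recompute mult/shift once the real rate is known — which is exactly what the new ->set_state_oneshot() does here. A condensed sketch, reusing em_sti's p->rate field:

/* Register with a placeholder rate; the hardware is not yet clocked. */
clockevents_config_and_register(&p->ced, 1 /* dummy Hz */,
                                2 /* min delta, ticks */,
                                0xffffffff /* max delta, ticks */);

/* Later, in ->set_state_oneshot(), once the real rate is measured: */
clockevents_config(&p->ced, p->rate);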
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9064ff743598..029f96ab131a 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -257,15 +257,14 @@ static void exynos4_mct_comp0_stop(void)
exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}
-static void exynos4_mct_comp0_start(enum clock_event_mode mode,
- unsigned long cycles)
+static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
unsigned int tcon;
cycle_t comp_cycle;
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
- if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ if (periodic) {
tcon |= MCT_G_TCON_COMP0_AUTO_INC;
exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
}
@@ -283,38 +282,38 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode,
static int exynos4_comp_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- exynos4_mct_comp0_start(evt->mode, cycles);
+ exynos4_mct_comp0_start(false, cycles);
return 0;
}
-static void exynos4_comp_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int mct_set_state_shutdown(struct clock_event_device *evt)
{
- unsigned long cycles_per_jiffy;
exynos4_mct_comp0_stop();
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- cycles_per_jiffy =
- (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
- exynos4_mct_comp0_start(mode, cycles_per_jiffy);
- break;
+static int mct_set_state_periodic(struct clock_event_device *evt)
+{
+ unsigned long cycles_per_jiffy;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+ >> evt->shift);
+ exynos4_mct_comp0_stop();
+ exynos4_mct_comp0_start(true, cycles_per_jiffy);
+ return 0;
}
static struct clock_event_device mct_comp_device = {
- .name = "mct-comp",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .rating = 250,
- .set_next_event = exynos4_comp_set_next_event,
- .set_mode = exynos4_comp_set_mode,
+ .name = "mct-comp",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 250,
+ .set_next_event = exynos4_comp_set_next_event,
+ .set_state_periodic = mct_set_state_periodic,
+ .set_state_shutdown = mct_set_state_shutdown,
+ .set_state_oneshot = mct_set_state_shutdown,
+ .tick_resume = mct_set_state_shutdown,
};
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
@@ -390,39 +389,32 @@ static int exynos4_tick_set_next_event(unsigned long cycles,
return 0;
}
-static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int set_state_shutdown(struct clock_event_device *evt)
+{
+ exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick));
+ return 0;
+}
+
+static int set_state_periodic(struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
unsigned long cycles_per_jiffy;
+ cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+ >> evt->shift);
exynos4_mct_tick_stop(mevt);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- cycles_per_jiffy =
- (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
- exynos4_mct_tick_start(cycles_per_jiffy, mevt);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ exynos4_mct_tick_start(cycles_per_jiffy, mevt);
+ return 0;
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
- struct clock_event_device *evt = &mevt->evt;
-
/*
* This is for supporting oneshot mode.
* Mct would generate interrupt periodically
* without explicit stopping.
*/
- if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
+ if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
/* Clear the MCT tick interrupt */
@@ -442,20 +434,21 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int exynos4_local_timer_setup(struct clock_event_device *evt)
+static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
{
- struct mct_clock_event_device *mevt;
+ struct clock_event_device *evt = &mevt->evt;
unsigned int cpu = smp_processor_id();
- mevt = container_of(evt, struct mct_clock_event_device, evt);
-
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
evt->cpumask = cpumask_of(cpu);
evt->set_next_event = exynos4_tick_set_next_event;
- evt->set_mode = exynos4_tick_set_mode;
+ evt->set_state_periodic = set_state_periodic;
+ evt->set_state_shutdown = set_state_shutdown;
+ evt->set_state_oneshot = set_state_shutdown;
+ evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
@@ -477,9 +470,11 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void exynos4_local_timer_stop(struct clock_event_device *evt)
+static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
{
- evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ struct clock_event_device *evt = &mevt->evt;
+
+ evt->set_state_shutdown(evt);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
@@ -500,11 +495,11 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_setup(&mevt->evt);
+ exynos4_local_timer_setup(mevt);
break;
case CPU_DYING:
mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_stop(&mevt->evt);
+ exynos4_local_timer_stop(mevt);
break;
}
@@ -570,7 +565,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
goto out_irq;
/* Immediately configure the timer on the boot CPU */
- exynos4_local_timer_setup(&mevt->evt);
+ exynos4_local_timer_setup(mevt);
return;
out_irq:
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 454227d4f895..ef434699c80a 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -153,19 +153,16 @@ static int ftm_set_next_event(unsigned long delta,
return 0;
}
-static void ftm_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int ftm_set_oneshot(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- ftm_set_next_event(priv->periodic_cyc, evt);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- ftm_counter_disable(priv->clkevt_base);
- break;
- default:
- return;
- }
+ ftm_counter_disable(priv->clkevt_base);
+ return 0;
+}
+
+static int ftm_set_periodic(struct clock_event_device *evt)
+{
+ ftm_set_next_event(priv->periodic_cyc, evt);
+ return 0;
}
static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
@@ -174,7 +171,7 @@ static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
ftm_irq_acknowledge(priv->clkevt_base);
- if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) {
+ if (likely(clockevent_state_oneshot(evt))) {
ftm_irq_disable(priv->clkevt_base);
ftm_counter_disable(priv->clkevt_base);
}
@@ -185,11 +182,13 @@ static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
}
static struct clock_event_device ftm_clockevent = {
- .name = "Freescale ftm timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = ftm_set_mode,
- .set_next_event = ftm_set_next_event,
- .rating = 300,
+ .name = "Freescale ftm timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_periodic = ftm_set_periodic,
+ .set_state_oneshot = ftm_set_oneshot,
+ .set_next_event = ftm_set_next_event,
+ .rating = 300,
};
static struct irqaction ftm_timer_irq = {
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index 0214cb3a7f5e..f9b3b7033a97 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -81,7 +81,7 @@ static irqreturn_t timer8_interrupt(int irq, void *dev_id)
p->flags |= FLAG_IRQCONTEXT;
ctrl_outw(p->tcora, p->mapbase + TCORA);
if (!(p->flags & FLAG_SKIPEVENT)) {
- if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+ if (clockevent_state_oneshot(&p->ced))
ctrl_outw(0x0000, p->mapbase + _8TCR);
p->ced.event_handler(&p->ced);
}
@@ -169,29 +169,32 @@ static void timer8_clock_event_start(struct timer8_priv *p, int periodic)
timer8_set_next(p, periodic?(p->rate + HZ/2) / HZ:0x10000);
}
-static void timer8_clock_event_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+static int timer8_clock_event_shutdown(struct clock_event_device *ced)
+{
+ timer8_stop(ced_to_priv(ced));
+ return 0;
+}
+
+static int timer8_clock_event_periodic(struct clock_event_device *ced)
{
struct timer8_priv *p = ced_to_priv(ced);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&p->pdev->dev, "used for periodic clock events\n");
- timer8_stop(p);
- timer8_clock_event_start(p, PERIODIC);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&p->pdev->dev, "used for oneshot clock events\n");
- timer8_stop(p);
- timer8_clock_event_start(p, ONESHOT);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- timer8_stop(p);
- break;
- default:
- break;
- }
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
+ timer8_stop(p);
+ timer8_clock_event_start(p, PERIODIC);
+
+ return 0;
+}
+
+static int timer8_clock_event_oneshot(struct clock_event_device *ced)
+{
+ struct timer8_priv *p = ced_to_priv(ced);
+
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+ timer8_stop(p);
+ timer8_clock_event_start(p, ONESHOT);
+
+ return 0;
}
static int timer8_clock_event_next(unsigned long delta,
@@ -199,7 +202,7 @@ static int timer8_clock_event_next(unsigned long delta,
{
struct timer8_priv *p = ced_to_priv(ced);
- BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+ BUG_ON(!clockevent_state_oneshot(ced));
timer8_set_next(p, delta - 1);
return 0;
@@ -246,7 +249,9 @@ static int timer8_setup(struct timer8_priv *p,
p->ced.rating = 200;
p->ced.cpumask = cpumask_of(0);
p->ced.set_next_event = timer8_clock_event_next;
- p->ced.set_mode = timer8_clock_event_mode;
+ p->ced.set_state_shutdown = timer8_clock_event_shutdown;
+ p->ced.set_state_periodic = timer8_clock_event_periodic;
+ p->ced.set_state_oneshot = timer8_clock_event_oneshot;
ret = setup_irq(irq, &p->irqaction);
if (ret < 0) {
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 14ee3efcc404..0efd36e483ab 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -100,44 +100,40 @@ int __init clocksource_i8253_init(void)
#endif
#ifdef CONFIG_CLKEVT_I8253
-/*
- * Initialize the PIT timer.
- *
- * This is also called after resume to bring the PIT into operation again.
- */
-static void init_pit_timer(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int pit_shutdown(struct clock_event_device *evt)
{
+ if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
+ return 0;
+
raw_spin_lock(&i8253_lock);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* binary, mode 2, LSB/MSB, ch 0 */
- outb_p(0x34, PIT_MODE);
- outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */
- outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
- evt->mode == CLOCK_EVT_MODE_ONESHOT) {
- outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- }
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* One shot setup */
- outb_p(0x38, PIT_MODE);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here */
- break;
- }
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+
+ raw_spin_unlock(&i8253_lock);
+ return 0;
+}
+
+static int pit_set_oneshot(struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+ outb_p(0x38, PIT_MODE);
+ raw_spin_unlock(&i8253_lock);
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+ outb_p(PIT_LATCH & 0xff, PIT_CH0); /* LSB */
+ outb_p(PIT_LATCH >> 8, PIT_CH0); /* MSB */
+
raw_spin_unlock(&i8253_lock);
+ return 0;
}
/*
@@ -160,10 +156,11 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
* it can be solely used for the global tick.
*/
struct clock_event_device i8253_clockevent = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC,
- .set_mode = init_pit_timer,
- .set_next_event = pit_next_event,
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_state_shutdown = pit_shutdown,
+ .set_state_periodic = pit_set_periodic,
+ .set_next_event = pit_next_event,
};
/*
@@ -172,8 +169,10 @@ struct clock_event_device i8253_clockevent = {
*/
void __init clockevent_i8253_init(bool oneshot)
{
- if (oneshot)
+ if (oneshot) {
i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
+ i8253_clockevent.set_state_oneshot = pit_set_oneshot;
+ }
/*
* Start pit with the boot cpu mask. x86 might make it global
* when it is used as broadcast device later.
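
One subtlety behind the pit_shutdown() guard above: the clockevents core commits the new state only after the callback returns successfully, so inside a set_state_*() handler the clockevent_state_*() accessors still describe the state being left. The early return therefore skips reprogramming the PIT unless it was actually running, mirroring the old evt->mode check. The same idiom recurs in several drivers below; in generic form (foo_stop_hardware() is a hypothetical helper):

static int foo_shutdown(struct clock_event_device *evt)
{
        /* Accessors reflect the *outgoing* state at this point. */
        if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
                return 0;               /* was already idle */

        foo_stop_hardware();
        return 0;
}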
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 5c15cba41dca..1fa22c4d2d49 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -67,25 +67,25 @@ static void meson6_clkevt_time_start(unsigned char timer, bool periodic)
writel(val | TIMER_ENABLE_BIT(timer), timer_base + TIMER_ISA_MUX);
}
-static void meson6_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int meson6_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- meson6_clkevt_time_stop(CED_ID);
- meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC/HZ - 1);
- meson6_clkevt_time_start(CED_ID, true);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- meson6_clkevt_time_stop(CED_ID);
- meson6_clkevt_time_start(CED_ID, false);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- meson6_clkevt_time_stop(CED_ID);
- break;
- }
+ meson6_clkevt_time_stop(CED_ID);
+ return 0;
+}
+
+static int meson6_set_oneshot(struct clock_event_device *evt)
+{
+ meson6_clkevt_time_stop(CED_ID);
+ meson6_clkevt_time_start(CED_ID, false);
+ return 0;
+}
+
+static int meson6_set_periodic(struct clock_event_device *evt)
+{
+ meson6_clkevt_time_stop(CED_ID);
+ meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC / HZ - 1);
+ meson6_clkevt_time_start(CED_ID, true);
+ return 0;
}
static int meson6_clkevt_next_event(unsigned long evt,
@@ -99,11 +99,15 @@ static int meson6_clkevt_next_event(unsigned long evt,
}
static struct clock_event_device meson6_clockevent = {
- .name = "meson6_tick",
- .rating = 400,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = meson6_clkevt_mode,
- .set_next_event = meson6_clkevt_next_event,
+ .name = "meson6_tick",
+ .rating = 400,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = meson6_shutdown,
+ .set_state_periodic = meson6_set_periodic,
+ .set_state_oneshot = meson6_set_oneshot,
+ .tick_resume = meson6_shutdown,
+ .set_next_event = meson6_clkevt_next_event,
};
static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index b7384b853e5a..bcd5c0d602a0 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -56,25 +56,6 @@ static int metag_timer_set_next_event(unsigned long delta,
return 0;
}
-static void metag_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- /* We should disable the IRQ here */
- break;
-
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_UNUSED:
- WARN_ON(1);
- break;
- };
-}
-
static cycle_t metag_clocksource_read(struct clocksource *cs)
{
return __core_reg_get(TXTIMER);
@@ -129,7 +110,6 @@ static void arch_timer_setup(unsigned int cpu)
clk->rating = 200,
clk->shift = 12,
clk->irq = tbisig_map(TBID_SIGNUM_TRT),
- clk->set_mode = metag_timer_set_mode,
clk->set_next_event = metag_timer_set_next_event,
clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index b81ed1a5342d..02a1945e5093 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -33,12 +33,6 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
return res;
}
-static void gic_set_clock_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
@@ -67,7 +61,6 @@ static void gic_clockevent_cpu_init(struct clock_event_device *cd)
cd->irq = gic_timer_irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = gic_next_event;
- cd->set_mode = gic_set_clock_mode;
clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);
@@ -79,6 +72,13 @@ static void gic_clockevent_cpu_exit(struct clock_event_device *cd)
disable_percpu_irq(gic_timer_irq);
}
+static void gic_update_frequency(void *data)
+{
+ unsigned long rate = (unsigned long)data;
+
+ clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
+}
+
static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
@@ -94,18 +94,40 @@ static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct clk_notifier_data *cnd = data;
+
+ if (action == POST_RATE_CHANGE)
+ on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+
+ return NOTIFY_OK;
+}
+
+
static struct notifier_block gic_cpu_nb = {
.notifier_call = gic_cpu_notifier,
};
+static struct notifier_block gic_clk_nb = {
+ .notifier_call = gic_clk_notifier,
+};
+
static int gic_clockevent_init(void)
{
+ int ret;
+
if (!cpu_has_counter || !gic_frequency)
return -ENXIO;
- setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+ ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+ if (ret < 0)
+ return ret;
- register_cpu_notifier(&gic_cpu_nb);
+ ret = register_cpu_notifier(&gic_cpu_nb);
+ if (ret < 0)
+ pr_warn("GIC: Unable to register CPU notifier\n");
gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
@@ -125,18 +147,17 @@ static struct clocksource gic_clocksource = {
static void __init __gic_clocksource_init(void)
{
+ int ret;
+
/* Set clocksource mask. */
gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
/* Calculate a somewhat reasonable rating value. */
gic_clocksource.rating = 200 + gic_frequency / 10000000;
- clocksource_register_hz(&gic_clocksource, gic_frequency);
-
- gic_clockevent_init();
-
- /* And finally start the counter */
- gic_start_count();
+ ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
+ if (ret < 0)
+ pr_warn("GIC: Unable to register clocksource\n");
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -146,11 +167,16 @@ void __init gic_clocksource_init(unsigned int frequency)
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
__gic_clocksource_init();
+ gic_clockevent_init();
+
+ /* And finally start the counter */
+ gic_start_count();
}
static void __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
+ int ret;
if (WARN_ON(!gic_present || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")))
@@ -158,8 +184,13 @@ static void __init gic_clocksource_of_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
+ if (clk_prepare_enable(clk) < 0) {
+ pr_err("GIC failed to enable clock\n");
+ clk_put(clk);
+ return;
+ }
+
gic_frequency = clk_get_rate(clk);
- clk_put(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
@@ -172,6 +203,15 @@ static void __init gic_clocksource_of_init(struct device_node *node)
}
__gic_clocksource_init();
+
+ ret = gic_clockevent_init();
+ if (!ret && !IS_ERR(clk)) {
+ if (clk_notifier_register(clk, &gic_clk_nb) < 0)
+ pr_warn("GIC: Unable to register clock notifier\n");
+ }
+
+ /* And finally start the counter */
+ gic_start_count();
}
CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
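
The new gic_clk_nb hooks into the common clock framework's rate-change notifications: clk_notifier_register() attaches a notifier_block to a specific clk, and the callback receives a struct clk_notifier_data carrying old_rate/new_rate for PRE_RATE_CHANGE, POST_RATE_CHANGE and ABORT_RATE_CHANGE events. Stripped to its essentials (foo_update_timer_freq() is hypothetical):

#include <linux/clk.h>
#include <linux/notifier.h>

static int foo_clk_notify(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        struct clk_notifier_data *cnd = data;

        /* Only react once the new rate is actually in effect. */
        if (action == POST_RATE_CHANGE)
                foo_update_timer_freq(cnd->new_rate);

        return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
        .notifier_call = foo_clk_notify,
};

/* ...after of_clk_get() and clk_prepare_enable() succeed: */
if (clk_notifier_register(clk, &foo_clk_nb) < 0)
        pr_warn("foo: unable to register clock notifier\n");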
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 5eb2c35932b1..19857af651c1 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -58,25 +58,24 @@
static void __iomem *base;
static unsigned int clock_count_per_tick;
-static void moxart_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int moxart_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_ONESHOT:
- writel(TIMER1_DISABLE, base + TIMER_CR);
- writel(~0, base + TIMER1_BASE + REG_LOAD);
- break;
- case CLOCK_EVT_MODE_PERIODIC:
- writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
- writel(TIMER1_ENABLE, base + TIMER_CR);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- writel(TIMER1_DISABLE, base + TIMER_CR);
- break;
- }
+ writel(TIMER1_DISABLE, base + TIMER_CR);
+ return 0;
+}
+
+static int moxart_set_oneshot(struct clock_event_device *evt)
+{
+ writel(TIMER1_DISABLE, base + TIMER_CR);
+ writel(~0, base + TIMER1_BASE + REG_LOAD);
+ return 0;
+}
+
+static int moxart_set_periodic(struct clock_event_device *evt)
+{
+ writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
+ writel(TIMER1_ENABLE, base + TIMER_CR);
+ return 0;
}
static int moxart_clkevt_next_event(unsigned long cycles,
@@ -95,11 +94,15 @@ static int moxart_clkevt_next_event(unsigned long cycles,
}
static struct clock_event_device moxart_clockevent = {
- .name = "moxart_timer",
- .rating = 200,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = moxart_clkevt_mode,
- .set_next_event = moxart_clkevt_next_event,
+ .name = "moxart_timer",
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = moxart_shutdown,
+ .set_state_periodic = moxart_set_periodic,
+ .set_state_oneshot = moxart_set_oneshot,
+ .tick_resume = moxart_set_oneshot,
+ .set_next_event = moxart_clkevt_next_event,
};
static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 68ab42356d0e..50f0641c65b6 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -102,27 +102,20 @@ static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
evt->gpt_base + TIMER_CTRL_REG(timer));
}
-static void mtk_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int mtk_clkevt_shutdown(struct clock_event_device *clk)
+{
+ mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
+ return 0;
+}
+
+static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
{
struct mtk_clock_event_device *evt = to_mtk_clk(clk);
mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* Timer is enabled in set_next_event */
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- /* No more interrupts will occur as source is disabled */
- break;
- }
+ mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
+ mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
+ return 0;
}
static int mtk_clkevt_next_event(unsigned long event,
@@ -196,7 +189,10 @@ static void __init mtk_timer_init(struct device_node *node)
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->dev.set_mode = mtk_clkevt_mode;
+ evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
+ evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
+ evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
+ evt->dev.tick_resume = mtk_clkevt_shutdown;
evt->dev.set_next_event = mtk_clkevt_next_event;
evt->dev.cpumask = cpu_possible_mask;
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 445b68a01dc5..f5ce2961c0d6 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -77,7 +77,6 @@
#define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS 0xf
static struct clock_event_device mxs_clockevent_device;
-static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED;
static void __iomem *mxs_timrot_base;
static u32 timrot_major_version;
@@ -141,64 +140,49 @@ static struct irqaction mxs_timer_irq = {
.handler = mxs_timer_interrupt,
};
-#ifdef DEBUG
-static const char *clock_event_mode_label[] const = {
- [CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
- [CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT",
- [CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
- [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED"
-};
-#endif /* DEBUG */
-
-static void mxs_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static void mxs_irq_clear(char *state)
{
/* Disable interrupt in timer module */
timrot_irq_disable();
- if (mode != mxs_clockevent_mode) {
- /* Set event time into the furthest future */
- if (timrot_is_v1())
- __raw_writel(0xffff,
- mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
- else
- __raw_writel(0xffffffff,
- mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
-
- /* Clear pending interrupt */
- timrot_irq_acknowledge();
- }
+ /* Set event time into the furthest future */
+ if (timrot_is_v1())
+ __raw_writel(0xffff, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+ else
+ __raw_writel(0xffffffff,
+ mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+ /* Clear pending interrupt */
+ timrot_irq_acknowledge();
#ifdef DEBUG
- pr_info("%s: changing mode from %s to %s\n", __func__,
- clock_event_mode_label[mxs_clockevent_mode],
- clock_event_mode_label[mode]);
+ pr_info("%s: changing mode to %s\n", __func__, state)
#endif /* DEBUG */
+}
- /* Remember timer mode */
- mxs_clockevent_mode = mode;
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- pr_err("%s: Periodic mode is not implemented\n", __func__);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- timrot_irq_enable();
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_RESUME:
- /* Left event sources disabled, no more interrupts appear */
- break;
- }
+static int mxs_shutdown(struct clock_event_device *evt)
+{
+ mxs_irq_clear("shutdown");
+
+ return 0;
+}
+
+static int mxs_set_oneshot(struct clock_event_device *evt)
+{
+ if (clockevent_state_oneshot(evt))
+ mxs_irq_clear("oneshot");
+ timrot_irq_enable();
+ return 0;
}
static struct clock_event_device mxs_clockevent_device = {
- .name = "mxs_timrot",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = mxs_set_mode,
- .set_next_event = timrotv2_set_next_event,
- .rating = 200,
+ .name = "mxs_timrot",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = mxs_shutdown,
+ .set_state_oneshot = mxs_set_oneshot,
+ .tick_resume = mxs_shutdown,
+ .set_next_event = timrotv2_set_next_event,
+ .rating = 200,
};
static int __init mxs_clockevent_init(struct clk *timer_clk)
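
Note that mxs advertises only CLOCK_EVT_FEAT_ONESHOT and now simply omits set_state_periodic: the per-state callbacks are individually optional, and the core refuses to switch a device into a state whose feature bit is not set, so the old pr_err() for an unsupported periodic request needs no replacement. In sketch form, a oneshot-only device needs no more than:

/* Sketch: a oneshot-only clockevent; periodic hooks simply omitted. */
static struct clock_event_device foo_clockevent = {
        .name                   = "foo_timer",
        .features               = CLOCK_EVT_FEAT_ONESHOT,
        .set_state_shutdown     = foo_shutdown,
        .set_state_oneshot      = foo_set_oneshot,
        .set_next_event         = foo_set_next_event,
        .rating                 = 200,
};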
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index a709cfa49d85..bc8dd443c727 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -119,28 +119,27 @@ static void nmdk_clkevt_reset(void)
}
}
-static void nmdk_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int nmdk_clkevt_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- clkevt_periodic = true;
- nmdk_clkevt_reset();
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- clkevt_periodic = false;
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- writel(0, mtu_base + MTU_IMSC);
- /* disable timer */
- writel(0, mtu_base + MTU_CR(1));
- /* load some high default value */
- writel(0xffffffff, mtu_base + MTU_LR(1));
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ writel(0, mtu_base + MTU_IMSC);
+ /* disable timer */
+ writel(0, mtu_base + MTU_CR(1));
+ /* load some high default value */
+ writel(0xffffffff, mtu_base + MTU_LR(1));
+ return 0;
+}
+
+static int nmdk_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ clkevt_periodic = false;
+ return 0;
+}
+
+static int nmdk_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ clkevt_periodic = true;
+ nmdk_clkevt_reset();
+ return 0;
}
static void nmdk_clksrc_reset(void)
@@ -163,13 +162,16 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev)
}
static struct clock_event_device nmdk_clkevt = {
- .name = "mtu_1",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DYNIRQ,
- .rating = 200,
- .set_mode = nmdk_clkevt_mode,
- .set_next_event = nmdk_clkevt_next,
- .resume = nmdk_clkevt_resume,
+ .name = "mtu_1",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .rating = 200,
+ .set_state_shutdown = nmdk_clkevt_shutdown,
+ .set_state_periodic = nmdk_clkevt_set_periodic,
+ .set_state_oneshot = nmdk_clkevt_set_oneshot,
+ .set_next_event = nmdk_clkevt_next,
+ .resume = nmdk_clkevt_resume,
};
/*
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index d9438af2bbd6..45b6a4999713 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -88,26 +88,12 @@ pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
}
-static void
-pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static int pxa_osmr0_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
- timer_writel(OSSR_M0, OSSR);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- /* initializing, released, or preparing for suspend */
- timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
- timer_writel(OSSR_M0, OSSR);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_PERIODIC:
- break;
- }
+ /* initializing, released, or preparing for suspend */
+ timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+ timer_writel(OSSR_M0, OSSR);
+ return 0;
}
#ifdef CONFIG_PM
@@ -147,13 +133,14 @@ static void pxa_timer_resume(struct clock_event_device *cedev)
#endif
static struct clock_event_device ckevt_pxa_osmr0 = {
- .name = "osmr0",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = pxa_osmr0_set_next_event,
- .set_mode = pxa_osmr0_set_mode,
- .suspend = pxa_timer_suspend,
- .resume = pxa_timer_resume,
+ .name = "osmr0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = pxa_osmr0_set_next_event,
+ .set_state_shutdown = pxa_osmr0_shutdown,
+ .set_state_oneshot = pxa_osmr0_shutdown,
+ .suspend = pxa_timer_suspend,
+ .resume = pxa_timer_resume,
};
static struct irqaction pxa_ost0_irq = {
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index cba2d015564c..f8e09f923651 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -47,7 +47,7 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
/* Stop the timer tick */
- if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ if (clockevent_state_oneshot(evt)) {
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
ctrl &= ~TIMER_ENABLE_EN;
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
@@ -75,26 +75,14 @@ static int msm_timer_set_next_event(unsigned long cycles,
return 0;
}
-static void msm_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int msm_timer_shutdown(struct clock_event_device *evt)
{
u32 ctrl;
ctrl = readl_relaxed(event_base + TIMER_ENABLE);
ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
-
- switch (mode) {
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_PERIODIC:
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* Timer is enabled in set_next_event */
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- break;
- }
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+ return 0;
}
static struct clock_event_device __percpu *msm_evt;
@@ -126,7 +114,9 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
evt->name = "msm_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 200;
- evt->set_mode = msm_timer_set_mode;
+ evt->set_state_shutdown = msm_timer_shutdown;
+ evt->set_state_oneshot = msm_timer_shutdown;
+ evt->tick_resume = msm_timer_shutdown;
evt->set_next_event = msm_timer_set_next_event;
evt->cpumask = cpumask_of(cpu);
@@ -147,7 +137,7 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
static void msm_local_timer_stop(struct clock_event_device *evt)
{
- evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
}
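
With set_mode() gone, teardown paths that used to force CLOCK_EVT_MODE_UNUSED now invoke the shutdown callback directly, as msm_local_timer_stop() does above — the same substitution already appeared in exynos_mct and recurs in armada-370-xp below. The whole pattern is two lines:

/* Sketch: stopping a per-cpu tick device on CPU teardown. */
evt->set_state_shutdown(evt);   /* quiesce the timer hardware */
disable_percpu_irq(evt->irq);   /* then mask its interrupt */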
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index a35993bafb20..bb2c2b050964 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -82,23 +82,18 @@ static inline int rk_timer_set_next_event(unsigned long cycles,
return 0;
}
-static inline void rk_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *ce)
+static int rk_timer_shutdown(struct clock_event_device *ce)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- rk_timer_disable(ce);
- rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce);
- rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- rk_timer_disable(ce);
- break;
- }
+ rk_timer_disable(ce);
+ return 0;
+}
+
+static int rk_timer_set_periodic(struct clock_event_device *ce)
+{
+ rk_timer_disable(ce);
+ rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce);
+ rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING);
+ return 0;
}
static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
@@ -107,7 +102,7 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
rk_timer_interrupt_clear(ce);
- if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+ if (clockevent_state_oneshot(ce))
rk_timer_disable(ce);
ce->event_handler(ce);
@@ -161,7 +156,8 @@ static void __init rk_timer_init(struct device_node *np)
ce->name = TIMER_NAME;
ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->set_next_event = rk_timer_set_next_event;
- ce->set_mode = rk_timer_set_mode;
+ ce->set_state_shutdown = rk_timer_shutdown;
+ ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = irq;
ce->cpumask = cpumask_of(0);
ce->rating = 250;
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 5645cfc90c41..bc90e13338cc 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -207,25 +207,18 @@ static int samsung_set_next_event(unsigned long cycles,
return 0;
}
-static void samsung_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int samsung_shutdown(struct clock_event_device *evt)
{
samsung_time_stop(pwm.event_id);
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
- samsung_time_start(pwm.event_id, true);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+static int samsung_set_periodic(struct clock_event_device *evt)
+{
+ samsung_time_stop(pwm.event_id);
+ samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
+ samsung_time_start(pwm.event_id, true);
+ return 0;
}
static void samsung_clockevent_resume(struct clock_event_device *cev)
@@ -240,12 +233,16 @@ static void samsung_clockevent_resume(struct clock_event_device *cev)
}
static struct clock_event_device time_event_device = {
- .name = "samsung_event_timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = samsung_set_next_event,
- .set_mode = samsung_set_mode,
- .resume = samsung_clockevent_resume,
+ .name = "samsung_event_timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = samsung_set_next_event,
+ .set_state_shutdown = samsung_shutdown,
+ .set_state_periodic = samsung_set_periodic,
+ .set_state_oneshot = samsung_shutdown,
+ .tick_resume = samsung_shutdown,
+ .resume = samsung_clockevent_resume,
};
static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..ba73a6eb8d66 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -538,7 +538,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
if (ch->flags & FLAG_CLOCKEVENT) {
if (!(ch->flags & FLAG_SKIPEVENT)) {
- if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
+ if (clockevent_state_oneshot(&ch->ced)) {
ch->next_match_value = ch->max_match_value;
ch->flags |= FLAG_REPROGRAM;
}
@@ -554,7 +554,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
sh_cmt_clock_event_program_verify(ch, 1);
if (ch->flags & FLAG_CLOCKEVENT)
- if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
+ if ((clockevent_state_shutdown(&ch->ced))
|| (ch->match_value == ch->next_match_value))
ch->flags &= ~FLAG_REPROGRAM;
}
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
@@ -720,39 +726,37 @@ static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
sh_cmt_set_next(ch, ch->max_match_value);
}
-static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
+{
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+
+ sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ return 0;
+}
+
+static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
+ int periodic)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
/* deal with old setting first */
- switch (ced->mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_ONESHOT:
+ if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
sh_cmt_stop(ch, FLAG_CLOCKEVENT);
- break;
- default:
- break;
- }
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&ch->cmt->pdev->dev,
- "ch%u: used for periodic clock events\n", ch->index);
- sh_cmt_clock_event_start(ch, 1);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&ch->cmt->pdev->dev,
- "ch%u: used for oneshot clock events\n", ch->index);
- sh_cmt_clock_event_start(ch, 0);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
- break;
- default:
- break;
- }
+ dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
+ ch->index, periodic ? "periodic" : "oneshot");
+ sh_cmt_clock_event_start(ch, periodic);
+ return 0;
+}
+
+static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
+{
+ return sh_cmt_clock_event_set_state(ced, 0);
+}
+
+static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
+{
+ return sh_cmt_clock_event_set_state(ced, 1);
}
static int sh_cmt_clock_event_next(unsigned long delta,
@@ -760,7 +764,7 @@ static int sh_cmt_clock_event_next(unsigned long delta,
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+ BUG_ON(!clockevent_state_oneshot(ced));
if (likely(ch->flags & FLAG_IRQCONTEXT))
ch->next_match_value = delta - 1;
else
@@ -814,7 +818,9 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
ced->rating = 125;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_cmt_clock_event_next;
- ced->set_mode = sh_cmt_clock_event_mode;
+ ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
+ ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
+ ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
ced->suspend = sh_cmt_clock_event_suspend;
ced->resume = sh_cmt_clock_event_resume;
@@ -929,9 +935,6 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
static const struct platform_device_id sh_cmt_id_table[] = {
{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
- { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
- { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
- { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
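
sh_cmt collapses the oneshot and periodic cases into one parameterized helper behind two thin wrappers, stopping the channel first if it was already running (again relying on the accessors reporting the outgoing state); sh_tmu below does the same. Distilled, with hypothetical foo_start()/foo_stop() helpers:

static int foo_set_state(struct clock_event_device *ced, int periodic)
{
        /* Deal with the old setting first: stop if currently running. */
        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                foo_stop(ced);

        foo_start(ced, periodic);
        return 0;
}

static int foo_set_oneshot(struct clock_event_device *ced)
{
        return foo_set_state(ced, 0);
}

static int foo_set_periodic(struct clock_event_device *ced)
{
        return foo_set_state(ced, 1);
}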
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 3d88698cf2b8..f1985da8113f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -276,36 +276,25 @@ static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
return container_of(ced, struct sh_mtu2_channel, ced);
}
-static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
- int disabled = 0;
- /* deal with old setting first */
- switch (ced->mode) {
- case CLOCK_EVT_MODE_PERIODIC:
+ sh_mtu2_disable(ch);
+ return 0;
+}
+
+static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
+{
+ struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
+
+ if (clockevent_state_periodic(ced))
sh_mtu2_disable(ch);
- disabled = 1;
- break;
- default:
- break;
- }
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&ch->mtu->pdev->dev,
- "ch%u: used for periodic clock events\n", ch->index);
- sh_mtu2_enable(ch);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- if (!disabled)
- sh_mtu2_disable(ch);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- break;
- }
+ dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
+ ch->index);
+ sh_mtu2_enable(ch);
+ return 0;
}
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
@@ -327,7 +316,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
- ced->set_mode = sh_mtu2_clock_event_mode;
+ ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
+ ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
ced->suspend = sh_mtu2_clock_event_suspend;
ced->resume = sh_mtu2_clock_event_resume;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b6b8fa3cd211..469e776ec17a 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -240,7 +240,7 @@ static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
struct sh_tmu_channel *ch = dev_id;
/* disable or acknowledge interrupt */
- if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+ if (clockevent_state_oneshot(&ch->ced))
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
else
sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
@@ -358,42 +358,38 @@ static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
}
}
-static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
+{
+ struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
+
+ if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
+ sh_tmu_disable(ch);
+ return 0;
+}
+
+static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
+ int periodic)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
- int disabled = 0;
/* deal with old setting first */
- switch (ced->mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_ONESHOT:
+ if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
sh_tmu_disable(ch);
- disabled = 1;
- break;
- default:
- break;
- }
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- dev_info(&ch->tmu->pdev->dev,
- "ch%u: used for periodic clock events\n", ch->index);
- sh_tmu_clock_event_start(ch, 1);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- dev_info(&ch->tmu->pdev->dev,
- "ch%u: used for oneshot clock events\n", ch->index);
- sh_tmu_clock_event_start(ch, 0);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- if (!disabled)
- sh_tmu_disable(ch);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- break;
- }
+ dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
+ ch->index, periodic ? "periodic" : "oneshot");
+ sh_tmu_clock_event_start(ch, periodic);
+ return 0;
+}
+
+static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
+{
+ return sh_tmu_clock_event_set_state(ced, 0);
+}
+
+static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
+{
+ return sh_tmu_clock_event_set_state(ced, 1);
}
static int sh_tmu_clock_event_next(unsigned long delta,
@@ -401,7 +397,7 @@ static int sh_tmu_clock_event_next(unsigned long delta,
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
- BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+ BUG_ON(!clockevent_state_oneshot(ced));
/* program new delta value */
sh_tmu_set_next(ch, delta, 0);
@@ -430,7 +426,9 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
- ced->set_mode = sh_tmu_clock_event_mode;
+ ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
+ ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
+ ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
ced->suspend = sh_tmu_clock_event_suspend;
ced->resume = sh_tmu_clock_event_resume;
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 1928a8912584..6f3719d73390 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -81,25 +81,25 @@ static void sun4i_clkevt_time_start(u8 timer, bool periodic)
timer_base + TIMER_CTL_REG(timer));
}
-static void sun4i_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
+static int sun4i_clkevt_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- sun4i_clkevt_time_stop(0);
- sun4i_clkevt_time_setup(0, ticks_per_jiffy);
- sun4i_clkevt_time_start(0, true);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- sun4i_clkevt_time_stop(0);
- sun4i_clkevt_time_start(0, false);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- sun4i_clkevt_time_stop(0);
- break;
- }
+ sun4i_clkevt_time_stop(0);
+ return 0;
+}
+
+static int sun4i_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ sun4i_clkevt_time_stop(0);
+ sun4i_clkevt_time_start(0, false);
+ return 0;
+}
+
+static int sun4i_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ sun4i_clkevt_time_stop(0);
+ sun4i_clkevt_time_setup(0, ticks_per_jiffy);
+ sun4i_clkevt_time_start(0, true);
+ return 0;
}
static int sun4i_clkevt_next_event(unsigned long evt,
@@ -116,7 +116,10 @@ static struct clock_event_device sun4i_clockevent = {
.name = "sun4i_tick",
.rating = 350,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sun4i_clkevt_mode,
+ .set_state_shutdown = sun4i_clkevt_shutdown,
+ .set_state_periodic = sun4i_clkevt_set_periodic,
+ .set_state_oneshot = sun4i_clkevt_set_oneshot,
+ .tick_resume = sun4i_clkevt_shutdown,
.set_next_event = sun4i_clkevt_next_event,
};
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8bdbc45c6dad..d28d2fe798d5 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -91,55 +91,62 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
*/
static u32 timer_clock;
-static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
void __iomem *regs = tcd->regs;
- if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
- || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
- __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
- __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
- }
+ __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+ __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+ clk_disable(tcd->clk);
- switch (m) {
+ return 0;
+}
- /* By not making the gentime core emulate periodic mode on top
- * of oneshot, we get lower overhead and improved accuracy.
- */
- case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+static int tc_set_oneshot(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+ void __iomem *regs = tcd->regs;
- /* slow clock, count up to RC, then irq and restart */
- __raw_writel(timer_clock
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_shutdown(d);
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+ clk_enable(tcd->clk);
- /* go go gadget! */
- __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
- regs + ATMEL_TC_REG(2, CCR));
- break;
+ /* slow clock, count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+ ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
- case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ /* set_next_event() configures and starts the timer */
+ return 0;
+}
- /* slow clock, count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+static int tc_set_periodic(struct clock_event_device *d)
+{
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+ void __iomem *regs = tcd->regs;
- /* set_next_event() configures and starts the timer */
- break;
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_shutdown(d);
- default:
- break;
- }
+ /* By not making the gentime core emulate periodic mode on top
+ * of oneshot, we get lower overhead and improved accuracy.
+ */
+ clk_enable(tcd->clk);
+
+ /* slow clock, count up to RC, then irq and restart */
+ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+ __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+
+ /* go go gadget! */
+ __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
+ ATMEL_TC_REG(2, CCR));
+ return 0;
}
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
@@ -154,13 +161,15 @@ static int tc_next_event(unsigned long delta, struct clock_event_device *d)
static struct tc_clkevt_device clkevt = {
.clkevt = {
- .name = "tc_clkevt",
- .features = CLOCK_EVT_FEAT_PERIODIC
- | CLOCK_EVT_FEAT_ONESHOT,
+ .name = "tc_clkevt",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
- .rating = 125,
- .set_next_event = tc_next_event,
- .set_mode = tc_mode,
+ .rating = 125,
+ .set_next_event = tc_next_event,
+ .set_state_shutdown = tc_shutdown,
+ .set_state_periodic = tc_set_periodic,
+ .set_state_oneshot = tc_set_oneshot,
},
};
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 5a112d72fc2d..6ebda1177e79 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -72,33 +72,36 @@ static int tegra_timer_set_next_event(unsigned long cycles,
return 0;
}
-static void tegra_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static inline void timer_shutdown(struct clock_event_device *evt)
{
- u32 reg;
-
timer_writel(0, TIMER3_BASE + TIMER_PTV);
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- reg = 0xC0000000 | ((1000000/HZ)-1);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+static int tegra_timer_shutdown(struct clock_event_device *evt)
+{
+ timer_shutdown(evt);
+ return 0;
+}
+
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
+{
+ u32 reg = 0xC0000000 | ((1000000 / HZ) - 1);
+
+ timer_shutdown(evt);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ return 0;
}
static struct clock_event_device tegra_clockevent = {
- .name = "timer0",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = tegra_timer_set_next_event,
- .set_mode = tegra_timer_set_mode,
+ .name = "timer0",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
};
static u64 notrace tegra_read_sched_clock(void)
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 0c8c5e337540..2162796fd504 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -121,33 +121,33 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
return 0;
}
-static void
-armada_370_xp_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
{
- if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ /*
+ * Disable timer.
+ */
+ local_timer_ctrl_clrset(TIMER0_EN, 0);
- /*
- * Setup timer to fire at 1/HZ intervals.
- */
- writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
- writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
+ /*
+ * ACK pending timer interrupt.
+ */
+ writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
+ return 0;
+}
- /*
- * Enable timer.
- */
- local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
- } else {
- /*
- * Disable timer.
- */
- local_timer_ctrl_clrset(TIMER0_EN, 0);
+static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ /*
+ * Setup timer to fire at 1/HZ intervals.
+ */
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
+ writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
- /*
- * ACK pending timer interrupt.
- */
- writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
- }
+ /*
+ * Enable timer.
+ */
+ local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+ return 0;
}
static int armada_370_xp_clkevt_irq;
@@ -185,7 +185,10 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
evt->shift = 32,
evt->rating = 300,
evt->set_next_event = armada_370_xp_clkevt_next_event,
- evt->set_mode = armada_370_xp_clkevt_mode,
+ evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
+ evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
+ evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
+ evt->tick_resume = armada_370_xp_clkevt_shutdown;
evt->irq = armada_370_xp_clkevt_irq;
evt->cpumask = cpumask_of(cpu);
@@ -197,7 +200,7 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
static void armada_370_xp_timer_stop(struct clock_event_device *evt)
{
- evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
}
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index 5b6e3d5644c9..b06e4c2be406 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -48,40 +48,42 @@ struct efm32_clock_event_ddata {
unsigned periodic_top;
};
-static void efm32_clock_event_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evtdev)
+static int efm32_clock_event_shutdown(struct clock_event_device *evtdev)
{
struct efm32_clock_event_ddata *ddata =
container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_OSMEN |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ return 0;
+}
+
+static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_OSMEN |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ return 0;
+}
+
+static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+ return 0;
}
static int efm32_clock_event_set_next_event(unsigned long evt,
@@ -112,7 +114,9 @@ static struct efm32_clock_event_ddata clock_event_ddata = {
.evtdev = {
.name = "efm32 clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_mode = efm32_clock_event_set_mode,
+ .set_state_shutdown = efm32_clock_event_shutdown,
+ .set_state_periodic = efm32_clock_event_set_periodic,
+ .set_state_oneshot = efm32_clock_event_set_oneshot,
.set_next_event = efm32_clock_event_set_next_event,
.rating = 200,
},
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0b3ce0399c51..0ece7427b497 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -60,30 +60,36 @@ static int orion_clkevt_next_event(unsigned long delta,
return 0;
}
-static void orion_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int orion_clkevt_shutdown(struct clock_event_device *dev)
{
- if (mode == CLOCK_EVT_MODE_PERIODIC) {
- /* setup and enable periodic timer at 1/HZ intervals */
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
- writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
- atomic_io_modify(timer_base + TIMER_CTRL,
- TIMER1_RELOAD_EN | TIMER1_EN,
- TIMER1_RELOAD_EN | TIMER1_EN);
- } else {
- /* disable timer */
- atomic_io_modify(timer_base + TIMER_CTRL,
- TIMER1_RELOAD_EN | TIMER1_EN, 0);
- }
+ /* disable timer */
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN, 0);
+ return 0;
+}
+
+static int orion_clkevt_set_periodic(struct clock_event_device *dev)
+{
+ /* setup and enable periodic timer at 1/HZ intervals */
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN,
+ TIMER1_RELOAD_EN | TIMER1_EN);
+ return 0;
}
static struct clock_event_device orion_clkevt = {
- .name = "orion_event",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .shift = 32,
- .rating = 300,
- .set_next_event = orion_clkevt_next_event,
- .set_mode = orion_clkevt_mode,
+ .name = "orion_event",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 32,
+ .rating = 300,
+ .set_next_event = orion_clkevt_next_event,
+ .set_state_shutdown = orion_clkevt_shutdown,
+ .set_state_periodic = orion_clkevt_set_periodic,
+ .set_state_oneshot = orion_clkevt_shutdown,
+ .tick_resume = orion_clkevt_shutdown,
};
static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
new file mode 100644
index 000000000000..18d4266c2986
--- /dev/null
+++ b/drivers/clocksource/time-pistachio.c
@@ -0,0 +1,217 @@
+/*
+ * Pistachio clocksource based on general-purpose timers
+ *
+ * Copyright (C) 2015 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+/* Top level reg */
+#define CR_TIMER_CTRL_CFG 0x00
+#define TIMER_ME_GLOBAL BIT(0)
+#define CR_TIMER_REV 0x10
+
+/* Timer specific registers */
+#define TIMER_CFG 0x20
+#define TIMER_ME_LOCAL BIT(0)
+#define TIMER_RELOAD_VALUE 0x24
+#define TIMER_CURRENT_VALUE 0x28
+#define TIMER_CURRENT_OVERFLOW_VALUE 0x2C
+#define TIMER_IRQ_STATUS 0x30
+#define TIMER_IRQ_CLEAR 0x34
+#define TIMER_IRQ_MASK 0x38
+
+#define PERIP_TIMER_CONTROL 0x90
+
+/* Timer specific configuration values */
+#define RELOAD_VALUE 0xffffffff
+
+struct pistachio_clocksource {
+ void __iomem *base;
+ raw_spinlock_t lock;
+ struct clocksource cs;
+};
+
+static struct pistachio_clocksource pcs_gpt;
+
+#define to_pistachio_clocksource(cs) \
+ container_of(cs, struct pistachio_clocksource, cs)
+
+static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
+{
+ return readl(base + 0x20 * gpt_id + offset);
+}
+
+static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
+ u32 gpt_id)
+{
+ writel(value, base + 0x20 * gpt_id + offset);
+}
+
+static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+{
+ struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+ u32 counter, overflw;
+ unsigned long flags;
+
+ /*
+	 * The counter value is only refreshed after the overflow value is read,
+	 * and the two must be read in strict order; hence the raw spinlock.
+ */
+
+ raw_spin_lock_irqsave(&pcs->lock, flags);
+ overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+ counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
+ raw_spin_unlock_irqrestore(&pcs->lock, flags);
+
+ return ~(cycle_t)counter;
+}
+
+static u64 notrace pistachio_read_sched_clock(void)
+{
+ return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
+}
+
+static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
+ int enable)
+{
+ struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+ u32 val;
+
+ val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
+ if (enable)
+ val |= TIMER_ME_LOCAL;
+ else
+ val &= ~TIMER_ME_LOCAL;
+
+ gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
+}
+
+static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
+{
+ struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+
+ /* Disable GPT local before loading reload value */
+ pistachio_clksrc_set_mode(cs, timeridx, false);
+ gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
+ pistachio_clksrc_set_mode(cs, timeridx, true);
+}
+
+static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
+{
+ /* Disable GPT local */
+ pistachio_clksrc_set_mode(cs, timeridx, false);
+}
+
+static int pistachio_clocksource_enable(struct clocksource *cs)
+{
+ pistachio_clksrc_enable(cs, 0);
+ return 0;
+}
+
+static void pistachio_clocksource_disable(struct clocksource *cs)
+{
+ pistachio_clksrc_disable(cs, 0);
+}
+
+/* Preferred clocksource for the Pistachio platform */
+static struct pistachio_clocksource pcs_gpt = {
+ .cs = {
+ .name = "gptimer",
+ .rating = 300,
+ .enable = pistachio_clocksource_enable,
+ .disable = pistachio_clocksource_disable,
+ .read = pistachio_clocksource_read_cycles,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_SUSPEND_NONSTOP,
+ },
+};
+
+static void __init pistachio_clksrc_of_init(struct device_node *node)
+{
+ struct clk *sys_clk, *fast_clk;
+ struct regmap *periph_regs;
+ unsigned long rate;
+ int ret;
+
+ pcs_gpt.base = of_iomap(node, 0);
+ if (!pcs_gpt.base) {
+ pr_err("cannot iomap\n");
+ return;
+ }
+
+ periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
+ if (IS_ERR(periph_regs)) {
+		pr_err("cannot get peripheral regmap (%ld)\n",
+ PTR_ERR(periph_regs));
+ return;
+ }
+
+ /* Switch to using the fast counter clock */
+ ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
+ 0xf, 0x0);
+ if (ret)
+ return;
+
+ sys_clk = of_clk_get_by_name(node, "sys");
+ if (IS_ERR(sys_clk)) {
+		pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
+ return;
+ }
+
+ fast_clk = of_clk_get_by_name(node, "fast");
+ if (IS_ERR(fast_clk)) {
+		pr_err("clock get failed (%ld)\n", PTR_ERR(fast_clk));
+ return;
+ }
+
+ ret = clk_prepare_enable(sys_clk);
+ if (ret < 0) {
+ pr_err("failed to enable clock (%d)\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(fast_clk);
+ if (ret < 0) {
+ pr_err("failed to enable clock (%d)\n", ret);
+ clk_disable_unprepare(sys_clk);
+ return;
+ }
+
+ rate = clk_get_rate(fast_clk);
+
+ /* Disable irq's for clocksource usage */
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+
+ /* Enable timer block */
+ writel(TIMER_ME_GLOBAL, pcs_gpt.base);
+
+ raw_spin_lock_init(&pcs_gpt.lock);
+ sched_clock_register(pistachio_read_sched_clock, 32, rate);
+ clocksource_register_hz(&pcs_gpt.cs, rate);
+}
+CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
+ pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 60f9de3438b0..27fa13680be1 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -76,7 +76,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
/* clear timer interrupt */
writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
- if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+ if (clockevent_state_oneshot(ce))
sirfsoc_timer_count_disable(cpu);
ce->event_handler(ce);
@@ -117,18 +117,11 @@ static int sirfsoc_timer_set_next_event(unsigned long delta,
return 0;
}
-static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *ce)
+/* Oneshot is enabled in set_next_event */
+static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- /* enable in set_next_event */
- break;
- default:
- break;
- }
-
sirfsoc_timer_count_disable(smp_processor_id());
+ return 0;
}
static void sirfsoc_clocksource_suspend(struct clocksource *cs)
@@ -193,7 +186,9 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
ce->name = "local_timer";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->rating = 200;
- ce->set_mode = sirfsoc_timer_set_mode;
+ ce->set_state_shutdown = sirfsoc_timer_shutdown;
+ ce->set_state_oneshot = sirfsoc_timer_shutdown;
+ ce->tick_resume = sirfsoc_timer_shutdown;
ce->set_next_event = sirfsoc_timer_set_next_event;
clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
ce->max_delta_ns = clockevent_delta2ns(-2, ce);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index c0304ff608b0..d911c5dca8f1 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -90,33 +90,27 @@ static cycle_t read_pit_clk(struct clocksource *cs)
return elapsed;
}
+static int pit_clkevt_shutdown(struct clock_event_device *dev)
+{
+ struct pit_data *data = clkevt_to_pit_data(dev);
+
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
+ return 0;
+}
+
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
-static void
-pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static int pit_clkevt_set_periodic(struct clock_event_device *dev)
{
struct pit_data *data = clkevt_to_pit_data(dev);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* update clocksource counter */
- data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
- pit_write(data->base, AT91_PIT_MR,
- (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- BUG();
- /* FALLTHROUGH */
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- /* disable irq, leaving the clocksource active */
- pit_write(data->base, AT91_PIT_MR,
- (data->cycle - 1) | AT91_PIT_PITEN);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+ pit_write(data->base, AT91_PIT_MR,
+ (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
+ return 0;
}
static void at91sam926x_pit_suspend(struct clock_event_device *cedev)
@@ -162,7 +156,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
WARN_ON_ONCE(!irqs_disabled());
/* The PIT interrupt may be disabled, and is shared */
- if ((data->clkevt.mode == CLOCK_EVT_MODE_PERIODIC) &&
+ if (clockevent_state_periodic(&data->clkevt) &&
(pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) {
unsigned nr_ticks;
@@ -208,8 +202,8 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clksrc.mask = CLOCKSOURCE_MASK(bits);
data->clksrc.name = "pit";
data->clksrc.rating = 175;
- data->clksrc.read = read_pit_clk,
- data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ data->clksrc.read = read_pit_clk;
+ data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
clocksource_register_hz(&data->clksrc, pit_rate);
/* Set up irq handler */
@@ -227,7 +221,8 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clkevt.rating = 100;
data->clkevt.cpumask = cpumask_of(0);
- data->clkevt.set_mode = pit_clkevt_mode;
+ data->clkevt.set_state_shutdown = pit_clkevt_shutdown;
+ data->clkevt.set_state_periodic = pit_clkevt_set_periodic;
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 1692e17e096b..41b7b6dc1d0d 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -106,36 +106,47 @@ static struct clocksource clk32k = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void
-clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static void clkdev32k_disable_and_flush_irq(void)
{
unsigned int val;
/* Disable and flush pending timer interrupts */
regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
regmap_read(regmap_st, AT91_ST_SR, &val);
-
last_crtr = read_CRTR();
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* PIT for periodic irqs; fixed rate of 1/HZ */
- irqmask = AT91_ST_PITS;
- regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* ALM for oneshot irqs, set by next_event()
- * before 32 seconds have passed
- */
- irqmask = AT91_ST_ALMS;
- regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_RESUME:
- irqmask = 0;
- break;
- }
+}
+
+static int clkevt32k_shutdown(struct clock_event_device *evt)
+{
+ clkdev32k_disable_and_flush_irq();
+ irqmask = 0;
+ regmap_write(regmap_st, AT91_ST_IER, irqmask);
+ return 0;
+}
+
+static int clkevt32k_set_oneshot(struct clock_event_device *dev)
+{
+ clkdev32k_disable_and_flush_irq();
+
+ /*
+ * ALM for oneshot irqs, set by next_event()
+ * before 32 seconds have passed.
+ */
+ irqmask = AT91_ST_ALMS;
+ regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
regmap_write(regmap_st, AT91_ST_IER, irqmask);
+ return 0;
+}
+
+static int clkevt32k_set_periodic(struct clock_event_device *dev)
+{
+ clkdev32k_disable_and_flush_irq();
+
+ /* PIT for periodic irqs; fixed rate of 1/HZ */
+ irqmask = AT91_ST_PITS;
+ regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH);
+ regmap_write(regmap_st, AT91_ST_IER, irqmask);
+ return 0;
}
static int
@@ -170,11 +181,15 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
}
static struct clock_event_device clkevt = {
- .name = "at91_tick",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .rating = 150,
- .set_next_event = clkevt32k_next_event,
- .set_mode = clkevt32k_mode,
+ .name = "at91_tick",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 150,
+ .set_next_event = clkevt32k_next_event,
+ .set_state_shutdown = clkevt32k_shutdown,
+ .set_state_periodic = clkevt32k_set_periodic,
+ .set_state_oneshot = clkevt32k_set_oneshot,
+ .tick_resume = clkevt32k_shutdown,
};
/*
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index 7f8388cfa810..e73947f0f86d 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -87,27 +87,27 @@ static inline void dc_timer_set_count(struct clock_event_device *ce,
writel(count, dt->base + COUNT(dt->timer_id));
}
-static void digicolor_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *ce)
+static int digicolor_clkevt_shutdown(struct clock_event_device *ce)
+{
+ dc_timer_disable(ce);
+ return 0;
+}
+
+static int digicolor_clkevt_set_oneshot(struct clock_event_device *ce)
+{
+ dc_timer_disable(ce);
+ dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
+ return 0;
+}
+
+static int digicolor_clkevt_set_periodic(struct clock_event_device *ce)
{
struct digicolor_timer *dt = dc_timer(ce);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- dc_timer_disable(ce);
- dc_timer_set_count(ce, dt->ticks_per_jiffy);
- dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- dc_timer_disable(ce);
- dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- dc_timer_disable(ce);
- break;
- }
+ dc_timer_disable(ce);
+ dc_timer_set_count(ce, dt->ticks_per_jiffy);
+ dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
+ return 0;
}
static int digicolor_clkevt_next_event(unsigned long evt,
@@ -125,7 +125,10 @@ static struct digicolor_timer dc_timer_dev = {
.name = "digicolor_tick",
.rating = 340,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = digicolor_clkevt_mode,
+ .set_state_shutdown = digicolor_clkevt_shutdown,
+ .set_state_periodic = digicolor_clkevt_set_periodic,
+ .set_state_oneshot = digicolor_clkevt_set_oneshot,
+ .tick_resume = digicolor_clkevt_shutdown,
.set_next_event = digicolor_clkevt_next_event,
},
.timer_id = TIMER_C,
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c78423546..839aba92fc39 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -83,7 +83,6 @@ struct imx_timer {
struct clk *clk_ipg;
const struct imx_gpt_data *gpt;
struct clock_event_device ced;
- enum clock_event_mode cem;
struct irqaction act;
};
@@ -212,18 +211,38 @@ static int v2_set_next_event(unsigned long evt,
-ETIME : 0;
}
+static int mxc_shutdown(struct clock_event_device *ced)
+{
+ struct imx_timer *imxtm = to_imx_timer(ced);
+ unsigned long flags;
+ u32 tcn;
+
+ /*
+	 * Timer interrupt generation is disabled for at least
+	 * enough time to call mxc_set_next_event().
+ */
+ local_irq_save(flags);
+
+ /* Disable interrupt in GPT module */
+ imxtm->gpt->gpt_irq_disable(imxtm);
+
+ tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
+ /* Set event time into far-far future */
+ writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
+
+ /* Clear pending interrupt */
+ imxtm->gpt->gpt_irq_acknowledge(imxtm);
+
#ifdef DEBUG
-static const char *clock_event_mode_label[] = {
- [CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
- [CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT",
- [CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
- [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED",
- [CLOCK_EVT_MODE_RESUME] = "CLOCK_EVT_MODE_RESUME",
-};
+ printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */
-static void mxc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *ced)
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int mxc_set_oneshot(struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
unsigned long flags;
@@ -237,7 +256,7 @@ static void mxc_set_mode(enum clock_event_mode mode,
/* Disable interrupt in GPT module */
imxtm->gpt->gpt_irq_disable(imxtm);
- if (mode != imxtm->cem) {
+ if (!clockevent_state_oneshot(ced)) {
u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
/* Set event time into far-far future */
writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
@@ -247,37 +266,19 @@ static void mxc_set_mode(enum clock_event_mode mode,
}
#ifdef DEBUG
- printk(KERN_INFO "mxc_set_mode: changing mode from %s to %s\n",
- clock_event_mode_label[imxtm->cem],
- clock_event_mode_label[mode]);
+ printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */
- /* Remember timer mode */
- imxtm->cem = mode;
- local_irq_restore(flags);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- printk(KERN_ERR"mxc_set_mode: Periodic mode is not "
- "supported for i.MX\n");
- break;
- case CLOCK_EVT_MODE_ONESHOT:
/*
* Do not put overhead of interrupt enable/disable into
* mxc_set_next_event(), the core has about 4 minutes
* to call mxc_set_next_event() or shutdown clock after
* mode switching
*/
- local_irq_save(flags);
- imxtm->gpt->gpt_irq_enable(imxtm);
- local_irq_restore(flags);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_RESUME:
- /* Left event sources disabled, no more interrupts appear */
- break;
- }
+ imxtm->gpt->gpt_irq_enable(imxtm);
+ local_irq_restore(flags);
+
+ return 0;
}
/*
@@ -303,11 +304,11 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
struct clock_event_device *ced = &imxtm->ced;
struct irqaction *act = &imxtm->act;
- imxtm->cem = CLOCK_EVT_MODE_UNUSED;
-
ced->name = "mxc_timer1";
ced->features = CLOCK_EVT_FEAT_ONESHOT;
- ced->set_mode = mxc_set_mode;
+ ced->set_state_shutdown = mxc_shutdown;
+ ced->set_state_oneshot = mxc_set_oneshot;
+ ced->tick_resume = mxc_shutdown;
ced->set_next_event = imxtm->gpt->set_next_event;
ced->rating = 200;
ced->cpumask = cpumask_of(0);
@@ -462,6 +463,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
BUG_ON(!imxtm->base);
imxtm->type = type;
+ imxtm->irq = irq;
_mxc_timer_init(imxtm);
}
@@ -529,6 +531,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index a68866e0ecd4..3f59ac2180dc 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -75,33 +75,37 @@ static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
+static int clkevt_shutdown(struct clock_event_device *evt)
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
/* Disable timer */
writel(ctrl, clkevt_base + TIMER_CTRL);
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* Enable the timer and start the periodic tick */
- writel(timer_reload, clkevt_base + TIMER_LOAD);
- ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
- writel(ctrl, clkevt_base + TIMER_CTRL);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* Leave the timer disabled, .set_next_event will enable it */
- ctrl &= ~TIMER_CTRL_PERIODIC;
- writel(ctrl, clkevt_base + TIMER_CTRL);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- default:
- /* Just leave in disabled state */
- break;
- }
+static int clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ u32 ctrl = readl(clkevt_base + TIMER_CTRL) &
+ ~(TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC);
+
+ /* Leave the timer disabled, .set_next_event will enable it */
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ return 0;
+}
+static int clkevt_set_periodic(struct clock_event_device *evt)
+{
+ u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
+
+ /* Disable timer */
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ /* Enable the timer and start the periodic tick */
+ writel(timer_reload, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ return 0;
}
static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
@@ -116,11 +120,15 @@ static int clkevt_set_next_event(unsigned long next, struct clock_event_device *
}
static struct clock_event_device integrator_clockevent = {
- .name = "timer1",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = clkevt_set_mode,
- .set_next_event = clkevt_set_next_event,
- .rating = 300,
+ .name = "timer1",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = clkevt_shutdown,
+ .set_state_periodic = clkevt_set_periodic,
+ .set_state_oneshot = clkevt_set_oneshot,
+ .tick_resume = clkevt_shutdown,
+ .set_next_event = clkevt_set_next_event,
+ .rating = 300,
};
static struct irqaction integrator_timer_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0250354f7e55..edacf3902e10 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -72,10 +72,10 @@ static inline void keystone_timer_barrier(void)
/**
* keystone_timer_config: configures timer to work in oneshot/periodic modes.
- * @ mode: mode to configure
+ * @ mask: mask of the mode to configure
 * @ period: number of cycles to configure for
*/
-static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+static int keystone_timer_config(u64 period, int mask)
{
u32 tcr;
u32 off;
@@ -84,16 +84,7 @@ static int keystone_timer_config(u64 period, enum clock_event_mode mode)
off = tcr & ~(TCR_ENAMODE_MASK);
/* set enable mode */
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- tcr |= TCR_ENAMODE_ONESHOT_MASK;
- break;
- case CLOCK_EVT_MODE_PERIODIC:
- tcr |= TCR_ENAMODE_PERIODIC_MASK;
- break;
- default:
- return -1;
- }
+ tcr |= mask;
/* disable timer */
keystone_timer_writel(off, TCR);
@@ -138,24 +129,19 @@ static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
static int keystone_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- return keystone_timer_config(cycles, evt->mode);
+ return keystone_timer_config(cycles, TCR_ENAMODE_ONESHOT_MASK);
}
-static void keystone_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int keystone_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_ONESHOT:
- keystone_timer_disable();
- break;
- default:
- break;
- }
+ keystone_timer_disable();
+ return 0;
+}
+
+static int keystone_set_periodic(struct clock_event_device *evt)
+{
+ keystone_timer_config(timer.hz_period, TCR_ENAMODE_PERIODIC_MASK);
+ return 0;
}
static void __init keystone_timer_init(struct device_node *np)
@@ -222,7 +208,9 @@ static void __init keystone_timer_init(struct device_node *np)
/* setup clockevent */
event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
event_dev->set_next_event = keystone_set_next_event;
- event_dev->set_mode = keystone_set_mode;
+ event_dev->set_state_shutdown = keystone_shutdown;
+ event_dev->set_state_periodic = keystone_set_periodic;
+ event_dev->set_state_oneshot = keystone_shutdown;
event_dev->cpumask = cpu_all_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index ce18d570e1cd..78de982cc640 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -104,26 +104,21 @@ static int sirfsoc_timer_set_next_event(unsigned long delta,
return next - now > delta ? -ETIME : 0;
}
-static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *ce)
+static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
{
u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- WARN_ON(1);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- writel_relaxed(val | BIT(0),
- sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- writel_relaxed(val & ~BIT(0),
- sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+
+ writel_relaxed(val & ~BIT(0),
+ sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ return 0;
+}
+
+static int sirfsoc_timer_set_oneshot(struct clock_event_device *evt)
+{
+ u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+
+ writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+ return 0;
}
static void sirfsoc_clocksource_suspend(struct clocksource *cs)
@@ -157,7 +152,8 @@ static struct clock_event_device sirfsoc_clockevent = {
.name = "sirfsoc_clockevent",
.rating = 200,
.features = CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sirfsoc_timer_set_mode,
+ .set_state_shutdown = sirfsoc_timer_shutdown,
+ .set_state_oneshot = sirfsoc_timer_set_oneshot,
.set_next_event = sirfsoc_timer_set_next_event,
};
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index ca02503f17d1..5f45b9adef60 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -133,50 +133,50 @@ static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void sp804_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static inline void timer_shutdown(struct clock_event_device *evt)
{
- unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
+ writel(0, clkevt_base + TIMER_CTRL);
+}
- writel(ctrl, clkevt_base + TIMER_CTRL);
+static int sp804_shutdown(struct clock_event_device *evt)
+{
+ timer_shutdown(evt);
+ return 0;
+}
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel(clkevt_reload, clkevt_base + TIMER_LOAD);
- ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* period set, and timer enabled in 'next_event' hook */
- ctrl |= TIMER_CTRL_ONESHOT;
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- break;
- }
+static int sp804_set_periodic(struct clock_event_device *evt)
+{
+ unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
+ TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ timer_shutdown(evt);
+ writel(clkevt_reload, clkevt_base + TIMER_LOAD);
writel(ctrl, clkevt_base + TIMER_CTRL);
+ return 0;
}
static int sp804_set_next_event(unsigned long next,
struct clock_event_device *evt)
{
- unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+ unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
+ TIMER_CTRL_ONESHOT | TIMER_CTRL_ENABLE;
writel(next, clkevt_base + TIMER_LOAD);
- writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+ writel(ctrl, clkevt_base + TIMER_CTRL);
return 0;
}
static struct clock_event_device sp804_clockevent = {
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_DYNIRQ,
- .set_mode = sp804_set_mode,
- .set_next_event = sp804_set_next_event,
- .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_state_shutdown = sp804_shutdown,
+ .set_state_periodic = sp804_set_periodic,
+ .set_state_oneshot = sp804_shutdown,
+ .tick_resume = sp804_shutdown,
+ .set_next_event = sp804_set_next_event,
+ .rating = 300,
};
static struct irqaction sp804_timer_irq = {
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index a97e8b50701c..f3dcb76799b4 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -40,24 +40,25 @@ struct stm32_clock_event_ddata {
void __iomem *base;
};
-static void stm32_clock_event_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evtdev)
+static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
{
struct stm32_clock_event_ddata *data =
container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
void *base = data->base;
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
- break;
+ writel_relaxed(0, base + TIM_CR1);
+ return 0;
+}
- case CLOCK_EVT_MODE_ONESHOT:
- default:
- writel_relaxed(0, base + TIM_CR1);
- break;
- }
+static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+{
+ struct stm32_clock_event_ddata *data =
+ container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ void *base = data->base;
+
+ writel_relaxed(data->periodic_top, base + TIM_ARR);
+ writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
+ return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
@@ -88,7 +89,10 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
.evtdev = {
.name = "stm32 clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_mode = stm32_clock_event_set_mode,
+ .set_state_shutdown = stm32_clock_event_shutdown,
+ .set_state_periodic = stm32_clock_event_set_periodic,
+ .set_state_oneshot = stm32_clock_event_shutdown,
+ .tick_resume = stm32_clock_event_shutdown,
.set_next_event = stm32_clock_event_set_next_event,
.rating = 200,
},
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0ffb4ea7c925..bca9573e036a 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -103,27 +103,31 @@ static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, boo
ce->timer.base + TIMER_CTL_REG(timer));
}
-static void sun5i_clkevt_mode(enum clock_event_mode mode,
- struct clock_event_device *clkevt)
+static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
{
struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- sun5i_clkevt_time_stop(ce, 0);
- sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
- sun5i_clkevt_time_start(ce, 0, true);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- sun5i_clkevt_time_stop(ce, 0);
- sun5i_clkevt_time_start(ce, 0, false);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- sun5i_clkevt_time_stop(ce, 0);
- break;
- }
+ sun5i_clkevt_time_stop(ce, 0);
+ return 0;
+}
+
+static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+
+ sun5i_clkevt_time_stop(ce, 0);
+ sun5i_clkevt_time_start(ce, 0, false);
+ return 0;
+}
+
+static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
+{
+ struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+
+ sun5i_clkevt_time_stop(ce, 0);
+ sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
+ sun5i_clkevt_time_start(ce, 0, true);
+ return 0;
}
static int sun5i_clkevt_next_event(unsigned long evt,
@@ -286,7 +290,10 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
ce->clkevt.name = node->name;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.set_next_event = sun5i_clkevt_next_event;
- ce->clkevt.set_mode = sun5i_clkevt_mode;
+ ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown;
+ ce->clkevt.set_state_periodic = sun5i_clkevt_set_periodic;
+ ce->clkevt.set_state_oneshot = sun5i_clkevt_set_oneshot;
+ ce->clkevt.tick_resume = sun5i_clkevt_shutdown;
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 5dcf756970e7..1744b243898a 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -187,85 +187,82 @@ struct u300_clockevent_data {
unsigned ticks_per_jiffy;
};
+static int u300_shutdown(struct clock_event_device *evt)
+{
+ /* Disable interrupts on GP1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ return 0;
+}
+
/*
- * The u300_set_mode() function is always called first, if we
- * have oneshot timer active, the oneshot scheduling function
+ * If we have oneshot timer active, the oneshot scheduling function
* u300_set_next_event() is called immediately after.
*/
-static void u300_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int u300_set_oneshot(struct clock_event_device *evt)
+{
+ /* Just return; here? */
+ /*
+ * The actual event will be programmed by the next event hook,
+ * so we just set a dummy value somewhere at the end of the
+ * universe here.
+ */
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Expire far in the future, u300_set_next_event() will be
+ * called soon...
+ */
+ writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /* We run one shot per tick here! */
+ writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable interrupts for this timer */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Enable timer */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ return 0;
+}
+
+static int u300_set_periodic(struct clock_event_device *evt)
{
struct u300_clockevent_data *cevdata =
container_of(evt, struct u300_clockevent_data, cevd);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Set the periodic mode to a certain number of ticks per
- * jiffy.
- */
- writel(cevdata->ticks_per_jiffy,
- u300_timer_base + U300_TIMER_APP_GPT1TC);
- /*
- * Set continuous mode, so the timer keeps triggering
- * interrupts.
- */
- writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable timer interrupts */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Then enable the OS timer again */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* Just break; here? */
- /*
- * The actual event will be programmed by the next event hook,
- * so we just set a dummy value somewhere at the end of the
- * universe here.
- */
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Expire far in the future, u300_set_next_event() will be
- * called soon...
- */
- writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
- /* We run one shot per tick here! */
- writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable interrupts for this timer */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Enable timer */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- /* Disable interrupts on GP1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- break;
- case CLOCK_EVT_MODE_RESUME:
- /* Ignore this call */
- break;
- }
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Set the periodic mode to a certain number of ticks per
+ * jiffy.
+ */
+ writel(cevdata->ticks_per_jiffy,
+ u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /*
+ * Set continuous mode, so the timer keeps triggering
+ * interrupts.
+ */
+ writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable timer interrupts */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Then enable the OS timer again */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ return 0;
}
/*
@@ -309,13 +306,15 @@ static int u300_set_next_event(unsigned long cycles,
static struct u300_clockevent_data u300_clockevent_data = {
/* Use general purpose timer 1 as clock event */
.cevd = {
- .name = "GPT1",
+ .name = "GPT1",
/* Reasonably fast and accurate clock event */
- .rating = 300,
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = u300_set_next_event,
- .set_mode = u300_set_mode,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = u300_set_next_event,
+ .set_state_shutdown = u300_shutdown,
+ .set_state_periodic = u300_set_periodic,
+ .set_state_oneshot = u300_set_oneshot,
},
};
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index b45ac6229b57..f07ba9932171 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -86,20 +86,16 @@ static int pit_set_next_event(unsigned long delta,
return 0;
}
-static void pit_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int pit_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- pit_set_next_event(cycle_per_jiffy, evt);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- pit_timer_disable();
- break;
- default:
- break;
- }
+ pit_timer_disable();
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *evt)
+{
+ pit_set_next_event(cycle_per_jiffy, evt);
+ return 0;
}
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
@@ -114,7 +110,7 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
	 * and start the counter again. So software needs to disable the timer
* to stop the counter loop in ONESHOT mode.
*/
- if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT))
+ if (likely(clockevent_state_oneshot(evt)))
pit_timer_disable();
evt->event_handler(evt);
@@ -125,7 +121,8 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
static struct clock_event_device clockevent_pit = {
.name = "VF pit timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = pit_set_mode,
+ .set_state_shutdown = pit_shutdown,
+ .set_state_periodic = pit_set_periodic,
.set_next_event = pit_set_next_event,
.rating = 300,
};
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 1098ed3b9b89..a92e94b40b5b 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -88,29 +88,20 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
return 0;
}
-static void vt8500_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int vt8500_shutdown(struct clock_event_device *evt)
{
- switch (mode) {
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_PERIODIC:
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- writel(readl(regbase + TIMER_CTRL_VAL) | 1,
- regbase + TIMER_CTRL_VAL);
- writel(0, regbase + TIMER_IER_VAL);
- break;
- }
+ writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL);
+ writel(0, regbase + TIMER_IER_VAL);
+ return 0;
}
static struct clock_event_device clockevent = {
- .name = "vt8500_timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = vt8500_timer_set_next_event,
- .set_mode = vt8500_timer_set_mode,
+ .name = "vt8500_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = vt8500_timer_set_next_event,
+ .set_state_shutdown = vt8500_shutdown,
+ .set_state_oneshot = vt8500_shutdown,
};
static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index 7ce442148c3f..ceaa6133f9c2 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -76,32 +76,28 @@ static int zevio_timer_set_event(unsigned long delta,
return 0;
}
-static void zevio_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int zevio_timer_shutdown(struct clock_event_device *dev)
{
struct zevio_timer *timer = container_of(dev, struct zevio_timer,
clkevt);
- switch (mode) {
- case CLOCK_EVT_MODE_RESUME:
- case CLOCK_EVT_MODE_ONESHOT:
- /* Enable timer interrupts */
- writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
- writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- /* Disable timer interrupts */
- writel(0, timer->interrupt_regs + IO_INTR_MSK);
- writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
- /* Stop timer */
- writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
- break;
- case CLOCK_EVT_MODE_PERIODIC:
- default:
- /* Unsupported */
- break;
- }
+ /* Disable timer interrupts */
+ writel(0, timer->interrupt_regs + IO_INTR_MSK);
+ writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+ /* Stop timer */
+ writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
+ return 0;
+}
+
+static int zevio_timer_set_oneshot(struct clock_event_device *dev)
+{
+ struct zevio_timer *timer = container_of(dev, struct zevio_timer,
+ clkevt);
+
+ /* Enable timer interrupts */
+ writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
+ writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+ return 0;
}
static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id)
@@ -162,7 +158,9 @@ static int __init zevio_timer_add(struct device_node *node)
if (timer->interrupt_regs && irqnr) {
timer->clkevt.name = timer->clockevent_name;
timer->clkevt.set_next_event = zevio_timer_set_event;
- timer->clkevt.set_mode = zevio_timer_set_mode;
+ timer->clkevt.set_state_shutdown = zevio_timer_shutdown;
+ timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
+ timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
timer->clkevt.cpumask = cpu_all_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index cc8a71c267b8..cd0391e46c6d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -24,55 +24,6 @@ config ARM_VEXPRESS_SPC_CPUFREQ
	  This adds the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
-
-config ARM_EXYNOS_CPUFREQ
- tristate "SAMSUNG EXYNOS CPUfreq Driver"
- depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
- depends on THERMAL
- help
- This adds the CPUFreq driver for Samsung EXYNOS platforms.
- Supported SoC versions are:
- Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
-
- If in doubt, say N.
-
-config ARM_EXYNOS4X12_CPUFREQ
- bool "SAMSUNG EXYNOS4x12"
- depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
- depends on ARM_EXYNOS_CPUFREQ
- default y
- help
- This adds the CPUFreq driver for Samsung EXYNOS4X12
- SoC (EXYNOS4212 or EXYNOS4412).
-
- If in doubt, say N.
-
-config ARM_EXYNOS5250_CPUFREQ
- bool "SAMSUNG EXYNOS5250"
- depends on SOC_EXYNOS5250
- depends on ARM_EXYNOS_CPUFREQ
- default y
- help
- This adds the CPUFreq driver for Samsung EXYNOS5250
- SoC.
-
- If in doubt, say N.
-
-config ARM_EXYNOS_CPU_FREQ_BOOST_SW
- bool "EXYNOS Frequency Overclocking - Software"
- depends on ARM_EXYNOS_CPUFREQ && THERMAL
- select CPU_FREQ_BOOST_SW
- select EXYNOS_THERMAL
- help
- This driver supports software managed overclocking (BOOST).
- It allows usage of special frequencies for Samsung Exynos
- processors if thermal conditions are appropriate.
-
- It requires, for safe operation, thermal framework with properly
- defined trip points.
-
- If in doubt, say N.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -130,6 +81,14 @@ config ARM_KIRKWOOD_CPUFREQ
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
+config ARM_MT8173_CPUFREQ
+ bool "Mediatek MT8173 CPUFreq support"
+ depends on ARCH_MEDIATEK && REGULATOR
+ depends on !CPU_THERMAL || THERMAL=y
+ select PM_OPP
+ help
+ This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
@@ -247,12 +206,19 @@ config ARM_SPEAR_CPUFREQ
help
This adds the CPUFreq driver support for SPEAr SOCs.
-config ARM_TEGRA_CPUFREQ
- bool "TEGRA CPUFreq support"
+config ARM_TEGRA20_CPUFREQ
+ bool "Tegra20 CPUFreq support"
depends on ARCH_TEGRA
default y
help
- This adds the CPUFreq driver support for TEGRA SOCs.
+	  This adds the CPUFreq driver support for Tegra20 SoCs.
+
+config ARM_TEGRA124_CPUFREQ
+ tristate "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
+ default y
+ help
+	  This adds the CPUFreq driver support for Tegra124 SoCs.
config ARM_PXA2xx_CPUFREQ
tristate "Intel PXA2xx CPUfreq driver"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 2169bf792db7..41340384f11f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,16 +52,13 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o
-arm-exynos-cpufreq-y := exynos-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
@@ -76,7 +73,8 @@ obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
-obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0136dfcdabf0..15b921a9248c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -65,18 +65,21 @@ enum {
#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
struct acpi_cpufreq_data {
- struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
unsigned int cpu_feature;
+ unsigned int acpi_perf_cpu;
cpumask_var_t freqdomain_cpus;
};
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+ return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@ static int _store_boost(int val)
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
struct acpi_processor_performance *perf;
int i;
- perf = data->acpi_data;
+ perf = to_perf_data(data);
for (i = 0; i < perf->state_count; i++) {
if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
else
msr &= INTEL_MSR_RANGE;
- perf = data->acpi_data;
+ perf = to_perf_data(data);
cpufreq_for_each_entry(pos, data->freq_table)
if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
put_cpu();
}
-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
struct acpi_processor_performance *perf;
struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+ switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
- perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+ perf = to_perf_data(data);
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+ struct acpi_cpufreq_data *data;
+ struct cpufreq_policy *policy;
unsigned int freq;
unsigned int cached_freq;
pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
- if (unlikely(data == NULL ||
- data->acpi_data == NULL || data->freq_table == NULL)) {
+ policy = cpufreq_cpu_get(cpu);
+ if (unlikely(!policy))
return 0;
- }
- cached_freq = data->freq_table[data->acpi_data->state].frequency;
- freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+ data = policy->driver_data;
+ cpufreq_cpu_put(policy);
+ if (unlikely(!data || !data->freq_table))
+ return 0;
+
+ cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+ freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
unsigned int i;
for (i = 0; i < 100; i++) {
- cur_freq = extract_freq(get_cur_val(mask), data);
+ cur_freq = extract_freq(get_cur_val(mask, data), data);
if (cur_freq == freq)
return 1;
udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
struct drv_cmd cmd;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- if (unlikely(data == NULL ||
- data->acpi_data == NULL || data->freq_table == NULL)) {
+ if (unlikely(data == NULL || data->freq_table == NULL)) {
return -ENODEV;
}
- perf = data->acpi_data;
+ perf = to_perf_data(data);
next_perf_state = data->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
- struct acpi_processor_performance *perf = data->acpi_data;
+ struct acpi_processor_performance *perf;
+ perf = to_perf_data(data);
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_free;
}
- data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
- per_cpu(acfreq_data, cpu) = data;
+ perf = per_cpu_ptr(acpi_perf_data, cpu);
+ data->acpi_perf_cpu = cpu;
+ policy->driver_data = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
- result = acpi_processor_register_performance(data->acpi_data, cpu);
+ result = acpi_processor_register_performance(perf, cpu);
if (result)
goto err_free_mask;
- perf = data->acpi_data;
policy->shared_type = perf->shared_type;
/*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
err_freqfree:
kfree(data->freq_table);
err_unreg:
- acpi_processor_unregister_performance(perf, cpu);
+ acpi_processor_unregister_performance(cpu);
err_free_mask:
free_cpumask_var(data->freqdomain_cpus);
err_free:
kfree(data);
- per_cpu(acfreq_data, cpu) = NULL;
+ policy->driver_data = NULL;
return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
- per_cpu(acfreq_data, policy->cpu) = NULL;
- acpi_processor_unregister_performance(data->acpi_data,
- policy->cpu);
+ policy->driver_data = NULL;
+ acpi_processor_unregister_performance(data->acpi_perf_cpu);
free_cpumask_var(data->freqdomain_cpus);
kfree(data->freq_table);
kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
pr_debug("acpi_cpufreq_resume\n");
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
static struct freq_attr *acpi_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
- NULL, /* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+ &cpb,
+#endif
NULL,
};
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
* only if configured. This is considered legacy code, which
* will probably be removed at some point in the future.
*/
- if (check_amd_hwpstate_cpu(0)) {
- struct freq_attr **iter;
-
- pr_debug("adding sysfs entry for cpb\n");
+ if (!check_amd_hwpstate_cpu(0)) {
+ struct freq_attr **attr;
- for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
- ;
+ pr_debug("CPB unsupported, do not expose it\n");
- /* make sure there is a terminator behind it */
- if (iter[1] == NULL)
- *iter = &cpb;
+ for (attr = acpi_cpufreq_attr; *attr; attr++)
+ if (*attr == &cpb) {
+ *attr = NULL;
+ break;
+ }
}
#endif
acpi_cpufreq_boost_init();
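
The acpi-cpufreq hunks above replace the driver's private per-CPU pointer table (acfreq_data) with policy->driver_data plus a saved CPU index that is used to look up the shared percpu performance data on demand. The following standalone sketch illustrates that pattern under simplified assumptions — plain C with invented names, not kernel code:

/*
 * Sketch of the per-policy driver data pattern: the driver stores its
 * private struct in the policy and keeps only an index into shared
 * per-CPU data, resolved on demand (mirrors to_perf_data() above).
 */
#include <stdio.h>

#define NR_CPUS 4

struct perf_data { unsigned int state; };      /* stand-in for acpi_processor_performance */
static struct perf_data perf_table[NR_CPUS];   /* stand-in for the percpu acpi_perf_data */

struct policy { void *driver_data; };          /* stand-in for struct cpufreq_policy */

struct drv_data {
	unsigned int perf_cpu;                 /* mirrors acpi_perf_cpu */
};

/* resolve the shared data through the saved index, as to_perf_data() does */
static struct perf_data *to_perf(struct drv_data *d)
{
	return &perf_table[d->perf_cpu];
}

int main(void)
{
	struct drv_data d = { .perf_cpu = 2 };
	struct policy p = { .driver_data = &d };

	perf_table[2].state = 7;
	printf("state=%u\n", to_perf(p.driver_data)->state);
	return 0;
}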
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 528a82bf5038..7c0d70e2a861 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,6 +36,12 @@ struct private_data {
unsigned int voltage_tolerance; /* in percentage */
};
+static struct freq_attr *cpufreq_dt_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL, /* Extra space for boost-attr if required */
+ NULL,
+};
+
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
@@ -184,15 +190,16 @@ try_again:
static int cpufreq_init(struct cpufreq_policy *policy)
{
- struct cpufreq_dt_platform_data *pd;
struct cpufreq_frequency_table *freq_table;
struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
+ struct dev_pm_opp *suspend_opp;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
+ bool need_update = false;
int ret;
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@@ -208,8 +215,30 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_put_reg_clk;
}
- /* OPPs might be populated at runtime, don't check for error here */
- of_init_opp_table(cpu_dev);
+ /* Get OPP-sharing information from "operating-points-v2" bindings */
+ ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
+ if (ret) {
+ /*
+ * operating-points-v2 is not supported, fall back to the old method of
+ * finding shared OPPs for backward compatibility.
+ */
+ if (ret == -ENOENT)
+ need_update = true;
+ else
+ goto out_node_put;
+ }
+
+ /*
+ * Initialize OPP tables for all policy->cpus. They will be shared by
+ * all CPUs that have been marked as sharing OPPs via the OPP bindings.
+ *
+ * For platforms not using operating-points-v2 bindings, we do this
+ * before updating policy->cpus. Otherwise, we will end up creating
+ * duplicate OPPs for policy->cpus.
+ *
+ * OPPs might be populated at runtime, so don't check for errors here.
+ */
+ of_cpumask_init_opp_table(policy->cpus);
/*
* But we need OPP table to function so if it is not there let's
@@ -222,6 +251,26 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
+ if (need_update) {
+ struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
+
+ if (!pd || !pd->independent_clocks)
+ cpumask_setall(policy->cpus);
+
+ /*
+ * OPP tables are initialized only for policy->cpu, do it for
+ * others as well.
+ */
+ ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+
+ of_property_read_u32(np, "clock-latency", &transition_latency);
+ } else {
+ transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
@@ -230,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
- if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ if (!transition_latency)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
@@ -255,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
rcu_read_unlock();
tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ if (regulator_is_supported_voltage(cpu_reg,
+ opp_uV - tol_uV,
opp_uV + tol_uV)) {
if (opp_uV < min_uV)
min_uV = opp_uV;
@@ -284,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = priv;
policy->clk = cpu_clk;
+
+ rcu_read_lock();
+ suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+ if (suspend_opp)
+ policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+ rcu_read_unlock();
+
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -291,11 +348,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_cpufreq_table;
}
- policy->cpuinfo.transition_latency = transition_latency;
+ /* Support turbo/boost mode */
+ if (policy_has_boost_freq(policy)) {
+ /* This gets disabled by core on driver unregister */
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ goto out_free_cpufreq_table;
+ cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ }
- pd = cpufreq_get_driver_data();
- if (!pd || !pd->independent_clocks)
- cpumask_setall(policy->cpus);
+ policy->cpuinfo.transition_latency = transition_latency;
of_node_put(np);
@@ -306,7 +368,8 @@ out_free_cpufreq_table:
out_free_priv:
kfree(priv);
out_free_opp:
- of_free_opp_table(cpu_dev);
+ of_cpumask_free_opp_table(policy->cpus);
+out_node_put:
of_node_put(np);
out_put_reg_clk:
clk_put(cpu_clk);
@@ -322,7 +385,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- of_free_opp_table(priv->cpu_dev);
+ of_cpumask_free_opp_table(policy->related_cpus);
clk_put(policy->clk);
if (!IS_ERR(priv->cpu_reg))
regulator_put(priv->cpu_reg);
@@ -367,7 +430,8 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.ready = cpufreq_ready,
.name = "cpufreq-dt",
- .attr = cpufreq_generic_attr,
+ .attr = cpufreq_dt_attr,
+ .suspend = cpufreq_generic_suspend,
};
static int dt_cpufreq_probe(struct platform_device *pdev)
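
cpufreq_dt_attr above is declared with one spare NULL slot ahead of the terminator so cpufreq_init() can plug in the boost-frequencies attribute at runtime without growing the array. A minimal standalone sketch of that reserved-slot trick (illustrative strings instead of struct freq_attr pointers):

#include <stdio.h>

static const char *attrs[] = {
	"scaling_available_freqs",
	NULL,	/* spare slot for the optional boost attribute */
	NULL,	/* NULL terminator, never overwritten */
};

/* fill the spare slot once boost support is confirmed */
static void enable_boost(void)
{
	attrs[1] = "scaling_boost_freqs";
}

int main(void)
{
	enable_boost();
	for (const char **a = attrs; *a; a++)
		puts(*a);
	return 0;
}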
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b612411655f9..6633b3fa996e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -112,12 +112,6 @@ static inline bool has_target(void)
return cpufreq_driver->target_index || cpufreq_driver->target;
}
-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);
-
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event);
@@ -169,6 +163,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ return policy && !policy_is_inactive(policy) ?
+ policy->freq_table : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
+
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
u64 idle_time;
@@ -236,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -268,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
* If the corresponding cpufreq_cpu_put() call isn't made, the policy won't be
* freed, as that depends on the kobj count.
*
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
* Return: A valid policy on success, otherwise NULL on failure.
*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -282,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (WARN_ON(cpu >= nr_cpu_ids))
return NULL;
- if (!down_read_trylock(&cpufreq_rwsem))
- return NULL;
-
/* get the cpufreq driver */
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -297,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (!policy)
- up_read(&cpufreq_rwsem);
-
return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -311,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
*
* This decrements the kobject reference count incremented earlier by calling
* cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
*/
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
kobject_put(&policy->kobj);
- up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -530,9 +520,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
{
int err = -EINVAL;
- if (!cpufreq_driver)
- goto out;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -567,7 +554,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_unlock(&cpufreq_governor_mutex);
}
-out:
return err;
}
@@ -616,9 +602,7 @@ static ssize_t store_##file_name \
int ret, temp; \
struct cpufreq_policy new_policy; \
\
- ret = cpufreq_get_policy(&new_policy, policy->cpu); \
- if (ret) \
- return -EINVAL; \
+ memcpy(&new_policy, policy, sizeof(*policy)); \
\
ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
@@ -672,9 +656,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
char str_governor[16];
struct cpufreq_policy new_policy;
- ret = cpufreq_get_policy(&new_policy, policy->cpu);
- if (ret)
- return ret;
+ memcpy(&new_policy, policy, sizeof(*policy));
ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
@@ -685,14 +667,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
-
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
-
- if (ret)
- return ret;
- else
- return count;
+ return ret ? ret : count;
}
/**
@@ -842,9 +817,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
- if (!down_read_trylock(&cpufreq_rwsem))
- return -EINVAL;
-
down_read(&policy->rwsem);
if (fattr->show)
@@ -853,7 +825,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ret = -EIO;
up_read(&policy->rwsem);
- up_read(&cpufreq_rwsem);
return ret;
}
@@ -870,9 +841,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!cpu_online(policy->cpu))
goto unlock;
- if (!down_read_trylock(&cpufreq_rwsem))
- goto unlock;
-
down_write(&policy->rwsem);
/* Updating inactive policies is invalid, so avoid doing that. */
@@ -888,8 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
unlock_policy_rwsem:
up_write(&policy->rwsem);
-
- up_read(&cpufreq_rwsem);
unlock:
put_online_cpus();
@@ -993,7 +959,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
int ret = 0;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
@@ -1010,7 +976,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
unsigned int j;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
@@ -1018,8 +984,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
}
}
-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
- struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
struct freq_attr **drv_attr;
int ret = 0;
@@ -1051,11 +1016,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
return cpufreq_add_dev_symlink(policy);
}
-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
struct cpufreq_governor *gov = NULL;
struct cpufreq_policy new_policy;
- int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
@@ -1074,16 +1038,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
/* set default policy */
- ret = cpufreq_set_policy(policy, &new_policy);
- if (ret) {
- pr_debug("setting policy failed\n");
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- }
+ return cpufreq_set_policy(policy, &new_policy);
}
-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
- unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
@@ -1117,32 +1075,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
return 0;
}
-static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
- unsigned long flags;
-
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, cpu);
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- if (likely(policy)) {
- /* Policy should be inactive here */
- WARN_ON(!policy_is_inactive(policy));
-
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- up_write(&policy->rwsem);
- }
-
- return policy;
-}
-
-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
+ struct device *dev = get_cpu_device(cpu);
struct cpufreq_policy *policy;
int ret;
+ if (WARN_ON(!dev))
+ return NULL;
+
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy)
return NULL;
@@ -1153,11 +1094,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
+ if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ goto err_free_rcpumask;
+
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
"cpufreq");
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
- goto err_free_rcpumask;
+ goto err_free_real_cpus;
}
INIT_LIST_HEAD(&policy->policy_list);
@@ -1167,13 +1111,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = dev->id;
+ policy->cpu = cpu;
/* Set this once on allocation */
- policy->kobj_cpu = dev->id;
+ policy->kobj_cpu = cpu;
return policy;
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
@@ -1224,61 +1170,40 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpufreq_policy_put_kobj(policy, notify);
+ free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
kfree(policy);
}
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
{
- unsigned int j, cpu = dev->id;
- int ret = -ENOMEM;
struct cpufreq_policy *policy;
+ bool new_policy;
unsigned long flags;
- bool recover_policy = !sif;
-
- pr_debug("adding CPU %u\n", cpu);
-
- /*
- * Only possible if 'cpu' wasn't physically present earlier and we are
- * here from subsys_interface add callback. A hotplug notifier will
- * follow and we will handle it like logical CPU hotplug then. For now,
- * just create the sysfs link.
- */
- if (cpu_is_offline(cpu))
- return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+ unsigned int j;
+ int ret;
- if (!down_read_trylock(&cpufreq_rwsem))
- return 0;
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
/* Check if this CPU already has a policy to manage it */
policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy && !policy_is_inactive(policy)) {
+ if (policy) {
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- ret = cpufreq_add_policy_cpu(policy, cpu, dev);
- up_read(&cpufreq_rwsem);
- return ret;
- }
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
- /*
- * Restore the saved policy when doing light-weight init and fall back
- * to the full init if that fails.
- */
- policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
- if (!policy) {
- recover_policy = false;
- policy = cpufreq_policy_alloc(dev);
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ down_write(&policy->rwsem);
+ policy->cpu = cpu;
+ policy->governor = NULL;
+ up_write(&policy->rwsem);
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
if (!policy)
- goto nomem_out;
+ return -ENOMEM;
}
cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1289,13 +1214,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_set_policy_cpu;
+ goto out_free_policy;
}
down_write(&policy->rwsem);
- /* related cpus should atleast have policy->cpus */
- cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+ if (new_policy) {
+ /* related_cpus should at least include policy->cpus. */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+ /* Remember CPUs present at the policy creation time. */
+ cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+ }
/*
* affected cpus must always be the one, which are online. We aren't
@@ -1303,7 +1232,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- if (!recover_policy) {
+ if (new_policy) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
@@ -1317,7 +1246,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
- goto err_get_freq;
+ goto out_exit_policy;
}
}
@@ -1364,10 +1293,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- if (!recover_policy) {
- ret = cpufreq_add_dev_interface(policy, dev);
+ if (new_policy) {
+ ret = cpufreq_add_dev_interface(policy);
if (ret)
- goto err_out_unregister;
+ goto out_exit_policy;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
@@ -1376,18 +1305,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
- cpufreq_init_policy(policy);
-
- if (!recover_policy) {
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
+ ret = cpufreq_init_policy(policy);
+ if (ret) {
+ pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+ __func__, cpu, ret);
+ /* cpufreq_policy_free() will notify based on this */
+ new_policy = false;
+ goto out_exit_policy;
}
+
up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
- up_read(&cpufreq_rwsem);
-
/* Callback for handling stuff after policy is ready */
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
@@ -1396,25 +1326,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return 0;
-err_out_unregister:
-err_get_freq:
+out_exit_policy:
up_write(&policy->rwsem);
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-err_set_policy_cpu:
- cpufreq_policy_free(policy, recover_policy);
-nomem_out:
- up_read(&cpufreq_rwsem);
+out_free_policy:
+ cpufreq_policy_free(policy, !new_policy);
+ return ret;
+}
+
+/**
+ * cpufreq_add_dev - Add the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned cpu = dev->id;
+ int ret;
+
+ dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+ if (cpu_online(cpu)) {
+ ret = cpufreq_online(cpu);
+ } else {
+ /*
+ * A hotplug notifier will follow and we will handle it as CPU
+ * online then. For now, just create the sysfs link, unless
+ * there is no policy or the link is already present.
+ */
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+ ? add_cpu_dev_symlink(policy, cpu) : 0;
+ }
return ret;
}
-static int __cpufreq_remove_dev_prepare(struct device *dev,
- struct subsys_interface *sif)
+static void cpufreq_offline_prepare(unsigned int cpu)
{
- unsigned int cpu = dev->id;
- int ret = 0;
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1422,15 +1374,13 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
- return -EINVAL;
+ return;
}
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
+ int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
- return ret;
- }
}
down_write(&policy->rwsem);
@@ -1449,7 +1399,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
/* Start governor again for active policy */
if (!policy_is_inactive(policy)) {
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
if (!ret)
ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
@@ -1459,33 +1409,26 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
} else if (cpufreq_driver->stop_cpu) {
cpufreq_driver->stop_cpu(policy);
}
-
- return ret;
}
-static int __cpufreq_remove_dev_finish(struct device *dev,
- struct subsys_interface *sif)
+static void cpufreq_offline_finish(unsigned int cpu)
{
- unsigned int cpu = dev->id;
- int ret;
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
- return -EINVAL;
+ return;
}
/* Only proceed for inactive policies */
if (!policy_is_inactive(policy))
- return 0;
+ return;
/* If cpu is last user of policy, free policy */
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- if (ret) {
+ int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
- return ret;
- }
}
/*
@@ -1495,12 +1438,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
*/
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-
- /* Free the policy only if the driver is getting removed. */
- if (sif)
- cpufreq_policy_free(policy, true);
-
- return 0;
}
/**
@@ -1508,45 +1445,42 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
*
* Removes the cpufreq interface for a CPU device.
*/
-static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int ret;
-
- /*
- * Only possible if 'cpu' is getting physically removed now. A hotplug
- * notifier should have already been called and we just need to remove
- * link or free policy here.
- */
- if (cpu_is_offline(cpu)) {
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- struct cpumask mask;
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- if (!policy)
- return 0;
+ if (!policy)
+ return;
- cpumask_copy(&mask, policy->related_cpus);
- cpumask_clear_cpu(cpu, &mask);
+ if (cpu_online(cpu)) {
+ cpufreq_offline_prepare(cpu);
+ cpufreq_offline_finish(cpu);
+ }
- /*
- * Free policy only if all policy->related_cpus are removed
- * physically.
- */
- if (cpumask_intersects(&mask, cpu_present_mask)) {
- remove_cpu_dev_symlink(policy, cpu);
- return 0;
- }
+ cpumask_clear_cpu(cpu, policy->real_cpus);
+ if (cpumask_empty(policy->real_cpus)) {
cpufreq_policy_free(policy, true);
- return 0;
+ return;
}
- ret = __cpufreq_remove_dev_prepare(dev, sif);
+ if (cpu != policy->kobj_cpu) {
+ remove_cpu_dev_symlink(policy, cpu);
+ } else {
+ /*
+ * The CPU owning the policy object is going away. Move it to
+ * another suitable CPU.
+ */
+ unsigned int new_cpu = cpumask_first(policy->real_cpus);
+ struct device *new_dev = get_cpu_device(new_cpu);
- if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif);
+ dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
- return ret;
+ sysfs_remove_link(&new_dev->kobj, "cpufreq");
+ policy->kobj_cpu = new_cpu;
+ WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+ }
}
static void handle_update(struct work_struct *work)
@@ -1692,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
int ret;
if (!policy->suspend_freq) {
- pr_err("%s: suspend_freq can't be zero\n", __func__);
- return -EINVAL;
+ pr_debug("%s: suspend_freq not defined\n", __func__);
+ return 0;
}
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2097,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
if (!try_module_get(policy->governor->owner))
return -EINVAL;
- pr_debug("__cpufreq_governor for CPU %u, event %u\n",
- policy->cpu, event);
+ pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -2235,7 +2168,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
- if (new_policy->min > policy->max || new_policy->max < policy->min)
+ /*
+ * This check works well when we store new min/max freq attributes,
+ * because new_policy is a copy of policy with one field updated.
+ */
+ if (new_policy->min > new_policy->max)
return -EINVAL;
/* verify the cpu speed can be set within this limit */
@@ -2247,10 +2184,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_ADJUST, new_policy);
- /* adjust if necessary - hardware incompatibility*/
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_INCOMPATIBLE, new_policy);
-
/*
* verify the cpu speed can be set within this limit, which might be
* different to the first one
@@ -2284,16 +2217,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
- __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ /* This can happen due to race with other operations */
+ pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+ __func__, old_gov->name, ret);
+ return ret;
+ }
+
up_write(&policy->rwsem);
- __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
down_write(&policy->rwsem);
+
+ if (ret) {
+ pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+ __func__, old_gov->name, ret);
+ return ret;
+ }
}
/* start new governor */
policy->governor = new_policy->governor;
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ if (!ret) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
goto out;
up_write(&policy->rwsem);
@@ -2305,11 +2253,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
- __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+ policy->governor = NULL;
+ else
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
}
- return -EINVAL;
+ return ret;
out:
pr_debug("governor: change or update limits\n");
@@ -2338,8 +2288,6 @@ int cpufreq_update_policy(unsigned int cpu)
memcpy(&new_policy, policy, sizeof(*policy));
new_policy.min = policy->user_policy.min;
new_policy.max = policy->user_policy.max;
- new_policy.policy = policy->user_policy.policy;
- new_policy.governor = policy->user_policy.governor;
/*
* BIOS might change freq behind our back
@@ -2375,27 +2323,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
- dev = get_cpu_device(cpu);
- if (dev) {
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- cpufreq_add_dev(dev, NULL);
- break;
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ cpufreq_online(cpu);
+ break;
- case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev_prepare(dev, NULL);
- break;
+ case CPU_DOWN_PREPARE:
+ cpufreq_offline_prepare(cpu);
+ break;
- case CPU_POST_DEAD:
- __cpufreq_remove_dev_finish(dev, NULL);
- break;
+ case CPU_POST_DEAD:
+ cpufreq_offline_finish(cpu);
+ break;
- case CPU_DOWN_FAILED:
- cpufreq_add_dev(dev, NULL);
- break;
- }
+ case CPU_DOWN_FAILED:
+ cpufreq_online(cpu);
+ break;
}
return NOTIFY_OK;
}
@@ -2465,6 +2409,49 @@ int cpufreq_boost_supported(void)
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+static int create_boost_sysfs_file(void)
+{
+ int ret;
+
+ if (!cpufreq_boost_supported())
+ return 0;
+
+ /*
+ * Check if the driver provides a function to enable boost;
+ * if not, use cpufreq_boost_set_sw as the default.
+ */
+ if (!cpufreq_driver->set_boost)
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+ ret = cpufreq_sysfs_create_file(&boost.attr);
+ if (ret)
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+
+ return ret;
+}
+
+static void remove_boost_sysfs_file(void)
+{
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+}
+
+int cpufreq_enable_boost_support(void)
+{
+ if (!cpufreq_driver)
+ return -EINVAL;
+
+ if (cpufreq_boost_supported())
+ return 0;
+
+ cpufreq_driver->boost_supported = true;
+
+ /* This will get removed on driver unregister */
+ return create_boost_sysfs_file();
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
+
int cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
@@ -2503,10 +2490,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
+ /* Protect against concurrent CPU online/offline. */
+ get_online_cpus();
+
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2514,21 +2505,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
- if (cpufreq_boost_supported()) {
- /*
- * Check if driver provides function to enable boost -
- * if not, use cpufreq_boost_set_sw as default
- */
- if (!cpufreq_driver->set_boost)
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- ret = cpufreq_sysfs_create_file(&boost.attr);
- if (ret) {
- pr_err("%s: cannot register global BOOST sysfs file\n",
- __func__);
- goto err_null_driver;
- }
- }
+ ret = create_boost_sysfs_file();
+ if (ret)
+ goto err_null_driver;
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
@@ -2545,17 +2524,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
register_hotcpu_notifier(&cpufreq_cpu_notifier);
pr_debug("driver %s up and running\n", driver_data->name);
- return 0;
+out:
+ put_online_cpus();
+ return ret;
+
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
- if (cpufreq_boost_supported())
- cpufreq_sysfs_remove_file(&boost.attr);
+ remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return ret;
+ goto out;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -2576,19 +2557,18 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
+ /* Protect against concurrent cpu hotplug */
+ get_online_cpus();
subsys_interface_unregister(&cpufreq_interface);
- if (cpufreq_boost_supported())
- cpufreq_sysfs_remove_file(&boost.attr);
-
+ remove_boost_sysfs_file();
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
- down_write(&cpufreq_rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- up_write(&cpufreq_rwsem);
+ put_online_cpus();
return 0;
}
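
The reworked cpufreq_remove_dev() above tracks member CPUs in the new real_cpus mask, hands the sysfs object over to another member when the owning CPU is removed, and frees the policy only once the mask is empty. A toy model of that bookkeeping, assuming a plain bitmask in place of a cpumask (standalone C, invented names):

#include <stdio.h>

struct policy {
	unsigned int real_cpus;	/* one bit per CPU, stand-in for the cpumask */
	int kobj_cpu;		/* CPU that owns the sysfs object */
};

static int first_cpu(unsigned int mask)
{
	for (int i = 0; i < 32; i++)
		if (mask & (1U << i))
			return i;
	return -1;
}

static void remove_cpu(struct policy *p, int cpu)
{
	p->real_cpus &= ~(1U << cpu);

	if (!p->real_cpus) {		/* last member gone: free the policy */
		puts("last CPU removed: free policy");
		return;
	}
	if (cpu == p->kobj_cpu) {	/* owner leaving: move the object */
		p->kobj_cpu = first_cpu(p->real_cpus);
		printf("policy object moved to CPU%d\n", p->kobj_cpu);
	}
}

int main(void)
{
	struct policy p = { .real_cpus = 0xb, .kobj_cpu = 0 }; /* CPUs 0,1,3 */

	remove_cpu(&p, 0);	/* owner removed: object moves to CPU1 */
	remove_cpu(&p, 1);	/* owner removed again: object moves to CPU3 */
	remove_cpu(&p, 3);	/* mask empties: policy freed */
	return 0;
}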
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c86a10c30912..84a1506950a7 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
static void cs_check_cpu(int cpu, unsigned int load)
{
struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
struct dbs_data *dbs_data = policy->governor_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
}
}
-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+ struct dbs_data *dbs_data, bool modify_all)
{
- struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
- struct cs_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
- struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
- cpu);
- struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
- bool modify_all = true;
- mutex_lock(&core_dbs_info->cdbs.timer_mutex);
- if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
- modify_all = false;
- else
- dbs_check_cpu(dbs_data, cpu);
+ if (modify_all)
+ dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
- mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+ return delay_for_sampling_rate(cs_tuners->sampling_rate);
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
if (!dbs_info->enable)
return 0;
- policy = dbs_info->cdbs.cur_policy;
+ policy = dbs_info->cdbs.shared->policy;
/*
* we only care if our internally tracked freq moves outside the 'valid'
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 57a39f8a92b7..939197ffa4ac 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
- struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+ struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- struct cpufreq_policy *policy;
+ struct cpufreq_policy *policy = cdbs->shared->policy;
unsigned int sampling_rate;
unsigned int max_load = 0;
unsigned int ignore_nice;
@@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
ignore_nice = cs_tuners->ignore_nice_load;
}
- policy = cdbs->cur_policy;
-
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs;
+ struct cpu_dbs_info *j_cdbs;
u64 cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
unsigned int load;
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
unsigned int delay)
{
- struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+ struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+ mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -199,33 +197,63 @@ EXPORT_SYMBOL_GPL(gov_queue_work);
static inline void gov_cancel_work(struct dbs_data *dbs_data,
struct cpufreq_policy *policy)
{
- struct cpu_dbs_common_info *cdbs;
+ struct cpu_dbs_info *cdbs;
int i;
for_each_cpu(i, policy->cpus) {
cdbs = dbs_data->cdata->get_cpu_cdbs(i);
- cancel_delayed_work_sync(&cdbs->work);
+ cancel_delayed_work_sync(&cdbs->dwork);
}
}
/* Return whether the CPU load needs to be evaluated again */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
- unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+ unsigned int sampling_rate)
{
- if (policy_is_shared(cdbs->cur_policy)) {
+ if (policy_is_shared(shared->policy)) {
ktime_t time_now = ktime_get();
- s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+ s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
/* Do nothing if we recently have sampled */
if (delta_us < (s64)(sampling_rate / 2))
return false;
else
- cdbs->time_stamp = time_now;
+ shared->time_stamp = time_now;
}
return true;
}
-EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void dbs_timer(struct work_struct *work)
+{
+ struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+ dwork.work);
+ struct cpu_common_dbs_info *shared = cdbs->shared;
+ struct cpufreq_policy *policy = shared->policy;
+ struct dbs_data *dbs_data = policy->governor_data;
+ unsigned int sampling_rate, delay;
+ bool modify_all = true;
+
+ mutex_lock(&shared->timer_mutex);
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+ sampling_rate = cs_tuners->sampling_rate;
+ } else {
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ sampling_rate = od_tuners->sampling_rate;
+ }
+
+ if (!need_load_eval(cdbs->shared, sampling_rate))
+ modify_all = false;
+
+ delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+ gov_queue_work(dbs_data, policy, delay, modify_all);
+
+ mutex_unlock(&shared->timer_mutex);
+}
static void set_sampling_rate(struct dbs_data *dbs_data,
unsigned int sampling_rate)
@@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
}
}
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata)
+{
+ struct cpu_common_dbs_info *shared;
+ int j;
+
+ /* Allocate memory for the common information for policy->cpus */
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ return -ENOMEM;
+
+ /* Set shared for all CPUs, online+offline */
+ for_each_cpu(j, policy->related_cpus)
+ cdata->get_cpu_cdbs(j)->shared = shared;
+
+ return 0;
+}
+
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata)
+{
+ struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+ struct cpu_common_dbs_info *shared = cdbs->shared;
+ int j;
+
+ for_each_cpu(j, policy->cpus)
+ cdata->get_cpu_cdbs(j)->shared = NULL;
+
+ kfree(shared);
+}
+
static int cpufreq_governor_init(struct cpufreq_policy *policy,
struct dbs_data *dbs_data,
struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
unsigned int latency;
int ret;
+ /* State should be equivalent to EXIT */
+ if (policy->governor_data)
+ return -EBUSY;
+
if (dbs_data) {
if (WARN_ON(have_governor_per_policy()))
return -EINVAL;
+
+ ret = alloc_common_dbs_info(policy, cdata);
+ if (ret)
+ return ret;
+
dbs_data->usage_count++;
policy->governor_data = dbs_data;
return 0;
@@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
if (!dbs_data)
return -ENOMEM;
+ ret = alloc_common_dbs_info(policy, cdata);
+ if (ret)
+ goto free_dbs_data;
+
dbs_data->cdata = cdata;
dbs_data->usage_count = 1;
ret = cdata->init(dbs_data, !policy->governor->initialized);
if (ret)
- goto free_dbs_data;
+ goto free_common_dbs_info;
/* policy latency is in ns. Convert it to us first */
latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@ put_kobj:
}
cdata_exit:
cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+ free_common_dbs_info(policy, cdata);
free_dbs_data:
kfree(dbs_data);
return ret;
}
-static void cpufreq_governor_exit(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
{
struct common_dbs_data *cdata = dbs_data->cdata;
+ struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+
+ /* State should be equivalent to INIT */
+ if (!cdbs->shared || cdbs->shared->policy)
+ return -EBUSY;
policy->governor_data = NULL;
if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy,
cdata->exit(dbs_data, policy->governor->initialized == 1);
kfree(dbs_data);
}
+
+ free_common_dbs_info(policy, cdata);
+ return 0;
}
static int cpufreq_governor_start(struct cpufreq_policy *policy,
@@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
{
struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
- struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+ struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+ struct cpu_common_dbs_info *shared = cdbs->shared;
int io_busy = 0;
if (!policy->cur)
return -EINVAL;
+ /* State should be equivalent to INIT */
+ if (!shared || shared->policy)
+ return -EBUSY;
+
if (cdata->governor == GOV_CONSERVATIVE) {
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
io_busy = od_tuners->io_is_busy;
}
+ shared->policy = policy;
+ shared->time_stamp = ktime_get();
+ mutex_init(&shared->timer_mutex);
+
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+ struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
unsigned int prev_load;
- j_cdbs->cpu = j;
- j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle =
get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
@@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- mutex_init(&j_cdbs->timer_mutex);
- INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+ INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
}
if (cdata->governor == GOV_CONSERVATIVE) {
@@ -386,20 +474,24 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
od_ops->powersave_bias_init_cpu(cpu);
}
- /* Initiate timer time stamp */
- cpu_cdbs->time_stamp = ktime_get();
-
gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
true);
return 0;
}
-static void cpufreq_governor_stop(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_stop(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
{
struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int cpu = policy->cpu;
- struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+ struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+ struct cpu_common_dbs_info *shared = cdbs->shared;
+
+ /* State should be equivalent to START */
+ if (!shared || !shared->policy)
+ return -EBUSY;
+
+ gov_cancel_work(dbs_data, policy);
if (cdata->governor == GOV_CONSERVATIVE) {
struct cs_cpu_dbs_info_s *cs_dbs_info =
@@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy,
cs_dbs_info->enable = 0;
}
- gov_cancel_work(dbs_data, policy);
-
- mutex_destroy(&cpu_cdbs->timer_mutex);
- cpu_cdbs->cur_policy = NULL;
+ shared->policy = NULL;
+ mutex_destroy(&shared->timer_mutex);
+ return 0;
}
-static void cpufreq_governor_limits(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+ struct dbs_data *dbs_data)
{
struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int cpu = policy->cpu;
- struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+ struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
- if (!cpu_cdbs->cur_policy)
- return;
+ /* State should be equivalent to START */
+ if (!cdbs->shared || !cdbs->shared->policy)
+ return -EBUSY;
- mutex_lock(&cpu_cdbs->timer_mutex);
- if (policy->max < cpu_cdbs->cur_policy->cur)
- __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+ mutex_lock(&cdbs->shared->timer_mutex);
+ if (policy->max < cdbs->shared->policy->cur)
+ __cpufreq_driver_target(cdbs->shared->policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > cpu_cdbs->cur_policy->cur)
- __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+ else if (policy->min > cdbs->shared->policy->cur)
+ __cpufreq_driver_target(cdbs->shared->policy, policy->min,
CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu);
- mutex_unlock(&cpu_cdbs->timer_mutex);
+ mutex_unlock(&cdbs->shared->timer_mutex);
+
+ return 0;
}
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
struct common_dbs_data *cdata, unsigned int event)
{
struct dbs_data *dbs_data;
- int ret = 0;
+ int ret;
/* Lock governor to block concurrent initialization of governor */
mutex_lock(&cdata->mutex);
@@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
else
dbs_data = cdata->gdbs_data;
- if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+ if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
ret = -EINVAL;
goto unlock;
}
@@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
ret = cpufreq_governor_init(policy, dbs_data, cdata);
break;
case CPUFREQ_GOV_POLICY_EXIT:
- cpufreq_governor_exit(policy, dbs_data);
+ ret = cpufreq_governor_exit(policy, dbs_data);
break;
case CPUFREQ_GOV_START:
ret = cpufreq_governor_start(policy, dbs_data);
break;
case CPUFREQ_GOV_STOP:
- cpufreq_governor_stop(policy, dbs_data);
+ ret = cpufreq_governor_stop(policy, dbs_data);
break;
case CPUFREQ_GOV_LIMITS:
- cpufreq_governor_limits(policy, dbs_data);
+ ret = cpufreq_governor_limits(policy, dbs_data);
break;
+ default:
+ ret = -EINVAL;
}
unlock:
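
The alloc_common_dbs_info()/free_common_dbs_info() pair above attaches one shared per-policy object to every member CPU's slot, so the timer mutex and policy pointer now live in exactly one place. A self-contained sketch of that ownership scheme (plain C approximation, not the kernel helpers):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct shared_info { int policy_id; };			/* stand-in for cpu_common_dbs_info */
struct cpu_info { struct shared_info *shared; };	/* stand-in for cpu_dbs_info */

static struct cpu_info cpu_info[NR_CPUS];

static int alloc_shared(const int *cpus, int n, int policy_id)
{
	struct shared_info *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;
	s->policy_id = policy_id;
	for (int i = 0; i < n; i++)
		cpu_info[cpus[i]].shared = s;	/* every member points at it */
	return 0;
}

static void free_shared(const int *cpus, int n)
{
	struct shared_info *s = cpu_info[cpus[0]].shared;

	for (int i = 0; i < n; i++)
		cpu_info[cpus[i]].shared = NULL;
	free(s);				/* freed exactly once */
}

int main(void)
{
	int cpus[] = { 0, 1 };

	if (alloc_shared(cpus, 2, 42))
		return 1;
	printf("cpu1 sees policy %d\n", cpu_info[1].shared->policy_id);
	free_shared(cpus, 2);
	return 0;
}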
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34736f5e869d..50f171796632 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -109,7 +109,7 @@ store_one(_gov, file_name)
/* create helper routines */
#define define_get_cpu_dbs_routines(_dbs_info) \
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu) \
{ \
return &per_cpu(_dbs_info, cpu).cdbs; \
} \
@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu) \
* cs_*: Conservative governor
*/
+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+ struct cpufreq_policy *policy;
+ /*
+ * Per-policy mutex that serializes governor limit changes with
+ * dbs_timer invocation. We do not want dbs_timer to run while the
+ * user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
+ ktime_t time_stamp;
+};
+
/* Per cpu structures */
-struct cpu_dbs_common_info {
- int cpu;
+struct cpu_dbs_info {
u64 prev_cpu_idle;
u64 prev_cpu_wall;
u64 prev_cpu_nice;
@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
* wake-up from idle.
*/
unsigned int prev_load;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- /*
- * percpu mutex that serializes governor limit change with gov_dbs_timer
- * invocation. We do not want gov_dbs_timer to run when user is changing
- * the governor or limits.
- */
- struct mutex timer_mutex;
- ktime_t time_stamp;
+ struct delayed_work dwork;
+ struct cpu_common_dbs_info *shared;
};
struct od_cpu_dbs_info_s {
- struct cpu_dbs_common_info cdbs;
+ struct cpu_dbs_info cdbs;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
};
struct cs_cpu_dbs_info_s {
- struct cpu_dbs_common_info cdbs;
+ struct cpu_dbs_info cdbs;
unsigned int down_skip;
unsigned int requested_freq;
unsigned int enable:1;
@@ -204,9 +208,11 @@ struct common_dbs_data {
*/
struct dbs_data *gdbs_data;
- struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+ struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
void *(*get_cpu_dbs_info_s)(int cpu);
- void (*gov_dbs_timer)(struct work_struct *work);
+ unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+ struct dbs_data *dbs_data,
+ bool modify_all);
void (*gov_check_cpu)(int cpu, unsigned int load);
int (*init)(struct dbs_data *dbs_data, bool notify);
void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol \
extern struct mutex cpufreq_governor_lock;
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
- unsigned int sampling_rate);
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
struct common_dbs_data *cdata, unsigned int event);
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
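
The gov_dbs_timer() signature change above moves locking, load-evaluation gating, and requeueing into the common dbs_timer(), leaving each governor callback to compute only its next sampling delay. A compact sketch of that split, with made-up types standing in for the kernel structures:

#include <stdio.h>

struct gov {
	/* per-governor hook: returns the next delay, does no queueing */
	unsigned int (*gov_timer)(int modify_all);
};

static unsigned int od_timer(int modify_all)
{
	/* ondemand-style: short sub-sample delay only on a full evaluation */
	return modify_all ? 10 : 100;
}

/* common part: would lock, decide modify_all, then requeue */
static void dbs_timer(struct gov *g, int modify_all)
{
	unsigned int delay = g->gov_timer(modify_all);

	printf("requeue in %u jiffies\n", delay);
}

int main(void)
{
	struct gov od = { .gov_timer = od_timer };

	dbs_timer(&od, 1);
	dbs_timer(&od, 0);
	return 0;
}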
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3c1e10f2304c..1fa9088c84a8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
static void od_check_cpu(int cpu, unsigned int load)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
struct dbs_data *dbs_data = policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
}
}
-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+ struct dbs_data *dbs_data, bool modify_all)
{
- struct od_cpu_dbs_info_s *dbs_info =
- container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
- struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+ struct cpufreq_policy *policy = cdbs->shared->policy;
+ unsigned int cpu = policy->cpu;
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
cpu);
- struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- int delay = 0, sample_type = core_dbs_info->sample_type;
- bool modify_all = true;
+ int delay = 0, sample_type = dbs_info->sample_type;
- mutex_lock(&core_dbs_info->cdbs.timer_mutex);
- if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
- modify_all = false;
+ if (!modify_all)
goto max_delay;
- }
/* Common NORMAL_SAMPLE setup */
- core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
- delay = core_dbs_info->freq_lo_jiffies;
- __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
- core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
+ __cpufreq_driver_target(policy, dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
} else {
dbs_check_cpu(dbs_data, cpu);
- if (core_dbs_info->freq_lo) {
+ if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- core_dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = core_dbs_info->freq_hi_jiffies;
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = dbs_info->freq_hi_jiffies;
}
}
max_delay:
if (!delay)
delay = delay_for_sampling_rate(od_tuners->sampling_rate
- * core_dbs_info->rate_mult);
+ * dbs_info->rate_mult);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
- mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+ return delay;
}
/************************** sysfs interface ************************/
@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
cpufreq_cpu_put(policy);
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
- if (!delayed_work_pending(&dbs_info->cdbs.work)) {
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+ mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
continue;
}
next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->cdbs.work.timer.expires;
+ appointed_at = dbs_info->cdbs.dwork.timer.expires;
if (time_before(next_sampling, appointed_at)) {
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
- cancel_delayed_work_sync(&dbs_info->cdbs.work);
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+ mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
- gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
- usecs_to_jiffies(new_rate), true);
+ gov_queue_work(dbs_data, policy,
+ usecs_to_jiffies(new_rate), true);
}
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
}
}
@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
get_online_cpus();
for_each_online_cpu(cpu) {
+ struct cpu_common_dbs_info *shared;
+
if (cpumask_test_cpu(cpu, &done))
continue;
- policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
- if (!policy)
+ shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+ if (!shared)
continue;
+ policy = shared->policy;
cpumask_or(&done, &done, policy->cpus);
if (policy->governor != &cpufreq_gov_ondemand)
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index 773bcde893c0..0f5e6d5f6da0 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
}
freq_table[i].driver_data = i;
freq_table[i].frequency = rate / 1000;
+
+ /* Is Boost/turbo OPP? */
+ if (dev_pm_opp_is_turbo(opp))
+ freq_table[i].flags = CPUFREQ_BOOST_FREQ;
}
freq_table[i].driver_data = i;
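With turbo OPPs now tagged CPUFREQ_BOOST_FREQ, consumers of the generated table can tell sustainable frequencies from boost ones. A minimal sketch of one such consumer (assumed, not part of this patch), built on the standard table iterator:

	static unsigned int max_sustainable_freq(struct cpufreq_frequency_table *table)
	{
		struct cpufreq_frequency_table *pos;
		unsigned int max = 0;

		cpufreq_for_each_valid_entry(pos, table)
			if (!(pos->flags & CPUFREQ_BOOST_FREQ) &&
			    pos->frequency > max)
				max = pos->frequency;

		return max;
	}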
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index a0d2a423cea9..4085244c8a67 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
static int eps_acpi_exit(struct cpufreq_policy *policy)
{
if (eps_acpi_cpu_perf) {
- acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+ acpi_processor_unregister_performance(0);
free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
kfree(eps_acpi_cpu_perf);
eps_acpi_cpu_perf = NULL;
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
deleted file mode 100644
index ae5b2bd3a978..000000000000
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - CPU frequency scaling support for EXYNOS series
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/cpufreq.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/cpu_cooling.h>
-#include <linux/cpu.h>
-
-#include "exynos-cpufreq.h"
-
-static struct exynos_dvfs_info *exynos_info;
-static struct thermal_cooling_device *cdev;
-static struct regulator *arm_regulator;
-static unsigned int locking_frequency;
-
-static int exynos_cpufreq_get_index(unsigned int freq)
-{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- struct cpufreq_frequency_table *pos;
-
- cpufreq_for_each_entry(pos, freq_table)
- if (pos->frequency == freq)
- break;
-
- if (pos->frequency == CPUFREQ_TABLE_END)
- return -EINVAL;
-
- return pos - freq_table;
-}
-
-static int exynos_cpufreq_scale(unsigned int target_freq)
-{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- unsigned int *volt_table = exynos_info->volt_table;
- struct cpufreq_policy *policy = cpufreq_cpu_get(0);
- unsigned int arm_volt, safe_arm_volt = 0;
- unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
- struct device *dev = exynos_info->dev;
- unsigned int old_freq;
- int index, old_index;
- int ret = 0;
-
- old_freq = policy->cur;
-
- /*
- * The policy max have been changed so that we cannot get proper
- * old_index with cpufreq_frequency_table_target(). Thus, ignore
- * policy and get the index from the raw frequency table.
- */
- old_index = exynos_cpufreq_get_index(old_freq);
- if (old_index < 0) {
- ret = old_index;
- goto out;
- }
-
- index = exynos_cpufreq_get_index(target_freq);
- if (index < 0) {
- ret = index;
- goto out;
- }
-
- /*
- * ARM clock source will be changed APLL to MPLL temporary
- * To support this level, need to control regulator for
- * required voltage level
- */
- if (exynos_info->need_apll_change != NULL) {
- if (exynos_info->need_apll_change(old_index, index) &&
- (freq_table[index].frequency < mpll_freq_khz) &&
- (freq_table[old_index].frequency < mpll_freq_khz))
- safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
- }
- arm_volt = volt_table[index];
-
- /* When the new frequency is higher than current frequency */
- if ((target_freq > old_freq) && !safe_arm_volt) {
- /* Firstly, voltage up to increase frequency */
- ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- arm_volt);
- return ret;
- }
- }
-
- if (safe_arm_volt) {
- ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
- safe_arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- safe_arm_volt);
- return ret;
- }
- }
-
- exynos_info->set_freq(old_index, index);
-
- /* When the new frequency is lower than current frequency */
- if ((target_freq < old_freq) ||
- ((target_freq > old_freq) && safe_arm_volt)) {
- /* down the voltage after frequency change */
- ret = regulator_set_voltage(arm_regulator, arm_volt,
- arm_volt);
- if (ret) {
- dev_err(dev, "failed to set cpu voltage to %d\n",
- arm_volt);
- goto out;
- }
- }
-
-out:
- cpufreq_cpu_put(policy);
-
- return ret;
-}
-
-static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
-{
- return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
-}
-
-static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->clk = exynos_info->cpu_clk;
- policy->suspend_freq = locking_frequency;
- return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
-}
-
-static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = exynos_target,
- .get = cpufreq_generic_get,
- .init = exynos_cpufreq_cpu_init,
- .name = "exynos_cpufreq",
- .attr = cpufreq_generic_attr,
-#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
- .boost_supported = true,
-#endif
-#ifdef CONFIG_PM
- .suspend = cpufreq_generic_suspend,
-#endif
-};
-
-static int exynos_cpufreq_probe(struct platform_device *pdev)
-{
- struct device_node *cpu0;
- int ret = -EINVAL;
-
- exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
- if (!exynos_info)
- return -ENOMEM;
-
- exynos_info->dev = &pdev->dev;
-
- if (of_machine_is_compatible("samsung,exynos4212")) {
- exynos_info->type = EXYNOS_SOC_4212;
- ret = exynos4x12_cpufreq_init(exynos_info);
- } else if (of_machine_is_compatible("samsung,exynos4412")) {
- exynos_info->type = EXYNOS_SOC_4412;
- ret = exynos4x12_cpufreq_init(exynos_info);
- } else if (of_machine_is_compatible("samsung,exynos5250")) {
- exynos_info->type = EXYNOS_SOC_5250;
- ret = exynos5250_cpufreq_init(exynos_info);
- } else {
- pr_err("%s: Unknown SoC type\n", __func__);
- return -ENODEV;
- }
-
- if (ret)
- goto err_vdd_arm;
-
- if (exynos_info->set_freq == NULL) {
- dev_err(&pdev->dev, "No set_freq function (ERR)\n");
- goto err_vdd_arm;
- }
-
- arm_regulator = regulator_get(NULL, "vdd_arm");
- if (IS_ERR(arm_regulator)) {
- dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
- goto err_vdd_arm;
- }
-
- /* Done here as we want to capture boot frequency */
- locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
-
- ret = cpufreq_register_driver(&exynos_driver);
- if (ret)
- goto err_cpufreq_reg;
-
- cpu0 = of_get_cpu_node(0, NULL);
- if (!cpu0) {
- pr_err("failed to find cpu0 node\n");
- return 0;
- }
-
- if (of_find_property(cpu0, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(cpu0,
- cpu_present_mask);
- if (IS_ERR(cdev))
- pr_err("running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
- }
-
- return 0;
-
-err_cpufreq_reg:
- dev_err(&pdev->dev, "failed to register cpufreq driver\n");
- regulator_put(arm_regulator);
-err_vdd_arm:
- kfree(exynos_info);
- return -EINVAL;
-}
-
-static struct platform_driver exynos_cpufreq_platdrv = {
- .driver = {
- .name = "exynos-cpufreq",
- },
- .probe = exynos_cpufreq_probe,
-};
-module_platform_driver(exynos_cpufreq_platdrv);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
deleted file mode 100644
index a3855e4d913d..000000000000
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - CPUFreq support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-enum cpufreq_level_index {
- L0, L1, L2, L3, L4,
- L5, L6, L7, L8, L9,
- L10, L11, L12, L13, L14,
- L15, L16, L17, L18, L19,
- L20,
-};
-
-enum exynos_soc_type {
- EXYNOS_SOC_4212,
- EXYNOS_SOC_4412,
- EXYNOS_SOC_5250,
-};
-
-#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
- { \
- .freq = (f) * 1000, \
- .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
- (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
- .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
- .mps = ((m) << 16 | (p) << 8 | (s)), \
- }
-
-struct apll_freq {
- unsigned int freq;
- u32 clk_div_cpu0;
- u32 clk_div_cpu1;
- u32 mps;
-};
-
-struct exynos_dvfs_info {
- enum exynos_soc_type type;
- struct device *dev;
- unsigned long mpll_freq_khz;
- unsigned int pll_safe_idx;
- struct clk *cpu_clk;
- unsigned int *volt_table;
- struct cpufreq_frequency_table *freq_table;
- void (*set_freq)(unsigned int, unsigned int);
- bool (*need_apll_change)(unsigned int, unsigned int);
- void __iomem *cmu_regs;
-};
-
-#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
-extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
- return -EOPNOTSUPP;
-}
-#endif
-#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
-extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
-#define EXYNOS4_CLKSRC_CPU 0x14200
-#define EXYNOS4_CLKMUX_STATCPU 0x14400
-
-#define EXYNOS4_CLKDIV_CPU 0x14500
-#define EXYNOS4_CLKDIV_CPU1 0x14504
-#define EXYNOS4_CLKDIV_STATCPU 0x14600
-#define EXYNOS4_CLKDIV_STATCPU1 0x14604
-
-#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
-#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
-
-#define EXYNOS5_APLL_LOCK 0x00000
-#define EXYNOS5_APLL_CON0 0x00100
-#define EXYNOS5_CLKMUX_STATCPU 0x00400
-#define EXYNOS5_CLKDIV_CPU0 0x00500
-#define EXYNOS5_CLKDIV_CPU1 0x00504
-#define EXYNOS5_CLKDIV_STATCPU0 0x00600
-#define EXYNOS5_CLKDIV_STATCPU1 0x00604
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
deleted file mode 100644
index 9e78a850e29f..000000000000
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4X12 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos4x12_volt_table[] = {
- 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
- 1000000, 987500, 975000, 950000, 925000, 900000, 900000
-};
-
-static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
- {0, L1, 1400 * 1000},
- {0, L2, 1300 * 1000},
- {0, L3, 1200 * 1000},
- {0, L4, 1100 * 1000},
- {0, L5, 1000 * 1000},
- {0, L6, 900 * 1000},
- {0, L7, 800 * 1000},
- {0, L8, 700 * 1000},
- {0, L9, 600 * 1000},
- {0, L10, 500 * 1000},
- {0, L11, 400 * 1000},
- {0, L12, 300 * 1000},
- {0, L13, 200 * 1000},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq *apll_freq_4x12;
-
-static struct apll_freq apll_freq_4212[] = {
- /*
- * values:
- * freq
- * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
- * clock divider for COPY, HPM, RESERVED
- * PLL M, P, S
- */
- APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
- APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
- APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
- APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
- APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
- APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
- APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
- APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
- APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
- APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
- APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
- APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
-};
-
-static struct apll_freq apll_freq_4412[] = {
- /*
- * values:
- * freq
- * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
- * clock divider for COPY, HPM, CORES
- * PLL M, P, S
- */
- APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
- APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
- APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
- APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
- APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
- APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
- APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
- APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
- APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
- APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
- APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
- APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
-};
-
-static void exynos4x12_set_clkdiv(unsigned int div_index)
-{
- unsigned int tmp;
-
- /* Change Divider - CPU0 */
-
- tmp = apll_freq_4x12[div_index].clk_div_cpu0;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
- & 0x11111111)
- cpu_relax();
-
- /* Change Divider - CPU1 */
- tmp = apll_freq_4x12[div_index].clk_div_cpu1;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
- } while (tmp != 0x0);
-}
-
-static void exynos4x12_set_apll(unsigned int index)
-{
- unsigned int tmp, freq = apll_freq_4x12[index].freq;
-
- /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
- clk_set_parent(moutcore, mout_mpll);
-
- do {
- cpu_relax();
- tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
- >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
- tmp &= 0x7;
- } while (tmp != 0x2);
-
- clk_set_rate(mout_apll, freq * 1000);
-
- /* MUX_CORE_SEL = APLL */
- clk_set_parent(moutcore, mout_apll);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
- tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
- } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
-}
-
-static void exynos4x12_set_frequency(unsigned int old_index,
- unsigned int new_index)
-{
- if (old_index > new_index) {
- exynos4x12_set_clkdiv(new_index);
- exynos4x12_set_apll(new_index);
- } else if (old_index < new_index) {
- exynos4x12_set_apll(new_index);
- exynos4x12_set_clkdiv(new_index);
- }
-}
-
-int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
- struct device_node *np;
- unsigned long rate;
-
- /*
- * HACK: This is a temporary workaround to get access to clock
- * controller registers directly and remove static mappings and
- * dependencies on platform headers. It is necessary to enable
- * Exynos multi-platform support and will be removed together with
- * this whole driver as soon as Exynos gets migrated to use
- * cpufreq-dt driver.
- */
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
- if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
- return -ENODEV;
- }
-
- info->cmu_regs = of_iomap(np, 0);
- if (!info->cmu_regs) {
- pr_err("%s: failed to map CMU registers\n", __func__);
- return -EFAULT;
- }
-
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- moutcore = clk_get(NULL, "moutcore");
- if (IS_ERR(moutcore))
- goto err_moutcore;
-
- mout_mpll = clk_get(NULL, "mout_mpll");
- if (IS_ERR(mout_mpll))
- goto err_mout_mpll;
-
- rate = clk_get_rate(mout_mpll) / 1000;
-
- mout_apll = clk_get(NULL, "mout_apll");
- if (IS_ERR(mout_apll))
- goto err_mout_apll;
-
- if (info->type == EXYNOS_SOC_4212)
- apll_freq_4x12 = apll_freq_4212;
- else
- apll_freq_4x12 = apll_freq_4412;
-
- info->mpll_freq_khz = rate;
- /* 800 MHz */
- info->pll_safe_idx = L7;
- info->cpu_clk = cpu_clk;
- info->volt_table = exynos4x12_volt_table;
- info->freq_table = exynos4x12_freq_table;
- info->set_freq = exynos4x12_set_frequency;
-
- cpufreq = info;
-
- return 0;
-
-err_mout_apll:
- clk_put(mout_mpll);
-err_mout_mpll:
- clk_put(moutcore);
-err_moutcore:
- clk_put(cpu_clk);
-
- pr_debug("%s: failed initialization\n", __func__);
- return -EINVAL;
-}
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
deleted file mode 100644
index 3eafdc7ba787..000000000000
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS5250 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos5250_volt_table[] = {
- 1300000, 1250000, 1225000, 1200000, 1150000,
- 1125000, 1100000, 1075000, 1050000, 1025000,
- 1012500, 1000000, 975000, 950000, 937500,
- 925000
-};
-
-static struct cpufreq_frequency_table exynos5250_freq_table[] = {
- {0, L0, 1700 * 1000},
- {0, L1, 1600 * 1000},
- {0, L2, 1500 * 1000},
- {0, L3, 1400 * 1000},
- {0, L4, 1300 * 1000},
- {0, L5, 1200 * 1000},
- {0, L6, 1100 * 1000},
- {0, L7, 1000 * 1000},
- {0, L8, 900 * 1000},
- {0, L9, 800 * 1000},
- {0, L10, 700 * 1000},
- {0, L11, 600 * 1000},
- {0, L12, 500 * 1000},
- {0, L13, 400 * 1000},
- {0, L14, 300 * 1000},
- {0, L15, 200 * 1000},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq apll_freq_5250[] = {
- /*
- * values:
- * freq
- * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
- * clock divider for COPY, HPM, RESERVED
- * PLL M, P, S
- */
- APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
- APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
- APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
- APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
- APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
- APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
- APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
- APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
- APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
- APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
- APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
- APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
- APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
- APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
- APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
- APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
-};
-
-static void set_clkdiv(unsigned int div_index)
-{
- unsigned int tmp;
-
- /* Change Divider - CPU0 */
-
- tmp = apll_freq_5250[div_index].clk_div_cpu0;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
- & 0x11111111)
- cpu_relax();
-
- /* Change Divider - CPU1 */
- tmp = apll_freq_5250[div_index].clk_div_cpu1;
-
- __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
-
- while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
- cpu_relax();
-}
-
-static void set_apll(unsigned int index)
-{
- unsigned int tmp;
- unsigned int freq = apll_freq_5250[index].freq;
-
- /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
- clk_set_parent(moutcore, mout_mpll);
-
- do {
- cpu_relax();
- tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
- >> 16);
- tmp &= 0x7;
- } while (tmp != 0x2);
-
- clk_set_rate(mout_apll, freq * 1000);
-
- /* MUX_CORE_SEL = APLL */
- clk_set_parent(moutcore, mout_apll);
-
- do {
- cpu_relax();
- tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
- tmp &= (0x7 << 16);
- } while (tmp != (0x1 << 16));
-}
-
-static void exynos5250_set_frequency(unsigned int old_index,
- unsigned int new_index)
-{
- if (old_index > new_index) {
- set_clkdiv(new_index);
- set_apll(new_index);
- } else if (old_index < new_index) {
- set_apll(new_index);
- set_clkdiv(new_index);
- }
-}
-
-int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
- struct device_node *np;
- unsigned long rate;
-
- /*
- * HACK: This is a temporary workaround to get access to clock
- * controller registers directly and remove static mappings and
- * dependencies on platform headers. It is necessary to enable
- * Exynos multi-platform support and will be removed together with
- * this whole driver as soon as Exynos gets migrated to use
- * cpufreq-dt driver.
- */
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
- if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
- return -ENODEV;
- }
-
- info->cmu_regs = of_iomap(np, 0);
- if (!info->cmu_regs) {
- pr_err("%s: failed to map CMU registers\n", __func__);
- return -EFAULT;
- }
-
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- moutcore = clk_get(NULL, "mout_cpu");
- if (IS_ERR(moutcore))
- goto err_moutcore;
-
- mout_mpll = clk_get(NULL, "mout_mpll");
- if (IS_ERR(mout_mpll))
- goto err_mout_mpll;
-
- rate = clk_get_rate(mout_mpll) / 1000;
-
- mout_apll = clk_get(NULL, "mout_apll");
- if (IS_ERR(mout_apll))
- goto err_mout_apll;
-
- info->mpll_freq_khz = rate;
- /* 800 MHz */
- info->pll_safe_idx = L9;
- info->cpu_clk = cpu_clk;
- info->volt_table = exynos5250_volt_table;
- info->freq_table = exynos5250_freq_table;
- info->set_freq = exynos5250_set_frequency;
-
- cpufreq = info;
-
- return 0;
-
-err_mout_apll:
- clk_put(mout_mpll);
-err_mout_mpll:
- clk_put(moutcore);
-err_moutcore:
- clk_put(cpu_clk);
-
- pr_err("%s: failed initialization\n", __func__);
- return -EINVAL;
-}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index df14766a8e06..a8f1daffc9bc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -18,6 +18,21 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
+bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+ if (!table)
+ return false;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ if (pos->flags & CPUFREQ_BOOST_FREQ)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(policy_has_boost_freq);
+
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
@@ -297,15 +312,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
-
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- return policy ? policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index c30aaa6a54e8..0202429f1c5b 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
- struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
unsigned int cpu = policy->cpu;
struct cpufreq_acpi_io *data;
unsigned int result = 0;
+ struct cpufreq_frequency_table *freq_table;
pr_debug("acpi_cpufreq_cpu_init\n");
@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
}
/* alloc freq_table */
- data->freq_table = kzalloc(sizeof(*data->freq_table) *
+ freq_table = kzalloc(sizeof(*freq_table) *
(data->acpi_data.state_count + 1),
GFP_KERNEL);
- if (!data->freq_table) {
+ if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
}
@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
for (i = 0; i <= data->acpi_data.state_count; i++)
{
if (i < data->acpi_data.state_count) {
- data->freq_table[i].frequency =
+ freq_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
} else {
- data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
}
}
- result = cpufreq_table_validate_and_show(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
if (result) {
goto err_freqfree;
}
@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
return (result);
err_freqfree:
- kfree(data->freq_table);
+ kfree(freq_table);
err_unreg:
- acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ acpi_processor_unregister_performance(cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
if (data) {
acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(&data->acpi_data,
- policy->cpu);
+ acpi_processor_unregister_performance(policy->cpu);
+ kfree(policy->freq_table);
kfree(data);
}
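The acpi_processor_unregister_performance() change seen here (and in e_powersaver above and the powernow drivers below) drops the performance-structure argument: the ACPI core now tracks the registered structure per CPU itself, so callers pass only the CPU number. In sketch form:

	/* before: the caller had to hand back its own registration */
	acpi_processor_unregister_performance(&data->acpi_data, cpu);

	/* after: the ACPI core looks up the per-CPU registration */
	acpi_processor_unregister_performance(cpu);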
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 129e266f7621..2faa4216bf2a 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
/* get current setting */
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
- if (machine_is_integrator()) {
+ if (machine_is_integrator())
vco.s = (cm_osc >> 8) & 7;
- } else if (machine_is_cintegrator()) {
+ else if (machine_is_cintegrator())
vco.s = 1;
- }
vco.v = cm_osc & 255;
vco.r = 22;
freqs.old = icst_hz(&cclk_params, vco) / 1000;
@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)
/* detect memory etc. */
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
- if (machine_is_integrator()) {
+ if (machine_is_integrator())
vco.s = (cm_osc >> 8) & 7;
- } else {
+ else
vco.s = 1;
- }
vco.v = cm_osc & 255;
vco.r = 22;
@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
+ if (!res)
return -ENODEV;
cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
@@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = {
module_platform_driver_probe(integrator_cpufreq_driver,
integrator_cpufreq_probe);
-MODULE_AUTHOR ("Russell M. King");
-MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..3af9dd7332e6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
-#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
- int min, max, cpu;
- u64 value, freq;
+ int min, hw_min, max, hw_max, cpu, range, adj_range;
+ u64 value, cap;
+
+ rdmsrl(MSR_HWP_CAPABILITIES, cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
get_online_cpus();
for_each_online_cpu(cpu) {
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- min = PCT_TO_HWP(limits.min_perf_pct);
+ adj_range = limits.min_perf_pct * range / 100;
+ min = hw_min + adj_range;
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- max = PCT_TO_HWP(limits.max_perf_pct);
+ adj_range = limits.max_perf_pct * range / 100;
+ max = hw_min + adj_range;
if (limits.no_turbo) {
- rdmsrl( MSR_HWP_CAPABILITIES, freq);
- max = HWP_GUARANTEED_PERF(freq);
+ hw_max = HWP_GUARANTEED_PERF(cap);
+ if (hw_max < max)
+ max = hw_max;
}
value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+ limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+ limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
if (hwp_active)
@@ -484,12 +495,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
}
/************************** sysfs end ************************/
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
- hwp_active++;
pr_info("intel_pstate: HWP enabled\n");
- wrmsrl( MSR_PM_ENABLE, 0x1);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
static int byt_get_min_pstate(void)
@@ -522,7 +532,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
int32_t vid_fp;
u32 vid;
- val = pstate << 8;
+ val = (u64)pstate << 8;
if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
@@ -611,7 +621,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
- val = pstate << 8;
+ val = (u64)pstate << 8;
if (limits.no_turbo && !limits.turbo_disabled)
val |= (u64)1 << 32;
@@ -681,6 +691,7 @@ static struct cpu_defaults knl_params = {
.get_max = core_get_max_pstate,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
.set = core_set_pstate,
},
};
@@ -765,7 +776,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- tsc = native_read_tsc();
+ tsc = rdtsc();
local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
@@ -908,6 +919,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x4c, byt_params),
ICPU(0x4e, core_params),
ICPU(0x4f, core_params),
+ ICPU(0x5e, core_params),
ICPU(0x56, core_params),
ICPU(0x57, knl_params),
{}
@@ -932,6 +944,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
cpu->cpu = cpunum;
+
+ if (hwp_active)
+ intel_pstate_hwp_enable(cpu);
+
intel_pstate_get_cpu_pstates(cpu);
init_timer_deferrable(&cpu->timer);
@@ -984,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
- limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
- limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+ /* Normalize user input to [min_policy_pct, max_policy_pct] */
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+ /* Make sure min_perf_pct <= max_perf_pct */
+ limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
if (hwp_active)
@@ -1169,6 +1192,10 @@ static struct hw_vendor_info vendor_info[] = {
{1, "ORACLE", "X4270M3 ", PPC},
{1, "ORACLE", "X4270M2 ", PPC},
{1, "ORACLE", "X4170M2 ", PPC},
+ {1, "ORACLE", "X4170 M3", PPC},
+ {1, "ORACLE", "X4275 M3", PPC},
+ {1, "ORACLE", "X6-2 ", PPC},
+ {1, "ORACLE", "Sudbury ", PPC},
{0, "", ""},
};
@@ -1245,7 +1272,7 @@ static int __init intel_pstate_init(void)
return -ENOMEM;
if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
- intel_pstate_hwp_enable();
+ hwp_active++;
if (!hwp_active && hwp_only)
goto out;
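A worked example of the new HWP mapping (illustrative register values, not taken from the patch): with HWP_LOWEST_PERF(cap) = 8 and HWP_HIGHEST_PERF(cap) = 40, range = 32, so a 50% floor becomes min = 8 + 50 * 32 / 100 = 24, squarely inside the hardware's advertised performance range. The removed PCT_TO_HWP(50) = 50 * 255 / 100 = 127 ignored MSR_HWP_CAPABILITIES entirely and could request a level the CPU does not implement.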
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index fc897babab55..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -3,7 +3,7 @@
*
 * The 2E revision of the Loongson processor does not support this feature.
*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -20,7 +20,7 @@
#include <asm/clock.h>
#include <asm/idle.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
static uint nowait;
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
new file mode 100644
index 000000000000..49caed293a3b
--- /dev/null
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MIN_VOLT_SHIFT (100000)
+#define MAX_VOLT_SHIFT (200000)
+#define MAX_VOLT_LIMIT (1150000)
+#define VOLT_TOL (10000)
+
+/*
+ * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
+ * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
+ * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
+ * voltage inputs need to be controlled under a hardware limitation:
+ * 100mV < Vsram - Vproc < 200mV
+ *
+ * When scaling the clock frequency of a CPU clock domain, the clock source
+ * needs to be switched to another stable PLL clock temporarily until
+ * the original PLL becomes stable at target frequency.
+ */
+struct mtk_cpu_dvfs_info {
+ struct device *cpu_dev;
+ struct regulator *proc_reg;
+ struct regulator *sram_reg;
+ struct clk *cpu_clk;
+ struct clk *inter_clk;
+ struct thermal_cooling_device *cdev;
+ int intermediate_voltage;
+ bool need_voltage_tracking;
+};
+
+static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
+ int new_vproc)
+{
+ struct regulator *proc_reg = info->proc_reg;
+ struct regulator *sram_reg = info->sram_reg;
+ int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
+
+ old_vproc = regulator_get_voltage(proc_reg);
+ old_vsram = regulator_get_voltage(sram_reg);
+ /* Vsram should not exceed the maximum allowed voltage of SoC. */
+ new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
+
+ if (old_vproc < new_vproc) {
+ /*
+ * When scaling up voltages, Vsram and Vproc scale up step
+ * by step. At each step, set Vsram to (Vproc + 200mV) first,
+ * then set Vproc to (Vsram - 100mV).
+ * Keep doing it until Vsram and Vproc hit target voltages.
+ */
+ do {
+ old_vsram = regulator_get_voltage(sram_reg);
+ old_vproc = regulator_get_voltage(proc_reg);
+
+ vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
+
+ if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+ vsram = MAX_VOLT_LIMIT;
+
+ /*
+ * If the target Vsram hits the maximum voltage,
+ * try to set the exact voltage value first.
+ */
+ ret = regulator_set_voltage(sram_reg, vsram,
+ vsram);
+ if (ret)
+ ret = regulator_set_voltage(sram_reg,
+ vsram - VOLT_TOL,
+ vsram);
+
+ vproc = new_vproc;
+ } else {
+ ret = regulator_set_voltage(sram_reg, vsram,
+ vsram + VOLT_TOL);
+
+ vproc = vsram - MIN_VOLT_SHIFT;
+ }
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(proc_reg, vproc,
+ vproc + VOLT_TOL);
+ if (ret) {
+ regulator_set_voltage(sram_reg, old_vsram,
+ old_vsram);
+ return ret;
+ }
+ } while (vproc < new_vproc || vsram < new_vsram);
+ } else if (old_vproc > new_vproc) {
+ /*
+ * When scaling down voltages, Vsram and Vproc scale down step
+ * by step. At each step, set Vproc to (Vsram - 200mV) first,
+ * then set Vsram to (Vproc + 100mV).
+ * Keep doing it until Vsram and Vproc hit target voltages.
+ */
+ do {
+ old_vproc = regulator_get_voltage(proc_reg);
+ old_vsram = regulator_get_voltage(sram_reg);
+
+ vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
+ ret = regulator_set_voltage(proc_reg, vproc,
+ vproc + VOLT_TOL);
+ if (ret)
+ return ret;
+
+ if (vproc == new_vproc)
+ vsram = new_vsram;
+ else
+ vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
+
+ if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+ vsram = MAX_VOLT_LIMIT;
+
+ /*
+ * If the target Vsram hits the maximum voltage,
+ * try to set the exact voltage value first.
+ */
+ ret = regulator_set_voltage(sram_reg, vsram,
+ vsram);
+ if (ret)
+ ret = regulator_set_voltage(sram_reg,
+ vsram - VOLT_TOL,
+ vsram);
+ } else {
+ ret = regulator_set_voltage(sram_reg, vsram,
+ vsram + VOLT_TOL);
+ }
+
+ if (ret) {
+ regulator_set_voltage(proc_reg, old_vproc,
+ old_vproc);
+ return ret;
+ }
+ } while (vproc > new_vproc + VOLT_TOL ||
+ vsram > new_vsram + VOLT_TOL);
+ }
+
+ return 0;
+}
+
+static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
+{
+ if (info->need_voltage_tracking)
+ return mtk_cpufreq_voltage_tracking(info, vproc);
+ else
+ return regulator_set_voltage(info->proc_reg, vproc,
+ vproc + VOLT_TOL);
+}
+
+static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
+ struct clk *cpu_clk = policy->clk;
+ struct clk *armpll = clk_get_parent(cpu_clk);
+ struct mtk_cpu_dvfs_info *info = policy->driver_data;
+ struct device *cpu_dev = info->cpu_dev;
+ struct dev_pm_opp *opp;
+ long freq_hz, old_freq_hz;
+ int vproc, old_vproc, inter_vproc, target_vproc, ret;
+
+ inter_vproc = info->intermediate_voltage;
+
+ old_freq_hz = clk_get_rate(cpu_clk);
+ old_vproc = regulator_get_voltage(info->proc_reg);
+
+ freq_hz = freq_table[index].frequency * 1000;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ pr_err("cpu%d: failed to find OPP for %ld\n",
+ policy->cpu, freq_hz);
+ return PTR_ERR(opp);
+ }
+ vproc = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ /*
+ * If the new voltage or the intermediate voltage is higher than the
+ * current voltage, scale up voltage first.
+ */
+ target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
+ if (old_vproc < target_vproc) {
+ ret = mtk_cpufreq_set_voltage(info, target_vproc);
+ if (ret) {
+ pr_err("cpu%d: failed to scale up voltage!\n",
+ policy->cpu);
+ mtk_cpufreq_set_voltage(info, old_vproc);
+ return ret;
+ }
+ }
+
+ /* Reparent the CPU clock to intermediate clock. */
+ ret = clk_set_parent(cpu_clk, info->inter_clk);
+ if (ret) {
+ pr_err("cpu%d: failed to re-parent cpu clock!\n",
+ policy->cpu);
+ mtk_cpufreq_set_voltage(info, old_vproc);
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* Set the original PLL to target rate. */
+ ret = clk_set_rate(armpll, freq_hz);
+ if (ret) {
+ pr_err("cpu%d: failed to scale cpu clock rate!\n",
+ policy->cpu);
+ clk_set_parent(cpu_clk, armpll);
+ mtk_cpufreq_set_voltage(info, old_vproc);
+ return ret;
+ }
+
+ /* Set parent of CPU clock back to the original PLL. */
+ ret = clk_set_parent(cpu_clk, armpll);
+ if (ret) {
+ pr_err("cpu%d: failed to re-parent cpu clock!\n",
+ policy->cpu);
+ mtk_cpufreq_set_voltage(info, inter_vproc);
+ WARN_ON(1);
+ return ret;
+ }
+
+ /*
+ * If the new voltage is lower than the intermediate voltage or the
+ * original voltage, scale down to the new voltage.
+ */
+ if (vproc < inter_vproc || vproc < old_vproc) {
+ ret = mtk_cpufreq_set_voltage(info, vproc);
+ if (ret) {
+ pr_err("cpu%d: failed to scale down voltage!\n",
+ policy->cpu);
+ clk_set_parent(cpu_clk, info->inter_clk);
+ clk_set_rate(armpll, old_freq_hz);
+ clk_set_parent(cpu_clk, armpll);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct mtk_cpu_dvfs_info *info = policy->driver_data;
+ struct device_node *np = of_node_get(info->cpu_dev->of_node);
+
+ if (WARN_ON(!np))
+ return;
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ info->cdev = of_cpufreq_cooling_register(np,
+ policy->related_cpus);
+
+ if (IS_ERR(info->cdev)) {
+ dev_err(info->cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(info->cdev));
+
+ info->cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+}
+
+static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+{
+ struct device *cpu_dev;
+ struct regulator *proc_reg = ERR_PTR(-ENODEV);
+ struct regulator *sram_reg = ERR_PTR(-ENODEV);
+ struct clk *cpu_clk = ERR_PTR(-ENODEV);
+ struct clk *inter_clk = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+
+ cpu_clk = clk_get(cpu_dev, "cpu");
+ if (IS_ERR(cpu_clk)) {
+ if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
+ pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
+ else
+ pr_err("failed to get cpu clk for cpu%d\n", cpu);
+
+ ret = PTR_ERR(cpu_clk);
+ return ret;
+ }
+
+ inter_clk = clk_get(cpu_dev, "intermediate");
+ if (IS_ERR(inter_clk)) {
+ if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
+ pr_warn("intermediate clk for cpu%d not ready, retry.\n",
+ cpu);
+ else
+ pr_err("failed to get intermediate clk for cpu%d\n",
+ cpu);
+
+ ret = PTR_ERR(inter_clk);
+ goto out_free_resources;
+ }
+
+ proc_reg = regulator_get_exclusive(cpu_dev, "proc");
+ if (IS_ERR(proc_reg)) {
+ if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
+ pr_warn("proc regulator for cpu%d not ready, retry.\n",
+ cpu);
+ else
+ pr_err("failed to get proc regulator for cpu%d\n",
+ cpu);
+
+ ret = PTR_ERR(proc_reg);
+ goto out_free_resources;
+ }
+
+ /* Both presence and absence of sram regulator are valid cases. */
+ sram_reg = regulator_get_exclusive(cpu_dev, "sram");
+
+ ret = of_init_opp_table(cpu_dev);
+ if (ret) {
+ pr_warn("no OPP table for cpu%d\n", cpu);
+ goto out_free_resources;
+ }
+
+ /* Search a safe voltage for intermediate frequency. */
+ rate = clk_get_rate(inter_clk);
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ pr_err("failed to get intermediate opp for cpu%d\n", cpu);
+ ret = PTR_ERR(opp);
+ goto out_free_opp_table;
+ }
+ info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ info->cpu_dev = cpu_dev;
+ info->proc_reg = proc_reg;
+ info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
+ info->cpu_clk = cpu_clk;
+ info->inter_clk = inter_clk;
+
+ /*
+ * If SRAM regulator is present, software "voltage tracking" is needed
+ * for this CPU power domain.
+ */
+ info->need_voltage_tracking = !IS_ERR(sram_reg);
+
+ return 0;
+
+out_free_opp_table:
+ of_free_opp_table(cpu_dev);
+
+out_free_resources:
+ if (!IS_ERR(proc_reg))
+ regulator_put(proc_reg);
+ if (!IS_ERR(sram_reg))
+ regulator_put(sram_reg);
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
+ if (!IS_ERR(inter_clk))
+ clk_put(inter_clk);
+
+ return ret;
+}
+
+static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+{
+ if (!IS_ERR(info->proc_reg))
+ regulator_put(info->proc_reg);
+ if (!IS_ERR(info->sram_reg))
+ regulator_put(info->sram_reg);
+ if (!IS_ERR(info->cpu_clk))
+ clk_put(info->cpu_clk);
+ if (!IS_ERR(info->inter_clk))
+ clk_put(info->inter_clk);
+
+ of_free_opp_table(info->cpu_dev);
+}
+
+static int mtk_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct mtk_cpu_dvfs_info *info;
+ struct cpufreq_frequency_table *freq_table;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
+ if (ret) {
+ pr_err("%s failed to initialize dvfs info for cpu%d\n",
+ __func__, policy->cpu);
+ goto out_free_dvfs_info;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
+ if (ret) {
+ pr_err("failed to init cpufreq table for cpu%d: %d\n",
+ policy->cpu, ret);
+ goto out_release_dvfs_info;
+ }
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ goto out_free_cpufreq_table;
+ }
+
+ /* CPUs in the same cluster share a clock and power domain. */
+ cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
+ policy->driver_data = info;
+ policy->clk = info->cpu_clk;
+
+ return 0;
+
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
+
+out_release_dvfs_info:
+ mtk_cpu_dvfs_info_release(info);
+
+out_free_dvfs_info:
+ kfree(info);
+
+ return ret;
+}
+
+static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpu_dvfs_info *info = policy->driver_data;
+
+ cpufreq_cooling_unregister(info->cdev);
+ dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
+ mtk_cpu_dvfs_info_release(info);
+ kfree(info);
+
+ return 0;
+}
+
+static struct cpufreq_driver mt8173_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_set_target,
+ .get = cpufreq_generic_get,
+ .init = mtk_cpufreq_init,
+ .exit = mtk_cpufreq_exit,
+ .ready = mtk_cpufreq_ready,
+ .name = "mtk-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mt8173_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
+ if (ret)
+ pr_err("failed to register mtk cpufreq driver\n");
+
+ return ret;
+}
+
+static struct platform_driver mt8173_cpufreq_platdrv = {
+ .driver = {
+ .name = "mt8173-cpufreq",
+ },
+ .probe = mt8173_cpufreq_probe,
+};
+
+static int mt8173_cpufreq_driver_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ if (!of_machine_is_compatible("mediatek,mt8173"))
+ return -ENODEV;
+
+ err = platform_driver_register(&mt8173_cpufreq_platdrv);
+ if (err)
+ return err;
+
+ /*
+ * Since there's no place to hold the device registration code and no
+ * device-tree-based way to match the cpufreq driver yet, both the
+ * driver and the device registration code are put here to handle
+ * deferred probing.
+ */
+ pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("failed to register mtk-cpufreq platform device\n");
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(mt8173_cpufreq_driver_init);
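A worked pass through mtk_cpufreq_voltage_tracking() with illustrative voltages (not from the patch): scaling Vproc up from 900 mV to 1050 mV gives new_vsram = min(1050 + 100, 1150) = 1150 mV. The first iteration raises Vsram to min(1150, 900 + 200) = 1100 mV and then Vproc to 1100 - 100 = 1000 mV; the second raises Vsram to its 1150 mV cap and then Vproc straight to the 1050 mV target, so the loop exits with both rails at their goals, and at no step does Vsram - Vproc exceed the 200 mV limit or settle below the 100 mV floor.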
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 37c5742482d8..c1ae1999770a 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
return 0;
err2:
- acpi_processor_unregister_performance(acpi_processor_perf, 0);
+ acpi_processor_unregister_performance(0);
err1:
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
err05:
@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
- acpi_processor_unregister_performance(acpi_processor_perf, 0);
+ acpi_processor_unregister_performance(0);
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
kfree(acpi_processor_perf);
}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 5c035d04d827..0b5bf135b090 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -795,7 +795,7 @@ err_out_mem:
kfree(powernow_table);
err_out:
- acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+ acpi_processor_unregister_performance(data->cpu);
/* data->acpi_data.state_count informs us at ->exit()
* whether ACPI was used */
@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
if (data->acpi_data.state_count)
- acpi_processor_unregister_performance(&data->acpi_data,
- data->cpu);
+ acpi_processor_unregister_performance(data->cpu);
free_cpumask_var(data->acpi_data.shared_cpu_map);
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ebef0d8279c7..64994e10638e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -27,20 +27,31 @@
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>
#define POWERNV_MAX_PSTATES 256
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define PMSR_MAX(x) ((x >> 32) & 0xFF)
-#define PMSR_LP(x) ((x >> 48) & 0xFF)
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting, throttled;
+static bool rebooting, throttled, occ_reset;
+
+static struct chip {
+ unsigned int id;
+ bool throttled;
+ cpumask_t mask;
+ struct work_struct throttle;
+ bool restore;
+} *chips;
+
+static int nr_chips;
/*
* Note: The set of pstates consists of contiguous integers, the
@@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void)
return powernv_pstate_info.max - powernv_pstate_info.nominal;
}
-static void powernv_cpufreq_throttle_check(unsigned int cpu)
+static void powernv_cpufreq_throttle_check(void *data)
{
+ unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax, pmsr_lp;
+ int pmsr_pmax, i;
pmsr = get_pmspr(SPRN_PMSR);
+ for (i = 0; i < nr_chips; i++)
+ if (chips[i].id == cpu_to_chip_id(cpu))
+ break;
+
/* Check for Pmax Capping */
pmsr_pmax = (s8)PMSR_MAX(pmsr);
if (pmsr_pmax != powernv_pstate_info.max) {
- throttled = true;
- pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
- pr_info("Max allowed Pstate is capped\n");
+ if (chips[i].throttled)
+ goto next;
+ chips[i].throttled = true;
+ pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
+ chips[i].id, pmsr_pmax);
+ } else if (chips[i].throttled) {
+ chips[i].throttled = false;
+ pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
+ chips[i].id, pmsr_pmax);
}
- /*
- * Check for Psafe by reading LocalPstate
- * or check if Psafe_mode_active is set in PMSR.
- */
- pmsr_lp = (s8)PMSR_LP(pmsr);
- if ((pmsr_lp < powernv_pstate_info.min) ||
- (pmsr & PMSR_PSAFE_ENABLE)) {
+ /* Check if Psafe_mode_active is set in PMSR. */
+next:
+ if (pmsr & PMSR_PSAFE_ENABLE) {
throttled = true;
pr_info("Pstate set to safe frequency\n");
}
@@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
return 0;
if (!throttled)
- powernv_cpufreq_throttle_check(smp_processor_id());
+ powernv_cpufreq_throttle_check(NULL);
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
@@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
.notifier_call = powernv_cpufreq_reboot_notifier,
};
+void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+ struct chip *chip = container_of(work, struct chip, throttle);
+ unsigned int cpu;
+ cpumask_var_t mask;
+
+ smp_call_function_any(&chip->mask,
+ powernv_cpufreq_throttle_check, NULL, 0);
+
+ if (!chip->restore)
+ return;
+
+ chip->restore = false;
+ cpumask_copy(mask, &chip->mask);
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ int index, tcpu;
+ struct cpufreq_policy policy;
+
+ cpufreq_get_policy(&policy, cpu);
+ cpufreq_frequency_table_target(&policy, policy.freq_table,
+ policy.cur,
+ CPUFREQ_RELATION_C, &index);
+ powernv_cpufreq_target_index(&policy, index);
+ for_each_cpu(tcpu, policy.cpus)
+ cpumask_clear_cpu(tcpu, mask);
+ }
+}
+
+static char throttle_reason[][30] = {
+ "No throttling",
+ "Power Cap",
+ "Processor Over Temperature",
+ "Power Supply Failure",
+ "Over Current",
+ "OCC Reset"
+ };
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+ unsigned long msg_type, void *_msg)
+{
+ struct opal_msg *msg = _msg;
+ struct opal_occ_msg omsg;
+ int i;
+
+ if (msg_type != OPAL_MSG_OCC)
+ return 0;
+
+ omsg.type = be64_to_cpu(msg->params[0]);
+
+ switch (omsg.type) {
+ case OCC_RESET:
+ occ_reset = true;
+ pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+ /*
+ * powernv_cpufreq_throttle_check() is called in the
+ * target() callback, which can detect the throttle state
+ * for governors like ondemand.
+ * But static governors will not call target() often, so
+ * report throttling here.
+ */
+ if (!throttled) {
+ throttled = true;
+ pr_crit("CPU frequency is throttled for duration\n");
+ }
+
+ break;
+ case OCC_LOAD:
+ pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+ break;
+ case OCC_THROTTLE:
+ omsg.chip = be64_to_cpu(msg->params[1]);
+ omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+ if (occ_reset) {
+ occ_reset = false;
+ throttled = false;
+ pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].restore = true;
+ schedule_work(&chips[i].throttle);
+ }
+
+ return 0;
+ }
+
+ if (omsg.throttle_status &&
+ omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+ pr_info("OCC: Chip %u Pmax reduced due to %s\n",
+ (unsigned int)omsg.chip,
+ throttle_reason[omsg.throttle_status]);
+ else if (!omsg.throttle_status)
+ pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
+ throttle_reason[omsg.throttle_status]);
+ else
+ return 0;
+
+ for (i = 0; i < nr_chips; i++)
+ if (chips[i].id == omsg.chip) {
+ if (!omsg.throttle_status)
+ chips[i].restore = true;
+ schedule_work(&chips[i].throttle);
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+ .notifier_call = powernv_cpufreq_occ_msg,
+ .next = NULL,
+ .priority = 0,
+};
+
static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
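powernv_cpufreq_work_fn() above recovers its struct chip from the embedded work_struct with container_of(). A self-contained sketch of that idiom built from offsetof alone; the struct and function names here are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct chip {
	unsigned int id;
	struct work throttle;	/* embedded member, as in the driver */
};

static void work_fn(struct work *w)
{
	/* Walk back from the member to the enclosing structure. */
	struct chip *c = container_of(w, struct chip, throttle);

	printf("servicing chip %u\n", c->id);
}

int main(void)
{
	struct chip c = { .id = 3 };

	work_fn(&c.throttle);	/* only the member pointer is passed around */
	return 0;
}
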
@@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.attr = powernv_cpu_freq_attr,
};
+static int init_chip_info(void)
+{
+ unsigned int chip[256];
+ unsigned int cpu, i;
+ unsigned int prev_chip_id = UINT_MAX;
+
+ for_each_possible_cpu(cpu) {
+ unsigned int id = cpu_to_chip_id(cpu);
+
+ if (prev_chip_id != id) {
+ prev_chip_id = id;
+ chip[nr_chips++] = id;
+ }
+ }
+
+ chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ if (!chips)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].id = chip[i];
+ chips[i].throttled = false;
+ cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+ INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+ chips[i].restore = false;
+ }
+
+ return 0;
+}
+
static int __init powernv_cpufreq_init(void)
{
int rc = 0;
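init_chip_info() deduplicates chip ids by comparing each CPU's id only against the previous one, which assumes CPUs of the same chip enumerate contiguously. A standalone sketch of that pass over a hypothetical cpu-to-chip map:

#include <stdio.h>

int main(void)
{
	/* Hypothetical cpu_to_chip_id() results for 8 CPUs on 2 chips. */
	unsigned int cpu_chip[] = { 0, 0, 0, 0, 8, 8, 8, 8 };
	unsigned int chip[8];
	unsigned int nr_chips = 0, prev = (unsigned int)-1;

	/* New chip whenever the id differs from the previous CPU's id. */
	for (unsigned cpu = 0; cpu < 8; cpu++) {
		if (cpu_chip[cpu] != prev) {
			prev = cpu_chip[cpu];
			chip[nr_chips++] = prev;
		}
	}

	for (unsigned i = 0; i < nr_chips; i++)
		printf("chip[%u] = %u\n", i, chip[i]);
	return 0;
}
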
@@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void)
return rc;
}
+ /* Populate chip info */
+ rc = init_chip_info();
+ if (rc)
+ return rc;
+
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
return cpufreq_register_driver(&powernv_cpufreq_driver);
}
module_init(powernv_cpufreq_init);
@@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ opal_message_notifier_unregister(OPAL_MSG_OCC,
+ &powernv_cpufreq_opal_nb);
cpufreq_unregister_driver(&powernv_cpufreq_driver);
}
module_exit(powernv_cpufreq_exit);
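The OCC notifier decodes every OPAL message parameter with be64_to_cpu(), since OPAL hands the parameters over big-endian regardless of the host's byte order. A userspace analogue using glibc's be64toh(); the message value is invented for illustration:

#include <endian.h>	/* htobe64()/be64toh(); a glibc/BSD extension */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A hypothetical big-endian wire value for "throttle status 2". */
	uint64_t wire = htobe64(2);
	uint64_t host = be64toh(wire);	/* same role as be64_to_cpu() */

	printf("wire value read natively: 0x%016llx\n",
	       (unsigned long long)wire);
	printf("decoded parameter: %llu\n", (unsigned long long)host);
	return 0;
}
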
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index d29e8da396a0..7969f7690498 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb,
struct cpufreq_frequency_table *cbe_freqs;
u8 node;
- /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
- * and CPUFREQ_NOTIFY policy events?)
+ /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
+ * policy events?
*/
if (event == CPUFREQ_START)
return 0;
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index ffa3389e535b..992ce6f9abec 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table)
pentry = (struct sfi_freq_table_entry *)sb->pentry;
totallen = num_freq_table_entries * sizeof(*pentry);
- sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+ sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
if (!sfi_cpufreq_array)
return -ENOMEM;
- memcpy(sfi_cpufreq_array, pentry, totallen);
-
return 0;
}
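The change above folds kzalloc() plus memcpy() into a single kmemdup() call. A malloc-based model of such a duplicate-into-fresh-buffer helper, for illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(): allocate len bytes and copy src in. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	int table[] = { 100, 200, 300 };
	int *copy = memdup(table, sizeof(table));

	if (!copy)
		return 1;	/* mirrors the driver's -ENOMEM path */
	printf("copy[1] = %d\n", copy[1]);
	free(copy);
	return 0;
}
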
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 4ab7a2156672..15d3214aaa00 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int prev_speed;
unsigned int ret = 0;
unsigned long flags;
- struct timeval tv1, tv2;
+ ktime_t tv1, tv2;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
@@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
/* start latency measurement */
if (transition_latency)
- do_gettimeofday(&tv1);
+ tv1 = ktime_get();
/* switch to high state */
set_state(SPEEDSTEP_HIGH);
/* end latency measurement */
if (transition_latency)
- do_gettimeofday(&tv2);
+ tv2 = ktime_get();
*high_speed = speedstep_get_frequency(processor);
if (!*high_speed) {
@@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
set_state(SPEEDSTEP_LOW);
if (transition_latency) {
- *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
- tv2.tv_usec - tv1.tv_usec;
+ *transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
pr_debug("transition latency is %u uSec\n", *transition_latency);
/* convert uSec to nSec and add 20% for safety reasons */
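Switching from do_gettimeofday() to ktime_get() makes the latency measurement immune to wall-clock adjustments such as NTP steps. The userspace counterpart is CLOCK_MONOTONIC; a small sketch of the same before/after measurement, with usleep() standing in for the state switch:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Microseconds between two monotonic stamps, like ktime_to_us(ktime_sub()). */
static long elapsed_us(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000000L +
	       (b->tv_nsec - a->tv_nsec) / 1000L;
}

int main(void)
{
	struct timespec tv1, tv2;

	clock_gettime(CLOCK_MONOTONIC, &tv1);
	usleep(1000);			/* stand-in for the state switch */
	clock_gettime(CLOCK_MONOTONIC, &tv2);

	printf("transition latency is %ld uSec\n", elapsed_us(&tv1, &tv2));
	return 0;
}
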
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
new file mode 100644
index 000000000000..20bcceb58ccc
--- /dev/null
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -0,0 +1,214 @@
+/*
+ * Tegra 124 cpufreq driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq-dt.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+struct tegra124_cpufreq_priv {
+ struct regulator *vdd_cpu_reg;
+ struct clk *cpu_clk;
+ struct clk *pllp_clk;
+ struct clk *pllx_clk;
+ struct clk *dfll_clk;
+ struct platform_device *cpufreq_dt_pdev;
+};
+
+static int tegra124_cpu_switch_to_dfll(struct tegra124_cpufreq_priv *priv)
+{
+ struct clk *orig_parent;
+ int ret;
+
+ ret = clk_set_rate(priv->dfll_clk, clk_get_rate(priv->cpu_clk));
+ if (ret)
+ return ret;
+
+ orig_parent = clk_get_parent(priv->cpu_clk);
+ clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+
+ ret = clk_prepare_enable(priv->dfll_clk);
+ if (ret)
+ goto out;
+
+ clk_set_parent(priv->cpu_clk, priv->dfll_clk);
+
+ return 0;
+
+out:
+ clk_set_parent(priv->cpu_clk, orig_parent);
+
+ return ret;
+}
+
+static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
+{
+ clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+ clk_disable_unprepare(priv->dfll_clk);
+ regulator_sync_voltage(priv->vdd_cpu_reg);
+ clk_set_parent(priv->cpu_clk, priv->pllx_clk);
+}
+
+static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
+ .independent_clocks = false,
+};
+
+static int tegra124_cpufreq_probe(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv;
+ struct device_node *np;
+ struct device *cpu_dev;
+ struct platform_device_info cpufreq_dt_devinfo = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_cpu_device_node_get(0);
+ if (!np)
+ return -ENODEV;
+
+ priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
+ if (IS_ERR(priv->vdd_cpu_reg)) {
+ ret = PTR_ERR(priv->vdd_cpu_reg);
+ goto out_put_np;
+ }
+
+ priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
+ if (IS_ERR(priv->cpu_clk)) {
+ ret = PTR_ERR(priv->cpu_clk);
+ goto out_put_vdd_cpu_reg;
+ }
+
+ priv->dfll_clk = of_clk_get_by_name(np, "dfll");
+ if (IS_ERR(priv->dfll_clk)) {
+ ret = PTR_ERR(priv->dfll_clk);
+ goto out_put_cpu_clk;
+ }
+
+ priv->pllx_clk = of_clk_get_by_name(np, "pll_x");
+ if (IS_ERR(priv->pllx_clk)) {
+ ret = PTR_ERR(priv->pllx_clk);
+ goto out_put_dfll_clk;
+ }
+
+ priv->pllp_clk = of_clk_get_by_name(np, "pll_p");
+ if (IS_ERR(priv->pllp_clk)) {
+ ret = PTR_ERR(priv->pllp_clk);
+ goto out_put_pllx_clk;
+ }
+
+ ret = tegra124_cpu_switch_to_dfll(priv);
+ if (ret)
+ goto out_put_pllp_clk;
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = &pdev->dev;
+ cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
+ cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
+
+ priv->cpufreq_dt_pdev =
+ platform_device_register_full(&cpufreq_dt_devinfo);
+ if (IS_ERR(priv->cpufreq_dt_pdev)) {
+ ret = PTR_ERR(priv->cpufreq_dt_pdev);
+ goto out_switch_to_pllx;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+out_switch_to_pllx:
+ tegra124_cpu_switch_to_pllx(priv);
+out_put_pllp_clk:
+ clk_put(priv->pllp_clk);
+out_put_pllx_clk:
+ clk_put(priv->pllx_clk);
+out_put_dfll_clk:
+ clk_put(priv->dfll_clk);
+out_put_cpu_clk:
+ clk_put(priv->cpu_clk);
+out_put_vdd_cpu_reg:
+ regulator_put(priv->vdd_cpu_reg);
+out_put_np:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt_pdev);
+ tegra124_cpu_switch_to_pllx(priv);
+
+ clk_put(priv->pllp_clk);
+ clk_put(priv->pllx_clk);
+ clk_put(priv->dfll_clk);
+ clk_put(priv->cpu_clk);
+ regulator_put(priv->vdd_cpu_reg);
+
+ return 0;
+}
+
+static struct platform_driver tegra124_cpufreq_platdrv = {
+ .driver.name = "cpufreq-tegra124",
+ .probe = tegra124_cpufreq_probe,
+ .remove = tegra124_cpufreq_remove,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ if (!of_machine_is_compatible("nvidia,tegra124"))
+ return -ENODEV;
+
+ /*
+ * Platform driver+device required for handling EPROBE_DEFER with
+ * the regulator and the DFLL clock
+ */
+ ret = platform_driver_register(&tegra124_cpufreq_platdrv);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+module_init(tegra_cpufreq_init);
+
+MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
+MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL v2");
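tegra124_cpufreq_probe() above unwinds its five acquisitions through a ladder of goto labels, so each failure path releases exactly what was already taken, in reverse order. A compact standalone sketch of the pattern, with malloc() standing in for the clock and regulator handles:

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(16);
	if (!b)
		goto out_free_a;

	c = malloc(16);
	if (!c)
		goto out_free_b;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);
	return 0;

	/* Error unwinding: release in the reverse order of acquisition. */
out_free_b:
	free(b);
out_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}
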
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8084c7f7e206..8084c7f7e206 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 7936dce4b878..344058f8501a 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -176,19 +176,39 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
/**
* cpuidle_state_is_coupled - check if a state is part of a coupled set
- * @dev: struct cpuidle_device for the current cpu
* @drv: struct cpuidle_driver for the platform
* @state: index of the target state in drv->states
*
* Returns true if the target state is coupled with cpus besides this one
*/
-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int state)
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}
/**
+ * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
+ * @drv: struct cpuidle_driver for the platform
+ *
+ * Returns 0 for valid state values, a negative error code otherwise:
+ * * -EINVAL if the safe_state_index of any coupled state is set incorrectly.
+ */
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+ int i;
+
+ for (i = drv->state_count - 1; i >= 0; i--) {
+ if (cpuidle_state_is_coupled(drv, i) &&
+ (drv->safe_state_index == i ||
+ drv->safe_state_index < 0 ||
+ drv->safe_state_index >= drv->state_count))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* cpuidle_coupled_set_ready - mark a cpu as ready
* @coupled: the struct coupled that contains the current cpu
*/
@@ -473,7 +493,7 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
return entered_state;
}
entered_state = cpuidle_enter_state(dev, drv,
- dev->safe_state_index);
+ drv->safe_state_index);
local_irq_disable();
}
@@ -521,7 +541,7 @@ retry:
}
entered_state = cpuidle_enter_state(dev, drv,
- dev->safe_state_index);
+ drv->safe_state_index);
local_irq_disable();
}
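cpuidle_coupled_state_verify() rejects a driver whose safe_state_index is out of range or points at a coupled state, because the coupled path falls back to that state with interrupts enabled. A standalone sketch of the same check over a hypothetical state table; FLAG_COUPLED and the struct layout are invented:

#include <stdio.h>

#define FLAG_COUPLED	(1 << 0)	/* illustrative flag bit */

struct state { unsigned int flags; };

struct driver {
	struct state states[4];
	int state_count;
	int safe_state_index;
};

/* Mirror of the kernel check: the safe state must exist and not be coupled. */
static int coupled_state_verify(const struct driver *drv)
{
	for (int i = drv->state_count - 1; i >= 0; i--) {
		if ((drv->states[i].flags & FLAG_COUPLED) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -22;	/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	struct driver drv = {
		.states = { {0}, {0}, {FLAG_COUPLED}, {FLAG_COUPLED} },
		.state_count = 4,
		.safe_state_index = 1,	/* valid: shallow, non-coupled */
	};

	printf("verify: %d\n", coupled_state_verify(&drv));
	drv.safe_state_index = 2;	/* invalid: points at a coupled state */
	printf("verify: %d\n", coupled_state_verify(&drv));
	return 0;
}
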
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index c13feec89ea1..ea9728fde9b3 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -25,16 +25,21 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
+#include <linux/psci.h>
+
#include <asm/cpuidle.h>
#include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define CALXEDA_IDLE_PARAM \
+ ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+ (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+ (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
static int calxeda_idle_finish(unsigned long val)
{
- const struct psci_power_state ps = {
- .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
- };
- return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+ return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
}
static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
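The conversion above composes the PSCI 0.2 power_state parameter by shifting fields into place instead of filling a struct. Assuming the uapi layout (state id at bit 0, state type at bit 16, affinity level at bit 24, with POWER_DOWN = 1), a sketch that computes the resulting constant:

#include <stdint.h>
#include <stdio.h>

/* Assumed values from uapi/linux/psci.h (PSCI 0.2 power_state layout). */
#define PSCI_0_2_POWER_STATE_ID_SHIFT	0
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT	16
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT	24
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1

int main(void)
{
	uint32_t param =
		(0 << PSCI_0_2_POWER_STATE_ID_SHIFT) |
		(0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
		(PSCI_POWER_STATE_TYPE_POWER_DOWN <<
		 PSCI_0_2_POWER_STATE_TYPE_SHIFT);

	/* Expected: 0x00010000, a power-down request for this core. */
	printf("power_state parameter = 0x%08x\n", param);
	return 0;
}
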
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e8e2775c3821..17a6dc0e2111 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -112,20 +112,27 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
- tick_freeze();
+ /*
+ * trace_suspend_resume() called by tick_freeze() for the last CPU
+ * executing it contains RCU usage regarded as invalid in the idle
+ * context, so tell RCU about that.
+ */
+ RCU_NONIDLE(tick_freeze());
/*
* The state used here cannot be a "coupled" one, because the "coupled"
* cpuidle mechanism enables interrupts and doing that with timekeeping
* suspended is generally unsafe.
*/
+ stop_critical_timings();
drv->states[index].enter_freeze(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
- * last CPU executing it calls functions containing RCU read-side
+ * first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
RCU_NONIDLE(tick_unfreeze());
+ start_critical_timings();
}
/**
@@ -190,7 +197,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
+ stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
+ start_critical_timings();
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -205,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
tick_broadcast_exit();
}
- if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+ if (!cpuidle_state_is_coupled(drv, entered_state))
local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
@@ -254,7 +263,7 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int index)
{
- if (cpuidle_state_is_coupled(dev, drv, index))
+ if (cpuidle_state_is_coupled(drv, index))
return cpuidle_enter_state_coupled(dev, drv, index);
return cpuidle_enter_state(dev, drv, index);
}
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index ee97e9672ecf..f87f399b0540 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -34,19 +34,24 @@ extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int state);
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int next_state);
int cpuidle_coupled_register_device(struct cpuidle_device *dev);
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
#else
-static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int state)
+static inline
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
return false;
}
+static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+ return 0;
+}
+
static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int next_state)
{
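cpuidle.h pairs every CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED declaration with a static inline stub so call sites never carry their own #ifdefs. A minimal sketch of that pattern; the FEATURE_COUPLED macro is invented for illustration:

#include <stdio.h>

/* #define FEATURE_COUPLED 1 */	/* toggle to select the real implementation */

#ifdef FEATURE_COUPLED
int coupled_verify(int safe_index);	/* real version lives elsewhere */
#else
/* Stub: compiles away, so call sites stay unconditional. */
static inline int coupled_verify(int safe_index)
{
	(void)safe_index;
	return 0;
}
#endif

int main(void)
{
	/* The caller is identical whether or not the feature is built in. */
	printf("verify -> %d\n", coupled_verify(1));
	return 0;
}
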
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 5db147859b90..389ade4572be 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv || !drv->state_count)
return -EINVAL;
+ ret = cpuidle_coupled_state_verify(drv);
+ if (ret)
+ return ret;
+
if (cpuidle_disabled())
return -ENODEV;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4044125fb5d5..07bc7aa6b224 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
hashing algorithms.
+config CRYPTO_DEV_SUN4I_SS
+ tristate "Support for Allwinner Security System cryptographic accelerator"
+ depends on ARCH_SUNXI
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_BLKCIPHER
+ help
+ Some Allwinner SoCs have a crypto accelerator named
+ Security System. Select this if you want to use it.
+ The Security System handles AES/DES/3DES ciphers in CBC mode
+ and the SHA1 and MD5 hash algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i-ss.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index e35c07a8da85..c3ced6fbd1b8 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 3b28e8c3de28..192a8fa325c1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (core_dev->dev->ce_base == 0)
+ if (!core_dev->dev->ce_base)
return 0;
writel(PPC4XX_INTERRUPT_CLR,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af9403ab6c..2f0b3337505d 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
sg_init_table(ctx->bufsl, nsg);
sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
if (nsg > 1)
- scatterwalk_sg_chain(ctx->bufsl, nsg,
- req->src);
+ sg_chain(ctx->bufsl, nsg, req->src);
ctx->sg = ctx->bufsl;
} else
ctx->sg = req->src;
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e286e285aa8a..5652a53415dc 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_MXC
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+config CRYPTO_DEV_FSL_CAAM_IMX
+ def_bool SOC_IMX6 || SOC_IMX7D
+ depends on CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_LE
+ def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
+ depends on CRYPTO_DEV_FSL_CAAM
+
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index daca933a82ec..ba79d638f782 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -68,27 +68,29 @@
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 5)
/* length of descriptors text */
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
@@ -111,6 +113,20 @@
#endif
static struct list_head alg_list;
+struct caam_alg_entry {
+ int class1_alg_type;
+ int class2_alg_type;
+ int alg_op;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
@@ -145,18 +161,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
}
/*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
-{
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ivoffset << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
-}
-
-/*
* For ablkcipher encrypt and decrypt, read from req->src and
* write to req->dst
*/
@@ -170,13 +174,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG 1
-#define GIV_DST_CONTIG (1 << 1)
-
-/*
* per-session context
*/
struct caam_ctx {
@@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
- unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
@@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_encrypt shared descriptor */
+ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
KEY_DEST_MDHA_SPLIT | KEY_ENC);
set_jump_tgt_here(desc, key_jump_cmd);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /*
- * NULL encryption; IV is zero
- * assoclen = (assoclen + cryptlen) - cryptlen
- */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Prepare to read and write cryptlen bytes */
+ /* Prepare to read and write cryptlen + assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
- /* old_aead_decrypt shared descriptor */
+ /* aead_decrypt shared descriptor */
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
@@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + ivsize);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
+ /* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Prepare to read and write cryptlen bytes */
+ /* Prepare to read and write cryptlen + assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
- const char *alg_name = crypto_tfm_alg_name(ctfm);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline;
u32 geniv, moveiv;
@@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
u32 *desc;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
-
- if (!ctx->authsize)
- return 0;
+ const bool is_rfc3686 = alg->caam.rfc3686;
/* NULL encryption / decryption */
if (!ctx->enckeylen)
@@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (is_rfc3686)
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ if (alg->caam.geniv)
+ goto skip_enc;
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_encrypt shared descriptor */
+ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
@@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* assoclen + cryptlen = seqinlen - ivsize */
- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
- aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
+ FIFOLDST_VLF);
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
/* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
/* Write ICV */
@@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* old_aead_decrypt shared descriptor */
+ /* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
/* Note: Context registers are saved. */
@@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + ivsize);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
-
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_dec_op1(desc, ctx->class1_alg_type);
/* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
/* Load ICV */
@@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen +
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
@@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+ if (is_rfc3686)
+ goto copy_iv;
+
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
@@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
(ivsize << MOVE_LEN_SHIFT));
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+copy_iv:
/* Copy IV to class 1 context */
append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
(ctx1_iv_off << MOVE_OFFSET_SHIFT) |
@@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* ivsize + cryptlen = seqoutlen - authsize */
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
- /* assoclen = seqinlen - (ivsize + cryptlen) */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
@@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
@@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+skip_givenc:
return 0;
}
@@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
/* Write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Store payload data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void old_aead_unmap(struct device *dev,
- struct aead_edesc *edesc,
- struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
-
- dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
- DMA_TO_DEVICE, edesc->assoc_chained);
-
- caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->src_chained, edesc->dst_nents,
- edesc->dst_chained, edesc->iv_dma, ivsize,
- edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
-}
-
static void ablkcipher_unmap(struct device *dev,
struct ablkcipher_edesc *edesc,
struct ablkcipher_request *req)
@@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
-#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
-
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
- edesc = (struct aead_edesc *)((char *)desc -
- offsetof(struct aead_edesc, hw_desc));
-
- if (err)
- caam_jr_strstatus(jrdev, err);
-
- old_aead_unmap(jrdev, edesc, req);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
- edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->cryptlen +
- ctx->authsize + 4, 1);
-#endif
-
- kfree(edesc);
-
- aead_request_complete(req, err);
-}
-
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
-#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
-
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
- edesc = (struct aead_edesc *)((char *)desc -
- offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
- req->cryptlen - ctx->authsize, 1);
-#endif
-
- if (err)
- caam_jr_strstatus(jrdev, err);
-
- old_aead_unmap(jrdev, edesc, req);
-
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
- err = -EBADMSG;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + req->assoclen +
- ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
- ctx->authsize + 36, 1);
- if (!err && edesc->sec4_sg_bytes) {
- struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
- print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
- sg->length + ctx->authsize + 16, 1);
- }
-#endif
-
- kfree(edesc);
-
- aead_request_complete(req, err);
-}
-
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -2035,91 +1921,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
/*
* Fill in aead job descriptor
*/
-static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
- struct aead_edesc *edesc,
- struct aead_request *req,
- bool all_contig, bool encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
- int authsize = ctx->authsize;
- u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
- bool is_gcm = false;
-
-#ifdef DEBUG
- debug("assoclen %d cryptlen %d authsize %d\n",
- req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
- desc_bytes(sh_desc), 1);
-#endif
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (all_contig) {
- if (is_gcm)
- src_dma = edesc->iv_dma;
- else
- src_dma = sg_dma_address(req->assoc);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
- (edesc->src_nents ? : 1);
- in_options = LDST_SGF;
- }
-
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
- in_options);
-
- if (likely(req->src == req->dst)) {
- if (all_contig) {
- dst_dma = sg_dma_address(req->src);
- } else {
- dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- ((edesc->assoc_nents ? : 1) + 1);
- out_options = LDST_SGF;
- }
- } else {
- if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- }
- if (encrypt)
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
- out_options);
- else
- append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
- out_options);
-}
-
-/*
- * Fill in aead job descriptor
- */
static void init_aead_job(struct aead_request *req,
struct aead_edesc *edesc,
bool all_contig, bool encrypt)
@@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req,
/* End of blank commands */
}
-/*
- * Fill in aead givencrypt job descriptor
- */
-static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct aead_edesc *edesc,
- struct aead_request *req,
- int contig)
+static void init_authenc_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ struct caam_aead_alg, aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ivsize = crypto_aead_ivsize(aead);
- int authsize = ctx->authsize;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
- bool is_gcm = false;
-
-#ifdef DEBUG
- debug("assoclen %d cryptlen %d authsize %d\n",
- req->assoclen, req->cryptlen, authsize);
- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
- req->assoclen , 1);
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
- desc_bytes(sh_desc), 1);
-#endif
+ u32 ivoffset = 0;
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ivoffset = 16;
- if (contig & GIV_SRC_CONTIG) {
- if (is_gcm)
- src_dma = edesc->iv_dma;
- else
- src_dma = sg_dma_address(req->assoc);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
- in_options);
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686)
+ ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
- if (contig & GIV_DST_CONTIG) {
- dst_dma = edesc->iv_dma;
- } else {
- if (likely(req->src == req->dst)) {
- dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- (edesc->assoc_nents +
- (is_gcm ? 1 + edesc->src_nents : 0));
- out_options = LDST_SGF;
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- }
+ init_aead_job(req, edesc, all_contig, encrypt);
- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
- out_options);
+ if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ append_load_as_imm(desc, req->iv, ivsize,
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ivoffset << LDST_OFFSET_SHIFT));
}
/*
@@ -2392,150 +2156,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
/*
* allocate and map the aead extended descriptor
*/
-static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
- int desc_bytes,
- bool *all_contig_ptr,
- bool encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0;
- struct aead_edesc *edesc;
- dma_addr_t iv_dma = 0;
- int sgc;
- bool all_contig = true;
- bool assoc_chained = false, src_chained = false, dst_chained = false;
- int ivsize = crypto_aead_ivsize(aead);
- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- unsigned int authsize = ctx->authsize;
- bool is_gcm = false;
-
- assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-
- if (unlikely(req->dst != req->src)) {
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- dst_nents = sg_count(req->dst,
- req->cryptlen +
- (encrypt ? authsize : (-authsize)),
- &dst_chained);
- } else {
- src_nents = sg_count(req->src,
- req->cryptlen +
- (encrypt ? authsize : 0),
- &src_chained);
- }
-
- sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
- if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
- } else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
- }
-
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- return ERR_PTR(-ENOMEM);
- }
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- /*
- * Check if data are contiguous.
- * GCM expected input sequence: IV, AAD, text
- * All other - expected input sequence: AAD, IV, text
- */
- if (is_gcm)
- all_contig = (!assoc_nents &&
- iv_dma + ivsize == sg_dma_address(req->assoc) &&
- !src_nents && sg_dma_address(req->assoc) +
- req->assoclen == sg_dma_address(req->src));
- else
- all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
- req->assoclen == iv_dma && !src_nents &&
- iv_dma + ivsize == sg_dma_address(req->src));
- if (!all_contig) {
- assoc_nents = assoc_nents ? : 1;
- src_nents = src_nents ? : 1;
- sec4_sg_len = assoc_nents + 1 + src_nents;
- }
-
- sec4_sg_len += dst_nents;
-
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->assoc_nents = assoc_nents;
- edesc->assoc_chained = assoc_chained;
- edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
- edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
- edesc->iv_dma = iv_dma;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- *all_contig_ptr = all_contig;
-
- sec4_sg_index = 0;
- if (!all_contig) {
- if (!is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
-
- if (is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- sg_to_sec4_sg_last(req->src,
- src_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += src_nents;
- }
- if (dst_nents) {
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return ERR_PTR(-ENOMEM);
- }
-
- return edesc;
-}
-
-/*
- * allocate and map the aead extended descriptor
- */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int desc_bytes, bool *all_contig_ptr,
bool encrypt)
@@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
}
-static int old_aead_encrypt(struct aead_request *req)
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_encrypt(req);
+}
+
+static int aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, true);
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+ &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
- all_contig, true);
+ init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- old_aead_unmap(jrdev, edesc, req);
+ aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req)
return ret;
}
-static int old_aead_decrypt(struct aead_request *req)
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, false);
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+ &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->cryptlen, 1);
+ req->assoclen + req->cryptlen, 1);
#endif
/* Create and submit job descriptor*/
- old_init_aead_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+ init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,232 +2423,29 @@ static int old_aead_decrypt(struct aead_request *req)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- old_aead_unmap(jrdev, edesc, req);
+ aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the aead extended descriptor for aead givencrypt
- */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
- *greq, int desc_bytes,
- u32 *contig_ptr)
-{
- struct aead_request *req = &greq->areq;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0;
- struct aead_edesc *edesc;
- dma_addr_t iv_dma = 0;
- int sgc;
- u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
- int ivsize = crypto_aead_ivsize(aead);
- bool assoc_chained = false, src_chained = false, dst_chained = false;
- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- bool is_gcm = false;
-
- assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
- &dst_chained);
-
- sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
- if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
- } else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
- }
-
- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- return ERR_PTR(-ENOMEM);
- }
-
- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
- OP_ALG_ALGSEL_AES) &&
- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
- is_gcm = true;
-
- /*
- * Check if data are contiguous.
- * GCM expected input sequence: IV, AAD, text
- * All other - expected input sequence: AAD, IV, text
- */
-
- if (is_gcm) {
- if (assoc_nents || iv_dma + ivsize !=
- sg_dma_address(req->assoc) || src_nents ||
- sg_dma_address(req->assoc) + req->assoclen !=
- sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
- } else {
- if (assoc_nents ||
- sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
- src_nents || iv_dma + ivsize != sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
- }
-
- if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
- contig &= ~GIV_DST_CONTIG;
-
- if (!(contig & GIV_SRC_CONTIG)) {
- assoc_nents = assoc_nents ? : 1;
- src_nents = src_nents ? : 1;
- sec4_sg_len += assoc_nents + 1 + src_nents;
- if (req->src == req->dst &&
- (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
- contig &= ~GIV_DST_CONTIG;
- }
-
- /*
- * Add new sg entries for GCM output sequence.
- * Expected output sequence: IV, encrypted text.
- */
- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
- sec4_sg_len += 1 + src_nents;
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = dst_nents ? : 1;
- sec4_sg_len += 1 + dst_nents;
- }
-
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->assoc_nents = assoc_nents;
- edesc->assoc_chained = assoc_chained;
- edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
- edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
- edesc->iv_dma = iv_dma;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- *contig_ptr = contig;
-
- sec4_sg_index = 0;
- if (!(contig & GIV_SRC_CONTIG)) {
- if (!is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
-
- if (is_gcm) {
- sg_to_sec4_sg_len(req->assoc, req->assoclen,
- edesc->sec4_sg + sec4_sg_index);
- sec4_sg_index += assoc_nents;
- }
-
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += src_nents;
- }
-
- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
-
- if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
- iv_dma, ivsize, 0);
- sec4_sg_index += 1;
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- }
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return ERR_PTR(-ENOMEM);
- }
-
- return edesc;
-}
-
-static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
+static int aead_givdecrypt(struct aead_request *req)
{
- struct aead_request *req = &areq->areq;
- struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 contig;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &contig);
-
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->cryptlen, 1);
-#endif
+ unsigned int ivsize = crypto_aead_ivsize(aead);
- /* Create and submit job descriptor*/
- init_aead_giv_job(ctx->sh_desc_givenc,
- ctx->sh_desc_givenc_dma, edesc, req, contig);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- old_aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ req->cryptlen -= ivsize;
+ req->assoclen += ivsize;
- return ret;
-}
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
- return old_aead_encrypt(&areq->areq);
+ return aead_decrypt(req);
}
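/*
 * Reviewer note: for the geniv (echainiv/seqiv) variants the IV
 * travels as the first ivsize bytes of the ciphertext. aead_givdecrypt()
 * folds those bytes into the associated data, so they are authenticated
 * but not decrypted, and then reuses the plain aead_decrypt() path.
 */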
/*
@@ -3072,8 +2503,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(*edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3347,7 +2778,6 @@ struct caam_alg_template {
u32 type;
union {
struct ablkcipher_alg ablkcipher;
- struct old_aead_alg aead;
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
@@ -3355,753 +2785,1426 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ }
+};
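/*
 * Reviewer note: a minimal sketch of how template tables like
 * driver_algs[] above and driver_aeads[] below are typically consumed
 * at module init time (the init helper name here is illustrative, not
 * necessarily what this patch uses; crypto_register_aead() is the
 * stock crypto API call):
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 *		struct caam_aead_alg *t_alg = driver_aeads + i;
 *
 *		caam_aead_alg_init(t_alg);	// illustrative helper
 *		err = crypto_register_aead(&t_alg->aead);
 *		if (err)
 *			continue;
 *		t_alg->registered = true;	// field of struct caam_aead_alg
 *	}
 */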
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
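/*
 * Reviewer note: the rfc4106/rfc4543 entries use the 8-byte explicit
 * IV mandated by IPsec, while plain gcm(aes) exposes the full 12-byte
 * GCM nonce as its IV, hence .ivsize = 8 versus .ivsize = 12.
 */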
/* single-pass ipsec_esp descriptor */
{
- .name = "authenc(hmac(md5),ecb(cipher_null))",
- .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha1),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha224),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha256),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha384),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(sha512),ecb(cipher_null))",
- .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
- .blocksize = NULL_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "ecb(cipher_null))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "ecb-cipher_null-caam",
+ .cra_blocksize = NULL_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = aead_null_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = 0,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(aes))",
- .driver_name = "authenc-hmac-md5-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
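/*
 * Reviewer note: from here on each plain authenc() entry is paired
 * with an echainiv() wrapped twin. The .geniv flag marks the variant
 * whose IV is generated by the template and shipped at the head of
 * the ciphertext, which is also why its .decrypt hook is
 * aead_givdecrypt rather than aead_decrypt.
 */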
{
- .name = "authenc(hmac(sha1),cbc(aes))",
- .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(aes))",
- .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(aes))",
- .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(aes))",
- .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
-
{
- .name = "authenc(hmac(sha512),cbc(aes))",
- .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(des3_ede))",
- .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ }
},
{
- .name = "authenc(hmac(sha1),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),cbc(des3_ede))",
- .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(md5),cbc(des))",
- .driver_name = "authenc-hmac-md5-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha1),cbc(des))",
- .driver_name = "authenc-hmac-sha1-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha224),cbc(des))",
- .driver_name = "authenc-hmac-sha224-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),cbc(des))",
- .driver_name = "authenc-hmac-sha256-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha384),cbc(des))",
- .driver_name = "authenc-hmac-sha384-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),cbc(des))",
- .driver_name = "authenc-hmac-sha512-cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
},
{
- .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
- .ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
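/*
 * Reviewer note: .rfc3686 = true flags RFC 3686 operation (CTR mode
 * with a per-tfm nonce): the trailing CTR_RFC3686_NONCE_SIZE bytes of
 * the key supplied to setkey are the nonce, as the enlarged
 * min/max_keysize of the rfc3686(ctr(aes)) ablkcipher template also
 * shows; the flag presumably steers the shared-descriptor construction.
 */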
{
- .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
{
- .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
- .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = old_aead_encrypt,
- .decrypt = old_aead_decrypt,
- .givencrypt = old_aead_givencrypt,
- .geniv = "<built-in>",
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
- OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
- /* ablkcipher descriptor */
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
+ },
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- },
- {
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
- }
-};
-
-struct caam_alg_entry {
- int class1_alg_type;
- int class2_alg_type;
- int alg_op;
-};
-
-struct caam_aead_alg {
- struct aead_alg aead;
- struct caam_alg_entry caam;
- bool registered;
-};
-
-static struct caam_aead_alg driver_aeads[] = {
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ },
+ },
{
.aead = {
.base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aes-caam",
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = rfc4106_setkey,
- .setauthsize = rfc4106_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
},
},
{
.aead = {
.base = {
- .cra_name = "rfc4543(gcm(aes))",
- .cra_driver_name = "rfc4543-gcm-aes-caam",
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = rfc4543_setkey,
- .setauthsize = rfc4543_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
},
},
- /* Galois Counter Mode */
{
.aead = {
.base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-caam",
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam",
.cra_blocksize = 1,
},
- .setkey = gcm_setkey,
- .setauthsize = gcm_setauthsize,
- .encrypt = gcm_encrypt,
- .decrypt = gcm_decrypt,
- .ivsize = 12,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_givdecrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.caam = {
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .rfc3686 = true,
+ .geniv = true,
},
},
};
@@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
struct caam_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
@@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
break;
- case CRYPTO_ALG_TYPE_AEAD:
- alg->cra_type = &crypto_aead_type;
- alg->cra_aead = template->template_aead;
- break;
}
t_alg->caam.class1_alg_type = template->class1_alg_type;
@@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
+ struct caam_drv_private *priv;
int i = 0, err = 0;
+ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void)
INIT_LIST_HEAD(&alg_list);
- /* register crypto algorithms the device supports */
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ /* If MD is present, limit digest size based on LP256 */
+ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ md_limit = SHA256_DIGEST_SIZE;
+
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ struct caam_alg_template *alg = driver_algs + i;
+ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
- t_alg = caam_alg_alloc(&driver_algs[i]);
+ t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
@@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if (alg_aai == OP_ALG_AAI_GCM)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+ if (c2_alg_sel &&
+ (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+ continue;
caam_aead_alg_init(t_alg);
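
The registration gating introduced above reduces to a single predicate: read the CHA version/instantiation words once, then drop any template whose class-1 selector names a block this part does not instantiate, or whose digest exceeds a low-power MD block's limit. A minimal standalone sketch of that predicate (the caps struct and helper name are illustrative, not from the driver):

struct caam_caps {
	bool des_inst;
	bool aes_inst;
	bool md_inst;
	unsigned int md_limit;	/* largest supported digest, in bytes */
};

static bool caam_alg_supported(const struct caam_caps *caps,
			       u32 c1_alg_sel, u32 c2_alg_sel,
			       unsigned int maxauthsize)
{
	if (!caps->des_inst &&
	    (c1_alg_sel == OP_ALG_ALGSEL_DES ||
	     c1_alg_sel == OP_ALG_ALGSEL_3DES))
		return false;

	if (!caps->aes_inst && c1_alg_sel == OP_ALG_ALGSEL_AES)
		return false;

	/* a class-2 (auth) selector implies an MD block of adequate size */
	if (c2_alg_sel && (!caps->md_inst || maxauthsize > caps->md_limit))
		return false;

	return true;
}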
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..94433b9fc200 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -127,7 +127,7 @@ struct caam_hash_state {
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_1;
- u8 caam_ctx[MAX_CTX_LEN];
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
buf, state->buf_dma,
- *buflen, last_buflen);
+ *next_buflen, *buflen);
if (src_nents) {
src_map_to_sec4_sg(jrdev, req->src, src_nents,
@@ -909,17 +909,18 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1005,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1091,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
- DESC_JOB_IO_LEN, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1165,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1245,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1353,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1448,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1842,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template,
struct ahash_alg *halg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
@@ -1884,8 +1884,10 @@ static int __init caam_algapi_hash_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
int i = 0, err = 0;
+ struct caam_drv_private *priv;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ u32 cha_inst, cha_vid;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -1911,19 +1913,40 @@ static int __init caam_algapi_hash_init(void)
if (!priv)
return -ENODEV;
+ /*
+ * Register crypto algorithms the device supports. First, identify
+ * presence and attributes of MD block.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+ return -ENODEV;
+
+ /* Limit digest size based on LP256 */
+ if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
- /* TODO: check if h/w supports alg */
struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* If MD size is not supported by device, skip registration */
+ if (alg->template_ahash.halg.digestsize > md_limit)
+ continue;
/* register hmac version */
- t_alg = caam_hash_alloc(&driver_hash[i], true);
+ t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
@@ -1936,11 +1959,10 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
/* register unkeyed version */
- t_alg = caam_hash_alloc(&driver_hash[i], false);
+ t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n",
- driver_hash[i].driver_name);
+ pr_warn("%s alg allocation failed\n", alg->driver_name);
continue;
}
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 5095337205b8..9b92af2c7241 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
atomic_set(&bd->empty, BUF_NOT_EMPTY);
complete(&bd->filled);
+
+ /* Buffer refilled, invalidate cache */
+ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
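
The dma_sync_single_for_cpu() added above follows the standard streaming-DMA ownership handoff: the device owns the buffer while filling it, and the CPU must re-synchronize (invalidating stale cache lines) before reading it back. The general pattern, sketched with placeholder names (consume() is hypothetical):

/* map once; the device may now DMA into buf */
dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, addr))
	return -ENOMEM;

/* ... submit the job and wait for completion ... */

/* hand ownership back to the CPU before touching buf */
dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
consume(buf, len);

/* if the device will refill the buffer, hand it back */
dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);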
@@ -311,7 +315,7 @@ static int __init caam_rng_init(void)
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
- void *priv;
+ struct caam_drv_private *priv;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -338,20 +342,32 @@ static int __init caam_rng_init(void)
if (!priv)
return -ENODEV;
+ /* Check for an instantiated RNG before registration */
+ if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+ return -ENODEV;
+
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
- if (!rng_ctx)
- return -ENOMEM;
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ if (!rng_ctx) {
+ err = -ENOMEM;
+ goto free_caam_alloc;
+ }
err = caam_init_rng(rng_ctx, dev);
if (err)
- return err;
+ goto free_rng_ctx;
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+ kfree(rng_ctx);
+free_caam_alloc:
+ caam_jr_free(dev);
+ return err;
}
module_init(caam_rng_init);
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f57f395db33f..b6955ecdfb3f 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+#include <linux/clk.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index efacab7539ef..8abb4bc548cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,6 +16,24 @@
#include "error.h"
/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+ char *clk_name)
+{
+ return devm_clk_get(dev, clk_name);
+}
+#else
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+ char *clk_name)
+{
+ return NULL;
+}
+#endif
+
+/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
@@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- wr_reg32(&deco->jr_ctl_hi, flags);
+ setbits32(&deco->jr_ctl_hi, flags);
timeout = 10000000;
do {
@@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
- u32 *desc, status, rdsta_val;
+ u32 *desc, status = 0, rdsta_val;
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
@@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* CAAM eras), then try again.
*/
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
- if (status || !(rdsta_val & (1 << sh_idx)))
+ if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+ !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
break;
@@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev)
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
- int ring, ret = 0;
+ int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
@@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(ctrl);
- return ret;
+ /* shut clocks off before finalizing shutdown */
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+
+ return 0;
}
/*
@@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
int caam_get_era(void)
{
struct device_node *caam_node;
- for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
- const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
- "fsl,sec-era",
- NULL);
- return prop ? *prop : -ENOTSUPP;
- }
+ int ret;
+ u32 prop;
- return -ENOTSUPP;
+ caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+ of_node_put(caam_node);
+
+ return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
@@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
+ struct clk *clk;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev)
int pg_size;
int BLOCK_OFFSET = 0;
- ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
- GFP_KERNEL);
+ ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
@@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
+ /* Enable clocking */
+ clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM ipg clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_ipg = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "mem");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_mem = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM aclk clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_aclk = clk;
+
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
+
+ ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+ ret);
+ goto disable_caam_ipg;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+ goto disable_caam_mem;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
+
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
if (ctrl == NULL) {
dev_err(dev, "caam: of_iomap() failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto disable_caam_emi_slow;
}
/* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- setbits32(&ctrl->mcr, MCFGR_WDENABLE |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+ MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
- sizeof(struct platform_device *) * rspec,
- GFP_KERNEL);
+ ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+ sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
- iounmap(ctrl);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto iounmap_ctrl;
}
ring = 0;
@@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
- caam_remove(pdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto caam_remove;
}
cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
- caam_remove(pdev);
- return ret;
+ goto caam_remove;
}
/*
* Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
&ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
+
+caam_remove:
+ caam_remove(pdev);
+iounmap_ctrl:
+ iounmap(ctrl);
+disable_caam_emi_slow:
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+ clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
+ return ret;
}
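
The reworked error path in caam_probe() keeps one strict invariant: the unwind labels release resources in exactly the reverse order of acquisition, and each failing step jumps to the label that undoes everything acquired before it, never the failing step itself. Reduced to a skeleton under hypothetical names:

static int enable_clocks(struct clk *a, struct clk *b, struct clk *c)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = clk_prepare_enable(b);
	if (ret)
		goto disable_a;

	ret = clk_prepare_enable(c);
	if (ret)
		goto disable_b;

	return 0;

disable_b:
	clk_disable_unprepare(b);
disable_a:
	clk_disable_unprepare(a);
	return ret;
}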
static struct of_device_id caam_match[] = {
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index d397ff9d56fd..983d663ef671 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,12 +8,29 @@
#ifndef DESC_H
#define DESC_H
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL-compile-time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last entry in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK 0x00001fff
+
struct sec4_sg_entry {
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ u32 rsvd1;
+ dma_addr_t ptr;
+#else
u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
- u8 reserved;
+ u8 rsvd2;
u8 buf_pool_id;
u16 offset;
};
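
With these definitions, a sec4_sg_entry's 32-bit len word packs the byte count together with control flags. Composing and decoding one is straightforward; a brief sketch (n is an arbitrary length):

/* compose the len word for the final entry of a table */
u32 len_word = (n & SEC4_SG_LEN_MASK) | SEC4_SG_LEN_FIN;

/* decode */
u32 length = len_word & SEC4_SG_LEN_MASK;
bool last = len_word & SEC4_SG_LEN_FIN;
bool ext = len_word & SEC4_SG_LEN_EXT;	/* entry points to another table */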
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 9f79fd7bd4d7..98d07de24fc4 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -367,7 +367,7 @@ do { \
if (upper) \
append_u64(desc, data); \
else \
- append_u32(desc, data); \
+ append_u32(desc, lower_32_bits(data)); \
} while (0)
#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 89b94cc9e7a2..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -91,6 +91,11 @@ struct caam_drv_private {
Handles of the RNG4 block are initialized
by this driver */
+ struct clk *caam_ipg;
+ struct clk *caam_mem;
+ struct clk *caam_aclk;
+ struct clk *caam_emi_slow;
+
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b8b5d47acd7a..f7e0d8d4c3da 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg)
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
+ /*
+ * Make sure all information from the job has been obtained
+ * before telling CAAM that the job has been removed from the
+ * output ring.
+ */
+ mb();
+
/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
@@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ /*
+ * Guarantee that the descriptor's DMA address has been written to
+ * the next slot in the ring before the write index is updated, since
+ * other cores may update this index independently.
+ */
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+ /*
+ * Ensure that all job information has been written before
+ * notifying CAAM that a new job was added to the input ring.
+ */
+ wmb();
+
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_bh(&jrp->inplock);
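
Taken together with the mb() added in caam_jr_dequeue() above, these barriers implement a conventional producer/consumer ring protocol: publish the payload, order it before publishing the index to other CPUs, and order both before ringing the device doorbell; the consumer reads everything it needs before releasing the slot. In outline (a sketch, not literal driver code):

/* Producer (enqueue):                    Consumer (dequeue):
 *   ring[widx] = payload;                  status  = out[ridx].jrstatus;
 *   smp_wmb();                             payload = out[ridx].desc;
 *   widx = next(widx);                     mb();
 *   wmb();                                 wr_reg32(&rregs->outring_rmvd, 1);
 *   wr_reg32(&rregs->inpring_jobadd, 1);
 */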
@@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev)
goto out_free_irq;
error = -ENOMEM;
- jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- &inpbusaddr, GFP_KERNEL);
+ jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+ JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
if (!jrp->inpring)
goto out_free_irq;
- jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+ jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
if (!jrp->outring)
goto out_free_inpring;
- jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
- GFP_KERNEL);
+ jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
if (!jrp->entinfo)
goto out_free_outring;
@@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
- GFP_KERNEL);
+ jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 672c97489505..a8a79975682f 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -65,9 +65,31 @@
*
*/
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARM */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+
+#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v) out_arch(l, le32, a, v)
+#define in_le32(a) in_arch(l, le32, a)
+
+#define out_be32(a, v) out_arch(l, be32, a, v)
+#define in_be32(a) in_arch(l, be32, a)
+
+#define clrsetbits(type, addr, clear, set) \
+ out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+#endif
+
#ifdef __BIG_ENDIAN
#define wr_reg32(reg, data) out_be32(reg, data)
#define rd_reg32(reg) in_be32(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) out_be64(reg, data)
#define rd_reg64(reg) in_be64(reg)
@@ -76,6 +98,7 @@
#ifdef __LITTLE_ENDIAN
#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
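
clrsetbits_32() gives the driver an endian-neutral read-modify-write on device registers, which is what the MCFGR update in ctrl.c above relies on. Expanded by hand for the big-endian case, the macro is simply:

/* clrsetbits_be32(addr, clear, set) expands to: */
out_be32(addr, (in_be32(addr) & ~(clear)) | (set));

so that MCFGR call clears only the AWCACHE field while setting cacheable AXI writes, the DECO watchdogs and, on 64-bit-DMA kernels, long pointers, leaving every other MCFGR bit untouched.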
@@ -85,20 +108,31 @@
/*
* The only user of these wr/rd_reg64 functions is the Job Ring (JR).
- * The DMA address registers in the JR are a pair of 32-bit registers.
- * The layout is:
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
*
* base + 0x0000 : most-significant 32 bits
* base + 0x0004 : least-significant 32 bits
*
* The 32-bit version of this core therefore has to write to base + 0x0004
- * to set the 32-bit wide DMA address. This seems to be independent of the
- * endianness of the written/read data.
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ * base + 0x0000 : least-significant 32 bits
+ * base + 0x0004 : most-significant 32 bits
*/
#ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+ defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
#define REG64_MS32(reg) ((u32 __iomem *)(reg))
#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
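	/*
	 * (Body elided by this hunk.) With the REG64_*32() selectors above,
	 * the 32-bit implementation presumably reduces to two 32-bit writes,
	 * along the lines of:
	 *
	 *	wr_reg32(REG64_MS32(reg), data >> 32);
	 *	wr_reg32(REG64_LS32(reg), lower_32_bits(data));
	 *
	 * so the per-platform MS/LS word order is decided entirely by the
	 * address arithmetic, never by the caller.
	 */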
@@ -133,18 +167,28 @@ struct jr_outentry {
#define CHA_NUM_MS_DECONUM_SHIFT 24
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
#define CHA_ID_LS_AES_SHIFT 0
-#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
-#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
#define CHA_ID_LS_ARC4_SHIFT 8
#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -395,10 +439,16 @@ struct caam_ctrl {
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF (0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH (0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL (0x4 << MCFGR_ARCACHE_SHIFT)
/* AXI write cache control */
#define MCFGR_AWCACHE_SHIFT 8
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF (0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH (0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL (0x8 << MCFGR_AWCACHE_SHIFT)
/* AXI pipeline depth */
#define MCFGR_AXIPIPE_SHIFT 4
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b68b74cc7b77..18cd6d1f5870 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
{
sec4_sg_ptr->ptr = dma;
sec4_sg_ptr->len = len;
- sec4_sg_ptr->reserved = 0;
sec4_sg_ptr->buf_pool_id = 0;
sec4_sg_ptr->offset = offset;
#ifdef DEBUG
@@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
{
if (unlikely(chained)) {
int i;
+ struct scatterlist *tsg = sg;
+
+ /*
+ * Use a local copy of the sg pointer to avoid moving the
+ * head of the list pointed to by sg as we walk the list.
+ */
for (i = 0; i < nents; i++) {
- dma_unmap_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
+ dma_unmap_sg(dev, tsg, 1, dir);
+ tsg = sg_next(tsg);
}
} else if (nents) {
dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
struct device *dev, struct scatterlist *sg, unsigned int nents,
enum dma_data_direction dir, bool chained)
{
- struct scatterlist *first = sg;
-
if (unlikely(chained)) {
int i;
+ struct scatterlist *tsg = sg;
+
+ /*
+ * Use a local copy of the sg pointer to avoid moving the
+ * head of the list pointed to by sg as we walk the list.
+ */
for (i = 0; i < nents; i++) {
- if (!dma_map_sg(dev, sg, 1, dir)) {
- dma_unmap_sg_chained(dev, first, i, dir,
+ if (!dma_map_sg(dev, tsg, 1, dir)) {
+ dma_unmap_sg_chained(dev, sg, i, dir,
chained);
nents = 0;
break;
}
- sg = sg_next(sg);
+ tsg = sg_next(tsg);
}
} else
nents = dma_map_sg(dev, sg, nents, dir);
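
These helpers keep mapping and unmapping symmetric for both chained and flat scatterlists; judging by the two hunks, a caller pairs them as in this hedged sketch (error handling reduced to the mapping failure itself):

int mapped = dma_map_sg_chained(dev, req_sg, nents, DMA_TO_DEVICE, chained);
if (!mapped)
	return -ENOMEM;

/* ... build descriptors over the mapped entries and run the job ... */

dma_unmap_sg_chained(dev, req_sg, nents, DMA_TO_DEVICE, chained);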
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index f2e6de361fd1..bb241c3ab6b9 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
{ "AMDI0C00", 0 },
{ },
};
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#endif
#ifdef CONFIG_OF
@@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a" },
{ },
};
+MODULE_DEVICE_TABLE(of, ccp_of_match);
#endif
static struct platform_driver ccp_platform_driver = {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index ad47d0d61098..68e8aa90fe01 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
if (!hdev->dma_lch) {
- dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+ dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
return -EBUSY;
}
dma_conf.direction = DMA_MEM_TO_DEV;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..8f2790353281 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -156,7 +156,8 @@ struct ablk_ctx {
};
struct aead_ctx {
- struct buffer_desc *buffer;
+ struct buffer_desc *src;
+ struct buffer_desc *dst;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
@@ -198,6 +199,15 @@ struct ixp_alg {
int registered;
};
+struct ixp_aead_alg {
+ struct aead_alg crypto;
+ const struct ix_hash_algo *hash;
+ u32 cfg_enc;
+ u32 cfg_dec;
+
+ int registered;
+};
+
static const struct ix_hash_algo hash_alg_md5 = {
.cfgword = 0xAA010004,
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int authsize = crypto_aead_authsize(tfm);
- int decryptlen = req->cryptlen - authsize;
+ int decryptlen = req->assoclen + req->cryptlen - authsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
- req->src, decryptlen, authsize, 1);
+ req->dst, decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
@@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
- free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
@@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
return init_tfm(tfm);
}
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
{
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct aead_ctx));
- return init_tfm(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+ return init_tfm(crypto_aead_tfm(tfm));
}
static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
free_sa_dir(&ctx->decrypt);
}
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+ exit_tfm(crypto_aead_tfm(tfm));
+}
+
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
@@ -905,7 +920,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
@@ -970,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
return ret;
}
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
- unsigned int nbytes)
-{
- int offset = 0;
-
- if (!nbytes)
- return 0;
-
- for (;;) {
- if (start < offset + sg->length)
- break;
-
- offset += sg->length;
- sg = sg_next(sg);
- }
- return (start + nbytes > offset + sg->length);
-}
-
static int aead_perform(struct aead_request *req, int encrypt,
int cryptoffset, int eff_cryptlen, u8 *iv)
{
@@ -1003,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
struct device *dev = &pdev->dev;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
+ enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+ unsigned int lastlen;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
@@ -1031,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
crypt->crypt_len = eff_cryptlen;
crypt->auth_offs = 0;
- crypt->auth_len = req->assoclen + ivsize + cryptlen;
+ crypt->auth_len = req->assoclen + cryptlen;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ req_ctx->dst = NULL;
+
if (req->src != req->dst) {
- BUG(); /* -ENOTSUP because of my laziness */
+ struct buffer_desc dst_hook;
+
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ src_direction = DMA_TO_DEVICE;
+
+ buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+ &dst_hook, flags, DMA_FROM_DEVICE);
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
+
+ if (!buf)
+ goto free_buf_dst;
+
+ if (encrypt) {
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+ }
}
- /* ASSOC data */
- buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
- flags, DMA_TO_DEVICE);
- req_ctx->buffer = src_hook.next;
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
if (!buf)
- goto out;
- /* IV */
- sg_init_table(&req_ctx->ivlist, 1);
- sg_set_buf(&req_ctx->ivlist, iv, ivsize);
- buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
- DMA_BIDIRECTIONAL);
- if (!buf)
- goto free_chain;
- if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+ goto free_buf_src;
+
+ if (!encrypt || !req_ctx->dst) {
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+ }
+
+ if (unlikely(lastlen < authsize)) {
/* The 12 hmac bytes are scattered;
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_chain;
+ goto free_buf_src;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1068,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
} else {
req_ctx->hmac_virt = NULL;
}
- /* Crypt */
- buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
- DMA_BIDIRECTIONAL);
- if (!buf)
- goto free_hmac_virt;
- if (!req_ctx->hmac_virt) {
- crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
- }
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_hmac_virt:
- if (req_ctx->hmac_virt) {
- dma_pool_free(buffer_pool, req_ctx->hmac_virt,
- crypt->icv_rev_aes);
- }
-free_chain:
- free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
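
The ICV handling in this rework follows one rule: crypt->icv_rev_aes must end up addressing the last authsize bytes of the authenticated stream. When those bytes are contiguous in the tail buffer of the chain, point directly into it; when they straddle buffers (lastlen < authsize), stage them in a bounce buffer from the DMA pool instead. A sketch of that decision as a hypothetical helper:

/* buf is the last buffer_desc in the chain */
static bool icv_in_place(struct crypt_ctl *crypt, struct buffer_desc *buf,
			 unsigned int authsize)
{
	if (buf->buf_len < authsize)
		return false;	/* caller must bounce via dma_pool_alloc() */

	crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	return true;
}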
@@ -1174,40 +1181,12 @@ badkey:
static int aead_encrypt(struct aead_request *req)
{
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- return aead_perform(req, 1, req->assoclen + ivsize,
- req->cryptlen, req->iv);
+ return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}
static int aead_decrypt(struct aead_request *req)
{
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- return aead_perform(req, 0, req->assoclen + ivsize,
- req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
- struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- unsigned len, ivsize = crypto_aead_ivsize(tfm);
- __be64 seq;
-
- /* copied from eseqiv.c */
- if (!ctx->salted) {
- get_random_bytes(ctx->salt, ivsize);
- ctx->salted = 1;
- }
- memcpy(req->areq.iv, ctx->salt, ivsize);
- len = ivsize;
- if (ivsize > sizeof(u64)) {
- memset(req->giv, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + ivsize - len, &seq, len);
- return aead_perform(&req->areq, 1, req->areq.assoclen,
- req->areq.cryptlen +ivsize, req->giv);
+ return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}
static struct ixp_alg ixp4xx_algos[] = {
@@ -1320,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des))",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des))",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_u = { .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_u = { .aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
- }
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
@@ -1437,32 +1413,20 @@ static int __init ixp_module_init(void)
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
continue;
}
- if (!ixp4xx_algos[i].hash) {
- /* block ciphers */
- cra->cra_type = &crypto_ablkcipher_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- if (!cra->cra_ablkcipher.setkey)
- cra->cra_ablkcipher.setkey = ablk_setkey;
- if (!cra->cra_ablkcipher.encrypt)
- cra->cra_ablkcipher.encrypt = ablk_encrypt;
- if (!cra->cra_ablkcipher.decrypt)
- cra->cra_ablkcipher.decrypt = ablk_decrypt;
- cra->cra_init = init_tfm_ablk;
- } else {
- /* authenc */
- cra->cra_type = &crypto_aead_type;
- cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
- cra->cra_aead.setkey = aead_setkey;
- cra->cra_aead.setauthsize = aead_setauthsize;
- cra->cra_aead.encrypt = aead_encrypt;
- cra->cra_aead.decrypt = aead_decrypt;
- cra->cra_aead.givencrypt = aead_givencrypt;
- cra->cra_init = init_tfm_aead;
- }
+
+ /* block ciphers */
+ cra->cra_type = &crypto_ablkcipher_type;
+ cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ if (!cra->cra_ablkcipher.setkey)
+ cra->cra_ablkcipher.setkey = ablk_setkey;
+ if (!cra->cra_ablkcipher.encrypt)
+ cra->cra_ablkcipher.encrypt = ablk_encrypt;
+ if (!cra->cra_ablkcipher.decrypt)
+ cra->cra_ablkcipher.decrypt = ablk_decrypt;
+ cra->cra_init = init_tfm_ablk;
+
cra->cra_ctxsize = sizeof(struct ixp_ctx);
cra->cra_module = THIS_MODULE;
cra->cra_alignmask = 3;
@@ -1474,6 +1438,38 @@ static int __init ixp_module_init(void)
else
ixp4xx_algos[i].registered = 1;
}
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ continue;
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+ continue;
+
+ /* authenc */
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC;
+ cra->setkey = aead_setkey;
+ cra->setauthsize = aead_setauthsize;
+ cra->encrypt = aead_encrypt;
+ cra->decrypt = aead_decrypt;
+ cra->init = init_tfm_aead;
+ cra->exit = exit_tfm_aead;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+
+ if (crypto_register_aead(cra))
+ printk(KERN_ERR "Failed to register '%s'\n",
+ cra->base.cra_driver_name);
+ else
+ ixp4xx_aeads[i].registered = 1;
+ }
return 0;
}
@@ -1482,6 +1478,11 @@ static void __exit ixp_module_exit(void)
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ if (ixp4xx_aeads[i].registered)
+ crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+ }
+
for (i=0; i< num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 1c6f98dd88f4..0643e3366e33 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
},
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index e421c96c763a..ad7552a6998c 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
config CRYPTO_DEV_NX_COMPRESS
tristate "Compression acceleration support"
default y
+ select CRYPTO_ALGAPI
+ select 842_DECOMPRESS
help
Support for PowerPC Nest (NX) compression acceleration. This
module supports acceleration for compressing memory with the 842
- algorithm. One of the platform drivers must be selected also.
- If you choose 'M' here, this module will be called nx_compress.
+ algorithm using the cryptographic API. One of the platform
+ drivers must also be selected. If you choose 'M' here, this
+ module will be called nx_compress.
if CRYPTO_DEV_NX_COMPRESS
@@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
algorithm. This supports NX hardware on the PowerNV platform.
If you choose 'M' here, this module will be called nx_compress_powernv.
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
- tristate "Compression acceleration cryptographic interface"
- select CRYPTO_ALGAPI
- select 842_DECOMPRESS
- default y
- help
- Support for PowerPC Nest (NX) accelerators using the cryptographic
- API. If you choose 'M' here, this module will be called
- nx_compress_crypto.
-
endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index e1684f5adb11..b727821c8ed4 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
nx-compress-pseries-objs := nx-842-pseries.o
nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
deleted file mode 100644
index d53a1dcd7b4e..000000000000
--- a/drivers/crypto/nx/nx-842-crypto.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- * Seth Jennings <sjenning@linux.vnet.ibm.com>
- *
- * Rewrite: Dan Streetman <ddstreet@ieee.org>
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors. Most of the complexity of this driver is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths. Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid. Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library. Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware. Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/sw842.h>
-#include <linux/ratelimit.h>
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer. That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC (0xf842)
-#define NX842_CRYPTO_GROUP_MAX (0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g) \
- (sizeof(struct nx842_crypto_header) + \
- sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE \
- NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER (2)
-#define BOUNCE_BUFFER_SIZE \
- ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fall back to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT (250) /* ms */
-#define DECOMP_BUSY_TIMEOUT (50) /* ms */
-
-struct nx842_crypto_header_group {
- __be16 padding; /* unused bytes at start of group */
- __be32 compressed_length; /* compressed bytes in group */
- __be32 uncompressed_length; /* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
- __be16 magic; /* NX842_CRYPTO_MAGIC */
- __be16 ignore; /* decompressed end bytes to ignore */
- u8 groups; /* total groups in this header */
- struct nx842_crypto_header_group group[];
-} __packed;
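
Given those field definitions, the length of a headered stream can be recovered from the header alone; a sketch (hypothetical helper, not in this patch):

	/* Each group occupies padding + compressed_length input bytes
	 * (the first group's padding covers the header itself), so the
	 * sum gives the total compressed buffer length.
	 */
	static unsigned int nx842_hdr_total_len(const struct nx842_crypto_header *hdr)
	{
		unsigned int i, total = 0;

		for (i = 0; i < hdr->groups; i++)
			total += be16_to_cpu(hdr->group[i].padding) +
				 be32_to_cpu(hdr->group[i].compressed_length);
		return total;
	}
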
-
-struct nx842_crypto_param {
- u8 *in;
- unsigned int iremain;
- u8 *out;
- unsigned int oremain;
- unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
- unsigned int slen, unsigned int dlen)
-{
- if (p->iremain < slen)
- return -EOVERFLOW;
- if (p->oremain < dlen)
- return -ENOSPC;
-
- p->in += slen;
- p->iremain -= slen;
- p->out += dlen;
- p->oremain -= dlen;
- p->ototal += dlen;
-
- return 0;
-}
-
-struct nx842_crypto_ctx {
- u8 *wmem;
- u8 *sbounce, *dbounce;
-
- struct nx842_crypto_header header;
- struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
- ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
- ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
- if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
- int ret;
-
- ret = nx842_constraints(c);
- if (ret) {
- pr_err_ratelimited("could not get nx842 constraints : %d\n",
- ret);
- return ret;
- }
-
- /* limit maximum, to always have enough bounce buffer to decompress */
- if (c->maximum > BOUNCE_BUFFER_SIZE) {
- c->maximum = BOUNCE_BUFFER_SIZE;
- pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
- }
-
- return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
- int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
- /* compress should have added space for header */
- if (s > be16_to_cpu(hdr->group[0].padding)) {
- pr_err("Internal error: no space for header\n");
- return -EINVAL;
- }
-
- memcpy(buf, hdr, s);
-
- print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
- return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
- struct nx842_crypto_param *p,
- struct nx842_crypto_header_group *g,
- struct nx842_constraints *c,
- u16 *ignore,
- unsigned int hdrsize)
-{
- unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
- unsigned int adj_slen = slen;
- u8 *src = p->in, *dst = p->out;
- int ret, dskip = 0;
- ktime_t timeout;
-
- if (p->iremain == 0)
- return -EOVERFLOW;
-
- if (p->oremain == 0 || hdrsize + c->minimum > dlen)
- return -ENOSPC;
-
- if (slen % c->multiple)
- adj_slen = round_up(slen, c->multiple);
- if (slen < c->minimum)
- adj_slen = c->minimum;
- if (slen > c->maximum)
- adj_slen = slen = c->maximum;
- if (adj_slen > slen || (u64)src % c->alignment) {
- adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
- slen = min(slen, BOUNCE_BUFFER_SIZE);
- if (adj_slen > slen)
- memset(ctx->sbounce + slen, 0, adj_slen - slen);
- memcpy(ctx->sbounce, src, slen);
- src = ctx->sbounce;
- slen = adj_slen;
- pr_debug("using comp sbounce buffer, len %x\n", slen);
- }
-
- dst += hdrsize;
- dlen -= hdrsize;
-
- if ((u64)dst % c->alignment) {
- dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
- dst += dskip;
- dlen -= dskip;
- }
- if (dlen % c->multiple)
- dlen = round_down(dlen, c->multiple);
- if (dlen < c->minimum) {
-nospc:
- dst = ctx->dbounce;
- dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
- dlen = round_down(dlen, c->multiple);
- dskip = 0;
- pr_debug("using comp dbounce buffer, len %x\n", dlen);
- }
- if (dlen > c->maximum)
- dlen = c->maximum;
-
- tmplen = dlen;
- timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
- do {
- dlen = tmplen; /* reset dlen, if we're retrying */
- ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
- /* possibly we should reduce the slen here, instead of
- * retrying with the dbounce buffer?
- */
- if (ret == -ENOSPC && dst != ctx->dbounce)
- goto nospc;
- } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
- if (ret)
- return ret;
-
- dskip += hdrsize;
-
- if (dst == ctx->dbounce)
- memcpy(p->out + dskip, dst, dlen);
-
- g->padding = cpu_to_be16(dskip);
- g->compressed_length = cpu_to_be32(dlen);
- g->uncompressed_length = cpu_to_be32(slen);
-
- if (p->iremain < slen) {
- *ignore = slen - p->iremain;
- slen = p->iremain;
- }
-
- pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
- slen, *ignore, dlen, dskip);
-
- return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr = &ctx->header;
- struct nx842_crypto_param p;
- struct nx842_constraints c;
- unsigned int groups, hdrsize, h;
- int ret, n;
- bool add_header;
- u16 ignore = 0;
-
- p.in = (u8 *)src;
- p.iremain = slen;
- p.out = dst;
- p.oremain = *dlen;
- p.ototal = 0;
-
- *dlen = 0;
-
- ret = read_constraints(&c);
- if (ret)
- return ret;
-
- groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
- DIV_ROUND_UP(p.iremain, c.maximum));
- hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
- /* skip adding header if the buffers meet all constraints */
- add_header = (p.iremain % c.multiple ||
- p.iremain < c.minimum ||
- p.iremain > c.maximum ||
- (u64)p.in % c.alignment ||
- p.oremain % c.multiple ||
- p.oremain < c.minimum ||
- p.oremain > c.maximum ||
- (u64)p.out % c.alignment);
-
- hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
- hdr->groups = 0;
- hdr->ignore = 0;
-
- while (p.iremain > 0) {
- n = hdr->groups++;
- if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
- return -ENOSPC;
-
- /* header goes before first group */
- h = !n && add_header ? hdrsize : 0;
-
- if (ignore)
- pr_warn("interal error, ignore is set %x\n", ignore);
-
- ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
- if (ret)
- return ret;
- }
-
- if (!add_header && hdr->groups > 1) {
- pr_err("Internal error: No header but multiple groups\n");
- return -EINVAL;
- }
-
- /* ignore indicates the input stream needed to be padded */
- hdr->ignore = cpu_to_be16(ignore);
- if (ignore)
- pr_debug("marked %d bytes as ignore\n", ignore);
-
- if (add_header)
- ret = nx842_crypto_add_header(hdr, dst);
- if (ret)
- return ret;
-
- *dlen = p.ototal;
-
- pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
- return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
- struct nx842_crypto_param *p,
- struct nx842_crypto_header_group *g,
- struct nx842_constraints *c,
- u16 ignore,
- bool usehw)
-{
- unsigned int slen = be32_to_cpu(g->compressed_length);
- unsigned int required_len = be32_to_cpu(g->uncompressed_length);
- unsigned int dlen = p->oremain, tmplen;
- unsigned int adj_slen = slen;
- u8 *src = p->in, *dst = p->out;
- u16 padding = be16_to_cpu(g->padding);
- int ret, spadding = 0, dpadding = 0;
- ktime_t timeout;
-
- if (!slen || !required_len)
- return -EINVAL;
-
- if (p->iremain <= 0 || padding + slen > p->iremain)
- return -EOVERFLOW;
-
- if (p->oremain <= 0 || required_len - ignore > p->oremain)
- return -ENOSPC;
-
- src += padding;
-
- if (!usehw)
- goto usesw;
-
- if (slen % c->multiple)
- adj_slen = round_up(slen, c->multiple);
- if (slen < c->minimum)
- adj_slen = c->minimum;
- if (slen > c->maximum)
- goto usesw;
- if (slen < adj_slen || (u64)src % c->alignment) {
- /* we can append padding bytes because the 842 format defines
- * an "end" template (see lib/842/842_decompress.c) and will
- * ignore any bytes following it.
- */
- if (slen < adj_slen)
- memset(ctx->sbounce + slen, 0, adj_slen - slen);
- memcpy(ctx->sbounce, src, slen);
- src = ctx->sbounce;
- spadding = adj_slen - slen;
- slen = adj_slen;
- pr_debug("using decomp sbounce buffer, len %x\n", slen);
- }
-
- if (dlen % c->multiple)
- dlen = round_down(dlen, c->multiple);
- if (dlen < required_len || (u64)dst % c->alignment) {
- dst = ctx->dbounce;
- dlen = min(required_len, BOUNCE_BUFFER_SIZE);
- pr_debug("using decomp dbounce buffer, len %x\n", dlen);
- }
- if (dlen < c->minimum)
- goto usesw;
- if (dlen > c->maximum)
- dlen = c->maximum;
-
- tmplen = dlen;
- timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
- do {
- dlen = tmplen; /* reset dlen, if we're retrying */
- ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
- } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
- if (ret) {
-usesw:
- /* reset everything, sw doesn't have constraints */
- src = p->in + padding;
- slen = be32_to_cpu(g->compressed_length);
- spadding = 0;
- dst = p->out;
- dlen = p->oremain;
- dpadding = 0;
- if (dlen < required_len) { /* have ignore bytes */
- dst = ctx->dbounce;
- dlen = BOUNCE_BUFFER_SIZE;
- }
- pr_info_ratelimited("using software 842 decompression\n");
- ret = sw842_decompress(src, slen, dst, &dlen);
- }
- if (ret)
- return ret;
-
- slen -= spadding;
-
- dlen -= ignore;
- if (ignore)
- pr_debug("ignoring last %x bytes\n", ignore);
-
- if (dst == ctx->dbounce)
- memcpy(p->out, dst, dlen);
-
- pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
- slen, padding, dlen, ignore);
-
- return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr;
- struct nx842_crypto_param p;
- struct nx842_constraints c;
- int n, ret, hdr_len;
- u16 ignore = 0;
- bool usehw = true;
-
- p.in = (u8 *)src;
- p.iremain = slen;
- p.out = dst;
- p.oremain = *dlen;
- p.ototal = 0;
-
- *dlen = 0;
-
- if (read_constraints(&c))
- usehw = false;
-
- hdr = (struct nx842_crypto_header *)src;
-
- /* If it doesn't start with our header magic number, assume it's a raw
- * 842 compressed buffer and pass it directly to the hardware driver
- */
- if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
- struct nx842_crypto_header_group g = {
- .padding = 0,
- .compressed_length = cpu_to_be32(p.iremain),
- .uncompressed_length = cpu_to_be32(p.oremain),
- };
-
- ret = decompress(ctx, &p, &g, &c, 0, usehw);
- if (ret)
- return ret;
-
- *dlen = p.ototal;
-
- return 0;
- }
-
- if (!hdr->groups) {
- pr_err("header has no groups\n");
- return -EINVAL;
- }
- if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
- pr_err("header has too many groups %x, max %x\n",
- hdr->groups, NX842_CRYPTO_GROUP_MAX);
- return -EINVAL;
- }
-
- hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
- if (hdr_len > slen)
- return -EOVERFLOW;
-
- memcpy(&ctx->header, src, hdr_len);
- hdr = &ctx->header;
-
- for (n = 0; n < hdr->groups; n++) {
- /* ignore applies to last group */
- if (n + 1 == hdr->groups)
- ignore = be16_to_cpu(hdr->ignore);
-
- ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
- if (ret)
- return ret;
- }
-
- *dlen = p.ototal;
-
- pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
- return 0;
-}
-
-static struct crypto_alg alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
- return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
- crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c
deleted file mode 100644
index 664f13dd06ed..000000000000
--- a/drivers/crypto/nx/nx-842-platform.c
+++ /dev/null
@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load. So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
- return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
- bool ret = false;
-
- spin_lock(&driver_lock);
-
- if (!driver) {
- driver = _driver;
- ret = true;
- } else
- WARN(1, "can't set platform driver, already set to %s\n",
- driver->name);
-
- spin_unlock(&driver_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
- spin_lock(&driver_lock);
-
- if (driver == _driver)
- driver = NULL;
- else if (driver)
- WARN(1, "can't unset platform driver %s, currently set to %s\n",
- _driver->name, driver->name);
- else
- WARN(1, "can't unset platform driver, already unset\n");
-
- spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
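
A platform driver would pair these calls in its module init and exit, roughly as follows (sketch only; example_driver stands in for a real struct nx842_driver instance):

	static int __init example_platform_init(void)
	{
		/* claim the single platform-driver slot */
		if (!nx842_platform_driver_set(&example_driver))
			return -EEXIST;
		return 0;
	}

	static void __exit example_platform_exit(void)
	{
		/* per the comment above, only unset from the exit path */
		nx842_platform_driver_unset(&example_driver);
	}
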
-
-bool nx842_platform_driver_get(void)
-{
- bool ret = false;
-
- spin_lock(&driver_lock);
-
- if (driver)
- ret = try_module_get(driver->owner);
-
- spin_unlock(&driver_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
- spin_lock(&driver_lock);
-
- if (driver)
- module_put(driver->owner);
-
- spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 33b3b0abf4ae..3750e13d8721 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
@@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
}
/* successful completion */
- pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+ pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+ be32_to_cpu(csb->count),
(unsigned long)ktime_us_delta(now, start));
return 0;
@@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_powernv_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
static __init int nx842_powernv_init(void)
{
struct device_node *dn;
+ int ret;
/* verify workmem size/align restrictions */
BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
- pr_info("loading\n");
-
for_each_compatible_node(dn, NULL, "ibm,power-nx")
nx842_powernv_probe(dn);
- if (!nx842_ct) {
- pr_err("no coprocessors found\n");
+ if (!nx842_ct)
return -ENODEV;
- }
- if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
struct nx842_coproc *coproc, *n;
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
kfree(coproc);
}
- return -EEXIST;
+ return ret;
}
- pr_info("loaded\n");
-
return 0;
}
module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
{
struct nx842_coproc *coproc, *n;
- nx842_platform_driver_unset(&nx842_powernv_driver);
+ crypto_unregister_alg(&nx842_powernv_alg);
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
list_del(&coproc->list);
kfree(coproc);
}
-
- pr_info("unloaded\n");
}
module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 3040a6091bf2..f4cbde03c6ad 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -29,6 +29,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
static struct nx842_constraints nx842_pseries_constraints = {
.alignment = DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@ struct nx842_workmem {
#define NX842_HW_PAGE_SIZE (4096)
#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
-enum nx842_status {
- UNAVAILABLE,
- AVAILABLE
-};
-
struct ibm_nx842_counters {
atomic64_t comp_complete;
atomic64_t comp_failed;
@@ -121,7 +118,6 @@ static struct nx842_devdata {
unsigned int max_sg_len;
unsigned int max_sync_size;
unsigned int max_sync_sg;
- enum nx842_status status;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);
@@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
switch (csb->completion_code) {
case 0: /* Completed without error */
break;
- case 64: /* Target bytes > Source bytes during compression */
+ case 64: /* Compression ok, but output larger than input */
+ dev_dbg(dev, "%s: output size larger than input size\n",
+ __func__);
+ break;
case 13: /* Output buffer too small */
- dev_dbg(dev, "%s: Compression output larger than input\n",
+ dev_dbg(dev, "%s: Out of space in output buffer\n",
__func__);
return -ENOSPC;
case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
devdata->max_sync_size = 0;
devdata->max_sync_sg = 0;
devdata->max_sg_len = 0;
- devdata->status = UNAVAILABLE;
return 0;
} else
return -ENOENT;
}
/**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
*
* The status property indicates if the accelerator is enabled. If the
* device is in the OF tree it indicates that the hardware is present.
* The status field indicates if the device is enabled when the status
* is 'okay'. Otherwise the device driver will be disabled.
*
- * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the status for the update
*
* Returns:
* 0 - Device is available
- * -EINVAL - Device is not available
+ * -ENODEV - Device is not available
*/
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
- struct property *prop) {
- int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
const char *status = (const char *)prop->value;
- if (!strncmp(status, "okay", (size_t)prop->length)) {
- devdata->status = AVAILABLE;
- } else {
- dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
- __func__, status);
- devdata->status = UNAVAILABLE;
- }
+ if (!strncmp(status, "okay", (size_t)prop->length))
+ return 0;
+ if (!strncmp(status, "disabled", (size_t)prop->length))
+ return -ENODEV;
+ dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
- return ret;
+ return -EINVAL;
}
/**
@@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
int ret = 0;
unsigned long flags;
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop)
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
spin_unlock_irqrestore(&devdata_mutex, flags);
+ kfree(new_devdata);
return -ENODEV;
}
- new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
- if (!new_devdata) {
- dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
- ret = -ENOMEM;
- goto error_out;
- }
-
memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
new_devdata->counters = old_devdata->counters;
@@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop)
goto out;
/* Perform property updates */
- ret = nx842_OF_upd_status(new_devdata, status);
+ ret = nx842_OF_upd_status(status);
if (ret)
goto error_out;
@@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = {
.decompress = nx842_pseries_decompress,
};
-static int __init nx842_probe(struct vio_dev *viodev,
- const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_pseries_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+ const struct vio_device_id *id)
{
struct nx842_devdata *old_devdata, *new_devdata = NULL;
unsigned long flags;
int ret = 0;
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
+ new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+ GFP_NOFS);
+ if (!new_devdata->counters) {
+ kfree(new_devdata);
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev,
dev_set_drvdata(&viodev->dev, NULL);
- new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
- if (!new_devdata) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
- new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
- GFP_NOFS);
- if (!new_devdata->counters) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
new_devdata->vdev = viodev;
new_devdata->dev = &viodev->dev;
nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@ static int __init nx842_probe(struct vio_dev *viodev,
of_reconfig_notifier_register(&nx842_of_nb);
ret = nx842_OF_upd(NULL);
- if (ret && ret != -ENODEV) {
- dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
- ret = -1;
+ if (ret)
+ goto error;
+
+ ret = crypto_register_alg(&nx842_pseries_alg);
+ if (ret) {
+ dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
goto error;
}
@@ -1043,7 +1053,7 @@ error:
return ret;
}
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
{
struct nx842_devdata *old_devdata;
unsigned long flags;
@@ -1051,6 +1061,8 @@ static int __exit nx842_remove(struct vio_dev *viodev)
pr_info("Removing IBM Power 842 compression device\n");
sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+ crypto_unregister_alg(&nx842_pseries_alg);
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
static struct vio_driver nx842_vio_driver = {
.name = KBUILD_MODNAME,
.probe = nx842_probe,
- .remove = __exit_p(nx842_remove),
+ .remove = nx842_remove,
.get_desired_dma = nx842_get_desired_dma,
.id_table = nx842_vio_driver_ids,
};
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
{
struct nx842_devdata *new_devdata;
int ret;
- pr_info("Registering IBM Power 842 compression driver\n");
-
if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
return -ENODEV;
@@ -1095,7 +1105,6 @@ static int __init nx842_init(void)
pr_err("Could not allocate memory for device data\n");
return -ENOMEM;
}
- new_devdata->status = UNAVAILABLE;
RCU_INIT_POINTER(devdata, new_devdata);
ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@ static int __init nx842_init(void)
return ret;
}
- if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
- vio_unregister_driver(&nx842_vio_driver);
- kfree(new_devdata);
- return -EEXIST;
- }
-
return 0;
}
-module_init(nx842_init);
+module_init(nx842_pseries_init);
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
{
struct nx842_devdata *old_devdata;
unsigned long flags;
- pr_info("Exiting IBM Power 842 compression driver\n");
- nx842_platform_driver_unset(&nx842_pseries_driver);
+ crypto_unregister_alg(&nx842_pseries_alg);
+
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@ static void __exit nx842_exit(void)
vio_unregister_driver(&nx842_vio_driver);
}
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6e5e0d60d0c8..046c1c45411b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1,10 +1,5 @@
/*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- * Bulent Abali <abali@us.ibm.com>
+ * Cryptographic API for the NX-842 hardware compression.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,89 +10,522 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ * Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors. Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths. Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid. Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library. Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware. Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "nx-842.h"
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+#include "nx-842.h"
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints. Different nx842 implementations
- * may have varying requirements. The constraints are:
- * @alignment: All buffers should be aligned to this
- * @multiple: All buffer lengths should be a multiple of this
- * @minimum: Buffer lengths must not be less than this amount
- * @maximum: Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint. It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer. That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
*/
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC (0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g) \
+ (sizeof(struct nx842_crypto_header) + \
+ sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE \
+ NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER (2)
+#define BOUNCE_BUFFER_SIZE \
+ ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fall back to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT (250) /* ms */
+#define DECOMP_BUSY_TIMEOUT (50) /* ms */
+
+struct nx842_crypto_param {
+ u8 *in;
+ unsigned int iremain;
+ u8 *out;
+ unsigned int oremain;
+ unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+ unsigned int slen, unsigned int dlen)
{
- memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+ if (p->iremain < slen)
+ return -EOVERFLOW;
+ if (p->oremain < dlen)
+ return -ENOSPC;
+
+ p->in += slen;
+ p->iremain -= slen;
+ p->out += dlen;
+ p->oremain -= dlen;
+ p->ototal += dlen;
+
return 0;
}
-EXPORT_SYMBOL_GPL(nx842_constraints);
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */
-size_t nx842_workmem_size(void)
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
{
- return nx842_platform_driver()->workmem_size;
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spin_lock_init(&ctx->lock);
+ ctx->driver = driver;
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+ kfree(ctx->wmem);
+ free_page((unsigned long)ctx->sbounce);
+ free_page((unsigned long)ctx->dbounce);
+ return -ENOMEM;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(nx842_workmem_size);
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
-int nx842_compress(const unsigned char *in, unsigned int ilen,
- unsigned char *out, unsigned int *olen, void *wmem)
+void nx842_crypto_exit(struct crypto_tfm *tfm)
{
- return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ kfree(ctx->wmem);
+ free_page((unsigned long)ctx->sbounce);
+ free_page((unsigned long)ctx->dbounce);
}
-EXPORT_SYMBOL_GPL(nx842_compress);
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
-int nx842_decompress(const unsigned char *in, unsigned int ilen,
- unsigned char *out, unsigned int *olen, void *wmem)
+static void check_constraints(struct nx842_constraints *c)
{
- return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
+ /* limit maximum, to always have enough bounce buffer to decompress */
+ if (c->maximum > BOUNCE_BUFFER_SIZE)
+ c->maximum = BOUNCE_BUFFER_SIZE;
}
-EXPORT_SYMBOL_GPL(nx842_decompress);
-static __init int nx842_init(void)
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
{
- request_module("nx-compress-powernv");
- request_module("nx-compress-pseries");
+ int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
- /* we prevent loading if there's no platform driver, and we get the
- * module that set it so it won't unload, so we don't need to check
- * if it's set in any of the above functions
- */
- if (!nx842_platform_driver_get()) {
- pr_err("no nx842 driver found.\n");
- return -ENODEV;
+ /* compress should have added space for header */
+ if (s > be16_to_cpu(hdr->group[0].padding)) {
+ pr_err("Internal error: no space for header\n");
+ return -EINVAL;
}
+ memcpy(buf, hdr, s);
+
+ print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
return 0;
}
-module_init(nx842_init);
-static void __exit nx842_exit(void)
+static int compress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 *ignore,
+ unsigned int hdrsize)
+{
+ unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ int ret, dskip = 0;
+ ktime_t timeout;
+
+ if (p->iremain == 0)
+ return -EOVERFLOW;
+
+ if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+ return -ENOSPC;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ adj_slen = slen = c->maximum;
+ if (adj_slen > slen || (u64)src % c->alignment) {
+ adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+ slen = min(slen, BOUNCE_BUFFER_SIZE);
+ if (adj_slen > slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ slen = adj_slen;
+ pr_debug("using comp sbounce buffer, len %x\n", slen);
+ }
+
+ dst += hdrsize;
+ dlen -= hdrsize;
+
+ if ((u64)dst % c->alignment) {
+ dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+ dst += dskip;
+ dlen -= dskip;
+ }
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < c->minimum) {
+nospc:
+ dst = ctx->dbounce;
+ dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+ dlen = round_down(dlen, c->multiple);
+ dskip = 0;
+ pr_debug("using comp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+ /* possibly we should reduce the slen here, instead of
+ * retrying with the dbounce buffer?
+ */
+ if (ret == -ENOSPC && dst != ctx->dbounce)
+ goto nospc;
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret)
+ return ret;
+
+ dskip += hdrsize;
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out + dskip, dst, dlen);
+
+ g->padding = cpu_to_be16(dskip);
+ g->compressed_length = cpu_to_be32(dlen);
+ g->uncompressed_length = cpu_to_be32(slen);
+
+ if (p->iremain < slen) {
+ *ignore = slen - p->iremain;
+ slen = p->iremain;
+ }
+
+ pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+ slen, *ignore, dlen, dskip);
+
+ return update_param(p, slen, dskip + dlen);
+}
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ unsigned int groups, hdrsize, h;
+ int ret, n;
+ bool add_header;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+ DIV_ROUND_UP(p.iremain, c.maximum));
+ hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+ spin_lock_bh(&ctx->lock);
+
+ /* skip adding header if the buffers meet all constraints */
+ add_header = (p.iremain % c.multiple ||
+ p.iremain < c.minimum ||
+ p.iremain > c.maximum ||
+ (u64)p.in % c.alignment ||
+ p.oremain % c.multiple ||
+ p.oremain < c.minimum ||
+ p.oremain > c.maximum ||
+ (u64)p.out % c.alignment);
+
+ hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+ hdr->groups = 0;
+ hdr->ignore = 0;
+
+ while (p.iremain > 0) {
+ n = hdr->groups++;
+ ret = -ENOSPC;
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+ goto unlock;
+
+ /* header goes before first group */
+ h = !n && add_header ? hdrsize : 0;
+
+ if (ignore)
+ pr_warn("interal error, ignore is set %x\n", ignore);
+
+ ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!add_header && hdr->groups > 1) {
+ pr_err("Internal error: No header but multiple groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* ignore indicates the input stream needed to be padded */
+ hdr->ignore = cpu_to_be16(ignore);
+ if (ignore)
+ pr_debug("marked %d bytes as ignore\n", ignore);
+
+ if (add_header)
+ ret = nx842_crypto_add_header(hdr, dst);
+ if (ret)
+ goto unlock;
+
+ *dlen = p.ototal;
+
+ pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 ignore)
{
- nx842_platform_driver_put();
+ unsigned int slen = be32_to_cpu(g->compressed_length);
+ unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+ unsigned int dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ u16 padding = be16_to_cpu(g->padding);
+ int ret, spadding = 0, dpadding = 0;
+ ktime_t timeout;
+
+ if (!slen || !required_len)
+ return -EINVAL;
+
+ if (p->iremain <= 0 || padding + slen > p->iremain)
+ return -EOVERFLOW;
+
+ if (p->oremain <= 0 || required_len - ignore > p->oremain)
+ return -ENOSPC;
+
+ src += padding;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ goto usesw;
+ if (slen < adj_slen || (u64)src % c->alignment) {
+ /* we can append padding bytes because the 842 format defines
+ * an "end" template (see lib/842/842_decompress.c) and will
+ * ignore any bytes following it.
+ */
+ if (slen < adj_slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ spadding = adj_slen - slen;
+ slen = adj_slen;
+ pr_debug("using decomp sbounce buffer, len %x\n", slen);
+ }
+
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < required_len || (u64)dst % c->alignment) {
+ dst = ctx->dbounce;
+ dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+ pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen < c->minimum)
+ goto usesw;
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret) {
+usesw:
+ /* reset everything, sw doesn't have constraints */
+ src = p->in + padding;
+ slen = be32_to_cpu(g->compressed_length);
+ spadding = 0;
+ dst = p->out;
+ dlen = p->oremain;
+ dpadding = 0;
+ if (dlen < required_len) { /* have ignore bytes */
+ dst = ctx->dbounce;
+ dlen = BOUNCE_BUFFER_SIZE;
+ }
+ pr_info_ratelimited("using software 842 decompression\n");
+ ret = sw842_decompress(src, slen, dst, &dlen);
+ }
+ if (ret)
+ return ret;
+
+ slen -= spadding;
+
+ dlen -= ignore;
+ if (ignore)
+ pr_debug("ignoring last %x bytes\n", ignore);
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out, dst, dlen);
+
+ pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+ slen, padding, dlen, ignore);
+
+ return update_param(p, slen + padding, dlen);
}
-module_exit(nx842_exit);
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ int n, ret, hdr_len;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ hdr = (struct nx842_crypto_header *)src;
+
+ spin_lock_bh(&ctx->lock);
+
+ /* If it doesn't start with our header magic number, assume it's a raw
+ * 842 compressed buffer and pass it directly to the hardware driver
+ */
+ if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+ struct nx842_crypto_header_group g = {
+ .padding = 0,
+ .compressed_length = cpu_to_be32(p.iremain),
+ .uncompressed_length = cpu_to_be32(p.oremain),
+ };
+
+ ret = decompress(ctx, &p, &g, &c, 0);
+ if (ret)
+ goto unlock;
+
+ goto success;
+ }
+
+ if (!hdr->groups) {
+ pr_err("header has no groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+ pr_err("header has too many groups %x, max %x\n",
+ hdr->groups, NX842_CRYPTO_GROUP_MAX);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+ if (hdr_len > slen) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
+
+ memcpy(&ctx->header, src, hdr_len);
+ hdr = &ctx->header;
+
+ for (n = 0; n < hdr->groups; n++) {
+ /* ignore applies to last group */
+ if (n + 1 == hdr->groups)
+ ignore = be16_to_cpu(hdr->ignore);
+
+ ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+ if (ret)
+ goto unlock;
+ }
+
+success:
+ *dlen = p.ototal;
+
+ pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+ ret = 0;
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index ac0ea79d0f8b..a4eee3bba937 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,8 +3,9 @@
#define __NX_842_H__
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/sw842.h>
+#include <linux/crypto.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr)
#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m))
#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
+/**
+ * This provides the driver's constraints. Different nx842 implementations
+ * may have varying requirements. The constraints are:
+ * @alignment: All buffers should be aligned to this
+ * @multiple: All buffer lengths should be a multiple of this
+ * @minimum: Buffer lengths must not be less than this amount
+ * @maximum: Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint. It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
struct nx842_constraints {
int alignment;
int multiple;
@@ -126,19 +146,40 @@ struct nx842_driver {
void *wrkmem);
};
-struct nx842_driver *nx842_platform_driver(void);
-bool nx842_platform_driver_set(struct nx842_driver *driver);
-void nx842_platform_driver_unset(struct nx842_driver *driver);
-bool nx842_platform_driver_get(void);
-void nx842_platform_driver_put(void);
+struct nx842_crypto_header_group {
+ __be16 padding; /* unused bytes at start of group */
+ __be32 compressed_length; /* compressed bytes in group */
+ __be32 uncompressed_length; /* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ struct nx842_crypto_header_group group[];
+} __packed;
-size_t nx842_workmem_size(void);
+#define NX842_CRYPTO_GROUP_MAX (0x20)
-int nx842_constraints(struct nx842_constraints *constraints);
+struct nx842_crypto_ctx {
+ spinlock_t lock;
+
+ u8 *wmem;
+ u8 *sbounce, *dbounce;
+
+ struct nx842_crypto_header header;
+ struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+ struct nx842_driver *driver;
+};
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
#endif /* __NX_842_H__ */
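
To make the constraints contract documented above concrete, a validity check might look like this (hypothetical helper, not part of the patch; per the kernel-doc, the minimum check is skipped for compressed data):

	static bool nx842_fits_constraints(const struct nx842_constraints *c,
					   const void *buf, unsigned int len,
					   bool apply_minimum)
	{
		if ((unsigned long)buf % c->alignment)
			return false;		/* misaligned buffer */
		if (len % c->multiple)
			return false;		/* not a length multiple */
		if (apply_minimum && len < c->minimum)
			return false;		/* compression input only */
		return len <= c->maximum;
	}
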
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..73ef49922788 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
return -EINVAL;
}
- crypto_aead_crt(tfm)->authsize = authsize;
-
return 0;
}
@@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
return -EINVAL;
}
- crypto_aead_crt(tfm)->authsize = authsize;
-
return 0;
}
@@ -174,6 +170,7 @@ static int generate_pat(u8 *iv,
struct nx_crypto_ctx *nx_ctx,
unsigned int authsize,
unsigned int nbytes,
+ unsigned int assoclen,
u8 *out)
{
struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -200,16 +197,16 @@ static int generate_pat(u8 *iv,
* greater than 2^32.
*/
- if (!req->assoclen) {
+ if (!assoclen) {
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
- } else if (req->assoclen <= 14) {
+ } else if (assoclen <= 14) {
/* if associated data is 14 bytes or less, we do 1 GCM
* operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
* which is fed in through the source buffers here */
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
b1 = nx_ctx->priv.ccm.iauth_tag;
- iauth_len = req->assoclen;
- } else if (req->assoclen <= 65280) {
+ iauth_len = assoclen;
+ } else if (assoclen <= 65280) {
/* if associated data is less than (2^16 - 2^8), we construct
* B1 differently and feed in the associated data to a CCA
* operation */
@@ -223,7 +220,7 @@ static int generate_pat(u8 *iv,
}
/* generate B0 */
- rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+ rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
if (rc)
return rc;
@@ -233,22 +230,22 @@ static int generate_pat(u8 *iv,
*/
if (b1) {
memset(b1, 0, 16);
- if (req->assoclen <= 65280) {
- *(u16 *)b1 = (u16)req->assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+ if (assoclen <= 65280) {
+ *(u16 *)b1 = assoclen;
+ scatterwalk_map_and_copy(b1 + 2, req->src, 0,
iauth_len, SCATTERWALK_FROM_SG);
} else {
*(u16 *)b1 = (u16)(0xfffe);
- *(u32 *)&b1[2] = (u32)req->assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+ *(u32 *)&b1[2] = assoclen;
+ scatterwalk_map_and_copy(b1 + 6, req->src, 0,
iauth_len, SCATTERWALK_FROM_SG);
}
}
/* now copy any remaining AAD to scatterlist and call nx... */
- if (!req->assoclen) {
+ if (!assoclen) {
return rc;
- } else if (req->assoclen <= 14) {
+ } else if (assoclen <= 14) {
unsigned int len = 16;
nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
@@ -280,7 +277,7 @@ static int generate_pat(u8 *iv,
return rc;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
} else {
unsigned int processed = 0, to_process;
@@ -294,15 +291,15 @@ static int generate_pat(u8 *iv,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
- to_process = min_t(u32, req->assoclen - processed,
+ to_process = min_t(u32, assoclen - processed,
nx_ctx->ap->databytelen);
nx_insg = nx_walk_and_build(nx_ctx->in_sg,
nx_ctx->ap->sglen,
- req->assoc, processed,
+ req->src, processed,
&to_process);
- if ((to_process + processed) < req->assoclen) {
+ if ((to_process + processed) < assoclen) {
NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
NX_FDM_INTERMEDIATE;
} else {
@@ -328,11 +325,10 @@ static int generate_pat(u8 *iv,
NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen,
- &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
processed += to_process;
- } while (processed < req->assoclen);
+ } while (processed < assoclen);
result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
}
@@ -343,7 +339,8 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc)
+ struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* copy out the auth tag to compare with later */
scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes, authsize,
+ req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- &to_process, processed,
- csbcpb->cpb.aes_ccm.iv_or_ctr);
+ &to_process, processed + req->assoclen,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -420,7 +417,8 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc)
+ struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- &to_process, processed,
+ &to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* copy out the auth tag */
scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes, authsize,
+ req->dst, nbytes + req->assoclen, authsize,
SCATTERWALK_TO_SG);
out:
@@ -494,17 +492,17 @@ out:
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
desc.info = iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
- return ccm_nx_encrypt(req, &desc);
+ return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
@@ -513,29 +511,28 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
int rc;
desc.info = req->iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
rc = crypto_ccm_check_iv(desc.info);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc);
+ return ccm_nx_encrypt(req, &desc, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct blkcipher_desc desc;
- u8 *iv = nx_ctx->priv.ccm.iv;
+ u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
desc.info = iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
- return ccm_nx_decrypt(req, &desc);
+ return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
@@ -544,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
int rc;
desc.info = req->iv;
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
rc = crypto_ccm_check_iv(desc.info);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc);
+ return ccm_nx_decrypt(req, &desc, req->assoclen);
}
/* tell the block cipher walk routines that this is a stream cipher by
@@ -558,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
* during encrypt/decrypt doesn't solve this problem, because it calls
* blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
* but instead uses this tfm->blocksize. */
-struct crypto_alg nx_ccm_aes_alg = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "ccm-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ccm_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = ccm_aes_nx_set_key,
- .setauthsize = ccm_aes_nx_setauthsize,
- .encrypt = ccm_aes_nx_encrypt,
- .decrypt = ccm_aes_nx_decrypt,
- }
+struct aead_alg nx_ccm_aes_alg = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_aes_nx_set_key,
+ .setauthsize = ccm_aes_nx_setauthsize,
+ .encrypt = ccm_aes_nx_encrypt,
+ .decrypt = ccm_aes_nx_decrypt,
};
-struct crypto_alg nx_ccm4309_aes_alg = {
- .cra_name = "rfc4309(ccm(aes))",
- .cra_driver_name = "rfc4309-ccm-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_nivaead_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ccm_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_aead = {
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = ccm4309_aes_nx_set_key,
- .setauthsize = ccm4309_aes_nx_setauthsize,
- .encrypt = ccm4309_aes_nx_encrypt,
- .decrypt = ccm4309_aes_nx_decrypt,
- .geniv = "seqiv",
- }
+struct aead_alg nx_ccm4309_aes_alg = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm4309_aes_nx_set_key,
+ .setauthsize = ccm4309_aes_nx_setauthsize,
+ .encrypt = ccm4309_aes_nx_encrypt,
+ .decrypt = ccm4309_aes_nx_decrypt,
};
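Note on the rfc4309 conversion above: the 16-byte counter block is assembled from the 3-byte salt captured at setkey time and the 8-byte explicit IV carried in the request, with a leading flags octet of 3 because the counter field is q = 4 bytes wide (flags = q - 1). A minimal userspace sketch of that layout, with illustrative names; the driver leaves the trailing counter word to the coprocessor, it is zeroed here only for clarity:

#include <stdint.h>
#include <string.h>

/* flags | 3-byte salt | 8-byte explicit IV | 4-byte counter */
static void ccm4309_ctr0(uint8_t blk[16], const uint8_t salt[3],
			 const uint8_t iv[8])
{
	blk[0] = 3;			/* flags octet: q - 1, with q = 4 */
	memcpy(blk + 1, salt, 3);	/* implicit salt from the key */
	memcpy(blk + 4, iv, 8);		/* explicit per-request IV */
	memset(blk + 12, 0, 4);		/* counter word */
}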
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..898c0a280511 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
- memcpy(nx_ctx->priv.ctr.iv,
+ memcpy(nx_ctx->priv.ctr.nonce,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
@@ -131,39 +131,19 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *iv = nx_ctx->priv.ctr.iv;
+ u8 iv[16];
+	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = nx_ctx->priv.ctr.iv;
+ desc->info = iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
-struct crypto_alg nx_ctr_aes_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_aes_nx_set_key,
- .encrypt = ctr_aes_nx_crypt,
- .decrypt = ctr_aes_nx_crypt,
- }
-};
-
struct crypto_alg nx_ctr3686_aes_alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-nx",
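The ctr3686_aes_nx_crypt() rework above builds the counter block on the stack instead of in tfm-wide storage, so concurrent requests on one tfm no longer race over a shared IV buffer. The block is nonce | IV | counter, with the counter primed to 1 as RFC 3686 specifies; a standalone sketch:

#include <stdint.h>
#include <string.h>

/* 16-byte block: 4-byte nonce | 8-byte per-request IV | 32-bit counter */
static void rfc3686_ctrblk(uint8_t blk[16], const uint8_t nonce[4],
			   const uint8_t iv[8])
{
	memcpy(blk, nonce, 4);		/* from setkey */
	memcpy(blk + 4, iv, 8);		/* from the request */
	blk[12] = blk[13] = blk[14] = 0;
	blk[15] = 1;			/* RFC 3686 starts the counter at 1 */
}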
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 08ac6d48688c..eee624f589b6 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,11 +21,9 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
@@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
@@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
char *nonce = nx_ctx->priv.gcm.nonce;
int rc;
@@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct aead_request *req,
- u8 *out)
+ u8 *out,
+ unsigned int assoclen)
{
int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
- unsigned int nbytes = req->assoclen;
+ unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
@@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
processed += to_process;
} while (processed < nbytes);
@@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+ unsigned int assoclen)
{
int rc;
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *nx_sg;
- unsigned int nbytes = req->assoclen;
+ unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
@@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
processed += to_process;
} while (processed < nbytes);
@@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
int enc)
{
int rc;
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
char out[AES_BLOCK_SIZE];
struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,12 @@ out:
return rc;
}
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+ unsigned int assoclen)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
@@ -326,15 +331,15 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = nx_ctx->priv.gcm.iv;
+ desc.info = rctx->iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
if (nbytes == 0) {
- if (req->assoclen == 0)
+ if (assoclen == 0)
rc = gcm_empty(req, &desc, enc);
else
- rc = gmac(req, &desc);
+ rc = gmac(req, &desc, assoclen);
if (rc)
goto out;
else
@@ -342,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
}
/* Process associated data */
- csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
- if (req->assoclen) {
- rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+ csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+ if (assoclen) {
+ rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ assoclen);
if (rc)
goto out;
}
@@ -362,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
req->src, &to_process,
processed + req->assoclen,
@@ -424,46 +429,56 @@ out:
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
- return gcm_aes_nx_crypt(req, 1);
+ return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
memcpy(iv, req->iv, 12);
- return gcm_aes_nx_crypt(req, 0);
+ return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
- return gcm_aes_nx_crypt(req, 1);
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
- char *iv = nx_ctx->priv.gcm.iv;
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
- return gcm_aes_nx_crypt(req, 0);
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
/* tell the block cipher walk routines that this is a stream cipher by
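With the new AEAD interface, the 8-byte explicit IV of rfc4106 travels inside the associated-data region of req->src, so req->assoclen counts it; the gcm4106 paths above therefore reject assoclen < 8 and hand assoclen - 8 down as the length that is actually authenticated. The bookkeeping in isolation (helper name is hypothetical):

#include <errno.h>

/* rfc4106: the last 8 AAD bytes are the explicit IV, not plain AAD. */
static int rfc4106_aad_len(unsigned int assoclen, unsigned int *aadlen)
{
	if (assoclen < 8)
		return -EINVAL;
	*aadlen = assoclen - 8;
	return 0;
}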
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
switch (key_len) {
case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return -EINVAL;
}
- memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
return 0;
}
@@ -148,32 +149,29 @@ out:
return rc;
}
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
- struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct nx_sg *out_sg;
- int len;
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_AES);
+ err = nx_crypto_ctx_aes_xcbc_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
- memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
- memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
- len = AES_BLOCK_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ return 0;
+}
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ memset(sctx, 0, sizeof *sctx);
return 0;
}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &data_len, nx_ctx->ap->sglen);
+
+ if (data_len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
do {
to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
(u8 *) sctx->buffer,
&data_len,
max_sg_len);
- if (data_len != sctx->count)
- return -EINVAL;
+ if (data_len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
}
data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
&data_len,
max_sg_len);
- if (data_len != to_process - sctx->count)
- return -EINVAL;
+ if (data_len != to_process - sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
&len, nx_ctx->ap->sglen);
- if (len != sctx->count)
- return -EINVAL;
+ if (len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
- if (len != AES_BLOCK_SIZE)
- return -EINVAL;
+ if (len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init,
+ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
.cra_exit = nx_crypto_ctx_exit,
}
};
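Two structural points in the xcbc conversion above: the key is now written straight into the CPB at setkey time rather than staged in nx_ctx->priv, and the output sg entry for sctx->state is rebuilt on every nx_xcbc_update() call. The rebuild matters because sctx lives in the caller-owned shash_desc, so its address can differ between calls and an sg entry cached at init time would point at stale memory. The per-call pattern, reduced to plain C with illustrative types:

#include <stddef.h>
#include <stdint.h>

struct sg_ent { uint64_t addr; uint32_t len; };

/* Re-describe the output buffer on each call: `state` is caller-owned
 * and may sit at a different address every time. */
static size_t build_out_sg(struct sg_ent *head, void *state, uint32_t len)
{
	head[0].addr = (uint64_t)(uintptr_t)state;
	head[0].len = len;
	return 1;			/* entries used */
}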
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 4e91bdb83c59..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,34 +29,28 @@
#include "nx.h"
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA256_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
- if (len != SHA256_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -77,7 +71,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
@@ -102,24 +96,28 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@@ -128,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+		/* to_process: the SHA256_BLOCK_SIZE-aligned chunk to be
+		 * processed in this iteration. The value is restricted
+		 * by the sg list limits and by the number of sgs already
+		 * used for the leftover data (see above). Ideally we
+		 * could allow NX_PAGE_SIZE * max_sg_len, but because the
+		 * data may not be aligned, we must account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -282,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha256_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
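The reworked update loop above derives each operation's size from the sg capacity that remains after the leftover buffer: used_sgs entries are already taken, one more is reserved because unaligned data can straddle an extra page, each remaining entry covers up to NX_PAGE_SIZE bytes, and the result is rounded down to whole SHA-256 blocks. The same computation as standalone C (the nx-sha512.c hunk below repeats it with SHA512_BLOCK_SIZE):

#include <stdint.h>

#define NX_PAGE_SIZE	4096u
#define SHA256_BLOCK	64u

/* Largest block-aligned chunk that fits the remaining sg entries;
 * one entry is held back for page-straddling of unaligned data. */
static uint64_t sha256_chunk(uint64_t total, uint32_t max_sg_len,
			     uint32_t used_sgs)
{
	uint64_t cap = (uint64_t)(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;
	uint64_t n = total < cap ? total : cap;

	return n & ~(uint64_t)(SHA256_BLOCK - 1);
}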
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index e6a58d2ee628..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,34 +28,29 @@
#include "nx.h"
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
- int len;
- u32 max_sg_len;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
- memset(sctx, 0, sizeof *sctx);
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
- max_sg_len = min_t(u64, max_sg_len,
- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ return 0;
+}
- len = SHA512_DIGEST_SIZE;
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, max_sg_len);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
+ memset(sctx, 0, sizeof *sctx);
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -76,7 +71,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
@@ -101,25 +96,28 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ data_len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA512_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@@ -127,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+		/* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
+		 * processed in this iteration. The value is restricted
+		 * by the sg list limits and by the number of sgs already
+		 * used for the leftover data (see above). Ideally we
+		 * could allow NX_PAGE_SIZE * max_sg_len, but because the
+		 * data may not be aligned, we must account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@@ -140,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
@@ -288,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha_init,
+ .cra_init = nx_crypto_ctx_sha512_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index f6198f29a4a8..0794f1cc0018 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -596,13 +596,9 @@ static int nx_register_algs(void)
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
- if (rc)
- goto out_unreg_cbc;
-
rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
if (rc)
- goto out_unreg_ctr;
+ goto out_unreg_cbc;
rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
if (rc)
@@ -612,11 +608,11 @@ static int nx_register_algs(void)
if (rc)
goto out_unreg_gcm;
- rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
if (rc)
goto out_unreg_gcm4106;
- rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
if (rc)
goto out_unreg_ccm;
@@ -644,17 +640,15 @@ out_unreg_s256:
nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
NX_PROPS_SHA256);
out_unreg_ccm4309:
- nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
- nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-out_unreg_ctr:
- nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
@@ -711,14 +705,16 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
}
/* entry points from the crypto tfm initializers */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+ return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CCM);
}
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
NX_MODE_AES_GCM);
}
@@ -810,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev)
 			    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
 		nx_unregister_shash(&nx_shash_sha256_alg,
 			    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
- nx_unregister_alg(&nx_ccm4309_aes_alg,
- NX_FC_AES, NX_MODE_AES_CCM);
- nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm4309_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
nx_unregister_aead(&nx_gcm4106_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_alg(&nx_ctr3686_aes_alg,
NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index de3ea8738146..9347878d4f30 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
#ifndef __NX_H__
#define __NX_H__
+#include <crypto/ctr.h>
+
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
#define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
u8 iv[16];
+};
+
+struct nx_gcm_priv {
u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN];
};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
#define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
u8 iv[16];
+};
+
+struct nx_ccm_priv {
u8 b0[16];
u8 iauth_tag[16];
u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
};
struct nx_ctr_priv {
- u8 iv[16];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct nx_crypto_ctx {
@@ -141,8 +149,10 @@ struct nx_crypto_ctx {
} priv;
};
+struct crypto_aead;
+
/* prototypes */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
@@ -179,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg;
extern struct crypto_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr_aes_alg;
extern struct crypto_alg nx_ctr3686_aes_alg;
-extern struct crypto_alg nx_ccm_aes_alg;
-extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
extern struct shash_alg nx_shash_sha512_alg;
extern struct shash_alg nx_shash_sha256_alg;
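The nx.h split above is the core of the reentrancy fix: the IV leaves the tfm-wide priv union for small per-request structs (nx_gcm_rctx, nx_ccm_rctx), and the init paths size the request context with crypto_aead_set_reqsize() so aead_request_ctx() hands every in-flight request its own copy. A reduced sketch of why per-request storage is the safe shape (plain C, illustrative types):

#include <string.h>

struct my_rctx { unsigned char iv[16]; };	/* per-request state */

struct my_request {
	/* common request fields would precede this in real code */
	struct my_rctx rctx;			/* one copy per request */
};

/* Two requests sharing one tfm no longer share an IV buffer: */
static void start_request(struct my_request *req, const unsigned char *iv)
{
	memcpy(req->rctx.iv, iv, 16);
}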
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9a28b7e07c71..eba23147c0ee 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -52,29 +52,30 @@
#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
-#define AES_REG_CTRL_CTR (1 << 6)
-#define AES_REG_CTRL_CBC (1 << 5)
-#define AES_REG_CTRL_KEY_SIZE (3 << 3)
-#define AES_REG_CTRL_DIRECTION (1 << 2)
-#define AES_REG_CTRL_INPUT_READY (1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
+#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 0
+#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
+#define AES_REG_CTRL_CTR BIT(6)
+#define AES_REG_CTRL_CBC BIT(5)
+#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION BIT(2)
+#define AES_REG_CTRL_INPUT_READY BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY BIT(0)
+#define AES_REG_CTRL_MASK GENMASK(24, 2)
#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE (1 << 6)
-#define AES_REG_MASK_START (1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
-#define AES_REG_MASK_DMA_IN_EN (1 << 2)
-#define AES_REG_MASK_SOFTRESET (1 << 1)
-#define AES_REG_AUTOIDLE (1 << 0)
+#define AES_REG_MASK_SIDLE BIT(6)
+#define AES_REG_MASK_START BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN BIT(3)
+#define AES_REG_MASK_DMA_IN_EN BIT(2)
+#define AES_REG_MASK_SOFTRESET BIT(1)
+#define AES_REG_AUTOIDLE BIT(0)
#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
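The register-bit rewrite above is mechanical: BIT(n) is 1 << n and GENMASK(h, l) sets bits l through h inclusive, so GENMASK(8, 7) equals the old (3 << 7) and every width encoding survives bit-for-bit. A userspace check of the equivalences, using local macro definitions in the kernel's style:

#include <assert.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	assert(GENMASK(8, 7) == (3u << 7));	/* CTR_WIDTH_MASK / _128 */
	assert(BIT(7) == (1u << 7));		/* CTR_WIDTH_64 */
	assert(GENMASK(4, 3) == (3u << 3));	/* KEY_SIZE */
	return 0;
}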
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
- u32 val, mask = 0;
+ u32 val;
err = omap_aes_hw_init(dd);
if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
- if (dd->flags & FLAGS_CTR) {
+ if (dd->flags & FLAGS_CTR)
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
- mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
- }
+
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
- mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
- AES_REG_CTRL_KEY_SIZE;
-
- omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
return 0;
}
@@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
int len = 0;
+ if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+ return -EINVAL;
+
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
return -1;
@@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
- int pages;
+ int pages, total;
- pages = get_order(dd->total);
+ total = ALIGN(dd->total, AES_BLOCK_SIZE);
+ pages = get_order(total);
buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
sg_init_table(&dd->in_sgl, 1);
- sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+ sg_set_buf(&dd->in_sgl, buf_in, total);
dd->in_sg = &dd->in_sgl;
sg_init_table(&dd->out_sgl, 1);
- sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+ sg_set_buf(&dd->out_sgl, buf_out, total);
dd->out_sg = &dd->out_sgl;
return 0;
@@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
unsigned long flags;
- int err, ret = 0;
+ int err, ret = 0, len;
spin_lock_irqsave(&dd->lock, flags);
if (req)
@@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->sgs_copied = 0;
}
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+ len = ALIGN(dd->total, AES_BLOCK_SIZE);
+ dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+ dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
rctx = ablkcipher_request_ctx(req);
@@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
void *buf_in, *buf_out;
- int pages;
+ int pages, len;
pr_debug("enter done_task\n");
@@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data)
sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
- pages = get_order(dd->total_save);
+ len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+ pages = get_order(len);
free_pages((unsigned long)buf_in, pages);
free_pages((unsigned long)buf_out, pages);
}
@@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
- }
-
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
@@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
{
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = {
{
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-omap",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
@@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
}
}
- dd->total -= AES_BLOCK_SIZE;
-
- BUG_ON(dd->total < 0);
+ dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
/* Clear IRQ status */
status &= ~AES_REG_IRQ_DATA_OUT;
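The omap-aes changes above replace the hard block-alignment requirement with padding: buffer and sg sizes are rounded up via ALIGN(x, AES_BLOCK_SIZE), and the IRQ path clamps its final subtraction instead of tripping a BUG_ON on a short tail block. The rounding itself is ordinary power-of-two arithmetic; a sketch with a locally defined macro:

#include <assert.h>
#include <stddef.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((size_t)(a) - 1))

int main(void)
{
	assert(ALIGN_UP(15, 16) == 16);	/* short block -> one full block */
	assert(ALIGN_UP(16, 16) == 16);	/* already aligned, unchanged */
	assert(ALIGN_UP(17, 16) == 32);	/* spills into a second block */
	return 0;
}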
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 46307098f8ba..0a70e46d5416 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dmaengine_terminate_all(dd->dma_lch_in);
dmaengine_terminate_all(dd->dma_lch_out);
- dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
return err;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95a3cf..48adb2a0903e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
* the dmaengine may try to DMA the incorrect amount of data.
*/
sg_init_table(&ctx->sgl, 1);
- ctx->sgl.page_link = ctx->sg->page_link;
+ sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
ctx->sgl.offset = ctx->sg->offset;
sg_dma_len(&ctx->sgl) = len32;
sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
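The omap-sham one-liner above matters because sg->page_link multiplexes the page pointer with the chain and end-of-list markers in its low bits; copying the word verbatim can drag those markers into the freshly initialized one-entry table, while sg_assign_page() stores only the page pointer and leaves the destination's own markers alone. A sketch of the encoding (the flag values mirror the kernel's low-bit convention, the rest is illustrative):

#include <assert.h>
#include <stdint.h>

#define SG_CHAIN	0x1UL	/* low bits of page_link are flags */
#define SG_END		0x2UL

/* Keep this entry's own flags, take only the new page pointer. */
static uint64_t assign_page(uint64_t page_link, uint64_t page)
{
	return page | (page_link & (SG_CHAIN | SG_END));
}

int main(void)
{
	uint64_t src = 0x1000 | SG_END;	/* source entry ends its list */
	uint64_t dst = 0x2000;		/* fresh single-entry table */

	/* a verbatim copy would have imported SG_END; this does not: */
	assert(assign_page(dst, src & ~(SG_CHAIN | SG_END)) == 0x1000);
	return 0;
}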
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4f56f3681abd..da36de26a4dc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -99,11 +99,16 @@ struct spacc_req {
dma_addr_t src_addr, dst_addr;
struct spacc_ddt *src_ddt, *dst_ddt;
void (*complete)(struct spacc_req *req);
+};
- /* AEAD specific bits. */
- u8 *giv;
- size_t giv_len;
- dma_addr_t giv_pa;
+struct spacc_aead {
+ unsigned long ctrl_default;
+ unsigned long type;
+ struct aead_alg alg;
+ struct spacc_engine *engine;
+ struct list_head entry;
+ int key_offs;
+ int iv_offs;
};
struct spacc_engine {
@@ -121,6 +126,9 @@ struct spacc_engine {
struct spacc_alg *algs;
unsigned num_algs;
struct list_head registered_algs;
+ struct spacc_aead *aeads;
+ unsigned num_aeads;
+ struct list_head registered_aeads;
size_t cipher_pg_sz;
size_t hash_pg_sz;
const char *name;
@@ -174,8 +182,6 @@ struct spacc_aead_ctx {
u8 cipher_key_len;
u8 hash_key_len;
struct crypto_aead *sw_cipher;
- size_t auth_size;
- u8 salt[AES_BLOCK_SIZE];
};
static int spacc_ablk_submit(struct spacc_req *req);
@@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+ return container_of(alg, struct spacc_aead, alg);
+}
+
static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
@@ -310,120 +321,117 @@ out:
return NULL;
}
-static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+static int spacc_aead_make_ddts(struct aead_request *areq)
{
- struct aead_request *areq = container_of(req->req, struct aead_request,
- base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct spacc_req *req = aead_request_ctx(areq);
struct spacc_engine *engine = req->engine;
struct spacc_ddt *src_ddt, *dst_ddt;
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
- unsigned nents = sg_count(areq->src, areq->cryptlen);
unsigned total;
- dma_addr_t iv_addr;
+ unsigned int src_nents, dst_nents;
struct scatterlist *cur;
- int i, dst_ents, src_ents, assoc_ents;
- u8 *iv = giv ? giv : areq->iv;
+ int i, dst_ents, src_ents;
+
+ total = areq->assoclen + areq->cryptlen;
+ if (req->is_encrypt)
+ total += crypto_aead_authsize(aead);
+
+ src_nents = sg_count(areq->src, total);
+ if (src_nents + 1 > MAX_DDT_LEN)
+ return -E2BIG;
+
+ dst_nents = 0;
+ if (areq->src != areq->dst) {
+ dst_nents = sg_count(areq->dst, total);
+		if (dst_nents + 1 > MAX_DDT_LEN)
+ return -E2BIG;
+ }
src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
if (!src_ddt)
- return -ENOMEM;
+ goto err;
dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
- if (!dst_ddt) {
- dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
- return -ENOMEM;
- }
+ if (!dst_ddt)
+ goto err_free_src;
req->src_ddt = src_ddt;
req->dst_ddt = dst_ddt;
- assoc_ents = dma_map_sg(engine->dev, areq->assoc,
- sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
- if (areq->src != areq->dst) {
- src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ if (dst_nents) {
+ src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
DMA_TO_DEVICE);
- dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+ if (!src_ents)
+ goto err_free_dst;
+
+ dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
DMA_FROM_DEVICE);
+
+ if (!dst_ents) {
+ dma_unmap_sg(engine->dev, areq->src, src_nents,
+ DMA_TO_DEVICE);
+ goto err_free_dst;
+ }
} else {
- src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
DMA_BIDIRECTIONAL);
- dst_ents = 0;
+ if (!src_ents)
+ goto err_free_dst;
+ dst_ents = src_ents;
}
/*
- * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
- * formed by the crypto block and sent as the ESP IV for IPSEC.
+ * Now map in the payload for the source and destination and terminate
+ * with the NULL pointers.
*/
- iv_addr = dma_map_single(engine->dev, iv, ivsize,
- giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
- req->giv_pa = iv_addr;
+ for_each_sg(areq->src, cur, src_ents, i)
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
- /*
- * Map the associated data. For decryption we don't copy the
- * associated data.
- */
- total = areq->assoclen;
- for_each_sg(areq->assoc, cur, assoc_ents, i) {
+ /* For decryption we need to skip the associated data. */
+ total = req->is_encrypt ? 0 : areq->assoclen;
+ for_each_sg(areq->dst, cur, dst_ents, i) {
unsigned len = sg_dma_len(cur);
- if (len > total)
- len = total;
-
- total -= len;
+ if (len <= total) {
+ total -= len;
+ continue;
+ }
- ddt_set(src_ddt++, sg_dma_address(cur), len);
- if (req->is_encrypt)
- ddt_set(dst_ddt++, sg_dma_address(cur), len);
+ ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
}
- ddt_set(src_ddt++, iv_addr, ivsize);
-
- if (giv || req->is_encrypt)
- ddt_set(dst_ddt++, iv_addr, ivsize);
-
- /*
- * Now map in the payload for the source and destination and terminate
- * with the NULL pointers.
- */
- for_each_sg(areq->src, cur, src_ents, i) {
- ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
- if (areq->src == areq->dst)
- ddt_set(dst_ddt++, sg_dma_address(cur),
- sg_dma_len(cur));
- }
-
- for_each_sg(areq->dst, cur, dst_ents, i)
- ddt_set(dst_ddt++, sg_dma_address(cur),
- sg_dma_len(cur));
ddt_set(src_ddt, 0, 0);
ddt_set(dst_ddt, 0, 0);
return 0;
+
+err_free_dst:
+ dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+ dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+ return -ENOMEM;
}
static void spacc_aead_free_ddts(struct spacc_req *req)
{
struct aead_request *areq = container_of(req->req, struct aead_request,
base);
- struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
- struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ unsigned total = areq->assoclen + areq->cryptlen +
+ (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+ struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
struct spacc_engine *engine = aead_ctx->generic.engine;
- unsigned ivsize = alg->alg.cra_aead.ivsize;
- unsigned nents = sg_count(areq->src, areq->cryptlen);
+ unsigned nents = sg_count(areq->src, total);
if (areq->src != areq->dst) {
dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
dma_unmap_sg(engine->dev, areq->dst,
- sg_count(areq->dst, areq->cryptlen),
+ sg_count(areq->dst, total),
DMA_FROM_DEVICE);
} else
dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
- dma_unmap_sg(engine->dev, areq->assoc,
- sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-
- dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
-
dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}
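In the rewritten spacc_aead_make_ddts() above, the destination walk skips the copied associated data on decryption (the skip starts at assoclen), while on encryption everything the engine writes is kept (skip of 0). Entries wholly inside the skip are dropped, and the entry the boundary lands in is emitted with an adjusted address and length. The skip arithmetic in isolation (this sketch clears the skip once it is consumed):

#include <assert.h>
#include <stdint.h>

struct ent { uint64_t addr; uint32_t len; };

/* Emit entries for everything past the first `skip` bytes of the list. */
static int skip_then_emit(const struct ent *sg, int n, uint32_t skip,
			  struct ent *out)
{
	int used = 0;

	for (int i = 0; i < n; i++) {
		if (sg[i].len <= skip) {	/* entirely skipped */
			skip -= sg[i].len;
			continue;
		}
		out[used].addr = sg[i].addr + skip;	/* partial entry */
		out[used].len = sg[i].len - skip;
		used++;
		skip = 0;
	}
	return used;
}

int main(void)
{
	struct ent sg[] = { { 0x1000, 16 }, { 0x2000, 32 } }, out[2];

	/* skipping 20 bytes of AAD lands 4 bytes into the second entry */
	assert(skip_then_emit(sg, 2, 20, out) == 1);
	assert(out[0].addr == 0x2004 && out[0].len == 28);
	return 0;
}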
@@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}
-/*
- * Set key for a DES operation in an AEAD cipher. This also performs weak key
- * checking if required.
- */
-static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 tmp[DES_EXPKEY_WORDS];
-
- if (unlikely(!des_ekey(tmp, key)) &&
- (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
-
- memcpy(ctx->cipher_key, key, len);
- ctx->cipher_key_len = len;
-
- return 0;
-}
-
-/* Set the key for the AES block cipher component of the AEAD transform. */
-static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
- /*
- * IPSec engine only supports 128 and 256 bit AES keys. If we get a
- * request for any other size (192 bits) then we need to do a software
- * fallback.
- */
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
- /*
- * Set the fallback transform to use the same request flags as
- * the hardware transform.
- */
- ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->sw_cipher->base.crt_flags |=
- tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- return crypto_aead_setkey(ctx->sw_cipher, key, len);
- }
-
- memcpy(ctx->cipher_key, key, len);
- ctx->cipher_key_len = len;
-
- return 0;
-}
-
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
struct crypto_authenc_keys keys;
- int err = -EINVAL;
+ int err;
+
+ crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
@@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (keys.authkeylen > sizeof(ctx->hash_ctx))
goto badkey;
- if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES)
- err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
- else
- err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
-
- if (err)
- goto badkey;
+ memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+ ctx->cipher_key_len = keys.enckeylen;
memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
ctx->hash_key_len = keys.authkeylen;
@@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
{
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
- ctx->auth_size = authsize;
-
- return 0;
+ return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
}
/*
@@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
* be completed in hardware because the hardware may not support certain key
* sizes. In these cases we need to complete the request in software.
*/
-static int spacc_aead_need_fallback(struct spacc_req *req)
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
{
- struct aead_request *aead_req;
- struct crypto_tfm *tfm = req->req->tfm;
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
- aead_req = container_of(req->req, struct aead_request, base);
/*
* If we have a non-supported key-length, then we need to do a
* software fallback.
@@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
{
struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
- int err;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (ctx->sw_cipher) {
- /*
- * Change the request to use the software fallback transform,
- * and once the ciphering has completed, put the old transform
- * back into the request.
- */
- aead_request_set_tfm(req, ctx->sw_cipher);
- err = is_encrypt ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
- aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
- } else
- err = -EINVAL;
+ aead_request_set_tfm(subreq, ctx->sw_cipher);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
- return err;
+ return is_encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
}
static void spacc_aead_complete(struct spacc_req *req)
@@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req)
static int spacc_aead_submit(struct spacc_req *req)
{
- struct crypto_tfm *tfm = req->req->tfm;
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl, proc_len, assoc_len;
struct aead_request *aead_req =
container_of(req->req, struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl, proc_len, assoc_len;
req->result = -EINPROGRESS;
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
- ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+ ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
ctx->hash_ctx, ctx->hash_key_len);
/* Set the source and destination DDT pointers. */
@@ -617,25 +568,15 @@ static int spacc_aead_submit(struct spacc_req *req)
proc_len = aead_req->cryptlen + assoc_len;
/*
- * If we aren't generating an IV, then we need to include the IV in the
- * associated data so that it is included in the hash.
- */
- if (!req->giv) {
- assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
- proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
- } else
- proc_len += req->giv_len;
-
- /*
* If we are decrypting, we need to take the length of the ICV out of
* the processing length.
*/
if (!req->is_encrypt)
- proc_len -= ctx->auth_size;
+ proc_len -= authsize;
writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
- writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+ writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
@@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine)
/*
* Setup an AEAD request for processing. This will configure the engine, load
* the context and then start the packet processing.
- *
- * @giv Pointer to destination address for a generated IV. If the
- * request does not need to generate an IV then this should be set to NULL.
*/
-static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+static int spacc_aead_setup(struct aead_request *req,
unsigned alg_type, bool is_encrypt)
{
- struct crypto_alg *alg = req->base.tfm->__crt_alg;
- struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct spacc_engine *engine = to_spacc_aead(alg)->engine;
struct spacc_req *dev_req = aead_request_ctx(req);
- int err = -EINPROGRESS;
+ int err;
unsigned long flags;
- unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- dev_req->giv = giv;
- dev_req->giv_len = ivsize;
dev_req->req = &req->base;
dev_req->is_encrypt = is_encrypt;
dev_req->result = -EBUSY;
dev_req->engine = engine;
dev_req->complete = spacc_aead_complete;
- if (unlikely(spacc_aead_need_fallback(dev_req)))
+ if (unlikely(spacc_aead_need_fallback(req) ||
+ ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
return spacc_aead_do_fallback(req, alg_type, is_encrypt);
- spacc_aead_make_ddts(dev_req, dev_req->giv);
+ if (err)
+ goto out;
err = -EINPROGRESS;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -728,70 +666,44 @@ out:
static int spacc_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
- return spacc_aead_setup(req, NULL, alg->type, 1);
-}
-
-static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- size_t ivsize = crypto_aead_ivsize(tfm);
- struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
- unsigned len;
- __be64 seq;
-
- memcpy(req->areq.iv, ctx->salt, ivsize);
- len = ivsize;
- if (ivsize > sizeof(u64)) {
- memset(req->giv, 0, ivsize - sizeof(u64));
- len = sizeof(u64);
- }
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + ivsize - len, &seq, len);
-
- return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+ return spacc_aead_setup(req, alg->type, 1);
}
static int spacc_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+ struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
- return spacc_aead_setup(req, NULL, alg->type, 0);
+ return spacc_aead_setup(req, alg->type, 0);
}
/*
* Initialise a new AEAD context. This is responsible for allocating the
* fallback cipher and initialising the context.
*/
-static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct spacc_aead *spacc_alg = to_spacc_aead(alg);
struct spacc_engine *engine = spacc_alg->engine;
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
- ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher)) {
- dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->cra_name);
- ctx->sw_cipher = NULL;
- }
+ if (IS_ERR(ctx->sw_cipher))
+ return PTR_ERR(ctx->sw_cipher);
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- get_random_bytes(ctx->salt, sizeof(ctx->salt));
-
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct spacc_req));
+ crypto_aead_set_reqsize(
+ tfm,
+ max(sizeof(struct spacc_req),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher)));
return 0;
}
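spacc_aead_cra_init() above sizes the request context as the larger of the driver's own spacc_req and an aead_request plus the fallback cipher's reqsize, so the same per-request allocation can host either the hardware descriptor or the software fallback subrequest that spacc_aead_do_fallback() builds in place. The sizing rule with stand-in sizes:

#include <assert.h>
#include <stddef.h>

struct hw_req { char state[96]; };	/* stand-in for spacc_req */
struct sub_hdr { char hdr[64]; };	/* stand-in for struct aead_request */

static size_t reqsize(size_t fallback_reqsize)
{
	size_t hw = sizeof(struct hw_req);
	size_t sw = sizeof(struct sub_hdr) + fallback_reqsize;

	return hw > sw ? hw : sw;	/* room for whichever path runs */
}

int main(void)
{
	assert(reqsize(16) == 96);	/* hardware request dominates */
	assert(reqsize(64) == 128);	/* fallback subrequest dominates */
	return 0;
}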
@@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm)
* Destructor for an AEAD context. This is called when the transform is freed
* and must free the fallback cipher.
*/
-static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- if (ctx->sw_cipher)
- crypto_free_aead(ctx->sw_cipher);
- ctx->sw_cipher = NULL;
+ crypto_free_aead(ctx->sw_cipher);
}
/*
@@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_exit = spacc_ablk_cra_exit,
},
},
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
{
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA |
+ SPA_CTRL_HASH_MODE_HMAC,
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
SPA_CTRL_HASH_ALG_SHA256 |
SPA_CTRL_HASH_MODE_HMAC,
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
.alg = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = 0,
.iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+ SPA_CTRL_CIPH_MODE_CBC |
SPA_CTRL_HASH_ALG_SHA256 |
SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
{
.key_offs = DES_BLOCK_SIZE,
.iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+ SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 |
+ SPA_CTRL_HASH_MODE_HMAC,
.alg = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .givencrypt = spacc_aead_givencrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_module = THIS_MODULE,
},
- .cra_init = spacc_aead_cra_init,
- .cra_exit = spacc_aead_cra_exit,
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .init = spacc_aead_cra_init,
+ .exit = spacc_aead_cra_exit,
},
},
};
@@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev)
engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
engine->algs = ipsec_engine_algs;
engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
+ engine->aeads = ipsec_engine_aeads;
+ engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
@@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev)
engine->algs[i].alg.cra_name);
}
+ INIT_LIST_HEAD(&engine->registered_aeads);
+ for (i = 0; i < engine->num_aeads; ++i) {
+ engine->aeads[i].engine = engine;
+ err = crypto_register_aead(&engine->aeads[i].alg);
+ if (!err) {
+ list_add_tail(&engine->aeads[i].entry,
+ &engine->registered_aeads);
+ ret = 0;
+ }
+ if (err)
+ dev_err(engine->dev, "failed to register alg \"%s\"\n",
+ engine->aeads[i].alg.base.cra_name);
+ else
+ dev_dbg(engine->dev, "registered alg \"%s\"\n",
+ engine->aeads[i].alg.base.cra_name);
+ }
+
return ret;
}
static int spacc_remove(struct platform_device *pdev)
{
+ struct spacc_aead *aead, *an;
struct spacc_alg *alg, *next;
struct spacc_engine *engine = platform_get_drvdata(pdev);
del_timer_sync(&engine->packet_timeout);
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+ list_del(&aead->entry);
+ crypto_unregister_aead(&aead->alg);
+ }
+
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
list_del(&alg->entry);
crypto_unregister_alg(&alg->alg);
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6fdb9e8b22a7..eefccf7b8be7 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_AKCIPHER
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
+ select ASN1
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
@@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC
To compile this as a module, choose M here: the module
will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+ tristate "Support for Intel(R) DH895xCC Virtual Function"
+ depends on X86 && PCI
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xccvf.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index d11481be225e..a3ce0b70e32f 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644
index 000000000000..ee328374dba8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/.gitignore
@@ -0,0 +1 @@
+*-asn1.[ch]
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index e0424dc382fe..df20a9de1c58 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,3 +1,6 @@
+$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
+clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_ctl_drv.o \
@@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \
adf_accel_engine.o \
adf_aer.o \
adf_transport.o \
+ adf_admin.o \
+ adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
+ qat_rsakey-asn1.o \
+ qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5fe902967620..ca853d50b4b7 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -46,13 +46,17 @@
*/
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -79,6 +83,7 @@ struct adf_bar {
struct adf_accel_msix {
struct msix_entry *entries;
char **names;
+ u32 num_entries;
} __packed;
struct adf_accel_pci {
@@ -99,6 +104,7 @@ enum dev_sku_info {
DEV_SKU_2,
DEV_SKU_3,
DEV_SKU_4,
+ DEV_SKU_VF,
DEV_SKU_UNKNOWN,
};
@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
return "SKU3";
case DEV_SKU_4:
return "SKU4";
+ case DEV_SKU_VF:
+ return "SKUVF";
case DEV_SKU_UNKNOWN:
default:
break;
@@ -135,23 +143,29 @@ struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
uint32_t (*get_accel_mask)(uint32_t fuse);
uint32_t (*get_ae_mask)(uint32_t fuse);
+ uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+ uint32_t (*get_pf2vf_offset)(uint32_t i);
+ uint32_t (*get_vintmsk_offset)(uint32_t i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
- void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
- void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+ const uint32_t **cfg);
+ void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
const char *fw_name;
- uint32_t pci_dev_id;
+ const char *fw_mmp_name;
uint32_t fuses;
uint32_t accel_capabilities_mask;
uint16_t accel_mask;
@@ -163,6 +177,7 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
+ uint8_t min_iov_compat_ver;
} __packed;
/* CSR write macro */
@@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
struct icp_qat_fw_loader_handle *fw_loader;
const struct firmware *uof_fw;
+ const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+ struct adf_accel_dev *accel_dev;
+ struct tasklet_struct vf2pf_bh_tasklet;
+ struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct ratelimit_state vf2pf_ratelimit;
+ u32 vf_nr;
+ bool init;
};
struct adf_accel_dev {
@@ -199,6 +224,21 @@ struct adf_accel_dev {
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ union {
+ struct {
+ /* vf_info is non-NULL when SR-IOV is initialized */
+ struct adf_accel_vf_info *vf_info;
+ } pf;
+ struct {
+ char *irq_name;
+ struct tasklet_struct pf2vf_bh_tasklet;
+ struct mutex vf2pf_lock; /* protect CSR access */
+ struct completion iov_msg_completion;
+ uint8_t compatible;
+ uint8_t pf_version;
+ } vf;
+ };
+ bool is_vf;
uint8_t accel_id;
} __packed;
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index fdda8e7ae302..20b08bdcb146 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- void *uof_addr;
- uint32_t uof_size;
+ void *uof_addr, *mmp_addr;
+ u32 uof_size, mmp_size;
+ if (!hw_device->fw_name)
+ return 0;
+
+ if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+ hw_device->fw_mmp_name);
+ return -EFAULT;
+ }
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
- dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
hw_device->fw_name);
- return -EFAULT;
+ goto out_err;
}
uof_size = loader_data->uof_fw->size;
uof_addr = (void *)loader_data->uof_fw->data;
+ mmp_size = loader_data->mmp_fw->size;
+ mmp_addr = (void *)loader_data->mmp_fw->data;
+ qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
- dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
goto out_err;
}
return 0;
@@ -85,11 +97,17 @@ out_err:
void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return;
qat_uclo_del_uof_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
release_firmware(loader_data->uof_fw);
+ release_firmware(loader_data->mmp_fw);
loader_data->uof_fw = NULL;
+ loader_data->mmp_fw = NULL;
loader_data->fw_loader = NULL;
}
@@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
int adf_ae_init(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
if (!loader_data)
@@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
qat_hal_deinit(loader_data->fw_loader);
kfree(accel_dev->fw_loader);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 000000000000..147d755fed97
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,290 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+ dma_addr_t phy_addr;
+ dma_addr_t const_tbl_addr;
+ void *virt_addr;
+ void __iomem *mailbox_addr;
+ struct mutex lock; /* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+ void *in, void *out)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+ int offset = ae * ADF_ADMINMSG_LEN * 2;
+ void __iomem *mailbox = admin->mailbox_addr;
+ int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+ int times, received;
+
+ mutex_lock(&admin->lock);
+
+ if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+ mutex_unlock(&admin->lock);
+ return -EAGAIN;
+ }
+
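+ /*
+ * Copy the request into this AE's message slot and ring the mailbox
+ * doorbell; firmware clears the mailbox once the response is ready,
+ * so poll for that: 50 tries x 20 ms gives roughly a 1 s timeout.
+ */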
+ memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+ ADF_CSR_WR(mailbox, mb_offset, 1);
+ received = 0;
+ for (times = 0; times < 50; times++) {
+ msleep(20);
+ if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+ received = 1;
+ break;
+ }
+ }
+ if (received)
+ memcpy(out, admin->virt_addr + offset +
+ ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+ else
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg to accelerator\n");
+
+ mutex_unlock(&admin->lock);
+ return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ int i;
+
+ memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+ req.init_admin_cmd_id = cmd;
+
+ if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+ req.init_cfg_sz = 1024;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+ }
+ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+ memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+ if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+ resp.init_resp_hdr.status)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends the admin init message to the FW.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+ int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+ if (ret)
+ return ret;
+ return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *csr = pmisc->virt_addr;
+ void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+ u64 reg_val;
+
+ admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!admin)
+ return -ENOMEM;
+ admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
+ if (!admin->virt_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+ kfree(admin);
+ return -ENOMEM;
+ }
+
+ admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+ (void *) const_tab, 1024,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ admin->const_tbl_addr))) {
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+ kfree(admin);
+ return -ENOMEM;
+ }
+ reg_val = (u64)admin->phy_addr;
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+ mutex_init(&admin->lock);
+ admin->mailbox_addr = mailbox;
+ accel_dev->admin = admin;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+
+ if (!admin)
+ return;
+
+ if (admin->virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+
+ dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+ DMA_TO_DEVICE);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+ accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
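
These admin helpers are meant to be reached through the adf_hw_device_data
function table declared in adf_accel_devices.h. A hedged sketch of the
expected wiring in a device-specific init routine (the enclosing function
follows the dh895xcc pattern and is assumed, not taken from this hunk):

	void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
	{
		/* other callbacks elided; admin services from qat_common: */
		hw_data->init_admin_comms = adf_init_admin_comms;
		hw_data->exit_admin_comms = adf_exit_admin_comms;
		hw_data->send_admin_init = adf_send_admin_init;
	}
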
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 2dbc733b8ab2..a57b4194de28 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
+ if (!parent)
+ parent = pdev;
+
if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
@@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = {
* QAT acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
{
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index ab65bc274561..d0879790561f 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
* The table stores device specific config values.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
{
@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+ if (!dev_cfg_data)
+ return;
+
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
* in the given acceleration device
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
@@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
* will be stored.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
{
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 88b82187ac35..c697fb1cdfb5 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -60,7 +60,7 @@
#define ADF_CFG_NO_DEVICE 0xFF
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
#define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
enum adf_cfg_val_type {
ADF_DEC,
@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
enum adf_device_type {
DEV_UNKNOWN = 0,
DEV_DH895XCC,
+ DEV_DH895XCCVF,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 27e16c09230b..7836dffc3d47 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -54,8 +54,8 @@
#include "icp_qat_hal.h"
#define ADF_MAJOR_VERSION 0
-#define ADF_MINOR_VERSION 1
-#define ADF_BUILD_VERSION 3
+#define ADF_MINOR_VERSION 2
+#define ADF_BUILD_VERSION 0
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
__stringify(ADF_MINOR_VERSION) "." \
__stringify(ADF_BUILD_VERSION)
@@ -91,9 +91,13 @@ struct service_hndl {
unsigned long start_status;
char *name;
struct list_head list;
- int admin;
};
+static inline int get_current_node(void)
+{
+ return topology_physical_package_id(smp_processor_id());
+}
+
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
@@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void);
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -141,10 +162,13 @@ int qat_crypto_unregister(void);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
int qat_algs_init(void);
void qat_algs_exit(void);
int qat_algs_register(void);
int qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
#endif
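
The CONFIG_PCI_IOV stubs above let common code call the SR-IOV helpers
unconditionally. On the PF side the real entry point is expected to be
exposed through the driver's pci_driver so that writes to the sysfs
sriov_numvfs attribute reach adf_sriov_configure(); a sketch (the probe
and remove callbacks and the id table are assumed names):

	static struct pci_driver adf_driver = {
		.id_table = adf_pci_tbl,
		.name = ADF_DH895XCC_DEVICE_NAME,
		.probe = adf_probe,
		.remove = adf_remove,
		.sriov_configure = adf_sriov_configure,
	};
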
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e056b9e9bf8a..cd8a12af8ec5 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
}
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
- if (!accel_dev) {
- pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+ if (!accel_dev)
return -ENODEV;
- }
+
hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_aer();
qat_crypto_unregister();
qat_algs_exit();
+ adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 3f0ff9e7d840..8dfdb8f90797 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -50,21 +50,125 @@
#include "adf_common_drv.h"
static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
+struct vf_id_map {
+ u32 bdf;
+ u32 id;
+ u32 fake_id;
+ bool attached;
+ struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+ return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+ PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+ (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+ return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
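+
+ /*
+ * Worked example: adf_get_vf_id() reduces to 8 * (slot - 1) + func,
+ * so assuming VFs sit at slots 1-4, functions 0-7, devfn 0x08
+ * (slot 1, fn 0) maps to id 0 and devfn 0x27 (slot 4, fn 7) to id 31.
+ * adf_get_vf_num() prepends the bus number to form a unique bus/devfn
+ * key for the mapping table below.
+ */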
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+
+ if (ptr->bdf == bdf)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->fake_id == fake)
+ return ptr->id;
+ }
+ return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Clean VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *	for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+ struct vf_id_map *map;
+ struct list_head *ptr, *tmp;
+
+ mutex_lock(&table_lock);
+ list_for_each_safe(ptr, tmp, &vfs_table) {
+ map = list_entry(ptr, struct vf_id_map, list);
+ if (map->bdf != -1)
+ num_devices--;
+
+ if (vf && map->bdf == -1)
+ continue;
+
+ list_del(ptr);
+ kfree(map);
+ }
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data: Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+ struct adf_hw_device_class *class = hw_data->dev_class;
+ struct list_head *itr;
+ int i = 0;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->hw_device->dev_class == class)
+ ptr->hw_device->instance_id = i++;
+
+ if (i == class->instances)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
struct list_head *itr;
+ int ret = 0;
if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
}
mutex_lock(&table_lock);
- list_for_each(itr, &accel_table) {
- struct adf_accel_dev *ptr =
+ atomic_set(&accel_dev->ref_count, 0);
+
+ /* PF on host or VF on guest */
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ struct vf_id_map *map;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
- if (ptr == accel_dev) {
- mutex_unlock(&table_lock);
- return -EEXIST;
+ if (ptr == accel_dev) {
+ ret = -EEXIST;
+ goto unlock;
+ }
}
+
+ list_add_tail(&accel_dev->list, &accel_table);
+ accel_dev->accel_id = num_devices++;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ map->bdf = ~0;
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
+ } else if (accel_dev->is_vf && pf) {
+ /* VF on host */
+ struct adf_accel_vf_info *vf_info;
+ struct vf_id_map *map;
+
+ vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (map) {
+ struct vf_id_map *next;
+
+ accel_dev->accel_id = map->id;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->fake_id++;
+ map->attached = true;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id++;
+ next = list_next_entry(next, list);
+ }
+
+ ret = 0;
+ goto unlock;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ accel_dev->accel_id = num_devices++;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->bdf = adf_get_vf_num(accel_dev);
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
}
- atomic_set(&accel_dev->ref_count, 0);
- list_add_tail(&accel_dev->list, &accel_table);
- accel_dev->accel_id = num_devices++;
+unlock:
mutex_unlock(&table_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
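
A hedged sketch of how a host VF driver would call the new two-argument API
(pci_physfn() resolves the parent PF's pci_dev; error handling elided):

	struct adf_accel_dev *pf =
		adf_devmgr_pci_to_accel_dev(pci_physfn(pdev));
	int ret = adf_devmgr_add_dev(accel_dev, pf);	/* VF on host */

	/* ... and on teardown the same PF must be passed back: */
	adf_devmgr_rm_dev(accel_dev, pf);
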
@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
/**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: void
*/
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
mutex_lock(&table_lock);
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ num_devices--;
+ } else if (accel_dev->is_vf && pf) {
+ struct vf_id_map *map, *next;
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (!map) {
+ dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+ goto unlock;
+ }
+ map->fake_id--;
+ map->attached = false;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id--;
+ next = list_next_entry(next, list);
+ }
+ }
+unlock:
list_del(&accel_dev->list);
- num_devices--;
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
struct list_head *itr;
+ int real_id;
mutex_lock(&table_lock);
+ real_id = adf_get_vf_real_id(id);
+ if (real_id < 0)
+ goto unlock;
+
+ id = real_id;
+
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
-
if (ptr->accel_id == id) {
mutex_unlock(&table_lock);
return ptr;
}
}
+unlock:
mutex_unlock(&table_lock);
return NULL;
}
@@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
return -ENODEV;
}
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_detached_vfs(void)
{
struct list_head *itr;
+ int vfs = 0;
- *num = 0;
- list_for_each(itr, &accel_table) {
- (*num)++;
+ mutex_lock(&table_lock);
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->bdf != ~0 && !ptr->attached)
+ vfs++;
}
+ mutex_unlock(&table_lock);
+ return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+ *num = num_devices - adf_get_num_detached_vfs();
}
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
return atomic_read(&accel_dev->ref_count) != 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount.  Taking the first reference also
+ * takes a reference on the owning module, so it cannot be unloaded
+ * while the device is in use.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when the module refcount could not be taken
+ */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
return -EFAULT;
return 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount.  Dropping the last reference also
+ * drops the module reference taken by adf_dev_get().
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
module_put(accel_dev->owner);
}
+EXPORT_SYMBOL_GPL(adf_dev_put);
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise.
+ */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_dev_started);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 1864bdb36f8f..6849422e04bb 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -44,9 +44,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
#define ADF_ARB_REQ_RING_NUM 8
@@ -58,7 +57,6 @@
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
@@ -89,10 +87,11 @@
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
- uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
- uint32_t arb, i;
- const uint32_t *thd_2_arb_cfg;
+ u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+ u32 arb, i;
+ const u32 *thd_2_arb_cfg;
/* Service arb configured for 32-byte responses and
* ring flow control check enabled. */
@@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
/* Setup worker queue registers */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
/* Map worker threads to service arbiters */
- adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+ hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
if (!thd_2_arb_cfg)
return -EFAULT;
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
return 0;
}
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for/from arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring->bank->bank_number,
ring->bank->ring_mask & 0xFF);
}
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr;
unsigned int i;
@@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
/* Shutdown work queue */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, 0);
/* Unmap worker threads to service arbiters */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 245f43237a2d..ac37a89965ac 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
* Function adds the acceleration service to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_register(struct service_hndl *service)
{
@@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
* Function remove the acceleration service from the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_unregister(struct service_hndl *service)
{
@@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
* Initialize the ring data structures and the admin comms and arbitration
* services.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_init(struct adf_accel_dev *accel_dev)
{
@@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
*/
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to initialise service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise service %s\n",
@@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
+ hw_data->enable_vf2pf_comms(accel_dev);
return 0;
}
@@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
* is ready to be used.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_start(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
@@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_START)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to start service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->start_status);
+ if (hw_data->send_admin_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+ return -EFAULT;
}
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
"Failed to start service %s\n",
@@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_register()) {
+ if (!list_empty(&accel_dev->crypto_list) &&
+ (qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
* is shutting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
@@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_unregister())
+ if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
dev_err(&GET_DEV(accel_dev),
"Failed to unregister crypto algs\n");
+ if (!list_empty(&accel_dev->crypto_list))
+ qat_asym_algs_unregister();
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->start_status))
continue;
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->start_status);
}
}
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->start_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_STOP))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->start_status);
- }
if (wait)
msleep(100);
@@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->init_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
+ hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
@@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 000000000000..5fdbad809343
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,438 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET 0x3A000
+#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+ reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+ reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ * @vf_mask: Mask of VF numbers whose interrupts are to be disabled.
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+ ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+ ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ u32 val, pf2vf_offset, count = 0;
+ u32 local_in_use_mask, local_in_use_pattern;
+ u32 remote_in_use_mask, remote_in_use_pattern;
+ struct mutex *lock; /* lock preventing concurrent access of CSR */
+ u32 int_bit;
+ int ret = 0;
+
+ if (accel_dev->is_vf) {
+ pf2vf_offset = hw_data->get_pf2vf_offset(0);
+ lock = &accel_dev->vf.vf2pf_lock;
+ local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ int_bit = ADF_VF2PF_INT;
+ } else {
+ pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+ lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+ local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ int_bit = ADF_PF2VF_INT;
+ }
+
+ mutex_lock(lock);
+
+ /* Check if PF2VF CSR is in use by remote function */
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote function\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Attempt to get ownership of PF2VF CSR */
+ msg &= ~local_in_use_mask;
+ msg |= local_in_use_pattern;
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+ /* Wait in case the remote func is also attempting to get ownership */
+ msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & local_in_use_mask) != local_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote - collision detected\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * This function now owns the PF2VF CSR. The IN_USE_BY pattern must
+ * remain in the PF2VF CSR for all writes including ACK from remote
+ * until this local function relinquishes the CSR. Send the message
+ * by interrupting the remote.
+ */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+ /* Wait for confirmation from remote func it received the message */
+ do {
+ msleep(ADF_IOV_MSG_ACK_DELAY);
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+ if (val & int_bit) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ val &= ~int_bit;
+ ret = -EIO;
+ }
+
+ /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+ mutex_unlock(lock);
+ return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Message to send
+ * @vf_nr: VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ u32 count = 0;
+ int ret;
+
+ do {
+ ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+ if (ret)
+ msleep(ADF_IOV_MSG_RETRY_DELAY);
+ } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+ /* Read message from the VF */
+ msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+ /* To ACK, clear the VF2PFINT bit */
+ msg &= ~ADF_VF2PF_INT;
+ ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+ if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) VF2PF messages */
+ goto err;
+
+ switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Compatibility Version Request from VF%d vers=%u\n",
+ vf_nr + 1, vf_compat_ver);
+
+ if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) incompatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) compat with PF (vers %d) unkn.\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else {
+ dev_dbg(&GET_DEV(accel_dev),
+ "VF (vers %d) compatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ }
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ /* Set legacy major and minor version num */
+ resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+ 1 << ADF_PF2VF_MINORVERSION_SHIFT;
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = false;
+ }
+ break;
+ default:
+ goto err;
+ }
+
+ if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+ dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+ return;
+err:
+ dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+ vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 msg = 0;
+ int ret;
+
+ msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+ msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+ msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+ BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+ /* Send request from VF to PF */
+ ret = adf_iov_putmsg(accel_dev, msg, 0);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ /* Wait for response */
+ if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+ timeout)) {
+ dev_err(&GET_DEV(accel_dev),
+ "IOV request/response message timeout expired\n");
+ return -EIO;
+ }
+
+ /* Response from PF received, check compatibility */
+ switch (accel_dev->vf.compatible) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF and decides whether it is compatible */
+ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+ break;
+ /* fall through */
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ accel_dev->vf.pf_version,
+ ADF_PFVF_COMPATIBILITY_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
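adf_iov_putmsg() wraps the collision-control handshake in __adf_iov_putmsg(): claim the CSR with the local IN_USE_BY pattern, re-read to detect a write collision, raise the interrupt bit, poll for the ACK, then relinquish the CSR. A minimal sender-side sketch, assuming the message macros from adf_pf2vf_msg.h (next file); on the VF side the vf_nr argument is ignored because a VF always writes its own register:

    /* Sketch: a VF tells the PF it is shutting down. */
    static void vf_notify_shutdown_sketch(struct adf_accel_dev *accel_dev)
    {
            u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM |
                      (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT);

            if (adf_iov_putmsg(accel_dev, msg, 0))
                    dev_err(&GET_DEV(accel_dev),
                            "Failed to send shutdown msg to PF\n");
    }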
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 000000000000..5acd531a11ff
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; such messages had a Msg Origin of 0 and are ignored.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT 2
+#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
+#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
+#define ADF_PF2VF_MINORVERSION_SHIFT 6
+#define ADF_PF2VF_MAJORVERSION_SHIFT 10
+#define ADF_PF2VF_VF_COMPATIBLE 1
+#define ADF_PF2VF_VF_INCOMPATIBLE 2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
+#define ADF_VF2PF_INT BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT 18
+#define ADF_VF2PF_MSGTYPE_INIT 0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_IOV_MSG_ACK_DELAY 2
+#define ADF_IOV_MSG_ACK_MAX_RETRY 100
+#define ADF_IOV_MSG_RETRY_DELAY 5
+#define ADF_IOV_MSG_MAX_RETRIES 3
+#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
+ ADF_IOV_MSG_ACK_MAX_RETRY + \
+ ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
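A worked example of decoding a PF->VF VERSION_RESP word with the masks and shifts defined above; the helper name is hypothetical, but the field extraction mirrors what the VF's response handler is expected to do:

    static void decode_version_resp_sketch(u32 msg)
    {
            /* Two-bit result: COMPATIBLE, INCOMPATIBLE or COMPAT_UNKNOWN. */
            u8 result = (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
            /* Eight-bit field carrying the PF's compatibility version. */
            u8 pf_version = (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
                            ADF_PF2VF_VERSION_RESP_VERS_SHIFT;

            pr_debug("PF version %u, result %u\n", pf_version, result);
    }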
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 000000000000..2f77a4a8cecb
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,309 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS 96
+
+#define ME2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS 12
+
+#define ME2FUNCTION_MAP_REG_SIZE 4
+#define ME2FUNCTION_MAP_VALID BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+ struct work_struct pf2vf_resp_work;
+ struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+ struct adf_pf2vf_resp *pf2vf_resp =
+ container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+ adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+ kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+ struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+ struct adf_pf2vf_resp *pf2vf_resp;
+
+ pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+ if (!pf2vf_resp)
+ return;
+
+ pf2vf_resp->vf_info = vf_info;
+ INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+ queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ struct adf_accel_vf_info *vf_info;
+ int i;
+ u32 reg;
+
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ if (!pf2vf_resp_wq)
+ return -ENOMEM;
+
+ for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ i++, vf_info++) {
+ /* This ptr will be populated when VFs will be created */
+ vf_info->accel_dev = accel_dev;
+ vf_info->vf_nr = i;
+
+ tasklet_init(&vf_info->vf2pf_bh_tasklet,
+ (void *)adf_vf2pf_bh_handler,
+ (unsigned long)vf_info);
+ mutex_init(&vf_info->pf2vf_lock);
+ ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ /* Enable VF to PF interrupts for all VFs */
+ adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+ /*
+ * Due to the hardware design, when SR-IOV and the ring arbiter
+ * are enabled all the VFs supported in hardware must be enabled in
+ * order for all the hardware resources (i.e. bundles) to be usable.
+ * When SR-IOV is enabled, each of the VFs will own one bundle.
+ */
+ return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables SRIOV for the acceleration device.
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+ u32 reg;
+ int i;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ adf_pf2vf_notify_restarting(accel_dev);
+
+ pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+ /* Disable VF to PF interrupts */
+ adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+ tasklet_disable(&vf->vf2pf_bh_tasklet);
+ tasklet_kill(&vf->vf2pf_bh_tasklet);
+ mutex_destroy(&vf->pf2vf_lock);
+ }
+
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ unsigned long val;
+ int ret;
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -EFAULT;
+ }
+
+ if (!iommu_present(&pci_bus_type)) {
+ dev_err(&pdev->dev,
+ "IOMMU must be enabled for SR-IOV to work\n");
+ return -EINVAL;
+ }
+
+ if (accel_dev->pf.vf_info) {
+ dev_info(&pdev->dev, "Already enabled for this device\n");
+ return -EINVAL;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev) ||
+ adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Device busy\n");
+ return -EBUSY;
+ }
+
+ if (adf_dev_stop(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to stop qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ adf_dev_shutdown(accel_dev);
+ }
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ return -EFAULT;
+ val = 0;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ return -EFAULT;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ /* Allocate memory for VF info structs */
+ accel_dev->pf.vf_info = kcalloc(totalvfs,
+ sizeof(struct adf_accel_vf_info),
+ GFP_KERNEL);
+ if (!accel_dev->pf.vf_info)
+ return -ENOMEM;
+
+ if (adf_dev_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ if (adf_dev_start(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ ret = adf_enable_sriov(accel_dev);
+ if (ret)
+ return ret;
+
+ return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
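adf_sriov_configure() is shaped to plug straight into the PCI core's sriov_configure hook, so writing the device's sriov_numvfs sysfs attribute drives the whole stop/reconfigure/start/enable sequence above. A sketch of the expected wiring; every field value except .sriov_configure is a placeholder:

    static struct pci_driver adf_driver_sketch = {
            .id_table = adf_pci_tbl,   /* assumed PCI ID table */
            .name = "qat_device",      /* placeholder name */
            .probe = adf_probe,        /* assumed probe callback */
            .remove = adf_remove,      /* assumed remove callback */
            .sriov_configure = adf_sriov_configure,
    };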
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index db2926bff8a5..3865ae8d96d9 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
+ if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+ dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+ return -EFAULT;
+ }
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
@@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
goto err;
/* Enable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_enable(ring);
+ adf_update_ring_arb(ring);
if (adf_ring_debugfs_add(ring, ring_name)) {
dev_err(&GET_DEV(accel_dev),
@@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
err:
adf_cleanup_ring(ring);
adf_unreserve_ring(bank, ring_num);
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
- struct adf_accel_dev *accel_dev = bank->accel_dev;
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
@@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
adf_cleanup_ring(ring);
}
@@ -463,7 +466,7 @@ err:
* acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 160c9a36c919..6ad7e4e1edca 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -97,8 +97,9 @@
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
- ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+ ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
SIZE) & ~0x4)
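A worked example of the floor fix above, assuming ADF_RING_SIZE_4K is the size code 0x6 and ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) expands to ((1 << (SIZE - 1)) << 7):

    /* ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x6) = (1 << 5) << 7 = 4096 bytes.
     *
     * Old macro, SIZE in bytes:
     *   ADF_RING_SIZE_BYTES_MIN(512) -> (512 < 0x6) ? ... : 512   = 512
     * New macro:
     *   ADF_RING_SIZE_BYTES_MIN(512) -> (512 < 4096) ? 4096 : 512 = 4096
     *
     * The old form compared a byte count against the enum-style size code,
     * so sub-4K requests skipped the intended one-page minimum allocation. */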
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e41986967294..52340b9bb387 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -86,9 +86,7 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
{
struct adf_etr_ring_data *ring = sfile->private;
struct adf_etr_bank_data *bank = ring->bank;
- uint32_t *msg = v;
void __iomem *csr = ring->bank->csr_addr;
- int i, x;
if (v == SEQ_START_TOKEN) {
int head, tail, empty;
@@ -113,18 +111,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
seq_puts(sfile, "----------- Ring data ------------\n");
return 0;
}
- seq_printf(sfile, "%p:", msg);
- x = 0;
- i = 0;
- for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
- seq_printf(sfile, " %08X", *(msg + i));
- if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
- (++x == 8)) {
- seq_printf(sfile, "\n%p:", msg + i + 1);
- x = 0;
- }
- }
- seq_puts(sfile, "\n");
+ seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+ v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index f1e30e24a419..46747f01b1d1 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp {
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 000000000000..0d7a9b51ce9f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ u64 content_desc_addr;
+ u32 content_desc_resrvd;
+ u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 service_type;
+ u8 hdr_flags;
+ u16 comn_req_flags;
+ u16 resrvd4;
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ u8 output_param_count;
+ u8 input_param_count;
+ u16 resrvd1;
+ u32 resrvd2;
+ u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 response_type;
+ u8 hdr_flags;
+ u16 comn_resp_flags;
+ u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+ QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+ ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+ QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
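A sketch of how a completion path might consume the PKE response layout above, assuming QAT_FIELD_GET() and the ICP_QAT_FW_COMN_* byte helpers come from icp_qat_fw.h and that a set status flag indicates failure; the callback name and error handling are illustrative only:

    static void qat_pke_resp_sketch(struct icp_qat_fw_pke_resp *resp)
    {
            u16 flags = resp->pke_resp_hdr.comn_resp_flags;

            if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(flags))
                    pr_err("qat: PKE request 0x%llx failed\n",
                           (unsigned long long)resp->opaque);
            else
                    pr_debug("qat: PKE request 0x%llx completed\n",
                             (unsigned long long)resp->opaque);
    }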
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..2bd913aceaeb 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -53,7 +53,6 @@
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -73,7 +72,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
struct qat_alg_buf {
uint32_t len;
@@ -112,9 +112,6 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
- uint8_t salt[AES_BLOCK_SIZE];
- spinlock_t lock; /* protects qat_alg_aead_ctx struct */
};
struct qat_alg_ablkcipher_ctx {
@@ -129,11 +126,6 @@ struct qat_alg_ablkcipher_ctx {
spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
-static int get_current_node(void)
-{
- return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
switch (qat_hash_alg) {
@@ -277,12 +269,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
struct icp_qat_hw_auth_algo_blk *hash =
@@ -357,12 +349,12 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
return 0;
}
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
int alg,
struct crypto_authenc_keys *keys)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -514,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg)
return 0;
}
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
const uint8_t *key, unsigned int keylen)
{
struct crypto_authenc_keys keys;
int alg;
- if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
- return -EFAULT;
-
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
if (qat_alg_validate_key(keys.enckeylen, &alg))
goto bad_key;
- if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
goto error;
- if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
goto error;
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
error:
return -EFAULT;
@@ -566,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev;
- spin_lock(&ctx->lock);
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
@@ -580,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(node);
if (!inst) {
- spin_unlock(&ctx->lock);
return -EINVAL;
}
@@ -590,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
- spin_unlock(&ctx->lock);
return -ENOMEM;
}
ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
- spin_unlock(&ctx->lock);
goto out_free_enc;
}
}
- spin_unlock(&ctx->lock);
- if (qat_alg_aead_init_sessions(ctx, key, keylen))
+ if (qat_alg_aead_init_sessions(tfm, key, keylen))
goto out_free_all;
return 0;
@@ -653,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- struct scatterlist *assoc, int assoclen,
struct scatterlist *sgl,
- struct scatterlist *sglout, uint8_t *iv,
- uint8_t ivlen,
+ struct scatterlist *sglout,
struct qat_crypto_request *qat_req)
{
struct device *dev = &GET_DEV(inst->accel_dev);
- int i, bufs = 0, sg_nctr = 0;
- int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ int i, sg_nctr = 0;
+ int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
if (unlikely(!n))
return -EINVAL;
@@ -682,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, blp)))
goto err;
- for_each_sg(assoc, sg, assoc_n, i) {
- if (!sg->length)
- continue;
-
- if (!(assoclen > 0))
- break;
-
- bufl->bufers[bufs].addr =
- dma_map_single(dev, sg_virt(sg),
- min_t(int, assoclen, sg->length),
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- assoclen -= sg->length;
- }
-
- if (ivlen) {
- bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = ivlen;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- }
-
for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -723,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
sg_nctr++;
}
- bufl->num_bufs = sg_nctr + bufs;
+ bufl->num_bufs = sg_nctr;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -733,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
n = sg_nents(sglout);
sz_out = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
sg_nctr = 0;
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -743,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err;
bufers = buflout->bufers;
- /* For out of place operation dma map only data and
- * reuse assoc mapping and iv */
- for (i = 0; i < bufs; i++) {
- bufers[i].len = bufl->bufers[i].len;
- bufers[i].addr = bufl->bufers[i].addr;
- }
for_each_sg(sglout, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -763,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].len = sg->length;
sg_nctr++;
}
- buflout->num_bufs = sg_nctr + bufs;
+ buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
@@ -777,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
err:
dev_err(dev, "Failed to map buf for dma\n");
sg_nctr = 0;
- for (i = 0; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
@@ -788,7 +737,7 @@ err:
kfree(bufl);
if (sgl != sglout && buflout) {
n = sg_nents(sglout);
- for (i = bufs; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
@@ -848,12 +797,10 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+ int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
- areq->src, areq->dst, areq->iv,
- AES_BLOCK_SIZE, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -867,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen +
- cipher_param->cipher_length + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -884,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
- int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -894,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
+ uint8_t *iv = areq->iv;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
- areq->src, areq->dst, iv, AES_BLOCK_SIZE,
- qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -913,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- if (enc_iv) {
- cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
- cipher_param->cipher_offset = areq->assoclen;
- } else {
- memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
- cipher_param->cipher_length = areq->cryptlen;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
- }
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+ cipher_param->cipher_length = areq->cryptlen;
+ cipher_param->cipher_offset = areq->assoclen;
+
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -935,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
return -EINPROGRESS;
}
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
- return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- __be64 seq;
-
- memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
- &seq, sizeof(uint64_t));
- return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
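For reference, the qat_alg_aead_genivenc() helper removed above derived the request IV from the per-context salt, overwriting its low 8 bytes with the big-endian sequence number. A minimal standalone sketch of that construction (types and AES_BLOCK_SIZE as used in the driver):

static void example_geniv(u8 *iv, const u8 *salt, u64 seqno)
{
	__be64 seq = cpu_to_be64(seqno);

	memcpy(iv, salt, AES_BLOCK_SIZE);
	memcpy(iv + AES_BLOCK_SIZE - sizeof(seq), &seq, sizeof(seq));
}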
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const uint8_t *key,
unsigned int keylen)
@@ -1025,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1063,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1091,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
- return -EFAULT;
- spin_lock_init(&ctx->lock);
+ return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
- sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request));
- ctx->tfm = tfm;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ sizeof(struct qat_crypto_request));
return 0;
}
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- if (!IS_ERR(ctx->hash_tfm))
- crypto_free_shash(ctx->hash_tfm);
+ crypto_free_shash(ctx->hash_tfm);
if (!inst)
return;
@@ -1188,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
qat_crypto_put_instance(inst);
}
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha1_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+
+static struct aead_alg qat_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
+ .init = qat_alg_aead_sha1_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
}, {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha256_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- },
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
+ .init = qat_alg_aead_sha256_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
}, {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha512_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- },
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
-}, {
+ .init = qat_alg_aead_sha512_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+} };
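A hypothetical caller sketch, not part of this patch: after the conversion the same algorithm names are requested through the generic AEAD API, and the QAT implementation is selected when its cra_priority (4001) outranks the software one.

#include <crypto/aead.h>
#include <linux/err.h>

static int example_alloc_qat_aead(void)
{
	struct crypto_aead *tfm;

	/* may resolve to "qat_aes_cbc_hmac_sha1" on a system with QAT */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_aead_setkey(), aead_request_alloc(), etc. ... */
	crypto_free_aead(tfm);
	return 0;
}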
+
+static struct crypto_alg qat_algs[] = { {
.cra_name = "cbc(aes)",
.cra_driver_name = "qat_aes_cbc",
.cra_priority = 4001,
@@ -1280,35 +1183,54 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- if (atomic_add_return(1, &active_dev) == 1) {
- int i;
+ int ret = 0, i;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags =
- (qat_algs[i].cra_type == &crypto_aead_type) ?
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
- return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
- }
- return 0;
+ for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+ qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ if (ret)
+ goto unlock;
+
+ for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+ qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+ ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ if (ret)
+ goto unreg_algs;
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+
+unreg_algs:
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ goto unlock;
}
int qat_algs_unregister(void)
{
- if (atomic_sub_return(1, &active_dev) == 0)
- return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
return 0;
}
int qat_algs_init(void)
{
- atomic_set(&active_dev, 0);
- crypto_get_default_rng();
return 0;
}
void qat_algs_exit(void)
{
- crypto_put_default_rng();
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 000000000000..e87f51023ba4
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,652 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include "qat_rsakey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+ union {
+ struct {
+ dma_addr_t m;
+ dma_addr_t e;
+ dma_addr_t n;
+ } enc;
+ struct {
+ dma_addr_t c;
+ dma_addr_t d;
+ dma_addr_t n;
+ } dec;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+ union {
+ struct {
+ dma_addr_t c;
+ } enc;
+ struct {
+ dma_addr_t m;
+ } dec;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+ char *n;
+ char *e;
+ char *d;
+ dma_addr_t dma_n;
+ dma_addr_t dma_e;
+ dma_addr_t dma_d;
+ unsigned int key_sz;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+ struct qat_rsa_input_params in;
+ struct qat_rsa_output_params out;
+ dma_addr_t phy_in;
+ dma_addr_t phy_out;
+ char *src_align;
+ struct icp_qat_fw_pke_request req;
+ struct qat_rsa_ctx *ctx;
+ int err;
+} __aligned(64);
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+ struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+ char *ptr = areq->dst;
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+ req->in.enc.m);
+ else
+ dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx->key_sz;
+ /* Set the correct length of the output */
+ while (areq->dst_len && !(*ptr)) {
+ areq->dst_len--;
+ ptr++;
+ }
+
+ if (areq->dst_len != req->ctx->key_sz)
+ memmove(areq->dst, ptr, areq->dst_len);
+
+ akcipher_request_complete(areq, err);
+}
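The loop above trims leading zero bytes so that areq->dst_len covers only the significant bytes of the big-endian result. An equivalent standalone sketch (illustrative only):

static size_t example_strip_leading_zeros(u8 *buf, size_t len)
{
	u8 *p = buf;

	while (len && !*p) {
		len--;
		p++;
	}
	/* e.g. 00 00 12 34 becomes 12 34 with len == 2 */
	if (p != buf)
		memmove(buf, p, len);
	return len;
}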
+
+void qat_alg_asym_callback(void *_resp)
+{
+ struct icp_qat_fw_pke_resp *resp = _resp;
+
+ qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_EP_512;
+ case 1024:
+ return PKE_RSA_EP_1024;
+ case 1536:
+ return PKE_RSA_EP_1536;
+ case 2048:
+ return PKE_RSA_EP_2048;
+ case 3072:
+ return PKE_RSA_EP_3072;
+ case 4096:
+ return PKE_RSA_EP_4096;
+ default:
+ return 0;
+ }
+}
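qat_rsa_enc_fn_id() takes the modulus length in bytes (len << 3 converts it to bits) and returns the firmware PKE function ID, or 0 for unsupported sizes. An illustrative sanity check, assuming a kernel context:

static void example_fn_id_lookup(void)
{
	/* 256 bytes == 2048 bits, so a 2K modulus maps to PKE_RSA_EP_2048 */
	WARN_ON(qat_rsa_enc_fn_id(256) != PKE_RSA_EP_2048);
	/* unsupported lengths return 0, which callers turn into -EINVAL */
	WARN_ON(qat_rsa_enc_fn_id(100) != 0);
}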
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP1_512;
+ case 1024:
+ return PKE_RSA_DP1_1024;
+ case 1536:
+ return PKE_RSA_DP1_1536;
+ case 2048:
+ return PKE_RSA_DP1_2048;
+ case 3072:
+ return PKE_RSA_DP1_3072;
+ case 4096:
+ return PKE_RSA_DP1_4096;
+ default:
+ return 0;
+ }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->e))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.enc.e = ctx->dma_e;
+ qat_req->in.enc.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+ * src can be any size within the valid range, but the HW expects it
+ * to match the size of the modulus n; if the sizes differ we must
+ * allocate a new buffer and copy the src data into it (a padding
+ * sketch follows this function). Otherwise we just map the
+ * user-provided buffer.
+ */
+ if (req->src_len < ctx->key_sz) {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.enc.m,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ memcpy(qat_req->src_align + shift, req->src, req->src_len);
+ } else {
+ qat_req->src_align = NULL;
+ qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
+ DMA_TO_DEVICE);
+ }
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
+ DMA_FROM_DEVICE);
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ if (unlikely((!qat_req->src_align &&
+ dma_mapping_error(dev, qat_req->in.enc.m)) ||
+ dma_mapping_error(dev, qat_req->out.enc.c) ||
+ dma_mapping_error(dev, qat_req->phy_in) ||
+ dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.enc.c))
+ dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
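A minimal sketch of the padding rule used above: operands are big-endian numbers, so a src shorter than the modulus is right-aligned into a zeroed key_sz-byte buffer (here buf is assumed to come from dma_zalloc_coherent, so the leading bytes are already zero):

static void example_left_pad(u8 *buf, unsigned int key_sz,
			     const u8 *src, unsigned int src_len)
{
	/* the value lands in the low-order end; the high bytes stay zero */
	memcpy(buf + (key_sz - src_len), src, src_len);
}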
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->d))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.dec.d = ctx->dma_d;
+ qat_req->in.dec.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+ * src can be any size within the valid range, but the HW expects it
+ * to match the size of the modulus n; if the sizes differ we must
+ * allocate a new buffer and copy the src data into it. Otherwise we
+ * just map the user-provided buffer.
+ */
+ if (req->src_len < ctx->key_sz) {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.dec.c,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ memcpy(qat_req->src_align + shift, req->src, req->src_len);
+ } else {
+ qat_req->src_align = NULL;
+ qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
+ DMA_TO_DEVICE);
+ }
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
+ DMA_FROM_DEVICE);
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ if (unlikely((!qat_req->src_align &&
+ dma_mapping_error(dev, qat_req->in.dec.c)) ||
+ dma_mapping_error(dev, qat_req->out.dec.m) ||
+ dma_mapping_error(dev, qat_req->phy_in) ||
+ dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.dec.m))
+ dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ ctx->key_sz = vlen;
+ ret = -EINVAL;
+ /* In FIPS mode only allow key size 2K & 3K */
+ if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+ /* invalid key size provided */
+ if (!qat_rsa_enc_fn_id(ctx->key_sz))
+ goto err;
+
+ ret = -ENOMEM;
+ ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ if (!ctx->n)
+ goto err;
+
+ memcpy(ctx->n, ptr, ctx->key_sz);
+ return 0;
+err:
+ ctx->key_sz = 0;
+ ctx->n = NULL;
+ return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->e = NULL;
+ return -EINVAL;
+ }
+
+ ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ if (!ctx->e)
+ return -ENOMEM;
+ memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+}
+
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+ while (vlen && !*ptr) {
+ ptr++;
+ vlen--;
+ }
+
+ ret = -EINVAL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ goto err;
+
+ /* In FIPS mode only allow key size 2K & 3K */
+ if (fips_enabled && (vlen != 256 && vlen != 384)) {
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+
+ ret = -ENOMEM;
+ ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ if (!ctx->d)
+ goto err;
+
+ memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+err:
+ ctx->d = NULL;
+ return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ int ret;
+
+ /* Free the old key if any */
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+ ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ if (!ctx->n || !ctx->e) {
+ /* invalid key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+free:
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ ctx->d = NULL;
+ }
+ if (ctx->e) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ ctx->e = NULL;
+ }
+ if (ctx->n) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ ctx->n = NULL;
+ ctx->key_sz = 0;
+ }
+ return ret;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->key_sz = 0;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+ qat_crypto_put_instance(ctx->inst);
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+ .encrypt = qat_rsa_enc,
+ .decrypt = qat_rsa_dec,
+ .sign = qat_rsa_dec,
+ .verify = qat_rsa_enc,
+ .setkey = qat_rsa_setkey,
+ .init = qat_rsa_init_tfm,
+ .exit = qat_rsa_exit_tfm,
+ .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "qat-rsa",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+ },
+};
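A hypothetical caller sketch, not part of this patch: the new transform is reached through the generic akcipher API by name, with "qat-rsa" winning selection via its cra_priority of 1000.

#include <crypto/akcipher.h>
#include <linux/err.h>

static int example_alloc_qat_rsa(void)
{
	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... crypto_akcipher_setkey(), akcipher_request_alloc(), etc. ... */
	crypto_free_akcipher(tfm);
	return 0;
}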
+
+int qat_asym_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ }
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ crypto_unregister_akcipher(&rsa);
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3bd705ca5973..07c2f9f9d1fc 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
if (inst->pke_rx)
adf_remove_ring(inst->pke_rx);
- if (inst->rnd_tx)
- adf_remove_ring(inst->rnd_tx);
-
- if (inst->rnd_rx)
- adf_remove_ring(inst->rnd_rx);
-
list_del(list_ptr);
kfree(inst);
}
@@ -109,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
+
if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
dev_to_node(&GET_DEV(accel_dev)) < 0) &&
- adf_dev_started(accel_dev))
+ adf_dev_started(accel_dev) &&
+ !list_empty(&accel_dev->crypto_list))
break;
accel_dev = NULL;
}
@@ -158,7 +154,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
INIT_LIST_HEAD(&accel_dev->crypto_list);
strlcpy(key, ADF_NUM_CY, sizeof(key));
-
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
return -EFAULT;
@@ -187,7 +182,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
if (kstrtoul(val, 10, &num_msg_sym))
goto err;
+
num_msg_sym = num_msg_sym >> 1;
+
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
goto err;
@@ -202,11 +199,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
msg_size, key, NULL, 0, &inst->sym_tx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, NULL, 0, &inst->rnd_tx))
- goto err;
-
msg_size = msg_size >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
@@ -220,15 +212,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
&inst->sym_rx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
- &inst->rnd_rx))
- goto err;
-
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
+ msg_size, key, qat_alg_asym_callback, 0,
&inst->pke_rx))
goto err;
}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index d503007b49e6..dc0273fe3620 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -57,8 +57,6 @@ struct qat_crypto_instance {
struct adf_etr_ring_data *sym_rx;
struct adf_etr_ring_data *pke_tx;
struct adf_etr_ring_data *pke_rx;
- struct adf_etr_ring_data *rnd_tx;
- struct adf_etr_ring_data *rnd_rx;
struct adf_accel_dev *accel_dev;
struct list_head list;
unsigned long state;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 274ff7e9de6e..8e711d1c3084 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET 0x800
#define ICP_DH895XCC_EP_OFFSET 0x3a000
-#define ICP_DH895XCC_PMISC_BAR 1
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
unsigned char ae;
@@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_loader_handle *handle;
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *bar =
+ struct adf_bar *misc_bar =
&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+ struct adf_bar *sram_bar =
+ &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_CAP_OFFSET;
- handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_AE_OFFSET;
- handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+ handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
+ ICP_DH895XCC_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
-
+ handle->hal_sram_addr_v = sram_bar->virt_addr;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
new file mode 100644
index 000000000000..97b0e02b600a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+ n INTEGER ({ qat_rsa_get_n }),
+ e INTEGER ({ qat_rsa_get_e }),
+ d INTEGER ({ qat_rsa_get_d })
+}
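The grammar above is compiled by the kernel's ASN.1 compiler into qat_rsakey-asn1.h, producing the qat_rsakey_decoder that qat_rsa_setkey() feeds to asn1_ber_decoder(); each INTEGER action hands the raw value to the named callback. An assumed, illustrative layout of a matching BER blob (tag/length bytes are examples only):

/*
 * 30 82 xx xx            SEQUENCE (RsaKey)
 *    02 82 01 01 00 ...  INTEGER n -> qat_rsa_get_n()
 *    02 03 01 00 01      INTEGER e -> qat_rsa_get_e() (65537)
 *    02 82 01 00 ...     INTEGER d -> qat_rsa_get_d()
 */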
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 1e27f9f7fddf..c48f181e8941 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
- unsigned int i;
- struct icp_qat_uof_memvar_attr *mem_val_attr;
-
- mem_val_attr =
- (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
- sizeof(struct icp_qat_uof_initmem));
-
switch (init_mem->region) {
- case ICP_QAT_UOF_SRAM_REGION:
- if ((init_mem->addr + init_mem->num_in_bytes) >
- ICP_DH895XCC_PESRAM_BAR_SIZE) {
- pr_err("QAT: initmem on SRAM is out of range");
- return -EINVAL;
- }
- for (i = 0; i < init_mem->val_attr_num; i++) {
- qat_uclo_wr_sram_by_words(handle,
- init_mem->addr +
- mem_val_attr->offset_in_byte,
- &mem_val_attr->value, 4);
- mem_val_attr++;
- }
- break;
case ICP_QAT_UOF_LMEM_REGION:
if (qat_uclo_init_lmem_seg(handle, init_mem))
return -EINVAL;
@@ -990,6 +969,12 @@ out_err:
return -EFAULT;
}
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+}
+
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 25171c557043..8c79c543740f 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o \
adf_isr.o \
- adf_dh895xcc_hw_data.o \
- adf_hw_arbiter.o \
- qat_admin.o \
- adf_admin.o
+ adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
deleted file mode 100644
index e4666065c399..000000000000
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <adf_accel_devices.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
-
-#define ADF_ADMINMSG_LEN 32
-
-struct adf_admin_comms {
- dma_addr_t phy_addr;
- void *virt_addr;
- void __iomem *mailbox_addr;
- struct mutex lock; /* protects adf_admin_comms struct */
-};
-
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
- int offset = ae * ADF_ADMINMSG_LEN * 2;
- void __iomem *mailbox = admin->mailbox_addr;
- int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
- int times, received;
-
- mutex_lock(&admin->lock);
-
- if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
- mutex_unlock(&admin->lock);
- return -EAGAIN;
- }
-
- memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
- ADF_CSR_WR(mailbox, mb_offset, 1);
- received = 0;
- for (times = 0; times < 50; times++) {
- msleep(20);
- if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
- received = 1;
- break;
- }
- }
- if (received)
- memcpy(out, admin->virt_addr + offset +
- ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
- else
- dev_err(&GET_DEV(accel_dev),
- "Failed to send admin msg to accelerator\n");
-
- mutex_unlock(&admin->lock);
- return received ? 0 : -EFAULT;
-}
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin;
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
- void __iomem *csr = pmisc->virt_addr;
- void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
- uint64_t reg_val;
-
- admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- dev_to_node(&GET_DEV(accel_dev)));
- if (!admin)
- return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
- if (!admin->virt_addr) {
- dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
- kfree(admin);
- return -ENOMEM;
- }
- reg_val = (uint64_t)admin->phy_addr;
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
- mutex_init(&admin->lock);
- admin->mailbox_addr = mailbox;
- accel_dev->admin = admin;
- return 0;
-}
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
-
- if (!admin)
- return;
-
- if (admin->virt_addr)
- dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- admin->virt_addr, admin->phy_addr);
-
- mutex_destroy(&admin->lock);
- kfree(admin);
- accel_dev->admin = NULL;
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index b1386922d7a2..ff54257eced4 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -45,8 +45,9 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
-#include "adf_common_drv.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
return ADF_DH895XCC_ETR_BAR;
}
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_SRAM_BAR;
+}
+
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
@@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
}
}
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+ return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+ return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- ADF_DH895XCC_SMIA0_MASK);
+ accel_dev->pf.vf_info ? 0 :
+ GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
ADF_DH895XCC_SMIA1_MASK);
}
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcc_class;
hw_data->instance_id = dh895xcc_class.instances++;
hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
- hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
@@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
- hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 25269a9f24a2..88dffb297346 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,7 @@
#define ADF_DH895x_HW_DATA_H_
/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
@@ -79,10 +80,11 @@
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-/* Admin Messages Registers */
-#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
-#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
-#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
-#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C)
+#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8)
+#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_mmp.bin"
#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 1bde45b7a3c5..f8dd14f232c8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = adf_driver_name,
.probe = adf_probe,
- .remove = adf_remove
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
};
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
- adf_dev_shutdown(accel_dev);
-
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
}
if (accel_dev->hw_device) {
- switch (accel_dev->hw_device->pci_dev_id) {
+ switch (accel_pci_dev->pci_dev->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
break;
@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
break;
}
kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
- adf_devmgr_rm_dev(accel_dev);
- pci_release_regions(accel_pci_dev->pci_dev);
- pci_disable_device(accel_pci_dev->pci_dev);
- kfree(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
@@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 4;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
@@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 12;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
@@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret;
+ int ret, bar_mask;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev)) {
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
@@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
return -ENODEV;
}
- accel_pci_dev = &accel_dev->accel_pci_dev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
&hw_data->fuses);
@@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- accel_pci_dev->pci_dev = pdev;
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
@@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, hw_data->instance_id);
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir\n");
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
@@ -313,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
@@ -324,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_request_regions(pdev, adf_driver_name)) {
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
}
/* Read accelerator capabilities mask */
@@ -332,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
- for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
- struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
- bar_nr = i * 2;
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
- dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
}
pci_set_master(pdev);
@@ -352,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_err_free_reg;
}
ret = adf_dev_configure(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
- if (ret) {
- adf_dev_stop(accel_dev);
- goto out_err;
- }
+ if (ret)
+ goto out_err_dev_stop;
- return 0;
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
return ret;
}
@@ -391,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev)
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
- if (qat_admin_register())
- return -EFAULT;
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
@@ -411,7 +417,6 @@ static int __init adfdrv_init(void)
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
- qat_admin_unregister();
}
module_init(adfdrv_init);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
index a2fbb6ce75cd..85ff245bd1d8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
uint32_t const **arb_map_config);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out);
-int qat_admin_register(void);
-int qat_admin_unregister(void);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 0d03c109c2d3..5570f78795c1 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -59,21 +59,30 @@
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
- int i;
-
- for (i = 0; i < msix_num_entries; i++)
- pci_dev_info->msix_entries.entries[i].entry = i;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled, add entries for each bank */
+ if (!accel_dev->pf.vf_info) {
+ int i;
+
+ msix_num_entries += hw_data->num_banks;
+ for (i = 0; i < msix_num_entries; i++)
+ pci_dev_info->msix_entries.entries[i].entry = i;
+ } else {
+ pci_dev_info->msix_entries.entries[0].entry =
+ hw_data->num_banks;
+ }
if (pci_enable_msix_exact(pci_dev_info->pci_dev,
pci_dev_info->msix_entries.entries,
msix_num_entries)) {
- dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
return -EFAULT;
}
return 0;
@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
- dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
- accel_dev->accel_id);
- return IRQ_HANDLED;
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info) {
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+ u32 vf_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+ 0x0000FFFF) << 16) |
+ ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+ 0x01FFFE00) >> 9);
+
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ bool irq_handled = false;
+ int i;
+
+ /* Disable VF2PF interrupts for VFs with pending ints */
+ adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+ /*
+ * Schedule tasklets to handle VF2PF interrupt BHs
+ * unless the VF is malicious and is attempting to
+ * flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, (const unsigned long *)&vf_mask,
+ (sizeof(vf_mask) * BITS_PER_BYTE)) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr + 1);
+ continue;
+ }
+
+ /* Tasklet will re-enable ints from this VF */
+ tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+ irq_handled = true;
+ }
+
+ if (irq_handled)
+ return IRQ_HANDLED;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+ accel_dev->accel_id);
+
+ return IRQ_NONE;
}
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int ret, i;
+ int ret, i = 0;
char *name;
- /* Request msix irq for all banks */
- for (i = 0; i < hw_data->num_banks; i++) {
- struct adf_etr_bank_data *bank = &etr_data->banks[i];
- unsigned int cpu, cpus = num_online_cpus();
-
- name = *(pci_dev_info->msix_entries.names + i);
- snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
- "qat%d-bundle%d", accel_dev->accel_id, i);
- ret = request_irq(msixe[i].vector,
- adf_msix_isr_bundle, 0, name, bank);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d for %s\n",
- msixe[i].vector, name);
- return ret;
+ /* Request msix irq for all banks unless SR-IOV enabled */
+ if (!accel_dev->pf.vf_info) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ struct adf_etr_bank_data *bank = &etr_data->banks[i];
+ unsigned int cpu, cpus = num_online_cpus();
+
+ name = *(pci_dev_info->msix_entries.names + i);
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-bundle%d", accel_dev->accel_id, i);
+ ret = request_irq(msixe[i].vector,
+ adf_msix_isr_bundle, 0, name, bank);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to enable irq %d for %s\n",
+ msixe[i].vector, name);
+ return ret;
+ }
+
+ cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+ i) % cpus;
+ irq_set_affinity_hint(msixe[i].vector,
+ get_cpu_mask(cpu));
}
-
- cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
- irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
}
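
The affinity hint spreads bundle vectors round-robin across online CPUs, offset per device. The expression can be exercised on its own (all values below are demo inputs; device 1 with 4 banks on an 8-CPU box lands on CPUs 4..7 while device 0 got 0..3):

#include <stdio.h>

int main(void)
{
	unsigned int accel_id = 1, num_banks = 4, cpus = 8, i;

	for (i = 0; i < num_banks; i++)
		printf("bundle %u -> cpu %u\n", i,
		       (accel_id * num_banks + i) % cpus);
	return 0;
}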
/* Request msix irq for AE */
@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int i;
+ int i = 0;
- for (i = 0; i < hw_data->num_banks; i++) {
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, &etr_data->banks[i]);
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ irq_set_affinity_hint(msixe[i].vector, NULL);
+ free_irq(msixe[i].vector, &etr_data->banks[i]);
+ }
}
irq_set_affinity_hint(msixe[i].vector, NULL);
free_irq(msixe[i].vector, accel_dev);
@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
char **names;
struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+ if (!accel_dev->pf.vf_info)
+ msix_num_entries += hw_data->num_banks;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
if (!(*(names + i)))
goto err;
}
+ accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
accel_dev->accel_pci_dev.msix_entries.entries = entries;
accel_dev->accel_pci_dev.msix_entries.names = names;
return 0;
@@ -198,13 +267,11 @@ err:
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
char **names = accel_dev->accel_pci_dev.msix_entries.names;
int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < msix_num_entries; i++)
+ for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
kfree(*(names + i));
kfree(names);
}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 000000000000..85399fcbbad4
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o \
+ adf_isr.o \
+ adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 000000000000..a9a27eff41fb
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,172 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+ .name = ADF_DH895XCCVF_DEVICE_NAME,
+ .type = DEV_DH895XCCVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcciov_class;
+ hw_data->instance_id = dh895xcciov_class.instances++;
+ hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+ hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 000000000000..8f6babfef629
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,68 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
+#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0)
+
+#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
+#define ADF_DH895XCC_VINTSOU_BUN BIT(0)
+#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1)
+
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 000000000000..789426f21882
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,393 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = adf_driver_name,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_cfg_dev_remove(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ unsigned long val, bank = 0;
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ goto err;
+ if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&bank, ADF_DEC))
+ goto err;
+
+ val = bank;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
+
+ val = 128;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ (int)bank);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 1;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
+ return -EINVAL;
+}
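
The configuration keys above are assembled with snprintf() from a section prefix, an instance number and a suffix macro. A stand-alone sketch of the naming pattern; the literal strings are stand-ins for the ADF_CY/ADF_RING_BANK_NUM macros from adf_cfg_strings.h, whose exact values are not shown in this patch:

#include <stdio.h>

int main(void)
{
	char key[64];

	/* mirrors snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0) */
	snprintf(key, sizeof(key), "%s%d%s", "Cy", 0, "BankNumber");
	printf("%s\n", key);
	return 0;
}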
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+ int ret, bar_mask;
+
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+	/* Get accelerator and accelerator engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+ if (!accel_dev->debugfs_dir) {
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+	/* set the DMA mask */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ ret = -EFAULT;
+ goto out_err_disable;
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ }
+
+ if (pci_request_regions(pdev, adf_driver_name)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.iov_msg_completion);
+
+ ret = adf_dev_configure(accel_dev);
+ if (ret)
+ goto out_err_free_reg;
+
+ ret = adf_dev_init(accel_dev);
+ if (ret)
+ goto out_err_dev_shutdown;
+
+ ret = adf_dev_start(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ if (adf_dev_stop(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
index 55b7a8e48bad..e270e4a63d14 100644
--- a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
@@ -44,64 +44,14 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <icp_qat_fw_init_admin.h>
+#ifndef ADF_DH895xVF_DRV_H_
+#define ADF_DH895xVF_DRV_H_
#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include "adf_drv.h"
-
-static struct service_hndl qat_admin;
-
-static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct icp_qat_fw_init_admin_req req;
- struct icp_qat_fw_init_admin_resp resp;
- int i;
-
- memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
- req.init_admin_cmd_id = cmd;
- for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
- memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
- if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
- resp.init_resp_hdr.status)
- return -EFAULT;
- }
- return 0;
-}
-
-static int qat_admin_start(struct adf_accel_dev *accel_dev)
-{
- return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
-}
-
-static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
- enum adf_event event)
-{
- int ret;
-
- switch (event) {
- case ADF_EVENT_START:
- ret = qat_admin_start(accel_dev);
- break;
- case ADF_EVENT_STOP:
- case ADF_EVENT_INIT:
- case ADF_EVENT_SHUTDOWN:
- default:
- ret = 0;
- }
- return ret;
-}
-
-int qat_admin_register(void)
-{
- memset(&qat_admin, 0, sizeof(struct service_hndl));
- qat_admin.event_hld = qat_admin_event_handler;
- qat_admin.name = "qat_admin";
- qat_admin.admin = 1;
- return adf_service_register(&qat_admin);
-}
-
-int qat_admin_unregister(void)
-{
- return adf_service_unregister(&qat_admin);
-}
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
new file mode 100644
index 000000000000..87c5d8adb125
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
@@ -0,0 +1,258 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_drv.h"
+#include "adf_dh895xccvf_hw_data.h"
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+ if (stat) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to enable MSI interrupts\n");
+ return stat;
+ }
+
+ accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+ if (!accel_dev->vf.irq_name)
+ return -ENOMEM;
+
+ return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ kfree(accel_dev->vf.irq_name);
+ pci_disable_msi(pdev);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 msg;
+
+ /* Read the message from PF */
+ msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+
+ if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) PF2VF messages */
+ goto err;
+
+ switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting msg received from PF 0x%x\n", msg);
+ adf_dev_stop(accel_dev);
+ break;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Version resp received from PF 0x%x\n", msg);
+ accel_dev->vf.pf_version =
+ (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+ accel_dev->vf.compatible =
+ (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ complete(&accel_dev->vf.iov_msg_completion);
+ break;
+ default:
+ goto err;
+ }
+
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
+ ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return;
+err:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+ msg);
+}
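
The handler above pulls the message type out of the PF2VF CSR word with a mask and a shift. The extraction pattern in isolation; the shift and mask below are placeholders, since the real ADF_PF2VF_MSGTYPE_* values live in adf_pf2vf_msg.h and are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MSGTYPE_SHIFT 2			/* placeholder value */
#define DEMO_MSGTYPE_MASK  (0xFu << DEMO_MSGTYPE_SHIFT)

int main(void)
{
	uint32_t msg = 0x0000000D;		/* invented CSR value */

	printf("msgtype = %u\n",
	       (msg & DEMO_MSGTYPE_MASK) >> DEMO_MSGTYPE_SHIFT);
	return 0;
}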
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+ (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+ mutex_init(&accel_dev->vf.vf2pf_lock);
+ return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+ tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+ mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+ struct adf_accel_dev *accel_dev = privdata;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 v_int;
+
+ /* Read VF INT source CSR to determine the source of VF interrupt */
+ v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+
+ /* Check for PF2VF interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+ /* Disable PF to VF interrupt */
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ /* Schedule tasklet to handle interrupt BH */
+ tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+ return IRQ_HANDLED;
+ }
+
+ /* Check bundle interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+ /* Disable Flag and Coalesce Ring Interrupts */
+ WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+ 0);
+ tasklet_hi_schedule(&bank->resp_handler);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
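
The dispatch order matters: a PF2VF source is handled (and acknowledged) before a bundle source is even considered. A user-space rendering of the same decision, with the bit positions taken from adf_dh895xccvf_hw_data.h in this patch (BUN is bit 0, PF2VF is bit 1):

#include <stdint.h>
#include <stdio.h>

#define VINTSOU_BUN   (1u << 0)
#define VINTSOU_PF2VF (1u << 1)

static const char *classify(uint32_t v_int)
{
	if (v_int & VINTSOU_PF2VF)
		return "pf2vf";		/* PF message, checked first */
	if (v_int & VINTSOU_BUN)
		return "bundle";	/* ring bank response */
	return "spurious";
}

int main(void)
{
	printf("%s %s %s\n", classify(2), classify(1), classify(0));
	return 0;
}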
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ unsigned int cpu;
+ int ret;
+
+ snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+ (void *)accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+ accel_dev->vf.irq_name);
+ return ret;
+ }
+ cpu = accel_dev->accel_id % num_online_cpus();
+ irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+ return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+ (unsigned long)priv_data->banks);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_disable(&priv_data->banks[0].resp_handler);
+ tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, (void *)accel_dev);
+ adf_cleanup_bh(accel_dev);
+ adf_cleanup_pf2vf_bh(accel_dev);
+ adf_disable_msi(accel_dev);
+}
+
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ if (adf_enable_msi(accel_dev))
+ goto err_out;
+
+ if (adf_setup_pf2vf_bh(accel_dev))
+ goto err_out;
+
+ if (adf_setup_bh(accel_dev))
+ goto err_out;
+
+ if (adf_request_msi_irq(accel_dev))
+ goto err_out;
+
+ return 0;
+err_out:
+ adf_vf_isr_resource_free(accel_dev);
+ return -EFAULT;
+}
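
Note the unwind style: every failure in adf_vf_isr_resource_alloc() funnels into the full adf_vf_isr_resource_free(), which assumes each teardown step copes with resources that were never set up. The same idiom in miniature (user space, with free() standing in for the NULL-tolerant kfree()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static char *a, *b;

static void res_free(void)
{
	free(b);	/* free(NULL) is a no-op, like kfree(NULL) */
	free(a);
}

static int res_alloc(bool fail_second)
{
	a = malloc(16);
	if (!a)
		goto err;
	b = fail_second ? NULL : malloc(16);
	if (!b)
		goto err;
	return 0;
err:
	res_free();	/* safe even when b was never allocated */
	return -1;
}

int main(void)
{
	printf("%d\n", res_alloc(true));	/* -1, and nothing leaks */
	return 0;
}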
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d17f90..be2f5049256a 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req)
if (rctx->buflen) {
sg_init_table(rctx->sg, 2);
sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
- scatterwalk_sg_chain(rctx->sg, 2, req->src);
+ sg_chain(rctx->sg, 2, req->src);
req->src = rctx->sg;
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 397a500b3d8a..820dc3acb28c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
- scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+ sg_chain(rctx->in_sg_chain, 2, req->src);
rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;
@@ -1516,7 +1516,7 @@ static int sahara_probe(struct platform_device *pdev)
}
/* Allocate HW descriptors */
- dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+ dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
&dev->hw_phys_desc[0], GFP_KERNEL);
if (!dev->hw_desc[0]) {
@@ -1528,34 +1528,31 @@ static int sahara_probe(struct platform_device *pdev)
sizeof(struct sahara_hw_desc);
/* Allocate space for iv and key */
- dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+ dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
&dev->key_phys_base, GFP_KERNEL);
if (!dev->key_base) {
dev_err(&pdev->dev, "Could not allocate memory for key\n");
- err = -ENOMEM;
- goto err_key;
+ return -ENOMEM;
}
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
/* Allocate space for context: largest digest + message length field */
- dev->context_base = dma_alloc_coherent(&pdev->dev,
+ dev->context_base = dmam_alloc_coherent(&pdev->dev,
SHA256_DIGEST_SIZE + 4,
&dev->context_phys_base, GFP_KERNEL);
if (!dev->context_base) {
dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
- err = -ENOMEM;
- goto err_key;
+ return -ENOMEM;
}
/* Allocate space for HW links */
- dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+ dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
&dev->hw_phys_link[0], GFP_KERNEL);
if (!dev->hw_link[0]) {
dev_err(&pdev->dev, "Could not allocate hw links\n");
- err = -ENOMEM;
- goto err_link;
+ return -ENOMEM;
}
for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
@@ -1572,15 +1569,14 @@ static int sahara_probe(struct platform_device *pdev)
dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
if (IS_ERR(dev->kthread)) {
- err = PTR_ERR(dev->kthread);
- goto err_link;
+ return PTR_ERR(dev->kthread);
}
init_completion(&dev->dma_completion);
err = clk_prepare_enable(dev->clk_ipg);
if (err)
- goto err_link;
+ return err;
err = clk_prepare_enable(dev->clk_ahb);
if (err)
goto clk_ipg_disable;
@@ -1620,25 +1616,11 @@ static int sahara_probe(struct platform_device *pdev)
return 0;
err_algs:
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
- dev->hw_link[0], dev->hw_phys_link[0]);
kthread_stop(dev->kthread);
dev_ptr = NULL;
clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
clk_disable_unprepare(dev->clk_ipg);
-err_link:
- dma_free_coherent(&pdev->dev,
- 2 * AES_KEYSIZE_128,
- dev->key_base, dev->key_phys_base);
- dma_free_coherent(&pdev->dev,
- SHA256_DIGEST_SIZE,
- dev->context_base, dev->context_phys_base);
-err_key:
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
- dev->hw_desc[0], dev->hw_phys_desc[0]);
return err;
}
@@ -1647,16 +1629,6 @@ static int sahara_remove(struct platform_device *pdev)
{
struct sahara_dev *dev = platform_get_drvdata(pdev);
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
- dev->hw_link[0], dev->hw_phys_link[0]);
- dma_free_coherent(&pdev->dev,
- 2 * AES_KEYSIZE_128,
- dev->key_base, dev->key_phys_base);
- dma_free_coherent(&pdev->dev,
- SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
- dev->hw_desc[0], dev->hw_phys_desc[0]);
-
kthread_stop(dev->kthread);
sahara_unregister_algs(dev);
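
The sahara conversion works because dmam_alloc_coherent() is the device-managed variant of dma_alloc_coherent(): the allocation is recorded against the device and released automatically when probe fails or the driver detaches, which is what lets the err_key/err_link labels and the explicit frees in sahara_remove() disappear. A toy registry showing the managed-resource idea (plain malloc/free stand in for the devres machinery):

#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 8

static void *res[MAX_RES];
static int nres;

static void *managed_alloc(size_t n)
{
	void *p = malloc(n);

	if (p && nres < MAX_RES)
		res[nres++] = p;	/* remembered for automatic release */
	return p;
}

static void device_detach(void)
{
	while (nres > 0)
		free(res[--nres]);	/* one sweep releases everything */
}

int main(void)
{
	if (!managed_alloc(32) || !managed_alloc(64)) {
		device_detach();	/* error path needs no per-buffer frees */
		return 1;
	}
	device_detach();
	return 0;
}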
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
new file mode 100644
index 000000000000..8f4c7a273141
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
+sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
new file mode 100644
index 000000000000..e070c316e8b7
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -0,0 +1,542 @@
+/*
+ * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128-, 192- and
+ * 256-bit keys in CBC and ECB mode, and also for DES and 3DES in
+ * CBC and ECB mode.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+
+static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+ u32 mode = ctx->mode;
+ /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ u32 rx_cnt = SS_RX_DEFAULT;
+ u32 tx_cnt = 0;
+ u32 spaces;
+ u32 v;
+ int i, err = 0;
+ unsigned int ileft = areq->nbytes;
+ unsigned int oleft = areq->nbytes;
+ unsigned int todo;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ if (!areq->info) {
+ dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+ return -EINVAL;
+ }
+
+ if (!areq->src || !areq->dst) {
+ dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ss->slock);
+
+ for (i = 0; i < op->keylen; i += 4)
+ writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->info + i * 4);
+ writel(v, ss->base + SS_IV0 + i * 4);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ sg_miter_next(&mo);
+ if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+
+ ileft = areq->nbytes / 4;
+ oleft = areq->nbytes / 4;
+ oi = 0;
+ oo = 0;
+ do {
+ todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ if (todo > 0) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+ oi += todo * 4;
+ }
+ if (oi == mi.length) {
+ sg_miter_next(&mi);
+ oi = 0;
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+ todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ if (todo > 0) {
+ oleft -= todo;
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oo += todo * 4;
+ }
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (mo.length > 0);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = readl(ss->base + SS_IV0 + i * 4);
+ *(u32 *)(areq->info + i * 4) = v;
+ }
+ }
+
+release_ss:
+ sg_miter_stop(&mi);
+ sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
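
Each round of the polling loop above transfers min3(FIFO space, words still owed, words left in the current SG entry) 32-bit words. The bookkeeping in isolation, with plain counters standing in for the FCSR read and the sg_miter state:

#include <stdio.h>

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned int rx_cnt = 32;	/* SS_RX_DEFAULT on activation */
	unsigned int ileft = 24;	/* 96 bytes of input, counted in words */
	unsigned int sg_words = 8;	/* words left in the current SG entry */

	/* the SG entry is the limiting factor this round */
	printf("write %u words\n", min3u(rx_cnt, ileft, sg_words));
	return 0;
}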
+
+/* Generic function that supports SGs whose sizes are not a multiple of 4 */
+static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ int no_chunk = 1;
+ struct scatterlist *in_sg = areq->src;
+ struct scatterlist *out_sg = areq->dst;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+ u32 mode = ctx->mode;
+ /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ u32 rx_cnt = SS_RX_DEFAULT;
+ u32 tx_cnt = 0;
+ u32 v;
+ u32 spaces;
+ int i, err = 0;
+ unsigned int ileft = areq->nbytes;
+ unsigned int oleft = areq->nbytes;
+ unsigned int todo;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+	char buf[4 * SS_RX_MAX];	/* buffer for linearizing the SG src */
+	char bufo[4 * SS_TX_MAX];	/* buffer for linearizing the SG dst */
+	unsigned int ob = 0;	/* offset in buf */
+	unsigned int obo = 0;	/* offset in bufo */
+	unsigned int obl = 0;	/* length of data in bufo */
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ if (!areq->info) {
+ dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+ return -EINVAL;
+ }
+
+ if (!areq->src || !areq->dst) {
+ dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * If all SGs have a size that is a multiple of 4,
+	 * we can use the optimized SS function.
+	 */
+ while (in_sg && no_chunk == 1) {
+ if ((in_sg->length % 4) != 0)
+ no_chunk = 0;
+ in_sg = sg_next(in_sg);
+ }
+ while (out_sg && no_chunk == 1) {
+ if ((out_sg->length % 4) != 0)
+ no_chunk = 0;
+ out_sg = sg_next(out_sg);
+ }
+
+ if (no_chunk == 1)
+ return sun4i_ss_opti_poll(areq);
+
+ spin_lock_bh(&ss->slock);
+
+ for (i = 0; i < op->keylen; i += 4)
+ writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->info + i * 4);
+ writel(v, ss->base + SS_IV0 + i * 4);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ sg_miter_next(&mo);
+ if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+ ileft = areq->nbytes;
+ oleft = areq->nbytes;
+ oi = 0;
+ oo = 0;
+
+ while (oleft > 0) {
+ if (ileft > 0) {
+			/*
+			 * todo is the number of consecutive 4-byte words
+			 * that we can read from the current SG
+			 */
+ todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ if (todo > 0 && ob == 0) {
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+ todo);
+ ileft -= todo * 4;
+ oi += todo * 4;
+ } else {
+				/*
+				 * Not enough consecutive bytes: linearize
+				 * into buf (todo is in bytes here). After
+				 * the copy, once a whole number of 4-byte
+				 * words has accumulated we must be able to
+				 * write all of buf in one pass, which is
+				 * why we min() against rx_cnt.
+				 */
+ todo = min3(rx_cnt * 4 - ob, ileft,
+ mi.length - oi);
+ memcpy(buf + ob, mi.addr + oi, todo);
+ ileft -= todo;
+ oi += todo;
+ ob += todo;
+ if (ob % 4 == 0) {
+ writesl(ss->base + SS_RXFIFO, buf,
+ ob / 4);
+ ob = 0;
+ }
+ }
+ if (oi == mi.length) {
+ sg_miter_next(&mi);
+ oi = 0;
+ }
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+ dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+ mode,
+ oi, mi.length, ileft, areq->nbytes, rx_cnt,
+ oo, mo.length, oleft, areq->nbytes, tx_cnt,
+ todo, ob);
+
+ if (tx_cnt == 0)
+ continue;
+		/* todo is in 4-byte words */
+ todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ if (todo > 0) {
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oleft -= todo * 4;
+ oo += todo * 4;
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } else {
+			/*
+			 * Read obl bytes into bufo; read as much as
+			 * possible in order to empty the device.
+			 */
+ readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+ obl = tx_cnt * 4;
+ obo = 0;
+ do {
+				/*
+				 * How many bytes can we copy?
+				 * No more than the remaining SG size and
+				 * no more than the remaining buffer;
+				 * no need to test against oleft.
+				 */
+ todo = min(mo.length - oo, obl - obo);
+ memcpy(mo.addr + oo, bufo + obo, todo);
+ oleft -= todo;
+ obo += todo;
+ oo += todo;
+ if (oo == mo.length) {
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (obo < obl);
+ /* bufo must be fully used here */
+ }
+ }
+ if (areq->info) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = readl(ss->base + SS_IV0 + i * 4);
+ *(u32 *)(areq->info + i * 4) = v;
+ }
+ }
+
+release_ss:
+ sg_miter_stop(&mi);
+ sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+
+ return err;
+}
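
When an SG entry is not word-aligned, the slow path above accumulates bytes in buf and flushes to the RX FIFO only once a whole number of 32-bit words is available. The same buffering pattern in user space (strings of odd lengths stand in for unaligned SG entries, printf for the FIFO write):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *chunks[] = { "abc", "defgh", "ijkl" };	/* odd sizes */
	char buf[16];
	unsigned int ob = 0, i, w;

	for (i = 0; i < 3; i++) {
		memcpy(buf + ob, chunks[i], strlen(chunks[i]));
		ob += strlen(chunks[i]);
		if (ob % 4 == 0) {	/* flush whole words only */
			for (w = 0; w < ob / 4; w++)
				printf("word: %.4s\n", buf + 4 * w);
			ob = 0;
		}
	}
	return 0;
}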
+
+/* CBC AES */
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB AES */
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC DES */
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB DES */
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC 3DES */
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB 3DES */
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+ rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+ op->keymode;
+ return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct sun4i_ss_alg_template *algt;
+
+ memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ op->ss = algt->ss;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
+
+ return 0;
+}
+
+/* check and set the AES key, prepare the mode to be used */
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+
+ switch (keylen) {
+ case 128 / 8:
+ op->keymode = SS_AES_128BITS;
+ break;
+ case 192 / 8:
+ op->keymode = SS_AES_192BITS;
+ break;
+ case 256 / 8:
+ op->keymode = SS_AES_256BITS;
+ break;
+ default:
+ dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
+
+/* check and set the DES key, prepare the mode to be used */
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+ u32 flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (unlikely(keylen != DES_KEY_SIZE)) {
+ dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ flags = crypto_ablkcipher_get_flags(tfm);
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ dev_dbg(ss->dev, "Weak key %u\n", keylen);
+ return -EINVAL;
+ }
+
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
+
+/* check and set the 3DES key, prepare the mode to be used */
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+ struct sun4i_ss_ctx *ss = op->ss;
+
+ if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+ dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ op->keylen = keylen;
+ memcpy(op->key, key, keylen);
+ return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
new file mode 100644
index 000000000000..eab6fe227fa0
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -0,0 +1,425 @@
+/*
+ * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SS.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
+static struct sun4i_ss_alg_template ss_algs[] = {
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .mode = SS_OP_MD5,
+ .alg.hash = {
+ .init = sun4i_hash_init,
+ .update = sun4i_hash_update,
+ .final = sun4i_hash_final,
+ .finup = sun4i_hash_finup,
+ .digest = sun4i_hash_digest,
+ .export = sun4i_hash_export_md5,
+ .import = sun4i_hash_import_md5,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun4i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = sun4i_hash_crainit
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .mode = SS_OP_SHA1,
+ .alg.hash = {
+ .init = sun4i_hash_init,
+ .update = sun4i_hash_update,
+ .final = sun4i_hash_final,
+ .finup = sun4i_hash_finup,
+ .digest = sun4i_hash_digest,
+ .export = sun4i_hash_export_sha1,
+ .import = sun4i_hash_import_sha1,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun4i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = sun4i_hash_crainit
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun4i_ss_aes_setkey,
+ .encrypt = sun4i_ss_cbc_aes_encrypt,
+ .decrypt = sun4i_ss_cbc_aes_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sun4i_ss_aes_setkey,
+ .encrypt = sun4i_ss_ecb_aes_encrypt,
+ .decrypt = sun4i_ss_ecb_aes_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = sun4i_ss_des_setkey,
+ .encrypt = sun4i_ss_cbc_des_encrypt,
+ .decrypt = sun4i_ss_cbc_des_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = sun4i_ss_des_setkey,
+ .encrypt = sun4i_ss_ecb_des_encrypt,
+ .decrypt = sun4i_ss_ecb_des_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_cbc_des3_encrypt,
+ .decrypt = sun4i_ss_cbc_des3_decrypt,
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_ecb_des3_encrypt,
+ .decrypt = sun4i_ss_ecb_des3_decrypt,
+ }
+ }
+},
+};
+
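For orientation, these template entries are consumed through the generic kernel crypto API rather than called directly. A minimal sketch, assuming the 4.3-era ablkcipher interface; example_encrypt and its key/scatterlist parameters are hypothetical, and error handling is trimmed to the essentials:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch: encrypt one AES-128-CBC block; "cbc(aes)" resolves to
 * cbc-aes-sun4i-ss when this driver wins the priority selection. */
static int example_encrypt(struct scatterlist *src_sg,
			   struct scatterlist *dst_sg,
			   const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
	if (err)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ablkcipher_request_set_crypt(req, src_sg, dst_sg, AES_BLOCK_SIZE, iv);
	err = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return err;
}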
+static int sun4i_ss_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 v;
+ int err, i;
+ unsigned long cr;
+ const unsigned long cr_ahb = 24 * 1000 * 1000;
+ const unsigned long cr_mod = 150 * 1000 * 1000;
+ struct sun4i_ss_ctx *ss;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ss->base)) {
+ dev_err(&pdev->dev, "Cannot request MMIO\n");
+ return PTR_ERR(ss->base);
+ }
+
+ ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(ss->ssclk)) {
+ err = PTR_ERR(ss->ssclk);
+ dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
+ return err;
+ }
+ dev_dbg(&pdev->dev, "clock ss acquired\n");
+
+ ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(ss->busclk)) {
+ err = PTR_ERR(ss->busclk);
+ dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
+ return err;
+ }
+ dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
+
+ ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+ if (IS_ERR(ss->reset)) {
+ if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+ return PTR_ERR(ss->reset);
+ dev_info(&pdev->dev, "no reset control found\n");
+ ss->reset = NULL;
+ }
+
+ /* Enable both clocks */
+ err = clk_prepare_enable(ss->busclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+ return err;
+ }
+ err = clk_prepare_enable(ss->ssclk);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+ goto error_ssclk;
+ }
+
+ /*
+ * Check that the clocks have the rates given in the datasheet,
+ * and try to set the module clock to the maximum allowed rate.
+ */
+ err = clk_set_rate(ss->ssclk, cr_mod);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+ goto error_clk;
+ }
+
+ /* Deassert reset if we have a reset control */
+ if (ss->reset) {
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot deassert reset control\n");
+ goto error_clk;
+ }
+ }
+
+ /*
+ * Clocks below the datasheet rate only hurt performance, so they
+ * are not reported as errors; warn on overclocked clocks instead.
+ */
+ cr = clk_get_rate(ss->busclk);
+ if (cr >= cr_ahb)
+ dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+ cr, cr / 1000000, cr_ahb);
+ else
+ dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+ cr, cr / 1000000, cr_ahb);
+
+ cr = clk_get_rate(ss->ssclk);
+ if (cr == cr_mod)
+ dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+ else if (cr < cr_mod)
+ dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+ else
+ dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+ cr, cr / 1000000, cr_mod);
+
+ /*
+ * The datasheet names this field "Die Bonding ID"; it looks like a
+ * sort of Security System revision number. Since the A80 seems to
+ * have another version of the SS, this information could be useful.
+ */
+ writel(SS_ENABLED, ss->base + SS_CTL);
+ v = readl(ss->base + SS_CTL);
+ v >>= 16;
+ v &= 0x07;
+ dev_info(&pdev->dev, "Die ID %d\n", v);
+ writel(0, ss->base + SS_CTL);
+
+ ss->dev = &pdev->dev;
+
+ spin_lock_init(&ss->slock);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ ss_algs[i].ss = ss;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ err = crypto_register_alg(&ss_algs[i].alg.crypto);
+ if (err != 0) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.crypto.cra_name);
+ goto error_alg;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ if (err != 0) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ goto error_alg;
+ }
+ break;
+ }
+ }
+ platform_set_drvdata(pdev, ss);
+ return 0;
+error_alg:
+ i--;
+ for (; i >= 0; i--) {
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto_unregister_alg(&ss_algs[i].alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
+ }
+ }
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+error_clk:
+ clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+ clk_disable_unprepare(ss->busclk);
+ return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+ int i;
+ struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto_unregister_alg(&ss_algs[i].alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
+ }
+ }
+
+ writel(0, ss->base + SS_CTL);
+ if (ss->reset)
+ reset_control_assert(ss->reset);
+ clk_disable_unprepare(ss->busclk);
+ clk_disable_unprepare(ss->ssclk);
+ return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+ { .compatible = "allwinner,sun4i-a10-crypto" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+ .probe = sun4i_ss_probe,
+ .remove = sun4i_ss_remove,
+ .driver = {
+ .name = "sun4i-ss",
+ .of_match_table = a20ss_crypto_of_match_table,
+ },
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644
index 000000000000..ff8031498809
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -0,0 +1,492 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun4i_req_ctx));
+ return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun4i_ss_alg_template *algt;
+
+ memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ op->ss = algt->ss;
+ op->mode = algt->mode;
+
+ return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct md5_state *octx = out;
+ int i;
+
+ octx->byte_count = op->byte_count + op->len;
+
+ memcpy(octx->block, op->buf, op->len);
+
+ if (op->byte_count > 0) {
+ for (i = 0; i < 4; i++)
+ octx->hash[i] = op->hash[i];
+ } else {
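+ /*
+ * Nothing was hashed yet, so export the initial value; MD5 and
+ * SHA1 happen to share these first four constants, hence the
+ * SHA1_H names.
+ */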
+ octx->hash[0] = SHA1_H0;
+ octx->hash[1] = SHA1_H1;
+ octx->hash[2] = SHA1_H2;
+ octx->hash[3] = SHA1_H3;
+ }
+
+ return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ const struct md5_state *ictx = in;
+ int i;
+
+ sun4i_hash_init(areq);
+
+ op->byte_count = ictx->byte_count & ~0x3F;
+ op->len = ictx->byte_count & 0x3F;
+
+ memcpy(op->buf, ictx->block, op->len);
+
+ for (i = 0; i < 4; i++)
+ op->hash[i] = ictx->hash[i];
+
+ return 0;
+}
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sha1_state *octx = out;
+ int i;
+
+ octx->count = op->byte_count + op->len;
+
+ memcpy(octx->buffer, op->buf, op->len);
+
+ if (op->byte_count > 0) {
+ for (i = 0; i < 5; i++)
+ octx->state[i] = op->hash[i];
+ } else {
+ octx->state[0] = SHA1_H0;
+ octx->state[1] = SHA1_H1;
+ octx->state[2] = SHA1_H2;
+ octx->state[3] = SHA1_H3;
+ octx->state[4] = SHA1_H4;
+ }
+
+ return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ const struct sha1_state *ictx = in;
+ int i;
+
+ sun4i_hash_init(areq);
+
+ op->byte_count = ictx->count & ~0x3F;
+ op->len = ictx->count & 0x3F;
+
+ memcpy(op->buf, ictx->buffer, op->len);
+
+ for (i = 0; i < 5; i++)
+ op->hash[i] = ictx->state[i];
+
+ return 0;
+}
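These export/import pairs implement the ahash state (de)serialization contract. A consumer-side sketch of how they get exercised; example_resume is hypothetical and assumes both requests were already allocated on the same tfm:

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Sketch: checkpoint a partial SHA1 and resume it in a second request. */
static int example_resume(struct ahash_request *req1,
			  struct ahash_request *req2, u8 *digest)
{
	struct sha1_state state;
	int err;

	err = crypto_ahash_export(req1, &state);  /* sun4i_hash_export_sha1() */
	if (err)
		return err;

	err = crypto_ahash_import(req2, &state);  /* sun4i_hash_import_sha1() */
	if (err)
		return err;

	ahash_request_set_crypt(req2, NULL, digest, 0);
	return crypto_ahash_final(req2);
}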
+
+/*
+ * sun4i_hash_update: update hash engine
+ *
+ * Used for both SHA1 and MD5.
+ * Data is written to the SS in 32-bit words.
+ *
+ * Since we cannot leave partial data and hash state in the engine,
+ * we need to retrieve the hash state at the end of this function,
+ * and the state can only be retrieved at 64-byte boundaries.
+ *
+ * So the first task is to round the number of bytes written to the SS
+ * down to a multiple of 64; the extra bytes go to the temporary buffer
+ * op->buf, which holds op->len bytes.
+ *
+ * At the beginning of update():
+ * if op->len + areq->nbytes < 64
+ * => all data goes to the wait buffer (op->buf) and end = 0
+ * otherwise, write all data from op->buf to the device and set end so
+ * that the total written completes a multiple of 64 bytes.
+ *
+ * Example (see also the standalone sketch after this function):
+ * update1 with 60 bytes => op->len = 60
+ * update2 with 60 bytes => one more word is needed to reach 64 bytes,
+ * end = 4
+ * so write all data from op->buf plus one word from the SGs,
+ * then store the remaining data in op->buf
+ * final state: op->len = 56
+ */
+int sun4i_hash_update(struct ahash_request *areq)
+{
+ u32 v, ivmode = 0;
+ unsigned int i = 0;
+ /*
+ * i counts the total bytes read from the SGs, to be compared with
+ * areq->nbytes; we cannot rely on the SG lengths alone since their
+ * sum may be greater than areq->nbytes
+ */
+
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sun4i_ss_ctx *ss = op->ss;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ unsigned int in_i = 0; /* advancement in the current SG */
+ unsigned int end;
+ /*
+ * end is the position where we must stop writing to the device,
+ * to be compared with i
+ */
+ int in_r, err = 0;
+ unsigned int todo;
+ u32 spaces, rx_cnt = SS_RX_DEFAULT;
+ size_t copied = 0;
+ struct sg_mapping_iter mi;
+
+ dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+ __func__, crypto_tfm_alg_name(areq->base.tfm),
+ op->byte_count, areq->nbytes, op->mode,
+ op->len, op->hash[0]);
+
+ if (areq->nbytes == 0)
+ return 0;
+
+ /* protect against overflow */
+ if (areq->nbytes > UINT_MAX - op->len) {
+ dev_err(ss->dev, "Cannot process too large request\n");
+ return -EINVAL;
+ }
+
+ if (op->len + areq->nbytes < 64) {
+ /* linearize data to op->buf */
+ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ op->buf + op->len, areq->nbytes, 0);
+ op->len += copied;
+ return 0;
+ }
+
+ end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+ if (end > areq->nbytes || areq->nbytes - end > 63) {
+ dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+ end, areq->nbytes);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ss->slock);
+
+ /*
+ * if some data has been processed before,
+ * we need to restore the partial hash state
+ */
+ if (op->byte_count > 0) {
+ ivmode = SS_IV_ARBITRARY;
+ for (i = 0; i < 5; i++)
+ writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+ }
+ /* Enable the device */
+ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+ i = 0;
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_next(&mi);
+ in_i = 0;
+
+ do {
+ /*
+ * we need to linearize in two cases:
+ * - the buffer already holds data
+ * - the current SG has fewer than 4 bytes remaining
+ */
+ if (op->len > 0 || (mi.length - in_i) < 4) {
+ /*
+ * once in here, there are two reasons to stop:
+ * - the buffer is full
+ * - we reached the end of the data to write
+ */
+ while (op->len < 64 && i < end) {
+ /* how many bytes we can read from current SG */
+ in_r = min3(mi.length - in_i, end - i,
+ 64 - op->len);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+ in_i += in_r;
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ if (op->len > 3 && (op->len % 4) == 0) {
+ /* write buf to the device */
+ writesl(ss->base + SS_RXFIFO, op->buf,
+ op->len / 4);
+ op->byte_count += op->len;
+ op->len = 0;
+ }
+ }
+ if (mi.length - in_i > 3 && i < end) {
+ /* how many bytes we can read from current SG */
+ in_r = min3(mi.length - in_i, areq->nbytes - i,
+ ((mi.length - in_i) / 4) * 4);
+ /* how many 32-bit words we can write to the device */
+ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+ writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+ op->byte_count += todo * 4;
+ i += todo * 4;
+ in_i += todo * 4;
+ rx_cnt -= todo;
+ if (rx_cnt == 0) {
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ }
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ } while (i < end);
+ /* final linear */
+ if ((areq->nbytes - i) < 64) {
+ while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+ /* how many bytes we can read from current SG */
+ in_r = min3(mi.length - in_i, areq->nbytes - i,
+ 64 - op->len);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+ in_i += in_r;
+ if (in_i == mi.length) {
+ sg_miter_next(&mi);
+ in_i = 0;
+ }
+ }
+ }
+
+ sg_miter_stop(&mi);
+
+ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+ i = 0;
+ do {
+ v = readl(ss->base + SS_CTL);
+ i++;
+ } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+ if (i >= SS_TIMEOUT) {
+ dev_err_ratelimited(ss->dev,
+ "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+ i, SS_TIMEOUT, v, areq->nbytes);
+ err = -EIO;
+ goto release_ss;
+ }
+
+ /* get the partial hash only if something was written */
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+ op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+release_ss:
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
+
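The 64-byte bookkeeping above is the part most worth double-checking, so here is a standalone userspace sketch, not driver code, that replays the example from the comment; ss_end is a hypothetical mirror of the computation of end:

#include <assert.h>

/* Mirrors "end = ((areq->nbytes + op->len) / 64) * 64 - op->len" above. */
static unsigned int ss_end(unsigned int buffered, unsigned int nbytes)
{
	return ((nbytes + buffered) / 64) * 64 - buffered;
}

int main(void)
{
	/* update1 stored 60 bytes; update2 brings 60 more */
	unsigned int end = ss_end(60, 60);

	assert(end == 4);			/* one word completes the block */
	assert(60 + 60 - (60 + end) == 56);	/* bytes left over in op->buf */
	return 0;
}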
+/*
+ * sun4i_hash_final: finalize hashing operation
+ *
+ * If any bytes remain, write them, then ask the SS to finalize the
+ * hashing operation.
+ *
+ * The RX FIFO size is not checked in this function since the FIFO
+ * offers 32 free slots after each enable and this function never
+ * writes more than 32 words.
+ */
+int sun4i_hash_final(struct ahash_request *areq)
+{
+ u32 v, ivmode = 0;
+ unsigned int i;
+ unsigned int j = 0;
+ int zeros, err = 0;
+ unsigned int index, padlen;
+ __be64 bits;
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct sun4i_ss_ctx *ss = op->ss;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ u32 bf[32];
+ u32 wb = 0;
+ unsigned int nwait, nbw = 0;
+
+ dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
+ __func__, op->byte_count, areq->nbytes, op->mode,
+ op->len, op->hash[0]);
+
+ spin_lock_bh(&ss->slock);
+
+ /*
+ * if we have already written something,
+ * restore the partial hash state
+ */
+ if (op->byte_count > 0) {
+ ivmode = SS_IV_ARBITRARY;
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+ writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+ }
+ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+ /* write the remaining words of the wait buffer */
+ if (op->len > 0) {
+ nwait = op->len / 4;
+ if (nwait > 0) {
+ writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+ op->byte_count += 4 * nwait;
+ }
+ nbw = op->len - 4 * nwait;
+ if (nbw) {
+ wb = *(u32 *)(op->buf + nwait * 4);
+ /* keep only the nbw valid bytes; a 32-bit shift here would be UB */
+ wb &= (0xFFFFFFFF >> (4 - nbw) * 8);
+ }
+ }
+
+ /* write the remaining bytes of the nbw buffer */
+ if (nbw > 0) {
+ wb |= ((1 << 7) << (nbw * 8));
+ bf[j++] = wb;
+ } else {
+ bf[j++] = 1 << 7;
+ }
+
+ /*
+ * Number of zero words needed to pad the message to 64 bytes, minus
+ * 8 (the size field) and minus 4 (the final-1 word); the computation
+ * follows other MD5/SHA1 implementations (see also the standalone
+ * check after this function).
+ */
+
+ /* we have already sent 4 more bytes, of which nbw are data */
+ if (op->mode == SS_OP_MD5) {
+ index = (op->byte_count + 4) & 0x3f;
+ op->byte_count += nbw;
+ if (index > 56)
+ zeros = (120 - index) / 4;
+ else
+ zeros = (56 - index) / 4;
+ } else {
+ op->byte_count += nbw;
+ index = op->byte_count & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ zeros = (padlen - 1) / 4;
+ }
+
+ memset(bf + j, 0, 4 * zeros);
+ j += zeros;
+
+ /* write the length of data */
+ if (op->mode == SS_OP_SHA1) {
+ bits = cpu_to_be64(op->byte_count << 3);
+ bf[j++] = bits & 0xffffffff;
+ bf[j++] = (bits >> 32) & 0xffffffff;
+ } else {
+ bf[j++] = (op->byte_count << 3) & 0xffffffff;
+ bf[j++] = (op->byte_count >> 29) & 0xffffffff;
+ }
+ writesl(ss->base + SS_RXFIFO, bf, j);
+
+ /* Tell the SS to stop the hashing */
+ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+ /*
+ * Wait for the SS to finish the hash.
+ * A timeout can only happen in case of bad overclocking
+ * or a driver bug.
+ */
+ i = 0;
+ do {
+ v = readl(ss->base + SS_CTL);
+ i++;
+ } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+ if (i >= SS_TIMEOUT) {
+ dev_err_ratelimited(ss->dev,
+ "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+ i, SS_TIMEOUT, v, areq->nbytes);
+ err = -EIO;
+ goto release_ss;
+ }
+
+ /* Get the hash from the device */
+ if (op->mode == SS_OP_SHA1) {
+ for (i = 0; i < 5; i++) {
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ memcpy(areq->result + i * 4, &v, 4);
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+ v = readl(ss->base + SS_MD0 + i * 4);
+ memcpy(areq->result + i * 4, &v, 4);
+ }
+ }
+
+release_ss:
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_bh(&ss->slock);
+ return err;
+}
+
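As a sanity check on the padding arithmetic above, a standalone userspace sketch, not driver code; md5_zero_words is a hypothetical mirror of the MD5 branch, verifying that the zero padding always stops exactly 8 bytes short of a 64-byte boundary:

#include <assert.h>

/* Mirrors the MD5-branch computation of "zeros" in sun4i_hash_final(). */
static unsigned int md5_zero_words(unsigned long long byte_count)
{
	/* bytes sent once the final-1 word is out, modulo the block size */
	unsigned int index = (byte_count + 4) & 0x3f;

	return index > 56 ? (120 - index) / 4 : (56 - index) / 4;
}

int main(void)
{
	unsigned long long bc;

	/* byte_count is always a multiple of 4: the device is fed in words */
	for (bc = 0; bc < 4096; bc += 4) {
		unsigned int index = (bc + 4) & 0x3f;

		/* final-1 word + zeros must leave 8 bytes for the length */
		assert((index + 4 * md5_zero_words(bc)) % 64 == 56);
	}
	return 0;
}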
+/* sun4i_hash_finup: finalize hashing operation after an update */
+int sun4i_hash_finup(struct ahash_request *areq)
+{
+ int err;
+
+ err = sun4i_hash_update(areq);
+ if (err != 0)
+ return err;
+
+ return sun4i_hash_final(areq);
+}
+
+/* combo of init/update/final functions */
+int sun4i_hash_digest(struct ahash_request *areq)
+{
+ int err;
+
+ err = sun4i_hash_init(areq);
+ if (err != 0)
+ return err;
+
+ err = sun4i_hash_update(areq);
+ if (err != 0)
+ return err;
+
+ return sun4i_hash_final(areq);
+}
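For completeness, a consumer-side sketch of the whole hash path, assuming the 4.3-era ahash API; example_md5 and its data/len parameters are hypothetical, and digest must hold MD5_DIGEST_SIZE bytes:

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch: one-shot MD5 of a linear buffer through this driver. */
static int example_md5(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("md5-sun4i-ss", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, digest, len);

	err = crypto_ahash_digest(req);	/* init + update + final in one call */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}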
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
new file mode 100644
index 000000000000..8e9c05f6e4d4
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -0,0 +1,201 @@
+/*
+ * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Supports the AES cipher with 128-, 192- and 256-bit key sizes.
+ * Supports the MD5 and SHA1 hash algorithms.
+ * Supports DES and 3DES.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+
+#define SS_CTL 0x00
+#define SS_KEY0 0x04
+#define SS_KEY1 0x08
+#define SS_KEY2 0x0C
+#define SS_KEY3 0x10
+#define SS_KEY4 0x14
+#define SS_KEY5 0x18
+#define SS_KEY6 0x1C
+#define SS_KEY7 0x20
+
+#define SS_IV0 0x24
+#define SS_IV1 0x28
+#define SS_IV2 0x2C
+#define SS_IV3 0x30
+
+#define SS_FCSR 0x44
+
+#define SS_MD0 0x4C
+#define SS_MD1 0x50
+#define SS_MD2 0x54
+#define SS_MD3 0x58
+#define SS_MD4 0x5C
+
+#define SS_RXFIFO 0x200
+#define SS_TXFIFO 0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT (0 << 15)
+#define SS_PRNG_CONTINUE (1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY (1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB (0 << 12)
+#define SS_CBC (1 << 12)
+#define SS_CTS (3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS (0 << 10)
+#define SS_CNT_32BITS (1 << 10)
+#define SS_CNT_64BITS (2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS (0 << 8)
+#define SS_AES_192BITS (1 << 8)
+#define SS_AES_256BITS (2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION (0 << 7)
+#define SS_DECRYPTION (1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES (0 << 4)
+#define SS_OP_DES (1 << 4)
+#define SS_OP_3DES (2 << 4)
+#define SS_OP_SHA1 (3 << 4)
+#define SS_OP_MD5 (4 << 4)
+#define SS_OP_PRNG (5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END (1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START (1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED (0 << 0)
+#define SS_ENABLED (1 << 0)
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE (1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE (1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
+
+#define SS_RX_MAX 32
+#define SS_RX_DEFAULT SS_RX_MAX
+#define SS_TX_MAX 33
+
+#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
+
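To illustrate how these fields combine, the driver programs SS_CTL by OR-ing one value from each group. A hypothetical fragment for an AES-128 CBC encryption setup, assuming an enabled struct sun4i_ss_ctx *ss in scope; this is a sketch, not a literal line from the driver:

	/* one value per bit-field group, combined into a single control word */
	u32 ctl = SS_ENABLED |		/* bit 0: engine on */
		  SS_ENCRYPTION |	/* bit 7: direction */
		  SS_OP_AES |		/* bits 4-6: method */
		  SS_AES_128BITS |	/* bits 8-9: AES key size */
		  SS_CBC;		/* bits 12-13: chaining mode */

	writel(ctl, ss->base + SS_CTL);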
+struct sun4i_ss_ctx {
+ void __iomem *base;
+ int irq;
+ struct clk *busclk;
+ struct clk *ssclk;
+ struct reset_control *reset;
+ struct device *dev;
+ struct resource *res;
+ spinlock_t slock; /* control the use of the device */
+};
+
+struct sun4i_ss_alg_template {
+ u32 type;
+ u32 mode;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+ struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_tfm_ctx {
+ u32 key[AES_MAX_KEY_SIZE / 4]; /* divided by sizeof(u32) */
+ u32 keylen;
+ u32 keymode;
+ struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+ u32 mode;
+};
+
+struct sun4i_req_ctx {
+ u32 mode;
+ u64 byte_count; /* number of bytes "uploaded" to the device */
+ u32 hash[5]; /* for storing SS_IVx register */
+ char buf[64];
+ unsigned int len;
+ struct sun4i_ss_ctx *ss;
+};
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out);
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in);
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out);
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in);
+
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 83aca95a95bc..3b20a1bce703 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -766,6 +766,7 @@ static int talitos_rng_init(struct hwrng *rng)
static int talitos_register_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
+ int err;
priv->rng.name = dev_driver_string(dev),
priv->rng.init = talitos_rng_init,
@@ -773,14 +774,22 @@ static int talitos_register_rng(struct device *dev)
priv->rng.data_read = talitos_rng_data_read,
priv->rng.priv = (unsigned long)dev;
- return hwrng_register(&priv->rng);
+ err = hwrng_register(&priv->rng);
+ if (!err)
+ priv->rng_registered = true;
+
+ return err;
}
static void talitos_unregister_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
+ if (!priv->rng_registered)
+ return;
+
hwrng_unregister(&priv->rng);
+ priv->rng_registered = false;
}
/*
@@ -799,7 +808,6 @@ struct talitos_ctx {
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
- unsigned int authsize;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -819,16 +827,6 @@ struct talitos_ahash_req_ctx {
struct scatterlist *psrc;
};
-static int aead_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
-{
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-
- ctx->authsize = authsize;
-
- return 0;
-}
-
static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
@@ -857,12 +855,11 @@ badkey:
/*
* talitos_edesc - s/w-extended descriptor
- * @assoc_nents: number of segments in associated data scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @assoc_chained: whether assoc is chained or not
* @src_chained: whether src is chained or not
* @dst_chained: whether dst is chained or not
+ * @icv_ool: whether ICV is out-of-line
* @iv_dma: dma address of iv for checking continuity and link table
* @dma_len: length of dma mapped link_tbl space
* @dma_link_tbl: bus physical address of link_tbl/buf
@@ -875,12 +872,11 @@ badkey:
* of link_tbl data
*/
struct talitos_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- bool assoc_chained;
bool src_chained;
bool dst_chained;
+ bool icv_ool;
dma_addr_t iv_dma;
int dma_len;
dma_addr_t dma_link_tbl;
@@ -952,14 +948,6 @@ static void ipsec_esp_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- if (edesc->assoc_chained)
- talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
- else if (areq->assoclen)
- /* assoc_nents counts also for IV in non-contiguous cases */
- dma_unmap_sg(dev, areq->assoc,
- edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
- DMA_TO_DEVICE);
-
talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
if (edesc->dma_len)
@@ -976,7 +964,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
{
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
@@ -986,13 +974,12 @@ static void ipsec_esp_encrypt_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
- if (edesc->dst_nents) {
+ if (edesc->icv_ool) {
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
- icvdata, ctx->authsize);
+ memcpy((char *)sg_virt(sg) + sg->length - authsize,
+ icvdata, authsize);
}
kfree(edesc);
@@ -1006,10 +993,10 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
{
struct aead_request *req = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
- void *icvdata;
+ char *oicv, *icv;
edesc = container_of(desc, struct talitos_edesc, desc);
@@ -1017,16 +1004,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
if (!err) {
/* auth check */
- if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
- else
- icvdata = &edesc->link_tbl[0];
-
sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
- ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
+ icv = (char *)sg_virt(sg) + sg->length - authsize;
+
+ if (edesc->dma_len) {
+ oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
+ if (edesc->icv_ool)
+ icv = oicv + authsize;
+ } else
+ oicv = (char *)&edesc->link_tbl[0];
+
+ err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
}
kfree(edesc);
@@ -1059,53 +1048,69 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
* convert scatterlist to SEC h/w link table format
* stop at cryptlen bytes
*/
-static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- int cryptlen, struct talitos_ptr *link_tbl_ptr)
+static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ unsigned int offset, int cryptlen,
+ struct talitos_ptr *link_tbl_ptr)
{
int n_sg = sg_count;
+ int count = 0;
- while (sg && n_sg--) {
- to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
- link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
- link_tbl_ptr->j_extent = 0;
- link_tbl_ptr++;
- cryptlen -= sg_dma_len(sg);
- sg = sg_next(sg);
- }
+ while (cryptlen && sg && n_sg--) {
+ unsigned int len = sg_dma_len(sg);
+
+ if (offset >= len) {
+ offset -= len;
+ goto next;
+ }
+
+ len -= offset;
+
+ if (len > cryptlen)
+ len = cryptlen;
- /* adjust (decrease) last one (or two) entry's len to cryptlen */
- link_tbl_ptr--;
- while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
- /* Empty this entry, and move to previous one */
- cryptlen += be16_to_cpu(link_tbl_ptr->len);
- link_tbl_ptr->len = 0;
- sg_count--;
- link_tbl_ptr--;
+ to_talitos_ptr(link_tbl_ptr + count,
+ sg_dma_address(sg) + offset, 0);
+ link_tbl_ptr[count].len = cpu_to_be16(len);
+ link_tbl_ptr[count].j_extent = 0;
+ count++;
+ cryptlen -= len;
+ offset = 0;
+
+next:
+ sg = sg_next(sg);
}
- link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
- + cryptlen);
/* tag end of link table */
- link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+ if (count > 0)
+ link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
- return sg_count;
+ return count;
+}
+
+static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+ int cryptlen,
+ struct talitos_ptr *link_tbl_ptr)
+{
+ return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
+ link_tbl_ptr);
}
/*
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- u64 seq, void (*callback) (struct device *dev,
- struct talitos_desc *desc,
- void *context, int error))
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ unsigned int authsize = crypto_aead_authsize(aead);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
- unsigned int authsize = ctx->authsize;
unsigned int ivsize = crypto_aead_ivsize(aead);
+ int tbl_off = 0;
int sg_count, ret;
int sg_link_tbl_len;
@@ -1113,36 +1118,27 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
DMA_TO_DEVICE);
+ sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE,
+ edesc->src_chained);
+
/* hmac data */
- desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
- if (edesc->assoc_nents) {
- int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ areq->assoclen,
+ &edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
- /* assoc_nents - 1 entries for assoc, 1 for IV */
- sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
- areq->assoclen, tbl_ptr);
-
- /* add IV to link table */
- tbl_ptr += sg_count - 1;
- tbl_ptr->j_extent = 0;
- tbl_ptr++;
- to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
- tbl_ptr->len = cpu_to_be16(ivsize);
- tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
- if (areq->assoclen)
- to_talitos_ptr(&desc->ptr[1],
- sg_dma_address(areq->assoc), 0);
- else
- to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
}
@@ -1150,8 +1146,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
desc->ptr[2].len = cpu_to_be16(ivsize);
desc->ptr[2].j_extent = 0;
- /* Sync needed for the aead_givencrypt case */
- dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@@ -1167,33 +1161,24 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
- sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
- (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- : DMA_TO_DEVICE,
- edesc->src_chained);
-
- if (sg_count == 1) {
+ sg_link_tbl_len = cryptlen;
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
+
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+ sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) > 1) {
+ tbl_off += ret;
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ tbl_off *
+ sizeof(struct talitos_ptr), 0);
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
- } else {
- sg_link_tbl_len = cryptlen;
-
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len = cryptlen + authsize;
-
- sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
- &edesc->link_tbl[0]);
- if (sg_count > 1) {
- desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- } else {
- /* Only one segment now, so no link tbl needed */
- to_talitos_ptr(&desc->ptr[4],
- sg_dma_address(areq->src), 0);
- }
- }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1204,16 +1189,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_nents ? : 1,
DMA_FROM_DEVICE, edesc->dst_chained);
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
- } else {
- int tbl_off = edesc->src_nents + 1;
+ edesc->icv_ool = false;
+
+ if (sg_count > 1 &&
+ (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ areq->assoclen, cryptlen,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
tbl_off * sizeof(struct talitos_ptr), 0);
- sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
- tbl_ptr);
/* Add an entry to the link table for ICV data */
tbl_ptr += sg_count - 1;
@@ -1224,13 +1210,16 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
- (tbl_off + edesc->dst_nents + 1 +
- edesc->assoc_nents) *
- sizeof(struct talitos_ptr), 0);
+ (edesc->src_nents + edesc->dst_nents +
+ 2) * sizeof(struct talitos_ptr) +
+ authsize, 0);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
- }
+
+ edesc->icv_ool = true;
+ } else
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1268,7 +1257,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
- struct scatterlist *assoc,
struct scatterlist *src,
struct scatterlist *dst,
u8 *iv,
@@ -1281,8 +1269,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
bool encrypt)
{
struct talitos_edesc *edesc;
- int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
- bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int src_nents, dst_nents, alloc_len, dma_len;
+ bool src_chained = false, dst_chained = false;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1298,48 +1286,35 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
if (ivsize)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
- if (assoclen) {
- /*
- * Currently it is assumed that iv is provided whenever assoc
- * is.
- */
- BUG_ON(!iv);
-
- assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
- talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
- assoc_chained);
- assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
-
- if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
- assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
- }
-
if (!dst || dst == src) {
- src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+ src_nents = sg_count(src, assoclen + cryptlen + authsize,
+ &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
} else { /* dst && dst != src*/
- src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+ src_nents = sg_count(src, assoclen + cryptlen +
+ (encrypt ? 0 : authsize),
&src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+ dst_nents = sg_count(dst, assoclen + cryptlen +
+ (encrypt ? authsize : 0),
&dst_chained);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
* allocate space for base edesc plus the link tables,
- * allowing for two separate entries for ICV and generated ICV (+ 2),
- * and the ICV data itself
+ * allowing for two separate entries for AD and generated ICV (+ 2),
+ * and space for two sets of ICVs (stashed and generated)
*/
alloc_len = sizeof(struct talitos_edesc);
- if (assoc_nents || src_nents || dst_nents) {
+ if (src_nents || dst_nents) {
if (is_sec1)
dma_len = (src_nents ? cryptlen : 0) +
(dst_nents ? cryptlen : 0);
else
- dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
- sizeof(struct talitos_ptr) + authsize;
+ dma_len = (src_nents + dst_nents + 2) *
+ sizeof(struct talitos_ptr) + authsize * 2;
alloc_len += dma_len;
} else {
dma_len = 0;
@@ -1348,13 +1323,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- if (assoc_chained)
- talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
- else if (assoclen)
- dma_unmap_sg(dev, assoc,
- assoc_nents ? assoc_nents - 1 : 1,
- DMA_TO_DEVICE);
-
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1362,10 +1330,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
}
- edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->assoc_chained = assoc_chained;
edesc->src_chained = src_chained;
edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
@@ -1382,12 +1348,13 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
int icv_stashing, bool encrypt)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
- ctx->authsize, ivsize, icv_stashing,
+ authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1405,14 +1372,14 @@ static int aead_encrypt(struct aead_request *req)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- unsigned int authsize = ctx->authsize;
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
struct scatterlist *sg;
@@ -1437,7 +1404,7 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@@ -1445,40 +1412,16 @@ static int aead_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2 +
- edesc->assoc_nents];
+ icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
sg = sg_last(req->src, edesc->src_nents ? : 1);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
- ctx->authsize);
+ memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
- return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
- struct aead_request *areq = &req->areq;
- struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- struct talitos_edesc *edesc;
-
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(areq, req->giv, 0, true);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* set encrypt */
- edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
-
- memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
- /* avoid consecutive packets going out with same IV */
- *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
-
- return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1710,7 +1653,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
- return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
areq->base.flags, encrypt);
}
@@ -1895,7 +1838,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
+ return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags, false);
}
@@ -1986,7 +1929,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_init_table(req_ctx->bufsl, nsg);
sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
if (nsg > 1)
- scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+ sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
} else
req_ctx->psrc = areq->src;
@@ -2161,6 +2104,7 @@ struct talitos_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
__be32 desc_hdr_template;
};
@@ -2168,15 +2112,16 @@ struct talitos_alg_template {
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2187,15 +2132,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2207,15 +2154,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha224),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2226,15 +2174,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA224_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2246,15 +2196,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2265,15 +2216,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2285,15 +2238,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha384),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2304,15 +2258,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA384_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2324,15 +2280,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2343,15 +2300,17 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2363,15 +2322,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
@@ -2382,15 +2342,16 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
- .alg.crypto = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_aead = {
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- }
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2658,15 +2619,9 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-
- talitos_cra_init(tfm);
-
- /* random first IV */
- get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
-
+ talitos_cra_init(crypto_aead_tfm(tfm));
return 0;
}
@@ -2713,9 +2668,9 @@ static int talitos_remove(struct platform_device *ofdev)
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- case CRYPTO_ALG_TYPE_AEAD:
- crypto_unregister_alg(&t_alg->algt.alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+		crypto_unregister_aead(&t_alg->algt.alg.aead);
+		break;
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&t_alg->algt.alg.hash);
break;
@@ -2727,7 +2682,7 @@ static int talitos_remove(struct platform_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- for (i = 0; i < priv->num_channels; i++)
+ for (i = 0; priv->chan && i < priv->num_channels; i++)
kfree(priv->chan[i].fifo);
kfree(priv->chan);
@@ -2774,15 +2729,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
- alg = &t_alg->algt.alg.crypto;
- alg->cra_init = talitos_cra_init_aead;
- alg->cra_type = &crypto_aead_type;
- alg->cra_aead.setkey = aead_setkey;
- alg->cra_aead.setauthsize = aead_setauthsize;
- alg->cra_aead.encrypt = aead_encrypt;
- alg->cra_aead.decrypt = aead_decrypt;
- alg->cra_aead.givencrypt = aead_givencrypt;
- alg->cra_aead.geniv = "<built-in>";
+ alg = &t_alg->algt.alg.aead.base;
+ t_alg->algt.alg.aead.init = talitos_cra_init_aead;
+ t_alg->algt.alg.aead.setkey = aead_setkey;
+ t_alg->algt.alg.aead.encrypt = aead_encrypt;
+ t_alg->algt.alg.aead.decrypt = aead_decrypt;
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
@@ -3041,7 +2992,7 @@ static int talitos_probe(struct platform_device *ofdev)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
- char *name = NULL;
+ struct crypto_alg *alg = NULL;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -3053,21 +3004,26 @@ static int talitos_probe(struct platform_device *ofdev)
switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- case CRYPTO_ALG_TYPE_AEAD:
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
- name = t_alg->algt.alg.crypto.cra_driver_name;
+ alg = &t_alg->algt.alg.crypto;
break;
+
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = crypto_register_aead(
+ &t_alg->algt.alg.aead);
+ alg = &t_alg->algt.alg.aead.base;
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
&t_alg->algt.alg.hash);
- name =
- t_alg->algt.alg.hash.halg.base.cra_driver_name;
+ alg = &t_alg->algt.alg.hash.halg.base;
break;
}
if (err) {
dev_err(dev, "%s alg registration failed\n",
- name);
+ alg->cra_driver_name);
kfree(t_alg);
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 314daf55e7f7..0090f3211d68 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,12 +52,7 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
-static const struct talitos_ptr zero_entry = {
- .len = 0,
- .j_extent = 0,
- .eptr = 0,
- .ptr = 0
-};
+static const struct talitos_ptr zero_entry;
/* descriptor */
struct talitos_desc {
@@ -154,6 +149,7 @@ struct talitos_private {
/* hwrng device */
struct hwrng rng;
+ bool rng_registered;
};
extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index e79e567e43aa..263af709e536 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(src, dst, &ctx->enc_key);
pagefault_enable();
preempt_enable();
@@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_decrypt(src, dst, &ctx->dec_key);
pagefault_enable();
preempt_enable();
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 7299995c78ec..0b8fe2ec5315 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
@@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
@@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 7adae42a7b79..ee1306cd8f59 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
pagefault_enable();
@@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
pagefault_enable();
@@ -113,6 +115,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *src, unsigned int nbytes)
{
int ret;
+ u64 inc;
struct blkcipher_walk walk;
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
@@ -131,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
walk.dst.virt.addr,
(nbytes &
@@ -140,7 +144,12 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
walk.iv);
pagefault_enable();
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
+		/* Advance the IV once per full block just processed */
+		inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
+		while (inc--)
+			crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 6c5c20c6108e..228053921b3f 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1437,28 +1437,28 @@ Load_ctr32_enc_key:
?vperm v31,v31,$out0,$keyperm
lvx v25,$x10,$key_ # pre-load round[2]
- vadduwm $two,$one,$one
+ vadduqm $two,$one,$one
subi $inp,$inp,15 # undo "caller"
$SHL $len,$len,4
- vadduwm $out1,$ivec,$one # counter values ...
- vadduwm $out2,$ivec,$two
+ vadduqm $out1,$ivec,$one # counter values ...
+ vadduqm $out2,$ivec,$two
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
le?li $idx,8
- vadduwm $out3,$out1,$two
+ vadduqm $out3,$out1,$two
vxor $out1,$out1,$rndkey0
le?lvsl $inpperm,0,$idx
- vadduwm $out4,$out2,$two
+ vadduqm $out4,$out2,$two
vxor $out2,$out2,$rndkey0
le?vspltisb $tmp,0x0f
- vadduwm $out5,$out3,$two
+ vadduqm $out5,$out3,$two
vxor $out3,$out3,$rndkey0
le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
- vadduwm $out6,$out4,$two
+ vadduqm $out6,$out4,$two
vxor $out4,$out4,$rndkey0
- vadduwm $out7,$out5,$two
+ vadduqm $out7,$out5,$two
vxor $out5,$out5,$rndkey0
- vadduwm $ivec,$out6,$two # next counter value
+ vadduqm $ivec,$out6,$two # next counter value
vxor $out6,$out6,$rndkey0
vxor $out7,$out7,$rndkey0
@@ -1594,27 +1594,27 @@ Loop_ctr32_enc8x_middle:
vcipherlast $in0,$out0,$in0
vcipherlast $in1,$out1,$in1
- vadduwm $out1,$ivec,$one # counter values ...
+ vadduqm $out1,$ivec,$one # counter values ...
vcipherlast $in2,$out2,$in2
- vadduwm $out2,$ivec,$two
+ vadduqm $out2,$ivec,$two
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
vcipherlast $in3,$out3,$in3
- vadduwm $out3,$out1,$two
+ vadduqm $out3,$out1,$two
vxor $out1,$out1,$rndkey0
vcipherlast $in4,$out4,$in4
- vadduwm $out4,$out2,$two
+ vadduqm $out4,$out2,$two
vxor $out2,$out2,$rndkey0
vcipherlast $in5,$out5,$in5
- vadduwm $out5,$out3,$two
+ vadduqm $out5,$out3,$two
vxor $out3,$out3,$rndkey0
vcipherlast $in6,$out6,$in6
- vadduwm $out6,$out4,$two
+ vadduqm $out6,$out4,$two
vxor $out4,$out4,$rndkey0
vcipherlast $in7,$out7,$in7
- vadduwm $out7,$out5,$two
+ vadduqm $out7,$out5,$two
vxor $out5,$out5,$rndkey0
le?vperm $in0,$in0,$in0,$inpperm
- vadduwm $ivec,$out6,$two # next counter value
+ vadduqm $ivec,$out6,$two # next counter value
vxor $out6,$out6,$rndkey0
le?vperm $in1,$in1,$in1,$inpperm
vxor $out7,$out7,$rndkey0
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index b5e29002b666..2183a2e77641 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_init_p8(ctx->htable, (const u64 *) key);
pagefault_enable();
@@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable,
dctx->buffer, GHASH_DIGEST_SIZE);
@@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc,
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
pagefault_enable();
@@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
+ enable_kernel_vsx();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable,
dctx->buffer, GHASH_DIGEST_SIZE);
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index 0a6f899839dd..d8429cb71f02 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -61,6 +61,12 @@ $code=<<___;
mtspr 256,r0
li r10,0x30
lvx_u $H,0,r4 # load H
+ le?xor r7,r7,r7
+	le?addi r7,r7,0x8		# need a vperm starting at 0x08
+	le?lvsr 5,0,r7
+	le?vspltisb 6,0x0f
+	le?vxor 5,5,6			# build a big-endian mask
+ le?vperm $H,$H,$H,5
vspltisb $xC2,-16 # 0xf0
vspltisb $t0,1 # one
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index a59188494af8..b9997335f193 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -169,6 +169,7 @@ my $vpmsumd = sub { vcrypto_op(@_, 1224); };
my $vpmsubh = sub { vcrypto_op(@_, 1096); };
my $vpmsumw = sub { vcrypto_op(@_, 1160); };
my $vaddudm = sub { vcrypto_op(@_, 192); };
+my $vadduqm = sub { vcrypto_op(@_, 256); };
my $mtsle = sub {
my ($f, $arg) = @_;
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 7d99d13bacd8..f9901f52a225 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,7 +1,7 @@
/*
* exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
*
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -82,6 +82,15 @@ struct __exynos_ppmu_events {
PPMU_EVENT(mscl),
PPMU_EVENT(fimd0x),
PPMU_EVENT(fimd1x),
+
+ /* Only for Exynos5433 SoCs */
+ PPMU_EVENT(d0-cpu),
+ PPMU_EVENT(d0-general),
+ PPMU_EVENT(d0-rt),
+ PPMU_EVENT(d1-cpu),
+ PPMU_EVENT(d1-general),
+ PPMU_EVENT(d1-rt),
+
{ /* sentinel */ },
};
@@ -96,6 +105,9 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
return -EINVAL;
}
+/*
+ * The devfreq-event ops structure for PPMU v1.1
+ */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
@@ -200,10 +212,158 @@ static const struct devfreq_event_ops exynos_ppmu_ops = {
.get_event = exynos_ppmu_get_event,
};
+/*
+ * The devfreq-event ops structure for PPMU v2.0
+ */
+static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ u32 pmnc, clear;
+
+ /* Disable all counters */
+ clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
+ | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+
+ __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
+ __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
+ __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
+ __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
+
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
+ __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
+
+ /* Disable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+ __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+ return 0;
+}
+
+static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int id = exynos_ppmu_find_ppmu_id(edev);
+ u32 pmnc, cntens;
+
+ /* Enable all counters */
+ cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
+ cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+ __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
+
+ /* Set the event of Read/Write data count */
+ switch (id) {
+ case PPMU_PMNCNT0:
+ case PPMU_PMNCNT1:
+ case PPMU_PMNCNT2:
+ __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
+ info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+ break;
+ case PPMU_PMNCNT3:
+ __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
+ info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+ break;
+ }
+
+ /* Reset cycle counter/performance counter and enable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+ | PPMU_PMNC_COUNTER_RESET_MASK
+ | PPMU_PMNC_CC_RESET_MASK
+ | PPMU_PMNC_CC_DIVIDER_MASK
+ | PPMU_V2_PMNC_START_MODE_MASK);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+ pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+ pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
+ __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+ return 0;
+}
+
+static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+ int id = exynos_ppmu_find_ppmu_id(edev);
+ u32 pmnc, cntenc;
+ u32 pmcnt_high, pmcnt_low;
+ u64 load_count = 0;
+
+ /* Disable PPMU */
+ pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+ pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+ __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+ /* Read cycle count and performance count */
+ edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
+
+ switch (id) {
+ case PPMU_PMNCNT0:
+ case PPMU_PMNCNT1:
+ case PPMU_PMNCNT2:
+ load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
+ break;
+ case PPMU_PMNCNT3:
+ pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
+ pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
+		load_count = ((u64)(pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+ break;
+ }
+ edata->load_count = load_count;
+
+ /* Disable all counters */
+ cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
+ cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+ __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
+
+	dev_dbg(&edev->dev, "%25s (load: %lu / %lu)\n", edev->desc->name,
+ edata->load_count, edata->total_count);
+ return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
+ .disable = exynos_ppmu_v2_disable,
+ .set_event = exynos_ppmu_v2_set_event,
+ .get_event = exynos_ppmu_v2_get_event,
+};
+
+static const struct of_device_id exynos_ppmu_id_match[] = {
+ {
+ .compatible = "samsung,exynos-ppmu",
+ .data = (void *)&exynos_ppmu_ops,
+ }, {
+ .compatible = "samsung,exynos-ppmu-v2",
+ .data = (void *)&exynos_ppmu_v2_ops,
+ },
+ { /* sentinel */ },
+};
+
+static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
+{
+ const struct of_device_id *match;
+
+ match = of_match_node(exynos_ppmu_id_match, np);
+ return (struct devfreq_event_ops *)match->data;
+}
+
static int of_get_devfreq_events(struct device_node *np,
struct exynos_ppmu *info)
{
struct devfreq_event_desc *desc;
+ struct devfreq_event_ops *event_ops;
struct device *dev = info->dev;
struct device_node *events_np, *node;
int i, j, count;
@@ -214,6 +374,7 @@ static int of_get_devfreq_events(struct device_node *np,
"failed to get child node of devfreq-event devices\n");
return -EINVAL;
}
+ event_ops = exynos_bus_get_ops(np);
count = of_get_child_count(events_np);
desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
@@ -238,7 +399,7 @@ static int of_get_devfreq_events(struct device_node *np,
continue;
}
- desc[j].ops = &exynos_ppmu_ops;
+ desc[j].ops = event_ops;
desc[j].driver_data = info;
of_property_read_string(node, "event-name", &desc[j].name);
@@ -354,11 +515,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id exynos_ppmu_id_match[] = {
- { .compatible = "samsung,exynos-ppmu", },
- { /* sentinel */ },
-};
-
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
.remove = exynos_ppmu_remove,
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 4e831d48c138..05774c449137 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -26,6 +26,9 @@ enum ppmu_counter {
PPMU_PMNCNT_MAX,
};
+/*
+ * PPMU v1.1 definitions
+ */
enum ppmu_event_type {
PPMU_RO_BUSY_CYCLE_CNT = 0x0,
PPMU_WO_BUSY_CYCLE_CNT = 0x1,
@@ -90,4 +93,71 @@ enum ppmu_reg {
#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x))
#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x))
+/*
+ * PPMU v2.0 definitions
+ */
+enum ppmu_v2_mode {
+ PPMU_V2_MODE_MANUAL = 0,
+ PPMU_V2_MODE_AUTO = 1,
+ PPMU_V2_MODE_CIG = 2, /* CIG (Conditional Interrupt Generation) */
+};
+
+enum ppmu_v2_event_type {
+ PPMU_V2_RO_DATA_CNT = 0x4,
+ PPMU_V2_WO_DATA_CNT = 0x5,
+
+ PPMU_V2_EVT3_RW_DATA_CNT = 0x22, /* Only for Event3 */
+};
+
+enum ppmu_v2_reg {
+ /* PPC control register */
+ PPMU_V2_PMNC = 0x04,
+ PPMU_V2_CNTENS = 0x08,
+ PPMU_V2_CNTENC = 0x0c,
+ PPMU_V2_INTENS = 0x10,
+ PPMU_V2_INTENC = 0x14,
+ PPMU_V2_FLAG = 0x18,
+
+ /* Cycle Counter and Performance Event Counter Register */
+ PPMU_V2_CCNT = 0x48,
+ PPMU_V2_PMCNT0 = 0x34,
+ PPMU_V2_PMCNT1 = 0x38,
+ PPMU_V2_PMCNT2 = 0x3c,
+ PPMU_V2_PMCNT3_LOW = 0x40,
+ PPMU_V2_PMCNT3_HIGH = 0x44,
+
+ /* Bus Event Generator */
+ PPMU_V2_CIG_CFG0 = 0x1c,
+ PPMU_V2_CIG_CFG1 = 0x20,
+ PPMU_V2_CIG_CFG2 = 0x24,
+ PPMU_V2_CIG_RESULT = 0x28,
+ PPMU_V2_CNT_RESET = 0x2c,
+ PPMU_V2_CNT_AUTO = 0x30,
+ PPMU_V2_CH_EV0_TYPE = 0x200,
+ PPMU_V2_CH_EV1_TYPE = 0x204,
+ PPMU_V2_CH_EV2_TYPE = 0x208,
+ PPMU_V2_CH_EV3_TYPE = 0x20c,
+ PPMU_V2_SM_ID_V = 0x220,
+ PPMU_V2_SM_ID_A = 0x224,
+ PPMU_V2_SM_OTHERS_V = 0x228,
+ PPMU_V2_SM_OTHERS_A = 0x22c,
+ PPMU_V2_INTERRUPT_RESET = 0x260,
+};
+
+/* PMNC register */
+#define PPMU_V2_PMNC_START_MODE_SHIFT 20
+#define PPMU_V2_PMNC_START_MODE_MASK (0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
+
+#define PPMU_PMNC_CC_RESET_SHIFT 2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
+#define PPMU_PMNC_ENABLE_SHIFT 0
+#define PPMU_PMNC_START_MODE_MASK BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
+#define PPMU_PMNC_ENABLE_MASK BIT(0)
+
+#define PPMU_V2_PMNCT(x) (PPMU_V2_PMCNT0 + (0x4 * x))
+#define PPMU_V2_CH_EVx_TYPE(x) (PPMU_V2_CH_EV0_TYPE + (0x4 * x))
+
#endif /* __EXYNOS_PPMU_H__ */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 88d474b78076..b4584757dae0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,27 +33,29 @@ if DMADEVICES
comment "DMA Devices"
-config INTEL_MIC_X100_DMA
- tristate "Intel MIC X100 DMA Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS
- select DMA_ENGINE
- help
- This enables DMA support for the Intel Many Integrated Core
- (MIC) family of PCIe form factor coprocessor X100 devices that
- run a 64 bit Linux OS. This driver will be used by both MIC
- host and card drivers.
-
- If you are building host kernel with a MIC device or a card
- kernel for a MIC device, then say M (recommended) or Y, else
- say N. If unsure say N.
+#core
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ bool
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
-config ASYNC_TX_ENABLE_CHANNEL_SWITCH
+config DMA_ENGINE
bool
+config DMA_VIRTUAL_CHANNELS
+ tristate
+
+config DMA_ACPI
+ def_bool y
+ depends on ACPI
+
+config DMA_OF
+ def_bool y
+ depends on OF
+ select DMA_ENGINE
+
+#devices
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA
@@ -63,29 +65,15 @@ config AMBA_PL08X
Platform has a PL08x DMAC device
which can provide DMA engine support
-config INTEL_IOATDMA
- tristate "Intel I/OAT DMA support"
- depends on PCI && X86
+config AMCC_PPC440SPE_ADMA
+ tristate "AMCC PPC440SPe ADMA support"
+ depends on 440SPe || 440SP
select DMA_ENGINE
select DMA_ENGINE_RAID
- select DCA
- help
- Enable support for the Intel(R) I/OAT DMA engine present
- in recent Intel Xeon chipsets.
-
- Say Y here if you have such a chipset.
-
- If unsure, say N.
-
-config INTEL_IOP_ADMA
- tristate "Intel IOP ADMA support"
- depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
- select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
- Enable support for the Intel(R) IOP Series RAID engines.
-
-source "drivers/dma/dw/Kconfig"
+ Enable support for the AMCC PPC440SPe RAID engines.
config AT_HDMAC
tristate "Atmel AHB DMA support"
@@ -101,6 +89,89 @@ config AT_XDMAC
help
Support the Atmel XDMA controller.
+config AXI_DMAC
+ tristate "Analog Devices AXI-DMAC DMA support"
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
+	  controller is often used in Analog Devices' reference designs for FPGA
+ platforms.
+
+config COH901318
+ bool "ST-Ericsson COH901318 DMA support"
+ select DMA_ENGINE
+ depends on ARCH_U300
+ help
+ Enable support for ST-Ericsson COH 901 318 DMA.
+
+config DMA_BCM2835
+ tristate "BCM2835 DMA engine support"
+ depends on ARCH_BCM2835
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4740
+ tristate "JZ4740 DMA support"
+ depends on MACH_JZ4740
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4780
+ tristate "JZ4780 DMA support"
+ depends on MACH_JZ4780
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+ If you have a board based on such a SoC and wish to use DMA for
+ devices which can use the DMA controller, say Y or M here.
+
+config DMA_OMAP
+ tristate "OMAP DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_DMA_CROSSBAR if SOC_DRA7XX
+
+config DMA_SA11X0
+ tristate "SA-11x0 DMA support"
+ depends on ARCH_SA1100
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine found on Intel StrongARM SA-1100 and
+ SA-1110 SoCs. This DMA engine can only be used with on-chip
+ devices.
+
+config DMA_SUN4I
+ tristate "Allwinner A10 DMA SoCs support"
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
+ default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+ select DMA_ENGINE
+ select DMA_OF
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the DMA controller present in the sun4i,
+ sun5i and sun7i Allwinner ARM SoCs.
+
+config DMA_SUN6I
+ tristate "Allwinner A31 SoCs DMA support"
+ depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the DMA engine first found in Allwinner A31 SoCs.
+
+config EP93XX_DMA
+ bool "Cirrus Logic EP93xx DMA support"
+ depends on ARCH_EP93XX
+ select DMA_ENGINE
+ help
+ Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
config FSL_DMA
tristate "Freescale Elo series DMA support"
depends on FSL_SOC
@@ -112,6 +183,16 @@ config FSL_DMA
EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
some Txxx and Bxxx parts.
+config FSL_EDMA
+ tristate "Freescale eDMA engine support"
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources (slots).
+ This module can be found on Freescale Vybrid and LS-1 SoCs.
+
config FSL_RAID
tristate "Freescale RAID engine Support"
depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -123,153 +204,175 @@ config FSL_RAID
the capability to offload memcpy, xor and pq computation
for raid5/6.
-source "drivers/dma/hsu/Kconfig"
-
-config MPC512X_DMA
- tristate "Freescale MPC512x built-in DMA engine support"
- depends on PPC_MPC512x || PPC_MPC831x
+config IMG_MDC_DMA
+ tristate "IMG MDC support"
+ depends on MIPS || COMPILE_TEST
+ depends on MFD_SYSCON
select DMA_ENGINE
- ---help---
- Enable support for the Freescale MPC512x built-in DMA engine.
-
-source "drivers/dma/bestcomm/Kconfig"
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the IMG multi-threaded DMA controller (MDC).
-config MV_XOR
- bool "Marvell XOR engine support"
- depends on PLAT_ORION
+config IMX_DMA
+ tristate "i.MX DMA support"
+ depends on ARCH_MXC
select DMA_ENGINE
- select DMA_ENGINE_RAID
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
- ---help---
- Enable support for the Marvell XOR engine.
+ help
+ Support the i.MX DMA engine. This engine is integrated into
+ Freescale i.MX1/21/27 chips.
-config MX3_IPU
- bool "MX3x Image Processing Unit support"
+config IMX_SDMA
+ tristate "i.MX SDMA support"
depends on ARCH_MXC
select DMA_ENGINE
- default y
help
- If you plan to use the Image Processing unit in the i.MX3x, say
- Y here. If unsure, select Y.
+ Support the i.MX SDMA engine. This engine is integrated into
+ Freescale i.MX25/31/35/51/53/6 chips.
-config MX3_IPU_IRQS
- int "Number of dynamically mapped interrupts for IPU"
- depends on MX3_IPU
- range 2 137
- default 4
+config IDMA64
+ tristate "Intel integrated DMA 64-bit support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
- Out of 137 interrupt sources on i.MX31 IPU only very few are used.
- To avoid bloating the irq_desc[] array we allocate a sufficient
- number of IRQ slots and map them dynamically to specific sources.
+ Enable DMA support for Intel Low Power Subsystem such as found on
+ Intel Skylake PCH.
-config PXA_DMA
- bool "PXA DMA support"
- depends on (ARCH_MMP || ARCH_PXA)
+config INTEL_IOATDMA
+ tristate "Intel I/OAT DMA support"
+ depends on PCI && X86_64
select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select DCA
help
- Support the DMA engine for PXA. It is also compatible with MMP PDMA
- platform. The internal DMA IP of all PXA variants is supported, with
- 16 to 32 channels for peripheral to memory or memory to memory
- transfers.
+ Enable support for the Intel(R) I/OAT DMA engine present
+ in recent Intel Xeon chipsets.
-config TXX9_DMAC
- tristate "Toshiba TXx9 SoC DMA support"
- depends on MACH_TX49XX || MACH_TX39XX
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
+config INTEL_IOP_ADMA
+ tristate "Intel IOP ADMA support"
+ depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
- Support the TXx9 SoC internal DMA controller. This can be
- integrated in chips such as the Toshiba TX4927/38/39.
+ Enable support for the Intel(R) IOP Series RAID engines.
-config TEGRA20_APB_DMA
- bool "NVIDIA Tegra20 APB DMA support"
- depends on ARCH_TEGRA
+config INTEL_MIC_X100_DMA
+ tristate "Intel MIC X100 DMA Driver"
+ depends on 64BIT && X86 && INTEL_MIC_BUS
select DMA_ENGINE
help
- Support for the NVIDIA Tegra20 APB DMA controller driver. The
- DMA controller is having multiple DMA channel which can be
- configured for different peripherals like audio, UART, SPI,
- I2C etc which is in APB bus.
- This DMA controller transfers data from memory to peripheral fifo
- or vice versa. It does not support memory to memory data transfer.
+ This enables DMA support for the Intel Many Integrated Core
+ (MIC) family of PCIe form factor coprocessor X100 devices that
+ run a 64 bit Linux OS. This driver will be used by both MIC
+ host and card drivers.
-config S3C24XX_DMAC
- tristate "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX
+ If you are building host kernel with a MIC device or a card
+ kernel for a MIC device, then say M (recommended) or Y, else
+ say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
+config K3_DMA
+ tristate "Hisilicon K3 DMA support"
+ depends on ARCH_HI3xxx
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the Samsung S3C24XX DMA controller driver. The
- DMA controller is having multiple DMA channels which can be
- configured for different peripherals like audio, UART, SPI.
- The DMA controller can transfer data from memory to peripheral,
- periphal to memory, periphal to periphal and memory to memory.
+ Support the DMA engine for Hisilicon K3 platform
+ devices.
-source "drivers/dma/sh/Kconfig"
+config LPC18XX_DMAMUX
+ bool "NXP LPC18xx/43xx DMA MUX for PL080"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && AMBA_PL08X
+ select MFD_SYSCON
+ help
+ Enable support for DMA on NXP LPC18xx/43xx platforms
+ with PL080 and multiplexed DMA request lines.
-config COH901318
- bool "ST-Ericsson COH901318 DMA support"
+config MMP_PDMA
+ bool "MMP PDMA support"
+ depends on (ARCH_MMP || ARCH_PXA)
select DMA_ENGINE
- depends on ARCH_U300
help
- Enable support for ST-Ericsson COH 901 318 DMA.
+ Support the MMP PDMA engine for PXA and MMP platform.
-config STE_DMA40
- bool "ST-Ericsson DMA40 support"
- depends on ARCH_U8500
+config MMP_TDMA
+ bool "MMP Two-Channel DMA support"
+ depends on ARCH_MMP
select DMA_ENGINE
+ select MMP_SRAM
help
- Support for ST-Ericsson DMA40 controller
+ Support the MMP Two-Channel DMA engine.
+	  This engine is used for MMP Audio DMA and pxa910 SQU.
+	  It needs the sram driver under mach-mmp.
-config AMCC_PPC440SPE_ADMA
- tristate "AMCC PPC440SPe ADMA support"
- depends on 440SPe || 440SP
+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
select DMA_ENGINE
- select DMA_ENGINE_RAID
- select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_OF
+ select DMA_VIRTUAL_CHANNELS
help
- Enable support for the AMCC PPC440SPe RAID engines.
+ Enable support for the MOXA ART SoC DMA controller.
+
+	  Say Y here if your platform has a MOXA ART SoC, otherwise say N.
-config TIMB_DMA
- tristate "Timberdale FPGA DMA support"
- depends on MFD_TIMBERDALE
+config MPC512X_DMA
+ tristate "Freescale MPC512x built-in DMA engine support"
+ depends on PPC_MPC512x || PPC_MPC831x
select DMA_ENGINE
- help
- Enable support for the Timberdale FPGA DMA engine.
+ ---help---
+ Enable support for the Freescale MPC512x built-in DMA engine.
-config SIRF_DMA
- tristate "CSR SiRFprimaII/SiRFmarco DMA support"
- depends on ARCH_SIRF
+config MV_XOR
+ bool "Marvell XOR engine support"
+ depends on PLAT_ORION
select DMA_ENGINE
- help
- Enable support for the CSR SiRFprimaII DMA engine.
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ ---help---
+ Enable support for the Marvell XOR engine.
-config TI_EDMA
- bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
+config MXS_DMA
+ bool "MXS DMA support"
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+ select STMP_DEVICE
select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select TI_PRIV_EDMA
- default n
help
- Enable support for the TI EDMA controller. This DMA
- engine is found on TI DaVinci and AM33xx parts.
+ Support the MXS DMA engine. This engine including APBH-DMA
+ and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
-config TI_DMA_CROSSBAR
- bool
+config MX3_IPU
+ bool "MX3x Image Processing Unit support"
+ depends on ARCH_MXC
+ select DMA_ENGINE
+ default y
+ help
+ If you plan to use the Image Processing unit in the i.MX3x, say
+ Y here. If unsure, select Y.
-config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
- bool
+config MX3_IPU_IRQS
+ int "Number of dynamically mapped interrupts for IPU"
+ depends on MX3_IPU
+ range 2 137
+ default 4
+ help
+ Out of 137 interrupt sources on i.MX31 IPU only very few are used.
+ To avoid bloating the irq_desc[] array we allocate a sufficient
+ number of IRQ slots and map them dynamically to specific sources.
-config PL330_DMA
- tristate "DMA API Driver for PL330"
+config NBPFAXI_DMA
+ tristate "Renesas Type-AXI NBPF DMA support"
select DMA_ENGINE
- depends on ARM_AMBA
+ depends on ARM || COMPILE_TEST
help
- Select if your platform has one or more PL330 DMACs.
- You need to provide platform specific settings via
- platform_data for a dma-pl330 device.
+ Support for "Type-AXI" NBPF DMA IPs from Renesas
config PCH_DMA
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
@@ -285,72 +388,87 @@ config PCH_DMA
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-config IMX_SDMA
- tristate "i.MX SDMA support"
- depends on ARCH_MXC
+config PL330_DMA
+ tristate "DMA API Driver for PL330"
select DMA_ENGINE
+ depends on ARM_AMBA
help
- Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51/53/6 chips.
+ Select if your platform has one or more PL330 DMACs.
+ You need to provide platform specific settings via
+ platform_data for a dma-pl330 device.
-config IMX_DMA
- tristate "i.MX DMA support"
- depends on ARCH_MXC
+config PXA_DMA
+ bool "PXA DMA support"
+ depends on (ARCH_MMP || ARCH_PXA)
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
- Support the i.MX DMA engine. This engine is integrated into
- Freescale i.MX1/21/27 chips.
+ Support the DMA engine for PXA. It is also compatible with MMP PDMA
+ platform. The internal DMA IP of all PXA variants is supported, with
+ 16 to 32 channels for peripheral to memory or memory to memory
+ transfers.
-config MXS_DMA
- bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
- select STMP_DEVICE
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config SIRF_DMA
+ tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+ depends on ARCH_SIRF
select DMA_ENGINE
help
- Support the MXS DMA engine. This engine including APBH-DMA
- and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+ Enable support for the CSR SiRFprimaII DMA engine.
-config EP93XX_DMA
- bool "Cirrus Logic EP93xx DMA support"
- depends on ARCH_EP93XX
+config STE_DMA40
+ bool "ST-Ericsson DMA40 support"
+ depends on ARCH_U8500
select DMA_ENGINE
help
- Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+ Support for ST-Ericsson DMA40 controller
-config DMA_SA11X0
- tristate "SA-11x0 DMA support"
- depends on ARCH_SA1100
+config S3C24XX_DMAC
+ tristate "Samsung S3C24XX DMA support"
+ depends on ARCH_S3C24XX
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support the DMA engine found on Intel StrongARM SA-1100 and
- SA-1110 SoCs. This DMA engine can only be used with on-chip
- devices.
+ Support for the Samsung S3C24XX DMA controller driver. The
+	  DMA controller has multiple DMA channels which can be
+ configured for different peripherals like audio, UART, SPI.
+ The DMA controller can transfer data from memory to peripheral,
+	  peripheral to memory, peripheral to peripheral and memory to memory.
-config MMP_TDMA
- bool "MMP Two-Channel DMA support"
- depends on ARCH_MMP
+config TXX9_DMAC
+ tristate "Toshiba TXx9 SoC DMA support"
+ depends on MACH_TX49XX || MACH_TX39XX
select DMA_ENGINE
- select MMP_SRAM
help
- Support the MMP Two-Channel DMA engine.
- This engine used for MMP Audio DMA and pxa910 SQU.
- It needs sram driver under mach-mmp.
-
- Say Y here if you enabled MMP ADMA, otherwise say N.
+ Support the TXx9 SoC internal DMA controller. This can be
+ integrated in chips such as the Toshiba TX4927/38/39.
-config DMA_OMAP
- tristate "OMAP DMA support"
- depends on ARCH_OMAP
+config TEGRA20_APB_DMA
+ bool "NVIDIA Tegra20 APB DMA support"
+ depends on ARCH_TEGRA
select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if SOC_DRA7XX
+ help
+ Support for the NVIDIA Tegra20 APB DMA controller driver. The
+	  DMA controller has multiple DMA channels which can be
+	  configured for different peripherals like audio, UART, SPI,
+	  I2C etc., which are on the APB bus.
+ This DMA controller transfers data from memory to peripheral fifo
+ or vice versa. It does not support memory to memory data transfer.
-config DMA_BCM2835
- tristate "BCM2835 DMA engine support"
- depends on ARCH_BCM2835
+config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+ depends on MFD_TIMBERDALE
select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Timberdale FPGA DMA engine.
config TI_CPPI41
tristate "AM33xx CPPI41 DMA support"
@@ -360,56 +478,28 @@ config TI_CPPI41
The Communications Port Programming Interface (CPPI) 4.1 DMA engine
is currently used by the USB driver on AM335x platforms.
-config MMP_PDMA
- bool "MMP PDMA support"
- depends on (ARCH_MMP || ARCH_PXA)
- select DMA_ENGINE
- help
- Support the MMP PDMA engine for PXA and MMP platform.
-
-config DMA_JZ4740
- tristate "JZ4740 DMA support"
- depends on MACH_JZ4740
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
-config DMA_JZ4780
- tristate "JZ4780 DMA support"
- depends on MACH_JZ4780
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- This selects support for the DMA controller in Ingenic JZ4780 SoCs.
- If you have a board based on such a SoC and wish to use DMA for
- devices which can use the DMA controller, say Y or M here.
+config TI_DMA_CROSSBAR
+ bool
-config K3_DMA
- tristate "Hisilicon K3 DMA support"
- depends on ARCH_HI3xxx
+config TI_EDMA
+ bool "TI EDMA support"
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+ select TI_PRIV_EDMA
+ default n
help
- Support the DMA engine for Hisilicon K3 platform
- devices.
+ Enable support for the TI EDMA controller. This DMA
+ engine is found on TI DaVinci and AM33xx parts.
-config MOXART_DMA
- tristate "MOXART DMA support"
- depends on ARCH_MOXART
- select DMA_ENGINE
- select DMA_OF
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the MOXA ART SoC DMA controller.
-
-config FSL_EDMA
- tristate "Freescale eDMA engine support"
- depends on OF
+config XGENE_DMA
+ tristate "APM X-Gene DMA support"
+ depends on ARCH_XGENE || COMPILE_TEST
select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
- Support the Freescale eDMA engine with programmable channel
- multiplexing capability for DMA request sources(slot).
- This module can be found on Freescale Vybrid and LS-1 SoCs.
+ Enable support for the APM X-Gene SoC DMA engine.
config XILINX_VDMA
tristate "Xilinx AXI VDMA Engine"
@@ -425,55 +515,25 @@ config XILINX_VDMA
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
-config DMA_SUN6I
- tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
- depends on RESET_CONTROLLER
+config ZX_DMA
+ tristate "ZTE ZX296702 DMA support"
+ depends on ARCH_ZX
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the DMA engine first found in Allwinner A31 SoCs.
+ Support the DMA engine for ZTE ZX296702 platform devices.
-config NBPFAXI_DMA
- tristate "Renesas Type-AXI NBPF DMA support"
- select DMA_ENGINE
- depends on ARM || COMPILE_TEST
- help
- Support for "Type-AXI" NBPF DMA IPs from Renesas
-config IMG_MDC_DMA
- tristate "IMG MDC support"
- depends on MIPS || COMPILE_TEST
- depends on MFD_SYSCON
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the IMG multi-threaded DMA controller (MDC).
-
-config XGENE_DMA
- tristate "APM X-Gene DMA support"
- depends on ARCH_XGENE || COMPILE_TEST
- select DMA_ENGINE
- select DMA_ENGINE_RAID
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
- help
- Enable support for the APM X-Gene SoC DMA engine.
-
-config DMA_ENGINE
- bool
+# driver files
+source "drivers/dma/bestcomm/Kconfig"
-config DMA_VIRTUAL_CHANNELS
- tristate
+source "drivers/dma/dw/Kconfig"
-config DMA_ACPI
- def_bool y
- depends on ACPI
+source "drivers/dma/hsu/Kconfig"
-config DMA_OF
- def_bool y
- depends on OF
- select DMA_ENGINE
+source "drivers/dma/sh/Kconfig"
+# clients
comment "DMA Clients"
depends on DMA_ENGINE
@@ -498,13 +558,4 @@ config DMATEST
config DMA_ENGINE_RAID
bool
-config QCOM_BAM_DMA
- tristate "QCOM BAM DMA support"
- depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- ---help---
- Enable support for the QCOM BAM DMA controller. This controller
- provides DMA capabilities for a variety of on-chip devices.
-
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6a4d6f2827da..7711a7180726 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,58 +1,69 @@
+#dmaengine debug flags
subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
+#core
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
+#dmatest
obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_HSU_DMA) += hsu/
-obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
-obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_DW_DMAC_CORE) += dw/
+
+#devices
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
-obj-$(CONFIG_MX3_IPU) += ipu/
-obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
+obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HSU_DMA) += hsu/
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IDMA64) += idma64.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
+obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
-obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
-obj-$(CONFIG_PL330_DMA) += pl330.o
-obj-$(CONFIG_PCH_DMA) += pch_dma.o
-obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
-obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
-obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
-obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
-obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
-obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
-obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
-obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_K3_DMA) += k3dma.o
-obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
-obj-$(CONFIG_FSL_RAID) += fsl_raid.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
-obj-y += xilinx/
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
-obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
-obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
-obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
+obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+
+obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5de3cf453f35..9b42c0588550 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -2030,10 +2032,188 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
}
#endif
+#ifdef CONFIG_OF
+static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
+ u32 id)
+{
+ struct pl08x_dma_chan *chan;
+
+ list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
+ if (chan->signal == id)
+ return &chan->vc.chan;
+ }
+
+ return NULL;
+}
+
+static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
+ struct pl08x_channel_data *data;
+ struct pl08x_dma_chan *chan;
+ struct dma_chan *dma_chan;
+
+ if (!pl08x)
+ return NULL;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
+ if (dma_chan)
+ return dma_get_slave_channel(dma_chan);
+
+ chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
+ GFP_KERNEL);
+ if (!chan)
+ return NULL;
+
+ data = (void *)&chan[1];
+ data->bus_id = "(none)";
+ data->periph_buses = dma_spec->args[1];
+
+ chan->cd = data;
+ chan->host = pl08x;
+ chan->slave = true;
+ chan->name = data->bus_id;
+ chan->state = PL08X_CHAN_IDLE;
+ chan->signal = dma_spec->args[0];
+ chan->vc.desc_free = pl08x_desc_free;
+
+ vchan_init(&chan->vc, &pl08x->slave);
+
+ return dma_get_slave_channel(&chan->vc.chan);
+}
+
+static int pl08x_of_probe(struct amba_device *adev,
+ struct pl08x_driver_data *pl08x,
+ struct device_node *np)
+{
+ struct pl08x_platform_data *pd;
+ u32 cctl_memcpy = 0;
+ u32 val;
+ int ret;
+
+ pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ /* Eligible bus masters for fetching LLIs */
+ if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
+ pd->lli_buses |= PL08X_AHB1;
+ if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
+ pd->lli_buses |= PL08X_AHB2;
+ if (!pd->lli_buses) {
+ dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
+ pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
+ }
+
+ /* Eligible bus masters for memory access */
+ if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
+ pd->mem_buses |= PL08X_AHB1;
+ if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
+ pd->mem_buses |= PL08X_AHB2;
+ if (!pd->mem_buses) {
+ dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
+ pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
+ }
+
+ /* Parse the memcpy channel properties */
+ ret = of_property_read_u32(np, "memcpy-burst-size", &val);
+ if (ret) {
+ dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
+ val = 1;
+ }
+ switch (val) {
+ default:
+ dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
+ /* Fall through */
+ case 1:
+ cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 4:
+ cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 8:
+ cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 16:
+ cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 32:
+ cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 64:
+ cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 128:
+ cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case 256:
+ cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ }
+
+ ret = of_property_read_u32(np, "memcpy-bus-width", &val);
+ if (ret) {
+ dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
+ val = 8;
+ }
+ switch (val) {
+ default:
+ dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
+ /* Fall through */
+ case 8:
+ cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 16:
+ cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 32:
+ cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ }
+
+ /* This is currently the only thing making sense */
+ cctl_memcpy |= PL080_CONTROL_PROT_SYS;
+
+ /* Set up memcpy channel */
+ pd->memcpy_channel.bus_id = "memcpy";
+ pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
+ /* Use the buses that can access memory, obviously */
+ pd->memcpy_channel.periph_buses = pd->mem_buses;
+
+ pl08x->pd = pd;
+
+ return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
+ pl08x);
+}
+#else
+static inline int pl08x_of_probe(struct amba_device *adev,
+ struct pl08x_driver_data *pl08x,
+ struct device_node *np)
+{
+ return -EINVAL;
+}
+#endif
+
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
+ struct device_node *np = adev->dev.of_node;
u32 tsfr_size;
int ret = 0;
int i;
@@ -2093,9 +2273,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Get the platform data */
pl08x->pd = dev_get_platdata(&adev->dev);
if (!pl08x->pd) {
- dev_err(&adev->dev, "no platform data supplied\n");
- ret = -EINVAL;
- goto out_no_platdata;
+ if (np) {
+ ret = pl08x_of_probe(adev, pl08x, np);
+ if (ret)
+ goto out_no_platdata;
+ } else {
+ dev_err(&adev->dev, "no platform data supplied\n");
+ ret = -EINVAL;
+ goto out_no_platdata;
+ }
}
/* Assign useful pointers to the driver state */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..58d406230d89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+#define ATC_MAX_DSCR_TRIALS 10
+
/*
* Initial number of descriptors to allocate for each channel. This could
* be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
*
* @current_len: the number of bytes left before reading CTRLA
* @ctrla: the value of CTRLA
- * @desc: the descriptor containing the transfer width
*/
-static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
- struct at_desc *desc)
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
- return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
-}
+ u32 btsize = (ctrla & ATC_BTSIZE_MAX);
+ u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
-/**
- * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
- * to the current value of CTRLA.
- *
- * @current_len: the number of bytes left before reading CTRLA
- * @atchan: the channel to read CTRLA for
- * @desc: the descriptor containing the transfer width
- */
-static inline int atc_calc_bytes_left_from_reg(int current_len,
- struct at_dma_chan *atchan, struct at_desc *desc)
-{
- u32 ctrla = channel_readl(atchan, CTRLA);
-
- return atc_calc_bytes_left(current_len, ctrla, desc);
+ /*
+ * According to the datasheet, when reading the Control A Register
+ * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
+ * number of transfers completed on the Source Interface.
+ * So btsize is always a number of source width transfers.
+ */
+ return current_len - (btsize << src_width);
}
/**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr;
+ u32 ctrla, dscr, trials;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
* the channel's DSCR register and compare it against the value
* of the hardware linked list structure of each child
* descriptor.
+ *
+ * The CTRLA register provides us with the amount of data
+ * already read from the source for the current child
+ * descriptor. So we can compute a more accurate residue by also
+ * removing the number of bytes corresponding to this amount of
+ * data.
+ *
+ * However, the DSCR and CTRLA registers cannot be read both
+ * atomically. Hence a race condition may occur: the first read
+ * register may refer to one child descriptor whereas the second
+ * read may refer to a later child descriptor in the list
+	 * because of the DMA transfer progression in between the two
+ * reads.
+ *
+ * One solution could have been to pause the DMA transfer, read
+ * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+ * this approach presents some drawbacks:
+ * - If the DMA transfer is paused, RX overruns or TX underruns
+	 *   are more likely to occur depending on the system latency.
+ * Taking the USART driver as an example, it uses a cyclic DMA
+ * transfer to read data from the Receive Holding Register
+ * (RHR) to avoid RX overruns since the RHR is not protected
+ * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+ * to compute the residue would break the USART driver design.
+ * - The atc_pause() function masks interrupts but we'd rather
+	 *   avoid doing so for system latency reasons.
+ *
+	 * Instead we use another solution: the DSCR is read a first
+	 * time, the CTRLA is read in turn, then the DSCR is read a
+	 * second time. If the two consecutive DSCR values are the same,
+	 * we assume both refer to the very same child descriptor, as
+	 * does the CTRLA value read in between. For cyclic transfers,
+	 * the assumption is that a full loop is "not so fast".
+	 * If the two DSCR values differ, we read the CTRLA again, then
+	 * the DSCR, until two consecutive DSCR reads are equal or the
+	 * maximum number of trials is reached.
+	 * This algorithm is very unlikely to fail to find a stable
+	 * value for DSCR.
*/
- ctrla = channel_readl(atchan, CTRLA);
- rmb(); /* ensure CTRLA is read before DSCR */
dscr = channel_readl(atchan, DSCR);
+ rmb(); /* ensure DSCR is read before CTRLA */
+ ctrla = channel_readl(atchan, CTRLA);
+ for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ u32 new_dscr;
+
+ rmb(); /* ensure DSCR is read after CTRLA */
+ new_dscr = channel_readl(atchan, DSCR);
+
+ /*
+ * If the DSCR register value has not changed inside the
+ * DMA controller since the previous read, we assume
+		 * that both the dscr and ctrla values refer to the
+ * very same descriptor.
+ */
+ if (likely(new_dscr == dscr))
+ break;
+
+ /*
+ * DSCR has changed inside the DMA controller, so the
+		 * previously read value of CTRLA may refer to an already
+		 * processed descriptor and hence could be outdated.
+ * We need to update ctrla to match the current
+ * descriptor.
+ */
+ dscr = new_dscr;
+ rmb(); /* ensure DSCR is read before CTRLA */
+ ctrla = channel_readl(atchan, CTRLA);
+ }
+ if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
if (desc_first->lli.dscr == dscr)
- return atc_calc_bytes_left(ret, ctrla, desc_first);
+ return atc_calc_bytes_left(ret, ctrla);
ret -= desc_first->len;
list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
}
/*
- * For the last descriptor in the chain we can calculate
+ * For the current descriptor in the chain we can calculate
* the remaining bytes using the channel's register.
- * Note that the transfer width of the first and last
- * descriptor may differ.
*/
- if (!desc->lli.dscr)
- ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+ ret = atc_calc_bytes_left(ret, ctrla);
} else {
/* single transfer */
- ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
+ ctrla = channel_readl(atchan, CTRLA);
+ ret = atc_calc_bytes_left(ret, ctrla);
}
return ret;
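
The long comment in atc_get_bytes_left() above describes a seqlock-style
stable-snapshot read of two registers that cannot be sampled atomically. A
generic sketch of the same idiom, with placeholder reg_read()/REG_* names and
the memory barriers elided for brevity (this is an illustration, not at_hdmac
code):

	#define MAX_TRIALS	10

	static int read_stable_pair(u32 *index, u32 *value)
	{
		u32 idx = reg_read(REG_INDEX);
		u32 trials;

		*value = reg_read(REG_VALUE);
		for (trials = 0; trials < MAX_TRIALS; ++trials) {
			u32 new_idx = reg_read(REG_INDEX);

			if (new_idx == idx)
				break;		/* value belongs to idx */
			idx = new_idx;		/* raced: resample value */
			*value = reg_read(REG_VALUE);
		}
		if (trials >= MAX_TRIALS)
			return -ETIMEDOUT;	/* no stable pair found */

		*index = idx;
		return 0;
	}

In the driver, DSCR plays the role of the index register and CTRLA the role
of the value register.
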
@@ -390,6 +448,7 @@ static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
@@ -398,6 +457,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
+ /* If the transfer was a memset, free our temporary buffer */
+ if (desc->memset) {
+ dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+ desc->memset_paddr);
+ desc->memset = false;
+ }
+
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
/* move myself to free_list */
@@ -659,14 +725,14 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
size_t len = 0;
int i;
+ if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+ return NULL;
+
dev_info(chan2dev(chan),
"%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, xt->src_start, xt->dst_start, xt->numf,
xt->frame_size, flags);
- if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
- return NULL;
-
/*
* The controller can only "skip" X bytes every Y bytes, so we
* need to make sure we are given a template that fit that
@@ -726,7 +792,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
desc->txd.cookie = -EBUSY;
desc->total_len = desc->len = len;
- desc->tx_width = dwidth;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@@ -804,10 +869,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
first->txd.cookie = -EBUSY;
first->total_len = len;
- /* set transfer width for the calculation of the residue */
- first->tx_width = src_width;
- prev->tx_width = src_width;
-
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@@ -820,6 +881,93 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation virtual destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma *atdma = to_at_dma(chan->device);
+ struct at_desc *desc = NULL;
+ size_t xfer_count;
+ u32 ctrla;
+ u32 ctrlb;
+
+ dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+ dest, value, len, flags);
+
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+ return NULL;
+ }
+
+ if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+ dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+ __func__);
+ return NULL;
+ }
+
+ xfer_count = len >> 2;
+ if (xfer_count > ATC_BTSIZE_MAX) {
+ dev_err(chan2dev(chan), "%s: buffer is too big\n",
+ __func__);
+ return NULL;
+ }
+
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_DST_ADDR_MODE_INCR
+ | ATC_FC_MEM2MEM;
+
+ ctrla = ATC_SRC_WIDTH(2) |
+ ATC_DST_WIDTH(2);
+
+ desc = atc_desc_get(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+ __func__);
+ return NULL;
+ }
+
+ desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+ &desc->memset_paddr);
+ if (!desc->memset_vaddr) {
+ dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+ __func__);
+ goto err_put_desc;
+ }
+
+ *desc->memset_vaddr = value;
+ desc->memset = true;
+
+ desc->lli.saddr = desc->memset_paddr;
+ desc->lli.daddr = dest;
+ desc->lli.ctrla = ctrla | xfer_count;
+ desc->lli.ctrlb = ctrlb;
+
+ desc->txd.cookie = -EBUSY;
+ desc->len = len;
+ desc->total_len = len;
+
+ /* set end-of-link on the descriptor */
+ set_desc_eol(desc);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+
+err_put_desc:
+ atc_desc_put(atchan, desc);
+ return NULL;
+}
+
/**
* atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
@@ -956,10 +1104,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first->txd.cookie = -EBUSY;
first->total_len = total_len;
- /* set transfer width for the calculation of the residue */
- first->tx_width = reg_width;
- prev->tx_width = reg_width;
-
/* first link descriptor of list is responsible of flags */
first->txd.flags = flags; /* client is in control of this ack */
@@ -1077,12 +1221,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
desc->txd.cookie = 0;
desc->len = len;
- /*
- * Although we only need the transfer width for the first and
- * the last descriptor, its easier to set it to all descriptors.
- */
- desc->tx_width = src_width;
-
atc_desc_chain(&first, &prev, desc);
/* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1394,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->total_len = buf_len;
- first->tx_width = reg_width;
return &first->txd;
@@ -1713,6 +1850,8 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
@@ -1776,7 +1915,16 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (!atdma->dma_desc_pool) {
dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
err = -ENOMEM;
- goto err_pool_create;
+ goto err_desc_pool_create;
+ }
+
+ /* create a pool of consistent memory blocks for memset blocks */
+ atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
+ &pdev->dev, sizeof(int), 4, 0);
+ if (!atdma->memset_pool) {
+ dev_err(&pdev->dev, "No memory for memset dma pool\n");
+ err = -ENOMEM;
+ goto err_memset_pool_create;
}
/* clear any pending interrupt */
@@ -1822,6 +1970,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
+ if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
+ atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+ atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+ }
+
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
/* controller can do slave DMA: can trigger cyclic transfers */
@@ -1842,8 +1995,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
+ dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
@@ -1868,8 +2022,10 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->memset_pool);
+err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
-err_pool_create:
+err_desc_pool_create:
free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
clk_disable_unprepare(atdma->clk);
@@ -1894,6 +2050,7 @@ static int at_dma_remove(struct platform_device *pdev)
at_dma_off(atdma);
dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->memset_pool);
dma_pool_destroy(atdma->dma_desc_pool);
free_irq(platform_get_irq(pdev, 0), atdma);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..c3bebbe899ac 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
#define ATC_SRC_WIDTH_WORD (0x2 << 24)
+#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
#define ATC_DST_WIDTH(x) ((x) << 28)
#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
* @txd: support for the async_tx api
* @desc_node: node on the channed descriptors list
* @len: descriptor byte count
- * @tx_width: transfer width
* @total_len: total transaction byte count
*/
struct at_desc {
@@ -194,13 +194,17 @@ struct at_desc {
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
- u32 tx_width;
size_t total_len;
/* Interleaved data */
size_t boundary;
size_t dst_hole;
size_t src_hole;
+
+ /* Memset temporary buffer */
+ bool memset;
+ dma_addr_t memset_paddr;
+ int *memset_vaddr;
};
static inline struct at_desc *
@@ -331,6 +335,7 @@ struct at_dma {
u8 all_chan_mask;
struct dma_pool *dma_desc_pool;
+ struct dma_pool *memset_pool;
/* AT THE END channels table */
struct at_dma_chan chan[0];
};
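
With DMA_MEMSET wired up in at_dma_probe() and the temporary one-word fill
buffer tracked per descriptor, a client can issue a fill through the generic
dmaengine interface. A hedged consumer sketch (function and parameter names
are illustrative; error handling trimmed):

	static void memset_via_dmaengine(struct dma_chan *chan,
					 dma_addr_t dst_phys, size_t len)
	{
		struct dma_async_tx_descriptor *tx;

		/* len must honour the 4-byte fill_align advertised above */
		tx = chan->device->device_prep_dma_memset(chan, dst_phys, 0x5a,
							  len,
							  DMA_PREP_INTERRUPT);
		if (tx) {
			dmaengine_submit(tx);
			dma_async_issue_pending(chan);
		}
	}
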
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..a165b4bfd330 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
* descriptor view 2 since some fields of the configuration register
* depend on transfer size and src/dest addresses.
*/
- if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (at_xdmac_chan_is_cyclic(atchan))
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
- at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
- } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
+ else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
- } else {
- /*
- * No need to write AT_XDMAC_CC reg, it will be done when the
- * descriptor is fecthed.
- */
+ else
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
- }
+ /*
+ * Even if the register will be updated from the configuration in the
+ * descriptor when using view 2 or higher, the PROT bit won't be set
+ * properly. This bit can be modified only by using the channel
+ * configuration register.
+ */
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
reg |= AT_XDMAC_CNDC_NDDUP
| AT_XDMAC_CNDC_NDSUP
@@ -624,12 +625,12 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
- struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
- struct at_xdmac_desc *first = NULL, *prev = NULL;
- struct scatterlist *sg;
- int i;
- unsigned int xfer_size = 0;
- unsigned long irqflags;
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ struct scatterlist *sg;
+ int i;
+ unsigned int xfer_size = 0;
+ unsigned long irqflags;
struct dma_async_tx_descriptor *ret = NULL;
if (!sgl)
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lld.mbr_sa = mem;
desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
- desc->lld.mbr_cfg = atchan->cfg;
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+ dwidth = at_xdmac_get_dwidth(atchan->cfg);
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
- ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+ ? dwidth
: AT_XDMAC_CC_DWIDTH_BYTE;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
| (len >> fixed_dwidth); /* microblock length */
+ desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+ AT_XDMAC_CC_DWIDTH(fixed_dwidth);
dev_dbg(chan2dev(chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -795,10 +797,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
list_add_tail(&desc->desc_node, &first->descs_list);
}
- prev->lld.mbr_nda = first->tx_dma_desc.phys;
- dev_dbg(chan2dev(chan),
- "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
- __func__, prev, &prev->lld.mbr_nda);
+ at_xdmac_queue_desc(chan, prev, first);
first->tx_dma_desc.flags = flags;
first->xfer_size = buf_len;
first->direction = direction;
@@ -1133,7 +1132,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
* SAMA5D4x), so we can use the same interface for source and dest,
* that solves the fact we don't know the direction.
*/
- u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+ u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
@@ -1201,6 +1200,168 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return &desc->tx_dma_desc;
}
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, int value,
+ unsigned long flags)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *desc, *pdesc = NULL,
+ *ppdesc = NULL, *first = NULL;
+ struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
+ size_t stride = 0, pstride = 0, len = 0;
+ int i;
+
+ if (!sgl)
+ return NULL;
+
+ dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
+ __func__, sg_len, value, flags);
+
+ /* Prepare descriptors. */
+ for_each_sg(sgl, sg, sg_len, i) {
+ dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
+ __func__, sg_dma_address(sg), sg_dma_len(sg),
+ value, flags);
+ desc = at_xdmac_memset_create_desc(chan, atchan,
+ sg_dma_address(sg),
+ sg_dma_len(sg),
+ value);
+ if (!desc && first)
+ list_splice_init(&first->descs_list,
+ &atchan->free_descs_list);
+
+ if (!first)
+ first = desc;
+
+ /* Update our strides */
+ pstride = stride;
+ if (psg)
+ stride = sg_dma_address(sg) -
+ (sg_dma_address(psg) + sg_dma_len(psg));
+
+ /*
+ * The scatterlist API gives us only the address and
+	 * length of each element.
+ *
+ * Unfortunately, we don't have the stride, which we
+ * will need to compute.
+ *
+	 * That makes us end up in a situation like this one:
+ * len stride len stride len
+ * +-------+ +-------+ +-------+
+ * | N-2 | | N-1 | | N |
+ * +-------+ +-------+ +-------+
+ *
+ * We need all these three elements (N-2, N-1 and N)
+ * to actually take the decision on whether we need to
+	 * to decide whether we need to
+ *
+ * We will only consider N if it is the last element.
+ */
+ if (ppdesc && pdesc) {
+ if ((stride == pstride) &&
+ (sg_dma_len(ppsg) == sg_dma_len(psg))) {
+ dev_dbg(chan2dev(chan),
+ "%s: desc 0x%p can be merged with desc 0x%p\n",
+ __func__, pdesc, ppdesc);
+
+ /*
+ * Increment the block count of the
+ * N-2 descriptor
+ */
+ at_xdmac_increment_block_count(chan, ppdesc);
+ ppdesc->lld.mbr_dus = stride;
+
+ /*
+ * Put back the N-1 descriptor in the
+ * free descriptor list
+ */
+ list_add_tail(&pdesc->desc_node,
+ &atchan->free_descs_list);
+
+ /*
+ * Make our N-1 descriptor pointer
+ * point to the N-2 since they were
+ * actually merged.
+ */
+ pdesc = ppdesc;
+
+ /*
+ * Rule out the case where we don't have
+ * pstride computed yet (our second sg
+			 * element).
+ *
+ * We also want to catch the case where there
+ * would be a negative stride,
+			 * would be a negative stride.
+ } else if (pstride ||
+ sg_dma_address(sg) < sg_dma_address(psg)) {
+ /*
+ * Queue the N-1 descriptor after the
+ * N-2
+ */
+ at_xdmac_queue_desc(chan, ppdesc, pdesc);
+
+ /*
+ * Add the N-1 descriptor to the list
+ * of the descriptors used for this
+ * transfer
+ */
+ list_add_tail(&desc->desc_node,
+ &first->descs_list);
+ dev_dbg(chan2dev(chan),
+ "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ }
+ }
+
+ /*
+ * If we are the last element, just see if we have the
+	 * same size as the previous element.
+ *
+ * If so, we can merge it with the previous descriptor
+ * since we don't care about the stride anymore.
+ */
+ if ((i == (sg_len - 1)) &&
+ sg_dma_len(ppsg) == sg_dma_len(psg)) {
+ dev_dbg(chan2dev(chan),
+ "%s: desc 0x%p can be merged with desc 0x%p\n",
+ __func__, desc, pdesc);
+
+ /*
+ * Increment the block count of the N-1
+ * descriptor
+ */
+ at_xdmac_increment_block_count(chan, pdesc);
+ pdesc->lld.mbr_dus = stride;
+
+ /*
+ * Put back the N descriptor in the free
+ * descriptor list
+ */
+ list_add_tail(&desc->desc_node,
+ &atchan->free_descs_list);
+ }
+
+ /* Update our descriptors */
+ ppdesc = pdesc;
+ pdesc = desc;
+
+ /* Update our scatter pointers */
+ ppsg = psg;
+ psg = sg;
+
+ len += sg_dma_len(sg);
+ }
+
+ first->tx_dma_desc.cookie = -EBUSY;
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = len;
+
+ return &first->tx_dma_desc;
+}
+
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -1734,6 +1895,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
+ dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
/*
* Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1749,6 +1911,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
+ atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
atxdmac->dma.device_config = at_xdmac_device_config;
atxdmac->dma.device_pause = at_xdmac_device_pause;
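
at_xdmac_prep_dma_memset_sg() above infers the stride between consecutive
scatterlist entries and folds equal-size, equal-stride chunks into a single
descriptor with an incremented block count. The decision predicate, extracted
as a sketch (names hypothetical; the driver works directly on struct
scatterlist and struct at_xdmac_desc):

	/* Chunk N-1 can be merged into N-2 when both payload lengths and
	 * both trailing gaps (strides) match. */
	static bool memset_sg_mergeable(size_t len_n2, size_t len_n1,
					size_t stride_n2, size_t stride_n1)
	{
		return len_n2 == len_n1 && stride_n2 == stride_n1;
	}
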
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index fd22dd36985f..c340ca9bd2b5 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2730,7 +2730,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
* This controller can only access address at even 32bit boundaries,
* i.e. 2^2
*/
- base->dma_memcpy.copy_align = 2;
+ base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
err = dma_async_device_register(&base->dma_memcpy);
if (err)
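
The magic number 2 is replaced by DMAENGINE_ALIGN_4_BYTES here and in several
other drivers in this merge. The enum encodes log2 of the required alignment,
so the two spellings are equivalent; for reference, abridged from
include/linux/dmaengine.h as of this release:

	enum dmaengine_alignment {
		DMAENGINE_ALIGN_1_BYTE = 0,
		DMAENGINE_ALIGN_2_BYTES = 1,
		DMAENGINE_ALIGN_4_BYTES = 2,	/* 2^2 = 4-byte boundaries */
		DMAENGINE_ALIGN_8_BYTES = 3,
		/* ... up to DMAENGINE_ALIGN_64_BYTES = 6 */
	};
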
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
new file mode 100644
index 000000000000..5b2395e7e04d
--- /dev/null
+++ b/drivers/dma/dma-axi-dmac.c
@@ -0,0 +1,691 @@
+/*
+ * Driver for the Analog Devices AXI-DMAC core
+ *
+ * Copyright 2013-2015 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/axi-dmac.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
+ * various instantiation parameters which decide the exact feature set
+ * supported by the core.
+ *
+ * Each channel of the core has a source interface and a destination interface.
+ * The number of channels and the type of the channel interfaces are selected
+ * at configuration time. An interface can either be connected to a central memory
+ * interconnect, which allows access to system memory, or it can be connected to
+ * a dedicated bus which is directly connected to a data port on a peripheral.
+ * Given that those are configuration options of the core that are selected when
+ * it is instantiated, this means that they cannot be changed by software at
+ * runtime. By extension this means that each channel is uni-directional. It can
+ * either be device to memory or memory to device, but not both. Also since the
+ * device side is a dedicated data bus only connected to a single peripheral,
+ * there is no address that can or needs to be configured for the device side.
+ */
+
+#define AXI_DMAC_REG_IRQ_MASK 0x80
+#define AXI_DMAC_REG_IRQ_PENDING 0x84
+#define AXI_DMAC_REG_IRQ_SOURCE 0x88
+
+#define AXI_DMAC_REG_CTRL 0x400
+#define AXI_DMAC_REG_TRANSFER_ID 0x404
+#define AXI_DMAC_REG_START_TRANSFER 0x408
+#define AXI_DMAC_REG_FLAGS 0x40c
+#define AXI_DMAC_REG_DEST_ADDRESS 0x410
+#define AXI_DMAC_REG_SRC_ADDRESS 0x414
+#define AXI_DMAC_REG_X_LENGTH 0x418
+#define AXI_DMAC_REG_Y_LENGTH 0x41c
+#define AXI_DMAC_REG_DEST_STRIDE 0x420
+#define AXI_DMAC_REG_SRC_STRIDE 0x424
+#define AXI_DMAC_REG_TRANSFER_DONE 0x428
+#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
+#define AXI_DMAC_REG_STATUS 0x430
+#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
+#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
+
+#define AXI_DMAC_CTRL_ENABLE BIT(0)
+#define AXI_DMAC_CTRL_PAUSE BIT(1)
+
+#define AXI_DMAC_IRQ_SOT BIT(0)
+#define AXI_DMAC_IRQ_EOT BIT(1)
+
+#define AXI_DMAC_FLAG_CYCLIC BIT(0)
+
+struct axi_dmac_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dest_addr;
+ unsigned int x_len;
+ unsigned int y_len;
+ unsigned int dest_stride;
+ unsigned int src_stride;
+ unsigned int id;
+};
+
+struct axi_dmac_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+
+ unsigned int num_submitted;
+ unsigned int num_completed;
+ unsigned int num_sgs;
+ struct axi_dmac_sg sg[];
+};
+
+struct axi_dmac_chan {
+ struct virt_dma_chan vchan;
+
+ struct axi_dmac_desc *next_desc;
+ struct list_head active_descs;
+ enum dma_transfer_direction direction;
+
+ unsigned int src_width;
+ unsigned int dest_width;
+ unsigned int src_type;
+ unsigned int dest_type;
+
+ unsigned int max_length;
+ unsigned int align_mask;
+
+ bool hw_cyclic;
+ bool hw_2d;
+};
+
+struct axi_dmac {
+ void __iomem *base;
+ int irq;
+
+ struct clk *clk;
+
+ struct dma_device dma_dev;
+ struct axi_dmac_chan chan;
+
+ struct device_dma_parameters dma_parms;
+};
+
+static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct axi_dmac,
+ dma_dev);
+}
+
+static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
+{
+ return container_of(c, struct axi_dmac_chan, vchan.chan);
+}
+
+static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct axi_dmac_desc, vdesc);
+}
+
+static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, axi_dmac->base + reg);
+}
+
+static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
+{
+ return readl(axi_dmac->base + reg);
+}
+
+static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
+{
+ return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
+{
+ return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
+{
+ if (len == 0 || len > chan->max_length)
+ return false;
+ if ((len & chan->align_mask) != 0) /* Not aligned */
+ return false;
+ return true;
+}
+
+static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
+{
+ if ((addr & chan->align_mask) != 0) /* Not aligned */
+ return false;
+ return true;
+}
+
+static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
+{
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ struct virt_dma_desc *vdesc;
+ struct axi_dmac_desc *desc;
+ struct axi_dmac_sg *sg;
+ unsigned int flags = 0;
+ unsigned int val;
+
+ val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+ if (val) /* Queue is full, wait for the next SOT IRQ */
+ return;
+
+ desc = chan->next_desc;
+
+ if (!desc) {
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return;
+ list_move_tail(&vdesc->node, &chan->active_descs);
+ desc = to_axi_dmac_desc(vdesc);
+ }
+ sg = &desc->sg[desc->num_submitted];
+
+ desc->num_submitted++;
+ if (desc->num_submitted == desc->num_sgs)
+ chan->next_desc = NULL;
+ else
+ chan->next_desc = desc;
+
+ sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+
+ if (axi_dmac_dest_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
+ }
+
+ if (axi_dmac_src_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+ }
+
+ /*
+ * If the hardware supports cyclic transfers and there is no callback to
+ * call, enable hw cyclic mode to avoid unnecessary interrupts.
+ */
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+ flags |= AXI_DMAC_FLAG_CYCLIC;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+ axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
+ axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
+}
+
+static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
+{
+ return list_first_entry_or_null(&chan->active_descs,
+ struct axi_dmac_desc, vdesc.node);
+}
+
+static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+ unsigned int completed_transfers)
+{
+ struct axi_dmac_desc *active;
+ struct axi_dmac_sg *sg;
+
+ active = axi_dmac_active_desc(chan);
+ if (!active)
+ return;
+
+ if (active->cyclic) {
+ vchan_cyclic_callback(&active->vdesc);
+ } else {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (!(BIT(sg->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ if (active->num_completed == active->num_sgs) {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ }
+ } while (active);
+ }
+}
+
+static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
+{
+ struct axi_dmac *dmac = devid;
+ unsigned int pending;
+
+ pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+ axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
+
+ spin_lock(&dmac->chan.vchan.lock);
+ /* One or more transfers have finished */
+ if (pending & AXI_DMAC_IRQ_EOT) {
+ unsigned int completed;
+
+ completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+ axi_dmac_transfer_done(&dmac->chan, completed);
+ }
+ /* Space has become available in the descriptor queue */
+ if (pending & AXI_DMAC_IRQ_SOT)
+ axi_dmac_start_transfer(&dmac->chan);
+ spin_unlock(&dmac->chan.vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int axi_dmac_terminate_all(struct dma_chan *c)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
+ chan->next_desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ list_splice_tail_init(&chan->active_descs, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static void axi_dmac_issue_pending(struct dma_chan *c)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ unsigned long flags;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (vchan_issue_pending(&chan->vchan))
+ axi_dmac_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+{
+ struct axi_dmac_desc *desc;
+
+ desc = kzalloc(sizeof(struct axi_dmac_desc) +
+ sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->num_sgs = num_sgs;
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
+ struct dma_chan *c, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ if (direction != chan->direction)
+ return NULL;
+
+ desc = axi_dmac_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
+ !axi_dmac_check_len(chan, sg_dma_len(sg))) {
+ kfree(desc);
+ return NULL;
+ }
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->sg[i].dest_addr = sg_dma_address(sg);
+ else
+ desc->sg[i].src_addr = sg_dma_address(sg);
+ desc->sg[i].x_len = sg_dma_len(sg);
+ desc->sg[i].y_len = 1;
+ }
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
+ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ unsigned int num_periods, i;
+
+ if (direction != chan->direction)
+ return NULL;
+
+ if (!axi_dmac_check_len(chan, buf_len) ||
+ !axi_dmac_check_addr(chan, buf_addr))
+ return NULL;
+
+ if (period_len == 0 || buf_len % period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ desc = axi_dmac_alloc_desc(num_periods);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < num_periods; i++) {
+ if (direction == DMA_DEV_TO_MEM)
+ desc->sg[i].dest_addr = buf_addr;
+ else
+ desc->sg[i].src_addr = buf_addr;
+ desc->sg[i].x_len = period_len;
+ desc->sg[i].y_len = 1;
+ buf_addr += period_len;
+ }
+
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
+ struct dma_chan *c, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ size_t dst_icg, src_icg;
+
+ if (xt->frame_size != 1)
+ return NULL;
+
+ if (xt->dir != chan->direction)
+ return NULL;
+
+ if (axi_dmac_src_is_mem(chan)) {
+ if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
+ return NULL;
+ }
+
+ if (axi_dmac_dest_is_mem(chan)) {
+ if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
+ return NULL;
+ }
+
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+
+ if (chan->hw_2d) {
+ if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
+ !axi_dmac_check_len(chan, xt->numf))
+ return NULL;
+ if (xt->sgl[0].size + dst_icg > chan->max_length ||
+ xt->sgl[0].size + src_icg > chan->max_length)
+ return NULL;
+ } else {
+ if (dst_icg != 0 || src_icg != 0)
+ return NULL;
+ if (chan->max_length / xt->sgl[0].size < xt->numf)
+ return NULL;
+ if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
+ return NULL;
+ }
+
+ desc = axi_dmac_alloc_desc(1);
+ if (!desc)
+ return NULL;
+
+ if (axi_dmac_src_is_mem(chan)) {
+ desc->sg[0].src_addr = xt->src_start;
+ desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+ }
+
+ if (axi_dmac_dest_is_mem(chan)) {
+ desc->sg[0].dest_addr = xt->dst_start;
+ desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+ }
+
+ if (chan->hw_2d) {
+ desc->sg[0].x_len = xt->sgl[0].size;
+ desc->sg[0].y_len = xt->numf;
+ } else {
+ desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
+ desc->sg[0].y_len = 1;
+ }
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void axi_dmac_free_chan_resources(struct dma_chan *c)
+{
+ vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+}
+
+/*
+ * The configuration stored in the devicetree matches the configuration
+ * parameters of the peripheral instance and allows the driver to know which
+ * features are implemented and how it should behave.
+ */
+static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
+ struct axi_dmac_chan *chan)
+{
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(of_chan, "reg", &val);
+ if (ret)
+ return ret;
+
+ /* We only support 1 channel for now */
+ if (val != 0)
+ return -EINVAL;
+
+ ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
+ if (ret)
+ return ret;
+ if (val > AXI_DMAC_BUS_TYPE_FIFO)
+ return -EINVAL;
+ chan->src_type = val;
+
+ ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
+ if (ret)
+ return ret;
+ if (val > AXI_DMAC_BUS_TYPE_FIFO)
+ return -EINVAL;
+ chan->dest_type = val;
+
+ ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
+ if (ret)
+ return ret;
+ chan->src_width = val / 8;
+
+ ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
+ if (ret)
+ return ret;
+ chan->dest_width = val / 8;
+
+ ret = of_property_read_u32(of_chan, "adi,length-width", &val);
+ if (ret)
+ return ret;
+
+ if (val >= 32)
+ chan->max_length = UINT_MAX;
+ else
+ chan->max_length = (1ULL << val) - 1;
+
+ chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+ if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_MEM_TO_MEM;
+ else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_MEM_TO_DEV;
+ else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_DEV_TO_MEM;
+ else
+ chan->direction = DMA_DEV_TO_DEV;
+
+ chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
+ chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
+
+ return 0;
+}
+
+static int axi_dmac_probe(struct platform_device *pdev)
+{
+ struct device_node *of_channels, *of_chan;
+ struct dma_device *dma_dev;
+ struct axi_dmac *dmac;
+ struct resource *res;
+ int ret;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->irq = platform_get_irq(pdev, 0);
+ if (dmac->irq <= 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmac->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ dmac->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dmac->clk))
+ return PTR_ERR(dmac->clk);
+
+ INIT_LIST_HEAD(&dmac->chan.active_descs);
+
+ of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
+ if (of_channels == NULL)
+ return -ENODEV;
+
+ for_each_child_of_node(of_channels, of_chan) {
+ ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+ if (ret) {
+ of_node_put(of_chan);
+ of_node_put(of_channels);
+ return -EINVAL;
+ }
+ }
+ of_node_put(of_channels);
+
+ pdev->dev.dma_parms = &dmac->dma_parms;
+ dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+
+ dma_dev = &dmac->dma_dev;
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = axi_dmac_issue_pending;
+ dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
+ dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
+ dma_dev->device_terminate_all = axi_dmac_terminate_all;
+ dma_dev->dev = &pdev->dev;
+ dma_dev->chancnt = 1;
+ dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
+ dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
+ dma_dev->directions = BIT(dmac->chan.direction);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ dmac->chan.vchan.desc_free = axi_dmac_desc_free;
+ vchan_init(&dmac->chan.vchan, dma_dev);
+
+ ret = clk_prepare_enable(dmac->clk);
+ if (ret < 0)
+ return ret;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_clk_disable;
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, dma_dev);
+ if (ret)
+ goto err_unregister_device;
+
+ ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+ dev_name(&pdev->dev), dmac);
+ if (ret)
+ goto err_unregister_of;
+
+ platform_set_drvdata(pdev, dmac);
+
+ return 0;
+
+err_unregister_of:
+ of_dma_controller_free(pdev->dev.of_node);
+err_unregister_device:
+ dma_async_device_unregister(&dmac->dma_dev);
+err_clk_disable:
+ clk_disable_unprepare(dmac->clk);
+
+ return ret;
+}
+
+static int axi_dmac_remove(struct platform_device *pdev)
+{
+ struct axi_dmac *dmac = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ free_irq(dmac->irq, dmac);
+ tasklet_kill(&dmac->chan.vchan.task);
+ dma_async_device_unregister(&dmac->dma_dev);
+ clk_disable_unprepare(dmac->clk);
+
+ return 0;
+}
+
+static const struct of_device_id axi_dmac_of_match_table[] = {
+ { .compatible = "adi,axi-dmac-1.00.a" },
+ { },
+};
+
+static struct platform_driver axi_dmac_driver = {
+ .driver = {
+ .name = "dma-axi-dmac",
+ .of_match_table = axi_dmac_of_match_table,
+ },
+ .probe = axi_dmac_probe,
+ .remove = axi_dmac_remove,
+};
+module_platform_driver(axi_dmac_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
+MODULE_LICENSE("GPL v2");
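
axi_dmac_parse_chan_dt() above derives the channel's runtime limits from the
synthesis-time parameters stored in the devicetree. Illustrative arithmetic
for a hypothetical instantiation with 64-bit source/destination buses and a
24-bit length register (not driver code):

	static void example_limits(void)
	{
		unsigned int src_width  = 64 / 8;		/* 8 bytes */
		unsigned int dest_width = 64 / 8;		/* 8 bytes */
		unsigned int max_length = (1U << 24) - 1;	/* 16 MiB - 1 */
		unsigned int align_mask = (dest_width > src_width ?
					   dest_width : src_width) - 1; /* 0x7 */
	}

With a memory-mapped source and a FIFO destination, such a channel would be
classified as DMA_MEM_TO_DEV by the same function.
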
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 26d2f0e09ea3..dade7c47ff18 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -145,7 +145,8 @@ struct jz4780_dma_dev {
struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
};
-struct jz4780_dma_data {
+struct jz4780_dma_filter_data {
+ struct device_node *of_node;
uint32_t transfer_type;
int channel;
};
@@ -214,11 +215,25 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
{
- *ord = ffs(val) - 1;
+ int ord = ffs(val) - 1;
- switch (*ord) {
+ /*
+	 * 8 byte transfer sizes are unsupported, so fall back to 4. If it's larger
+ * than the maximum, just limit it. It is perfectly safe to fall back
+ * in this way since we won't exceed the maximum burst size supported
+	 * by the device; the only effect is reduced efficiency. This is better
+ * than refusing to perform the request at all.
+ */
+ if (ord == 3)
+ ord = 2;
+ else if (ord > 7)
+ ord = 7;
+
+ *shift = ord;
+
+ switch (ord) {
case 0:
return JZ_DMA_SIZE_1_BYTE;
case 1:
@@ -231,20 +246,17 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
return JZ_DMA_SIZE_32_BYTE;
case 6:
return JZ_DMA_SIZE_64_BYTE;
- case 7:
- return JZ_DMA_SIZE_128_BYTE;
default:
- return -EINVAL;
+ return JZ_DMA_SIZE_128_BYTE;
}
}
-static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
enum dma_transfer_direction direction)
{
struct dma_slave_config *config = &jzchan->config;
uint32_t width, maxburst, tsz;
- int ord;
if (direction == DMA_MEM_TO_DEV) {
desc->dcm = JZ_DMA_DCM_SAI;
@@ -271,8 +283,8 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
* divisible by the transfer size, and we must not use more than the
* maximum burst specified by the user.
*/
- tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
- jzchan->transfer_shift = ord;
+ tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+ &jzchan->transfer_shift);
switch (width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -289,12 +301,14 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
- desc->dtc = len >> ord;
+ desc->dtc = len >> jzchan->transfer_shift;
+ return 0;
}
static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_desc *desc;
@@ -307,12 +321,11 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
for (i = 0; i < sg_len; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
- sg_dma_address(&sgl[i]),
- sg_dma_len(&sgl[i]),
- direction);
+ sg_dma_address(&sgl[i]),
+ sg_dma_len(&sgl[i]),
+ direction);
if (err < 0)
- return ERR_PTR(err);
-
+ return NULL;
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
@@ -354,9 +367,9 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
for (i = 0; i < periods; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
- period_len, direction);
+ period_len, direction);
if (err < 0)
- return ERR_PTR(err);
+ return NULL;
buf_addr += period_len;
@@ -390,15 +403,13 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_desc *desc;
uint32_t tsz;
- int ord;
desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
if (!desc)
return NULL;
- tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
- if (tsz < 0)
- return ERR_PTR(tsz);
+ tsz = jz4780_dma_transfer_size(dest | src | len,
+ &jzchan->transfer_shift);
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
@@ -407,7 +418,7 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
- desc->desc[0].dtc = len >> ord;
+ desc->desc[0].dtc = len >> jzchan->transfer_shift;
return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
@@ -484,8 +495,9 @@ static void jz4780_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}
-static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
unsigned long flags;
LIST_HEAD(head);
@@ -507,9 +519,11 @@ static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
return 0;
}
-static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
- const struct dma_slave_config *config)
+static int jz4780_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
|| (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
return -EINVAL;
@@ -567,8 +581,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
txstate->residue = 0;
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
- && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
- status = DMA_ERROR;
+ && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ status = DMA_ERROR;
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
return status;
@@ -671,7 +685,10 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
- struct jz4780_dma_data *data = param;
+ struct jz4780_dma_filter_data *data = param;
+
+ if (jzdma->dma_device.dev->of_node != data->of_node)
+ return false;
if (data->channel > -1) {
if (data->channel != jzchan->id)
@@ -690,11 +707,12 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
{
struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
- struct jz4780_dma_data data;
+ struct jz4780_dma_filter_data data;
if (dma_spec->args_count != 2)
return NULL;
+ data.of_node = ofdma->of_node;
data.transfer_type = dma_spec->args[0];
data.channel = dma_spec->args[1];
@@ -713,9 +731,14 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
data.channel);
return NULL;
}
- }
- return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+ jzdma->chan[data.channel].transfer_type = data.transfer_type;
+
+ return dma_get_slave_channel(
+ &jzdma->chan[data.channel].vchan.chan);
+ } else {
+ return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+ }
}
static int jz4780_dma_probe(struct platform_device *pdev)
@@ -743,23 +766,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (IS_ERR(jzdma->base))
return PTR_ERR(jzdma->base);
- jzdma->irq = platform_get_irq(pdev, 0);
- if (jzdma->irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(dev, "failed to get IRQ: %d\n", ret);
- return jzdma->irq;
+ return ret;
}
- ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
- dev_name(dev), jzdma);
+ jzdma->irq = ret;
+
+ ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+ jzdma);
if (ret) {
dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
- return -EINVAL;
+ return ret;
}
jzdma->clk = devm_clk_get(dev, NULL);
if (IS_ERR(jzdma->clk)) {
dev_err(dev, "failed to get clock\n");
- return PTR_ERR(jzdma->clk);
+ ret = PTR_ERR(jzdma->clk);
+ goto err_free_irq;
}
clk_prepare_enable(jzdma->clk);
@@ -775,13 +801,13 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
dd->dev = dev;
- dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+ dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
- dd->device_config = jz4780_dma_slave_config;
+ dd->device_config = jz4780_dma_config;
dd->device_terminate_all = jz4780_dma_terminate_all;
dd->device_tx_status = jz4780_dma_tx_status;
dd->device_issue_pending = jz4780_dma_issue_pending;
@@ -790,7 +816,6 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
/*
* Enable DMA controller, mark all channels as not programmable.
* Also set the FMSC bit - it increases MSC performance, so it makes
@@ -832,15 +857,24 @@ err_unregister_dev:
err_disable_clk:
clk_disable_unprepare(jzdma->clk);
+
+err_free_irq:
+ free_irq(jzdma->irq, jzdma);
return ret;
}
static int jz4780_dma_remove(struct platform_device *pdev)
{
struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+ int i;
of_dma_controller_free(pdev->dev.of_node);
- devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+
+ free_irq(jzdma->irq, jzdma);
+
+ for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+ tasklet_kill(&jzdma->chan[i].vchan.task);
+
dma_async_device_unregister(&jzdma->dma_device);
return 0;
}
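
jz4780_dma_transfer_size() now clamps instead of failing: the unsupported
8-byte size falls back to 4 bytes and anything above 128 bytes is limited to
128. A worked example with hypothetical operands (userspace sketch; the
kernel provides its own ffs()):

	#include <strings.h>	/* ffs() */

	static unsigned int example_dtc(void)
	{
		unsigned long val = 0x48;	/* addr | len | (width * maxburst) */
		int ord = ffs(val) - 1;		/* 3, i.e. 8-byte units: unsupported */

		if (ord == 3)
			ord = 2;		/* fall back to 4-byte units */
		else if (ord > 7)
			ord = 7;		/* cap at 128-byte units */

		return 0x48 >> ord;		/* 18 transfer units of 4 bytes */
	}
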
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
+
+ dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+ ch->device->privatecnt++;
+
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
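
With this change, a channel handed out by dma_request_slave_channel() is
implicitly marked DMA_PRIVATE and privatecnt is bumped, so that
dma_release_channel() can balance it later. A hedged consumer sketch using
the standard dmaengine API:

	static void use_private_channel(struct device *dev)
	{
		struct dma_chan *chan = dma_request_slave_channel(dev, "rx");

		if (!chan)
			return;
		/* ... prepare and submit transfers ... */
		dma_release_channel(chan);	/* balances privatecnt again */
	}
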
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 36e02f0f645e..e00c9b022964 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,6 +6,9 @@ config DW_DMAC_CORE
tristate
select DMA_ENGINE
+config DW_DMAC_BIG_ENDIAN_IO
+ bool
+
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
select DW_DMAC_CORE
@@ -23,6 +26,3 @@ config DW_DMAC_PCI
Support the Synopsys DesignWare AHB DMA controller on the
	  platforms that enumerate it as a PCI device. For example,
Intel Medfield has integrated this GPDMA controller.
-
-config DW_DMAC_BIG_ENDIAN_IO
- bool
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e1a2b0..cf1c87fa1edd 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 88853af69489..3e5d4f193005 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1000,7 +1000,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
* code using dma memcpy must make sure alignment of
* length is at dma->copy_align boundary.
*/
- dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
INIT_LIST_HEAD(&dma->channels);
}
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f42f71e37e73..7669c7dd1e34 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
- unsigned long flags;
-
- spin_lock_irqsave(&hsuc->lock, flags);
hsu_chan_disable(hsuc);
hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
- spin_unlock_irqrestore(&hsuc->lock, flags);
}
static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
- unsigned long flags;
-
- spin_lock_irqsave(&hsuc->lock, flags);
hsu_dma_chan_start(hsuc);
- spin_unlock_irqrestore(&hsuc->lock, flags);
}
static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
unsigned long flags;
u32 sr;
- spin_lock_irqsave(&hsuc->lock, flags);
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
- spin_unlock_irqrestore(&hsuc->lock, flags);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
return sr;
}
@@ -273,14 +265,11 @@ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
struct hsu_dma_desc *desc = hsuc->desc;
size_t bytes = hsu_dma_desc_size(desc);
int i;
- unsigned long flags;
- spin_lock_irqsave(&hsuc->lock, flags);
i = desc->active % HSU_DMA_CHAN_NR_DESC;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
- spin_unlock_irqrestore(&hsuc->lock, flags);
return bytes;
}
@@ -327,24 +316,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
return 0;
}
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hsuc->lock, flags);
- hsu_chan_disable(hsuc);
- spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hsuc->lock, flags);
- hsu_chan_enable(hsuc);
- spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
static int hsu_dma_pause(struct dma_chan *chan)
{
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@ static int hsu_dma_pause(struct dma_chan *chan)
spin_lock_irqsave(&hsuc->vchan.lock, flags);
if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
- hsu_dma_chan_deactivate(hsuc);
+ hsu_chan_disable(hsuc);
hsuc->desc->status = DMA_PAUSED;
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@ static int hsu_dma_resume(struct dma_chan *chan)
spin_lock_irqsave(&hsuc->vchan.lock, flags);
if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
hsuc->desc->status = DMA_IN_PROGRESS;
- hsu_dma_chan_activate(hsuc);
+ hsu_chan_enable(hsuc);
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -441,8 +412,6 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
- spin_lock_init(&hsuc->lock);
}
dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 0275233cf550..eeb9fff66967 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@ struct hsu_dma_chan {
struct virt_dma_chan vchan;
void __iomem *reg;
- spinlock_t lock;
/* hardware configuration */
enum dma_transfer_direction direction;
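
The hsu hunks above drop the driver-private hsuc->lock: every path that touched it
already runs (or now runs) under the virtual channel's vchan.lock, so the nested
spinlock only added overhead. The resulting pattern, sketched standalone under that
assumption:

    /*
     * Sketch: callers take the vchan lock once; low-level helpers such as
     * hsu_chan_disable() touch hardware without a second, nested lock.
     */
    static int hsu_dma_pause_pattern(struct hsu_dma_chan *hsuc)
    {
            unsigned long flags;

            spin_lock_irqsave(&hsuc->vchan.lock, flags);
            if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
                    hsu_chan_disable(hsuc);  /* formerly wrapped in hsuc->lock */
                    hsuc->desc->status = DMA_PAUSED;
            }
            spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

            return 0;
    }
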
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
new file mode 100644
index 000000000000..18c14e1f1414
--- /dev/null
+++ b/drivers/dma/idma64.c
@@ -0,0 +1,710 @@
+/*
+ * Core driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "idma64.h"
+
+/* Platform driver name */
+#define DRV_NAME "idma64"
+
+/* For now we support only two channels */
+#define IDMA64_NR_CHAN 2
+
+/* ---------------------------------------------------------------------- */
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_off(struct idma64 *idma64)
+{
+ unsigned short count = 100;
+
+ dma_writel(idma64, CFG, 0);
+
+ channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+ channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
+ channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
+ channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
+ channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+ do {
+ cpu_relax();
+ } while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
+}
+
+static void idma64_on(struct idma64 *idma64)
+{
+ dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+ u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
+ u32 cfglo = 0;
+
+ /* Enforce FIFO drain when channel is suspended */
+ cfglo |= IDMA64C_CFGL_CH_DRAIN;
+
+ /* Set default burst alignment */
+ cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
+
+ channel_writel(idma64c, CFG_LO, cfglo);
+ channel_writel(idma64c, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(idma64, MASK(XFER), idma64c->mask);
+ channel_set_bit(idma64, MASK(ERROR), idma64c->mask);
+
+ /*
+ * Enforce the controller to be turned on.
+ *
+ * The iDMA is turned off in ->probe() and loses context during a system
+ * suspend/resume cycle. That's why we have to enable it each time we
+ * use it.
+ */
+ idma64_on(idma64);
+}
+
+static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+ channel_clear_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+ struct idma64_desc *desc = idma64c->desc;
+ struct idma64_hw_desc *hw = &desc->hw[0];
+
+ channel_writeq(idma64c, SAR, 0);
+ channel_writeq(idma64c, DAR, 0);
+
+ channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
+ channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
+
+ channel_writeq(idma64c, LLP, hw->llp);
+
+ channel_set_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_stop_transfer(struct idma64_chan *idma64c)
+{
+ struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+
+ idma64_chan_stop(idma64, idma64c);
+}
+
+static void idma64_start_transfer(struct idma64_chan *idma64c)
+{
+ struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+ struct virt_dma_desc *vdesc;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&idma64c->vchan);
+ if (!vdesc) {
+ idma64c->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ idma64c->desc = to_idma64_desc(vdesc);
+
+ /* Configure the channel */
+ idma64_chan_init(idma64, idma64c);
+
+ /* Start the channel with a new descriptor */
+ idma64_chan_start(idma64, idma64c);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
+ u32 status_err, u32 status_xfer)
+{
+ struct idma64_chan *idma64c = &idma64->chan[c];
+ struct idma64_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ desc = idma64c->desc;
+ if (desc) {
+ if (status_err & (1 << c)) {
+ dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
+ desc->status = DMA_ERROR;
+ } else if (status_xfer & (1 << c)) {
+ dma_writel(idma64, CLEAR(XFER), idma64c->mask);
+ desc->status = DMA_COMPLETE;
+ vchan_cookie_complete(&desc->vdesc);
+ idma64_start_transfer(idma64c);
+ }
+
+ /* idma64_start_transfer() updates idma64c->desc */
+ if (idma64c->desc == NULL || desc->status == DMA_ERROR)
+ idma64_stop_transfer(idma64c);
+ }
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static irqreturn_t idma64_irq(int irq, void *dev)
+{
+ struct idma64 *idma64 = dev;
+ u32 status = dma_readl(idma64, STATUS_INT);
+ u32 status_xfer;
+ u32 status_err;
+ unsigned short i;
+
+ dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+ /* Check if we have any interrupt from the DMA controller */
+ if (!status)
+ return IRQ_NONE;
+
+ /* Disable interrupts */
+ channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+ channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+ status_xfer = dma_readl(idma64, RAW(XFER));
+ status_err = dma_readl(idma64, RAW(ERROR));
+
+ for (i = 0; i < idma64->dma.chancnt; i++)
+ idma64_chan_irq(idma64, i, status_err, status_xfer);
+
+ /* Re-enable interrupts */
+ channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+ channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+ return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
+{
+ struct idma64_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
+ if (!desc->hw) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return desc;
+}
+
+static void idma64_desc_free(struct idma64_chan *idma64c,
+ struct idma64_desc *desc)
+{
+ struct idma64_hw_desc *hw;
+
+ if (desc->ndesc) {
+ unsigned int i = desc->ndesc;
+
+ do {
+ hw = &desc->hw[--i];
+ dma_pool_free(idma64c->pool, hw->lli, hw->llp);
+ } while (i);
+ }
+
+ kfree(desc->hw);
+ kfree(desc);
+}
+
+static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);
+
+ idma64_desc_free(idma64c, to_idma64_desc(vdesc));
+}
+
+static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction, u64 llp)
+{
+ struct idma64_lli *lli = hw->lli;
+ u64 sar, dar;
+ u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
+ u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
+ u32 src_width, dst_width;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ sar = hw->phys;
+ dar = config->dst_addr;
+ ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
+ IDMA64C_CTLL_FC_M2P;
+ src_width = min_t(u32, 2, __fls(sar | hw->len));
+ dst_width = __fls(config->dst_addr_width);
+ } else { /* DMA_DEV_TO_MEM */
+ sar = config->src_addr;
+ dar = hw->phys;
+ ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
+ IDMA64C_CTLL_FC_P2M;
+ src_width = __fls(config->src_addr_width);
+ dst_width = min_t(u32, 2, __fls(dar | hw->len));
+ }
+
+ lli->sar = sar;
+ lli->dar = dar;
+
+ lli->ctlhi = ctlhi;
+ lli->ctllo = ctllo |
+ IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
+ IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
+ IDMA64C_CTLL_DST_WIDTH(dst_width) |
+ IDMA64C_CTLL_SRC_WIDTH(src_width);
+
+ lli->llp = llp;
+ return hw->llp;
+}
+
+static void idma64_desc_fill(struct idma64_chan *idma64c,
+ struct idma64_desc *desc)
+{
+ struct dma_slave_config *config = &idma64c->config;
+ struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+ struct idma64_lli *lli = hw->lli;
+ u64 llp = 0;
+ unsigned int i = desc->ndesc;
+
+ /* Fill the hardware descriptors and link them to a list */
+ do {
+ hw = &desc->hw[--i];
+ llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+ desc->length += hw->len;
+ } while (i);
+
+ /* Trigger interrupt after last block */
+ lli->ctllo |= IDMA64C_CTLL_INT_EN;
+}
+
+static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ struct idma64_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ desc = idma64_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct idma64_hw_desc *hw = &desc->hw[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
+ if (!hw->lli) {
+ desc->ndesc = i;
+ idma64_desc_free(idma64c, desc);
+ return NULL;
+ }
+
+ hw->phys = sg_dma_address(sg);
+ hw->len = sg_dma_len(sg);
+ }
+
+ desc->ndesc = sg_len;
+ desc->direction = direction;
+ desc->status = DMA_IN_PROGRESS;
+
+ idma64_desc_fill(idma64c, desc);
+ return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
+}
+
+static void idma64_issue_pending(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
+ idma64_start_transfer(idma64c);
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
+{
+ struct idma64_desc *desc = idma64c->desc;
+ struct idma64_hw_desc *hw;
+ size_t bytes = desc->length;
+ u64 llp;
+ u32 ctlhi;
+ unsigned int i = 0;
+
+ llp = channel_readq(idma64c, LLP);
+ do {
+ hw = &desc->hw[i];
+ } while ((hw->llp != llp) && (++i < desc->ndesc));
+
+ if (!i)
+ return bytes;
+
+ do {
+ bytes -= desc->hw[--i].len;
+ } while (i);
+
+ ctlhi = channel_readl(idma64c, CTL_HI);
+ return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
+}
+
+static enum dma_status idma64_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ size_t bytes;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ vdesc = vchan_find_desc(&idma64c->vchan, cookie);
+ if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
+ bytes = idma64_active_desc_size(idma64c);
+ dma_set_residue(state, bytes);
+ status = idma64c->desc->status;
+ } else if (vdesc) {
+ bytes = to_idma64_desc(vdesc)->length;
+ dma_set_residue(state, bytes);
+ }
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+ return status;
+}
+
+static void convert_burst(u32 *maxburst)
+{
+ if (*maxburst)
+ *maxburst = __fls(*maxburst);
+ else
+ *maxburst = 0;
+}
+
+static int idma64_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(config->direction))
+ return -EINVAL;
+
+ memcpy(&idma64c->config, config, sizeof(idma64c->config));
+
+ convert_burst(&idma64c->config.src_maxburst);
+ convert_burst(&idma64c->config.dst_maxburst);
+
+ return 0;
+}
+
+static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+{
+ unsigned short count = 100;
+ u32 cfglo;
+
+ cfglo = channel_readl(idma64c, CFG_LO);
+ channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
+ do {
+ udelay(1);
+ cfglo = channel_readl(idma64c, CFG_LO);
+ } while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
+}
+
+static void idma64_chan_activate(struct idma64_chan *idma64c)
+{
+ u32 cfglo;
+
+ cfglo = channel_readl(idma64c, CFG_LO);
+ channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
+}
+
+static int idma64_pause(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
+ idma64_chan_deactivate(idma64c);
+ idma64c->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+ return 0;
+}
+
+static int idma64_resume(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
+ idma64c->desc->status = DMA_IN_PROGRESS;
+ idma64_chan_activate(idma64c);
+ }
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+ return 0;
+}
+
+static int idma64_terminate_all(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ idma64_chan_deactivate(idma64c);
+ idma64_stop_transfer(idma64c);
+ if (idma64c->desc) {
+ idma64_vdesc_free(&idma64c->desc->vdesc);
+ idma64c->desc = NULL;
+ }
+ vchan_get_all_descriptors(&idma64c->vchan, &head);
+ spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&idma64c->vchan, &head);
+ return 0;
+}
+
+static int idma64_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->device->dev,
+ sizeof(struct idma64_lli), 8, 0);
+ if (!idma64c->pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void idma64_free_chan_resources(struct dma_chan *chan)
+{
+ struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+ dma_pool_destroy(idma64c->pool);
+ idma64c->pool = NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define IDMA64_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+static int idma64_probe(struct idma64_chip *chip)
+{
+ struct idma64 *idma64;
+ unsigned short nr_chan = IDMA64_NR_CHAN;
+ unsigned short i;
+ int ret;
+
+ idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
+ if (!idma64)
+ return -ENOMEM;
+
+ idma64->regs = chip->regs;
+ chip->idma64 = idma64;
+
+ idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
+ GFP_KERNEL);
+ if (!idma64->chan)
+ return -ENOMEM;
+
+ idma64->all_chan_mask = (1 << nr_chan) - 1;
+
+ /* Turn off iDMA controller */
+ idma64_off(idma64);
+
+ ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
+ dev_name(chip->dev), idma64);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&idma64->dma.channels);
+ for (i = 0; i < nr_chan; i++) {
+ struct idma64_chan *idma64c = &idma64->chan[i];
+
+ idma64c->vchan.desc_free = idma64_vdesc_free;
+ vchan_init(&idma64c->vchan, &idma64->dma);
+
+ idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
+ idma64c->mask = BIT(i);
+ }
+
+ dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
+
+ idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
+ idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
+
+ idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
+
+ idma64->dma.device_issue_pending = idma64_issue_pending;
+ idma64->dma.device_tx_status = idma64_tx_status;
+
+ idma64->dma.device_config = idma64_slave_config;
+ idma64->dma.device_pause = idma64_pause;
+ idma64->dma.device_resume = idma64_resume;
+ idma64->dma.device_terminate_all = idma64_terminate_all;
+
+ idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
+ idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+ idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ idma64->dma.dev = chip->dev;
+
+ ret = dma_async_device_register(&idma64->dma);
+ if (ret)
+ return ret;
+
+ dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
+ return 0;
+}
+
+static int idma64_remove(struct idma64_chip *chip)
+{
+ struct idma64 *idma64 = chip->idma64;
+ unsigned short i;
+
+ dma_async_device_unregister(&idma64->dma);
+
+ /*
+ * Explicitly call devm_free_irq() to avoid side effects with
+ * the scheduled tasklets.
+ */
+ devm_free_irq(chip->dev, chip->irq, idma64);
+
+ for (i = 0; i < idma64->dma.chancnt; i++) {
+ struct idma64_chan *idma64c = &idma64->chan[i];
+
+ tasklet_kill(&idma64c->vchan.task);
+ }
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int idma64_platform_probe(struct platform_device *pdev)
+{
+ struct idma64_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ int ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ chip->dev = dev;
+
+ ret = idma64_probe(chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, chip);
+ return 0;
+}
+
+static int idma64_platform_remove(struct platform_device *pdev)
+{
+ struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+ return idma64_remove(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int idma64_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+ idma64_off(chip->idma64);
+ return 0;
+}
+
+static int idma64_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+ idma64_on(chip->idma64);
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops idma64_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
+};
+
+static struct platform_driver idma64_platform_driver = {
+ .probe = idma64_platform_probe,
+ .remove = idma64_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &idma64_dev_pm_ops,
+ },
+};
+
+module_platform_driver(idma64_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("iDMA64 core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_ALIAS("platform:" DRV_NAME);
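
For orientation, the new idma64.c above only exposes the generic dmaengine slave
API (prep_slave_sg, pause/resume, terminate_all). A hypothetical consumer would
drive it like any other slave DMA controller; the channel name "rx" and the FIFO
address below are made-up illustration, not part of this driver:

    #include <linux/dmaengine.h>

    static int start_rx_dma(struct device *dev, struct scatterlist *sgl,
                            unsigned int sg_len, dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = {
                    .direction = DMA_DEV_TO_MEM,
                    .src_addr = fifo_addr,
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                    .src_maxburst = 8,
            };
            struct dma_async_tx_descriptor *desc;
            struct dma_chan *chan;

            chan = dma_request_slave_channel(dev, "rx");  /* name is hypothetical */
            if (!chan)
                    return -ENODEV;

            dmaengine_slave_config(chan, &cfg);
            desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
            if (!desc) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);  /* reaches idma64_issue_pending() */

            return 0;
    }
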
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
new file mode 100644
index 000000000000..a4d99685a7c4
--- /dev/null
+++ b/drivers/dma/idma64.h
@@ -0,0 +1,233 @@
+/*
+ * Driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_IDMA64_H__
+#define __DMA_IDMA64_H__
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/* Channel registers */
+
+#define IDMA64_CH_SAR 0x00 /* Source Address Register */
+#define IDMA64_CH_DAR 0x08 /* Destination Address Register */
+#define IDMA64_CH_LLP 0x10 /* Linked List Pointer */
+#define IDMA64_CH_CTL_LO 0x18 /* Control Register Low */
+#define IDMA64_CH_CTL_HI 0x1c /* Control Register High */
+#define IDMA64_CH_SSTAT 0x20
+#define IDMA64_CH_DSTAT 0x28
+#define IDMA64_CH_SSTATAR 0x30
+#define IDMA64_CH_DSTATAR 0x38
+#define IDMA64_CH_CFG_LO 0x40 /* Configuration Register Low */
+#define IDMA64_CH_CFG_HI 0x44 /* Configuration Register High */
+#define IDMA64_CH_SGR 0x48
+#define IDMA64_CH_DSR 0x50
+
+#define IDMA64_CH_LENGTH 0x58
+
+/* Bitfields in CTL_LO */
+#define IDMA64C_CTLL_INT_EN (1 << 0) /* irqs enabled? */
+#define IDMA64C_CTLL_DST_WIDTH(x) ((x) << 1) /* bytes per element */
+#define IDMA64C_CTLL_SRC_WIDTH(x) ((x) << 4)
+#define IDMA64C_CTLL_DST_INC (0 << 8) /* DAR update/not */
+#define IDMA64C_CTLL_DST_FIX (1 << 8)
+#define IDMA64C_CTLL_SRC_INC (0 << 10) /* SAR update/not */
+#define IDMA64C_CTLL_SRC_FIX (1 << 10)
+#define IDMA64C_CTLL_DST_MSIZE(x) ((x) << 11) /* burst, #elements */
+#define IDMA64C_CTLL_SRC_MSIZE(x) ((x) << 14)
+#define IDMA64C_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
+#define IDMA64C_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
+#define IDMA64C_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
+#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_DONE (1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA64C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */
+#define IDMA64C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */
+#define IDMA64C_CFGL_CH_SUSP (1 << 8)
+#define IDMA64C_CFGL_FIFO_EMPTY (1 << 9)
+#define IDMA64C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */
+#define IDMA64C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */
+#define IDMA64C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
+#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
+#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
+#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
+
+/* Interrupt registers */
+
+#define IDMA64_INT_XFER 0x00
+#define IDMA64_INT_BLOCK 0x08
+#define IDMA64_INT_SRC_TRAN 0x10
+#define IDMA64_INT_DST_TRAN 0x18
+#define IDMA64_INT_ERROR 0x20
+
+#define IDMA64_RAW(x) (0x2c0 + IDMA64_INT_##x) /* r */
+#define IDMA64_STATUS(x) (0x2e8 + IDMA64_INT_##x) /* r (raw & mask) */
+#define IDMA64_MASK(x) (0x310 + IDMA64_INT_##x) /* rw (set = irq enabled) */
+#define IDMA64_CLEAR(x) (0x338 + IDMA64_INT_##x) /* w (ack, affects "raw") */
+
+/* Common registers */
+
+#define IDMA64_STATUS_INT 0x360 /* r */
+#define IDMA64_CFG 0x398
+#define IDMA64_CH_EN 0x3a0
+
+/* Bitfields in CFG */
+#define IDMA64_CFG_DMA_EN (1 << 0)
+
+/* Hardware descriptor for Linked List transfers */
+struct idma64_lli {
+ u64 sar;
+ u64 dar;
+ u64 llp;
+ u32 ctllo;
+ u32 ctlhi;
+ u32 sstat;
+ u32 dstat;
+};
+
+struct idma64_hw_desc {
+ struct idma64_lli *lli;
+ dma_addr_t llp;
+ dma_addr_t phys;
+ unsigned int len;
+};
+
+struct idma64_desc {
+ struct virt_dma_desc vdesc;
+ enum dma_transfer_direction direction;
+ struct idma64_hw_desc *hw;
+ unsigned int ndesc;
+ size_t length;
+ enum dma_status status;
+};
+
+static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct idma64_desc, vdesc);
+}
+
+struct idma64_chan {
+ struct virt_dma_chan vchan;
+
+ void __iomem *regs;
+
+ /* hardware configuration */
+ enum dma_transfer_direction direction;
+ unsigned int mask;
+ struct dma_slave_config config;
+
+ void *pool;
+ struct idma64_desc *desc;
+};
+
+static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct idma64_chan, vchan.chan);
+}
+
+#define channel_set_bit(idma64, reg, mask) \
+ dma_writel(idma64, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(idma64, reg, mask) \
+ dma_writel(idma64, reg, ((mask) << 8) | 0)
+
+static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
+{
+ return readl(idma64c->regs + offset);
+}
+
+static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
+ u32 value)
+{
+ writel(value, idma64c->regs + offset);
+}
+
+#define channel_readl(idma64c, reg) \
+ idma64c_readl(idma64c, IDMA64_CH_##reg)
+#define channel_writel(idma64c, reg, value) \
+ idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
+
+static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
+{
+ u64 l, h;
+
+ l = idma64c_readl(idma64c, offset);
+ h = idma64c_readl(idma64c, offset + 4);
+
+ return l | (h << 32);
+}
+
+static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
+ u64 value)
+{
+ idma64c_writel(idma64c, offset, value);
+ idma64c_writel(idma64c, offset + 4, value >> 32);
+}
+
+#define channel_readq(idma64c, reg) \
+ idma64c_readq(idma64c, IDMA64_CH_##reg)
+#define channel_writeq(idma64c, reg, value) \
+ idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
+
+struct idma64 {
+ struct dma_device dma;
+
+ void __iomem *regs;
+
+ /* channels */
+ unsigned short all_chan_mask;
+ struct idma64_chan *chan;
+};
+
+static inline struct idma64 *to_idma64(struct dma_device *ddev)
+{
+ return container_of(ddev, struct idma64, dma);
+}
+
+static inline u32 idma64_readl(struct idma64 *idma64, int offset)
+{
+ return readl(idma64->regs + offset);
+}
+
+static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+{
+ writel(value, idma64->regs + offset);
+}
+
+#define dma_readl(idma64, reg) \
+ idma64_readl(idma64, IDMA64_##reg)
+#define dma_writel(idma64, reg, value) \
+ idma64_writel(idma64, IDMA64_##reg, (value))
+
+/**
+ * struct idma64_chip - representation of iDMA 64-bit controller hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @idma64: struct idma64 that is filled by idma64_probe()
+ */
+struct idma64_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ struct idma64 *idma64;
+};
+
+#endif /* __DMA_IDMA64_H__ */
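
The channel_set_bit()/channel_clear_bit() encoding in idma64.h packs a
write-enable byte above the value byte, so shared interrupt-mask and CH_EN
registers can be updated per channel without a read-modify-write. A worked
example with an illustrative channel number:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mask = 1u << 1;             /* channel 1 */
            uint32_t set  = (mask << 8) | mask;  /* 0x0202: write-enable ch1, set it */
            uint32_t clr  = (mask << 8) | 0;     /* 0x0200: write-enable ch1, clear it */

            printf("set=%#x clear=%#x\n", set, clr);
            return 0;
    }
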
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 865501fcc67d..48d85f8b95fe 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1083,8 +1083,12 @@ static int __init imxdma_probe(struct platform_device *pdev)
if (IS_ERR(imxdma->dma_ahb))
return PTR_ERR(imxdma->dma_ahb);
- clk_prepare_enable(imxdma->dma_ipg);
- clk_prepare_enable(imxdma->dma_ahb);
+ ret = clk_prepare_enable(imxdma->dma_ipg);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(imxdma->dma_ahb);
+ if (ret)
+ goto disable_dma_ipg_clk;
/* reset DMA module */
imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -1094,20 +1098,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
dma_irq_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
- goto err;
+ goto disable_dma_ahb_clk;
}
irq_err = platform_get_irq(pdev, 1);
if (irq_err < 0) {
ret = irq_err;
- goto err;
+ goto disable_dma_ahb_clk;
}
ret = devm_request_irq(&pdev->dev, irq_err,
imxdma_err_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
- goto err;
+ goto disable_dma_ahb_clk;
}
}
@@ -1144,7 +1148,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
dev_warn(imxdma->dev, "Can't register IRQ %d "
"for DMA channel %d\n",
irq + i, i);
- goto err;
+ goto disable_dma_ahb_clk;
}
init_timer(&imxdmac->watchdog);
imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1183,14 +1187,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxdma);
- imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
+ imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
ret = dma_async_device_register(&imxdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
- goto err;
+ goto disable_dma_ahb_clk;
}
if (pdev->dev.of_node) {
@@ -1206,9 +1210,10 @@ static int __init imxdma_probe(struct platform_device *pdev)
err_of_dma_controller:
dma_async_device_unregister(&imxdma->dma_device);
-err:
- clk_disable_unprepare(imxdma->dma_ipg);
+disable_dma_ahb_clk:
clk_disable_unprepare(imxdma->dma_ahb);
+disable_dma_ipg_clk:
+ clk_disable_unprepare(imxdma->dma_ipg);
return ret;
}
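
The imx-dma hunks above replace a single catch-all "err:" label with one label per
acquired clock, and stop ignoring clk_prepare_enable() return values. A generic
sketch of the resulting unwind shape (setup() stands in for the IRQ/device
registration steps and is hypothetical):

    #include <linux/clk.h>

    static int probe_pattern(struct clk *ipg, struct clk *ahb,
                             int (*setup)(void))
    {
            int ret;

            ret = clk_prepare_enable(ipg);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(ahb);
            if (ret)
                    goto disable_ipg;

            ret = setup();          /* any later failure unwinds both clocks */
            if (ret)
                    goto disable_ahb;

            return 0;

    disable_ahb:
            clk_disable_unprepare(ahb);
    disable_ipg:
            clk_disable_unprepare(ipg);
            return ret;
    }
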
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 77b6aab04f47..9d375bc7590a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,12 +35,16 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "dmaengine.h"
@@ -124,6 +128,56 @@
#define CHANGE_ENDIANNESS 0x80
/*
+ * p_2_p watermark_level description
+ * Bits Name Description
+ * 0-7 Lower WML Lower watermark level
+ * 8 PS 1: Pad Swallowing
+ * 0: No Pad Swallowing
+ * 9 PA 1: Pad Adding
+ * 0: No Pad Adding
+ * 10 SPDIF If this bit is set both source
+ * and destination are on SPBA
+ * 11 Source Bit(SP) 1: Source on SPBA
+ * 0: Source on AIPS
+ * 12 Destination Bit(DP) 1: Destination on SPBA
+ * 0: Destination on AIPS
+ * 13-15 --------- MUST BE 0
+ * 16-23 Higher WML HWML
+ * 24-27 N Total number of samples after
+ * which Pad adding/Swallowing
+ * must be done. It must be odd.
+ * 28 Lower WML Event(LWE) SDMA events reg to check for
+ * LWML event mask
+ * 0: LWE in EVENTS register
+ * 1: LWE in EVENTS2 register
+ * 29 Higher WML Event(HWE) SDMA events reg to check for
+ * HWML event mask
+ * 0: HWE in EVENTS register
+ * 1: HWE in EVENTS2 register
+ * 30 --------- MUST BE 0
+ * 31 CONT 1: Amount of samples to be
+ * transferred is unknown and
+ * script will keep on
+ * transferring samples as long as
+ * both events are detected and
+ * script must be manually stopped
+ * by the application
+ * 0: The amount of samples to be
+ * transferred is equal to the
+ * count field of mode word
+ */
+#define SDMA_WATERMARK_LEVEL_LWML 0xFF
+#define SDMA_WATERMARK_LEVEL_PS BIT(8)
+#define SDMA_WATERMARK_LEVEL_PA BIT(9)
+#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
+#define SDMA_WATERMARK_LEVEL_SP BIT(11)
+#define SDMA_WATERMARK_LEVEL_DP BIT(12)
+#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
+#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
+#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
+#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
+
+/*
* Mode/Count of data node descriptors - IPCv2
*/
struct sdma_mode_count {
@@ -259,8 +313,9 @@ struct sdma_channel {
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
unsigned int pc_from_device, pc_to_device;
+ unsigned int device_to_device;
unsigned long flags;
- dma_addr_t per_address;
+ dma_addr_t per_address, per_address2;
unsigned long event_mask[2];
unsigned long watermark_level;
u32 shp_addr, per_addr;
@@ -328,6 +383,8 @@ struct sdma_engine {
u32 script_number;
struct sdma_script_start_addrs *script_addrs;
const struct sdma_driver_data *drvdata;
+ u32 spba_start_addr;
+ u32 spba_end_addr;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -705,6 +762,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
+ sdmac->device_to_device = 0;
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
@@ -780,6 +838,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
sdmac->pc_from_device = per_2_emi;
sdmac->pc_to_device = emi_2_per;
+ sdmac->device_to_device = per_2_per;
}
static int sdma_load_context(struct sdma_channel *sdmac)
@@ -792,11 +851,12 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int ret;
unsigned long flags;
- if (sdmac->direction == DMA_DEV_TO_MEM) {
+ if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
- } else {
+ else if (sdmac->direction == DMA_DEV_TO_DEV)
+ load_address = sdmac->device_to_device;
+ else
load_address = sdmac->pc_to_device;
- }
if (load_address < 0)
return load_address;
@@ -851,6 +911,46 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
+static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+
+ int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
+ int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
+
+ set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
+ set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
+
+ if (sdmac->event_id0 > 31)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
+
+ if (sdmac->event_id1 > 31)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
+
+ /*
+ * If LWML(src_maxburst) > HWML(dst_maxburst), we need
+ * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
+ * r0(event_mask[1]) and r1(event_mask[0]).
+ */
+ if (lwml > hwml) {
+ sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
+ SDMA_WATERMARK_LEVEL_HWML);
+ sdmac->watermark_level |= hwml;
+ sdmac->watermark_level |= lwml << 16;
+ swap(sdmac->event_mask[0], sdmac->event_mask[1]);
+ }
+
+ if (sdmac->per_address2 >= sdma->spba_start_addr &&
+ sdmac->per_address2 <= sdma->spba_end_addr)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
+
+ if (sdmac->per_address >= sdma->spba_start_addr &&
+ sdmac->per_address <= sdma->spba_end_addr)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
+
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+}
+
static int sdma_config_channel(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -869,6 +969,12 @@ static int sdma_config_channel(struct dma_chan *chan)
sdma_event_enable(sdmac, sdmac->event_id0);
}
+ if (sdmac->event_id1) {
+ if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id1);
+ }
+
switch (sdmac->peripheral_type) {
case IMX_DMATYPE_DSP:
sdma_config_ownership(sdmac, false, true, true);
@@ -887,19 +993,17 @@ static int sdma_config_channel(struct dma_chan *chan)
(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
/* Handle multiple event channels differently */
if (sdmac->event_id1) {
- sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
- if (sdmac->event_id1 > 31)
- __set_bit(31, &sdmac->watermark_level);
- sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
- if (sdmac->event_id0 > 31)
- __set_bit(30, &sdmac->watermark_level);
- } else {
+ if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
+ sdmac->peripheral_type == IMX_DMATYPE_ASRC)
+ sdma_set_watermarklevel_for_p2p(sdmac);
+ } else
__set_bit(sdmac->event_id0, sdmac->event_mask);
- }
+
/* Watermark Level */
sdmac->watermark_level |= sdmac->watermark_level;
/* Address */
sdmac->shp_addr = sdmac->per_address;
+ sdmac->per_addr = sdmac->per_address2;
} else {
sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
}
@@ -987,17 +1091,22 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
+ sdmac->event_id1 = data->dma_request2;
- clk_enable(sdmac->sdma->clk_ipg);
- clk_enable(sdmac->sdma->clk_ahb);
+ ret = clk_enable(sdmac->sdma->clk_ipg);
+ if (ret)
+ return ret;
+ ret = clk_enable(sdmac->sdma->clk_ahb);
+ if (ret)
+ goto disable_clk_ipg;
ret = sdma_request_channel(sdmac);
if (ret)
- return ret;
+ goto disable_clk_ahb;
ret = sdma_set_channel_priority(sdmac, prio);
if (ret)
- return ret;
+ goto disable_clk_ahb;
dma_async_tx_descriptor_init(&sdmac->desc, chan);
sdmac->desc.tx_submit = sdma_tx_submit;
@@ -1005,6 +1114,12 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->desc.flags = DMA_CTRL_ACK;
return 0;
+
+disable_clk_ahb:
+ clk_disable(sdmac->sdma->clk_ahb);
+disable_clk_ipg:
+ clk_disable(sdmac->sdma->clk_ipg);
+ return ret;
}
static void sdma_free_chan_resources(struct dma_chan *chan)
@@ -1221,6 +1336,14 @@ static int sdma_config(struct dma_chan *chan,
sdmac->watermark_level = dmaengine_cfg->src_maxburst *
dmaengine_cfg->src_addr_width;
sdmac->word_size = dmaengine_cfg->src_addr_width;
+ } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
+ sdmac->per_address2 = dmaengine_cfg->src_addr;
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst &
+ SDMA_WATERMARK_LEVEL_LWML;
+ sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
+ SDMA_WATERMARK_LEVEL_HWML;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1337,6 +1460,72 @@ err_firmware:
release_firmware(fw);
}
+#define EVENT_REMAP_CELLS 3
+
+static int __init sdma_event_remap(struct sdma_engine *sdma)
+{
+ struct device_node *np = sdma->dev->of_node;
+ struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
+ struct property *event_remap;
+ struct regmap *gpr;
+ char propname[] = "fsl,sdma-event-remap";
+ u32 reg, val, shift, num_map, i;
+ int ret = 0;
+
+ if (IS_ERR(np) || IS_ERR(gpr_np))
+ goto out;
+
+ event_remap = of_find_property(np, propname, NULL);
+ num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
+ if (!num_map) {
+ dev_warn(sdma->dev, "no event needs to be remapped\n");
+ goto out;
+ } else if (num_map % EVENT_REMAP_CELLS) {
+ dev_err(sdma->dev, "the property %s must modulo %d\n",
+ propname, EVENT_REMAP_CELLS);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(gpr)) {
+ dev_err(sdma->dev, "failed to get gpr regmap\n");
+ ret = PTR_ERR(gpr);
+ goto out;
+ }
+
+ for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
+ ret = of_property_read_u32_index(np, propname, i, &reg);
+ if (ret) {
+ dev_err(sdma->dev, "failed to read property %s index %d\n",
+ propname, i);
+ goto out;
+ }
+
+ ret = of_property_read_u32_index(np, propname, i + 1, &shift);
+ if (ret) {
+ dev_err(sdma->dev, "failed to read property %s index %d\n",
+ propname, i + 1);
+ goto out;
+ }
+
+ ret = of_property_read_u32_index(np, propname, i + 2, &val);
+ if (ret) {
+ dev_err(sdma->dev, "failed to read property %s index %d\n",
+ propname, i + 2);
+ goto out;
+ }
+
+ regmap_update_bits(gpr, reg, BIT(shift), val << shift);
+ }
+
+out:
+ if (!IS_ERR(gpr_np))
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
static int sdma_get_firmware(struct sdma_engine *sdma,
const char *fw_name)
{
@@ -1354,8 +1543,12 @@ static int sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- clk_enable(sdma->clk_ipg);
- clk_enable(sdma->clk_ahb);
+ ret = clk_enable(sdma->clk_ipg);
+ if (ret)
+ return ret;
+ ret = clk_enable(sdma->clk_ahb);
+ if (ret)
+ goto disable_clk_ipg;
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1411,8 +1604,9 @@ static int sdma_init(struct sdma_engine *sdma)
return 0;
err_dma_alloc:
- clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
+disable_clk_ipg:
+ clk_disable(sdma->clk_ipg);
dev_err(sdma->dev, "initialisation failed with %d\n", ret);
return ret;
}
@@ -1444,6 +1638,14 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
data.dma_request = dma_spec->args[0];
data.peripheral_type = dma_spec->args[1];
data.priority = dma_spec->args[2];
+ /*
+ * Initialize dma_request2 to zero; it is not used by the DT.
+ * For P2P, dma_request2 is initialized from dma_request_channel():
+ * chan->private will point to the imx_dma_data, and in
+ * device_alloc_chan_resources() imx_dma_data.dma_request2 will
+ * be copied to sdmac->event_id1.
+ */
+ data.dma_request2 = 0;
return dma_request_channel(mask, sdma_filter_fn, &data);
}
@@ -1453,10 +1655,12 @@ static int sdma_probe(struct platform_device *pdev)
const struct of_device_id *of_id =
of_match_device(sdma_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
+ struct device_node *spba_bus;
const char *fw_name;
int ret;
int irq;
struct resource *iores;
+ struct resource spba_res;
struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
@@ -1551,6 +1755,10 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
+ ret = sdma_event_remap(sdma);
+ if (ret)
+ goto err_init;
+
if (sdma->drvdata->script_addrs)
sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
if (pdata && pdata->script_addrs)
@@ -1608,6 +1816,14 @@ static int sdma_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register controller\n");
goto err_register;
}
+
+ spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
+ ret = of_address_to_resource(spba_bus, 0, &spba_res);
+ if (!ret) {
+ sdma->spba_start_addr = spba_res.start;
+ sdma->spba_end_addr = spba_res.end;
+ }
+ of_node_put(spba_bus);
}
dev_info(sdma->dev, "initialized\n");
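
Putting the new DEV_TO_DEV watermark handling together: sdma_config() packs
src_maxburst into bits 0-7 (LWML) and dst_maxburst into bits 16-23 (HWML), and
sdma_set_watermarklevel_for_p2p() later swaps them if LWML > HWML. A worked
example with illustrative burst sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_WATERMARK_LEVEL_LWML 0xFFu
    #define SDMA_WATERMARK_LEVEL_HWML (0xFFu << 16)

    int main(void)
    {
            uint32_t src_maxburst = 6, dst_maxburst = 8;
            uint32_t wml;

            wml  = src_maxburst & SDMA_WATERMARK_LEVEL_LWML;
            wml |= (dst_maxburst << 16) & SDMA_WATERMARK_LEVEL_HWML;

            printf("watermark_level=%#x\n", wml);   /* 0x80006 */
            return 0;
    }
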
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 0ff7270af25b..cf5fedbe2b75 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := init.o dma.o prep.o dca.o sysfs.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index ea1e107ae884..2cb7c308d5c7 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -31,7 +31,6 @@
#include "dma.h"
#include "registers.h"
-#include "dma_v2.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -71,14 +70,6 @@ static inline int dca2_tag_map_valid(u8 *tag_map)
#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN 8
-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
- 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
- 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
- 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
@@ -126,96 +117,6 @@ struct ioat_dca_priv {
struct ioat_dca_slot req_slots[0];
};
-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8] PCI-Express Bus Number
- * [7:3] PCI-Express Device Number
- * [2:0] PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1] Reserved (0)
- * [0] Ignore Function Number
- */
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
- struct ioat_dca_priv *ioatdca = dca_priv(dca);
- struct pci_dev *pdev;
- int i;
- u16 id;
-
- /* This implementation only supports PCI-Express */
- if (!dev_is_pci(dev))
- return -ENODEV;
- pdev = to_pci_dev(dev);
- id = dcaid_from_pcidev(pdev);
-
- if (ioatdca->requester_count == ioatdca->max_requesters)
- return -ENODEV;
-
- for (i = 0; i < ioatdca->max_requesters; i++) {
- if (ioatdca->req_slots[i].pdev == NULL) {
- /* found an empty slot */
- ioatdca->requester_count++;
- ioatdca->req_slots[i].pdev = pdev;
- ioatdca->req_slots[i].rid = id;
- writew(id, ioatdca->dca_base + (i * 4));
- /* make sure the ignore function bit is off */
- writeb(0, ioatdca->dca_base + (i * 4) + 2);
- return i;
- }
- }
- /* Error, ioatdma->requester_count is out of whack */
- return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
- struct device *dev)
-{
- struct ioat_dca_priv *ioatdca = dca_priv(dca);
- struct pci_dev *pdev;
- int i;
-
- /* This implementation only supports PCI-Express */
- if (!dev_is_pci(dev))
- return -ENODEV;
- pdev = to_pci_dev(dev);
-
- for (i = 0; i < ioatdca->max_requesters; i++) {
- if (ioatdca->req_slots[i].pdev == pdev) {
- writew(0, ioatdca->dca_base + (i * 4));
- ioatdca->req_slots[i].pdev = NULL;
- ioatdca->req_slots[i].rid = 0;
- ioatdca->requester_count--;
- return i;
- }
- }
- return -ENODEV;
-}
-
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
- struct device *dev,
- int cpu)
-{
- struct ioat_dca_priv *ioatdca = dca_priv(dca);
- int i, apic_id, bit, value;
- u8 entry, tag;
-
- tag = 0;
- apic_id = cpu_physical_id(cpu);
-
- for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
- entry = ioatdca->tag_map[i];
- if (entry & DCA_TAG_MAP_VALID) {
- bit = entry & ~DCA_TAG_MAP_VALID;
- value = (apic_id & (1 << bit)) ? 1 : 0;
- } else {
- value = entry ? 1 : 0;
- }
- tag |= (value << i);
- }
- return tag;
-}
-
static int ioat_dca_dev_managed(struct dca_provider *dca,
struct device *dev)
{
@@ -231,260 +132,7 @@ static int ioat_dca_dev_managed(struct dca_provider *dca,
return 0;
}
-static struct dca_ops ioat_dca_ops = {
- .add_requester = ioat_dca_add_requester,
- .remove_requester = ioat_dca_remove_requester,
- .get_tag = ioat_dca_get_tag,
- .dev_managed = ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
- struct dca_provider *dca;
- struct ioat_dca_priv *ioatdca;
- u8 *tag_map = NULL;
- int i;
- int err;
- u8 version;
- u8 max_requesters;
-
- if (!system_has_dca_enabled(pdev))
- return NULL;
-
- /* I/OAT v1 systems must have a known tag_map to support DCA */
- switch (pdev->vendor) {
- case PCI_VENDOR_ID_INTEL:
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT:
- tag_map = ioat_tag_map_BNB;
- break;
- case PCI_DEVICE_ID_INTEL_IOAT_CNB:
- tag_map = ioat_tag_map_CNB;
- break;
- case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
- tag_map = ioat_tag_map_SCNB;
- break;
- }
- break;
- case PCI_VENDOR_ID_UNISYS:
- switch (pdev->device) {
- case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
- tag_map = ioat_tag_map_UNISYS;
- break;
- }
- break;
- }
- if (tag_map == NULL)
- return NULL;
-
- version = readb(iobase + IOAT_VER_OFFSET);
- if (version == IOAT_VER_3_0)
- max_requesters = IOAT3_DCA_MAX_REQ;
- else
- max_requesters = IOAT_DCA_MAX_REQ;
-
- dca = alloc_dca_provider(&ioat_dca_ops,
- sizeof(*ioatdca) +
- (sizeof(struct ioat_dca_slot) * max_requesters));
- if (!dca)
- return NULL;
-
- ioatdca = dca_priv(dca);
- ioatdca->max_requesters = max_requesters;
- ioatdca->dca_base = iobase + 0x54;
-
- /* copy over the APIC ID to DCA tag mapping */
- for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
- ioatdca->tag_map[i] = tag_map[i];
-
- err = register_dca_provider(dca, &pdev->dev);
- if (err) {
- free_dca_provider(dca);
- return NULL;
- }
-
- return dca;
-}
-
-
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
- struct ioat_dca_priv *ioatdca = dca_priv(dca);
- struct pci_dev *pdev;
- int i;
- u16 id;
- u16 global_req_table;
-
- /* This implementation only supports PCI-Express */
- if (!dev_is_pci(dev))
- return -ENODEV;
- pdev = to_pci_dev(dev);
- id = dcaid_from_pcidev(pdev);
-
- if (ioatdca->requester_count == ioatdca->max_requesters)
- return -ENODEV;
-
- for (i = 0; i < ioatdca->max_requesters; i++) {
- if (ioatdca->req_slots[i].pdev == NULL) {
- /* found an empty slot */
- ioatdca->requester_count++;
- ioatdca->req_slots[i].pdev = pdev;
- ioatdca->req_slots[i].rid = id;
- global_req_table =
- readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
- writel(id | IOAT_DCA_GREQID_VALID,
- ioatdca->iobase + global_req_table + (i * 4));
- return i;
- }
- }
- /* Error, ioatdma->requester_count is out of whack */
- return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
- struct device *dev)
-{
- struct ioat_dca_priv *ioatdca = dca_priv(dca);
- struct pci_dev *pdev;
- int i;
- u16 global_req_table;
-
- /* This implementation only supports PCI-Express */
- if (!dev_is_pci(dev))
- return -ENODEV;
- pdev = to_pci_dev(dev);
-
- for (i = 0; i < ioatdca->max_requesters; i++) {
- if (ioatdca->req_slots[i].pdev == pdev) {
- global_req_table =
- readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
- writel(0, ioatdca->iobase + global_req_table + (i * 4));
- ioatdca->req_slots[i].pdev = NULL;
- ioatdca->req_slots[i].rid = 0;
- ioatdca->requester_count--;
- return i;
- }
- }
- return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
- struct device *dev,
- int cpu)
-{
- u8 tag;
-
- tag = ioat_dca_get_tag(dca, dev, cpu);
- tag = (~tag) & 0x1F;
- return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
- .add_requester = ioat2_dca_add_requester,
- .remove_requester = ioat2_dca_remove_requester,
- .get_tag = ioat2_dca_get_tag,
- .dev_managed = ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
- int slots = 0;
- u32 req;
- u16 global_req_table;
-
- global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
- if (global_req_table == 0)
- return 0;
- do {
- req = readl(iobase + global_req_table + (slots * sizeof(u32)));
- slots++;
- } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
- return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
- struct dca_provider *dca;
- struct ioat_dca_priv *ioatdca;
- int slots;
- int i;
- int err;
- u32 tag_map;
- u16 dca_offset;
- u16 csi_fsb_control;
- u16 pcie_control;
- u8 bit;
-
- if (!system_has_dca_enabled(pdev))
- return NULL;
-
- dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
- if (dca_offset == 0)
- return NULL;
-
- slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
- if (slots == 0)
- return NULL;
-
- dca = alloc_dca_provider(&ioat2_dca_ops,
- sizeof(*ioatdca)
- + (sizeof(struct ioat_dca_slot) * slots));
- if (!dca)
- return NULL;
-
- ioatdca = dca_priv(dca);
- ioatdca->iobase = iobase;
- ioatdca->dca_base = iobase + dca_offset;
- ioatdca->max_requesters = slots;
-
- /* some bios might not know to turn these on */
- csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
- if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
- csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
- writew(csi_fsb_control,
- ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
- }
- pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
- if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
- pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
- writew(pcie_control,
- ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
- }
-
-
- /* TODO version, compatibility and configuration checks */
-
- /* copy out the APIC to DCA tag map */
- tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
- for (i = 0; i < 5; i++) {
- bit = (tag_map >> (4 * i)) & 0x0f;
- if (bit < 8)
- ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
- else
- ioatdca->tag_map[i] = 0;
- }
-
- if (!dca2_tag_map_valid(ioatdca->tag_map)) {
- WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
- "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
- dev_driver_string(&pdev->dev),
- dev_name(&pdev->dev));
- free_dca_provider(dca);
- return NULL;
- }
-
- err = register_dca_provider(dca, &pdev->dev);
- if (err) {
- free_dca_provider(dca);
- return NULL;
- }
-
- return dca;
-}
-
-static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
struct ioat_dca_priv *ioatdca = dca_priv(dca);
struct pci_dev *pdev;
@@ -518,7 +166,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
return -EFAULT;
}
-static int ioat3_dca_remove_requester(struct dca_provider *dca,
+static int ioat_dca_remove_requester(struct dca_provider *dca,
struct device *dev)
{
struct ioat_dca_priv *ioatdca = dca_priv(dca);
@@ -545,7 +193,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
return -ENODEV;
}
-static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
struct device *dev,
int cpu)
{
@@ -576,14 +224,14 @@ static u8 ioat3_dca_get_tag(struct dca_provider *dca,
return tag;
}
-static struct dca_ops ioat3_dca_ops = {
- .add_requester = ioat3_dca_add_requester,
- .remove_requester = ioat3_dca_remove_requester,
- .get_tag = ioat3_dca_get_tag,
+static struct dca_ops ioat_dca_ops = {
+ .add_requester = ioat_dca_add_requester,
+ .remove_requester = ioat_dca_remove_requester,
+ .get_tag = ioat_dca_get_tag,
.dev_managed = ioat_dca_dev_managed,
};
-static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
{
int slots = 0;
u32 req;
@@ -618,7 +266,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map)
(tag_map[4] == DCA_TAG_MAP_VALID));
}
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -645,11 +293,11 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
if (dca_offset == 0)
return NULL;
- slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+ slots = ioat_dca_count_dca_slots(iobase, dca_offset);
if (slots == 0)
return NULL;
- dca = alloc_dca_provider(&ioat3_dca_ops,
+ dca = alloc_dca_provider(&ioat_dca_ops,
sizeof(*ioatdca)
+ (sizeof(struct ioat_dca_slot) * slots));
if (!dca)
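
The dca.c rewrite above removes the I/OAT v1/v2 providers and renames the v3
callbacks to generic ioat_* names; the APIC-ID-to-DCA-tag mapping itself survives
unchanged. A standalone sketch of that mapping (the constants follow the "bit 7 is
the valid bit" comment near the top of the file):

    #include <stdint.h>

    #define DCA_TAG_MAP_VALID 0x80
    #define IOAT_TAG_MAP_LEN  8

    static uint8_t dca_tag_from_apicid(const uint8_t *tag_map, int apic_id)
    {
            uint8_t tag = 0;
            int i;

            for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
                    uint8_t entry = tag_map[i];
                    int value;

                    if (entry & DCA_TAG_MAP_VALID)
                            /* sample the selected bit of the APIC ID */
                            value = (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
                    else
                            /* fixed contribution: nonzero entry -> 1 */
                            value = entry ? 1 : 0;

                    tag |= value << i;
            }

            return tag;
    }
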
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index ee0aa9f4ccfa..f66b7e640610 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1,6 +1,6 @@
/*
* Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,31 +31,23 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
#include "../dmaengine.h"
-int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
-
-/* internal functions */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat);
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+static void ioat_eh(struct ioatdma_chan *ioat_chan);
/**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id
* @data: interrupt data
*/
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
struct ioatdma_device *instance = data;
- struct ioat_chan_common *chan;
+ struct ioatdma_chan *ioat_chan;
unsigned long attnstatus;
int bit;
u8 intrctrl;
@@ -72,9 +64,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
- chan = ioat_chan_by_index(instance, bit);
- if (test_bit(IOAT_RUN, &chan->state))
- tasklet_schedule(&chan->cleanup_task);
+ ioat_chan = ioat_chan_by_index(instance, bit);
+ if (test_bit(IOAT_RUN, &ioat_chan->state))
+ tasklet_schedule(&ioat_chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -86,1161 +78,912 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
* @irq: interrupt id
* @data: interrupt data
*/
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
- struct ioat_chan_common *chan = data;
+ struct ioatdma_chan *ioat_chan = data;
- if (test_bit(IOAT_RUN, &chan->state))
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &ioat_chan->state))
+ tasklet_schedule(&ioat_chan->cleanup_task);
return IRQ_HANDLED;
}
-/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
+void ioat_stop(struct ioatdma_chan *ioat_chan)
{
- struct dma_device *dma = &device->common;
- struct dma_chan *c = &chan->common;
- unsigned long data = (unsigned long) c;
-
- chan->device = device;
- chan->reg_base = device->reg_base + (0x80 * (idx + 1));
- spin_lock_init(&chan->cleanup_lock);
- chan->common.device = dma;
- dma_cookie_init(&chan->common);
- list_add_tail(&chan->common.device_node, &dma->channels);
- device->idx[idx] = chan;
- init_timer(&chan->timer);
- chan->timer.function = device->timer_fn;
- chan->timer.data = data;
- tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct pci_dev *pdev = ioat_dma->pdev;
+ int chan_id = chan_num(ioat_chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &ioat_chan->state);
+
+ /* flush inflight interrupts */
+ switch (ioat_dma->irq_mode) {
+ case IOAT_MSIX:
+ msix = &ioat_dma->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&ioat_chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&ioat_chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}
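
The ordering in ioat_stop() above is the load-bearing part: IOAT_RUN is cleared first, so the irq handler stops scheduling the tasklet and the tasklet stops re-arming interrupts, and only then is each source of deferred work flushed. A minimal sketch of the same quiesce pattern in isolation, assuming a hypothetical my_chan with one irq line, one timer, and one tasklet (only kernel calls already used in the patch are relied on):

    #include <linux/interrupt.h>
    #include <linux/timer.h>
    #include <linux/bitops.h>

    static void my_chan_quiesce(struct my_chan *ch)
    {
            clear_bit(MY_RUN, &ch->state);  /* 1/ irq stops scheduling the tasklet */
            synchronize_irq(ch->irq);       /* 2/ wait out handlers already in flight */
            del_timer_sync(&ch->timer);     /* 3/ no pending timer can re-arm work */
            tasklet_kill(&ch->tasklet);     /* 4/ any queued tasklet runs to completion */
    }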
-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
+static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
- u8 xfercap_scale;
- u32 xfercap;
- int i;
- struct ioat_dma_chan *ioat;
- struct device *dev = &device->pdev->dev;
- struct dma_device *dma = &device->common;
-
- INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(device->idx)) {
- dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(device->idx));
- dma->chancnt = ARRAY_SIZE(device->idx);
- }
- xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
- xfercap_scale &= 0x1f; /* bits [4:0] valid */
- xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
- dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
- if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
- dma->chancnt--;
-#endif
- for (i = 0; i < dma->chancnt; i++) {
- ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
- if (!ioat)
- break;
+ ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+ ioat_chan->issued = ioat_chan->head;
+ writew(ioat_chan->dmacount,
+ ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+ dev_dbg(to_dev(ioat_chan),
+ "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+ __func__, ioat_chan->head, ioat_chan->tail,
+ ioat_chan->issued, ioat_chan->dmacount);
+}
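
For orientation: head, issued, and tail are free-running indices into a power-of-two ring, so the counts logged above reduce to circular-buffer arithmetic. A hedged sketch of the counting helpers this code leans on, using the CIRC_* macros from <linux/circ_buf.h> (which the dma.h hunk below starts including); the helper names here are illustrative, not the driver's:

    #include <linux/circ_buf.h>

    /* ring length is 1 << alloc_order, as in the patch */
    static inline u32 ring_size(u16 alloc_order)
    {
            return 1 << alloc_order;
    }

    /* descriptors prepared but not yet made visible to the hardware */
    static inline u16 ring_pending(u16 head, u16 issued, u16 alloc_order)
    {
            return CIRC_CNT(head, issued, ring_size(alloc_order));
    }

    /* descriptors the hardware may still own (not yet cleaned up) */
    static inline u16 ring_active(u16 head, u16 tail, u16 alloc_order)
    {
            return CIRC_CNT(head, tail, ring_size(alloc_order));
    }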
+
+void ioat_issue_pending(struct dma_chan *c)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
- ioat_init_channel(device, &ioat->base, i);
- ioat->xfercap = xfercap;
- spin_lock_init(&ioat->desc_lock);
- INIT_LIST_HEAD(&ioat->free_desc);
- INIT_LIST_HEAD(&ioat->used_desc);
+ if (ioat_ring_pending(ioat_chan)) {
+ spin_lock_bh(&ioat_chan->prep_lock);
+ __ioat_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
}
- dma->chancnt = i;
- return i;
}
/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
+ * ioat_update_pending - log pending descriptors
+ * @ioat_chan: ioat channel
+ *
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with prep_lock held.
*/
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
- void __iomem *reg_base = ioat->base.reg_base;
-
- dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
- __func__, ioat->pending);
- ioat->pending = 0;
- writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
+ if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+ __ioat_issue_pending(ioat_chan);
}
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(chan);
+ struct ioat_ring_ent *desc;
+ struct ioat_dma_descriptor *hw;
- if (ioat->pending > 0) {
- spin_lock_bh(&ioat->desc_lock);
- __ioat1_dma_memcpy_issue_pending(ioat);
- spin_unlock_bh(&ioat->desc_lock);
+ if (ioat_ring_space(ioat_chan) < 1) {
+ dev_err(to_dev(ioat_chan),
+ "Unable to start null desc - ring full\n");
+ return;
}
+
+ dev_dbg(to_dev(ioat_chan),
+ "%s: head: %#x tail: %#x issued: %#x\n",
+ __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
+ hw = desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = 1;
+ hw->ctl_f.compl_write = 1;
+ /* set size to non-zero value (channel returns error when size is 0) */
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ hw->src_addr = 0;
+ hw->dst_addr = 0;
+ async_tx_ack(&desc->txd);
+ ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+ dump_desc_dbg(ioat_chan, desc);
+ /* make sure descriptors are written before we submit */
+ wmb();
+ ioat_chan->head += 1;
+ __ioat_issue_pending(ioat_chan);
}
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
- struct ioat_chan_common *chan = &ioat->base;
- void __iomem *reg_base = chan->reg_base;
- u32 chansts, chanerr;
-
- dev_warn(to_dev(chan), "reset\n");
- chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
- chansts = *chan->completion & IOAT_CHANSTS_STATUS;
- if (chanerr) {
- dev_err(to_dev(chan),
- "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
- chan_num(chan), chansts, chanerr);
- writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ __ioat_start_null_desc(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
+static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
+{
+ /* set the tail to be re-issued */
+ ioat_chan->issued = ioat_chan->tail;
+ ioat_chan->dmacount = 0;
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ dev_dbg(to_dev(ioat_chan),
+ "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+ __func__, ioat_chan->head, ioat_chan->tail,
+ ioat_chan->issued, ioat_chan->dmacount);
+
+ if (ioat_ring_pending(ioat_chan)) {
+ struct ioat_ring_ent *desc;
+
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+ ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+ __ioat_issue_pending(ioat_chan);
+ } else
+ __ioat_start_null_desc(ioat_chan);
+}
+
+static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+ unsigned long end = jiffies + tmo;
+ int err = 0;
+ u32 status;
+
+ status = ioat_chansts(ioat_chan);
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ ioat_suspend(ioat_chan);
+ while (is_ioat_active(status) || is_ioat_idle(status)) {
+ if (tmo && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ status = ioat_chansts(ioat_chan);
+ cpu_relax();
}
- /*
- * whack it upside the head with a reset
- * and wait for things to settle out.
- * force the pending count to a really big negative
- * to make sure no one forces an issue_pending
- * while we're waiting.
- */
+ return err;
+}
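
ioat_quiesce() is a bounded busy-wait: suspend once if the channel is running, then poll the status register until the channel stops or the timeout expires (tmo == 0 means wait indefinitely). The same shape reduced to a skeleton, as a sketch; still_running() and read_hw_state() are placeholders:

    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static int poll_until_stopped(struct my_chan *ch, unsigned long tmo)
    {
            unsigned long end = jiffies + tmo;

            while (still_running(read_hw_state(ch))) {
                    if (tmo && time_after(jiffies, end))
                            return -ETIMEDOUT;  /* give up; caller decides what next */
                    cpu_relax();                /* polite spin while polling */
            }
            return 0;
    }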
- ioat->pending = INT_MIN;
- writeb(IOAT_CHANCMD_RESET,
- reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
- set_bit(IOAT_RESET_PENDING, &chan->state);
- mod_timer(&chan->timer, jiffies + RESET_DELAY);
+static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+ unsigned long end = jiffies + tmo;
+ int err = 0;
+
+ ioat_reset(ioat_chan);
+ while (ioat_reset_pending(ioat_chan)) {
+ if (end && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ cpu_relax();
+ }
+
+ return err;
}
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+ __releases(&ioat_chan->prep_lock)
{
struct dma_chan *c = tx->chan;
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
- struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioat_desc_sw *first;
- struct ioat_desc_sw *chain_tail;
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
dma_cookie_t cookie;
- spin_lock_bh(&ioat->desc_lock);
- /* cookie incr and addition to used_list must be atomic */
cookie = dma_cookie_assign(tx);
- dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+ dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
+
+ if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- /* write address into NextDescriptor field of last desc in chain */
- first = to_ioat_desc(desc->tx_list.next);
- chain_tail = to_ioat_desc(ioat->used_desc.prev);
- /* make descriptor updates globally visible before chaining */
+	/* make descriptor updates visible before advancing ioat_chan->head;
+	 * this is purposefully not smp_wmb() since we are also
+	 * publishing the descriptor updates to a dma device
+	 */
wmb();
- chain_tail->hw->next = first->txd.phys;
- list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
- dump_desc_dbg(ioat, chain_tail);
- dump_desc_dbg(ioat, first);
- if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ ioat_chan->head += ioat_chan->produce;
- ioat->active += desc->hw->tx_cnt;
- ioat->pending += desc->hw->tx_cnt;
- if (ioat->pending >= ioat_pending_level)
- __ioat1_dma_memcpy_issue_pending(ioat);
- spin_unlock_bh(&ioat->desc_lock);
+ ioat_update_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
return cookie;
}
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
+static struct ioat_ring_ent *
+ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
- struct ioat_dma_descriptor *desc;
- struct ioat_desc_sw *desc_sw;
- struct ioatdma_device *ioatdma_device;
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *desc;
+ struct ioatdma_device *ioat_dma;
dma_addr_t phys;
- ioatdma_device = ioat->base.device;
- desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
- if (unlikely(!desc))
+ ioat_dma = to_ioatdma_device(chan->device);
+ hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+ if (!hw)
return NULL;
+ memset(hw, 0, sizeof(*hw));
- desc_sw = kzalloc(sizeof(*desc_sw), flags);
- if (unlikely(!desc_sw)) {
- pci_pool_free(ioatdma_device->dma_pool, desc, phys);
+ desc = kmem_cache_zalloc(ioat_cache, flags);
+ if (!desc) {
+ pci_pool_free(ioat_dma->dma_pool, hw, phys);
return NULL;
}
- memset(desc, 0, sizeof(*desc));
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = ioat_tx_submit_unlock;
+ desc->hw = hw;
+ desc->txd.phys = phys;
+ return desc;
+}
- INIT_LIST_HEAD(&desc_sw->tx_list);
- dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
- desc_sw->txd.tx_submit = ioat1_tx_submit;
- desc_sw->hw = desc;
- desc_sw->txd.phys = phys;
- set_desc_id(desc_sw, -1);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+ struct ioatdma_device *ioat_dma;
- return desc_sw;
+ ioat_dma = to_ioatdma_device(chan->device);
+ pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+ kmem_cache_free(ioat_cache, desc);
}
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
- "ioat1: initial descriptors per channel (default: 256)");
-/**
- * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioat_desc_sw *desc;
- u32 chanerr;
+ struct ioat_ring_ent **ring;
+ int descs = 1 << order;
int i;
- LIST_HEAD(tmp_list);
-
- /* have we already been set up? */
- if (!list_empty(&ioat->free_desc))
- return ioat->desccount;
- /* Setup register to interrupt and write completion status on error */
- writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+ if (order > ioat_get_max_alloc_order())
+ return NULL;
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr) {
- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ /* allocate the array to hold the software ring */
+ ring = kcalloc(descs, sizeof(*ring), flags);
+ if (!ring)
+ return NULL;
+ for (i = 0; i < descs; i++) {
+ ring[i] = ioat_alloc_ring_ent(c, flags);
+ if (!ring[i]) {
+ while (i--)
+ ioat_free_ring_ent(ring[i], c);
+ kfree(ring);
+ return NULL;
+ }
+ set_desc_id(ring[i], i);
}
- /* Allocate descriptors */
- for (i = 0; i < ioat_initial_desc_count; i++) {
- desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
- if (!desc) {
- dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
- break;
- }
- set_desc_id(desc, i);
- list_add_tail(&desc->node, &tmp_list);
+ /* link descs */
+ for (i = 0; i < descs-1; i++) {
+ struct ioat_ring_ent *next = ring[i+1];
+ struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+ hw->next = next->txd.phys;
}
- spin_lock_bh(&ioat->desc_lock);
- ioat->desccount = i;
- list_splice(&tmp_list, &ioat->free_desc);
- spin_unlock_bh(&ioat->desc_lock);
-
- /* allocate a completion writeback area */
- /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
- chan->completion = pci_pool_alloc(chan->device->completion_pool,
- GFP_KERNEL, &chan->completion_dma);
- memset(chan->completion, 0, sizeof(*chan->completion));
- writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
- chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
- writel(((u64) chan->completion_dma) >> 32,
- chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
- set_bit(IOAT_RUN, &chan->state);
- ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
- dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
- __func__, ioat->desccount);
- return ioat->desccount;
+ ring[i]->hw->next = ring[0]->txd.phys;
+
+ return ring;
}
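
Note the assignment after the linking loop: the last entry's hw->next wraps back to entry 0, so the hardware chain is circular from the start. A hedged usage sketch (the order value and GFP flag are illustrative):

    /* allocate a 16-entry ring (order 4); on success every entry's
     * hw->next points at its successor and the last wraps to the first:
     *   ring[i]->hw->next == ring[(i + 1) & 15]->txd.phys
     */
    struct ioat_ring_ent **ring = ioat_alloc_ring(c, 4, GFP_KERNEL);

    if (!ring)
            return -ENOMEM;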
-void ioat_stop(struct ioat_chan_common *chan)
+static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
{
- struct ioatdma_device *device = chan->device;
- struct pci_dev *pdev = device->pdev;
- int chan_id = chan_num(chan);
- struct msix_entry *msix;
+ /* reshape differs from normal ring allocation in that we want
+ * to allocate a new software ring while only
+ * extending/truncating the hardware ring
+ */
+ struct dma_chan *c = &ioat_chan->dma_chan;
+ const u32 curr_size = ioat_ring_size(ioat_chan);
+ const u16 active = ioat_ring_active(ioat_chan);
+ const u32 new_size = 1 << order;
+ struct ioat_ring_ent **ring;
+ u32 i;
+
+ if (order > ioat_get_max_alloc_order())
+ return false;
- /* 1/ stop irq from firing tasklets
- * 2/ stop the tasklet from re-arming irqs
+ /* double check that we have at least 1 free descriptor */
+ if (active == curr_size)
+ return false;
+
+ /* when shrinking, verify that we can hold the current active
+ * set in the new ring
*/
- clear_bit(IOAT_RUN, &chan->state);
+ if (active >= new_size)
+ return false;
- /* flush inflight interrupts */
- switch (device->irq_mode) {
- case IOAT_MSIX:
- msix = &device->msix_entries[chan_id];
- synchronize_irq(msix->vector);
- break;
- case IOAT_MSI:
- case IOAT_INTX:
- synchronize_irq(pdev->irq);
- break;
- default:
- break;
- }
+ /* allocate the array to hold the software ring */
+ ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+ if (!ring)
+ return false;
- /* flush inflight timers */
- del_timer_sync(&chan->timer);
+ /* allocate/trim descriptors as needed */
+ if (new_size > curr_size) {
+ /* copy current descriptors to the new ring */
+ for (i = 0; i < curr_size; i++) {
+ u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+ u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
- /* flush inflight tasklet runs */
- tasklet_kill(&chan->cleanup_task);
+ ring[new_idx] = ioat_chan->ring[curr_idx];
+ set_desc_id(ring[new_idx], new_idx);
+ }
- /* final cleanup now that everything is quiesced and can't re-arm */
- device->cleanup_fn((unsigned long) &chan->common);
-}
+ /* add new descriptors to the ring */
+ for (i = curr_size; i < new_size; i++) {
+ u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-/**
- * ioat1_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat1_dma_free_chan_resources(struct dma_chan *c)
-{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioatdma_device *ioatdma_device = chan->device;
- struct ioat_desc_sw *desc, *_desc;
- int in_use_descs = 0;
-
- /* Before freeing channel resources first check
- * if they have been previously allocated for this channel.
- */
- if (ioat->desccount == 0)
- return;
+ ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
+ if (!ring[new_idx]) {
+ while (i--) {
+ u16 new_idx = (ioat_chan->tail+i) &
+ (new_size-1);
+
+ ioat_free_ring_ent(ring[new_idx], c);
+ }
+ kfree(ring);
+ return false;
+ }
+ set_desc_id(ring[new_idx], new_idx);
+ }
- ioat_stop(chan);
+ /* hw link new descriptors */
+ for (i = curr_size-1; i < new_size; i++) {
+ u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+ struct ioat_ring_ent *next =
+ ring[(new_idx+1) & (new_size-1)];
+ struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
- /* Delay 100ms after reset to allow internal DMA logic to quiesce
- * before removing DMA descriptor resources.
- */
- writeb(IOAT_CHANCMD_RESET,
- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
- mdelay(100);
-
- spin_lock_bh(&ioat->desc_lock);
- list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
- dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
- __func__, desc_id(desc));
- dump_desc_dbg(ioat, desc);
- in_use_descs++;
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->txd.phys);
- kfree(desc);
- }
- list_for_each_entry_safe(desc, _desc,
- &ioat->free_desc, node) {
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->txd.phys);
- kfree(desc);
+ hw->next = next->txd.phys;
+ }
+ } else {
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *next;
+
+ /* copy current descriptors to the new ring, dropping the
+ * removed descriptors
+ */
+ for (i = 0; i < new_size; i++) {
+ u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+ u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+ ring[new_idx] = ioat_chan->ring[curr_idx];
+ set_desc_id(ring[new_idx], new_idx);
+ }
+
+ /* free deleted descriptors */
+ for (i = new_size; i < curr_size; i++) {
+ struct ioat_ring_ent *ent;
+
+ ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
+ ioat_free_ring_ent(ent, c);
+ }
+
+ /* fix up hardware ring */
+ hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
+ next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
+ hw->next = next->txd.phys;
}
- spin_unlock_bh(&ioat->desc_lock);
- pci_pool_free(ioatdma_device->completion_pool,
- chan->completion,
- chan->completion_dma);
+ dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
+ __func__, new_size);
- /* one is ok since we left it on there on purpose */
- if (in_use_descs > 1)
- dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
- in_use_descs - 1);
+ kfree(ioat_chan->ring);
+ ioat_chan->ring = ring;
+ ioat_chan->alloc_order = order;
- chan->last_completion = 0;
- chan->completion_dma = 0;
- ioat->pending = 0;
- ioat->desccount = 0;
+ return true;
}
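
The remapping in reshape_ring() keeps each live descriptor at the same offset from tail while only the index mask changes width. A worked example as a sketch, with concrete numbers rather than driver state:

    /* same formula as the (ioat_chan->tail + i) & (size - 1) masks above */
    static u16 remap_idx(u16 tail, u16 i, u16 order)
    {
            return (tail + i) & ((1 << order) - 1);
    }

    /* growing an 8-entry ring (order 3) to 16 (order 4) with tail == 6:
     *   i = 0: old index 6 -> new index 6
     *   i = 1: old index 7 -> new index 7
     *   i = 2: old index 0 -> new index 8  (wrapped in the old ring only)
     *   i = 3: old index 1 -> new index 9
     * so the CIRC_CNT ordering from tail survives the resize intact.
     */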
/**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat: IOAT DMA channel handle
- *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held. Allocates more descriptors if the channel
- * has run out.
+ * ioat_check_space_lock - verify space and grab ring producer lock
+ * @ioat_chan: ioat channel (ring) to operate on
+ * @num_descs: allocation length
*/
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
+ __acquires(&ioat_chan->prep_lock)
{
- struct ioat_desc_sw *new;
+ bool retry;
- if (!list_empty(&ioat->free_desc)) {
- new = to_ioat_desc(ioat->free_desc.next);
- list_del(&new->node);
- } else {
- /* try to get another desc */
- new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
- if (!new) {
- dev_err(to_dev(&ioat->base), "alloc failed\n");
- return NULL;
- }
+ retry:
+ spin_lock_bh(&ioat_chan->prep_lock);
+	/* never allow the last descriptor to be consumed; we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+ if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
+ dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat_chan->head,
+ ioat_chan->tail, ioat_chan->issued);
+ ioat_chan->produce = num_descs;
+ return 0; /* with ioat->prep_lock held */
+ }
+ retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ /* is another cpu already trying to expand the ring? */
+ if (retry)
+ goto retry;
+
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
+ clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+ /* if we were able to expand the ring retry the allocation */
+ if (retry)
+ goto retry;
+
+ dev_dbg_ratelimited(to_dev(ioat_chan),
+ "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat_chan->head,
+ ioat_chan->tail, ioat_chan->issued);
+
+	/* Progress reclaim in the allocation-failure case: we may be
+	 * called with bottom halves disabled, so we need to trigger
+	 * the timer event directly.
+	 */
+ if (time_is_before_jiffies(ioat_chan->timer.expires)
+ && timer_pending(&ioat_chan->timer)) {
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+ ioat_timer_event((unsigned long)ioat_chan);
}
- dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
- __func__, desc_id(new));
- prefetch(new->hw);
- return new;
+
+ return -ENOMEM;
}
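
A successful ioat_check_space_lock() returns with prep_lock held; ioat_tx_submit_unlock() above is the matching release, and a prep routine sits between the two. A hedged sketch of that caller shape (descriptor programming elided; this mirrors the prep functions elsewhere in this series rather than quoting one):

    static struct dma_async_tx_descriptor *
    my_prep(struct dma_chan *c, int num_descs)
    {
            struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
            struct ioat_ring_ent *desc;

            if (ioat_check_space_lock(ioat_chan, num_descs) != 0)
                    return NULL;    /* ring full even after a reshape attempt */

            /* prep_lock held here: fill num_descs entries starting at head */
            desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
            /* ... program desc->hw fields ... */

            return &desc->txd;      /* the later tx_submit drops prep_lock */
    }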
-static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags)
+static bool desc_has_ext(struct ioat_ring_ent *desc)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
- struct ioat_desc_sw *desc;
- size_t copy;
- LIST_HEAD(chain);
- dma_addr_t src = dma_src;
- dma_addr_t dest = dma_dest;
- size_t total_len = len;
- struct ioat_dma_descriptor *hw = NULL;
- int tx_cnt = 0;
-
- spin_lock_bh(&ioat->desc_lock);
- desc = ioat1_dma_get_next_descriptor(ioat);
- do {
- if (!desc)
- break;
-
- tx_cnt++;
- copy = min_t(size_t, len, ioat->xfercap);
+ struct ioat_dma_descriptor *hw = desc->hw;
- hw = desc->hw;
- hw->size = copy;
- hw->ctl = 0;
- hw->src_addr = src;
- hw->dst_addr = dest;
+ if (hw->ctl_f.op == IOAT_OP_XOR ||
+ hw->ctl_f.op == IOAT_OP_XOR_VAL) {
+ struct ioat_xor_descriptor *xor = desc->xor;
- list_add_tail(&desc->node, &chain);
+ if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
+ return true;
+ } else if (hw->ctl_f.op == IOAT_OP_PQ ||
+ hw->ctl_f.op == IOAT_OP_PQ_VAL) {
+ struct ioat_pq_descriptor *pq = desc->pq;
- len -= copy;
- dest += copy;
- src += copy;
- if (len) {
- struct ioat_desc_sw *next;
-
- async_tx_ack(&desc->txd);
- next = ioat1_dma_get_next_descriptor(ioat);
- hw->next = next ? next->txd.phys : 0;
- dump_desc_dbg(ioat, desc);
- desc = next;
- } else
- hw->next = 0;
- } while (len);
-
- if (!desc) {
- struct ioat_chan_common *chan = &ioat->base;
-
- dev_err(to_dev(chan),
- "chan%d - get_next_desc failed\n", chan_num(chan));
- list_splice(&chain, &ioat->free_desc);
- spin_unlock_bh(&ioat->desc_lock);
- return NULL;
+ if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
+ return true;
}
- spin_unlock_bh(&ioat->desc_lock);
- desc->txd.flags = flags;
- desc->len = total_len;
- list_splice(&chain, &desc->tx_list);
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->tx_cnt = tx_cnt;
- dump_desc_dbg(ioat, desc);
-
- return &desc->txd;
+ return false;
}
-static void ioat1_cleanup_event(unsigned long data)
+static void
+ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
- struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
-
- ioat1_cleanup(ioat);
- if (!test_bit(IOAT_RUN, &chan->state))
+ if (!sed)
return;
- writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+
+ dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+ kmem_cache_free(ioat_sed_cache, sed);
}
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
+static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
- dma_addr_t phys_complete;
+ u64 phys_complete;
u64 completion;
- completion = *chan->completion;
+ completion = *ioat_chan->completion;
phys_complete = ioat_chansts_to_addr(completion);
- dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+ dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
(unsigned long long) phys_complete);
- if (is_ioat_halted(completion)) {
- u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
- chanerr);
-
- /* TODO do something to salvage the situation */
- }
-
return phys_complete;
}
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- dma_addr_t *phys_complete)
+static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
+ u64 *phys_complete)
{
- *phys_complete = ioat_get_current_completion(chan);
- if (*phys_complete == chan->last_completion)
+ *phys_complete = ioat_get_current_completion(ioat_chan);
+ if (*phys_complete == ioat_chan->last_completion)
return false;
- clear_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
return true;
}
-static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
+static void
+desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
- struct ioat_chan_common *chan = &ioat->base;
- struct list_head *_desc, *n;
- struct dma_async_tx_descriptor *tx;
+ struct ioat_dma_descriptor *hw = desc->hw;
- dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
- __func__, (unsigned long long) phys_complete);
- list_for_each_safe(_desc, n, &ioat->used_desc) {
- struct ioat_desc_sw *desc;
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ {
+ struct ioat_pq_descriptor *pq = desc->pq;
- prefetch(n);
- desc = list_entry(_desc, typeof(*desc), node);
- tx = &desc->txd;
- /*
- * Incoming DMA requests may use multiple descriptors,
- * due to exceeding xfercap, perhaps. If so, only the
- * last one will have a cookie, and require unmapping.
- */
- dump_desc_dbg(ioat, desc);
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- ioat->active -= desc->hw->tx_cnt;
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
+ /* check if there's error written */
+ if (!pq->dwbes_f.wbes)
+ return;
- if (tx->phys != phys_complete) {
- /*
- * a completed entry, but not the last, so clean
- * up if the client is done with the descriptor
- */
- if (async_tx_test_ack(tx))
- list_move_tail(&desc->node, &ioat->free_desc);
- } else {
- /*
- * last used desc. Do not remove, so we can
- * append from it.
- */
-
- /* if nothing else is pending, cancel the
- * completion timeout
- */
- if (n == &ioat->used_desc) {
- dev_dbg(to_dev(chan),
- "%s cancel completion timeout\n",
- __func__);
- clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
- }
+		/* need to save a chanerr value here for a later check-and-clear */
- /* TODO check status bits? */
- break;
- }
- }
+ if (pq->dwbes_f.p_val_err)
+ *desc->result |= SUM_CHECK_P_RESULT;
+
+ if (pq->dwbes_f.q_val_err)
+ *desc->result |= SUM_CHECK_Q_RESULT;
- chan->last_completion = phys_complete;
+ return;
+ }
+ default:
+ return;
+ }
}
/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- *
- * To prevent lock contention we defer cleanup when the locks are
- * contended with a terminal timeout that forces cleanup and catches
- * completion notification errors.
+ * __cleanup - reclaim used descriptors
+ * @ioat_chan: channel (ring) to clean
*/
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
- struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
-
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ bool seen_current = false;
+ int idx = ioat_chan->tail, i;
+ u16 active;
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
+ dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
+ __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
- if (!spin_trylock_bh(&ioat->desc_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
+ /*
+ * At restart of the channel, the completion address and the
+ * channel status will be 0 due to starting a new chain. Since
+	 * it's a new chain and the first descriptor "fails", there is
+ * nothing to clean up. We do not want to reap the entire submitted
+ * chain due to this 0 address value and then BUG.
+ */
+ if (!phys_complete)
return;
- }
- __cleanup(ioat, phys_complete);
+ active = ioat_ring_active(ioat_chan);
+ for (i = 0; i < active && !seen_current; i++) {
+ struct dma_async_tx_descriptor *tx;
- spin_unlock_bh(&ioat->desc_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat1_timer_event(unsigned long data)
-{
- struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
+ smp_read_barrier_depends();
+ prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ dump_desc_dbg(ioat_chan, desc);
- dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
+ /* set err stat if we are using dwbes */
+ if (ioat_dma->cap & IOAT_CAP_DWBES)
+ desc_get_errstat(ioat_chan, desc);
- spin_lock_bh(&chan->cleanup_lock);
- if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
- struct ioat_desc_sw *desc;
-
- spin_lock_bh(&ioat->desc_lock);
+ tx = &desc->txd;
+ if (tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ if (tx->callback) {
+ tx->callback(tx->callback_param);
+ tx->callback = NULL;
+ }
+ }
- /* restart active descriptors */
- desc = to_ioat_desc(ioat->used_desc.prev);
- ioat_set_chainaddr(ioat, desc->txd.phys);
- ioat_start(chan);
+ if (tx->phys == phys_complete)
+ seen_current = true;
- ioat->pending = 0;
- set_bit(IOAT_COMPLETION_PENDING, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- spin_unlock_bh(&ioat->desc_lock);
- } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
+ /* skip extended descriptors */
+ if (desc_has_ext(desc)) {
+ BUG_ON(i + 1 >= active);
+ i++;
+ }
- spin_lock_bh(&ioat->desc_lock);
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
- ioat1_reset_channel(ioat);
- else {
- u64 status = ioat_chansts(chan);
-
- /* manually update the last completion address */
- if (ioat_chansts_to_addr(status) != 0)
- *chan->completion = status;
-
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat_free_sed(ioat_dma, desc->sed);
+ desc->sed = NULL;
}
- spin_unlock_bh(&ioat->desc_lock);
}
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-enum dma_status
-ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct ioat_chan_common *chan = to_chan_common(c);
- struct ioatdma_device *device = chan->device;
- enum dma_status ret;
- ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
+ /* finish all descriptor reads before incrementing tail */
+ smp_mb();
+ ioat_chan->tail = idx + i;
+ /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current);
+ ioat_chan->last_completion = phys_complete;
- device->cleanup_fn((unsigned long) c);
+ if (active - i == 0) {
+ dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
+ __func__);
+ mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ }
- return dma_cookie_status(c, cookie, txstate);
+ /* 5 microsecond delay per pending descriptor */
+ writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
+ ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}
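
The closing writew() programs interrupt coalescing in proportion to the work still outstanding: five microseconds per descriptor left after this pass, clamped to the register field. As plain arithmetic (the exact IOAT_INTRDELAY_MASK value lives in registers.h):

    /* e.g. 37 descriptors still active after reclaiming i of them:
     *   delay = min(5 * 37, IOAT_INTRDELAY_MASK) = 185 microseconds,
     * assuming 185 fits under the mask; an idle ring programs 0.
     */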
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
+static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
- struct ioat_chan_common *chan = &ioat->base;
- struct ioat_desc_sw *desc;
- struct ioat_dma_descriptor *hw;
+ u64 phys_complete;
- spin_lock_bh(&ioat->desc_lock);
+ spin_lock_bh(&ioat_chan->cleanup_lock);
- desc = ioat1_dma_get_next_descriptor(ioat);
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+ __cleanup(ioat_chan, phys_complete);
- if (!desc) {
- dev_err(to_dev(chan),
- "Unable to start null desc - get next desc failed\n");
- spin_unlock_bh(&ioat->desc_lock);
- return;
- }
+ if (is_ioat_halted(*ioat_chan->completion)) {
+ u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- hw = desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = 1;
- hw->ctl_f.compl_write = 1;
- /* set size to non-zero value (channel returns error when size is 0) */
- hw->size = NULL_DESC_BUFFER_SIZE;
- hw->src_addr = 0;
- hw->dst_addr = 0;
- async_tx_ack(&desc->txd);
- hw->next = 0;
- list_add_tail(&desc->node, &ioat->used_desc);
- dump_desc_dbg(ioat, desc);
+ if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ ioat_eh(ioat_chan);
+ }
+ }
- ioat_set_chainaddr(ioat, desc->txd.phys);
- ioat_start(chan);
- spin_unlock_bh(&ioat->desc_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
}
-/*
- * Perform a IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
+void ioat_cleanup_event(unsigned long data)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+
+ ioat_cleanup(ioat_chan);
+ if (!test_bit(IOAT_RUN, &ioat_chan->state))
+ return;
+ writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
-static void ioat_dma_test_callback(void *dma_async_param)
+static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
- struct completion *cmp = dma_async_param;
+ u64 phys_complete;
+
+ ioat_quiesce(ioat_chan, 0);
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+ __cleanup(ioat_chan, phys_complete);
- complete(cmp);
+ __ioat_restart_chan(ioat_chan);
}
-/**
- * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-int ioat_dma_self_test(struct ioatdma_device *device)
+static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
- int i;
- u8 *src;
- u8 *dest;
- struct dma_device *dma = &device->common;
- struct device *dev = &device->pdev->dev;
- struct dma_chan *dma_chan;
+ struct pci_dev *pdev = to_pdev(ioat_chan);
+ struct ioat_dma_descriptor *hw;
struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- int err = 0;
- struct completion cmp;
- unsigned long tmo;
- unsigned long flags;
-
- src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!src)
- return -ENOMEM;
- dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!dest) {
- kfree(src);
- return -ENOMEM;
- }
+ u64 phys_complete;
+ struct ioat_ring_ent *desc;
+ u32 err_handled = 0;
+ u32 chanerr_int;
+ u32 chanerr;
- /* Fill in src buffer */
- for (i = 0; i < IOAT_TEST_SIZE; i++)
- src[i] = (u8)i;
-
- /* Start copy, using first DMA channel */
- dma_chan = container_of(dma->channels.next, struct dma_chan,
- device_node);
- if (dma->device_alloc_chan_resources(dma_chan) < 1) {
- dev_err(dev, "selftest cannot allocate chan resource\n");
- err = -ENODEV;
- goto out;
- }
+ /* cleanup so tail points to descriptor that caused the error */
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+ __cleanup(ioat_chan, phys_complete);
- dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_src)) {
- dev_err(dev, "mapping src buffer failed\n");
- goto free_resources;
- }
- dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dma_dest)) {
- dev_err(dev, "mapping dest buffer failed\n");
- goto unmap_src;
- }
- flags = DMA_PREP_INTERRUPT;
- tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
- IOAT_TEST_SIZE, flags);
- if (!tx) {
- dev_err(dev, "Self-test prep failed, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test setup failed, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
- dma->device_issue_pending(dma_chan);
+ dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
+ __func__, chanerr, chanerr_int);
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+ hw = desc->hw;
+ dump_desc_dbg(ioat_chan, desc);
- if (tmo == 0 ||
- dma->device_tx_status(dma_chan, cookie, NULL)
- != DMA_COMPLETE) {
- dev_err(dev, "Self-test copy timed out, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
- if (memcmp(src, dest, IOAT_TEST_SIZE)) {
- dev_err(dev, "Self-test copy failed compare, disabling\n");
- err = -ENODEV;
- goto free_resources;
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_XOR_VAL:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ break;
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+ *desc->result |= SUM_CHECK_Q_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+ }
+ break;
}
-unmap_dma:
- dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-unmap_src:
- dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-free_resources:
- dma->device_free_chan_resources(dma_chan);
-out:
- kfree(src);
- kfree(dest);
- return err;
-}
-
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
- sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
- "set ioat interrupt style: msix (default), msi, intx");
-
-/**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
- */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
-{
- struct ioat_chan_common *chan;
- struct pci_dev *pdev = device->pdev;
- struct device *dev = &pdev->dev;
- struct msix_entry *msix;
- int i, j, msixcnt;
- int err = -EINVAL;
- u8 intrctrl = 0;
-
- if (!strcmp(ioat_interrupt_style, "msix"))
- goto msix;
- if (!strcmp(ioat_interrupt_style, "msi"))
- goto msi;
- if (!strcmp(ioat_interrupt_style, "intx"))
- goto intx;
- dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
- goto err_no_irq;
-
-msix:
- /* The number of MSI-X vectors should equal the number of channels */
- msixcnt = device->common.chancnt;
- for (i = 0; i < msixcnt; i++)
- device->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
- if (err)
- goto msi;
-
- for (i = 0; i < msixcnt; i++) {
- msix = &device->msix_entries[i];
- chan = ioat_chan_by_index(device, i);
- err = devm_request_irq(dev, msix->vector,
- ioat_dma_do_interrupt_msix, 0,
- "ioat-msix", chan);
- if (err) {
- for (j = 0; j < i; j++) {
- msix = &device->msix_entries[j];
- chan = ioat_chan_by_index(device, j);
- devm_free_irq(dev, msix->vector, chan);
+ /* fault on unhandled error or spurious halt */
+ if (chanerr ^ err_handled || chanerr == 0) {
+ dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
+ __func__, chanerr, err_handled);
+ BUG();
+ } else { /* cleanup the faulty descriptor */
+ tx = &desc->txd;
+ if (tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ if (tx->callback) {
+ tx->callback(tx->callback_param);
+ tx->callback = NULL;
}
- goto msi;
}
}
- intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
- device->irq_mode = IOAT_MSIX;
- goto done;
-msi:
- err = pci_enable_msi(pdev);
- if (err)
- goto intx;
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
- err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
- "ioat-msi", device);
- if (err) {
- pci_disable_msi(pdev);
- goto intx;
- }
- device->irq_mode = IOAT_MSI;
- goto done;
-
-intx:
- err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
- IRQF_SHARED, "ioat-intx", device);
- if (err)
- goto err_no_irq;
-
- device->irq_mode = IOAT_INTX;
-done:
- if (device->intr_quirk)
- device->intr_quirk(device);
- intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
- writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
- return 0;
-
-err_no_irq:
- /* Disable all interrupt generation */
- writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
- device->irq_mode = IOAT_NOIRQ;
- dev_err(dev, "no usable interrupts\n");
- return err;
-}
-EXPORT_SYMBOL(ioat_dma_setup_interrupts);
+ /* mark faulting descriptor as complete */
+ *ioat_chan->completion = desc->txd.phys;
-static void ioat_disable_interrupts(struct ioatdma_device *device)
-{
- /* Disable all interrupt generation */
- writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ ioat_restart_channel(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
}
-int ioat_probe(struct ioatdma_device *device)
+static void check_active(struct ioatdma_chan *ioat_chan)
{
- int err = -ENODEV;
- struct dma_device *dma = &device->common;
- struct pci_dev *pdev = device->pdev;
- struct device *dev = &pdev->dev;
-
- /* DMA coherent memory pool for DMA descriptor allocations */
- device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!device->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- device->completion_pool = pci_pool_create("completion_pool", pdev,
- sizeof(u64), SMP_CACHE_BYTES,
- SMP_CACHE_BYTES);
-
- if (!device->completion_pool) {
- err = -ENOMEM;
- goto err_completion_pool;
+ if (ioat_ring_active(ioat_chan)) {
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
}
- device->enumerate_channels(device);
-
- dma_cap_set(DMA_MEMCPY, dma->cap_mask);
- dma->dev = &pdev->dev;
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+ mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
+		/* if the ring is idle, empty, and oversized, try to step
+		 * down the size
+		 */
+ reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
- if (!dma->chancnt) {
- dev_err(dev, "channel enumeration error\n");
- goto err_setup_interrupts;
+ /* keep shrinking until we get back to our minimum
+ * default size
+ */
+ if (ioat_chan->alloc_order > ioat_get_alloc_order())
+ mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
- err = ioat_dma_setup_interrupts(device);
- if (err)
- goto err_setup_interrupts;
+}
- err = device->self_test(device);
- if (err)
- goto err_self_test;
+void ioat_timer_event(unsigned long data)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+ dma_addr_t phys_complete;
+ u64 status;
- return 0;
+ status = ioat_chansts(ioat_chan);
-err_self_test:
- ioat_disable_interrupts(device);
-err_setup_interrupts:
- pci_pool_destroy(device->completion_pool);
-err_completion_pool:
- pci_pool_destroy(device->dma_pool);
-err_dma_pool:
- return err;
-}
+	/* when halted due to errors, check for channel
+	 * programming errors before advancing the completion state
+	 */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
-int ioat_register(struct ioatdma_device *device)
-{
- int err = dma_async_device_register(&device->common);
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &ioat_chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
- if (err) {
- ioat_disable_interrupts(device);
- pci_pool_destroy(device->completion_pool);
- pci_pool_destroy(device->dma_pool);
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+ __cleanup(ioat_chan, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+ spin_lock_bh(&ioat_chan->prep_lock);
+ ioat_restart_channel(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- return err;
-}
-/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
-static void ioat1_intr_quirk(struct ioatdma_device *device)
-{
- struct pci_dev *pdev = device->pdev;
- u32 dmactrl;
-
- pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
- if (pdev->msi_enabled)
- dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
- else
- dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
- pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+ if (ioat_ring_active(ioat_chan))
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat_chan->prep_lock);
+ check_active(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ }
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
}
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ enum dma_status ret;
- return sprintf(page, "%d\n", ioat->desccount);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+ ret = dma_cookie_status(c, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ ioat_cleanup(ioat_chan);
- return sprintf(page, "%d\n", ioat->active);
+ return dma_cookie_status(c, cookie, txstate);
}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-static ssize_t cap_show(struct dma_chan *c, char *page)
+static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
{
- struct dma_device *dma = c->device;
+ struct pci_dev *pdev = ioat_dma->pdev;
+ int irq = pdev->irq, i;
- return sprintf(page, "copy%s%s%s%s%s\n",
- dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
- dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
- dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
- dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
- dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+ if (!is_bwd_ioat(pdev))
+ return 0;
-}
-struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
-
-static ssize_t version_show(struct dma_chan *c, char *page)
-{
- struct dma_device *dma = c->device;
- struct ioatdma_device *device = to_ioatdma_device(dma);
+ switch (ioat_dma->irq_mode) {
+ case IOAT_MSIX:
+ for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
+ struct msix_entry *msix = &ioat_dma->msix_entries[i];
+ struct ioatdma_chan *ioat_chan;
- return sprintf(page, "%d.%d\n",
- device->version >> 4, device->version & 0xf);
-}
-struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
-
-static struct attribute *ioat1_attrs[] = {
- &ring_size_attr.attr,
- &ring_active_attr.attr,
- &ioat_cap_attr.attr,
- &ioat_version_attr.attr,
- NULL,
-};
-
-static ssize_t
-ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
- struct ioat_sysfs_entry *entry;
- struct ioat_chan_common *chan;
+ ioat_chan = ioat_chan_by_index(ioat_dma, i);
+ devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
+ }
- entry = container_of(attr, struct ioat_sysfs_entry, attr);
- chan = container_of(kobj, struct ioat_chan_common, kobj);
+ pci_disable_msix(pdev);
+ break;
+ case IOAT_MSI:
+ pci_disable_msi(pdev);
+ /* fall through */
+ case IOAT_INTX:
+ devm_free_irq(&pdev->dev, irq, ioat_dma);
+ break;
+ default:
+ return 0;
+ }
+ ioat_dma->irq_mode = IOAT_NOIRQ;
- if (!entry->show)
- return -EIO;
- return entry->show(&chan->common, page);
+ return ioat_dma_setup_interrupts(ioat_dma);
}
-const struct sysfs_ops ioat_sysfs_ops = {
- .show = ioat_attr_show,
-};
-
-static struct kobj_type ioat1_ktype = {
- .sysfs_ops = &ioat_sysfs_ops,
- .default_attrs = ioat1_attrs,
-};
-
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
- struct dma_device *dma = &device->common;
- struct dma_chan *c;
+ /* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3-specific workarounds
+ */
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct pci_dev *pdev = ioat_dma->pdev;
+ u32 chanerr;
+ u16 dev_id;
+ int err;
+
+ ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
- list_for_each_entry(c, &dma->channels, device_node) {
- struct ioat_chan_common *chan = to_chan_common(c);
- struct kobject *parent = &c->dev->device.kobj;
- int err;
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
+ if (ioat_dma->version < IOAT_VER_3_3) {
+ /* clear any pending errors */
+ err = pci_read_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
if (err) {
- dev_warn(to_dev(chan),
- "sysfs init error (%d), continuing...\n", err);
- kobject_put(&chan->kobj);
- set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
+ dev_err(&pdev->dev,
+ "channel error register unreachable\n");
+ return err;
}
- }
-}
+ pci_write_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-void ioat_kobject_del(struct ioatdma_device *device)
-{
- struct dma_device *dma = &device->common;
- struct dma_chan *c;
-
- list_for_each_entry(c, &dma->channels, device_node) {
- struct ioat_chan_common *chan = to_chan_common(c);
-
- if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
- kobject_del(&chan->kobj);
- kobject_put(&chan->kobj);
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+ pci_write_config_dword(pdev,
+ IOAT_PCI_DMAUNCERRSTS_OFFSET,
+ 0x10);
}
}
-}
-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
-{
- struct pci_dev *pdev = device->pdev;
- struct dma_device *dma;
- int err;
+ err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
+ if (!err)
+ err = ioat_irq_reinit(ioat_dma);
- device->intr_quirk = ioat1_intr_quirk;
- device->enumerate_channels = ioat1_enumerate_channels;
- device->self_test = ioat_dma_self_test;
- device->timer_fn = ioat1_timer_event;
- device->cleanup_fn = ioat1_cleanup_event;
- dma = &device->common;
- dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
- dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
- dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
- dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_tx_status = ioat_dma_tx_status;
-
- err = ioat_probe(device);
- if (err)
- return err;
- err = ioat_register(device);
if (err)
- return err;
- ioat_kobject_add(device, &ioat1_ktype);
-
- if (dca)
- device->dca = ioat_dca_init(pdev, device->reg_base);
+ dev_err(&pdev->dev, "Failed to reset: %d\n", err);
return err;
}
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
- struct dma_device *dma = &device->common;
-
- ioat_disable_interrupts(device);
-
- ioat_kobject_del(device);
-
- dma_async_device_unregister(dma);
-
- pci_pool_destroy(device->dma_pool);
- pci_pool_destroy(device->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
-}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 30f5c7eede16..1bc084986646 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -18,26 +18,32 @@
#define IOATDMA_H
#include <linux/dmaengine.h>
-#include "hw.h"
-#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
-#include <net/tcp.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include "registers.h"
+#include "hw.h"
#define IOAT_DMA_VERSION "4.00"
-#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU ~0
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
+
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
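
These conversions just add or subtract the hardware's implicit source-count bias: 2 for the ordinary raid descriptors, 9 for the 16-source variants. Worked values as a compile-time sketch (BUILD_BUG_ON is used here only to assert the arithmetic):

    static inline void src_cnt_macro_examples(void)
    {
            BUILD_BUG_ON(src_cnt_to_hw(5) != 3);     /* 5 sources stored as 3 */
            BUILD_BUG_ON(src_cnt_to_sw(3) != 5);     /* and decoded back to 5 */
            BUILD_BUG_ON(src16_cnt_to_hw(12) != 3);  /* 16-source bias is 9 */
            BUILD_BUG_ON(src16_cnt_to_sw(3) != 12);
    }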
/*
* workaround for IOAT ver.3.0 null descriptor issue
@@ -57,19 +63,15 @@ enum ioat_irq_mode {
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
* @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @completion_pool: DMA buffers for completion ops
+ * @sed_hw_pool: DMA super descriptor pools
+ * @dma_dev: embedded struct dma_device
* @version: version of ioatdma device
* @msix_entries: irq handlers
* @idx: per channel data
* @dca: direct cache access context
- * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
- * @enumerate_channels: hw version specific channel enumeration
- * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_fn: select between the v2 and v3 cleanup routines
- * @timer_fn: select between the v2 and v3 timer watchdog routines
- * @self_test: hardware version specific self test for each supported op type
- *
- * Note: the v3 cleanup routine supports raid operations
+ * @irq_mode: interrupt mode (INTX, MSI, MSIX)
+ * @cap: read DMA capabilities register
*/
struct ioatdma_device {
struct pci_dev *pdev;
@@ -78,28 +80,21 @@ struct ioatdma_device {
struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
- struct dma_device common;
+ struct dma_device dma_dev;
u8 version;
struct msix_entry msix_entries[4];
- struct ioat_chan_common *idx[4];
+ struct ioatdma_chan *idx[4];
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
- void (*intr_quirk)(struct ioatdma_device *device);
- int (*enumerate_channels)(struct ioatdma_device *device);
- int (*reset_hw)(struct ioat_chan_common *chan);
- void (*cleanup_fn)(unsigned long data);
- void (*timer_fn)(unsigned long data);
- int (*self_test)(struct ioatdma_device *device);
};
-struct ioat_chan_common {
- struct dma_chan common;
+struct ioatdma_chan {
+ struct dma_chan dma_chan;
void __iomem *reg_base;
dma_addr_t last_completion;
spinlock_t cleanup_lock;
unsigned long state;
- #define IOAT_COMPLETION_PENDING 0
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
@@ -110,11 +105,32 @@ struct ioat_chan_common {
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
- struct ioatdma_device *device;
+ struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;
u64 *completion;
struct tasklet_struct cleanup_task;
struct kobject kobj;
+
+/* ioat v2 / v3 channel attributes
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
+ * @ring: software ring buffer implementation of hardware ring
+ * @prep_lock: serializes descriptor preparation (producers)
+ */
+ size_t xfercap_log;
+ u16 head;
+ u16 issued;
+ u16 tail;
+ u16 dmacount;
+ u16 alloc_order;
+ u16 produce;
+ struct ioat_ring_ent **ring;
+ spinlock_t prep_lock;
};
struct ioat_sysfs_entry {
@@ -123,28 +139,11 @@ struct ioat_sysfs_entry {
};
/**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
- struct ioat_chan_common base;
-
- size_t xfercap; /* XFERCAP register value expanded out */
-
- spinlock_t desc_lock;
- struct list_head free_desc;
- struct list_head used_desc;
-
- int pending;
- u16 desccount;
- u16 active;
-};
-
-/**
* struct ioat_sed_ent - wrapper around super extended hardware descriptor
* @hw: hardware SED
- * @sed_dma: dma address for the SED
- * @list: list member
+ * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
+ * @hw_pool: descriptor pool index
*/
struct ioat_sed_ent {
struct ioat_sed_raw_descriptor *hw;
@@ -153,39 +152,57 @@ struct ioat_sed_ent {
unsigned int hw_pool;
};
-static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
-{
- return container_of(c, struct ioat_chan_common, common);
-}
-
-static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
-{
- struct ioat_chan_common *chan = to_chan_common(c);
-
- return container_of(chan, struct ioat_dma_chan, base);
-}
-
-/* wrapper around hardware descriptor format + additional software fields */
-
/**
- * struct ioat_desc_sw - wrapper around hardware descriptor
+ * struct ioat_ring_ent - wrapper around hardware descriptor
* @hw: hardware DMA descriptor (for memcpy)
- * @node: this descriptor will either be on the free list,
- * or attached to a transaction list (tx_list)
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
* @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
* @id: identifier for debug
+ * @sed: pointer to the super extended descriptor software wrapper
*/
-struct ioat_desc_sw {
- struct ioat_dma_descriptor *hw;
- struct list_head node;
+
+struct ioat_ring_ent {
+ union {
+ struct ioat_dma_descriptor *hw;
+ struct ioat_xor_descriptor *xor;
+ struct ioat_xor_ext_descriptor *xor_ex;
+ struct ioat_pq_descriptor *pq;
+ struct ioat_pq_ext_descriptor *pq_ex;
+ struct ioat_pq_update_descriptor *pqu;
+ struct ioat_raw_descriptor *raw;
+ };
size_t len;
- struct list_head tx_list;
struct dma_async_tx_descriptor txd;
+ enum sum_check_flags *result;
#ifdef DEBUG
int id;
#endif
+ struct ioat_sed_ent *sed;
};
+extern const struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+extern int ioat_ring_max_alloc_order;
+extern struct kmem_cache *ioat_sed_cache;
+
+static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
+{
+ return container_of(c, struct ioatdma_chan, dma_chan);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
@@ -195,10 +212,10 @@ struct ioat_desc_sw {
#endif
static inline void
-__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
struct dma_async_tx_descriptor *tx, int id)
{
- struct device *dev = to_dev(chan);
+ struct device *dev = to_dev(ioat_chan);
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
@@ -208,25 +225,25 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
}
#define dump_desc_dbg(c, d) \
- ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+ ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
-static inline struct ioat_chan_common *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+static inline struct ioatdma_chan *
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
- return device->idx[index];
+ return ioat_dma->idx[index];
}
-static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
- u8 ver = chan->device->version;
+ u8 ver = ioat_chan->ioat_dma->version;
u64 status;
u32 status_lo;
/* We need to read the low address first as this causes the
* chipset to latch the upper bits for the subsequent read
*/
- status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
- status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+ status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+ status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
status <<= 32;
status |= status_lo;
@@ -235,16 +252,16 @@ static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
#if BITS_PER_LONG == 64
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
- u8 ver = chan->device->version;
+ u8 ver = ioat_chan->ioat_dma->version;
u64 status;
/* With IOAT v3.3 the status register is 64bit. */
if (ver >= IOAT_VER_3_3)
- status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+ status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
else
- status = ioat_chansts_32(chan);
+ status = ioat_chansts_32(ioat_chan);
return status;
}
@@ -253,56 +270,41 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan)
#define ioat_chansts ioat_chansts_32
#endif
-static inline void ioat_start(struct ioat_chan_common *chan)
-{
- u8 ver = chan->device->version;
-
- writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
static inline u64 ioat_chansts_to_addr(u64 status)
{
return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
-static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
- return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}
-static inline void ioat_suspend(struct ioat_chan_common *chan)
+static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
- u8 ver = chan->device->version;
+ u8 ver = ioat_chan->ioat_dma->version;
- writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ writeb(IOAT_CHANCMD_SUSPEND,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
-static inline void ioat_reset(struct ioat_chan_common *chan)
+static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
- u8 ver = chan->device->version;
+ u8 ver = ioat_chan->ioat_dma->version;
- writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
- u8 ver = chan->device->version;
+ u8 ver = ioat_chan->ioat_dma->version;
u8 cmd;
- cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
-static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- writel(addr & 0x00000000FFFFFFFF,
- chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(addr >> 32,
- chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-}
-
static inline bool is_ioat_active(unsigned long status)
{
return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
@@ -329,24 +331,111 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+ (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+ (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+{
+ return 1 << ioat_chan->alloc_order;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+{
+ return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
+ ioat_ring_size(ioat_chan));
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+{
+ return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
+ ioat_ring_size(ioat_chan));
+}
+
+static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+{
+ return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
+}
+
+static inline u16
+ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
+{
+ u16 num_descs = len >> ioat_chan->xfercap_log;
+
+ num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
+ return num_descs;
+}
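/*
 * Worked example with illustrative values (not from this commit): with
 * alloc_order = 3 the ring holds ioat_ring_size() == 8 entries, and the
 * 16-bit indices wrap via CIRC_CNT. For head = 10, issued = 9, tail = 6:
 *
 *	ioat_ring_active()  == CIRC_CNT(10, 6, 8) == 4	descs in flight
 *	ioat_ring_pending() == CIRC_CNT(10, 9, 8) == 1	desc not yet issued
 *	ioat_ring_space()   == 8 - 4 == 4		free slots
 *
 * With xfercap_log = 20 (1 MiB per descriptor), a 2.5 MiB transfer needs
 * ioat_xferlen_to_descs() == 3 descriptors: two full chunks plus the
 * remainder picked up by the !!(len & mask) term.
 */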
+
+static inline struct ioat_ring_ent *
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+{
+ return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
+}
+
+static inline void
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+{
+ writel(addr & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+ writel(addr >> 32,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
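/*
 * Illustrative: a chain address of 0x123456780 is programmed as two
 * 32-bit MMIO writes, 0x23456780 to IOAT2_CHAINADDR_OFFSET_LOW and
 * 0x00000001 to IOAT2_CHAINADDR_OFFSET_HIGH.
 */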
+
+/* IOAT Prep functions */
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags);
+
+/* IOAT Operation functions */
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+void ioat_cleanup_event(unsigned long data);
+void ioat_timer_event(unsigned long data);
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
+void ioat_issue_pending(struct dma_chan *chan);
+
+/* IOAT Init functions */
+bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
-void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
- struct dma_tx_state *txstate);
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
-void ioat_stop(struct ioat_chan_common *chan);
-extern const struct sysfs_ops ioat_sysfs_ops;
-extern struct ioat_sysfs_entry ioat_version_attr;
-extern struct ioat_sysfs_entry ioat_cap_attr;
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
+void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
deleted file mode 100644
index 69c7dfcad023..000000000000
--- a/drivers/dma/ioat/dma_v2.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
- * does asynchronous data movement and checksumming operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-#include "../dmaengine.h"
-
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
- "ioat2+: allocate 2^n descriptors per channel"
- " (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
- "ioat2+: upper limit for ring size (default: 16)");
-
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- ioat->dmacount += ioat2_ring_pending(ioat);
- ioat->issued = ioat->head;
- writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
- dev_dbg(to_dev(chan),
- "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
- __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-}
-
-void ioat2_issue_pending(struct dma_chan *c)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
- if (ioat2_ring_pending(ioat)) {
- spin_lock_bh(&ioat->prep_lock);
- __ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- }
-}
-
-/**
- * ioat2_update_pending - log pending descriptors
- * @ioat: ioat2+ channel
- *
- * Check if the number of unsubmitted descriptors has exceeded the
- * watermark. Called with prep_lock held
- */
-static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
-{
- if (ioat2_ring_pending(ioat) > ioat_pending_level)
- __ioat2_issue_pending(ioat);
-}
-
-static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
- struct ioat_ring_ent *desc;
- struct ioat_dma_descriptor *hw;
-
- if (ioat2_ring_space(ioat) < 1) {
- dev_err(to_dev(&ioat->base),
- "Unable to start null desc - ring full\n");
- return;
- }
-
- dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
- __func__, ioat->head, ioat->tail, ioat->issued);
- desc = ioat2_get_ring_ent(ioat, ioat->head);
-
- hw = desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = 1;
- hw->ctl_f.compl_write = 1;
- /* set size to non-zero value (channel returns error when size is 0) */
- hw->size = NULL_DESC_BUFFER_SIZE;
- hw->src_addr = 0;
- hw->dst_addr = 0;
- async_tx_ack(&desc->txd);
- ioat2_set_chainaddr(ioat, desc->txd.phys);
- dump_desc_dbg(ioat, desc);
- wmb();
- ioat->head += 1;
- __ioat2_issue_pending(ioat);
-}
-
-static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
- spin_lock_bh(&ioat->prep_lock);
- __ioat2_start_null_desc(ioat);
- spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
- struct ioat_chan_common *chan = &ioat->base;
- struct dma_async_tx_descriptor *tx;
- struct ioat_ring_ent *desc;
- bool seen_current = false;
- u16 active;
- int idx = ioat->tail, i;
-
- dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
- __func__, ioat->head, ioat->tail, ioat->issued);
-
- active = ioat2_ring_active(ioat);
- for (i = 0; i < active && !seen_current; i++) {
- smp_read_barrier_depends();
- prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
- desc = ioat2_get_ring_ent(ioat, idx + i);
- tx = &desc->txd;
- dump_desc_dbg(ioat, desc);
- if (tx->cookie) {
- dma_descriptor_unmap(tx);
- dma_cookie_complete(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
-
- if (tx->phys == phys_complete)
- seen_current = true;
- }
- smp_mb(); /* finish all descriptor reads before incrementing tail */
- ioat->tail = idx + i;
- BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-
- chan->last_completion = phys_complete;
- if (active - i == 0) {
- dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
- __func__);
- clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- }
-}
-
-/**
- * ioat2_cleanup - clean finished descriptors (advance tail pointer)
- * @chan: ioat channel to be cleaned up
- */
-static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
-
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-void ioat2_cleanup_event(unsigned long data)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
-
- ioat2_cleanup(ioat);
- if (!test_bit(IOAT_RUN, &chan->state))
- return;
- writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- /* set the tail to be re-issued */
- ioat->issued = ioat->tail;
- ioat->dmacount = 0;
- set_bit(IOAT_COMPLETION_PENDING, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
- dev_dbg(to_dev(chan),
- "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
- __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-
- if (ioat2_ring_pending(ioat)) {
- struct ioat_ring_ent *desc;
-
- desc = ioat2_get_ring_ent(ioat, ioat->tail);
- ioat2_set_chainaddr(ioat, desc->txd.phys);
- __ioat2_issue_pending(ioat);
- } else
- __ioat2_start_null_desc(ioat);
-}
-
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
-{
- unsigned long end = jiffies + tmo;
- int err = 0;
- u32 status;
-
- status = ioat_chansts(chan);
- if (is_ioat_active(status) || is_ioat_idle(status))
- ioat_suspend(chan);
- while (is_ioat_active(status) || is_ioat_idle(status)) {
- if (tmo && time_after(jiffies, end)) {
- err = -ETIMEDOUT;
- break;
- }
- status = ioat_chansts(chan);
- cpu_relax();
- }
-
- return err;
-}
-
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
-{
- unsigned long end = jiffies + tmo;
- int err = 0;
-
- ioat_reset(chan);
- while (ioat_reset_pending(chan)) {
- if (end && time_after(jiffies, end)) {
- err = -ETIMEDOUT;
- break;
- }
- cpu_relax();
- }
-
- return err;
-}
-
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
-
- ioat2_quiesce(chan, 0);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
-
- __ioat2_restart_chan(ioat);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- if (ioat2_ring_active(ioat)) {
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- return;
- }
-
- if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- else if (ioat->alloc_order > ioat_get_alloc_order()) {
- /* if the ring is idle, empty, and oversized try to step
- * down the size
- */
- reshape_ring(ioat, ioat->alloc_order - 1);
-
- /* keep shrinking until we get back to our minimum
- * default size
- */
- if (ioat->alloc_order > ioat_get_alloc_order())
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- }
-
-}
-
-void ioat2_timer_event(unsigned long data)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat2_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
-
-
- if (ioat2_ring_active(ioat))
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- else {
- spin_lock_bh(&ioat->prep_lock);
- check_active(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- }
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
- /* throw away whatever the channel was doing and get it initialized */
- u32 chanerr;
-
- ioat2_quiesce(chan, msecs_to_jiffies(100));
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
-/**
- * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
-{
- struct ioat2_dma_chan *ioat;
- struct device *dev = &device->pdev->dev;
- struct dma_device *dma = &device->common;
- u8 xfercap_log;
- int i;
-
- INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(device->idx)) {
- dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(device->idx));
- dma->chancnt = ARRAY_SIZE(device->idx);
- }
- xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
- xfercap_log &= 0x1f; /* bits [4:0] valid */
- if (xfercap_log == 0)
- return 0;
- dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
- /* FIXME which i/oat version is i7300? */
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
- if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
- dma->chancnt--;
-#endif
- for (i = 0; i < dma->chancnt; i++) {
- ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
- if (!ioat)
- break;
-
- ioat_init_channel(device, &ioat->base, i);
- ioat->xfercap_log = xfercap_log;
- spin_lock_init(&ioat->prep_lock);
- if (device->reset_hw(&ioat->base)) {
- i = 0;
- break;
- }
- }
- dma->chancnt = i;
- return i;
-}
-
-static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
- struct dma_chan *c = tx->chan;
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- dma_cookie_t cookie;
-
- cookie = dma_cookie_assign(tx);
- dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
- if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
- /* make descriptor updates visible before advancing ioat->head,
- * this is purposefully not smp_wmb() since we are also
- * publishing the descriptor updates to a dma device
- */
- wmb();
-
- ioat->head += ioat->produce;
-
- ioat2_update_pending(ioat);
- spin_unlock_bh(&ioat->prep_lock);
-
- return cookie;
-}
-
-static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
-{
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *desc;
- struct ioatdma_device *dma;
- dma_addr_t phys;
-
- dma = to_ioatdma_device(chan->device);
- hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
- if (!hw)
- return NULL;
- memset(hw, 0, sizeof(*hw));
-
- desc = kmem_cache_zalloc(ioat2_cache, flags);
- if (!desc) {
- pci_pool_free(dma->dma_pool, hw, phys);
- return NULL;
- }
-
- dma_async_tx_descriptor_init(&desc->txd, chan);
- desc->txd.tx_submit = ioat2_tx_submit_unlock;
- desc->hw = hw;
- desc->txd.phys = phys;
- return desc;
-}
-
-static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
-{
- struct ioatdma_device *dma;
-
- dma = to_ioatdma_device(chan->device);
- pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
- kmem_cache_free(ioat2_cache, desc);
-}
-
-static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
-{
- struct ioat_ring_ent **ring;
- int descs = 1 << order;
- int i;
-
- if (order > ioat_get_max_alloc_order())
- return NULL;
-
- /* allocate the array to hold the software ring */
- ring = kcalloc(descs, sizeof(*ring), flags);
- if (!ring)
- return NULL;
- for (i = 0; i < descs; i++) {
- ring[i] = ioat2_alloc_ring_ent(c, flags);
- if (!ring[i]) {
- while (i--)
- ioat2_free_ring_ent(ring[i], c);
- kfree(ring);
- return NULL;
- }
- set_desc_id(ring[i], i);
- }
-
- /* link descs */
- for (i = 0; i < descs-1; i++) {
- struct ioat_ring_ent *next = ring[i+1];
- struct ioat_dma_descriptor *hw = ring[i]->hw;
-
- hw->next = next->txd.phys;
- }
- ring[i]->hw->next = ring[0]->txd.phys;
-
- return ring;
-}
-
-void ioat2_free_chan_resources(struct dma_chan *c);
-
-/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
- * @chan: channel to be initialized
- */
-int ioat2_alloc_chan_resources(struct dma_chan *c)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioat_ring_ent **ring;
- u64 status;
- int order;
- int i = 0;
-
- /* have we already been set up? */
- if (ioat->ring)
- return 1 << ioat->alloc_order;
-
- /* Setup register to interrupt and write completion status on error */
- writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
- /* allocate a completion writeback area */
- /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
- chan->completion = pci_pool_alloc(chan->device->completion_pool,
- GFP_KERNEL, &chan->completion_dma);
- if (!chan->completion)
- return -ENOMEM;
-
- memset(chan->completion, 0, sizeof(*chan->completion));
- writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
- chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
- writel(((u64) chan->completion_dma) >> 32,
- chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
- order = ioat_get_alloc_order();
- ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
- if (!ring)
- return -ENOMEM;
-
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- ioat->ring = ring;
- ioat->head = 0;
- ioat->issued = 0;
- ioat->tail = 0;
- ioat->alloc_order = order;
- set_bit(IOAT_RUN, &chan->state);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-
- ioat2_start_null_desc(ioat);
-
- /* check that we got off the ground */
- do {
- udelay(1);
- status = ioat_chansts(chan);
- } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
- if (is_ioat_active(status) || is_ioat_idle(status)) {
- return 1 << ioat->alloc_order;
- } else {
- u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
- dev_WARN(to_dev(chan),
- "failed to start channel chanerr: %#x\n", chanerr);
- ioat2_free_chan_resources(c);
- return -EFAULT;
- }
-}
-
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
-{
- /* reshape differs from normal ring allocation in that we want
- * to allocate a new software ring while only
- * extending/truncating the hardware ring
- */
- struct ioat_chan_common *chan = &ioat->base;
- struct dma_chan *c = &chan->common;
- const u32 curr_size = ioat2_ring_size(ioat);
- const u16 active = ioat2_ring_active(ioat);
- const u32 new_size = 1 << order;
- struct ioat_ring_ent **ring;
- u16 i;
-
- if (order > ioat_get_max_alloc_order())
- return false;
-
- /* double check that we have at least 1 free descriptor */
- if (active == curr_size)
- return false;
-
- /* when shrinking, verify that we can hold the current active
- * set in the new ring
- */
- if (active >= new_size)
- return false;
-
- /* allocate the array to hold the software ring */
- ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
- if (!ring)
- return false;
-
- /* allocate/trim descriptors as needed */
- if (new_size > curr_size) {
- /* copy current descriptors to the new ring */
- for (i = 0; i < curr_size; i++) {
- u16 curr_idx = (ioat->tail+i) & (curr_size-1);
- u16 new_idx = (ioat->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* add new descriptors to the ring */
- for (i = curr_size; i < new_size; i++) {
- u16 new_idx = (ioat->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
- if (!ring[new_idx]) {
- while (i--) {
- u16 new_idx = (ioat->tail+i) & (new_size-1);
-
- ioat2_free_ring_ent(ring[new_idx], c);
- }
- kfree(ring);
- return false;
- }
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* hw link new descriptors */
- for (i = curr_size-1; i < new_size; i++) {
- u16 new_idx = (ioat->tail+i) & (new_size-1);
- struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
- struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
- hw->next = next->txd.phys;
- }
- } else {
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *next;
-
- /* copy current descriptors to the new ring, dropping the
- * removed descriptors
- */
- for (i = 0; i < new_size; i++) {
- u16 curr_idx = (ioat->tail+i) & (curr_size-1);
- u16 new_idx = (ioat->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* free deleted descriptors */
- for (i = new_size; i < curr_size; i++) {
- struct ioat_ring_ent *ent;
-
- ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
- ioat2_free_ring_ent(ent, c);
- }
-
- /* fix up hardware ring */
- hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
- next = ring[(ioat->tail+new_size) & (new_size-1)];
- hw->next = next->txd.phys;
- }
-
- dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
- __func__, new_size);
-
- kfree(ioat->ring);
- ioat->ring = ring;
- ioat->alloc_order = order;
-
- return true;
-}
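/*
 * Illustration of the copy loops above (hypothetical values): growing
 * from curr_size = 8 to new_size = 16 with tail = 6 moves the entry at
 * old index (6 + i) & 7 to new index (6 + i) & 15; for i = 3 that maps
 * old slot 1 to new slot 9, so the active span stays contiguous from
 * the tail and submission order is preserved.
 */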
-
-/**
- * ioat2_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat2,3 channel (ring) to operate on
- * @num_descs: allocation length
- */
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
-{
- struct ioat_chan_common *chan = &ioat->base;
- bool retry;
-
- retry:
- spin_lock_bh(&ioat->prep_lock);
- /* never allow the last descriptor to be consumed, we need at
- * least one free at all times to allow for on-the-fly ring
- * resizing.
- */
- if (likely(ioat2_ring_space(ioat) > num_descs)) {
- dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
- ioat->produce = num_descs;
- return 0; /* with ioat->prep_lock held */
- }
- retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
- spin_unlock_bh(&ioat->prep_lock);
-
- /* is another cpu already trying to expand the ring? */
- if (retry)
- goto retry;
-
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- retry = reshape_ring(ioat, ioat->alloc_order + 1);
- clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-
- /* if we were able to expand the ring retry the allocation */
- if (retry)
- goto retry;
-
- if (printk_ratelimit())
- dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-
- /* progress reclaim in the allocation failure case we may be
- * called under bh_disabled so we need to trigger the timer
- * event directly
- */
- if (time_is_before_jiffies(chan->timer.expires)
- && timer_pending(&chan->timer)) {
- struct ioatdma_device *device = chan->device;
-
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- device->timer_fn((unsigned long) &chan->common);
- }
-
- return -ENOMEM;
-}
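/*
 * Sketch of the producer protocol implied above (illustrative, not part
 * of this commit): a prep routine such as the memcpy prep that follows
 * reserves slots and returns with prep_lock held, and the lock is only
 * released in tx_submit, which keeps descriptor submission in ring order:
 *
 *	if (ioat2_check_space_lock(ioat, num_descs) == 0)
 *		idx = ioat->head;	  first reserved slot to fill
 *	else
 *		return NULL;		  ring full even after reshape
 *	...
 *	return &desc->txd;		  still holding prep_lock;
 *					  ioat2_tx_submit_unlock() later
 *					  advances head and unlocks
 */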
-
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *desc;
- dma_addr_t dst = dma_dest;
- dma_addr_t src = dma_src;
- size_t total_len = len;
- int num_descs, idx, i;
-
- num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
- idx = ioat->head;
- else
- return NULL;
- i = 0;
- do {
- size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
-
- desc = ioat2_get_ring_ent(ioat, idx + i);
- hw = desc->hw;
-
- hw->size = copy;
- hw->ctl = 0;
- hw->src_addr = src;
- hw->dst_addr = dst;
-
- len -= copy;
- dst += copy;
- src += copy;
- dump_desc_dbg(ioat, desc);
- } while (++i < num_descs);
-
- desc->txd.flags = flags;
- desc->len = total_len;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
- hw->ctl_f.compl_write = 1;
- dump_desc_dbg(ioat, desc);
- /* we leave the channel locked to ensure in order submission */
-
- return &desc->txd;
-}
-
-/**
- * ioat2_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-void ioat2_free_chan_resources(struct dma_chan *c)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioatdma_device *device = chan->device;
- struct ioat_ring_ent *desc;
- const u16 total_descs = 1 << ioat->alloc_order;
- int descs;
- int i;
-
- /* Before freeing channel resources first check
- * if they have been previously allocated for this channel.
- */
- if (!ioat->ring)
- return;
-
- ioat_stop(chan);
- device->reset_hw(chan);
-
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- descs = ioat2_ring_space(ioat);
- dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
- for (i = 0; i < descs; i++) {
- desc = ioat2_get_ring_ent(ioat, ioat->head + i);
- ioat2_free_ring_ent(desc, c);
- }
-
- if (descs < total_descs)
- dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
- total_descs - descs);
-
- for (i = 0; i < total_descs - descs; i++) {
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
- dump_desc_dbg(ioat, desc);
- ioat2_free_ring_ent(desc, c);
- }
-
- kfree(ioat->ring);
- ioat->ring = NULL;
- ioat->alloc_order = 0;
- pci_pool_free(device->completion_pool, chan->completion,
- chan->completion_dma);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-
- chan->last_completion = 0;
- chan->completion_dma = 0;
- ioat->dmacount = 0;
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
- return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
- /* ...taken outside the lock, no need to be precise */
- return sprintf(page, "%d\n", ioat2_ring_active(ioat));
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static struct attribute *ioat2_attrs[] = {
- &ring_size_attr.attr,
- &ring_active_attr.attr,
- &ioat_cap_attr.attr,
- &ioat_version_attr.attr,
- NULL,
-};
-
-struct kobj_type ioat2_ktype = {
- .sysfs_ops = &ioat_sysfs_ops,
- .default_attrs = ioat2_attrs,
-};
-
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
-{
- struct pci_dev *pdev = device->pdev;
- struct dma_device *dma;
- struct dma_chan *c;
- struct ioat_chan_common *chan;
- int err;
-
- device->enumerate_channels = ioat2_enumerate_channels;
- device->reset_hw = ioat2_reset_hw;
- device->cleanup_fn = ioat2_cleanup_event;
- device->timer_fn = ioat2_timer_event;
- device->self_test = ioat_dma_self_test;
- dma = &device->common;
- dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
- dma->device_issue_pending = ioat2_issue_pending;
- dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
- dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_tx_status = ioat_dma_tx_status;
-
- err = ioat_probe(device);
- if (err)
- return err;
-
- list_for_each_entry(c, &dma->channels, device_node) {
- chan = to_chan_common(c);
- writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
- chan->reg_base + IOAT_DCACTRL_OFFSET);
- }
-
- err = ioat_register(device);
- if (err)
- return err;
-
- ioat_kobject_add(device, &ioat2_ktype);
-
- if (dca)
- device->dca = ioat2_dca_init(pdev, device->reg_base);
-
- return err;
-}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
deleted file mode 100644
index bf24ebe874b0..000000000000
--- a/drivers/dma/ioat/dma_v2.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_V2_H
-#define IOATDMA_V2_H
-
-#include <linux/dmaengine.h>
-#include <linux/circ_buf.h>
-#include "dma.h"
-#include "hw.h"
-
-
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
- (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
- (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
-
-/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
- * @base: common ioat channel parameters
- * @xfercap_log; log2 of channel max transfer length (for fast division)
- * @head: allocated index
- * @issued: hardware notification point
- * @tail: cleanup index
- * @dmacount: identical to 'head' except for occasionally resetting to zero
- * @alloc_order: log2 of the number of allocated descriptors
- * @produce: number of descriptors to produce at submit time
- * @ring: software ring buffer implementation of hardware ring
- * @prep_lock: serializes descriptor preparation (producers)
- */
-struct ioat2_dma_chan {
- struct ioat_chan_common base;
- size_t xfercap_log;
- u16 head;
- u16 issued;
- u16 tail;
- u16 dmacount;
- u16 alloc_order;
- u16 produce;
- struct ioat_ring_ent **ring;
- spinlock_t prep_lock;
-};
-
-static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
-{
- struct ioat_chan_common *chan = to_chan_common(c);
-
- return container_of(chan, struct ioat2_dma_chan, base);
-}
-
-static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
-{
- return 1 << ioat->alloc_order;
-}
-
-/* count of descriptors in flight with the engine */
-static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
-{
- return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
-}
-
-/* count of descriptors pending submission to hardware */
-static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
-{
- return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
-}
-
-static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
-{
- return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
-}
-
-static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
-{
- u16 num_descs = len >> ioat->xfercap_log;
-
- num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
- return num_descs;
-}
-
-/**
- * struct ioat_ring_ent - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @fill: hardware fill descriptor
- * @xor: hardware xor descriptor
- * @xor_ex: hardware xor extension descriptor
- * @pq: hardware pq descriptor
- * @pq_ex: hardware pq extension descriptor
- * @pqu: hardware pq update descriptor
- * @raw: hardware raw (un-typed) descriptor
- * @txd: the generic software descriptor for all engines
- * @len: total transaction length for unmap
- * @result: asynchronous result of validate operations
- * @id: identifier for debug
- */
-
-struct ioat_ring_ent {
- union {
- struct ioat_dma_descriptor *hw;
- struct ioat_xor_descriptor *xor;
- struct ioat_xor_ext_descriptor *xor_ex;
- struct ioat_pq_descriptor *pq;
- struct ioat_pq_ext_descriptor *pq_ex;
- struct ioat_pq_update_descriptor *pqu;
- struct ioat_raw_descriptor *raw;
- };
- size_t len;
- struct dma_async_tx_descriptor txd;
- enum sum_check_flags *result;
- #ifdef DEBUG
- int id;
- #endif
- struct ioat_sed_ent *sed;
-};
-
-static inline struct ioat_ring_ent *
-ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
-{
- return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
-}
-
-static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- writel(addr & 0x00000000FFFFFFFF,
- chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
- writel(addr >> 32,
- chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-}
-
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags);
-void ioat2_issue_pending(struct dma_chan *chan);
-int ioat2_alloc_chan_resources(struct dma_chan *c);
-void ioat2_free_chan_resources(struct dma_chan *c);
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_event(unsigned long data);
-void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
-extern struct kobj_type ioat2_ktype;
-extern struct kmem_cache *ioat2_cache;
-#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
deleted file mode 100644
index 64790a45ef5d..000000000000
--- a/drivers/dma/ioat/dma_v3.c
+++ /dev/null
@@ -1,1717 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * BSD LICENSE
- *
- * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Support routines for v3+ hardware
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include "../dmaengine.h"
-#include "registers.h"
-#include "hw.h"
-#include "dma.h"
-#include "dma_v2.h"
-
-extern struct kmem_cache *ioat3_sed_cache;
-
-/* ioat hardware assumes at least two sources for raid operations */
-#define src_cnt_to_sw(x) ((x) + 2)
-#define src_cnt_to_hw(x) ((x) - 2)
-#define ndest_to_sw(x) ((x) + 1)
-#define ndest_to_hw(x) ((x) - 1)
-#define src16_cnt_to_sw(x) ((x) + 9)
-#define src16_cnt_to_hw(x) ((x) - 9)
-
-/* provide a lookup table for setting the source address in the base or
- * extended descriptor of an xor or pq descriptor
- */
-static const u8 xor_idx_to_desc = 0xe0;
-static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc = 0xf8;
-static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2 };
-static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
-static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
- 0, 1, 2, 3, 4, 5, 6 };
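/*
 * Worked example from the tables above (illustrative): for xor source
 * index 5, xor_idx_to_desc >> 5 & 1 == 1, so the address is placed in
 * the extended descriptor at field xor_idx_to_field[5] == 0; for source
 * index 2 the selector bit is 0, so field 5 of the base descriptor is
 * used instead.
 */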
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat);
-
-static void xor_set_src(struct ioat_raw_descriptor *descs[2],
- dma_addr_t addr, u32 offset, int idx)
-{
- struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
- raw->field[xor_idx_to_field[idx]] = addr + offset;
-}
-
-static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
- struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
- return raw->field[pq_idx_to_field[idx]];
-}
-
-static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
-{
- struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
- return raw->field[pq16_idx_to_field[idx]];
-}
-
-static void pq_set_src(struct ioat_raw_descriptor *descs[2],
- dma_addr_t addr, u32 offset, u8 coef, int idx)
-{
- struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
- struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
- raw->field[pq_idx_to_field[idx]] = addr + offset;
- pq->coef[idx] = coef;
-}
-
-static bool is_jf_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
- return true;
- default:
- return false;
- }
-}
-
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
- return true;
- default:
- return false;
- }
-}
-
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
- return true;
- default:
- return false;
- }
-
-}
-
-static bool is_hsw_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
- case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
- return true;
- default:
- return false;
- }
-
-}
-
-static bool is_xeon_cb32(struct pci_dev *pdev)
-{
- return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev);
-}
-
-static bool is_bwd_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
- case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
- case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
- case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
- /* even though not Atom, BDX-DE has same DMA silicon */
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
- return true;
- default:
- return false;
- }
-}
-
-static bool is_bwd_noraid(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
- case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
- case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
- return true;
- default:
- return false;
- }
-
-}
-
-static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
- dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
-{
- struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
- struct ioat_pq16a_descriptor *pq16 =
- (struct ioat_pq16a_descriptor *)desc[1];
- struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
- raw->field[pq16_idx_to_field[idx]] = addr + offset;
-
- if (idx < 8)
- pq->coef[idx] = coef;
- else
- pq16->coef[idx - 8] = coef;
-}
-
-static struct ioat_sed_ent *
-ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
-{
- struct ioat_sed_ent *sed;
- gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
-
- sed = kmem_cache_alloc(ioat3_sed_cache, flags);
- if (!sed)
- return NULL;
-
- sed->hw_pool = hw_pool;
- sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
- flags, &sed->dma);
- if (!sed->hw) {
- kmem_cache_free(ioat3_sed_cache, sed);
- return NULL;
- }
-
- return sed;
-}
-
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
-{
- if (!sed)
- return;
-
- dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
- kmem_cache_free(ioat3_sed_cache, sed);
-}
-
-static bool desc_has_ext(struct ioat_ring_ent *desc)
-{
- struct ioat_dma_descriptor *hw = desc->hw;
-
- if (hw->ctl_f.op == IOAT_OP_XOR ||
- hw->ctl_f.op == IOAT_OP_XOR_VAL) {
- struct ioat_xor_descriptor *xor = desc->xor;
-
- if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
- return true;
- } else if (hw->ctl_f.op == IOAT_OP_PQ ||
- hw->ctl_f.op == IOAT_OP_PQ_VAL) {
- struct ioat_pq_descriptor *pq = desc->pq;
-
- if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
- return true;
- }
-
- return false;
-}
-
-static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
-{
- u64 phys_complete;
- u64 completion;
-
- completion = *chan->completion;
- phys_complete = ioat_chansts_to_addr(completion);
-
- dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
- (unsigned long long) phys_complete);
-
- return phys_complete;
-}
-
-static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
- u64 *phys_complete)
-{
- *phys_complete = ioat3_get_current_completion(chan);
- if (*phys_complete == chan->last_completion)
- return false;
-
- clear_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
- return true;
-}
-
-static void
-desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
-{
- struct ioat_dma_descriptor *hw = desc->hw;
-
- switch (hw->ctl_f.op) {
- case IOAT_OP_PQ_VAL:
- case IOAT_OP_PQ_VAL_16S:
- {
- struct ioat_pq_descriptor *pq = desc->pq;
-
- /* check if there's error written */
- if (!pq->dwbes_f.wbes)
- return;
-
- /* need to set a chanerr var for checking to clear later */
-
- if (pq->dwbes_f.p_val_err)
- *desc->result |= SUM_CHECK_P_RESULT;
-
- if (pq->dwbes_f.q_val_err)
- *desc->result |= SUM_CHECK_Q_RESULT;
-
- return;
- }
- default:
- return;
- }
-}
-
-/**
- * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
- *
- * The difference from the dma_v2.c __cleanup() is that this routine
- * handles extended descriptors and dma-unmapping raid operations.
- */
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
- struct ioat_chan_common *chan = &ioat->base;
- struct ioatdma_device *device = chan->device;
- struct ioat_ring_ent *desc;
- bool seen_current = false;
- int idx = ioat->tail, i;
- u16 active;
-
- dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
- __func__, ioat->head, ioat->tail, ioat->issued);
-
- /*
- * At restart of the channel, the completion address and the
- * channel status will be 0 due to starting a new chain. Since
- * it's a new chain and the first descriptor "fails", there is
- * nothing to clean up. We do not want to reap the entire submitted
- * chain due to this 0 address value and then BUG.
- */
- if (!phys_complete)
- return;
-
- active = ioat2_ring_active(ioat);
- for (i = 0; i < active && !seen_current; i++) {
- struct dma_async_tx_descriptor *tx;
-
- smp_read_barrier_depends();
- prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
- desc = ioat2_get_ring_ent(ioat, idx + i);
- dump_desc_dbg(ioat, desc);
-
- /* set err stat if we are using dwbes */
- if (device->cap & IOAT_CAP_DWBES)
- desc_get_errstat(ioat, desc);
-
- tx = &desc->txd;
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
-
- if (tx->phys == phys_complete)
- seen_current = true;
-
- /* skip extended descriptors */
- if (desc_has_ext(desc)) {
- BUG_ON(i + 1 >= active);
- i++;
- }
-
- /* cleanup super extended descriptors */
- if (desc->sed) {
- ioat3_free_sed(device, desc->sed);
- desc->sed = NULL;
- }
- }
- smp_mb(); /* finish all descriptor reads before incrementing tail */
- ioat->tail = idx + i;
- BUG_ON(active && !seen_current); /* no active descs have written a completion? */
- chan->last_completion = phys_complete;
-
- if (active - i == 0) {
- dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
- __func__);
- clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- }
- /* 5 microsecond delay per pending descriptor */
- writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
- chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
-}
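
Stripped of the unmap and callback plumbing, __cleanup() is a bounded walk of a power-of-two ring: reap entries from the tail forward, stop after the entry whose address matches the completion writeback, and consume an extra slot whenever an entry carried an extended descriptor. A standalone sketch of that traversal, with made-up ring contents:

#include <stdbool.h>
#include <stdio.h>

#define RING_ORDER 4
#define RING_SIZE  (1 << RING_ORDER)

struct ent {
	unsigned long phys;
	bool has_ext;      /* the next slot holds this entry's extension */
};

/* returns the new tail after reaping up to phys_complete */
static int cleanup(struct ent *ring, int tail, int active,
		   unsigned long phys_complete)
{
	bool seen_current = false;
	int i;

	for (i = 0; i < active && !seen_current; i++) {
		struct ent *e = &ring[(tail + i) & (RING_SIZE - 1)];

		if (e->phys == phys_complete)
			seen_current = true;
		if (e->has_ext)
			i++;   /* extension occupies the following slot */
	}
	return tail + i;
}

int main(void)
{
	struct ent ring[RING_SIZE] = {
		[0] = { 0x1000, false },
		[1] = { 0x1040, true },    /* slot 2 is its extension */
		[3] = { 0x10c0, false },
	};

	printf("new tail: %d\n", cleanup(ring, 0, 4, 0x1040));  /* 3 */
	return 0;
}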
-
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- u64 phys_complete;
-
- spin_lock_bh(&chan->cleanup_lock);
-
- if (ioat3_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
-
- if (is_ioat_halted(*chan->completion)) {
- u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
- if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- ioat3_eh(ioat);
- }
- }
-
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat3_cleanup_event(unsigned long data)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
-
- ioat3_cleanup(ioat);
- if (!test_bit(IOAT_RUN, &chan->state))
- return;
- writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- u64 phys_complete;
-
- ioat2_quiesce(chan, 0);
- if (ioat3_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
-
- __ioat2_restart_chan(ioat);
-}
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- struct pci_dev *pdev = to_pdev(chan);
- struct ioat_dma_descriptor *hw;
- struct dma_async_tx_descriptor *tx;
- u64 phys_complete;
- struct ioat_ring_ent *desc;
- u32 err_handled = 0;
- u32 chanerr_int;
- u32 chanerr;
-
- /* cleanup so tail points to descriptor that caused the error */
- if (ioat3_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
-
- dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
- __func__, chanerr, chanerr_int);
-
- desc = ioat2_get_ring_ent(ioat, ioat->tail);
- hw = desc->hw;
- dump_desc_dbg(ioat, desc);
-
- switch (hw->ctl_f.op) {
- case IOAT_OP_XOR_VAL:
- if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
- *desc->result |= SUM_CHECK_P_RESULT;
- err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
- }
- break;
- case IOAT_OP_PQ_VAL:
- case IOAT_OP_PQ_VAL_16S:
- if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
- *desc->result |= SUM_CHECK_P_RESULT;
- err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
- }
- if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
- *desc->result |= SUM_CHECK_Q_RESULT;
- err_handled |= IOAT_CHANERR_XOR_Q_ERR;
- }
- break;
- }
-
- /* fault on unhandled error or spurious halt */
- if (chanerr ^ err_handled || chanerr == 0) {
- dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
- __func__, chanerr, err_handled);
- BUG();
- } else { /* cleanup the faulty descriptor */
- tx = &desc->txd;
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
- }
-
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
-
- /* mark faulting descriptor as complete */
- *chan->completion = desc->txd.phys;
-
- spin_lock_bh(&ioat->prep_lock);
- ioat3_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
-}
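
The fatal-versus-recoverable decision in ioat3_eh() reduces to two conditions: any CHANERR bit the switch did not account for, or a halt with no error bits set at all, is fatal. Modeled standalone with stand-in bit values:

#include <stdbool.h>
#include <stdio.h>

#define ERR_XOR_P_OR_CRC 0x1u   /* stand-ins for the CHANERR bits */
#define ERR_XOR_Q        0x2u

static bool is_fatal(unsigned int chanerr, unsigned int err_handled)
{
	/* leftover unhandled bits, or a spurious halt with no error */
	return (chanerr ^ err_handled) || chanerr == 0;
}

int main(void)
{
	printf("%d\n", is_fatal(ERR_XOR_P_OR_CRC, ERR_XOR_P_OR_CRC));             /* 0 */
	printf("%d\n", is_fatal(ERR_XOR_P_OR_CRC | ERR_XOR_Q, ERR_XOR_P_OR_CRC)); /* 1 */
	printf("%d\n", is_fatal(0, 0));                                           /* 1 */
	return 0;
}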
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
-
- if (ioat2_ring_active(ioat)) {
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- return;
- }
-
- if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- else if (ioat->alloc_order > ioat_get_alloc_order()) {
- /* if the ring is idle, empty, and oversized, try to step
- * down the size
- */
- reshape_ring(ioat, ioat->alloc_order - 1);
-
- /* keep shrinking until we get back to our minimum
- * default size
- */
- if (ioat->alloc_order > ioat_get_alloc_order())
- mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
- }
-}
-
-static void ioat3_timer_event(unsigned long data)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat3_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
-
- if (ioat2_ring_active(ioat))
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- else {
- spin_lock_bh(&ioat->prep_lock);
- check_active(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- }
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static enum dma_status
-ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- enum dma_status ret;
-
- ret = dma_cookie_status(c, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
-
- ioat3_cleanup(ioat);
-
- return dma_cookie_status(c, cookie, txstate);
-}
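
ioat3_tx_status() is the common dmaengine poll idiom: check the cookie, and only if it is not yet complete run the reclaim path and check again, so a completion that raced the first query is still reported. A sketch with stand-in types; cookie_status() plays the role of dma_cookie_status() and the assignment to *last_completed stands in for ioat3_cleanup():

#include <stdio.h>

enum status { IN_PROGRESS, COMPLETE };

static enum status cookie_status(int cookie, int last_completed)
{
	return cookie <= last_completed ? COMPLETE : IN_PROGRESS;
}

/* models ioat3_tx_status(): query, reclaim, query again */
static enum status tx_status(int cookie, int *last_completed, int hw_progress)
{
	if (cookie_status(cookie, *last_completed) == COMPLETE)
		return COMPLETE;

	*last_completed = hw_progress;   /* reclaim path advances the bookkeeping */

	return cookie_status(cookie, *last_completed);
}

int main(void)
{
	int last = 3;

	/* cookie 5 finished in hardware but had not yet been reaped */
	printf("%d\n", tx_status(5, &last, 5) == COMPLETE);  /* 1 */
	return 0;
}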
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
- dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
- size_t len, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_ring_ent *compl_desc;
- struct ioat_ring_ent *desc;
- struct ioat_ring_ent *ext;
- size_t total_len = len;
- struct ioat_xor_descriptor *xor;
- struct ioat_xor_ext_descriptor *xor_ex = NULL;
- struct ioat_dma_descriptor *hw;
- int num_descs, with_ext, idx, i;
- u32 offset = 0;
- u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
-
- BUG_ON(src_cnt < 2);
-
- num_descs = ioat2_xferlen_to_descs(ioat, len);
- /* we need 2x the number of descriptors to cover more than 5
- * sources
- */
- if (src_cnt > 5) {
- with_ext = 1;
- num_descs *= 2;
- } else
- with_ext = 0;
-
- /* completion writes from the raid engine may pass completion
- * writes from the legacy engine, so we need one extra null
- * (legacy) descriptor to ensure all completion writes arrive in
- * order.
- */
- if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
- idx = ioat->head;
- else
- return NULL;
- i = 0;
- do {
- struct ioat_raw_descriptor *descs[2];
- size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
- int s;
-
- desc = ioat2_get_ring_ent(ioat, idx + i);
- xor = desc->xor;
-
- /* save a branch by unconditionally retrieving the
- * extended descriptor; xor_set_src() knows not to write
- * to it in the single descriptor case
- */
- ext = ioat2_get_ring_ent(ioat, idx + i + 1);
- xor_ex = ext->xor_ex;
-
- descs[0] = (struct ioat_raw_descriptor *) xor;
- descs[1] = (struct ioat_raw_descriptor *) xor_ex;
- for (s = 0; s < src_cnt; s++)
- xor_set_src(descs, src[s], offset, s);
- xor->size = xfer_size;
- xor->dst_addr = dest + offset;
- xor->ctl = 0;
- xor->ctl_f.op = op;
- xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
-
- len -= xfer_size;
- offset += xfer_size;
- dump_desc_dbg(ioat, desc);
- } while ((i += 1 + with_ext) < num_descs);
-
- /* last xor descriptor carries the unmap parameters and fence bit */
- desc->txd.flags = flags;
- desc->len = total_len;
- if (result)
- desc->result = result;
- xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
- /* completion descriptor carries interrupt bit */
- compl_desc = ioat2_get_ring_ent(ioat, idx + i);
- compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
- hw = compl_desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- dump_desc_dbg(ioat, compl_desc);
-
- /* we leave the channel locked to ensure in order submission */
- return &compl_desc->txd;
-}
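
The reservation math above deserves a worked example: one descriptor per xfercap-sized chunk of the transfer, doubled when more than five sources force an extended descriptor, plus the extra null descriptor for the completion-ordering workaround. Standalone arithmetic, with an assumed xfercap and transfer length:

#include <stdio.h>

int main(void)
{
	unsigned int xfercap_log = 20;           /* assumed: 1 MiB per descriptor */
	unsigned long long len = 5ull << 20;     /* 5 MiB xor */
	unsigned int src_cnt = 7;                /* > 5 sources -> extended descs */
	unsigned long long cap = 1ull << xfercap_log;

	/* ioat2_xferlen_to_descs(): round the length up to whole chunks */
	unsigned long long num_descs = (len + cap - 1) / cap;

	if (src_cnt > 5)
		num_descs *= 2;                  /* every entry drags an ext slot */

	printf("reserve %llu ring slots + 1 null completion descriptor\n",
	       num_descs);                       /* 10 + 1 */
	return 0;
}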
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
- unsigned int src_cnt, size_t len, unsigned long flags)
-{
- return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
- unsigned int src_cnt, size_t len,
- enum sum_check_flags *result, unsigned long flags)
-{
- /* the cleanup routine only sets bits on validate failure; it
- * does not clear bits on validate success... so clear it here
- */
- *result = 0;
-
- return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
- src_cnt - 1, len, flags);
-}
-
-static void
-dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
-{
- struct device *dev = to_dev(&ioat->base);
- struct ioat_pq_descriptor *pq = desc->pq;
- struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
- struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
- int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
- int i;
-
- dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
- " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
- " src_cnt: %d)\n",
- desc_id(desc), (unsigned long long) desc->txd.phys,
- (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
- desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
- pq->ctl_f.compl_write,
- pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
- pq->ctl_f.src_cnt);
- for (i = 0; i < src_cnt; i++)
- dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
- (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
- dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
- dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
- dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
-}
-
-static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
- struct ioat_ring_ent *desc)
-{
- struct device *dev = to_dev(&ioat->base);
- struct ioat_pq_descriptor *pq = desc->pq;
- struct ioat_raw_descriptor *descs[] = { (void *)pq,
- (void *)pq,
- (void *)pq };
- int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
- int i;
-
- if (desc->sed) {
- descs[1] = (void *)desc->sed->hw;
- descs[2] = (void *)desc->sed->hw + 64;
- }
-
- dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
- " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
- " src_cnt: %d)\n",
- desc_id(desc), (unsigned long long) desc->txd.phys,
- (unsigned long long) pq->next,
- desc->txd.flags, pq->size, pq->ctl,
- pq->ctl_f.op, pq->ctl_f.int_en,
- pq->ctl_f.compl_write,
- pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
- pq->ctl_f.src_cnt);
- for (i = 0; i < src_cnt; i++) {
- dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
- (unsigned long long) pq16_get_src(descs, i),
- pq->coef[i]);
- }
- dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
- dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
- const dma_addr_t *dst, const dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf,
- size_t len, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioatdma_device *device = chan->device;
- struct ioat_ring_ent *compl_desc;
- struct ioat_ring_ent *desc;
- struct ioat_ring_ent *ext;
- size_t total_len = len;
- struct ioat_pq_descriptor *pq;
- struct ioat_pq_ext_descriptor *pq_ex = NULL;
- struct ioat_dma_descriptor *hw;
- u32 offset = 0;
- u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
- int i, s, idx, with_ext, num_descs;
- int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
-
- dev_dbg(to_dev(chan), "%s\n", __func__);
- /* the engine requires at least two sources (we provide
- * at least 1 implied source in the DMA_PREP_CONTINUE case)
- */
- BUG_ON(src_cnt + dmaf_continue(flags) < 2);
-
- num_descs = ioat2_xferlen_to_descs(ioat, len);
- /* we need 2x the number of descriptors to cover more than 3
- * sources (we need 1 extra source in the q-only continuation
- * case and 3 extra sources in the p+q continuation case).
- */
- if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
- (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
- with_ext = 1;
- num_descs *= 2;
- } else
- with_ext = 0;
-
- /* completion writes from the raid engine may pass completion
- * writes from the legacy engine, so we need one extra null
- * (legacy) descriptor to ensure all completion writes arrive in
- * order.
- */
- if (likely(num_descs) &&
- ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
- idx = ioat->head;
- else
- return NULL;
- i = 0;
- do {
- struct ioat_raw_descriptor *descs[2];
- size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
- desc = ioat2_get_ring_ent(ioat, idx + i);
- pq = desc->pq;
-
- /* save a branch by unconditionally retrieving the
- * extended descriptor; pq_set_src() knows not to write
- * to it in the single descriptor case
- */
- ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
- pq_ex = ext->pq_ex;
-
- descs[0] = (struct ioat_raw_descriptor *) pq;
- descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-
- for (s = 0; s < src_cnt; s++)
- pq_set_src(descs, src[s], offset, scf[s], s);
-
- /* see the comment for dma_maxpq in include/linux/dmaengine.h */
- if (dmaf_p_disabled_continue(flags))
- pq_set_src(descs, dst[1], offset, 1, s++);
- else if (dmaf_continue(flags)) {
- pq_set_src(descs, dst[0], offset, 0, s++);
- pq_set_src(descs, dst[1], offset, 1, s++);
- pq_set_src(descs, dst[1], offset, 0, s++);
- }
- pq->size = xfer_size;
- pq->p_addr = dst[0] + offset;
- pq->q_addr = dst[1] + offset;
- pq->ctl = 0;
- pq->ctl_f.op = op;
- /* we turn on descriptor write back error status */
- if (device->cap & IOAT_CAP_DWBES)
- pq->ctl_f.wb_en = result ? 1 : 0;
- pq->ctl_f.src_cnt = src_cnt_to_hw(s);
- pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
- pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
- len -= xfer_size;
- offset += xfer_size;
- } while ((i += 1 + with_ext) < num_descs);
-
- /* last pq descriptor carries the unmap parameters and fence bit */
- desc->txd.flags = flags;
- desc->len = total_len;
- if (result)
- desc->result = result;
- pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
- dump_pq_desc_dbg(ioat, desc, ext);
-
- if (!cb32) {
- pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- pq->ctl_f.compl_write = 1;
- compl_desc = desc;
- } else {
- /* completion descriptor carries interrupt bit */
- compl_desc = ioat2_get_ring_ent(ioat, idx + i);
- compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
- hw = compl_desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- dump_desc_dbg(ioat, compl_desc);
- }
-
- /* we leave the channel locked to ensure in order submission */
- return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
- const dma_addr_t *dst, const dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf,
- size_t len, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_chan_common *chan = &ioat->base;
- struct ioatdma_device *device = chan->device;
- struct ioat_ring_ent *desc;
- size_t total_len = len;
- struct ioat_pq_descriptor *pq;
- u32 offset = 0;
- u8 op;
- int i, s, idx, num_descs;
-
- /* this function is only called with 9-16 sources */
- op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
-
- dev_dbg(to_dev(chan), "%s\n", __func__);
-
- num_descs = ioat2_xferlen_to_descs(ioat, len);
-
- /*
- * 16 source pq is only available on cb3.3, which does not have
- * the completion write hw bug.
- */
- if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
- idx = ioat->head;
- else
- return NULL;
-
- i = 0;
-
- do {
- struct ioat_raw_descriptor *descs[4];
- size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
- desc = ioat2_get_ring_ent(ioat, idx + i);
- pq = desc->pq;
-
- descs[0] = (struct ioat_raw_descriptor *) pq;
-
- desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
- if (!desc->sed) {
- dev_err(to_dev(chan),
- "%s: no free sed entries\n", __func__);
- return NULL;
- }
-
- pq->sed_addr = desc->sed->dma;
- desc->sed->parent = desc;
-
- descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
- descs[2] = (void *)descs[1] + 64;
-
- for (s = 0; s < src_cnt; s++)
- pq16_set_src(descs, src[s], offset, scf[s], s);
-
- /* see the comment for dma_maxpq in include/linux/dmaengine.h */
- if (dmaf_p_disabled_continue(flags))
- pq16_set_src(descs, dst[1], offset, 1, s++);
- else if (dmaf_continue(flags)) {
- pq16_set_src(descs, dst[0], offset, 0, s++);
- pq16_set_src(descs, dst[1], offset, 1, s++);
- pq16_set_src(descs, dst[1], offset, 0, s++);
- }
-
- pq->size = xfer_size;
- pq->p_addr = dst[0] + offset;
- pq->q_addr = dst[1] + offset;
- pq->ctl = 0;
- pq->ctl_f.op = op;
- pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
- /* we turn on descriptor write back error status */
- if (device->cap & IOAT_CAP_DWBES)
- pq->ctl_f.wb_en = result ? 1 : 0;
- pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
- pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
- len -= xfer_size;
- offset += xfer_size;
- } while (++i < num_descs);
-
- /* last pq descriptor carries the unmap parameters and fence bit */
- desc->txd.flags = flags;
- desc->len = total_len;
- if (result)
- desc->result = result;
- pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
- /* with cb3.3 we should be able to do completion w/o a null desc */
- pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- pq->ctl_f.compl_write = 1;
-
- dump_pq16_desc_dbg(ioat, desc);
-
- /* we leave the channel locked to ensure in order submission */
- return &desc->txd;
-}
-
-static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
-{
- if (dmaf_p_disabled_continue(flags))
- return src_cnt + 1;
- else if (dmaf_continue(flags))
- return src_cnt + 3;
- else
- return src_cnt;
-}
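
src_cnt_flags() is what routes a request between the 8-source and 16-source descriptor formats: the continuation flags add implied sources (one for a q-only continuation, three for p+q) before the > 8 comparison. A worked check, with stand-in flag bits in place of DMA_PREP_CONTINUE/DMA_PREP_PQ_DISABLE_P:

#include <stdio.h>

#define F_CONTINUE  0x1u   /* stand-in for DMA_PREP_CONTINUE     */
#define F_DISABLE_P 0x2u   /* stand-in for DMA_PREP_PQ_DISABLE_P */

static unsigned int effective_src_cnt(unsigned int src_cnt, unsigned int flags)
{
	if ((flags & F_CONTINUE) && (flags & F_DISABLE_P))
		return src_cnt + 1;   /* q-only continuation */
	if (flags & F_CONTINUE)
		return src_cnt + 3;   /* p+q continuation */
	return src_cnt;
}

int main(void)
{
	/* 6 sources continuing a p+q computation: 9 effective sources,
	 * so the driver would take the 16-source descriptor path */
	unsigned int n = effective_src_cnt(6, F_CONTINUE);

	printf("%u -> %s path\n", n, n > 8 ? "pq16" : "pq");
	return 0;
}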
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf, size_t len,
- unsigned long flags)
-{
- /* specify valid address for disabled result */
- if (flags & DMA_PREP_PQ_DISABLE_P)
- dst[0] = dst[1];
- if (flags & DMA_PREP_PQ_DISABLE_Q)
- dst[1] = dst[0];
-
- /* handle the single source multiply case from the raid6
- * recovery path
- */
- if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
- dma_addr_t single_source[2];
- unsigned char single_source_coef[2];
-
- BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
- single_source[0] = src[0];
- single_source[1] = src[0];
- single_source_coef[0] = scf[0];
- single_source_coef[1] = 0;
-
- return src_cnt_flags(src_cnt, flags) > 8 ?
- __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
- 2, single_source_coef, len,
- flags) :
- __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
- single_source_coef, len, flags);
-
- } else {
- return src_cnt_flags(src_cnt, flags) > 8 ?
- __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
- scf, len, flags) :
- __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
- scf, len, flags);
- }
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
- unsigned int src_cnt, const unsigned char *scf, size_t len,
- enum sum_check_flags *pqres, unsigned long flags)
-{
- /* specify valid address for disabled result */
- if (flags & DMA_PREP_PQ_DISABLE_P)
- pq[0] = pq[1];
- if (flags & DMA_PREP_PQ_DISABLE_Q)
- pq[1] = pq[0];
-
- /* the cleanup routine only sets bits on validate failure; it
- * does not clear bits on validate success... so clear it here
- */
- *pqres = 0;
-
- return src_cnt_flags(src_cnt, flags) > 8 ?
- __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
- flags) :
- __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
- flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
- unsigned int src_cnt, size_t len, unsigned long flags)
-{
- unsigned char scf[src_cnt];
- dma_addr_t pq[2];
-
- memset(scf, 0, src_cnt);
- pq[0] = dst;
- flags |= DMA_PREP_PQ_DISABLE_Q;
- pq[1] = dst; /* specify valid address for disabled result */
-
- return src_cnt_flags(src_cnt, flags) > 8 ?
- __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
- flags) :
- __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
- flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
- unsigned int src_cnt, size_t len,
- enum sum_check_flags *result, unsigned long flags)
-{
- unsigned char scf[src_cnt];
- dma_addr_t pq[2];
-
- /* the cleanup routine only sets bits on validate failure; it
- * does not clear bits on validate success... so clear it here
- */
- *result = 0;
-
- memset(scf, 0, src_cnt);
- pq[0] = src[0];
- flags |= DMA_PREP_PQ_DISABLE_Q;
- pq[1] = pq[0]; /* specify valid address for disabled result */
-
- return src_cnt_flags(src_cnt, flags) > 8 ?
- __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
- scf, len, flags) :
- __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
- scf, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_ring_ent *desc;
- struct ioat_dma_descriptor *hw;
-
- if (ioat2_check_space_lock(ioat, 1) == 0)
- desc = ioat2_get_ring_ent(ioat, ioat->head);
- else
- return NULL;
-
- hw = desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = 1;
- hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- hw->src_addr = 0;
- hw->dst_addr = 0;
-
- desc->txd.flags = flags;
- desc->len = 1;
-
- dump_desc_dbg(ioat, desc);
-
- /* we leave the channel locked to ensure in order submission */
- return &desc->txd;
-}
-
-static void ioat3_dma_test_callback(void *dma_async_param)
-{
- struct completion *cmp = dma_async_param;
-
- complete(cmp);
-}
-
-#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int ioat_xor_val_self_test(struct ioatdma_device *device)
-{
- int i, src_idx;
- struct page *dest;
- struct page *xor_srcs[IOAT_NUM_SRC_TEST];
- struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
- dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
- dma_addr_t dest_dma;
- struct dma_async_tx_descriptor *tx;
- struct dma_chan *dma_chan;
- dma_cookie_t cookie;
- u8 cmp_byte = 0;
- u32 cmp_word;
- u32 xor_val_result;
- int err = 0;
- struct completion cmp;
- unsigned long tmo;
- struct device *dev = &device->pdev->dev;
- struct dma_device *dma = &device->common;
- u8 op = 0;
-
- dev_dbg(dev, "%s\n", __func__);
-
- if (!dma_has_cap(DMA_XOR, dma->cap_mask))
- return 0;
-
- for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
- xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
- if (!xor_srcs[src_idx]) {
- while (src_idx--)
- __free_page(xor_srcs[src_idx]);
- return -ENOMEM;
- }
- }
-
- dest = alloc_page(GFP_KERNEL);
- if (!dest) {
- while (src_idx--)
- __free_page(xor_srcs[src_idx]);
- return -ENOMEM;
- }
-
- /* Fill in src buffers */
- for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
- u8 *ptr = page_address(xor_srcs[src_idx]);
- for (i = 0; i < PAGE_SIZE; i++)
- ptr[i] = (1 << src_idx);
- }
-
- for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
- cmp_byte ^= (u8) (1 << src_idx);
-
- cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
- (cmp_byte << 8) | cmp_byte;
-
- memset(page_address(dest), 0, PAGE_SIZE);
-
- dma_chan = container_of(dma->channels.next, struct dma_chan,
- device_node);
- if (dma->device_alloc_chan_resources(dma_chan) < 1) {
- err = -ENODEV;
- goto out;
- }
-
- /* test xor */
- op = IOAT_OP_XOR;
-
- dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_dma))
- goto dma_unmap;
-
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
- dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
- goto dma_unmap;
- }
- tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
- IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
-
- if (!tx) {
- dev_err(dev, "Self-test xor prep failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat3_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test xor setup failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
- dma->device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (tmo == 0 ||
- dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
- dev_err(dev, "Self-test xor timed out\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
- dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
- for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
- u32 *ptr = page_address(dest);
- if (ptr[i] != cmp_word) {
- dev_err(dev, "Self-test xor failed compare\n");
- err = -ENODEV;
- goto free_resources;
- }
- }
- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
- dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
- /* skip validate if the capability is not present */
- if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
- goto free_resources;
-
- op = IOAT_OP_XOR_VAL;
-
- /* validate the sources with the destination page */
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- xor_val_srcs[i] = xor_srcs[i];
- xor_val_srcs[i] = dest;
-
- xor_val_result = 1;
-
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
- dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
- goto dma_unmap;
- }
- tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
- IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
- if (!tx) {
- dev_err(dev, "Self-test zero prep failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat3_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test zero setup failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
- dma->device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (tmo == 0 ||
- dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
- dev_err(dev, "Self-test validate timed out\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
- if (xor_val_result != 0) {
- dev_err(dev, "Self-test validate failed compare\n");
- err = -ENODEV;
- goto free_resources;
- }
-
- memset(page_address(dest), 0, PAGE_SIZE);
-
- /* test for non-zero parity sum */
- op = IOAT_OP_XOR_VAL;
-
- xor_val_result = 0;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
- dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
- goto dma_unmap;
- }
- tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
- IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
- if (!tx) {
- dev_err(dev, "Self-test 2nd zero prep failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat3_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test 2nd zero setup failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
- dma->device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (tmo == 0 ||
- dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
- dev_err(dev, "Self-test 2nd validate timed out\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- if (xor_val_result != SUM_CHECK_P_RESULT) {
- dev_err(dev, "Self-test validate failed compare\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
- goto free_resources;
-dma_unmap:
- if (op == IOAT_OP_XOR) {
- if (dest_dma != DMA_ERROR_CODE)
- dma_unmap_page(dev, dest_dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
- } else if (op == IOAT_OP_XOR_VAL) {
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
- }
-free_resources:
- dma->device_free_chan_resources(dma_chan);
-out:
- src_idx = IOAT_NUM_SRC_TEST;
- while (src_idx--)
- __free_page(xor_srcs[src_idx]);
- __free_page(dest);
- return err;
-}
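
The compare values in the self-test follow directly from how the sources are filled: page n is memset to the single-bit byte 1 << n, so the XOR of all six pages is 0x3f in every byte, replicated into cmp_word. Verified standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cmp_byte = 0;
	int src_idx;

	for (src_idx = 0; src_idx < 6; src_idx++)   /* IOAT_NUM_SRC_TEST */
		cmp_byte ^= (uint8_t)(1 << src_idx);

	uint32_t cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			    (cmp_byte << 8) | cmp_byte;

	printf("cmp_byte=%#x cmp_word=%#x\n", cmp_byte, cmp_word);
	/* prints cmp_byte=0x3f cmp_word=0x3f3f3f3f */
	return 0;
}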
-
-static int ioat3_dma_self_test(struct ioatdma_device *device)
-{
- int rc = ioat_dma_self_test(device);
-
- if (rc)
- return rc;
-
- rc = ioat_xor_val_self_test(device);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int ioat3_irq_reinit(struct ioatdma_device *device)
-{
- struct pci_dev *pdev = device->pdev;
- int irq = pdev->irq, i;
-
- if (!is_bwd_ioat(pdev))
- return 0;
-
- switch (device->irq_mode) {
- case IOAT_MSIX:
- for (i = 0; i < device->common.chancnt; i++) {
- struct msix_entry *msix = &device->msix_entries[i];
- struct ioat_chan_common *chan;
-
- chan = ioat_chan_by_index(device, i);
- devm_free_irq(&pdev->dev, msix->vector, chan);
- }
-
- pci_disable_msix(pdev);
- break;
- case IOAT_MSI:
- pci_disable_msi(pdev);
- /* fall through */
- case IOAT_INTX:
- devm_free_irq(&pdev->dev, irq, device);
- break;
- default:
- return 0;
- }
- device->irq_mode = IOAT_NOIRQ;
-
- return ioat_dma_setup_interrupts(device);
-}
-
-static int ioat3_reset_hw(struct ioat_chan_common *chan)
-{
- /* throw away whatever the channel was doing and get it
- * initialized, with ioat3-specific workarounds
- */
- struct ioatdma_device *device = chan->device;
- struct pci_dev *pdev = device->pdev;
- u32 chanerr;
- u16 dev_id;
- int err;
-
- ioat2_quiesce(chan, msecs_to_jiffies(100));
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
- if (device->version < IOAT_VER_3_3) {
- /* clear any pending errors */
- err = pci_read_config_dword(pdev,
- IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
- if (err) {
- dev_err(&pdev->dev,
- "channel error register unreachable\n");
- return err;
- }
- pci_write_config_dword(pdev,
- IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
- pci_write_config_dword(pdev,
- IOAT_PCI_DMAUNCERRSTS_OFFSET,
- 0x10);
- }
- }
-
- err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
- if (!err)
- err = ioat3_irq_reinit(device);
-
- if (err)
- dev_err(&pdev->dev, "Failed to reset: %d\n", err);
-
- return err;
-}
-
-static void ioat3_intr_quirk(struct ioatdma_device *device)
-{
- struct dma_device *dma;
- struct dma_chan *c;
- struct ioat_chan_common *chan;
- u32 errmask;
-
- dma = &device->common;
-
- /*
- * if we have descriptor write back error status, we mask the
- * error interrupts
- */
- if (device->cap & IOAT_CAP_DWBES) {
- list_for_each_entry(c, &dma->channels, device_node) {
- chan = to_chan_common(c);
- errmask = readl(chan->reg_base +
- IOAT_CHANERR_MASK_OFFSET);
- errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
- IOAT_CHANERR_XOR_Q_ERR;
- writel(errmask, chan->reg_base +
- IOAT_CHANERR_MASK_OFFSET);
- }
- }
-}
-
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
- struct pci_dev *pdev = device->pdev;
- int dca_en = system_has_dca_enabled(pdev);
- struct dma_device *dma;
- struct dma_chan *c;
- struct ioat_chan_common *chan;
- bool is_raid_device = false;
- int err;
-
- device->enumerate_channels = ioat2_enumerate_channels;
- device->reset_hw = ioat3_reset_hw;
- device->self_test = ioat3_dma_self_test;
- device->intr_quirk = ioat3_intr_quirk;
- dma = &device->common;
- dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
- dma->device_issue_pending = ioat2_issue_pending;
- dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
- dma->device_free_chan_resources = ioat2_free_chan_resources;
-
- dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
- dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
-
- device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
-
- if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
- device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
-
- /* dca is incompatible with raid operations */
- if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
- device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
-
- if (device->cap & IOAT_CAP_XOR) {
- is_raid_device = true;
- dma->max_xor = 8;
-
- dma_cap_set(DMA_XOR, dma->cap_mask);
- dma->device_prep_dma_xor = ioat3_prep_xor;
-
- dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
- }
-
- if (device->cap & IOAT_CAP_PQ) {
- is_raid_device = true;
-
- dma->device_prep_dma_pq = ioat3_prep_pq;
- dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
- dma_cap_set(DMA_PQ, dma->cap_mask);
- dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
-
- if (device->cap & IOAT_CAP_RAID16SS) {
- dma_set_maxpq(dma, 16, 0);
- } else {
- dma_set_maxpq(dma, 8, 0);
- }
-
- if (!(device->cap & IOAT_CAP_XOR)) {
- dma->device_prep_dma_xor = ioat3_prep_pqxor;
- dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
- dma_cap_set(DMA_XOR, dma->cap_mask);
- dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-
- if (device->cap & IOAT_CAP_RAID16SS) {
- dma->max_xor = 16;
- } else {
- dma->max_xor = 8;
- }
- }
- }
-
- dma->device_tx_status = ioat3_tx_status;
- device->cleanup_fn = ioat3_cleanup_event;
- device->timer_fn = ioat3_timer_event;
-
- /* starting with CB3.3 super extended descriptors are supported */
- if (device->cap & IOAT_CAP_RAID16SS) {
- char pool_name[14];
- int i;
-
- for (i = 0; i < MAX_SED_POOLS; i++) {
- snprintf(pool_name, 14, "ioat_hw%d_sed", i);
-
- /* allocate SED DMA pool */
- device->sed_hw_pool[i] = dmam_pool_create(pool_name,
- &pdev->dev,
- SED_SIZE * (i + 1), 64, 0);
- if (!device->sed_hw_pool[i])
- return -ENOMEM;
- }
- }
-
- err = ioat_probe(device);
- if (err)
- return err;
-
- list_for_each_entry(c, &dma->channels, device_node) {
- chan = to_chan_common(c);
- writel(IOAT_DMA_DCA_ANY_CPU,
- chan->reg_base + IOAT_DCACTRL_OFFSET);
- }
-
- err = ioat_register(device);
- if (err)
- return err;
-
- ioat_kobject_add(device, &ioat2_ktype);
-
- if (dca)
- device->dca = ioat3_dca_init(pdev, device->reg_base);
-
- return 0;
-}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index a3e731edce57..690e3b4f8202 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -21,11 +21,6 @@
#define IOAT_MMIO_BAR 0
/* CB device ID's */
-#define IOAT_PCI_DID_5000 0x1A38
-#define IOAT_PCI_DID_CNB 0x360B
-#define IOAT_PCI_DID_SCNB 0x65FF
-#define IOAT_PCI_DID_SNB 0x402F
-
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
@@ -58,6 +53,17 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX0 0x6f20
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX1 0x6f21
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX2 0x6f22
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX3 0x6f23
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX4 0x6f24
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX5 0x6f25
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX6 0x6f26
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX7 0x6f27
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
new file mode 100644
index 000000000000..1c3c9b0abf4e
--- /dev/null
+++ b/drivers/dma/ioat/init.c
@@ -0,0 +1,1314 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+ /* I/OAT v3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+
+ /* I/OAT v3.2 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+
+ /* I/OAT v3.3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+ struct ioatdma_chan *ioat_chan, int idx);
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+ "high-water mark for pushing ioat descriptors (default: 4)");
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+ "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
+int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+ "ioat+: upper limit for ring size (default: 16)");
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+ sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+ "set ioat interrupt style: msix (default), msi, intx");
+
+struct kmem_cache *ioat_cache;
+struct kmem_cache *ioat_sed_cache;
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_bdx_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+ return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+}
+
+bool is_bwd_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ /* even though not Atom, BDX-DE has the same DMA silicon */
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+ struct completion *cmp = dma_async_param;
+
+ complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @ioat_dma: dma device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+ int i;
+ u8 *src;
+ u8 *dest;
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ struct device *dev = &ioat_dma->pdev->dev;
+ struct dma_chan *dma_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dest, dma_src;
+ dma_cookie_t cookie;
+ int err = 0;
+ struct completion cmp;
+ unsigned long tmo;
+ unsigned long flags;
+
+ src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < IOAT_TEST_SIZE; i++)
+ src[i] = (u8)i;
+
+ /* Start copy, using first DMA channel */
+ dma_chan = container_of(dma->channels.next, struct dma_chan,
+ device_node);
+ if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+ dev_err(dev, "selftest cannot allocate chan resource\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "mapping src buffer failed\n");
+ goto free_resources;
+ }
+ dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_dest)) {
+ dev_err(dev, "mapping dest buffer failed\n");
+ goto unmap_src;
+ }
+ flags = DMA_PREP_INTERRUPT;
+ tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+ dma_src, IOAT_TEST_SIZE,
+ flags);
+ if (!tx) {
+ dev_err(dev, "Self-test prep failed, disabling\n");
+ err = -ENODEV;
+ goto unmap_dma;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test setup failed, disabling\n");
+ err = -ENODEV;
+ goto unmap_dma;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (tmo == 0 ||
+ dma->device_tx_status(dma_chan, cookie, NULL)
+ != DMA_COMPLETE) {
+ dev_err(dev, "Self-test copy timed out, disabling\n");
+ err = -ENODEV;
+ goto unmap_dma;
+ }
+ if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+ dev_err(dev, "Self-test copy failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+unmap_dma:
+ dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+free_resources:
+ dma->device_free_chan_resources(dma_chan);
+out:
+ kfree(src);
+ kfree(dest);
+ return err;
+}
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @ioat_dma: ioat dma device
+ */
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
+{
+ struct ioatdma_chan *ioat_chan;
+ struct pci_dev *pdev = ioat_dma->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ int i, j, msixcnt;
+ int err = -EINVAL;
+ u8 intrctrl = 0;
+
+ if (!strcmp(ioat_interrupt_style, "msix"))
+ goto msix;
+ if (!strcmp(ioat_interrupt_style, "msi"))
+ goto msi;
+ if (!strcmp(ioat_interrupt_style, "intx"))
+ goto intx;
+ dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+ goto err_no_irq;
+
+msix:
+ /* The number of MSI-X vectors should equal the number of channels */
+ msixcnt = ioat_dma->dma_dev.chancnt;
+ for (i = 0; i < msixcnt; i++)
+ ioat_dma->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
+ if (err)
+ goto msi;
+
+ for (i = 0; i < msixcnt; i++) {
+ msix = &ioat_dma->msix_entries[i];
+ ioat_chan = ioat_chan_by_index(ioat_dma, i);
+ err = devm_request_irq(dev, msix->vector,
+ ioat_dma_do_interrupt_msix, 0,
+ "ioat-msix", ioat_chan);
+ if (err) {
+ for (j = 0; j < i; j++) {
+ msix = &ioat_dma->msix_entries[j];
+ ioat_chan = ioat_chan_by_index(ioat_dma, j);
+ devm_free_irq(dev, msix->vector, ioat_chan);
+ }
+ goto msi;
+ }
+ }
+ intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+ ioat_dma->irq_mode = IOAT_MSIX;
+ goto done;
+
+msi:
+ err = pci_enable_msi(pdev);
+ if (err)
+ goto intx;
+
+ err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+ "ioat-msi", ioat_dma);
+ if (err) {
+ pci_disable_msi(pdev);
+ goto intx;
+ }
+ ioat_dma->irq_mode = IOAT_MSI;
+ goto done;
+
+intx:
+ err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+ IRQF_SHARED, "ioat-intx", ioat_dma);
+ if (err)
+ goto err_no_irq;
+
+ ioat_dma->irq_mode = IOAT_INTX;
+done:
+ if (is_bwd_ioat(pdev))
+ ioat_intr_quirk(ioat_dma);
+ intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+ writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+ return 0;
+
+err_no_irq:
+ /* Disable all interrupt generation */
+ writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+ ioat_dma->irq_mode = IOAT_NOIRQ;
+ dev_err(dev, "no usable interrupts\n");
+ return err;
+}
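
ioat_dma_setup_interrupts() is a fallback ladder: start at the requested style and degrade msix -> msi -> intx, recording whichever mode finally sticks and disabling interrupt generation if none does. The control flow, modeled without PCI; the stand-in probe functions replace pci_enable_msix_exact(), pci_enable_msi(), and the legacy devm_request_irq() path:

#include <stdio.h>

enum irq_mode { IOAT_NOIRQ, IOAT_INTX, IOAT_MSI, IOAT_MSIX };

static int try_msix(void) { return -1; }   /* pretend MSI-X allocation fails */
static int try_msi(void)  { return 0; }    /* pretend MSI works */
static int try_intx(void) { return 0; }

int main(void)
{
	enum irq_mode mode = IOAT_NOIRQ;

	if (try_msix() == 0)
		mode = IOAT_MSIX;
	else if (try_msi() == 0)
		mode = IOAT_MSI;
	else if (try_intx() == 0)
		mode = IOAT_INTX;

	if (mode == IOAT_NOIRQ) {
		fprintf(stderr, "no usable interrupts\n");
		return 1;
	}
	printf("irq mode: %d\n", mode);   /* 2 == IOAT_MSI */
	return 0;
}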
+
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
+{
+ /* Disable all interrupt generation */
+ writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+static int ioat_probe(struct ioatdma_device *ioat_dma)
+{
+ int err = -ENODEV;
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ struct pci_dev *pdev = ioat_dma->pdev;
+ struct device *dev = &pdev->dev;
+
+ /* DMA coherent memory pool for DMA descriptor allocations */
+ ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+ sizeof(struct ioat_dma_descriptor),
+ 64, 0);
+ if (!ioat_dma->dma_pool) {
+ err = -ENOMEM;
+ goto err_dma_pool;
+ }
+
+ ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+ sizeof(u64),
+ SMP_CACHE_BYTES,
+ SMP_CACHE_BYTES);
+
+ if (!ioat_dma->completion_pool) {
+ err = -ENOMEM;
+ goto err_completion_pool;
+ }
+
+ ioat_enumerate_channels(ioat_dma);
+
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->dev = &pdev->dev;
+
+ if (!dma->chancnt) {
+ dev_err(dev, "channel enumeration error\n");
+ goto err_setup_interrupts;
+ }
+
+ err = ioat_dma_setup_interrupts(ioat_dma);
+ if (err)
+ goto err_setup_interrupts;
+
+ err = ioat3_dma_self_test(ioat_dma);
+ if (err)
+ goto err_self_test;
+
+ return 0;
+
+err_self_test:
+ ioat_disable_interrupts(ioat_dma);
+err_setup_interrupts:
+ pci_pool_destroy(ioat_dma->completion_pool);
+err_completion_pool:
+ pci_pool_destroy(ioat_dma->dma_pool);
+err_dma_pool:
+ return err;
+}
+
+static int ioat_register(struct ioatdma_device *ioat_dma)
+{
+ int err = dma_async_device_register(&ioat_dma->dma_dev);
+
+ if (err) {
+ ioat_disable_interrupts(ioat_dma);
+ pci_pool_destroy(ioat_dma->completion_pool);
+ pci_pool_destroy(ioat_dma->dma_pool);
+ }
+
+ return err;
+}
+
+static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+{
+ struct dma_device *dma = &ioat_dma->dma_dev;
+
+ ioat_disable_interrupts(ioat_dma);
+
+ ioat_kobject_del(ioat_dma);
+
+ dma_async_device_unregister(dma);
+
+ pci_pool_destroy(ioat_dma->dma_pool);
+ pci_pool_destroy(ioat_dma->completion_pool);
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+/**
+ * ioat_enumerate_channels - find and initialize the device's channels
+ * @ioat_dma: the ioat dma device to be enumerated
+ */
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+{
+ struct ioatdma_chan *ioat_chan;
+ struct device *dev = &ioat_dma->pdev->dev;
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ u8 xfercap_log;
+ int i;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+ dma->chancnt &= 0x1f; /* bits [4:0] valid */
+ if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+ dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+ dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+ dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+ }
+ xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+ xfercap_log &= 0x1f; /* bits [4:0] valid */
+ if (xfercap_log == 0)
+ return 0;
+ dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+ if (!ioat_chan)
+ break;
+
+ ioat_init_channel(ioat_dma, ioat_chan, i);
+ ioat_chan->xfercap_log = xfercap_log;
+ spin_lock_init(&ioat_chan->prep_lock);
+ if (ioat_reset_hw(ioat_chan)) {
+ i = 0;
+ break;
+ }
+ }
+ dma->chancnt = i;
+ return i;
+}
+
+/**
+ * ioat_free_chan_resources - release all the descriptors
+ * @c: the channel to be cleaned
+ */
+static void ioat_free_chan_resources(struct dma_chan *c)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ const int total_descs = 1 << ioat_chan->alloc_order;
+ int descs;
+ int i;
+
+ /*
+ * Before freeing channel resources, first check whether they have
+ * been previously allocated for this channel.
+ */
+ if (!ioat_chan->ring)
+ return;
+
+ ioat_stop(ioat_chan);
+ ioat_reset_hw(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ descs = ioat_ring_space(ioat_chan);
+ dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
+ for (i = 0; i < descs; i++) {
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
+ ioat_free_ring_ent(desc, c);
+ }
+
+ if (descs < total_descs)
+ dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+ total_descs - descs);
+
+ for (i = 0; i < total_descs - descs; i++) {
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
+ dump_desc_dbg(ioat_chan, desc);
+ ioat_free_ring_ent(desc, c);
+ }
+
+ kfree(ioat_chan->ring);
+ ioat_chan->ring = NULL;
+ ioat_chan->alloc_order = 0;
+ pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+ ioat_chan->completion_dma);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+ ioat_chan->last_completion = 0;
+ ioat_chan->completion_dma = 0;
+ ioat_chan->dmacount = 0;
+}
+
+/**
+ * ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
+ * @c: channel to be initialized
+ */
+static int ioat_alloc_chan_resources(struct dma_chan *c)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioat_ring_ent **ring;
+ u64 status;
+ int order;
+ int i = 0;
+ u32 chanerr;
+
+ /* have we already been set up? */
+ if (ioat_chan->ring)
+ return 1 << ioat_chan->alloc_order;
+
+ /* Set up the channel to interrupt and write completion status on error */
+ writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+ /* allocate a completion writeback area */
+ /* doing 2 32-bit writes to MMIO since 1 64-bit write doesn't work */
+ ioat_chan->completion =
+ pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+ GFP_KERNEL, &ioat_chan->completion_dma);
+ if (!ioat_chan->completion)
+ return -ENOMEM;
+
+ memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
+ writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(((u64)ioat_chan->completion_dma) >> 32,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+ order = ioat_get_alloc_order();
+ ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+ if (!ring)
+ return -ENOMEM;
+
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ ioat_chan->ring = ring;
+ ioat_chan->head = 0;
+ ioat_chan->issued = 0;
+ ioat_chan->tail = 0;
+ ioat_chan->alloc_order = order;
+ set_bit(IOAT_RUN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+ ioat_start_null_desc(ioat_chan);
+
+ /* check that we got off the ground */
+ do {
+ udelay(1);
+ status = ioat_chansts(ioat_chan);
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ return 1 << ioat_chan->alloc_order;
+
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ dev_WARN(to_dev(ioat_chan),
+ "failed to start channel chanerr: %#x\n", chanerr);
+ ioat_free_chan_resources(c);
+ return -EFAULT;
+}
+
+/* common channel initialization */
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+ struct ioatdma_chan *ioat_chan, int idx)
+{
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ struct dma_chan *c = &ioat_chan->dma_chan;
+ unsigned long data = (unsigned long) c;
+
+ ioat_chan->ioat_dma = ioat_dma;
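+ /* channel register banks follow the device-global bank at offset 0,
+ * spaced 0x80 bytes apart
+ */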
+ ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
+ spin_lock_init(&ioat_chan->cleanup_lock);
+ ioat_chan->dma_chan.device = dma;
+ dma_cookie_init(&ioat_chan->dma_chan);
+ list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
+ ioat_dma->idx[idx] = ioat_chan;
+ init_timer(&ioat_chan->timer);
+ ioat_chan->timer.function = ioat_timer_event;
+ ioat_chan->timer.data = data;
+ tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+}
+
+#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
+static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
+{
+ int i, src_idx;
+ struct page *dest;
+ struct page *xor_srcs[IOAT_NUM_SRC_TEST];
+ struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
+ dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
+ dma_addr_t dest_dma;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ u8 cmp_byte = 0;
+ u32 cmp_word;
+ u32 xor_val_result;
+ int err = 0;
+ struct completion cmp;
+ unsigned long tmo;
+ struct device *dev = &ioat_dma->pdev->dev;
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ u8 op = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!dma_has_cap(DMA_XOR, dma->cap_mask))
+ return 0;
+
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+ xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+ if (!xor_srcs[src_idx]) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+ }
+
+ dest = alloc_page(GFP_KERNEL);
+ if (!dest) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffers */
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+ u8 *ptr = page_address(xor_srcs[src_idx]);
+
+ for (i = 0; i < PAGE_SIZE; i++)
+ ptr[i] = (1 << src_idx);
+ }
+
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
+ cmp_byte ^= (u8) (1 << src_idx);
+
+ cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+ (cmp_byte << 8) | cmp_byte;
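+ /*
+ * Worked example: with IOAT_NUM_SRC_TEST == 6 the source pages hold
+ * 0x01, 0x02, ..., 0x20 (one bit each), so cmp_byte == 0x3f and every
+ * 32-bit word of the XOR result must equal cmp_word == 0x3f3f3f3f.
+ */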
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ dma_chan = container_of(dma->channels.next, struct dma_chan,
+ device_node);
+ if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* test xor */
+ op = IOAT_OP_XOR;
+
+ /* mark all sources unmapped before any mapping can fail */
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+
+ dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_dma))
+ goto dma_unmap;
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
+ dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
+ tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ IOAT_NUM_SRC_TEST, PAGE_SIZE,
+ DMA_PREP_INTERRUPT);
+
+ if (!tx) {
+ dev_err(dev, "Self-test xor prep failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test xor setup failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (tmo == 0 ||
+ dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+ dev_err(dev, "Self-test xor timed out\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+ u32 *ptr = page_address(dest);
+
+ if (ptr[i] != cmp_word) {
+ dev_err(dev, "Self-test xor failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* skip validate if the capability is not present */
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+ goto free_resources;
+
+ op = IOAT_OP_XOR_VAL;
+
+ /* validate the sources against the destination page */
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ xor_val_srcs[i] = xor_srcs[i];
+ xor_val_srcs[i] = dest;
+
+ xor_val_result = 1;
+
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+ dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
+ tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &xor_val_result, DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(dev, "Self-test zero prep failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test zero setup failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (tmo == 0 ||
+ dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+ dev_err(dev, "Self-test validate timed out\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ if (xor_val_result != 0) {
+ dev_err(dev, "Self-test validate failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ /* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
+ xor_val_result = 0;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+ dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
+ tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &xor_val_result, DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(dev, "Self-test 2nd zero prep failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test 2nd zero setup failed\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (tmo == 0 ||
+ dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+ dev_err(dev, "Self-test 2nd validate timed out\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ if (xor_val_result != SUM_CHECK_P_RESULT) {
+ dev_err(dev, "Self-test validate failed compare\n");
+ err = -ENODEV;
+ goto dma_unmap;
+ }
+
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ goto free_resources;
+dma_unmap:
+ if (op == IOAT_OP_XOR) {
+ if (dest_dma != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_XOR_VAL) {
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ }
+free_resources:
+ dma->device_free_chan_resources(dma_chan);
+out:
+ src_idx = IOAT_NUM_SRC_TEST;
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ __free_page(dest);
+ return err;
+}
+
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+ int rc;
+
+ rc = ioat_dma_self_test(ioat_dma);
+ if (rc)
+ return rc;
+
+ rc = ioat_xor_val_self_test(ioat_dma);
+
+ return rc;
+}
+
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
+{
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioatdma_chan *ioat_chan;
+ u32 errmask;
+
+ dma = &ioat_dma->dma_dev;
+
+ /*
+ * If the device reports errors through descriptor write back
+ * (DWBES), mask the XOR/PQ error interrupts; the error status
+ * arrives with the descriptor instead.
+ */
+ if (ioat_dma->cap & IOAT_CAP_DWBES) {
+ list_for_each_entry(c, &dma->channels, device_node) {
+ ioat_chan = to_ioat_chan(c);
+ errmask = readl(ioat_chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+ IOAT_CHANERR_XOR_Q_ERR;
+ writel(errmask, ioat_chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ }
+ }
+}
+
+static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+{
+ struct pci_dev *pdev = ioat_dma->pdev;
+ int dca_en = system_has_dca_enabled(pdev);
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioatdma_chan *ioat_chan;
+ bool is_raid_device = false;
+ int err;
+
+ dma = &ioat_dma->dma_dev;
+ dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+ dma->device_issue_pending = ioat_issue_pending;
+ dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+ dma->device_free_chan_resources = ioat_free_chan_resources;
+
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+ dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
+
+ ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
+
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
+ ioat_dma->cap &=
+ ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+
+ /* dca is incompatible with raid operations */
+ if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+ ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
+ if (ioat_dma->cap & IOAT_CAP_XOR) {
+ is_raid_device = true;
+ dma->max_xor = 8;
+
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma->device_prep_dma_xor = ioat_prep_xor;
+
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = ioat_prep_xor_val;
+ }
+
+ if (ioat_dma->cap & IOAT_CAP_PQ) {
+ is_raid_device = true;
+
+ dma->device_prep_dma_pq = ioat_prep_pq;
+ dma->device_prep_dma_pq_val = ioat_prep_pq_val;
+ dma_cap_set(DMA_PQ, dma->cap_mask);
+ dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+
+ if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+ dma_set_maxpq(dma, 16, 0);
+ else
+ dma_set_maxpq(dma, 8, 0);
+
+ if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
+ dma->device_prep_dma_xor = ioat_prep_pqxor;
+ dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+ if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+ dma->max_xor = 16;
+ else
+ dma->max_xor = 8;
+ }
+ }
+
+ dma->device_tx_status = ioat_tx_status;
+
+ /* starting with CB3.3 super extended descriptors are supported */
+ if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
+ char pool_name[14];
+ int i;
+
+ for (i = 0; i < MAX_SED_POOLS; i++) {
+ snprintf(pool_name, 14, "ioat_hw%d_sed", i);
+
+ /* allocate SED DMA pool */
+ ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
+ &pdev->dev,
+ SED_SIZE * (i + 1), 64, 0);
+ if (!ioat_dma->sed_hw_pool[i])
+ return -ENOMEM;
+
+ }
+ }
+
+ if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ err = ioat_probe(ioat_dma);
+ if (err)
+ return err;
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ ioat_chan = to_ioat_chan(c);
+ writel(IOAT_DMA_DCA_ANY_CPU,
+ ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
+
+ err = ioat_register(ioat_dma);
+ if (err)
+ return err;
+
+ ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+ if (dca)
+ ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+
+ return 0;
+}
+
+#define DRV_NAME "ioatdma"
+
+static struct pci_driver ioat_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ioat_pci_tbl,
+ .probe = ioat_pci_probe,
+ .remove = ioat_remove,
+};
+
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+ if (!d)
+ return NULL;
+ d->pdev = pdev;
+ d->reg_base = iobase;
+ return d;
+}
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *device;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
+ if (err)
+ return err;
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err)
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+ if (!device)
+ return -ENOMEM;
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, device);
+
+ device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ if (device->version >= IOAT_VER_3_0)
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
+ else
+ return -ENODEV;
+
+ if (err) {
+ dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ioat_remove(struct pci_dev *pdev)
+{
+ struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+ if (!device)
+ return;
+
+ dev_err(&pdev->dev, "Removing dma and dca services\n");
+ if (device->dca) {
+ unregister_dca_provider(device->dca, &pdev->dev);
+ free_dca_provider(device->dca);
+ device->dca = NULL;
+ }
+ ioat_dma_remove(device);
+}
+
+static int __init ioat_init_module(void)
+{
+ int err = -ENOMEM;
+
+ pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
+ DRV_NAME, IOAT_DMA_VERSION);
+
+ ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!ioat_cache)
+ return -ENOMEM;
+
+ ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+ if (!ioat_sed_cache)
+ goto err_ioat_cache;
+
+ err = pci_register_driver(&ioat_pci_driver);
+ if (err)
+ goto err_ioat3_cache;
+
+ return 0;
+
+ err_ioat3_cache:
+ kmem_cache_destroy(ioat_sed_cache);
+
+ err_ioat_cache:
+ kmem_cache_destroy(ioat_cache);
+
+ return err;
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+ pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_cache);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
deleted file mode 100644
index 76f0dc688a19..000000000000
--- a/drivers/dma/ioat/pci.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include <linux/slab.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
- /* I/OAT v1 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
- { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
- /* I/OAT v2 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
- /* I/OAT v3 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-
- /* I/OAT v3.2 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
-
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
-
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
-
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
-
- /* I/OAT v3.3 platforms */
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
-
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
-
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
-static void ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-struct kmem_cache *ioat2_cache;
-struct kmem_cache *ioat3_sed_cache;
-
-#define DRV_NAME "ioatdma"
-
-static struct pci_driver ioat_pci_driver = {
- .name = DRV_NAME,
- .id_table = ioat_pci_tbl,
- .probe = ioat_pci_probe,
- .remove = ioat_remove,
-};
-
-static struct ioatdma_device *
-alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
-{
- struct device *dev = &pdev->dev;
- struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
-
- if (!d)
- return NULL;
- d->pdev = pdev;
- d->reg_base = iobase;
- return d;
-}
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- void __iomem * const *iomap;
- struct device *dev = &pdev->dev;
- struct ioatdma_device *device;
- int err;
-
- err = pcim_enable_device(pdev);
- if (err)
- return err;
-
- err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
- if (err)
- return err;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err)
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- return err;
-
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- return err;
-
- device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
- if (!device)
- return -ENOMEM;
- pci_set_master(pdev);
- pci_set_drvdata(pdev, device);
-
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
- if (device->version == IOAT_VER_1_2)
- err = ioat1_dma_probe(device, ioat_dca_enabled);
- else if (device->version == IOAT_VER_2_0)
- err = ioat2_dma_probe(device, ioat_dca_enabled);
- else if (device->version >= IOAT_VER_3_0)
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- else
- return -ENODEV;
-
- if (err) {
- dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void ioat_remove(struct pci_dev *pdev)
-{
- struct ioatdma_device *device = pci_get_drvdata(pdev);
-
- if (!device)
- return;
-
- dev_err(&pdev->dev, "Removing dma and dca services\n");
- if (device->dca) {
- unregister_dca_provider(device->dca, &pdev->dev);
- free_dca_provider(device->dca);
- device->dca = NULL;
- }
- ioat_dma_remove(device);
-}
-
-static int __init ioat_init_module(void)
-{
- int err = -ENOMEM;
-
- pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
- DRV_NAME, IOAT_DMA_VERSION);
-
- ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!ioat2_cache)
- return -ENOMEM;
-
- ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
- if (!ioat3_sed_cache)
- goto err_ioat2_cache;
-
- err = pci_register_driver(&ioat_pci_driver);
- if (err)
- goto err_ioat3_cache;
-
- return 0;
-
- err_ioat3_cache:
- kmem_cache_destroy(ioat3_sed_cache);
-
- err_ioat2_cache:
- kmem_cache_destroy(ioat2_cache);
-
- return err;
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
- pci_unregister_driver(&ioat_pci_driver);
- kmem_cache_destroy(ioat2_cache);
-}
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 000000000000..ad4fb41cd23b
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,715 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+
+#define MAX_SCF 1024
+
+/*
+ * Lookup tables for placing a source address in either the base or the
+ * extended descriptor of an xor or pq operation.
+ */
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2 };
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
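+
+/*
+ * Reading the tables as bitmaps: xor_idx_to_desc == 0xe0 == 11100000b,
+ * so xor sources 0-4 live in the base descriptor and sources 5-7 spill
+ * into the extended one; pq_idx_to_desc == 0xf8 likewise puts pq
+ * sources 3-7 in the extended descriptor. The set_src helpers below
+ * select descs[] with (table >> idx) & 1 and pick the field slot from
+ * the matching *_idx_to_field[] table.
+ */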
+
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+ dma_addr_t addr, u32 offset, int idx)
+{
+ struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+ raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+ struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+ return raw->field[pq_idx_to_field[idx]];
+}
+
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ return raw->field[pq16_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+ dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+ struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+ raw->field[pq_idx_to_field[idx]] = addr + offset;
+ pq->coef[idx] = coef;
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+ dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+ struct ioat_pq16a_descriptor *pq16 =
+ (struct ioat_pq16a_descriptor *)desc[1];
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+ if (idx < 8)
+ pq->coef[idx] = coef;
+ else
+ pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
+{
+ struct ioat_sed_ent *sed;
+ gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+ sed = kmem_cache_alloc(ioat_sed_cache, flags);
+ if (!sed)
+ return NULL;
+
+ sed->hw_pool = hw_pool;
+ sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
+ flags, &sed->dma);
+ if (!sed->hw) {
+ kmem_cache_free(ioat_sed_cache, sed);
+ return NULL;
+ }
+
+ return sed;
+}
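+
+/*
+ * The hw_pool argument above indexes the MAX_SED_POOLS dma pools created
+ * at probe time, whose block sizes grow as SED_SIZE * (pool + 1); callers
+ * derive the index from their source count so the smallest block that
+ * fits is used.
+ */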
+
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *desc;
+ dma_addr_t dst = dma_dest;
+ dma_addr_t src = dma_src;
+ size_t total_len = len;
+ int num_descs, idx, i;
+
+ num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+ if (likely(num_descs) &&
+ ioat_check_space_lock(ioat_chan, num_descs) == 0)
+ idx = ioat_chan->head;
+ else
+ return NULL;
+ i = 0;
+ do {
+ size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ hw = desc->hw;
+
+ hw->size = copy;
+ hw->ctl = 0;
+ hw->src_addr = src;
+ hw->dst_addr = dst;
+
+ len -= copy;
+ dst += copy;
+ src += copy;
+ dump_desc_dbg(ioat_chan, desc);
+ } while (++i < num_descs);
+
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ hw->ctl_f.compl_write = 1;
+ dump_desc_dbg(ioat_chan, desc);
+ /* we leave the channel locked to ensure in-order submission */
+
+ return &desc->txd;
+}
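+
+/*
+ * Sizing sketch for the loop above: each descriptor moves at most
+ * 1 << xfercap_log bytes, so with, say, a 1 MiB transfer cap a 2.5 MiB
+ * copy splits into three descriptors, and only the last one carries the
+ * interrupt/completion-write bits.
+ */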
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
+ dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, unsigned long flags)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioat_ring_ent *compl_desc;
+ struct ioat_ring_ent *desc;
+ struct ioat_ring_ent *ext;
+ size_t total_len = len;
+ struct ioat_xor_descriptor *xor;
+ struct ioat_xor_ext_descriptor *xor_ex = NULL;
+ struct ioat_dma_descriptor *hw;
+ int num_descs, with_ext, idx, i;
+ u32 offset = 0;
+ u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+
+ BUG_ON(src_cnt < 2);
+
+ num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+ /* we need 2x the number of descriptors to cover greater than 5
+ * sources
+ */
+ if (src_cnt > 5) {
+ with_ext = 1;
+ num_descs *= 2;
+ } else
+ with_ext = 0;
+
+ /* completion writes from the raid engine may pass completion
+ * writes from the legacy engine, so we need one extra null
+ * (legacy) descriptor to ensure all completion writes arrive in
+ * order.
+ */
+ if (likely(num_descs) &&
+ ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
+ idx = ioat_chan->head;
+ else
+ return NULL;
+ i = 0;
+ do {
+ struct ioat_raw_descriptor *descs[2];
+ size_t xfer_size = min_t(size_t,
+ len, 1 << ioat_chan->xfercap_log);
+ int s;
+
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ xor = desc->xor;
+
+ /* save a branch by unconditionally retrieving the
+ * extended descriptor; xor_set_src() knows not to write
+ * to it in the single-descriptor case
+ */
+ ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
+ xor_ex = ext->xor_ex;
+
+ descs[0] = (struct ioat_raw_descriptor *) xor;
+ descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+ for (s = 0; s < src_cnt; s++)
+ xor_set_src(descs, src[s], offset, s);
+ xor->size = xfer_size;
+ xor->dst_addr = dest + offset;
+ xor->ctl = 0;
+ xor->ctl_f.op = op;
+ xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ dump_desc_dbg(ioat_chan, desc);
+ } while ((i += 1 + with_ext) < num_descs);
+
+ /* last xor descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat_chan, compl_desc);
+
+ /* we leave the channel locked to ensure in-order submission */
+ return &compl_desc->txd;
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+{
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success, so clear it here
+ */
+ *result = 0;
+
+ return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
+ src_cnt - 1, len, flags);
+}
+
+static void
+dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
+ struct ioat_ring_ent *ext)
+{
+ struct device *dev = to_dev(ioat_chan);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
+ struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
+ int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
+ desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
+ pq->ctl_f.int_en, pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++)
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+ dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
+ struct ioat_ring_ent *desc)
+{
+ struct device *dev = to_dev(ioat_chan);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_raw_descriptor *descs[] = { (void *)pq,
+ (void *)pq,
+ (void *)pq };
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ if (desc->sed) {
+ descs[1] = (void *)desc->sed->hw;
+ descs[2] = (void *)desc->sed->hw + 64;
+ }
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) pq->next,
+ desc->txd.flags, pq->size, pq->ctl,
+ pq->ctl_f.op, pq->ctl_f.int_en,
+ pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq16_get_src(descs, i),
+ pq->coef[i]);
+ }
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *compl_desc;
+ struct ioat_ring_ent *desc;
+ struct ioat_ring_ent *ext;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ struct ioat_pq_ext_descriptor *pq_ex = NULL;
+ struct ioat_dma_descriptor *hw;
+ u32 offset = 0;
+ u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+ int i, s, idx, with_ext, num_descs;
+ int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
+
+ dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+ /* the engine requires at least two sources (we provide
+ * at least 1 implied source in the DMA_PREP_CONTINUE case)
+ */
+ BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+
+ num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+ /* we need 2x the number of descriptors to cover greater than 3
+ * sources (we need 1 extra source in the q-only continuation
+ * case and 3 extra sources in the p+q continuation case).
+ */
+ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+ with_ext = 1;
+ num_descs *= 2;
+ } else
+ with_ext = 0;
+
+ /* completion writes from the raid engine may pass completion
+ * writes from the legacy engine, so we need one extra null
+ * (legacy) descriptor to ensure all completion writes arrive in
+ * order.
+ */
+ if (likely(num_descs) &&
+ ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+ idx = ioat_chan->head;
+ else
+ return NULL;
+ i = 0;
+ do {
+ struct ioat_raw_descriptor *descs[2];
+ size_t xfer_size = min_t(size_t, len,
+ 1 << ioat_chan->xfercap_log);
+
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ pq = desc->pq;
+
+ /* save a branch by unconditionally retrieving the
+ * extended descriptor; pq_set_src() knows not to write
+ * to it in the single-descriptor case
+ */
+ ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
+ pq_ex = ext->pq_ex;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+ descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+
+ for (s = 0; s < src_cnt; s++)
+ pq_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq_set_src(descs, dst[0], offset, 0, s++);
+ pq_set_src(descs, dst[1], offset, 1, s++);
+ pq_set_src(descs, dst[1], offset, 0, s++);
+ }
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ /* we turn on descriptor write back error status */
+ if (ioat_dma->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
+ pq->ctl_f.src_cnt = src_cnt_to_hw(s);
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while ((i += 1 + with_ext) < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ dump_pq_desc_dbg(ioat_chan, desc, ext);
+
+ if (!cb32) {
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+ compl_desc = desc;
+ } else {
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat_chan, compl_desc);
+ }
+
+ /* we leave the channel locked to ensure in-order submission */
+ return &compl_desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ u32 offset = 0;
+ u8 op;
+ int i, s, idx, num_descs;
+
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+ dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+
+ num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+
+ /*
+ * 16-source pq is only available on CB3.3, which does not have the
+ * completion-write hardware bug, so no extra null descriptor is
+ * needed.
+ */
+ if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
+ idx = ioat_chan->head;
+ else
+ return NULL;
+
+ i = 0;
+
+ do {
+ struct ioat_raw_descriptor *descs[4];
+ size_t xfer_size = min_t(size_t, len,
+ 1 << ioat_chan->xfercap_log);
+
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+ pq = desc->pq;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+ desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
+ if (!desc->sed) {
+ dev_err(to_dev(ioat_chan),
+ "%s: no free sed entries\n", __func__);
+ return NULL;
+ }
+
+ pq->sed_addr = desc->sed->dma;
+ desc->sed->parent = desc;
+
+ descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+ descs[2] = (void *)descs[1] + 64;
+
+ for (s = 0; s < src_cnt; s++)
+ pq16_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq16_set_src(descs, dst[0], offset, 0, s++);
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ pq16_set_src(descs, dst[1], offset, 0, s++);
+ }
+
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+ /* we turn on descriptor write back error status */
+ if (ioat_dma->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while (++i < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* with cb3.3 we should be able to do completion w/o a null desc */
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+
+ dump_pq16_desc_dbg(ioat_chan, desc);
+
+ /* we leave the channel locked to ensure in-order submission */
+ return &desc->txd;
+}
+
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+ if (dmaf_p_disabled_continue(flags))
+ return src_cnt + 1;
+ else if (dmaf_continue(flags))
+ return src_cnt + 3;
+ else
+ return src_cnt;
+}
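+
+/*
+ * src_cnt_flags() mirrors the implied sources the prep loops above add
+ * for RAID continuations: a q-only continuation (dmaf_p_disabled_continue)
+ * feeds one extra source (the old Q), a full p+q continuation feeds three
+ * (P, Q, and Q again with a zero coefficient), so the "> 8" checks below
+ * compare what the hardware will actually see against the 8-source limit
+ * of the base+ext descriptor pair.
+ */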
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ dst[0] = dst[1];
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ dst[1] = dst[0];
+
+ /* handle the single source multiply case from the raid6
+ * recovery path
+ */
+ if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
+ dma_addr_t single_source[2];
+ unsigned char single_source_coef[2];
+
+ BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+ single_source[0] = src[0];
+ single_source[1] = src[0];
+ single_source_coef[0] = scf[0];
+ single_source_coef[1] = 0;
+
+ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+ __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
+ single_source_coef, len, flags);
+
+ } else {
+ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags);
+ }
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+{
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pq[0] = pq[1];
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ pq[1] = pq[0];
+
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success, so clear it here
+ */
+ *pqres = 0;
+
+ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ unsigned char scf[MAX_SCF];
+ dma_addr_t pq[2];
+
+ if (src_cnt > MAX_SCF)
+ return NULL;
+
+ memset(scf, 0, src_cnt);
+ pq[0] = dst;
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = dst; /* specify valid address for disabled result */
+
+ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags);
+}
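+
+/*
+ * ioat_prep_pqxor() above fakes xor on pq-only hardware: Q is disabled,
+ * the GF coefficients are zeroed, and the P result -- for RAID-style pq
+ * simply the unweighted XOR of the sources -- lands at dst.
+ */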
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+{
+ unsigned char scf[MAX_SCF];
+ dma_addr_t pq[2];
+
+ if (src_cnt > MAX_SCF)
+ return NULL;
+
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success, so clear it here
+ */
+ *result = 0;
+
+ memset(scf, 0, src_cnt);
+ pq[0] = src[0];
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = pq[0]; /* specify valid address for disabled result */
+
+ return src_cnt_flags(src_cnt, flags) > 8 ?
+ __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioat_ring_ent *desc;
+ struct ioat_dma_descriptor *hw;
+
+ if (ioat_check_space_lock(ioat_chan, 1) == 0)
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+ else
+ return NULL;
+
+ hw = desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = 1;
+ hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ hw->src_addr = 0;
+ hw->dst_addr = 0;
+
+ desc->txd.flags = flags;
+ desc->len = 1;
+
+ dump_desc_dbg(ioat_chan, desc);
+
+ /* we leave the channel locked to ensure in-order submission */
+ return &desc->txd;
+}
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
new file mode 100644
index 000000000000..cb4a857ee21b
--- /dev/null
+++ b/drivers/dma/ioat/sysfs.c
@@ -0,0 +1,135 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+ struct dma_device *dma = c->device;
+
+ return sprintf(page, "copy%s%s%s%s%s\n",
+ dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+ dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+ dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+ dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+ dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+ struct dma_device *dma = c->device;
+ struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
+
+ return sprintf(page, "%d.%d\n",
+ ioat_dma->version >> 4, ioat_dma->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct ioat_sysfs_entry *entry;
+ struct ioatdma_chan *ioat_chan;
+
+ entry = container_of(attr, struct ioat_sysfs_entry, attr);
+ ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+ if (!entry->show)
+ return -EIO;
+ return entry->show(&ioat_chan->dma_chan, page);
+}
+
+const struct sysfs_ops ioat_sysfs_ops = {
+ .show = ioat_attr_show,
+};
+
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
+{
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ struct dma_chan *c;
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct kobject *parent = &c->dev->device.kobj;
+ int err;
+
+ err = kobject_init_and_add(&ioat_chan->kobj, type,
+ parent, "quickdata");
+ if (err) {
+ dev_warn(to_dev(ioat_chan),
+ "sysfs init error (%d), continuing...\n", err);
+ kobject_put(&ioat_chan->kobj);
+ set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
+ }
+ }
+}
+
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
+{
+ struct dma_device *dma = &ioat_dma->dma_dev;
+ struct dma_chan *c;
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+ if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
+ kobject_del(&ioat_chan->kobj);
+ kobject_put(&ioat_chan->kobj);
+ }
+ }
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
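+ /* "& ~1" folds the 1 << 0 of an unallocated ring (alloc_order == 0)
+ * down to a reported size of 0
+ */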
+ return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+ /* ...taken outside the lock, no need to be precise */
+ return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+ &ring_size_attr.attr,
+ &ring_active_attr.attr,
+ &ioat_cap_attr.attr,
+ &ioat_version_attr.attr,
+ NULL,
+};
+
+struct kobj_type ioat_ktype = {
+ .sysfs_ops = &ioat_sysfs_ops,
+ .default_attrs = ioat_attrs,
+};
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 998826854fdd..e4f43125e0fb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1300,10 +1300,11 @@ static int iop_adma_probe(struct platform_device *pdev)
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL)) == NULL) {
+ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
+ if (!adev->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_free_adev;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2e284a4438bc..4768a829253a 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -265,10 +265,10 @@ int ipu_irq_unmap(unsigned int source)
return ret;
}
-/* Chained IRQ handler for IPU error interrupt */
-static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
+/* Chained IRQ handler for IPU function and error interrupt */
+static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
- struct ipu *ipu = irq_get_handler_data(irq);
+ struct ipu *ipu = irq_desc_get_handler_data(desc);
u32 status;
int i, line;
@@ -286,43 +286,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
-
- line--;
- status &= ~(1UL << line);
-
- raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
-
- if (!map) {
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
- line, i);
- continue;
- }
- generic_handle_irq(irq);
- }
- }
-}
-
-/* Chained IRQ handler for IPU function interrupt */
-static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
-{
- struct ipu *ipu = irq_desc_get_handler_data(desc);
- u32 status;
- int i, line;
-
- for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
-
- raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /* Not clearing all interrupts, see above */
- status &= ipu_read_reg(ipu, bank->control);
- raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
+ unsigned int irq = NO_IRQ;
line--;
status &= ~(1UL << line);
@@ -377,16 +341,12 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
irq_map[i].irq = irq;
irq_map[i].source = -EINVAL;
irq_set_handler(irq, handle_level_irq);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
- irq_set_handler_data(ipu->irq_fn, ipu);
- irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn);
+ irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
- irq_set_handler_data(ipu->irq_err, ipu);
- irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+ irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
ipu->irq_base = irq_base;
@@ -399,16 +359,12 @@ void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
irq_base = ipu->irq_base;
- irq_set_chained_handler(ipu->irq_fn, NULL);
- irq_set_handler_data(ipu->irq_fn, NULL);
+ irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
- irq_set_chained_handler(ipu->irq_err, NULL);
- irq_set_handler_data(ipu->irq_err, NULL);
+ irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
+ irq_set_status_flags(irq, IRQ_NOREQUEST);
irq_set_chip(irq, NULL);
irq_set_chip_data(irq, NULL);
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 647e362f01fd..1ba2fd73852d 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -24,7 +24,6 @@
#include "virt-dma.h"
#define DRIVER_NAME "k3-dma"
-#define DMA_ALIGN 3
#define DMA_MAX_SIZE 0x1ffc
#define INT_STAT 0x00
@@ -732,7 +731,7 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_pause = k3_dma_transfer_pause;
d->slave.device_resume = k3_dma_transfer_resume;
d->slave.device_terminate_all = k3_dma_terminate_all;
- d->slave.copy_align = DMA_ALIGN;
+ d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
/* init virtual channel */
d->chans = devm_kzalloc(&op->dev,
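
This hunk, like the mmp, mic, ste_dma40 and sun6i hunks below, replaces a bare number with the dmaengine_alignment enum added this cycle. copy_align has always been a power-of-two shift rather than a byte count, which the old "#define DMA_ALIGN 3" obscured. For reference, the enum as it appears in include/linux/dmaengine.h in v4.3:

    enum dmaengine_alignment {
        DMAENGINE_ALIGN_1_BYTE = 0,
        DMAENGINE_ALIGN_2_BYTES = 1,
        DMAENGINE_ALIGN_4_BYTES = 2,
        DMAENGINE_ALIGN_8_BYTES = 3,
        DMAENGINE_ALIGN_16_BYTES = 4,
        DMAENGINE_ALIGN_32_BYTES = 5,
        DMAENGINE_ALIGN_64_BYTES = 6,
    };

    /* alignment in bytes = 1 << copy_align, so DMA_ALIGN (3) and
     * DMAENGINE_ALIGN_8_BYTES encode exactly the same constraint */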
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
new file mode 100644
index 000000000000..761f32687055
--- /dev/null
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -0,0 +1,183 @@
+/*
+ * DMA Router driver for LPC18xx/43xx DMA MUX
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on TI DMA Crossbar driver by:
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+/* CREG register offset and macros for mux manipulation */
+#define LPC18XX_CREG_DMAMUX 0x11c
+#define LPC18XX_DMAMUX_VAL(v, n) ((v) << ((n) * 2))
+#define LPC18XX_DMAMUX_MASK(n) (0x3 << ((n) * 2))
+#define LPC18XX_DMAMUX_MAX_VAL 0x3
+
+struct lpc18xx_dmamux {
+ u32 value;
+ bool busy;
+};
+
+struct lpc18xx_dmamux_data {
+ struct dma_router dmarouter;
+ struct lpc18xx_dmamux *muxes;
+ u32 dma_master_requests;
+ u32 dma_mux_requests;
+ struct regmap *reg;
+ spinlock_t lock;
+};
+
+static void lpc18xx_dmamux_free(struct device *dev, void *route_data)
+{
+ struct lpc18xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct lpc18xx_dmamux *mux = route_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ mux->busy = false;
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+}
+
+static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ unsigned long flags;
+ unsigned mux;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mux = dma_spec->args[0];
+ if (mux >= dmamux->dma_master_requests) {
+ dev_err(&pdev->dev, "invalid mux number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
+ dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+ dma_spec->args[1]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The of_node_put() will be done in the core for the node */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (dmamux->muxes[mux].busy) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
+ mux, mux, dmamux->muxes[mux].value);
+ of_node_put(dma_spec->np);
+ return ERR_PTR(-EBUSY);
+ }
+
+ dmamux->muxes[mux].busy = true;
+ dmamux->muxes[mux].value = dma_spec->args[1];
+
+ regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX,
+ LPC18XX_DMAMUX_MASK(mux),
+ LPC18XX_DMAMUX_VAL(dmamux->muxes[mux].value, mux));
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[1] = dma_spec->args[2];
+ dma_spec->args_count = 2;
+
+ dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
+ dmamux->muxes[mux].value, mux);
+
+ return &dmamux->muxes[mux];
+}
+
+static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *dma_np, *np = pdev->dev.of_node;
+ struct lpc18xx_dmamux_data *dmamux;
+ int ret;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmamux->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(dmamux->reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(dmamux->reg);
+ }
+
+ ret = of_property_read_u32(np, "dma-requests",
+ &dmamux->dma_mux_requests);
+ if (ret) {
+ dev_err(&pdev->dev, "missing dma-requests property\n");
+ return ret;
+ }
+
+ dma_np = of_parse_phandle(np, "dma-masters", 0);
+ if (!dma_np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(dma_np, "dma-requests",
+ &dmamux->dma_master_requests);
+ of_node_put(dma_np);
+ if (ret) {
+ dev_err(&pdev->dev, "missing master dma-requests property\n");
+ return ret;
+ }
+
+ dmamux->muxes = devm_kcalloc(&pdev->dev, dmamux->dma_master_requests,
+ sizeof(struct lpc18xx_dmamux),
+ GFP_KERNEL);
+ if (!dmamux->muxes)
+ return -ENOMEM;
+
+ spin_lock_init(&dmamux->lock);
+ platform_set_drvdata(pdev, dmamux);
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = lpc18xx_dmamux_free;
+
+ return of_dma_router_register(np, lpc18xx_dmamux_reserve,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc18xx_dmamux_match[] = {
+ { .compatible = "nxp,lpc1850-dmamux" },
+ {},
+};
+
+static struct platform_driver lpc18xx_dmamux_driver = {
+ .probe = lpc18xx_dmamux_probe,
+ .driver = {
+ .name = "lpc18xx-dmamux",
+ .of_match_table = lpc18xx_dmamux_match,
+ },
+};
+
+static int __init lpc18xx_dmamux_init(void)
+{
+ return platform_driver_register(&lpc18xx_dmamux_driver);
+}
+arch_initcall(lpc18xx_dmamux_init);
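
Each master DMA request line owns a two-bit field of the CREG DMAMUX register; LPC18XX_DMAMUX_VAL()/LPC18XX_DMAMUX_MASK() compute that field and regmap_update_bits() applies it under the spinlock. A standalone illustration of the same field arithmetic (user-space C, written for this review only):

    #include <stdint.h>
    #include <stdio.h>

    /* request line n owns bits [2n+1:2n] of the CREG DMAMUX register */
    static uint32_t dmamux_update(uint32_t reg, unsigned int n, uint32_t val)
    {
        uint32_t mask = 0x3u << (n * 2);

        return (reg & ~mask) | ((val & 0x3u) << (n * 2));
    }

    int main(void)
    {
        /* route mux value 2 onto request line 5: bits 11:10 become 0b10 */
        printf("0x%08x\n", (unsigned)dmamux_update(0, 5, 2)); /* 0x00000800 */
        return 0;
    }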
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
index f663b0bdd11d..d89982034e68 100644
--- a/drivers/dma/mic_x100_dma.h
+++ b/drivers/dma/mic_x100_dma.h
@@ -39,7 +39,7 @@
*/
#define MIC_DMA_MAX_NUM_CHAN 8
#define MIC_DMA_NUM_CHAN 4
-#define MIC_DMA_ALIGN_SHIFT 6
+#define MIC_DMA_ALIGN_SHIFT DMAENGINE_ALIGN_64_BYTES
#define MIC_DMA_ALIGN_BYTES (1 << MIC_DMA_ALIGN_SHIFT)
#define MIC_DMA_DESC_RX_SIZE (128 * 1024 - 4)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 462a0229a743..e39457f13d4d 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -72,7 +72,6 @@
#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
-#define PDMA_ALIGNMENT 3
#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
@@ -1071,7 +1070,7 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_config = mmp_pdma_config;
pdev->device.device_terminate_all = mmp_pdma_terminate_all;
- pdev->device.copy_align = PDMA_ALIGNMENT;
+ pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
pdev->device.src_addr_widths = widths;
pdev->device.dst_addr_widths = widths;
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e683761e0f8f..3df0422607d5 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -100,7 +100,6 @@ enum mmp_tdma_type {
PXA910_SQU,
};
-#define TDMA_ALIGNMENT 3
#define TDMA_MAX_XFER_BYTES SZ_64K
struct mmp_tdma_chan {
@@ -695,7 +694,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
tdev->device.device_pause = mmp_tdma_pause_chan;
tdev->device.device_resume = mmp_tdma_resume_chan;
tdev->device.device_terminate_all = mmp_tdma_terminate_all;
- tdev->device.copy_align = TDMA_ALIGNMENT;
+ tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..1c2de9a834a9 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -13,7 +13,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -26,6 +25,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
+#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- if (IS_ENABLED(__BIG_ENDIAN))
- config |= XOR_DESCRIPTOR_SWAP;
- else
- config &= ~XOR_DESCRIPTOR_SWAP;
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
@@ -1125,7 +1126,8 @@ static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
{},
};
-MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+
+static unsigned int mv_xor_engine_count;
static int mv_xor_probe(struct platform_device *pdev)
{
@@ -1133,6 +1135,7 @@ static int mv_xor_probe(struct platform_device *pdev)
struct mv_xor_device *xordev;
struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
+ unsigned int max_engines, max_channels;
int i, ret;
int op_in_desc;
@@ -1176,6 +1179,21 @@ static int mv_xor_probe(struct platform_device *pdev)
if (!IS_ERR(xordev->clk))
clk_prepare_enable(xordev->clk);
+ /*
+ * We don't want to have more than one channel per CPU in
+ * order for async_tx to perform well. So we limit the number
+ * of engines and channels so that we take into account this
+ * constraint. Note that we also want to use channels from
+ * separate engines when possible.
+ */
+ max_engines = num_present_cpus();
+ max_channels = min_t(unsigned int,
+ MV_XOR_MAX_CHANNELS,
+ DIV_ROUND_UP(num_present_cpus(), 2));
+
+ if (mv_xor_engine_count >= max_engines)
+ return 0;
+
if (pdev->dev.of_node) {
struct device_node *np;
int i = 0;
@@ -1189,13 +1207,13 @@ static int mv_xor_probe(struct platform_device *pdev)
int irq;
op_in_desc = (int)of_id->data;
+ if (i >= max_channels)
+ continue;
+
dma_cap_zero(cap_mask);
- if (of_property_read_bool(np, "dmacap,memcpy"))
- dma_cap_set(DMA_MEMCPY, cap_mask);
- if (of_property_read_bool(np, "dmacap,xor"))
- dma_cap_set(DMA_XOR, cap_mask);
- if (of_property_read_bool(np, "dmacap,interrupt"))
- dma_cap_set(DMA_INTERRUPT, cap_mask);
+ dma_cap_set(DMA_MEMCPY, cap_mask);
+ dma_cap_set(DMA_XOR, cap_mask);
+ dma_cap_set(DMA_INTERRUPT, cap_mask);
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
@@ -1215,7 +1233,7 @@ static int mv_xor_probe(struct platform_device *pdev)
i++;
}
} else if (pdata && pdata->channels) {
- for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ for (i = 0; i < max_channels; i++) {
struct mv_xor_channel_data *cd;
struct mv_xor_chan *chan;
int irq;
@@ -1262,27 +1280,8 @@ err_channel_add:
return ret;
}
-static int mv_xor_remove(struct platform_device *pdev)
-{
- struct mv_xor_device *xordev = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
- if (xordev->channels[i])
- mv_xor_channel_remove(xordev->channels[i]);
- }
-
- if (!IS_ERR(xordev->clk)) {
- clk_disable_unprepare(xordev->clk);
- clk_put(xordev->clk);
- }
-
- return 0;
-}
-
static struct platform_driver mv_xor_driver = {
.probe = mv_xor_probe,
- .remove = mv_xor_remove,
.driver = {
.name = MV_XOR_NAME,
.of_match_table = of_match_ptr(mv_xor_dt_ids),
@@ -1294,19 +1293,10 @@ static int __init mv_xor_init(void)
{
return platform_driver_register(&mv_xor_driver);
}
-module_init(mv_xor_init);
-
-/* it's currently unsafe to unload this module */
-#if 0
-static void __exit mv_xor_exit(void)
-{
- platform_driver_unregister(&mv_xor_driver);
- return;
-}
-
-module_exit(mv_xor_exit);
-#endif
+device_initcall(mv_xor_init);
+/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
+*/
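
Probe now refuses engines beyond one per present CPU and caps channels at roughly one per two CPUs, because for async_tx extra channels only add contention. A standalone sketch of that arithmetic; MV_XOR_MAX_CHANNELS = 4 is assumed from mv_xor.h:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define MV_XOR_MAX_CHANNELS 4   /* assumed, from mv_xor.h */

    int main(void)
    {
        unsigned int cpus;

        for (cpus = 1; cpus <= 12; cpus++) {
            unsigned int chans = DIV_ROUND_UP(cpus, 2);

            if (chans > MV_XOR_MAX_CHANNELS)    /* the min_t() clamp */
                chans = MV_XOR_MAX_CHANNELS;
            printf("%2u CPUs -> at most %u channels per engine\n",
                   cpus, chans);
        }
        return 0;
    }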
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index b859792dde95..113605f6fe20 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..17ee758b419f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1198,6 +1198,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
unsigned lcnt0, lcnt1, ljmp0, ljmp1;
struct _arg_LPEND lpend;
+ if (*bursts == 1)
+ return _bursts(dry_run, buf, pxs, 1);
+
/* Max iterations possible in DMALP is 256 */
if (*bursts >= 256*256) {
lcnt1 = 256;
@@ -2328,7 +2331,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
- last->last = false;
+ desc->last = false;
dma_cookie_assign(&desc->txd);
@@ -2623,6 +2626,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.brst_len = 1;
desc->rqcfg.brst_len = get_burst_len(desc, len);
+ desc->bytes_requested = len;
desc->txd.flags = flags;
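
The new early return in _loop() skips loop generation entirely for a single burst; for larger counts the total is factored into two DMALP loop counters of at most 256 iterations each, since the counters are 8-bit. An illustrative, standalone version of that split (the driver's actual microcode emission carries more state, including a remainder pass):

    #include <stdio.h>

    static void split_bursts(unsigned long bursts)
    {
        unsigned int lcnt0, lcnt1;

        if (bursts >= 256 * 256) {
            lcnt1 = 256;
            lcnt0 = 256;
        } else if (bursts > 256) {
            lcnt1 = 256;
            lcnt0 = bursts / 256;
        } else {
            lcnt1 = bursts;
            lcnt0 = 0;
        }
        printf("%6lu bursts -> counters %u and %u\n", bursts, lcnt1, lcnt0);
    }

    int main(void)
    {
        split_bursts(1);      /* the new fast path emits this burst directly */
        split_bursts(300);
        split_bursts(70000);
        return 0;
    }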
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index ddcbbf5cd9e9..5cb61ce01036 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -184,19 +184,18 @@ static unsigned int pxad_drcmr(unsigned int line)
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
- int pos = 0;
struct pxad_phy *phy = s->private;
int i;
u32 drcmr;
- pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx);
+ seq_printf(s, "DMA channel %d requester :\n", phy->idx);
for (i = 0; i < 70; i++) {
drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
if ((drcmr & DRCMR_CHLNUM) == phy->idx)
- pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
- !!(drcmr & DRCMR_MAPVLD));
+ seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
+ !!(drcmr & DRCMR_MAPVLD));
}
- return pos;
+ return 0;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
@@ -906,21 +905,21 @@ static void pxad_get_config(struct pxad_chan *chan,
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
*dcmd = 0;
- if (chan->cfg.direction == DMA_DEV_TO_MEM) {
+ if (dir == DMA_DEV_TO_MEM) {
maxburst = chan->cfg.src_maxburst;
width = chan->cfg.src_addr_width;
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
}
- if (chan->cfg.direction == DMA_MEM_TO_DEV) {
+ if (dir == DMA_MEM_TO_DEV) {
maxburst = chan->cfg.dst_maxburst;
width = chan->cfg.dst_addr_width;
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
}
- if (chan->cfg.direction == DMA_MEM_TO_MEM)
+ if (dir == DMA_MEM_TO_MEM)
*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
PXA_DCMD_INCSRCADDR;
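
seq_printf() returns void as of this release, so show() callbacks can no longer sum up a byte count; they simply print and return 0. The corrected shape as a minimal sketch, with my_show/my_priv as placeholder names:

    #include <linux/seq_file.h>

    struct my_priv { int idx; };

    static int my_show(struct seq_file *s, void *unused)
    {
        struct my_priv *p = s->private;

        seq_printf(s, "DMA channel %d\n", p->idx);
        return 0;   /* success indication, not a byte count */
    }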
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0f371524a4d9..9fda65af841e 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -39,18 +39,6 @@ config SH_DMAE_R8A73A4
endif
-config SUDMAC
- tristate "Renesas SUDMAC support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas SUDMAC controllers.
-
-config RCAR_HPB_DMAE
- tristate "Renesas R-Car HPB DMAC support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas R-Car series DMA controllers.
-
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -59,6 +47,12 @@ config RCAR_DMAC
This driver supports the general purpose DMA controller found in the
Renesas R-Car second generation SoCs.
+config RCAR_HPB_DMAE
+ tristate "Renesas R-Car HPB DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car series DMA controllers.
+
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -67,3 +61,9 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
+
+config SUDMAC
+ tristate "Renesas SUDMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index b8a598066ce2..0133e4658196 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -13,7 +13,7 @@ shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o
shdma-objs := $(shdma-y)
obj-$(CONFIG_SH_DMAE) += shdma.o
-obj-$(CONFIG_SUDMAC) += sudmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8c5186cc9f63..7d5598d874e1 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -455,6 +455,7 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
switch (sdma->type) {
case SIRFSOC_DMA_VER_A7V1:
writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
writel_relaxed((1 << cid) | 1 << (cid + 16),
sdma->base +
SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
@@ -462,6 +463,8 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
break;
case SIRFSOC_DMA_VER_A7V2:
writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
+ writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
+ sdma->base + SIRFSOC_DMA_INT_ATLAS7);
writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
break;
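
The added writes ack the latched per-channel (A7V1) or global (A7V2) interrupt status during terminate, so an interrupt that fired just before termination cannot be delivered afterwards. The ordering, sketched with the A7V1 register names from the hunk above (a hedged fragment, not the full function):

    static void chan_quiesce(void __iomem *base, int cid)
    {
        /* mask the channel first ... */
        writel_relaxed(1 << cid, base + SIRFSOC_DMA_INT_EN_CLR);
        /* ... then write-1-to-clear whatever was already latched */
        writel_relaxed(1 << cid, base + SIRFSOC_DMA_CH_INT);
    }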
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 3c10f034d4b9..750d1b313684 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2853,7 +2853,7 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
* This controller can only access address at even
* 32bit boundaries, i.e. 2^2
*/
- dev->copy_align = 2;
+ dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
}
if (dma_has_cap(DMA_SG, dev->cap_mask))
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
new file mode 100644
index 000000000000..a1a500d96ff2
--- /dev/null
+++ b/drivers/dma/sun4i-dma.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2014 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+/** Common macros to normal and dedicated DMA registers **/
+
+#define SUN4I_DMA_CFG_LOADING BIT(31)
+#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 25)
+#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len) ((len) << 23)
+#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode) ((mode) << 21)
+#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type) ((type) << 16)
+#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 9)
+#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len) ((len) << 7)
+#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
+#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)
+
+/** Normal DMA register values **/
+
+/* Normal DMA source/destination data request type values */
+#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
+#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+
+/** Normal DMA register layout **/
+
+/* Normal DMA source/destination address mode values */
+#define SUN4I_NDMA_ADDR_MODE_LINEAR 0
+#define SUN4I_NDMA_ADDR_MODE_IO 1
+
+/* Normal DMA configuration register layout */
+#define SUN4I_NDMA_CFG_CONT_MODE BIT(30)
+#define SUN4I_NDMA_CFG_WAIT_STATE(n) ((n) << 27)
+#define SUN4I_NDMA_CFG_DST_NON_SECURE BIT(22)
+#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
+#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)
+
+/** Dedicated DMA register values **/
+
+/* Dedicated DMA source/destination address mode values */
+#define SUN4I_DDMA_ADDR_MODE_LINEAR 0
+#define SUN4I_DDMA_ADDR_MODE_IO 1
+#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE 2
+#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE 3
+
+/* Dedicated DMA source/destination data request type values */
+#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
+#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+
+/** Dedicated DMA register layout **/
+
+/* Dedicated DMA configuration register layout */
+#define SUN4I_DDMA_CFG_BUSY BIT(30)
+#define SUN4I_DDMA_CFG_CONT_MODE BIT(29)
+#define SUN4I_DDMA_CFG_DST_NON_SECURE BIT(28)
+#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
+#define SUN4I_DDMA_CFG_SRC_NON_SECURE BIT(12)
+
+/* Dedicated DMA parameter register layout */
+#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n) (((n) - 1) << 24)
+#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n) (((n) - 1) << 16)
+#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n) (((n) - 1) << 8)
+#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n) (((n) - 1) << 0)
+
+/** DMA register offsets **/
+
+/* General register offsets */
+#define SUN4I_DMA_IRQ_ENABLE_REG 0x0
+#define SUN4I_DMA_IRQ_PENDING_STATUS_REG 0x4
+
+/* Normal DMA register offsets */
+#define SUN4I_NDMA_CHANNEL_REG_BASE(n) (0x100 + (n) * 0x20)
+#define SUN4I_NDMA_CFG_REG 0x0
+#define SUN4I_NDMA_SRC_ADDR_REG 0x4
+#define SUN4I_NDMA_DST_ADDR_REG 0x8
+#define SUN4I_NDMA_BYTE_COUNT_REG 0xC
+
+/* Dedicated DMA register offsets */
+#define SUN4I_DDMA_CHANNEL_REG_BASE(n) (0x300 + (n) * 0x20)
+#define SUN4I_DDMA_CFG_REG 0x0
+#define SUN4I_DDMA_SRC_ADDR_REG 0x4
+#define SUN4I_DDMA_DST_ADDR_REG 0x8
+#define SUN4I_DDMA_BYTE_COUNT_REG 0xC
+#define SUN4I_DDMA_PARA_REG 0x18
+
+/** DMA Driver **/
+
+/*
+ * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
+ * that's 16 channels. As for endpoints, there are 29 and 21,
+ * respectively. Given that the Normal DMA endpoints (other than
+ * SDRAM) can be used as tx/rx, we need 78 vchans in total
+ */
+#define SUN4I_NDMA_NR_MAX_CHANNELS 8
+#define SUN4I_DDMA_NR_MAX_CHANNELS 8
+#define SUN4I_DMA_NR_MAX_CHANNELS \
+ (SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
+#define SUN4I_NDMA_NR_MAX_VCHANS (29 * 2 - 1)
+#define SUN4I_DDMA_NR_MAX_VCHANS 21
+#define SUN4I_DMA_NR_MAX_VCHANS \
+ (SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+
+/*
+ * This set of SUN4I_DDMA timing parameters was found experimentally while
+ * working with the SPI driver and seems to make it behave correctly.
+ */
+#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
+ (SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) | \
+ SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) | \
+ SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
+ SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+
+struct sun4i_dma_pchan {
+ /* Register base of channel */
+ void __iomem *base;
+ /* vchan currently being serviced */
+ struct sun4i_dma_vchan *vchan;
+ /* Is this a dedicated pchan? */
+ int is_dedicated;
+};
+
+struct sun4i_dma_vchan {
+ struct virt_dma_chan vc;
+ struct dma_slave_config cfg;
+ struct sun4i_dma_pchan *pchan;
+ struct sun4i_dma_promise *processing;
+ struct sun4i_dma_contract *contract;
+ u8 endpoint;
+ int is_dedicated;
+};
+
+struct sun4i_dma_promise {
+ u32 cfg;
+ u32 para;
+ dma_addr_t src;
+ dma_addr_t dst;
+ size_t len;
+ struct list_head list;
+};
+
+/* A contract is a set of promises */
+struct sun4i_dma_contract {
+ struct virt_dma_desc vd;
+ struct list_head demands;
+ struct list_head completed_demands;
+ int is_cyclic;
+};
+
+struct sun4i_dma_dev {
+ DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+ struct dma_device slave;
+ struct sun4i_dma_pchan *pchans;
+ struct sun4i_dma_vchan *vchans;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock;
+};
+
+static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
+{
+ return container_of(dev, struct sun4i_dma_dev, slave);
+}
+
+static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sun4i_dma_vchan, vc.chan);
+}
+
+static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sun4i_dma_contract, vd);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static int convert_burst(u32 maxburst)
+{
+ if (maxburst > 8)
+ return -EINVAL;
+
+ /* 1 -> 0, 4 -> 1, 8 -> 2 */
+ return (maxburst >> 2);
+}
+
+static int convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+ if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
+ return -EINVAL;
+
+ /* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
+ return (addr_width >> 1);
+}
+
+static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+ vchan_free_chan_resources(&vchan->vc);
+}
+
+static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
+ struct sun4i_dma_vchan *vchan)
+{
+ struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
+ unsigned long flags;
+ int i, max;
+
+ /*
+ * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
+ * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+ */
+ if (vchan->is_dedicated) {
+ i = SUN4I_NDMA_NR_MAX_CHANNELS;
+ max = SUN4I_DMA_NR_MAX_CHANNELS;
+ } else {
+ i = 0;
+ max = SUN4I_NDMA_NR_MAX_CHANNELS;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ for_each_clear_bit_from(i, &priv->pchans_used, max) {
+ pchan = &pchans[i];
+ pchan->vchan = vchan;
+ set_bit(i, priv->pchans_used);
+ break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return pchan;
+}
+
+static void release_pchan(struct sun4i_dma_dev *priv,
+ struct sun4i_dma_pchan *pchan)
+{
+ unsigned long flags;
+ int nr = pchan - priv->pchans;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ pchan->vchan = NULL;
+ clear_bit(nr, priv->pchans_used);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void configure_pchan(struct sun4i_dma_pchan *pchan,
+ struct sun4i_dma_promise *d)
+{
+ /*
+ * Configure addresses and misc parameters depending on type
+ * SUN4I_DDMA has an extra field with timing parameters
+ */
+ if (pchan->is_dedicated) {
+ writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
+ writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
+ writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+ writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
+ writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
+ } else {
+ writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
+ writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
+ writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+ writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
+ }
+}
+
+static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
+ struct sun4i_dma_pchan *pchan,
+ int half, int end)
+{
+ u32 reg;
+ int pchan_number = pchan - priv->pchans;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+ if (half)
+ reg |= BIT(pchan_number * 2);
+ else
+ reg &= ~BIT(pchan_number * 2);
+
+ if (end)
+ reg |= BIT(pchan_number * 2 + 1);
+ else
+ reg &= ~BIT(pchan_number * 2 + 1);
+
+ writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/**
+ * Execute pending operations on a vchan
+ *
+ * When given a vchan, this function will try to acquire a suitable
+ * pchan and, if successful, will configure it to fulfill a promise
+ * from the next pending contract.
+ *
+ * This function must be called with &vchan->vc.lock held.
+ */
+static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
+ struct sun4i_dma_vchan *vchan)
+{
+ struct sun4i_dma_promise *promise = NULL;
+ struct sun4i_dma_contract *contract = NULL;
+ struct sun4i_dma_pchan *pchan;
+ struct virt_dma_desc *vd;
+ int ret;
+
+ lockdep_assert_held(&vchan->vc.lock);
+
+ /* We need a pchan to do anything, so secure one if available */
+ pchan = find_and_use_pchan(priv, vchan);
+ if (!pchan)
+ return -EBUSY;
+
+ /*
+ * Channel endpoints must not be repeated, so if this vchan
+ * has already submitted some work, we can't do anything else
+ */
+ if (vchan->processing) {
+ dev_dbg(chan2dev(&vchan->vc.chan),
+ "processing something to this endpoint already\n");
+ ret = -EBUSY;
+ goto release_pchan;
+ }
+
+ do {
+ /* Figure out which contract we're working with today */
+ vd = vchan_next_desc(&vchan->vc);
+ if (!vd) {
+ dev_dbg(chan2dev(&vchan->vc.chan),
+ "No pending contract found");
+ ret = 0;
+ goto release_pchan;
+ }
+
+ contract = to_sun4i_dma_contract(vd);
+ if (list_empty(&contract->demands)) {
+ /* The contract has been completed so mark it as such */
+ list_del(&contract->vd.node);
+ vchan_cookie_complete(&contract->vd);
+ dev_dbg(chan2dev(&vchan->vc.chan),
+ "Empty contract found and marked complete");
+ }
+ } while (list_empty(&contract->demands));
+
+ /* Now find out what we need to do */
+ promise = list_first_entry(&contract->demands,
+ struct sun4i_dma_promise, list);
+ vchan->processing = promise;
+
+ /* ... and make it reality */
+ if (promise) {
+ vchan->contract = contract;
+ vchan->pchan = pchan;
+ set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+ configure_pchan(pchan, promise);
+ }
+
+ return 0;
+
+release_pchan:
+ release_pchan(priv, pchan);
+ return ret;
+}
+
+static int sanitize_config(struct dma_slave_config *sconfig,
+ enum dma_transfer_direction direction)
+{
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+ !sconfig->dst_maxburst)
+ return -EINVAL;
+
+ if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ sconfig->src_addr_width = sconfig->dst_addr_width;
+
+ if (!sconfig->src_maxburst)
+ sconfig->src_maxburst = sconfig->dst_maxburst;
+
+ break;
+
+ case DMA_DEV_TO_MEM:
+ if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+ !sconfig->src_maxburst)
+ return -EINVAL;
+
+ if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ sconfig->dst_addr_width = sconfig->src_addr_width;
+
+ if (!sconfig->dst_maxburst)
+ sconfig->dst_maxburst = sconfig->src_maxburst;
+
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Generate a promise, to be used in a normal DMA contract.
+ *
+ * A NDMA promise contains all the information required to program the
+ * normal part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+ size_t len, struct dma_slave_config *sconfig,
+ enum dma_transfer_direction direction)
+{
+ struct sun4i_dma_promise *promise;
+ int ret;
+
+ ret = sanitize_config(sconfig, direction);
+ if (ret)
+ return NULL;
+
+ promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+ if (!promise)
+ return NULL;
+
+ promise->src = src;
+ promise->dst = dest;
+ promise->len = len;
+ promise->cfg = SUN4I_DMA_CFG_LOADING |
+ SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+ dev_dbg(chan2dev(chan),
+ "src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
+ sconfig->src_maxburst, sconfig->dst_maxburst,
+ sconfig->src_addr_width, sconfig->dst_addr_width);
+
+ /* Source burst */
+ ret = convert_burst(sconfig->src_maxburst);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+ /* Destination burst */
+ ret = convert_burst(sconfig->dst_maxburst);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+ /* Source bus width */
+ ret = convert_buswidth(sconfig->src_addr_width);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+ /* Destination bus width */
+ ret = convert_buswidth(sconfig->dst_addr_width);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+ return promise;
+
+fail:
+ kfree(promise);
+ return NULL;
+}
+
+/**
+ * Generate a promise, to be used in a dedicated DMA contract.
+ *
+ * A DDMA promise contains all the information required to program the
+ * Dedicated part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+ size_t len, struct dma_slave_config *sconfig)
+{
+ struct sun4i_dma_promise *promise;
+ int ret;
+
+ promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+ if (!promise)
+ return NULL;
+
+ promise->src = src;
+ promise->dst = dest;
+ promise->len = len;
+ promise->cfg = SUN4I_DMA_CFG_LOADING |
+ SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+ /* Source burst */
+ ret = convert_burst(sconfig->src_maxburst);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+ /* Destination burst */
+ ret = convert_burst(sconfig->dst_maxburst);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+ /* Source bus width */
+ ret = convert_buswidth(sconfig->src_addr_width);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+ /* Destination bus width */
+ ret = convert_buswidth(sconfig->dst_addr_width);
+ if (IS_ERR_VALUE(ret))
+ goto fail;
+ promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+ return promise;
+
+fail:
+ kfree(promise);
+ return NULL;
+}
+
+/**
+ * Generate a contract
+ *
+ * Contracts function as DMA descriptors. As our hardware does not support
+ * linked lists, we need to implement SG via software. We use a contract
+ * to hold all the pieces of the request and process them serially one
+ * after another. Each piece is represented as a promise.
+ */
+static struct sun4i_dma_contract *generate_dma_contract(void)
+{
+ struct sun4i_dma_contract *contract;
+
+ contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
+ if (!contract)
+ return NULL;
+
+ INIT_LIST_HEAD(&contract->demands);
+ INIT_LIST_HEAD(&contract->completed_demands);
+
+ return contract;
+}
+
+/**
+ * Get next promise on a cyclic transfer
+ *
+ * Cyclic contracts contain a series of promises which are executed on a
+ * loop. This function returns the next promise from a cyclic contract,
+ * so it can be programmed into the hardware.
+ */
+static struct sun4i_dma_promise *
+get_next_cyclic_promise(struct sun4i_dma_contract *contract)
+{
+ struct sun4i_dma_promise *promise;
+
+ promise = list_first_entry_or_null(&contract->demands,
+ struct sun4i_dma_promise, list);
+ if (!promise) {
+ list_splice_init(&contract->completed_demands,
+ &contract->demands);
+ promise = list_first_entry(&contract->demands,
+ struct sun4i_dma_promise, list);
+ }
+
+ return promise;
+}
+
+/**
+ * Free a contract and all its associated promises
+ */
+static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
+{
+ struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
+ struct sun4i_dma_promise *promise;
+
+ /* Free all the demands and completed demands */
+ list_for_each_entry(promise, &contract->demands, list)
+ kfree(promise);
+
+ list_for_each_entry(promise, &contract->completed_demands, list)
+ kfree(promise);
+
+ kfree(contract);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun4i_dma_promise *promise;
+ struct sun4i_dma_contract *contract;
+
+ contract = generate_dma_contract();
+ if (!contract)
+ return NULL;
+
+ /*
+ * We can only do the copy to bus aligned addresses, so
+ * choose the best one so we get decent performance. We also
+ * maximize the burst size for this same reason.
+ */
+ sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ sconfig->src_maxburst = 8;
+ sconfig->dst_maxburst = 8;
+
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, src, dest, len, sconfig);
+ else
+ promise = generate_ndma_promise(chan, src, dest, len, sconfig,
+ DMA_MEM_TO_MEM);
+
+ if (!promise) {
+ kfree(contract);
+ return NULL;
+ }
+
+ /* Configure memcpy mode */
+ if (vchan->is_dedicated) {
+ promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+ } else {
+ promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ }
+
+ /* Fill the contract with our only promise */
+ list_add_tail(&promise->list, &contract->demands);
+
+ /* And add it to the vchan */
+ return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun4i_dma_promise *promise;
+ struct sun4i_dma_contract *contract;
+ dma_addr_t src, dest;
+ u32 endpoints;
+ int nr_periods, offset, plength, i;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ if (vchan->is_dedicated) {
+ /*
+ * As we are using this just for audio data, we need to use
+ * normal DMA. There is nothing stopping us from supporting
+ * dedicated DMA here as well, so if a client comes up and
+ * requires it, it will be simple to implement.
+ */
+ dev_err(chan2dev(chan),
+ "Cyclic transfers are only supported on Normal DMA\n");
+ return NULL;
+ }
+
+ contract = generate_dma_contract();
+ if (!contract)
+ return NULL;
+
+ contract->is_cyclic = 1;
+
+ /* Figure out the endpoints and the address we need */
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf;
+ dest = sconfig->dst_addr;
+ endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+ } else {
+ src = sconfig->src_addr;
+ dest = buf;
+ endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ }
+
+ /*
+ * We will be using half done interrupts to make two periods
+ * out of a promise, so we need to program the DMA engine less
+ * often
+ */
+
+ /*
+ * The engine can interrupt on half-transfer, so we can use
+ * this feature to program the engine half as often as if we
+ * didn't use it (keep in mind the hardware doesn't support
+ * linked lists).
+ *
+ * Say you have a set of periods (| marks the start/end, I for
+ * interrupt, P for programming the engine to do a new
+ * transfer), the easy but slow way would be to do
+ *
+ * |---|---|---|---| (periods / promises)
+ * P I,P I,P I,P I
+ *
+ * Using half transfer interrupts you can do
+ *
+ * |-------|-------| (promises as configured on hw)
+ * |---|---|---|---| (periods)
+ * P I I,P I I
+ *
+ * Which requires half the engine programming for the same
+ * functionality.
+ */
+ nr_periods = DIV_ROUND_UP(len / period_len, 2);
+ for (i = 0; i < nr_periods; i++) {
+ /* Calculate the offset in the buffer and the length needed */
+ offset = i * period_len * 2;
+ plength = min((len - offset), (period_len * 2));
+ if (dir == DMA_MEM_TO_DEV)
+ src = buf + offset;
+ else
+ dest = buf + offset;
+
+ /* Make the promise */
+ promise = generate_ndma_promise(chan, src, dest,
+ plength, sconfig, dir);
+ if (!promise) {
+ /* TODO: should we free everything? */
+ return NULL;
+ }
+ promise->cfg |= endpoints;
+
+ /* Then add it to the contract */
+ list_add_tail(&promise->list, &contract->demands);
+ }
+
+ /* And add it to the vchan */
+ return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun4i_dma_promise *promise;
+ struct sun4i_dma_contract *contract;
+ u8 ram_type, io_mode, linear_mode;
+ struct scatterlist *sg;
+ dma_addr_t srcaddr, dstaddr;
+ u32 endpoints, para;
+ int i;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ contract = generate_dma_contract();
+ if (!contract)
+ return NULL;
+
+ if (vchan->is_dedicated) {
+ io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ } else {
+ io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ }
+
+ if (dir == DMA_MEM_TO_DEV)
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
+ else
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* Figure out addresses */
+ if (dir == DMA_MEM_TO_DEV) {
+ srcaddr = sg_dma_address(sg);
+ dstaddr = sconfig->dst_addr;
+ } else {
+ srcaddr = sconfig->src_addr;
+ dstaddr = sg_dma_address(sg);
+ }
+
+ /*
+ * These are the magic DMA engine timings that keep SPI going.
+ * I haven't seen any interface on DMAEngine to configure
+ * timings, and so far they seem to work for everything we
+ * support, so I've kept them here. I don't know if other
+ * devices need different timings because, as usual, we only
+ * have the "para" bitfield meanings, but no comment on what
+ * the values should be when doing a certain operation :|
+ */
+ para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
+
+ /* And make a suitable promise */
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, srcaddr, dstaddr,
+ sg_dma_len(sg),
+ sconfig);
+ else
+ promise = generate_ndma_promise(chan, srcaddr, dstaddr,
+ sg_dma_len(sg),
+ sconfig, dir);
+
+ if (!promise)
+ return NULL; /* TODO: should we free everything? */
+
+ promise->cfg |= endpoints;
+ promise->para = para;
+
+ /* Then add it to the contract */
+ list_add_tail(&promise->list, &contract->demands);
+ }
+
+ /*
+ * Once we've got all the promises ready, add the contract
+ * to the pending list on the vchan
+ */
+ return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static int sun4i_dma_terminate_all(struct dma_chan *chan)
+{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct sun4i_dma_pchan *pchan = vchan->pchan;
+ LIST_HEAD(head);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+ vchan_get_all_descriptors(&vchan->vc, &head);
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ /*
+ * Clearing the configuration register will halt the pchan. Interrupts
+ * may still trigger, so don't forget to disable them.
+ */
+ if (pchan) {
+ if (pchan->is_dedicated)
+ writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
+ else
+ writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
+ set_pchan_interrupt(priv, pchan, 0, 0);
+ release_pchan(priv, pchan);
+ }
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+ /* Clear these so the vchan is usable again */
+ vchan->processing = NULL;
+ vchan->pchan = NULL;
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
+static int sun4i_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+ memcpy(&vchan->cfg, config, sizeof(*config));
+
+ return 0;
+}
+
+static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sun4i_dma_dev *priv = ofdma->of_dma_data;
+ struct sun4i_dma_vchan *vchan;
+ struct dma_chan *chan;
+ u8 is_dedicated = dma_spec->args[0];
+ u8 endpoint = dma_spec->args[1];
+
+ /* Check if type is Normal or Dedicated */
+ if (is_dedicated != 0 && is_dedicated != 1)
+ return NULL;
+
+ /* Make sure the endpoint looks sane */
+ if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
+ (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&priv->slave);
+ if (!chan)
+ return NULL;
+
+ /* Assign the endpoint to the vchan */
+ vchan = to_sun4i_dma_vchan(chan);
+ vchan->is_dedicated = is_dedicated;
+ vchan->endpoint = endpoint;
+
+ return chan;
+}
+
+static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ struct sun4i_dma_pchan *pchan = vchan->pchan;
+ struct sun4i_dma_contract *contract;
+ struct sun4i_dma_promise *promise;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (!state || (ret == DMA_COMPLETE))
+ return ret;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+ vd = vchan_find_desc(&vchan->vc, cookie);
+ if (!vd)
+ goto exit;
+ contract = to_sun4i_dma_contract(vd);
+
+ list_for_each_entry(promise, &contract->demands, list)
+ bytes += promise->len;
+
+ /*
+ * The hardware is configured to return the remaining byte
+ * quantity. If possible, replace the first listed element's
+ * full size with the actual remaining amount
+ */
+ promise = list_first_entry_or_null(&contract->demands,
+ struct sun4i_dma_promise, list);
+ if (promise && pchan) {
+ bytes -= promise->len;
+ if (pchan->is_dedicated)
+ bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+ else
+ bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+ }
+
+exit:
+
+ dma_set_residue(state, bytes);
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return ret;
+}
+
+static void sun4i_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+ struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ /*
+ * If there are pending transactions for this vchan, push one of
+ * them into the engine to get the ball rolling.
+ */
+ if (vchan_issue_pending(&vchan->vc))
+ __execute_vchan_pending(priv, vchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
+{
+ struct sun4i_dma_dev *priv = dev_id;
+ struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
+ struct sun4i_dma_vchan *vchan;
+ struct sun4i_dma_contract *contract;
+ struct sun4i_dma_promise *promise;
+ unsigned long pendirq, irqs, disableirqs;
+ int bit, i, free_room, allow_mitigation = 1;
+
+ pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+handle_pending:
+
+ disableirqs = 0;
+ free_room = 0;
+
+ for_each_set_bit(bit, &pendirq, 32) {
+ pchan = &pchans[bit >> 1];
+ vchan = pchan->vchan;
+ if (!vchan) /* a terminated channel may still interrupt */
+ continue;
+ contract = vchan->contract;
+
+ /*
+ * Disable the IRQ and free the pchan if it's an end
+ * interrupt (odd bit)
+ */
+ if (bit & 1) {
+ spin_lock(&vchan->vc.lock);
+
+ /*
+ * Move the promise into the completed list now that
+ * we're done with it
+ */
+ list_del(&vchan->processing->list);
+ list_add_tail(&vchan->processing->list,
+ &contract->completed_demands);
+
+ /*
+ * Cyclic DMA transfers are special:
+ * - There's always something we can dispatch
+ * - We need to run the callback
+ * - Latency is very important, as this is used by audio
+ * We therefore just cycle through the list and dispatch
+ * whatever we have here, reusing the pchan. There's
+ * no need to run the thread after this.
+ *
+ * For non-cyclic transfers we need to look around,
+ * so we can program some more work, or notify the
+ * client that their transfers have been completed.
+ */
+ if (contract->is_cyclic) {
+ promise = get_next_cyclic_promise(contract);
+ vchan->processing = promise;
+ configure_pchan(pchan, promise);
+ vchan_cyclic_callback(&contract->vd);
+ } else {
+ vchan->processing = NULL;
+ vchan->pchan = NULL;
+
+ free_room = 1;
+ disableirqs |= BIT(bit);
+ release_pchan(priv, pchan);
+ }
+
+ spin_unlock(&vchan->vc.lock);
+ } else {
+ /* Half done interrupt */
+ if (contract->is_cyclic)
+ vchan_cyclic_callback(&contract->vd);
+ else
+ disableirqs |= BIT(bit);
+ }
+ }
+
+ /* Disable the IRQs for events we handled */
+ spin_lock(&priv->lock);
+ irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+ writel_relaxed(irqs & ~disableirqs,
+ priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+ spin_unlock(&priv->lock);
+
+ /* Writing 1 to the pending field will clear the pending interrupt */
+ writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+ /*
+ * If a pchan was freed, we may be able to schedule something else,
+ * so have a look around
+ */
+ if (free_room) {
+ for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+ vchan = &priv->vchans[i];
+ spin_lock(&vchan->vc.lock);
+ __execute_vchan_pending(priv, vchan);
+ spin_unlock(&vchan->vc.lock);
+ }
+ }
+
+ /*
+ * Handle newer interrupts if some showed up, but only do it once
+ * to avoid too long a loop
+ */
+ if (allow_mitigation) {
+ pendirq = readl_relaxed(priv->base +
+ SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+ if (pendirq) {
+ allow_mitigation = 0;
+ goto handle_pending;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_dma_probe(struct platform_device *pdev)
+{
+ struct sun4i_dma_dev *priv;
+ struct resource *res;
+ int i, j, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ return priv->irq;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No clock specified\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ platform_set_drvdata(pdev, priv);
+ spin_lock_init(&priv->lock);
+
+ dma_cap_zero(priv->slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
+
+ INIT_LIST_HEAD(&priv->slave.channels);
+ priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
+ priv->slave.device_tx_status = sun4i_dma_tx_status;
+ priv->slave.device_issue_pending = sun4i_dma_issue_pending;
+ priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
+ priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
+ priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
+ priv->slave.device_config = sun4i_dma_config;
+ priv->slave.device_terminate_all = sun4i_dma_terminate_all;
+ priv->slave.copy_align = 2;
+ priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
+ BIT(DMA_MEM_TO_DEV);
+ priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ priv->slave.dev = &pdev->dev;
+
+ priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+ sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
+ priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
+ sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
+ if (!priv->vchans || !priv->pchans)
+ return -ENOMEM;
+
+ /*
+ * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
+ * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+ * dedicated ones
+ */
+ for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+ priv->pchans[i].base = priv->base +
+ SUN4I_NDMA_CHANNEL_REG_BASE(i);
+
+ for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+ priv->pchans[i].base = priv->base +
+ SUN4I_DDMA_CHANNEL_REG_BASE(j);
+ priv->pchans[i].is_dedicated = 1;
+ }
+
+ for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+ struct sun4i_dma_vchan *vchan = &priv->vchans[i];
+
+ spin_lock_init(&vchan->vc.lock);
+ vchan->vc.desc_free = sun4i_dma_free_contract;
+ vchan_init(&vchan->vc, &priv->slave);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the clock\n");
+ return ret;
+ }
+
+ /*
+ * Make sure the IRQs are all disabled and accounted for. The bootloader
+ * likes to leave these dirty
+ */
+ writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+ writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_clk_disable;
+ }
+
+ ret = dma_async_device_register(&priv->slave);
+ if (ret) {
+ dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+ goto err_clk_disable;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
+ priv);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
+ dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(&priv->slave);
+err_clk_disable:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int sun4i_dma_remove(struct platform_device *pdev)
+{
+ struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
+
+ /* Disable IRQ so no more work is scheduled */
+ disable_irq(priv->irq);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->slave);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_dma_match[] = {
+ { .compatible = "allwinner,sun4i-a10-dma" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver sun4i_dma_driver = {
+ .probe = sun4i_dma_probe,
+ .remove = sun4i_dma_remove,
+ .driver = {
+ .name = "sun4i-dma",
+ .of_match_table = sun4i_dma_match,
+ },
+};
+
+module_platform_driver(sun4i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
+MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
+MODULE_LICENSE("GPL");
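
The cyclic path in this new driver programs one promise per two periods and lets the half-done interrupt mark the inner period boundary, halving how often the engine must be reprogrammed. A standalone run-through of the arithmetic with sample numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long len = 8192, period_len = 1024;
        unsigned long promises = DIV_ROUND_UP(len / period_len, 2);
        unsigned long i;

        for (i = 0; i < promises; i++) {
            unsigned long off = i * period_len * 2;
            unsigned long plen = len - off < period_len * 2 ?
                                 len - off : period_len * 2;

            printf("promise %lu: offset %lu, length %lu (two periods)\n",
                   i, off, plen);
        }
        return 0;   /* 8 periods -> only 4 hardware programmings */
    }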
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 842ff97c2cfb..73e0be6e2100 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -969,7 +969,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
- sdc->slave.copy_align = 4;
+ sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES;
sdc->slave.device_config = sun6i_dma_config;
sdc->slave.device_pause = sun6i_dma_pause;
sdc->slave.device_resume = sun6i_dma_resume;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index eaf585e8286b..c8f79dcaaee8 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -155,7 +155,6 @@ struct tegra_dma_sg_req {
int req_len;
bool configured;
bool last_sg;
- bool half_done;
struct list_head node;
struct tegra_dma_desc *dma_desc;
};
@@ -188,7 +187,7 @@ struct tegra_dma_channel {
bool config_init;
int id;
int irq;
- unsigned long chan_base_offset;
+ void __iomem *chan_addr;
spinlock_t lock;
bool busy;
struct tegra_dma *tdma;
@@ -203,8 +202,6 @@ struct tegra_dma_channel {
/* ISR handler and tasklet for bottom half of isr handling */
dma_isr_handler isr_handler;
struct tasklet_struct tasklet;
- dma_async_tx_callback callback;
- void *callback_param;
/* Channel-slave specific configuration */
unsigned int slave_id;
@@ -222,6 +219,13 @@ struct tegra_dma {
void __iomem *base_addr;
const struct tegra_dma_chip_data *chip_data;
+ /*
+ * Counter for managing global pausing of the DMA controller.
+ * Only applicable for devices that don't support individual
+ * channel pausing.
+ */
+ u32 global_pause_count;
+
/* Some register need to be cache before suspend */
u32 reg_gen;
@@ -242,12 +246,12 @@ static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
static inline void tdc_write(struct tegra_dma_channel *tdc,
u32 reg, u32 val)
{
- writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+ writel(val, tdc->chan_addr + reg);
}
static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
- return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+ return readl(tdc->chan_addr + reg);
}
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
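
The chan_base_offset -> chan_addr change in the tegra hunks above trades an address computation on every register access for one precomputed void __iomem * per channel. The idiom in isolation, with illustrative names:

struct demo_chan {
	void __iomem *addr;		/* computed once at probe time */
};

static void demo_chan_init(struct demo_chan *c, void __iomem *ctrl_base,
			   unsigned int i, unsigned long stride,
			   unsigned long first_off)
{
	c->addr = ctrl_base + first_off + i * stride;
}

static inline u32 demo_chan_read(struct demo_chan *c, unsigned long reg)
{
	return readl(c->addr + reg);	/* no offset arithmetic per access */
}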
@@ -361,16 +365,32 @@ static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
struct tegra_dma *tdma = tdc->tdma;
spin_lock(&tdma->global_lock);
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
- if (wait_for_burst_complete)
- udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+
+ if (tdc->tdma->global_pause_count == 0) {
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ }
+
+ tdc->tdma->global_pause_count++;
+
+ spin_unlock(&tdma->global_lock);
}
static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
struct tegra_dma *tdma = tdc->tdma;
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ spin_lock(&tdma->global_lock);
+
+ if (WARN_ON(tdc->tdma->global_pause_count == 0))
+ goto out;
+
+ if (--tdc->tdma->global_pause_count == 0)
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL,
+ TEGRA_APBDMA_GENERAL_ENABLE);
+
+out:
spin_unlock(&tdma->global_lock);
}
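
The global_pause_count logic above is a refcounted disable: only the 0 -> 1 transition stops the controller and only the 1 -> 0 transition restarts it, so pause/resume pairs from several channels nest correctly. A generic sketch with the locking elided and illustrative hw_stop()/hw_start() placeholders:

void hw_stop(void);
void hw_start(void);

static unsigned int pause_count;	/* protected by the caller's lock */

static void demo_global_pause(void)
{
	if (pause_count++ == 0)
		hw_stop();		/* only the first pauser touches HW */
}

static void demo_global_resume(void)
{
	if (WARN_ON(pause_count == 0))
		return;			/* unbalanced resume */
	if (--pause_count == 0)
		hw_start();		/* only the last resumer touches HW */
}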
@@ -601,7 +621,6 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
return;
tdc_start_head_req(tdc);
- return;
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
@@ -628,7 +647,6 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
if (!st)
dma_desc->dma_status = DMA_ERROR;
}
- return;
}
static void tegra_dma_tasklet(unsigned long data)
@@ -720,7 +738,6 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
}
end:
spin_unlock_irqrestore(&tdc->lock, flags);
- return;
}
static int tegra_dma_terminate_all(struct dma_chan *dc)
@@ -932,7 +949,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
struct tegra_dma_sg_req *sg_req = NULL;
u32 burst_size;
enum dma_slave_buswidth slave_bw;
- int ret;
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "dma channel is not configured\n");
@@ -943,9 +959,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return NULL;
}
- ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw);
- if (ret < 0)
+ if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw) < 0)
return NULL;
INIT_LIST_HEAD(&req_list);
@@ -1048,7 +1063,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
dma_addr_t mem = buf_addr;
u32 burst_size;
enum dma_slave_buswidth slave_bw;
- int ret;
if (!buf_len || !period_len) {
dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1087,12 +1101,10 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
return NULL;
}
- ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw);
- if (ret < 0)
+ if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw) < 0)
return NULL;
-
ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
@@ -1136,7 +1148,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
sg_req->ch_regs.apb_seq = apb_seq;
sg_req->ch_regs.ahb_seq = ahb_seq;
sg_req->configured = false;
- sg_req->half_done = false;
sg_req->last_sg = false;
sg_req->dma_desc = dma_desc;
sg_req->req_len = len;
@@ -1377,8 +1388,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
- tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
- i * cdata->channel_reg_size;
+ tdc->chan_addr = tdma->base_addr +
+ TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
+ (i * cdata->channel_reg_size);
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!res) {
@@ -1418,6 +1430,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+ tdma->global_pause_count = 0;
tdma->dma_dev.dev = &pdev->dev;
tdma->dma_dev.device_alloc_chan_resources =
tegra_dma_alloc_chan_resources;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 24f5ca2356bf..5cce8c9d0026 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -20,16 +20,19 @@
#define TI_XBAR_OUTPUTS 127
#define TI_XBAR_INPUTS 256
-static DEFINE_IDR(map_idr);
+#define TI_XBAR_EDMA_OFFSET 0
+#define TI_XBAR_SDMA_OFFSET 1
struct ti_dma_xbar_data {
void __iomem *iomem;
struct dma_router dmarouter;
+ struct idr map_idr;
u16 safe_val; /* Value to reset the crossbar lines */
u32 xbar_requests; /* number of DMA requests connected to XBAR */
u32 dma_requests; /* number of DMA requests forwarded to DMA */
+ u32 dma_offset;
};
struct ti_dma_xbar_map {
@@ -51,7 +54,7 @@ static void ti_dma_xbar_free(struct device *dev, void *route_data)
map->xbar_in, map->xbar_out);
ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
- idr_remove(&map_idr, map->xbar_out);
+ idr_remove(&xbar->map_idr, map->xbar_out);
kfree(map);
}
@@ -81,12 +84,11 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec,
return ERR_PTR(-ENOMEM);
}
- map->xbar_out = idr_alloc(&map_idr, NULL, 0, xbar->dma_requests,
+ map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests,
GFP_KERNEL);
map->xbar_in = (u16)dma_spec->args[0];
- /* The DMA request is 1 based in sDMA */
- dma_spec->args[0] = map->xbar_out + 1;
+ dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
map->xbar_in, map->xbar_out);
@@ -96,9 +98,22 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
+static const struct of_device_id ti_dma_master_match[] = {
+ {
+ .compatible = "ti,omap4430-sdma",
+ .data = (void *)TI_XBAR_SDMA_OFFSET,
+ },
+ {
+ .compatible = "ti,edma3",
+ .data = (void *)TI_XBAR_EDMA_OFFSET,
+ },
+ {},
+};
+
static int ti_dma_xbar_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
struct device_node *dma_node;
struct ti_dma_xbar_data *xbar;
struct resource *res;
@@ -113,12 +128,20 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
if (!xbar)
return -ENOMEM;
+ idr_init(&xbar->map_idr);
+
dma_node = of_parse_phandle(node, "dma-masters", 0);
if (!dma_node) {
dev_err(&pdev->dev, "Can't get DMA master node\n");
return -ENODEV;
}
+ match = of_match_node(ti_dma_master_match, dma_node);
+ if (!match) {
+ dev_err(&pdev->dev, "DMA master is not supported\n");
+ return -EINVAL;
+ }
+
if (of_property_read_u32(dma_node, "dma-requests",
&xbar->dma_requests)) {
dev_info(&pdev->dev,
@@ -139,17 +162,15 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
xbar->safe_val = (u16)safe_val;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
iomem = devm_ioremap_resource(&pdev->dev, res);
- if (!iomem)
- return -ENOMEM;
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
xbar->iomem = iomem;
xbar->dmarouter.dev = &pdev->dev;
xbar->dmarouter.route_free = ti_dma_xbar_free;
+ xbar->dma_offset = (u32)match->data;
platform_set_drvdata(pdev, xbar);
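
Recapping the crossbar change above: the forwarded request number is 1-based on sDMA but 0-based on eDMA, so the per-master offset travels in of_device_id.data and is recovered with of_match_node() at probe. A hedged sketch of the idiom; the double cast through unsigned long avoids a pointer-truncation warning on 64-bit builds (the hunk above casts directly, which is harmless where the driver only builds 32-bit):

static const struct of_device_id demo_masters[] = {
	{ .compatible = "vendor,master-a", .data = (void *)1 },	/* 1-based */
	{ .compatible = "vendor,master-b", .data = (void *)0 },	/* 0-based */
	{ /* sentinel */ },
};

	/* in probe, after resolving the master's device node: */
	match = of_match_node(demo_masters, dma_node);
	if (!match)
		return -EINVAL;
	offset = (u32)(unsigned long)match->data;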
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index c4c3d93fdd1b..559cd4073698 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
- list_move_tail(&vd->node, &vc->desc_submitted);
+ list_add_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
- if (async_tx_test_ack(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+
+ vc->desc_free(vd);
if (cb)
cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
- if (async_tx_test_ack(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ vc->desc_free(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
- INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
- struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
- unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
- spin_lock_irqsave(&vc->lock, flags);
- list_add_tail(&vd->node, &vc->desc_allocated);
- spin_unlock_irqrestore(&vc->lock, flags);
-
return &vd->tx;
}
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
}
/**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- * descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
* vc: virtual channel to get descriptors from
* head: list of descriptors found
*
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
- list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
- struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
- list_for_each_entry(vd, &head, node)
- async_tx_clear_ack(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
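
Why vchan_tx_submit() switched from list_move_tail() to list_add_tail() above: with the desc_allocated list gone, vchan_tx_prep() no longer queues vd->node anywhere, and list_move_tail() begins with an unlink that dereferences the node's prev/next pointers. The distinction in two lines:

	/* a node fresh from allocation has never been linked: */
	list_add_tail(&vd->node, &vc->desc_submitted);	/* correct */
	/* list_move_tail(&vd->node, ...) would first unlink the node,
	 * reading uninitialised prev/next pointers */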
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..b23e8d52d126 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -21,6 +21,7 @@
* NOTE: PM support is currently not available.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -111,6 +112,7 @@
#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_BLK_MEM_RDY 0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
/* X-Gene SoC EFUSE csr register and bit defination */
#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -150,7 +152,6 @@
#define XGENE_DMA_PQ_CHANNEL 1
#define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */
#define XGENE_DMA_MAX_XOR_SRC 5
#define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
#define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL
@@ -763,12 +764,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
struct xgene_dma_ring *ring = &chan->rx_ring;
struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
struct xgene_dma_desc_hw *desc_hw;
+ struct list_head ld_completed;
u8 status;
+ INIT_LIST_HEAD(&ld_completed);
+
+ spin_lock_bh(&chan->lock);
+
/* Clean already completed and acked descriptors */
xgene_dma_clean_completed_descriptor(chan);
- /* Run the callback for each descriptor, in order */
+ /* Move all completed descriptors to ld completed queue, in order */
list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
/* Get subsequent hw descriptor from DMA rx ring */
desc_hw = &ring->desc_hw[ring->head];
@@ -811,15 +817,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
/* Mark this hw descriptor as processed */
desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
- xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
- xgene_dma_clean_running_descriptor(chan, desc_sw);
-
/*
* Decrement the pending transaction count
* as we have processed one
*/
chan->pending--;
+
+ /*
+ * Delete this node from ld running queue and append it to
+ * ld completed queue for further processing
+ */
+ list_move_tail(&desc_sw->node, &ld_completed);
}
/*
@@ -828,6 +836,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
* ahead and free the descriptors below.
*/
xgene_chan_xfer_ld_pending(chan);
+
+ spin_unlock_bh(&chan->lock);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+ xgene_dma_run_tx_complete_actions(chan, desc_sw);
+ xgene_dma_clean_running_descriptor(chan, desc_sw);
+ }
}
static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
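
The xgene hunks above apply a common dmaengine pattern: completed descriptors are collected onto a private list under chan->lock, and their callbacks run only after the lock is dropped, so a callback can safely re-enter the engine (for example to issue the next transfer). A generic hedged sketch with illustrative types:

struct demo_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

static void demo_cleanup(spinlock_t *lock, struct list_head *completed)
{
	struct demo_desc *d, *tmp;
	LIST_HEAD(done);

	spin_lock_bh(lock);
	list_splice_tail_init(completed, &done);	/* steal under lock */
	spin_unlock_bh(lock);

	list_for_each_entry_safe(d, tmp, &done, node)
		d->callback(d->param);	/* safe to re-enter the API here */
}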
@@ -876,11 +892,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
if (!chan->desc_pool)
return;
- spin_lock_bh(&chan->lock);
-
/* Process all running descriptor */
xgene_dma_cleanup_descriptors(chan);
+ spin_lock_bh(&chan->lock);
+
/* Clean all link descriptor queues */
xgene_dma_free_desc_list(chan, &chan->ld_pending);
xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1200,15 +1216,11 @@ static void xgene_dma_tasklet_cb(unsigned long data)
{
struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
- spin_lock_bh(&chan->lock);
-
/* Run all cleanup for descriptors which have been completed */
xgene_dma_cleanup_descriptors(chan);
/* Re-enable DMA channel IRQ */
enable_irq(chan->rx_irq);
-
- spin_unlock_bh(&chan->lock);
}
static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
@@ -1740,13 +1752,13 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
- dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+ dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
}
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
- dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+ dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
}
}
@@ -1887,6 +1899,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
return -ENOMEM;
}
+ pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
+
/* Get efuse csr region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
if (!res) {
@@ -1941,16 +1955,18 @@ static int xgene_dma_probe(struct platform_device *pdev)
return ret;
pdma->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdma->clk)) {
+ if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
dev_err(&pdev->dev, "Failed to get clk\n");
return PTR_ERR(pdma->clk);
}
/* Enable clk before accessing registers */
- ret = clk_prepare_enable(pdma->clk);
- if (ret) {
- dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
- return ret;
+ if (!IS_ERR(pdma->clk)) {
+ ret = clk_prepare_enable(pdma->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+ return ret;
+ }
}
/* Remove DMA RAM out of shutdown */
@@ -1995,7 +2011,8 @@ err_request_irq:
err_dma_mask:
err_clk_enable:
- clk_disable_unprepare(pdma->clk);
+ if (!IS_ERR(pdma->clk))
+ clk_disable_unprepare(pdma->clk);
return ret;
}
@@ -2019,11 +2036,20 @@ static int xgene_dma_remove(struct platform_device *pdev)
xgene_dma_delete_chan_rings(chan);
}
- clk_disable_unprepare(pdma->clk);
+ if (!IS_ERR(pdma->clk))
+ clk_disable_unprepare(pdma->clk);
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+ {"APMC0D43", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
+
static const struct of_device_id xgene_dma_of_match_ptr[] = {
{.compatible = "apm,xgene-storm-dma",},
{},
@@ -2036,6 +2062,7 @@ static struct platform_driver xgene_dma_driver = {
.driver = {
.name = "X-Gene-DMA",
.of_match_table = xgene_dma_of_match_ptr,
+ .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
},
};
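
A hedged reading of the clock handling above: ACPI platforms do not describe this clock, so a devm_clk_get() failure is fatal only when the device has no ACPI companion, and every later clk_* call stays behind an !IS_ERR() guard. The idiom in isolation:

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk) && !ACPI_COMPANION(dev))
		return PTR_ERR(clk);	/* the clock is mandatory on DT */

	if (!IS_ERR(clk)) {		/* optional on ACPI: skip if absent */
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
	}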
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
new file mode 100644
index 000000000000..39915a6b7986
--- /dev/null
+++ b/drivers/dma/zx296702_dma.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright 2015 Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "zx-dma"
+#define DMA_ALIGN 4
+#define DMA_MAX_SIZE (0x10000 - PAGE_SIZE)
+#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
+
+#define REG_ZX_SRC_ADDR 0x00
+#define REG_ZX_DST_ADDR 0x04
+#define REG_ZX_TX_X_COUNT 0x08
+#define REG_ZX_TX_ZY_COUNT 0x0c
+#define REG_ZX_SRC_ZY_STEP 0x10
+#define REG_ZX_DST_ZY_STEP 0x14
+#define REG_ZX_LLI_ADDR 0x1c
+#define REG_ZX_CTRL 0x20
+#define REG_ZX_TC_IRQ 0x800
+#define REG_ZX_SRC_ERR_IRQ 0x804
+#define REG_ZX_DST_ERR_IRQ 0x808
+#define REG_ZX_CFG_ERR_IRQ 0x80c
+#define REG_ZX_TC_IRQ_RAW 0x810
+#define REG_ZX_SRC_ERR_IRQ_RAW 0x814
+#define REG_ZX_DST_ERR_IRQ_RAW 0x818
+#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c
+#define REG_ZX_STATUS 0x820
+#define REG_ZX_DMA_GRP_PRIO 0x824
+#define REG_ZX_DMA_ARB 0x828
+
+#define ZX_FORCE_CLOSE BIT(31)
+#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13)
+#define ZX_MAX_BURST_LEN 16
+#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9)
+#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6)
+#define ZX_IRQ_ENABLE_ALL (3 << 4)
+#define ZX_DST_FIFO_MODE BIT(3)
+#define ZX_SRC_FIFO_MODE BIT(2)
+#define ZX_SOFT_REQ BIT(1)
+#define ZX_CH_ENABLE BIT(0)
+
+#define ZX_DMA_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum zx_dma_burst_width {
+ ZX_DMA_WIDTH_8BIT = 0,
+ ZX_DMA_WIDTH_16BIT = 1,
+ ZX_DMA_WIDTH_32BIT = 2,
+ ZX_DMA_WIDTH_64BIT = 3,
+};
+
+struct zx_desc_hw {
+ u32 saddr;
+ u32 daddr;
+ u32 src_x;
+ u32 src_zy;
+ u32 src_zy_step;
+ u32 dst_zy_step;
+ u32 reserved1;
+ u32 lli;
+ u32 ctr;
+ u32 reserved[7]; /* pad to the hardware register region size */
+} __aligned(32);
+
+struct zx_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct zx_desc_hw *desc_hw;
+};
+
+struct zx_dma_phy;
+
+struct zx_dma_chan {
+ struct dma_slave_config slave_cfg;
+ int id; /* Request phy chan id */
+ u32 ccfg;
+ u32 cyclic;
+ struct virt_dma_chan vc;
+ struct zx_dma_phy *phy;
+ struct list_head node;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+};
+
+struct zx_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct zx_dma_chan *vchan;
+ struct zx_dma_desc_sw *ds_run;
+ struct zx_dma_desc_sw *ds_done;
+};
+
+struct zx_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ spinlock_t lock; /* lock for ch and phy */
+ struct list_head chan_pending;
+ struct zx_dma_phy *phy;
+ struct zx_dma_chan *chans;
+ struct clk *clk;
+ struct dma_pool *pool;
+ u32 dma_channels;
+ u32 dma_requests;
+ int irq;
+};
+
+#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
+
+static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct zx_dma_chan, vc.chan);
+}
+
+static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
+{
+ u32 val = 0;
+
+ val = readl_relaxed(phy->base + REG_ZX_CTRL);
+ val &= ~ZX_CH_ENABLE;
+ val |= ZX_FORCE_CLOSE;
+ writel_relaxed(val, phy->base + REG_ZX_CTRL);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
+ writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+ writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+ writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
+{
+ writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
+ writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
+ writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
+ writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
+ writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
+ writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
+ writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
+ writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
+}
+
+static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
+}
+
+static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
+{
+ return readl_relaxed(d->base + REG_ZX_STATUS);
+}
+
+static void zx_dma_init_state(struct zx_dma_dev *d)
+{
+ /* set same priority */
+ writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
+ /* clear all irq */
+ writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
+ writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+ writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+ writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static int zx_dma_start_txd(struct zx_dma_chan *c)
+{
+ struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct zx_dma_desc_sw *ds =
+ container_of(vd, struct zx_dma_desc_sw, vd);
+ /*
+ * Fetch and remove the request from vc->desc_issued,
+ * so that it only contains pending descriptors.
+ */
+ list_del(&ds->vd.node);
+ c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
+ /* start dma */
+ zx_dma_set_desc(c->phy, ds->desc_hw);
+ return 0;
+ }
+ c->phy->ds_done = NULL;
+ c->phy->ds_run = NULL;
+ return -EAGAIN;
+}
+
+static void zx_dma_task(struct zx_dma_dev *d)
+{
+ struct zx_dma_phy *p;
+ struct zx_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+ unsigned long flags;
+
+ /* check new dma request of running channel in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels,
+ vc.chan.device_node) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ if (p && p->ds_done && zx_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irqsave(&d->lock, flags);
+ while (!list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct zx_dma_chan, node);
+ p = &d->phy[c->id];
+ if (!p->vchan) {
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << c->id;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ } else {
+ dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
+ }
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+ zx_dma_start_txd(c);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ }
+ }
+}
+
+static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
+{
+ struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
+ struct zx_dma_phy *p;
+ struct zx_dma_chan *c;
+ u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
+ u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
+ u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
+ u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
+ u32 i, irq_chan = 0, task = 0;
+
+ while (tc) {
+ i = __ffs(tc);
+ tc &= ~BIT(i);
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->cyclic) {
+ vchan_cyclic_callback(&p->ds_run->vd);
+ } else {
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ task = 1;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ irq_chan |= BIT(i);
+ }
+ }
+
+ if (serr || derr || cfg)
+ dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
+ serr, derr, cfg);
+
+ writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
+ writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+ writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+ writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+
+ if (task)
+ zx_dma_task(d);
+ return IRQ_HANDLED;
+}
+
+static void zx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_dev *d = to_zx_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_COMPLETE || !state)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
+ } else if ((!p) || (!p->ds_run)) {
+ bytes = 0;
+ } else {
+ struct zx_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
+ bytes = 0;
+ clli = zx_dma_get_curr_lli(p);
+ index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].src_x;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
+
+static void zx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_dev *d = to_zx_dma(chan->device);
+ unsigned long flags;
+ int issue = 0;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy && list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ issue = 1;
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ spin_unlock(&d->lock);
+ } else {
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ if (issue)
+ zx_dma_task(d);
+}
+
+static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if ((num + 1) < ds->desc_num)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct zx_desc_hw);
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].src_x = len;
+ ds->desc_hw[num].ctr = ccfg;
+}
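
zx_dma_fill_desc() above chains hardware descriptors through the lli field: each entry stores the bus address of its successor at a fixed stride inside the pool, the final entry stores 0 for one-shot transfers, and the cyclic path later rewrites it to the first descriptor's address to close the ring. The successor arithmetic as a standalone sketch:

/* descriptor 'num' of 'total'; first_lli is the pool's bus address */
static dma_addr_t demo_next_lli(dma_addr_t first_lli, u32 num, u32 total)
{
	if (num + 1 < total)
		return first_lli + (num + 1) * sizeof(struct zx_desc_hw);

	return 0;	/* one-shot: end of chain (cyclic rewrites this) */
}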
+
+static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
+ struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_desc_sw *ds;
+ struct zx_dma_dev *d = to_zx_dma(chan->device);
+ int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
+
+ if (num > lli_limit) {
+ dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
+ &c->vc, num, lli_limit);
+ return NULL;
+ }
+
+ ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+ if (!ds)
+ return NULL;
+
+ ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ if (!ds->desc_hw) {
+ dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+ kfree(ds);
+ return NULL;
+ }
+ memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
+ ds->desc_num = num;
+ return ds;
+}
+
+static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return ffs(width) - 1;
+ default:
+ return ZX_DMA_WIDTH_32BIT;
+ }
+}
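
A worked check of zx_dma_burst_width() above, since the mapping is easy to misread: for the supported power-of-two bus widths, ffs(width) - 1 lands exactly on the hardware encoding, and anything else falls back to 32-bit:

/*
 * width (bytes) | ffs(width) - 1 | encoding
 *       1       |       0        | ZX_DMA_WIDTH_8BIT
 *       2       |       1        | ZX_DMA_WIDTH_16BIT
 *       4       |       2        | ZX_DMA_WIDTH_32BIT
 *       8       |       3        | ZX_DMA_WIDTH_64BIT
 */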
+
+static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
+{
+ struct dma_slave_config *cfg = &c->slave_cfg;
+ enum zx_dma_burst_width src_width;
+ enum zx_dma_burst_width dst_width;
+ u32 maxburst = 0;
+
+ switch (dir) {
+ case DMA_MEM_TO_MEM:
+ c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
+ | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
+ | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
+ | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
+ break;
+ case DMA_MEM_TO_DEV:
+ c->dev_addr = cfg->dst_addr;
+ /*
+ * The dst len is calculated from the src width, len and dst width;
+ * we must make sure it does not exceed the maximum transfer size.
+ * A trailing single transaction that does not fill a full burst
+ * also requires identical src/dst data widths.
+ */
+ dst_width = zx_dma_burst_width(cfg->dst_addr_width);
+ maxburst = cfg->dst_maxburst;
+ maxburst = maxburst < ZX_MAX_BURST_LEN ?
+ maxburst : ZX_MAX_BURST_LEN;
+ c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
+ | ZX_SRC_BURST_LEN(maxburst - 1)
+ | ZX_SRC_BURST_WIDTH(dst_width)
+ | ZX_DST_BURST_WIDTH(dst_width);
+ break;
+ case DMA_DEV_TO_MEM:
+ c->dev_addr = cfg->src_addr;
+ src_width = zx_dma_burst_width(cfg->src_addr_width);
+ maxburst = cfg->src_maxburst;
+ maxburst = maxburst < ZX_MAX_BURST_LEN ?
+ maxburst : ZX_MAX_BURST_LEN;
+ c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
+ | ZX_SRC_BURST_LEN(maxburst - 1)
+ | ZX_SRC_BURST_WIDTH(src_width)
+ | ZX_DST_BURST_WIDTH(src_width);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ if (zx_pre_config(c, DMA_MEM_TO_MEM))
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+ ds = zx_alloc_desc_resource(num, chan);
+ if (!ds)
+ return NULL;
+
+ ds->size = len;
+ num = 0;
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ src += copy;
+ dst += copy;
+ len -= copy;
+ } while (len);
+
+ c->cyclic = 0;
+ ds->desc_hw[num - 1].lli = 0; /* end of link */
+ ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+ if (!sgl)
+ return NULL;
+
+ if (zx_pre_config(c, dir))
+ return NULL;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = zx_alloc_desc_resource(num, chan);
+ if (!ds)
+ return NULL;
+
+ c->cyclic = 0;
+ num = 0;
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num - 1].lli = 0; /* end of link */
+ ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_desc_sw *ds;
+ dma_addr_t src = 0, dst = 0;
+ int num_periods = buf_len / period_len;
+ int buf = 0, num = 0;
+
+ if (period_len > DMA_MAX_SIZE) {
+ dev_err(chan->device->dev, "maximum period size exceeded\n");
+ return NULL;
+ }
+
+ if (zx_pre_config(c, dir))
+ return NULL;
+
+ ds = zx_alloc_desc_resource(num_periods, chan);
+ if (!ds)
+ return NULL;
+ c->cyclic = 1;
+
+ while (buf < buf_len) {
+ if (dir == DMA_MEM_TO_DEV) {
+ src = dma_addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = dma_addr;
+ }
+ zx_dma_fill_desc(ds, dst, src, period_len, num++,
+ c->ccfg | ZX_IRQ_ENABLE_ALL);
+ dma_addr += period_len;
+ buf += period_len;
+ }
+
+ ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+ ds->size = buf_len;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int zx_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+
+ if (!cfg)
+ return -EINVAL;
+
+ memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int zx_dma_terminate_all(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_dev *d = to_zx_dma(chan->device);
+ struct zx_dma_phy *p = c->phy;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ zx_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ p->ds_run = NULL;
+ p->ds_done = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ u32 val = 0;
+
+ val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+ val &= ~ZX_CH_ENABLE;
+ writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+ return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ u32 val = 0;
+
+ val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+ val |= ZX_CH_ENABLE;
+ writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+ return 0;
+}
+
+static void zx_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct zx_dma_desc_sw *ds =
+ container_of(vd, struct zx_dma_desc_sw, vd);
+ struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
+
+ dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+ kfree(ds);
+}
+
+static const struct of_device_id zx6702_dma_dt_ids[] = {
+ { .compatible = "zte,zx296702-dma", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
+
+static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct zx_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+ struct dma_chan *chan;
+ struct zx_dma_chan *c;
+
+ if (request > d->dma_requests)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&d->slave);
+ if (!chan) {
+ dev_err(d->slave.dev, "failed to get a channel in %s\n", __func__);
+ return NULL;
+ }
+ c = to_zx_chan(chan);
+ c->id = request;
+ dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
+ c->id, &c->vc);
+ return chan;
+}
+
+static int zx_dma_probe(struct platform_device *op)
+{
+ struct zx_dma_dev *d;
+ struct resource *iores;
+ int i, ret = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_property_read_u32(op->dev.of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32(op->dev.of_node,
+ "dma-requests", &d->dma_requests);
+ if (!d->dma_requests || !d->dma_channels)
+ return -EINVAL;
+
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
+
+ d->irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
+ 0, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* A DMA memory pool for LLIs, aligned to a 32-byte boundary */
+ d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+ LLI_BLOCK_SIZE, 32, 0);
+ if (!d->pool)
+ return -ENOMEM;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
+ if (!d->phy)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct zx_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
+ d->slave.device_tx_status = zx_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
+ d->slave.device_issue_pending = zx_dma_issue_pending;
+ d->slave.device_config = zx_dma_config;
+ d->slave.device_terminate_all = zx_dma_terminate_all;
+ d->slave.device_pause = zx_dma_transfer_pause;
+ d->slave.device_resume = zx_dma_transfer_resume;
+ d->slave.copy_align = DMA_ALIGN;
+ d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
+ d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
+ d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
+ | BIT(DMA_DEV_TO_MEM);
+ d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
+ if (!d->chans)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct zx_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = zx_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ goto zx_dma_out;
+ }
+
+ zx_dma_init_state(d);
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ platform_set_drvdata(op, d);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret)
+ goto clk_dis;
+
+ ret = of_dma_controller_register(op->dev.of_node,
+ zx_of_dma_simple_xlate, d);
+ if (ret)
+ goto of_dma_register_fail;
+
+ dev_info(&op->dev, "initialized\n");
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+clk_dis:
+ clk_disable_unprepare(d->clk);
+zx_dma_out:
+ return ret;
+}
+
+static int zx_dma_remove(struct platform_device *op)
+{
+ struct zx_dma_chan *c, *cn;
+ struct zx_dma_dev *d = platform_get_drvdata(op);
+
+ /* explicitly free the irq */
+ devm_free_irq(&op->dev, d->irq, d);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free(op->dev.of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels,
+ vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ }
+ clk_disable_unprepare(d->clk);
+ dmam_pool_destroy(d->pool);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zx_dma_suspend_dev(struct device *dev)
+{
+ struct zx_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = zx_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "channels still running (stat 0x%x), not suspending\n", stat);
+ return -EBUSY;
+ }
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int zx_dma_resume_dev(struct device *dev)
+{
+ struct zx_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ zx_dma_init_state(d);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
+
+static struct platform_driver zx_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &zx_dma_pmops,
+ .of_match_table = zx6702_dma_dt_ids,
+ },
+ .probe = zx_dma_probe,
+ .remove = zx_dma_remove,
+};
+
+module_platform_driver(zx_pdma_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
+MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 8677ead2a8e1..ef25000a5bc6 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -61,16 +61,6 @@ config EDAC_DECODE_MCE
which occur really early upon boot, before the module infrastructure
has been initialized.
-config EDAC_MCE_INJ
- tristate "Simple MCE injection interface"
- depends on EDAC_DECODE_MCE && DEBUG_FS
- default n
- help
- This is a simple debugfs interface to inject MCEs and test different
- aspects of the MCE handling code.
-
- WARNING: Do not even assume this interface is staying stable!
-
config EDAC_MM_EDAC
tristate "Main Memory EDAC (Error Detection And Correction) reporting"
select RAS
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 28ef2a519f65..ae3c5f3ce405 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -17,7 +17,6 @@ edac_core-y += edac_pci.o edac_pci_sysfs.o
endif
obj-$(CONFIG_EDAC_GHES) += ghes_edac.o
-obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
edac_mce_amd-y := mce_amd.o
obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 58586d59bf8e..e3a945ce374b 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -763,7 +763,8 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
c->x86, c->x86_model, c->x86_mask,
m->bank,
((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
- ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
+ ((m->status & MCI_STATUS_UC) ? "UE" :
+ (m->status & MCI_STATUS_DEFERRED) ? "-" : "CE"),
((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
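
The mce_amd change above stops labelling deferred errors as corrected. A hedged restatement of the resulting three-way classification as a helper:

static const char *demo_mce_sev(u64 status)
{
	if (status & MCI_STATUS_UC)
		return "UE";	/* uncorrected */

	/* deferred errors are neither corrected nor uncorrected */
	return (status & MCI_STATUS_DEFERRED) ? "-" : "CE";
}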
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
deleted file mode 100644
index 4c73e4d03d46..000000000000
--- a/drivers/edac/mce_amd_inj.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * A simple MCE injection facility for testing different aspects of the RAS
- * code. This driver should be built as module so that it can be loaded
- * on production kernels for testing purposes.
- *
- * This file may be distributed under the terms of the GNU General Public
- * License version 2.
- *
- * Copyright (c) 2010-14: Borislav Petkov <bp@alien8.de>
- * Advanced Micro Devices Inc.
- */
-
-#include <linux/kobject.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <asm/mce.h>
-
-#include "mce_amd.h"
-
-/*
- * Collect all the MCi_XXX settings
- */
-static struct mce i_mce;
-static struct dentry *dfs_inj;
-
-static u8 n_banks;
-
-#define MAX_FLAG_OPT_SIZE 3
-
-enum injection_type {
- SW_INJ = 0, /* SW injection, simply decode the error */
- HW_INJ, /* Trigger a #MC */
- N_INJ_TYPES,
-};
-
-static const char * const flags_options[] = {
- [SW_INJ] = "sw",
- [HW_INJ] = "hw",
- NULL
-};
-
-/* Set default injection to SW_INJ */
-static enum injection_type inj_type = SW_INJ;
-
-#define MCE_INJECT_SET(reg) \
-static int inj_##reg##_set(void *data, u64 val) \
-{ \
- struct mce *m = (struct mce *)data; \
- \
- m->reg = val; \
- return 0; \
-}
-
-MCE_INJECT_SET(status);
-MCE_INJECT_SET(misc);
-MCE_INJECT_SET(addr);
-
-#define MCE_INJECT_GET(reg) \
-static int inj_##reg##_get(void *data, u64 *val) \
-{ \
- struct mce *m = (struct mce *)data; \
- \
- *val = m->reg; \
- return 0; \
-}
-
-MCE_INJECT_GET(status);
-MCE_INJECT_GET(misc);
-MCE_INJECT_GET(addr);
-
-DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
-
-/*
- * Caller needs to be make sure this cpu doesn't disappear
- * from under us, i.e.: get_cpu/put_cpu.
- */
-static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
-{
- u32 l, h;
- int err;
-
- err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
- if (err) {
- pr_err("%s: error reading HWCR\n", __func__);
- return err;
- }
-
- enable ? (l |= BIT(18)) : (l &= ~BIT(18));
-
- err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
- if (err)
- pr_err("%s: error writing HWCR\n", __func__);
-
- return err;
-}
-
-static int __set_inj(const char *buf)
-{
- int i;
-
- for (i = 0; i < N_INJ_TYPES; i++) {
- if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
- inj_type = i;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static ssize_t flags_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[MAX_FLAG_OPT_SIZE];
- int n;
-
- n = sprintf(buf, "%s\n", flags_options[inj_type]);
-
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
-}
-
-static ssize_t flags_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[MAX_FLAG_OPT_SIZE], *__buf;
- int err;
- size_t ret;
-
- if (cnt > MAX_FLAG_OPT_SIZE)
- cnt = MAX_FLAG_OPT_SIZE;
-
- ret = cnt;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt - 1] = 0;
-
- /* strip whitespace */
- __buf = strstrip(buf);
-
- err = __set_inj(__buf);
- if (err) {
- pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
- return err;
- }
-
- *ppos += ret;
-
- return ret;
-}
-
-static const struct file_operations flags_fops = {
- .read = flags_read,
- .write = flags_write,
- .llseek = generic_file_llseek,
-};
-
-/*
- * On which CPU to inject?
- */
-MCE_INJECT_GET(extcpu);
-
-static int inj_extcpu_set(void *data, u64 val)
-{
- struct mce *m = (struct mce *)data;
-
- if (val >= nr_cpu_ids || !cpu_online(val)) {
- pr_err("%s: Invalid CPU: %llu\n", __func__, val);
- return -EINVAL;
- }
- m->extcpu = val;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
-
-static void trigger_mce(void *info)
-{
- asm volatile("int $18");
-}
-
-static void do_inject(void)
-{
- u64 mcg_status = 0;
- unsigned int cpu = i_mce.extcpu;
- u8 b = i_mce.bank;
-
- if (i_mce.misc)
- i_mce.status |= MCI_STATUS_MISCV;
-
- if (inj_type == SW_INJ) {
- amd_decode_mce(NULL, 0, &i_mce);
- return;
- }
-
- /* prep MCE global settings for the injection */
- mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
-
- if (!(i_mce.status & MCI_STATUS_PCC))
- mcg_status |= MCG_STATUS_RIPV;
-
- get_online_cpus();
- if (!cpu_online(cpu))
- goto err;
-
- toggle_hw_mce_inject(cpu, true);
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
- (u32)mcg_status, (u32)(mcg_status >> 32));
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
- (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
- (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-
- wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
- (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-
- toggle_hw_mce_inject(cpu, false);
-
- smp_call_function_single(cpu, trigger_mce, NULL, 0);
-
-err:
- put_online_cpus();
-
-}
-
-/*
- * This denotes into which bank we're injecting and triggers
- * the injection, at the same time.
- */
-static int inj_bank_set(void *data, u64 val)
-{
- struct mce *m = (struct mce *)data;
-
- if (val >= n_banks) {
- pr_err("Non-existent MCE bank: %llu\n", val);
- return -EINVAL;
- }
-
- m->bank = val;
- do_inject();
-
- return 0;
-}
-
-MCE_INJECT_GET(bank);
-
-DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
-
-static const char readme_msg[] =
-"Description of the files and their usages:\n"
-"\n"
-"Note1: i refers to the bank number below.\n"
-"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
-"as they mirror the hardware registers.\n"
-"\n"
-"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
-"\t attributes of the error which caused the MCE.\n"
-"\n"
-"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
-"\t used for error thresholding purposes and its validity is indicated by\n"
-"\t MCi_STATUS[MiscV].\n"
-"\n"
-"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
-"\t associated with the error.\n"
-"\n"
-"cpu:\t The CPU to inject the error on.\n"
-"\n"
-"bank:\t Specify the bank you want to inject the error into: the number of\n"
-"\t banks in a processor varies and is family/model-specific, therefore, the\n"
-"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
-"\t injection.\n"
-"\n"
-"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
-"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
-"\t for AMD processors.\n"
-"\n"
-"\t Allowed error injection types:\n"
-"\t - \"sw\": Software error injection. Decode error to a human-readable \n"
-"\t format only. Safe to use.\n"
-"\t - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
-"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
-"\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
-"\t before injecting.\n"
-"\n";
-
-static ssize_t
-inj_readme_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_read_from_buffer(ubuf, cnt, ppos,
- readme_msg, strlen(readme_msg));
-}
-
-static const struct file_operations readme_fops = {
- .read = inj_readme_read,
-};
-
-static struct dfs_node {
- char *name;
- struct dentry *d;
- const struct file_operations *fops;
- umode_t perm;
-} dfs_fls[] = {
- { .name = "status", .fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
- { .name = "README", .fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
-};
-
-static int __init init_mce_inject(void)
-{
- int i;
- u64 cap;
-
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- n_banks = cap & MCG_BANKCNT_MASK;
-
- dfs_inj = debugfs_create_dir("mce-inject", NULL);
- if (!dfs_inj)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
- dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
- dfs_fls[i].perm,
- dfs_inj,
- &i_mce,
- dfs_fls[i].fops);
-
- if (!dfs_fls[i].d)
- goto err_dfs_add;
- }
-
- return 0;
-
-err_dfs_add:
- while (--i >= 0)
- debugfs_remove(dfs_fls[i].d);
-
- debugfs_remove(dfs_inj);
- dfs_inj = NULL;
-
- return -ENOMEM;
-}
-
-static void __exit exit_mce_inject(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
- debugfs_remove(dfs_fls[i].d);
-
- memset(&dfs_fls, 0, sizeof(dfs_fls));
-
- debugfs_remove(dfs_inj);
- dfs_inj = NULL;
-}
-module_init(init_mce_inject);
-module_exit(exit_mce_inject);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
-MODULE_AUTHOR("AMD Inc.");
-MODULE_DESCRIPTION("MCE injection facility for RAS testing");
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
+ struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ca7831168298..cf1268ddef0c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -280,6 +280,7 @@ struct sbridge_info {
u8 max_interleave;
u8 (*get_node_id)(struct sbridge_pvt *pvt);
enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
+ enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
struct pci_dev *pci_vtd;
};
@@ -471,6 +472,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
/* first item must be the HA */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
@@ -488,6 +492,9 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
@@ -762,6 +769,49 @@ out:
return mtype;
}
+static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+ /* there's no way to figure out */
+ return DEV_UNKNOWN;
+}
+
+static enum dev_type __ibridge_get_width(u32 mtr)
+{
+ enum dev_type type;
+
+ switch (mtr) {
+ case 3:
+ type = DEV_UNKNOWN;
+ break;
+ case 2:
+ type = DEV_X16;
+ break;
+ case 1:
+ type = DEV_X8;
+ break;
+ case 0:
+ type = DEV_X4;
+ break;
+ }
+
+ return type;
+}
+
+static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+ /*
+ * Called ddr3_width in the documentation, but also valid for
+ * DDR4 on Haswell
+ */
+ return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
+}
+
+static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
+{
+ /* Called ddr3_width in the documentation, but also valid for DDR4 */
+ return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
+}
+
static u8 get_node_id(struct sbridge_pvt *pvt)
{
u32 reg;
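
For reference on the get_width helpers above: sb_edac's GET_BITFIELD(v, lo, hi) extracts bits lo..hi inclusive, so GET_BITFIELD(mtr, 7, 8) yields 0..3 for __ibridge_get_width() to map. An open-coded equivalent, illustrative and assuming hi < 31:

static inline u32 demo_bitfield(u32 v, unsigned int lo, unsigned int hi)
{
	return (v >> lo) & ((1u << (hi - lo + 1)) - 1);
}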
@@ -966,17 +1016,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
dimm->nr_pages = npages;
dimm->grain = 32;
- switch (banks) {
- case 16:
- dimm->dtype = DEV_X16;
- break;
- case 8:
- dimm->dtype = DEV_X8;
- break;
- case 4:
- dimm->dtype = DEV_X4;
- break;
- }
+ dimm->dtype = pvt->info.get_width(pvt, mtr);
dimm->mtype = mtype;
dimm->edac_mode = mode;
snprintf(dimm->label, sizeof(dimm->label),
@@ -1869,7 +1909,11 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
}
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
- pvt->pci_ddrio = pdev;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
+ if (!pvt->pci_ddrio)
+ pvt->pci_ddrio = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
pvt->pci_ha1 = pdev;
@@ -2361,6 +2405,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ pvt->info.get_width = ibridge_get_width;
mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
@@ -2380,6 +2425,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.interleave_list = sbridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
pvt->info.interleave_pkg = sbridge_interleave_pkg;
+ pvt->info.get_width = sbridge_get_width;
mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
@@ -2399,6 +2445,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ pvt->info.get_width = ibridge_get_width;
mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
@@ -2418,6 +2465,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ pvt->info.get_width = broadwell_get_width;
mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
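
The sb_edac changes above replace an inline switch on the bank count with a per-generation get_width() callback in struct sbridge_info, so get_dimm_config() no longer needs to know each family's MTR register layout. A reduced sketch of the same ops-table pattern, with hypothetical names and the Ivy Bridge-style bit encoding:

    #include <linux/types.h>

    enum dev_type { DEV_UNKNOWN, DEV_X4, DEV_X8, DEV_X16 };

    struct chip_info {
            enum dev_type (*get_width)(u32 mtr);
    };

    /* bits 7:8 of MTR encode the device width on Ivy Bridge/Haswell */
    static enum dev_type ib_get_width(u32 mtr)
    {
            switch ((mtr >> 7) & 0x3) {
            case 0: return DEV_X4;
            case 1: return DEV_X8;
            case 2: return DEV_X16;
            default: return DEV_UNKNOWN;
            }
    }

    static const struct chip_info ibridge_info = {
            .get_width = ib_get_width,
    };

    /* generic code stays layout-agnostic: it just calls the op */
    static enum dev_type dimm_width(const struct chip_info *info, u32 mtr)
    {
            return info->get_width(mtr);
    }
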
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 14636e4b6a08..ba06904af2e1 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1168,7 +1168,6 @@ static struct platform_driver xgene_edac_driver = {
.remove = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
- .owner = THIS_MODULE,
.of_match_table = xgene_edac_of_match,
},
};
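
Dropping .owner = THIS_MODULE here is safe because platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register() for you; the equivalent i2c_driver removals further down (rt8973a, sm5502) rely on i2c_add_driver() doing the same. A minimal sketch:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example",
                    /*
                     * no .owner: platform_driver_register() is a macro
                     * that fills it in by passing THIS_MODULE
                     */
            },
    };
    module_platform_driver(example_driver);

    MODULE_LICENSE("GPL");
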
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index ad87f263056f..4b9f09cc38d8 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -20,10 +20,12 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/extcon.h>
@@ -46,6 +48,9 @@
#define HPDET_DEBOUNCE 500
#define DEFAULT_MICD_TIMEOUT 2000
+#define MICD_DBTIME_TWO_READINGS 2
+#define MICD_DBTIME_FOUR_READINGS 4
+
#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
@@ -94,6 +99,8 @@ struct arizona_extcon_info {
int hpdet_ip_version;
struct extcon_dev *edev;
+
+ struct gpio_desc *micd_pol_gpio;
};
static const struct arizona_micd_config micd_default_modes[] = {
@@ -204,6 +211,10 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
if (arizona->pdata.micd_pol_gpio > 0)
gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
info->micd_modes[mode].gpio);
+ else
+ gpiod_set_value_cansleep(info->micd_pol_gpio,
+ info->micd_modes[mode].gpio);
+
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
info->micd_modes[mode].bias <<
@@ -757,10 +768,11 @@ static void arizona_micd_timeout_work(struct work_struct *work)
mutex_lock(&info->lock);
dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n");
- arizona_identify_headphone(info);
info->detecting = false;
+ arizona_identify_headphone(info);
+
arizona_stop_mic(info);
mutex_unlock(&info->lock);
@@ -820,12 +832,18 @@ static void arizona_micd_detect(struct work_struct *work)
/* Due to jack detect this should never happen */
if (!(val & ARIZONA_MICD_STS)) {
dev_warn(arizona->dev, "Detected open circuit\n");
+ info->mic = false;
+ arizona_stop_mic(info);
info->detecting = false;
+ arizona_identify_headphone(info);
goto handled;
}
/* If we got a high impedance we should have a headset, report it. */
if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+ info->mic = true;
+ info->detecting = false;
+
arizona_identify_headphone(info);
ret = extcon_set_cable_state_(info->edev,
@@ -841,8 +859,6 @@ static void arizona_micd_detect(struct work_struct *work)
ret);
}
- info->mic = true;
- info->detecting = false;
goto handled;
}
@@ -855,10 +871,11 @@ static void arizona_micd_detect(struct work_struct *work)
if (info->detecting && (val & MICD_LVL_1_TO_7)) {
if (info->jack_flips >= info->micd_num_modes * 10) {
dev_dbg(arizona->dev, "Detected HP/line\n");
- arizona_identify_headphone(info);
info->detecting = false;
+ arizona_identify_headphone(info);
+
arizona_stop_mic(info);
} else {
info->micd_mode++;
@@ -1110,12 +1127,12 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
regmap_update_bits(arizona->regmap, reg, mask, level);
}
-static int arizona_extcon_of_get_pdata(struct arizona *arizona)
+static int arizona_extcon_device_get_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
unsigned int val = ARIZONA_ACCDET_MODE_HPL;
- of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
+ device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val);
switch (val) {
case ARIZONA_ACCDET_MODE_HPL:
case ARIZONA_ACCDET_MODE_HPR:
@@ -1127,6 +1144,24 @@ static int arizona_extcon_of_get_pdata(struct arizona *arizona)
pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
}
+ device_property_read_u32(arizona->dev, "wlf,micd-detect-debounce",
+ &pdata->micd_detect_debounce);
+
+ device_property_read_u32(arizona->dev, "wlf,micd-bias-start-time",
+ &pdata->micd_bias_start_time);
+
+ device_property_read_u32(arizona->dev, "wlf,micd-rate",
+ &pdata->micd_rate);
+
+ device_property_read_u32(arizona->dev, "wlf,micd-dbtime",
+ &pdata->micd_dbtime);
+
+ device_property_read_u32(arizona->dev, "wlf,micd-timeout",
+ &pdata->micd_timeout);
+
+ pdata->micd_force_micbias = device_property_read_bool(arizona->dev,
+ "wlf,micd-force-micbias");
+
return 0;
}
@@ -1147,10 +1182,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- if (IS_ENABLED(CONFIG_OF)) {
- if (!dev_get_platdata(arizona->dev))
- arizona_extcon_of_get_pdata(arizona);
- }
+ if (!dev_get_platdata(arizona->dev))
+ arizona_extcon_device_get_pdata(arizona);
info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
if (IS_ERR(info->micvdd)) {
@@ -1241,6 +1274,27 @@ static int arizona_extcon_probe(struct platform_device *pdev)
arizona->pdata.micd_pol_gpio, ret);
goto err_register;
}
+ } else {
+ if (info->micd_modes[0].gpio)
+ mode = GPIOD_OUT_HIGH;
+ else
+ mode = GPIOD_OUT_LOW;
+
+ /* We can't use devm here because the get must be done
+ * against the MFD device, as that is where the of_node
+ * resides; but if we devm against that device, the GPIO
+ * would not be freed when the extcon driver is unloaded.
+ */
+ info->micd_pol_gpio = gpiod_get_optional(arizona->dev,
+ "wlf,micd-pol",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(info->micd_pol_gpio)) {
+ ret = PTR_ERR(info->micd_pol_gpio);
+ dev_err(arizona->dev,
+ "Failed to get microphone polarity GPIO: %d\n",
+ ret);
+ goto err_register;
+ }
}
if (arizona->pdata.hpdet_id_gpio > 0) {
@@ -1251,7 +1305,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
arizona->pdata.hpdet_id_gpio, ret);
- goto err_register;
+ goto err_gpio;
}
}
@@ -1267,11 +1321,19 @@ static int arizona_extcon_probe(struct platform_device *pdev)
arizona->pdata.micd_rate
<< ARIZONA_MICD_RATE_SHIFT);
- if (arizona->pdata.micd_dbtime)
+ switch (arizona->pdata.micd_dbtime) {
+ case MICD_DBTIME_FOUR_READINGS:
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_DBTIME_MASK,
- arizona->pdata.micd_dbtime
- << ARIZONA_MICD_DBTIME_SHIFT);
+ ARIZONA_MICD_DBTIME);
+ break;
+ case MICD_DBTIME_TWO_READINGS:
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_DBTIME_MASK, 0);
+ break;
+ default:
+ break;
+ }
BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40);
@@ -1295,7 +1357,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(arizona->dev,
"MICD ranges must be sorted\n");
ret = -EINVAL;
- goto err_input;
+ goto err_gpio;
}
}
}
@@ -1314,7 +1376,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(arizona->dev, "Unsupported MICD level %d\n",
info->micd_ranges[i].max);
ret = -EINVAL;
- goto err_input;
+ goto err_gpio;
}
dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n",
@@ -1387,7 +1449,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
ret);
- goto err_input;
+ goto err_gpio;
}
ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1458,7 +1520,8 @@ err_rise_wake:
arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
arizona_free_irq(arizona, jack_irq_rise, info);
-err_input:
+err_gpio:
+ gpiod_put(info->micd_pol_gpio);
err_register:
pm_runtime_disable(&pdev->dev);
return ret;
@@ -1470,6 +1533,8 @@ static int arizona_extcon_remove(struct platform_device *pdev)
struct arizona *arizona = info->arizona;
int jack_irq_rise, jack_irq_fall;
+ gpiod_put(info->micd_pol_gpio);
+
pm_runtime_disable(&pdev->dev);
regmap_update_bits(arizona->regmap,
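
The arizona conversion from of_property_read_u32() to device_property_read_u32() is what lets the IS_ENABLED(CONFIG_OF) guard in probe go away: the unified property API reads from DT or ACPI _DSD transparently. A hedged sketch of that style, with hypothetical property names:

    #include <linux/device.h>
    #include <linux/property.h>

    /* hypothetical pdata, mirroring the arizona style */
    struct example_pdata {
            u32 debounce_ms;
            bool force_bias;
    };

    static void example_get_pdata(struct device *dev,
                                  struct example_pdata *pdata)
    {
            /* on failure the u32 is left untouched, so preset the default */
            pdata->debounce_ms = 20;
            device_property_read_u32(dev, "vendor,debounce-ms",
                                     &pdata->debounce_ms);

            /* works identically for a DT property or an ACPI _DSD entry */
            pdata->force_bias = device_property_read_bool(dev,
                                                          "vendor,force-bias");
    }
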
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 355459a54e8b..57c24fa52edb 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -65,22 +65,6 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
-{
- struct device *dev = edev->dev.parent;
- struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev);
- const char *state;
-
- if (extcon_get_state(edev))
- state = extcon_data->state_on;
- else
- state = extcon_data->state_off;
-
- if (state)
- return sprintf(buf, "%s\n", state);
- return -EINVAL;
-}
-
static int gpio_extcon_probe(struct platform_device *pdev)
{
struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -110,8 +94,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
extcon_data->state_on = pdata->state_on;
extcon_data->state_off = pdata->state_off;
extcon_data->check_on_resume = pdata->check_on_resume;
- if (pdata->state_on && pdata->state_off)
- extcon_data->edev->print_state = extcon_gpio_print_state;
ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
pdev->name);
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index f4f3b3d53928..35b9e118b2fb 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/extcon.h>
#include <linux/regmap.h>
@@ -42,7 +43,7 @@ static struct max77693_reg_data default_init_data[] = {
{
/* STATUS2 - [3]ChgDetRun */
.addr = MAX77693_MUIC_REG_STATUS2,
- .data = STATUS2_CHGDETRUN_MASK,
+ .data = MAX77693_STATUS2_CHGDETRUN_MASK,
}, {
/* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
.addr = MAX77693_MUIC_REG_INTMASK1,
@@ -235,7 +236,7 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
*/
ret = regmap_write(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL3,
- time << CONTROL3_ADCDBSET_SHIFT);
+ time << MAX77693_CONTROL3_ADCDBSET_SHIFT);
if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
return ret;
@@ -268,7 +269,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
if (attached)
ctrl1 = val;
else
- ctrl1 = CONTROL1_SW_OPEN;
+ ctrl1 = MAX77693_CONTROL1_SW_OPEN;
ret = regmap_update_bits(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
@@ -278,13 +279,14 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
}
if (attached)
- ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ ctrl2 |= MAX77693_CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
else
- ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+ ctrl2 |= MAX77693_CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
ret = regmap_update_bits(info->max77693->regmap_muic,
MAX77693_MUIC_REG_CTRL2,
- CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2);
+ MAX77693_CONTROL2_LOWPWR_MASK | MAX77693_CONTROL2_CPEN_MASK,
+ ctrl2);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
return ret;
@@ -326,8 +328,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+ adc >>= MAX77693_STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
@@ -350,8 +352,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+ adc >>= MAX77693_STATUS1_ADC_SHIFT;
/*
* Check current cable state/cable type and store cable type
@@ -366,13 +368,13 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
} else {
*attached = true;
- adclow = info->status[0] & STATUS1_ADCLOW_MASK;
- adclow >>= STATUS1_ADCLOW_SHIFT;
- adc1k = info->status[0] & STATUS1_ADC1K_MASK;
- adc1k >>= STATUS1_ADC1K_SHIFT;
+ adclow = info->status[0] & MAX77693_STATUS1_ADCLOW_MASK;
+ adclow >>= MAX77693_STATUS1_ADCLOW_SHIFT;
+ adc1k = info->status[0] & MAX77693_STATUS1_ADC1K_MASK;
+ adc1k >>= MAX77693_STATUS1_ADC1K_SHIFT;
- vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
- vbvolt >>= STATUS2_VBVOLT_SHIFT;
+ vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
+ vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
/*
* [0x1|VBVolt|ADCLow|ADC1K]
@@ -397,8 +399,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
* Read charger type to check cable type and decide cable state
* according to type of charger cable.
*/
- chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
+ chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
*attached = false;
@@ -422,10 +424,10 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
* Read ADC value to check cable type and decide cable state
* according to cable type
*/
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
- chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+ adc >>= MAX77693_STATUS1_ADC_SHIFT;
+ chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
+ chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
if (adc == MAX77693_MUIC_ADC_OPEN
&& chg_type == MAX77693_CHARGER_TYPE_NONE)
@@ -437,8 +439,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
* Read vbvolt field, if vbvolt is 1,
* this cable is used for charging.
*/
- vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
- vbvolt >>= STATUS2_VBVOLT_SHIFT;
+ vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
+ vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
cable_type = vbvolt;
break;
@@ -520,7 +522,8 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
}
/* Dock-Car/Desk/Audio, PATH:AUDIO */
- ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
+ attached);
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, dock_id, attached);
@@ -585,14 +588,16 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_GND_USB_HOST:
case MAX77693_MUIC_GND_USB_HOST_VB:
/* USB_HOST, PATH: AP_USB */
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
+ ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_USB,
+ attached);
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
/* Audio Video Cable with load, PATH:AUDIO */
- ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
+ attached);
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
@@ -615,7 +620,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
int cable_type, bool attached)
{
int ret = 0;
- u8 path = CONTROL1_SW_OPEN;
+ u8 path = MAX77693_CONTROL1_SW_OPEN;
dev_info(info->dev,
"external connector is %s (adc:0x%02x)\n",
@@ -625,12 +630,12 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
/* PATH:AP_USB */
- path = CONTROL1_SW_USB;
+ path = MAX77693_CONTROL1_SW_USB;
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
/* PATH:AP_UART */
- path = CONTROL1_SW_UART;
+ path = MAX77693_CONTROL1_SW_UART;
break;
default:
dev_err(info->dev, "failed to detect %s jig cable\n",
@@ -1077,7 +1082,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "allocate register map\n");
} else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
- info->max77693->muic,
+ info->max77693->i2c_muic,
&max77693_muic_regmap_config);
if (IS_ERR(info->max77693->regmap_muic)) {
ret = PTR_ERR(info->max77693->regmap_muic);
@@ -1164,28 +1169,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
}
for (i = 0; i < num_init_data; i++) {
- enum max77693_irq_source irq_src
- = MAX77693_IRQ_GROUP_NR;
-
regmap_write(info->max77693->regmap_muic,
init_data[i].addr,
init_data[i].data);
-
- switch (init_data[i].addr) {
- case MAX77693_MUIC_REG_INTMASK1:
- irq_src = MUIC_INT1;
- break;
- case MAX77693_MUIC_REG_INTMASK2:
- irq_src = MUIC_INT2;
- break;
- case MAX77693_MUIC_REG_INTMASK3:
- irq_src = MUIC_INT3;
- break;
- }
-
- if (irq_src < MAX77693_IRQ_GROUP_NR)
- info->max77693->irq_masks_cur[irq_src]
- = init_data[i].data;
}
if (pdata && pdata->muic_data) {
@@ -1199,12 +1185,12 @@ static int max77693_muic_probe(struct platform_device *pdev)
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
- info->path_uart = CONTROL1_SW_UART;
+ info->path_uart = MAX77693_CONTROL1_SW_UART;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
- info->path_usb = CONTROL1_SW_USB;
+ info->path_usb = MAX77693_CONTROL1_SW_USB;
/*
* Default delay time for detecting cable state
@@ -1216,8 +1202,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
} else {
- info->path_usb = CONTROL1_SW_USB;
- info->path_uart = CONTROL1_SW_UART;
+ info->path_usb = MAX77693_CONTROL1_SW_USB;
+ info->path_uart = MAX77693_CONTROL1_SW_UART;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
}
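
All of the MUIC path changes above funnel through regmap_update_bits(), a masked read-modify-write of one register field; the macro renames simply move the register constants into the shared max77693-common.h namespace so the max77693 and max77843 drivers can coexist. A generic sketch of the masked-write idiom (register and mask values hypothetical):

    #include <linux/bitops.h>
    #include <linux/regmap.h>

    #define EX_CTRL1_REG     0x0c           /* hypothetical register */
    #define EX_CTRL1_COM_SW  GENMASK(5, 0)  /* hypothetical switch field */

    /* route the MUIC switch: modify only the COM_SW field, 0 = open */
    static int example_set_path(struct regmap *map, unsigned int sw,
                                bool attached)
    {
            return regmap_update_bits(map, EX_CTRL1_REG, EX_CTRL1_COM_SW,
                                      attached ? sw : 0);
    }
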
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index fac2f1417a79..fdd928542c19 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -15,6 +15,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -32,7 +33,7 @@ enum max77843_muic_status {
struct max77843_muic_info {
struct device *dev;
- struct max77843 *max77843;
+ struct max77693_dev *max77843;
struct extcon_dev *edev;
struct mutex mutex;
@@ -198,18 +199,18 @@ static const struct regmap_irq_chip max77843_muic_irq_chip = {
static int max77843_muic_set_path(struct max77843_muic_info *info,
u8 val, bool attached)
{
- struct max77843 *max77843 = info->max77843;
+ struct max77693_dev *max77843 = info->max77843;
int ret = 0;
unsigned int ctrl1, ctrl2;
if (attached)
ctrl1 = val;
else
- ctrl1 = CONTROL1_SW_OPEN;
+ ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL1,
- CONTROL1_COM_SW, ctrl1);
+ MAX77843_MUIC_CONTROL1_COM_SW, ctrl1);
if (ret < 0) {
dev_err(info->dev, "Cannot switch MUIC port\n");
return ret;
@@ -243,7 +244,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
adc = info->status[MAX77843_MUIC_STATUS1] &
MAX77843_MUIC_STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ adc >>= MAX77843_MUIC_STATUS1_ADC_SHIFT;
switch (group) {
case MAX77843_CABLE_GROUP_ADC:
@@ -309,7 +310,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
/* Get VBVolt register bit */
gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
MAX77843_MUIC_STATUS2_VBVOLT_MASK);
- gnd_type >>= STATUS2_VBVOLT_SHIFT;
+ gnd_type >>= MAX77843_MUIC_STATUS2_VBVOLT_SHIFT;
/* Offset of GND cable */
gnd_type |= MAX77843_MUIC_GND_USB_HOST;
@@ -338,7 +339,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
switch (gnd_cable_type) {
case MAX77843_MUIC_GND_USB_HOST:
case MAX77843_MUIC_GND_USB_HOST_VB:
- ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_USB,
+ attached);
if (ret < 0)
return ret;
@@ -346,7 +349,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
if (ret < 0)
return ret;
@@ -365,7 +370,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
int cable_type, bool attached)
{
int ret;
- u8 path = CONTROL1_SW_OPEN;
+ u8 path = MAX77843_MUIC_CONTROL1_SW_OPEN;
dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
attached ? "attached" : "detached", cable_type);
@@ -373,10 +378,10 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
switch (cable_type) {
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
- path = CONTROL1_SW_USB;
+ path = MAX77843_MUIC_CONTROL1_SW_USB;
break;
case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
- path = CONTROL1_SW_UART;
+ path = MAX77843_MUIC_CONTROL1_SW_UART;
break;
default:
return -EINVAL;
@@ -474,14 +479,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
switch (chg_type) {
case MAX77843_MUIC_CHG_USB:
- ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_USB,
+ attached);
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
if (ret < 0)
return ret;
@@ -489,14 +498,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
attached);
break;
case MAX77843_MUIC_CHG_DEDICATED:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_500MA:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
if (ret < 0)
return ret;
@@ -504,7 +517,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_1A:
- ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ ret = max77843_muic_set_path(info,
+ MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
if (ret < 0)
return ret;
@@ -528,7 +543,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
- max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+ max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
+ attached);
return -EINVAL;
}
@@ -539,7 +555,7 @@ static void max77843_muic_irq_work(struct work_struct *work)
{
struct max77843_muic_info *info = container_of(work,
struct max77843_muic_info, irq_work);
- struct max77843 *max77843 = info->max77843;
+ struct max77693_dev *max77843 = info->max77843;
int ret = 0;
mutex_lock(&info->mutex);
@@ -615,7 +631,7 @@ static void max77843_muic_detect_cable_wq(struct work_struct *work)
{
struct max77843_muic_info *info = container_of(to_delayed_work(work),
struct max77843_muic_info, wq_detcable);
- struct max77843 *max77843 = info->max77843;
+ struct max77693_dev *max77843 = info->max77843;
int chg_type, adc, ret;
bool attached;
@@ -656,7 +672,7 @@ err_cable_wq:
static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
enum max77843_muic_adc_debounce_time time)
{
- struct max77843 *max77843 = info->max77843;
+ struct max77693_dev *max77843 = info->max77843;
int ret;
switch (time) {
@@ -667,7 +683,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL4,
MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
- time << CONTROL4_ADCDBSET_SHIFT);
+ time << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT);
if (ret < 0) {
dev_err(info->dev, "Cannot write MUIC regmap\n");
return ret;
@@ -681,7 +697,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
return 0;
}
-static int max77843_init_muic_regmap(struct max77843 *max77843)
+static int max77843_init_muic_regmap(struct max77693_dev *max77843)
{
int ret;
@@ -720,7 +736,7 @@ err_muic_i2c:
static int max77843_muic_probe(struct platform_device *pdev)
{
- struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+ struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
struct max77843_muic_info *info;
unsigned int id;
int i, ret;
@@ -768,7 +784,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
/* Set initial path for UART */
- max77843_muic_set_path(info, CONTROL1_SW_UART, true);
+ max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true);
/* Check revision number of MUIC device */
ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
@@ -781,6 +797,15 @@ static int max77843_muic_probe(struct platform_device *pdev)
/* Support virtual irq domain for max77843 MUIC device */
INIT_WORK(&info->irq_work, max77843_muic_irq_work);
+ /* Clear IRQ bits before requesting IRQs */
+ ret = regmap_bulk_read(max77843->regmap_muic,
+ MAX77843_MUIC_REG_INT1, info->status,
+ MAX77843_MUIC_IRQ_NUM);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+ goto err_muic_irq;
+ }
+
for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
unsigned int virq = 0;
@@ -821,7 +846,7 @@ err_muic_irq:
static int max77843_muic_remove(struct platform_device *pdev)
{
struct max77843_muic_info *info = platform_get_drvdata(pdev);
- struct max77843 *max77843 = info->max77843;
+ struct max77693_dev *max77843 = info->max77843;
cancel_work_sync(&info->irq_work);
regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..93c30a885740 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -28,6 +28,11 @@
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
+
+#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
static const unsigned int palmas_extcon_cable[] = {
EXTCON_USB,
@@ -35,8 +40,6 @@ static const unsigned int palmas_extcon_cable[] = {
EXTCON_NONE,
};
-static const int mutually_exclusive[] = {0x3, 0x0};
-
static void palmas_usb_wakeup(struct palmas *palmas, int enable)
{
if (enable)
@@ -120,19 +123,54 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
return IRQ_HANDLED;
}
+static void palmas_gpio_id_detect(struct work_struct *work)
+{
+ int id;
+ struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
+ struct palmas_usb,
+ wq_detectid);
+ struct extcon_dev *edev = palmas_usb->edev;
+
+ if (!palmas_usb->id_gpiod)
+ return;
+
+ id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
+
+ if (id) {
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+ dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ } else {
+ extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+ dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+ }
+}
+
+static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
+{
+ struct palmas_usb *palmas_usb = _palmas_usb;
+
+ queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
+ palmas_usb->sw_debounce_jiffies);
+
+ return IRQ_HANDLED;
+}
+
static void palmas_enable_irq(struct palmas_usb *palmas_usb)
{
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_VBUS_CTRL_SET,
PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_CTRL_SET, PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
+ if (palmas_usb->enable_id_detection) {
+ palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+ PALMAS_USB_ID_CTRL_SET,
+ PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_INT_EN_HI_SET,
- PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
- PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+ palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+ PALMAS_USB_ID_INT_EN_HI_SET,
+ PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
+ PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+ }
if (palmas_usb->enable_vbus_detection)
palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
@@ -171,20 +209,37 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->wakeup = pdata->wakeup;
}
+ palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
+ GPIOD_IN);
+ if (IS_ERR(palmas_usb->id_gpiod)) {
+ dev_err(&pdev->dev, "failed to get id gpio\n");
+ return PTR_ERR(palmas_usb->id_gpiod);
+ }
+
+ if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
+ palmas_usb->enable_id_detection = false;
+ palmas_usb->enable_gpio_id_detection = true;
+ }
+
+ if (palmas_usb->enable_gpio_id_detection) {
+ u32 debounce;
+
+ if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
+ debounce = USB_GPIO_DEBOUNCE_MS;
+
+ status = gpiod_set_debounce(palmas_usb->id_gpiod,
+ debounce * 1000);
+ if (status < 0)
+ palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
+ }
+
+ INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect);
+
palmas->usb = palmas_usb;
palmas_usb->palmas = palmas;
palmas_usb->dev = &pdev->dev;
- palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
- PALMAS_ID_OTG_IRQ);
- palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
- PALMAS_ID_IRQ);
- palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
- PALMAS_VBUS_OTG_IRQ);
- palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
- PALMAS_VBUS_IRQ);
-
palmas_usb_wakeup(palmas, palmas_usb->wakeup);
platform_set_drvdata(pdev, palmas_usb);
@@ -195,16 +250,18 @@ static int palmas_usb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to allocate extcon device\n");
return -ENOMEM;
}
- palmas_usb->edev->mutually_exclusive = mutually_exclusive;
status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
if (status) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- kfree(palmas_usb->edev->name);
return status;
}
if (palmas_usb->enable_id_detection) {
+ palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_ID_OTG_IRQ);
+ palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_ID_IRQ);
status = devm_request_threaded_irq(palmas_usb->dev,
palmas_usb->id_irq,
NULL, palmas_id_irq_handler,
@@ -214,12 +271,35 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
- kfree(palmas_usb->edev->name);
+ return status;
+ }
+ } else if (palmas_usb->enable_gpio_id_detection) {
+ palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod);
+ if (palmas_usb->gpio_id_irq < 0) {
+ dev_err(&pdev->dev, "failed to get id irq\n");
+ return palmas_usb->gpio_id_irq;
+ }
+ status = devm_request_threaded_irq(&pdev->dev,
+ palmas_usb->gpio_id_irq,
+ NULL,
+ palmas_gpio_id_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "palmas_usb_id",
+ palmas_usb);
+ if (status < 0) {
+ dev_err(&pdev->dev,
+ "failed to request handler for id irq\n");
return status;
}
}
if (palmas_usb->enable_vbus_detection) {
+ palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_VBUS_OTG_IRQ);
+ palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_VBUS_IRQ);
status = devm_request_threaded_irq(palmas_usb->dev,
palmas_usb->vbus_irq, NULL,
palmas_vbus_irq_handler,
@@ -229,12 +309,13 @@ static int palmas_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
- kfree(palmas_usb->edev->name);
return status;
}
}
palmas_enable_irq(palmas_usb);
+ /* perform initial detection */
+ palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
device_set_wakeup_capable(&pdev->dev, true);
return 0;
}
@@ -243,7 +324,7 @@ static int palmas_usb_remove(struct platform_device *pdev)
{
struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
- kfree(palmas_usb->edev->name);
+ cancel_delayed_work_sync(&palmas_usb->wq_detectid);
return 0;
}
@@ -258,6 +339,8 @@ static int palmas_usb_suspend(struct device *dev)
enable_irq_wake(palmas_usb->vbus_irq);
if (palmas_usb->enable_id_detection)
enable_irq_wake(palmas_usb->id_irq);
+ if (palmas_usb->enable_gpio_id_detection)
+ enable_irq_wake(palmas_usb->gpio_id_irq);
}
return 0;
}
@@ -271,6 +354,8 @@ static int palmas_usb_resume(struct device *dev)
disable_irq_wake(palmas_usb->vbus_irq);
if (palmas_usb->enable_id_detection)
disable_irq_wake(palmas_usb->id_irq);
+ if (palmas_usb->enable_gpio_id_detection)
+ disable_irq_wake(palmas_usb->gpio_id_irq);
}
return 0;
};
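
The palmas probe tries hardware debounce via gpiod_set_debounce() first and falls back to a software delay, applied through the delayed work, only when the GPIO controller cannot debounce. A condensed sketch of that fallback, with hypothetical names:

    #include <linux/gpio/consumer.h>
    #include <linux/jiffies.h>

    #define EX_DEBOUNCE_MS 20

    /*
     * Returns a software debounce period in jiffies, or 0 when the
     * controller agreed to debounce in hardware.
     */
    static unsigned long example_setup_debounce(struct gpio_desc *gpiod,
                                                u32 ms)
    {
            if (!ms)
                    ms = EX_DEBOUNCE_MS;

            /* gpiod_set_debounce() takes microseconds */
            if (gpiod_set_debounce(gpiod, ms * 1000) < 0)
                    return msecs_to_jiffies(ms);

            return 0;
    }
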
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 92c939221a41..11592e980bc1 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -693,7 +693,6 @@ MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id);
static struct i2c_driver rt8973a_muic_i2c_driver = {
.driver = {
.name = "rt8973a",
- .owner = THIS_MODULE,
.pm = &rt8973a_muic_pm_ops,
.of_match_table = rt8973a_dt_match,
},
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 817dece23b4c..0ffefefa2e26 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -685,7 +685,6 @@ MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
static struct i2c_driver sm5502_muic_i2c_driver = {
.driver = {
.name = "sm5502",
- .owner = THIS_MODULE,
.pm = &sm5502_muic_pm_ops,
.of_match_table = sm5502_dt_match,
},
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a2a44536a608..2b2fecffb1ad 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -15,6 +15,7 @@
*/
#include <linux/extcon.h>
+#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..a07addde297b 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
{
- unsigned int id = EXTCON_NONE;
+ int id = -EINVAL;
int i = 0;
- if (edev->max_supported == 0)
- return -EINVAL;
-
- /* Find the the number of extcon cable */
+ /* Find the id of the extcon cable */
while (extcon_name[i]) {
if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
id = i;
break;
}
+ i++;
}
- if (id == EXTCON_NONE)
+ return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+ int id;
+
+ if (edev->max_supported == 0)
return -EINVAL;
+ /* Find the index of the extcon cable */
+ id = find_cable_id_by_name(edev, name);
+ if (id < 0)
+ return id;
+
return find_cable_index_by_id(edev, id);
}
@@ -162,14 +172,6 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
int i, count = 0;
struct extcon_dev *edev = dev_get_drvdata(dev);
- if (edev->print_state) {
- int ret = edev->print_state(edev, buf);
-
- if (ret >= 0)
- return ret;
- /* Use default if failed */
- }
-
if (edev->max_supported == 0)
return sprintf(buf, "%u\n", edev->state);
@@ -228,9 +230,11 @@ static ssize_t cable_state_show(struct device *dev,
struct extcon_cable *cable = container_of(attr, struct extcon_cable,
attr_state);
+ int i = cable->cable_index;
+
return sprintf(buf, "%d\n",
extcon_get_cable_state_(cable->edev,
- cable->cable_index));
+ cable->edev->supported_cable[i]));
}
/**
@@ -260,23 +264,31 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
unsigned long flags;
bool attached;
+ if (!edev)
+ return -EINVAL;
+
spin_lock_irqsave(&edev->lock, flags);
if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+ u32 old_state;
+
if (check_mutually_exclusive(edev, (edev->state & ~mask) |
(state & mask))) {
spin_unlock_irqrestore(&edev->lock, flags);
return -EPERM;
}
- for (index = 0; index < edev->max_supported; index++) {
- if (is_extcon_changed(edev->state, state, index, &attached))
- raw_notifier_call_chain(&edev->nh[index], attached, edev);
- }
-
+ old_state = edev->state;
edev->state &= ~mask;
edev->state |= state & mask;
+ for (index = 0; index < edev->max_supported; index++) {
+ if (is_extcon_changed(old_state, edev->state, index,
+ &attached))
+ raw_notifier_call_chain(&edev->nh[index],
+ attached, edev);
+ }
+
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (prop_buf) {
@@ -328,6 +340,9 @@ EXPORT_SYMBOL_GPL(extcon_update_state);
*/
int extcon_set_state(struct extcon_dev *edev, u32 state)
{
+ if (!edev)
+ return -EINVAL;
+
return extcon_update_state(edev, 0xffffffff, state);
}
EXPORT_SYMBOL_GPL(extcon_set_state);
@@ -341,6 +356,9 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
{
int index;
+ if (!edev)
+ return -EINVAL;
+
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
@@ -361,8 +379,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
*/
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
{
- return extcon_get_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name));
+ int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_get_cable_state_(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
@@ -380,6 +403,9 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
u32 state;
int index;
+ if (!edev)
+ return -EINVAL;
+
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
@@ -404,8 +430,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state)
{
- return extcon_set_cable_state_(edev, find_cable_index_by_name
- (edev, cable_name), cable_state);
+ int id;
+
+ id = find_cable_id_by_name(edev, cable_name);
+ if (id < 0)
+ return id;
+
+ return extcon_set_cable_state_(edev, id, cable_state);
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state);
@@ -417,6 +448,9 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
struct extcon_dev *sd;
+ if (!extcon_name)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(sd, &extcon_dev_list, entry) {
if (!strcmp(sd->name, extcon_name))
@@ -545,6 +579,9 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx;
+ if (!edev || !nb)
+ return -EINVAL;
+
idx = find_cable_index_by_id(edev, id);
spin_lock_irqsave(&edev->lock, flags);
@@ -567,6 +604,9 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx;
+ if (!edev || !nb)
+ return -EINVAL;
+
idx = find_cable_index_by_id(edev, id);
spin_lock_irqsave(&edev->lock, flags);
@@ -627,6 +667,9 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
struct extcon_dev *edev;
+ if (!supported_cable)
+ return ERR_PTR(-EINVAL);
+
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return ERR_PTR(-ENOMEM);
@@ -727,7 +770,7 @@ int extcon_dev_register(struct extcon_dev *edev)
return ret;
}
- if (!edev->supported_cable)
+ if (!edev || !edev->supported_cable)
return -EINVAL;
for (; edev->supported_cable[index] != EXTCON_NONE; index++);
@@ -933,6 +976,9 @@ void extcon_dev_unregister(struct extcon_dev *edev)
{
int index;
+ if (!edev)
+ return;
+
mutex_lock(&extcon_dev_list_lock);
list_del(&edev->entry);
mutex_unlock(&extcon_dev_list_lock);
@@ -1039,6 +1085,9 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
struct device_node *node;
struct extcon_dev *edev;
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
if (!dev->of_node) {
dev_err(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
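
The most subtle extcon.c fix is in extcon_update_state(): the notifier loop used to run before edev->state was updated, so a callback reading the device state could observe the stale value. The new code snapshots old_state, commits the change, and only then walks the notifier chain. A minimal model of that ordering (standalone sketch, not the real function):

    #include <linux/bitops.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    static u32 state_word;

    static void notify(int idx, bool attached)
    {
            pr_info("cable %d is now %s\n", idx,
                    attached ? "attached" : "detached");
    }

    static void update_state(u32 mask, u32 new_bits, int nr_cables)
    {
            u32 old_state = state_word;
            int i;

            /* commit the new state first ... */
            state_word = (state_word & ~mask) | (new_bits & mask);

            /* ... then notify, so a callback that reads the state sees
             * the post-update value instead of the stale one */
            for (i = 0; i < nr_cables; i++)
                    if ((old_state ^ state_word) & BIT(i))
                            notify(i, state_word & BIT(i));
    }
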
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 99c69a3205c4..d8de6a8dd4de 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,6 +5,9 @@
menu "Firmware Drivers"
+config ARM_PSCI_FW
+ bool
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4a4b897f9314..000830fc6707 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the linux kernel.
#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
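
The bcm47xx_nvram one-liner matters because the module loader only treats a fixed set of license strings as GPL-compatible; "GPLv2" is not among them, so the module would have been treated as proprietary and tainted the kernel:

    #include <linux/module.h>

    /*
     * Strings accepted by license_is_gpl_compatible() include "GPL",
     * "GPL v2", "GPL and additional rights", "Dual BSD/GPL",
     * "Dual MIT/GPL" and "Dual MPL/GPL" -- "GPLv2" is not one of them.
     */
    MODULE_LICENSE("GPL v2");
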
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 54071c148340..84533e02fbf8 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -43,7 +43,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
config EFI_RUNTIME_MAP
bool "Export efi runtime maps to sysfs"
- depends on X86 && EFI && KEXEC
+ depends on X86 && EFI && KEXEC_CORE
default y
help
Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
return ret;
}
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+ int len)
{
struct cper_mem_err_compact cmem;
+ /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+ if (len == sizeof(struct cper_sec_mem_err_old) &&
+ (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+ pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+ return;
+ }
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
printk("%s""section_type: memory error\n", newpfx);
- if (gdata->error_data_length >= sizeof(*mem_err))
- cper_print_mem(newpfx, mem_err);
+ if (gdata->error_data_length >=
+ sizeof(struct cper_sec_mem_err_old))
+ cper_print_mem(newpfx, mem_err,
+ gdata->error_data_length);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
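
The cper change accepts records in the shorter pre-UEFI-2.3 cper_sec_mem_err_old layout, but refuses to decode them when their validation bits claim fields that lie beyond the old structure. A sketch of the same length-gated decoding on a hypothetical two-revision record:

    #include <linux/bitops.h>
    #include <linux/kernel.h>

    /* hypothetical two-revision record, mirroring the CPER pattern */
    struct rec_old {
            u64 validation_bits;
            u64 addr;               /* valid if bit 0 is set */
    };

    struct rec_new {
            u64 validation_bits;
            u64 addr;               /* valid if bit 0 is set */
            u16 rank;               /* bit 1, new layout only */
    };

    #define REC_VALID_ADDR  BIT_ULL(0)
    #define REC_VALID_RANK  BIT_ULL(1)

    static void print_rec(const struct rec_new *r, size_t len)
    {
            /* an old-sized record must not claim new-only fields */
            if (len == sizeof(struct rec_old) &&
                (r->validation_bits & ~(REC_VALID_RANK - 1)))
                    return;

            if (r->validation_bits & REC_VALID_ADDR)
                    pr_info("addr: 0x%llx\n", r->addr);
            if (len >= sizeof(*r) && (r->validation_bits & REC_VALID_RANK))
                    pr_info("rank: %u\n", r->rank);
    }
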
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
static int __init parse_efi_cmdline(char *str)
{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
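
The new NULL check in parse_efi_cmdline() guards against a bare "efi" on the kernel command line, which hands the early_param handler a NULL string. The same defensive pattern, sketched with a hypothetical parameter:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static bool example_disabled;

    /* "example=off" sets the flag; a bare "example" gives str == NULL */
    static int __init parse_example(char *str)
    {
            if (!str)
                    return -EINVAL;

            if (parse_option_str(str, "off"))
                    example_disabled = true;

            return 0;
    }
    early_param("example", parse_example);
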
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 000000000000..42700f09a8c5
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,382 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values. For such
+ * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
+ * function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name
+#else
+#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN_##name
+#endif
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+ return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops;
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+ PSCI_FN_CPU_SUSPEND,
+ PSCI_FN_CPU_ON,
+ PSCI_FN_CPU_OFF,
+ PSCI_FN_MIGRATE,
+ PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+static int psci_to_linux_errno(int errno)
+{
+ switch (errno) {
+ case PSCI_RET_SUCCESS:
+ return 0;
+ case PSCI_RET_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case PSCI_RET_INVALID_PARAMS:
+ return -EINVAL;
+ case PSCI_RET_DENIED:
+ return -EPERM;
+ }
+
+ return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+ err = invoke_psci_fn(fn, state, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_OFF];
+ err = invoke_psci_fn(fn, state, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_ON];
+ err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_MIGRATE];
+ err = invoke_psci_fn(fn, cpuid, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
+ target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
+ 0, 0, 0);
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
+ }
+
+ if (!strcmp("hvc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ } else if (!strcmp("smc", method)) {
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ } else {
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+ unsigned long cpuid;
+ int type, cpu = -1;
+
+ type = psci_ops.migrate_info_type();
+
+ if (type == PSCI_0_2_TOS_MP) {
+ pr_info("Trusted OS migration not required\n");
+ return;
+ }
+
+ if (type == PSCI_RET_NOT_SUPPORTED) {
+ pr_info("MIGRATE_INFO_TYPE not supported.\n");
+ return;
+ }
+
+ if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ return;
+ }
+
+ cpuid = psci_migrate_info_up_cpu();
+ if (cpuid & ~MPIDR_HWID_BITMASK) {
+ pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+ cpuid);
+ return;
+ }
+
+ cpu = get_logical_index(cpuid);
+ resident_cpu = cpu >= 0 ? cpu : -1;
+
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
+ psci_ops.migrate = psci_migrate;
+
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+ u32 ver = psci_get_version();
+
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
+ }
+
+ psci_0_2_set_functions();
+
+ psci_init_migrate();
+
+ return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >= 0.2
+ *
+ * Probe based on the PSCI_VERSION call.
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+ /*
+ * Starting with v0.2, the PSCI specification introduced a call
+ * (PSCI_VERSION) that allows probing the firmware version, so
+ * that PSCI function IDs and version-specific initialization
+ * can be carried out according to the specific version reported
+ * by the firmware.
+ */
+ err = psci_probe();
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
+
+/*
+ * PSCI < v0.2 get PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+ if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+ }
+
+ if (!of_property_read_u32(np, "cpu_off", &id)) {
+ psci_function_id[PSCI_FN_CPU_OFF] = id;
+ psci_ops.cpu_off = psci_cpu_off;
+ }
+
+ if (!of_property_read_u32(np, "cpu_on", &id)) {
+ psci_function_id[PSCI_FN_CPU_ON] = id;
+ psci_ops.cpu_on = psci_cpu_on;
+ }
+
+ if (!of_property_read_u32(np, "migrate", &id)) {
+ psci_function_id[PSCI_FN_MIGRATE] = id;
+ psci_ops.migrate = psci_migrate;
+ }
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ {},
+};
+
+int __init psci_dt_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+ if (!np)
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as
+ * explicitly clarified in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ else
+ invoke_psci_fn = __invoke_psci_fn_smc;
+
+ return psci_probe();
+}
+#endif
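
Everything in the new drivers/firmware/psci.c was previously duplicated under arch/arm and arch/arm64; once psci_dt_init() or psci_acpi_init() has probed the conduit and filled in the function table, consumers only ever call through psci_ops. A hedged sketch of a typical caller (hypothetical wrapper):

    #include <linux/errno.h>
    #include <linux/psci.h>

    /* hypothetical wrapper: boot a secondary CPU through PSCI */
    static int example_boot_secondary(unsigned long hwid, unsigned long pa)
    {
            if (!psci_ops.cpu_on)
                    return -EOPNOTSUPP;

            /* CPU_ON takes the target MPIDR and the physical entry point */
            return psci_ops.cpu_on(hwid, pa);
    }
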
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c34331..29e6850665eb 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/qcom_scm.h>
-#include <asm/outercache.h>
#include <asm/cacheflush.h>
#include "qcom_scm.h"
@@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd)
* Flush the command buffer so that the secure world sees
* the correct data.
*/
- __cpuc_flush_dcache_area((void *)cmd, cmd->len);
- outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+ secure_flush_area(cmd, cmd->len);
ret = smc(cmd_addr);
if (ret < 0)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8f1fe739c985..b4fc9e4d24c6 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -113,7 +113,6 @@ config GPIO_74XX_MMIO
config GPIO_ALTERA
tristate "Altera GPIO"
depends on OF_GPIO
- select GPIO_GENERIC
select GPIOLIB_IRQCHIP
help
Say Y or M here to build support for the Altera PIO device.
@@ -131,6 +130,7 @@ config GPIO_BRCMSTB
default y if ARCH_BRCMSTB
depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST)
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
@@ -172,6 +172,7 @@ config GPIO_ETRAXFS
depends on CRIS || COMPILE_TEST
depends on OF
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
@@ -308,7 +309,6 @@ config GPIO_MVEBU
def_bool y
depends on PLAT_ORION
depends on OF
- select GPIO_GENERIC
select GENERIC_IRQ_CHIP
config GPIO_MXC
@@ -1005,6 +1005,12 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
+config GPIO_ZX
+ bool "ZTE ZX GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the GPIO device on ZTE ZX SoCs.
+
endmenu
menu "USB GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f82cd678ce08..f79a7c482a99 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
+obj-$(CONFIG_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
@@ -116,3 +117,4 @@ obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
+obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 07ba82317ece..903fcf4d04a0 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -59,13 +59,13 @@ static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
* automatically disposed on driver detach. See gpiod_get() for detailed
* information about behavior and return values.
*/
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return devm_gpiod_get_index(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(__devm_gpiod_get);
+EXPORT_SYMBOL(devm_gpiod_get);
/**
* devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,13 +77,13 @@ EXPORT_SYMBOL(__devm_gpiod_get);
* are automatically disposed on driver detach. See gpiod_get_optional() for
* detailed information about behavior and return values.
*/
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(__devm_gpiod_get_optional);
+EXPORT_SYMBOL(devm_gpiod_get_optional);
/**
* devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__devm_gpiod_get_optional);
* automatically disposed on driver detach. See gpiod_get_index() for detailed
* information about behavior and return values.
*/
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -120,7 +120,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(__devm_gpiod_get_index);
+EXPORT_SYMBOL(devm_gpiod_get_index);
/**
* devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
@@ -182,10 +182,10 @@ EXPORT_SYMBOL(devm_get_gpiod_from_child);
* gpiod_get_index_optional() for detailed information about behavior and
* return values.
*/
-struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
- enum gpiod_flags flags)
+ enum gpiod_flags flags)
{
struct gpio_desc *desc;
@@ -197,7 +197,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *de
return desc;
}
-EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
+EXPORT_SYMBOL(devm_gpiod_get_index_optional);
/**
* devm_gpiod_get_array - Resource-managed gpiod_get_array()
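
With the double-underscore wrappers gone, consumers call devm_gpiod_get() and its variants directly. A sketch of typical consumer usage in a probe routine, assuming a hypothetical "reset" GPIO in the device's firmware node; this compiles only in a kernel tree, not standalone.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* Managed lookup: the descriptor is released on driver detach */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);	/* assert reset */
	gpiod_set_value(reset, 0);	/* deassert reset */

	return 0;
}
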
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0763655cca6c..6ed7c0fb3378 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -129,7 +129,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dat))
return PTR_ERR(dat);
- priv->flags = (unsigned)of_id->data;
+ priv->flags = (uintptr_t) of_id->data;
err = bgpio_init(&priv->bgc, &pdev->dev,
DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d3fe6a6776da..984186ee58a0 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -305,15 +305,7 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev)
irq_set_chip_and_handler(irq, &adp5588_irq_chip,
handle_level_irq);
irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- /*
- * ARM needs us to explicitly flag the IRQ as VALID,
- * once we do so, it will also set the noprobe.
- */
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
+ irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
}
ret = request_threaded_irq(client->irq,
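
The irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE) call that replaces the #ifdef CONFIG_ARM block clears one set of IRQ status flags and sets another in a single update, leaving the line requestable but not auto-probeable. A standalone model of that flag update, with illustrative flag values rather than the kernel's.

#include <stdio.h>

#define IRQ_NOREQUEST (1u << 0)	/* illustrative values */
#define IRQ_NOPROBE   (1u << 1)

static unsigned int irq_status = IRQ_NOREQUEST;	/* pretend default */

static void modify_status(unsigned int clr, unsigned int set)
{
	/* Clear then set, as irq_modify_status() does for irq_data flags */
	irq_status = (irq_status & ~clr) | set;
}

int main(void)
{
	modify_status(IRQ_NOREQUEST, IRQ_NOPROBE);
	printf("status=%#x\n", irq_status);	/* NOREQUEST off, NOPROBE on */
	return 0;
}
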
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 0f3d336d6303..9b7e0b3db387 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -338,9 +338,9 @@ static int altera_gpio_remove(struct platform_device *pdev)
{
struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
- gpiochip_remove(&altera_gc->mmchip.gc);
+ of_mm_gpiochip_remove(&altera_gc->mmchip);
- return -EIO;
+ return 0;
}
static const struct of_device_id altera_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
new file mode 100644
index 000000000000..03b995304ad6
--- /dev/null
+++ b/drivers/gpio/gpio-ath79.c
@@ -0,0 +1,204 @@
+/*
+ * Atheros AR71XX/AR724X/AR913X GPIO API support
+ *
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-ath79.h>
+#include <linux/of_device.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+static void __iomem *ath79_gpio_base;
+static u32 ath79_gpio_count;
+static DEFINE_SPINLOCK(ath79_gpio_lock);
+
+static void __ath79_gpio_set_value(unsigned gpio, int value)
+{
+ void __iomem *base = ath79_gpio_base;
+
+ if (value)
+ __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+ else
+ __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+}
+
+static int __ath79_gpio_get_value(unsigned gpio)
+{
+ return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+}
+
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ return __ath79_gpio_get_value(offset);
+}
+
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ __ath79_gpio_set_value(offset, value);
+}
+
+static int ath79_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static int ath79_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ if (value)
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+ else
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ if (value)
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+ else
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static struct gpio_chip ath79_gpio_chip = {
+ .label = "ath79",
+ .get = ath79_gpio_get_value,
+ .set = ath79_gpio_set_value,
+ .direction_input = ath79_gpio_direction_input,
+ .direction_output = ath79_gpio_direction_output,
+ .base = 0,
+};
+
+static const struct of_device_id ath79_gpio_of_match[] = {
+ { .compatible = "qca,ar7100-gpio" },
+ { .compatible = "qca,ar9340-gpio" },
+ {},
+};
+
+static int ath79_gpio_probe(struct platform_device *pdev)
+{
+ struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ bool oe_inverted;
+ int err;
+
+ if (np) {
+ err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
+ if (err) {
+ dev_err(&pdev->dev, "ngpios property is not valid\n");
+ return err;
+ }
+ if (ath79_gpio_count >= 32) {
+ dev_err(&pdev->dev, "ngpios must be less than 32\n");
+ return -EINVAL;
+ }
+ oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
+ } else if (pdata) {
+ ath79_gpio_count = pdata->ngpios;
+ oe_inverted = pdata->oe_inverted;
+ } else {
+ dev_err(&pdev->dev, "No DT node or platform data found\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ath79_gpio_base = devm_ioremap_nocache(
+ &pdev->dev, res->start, resource_size(res));
+ if (!ath79_gpio_base)
+ return -ENOMEM;
+
+ ath79_gpio_chip.dev = &pdev->dev;
+ ath79_gpio_chip.ngpio = ath79_gpio_count;
+ if (oe_inverted) {
+ ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+ ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+ }
+
+ err = gpiochip_add(&ath79_gpio_chip);
+ if (err) {
+ dev_err(&pdev->dev,
+ "cannot add AR71xx GPIO chip, error=%d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ath79_gpio_driver = {
+ .driver = {
+ .name = "ath79-gpio",
+ .of_match_table = ath79_gpio_of_match,
+ },
+ .probe = ath79_gpio_probe,
+};
+
+module_platform_driver(ath79_gpio_driver);
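
The probe above swaps in the ar934x_* direction callbacks when oe_inverted is set: in the AR934x output-enable register a set bit makes the line an input, the opposite of the older parts. A standalone model of the two polarities against a fake OE register; the register layout here is illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t oe_reg;

/* Older AR71xx-style parts: OE bit set == pin drives as output */
static void set_output_normal(unsigned int gpio, int output)
{
	if (output)
		oe_reg |= 1u << gpio;
	else
		oe_reg &= ~(1u << gpio);
}

/* AR934x: OE bit set == pin is an input, so the write is inverted */
static void set_output_inverted(unsigned int gpio, int output)
{
	set_output_normal(gpio, !output);
}

int main(void)
{
	set_output_normal(3, 1);
	printf("normal:   oe=%#x\n", oe_reg);	/* bit 3 set */
	oe_reg = 0;
	set_output_inverted(3, 1);
	printf("inverted: oe=%#x\n", oe_reg);	/* bit 3 clear */
	return 0;
}
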
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 40343fa92c7b..31b90ac15204 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -438,7 +438,7 @@ static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
void __iomem *reg_base;
int bit, bank_id;
unsigned long sta;
- struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq);
+ struct bcm_kona_gpio_bank *bank = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
@@ -525,11 +525,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
return ret;
irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
return 0;
}
@@ -644,17 +640,6 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
goto err_irq_domain;
}
- for (i = 0; i < chip->ngpio; i++) {
- int irq = bcm_kona_gpio_to_irq(chip, i);
- irq_set_lockdep_class(irq, &gpio_lock_class);
- irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip,
- handle_simple_irq);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- }
for (i = 0; i < kona_gpio->num_bank; i++) {
bank = &kona_gpio->banks[i];
irq_set_chained_handler_and_data(bank->irq,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 7a3cb1fa0a76..9ea86d2ac054 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -17,6 +17,10 @@
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/basic_mmio_gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
#define GIO_BANK_SIZE 0x20
#define GIO_ODEN(bank) (((bank) * GIO_BANK_SIZE) + 0x00)
@@ -34,14 +38,18 @@ struct brcmstb_gpio_bank {
struct bgpio_chip bgc;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
+ struct irq_chip irq_chip;
};
struct brcmstb_gpio_priv {
struct list_head bank_list;
void __iomem *reg_base;
- int num_banks;
struct platform_device *pdev;
+ int parent_irq;
int gpio_base;
+ bool can_wake;
+ int parent_wake_irq;
+ struct notifier_block reboot_notifier;
};
#define MAX_GPIO_PER_BANK 32
@@ -63,6 +71,203 @@ brcmstb_gpio_gc_to_priv(struct gpio_chip *gc)
return bank->parent_priv;
}
+static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
+ unsigned int offset, bool enable)
+{
+ struct bgpio_chip *bgc = &bank->bgc;
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = bgc->pin2mask(bgc, offset);
+ u32 imask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bgc->lock, flags);
+ imask = bgc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+ if (enable)
+ imask |= mask;
+ else
+ imask &= ~mask;
+ bgc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+/* -------------------- IRQ chip functions -------------------- */
+
+static void brcmstb_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+ brcmstb_gpio_set_imask(bank, d->hwirq, false);
+}
+
+static void brcmstb_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+ brcmstb_gpio_set_imask(bank, d->hwirq, true);
+}
+
+static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = BIT(d->hwirq);
+ u32 edge_insensitive, iedge_insensitive;
+ u32 edge_config, iedge_config;
+ u32 level, ilevel;
+ unsigned long flags;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ level = 0;
+ edge_config = 0;
+ edge_insensitive = 0;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ level = mask;
+ edge_config = 0;
+ edge_insensitive = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ level = 0;
+ edge_config = 0;
+ edge_insensitive = 0;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ level = 0;
+ edge_config = mask;
+ edge_insensitive = 0;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ level = 0;
+ edge_config = 0; /* don't care, but want known value */
+ edge_insensitive = mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bank->bgc.lock, flags);
+
+ iedge_config = bank->bgc.read_reg(priv->reg_base +
+ GIO_EC(bank->id)) & ~mask;
+ iedge_insensitive = bank->bgc.read_reg(priv->reg_base +
+ GIO_EI(bank->id)) & ~mask;
+ ilevel = bank->bgc.read_reg(priv->reg_base +
+ GIO_LEVEL(bank->id)) & ~mask;
+
+ bank->bgc.write_reg(priv->reg_base + GIO_EC(bank->id),
+ iedge_config | edge_config);
+ bank->bgc.write_reg(priv->reg_base + GIO_EI(bank->id),
+ iedge_insensitive | edge_insensitive);
+ bank->bgc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
+ ilevel | level);
+
+ spin_unlock_irqrestore(&bank->bgc.lock, flags);
+ return 0;
+}
+
+static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
+ unsigned int enable)
+{
+ int ret = 0;
+
+ /*
+ * Only enable wake IRQ once for however many hwirqs can wake
+ * since they all use the same wake IRQ. Mask will be set
+ * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
+ */
+ if (enable)
+ ret = enable_irq_wake(priv->parent_wake_irq);
+ else
+ ret = disable_irq_wake(priv->parent_wake_irq);
+ if (ret)
+ dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
+ enable ? "enable" : "disable");
+ return ret;
+}
+
+static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+
+ return brcmstb_gpio_priv_set_wake(priv, enable);
+}
+
+static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
+{
+ struct brcmstb_gpio_priv *priv = data;
+
+ if (!priv || irq != priv->parent_wake_irq)
+ return IRQ_NONE;
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ return IRQ_HANDLED;
+}
+
+static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
+{
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ struct irq_domain *irq_domain = bank->bgc.gc.irqdomain;
+ void __iomem *reg_base = priv->reg_base;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->bgc.lock, flags);
+ while ((status = bank->bgc.read_reg(reg_base + GIO_STAT(bank->id)) &
+ bank->bgc.read_reg(reg_base + GIO_MASK(bank->id)))) {
+ int bit;
+
+ for_each_set_bit(bit, &status, 32) {
+ u32 stat = bank->bgc.read_reg(reg_base +
+ GIO_STAT(bank->id));
+ if (bit >= bank->width)
+ dev_warn(&priv->pdev->dev,
+ "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
+ bank->id, bit);
+ bank->bgc.write_reg(reg_base + GIO_STAT(bank->id),
+ stat | BIT(bit));
+ generic_handle_irq(irq_find_mapping(irq_domain, bit));
+ }
+ }
+ spin_unlock_irqrestore(&bank->bgc.lock, flags);
+}
+
+/* Each UPG GIO block has one IRQ for all banks */
+static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct list_head *pos;
+
+ /* Interrupts weren't properly cleared during probe */
+ BUG_ON(!priv || !chip);
+
+ chained_irq_enter(chip, desc);
+ list_for_each(pos, &priv->bank_list) {
+ struct brcmstb_gpio_bank *bank =
+ list_entry(pos, struct brcmstb_gpio_bank, node);
+ brcmstb_gpio_irq_bank_handler(bank);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int brcmstb_gpio_reboot(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct brcmstb_gpio_priv *priv =
+ container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+
+ /* Enable GPIO for S5 cold boot */
+ if (action == SYS_POWER_OFF)
+ brcmstb_gpio_priv_set_wake(priv, 1);
+
+ return NOTIFY_DONE;
+}
+
/* Make sure that the number of banks matches up between properties */
static int brcmstb_gpio_sanity_check_banks(struct device *dev,
struct device_node *np, struct resource *res)
@@ -87,11 +292,26 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
struct brcmstb_gpio_bank *bank;
int ret = 0;
+ if (!priv) {
+ dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
+ return -EFAULT;
+ }
+
+ /*
+ * A later failure below can overwrite an earlier return value, but all
+ * errors are reported, and it is more important to actually perform
+ * every cleanup step.
+ */
list_for_each(pos, &priv->bank_list) {
bank = list_entry(pos, struct brcmstb_gpio_bank, node);
ret = bgpio_remove(&bank->bgc);
if (ret)
- dev_err(&pdev->dev, "gpiochip_remove fail in cleanup");
+ dev_err(&pdev->dev, "gpiochip_remove fail in cleanup\n");
+ }
+ if (priv->reboot_notifier.notifier_call) {
+ ret = unregister_reboot_notifier(&priv->reboot_notifier);
+ if (ret)
+ dev_err(&pdev->dev,
+ "failed to unregister reboot notifier\n");
}
return ret;
}
@@ -112,7 +332,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
return -EINVAL;
offset = gpiospec->args[0] - (gc->base - priv->gpio_base);
- if (offset >= gc->ngpio)
+ if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
if (unlikely(offset >= bank->width)) {
@@ -127,6 +347,65 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
return offset;
}
+/* Before calling, must have bank->parent_irq set and gpiochip registered */
+static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ bank->irq_chip.name = dev_name(dev);
+ bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+ bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+ bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+ /* Ensures that all non-wakeup IRQs are disabled at suspend */
+ bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
+ of_property_read_bool(np, "wakeup-source")) {
+ priv->parent_wake_irq = platform_get_irq(pdev, 1);
+ if (priv->parent_wake_irq < 0) {
+ dev_warn(dev,
+ "Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
+ } else {
+ int err;
+
+ /*
+ * Set wakeup capability before requesting wakeup
+ * interrupt, so we can process boot-time "wakeups"
+ * (e.g., from S5 cold boot)
+ */
+ device_set_wakeup_capable(dev, true);
+ device_wakeup_enable(dev);
+ err = devm_request_irq(dev, priv->parent_wake_irq,
+ brcmstb_gpio_wake_irq_handler, 0,
+ "brcmstb-gpio-wake", priv);
+
+ if (err < 0) {
+ dev_err(dev, "Couldn't request wake IRQ");
+ return err;
+ }
+
+ priv->reboot_notifier.notifier_call =
+ brcmstb_gpio_reboot;
+ register_reboot_notifier(&priv->reboot_notifier);
+ priv->can_wake = true;
+ }
+ }
+
+ if (priv->can_wake)
+ bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+ gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip,
+ priv->parent_irq, brcmstb_gpio_irq_handler);
+
+ return 0;
+}
+
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -137,12 +416,15 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
struct property *prop;
const __be32 *p;
u32 bank_width;
+ int num_banks = 0;
int err;
static int gpio_base;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+ INIT_LIST_HEAD(&priv->bank_list);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
@@ -153,7 +435,16 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
priv->reg_base = reg_base;
priv->pdev = pdev;
- INIT_LIST_HEAD(&priv->bank_list);
+ if (of_property_read_bool(np, "interrupt-controller")) {
+ priv->parent_irq = platform_get_irq(pdev, 0);
+ if (priv->parent_irq <= 0) {
+ dev_err(dev, "Couldn't get IRQ");
+ return -ENOENT;
+ }
+ } else {
+ priv->parent_irq = -ENOENT;
+ }
+
if (brcmstb_gpio_sanity_check_banks(dev, np, res))
return -EINVAL;
@@ -170,7 +461,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
}
bank->parent_priv = priv;
- bank->id = priv->num_banks;
+ bank->id = num_banks;
if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
dev_err(dev, "Invalid bank width %d\n", bank_width);
goto fail;
@@ -202,6 +493,12 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
+ /*
+ * Mask all interrupts by default, since wakeup interrupts may
+ * be retained from S5 cold boot
+ */
+ bank->bgc.write_reg(reg_base + GIO_MASK(bank->id), 0);
+
err = gpiochip_add(gc);
if (err) {
dev_err(dev, "Could not add gpiochip for bank %d\n",
@@ -209,19 +506,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
goto fail;
}
gpio_base += gc->ngpio;
+
+ if (priv->parent_irq > 0) {
+ err = brcmstb_gpio_irq_setup(pdev, bank);
+ if (err)
+ goto fail;
+ }
+
dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
gc->base, gc->ngpio, bank->width);
/* Everything looks good, so add bank to list */
list_add(&bank->node, &priv->bank_list);
- priv->num_banks++;
+ num_banks++;
}
dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
- priv->num_banks, priv->gpio_base, gpio_base - 1);
-
- platform_set_drvdata(pdev, priv);
+ num_banks, priv->gpio_base, gpio_base - 1);
return 0;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c5e05c82d67c..94b0ab709721 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -65,11 +65,11 @@ static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio)
return ptr;
}
-static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
+static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g;
- g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
+ g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
return g;
}
@@ -287,7 +287,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_disable(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(d);
u32 mask = (u32) irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
@@ -296,7 +296,7 @@ static void gpio_irq_disable(struct irq_data *d)
static void gpio_irq_enable(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(d);
u32 mask = (u32) irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
@@ -327,8 +327,9 @@ static struct irq_chip gpio_irqchip = {
};
static void
-gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct davinci_gpio_regs __iomem *g;
u32 mask = 0xffff;
struct davinci_gpio_controller *d;
@@ -396,7 +397,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
struct davinci_gpio_regs __iomem *g;
u32 mask;
- d = (struct davinci_gpio_controller *)data->handler_data;
+ d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
g = (struct davinci_gpio_regs __iomem *)d->regs;
mask = __gpio_mask(data->irq - d->gpio_irq);
@@ -422,7 +423,6 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_irq_type(irq, IRQ_TYPE_NONE);
irq_set_chip_data(irq, (__force void *)g);
irq_set_handler_data(irq, (void *)__gpio_mask(hw));
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -545,7 +545,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
chips[0].chip.to_irq = gpio_to_irq_unbanked;
chips[0].gpio_irq = bank_irq;
chips[0].gpio_unbanked = pdata->gpio_unbanked;
- binten = BIT(0);
+ binten = GENMASK(pdata->gpio_unbanked / 16, 0);
/* AINTC handles mask/unmask; GPIO handles triggering */
irq = bank_irq;
@@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->clr_falling);
writel_relaxed(~0, &g->clr_rising);
- /* set up all irqs in this bank */
- irq_set_chained_handler(bank_irq, gpio_irq_handler);
-
/*
* Each chip handles 32 gpios, and each irq bank consists of 16
* gpio irqs. Pass the irq bank's corresponding controller to
* the chained irq handler.
*/
- irq_set_handler_data(bank_irq, &chips[gpio / 32]);
+ irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
+ &chips[gpio / 32]);
binten |= BIT(bank);
}
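
The binten change above replaces a single BIT(0) with a bit range sized by the number of unbanked GPIOs. GENMASK(h, l) produces a mask with bits l through h set; the following standalone check uses a definition equivalent to the kernel's, with an illustrative gpio_unbanked value of 32.

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
/* Equivalent to the kernel's GENMASK(): bits l..h set, others clear */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* gpio_unbanked = 32 -> GENMASK(32 / 16, 0) == GENMASK(2, 0) */
	printf("GENMASK(2, 0) = %#lx\n", GENMASK(2, 0));	/* 0x7 */
	printf("old BIT(0)    = %#lx\n", 1UL << 0);		/* 0x1 */
	return 0;
}
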
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 55fa9853a7f2..c5be4b9b8baf 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -149,7 +149,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
{
- struct dwapb_gpio *gpio = irq_get_handler_data(irq);
+ struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
dwapb_do_irq(gpio);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index fbf287307c4c..6bca1e125e12 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/gpio-em.h>
struct em_gio_priv {
void __iomem *base0;
@@ -262,7 +261,6 @@ static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int irq,
irq_set_chip_data(irq, h->host_data);
irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID); /* kill me now */
return 0;
}
@@ -273,13 +271,12 @@ static const struct irq_domain_ops em_gio_irq_domain_ops = {
static int em_gio_probe(struct platform_device *pdev)
{
- struct gpio_em_config pdata_dt;
- struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev);
struct em_gio_priv *p;
struct resource *io[2], *irq[2];
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
const char *name = dev_name(&pdev->dev);
+ unsigned int ngpios;
int ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -319,18 +316,10 @@ static int em_gio_probe(struct platform_device *pdev)
goto err0;
}
- if (!pdata) {
- memset(&pdata_dt, 0, sizeof(pdata_dt));
- pdata = &pdata_dt;
-
- if (of_property_read_u32(pdev->dev.of_node, "ngpios",
- &pdata->number_of_pins)) {
- dev_err(&pdev->dev, "Missing ngpios OF property\n");
- ret = -EINVAL;
- goto err0;
- }
-
- pdata->gpio_base = -1;
+ if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+ dev_err(&pdev->dev, "Missing ngpios OF property\n");
+ ret = -EINVAL;
+ goto err0;
}
gpio_chip = &p->gpio_chip;
@@ -345,8 +334,8 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->label = name;
gpio_chip->dev = &pdev->dev;
gpio_chip->owner = THIS_MODULE;
- gpio_chip->base = pdata->gpio_base;
- gpio_chip->ngpio = pdata->number_of_pins;
+ gpio_chip->base = -1;
+ gpio_chip->ngpio = ngpios;
irq_chip = &p->irq_chip;
irq_chip->name = name;
@@ -357,9 +346,7 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
- pdata->number_of_pins,
- pdata->irq_base,
+ p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
@@ -387,12 +374,6 @@ static int em_gio_probe(struct platform_device *pdev)
goto err1;
}
- if (pdata->pctl_name) {
- ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0,
- gpio_chip->base, gpio_chip->ngpio);
- if (ret < 0)
- dev_warn(&pdev->dev, "failed to add pin range\n");
- }
return 0;
err1:
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 45684f36ddb1..9d90366ea259 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -100,13 +100,15 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
+ struct irq_desc *desc)
{
/*
 * map the discontiguous hw irq range to a contiguous sw irq range:
 *
 * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7}))
*/
+ unsigned int irq = irq_desc_get_irq(desc);
int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
@@ -208,7 +210,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- __irq_set_handler_locked(d->irq, handler);
+ irq_set_handler_locked(d, handler);
gpio_int_enabled[port] |= port_mask;
@@ -234,7 +236,7 @@ static void ep93xx_gpio_init_irq(void)
gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
handle_level_irq);
- set_irq_flags(gpio_irq, IRQF_VALID);
+ irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 28071f4a5672..2ffcd9fdd1f2 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -1,8 +1,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/basic_mmio_gpio.h>
@@ -13,6 +15,7 @@
#define ETRAX_FS_rw_intr_mask 16
#define ETRAX_FS_rw_ack_intr 20
#define ETRAX_FS_r_intr 24
+#define ETRAX_FS_r_masked_intr 28
#define ETRAX_FS_rw_pb_dout 32
#define ETRAX_FS_r_pb_din 36
#define ETRAX_FS_rw_pb_oe 40
@@ -26,6 +29,48 @@
#define ETRAX_FS_r_pe_din 84
#define ETRAX_FS_rw_pe_oe 88
+#define ARTPEC3_r_pa_din 0
+#define ARTPEC3_rw_pa_dout 4
+#define ARTPEC3_rw_pa_oe 8
+#define ARTPEC3_r_pb_din 44
+#define ARTPEC3_rw_pb_dout 48
+#define ARTPEC3_rw_pb_oe 52
+#define ARTPEC3_r_pc_din 88
+#define ARTPEC3_rw_pc_dout 92
+#define ARTPEC3_rw_pc_oe 96
+#define ARTPEC3_r_pd_din 116
+#define ARTPEC3_rw_intr_cfg 120
+#define ARTPEC3_rw_intr_pins 124
+#define ARTPEC3_rw_intr_mask 128
+#define ARTPEC3_rw_ack_intr 132
+#define ARTPEC3_r_masked_intr 140
+
+#define GIO_CFG_OFF 0
+#define GIO_CFG_HI 1
+#define GIO_CFG_LO 2
+#define GIO_CFG_SET 3
+#define GIO_CFG_POSEDGE 5
+#define GIO_CFG_NEGEDGE 6
+#define GIO_CFG_ANYEDGE 7
+
+struct etraxfs_gpio_info;
+
+struct etraxfs_gpio_block {
+ spinlock_t lock;
+ u32 mask;
+ u32 cfg;
+ u32 pins;
+ unsigned int group[8];
+
+ void __iomem *regs;
+ const struct etraxfs_gpio_info *info;
+};
+
+struct etraxfs_gpio_chip {
+ struct bgpio_chip bgc;
+ struct etraxfs_gpio_block *block;
+};
+
struct etraxfs_gpio_port {
const char *label;
unsigned int oe;
@@ -37,6 +82,12 @@ struct etraxfs_gpio_port {
struct etraxfs_gpio_info {
unsigned int num_ports;
const struct etraxfs_gpio_port *ports;
+
+ unsigned int rw_ack_intr;
+ unsigned int rw_intr_mask;
+ unsigned int rw_intr_cfg;
+ unsigned int rw_intr_pins;
+ unsigned int r_masked_intr;
};
static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = {
@@ -80,8 +131,56 @@ static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = {
static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = {
.num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports),
.ports = etraxfs_gpio_etraxfs_ports,
+ .rw_ack_intr = ETRAX_FS_rw_ack_intr,
+ .rw_intr_mask = ETRAX_FS_rw_intr_mask,
+ .rw_intr_cfg = ETRAX_FS_rw_intr_cfg,
+ .r_masked_intr = ETRAX_FS_r_masked_intr,
+};
+
+static const struct etraxfs_gpio_port etraxfs_gpio_artpec3_ports[] = {
+ {
+ .label = "A",
+ .ngpio = 32,
+ .oe = ARTPEC3_rw_pa_oe,
+ .dout = ARTPEC3_rw_pa_dout,
+ .din = ARTPEC3_r_pa_din,
+ },
+ {
+ .label = "B",
+ .ngpio = 32,
+ .oe = ARTPEC3_rw_pb_oe,
+ .dout = ARTPEC3_rw_pb_dout,
+ .din = ARTPEC3_r_pb_din,
+ },
+ {
+ .label = "C",
+ .ngpio = 16,
+ .oe = ARTPEC3_rw_pc_oe,
+ .dout = ARTPEC3_rw_pc_dout,
+ .din = ARTPEC3_r_pc_din,
+ },
+ {
+ .label = "D",
+ .ngpio = 32,
+ .din = ARTPEC3_r_pd_din,
+ },
+};
+
+static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
+ .num_ports = ARRAY_SIZE(etraxfs_gpio_artpec3_ports),
+ .ports = etraxfs_gpio_artpec3_ports,
+ .rw_ack_intr = ARTPEC3_rw_ack_intr,
+ .rw_intr_mask = ARTPEC3_rw_intr_mask,
+ .rw_intr_cfg = ARTPEC3_rw_intr_cfg,
+ .r_masked_intr = ARTPEC3_r_masked_intr,
+ .rw_intr_pins = ARTPEC3_rw_intr_pins,
};
+static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
+{
+ return gc->label[0] - 'A';
+}
+
static int etraxfs_gpio_of_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags)
@@ -90,7 +189,7 @@ static int etraxfs_gpio_of_xlate(struct gpio_chip *gc,
* Port numbers are A to E, and the properties are integers, so we
* specify them as 0xA - 0xE.
*/
- if (gc->label[0] - 'A' + 0xA != gpiospec->args[2])
+ if (etraxfs_gpio_chip_to_port(gc) + 0xA != gpiospec->args[2])
return -EINVAL;
return of_gpio_simple_xlate(gc, gpiospec, flags);
@@ -101,24 +200,174 @@ static const struct of_device_id etraxfs_gpio_of_table[] = {
.compatible = "axis,etraxfs-gio",
.data = &etraxfs_gpio_etraxfs,
},
+ {
+ .compatible = "axis,artpec3-gio",
+ .data = &etraxfs_gpio_artpec3,
+ },
{},
};
+static unsigned int etraxfs_gpio_to_group_irq(unsigned int gpio)
+{
+ return gpio % 8;
+}
+
+static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
+ unsigned int gpio)
+{
+ return 4 * etraxfs_gpio_chip_to_port(&chip->bgc.gc) + gpio / 8;
+}
+
+static void etraxfs_gpio_irq_ack(struct irq_data *d)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+ writel(BIT(grpirq), block->regs + block->info->rw_ack_intr);
+}
+
+static void etraxfs_gpio_irq_mask(struct irq_data *d)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+ spin_lock(&block->lock);
+ block->mask &= ~BIT(grpirq);
+ writel(block->mask, block->regs + block->info->rw_intr_mask);
+ spin_unlock(&block->lock);
+}
+
+static void etraxfs_gpio_irq_unmask(struct irq_data *d)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+ spin_lock(&block->lock);
+ block->mask |= BIT(grpirq);
+ writel(block->mask, block->regs + block->info->rw_intr_mask);
+ spin_unlock(&block->lock);
+}
+
+static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+ u32 cfg;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ cfg = GIO_CFG_POSEDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ cfg = GIO_CFG_NEGEDGE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ cfg = GIO_CFG_ANYEDGE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ cfg = GIO_CFG_LO;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ cfg = GIO_CFG_HI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock(&block->lock);
+ block->cfg &= ~(0x7 << (grpirq * 3));
+ block->cfg |= (cfg << (grpirq * 3));
+ writel(block->cfg, block->regs + block->info->rw_intr_cfg);
+ spin_unlock(&block->lock);
+
+ return 0;
+}
+
+static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+ int ret = -EBUSY;
+
+ spin_lock(&block->lock);
+ if (block->group[grpirq])
+ goto out;
+
+ ret = gpiochip_lock_as_irq(&chip->bgc.gc, d->hwirq);
+ if (ret)
+ goto out;
+
+ block->group[grpirq] = d->irq;
+ if (block->info->rw_intr_pins) {
+ unsigned int pin = etraxfs_gpio_to_group_pin(chip, d->hwirq);
+
+ block->pins &= ~(0xf << (grpirq * 4));
+ block->pins |= (pin << (grpirq * 4));
+
+ writel(block->pins, block->regs + block->info->rw_intr_pins);
+ }
+
+out:
+ spin_unlock(&block->lock);
+ return ret;
+}
+
+static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
+{
+ struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct etraxfs_gpio_block *block = chip->block;
+ unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+ spin_lock(&block->lock);
+ block->group[grpirq] = 0;
+ gpiochip_unlock_as_irq(&chip->bgc.gc, d->hwirq);
+ spin_unlock(&block->lock);
+}
+
+static struct irq_chip etraxfs_gpio_irq_chip = {
+ .name = "gpio-etraxfs",
+ .irq_ack = etraxfs_gpio_irq_ack,
+ .irq_mask = etraxfs_gpio_irq_mask,
+ .irq_unmask = etraxfs_gpio_irq_unmask,
+ .irq_set_type = etraxfs_gpio_irq_set_type,
+ .irq_request_resources = etraxfs_gpio_irq_request_resources,
+ .irq_release_resources = etraxfs_gpio_irq_release_resources,
+};
+
+static irqreturn_t etraxfs_gpio_interrupt(int irq, void *dev_id)
+{
+ struct etraxfs_gpio_block *block = dev_id;
+ unsigned long intr = readl(block->regs + block->info->r_masked_intr);
+ int bit;
+
+ for_each_set_bit(bit, &intr, 8)
+ generic_handle_irq(block->group[bit]);
+
+ return IRQ_RETVAL(intr & 0xff);
+}
+
static int etraxfs_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct etraxfs_gpio_info *info;
const struct of_device_id *match;
- struct bgpio_chip *chips;
- struct resource *res;
+ struct etraxfs_gpio_block *block;
+ struct etraxfs_gpio_chip *chips;
+ struct resource *res, *irq;
+ bool allportsirq = false;
void __iomem *regs;
int ret;
int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
- if (!regs)
- return -ENOMEM;
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
match = of_match_node(etraxfs_gpio_of_table, dev->of_node);
if (!match)
@@ -130,19 +379,57 @@ static int etraxfs_gpio_probe(struct platform_device *pdev)
if (!chips)
return -ENOMEM;
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq)
+ return -EINVAL;
+
+ block = devm_kzalloc(dev, sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ spin_lock_init(&block->lock);
+
+ block->regs = regs;
+ block->info = info;
+
+ writel(0, block->regs + info->rw_intr_mask);
+ writel(0, block->regs + info->rw_intr_cfg);
+ if (info->rw_intr_pins) {
+ allportsirq = true;
+ writel(0, block->regs + info->rw_intr_pins);
+ }
+
+ ret = devm_request_irq(dev, irq->start, etraxfs_gpio_interrupt,
+ IRQF_SHARED, dev_name(dev), block);
+ if (ret) {
+ dev_err(dev, "Unable to request irq %d\n", ret);
+ return ret;
+ }
+
for (i = 0; i < info->num_ports; i++) {
- struct bgpio_chip *bgc = &chips[i];
+ struct etraxfs_gpio_chip *chip = &chips[i];
+ struct bgpio_chip *bgc = &chip->bgc;
const struct etraxfs_gpio_port *port = &info->ports[i];
+ unsigned long flags = BGPIOF_READ_OUTPUT_REG_SET;
+ void __iomem *dat = regs + port->din;
+ void __iomem *set = regs + port->dout;
+ void __iomem *dirout = regs + port->oe;
+
+ chip->block = block;
+
+ if (dirout == set) {
+ dirout = set = NULL;
+ flags = BGPIOF_NO_OUTPUT;
+ }
ret = bgpio_init(bgc, dev, 4,
- regs + port->din, /* dat */
- regs + port->dout, /* set */
- NULL, /* clr */
- regs + port->oe, /* dirout */
- NULL, /* dirin */
- BGPIOF_UNREADABLE_REG_SET);
- if (ret)
- return ret;
+ dat, set, NULL, dirout, NULL,
+ flags);
+ if (ret) {
+ dev_err(dev, "Unable to init port %s\n",
+ port->label);
+ continue;
+ }
bgc->gc.ngpio = port->ngpio;
bgc->gc.label = port->label;
@@ -152,9 +439,21 @@ static int etraxfs_gpio_probe(struct platform_device *pdev)
bgc->gc.of_xlate = etraxfs_gpio_of_xlate;
ret = gpiochip_add(&bgc->gc);
- if (ret)
+ if (ret) {
dev_err(dev, "Unable to register port %s\n",
bgc->gc.label);
+ continue;
+ }
+
+ if (i > 0 && !allportsirq)
+ continue;
+
+ ret = gpiochip_irqchip_add(&bgc->gc, &etraxfs_gpio_irq_chip, 0,
+ handle_level_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "Unable to add irqchip to port %s\n",
+ bgc->gc.label);
+ }
}
return 0;
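
Two helpers above carry the etraxfs interrupt model: a GPIO feeds one of eight group IRQs chosen by gpio % 8, and on ARTPEC-3 each group's 4-bit field in rw_intr_pins selects the source pin as 4 * port + gpio / 8. A standalone check of that arithmetic with an illustrative pin, GPIO 13 on port B.

#include <stdio.h>

/* Which of the eight group IRQs a GPIO is routed to */
static unsigned int group_irq(unsigned int gpio)
{
	return gpio % 8;
}

/* 4-bit pin-select value for a group, given port index (A == 0) */
static unsigned int group_pin(unsigned int port, unsigned int gpio)
{
	return 4 * port + gpio / 8;
}

int main(void)
{
	/* GPIO 13 on port B (port index 1) */
	printf("group irq = %u\n", group_irq(13));	/* 5 */
	printf("pin field = %u\n", group_pin(1, 13));	/* 4*1 + 1 = 5 */
	return 0;
}
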
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 9bda3727fac1..a3f07537fe62 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -153,6 +153,10 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio));
}
+static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+}
+
static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -279,6 +283,12 @@ static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
return 0;
}
+static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+ int val)
+{
+ return -EINVAL;
+}
+
static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
int val)
{
@@ -302,6 +312,14 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
return 0;
}
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+ GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -351,6 +369,14 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+ GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
static int bgpio_setup_accessors(struct device *dev,
struct bgpio_chip *bgc,
bool bit_be,
@@ -444,6 +470,9 @@ static int bgpio_setup_io(struct bgpio_chip *bgc,
bgc->reg_set = set;
bgc->gc.set = bgpio_set_set;
bgc->gc.set_multiple = bgpio_set_multiple_set;
+ } else if (flags & BGPIOF_NO_OUTPUT) {
+ bgc->gc.set = bgpio_set_none;
+ bgc->gc.set_multiple = NULL;
} else {
bgc->gc.set = bgpio_set;
bgc->gc.set_multiple = bgpio_set_multiple;
@@ -460,7 +489,8 @@ static int bgpio_setup_io(struct bgpio_chip *bgc,
static int bgpio_setup_direction(struct bgpio_chip *bgc,
void __iomem *dirout,
- void __iomem *dirin)
+ void __iomem *dirin,
+ unsigned long flags)
{
if (dirout && dirin) {
return -EINVAL;
@@ -468,12 +498,17 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
bgc->reg_dir = dirout;
bgc->gc.direction_output = bgpio_dir_out;
bgc->gc.direction_input = bgpio_dir_in;
+ bgc->gc.get_direction = bgpio_get_dir;
} else if (dirin) {
bgc->reg_dir = dirin;
bgc->gc.direction_output = bgpio_dir_out_inv;
bgc->gc.direction_input = bgpio_dir_in_inv;
+ bgc->gc.get_direction = bgpio_get_dir_inv;
} else {
- bgc->gc.direction_output = bgpio_simple_dir_out;
+ if (flags & BGPIOF_NO_OUTPUT)
+ bgc->gc.direction_output = bgpio_dir_out_err;
+ else
+ bgc->gc.direction_output = bgpio_simple_dir_out;
bgc->gc.direction_input = bgpio_simple_dir_in;
}
@@ -525,7 +560,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
if (ret)
return ret;
- ret = bgpio_setup_direction(bgc, dirout, dirin);
+ ret = bgpio_setup_direction(bgc, dirout, dirin, flags);
if (ret)
return ret;
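
The new bgpio_get_dir() and bgpio_get_dir_inv() read the same direction register with opposite polarity: a set bit means output when the hardware exposes a dirout register, and input when it exposes a dirin register. A standalone model of the pair; the DIR_* values are illustrative, not the kernel's GPIOF_* constants.

#include <stdint.h>
#include <stdio.h>

#define DIR_OUT	0	/* illustrative, not GPIOF_DIR_OUT */
#define DIR_IN	1	/* illustrative, not GPIOF_DIR_IN */

static uint32_t dir_reg = 1u << 4;	/* bit 4 set */

static int get_dir(unsigned int gpio)		/* dirout register */
{
	return (dir_reg & (1u << gpio)) ? DIR_OUT : DIR_IN;
}

static int get_dir_inv(unsigned int gpio)	/* dirin register */
{
	return (dir_reg & (1u << gpio)) ? DIR_IN : DIR_OUT;
}

int main(void)
{
	printf("dirout view: %s\n", get_dir(4) == DIR_OUT ? "out" : "in");
	printf("dirin view:  %s\n", get_dir_inv(4) == DIR_OUT ? "out" : "in");
	return 0;
}
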
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 0a8f7617e72e..801423fe8143 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -104,17 +104,12 @@ static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
{
struct bgpio_chip *bgc = &priv->bgc;
unsigned long mask = bgc->pin2mask(bgc, offset);
- unsigned long flags;
-
- spin_lock_irqsave(&bgc->lock, flags);
if (val)
priv->imask |= mask;
else
priv->imask &= ~mask;
bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
-
- spin_unlock_irqrestore(&bgc->lock, flags);
}
static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -180,16 +175,26 @@ static void grgpio_irq_mask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
grgpio_set_imask(priv, offset, 0);
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
}
static void grgpio_irq_unmask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->bgc.lock, flags);
grgpio_set_imask(priv, offset, 1);
+
+ spin_unlock_irqrestore(&priv->bgc.lock, flags);
}
static struct irq_chip grgpio_irq_chip = {
@@ -281,12 +286,7 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, priv);
irq_set_chip_and_handler(irq, &grgpio_irq_chip,
handle_simple_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
return ret;
}
@@ -301,9 +301,6 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
int ngpio = priv->bgc.gc.ngpio;
int i;
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index aed4ca9338bc..8c5252c6c327 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->base = gpio_start;
gc->ngpio = port;
gc->label = chip->client->name;
+ gc->dev = &chip->client->dev;
gc->owner = THIS_MODULE;
return port;
@@ -684,9 +685,14 @@ static int max732x_probe(struct i2c_client *client,
mutex_init(&chip->lock);
- max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
- if (nr_port > 8)
- max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+ ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+ if (ret)
+ goto out_failed;
+ if (nr_port > 8) {
+ ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+ if (ret)
+ goto out_failed;
+ }
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 2fc7ff852d16..73db7ecd7ffd 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -507,11 +507,7 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
irq_set_chip_data(irq, mcp);
irq_set_chip(irq, &mcp23s08_irq_chip);
irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
}
return 0;
}
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 20aa66f34f6e..8ef7a12de983 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -32,7 +32,7 @@
struct mpc8xxx_gpio_chip {
struct of_mm_gpio_chip mm_gc;
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* shadowed data register to be able to clear/set output pins in
@@ -95,7 +95,7 @@ static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
unsigned long flags;
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
if (val)
mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
@@ -104,7 +104,7 @@ static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
@@ -115,7 +115,7 @@ static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
unsigned long flags;
int i;
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
@@ -130,7 +130,7 @@ static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -139,11 +139,11 @@ static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
unsigned long flags;
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
return 0;
}
@@ -156,11 +156,11 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
mpc8xxx_gpio_set(gc, gpio, val);
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
return 0;
}
@@ -174,6 +174,15 @@ static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return mpc8xxx_gpio_dir_out(gc, gpio, val);
}
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 0..3 are input only on MPC5125 */
+ if (gpio <= 3)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -206,11 +215,11 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned long flags;
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
static void mpc8xxx_irq_mask(struct irq_data *d)
@@ -219,11 +228,11 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned long flags;
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -242,17 +251,17 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
setbits32(mm->regs + GPIO_ICR,
mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(mm->regs + GPIO_ICR,
mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
default:
@@ -282,22 +291,22 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrsetbits_be32(reg, 3 << shift, 2 << shift);
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrsetbits_be32(reg, 3 << shift, 1 << shift);
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
- spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(reg, 3 << shift);
- spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
default:
@@ -312,17 +321,13 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_unmask = mpc8xxx_irq_unmask,
.irq_mask = mpc8xxx_irq_mask,
.irq_ack = mpc8xxx_irq_ack,
+ /* this might get overwritten in mpc8xxx_probe() */
.irq_set_type = mpc8xxx_irq_set_type,
};
static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
-
- if (mpc8xxx_gc->of_dev_id_data)
- mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
-
irq_set_chip_data(irq, h->host_data);
irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
@@ -334,11 +339,38 @@ static const struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static struct of_device_id mpc8xxx_gpio_ids[] = {
+struct mpc8xxx_gpio_devtype {
+ int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
+ int (*gpio_get)(struct gpio_chip *, unsigned int);
+ int (*irq_set_type)(struct irq_data *, unsigned int);
+};
+
+static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
+ .gpio_dir_out = mpc5121_gpio_dir_out,
+ .irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
+ .gpio_dir_out = mpc5125_gpio_dir_out,
+ .irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
+ .gpio_get = mpc8572_gpio_get,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
+ .gpio_dir_out = mpc8xxx_gpio_dir_out,
+ .gpio_get = mpc8xxx_gpio_get,
+ .irq_set_type = mpc8xxx_irq_set_type,
+};
+
+static const struct of_device_id mpc8xxx_gpio_ids[] = {
{ .compatible = "fsl,mpc8349-gpio", },
- { .compatible = "fsl,mpc8572-gpio", },
+ { .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
{ .compatible = "fsl,mpc8610-gpio", },
- { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+ { .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
+ { .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
{ .compatible = "fsl,pq3-gpio", },
{ .compatible = "fsl,qoriq-gpio", },
{}
@@ -351,6 +383,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
const struct of_device_id *id;
+ const struct mpc8xxx_gpio_devtype *devtype =
+ of_device_get_match_data(&pdev->dev);
int ret;
mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -359,7 +393,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mpc8xxx_gc);
- spin_lock_init(&mpc8xxx_gc->lock);
+ raw_spin_lock_init(&mpc8xxx_gc->lock);
mm_gc = &mpc8xxx_gc->mm_gc;
gc = &mm_gc->gc;
@@ -367,10 +401,18 @@ static int mpc8xxx_probe(struct platform_device *pdev)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
- mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
- gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
- mpc8572_gpio_get : mpc8xxx_gpio_get;
+
+ if (!devtype)
+ devtype = &mpc8xxx_gpio_devtype_default;
+
+ /*
+ * It's assumed that only a single type of gpio controller is available
+ * on the current machine, so overwriting global data is fine.
+ */
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+
+ gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
+ gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->set_multiple = mpc8xxx_gpio_set_multiple;
gc->to_irq = mpc8xxx_gpio_to_irq;
@@ -396,8 +438,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
out_be32(mm_gc->regs + GPIO_IMR, 0);
- irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
- irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);
+ irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
+ mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
return 0;
}
@@ -407,8 +449,7 @@ static int mpc8xxx_remove(struct platform_device *pdev)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);
if (mpc8xxx_gc->irq) {
- irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
- irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
+ irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, NULL, NULL);
irq_domain_remove(mpc8xxx_gc->irq);
}
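Editor's note: taken together, the mpc8xxx hunks above replace scattered of_device_is_compatible() checks with a per-SoC devtype structure fetched via of_device_get_match_data(), falling back to a default table when a match entry carries no .data. The GCC "x ?: y" extension (shorthand for "x ? x : y") then picks the default hook wherever a devtype leaves one NULL. A hedged sketch of the pattern; all names are illustrative, not from the driver:

#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_devtype {
	int (*get)(struct gpio_chip *gc, unsigned int offset);
};

static int foo_gpio_get(struct gpio_chip *gc, unsigned int offset);

static const struct foo_devtype foo_devtype_default = {
	.get = foo_gpio_get,
};

static void foo_apply_devtype(struct platform_device *pdev,
			      struct gpio_chip *gc)
{
	const struct foo_devtype *devtype =
		of_device_get_match_data(&pdev->dev);

	if (!devtype)			/* match entry carried no .data */
		devtype = &foo_devtype_default;

	/* "x ?: y" evaluates to x when x is non-NULL, else to y */
	gc->get = devtype->get ?: foo_gpio_get;
}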
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 52ff18229fdc..d2012cfb5571 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -187,14 +187,6 @@ static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return irq_create_mapping(domain, offset);
}
-static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
-{
- struct irq_data *irq_data = irq_get_irq_data(irq);
-
- return irq_data->hwirq;
-}
-
-
/* For dual-edge interrupts in software, since the hardware has no
* such support:
*
@@ -238,7 +230,7 @@ static void msm_gpio_update_dual_edge_pos(unsigned gpio)
static void msm_gpio_irq_ack(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ int gpio = d->hwirq;
writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
if (test_bit(gpio, msm_gpio.dual_edge_irqs))
@@ -247,8 +239,8 @@ static void msm_gpio_irq_ack(struct irq_data *d)
static void msm_gpio_irq_mask(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
+ int gpio = d->hwirq;
spin_lock_irqsave(&tlmm_lock, irq_flags);
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
@@ -259,8 +251,8 @@ static void msm_gpio_irq_mask(struct irq_data *d)
static void msm_gpio_irq_unmask(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
+ int gpio = d->hwirq;
spin_lock_irqsave(&tlmm_lock, irq_flags);
__set_bit(gpio, msm_gpio.enabled_irqs);
@@ -271,8 +263,8 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
+ int gpio = d->hwirq;
uint32_t bits;
spin_lock_irqsave(&tlmm_lock, irq_flags);
@@ -281,14 +273,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
bits |= BIT(INTR_DECT_CTL);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
__set_bit(gpio, msm_gpio.dual_edge_irqs);
else
__clear_bit(gpio, msm_gpio.dual_edge_irqs);
} else {
bits &= ~BIT(INTR_DECT_CTL);
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
__clear_bit(gpio, msm_gpio.dual_edge_irqs);
}
@@ -331,7 +323,7 @@ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ int gpio = d->hwirq;
if (on) {
if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
@@ -363,7 +355,6 @@ static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_lockdep_class(irq, &msm_gpio_lock_class);
irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
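Editor's note: the msm-v2 hunks can drop the msm_irq_to_gpio() helper because, for a gpiochip with its own irq_domain, the hwirq cookie stored in struct irq_data already is the controller-local line number. The equivalence, as a one-function sketch with an illustrative name:

#include <linux/irq.h>

/* Sketch: the domain's hwirq is the GPIO offset, so no reverse
 * lookup through the gpio_chip is required. */
static inline irq_hw_number_t example_irq_to_gpio(struct irq_data *d)
{
	return irqd_to_hwirq(d);	/* same value as d->hwirq */
}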
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 1a54205860f5..b396bf3bf294 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,9 +458,9 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
- struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+ struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 cause, type;
int i;
@@ -787,8 +787,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (irq < 0)
continue;
- irq_set_handler_data(irq, mvchip);
- irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
+ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+ mvchip);
}
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index ec1eb1b7250f..b752b560126e 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -275,8 +275,8 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
- struct mxc_gpio_port *port = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
@@ -292,7 +292,7 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_msk, irq_stat;
struct mxc_gpio_port *port;
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
@@ -339,7 +339,7 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
return 0;
}
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
@@ -354,6 +354,7 @@ static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_set_irq_type;
ct->chip.irq_set_wake = gpio_set_wake_irq;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 551d15d7c369..b7f383eb18d9 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -157,7 +157,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio)
static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
- struct mxs_gpio_port *port = irq_get_handler_data(irq);
+ struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
desc->irq_data.chip->irq_ack(&desc->irq_data);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b0c57d505be7..2ae0d47e9554 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/gpio-omap.h>
#define OFF_MODE 1
+#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
static LIST_HEAD(omap_gpio_list);
@@ -57,7 +58,7 @@ struct gpio_bank {
u32 saved_datain;
u32 level_mask;
u32 toggle_mask;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct gpio_chip chip;
struct clk *dbck;
u32 mod_usage;
@@ -175,7 +176,7 @@ static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
- clk_prepare_enable(bank->dbck);
+ clk_enable(bank->dbck);
bank->dbck_enabled = true;
writel_relaxed(bank->dbck_enable_mask,
@@ -193,7 +194,7 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
*/
writel_relaxed(0, bank->base + bank->regs->debounce_en);
- clk_disable_unprepare(bank->dbck);
+ clk_disable(bank->dbck);
bank->dbck_enabled = false;
}
}
@@ -204,8 +205,9 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
* @offset: the gpio number on this @bank
* @debounce: debounce time to use
*
- * OMAP's debounce time is in 31us steps so we need
- * to convert and round up to the closest unit.
+ * OMAP's debounce time is in 31us steps
+ * <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
+ * so we need to convert and round up to the closest unit.
*/
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
unsigned debounce)
@@ -213,34 +215,33 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
void __iomem *reg;
u32 val;
u32 l;
+ bool enable = !!debounce;
if (!bank->dbck_flag)
return;
- if (debounce < 32)
- debounce = 0x01;
- else if (debounce > 7936)
- debounce = 0xff;
- else
- debounce = (debounce / 0x1f) - 1;
+ if (enable) {
+ debounce = DIV_ROUND_UP(debounce, 31) - 1;
+ debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+ }
l = BIT(offset);
- clk_prepare_enable(bank->dbck);
+ clk_enable(bank->dbck);
reg = bank->base + bank->regs->debounce;
writel_relaxed(debounce, reg);
reg = bank->base + bank->regs->debounce_en;
val = readl_relaxed(reg);
- if (debounce)
+ if (enable)
val |= l;
else
val &= ~l;
bank->dbck_enable_mask = val;
writel_relaxed(val, reg);
- clk_disable_unprepare(bank->dbck);
+ clk_disable(bank->dbck);
/*
* Enable debounce clock per module.
* This call is mandatory because in omap_gpio_request() when
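Editor's note: given the documented relation <debounce time> = (DEBOUNCETIME + 1) x 31 us, rounding up means writing DIV_ROUND_UP(debounce, 31) - 1 into the register, as the hunk above does. A worked example with an assumed input:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Worked example (assumed input): debounce_us = 100 gives
 * DIV_ROUND_UP(100, 31) - 1 = 4 - 1 = 3, and the hardware then
 * filters for (3 + 1) * 31 = 124 us, the closest step >= 100 us. */
static unsigned int example_debounce_field(unsigned int debounce_us)
{
	return DIV_ROUND_UP(debounce_us, 31) - 1;
}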
@@ -285,7 +286,7 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
bank->context.debounce = 0;
writel_relaxed(bank->context.debounce, bank->base +
bank->regs->debounce);
- clk_disable_unprepare(bank->dbck);
+ clk_disable(bank->dbck);
bank->dbck_enabled = false;
}
}
@@ -498,22 +499,24 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
retval = omap_set_gpio_triggering(bank, offset, type);
- if (retval)
+ if (retval) {
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
goto error;
+ }
omap_gpio_init_irq(bank, offset);
if (!omap_gpio_is_input(bank, offset)) {
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
retval = -EINVAL;
goto error;
}
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
return 0;
@@ -634,14 +637,14 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
return -EINVAL;
}
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
if (enable)
bank->context.wake_en |= gpio_bit;
else
bank->context.wake_en &= ~gpio_bit;
writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -667,10 +670,10 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
omap_enable_gpio_module(bank, offset);
bank->mod_usage |= BIT(offset);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -680,14 +683,14 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->mod_usage &= ~(BIT(offset));
if (!LINE_USED(bank->irq_usage, offset)) {
omap_set_gpio_direction(bank, offset, 1);
omap_clear_gpio_debounce(bank, offset);
}
omap_disable_gpio_module(bank, offset);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
/*
* If this is the last gpio to be freed in the bank,
@@ -714,7 +717,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
struct gpio_bank *bank;
int unmasked = 0;
struct irq_chip *irqchip = irq_desc_get_chip(desc);
- struct gpio_chip *chip = irq_get_handler_data(irq);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ unsigned long lock_flags;
chained_irq_enter(irqchip, desc);
@@ -729,6 +733,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 isr_saved, level_mask = 0;
u32 enabled;
+ raw_spin_lock_irqsave(&bank->lock, lock_flags);
+
enabled = omap_get_gpio_irqbank_mask(bank);
isr_saved = isr = readl_relaxed(isr_reg) & enabled;
@@ -742,6 +748,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
if (!level_mask && !unmasked) {
@@ -756,6 +764,7 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
bit = __ffs(isr);
isr &= ~(BIT(bit));
+ raw_spin_lock_irqsave(&bank->lock, lock_flags);
/*
* Some chips can't respond to both rising and falling
* at the same time. If this irq was requested with
@@ -766,6 +775,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (bank->toggle_mask & (BIT(bit)))
omap_toggle_gpio_edge_triggering(bank, bit);
+ raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
bit));
}
@@ -789,7 +800,7 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
if (!LINE_USED(bank->mod_usage, offset))
omap_set_gpio_direction(bank, offset, 1);
@@ -798,12 +809,12 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
omap_enable_gpio_module(bank, offset);
bank->irq_usage |= BIT(offset);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
omap_gpio_unmask_irq(d);
return 0;
err:
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
if (!BANK_USED(bank))
pm_runtime_put(bank->dev);
return -EINVAL;
@@ -815,7 +826,7 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
unsigned long flags;
unsigned offset = d->hwirq;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->irq_usage &= ~(BIT(offset));
omap_set_gpio_irqenable(bank, offset, 0);
omap_clear_gpio_irqstatus(bank, offset);
@@ -823,7 +834,7 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
if (!LINE_USED(bank->mod_usage, offset))
omap_clear_gpio_debounce(bank, offset);
omap_disable_gpio_module(bank, offset);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
/*
* If this is the last IRQ to be freed in the bank,
@@ -847,10 +858,10 @@ static void omap_gpio_mask_irq(struct irq_data *d)
unsigned offset = d->hwirq;
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
omap_set_gpio_irqenable(bank, offset, 0);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}
static void omap_gpio_unmask_irq(struct irq_data *d)
@@ -860,7 +871,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
if (trigger)
omap_set_gpio_triggering(bank, offset, trigger);
@@ -872,7 +883,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
}
omap_set_gpio_irqenable(bank, offset, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}
/*---------------------------------------------------------------------*/
@@ -885,9 +896,9 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -900,9 +911,9 @@ static int omap_mpuio_resume_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
writel_relaxed(bank->context.wake_en, mask_reg);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -948,9 +959,9 @@ static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
bank = container_of(chip, struct gpio_bank, chip);
reg = bank->base + bank->regs->direction;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
dir = !!(readl_relaxed(reg) & BIT(offset));
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return dir;
}
@@ -960,9 +971,9 @@ static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
omap_set_gpio_direction(bank, offset, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -984,10 +995,10 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
omap_set_gpio_direction(bank, offset, 0);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -999,9 +1010,9 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
omap2_set_gpio_debounce(bank, offset, debounce);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -1012,9 +1023,9 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
}
/*---------------------------------------------------------------------*/
@@ -1059,10 +1070,6 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
/* Initialize interface clk ungated, module enabled */
if (bank->regs->ctrl)
writel_relaxed(0, base + bank->regs->ctrl);
-
- bank->dbck = clk_get(bank->dev, "dbclk");
- if (IS_ERR(bank->dbck))
- dev_err(bank->dev, "Could not get gpio dbck\n");
}
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
@@ -1176,15 +1183,19 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_set_wake = omap_gpio_wake_enable,
irqc->name = dev_name(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(!res)) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -ENODEV;
+ bank->irq = platform_get_irq(pdev, 0);
+ if (bank->irq <= 0) {
+ if (!bank->irq)
+ bank->irq = -ENXIO;
+ if (bank->irq != -EPROBE_DEFER)
+ dev_err(dev,
+ "can't get irq resource ret=%d\n", bank->irq);
+ return bank->irq;
}
- bank->irq = res->start;
bank->dev = dev;
bank->chip.dev = dev;
+ bank->chip.owner = THIS_MODULE;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
@@ -1210,16 +1221,26 @@ static int omap_gpio_probe(struct platform_device *pdev)
else
bank->set_dataout = omap_set_gpio_dataout_mask;
- spin_lock_init(&bank->lock);
+ raw_spin_lock_init(&bank->lock);
/* Static mapping, never released */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bank->base = devm_ioremap_resource(dev, res);
if (IS_ERR(bank->base)) {
- irq_domain_remove(bank->chip.irqdomain);
return PTR_ERR(bank->base);
}
+ if (bank->dbck_flag) {
+ bank->dbck = devm_clk_get(bank->dev, "dbclk");
+ if (IS_ERR(bank->dbck)) {
+ dev_err(bank->dev,
+ "Could not get gpio dbck. Disable debounce\n");
+ bank->dbck_flag = false;
+ } else {
+ clk_prepare(bank->dbck);
+ }
+ }
+
platform_set_drvdata(pdev, bank);
pm_runtime_enable(bank->dev);
@@ -1251,6 +1272,8 @@ static int omap_gpio_remove(struct platform_device *pdev)
list_del(&bank->node);
gpiochip_remove(&bank->chip);
pm_runtime_disable(bank->dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
return 0;
}
@@ -1268,7 +1291,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
unsigned long flags;
u32 wake_low, wake_hi;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
/*
* Only edges can generate a wakeup event to the PRCM.
@@ -1321,7 +1344,7 @@ update_gpio_context_count:
bank->get_context_loss_count(bank->dev);
omap_gpio_dbck_disable(bank);
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -1336,7 +1359,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
unsigned long flags;
int c;
- spin_lock_irqsave(&bank->lock, flags);
+ raw_spin_lock_irqsave(&bank->lock, flags);
/*
* On the first resume during the probe, the context has not
@@ -1372,14 +1395,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (c != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
}
}
if (!bank->workaround_enabled) {
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
@@ -1434,7 +1457,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
}
bank->workaround_enabled = false;
- spin_unlock_irqrestore(&bank->lock, flags);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d233eb3b8132..50caeb1ee350 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
"could not connect irqchip to gpiochip\n");
return ret;
}
+
+ gpiochip_set_chained_irqchip(&chip->gpio_chip,
+ &pca953x_irq_chip,
+ client->irq, NULL);
}
return 0;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 404f3c61ef9b..1d4d9bc8b69d 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -88,7 +88,6 @@ struct pcf857x {
struct gpio_chip chip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
- spinlock_t slock; /* protect irq demux */
unsigned out; /* software latch */
unsigned status; /* current status */
unsigned int irq_parent;
@@ -185,23 +184,21 @@ static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t pcf857x_irq(int irq, void *data)
{
struct pcf857x *gpio = data;
- unsigned long change, i, status, flags;
+ unsigned long change, i, status;
status = gpio->read(gpio->client);
- spin_lock_irqsave(&gpio->slock, flags);
-
/*
* call the interrupt handler iff gpio is used as
* interrupt source, just to avoid bad irqs
*/
-
+ mutex_lock(&gpio->lock);
change = (gpio->status ^ status) & gpio->irq_enabled;
- for_each_set_bit(i, &change, gpio->chip.ngpio)
- handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
gpio->status = status;
+ mutex_unlock(&gpio->lock);
- spin_unlock_irqrestore(&gpio->slock, flags);
+ for_each_set_bit(i, &change, gpio->chip.ngpio)
+ handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
return IRQ_HANDLED;
}
@@ -293,7 +290,6 @@ static int pcf857x_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&gpio->lock);
- spin_lock_init(&gpio->slock);
gpio->chip.base = pdata ? pdata->gpio_base : -1;
gpio->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2d9a950ca2d4..34ed176df15a 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -281,9 +281,9 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
/* And the handler */
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
unlock:
spin_unlock_irqrestore(&chip->spinlock, flags);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cdbbcf0faf9d..55a11de3d5b7 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -524,7 +524,7 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_noprobe(irq);
return 0;
}
@@ -643,20 +643,20 @@ static int pxa_gpio_probe(struct platform_device *pdev)
irq = gpio_to_irq(0);
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
if (irq1 > 0) {
irq = gpio_to_irq(1);
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
for (irq = gpio_to_irq(gpio_offset);
irq <= gpio_to_irq(pxa_last_gpio); irq++) {
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1e14a6c74ed1..2a8122444614 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -251,17 +251,32 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ struct gpio_rcar_priv *p = gpio_to_priv(chip);
+ int error;
+
+ error = pm_runtime_get_sync(&p->pdev->dev);
+ if (error < 0)
+ return error;
+
+ error = pinctrl_request_gpio(chip->base + offset);
+ if (error)
+ pm_runtime_put(&p->pdev->dev);
+
+ return error;
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_rcar_priv *p = gpio_to_priv(chip);
+
pinctrl_free_gpio(chip->base + offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
+
+ pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
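Editor's note: the rcar hunks move the runtime-PM reference from probe/remove into request/free, so the controller is powered only while at least one of its lines is claimed; note the error path drops the reference again if the pinctrl request fails. A sketch of that balanced pattern, with an illustrative helper:

#include <linux/pm_runtime.h>

static int example_claim_line(void);	/* illustrative helper */

static int example_request(struct device *dev)
{
	int error;

	error = pm_runtime_get_sync(dev);	/* power the block up */
	if (error < 0)
		return error;

	error = example_claim_line();
	if (error)
		pm_runtime_put(dev);	/* keep the refcount balanced */

	return error;
}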
@@ -327,6 +342,10 @@ static const struct of_device_id gpio_rcar_of_table[] = {
.compatible = "renesas,gpio-r8a7794",
.data = &gpio_rcar_info_gen2,
}, {
+ .compatible = "renesas,gpio-r8a7795",
+ /* Gen3 GPIO is identical to Gen2. */
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
}, {
@@ -405,7 +424,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -487,7 +505,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
err1:
gpiochip_remove(gpio_chip);
err0:
- pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -498,7 +515,6 @@ static int gpio_rcar_remove(struct platform_device *pdev)
gpiochip_remove(&p->gpio_chip);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3fa22dade243..67bd2f5d89e8 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
{
irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_noprobe(irq);
return 0;
}
@@ -173,9 +173,9 @@ static struct irq_domain *sa1100_gpio_irqdomain;
* and call the handler.
*/
static void
-sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc)
+sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
{
- unsigned int mask;
+ unsigned int irq, mask;
mask = GEDR;
do {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 18579ac65b2b..55e47828ddfc 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -346,7 +346,7 @@ static void gsta_alloc_irq_chip(struct gsta_gpio *chip)
i = chip->irq_base + j;
irq_set_chip_and_handler(i, &ct->chip, ct->handler);
irq_set_chip_data(i, gc);
- irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
}
gc->irq_cnt = i - gc->irq_base;
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 31b244cffabb..d1d585ddb9ab 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -102,7 +102,7 @@ static struct gpio_chip template_chip = {
static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+ struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -130,7 +130,7 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static void tc3589x_gpio_irq_lock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+ struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
mutex_lock(&tc3589x_gpio->irq_lock);
}
@@ -138,7 +138,7 @@ static void tc3589x_gpio_irq_lock(struct irq_data *d)
static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+ struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
static const u8 regmap[] = {
[REG_IBE] = TC3589x_GPIOIBE0,
@@ -167,7 +167,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
static void tc3589x_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+ struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -178,7 +178,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+ struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b25c90f725c..9b14aafb576d 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -252,9 +252,9 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
tegra_gpio_enable(gpio);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
return 0;
}
@@ -268,16 +268,14 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct tegra_gpio_bank *bank;
int port;
int pin;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
chained_irq_enter(chip, desc);
- bank = irq_get_handler_data(irq);
-
for (port = 0; port < 4; port++) {
int gpio = tegra_gpio_compose(bank->bank, port, 0);
unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
@@ -509,7 +507,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
handle_simple_irq);
- set_irq_flags(irq, IRQF_VALID);
}
for (i = 0; i < tegra_gpio_bank_count; i++) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index e8f97e03c9bb..5a492054589f 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -194,11 +194,12 @@ out:
static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
{
- struct timbgpio *tgpio = irq_get_handler_data(irq);
+ struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned long ipr;
int offset;
- desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
+ data->chip->irq_ack(data);
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
@@ -294,13 +295,10 @@ static int timbgpio_probe(struct platform_device *pdev)
irq_set_chip_and_handler(tgpio->irq_base + i,
&timbgpio_irqchip, handle_simple_irq);
irq_set_chip_data(tgpio->irq_base + i, tgpio);
-#ifdef CONFIG_ARM
- set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
-#endif
+ irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE);
}
- irq_set_handler_data(irq, tgpio);
- irq_set_chained_handler(irq, timbgpio_irq);
+ irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio);
return 0;
}
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 445660adc898..bbac92ae4c32 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -510,8 +510,8 @@ static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
/* Setup chained handler for this GPIO bank */
- irq_set_handler_data(bank->irq, bank);
- irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);
+ irq_set_chained_handler_and_data(bank->irq, tz1090_gpio_irq_handler,
+ bank);
return 0;
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7bd9f209ffa8..3d5714d4f405 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -60,6 +60,8 @@ struct vf610_gpio_port {
#define PORT_INT_EITHER_EDGE 0xb
#define PORT_INT_LOGIC_ONE 0xc
+static struct irq_chip vf610_gpio_irq_chip;
+
static const struct of_device_id vf610_gpio_dt_ids[] = {
{ .compatible = "fsl,vf610-gpio" },
{ /* sentinel */ }
@@ -120,7 +122,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
- struct vf610_gpio_port *port = irq_get_handler_data(irq);
+ struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int pin;
unsigned long irq_isfr;
@@ -173,6 +175,11 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
port->irqc[d->hwirq] = irqc;
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+
return 0;
}
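Editor's note: this vf610 hunk still uses the legacy __irq_set_handler_locked(d->irq, ...) spelling even as other hunks in the series migrate to the irq_data-based helper; assuming the two are interchangeable here, the modern form would read:

#include <linux/irq.h>

/* Sketch: modern spelling of the same handler update. */
static void example_update_handler(struct irq_data *d, unsigned int type)
{
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);
}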
@@ -263,7 +270,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
vf610_gpio_writel(~0, port->base + PORT_ISFR);
ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_edge_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
gpiochip_remove(gc);
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 77fe5d3cb105..d5284dfe01fe 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
if (!chip->gpio_width[1])
return;
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET,
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_state[1]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET,
+ xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_dir[1]);
}
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 9bdab7203d65..e02499a15e72 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -387,7 +387,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
- return err;
+ return -ENODEV;
}
err = gpiochip_add(gc);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
new file mode 100644
index 000000000000..12ee1969298c
--- /dev/null
+++ b/drivers/gpio/gpio-zx.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define ZX_GPIO_DIR 0x00
+#define ZX_GPIO_IVE 0x04
+#define ZX_GPIO_IV 0x08
+#define ZX_GPIO_IEP 0x0C
+#define ZX_GPIO_IEN 0x10
+#define ZX_GPIO_DI 0x14
+#define ZX_GPIO_DO1 0x18
+#define ZX_GPIO_DO0 0x1C
+#define ZX_GPIO_DO 0x20
+
+#define ZX_GPIO_IM 0x28
+#define ZX_GPIO_IE 0x2C
+
+#define ZX_GPIO_MIS 0x30
+#define ZX_GPIO_IC 0x34
+
+#define ZX_GPIO_NR 16
+
+struct zx_gpio {
+ spinlock_t lock;
+
+ void __iomem *base;
+ struct gpio_chip gc;
+ bool uses_pinctrl;
+};
+
+static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
+{
+ return container_of(gc, struct zx_gpio, gc);
+}
+
+static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ struct zx_gpio *chip = to_zx(gc);
+ int gpio = gc->base + offset;
+
+ if (chip->uses_pinctrl)
+ return pinctrl_request_gpio(gpio);
+ return 0;
+}
+
+static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ struct zx_gpio *chip = to_zx(gc);
+ int gpio = gc->base + offset;
+
+ if (chip->uses_pinctrl)
+ pinctrl_free_gpio(gpio);
+}
+
+static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct zx_gpio *chip = to_zx(gc);
+ unsigned long flags;
+ u16 gpiodir;
+
+ if (offset >= gc->ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+ gpiodir &= ~BIT(offset);
+ writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct zx_gpio *chip = to_zx(gc);
+ unsigned long flags;
+ u16 gpiodir;
+
+ if (offset >= gc->ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+ gpiodir |= BIT(offset);
+ writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+
+ if (value)
+ writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+ else
+ writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int zx_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ struct zx_gpio *chip = to_zx(gc);
+
+ return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
+}
+
+static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct zx_gpio *chip = to_zx(gc);
+
+ if (value)
+ writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+ else
+ writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+}
+
+static int zx_irq_type(struct irq_data *d, unsigned trigger)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct zx_gpio *chip = to_zx(gc);
+ int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+ u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
+ u16 bit = BIT(offset);
+
+ if (offset < 0 || offset >= ZX_GPIO_NR)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
+ gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
+ gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
+ gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ gpiois |= bit;
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ gpioiev |= bit;
+ else
+ gpioiev &= ~bit;
+ } else
+ gpiois &= ~bit;
+
+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ gpioi_epos |= bit;
+ gpioi_eneg |= bit;
+ } else {
+ if (trigger & IRQ_TYPE_EDGE_RISING) {
+ gpioi_epos |= bit;
+ gpioi_eneg &= ~bit;
+ } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+ gpioi_eneg |= bit;
+ gpioi_epos &= ~bit;
+ }
+ }
+
+ writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
+ writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
+ writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
+ writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ unsigned long pending;
+ int offset;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct zx_gpio *chip = to_zx(gc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(irqchip, desc);
+
+ pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
+ writew_relaxed(pending, chip->base + ZX_GPIO_IC);
+ if (pending) {
+ for_each_set_bit(offset, &pending, ZX_GPIO_NR)
+ generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ offset));
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static void zx_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct zx_gpio *chip = to_zx(gc);
+ u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+ u16 gpioie;
+
+ spin_lock(&chip->lock);
+ gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
+ writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+ gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
+ writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+ spin_unlock(&chip->lock);
+}
+
+static void zx_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct zx_gpio *chip = to_zx(gc);
+ u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+ u16 gpioie;
+
+ spin_lock(&chip->lock);
+ gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
+ writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+ gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
+ writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+ spin_unlock(&chip->lock);
+}
+
+static struct irq_chip zx_irqchip = {
+ .name = "zx-gpio",
+ .irq_mask = zx_irq_mask,
+ .irq_unmask = zx_irq_unmask,
+ .irq_set_type = zx_irq_type,
+};
+
+static int zx_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zx_gpio *chip;
+ struct resource *res;
+ int irq, id, ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ spin_lock_init(&chip->lock);
+ if (of_property_read_bool(dev->of_node, "gpio-ranges"))
+ chip->uses_pinctrl = true;
+
+ id = of_alias_get_id(dev->of_node, "gpio");
+ chip->gc.request = zx_gpio_request;
+ chip->gc.free = zx_gpio_free;
+ chip->gc.direction_input = zx_direction_input;
+ chip->gc.direction_output = zx_direction_output;
+ chip->gc.get = zx_get_value;
+ chip->gc.set = zx_set_value;
+ chip->gc.base = ZX_GPIO_NR * id;
+ chip->gc.ngpio = ZX_GPIO_NR;
+ chip->gc.label = dev_name(dev);
+ chip->gc.dev = dev;
+ chip->gc.owner = THIS_MODULE;
+
+ ret = gpiochip_add(&chip->gc);
+ if (ret)
+ return ret;
+
+ /*
+ * irq_chip support
+ */
+ writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
+ writew_relaxed(0, chip->base + ZX_GPIO_IE);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "invalid IRQ\n");
+ gpiochip_remove(&chip->gc);
+ return -ENODEV;
+ }
+
+ ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
+ 0, handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "could not add irqchip\n");
+ gpiochip_remove(&chip->gc);
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
+ irq, zx_irq_handler);
+
+ platform_set_drvdata(pdev, chip);
+ dev_info(dev, "ZX GPIO chip registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id zx_gpio_match[] = {
+ {
+ .compatible = "zte,zx296702-gpio",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, zx_gpio_match);
+
+static struct platform_driver zx_gpio_driver = {
+ .probe = zx_gpio_probe,
+ .driver = {
+ .name = "zx_gpio",
+ .of_match_table = of_match_ptr(zx_gpio_match),
+ },
+};
+
+module_platform_driver(zx_gpio_driver);
+
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 2e87c4b8da26..27348e7cb705 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -441,10 +441,10 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
if (type & IRQ_TYPE_LEVEL_MASK) {
- __irq_set_chip_handler_name_locked(irq_data->irq,
+ irq_set_chip_handler_name_locked(irq_data,
&zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
} else {
- __irq_set_chip_handler_name_locked(irq_data->irq,
+ irq_set_chip_handler_name_locked(irq_data,
&zynq_gpio_edge_irqchip, handle_level_irq, NULL);
}
@@ -518,7 +518,7 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
{
u32 int_sts, int_enb;
unsigned int bank_num;
- struct zynq_gpio *gpio = irq_get_handler_data(irq);
+ struct zynq_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
chained_irq_enter(irqchip, desc);
@@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
gpiochip_remove(&gpio->chip);
clk_disable_unprepare(gpio->clk);
device_set_wakeup_capable(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -781,6 +782,12 @@ static int __init zynq_gpio_init(void)
}
postcore_initcall(zynq_gpio_init);
+static void __exit zynq_gpio_exit(void)
+{
+ platform_driver_unregister(&zynq_gpio_driver);
+}
+module_exit(zynq_gpio_exit);
+
MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Zynq GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 533fe5dbe6f8..143a9bdbaa53 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -68,7 +68,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* GPIO controller driver.
*
* Typically the returned offset is same as @pin, but if the GPIO
- * controller uses pin controller and the mapping is not contigous the
+ * controller uses pin controller and the mapping is not contiguous the
* offset might be different.
*/
static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 9a0ec48a4737..fa6e3c8823d6 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -136,7 +136,6 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
{
struct device_node *chip_np;
enum of_gpio_flags xlate_flags;
- struct gpio_desc *desc;
struct gg_data gg_data = {
.flags = &xlate_flags,
};
@@ -193,9 +192,7 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
if (name && of_property_read_string(np, "line-name", name))
*name = np->name;
- desc = gg_data.out_gpio;
-
- return desc;
+ return gg_data.out_gpio;
}
/**
@@ -338,7 +335,7 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
EXPORT_SYMBOL(of_mm_gpiochip_remove);
#ifdef CONFIG_PINCTRL
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
struct device_node *np = chip->of_node;
struct of_phandle_args pinspec;
@@ -349,7 +346,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
struct property *group_names;
if (!np)
- return;
+ return 0;
group_names = of_find_property(np, group_names_propname, NULL);
@@ -361,11 +358,11 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
pctldev = of_pinctrl_get(pinspec.np);
if (!pctldev)
- break;
+ return -EPROBE_DEFER;
if (pinspec.args[2]) {
if (group_names) {
- ret = of_property_read_string_index(np,
+ of_property_read_string_index(np,
group_names_propname,
index, &name);
if (strlen(name)) {
@@ -381,7 +378,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
pinspec.args[1],
pinspec.args[2]);
if (ret)
- break;
+ return ret;
} else {
/* npins == 0: special range */
if (pinspec.args[1]) {
@@ -411,32 +408,41 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
ret = gpiochip_add_pingroup_range(chip, pctldev,
pinspec.args[0], name);
if (ret)
- break;
+ return ret;
}
}
+
+ return 0;
}
#else
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip) {}
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
#endif
-void of_gpiochip_add(struct gpio_chip *chip)
+int of_gpiochip_add(struct gpio_chip *chip)
{
+ int status;
+
if ((!chip->of_node) && (chip->dev))
chip->of_node = chip->dev->of_node;
if (!chip->of_node)
- return;
+ return 0;
if (!chip->of_xlate) {
chip->of_gpio_n_cells = 2;
chip->of_xlate = of_gpio_simple_xlate;
}
- of_gpiochip_add_pin_range(chip);
+ status = of_gpiochip_add_pin_range(chip);
+ if (status)
+ return status;
+
of_node_get(chip->of_node);
of_gpiochip_scan_hogs(chip);
+
+ return 0;
}
void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bf4bd1d120c3..980c1f87866a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -190,7 +190,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction);
*/
static int gpiochip_add_to_list(struct gpio_chip *chip)
{
- struct list_head *pos = &gpio_chips;
+ struct list_head *pos;
struct gpio_chip *_chip;
int err = 0;
@@ -287,7 +287,13 @@ int gpiochip_add(struct gpio_chip *chip)
INIT_LIST_HEAD(&chip->pin_ranges);
#endif
- of_gpiochip_add(chip);
+ if (!chip->owner && chip->dev && chip->dev->driver)
+ chip->owner = chip->dev->driver->owner;
+
+ status = of_gpiochip_add(chip);
+ if (status)
+ goto err_remove_chip;
+
acpi_gpiochip_add(chip);
status = gpiochip_sysfs_register(chip);
@@ -443,8 +449,8 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
* The parent irqchip is already using the chip_data for this
* irqchip, so our callbacks simply use the handler_data.
*/
- irq_set_handler_data(parent_irq, gpiochip);
- irq_set_chained_handler(parent_irq, parent_handler);
+ irq_set_chained_handler_and_data(parent_irq, parent_handler,
+ gpiochip);
gpiochip->irq_parent = parent_irq;
}
@@ -456,12 +462,6 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpiochip_irq_lock_class;
-
/**
* gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
* @d: the irqdomain used by this irqchip
@@ -478,16 +478,17 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
struct gpio_chip *chip = d->host_data;
irq_set_chip_data(irq, chip);
- irq_set_lockdep_class(irq, &gpiochip_irq_lock_class);
+ /*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+ irq_set_lockdep_class(irq, chip->lock_key);
irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
/* Chips that can sleep need nested thread handlers */
if (chip->can_sleep && !chip->irq_not_threaded)
irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
+
/*
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
@@ -502,9 +503,6 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *chip = d->host_data;
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
if (chip->can_sleep)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
@@ -522,10 +520,14 @@ static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ if (!try_module_get(chip->owner))
+ return -ENODEV;
+
if (gpiochip_lock_as_irq(chip, d->hwirq)) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
+ module_put(chip->owner);
return -EINVAL;
}
return 0;
@@ -536,6 +538,7 @@ static void gpiochip_irq_relres(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
gpiochip_unlock_as_irq(chip, d->hwirq);
+ module_put(chip->owner);
}
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
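Editor's note: the try_module_get()/module_put() pairing added above, together with the chip->owner default taken from the driver in gpiochip_add(), pins the provider module for as long as one of its GPIO IRQs is requested, so it cannot be unloaded underneath a live interrupt; try_module_get(NULL) succeeds, so built-in chips are unaffected. A condensed sketch of the pairing:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/module.h>

static int example_irq_reqres(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	/* Pin the providing module; a NULL owner (built-in) succeeds. */
	if (!try_module_get(chip->owner))
		return -ENODEV;
	return 0;
}

static void example_irq_relres(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	module_put(chip->owner);	/* balances example_irq_reqres() */
}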
@@ -584,6 +587,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
+ * @lock_key: lockdep class
*
* This function closely associates a certain irqchip with a certain
* gpiochip, providing an irq domain to translate the local IRQs to
@@ -599,11 +603,12 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* needs to be open coded.
*/
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ struct lock_class_key *lock_key)
{
struct device_node *of_node;
unsigned int offset;
@@ -629,6 +634,7 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
gpiochip->irq_handler = handler;
gpiochip->irq_default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
+ gpiochip->lock_key = lock_key;
gpiochip->irqdomain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
@@ -636,8 +642,16 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
gpiochip->irqchip = NULL;
return -EINVAL;
}
- irqchip->irq_request_resources = gpiochip_irq_reqres;
- irqchip->irq_release_resources = gpiochip_irq_relres;
+
+ /*
+ * It is possible for a driver to override this, but only if the
+ * alternative functions are both implemented.
+ */
+ if (!irqchip->irq_request_resources &&
+ !irqchip->irq_release_resources) {
+ irqchip->irq_request_resources = gpiochip_irq_reqres;
+ irqchip->irq_release_resources = gpiochip_irq_relres;
+ }
/*
* Prepare the mapping since the irqchip shall be orthogonal to
@@ -658,7 +672,7 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
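[Editor's note: as the comment in the hunk above says, an irqchip may bring its own resource callbacks, but then it must supply both, and it should preserve the locking and module pinning the defaults implement. A hedged sketch of such an override; the foo_* clock helpers are hypothetical:]

static int foo_irq_reqres(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	if (!try_module_get(chip->owner))
		return -ENODEV;
	if (gpiochip_lock_as_irq(chip, d->hwirq)) {
		module_put(chip->owner);
		return -EINVAL;
	}
	foo_enable_irq_clock(chip);	/* hypothetical extra setup */
	return 0;
}

static void foo_irq_relres(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	foo_disable_irq_clock(chip);	/* hypothetical */
	gpiochip_unlock_as_irq(chip, d->hwirq);
	module_put(chip->owner);
}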
#else /* CONFIG_GPIOLIB_IRQCHIP */
@@ -671,7 +685,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
/**
* gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
* @chip: the gpiochip to add the range for
- * @pinctrl: the dev_name() of the pin controller to map to
+ * @pctldev: the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_group: name of the pin group inside the pin controller
*/
@@ -1672,6 +1686,19 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
mutex_unlock(&gpio_lookup_lock);
}
+/**
+ * gpiod_remove_lookup_table() - unregister GPIO device consumers
+ * @table: table of consumers to unregister
+ */
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
+{
+ mutex_lock(&gpio_lookup_lock);
+
+ list_del(&table->list);
+
+ mutex_unlock(&gpio_lookup_lock);
+}
+
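[Editor's note: gpiod_add_lookup_table() already existed; the removal counterpart added here makes machine-specific lookup tables usable from code that can unload or hot-unplug. A sketch of the symmetric usage; the device and chip names are illustrative:]

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table foo_gpios_table = {
	.dev_id = "foo-device",		/* matches the consumer's dev_name() */
	.table = {
		GPIO_LOOKUP("gpiochip0", 15, "reset", GPIO_ACTIVE_LOW),
		{ },
	},
};

static int __init foo_board_init(void)
{
	gpiod_add_lookup_table(&foo_gpios_table);
	return 0;
}

static void __exit foo_board_exit(void)
{
	gpiod_remove_lookup_table(&foo_gpios_table);
}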
static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
@@ -1894,12 +1921,12 @@ EXPORT_SYMBOL_GPL(gpiod_count);
* dev, -ENOENT if no GPIO has been assigned to the requested function, or
* another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
-struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id,
+struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return gpiod_get_index(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL_GPL(__gpiod_get);
+EXPORT_SYMBOL_GPL(gpiod_get);
/**
* gpiod_get_optional - obtain an optional GPIO for a given GPIO function
@@ -1911,13 +1938,13 @@ EXPORT_SYMBOL_GPL(__gpiod_get);
* the requested function it will return NULL. This is convenient for drivers
* that need to handle optional GPIOs.
*/
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return gpiod_get_index_optional(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL_GPL(__gpiod_get_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_optional);
/**
@@ -1974,7 +2001,7 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* requested function and/or index, or another IS_ERR() code if an error
* occurred while trying to acquire the GPIO.
*/
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -2023,7 +2050,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
return desc;
}
-EXPORT_SYMBOL_GPL(__gpiod_get_index);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -2092,7 +2119,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
* specified index was assigned to the requested function it will return NULL.
* This is convenient for drivers that need to handle optional GPIOs.
*/
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags)
@@ -2107,7 +2134,7 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
return desc;
}
-EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
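[Editor's note: the renames in the hunks above drop the double-underscore internal names that used to sit behind compatibility wrappers; consumers keep the same spelling and now link against the real symbols. Typical consumer usage, sketched with a hypothetical optional reset line:]

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int foo_assert_reset(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* NULL (not an error) means the optional line is simply absent */
	reset = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset) {
		gpiod_set_value_cansleep(reset, 1);
		usleep_range(1000, 2000);
		gpiod_set_value_cansleep(reset, 0);
		gpiod_put(reset);
	}
	return 0;
}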
/**
* gpiod_hog - Hog the specified GPIO desc given the provided flags
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca311d8c3..1a0a8df2eed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ select FB_SYS_FOPS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
help
FBDEV helpers for KMS drivers.
+config DRM_FBDEV_EMULATION
+ bool "Enable legacy fbdev support for your modesetting driver"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ default y
+ help
+ Choose this option if you need legacy fbdev support. Note that
+ this option also provides the Linux console support on top of
+ your modesetting driver.
+
+ If in doubt, say "Y".
+
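[Editor's note: the practical effect of the new symbol is that a kernel can keep the KMS helpers while compiling out the fbdev layer. Per the accompanying helper changes in this release, drm_fb_helper calls are expected to become no-op stubs when the option is off, so drivers need no #ifdefs. A sketch; foo_drm_private is hypothetical:]

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct foo_drm_private {		/* hypothetical driver state */
	struct drm_fb_helper *fbdev;
};

static void foo_drm_lastclose(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	/*
	 * With CONFIG_DRM_FBDEV_EMULATION=n this call is expected to be
	 * stubbed out, so no conditional compilation is needed here.
	 */
	drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}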
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM_KMS_HELPER
@@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER
source "drivers/gpu/drm/i2c/Kconfig"
-source "drivers/gpu/drm/bridge/Kconfig"
-
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -110,6 +128,7 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
@@ -133,6 +152,7 @@ config DRM_AMDGPU
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"
+source "drivers/gpu/drm/fsl-dcu/Kconfig"
+
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
+source "drivers/gpu/drm/bridge/Kconfig"
+
source "drivers/gpu/drm/sti/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d0534504..45e7719846b1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 616dfd4a1398..04c270757030 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
- -Idrivers/gpu/drm/amd/include
+ -Idrivers/gpu/drm/amd/include \
+ -Idrivers/gpu/drm/amd/amdgpu \
+ -Idrivers/gpu/drm/amd/scheduler
amdgpu-y := amdgpu_drv.o
@@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
- ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+ amdgpu_amdkfd_gfx_v7.o
amdgpu-y += \
vi.o
@@ -43,6 +46,7 @@ amdgpu-y += \
amdgpu_dpm.o \
cz_smc.o cz_dpm.o \
tonga_smc.o tonga_dpm.o \
+ fiji_smc.o fiji_dpm.o \
iceland_smc.o iceland_dpm.o
# add DCE block
@@ -71,6 +75,20 @@ amdgpu-y += \
amdgpu_vce.o \
vce_v3_0.o
+# add amdkfd interfaces
+amdgpu-y += \
+ amdgpu_amdkfd.o \
+ amdgpu_amdkfd_gfx_v8.o
+
+# add cgs
+amdgpu-y += amdgpu_cgs.o
+
+# GPU scheduler
+amdgpu-y += \
+ ../scheduler/gpu_scheduler.o \
+ ../scheduler/sched_fence.o \
+ amdgpu_sched.o
+
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..668939a14206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>
+#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include "amd_shared.h"
-#include "amdgpu_family.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
+#include "gpu_scheduler.h"
+
/*
* Module parameters.
*/
@@ -77,7 +79,11 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -92,6 +98,9 @@ extern int amdgpu_vm_block_size;
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES 2
+
/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS 4
@@ -177,7 +186,9 @@ struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
+struct amdgpu_job;
struct amdgpu_irq_src;
+struct amdgpu_fpriv;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -239,7 +250,7 @@ struct amdgpu_buffer_funcs {
unsigned copy_num_dw;
/* used for buffer migration */
- void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+ void (*emit_copy_buffer)(struct amdgpu_ib *ib,
/* src addr in bytes */
uint64_t src_offset,
/* dst addr in bytes */
@@ -254,7 +265,7 @@ struct amdgpu_buffer_funcs {
unsigned fill_num_dw;
/* used for buffer clearing */
- void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+ void (*emit_fill_buffer)(struct amdgpu_ib *ib,
/* value to write to memory */
uint32_t src_data,
/* dst addr in bytes */
@@ -332,6 +343,8 @@ struct amdgpu_ring_funcs {
int (*test_ring)(struct amdgpu_ring *ring);
int (*test_ib)(struct amdgpu_ring *ring);
bool (*is_lockup)(struct amdgpu_ring *ring);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
};
/*
@@ -381,10 +394,10 @@ struct amdgpu_fence_driver {
uint64_t sync_seq[AMDGPU_MAX_RINGS];
atomic64_t last_seq;
bool initialized;
- bool delayed_irq;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct delayed_work lockup_work;
+ wait_queue_head_t fence_queue;
};
/* some special values for the owner field */
@@ -423,20 +436,20 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
- uint64_t seq, struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
- struct amdgpu_fence **fences,
- bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+ struct fence **array,
+ uint32_t count,
+ bool intr,
+ signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
@@ -481,7 +494,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
return a->seq < b->seq;
}
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
void *owner, struct amdgpu_fence **fence);
/*
@@ -509,7 +522,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct amdgpu_fence **fence);
+ struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
struct amdgpu_bo_list_entry {
@@ -532,14 +545,16 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- uint64_t addr;
- struct amdgpu_fence *last_pt_update;
+ struct fence *last_pt_update;
unsigned ref_count;
- /* protected by vm mutex */
- struct list_head mappings;
+ /* protected by vm mutex and spinlock */
struct list_head vm_status;
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+
/* constant after initialization */
struct amdgpu_vm *vm;
struct amdgpu_bo *bo;
@@ -643,7 +658,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct amdgpu_fence *fence;
+ struct fence *fence;
};
/*
@@ -685,7 +700,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
struct amdgpu_semaphore **semaphore,
- struct amdgpu_fence *fence);
+ struct fence *fence);
/*
* Synchronization
@@ -693,20 +708,23 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
struct amdgpu_sync {
struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS];
- struct amdgpu_fence *last_vm_update;
+ DECLARE_HASHTABLE(fences, 4);
+ struct fence *last_vm_update;
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
- struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct amdgpu_fence *fence);
+ struct fence *fence);
/*
* GART structures, functions & helpers
@@ -821,7 +839,9 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_rbo;
- struct fence *fence;
+ struct fence *excl;
+ unsigned shared_count;
+ struct fence **shared;
};
@@ -844,6 +864,8 @@ struct amdgpu_ib {
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t flags;
+ /* resulting sequence number */
+ uint64_t sequence;
};
enum amdgpu_ring_type {
@@ -854,11 +876,23 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_VCE
};
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_ib *ibs,
+ unsigned num_ibs,
+ int (*free_job)(struct amdgpu_job *),
+ void *owner,
+ struct fence **fence);
+
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler *scheduler;
+ spinlock_t fence_lock;
struct mutex *ring_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
@@ -892,6 +926,7 @@ struct amdgpu_ring {
struct amdgpu_ctx *current_ctx;
enum amdgpu_ring_type type;
char name[16];
+ bool is_pte_ring;
};
/*
@@ -933,7 +968,7 @@ struct amdgpu_vm_id {
unsigned id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
- struct amdgpu_fence *flushed_updates;
+ struct fence *flushed_updates;
/* last use of vmid */
struct amdgpu_fence *last_id_use;
};
@@ -943,18 +978,22 @@ struct amdgpu_vm {
struct rb_root va;
- /* protecting invalidated and freed */
+ /* protecting invalidated */
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
- /* BOs freed, but not yet updated in the PT */
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
+ struct fence *page_directory_fence;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
@@ -983,27 +1022,47 @@ struct amdgpu_vm_manager {
* context related structures
*/
-struct amdgpu_ctx_state {
- uint64_t flags;
- uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING 16
+
+struct amdgpu_ctx_ring {
+ uint64_t sequence;
+ struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
+ struct amd_sched_entity entity;
};
struct amdgpu_ctx {
- /* call kref_get()before CS start and kref_put() after CS fence signaled */
- struct kref refcount;
- struct amdgpu_fpriv *fpriv;
- struct amdgpu_ctx_state state;
- uint32_t id;
- unsigned reset_counter;
+ struct kref refcount;
+ struct amdgpu_device *adev;
+ unsigned reset_counter;
+ spinlock_t ring_lock;
+ struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};
struct amdgpu_ctx_mgr {
- struct amdgpu_device *adev;
- struct idr ctx_handles;
- /* lock for IDR system */
- struct mutex lock;
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ /* protected by lock */
+ struct idr ctx_handles;
};
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+ struct amdgpu_ctx *ctx);
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
/*
* file private structure
*/
@@ -1012,7 +1071,7 @@ struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct mutex bo_list_lock;
struct idr bo_list_handles;
- struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_ctx_mgr ctx_mgr;
};
/*
@@ -1130,6 +1189,9 @@ struct amdgpu_gfx {
uint32_t me_feature_version;
uint32_t ce_feature_version;
uint32_t pfp_feature_version;
+ uint32_t rlc_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1157,6 +1219,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1204,6 +1267,16 @@ struct amdgpu_cs_parser {
struct amdgpu_user_fence uf;
};
+struct amdgpu_job {
+ struct amd_sched_job base;
+ struct amdgpu_device *adev;
+ struct amdgpu_ib *ibs;
+ uint32_t num_ibs;
+ struct mutex job_lock;
+ struct amdgpu_user_fence uf;
+ int (*free_job)(struct amdgpu_job *sched_job);
+};
+
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
return p->ibs[ib_idx].ptr[idx];
@@ -1598,7 +1671,6 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -1614,6 +1686,9 @@ struct amdgpu_uvd {
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
@@ -1626,6 +1701,7 @@ struct amdgpu_vce {
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
+ unsigned harvest_config;
};
/*
@@ -1635,8 +1711,10 @@ struct amdgpu_sdma {
/* SDMA firmware */
const struct firmware *fw;
uint32_t fw_version;
+ uint32_t feature_version;
struct amdgpu_ring ring;
+ bool burst_nop;
};
/*
@@ -1841,17 +1919,12 @@ struct amdgpu_atcs {
struct amdgpu_atcs_functions functions;
};
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
- uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
- uint32_t id);
-
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
/*
* Core structure, functions and helpers.
@@ -1862,6 +1935,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+};
+
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -1869,7 +1948,7 @@ struct amdgpu_device {
struct rw_semaphore exclusive_lock;
/* ASIC */
- enum amdgpu_asic_type asic_type;
+ enum amd_asic_type asic_type;
uint32_t family;
uint32_t rev_id;
uint32_t external_rev_id;
@@ -1962,7 +2041,6 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- wait_queue_head_t fence_queue;
unsigned fence_context;
struct mutex ring_lock;
unsigned num_rings;
@@ -1985,7 +2063,7 @@ struct amdgpu_device {
struct amdgpu_gfx gfx;
/* sdma */
- struct amdgpu_sdma sdma[2];
+ struct amdgpu_sdma sdma[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src sdma_trap_irq;
struct amdgpu_irq_src sdma_illegal_inst_irq;
@@ -2004,13 +2082,19 @@ struct amdgpu_device {
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
- bool *ip_block_enabled;
+ struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
+
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
+
+ /* kernel context for IB submission */
+ struct amdgpu_ctx kernel_ctx;
};
bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2118,6 +2202,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
ring->ring_free_dw--;
}
+static inline struct amdgpu_sdma *amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+ if (&adev->sdma[i].ring == ring)
+ break;
+
+ if (i < AMDGPU_MAX_SDMA_INSTANCES)
+ return &adev->sdma[i];
+ else
+ return NULL;
+}
+
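[Editor's note: a sketch of how the new helper pairs with the per-instance state introduced in this patch; the wrapper below is illustrative. A ring callback can recover its struct amdgpu_sdma, e.g. to test the burst_nop capability flag before emitting padding:]

static bool foo_ring_supports_burst_nop(struct amdgpu_ring *ring)
{
	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);

	/* NULL means this ring is not one of the SDMA instances */
	return sdma && sdma->burst_nop;
}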
/*
* ASICs macro.
*/
@@ -2169,8 +2268,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2198,6 +2297,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx,
+ struct amdgpu_ib *ibs,
+ uint32_t num_ibs);
+
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
@@ -2261,11 +2366,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
- struct amdgpu_fence *updates);
+ struct fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_fence *fence);
@@ -2295,7 +2400,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
/*
* functions used by amdgpu_encoder.c
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 000000000000..496ed2192eba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amd_shared.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL)
+ return false;
+#endif
+ return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+ bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+ switch (rdev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+ break;
+#endif
+ case CHIP_CARRIZO:
+ kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+ break;
+ default:
+ return false;
+ }
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL) {
+ kfd2kgd = NULL;
+ return false;
+ }
+
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ symbol_put(kgd2kfd_init);
+ kfd2kgd = NULL;
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ kfd2kgd = NULL;
+ kgd2kfd = NULL;
+ return false;
+ }
+
+ return true;
+#else
+ kfd2kgd = NULL;
+ return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+ if (kgd2kfd)
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+ rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd) {
+ struct kgd2kfd_shared_resources gpu_resources = {
+ .compute_vmid_bitmap = 0xFF00,
+
+ .first_compute_pipe = 1,
+ .compute_pipe_count = 4 - 1,
+ };
+
+ amdgpu_doorbell_get_kfd_info(rdev,
+ &gpu_resources.doorbell_physical_address,
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+
+ kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+ }
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd) {
+ kgd2kfd->device_exit(rdev->kfd);
+ rdev->kfd = NULL;
+ }
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+ const void *ih_ring_entry)
+{
+ if (rdev->kfd)
+ kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+ if (rdev->kfd)
+ kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+ int r = 0;
+
+ if (rdev->kfd)
+ r = kgd2kfd->resume(rdev->kfd);
+
+ return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+ switch (p) {
+ case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+ default: return AMDGPU_GEM_DOMAIN_GTT;
+ }
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+ struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+ int r;
+
+ BUG_ON(kgd == NULL);
+ BUG_ON(gpu_addr == NULL);
+ BUG_ON(cpu_ptr == NULL);
+
+ *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if ((*mem) == NULL)
+ return -ENOMEM;
+
+ r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+ if (r) {
+ dev_err(rdev->dev,
+ "failed to allocate BO for amdkfd (%d)\n", r);
+ return r;
+ }
+
+ /* map the buffer */
+ r = amdgpu_bo_reserve((*mem)->bo, true);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+ goto allocate_mem_reserve_bo_failed;
+ }
+
+ r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+ &(*mem)->gpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ goto allocate_mem_pin_bo_failed;
+ }
+ *gpu_addr = (*mem)->gpu_addr;
+
+ r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+ if (r) {
+ dev_err(rdev->dev,
+ "(%d) failed to map bo to kernel for amdkfd\n", r);
+ goto allocate_mem_kmap_bo_failed;
+ }
+ *cpu_ptr = (*mem)->cpu_ptr;
+
+ amdgpu_bo_unreserve((*mem)->bo);
+
+ return 0;
+
+allocate_mem_kmap_bo_failed:
+ amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+ amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+ amdgpu_bo_unref(&(*mem)->bo);
+
+ return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+ struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+ BUG_ON(mem == NULL);
+
+ amdgpu_bo_reserve(mem->bo, true);
+ amdgpu_bo_kunmap(mem->bo);
+ amdgpu_bo_unpin(mem->bo);
+ amdgpu_bo_unreserve(mem->bo);
+ amdgpu_bo_unref(&(mem->bo));
+ kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev =
+ (struct amdgpu_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+ if (rdev->asic_funcs->get_gpu_clock_counter)
+ return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+ /* The sclk is in quanta of 10 kHz */
+ return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644
index 000000000000..a8be765542e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
+struct kgd_mem {
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+ const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 000000000000..dd2037bc0b4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC (4)
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
+/* not defined in the CI/KV reg file */
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+ mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+ mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+ mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+ mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+ .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+ .write_vmid_invalidate_request = write_vmid_invalidate_request,
+ .get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+ return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&adev->srbm_mutex);
+ WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ WREG32(mmSRBM_GFX_CNTL, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+ WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished and the
+ * SW cleared it. So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+ while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+ cpu_relax();
+ WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+ return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+ WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+ WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+ WREG32(mmCP_HPD_EOP_VMID, 0);
+ WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+ pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t wptr_shadow, is_wptr_shadow_valid;
+ struct cik_mqd *m;
+
+ m = get_mqd(mqd);
+
+ is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+ WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+ WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+ WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+ WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+ WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+ WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+ WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+ WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+ WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+ WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+ WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+ WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+ WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+ WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+ WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+ WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+ WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+ WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+ if (is_wptr_shadow_valid)
+ WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+ WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+ release_queue(kgd);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+ m->sdma_rlc_rb_base);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = RREG32(mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+ high == RREG32(mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+ while (true) {
+ temp = RREG32(mmCP_HQD_ACTIVE);
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (timeout == 0) {
+ pr_err("kfd: cp queue preemption time out (%dms)\n",
+ temp);
+ release_queue(kgd);
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= 20;
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ union TCP_WATCH_CNTL_BITS cntl;
+ unsigned int i;
+
+ cntl.u32All = 0;
+
+ cntl.bitfields.valid = 0;
+ cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+ cntl.bitfields.atc = 1;
+
+ /* Turn off all address watch points until their registers are set */
+ for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+ WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ union TCP_WATCH_CNTL_BITS cntl;
+
+ cntl.u32All = cntl_val;
+
+ /* Turn off this watch point until all its registers are set */
+ cntl.bitfields.valid = 0;
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+ /* Enable the watch point */
+ cntl.bitfields.valid = 1;
+
+ WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+ ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+ return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t data;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32(mmSQ_CMD, sq_cmd);
+
+ /* Restore the GRBM_GFX_INDEX register */
+
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+ GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+ WREG32(mmGRBM_GFX_INDEX, data);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset)
+{
+ return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ const union amdgpu_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[0].fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[1].fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 000000000000..dfd1d503bccf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC (4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_gtt_mem_allocation = alloc_gtt_mem,
+ .free_gtt_mem = free_gtt_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_pipeline = kgd_init_pipeline,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_pasid =
+ get_atc_vmid_pasid_mapping_pasid,
+ .get_atc_vmid_pasid_mapping_valid =
+ get_atc_vmid_pasid_mapping_valid,
+ .write_vmid_invalidate_request = write_vmid_invalidate_request,
+ .get_fw_version = get_fw_version
+};
+
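+/*
+ * Usage sketch (an assumption about the caller, mirroring the CIK file):
+ * amdkfd does not call these helpers directly. It obtains this vtable
+ * through amdgpu_amdkfd_gfx_8_0_get_functions() and then goes through
+ * the function pointers, e.g. kfd2kgd->hqd_load(kgd, mqd, pipe, queue,
+ * wptr).
+ */
+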
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+ return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&adev->srbm_mutex);
+ WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ WREG32(mmSRBM_GFX_CNTL, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
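+ /* KFD pipe ids appear to be shifted by one: hw pipe 0 of the first
+ * MEC is left to the gfx driver's own queues, hence the pre-increment
+ * below.
+ */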
+ uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
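+/*
+ * Worked example for the decomposition above, assuming VI_PIPE_PER_MEC
+ * is 4 as defined in this file: KFD pipe_id 5 is pre-incremented to hw
+ * pipe 6, giving mec = 6 / 4 + 1 = 2 and pipe = 6 % 4 = 2, i.e. the
+ * third pipe of the second MEC.
+ */
+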
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+ WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+ while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+ cpu_relax();
+ WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+ return 0;
+}
+
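+/*
+ * Illustrative sketch, not part of the original change: how the mapping
+ * word written above is packed and unpacked. The 16-bit PASID sits in
+ * the low bits and VALID is bit 31; a pasid of 0 means "no mapping".
+ */
+#if 0 /* example only */
+static uint32_t pack_pasid_mapping(uint16_t pasid)
+{
+ return pasid ? ((uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK) : 0;
+}
+
+static uint16_t unpack_pasid(uint32_t mapping)
+{
+ return mapping & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+#endif
+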
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+ pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+ return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+ return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ struct vi_mqd *m;
+ uint32_t shadow_wptr, valid_wptr;
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ m = get_mqd(mqd);
+
+ valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+ WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+ WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+ WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+ WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+ WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+ WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+ /* copy_from_user() returns the number of bytes left uncopied,
+ * so a return of zero means the shadow wptr was fetched and can
+ * be programmed.
+ */
+ if (valid_wptr == 0)
+ WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+ WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+ WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+ WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+ WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+ WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+ WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+ WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+ WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+ WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+ WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+ WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+ WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+ WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+ WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+ release_queue(kgd);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = RREG32(mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+ high == RREG32(mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
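+/*
+ * Illustration, not from the original patch: CP_HQD_PQ_BASE holds the
+ * ring buffer address in 256-byte units, hence the ">> 8" above. For a
+ * queue_address of 0x1234567800 the comparison uses low = 0x12345678
+ * and high = 0, matching how kgd_hqd_load() programmed the registers.
+ */
+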
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+ while (true) {
+ temp = RREG32(mmCP_HQD_ACTIVE);
+ /* the dequeue request is complete once the HQD deasserts ACTIVE */
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (timeout == 0) {
+ pr_err("kfd: cp queue preemption timed out\n");
+ release_queue(kgd);
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int timeout)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
+ uint32_t sdma_base_addr;
+ uint32_t temp;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE_MASK)
+ break;
+ if (timeout == 0)
+ return -ETIME;
+ msleep(20);
+ timeout -= 20;
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+ return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+ uint8_t vmid)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ /* the PASID itself lives in the low 16 bits */
+ return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+ return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ uint32_t cntl_val,
+ uint32_t addr_hi,
+ uint32_t addr_lo)
+{
+ return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32(mmSQ_CMD, sq_cmd);
+
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SH_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32(mmGRBM_GFX_INDEX, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
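+/*
+ * For reference (illustrative): REG_SET_FIELD() is the usual mask/shift
+ * idiom, roughly
+ *
+ *	data = (data & ~GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK) |
+ *	       ((1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT) &
+ *	        GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK);
+ *
+ * so the three calls above rebuild the same all-broadcast value that
+ * the CIK variant of this function builds from the *_MASK macros.
+ */
+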
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+ unsigned int watch_point_id,
+ unsigned int reg_offset)
+{
+ return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ const union amdgpu_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->gfx.rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA1:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[0].fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA2:
+ hdr = (const union amdgpu_firmware_header *)
+ adev->sdma[1].fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a588371d54a..77f1d7c6ea3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -897,7 +897,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
return true;
}
@@ -1058,7 +1058,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
SET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return;
args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a35cbc..98d59ee640ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -42,17 +42,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
if (r)
goto exit_do_move;
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r)
goto exit_do_move;
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f6d418..02add0a508cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
- if (!(adev->flags & AMDGPU_IS_APU))
+ if (!(adev->flags & AMD_IS_APU))
if (!amdgpu_card_posted(adev))
return false;
@@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
bool found = false;
/* ATRM is for the discrete card only */
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 000000000000..6b1243f9f86d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <drm/drmP.h>
+#include <linux/firmware.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "cgs_linux.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+
+
+struct amdgpu_cgs_device {
+ struct cgs_device base;
+ struct amdgpu_device *adev;
+};
+
+#define CGS_FUNC_ADEV \
+ struct amdgpu_device *adev = \
+ ((struct amdgpu_cgs_device *)cgs_device)->adev
+
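+/*
+ * The macro above is just a container downcast, sketched here for
+ * clarity (example only):
+ *
+ *	struct amdgpu_cgs_device *dev =
+ *		(struct amdgpu_cgs_device *)cgs_device;
+ *	struct amdgpu_device *adev = dev->adev;
+ *
+ * Most callbacks below start with CGS_FUNC_ADEV to recover the device.
+ */
+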
+static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+ uint64_t *mc_start, uint64_t *mc_size,
+ uint64_t *mem_size)
+{
+ CGS_FUNC_ADEV;
+ switch (type) {
+ case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+ case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+ *mc_start = 0;
+ *mc_size = adev->mc.visible_vram_size;
+ *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
+ break;
+ case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+ case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+ *mc_start = adev->mc.visible_vram_size;
+ *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
+ *mem_size = *mc_size;
+ break;
+ case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+ case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+ *mc_start = adev->mc.gtt_start;
+ *mc_size = adev->mc.gtt_size;
+ *mem_size = adev->mc.gtt_size - adev->gart_pin_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
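+/*
+ * Address map implied by the function above (illustrative): visible FB
+ * is [0, visible_vram_size), invisible FB is [visible_vram_size,
+ * real_vram_size) and GART is [gtt_start, gtt_start + gtt_size); the
+ * reported mem_size subtracts the already-pinned size for the visible
+ * FB and GART cases.
+ */
+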
+static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+ uint64_t size,
+ uint64_t min_offset, uint64_t max_offset,
+ cgs_handle_t *kmem_handle, uint64_t *mcaddr)
+{
+ CGS_FUNC_ADEV;
+ int ret;
+ struct amdgpu_bo *bo;
+ struct page *kmem_page = vmalloc_to_page(kmem);
+ int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
+ ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+ if (ret)
+ return ret;
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /* pin buffer into GTT */
+ ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
+ min_offset, max_offset, mcaddr);
+ amdgpu_bo_unreserve(bo);
+
+ *kmem_handle = (cgs_handle_t)bo;
+ return ret;
+}
+
+static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+{
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
+
+ if (obj) {
+ int r = amdgpu_bo_reserve(obj, false);
+ if (likely(r == 0)) {
+ amdgpu_bo_unpin(obj);
+ amdgpu_bo_unreserve(obj);
+ }
+ amdgpu_bo_unref(&obj);
+
+ }
+ return 0;
+}
+
+static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+ enum cgs_gpu_mem_type type,
+ uint64_t size, uint64_t align,
+ uint64_t min_offset, uint64_t max_offset,
+ cgs_handle_t *handle)
+{
+ CGS_FUNC_ADEV;
+ uint16_t flags = 0;
+ int ret = 0;
+ uint32_t domain = 0;
+ struct amdgpu_bo *obj;
+ struct ttm_placement placement;
+ struct ttm_place place;
+
+ if (WARN_ON(min_offset > max_offset))
+ return -EINVAL;
+
+ /* fail if the alignment is not a power of 2 */
+ if (((align != 1) && (align & (align - 1)))
+ || size == 0 || align == 0)
+ return -EINVAL;
+
+
+ switch (type) {
+ case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+ case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (max_offset > adev->mc.real_vram_size)
+ return -EINVAL;
+ place.fpfn = min_offset >> PAGE_SHIFT;
+ place.lpfn = max_offset >> PAGE_SHIFT;
+ place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ break;
+ case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+ case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ place.fpfn =
+ max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
+ place.lpfn =
+ min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
+ place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ }
+
+ break;
+ case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ place.fpfn = min_offset >> PAGE_SHIFT;
+ place.lpfn = max_offset >> PAGE_SHIFT;
+ place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ break;
+ case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+ flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ place.fpfn = min_offset >> PAGE_SHIFT;
+ place.lpfn = max_offset >> PAGE_SHIFT;
+ place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_UNCACHED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ *handle = 0;
+
+ placement.placement = &place;
+ placement.num_placement = 1;
+ placement.busy_placement = &place;
+ placement.num_busy_placement = 1;
+
+ ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
+ true, domain, flags,
+ NULL, &placement, &obj);
+ if (ret) {
+ DRM_ERROR("(%d) bo create failed\n", ret);
+ return ret;
+ }
+ *handle = (cgs_handle_t)obj;
+
+ return ret;
+}
+
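+/*
+ * Worked example for the alignment test in amdgpu_cgs_alloc_gpu_mem()
+ * above (illustrative): "align & (align - 1)" clears the lowest set
+ * bit, so it is zero only for powers of two. 4096 & 4095 == 0 passes,
+ * while 48 & 47 == 32 is rejected.
+ */
+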
+static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
+ cgs_handle_t *handle)
+{
+ CGS_FUNC_ADEV;
+ int r;
+ uint32_t dma_handle;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct drm_device *dev = adev->ddev;
+ struct drm_file *file_priv = NULL, *priv;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(priv, &dev->filelist, lhead) {
+ rcu_read_lock();
+ if (priv->pid == get_pid(task_pid(current)))
+ file_priv = priv;
+ rcu_read_unlock();
+ if (file_priv)
+ break;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ /* no drm_file for the current process: bail out before it is
+ * dereferenced below
+ */
+ if (!file_priv)
+ return -EINVAL;
+
+ r = dev->driver->prime_fd_to_handle(dev,
+ file_priv, dmabuf_fd,
+ &dma_handle);
+ spin_lock(&file_priv->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&file_priv->object_idr, dma_handle);
+ if (obj == NULL) {
+ spin_unlock(&file_priv->table_lock);
+ return -EINVAL;
+ }
+ spin_unlock(&file_priv->table_lock);
+ bo = gem_to_amdgpu_bo(obj);
+ *handle = (cgs_handle_t)bo;
+ return 0;
+}
+
+static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+ if (obj) {
+ int r = amdgpu_bo_reserve(obj, false);
+ if (likely(r == 0)) {
+ amdgpu_bo_kunmap(obj);
+ amdgpu_bo_unpin(obj);
+ amdgpu_bo_unreserve(obj);
+ }
+ amdgpu_bo_unref(&obj);
+
+ }
+ return 0;
+}
+
+static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+ uint64_t *mcaddr)
+{
+ int r;
+ u64 min_offset, max_offset;
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+ WARN_ON_ONCE(obj->placement.num_placement > 1);
+
+ min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
+ max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ min_offset, max_offset, mcaddr);
+ amdgpu_bo_unreserve(obj);
+ return r;
+}
+
+static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+ int r;
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+ r = amdgpu_bo_reserve(obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = amdgpu_bo_unpin(obj);
+ amdgpu_bo_unreserve(obj);
+ return r;
+}
+
+static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+ void **map)
+{
+ int r;
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+ r = amdgpu_bo_reserve(obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = amdgpu_bo_kmap(obj, map);
+ amdgpu_bo_unreserve(obj);
+ return r;
+}
+
+static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+ int r;
+ struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+ r = amdgpu_bo_reserve(obj, false);
+ if (unlikely(r != 0))
+ return r;
+ amdgpu_bo_kunmap(obj);
+ amdgpu_bo_unreserve(obj);
+ return r;
+}
+
+static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+{
+ CGS_FUNC_ADEV;
+ return RREG32(offset);
+}
+
+static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+ uint32_t value)
+{
+ CGS_FUNC_ADEV;
+ WREG32(offset, value);
+}
+
+static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+ enum cgs_ind_reg space,
+ unsigned index)
+{
+ CGS_FUNC_ADEV;
+ switch (space) {
+ case CGS_IND_REG__MMIO:
+ return RREG32_IDX(index);
+ case CGS_IND_REG__PCIE:
+ return RREG32_PCIE(index);
+ case CGS_IND_REG__SMC:
+ return RREG32_SMC(index);
+ case CGS_IND_REG__UVD_CTX:
+ return RREG32_UVD_CTX(index);
+ case CGS_IND_REG__DIDT:
+ return RREG32_DIDT(index);
+ case CGS_IND_REG__AUDIO_ENDPT:
+ DRM_ERROR("audio endpt register access not implemented.\n");
+ return 0;
+ }
+ WARN(1, "Invalid indirect register space");
+ return 0;
+}
+
+static void amdgpu_cgs_write_ind_register(void *cgs_device,
+ enum cgs_ind_reg space,
+ unsigned index, uint32_t value)
+{
+ CGS_FUNC_ADEV;
+ switch (space) {
+ case CGS_IND_REG__MMIO:
+ return WREG32_IDX(index, value);
+ case CGS_IND_REG__PCIE:
+ return WREG32_PCIE(index, value);
+ case CGS_IND_REG__SMC:
+ return WREG32_SMC(index, value);
+ case CGS_IND_REG__UVD_CTX:
+ return WREG32_UVD_CTX(index, value);
+ case CGS_IND_REG__DIDT:
+ return WREG32_DIDT(index, value);
+ case CGS_IND_REG__AUDIO_ENDPT:
+ DRM_ERROR("audio endpt register access not implemented.\n");
+ return;
+ }
+ WARN(1, "Invalid indirect register space");
+}
+
+static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+{
+ CGS_FUNC_ADEV;
+ uint8_t val;
+ int ret = pci_read_config_byte(adev->pdev, addr, &val);
+ if (WARN(ret, "pci_read_config_byte error"))
+ return 0;
+ return val;
+}
+
+static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+{
+ CGS_FUNC_ADEV;
+ uint16_t val;
+ int ret = pci_read_config_word(adev->pdev, addr, &val);
+ if (WARN(ret, "pci_read_config_word error"))
+ return 0;
+ return val;
+}
+
+static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+ unsigned addr)
+{
+ CGS_FUNC_ADEV;
+ uint32_t val;
+ int ret = pci_read_config_dword(adev->pdev, addr, &val);
+ if (WARN(ret, "pci_read_config_dword error"))
+ return 0;
+ return val;
+}
+
+static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+ uint8_t value)
+{
+ CGS_FUNC_ADEV;
+ int ret = pci_write_config_byte(adev->pdev, addr, value);
+ WARN(ret, "pci_write_config_byte error");
+}
+
+static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+ uint16_t value)
+{
+ CGS_FUNC_ADEV;
+ int ret = pci_write_config_word(adev->pdev, addr, value);
+ WARN(ret, "pci_write_config_word error");
+}
+
+static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+ uint32_t value)
+{
+ CGS_FUNC_ADEV;
+ int ret = pci_write_config_dword(adev->pdev, addr, value);
+ WARN(ret, "pci_write_config_dword error");
+}
+
+static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+ unsigned table, uint16_t *size,
+ uint8_t *frev, uint8_t *crev)
+{
+ CGS_FUNC_ADEV;
+ uint16_t data_start;
+
+ if (amdgpu_atom_parse_data_header(
+ adev->mode_info.atom_context, table, size,
+ frev, crev, &data_start))
+ return (uint8_t*)adev->mode_info.atom_context->bios +
+ data_start;
+
+ return NULL;
+}
+
+static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+ uint8_t *frev, uint8_t *crev)
+{
+ CGS_FUNC_ADEV;
+
+ if (amdgpu_atom_parse_cmd_header(
+ adev->mode_info.atom_context, table,
+ frev, crev))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+ void *args)
+{
+ CGS_FUNC_ADEV;
+
+ return amdgpu_atom_execute_table(
+ adev->mode_info.atom_context, table, args);
+}
+
+static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+{
+ /* TODO */
+ return 0;
+}
+
+static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+{
+ /* TODO */
+ return 0;
+}
+
+static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+ int active)
+{
+ /* TODO */
+ return 0;
+}
+
+static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+ enum cgs_clock clock, unsigned freq)
+{
+ /* TODO */
+ return 0;
+}
+
+static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+ enum cgs_engine engine, int powered)
+{
+ /* TODO */
+ return 0;
+}
+
+
+
+static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+ enum cgs_clock clock,
+ struct cgs_clock_limits *limits)
+{
+ /* TODO */
+ return 0;
+}
+
+static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+ const uint32_t *voltages)
+{
+ DRM_ERROR("not implemented");
+ return -EPERM;
+}
+
+struct cgs_irq_params {
+ unsigned src_id;
+ cgs_irq_source_set_func_t set;
+ cgs_irq_handler_func_t handler;
+ void *private_data;
+};
+
+static int cgs_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ struct cgs_irq_params *irq_params =
+ (struct cgs_irq_params *)src->data;
+ if (!irq_params)
+ return -EINVAL;
+ if (!irq_params->set)
+ return -EINVAL;
+ return irq_params->set(irq_params->private_data,
+ irq_params->src_id,
+ type,
+ (int)state);
+}
+
+static int cgs_process_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct cgs_irq_params *irq_params =
+ (struct cgs_irq_params *)source->data;
+ if (!irq_params)
+ return -EINVAL;
+ if (!irq_params->handler)
+ return -EINVAL;
+ return irq_params->handler(irq_params->private_data,
+ irq_params->src_id,
+ entry->iv_entry);
+}
+
+static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
+ .set = cgs_set_irq_state,
+ .process = cgs_process_irq,
+};
+
+static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+ unsigned num_types,
+ cgs_irq_source_set_func_t set,
+ cgs_irq_handler_func_t handler,
+ void *private_data)
+{
+ CGS_FUNC_ADEV;
+ int ret = 0;
+ struct cgs_irq_params *irq_params;
+ struct amdgpu_irq_src *source =
+ kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+ if (!source)
+ return -ENOMEM;
+ irq_params =
+ kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
+ if (!irq_params) {
+ kfree(source);
+ return -ENOMEM;
+ }
+ source->num_types = num_types;
+ source->funcs = &cgs_irq_funcs;
+ irq_params->src_id = src_id;
+ irq_params->set = set;
+ irq_params->handler = handler;
+ irq_params->private_data = private_data;
+ source->data = (void *)irq_params;
+ ret = amdgpu_irq_add_id(adev, src_id, source);
+ if (ret) {
+ kfree(irq_params);
+ kfree(source);
+ }
+
+ return ret;
+}
+
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+}
+
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+}
+
+int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
+{
+ CGS_FUNC_ADEV;
+ int i, r = -1;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ (void *)adev,
+ state);
+ break;
+ }
+ }
+ return r;
+}
+
+int amdgpu_cgs_set_powergating_state(void *cgs_device,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
+{
+ CGS_FUNC_ADEV;
+ int i, r = -1;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->set_powergating_state(
+ (void *)adev,
+ state);
+ break;
+ }
+ }
+ return r;
+}
+
+
+static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+{
+ CGS_FUNC_ADEV;
+ enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case CGS_UCODE_ID_SDMA0:
+ result = AMDGPU_UCODE_ID_SDMA0;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ result = AMDGPU_UCODE_ID_SDMA1;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ result = AMDGPU_UCODE_ID_CP_CE;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ result = AMDGPU_UCODE_ID_CP_PFP;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ result = AMDGPU_UCODE_ID_CP_ME;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ result = AMDGPU_UCODE_ID_CP_MEC1;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ if (adev->asic_type == CHIP_TONGA)
+ result = AMDGPU_UCODE_ID_CP_MEC2;
+ else if (adev->asic_type == CHIP_CARRIZO)
+ result = AMDGPU_UCODE_ID_CP_MEC1;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ result = AMDGPU_UCODE_ID_RLC_G;
+ break;
+ default:
+ DRM_ERROR("Firmware type not supported\n");
+ }
+ return result;
+}
+
+static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+ enum cgs_ucode_id type,
+ struct cgs_firmware_info *info)
+{
+ CGS_FUNC_ADEV;
+
+ if (type != CGS_UCODE_ID_SMU) {
+ uint64_t gpu_addr;
+ uint32_t data_size;
+ const struct gfx_firmware_header_v1_0 *header;
+ enum AMDGPU_UCODE_ID id;
+ struct amdgpu_firmware_info *ucode;
+
+ id = fw_type_convert(cgs_device, type);
+ /* guard against indexing ucode[] with the "unsupported" sentinel */
+ if (id == AMDGPU_UCODE_ID_MAXIMUM)
+ return -EINVAL;
+ ucode = &adev->firmware.ucode[id];
+ if (ucode->fw == NULL)
+ return -EINVAL;
+
+ gpu_addr = ucode->mc_addr;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+ if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
+ (type == CGS_UCODE_ID_CP_MEC_JT2)) {
+ gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ data_size = le32_to_cpu(header->jt_size) << 2;
+ }
+ info->mc_addr = gpu_addr;
+ info->image_size = data_size;
+ info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+ info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
+ } else {
+ char fw_name[30] = {0};
+ int err = 0;
+ uint32_t ucode_size;
+ uint32_t ucode_start_address;
+ const uint8_t *src;
+ const struct smc_firmware_header_v1_0 *hdr;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ break;
+ default:
+ DRM_ERROR("SMC firmware not supported\n");
+ return -EINVAL;
+ }
+
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err) {
+ DRM_ERROR("Failed to request firmware\n");
+ return err;
+ }
+
+ err = amdgpu_ucode_validate(adev->pm.fw);
+ if (err) {
+ DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ return err;
+ }
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ src = (const uint8_t *)(adev->pm.fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ info->version = adev->pm.fw_version;
+ info->image_size = ucode_size;
+ info->kptr = (void *)src;
+ }
+ return 0;
+}
+
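+/*
+ * Worked example for the MEC jump-table case above (illustrative): the
+ * gfx firmware header stores jt_offset and jt_size in dwords, so a
+ * jt_offset of 0x1000 places the jump table at mc_addr + (0x1000 << 2),
+ * i.e. 16 KiB into the image, with data_size = jt_size * 4 bytes.
+ */
+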
+static const struct cgs_ops amdgpu_cgs_ops = {
+ amdgpu_cgs_gpu_mem_info,
+ amdgpu_cgs_gmap_kmem,
+ amdgpu_cgs_gunmap_kmem,
+ amdgpu_cgs_alloc_gpu_mem,
+ amdgpu_cgs_free_gpu_mem,
+ amdgpu_cgs_gmap_gpu_mem,
+ amdgpu_cgs_gunmap_gpu_mem,
+ amdgpu_cgs_kmap_gpu_mem,
+ amdgpu_cgs_kunmap_gpu_mem,
+ amdgpu_cgs_read_register,
+ amdgpu_cgs_write_register,
+ amdgpu_cgs_read_ind_register,
+ amdgpu_cgs_write_ind_register,
+ amdgpu_cgs_read_pci_config_byte,
+ amdgpu_cgs_read_pci_config_word,
+ amdgpu_cgs_read_pci_config_dword,
+ amdgpu_cgs_write_pci_config_byte,
+ amdgpu_cgs_write_pci_config_word,
+ amdgpu_cgs_write_pci_config_dword,
+ amdgpu_cgs_atom_get_data_table,
+ amdgpu_cgs_atom_get_cmd_table_revs,
+ amdgpu_cgs_atom_exec_cmd_table,
+ amdgpu_cgs_create_pm_request,
+ amdgpu_cgs_destroy_pm_request,
+ amdgpu_cgs_set_pm_request,
+ amdgpu_cgs_pm_request_clock,
+ amdgpu_cgs_pm_request_engine,
+ amdgpu_cgs_pm_query_clock_limits,
+ amdgpu_cgs_set_camera_voltages,
+ amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_set_powergating_state,
+ amdgpu_cgs_set_clockgating_state
+};
+
+static const struct cgs_os_ops amdgpu_cgs_os_ops = {
+ amdgpu_cgs_import_gpu_mem,
+ amdgpu_cgs_add_irq_source,
+ amdgpu_cgs_irq_get,
+ amdgpu_cgs_irq_put
+};
+
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_cgs_device *cgs_device =
+ kmalloc(sizeof(*cgs_device), GFP_KERNEL);
+
+ if (!cgs_device) {
+ DRM_ERROR("Couldn't allocate CGS device structure\n");
+ return NULL;
+ }
+
+ cgs_device->base.ops = &amdgpu_cgs_ops;
+ cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
+ cgs_device->adev = adev;
+
+ return cgs_device;
+}
+
+void amdgpu_cgs_destroy_device(void *cgs_device)
+{
+ kfree(cgs_device);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 27df17a0e620..89c3dd62ba21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -75,6 +75,11 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't try to start link training before we
+ * have the dpcd */
+ if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..3b355aeb62fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,6 +126,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx,
+ struct amdgpu_ib *ibs,
+ uint32_t num_ibs)
+{
+ struct amdgpu_cs_parser *parser;
+ int i;
+
+ parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+ if (!parser)
+ return NULL;
+
+ parser->adev = adev;
+ parser->filp = filp;
+ parser->ctx = ctx;
+ parser->ibs = ibs;
+ parser->num_ibs = num_ibs;
+ for (i = 0; i < num_ibs; i++)
+ ibs[i].ctx = ctx;
+
+ return parser;
+}
+
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@@ -147,13 +171,13 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
/* get chunks */
INIT_LIST_HEAD(&p->validated);
- chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (chunk_array == NULL) {
r = -ENOMEM;
goto out;
}
- chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+ chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
r = -EFAULT;
@@ -161,7 +185,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->nchunks = cs->in.num_chunks;
- p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+ p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (p->chunks == NULL) {
r = -ENOMEM;
@@ -173,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+ chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
r = -EFAULT;
@@ -183,7 +207,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ cdata = (void __user *)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +259,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
}
+
p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs) {
+ if (!p->ibs)
r = -ENOMEM;
- goto out;
- }
out:
kfree(chunk_array);
@@ -331,7 +354,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+ if ((lobj->allowed_domains & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -415,18 +438,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
- unsigned i;
-
if (!error) {
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
@@ -447,21 +460,45 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
+}
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+ unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
+
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
- if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ if (!amdgpu_enable_scheduler) {
+ if (parser->ibs)
+ for (i = 0; i < parser->num_ibs; i++)
+ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+ kfree(parser->ibs);
+ if (parser->uf.bo)
+ drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ }
+
+ kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set then unvalidate the buffers, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+ amdgpu_cs_parser_fini_early(parser, error, backoff);
+ amdgpu_cs_parser_fini_late(parser);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -476,12 +513,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
+ r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+ if (r)
+ return r;
+
r = amdgpu_vm_clear_freed(adev, vm);
if (r)
return r;
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
+ struct fence *f;
+
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
if (!bo)
@@ -495,7 +538,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
- amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+ if (r)
+ return r;
}
}
@@ -529,9 +575,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
goto out;
}
amdgpu_cs_sync_rings(parser);
-
- r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
- parser->filp);
+ if (!amdgpu_enable_scheduler)
+ r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+ parser->filp);
out:
mutex_unlock(&vm->mutex);
@@ -650,7 +696,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->oa_size = amdgpu_bo_size(oa);
}
}
-
/* wrap the last IB with user fence */
if (parser->uf.bo) {
struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -669,6 +714,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_ib *ib;
int i, j, r;
@@ -692,8 +738,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
sizeof(struct drm_amdgpu_cs_chunk_dep);
for (j = 0; j < num_deps; ++j) {
- struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
+ struct amdgpu_ctx *ctx;
+ struct fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -701,82 +748,141 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
if (r)
return r;
- r = amdgpu_fence_recreate(ring, p->filp,
- deps[j].handle,
- &fence);
- if (r)
+ ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ fence = amdgpu_ctx_get_fence(ctx, ring,
+ deps[j].handle);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
+ amdgpu_ctx_put(ctx);
return r;
- amdgpu_sync_fence(&ib->sync, fence);
- amdgpu_fence_unref(&fence);
+ } else if (fence) {
+ r = amdgpu_sync_fence(adev, &ib->sync, fence);
+ fence_put(fence);
+ amdgpu_ctx_put(ctx);
+ if (r)
+ return r;
+ }
}
}
return 0;
}
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+ int i;
+ if (sched_job->ibs)
+ for (i = 0; i < sched_job->num_ibs; i++)
+ amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+ kfree(sched_job->ibs);
+ if (sched_job->uf.bo)
+ drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+ return 0;
+}
+
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
- struct amdgpu_cs_parser parser;
- int r, i;
+ struct amdgpu_cs_parser *parser;
bool reserved_buffers = false;
+ int i, r;
down_read(&adev->exclusive_lock);
if (!adev->accel_working) {
up_read(&adev->exclusive_lock);
return -EBUSY;
}
- /* initialize parser */
- memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
- parser.filp = filp;
- parser.adev = adev;
- r = amdgpu_cs_parser_init(&parser, data);
+
+ parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+ if (!parser)
+ return -ENOMEM;
+ r = amdgpu_cs_parser_init(parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(&parser, r, false);
+ amdgpu_cs_parser_fini(parser, r, false);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
- r = amdgpu_cs_parser_relocs(&parser);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- }
+ r = amdgpu_cs_parser_relocs(parser);
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r && r != -ERESTARTSYS)
+ DRM_ERROR("Failed to process the buffer list %d!\n", r);
+ else if (!r) {
+ reserved_buffers = true;
+ r = amdgpu_cs_ib_fill(adev, parser);
}
if (!r) {
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
+ r = amdgpu_cs_dependencies(adev, parser);
+ if (r)
+ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
}
- if (!r)
- r = amdgpu_cs_dependencies(adev, &parser);
-
- if (r) {
- amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
- up_read(&adev->exclusive_lock);
- r = amdgpu_cs_handle_lockup(adev, r);
- return r;
- }
+ if (r)
+ goto out;
- for (i = 0; i < parser.num_ibs; i++)
- trace_amdgpu_cs(&parser, i);
+ for (i = 0; i < parser->num_ibs; i++)
+ trace_amdgpu_cs(parser, i);
- r = amdgpu_cs_ib_vm_chunk(adev, &parser);
- if (r) {
+ r = amdgpu_cs_ib_vm_chunk(adev, parser);
+ if (r)
goto out;
+
+ if (amdgpu_enable_scheduler && parser->num_ibs) {
+ struct amdgpu_job *job;
+ struct amdgpu_ring *ring = parser->ibs->ring;
+ job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+ if (!job) {
+ /* drop the parser and exclusive_lock taken above */
+ r = -ENOMEM;
+ goto out;
+ }
+ job->base.sched = ring->scheduler;
+ job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+ job->adev = parser->adev;
+ job->ibs = parser->ibs;
+ job->num_ibs = parser->num_ibs;
+ job->base.owner = parser->filp;
+ mutex_init(&job->job_lock);
+ if (job->ibs[job->num_ibs - 1].user) {
+ memcpy(&job->uf, &parser->uf,
+ sizeof(struct amdgpu_user_fence));
+ job->ibs[job->num_ibs - 1].user = &job->uf;
+ }
+
+ job->free_job = amdgpu_cs_free_job;
+ mutex_lock(&job->job_lock);
+ r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ if (r) {
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_free_job(job);
+ kfree(job);
+ goto out;
+ }
+ cs->out.handle =
+ amdgpu_ctx_add_fence(parser->ctx, ring,
+ &job->base.s_fence->base);
+ parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
+ list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+ ttm_eu_fence_buffer_objects(&parser->ticket,
+ &parser->validated,
+ &job->base.s_fence->base);
+
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_parser_fini_late(parser);
+ up_read(&adev->exclusive_lock);
+ return 0;
}
- cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+ cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
- amdgpu_cs_parser_fini(&parser, r, true);
+ amdgpu_cs_parser_fini(parser, r, reserved_buffers);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
@@ -797,26 +903,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
- struct amdgpu_fence *fence = NULL;
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
+ struct fence *fence;
long r;
- ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
- if (ctx == NULL)
- return -EINVAL;
-
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
if (r)
return r;
- r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
- if (r)
- return r;
+ ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+ if (IS_ERR(fence))
+ r = PTR_ERR(fence);
+ else if (fence) {
+ r = fence_wait_timeout(fence, true, timeout);
+ fence_put(fence);
+ } else {
+ r = 1;
+ }
- r = fence_wait_timeout(&fence->base, true, timeout);
- amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
@@ -851,7 +960,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!reloc->bo_va)
continue;
- list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+ list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+ if (mapping->it.start > addr ||
+ addr > mapping->it.last)
+ continue;
+
+ *bo = reloc->bo_va->bo;
+ return mapping;
+ }
+
+ list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8a1891..20cbc4eb5a6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
#include <drm/drmP.h>
#include "amdgpu.h"
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+ struct amdgpu_ctx *ctx)
{
- struct amdgpu_ctx *ctx;
- struct amdgpu_ctx_mgr *mgr;
+ unsigned i, j;
+ int r;
- ctx = container_of(ref, struct amdgpu_ctx, refcount);
- mgr = &ctx->fpriv->ctx_mgr;
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->adev = adev;
+ kref_init(&ctx->refcount);
+ spin_lock_init(&ctx->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ ctx->rings[i].sequence = 1;
- idr_remove(&mgr->ctx_handles, ctx->id);
- kfree(ctx);
+ if (amdgpu_enable_scheduler) {
+ /* create context entity for each ring */
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amd_sched_rq *rq;
+ if (kernel)
+ rq = &adev->rings[i]->scheduler->kernel_rq;
+ else
+ rq = &adev->rings[i]->scheduler->sched_rq;
+ r = amd_sched_entity_init(adev->rings[i]->scheduler,
+ &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs);
+ if (r)
+ break;
+ }
+
+ if (i < adev->num_rings) {
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(adev->rings[j]->scheduler,
+ &ctx->rings[j].entity);
+ kfree(ctx);
+ return r;
+ }
+ }
+ return 0;
}
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+ fence_put(ctx->rings[i].fences[j]);
+
+ if (amdgpu_enable_scheduler) {
+ for (i = 0; i < adev->num_rings; i++)
+ amd_sched_entity_fini(adev->rings[i]->scheduler,
+ &ctx->rings[i].entity);
+ }
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ uint32_t *id)
{
- int r;
- struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+ struct amdgpu_ctx *ctx;
+ int r;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
return r;
}
*id = (uint32_t)r;
-
- memset(ctx, 0, sizeof(*ctx));
- ctx->id = *id;
- ctx->fpriv = fpriv;
- kref_init(&ctx->refcount);
+ r = amdgpu_ctx_init(adev, false, ctx);
mutex_unlock(&mgr->lock);
- return 0;
+ return r;
}
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
+
+ ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+ amdgpu_ctx_fini(ctx);
+
+ kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+ struct amdgpu_ctx *ctx;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (ctx) {
+ idr_remove(&mgr->ctx_handles, id);
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
mutex_unlock(&mgr->lock);
return 0;
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
union drm_amdgpu_ctx_out *out)
{
struct amdgpu_ctx *ctx;
- struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+ struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
}
/* TODO: these two are always zero */
- out->state.flags = ctx->state.flags;
- out->state.hangs = ctx->state.hangs;
+ out->state.flags = 0x0;
+ out->state.hangs = 0x0;
 /* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
- struct idr *idp;
- struct amdgpu_ctx *ctx;
- uint32_t id;
- struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp,ctx,id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
- DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
- }
-
- mutex_destroy(&mgr->lock);
-}
-
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
int r;
uint32_t id;
- uint32_t flags;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
- flags = args->in.flags;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+ r = amdgpu_ctx_alloc(adev, fpriv, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
- r = amdgpu_ctx_free(adev, fpriv, id);
+ r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
struct amdgpu_ctx *ctx;
- struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+ struct amdgpu_ctx_mgr *mgr;
+
+ if (!fpriv)
+ return NULL;
+
+ mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,86 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
- struct amdgpu_fpriv *fpriv;
- struct amdgpu_ctx_mgr *mgr;
-
if (ctx == NULL)
return -EINVAL;
- fpriv = ctx->fpriv;
- mgr = &fpriv->ctx_mgr;
- mutex_lock(&mgr->lock);
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
- mutex_unlock(&mgr->lock);
-
return 0;
}
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct fence *fence)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+ uint64_t seq = cring->sequence;
+ unsigned idx = 0;
+ struct fence *other = NULL;
+
+ idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+ other = cring->fences[idx];
+ if (other) {
+ signed long r;
+ r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ }
+
+ fence_get(fence);
+
+ spin_lock(&ctx->ring_lock);
+ cring->fences[idx] = fence;
+ cring->sequence++;
+ spin_unlock(&ctx->ring_lock);
+
+ fence_put(other);
+
+ return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+ struct fence *fence;
+
+ spin_lock(&ctx->ring_lock);
+
+ if (seq >= cring->sequence) {
+ spin_unlock(&ctx->ring_lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+ spin_unlock(&ctx->ring_lock);
+ return NULL;
+ }
+
+ fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+ spin_unlock(&ctx->ring_lock);
+
+ return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->lock);
+ idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id) {
+ if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+ DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
+
+ idr_destroy(&mgr->ctx_handles);
+ mutex_destroy(&mgr->lock);
+}
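
On the producer side, amdgpu_ctx_add_fence() treats the per-ring fence array as a sliding window of the last AMDGPU_CTX_MAX_CS_PENDING submissions: before a slot is reused it waits out the old occupant, then installs the new fence under ring_lock and drops the old reference. A toy model with integer ids in place of fences (a sketch, not the driver's code):

#include <stdio.h>

#define MAX_PENDING 4   /* toy stand-in for AMDGPU_CTX_MAX_CS_PENDING */

struct toy_ring_ctx {
        int fences[MAX_PENDING];        /* 0 = free slot, else a fence id */
        unsigned long long sequence;    /* next seq to hand out */
};

/* Model of the add-fence path: the slot for the new seq may still hold
 * the fence from MAX_PENDING submissions ago; the driver waits for it
 * before overwriting (here we just report it), then stores the new
 * fence and advances the sequence. */
static unsigned long long toy_add_fence(struct toy_ring_ctx *c, int fence_id)
{
        unsigned long long seq = c->sequence;
        unsigned idx = seq % MAX_PENDING;

        if (c->fences[idx])
                printf("slot %u busy: wait for fence %d first\n",
                       idx, c->fences[idx]);

        c->fences[idx] = fence_id;      /* in the driver: under ring_lock */
        c->sequence++;
        return seq;                     /* handle returned to user space */
}

int main(void)
{
        struct toy_ring_ctx c = { {0}, 1 };     /* sequence starts at 1 */
        for (int i = 1; i <= 6; i++)
                printf("fence %d -> handle %llu\n", i, toy_add_fence(&c, i));
        return 0;
}
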
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..6ff6ae945794 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = {
"MULLINS",
"TOPAZ",
"TONGA",
+ "FIJI",
"CARRIZO",
"LAST",
};
@@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
- if (adev->flags & AMDGPU_IS_PX)
+ if (adev->flags & AMD_IS_PX)
return true;
return false;
}
@@ -243,7 +244,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -1160,6 +1162,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
+ case CHIP_FIJI:
case CHIP_CARRIZO:
if (adev->asic_type == CHIP_CARRIZO)
adev->family = AMDGPU_FAMILY_CZ;
@@ -1191,8 +1194,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
- if (adev->ip_block_enabled == NULL)
+ adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+ sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+ if (adev->ip_block_status == NULL)
return -ENOMEM;
if (adev->ip_blocks == NULL) {
@@ -1203,14 +1207,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
- if (r)
+ if (r == -ENOENT)
+ adev->ip_block_status[i].valid = false;
+ else if (r)
return r;
+ else
+ adev->ip_block_status[i].valid = true;
+ } else {
+ adev->ip_block_status[i].valid = true;
}
- adev->ip_block_enabled[i] = true;
}
}
@@ -1222,11 +1231,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1248,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1261,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
return r;
+ adev->ip_block_status[i].hw = true;
}
return 0;
@@ -1260,7 +1272,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1294,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
@@ -1295,14 +1307,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
+ adev->ip_block_status[i].hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
- adev->ip_block_enabled[i] = false;
+ adev->ip_block_status[i].sw = false;
+ adev->ip_block_status[i].valid = false;
}
return 0;
@@ -1313,7 +1327,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1345,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_enabled[i])
+ if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)
@@ -1366,7 +1380,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMDGPU_ASIC_MASK;
+ adev->asic_type = flags & AMD_ASIC_MASK;
adev->is_atom_bios = false;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1512,6 +1526,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+ if (r) {
+ dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
+ return r;
+ }
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1573,12 +1592,13 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
+ amdgpu_ctx_fini(&adev->kernel_ctx);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_enabled);
- adev->ip_block_enabled = NULL;
+ kfree(adev->ip_block_status);
+ adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1616,8 +1636,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int i, r;
- bool force_completion = false;
+ int r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1656,21 +1675,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
- /* wait for gpu to finish processing current batch */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_fence_wait_empty(ring);
- if (r) {
- /* delay GPU reset to resume */
- force_completion = true;
- }
- }
- if (force_completion) {
- amdgpu_fence_driver_force_completion(adev);
- }
+ amdgpu_fence_driver_suspend(adev);
r = amdgpu_suspend(adev);
@@ -1728,6 +1733,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_resume(adev);
+ amdgpu_fence_driver_resume(adev);
+
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b9256883e..e3d70772b531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+ struct fence **f)
+{
+ struct amdgpu_fence *fence;
+ long r;
+
+ if (*f == NULL)
+ return;
+
+ fence = to_amdgpu_fence(*f);
+ if (fence) {
+ r = fence_wait(&fence->base, false);
+ if (r == -EDEADLK) {
+ up_read(&adev->exclusive_lock);
+ r = amdgpu_gpu_reset(adev);
+ down_read(&adev->exclusive_lock);
+ }
+ } else
+ r = fence_wait(*f, false);
+
+ if (r)
+ DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+
+ /* We continue with the page flip even if we failed to wait on
+ * the fence, otherwise the DRM core and userspace will be
+ * confused about which BO the CRTC is scanning out
+ */
+ fence_put(*f);
+ *f = NULL;
+}
static void amdgpu_flip_work_func(struct work_struct *__work)
{
@@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
- struct amdgpu_fence *fence;
unsigned long flags;
- int r;
+ unsigned i;
down_read(&adev->exclusive_lock);
- if (work->fence) {
- fence = to_amdgpu_fence(work->fence);
- if (fence) {
- r = amdgpu_fence_wait(fence, false);
- if (r == -EDEADLK) {
- up_read(&adev->exclusive_lock);
- r = amdgpu_gpu_reset(adev);
- down_read(&adev->exclusive_lock);
- }
- } else
- r = fence_wait(work->fence, false);
-
- if (r)
- DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
- /* We continue with the page flip even if we failed to wait on
- * the fence, otherwise the DRM core and userspace will be
- * confused about which BO the CRTC is scanning out
- */
-
- fence_put(work->fence);
- work->fence = NULL;
- }
+ amdgpu_flip_wait_fence(adev, &work->excl);
+ for (i = 0; i < work->shared_count; ++i)
+ amdgpu_flip_wait_fence(adev, &work->shared[i]);
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
DRM_ERROR("failed to reserve buffer after flip\n");
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ kfree(work->shared);
kfree(work);
}
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
u64 tiling_flags;
u64 base;
- int r;
+ int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+ &work->shared_count,
+ &work->shared);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(new_rbo);
+ DRM_ERROR("failed to get fences for buffer\n");
+ goto cleanup;
+ }
+
+ fence_get(work->excl);
+ for (i = 0; i < work->shared_count; ++i)
+ fence_get(work->shared[i]);
+
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
amdgpu_bo_unreserve(new_rbo);
@@ -212,7 +234,10 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ fence_put(work->excl);
+ for (i = 0; i < work->shared_count; ++i)
+ fence_put(work->shared[i]);
+ kfree(work->shared);
kfree(work);
return r;
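
The flip now collects the exclusive fence plus every shared fence from the buffer's reservation object and waits on each in turn; as the comment in the patch notes, a failed wait is logged but the flip still proceeds so the CRTC scan-out state stays coherent. A simplified model of that wait step (wait_one() stands in for fence_wait()):

#include <stdio.h>

static int wait_one(int fence_id)
{
        (void)fence_id;
        return 0;       /* pretend every wait succeeds */
}

/* Wait on the exclusive fence and then each shared fence; log failures
 * but never abort, mirroring the flip-work behaviour above. */
static void wait_flip_fences(int excl, const int *shared, unsigned count)
{
        if (excl && wait_one(excl))
                fprintf(stderr, "wait on exclusive fence failed\n");
        for (unsigned i = 0; i < count; i++)
                if (shared[i] && wait_one(shared[i]))
                        fprintf(stderr, "wait on shared fence %u failed\n", i);
}
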
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 56da962231fc..0fcc0bd1622c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -44,12 +44,15 @@
#include "amdgpu.h"
#include "amdgpu_irq.h"
+#include "amdgpu_amdkfd.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
+ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 0
+#define KMS_DRIVER_MINOR 1
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -61,7 +64,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 10000;
+int amdgpu_lockup_timeout = 0;
int amdgpu_dpm = -1;
int amdgpu_smc_load_fw = 1;
int amdgpu_aspm = -1;
@@ -73,6 +76,9 @@ int amdgpu_deep_color = 0;
int amdgpu_vm_size = 8;
int amdgpu_vm_block_size = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_enable_scheduler = 0;
+int amdgpu_sched_jobs = 16;
+int amdgpu_sched_hw_submission = 2;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -101,7 +107,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -137,36 +143,45 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
+module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
+
+MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
+module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+
+MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
+module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
- {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
- {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+ {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
/* Bonaire */
- {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
- {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
- {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
- {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+ {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+ {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+ {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+ {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -188,39 +203,39 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
/* Kabini */
- {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
- {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+ {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+ {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
/* mullins */
- {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
- {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+ {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -238,12 +253,14 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+ /* fiji */
+ {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
- {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
- {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
- {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
- {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
- {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+ {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+ {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+ {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+ {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+ {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0, 0, 0}
};
@@ -279,7 +296,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
unsigned long flags = ent->driver_data;
int ret;
- if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
+ if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n"
"See modparam exp_hw_support\n");
return -ENODEV;
@@ -527,12 +544,15 @@ static int __init amdgpu_init(void)
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
+ amdgpu_amdkfd_init();
+
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
static void __exit amdgpu_exit(void)
{
+ amdgpu_amdkfd_fini();
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
}
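
With the parameters introduced above, the experimental scheduler can be exercised at module load time, e.g. modprobe amdgpu enable_scheduler=1 sched_jobs=16 sched_hw_submission=2; the scheduler stays off by default, and 16 and 2 are the built-in depths for the software job queue and the hardware submission count respectively.
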
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index cceeb33c447a..e3a4f7048042 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -31,7 +31,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include "amdgpu_family.h"
+#include "amd_shared.h"
/* General customization:
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d21f8e2..8a122b1b7786 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -126,8 +126,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- 0, true,
- &gobj);
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
@@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *rbo = NULL;
- struct device *device = &adev->pdev->dev;
int ret;
unsigned long tmp;
@@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
rbo = gem_to_amdgpu_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
@@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out_unref;
+ goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
- rfbdev->helper.fbdev = info;
memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
@@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size;
@@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {
@@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
- struct fb_info *info;
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
- if (rfbdev->helper.fbdev) {
- info = rfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ drm_fb_helper_release_fbi(&rfbdev->helper);
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index a7189a1fa6a1..1be2bd6d07ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
(*fence)->ring = ring;
(*fence)->owner = owner;
fence_init(&(*fence)->base, &amdgpu_fence_ops,
- &adev->fence_queue.lock, adev->fence_context + ring->idx,
+ &ring->fence_drv.fence_queue.lock,
+ adev->fence_context + ring->idx,
(*fence)->seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
(*fence)->seq,
@@ -136,38 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
}
/**
- * amdgpu_fence_recreate - recreate a fence from an user fence
- *
- * @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @seq: user fence sequence number
- * @fence: resulting amdgpu fence object
- *
- * Recreates a fence command from the user fence sequence number (all asics).
- * Returns 0 on success, -ENOMEM on failure.
- */
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
- uint64_t seq, struct amdgpu_fence **fence)
-{
- struct amdgpu_device *adev = ring->adev;
-
- if (seq > ring->fence_drv.sync_seq[ring->idx])
- return -EINVAL;
-
- *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
- if ((*fence) == NULL)
- return -ENOMEM;
-
- (*fence)->seq = seq;
- (*fence)->ring = ring;
- (*fence)->owner = owner;
- fence_init(&(*fence)->base, &amdgpu_fence_ops,
- &adev->fence_queue.lock, adev->fence_context + ring->idx,
- (*fence)->seq);
- return 0;
-}
-
-/**
* amdgpu_fence_check_signaled - callback from fence_queue
*
* this function is called with fence_queue lock held, which is also used
@@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
else
FENCE_TRACE(&fence->base, "was already signaled\n");
- amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
- fence->ring->fence_drv.irq_type);
- __remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+ __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
fence_put(&fence->base);
} else
FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
return;
}
- if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
- fence_drv->delayed_irq = false;
- amdgpu_irq_update(ring->adev, fence_drv->irq_src,
- fence_drv->irq_type);
+ if (amdgpu_fence_activity(ring)) {
+ wake_up_all(&ring->fence_drv.fence_queue);
}
-
- if (amdgpu_fence_activity(ring))
- wake_up_all(&ring->adev->fence_queue);
else if (amdgpu_ring_is_lockup(ring)) {
/* good news we believe it's a lockup */
dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 /* remember that we need a reset */
ring->adev->needs_reset = true;
- wake_up_all(&ring->adev->fence_queue);
+ wake_up_all(&ring->fence_drv.fence_queue);
}
up_read(&ring->adev->exclusive_lock);
}
@@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
*/
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
- uint64_t seq, last_seq, last_emitted;
- unsigned count_loop = 0;
- bool wake = false;
-
- /* Note there is a scenario here for an infinite loop but it's
- * very unlikely to happen. For it to happen, the current polling
- * process need to be interrupted by another process and another
- * process needs to update the last_seq btw the atomic read and
- * xchg of the current process.
- *
- * More over for this to go in infinite loop there need to be
- * continuously new fence signaled ie amdgpu_fence_read needs
- * to return a different value each time for both the currently
- * polling process and the other process that xchg the last_seq
- * btw atomic read and xchg of the current process. And the
- * value the other process set as last seq must be higher than
- * the seq value we just read. Which means that current process
- * need to be interrupted after amdgpu_fence_read and before
- * atomic xchg.
- *
- * To be even more safe we count the number of time we loop and
- * we bail after 10 loop just accepting the fact that we might
- * have temporarly set the last_seq not to the true real last
- * seq but to an older one.
- */
- last_seq = atomic64_read(&ring->fence_drv.last_seq);
- do {
- last_emitted = ring->fence_drv.sync_seq[ring->idx];
- seq = amdgpu_fence_read(ring);
- seq |= last_seq & 0xffffffff00000000LL;
- if (seq < last_seq) {
- seq &= 0xffffffff;
- seq |= last_emitted & 0xffffffff00000000LL;
- }
-
- if (seq <= last_seq || seq > last_emitted) {
- break;
- }
- /* If we loop over we don't want to return without
- * checking if a fence is signaled as it means that the
- * seq we just read is different from the previous on.
- */
- wake = true;
- last_seq = seq;
- if ((count_loop++) > 10) {
- /* We looped over too many time leave with the
- * fact that we might have set an older fence
- * seq then the current real last seq as signaled
- * by the hw.
- */
- break;
- }
- } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
- if (wake)
- wake_up_all(&ring->adev->fence_queue);
+ if (amdgpu_fence_activity(ring))
+ wake_up_all(&ring->fence_drv.fence_queue);
}
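
The removed loop's core trick, now folded into amdgpu_fence_activity(), is extending the 32-bit value the hardware writes back to the full 64-bit sequence: keep the high word of the last seen sequence, and if that makes the result move backwards, assume the counter wrapped and borrow the high word of the last emitted sequence instead. The extension step in isolation (a sketch assuming fewer than 2^32 fences are emitted between reads):

#include <stdint.h>
#include <stdio.h>

/* Extend a 32-bit hardware fence read to 64 bits, as the removed loop
 * did: splice in the high word of the last seen sequence, and on a
 * backwards jump treat it as a wrap and use the high word of the last
 * emitted sequence. */
static uint64_t extend_seq(uint32_t hw, uint64_t last_seq, uint64_t last_emitted)
{
        uint64_t seq = ((uint64_t)hw) | (last_seq & 0xffffffff00000000ULL);

        if (seq < last_seq) {
                seq &= 0xffffffffULL;
                seq |= last_emitted & 0xffffffff00000000ULL;
        }
        return seq;
}

int main(void)
{
        /* wrap case: last seen 0x1fffffff0, hw now reads 0x10 -> 0x200000010 */
        printf("0x%llx\n",
               (unsigned long long)extend_seq(0x10, 0x1fffffff0ULL, 0x200000005ULL));
        return 0;
}
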
/**
@@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
- struct amdgpu_device *adev = ring->adev;
if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
return false;
- if (down_read_trylock(&adev->exclusive_lock)) {
- amdgpu_irq_get(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
- if (amdgpu_fence_activity(ring))
- wake_up_all_locked(&adev->fence_queue);
-
- /* did fence get signaled after we enabled the sw irq? */
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
- up_read(&adev->exclusive_lock);
- return false;
- }
-
- up_read(&adev->exclusive_lock);
- } else {
- /* we're probably in a lockup, lets not fiddle too much */
- if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type))
- ring->fence_drv.delayed_irq = true;
- amdgpu_fence_schedule_check(ring);
- }
-
fence->fence_wake.flags = 0;
fence->fence_wake.private = NULL;
fence->fence_wake.func = amdgpu_fence_check_signaled;
- __add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+ __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
fence_get(f);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
-/**
- * amdgpu_fence_signaled - check if a fence has signaled
- *
- * @fence: amdgpu fence object
- *
- * Check if the requested fence has signaled (all asics).
- * Returns true if the fence has signaled or false if it has not.
- */
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
-{
- if (!fence)
- return true;
-
- if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
- if (!fence_signal(&fence->base))
- FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
- return true;
- }
-
- return false;
-}
-
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
- unsigned i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (!adev->rings[i] || !seq[i])
- continue;
-
- if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
- return true;
- }
-
- return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics). Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number. Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
- */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
- u64 *target_seq, bool intr,
- long timeout)
-{
- uint64_t last_seq[AMDGPU_MAX_RINGS];
- bool signaled;
- int i;
- long r;
-
- if (timeout == 0) {
- return amdgpu_fence_any_seq_signaled(adev, target_seq);
- }
-
- while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
- /* Save current sequence values, used to check for GPU lockups */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !target_seq[i])
- continue;
-
- last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
- trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
- amdgpu_irq_get(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
- }
-
- if (intr) {
- r = wait_event_interruptible_timeout(adev->fence_queue, (
- (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
- || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
- } else {
- r = wait_event_timeout(adev->fence_queue, (
- (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
- || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
- }
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !target_seq[i])
- continue;
-
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
- trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
- }
-
- if (unlikely(r < 0))
- return r;
-
- if (unlikely(!signaled)) {
-
- if (adev->needs_reset)
- return -EDEADLK;
-
- /* we were interrupted for some reason and fence
- * isn't signaled yet, resume waiting */
- if (r)
- continue;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !target_seq[i])
- continue;
-
- if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
- break;
- }
-
- if (i != AMDGPU_MAX_RINGS)
- continue;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (!adev->rings[i] || !target_seq[i])
- continue;
-
- if (amdgpu_ring_is_lockup(adev->rings[i]))
- break;
- }
-
- if (i < AMDGPU_MAX_RINGS) {
- /* good news we believe it's a lockup */
- dev_warn(adev->dev, "GPU lockup (waiting for "
- "0x%016llx last fence id 0x%016llx on"
- " ring %d)\n",
- target_seq[i], last_seq[i], i);
-
- /* remember that we need an reset */
- adev->needs_reset = true;
- wake_up_all(&adev->fence_queue);
- return -EDEADLK;
- }
-
- if (timeout < MAX_SCHEDULE_TIMEOUT) {
- timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
- if (timeout <= 0) {
- return 0;
- }
- }
- }
- }
- return timeout;
-}
-
-/**
- * amdgpu_fence_wait - wait for a fence to signal
- *
- * @fence: amdgpu fence object
- * @intr: use interruptable sleep
- *
- * Wait for the requested fence to signal (all asics).
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
-{
- uint64_t seq[AMDGPU_MAX_RINGS] = {};
- long r;
-
- seq[fence->ring->idx] = fence->seq;
- r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- return r;
- }
-
- r = fence_signal(&fence->base);
- if (!r)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
- return 0;
-}
-
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
+/*
+ * amdgpu_fence_ring_wait_seq - wait for the seq of a specific ring to signal
+ * @ring: ring to wait on for the seq number
+ * @seq: seq number to wait for
*
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
+ * return value:
+ * 0: seq signaled, and GPU not hung
+ * -EDEADLK: GPU hang detected
+ * -EINVAL: some parameter is not valid
*/
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
- struct amdgpu_fence **fences,
- bool intr)
+static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
- uint64_t seq[AMDGPU_MAX_RINGS];
- unsigned i, num_rings = 0;
- long r;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- seq[i] = 0;
+ struct amdgpu_device *adev = ring->adev;
+ bool signaled = false;
- if (!fences[i]) {
- continue;
- }
+ BUG_ON(!ring);
+ if (seq > ring->fence_drv.sync_seq[ring->idx])
+ return -EINVAL;
- seq[i] = fences[i]->seq;
- ++num_rings;
- }
+ if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+ return 0;
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
+ wait_event(ring->fence_drv.fence_queue, (
+ (signaled = amdgpu_fence_seq_signaled(ring, seq))
+ || adev->needs_reset));
- r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- return r;
- }
- return 0;
+ if (signaled)
+ return 0;
+ else
+ return -EDEADLK;
}
/**
@@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev,
*/
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
- uint64_t seq[AMDGPU_MAX_RINGS] = {};
- long r;
+ uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
- seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
- if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
- /* nothing to wait for, last_seq is
- already the last emited fence */
+ if (seq >= ring->fence_drv.sync_seq[ring->idx])
return -ENOENT;
- }
- r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
- return 0;
+
+ return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
@@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = ring->adev;
- uint64_t seq[AMDGPU_MAX_RINGS] = {};
- long r;
+ uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
- seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
- if (!seq[ring->idx])
+ if (!seq)
return 0;
- r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- if (r == -EDEADLK)
- return -EDEADLK;
-
- dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
- ring->idx, r);
- }
- return 0;
+ return amdgpu_fence_ring_wait_seq(ring, seq);
}
/**
@@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
}
amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
- ring->fence_drv.initialized = true;
+ amdgpu_irq_get(adev, irq_src, irq_type);
+
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
+ ring->fence_drv.initialized = true;
+
dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
"cpu addr 0x%p\n", ring->idx,
ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
amdgpu_fence_check_lockup);
ring->fence_drv.ring = ring;
+
+ if (amdgpu_enable_scheduler) {
+ ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+ ring->idx,
+ amdgpu_sched_hw_submission,
+ (void *)ring->adev);
+ if (!ring->scheduler)
+ DRM_ERROR("Failed to create scheduler on ring %d.\n",
+ ring->idx);
+ }
}
/**
@@ -982,7 +650,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- init_waitqueue_head(&adev->fence_queue);
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -1011,13 +678,78 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(adev);
}
- wake_up_all(&adev->fence_queue);
+ wake_up_all(&ring->fence_drv.fence_queue);
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ if (ring->scheduler)
+ amd_sched_destroy(ring->scheduler);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
}
/**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ mutex_lock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
+ /* wait for gpu to finish processing current batch */
+ r = amdgpu_fence_wait_empty(ring);
+ if (r) {
+ /* delay GPU reset to resume */
+ amdgpu_fence_driver_force_completion(adev);
+ }
+
+ /* disable the interrupt */
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+ mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+ int i;
+
+ mutex_lock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
+ /* enable the interrupt */
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+ mutex_unlock(&adev->ring_lock);
+}
+
+/**
* amdgpu_fence_driver_force_completion - force all fence waiter to complete
*
* @adev: amdgpu device pointer
@@ -1104,6 +836,21 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
+{
+ int idx;
+ struct fence *fence;
+
+ for (idx = 0; idx < count; ++idx) {
+ fence = fences[idx];
+ if (fence) {
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+ }
+ }
+ return false;
+}
+
struct amdgpu_wait_cb {
struct fence_cb base;
struct task_struct *task;
@@ -1121,12 +868,48 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_device *adev = fence->ring->adev;
- struct amdgpu_wait_cb cb;
- cb.task = current;
+ return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+}
- if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
- return t;
+/**
+ * amdgpu_fence_wait_any - wait on a fence array with a timeout
+ *
+ * @adev: amdgpu device
+ * @array: the array of fence pointers to wait on
+ * @count: the number of entries in the fence array
+ * @intr: use interruptible sleep while waiting
+ * @t: timeout to wait
+ *
+ * Returns as soon as any fence in the array signals or the timeout expires.
+ */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+ struct fence **array, uint32_t count,
+ bool intr, signed long t)
+{
+ struct amdgpu_wait_cb *cb;
+ struct fence *fence;
+ unsigned idx;
+
+ BUG_ON(!array);
+
+ cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+ if (cb == NULL) {
+ t = -ENOMEM;
+ goto err_free_cb;
+ }
+
+ for (idx = 0; idx < count; ++idx) {
+ fence = array[idx];
+ if (fence) {
+ cb[idx].task = current;
+ if (fence_add_callback(fence,
+ &cb[idx].base, amdgpu_fence_wait_cb)) {
+ /* The fence is already signaled */
+ goto fence_rm_cb;
+ }
+ }
+ }
while (t > 0) {
if (intr)
@@ -1135,10 +918,10 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
set_current_state(TASK_UNINTERRUPTIBLE);
/*
- * amdgpu_test_signaled must be called after
+ * amdgpu_test_signaled_any must be called after
* set_current_state to prevent a race with wake_up_process
*/
- if (amdgpu_test_signaled(fence))
+ if (amdgpu_test_signaled_any(array, count))
break;
if (adev->needs_reset) {
@@ -1153,7 +936,16 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+
+fence_rm_cb:
+ for (idx = 0; idx < count; ++idx) {
+ fence = array[idx];
+ if (fence && cb[idx].base.func)
+ fence_remove_callback(fence, &cb[idx].base);
+ }
+
+err_free_cb:
+ kfree(cb);
return t;
}
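A minimal caller sketch for the new wait-any entry point (illustrative only, not part of the patch; fence_a and fence_b stand in for fences the caller already holds):

	struct fence *fences[2] = { fence_a, fence_b };
	signed long r;

	/* wait up to one second for the first fence to signal */
	r = amdgpu_fence_wait_any(adev, fences, 2, false,
				  msecs_to_jiffies(1000));
	if (r < 0)
		DRM_ERROR("wait-any failed (%ld)\n", r); /* e.g. -ENOMEM */
	else if (r == 0)
		DRM_INFO("timed out before any fence signaled\n");
	/* r > 0: a fence signaled with r jiffies of the timeout left */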
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e02db0b2e839..cbd3a486c5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -125,7 +125,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1000a2..5839fab374bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+ timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
@@ -449,7 +449,7 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va)
+ struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_unlock;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+ if (operation == AMDGPU_VA_OP_MAP)
+ r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
- amdgpu_gem_va_update_vm(adev, bo_va);
+ amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -613,6 +615,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
info.domains = robj->initial_domain;
info.domain_flags = robj->flags;
+ amdgpu_bo_unreserve(robj);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -620,17 +623,19 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
case AMDGPU_GEM_OP_SET_PLACEMENT:
if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
r = -EPERM;
+ amdgpu_bo_unreserve(robj);
break;
}
robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_unreserve(robj);
break;
default:
+ amdgpu_bo_unreserve(robj);
r = -EINVAL;
}
- amdgpu_bo_unreserve(robj);
out:
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -651,7 +656,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- 0, ttm_bo_type_device,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ ttm_bo_type_device,
&gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..c439735ee670 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -73,28 +73,12 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- else
- ib->gpu_addr = 0;
-
- } else {
- ib->sa_bo = NULL;
- ib->ptr = NULL;
- ib->gpu_addr = 0;
}
amdgpu_sync_create(&ib->sync);
ib->ring = ring;
- ib->fence = NULL;
- ib->user = NULL;
ib->vm = vm;
- ib->gds_base = 0;
- ib->gds_size = 0;
- ib->gws_base = 0;
- ib->gws_size = 0;
- ib->oa_base = 0;
- ib->oa_size = 0;
- ib->flags = 0;
return 0;
}
@@ -109,8 +93,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
{
- amdgpu_sync_free(adev, &ib->sync, ib->fence);
- amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+ amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+ amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
amdgpu_fence_unref(&ib->fence);
}
@@ -156,7 +140,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
dev_err(adev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
-
+ r = amdgpu_sync_wait(&ibs->sync);
+ if (r) {
+ dev_err(adev->dev, "IB sync failed (%d).\n", r);
+ return r;
+ }
r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
@@ -165,9 +153,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* grab a vm id if necessary */
- struct amdgpu_fence *vm_id_fence = NULL;
- vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
- amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+ r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
+ if (r) {
+ amdgpu_ring_unlock_undo(ring);
+ return r;
+ }
}
r = amdgpu_sync_rings(&ibs->sync, ring);
@@ -180,16 +170,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
- }
- if (vm && ring->funcs->emit_gds_switch)
- amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
+ if (ring->funcs->emit_gds_switch)
+ amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+ ib->gds_base, ib->gds_size,
+ ib->gws_base, ib->gws_size,
+ ib->oa_base, ib->oa_size);
- if (ring->funcs->emit_hdp_flush)
- amdgpu_ring_emit_hdp_flush(ring);
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ }
old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
@@ -212,11 +202,15 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
return r;
}
+ if (!amdgpu_enable_scheduler && ib->ctx)
+ ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+ &ib->fence->base);
+
/* wrap the last IB with fence */
if (ib->user) {
uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
addr += ib->user->offset;
- amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+ amdgpu_ring_emit_fence(ring, addr, ib->sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index db5422e65ec5..5c8a803acedc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
/**
* amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -97,18 +98,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
/* add 8 bytes for the rptr/wptr shadows and
* add them to the end of the ring allocation.
*/
- adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
+ adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
+ adev->irq.ih.ring_size + 8,
+ &adev->irq.ih.rb_dma_addr);
if (adev->irq.ih.ring == NULL)
return -ENOMEM;
- adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
- (void *)adev->irq.ih.ring,
- adev->irq.ih.ring_size,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
- kfree((void *)adev->irq.ih.ring);
- return -ENOMEM;
- }
+ memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
}
@@ -148,9 +143,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
/* add 8 bytes for the rptr/wptr shadows and
* add them to the end of the ring allocation.
*/
- pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
- adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
- kfree((void *)adev->irq.ih.ring);
+ pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
+ (void *)adev->irq.ih.ring,
+ adev->irq.ih.rb_dma_addr);
adev->irq.ih.ring = NULL;
}
} else {
@@ -199,6 +194,14 @@ restart_ih:
rmb();
while (adev->irq.ih.rptr != wptr) {
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+
+ /* Before dispatching irq to IP blocks, send it to amdkfd */
+ amdgpu_amdkfd_interrupt(adev,
+ (const void *) &adev->irq.ih.ring[ring_index]);
+
+ entry.iv_entry = (const uint32_t *)
+ &adev->irq.ih.ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index c62b09e555d6..ba38ae6a1463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -52,6 +52,7 @@ struct amdgpu_iv_entry {
unsigned ring_id;
unsigned vm_id;
unsigned pas_id;
+ const uint32_t *iv_entry;
};
int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4d36f0f2153..0aba8e9bc8a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -272,6 +272,11 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
kfree(src->enabled_types);
src->enabled_types = NULL;
+ if (src->data) {
+ kfree(src->data);
+ kfree(src);
+ adev->irq.sources[i] = NULL;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 8299795f2b2d..17b01aef4278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -40,6 +40,7 @@ struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
const struct amdgpu_irq_src_funcs *funcs;
+ void *data;
};
/* provided by interrupt generating IP blocks */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..22367939ebf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -34,6 +34,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_get_sync(dev->dev);
+ amdgpu_amdkfd_device_fini(adev);
+
amdgpu_acpi_fini(adev);
amdgpu_device_fini(adev);
@@ -93,8 +96,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((amdgpu_runtime_pm != 0) &&
amdgpu_has_atpx() &&
- ((flags & AMDGPU_IS_APU) == 0))
- flags |= AMDGPU_IS_PX;
+ ((flags & AMD_IS_APU) == 0))
+ flags |= AMD_IS_PX;
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
@@ -118,6 +121,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ amdgpu_amdkfd_load_interface(adev);
+ amdgpu_amdkfd_device_probe(adev);
+ amdgpu_amdkfd_device_init(adev);
+
if (amdgpu_device_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -235,7 +242,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i]) {
+ adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0;
@@ -274,7 +281,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
- adev->ip_block_enabled[i] &&
+ adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -317,16 +324,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_INFO_FW_GFX_RLC:
fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0)
+ if (info->query_fw.index == 0) {
fw_info.ver = adev->gfx.mec_fw_version;
- else if (info->query_fw.index == 1)
+ fw_info.feature = adev->gfx.mec_feature_version;
+ } else if (info->query_fw.index == 1) {
fw_info.ver = adev->gfx.mec2_fw_version;
- else
+ fw_info.feature = adev->gfx.mec2_feature_version;
+ } else
return -EINVAL;
- fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_SMC:
fw_info.ver = adev->pm.fw_version;
@@ -336,7 +344,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (info->query_fw.index >= 2)
return -EINVAL;
fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
- fw_info.feature = 0;
+ fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
break;
default:
return -EINVAL;
@@ -416,7 +424,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info;
+ struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;
@@ -443,11 +451,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info._pad = 0;
dev_info.ids_flags = 0;
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
+ dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -459,6 +467,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
+ dev_info.vce_harvest_config = adev->vce.harvest_config;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -518,10 +527,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
- /* init context manager */
- mutex_init(&fpriv->ctx_mgr.lock);
- idr_init(&fpriv->ctx_mgr.ctx_handles);
- fpriv->ctx_mgr.adev = adev;
+ amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
file_priv->driver_priv = fpriv;
@@ -554,6 +560,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (!fpriv)
return;
+ amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
amdgpu_vm_fini(adev, &fpriv->vm);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -562,9 +570,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
- /* release context */
- amdgpu_ctx_fini(fpriv);
-
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da64245b31b..08b09d55b96f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placements[c].fpfn =
adev->mc.visible_vram_size >> PAGE_SHIFT;
placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
}
placements[c].fpfn = 0;
placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
@@ -223,18 +223,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
size_t acc_size;
int r;
- /* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
- * do this as a temporary workaround
- */
- if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
- if (adev->asic_type >= CHIP_TOPAZ) {
- if (byte_align & 0x7fff)
- byte_align = ALIGN(byte_align, 0x8000);
- if (size & 0x7fff)
- size = ALIGN(size, 0x8000);
- }
- }
-
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
@@ -462,7 +450,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
- if (0 && (adev->flags & AMDGPU_IS_APU)) {
+ if (0 && (adev->flags & AMD_IS_APU)) {
/* Useless to evict on IGP chips */
return 0;
}
@@ -478,7 +466,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
}
dev_err(adev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
- mutex_lock(&adev->ddev->struct_mutex);
dev_err(adev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference(&bo->gem_base);
- mutex_unlock(&adev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
@@ -658,13 +644,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
if (shared)
- reservation_object_add_shared_fence(resv, &fence->base);
+ reservation_object_add_shared_fence(resv, fence);
else
- reservation_object_add_excl_fence(resv, &fence->base);
+ reservation_object_add_excl_fence(resv, fence);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 675bdc30e41d..6ea18dcec561 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared);
/*
@@ -193,7 +193,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct amdgpu_fence *fence);
+ struct fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ed13baa7c976..efed11509f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -82,7 +82,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
mutex_unlock(&adev->pm.mutex);
/* Can't set dpm state when the card is off */
- if (!(adev->flags & AMDGPU_IS_PX) ||
+ if (!(adev->flags & AMD_IS_PX) ||
(ddev->switch_power_state == DRM_SWITCH_POWER_ON))
amdgpu_pm_compute_clocks(adev);
fail:
@@ -538,7 +538,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* vce just modifies an existing state so force a change */
if (ps->vce_active != adev->pm.dpm.vce_active)
goto force;
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
/* for APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
*/
@@ -580,7 +580,6 @@ force:
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
- mutex_lock(&adev->ddev->struct_mutex);
mutex_lock(&adev->ring_lock);
/* update whether vce is active */
@@ -628,7 +627,6 @@ force:
done:
mutex_unlock(&adev->ring_lock);
- mutex_unlock(&adev->ddev->struct_mutex);
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 855e2196657a..9bec91484c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -131,6 +131,21 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
return 0;
}
+/**
+ * amdgpu_ring_insert_nop - insert NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @count: the number of NOP packets to insert
+ *
+ * This is the generic insert_nop function for all rings except SDMA.
+ */
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, ring->nop);
+}
+
/**
* amdgpu_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
@@ -143,10 +158,13 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
*/
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
+ uint32_t count;
+
/* We pad to match fetch size */
- while (ring->wptr & ring->align_mask) {
- amdgpu_ring_write(ring, ring->nop);
- }
+ count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
+ count %= ring->align_mask + 1;
+ ring->funcs->insert_nop(ring, count);
+
mb();
amdgpu_ring_set_wptr(ring);
}
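The padding arithmetic above is terse, so a worked example helps (assuming align_mask == 7, i.e. an 8-dword fetch size):

	/* wptr == 13: 13 & 7 == 5, count = (8 - 5) % 8 == 3, pad 3 NOPs  */
	/* wptr == 16: 16 & 7 == 0, count = (8 - 0) % 8 == 0, pad nothing */

The final modulo is what keeps an already-aligned write pointer from being padded with a full group of eight NOPs.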
@@ -342,6 +360,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
amdgpu_fence_driver_init_ring(ring);
}
+ init_waitqueue_head(&ring->fence_drv.fence_queue);
+
r = amdgpu_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -367,7 +387,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
+ spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987ce18d..74dad270362c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
return r;
}
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+ struct amdgpu_fence *a_fence;
+ struct amd_sched_fence *s_fence;
+
+ s_fence = to_amd_sched_fence(f);
+ if (s_fence)
+ return s_fence->scheduler->ring_id;
+ a_fence = to_amdgpu_fence(f);
+ if (a_fence)
+ return a_fence->ring->idx;
+ return 0;
+}
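	/* Assumed contract (defined outside this hunk): to_amd_sched_fence()
	 * and to_amdgpu_fence() each return NULL when the fence is not of
	 * their type, so at most one branch above matches and unknown
	 * fences fall back to ring 0.
	 */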
+
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- amdgpu_fence_unref(&sa_bo->fence);
+ fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -160,7 +174,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+ if (sa_bo->fence == NULL ||
+ !fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -245,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_fence **fences,
+ struct fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -274,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!amdgpu_fence_signaled(sa_bo->fence)) {
+ if (!fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -298,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
}
if (best_bo) {
- ++tries[best_bo->fence->ring->idx];
+ uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+ ++tries[idx];
sa_manager->hole = best_bo->olist.prev;
/* we knew that this one is signaled,
@@ -314,9 +330,10 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+ struct fence *fences[AMDGPU_MAX_RINGS];
unsigned tries[AMDGPU_MAX_RINGS];
int i, r;
+ signed long t;
BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);
@@ -350,7 +367,9 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
spin_unlock(&sa_manager->wq.lock);
- r = amdgpu_fence_wait_any(adev, fences, false);
+ t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+ false, MAX_SCHEDULE_TIMEOUT);
+ r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT) {
@@ -369,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -379,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !amdgpu_fence_signaled(fence)) {
- (*sa_bo)->fence = amdgpu_fence_ref(fence);
- list_add_tail(&(*sa_bo)->flist,
- &sa_manager->flist[fence->ring->idx]);
+ if (fence && !fence_is_signaled(fence)) {
+ uint32_t idx;
+ (*sa_bo)->fence = fence_get(fence);
+ idx = amdgpu_sa_get_ring_from_fence(fence);
+ list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
amdgpu_sa_bo_remove_locked(*sa_bo);
}
@@ -409,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
if (i->fence) {
- seq_printf(m, " protected by 0x%016llx on ring %d",
- i->fence->seq, i->fence->ring->idx);
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+ if (a_fence)
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ a_fence->seq, a_fence->ring->idx);
+ if (s_fence)
+ seq_printf(m, " protected by 0x%016x on ring %d",
+ s_fence->base.seqno,
+ s_fence->scheduler->ring_id);
+
}
seq_printf(m, "\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..de98fbd2971e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+{
+ struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+ return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+}
+
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+{
+ struct amdgpu_job *sched_job;
+ struct amdgpu_fence *fence;
+ int r;
+
+ if (!job) {
+ DRM_ERROR("job is null\n");
+ return NULL;
+ }
+ sched_job = (struct amdgpu_job *)job;
+ mutex_lock(&sched_job->job_lock);
+ r = amdgpu_ib_schedule(sched_job->adev,
+ sched_job->num_ibs,
+ sched_job->ibs,
+ sched_job->base.owner);
+ if (r)
+ goto err;
+ fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+
+ if (sched_job->free_job)
+ sched_job->free_job(sched_job);
+
+ mutex_unlock(&sched_job->job_lock);
+ return &fence->base;
+
+err:
+ DRM_ERROR("Run job error\n");
+ mutex_unlock(&sched_job->job_lock);
+ job->sched->ops->process_job(job);
+ return NULL;
+}
+
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
+{
+ struct amdgpu_job *sched_job;
+
+ if (!job) {
+ DRM_ERROR("job is null\n");
+ return;
+ }
+ sched_job = (struct amdgpu_job *)job;
+ /* after processing job, free memory */
+ fence_put(&sched_job->base.s_fence->base);
+ kfree(sched_job);
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+ .dependency = amdgpu_sched_dependency,
+ .run_job = amdgpu_sched_run_job,
+ .process_job = amdgpu_sched_process_job
+};
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_ib *ibs,
+ unsigned num_ibs,
+ int (*free_job)(struct amdgpu_job *),
+ void *owner,
+ struct fence **f)
+{
+ int r = 0;
+ if (amdgpu_enable_scheduler) {
+ struct amdgpu_job *job =
+ kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+ job->base.sched = ring->scheduler;
+ job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+ job->adev = adev;
+ job->ibs = ibs;
+ job->num_ibs = num_ibs;
+ job->base.owner = owner;
+ mutex_init(&job->job_lock);
+ job->free_job = free_job;
+ mutex_lock(&job->job_lock);
+ r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ if (r) {
+ mutex_unlock(&job->job_lock);
+ kfree(job);
+ return r;
+ }
+ *f = fence_get(&job->base.s_fence->base);
+ mutex_unlock(&job->job_lock);
+ } else {
+ r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
+ if (r)
+ return r;
+ *f = fence_get(&ibs[num_ibs - 1].fence->base);
+ }
+
+ return 0;
+}
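For orientation, a hedged sketch of how a kernel-internal submitter is expected to drive this helper (the pattern the UVD and buffer-copy paths adopt later in this diff; my_free_job is a hypothetical callback):

	static int my_free_job(struct amdgpu_job *job)
	{
		amdgpu_ib_free(job->adev, job->ibs);
		kfree(job->ibs);
		return 0;
	}

	/* with ib prepared via kzalloc() + amdgpu_ib_get(): */
	struct fence *f = NULL;
	int r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						     my_free_job,
						     AMDGPU_FENCE_OWNER_UNDEFINED,
						     &f);
	if (!r) {
		fence_wait(f, false);
		fence_put(f);
	}

The helper hides the amdgpu_enable_scheduler split: with the scheduler on, the returned fence is the scheduler fence and free_job runs once the job has executed; with it off, the IB is scheduled directly and the caller still owns the IB memory.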
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index d6d41a42ab65..ff3ca52ec6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -87,7 +87,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
void amdgpu_semaphore_free(struct amdgpu_device *adev,
struct amdgpu_semaphore **semaphore,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
if (semaphore == NULL || *semaphore == NULL) {
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1a..068aeaff7183 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -32,6 +32,11 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+struct amdgpu_sync_entry {
+ struct hlist_node node;
+ struct fence *fence;
+};
+
/**
* amdgpu_sync_create - zero init sync object
*
@@ -49,36 +54,104 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
sync->sync_to[i] = NULL;
+ hash_init(sync->fences);
sync->last_vm_update = NULL;
}
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (a_fence)
+ return a_fence->ring->adev == adev;
+ if (s_fence)
+ return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+ return false;
+}
+
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (s_fence)
+ return s_fence->owner == owner;
+ if (a_fence)
+ return a_fence->owner == owner;
+ return false;
+}
+
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
- * @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @adev: amdgpu device the fence is checked against
+ * @sync: sync object to add the fence to
+ * @f: fence to sync to
  *
- * Sync to the fence using the semaphore objects
+ * Remember the fence so a later amdgpu_sync_rings() or
+ * amdgpu_sync_wait() can synchronize to it.
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
- struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct fence *f)
{
+ struct amdgpu_sync_entry *e;
+ struct amdgpu_fence *fence;
struct amdgpu_fence *other;
+ struct fence *tmp, *later;
- if (!fence)
- return;
+ if (!f)
+ return 0;
+
+ if (amdgpu_sync_same_dev(adev, f) &&
+ amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+ if (sync->last_vm_update) {
+ tmp = sync->last_vm_update;
+ BUG_ON(f->context != tmp->context);
+ later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+ sync->last_vm_update = fence_get(later);
+ fence_put(tmp);
+ } else
+ sync->last_vm_update = fence_get(f);
+ }
+
+ fence = to_amdgpu_fence(f);
+ if (!fence || fence->ring->adev != adev) {
+ hash_for_each_possible(sync->fences, e, node, f->context) {
+ struct fence *new;
+ if (unlikely(e->fence->context != f->context))
+ continue;
+ new = fence_get(fence_later(e->fence, f));
+ if (new) {
+ fence_put(e->fence);
+ e->fence = new;
+ }
+ return 0;
+ }
+
+ e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ hash_add(sync->fences, &e->node, f->context);
+ e->fence = fence_get(f);
+ return 0;
+ }
other = sync->sync_to[fence->ring->idx];
sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
amdgpu_fence_later(fence, other));
amdgpu_fence_unref(&other);
- if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
- other = sync->last_vm_update;
- sync->last_vm_update = amdgpu_fence_ref(
- amdgpu_fence_later(fence, other));
- amdgpu_fence_unref(&other);
- }
+ return 0;
+}
+
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (s_fence)
+ return s_fence->owner;
+ else if (a_fence)
+ return a_fence->owner;
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
@@ -97,7 +170,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
{
struct reservation_object_list *flist;
struct fence *f;
- struct amdgpu_fence *fence;
+ void *fence_owner;
unsigned i;
int r = 0;
@@ -106,11 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- fence = f ? to_amdgpu_fence(f) : NULL;
- if (fence && fence->ring->adev == adev)
- amdgpu_sync_fence(sync, fence);
- else if (f)
- r = fence_wait(f, true);
+ r = amdgpu_sync_fence(adev, sync, f);
flist = reservation_object_get_list(resv);
if (!flist || r)
@@ -119,20 +188,72 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
reservation_object_held(resv));
- fence = f ? to_amdgpu_fence(f) : NULL;
- if (fence && fence->ring->adev == adev) {
- if (fence->owner != owner ||
- fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
- amdgpu_sync_fence(sync, fence);
- } else if (f) {
- r = fence_wait(f, true);
- if (r)
- break;
+ if (amdgpu_sync_same_dev(adev, f)) {
+ /* VM updates are only interesting
+ * for other VM updates and moves.
+ */
+ fence_owner = amdgpu_sync_get_owner(f);
+ if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+ (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+ ((owner == AMDGPU_FENCE_OWNER_VM) !=
+ (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+ continue;
+
+ /* Ignore fence from the same owner as
+ * long as it isn't undefined.
+ */
+ if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+ fence_owner == owner)
+ continue;
}
+
+ r = amdgpu_sync_fence(adev, sync, f);
+ if (r)
+ break;
}
return r;
}
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ struct fence *f;
+ int i;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+ f = e->fence;
+
+ hash_del(&e->node);
+ kfree(e);
+
+ if (!fence_is_signaled(f))
+ return f;
+
+ fence_put(f);
+ }
+ return NULL;
+}
+
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ int i, r;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ r = fence_wait(e->fence, false);
+ if (r)
+ return r;
+
+ hash_del(&e->node);
+ fence_put(e->fence);
+ kfree(e);
+ }
+ return 0;
+}
+
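Together with amdgpu_sync_create() and amdgpu_sync_free(), the hash-backed container now supports a simple collect-then-wait lifecycle; a minimal sketch under this patch's assumptions (reservation object held, adev and bo hypothetical):

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (!r)
		r = amdgpu_sync_wait(&sync); /* CPU-wait on the hashed fences */
	amdgpu_sync_free(adev, &sync, NULL);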
/**
* amdgpu_sync_rings - sync ring to all registered fences
*
@@ -164,9 +285,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
return -EINVAL;
}
- if (count >= AMDGPU_NUM_SYNCS) {
+ if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
/* not enough room, wait manually */
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(&fence->base, false);
if (r)
return r;
continue;
@@ -186,7 +307,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
/* signaling wasn't successful wait manually */
amdgpu_ring_undo(other);
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(&fence->base, false);
if (r)
return r;
continue;
@@ -196,7 +317,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
/* waiting wasn't successful wait manually */
amdgpu_ring_undo(other);
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(&fence->base, false);
if (r)
return r;
continue;
@@ -220,15 +341,23 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
*/
void amdgpu_sync_free(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
unsigned i;
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ hash_del(&e->node);
+ fence_put(e->fence);
+ kfree(e);
+ }
+
for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
amdgpu_fence_unref(&sync->sync_to[i]);
- amdgpu_fence_unref(&sync->last_vm_update);
+ fence_put(sync->last_vm_update);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index df202999fbfe..f80b1a43be8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -77,7 +77,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
@@ -116,13 +116,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -161,13 +161,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -214,7 +214,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
break;
}
@@ -238,7 +238,7 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
uint32_t handle = ring->idx ^ 0xdeafbeef;
int r;
@@ -269,15 +269,16 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
DRM_ERROR("Failed to get dummy destroy msg\n");
return r;
}
-
} else {
+ struct amdgpu_fence *a_fence = NULL;
r = amdgpu_ring_lock(ring, 64);
if (r) {
DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
return r;
}
- amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
amdgpu_ring_unlock_commit(ring);
+ *fence = &a_fence->base;
}
return 0;
}
@@ -286,7 +287,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
struct amdgpu_ring *ringA,
struct amdgpu_ring *ringB)
{
- struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
+ struct fence *fence1 = NULL, *fence2 = NULL;
struct amdgpu_semaphore *semaphore = NULL;
int r;
@@ -322,7 +323,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
mdelay(1000);
- if (amdgpu_fence_signaled(fence1)) {
+ if (fence_is_signaled(fence1)) {
DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
goto out_cleanup;
}
@@ -335,7 +336,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
amdgpu_semaphore_emit_signal(ringB, semaphore);
amdgpu_ring_unlock_commit(ringB);
- r = amdgpu_fence_wait(fence1, false);
+ r = fence_wait(fence1, false);
if (r) {
DRM_ERROR("Failed to wait for sync fence 1\n");
goto out_cleanup;
@@ -343,7 +344,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
mdelay(1000);
- if (amdgpu_fence_signaled(fence2)) {
+ if (fence_is_signaled(fence2)) {
DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
goto out_cleanup;
}
@@ -356,7 +357,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
amdgpu_semaphore_emit_signal(ringB, semaphore);
amdgpu_ring_unlock_commit(ringB);
- r = amdgpu_fence_wait(fence2, false);
+ r = fence_wait(fence2, false);
if (r) {
DRM_ERROR("Failed to wait for sync fence 1\n");
goto out_cleanup;
@@ -366,10 +367,10 @@ out_cleanup:
amdgpu_semaphore_free(adev, &semaphore, NULL);
if (fence1)
- amdgpu_fence_unref(&fence1);
+ fence_put(fence1);
if (fence2)
- amdgpu_fence_unref(&fence2);
+ fence_put(fence2);
if (r)
printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
@@ -380,7 +381,7 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
struct amdgpu_ring *ringB,
struct amdgpu_ring *ringC)
{
- struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
+ struct fence *fenceA = NULL, *fenceB = NULL;
struct amdgpu_semaphore *semaphore = NULL;
bool sigA, sigB;
int i, r;
@@ -416,11 +417,11 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
mdelay(1000);
- if (amdgpu_fence_signaled(fenceA)) {
+ if (fence_is_signaled(fenceA)) {
DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
goto out_cleanup;
}
- if (amdgpu_fence_signaled(fenceB)) {
+ if (fence_is_signaled(fenceB)) {
DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
goto out_cleanup;
}
@@ -435,8 +436,8 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
for (i = 0; i < 30; ++i) {
mdelay(100);
- sigA = amdgpu_fence_signaled(fenceA);
- sigB = amdgpu_fence_signaled(fenceB);
+ sigA = fence_is_signaled(fenceA);
+ sigB = fence_is_signaled(fenceB);
if (sigA || sigB)
break;
}
@@ -461,12 +462,12 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
mdelay(1000);
- r = amdgpu_fence_wait(fenceA, false);
+ r = fence_wait(fenceA, false);
if (r) {
DRM_ERROR("Failed to wait for sync fence A\n");
goto out_cleanup;
}
- r = amdgpu_fence_wait(fenceB, false);
+ r = fence_wait(fenceB, false);
if (r) {
DRM_ERROR("Failed to wait for sync fence B\n");
goto out_cleanup;
@@ -476,10 +477,10 @@ out_cleanup:
amdgpu_semaphore_free(adev, &semaphore, NULL);
if (fenceA)
- amdgpu_fence_unref(&fenceA);
+ fence_put(fenceA);
if (fenceB)
- amdgpu_fence_unref(&fenceB);
+ fence_put(fenceB);
if (r)
printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dd3415d2e45d..b5abd5cde413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct amdgpu_device *adev;
struct amdgpu_ring *ring;
uint64_t old_start, new_start;
- struct amdgpu_fence *fence;
+ struct fence *fence;
int r;
adev = amdgpu_get_adev(bo->bdev);
@@ -269,9 +269,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
new_mem->num_pages * PAGE_SIZE, /* bytes */
bo->resv, &fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+ r = ttm_bo_move_accel_cleanup(bo, fence,
evict, no_wait_gpu, new_mem);
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
return r;
}
@@ -859,7 +859,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -987,46 +988,48 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_sync sync;
uint32_t max_bytes;
unsigned num_loops, num_dw;
+ struct amdgpu_ib *ib;
unsigned i;
int r;
- /* sync other rings */
- amdgpu_sync_create(&sync);
- if (resv) {
- r = amdgpu_sync_resv(adev, &sync, resv, false);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- amdgpu_sync_free(adev, &sync, NULL);
- return r;
- }
- }
-
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
- /* for fence and sync */
- num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+ /* for IB padding */
+ while (num_dw & 0x7)
+ num_dw++;
+
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
- r = amdgpu_ring_lock(ring, num_dw);
+ r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
if (r) {
- DRM_ERROR("ring lock failed (%d).\n", r);
- amdgpu_sync_free(adev, &sync, NULL);
+ kfree(ib);
return r;
}
- amdgpu_sync_rings(&sync, ring);
+ ib->length_dw = 0;
+
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &ib->sync, resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ DRM_ERROR("sync failed (%d).\n", r);
+ goto error_free;
+ }
+ }
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+ amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
cur_size_in_bytes);
src_offset += cur_size_in_bytes;
@@ -1034,17 +1037,24 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
byte_count -= cur_size_in_bytes;
}
- r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
- if (r) {
- amdgpu_ring_unlock_undo(ring);
- amdgpu_sync_free(adev, &sync, NULL);
- return r;
- }
-
- amdgpu_ring_unlock_commit(ring);
- amdgpu_sync_free(adev, &sync, *fence);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > num_dw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_MOVE,
+ fence);
+ if (r)
+ goto error_free;
+ if (!amdgpu_enable_scheduler) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ }
return 0;
+error_free:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
}
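With amdgpu_copy_buffer() now returning a plain struct fence, callers follow the pattern already visible in amdgpu_move_blit() earlier in this file; condensed for reference (bo, old_start, new_start and new_mem as in that function):

	struct fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE,
			       bo->resv, &fence);
	if (!r) {
		r = ttm_bo_move_accel_cleanup(bo, fence, evict,
					      no_wait_gpu, new_mem);
		fence_put(fence);
	}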
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..2cf6c6b06e3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
+#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_TONGA:
fw_name = FIRMWARE_TONGA;
break;
+ case CHIP_FIJI:
+ fw_name = FIRMWARE_FIJI;
+ break;
case CHIP_CARRIZO:
fw_name = FIRMWARE_CARRIZO;
break;
@@ -149,7 +154,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &adev->uvd.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -216,31 +223,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
- unsigned size;
- void *ptr;
- const struct common_firmware_header *hdr;
- int i;
+ struct amdgpu_ring *ring = &adev->uvd.ring;
+ int i, r;
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- break;
-
- if (i == AMDGPU_MAX_UVD_HANDLES)
- return 0;
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+ if (handle != 0) {
+ struct fence *fence;
- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ amdgpu_uvd_note_usage(adev);
- size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- size -= le32_to_cpu(hdr->ucode_size_bytes);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
- ptr = adev->uvd.cpu_addr;
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ fence_wait(fence, false);
+ fence_put(fence);
- adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(adev->uvd.saved_bo, ptr, size);
+ adev->uvd.filp[i] = NULL;
+ atomic_set(&adev->uvd.handles[i], 0);
+ }
+ }
return 0;
}
@@ -265,12 +273,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.cpu_addr;
ptr += le32_to_cpu(hdr->ucode_size_bytes);
- if (adev->uvd.saved_bo != NULL) {
- memcpy(ptr, adev->uvd.saved_bo, size);
- kfree(adev->uvd.saved_bo);
- adev->uvd.saved_bo = NULL;
- } else
- memset(ptr, 0, size);
+ memset(ptr, 0, size);
return 0;
}
@@ -283,7 +286,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct amdgpu_fence *fence;
+ struct fence *fence;
amdgpu_uvd_note_usage(adev);
@@ -293,8 +296,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- amdgpu_fence_wait(fence, false);
- amdgpu_fence_unref(&fence);
+ fence_wait(fence, false);
+ fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -375,6 +378,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
unsigned fs_in_mb = width_in_mb * height_in_mb;
unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned min_ctx_size = 0;
image_size = width * height;
image_size += image_size / 2;
@@ -466,6 +470,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
min_dpb_size = image_size * num_dpb_buffer;
+ min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+ * 16 * num_dpb_buffer + 52 * 1024;
break;
default:
@@ -486,6 +492,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
+ buf_sizes[0x4] = min_ctx_size;
return 0;
}
@@ -504,28 +511,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
{
struct amdgpu_device *adev = ctx->parser->adev;
int32_t *msg, msg_type, handle;
- struct fence *f;
void *ptr;
-
- int i, r;
+ long r;
+ int i;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.resv);
- if (f) {
- r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
- if (r) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
- return r;
- }
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
+ DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
+ return r;
}
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
return r;
}
@@ -628,6 +632,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
return -EINVAL;
}
+ } else if (cmd == 0x206) {
+ if ((end - start) < ctx->buf_sizes[4]) {
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+ (unsigned)(end - start),
+ ctx->buf_sizes[4]);
+ return -EINVAL;
+ }
} else if ((cmd != 0x100) && (cmd != 0x204)) {
DRM_ERROR("invalid UVD command %X!\n", cmd);
return -EINVAL;
@@ -755,9 +766,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
[0x00000000] = 2048,
- [0x00000001] = 32 * 1024 * 1024,
- [0x00000002] = 2048 * 1152 * 3,
+ [0x00000001] = 0xFFFFFFFF,
+ [0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
+ [0x00000004] = 0xFFFFFFFF,
};
struct amdgpu_ib *ib = &parser->ibs[ib_idx];
int r;
@@ -792,14 +804,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
}
+static int amdgpu_uvd_free_job(
+ struct amdgpu_job *sched_job)
+{
+ amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+ kfree(sched_job->ibs);
+ return 0;
+}
+
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib = NULL;
+ struct fence *f = NULL;
+ struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -821,34 +843,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto err;
-
- r = amdgpu_ib_get(ring, NULL, 64, &ib);
- if (r)
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib) {
+ r = -ENOMEM;
goto err;
+ }
+ r = amdgpu_ib_get(ring, NULL, 64, ib);
+ if (r)
+ goto err1;
addr = amdgpu_bo_gpu_offset(bo);
- ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
- ib.ptr[1] = addr;
- ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
- ib.ptr[3] = addr >> 32;
- ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
- ib.ptr[5] = 0;
+ ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+ ib->ptr[1] = addr;
+ ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+ ib->ptr[3] = addr >> 32;
+ ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+ ib->ptr[5] = 0;
for (i = 6; i < 16; ++i)
- ib.ptr[i] = PACKET2(0);
- ib.length_dw = 16;
+ ib->ptr[i] = PACKET2(0);
+ ib->length_dw = 16;
- r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_uvd_free_job,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
if (r)
- goto err;
- ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+ goto err2;
- if (fence)
- *fence = amdgpu_fence_ref(ib.fence);
+ ttm_eu_fence_buffer_objects(&ticket, &head, f);
- amdgpu_ib_free(ring->adev, &ib);
+ if (fence)
+ *fence = fence_get(f);
amdgpu_bo_unref(&bo);
- return 0;
+ fence_put(f);
+ if (amdgpu_enable_scheduler)
+ return 0;
+ amdgpu_ib_free(ring->adev, ib);
+ kfree(ib);
+ return 0;
+err2:
+ amdgpu_ib_free(ring->adev, ib);
+err1:
+ kfree(ib);
err:
ttm_eu_backoff_reservation(&ticket, &head);
return r;
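The reworked error path above is the usual kernel unwind ladder: err2 is reached once amdgpu_ib_get() has succeeded, err1 once only the kzalloc() has, and err releases the reservation taken before either, so each label undoes exactly what was acquired before the jump, in reverse order. A hedged user-space sketch of the shape (the resources and failure point are illustrative):

#include <stdlib.h>

static int do_submit(int fail_submit)
{
    int r = 0;
    char *resv, *ib, *ib_data;

    resv = malloc(16);      /* like reserving the BO list */
    if (!resv)
        return -1;
    ib = malloc(16);        /* like the kzalloc of amdgpu_ib */
    if (!ib) {
        r = -1;
        goto err;
    }
    ib_data = malloc(64);   /* like amdgpu_ib_get() */
    if (!ib_data) {
        r = -1;
        goto err1;
    }
    if (fail_submit) {      /* like the submit helper failing */
        r = -1;
        goto err2;
    }
    free(ib_data);
    free(ib);
    free(resv);
    return 0;

err2:
    free(ib_data);          /* undo amdgpu_ib_get() */
err1:
    free(ib);               /* undo the kzalloc */
err:
    free(resv);             /* undo the reservation */
    return r;
}

int main(void)
{
    return do_submit(1) == -1 ? 0 : 1; /* exercise the full unwind */
}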
@@ -858,7 +895,7 @@ err:
crash the vcpu so just try to emit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -866,7 +903,9 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &bo);
if (r)
return r;
@@ -905,7 +944,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -913,7 +952,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 2255aa710e33..1724c2c86151 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence);
+ struct fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence);
+ struct fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d3ca73090e39..3cab96c42aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -48,6 +48,7 @@
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
+#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -58,6 +59,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -101,6 +103,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_CARRIZO:
fw_name = FIRMWARE_CARRIZO;
break;
+ case CHIP_FIJI:
+ fw_name = FIRMWARE_FIJI;
+ break;
default:
return -EINVAL;
@@ -136,7 +141,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
/* allocate firmware, stack and heap BO */
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
@@ -334,6 +341,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
}
+static int amdgpu_vce_free_job(
+ struct amdgpu_job *sched_job)
+{
+ amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+ kfree(sched_job->ibs);
+ return 0;
+}
+
/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
@@ -345,59 +360,69 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
const unsigned ib_size_dw = 1024;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib = NULL;
+ struct fence *f = NULL;
+ struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
- r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+ r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ kfree(ib);
return r;
}
- dummy = ib.gpu_addr + 1024;
+ dummy = ib->gpu_addr + 1024;
/* stitch together a VCE create msg */
- ib.length_dw = 0;
- ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
- ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
- ib.ptr[ib.length_dw++] = handle;
-
- ib.ptr[ib.length_dw++] = 0x00000030; /* len */
- ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
- ib.ptr[ib.length_dw++] = 0x00000000;
- ib.ptr[ib.length_dw++] = 0x00000042;
- ib.ptr[ib.length_dw++] = 0x0000000a;
- ib.ptr[ib.length_dw++] = 0x00000001;
- ib.ptr[ib.length_dw++] = 0x00000080;
- ib.ptr[ib.length_dw++] = 0x00000060;
- ib.ptr[ib.length_dw++] = 0x00000100;
- ib.ptr[ib.length_dw++] = 0x00000100;
- ib.ptr[ib.length_dw++] = 0x0000000c;
- ib.ptr[ib.length_dw++] = 0x00000000;
-
- ib.ptr[ib.length_dw++] = 0x00000014; /* len */
- ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
- ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
- ib.ptr[ib.length_dw++] = dummy;
- ib.ptr[ib.length_dw++] = 0x00000001;
-
- for (i = ib.length_dw; i < ib_size_dw; ++i)
- ib.ptr[i] = 0x0;
-
- r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- }
-
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+ ib->ptr[ib->length_dw++] = handle;
+
+ ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+ ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000042;
+ ib->ptr[ib->length_dw++] = 0x0000000a;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000080;
+ ib->ptr[ib->length_dw++] = 0x00000060;
+ ib->ptr[ib->length_dw++] = 0x00000100;
+ ib->ptr[ib->length_dw++] = 0x00000100;
+ ib->ptr[ib->length_dw++] = 0x0000000c;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vce_free_job,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err;
if (fence)
- *fence = amdgpu_fence_ref(ib.fence);
-
- amdgpu_ib_free(ring->adev, &ib);
-
+ *fence = fence_get(f);
+ fence_put(f);
+ if (amdgpu_enable_scheduler)
+ return 0;
+err:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
return r;
}
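The message being stitched above is a stream of length-prefixed packets: each opens with its byte length (header included), then a command dword, then payload, which is why the session packet starts with 0x0000000c — three dwords, twelve bytes. A small sketch of that framing (emit_packet() is a hypothetical helper; the command value comes from the diff):

#include <assert.h>
#include <stdint.h>

/* append one {len, cmd, payload...} packet; returns the new write index */
static unsigned emit_packet(uint32_t *ib, unsigned dw, uint32_t cmd,
                            const uint32_t *payload, unsigned n)
{
    ib[dw++] = (2 + n) * 4; /* length in bytes, header included */
    ib[dw++] = cmd;
    for (unsigned i = 0; i < n; i++)
        ib[dw++] = payload[i];
    return dw;
}

int main(void)
{
    uint32_t ib[16];
    uint32_t handle = 1;
    unsigned dw = emit_packet(ib, 0, 0x00000001 /* session cmd */, &handle, 1);

    assert(dw == 3 && ib[0] == 0x0000000c); /* matches the 0xc above */
    return 0;
}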
@@ -412,49 +437,59 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
const unsigned ib_size_dw = 1024;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib = NULL;
+ struct fence *f = NULL;
+ struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
- r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+
+ r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
if (r) {
+ kfree(ib);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
}
- dummy = ib.gpu_addr + 1024;
+ dummy = ib->gpu_addr + 1024;
/* stitch together a VCE destroy msg */
- ib.length_dw = 0;
- ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
- ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
- ib.ptr[ib.length_dw++] = handle;
-
- ib.ptr[ib.length_dw++] = 0x00000014; /* len */
- ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
- ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
- ib.ptr[ib.length_dw++] = dummy;
- ib.ptr[ib.length_dw++] = 0x00000001;
-
- ib.ptr[ib.length_dw++] = 0x00000008; /* len */
- ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
-
- for (i = ib.length_dw; i < ib_size_dw; ++i)
- ib.ptr[i] = 0x0;
-
- r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- }
-
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+ ib->ptr[ib->length_dw++] = handle;
+
+ ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+
+ ib->ptr[ib->length_dw++] = 0x00000008; /* len */
+ ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vce_free_job,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err;
if (fence)
- *fence = amdgpu_fence_ref(ib.fence);
-
- amdgpu_ib_free(ring->adev, &ib);
-
+ *fence = fence_get(f);
+ fence_put(f);
+ if (amdgpu_enable_scheduler)
+ return 0;
+err:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
return r;
}
@@ -800,9 +835,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
int r;
+ /* skip vce ring1 ib test for now, since it's not reliable */
+ if (ring == &ring->adev->vce.ring[1])
+ return 0;
+
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r) {
DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
@@ -815,13 +854,13 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
} else {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
}
error:
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 7ccdb5927da5..ba2da8ee5906 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence);
+ struct fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_fence **fence);
+ struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b63f1cb..f68b7cdc370a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
- * @ring: ring we want to submit job to
* @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
*
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
*
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
*/
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync)
{
struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
/* check if the id is still valid */
if (vm_id->id && vm_id->last_id_use &&
vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
- return NULL;
+ return 0;
/* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
/* found a free one */
vm_id->id = i;
trace_amdgpu_vm_grab_id(i, ring->idx);
- return NULL;
+ return 0;
}
if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
+ struct amdgpu_fence *fence;
+
+ fence = adev->vm_manager.active[choices[i]];
vm_id->id = choices[i];
+
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
- return adev->vm_manager.active[choices[i]];
+ return amdgpu_sync_fence(ring->adev, sync, &fence->base);
}
}
/* should never happen */
BUG();
- return NULL;
+ return -EINVAL;
}
/**
@@ -196,17 +200,29 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
- struct amdgpu_fence *updates)
+ struct fence *updates)
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+ struct fence *flushed_updates = vm_id->flushed_updates;
+ bool is_earlier = false;
- if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
- amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+ if (flushed_updates && updates) {
+ BUG_ON(flushed_updates->context != updates->context);
+ is_earlier = (updates->seqno - flushed_updates->seqno <=
+ INT_MAX) ? true : false;
+ }
+
+ if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
+ is_earlier) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
- amdgpu_fence_unref(&vm_id->flushed_updates);
- vm_id->flushed_updates = amdgpu_fence_ref(updates);
+ if (is_earlier) {
+ vm_id->flushed_updates = fence_get(updates);
+ fence_put(flushed_updates);
+ }
+ if (!flushed_updates)
+ vm_id->flushed_updates = fence_get(updates);
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
}
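The is_earlier test above replaces a fence-pointer comparison with unsigned sequence arithmetic: within a single fence context, b is at or after a exactly when (u32)(b - a) <= INT_MAX, and the subtraction stays correct across wraparound. A standalone sketch of the rule (assumes 32-bit seqnos):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

/* true iff b is at or after a in the same fence context */
static bool seq_after_eq(uint32_t b, uint32_t a)
{
    return (uint32_t)(b - a) <= (uint32_t)INT_MAX; /* wrap-safe */
}

int main(void)
{
    assert(seq_after_eq(5, 3));
    assert(!seq_after_eq(3, 5));
    assert(seq_after_eq(2, UINT32_MAX - 1)); /* wrapped, still later */
    return 0;
}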
@@ -300,6 +316,15 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+{
+ int i;
+ for (i = 0; i < sched_job->num_ibs; i++)
+ amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+ kfree(sched_job->ibs);
+ return 0;
+}
+
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
@@ -310,7 +335,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
- struct amdgpu_ib ib;
+ struct fence *fence = NULL;
+ struct amdgpu_ib *ib;
unsigned entries;
uint64_t addr;
int r;
@@ -330,24 +356,33 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
- r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
- if (r)
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
goto error_unreserve;
- ib.length_dw = 0;
-
- amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > 64);
-
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)
goto error_free;
- amdgpu_bo_fence(bo, ib.fence, true);
-
+ ib->length_dw = 0;
+
+ amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > 64);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (!r)
+ amdgpu_bo_fence(bo, fence, true);
+ fence_put(fence);
+ if (amdgpu_enable_scheduler) {
+ amdgpu_bo_unreserve(bo);
+ return 0;
+ }
error_free:
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
error_unreserve:
amdgpu_bo_unreserve(bo);
@@ -400,7 +435,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct fence *fence = NULL;
+
int r;
/* padding, etc. */
@@ -413,10 +450,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
+
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
if (r)
return r;
- ib.length_dw = 0;
+ ib->length_dw = 0;
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +477,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, &ib, last_pde,
+ amdgpu_vm_update_pages(adev, ib, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID, 0);
}
@@ -450,23 +491,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+ amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
incr, AMDGPU_PTE_VALID, 0);
- if (ib.length_dw != 0) {
- amdgpu_vm_pad_ib(adev, &ib);
- amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib.length_dw > ndw);
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
- }
- amdgpu_bo_fence(pd, ib.fence, true);
+ if (ib->length_dw != 0) {
+ amdgpu_vm_pad_ib(adev, ib);
+ amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (r)
+ goto error_free;
+
+ amdgpu_bo_fence(pd, fence, true);
+ fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = fence_get(fence);
+ fence_put(fence);
+ }
+
+ if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
}
- amdgpu_ib_free(adev, &ib);
return 0;
+
+error_free:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
}
/**
@@ -572,9 +627,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
{
uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
+ void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned count = 0;
uint64_t addr;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -583,8 +643,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
uint64_t pte;
int r;
- amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
r = reservation_object_reserve_shared(pt->tbo.resv);
if (r)
return r;
@@ -640,7 +699,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
*/
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
unsigned i;
@@ -670,12 +729,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, uint32_t gtt_flags,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
unsigned nptes, ncmds, ndw;
uint32_t flags = gtt_flags;
- struct amdgpu_ib ib;
+ struct amdgpu_ib *ib;
+ struct fence *f = NULL;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -722,46 +782,54 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (ndw > 0xfffff)
return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
- if (r)
- return r;
- ib.length_dw = 0;
-
- if (!(flags & AMDGPU_PTE_VALID)) {
- unsigned i;
+ ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!ib)
+ return -ENOMEM;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_fence *f = vm->ids[i].last_id_use;
- amdgpu_sync_fence(&ib.sync, f);
- }
+ r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+ if (r) {
+ kfree(ib);
+ return r;
}
- r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+ ib->length_dw = 0;
+
+ r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
mapping->it.last + 1, addr + mapping->offset,
flags, gtt_flags);
if (r) {
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
return r;
}
- amdgpu_vm_pad_ib(adev, &ib);
- WARN_ON(ib.length_dw > ndw);
+ amdgpu_vm_pad_ib(adev, ib);
+ WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &f);
+ if (r)
+ goto error_free;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- return r;
- }
amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, ib.fence);
+ mapping->it.last + 1, f);
if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(ib.fence);
+ fence_put(*fence);
+ *fence = fence_get(f);
+ }
+ fence_put(f);
+ if (!amdgpu_enable_scheduler) {
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
}
- amdgpu_ib_free(adev, &ib);
-
return 0;
+
+error_free:
+ amdgpu_ib_free(adev, ib);
+ kfree(ib);
+ return r;
}
/**
@@ -794,21 +862,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
addr = 0;
}
- if (addr == bo_va->addr)
- return 0;
-
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ spin_lock(&vm->status_lock);
+ if (!list_empty(&bo_va->vm_status))
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ spin_unlock(&vm->status_lock);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
flags, &bo_va->last_pt_update);
if (r)
return r;
}
- bo_va->addr = addr;
spin_lock(&vm->status_lock);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
+ if (!mem)
+ list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
return 0;
@@ -861,7 +933,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
- int r;
+ int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->invalidated)) {
@@ -878,8 +950,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
if (bo_va)
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
- return 0;
+ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+ return r;
}
/**
@@ -907,10 +980,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->mappings);
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
@@ -999,12 +1072,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->mappings);
+ list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_map(bo_va, mapping);
- bo_va->addr = 0;
-
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
eaddr >>= amdgpu_vm_block_size;
@@ -1028,7 +1099,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, &pt);
if (r)
goto error_free;
@@ -1085,17 +1158,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
+ bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
}
- if (&mapping->list == &bo_va->mappings) {
- amdgpu_bo_unreserve(bo_va->bo);
- return -ENOENT;
+ if (&mapping->list == &bo_va->valids) {
+ valid = false;
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
+ if (mapping->it.start == saddr)
+ break;
+ }
+
+ if (&mapping->list == &bo_va->invalids) {
+ amdgpu_bo_unreserve(bo_va->bo);
+ return -ENOENT;
+ }
}
mutex_lock(&vm->mutex);
@@ -1103,12 +1186,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr) {
- /* clear the old address */
+ if (valid)
list_add(&mapping->list, &vm->freed);
- } else {
+ else
kfree(mapping);
- }
mutex_unlock(&vm->mutex);
amdgpu_bo_unreserve(bo_va->bo);
@@ -1139,16 +1220,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_del(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
- list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr)
- list_add(&mapping->list, &vm->freed);
- else
- kfree(mapping);
+ list_add(&mapping->list, &vm->freed);
+ }
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ interval_tree_remove(&mapping->it, &vm->va);
+ kfree(mapping);
}
- amdgpu_fence_unref(&bo_va->last_pt_update);
+
+ fence_put(bo_va->last_pt_update);
kfree(bo_va);
mutex_unlock(&vm->mutex);
@@ -1169,12 +1253,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
+ if (list_empty(&bo_va->vm_status))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
}
}
@@ -1202,6 +1284,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,8 +1298,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return -ENOMEM;
}
+ vm->page_directory_fence = NULL;
+
r = amdgpu_bo_create(adev, pd_size, align, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
NULL, &vm->page_directory);
if (r)
return r;
@@ -1263,9 +1349,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
+ fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+ fence_put(vm->ids[i].flushed_updates);
amdgpu_fence_unref(&vm->ids[i].last_id_use);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9ba0a7d5bc8e..92b6acadfc52 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -139,7 +139,8 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
tx_buf[0] = msg->address & 0xff;
tx_buf[1] = msg->address >> 8;
- tx_buf[2] = msg->request << 4;
+ tx_buf[2] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
tx_buf[3] = msg->size ? (msg->size - 1) : 0;
switch (msg->request & ~DP_AUX_I2C_MOT) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index ae8caca61e04..cd6edc40c9cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -812,7 +812,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
- if ((adev->flags & AMDGPU_IS_APU) &&
+ if ((adev->flags & AMD_IS_APU) &&
(amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp ||
!amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 341c56681841..4b6ce74753cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -64,6 +64,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
+
/*
* Indirect registers accessor
*/
@@ -836,7 +838,7 @@ static u32 cik_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
return reference_clock / 2;
} else {
@@ -1233,7 +1235,7 @@ static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
if (reset_mask & AMDGPU_RESET_VMC)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
- if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU)) {
if (reset_mask & AMDGPU_RESET_MC)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
}
@@ -1409,7 +1411,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timed out !\n");
}
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
/* disable BM */
@@ -1427,7 +1429,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/* does asic init need to be run first??? */
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
}
@@ -1568,7 +1570,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
if (amdgpu_pcie_gen2 == 0)
return;
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -1728,7 +1730,7 @@ static void cik_program_aspm(struct amdgpu_device *adev)
return;
/* XXX double check APUs */
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return;
orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
@@ -2448,14 +2450,21 @@ static int cik_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_amdkfd_suspend(adev);
+
return cik_common_hw_fini(adev);
}
static int cik_common_resume(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return cik_common_hw_init(adev);
+ r = cik_common_hw_init(adev);
+ if (r)
+ return r;
+
+ return amdgpu_amdkfd_resume(adev);
}
static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..9ea9de457da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -188,6 +188,19 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
+static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->nop |
+ SDMA_NOP_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->nop);
+}
+
/**
* cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
*
@@ -213,8 +226,8 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
- while ((ring->wptr & 7) != 4)
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
@@ -500,6 +513,9 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ if (adev->sdma[i].feature_version >= 20)
+ adev->sdma[i].burst_nop = true;
fw_data = (const __le32 *)
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
@@ -613,6 +629,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
+ struct fence *f = NULL;
unsigned i;
unsigned index;
int r;
@@ -628,12 +645,11 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
-
+ memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib);
if (r) {
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- return r;
+ goto err0;
}
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -642,20 +658,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err1;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+ r = fence_wait(f, false);
if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = amdgpu_fence_wait(ib.fence, false);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- return r;
+ goto err1;
}
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -665,12 +677,17 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ib.fence->ring->idx, i);
+ ring->idx, i);
+ goto err1;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
+
+err1:
+ fence_put(f);
amdgpu_ib_free(adev, &ib);
+err0:
amdgpu_wb_free(adev, index);
return r;
}
@@ -813,8 +830,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+ u32 pad_count;
+ int i;
+
+ pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
+ SDMA_NOP_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
/**
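Both SDMA padding sites above compute the same alignment: how many NOP dwords round the length up to a multiple of eight, with the ring variant using 12 - ... so that the 4-dword INDIRECT_BUFFER packet which follows ends on the boundary; on firmware with feature_version >= 20 a single burst NOP carrying SDMA_NOP_COUNT() replaces the run of plain NOPs. The arithmetic, checked exhaustively in a standalone sketch:

#include <assert.h>
#include <stdint.h>

/* nops needed so len becomes a multiple of 8 */
static uint32_t pad_to_8(uint32_t len)
{
    return (8 - (len & 7)) % 8;
}

/* nops needed so a 4-dword packet starting at wptr ends 8-aligned */
static uint32_t pad_for_ib_packet(uint32_t wptr)
{
    return (12 - (wptr & 7)) % 8;
}

int main(void)
{
    for (uint32_t n = 0; n < 64; n++) {
        assert(((n + pad_to_8(n)) & 7) == 0);
        assert(((n + pad_for_ib_packet(n) + 4) & 7) == 0);
    }
    return 0;
}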
@@ -1301,6 +1329,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.is_lockup = cik_sdma_ring_is_lockup,
+ .insert_nop = cik_sdma_ring_insert_nop,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1337,18 +1366,18 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
-static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
- amdgpu_ring_write(ring, byte_count);
- amdgpu_ring_write(ring, 0); /* src/dst endian swap */
- amdgpu_ring_write(ring, lower_32_bits(src_offset));
- amdgpu_ring_write(ring, upper_32_bits(src_offset));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = byte_count;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
@@ -1361,16 +1390,16 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
*
* Fill GPU buffers using the DMA engine (CIK).
*/
-static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
- amdgpu_ring_write(ring, src_data);
- amdgpu_ring_write(ring, byte_count);
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
@@ -1403,5 +1432,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+ adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index d19085a97064..7f6d457f250a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -487,6 +487,7 @@
(((op) & 0xFF) << 0))
/* sDMA opcodes */
#define SDMA_OPCODE_NOP 0
+# define SDMA_NOP_COUNT(x) (((x) & 0x3FFF) << 16)
#define SDMA_OPCODE_COPY 1
# define SDMA_COPY_SUB_OPCODE_LINEAR 0
# define SDMA_COPY_SUB_OPCODE_TILED 1
@@ -552,6 +553,12 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+/* if PTR32, these are the bases for scratch and lds */
+#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
+#define SHARED_BASE(x) ((x) << 16) /* LDS */
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
enum {
MTYPE_CACHED = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f75a31df30bd..44fa96ad4709 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
+ struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
+ struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+ &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+ u32 sclk, vclk, dclk, ecclk, tmp;
+ u16 vddnb, vddgfx;
+
+ if (sclk_index >= NUM_SCLK_LEVELS) {
+ seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
} else {
- sclk = table->entries[current_index].clk;
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
+ sclk = table->entries[sclk_index].clk;
+ seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+ }
+
+ tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+ seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ if (!pi->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ dclk = uvd_table->entries[uvd_index].dclk;
+ seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+ }
+ }
+
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
+ if (!pi->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+ }
}
}
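The VID reads above fall back to the raw mask-and-shift idiom because these ixSMUSVI_* fields appear to come with hand-defined masks rather than generated REG_FIELD macros: the mask isolates the bits, the shift right-justifies them. A trivial sketch of the pattern (the register value is made up):

#include <assert.h>
#include <stdint.h>

#define CURRENT_NB_VID_MASK  0xff000000u
#define CURRENT_NB_VID_SHIFT 24

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned shift)
{
    return (reg & mask) >> shift; /* isolate, then right-justify */
}

int main(void)
{
    assert(get_field(0xAB123456u, CURRENT_NB_VID_MASK,
                     CURRENT_NB_VID_SHIFT) == 0xAB);
    return 0;
}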
@@ -1558,9 +1596,9 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
if (pi->sys_info.nb_dpm_enable) {
if (ps->force_high)
- cz_dpm_nbdpm_lm_pstate_enable(adev, true);
- else
cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+ else
+ cz_dpm_nbdpm_lm_pstate_enable(adev, true);
}
return ret;
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
if (ret)
return ret;
- DRM_INFO("DPM unforce state min=%d, max=%d.\n",
- pi->sclk_dpm.soft_min_clk,
- pi->sclk_dpm.soft_max_clk);
+ DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+ pi->sclk_dpm.soft_min_clk,
+ pi->sclk_dpm.soft_max_clk);
return 0;
}
static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amdgpu_dpm_forced_level level)
{
int ret = 0;
switch (level) {
case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_highest(adev);
if (ret)
return ret;
break;
case AMDGPU_DPM_FORCED_LEVEL_LOW:
+ ret = cz_dpm_unforce_dpm_levels(adev);
+ if (ret)
+ return ret;
ret = cz_dpm_force_lowest(adev);
if (ret)
return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
break;
}
+ adev->pm.dpm.forced_level = level;
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..e4d101b1252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -126,9 +126,31 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
+static const u32 golden_settings_fiji_a10[] =
+{
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x1f311fff, 0x12300000,
+ mmHDMI_CONTROL, 0x31000111, 0x00000011,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ break;
case CHIP_TONGA:
amdgpu_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
@@ -803,11 +825,11 @@ static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
buffer_alloc = 2;
} else if (mode->crtc_hdisplay < 4096) {
mem_cfg = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
} else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
mem_cfg = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
}
} else {
mem_cfg = 1;
@@ -1331,7 +1353,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+ tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* restore original selection */
@@ -2632,6 +2654,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2640,6 +2663,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
@@ -2884,6 +2910,7 @@ static int dce_v10_0_early_init(void *handle)
dce_v10_0_set_irq_funcs(adev);
switch (adev->asic_type) {
+ case CHIP_FIJI:
case CHIP_TONGA:
adev->mode_info.num_crtc = 6; /* XXX 7??? */
adev->mode_info.num_hpd = 6;
@@ -3403,19 +3430,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v10_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v10_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..6411e8244671 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -801,11 +801,11 @@ static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
buffer_alloc = 2;
} else if (mode->crtc_hdisplay < 4096) {
mem_cfg = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
} else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
mem_cfg = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
}
} else {
mem_cfg = 1;
@@ -1329,7 +1329,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+ tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* restore original selection */
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
@@ -3402,19 +3406,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
dce_v11_0_crtc_vline_int_ack(adev, crtc);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d663f2c..c86911c2ea2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -770,11 +770,11 @@ static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
buffer_alloc = 2;
} else if (mode->crtc_hdisplay < 4096) {
tmp = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
} else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
tmp = 0;
- buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+ buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
}
} else {
tmp = 1;
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v8_0_vga_enable(crtc, false);
+ /* Make sure VBLANK interrupt is still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
@@ -3237,19 +3241,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data) {
case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank) {
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
}
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
break;
case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline) {
+ if (disp_int & interrupt_status_offsets[crtc].vline)
WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
- }
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
new file mode 100644
index 000000000000..8f9845d9a986
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_smumgr.h"
+
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int fiji_dpm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ fiji_dpm_set_funcs(adev);
+
+ return 0;
+}
+
+static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
+{
+ char fw_name[30] = "amdgpu/fiji_smc.bin";
+ int err;
+
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+ if (err) {
+ DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int fiji_dpm_sw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ret = fiji_dpm_init_microcode(adev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int fiji_dpm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static int fiji_dpm_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ mutex_lock(&adev->pm.mutex);
+
+ ret = fiji_smu_init(adev);
+ if (ret) {
+ DRM_ERROR("SMU initialization failed\n");
+ goto fail;
+ }
+
+ ret = fiji_smu_start(adev);
+ if (ret) {
+ DRM_ERROR("SMU start failed\n");
+ goto fail;
+ }
+
+ mutex_unlock(&adev->pm.mutex);
+ return 0;
+
+fail:
+ adev->firmware.smu_load = false;
+ mutex_unlock(&adev->pm.mutex);
+ return -EINVAL;
+}
+
+static int fiji_dpm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ mutex_lock(&adev->pm.mutex);
+ fiji_smu_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+ return 0;
+}
+
+static int fiji_dpm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ fiji_dpm_hw_fini(adev);
+
+ return 0;
+}
+
+static int fiji_dpm_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ fiji_dpm_hw_init(adev);
+
+ return 0;
+}
+
+static int fiji_dpm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int fiji_dpm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+ .early_init = fiji_dpm_early_init,
+ .late_init = NULL,
+ .sw_init = fiji_dpm_sw_init,
+ .sw_fini = fiji_dpm_sw_fini,
+ .hw_init = fiji_dpm_hw_init,
+ .hw_fini = fiji_dpm_hw_fini,
+ .suspend = fiji_dpm_suspend,
+ .resume = fiji_dpm_resume,
+ .is_idle = NULL,
+ .wait_for_idle = NULL,
+ .soft_reset = NULL,
+ .print_status = NULL,
+ .set_clockgating_state = fiji_dpm_set_clockgating_state,
+ .set_powergating_state = fiji_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
+ .get_temperature = NULL,
+ .pre_set_power_state = NULL,
+ .set_power_state = NULL,
+ .post_set_power_state = NULL,
+ .display_configuration_changed = NULL,
+ .get_sclk = NULL,
+ .get_mclk = NULL,
+ .print_power_state = NULL,
+ .debugfs_print_current_performance_level = NULL,
+ .force_performance_level = NULL,
+ .vblank_too_short = NULL,
+ .powergate_uvd = NULL,
+};
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->pm.funcs)
+ adev->pm.funcs = &fiji_dpm_funcs;
+}
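The amd_ip_funcs table is the per-IP-block lifecycle interface; the driver core walks every registered block's hooks in phases. A minimal sketch of that calling pattern, as a hypothetical loop rather than the actual amdgpu sequencing (which lives in amdgpu_device.c, not in this patch):

/* Hypothetical sketch of one lifecycle phase; the real ordering
 * and error unwinding are in amdgpu_device.c. */
static int ip_blocks_hw_init(struct amdgpu_device *adev,
			     const struct amd_ip_funcs **funcs, int num)
{
	int i, r;

	for (i = 0; i < num; i++) {
		if (!funcs[i]->hw_init)
			continue;
		r = funcs[i]->hw_init(adev);	/* e.g. fiji_dpm_hw_init */
		if (r)
			return r;		/* caller unwinds via hw_fini */
	}
	return 0;
}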
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
new file mode 100644
index 000000000000..3c4824082990
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_PP_SMC_H
+#define FIJI_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC 0x01
+#define PPSMC_SWSTATE_FLAG_UVD 0x02
+#define PPSMC_SWSTATE_FLAG_VCE 0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
+#define PPSMC_SYSTEMFLAG_GDDR5 0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
+
+#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
+#define PPSMC_DPM2FLAGS_OCP 0x04
+
+#define PPSMC_DISPLAY_WATERMARK_LOW 0
+#define PPSMC_DISPLAY_WATERMARK_HIGH 1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
+#define PPSMC_STATEFLAG_POWERBOOST 0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT 0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
+//Gemini Modes
+#define PPSMC_GeminiModeNone 0 //Single GPU board
+#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board
+#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board
+
+#define PPSMC_Result_OK ((uint16_t)0x01)
+#define PPSMC_Result_NoMore ((uint16_t)0x02)
+#define PPSMC_Result_NotNow ((uint16_t)0x03)
+#define PPSMC_Result_Failed ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+#define PPSMC_MSG_Halt ((uint16_t)0x10)
+#define PPSMC_MSG_Resume ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
+#define PPSMC_StartFanControl ((uint16_t)0x5B)
+#define PPSMC_StopFanControl ((uint16_t)0x5C)
+#define PPSMC_NoDisplay ((uint16_t)0x5D)
+#define PPSMC_HasDisplay ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
+#define PPSMC_OCPActive ((uint16_t)0x6C)
+#define PPSMC_OCPInactive ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache ((uint16_t)0x81)
+#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
+#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
+#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
+#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
+#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
+#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
+#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test ((uint16_t)0x100)
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
+#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
+#define PPSMC_EVENT_STATUS_DC 0x00000004
+#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
+
+#pragma pack(pop)
+
+#endif
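Failure results all carry bit 0x80, which is exactly what PPSMC_isERROR() tests: Failed/UnknownCmd/UnknownVT (0xFF/0xFE/0xFD) match, while OK, NoMore and NotNow (0x01-0x03) do not. A brief usage sketch; smu_read_resp() is a hypothetical accessor for the response register:

/* Hedged sketch of result handling with the macros above;
 * smu_read_resp() is a hypothetical SMC_RESP_0 wrapper. */
PPSMC_Result res = smu_read_resp(adev);

if (PPSMC_isERROR(res))
	DRM_ERROR("SMC rejected message: 0x%04x\n", res);
else if (res == PPSMC_Result_NotNow)
	DRM_DEBUG("SMC busy, retry later\n");	/* 0x03 is not an error */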
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
new file mode 100644
index 000000000000..322edea65857
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_ppsmc.h"
+#include "fiji_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+#define FIJI_SMC_SIZE 0x20000
+
+static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+ uint32_t val;
+
+ if (smc_address & 3)
+ return -EINVAL;
+
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+ val = RREG32(mmSMC_IND_ACCESS_CNTL);
+ val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+ return 0;
+}
+
+static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ uint32_t addr;
+ uint32_t data, orig_data;
+ int result = 0;
+ uint32_t extra_shift;
+ unsigned long flags;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first */
+ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+ result = fiji_set_smc_sram_address(adev, addr, limit);
+
+ if (result)
+ goto out;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+ /* Now write odd bytes left, do a read modify write cycle */
+ data = 0;
+
+ result = fiji_set_smc_sram_address(adev, addr, limit);
+ if (result)
+ goto out;
+
+ orig_data = RREG32(mmSMC_IND_DATA_0);
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+ data |= (orig_data & ~((~0UL) << extra_shift));
+
+ result = fiji_set_smc_sram_address(adev, addr, limit);
+ if (result)
+ goto out;
+
+ WREG32(mmSMC_IND_DATA_0, data);
+ }
+
+out:
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return result;
+}
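The tail path above packs the 1-3 leftover bytes MSB-first, shifts them to the top of the dword, and merges back the untouched low bytes read from SRAM. The same arithmetic as a self-contained sketch (plain C, no register access; the helper name is illustrative):

#include <stdint.h>

/* Mirror of the tail merge in fiji_copy_bytes_to_smc():
 * 'count' is 1..3, 'orig' is the dword currently in SMC SRAM. */
static uint32_t merge_tail(uint32_t orig, const uint8_t *src, uint32_t count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - count);

	while (count--)
		data = (data << 8) + *src++;	/* pack MSB first */

	data <<= extra_shift;			/* move to the top bytes */
	return data | (orig & ~((~0UL) << extra_shift));
}

For src = {0xAA, 0xBB, 0xCC} and orig = 0x11223344 this yields 0xAABBCC44: the low byte survives the read-modify-write.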
+
+static int fiji_program_jump_on_start(struct amdgpu_device *adev)
+{
+ static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+ fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
+{
+ uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+ return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32(mmSMC_RESP_0);
+ if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send previous message\n");
+ return -EINVAL;
+ }
+
+ WREG32(mmSMC_MSG_ARG_0, 0x20000);
+ WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send message\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+ if (!fiji_is_smc_ram_running(adev))
+ return -EINVAL;
+
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send previous message\n");
+ return -EINVAL;
+ }
+
+ WREG32(mmSMC_MESSAGE_0, msg);
+
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send message\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
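All the senders in this file are variants of one handshake: poll SMC_RESP_0 until the previous message has retired, optionally stage a 32-bit argument in SMC_MSG_ARG_0, write the message ID to SMC_MESSAGE_0 (which fires it), then poll SMC_RESP_0 again unless the caller asked not to wait. Condensed:

/* Condensed message handshake (ram-running check and error
 * handling from the helpers above are omitted): */
wait_smu_response(adev);		/* previous message retired */
WREG32(mmSMC_MSG_ARG_0, parameter);	/* optional, *_with_parameter only */
WREG32(mmSMC_MESSAGE_0, msg);		/* this write fires the message */
wait_smu_response(adev);		/* skipped by *_without_waiting */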
+
+static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+ PPSMC_Msg msg)
+{
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send previous message\n");
+ return -EINVAL;
+ }
+
+ WREG32(mmSMC_MESSAGE_0, msg);
+
+ return 0;
+}
+
+static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg,
+ uint32_t parameter)
+{
+ if (!fiji_is_smc_ram_running(adev))
+ return -EINVAL;
+
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send previous message\n");
+ return -EINVAL;
+ }
+
+ WREG32(mmSMC_MSG_ARG_0, parameter);
+
+ return fiji_send_msg_to_smc(adev, msg);
+}
+
+static int fiji_send_msg_to_smc_with_parameter_without_waiting(
+ struct amdgpu_device *adev,
+ PPSMC_Msg msg, uint32_t parameter)
+{
+ if (wait_smu_response(adev)) {
+ DRM_ERROR("Failed to send previous message\n");
+ return -EINVAL;
+ }
+
+ WREG32(mmSMC_MSG_ARG_0, parameter);
+
+ return fiji_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t val;
+
+ if (!fiji_is_smc_ram_running(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
+static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ uint32_t ucode_size;
+ uint32_t ucode_start_address;
+ const uint8_t *src;
+ uint32_t val;
+ uint32_t byte_count;
+ uint32_t *data;
+ unsigned long flags;
+
+ if (!adev->pm.fw)
+ return -EINVAL;
+
+ hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ src = (const uint8_t *)
+ (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ if (ucode_size & 3) {
+ DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+ return -EINVAL;
+ }
+
+ if (ucode_size > FIJI_SMC_SIZE) {
+ DRM_ERROR("SMC address is beyond the SMC RAM area\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+ val = RREG32(mmSMC_IND_ACCESS_CNTL);
+ val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+ WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+ byte_count = ucode_size;
+ data = (uint32_t *)src;
+ for (; byte_count >= 4; data++, byte_count -= 4)
+ WREG32(mmSMC_IND_DATA_0, data[0]);
+
+ val = RREG32(mmSMC_IND_ACCESS_CNTL);
+ val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ WREG32(mmSMC_IND_ACCESS_CNTL, val);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return 0;
+}
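Both SRAM paths use the SMC_IND_INDEX_0/SMC_IND_DATA_0 indirect pair: fiji_set_smc_sram_address() clears AUTO_INCREMENT_IND_0 and re-programs the index for every dword, while the bulk upload above sets it so the index advances on each data write. The generic idiom as a hypothetical helper (assumes auto-increment is already enabled; locking omitted):

/* Hypothetical sketch of the auto-increment indirect-access idiom;
 * the caller must have set AUTO_INCREMENT_IND_0 and must hold
 * adev->smc_idx_lock, as the upload path above does. */
static void smc_write_block(struct amdgpu_device *adev, uint32_t addr,
			    const uint32_t *buf, uint32_t ndwords)
{
	WREG32(mmSMC_IND_INDEX_0, addr);	/* program start once */
	while (ndwords--)
		WREG32(mmSMC_IND_DATA_0, *buf++); /* index auto-advances */
}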
+
+#if 0 /* not used yet */
+static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
+ uint32_t smc_address,
+ uint32_t *value,
+ uint32_t limit)
+{
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ result = fiji_set_smc_sram_address(adev, smc_address, limit);
+ if (result == 0)
+ *value = RREG32(mmSMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return result;
+}
+
+static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
+ uint32_t smc_address,
+ uint32_t value,
+ uint32_t limit)
+{
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ result = fiji_set_smc_sram_address(adev, smc_address, limit);
+ if (result == 0)
+ WREG32(mmSMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return result;
+}
+
+static int fiji_smu_stop_smc(struct amdgpu_device *adev)
+{
+ uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+ val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+ val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+ WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+ return 0;
+}
+#endif
+
+static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
+{
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ return AMDGPU_UCODE_ID_SDMA0;
+ case UCODE_ID_SDMA1:
+ return AMDGPU_UCODE_ID_SDMA1;
+ case UCODE_ID_CP_CE:
+ return AMDGPU_UCODE_ID_CP_CE;
+ case UCODE_ID_CP_PFP:
+ return AMDGPU_UCODE_ID_CP_PFP;
+ case UCODE_ID_CP_ME:
+ return AMDGPU_UCODE_ID_CP_ME;
+ case UCODE_ID_CP_MEC:
+ case UCODE_ID_CP_MEC_JT1:
+ case UCODE_ID_CP_MEC_JT2:
+ return AMDGPU_UCODE_ID_CP_MEC1;
+ case UCODE_ID_RLC_G:
+ return AMDGPU_UCODE_ID_RLC_G;
+ default:
+ DRM_ERROR("ucode type is out of range!\n");
+ return AMDGPU_UCODE_ID_MAXIMUM;
+ }
+}
+
+static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+ uint32_t fw_type,
+ struct SMU_Entry *entry)
+{
+ enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
+ struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ uint64_t gpu_addr;
+ uint32_t data_size;
+
+ if (ucode->fw == NULL)
+ return -EINVAL;
+ gpu_addr = ucode->mc_addr;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+ if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
+ (fw_type == UCODE_ID_CP_MEC_JT2)) {
+ gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ data_size = le32_to_cpu(header->jt_size) << 2;
+ }
+
+ entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+ entry->id = (uint16_t)fw_type;
+ entry->image_addr_high = upper_32_bits(gpu_addr);
+ entry->image_addr_low = lower_32_bits(gpu_addr);
+ entry->meta_data_addr_high = 0;
+ entry->meta_data_addr_low = 0;
+ entry->data_size_byte = data_size;
+ entry->num_register_entries = 0;
+
+ if (fw_type == UCODE_ID_RLC_G)
+ entry->flags = 1;
+ else
+ entry->flags = 0;
+
+ return 0;
+}
+
+static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
+{
+ struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
+ struct SMU_DRAMData_TOC *toc;
+ uint32_t fw_to_load;
+
+ WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
+
+ fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
+ fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
+
+ toc = (struct SMU_DRAMData_TOC *)private->header;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ if (!adev->firmware.smu_load)
+ return 0;
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for RLC\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for CE\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for PFP\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for ME\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for MEC\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+ return -EINVAL;
+ }
+
+ if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+ return -EINVAL;
+ }
+
+ fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+ fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+ fw_to_load = UCODE_ID_RLC_G_MASK |
+ UCODE_ID_SDMA0_MASK |
+ UCODE_ID_SDMA1_MASK |
+ UCODE_ID_CP_CE_MASK |
+ UCODE_ID_CP_ME_MASK |
+ UCODE_ID_CP_PFP_MASK |
+ UCODE_ID_CP_MEC_MASK;
+
+ if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+ DRM_ERROR("Fail to request SMU load ucode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+ switch (fw_type) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ return UCODE_ID_SDMA0_MASK;
+ case AMDGPU_UCODE_ID_SDMA1:
+ return UCODE_ID_SDMA1_MASK;
+ case AMDGPU_UCODE_ID_CP_CE:
+ return UCODE_ID_CP_CE_MASK;
+ case AMDGPU_UCODE_ID_CP_PFP:
+ return UCODE_ID_CP_PFP_MASK;
+ case AMDGPU_UCODE_ID_CP_ME:
+ return UCODE_ID_CP_ME_MASK;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ return UCODE_ID_CP_MEC_MASK;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ return UCODE_ID_CP_MEC_MASK;
+ case AMDGPU_UCODE_ID_RLC_G:
+ return UCODE_ID_RLC_G_MASK;
+ default:
+ DRM_ERROR("ucode type is out of range!\n");
+ return 0;
+ }
+}
+
+static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
+ uint32_t fw_type)
+{
+ uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
+ int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("check firmware loading failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
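This is the bounded-poll idiom that recurs throughout the file: spin in 1 us steps for at most adev->usec_timeout iterations and fail if the condition never asserts. Factored into a hypothetical helper for clarity (the patch itself open-codes every loop):

/* Hypothetical helper capturing the open-coded poll loops above. */
static int poll_usec(struct amdgpu_device *adev,
		     bool (*done)(struct amdgpu_device *adev))
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (done(adev))
			return 0;
		udelay(1);	/* 1 us busy-wait per iteration */
	}
	return -ETIMEDOUT;	/* the open-coded loops return -EINVAL */
}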
+
+static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
+{
+ int result;
+ uint32_t val;
+ int i;
+
+ /* Assert reset */
+ val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+ val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+ result = fiji_smu_upload_firmware_image(adev);
+ if (result)
+ return result;
+
+ /* Clear status */
+ WREG32_SMC(ixSMU_STATUS, 0);
+
+ /* Enable clock */
+ val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+ WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+ /* De-assert reset */
+ val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+ val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+ /* Set SMU Auto Start */
+ val = RREG32_SMC(ixSMU_INPUT_DATA);
+ val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
+ WREG32_SMC(ixSMU_INPUT_DATA, val);
+
+ /* Clear firmware interrupt enable flag */
+ WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixRCU_UC_EVENTS);
+ if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("Interrupt is not enabled by firmware\n");
+ return -EINVAL;
+ }
+
+ /* Call Test SMU message with 0x20000 offset
+ * to trigger SMU start
+ */
+ fiji_send_msg_to_smc_offset(adev);
+ DRM_INFO("[FM]try triger smu start\n");
+ /* Wait for done bit to be set */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixSMU_STATUS);
+ if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("Timeout for SMU start\n");
+ return -EINVAL;
+ }
+
+ /* Check pass/failed indicator */
+ val = RREG32_SMC(ixSMU_STATUS);
+ if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
+ DRM_ERROR("SMU Firmware start failed\n");
+ return -EINVAL;
+ }
+ DRM_INFO("[FM]smu started\n");
+ /* Wait for firmware to initialize */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixFIRMWARE_FLAGS);
+ if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("SMU firmware initialization failed\n");
+ return -EINVAL;
+ }
+ DRM_INFO("[FM]smu initialized\n");
+
+ return 0;
+}
+
+static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
+{
+ int i, result;
+ uint32_t val;
+
+ /* wait for smc boot up */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixRCU_UC_EVENTS);
+ val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
+ if (val)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("SMC boot sequence is not completed\n");
+ return -EINVAL;
+ }
+
+ /* Clear firmware interrupt enable flag */
+ WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+ /* Assert reset */
+ val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+ val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+ result = fiji_smu_upload_firmware_image(adev);
+ if (result)
+ return result;
+
+ /* Set SMC instruction start point at 0x0 */
+ fiji_program_jump_on_start(adev);
+
+ /* Enable clock */
+ val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+ WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+ /* De-assert reset */
+ val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+ val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+ WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+ /* Wait for firmware to initialize */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32_SMC(ixFIRMWARE_FLAGS);
+ if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ DRM_ERROR("Timeout for SMC firmware initialization\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int fiji_smu_start(struct amdgpu_device *adev)
+{
+ int result;
+ uint32_t val;
+
+ if (!fiji_is_smc_ram_running(adev)) {
+ val = RREG32_SMC(ixSMU_FIRMWARE);
+ if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
+ DRM_INFO("[FM]start smu in nonprotection mode\n");
+ result = fiji_smu_start_in_non_protection_mode(adev);
+ if (result)
+ return result;
+ } else {
+ DRM_INFO("[FM]start smu in protection mode\n");
+ result = fiji_smu_start_in_protection_mode(adev);
+ if (result)
+ return result;
+ }
+ }
+
+ return fiji_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
+ .check_fw_load_finish = fiji_smu_check_fw_load_finish,
+ .request_smu_load_fw = NULL,
+ .request_smu_specific_fw = NULL,
+};
+
+int fiji_smu_init(struct amdgpu_device *adev)
+{
+ struct fiji_smu_private_data *private;
+ uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+ uint32_t smu_internal_buffer_size = 200*4096;
+ struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+ struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+ uint64_t mc_addr;
+ void *toc_buf_ptr;
+ void *smu_buf_ptr;
+ int ret;
+
+ private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ /* allocate firmware buffers */
+ if (adev->firmware.smu_load)
+ amdgpu_ucode_init_bo(adev);
+
+ adev->smu.priv = private;
+ adev->smu.fw_flags = 0;
+
+ /* Allocate FW image data structure and header buffer */
+ ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+ true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, toc_buf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate buffer for SMU internal buffer */
+ ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
+ true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, smu_buf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Retrieve GPU address for header buffer and internal buffer */
+ ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+ if (ret) {
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to reserve the TOC buffer\n");
+ return -EINVAL;
+ }
+
+ ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+ if (ret) {
+ amdgpu_bo_unreserve(adev->smu.toc_buf);
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to pin the TOC buffer\n");
+ return -EINVAL;
+ }
+
+ ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+ if (ret) {
+ amdgpu_bo_unreserve(adev->smu.toc_buf);
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to map the TOC buffer\n");
+ return -EINVAL;
+ }
+
+ amdgpu_bo_unreserve(adev->smu.toc_buf);
+ private->header_addr_low = lower_32_bits(mc_addr);
+ private->header_addr_high = upper_32_bits(mc_addr);
+ private->header = toc_buf_ptr;
+
+ ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+ if (ret) {
+ amdgpu_bo_unref(&adev->smu.smu_buf);
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to reserve the SMU internal buffer\n");
+ return -EINVAL;
+ }
+
+ ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+ if (ret) {
+ amdgpu_bo_unreserve(adev->smu.smu_buf);
+ amdgpu_bo_unref(&adev->smu.smu_buf);
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to pin the SMU internal buffer\n");
+ return -EINVAL;
+ }
+
+ ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+ if (ret) {
+ amdgpu_bo_unreserve(adev->smu.smu_buf);
+ amdgpu_bo_unref(&adev->smu.smu_buf);
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ DRM_ERROR("Failed to map the SMU internal buffer\n");
+ return -EINVAL;
+ }
+
+ amdgpu_bo_unreserve(adev->smu.smu_buf);
+ private->smu_buffer_addr_low = lower_32_bits(mc_addr);
+ private->smu_buffer_addr_high = upper_32_bits(mc_addr);
+
+ adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
+
+ return 0;
+}
+
+int fiji_smu_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->smu.toc_buf);
+ amdgpu_bo_unref(&adev->smu.smu_buf);
+ kfree(adev->smu.priv);
+ adev->smu.priv = NULL;
+ if (adev->firmware.fw_buf)
+ amdgpu_ucode_fini_bo(adev);
+
+ return 0;
+}
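fiji_smu_init() puts both firmware buffers through the standard amdgpu_bo bring-up: create in VRAM, reserve, pin to obtain the GPU (MC) address, kmap for a CPU pointer, then unreserve; fiji_smu_fini() drops the references. Condensed to one buffer, with the per-step unwinding shown in the function above omitted:

/* Condensed sketch of the buffer bring-up used by fiji_smu_init(). */
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
		       AMDGPU_GEM_DOMAIN_VRAM,
		       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
		       NULL, &bo);
if (!ret) {
	ret = amdgpu_bo_reserve(bo, false);	/* lock for pin/kmap */
	if (!ret) {
		ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
		if (!ret)
			ret = amdgpu_bo_kmap(bo, &cpu_ptr);
		amdgpu_bo_unreserve(bo);
	}
}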
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
new file mode 100644
index 000000000000..1cef03deeac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_SMUMGR_H
+#define FIJI_SMUMGR_H
+
+#include "fiji_ppsmc.h"
+
+int fiji_smu_init(struct amdgpu_device *adev);
+int fiji_smu_fini(struct amdgpu_device *adev);
+int fiji_smu_start(struct amdgpu_device *adev);
+
+struct fiji_smu_private_data {
+ uint8_t *header;
+ uint32_t smu_buffer_addr_high;
+ uint32_t smu_buffer_addr_low;
+ uint32_t header_addr_high;
+ uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..4bd1e5cf65ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2173,7 +2173,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
/* Get memory bank mapping mode. */
tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* scheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@@ -2624,6 +2648,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
+ struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -2635,29 +2660,27 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- amdgpu_gfx_scratch_free(adev, scratch);
- amdgpu_ib_free(adev, &ib);
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = amdgpu_fence_wait(ib.fence, false);
+
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err2;
+
+ r = fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- amdgpu_ib_free(adev, &ib);
- return r;
+ goto err2;
}
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -2667,14 +2690,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ib.fence->ring->idx, i);
+ ring->idx, i);
+ goto err2;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
- amdgpu_gfx_scratch_free(adev, scratch);
+
+err2:
+ fence_put(f);
amdgpu_ib_free(adev, &ib);
+err1:
+ amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
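The rewrite replaces the duplicated free-and-return blocks with the kernel's goto-unwind idiom: each label releases only what was acquired before the failure, and the success path deliberately falls through err2 so the fence and IB are freed either way. The shape, with submit_and_wait() as a hypothetical stand-in for the submit-helper-plus-fence_wait pair above:

	r = amdgpu_gfx_scratch_get(adev, &scratch);	/* resource 1 */
	if (r)
		return r;
	r = amdgpu_ib_get(ring, NULL, 256, &ib);	/* resource 2 */
	if (r)
		goto err1;
	r = submit_and_wait(adev, ring, &ib, &f);	/* hypothetical */
	if (r)
		goto err2;
	/* ... inspect the scratch register ... */
err2:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;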
@@ -3056,6 +3084,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(
+ mec_hdr->ucode_feature_version);
gfx_v7_0_cp_compute_enable(adev, false);
@@ -3078,6 +3108,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ mec2_hdr->ucode_feature_version);
/* MEC2 */
fw_data = (const __le32 *)
@@ -3730,7 +3762,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
int r;
/* allocate rlc buffers */
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
if (adev->asic_type == CHIP_KAVERI) {
adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
adev->gfx.rlc.reg_list_size =
@@ -3754,7 +3786,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* save restore block */
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &adev->gfx.rlc.save_restore_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3795,7 +3829,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &adev->gfx.rlc.clear_state_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -3832,7 +3868,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_size) {
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, &adev->gfx.rlc.cp_table_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -4042,6 +4080,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(
+ hdr->ucode_feature_version);
gfx_v7_0_rlc_stop(adev);
@@ -5098,7 +5138,7 @@ static void gfx_v7_0_print_status(void *handle)
dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
RREG32(mmCP_HPD_EOP_CONTROL));
- for (queue = 0; queue < 8; i++) {
+ for (queue = 0; queue < 8; queue++) {
cik_srbm_select(adev, me, pipe, queue, 0);
dev_info(adev->dev, " queue: %d\n", queue);
dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5564,6 +5604,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.is_lockup = gfx_v7_0_ring_is_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5571,7 +5612,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v7_0_ring_emit_ib,
+ .emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5580,6 +5621,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.is_lockup = gfx_v7_0_ring_is_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7b683fb2173c..53f07439a512 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
+MODULE_FIRMWARE("amdgpu/fiji_me.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
+MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
+
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -217,6 +224,71 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
+static const u32 fiji_golden_common_all[] =
+{
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
+ mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 golden_settings_fiji_a10[] =
+{
+ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_CTRL, 0x00100000, 0xf30fff7f,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
static const u32 golden_settings_iceland_a11[] =
{
mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
@@ -439,6 +511,18 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
iceland_golden_common_all,
(const u32)ARRAY_SIZE(iceland_golden_common_all));
break;
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_program_register_sequence(adev,
+ fiji_golden_common_all,
+ (const u32)ARRAY_SIZE(fiji_golden_common_all));
+ break;
+
case CHIP_TONGA:
amdgpu_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
@@ -526,6 +610,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
+ struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -537,29 +622,27 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- amdgpu_gfx_scratch_free(adev, scratch);
- amdgpu_ib_free(adev, &ib);
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = amdgpu_fence_wait(ib.fence, false);
+
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err2;
+
+ r = fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- amdgpu_ib_free(adev, &ib);
- return r;
+ goto err2;
}
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -569,14 +652,18 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ib.fence->ring->idx, i);
+ ring->idx, i);
+ goto err2;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
- amdgpu_gfx_scratch_free(adev, scratch);
+err2:
+ fence_put(f);
amdgpu_ib_free(adev, &ib);
+err1:
+ amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -587,6 +674,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
DRM_DEBUG("\n");
@@ -600,6 +688,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
+ case CHIP_FIJI:
+ chip_name = "fiji";
+ break;
default:
BUG();
}
@@ -611,6 +702,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +713,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +724,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +744,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +754,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version = le32_to_cpu(
+ cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version = le32_to_cpu(
+ cp_hdr->ucode_feature_version);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1214,6 +1326,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
}
+ case CHIP_FIJI:
case CHIP_TONGA:
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
@@ -1813,10 +1926,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
- if (data & 1)
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- else
- data = 0;
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
@@ -1895,7 +2005,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
}
/**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - gart enable
*
* @rdev: amdgpu_device pointer
*
@@ -1905,7 +2015,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
#define DEFAULT_SH_MEM_BASES (0x6000)
#define FIRST_COMPUTE_VMID (8)
#define LAST_COMPUTE_VMID (16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
@@ -1965,6 +2075,23 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_FIJI:
+ adev->gfx.config.max_shader_engines = 4;
+ adev->gfx.config.max_tile_pipes = 16;
+ adev->gfx.config.max_cu_per_sh = 16;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 8;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+ break;
case CHIP_TONGA:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 8;
@@ -1986,6 +2113,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
@@ -1994,7 +2122,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc5:
case 0x81:
@@ -2003,14 +2130,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
break;
case 0xc7:
case 0x87:
@@ -2018,7 +2143,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
break;
}
@@ -2062,7 +2186,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
/* Get memory bank mapping mode. */
tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2158,7 +2282,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- gmc_v8_0_init_compute_vmid(adev);
+ gfx_v8_0_init_compute_vmid(adev);
mutex_lock(&adev->grbm_idx_mutex);
/*
@@ -2278,7 +2402,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
- adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2364,12 +2487,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
- adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
- adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
- adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
- adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
gfx_v8_0_cp_gfx_enable(adev, false);
@@ -2481,6 +2598,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TONGA:
+ case CHIP_FIJI:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x0000002A);
break;
@@ -2625,7 +2743,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
- adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
@@ -2644,7 +2761,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
- adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
fw_data = (const __le32 *)
(adev->gfx.mec2_fw->data +
@@ -3124,11 +3240,12 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
/* enable the doorbell if requested */
if (use_doorbell) {
- if (adev->asic_type == CHIP_CARRIZO) {
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_FIJI)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- 0x7FFFF << 2);
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3756,7 +3873,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3881,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
- if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
- (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- !need_ctx_switch)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
- control |= INDIRECT_BUFFER_VALID;
-
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -3783,7 +3895,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -3806,6 +3918,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+
+ control |= INDIRECT_BUFFER_VALID;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
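[Note: the split removes the per-packet ring->type checks — the CE-preamble drop and SWITCH_BUFFER handling stay gfx-only, while compute unconditionally sets INDIRECT_BUFFER_VALID. A sketch of the resulting compute control dword, fields as in the hunk above:]
	/* Illustrative: composition of the compute IB control dword. */
	u32 control = INDIRECT_BUFFER_VALID	/* compute rings only */
		    | ib->length_dw		/* IB size in dwords  */
		    | (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);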
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
@@ -3843,7 +3985,8 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
if (ring->adev->asic_type == CHIP_TOPAZ ||
- ring->adev->asic_type == CHIP_TONGA)
+ ring->adev->asic_type == CHIP_TONGA ||
+ ring->adev->asic_type == CHIP_FIJI)
/* hw semaphore bug on these VI asics; return false to fall back to the sw fence wait */
return false;
else {
@@ -4227,7 +4370,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4236,6 +4379,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.is_lockup = gfx_v8_0_ring_is_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -4243,7 +4387,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
- .emit_ib = gfx_v8_0_ring_emit_ib,
+ .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4252,6 +4396,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.is_lockup = gfx_v8_0_ring_is_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
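[Note: both ring-func tables now provide .insert_nop; gfx/compute take the generic helper, while the SDMA engines below override it with a burst variant. A sketch of what the generic fallback is assumed to do:]
	/* Assumed behavior of amdgpu_ring_insert_nop(): one NOP dword
	 * per requested slot, using the ring's own NOP encoding. */
	static void example_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		while (count--)
			amdgpu_ring_write(ring, ring->nop);
	}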
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae37fce36520..774528ab8704 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -523,17 +523,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
@@ -636,7 +630,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
/* base offset of vram pages */
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
@@ -841,7 +835,7 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -852,6 +846,13 @@ static int gmc_v7_0_early_init(void *handle)
return 0;
}
+static int gmc_v7_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
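[Note: the new late_init takes a reference on the VM-fault interrupt source, and the matching amdgpu_irq_put() added to hw_fini below drops it, so the source is enabled exactly while the block is live. In outline:]
	/* late_init: enable (refcount up) the VM fault source */
	r = amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	/* ... device runs ... */
	/* hw_fini: disable (refcount down) before disabling the GART */
	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);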
static int gmc_v7_0_sw_init(void *handle)
{
int r;
@@ -957,7 +958,7 @@ static int gmc_v7_0_hw_init(void *handle)
gmc_v7_0_mc_program(adev);
- if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU)) {
r = gmc_v7_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -976,6 +977,7 @@ static int gmc_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
return 0;
@@ -1172,7 +1174,7 @@ static int gmc_v7_0_soft_reset(void *handle)
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
- if (!(adev->flags & AMDGPU_IS_APU))
+ if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
@@ -1282,7 +1284,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
if (state == AMD_CG_STATE_GATE)
gate = true;
- if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU)) {
gmc_v7_0_enable_mc_mgcg(adev, gate);
gmc_v7_0_enable_mc_ls(adev, gate);
}
@@ -1301,7 +1303,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.early_init = gmc_v7_0_early_init,
- .late_init = NULL,
+ .late_init = gmc_v7_0_late_init,
.sw_init = gmc_v7_0_sw_init,
.sw_fini = gmc_v7_0_sw_fini,
.hw_init = gmc_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8135963a66be..9a07742620d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -61,6 +62,19 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_fiji_a10[] =
+{
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
static const u32 golden_settings_iceland_a11[] =
{
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -90,6 +104,14 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_iceland_a11,
(const u32)ARRAY_SIZE(golden_settings_iceland_a11));
break;
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ break;
case CHIP_TONGA:
amdgpu_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
@@ -202,6 +224,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_TONGA:
chip_name = "tonga";
break;
+ case CHIP_FIJI:
+ chip_name = "fiji";
+ break;
case CHIP_CARRIZO:
return 0;
default: BUG();
@@ -628,19 +653,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
@@ -737,7 +755,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
/* base offset of vram pages */
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
@@ -816,7 +834,7 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMDGPU_IS_APU) {
+ if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -827,6 +845,13 @@ static int gmc_v8_0_early_init(void *handle)
return 0;
}
+static int gmc_v8_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
@@ -934,7 +959,7 @@ static int gmc_v8_0_hw_init(void *handle)
gmc_v8_0_mc_program(adev);
- if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU)) {
r = gmc_v8_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -953,6 +978,7 @@ static int gmc_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
gmc_v8_0_gart_disable(adev);
return 0;
@@ -1147,7 +1173,7 @@ static int gmc_v8_0_soft_reset(void *handle)
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
- if (!(adev->flags & AMDGPU_IS_APU))
+ if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
@@ -1263,7 +1289,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.early_init = gmc_v8_0_early_init,
- .late_init = NULL,
+ .late_init = gmc_v8_0_late_init,
.sw_init = gmc_v8_0_sw_init,
.sw_fini = gmc_v8_0_sw_fini,
.hw_init = gmc_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
index c723602c7b0c..ee6a041cb288 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
@@ -2163,5 +2163,10 @@
#define SDMA_PKT_NOP_HEADER_sub_op_shift 8
#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
+/* define for count field */
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask 0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift 16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
#endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
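[Note: the new count field lets one NOP header stand for a whole run of padding dwords. A sketch of composing such a burst NOP, as used by the SDMA insert_nop/pad_ib changes later in this series (count is the total number of padding dwords):]
	/* One header dword announces (count - 1) trailing NOP dwords. */
	u32 burst_nop = SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
			SDMA_PKT_NOP_HEADER_COUNT(count - 1);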
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c6f1e2f12b5f..c900aa942ade 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -623,7 +623,9 @@ int iceland_smu_init(struct amdgpu_device *adev)
/* Allocate FW image data structure and header buffer */
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+ true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..14e87234171a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -142,6 +143,11 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ if (adev->sdma[i].feature_version >= 20)
+ adev->sdma[i].burst_nop = true;
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -214,6 +220,19 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
}
+static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->nop |
+ SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->nop);
+}
+
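[Note: the (10 - (ring->wptr & 7)) % 8 expression used just below is the closed form of the old pad loop: it yields the NOP count that leaves wptr congruent to 2 mod 8, e.g. wptr & 7 == 5 gives (10 - 5) % 8 == 5 and (5 + 5) & 7 == 2. A quick illustrative check:]
	/* Equivalence check for the padding formula (illustrative only). */
	u32 w, pad;
	for (w = 0; w < 8; w++) {
		pad = (10 - w) % 8;
		BUG_ON(((w + pad) & 7) != 2);	/* always lands on ...010b */
	}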
/**
* sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
*
@@ -241,8 +260,8 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on an 8 DW boundary */
- while ((ring->wptr & 7) != 2)
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+ sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
/* base must be 32 byte aligned */
@@ -541,8 +560,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -671,6 +688,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
+ struct fence *f = NULL;
unsigned i;
unsigned index;
int r;
@@ -686,12 +704,11 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
-
+ memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib);
if (r) {
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- return r;
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -705,19 +722,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = amdgpu_fence_wait(ib.fence, false);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err1;
+
+ r = fence_wait(f, false);
if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- return r;
+ goto err1;
}
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -727,12 +741,17 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ib.fence->ring->idx, i);
+ ring->idx, i);
+ goto err1;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
+
+err1:
+ fence_put(f);
amdgpu_ib_free(adev, &ib);
+err0:
amdgpu_wb_free(adev, index);
return r;
}
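[Note: the reworked test also moves from amdgpu_fence to the generic struct fence API and unwinds through ordered labels, each freeing only what exists at that point. The skeleton of the pattern (submit_and_wait stands in for the helper calls above):]
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r)
		goto err0;	/* only the writeback slot is held */
	r = submit_and_wait();	/* hypothetical stand-in */
	if (r)
		goto err1;	/* fence and IB now need freeing too */
err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);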
@@ -875,8 +894,19 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
{
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+ u32 pad_count;
+ int i;
+
+ pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+ SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
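[Note: vm_pad_ib applies the same trick inside an IB: the pad count rounds length_dw up to a multiple of 8, and on burst-capable firmware the first NOP's count field covers the rest of the run. Worked example:]
	/* 13-dword IB: pad_count = (8 - (13 & 7)) % 8 = 3, the first NOP
	 * carries COUNT(2), and the padded length becomes 16 dwords. */
	u32 pad_count = (8 - (ib->length_dw & 0x7)) % 8;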
/**
@@ -1310,6 +1340,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.is_lockup = sdma_v2_4_ring_is_lockup,
+ .insert_nop = sdma_v2_4_ring_insert_nop,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1346,19 +1377,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
-static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
- amdgpu_ring_write(ring, byte_count);
- amdgpu_ring_write(ring, 0); /* src/dst endian swap */
- amdgpu_ring_write(ring, lower_32_bits(src_offset));
- amdgpu_ring_write(ring, upper_32_bits(src_offset));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = byte_count;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
@@ -1371,16 +1402,16 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
*
* Fill GPU buffers using the DMA engine (VI).
*/
-static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
- amdgpu_ring_write(ring, src_data);
- amdgpu_ring_write(ring, byte_count);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
@@ -1413,5 +1444,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+ adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..9bfe92df15f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
@@ -80,6 +82,24 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
+static const u32 golden_settings_fiji_a10[] =
+{
+ mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+ mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+ mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
static const u32 cz_golden_settings_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -122,6 +142,14 @@ static const u32 cz_mgcg_cgcg_init[] =
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ break;
case CHIP_TONGA:
amdgpu_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
@@ -159,6 +187,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
int err, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -166,6 +195,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_TONGA:
chip_name = "tonga";
break;
+ case CHIP_FIJI:
+ chip_name = "fiji";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
@@ -183,6 +215,11 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->sdma[i].fw);
if (err)
goto out;
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ if (adev->sdma[i].feature_version >= 20)
+ adev->sdma[i].burst_nop = true;
if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -269,6 +306,19 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->nop |
+ SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->nop);
+}
+
/**
* sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
*
@@ -295,8 +345,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on an 8 DW boundary */
- while ((ring->wptr & 7) != 2)
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+ sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
@@ -630,8 +679,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
fw_data = (const __le32 *)
(adev->sdma[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -761,6 +808,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
+ struct fence *f = NULL;
unsigned i;
unsigned index;
int r;
@@ -776,12 +824,11 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
-
+ memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(ring, NULL, 256, &ib);
if (r) {
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- return r;
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -795,19 +842,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
- DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = amdgpu_fence_wait(ib.fence, false);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ &f);
+ if (r)
+ goto err1;
+
+ r = fence_wait(f, false);
if (r) {
- amdgpu_ib_free(adev, &ib);
- amdgpu_wb_free(adev, index);
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- return r;
+ goto err1;
}
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -817,12 +861,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ib.fence->ring->idx, i);
+ ring->idx, i);
+ goto err1;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
+err1:
+ fence_put(f);
amdgpu_ib_free(adev, &ib);
+err0:
amdgpu_wb_free(adev, index);
return r;
}
@@ -965,8 +1013,19 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
{
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+ struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+ u32 pad_count;
+ int i;
+
+ pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+ SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
@@ -1404,6 +1463,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.is_lockup = sdma_v3_0_ring_is_lockup,
+ .insert_nop = sdma_v3_0_ring_insert_nop,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1440,19 +1500,19 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
-static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
- amdgpu_ring_write(ring, byte_count);
- amdgpu_ring_write(ring, 0); /* src/dst endian swap */
- amdgpu_ring_write(ring, lower_32_bits(src_offset));
- amdgpu_ring_write(ring, upper_32_bits(src_offset));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = byte_count;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
@@ -1465,16 +1525,16 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
*
* Fill GPU buffers using the DMA engine (VI).
*/
-static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
- amdgpu_ring_write(ring, lower_32_bits(dst_offset));
- amdgpu_ring_write(ring, upper_32_bits(dst_offset));
- amdgpu_ring_write(ring, src_data);
- amdgpu_ring_write(ring, byte_count);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
@@ -1507,5 +1567,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+ adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
index 099b7b56113c..e5ebd084288d 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
@@ -2236,5 +2236,10 @@
#define SDMA_PKT_NOP_HEADER_sub_op_shift 8
#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
+/* define for count field */
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask 0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift 16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
#endif /* __TONGA_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 5fc53a40c7ac..1f5ac941a610 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -761,7 +761,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
/* Allocate FW image data structure and header buffer */
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+ true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
@@ -769,7 +771,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
/* Allocate buffer for SMU internal buffer */
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+ true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 4efd671d7a9b..5fac5da694f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
int r;
r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto error;
}
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
return r;
}
@@ -886,6 +886,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.test_ring = uvd_v4_2_ring_test_ring,
.test_ib = uvd_v4_2_ring_test_ib,
.is_lockup = amdgpu_ring_test_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index b756bd99c0fd..2d5c59c318af 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
int r;
r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto error;
}
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
return r;
}
@@ -825,6 +825,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = uvd_v5_0_ring_test_ib,
.is_lockup = amdgpu_ring_test_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 49aa931b2cb4..d9f553fce531 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -575,7 +575,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
{
- struct amdgpu_fence *fence = NULL;
+ struct fence *fence = NULL;
int r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_fence_wait(fence, false);
+ r = fence_wait(fence, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto error;
}
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
- amdgpu_fence_unref(&fence);
+ fence_put(fence);
return r;
}
@@ -805,6 +805,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = uvd_v6_0_ring_test_ib,
.is_lockup = amdgpu_ring_test_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 303d961d57bd..cd16df543f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -643,6 +643,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.is_lockup = amdgpu_ring_test_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..f0656dfb53f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -32,9 +32,11 @@
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
+
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
if(idx == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,59 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
return 0;
}
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
+#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ unsigned ret;
+
+ /* Fiji is single pipe */
+ if (adev->asic_type == CHIP_FIJI) {
+ ret = AMDGPU_VCE_HARVEST_VCE1;
+ return ret;
+ }
+
+ /* Tonga and CZ are dual or single pipe */
+ if (adev->flags & AMD_IS_APU)
+ tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ VCE_HARVEST_FUSE_MACRO__MASK) >>
+ VCE_HARVEST_FUSE_MACRO__SHIFT;
+ else
+ tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+ CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+ CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+ switch (tmp) {
+ case 1:
+ ret = AMDGPU_VCE_HARVEST_VCE0;
+ break;
+ case 2:
+ ret = AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ case 3:
+ ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
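[Note: with the harvest mask in place, the idle/reset paths below only consider surviving instances. For example, on Fiji (VCE1 harvested) the busy check reduces to the VCE0 bit — illustrative:]
	u32 mask = SRBM_STATUS2__VCE0_BUSY_MASK;	/* VCE1 harvested */
	bool idle = !(RREG32(mmSRBM_STATUS2) & mask);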
static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+ if ((adev->vce.harvest_config &
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+ (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+ return -ENOENT;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
@@ -371,17 +426,41 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
static bool vce_v3_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 mask = 0;
+ int idx;
+
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
- return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+ if (idx == 0)
+ mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+ else
+ mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+ }
+
+ return !(RREG32(mmSRBM_STATUS2) & mask);
}
static int vce_v3_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 mask = 0;
+ int idx;
+
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
+ if (idx == 0)
+ mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+ else
+ mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+ }
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+ if (!(RREG32(mmSRBM_STATUS2) & mask))
return 0;
}
return -ETIMEDOUT;
@@ -390,9 +469,21 @@ static int vce_v3_0_wait_for_idle(void *handle)
static int vce_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 mask = 0;
+ int idx;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
+ if (idx == 0)
+ mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+ else
+ mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+ }
+ WREG32_P(mmSRBM_SOFT_RESET, mask,
+ ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+ SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
mdelay(5);
return vce_v3_0_start(adev);
@@ -553,6 +644,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.is_lockup = amdgpu_ring_test_lockup,
+ .insert_nop = amdgpu_ring_insert_nop,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fa5a4448531d..552d9e75ad1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX 0x180
+#define mmMP0PUB_IND_DATA 0x181
+
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ r = RREG32(mmMP0PUB_IND_DATA);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(mmMP0PUB_IND_INDEX, (reg));
+ WREG32(mmMP0PUB_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
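[Note: callers are unchanged — reads and writes still go through adev->smc_rreg/smc_wreg, which now route to the MP0PUB index/data pair on APUs. A hedged usage sketch; ixEXAMPLE_SMC_REG is a placeholder, not a real register:]
	u32 v = adev->smc_rreg(adev, ixEXAMPLE_SMC_REG);	/* hypothetical */
	adev->smc_wreg(adev, ixEXAMPLE_SMC_REG, v | 1);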
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -177,6 +203,17 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+ mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+ mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+ mmPCIE_DATA, 0x000f0000, 0x00000000,
+ mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+ mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
static const u32 iceland_mgcg_cgcg_init[] =
{
mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
@@ -206,6 +243,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
iceland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
break;
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ break;
case CHIP_TONGA:
amdgpu_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
@@ -235,7 +277,7 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return reference_clock;
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
@@ -336,6 +378,26 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
+ {mmGRBM_STATUS2, false},
+ {mmGRBM_STATUS_SE0, false},
+ {mmGRBM_STATUS_SE1, false},
+ {mmGRBM_STATUS_SE2, false},
+ {mmGRBM_STATUS_SE3, false},
+ {mmSRBM_STATUS, false},
+ {mmSRBM_STATUS2, false},
+ {mmSRBM_STATUS3, false},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
+ {mmCP_STAT, false},
+ {mmCP_STALLED_STAT1, false},
+ {mmCP_STALLED_STAT2, false},
+ {mmCP_STALLED_STAT3, false},
+ {mmCP_CPF_BUSY_STAT, false},
+ {mmCP_CPF_STALLED_STAT1, false},
+ {mmCP_CPF_STATUS, false},
+ {mmCP_CPC_BUSY_STAT, false},
+ {mmCP_CPC_STALLED_STAT1, false},
+ {mmCP_CPC_STATUS, false},
{mmGB_ADDR_CONFIG, false},
{mmMC_ARB_RAMCFG, false},
{mmGB_TILE_MODE0, false},
@@ -423,6 +485,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
asic_register_table = tonga_allowed_read_registers;
size = ARRAY_SIZE(tonga_allowed_read_registers);
break;
+ case CHIP_FIJI:
case CHIP_TONGA:
case CHIP_CARRIZO:
asic_register_table = cz_allowed_read_registers;
@@ -725,7 +788,7 @@ static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
srbm_soft_reset =
REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
- if (!(adev->flags & AMDGPU_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU)) {
if (reset_mask & AMDGPU_RESET_MC)
srbm_soft_reset =
REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
@@ -945,7 +1008,7 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
if (amdgpu_pcie_gen2 == 0)
return;
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -973,7 +1036,7 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
u32 tmp;
/* not necessary on CZ */
- if (adev->flags & AMDGPU_IS_APU)
+ if (adev->flags & AMD_IS_APU)
return;
tmp = RREG32(mmBIF_DOORBELL_APER_EN);
@@ -1101,6 +1164,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &fiji_dpm_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1176,6 +1307,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_blocks = topaz_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+ break;
case CHIP_TONGA:
adev->ip_blocks = tonga_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
@@ -1222,8 +1357,13 @@ static int vi_common_early_init(void *handle)
bool smc_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->smc_rreg = &vi_smc_rreg;
- adev->smc_wreg = &vi_smc_wreg;
+ if (adev->flags & AMD_IS_APU) {
+ adev->smc_rreg = &cz_smc_rreg;
+ adev->smc_wreg = &cz_smc_wreg;
+ } else {
+ adev->smc_rreg = &vi_smc_rreg;
+ adev->smc_wreg = &vi_smc_wreg;
+ }
adev->pcie_rreg = &vi_pcie_rreg;
adev->pcie_wreg = &vi_pcie_wreg;
adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
@@ -1248,6 +1388,7 @@ static int vi_common_early_init(void *handle)
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
break;
+ case CHIP_FIJI:
case CHIP_TONGA:
adev->has_uvd = true;
adev->cg_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index 3b45332f5df4..fc120ba18aad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -30,7 +30,7 @@ int cz_smu_start(struct amdgpu_device *adev);
int cz_smu_fini(struct amdgpu_device *adev);
extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
-
+extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 31bb89452e12..d98aa9d82fa1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -66,6 +66,11 @@
#define AMDGPU_NUM_OF_VMIDS 8
+#define PIPEID(x) ((x) << 0)
+#define MEID(x) ((x) << 2)
+#define VMID(x) ((x) << 4)
+#define QUEUEID(x) ((x) << 8)
+
#define RB_BITMAP_WIDTH_PER_SH 2
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
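[Note: the new field helpers pack the SRBM select parameters into a single dword. An illustrative composition with arbitrary values:]
	/* me 1, pipe 0, queue 3, vmid 8 -> one SRBM select value */
	u32 sel = MEID(1) | PIPEID(0) | QUEUEID(3) | VMID(8);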
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 8dfac37ff327..e13c67c8d2c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
- depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+ depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 28551153ec6d..7fc9b0f444cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,8 @@
# Makefile for Heterogeneous System Architecture support for AMD GPU devices
#
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ \
+ -Idrivers/gpu/drm/amd/include/asic_reg
amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 183be5b8414f..48769d12dd7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -65,17 +65,6 @@
#define AQL_ENABLE 1
-#define SDMA_RB_VMID(x) (x << 24)
-#define SDMA_RB_ENABLE (1 << 0)
-#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
-#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define SDMA_OFFSET(x) (x << 0)
-#define SDMA_DB_ENABLE (1 << 28)
-#define SDMA_ATC (1 << 0)
-#define SDMA_VA_PTR32 (1 << 4)
-#define SDMA_VA_SHARED_BASE(x) (x << 8)
-
#define GRBM_GFX_INDEX 0x30800
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index c991973019d0..c6a1b4cc6458 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,7 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 75312c82969f..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -80,7 +80,12 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x1318, &kaveri_device_info }, /* Kaveri */
{ 0x131B, &kaveri_device_info }, /* Kaveri */
{ 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info } /* Kaveri */
+ { 0x131D, &kaveri_device_info }, /* Kaveri */
+ { 0x9870, &carrizo_device_info }, /* Carrizo */
+ { 0x9874, &carrizo_device_info }, /* Carrizo */
+ { 0x9875, &carrizo_device_info }, /* Carrizo */
+ { 0x9876, &carrizo_device_info }, /* Carrizo */
+ { 0x9877, &carrizo_device_info } /* Carrizo */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 9ce8a20a7aff..c6f435aa803f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -23,6 +23,7 @@
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@ static int register_process_cik(struct device_queue_manager *dqm,
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
- uint32_t value = SDMA_ATC;
+ uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
if (q->process->is_32bit_user_mode)
- value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+ get_sh_mem_bases_32(qpd_to_pdd(qpd));
else
- value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
- qpd_to_pdd(qpd)));
+ value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
q->properties.sdma_vm_addr = value;
}
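As a reading aid, a minimal sketch of the shift/mask idiom init_sdma_vm() now uses; the field positions (ATC at bit 0, PTR32 at bit 4, SHARED_BASE at bits 8..15) are assumptions standing in for the generated oss_2_4_sh_mask.h values:

/* Illustrative field positions, not the generated header's values. */
#define ATC_SHIFT         0
#define PTR32_SHIFT       4
#define SHARED_BASE_SHIFT 8
#define SHARED_BASE_MASK  0x0000ff00

/* e.g. a 64-bit process whose shared-base nybble pair is 0x88: */
unsigned int nybble64 = 0x88;
unsigned int value = (1 << ATC_SHIFT) |
		     ((nybble64 << SHARED_BASE_SHIFT) & SHARED_BASE_MASK);
/* value == 0x00008801: ATC on, PTR32 off, shared base nybbles 0x88 */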
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 4c15212a3899..7e9cae9d349b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -22,6 +22,9 @@
*/
#include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -37,14 +40,40 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
{
- pr_warn("amdkfd: VI DQM is not currently supported\n");
-
ops->set_cache_memory_policy = set_cache_memory_policy_vi;
ops->register_process = register_process_vi;
ops->initialize = initialize_cpsch_vi;
ops->init_sdma_vm = init_sdma_vm;
}
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+ * scratch and GPUVM apertures.
+ * The hardware fills in the remaining 59 bits according to the
+ * following pattern:
+ * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
+ * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
+ * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
+ *
+ * (where X/Y is the configurable nybble with the low-bit 0)
+ *
+ * LDS and scratch will have the same top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+ * GPUVM can have a different top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+ * We don't bother to support different top nybbles
+ * for LDS/Scratch and GPUVM.
+ */
+
+ BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return top_address_nybble << 12 |
+ (top_address_nybble << 12) <<
+ SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
+
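A worked example of compute_sh_mem_bases_64bit(), assuming the VI register layout where SH_MEM_BASES.PRIVATE_BASE occupies bits 0..15 and SHARED_BASE bits 16..31 (i.e. SH_MEM_BASES__SHARED_BASE__SHIFT == 16); the concrete nybble is illustrative:

/* top_address_nybble must be even, non-zero and <= 0xE (see BUG_ON above). */
unsigned int nybble = 0x8;
uint32_t private_base = nybble << 12;		/* 0x8000 */
uint32_t bases = private_base | (private_base << 16);
/* bases == 0x80008000: LDS/scratch and GPUVM all live under nybble 0x8 */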
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -52,18 +82,83 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
- return false;
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_CC :
+ MTYPE_NC;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_CC :
+ MTYPE_NC;
+
+ qpd->sh_mem_config = (qpd->sh_mem_config &
+ SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+ SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+ return true;
}
static int register_process_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return -1;
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ BUG_ON(!dqm || !qpd);
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+ SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ if (qpd->pqm->process->is_32bit_user_mode) {
+ temp = get_sh_mem_bases_32(pdd);
+ qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+ qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+ SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ } else {
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+ SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ }
+
+ pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+ return 0;
}
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
+ uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+ if (q->process->is_32bit_user_mode)
+ value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+ get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ else
+ value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+ q->properties.sdma_vm_addr = value;
}
static int initialize_cpsch_vi(struct device_queue_manager *dqm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 35b987574633..2b655103ba79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -33,7 +33,7 @@
#include <linux/time.h>
#include "kfd_priv.h"
#include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
#include <asm/processor.h>
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 434979428fc0..d83de985e88c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -27,6 +27,7 @@
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
static inline struct cik_mqd *get_mqd(void *mqd)
{
@@ -214,17 +215,20 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
BUG_ON(!mm || !mqd || !q);
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl =
- SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
- SDMA_RB_VMID(q->vmid) |
- SDMA_RPTR_WRITEBACK_ENABLE |
- SDMA_RPTR_WRITEBACK_TIMER(6);
+ m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+ SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+ m->sdma_rlc_doorbell = q->doorbell_off <<
+ SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+ 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
@@ -234,7 +238,9 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+ m->sdma_rlc_rb_cntl |=
+ 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
q->is_active = true;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b3a7e3ba1e38..fa32c32fa1c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -22,12 +22,255 @@
*/
#include <linux/printk.h>
+#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+ return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ uint64_t addr;
+ struct vi_mqd *m;
+
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+ mqd_mem_obj);
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, sizeof(struct vi_mqd));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+ m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+ MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+ 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+ 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
+ m->cp_hqd_iq_rptr = 1;
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr)
+{
+ return mm->dev->kfd2kgd->hqd_load
+ (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q, unsigned int mtype,
+ unsigned int atc_bit)
+{
+ struct vi_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+ atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+ mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+ m->cp_hqd_pq_doorbell_control =
+ 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+ q->doorbell_off <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+ m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+ mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+ m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+ 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+ mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+ m->cp_hqd_eop_control |=
+ ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_eop_base_addr_lo =
+ lower_32_bits(q->eop_ring_buffer_address >> 8);
+ m->cp_hqd_eop_base_addr_hi =
+ upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+ m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+ mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+ m->cp_hqd_vmid = q->vmid;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+ 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+ }
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
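The ffs(...) - 1 - 1 idiom above encodes the ring size as log2 of the size in dwords minus one; a worked example with an illustrative size (the hardware's reading of the field as 2^(n+1) dwords is an assumption based on that offset):

/* queue_size is in bytes; the packet queue is measured in dwords. */
unsigned int queue_size = 4096;				/* 1024 dwords */
unsigned int dwords = queue_size / sizeof(unsigned int);
unsigned int encoded = ffs(dwords) - 1 - 1;		/* log2(1024) - 1 == 9 */
/* the HQD would then treat the field as 2^(encoded + 1) == 1024 dwords */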
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_destroy
+ (mm->dev->kgd, type, timeout,
+ pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_is_occupied(
+ mm->dev->kgd, queue_address,
+ pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+ if (retval != 0)
+ return retval;
+
+ m = get_mqd(*mqd);
+
+ m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+ return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+ if (retval != 0)
+ return retval;
+
+ m = get_mqd(mqd);
+ m->cp_hqd_vmid = q->vmid;
+ return retval;
+}
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_dev *dev)
{
- pr_warn("amdkfd: VI MQD is not currently supported\n");
- return NULL;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dev);
+ BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+ mqd->dev = dev;
+
+ switch (type) {
+ case KFD_MQD_TYPE_CP:
+ case KFD_MQD_TYPE_COMPUTE:
+ mqd->init_mqd = init_mqd;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_HIQ:
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_SDMA:
+ break;
+ default:
+ kfree(mqd);
+ return NULL;
+ }
+
+ return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 99b6d28a11c3..90f391434fa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -27,6 +27,7 @@
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
bool *over_subscription)
{
unsigned int process_count, queue_count;
+ unsigned int map_queue_size;
BUG_ON(!pm || !rlib_size || !over_subscription);
@@ -69,9 +71,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
pr_debug("kfd: over subscribed runlist\n");
}
+ map_queue_size =
+ (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+ sizeof(struct pm4_mes_map_queues) :
+ sizeof(struct pm4_map_queues);
/* calculate run list ib allocation size */
*rlib_size = process_count * sizeof(struct pm4_map_process) +
- queue_count * sizeof(struct pm4_map_queues);
+ queue_count * map_queue_size;
/*
* Increase the allocation size in case we need a chained run list
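Putting the hunk above together, a minimal sketch of the size rule with made-up counts (2 processes, 8 queues, Carrizo path); the original code then grows the result further to leave room for a chained runlist, per the comment above:

/* Illustrative numbers only. */
unsigned int process_count = 2, queue_count = 8;
size_t map_queue_size = sizeof(struct pm4_mes_map_queues);	/* CHIP_CARRIZO */
size_t rlib_size = process_count * sizeof(struct pm4_map_process) +
		   queue_count * map_queue_size;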
@@ -176,6 +182,71 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q, bool is_static)
+{
+ struct pm4_mes_map_queues *packet;
+ bool use_static = is_static;
+
+ BUG_ON(!pm || !buffer || !q);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ packet = (struct pm4_mes_map_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+ packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_mes_map_queues));
+ packet->bitfields2.alloc_format =
+ alloc_format__mes_map_queues__one_per_pipe_vi;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__compute_vi;
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__normal_compute_vi;
+
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ if (use_static)
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__normal_latency_static_queue_vi;
+ break;
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.queue_type =
+ queue_type__mes_map_queues__debug_interface_queue_vi;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__sdma0_vi;
+ use_static = false; /* no static queues under SDMA */
+ break;
+ default:
+ pr_err("kfd: in %s queue type %d\n", __func__,
+ q->properties.type);
+ BUG();
+ break;
+ }
+ packet->bitfields3.doorbell_offset =
+ q->properties.doorbell_off;
+
+ packet->mqd_addr_lo =
+ lower_32_bits(q->gart_mqd_addr);
+
+ packet->mqd_addr_hi =
+ upper_32_bits(q->gart_mqd_addr);
+
+ packet->wptr_addr_lo =
+ lower_32_bits((uint64_t)q->properties.write_ptr);
+
+ packet->wptr_addr_hi =
+ upper_32_bits((uint64_t)q->properties.write_ptr);
+
+ return 0;
+}
+
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
@@ -292,8 +363,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
- retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
- kq->queue, qpd->is_debug);
+ if (pm->dqm->dev->device_info->asic_family ==
+ CHIP_CARRIZO)
+ retval = pm_create_map_queue_vi(pm,
+ &rl_buffer[rl_wptr],
+ kq->queue,
+ qpd->is_debug);
+ else
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr],
+ kq->queue,
+ qpd->is_debug);
if (retval != 0)
return retval;
@@ -309,8 +389,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
- retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
- q, qpd->is_debug);
+ if (pm->dqm->dev->device_info->asic_family ==
+ CHIP_CARRIZO)
+ retval = pm_create_map_queue_vi(pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
+ else
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr],
+ q,
+ qpd->is_debug);
if (retval != 0)
return retval;
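The two if/else blocks above make the same ASIC-family decision at each call site; a hedged refactor sketch (not the driver's code) shows how the choice could be hoisted into a function pointer selected once:

/* Illustrative alternative, assuming the signatures shown earlier. */
typedef int (*map_queue_fn)(struct packet_manager *pm, uint32_t *buffer,
			    struct queue *q, bool is_static);

static map_queue_fn pick_map_queue(struct packet_manager *pm)
{
	return (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
		pm_create_map_queue_vi : pm_create_map_queue;
}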
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644
index 000000000000..08c721922812
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+ struct {
+ uint32_t reserved1 : 8; /* < reserved */
+ uint32_t opcode : 8; /* < IT opcode */
+ uint32_t count : 14;/* < number of DWORDs - 1 in the
+ information body. */
+ uint32_t type : 2; /* < packet identifier.
+ It should be 3 for type 3 packets */
+ };
+ uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
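A minimal sketch of how a type-3 header is packed from this union; build_pm4_header(), used earlier in kfd_packet_manager.c, is assumed to do essentially this:

/* Illustrative: pack opcode/count/type into one header dword. */
static uint32_t make_pm4_type3_header(uint8_t opcode, uint32_t body_dwords)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = body_dwords - 1;	/* "number of DWORDs - 1" */
	header.type = 3;		/* type-3 packet */

	return header.u32All;
}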
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+ queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+ queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+ queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t vmid_mask:16;
+ uint32_t unmap_latency:8;
+ uint32_t reserved1:5;
+ enum mes_set_resources_queue_type_enum queue_type:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ uint32_t queue_mask_lo;
+ uint32_t queue_mask_hi;
+ uint32_t gws_mask_lo;
+ uint32_t gws_mask_hi;
+
+ union {
+ struct {
+ uint32_t oac_mask:16;
+ uint32_t reserved2:16;
+ } bitfields7;
+ uint32_t ordinal7;
+ };
+
+ union {
+ struct {
+ uint32_t gds_heap_base:6;
+ uint32_t reserved3:5;
+ uint32_t gds_heap_size:6;
+ uint32_t reserved4:15;
+ } bitfields8;
+ uint32_t ordinal8;
+ };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:2;
+ uint32_t ib_base_lo:30;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t ib_base_hi:16;
+ uint32_t reserved2:16;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t ib_size:20;
+ uint32_t chain:1;
+ uint32_t offload_polling:1;
+ uint32_t reserved3:1;
+ uint32_t valid:1;
+ uint32_t reserved4:8;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t page_table_base:28;
+ uint32_t reserved2:4;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_mem_config;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
+
+ union {
+ struct {
+ uint32_t num_gws:6;
+ uint32_t reserved3:2;
+ uint32_t num_oac:4;
+ uint32_t reserved4:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields10;
+ uint32_t ordinal10;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+ queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+ queue_type__mes_map_queues__normal_compute_vi = 0,
+ queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+ queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+ alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+ engine_sel__mes_map_queues__compute_vi = 0,
+ engine_sel__mes_map_queues__sdma0_vi = 2,
+ engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:4;
+ enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+ uint32_t reserved2:15;
+ enum mes_map_queues_queue_type_vi_enum queue_type:3;
+ enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+ enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t reserved3:1;
+ uint32_t check_disable:1;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved4:3;
+ uint32_t queue:6;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t mqd_addr_lo;
+ uint32_t mqd_addr_hi;
+ uint32_t wptr_addr_lo;
+ uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+ interrupt_sel__mes_query_status__completion_status = 0,
+ interrupt_sel__mes_query_status__process_status = 1,
+ interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+ command__mes_query_status__interrupt_only = 0,
+ command__mes_query_status__fence_only_immediate = 1,
+ command__mes_query_status__fence_only_after_write_ack = 2,
+ command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+ engine_sel__mes_query_status__compute = 0,
+ engine_sel__mes_query_status__sdma0_queue = 2,
+ engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t context_id:28;
+ enum mes_query_status_interrupt_sel_enum
+ interrupt_sel:2;
+ enum mes_query_status_command_enum command:2;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved2:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved3:2;
+ enum mes_query_status_engine_sel_enum engine_sel:3;
+ uint32_t reserved4:4;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+ action__mes_unmap_queues__preempt_queues = 0,
+ action__mes_unmap_queues__reset_queues = 1,
+ action__mes_unmap_queues__disable_process_queues = 2,
+ action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+ queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+ queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+ engine_sel__mes_unmap_queues__compute = 0,
+ engine_sel__mes_unmap_queues__sdma0 = 2,
+ engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_mes_unmap_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ enum mes_unmap_queues_action_enum action:2;
+ uint32_t reserved1:2;
+ enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:20;
+ enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved3:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved4:2;
+ uint32_t doorbell_offset0:21;
+ uint32_t reserved5:9;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t reserved6:2;
+ uint32_t doorbell_offset1:21;
+ uint32_t reserved7:9;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+ union {
+ struct {
+ uint32_t reserved8:2;
+ uint32_t doorbell_offset2:21;
+ uint32_t reserved9:9;
+ } bitfields5;
+ uint32_t ordinal5;
+ };
+
+ union {
+ struct {
+ uint32_t reserved10:2;
+ uint32_t doorbell_offset3:21;
+ uint32_t reserved11:9;
+ } bitfields6;
+ uint32_t ordinal6;
+ };
+};
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p);
+
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
+
if (pdd->reset_wavefronts) {
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
* We don't call amd_iommu_unbind_pasid() here
* because the IOMMU called us.
*/
- if (pdd)
- pdd->bound = false;
+ pdd->bound = false;
mutex_unlock(&p->mutex);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c25728bc388a..74909e72a009 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1186,6 +1186,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* TODO: Retrieve max engine clock values from KGD
*/
+ if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+ pr_info("amdkfd: adding doorbell packet type capability\n");
+ }
+
res = 0;
err:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 989624b3cd14..c3ddb9b95ff8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -40,6 +40,7 @@
#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
#define HSA_CAP_RESERVED 0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
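These capability bits are exported to user space via the topology sysfs nodes; a sketch of extracting a flag and a multi-bit field from node_props.capability (the composed value is illustrative):

uint32_t capability = 0x00001300;	/* e.g. dev->node_props.capability */
bool packet_doorbells = capability & HSA_CAP_DOORBELL_PACKET_TYPE;
unsigned int watch_points =
	(capability & HSA_CAP_WATCH_POINTS_TOTALBITS_MASK) >>
	HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT;
/* 0x00001300: packet-type doorbells present, 3 watch-point bits */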
struct kfd_node_properties {
uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 5bdf1b4397a0..68a8eaa1b7d0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,6 +23,45 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__
+#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+
+/*
+ * Supported GPU families (aligned with amdgpu_drm.h)
+ */
+#define AMD_FAMILY_UNKNOWN 0
+#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
+#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
+#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
+#define AMD_FAMILY_CZ 135 /* Carrizo */
+
+/*
+ * Supported ASIC types
+ */
+enum amd_asic_type {
+ CHIP_BONAIRE = 0,
+ CHIP_KAVERI,
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
+ CHIP_TOPAZ,
+ CHIP_TONGA,
+ CHIP_FIJI,
+ CHIP_CARRIZO,
+ CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum amd_chip_flags {
+ AMD_ASIC_MASK = 0x0000ffffUL,
+ AMD_FLAGS_MASK = 0xffff0000UL,
+ AMD_IS_MOBILITY = 0x00010000UL,
+ AMD_IS_APU = 0x00020000UL,
+ AMD_IS_PX = 0x00040000UL,
+ AMD_EXP_HW_SUPPORT = 0x00080000UL,
+};
+
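The low half of the flags word carries the ASIC id and the high half carries boolean properties; a minimal usage sketch (the composed value is illustrative):

unsigned long flags = CHIP_CARRIZO | AMD_IS_APU;
enum amd_asic_type asic = flags & AMD_ASIC_MASK;	/* CHIP_CARRIZO */
bool is_apu = flags & AMD_IS_APU;			/* true */
bool is_mobile = flags & AMD_IS_MOBILITY;		/* false */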
enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
new file mode 100644
index 000000000000..44b1855cb8df
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -0,0 +1,1246 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_D_H
+#define SMU_7_1_3_D_H
+
+#define mmGCK_SMC_IND_INDEX 0x80
+#define mmGCK0_GCK_SMC_IND_INDEX 0x80
+#define mmGCK1_GCK_SMC_IND_INDEX 0x82
+#define mmGCK2_GCK_SMC_IND_INDEX 0x84
+#define mmGCK3_GCK_SMC_IND_INDEX 0x86
+#define mmGCK_SMC_IND_DATA 0x81
+#define mmGCK0_GCK_SMC_IND_DATA 0x81
+#define mmGCK1_GCK_SMC_IND_DATA 0x83
+#define mmGCK2_GCK_SMC_IND_DATA 0x85
+#define mmGCK3_GCK_SMC_IND_DATA 0x87
+#define ixGCK_MCLK_FUSES 0xc0500008
+#define ixCG_DCLK_CNTL 0xc050009c
+#define ixCG_DCLK_STATUS 0xc05000a0
+#define ixCG_VCLK_CNTL 0xc05000a4
+#define ixCG_VCLK_STATUS 0xc05000a8
+#define ixCG_ECLK_CNTL 0xc05000ac
+#define ixCG_ECLK_STATUS 0xc05000b0
+#define ixCG_ACLK_CNTL 0xc05000dc
+#define ixCG_MCLK_CNTL 0xc0500120
+#define ixCG_MCLK_STATUS 0xc0500124
+#define ixGCK_DFS_BYPASS_CNTL 0xc0500118
+#define ixCG_SPLL_FUNC_CNTL 0xc0500140
+#define ixCG_SPLL_FUNC_CNTL_2 0xc0500144
+#define ixCG_SPLL_FUNC_CNTL_3 0xc0500148
+#define ixCG_SPLL_FUNC_CNTL_4 0xc050014c
+#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
+#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
+#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixSPLL_CNTL_MODE 0xc0500160
+#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
+#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
+#define ixMPLL_BYPASSCLK_SEL 0xc050019c
+#define ixCG_CLKPIN_CNTL 0xc05001a0
+#define ixCG_CLKPIN_CNTL_2 0xc05001a4
+#define ixCG_CLKPIN_CNTL_DC 0xc0500204
+#define ixTHM_CLK_CNTL 0xc05001a8
+#define ixMISC_CLK_CTRL 0xc05001ac
+#define ixGCK_PLL_TEST_CNTL 0xc05001c0
+#define ixGCK_PLL_TEST_CNTL_2 0xc05001c4
+#define ixGCK_ADFS_CLK_BYPASS_CNTL1 0xc05001c8
+#define mmSMC_IND_INDEX 0x80
+#define mmSMC0_SMC_IND_INDEX 0x80
+#define mmSMC1_SMC_IND_INDEX 0x82
+#define mmSMC2_SMC_IND_INDEX 0x84
+#define mmSMC3_SMC_IND_INDEX 0x86
+#define mmSMC_IND_DATA 0x81
+#define mmSMC0_SMC_IND_DATA 0x81
+#define mmSMC1_SMC_IND_DATA 0x83
+#define mmSMC2_SMC_IND_DATA 0x85
+#define mmSMC3_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_0 0x80
+#define mmSMC_IND_DATA_0 0x81
+#define mmSMC_IND_INDEX_1 0x82
+#define mmSMC_IND_DATA_1 0x83
+#define mmSMC_IND_INDEX_2 0x84
+#define mmSMC_IND_DATA_2 0x85
+#define mmSMC_IND_INDEX_3 0x86
+#define mmSMC_IND_DATA_3 0x87
+#define mmSMC_IND_INDEX_4 0x88
+#define mmSMC_IND_DATA_4 0x89
+#define mmSMC_IND_INDEX_5 0x8a
+#define mmSMC_IND_DATA_5 0x8b
+#define mmSMC_IND_INDEX_6 0x8c
+#define mmSMC_IND_DATA_6 0x8d
+#define mmSMC_IND_INDEX_7 0x8e
+#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_ACCESS_CNTL 0x92
+#define mmSMC_MESSAGE_0 0x94
+#define mmSMC_RESP_0 0x95
+#define mmSMC_MESSAGE_1 0x96
+#define mmSMC_RESP_1 0x97
+#define mmSMC_MESSAGE_2 0x98
+#define mmSMC_RESP_2 0x99
+#define mmSMC_MESSAGE_3 0x9a
+#define mmSMC_RESP_3 0x9b
+#define mmSMC_MESSAGE_4 0x9c
+#define mmSMC_RESP_4 0x9d
+#define mmSMC_MESSAGE_5 0x9e
+#define mmSMC_RESP_5 0x9f
+#define mmSMC_MESSAGE_6 0xa0
+#define mmSMC_RESP_6 0xa1
+#define mmSMC_MESSAGE_7 0xa2
+#define mmSMC_RESP_7 0xa3
+#define mmSMC_MSG_ARG_0 0xa4
+#define mmSMC_MSG_ARG_1 0xa5
+#define mmSMC_MSG_ARG_2 0xa6
+#define mmSMC_MSG_ARG_3 0xa7
+#define mmSMC_MSG_ARG_4 0xa8
+#define mmSMC_MSG_ARG_5 0xa9
+#define mmSMC_MSG_ARG_6 0xaa
+#define mmSMC_MSG_ARG_7 0xab
+#define mmSMC_MESSAGE_8 0xb5
+#define mmSMC_RESP_8 0xb6
+#define mmSMC_MESSAGE_9 0xb7
+#define mmSMC_RESP_9 0xb8
+#define mmSMC_MESSAGE_10 0xb9
+#define mmSMC_RESP_10 0xba
+#define mmSMC_MESSAGE_11 0xbb
+#define mmSMC_RESP_11 0xbc
+#define mmSMC_MSG_ARG_8 0xbd
+#define mmSMC_MSG_ARG_9 0xbe
+#define mmSMC_MSG_ARG_10 0xbf
+#define mmSMC_MSG_ARG_11 0x93
+#define ixSMC_SYSCON_RESET_CNTL 0x80000000
+#define ixSMC_SYSCON_CLOCK_CNTL_0 0x80000004
+#define ixSMC_SYSCON_CLOCK_CNTL_1 0x80000008
+#define ixSMC_SYSCON_CLOCK_CNTL_2 0x8000000c
+#define ixSMC_SYSCON_MISC_CNTL 0x80000010
+#define ixSMC_SYSCON_MSG_ARG_0 0x80000068
+#define ixSMC_PC_C 0x80000370
+#define ixSMC_SCRATCH9 0x80000424
+#define mmGPIOPAD_SW_INT_STAT 0x180
+#define mmGPIOPAD_STRENGTH 0x181
+#define mmGPIOPAD_MASK 0x182
+#define mmGPIOPAD_A 0x183
+#define mmGPIOPAD_EN 0x184
+#define mmGPIOPAD_Y 0x185
+#define mmGPIOPAD_PINSTRAPS 0x186
+#define mmGPIOPAD_INT_STAT_EN 0x187
+#define mmGPIOPAD_INT_STAT 0x188
+#define mmGPIOPAD_INT_STAT_AK 0x189
+#define mmGPIOPAD_INT_EN 0x18a
+#define mmGPIOPAD_INT_TYPE 0x18b
+#define mmGPIOPAD_INT_POLARITY 0x18c
+#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x18d
+#define mmGPIOPAD_RCVR_SEL 0x191
+#define mmGPIOPAD_PU_EN 0x192
+#define mmGPIOPAD_PD_EN 0x193
+#define mmCG_FPS_CNT 0x1b6
+#define mmSMU_IND_INDEX_0 0x1a6
+#define mmSMU_IND_DATA_0 0x1a7
+#define mmSMU_IND_INDEX_1 0x1a8
+#define mmSMU_IND_DATA_1 0x1a9
+#define mmSMU_IND_INDEX_2 0x1aa
+#define mmSMU_IND_DATA_2 0x1ab
+#define mmSMU_IND_INDEX_3 0x1ac
+#define mmSMU_IND_DATA_3 0x1ad
+#define mmSMU_IND_INDEX_4 0x1ae
+#define mmSMU_IND_DATA_4 0x1af
+#define mmSMU_IND_INDEX_5 0x1b0
+#define mmSMU_IND_DATA_5 0x1b1
+#define mmSMU_IND_INDEX_6 0x1b2
+#define mmSMU_IND_DATA_6 0x1b3
+#define mmSMU_IND_INDEX_7 0x1b4
+#define mmSMU_IND_DATA_7 0x1b5
+#define mmSMU_SMC_IND_INDEX 0x80
+#define mmSMU0_SMU_SMC_IND_INDEX 0x80
+#define mmSMU1_SMU_SMC_IND_INDEX 0x82
+#define mmSMU2_SMU_SMC_IND_INDEX 0x84
+#define mmSMU3_SMU_SMC_IND_INDEX 0x86
+#define mmSMU_SMC_IND_DATA 0x81
+#define mmSMU0_SMU_SMC_IND_DATA 0x81
+#define mmSMU1_SMU_SMC_IND_DATA 0x83
+#define mmSMU2_SMU_SMC_IND_DATA 0x85
+#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define ixRCU_UC_EVENTS 0xc0000004
+#define ixRCU_MISC_CTRL 0xc0000010
+#define ixRCU_VIRT_RESET_REQ 0xc0000024
+#define ixCC_RCU_FUSES 0xc00c0000
+#define ixCC_SMU_MISC_FUSES 0xc00c0004
+#define ixCC_SCLK_VID_FUSES 0xc00c0008
+#define ixCC_GIO_IOCCFG_FUSES 0xc00c000c
+#define ixCC_GIO_IOC_FUSES 0xc00c0010
+#define ixCC_SMU_TST_EFUSE1_MISC 0xc00c001c
+#define ixCC_TST_ID_STRAPS 0xc00c0020
+#define ixCC_FCTRL_FUSES 0xc00c0024
+#define ixCC_HARVEST_FUSES 0xc00c0028
+#define ixSMU_MAIN_PLL_OP_FREQ 0xe0003020
+#define ixSMU_STATUS 0xe0003088
+#define ixSMU_FIRMWARE 0xe00030a4
+#define ixSMU_INPUT_DATA 0xe00030b8
+#define ixSMU_EFUSE_0 0xc0100000
+#define ixFIRMWARE_FLAGS 0x3f000
+#define ixTDC_STATUS 0x3f004
+#define ixTDC_MV_AVERAGE 0x3f008
+#define ixTDC_VRM_LIMIT 0x3f00c
+#define ixFEATURE_STATUS 0x3f010
+#define ixENTITY_TEMPERATURES_1 0x3f014
+#define ixMCARB_DRAM_TIMING_TABLE_1 0x3f018
+#define ixMCARB_DRAM_TIMING_TABLE_2 0x3f01c
+#define ixMCARB_DRAM_TIMING_TABLE_3 0x3f020
+#define ixMCARB_DRAM_TIMING_TABLE_4 0x3f024
+#define ixMCARB_DRAM_TIMING_TABLE_5 0x3f028
+#define ixMCARB_DRAM_TIMING_TABLE_6 0x3f02c
+#define ixMCARB_DRAM_TIMING_TABLE_7 0x3f030
+#define ixMCARB_DRAM_TIMING_TABLE_8 0x3f034
+#define ixMCARB_DRAM_TIMING_TABLE_9 0x3f038
+#define ixMCARB_DRAM_TIMING_TABLE_10 0x3f03c
+#define ixMCARB_DRAM_TIMING_TABLE_11 0x3f040
+#define ixMCARB_DRAM_TIMING_TABLE_12 0x3f044
+#define ixMCARB_DRAM_TIMING_TABLE_13 0x3f048
+#define ixMCARB_DRAM_TIMING_TABLE_14 0x3f04c
+#define ixMCARB_DRAM_TIMING_TABLE_15 0x3f050
+#define ixMCARB_DRAM_TIMING_TABLE_16 0x3f054
+#define ixMCARB_DRAM_TIMING_TABLE_17 0x3f058
+#define ixMCARB_DRAM_TIMING_TABLE_18 0x3f05c
+#define ixMCARB_DRAM_TIMING_TABLE_19 0x3f060
+#define ixMCARB_DRAM_TIMING_TABLE_20 0x3f064
+#define ixMCARB_DRAM_TIMING_TABLE_21 0x3f068
+#define ixMCARB_DRAM_TIMING_TABLE_22 0x3f06c
+#define ixMCARB_DRAM_TIMING_TABLE_23 0x3f070
+#define ixMCARB_DRAM_TIMING_TABLE_24 0x3f074
+#define ixMCARB_DRAM_TIMING_TABLE_25 0x3f078
+#define ixMCARB_DRAM_TIMING_TABLE_26 0x3f07c
+#define ixMCARB_DRAM_TIMING_TABLE_27 0x3f080
+#define ixMCARB_DRAM_TIMING_TABLE_28 0x3f084
+#define ixMCARB_DRAM_TIMING_TABLE_29 0x3f088
+#define ixMCARB_DRAM_TIMING_TABLE_30 0x3f08c
+#define ixMCARB_DRAM_TIMING_TABLE_31 0x3f090
+#define ixMCARB_DRAM_TIMING_TABLE_32 0x3f094
+#define ixMCARB_DRAM_TIMING_TABLE_33 0x3f098
+#define ixMCARB_DRAM_TIMING_TABLE_34 0x3f09c
+#define ixMCARB_DRAM_TIMING_TABLE_35 0x3f0a0
+#define ixMCARB_DRAM_TIMING_TABLE_36 0x3f0a4
+#define ixMCARB_DRAM_TIMING_TABLE_37 0x3f0a8
+#define ixMCARB_DRAM_TIMING_TABLE_38 0x3f0ac
+#define ixMCARB_DRAM_TIMING_TABLE_39 0x3f0b0
+#define ixMCARB_DRAM_TIMING_TABLE_40 0x3f0b4
+#define ixMCARB_DRAM_TIMING_TABLE_41 0x3f0b8
+#define ixMCARB_DRAM_TIMING_TABLE_42 0x3f0bc
+#define ixMCARB_DRAM_TIMING_TABLE_43 0x3f0c0
+#define ixMCARB_DRAM_TIMING_TABLE_44 0x3f0c4
+#define ixMCARB_DRAM_TIMING_TABLE_45 0x3f0c8
+#define ixMCARB_DRAM_TIMING_TABLE_46 0x3f0cc
+#define ixMCARB_DRAM_TIMING_TABLE_47 0x3f0d0
+#define ixMCARB_DRAM_TIMING_TABLE_48 0x3f0d4
+#define ixMCARB_DRAM_TIMING_TABLE_49 0x3f0d8
+#define ixMCARB_DRAM_TIMING_TABLE_50 0x3f0dc
+#define ixMCARB_DRAM_TIMING_TABLE_51 0x3f0e0
+#define ixMCARB_DRAM_TIMING_TABLE_52 0x3f0e4
+#define ixMCARB_DRAM_TIMING_TABLE_53 0x3f0e8
+#define ixMCARB_DRAM_TIMING_TABLE_54 0x3f0ec
+#define ixMCARB_DRAM_TIMING_TABLE_55 0x3f0f0
+#define ixMCARB_DRAM_TIMING_TABLE_56 0x3f0f4
+#define ixMCARB_DRAM_TIMING_TABLE_57 0x3f0f8
+#define ixMCARB_DRAM_TIMING_TABLE_58 0x3f0fc
+#define ixMCARB_DRAM_TIMING_TABLE_59 0x3f100
+#define ixMCARB_DRAM_TIMING_TABLE_60 0x3f104
+#define ixMCARB_DRAM_TIMING_TABLE_61 0x3f108
+#define ixMCARB_DRAM_TIMING_TABLE_62 0x3f10c
+#define ixMCARB_DRAM_TIMING_TABLE_63 0x3f110
+#define ixMCARB_DRAM_TIMING_TABLE_64 0x3f114
+#define ixMCARB_DRAM_TIMING_TABLE_65 0x3f118
+#define ixMCARB_DRAM_TIMING_TABLE_66 0x3f11c
+#define ixMCARB_DRAM_TIMING_TABLE_67 0x3f120
+#define ixMCARB_DRAM_TIMING_TABLE_68 0x3f124
+#define ixMCARB_DRAM_TIMING_TABLE_69 0x3f128
+#define ixMCARB_DRAM_TIMING_TABLE_70 0x3f12c
+#define ixMCARB_DRAM_TIMING_TABLE_71 0x3f130
+#define ixMCARB_DRAM_TIMING_TABLE_72 0x3f134
+#define ixMCARB_DRAM_TIMING_TABLE_73 0x3f138
+#define ixMCARB_DRAM_TIMING_TABLE_74 0x3f13c
+#define ixMCARB_DRAM_TIMING_TABLE_75 0x3f140
+#define ixMCARB_DRAM_TIMING_TABLE_76 0x3f144
+#define ixMCARB_DRAM_TIMING_TABLE_77 0x3f148
+#define ixMCARB_DRAM_TIMING_TABLE_78 0x3f14c
+#define ixMCARB_DRAM_TIMING_TABLE_79 0x3f150
+#define ixMCARB_DRAM_TIMING_TABLE_80 0x3f154
+#define ixMCARB_DRAM_TIMING_TABLE_81 0x3f158
+#define ixMCARB_DRAM_TIMING_TABLE_82 0x3f15c
+#define ixMCARB_DRAM_TIMING_TABLE_83 0x3f160
+#define ixMCARB_DRAM_TIMING_TABLE_84 0x3f164
+#define ixMCARB_DRAM_TIMING_TABLE_85 0x3f168
+#define ixMCARB_DRAM_TIMING_TABLE_86 0x3f16c
+#define ixMCARB_DRAM_TIMING_TABLE_87 0x3f170
+#define ixMCARB_DRAM_TIMING_TABLE_88 0x3f174
+#define ixMCARB_DRAM_TIMING_TABLE_89 0x3f178
+#define ixMCARB_DRAM_TIMING_TABLE_90 0x3f17c
+#define ixMCARB_DRAM_TIMING_TABLE_91 0x3f180
+#define ixMCARB_DRAM_TIMING_TABLE_92 0x3f184
+#define ixMCARB_DRAM_TIMING_TABLE_93 0x3f188
+#define ixMCARB_DRAM_TIMING_TABLE_94 0x3f18c
+#define ixMCARB_DRAM_TIMING_TABLE_95 0x3f190
+#define ixMCARB_DRAM_TIMING_TABLE_96 0x3f194
+#define ixDPM_TABLE_1 0x3f198
+#define ixDPM_TABLE_2 0x3f19c
+#define ixDPM_TABLE_3 0x3f1a0
+#define ixDPM_TABLE_4 0x3f1a4
+#define ixDPM_TABLE_5 0x3f1a8
+#define ixDPM_TABLE_6 0x3f1ac
+#define ixDPM_TABLE_7 0x3f1b0
+#define ixDPM_TABLE_8 0x3f1b4
+#define ixDPM_TABLE_9 0x3f1b8
+#define ixDPM_TABLE_10 0x3f1bc
+#define ixDPM_TABLE_11 0x3f1c0
+#define ixDPM_TABLE_12 0x3f1c4
+#define ixDPM_TABLE_13 0x3f1c8
+#define ixDPM_TABLE_14 0x3f1cc
+#define ixDPM_TABLE_15 0x3f1d0
+#define ixDPM_TABLE_16 0x3f1d4
+#define ixDPM_TABLE_17 0x3f1d8
+#define ixDPM_TABLE_18 0x3f1dc
+#define ixDPM_TABLE_19 0x3f1e0
+#define ixDPM_TABLE_20 0x3f1e4
+#define ixDPM_TABLE_21 0x3f1e8
+#define ixDPM_TABLE_22 0x3f1ec
+#define ixDPM_TABLE_23 0x3f1f0
+#define ixDPM_TABLE_24 0x3f1f4
+#define ixDPM_TABLE_25 0x3f1f8
+#define ixDPM_TABLE_26 0x3f1fc
+#define ixDPM_TABLE_27 0x3f200
+#define ixDPM_TABLE_28 0x3f204
+#define ixDPM_TABLE_29 0x3f208
+#define ixDPM_TABLE_30 0x3f20c
+#define ixDPM_TABLE_31 0x3f210
+#define ixDPM_TABLE_32 0x3f214
+#define ixDPM_TABLE_33 0x3f218
+#define ixDPM_TABLE_34 0x3f21c
+#define ixDPM_TABLE_35 0x3f220
+#define ixDPM_TABLE_36 0x3f224
+#define ixDPM_TABLE_37 0x3f228
+#define ixDPM_TABLE_38 0x3f22c
+#define ixDPM_TABLE_39 0x3f230
+#define ixDPM_TABLE_40 0x3f234
+#define ixDPM_TABLE_41 0x3f238
+#define ixDPM_TABLE_42 0x3f23c
+#define ixDPM_TABLE_43 0x3f240
+#define ixDPM_TABLE_44 0x3f244
+#define ixDPM_TABLE_45 0x3f248
+#define ixDPM_TABLE_46 0x3f24c
+#define ixDPM_TABLE_47 0x3f250
+#define ixDPM_TABLE_48 0x3f254
+#define ixDPM_TABLE_49 0x3f258
+#define ixDPM_TABLE_50 0x3f25c
+#define ixDPM_TABLE_51 0x3f260
+#define ixDPM_TABLE_52 0x3f264
+#define ixDPM_TABLE_53 0x3f268
+#define ixDPM_TABLE_54 0x3f26c
+#define ixDPM_TABLE_55 0x3f270
+#define ixDPM_TABLE_56 0x3f274
+#define ixDPM_TABLE_57 0x3f278
+#define ixDPM_TABLE_58 0x3f27c
+#define ixDPM_TABLE_59 0x3f280
+#define ixDPM_TABLE_60 0x3f284
+#define ixDPM_TABLE_61 0x3f288
+#define ixDPM_TABLE_62 0x3f28c
+#define ixDPM_TABLE_63 0x3f290
+#define ixDPM_TABLE_64 0x3f294
+#define ixDPM_TABLE_65 0x3f298
+#define ixDPM_TABLE_66 0x3f29c
+#define ixDPM_TABLE_67 0x3f2a0
+#define ixDPM_TABLE_68 0x3f2a4
+#define ixDPM_TABLE_69 0x3f2a8
+#define ixDPM_TABLE_70 0x3f2ac
+#define ixDPM_TABLE_71 0x3f2b0
+#define ixDPM_TABLE_72 0x3f2b4
+#define ixDPM_TABLE_73 0x3f2b8
+#define ixDPM_TABLE_74 0x3f2bc
+#define ixDPM_TABLE_75 0x3f2c0
+#define ixDPM_TABLE_76 0x3f2c4
+#define ixDPM_TABLE_77 0x3f2c8
+#define ixDPM_TABLE_78 0x3f2cc
+#define ixDPM_TABLE_79 0x3f2d0
+#define ixDPM_TABLE_80 0x3f2d4
+#define ixDPM_TABLE_81 0x3f2d8
+#define ixDPM_TABLE_82 0x3f2dc
+#define ixDPM_TABLE_83 0x3f2e0
+#define ixDPM_TABLE_84 0x3f2e4
+#define ixDPM_TABLE_85 0x3f2e8
+#define ixDPM_TABLE_86 0x3f2ec
+#define ixDPM_TABLE_87 0x3f2f0
+#define ixDPM_TABLE_88 0x3f2f4
+#define ixDPM_TABLE_89 0x3f2f8
+#define ixDPM_TABLE_90 0x3f2fc
+#define ixDPM_TABLE_91 0x3f300
+#define ixDPM_TABLE_92 0x3f304
+#define ixDPM_TABLE_93 0x3f308
+#define ixDPM_TABLE_94 0x3f30c
+#define ixDPM_TABLE_95 0x3f310
+#define ixDPM_TABLE_96 0x3f314
+#define ixDPM_TABLE_97 0x3f318
+#define ixDPM_TABLE_98 0x3f31c
+#define ixDPM_TABLE_99 0x3f320
+#define ixDPM_TABLE_100 0x3f324
+#define ixDPM_TABLE_101 0x3f328
+#define ixDPM_TABLE_102 0x3f32c
+#define ixDPM_TABLE_103 0x3f330
+#define ixDPM_TABLE_104 0x3f334
+#define ixDPM_TABLE_105 0x3f338
+#define ixDPM_TABLE_106 0x3f33c
+#define ixDPM_TABLE_107 0x3f340
+#define ixDPM_TABLE_108 0x3f344
+#define ixDPM_TABLE_109 0x3f348
+#define ixDPM_TABLE_110 0x3f34c
+#define ixDPM_TABLE_111 0x3f350
+#define ixDPM_TABLE_112 0x3f354
+#define ixDPM_TABLE_113 0x3f358
+#define ixDPM_TABLE_114 0x3f35c
+#define ixDPM_TABLE_115 0x3f360
+#define ixDPM_TABLE_116 0x3f364
+#define ixDPM_TABLE_117 0x3f368
+#define ixDPM_TABLE_118 0x3f36c
+#define ixDPM_TABLE_119 0x3f370
+#define ixDPM_TABLE_120 0x3f374
+#define ixDPM_TABLE_121 0x3f378
+#define ixDPM_TABLE_122 0x3f37c
+#define ixDPM_TABLE_123 0x3f380
+#define ixDPM_TABLE_124 0x3f384
+#define ixDPM_TABLE_125 0x3f388
+#define ixDPM_TABLE_126 0x3f38c
+#define ixDPM_TABLE_127 0x3f390
+#define ixDPM_TABLE_128 0x3f394
+#define ixDPM_TABLE_129 0x3f398
+#define ixDPM_TABLE_130 0x3f39c
+#define ixDPM_TABLE_131 0x3f3a0
+#define ixDPM_TABLE_132 0x3f3a4
+#define ixDPM_TABLE_133 0x3f3a8
+#define ixDPM_TABLE_134 0x3f3ac
+#define ixDPM_TABLE_135 0x3f3b0
+#define ixDPM_TABLE_136 0x3f3b4
+#define ixDPM_TABLE_137 0x3f3b8
+#define ixDPM_TABLE_138 0x3f3bc
+#define ixDPM_TABLE_139 0x3f3c0
+#define ixDPM_TABLE_140 0x3f3c4
+#define ixDPM_TABLE_141 0x3f3c8
+#define ixDPM_TABLE_142 0x3f3cc
+#define ixDPM_TABLE_143 0x3f3d0
+#define ixDPM_TABLE_144 0x3f3d4
+#define ixDPM_TABLE_145 0x3f3d8
+#define ixDPM_TABLE_146 0x3f3dc
+#define ixDPM_TABLE_147 0x3f3e0
+#define ixDPM_TABLE_148 0x3f3e4
+#define ixDPM_TABLE_149 0x3f3e8
+#define ixDPM_TABLE_150 0x3f3ec
+#define ixDPM_TABLE_151 0x3f3f0
+#define ixDPM_TABLE_152 0x3f3f4
+#define ixDPM_TABLE_153 0x3f3f8
+#define ixDPM_TABLE_154 0x3f3fc
+#define ixDPM_TABLE_155 0x3f400
+#define ixDPM_TABLE_156 0x3f404
+#define ixDPM_TABLE_157 0x3f408
+#define ixDPM_TABLE_158 0x3f40c
+#define ixDPM_TABLE_159 0x3f410
+#define ixDPM_TABLE_160 0x3f414
+#define ixDPM_TABLE_161 0x3f418
+#define ixDPM_TABLE_162 0x3f41c
+#define ixDPM_TABLE_163 0x3f420
+#define ixDPM_TABLE_164 0x3f424
+#define ixDPM_TABLE_165 0x3f428
+#define ixDPM_TABLE_166 0x3f42c
+#define ixDPM_TABLE_167 0x3f430
+#define ixDPM_TABLE_168 0x3f434
+#define ixDPM_TABLE_169 0x3f438
+#define ixDPM_TABLE_170 0x3f43c
+#define ixDPM_TABLE_171 0x3f440
+#define ixDPM_TABLE_172 0x3f444
+#define ixDPM_TABLE_173 0x3f448
+#define ixDPM_TABLE_174 0x3f44c
+#define ixDPM_TABLE_175 0x3f450
+#define ixDPM_TABLE_176 0x3f454
+#define ixDPM_TABLE_177 0x3f458
+#define ixDPM_TABLE_178 0x3f45c
+#define ixDPM_TABLE_179 0x3f460
+#define ixDPM_TABLE_180 0x3f464
+#define ixDPM_TABLE_181 0x3f468
+#define ixDPM_TABLE_182 0x3f46c
+#define ixDPM_TABLE_183 0x3f470
+#define ixDPM_TABLE_184 0x3f474
+#define ixDPM_TABLE_185 0x3f478
+#define ixDPM_TABLE_186 0x3f47c
+#define ixDPM_TABLE_187 0x3f480
+#define ixDPM_TABLE_188 0x3f484
+#define ixDPM_TABLE_189 0x3f488
+#define ixDPM_TABLE_190 0x3f48c
+#define ixDPM_TABLE_191 0x3f490
+#define ixDPM_TABLE_192 0x3f494
+#define ixDPM_TABLE_193 0x3f498
+#define ixDPM_TABLE_194 0x3f49c
+#define ixDPM_TABLE_195 0x3f4a0
+#define ixDPM_TABLE_196 0x3f4a4
+#define ixDPM_TABLE_197 0x3f4a8
+#define ixDPM_TABLE_198 0x3f4ac
+#define ixDPM_TABLE_199 0x3f4b0
+#define ixDPM_TABLE_200 0x3f4b4
+#define ixDPM_TABLE_201 0x3f4b8
+#define ixDPM_TABLE_202 0x3f4bc
+#define ixDPM_TABLE_203 0x3f4c0
+#define ixDPM_TABLE_204 0x3f4c4
+#define ixDPM_TABLE_205 0x3f4c8
+#define ixDPM_TABLE_206 0x3f4cc
+#define ixDPM_TABLE_207 0x3f4d0
+#define ixDPM_TABLE_208 0x3f4d4
+#define ixDPM_TABLE_209 0x3f4d8
+#define ixDPM_TABLE_210 0x3f4dc
+#define ixDPM_TABLE_211 0x3f4e0
+#define ixDPM_TABLE_212 0x3f4e4
+#define ixDPM_TABLE_213 0x3f4e8
+#define ixDPM_TABLE_214 0x3f4ec
+#define ixDPM_TABLE_215 0x3f4f0
+#define ixDPM_TABLE_216 0x3f4f4
+#define ixDPM_TABLE_217 0x3f4f8
+#define ixDPM_TABLE_218 0x3f4fc
+#define ixDPM_TABLE_219 0x3f500
+#define ixDPM_TABLE_220 0x3f504
+#define ixDPM_TABLE_221 0x3f508
+#define ixDPM_TABLE_222 0x3f50c
+#define ixDPM_TABLE_223 0x3f510
+#define ixDPM_TABLE_224 0x3f514
+#define ixDPM_TABLE_225 0x3f518
+#define ixDPM_TABLE_226 0x3f51c
+#define ixDPM_TABLE_227 0x3f520
+#define ixDPM_TABLE_228 0x3f524
+#define ixDPM_TABLE_229 0x3f528
+#define ixDPM_TABLE_230 0x3f52c
+#define ixDPM_TABLE_231 0x3f530
+#define ixDPM_TABLE_232 0x3f534
+#define ixDPM_TABLE_233 0x3f538
+#define ixDPM_TABLE_234 0x3f53c
+#define ixDPM_TABLE_235 0x3f540
+#define ixDPM_TABLE_236 0x3f544
+#define ixDPM_TABLE_237 0x3f548
+#define ixDPM_TABLE_238 0x3f54c
+#define ixDPM_TABLE_239 0x3f550
+#define ixDPM_TABLE_240 0x3f554
+#define ixDPM_TABLE_241 0x3f558
+#define ixDPM_TABLE_242 0x3f55c
+#define ixDPM_TABLE_243 0x3f560
+#define ixDPM_TABLE_244 0x3f564
+#define ixDPM_TABLE_245 0x3f568
+#define ixDPM_TABLE_246 0x3f56c
+#define ixDPM_TABLE_247 0x3f570
+#define ixDPM_TABLE_248 0x3f574
+#define ixDPM_TABLE_249 0x3f578
+#define ixDPM_TABLE_250 0x3f57c
+#define ixDPM_TABLE_251 0x3f580
+#define ixDPM_TABLE_252 0x3f584
+#define ixDPM_TABLE_253 0x3f588
+#define ixDPM_TABLE_254 0x3f58c
+#define ixDPM_TABLE_255 0x3f590
+#define ixDPM_TABLE_256 0x3f594
+#define ixDPM_TABLE_257 0x3f598
+#define ixDPM_TABLE_258 0x3f59c
+#define ixDPM_TABLE_259 0x3f5a0
+#define ixDPM_TABLE_260 0x3f5a4
+#define ixDPM_TABLE_261 0x3f5a8
+#define ixDPM_TABLE_262 0x3f5ac
+#define ixDPM_TABLE_263 0x3f5b0
+#define ixDPM_TABLE_264 0x3f5b4
+#define ixDPM_TABLE_265 0x3f5b8
+#define ixDPM_TABLE_266 0x3f5bc
+#define ixDPM_TABLE_267 0x3f5c0
+#define ixDPM_TABLE_268 0x3f5c4
+#define ixDPM_TABLE_269 0x3f5c8
+#define ixDPM_TABLE_270 0x3f5cc
+#define ixDPM_TABLE_271 0x3f5d0
+#define ixDPM_TABLE_272 0x3f5d4
+#define ixDPM_TABLE_273 0x3f5d8
+#define ixDPM_TABLE_274 0x3f5dc
+#define ixDPM_TABLE_275 0x3f5e0
+#define ixDPM_TABLE_276 0x3f5e4
+#define ixDPM_TABLE_277 0x3f5e8
+#define ixDPM_TABLE_278 0x3f5ec
+#define ixDPM_TABLE_279 0x3f5f0
+#define ixDPM_TABLE_280 0x3f5f4
+#define ixDPM_TABLE_281 0x3f5f8
+#define ixDPM_TABLE_282 0x3f5fc
+#define ixDPM_TABLE_283 0x3f600
+#define ixDPM_TABLE_284 0x3f604
+#define ixDPM_TABLE_285 0x3f608
+#define ixDPM_TABLE_286 0x3f60c
+#define ixDPM_TABLE_287 0x3f610
+#define ixDPM_TABLE_288 0x3f614
+#define ixDPM_TABLE_289 0x3f618
+#define ixDPM_TABLE_290 0x3f61c
+#define ixDPM_TABLE_291 0x3f620
+#define ixDPM_TABLE_292 0x3f624
+#define ixDPM_TABLE_293 0x3f628
+#define ixDPM_TABLE_294 0x3f62c
+#define ixDPM_TABLE_295 0x3f630
+#define ixDPM_TABLE_296 0x3f634
+#define ixDPM_TABLE_297 0x3f638
+#define ixDPM_TABLE_298 0x3f63c
+#define ixDPM_TABLE_299 0x3f640
+#define ixDPM_TABLE_300 0x3f644
+#define ixDPM_TABLE_301 0x3f648
+#define ixDPM_TABLE_302 0x3f64c
+#define ixDPM_TABLE_303 0x3f650
+#define ixDPM_TABLE_304 0x3f654
+#define ixDPM_TABLE_305 0x3f658
+#define ixDPM_TABLE_306 0x3f65c
+#define ixDPM_TABLE_307 0x3f660
+#define ixDPM_TABLE_308 0x3f664
+#define ixDPM_TABLE_309 0x3f668
+#define ixDPM_TABLE_310 0x3f66c
+#define ixDPM_TABLE_311 0x3f670
+#define ixDPM_TABLE_312 0x3f674
+#define ixDPM_TABLE_313 0x3f678
+#define ixDPM_TABLE_314 0x3f67c
+#define ixDPM_TABLE_315 0x3f680
+#define ixDPM_TABLE_316 0x3f684
+#define ixDPM_TABLE_317 0x3f688
+#define ixDPM_TABLE_318 0x3f68c
+#define ixDPM_TABLE_319 0x3f690
+#define ixDPM_TABLE_320 0x3f694
+#define ixDPM_TABLE_321 0x3f698
+#define ixDPM_TABLE_322 0x3f69c
+#define ixDPM_TABLE_323 0x3f6a0
+#define ixDPM_TABLE_324 0x3f6a4
+#define ixDPM_TABLE_325 0x3f6a8
+#define ixDPM_TABLE_326 0x3f6ac
+#define ixDPM_TABLE_327 0x3f6b0
+#define ixDPM_TABLE_328 0x3f6b4
+#define ixDPM_TABLE_329 0x3f6b8
+#define ixDPM_TABLE_330 0x3f6bc
+#define ixDPM_TABLE_331 0x3f6c0
+#define ixDPM_TABLE_332 0x3f6c4
+#define ixDPM_TABLE_333 0x3f6c8
+#define ixDPM_TABLE_334 0x3f6cc
+#define ixDPM_TABLE_335 0x3f6d0
+#define ixDPM_TABLE_336 0x3f6d4
+#define ixDPM_TABLE_337 0x3f6d8
+#define ixDPM_TABLE_338 0x3f6dc
+#define ixDPM_TABLE_339 0x3f6e0
+#define ixDPM_TABLE_340 0x3f6e4
+#define ixDPM_TABLE_341 0x3f6e8
+#define ixDPM_TABLE_342 0x3f6ec
+#define ixDPM_TABLE_343 0x3f6f0
+#define ixDPM_TABLE_344 0x3f6f4
+#define ixDPM_TABLE_345 0x3f6f8
+#define ixDPM_TABLE_346 0x3f6fc
+#define ixDPM_TABLE_347 0x3f700
+#define ixDPM_TABLE_348 0x3f704
+#define ixDPM_TABLE_349 0x3f708
+#define ixDPM_TABLE_350 0x3f70c
+#define ixDPM_TABLE_351 0x3f710
+#define ixDPM_TABLE_352 0x3f714
+#define ixDPM_TABLE_353 0x3f718
+#define ixDPM_TABLE_354 0x3f71c
+#define ixDPM_TABLE_355 0x3f720
+#define ixDPM_TABLE_356 0x3f724
+#define ixDPM_TABLE_357 0x3f728
+#define ixDPM_TABLE_358 0x3f72c
+#define ixDPM_TABLE_359 0x3f730
+#define ixDPM_TABLE_360 0x3f734
+#define ixDPM_TABLE_361 0x3f738
+#define ixDPM_TABLE_362 0x3f73c
+#define ixDPM_TABLE_363 0x3f740
+#define ixDPM_TABLE_364 0x3f744
+#define ixDPM_TABLE_365 0x3f748
+#define ixDPM_TABLE_366 0x3f74c
+#define ixDPM_TABLE_367 0x3f750
+#define ixDPM_TABLE_368 0x3f754
+#define ixDPM_TABLE_369 0x3f758
+#define ixDPM_TABLE_370 0x3f75c
+#define ixDPM_TABLE_371 0x3f760
+#define ixDPM_TABLE_372 0x3f764
+#define ixDPM_TABLE_373 0x3f768
+#define ixDPM_TABLE_374 0x3f76c
+#define ixDPM_TABLE_375 0x3f770
+#define ixDPM_TABLE_376 0x3f774
+#define ixDPM_TABLE_377 0x3f778
+#define ixDPM_TABLE_378 0x3f77c
+#define ixDPM_TABLE_379 0x3f780
+#define ixDPM_TABLE_380 0x3f784
+#define ixDPM_TABLE_381 0x3f788
+#define ixDPM_TABLE_382 0x3f78c
+#define ixDPM_TABLE_383 0x3f790
+#define ixDPM_TABLE_384 0x3f794
+#define ixDPM_TABLE_385 0x3f798
+#define ixDPM_TABLE_386 0x3f79c
+#define ixDPM_TABLE_387 0x3f7a0
+#define ixDPM_TABLE_388 0x3f7a4
+#define ixDPM_TABLE_389 0x3f7a8
+#define ixDPM_TABLE_390 0x3f7ac
+#define ixDPM_TABLE_391 0x3f7b0
+#define ixDPM_TABLE_392 0x3f7b4
+#define ixDPM_TABLE_393 0x3f7b8
+#define ixDPM_TABLE_394 0x3f7bc
+#define ixDPM_TABLE_395 0x3f7c0
+#define ixDPM_TABLE_396 0x3f7c4
+#define ixDPM_TABLE_397 0x3f7c8
+#define ixDPM_TABLE_398 0x3f7cc
+#define ixDPM_TABLE_399 0x3f7d0
+#define ixDPM_TABLE_400 0x3f7d4
+#define ixDPM_TABLE_401 0x3f7d8
+#define ixDPM_TABLE_402 0x3f7dc
+#define ixDPM_TABLE_403 0x3f7e0
+#define ixDPM_TABLE_404 0x3f7e4
+#define ixDPM_TABLE_405 0x3f7e8
+#define ixDPM_TABLE_406 0x3f7ec
+#define ixDPM_TABLE_407 0x3f7f0
+#define ixDPM_TABLE_408 0x3f7f4
+#define ixDPM_TABLE_409 0x3f7f8
+#define ixDPM_TABLE_410 0x3f7fc
+#define ixDPM_TABLE_411 0x3f800
+#define ixDPM_TABLE_412 0x3f804
+#define ixDPM_TABLE_413 0x3f808
+#define ixDPM_TABLE_414 0x3f80c
+#define ixDPM_TABLE_415 0x3f810
+#define ixDPM_TABLE_416 0x3f814
+#define ixDPM_TABLE_417 0x3f818
+#define ixDPM_TABLE_418 0x3f81c
+#define ixDPM_TABLE_419 0x3f820
+#define ixDPM_TABLE_420 0x3f824
+#define ixDPM_TABLE_421 0x3f828
+#define ixDPM_TABLE_422 0x3f82c
+#define ixDPM_TABLE_423 0x3f830
+#define ixDPM_TABLE_424 0x3f834
+#define ixDPM_TABLE_425 0x3f838
+#define ixDPM_TABLE_426 0x3f83c
+#define ixDPM_TABLE_427 0x3f840
+#define ixDPM_TABLE_428 0x3f844
+#define ixDPM_TABLE_429 0x3f848
+#define ixDPM_TABLE_430 0x3f84c
+#define ixDPM_TABLE_431 0x3f850
+#define ixDPM_TABLE_432 0x3f854
+#define ixDPM_TABLE_433 0x3f858
+#define ixDPM_TABLE_434 0x3f85c
+#define ixDPM_TABLE_435 0x3f860
+#define ixDPM_TABLE_436 0x3f864
+#define ixDPM_TABLE_437 0x3f868
+#define ixDPM_TABLE_438 0x3f86c
+#define ixDPM_TABLE_439 0x3f870
+#define ixDPM_TABLE_440 0x3f874
+#define ixSOFT_REGISTERS_TABLE_1 0x3f89c
+#define ixSOFT_REGISTERS_TABLE_2 0x3f8a0
+#define ixSOFT_REGISTERS_TABLE_3 0x3f8a4
+#define ixSOFT_REGISTERS_TABLE_4 0x3f8a8
+#define ixSOFT_REGISTERS_TABLE_5 0x3f8ac
+#define ixSOFT_REGISTERS_TABLE_6 0x3f8b0
+#define ixSOFT_REGISTERS_TABLE_7 0x3f8b4
+#define ixSOFT_REGISTERS_TABLE_8 0x3f8b8
+#define ixSOFT_REGISTERS_TABLE_9 0x3f8bc
+#define ixSOFT_REGISTERS_TABLE_10 0x3f8c0
+#define ixSOFT_REGISTERS_TABLE_11 0x3f8c4
+#define ixSOFT_REGISTERS_TABLE_12 0x3f8c8
+#define ixSOFT_REGISTERS_TABLE_13 0x3f8cc
+#define ixSOFT_REGISTERS_TABLE_14 0x3f8d0
+#define ixSOFT_REGISTERS_TABLE_15 0x3f8d4
+#define ixSOFT_REGISTERS_TABLE_16 0x3f8d8
+#define ixSOFT_REGISTERS_TABLE_17 0x3f8dc
+#define ixSOFT_REGISTERS_TABLE_18 0x3f8e0
+#define ixSOFT_REGISTERS_TABLE_19 0x3f8e4
+#define ixSOFT_REGISTERS_TABLE_20 0x3f8e8
+#define ixSOFT_REGISTERS_TABLE_21 0x3f8ec
+#define ixSOFT_REGISTERS_TABLE_22 0x3f8f0
+#define ixSOFT_REGISTERS_TABLE_23 0x3f8f4
+#define ixSOFT_REGISTERS_TABLE_24 0x3f8f8
+#define ixSOFT_REGISTERS_TABLE_25 0x3f8fc
+#define ixSOFT_REGISTERS_TABLE_26 0x3f900
+#define ixSOFT_REGISTERS_TABLE_27 0x3f904
+#define ixSOFT_REGISTERS_TABLE_28 0x3f888
+#define ixSOFT_REGISTERS_TABLE_29 0x3f90c
+#define ixSOFT_REGISTERS_TABLE_30 0x3f910
+#define ixPM_FUSES_1 0x3f914
+#define ixPM_FUSES_2 0x3f918
+#define ixPM_FUSES_3 0x3f91c
+#define ixPM_FUSES_4 0x3f920
+#define ixPM_FUSES_5 0x3f924
+#define ixPM_FUSES_6 0x3f928
+#define ixPM_FUSES_7 0x3f92c
+#define ixPM_FUSES_8 0x3f930
+#define ixPM_FUSES_9 0x3f934
+#define ixPM_FUSES_10 0x3f938
+#define ixPM_FUSES_11 0x3f93c
+#define ixPM_FUSES_12 0x3f940
+#define ixPM_FUSES_13 0x3f944
+#define ixPM_FUSES_14 0x3f948
+#define ixPM_FUSES_15 0x3f94c
+#define ixSMU_PM_STATUS_0 0x3fe00
+#define ixSMU_PM_STATUS_1 0x3fe04
+#define ixSMU_PM_STATUS_2 0x3fe08
+#define ixSMU_PM_STATUS_3 0x3fe0c
+#define ixSMU_PM_STATUS_4 0x3fe10
+#define ixSMU_PM_STATUS_5 0x3fe14
+#define ixSMU_PM_STATUS_6 0x3fe18
+#define ixSMU_PM_STATUS_7 0x3fe1c
+#define ixSMU_PM_STATUS_8 0x3fe20
+#define ixSMU_PM_STATUS_9 0x3fe24
+#define ixSMU_PM_STATUS_10 0x3fe28
+#define ixSMU_PM_STATUS_11 0x3fe2c
+#define ixSMU_PM_STATUS_12 0x3fe30
+#define ixSMU_PM_STATUS_13 0x3fe34
+#define ixSMU_PM_STATUS_14 0x3fe38
+#define ixSMU_PM_STATUS_15 0x3fe3c
+#define ixSMU_PM_STATUS_16 0x3fe40
+#define ixSMU_PM_STATUS_17 0x3fe44
+#define ixSMU_PM_STATUS_18 0x3fe48
+#define ixSMU_PM_STATUS_19 0x3fe4c
+#define ixSMU_PM_STATUS_20 0x3fe50
+#define ixSMU_PM_STATUS_21 0x3fe54
+#define ixSMU_PM_STATUS_22 0x3fe58
+#define ixSMU_PM_STATUS_23 0x3fe5c
+#define ixSMU_PM_STATUS_24 0x3fe60
+#define ixSMU_PM_STATUS_25 0x3fe64
+#define ixSMU_PM_STATUS_26 0x3fe68
+#define ixSMU_PM_STATUS_27 0x3fe6c
+#define ixSMU_PM_STATUS_28 0x3fe70
+#define ixSMU_PM_STATUS_29 0x3fe74
+#define ixSMU_PM_STATUS_30 0x3fe78
+#define ixSMU_PM_STATUS_31 0x3fe7c
+#define ixSMU_PM_STATUS_32 0x3fe80
+#define ixSMU_PM_STATUS_33 0x3fe84
+#define ixSMU_PM_STATUS_34 0x3fe88
+#define ixSMU_PM_STATUS_35 0x3fe8c
+#define ixSMU_PM_STATUS_36 0x3fe90
+#define ixSMU_PM_STATUS_37 0x3fe94
+#define ixSMU_PM_STATUS_38 0x3fe98
+#define ixSMU_PM_STATUS_39 0x3fe9c
+#define ixSMU_PM_STATUS_40 0x3fea0
+#define ixSMU_PM_STATUS_41 0x3fea4
+#define ixSMU_PM_STATUS_42 0x3fea8
+#define ixSMU_PM_STATUS_43 0x3feac
+#define ixSMU_PM_STATUS_44 0x3feb0
+#define ixSMU_PM_STATUS_45 0x3feb4
+#define ixSMU_PM_STATUS_46 0x3feb8
+#define ixSMU_PM_STATUS_47 0x3febc
+#define ixSMU_PM_STATUS_48 0x3fec0
+#define ixSMU_PM_STATUS_49 0x3fec4
+#define ixSMU_PM_STATUS_50 0x3fec8
+#define ixSMU_PM_STATUS_51 0x3fecc
+#define ixSMU_PM_STATUS_52 0x3fed0
+#define ixSMU_PM_STATUS_53 0x3fed4
+#define ixSMU_PM_STATUS_54 0x3fed8
+#define ixSMU_PM_STATUS_55 0x3fedc
+#define ixSMU_PM_STATUS_56 0x3fee0
+#define ixSMU_PM_STATUS_57 0x3fee4
+#define ixSMU_PM_STATUS_58 0x3fee8
+#define ixSMU_PM_STATUS_59 0x3feec
+#define ixSMU_PM_STATUS_60 0x3fef0
+#define ixSMU_PM_STATUS_61 0x3fef4
+#define ixSMU_PM_STATUS_62 0x3fef8
+#define ixSMU_PM_STATUS_63 0x3fefc
+#define ixSMU_PM_STATUS_64 0x3ff00
+#define ixSMU_PM_STATUS_65 0x3ff04
+#define ixSMU_PM_STATUS_66 0x3ff08
+#define ixSMU_PM_STATUS_67 0x3ff0c
+#define ixSMU_PM_STATUS_68 0x3ff10
+#define ixSMU_PM_STATUS_69 0x3ff14
+#define ixSMU_PM_STATUS_70 0x3ff18
+#define ixSMU_PM_STATUS_71 0x3ff1c
+#define ixSMU_PM_STATUS_72 0x3ff20
+#define ixSMU_PM_STATUS_73 0x3ff24
+#define ixSMU_PM_STATUS_74 0x3ff28
+#define ixSMU_PM_STATUS_75 0x3ff2c
+#define ixSMU_PM_STATUS_76 0x3ff30
+#define ixSMU_PM_STATUS_77 0x3ff34
+#define ixSMU_PM_STATUS_78 0x3ff38
+#define ixSMU_PM_STATUS_79 0x3ff3c
+#define ixSMU_PM_STATUS_80 0x3ff40
+#define ixSMU_PM_STATUS_81 0x3ff44
+#define ixSMU_PM_STATUS_82 0x3ff48
+#define ixSMU_PM_STATUS_83 0x3ff4c
+#define ixSMU_PM_STATUS_84 0x3ff50
+#define ixSMU_PM_STATUS_85 0x3ff54
+#define ixSMU_PM_STATUS_86 0x3ff58
+#define ixSMU_PM_STATUS_87 0x3ff5c
+#define ixSMU_PM_STATUS_88 0x3ff60
+#define ixSMU_PM_STATUS_89 0x3ff64
+#define ixSMU_PM_STATUS_90 0x3ff68
+#define ixSMU_PM_STATUS_91 0x3ff6c
+#define ixSMU_PM_STATUS_92 0x3ff70
+#define ixSMU_PM_STATUS_93 0x3ff74
+#define ixSMU_PM_STATUS_94 0x3ff78
+#define ixSMU_PM_STATUS_95 0x3ff7c
+#define ixSMU_PM_STATUS_96 0x3ff80
+#define ixSMU_PM_STATUS_97 0x3ff84
+#define ixSMU_PM_STATUS_98 0x3ff88
+#define ixSMU_PM_STATUS_99 0x3ff8c
+#define ixSMU_PM_STATUS_100 0x3ff90
+#define ixSMU_PM_STATUS_101 0x3ff94
+#define ixSMU_PM_STATUS_102 0x3ff98
+#define ixSMU_PM_STATUS_103 0x3ff9c
+#define ixSMU_PM_STATUS_104 0x3ffa0
+#define ixSMU_PM_STATUS_105 0x3ffa4
+#define ixSMU_PM_STATUS_106 0x3ffa8
+#define ixSMU_PM_STATUS_107 0x3ffac
+#define ixSMU_PM_STATUS_108 0x3ffb0
+#define ixSMU_PM_STATUS_109 0x3ffb4
+#define ixSMU_PM_STATUS_110 0x3ffb8
+#define ixSMU_PM_STATUS_111 0x3ffbc
+#define ixSMU_PM_STATUS_112 0x3ffc0
+#define ixSMU_PM_STATUS_113 0x3ffc4
+#define ixSMU_PM_STATUS_114 0x3ffc8
+#define ixSMU_PM_STATUS_115 0x3ffcc
+#define ixSMU_PM_STATUS_116 0x3ffd0
+#define ixSMU_PM_STATUS_117 0x3ffd4
+#define ixSMU_PM_STATUS_118 0x3ffd8
+#define ixSMU_PM_STATUS_119 0x3ffdc
+#define ixSMU_PM_STATUS_120 0x3ffe0
+#define ixSMU_PM_STATUS_121 0x3ffe4
+#define ixSMU_PM_STATUS_122 0x3ffe8
+#define ixSMU_PM_STATUS_123 0x3ffec
+#define ixSMU_PM_STATUS_124 0x3fff0
+#define ixSMU_PM_STATUS_125 0x3fff4
+#define ixSMU_PM_STATUS_126 0x3fff8
+#define ixSMU_PM_STATUS_127 0x3fffc
+#define ixCG_THERMAL_INT_ENA 0xc2100024
+#define ixCG_THERMAL_INT_CTRL 0xc2100028
+#define ixCG_THERMAL_INT_STATUS 0xc210002c
+#define ixCG_THERMAL_CTRL 0xc0300004
+#define ixCG_THERMAL_STATUS 0xc0300008
+#define ixCG_THERMAL_INT 0xc030000c
+#define ixCG_MULT_THERMAL_CTRL 0xc0300010
+#define ixCG_MULT_THERMAL_STATUS 0xc0300014
+#define ixTHM_TMON2_CTRL 0xc0300034
+#define ixTHM_TMON2_CTRL2 0xc0300038
+#define ixTHM_TMON2_CSR_WR 0xc0300054
+#define ixTHM_TMON2_CSR_RD 0xc0300058
+#define ixCG_FDO_CTRL0 0xc0300064
+#define ixCG_FDO_CTRL1 0xc0300068
+#define ixCG_FDO_CTRL2 0xc030006c
+#define ixCG_TACH_CTRL 0xc0300070
+#define ixCG_TACH_STATUS 0xc0300074
+#define ixCC_THM_STRAPS0 0xc0300080
+#define ixTHM_TMON0_RDIL0_DATA 0xc0300100
+#define ixTHM_TMON0_RDIL1_DATA 0xc0300104
+#define ixTHM_TMON0_RDIL2_DATA 0xc0300108
+#define ixTHM_TMON0_RDIL3_DATA 0xc030010c
+#define ixTHM_TMON0_RDIL4_DATA 0xc0300110
+#define ixTHM_TMON0_RDIL5_DATA 0xc0300114
+#define ixTHM_TMON0_RDIL6_DATA 0xc0300118
+#define ixTHM_TMON0_RDIL7_DATA 0xc030011c
+#define ixTHM_TMON0_RDIL8_DATA 0xc0300120
+#define ixTHM_TMON0_RDIL9_DATA 0xc0300124
+#define ixTHM_TMON0_RDIL10_DATA 0xc0300128
+#define ixTHM_TMON0_RDIL11_DATA 0xc030012c
+#define ixTHM_TMON0_RDIL12_DATA 0xc0300130
+#define ixTHM_TMON0_RDIL13_DATA 0xc0300134
+#define ixTHM_TMON0_RDIL14_DATA 0xc0300138
+#define ixTHM_TMON0_RDIL15_DATA 0xc030013c
+#define ixTHM_TMON0_RDIR0_DATA 0xc0300140
+#define ixTHM_TMON0_RDIR1_DATA 0xc0300144
+#define ixTHM_TMON0_RDIR2_DATA 0xc0300148
+#define ixTHM_TMON0_RDIR3_DATA 0xc030014c
+#define ixTHM_TMON0_RDIR4_DATA 0xc0300150
+#define ixTHM_TMON0_RDIR5_DATA 0xc0300154
+#define ixTHM_TMON0_RDIR6_DATA 0xc0300158
+#define ixTHM_TMON0_RDIR7_DATA 0xc030015c
+#define ixTHM_TMON0_RDIR8_DATA 0xc0300160
+#define ixTHM_TMON0_RDIR9_DATA 0xc0300164
+#define ixTHM_TMON0_RDIR10_DATA 0xc0300168
+#define ixTHM_TMON0_RDIR11_DATA 0xc030016c
+#define ixTHM_TMON0_RDIR12_DATA 0xc0300170
+#define ixTHM_TMON0_RDIR13_DATA 0xc0300174
+#define ixTHM_TMON0_RDIR14_DATA 0xc0300178
+#define ixTHM_TMON0_RDIR15_DATA 0xc030017c
+#define ixTHM_TMON1_RDIL0_DATA 0xc0300180
+#define ixTHM_TMON1_RDIL1_DATA 0xc0300184
+#define ixTHM_TMON1_RDIL2_DATA 0xc0300188
+#define ixTHM_TMON1_RDIL3_DATA 0xc030018c
+#define ixTHM_TMON1_RDIL4_DATA 0xc0300190
+#define ixTHM_TMON1_RDIL5_DATA 0xc0300194
+#define ixTHM_TMON1_RDIL6_DATA 0xc0300198
+#define ixTHM_TMON1_RDIL7_DATA 0xc030019c
+#define ixTHM_TMON1_RDIL8_DATA 0xc03001a0
+#define ixTHM_TMON1_RDIL9_DATA 0xc03001a4
+#define ixTHM_TMON1_RDIL10_DATA 0xc03001a8
+#define ixTHM_TMON1_RDIL11_DATA 0xc03001ac
+#define ixTHM_TMON1_RDIL12_DATA 0xc03001b0
+#define ixTHM_TMON1_RDIL13_DATA 0xc03001b4
+#define ixTHM_TMON1_RDIL14_DATA 0xc03001b8
+#define ixTHM_TMON1_RDIL15_DATA 0xc03001bc
+#define ixTHM_TMON1_RDIR0_DATA 0xc03001c0
+#define ixTHM_TMON1_RDIR1_DATA 0xc03001c4
+#define ixTHM_TMON1_RDIR2_DATA 0xc03001c8
+#define ixTHM_TMON1_RDIR3_DATA 0xc03001cc
+#define ixTHM_TMON1_RDIR4_DATA 0xc03001d0
+#define ixTHM_TMON1_RDIR5_DATA 0xc03001d4
+#define ixTHM_TMON1_RDIR6_DATA 0xc03001d8
+#define ixTHM_TMON1_RDIR7_DATA 0xc03001dc
+#define ixTHM_TMON1_RDIR8_DATA 0xc03001e0
+#define ixTHM_TMON1_RDIR9_DATA 0xc03001e4
+#define ixTHM_TMON1_RDIR10_DATA 0xc03001e8
+#define ixTHM_TMON1_RDIR11_DATA 0xc03001ec
+#define ixTHM_TMON1_RDIR12_DATA 0xc03001f0
+#define ixTHM_TMON1_RDIR13_DATA 0xc03001f4
+#define ixTHM_TMON1_RDIR14_DATA 0xc03001f8
+#define ixTHM_TMON1_RDIR15_DATA 0xc03001fc
+#define ixTHM_TMON2_RDIL0_DATA 0xc0300200
+#define ixTHM_TMON2_RDIL1_DATA 0xc0300204
+#define ixTHM_TMON2_RDIL2_DATA 0xc0300208
+#define ixTHM_TMON2_RDIL3_DATA 0xc030020c
+#define ixTHM_TMON2_RDIL4_DATA 0xc0300210
+#define ixTHM_TMON2_RDIL5_DATA 0xc0300214
+#define ixTHM_TMON2_RDIL6_DATA 0xc0300218
+#define ixTHM_TMON2_RDIL7_DATA 0xc030021c
+#define ixTHM_TMON2_RDIL8_DATA 0xc0300220
+#define ixTHM_TMON2_RDIL9_DATA 0xc0300224
+#define ixTHM_TMON2_RDIL10_DATA 0xc0300228
+#define ixTHM_TMON2_RDIL11_DATA 0xc030022c
+#define ixTHM_TMON2_RDIL12_DATA 0xc0300230
+#define ixTHM_TMON2_RDIL13_DATA 0xc0300234
+#define ixTHM_TMON2_RDIL14_DATA 0xc0300238
+#define ixTHM_TMON2_RDIL15_DATA 0xc030023c
+#define ixTHM_TMON2_RDIR0_DATA 0xc0300240
+#define ixTHM_TMON2_RDIR1_DATA 0xc0300244
+#define ixTHM_TMON2_RDIR2_DATA 0xc0300248
+#define ixTHM_TMON2_RDIR3_DATA 0xc030024c
+#define ixTHM_TMON2_RDIR4_DATA 0xc0300250
+#define ixTHM_TMON2_RDIR5_DATA 0xc0300254
+#define ixTHM_TMON2_RDIR6_DATA 0xc0300258
+#define ixTHM_TMON2_RDIR7_DATA 0xc030025c
+#define ixTHM_TMON2_RDIR8_DATA 0xc0300260
+#define ixTHM_TMON2_RDIR9_DATA 0xc0300264
+#define ixTHM_TMON2_RDIR10_DATA 0xc0300268
+#define ixTHM_TMON2_RDIR11_DATA 0xc030026c
+#define ixTHM_TMON2_RDIR12_DATA 0xc0300270
+#define ixTHM_TMON2_RDIR13_DATA 0xc0300274
+#define ixTHM_TMON2_RDIR14_DATA 0xc0300278
+#define ixTHM_TMON2_RDIR15_DATA 0xc030027c
+#define ixTHM_TMON0_INT_DATA 0xc0300300
+#define ixTHM_TMON1_INT_DATA 0xc0300304
+#define ixTHM_TMON2_INT_DATA 0xc0300308
+#define ixTHM_TMON0_DEBUG 0xc0300310
+#define ixTHM_TMON1_DEBUG 0xc0300314
+#define ixTHM_TMON2_DEBUG 0xc0300318
+#define ixTHM_TMON0_STATUS 0xc0300320
+#define ixTHM_TMON1_STATUS 0xc0300324
+#define ixTHM_TMON2_STATUS 0xc0300328
+#define ixGENERAL_PWRMGT 0xc0200000
+#define ixCNB_PWRMGT_CNTL 0xc0200004
+#define ixSCLK_PWRMGT_CNTL 0xc0200008
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX 0xc0200014
+#define ixPWR_PCC_CONTROL 0xc0200018
+#define ixPWR_PCC_GPIO_SELECT 0xc020001c
+#define ixCG_FREQ_TRAN_VOTING_0 0xc02001a8
+#define ixCG_FREQ_TRAN_VOTING_1 0xc02001ac
+#define ixCG_FREQ_TRAN_VOTING_2 0xc02001b0
+#define ixCG_FREQ_TRAN_VOTING_3 0xc02001b4
+#define ixCG_FREQ_TRAN_VOTING_4 0xc02001b8
+#define ixCG_FREQ_TRAN_VOTING_5 0xc02001bc
+#define ixCG_FREQ_TRAN_VOTING_6 0xc02001c0
+#define ixCG_FREQ_TRAN_VOTING_7 0xc02001c4
+#define ixPLL_TEST_CNTL 0xc020003c
+#define ixCG_STATIC_SCREEN_PARAMETER 0xc0200044
+#define ixCG_DISPLAY_GAP_CNTL 0xc0200060
+#define ixCG_DISPLAY_GAP_CNTL2 0xc0200230
+#define ixCG_ACPI_CNTL 0xc0200064
+#define ixSCLK_DEEP_SLEEP_CNTL 0xc0200080
+#define ixSCLK_DEEP_SLEEP_CNTL2 0xc0200084
+#define ixSCLK_DEEP_SLEEP_CNTL3 0xc020009c
+#define ixSCLK_DEEP_SLEEP_MISC_CNTL 0xc0200088
+#define ixLCLK_DEEP_SLEEP_CNTL 0xc020008c
+#define ixLCLK_DEEP_SLEEP_CNTL2 0xc0200310
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1 0xc02000f0
+#define ixCG_ULV_PARAMETER 0xc020015c
+#define ixSCLK_MIN_DIV 0xc02003ac
+#define ixPWR_AVFS_SEL 0xc0200384
+#define ixPWR_AVFS_CNTL 0xc0200388
+#define ixPWR_AVFS0_CNTL_STATUS 0xc0200400
+#define ixPWR_AVFS1_CNTL_STATUS 0xc0200404
+#define ixPWR_AVFS2_CNTL_STATUS 0xc0200408
+#define ixPWR_AVFS3_CNTL_STATUS 0xc020040c
+#define ixPWR_AVFS4_CNTL_STATUS 0xc0200410
+#define ixPWR_AVFS5_CNTL_STATUS 0xc0200414
+#define ixPWR_AVFS6_CNTL_STATUS 0xc0200418
+#define ixPWR_AVFS7_CNTL_STATUS 0xc020041c
+#define ixPWR_AVFS8_CNTL_STATUS 0xc0200420
+#define ixPWR_AVFS9_CNTL_STATUS 0xc0200424
+#define ixPWR_AVFS10_CNTL_STATUS 0xc0200428
+#define ixPWR_AVFS11_CNTL_STATUS 0xc020042c
+#define ixPWR_AVFS12_CNTL_STATUS 0xc0200430
+#define ixPWR_AVFS13_CNTL_STATUS 0xc0200434
+#define ixPWR_AVFS14_CNTL_STATUS 0xc0200438
+#define ixPWR_AVFS15_CNTL_STATUS 0xc020043c
+#define ixPWR_AVFS16_CNTL_STATUS 0xc0200440
+#define ixPWR_AVFS17_CNTL_STATUS 0xc0200444
+#define ixPWR_AVFS18_CNTL_STATUS 0xc0200448
+#define ixPWR_AVFS19_CNTL_STATUS 0xc020044c
+#define ixPWR_AVFS20_CNTL_STATUS 0xc0200450
+#define ixPWR_AVFS21_CNTL_STATUS 0xc0200454
+#define ixPWR_AVFS22_CNTL_STATUS 0xc0200458
+#define ixPWR_AVFS23_CNTL_STATUS 0xc020045c
+#define ixPWR_AVFS24_CNTL_STATUS 0xc0200460
+#define ixPWR_AVFS25_CNTL_STATUS 0xc0200464
+#define ixPWR_AVFS26_CNTL_STATUS 0xc0200468
+#define ixPWR_AVFS27_CNTL_STATUS 0xc020046c
+#define ixPWR_CKS_ENABLE 0xc020034c
+#define ixPWR_CKS_CNTL 0xc0200350
+#define ixPWR_DISP_TIMER_CONTROL 0xc02003c0
+#define ixPWR_DISP_TIMER_DEBUG 0xc02003c4
+#define ixPWR_DISP_TIMER2_CONTROL 0xc02003c8
+#define ixPWR_DISP_TIMER2_DEBUG 0xc02003cc
+#define ixPWR_DISP_TIMER_CONTROL2 0xc0200378
+#define ixVDDGFX_IDLE_PARAMETER 0xc020036c
+#define ixVDDGFX_IDLE_CONTROL 0xc0200370
+#define ixVDDGFX_IDLE_EXIT 0xc0200374
+#define ixLCAC_MC0_CNTL 0xc0400130
+#define ixLCAC_MC0_OVR_SEL 0xc0400134
+#define ixLCAC_MC0_OVR_VAL 0xc0400138
+#define ixLCAC_MC1_CNTL 0xc040013c
+#define ixLCAC_MC1_OVR_SEL 0xc0400140
+#define ixLCAC_MC1_OVR_VAL 0xc0400144
+#define ixLCAC_MC2_CNTL 0xc0400148
+#define ixLCAC_MC2_OVR_SEL 0xc040014c
+#define ixLCAC_MC2_OVR_VAL 0xc0400150
+#define ixLCAC_MC3_CNTL 0xc0400154
+#define ixLCAC_MC3_OVR_SEL 0xc0400158
+#define ixLCAC_MC3_OVR_VAL 0xc040015c
+#define ixLCAC_MC4_CNTL 0xc0400d60
+#define ixLCAC_MC4_OVR_SEL 0xc0400d64
+#define ixLCAC_MC4_OVR_VAL 0xc0400d68
+#define ixLCAC_MC5_CNTL 0xc0400d6c
+#define ixLCAC_MC5_OVR_SEL 0xc0400d70
+#define ixLCAC_MC5_OVR_VAL 0xc0400d74
+#define ixLCAC_MC6_CNTL 0xc0400d78
+#define ixLCAC_MC6_OVR_SEL 0xc0400d7c
+#define ixLCAC_MC6_OVR_VAL 0xc0400d80
+#define ixLCAC_MC7_CNTL 0xc0400d84
+#define ixLCAC_MC7_OVR_SEL 0xc0400d88
+#define ixLCAC_MC7_OVR_VAL 0xc0400d8c
+#define ixLCAC_CPL_CNTL 0xc0400160
+#define ixLCAC_CPL_OVR_SEL 0xc0400164
+#define ixLCAC_CPL_OVR_VAL 0xc0400168
+#define mmROM_SMC_IND_INDEX 0x80
+#define mmROM0_ROM_SMC_IND_INDEX 0x80
+#define mmROM1_ROM_SMC_IND_INDEX 0x82
+#define mmROM2_ROM_SMC_IND_INDEX 0x84
+#define mmROM3_ROM_SMC_IND_INDEX 0x86
+#define mmROM_SMC_IND_DATA 0x81
+#define mmROM0_ROM_SMC_IND_DATA 0x81
+#define mmROM1_ROM_SMC_IND_DATA 0x83
+#define mmROM2_ROM_SMC_IND_DATA 0x85
+#define mmROM3_ROM_SMC_IND_DATA 0x87
+#define ixROM_CNTL 0xc0600000
+#define ixPAGE_MIRROR_CNTL 0xc0600004
+#define ixROM_STATUS 0xc0600008
+#define ixCGTT_ROM_CLK_CTRL0 0xc060000c
+#define ixROM_INDEX 0xc0600010
+#define ixROM_DATA 0xc0600014
+#define ixROM_START 0xc0600018
+#define ixROM_SW_CNTL 0xc060001c
+#define ixROM_SW_STATUS 0xc0600020
+#define ixROM_SW_COMMAND 0xc0600024
+#define ixROM_SW_DATA_1 0xc0600028
+#define ixROM_SW_DATA_2 0xc060002c
+#define ixROM_SW_DATA_3 0xc0600030
+#define ixROM_SW_DATA_4 0xc0600034
+#define ixROM_SW_DATA_5 0xc0600038
+#define ixROM_SW_DATA_6 0xc060003c
+#define ixROM_SW_DATA_7 0xc0600040
+#define ixROM_SW_DATA_8 0xc0600044
+#define ixROM_SW_DATA_9 0xc0600048
+#define ixROM_SW_DATA_10 0xc060004c
+#define ixROM_SW_DATA_11 0xc0600050
+#define ixROM_SW_DATA_12 0xc0600054
+#define ixROM_SW_DATA_13 0xc0600058
+#define ixROM_SW_DATA_14 0xc060005c
+#define ixROM_SW_DATA_15 0xc0600060
+#define ixROM_SW_DATA_16 0xc0600064
+#define ixROM_SW_DATA_17 0xc0600068
+#define ixROM_SW_DATA_18 0xc060006c
+#define ixROM_SW_DATA_19 0xc0600070
+#define ixROM_SW_DATA_20 0xc0600074
+#define ixROM_SW_DATA_21 0xc0600078
+#define ixROM_SW_DATA_22 0xc060007c
+#define ixROM_SW_DATA_23 0xc0600080
+#define ixROM_SW_DATA_24 0xc0600084
+#define ixROM_SW_DATA_25 0xc0600088
+#define ixROM_SW_DATA_26 0xc060008c
+#define ixROM_SW_DATA_27 0xc0600090
+#define ixROM_SW_DATA_28 0xc0600094
+#define ixROM_SW_DATA_29 0xc0600098
+#define ixROM_SW_DATA_30 0xc060009c
+#define ixROM_SW_DATA_31 0xc06000a0
+#define ixROM_SW_DATA_32 0xc06000a4
+#define ixROM_SW_DATA_33 0xc06000a8
+#define ixROM_SW_DATA_34 0xc06000ac
+#define ixROM_SW_DATA_35 0xc06000b0
+#define ixROM_SW_DATA_36 0xc06000b4
+#define ixROM_SW_DATA_37 0xc06000b8
+#define ixROM_SW_DATA_38 0xc06000bc
+#define ixROM_SW_DATA_39 0xc06000c0
+#define ixROM_SW_DATA_40 0xc06000c4
+#define ixROM_SW_DATA_41 0xc06000c8
+#define ixROM_SW_DATA_42 0xc06000cc
+#define ixROM_SW_DATA_43 0xc06000d0
+#define ixROM_SW_DATA_44 0xc06000d4
+#define ixROM_SW_DATA_45 0xc06000d8
+#define ixROM_SW_DATA_46 0xc06000dc
+#define ixROM_SW_DATA_47 0xc06000e0
+#define ixROM_SW_DATA_48 0xc06000e4
+#define ixROM_SW_DATA_49 0xc06000e8
+#define ixROM_SW_DATA_50 0xc06000ec
+#define ixROM_SW_DATA_51 0xc06000f0
+#define ixROM_SW_DATA_52 0xc06000f4
+#define ixROM_SW_DATA_53 0xc06000f8
+#define ixROM_SW_DATA_54 0xc06000fc
+#define ixROM_SW_DATA_55 0xc0600100
+#define ixROM_SW_DATA_56 0xc0600104
+#define ixROM_SW_DATA_57 0xc0600108
+#define ixROM_SW_DATA_58 0xc060010c
+#define ixROM_SW_DATA_59 0xc0600110
+#define ixROM_SW_DATA_60 0xc0600114
+#define ixROM_SW_DATA_61 0xc0600118
+#define ixROM_SW_DATA_62 0xc060011c
+#define ixROM_SW_DATA_63 0xc0600120
+#define ixROM_SW_DATA_64 0xc0600124
+#define mmGC_CAC_CGTT_CLK_CTRL 0x3292
+#define mmSE_CAC_CGTT_CLK_CTRL 0x3293
+#define mmGC_CAC_LKG_AGGR_LOWER 0x3296
+#define mmGC_CAC_LKG_AGGR_UPPER 0x3297
+#define ixGC_CAC_WEIGHT_CU_0 0x32
+#define ixGC_CAC_WEIGHT_CU_1 0x33
+#define ixGC_CAC_WEIGHT_CU_2 0x34
+#define ixGC_CAC_WEIGHT_CU_3 0x35
+#define ixGC_CAC_WEIGHT_CU_4 0x36
+#define ixGC_CAC_WEIGHT_CU_5 0x37
+#define ixGC_CAC_WEIGHT_CU_6 0x38
+#define ixGC_CAC_WEIGHT_CU_7 0x39
+#define ixGC_CAC_ACC_CU0 0xba
+#define ixGC_CAC_ACC_CU1 0xbb
+#define ixGC_CAC_ACC_CU2 0xbc
+#define ixGC_CAC_ACC_CU3 0xbd
+#define ixGC_CAC_ACC_CU4 0xbe
+#define ixGC_CAC_ACC_CU5 0xbf
+#define ixGC_CAC_ACC_CU6 0xc0
+#define ixGC_CAC_ACC_CU7 0xc1
+#define ixGC_CAC_ACC_CU8 0xc2
+#define ixGC_CAC_ACC_CU9 0xc3
+#define ixGC_CAC_ACC_CU10 0xc4
+#define ixGC_CAC_ACC_CU11 0xc5
+#define ixGC_CAC_ACC_CU12 0xc6
+#define ixGC_CAC_ACC_CU13 0xc7
+#define ixGC_CAC_ACC_CU14 0xc8
+#define ixGC_CAC_ACC_CU15 0xc9
+#define ixGC_CAC_OVRD_CU 0xe7
+
+#endif /* SMU_7_1_3_D_H */
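
A note on the register prefixes used throughout these headers: the mm-prefixed constants are direct MMIO register offsets (in dwords), while the ix-prefixed constants are indirect SMC register offsets reached through an index/data register pair rather than through the directly mapped space. The following is only an illustrative sketch of such an indirect read; it assumes the RREG32/WREG32 MMIO helpers and the mmSMC_IND_INDEX_0/mmSMC_IND_DATA_0 pair that amdgpu uses elsewhere (none of which are defined in this patch), and it omits the locking (adev->smc_idx_lock) a real implementation takes around the index/data sequence:

static u32 smc_ind_rreg(struct amdgpu_device *adev, u32 ix_reg)
{
	/* Select the indirect (ix*) offset, then read the value back
	 * through the data register. Sketch only; no locking. */
	WREG32(mmSMC_IND_INDEX_0, ix_reg);
	return RREG32(mmSMC_IND_DATA_0);
}

For example, smc_ind_rreg(adev, ixCG_THERMAL_STATUS) would fetch the thermal status word defined above, whereas an mm-prefixed register such as mmROM_SMC_IND_INDEX is read directly with RREG32.
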
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
new file mode 100644
index 000000000000..f19c4208d963
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
@@ -0,0 +1,1282 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_ENUM_H
+#define SMU_7_1_3_ENUM_H
+
+#define CG_SRBM_START_ADDR 0x600
+#define CG_SRBM_END_ADDR 0x8ff
+#define RCU_CCF_DWORDS0 0xa0
+#define RCU_CCF_BITS0 0x1400
+#define RCU_SAM_BYTES 0x2c
+#define RCU_SAM_RTL_BYTES 0x2c
+#define RCU_SMU_BYTES 0x14
+#define RCU_SMU_RTL_BYTES 0x14
+#define SFP_CHAIN_ADDR 0x1
+#define SFP_SADR 0x0
+#define SFP_EADR 0x37f
+#define SAMU_KEY_CHAIN_ADR 0x0
+#define SAMU_KEY_SADR 0x280
+#define SAMU_KEY_EADR 0x2ab
+#define SMU_KEY_CHAIN_ADR 0x0
+#define SMU_KEY_SADR 0x2ac
+#define SMU_KEY_EADR 0x2bf
+#define SMC_MSG_TEST 0x1
+#define SMC_MSG_PHY_LN_OFF 0x2
+#define SMC_MSG_PHY_LN_ON 0x3
+#define SMC_MSG_DDI_PHY_OFF 0x4
+#define SMC_MSG_DDI_PHY_ON 0x5
+#define SMC_MSG_CASCADE_PLL_OFF 0x6
+#define SMC_MSG_CASCADE_PLL_ON 0x7
+#define SMC_MSG_PWR_OFF_x16 0x8
+#define SMC_MSG_CONFIG_LCLK_DPM 0x9
+#define SMC_MSG_FLUSH_DATA_CACHE 0xa
+#define SMC_MSG_FLUSH_INSTRUCTION_CACHE 0xb
+#define SMC_MSG_CONFIG_VPC_ACCUMULATOR 0xc
+#define SMC_MSG_CONFIG_BAPM 0xd
+#define SMC_MSG_CONFIG_TDC_LIMIT 0xe
+#define SMC_MSG_CONFIG_LPMx 0xf
+#define SMC_MSG_CONFIG_HTC_LIMIT 0x10
+#define SMC_MSG_CONFIG_THERMAL_CNTL 0x11
+#define SMC_MSG_CONFIG_VOLTAGE_CNTL 0x12
+#define SMC_MSG_CONFIG_TDP_CNTL 0x13
+#define SMC_MSG_EN_PM_CNTL 0x14
+#define SMC_MSG_DIS_PM_CNTL 0x15
+#define SMC_MSG_CONFIG_NBDPM 0x16
+#define SMC_MSG_CONFIG_LOADLINE 0x17
+#define SMC_MSG_ADJUST_LOADLINE 0x18
+#define SMC_MSG_RESET 0x20
+#define SMC_MSG_VOLTAGE 0x25
+#define SMC_VERSION_MAJOR 0x7
+#define SMC_VERSION_MINOR 0x0
+#define SMC_HEADER_SIZE 0x40
+#define ROM_SIGNATURE 0xaa55
+typedef enum SurfaceEndian {
+ ENDIAN_NONE = 0x0,
+ ENDIAN_8IN16 = 0x1,
+ ENDIAN_8IN32 = 0x2,
+ ENDIAN_8IN64 = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+ ARRAY_LINEAR_GENERAL = 0x0,
+ ARRAY_LINEAR_ALIGNED = 0x1,
+ ARRAY_1D_TILED_THIN1 = 0x2,
+ ARRAY_1D_TILED_THICK = 0x3,
+ ARRAY_2D_TILED_THIN1 = 0x4,
+ ARRAY_PRT_TILED_THIN1 = 0x5,
+ ARRAY_PRT_2D_TILED_THIN1 = 0x6,
+ ARRAY_2D_TILED_THICK = 0x7,
+ ARRAY_2D_TILED_XTHICK = 0x8,
+ ARRAY_PRT_TILED_THICK = 0x9,
+ ARRAY_PRT_2D_TILED_THICK = 0xa,
+ ARRAY_PRT_3D_TILED_THIN1 = 0xb,
+ ARRAY_3D_TILED_THIN1 = 0xc,
+ ARRAY_3D_TILED_THICK = 0xd,
+ ARRAY_3D_TILED_XTHICK = 0xe,
+ ARRAY_PRT_3D_TILED_THICK = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+ CONFIG_1_PIPE = 0x0,
+ CONFIG_2_PIPE = 0x1,
+ CONFIG_4_PIPE = 0x2,
+ CONFIG_8_PIPE = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+ CONFIG_4_BANK = 0x0,
+ CONFIG_8_BANK = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+ CONFIG_256B_GROUP = 0x0,
+ CONFIG_512B_GROUP = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+ CONFIG_1KB_ROW = 0x0,
+ CONFIG_2KB_ROW = 0x1,
+ CONFIG_4KB_ROW = 0x2,
+ CONFIG_8KB_ROW = 0x3,
+ CONFIG_1KB_ROW_OPT = 0x4,
+ CONFIG_2KB_ROW_OPT = 0x5,
+ CONFIG_4KB_ROW_OPT = 0x6,
+ CONFIG_8KB_ROW_OPT = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+ CONFIG_128B_SWAPS = 0x0,
+ CONFIG_256B_SWAPS = 0x1,
+ CONFIG_512B_SWAPS = 0x2,
+ CONFIG_1KB_SWAPS = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+ CONFIG_1KB_SPLIT = 0x0,
+ CONFIG_2KB_SPLIT = 0x1,
+ CONFIG_4KB_SPLIT = 0x2,
+ CONFIG_8KB_SPLIT = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+ ADDR_CONFIG_1_PIPE = 0x0,
+ ADDR_CONFIG_2_PIPE = 0x1,
+ ADDR_CONFIG_4_PIPE = 0x2,
+ ADDR_CONFIG_8_PIPE = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+ ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0,
+ ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+ ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0,
+ ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1,
+ ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2,
+ ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+ ADDR_CONFIG_1_SHADER_ENGINE = 0x0,
+ ADDR_CONFIG_2_SHADER_ENGINE = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+ ADDR_CONFIG_SE_TILE_16 = 0x0,
+ ADDR_CONFIG_SE_TILE_32 = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+ ADDR_CONFIG_1_GPU = 0x0,
+ ADDR_CONFIG_2_GPU = 0x1,
+ ADDR_CONFIG_4_GPU = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+ ADDR_CONFIG_GPU_TILE_16 = 0x0,
+ ADDR_CONFIG_GPU_TILE_32 = 0x1,
+ ADDR_CONFIG_GPU_TILE_64 = 0x2,
+ ADDR_CONFIG_GPU_TILE_128 = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+ ADDR_CONFIG_1KB_ROW = 0x0,
+ ADDR_CONFIG_2KB_ROW = 0x1,
+ ADDR_CONFIG_4KB_ROW = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+ ADDR_CONFIG_1_LOWER_PIPES = 0x0,
+ ADDR_CONFIG_2_LOWER_PIPES = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+ DBG_CLIENT_BLKID_RESERVED = 0x0,
+ DBG_CLIENT_BLKID_dbg = 0x1,
+ DBG_CLIENT_BLKID_scf2 = 0x2,
+ DBG_CLIENT_BLKID_mcd5_0 = 0x3,
+ DBG_CLIENT_BLKID_mcd5_1 = 0x4,
+ DBG_CLIENT_BLKID_mcd6_0 = 0x5,
+ DBG_CLIENT_BLKID_mcd6_1 = 0x6,
+ DBG_CLIENT_BLKID_mcd7_0 = 0x7,
+ DBG_CLIENT_BLKID_mcd7_1 = 0x8,
+ DBG_CLIENT_BLKID_vmc = 0x9,
+ DBG_CLIENT_BLKID_sx30 = 0xa,
+ DBG_CLIENT_BLKID_mcd2_0 = 0xb,
+ DBG_CLIENT_BLKID_mcd2_1 = 0xc,
+ DBG_CLIENT_BLKID_bci1 = 0xd,
+ DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0xe,
+ DBG_CLIENT_BLKID_mcc0 = 0xf,
+ DBG_CLIENT_BLKID_uvdf_0 = 0x10,
+ DBG_CLIENT_BLKID_uvdf_1 = 0x11,
+ DBG_CLIENT_BLKID_uvdf_2 = 0x12,
+ DBG_CLIENT_BLKID_bci0 = 0x13,
+ DBG_CLIENT_BLKID_vcec0_0 = 0x14,
+ DBG_CLIENT_BLKID_cb100 = 0x15,
+ DBG_CLIENT_BLKID_cb001 = 0x16,
+ DBG_CLIENT_BLKID_cb002 = 0x17,
+ DBG_CLIENT_BLKID_cb003 = 0x18,
+ DBG_CLIENT_BLKID_mcd4_0 = 0x19,
+ DBG_CLIENT_BLKID_mcd4_1 = 0x1a,
+ DBG_CLIENT_BLKID_tmonw00 = 0x1b,
+ DBG_CLIENT_BLKID_cb101 = 0x1c,
+ DBG_CLIENT_BLKID_cb102 = 0x1d,
+ DBG_CLIENT_BLKID_cb103 = 0x1e,
+ DBG_CLIENT_BLKID_sx10 = 0x1f,
+ DBG_CLIENT_BLKID_cb301 = 0x20,
+ DBG_CLIENT_BLKID_cb302 = 0x21,
+ DBG_CLIENT_BLKID_cb303 = 0x22,
+ DBG_CLIENT_BLKID_tmonw01 = 0x23,
+ DBG_CLIENT_BLKID_tmonw02 = 0x24,
+ DBG_CLIENT_BLKID_vcea0_0 = 0x25,
+ DBG_CLIENT_BLKID_vcea0_1 = 0x26,
+ DBG_CLIENT_BLKID_vcea0_2 = 0x27,
+ DBG_CLIENT_BLKID_vcea0_3 = 0x28,
+ DBG_CLIENT_BLKID_scf1 = 0x29,
+ DBG_CLIENT_BLKID_sx20 = 0x2a,
+ DBG_CLIENT_BLKID_spim1 = 0x2b,
+ DBG_CLIENT_BLKID_scb1 = 0x2c,
+ DBG_CLIENT_BLKID_pa10 = 0x2d,
+ DBG_CLIENT_BLKID_pa00 = 0x2e,
+ DBG_CLIENT_BLKID_gmcon = 0x2f,
+ DBG_CLIENT_BLKID_mcb = 0x30,
+ DBG_CLIENT_BLKID_vgt0 = 0x31,
+ DBG_CLIENT_BLKID_pc0 = 0x32,
+ DBG_CLIENT_BLKID_bci2 = 0x33,
+ DBG_CLIENT_BLKID_uvdb_0 = 0x34,
+ DBG_CLIENT_BLKID_spim3 = 0x35,
+ DBG_CLIENT_BLKID_scb3 = 0x36,
+ DBG_CLIENT_BLKID_cpc_0 = 0x37,
+ DBG_CLIENT_BLKID_cpc_1 = 0x38,
+ DBG_CLIENT_BLKID_uvdm_0 = 0x39,
+ DBG_CLIENT_BLKID_uvdm_1 = 0x3a,
+ DBG_CLIENT_BLKID_uvdm_2 = 0x3b,
+ DBG_CLIENT_BLKID_uvdm_3 = 0x3c,
+ DBG_CLIENT_BLKID_cb000 = 0x3d,
+ DBG_CLIENT_BLKID_spim0 = 0x3e,
+ DBG_CLIENT_BLKID_scb0 = 0x3f,
+ DBG_CLIENT_BLKID_mcc2 = 0x40,
+ DBG_CLIENT_BLKID_ds0 = 0x41,
+ DBG_CLIENT_BLKID_srbm = 0x42,
+ DBG_CLIENT_BLKID_ih = 0x43,
+ DBG_CLIENT_BLKID_sem = 0x44,
+ DBG_CLIENT_BLKID_sdma_0 = 0x45,
+ DBG_CLIENT_BLKID_sdma_1 = 0x46,
+ DBG_CLIENT_BLKID_hdp = 0x47,
+ DBG_CLIENT_BLKID_acp_0 = 0x48,
+ DBG_CLIENT_BLKID_acp_1 = 0x49,
+ DBG_CLIENT_BLKID_cb200 = 0x4a,
+ DBG_CLIENT_BLKID_scf3 = 0x4b,
+ DBG_CLIENT_BLKID_bci3 = 0x4c,
+ DBG_CLIENT_BLKID_mcd0_0 = 0x4d,
+ DBG_CLIENT_BLKID_mcd0_1 = 0x4e,
+ DBG_CLIENT_BLKID_pa11 = 0x4f,
+ DBG_CLIENT_BLKID_pa01 = 0x50,
+ DBG_CLIENT_BLKID_cb201 = 0x51,
+ DBG_CLIENT_BLKID_cb202 = 0x52,
+ DBG_CLIENT_BLKID_cb203 = 0x53,
+ DBG_CLIENT_BLKID_spim2 = 0x54,
+ DBG_CLIENT_BLKID_scb2 = 0x55,
+ DBG_CLIENT_BLKID_vgt2 = 0x56,
+ DBG_CLIENT_BLKID_pc2 = 0x57,
+ DBG_CLIENT_BLKID_smu_0 = 0x58,
+ DBG_CLIENT_BLKID_smu_1 = 0x59,
+ DBG_CLIENT_BLKID_smu_2 = 0x5a,
+ DBG_CLIENT_BLKID_cb1 = 0x5b,
+ DBG_CLIENT_BLKID_ia0 = 0x5c,
+ DBG_CLIENT_BLKID_wd = 0x5d,
+ DBG_CLIENT_BLKID_ia1 = 0x5e,
+ DBG_CLIENT_BLKID_scf0 = 0x5f,
+ DBG_CLIENT_BLKID_vgt1 = 0x60,
+ DBG_CLIENT_BLKID_pc1 = 0x61,
+ DBG_CLIENT_BLKID_cb0 = 0x62,
+ DBG_CLIENT_BLKID_gdc_one_0 = 0x63,
+ DBG_CLIENT_BLKID_gdc_one_1 = 0x64,
+ DBG_CLIENT_BLKID_gdc_one_2 = 0x65,
+ DBG_CLIENT_BLKID_gdc_one_3 = 0x66,
+ DBG_CLIENT_BLKID_gdc_one_4 = 0x67,
+ DBG_CLIENT_BLKID_gdc_one_5 = 0x68,
+ DBG_CLIENT_BLKID_gdc_one_6 = 0x69,
+ DBG_CLIENT_BLKID_gdc_one_7 = 0x6a,
+ DBG_CLIENT_BLKID_gdc_one_8 = 0x6b,
+ DBG_CLIENT_BLKID_gdc_one_9 = 0x6c,
+ DBG_CLIENT_BLKID_gdc_one_10 = 0x6d,
+ DBG_CLIENT_BLKID_gdc_one_11 = 0x6e,
+ DBG_CLIENT_BLKID_gdc_one_12 = 0x6f,
+ DBG_CLIENT_BLKID_gdc_one_13 = 0x70,
+ DBG_CLIENT_BLKID_gdc_one_14 = 0x71,
+ DBG_CLIENT_BLKID_gdc_one_15 = 0x72,
+ DBG_CLIENT_BLKID_gdc_one_16 = 0x73,
+ DBG_CLIENT_BLKID_gdc_one_17 = 0x74,
+ DBG_CLIENT_BLKID_gdc_one_18 = 0x75,
+ DBG_CLIENT_BLKID_gdc_one_19 = 0x76,
+ DBG_CLIENT_BLKID_gdc_one_20 = 0x77,
+ DBG_CLIENT_BLKID_gdc_one_21 = 0x78,
+ DBG_CLIENT_BLKID_gdc_one_22 = 0x79,
+ DBG_CLIENT_BLKID_gdc_one_23 = 0x7a,
+ DBG_CLIENT_BLKID_gdc_one_24 = 0x7b,
+ DBG_CLIENT_BLKID_gdc_one_25 = 0x7c,
+ DBG_CLIENT_BLKID_gdc_one_26 = 0x7d,
+ DBG_CLIENT_BLKID_gdc_one_27 = 0x7e,
+ DBG_CLIENT_BLKID_gdc_one_28 = 0x7f,
+ DBG_CLIENT_BLKID_gdc_one_29 = 0x80,
+ DBG_CLIENT_BLKID_gdc_one_30 = 0x81,
+ DBG_CLIENT_BLKID_gdc_one_31 = 0x82,
+ DBG_CLIENT_BLKID_gdc_one_32 = 0x83,
+ DBG_CLIENT_BLKID_gdc_one_33 = 0x84,
+ DBG_CLIENT_BLKID_gdc_one_34 = 0x85,
+ DBG_CLIENT_BLKID_gdc_one_35 = 0x86,
+ DBG_CLIENT_BLKID_vceb0_0 = 0x87,
+ DBG_CLIENT_BLKID_vgt3 = 0x88,
+ DBG_CLIENT_BLKID_pc3 = 0x89,
+ DBG_CLIENT_BLKID_mcd3_0 = 0x8a,
+ DBG_CLIENT_BLKID_mcd3_1 = 0x8b,
+ DBG_CLIENT_BLKID_uvdu_0 = 0x8c,
+ DBG_CLIENT_BLKID_uvdu_1 = 0x8d,
+ DBG_CLIENT_BLKID_uvdu_2 = 0x8e,
+ DBG_CLIENT_BLKID_uvdu_3 = 0x8f,
+ DBG_CLIENT_BLKID_uvdu_4 = 0x90,
+ DBG_CLIENT_BLKID_uvdu_5 = 0x91,
+ DBG_CLIENT_BLKID_uvdu_6 = 0x92,
+ DBG_CLIENT_BLKID_cb300 = 0x93,
+ DBG_CLIENT_BLKID_mcd1_0 = 0x94,
+ DBG_CLIENT_BLKID_mcd1_1 = 0x95,
+ DBG_CLIENT_BLKID_sx00 = 0x96,
+ DBG_CLIENT_BLKID_uvdc_0 = 0x97,
+ DBG_CLIENT_BLKID_uvdc_1 = 0x98,
+ DBG_CLIENT_BLKID_mcc3 = 0x99,
+ DBG_CLIENT_BLKID_mcc4 = 0x9a,
+ DBG_CLIENT_BLKID_mcc5 = 0x9b,
+ DBG_CLIENT_BLKID_mcc6 = 0x9c,
+ DBG_CLIENT_BLKID_mcc7 = 0x9d,
+ DBG_CLIENT_BLKID_cpg_0 = 0x9e,
+ DBG_CLIENT_BLKID_cpg_1 = 0x9f,
+ DBG_CLIENT_BLKID_gck = 0xa0,
+ DBG_CLIENT_BLKID_mcc1 = 0xa1,
+ DBG_CLIENT_BLKID_cpf_0 = 0xa2,
+ DBG_CLIENT_BLKID_cpf_1 = 0xa3,
+ DBG_CLIENT_BLKID_rlc = 0xa4,
+ DBG_CLIENT_BLKID_grbm = 0xa5,
+ DBG_CLIENT_BLKID_sammsp = 0xa6,
+ DBG_CLIENT_BLKID_dci_pg = 0xa7,
+ DBG_CLIENT_BLKID_dci_0 = 0xa8,
+ DBG_CLIENT_BLKID_dccg0_0 = 0xa9,
+ DBG_CLIENT_BLKID_dccg0_1 = 0xaa,
+ DBG_CLIENT_BLKID_dcfe01_0 = 0xab,
+ DBG_CLIENT_BLKID_dcfe02_0 = 0xac,
+ DBG_CLIENT_BLKID_dcfe03_0 = 0xad,
+ DBG_CLIENT_BLKID_dcfe04_0 = 0xae,
+ DBG_CLIENT_BLKID_dcfe05_0 = 0xaf,
+ DBG_CLIENT_BLKID_dcfe06_0 = 0xb0,
+ DBG_CLIENT_BLKID_mcq0_0 = 0xb1,
+ DBG_CLIENT_BLKID_mcq0_1 = 0xb2,
+ DBG_CLIENT_BLKID_mcq1_0 = 0xb3,
+ DBG_CLIENT_BLKID_mcq1_1 = 0xb4,
+ DBG_CLIENT_BLKID_mcq2_0 = 0xb5,
+ DBG_CLIENT_BLKID_mcq2_1 = 0xb6,
+ DBG_CLIENT_BLKID_mcq3_0 = 0xb7,
+ DBG_CLIENT_BLKID_mcq3_1 = 0xb8,
+ DBG_CLIENT_BLKID_mcq4_0 = 0xb9,
+ DBG_CLIENT_BLKID_mcq4_1 = 0xba,
+ DBG_CLIENT_BLKID_mcq5_0 = 0xbb,
+ DBG_CLIENT_BLKID_mcq5_1 = 0xbc,
+ DBG_CLIENT_BLKID_mcq6_0 = 0xbd,
+ DBG_CLIENT_BLKID_mcq6_1 = 0xbe,
+ DBG_CLIENT_BLKID_mcq7_0 = 0xbf,
+ DBG_CLIENT_BLKID_mcq7_1 = 0xc0,
+ DBG_CLIENT_BLKID_uvdi_0 = 0xc1,
+ DBG_CLIENT_BLKID_RESERVED_LAST = 0xc2,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+ DBG_BLOCK_ID_RESERVED = 0x0,
+ DBG_BLOCK_ID_DBG = 0x1,
+ DBG_BLOCK_ID_VMC = 0x2,
+ DBG_BLOCK_ID_PDMA = 0x3,
+ DBG_BLOCK_ID_CG = 0x4,
+ DBG_BLOCK_ID_SRBM = 0x5,
+ DBG_BLOCK_ID_GRBM = 0x6,
+ DBG_BLOCK_ID_RLC = 0x7,
+ DBG_BLOCK_ID_CSC = 0x8,
+ DBG_BLOCK_ID_SEM = 0x9,
+ DBG_BLOCK_ID_IH = 0xa,
+ DBG_BLOCK_ID_SC = 0xb,
+ DBG_BLOCK_ID_SQ = 0xc,
+ DBG_BLOCK_ID_AVP = 0xd,
+ DBG_BLOCK_ID_GMCON = 0xe,
+ DBG_BLOCK_ID_SMU = 0xf,
+ DBG_BLOCK_ID_DMA0 = 0x10,
+ DBG_BLOCK_ID_DMA1 = 0x11,
+ DBG_BLOCK_ID_SPIM = 0x12,
+ DBG_BLOCK_ID_GDS = 0x13,
+ DBG_BLOCK_ID_SPIS = 0x14,
+ DBG_BLOCK_ID_UNUSED0 = 0x15,
+ DBG_BLOCK_ID_PA0 = 0x16,
+ DBG_BLOCK_ID_PA1 = 0x17,
+ DBG_BLOCK_ID_CP0 = 0x18,
+ DBG_BLOCK_ID_CP1 = 0x19,
+ DBG_BLOCK_ID_CP2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED1 = 0x1b,
+ DBG_BLOCK_ID_UVDU = 0x1c,
+ DBG_BLOCK_ID_UVDM = 0x1d,
+ DBG_BLOCK_ID_VCE = 0x1e,
+ DBG_BLOCK_ID_UNUSED2 = 0x1f,
+ DBG_BLOCK_ID_VGT0 = 0x20,
+ DBG_BLOCK_ID_VGT1 = 0x21,
+ DBG_BLOCK_ID_IA = 0x22,
+ DBG_BLOCK_ID_UNUSED3 = 0x23,
+ DBG_BLOCK_ID_SCT0 = 0x24,
+ DBG_BLOCK_ID_SCT1 = 0x25,
+ DBG_BLOCK_ID_SPM0 = 0x26,
+ DBG_BLOCK_ID_SPM1 = 0x27,
+ DBG_BLOCK_ID_TCAA = 0x28,
+ DBG_BLOCK_ID_TCAB = 0x29,
+ DBG_BLOCK_ID_TCCA = 0x2a,
+ DBG_BLOCK_ID_TCCB = 0x2b,
+ DBG_BLOCK_ID_MCC0 = 0x2c,
+ DBG_BLOCK_ID_MCC1 = 0x2d,
+ DBG_BLOCK_ID_MCC2 = 0x2e,
+ DBG_BLOCK_ID_MCC3 = 0x2f,
+ DBG_BLOCK_ID_SX0 = 0x30,
+ DBG_BLOCK_ID_SX1 = 0x31,
+ DBG_BLOCK_ID_SX2 = 0x32,
+ DBG_BLOCK_ID_SX3 = 0x33,
+ DBG_BLOCK_ID_UNUSED4 = 0x34,
+ DBG_BLOCK_ID_UNUSED5 = 0x35,
+ DBG_BLOCK_ID_UNUSED6 = 0x36,
+ DBG_BLOCK_ID_UNUSED7 = 0x37,
+ DBG_BLOCK_ID_PC0 = 0x38,
+ DBG_BLOCK_ID_PC1 = 0x39,
+ DBG_BLOCK_ID_UNUSED8 = 0x3a,
+ DBG_BLOCK_ID_UNUSED9 = 0x3b,
+ DBG_BLOCK_ID_UNUSED10 = 0x3c,
+ DBG_BLOCK_ID_UNUSED11 = 0x3d,
+ DBG_BLOCK_ID_MCB = 0x3e,
+ DBG_BLOCK_ID_UNUSED12 = 0x3f,
+ DBG_BLOCK_ID_SCB0 = 0x40,
+ DBG_BLOCK_ID_SCB1 = 0x41,
+ DBG_BLOCK_ID_UNUSED13 = 0x42,
+ DBG_BLOCK_ID_UNUSED14 = 0x43,
+ DBG_BLOCK_ID_SCF0 = 0x44,
+ DBG_BLOCK_ID_SCF1 = 0x45,
+ DBG_BLOCK_ID_UNUSED15 = 0x46,
+ DBG_BLOCK_ID_UNUSED16 = 0x47,
+ DBG_BLOCK_ID_BCI0 = 0x48,
+ DBG_BLOCK_ID_BCI1 = 0x49,
+ DBG_BLOCK_ID_BCI2 = 0x4a,
+ DBG_BLOCK_ID_BCI3 = 0x4b,
+ DBG_BLOCK_ID_UNUSED17 = 0x4c,
+ DBG_BLOCK_ID_UNUSED18 = 0x4d,
+ DBG_BLOCK_ID_UNUSED19 = 0x4e,
+ DBG_BLOCK_ID_UNUSED20 = 0x4f,
+ DBG_BLOCK_ID_CB00 = 0x50,
+ DBG_BLOCK_ID_CB01 = 0x51,
+ DBG_BLOCK_ID_CB02 = 0x52,
+ DBG_BLOCK_ID_CB03 = 0x53,
+ DBG_BLOCK_ID_CB04 = 0x54,
+ DBG_BLOCK_ID_UNUSED21 = 0x55,
+ DBG_BLOCK_ID_UNUSED22 = 0x56,
+ DBG_BLOCK_ID_UNUSED23 = 0x57,
+ DBG_BLOCK_ID_CB10 = 0x58,
+ DBG_BLOCK_ID_CB11 = 0x59,
+ DBG_BLOCK_ID_CB12 = 0x5a,
+ DBG_BLOCK_ID_CB13 = 0x5b,
+ DBG_BLOCK_ID_CB14 = 0x5c,
+ DBG_BLOCK_ID_UNUSED24 = 0x5d,
+ DBG_BLOCK_ID_UNUSED25 = 0x5e,
+ DBG_BLOCK_ID_UNUSED26 = 0x5f,
+ DBG_BLOCK_ID_TCP0 = 0x60,
+ DBG_BLOCK_ID_TCP1 = 0x61,
+ DBG_BLOCK_ID_TCP2 = 0x62,
+ DBG_BLOCK_ID_TCP3 = 0x63,
+ DBG_BLOCK_ID_TCP4 = 0x64,
+ DBG_BLOCK_ID_TCP5 = 0x65,
+ DBG_BLOCK_ID_TCP6 = 0x66,
+ DBG_BLOCK_ID_TCP7 = 0x67,
+ DBG_BLOCK_ID_TCP8 = 0x68,
+ DBG_BLOCK_ID_TCP9 = 0x69,
+ DBG_BLOCK_ID_TCP10 = 0x6a,
+ DBG_BLOCK_ID_TCP11 = 0x6b,
+ DBG_BLOCK_ID_TCP12 = 0x6c,
+ DBG_BLOCK_ID_TCP13 = 0x6d,
+ DBG_BLOCK_ID_TCP14 = 0x6e,
+ DBG_BLOCK_ID_TCP15 = 0x6f,
+ DBG_BLOCK_ID_TCP16 = 0x70,
+ DBG_BLOCK_ID_TCP17 = 0x71,
+ DBG_BLOCK_ID_TCP18 = 0x72,
+ DBG_BLOCK_ID_TCP19 = 0x73,
+ DBG_BLOCK_ID_TCP20 = 0x74,
+ DBG_BLOCK_ID_TCP21 = 0x75,
+ DBG_BLOCK_ID_TCP22 = 0x76,
+ DBG_BLOCK_ID_TCP23 = 0x77,
+ DBG_BLOCK_ID_TCP_RESERVED0 = 0x78,
+ DBG_BLOCK_ID_TCP_RESERVED1 = 0x79,
+ DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a,
+ DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b,
+ DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c,
+ DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d,
+ DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e,
+ DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f,
+ DBG_BLOCK_ID_DB00 = 0x80,
+ DBG_BLOCK_ID_DB01 = 0x81,
+ DBG_BLOCK_ID_DB02 = 0x82,
+ DBG_BLOCK_ID_DB03 = 0x83,
+ DBG_BLOCK_ID_DB04 = 0x84,
+ DBG_BLOCK_ID_UNUSED27 = 0x85,
+ DBG_BLOCK_ID_UNUSED28 = 0x86,
+ DBG_BLOCK_ID_UNUSED29 = 0x87,
+ DBG_BLOCK_ID_DB10 = 0x88,
+ DBG_BLOCK_ID_DB11 = 0x89,
+ DBG_BLOCK_ID_DB12 = 0x8a,
+ DBG_BLOCK_ID_DB13 = 0x8b,
+ DBG_BLOCK_ID_DB14 = 0x8c,
+ DBG_BLOCK_ID_UNUSED30 = 0x8d,
+ DBG_BLOCK_ID_UNUSED31 = 0x8e,
+ DBG_BLOCK_ID_UNUSED32 = 0x8f,
+ DBG_BLOCK_ID_TCC0 = 0x90,
+ DBG_BLOCK_ID_TCC1 = 0x91,
+ DBG_BLOCK_ID_TCC2 = 0x92,
+ DBG_BLOCK_ID_TCC3 = 0x93,
+ DBG_BLOCK_ID_TCC4 = 0x94,
+ DBG_BLOCK_ID_TCC5 = 0x95,
+ DBG_BLOCK_ID_TCC6 = 0x96,
+ DBG_BLOCK_ID_TCC7 = 0x97,
+ DBG_BLOCK_ID_SPS00 = 0x98,
+ DBG_BLOCK_ID_SPS01 = 0x99,
+ DBG_BLOCK_ID_SPS02 = 0x9a,
+ DBG_BLOCK_ID_SPS10 = 0x9b,
+ DBG_BLOCK_ID_SPS11 = 0x9c,
+ DBG_BLOCK_ID_SPS12 = 0x9d,
+ DBG_BLOCK_ID_UNUSED33 = 0x9e,
+ DBG_BLOCK_ID_UNUSED34 = 0x9f,
+ DBG_BLOCK_ID_TA00 = 0xa0,
+ DBG_BLOCK_ID_TA01 = 0xa1,
+ DBG_BLOCK_ID_TA02 = 0xa2,
+ DBG_BLOCK_ID_TA03 = 0xa3,
+ DBG_BLOCK_ID_TA04 = 0xa4,
+ DBG_BLOCK_ID_TA05 = 0xa5,
+ DBG_BLOCK_ID_TA06 = 0xa6,
+ DBG_BLOCK_ID_TA07 = 0xa7,
+ DBG_BLOCK_ID_TA08 = 0xa8,
+ DBG_BLOCK_ID_TA09 = 0xa9,
+ DBG_BLOCK_ID_TA0A = 0xaa,
+ DBG_BLOCK_ID_TA0B = 0xab,
+ DBG_BLOCK_ID_UNUSED35 = 0xac,
+ DBG_BLOCK_ID_UNUSED36 = 0xad,
+ DBG_BLOCK_ID_UNUSED37 = 0xae,
+ DBG_BLOCK_ID_UNUSED38 = 0xaf,
+ DBG_BLOCK_ID_TA10 = 0xb0,
+ DBG_BLOCK_ID_TA11 = 0xb1,
+ DBG_BLOCK_ID_TA12 = 0xb2,
+ DBG_BLOCK_ID_TA13 = 0xb3,
+ DBG_BLOCK_ID_TA14 = 0xb4,
+ DBG_BLOCK_ID_TA15 = 0xb5,
+ DBG_BLOCK_ID_TA16 = 0xb6,
+ DBG_BLOCK_ID_TA17 = 0xb7,
+ DBG_BLOCK_ID_TA18 = 0xb8,
+ DBG_BLOCK_ID_TA19 = 0xb9,
+ DBG_BLOCK_ID_TA1A = 0xba,
+ DBG_BLOCK_ID_TA1B = 0xbb,
+ DBG_BLOCK_ID_UNUSED39 = 0xbc,
+ DBG_BLOCK_ID_UNUSED40 = 0xbd,
+ DBG_BLOCK_ID_UNUSED41 = 0xbe,
+ DBG_BLOCK_ID_UNUSED42 = 0xbf,
+ DBG_BLOCK_ID_TD00 = 0xc0,
+ DBG_BLOCK_ID_TD01 = 0xc1,
+ DBG_BLOCK_ID_TD02 = 0xc2,
+ DBG_BLOCK_ID_TD03 = 0xc3,
+ DBG_BLOCK_ID_TD04 = 0xc4,
+ DBG_BLOCK_ID_TD05 = 0xc5,
+ DBG_BLOCK_ID_TD06 = 0xc6,
+ DBG_BLOCK_ID_TD07 = 0xc7,
+ DBG_BLOCK_ID_TD08 = 0xc8,
+ DBG_BLOCK_ID_TD09 = 0xc9,
+ DBG_BLOCK_ID_TD0A = 0xca,
+ DBG_BLOCK_ID_TD0B = 0xcb,
+ DBG_BLOCK_ID_UNUSED43 = 0xcc,
+ DBG_BLOCK_ID_UNUSED44 = 0xcd,
+ DBG_BLOCK_ID_UNUSED45 = 0xce,
+ DBG_BLOCK_ID_UNUSED46 = 0xcf,
+ DBG_BLOCK_ID_TD10 = 0xd0,
+ DBG_BLOCK_ID_TD11 = 0xd1,
+ DBG_BLOCK_ID_TD12 = 0xd2,
+ DBG_BLOCK_ID_TD13 = 0xd3,
+ DBG_BLOCK_ID_TD14 = 0xd4,
+ DBG_BLOCK_ID_TD15 = 0xd5,
+ DBG_BLOCK_ID_TD16 = 0xd6,
+ DBG_BLOCK_ID_TD17 = 0xd7,
+ DBG_BLOCK_ID_TD18 = 0xd8,
+ DBG_BLOCK_ID_TD19 = 0xd9,
+ DBG_BLOCK_ID_TD1A = 0xda,
+ DBG_BLOCK_ID_TD1B = 0xdb,
+ DBG_BLOCK_ID_UNUSED47 = 0xdc,
+ DBG_BLOCK_ID_UNUSED48 = 0xdd,
+ DBG_BLOCK_ID_UNUSED49 = 0xde,
+ DBG_BLOCK_ID_UNUSED50 = 0xdf,
+ DBG_BLOCK_ID_MCD0 = 0xe0,
+ DBG_BLOCK_ID_MCD1 = 0xe1,
+ DBG_BLOCK_ID_MCD2 = 0xe2,
+ DBG_BLOCK_ID_MCD3 = 0xe3,
+ DBG_BLOCK_ID_MCD4 = 0xe4,
+ DBG_BLOCK_ID_MCD5 = 0xe5,
+ DBG_BLOCK_ID_UNUSED51 = 0xe6,
+ DBG_BLOCK_ID_UNUSED52 = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+ DBG_BLOCK_ID_RESERVED_BY2 = 0x0,
+ DBG_BLOCK_ID_VMC_BY2 = 0x1,
+ DBG_BLOCK_ID_CG_BY2 = 0x2,
+ DBG_BLOCK_ID_GRBM_BY2 = 0x3,
+ DBG_BLOCK_ID_CSC_BY2 = 0x4,
+ DBG_BLOCK_ID_IH_BY2 = 0x5,
+ DBG_BLOCK_ID_SQ_BY2 = 0x6,
+ DBG_BLOCK_ID_GMCON_BY2 = 0x7,
+ DBG_BLOCK_ID_DMA0_BY2 = 0x8,
+ DBG_BLOCK_ID_SPIM_BY2 = 0x9,
+ DBG_BLOCK_ID_SPIS_BY2 = 0xa,
+ DBG_BLOCK_ID_PA0_BY2 = 0xb,
+ DBG_BLOCK_ID_CP0_BY2 = 0xc,
+ DBG_BLOCK_ID_CP2_BY2 = 0xd,
+ DBG_BLOCK_ID_UVDU_BY2 = 0xe,
+ DBG_BLOCK_ID_VCE_BY2 = 0xf,
+ DBG_BLOCK_ID_VGT0_BY2 = 0x10,
+ DBG_BLOCK_ID_IA_BY2 = 0x11,
+ DBG_BLOCK_ID_SCT0_BY2 = 0x12,
+ DBG_BLOCK_ID_SPM0_BY2 = 0x13,
+ DBG_BLOCK_ID_TCAA_BY2 = 0x14,
+ DBG_BLOCK_ID_TCCA_BY2 = 0x15,
+ DBG_BLOCK_ID_MCC0_BY2 = 0x16,
+ DBG_BLOCK_ID_MCC2_BY2 = 0x17,
+ DBG_BLOCK_ID_SX0_BY2 = 0x18,
+ DBG_BLOCK_ID_SX2_BY2 = 0x19,
+ DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b,
+ DBG_BLOCK_ID_PC0_BY2 = 0x1c,
+ DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d,
+ DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e,
+ DBG_BLOCK_ID_MCB_BY2 = 0x1f,
+ DBG_BLOCK_ID_SCB0_BY2 = 0x20,
+ DBG_BLOCK_ID_UNUSED13_BY2 = 0x21,
+ DBG_BLOCK_ID_SCF0_BY2 = 0x22,
+ DBG_BLOCK_ID_UNUSED15_BY2 = 0x23,
+ DBG_BLOCK_ID_BCI0_BY2 = 0x24,
+ DBG_BLOCK_ID_BCI2_BY2 = 0x25,
+ DBG_BLOCK_ID_UNUSED17_BY2 = 0x26,
+ DBG_BLOCK_ID_UNUSED19_BY2 = 0x27,
+ DBG_BLOCK_ID_CB00_BY2 = 0x28,
+ DBG_BLOCK_ID_CB02_BY2 = 0x29,
+ DBG_BLOCK_ID_CB04_BY2 = 0x2a,
+ DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b,
+ DBG_BLOCK_ID_CB10_BY2 = 0x2c,
+ DBG_BLOCK_ID_CB12_BY2 = 0x2d,
+ DBG_BLOCK_ID_CB14_BY2 = 0x2e,
+ DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f,
+ DBG_BLOCK_ID_TCP0_BY2 = 0x30,
+ DBG_BLOCK_ID_TCP2_BY2 = 0x31,
+ DBG_BLOCK_ID_TCP4_BY2 = 0x32,
+ DBG_BLOCK_ID_TCP6_BY2 = 0x33,
+ DBG_BLOCK_ID_TCP8_BY2 = 0x34,
+ DBG_BLOCK_ID_TCP10_BY2 = 0x35,
+ DBG_BLOCK_ID_TCP12_BY2 = 0x36,
+ DBG_BLOCK_ID_TCP14_BY2 = 0x37,
+ DBG_BLOCK_ID_TCP16_BY2 = 0x38,
+ DBG_BLOCK_ID_TCP18_BY2 = 0x39,
+ DBG_BLOCK_ID_TCP20_BY2 = 0x3a,
+ DBG_BLOCK_ID_TCP22_BY2 = 0x3b,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c,
+ DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e,
+ DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f,
+ DBG_BLOCK_ID_DB00_BY2 = 0x40,
+ DBG_BLOCK_ID_DB02_BY2 = 0x41,
+ DBG_BLOCK_ID_DB04_BY2 = 0x42,
+ DBG_BLOCK_ID_UNUSED28_BY2 = 0x43,
+ DBG_BLOCK_ID_DB10_BY2 = 0x44,
+ DBG_BLOCK_ID_DB12_BY2 = 0x45,
+ DBG_BLOCK_ID_DB14_BY2 = 0x46,
+ DBG_BLOCK_ID_UNUSED31_BY2 = 0x47,
+ DBG_BLOCK_ID_TCC0_BY2 = 0x48,
+ DBG_BLOCK_ID_TCC2_BY2 = 0x49,
+ DBG_BLOCK_ID_TCC4_BY2 = 0x4a,
+ DBG_BLOCK_ID_TCC6_BY2 = 0x4b,
+ DBG_BLOCK_ID_SPS00_BY2 = 0x4c,
+ DBG_BLOCK_ID_SPS02_BY2 = 0x4d,
+ DBG_BLOCK_ID_SPS11_BY2 = 0x4e,
+ DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f,
+ DBG_BLOCK_ID_TA00_BY2 = 0x50,
+ DBG_BLOCK_ID_TA02_BY2 = 0x51,
+ DBG_BLOCK_ID_TA04_BY2 = 0x52,
+ DBG_BLOCK_ID_TA06_BY2 = 0x53,
+ DBG_BLOCK_ID_TA08_BY2 = 0x54,
+ DBG_BLOCK_ID_TA0A_BY2 = 0x55,
+ DBG_BLOCK_ID_UNUSED35_BY2 = 0x56,
+ DBG_BLOCK_ID_UNUSED37_BY2 = 0x57,
+ DBG_BLOCK_ID_TA10_BY2 = 0x58,
+ DBG_BLOCK_ID_TA12_BY2 = 0x59,
+ DBG_BLOCK_ID_TA14_BY2 = 0x5a,
+ DBG_BLOCK_ID_TA16_BY2 = 0x5b,
+ DBG_BLOCK_ID_TA18_BY2 = 0x5c,
+ DBG_BLOCK_ID_TA1A_BY2 = 0x5d,
+ DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e,
+ DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f,
+ DBG_BLOCK_ID_TD00_BY2 = 0x60,
+ DBG_BLOCK_ID_TD02_BY2 = 0x61,
+ DBG_BLOCK_ID_TD04_BY2 = 0x62,
+ DBG_BLOCK_ID_TD06_BY2 = 0x63,
+ DBG_BLOCK_ID_TD08_BY2 = 0x64,
+ DBG_BLOCK_ID_TD0A_BY2 = 0x65,
+ DBG_BLOCK_ID_UNUSED43_BY2 = 0x66,
+ DBG_BLOCK_ID_UNUSED45_BY2 = 0x67,
+ DBG_BLOCK_ID_TD10_BY2 = 0x68,
+ DBG_BLOCK_ID_TD12_BY2 = 0x69,
+ DBG_BLOCK_ID_TD14_BY2 = 0x6a,
+ DBG_BLOCK_ID_TD16_BY2 = 0x6b,
+ DBG_BLOCK_ID_TD18_BY2 = 0x6c,
+ DBG_BLOCK_ID_TD1A_BY2 = 0x6d,
+ DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e,
+ DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f,
+ DBG_BLOCK_ID_MCD0_BY2 = 0x70,
+ DBG_BLOCK_ID_MCD2_BY2 = 0x71,
+ DBG_BLOCK_ID_MCD4_BY2 = 0x72,
+ DBG_BLOCK_ID_UNUSED51_BY2 = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+ DBG_BLOCK_ID_RESERVED_BY4 = 0x0,
+ DBG_BLOCK_ID_CG_BY4 = 0x1,
+ DBG_BLOCK_ID_CSC_BY4 = 0x2,
+ DBG_BLOCK_ID_SQ_BY4 = 0x3,
+ DBG_BLOCK_ID_DMA0_BY4 = 0x4,
+ DBG_BLOCK_ID_SPIS_BY4 = 0x5,
+ DBG_BLOCK_ID_CP0_BY4 = 0x6,
+ DBG_BLOCK_ID_UVDU_BY4 = 0x7,
+ DBG_BLOCK_ID_VGT0_BY4 = 0x8,
+ DBG_BLOCK_ID_SCT0_BY4 = 0x9,
+ DBG_BLOCK_ID_TCAA_BY4 = 0xa,
+ DBG_BLOCK_ID_MCC0_BY4 = 0xb,
+ DBG_BLOCK_ID_SX0_BY4 = 0xc,
+ DBG_BLOCK_ID_UNUSED4_BY4 = 0xd,
+ DBG_BLOCK_ID_PC0_BY4 = 0xe,
+ DBG_BLOCK_ID_UNUSED10_BY4 = 0xf,
+ DBG_BLOCK_ID_SCB0_BY4 = 0x10,
+ DBG_BLOCK_ID_SCF0_BY4 = 0x11,
+ DBG_BLOCK_ID_BCI0_BY4 = 0x12,
+ DBG_BLOCK_ID_UNUSED17_BY4 = 0x13,
+ DBG_BLOCK_ID_CB00_BY4 = 0x14,
+ DBG_BLOCK_ID_CB04_BY4 = 0x15,
+ DBG_BLOCK_ID_CB10_BY4 = 0x16,
+ DBG_BLOCK_ID_CB14_BY4 = 0x17,
+ DBG_BLOCK_ID_TCP0_BY4 = 0x18,
+ DBG_BLOCK_ID_TCP4_BY4 = 0x19,
+ DBG_BLOCK_ID_TCP8_BY4 = 0x1a,
+ DBG_BLOCK_ID_TCP12_BY4 = 0x1b,
+ DBG_BLOCK_ID_TCP16_BY4 = 0x1c,
+ DBG_BLOCK_ID_TCP20_BY4 = 0x1d,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f,
+ DBG_BLOCK_ID_DB_BY4 = 0x20,
+ DBG_BLOCK_ID_DB04_BY4 = 0x21,
+ DBG_BLOCK_ID_DB10_BY4 = 0x22,
+ DBG_BLOCK_ID_DB14_BY4 = 0x23,
+ DBG_BLOCK_ID_TCC0_BY4 = 0x24,
+ DBG_BLOCK_ID_TCC4_BY4 = 0x25,
+ DBG_BLOCK_ID_SPS00_BY4 = 0x26,
+ DBG_BLOCK_ID_SPS11_BY4 = 0x27,
+ DBG_BLOCK_ID_TA00_BY4 = 0x28,
+ DBG_BLOCK_ID_TA04_BY4 = 0x29,
+ DBG_BLOCK_ID_TA08_BY4 = 0x2a,
+ DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b,
+ DBG_BLOCK_ID_TA10_BY4 = 0x2c,
+ DBG_BLOCK_ID_TA14_BY4 = 0x2d,
+ DBG_BLOCK_ID_TA18_BY4 = 0x2e,
+ DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f,
+ DBG_BLOCK_ID_TD00_BY4 = 0x30,
+ DBG_BLOCK_ID_TD04_BY4 = 0x31,
+ DBG_BLOCK_ID_TD08_BY4 = 0x32,
+ DBG_BLOCK_ID_UNUSED43_BY4 = 0x33,
+ DBG_BLOCK_ID_TD10_BY4 = 0x34,
+ DBG_BLOCK_ID_TD14_BY4 = 0x35,
+ DBG_BLOCK_ID_TD18_BY4 = 0x36,
+ DBG_BLOCK_ID_UNUSED47_BY4 = 0x37,
+ DBG_BLOCK_ID_MCD0_BY4 = 0x38,
+ DBG_BLOCK_ID_MCD4_BY4 = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+ DBG_BLOCK_ID_RESERVED_BY8 = 0x0,
+ DBG_BLOCK_ID_CSC_BY8 = 0x1,
+ DBG_BLOCK_ID_DMA0_BY8 = 0x2,
+ DBG_BLOCK_ID_CP0_BY8 = 0x3,
+ DBG_BLOCK_ID_VGT0_BY8 = 0x4,
+ DBG_BLOCK_ID_TCAA_BY8 = 0x5,
+ DBG_BLOCK_ID_SX0_BY8 = 0x6,
+ DBG_BLOCK_ID_PC0_BY8 = 0x7,
+ DBG_BLOCK_ID_SCB0_BY8 = 0x8,
+ DBG_BLOCK_ID_BCI0_BY8 = 0x9,
+ DBG_BLOCK_ID_CB00_BY8 = 0xa,
+ DBG_BLOCK_ID_CB10_BY8 = 0xb,
+ DBG_BLOCK_ID_TCP0_BY8 = 0xc,
+ DBG_BLOCK_ID_TCP8_BY8 = 0xd,
+ DBG_BLOCK_ID_TCP16_BY8 = 0xe,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf,
+ DBG_BLOCK_ID_DB00_BY8 = 0x10,
+ DBG_BLOCK_ID_DB10_BY8 = 0x11,
+ DBG_BLOCK_ID_TCC0_BY8 = 0x12,
+ DBG_BLOCK_ID_SPS00_BY8 = 0x13,
+ DBG_BLOCK_ID_TA00_BY8 = 0x14,
+ DBG_BLOCK_ID_TA08_BY8 = 0x15,
+ DBG_BLOCK_ID_TA10_BY8 = 0x16,
+ DBG_BLOCK_ID_TA18_BY8 = 0x17,
+ DBG_BLOCK_ID_TD00_BY8 = 0x18,
+ DBG_BLOCK_ID_TD08_BY8 = 0x19,
+ DBG_BLOCK_ID_TD10_BY8 = 0x1a,
+ DBG_BLOCK_ID_TD18_BY8 = 0x1b,
+ DBG_BLOCK_ID_MCD0_BY8 = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+ DBG_BLOCK_ID_RESERVED_BY16 = 0x0,
+ DBG_BLOCK_ID_DMA0_BY16 = 0x1,
+ DBG_BLOCK_ID_VGT0_BY16 = 0x2,
+ DBG_BLOCK_ID_SX0_BY16 = 0x3,
+ DBG_BLOCK_ID_SCB0_BY16 = 0x4,
+ DBG_BLOCK_ID_CB00_BY16 = 0x5,
+ DBG_BLOCK_ID_TCP0_BY16 = 0x6,
+ DBG_BLOCK_ID_TCP16_BY16 = 0x7,
+ DBG_BLOCK_ID_DB00_BY16 = 0x8,
+ DBG_BLOCK_ID_TCC0_BY16 = 0x9,
+ DBG_BLOCK_ID_TA00_BY16 = 0xa,
+ DBG_BLOCK_ID_TA10_BY16 = 0xb,
+ DBG_BLOCK_ID_TD00_BY16 = 0xc,
+ DBG_BLOCK_ID_TD10_BY16 = 0xd,
+ DBG_BLOCK_ID_MCD0_BY16 = 0xe,
+} DebugBlockId_BY16;
+typedef enum ColorTransform {
+ DCC_CT_AUTO = 0x0,
+ DCC_CT_NONE = 0x1,
+ ABGR_TO_A_BG_G_RB = 0x2,
+ BGRA_TO_BG_G_RB_A = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+ REF_NEVER = 0x0,
+ REF_LESS = 0x1,
+ REF_EQUAL = 0x2,
+ REF_LEQUAL = 0x3,
+ REF_GREATER = 0x4,
+ REF_NOTEQUAL = 0x5,
+ REF_GEQUAL = 0x6,
+ REF_ALWAYS = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+ READ_256_BITS = 0x0,
+ READ_512_BITS = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+ DEPTH_INVALID = 0x0,
+ DEPTH_16 = 0x1,
+ DEPTH_X8_24 = 0x2,
+ DEPTH_8_24 = 0x3,
+ DEPTH_X8_24_FLOAT = 0x4,
+ DEPTH_8_24_FLOAT = 0x5,
+ DEPTH_32_FLOAT = 0x6,
+ DEPTH_X24_8_32_FLOAT = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+ Z_INVALID = 0x0,
+ Z_16 = 0x1,
+ Z_24 = 0x2,
+ Z_32_FLOAT = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+ STENCIL_INVALID = 0x0,
+ STENCIL_8 = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+ CMASK_CLEAR_NONE = 0x0,
+ CMASK_CLEAR_ONE = 0x1,
+ CMASK_CLEAR_ALL = 0x2,
+ CMASK_ANY_EXPANDED = 0x3,
+ CMASK_ALPHA0_FRAG1 = 0x4,
+ CMASK_ALPHA0_FRAG2 = 0x5,
+ CMASK_ALPHA0_FRAG4 = 0x6,
+ CMASK_ALPHA0_FRAGS = 0x7,
+ CMASK_ALPHA1_FRAG1 = 0x8,
+ CMASK_ALPHA1_FRAG2 = 0x9,
+ CMASK_ALPHA1_FRAG4 = 0xa,
+ CMASK_ALPHA1_FRAGS = 0xb,
+ CMASK_ALPHAX_FRAG1 = 0xc,
+ CMASK_ALPHAX_FRAG2 = 0xd,
+ CMASK_ALPHAX_FRAG4 = 0xe,
+ CMASK_ALPHAX_FRAGS = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+ EXPORT_UNUSED = 0x0,
+ EXPORT_32_R = 0x1,
+ EXPORT_32_GR = 0x2,
+ EXPORT_32_AR = 0x3,
+ EXPORT_FP16_ABGR = 0x4,
+ EXPORT_UNSIGNED16_ABGR = 0x5,
+ EXPORT_SIGNED16_ABGR = 0x6,
+ EXPORT_32_ABGR = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+ EXPORT_4P_32BPC_ABGR = 0x0,
+ EXPORT_4P_16BPC_ABGR = 0x1,
+ EXPORT_4P_32BPC_GR = 0x2,
+ EXPORT_4P_32BPC_AR = 0x3,
+ EXPORT_2P_32BPC_ABGR = 0x4,
+ EXPORT_8P_32BPC_R = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+ COLOR_INVALID = 0x0,
+ COLOR_8 = 0x1,
+ COLOR_16 = 0x2,
+ COLOR_8_8 = 0x3,
+ COLOR_32 = 0x4,
+ COLOR_16_16 = 0x5,
+ COLOR_10_11_11 = 0x6,
+ COLOR_11_11_10 = 0x7,
+ COLOR_10_10_10_2 = 0x8,
+ COLOR_2_10_10_10 = 0x9,
+ COLOR_8_8_8_8 = 0xa,
+ COLOR_32_32 = 0xb,
+ COLOR_16_16_16_16 = 0xc,
+ COLOR_RESERVED_13 = 0xd,
+ COLOR_32_32_32_32 = 0xe,
+ COLOR_RESERVED_15 = 0xf,
+ COLOR_5_6_5 = 0x10,
+ COLOR_1_5_5_5 = 0x11,
+ COLOR_5_5_5_1 = 0x12,
+ COLOR_4_4_4_4 = 0x13,
+ COLOR_8_24 = 0x14,
+ COLOR_24_8 = 0x15,
+ COLOR_X24_8_32_FLOAT = 0x16,
+ COLOR_RESERVED_23 = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+ FMT_INVALID = 0x0,
+ FMT_8 = 0x1,
+ FMT_16 = 0x2,
+ FMT_8_8 = 0x3,
+ FMT_32 = 0x4,
+ FMT_16_16 = 0x5,
+ FMT_10_11_11 = 0x6,
+ FMT_11_11_10 = 0x7,
+ FMT_10_10_10_2 = 0x8,
+ FMT_2_10_10_10 = 0x9,
+ FMT_8_8_8_8 = 0xa,
+ FMT_32_32 = 0xb,
+ FMT_16_16_16_16 = 0xc,
+ FMT_32_32_32 = 0xd,
+ FMT_32_32_32_32 = 0xe,
+ FMT_RESERVED_4 = 0xf,
+ FMT_5_6_5 = 0x10,
+ FMT_1_5_5_5 = 0x11,
+ FMT_5_5_5_1 = 0x12,
+ FMT_4_4_4_4 = 0x13,
+ FMT_8_24 = 0x14,
+ FMT_24_8 = 0x15,
+ FMT_X24_8_32_FLOAT = 0x16,
+ FMT_RESERVED_33 = 0x17,
+ FMT_11_11_10_FLOAT = 0x18,
+ FMT_16_FLOAT = 0x19,
+ FMT_32_FLOAT = 0x1a,
+ FMT_16_16_FLOAT = 0x1b,
+ FMT_8_24_FLOAT = 0x1c,
+ FMT_24_8_FLOAT = 0x1d,
+ FMT_32_32_FLOAT = 0x1e,
+ FMT_10_11_11_FLOAT = 0x1f,
+ FMT_16_16_16_16_FLOAT = 0x20,
+ FMT_3_3_2 = 0x21,
+ FMT_6_5_5 = 0x22,
+ FMT_32_32_32_32_FLOAT = 0x23,
+ FMT_RESERVED_36 = 0x24,
+ FMT_1 = 0x25,
+ FMT_1_REVERSED = 0x26,
+ FMT_GB_GR = 0x27,
+ FMT_BG_RG = 0x28,
+ FMT_32_AS_8 = 0x29,
+ FMT_32_AS_8_8 = 0x2a,
+ FMT_5_9_9_9_SHAREDEXP = 0x2b,
+ FMT_8_8_8 = 0x2c,
+ FMT_16_16_16 = 0x2d,
+ FMT_16_16_16_FLOAT = 0x2e,
+ FMT_4_4 = 0x2f,
+ FMT_32_32_32_FLOAT = 0x30,
+ FMT_BC1 = 0x31,
+ FMT_BC2 = 0x32,
+ FMT_BC3 = 0x33,
+ FMT_BC4 = 0x34,
+ FMT_BC5 = 0x35,
+ FMT_BC6 = 0x36,
+ FMT_BC7 = 0x37,
+ FMT_32_AS_32_32_32_32 = 0x38,
+ FMT_APC3 = 0x39,
+ FMT_APC4 = 0x3a,
+ FMT_APC5 = 0x3b,
+ FMT_APC6 = 0x3c,
+ FMT_APC7 = 0x3d,
+ FMT_CTX1 = 0x3e,
+ FMT_RESERVED_63 = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+ BUF_DATA_FORMAT_INVALID = 0x0,
+ BUF_DATA_FORMAT_8 = 0x1,
+ BUF_DATA_FORMAT_16 = 0x2,
+ BUF_DATA_FORMAT_8_8 = 0x3,
+ BUF_DATA_FORMAT_32 = 0x4,
+ BUF_DATA_FORMAT_16_16 = 0x5,
+ BUF_DATA_FORMAT_10_11_11 = 0x6,
+ BUF_DATA_FORMAT_11_11_10 = 0x7,
+ BUF_DATA_FORMAT_10_10_10_2 = 0x8,
+ BUF_DATA_FORMAT_2_10_10_10 = 0x9,
+ BUF_DATA_FORMAT_8_8_8_8 = 0xa,
+ BUF_DATA_FORMAT_32_32 = 0xb,
+ BUF_DATA_FORMAT_16_16_16_16 = 0xc,
+ BUF_DATA_FORMAT_32_32_32 = 0xd,
+ BUF_DATA_FORMAT_32_32_32_32 = 0xe,
+ BUF_DATA_FORMAT_RESERVED_15 = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+ IMG_DATA_FORMAT_INVALID = 0x0,
+ IMG_DATA_FORMAT_8 = 0x1,
+ IMG_DATA_FORMAT_16 = 0x2,
+ IMG_DATA_FORMAT_8_8 = 0x3,
+ IMG_DATA_FORMAT_32 = 0x4,
+ IMG_DATA_FORMAT_16_16 = 0x5,
+ IMG_DATA_FORMAT_10_11_11 = 0x6,
+ IMG_DATA_FORMAT_11_11_10 = 0x7,
+ IMG_DATA_FORMAT_10_10_10_2 = 0x8,
+ IMG_DATA_FORMAT_2_10_10_10 = 0x9,
+ IMG_DATA_FORMAT_8_8_8_8 = 0xa,
+ IMG_DATA_FORMAT_32_32 = 0xb,
+ IMG_DATA_FORMAT_16_16_16_16 = 0xc,
+ IMG_DATA_FORMAT_32_32_32 = 0xd,
+ IMG_DATA_FORMAT_32_32_32_32 = 0xe,
+ IMG_DATA_FORMAT_RESERVED_15 = 0xf,
+ IMG_DATA_FORMAT_5_6_5 = 0x10,
+ IMG_DATA_FORMAT_1_5_5_5 = 0x11,
+ IMG_DATA_FORMAT_5_5_5_1 = 0x12,
+ IMG_DATA_FORMAT_4_4_4_4 = 0x13,
+ IMG_DATA_FORMAT_8_24 = 0x14,
+ IMG_DATA_FORMAT_24_8 = 0x15,
+ IMG_DATA_FORMAT_X24_8_32 = 0x16,
+ IMG_DATA_FORMAT_RESERVED_23 = 0x17,
+ IMG_DATA_FORMAT_RESERVED_24 = 0x18,
+ IMG_DATA_FORMAT_RESERVED_25 = 0x19,
+ IMG_DATA_FORMAT_RESERVED_26 = 0x1a,
+ IMG_DATA_FORMAT_RESERVED_27 = 0x1b,
+ IMG_DATA_FORMAT_RESERVED_28 = 0x1c,
+ IMG_DATA_FORMAT_RESERVED_29 = 0x1d,
+ IMG_DATA_FORMAT_RESERVED_30 = 0x1e,
+ IMG_DATA_FORMAT_RESERVED_31 = 0x1f,
+ IMG_DATA_FORMAT_GB_GR = 0x20,
+ IMG_DATA_FORMAT_BG_RG = 0x21,
+ IMG_DATA_FORMAT_5_9_9_9 = 0x22,
+ IMG_DATA_FORMAT_BC1 = 0x23,
+ IMG_DATA_FORMAT_BC2 = 0x24,
+ IMG_DATA_FORMAT_BC3 = 0x25,
+ IMG_DATA_FORMAT_BC4 = 0x26,
+ IMG_DATA_FORMAT_BC5 = 0x27,
+ IMG_DATA_FORMAT_BC6 = 0x28,
+ IMG_DATA_FORMAT_BC7 = 0x29,
+ IMG_DATA_FORMAT_RESERVED_42 = 0x2a,
+ IMG_DATA_FORMAT_RESERVED_43 = 0x2b,
+ IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c,
+ IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d,
+ IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e,
+ IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f,
+ IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30,
+ IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31,
+ IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32,
+ IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33,
+ IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34,
+ IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35,
+ IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36,
+ IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37,
+ IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38,
+ IMG_DATA_FORMAT_4_4 = 0x39,
+ IMG_DATA_FORMAT_6_5_5 = 0x3a,
+ IMG_DATA_FORMAT_1 = 0x3b,
+ IMG_DATA_FORMAT_1_REVERSED = 0x3c,
+ IMG_DATA_FORMAT_32_AS_8 = 0x3d,
+ IMG_DATA_FORMAT_32_AS_8_8 = 0x3e,
+ IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+ BUF_NUM_FORMAT_UNORM = 0x0,
+ BUF_NUM_FORMAT_SNORM = 0x1,
+ BUF_NUM_FORMAT_USCALED = 0x2,
+ BUF_NUM_FORMAT_SSCALED = 0x3,
+ BUF_NUM_FORMAT_UINT = 0x4,
+ BUF_NUM_FORMAT_SINT = 0x5,
+ BUF_NUM_FORMAT_RESERVED_6 = 0x6,
+ BUF_NUM_FORMAT_FLOAT = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+ IMG_NUM_FORMAT_UNORM = 0x0,
+ IMG_NUM_FORMAT_SNORM = 0x1,
+ IMG_NUM_FORMAT_USCALED = 0x2,
+ IMG_NUM_FORMAT_SSCALED = 0x3,
+ IMG_NUM_FORMAT_UINT = 0x4,
+ IMG_NUM_FORMAT_SINT = 0x5,
+ IMG_NUM_FORMAT_RESERVED_6 = 0x6,
+ IMG_NUM_FORMAT_FLOAT = 0x7,
+ IMG_NUM_FORMAT_RESERVED_8 = 0x8,
+ IMG_NUM_FORMAT_SRGB = 0x9,
+ IMG_NUM_FORMAT_RESERVED_10 = 0xa,
+ IMG_NUM_FORMAT_RESERVED_11 = 0xb,
+ IMG_NUM_FORMAT_RESERVED_12 = 0xc,
+ IMG_NUM_FORMAT_RESERVED_13 = 0xd,
+ IMG_NUM_FORMAT_RESERVED_14 = 0xe,
+ IMG_NUM_FORMAT_RESERVED_15 = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+ ARRAY_COLOR_TILE = 0x0,
+ ARRAY_DEPTH_TILE = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+ ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+ ADDR_SURF_DISPLAY_MICRO_TILING = 0x0,
+ ADDR_SURF_THIN_MICRO_TILING = 0x1,
+ ADDR_SURF_DEPTH_MICRO_TILING = 0x2,
+ ADDR_SURF_ROTATED_MICRO_TILING = 0x3,
+ ADDR_SURF_THICK_MICRO_TILING = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+ ADDR_SURF_TILE_SPLIT_64B = 0x0,
+ ADDR_SURF_TILE_SPLIT_128B = 0x1,
+ ADDR_SURF_TILE_SPLIT_256B = 0x2,
+ ADDR_SURF_TILE_SPLIT_512B = 0x3,
+ ADDR_SURF_TILE_SPLIT_1KB = 0x4,
+ ADDR_SURF_TILE_SPLIT_2KB = 0x5,
+ ADDR_SURF_TILE_SPLIT_4KB = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+ ADDR_SURF_SAMPLE_SPLIT_1 = 0x0,
+ ADDR_SURF_SAMPLE_SPLIT_2 = 0x1,
+ ADDR_SURF_SAMPLE_SPLIT_4 = 0x2,
+ ADDR_SURF_SAMPLE_SPLIT_8 = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+ ADDR_SURF_P2 = 0x0,
+ ADDR_SURF_P2_RESERVED0 = 0x1,
+ ADDR_SURF_P2_RESERVED1 = 0x2,
+ ADDR_SURF_P2_RESERVED2 = 0x3,
+ ADDR_SURF_P4_8x16 = 0x4,
+ ADDR_SURF_P4_16x16 = 0x5,
+ ADDR_SURF_P4_16x32 = 0x6,
+ ADDR_SURF_P4_32x32 = 0x7,
+ ADDR_SURF_P8_16x16_8x16 = 0x8,
+ ADDR_SURF_P8_16x32_8x16 = 0x9,
+ ADDR_SURF_P8_32x32_8x16 = 0xa,
+ ADDR_SURF_P8_16x32_16x16 = 0xb,
+ ADDR_SURF_P8_32x32_16x16 = 0xc,
+ ADDR_SURF_P8_32x32_16x32 = 0xd,
+ ADDR_SURF_P8_32x64_32x32 = 0xe,
+ ADDR_SURF_P8_RESERVED0 = 0xf,
+ ADDR_SURF_P16_32x32_8x16 = 0x10,
+ ADDR_SURF_P16_32x32_16x16 = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+ ADDR_SURF_2_BANK = 0x0,
+ ADDR_SURF_4_BANK = 0x1,
+ ADDR_SURF_8_BANK = 0x2,
+ ADDR_SURF_16_BANK = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+ ADDR_SURF_BANK_WIDTH_1 = 0x0,
+ ADDR_SURF_BANK_WIDTH_2 = 0x1,
+ ADDR_SURF_BANK_WIDTH_4 = 0x2,
+ ADDR_SURF_BANK_WIDTH_8 = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+ ADDR_SURF_BANK_HEIGHT_1 = 0x0,
+ ADDR_SURF_BANK_HEIGHT_2 = 0x1,
+ ADDR_SURF_BANK_HEIGHT_4 = 0x2,
+ ADDR_SURF_BANK_HEIGHT_8 = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+ ADDR_SURF_BANK_WH_1 = 0x0,
+ ADDR_SURF_BANK_WH_2 = 0x1,
+ ADDR_SURF_BANK_WH_4 = 0x2,
+ ADDR_SURF_BANK_WH_8 = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+ ADDR_SURF_MACRO_ASPECT_1 = 0x0,
+ ADDR_SURF_MACRO_ASPECT_2 = 0x1,
+ ADDR_SURF_MACRO_ASPECT_4 = 0x2,
+ ADDR_SURF_MACRO_ASPECT_8 = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+ GATCL1_TYPE_NORMAL = 0x0,
+ GATCL1_TYPE_SHOOTDOWN = 0x1,
+ GATCL1_TYPE_BYPASS = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+ TCC_CACHE_POLICY_LRU = 0x0,
+ TCC_CACHE_POLICY_STREAM = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+ MTYPE_NC_NV = 0x0,
+ MTYPE_NC = 0x1,
+ MTYPE_CC = 0x2,
+ MTYPE_UC = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+ PERFMON_COUNTER_MODE_ACCUM = 0x0,
+ PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
+ PERFMON_COUNTER_MODE_MAX = 0x2,
+ PERFMON_COUNTER_MODE_DIRTY = 0x3,
+ PERFMON_COUNTER_MODE_SAMPLE = 0x4,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
+ PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
+ PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
+ PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
+ PERFMON_COUNTER_MODE_RESERVED = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+ PERFMON_SPM_MODE_OFF = 0x0,
+ PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
+ PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
+ PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
+ PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
+ PERFMON_SPM_MODE_RESERVED_5 = 0x5,
+ PERFMON_SPM_MODE_RESERVED_6 = 0x6,
+ PERFMON_SPM_MODE_RESERVED_7 = 0x7,
+ PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
+ PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
+ PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+ ARRAY_LINEAR = 0x0,
+ ARRAY_TILED = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+ ARRAY_1D = 0x0,
+ ARRAY_2D = 0x1,
+ ARRAY_3D = 0x2,
+ ARRAY_3D_SLICE = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+ ARRAY_2D_ALT_COLOR = 0x0,
+ ARRAY_2D_COLOR = 0x1,
+ ARRAY_3D_SLICE_COLOR = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+ ARRAY_2D_ALT_DEPTH = 0x0,
+ ARRAY_2D_DEPTH = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+ NUM_SIMD_PER_CU = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
+typedef enum MEM_PWR_FORCE_CTRL {
+ NO_FORCE_REQUEST = 0x0,
+ FORCE_LIGHT_SLEEP_REQUEST = 0x1,
+ FORCE_DEEP_SLEEP_REQUEST = 0x2,
+ FORCE_SHUT_DOWN_REQUEST = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+ NO_FORCE_REQ = 0x0,
+ FORCE_LIGHT_SLEEP_REQ = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+ ENABLE_MEM_PWR_CTRL = 0x0,
+ DISABLE_MEM_PWR_CTRL = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+ DYNAMIC_SHUT_DOWN_ENABLE = 0x0,
+ DYNAMIC_DEEP_SLEEP_ENABLE = 0x1,
+ DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+ DYNAMIC_DEEP_SLEEP_EN = 0x0,
+ DYNAMIC_LIGHT_SLEEP_EN = 0x1,
+} MEM_PWR_SEL_CTRL2;
+
+#endif /* SMU_7_1_3_ENUM_H */
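
For context on the sh_mask header added below: every register field it defines comes as a *_MASK/__SHIFT pair, which drivers combine for read-modify-write access to a field without disturbing the rest of the register. The following is a minimal sketch of that pattern; the field_get()/field_set() helpers and the set_spll_ref_div() example are illustrative assumptions, not part of the patch (the amdgpu driver itself wraps the same idea in its REG_GET_FIELD()/REG_SET_FIELD() macros).

#include <stdint.h>

/* Illustrative helpers, not from this patch: extract or update one
 * register field described by a *_MASK/__SHIFT pair. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-justify them. */
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	/* Clear the old field bits, then merge in the new value. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Hypothetical example: program the SPLL reference divider while
 * preserving all other bits of CG_SPLL_FUNC_CNTL (masks defined below). */
static uint32_t set_spll_ref_div(uint32_t cntl, uint32_t div)
{
	return field_set(cntl, CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK,
			 CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT, div);
}

In the real driver the register value would come from an MMIO or SMC indirect read (e.g. via the SMC_IND_INDEX/SMC_IND_DATA pair defined below) and be written back the same way; those accessors are omitted here.
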
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
new file mode 100644
index 000000000000..1ede9e274714
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -0,0 +1,6080 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_SH_MASK_H
+#define SMU_7_1_3_SH_MASK_H
+
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f
+#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0
+#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780
+#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7
+#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800
+#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb
+#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000
+#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd
+#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000
+#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10
+#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f
+#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1
+#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f
+#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1
+#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f
+#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1
+#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f
+#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f
+#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1
+#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
+#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
+#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
+#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
+#define DPM_TABLE_29__VRConfig__SHIFT 0x0
+#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
+#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
+#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
+#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff
+#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0
+#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff
+#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0
+#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff
+#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0
+#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff
+#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_1_MASK 0xffff
+#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000
+#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10
+#define DPM_TABLE_45__VddcTable_3_MASK 0xffff
+#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0
+#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000
+#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10
+#define DPM_TABLE_46__VddcTable_5_MASK 0xffff
+#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0
+#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000
+#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10
+#define DPM_TABLE_47__VddcTable_7_MASK 0xffff
+#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0
+#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000
+#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10
+#define DPM_TABLE_48__VddcTable_9_MASK 0xffff
+#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0
+#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000
+#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10
+#define DPM_TABLE_49__VddcTable_11_MASK 0xffff
+#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0
+#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000
+#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10
+#define DPM_TABLE_50__VddcTable_13_MASK 0xffff
+#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0
+#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000
+#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10
+#define DPM_TABLE_51__VddcTable_15_MASK 0xffff
+#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0
+#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000
+#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10
+#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff
+#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0
+#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000
+#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10
+#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff
+#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0
+#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000
+#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10
+#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff
+#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0
+#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000
+#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10
+#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff
+#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0
+#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000
+#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10
+#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff
+#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0
+#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000
+#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10
+#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff
+#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0
+#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000
+#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10
+#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff
+#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0
+#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000
+#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10
+#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff
+#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0
+#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000
+#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10
+#define DPM_TABLE_60__VddciTable_1_MASK 0xffff
+#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0
+#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000
+#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10
+#define DPM_TABLE_61__VddciTable_3_MASK 0xffff
+#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0
+#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000
+#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10
+#define DPM_TABLE_62__VddciTable_5_MASK 0xffff
+#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0
+#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000
+#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10
+#define DPM_TABLE_63__VddciTable_7_MASK 0xffff
+#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0
+#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000
+#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff
+#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0
+#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00
+#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8
+#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000
+#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10
+#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000
+#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18
+#define DPM_TABLE_89__SamuLevelCount_MASK 0xff
+#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0
+#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00
+#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8
+#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000
+#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10
+#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000
+#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18
+#define DPM_TABLE_90__Reserved_0_MASK 0xff
+#define DPM_TABLE_90__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_90__ThermOutMode_MASK 0xff00
+#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8
+#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000
+#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10
+#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000
+#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18
+#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff
+#define DPM_TABLE_91__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff
+#define DPM_TABLE_92__Reserved_1__SHIFT 0x0
+#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff
+#define DPM_TABLE_93__Reserved_2__SHIFT 0x0
+#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff
+#define DPM_TABLE_94__Reserved_3__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18
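(Editor's note: each DPM_TABLE_n define above describes one field packed into a 32-bit table dword, with up to four byte-sized fields per dword. The _MASK/__SHIFT pairs decode them in the conventional way. A minimal sketch, assuming a raw dword read into a local variable; the helper name and `dword` parameter are illustrative, not part of this patch:

    #include <stdint.h>

    /* Extract GraphicsLevel_0.SclkDid (bits 31:24) from the raw
     * DPM_TABLE_104 dword: mask out the field, then shift it down. */
    static inline uint32_t dpm_graphics_level_0_sclk_did(uint32_t dword)
    {
            return (dword & DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK) >>
                   DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT;
    }

The same pattern applies to every field in this table; only the macro names change.)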
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff
+#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff
+#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff
+#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff
+#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff
+#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff
+#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff
+#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff
+#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff
+#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff
+#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff
+#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff
+#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff
+#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff
+#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff
+#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff
+#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff
+#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff
+#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff
+#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff
+#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff
+#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0
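(Editor's note: writing a packed field is the complementary read-modify-write against the same mask/shift pair. A hedged sketch, with the helper name invented for illustration and no claim that the driver updates link levels this way:

    #include <stdint.h>

    /* Replace LinkLevel_0.PcieGenSpeed (bits 31:24) in a DPM_TABLE_213
     * dword: clear the old field, then OR in the shifted new value,
     * re-masked so an oversized input cannot spill into other fields. */
    static inline uint32_t dpm_link_level_0_set_pcie_gen_speed(uint32_t dword,
                                                               uint32_t speed)
    {
            dword &= ~DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK;
            dword |= (speed << DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT) &
                     DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK;
            return dword;
    }

Clearing before setting keeps the other three byte fields in the dword intact.)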
+#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff
+#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff
+#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000
+#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff
+#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00
+#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff
+#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00
+#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff
+#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00
+#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff
+#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00
+#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff
+#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0
+#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00
+#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff
+#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0
+#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00
+#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff
+#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0
+#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00
+#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff
+#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0
+#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00
+#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff
+#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00
+#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8
+#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000
+#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10
+#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff
+#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0
+#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff
+#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0
+#define DPM_TABLE_366__Smio_0_MASK 0xffffffff
+#define DPM_TABLE_366__Smio_0__SHIFT 0x0
+#define DPM_TABLE_367__Smio_1_MASK 0xffffffff
+#define DPM_TABLE_367__Smio_1__SHIFT 0x0
+#define DPM_TABLE_368__Smio_2_MASK 0xffffffff
+#define DPM_TABLE_368__Smio_2__SHIFT 0x0
+#define DPM_TABLE_369__Smio_3_MASK 0xffffffff
+#define DPM_TABLE_369__Smio_3__SHIFT 0x0
+#define DPM_TABLE_370__Smio_4_MASK 0xffffffff
+#define DPM_TABLE_370__Smio_4__SHIFT 0x0
+#define DPM_TABLE_371__Smio_5_MASK 0xffffffff
+#define DPM_TABLE_371__Smio_5__SHIFT 0x0
+#define DPM_TABLE_372__Smio_6_MASK 0xffffffff
+#define DPM_TABLE_372__Smio_6__SHIFT 0x0
+#define DPM_TABLE_373__Smio_7_MASK 0xffffffff
+#define DPM_TABLE_373__Smio_7__SHIFT 0x0
+#define DPM_TABLE_374__Smio_8_MASK 0xffffffff
+#define DPM_TABLE_374__Smio_8__SHIFT 0x0
+#define DPM_TABLE_375__Smio_9_MASK 0xffffffff
+#define DPM_TABLE_375__Smio_9__SHIFT 0x0
+#define DPM_TABLE_376__Smio_10_MASK 0xffffffff
+#define DPM_TABLE_376__Smio_10__SHIFT 0x0
+#define DPM_TABLE_377__Smio_11_MASK 0xffffffff
+#define DPM_TABLE_377__Smio_11__SHIFT 0x0
+#define DPM_TABLE_378__Smio_12_MASK 0xffffffff
+#define DPM_TABLE_378__Smio_12__SHIFT 0x0
+#define DPM_TABLE_379__Smio_13_MASK 0xffffffff
+#define DPM_TABLE_379__Smio_13__SHIFT 0x0
+#define DPM_TABLE_380__Smio_14_MASK 0xffffffff
+#define DPM_TABLE_380__Smio_14__SHIFT 0x0
+#define DPM_TABLE_381__Smio_15_MASK 0xffffffff
+#define DPM_TABLE_381__Smio_15__SHIFT 0x0
+#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
+#define DPM_TABLE_382__Smio_16__SHIFT 0x0
+#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
+#define DPM_TABLE_383__Smio_17__SHIFT 0x0
+#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
+#define DPM_TABLE_384__Smio_18__SHIFT 0x0
+#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
+#define DPM_TABLE_385__Smio_19__SHIFT 0x0
+#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
+#define DPM_TABLE_386__Smio_20__SHIFT 0x0
+#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
+#define DPM_TABLE_387__Smio_21__SHIFT 0x0
+#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
+#define DPM_TABLE_388__Smio_22__SHIFT 0x0
+#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
+#define DPM_TABLE_389__Smio_23__SHIFT 0x0
+#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
+#define DPM_TABLE_390__Smio_24__SHIFT 0x0
+#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
+#define DPM_TABLE_391__Smio_25__SHIFT 0x0
+#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
+#define DPM_TABLE_392__Smio_26__SHIFT 0x0
+#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
+#define DPM_TABLE_393__Smio_27__SHIFT 0x0
+#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
+#define DPM_TABLE_394__Smio_28__SHIFT 0x0
+#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
+#define DPM_TABLE_395__Smio_29__SHIFT 0x0
+#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
+#define DPM_TABLE_396__Smio_30__SHIFT 0x0
+#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
+#define DPM_TABLE_397__Smio_31__SHIFT 0x0
+#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
+#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
+#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
+#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
+#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
+#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
+#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
+#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
+#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
+#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
+#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
+#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
+#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
+#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
+#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
+#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
+#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
+#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
+#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
+#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
+#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
+#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
+#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
+#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
+#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
+#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
+#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
+#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
+#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
+#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
+#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
+#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
+#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
+#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
+#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
+#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
+#define DPM_TABLE_404__DTEMode_MASK 0xff
+#define DPM_TABLE_404__DTEMode__SHIFT 0x0
+#define DPM_TABLE_404__DTEInterval_MASK 0xff00
+#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
+#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
+#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
+#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
+#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
+#define DPM_TABLE_405__ThermGpio_MASK 0xff
+#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
+#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
+#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
+#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
+#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
+#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
+#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
+#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
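+/* End of the SMU_PM_STATUS_0..127 block: each register is one full 32-bit
+ * data word, hence MASK 0xffffffff and SHIFT 0.  As with every
+ * *_MASK/__SHIFT pair in this generated header, a field is read with the
+ * usual idiom (illustrative sketch only; RREG32() and the mm* offset macros
+ * from the companion _d.h header are assumed, not defined here):
+ *
+ *   u32 reg = RREG32(mmSMU_PM_STATUS_0);
+ *   u32 val = (reg & SMU_PM_STATUS_0__DATA_MASK) >>
+ *             SMU_PM_STATUS_0__DATA__SHIFT;
+ */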
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
+#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
+#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
+#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
+#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
+#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
+#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
+#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
+#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
+#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
+#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
+#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
+#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
+#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
+#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
+#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
+#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
+#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
+#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
+#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
+#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
+#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
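+/* CG_THERMAL_* / CG_MULT_THERMAL_* above: thermal interrupt enable, control
+ * and status bit-fields, plus the aggregated temperature readbacks
+ * (ASIC_MAX_TEMP and the critical-thermal CTF_TEMP). */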
+#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
+#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
+#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
+#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
+#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
+#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
+#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
+#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
+#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
+#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
+#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
+#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
+#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
+#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
+#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
+#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
+#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
+#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
+#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
+#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
+#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
+#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
+#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
+#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
+#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
+#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
+#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
+#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
+#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
+#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
+#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL1__M_MASK 0xff0000
+#define CG_FDO_CTRL1__M__SHIFT 0x10
+#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
+#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
+#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
+#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
+#define CG_FDO_CTRL2__TMIN_MASK 0xff
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
+#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
+#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
+#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
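+/* CG_FDO_CTRL0..2 / CG_TACH_*: fan PWM (FDO) duty, spin-up and ramp control,
+ * plus tachometer feedback.  TACH_PERIOD together with EDGE_PER_REV is what
+ * fan-speed readback code would typically convert to RPM against the
+ * reference clock (the exact conversion is driver-specific and not defined
+ * in this header). */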
+#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
+#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
+#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
+#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
+#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
+#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
+#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
+#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
+#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
+#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
+#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
+#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
+#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
+#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
+#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
+#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
+#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
+#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
+#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
+#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
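+/* The THM_TMONn_RDILx_DATA / THM_TMONn_RDIRx_DATA registers that follow
+ * (TMON 0..2, channels 0..15 per side) all share one layout:
+ *   Z     [10:0]   raw sensor code
+ *   VALID [11]     measurement valid
+ *   TEMP  [23:12]  temperature reading
+ * A minimal extraction helper, as a hypothetical sketch (the function name
+ * is illustrative and not part of this header):
+ *
+ *   static inline bool tmon_temp(u32 data, u32 *temp)
+ *   {
+ *           if (!(data & THM_TMON0_RDIL0_DATA__VALID_MASK))
+ *                   return false;
+ *           *temp = (data & THM_TMON0_RDIL0_DATA__TEMP_MASK) >>
+ *                   THM_TMON0_RDIL0_DATA__TEMP__SHIFT;
+ *           return true;
+ *   }
+ */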
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON0_INT_DATA__VALID_MASK 0x800
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON1_INT_DATA__VALID_MASK 0x800
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON2_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON2_INT_DATA__VALID_MASK 0x800
+#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
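+/* GENERAL_PWRMGT and the *_PWRMGT_CNTL registers below carry the top-level
+ * power-management enables suggested by their field names: global/static PM,
+ * thermal protection, voltage control, spread spectrum, and SCLK light-sleep
+ * gating. */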
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
+#define GENERAL_PWRMGT__SPARE11_MASK 0x800
+#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
+#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
+#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
+#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
+#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
+#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
+#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
+#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
+#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
+#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
+#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
+#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
+#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
+#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
+#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
+#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
+#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
+#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
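+/* CG_FREQ_TRAN_VOTING_0..2 (below) are three identically laid out registers:
+ * one vote-enable bit per client block (BIF, HDP, ROM, ..., GRBM_0..15, RLC)
+ * controlling whether that block participates in frequency-transition
+ * throttling voting. */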
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf
+#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0
+#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4
+#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00
+#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8
+#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000
+#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf
+#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000
+#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff
+#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10
+#define SCLK_MIN_DIV__FRACV_MASK 0xfff
+#define SCLK_MIN_DIV__FRACV__SHIFT 0x0
+#define SCLK_MIN_DIV__INTV_MASK 0x7f000
+#define SCLK_MIN_DIV__INTV__SHIFT 0xc
+#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff
+#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff
+#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100
+#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8
+#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200
+#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9
+#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00
+#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa
+#define PWR_AVFS_CNTL__MmState_MASK 0x3f000
+#define PWR_AVFS_CNTL__MmState__SHIFT 0xc
+#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000
+#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12
+#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000
+#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13
+#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000
+#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14
+#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000
+#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15
+#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000
+#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16
+#define PWR_AVFS_CNTL__Isolate_MASK 0x800000
+#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17
+#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000
+#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18
+#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000
+#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1
+#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0
+#define PWR_CKS_ENABLE__masterReset_MASK 0x2
+#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1
+#define PWR_CKS_ENABLE__staticEnable_MASK 0x4
+#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4
+#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60
+#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1
+#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0
+#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2
+#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1
+#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4
+#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1
+#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1
+#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1
+#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe
+#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2
+#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define ROM_STATUS__ROM_BUSY_MASK 0x1
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define ROM_INDEX__ROM_INDEX_MASK 0xffffff
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xffffffff
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0xffffff
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff
+#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
+#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+
+#endif /* SMU_7_1_3_SH_MASK_H */
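The *_MASK/__SHIFT pairs defined above follow the usual register bit-field pattern: AND with the mask, then shift right to read a field, and the inverse operation to update it. Below is a minimal sketch of that pattern using the MC0 threshold field as an example; the helper names are illustrative and not part of the header, and the include file name is assumed from the header guard.

#include <stdint.h>
#include "smu_7_1_3_sh_mask.h"	/* assumed file name for the masks above */

/* Read a bit-field out of a 32-bit register value. */
static inline uint32_t reg_get_field(uint32_t reg, uint32_t mask,
				     uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a bit-field in a 32-bit register value. */
static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: read the MC0 CAC threshold and bump it by one. */
static uint32_t bump_mc0_threshold(uint32_t cntl)
{
	uint32_t t = reg_get_field(cntl, LCAC_MC0_CNTL__MC0_THRESHOLD_MASK,
				   LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT);
	return reg_set_field(cntl, LCAC_MC0_CNTL__MC0_THRESHOLD_MASK,
			     LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT, t + 1);
}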
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
index e8fae5c77514..e8fae5c77514 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
index 6f907a5ffa5f..6f907a5ffa5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-names.h
+++ b/drivers/gpu/drm/amd/include/atom-names.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
index 1125b866cdb0..1125b866cdb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-types.h
+++ b/drivers/gpu/drm/amd/include/atom-types.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 44c5d4a4d1bf..44c5d4a4d1bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
new file mode 100644
index 000000000000..992dcd8a5c6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_COMMON_H
+#define _CGS_COMMON_H
+
+#include "amd_shared.h"
+
+/**
+ * enum cgs_gpu_mem_type - GPU memory types
+ */
+enum cgs_gpu_mem_type {
+ CGS_GPU_MEM_TYPE__VISIBLE_FB,
+ CGS_GPU_MEM_TYPE__INVISIBLE_FB,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
+ CGS_GPU_MEM_TYPE__GART_CACHEABLE,
+ CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
+};
+
+/**
+ * enum cgs_ind_reg - Indirect register spaces
+ */
+enum cgs_ind_reg {
+ CGS_IND_REG__MMIO,
+ CGS_IND_REG__PCIE,
+ CGS_IND_REG__SMC,
+ CGS_IND_REG__UVD_CTX,
+ CGS_IND_REG__DIDT,
+ CGS_IND_REG__AUDIO_ENDPT
+};
+
+/**
+ * enum cgs_clock - Clocks controlled by the SMU
+ */
+enum cgs_clock {
+ CGS_CLOCK__SCLK,
+ CGS_CLOCK__MCLK,
+ CGS_CLOCK__VCLK,
+ CGS_CLOCK__DCLK,
+ CGS_CLOCK__ECLK,
+ CGS_CLOCK__ACLK,
+ CGS_CLOCK__ICLK,
+ /* ... */
+};
+
+/**
+ * enum cgs_engine - Engines that can be statically power-gated
+ */
+enum cgs_engine {
+ CGS_ENGINE__UVD,
+ CGS_ENGINE__VCE,
+ CGS_ENGINE__VP8,
+ CGS_ENGINE__ACP_DMA,
+ CGS_ENGINE__ACP_DSP0,
+ CGS_ENGINE__ACP_DSP1,
+ CGS_ENGINE__ISP,
+ /* ... */
+};
+
+/**
+ * enum cgs_voltage_planes - Voltage planes for external camera HW
+ */
+enum cgs_voltage_planes {
+ CGS_VOLTAGE_PLANE__SENSOR0,
+ CGS_VOLTAGE_PLANE__SENSOR1,
+ /* ... */
+};
+
+/**
+ * enum cgs_ucode_id - Firmware types for different IPs
+ */
+enum cgs_ucode_id {
+ CGS_UCODE_ID_SMU = 0,
+ CGS_UCODE_ID_SDMA0,
+ CGS_UCODE_ID_SDMA1,
+ CGS_UCODE_ID_CP_CE,
+ CGS_UCODE_ID_CP_PFP,
+ CGS_UCODE_ID_CP_ME,
+ CGS_UCODE_ID_CP_MEC,
+ CGS_UCODE_ID_CP_MEC_JT1,
+ CGS_UCODE_ID_CP_MEC_JT2,
+ CGS_UCODE_ID_GMCON_RENG,
+ CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_MAXIMUM,
+};
+
+/**
+ * struct cgs_clock_limits - Clock limits
+ *
+ * Clocks are specified in 10KHz units.
+ */
+struct cgs_clock_limits {
+ unsigned min; /**< Minimum supported frequency */
+ unsigned max; /**< Maximum supported frequency */
+ unsigned sustainable; /**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+ uint16_t version;
+ uint16_t feature_version;
+ uint32_t image_size;
+ uint64_t mc_addr;
+ void *kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @mc_start: Start MC address of the heap (output)
+ * @mc_size: MC address space size (output)
+ * @mem_size: maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+ uint64_t *mc_start, uint64_t *mc_size,
+ uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device: opaque device handle
+ * @kmem: pointer to kernel memory
+ * @size: size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr: MC address (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+ uint64_t min_offset, uint64_t max_offset,
+ cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device: opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @size: size in bytes
+ * @align: alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle: memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE__*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to, depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
+ * - others: undefined, don't use with max_offset
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+ uint64_t size, uint64_t align,
+ uint64_t min_offset, uint64_t max_offset,
+ cgs_handle_t *handle);
+
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @mcaddr: MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+ uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @map: Kernel virtual address the memory was mapped to (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+ void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ * @value: register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+ uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+ unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ * @value: register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+ unsigned index, uint32_t value);
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ *
+ * Return: Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ *
+ * Return: Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ *
+ * Return: Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+ unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+ uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+ uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+ uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device: opaque device handle
+ * @table: data table index
+ * @size: size of the table (output, may be NULL)
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+ void *cgs_device, unsigned table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device: opaque device handle
+ * @table: command table index
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+ uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table: command table index
+ * @args: arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+ unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @active: 0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account as the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+ int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @clock: which clock?
+ * @freq: requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+ enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @engine: which engine?
+ * @powered: 0 = powered down, non-0 = powered up
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+ enum cgs_engine engine, int powered);
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device: opaque device handle
+ * @clock: which clock?
+ * @limits: clock limits
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+ enum cgs_clock clock,
+ struct cgs_clock_limits *limits);
+
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device: opaque device handle
+ * @mask: bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
+ * @voltages: pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+ const uint32_t *voltages);
+
+/**
+ * cgs_get_firmware_info() - Get firmware information from the core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info_t)(void *cgs_device,
+ enum cgs_ucode_id type,
+ struct cgs_firmware_info *info);
+
+typedef int (*cgs_set_powergating_state_t)(void *cgs_device,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+
+typedef int (*cgs_set_clockgating_state_t)(void *cgs_device,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+
+struct cgs_ops {
+ /* memory management calls (similar to KFD interface) */
+ cgs_gpu_mem_info_t gpu_mem_info;
+ cgs_gmap_kmem_t gmap_kmem;
+ cgs_gunmap_kmem_t gunmap_kmem;
+ cgs_alloc_gpu_mem_t alloc_gpu_mem;
+ cgs_free_gpu_mem_t free_gpu_mem;
+ cgs_gmap_gpu_mem_t gmap_gpu_mem;
+ cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+ cgs_kmap_gpu_mem_t kmap_gpu_mem;
+ cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+ /* MMIO access */
+ cgs_read_register_t read_register;
+ cgs_write_register_t write_register;
+ cgs_read_ind_register_t read_ind_register;
+ cgs_write_ind_register_t write_ind_register;
+ /* PCI configuration space access */
+ cgs_read_pci_config_byte_t read_pci_config_byte;
+ cgs_read_pci_config_word_t read_pci_config_word;
+ cgs_read_pci_config_dword_t read_pci_config_dword;
+ cgs_write_pci_config_byte_t write_pci_config_byte;
+ cgs_write_pci_config_word_t write_pci_config_word;
+ cgs_write_pci_config_dword_t write_pci_config_dword;
+ /* ATOM BIOS */
+ cgs_atom_get_data_table_t atom_get_data_table;
+ cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+ cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+ /* Power management */
+ cgs_create_pm_request_t create_pm_request;
+ cgs_destroy_pm_request_t destroy_pm_request;
+ cgs_set_pm_request_t set_pm_request;
+ cgs_pm_request_clock_t pm_request_clock;
+ cgs_pm_request_engine_t pm_request_engine;
+ cgs_pm_query_clock_limits_t pm_query_clock_limits;
+ cgs_set_camera_voltages_t set_camera_voltages;
+ /* Firmware Info */
+ cgs_get_firmware_info_t get_firmware_info;
+ /* clock- and power-gating interface */
+ cgs_set_powergating_state_t set_powergating_state;
+ cgs_set_clockgating_state_t set_clockgating_state;
+ /* ACPI (TODO) */
+};
+
+struct cgs_os_ops; /* To be defined in the OS-specific CGS header */
+
+struct cgs_device {
+ const struct cgs_ops *ops;
+ const struct cgs_os_ops *os_ops;
+ /* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+ (((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+ (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size) \
+ CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr) \
+ CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle) \
+ CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
+ CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle) \
+ CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
+ CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle) \
+ CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map) \
+ CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle) \
+ CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset) \
+ CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value) \
+ CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index) \
+ CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value) \
+ CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr) \
+ CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr) \
+ CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr) \
+ CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value) \
+ CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value) \
+ CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value) \
+ CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
+ CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
+#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
+ CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
+#define cgs_atom_exec_cmd_table(dev,table,args) \
+ CGS_CALL(atom_exec_cmd_table,dev,table,args)
+
+#define cgs_create_pm_request(dev,request) \
+ CGS_CALL(create_pm_request,dev,request)
+#define cgs_destroy_pm_request(dev,request) \
+ CGS_CALL(destroy_pm_request,dev,request)
+#define cgs_set_pm_request(dev,request,active) \
+ CGS_CALL(set_pm_request,dev,request,active)
+#define cgs_pm_request_clock(dev,request,clock,freq) \
+ CGS_CALL(pm_request_clock,dev,request,clock,freq)
+#define cgs_pm_request_engine(dev,request,engine,powered) \
+ CGS_CALL(pm_request_engine,dev,request,engine,powered)
+#define cgs_pm_query_clock_limits(dev,clock,limits) \
+ CGS_CALL(pm_query_clock_limits,dev,clock,limits)
+#define cgs_set_camera_voltages(dev,mask,voltages) \
+ CGS_CALL(set_camera_voltages,dev,mask,voltages)
+#define cgs_get_firmware_info(dev, type, info) \
+ CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_set_powergating_state(dev, block_type, state) \
+ CGS_CALL(set_powergating_state, dev, block_type, state)
+#define cgs_set_clockgating_state(dev, block_type, state) \
+ CGS_CALL(set_clockgating_state, dev, block_type, state)
+
+#endif /* _CGS_COMMON_H */
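The convenience macros above hide the double indirection through struct cgs_ops. The following sketch shows how a driver component might use them, covering the memory-management and PM-request interfaces; the function names, sizes and clock value are illustrative, and error handling is abbreviated.

#include "cgs_common.h"

/* Allocate a page of visible FB memory, map it for GPU and CPU access,
 * then release everything. */
static int example_scratch_page(void *cgs_device)
{
	cgs_handle_t handle;
	uint64_t mcaddr;
	void *cpu_ptr;
	int ret;

	ret = cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__VISIBLE_FB,
				4096, 4096, 0, 0, &handle);
	if (ret)
		return ret;

	ret = cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
	if (ret)
		goto free;

	ret = cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
	if (ret)
		goto gunmap;

	/* ... use cpu_ptr and mcaddr here ... */

	cgs_kunmap_gpu_mem(cgs_device, handle);
gunmap:
	cgs_gunmap_gpu_mem(cgs_device, handle);
free:
	cgs_free_gpu_mem(cgs_device, handle);
	return ret;
}

/* Hold a minimum SCLK and keep UVD powered while the request is active. */
static int example_min_sclk(void *cgs_device, unsigned min_sclk_10khz)
{
	cgs_handle_t req;
	int ret;

	ret = cgs_create_pm_request(cgs_device, &req);
	if (ret)
		return ret;

	cgs_pm_request_clock(cgs_device, req, CGS_CLOCK__SCLK, min_sclk_10khz);
	cgs_pm_request_engine(cgs_device, req, CGS_ENGINE__UVD, 1);
	ret = cgs_set_pm_request(cgs_device, req, 1);	/* activate */
	if (ret)
		cgs_destroy_pm_request(cgs_device, req);
	return ret;
}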
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
new file mode 100644
index 000000000000..488642f08267
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_LINUX_H
+#define _CGS_LINUX_H
+
+#include "cgs_common.h"
+
+/**
+ * cgs_import_gpu_mem() - Import dmabuf handle
+ * @cgs_device: opaque device handle
+ * @dmabuf_fd: DMABuf file descriptor
+ * @handle: memory handle (output)
+ *
+ * Must be called in the process context that dmabuf_fd belongs to.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
+ cgs_handle_t *handle);
+
+/**
+ * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
+ * @private_data: private data provided to cgs_add_irq_source
+ * @src_id: interrupt source ID
+ * @type: interrupt type
+ * @enabled: 0 = disable source, non-0 = enable source
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_source_set_func_t)(void *private_data,
+ unsigned src_id, unsigned type,
+ int enabled);
+
+/**
+ * cgs_irq_handler_func() - Interrupt handler callback
+ * @private_data: private data provided to cgs_add_irq_source
+ * @src_id: interrupt source ID
+ * @iv_entry: pointer to raw ih ring entry
+ *
+ * This callback runs in interrupt context.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_handler_func_t)(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry);
+
+/**
+ * cgs_add_irq_source() - Add an IRQ source
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @num_types: number of interrupt types that can be independently enabled
+ * @set: callback function to enable/disable an interrupt type
+ * @handler: interrupt handler callback
+ * @private_data: private data to pass to callback functions
+ *
+ * The same IRQ source can be added only once. Adding an IRQ source
+ * indicates ownership of that IRQ source and all its IRQ types.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+ unsigned num_types,
+ cgs_irq_source_set_func_t set,
+ cgs_irq_handler_func_t handler,
+ void *private_data);
+
+/**
+ * cgs_irq_get() - Request enabling an IRQ source and type
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @type: interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+/**
+ * cgs_irq_put() - Indicate IRQ source is no longer needed
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @type: interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources. Even after cgs_irq_put is called, the
+ * IRQ handler may still be called if there are more references to
+ * the IRQ source.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+struct cgs_os_ops {
+ cgs_import_gpu_mem_t import_gpu_mem;
+
+ /* IRQ handling */
+ cgs_add_irq_source_t add_irq_source;
+ cgs_irq_get_t irq_get;
+ cgs_irq_put_t irq_put;
+};
+
+#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
+ CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
+#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
+ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
+ private_data)
+#define cgs_irq_get(dev,src_id,type) \
+ CGS_OS_CALL(irq_get,dev,src_id,type)
+#define cgs_irq_put(dev,src_id,type) \
+ CGS_OS_CALL(irq_put,dev,src_id,type)
+
+#endif /* _CGS_LINUX_H */
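A short sketch of the IRQ-source lifecycle described above: a component claims a source once with cgs_add_irq_source() and then balances cgs_irq_get()/cgs_irq_put() per interrupt type. The src_id value, callback bodies and private data are illustrative.

#include "cgs_linux.h"

static int example_set(void *private_data, unsigned src_id, unsigned type,
		       int enabled)
{
	/* Program the hardware to mask or unmask this interrupt type. */
	return 0;
}

static int example_handler(void *private_data, unsigned src_id,
			   const uint32_t *iv_entry)
{
	/* Runs in interrupt context: decode iv_entry, defer heavy work. */
	return 0;
}

static int example_claim_irq(void *cgs_device)
{
	int ret;

	/* Claim source 42 (illustrative) with a single interrupt type. */
	ret = cgs_add_irq_source(cgs_device, 42, 1, example_set,
				 example_handler, NULL);
	if (ret)
		return ret;

	/* Must be balanced with cgs_irq_put() when no longer needed. */
	return cgs_irq_get(cgs_device, 42, 0);
}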
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa116b6..888250b33ea8 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@ enum kgd_engine_type {
KGD_ENGINE_MEC1,
KGD_ENGINE_MEC2,
KGD_ENGINE_RLC,
- KGD_ENGINE_SDMA,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
KGD_ENGINE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 0030f726e68c..ee6978b30b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -146,6 +146,9 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support VR HOT GPIO Configurable.
#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -673,7 +676,8 @@ typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
UCHAR revid;
ATOM_PowerTune_Table power_tune_table;
USHORT usMaximumPowerDeliveryLimit;
- USHORT usReserve[7];
+ USHORT usTjMax;
+ USHORT usReserve[6];
} ATOM_PPLIB_POWERTUNE_Table_V1;
#define ATOM_PPM_A_A 1
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644
index 000000000000..65cfacd7a66c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+ uint32_t sdmax_rlcx_rb_cntl;
+ uint32_t sdmax_rlcx_rb_base;
+ uint32_t sdmax_rlcx_rb_base_hi;
+ uint32_t sdmax_rlcx_rb_rptr;
+ uint32_t sdmax_rlcx_rb_wptr;
+ uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+ uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+ uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+ uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+ uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+ uint32_t sdmax_rlcx_ib_cntl;
+ uint32_t sdmax_rlcx_ib_rptr;
+ uint32_t sdmax_rlcx_ib_offset;
+ uint32_t sdmax_rlcx_ib_base_lo;
+ uint32_t sdmax_rlcx_ib_base_hi;
+ uint32_t sdmax_rlcx_ib_size;
+ uint32_t sdmax_rlcx_skip_cntl;
+ uint32_t sdmax_rlcx_context_status;
+ uint32_t sdmax_rlcx_doorbell;
+ uint32_t sdmax_rlcx_virtual_addr;
+ uint32_t sdmax_rlcx_ape1_cntl;
+ uint32_t sdmax_rlcx_doorbell_log;
+ uint32_t reserved_22;
+ uint32_t reserved_23;
+ uint32_t reserved_24;
+ uint32_t reserved_25;
+ uint32_t reserved_26;
+ uint32_t reserved_27;
+ uint32_t reserved_28;
+ uint32_t reserved_29;
+ uint32_t reserved_30;
+ uint32_t reserved_31;
+ uint32_t reserved_32;
+ uint32_t reserved_33;
+ uint32_t reserved_34;
+ uint32_t reserved_35;
+ uint32_t reserved_36;
+ uint32_t reserved_37;
+ uint32_t reserved_38;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t reserved_65;
+ uint32_t reserved_66;
+ uint32_t reserved_67;
+ uint32_t reserved_68;
+ uint32_t reserved_69;
+ uint32_t reserved_70;
+ uint32_t reserved_71;
+ uint32_t reserved_72;
+ uint32_t reserved_73;
+ uint32_t reserved_74;
+ uint32_t reserved_75;
+ uint32_t reserved_76;
+ uint32_t reserved_77;
+ uint32_t reserved_78;
+ uint32_t reserved_79;
+ uint32_t reserved_80;
+ uint32_t reserved_81;
+ uint32_t reserved_82;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t reserved_86;
+ uint32_t reserved_87;
+ uint32_t reserved_88;
+ uint32_t reserved_89;
+ uint32_t reserved_90;
+ uint32_t reserved_91;
+ uint32_t reserved_92;
+ uint32_t reserved_93;
+ uint32_t reserved_94;
+ uint32_t reserved_95;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t reserved_100;
+ uint32_t reserved_101;
+ uint32_t reserved_102;
+ uint32_t reserved_103;
+ uint32_t reserved_104;
+ uint32_t reserved_105;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t reserved_114;
+ uint32_t reserved_115;
+ uint32_t reserved_116;
+ uint32_t reserved_117;
+ uint32_t reserved_118;
+ uint32_t reserved_119;
+ uint32_t reserved_120;
+ uint32_t reserved_121;
+ uint32_t reserved_122;
+ uint32_t reserved_123;
+ uint32_t reserved_124;
+ uint32_t reserved_125;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+};
+
+struct vi_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_dispatch_id;
+ uint32_t compute_threadgroup_id;
+ uint32_t compute_relaunch;
+ uint32_t compute_wave_restore_addr_lo;
+ uint32_t compute_wave_restore_addr_hi;
+ uint32_t compute_wave_restore_control;
+ uint32_t reserved_39;
+ uint32_t reserved_40;
+ uint32_t reserved_41;
+ uint32_t reserved_42;
+ uint32_t reserved_43;
+ uint32_t reserved_44;
+ uint32_t reserved_45;
+ uint32_t reserved_46;
+ uint32_t reserved_47;
+ uint32_t reserved_48;
+ uint32_t reserved_49;
+ uint32_t reserved_50;
+ uint32_t reserved_51;
+ uint32_t reserved_52;
+ uint32_t reserved_53;
+ uint32_t reserved_54;
+ uint32_t reserved_55;
+ uint32_t reserved_56;
+ uint32_t reserved_57;
+ uint32_t reserved_58;
+ uint32_t reserved_59;
+ uint32_t reserved_60;
+ uint32_t reserved_61;
+ uint32_t reserved_62;
+ uint32_t reserved_63;
+ uint32_t reserved_64;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t reserved_83;
+ uint32_t reserved_84;
+ uint32_t reserved_85;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t cp_mqd_save_start_time_lo;
+ uint32_t cp_mqd_save_start_time_hi;
+ uint32_t cp_mqd_save_end_time_lo;
+ uint32_t cp_mqd_save_end_time_hi;
+ uint32_t cp_mqd_restore_start_time_lo;
+ uint32_t cp_mqd_restore_start_time_hi;
+ uint32_t cp_mqd_restore_end_time_lo;
+ uint32_t cp_mqd_restore_end_time_hi;
+ uint32_t reserved_106;
+ uint32_t reserved_107;
+ uint32_t gds_cs_ctxsw_cnt0;
+ uint32_t gds_cs_ctxsw_cnt1;
+ uint32_t gds_cs_ctxsw_cnt2;
+ uint32_t gds_cs_ctxsw_cnt3;
+ uint32_t reserved_112;
+ uint32_t reserved_113;
+ uint32_t cp_pq_exe_status_lo;
+ uint32_t cp_pq_exe_status_hi;
+ uint32_t cp_packet_id_lo;
+ uint32_t cp_packet_id_hi;
+ uint32_t cp_packet_exe_status_lo;
+ uint32_t cp_packet_exe_status_hi;
+ uint32_t gds_save_base_addr_lo;
+ uint32_t gds_save_base_addr_hi;
+ uint32_t gds_save_mask_lo;
+ uint32_t gds_save_mask_hi;
+ uint32_t ctx_save_base_addr_lo;
+ uint32_t ctx_save_base_addr_hi;
+ uint32_t reserved_126;
+ uint32_t reserved_127;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_hqd_hq_status1;
+ uint32_t cp_hqd_hq_control1;
+ uint32_t cp_hqd_eop_base_addr_lo;
+ uint32_t cp_hqd_eop_base_addr_hi;
+ uint32_t cp_hqd_eop_control;
+ uint32_t cp_hqd_eop_rptr;
+ uint32_t cp_hqd_eop_wptr;
+ uint32_t cp_hqd_eop_done_events;
+ uint32_t cp_hqd_ctx_save_base_addr_lo;
+ uint32_t cp_hqd_ctx_save_base_addr_hi;
+ uint32_t cp_hqd_ctx_save_control;
+ uint32_t cp_hqd_cntl_stack_offset;
+ uint32_t cp_hqd_cntl_stack_size;
+ uint32_t cp_hqd_wg_state_offset;
+ uint32_t cp_hqd_ctx_save_size;
+ uint32_t cp_hqd_gds_resource_state;
+ uint32_t cp_hqd_error;
+ uint32_t cp_hqd_eop_wptr_mem;
+ uint32_t cp_hqd_eop_dones;
+ uint32_t reserved_182;
+ uint32_t reserved_183;
+ uint32_t reserved_184;
+ uint32_t reserved_185;
+ uint32_t reserved_186;
+ uint32_t reserved_187;
+ uint32_t reserved_188;
+ uint32_t reserved_189;
+ uint32_t reserved_190;
+ uint32_t reserved_191;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t iqtimer_pkt_dw7;
+ uint32_t iqtimer_pkt_dw8;
+ uint32_t iqtimer_pkt_dw9;
+ uint32_t iqtimer_pkt_dw10;
+ uint32_t iqtimer_pkt_dw11;
+ uint32_t iqtimer_pkt_dw12;
+ uint32_t iqtimer_pkt_dw13;
+ uint32_t iqtimer_pkt_dw14;
+ uint32_t iqtimer_pkt_dw15;
+ uint32_t iqtimer_pkt_dw16;
+ uint32_t iqtimer_pkt_dw17;
+ uint32_t iqtimer_pkt_dw18;
+ uint32_t iqtimer_pkt_dw19;
+ uint32_t iqtimer_pkt_dw20;
+ uint32_t iqtimer_pkt_dw21;
+ uint32_t iqtimer_pkt_dw22;
+ uint32_t iqtimer_pkt_dw23;
+ uint32_t iqtimer_pkt_dw24;
+ uint32_t iqtimer_pkt_dw25;
+ uint32_t iqtimer_pkt_dw26;
+ uint32_t iqtimer_pkt_dw27;
+ uint32_t iqtimer_pkt_dw28;
+ uint32_t iqtimer_pkt_dw29;
+ uint32_t iqtimer_pkt_dw30;
+ uint32_t iqtimer_pkt_dw31;
+ uint32_t reserved_225;
+ uint32_t reserved_226;
+ uint32_t reserved_227;
+ uint32_t set_resources_header;
+ uint32_t set_resources_dw1;
+ uint32_t set_resources_dw2;
+ uint32_t set_resources_dw3;
+ uint32_t set_resources_dw4;
+ uint32_t set_resources_dw5;
+ uint32_t set_resources_dw6;
+ uint32_t set_resources_dw7;
+ uint32_t reserved_236;
+ uint32_t reserved_237;
+ uint32_t reserved_238;
+ uint32_t reserved_239;
+ uint32_t queue_doorbell_id0;
+ uint32_t queue_doorbell_id1;
+ uint32_t queue_doorbell_id2;
+ uint32_t queue_doorbell_id3;
+ uint32_t queue_doorbell_id4;
+ uint32_t queue_doorbell_id5;
+ uint32_t queue_doorbell_id6;
+ uint32_t queue_doorbell_id7;
+ uint32_t queue_doorbell_id8;
+ uint32_t queue_doorbell_id9;
+ uint32_t queue_doorbell_id10;
+ uint32_t queue_doorbell_id11;
+ uint32_t queue_doorbell_id12;
+ uint32_t queue_doorbell_id13;
+ uint32_t queue_doorbell_id14;
+ uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 000000000000..9259f1b6664c
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->entities);
+ rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+ struct amd_sched_entity *entity)
+{
+ spin_lock(&rq->lock);
+ list_add_tail(&entity->list, &rq->entities);
+ spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+ struct amd_sched_entity *entity)
+{
+ spin_lock(&rq->lock);
+ list_del_init(&entity->list);
+ if (rq->current_entity == entity)
+ rq->current_entity = NULL;
+ spin_unlock(&rq->lock);
+}
+
+/**
+ * Select the next job from the given run queue using a round-robin policy.
+ * Returns NULL if nothing is available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+ struct amd_sched_entity *entity;
+ struct amd_sched_job *job;
+
+ spin_lock(&rq->lock);
+
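+ /* First pass: resume the round-robin scan right after the entity
+ * that was picked last time, so every entity gets a fair turn. */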
+ entity = rq->current_entity;
+ if (entity) {
+ list_for_each_entry_continue(entity, &rq->entities, list) {
+ job = amd_sched_entity_pop_job(entity);
+ if (job) {
+ rq->current_entity = entity;
+ spin_unlock(&rq->lock);
+ return job;
+ }
+ }
+ }
+
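+ /* Second pass: wrap around from the head of the list, stopping once
+ * the scan reaches the previously selected entity again. */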
+ list_for_each_entry(entity, &rq->entities, list) {
+
+ job = amd_sched_entity_pop_job(entity);
+ if (job) {
+ rq->current_entity = entity;
+ spin_unlock(&rq->lock);
+ return job;
+ }
+
+ if (entity == rq->current_entity)
+ break;
+ }
+
+ spin_unlock(&rq->lock);
+
+ return NULL;
+}
+
+/**
+ * Init a context entity used by the scheduler when submitting to a HW ring.
+ *
+ * @sched The pointer to the scheduler
+ * @entity The pointer to a valid amd_sched_entity
+ * @rq The run queue this entity belongs to
+ * @jobs The max number of jobs in the job queue
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq,
+ uint32_t jobs)
+{
+ if (!(sched && entity && rq))
+ return -EINVAL;
+
+ memset(entity, 0, sizeof(struct amd_sched_entity));
+ entity->belongto_rq = rq;
+ entity->scheduler = sched;
+ entity->fence_context = fence_context_alloc(1);
+ if (kfifo_alloc(&entity->job_queue,
+ jobs * sizeof(void *),
+ GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock_init(&entity->queue_lock);
+ atomic_set(&entity->fence_seq, 0);
+
+ /* Add the entity to the run queue */
+ amd_sched_rq_add_entity(rq, entity);
+ return 0;
+}
+
+/**
+ * Query if the entity is initialized
+ *
+ * @sched Pointer to scheduler instance
+ * @entity The pointer to a valid scheduler entity
+ *
+ * Return true if the entity is initialized, false otherwise.
+ */
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity)
+{
+ return entity->scheduler == sched &&
+ entity->belongto_rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity The pointer to a valid scheduler entity
+ *
+ * Return true if the entity has no unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
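+ /* Read barrier: intended to make the producer's kfifo updates
+ * visible before the emptiness check below. */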
+ rmb();
+ if (kfifo_is_empty(&entity->job_queue))
+ return true;
+
+ return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched Pointer to scheduler instance
+ * @entity The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity)
+{
+ struct amd_sched_rq *rq = entity->belongto_rq;
+
+ if (!amd_sched_entity_is_initialized(sched, entity))
+ return;
+
+ /*
+ * The client will not queue more IBs during this fini; consume the
+ * existing queued IBs before tearing the entity down.
+ */
+ wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+ amd_sched_rq_remove_entity(rq, entity);
+ kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_entity *entity =
+ container_of(cb, struct amd_sched_entity, cb);
+ entity->dependency = NULL;
+ fence_put(f);
+ amd_sched_wakeup(entity->scheduler);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+ struct amd_gpu_scheduler *sched = entity->scheduler;
+ struct amd_sched_job *job;
+
+ if (ACCESS_ONCE(entity->dependency))
+ return NULL;
+
+ if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+ return NULL;
+
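+ /* Walk the job's dependencies. If installing the callback fails, the
+ * fence has already signaled, so drop it and ask for the next one;
+ * otherwise hold the job back until the callback wakes us up. */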
+ while ((entity->dependency = sched->ops->dependency(job))) {
+
+ if (fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
+ fence_put(entity->dependency);
+ else
+ return NULL;
+ }
+
+ return job;
+}
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @job The job to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *job)
+{
+ struct amd_sched_entity *entity = job->s_entity;
+ bool added, first = false;
+
+ spin_lock(&entity->queue_lock);
+ added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+
+ if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+ first = true;
+
+ spin_unlock(&entity->queue_lock);
+
+ /* first job wakes up scheduler */
+ if (first)
+ amd_sched_wakeup(job->sched);
+
+ return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job The job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+ struct amd_sched_entity *entity = sched_job->s_entity;
+ struct amd_sched_fence *fence = amd_sched_fence_create(
+ entity, sched_job->owner);
+
+ if (!fence)
+ return -ENOMEM;
+
+ fence_get(&fence->base);
+ sched_job->s_fence = fence;
+
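+ /* Block until the job fits into the entity's queue; the scheduler's
+ * main thread wakes job_scheduled whenever it consumes an entry. */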
+ wait_event(entity->scheduler->job_scheduled,
+ amd_sched_entity_in(sched_job));
+
+ return 0;
+}
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+ return atomic_read(&sched->hw_rq_count) <
+ sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+ if (amd_sched_ready(sched))
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run.
+ */
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *job;
+
+ if (!amd_sched_ready(sched))
+ return NULL;
+
+ /* Kernel run queue has higher priority than the normal run queue */
+ job = amd_sched_rq_select_job(&sched->kernel_rq);
+ if (job == NULL)
+ job = amd_sched_rq_select_job(&sched->sched_rq);
+
+ return job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *sched_job =
+ container_of(cb, struct amd_sched_job, cb);
+ struct amd_gpu_scheduler *sched;
+
+ sched = sched_job->sched;
+ amd_sched_fence_signal(sched_job->s_fence);
+ atomic_dec(&sched->hw_rq_count);
+ fence_put(&sched_job->s_fence->base);
+ sched->ops->process_job(sched_job);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+ struct sched_param sparam = {.sched_priority = 1};
+ struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+ int r, count;
+
+ sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+ while (!kthread_should_stop()) {
+ struct amd_sched_entity *entity;
+ struct amd_sched_job *job;
+ struct fence *fence;
+
+ wait_event_interruptible(sched->wake_up_worker,
+ kthread_should_stop() ||
+ (job = amd_sched_select_job(sched)));
+
+ if (!job)
+ continue;
+
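+ /* Hand the job to the hardware and hook its completion fence so
+ * amd_sched_process_job runs when the hardware is done with it. */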
+ entity = job->s_entity;
+ atomic_inc(&sched->hw_rq_count);
+ fence = sched->ops->run_job(job);
+ if (fence) {
+ r = fence_add_callback(fence, &job->cb,
+ amd_sched_process_job);
+ if (r == -ENOENT)
+ amd_sched_process_job(fence, &job->cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
+ fence_put(fence);
+ }
+
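+ /* The job was only peeked in amd_sched_entity_pop_job; remove it
+ * from the queue now and wake any producer blocked in push_job. */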
+ count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+ WARN_ON(count != sizeof(job));
+ wake_up(&sched->job_scheduled);
+ }
+ return 0;
+}
+
+/**
+ * Create a gpu scheduler
+ *
+ * @ops The backend operations for this scheduler.
+ * @ring The ring id for the scheduler.
+ * @hw_submission The max number of in-flight hw submissions.
+ * @priv Driver private data, stored in the scheduler for later use.
+ *
+ * Return the pointer to the scheduler on success, NULL otherwise.
+ */
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+ unsigned ring, unsigned hw_submission,
+ void *priv)
+{
+ struct amd_gpu_scheduler *sched;
+
+ sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
+ if (!sched)
+ return NULL;
+
+ sched->ops = ops;
+ sched->ring_id = ring;
+ sched->hw_submission_limit = hw_submission;
+ sched->priv = priv;
+ snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+ amd_sched_rq_init(&sched->sched_rq);
+ amd_sched_rq_init(&sched->kernel_rq);
+
+ init_waitqueue_head(&sched->wake_up_worker);
+ init_waitqueue_head(&sched->job_scheduled);
+ atomic_set(&sched->hw_rq_count, 0);
+ /* Each scheduler will run on a separate kernel thread */
+ sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+ if (IS_ERR(sched->thread)) {
+ DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
+ kfree(sched);
+ return NULL;
+ }
+
+ return sched;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched The pointer to the scheduler
+ *
+ * Always returns 0; the scheduler thread is stopped and the memory freed.
+ */
+int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+{
+ kthread_stop(sched->thread);
+ kfree(sched);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000000..2af0e4d4d817
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct amd_sched_entity {
+ struct list_head list;
+ struct amd_sched_rq *belongto_rq;
+ atomic_t fence_seq;
+ /* the job_queue maintains the jobs submitted by clients */
+ struct kfifo job_queue;
+ spinlock_t queue_lock;
+ struct amd_gpu_scheduler *scheduler;
+ uint64_t fence_context;
+ struct fence *dependency;
+ struct fence_cb cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct amd_sched_rq {
+ spinlock_t lock;
+ struct list_head entities;
+ struct amd_sched_entity *current_entity;
+};
+
+struct amd_sched_fence {
+ struct fence base;
+ struct amd_gpu_scheduler *scheduler;
+ spinlock_t lock;
+ void *owner;
+};
+
+struct amd_sched_job {
+ struct fence_cb cb;
+ struct amd_gpu_scheduler *sched;
+ struct amd_sched_entity *s_entity;
+ struct amd_sched_fence *s_fence;
+ void *owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+ struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+ if (__f->base.ops == &amd_sched_fence_ops)
+ return __f;
+
+ return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+ */
+struct amd_sched_backend_ops {
+ struct fence *(*dependency)(struct amd_sched_job *job);
+ struct fence *(*run_job)(struct amd_sched_job *job);
+ void (*process_job)(struct amd_sched_job *job);
+};
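+
+/*
+ * A minimal driver-side sketch of these hooks (illustrative only; the
+ * my_* names below are hypothetical and not part of this patch):
+ *
+ *   static struct fence *my_dependency(struct amd_sched_job *job)
+ *   {
+ *           return NULL;                // nothing to wait on
+ *   }
+ *
+ *   static struct fence *my_run_job(struct amd_sched_job *job)
+ *   {
+ *           return my_ring_submit(job); // hw completion fence
+ *   }
+ *
+ *   static void my_process_job(struct amd_sched_job *job)
+ *   {
+ *           kfree(job);                 // reclaim after completion
+ *   }
+ *
+ *   static struct amd_sched_backend_ops my_ops = {
+ *           .dependency  = my_dependency,
+ *           .run_job     = my_run_job,
+ *           .process_job = my_process_job,
+ *   };
+ */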
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct amd_gpu_scheduler {
+ struct task_struct *thread;
+ struct amd_sched_rq sched_rq;
+ struct amd_sched_rq kernel_rq;
+ atomic_t hw_rq_count;
+ struct amd_sched_backend_ops *ops;
+ uint32_t ring_id;
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ uint32_t hw_submission_limit;
+ char name[20];
+ void *priv;
+};
+
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+ uint32_t ring, uint32_t hw_submission, void *priv);
+int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq,
+ uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
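+
+/*
+ * Typical call sequence (a sketch, not mandated by this header):
+ * amd_sched_create() once per ring, amd_sched_entity_init() once per
+ * client, amd_sched_entity_push_job() for each submission, then
+ * amd_sched_entity_fini() and amd_sched_destroy() on teardown.
+ */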
+
+struct amd_sched_fence *amd_sched_fence_create(
+ struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000000..e62c37920e11
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+ struct amd_sched_fence *fence = NULL;
+ unsigned seq;
+
+ fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+ fence->owner = owner;
+ fence->scheduler = s_entity->scheduler;
+ spin_lock_init(&fence->lock);
+
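+ /* Each entity owns a fence context; the per-entity sequence number
+ * gives the fences within that context a monotonic order. */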
+ seq = atomic_inc_return(&s_entity->fence_seq);
+ fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+ s_entity->fence_context, seq);
+
+ return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+ int ret = fence_signal(&fence->base);
+ if (!ret)
+ FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+ return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ return (const char *)fence->scheduler->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+ return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+ .get_driver_name = amd_sched_fence_get_driver_name,
+ .get_timeline_name = amd_sched_fence_get_timeline_name,
+ .enable_signaling = amd_sched_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = NULL,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42d2ffa08716..01ffe9bffe38 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- crtc->mode = *adj;
-
val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
if (val != dcrtc->dumb_ctrl) {
dcrtc->dumb_ctrl = val;
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e731b0de..7d03c51abcb9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
- info = framebuffer_alloc(0, dev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(fbh);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto err_fballoc;
}
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto err_fbcmap;
- }
-
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
info->par = fbh;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
info->screen_size = obj->obj.size;
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- fbh->fbdev = info;
+
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
@@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
return 0;
- err_fbcmap:
- framebuffer_release(info);
err_fballoc:
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
@@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev)
return 0;
err_fb_setup:
+ drm_fb_helper_release_fbi(fbh);
drm_fb_helper_fini(fbh);
err_fb_helper:
priv->fbdev = NULL;
@@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev)
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh) {
- struct fb_info *info = fbh->fbdev;
-
- if (info) {
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_release_fbi(fbh);
drm_fb_helper_fini(fbh);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 580e10acaa3a..60a688ef81c7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
- dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
- DMA_TO_DEVICE);
+ if (dobj->sgt)
+ dma_buf_unmap_attachment(dobj->obj.import_attach,
+ dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c5b06fdb459c..e939faba7fcc 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,6 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
-}
-static unsigned armada_limit(int start, unsigned size, unsigned max)
-{
- int end = start + size;
- if (end < 0)
- return 0;
- if (start < 0)
- start = 0;
- return (unsigned)end > max ? max - start : end - start;
+ wake_up(&dplane->vbl.wait);
}
static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_rect src = {
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
uint32_t val, ctrl0;
unsigned idx = 0;
+ bool visible;
int ret;
- crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
- crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
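+ /* Clip the source/destination rectangles against the CRTC mode;
+ * "visible" reports whether anything remains to display. */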
+ ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+ 0, INT_MAX, true, false, &visible);
+ if (ret)
+ return ret;
+
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
/* Does the position/size result in nothing to display? */
- if (crtc_w == 0 || crtc_h == 0) {
+ if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
- }
-
- /*
- * FIXME: if the starting point is off screen, we need to
- * adjust src_x, src_y, src_w, src_h appropriately, and
- * according to the scale.
- */
if (!dcrtc->plane) {
dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) |
+ drm_rect_width(&src) >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}
- ret = wait_event_timeout(dplane->vbl.wait,
- list_empty(&dplane->vbl.update.node),
- HZ/25);
- if (ret < 0)
- return ret;
+ wait_event_timeout(dplane->vbl.wait,
+ list_empty(&dplane->vbl.update.node),
+ HZ/25);
if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
- uint32_t sy, su, sv;
+ uint32_t addr[3], pixel_format;
+ int i, num_planes, hsub;
/*
* Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
older_fb);
}
- src_y >>= 16;
- src_x >>= 16;
- sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
- src_x * fb->bits_per_pixel / 8;
- su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
- src_x;
- sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
- src_x;
+ src_y = src.y1 >> 16;
+ src_x = src.x1 >> 16;
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ pixel_format = fb->pixel_format;
+ hsub = drm_format_horz_chroma_subsampling(pixel_format);
+ num_planes = drm_format_num_planes(pixel_format);
+
+ /*
+ * Annoyingly, shifting a YUYV-format image by one pixel
+ * causes the U/V planes to toggle. Toggle the UV swap.
+ * (Unfortunately, this causes momentary colour flickering.)
+ */
+ if (src_x & (hsub - 1) && num_planes == 1)
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ for (i = 0; i < num_planes; i++)
+ addr[i] = obj->dev_addr + fb->offsets[i] +
+ src_y * fb->pitches[i] +
+ src_x * drm_format_plane_cpp(pixel_format, i);
+ for (; i < ARRAY_SIZE(addr); i++)
+ addr[i] = 0;
+
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, su,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+ armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
LCD_SPU_DMA_PITCH_UV);
}
- val = (src_h & 0xffff0000) | src_w >> 16;
+ val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
- val = crtc_h << 16 | crtc_w;
+
+ val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
- val = crtc_y << 16 | crtc_x;
+
+ val = dest.y1 << 16 | dest.x1;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
+
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
static void armada_plane_destroy(struct drm_plane *plane)
{
- kfree(plane);
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(dplane);
}
static int armada_plane_set_property(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eefae273..f31db28a684b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct ast_fbdev *afbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct ast_fbdev *afbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
ast_dirty_update(afbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct ast_fbdev *afbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
ast_dirty_update(afbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct fb_info *info;
int size, ret;
- struct device *device = &dev->pdev->dev;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper,
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
- goto out;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_free_vram;
}
info->par = afbdev;
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret)
- goto out;
+ goto err_release_fbi;
afbdev->sysram = sysram;
afbdev->size = size;
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
- afbdev->helper.fbdev = info;
strcpy(info->fix.id, "astdrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &astfb_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out;
- }
-
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out;
- }
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper,
fb->width, fb->height);
return 0;
-out:
+
+err_release_fbi:
+ drm_fb_helper_release_fbi(helper);
+err_free_vram:
+ vfree(afbdev->sysram);
return ret;
}
@@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
static void ast_fbdev_destroy(struct drm_device *dev,
struct ast_fbdev *afbdev)
{
- struct fb_info *info;
struct ast_framebuffer *afb = &afbdev->afb;
- if (afbdev->helper.fbdev) {
- info = afbdev->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+
+ drm_fb_helper_unregister_fbi(&afbdev->helper);
+ drm_fb_helper_release_fbi(&afbdev->helper);
if (afb->obj) {
drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
if (!ast->fbdev)
return;
- fb_set_suspend(ast->fbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc93382..838217f8ce7d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct ast_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_ast_bo(obj);
*offset = ast_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..9f6e234e7029 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -239,7 +239,8 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
return atmel_hlcdc_plane_prepare_disc_area(s);
}
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+ struct drm_crtc_state *old_s)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -253,7 +254,8 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
}
}
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_s)
{
/* TODO: write common plane control register if available */
}
@@ -355,6 +357,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+ drm_crtc_vblank_reset(&crtc->base);
dc->crtc = &crtc->base;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..8bc62ec407f9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -29,6 +29,115 @@
#define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
+ {
+ .name = "base",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x40,
+ .id = 0,
+ .type = ATMEL_HLCDC_BASE_LAYER,
+ .nconfigs = 5,
+ .layout = {
+ .xstride = { 2 },
+ .default_color = 3,
+ .general_config = 4,
+ },
+ },
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
+ .min_width = 0,
+ .min_height = 0,
+ .max_width = 1280,
+ .max_height = 860,
+ .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
+ .layers = atmel_hlcdc_at91sam9n12_layers,
+};
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
+ {
+ .name = "base",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x40,
+ .id = 0,
+ .type = ATMEL_HLCDC_BASE_LAYER,
+ .nconfigs = 5,
+ .layout = {
+ .xstride = { 2 },
+ .default_color = 3,
+ .general_config = 4,
+ .disc_pos = 5,
+ .disc_size = 6,
+ },
+ },
+ {
+ .name = "overlay1",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x100,
+ .id = 1,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 10,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+ {
+ .name = "high-end-overlay",
+ .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+ .regs_offset = 0x280,
+ .id = 2,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 17,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .memsize = 4,
+ .xstride = { 5, 7 },
+ .pstride = { 6, 8 },
+ .default_color = 9,
+ .chroma_key = 10,
+ .chroma_key_mask = 11,
+ .general_config = 12,
+ .csc = 14,
+ },
+ },
+ {
+ .name = "cursor",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x340,
+ .id = 3,
+ .type = ATMEL_HLCDC_CURSOR_LAYER,
+ .nconfigs = 10,
+ .max_width = 128,
+ .max_height = 128,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
+ .min_width = 0,
+ .min_height = 0,
+ .max_width = 800,
+ .max_height = 600,
+ .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
+ .layers = atmel_hlcdc_at91sam9x5_layers,
+};
+
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
{
.name = "base",
@@ -132,11 +241,105 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
.layers = atmel_hlcdc_sama5d3_layers,
};
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
+ {
+ .name = "base",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x40,
+ .id = 0,
+ .type = ATMEL_HLCDC_BASE_LAYER,
+ .nconfigs = 7,
+ .layout = {
+ .xstride = { 2 },
+ .default_color = 3,
+ .general_config = 4,
+ .disc_pos = 5,
+ .disc_size = 6,
+ },
+ },
+ {
+ .name = "overlay1",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x140,
+ .id = 1,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 10,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+ {
+ .name = "overlay2",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x240,
+ .id = 2,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 10,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ },
+ {
+ .name = "high-end-overlay",
+ .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+ .regs_offset = 0x340,
+ .id = 3,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .nconfigs = 42,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .memsize = 4,
+ .xstride = { 5, 7 },
+ .pstride = { 6, 8 },
+ .default_color = 9,
+ .chroma_key = 10,
+ .chroma_key_mask = 11,
+ .general_config = 12,
+ .csc = 14,
+ },
+ },
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
+ .min_width = 0,
+ .min_height = 0,
+ .max_width = 2048,
+ .max_height = 2048,
+ .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
+ .layers = atmel_hlcdc_sama5d4_layers,
+};
static const struct of_device_id atmel_hlcdc_of_match[] = {
{
+ .compatible = "atmel,at91sam9n12-hlcdc",
+ .data = &atmel_hlcdc_dc_at91sam9n12,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-hlcdc",
+ .data = &atmel_hlcdc_dc_at91sam9x5,
+ },
+ {
.compatible = "atmel,sama5d3-hlcdc",
.data = &atmel_hlcdc_dc_sama5d3,
},
+ {
+ .compatible = "atmel,sama5d4-hlcdc",
+ .data = &atmel_hlcdc_dc_sama5d4,
+ },
{ /* sentinel */ },
};
@@ -313,20 +516,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- ret = atmel_hlcdc_dc_modeset_init(dev);
+ ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
- drm_mode_config_reset(dev);
-
- ret = drm_vblank_init(dev, 1);
+ ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev->dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
+ drm_mode_config_reset(dev);
+
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
@@ -485,7 +688,9 @@ static const struct file_operations fops = {
};
static struct drm_driver atmel_hlcdc_dc_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+ DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
.preclose = atmel_hlcdc_dc_preclose,
.lastclose = atmel_hlcdc_dc_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
@@ -497,6 +702,15 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.disable_vblank = atmel_hlcdc_dc_disable_vblank,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
@@ -559,7 +773,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 9c4513005310..067e4c144bd6 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -126,12 +126,16 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
if (info->num_bus_formats) {
switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
+ break;
case MEDIA_BUS_FMT_RGB666_1X18:
cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
break;
+ case MEDIA_BUS_FMT_RGB444_1X12:
default:
break;
}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bde2d25..7f1a3604b19f 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev)
if (bochs->fb.initialized) {
console_lock();
- fb_set_suspend(bochs->fb.helper.fbdev, 1);
+ drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
console_unlock();
}
@@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev)
if (bochs->fb.initialized) {
console_lock();
- fb_set_suspend(bochs->fb.helper.fbdev, 0);
+ drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
console_unlock();
}
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d9798dc99..09a0637aab3e 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
{
struct bochs_device *bochs =
container_of(helper, struct bochs_device, fb.helper);
- struct drm_device *dev = bochs->dev;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
struct drm_gem_object *gobj = NULL;
struct bochs_bo *bo = NULL;
int size, ret;
@@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper,
ttm_bo_unreserve(&bo->bo);
/* init fb device */
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = &bochs->fb.helper;
ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
- if (ret)
+ if (ret) {
+ drm_fb_helper_release_fbi(helper);
return ret;
+ }
bochs->fb.size = size;
/* setup helper */
fb = &bochs->fb.gfb.base;
bochs->fb.helper.fb = fb;
- bochs->fb.helper.fbdev = info;
strcpy(info->fix.id, "bochsdrmfb");
@@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper,
info->fix.smem_start = 0;
info->fix.smem_len = size;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- return -ENOMEM;
- }
-
return 0;
}
static int bochs_fbdev_destroy(struct bochs_device *bochs)
{
struct bochs_framebuffer *gfb = &bochs->fb.gfb;
- struct fb_info *info;
DRM_DEBUG_DRIVER("\n");
- if (bochs->fb.helper.fbdev) {
- info = bochs->fb.helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&bochs->fb.helper);
+ drm_fb_helper_release_fbi(&bochs->fb.helper);
if (gfb->obj) {
drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff518d4..f69e6bf9bb0e 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct bochs_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_bochs_bo(obj);
*offset = bochs_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index acef3223772c..2de52a53a803 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,24 +1,32 @@
+config DRM_BRIDGE
+ def_bool y
+ depends on DRM
+ help
+ Bridge registration and lookup framework.
+
+menu "Display Interface Bridges"
+ depends on DRM && DRM_BRIDGE
+
config DRM_DW_HDMI
tristate
- depends on DRM
select DRM_KMS_HELPER
-config DRM_PTN3460
- tristate "PTN3460 DP/LVDS bridge"
- depends on DRM
+config DRM_NXP_PTN3460
+ tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL
---help---
- ptn3460 eDP-LVDS bridge chip driver.
+ NXP PTN3460 eDP-LVDS bridge chip driver.
-config DRM_PS8622
+config DRM_PARADE_PS8622
tristate "Parade eDP/LVDS bridge"
- depends on DRM
depends on OF
select DRM_PANEL
select DRM_KMS_HELPER
select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
- parade eDP-LVDS bridge chip driver.
+ Parade eDP-LVDS bridge chip driver.
+
+endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 8dfebd984370..e2eef1c2f4c3 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,5 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_PS8622) += ps8622.o
-obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
+obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
+obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 816d104ca4da..0083d4e7e7e2 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -18,6 +18,7 @@
#include <linux/hdmi.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/spinlock.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
@@ -81,10 +82,6 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
};
struct hdmi_vmode {
- bool mdvi;
- bool mhsyncpolarity;
- bool mvsyncpolarity;
- bool minterlaced;
bool mdataenablepolarity;
unsigned int mpixelclock;
@@ -123,12 +120,20 @@ struct dw_hdmi {
bool phy_enabled;
struct drm_display_mode previous_mode;
- struct regmap *regmap;
struct i2c_adapter *ddc;
void __iomem *regs;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ struct mutex mutex; /* for state below and previous_mode */
+ bool disabled; /* DRM has disabled our bridge */
+
+ spinlock_t audio_lock;
struct mutex audio_mutex;
unsigned int sample_rate;
+ unsigned int audio_cts;
+ unsigned int audio_n;
+ bool audio_enable;
int ratio;
void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
@@ -335,42 +340,76 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
}
static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
- unsigned long pixel_clk)
+ unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
{
- unsigned int clk_n, clk_cts;
-
- clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
- hdmi->ratio);
- clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
- hdmi->ratio);
+ unsigned int n, cts;
- if (!clk_cts) {
- dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, pixel_clk);
- return;
+ n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
+ cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
+ if (!cts) {
+ dev_err(hdmi->dev,
+ "%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n",
+ __func__, pixel_clk, sample_rate);
}
- dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
- __func__, hdmi->sample_rate, hdmi->ratio,
- pixel_clk, clk_n, clk_cts);
+ dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n",
+ __func__, sample_rate, ratio, pixel_clk, n, cts);
- hdmi_set_cts_n(hdmi, clk_cts, clk_n);
+ spin_lock_irq(&hdmi->audio_lock);
+ hdmi->audio_n = n;
+ hdmi->audio_cts = cts;
+ hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0);
+ spin_unlock_irq(&hdmi->audio_lock);
}
static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
{
mutex_lock(&hdmi->audio_mutex);
- hdmi_set_clk_regenerator(hdmi, 74250000);
+ hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
+ hdmi->ratio);
mutex_unlock(&hdmi->audio_mutex);
}
static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
{
mutex_lock(&hdmi->audio_mutex);
- hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+ hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+ hdmi->sample_rate, hdmi->ratio);
mutex_unlock(&hdmi->audio_mutex);
}
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_rate = rate;
+ hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+ hdmi->sample_rate, hdmi->ratio);
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->audio_lock, flags);
+ hdmi->audio_enable = true;
+ hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
+ spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable);
+
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->audio_lock, flags);
+ hdmi->audio_enable = false;
+ hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
+ spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
+
/*
* this submodule is responsible for the video data synchronization.
* for example, for RGB 4:4:4 input, the data map is defined as
@@ -701,9 +740,9 @@ static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
return 0;
}
-static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
{
- hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+ hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_PDZ_OFFSET,
HDMI_PHY_CONF0_PDZ_MASK);
}
@@ -753,12 +792,12 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
unsigned char res, int cscon)
{
- unsigned res_idx, i;
+ unsigned res_idx;
u8 val, msec;
- const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
- const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
- const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
- const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
+ const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+ const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
+ const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
if (prep)
return -EINVAL;
@@ -778,6 +817,30 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
return -EINVAL;
}
+ /* PLL/MPLL Cfg - always match on final entry */
+ for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ mpll_config->mpixelclock)
+ break;
+
+ for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ curr_ctrl->mpixelclock)
+ break;
+
+ for (; phy_config->mpixelclock != ~0UL; phy_config++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ phy_config->mpixelclock)
+ break;
+
+ if (mpll_config->mpixelclock == ~0UL ||
+ curr_ctrl->mpixelclock == ~0UL ||
+ phy_config->mpixelclock == ~0UL) {
+ dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
+ hdmi->hdmi_data.video_mode.mpixelclock);
+ return -EINVAL;
+ }
+
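All three lookups above rely on the same sentinel convention: each platform table is terminated by an entry with mpixelclock == ~0UL, so a walk either stops at the first entry covering the requested clock or lands on the terminator, which the combined error check then rejects. A standalone illustration of the idiom (types and values are made up):

#include <stdio.h>

struct clk_cfg {
	unsigned long max_clock;	/* ~0UL marks the end of the table */
	int setting;
};

static const struct clk_cfg cfgs[] = {
	{  74250000, 1 },
	{ 148500000, 2 },
	{ ~0UL, 0 },			/* sentinel: clock not supported */
};

static const struct clk_cfg *find_cfg(unsigned long clock)
{
	const struct clk_cfg *c = cfgs;

	for (; c->max_clock != ~0UL; c++)
		if (clock <= c->max_clock)
			break;

	return c->max_clock != ~0UL ? c : NULL;
}

int main(void)
{
	const struct clk_cfg *c = find_cfg(65000000);

	printf("%d\n", c ? c->setting : -1);	/* prints 1 */
	return 0;
}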
/* Enable csc path */
if (cscon)
val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
@@ -803,48 +866,23 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
HDMI_PHY_I2CM_SLAVE_ADDR);
hdmi_phy_test_clear(hdmi, 0);
- /* PLL/MPLL Cfg - always match on final entry */
- for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++)
- if (hdmi->hdmi_data.video_mode.mpixelclock <=
- mpll_config[i].mpixelclock)
- break;
-
- hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
- hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
-
- for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++)
- if (hdmi->hdmi_data.video_mode.mpixelclock <=
- curr_ctrl[i].mpixelclock)
- break;
-
- if (curr_ctrl[i].mpixelclock == (~0UL)) {
- dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
- hdmi->hdmi_data.video_mode.mpixelclock);
- return -EINVAL;
- }
+ hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06);
+ hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15);
/* CURRCTRL */
- hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+ hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10);
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
- for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
- if (hdmi->hdmi_data.video_mode.mpixelclock <=
- phy_config[i].mpixelclock)
- break;
-
- /* RESISTANCE TERM 133Ohm Cfg */
- hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19); /* TXTERM */
- /* PREEMP Cgf 0.00 */
- hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
- /* TX/CK LVL 10 */
- hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+ hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19); /* TXTERM */
+ hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */
+ hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */
/* REMOVE CLK TERM */
hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
- dw_hdmi_phy_enable_power(hdmi, 1);
+ dw_hdmi_phy_enable_powerdown(hdmi, false);
/* toggle TMDS enable */
dw_hdmi_phy_enable_tmds(hdmi, 0);
@@ -879,18 +917,17 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
{
int i, ret;
- bool cscon = false;
+ bool cscon;
/* Check whether CSC needs to be activated in HDMI mode */
- cscon = (is_color_space_conversion(hdmi) &&
- !hdmi->hdmi_data.video_mode.mdvi);
+ cscon = hdmi->sink_is_hdmi && is_color_space_conversion(hdmi);
/* HDMI Phy spec says to do the phy initialization sequence twice */
for (i = 0; i < 2; i++) {
dw_hdmi_phy_sel_data_en_pol(hdmi, 1);
dw_hdmi_phy_sel_interface_control(hdmi, 0);
dw_hdmi_phy_enable_tmds(hdmi, 0);
- dw_hdmi_phy_enable_power(hdmi, 0);
+ dw_hdmi_phy_enable_powerdown(hdmi, true);
/* Enable CSC */
ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
@@ -921,74 +958,76 @@ static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi)
HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
}
-static void hdmi_config_AVI(struct dw_hdmi *hdmi)
+static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
{
- u8 val, pix_fmt, under_scan;
- u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
- bool aspect_16_9;
+ struct hdmi_avi_infoframe frame;
+ u8 val;
- aspect_16_9 = false; /* FIXME */
+ /* Initialise info frame from DRM mode */
+ drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- /* AVI Data Byte 1 */
if (hdmi->hdmi_data.enc_out_format == YCBCR444)
- pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+ frame.colorspace = HDMI_COLORSPACE_YUV444;
else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
- pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+ frame.colorspace = HDMI_COLORSPACE_YUV422;
else
- pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
-
- under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
-
- /*
- * Active format identification data is present in the AVI InfoFrame.
- * Under scan info, no bar data
- */
- val = pix_fmt | under_scan |
- HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
- HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
-
- hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
-
- /* AVI Data Byte 2 -Set the Aspect Ratio */
- if (aspect_16_9) {
- act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
- coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
- } else {
- act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
- coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
- }
+ frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
- colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
- ext_colorimetry =
- HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
- ext_colorimetry =
- HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
- colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
- colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
- ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ frame.colorimetry = hdmi->hdmi_data.colorimetry;
+ frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
} else { /* Carries no data */
- colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
- ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+ frame.colorimetry = HDMI_COLORIMETRY_NONE;
+ frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
- val = colorimetry | coded_ratio | act_ratio;
+ frame.scan_mode = HDMI_SCAN_MODE_NONE;
+
+ /*
+ * The Designware IP uses a different byte format from standard
+ * AVI info frames, though generally the bits are in the correct
+ * bytes.
+ */
+
+ /*
+ * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
+ * active aspect present in bit 6 rather than 4.
+ */
+ val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+ if (frame.active_aspect & 15)
+ val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
+ if (frame.top_bar || frame.bottom_bar)
+ val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR;
+ if (frame.left_bar || frame.right_bar)
+ val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+ /* AVI data byte 2 differences: none */
+ val = ((frame.colorimetry & 0x3) << 6) |
+ ((frame.picture_aspect & 0x3) << 4) |
+ (frame.active_aspect & 0xf);
hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
- /* AVI Data Byte 3 */
- val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
- HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
- HDMI_FC_AVICONF2_SCALING_NONE;
+ /* AVI data byte 3 differences: none */
+ val = ((frame.extended_colorimetry & 0x7) << 4) |
+ ((frame.quantization_range & 0x3) << 2) |
+ (frame.nups & 0x3);
+ if (frame.itc)
+ val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID;
hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
- /* AVI Data Byte 4 */
- hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+ /* AVI data byte 4 differences: none */
+ val = frame.video_code & 0x7f;
+ hdmi_writeb(hdmi, val, HDMI_FC_AVIVID);
/* AVI Data Byte 5 - set up input and output pixel repetition */
val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
@@ -999,20 +1038,23 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi)
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
- /* IT Content and quantization range = don't care */
- val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
- HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+ /*
+ * AVI data byte 5 differences: content type in 0,1 rather than 4,5,
+ * ycc range in bits 2,3 rather than 6,7
+ */
+ val = ((frame.ycc_quantization_range & 0x3) << 2) |
+ (frame.content_type & 0x3);
hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
/* AVI Data Bytes 6-13 */
- hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
- hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+ hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0);
+ hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1);
+ hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0);
+ hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1);
+ hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0);
+ hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1);
+ hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0);
+ hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1);
}
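To make the byte-format comments in hdmi_config_AVI() concrete: CEA-861 packs AVI data byte 1 as Y[6:5] A0[4] B[3:2] S[1:0], whereas HDMI_FC_AVICONF0 wants the colorspace in bits 5:4 and the active-format flag in bit 6 (bar-data flags sit behind their own mask macros). A side-by-side sketch, with the register layout inferred from the conversion code above:

#include <stdint.h>

/* Standard CEA-861 AVI data byte 1: Y[6:5] A0[4] B[3:2] S[1:0] */
static uint8_t avi_db1_cea861(uint8_t y, uint8_t a0, uint8_t b, uint8_t s)
{
	return (y & 0x3) << 5 | (a0 & 0x1) << 4 | (b & 0x3) << 2 | (s & 0x3);
}

/* DesignWare HDMI_FC_AVICONF0: A0 in bit 6, Y in bits 5:4, S in bits 1:0 */
static uint8_t avi_db1_dw(uint8_t y, uint8_t a0, uint8_t s)
{
	return (a0 & 0x1) << 6 | (y & 0x3) << 4 | (s & 0x3);
}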
static void hdmi_av_composer(struct dw_hdmi *hdmi,
@@ -1022,9 +1064,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
- vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
- vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
- vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
vmode->mpixelclock = mode->clock * 1000;
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
@@ -1034,13 +1073,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
- inv_val |= (vmode->mvsyncpolarity ?
+ inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
- HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW;
- inv_val |= (vmode->mhsyncpolarity ?
+ inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
- HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW;
inv_val |= (vmode->mdataenablepolarity ?
HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
@@ -1049,17 +1088,17 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
if (hdmi->vic == 39)
inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
else
- inv_val |= (vmode->minterlaced ?
+ inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
- HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW;
- inv_val |= (vmode->minterlaced ?
+ inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
- HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE;
- inv_val |= (vmode->mdvi ?
- HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
- HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+ inv_val |= hdmi->sink_is_hdmi ?
+ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE :
+ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE;
hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
@@ -1105,7 +1144,7 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi)
return;
dw_hdmi_phy_enable_tmds(hdmi, 0);
- dw_hdmi_phy_enable_power(hdmi, 0);
+ dw_hdmi_phy_enable_powerdown(hdmi, true);
hdmi->phy_enabled = false;
}
@@ -1186,10 +1225,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
if (!hdmi->vic) {
dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
- hdmi->hdmi_data.video_mode.mdvi = true;
} else {
dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
- hdmi->hdmi_data.video_mode.mdvi = false;
}
if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
@@ -1200,18 +1237,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
else
hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
- if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
- (hdmi->vic == 12) || (hdmi->vic == 13) ||
- (hdmi->vic == 14) || (hdmi->vic == 15) ||
- (hdmi->vic == 25) || (hdmi->vic == 26) ||
- (hdmi->vic == 27) || (hdmi->vic == 28) ||
- (hdmi->vic == 29) || (hdmi->vic == 30) ||
- (hdmi->vic == 35) || (hdmi->vic == 36) ||
- (hdmi->vic == 37) || (hdmi->vic == 38))
- hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
- else
- hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
-
+ hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
/* TODO: Get input format from IPU (via FB driver interface) */
@@ -1235,18 +1261,22 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step B.3 */
dw_hdmi_enable_video_path(hdmi);
- /* not for DVI mode */
- if (hdmi->hdmi_data.video_mode.mdvi) {
- dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
- } else {
- dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+ if (hdmi->sink_has_audio) {
+ dev_dbg(hdmi->dev, "sink has audio support\n");
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
hdmi_enable_audio_clk(hdmi);
+ }
+
+ /* not for DVI mode */
+ if (hdmi->sink_is_hdmi) {
+ dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__);
/* HDMI Initialization Step F - Configure AVI InfoFrame */
- hdmi_config_AVI(hdmi);
+ hdmi_config_AVI(hdmi, mode);
+ } else {
+ dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
hdmi_video_packetize(hdmi);
@@ -1255,7 +1285,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi_tx_hdcp_config(hdmi);
dw_hdmi_clear_overflow(hdmi);
- if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+ if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
hdmi_enable_overflow_interrupts(hdmi);
return 0;
@@ -1348,10 +1378,12 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
{
struct dw_hdmi *hdmi = bridge->driver_private;
- dw_hdmi_setup(hdmi, mode);
+ mutex_lock(&hdmi->mutex);
/* Store the display mode for plugin/DKMS poweron events */
memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+
+ mutex_unlock(&hdmi->mutex);
}
static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -1365,14 +1397,20 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
{
struct dw_hdmi *hdmi = bridge->driver_private;
+ mutex_lock(&hdmi->mutex);
+ hdmi->disabled = true;
dw_hdmi_poweroff(hdmi);
+ mutex_unlock(&hdmi->mutex);
}
static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_hdmi *hdmi = bridge->driver_private;
+ mutex_lock(&hdmi->mutex);
dw_hdmi_poweron(hdmi);
+ hdmi->disabled = false;
+ mutex_unlock(&hdmi->mutex);
}
static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
@@ -1405,6 +1443,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
edid->width_cm, edid->height_cm);
+ hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1423,6 +1463,10 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
struct dw_hdmi, connector);
enum drm_mode_status mode_status = MODE_OK;
+ /* We don't support double-clocked modes */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_BAD;
+
if (hdmi->plat_data->mode_valid)
mode_status = hdmi->plat_data->mode_valid(connector, mode);
@@ -1489,21 +1533,21 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+ hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+ mutex_lock(&hdmi->mutex);
if (phy_int_pol & HDMI_PHY_HPD) {
dev_dbg(hdmi->dev, "EVENT=plugin\n");
- hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
-
- dw_hdmi_poweron(hdmi);
+ if (!hdmi->disabled)
+ dw_hdmi_poweron(hdmi);
} else {
dev_dbg(hdmi->dev, "EVENT=plugout\n");
- hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
- HDMI_PHY_POL0);
-
- dw_hdmi_poweroff(hdmi);
+ if (!hdmi->disabled)
+ dw_hdmi_poweroff(hdmi);
}
- drm_helper_hpd_irq_event(hdmi->connector.dev);
+ mutex_unlock(&hdmi->mutex);
+ drm_helper_hpd_irq_event(hdmi->bridge->dev);
}
hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
@@ -1570,8 +1614,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->sample_rate = 48000;
hdmi->ratio = 100;
hdmi->encoder = encoder;
+ hdmi->disabled = true;
+ mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
+ spin_lock_init(&hdmi->audio_lock);
of_property_read_u32(np, "reg-io-width", &val);
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index 175dbc89a824..ee7f7ed2ab12 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -7,8 +7,8 @@
* (at your option) any later version.
*/
-#ifndef __IMX_HDMI_H__
-#define __IMX_HDMI_H__
+#ifndef __DW_HDMI_H__
+#define __DW_HDMI_H__
/* Identification Registers */
#define HDMI_DESIGN_ID 0x0000
@@ -525,7 +525,7 @@
/* I2C Master Registers (E-DDC) */
#define HDMI_I2CM_SLAVE 0x7E00
-#define HDMI_I2CMESS 0x7E01
+#define HDMI_I2CM_ADDRESS 0x7E01
#define HDMI_I2CM_DATAO 0x7E02
#define HDMI_I2CM_DATAI 0x7E03
#define HDMI_I2CM_OPERATION 0x7E04
@@ -1031,4 +1031,4 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
};
-#endif /* __IMX_HDMI_H__ */
+#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1b1bf2384815..1b1bf2384815 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 1a6607beb29f..1a6607beb29f 100644
--- a/drivers/gpu/drm/bridge/ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b9140032962d..b1619e29a564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev)
if (cdev->mode_info.gfbdev) {
console_lock();
- fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+ drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
console_unlock();
}
@@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev)
if (cdev->mode_info.gfbdev) {
console_lock();
- fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+ drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
console_unlock();
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c4bb8e..589103bcc06c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct cirrus_fbdev *afbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
{
struct cirrus_fbdev *gfbdev =
container_of(helper, struct cirrus_fbdev, helper);
- struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = gfbdev;
@@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
/* setup helper */
gfbdev->helper.fb = fb;
- gfbdev->helper.fbdev = info;
strcpy(info->fix.id, "cirrusdrmfb");
-
info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
@@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_iounmap;
- }
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
@@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- ret = -ENOMEM;
- goto out_iounmap;
- }
-
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
-out_iounmap:
- return ret;
}
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct fb_info *info;
struct cirrus_framebuffer *gfb = &gfbdev->gfb;
- if (gfbdev->helper.fbdev) {
- info = gfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&gfbdev->helper);
+ drm_fb_helper_release_fbi(&gfbdev->helper);
if (gfb->obj) {
drm_gem_object_unreference_unlocked(gfb->obj);
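Note that the framebuffer_alloc() to drm_fb_helper_alloc_fbi() conversion above also changes the failure convention from a NULL return to an ERR_PTR-encoded errno. A sketch of consuming that convention; the function name is illustrative:

#include <linux/err.h>
#include <drm/drm_fb_helper.h>

static int example_create_fbi(struct drm_fb_helper *helper)
{
	struct fb_info *info;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);	/* decode the errno, e.g. -ENOMEM */

	/* ... fill in info->fbops, info->fix.id, etc. ... */
	return 0;
}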
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b976658087..055fd86ba717 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct cirrus_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_cirrus_bo(obj);
*offset = cirrus_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
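The simplification above works because drm_gem_object_lookup() takes its own reference on the object, so struct_mutex is not needed to keep it alive, and the unlocked unreference drops that reference without the caller holding the mutex. The pattern in isolation (hypothetical helper name):

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

static int example_mmap_offset(struct drm_device *dev, struct drm_file *file,
			       uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file, handle);	/* takes a reference */
	if (obj == NULL)
		return -ENOENT;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);	/* drops it, no mutex */
	return 0;
}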
bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f6f2fb58eb37..f7d5166f89b2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
if (!connector)
continue;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- connector->funcs->atomic_destroy_state(connector,
+ /*
+ * FIXME: Async commits can race with connector unplugging and
+ * there's currently nothing that prevents cleaning up state for
+ * deleted connectors. As long as the callback doesn't look at
+ * the connector we'll be fine though, so make sure that's the
+ * case by setting all connector pointers to NULL.
+ */
+ state->connector_states[i]->connector = NULL;
+ connector->funcs->atomic_destroy_state(NULL,
state->connector_states[i]);
state->connectors[i] = NULL;
state->connector_states[i] = NULL;
@@ -1063,7 +1069,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
* Changed connectors are already in @state, so only need to look at the
* current configuration.
*/
- list_for_each_entry(connector, &config->connector_list, head) {
+ drm_for_each_connector(connector, state->dev) {
if (connector->state->crtc != crtc)
continue;
@@ -1463,24 +1469,18 @@ retry:
if (get_user(obj_id, objs_ptr + copied_objs)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj || !obj->properties) {
ret = -ENOENT;
- goto fail;
- }
-
- if (obj->type == DRM_MODE_OBJECT_PLANE) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
+ goto out;
}
if (get_user(count_props, count_props_ptr + copied_objs)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
copied_objs++;
@@ -1492,28 +1492,35 @@ retry:
if (get_user(prop_id, props_ptr + copied_props)) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
prop = drm_property_find(dev, prop_id);
if (!prop) {
ret = -ENOENT;
- goto fail;
+ goto out;
}
if (copy_from_user(&prop_value,
prop_values_ptr + copied_props,
sizeof(prop_value))) {
ret = -EFAULT;
- goto fail;
+ goto out;
}
ret = atomic_set_prop(state, obj, prop, prop_value);
if (ret)
- goto fail;
+ goto out;
copied_props++;
}
+
+ if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
+ !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
+ plane = obj_to_plane(obj);
+ plane_mask |= (1 << drm_plane_index(plane));
+ plane->old_fb = plane->fb;
+ }
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1530,7 @@ retry:
e = create_vblank_event(dev, file_priv, arg->user_data);
if (!e) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
crtc_state->event = e;
@@ -1531,15 +1538,18 @@ retry:
}
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+ /*
+ * Unlike commit, check_only does not clean up state.
+ * Below we call drm_atomic_state_free for it.
+ */
ret = drm_atomic_check_only(state);
- /* _check_only() does not free state, unlike _commit() */
- drm_atomic_state_free(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_async_commit(state);
} else {
ret = drm_atomic_commit(state);
}
+out:
/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
* locks (ie. while it is still safe to deref plane->state). We
* need to do this here because the driver entry points cannot
@@ -1552,41 +1562,40 @@ retry:
drm_framebuffer_reference(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
- } else {
- plane->old_fb = NULL;
- }
- if (plane->old_fb) {
- drm_framebuffer_unreference(plane->old_fb);
- plane->old_fb = NULL;
+
+ if (plane->old_fb)
+ drm_framebuffer_unreference(plane->old_fb);
}
+ plane->old_fb = NULL;
}
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-
-fail:
- if (ret == -EDEADLK)
- goto backoff;
+ if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ /*
+ * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive;
+ * if they weren't, this code should be called on success
+ * for TEST_ONLY too.
+ */
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- destroy_vblank_event(dev, file_priv, crtc_state->event);
- crtc_state->event = NULL;
+ if (!crtc_state->event)
+ continue;
+
+ destroy_vblank_event(dev, file_priv,
+ crtc_state->event);
}
}
- drm_atomic_state_free(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ drm_atomic_state_free(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
-
- goto retry;
}
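The rewritten exit path above folds the old fail/backoff labels into a single out label while keeping the usual w/w-mutex retry dance for -EDEADLK. In isolation that dance looks like this; a sketch, with state and context setup elided:

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

static int commit_with_backoff(struct drm_atomic_state *state,
			       struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* forget staged changes */
		drm_modeset_backoff(ctx);	/* drop locks, re-take in order */
		goto retry;
	}

	return ret;
}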
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..aecb5d69bc2d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -89,7 +89,7 @@ get_current_crtc_for_encoder(struct drm_device *dev,
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(connector, &config->connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (connector->state->best_encoder != encoder)
continue;
@@ -124,7 +124,7 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
list_for_each_entry(connector, &config->connector_list, head) {
if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
idx = drm_crtc_index(connector->state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
if (connector_state->crtc) {
idx = drm_crtc_index(connector_state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
}
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
funcs = connector->helper_private;
- new_encoder = funcs->best_encoder(connector);
+
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector,
+ connector_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
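The branch above is the usual optional-hook dispatch: prefer the state-aware atomic_best_encoder() when a driver provides it, fall back to the legacy best_encoder() otherwise. Generic shape of the pattern (the ops struct here is illustrative, not the real helper vtable):

#include <drm/drm_crtc.h>

struct example_conn_ops {
	struct drm_encoder *(*best_encoder)(struct drm_connector *c);
	struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *c,
						   struct drm_connector_state *s);
};

/* Newer drivers fill the atomic hook; older ones only best_encoder. */
static struct drm_encoder *pick_encoder(const struct example_conn_ops *ops,
					struct drm_connector *c,
					struct drm_connector_state *s)
{
	if (ops->atomic_best_encoder)
		return ops->atomic_best_encoder(c, s);

	return ops->best_encoder(c);
}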
@@ -229,11 +234,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
}
+ if (WARN_ON(!connector_state->crtc))
+ return -EINVAL;
+
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
crtc_state = state->crtc_states[idx];
- crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
connector->base.id,
@@ -256,7 +264,8 @@ mode_fixup(struct drm_atomic_state *state)
bool ret;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->mode_changed)
+ if (!crtc_state->mode_changed &&
+ !crtc_state->connectors_changed)
continue;
drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -298,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state)
encoder->base.id, encoder->name);
return ret;
}
- } else {
+ } else if (funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
@@ -312,7 +321,8 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
- if (!crtc_state->mode_changed)
+ if (!crtc_state->mode_changed &&
+ !crtc_state->connectors_changed)
continue;
funcs = crtc->helper_private;
@@ -338,9 +348,14 @@ mode_fixup(struct drm_atomic_state *state)
*
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update, adds any additional connectors needed for full modesets, and
+ * calls down into ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
*
* IMPORTANT:
*
@@ -373,7 +388,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (crtc->state->enable != crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
crtc->base.id);
+
+ /*
+ * For clarity this assignment is done here, but
+ * enable == 0 is only true when there are no
+ * connectors and a NULL mode.
+ *
+ * The other way around is true as well. enable != 0
+ * iff connectors are attached and a mode is set.
+ */
crtc_state->mode_changed = true;
+ crtc_state->connectors_changed = true;
}
}
@@ -448,6 +473,9 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* This does all the plane update related checks by calling into the
* ->atomic_check hooks provided by the driver.
*
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
* RETURNS
* Zero for success or -errno
*/
@@ -640,15 +668,29 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_crtc_state *old_crtc_state;
int i;
- /* clear out existing links */
+ /* clear out existing links and update dpms */
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
- if (!connector->encoder)
- continue;
+ if (connector->encoder) {
+ WARN_ON(!connector->encoder->crtc);
+
+ connector->encoder->crtc = NULL;
+ connector->encoder = NULL;
+ }
+
+ crtc = connector->state->crtc;
+ if ((!crtc && old_conn_state->crtc) ||
+ (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+ struct drm_property *dpms_prop =
+ dev->mode_config.dpms_property;
+ int mode = DRM_MODE_DPMS_OFF;
- WARN_ON(!connector->encoder->crtc);
+ if (crtc && crtc->state->active)
+ mode = DRM_MODE_DPMS_ON;
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
+ connector->dpms = mode;
+ drm_object_property_set_value(&connector->base,
+ dpms_prop, mode);
+ }
}
/* set new links */
@@ -665,10 +707,16 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
/* set legacy state in the crtc structure */
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct drm_plane *primary = crtc->primary;
+
crtc->mode = crtc->state->mode;
crtc->enabled = crtc->state->enable;
- crtc->x = crtc->primary->state->src_x >> 16;
- crtc->y = crtc->primary->state->src_y >> 16;
+
+ if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+ primary->state->crtc == crtc) {
+ crtc->x = primary->state->src_x >> 16;
+ crtc->y = primary->state->src_y >> 16;
+ }
if (crtc->state->enable)
drm_calc_timestamping_constants(crtc,
@@ -742,7 +790,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
- * For compatability with legacy crtc helpers this should be called before
+ * For compatibility with legacy crtc helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -767,7 +815,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
- * For compatability with legacy crtc helpers this should be called after
+ * For compatibility with legacy crtc helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
@@ -918,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
continue;
old_crtc_state->enable = true;
- old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+ old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
}
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -927,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
ret = wait_event_timeout(dev->vblank[i].queue,
old_crtc_state->last_vblank_count !=
- drm_vblank_count(dev, i),
+ drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
drm_crtc_vblank_put(crtc);
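The wait above leans on wait_event_timeout() semantics: it returns 0 on timeout and the remaining jiffies otherwise, re-evaluating the condition on every wakeup. Stripped to its essentials (illustrative wait queue and counter):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * Sleep until *counter moves past 'seen' or ~50 ms elapse.
 * Returns true if the counter advanced, false on timeout.
 */
static bool wait_for_count_change(wait_queue_head_t *queue,
				  const u32 *counter, u32 seen)
{
	return wait_event_timeout(*queue, READ_ONCE(*counter) != seen,
				  msecs_to_jiffies(50)) != 0;
}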
@@ -1138,7 +1186,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_begin)
continue;
- funcs->atomic_begin(crtc);
+ funcs->atomic_begin(crtc, old_crtc_state);
}
for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1168,7 +1216,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_flush)
continue;
- funcs->atomic_flush(crtc);
+ funcs->atomic_flush(crtc, old_crtc_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1204,7 +1252,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
- crtc_funcs->atomic_begin(crtc);
+ crtc_funcs->atomic_begin(crtc, old_crtc_state);
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
@@ -1227,7 +1275,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
}
if (crtc_funcs && crtc_funcs->atomic_flush)
- crtc_funcs->atomic_flush(crtc);
+ crtc_funcs->atomic_flush(crtc, old_crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
@@ -1915,10 +1963,6 @@ retry:
if (ret != 0)
goto fail;
- /* TODO: ->page_flip is the only driver callback where the core
- * doesn't update plane->fb. For now patch it up here. */
- plane->fb = plane->state->fb;
-
/* Driver takes ownership of state on successful async commit. */
return 0;
fail:
@@ -1952,9 +1996,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
* updates it.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
*/
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode)
{
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_atomic_state *state;
@@ -1963,6 +2010,7 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
struct drm_connector *tmp_connector;
int ret;
bool active = false;
+ int old_mode = connector->dpms;
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -1971,22 +2019,23 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
crtc = connector->state->crtc;
if (!crtc)
- return;
+ return 0;
- /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
state = drm_atomic_state_alloc(connector->dev);
if (!state)
- return;
+ return -ENOMEM;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return;
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(tmp_connector, &config->connector_list, head) {
+ drm_for_each_connector(tmp_connector, connector->dev) {
if (tmp_connector->state->crtc != crtc)
continue;
@@ -2001,17 +2050,16 @@ retry:
if (ret != 0)
goto fail;
- /* Driver takes ownership of state on successful async commit. */
- return;
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
+ connector->dpms = old_mode;
drm_atomic_state_free(state);
- WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
- return;
+ return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2072,6 +2120,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
+ state->connectors_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
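With drm_atomic_helper_connector_dpms() returning an int, atomic drivers can plug it straight into their connector funcs and have the errno propagate; the drm_crtc.c hunk further down forwards that return value through the DPMS property path. A sketch of the hookup, with driver-specific hooks elided:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,	/* now returns int */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};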
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525c0ed0..192a5f9eeb74 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -53,6 +53,10 @@ struct drm_ctx_list {
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
idr_init(&dev->ctx_idr);
- return 0;
}
/**
@@ -101,6 +108,10 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->ctxlist_mutex);
list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -305,6 +328,10 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -400,6 +435,10 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -420,6 +459,10 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -442,6 +485,10 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
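Every legacy-context entry point above now opens with the same guard: refuse unless the driver is either non-modeset or has explicitly opted into DRIVER_KMS_LEGACY_CONTEXT. Were this refactored, the predicate folds naturally into one helper (hypothetical name, not part of this patch):

#include <drm/drmP.h>

/* True when the legacy context ioctls should be refused outright. */
static bool drm_legacy_ctx_disallowed(struct drm_device *dev)
{
	return !drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	       drm_core_check_feature(dev, DRIVER_MODESET);
}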
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b9ba06176eb1..33d877c65ced 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -615,7 +615,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
if (atomic_read(&fb->refcount.refcount) > 1) {
drm_modeset_lock_all(dev);
/* remove from any CRTC */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
}
}
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (plane->fb == fb)
drm_plane_force_disable(plane);
}
@@ -736,7 +736,7 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
unsigned int index = 0;
struct drm_crtc *tmp;
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, crtc->dev) {
if (tmp == crtc)
return index;
@@ -988,7 +988,7 @@ unsigned int drm_connector_index(struct drm_connector *connector)
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+ drm_for_each_connector(tmp, connector->dev) {
if (tmp == connector)
return index;
@@ -1054,7 +1054,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
- /* taking the mode config mutex ends up in a clash with sysfs */
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
@@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init);
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
- const uint32_t *formats, uint32_t format_count,
+ const uint32_t *formats, unsigned int format_count,
bool is_primary)
{
enum drm_plane_type type;
@@ -1280,7 +1280,7 @@ unsigned int drm_plane_index(struct drm_plane *plane)
unsigned int index = 0;
struct drm_plane *tmp;
- list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+ drm_for_each_plane(tmp, plane->dev) {
if (tmp == plane)
return index;
@@ -1305,7 +1305,7 @@ drm_plane_from_index(struct drm_device *dev, int idx)
struct drm_plane *plane;
unsigned int i = 0;
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (i == idx)
return plane;
i++;
@@ -1679,70 +1679,6 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
- uint32_t total_objects = 0;
-
- total_objects += dev->mode_config.num_crtc;
- total_objects += dev->mode_config.num_connector;
- total_objects += dev->mode_config.num_encoder;
-
- group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
- if (!group->id_list)
- return -ENOMEM;
-
- group->num_crtcs = 0;
- group->num_connectors = 0;
- group->num_encoders = 0;
- return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
- kfree(group->id_list);
- group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
- struct drm_mode_group *group)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- ret = drm_mode_group_init(dev, group);
- if (ret)
- return ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- group->id_list[group->num_crtcs++] = crtc->base.id;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- group->id_list[group->num_crtcs + group->num_encoders++] =
- encoder->base.id;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- group->id_list[group->num_crtcs + group->num_encoders +
- group->num_connectors++] = connector->base.id;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
- drm_modeset_lock_all(dev);
- drm_mode_group_destroy(&dev->primary->mode_group);
- drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
/**
* drm_mode_getresources - get graphics configuration
* @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
int crtc_count = 0;
int fb_count = 0;
int encoder_count = 0;
- int copied = 0, i;
+ int copied = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
- struct drm_mode_group *mode_group;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -1809,24 +1744,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
/* mode_config.mutex protects the connector list against e.g. DP MST
* connector hot-adding. CRTC/Plane lists are invariant. */
mutex_lock(&dev->mode_config.mutex);
- if (!drm_is_primary_client(file_priv)) {
-
- mode_group = NULL;
- list_for_each(lh, &dev->mode_config.crtc_list)
- crtc_count++;
+ drm_for_each_crtc(crtc, dev)
+ crtc_count++;
- list_for_each(lh, &dev->mode_config.connector_list)
- connector_count++;
+ drm_for_each_connector(connector, dev)
+ connector_count++;
- list_for_each(lh, &dev->mode_config.encoder_list)
- encoder_count++;
- } else {
-
- mode_group = &file_priv->master->minor->mode_group;
- crtc_count = mode_group->num_crtcs;
- connector_count = mode_group->num_connectors;
- encoder_count = mode_group->num_encoders;
- }
+ drm_for_each_encoder(encoder, dev)
+ encoder_count++;
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- if (!mode_group) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list,
- head) {
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = 0; i < mode_group->num_crtcs; i++) {
- if (put_user(mode_group->id_list[i],
- crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_crtc(crtc, dev) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- if (!mode_group) {
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- head) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- encoder->name);
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
- if (put_user(mode_group->id_list[i],
- encoder_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_encoder(encoder, dev) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ encoder->name);
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
}
-
+ copied++;
}
}
card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- if (!mode_group) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- } else {
- int start = mode_group->num_crtcs +
- mode_group->num_encoders;
- for (i = start; i < start + mode_group->num_connectors; i++) {
- if (put_user(mode_group->id_list[i],
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ drm_for_each_connector(connector, dev) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (!connector->state)
continue;
@@ -2291,7 +2175,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
/* Plane lists are invariant, no locking needed. */
- list_for_each_entry(plane, &config->plane_list, head) {
+ drm_for_each_plane(plane, dev) {
/*
* Unless userspace set the 'universal planes'
* capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(tmp, crtc->dev)
tmp->primary->old_fb = tmp->primary->fb;
fb = set->fb;
@@ -2607,7 +2491,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
crtc->primary->fb = fb;
}
- list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, crtc->dev) {
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
if (tmp->primary->old_fb)
@@ -2706,8 +2590,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* For some reason crtc x/y offsets are signed internally. */
- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ /*
+ * Universal plane src offsets are only 16.16, prevent havoc for
+ * drivers using universal plane code internally.
+ */
+ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
drm_modeset_lock_all(dev);
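The new check above is the 16.16 fixed-point limit written as a mask: any x/y above 65535 cannot be represented in the 16 integer bits of a universal-plane src coordinate, and such a value always has a bit of 0xffff0000 set. A plain-C illustration:

#include <stdio.h>
#include <stdint.h>

/* Universal-plane src offsets are 16.16 fixed point: 16 integer bits. */
static int offset_fits_16_16(uint32_t v)
{
	return (v & 0xffff0000) == 0;	/* integer part must fit in 16 bits */
}

int main(void)
{
	printf("%d %d\n", offset_fits_16_16(4096),	/* 1: fits */
	       offset_fits_16_16(70000));		/* 0: 70000 > 65535 */
	return 0;
}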
@@ -4298,7 +4185,6 @@ void drm_property_unreference_blob(struct drm_property_blob *blob)
mutex_unlock(&dev->mode_config.blob_lock);
else
might_lock(&dev->mode_config.blob_lock);
-
}
EXPORT_SYMBOL(drm_property_unreference_blob);
@@ -4469,9 +4355,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
goto err_created;
}
- if (old_blob)
- drm_property_unreference_blob(old_blob);
-
+ drm_property_unreference_blob(old_blob);
*replace = new_blob;
return 0;
@@ -4869,9 +4753,9 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int)value);
ret = 0;
+ if (connector->funcs->dpms)
+ ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
@@ -5346,13 +5230,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
/* Keep the old fb, don't unref it. */
crtc->primary->old_fb = NULL;
} else {
- /*
- * Warn if the driver hasn't properly updated the crtc->fb
- * field to reflect that the new framebuffer is now used.
- * Failing to do so will screw with the reference counting
- * on framebuffers.
- */
- WARN_ON(crtc->primary->fb != fb);
+ crtc->primary->fb = fb;
/* Unref only the old framebuffer. */
fb = NULL;
}
@@ -5383,24 +5261,23 @@ void drm_mode_config_reset(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_connector *connector;
- list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(crtc, dev)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ drm_for_each_encoder(encoder, dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- connector->status = connector_status_unknown;
-
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- }
+ mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_mode_config_reset);
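The locking added here reflects the general rule for the new iterator: connector lists can change at runtime (e.g. DP MST hotplug), so walks over them should hold mode_config.mutex. A hedged sketch of the idiom, with a hypothetical example_ name:

static void example_walk_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		/* inspect connectors; must not add or remove any here */
	}
	mutex_unlock(&dev->mode_config.mutex);
}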
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 393114df88a3..ef534758a02c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,7 +121,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
return true;
return false;
@@ -151,7 +151,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
@@ -180,7 +180,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
@@ -188,7 +188,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
@@ -230,7 +230,7 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -334,7 +334,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
crtc->hwmode = *adjusted_mode;
/* Prepare the encoders and CRTCs before setting the mode. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -359,7 +359,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!ret)
goto done;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -376,7 +376,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -418,11 +418,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
struct drm_encoder *encoder;
/* Decouple all encoders and their attached connectors from this crtc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (connector->encoder != encoder)
continue;
@@ -519,12 +519,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
* restored, not the driver's personal bookkeeping.
*/
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
save_encoders[count++] = *encoder;
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
save_connectors[count++] = *connector;
}
@@ -562,7 +562,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* a) traverse passed in connector list and get encoders for them */
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
@@ -602,7 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
if (!connector->encoder)
continue;
@@ -685,12 +685,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
fail:
/* Restore all previous data. */
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
*encoder = save_encoders[count++];
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
*connector = save_connectors[count++];
}
@@ -712,7 +712,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
@@ -746,7 +746,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
struct drm_connector *connector;
struct drm_device *dev = crtc->dev;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
@@ -762,15 +762,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
*/
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
- return;
+ return 0;
old_dpms = connector->dpms;
connector->dpms = mode;
@@ -802,7 +805,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
}
- return;
+ return 0;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
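Since drm_helper_connector_dpms() now matches the int-returning ->dpms hook consumed by drm_mode_connector_set_obj_prop() above, helper-based drivers keep plugging it straight into their connector funcs. A hedged sketch, where the example_ names are hypothetical:

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = example_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};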
@@ -862,7 +865,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
bool ret;
drm_modeset_lock_all(dev);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
if (!crtc->enabled)
continue;
@@ -876,7 +879,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
@@ -928,15 +931,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
if (crtc->funcs->atomic_duplicate_state)
crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
else {
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
- return -ENOMEM;
- if (crtc->state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
- else
- crtc_state->crtc = crtc;
+ if (!crtc->state)
+ drm_atomic_helper_crtc_reset(crtc);
+
+ crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
}
+ if (!crtc_state)
+ return -ENOMEM;
+
crtc_state->planes_changed = true;
crtc_state->mode_changed = true;
ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
out:
- if (crtc->funcs->atomic_destroy_state)
- crtc->funcs->atomic_destroy_state(crtc, crtc_state);
- else {
- __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
- kfree(crtc_state);
+ if (crtc_state) {
+ if (crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+ else
+ drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
}
return ret;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 80a02a412607..291734e87fca 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -159,6 +159,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
+#define AUX_RETRY_INTERVAL 500 /* us */
+
/**
* DOC: dp helpers
*
@@ -213,7 +215,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
return -EIO;
case DP_AUX_NATIVE_REPLY_DEFER:
- usleep_range(400, 500);
+ usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
break;
}
}
@@ -422,6 +424,90 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
I2C_FUNC_10BIT_ADDR;
}
+#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
+#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
+#define AUX_STOP_LEN 4
+#define AUX_CMD_LEN 4
+#define AUX_ADDRESS_LEN 20
+#define AUX_REPLY_PAD_LEN 4
+#define AUX_LENGTH_LEN 8
+
+/*
+ * Calculate the duration of the AUX request/reply in usec. Gives the
+ * "best" case estimate, ie. successful while as short as possible.
+ */
+static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg)
+{
+ int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+ AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;
+
+ if ((msg->request & DP_AUX_I2C_READ) == 0)
+ len += msg->size * 8;
+
+ return len;
+}
+
+static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
+{
+ int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+ AUX_CMD_LEN + AUX_REPLY_PAD_LEN;
+
+ /*
+ * For read we expect what was asked. For writes there will
+ * be 0 or 1 data bytes. Assume 0 for the "best" case.
+ */
+ if (msg->request & DP_AUX_I2C_READ)
+ len += msg->size * 8;
+
+ return len;
+}
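To make the best-case numbers concrete, consider a hypothetical 16-byte I2C-over-AUX read (msg->size == 16), plugging in the constants defined above:

/*
 * request = 10 + 20 + 4 + 4 + 20 + 8      =  66 us (reads carry no data)
 * reply   = 10 + 20 + 4 + 4 + 4 + 16 * 8  = 170 us (16 data bytes)
 *
 * so one successful AUX round trip costs roughly 236 us at best.
 */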
+
+#define I2C_START_LEN 1
+#define I2C_STOP_LEN 1
+#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */
+#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */
+
+/*
+ * Calculate the length of the i2c transfer in usec, assuming
+ * the i2c bus speed is as specified. Gives the the "worst"
+ * case estimate, ie. successful while as long as possible.
+ * Doesn't account the the "MOT" bit, and instead assumes each
+ * message includes a START, ADDRESS and STOP. Neither does it
+ * account for additional random variables such as clock stretching.
+ */
+static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg,
+ int i2c_speed_khz)
+{
+ /* AUX bitrate is 1MHz, i2c bitrate as specified */
+ return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
+ msg->size * I2C_DATA_LEN +
+ I2C_STOP_LEN) * 1000, i2c_speed_khz);
+}
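The same hypothetical 16-byte read on the i2c side, at the 10 kHz speed assumed by default below:

/*
 * bits     = 1 + 9 + 16 * 9 + 1 = 155
 * duration = DIV_ROUND_UP(155 * 1000, 10) = 15500 us
 */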
+
+/*
+ * Determine how many retries should be attempted to successfully transfer
+ * the specified message, based on the estimated durations of the
+ * i2c and AUX transfers.
+ */
+static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg,
+ int i2c_speed_khz)
+{
+ int aux_time_us = drm_dp_aux_req_duration(msg) +
+ drm_dp_aux_reply_duration(msg);
+ int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz);
+
+ return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL);
+}
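Combining the two estimates for that same hypothetical transfer: each AUX attempt costs about 236 us plus the 500 us AUX_RETRY_INTERVAL, while the i2c transfer itself needs about 15500 us, so:

/*
 * retries = DIV_ROUND_UP(15500, 236 + 500) = 22
 *
 * drm_dp_i2c_do_msg() below then uses max(7, 22) = 22 attempts.
 */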
+
+/*
+ * FIXME currently assumes 10 kHz as some real world devices seem
+ * to require it. We should query/set the speed via DPCD if supported.
+ */
+static int dp_aux_i2c_speed_khz __read_mostly = 10;
+module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
+ "Assumed speed of the i2c bus in kHz, (1-400, default 10)");
+
/*
* Transfer a single I2C-over-AUX message and handle various error conditions,
* retrying the transaction as appropriate. It is assumed that the
@@ -434,13 +520,16 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry, defer_i2c;
int ret;
-
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
* is required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
+ *
+ * We also try to account for the i2c bus speed.
*/
- for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) {
+ int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
+
+ for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
@@ -476,7 +565,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* For now just defer for long enough to hopefully be
* safe for all use-cases.
*/
- usleep_range(500, 600);
+ usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
continue;
default:
@@ -506,7 +595,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
aux->i2c_defer_count++;
if (defer_i2c < 7)
defer_i2c++;
- usleep_range(400, 500);
+ usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
continue;
default:
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..e23df5fd3836 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+ list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
+ return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1294,7 +1295,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
@@ -2632,6 +2632,16 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "%02x ", buf[i]);
seq_printf(m, "\n");
+ /* dump the standard OUI branch header */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+ seq_printf(m, "branch oui: ");
+ for (i = 0; i < 0x3; i++)
+ seq_printf(m, "%02x", buf[i]);
+ seq_printf(m, " devid: ");
+ for (i = 0x3; i < 0x8; i++)
+ seq_printf(m, "%c", buf[i]);
+ seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+ seq_printf(m, "\n");
bret = dump_dp_payload_table(mgr, buf);
if (bret == true) {
seq_printf(m, "payload table: ");
@@ -2660,7 +2670,7 @@ static void drm_dp_tx_work(struct work_struct *work)
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_connector *connector;
+ struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2679,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
- if (!connector) {
+ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+ if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
- list_del(&connector->destroy_list);
+ list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, connector);
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ kfree(port);
}
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7bf4ce8c012..53d09a19f7e1 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -285,7 +285,6 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
if (!minor)
return;
- drm_mode_group_destroy(&minor->mode_group);
put_device(minor->kdev);
spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (drm_ht_create(&dev->map_hash, 12))
goto err_minors;
- ret = drm_legacy_ctxbitmap_init(dev);
- if (ret) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto err_ht;
- }
+ drm_legacy_ctxbitmap_init(dev);
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
drm_ht_remove(&dev->map_hash);
err_minors:
drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
- /* setup grouping for legacy outputs */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_unload;
- }
-
ret = 0;
goto out_unlock;
-err_unload:
- if (dev->driver->unload)
- dev->driver->unload(dev);
err_minors:
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7087da37dae0..05bb7311ac5d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3413,7 +3413,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev)
if (connector->encoder == encoder && connector->eld[0])
return connector;
@@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ count = ARRAY_SIZE(drm_dmt_modes);
if (hdisplay < 0)
hdisplay = 0;
if (vdisplay < 0)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5c1aca443e54..c19a62561183 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -209,23 +209,11 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_framebuffer *fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret) {
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
- }
-
- list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(fb, dev)
drm_fb_cma_describe(fb, m);
-
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
@@ -234,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -275,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
if (IS_ERR(obj))
return -ENOMEM;
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
- dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
- ret = -ENOMEM;
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ ret = PTR_ERR(fbi);
goto err_drm_gem_cma_free_object;
}
@@ -286,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
- goto err_framebuffer_release;
+ goto err_fb_info_destroy;
}
fb = &fbdev_cma->fb->fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &drm_fbdev_cma_ops;
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- dev_err(dev->dev, "Failed to allocate color map.\n");
- goto err_drm_fb_cma_destroy;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -317,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
-err_drm_fb_cma_destroy:
- drm_framebuffer_unregister_private(fb);
- drm_fb_cma_destroy(fb);
-err_framebuffer_release:
- framebuffer_release(fbi);
+err_fb_info_destroy:
+ drm_fb_helper_release_fbi(helper);
err_drm_gem_cma_free_object:
drm_gem_cma_free_object(&obj->base);
return ret;
@@ -397,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
*/
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
- if (fbdev_cma->fb_helper.fbdev) {
- struct fb_info *info;
- int ret;
-
- info = fbdev_cma->fb_helper.fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+ drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cac422916c7a..418d299f3b12 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list);
* Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
- * should also notify the fb helper code from updates to the output
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
+ * They should also notify the fb helper code of updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c, the modeset
* code provides a ->output_poll_changed callback.
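A minimal sketch of that ->lastclose pattern; the example_ names and the private-data layout are hypothetical:

static void example_lastclose(struct drm_device *dev)
{
	struct example_private *priv = dev->dev_private;

	/* Hand the display back to fbcon so the user isn't left hanging. */
	drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
}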
@@ -89,8 +89,9 @@ static LIST_HEAD(kernel_fb_helper_list);
* connectors to the fbdev, e.g. if some are reserved for special purposes or
* not adequate to be used for the fbcon.
*
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
*/
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -98,7 +99,8 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
struct drm_connector *connector;
int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
struct drm_fb_helper_connector *fb_helper_connector;
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
+ mutex_unlock(&dev->mode_config.mutex);
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@ fail:
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
+ mutex_unlock(&dev->mode_config.mutex);
+
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -163,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set,
}
set->num_connectors--;
- /* because i915 is pissy about this..
+ /*
* TODO maybe need to make sure we set it back to !=NULL somewhere?
*/
- if (set->num_connectors == 0)
+ if (set->num_connectors == 0) {
set->fb = NULL;
+ drm_mode_destroy(connector->dev, set->mode);
+ set->mode = NULL;
+ }
}
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -269,7 +277,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_crtc *c;
- list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(c, dev) {
if (crtc->base.id == c->base.id)
return c->primary->fb;
}
@@ -321,7 +329,7 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
@@ -349,21 +357,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
return error;
}
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * Use this variant if you need to bypass locking (panic), or already
- * hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
- */
-static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
- return restore_fbdev_mode(fb_helper);
-}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -393,6 +386,31 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ /* Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master. */
+ if (dev->primary->master)
+ return false;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->primary->fb)
+ crtcs_bound++;
+ if (crtc->primary->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
* and panic handling.
@@ -411,67 +429,15 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
- /*
- * NOTE: Use trylock mode to avoid deadlocks and sleeping in
- * panic context.
- */
- if (__drm_modeset_lock_all(dev, true) != 0) {
- error = true;
- continue;
- }
-
- ret = drm_fb_helper_restore_fbdev_mode(helper);
+ drm_modeset_lock_all(dev);
+ ret = restore_fbdev_mode(helper);
if (ret)
error = true;
-
drm_modeset_unlock_all(dev);
}
return error;
}
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
- void *panic_str)
-{
- /*
- * It's a waste of time and effort to switch back to text console
- * if the kernel should reboot before panic messages can be seen.
- */
- if (panic_timeout < 0)
- return 0;
-
- pr_err("panic occurred, switching back to text console\n");
- return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
- .notifier_call = drm_fb_helper_panic,
-};
-
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int bound = 0, crtcs_bound = 0;
-
- /* Sometimes user space wants everything disabled, so don't steal the
- * display if there's a master. */
- if (dev->primary->master)
- return false;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb)
- crtcs_bound++;
- if (crtc->primary->fb == fb_helper->fb)
- bound++;
- }
-
- if (bound < crtcs_bound)
- return false;
-
- return true;
-}
-
-#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret;
@@ -504,14 +470,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
int i, j;
/*
- * fbdev->blank can be called from irq context in case of a panic.
- * Since we already have our own special panic handler which will
- * restore the fbdev console mode completely, just bail out early.
- */
- if (oops_in_progress)
- return;
-
- /*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
@@ -544,6 +502,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
*/
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
+ if (oops_in_progress)
+ return -EBUSY;
+
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
@@ -655,7 +616,7 @@ int drm_fb_helper_init(struct drm_device *dev,
}
i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
@@ -667,14 +628,91 @@ out_free:
}
EXPORT_SYMBOL(drm_fb_helper_init);
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function.
+ *
+ * RETURNS:
+ * fb_info pointer if things went okay, pointer containing error code
+ * otherwise
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+ struct device *dev = fb_helper->dev->dev;
+ struct fb_info *info;
+ int ret;
+
+ info = framebuffer_alloc(0, dev);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ goto err_release;
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_free_cmap;
+ }
+
+ fb_helper->fbdev = info;
+
+ return info;
+
+err_free_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_release:
+ framebuffer_release(info);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
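As a hedged illustration of the call site named in the kerneldoc, a skeletal fb_probe callback could look like this; everything prefixed example_ is hypothetical:

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... create the driver framebuffer, then wire it up ... */
	info->par = helper;
	info->fbops = &example_fb_ops;
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

	return 0;
}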
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper && fb_helper->fbdev)
+ unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_release_fbi - dealloc fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to free memory taken by fb_info and the members cmap and
+ * apertures
+ */
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper) {
+ struct fb_info *info = fb_helper->fbdev;
+
+ if (info) {
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ fb_helper->fbdev = NULL;
+ }
+}
+EXPORT_SYMBOL(drm_fb_helper_release_fbi);
+
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- pr_info("drm: unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
}
@@ -684,6 +722,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper && fb_helper->fbdev)
+ unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+/**
+ * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_read implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return fb_sys_read(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_read);
+
+/**
+ * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_write implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return fb_sys_write(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_write);
+
+/**
+ * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around sys_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
+
+/**
+ * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around sys_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
+
+/**
+ * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around sys_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ sys_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
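Taken together, the sys_ wrappers let an fbdev emulation fill its fb_ops without reaching into the fbdev core directly, mirroring the drm_fbdev_cma_ops change earlier in this series. A hedged sketch (example_fb_ops is hypothetical):

static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_read	= drm_fb_helper_sys_read,
	.fb_write	= drm_fb_helper_sys_write,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
};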
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ cfb_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+{
+ if (fb_helper && fb_helper->fbdev)
+ fb_set_suspend(fb_helper->fbdev, state);
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
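A hedged sketch of how a driver's PM hooks might use the new wrapper; the example_ names are hypothetical:

static int example_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_private *priv = drm->dev_private;

	drm_fb_helper_set_suspend(&priv->fb_helper, 1);
	/* ... stop scanout, save hardware state ... */
	return 0;
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_private *priv = drm->dev_private;

	/* ... restore hardware state, restart scanout ... */
	drm_fb_helper_set_suspend(&priv->fb_helper, 0);
	return 0;
}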
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -771,9 +952,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ if (oops_in_progress)
return -EBUSY;
- }
+
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -922,6 +1104,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ if (oops_in_progress)
+ return -EBUSY;
+
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
@@ -947,9 +1132,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ if (oops_in_progress)
return -EBUSY;
- }
+
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -1109,12 +1295,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
- /* Switch back to kernel console on panic */
- /* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- dev_info(fb_helper->dev->dev, "registered panic notifier\n");
- atomic_notifier_chain_register(&panic_notifier_list,
- &paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 16a164770713..3c2d4abd71c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref)
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
@@ -778,22 +778,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
-
- mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(obj->dev, vma);
- mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(obj->dev, vma);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
@@ -850,7 +842,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(dev, vma);
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bd75f303da63..86cc793cdf79 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
{
struct drm_gem_object *gem_obj;
- mutex_lock(&drm->struct_mutex);
-
gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
if (!gem_obj) {
dev_err(drm->dev, "failed to lookup GEM object\n");
- mutex_unlock(&drm->struct_mutex);
return -EINVAL;
}
*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_unreference(gem_obj);
-
- mutex_unlock(&drm->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem_obj);
return 0;
}
@@ -381,11 +376,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
- struct drm_device *dev = obj->dev;
uint64_t off;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index aa8bbb460c57..ddfa6014c2c2 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -70,6 +70,8 @@
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
+#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
typedef struct drm_version_32 {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
@@ -93,7 +95,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
return -EFAULT;
version = compat_alloc_user_space(sizeof(*version));
- if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+ if (!version)
return -EFAULT;
if (__put_user(v32.name_len, &version->name_len)
|| __put_user((void __user *)(unsigned long)v32.name,
@@ -140,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ if (!u)
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
@@ -168,7 +170,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
- if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ if (!u)
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
@@ -200,7 +202,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user(idx, &map->offset))
return -EFAULT;
@@ -237,7 +239,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user(m32.offset, &map->offset)
|| __put_user(m32.size, &map->size)
@@ -277,7 +279,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
- if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ if (!map)
return -EFAULT;
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
@@ -306,7 +308,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
return -EFAULT;
client = compat_alloc_user_space(sizeof(*client));
- if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+ if (!client)
return -EFAULT;
if (__put_user(idx, &client->idx))
return -EFAULT;
@@ -345,7 +347,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
int i, err;
stats = compat_alloc_user_space(sizeof(*stats));
- if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+ if (!stats)
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -382,8 +384,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long agp_start;
buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+ if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
return -EFAULT;
if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -414,7 +415,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
return -EFAULT;
buf = compat_alloc_user_space(sizeof(*buf));
- if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+ if (!buf)
return -EFAULT;
if (__put_user(b32.size, &buf->size)
@@ -455,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
+ if (!request)
return -EFAULT;
list = (struct drm_buf_desc *) (request + 1);
@@ -516,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
return -EINVAL;
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
+ if (!request)
return -EFAULT;
list = (struct drm_buf_pub *) (request + 1);
@@ -563,7 +564,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(req32.count, &request->count)
|| __put_user((int __user *)(unsigned long)req32.list,
@@ -589,7 +590,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(req32.ctx_id, &request->ctx_id)
|| __put_user((void *)(unsigned long)req32.handle,
@@ -613,7 +614,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ if (!request)
return -EFAULT;
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
@@ -646,7 +647,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
return -EFAULT;
res = compat_alloc_user_space(sizeof(*res));
- if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+ if (!res)
return -EFAULT;
if (__put_user(res32.count, &res->count)
|| __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -689,7 +690,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
return -EFAULT;
d = compat_alloc_user_space(sizeof(*d));
- if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+ if (!d)
return -EFAULT;
if (__put_user(d32.context, &d->context)
@@ -764,7 +765,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
int err;
info = compat_alloc_user_space(sizeof(*info));
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+ if (!info)
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -807,7 +808,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.size, &request->size)
|| __put_user(req32.type, &request->type))
return -EFAULT;
@@ -834,7 +835,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
@@ -858,7 +859,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.handle, &request->handle)
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
@@ -874,7 +875,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
@@ -897,8 +898,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->size)
|| __put_user(x, &request->size))
return -EFAULT;
@@ -923,8 +923,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->handle)
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
@@ -952,7 +951,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ if (!request ||
__put_user(update32.handle, &request->handle) ||
__put_user(update32.type, &request->type) ||
__put_user(update32.num, &request->num) ||
@@ -994,7 +993,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ if (!request
|| __put_user(req32.request.type, &request->request.type)
|| __put_user(req32.request.sequence, &request->request.sequence)
|| __put_user(req32.request.signal, &request->request.signal))
@@ -1016,6 +1015,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+typedef struct drm_mode_fb_cmd232 {
+ u32 fb_id;
+ u32 width;
+ u32 height;
+ u32 pixel_format;
+ u32 flags;
+ u32 handles[4];
+ u32 pitches[4];
+ u32 offsets[4];
+ u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+ struct drm_mode_fb_cmd232 req32;
+ struct drm_mode_fb_cmd2 __user *req64;
+ int i;
+ int err;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ req64 = compat_alloc_user_space(sizeof(*req64));
+
+ if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+ || __put_user(req32.width, &req64->width)
+ || __put_user(req32.height, &req64->height)
+ || __put_user(req32.pixel_format, &req64->pixel_format)
+ || __put_user(req32.flags, &req64->flags))
+ return -EFAULT;
+
+ for (i = 0; i < 4; i++) {
+ if (__put_user(req32.handles[i], &req64->handles[i]))
+ return -EFAULT;
+ if (__put_user(req32.pitches[i], &req64->pitches[i]))
+ return -EFAULT;
+ if (__put_user(req32.offsets[i], &req64->offsets[i]))
+ return -EFAULT;
+ if (__put_user(req32.modifier[i], &req64->modifier[i]))
+ return -EFAULT;
+ }
+
+ err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+ if (err)
+ return err;
+
+ if (__get_user(req32.fb_id, &req64->fb_id))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1104,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
};
/**
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303fa2327..9a860ca1e9d7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data,
* indicated permissions. If so, returns zero. Otherwise returns an
* error code suitable for ioctl return.
*/
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
+EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..22d207e211e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) \
- ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+#define vblanktimestamp(dev, pipe, count) \
+ ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static void store_vblank(struct drm_device *dev, int crtc,
- unsigned vblank_count_inc,
+ u32 vblank_count_inc,
struct timeval *t_vblank)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
@@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc,
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
- * @crtc: counter to update
+ * @pipe: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc,
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
struct timeval t_vblank;
@@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* corresponding vblank timestamp.
*/
do {
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
- } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+ cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+ rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
/* Deal with counter wrap */
diff = cur_vblank - vblank->last;
if (cur_vblank < vblank->last) {
diff += dev->max_vblank_count + 1;
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, vblank->last, cur_vblank, diff);
+ DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ pipe, vblank->last, cur_vblank, diff);
}
- DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
- crtc, diff);
+ DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
+ pipe, diff);
if (diff == 0)
return;
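A worked example of the wrap handling above, assuming a hypothetical hardware counter only 8 bits wide (max_vblank_count == 0xff):

/*
 * last = 0xfe, cur_vblank = 0x02:
 *
 *   diff  = 0x02 - 0xfe  = 0xffffff04  (u32 underflow)
 *   diff += 0xff + 1    -> 0x00000004  (wraps back around)
 *
 * i.e. four vblanks elapsed across the counter wrap.
 */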
@@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (!rc)
t_vblank = (struct timeval) {0, 0};
- store_vblank(dev, crtc, diff, &t_vblank);
+ store_vblank(dev, pipe, diff, &t_vblank);
}
/*
@@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* are preserved, even if there are any spurious vblank irq's after
* disable.
*/
-static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
u32 vblcount;
s64 diff_ns;
@@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* vblank interrupt is disabled.
*/
if (!vblank->enabled &&
- drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
- drm_update_vblank_count(dev, crtc);
+ drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
+ drm_update_vblank_count(dev, pipe);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return;
}
@@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hardware potentially runtime suspended.
*/
if (vblank->enabled) {
- dev->driver->disable_vblank(dev, crtc);
+ dev->driver->disable_vblank(dev, pipe);
vblank->enabled = false;
}
@@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
- vblank->last = dev->driver->get_vblank_counter(dev, crtc);
- vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+ vblank->last = dev->driver->get_vblank_counter(dev, pipe);
+ vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
+ } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
if (!count)
vblrc = 0;
@@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
*/
vblcount = vblank->count;
diff_ns = timeval_to_ns(&tvblank) -
- timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+ timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
/* If there is at least 1 msec difference between the last stored
* timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hope for the best.
*/
if (vblrc && (abs64(diff_ns) > 1000000))
- store_vblank(dev, crtc, 1, &tvblank);
+ store_vblank(dev, pipe, 1, &tvblank);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
@@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg)
{
struct drm_vblank_crtc *vblank = (void *)arg;
struct drm_device *dev = vblank->dev;
+ unsigned int pipe = vblank->pipe;
unsigned long irqflags;
- int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
- DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
- vblank_disable_and_save(dev, crtc);
+ DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+ vblank_disable_and_save(dev, pipe);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg)
*/
void drm_vblank_cleanup(struct drm_device *dev)
{
- int crtc;
+ unsigned int pipe;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
- for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
WARN_ON(vblank->enabled &&
drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
/**
* drm_vblank_init - initialize vblank support
- * @dev: drm_device
- * @num_crtcs: number of crtcs supported by @dev
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM;
+ unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
struct drm_vblank_crtc *vblank = &dev->vblank[i];
vblank->dev = dev;
- vblank->crtc = i;
+ vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
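A driver calls drm_vblank_init() once at load time, after it knows how many
pipes it drives; a minimal sketch (driver name and pipe count illustrative):

	static int foo_driver_load(struct drm_device *dev)
	{
		int ret;

		/* allocates one struct drm_vblank_crtc per pipe */
		ret = drm_vblank_init(dev, 2);
		if (ret)
			return ret;

		/* ... remaining driver setup ... */
		return 0;
	}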
@@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
} else
- DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
crtc->pixeldur_ns = pixeldur_ns;
crtc->linedur_ns = linedur_ns;
crtc->framedur_ns = framedur_ns;
- DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, mode->crtc_htotal,
mode->crtc_vtotal, mode->crtc_vdisplay);
- DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
crtc->base.id, dotclock, framedur_ns,
linedur_ns, pixeldur_ns);
}
@@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
* drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
* @dev: DRM device
- * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
*
*/
-int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags,
@@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
- if (crtc < 0 || crtc >= dev->num_crtcs) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
@@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Happens during initial modesetting of a crtc.
*/
if (framedur_ns == 0) {
- DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
return -EAGAIN;
}
@@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+ vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
&hpos, &stime, &etime);
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
- DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
- crtc, vbl_status);
+ DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+ pipe, vbl_status);
return -EIO;
}
@@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
- DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
- crtc, duration_ns/1000, *max_error/1000, i);
+ DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+ pipe, duration_ns/1000, *max_error/1000, i);
}
/* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
- DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
- crtc, (int)vbl_status, hpos, vpos,
+ DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ pipe, (int)vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
@@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void)
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
- * @crtc: which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default,
@@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void)
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags)
{
int ret;
@@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
/* Query driver if possible and precision timestamping enabled. */
if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
- ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
tvblank, flags);
if (ret > 0)
return true;
@@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC for which to retrieve the counter
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* Returns:
* The software vblank counter.
*/
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
+u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
+
return vblank->count;
}
EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_count);
/**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value.
- *
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ * system timestamp corresponding to that vblank counter value.
* @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC whose counter to retrieve
* @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
 * modesetting activity. Returns the system timestamp of the vblank interval
 * that corresponds to the current vblank counter value.
*/
-u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
/*
@@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
do {
cur_vblank = vblank->count;
smp_rmb();
- *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
smp_rmb();
} while (cur_vblank != vblank->count);
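The barrier pairing above is a hand-rolled seqcount-style read: the writer
publishes the timestamp slot before bumping the count, and the reader
re-checks the count around the timestamp load. A generic sketch of the same
pattern (not the DRM code itself):

	static unsigned int count;
	static struct timeval slots[2];

	static struct timeval read_consistent(void)
	{
		unsigned int c;
		struct timeval ts;

		do {
			c = READ_ONCE(count);
			smp_rmb();	/* pairs with the writer's smp_wmb() */
			ts = slots[c % 2];
			smp_rmb();	/* order the load before the re-check */
		} while (c != READ_ONCE(count));

		return ts;
	}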
@@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev,
/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev,
*
* This is the legacy version of drm_crtc_send_vblank_event().
*/
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e)
+void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e)
{
struct timeval now;
unsigned int seq;
- if (crtc >= 0) {
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ if (dev->num_crtcs > 0) {
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
now = get_drm_timestamp();
}
- e->pipe = crtc;
+ e->pipe = pipe;
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event);
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
*/
-static int drm_vblank_enable(struct drm_device *dev, int crtc)
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
 * timestamps. Filter code in drm_handle_vblank() will
* prevent double-accounting of same vblank interval.
*/
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ ret = dev->driver->enable_vblank(dev, pipe);
+ DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
if (ret)
atomic_dec(&vblank->refcount);
else {
vblank->enabled = true;
- drm_update_vblank_count(dev, crtc);
+ drm_update_vblank_count(dev, pipe);
}
}
@@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
- * @crtc: which CRTC to own
+ * @pipe: index of CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
@@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
* This is the legacy version of drm_crtc_vblank_get().
*
* Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
*/
-int drm_vblank_get(struct drm_device *dev, int crtc)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
int ret = 0;
if (!dev->num_crtcs)
return -EINVAL;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return -EINVAL;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
- ret = drm_vblank_enable(dev, crtc);
+ ret = drm_vblank_enable(dev, pipe);
} else {
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
@@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get);
* This is the native kms version of drm_vblank_get().
*
* Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
@@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_get);
/**
- * drm_vblank_put - give up ownership of vblank events
+ * drm_vblank_put - release ownership of vblank events
* @dev: DRM device
- * @crtc: which counter to give up
+ * @pipe: index of CRTC to release
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the legacy version of drm_crtc_vblank_put().
*/
-void drm_vblank_put(struct drm_device *dev, int crtc)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
return;
/* Last user schedules interrupt disable */
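The disable is deferred rather than synchronous; a sketch that approximates
the scheduling done just past this point (the exact offdelay handling in the
function body is slightly richer):

	/* last reference gone: arm the per-pipe disable timer instead of
	 * turning the interrupt off immediately */
	if (atomic_dec_and_test(&vblank->refcount) && drm_vblank_offdelay > 0)
		mod_timer(&vblank->disable_timer,
			  jiffies + msecs_to_jiffies(drm_vblank_offdelay));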
@@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
- * @crtc: crtc index
+ * @pipe: CRTC index
*
 * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
 * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
 * due to lack of driver support or because the crtc is off.
*/
-void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret;
u32 last;
- ret = drm_vblank_get(dev, crtc);
- if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ ret = drm_vblank_get(dev, pipe);
+ if (WARN(ret, "vblank not available on crtc %u, ret=%i\n", pipe, ret))
return;
- last = drm_vblank_count(dev, crtc);
+ last = drm_vblank_count(dev, pipe);
- ret = wait_event_timeout(dev->vblank[crtc].queue,
- last != drm_vblank_count(dev, crtc),
+ ret = wait_event_timeout(vblank->queue,
+ last != drm_vblank_count(dev, pipe),
msecs_to_jiffies(100));
- WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+ WARN(ret == 0, "vblank wait timed out on crtc %u\n", pipe);
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
}
EXPORT_SYMBOL(drm_wait_one_vblank);
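Callers that need finer control follow the same shape by hand; a sketch of
the balanced get/count/put usage from a driver (timeout value illustrative):

	if (drm_vblank_get(dev, pipe) == 0) {
		u32 start = drm_vblank_count(dev, pipe);

		/* ... program double-buffered registers ... */
		wait_event_timeout(dev->vblank[pipe].queue,
				   drm_vblank_count(dev, pipe) != start,
				   msecs_to_jiffies(100));
		drm_vblank_put(dev, pipe);
	}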
@@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
*
* This is the legacy version of drm_crtc_vblank_off().
*/
-void drm_vblank_off(struct drm_device *dev, int crtc)
+void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- vblank_disable_and_save(dev, crtc);
+ vblank_disable_and_save(dev, pipe);
wake_up(&vblank->queue);
/*
@@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_unlock(&dev->vbl_lock);
/* Send any queued vblank events, lest the natives grow disquiet */
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
+ if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
list_del(&e->base.link);
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1267,7 +1276,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
* drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
*
* Drivers can use this function to reset the vblank state to off at load time.
* Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
 * This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
*
* This is the legacy version of drm_crtc_vblank_on().
*/
-void drm_vblank_on(struct drm_device *dev, int crtc)
+void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
* vblank counter value before and after a modeset
*/
vblank->last =
- (dev->driver->get_vblank_counter(dev, crtc) - 1) &
+ (dev->driver->get_vblank_counter(dev, pipe) - 1) &
dev->max_vblank_count;
/*
* re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
*/
if (atomic_read(&vblank->refcount) != 0 ||
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
- WARN_ON(drm_vblank_enable(dev, crtc));
+ WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_on);
@@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
@@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc.
*/
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 /* vblank is not initialized (IRQ not installed?), or has been freed */
if (!dev->num_crtcs)
return;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return;
/*
@@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
*/
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, crtc) == 0)
+ if (drm_vblank_get(dev, pipe) == 0)
vblank->inmodeset |= 0x2;
}
}
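The inmodeset field doubles as a two-bit state that the pre/post pair relies
on; spelled out with hypothetical macro names:

	#define INMODESET_ACTIVE	0x1	/* pre_modeset ran for this pipe */
	#define INMODESET_HAS_REF	0x2	/* pre_modeset also took a vblank ref */

	/* drm_vblank_post_modeset() drops the reference only when 0x2 is
	 * set, then clears both bits */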
@@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
/**
* drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
* @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
*
* This function again drops the temporary vblank reference acquired in
* drm_vblank_pre_modeset.
*/
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
 /* vblank is not initialized (IRQ not installed?), or has been freed */
if (!dev->num_crtcs)
return;
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
vblank->inmodeset = 0;
}
@@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- unsigned int crtc;
+ unsigned int pipe;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs)
+ pipe = modeset->crtc;
+ if (pipe >= dev->num_crtcs)
return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- drm_vblank_pre_modeset(dev, crtc);
+ drm_vblank_pre_modeset(dev, pipe);
break;
case _DRM_POST_MODESET:
- drm_vblank_post_modeset(dev, crtc);
+ drm_vblank_post_modeset(dev, pipe);
break;
default:
return -EINVAL;
@@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
return 0;
}
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
@@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
vblwait->reply.sequence = vblwait->request.sequence;
}
- DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
vblwait->request.sequence, seq, pipe);
trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
- unsigned int flags, seq, crtc, high_crtc;
+ unsigned int flags, seq, pipe, high_pipe;
if (!dev->irq_enabled)
return -EINVAL;
@@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
- if (high_crtc)
- crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_pipe)
+ pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
else
- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (crtc >= dev->num_crtcs)
+ pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (pipe >= dev->num_crtcs)
return -EINVAL;
- vblank = &dev->vblank[crtc];
+ vblank = &dev->vblank[pipe];
- ret = drm_vblank_get(dev, crtc);
+ ret = drm_vblank_get(dev, pipe);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
return ret;
}
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, pipe);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
*/
- return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+ return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
vblwait->request.sequence = seq + 1;
}
- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
- vblwait->request.sequence, crtc);
+ DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+ vblwait->request.sequence, pipe);
vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
- (((drm_vblank_count(dev, crtc) -
+ (((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
!vblank->enabled ||
!dev->irq_enabled));
@@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (ret != -EINTR) {
struct timeval now;
- vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
@@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
}
done:
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(dev, pipe);
return ret;
}
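The pipe selection at the top of drm_wait_vblank() unpacks a CRTC index that
userspace packs into the ioctl type field; a sketch of the caller side for
pipe 2 (the mask/shift names are the UAPI ones used above):

	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE |
			   (2 << _DRM_VBLANK_HIGH_CRTC_SHIFT);
	vbl.request.sequence = 1;	/* wait one vblank on pipe 2 */
	/* ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl); */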
-static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
@@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
assert_spin_locked(&dev->event_lock);
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != crtc)
+ if (e->pipe != pipe)
continue;
if ((seq - e->event.sequence) > (1<<23))
continue;
@@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
e->event.sequence, seq);
list_del(&e->base.link);
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
}
- trace_drm_vblank_event(crtc, seq);
+ trace_drm_vblank_event(pipe, seq);
}
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
- * @crtc: where this event occurred
+ * @pipe: index of CRTC where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the legacy version of drm_crtc_handle_vblank().
*/
-bool drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 vblcount;
s64 diff_ns;
struct timeval tvblank;
@@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
if (WARN_ON_ONCE(!dev->num_crtcs))
return false;
- if (WARN_ON(crtc >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs))
return false;
spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
/* Get current timestamp and count. */
vblcount = vblank->count;
- drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+ drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
diff_ns = timeval_to_ns(&tvblank) -
- timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+ timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
/* Update vblank timestamp and count if at least
* DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* ignore those for accounting.
*/
if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
- store_vblank(dev, crtc, 1, &tvblank);
+ store_vblank(dev, pipe, 1, &tvblank);
else
- DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
- crtc, (int) diff_ns);
+ DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
+ pipe, (int) diff_ns);
spin_unlock(&dev->vblank_time_lock);
wake_up(&vblank->queue);
- drm_handle_vblank_events(dev, crtc);
+ drm_handle_vblank_events(dev, pipe);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c1dc61473db5..9b731786e4db 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,7 +42,7 @@ struct drm_file;
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361a635e..4924d381b664 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_master *master = file_priv->master;
int ret = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c0a5cd8c5262..fba321ca4344 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
* drm_modeset_acquire_fini(&ctx);
*/
-
/**
- * __drm_modeset_lock_all - internal helper to grab all modeset locks
- * @dev: DRM device
- * @trylock: trylock mode for atomic contexts
- *
- * This is a special version of drm_modeset_lock_all() which can also be used in
- * atomic contexts. Then @trylock must be set to true.
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
*
- * Returns:
- * 0 on success or negative error code on failure.
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
*/
-int __drm_modeset_lock_all(struct drm_device *dev,
- bool trylock)
+void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
- ctx = kzalloc(sizeof(*ctx),
- trylock ? GFP_ATOMIC : GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (WARN_ON(!ctx))
+ return;
- if (trylock) {
- if (!mutex_trylock(&config->mutex)) {
- ret = -EBUSY;
- goto out;
- }
- } else {
- mutex_lock(&config->mutex);
- }
+ mutex_lock(&config->mutex);
drm_modeset_acquire_init(ctx, 0);
- ctx->trylock_only = trylock;
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@ retry:
drm_warn_on_modeset_not_all_locked(dev);
- return 0;
+ return;
fail:
if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@ fail:
goto retry;
}
-out:
kfree(ctx);
- return ret;
-}
-EXPORT_SYMBOL(__drm_modeset_lock_all);
-
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
- WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
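Fine-grained callers use the acquire context directly, with the same
-EDEADLK backoff this helper performs internally; a minimal sketch (fragment
from inside a handler, error handling trimmed):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drops held locks, sleeps */
		goto retry;
	}
	/* ... touch the protected state ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);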
@@ -276,7 +246,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
if (oops_in_progress)
return;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ drm_for_each_crtc(crtc, dev)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +434,17 @@ EXPORT_SYMBOL(drm_modeset_unlock);
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
- list_for_each_entry(crtc, &config->crtc_list, head) {
+ drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
- list_for_each_entry(plane, &config->plane_list, head) {
+ drm_for_each_plane(plane, dev) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index aaa130736bf8..be3884073ea4 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -19,7 +19,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
unsigned int index = 0;
struct drm_crtc *tmp;
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ drm_for_each_crtc(tmp, dev) {
if (tmp->port == port)
return 1 << index;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2f0ed11024eb..5e5a07af02c8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -91,13 +91,14 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_for_each_connector(connector, dev) {
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
count++;
}
+ }
return count;
}
@@ -436,7 +437,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
- crtc_funcs[i]->atomic_begin(crtc[i]);
+ crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
}
/*
@@ -451,7 +452,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
- crtc_funcs[i]->atomic_flush(crtc[i]);
+ crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
}
/*
@@ -525,10 +526,12 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
- else if (plane->state)
+ else {
+ if (!plane->state)
+ drm_atomic_helper_plane_reset(plane);
+
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- else
- plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ }
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
@@ -572,10 +575,12 @@ int drm_plane_helper_disable(struct drm_plane *plane)
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
- else if (plane->state)
+ else {
+ if (!plane->state)
+ drm_atomic_helper_plane_reset(plane);
+
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- else
- plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ }
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 04203c0d2ecb..d734780b31c0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -93,6 +93,27 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
return 1;
}
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ drm_for_each_connector(connector, dev) {
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+
+ if (poll)
+ schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
@@ -153,7 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- drm_kms_helper_poll_enable(dev);
+ __drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -295,7 +316,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@ static void output_poll_execute(struct work_struct *work)
goto out;
mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
/* Ignore forced connectors. */
if (connector->force)
@@ -407,20 +427,9 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- bool poll = false;
- struct drm_connector *connector;
-
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT))
- poll = true;
- }
-
- if (poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+ mutex_lock(&dev->mode_config.mutex);
+ __drm_kms_helper_poll_enable(dev);
+ mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
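With this split, the unlocked wrapper is what drivers call; the new
__drm_kms_helper_poll_enable() is for paths that already hold
mode_config.mutex. A driver-side sketch of the usual ordering:

	drm_kms_helper_poll_init(dev);		/* init the delayed work */
	drm_kms_helper_poll_enable(dev);	/* takes mode_config.mutex itself */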
@@ -495,7 +504,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
return false;
mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector(connector, dev) {
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4ad80b..bd1a4156f647 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
- depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+ depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
default DRM_EXYNOS
select DRM_PANEL
help
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
config DRM_EXYNOS_G2D
bool "Exynos DRM G2D"
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+ select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b1084fcd..02aecfed6354 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
- exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
- exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o exynos_drm_dmabuf.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
+ exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f245fc..b3c730770b0f 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -54,6 +54,13 @@ static const char * const decon_clks_name[] = {
"sclk_decon_eclk",
};
+static const uint32_t decon_formats[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -152,15 +159,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
#define OFFSIZE(x) (((x) & 0x3fff) << 14)
#define PAGEWIDTH(x) ((x) & 0x3fff)
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_XRGB1555:
val |= WINCONx_BPPMODE_16BPP_I1555;
val |= WINCONx_HAWSWP_F;
@@ -186,7 +193,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
return;
}
- DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +203,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 * movement causes unstable DMA which results in an iommu crash/tear.
*/
- if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -219,27 +226,35 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
writel(val, ctx->addr + DECON_SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
- u32 val;
- if (win < 0 || win >= WINDOWS_NR)
+ if (ctx->suspended)
return;
- plane = &ctx->planes[win];
+ decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct drm_plane_state *state = plane->base.state;
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
+ u32 val;
if (ctx->suspended)
return;
- decon_shadow_protect_win(ctx, win, true);
-
val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
writel(val, ctx->addr + DECON_VIDOSDxA(win));
- val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
- COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
+ val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
+ COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,42 +267,33 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
- val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
+ val = plane->dma_addr[0] + pitch * plane->crtc_h;
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
- val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
- | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
+ val = OFFSIZE(pitch - plane->crtc_w * bpp)
+ | PAGEWIDTH(plane->crtc_w * bpp);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
- decon_win_set_pixfmt(ctx, win);
+ decon_win_set_pixfmt(ctx, win, state->fb);
/* window enable */
val = readl(ctx->addr + DECON_WINCONx(win));
val |= WINCONx_ENWIN_F;
writel(val, ctx->addr + DECON_WINCONx(win));
- decon_shadow_protect_win(ctx, win, false);
-
/* standalone update */
val = readl(ctx->addr + DECON_UPDATE);
val |= STANDALONE_UPDATE_F;
writel(val, ctx->addr + DECON_UPDATE);
-
- if (ctx->i80_if)
- atomic_set(&ctx->win_updated, 1);
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
u32 val;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
if (ctx->suspended)
return;
@@ -306,6 +312,20 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
writel(val, ctx->addr + DECON_UPDATE);
}
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ decon_shadow_protect_win(ctx, plane->zpos, false);
+
+ if (ctx->i80_if)
+ atomic_set(&ctx->win_updated, 1);
+}
+
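The new hooks split the old win_commit into a protected three-step sequence;
the order the atomic helpers end up driving them in, per plane (illustrative):

	decon_atomic_begin(crtc, plane);	/* raise shadow protection */
	decon_update_plane(crtc, plane);	/* program window registers */
	decon_atomic_flush(crtc, plane);	/* drop protection, latch at vblank */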
static void decon_swreset(struct decon_context *ctx)
{
unsigned int tries;
@@ -378,7 +398,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- decon_win_disable(crtc, i);
+ decon_disable_plane(crtc, &ctx->planes[i]);
decon_swreset(ctx);
@@ -407,7 +427,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
writel(val, ctx->addr + DECON_TRIGCON);
}
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +480,11 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.commit = decon_commit,
- .win_commit = decon_win_commit,
- .win_disable = decon_win_disable,
+ .atomic_begin = decon_atomic_begin,
+ .update_plane = decon_update_plane,
+ .disable_plane = decon_disable_plane,
+ .atomic_flush = decon_atomic_flush,
.te_handler = decon_te_irq_handler,
- .clear_channels = decon_clear_channels,
};
static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -483,7 +504,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, zpos);
+ 1 << ctx->pipe, type, decon_formats,
+ ARRAY_SIZE(decon_formats), zpos);
if (ret)
return ret;
}
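exynos_plane_init() now takes the supported-format table explicitly; a
sketch of a call with a single format (values illustrative):

	static const uint32_t fmts[] = { DRM_FORMAT_XRGB8888 };

	ret = exynos_plane_init(drm_dev, &ctx->planes[0], 1 << ctx->pipe,
				DRM_PLANE_TYPE_PRIMARY, fmts,
				ARRAY_SIZE(fmts), 0);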
@@ -497,7 +519,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
goto err;
}
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+ decon_clear_channels(ctx->crtc);
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
if (ret)
goto err;
@@ -514,8 +538,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
@@ -533,7 +556,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id)
val = readl(ctx->addr + DECON_VIDINTCON1);
if (val & VIDINTCON1_INTFRMPEND) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
/* clear */
writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -547,13 +570,21 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = dev_id;
u32 val;
+ int win;
if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
goto out;
val = readl(ctx->addr + DECON_VIDINTCON1);
if (val & VIDINTCON1_INTFRMDONEPEND) {
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ for (win = 0; win < WINDOWS_NR; win++) {
+ struct exynos_drm_plane *plane = &ctx->planes[win];
+
+ if (!plane->pending_fb)
+ continue;
+
+ exynos_drm_crtc_finish_update(ctx->crtc, plane);
+ }
/* clear */
writel(VIDINTCON1_INTFRMDONEPEND,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532afd1a5..cbdb78ef3bac 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@ struct decon_context {
atomic_t wait_vsync_event;
struct exynos_drm_panel_info panel;
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
};
static const struct of_device_id decon_driver_dt_match[] = {
@@ -70,6 +70,18 @@ static const struct of_device_id decon_driver_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
+static const uint32_t decon_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+};
+
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -126,7 +138,9 @@ static int decon_ctx_initialize(struct decon_context *ctx,
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+ decon_clear_channels(ctx->crtc);
+
+ ret = drm_iommu_attach_device(drm_dev, ctx->dev);
if (ret)
priv->pipe--;
@@ -136,8 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +284,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +342,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +352,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 * movement causes unstable DMA which results in an iommu crash/tear.
*/
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
- if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+ if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -382,23 +395,30 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
writel(val, ctx->regs + SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
- struct exynos_drm_plane *plane;
- int padding;
- unsigned long val, alpha;
- unsigned int last_x;
- unsigned int last_y;
if (ctx->suspended)
return;
- if (win < 0 || win >= WINDOWS_NR)
- return;
+ decon_shadow_protect_win(ctx, plane->zpos, true);
+}
- plane = &ctx->planes[win];
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct decon_context *ctx = crtc->ctx;
+ struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+ struct drm_plane_state *state = plane->base.state;
+ int padding;
+ unsigned long val, alpha;
+ unsigned int last_x;
+ unsigned int last_y;
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
if (ctx->suspended)
return;
@@ -413,18 +433,15 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
* is set.
*/
- /* protect windows */
- decon_shadow_protect_win(ctx, win, true);
-
/* buffer start address */
val = (unsigned long)plane->dma_addr[0];
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ padding = (pitch / bpp) - state->fb->width;
/* buffer size */
- writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
- writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
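The padding computation recovers inter-line padding in pixels from the
framebuffer pitch; with hypothetical numbers:

	/* fb->width = 1920, pitches[0] = 7808 bytes, 32bpp => bpp = 4 */
	padding = (7808 / 4) - 1920;	/* 1952 - 1920 = 32 padding pixels */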
@@ -433,25 +450,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
DRM_DEBUG_KMS("start addr = 0x%lx\n",
(unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_width, plane->crtc_height);
+ plane->crtc_w, plane->crtc_h);
/*
* OSD position.
 * In case the window layout goes off the LCD layout, DECON fails.
*/
- if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
- plane->crtc_x = mode->hdisplay - plane->crtc_width;
- if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
- plane->crtc_y = mode->vdisplay - plane->crtc_height;
+ if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
+ plane->crtc_x = mode->hdisplay - plane->crtc_w;
+ if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
+ plane->crtc_y = mode->vdisplay - plane->crtc_h;
val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_width;
+ last_x = plane->crtc_x + plane->crtc_w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_height;
+ last_y = plane->crtc_y + plane->crtc_h;
if (last_y)
last_y--;
@@ -475,7 +492,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(alpha, ctx->regs + VIDOSD_D(win));
- decon_win_set_pixfmt(ctx, win);
+ decon_win_set_pixfmt(ctx, win, state->fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -495,17 +512,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(val, ctx->regs + DECON_UPDATE);
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
u32 val;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
if (ctx->suspended)
return;
@@ -517,14 +530,22 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
- /* unprotect windows */
- decon_shadow_protect_win(ctx, win, false);
-
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
}
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ if (ctx->suspended)
+ return;
+
+ decon_shadow_protect_win(ctx, plane->zpos, false);
+}
+
static void decon_init(struct decon_context *ctx)
{
u32 val;
@@ -601,7 +622,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- decon_win_disable(crtc, i);
+ decon_disable_plane(crtc, &ctx->planes[i]);
clk_disable_unprepare(ctx->vclk);
clk_disable_unprepare(ctx->eclk);
@@ -621,9 +642,10 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
- .win_commit = decon_win_commit,
- .win_disable = decon_win_disable,
- .clear_channels = decon_clear_channels,
+ .atomic_begin = decon_atomic_begin,
+ .update_plane = decon_update_plane,
+ .disable_plane = decon_disable_plane,
+ .atomic_flush = decon_atomic_flush,
};
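
The old win_commit/win_disable pair is replaced above by four finer-grained hooks. A minimal sketch of the intended per-plane call sequence, assuming a caller modeled on the DRM atomic helpers (names and flow are illustrative, not exynos code):

    static void commit_one_plane(struct exynos_drm_crtc *crtc,
                                 struct exynos_drm_plane *plane)
    {
        const struct exynos_drm_crtc_ops *ops = crtc->ops;

        if (ops->atomic_begin)
            ops->atomic_begin(crtc, plane);  /* shadow-protect the window */
        if (ops->update_plane)
            ops->update_plane(crtc, plane);  /* program window registers */
        if (ops->atomic_flush)
            ops->atomic_flush(crtc, plane);  /* unprotect; latch at vblank */
    }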
@@ -631,6 +653,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = (struct decon_context *)dev_id;
u32 val, clear_bit;
+ int win;
val = readl(ctx->regs + VIDINTCON1);
@@ -643,8 +666,15 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
goto out;
if (!ctx->i80_if) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ for (win = 0 ; win < WINDOWS_NR ; win++) {
+ struct exynos_drm_plane *plane = &ctx->planes[win];
+
+ if (!plane->pending_fb)
+ continue;
+
+ exynos_drm_crtc_finish_update(ctx->crtc, plane);
+ }
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
@@ -675,7 +705,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, zpos);
+ 1 << ctx->pipe, type, decon_formats,
+ ARRAY_SIZE(decon_formats), zpos);
if (ret)
return ret;
}
@@ -689,8 +720,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- if (ctx->display)
- exynos_drm_create_enc_conn(drm_dev, ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_bind(drm_dev, ctx->encoder);
return 0;
@@ -703,8 +734,8 @@ static void decon_unbind(struct device *dev, struct device *master,
decon_disable(ctx->crtc);
- if (ctx->display)
- exynos_dpi_remove(ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_remove(ctx->encoder);
decon_ctx_remove(ctx);
}
@@ -789,9 +820,9 @@ static int decon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display)) {
- ret = PTR_ERR(ctx->display);
+ ctx->encoder = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->encoder)) {
+ ret = PTR_ERR(ctx->encoder);
goto err_iounmap;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b8002a2c8..d66ade0efac8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
#include <drm/drm_panel.h>
#include "exynos_dp_core.h"
+#include "exynos_drm_crtc.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
{
- return to_exynos_crtc(dp->encoder->crtc);
+ return to_exynos_crtc(dp->encoder.crtc);
}
-static inline struct exynos_dp_device *
-display_to_dp(struct exynos_drm_display *d)
+static inline struct exynos_dp_device *encoder_to_dp(
+ struct drm_encoder *e)
{
- return container_of(d, struct exynos_dp_device, display);
+ return container_of(e, struct exynos_dp_device, encoder);
}
struct bridge_init {
@@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp)
/* Configure video slave mode */
exynos_dp_enable_video_master(dp, 0);
- /* Enable video */
- exynos_dp_start_video(dp);
-
timeout_loop = 0;
for (;;) {
@@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work)
drm_helper_hpd_irq_event(dp->drm_dev);
}
-static void exynos_dp_commit(struct exynos_drm_display *display)
+static void exynos_dp_commit(struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display_to_dp(display);
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
int ret;
/* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display)
if (drm_panel_enable(dp->panel))
DRM_ERROR("failed to enable the panel\n");
}
+
+ /* Enable video */
+ exynos_dp_start_video(dp);
}
static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
{
struct exynos_dp_device *dp = ctx_from_connector(connector);
- return dp->encoder;
+ return &dp->encoder;
}
static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
return 0;
}
-static int exynos_dp_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display_to_dp(display);
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct drm_connector *connector = &dp->connector;
int ret;
- dp->encoder = encoder;
-
/* Pre-empt DP connector creation if there's a bridge */
if (dp->bridge) {
ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
return ret;
}
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- if (dp->phy)
- phy_power_on(dp->phy);
+ return true;
}
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- if (dp->phy)
- phy_power_off(dp->phy);
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_enable(struct drm_encoder *encoder)
{
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
crtc->ops->clock_enable(dp_to_crtc(dp), true);
clk_prepare_enable(dp->clock);
- exynos_dp_phy_init(dp);
+ phy_power_on(dp->phy);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
- exynos_dp_commit(&dp->display);
+ exynos_dp_commit(&dp->encoder);
+
+ dp->dpms_mode = DRM_MODE_DPMS_ON;
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_disable(struct drm_encoder *encoder)
{
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
- exynos_dp_phy_exit(dp);
+ phy_power_off(dp->phy);
clk_disable_unprepare(dp->clock);
if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
}
-}
-
-static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dp_device *dp = display_to_dp(display);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
- break;
- default:
- break;
- }
- dp->dpms_mode = mode;
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static struct exynos_drm_display_ops exynos_dp_display_ops = {
- .create_connector = exynos_dp_create_connector,
- .dpms = exynos_dp_dpms,
- .commit = exynos_dp_commit,
+static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+ .mode_fixup = exynos_dp_mode_fixup,
+ .mode_set = exynos_dp_mode_set,
+ .enable = exynos_dp_enable,
+ .disable = exynos_dp_disable,
+};
+
+static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder = &dp->encoder;
struct resource *res;
unsigned int irq_flags;
- int ret = 0;
+ int pipe, ret = 0;
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
- exynos_dp_phy_init(dp);
+ phy_power_on(dp->phy);
exynos_dp_init_dp(dp);
@@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &dp->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+ ret = exynos_dp_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+ exynos_dp_disable(&dp->encoder);
}
static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
- dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- dp->display.ops = &exynos_dp_display_ops;
platform_set_drvdata(pdev, dp);
panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@ static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+ exynos_dp_disable(&dp->encoder);
return 0;
}
@@ -1385,7 +1396,7 @@ static int exynos_dp_resume(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
+ exynos_dp_enable(&dp->encoder);
return 0;
}
#endif
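
The conversion applied to this file (and repeated for DPI and DSI below) follows one idiom: embed struct drm_encoder in the device structure and recover the container with container_of(). A generic sketch, with my_output and encoder_to_output as illustrative names:

    #include <drm/drmP.h>  /* struct drm_encoder, container_of() */

    struct my_output {
        struct drm_encoder encoder;  /* embedded, not a pointer */
        /* ... device-specific state ... */
    };

    static inline struct my_output *encoder_to_output(struct drm_encoder *e)
    {
        return container_of(e, struct my_output, encoder);
    }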
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e799679669..e413b6f7b0e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@ struct link_train {
};
struct exynos_dp_device {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct drm_panel *panel;
struct drm_bridge *bridge;
struct clk *clock;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba10e28..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
- int ret = 0;
- enum dma_attr attr;
- unsigned int nr_pages;
-
- if (buf->dma_addr) {
- DRM_DEBUG_KMS("already allocated.\n");
- return 0;
- }
-
- init_dma_attrs(&buf->dma_attrs);
-
- /*
- * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
- * region will be allocated; otherwise memory is allocated as
- * contiguously as possible.
- */
- if (!(flags & EXYNOS_BO_NONCONTIG))
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
- /*
- * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
- * else cachable mapping.
- */
- if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
- attr = DMA_ATTR_WRITE_COMBINE;
- else
- attr = DMA_ATTR_NON_CONSISTENT;
-
- dma_set_attr(attr, &buf->dma_attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
- nr_pages = buf->size >> PAGE_SHIFT;
-
- if (!is_drm_iommu_supported(dev)) {
- dma_addr_t start_addr;
- unsigned int i = 0;
-
- buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- return -ENOMEM;
- }
-
- buf->cookie = dma_alloc_attrs(dev->dev,
- buf->size,
- &buf->dma_addr, GFP_KERNEL,
- &buf->dma_attrs);
- if (!buf->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err_free;
- }
-
- start_addr = buf->dma_addr;
- while (i < nr_pages) {
- buf->pages[i] = phys_to_page(start_addr);
- start_addr += PAGE_SIZE;
- i++;
- }
- } else {
-
- buf->pages = dma_alloc_attrs(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL,
- &buf->dma_attrs);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
- }
- }
-
- buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
- if (IS_ERR(buf->sgt)) {
- DRM_ERROR("failed to get sg table.\n");
- ret = PTR_ERR(buf->sgt);
- goto err_free_attrs;
- }
-
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->dma_addr,
- buf->size);
-
- return ret;
-
-err_free_attrs:
- dma_free_attrs(dev->dev, buf->size, buf->pages,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- buf->dma_addr = (dma_addr_t)NULL;
-err_free:
- if (!is_drm_iommu_supported(dev))
- drm_free_large(buf->pages);
-
- return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
- if (!buf->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
- return;
- }
-
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->dma_addr,
- buf->size);
-
- sg_free_table(buf->sgt);
-
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, buf->size, buf->cookie,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- drm_free_large(buf->pages);
- } else
- dma_free_attrs(dev->dev, buf->size, buf->pages,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
- buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
- unsigned int size)
-{
- struct exynos_drm_gem_buf *buffer;
-
- DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- buffer->size = size;
- return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer)
-{
- kfree(buffer);
- buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
- /*
- * allocate memory region and set the memory information
- * to vaddr and dma_addr of a buffer object.
- */
- if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
- unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
- lowlevel_buffer_deallocate(dev, flags, buffer);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f19673c..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
- unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
- struct exynos_drm_gem_buf *buf,
- unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
- unsigned int flags,
- struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972eaa07..c68a6a2a9b57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
-int exynos_drm_create_enc_conn(struct drm_device *dev,
- struct exynos_drm_display *display)
-{
- struct drm_encoder *encoder;
- int ret;
- unsigned long possible_crtcs = 0;
-
- ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
- if (ret < 0)
- return ret;
-
- possible_crtcs |= 1 << ret;
-
- /* create and initialize a encoder for this sub driver. */
- encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
- if (!encoder) {
- DRM_ERROR("failed to create encoder\n");
- return -EFAULT;
- }
-
- display->encoder = encoder;
-
- ret = display->ops->create_connector(display, encoder);
- if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
- goto err_destroy_encoder;
- }
-
- return 0;
-
-err_destroy_encoder:
- encoder->funcs->destroy(encoder);
- return ret;
-}
-
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b76e071..0872aa2f450f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,21 +19,15 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->enabled)
- return;
-
if (exynos_crtc->ops->enable)
exynos_crtc->ops->enable(exynos_crtc);
- exynos_crtc->enabled = true;
-
drm_crtc_vblank_on(crtc);
}
@@ -41,20 +35,10 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (!exynos_crtc->enabled)
- return;
-
- /* wait for the completion of page flip. */
- if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
- (exynos_crtc->event == NULL), HZ/20))
- exynos_crtc->event = NULL;
-
drm_crtc_vblank_off(crtc);
if (exynos_crtc->ops->disable)
exynos_crtc->ops->disable(exynos_crtc);
-
- exynos_crtc->enabled = false;
}
static bool
@@ -80,18 +64,36 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
exynos_crtc->ops->commit(exynos_crtc);
}
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane;
- if (crtc->state->event) {
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- exynos_crtc->event = crtc->state->event;
+ exynos_crtc->event = crtc->state->event;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+ if (exynos_crtc->ops->atomic_begin)
+ exynos_crtc->ops->atomic_begin(exynos_crtc,
+ exynos_plane);
}
}
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+ if (exynos_crtc->ops->atomic_flush)
+ exynos_crtc->ops->atomic_flush(exynos_crtc,
+ exynos_plane);
+ }
}
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
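
These two callbacks are invoked by drm_atomic_helper_commit_planes() around the per-plane updates. A toy model of that ordering (stand-in types, simplified from the helper):

    struct toy_plane { void (*atomic_update)(struct toy_plane *p); };

    struct toy_crtc {
        void (*atomic_begin)(struct toy_crtc *c);
        void (*atomic_flush)(struct toy_crtc *c);
        struct toy_plane **planes;
        int nplanes;
    };

    static void commit_planes_on_crtc(struct toy_crtc *c)
    {
        int i;

        c->atomic_begin(c);   /* exynos: shadow-protect each window */
        for (i = 0; i < c->nplanes; i++)
            c->planes[i]->atomic_update(c->planes[i]);
        c->atomic_flush(c);   /* exynos: unprotect; latch the update */
    }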
@@ -139,13 +141,13 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
if (!exynos_crtc)
return ERR_PTR(-ENOMEM);
- init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
+ init_waitqueue_head(&exynos_crtc->wait_update);
+
crtc = &exynos_crtc->base;
private->crtc[pipe] = crtc;
@@ -171,11 +173,8 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
- if (!exynos_crtc->enabled)
- return -EPERM;
-
if (exynos_crtc->ops->enable_vblank)
- exynos_crtc->ops->enable_vblank(exynos_crtc);
+ return exynos_crtc->ops->enable_vblank(exynos_crtc);
return 0;
}
@@ -186,31 +185,34 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
- if (!exynos_crtc->enabled)
- return;
-
if (exynos_crtc->ops->disable_vblank)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
{
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+ wait_event_timeout(exynos_crtc->wait_update,
+ (atomic_read(&exynos_crtc->pending_update) == 0),
+ msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+ struct exynos_drm_plane *exynos_plane)
+{
+ struct drm_crtc *crtc = &exynos_crtc->base;
unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
- if (exynos_crtc->event) {
+ exynos_plane->pending_fb = NULL;
- drm_send_vblank_event(dev, -1, exynos_crtc->event);
- drm_vblank_put(dev, pipe);
- wake_up(&exynos_crtc->pending_flip_queue);
+ if (atomic_dec_and_test(&exynos_crtc->pending_update))
+ wake_up(&exynos_crtc->wait_update);
- }
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (exynos_crtc->event)
+ drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
exynos_crtc->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
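
The pending_flip_queue mechanism is replaced by a per-CRTC counter plus waitqueue: the commit path counts one update per plane, the vblank handler decrements, and the last decrement wakes the waiter. A userspace toy of the protocol using C11 atomics (illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pending_update;

    static void begin_commit(int nplanes)
    {
        /* one increment per plane update handed to the hardware */
        for (int i = 0; i < nplanes; i++)
            atomic_fetch_add(&pending_update, 1);
    }

    static void plane_update_done(void)
    {
        /* mirrors exynos_drm_crtc_finish_update(): last one wakes waiter */
        if (atomic_fetch_sub(&pending_update, 1) == 1)
            printf("all plane updates latched; wake the committer\n");
    }

    int main(void)
    {
        begin_commit(2);
        plane_update_done();
        plane_update_done();  /* prints the wake-up message */
        return 0;
    }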
@@ -237,7 +239,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
}
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
- unsigned int out_type)
+ enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70818e3..f87d4abda6f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,14 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
void *context);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+ struct exynos_drm_plane *exynos_plane);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
/* This function returns the pipe number of the crtc device matching out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
- unsigned int out_type);
+ enum exynos_drm_output_type out_type);
/*
* This function calls the crtc device(manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c091b30..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include <linux/dma-buf.h>
-
-struct exynos_drm_dmabuf_attachment {
- struct sg_table sgt;
- enum dma_data_direction dir;
- bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
- return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
- struct dma_buf_attachment *attach)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach;
-
- exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
- if (!exynos_attach)
- return -ENOMEM;
-
- exynos_attach->dir = DMA_NONE;
- attach->priv = exynos_attach;
-
- return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct sg_table *sgt;
-
- if (!exynos_attach)
- return;
-
- sgt = &exynos_attach->sgt;
-
- if (exynos_attach->dir != DMA_NONE)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
- exynos_attach->dir);
-
- sg_free_table(sgt);
- kfree(exynos_attach);
- attach->priv = NULL;
-}
-
-static struct sg_table *
- exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
-{
- struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
- struct drm_device *dev = gem_obj->base.dev;
- struct exynos_drm_gem_buf *buf;
- struct scatterlist *rd, *wr;
- struct sg_table *sgt = NULL;
- unsigned int i;
- int nents, ret;
-
- /* just return current sgt if already requested. */
- if (exynos_attach->dir == dir && exynos_attach->is_mapped)
- return &exynos_attach->sgt;
-
- buf = gem_obj->buffer;
- if (!buf) {
- DRM_ERROR("buffer is null.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- sgt = &exynos_attach->sgt;
-
- ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("failed to alloc sgt.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mutex_lock(&dev->struct_mutex);
-
- rd = buf->sgt->sgl;
- wr = sgt->sgl;
- for (i = 0; i < sgt->orig_nents; ++i) {
- sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
- rd = sg_next(rd);
- wr = sg_next(wr);
- }
-
- if (dir != DMA_NONE) {
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sg_free_table(sgt);
- sgt = ERR_PTR(-EIO);
- goto err_unlock;
- }
- }
-
- exynos_attach->is_mapped = true;
- exynos_attach->dir = dir;
- attach->priv = exynos_attach;
-
- DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- return sgt;
-}
-
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- /* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
- /* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
-{
- return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
- .attach = exynos_gem_attach_dma_buf,
- .detach = exynos_gem_detach_dma_buf,
- .map_dma_buf = exynos_gem_map_dma_buf,
- .unmap_dma_buf = exynos_gem_unmap_dma_buf,
- .kmap = exynos_gem_dmabuf_kmap,
- .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
- .kunmap = exynos_gem_dmabuf_kunmap,
- .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
- .mmap = exynos_gem_dmabuf_mmap,
- .release = drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
- struct drm_gem_object *obj, int flags)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &exynos_dmabuf_ops;
- exp_info.size = exynos_gem_obj->base.size;
- exp_info.flags = flags;
- exp_info.priv = obj;
-
- return dma_buf_export(&exp_info);
-}
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
- struct scatterlist *sgl;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer;
- int ret;
-
- /* is this one of our own objects? */
- if (dma_buf->ops == &exynos_dmabuf_ops) {
- struct drm_gem_object *obj;
-
- obj = dma_buf->priv;
-
- /* is it from our device? */
- if (obj->dev == drm_dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_reference(obj);
- return obj;
- }
- }
-
- attach = dma_buf_attach(dma_buf, drm_dev->dev);
- if (IS_ERR(attach))
- return ERR_PTR(-EINVAL);
-
- get_dma_buf(dma_buf);
-
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_buf_detach;
- }
-
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto err_unmap_attach;
- }
-
- exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
- if (!exynos_gem_obj) {
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
- sgl = sgt->sgl;
-
- buffer->size = dma_buf->size;
- buffer->dma_addr = sg_dma_address(sgl);
-
- if (sgt->nents == 1) {
- /* always physically contiguous memory if sgt->nents is 1. */
- exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
- } else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
- exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
- }
-
- exynos_gem_obj->buffer = buffer;
- buffer->sgt = sgt;
- exynos_gem_obj->base.import_attach = attach;
-
- DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
- buffer->size);
-
- return &exynos_gem_obj->base;
-
-err_free_buffer:
- kfree(buffer);
- buffer = NULL;
-err_unmap_attach:
- dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9ff484d..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
- struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
- struct dma_buf *dma_buf);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595c1894..c748b8790de3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
struct exynos_dpi {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct device_node *panel_node;
struct drm_panel *panel;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct videomode *vm;
- int dpms_mode;
};
#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
-static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
{
- return container_of(d, struct exynos_dpi, display);
+ return container_of(e, struct exynos_dpi, encoder);
}
static enum drm_connector_status
@@ -99,7 +97,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- return ctx->encoder;
+ return &ctx->encoder;
}
static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.best_encoder = exynos_dpi_best_encoder,
};
-static int exynos_dpi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dpi_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display_to_dpi(display);
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
- ctx->encoder = encoder;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void exynos_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+}
+
+static void exynos_dpi_enable(struct drm_encoder *encoder)
+{
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
if (ctx->panel) {
drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
}
}
-static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+static void exynos_dpi_disable(struct drm_encoder *encoder)
{
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
if (ctx->panel) {
drm_panel_disable(ctx->panel);
drm_panel_unprepare(ctx->panel);
}
}
-static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dpi *ctx = display_to_dpi(display);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
- exynos_dpi_poweron(ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
- exynos_dpi_poweroff(ctx);
- break;
- default:
- break;
- }
- ctx->dpms_mode = mode;
-}
+static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+ .mode_fixup = exynos_dpi_mode_fixup,
+ .mode_set = exynos_dpi_mode_set,
+ .enable = exynos_dpi_enable,
+ .disable = exynos_dpi_disable,
+};
-static struct exynos_drm_display_ops exynos_dpi_display_ops = {
- .create_connector = exynos_dpi_create_connector,
- .dpms = exynos_dpi_dpms
+static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
/* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return 0;
}
-struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ int ret;
+
+ ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
+ encoder->possible_crtcs = 1 << ret;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+
+ ret = exynos_dpi_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct drm_encoder *exynos_dpi_probe(struct device *dev)
{
struct exynos_dpi *ctx;
int ret;
@@ -308,10 +332,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
if (!ctx)
return ERR_PTR(-ENOMEM);
- ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- ctx->display.ops = &exynos_dpi_display_ops;
ctx->dev = dev;
- ctx->dpms_mode = DRM_MODE_DPMS_OFF;
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0) {
@@ -325,14 +346,14 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- return &ctx->display;
+ return &ctx->encoder;
}
-int exynos_dpi_remove(struct exynos_drm_display *display)
+int exynos_dpi_remove(struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display_to_dpi(display);
+ struct exynos_dpi *ctx = encoder_to_dpi(encoder);
- exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
+ exynos_dpi_disable(&ctx->encoder);
if (ctx->panel)
drm_panel_detach(ctx->panel);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c60a353..831d2e4cacf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,6 +13,8 @@
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/component.h>
@@ -21,13 +23,11 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
-#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
@@ -38,15 +38,112 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+struct exynos_atomic_commit {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ u32 crtcs;
+};
+
+static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (!crtc->state->enable)
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ continue;
+
+ exynos_drm_crtc_wait_pending_update(exynos_crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+}
+
+static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
+{
+ struct drm_device *dev = commit->dev;
+ struct exynos_drm_private *priv = dev->dev_private;
+ struct drm_atomic_state *state = commit->state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ /*
+ * Exynos can't update planes while CRTCs and encoders are disabled:
+ * its update routines, especially for FIMD, require the clocks
+ * to be enabled. So it is necessary to handle the modeset operations
+ * *before* the commit_planes() step; this way the relevant clocks
+ * are always enabled while the updates are performed.
+ */
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ atomic_set(&exynos_crtc->pending_update, 0);
+ }
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(plane->crtc);
+
+ if (!plane->crtc)
+ continue;
+
+ atomic_inc(&exynos_crtc->pending_update);
+ }
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ exynos_atomic_wait_for_commit(state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ spin_lock(&priv->lock);
+ priv->pending &= ~commit->crtcs;
+ spin_unlock(&priv->lock);
+
+ wake_up_all(&priv->wait);
+
+ kfree(commit);
+}
+
+static void exynos_drm_atomic_work(struct work_struct *work)
+{
+ struct exynos_atomic_commit *commit = container_of(work,
+ struct exynos_atomic_commit, work);
+
+ exynos_atomic_commit_complete(commit);
+}
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
- int ret;
+ struct drm_encoder *encoder;
+ unsigned int clone_mask;
+ int cnt, ret;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ init_waitqueue_head(&private->wait);
+ spin_lock_init(&private->lock);
+
dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
@@ -67,7 +164,13 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
/* setup possible_clones. */
- exynos_drm_encoder_setup(dev);
+ cnt = 0;
+ clone_mask = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ clone_mask |= (1 << (cnt++));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ encoder->possible_clones = clone_mask;
platform_set_drvdata(dev->platformdev, dev);
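
The loop above simply lets every encoder be cloned with every other one: with n encoders, each possible_clones mask ends up with the low n bits set. A trivial check (hypothetical encoder count):

    #include <stdio.h>

    int main(void)
    {
        unsigned int clone_mask = 0;
        int cnt;

        for (cnt = 0; cnt < 3; cnt++)  /* assume three encoders */
            clone_mask |= 1u << cnt;

        printf("clone_mask = 0x%x\n", clone_mask);  /* 0x7 */
        return 0;
    }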
@@ -143,6 +246,64 @@ static int exynos_drm_unload(struct drm_device *dev)
return 0;
}
+static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
+{
+ bool pending;
+
+ spin_lock(&priv->lock);
+ pending = priv->pending & crtcs;
+ spin_unlock(&priv->lock);
+
+ return pending;
+}
+
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+ bool async)
+{
+ struct exynos_drm_private *priv = dev->dev_private;
+ struct exynos_atomic_commit *commit;
+ int i, ret;
+
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit)
+ return -ENOMEM;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret) {
+ kfree(commit);
+ return ret;
+ }
+
+ /* This is the point of no return */
+
+ INIT_WORK(&commit->work, exynos_drm_atomic_work);
+ commit->dev = dev;
+ commit->state = state;
+
+ /* Wait until all affected CRTCs have completed previous commits and
+ * mark them as pending.
+ */
+ for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+ if (state->crtcs[i])
+ commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+ }
+
+ wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
+
+ spin_lock(&priv->lock);
+ priv->pending |= commit->crtcs;
+ spin_unlock(&priv->lock);
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ if (async)
+ schedule_work(&commit->work);
+ else
+ exynos_atomic_commit_complete(commit);
+
+ return 0;
+}
+
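
exynos_atomic_commit() serializes commits per CRTC: a commit records the CRTCs it touches in a bitmask and must wait while any of them still has a commit pending. A standalone illustration of the bitmask test (hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pending = 1u << 0;                 /* CRTC 0 busy        */
        uint32_t commit_a = 1u << 0;                /* touches CRTC 0     */
        uint32_t commit_b = (1u << 1) | (1u << 2);  /* touches CRTCs 1, 2 */

        printf("commit_a must wait: %s\n", (pending & commit_a) ? "yes" : "no");
        printf("commit_b must wait: %s\n", (pending & commit_b) ? "yes" : "no");
        return 0;
    }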
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
@@ -242,25 +403,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
- exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
- vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
- exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
- exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
- exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
- exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
- exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
- exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
- exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -277,11 +438,10 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+ | DRIVER_ATOMIC | DRIVER_RENDER,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
- .suspend = exynos_drm_suspend,
- .resume = exynos_drm_resume,
.open = exynos_drm_open,
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
@@ -297,8 +457,12 @@ static struct drm_driver exynos_drm_driver = {
.dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = exynos_dmabuf_prime_export,
- .gem_prime_import = exynos_dmabuf_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
+ .gem_prime_vmap = exynos_drm_gem_prime_vmap,
+ .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
@@ -345,9 +509,6 @@ static struct platform_driver exynos_drm_platform_driver;
* because connector requires pipe number of its crtc during initialization.
*/
static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- &vidi_driver,
-#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
&fimd_driver,
#endif
@@ -370,6 +531,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
&mixer_driver,
&hdmi_driver,
#endif
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ &vidi_driver,
+#endif
};
static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f160c1e5..b7ba21dfb696 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@ enum exynos_drm_output_type {
* - the unit is screen coordinates.
* @src_y: offset y on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @src_width: width of a partial image to be displayed from framebuffer.
- * @src_height: height of a partial image to be displayed from framebuffer.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
+ * @src_w: width of a partial image to be displayed from framebuffer.
+ * @src_h: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
- * @crtc_width: window width to be displayed (hardware screen).
- * @crtc_height: window height to be displayed (hardware screen).
- * @mode_width: width of screen mode.
- * @mode_height: height of screen mode.
+ * @crtc_w: window width to be displayed (hardware screen).
+ * @crtc_h: window height to be displayed (hardware screen).
* @h_ratio: horizontal scaling ratio, 16.16 fixed point
* @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @refresh: refresh rate.
- * @scan_flag: interlace or progressive way.
- * (it could be DRM_MODE_FLAG_*)
- * @bpp: pixel size.(in bit)
- * @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus (DMA) addresses of the memory region
* allocated for an overlay.
* @zpos: order of overlay layer (z position).
@@ -73,73 +64,17 @@ struct exynos_drm_plane {
struct drm_plane base;
unsigned int src_x;
unsigned int src_y;
- unsigned int src_width;
- unsigned int src_height;
- unsigned int fb_width;
- unsigned int fb_height;
+ unsigned int src_w;
+ unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int mode_width;
- unsigned int mode_height;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
unsigned int h_ratio;
unsigned int v_ratio;
- unsigned int refresh;
- unsigned int scan_flag;
- unsigned int bpp;
- unsigned int pitch;
- uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
unsigned int zpos;
-};
-
-/*
- * Exynos DRM Display Structure.
- * - this structure is common to analog tv, digital tv and lcd panel.
- *
- * @create_connector: initialize and register a new connector
- * @remove: cleans up the display for removal
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- * would be called by encoder->mode_set().
- * @check_mode: check if mode is valid or not.
- * @dpms: display device on or off.
- * @commit: apply changes to hw
- */
-struct exynos_drm_display;
-struct exynos_drm_display_ops {
- int (*create_connector)(struct exynos_drm_display *display,
- struct drm_encoder *encoder);
- void (*remove)(struct exynos_drm_display *display);
- void (*mode_fixup)(struct exynos_drm_display *display,
- struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
- void (*mode_set)(struct exynos_drm_display *display,
- struct drm_display_mode *mode);
- int (*check_mode)(struct exynos_drm_display *display,
- struct drm_display_mode *mode);
- void (*dpms)(struct exynos_drm_display *display, int mode);
- void (*commit)(struct exynos_drm_display *display);
-};
-
-/*
- * Exynos drm display structure, maps 1:1 with an encoder/connector
- *
- * @list: the list entry for this manager
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @encoder: encoder object this display maps to
- * @connector: connector object this display maps to
- * @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the display's implementation specific context
- */
-struct exynos_drm_display {
- struct list_head list;
- enum exynos_drm_output_type type;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct exynos_drm_display_ops *ops;
+ struct drm_framebuffer *pending_fb;
};
/*
@@ -153,8 +88,10 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
- * @win_commit: apply hardware specific overlay data to registers.
- * @win_disable: disable hardware specific overlay.
+ * @atomic_begin: prepare a window to receive an update
+ * @atomic_flush: mark the end of a window update
+ * @update_plane: apply hardware specific overlay data to registers.
+ * @disable_plane: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
* @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +110,16 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
- void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
- void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+ void (*atomic_begin)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
+ void (*update_plane)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
+ void (*disable_plane)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
+ void (*atomic_flush)(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane);
void (*te_handler)(struct exynos_drm_crtc *crtc);
void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
- void (*clear_channels)(struct exynos_drm_crtc *crtc);
};
/*
@@ -194,6 +136,8 @@ struct exynos_drm_crtc_ops {
* this pipe value.
* @enabled: if the crtc is enabled or not
* @event: vblank event that is currently queued for flip
+ * @wait_update: wait for all pending plane updates to finish
+ * @pending_update: number of pending plane updates in this crtc
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
*/
@@ -201,9 +145,9 @@ struct exynos_drm_crtc {
struct drm_crtc base;
enum exynos_drm_output_type type;
unsigned int pipe;
- bool enabled;
- wait_queue_head_t pending_flip_queue;
struct drm_pending_vblank_event *event;
+ wait_queue_head_t wait_update;
+ atomic_t pending_update;
const struct exynos_drm_crtc_ops *ops;
void *ctx;
};
@@ -229,6 +173,9 @@ struct drm_exynos_file_private {
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
* @pipe: the pipe number for this crtc/manager.
+ * @pending: the crtcs that have pending updates to finish
+ * @lock: protect access to @pending
+ * @wait: wait for an atomic commit to finish
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -244,6 +191,11 @@ struct exynos_drm_private {
unsigned long da_space_size;
unsigned int pipe;
+
+ /* for atomic commit */
+ u32 pending;
+ spinlock_t lock;
+ wait_queue_head_t wait;
};
/*
@@ -285,20 +237,26 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
#ifdef CONFIG_DRM_EXYNOS_DPI
-struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct exynos_drm_display *display);
+struct drm_encoder *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct drm_encoder *encoder);
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
#else
-static inline struct exynos_drm_display *
+static inline struct drm_encoder *
exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+static inline int exynos_dpi_remove(struct drm_encoder *encoder)
+{
+ return 0;
+}
+static inline int exynos_dpi_bind(struct drm_device *dev,
+ struct drm_encoder *encoder)
{
return 0;
}
#endif
-/* This function creates a encoder and a connector, and initializes them. */
-int exynos_drm_create_enc_conn(struct drm_device *dev,
- struct exynos_drm_display *display);
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+ bool async);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36cb8c2..12b03b364703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@ struct exynos_dsi_driver_data {
};
struct exynos_dsi {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct device_node *panel_node;
@@ -295,9 +295,9 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
-static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
{
- return container_of(d, struct exynos_dsi, display);
+ return container_of(e, struct exynos_dsi, encoder);
}
enum reg_idx {
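The encoder_to_dsi() helper above relies on struct drm_encoder now being embedded in struct exynos_dsi, so container_of() can recover the driver object from any encoder callback. A tiny usage sketch (hypothetical callback body):

static void some_encoder_hook_sketch(struct drm_encoder *encoder)
{
	/* Upcast from the embedded member back to the containing object. */
	struct exynos_dsi *dsi = encoder_to_dsi(encoder);

	dev_dbg(dsi->dev, "state = 0x%x\n", dsi->state);
}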
@@ -1272,7 +1272,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
{
struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
- struct drm_encoder *encoder = dsi->display.encoder;
+ struct drm_encoder *encoder = &dsi->encoder;
if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
}
-static int exynos_dsi_enable(struct exynos_dsi *dsi)
+static void exynos_dsi_enable(struct drm_encoder *encoder)
{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
int ret;
if (dsi->state & DSIM_STATE_ENABLED)
- return 0;
+ return;
ret = exynos_dsi_poweron(dsi);
if (ret < 0)
- return ret;
+ return;
dsi->state |= DSIM_STATE_ENABLED;
@@ -1535,7 +1536,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0) {
dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_poweroff(dsi);
- return ret;
+ return;
}
exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
- return ret;
+ return;
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
- return 0;
}
-static void exynos_dsi_disable(struct exynos_dsi *dsi)
+static void exynos_dsi_disable(struct drm_encoder *encoder)
{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
@@ -1571,26 +1572,6 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
exynos_dsi_poweroff(dsi);
}
-static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct exynos_dsi *dsi = display_to_dsi(display);
-
- if (dsi->panel) {
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_dsi_enable(dsi);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_dsi_disable(dsi);
- break;
- default:
- break;
- }
- }
-}
-
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -1601,10 +1582,10 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
if (dsi->panel)
drm_panel_attach(dsi->panel, &dsi->connector);
} else if (!dsi->panel_node) {
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
- display = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ encoder = platform_get_drvdata(to_platform_device(dsi->dev));
+ exynos_dsi_disable(encoder);
drm_panel_detach(dsi->panel);
dsi->panel = NULL;
}
@@ -1647,7 +1628,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
{
struct exynos_dsi *dsi = connector_to_dsi(connector);
- return dsi->display.encoder;
+ return &dsi->encoder;
}
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.best_encoder = exynos_dsi_best_encoder,
};
-static int exynos_dsi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
int ret;
@@ -1679,26 +1659,40 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void exynos_dsi_mode_set(struct exynos_drm_display *display,
- struct drm_display_mode *mode)
+static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct exynos_dsi *dsi = display_to_dsi(display);
- struct videomode *vm = &dsi->vm;
+ return true;
+}
- vm->hactive = mode->hdisplay;
- vm->vactive = mode->vdisplay;
- vm->vfront_porch = mode->vsync_start - mode->vdisplay;
- vm->vback_porch = mode->vtotal - mode->vsync_end;
- vm->vsync_len = mode->vsync_end - mode->vsync_start;
- vm->hfront_porch = mode->hsync_start - mode->hdisplay;
- vm->hback_porch = mode->htotal - mode->hsync_end;
- vm->hsync_len = mode->hsync_end - mode->hsync_start;
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = adjusted_mode;
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
}
-static struct exynos_drm_display_ops exynos_dsi_display_ops = {
- .create_connector = exynos_dsi_create_connector,
+static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+ .mode_fixup = exynos_dsi_mode_fixup,
.mode_set = exynos_dsi_mode_set,
- .dpms = exynos_dsi_dpms
+ .enable = exynos_dsi_enable,
+ .disable = exynos_dsi_disable,
+};
+
+static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@@ -1821,22 +1815,35 @@ end:
static int exynos_dsi_bind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_device *drm_dev = data;
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_create_enc_conn(drm_dev, display);
+ ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
+ encoder->possible_crtcs = 1 << ret;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+
+ ret = exynos_dsi_create_connector(encoder);
if (ret) {
- DRM_ERROR("Encoder create [%d] failed with %d\n",
- display->type, ret);
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
return ret;
}
bridge = of_drm_find_bridge(dsi->bridge_node);
if (bridge) {
- display->encoder->bridge = bridge;
drm_bridge_attach(drm_dev, bridge);
}
@@ -1846,10 +1853,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
- struct exynos_dsi *dsi = display_to_dsi(display);
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
- exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dsi_disable(encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@@ -1870,9 +1877,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
- dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
- dsi->display.ops = &exynos_dsi_display_ops;
-
/* Set to an invalid value, to be checked later */
dsi->te_gpio = -ENOENT;
@@ -1948,7 +1952,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, &dsi->display);
+ platform_set_drvdata(pdev, &dsi->encoder);
return component_add(dev, &exynos_dsi_component_ops);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd520e45..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* exynos_drm_encoder.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-
-#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
- drm_encoder)
-
-/*
- * exynos specific encoder structure.
- *
- * @drm_encoder: encoder object.
- * @display: the display structure that maps to this encoder
- */
-struct exynos_drm_encoder {
- struct drm_encoder drm_encoder;
- struct exynos_drm_display *display;
-};
-
-static bool
-exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder != encoder)
- continue;
-
- if (display->ops->mode_fixup)
- display->ops->mode_fixup(display, connector, mode,
- adjusted_mode);
- }
-
- return true;
-}
-
-static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->mode_set)
- display->ops->mode_set(display, adjusted_mode);
-}
-
-static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->dpms)
- display->ops->dpms(display, DRM_MODE_DPMS_ON);
-
- if (display->ops->commit)
- display->ops->commit(display);
-}
-
-static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
-
- if (display->ops->dpms)
- display->ops->dpms(display, DRM_MODE_DPMS_OFF);
-}
-
-static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
- .mode_fixup = exynos_drm_encoder_mode_fixup,
- .mode_set = exynos_drm_encoder_mode_set,
- .enable = exynos_drm_encoder_enable,
- .disable = exynos_drm_encoder_disable,
-};
-
-static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(exynos_encoder);
-}
-
-static struct drm_encoder_funcs exynos_encoder_funcs = {
- .destroy = exynos_drm_encoder_destroy,
-};
-
-static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
-{
- struct drm_encoder *clone;
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_display *display = exynos_encoder->display;
- unsigned int clone_mask = 0;
- int cnt = 0;
-
- list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
- switch (display->type) {
- case EXYNOS_DISPLAY_TYPE_LCD:
- case EXYNOS_DISPLAY_TYPE_HDMI:
- case EXYNOS_DISPLAY_TYPE_VIDI:
- clone_mask |= (1 << (cnt++));
- break;
- default:
- continue;
- }
- }
-
- return clone_mask;
-}
-
-void exynos_drm_encoder_setup(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- encoder->possible_clones = exynos_drm_encoder_clones(encoder);
-}
-
-struct drm_encoder *
-exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_display *display,
- unsigned long possible_crtcs)
-{
- struct drm_encoder *encoder;
- struct exynos_drm_encoder *exynos_encoder;
-
- if (!possible_crtcs)
- return NULL;
-
- exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
- if (!exynos_encoder)
- return NULL;
-
- exynos_encoder->display = display;
- encoder = &exynos_encoder->drm_encoder;
- encoder->possible_crtcs = possible_crtcs;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
- drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
-
- drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
-
- DRM_DEBUG_KMS("encoder has been created\n");
-
- return encoder;
-}
-
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
-{
- return to_exynos_encoder(encoder)->display;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8dd93a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ENCODER_H_
-#define _EXYNOS_DRM_ENCODER_H_
-
-void exynos_drm_encoder_setup(struct drm_device *dev);
-struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
- struct exynos_drm_display *mgr,
- unsigned long possible_crtcs);
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e6eae2..084280859589 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -23,7 +23,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
@@ -33,12 +32,10 @@
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer object.
- * @buf_cnt: a buffer count to drm framebuffer.
* @exynos_gem_obj: array of exynos specific gem object containing a gem object.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
- unsigned int buf_cnt;
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
@@ -98,10 +95,6 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- /* This fb should have only one gem object. */
- if (WARN_ON(exynos_fb->buf_cnt != 1))
- return -EINVAL;
-
return drm_gem_handle_create(file_priv,
&exynos_fb->exynos_gem_obj[0]->base, handle);
}
@@ -122,138 +115,96 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.dirty = exynos_drm_fb_dirty,
};
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
- unsigned int cnt)
-{
- struct exynos_drm_fb *exynos_fb;
-
- exynos_fb = to_exynos_fb(fb);
-
- exynos_fb->buf_cnt = cnt;
-}
-
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
-{
- struct exynos_drm_fb *exynos_fb;
-
- exynos_fb = to_exynos_fb(fb);
-
- return exynos_fb->buf_cnt;
-}
-
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+ struct exynos_drm_gem_obj **gem_obj,
+ int count)
{
struct exynos_drm_fb *exynos_fb;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ int i;
int ret;
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0)
- return ERR_PTR(ret);
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb)
return ERR_PTR(-ENOMEM);
+ for (i = 0; i < count; i++) {
+ ret = check_fb_gem_memory_type(dev, gem_obj[i]);
+ if (ret < 0)
+ goto err;
+
+ exynos_fb->exynos_gem_obj[i] = gem_obj[i];
+ }
+
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
- if (ret) {
- kfree(exynos_fb);
+ if (ret < 0) {
DRM_ERROR("failed to initialize framebuffer\n");
- return ERR_PTR(ret);
+ goto err;
}
return &exynos_fb->fb;
+
+err:
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
}
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER];
struct drm_gem_object *obj;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_fb *exynos_fb;
- int i, ret;
-
- exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb)
- return ERR_PTR(-ENOMEM);
-
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- ret = -ENOENT;
- goto err_free;
- }
-
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
- exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
-
- DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
+ struct drm_framebuffer *fb;
+ int i;
+ int ret;
- for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
obj = drm_gem_object_lookup(dev, file_priv,
- mode_cmd->handles[i]);
+ mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
- exynos_fb->buf_cnt = i;
- goto err_unreference;
+ goto err;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
- exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
-
- ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0)
- goto err_unreference;
+ gem_objs[i] = to_exynos_gem_obj(obj);
}
- ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
- if (ret) {
- DRM_ERROR("failed to init framebuffer.\n");
- goto err_unreference;
+ fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err;
}
- return &exynos_fb->fb;
+ return fb;
-err_unreference:
- for (i = 0; i < exynos_fb->buf_cnt; i++) {
- struct drm_gem_object *obj;
+err:
+ while (i--)
+ drm_gem_object_unreference_unlocked(&gem_objs[i]->base);
- obj = &exynos_fb->exynos_gem_obj[i]->base;
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
- }
-err_free:
- kfree(exynos_fb);
return ERR_PTR(ret);
}
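With the array-based exynos_drm_framebuffer_init() above, a caller now looks up one GEM object per format plane and hands them all over in a single call. A hedged usage sketch (luma_obj, chroma_obj and mode_cmd are assumed to exist):

	/* Hypothetical two-plane (e.g. NV12-style) framebuffer creation. */
	struct exynos_drm_gem_obj *gem[2] = { luma_obj, chroma_obj };
	struct drm_framebuffer *fb;

	fb = exynos_drm_framebuffer_init(dev, &mode_cmd, gem, 2);
	if (IS_ERR(fb))
		return PTR_ERR(fb);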
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
- int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+ int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *obj;
if (index >= MAX_FB_BUFFER)
return NULL;
- buffer = exynos_fb->exynos_gem_obj[index]->buffer;
- if (!buffer)
+ obj = exynos_fb->exynos_gem_obj[index];
+ if (!obj)
return NULL;
- DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
- return buffer;
+ return obj;
}
static void exynos_drm_output_poll_changed(struct drm_device *dev)
@@ -267,41 +218,6 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
exynos_drm_fbdev_init(dev);
}
-static int exynos_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async)
-{
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* This is the point of no return */
-
- drm_atomic_helper_swap_state(dev, state);
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- /*
- * Exynos can't update planes with CRTCs and encoders disabled,
- * its updates routines, specially for FIMD, requires the clocks
- * to be enabled. So it is necessary to handle the modeset operations
- * *before* the commit_planes() step, this way it will always
- * have the relevant clocks enabled to perform the update.
- */
-
- drm_atomic_helper_commit_planes(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
-
- return 0;
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
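exynos_atomic_commit() itself moves out of this file (see its new declaration in exynos_drm_drv.h above), but the ordering constraint its comment records is worth restating. A condensed sketch, assuming the helper body moved unchanged:

	/* Modeset disables/enables must run before the plane commit so
	 * that the display-domain clocks (FIMD in particular) are
	 * guaranteed on while the window registers are written. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);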
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b37566..85e4445b920e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,22 +14,18 @@
#ifndef _EXYNOS_DRM_FB_H_
#define _EXYNOS_DRM_FB_H
+#include "exynos_drm_gem.h"
+
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
+ struct exynos_drm_gem_obj **gem_obj,
+ int count);
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
-/* set a buffer count to drm framebuffer. */
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
- unsigned int cnt);
-
-/* get a buffer count to drm framebuffer. */
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
-
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b4bdfa..a221f753ad9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -21,7 +21,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
@@ -32,7 +31,7 @@
struct exynos_drm_fbdev {
struct drm_fb_helper drm_fb_helper;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *obj;
};
static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -40,8 +39,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
{
struct drm_fb_helper *helper = info->par;
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
- struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
unsigned long vm_size;
int ret;
@@ -49,11 +47,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
vm_size = vma->vm_end - vma->vm_start;
- if (vm_size > buffer->size)
+ if (vm_size > obj->size)
return -EINVAL;
- ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
- buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+ obj->size, &obj->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -65,9 +63,9 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
.fb_mmap = exynos_drm_fb_mmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -76,42 +74,42 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes,
- struct drm_framebuffer *fb)
+ struct drm_fb_helper_surface_size *sizes,
+ struct exynos_drm_gem_obj *obj)
{
- struct fb_info *fbi = helper->fbdev;
- struct exynos_drm_gem_buf *buffer;
+ struct fb_info *fbi;
+ struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned int nr_pages;
unsigned long offset;
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ DRM_ERROR("failed to allocate fb info.\n");
+ return PTR_ERR(fbi);
+ }
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &exynos_drm_fb_ops;
+
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- /* RGB formats use only one buffer */
- buffer = exynos_drm_fb_buffer(fb, 0);
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null.\n");
- return -EFAULT;
- }
-
- nr_pages = buffer->size >> PAGE_SHIFT;
+ nr_pages = obj->size >> PAGE_SHIFT;
- buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
- nr_pages, VM_MAP,
+ obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
- if (!buffer->kvaddr) {
+ if (!obj->kvaddr) {
DRM_ERROR("failed to map pages to kernel space.\n");
+ drm_fb_helper_release_fbi(helper);
return -EIO;
}
- /* buffer count to framebuffer always is 1 at booting time. */
- exynos_drm_fb_set_buf_cnt(fb, 1);
-
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
- fbi->screen_base = buffer->kvaddr + offset;
+ fbi->screen_base = obj->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -122,9 +120,8 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *obj;
struct drm_device *dev = helper->dev;
- struct fb_info *fbi;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct platform_device *pdev = dev->platformdev;
unsigned long size;
@@ -142,69 +139,44 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- fbi = framebuffer_alloc(0, &pdev->dev);
- if (!fbi) {
- DRM_ERROR("failed to allocate fb info.\n");
- ret = -ENOMEM;
- goto out;
- }
-
size = mode_cmd.pitches[0] * mode_cmd.height;
- exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+ obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
/*
* If physically contiguous memory allocation fails and if IOMMU is
* supported then try to get buffer from non physically contiguous
* memory area.
*/
- if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ if (IS_ERR(obj) && is_drm_iommu_supported(dev)) {
dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
- exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
- size);
+ obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
}
- if (IS_ERR(exynos_gem_obj)) {
- ret = PTR_ERR(exynos_gem_obj);
- goto err_release_framebuffer;
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out;
}
- exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+ exynos_fbdev->obj = obj;
- helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
- &exynos_gem_obj->base);
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1);
if (IS_ERR(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
- helper->fbdev = fbi;
-
- fbi->par = helper;
- fbi->flags = FBINFO_FLAG_DEFAULT;
- fbi->fbops = &exynos_drm_fb_ops;
-
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("failed to allocate cmap.\n");
- goto err_destroy_framebuffer;
- }
-
- ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
+ ret = exynos_drm_fbdev_update(helper, sizes, obj);
if (ret < 0)
- goto err_dealloc_cmap;
+ goto err_destroy_framebuffer;
mutex_unlock(&dev->struct_mutex);
return ret;
-err_dealloc_cmap:
- fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
- exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_framebuffer:
- framebuffer_release(fbi);
+ exynos_drm_gem_destroy(obj);
/*
* if failed, all resources allocated above would be released by
@@ -297,11 +269,11 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
- struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
struct drm_framebuffer *fb;
- if (exynos_gem_obj->buffer->kvaddr)
- vunmap(exynos_gem_obj->buffer->kvaddr);
+ if (obj->kvaddr)
+ vunmap(obj->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +284,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
}
}
- /* release linux framebuffer */
- if (fb_helper->fbdev) {
- struct fb_info *info;
- int ret;
-
- info = fb_helper->fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(fb_helper);
+ drm_fb_helper_release_fbi(fb_helper);
drm_fb_helper_fini(fb_helper);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c8798e..750a9e6b9e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -59,6 +59,7 @@
#define VIDWnALPHA1(win) (VIDW_ALPHA + 0x04 + (win) * 8)
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_START_S(win, buf) (VIDW_BUF_START_S(buf) + (win) * 8)
#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
@@ -169,7 +170,7 @@ struct fimd_context {
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
- struct exynos_drm_display *display;
+ struct drm_encoder *encoder;
};
static const struct of_device_id fimd_driver_dt_match[] = {
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+static const uint32_t fimd_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
@@ -348,13 +357,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
pm_runtime_put(ctx->dev);
}
-static void fimd_iommu_detach_devices(struct fimd_context *ctx)
-{
- /* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(ctx->drm_dev))
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
-}
-
static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
const struct drm_display_mode *mode)
{
@@ -486,9 +488,9 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
}
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
{
- struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = WINCONx_ENWIN;
@@ -498,11 +500,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* So the request format is ARGB8888 then change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (plane->pixel_format == DRM_FORMAT_ARGB8888)
- plane->pixel_format = DRM_FORMAT_XRGB8888;
+ if (fb->pixel_format == DRM_FORMAT_ARGB8888)
+ fb->pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +540,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +550,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
@@ -598,6 +600,16 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
{
u32 reg, bits, val;
+ /*
+ * The SHADOWCON/PRTCON register is used to control update timing.
+ *
+ * For example, if only the width field of a register has been set
+ * when the DMA starts, the FIMD hardware could malfunction. So,
+ * while window protection is set, the register fields with the '_F'
+ * prefix are not updated at vsync either, but are applied only once
+ * the window is unprotected again.
+ */
+
if (ctx->driver_data->has_shadowcon) {
reg = SHADOWCON;
bits = SHADOWCON_WINx_PROTECT(win);
@@ -614,41 +626,45 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_atomic_begin(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct fimd_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
- dma_addr_t dma_addr;
- unsigned long val, size, offset;
- unsigned int last_x, last_y, buf_offsize, line_size;
if (ctx->suspended)
return;
- if (win < 0 || win >= WINDOWS_NR)
- return;
+ fimd_shadow_protect_win(ctx, plane->zpos, true);
+}
- plane = &ctx->planes[win];
+static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct fimd_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
- /*
- * SHADOWCON/PRTCON register is used for enabling timing.
- *
- * for example, once only width value of a register is set,
- * if the dma is started then fimd hardware could malfunction so
- * with protect window setting, the register fields with prefix '_F'
- * wouldn't be updated at vsync also but updated once unprotect window
- * is set.
- */
+ fimd_shadow_protect_win(ctx, plane->zpos, false);
+}
- /* protect windows */
- fimd_shadow_protect_win(ctx, win, true);
+static void fimd_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
+{
+ struct fimd_context *ctx = crtc->ctx;
+ struct drm_plane_state *state = plane->base.state;
+ dma_addr_t dma_addr;
+ unsigned long val, size, offset;
+ unsigned int last_x, last_y, buf_offsize, line_size;
+ unsigned int win = plane->zpos;
+ unsigned int bpp = state->fb->bits_per_pixel >> 3;
+ unsigned int pitch = state->fb->pitches[0];
+ if (ctx->suspended)
+ return;
- offset = plane->src_x * (plane->bpp >> 3);
- offset += plane->src_y * plane->pitch;
+ offset = plane->src_x * bpp;
+ offset += plane->src_y * pitch;
/* buffer start address */
dma_addr = plane->dma_addr[0] + offset;
@@ -656,18 +672,18 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = plane->pitch * plane->crtc_height;
+ size = pitch * plane->crtc_h;
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
(unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_width, plane->crtc_height);
+ plane->crtc_w, plane->crtc_h);
/* buffer size */
- buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
- line_size = plane->crtc_width * (plane->bpp >> 3);
+ buf_offsize = pitch - (plane->crtc_w * bpp);
+ line_size = plane->crtc_w * bpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +697,10 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_width;
+ last_x = plane->crtc_x + plane->crtc_w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_height;
+ last_y = plane->crtc_y + plane->crtc_h;
if (last_y)
last_y--;
@@ -701,13 +717,13 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = plane->crtc_width * plane->crtc_height;
+ val = plane->crtc_w * plane->crtc_h;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win);
+ fimd_win_set_pixfmt(ctx, win, state->fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -718,36 +734,23 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
if (ctx->driver_data->has_shadowcon)
fimd_enable_shadow_channel_path(ctx, win, true);
- /* Enable DMA channel and unprotect windows */
- fimd_shadow_protect_win(ctx, win, false);
-
if (ctx->i80_if)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct fimd_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
+ unsigned int win = plane->zpos;
if (ctx->suspended)
return;
- /* protect windows */
- fimd_shadow_protect_win(ctx, win, true);
-
fimd_enable_video_output(ctx, win, false);
if (ctx->driver_data->has_shadowcon)
fimd_enable_shadow_channel_path(ctx, win, false);
-
- /* unprotect windows */
- fimd_shadow_protect_win(ctx, win, false);
}
static void fimd_enable(struct exynos_drm_crtc *crtc)
@@ -795,7 +798,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
* a destroyed buffer later.
*/
for (i = 0; i < WINDOWS_NR; i++)
- fimd_win_disable(crtc, i);
+ fimd_disable_plane(crtc, &ctx->planes[i]);
fimd_enable_vblank(crtc);
fimd_wait_for_vblank(crtc);
@@ -862,7 +865,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
}
if (test_bit(0, &ctx->irq_flags))
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,17 +893,19 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.wait_for_vblank = fimd_wait_for_vblank,
- .win_commit = fimd_win_commit,
- .win_disable = fimd_win_disable,
+ .atomic_begin = fimd_atomic_begin,
+ .update_plane = fimd_update_plane,
+ .disable_plane = fimd_disable_plane,
+ .atomic_flush = fimd_atomic_flush,
.te_handler = fimd_te_handler,
.clock_enable = fimd_dp_clock_enable,
- .clear_channels = fimd_clear_channels,
};
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
- u32 val, clear_bit;
+ u32 val, clear_bit, start, start_s;
+ int win;
val = readl(ctx->regs + VIDINTCON1);
@@ -912,15 +917,25 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
if (ctx->pipe < 0 || !ctx->drm_dev)
goto out;
- if (ctx->i80_if) {
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ if (!ctx->i80_if)
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+
+ for (win = 0 ; win < WINDOWS_NR ; win++) {
+ struct exynos_drm_plane *plane = &ctx->planes[win];
+
+ if (!plane->pending_fb)
+ continue;
+ start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
+ start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
+ if (start == start_s)
+ exynos_drm_crtc_finish_update(ctx->crtc, plane);
+ }
+
+ if (ctx->i80_if) {
/* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
} else {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
-
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
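The per-window loop added to the interrupt handler above implements flip-completion detection against the shadow registers. Restated with comments (same reads as in the hunk):

	/* VIDWx_BUF_START holds the address software programmed;
	 * VIDWx_BUF_START_S mirrors what the scanout engine latched. */
	start   = readl(ctx->regs + VIDWx_BUF_START(win, 0));
	start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));

	/* Equal values mean the new buffer is live, so the pending
	 * update on this plane can be signalled as finished. */
	if (start == start_s)
		exynos_drm_crtc_finish_update(ctx->crtc, plane);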
@@ -949,7 +964,8 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, zpos);
+ 1 << ctx->pipe, type, fimd_formats,
+ ARRAY_SIZE(fimd_formats), zpos);
if (ret)
return ret;
}
@@ -961,10 +977,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
- if (ctx->display)
- exynos_drm_create_enc_conn(drm_dev, ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_bind(drm_dev, ctx->encoder);
+
+ if (is_drm_iommu_supported(drm_dev))
+ fimd_clear_channels(ctx->crtc);
- ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+ ret = drm_iommu_attach_device(drm_dev, dev);
if (ret)
priv->pipe--;
@@ -978,10 +997,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- fimd_iommu_detach_devices(ctx);
+ drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
- if (ctx->display)
- exynos_dpi_remove(ctx->display);
+ if (ctx->encoder)
+ exynos_dpi_remove(ctx->encoder);
}
static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1107,9 @@ static int fimd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display)) {
- return PTR_ERR(ctx->display);
- }
+ ctx->encoder = exynos_dpi_probe(dev);
+ if (IS_ERR(ctx->encoder))
+ return PTR_ERR(ctx->encoder);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a250830808..3734c34aed16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,11 +48,13 @@
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_STRIDE_REG 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_STRIDE_REG 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
@@ -148,6 +150,7 @@ struct g2d_cmdlist {
* A structure of buffer description
*
* @format: color format
+ * @stride: buffer stride/pitch in bytes
* @left_x: the x coordinates of left top corner
* @top_y: the y coordinates of left top corner
* @right_x: the x coordinates of right bottom corner
@@ -156,6 +159,7 @@ struct g2d_cmdlist {
*/
struct g2d_buf_desc {
unsigned int format;
+ unsigned int stride;
unsigned int left_x;
unsigned int top_y;
unsigned int right_x;
@@ -190,10 +194,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
- struct page **pages;
- unsigned int npages;
+ struct frame_vector *vec;
struct sg_table *sgt;
- struct vm_area_struct *vma;
atomic_t refcount;
bool in_pool;
bool out_of_list;
@@ -363,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
{
struct g2d_cmdlist_userptr *g2d_userptr =
(struct g2d_cmdlist_userptr *)obj;
+ struct page **pages;
if (!obj)
return;
@@ -382,19 +385,21 @@ out:
exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL);
- exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
- g2d_userptr->npages,
- g2d_userptr->vma);
+ pages = frame_vector_pages(g2d_userptr->vec);
+ if (!IS_ERR(pages)) {
+ int i;
- exynos_gem_put_vma(g2d_userptr->vma);
+ for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ }
+ put_vaddr_frames(g2d_userptr->vec);
+ frame_vector_destroy(g2d_userptr->vec);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
sg_free_table(g2d_userptr->sgt);
kfree(g2d_userptr->sgt);
-
- drm_free_large(g2d_userptr->pages);
kfree(g2d_userptr);
}
@@ -408,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
struct g2d_data *g2d;
- struct page **pages;
struct sg_table *sgt;
- struct vm_area_struct *vma;
unsigned long start, end;
unsigned int npages, offset;
int ret;
@@ -456,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-ENOMEM);
atomic_set(&g2d_userptr->refcount, 1);
+ g2d_userptr->size = size;
start = userptr & PAGE_MASK;
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->npages = npages;
-
- pages = drm_calloc_large(npages, sizeof(struct page *));
- if (!pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ g2d_userptr->vec = frame_vector_create(npages);
+ if (!g2d_userptr->vec) {
ret = -ENOMEM;
goto err_free;
}
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, userptr);
- if (!vma) {
- up_read(&current->mm->mmap_sem);
- DRM_ERROR("failed to get vm region.\n");
+ ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ if (ret != npages) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ if (ret < 0)
+ goto err_destroy_framevec;
ret = -EFAULT;
- goto err_free_pages;
+ goto err_put_framevec;
}
-
- if (vma->vm_end < userptr + size) {
- up_read(&current->mm->mmap_sem);
- DRM_ERROR("vma is too small.\n");
+ if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
ret = -EFAULT;
- goto err_free_pages;
- }
-
- g2d_userptr->vma = exynos_gem_get_vma(vma);
- if (!g2d_userptr->vma) {
- up_read(&current->mm->mmap_sem);
- DRM_ERROR("failed to copy vma.\n");
- ret = -ENOMEM;
- goto err_free_pages;
- }
-
- g2d_userptr->size = size;
-
- ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
- npages, pages, vma);
- if (ret < 0) {
- up_read(&current->mm->mmap_sem);
- DRM_ERROR("failed to get user pages from userptr.\n");
- goto err_put_vma;
+ goto err_put_framevec;
}
- up_read(&current->mm->mmap_sem);
- g2d_userptr->pages = pages;
-
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
- goto err_free_userptr;
+ goto err_put_framevec;
}
- ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
- size, GFP_KERNEL);
+ ret = sg_alloc_table_from_pages(sgt,
+ frame_vector_pages(g2d_userptr->vec),
+ npages, offset, size, GFP_KERNEL);
if (ret < 0) {
DRM_ERROR("failed to get sgt from pages.\n");
goto err_free_sgt;
@@ -549,16 +527,11 @@ err_sg_free_table:
err_free_sgt:
kfree(sgt);
-err_free_userptr:
- exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
- g2d_userptr->npages,
- g2d_userptr->vma);
-
-err_put_vma:
- exynos_gem_put_vma(g2d_userptr->vma);
+err_put_framevec:
+ put_vaddr_frames(g2d_userptr->vec);
-err_free_pages:
- drm_free_large(pages);
+err_destroy_framevec:
+ frame_vector_destroy(g2d_userptr->vec);
err_free:
kfree(g2d_userptr);
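The userptr path now relies on the frame_vector API instead of hand-rolled VMA handling. The acquire/release pairing, condensed into a sketch (start and npages as computed above; error handling elided and build_sgt_from() is a hypothetical consumer):

	/* Acquire: pin npages of user memory for writing. */
	struct frame_vector *vec = frame_vector_create(npages);
	int got = get_vaddr_frames(start, npages, true, true, vec);

	if (got == npages && frame_vector_to_pages(vec) >= 0)
		build_sgt_from(frame_vector_pages(vec));

	/* Release: dirty each page via set_page_dirty_lock() (as the
	 * put path above does), then unpin and free the vector. */
	put_vaddr_frames(vec);
	frame_vector_destroy(vec);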
@@ -589,6 +562,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_STRIDE_REG:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
@@ -598,6 +572,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
+ case G2D_DST_STRIDE_REG:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
@@ -652,8 +627,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
enum g2d_reg_type reg_type,
unsigned long size)
{
- unsigned int width, height;
- unsigned long area;
+ int width, height;
+ unsigned long bpp, last_pos;
/*
* check source and destination buffers only.
@@ -662,22 +637,37 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
return true;
- width = buf_desc->right_x - buf_desc->left_x;
+ /* This check also makes sure that right_x > left_x. */
+ width = (int)buf_desc->right_x - (int)buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
- DRM_ERROR("width[%u] is out of range!\n", width);
+ DRM_ERROR("width[%d] is out of range!\n", width);
return false;
}
- height = buf_desc->bottom_y - buf_desc->top_y;
+ /* This check also makes sure that bottom_y > top_y. */
+ height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
- DRM_ERROR("height[%u] is out of range!\n", height);
+ DRM_ERROR("height[%d] is out of range!\n", height);
return false;
}
- area = (unsigned long)width * (unsigned long)height *
- g2d_get_buf_bpp(buf_desc->format);
- if (area > size) {
- DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+ bpp = g2d_get_buf_bpp(buf_desc->format);
+
+ /* Compute the position of the last byte that the engine accesses. */
+ last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
+ (unsigned long)buf_desc->stride +
+ (unsigned long)buf_desc->right_x * bpp - 1;
+
+ /*
+ * Since right_x > left_x and bottom_y > top_y we already know
+ * that first_pos < last_pos (first_pos being the position of
+ * the first byte the engine accesses), so it only remains to
+ * check that last_pos is smaller than the buffer size.
+ */
+
+ if (last_pos >= size) {
+ DRM_ERROR("last engine access position [%lu] "
+ "is out of range [%lu]!\n", last_pos, size);
return false;
}
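A worked instance of the new bound, with hypothetical numbers, shows why it is stricter than the old area check:

	/* 64x64 XRGB8888 area (left_x = top_y = 0, right_x = bottom_y = 64)
	 * on a 512-byte stride, bpp = 4: */
	last_pos = (64 - 1) * 512 + 64 * 4 - 1;	/* = 32511 */

The buffer must therefore be at least 32512 bytes, whereas the old width * height * bpp check (64 * 64 * 4 = 16384) would have accepted a buffer half that size and let the engine run past its end whenever the stride exceeds width * bpp.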
@@ -973,8 +963,6 @@ static int g2d_check_reg_offset(struct device *dev,
goto err;
reg_type = g2d_get_reg_type(reg_offset);
- if (reg_type == REG_TYPE_NONE)
- goto err;
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -983,14 +971,22 @@ static int g2d_check_reg_offset(struct device *dev,
} else
buf_info->types[reg_type] = BUF_TYPE_GEM;
break;
+ case G2D_SRC_STRIDE_REG:
+ case G2D_DST_STRIDE_REG:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+
+ buf_desc = &buf_info->descs[reg_type];
+ buf_desc->stride = cmdlist->data[index + 1];
+ break;
case G2D_SRC_COLOR_MODE:
case G2D_DST_COLOR_MODE:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(reg_offset);
- if (reg_type == REG_TYPE_NONE)
- goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1003,8 +999,6 @@ static int g2d_check_reg_offset(struct device *dev,
goto err;
reg_type = g2d_get_reg_type(reg_offset);
- if (reg_type == REG_TYPE_NONE)
- goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1018,8 +1012,6 @@ static int g2d_check_reg_offset(struct device *dev,
goto err;
reg_type = g2d_get_reg_type(reg_offset);
- if (reg_type == REG_TYPE_NONE)
- goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1319,9 +1311,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return ret;
}
- if (!is_drm_iommu_supported(drm_dev))
- return 0;
-
ret = drm_iommu_attach_device(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1323,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
- if (!is_drm_iommu_supported(drm_dev))
- return;
-
drm_iommu_detach_device(drm_dev, dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..f12fbc36b120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
#include <drm/drm_vma_manager.h>
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
- unsigned int out_msg;
+ struct drm_device *dev = obj->base.dev;
+ enum dma_attr attr;
+ unsigned int nr_pages;
- switch (msg) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- out_msg = VM_FAULT_NOPAGE;
- break;
+ if (obj->dma_addr) {
+ DRM_DEBUG_KMS("already allocated.\n");
+ return 0;
+ }
- case -ENOMEM:
- out_msg = VM_FAULT_OOM;
- break;
+ init_dma_attrs(&obj->dma_attrs);
- default:
- out_msg = VM_FAULT_SIGBUS;
- break;
- }
+ /*
+ * If EXYNOS_BO_CONTIG is set, a fully physically contiguous
+ * memory region will be allocated; otherwise the allocation is
+ * made as physically contiguous as possible.
+ */
+ if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
- return out_msg;
-}
+ /*
+ * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
+ * write-combined mapping; otherwise use a cacheable mapping.
+ */
+ if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
-static int check_gem_flags(unsigned int flags)
-{
- if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid flags.\n");
- return -EINVAL;
- }
+ dma_set_attr(attr, &obj->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
- return 0;
-}
+ nr_pages = obj->size >> PAGE_SHIFT;
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
- struct vm_area_struct *vma)
-{
- DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+ if (!is_drm_iommu_supported(dev)) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
- /* non-cachable as default. */
- if (obj->flags & EXYNOS_BO_CACHABLE)
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- else if (obj->flags & EXYNOS_BO_WC)
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- else
- vma->vm_page_prot =
- pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+ obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+ if (!obj->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ return -ENOMEM;
+ }
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
- /* TODO */
+ obj->cookie = dma_alloc_attrs(dev->dev,
+ obj->size,
+ &obj->dma_addr, GFP_KERNEL,
+ &obj->dma_attrs);
+ if (!obj->cookie) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ drm_free_large(obj->pages);
+ return -ENOMEM;
+ }
- return roundup(size, PAGE_SIZE);
+ start_addr = obj->dma_addr;
+ while (i < nr_pages) {
+ obj->pages[i] = phys_to_page(start_addr);
+ start_addr += PAGE_SIZE;
+ i++;
+ }
+ } else {
+ obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+ &obj->dma_addr, GFP_KERNEL,
+ &obj->dma_attrs);
+ if (!obj->pages) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
+ }
+
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)obj->dma_addr,
+ obj->size);
+
+ return 0;
}
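For reference, the attribute plumbing above condensed into one sketch (v4.3-era DMA API with struct dma_attrs; dev, size, dma_addr and cookie are assumed locals):

	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);	/* !EXYNOS_BO_NONCONTIG */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);		/* EXYNOS_BO_WC path    */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);	/* no lowmem mapping    */

	cookie = dma_alloc_attrs(dev->dev, size, &dma_addr, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;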
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma,
- unsigned long f_vaddr,
- pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- unsigned long pfn;
- int i;
+ struct drm_device *dev = obj->base.dev;
- if (!buf->sgt)
- return -EINTR;
-
- if (page_offset >= (buf->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
- return -EINVAL;
+ if (!obj->dma_addr) {
+ DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ return;
}
- sgl = buf->sgt->sgl;
- for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
- if (page_offset < (sgl->length >> PAGE_SHIFT))
- break;
- page_offset -= (sgl->length >> PAGE_SHIFT);
- }
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)obj->dma_addr, obj->size);
- pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+ if (!is_drm_iommu_supported(dev)) {
+ dma_free_attrs(dev->dev, obj->size, obj->cookie,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+ drm_free_large(obj->pages);
+ } else
+ dma_free_attrs(dev->dev, obj->size, obj->pages,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- return vm_insert_mixed(vma, f_vaddr, pfn);
+ obj->dma_addr = (dma_addr_t)NULL;
}
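
Both helpers above lean on the pre-4.8 DMA attributes API: a struct dma_attrs is initialized, attributes are OR-ed in with dma_set_attr(), and the same attrs are handed to dma_alloc_attrs() and dma_free_attrs(). A minimal sketch of that pattern, using only calls that appear in this patch (my_alloc() and my_free() are illustrative names, not driver code):

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static void *my_alloc(struct device *dev, size_t size, dma_addr_t *dma,
			      struct dma_attrs *attrs, bool contig)
	{
		init_dma_attrs(attrs);
		if (contig)
			dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);
		/* with NO_KERNEL_MAPPING, the return value is an opaque cookie */
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

		return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
	}

	static void my_free(struct device *dev, size_t size, void *cookie,
			    dma_addr_t dma, struct dma_attrs *attrs)
	{
		dma_free_attrs(dev, size, cookie, dma, attrs);
	}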
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
- struct drm_gem_object *obj;
- struct exynos_drm_gem_buf *buf;
-
- obj = &exynos_gem_obj->base;
- buf = exynos_gem_obj->buffer;
+ struct drm_gem_object *obj = &exynos_gem_obj->base;
DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(exynos_gem_obj);
out:
- exynos_drm_fini_buf(obj->dev, buf);
- exynos_gem_obj->buffer = NULL;
-
drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
- return exynos_gem_obj->buffer->size;
+ return exynos_gem_obj->size;
}
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
kfree(exynos_gem_obj);
- return NULL;
+ return ERR_PTR(ret);
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buf;
int ret;
+ if (flags & ~(EXYNOS_BO_MASK)) {
+ DRM_ERROR("invalid flags.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!size) {
DRM_ERROR("invalid size.\n");
return ERR_PTR(-EINVAL);
}
- size = roundup_gem_size(size, flags);
-
- ret = check_gem_flags(flags);
- if (ret)
- return ERR_PTR(ret);
-
- buf = exynos_drm_init_buf(dev, size);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ size = roundup(size, PAGE_SIZE);
exynos_gem_obj = exynos_drm_gem_init(dev, size);
- if (!exynos_gem_obj) {
- ret = -ENOMEM;
- goto err_fini_buf;
- }
-
- exynos_gem_obj->buffer = buf;
+ if (IS_ERR(exynos_gem_obj))
+ return exynos_gem_obj;
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0)
- goto err_gem_fini;
+ ret = exynos_drm_alloc_buf(exynos_gem_obj);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
return exynos_gem_obj;
-
-err_gem_fini:
- drm_gem_object_release(&exynos_gem_obj->base);
- kfree(exynos_gem_obj);
-err_fini_buf:
- exynos_drm_fini_buf(dev, buf);
- return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- return &exynos_gem_obj->buffer->dma_addr;
+ return &exynos_gem_obj->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
- struct exynos_drm_gem_buf *buffer;
unsigned long vm_size;
int ret;
@@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
vm_size = vma->vm_end - vma->vm_start;
- /*
- * a buffer contains information to physically continuous memory
- * allocated by user request or at framebuffer creation.
- */
- buffer = exynos_gem_obj->buffer;
-
/* check if user-requested size is valid. */
- if (vm_size > buffer->size)
+ if (vm_size > exynos_gem_obj->size)
return -EINVAL;
- ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
- buffer->dma_addr, buffer->size,
- &buffer->dma_attrs);
+ ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+ exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+ &exynos_gem_obj->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -378,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
- struct vm_area_struct *vma_copy;
-
- vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
- if (!vma_copy)
- return NULL;
-
- if (vma->vm_ops && vma->vm_ops->open)
- vma->vm_ops->open(vma);
-
- if (vma->vm_file)
- get_file(vma->vm_file);
-
- memcpy(vma_copy, vma, sizeof(*vma));
-
- vma_copy->vm_mm = NULL;
- vma_copy->vm_next = NULL;
- vma_copy->vm_prev = NULL;
-
- return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
- if (!vma)
- return;
-
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
-
- if (vma->vm_file)
- fput(vma->vm_file);
-
- kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
- unsigned int npages,
- struct page **pages,
- struct vm_area_struct *vma)
-{
- int get_npages;
-
- /* the memory region mmaped with VM_PFNMAP. */
- if (vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
- unsigned long pfn;
- int ret = follow_pfn(vma, start, &pfn);
- if (ret)
- return ret;
-
- pages[i] = pfn_to_page(pfn);
- }
-
- if (i != npages) {
- DRM_ERROR("failed to get user_pages.\n");
- return -EINVAL;
- }
-
- return 0;
- }
-
- get_npages = get_user_pages(current, current->mm, start,
- npages, 1, 1, pages, NULL);
- get_npages = max(get_npages, 0);
- if (get_npages != npages) {
- DRM_ERROR("failed to get user_pages.\n");
- while (get_npages)
- put_page(pages[--get_npages]);
- return -EFAULT;
- }
-
- return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
- unsigned int npages,
- struct vm_area_struct *vma)
-{
- if (!vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < npages; i++) {
- set_page_dirty_lock(pages[i]);
-
- /*
- * undo the reference we took when populating
- * the table.
- */
- put_page(pages[i]);
- }
- }
-}
-
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
@@ -503,15 +394,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buf;
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
- buf = exynos_gem_obj->buffer;
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, buf->sgt);
-
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
@@ -595,24 +477,34 @@ unlock:
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- unsigned long f_vaddr;
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ unsigned long pfn;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- f_vaddr = (unsigned long)vmf->virtual_address;
-
- mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
- if (ret < 0)
- DRM_ERROR("failed to map a buffer with user.\n");
+ if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
+ ret = -EINVAL;
+ goto out;
+ }
- mutex_unlock(&dev->struct_mutex);
+ pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
- return convert_to_vm_err_msg(ret);
+out:
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
}
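
The fault handler now folds the old convert_to_vm_err_msg() helper into a trailing switch. A standalone illustration of that errno-to-VM_FAULT translation (userspace build; ERESTARTSYS and the VM_FAULT_* values are kernel-internal, so they are stubbed here with their usual values):

	#include <stdio.h>
	#include <errno.h>

	#define ERESTARTSYS 512	/* kernel-internal errno, not in userspace headers */

	/* stand-ins for the kernel's VM_FAULT_* codes */
	enum { VM_FAULT_NOPAGE = 0x100, VM_FAULT_OOM = 0x1, VM_FAULT_SIGBUS = 0x2 };

	static int to_vm_fault(int err)
	{
		switch (err) {
		case 0:
		case -ERESTARTSYS:	/* interrupted: retry the fault later */
		case -EINTR:
			return VM_FAULT_NOPAGE;
		case -ENOMEM:
			return VM_FAULT_OOM;
		default:
			return VM_FAULT_SIGBUS;
		}
	}

	int main(void)
	{
		printf("%#x %#x %#x\n", to_vm_fault(0), to_vm_fault(-ENOMEM),
		       to_vm_fault(-EINVAL));	/* prints 0x100 0x1 0x2 */
		return 0;
	}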
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -631,11 +523,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
obj = vma->vm_private_data;
exynos_gem_obj = to_exynos_gem_obj(obj);
- ret = check_gem_flags(exynos_gem_obj->flags);
- if (ret)
- goto err_close_vm;
+ DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
- update_vm_cache_attr(exynos_gem_obj, vma);
+ /* non-cachable as default. */
+ if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
if (ret)
@@ -649,3 +547,76 @@ err_close_vm:
return ret;
}
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ int npages;
+
+ npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+ return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int npages;
+ int ret;
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
+
+ exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+ npages = exynos_gem_obj->size >> PAGE_SHIFT;
+ exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!exynos_gem_obj->pages) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+ npages);
+ if (ret < 0)
+ goto err_free_large;
+
+ if (sgt->nents == 1) {
+		/* always physically contiguous memory if sgt->nents is 1. */
+ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+ } else {
+ /*
+		 * this buffer could be CONTIG or NONCONTIG; assume NONCONTIG
+		 * for now.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of the type of its buffer.
+ */
+ exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+ }
+
+ return &exynos_gem_obj->base;
+
+err_free_large:
+ drm_free_large(exynos_gem_obj->pages);
+err:
+ drm_gem_object_release(&exynos_gem_obj->base);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
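
The two PRIME helpers above are mirror images: the exporter flattens its page array into an sg_table, and the importer rebuilds a page array from one. A sketch of that round trip under the 4.3-era DRM API (wrapper names are illustrative; error handling omitted):

	#include <drm/drmP.h>

	static struct sg_table *export_pages(struct page **pages, int npages)
	{
		/* exporter side: page array -> sg_table */
		return drm_prime_pages_to_sg(pages, npages);
	}

	static int import_pages(struct sg_table *sgt, struct page **pages,
				int npages)
	{
		/* importer side: sg_table -> page array (no dma_addr array) */
		return drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
	}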
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e2248288..cd62f8410d1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
/*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- * - this address could be physical address without IOMMU and
- * device address with IOMMU.
- * @write: whether pages will be written to by the caller.
- * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
- * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- * VM_PFNMAP or not.
- */
-struct exynos_drm_gem_buf {
- void *cookie;
- void __iomem *kvaddr;
- unsigned long userptr;
- dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
- unsigned int write;
- struct page **pages;
- struct sg_table *sgt;
- unsigned long size;
- bool pfnmap;
-};
-
-/*
* exynos drm buffer structure.
*
* @base: a gem object.
@@ -59,18 +30,28 @@ struct exynos_drm_gem_buf {
* by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
+ * @flags: indicate the memory type of the allocated buffer and its
+ *	cache attribute.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
+ *	- this is a physical address without an IOMMU and a device
+ *	address with an IOMMU.
+ * @pages: Array of backing pages.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
- struct drm_gem_object base;
- struct exynos_drm_gem_buf *buffer;
- unsigned long size;
- unsigned int flags;
+ struct drm_gem_object base;
+ unsigned int flags;
+ unsigned long size;
+ void *cookie;
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+ struct page **pages;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..808a0a013780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@ static int gsc_src_set_transf(struct device *dev,
break;
case EXYNOS_DRM_DEGREE_180:
cfg |= GSC_IN_ROT_180;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
case EXYNOS_DRM_DEGREE_270:
cfg |= GSC_IN_ROT_270;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
default:
dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
@@ -593,8 +601,7 @@ static int gsc_src_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
@@ -846,9 +853,17 @@ static int gsc_dst_set_transf(struct device *dev,
break;
case EXYNOS_DRM_DEGREE_180:
cfg |= GSC_IN_ROT_180;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
case EXYNOS_DRM_DEGREE_270:
cfg |= GSC_IN_ROT_270;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~GSC_IN_ROT_YFLIP;
break;
default:
dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
@@ -857,8 +872,7 @@ static int gsc_dst_set_transf(struct device *dev,
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
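
The flip handling added in both transform callbacks relies on the GSC rotation encoding: a 180° rotation is the combination of the X- and Y-flip bits, so a user-requested flip on top of it cancels the matching bit, and only the dedicated 90° bit indicates a width/height swap. A standalone demonstration (bit values copied from regs-gsc.h; treat the encoding as an assumption of this sketch):

	#include <stdio.h>
	#include <stdint.h>

	/* encodings as in drivers/gpu/drm/exynos/regs-gsc.h (assumed here) */
	#define GSC_IN_ROT_XFLIP	(1 << 16)
	#define GSC_IN_ROT_YFLIP	(2 << 16)
	#define GSC_IN_ROT_180		(3 << 16)	/* == XFLIP | YFLIP */
	#define GSC_IN_ROT_90		(4 << 16)
	#define GSC_IN_ROT_270		(7 << 16)	/* == ROT_90 | XFLIP | YFLIP */

	int main(void)
	{
		uint32_t cfg = GSC_IN_ROT_180;

		/* a vertical flip on top of 180° cancels the X-flip component */
		cfg &= ~GSC_IN_ROT_XFLIP;
		printf("180 + vflip -> %#x (YFLIP only)\n", cfg);

		/* why ctx->rotation now tests only the ROT_90 bit: */
		printf("180 rotated? %d, 270 rotated? %d\n",
		       !!(GSC_IN_ROT_180 & GSC_IN_ROT_90),
		       !!(GSC_IN_ROT_270 & GSC_IN_ROT_90));	/* 0, 1 */
		return 0;
	}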
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec7465e9cc..055e8ec2ef21 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *dev = drm_dev->dev;
int ret;
- if (!dev->archdata.mapping) {
- DRM_ERROR("iommu_mapping is null.\n");
- return -EFAULT;
- }
+ if (!dev->archdata.mapping)
+ return 0;
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
iommu_detach_device(mapping->domain, subdrv_dev);
drm_release_iommu_mapping(drm_dev);
}
-
-int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
- struct drm_device *drm_dev, struct device *subdrv_dev)
-{
- int ret = 0;
-
- if (is_drm_iommu_supported(drm_dev)) {
- if (exynos_crtc->ops->clear_channels)
- exynos_crtc->ops->clear_channels(exynos_crtc);
- return drm_iommu_attach_device(drm_dev, subdrv_dev);
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a475b4..dc1b5441f491 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@ void drm_iommu_detach_device(struct drm_device *dev_dev,
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct device *dev = drm_dev->dev;
return dev->archdata.mapping ? true : false;
-#else
- return false;
-#endif
}
-int drm_iommu_attach_device_if_possible(
- struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
#else
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return false;
}
-static inline int drm_iommu_attach_device_if_possible(
- struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
#endif
#endif
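
Since drm_iommu_attach_device() now returns 0 as a no-op when the device has no IOMMU mapping, callers can drop their is_drm_iommu_supported() guards, as the ipp changes below do. A sketch of a sub-driver bind path under this contract (subdrv_bind() is an illustrative name, not driver code):

	static int subdrv_bind(struct drm_device *drm_dev, struct device *dev)
	{
		int ret;

		/* no-op, returning 0, when dev has no IOMMU mapping */
		ret = drm_iommu_attach_device(drm_dev, dev);
		if (ret) {
			DRM_ERROR("failed to activate iommu\n");
			return ret;
		}

		/* ... sub-driver specific setup ... */
		return 0;
	}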
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451e066f..67d24236e745 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
INIT_LIST_HEAD(&ippdrv->cmd_list);
mutex_init(&ippdrv->cmd_lock);
- if (is_drm_iommu_supported(drm_dev)) {
- ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
- if (ret) {
- DRM_ERROR("failed to activate iommu\n");
- goto err;
- }
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err;
}
}
@@ -1637,8 +1635,7 @@ err:
/* get ipp driver entry */
list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
drv_list) {
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980d3c2f..714822441467 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -20,12 +20,6 @@
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
-static const uint32_t formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_NV12,
-};
-
/*
* This function is to get X or Y size shown via screen. This needs length and
* start position of CRTC.
@@ -97,29 +91,18 @@ static void exynos_plane_mode_set(struct drm_plane *plane,
/* set drm framebuffer data. */
exynos_plane->src_x = src_x;
exynos_plane->src_y = src_y;
- exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
- exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
- exynos_plane->fb_width = fb->width;
- exynos_plane->fb_height = fb->height;
- exynos_plane->bpp = fb->bits_per_pixel;
- exynos_plane->pitch = fb->pitches[0];
- exynos_plane->pixel_format = fb->pixel_format;
+ exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
+ exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
/* set plane range to be displayed. */
exynos_plane->crtc_x = crtc_x;
exynos_plane->crtc_y = crtc_y;
- exynos_plane->crtc_width = actual_w;
- exynos_plane->crtc_height = actual_h;
-
- /* set drm mode data. */
- exynos_plane->mode_width = mode->hdisplay;
- exynos_plane->mode_height = mode->vdisplay;
- exynos_plane->refresh = mode->vrefresh;
- exynos_plane->scan_flag = mode->flags;
+ exynos_plane->crtc_w = actual_w;
+ exynos_plane->crtc_h = actual_h;
DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
exynos_plane->crtc_x, exynos_plane->crtc_y,
- exynos_plane->crtc_width, exynos_plane->crtc_height);
+ exynos_plane->crtc_w, exynos_plane->crtc_h);
plane->crtc = crtc;
}
@@ -143,17 +126,17 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
if (!state->fb)
return 0;
- nr = exynos_drm_fb_get_buf_cnt(state->fb);
+ nr = drm_format_num_planes(state->fb->pixel_format);
for (i = 0; i < nr; i++) {
- struct exynos_drm_gem_buf *buffer =
- exynos_drm_fb_buffer(state->fb, i);
+ struct exynos_drm_gem_obj *obj =
+ exynos_drm_fb_gem_obj(state->fb, i);
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null\n");
+ if (!obj) {
+ DRM_DEBUG_KMS("gem object is null\n");
return -EFAULT;
}
- exynos_plane->dma_addr[i] = buffer->dma_addr +
+ exynos_plane->dma_addr[i] = obj->dma_addr +
state->fb->offsets[i];
DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +162,10 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16);
- if (exynos_crtc->ops->win_commit)
- exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+ exynos_plane->pending_fb = state->fb;
+
+ if (exynos_crtc->ops->update_plane)
+ exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +177,9 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
if (!old_state->crtc)
return;
- if (exynos_crtc->ops->win_disable)
- exynos_crtc->ops->win_disable(exynos_crtc,
- exynos_plane->zpos);
+ if (exynos_crtc->ops->disable_plane)
+ exynos_crtc->ops->disable_plane(exynos_crtc,
+ exynos_plane);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -226,13 +211,14 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane,
unsigned long possible_crtcs, enum drm_plane_type type,
+ const uint32_t *formats, unsigned int fcount,
unsigned int zpos)
{
int err;
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
- &exynos_plane_funcs, formats,
- ARRAY_SIZE(formats), type);
+ &exynos_plane_funcs, formats, fcount,
+ type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 8c88ae983c38..476c9340b591 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -12,4 +12,5 @@
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane,
unsigned long possible_crtcs, enum drm_plane_type type,
+ const uint32_t *formats, unsigned int fcount,
unsigned int zpos);
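
exynos_plane_init() now takes the format table from the caller instead of a driver-wide static list, so each CRTC driver declares only the formats its hardware can scan out; the vidi changes below are one example. A sketch of a caller (my_formats is an illustrative name):

	static const uint32_t my_formats[] = {
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_NV12,
	};

	ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 1 << ctx->pipe,
				type, my_formats, ARRAY_SIZE(my_formats),
				zpos);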
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393d8a16..75718e1bc3dd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_encoder.h"
#include "exynos_drm_vidi.h"
/* vidi has totally three virtual windows. */
@@ -35,11 +34,10 @@
connector)
struct vidi_context {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
- struct drm_encoder *encoder;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
@@ -55,9 +53,9 @@ struct vidi_context {
int pipe;
};
-static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
{
- return container_of(d, struct vidi_context, display);
+ return container_of(e, struct vidi_context, encoder);
}
static const char fake_edid_info[] = {
@@ -85,6 +83,12 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
+static const uint32_t formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV12,
+};
+
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -100,7 +104,7 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
/*
* in case of page flip request, vidi_finish_pageflip function
* will not be called because direct_vblank is true and then
- * that function will be called by crtc_ops->win_commit callback
+ * that function will be called by crtc_ops->update_plane callback
*/
schedule_work(&ctx->work);
@@ -118,19 +122,14 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
ctx->vblank_on = false;
}
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void vidi_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct vidi_context *ctx = crtc->ctx;
- struct exynos_drm_plane *plane;
if (ctx->suspended)
return;
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- plane = &ctx->planes[win];
-
DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
if (ctx->vblank_on)
@@ -179,13 +178,14 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.disable = vidi_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
- .win_commit = vidi_win_commit,
+ .update_plane = vidi_update_plane,
};
static void vidi_fake_vblank_handler(struct work_struct *work)
{
struct vidi_context *ctx = container_of(work, struct vidi_context,
work);
+ int win;
if (ctx->pipe < 0)
return;
@@ -196,7 +196,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_lock(&ctx->lock);
if (ctx->direct_vblank) {
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
ctx->direct_vblank = false;
mutex_unlock(&ctx->lock);
return;
@@ -204,7 +204,14 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_unlock(&ctx->lock);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	for (win = 0; win < WINDOWS_NR; win++) {
+ struct exynos_drm_plane *plane = &ctx->planes[win];
+
+ if (!plane->pending_fb)
+ continue;
+
+ exynos_drm_crtc_finish_update(ctx->crtc, plane);
+ }
}
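
This loop pairs with exynos_plane_atomic_update() earlier in the patch, which records state->fb in the plane's pending_fb; the fake vblank worker then completes exactly the planes with an update outstanding. The producer/consumer pairing, side by side (field and helper names as in this patch):

	/* producer, in the atomic update path */
	exynos_plane->pending_fb = state->fb;

	/* consumer, in the (fake) vblank handler */
	for (win = 0; win < WINDOWS_NR; win++) {
		struct exynos_drm_plane *plane = &ctx->planes[win];

		if (!plane->pending_fb)
			continue;
		exynos_drm_crtc_finish_update(ctx->crtc, plane);
	}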
static int vidi_show_connection(struct device *dev,
@@ -259,9 +266,7 @@ static DEVICE_ATTR(connection, 0644, vidi_show_connection,
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
- struct vidi_context *ctx = NULL;
- struct drm_encoder *encoder;
- struct exynos_drm_display *display;
+ struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
@@ -274,21 +279,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
- head) {
- display = exynos_drm_get_display(encoder);
-
- if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
- ctx = display_to_vidi(display);
- break;
- }
- }
-
- if (!ctx) {
- DRM_DEBUG_KMS("not found virtual device type encoder.\n");
- return -EINVAL;
- }
-
if (ctx->connected == vidi->connection) {
DRM_DEBUG_KMS("same connection request.\n");
return -EINVAL;
@@ -381,7 +371,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
{
struct vidi_context *ctx = ctx_from_connector(connector);
- return ctx->encoder;
+ return &ctx->encoder;
}
static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +379,12 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.best_encoder = vidi_best_encoder,
};
-static int vidi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int vidi_create_connector(struct drm_encoder *encoder)
{
- struct vidi_context *ctx = display_to_vidi(display);
+ struct vidi_context *ctx = encoder_to_vidi(encoder);
struct drm_connector *connector = &ctx->connector;
int ret;
- ctx->encoder = encoder;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +401,47 @@ static int vidi_create_connector(struct exynos_drm_display *display,
return 0;
}
+static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void exynos_vidi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_vidi_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_vidi_disable(struct drm_encoder *encoder)
+{
+}
-static struct exynos_drm_display_ops vidi_display_ops = {
- .create_connector = vidi_create_connector,
+static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+ .mode_fixup = exynos_vidi_mode_fixup,
+ .mode_set = exynos_vidi_mode_set,
+ .enable = exynos_vidi_enable,
+ .disable = exynos_vidi_disable,
+};
+
+static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder = &ctx->encoder;
struct exynos_drm_plane *exynos_plane;
enum drm_plane_type type;
unsigned int zpos;
- int ret;
+ int pipe, ret;
vidi_ctx_initialize(ctx, drm_dev);
@@ -433,7 +449,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, zpos);
+ 1 << ctx->pipe, type, formats,
+ ARRAY_SIZE(formats), zpos);
if (ret)
return ret;
}
@@ -447,9 +464,24 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_VIDI);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+
+ ret = vidi_create_connector(encoder);
if (ret) {
- ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
return ret;
}
@@ -475,8 +507,6 @@ static int vidi_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
- ctx->display.ops = &vidi_display_ops;
ctx->default_win = 0;
ctx->pdev = pdev;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..932f7fa240f8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
#include "regs-hdmi.h"
#include <linux/kernel.h>
-#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
@@ -33,8 +32,8 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/hdmi.h>
#include <linux/component.h>
@@ -48,7 +47,6 @@
#include "exynos_mixer.h"
#include <linux/gpio.h>
-#include <media/s5p_hdmi.h>
#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
@@ -88,109 +86,14 @@ struct hdmi_resources {
int regul_count;
};
-struct hdmi_tg_regs {
- u8 cmd[1];
- u8 h_fsz[2];
- u8 hact_st[2];
- u8 hact_sz[2];
- u8 v_fsz[2];
- u8 vsync[2];
- u8 vsync2[2];
- u8 vact_st[2];
- u8 vact_sz[2];
- u8 field_chg[2];
- u8 vact_st2[2];
- u8 vact_st3[2];
- u8 vact_st4[2];
- u8 vsync_top_hdmi[2];
- u8 vsync_bot_hdmi[2];
- u8 field_top_hdmi[2];
- u8 field_bot_hdmi[2];
- u8 tg_3d[1];
-};
-
-struct hdmi_v13_core_regs {
- u8 h_blank[2];
- u8 v_blank[3];
- u8 h_v_line[3];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f[3];
- u8 h_sync_gen[3];
- u8 v_sync_gen1[3];
- u8 v_sync_gen2[3];
- u8 v_sync_gen3[3];
-};
-
-struct hdmi_v14_core_regs {
- u8 h_blank[2];
- u8 v2_blank[2];
- u8 v1_blank[2];
- u8 v_line[2];
- u8 h_line[2];
- u8 hsync_pol[1];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f0[2];
- u8 v_blank_f1[2];
- u8 h_sync_start[2];
- u8 h_sync_end[2];
- u8 v_sync_line_bef_2[2];
- u8 v_sync_line_bef_1[2];
- u8 v_sync_line_aft_2[2];
- u8 v_sync_line_aft_1[2];
- u8 v_sync_line_aft_pxl_2[2];
- u8 v_sync_line_aft_pxl_1[2];
- u8 v_blank_f2[2]; /* for 3D mode */
- u8 v_blank_f3[2]; /* for 3D mode */
- u8 v_blank_f4[2]; /* for 3D mode */
- u8 v_blank_f5[2]; /* for 3D mode */
- u8 v_sync_line_aft_3[2];
- u8 v_sync_line_aft_4[2];
- u8 v_sync_line_aft_5[2];
- u8 v_sync_line_aft_6[2];
- u8 v_sync_line_aft_pxl_3[2];
- u8 v_sync_line_aft_pxl_4[2];
- u8 v_sync_line_aft_pxl_5[2];
- u8 v_sync_line_aft_pxl_6[2];
- u8 vact_space_1[2];
- u8 vact_space_2[2];
- u8 vact_space_3[2];
- u8 vact_space_4[2];
- u8 vact_space_5[2];
- u8 vact_space_6[2];
-};
-
-struct hdmi_v13_conf {
- struct hdmi_v13_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_v14_conf {
- struct hdmi_v14_core_regs core;
- struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf_regs {
- int pixel_clock;
- int cea_video_id;
- enum hdmi_picture_aspect aspect_ratio;
- union {
- struct hdmi_v13_conf v13_conf;
- struct hdmi_v14_conf v14_conf;
- } conf;
-};
-
struct hdmi_context {
- struct exynos_drm_display display;
+ struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- struct drm_encoder *encoder;
bool hpd;
bool powered;
bool dvi_mode;
- struct mutex hdmi_mutex;
void __iomem *regs;
int irq;
@@ -201,22 +104,20 @@ struct hdmi_context {
/* current hdmiphy conf regs */
struct drm_display_mode current_mode;
- struct hdmi_conf_regs mode_conf;
+ u8 cea_video_id;
struct hdmi_resources res;
+ const struct hdmi_driver_data *drv_data;
int hpd_gpio;
void __iomem *regs_hdmiphy;
- const struct hdmiphy_config *phy_confs;
- unsigned int phy_conf_count;
struct regmap *pmureg;
- enum hdmi_type type;
};
-static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
{
- return container_of(d, struct hdmi_context, display);
+ return container_of(e, struct hdmi_context, encoder);
}
struct hdmiphy_config {
@@ -624,6 +525,16 @@ static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
writeb(value, hdata->regs + reg_id);
}
+static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
+ int bytes, u32 val)
+{
+ while (--bytes >= 0) {
+ writeb(val & 0xff, hdata->regs + reg_id);
+ val >>= 8;
+ reg_id += 4;
+ }
+}
+
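
hdmi_reg_writev() replaces the long runs of per-byte hdmi_reg_writeb() calls below by splitting a value into bytes, LSB first, one byte per 32-bit register slot. A standalone demonstration against a plain buffer (userspace; the buffer stands in for the memory-mapped HDMI registers):

	#include <stdio.h>
	#include <stdint.h>

	static void writev_demo(uint8_t *regs, unsigned int reg, int bytes,
				uint32_t val)
	{
		while (--bytes >= 0) {
			regs[reg] = val & 0xff;	/* writeb() in the driver */
			val >>= 8;
			reg += 4;		/* registers are 4 bytes apart */
		}
	}

	int main(void)
	{
		uint8_t regs[16] = { 0 };

		writev_demo(regs, 0, 3, 0x123456);	/* e.g. a 3-byte field */
		printf("%02x %02x %02x\n", regs[0], regs[4], regs[8]); /* 56 34 12 */
		return 0;
	}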
static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
u32 reg_id, u32 value, u32 mask)
{
@@ -930,7 +841,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
{
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_v13_regs_dump(hdata, prefix);
else
hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
u32 hdr_sum;
u8 chksum;
u32 mod;
- u32 vic;
+ u8 ar;
mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
* Set the aspect ratio as per the mode, mentioned in
* Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
*/
- switch (hdata->mode_conf.aspect_ratio) {
+ ar = hdata->current_mode.picture_aspect_ratio;
+ switch (ar) {
case HDMI_PICTURE_ASPECT_4_3:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_4_3_CENTER_RATIO);
+ ar |= AVI_4_3_CENTER_RATIO;
break;
case HDMI_PICTURE_ASPECT_16_9:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_16_9_CENTER_RATIO);
+ ar |= AVI_16_9_CENTER_RATIO;
break;
case HDMI_PICTURE_ASPECT_NONE:
default:
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
- hdata->mode_conf.aspect_ratio |
- AVI_SAME_AS_PIC_ASPECT_RATIO);
+ ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
break;
}
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
- vic = hdata->mode_conf.cea_video_id;
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
{
struct hdmi_context *hdata = ctx_from_connector(connector);
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+ if (gpio_get_value(hdata->hpd_gpio))
+ return connector_status_connected;
- return hdata->hpd ? connector_status_connected :
- connector_status_disconnected;
+ return connector_status_disconnected;
}
static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1064,6 +970,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
@@ -1079,15 +986,19 @@ static int hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
int i;
- for (i = 0; i < hdata->phy_conf_count; i++)
- if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+ for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
+ if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
return i;
DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1120,7 +1031,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
{
struct hdmi_context *hdata = ctx_from_connector(connector);
- return hdata->encoder;
+ return &hdata->encoder;
}
static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1129,14 +1040,12 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.best_encoder = hdmi_best_encoder,
};
-static int hdmi_create_connector(struct exynos_drm_display *display,
- struct drm_encoder *encoder)
+static int hdmi_create_connector(struct drm_encoder *encoder)
{
- struct hdmi_context *hdata = display_to_hdmi(display);
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
int ret;
- hdata->encoder = encoder;
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -1154,23 +1063,30 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
return 0;
}
-static void hdmi_mode_fixup(struct exynos_drm_display *display,
- struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
struct drm_display_mode *m;
int mode_ok;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ break;
+ }
+
+ if (connector->encoder != encoder)
+ return true;
+
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
/* just return if user desired mode exists. */
if (mode_ok == MODE_OK)
- return;
+ return true;
/*
* otherwise, find the most suitable mode among modes and change it
@@ -1190,6 +1106,8 @@ static void hdmi_mode_fixup(struct exynos_drm_display *display,
break;
}
}
+
+ return true;
}
static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1252,7 +1170,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
else
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1386,7 +1304,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
}
- if (hdata->type == HDMI_TYPE13) {
+ if (hdata->drv_data->type == HDMI_TYPE13) {
/* choose bluescreen (fake) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1419,66 +1337,94 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
{
- const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
- const struct hdmi_v13_core_regs *core =
- &hdata->mode_conf.conf.v13_conf.core;
+ struct drm_display_mode *m = &hdata->current_mode;
+ unsigned int val;
int tries;
- /* setting core registers */
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
- hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
- hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
+ (m->htotal << 12) | m->vtotal);
+
+ val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
+
+ val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+ hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
+
+ val = (m->hsync_start - m->hdisplay - 2);
+ val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+ val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
+ hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
+
+ /*
+	 * Quirk of the exynos HDMI IP: hsync_start and hsync_end must be
+	 * programmed 2 pixels less than the calculated values.
+ */
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ val = ((m->vsync_end - m->vdisplay) / 2);
+ val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+ val = m->vtotal / 2;
+ val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+ val = (m->vtotal +
+ ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+ val |= m->vtotal << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
+
+ val = ((m->vtotal / 2) + 7);
+ val |= ((m->vtotal / 2) + 2) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
+
+ val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ val |= ((m->htotal / 2) +
+ (m->hsync_start - m->hdisplay)) << 12;
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
+ } else {
+ /* Progressive Mode */
+
+ val = m->vtotal;
+ val |= (m->vtotal - m->vdisplay) << 11;
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
+
+ val = (m->vsync_end - m->vdisplay);
+ val |= ((m->vsync_start - m->vdisplay) << 12);
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
+ hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+ }
+
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+ hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
@@ -1503,144 +1449,119 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
{
- const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
- const struct hdmi_v14_core_regs *core =
- &hdata->mode_conf.conf.v14_conf.core;
+ struct drm_display_mode *m = &hdata->current_mode;
int tries;
- /* setting core registers */
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
- hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
- hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
- hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
- hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
- core->v_sync_line_bef_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
- core->v_sync_line_bef_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
- core->v_sync_line_bef_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
- core->v_sync_line_bef_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
- core->v_sync_line_aft_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
- core->v_sync_line_aft_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
- core->v_sync_line_aft_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
- core->v_sync_line_aft_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
- core->v_sync_line_aft_pxl_2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
- core->v_sync_line_aft_pxl_2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
- core->v_sync_line_aft_pxl_1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
- core->v_sync_line_aft_pxl_1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
- core->v_sync_line_aft_3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
- core->v_sync_line_aft_3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
- core->v_sync_line_aft_4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
- core->v_sync_line_aft_4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
- core->v_sync_line_aft_5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
- core->v_sync_line_aft_5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
- core->v_sync_line_aft_6[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
- core->v_sync_line_aft_6[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
- core->v_sync_line_aft_pxl_3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
- core->v_sync_line_aft_pxl_3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
- core->v_sync_line_aft_pxl_4[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
- core->v_sync_line_aft_pxl_4[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
- core->v_sync_line_aft_pxl_5[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
- core->v_sync_line_aft_pxl_5[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
- core->v_sync_line_aft_pxl_6[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
- core->v_sync_line_aft_pxl_6[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
- hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+ hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
+ (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+ hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
+ (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+ /*
+	 * Quirk of the exynos 5 HDMI IP: hsync_start and hsync_end must be
+	 * programmed 2 pixels less than the calculated values.
+ */
+
+ /* Following values & calculations differ for different type of modes */
+ if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* Interlaced Mode */
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+ (m->vsync_end - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+ (m->vsync_start - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
+ hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
+ m->vtotal - m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
+ (m->vtotal / 2) + 7);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
+ (m->vtotal / 2) + 2);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
+ (m->htotal / 2) + (m->hsync_start - m->hdisplay));
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ (m->vtotal - m->vdisplay) / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
+ m->vtotal - m->vdisplay / 2);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
+ (m->vtotal / 2) + 1);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
+ } else {
+ /* Progressive Mode */
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+ m->vsync_end - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+ m->vsync_start - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+ m->vtotal - m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
+ hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
+ }
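As a worked example of the interlaced arithmetic above, consider a hypothetical 1080i mode with vtotal = 1125 and vdisplay = 1080 (figures chosen for illustration, not taken from the patch):

/*
 * HDMI_V2_BLANK          = 1125 / 2          = 562
 * HDMI_V1_BLANK          = (1125 - 1080) / 2 = 22  (integer division)
 * HDMI_V_BLANK_F0        = 1125 - 1080 / 2   = 585 (only vdisplay is
 *                                                   halved, by design)
 * HDMI_V_SYNC_LINE_AFT_2 = (1125 / 2) + 7    = 569
 */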
+
+ /* The following values & calculations are the same irrespective of mode type */
+ hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
+ m->hsync_start - m->hdisplay - 2);
+ hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
+ m->hsync_end - m->hdisplay - 2);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
+ hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
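To see the 2-pixel quirk at work in the common horizontal registers above, take a hypothetical mode with hdisplay = 1920, hsync_start = 2008 and hsync_end = 2052 (illustrative values):

/*
 * HDMI_H_SYNC_START = 2008 - 1920 - 2 = 86  (88 without the quirk)
 * HDMI_H_SYNC_END   = 2052 - 1920 - 2 = 130 (132 without the quirk)
 */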
/* Timing generator registers */
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
- hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
- hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+ hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+ hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+ hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
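The hdmi_reg_writev() helper this hunk switches to is not shown in the diff; a minimal sketch, assuming it splits the value into consecutive byte-wide registers LSB first, exactly as the removed hdmi_set_reg()/hdmi_reg_writeb() pairs did (the 4-byte register stride is an assumption):

static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
				   int bytes, u32 val)
{
	/* one byte per register, least significant byte first */
	while (--bytes >= 0) {
		writel(val & 0xff, hdata->regs + reg_id);
		val >>= 8;
		reg_id += 4;	/* assumed spacing between the _0/_1 registers */
	}
}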
@@ -1665,7 +1586,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
static void hdmi_mode_apply(struct hdmi_context *hdata)
{
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
hdmi_v13_mode_apply(hdata);
else
hdmi_v14_mode_apply(hdata);
@@ -1683,7 +1604,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
HDMI_PHY_ENABLE_MODE_SET);
- if (hdata->type == HDMI_TYPE13)
+ if (hdata->drv_data->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
else
reg = HDMI_PHY_RSTOUT;
@@ -1697,7 +1618,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
static void hdmiphy_poweron(struct hdmi_context *hdata)
{
- if (hdata->type != HDMI_TYPE14)
+ if (hdata->drv_data->type != HDMI_TYPE14)
return;
DRM_DEBUG_KMS("\n");
@@ -1717,7 +1638,7 @@ static void hdmiphy_poweron(struct hdmi_context *hdata)
static void hdmiphy_poweroff(struct hdmi_context *hdata)
{
- if (hdata->type != HDMI_TYPE14)
+ if (hdata->drv_data->type != HDMI_TYPE14)
return;
DRM_DEBUG_KMS("\n");
@@ -1743,13 +1664,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
int i;
/* pixel clock */
- i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+ i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
if (i < 0) {
DRM_ERROR("failed to find hdmiphy conf\n");
return;
}
- ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+ ret = hdmiphy_reg_write_buf(hdata, 0,
+ hdata->drv_data->phy_confs[i].conf, 32);
if (ret) {
DRM_ERROR("failed to configure hdmiphy\n");
return;
@@ -1771,10 +1693,8 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
- mutex_lock(&hdata->hdmi_mutex);
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
- mutex_unlock(&hdata->hdmi_mutex);
hdmi_audio_init(hdata);
@@ -1785,271 +1705,32 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmi_regs_dump(hdata, "start");
}
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
-{
- int i;
- BUG_ON(num_bytes > 4);
- for (i = 0; i < num_bytes; i++)
- reg_pair[i] = (value >> (8 * i)) & 0xff;
-}
-
-static void hdmi_v13_mode_set(struct hdmi_context *hdata,
- struct drm_display_mode *m)
+static void hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
- struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
- unsigned int val;
-
- hdata->mode_conf.cea_video_id =
- drm_match_cea_mode((struct drm_display_mode *)m);
- hdata->mode_conf.pixel_clock = m->clock * 1000;
- hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
- hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
-
- val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
- hdmi_set_reg(core->vsync_pol, 1, val);
-
- val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
- hdmi_set_reg(core->int_pro_mode, 1, val);
-
- val = (m->hsync_start - m->hdisplay - 2);
- val |= ((m->hsync_end - m->hdisplay - 2) << 10);
- val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
- hdmi_set_reg(core->h_sync_gen, 3, val);
-
- /*
- * Quirk requirement for exynos HDMI IP design,
- * 2 pixels less than the actual calculation for hsync_start
- * and end.
- */
-
- /* Following values & calculations differ for different type of modes */
- if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
- val = ((m->vsync_end - m->vdisplay) / 2);
- val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
- hdmi_set_reg(core->v_sync_gen1, 3, val);
-
- val = m->vtotal / 2;
- val |= ((m->vtotal - m->vdisplay) / 2) << 11;
- hdmi_set_reg(core->v_blank, 3, val);
-
- val = (m->vtotal +
- ((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
- val |= m->vtotal << 11;
- hdmi_set_reg(core->v_blank_f, 3, val);
-
- val = ((m->vtotal / 2) + 7);
- val |= ((m->vtotal / 2) + 2) << 12;
- hdmi_set_reg(core->v_sync_gen2, 3, val);
-
- val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
- val |= ((m->htotal / 2) +
- (m->hsync_start - m->hdisplay)) << 12;
- hdmi_set_reg(core->v_sync_gen3, 3, val);
-
- hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-
- hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
- } else {
- /* Progressive Mode */
-
- val = m->vtotal;
- val |= (m->vtotal - m->vdisplay) << 11;
- hdmi_set_reg(core->v_blank, 3, val);
-
- hdmi_set_reg(core->v_blank_f, 3, 0);
-
- val = (m->vsync_end - m->vdisplay);
- val |= ((m->vsync_start - m->vdisplay) << 12);
- hdmi_set_reg(core->v_sync_gen1, 3, val);
-
- hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */
- hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */
- hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
- hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
- }
-
- /* Timing generator registers */
- hdmi_set_reg(tg->cmd, 1, 0x0);
- hdmi_set_reg(tg->h_fsz, 2, m->htotal);
- hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
- hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
- hdmi_set_reg(tg->vsync, 2, 0x1);
- hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
-}
-
-static void hdmi_v14_mode_set(struct hdmi_context *hdata,
- struct drm_display_mode *m)
-{
- struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
- struct hdmi_v14_core_regs *core =
- &hdata->mode_conf.conf.v14_conf.core;
-
- hdata->mode_conf.cea_video_id =
- drm_match_cea_mode((struct drm_display_mode *)m);
- hdata->mode_conf.pixel_clock = m->clock * 1000;
- hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
- hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(core->v_line, 2, m->vtotal);
- hdmi_set_reg(core->h_line, 2, m->htotal);
- hdmi_set_reg(core->hsync_pol, 1,
- (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
- hdmi_set_reg(core->vsync_pol, 1,
- (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
- hdmi_set_reg(core->int_pro_mode, 1,
- (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
- /*
- * Quirk requirement for exynos 5 HDMI IP design,
- * 2 pixels less than the actual calculation for hsync_start
- * and end.
- */
-
- /* Following values & calculations differ for different type of modes */
- if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
- hdmi_set_reg(core->v_sync_line_bef_2, 2,
- (m->vsync_end - m->vdisplay) / 2);
- hdmi_set_reg(core->v_sync_line_bef_1, 2,
- (m->vsync_start - m->vdisplay) / 2);
- hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
- hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
- hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
- hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
- hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
- hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
- (m->htotal / 2) + (m->hsync_start - m->hdisplay));
- hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
- (m->htotal / 2) + (m->hsync_start - m->hdisplay));
- hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
- hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
- hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
- hdmi_set_reg(tg->vact_st3, 2, 0x0);
- hdmi_set_reg(tg->vact_st4, 2, 0x0);
- } else {
- /* Progressive Mode */
- hdmi_set_reg(core->v_sync_line_bef_2, 2,
- m->vsync_end - m->vdisplay);
- hdmi_set_reg(core->v_sync_line_bef_1, 2,
- m->vsync_start - m->vdisplay);
- hdmi_set_reg(core->v2_blank, 2, m->vtotal);
- hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
- hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
- hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
- hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
- hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
- hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
- hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
- }
-
- /* Following values & calculations are same irrespective of mode type */
- hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
- hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
- hdmi_set_reg(core->vact_space_1, 2, 0xffff);
- hdmi_set_reg(core->vact_space_2, 2, 0xffff);
- hdmi_set_reg(core->vact_space_3, 2, 0xffff);
- hdmi_set_reg(core->vact_space_4, 2, 0xffff);
- hdmi_set_reg(core->vact_space_5, 2, 0xffff);
- hdmi_set_reg(core->vact_space_6, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
- hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
- hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
-
- /* Timing generator registers */
- hdmi_set_reg(tg->cmd, 1, 0x0);
- hdmi_set_reg(tg->h_fsz, 2, m->htotal);
- hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
- hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
- hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
- hdmi_set_reg(tg->vsync, 2, 0x1);
- hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
- hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
- hdmi_set_reg(tg->tg_3d, 1, 0x0);
-}
-
-static void hdmi_mode_set(struct exynos_drm_display *display,
- struct drm_display_mode *mode)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
- struct drm_display_mode *m = mode;
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ struct drm_display_mode *m = adjusted_mode;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
m->hdisplay, m->vdisplay,
m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
"INTERLACED" : "PROGRESSIVE");
- /* preserve mode information for later use. */
- drm_mode_copy(&hdata->current_mode, mode);
-
- if (hdata->type == HDMI_TYPE13)
- hdmi_v13_mode_set(hdata, mode);
- else
- hdmi_v14_mode_set(hdata, mode);
-}
-
-static void hdmi_commit(struct exynos_drm_display *display)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
-
- mutex_lock(&hdata->hdmi_mutex);
- if (!hdata->powered) {
- mutex_unlock(&hdata->hdmi_mutex);
- return;
- }
- mutex_unlock(&hdata->hdmi_mutex);
-
- hdmi_conf_apply(hdata);
+ drm_mode_copy(&hdata->current_mode, m);
+ hdata->cea_video_id = drm_match_cea_mode(mode);
}
-static void hdmi_poweron(struct hdmi_context *hdata)
+static void hdmi_enable(struct drm_encoder *encoder)
{
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct hdmi_resources *res = &hdata->res;
- mutex_lock(&hdata->hdmi_mutex);
- if (hdata->powered) {
- mutex_unlock(&hdata->hdmi_mutex);
+ if (hdata->powered)
return;
- }
hdata->powered = true;
- mutex_unlock(&hdata->hdmi_mutex);
-
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2063,17 +1744,32 @@ static void hdmi_poweron(struct hdmi_context *hdata)
clk_prepare_enable(res->sclk_hdmi);
hdmiphy_poweron(hdata);
- hdmi_commit(&hdata->display);
+ hdmi_conf_apply(hdata);
}
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_disable(struct drm_encoder *encoder)
{
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct hdmi_resources *res = &hdata->res;
+ struct drm_crtc *crtc = encoder->crtc;
+ const struct drm_crtc_helper_funcs *funcs = NULL;
- mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered)
- goto out;
- mutex_unlock(&hdata->hdmi_mutex);
+ return;
+
+ /*
+ * The SFRs of VP and Mixer are updated by the Vertical Sync of the
+ * Timing Generator, which is a part of HDMI, so the sequence to
+ * disable the TV Subsystem should be as follows:
+ * VP -> Mixer -> HDMI
+ *
+ * The code below disables the Mixer and VP (if used) prior to
+ * disabling HDMI.
+ */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->disable)
+ (*funcs->disable)(crtc);
/* HDMI System Disable */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2093,57 +1789,18 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
pm_runtime_put_sync(hdata->dev);
- mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
-
-out:
- mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_dpms(struct exynos_drm_display *display, int mode)
-{
- struct hdmi_context *hdata = display_to_hdmi(display);
- struct drm_encoder *encoder = hdata->encoder;
- struct drm_crtc *crtc = encoder->crtc;
- const struct drm_crtc_helper_funcs *funcs = NULL;
-
- DRM_DEBUG_KMS("mode %d\n", mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /*
- * The SFRs of VP and Mixer are updated by Vertical Sync of
- * Timing generator which is a part of HDMI so the sequence
- * to disable TV Subsystem should be as following,
- * VP -> Mixer -> HDMI
- *
- * Below codes will try to disable Mixer and VP(if used)
- * prior to disabling HDMI.
- */
- if (crtc)
- funcs = crtc->helper_private;
- if (funcs && funcs->disable)
- (*funcs->disable)(crtc);
-
- hdmi_poweroff(hdata);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static struct exynos_drm_display_ops hdmi_display_ops = {
- .create_connector = hdmi_create_connector,
+static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
- .dpms = hdmi_dpms,
- .commit = hdmi_commit,
+ .enable = hdmi_enable,
+ .disable = hdmi_disable,
+};
+
+static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2152,10 +1809,6 @@ static void hdmi_hotplug_work_func(struct work_struct *work)
hdata = container_of(work, struct hdmi_context, hotplug_work.work);
- mutex_lock(&hdata->hdmi_mutex);
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- mutex_unlock(&hdata->hdmi_mutex);
-
if (hdata->drm_dev)
drm_helper_hpd_irq_event(hdata->drm_dev);
}
@@ -2254,30 +1907,6 @@ fail:
return ret;
}
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
- (struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct s5p_hdmi_platform_data *pd;
- u32 value;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- goto err_data;
-
- if (!of_find_property(np, "hpd-gpio", &value)) {
- DRM_ERROR("no hpd gpio property found\n");
- goto err_data;
- }
-
- pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
-
- return pd;
-
-err_data:
- return NULL;
-}
-
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2301,10 +1930,33 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &hdata->encoder;
+ int ret, pipe;
hdata->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_HDMI);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+
+ ret = hdmi_create_connector(encoder);
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2338,43 +1990,30 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
static int hdmi_probe(struct platform_device *pdev)
{
struct device_node *ddc_node, *phy_node;
- struct s5p_hdmi_platform_data *pdata;
- struct hdmi_driver_data *drv_data;
const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
int ret;
- if (!dev->of_node)
- return -ENODEV;
-
- pdata = drm_hdmi_dt_parse_pdata(dev);
- if (!pdata)
- return -EINVAL;
-
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
if (!hdata)
return -ENOMEM;
- hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
- hdata->display.ops = &hdmi_display_ops;
-
- mutex_init(&hdata->hdmi_mutex);
-
- platform_set_drvdata(pdev, hdata);
-
- match = of_match_node(hdmi_match_types, dev->of_node);
+ match = of_match_device(hdmi_match_types, dev);
if (!match)
return -ENODEV;
- drv_data = (struct hdmi_driver_data *)match->data;
- hdata->type = drv_data->type;
- hdata->phy_confs = drv_data->phy_confs;
- hdata->phy_conf_count = drv_data->phy_conf_count;
+ hdata->drv_data = match->data;
+
+ platform_set_drvdata(pdev, hdata);
- hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
+ hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+ if (hdata->hpd_gpio < 0) {
+ DRM_ERROR("cannot get hpd gpio property\n");
+ return hdata->hpd_gpio;
+ }
ret = hdmi_resources_init(hdata);
if (ret) {
@@ -2426,7 +2065,7 @@ out_get_ddc_adpt:
}
out_get_phy_port:
- if (drv_data->is_apb_phy) {
+ if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(phy_node, 0);
if (!hdata->regs_hdmiphy) {
DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2449,8 +2088,6 @@ out_get_phy_port:
goto err_hdmiphy;
}
- hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..7f81cce966d4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -43,6 +43,7 @@
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
+#define VP_DEFAULT_WIN 2
/* The pixelformats that are natively supported by the mixer. */
#define MXR_FORMAT_RGB565 4
@@ -69,6 +70,24 @@ enum mixer_version_id {
MXR_VER_128_0_0_184,
};
+enum mixer_flag_bits {
+ MXR_BIT_POWERED,
+ MXR_BIT_VSYNC,
+};
+
+static const uint32_t mixer_formats[] = {
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vp_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
struct mixer_context {
struct platform_device *pdev;
struct device *dev;
@@ -76,13 +95,11 @@ struct mixer_context {
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
+ unsigned long flags;
bool interlace;
- bool powered;
bool vp_enabled;
bool has_sclk;
- u32 int_en;
- struct mutex mixer_mutex;
struct mixer_resources mixer_res;
enum mixer_version_id mxr_ver;
wait_queue_head_t wait_vsync_queue;
@@ -380,19 +397,20 @@ static void mixer_stop(struct mixer_context *ctx)
usleep_range(10000, 12000);
}
-static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
+static void vp_video_buffer(struct mixer_context *ctx,
+ struct exynos_drm_plane *plane)
{
struct mixer_resources *res = &ctx->mixer_res;
+ struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &state->crtc->mode;
unsigned long flags;
- struct exynos_drm_plane *plane;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
bool crcb_mode = false;
u32 val;
- plane = &ctx->planes[win];
-
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
break;
@@ -401,21 +419,21 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
break;
default:
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- plane->pixel_format);
+ fb->pixel_format);
return;
}
luma_addr[0] = plane->dma_addr[0];
chroma_addr[0] = plane->dma_addr[1];
- if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + plane->pitch;
- chroma_addr[1] = chroma_addr[0] + plane->pitch;
+ luma_addr[1] = luma_addr[0] + fb->pitches[0];
+ chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
}
} else {
ctx->interlace = false;
@@ -436,25 +454,25 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
- VP_IMG_VSIZE(plane->fb_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+ VP_IMG_VSIZE(fb->height));
/* chroma height has to be divided by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
- VP_IMG_VSIZE(plane->fb_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+ VP_IMG_VSIZE(fb->height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
- vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
+ vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
+ vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
vp_reg_write(res, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(plane->src_x));
vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
- vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+ vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
}
@@ -469,9 +487,9 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, plane->mode_height);
- mixer_cfg_rgb_fmt(ctx, plane->mode_height);
- mixer_cfg_layer(ctx, win, true);
+ mixer_cfg_scan(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_layer(ctx, plane->zpos, true);
mixer_run(ctx);
mixer_vsync_set_update(ctx, true);
@@ -491,15 +509,15 @@ static void mixer_layer_update(struct mixer_context *ctx)
static int mixer_setup_scale(const struct exynos_drm_plane *plane,
unsigned int *x_ratio, unsigned int *y_ratio)
{
- if (plane->crtc_width != plane->src_width) {
- if (plane->crtc_width == 2 * plane->src_width)
+ if (plane->crtc_w != plane->src_w) {
+ if (plane->crtc_w == 2 * plane->src_w)
*x_ratio = 1;
else
goto fail;
}
- if (plane->crtc_height != plane->src_height) {
- if (plane->crtc_height == 2 * plane->src_height)
+ if (plane->crtc_h != plane->src_h) {
+ if (plane->crtc_h == 2 * plane->src_h)
*y_ratio = 1;
else
goto fail;
@@ -512,20 +530,22 @@ fail:
return -ENOTSUPP;
}
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
+static void mixer_graph_buffer(struct mixer_context *ctx,
+ struct exynos_drm_plane *plane)
{
struct mixer_resources *res = &ctx->mixer_res;
+ struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &state->crtc->mode;
unsigned long flags;
- struct exynos_drm_plane *plane;
+ unsigned int win = plane->zpos;
unsigned int x_ratio = 0, y_ratio = 0;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
- plane = &ctx->planes[win];
-
- switch (plane->pixel_format) {
+ switch (fb->pixel_format) {
case DRM_FORMAT_XRGB4444:
fmt = MXR_FORMAT_ARGB4444;
break;
@@ -557,12 +577,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* converting dma address base and source offset */
dma_addr = plane->dma_addr[0]
- + (plane->src_x * plane->bpp >> 3)
- + (plane->src_y * plane->pitch);
+ + (plane->src_x * fb->bits_per_pixel >> 3)
+ + (plane->src_y * fb->pitches[0]);
src_x_offset = 0;
src_y_offset = 0;
- if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
ctx->interlace = true;
else
ctx->interlace = false;
@@ -576,18 +596,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
- plane->pitch / (plane->bpp >> 3));
+ fb->pitches[0] / (fb->bits_per_pixel >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(plane->mode_height);
- val |= MXR_MXR_RES_WIDTH(plane->mode_width);
+ val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
+ val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(plane->src_width);
- val |= MXR_GRP_WH_HEIGHT(plane->src_height);
+ val = MXR_GRP_WH_WIDTH(plane->src_w);
+ val |= MXR_GRP_WH_HEIGHT(plane->src_h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +625,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, plane->mode_height);
- mixer_cfg_rgb_fmt(ctx, plane->mode_height);
+ mixer_cfg_scan(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
@@ -710,6 +730,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct mixer_context *ctx = arg;
struct mixer_resources *res = &ctx->mixer_res;
u32 val, base, shadow;
+ int win;
spin_lock(&res->reg_slock);
@@ -718,6 +739,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlace scan need to check shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -731,8 +756,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
goto out;
}
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ for (win = 0 ; win < MIXER_WIN_NR ; win++) {
+ struct exynos_drm_plane *plane = &ctx->planes[win];
+
+ if (!plane->pending_fb)
+ continue;
+
+ exynos_drm_crtc_finish_update(ctx->crtc, plane);
+ }
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
@@ -743,11 +775,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
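The hunk above moves the acknowledge logic inside the vsync branch because the status register reports and clears vsync through different bits; a minimal illustration of the bit handling (register and bit names as used in this file, semantics assumed from the surrounding code):

u32 val = mixer_reg_read(res, MXR_INT_STATUS);

if (val & MXR_INT_STATUS_VSYNC) {
	val |= MXR_INT_CLEAR_VSYNC;	/* acknowledge: write-1-to-clear bit */
	val &= ~MXR_INT_STATUS_VSYNC;	/* don't echo the read-only status bit */
}
mixer_reg_write(res, MXR_INT_STATUS, val);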
@@ -882,8 +909,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
- mixer_ctx->dev);
+ ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
if (ret)
priv->pipe--;
@@ -892,8 +918,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- if (is_drm_iommu_supported(mixer_ctx->drm_dev))
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -901,14 +926,13 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
- if (!mixer_ctx->powered) {
- mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+ __set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return 0;
- }
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
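mixer_reg_writemask(), used heavily above, is not part of this diff; a plausible sketch under the usual (value, mask) convention where only bits inside the mask are modified:

static inline void mixer_reg_writemask(struct mixer_resources *res,
				       u32 reg_id, u32 val, u32 mask)
{
	u32 old = mixer_reg_read(res, reg_id);

	val = (val & mask) | (old & ~mask);	/* preserve bits outside the mask */
	mixer_reg_write(res, reg_id, val);
}

With that convention, mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC) sets only the vsync enable bit, which is how the enable path above uses it.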
@@ -918,48 +942,48 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ __clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ return;
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- DRM_DEBUG_KMS("win: %d\n", win);
+ DRM_DEBUG_KMS("win: %d\n", plane->zpos);
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
- if (win > 1 && mixer_ctx->vp_enabled)
- vp_video_buffer(mixer_ctx, win);
+ if (plane->zpos > 1 && mixer_ctx->vp_enabled)
+ vp_video_buffer(mixer_ctx, plane);
else
- mixer_graph_buffer(mixer_ctx, win);
+ mixer_graph_buffer(mixer_ctx, plane);
}
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
+ struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
- DRM_DEBUG_KMS("win: %d\n", win);
+ DRM_DEBUG_KMS("win: %d\n", plane->zpos);
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
- mixer_cfg_layer(mixer_ctx, win, false);
+ mixer_cfg_layer(mixer_ctx, plane->zpos, false);
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -970,12 +994,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
struct mixer_context *mixer_ctx = crtc->ctx;
int err;
- mutex_lock(&mixer_ctx->mixer_mutex);
- if (!mixer_ctx->powered) {
- mutex_unlock(&mixer_ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- }
- mutex_unlock(&mixer_ctx->mixer_mutex);
err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
if (err < 0) {
@@ -1003,13 +1023,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
struct mixer_resources *res = &ctx->mixer_res;
int ret;
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
+ if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- }
-
- mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
@@ -1041,13 +1056,14 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
}
}
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
+ set_bit(MXR_BIT_POWERED, &ctx->flags);
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ }
mixer_win_reset(ctx);
}
@@ -1057,24 +1073,16 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
struct mixer_resources *res = &ctx->mixer_res;
int i;
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
+ if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- }
- mutex_unlock(&ctx->mixer_mutex);
mixer_stop(ctx);
mixer_regs_dump(ctx);
for (i = 0; i < MIXER_WIN_NR; i++)
- mixer_win_disable(crtc, i);
+ mixer_disable_plane(crtc, &ctx->planes[i]);
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
- mutex_unlock(&ctx->mixer_mutex);
+ clear_bit(MXR_BIT_POWERED, &ctx->flags);
clk_disable_unprepare(res->hdmi);
clk_disable_unprepare(res->mixer);
@@ -1113,8 +1121,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.wait_for_vblank = mixer_wait_for_vblank,
- .win_commit = mixer_win_commit,
- .win_disable = mixer_win_disable,
+ .update_plane = mixer_update_plane,
+ .disable_plane = mixer_disable_plane,
};
static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1177,7 +1185,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
- enum drm_plane_type type;
unsigned int zpos;
int ret;
@@ -1186,10 +1193,23 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
return ret;
for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+ enum drm_plane_type type;
+ const uint32_t *formats;
+ unsigned int fcount;
+
type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
+ if (zpos < VP_DEFAULT_WIN) {
+ formats = mixer_formats;
+ fcount = ARRAY_SIZE(mixer_formats);
+ } else {
+ formats = vp_formats;
+ fcount = ARRAY_SIZE(vp_formats);
+ }
+
ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, zpos);
+ 1 << ctx->pipe, type, formats, fcount,
+ zpos);
if (ret)
return ret;
}
@@ -1236,8 +1256,6 @@ static int mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
- mutex_init(&ctx->mixer_mutex);
-
if (dev->of_node) {
const struct of_device_id *match;
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
new file mode 100644
index 000000000000..c78cf3f605d0
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -0,0 +1,18 @@
+config DRM_FSL_DCU
+ tristate "DRM Support for Freescale DCU"
+ depends on DRM && OF && ARM
+ select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_PANEL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select REGMAP_MMIO
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Freescale DCU chipset.
+ If M is selected, the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
new file mode 100644
index 000000000000..6ea1523ae6ec
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -0,0 +1,7 @@
+fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
+ fsl_dcu_drm_kms.o \
+ fsl_dcu_drm_rgb.o \
+ fsl_dcu_drm_plane.o \
+ fsl_dcu_drm_crtc.o \
+ fsl_dcu_drm_fbdev.o
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
new file mode 100644
index 000000000000..82a3d311e164
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
+static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return 0;
+}
+
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ int ret;
+
+ ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ if (ret)
+ dev_err(fsl_dev->dev, "Disable CRTC failed\n");
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ dev_err(fsl_dev->dev, "set DCU_UPDATE_MODE failed\n");
+}
+
+static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ int ret;
+
+ ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+ if (ret)
+ dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ dev_err(fsl_dev->dev, "set DCU_UPDATE_MODE failed\n");
+}
+
+static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+ unsigned long dcuclk;
+ int ret;
+
+ index = drm_crtc_index(crtc);
+ dcuclk = clk_get_rate(fsl_dev->clk);
+ div = dcuclk / mode->clock / 1000;
+
+ /* Configure timings: */
+ hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+
+ ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+ DCU_HSYN_PARA_BP(hbp) |
+ DCU_HSYN_PARA_PW(hsw) |
+ DCU_HSYN_PARA_FP(hfp));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+ DCU_VSYN_PARA_BP(vbp) |
+ DCU_VSYN_PARA_PW(vsw) |
+ DCU_VSYN_PARA_FP(vfp));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+ DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+ DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
+ DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+ DCU_BGND_G(0) | DCU_BGND_B(0));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+ DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+ DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+ DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ goto set_failed;
+ return;
+set_failed:
+ dev_err(dev->dev, "set DCU register failed\n");
+}
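For a hypothetical 1080p60 mode (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200; vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125; clock 148500 kHz) the timing fields above work out as follows, assuming a 297 MHz DCU clock (all numbers illustrative):

/*
 * hbp = 2200 - 2052 = 148        vbp = 1125 - 1089 = 36
 * hfp = 2008 - 1920 = 88         vfp = 1084 - 1080 = 4
 * hsw = 2052 - 2008 = 44         vsw = 1089 - 1084 = 5
 * div = 297000000 / 148500 / 1000 = 2
 */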
+
+static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+ .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
+ .atomic_check = fsl_dcu_drm_crtc_atomic_check,
+ .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
+ .disable = fsl_dcu_drm_disable_crtc,
+ .enable = fsl_dcu_drm_crtc_enable,
+ .mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
+ .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
+};
+
+static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+};
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
+{
+ struct drm_plane *primary;
+ struct drm_crtc *crtc = &fsl_dev->crtc;
+ unsigned int i, j, reg_num;
+ int ret;
+
+ primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+ ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
+ &fsl_dcu_drm_crtc_funcs);
+ if (ret < 0)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
+
+ if (!strcmp(fsl_dev->soc->name, "ls1021a"))
+ reg_num = LS1021A_LAYER_REG_NUM;
+ else
+ reg_num = VF610_LAYER_REG_NUM;
+ for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
+ for (j = 0; j < reg_num; j++) {
+ ret = regmap_write(fsl_dev->regmap,
+ DCU_CTRLDESCLN(i, j), 0);
+ if (ret)
+ goto init_failed;
+ }
+ }
+ ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ if (ret)
+ goto init_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ goto init_failed;
+
+ return 0;
+init_failed:
+ dev_err(fsl_dev->dev, "init DCU register failed\n");
+ return ret;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
new file mode 100644
index 000000000000..43d4da2c5fe5
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CRTC_H__
+#define __FSL_DCU_DRM_CRTC_H__
+
+struct fsl_dcu_drm_device;
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
new file mode 100644
index 000000000000..9a8e2da47158
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct regmap_config fsl_dcu_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .cache_type = REGCACHE_RBTREE,
+};
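REGCACHE_RBTREE keeps a shadow copy of every register, which is what allows the PM hooks later in this file to restore hardware state with a plain cache sync; the expected pairing, sketched here as a pattern rather than new driver code:

/* suspend path */
regcache_cache_only(map, true);	/* stop touching the powered-down block */
regcache_mark_dirty(map);	/* force a full rewrite on the next sync */

/* resume path */
regcache_cache_only(map, false);
regcache_sync(map);		/* replay cached values into the hardware */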
+
+static int fsl_dcu_drm_irq_init(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ unsigned int value;
+ int ret;
+
+ ret = drm_irq_install(dev, fsl_dev->irq);
+ if (ret < 0)
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+
+ ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+ if (ret)
+ dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+ ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+ if (ret)
+ dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+ value &= DCU_INT_MASK_VBLANK;
+ ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+ if (ret)
+ dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+ return ret;
+}
+
+static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+{
+ struct device *dev = drm->dev;
+ struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+ int ret;
+
+ ret = fsl_dcu_drm_modeset_init(fsl_dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize mode setting\n");
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize vblank\n");
+ goto done;
+ }
+ drm->vblank_disable_allowed = true;
+
+ ret = fsl_dcu_drm_irq_init(drm);
+ if (ret < 0)
+ goto done;
+ drm->irq_enabled = true;
+
+ fsl_dcu_fbdev_init(drm);
+
+ return 0;
+done:
+ if (ret) {
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+ drm_irq_uninstall(drm);
+ drm->dev_private = NULL;
+ }
+
+ return ret;
+}
+
+static int fsl_dcu_unload(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+ drm_irq_uninstall(dev);
+
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+}
+
+static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ unsigned int int_status;
+ int ret;
+
+ ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
+ if (ret)
+ dev_err(dev->dev, "read DCU_INT_STATUS failed\n");
+ if (int_status & DCU_INT_STATUS_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
+ if (ret)
+ dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+ ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+ if (ret)
+ dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+ if (ret)
+ dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+ value &= ~DCU_INT_MASK_VBLANK;
+ ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+ if (ret)
+ dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+ return 0;
+}
+
+static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+ if (ret)
+ dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+ value |= DCU_INT_MASK_VBLANK;
+ ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+ if (ret)
+ dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+}
+
+static const struct file_operations fsl_dcu_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver fsl_dcu_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+ | DRIVER_PRIME | DRIVER_ATOMIC,
+ .load = fsl_dcu_load,
+ .unload = fsl_dcu_unload,
+ .preclose = fsl_dcu_drm_preclose,
+ .irq_handler = fsl_dcu_drm_irq,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = fsl_dcu_drm_enable_vblank,
+ .disable_vblank = fsl_dcu_drm_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &fsl_dcu_drm_fops,
+ .name = "fsl-dcu-drm",
+ .desc = "Freescale DCU DRM",
+ .date = "20150213",
+ .major = 1,
+ .minor = 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int fsl_dcu_drm_pm_suspend(struct device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+
+ if (!fsl_dev)
+ return 0;
+
+ drm_kms_helper_poll_disable(fsl_dev->drm);
+ regcache_cache_only(fsl_dev->regmap, true);
+ regcache_mark_dirty(fsl_dev->regmap);
+ clk_disable(fsl_dev->clk);
+ clk_unprepare(fsl_dev->clk);
+
+ return 0;
+}
+
+static int fsl_dcu_drm_pm_resume(struct device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!fsl_dev)
+ return 0;
+
+ ret = clk_enable(fsl_dev->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable dcu clk\n");
+ clk_unprepare(fsl_dev->clk);
+ return ret;
+ }
+ ret = clk_prepare(fsl_dev->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare dcu clk\n");
+ return ret;
+ }
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+ regcache_cache_only(fsl_dev->regmap, false);
+ regcache_sync(fsl_dev->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_dcu_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume)
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
+ .name = "ls1021a",
+ .total_layer = 16,
+ .max_layer = 4,
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
+ .name = "vf610",
+ .total_layer = 64,
+ .max_layer = 6,
+};
+
+static const struct of_device_id fsl_dcu_of_match[] = {
+ {
+ .compatible = "fsl,ls1021a-dcu",
+ .data = &fsl_dcu_ls1021a_data,
+ }, {
+ .compatible = "fsl,vf610-dcu",
+ .data = &fsl_dcu_vf610_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, fsl_dcu_of_match);
+
+static int fsl_dcu_drm_probe(struct platform_device *pdev)
+{
+ struct fsl_dcu_drm_device *fsl_dev;
+ struct drm_device *drm;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct drm_driver *driver = &fsl_dcu_drm_driver;
+ const struct of_device_id *id;
+ int ret;
+
+ fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
+ if (!fsl_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not get memory IO resource\n");
+ return -ENODEV;
+ }
+
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ return ret;
+ }
+
+ fsl_dev->irq = platform_get_irq(pdev, 0);
+ if (fsl_dev->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return -ENXIO;
+ }
+
+ fsl_dev->clk = devm_clk_get(dev, "dcu");
+ if (IS_ERR(fsl_dev->clk)) {
+ ret = PTR_ERR(fsl_dev->clk);
+ dev_err(dev, "failed to get dcu clock\n");
+ return ret;
+ }
+ ret = clk_prepare(fsl_dev->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare dcu clk\n");
+ return ret;
+ }
+ ret = clk_enable(fsl_dev->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable dcu clk\n");
+ clk_unprepare(fsl_dev->clk);
+ return ret;
+ }
+
+ fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
+ &fsl_dcu_regmap_config);
+ if (IS_ERR(fsl_dev->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ ret = PTR_ERR(fsl_dev->regmap);
+ goto disable_clk;
+ }
+
+ id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+ if (!id) {
+ ret = -ENODEV;
+ goto disable_clk;
+ }
+ fsl_dev->soc = id->data;
+
+ drm = drm_dev_alloc(driver, dev);
+ if (!drm) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ fsl_dev->dev = dev;
+ fsl_dev->drm = drm;
+ fsl_dev->np = dev->of_node;
+ drm->dev_private = fsl_dev;
+ dev_set_drvdata(dev, fsl_dev);
+ drm_dev_set_unique(drm, dev_name(dev));
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto unref;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+ driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm->primary->index);
+
+ return 0;
+
+unref:
+ drm_dev_unref(drm);
+disable_clk:
+ clk_disable(fsl_dev->clk);
+ clk_unprepare(fsl_dev->clk);
+ return ret;
+}
+
+static int fsl_dcu_drm_remove(struct platform_device *pdev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+
+ drm_put_dev(fsl_dev->drm);
+
+ return 0;
+}
+
+static struct platform_driver fsl_dcu_drm_platform_driver = {
+ .probe = fsl_dcu_drm_probe,
+ .remove = fsl_dcu_drm_remove,
+ .driver = {
+ .name = "fsl-dcu",
+ .pm = &fsl_dcu_drm_pm_ops,
+ .of_match_table = fsl_dcu_of_match,
+ },
+};
+
+module_platform_driver(fsl_dcu_drm_platform_driver);
+
+MODULE_DESCRIPTION("Freescale DCU DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
new file mode 100644
index 000000000000..579b9e44e764
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_DRV_H__
+#define __FSL_DCU_DRM_DRV_H__
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_output.h"
+#include "fsl_dcu_drm_plane.h"
+
+#define DCU_DCU_MODE 0x0010
+#define DCU_MODE_BLEND_ITER(x) ((x) << 20)
+#define DCU_MODE_RASTER_EN BIT(14)
+#define DCU_MODE_DCU_MODE(x) (x)
+#define DCU_MODE_DCU_MODE_MASK 0x03
+#define DCU_MODE_OFF 0
+#define DCU_MODE_NORMAL 1
+#define DCU_MODE_TEST 2
+#define DCU_MODE_COLORBAR 3
+
+#define DCU_BGND 0x0014
+#define DCU_BGND_R(x) ((x) << 16)
+#define DCU_BGND_G(x) ((x) << 8)
+#define DCU_BGND_B(x) (x)
+
+#define DCU_DISP_SIZE 0x0018
+#define DCU_DISP_SIZE_DELTA_Y(x) ((x) << 16)
+/* Register value is 1/16 of the horizontal resolution */
+#define DCU_DISP_SIZE_DELTA_X(x) ((x) >> 4)
+
+#define DCU_HSYN_PARA 0x001c
+#define DCU_HSYN_PARA_BP(x) ((x) << 22)
+#define DCU_HSYN_PARA_PW(x) ((x) << 11)
+#define DCU_HSYN_PARA_FP(x) (x)
+
+#define DCU_VSYN_PARA 0x0020
+#define DCU_VSYN_PARA_BP(x) ((x) << 22)
+#define DCU_VSYN_PARA_PW(x) ((x) << 11)
+#define DCU_VSYN_PARA_FP(x) (x)
+
+#define DCU_SYN_POL 0x0024
+#define DCU_SYN_POL_INV_PXCK_FALL (0 << 6)
+#define DCU_SYN_POL_NEG_REMAIN (0 << 5)
+#define DCU_SYN_POL_INV_VS_LOW BIT(1)
+#define DCU_SYN_POL_INV_HS_LOW BIT(0)
+
+#define DCU_THRESHOLD 0x0028
+#define DCU_THRESHOLD_LS_BF_VS(x) ((x) << 16)
+#define DCU_THRESHOLD_OUT_BUF_HIGH(x) ((x) << 8)
+#define DCU_THRESHOLD_OUT_BUF_LOW(x) (x)
+#define BF_VS_VAL 0x03
+#define BUF_MAX_VAL 0x78
+#define BUF_MIN_VAL 0x0a
+
+#define DCU_INT_STATUS 0x002C
+#define DCU_INT_STATUS_VSYNC BIT(0)
+#define DCU_INT_STATUS_UNDRUN BIT(1)
+#define DCU_INT_STATUS_LSBFVS BIT(2)
+#define DCU_INT_STATUS_VBLANK BIT(3)
+#define DCU_INT_STATUS_CRCREADY BIT(4)
+#define DCU_INT_STATUS_CRCOVERFLOW BIT(5)
+#define DCU_INT_STATUS_P1FIFOLO BIT(6)
+#define DCU_INT_STATUS_P1FIFOHI BIT(7)
+#define DCU_INT_STATUS_P2FIFOLO BIT(8)
+#define DCU_INT_STATUS_P2FIFOHI BIT(9)
+#define DCU_INT_STATUS_PROGEND BIT(10)
+#define DCU_INT_STATUS_IPMERROR BIT(11)
+#define DCU_INT_STATUS_LYRTRANS BIT(12)
+#define DCU_INT_STATUS_DMATRANS BIT(14)
+#define DCU_INT_STATUS_P3FIFOLO BIT(16)
+#define DCU_INT_STATUS_P3FIFOHI BIT(17)
+#define DCU_INT_STATUS_P4FIFOLO BIT(18)
+#define DCU_INT_STATUS_P4FIFOHI BIT(19)
+#define DCU_INT_STATUS_P1EMPTY BIT(26)
+#define DCU_INT_STATUS_P2EMPTY BIT(27)
+#define DCU_INT_STATUS_P3EMPTY BIT(28)
+#define DCU_INT_STATUS_P4EMPTY BIT(29)
+
+#define DCU_INT_MASK 0x0030
+#define DCU_INT_MASK_VSYNC BIT(0)
+#define DCU_INT_MASK_UNDRUN BIT(1)
+#define DCU_INT_MASK_LSBFVS BIT(2)
+#define DCU_INT_MASK_VBLANK BIT(3)
+#define DCU_INT_MASK_CRCREADY BIT(4)
+#define DCU_INT_MASK_CRCOVERFLOW BIT(5)
+#define DCU_INT_MASK_P1FIFOLO BIT(6)
+#define DCU_INT_MASK_P1FIFOHI BIT(7)
+#define DCU_INT_MASK_P2FIFOLO BIT(8)
+#define DCU_INT_MASK_P2FIFOHI BIT(9)
+#define DCU_INT_MASK_PROGEND BIT(10)
+#define DCU_INT_MASK_IPMERROR BIT(11)
+#define DCU_INT_MASK_LYRTRANS BIT(12)
+#define DCU_INT_MASK_DMATRANS BIT(14)
+#define DCU_INT_MASK_P3FIFOLO BIT(16)
+#define DCU_INT_MASK_P3FIFOHI BIT(17)
+#define DCU_INT_MASK_P4FIFOLO BIT(18)
+#define DCU_INT_MASK_P4FIFOHI BIT(19)
+#define DCU_INT_MASK_P1EMPTY BIT(26)
+#define DCU_INT_MASK_P2EMPTY BIT(27)
+#define DCU_INT_MASK_P3EMPTY BIT(28)
+#define DCU_INT_MASK_P4EMPTY BIT(29)
+
+#define DCU_DIV_RATIO 0x0054
+
+#define DCU_UPDATE_MODE 0x00cc
+#define DCU_UPDATE_MODE_MODE BIT(31)
+#define DCU_UPDATE_MODE_READREG BIT(30)
+
+#define DCU_DCFB_MAX 0x300
+
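+/*
+ * Per-layer control descriptor words: each layer owns a 0x40-byte bank
+ * starting at 0x200, and "reg" is the 1-based word number within the bank.
+ */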
+#define DCU_CTRLDESCLN(layer, reg) (0x200 + ((reg) - 1) * 4 + (layer) * 0x40)
+
+#define DCU_LAYER_HEIGHT(x) ((x) << 16)
+#define DCU_LAYER_WIDTH(x) (x)
+
+#define DCU_LAYER_POSY(x) ((x) << 16)
+#define DCU_LAYER_POSX(x) (x)
+
+#define DCU_LAYER_EN BIT(31)
+#define DCU_LAYER_TILE_EN BIT(30)
+#define DCU_LAYER_DATA_SEL_CLUT BIT(29)
+#define DCU_LAYER_SAFETY_EN BIT(28)
+#define DCU_LAYER_TRANS(x) ((x) << 20)
+#define DCU_LAYER_BPP(x) ((x) << 16)
+#define DCU_LAYER_RLE_EN BIT(15)
+#define DCU_LAYER_LUOFFS(x) ((x) << 4)
+#define DCU_LAYER_BB_ON BIT(2)
+#define DCU_LAYER_AB(x) (x)
+
+#define DCU_LAYER_CKMAX_R(x) ((x) << 16)
+#define DCU_LAYER_CKMAX_G(x) ((x) << 8)
+#define DCU_LAYER_CKMAX_B(x) (x)
+
+#define DCU_LAYER_CKMIN_R(x) ((x) << 16)
+#define DCU_LAYER_CKMIN_G(x) ((x) << 8)
+#define DCU_LAYER_CKMIN_B(x) (x)
+
+#define DCU_LAYER_TILE_VER(x) ((x) << 16)
+#define DCU_LAYER_TILE_HOR(x) (x)
+
+#define DCU_LAYER_FG_FCOLOR(x) (x)
+
+#define DCU_LAYER_BG_BCOLOR(x) (x)
+
+#define DCU_LAYER_POST_SKIP(x) ((x) << 16)
+#define DCU_LAYER_PRE_SKIP(x) (x)
+
+#define FSL_DCU_RGB565 4
+#define FSL_DCU_RGB888 5
+#define FSL_DCU_ARGB8888 6
+#define FSL_DCU_ARGB1555 11
+#define FSL_DCU_ARGB4444 12
+#define FSL_DCU_YUV422 14
+
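+/* Number of control descriptor words per layer; the LS1021A has an extra one. */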
+#define VF610_LAYER_REG_NUM 9
+#define LS1021A_LAYER_REG_NUM 10
+
+struct clk;
+struct device;
+struct drm_device;
+
+struct fsl_dcu_soc_data {
+ const char *name;
+ /* total number of layers in the DCU */
+ unsigned int total_layer;
+ /* maximum number of layers the DCU supports */
+ unsigned int max_layer;
+};
+
+struct fsl_dcu_drm_device {
+ struct device *dev;
+ struct device_node *np;
+ struct regmap *regmap;
+ int irq;
+ struct clk *clk;
+ /* protects hardware registers */
+ spinlock_t irq_lock;
+ struct drm_device *drm;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct fsl_dcu_drm_connector connector;
+ const struct fsl_dcu_soc_data *soc;
+};
+
+void fsl_dcu_fbdev_init(struct drm_device *dev);
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
new file mode 100644
index 000000000000..8b8b819ea704
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+/* initialize fbdev helper */
+void fsl_dcu_fbdev_init(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
+
+ fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
new file mode 100644
index 000000000000..0ef5959710e7
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_fb_cma_create,
+};
+
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
+{
+ drm_mode_config_init(fsl_dev->drm);
+
+ fsl_dev->drm->mode_config.min_width = 0;
+ fsl_dev->drm->mode_config.min_height = 0;
+ fsl_dev->drm->mode_config.max_width = 2031;
+ fsl_dev->drm->mode_config.max_height = 2047;
+ fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
+
+ drm_kms_helper_poll_init(fsl_dev->drm);
+ fsl_dcu_drm_crtc_create(fsl_dev);
+ fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+ fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ drm_mode_config_reset(fsl_dev->drm);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
new file mode 100644
index 000000000000..7093109fbc21
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CONNECTOR_H__
+#define __FSL_DCU_DRM_CONNECTOR_H__
+
+struct fsl_dcu_drm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+ struct drm_panel *panel;
+};
+
+static inline struct fsl_dcu_drm_connector *
+to_fsl_dcu_connector(struct drm_connector *con)
+{
+ return con ? container_of(con, struct fsl_dcu_drm_connector, base)
+ : NULL;
+}
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_encoder *encoder);
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_crtc *crtc);
+
+#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
new file mode 100644
index 000000000000..82be6b86a168
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
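+/*
+ * Map a DRM plane index onto a DCU layer, counting down from the highest
+ * layer number (presumably because lower-numbered layers blend on top).
+ */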
+static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
+{
+ struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+ unsigned int total_layer = fsl_dev->soc->total_layer;
+ unsigned int index;
+
+ index = drm_plane_index(plane);
+ if (index < total_layer)
+ return total_layer - index - 1;
+
+ dev_err(fsl_dev->dev, "no more layers left\n");
+ return -EINVAL;
+}
+
+static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+
+ if (!fb)
+ return 0;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_YUV422:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+ unsigned int value;
+ int index, ret;
+
+ index = fsl_dcu_drm_plane_index(plane);
+ if (index < 0)
+ return;
+
+ ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
+ if (ret)
+ dev_err(fsl_dev->dev, "read DCU_CTRLDESCLN failed\n");
+ value &= ~DCU_LAYER_EN;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
+ if (ret)
+ dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *gem;
+ unsigned int alpha, bpp;
+ int index, ret;
+
+ if (!fb)
+ return;
+
+ index = fsl_dcu_drm_plane_index(plane);
+ if (index < 0)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ bpp = FSL_DCU_RGB565;
+ alpha = 0xff;
+ break;
+ case DRM_FORMAT_RGB888:
+ bpp = FSL_DCU_RGB888;
+ alpha = 0xff;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ bpp = FSL_DCU_ARGB8888;
+ alpha = 0xff;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ bpp = FSL_DCU_ARGB4444;
+ alpha = 0xff;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ bpp = FSL_DCU_ARGB1555;
+ alpha = 0xff;
+ break;
+ case DRM_FORMAT_YUV422:
+ bpp = FSL_DCU_YUV422;
+ alpha = 0xff;
+ break;
+ default:
+ return;
+ }
+
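+ /*
+ * Program the layer's control descriptor: size, position, buffer
+ * address, enable/alpha/format word and chroma-key bounds.
+ */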
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+ DCU_LAYER_HEIGHT(state->crtc_h) |
+ DCU_LAYER_WIDTH(state->crtc_w));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+ DCU_LAYER_POSY(state->crtc_y) |
+ DCU_LAYER_POSX(state->crtc_x));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap,
+ DCU_CTRLDESCLN(index, 3), gem->paddr);
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+ DCU_LAYER_EN |
+ DCU_LAYER_TRANS(alpha) |
+ DCU_LAYER_BPP(bpp) |
+ DCU_LAYER_AB(0));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+ DCU_LAYER_CKMAX_R(0xFF) |
+ DCU_LAYER_CKMAX_G(0xFF) |
+ DCU_LAYER_CKMAX_B(0xFF));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+ DCU_LAYER_CKMIN_R(0) |
+ DCU_LAYER_CKMIN_G(0) |
+ DCU_LAYER_CKMIN_B(0));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+ DCU_LAYER_FG_FCOLOR(0));
+ if (ret)
+ goto set_failed;
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+ DCU_LAYER_BG_BCOLOR(0));
+ if (ret)
+ goto set_failed;
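+ /* Only the LS1021A has the tenth descriptor word (pre/post skip). */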
+ if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
+ ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+ DCU_LAYER_POST_SKIP(0) |
+ DCU_LAYER_PRE_SKIP(0));
+ if (ret)
+ goto set_failed;
+ }
+ ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+ if (ret)
+ goto set_failed;
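+ /* Ask the controller to take over the newly written register set. */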
+ ret = regmap_write(fsl_dev->regmap,
+ DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+ if (ret)
+ goto set_failed;
+ return;
+
+set_failed:
+ dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
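+/* CMA-backed framebuffers need no per-commit setup, so these are no-ops. */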
+static void
+fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
+{
+}
+
+static int
+fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
+ .atomic_check = fsl_dcu_drm_plane_atomic_check,
+ .atomic_disable = fsl_dcu_drm_plane_atomic_disable,
+ .atomic_update = fsl_dcu_drm_plane_atomic_update,
+ .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
+ .prepare_fb = fsl_dcu_drm_plane_prepare_fb,
+};
+
+static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .destroy = fsl_dcu_drm_plane_destroy,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .update_plane = drm_atomic_helper_update_plane,
+};
+
+static const u32 fsl_dcu_drm_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_YUV422,
+};
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (!primary) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ /* possible_crtcs will be filled in later by crtc_init */
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &fsl_dcu_drm_plane_funcs,
+ fsl_dcu_drm_plane_formats,
+ ARRAY_SIZE(fsl_dcu_drm_plane_formats),
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret) {
+ kfree(primary);
+ return NULL;
+ }
+ drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);
+
+ return primary;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
new file mode 100644
index 000000000000..d657f088d859
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_PLANE_H__
+#define __FSL_DCU_DRM_PLANE_H__
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
+
+#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
new file mode 100644
index 000000000000..fe8ab5da04fb
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+static int
+fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .atomic_check = fsl_dcu_drm_encoder_atomic_check,
+ .disable = fsl_dcu_drm_encoder_disable,
+ .enable = fsl_dcu_drm_encoder_enable,
+};
+
+static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = fsl_dcu_drm_encoder_destroy,
+};
+
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder = &fsl_dev->encoder;
+ int ret;
+
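+ /* This driver registers a single CRTC, so only bit 0 can ever be set. */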
+ encoder->possible_crtcs = 1;
+ ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret < 0)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ return 0;
+}
+
+static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .destroy = fsl_dcu_drm_connector_destroy,
+ .detect = fsl_dcu_drm_connector_detect,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_encoder *
+fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+ struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
+ return fsl_con->encoder;
+}
+
+static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+{
+ struct fsl_dcu_drm_connector *fsl_connector;
+ int (*get_modes)(struct drm_panel *panel);
+ int num_modes = 0;
+
+ fsl_connector = to_fsl_dcu_connector(connector);
+ if (fsl_connector->panel && fsl_connector->panel->funcs &&
+ fsl_connector->panel->funcs->get_modes) {
+ get_modes = fsl_connector->panel->funcs->get_modes;
+ num_modes = get_modes(fsl_connector->panel);
+ }
+
+ return num_modes;
+}
+
+static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
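+ /*
+ * DCU_DISP_SIZE stores the width as 1/16 of the horizontal
+ * resolution, so reject modes that are not a multiple of 16 wide.
+ */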
+ if (mode->hdisplay & 0xf)
+ return MODE_ERROR;
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .best_encoder = fsl_dcu_drm_connector_best_encoder,
+ .get_modes = fsl_dcu_drm_connector_get_modes,
+ .mode_valid = fsl_dcu_drm_connector_mode_valid,
+};
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = &fsl_dev->connector.base;
+ struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
+ struct device_node *panel_node;
+ int ret;
+
+ fsl_dev->connector.encoder = encoder;
+
+ ret = drm_connector_init(fsl_dev->drm, connector,
+ &fsl_dcu_drm_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(connector, &connector_helper_funcs);
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ goto err_sysfs;
+
+ drm_object_property_set_value(&connector->base,
+ mode_config->dpms_property,
+ DRM_MODE_DPMS_OFF);
+
+ panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+ if (panel_node) {
+ fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!fsl_dev->connector.panel) {
+ ret = -EPROBE_DEFER;
+ goto err_sysfs;
+ }
+ }
+
+ if (fsl_dev->connector.panel) {
+ ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+ if (ret) {
+ dev_err(fsl_dev->dev, "failed to attach panel\n");
+ goto err_sysfs;
+ }
+ }
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(connector);
+err_cleanup:
+ drm_connector_cleanup(connector);
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a6ceb7..db9f7d011832 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@ static void psbfb_copyarea_accel(struct fb_info *info,
break;
default:
/* software fallback */
- cfb_copyarea(info, a);
+ drm_fb_helper_cfb_copyarea(info, a);
return;
}
if (!gma_power_begin(dev, false)) {
- cfb_copyarea(info, a);
+ drm_fb_helper_cfb_copyarea(info, a);
return;
}
psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@ void psbfb_copyarea(struct fb_info *info,
/* Avoid the 8 pixel erratum */
if (region->width == 8 || region->height == 8 ||
(info->flags & FBINFO_HWACCEL_DISABLED))
- return cfb_copyarea(info, region);
+ return drm_fb_helper_cfb_copyarea(info, region);
psbfb_copyarea_accel(info, region);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6d3757..2eaf1b31c7bd 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@ static struct fb_ops psbfb_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
.fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@ static struct fb_ops psbfb_roll_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@ static struct fb_ops psbfb_unaccel_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_ioctl = psbfb_ioctl,
};
@@ -343,7 +343,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
- struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
@@ -409,9 +408,9 @@ static int psbfb_create(struct psb_fbdev *fbdev,
mutex_lock(&dev->struct_mutex);
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_err1;
}
info->par = fbdev;
@@ -426,7 +425,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
psbfb->fbdev = info;
fbdev->psb_fb_helper.fb = fb;
- fbdev->psb_fb_helper.fbdev = info;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
} else /* Software */
info->fbops = &psbfb_unaccel_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
- }
-
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
@@ -483,6 +470,8 @@ out_unref:
psb_gtt_free_range(dev, backing);
else
drm_gem_object_unreference(&backing->gem);
+
+ drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@ static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
- struct fb_info *info;
struct psb_framebuffer *psbfb = &fbdev->pfb;
- if (fbdev->psb_fb_helper.fbdev) {
- info = fbdev->psb_fb_helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+ drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
+
drm_fb_helper_fini(&fbdev->psb_fb_helper);
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 2aaa3c88999e..00416f23b5cb 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -54,7 +54,7 @@ static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
}
/* ADI recommended values for proper operation. */
-static const struct reg_default adv7511_fixed_registers[] = {
+static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x98, 0x03 },
{ 0x9a, 0xe0 },
{ 0x9c, 0x30 },
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
uint8_t *buf, size_t size)
{
- buf[PB(0)] = tda998x_cksum(buf, size);
-
reg_clear(priv, REG_DIP_IF_FLAGS, bit);
reg_write_range(priv, addr, buf, size);
reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
buf[PB(4)] = p->audio_frame[4];
buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+ buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
sizeof(buf));
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9bcd9d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,30 +36,6 @@ config DRM_I915
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
-config DRM_I915_KMS
- bool "Enable modesetting on intel by default"
- depends on DRM_I915
- default y
- help
- Choose this option if you want kernel modesetting enabled by default.
-
- If in doubt, say "Y".
-
-config DRM_I915_FBDEV
- bool "Enable legacy fbdev support for the modesetting intel driver"
- depends on DRM_I915
- select DRM_KMS_FB_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- default y
- help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provide the linux console
- support on top of the intel modesetting driver.
-
- If in doubt, say "Y".
-
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48e1d75..998b4643109f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -6,12 +6,13 @@
# core driver code
i915-y := i915_drv.o \
+ i915_irq.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
+ intel_csr.o \
intel_pm.o \
- intel_runtime_pm.o \
- intel_csr.o
+ intel_runtime_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_context.o \
- i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
+ i915_gem_fence.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_render_state.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
- i915_irq.o \
i915_trace_points.o \
intel_lrc.o \
+ intel_mocs.o \
intel_ringbuffer.o \
intel_uncore.o
@@ -46,18 +48,21 @@ i915-y += intel_renderstate_gen6.o \
# modesetting core code
i915-y += intel_audio.o \
+ intel_atomic.o \
+ intel_atomic_plane.o \
intel_bios.o \
intel_display.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
+ intel_hotplug.o \
intel_modes.o \
intel_overlay.o \
intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
# modesetting output/encoder code
i915-y += dvo_ch7017.o \
@@ -66,15 +71,13 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
- intel_atomic.o \
- intel_atomic_plane.o \
intel_crt.o \
intel_ddi.o \
- intel_dp.o \
intel_dp_mst.o \
+ intel_dp.o \
intel_dsi.o \
- intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
+ intel_dsi_pll.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a896d20..732ce8785945 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -22,6 +22,7 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Thomas Richter <thor@math.tu-berlin.de>
*
* Minor modifications (Dithering enable):
* Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
/*
* LCD Vertical Display Size
*/
-#define VR21 0x20
+#define VR21 0x21
/*
* Panel power down status
@@ -155,16 +156,33 @@
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+ 0x11, 0x12,
+ 0x18, 0x19, 0x1a, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x8e, 0x8f,
+ 0x10 /* this must come last */
+};
+
struct ivch_priv {
bool quiet;
uint16_t width, height;
+
+ /* Register backup */
+
+ uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
};
static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
/**
* Reads a register on the ivch.
*
@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
{
struct ivch_priv *priv;
uint16_t temp;
+ int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
+ /* Make a backup of the registers to be able to restore them
+ * upon suspend.
+ */
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+ ivch_dump_regs(dvo);
+
return true;
out:
@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
return MODE_OK;
}
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ int i;
+
+ DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+ ivch_write(dvo, VR10, 0x0000);
+
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
+ ivch_reset(dvo);
+
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
backlight = 1;
else
backlight = 0;
+
ivch_write(dvo, VR80, backlight);
if (enable)
@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
+ ivch_reset(dvo);
+
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct ivch_priv *priv = dvo->dev_priv;
uint16_t vr40 = 0;
uint16_t vr01 = 0;
uint16_t vr10;
- ivch_read(dvo, VR10, &vr10);
+ ivch_reset(dvo);
+
+ vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
/* Enable dithering for 18 bpp pipelines */
vr10 &= VR10_INTERFACE_DEPTH_MASK;
if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
- vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
-
- ivch_dump_regs(dvo);
}
static void ivch_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4e5cf3..237ff6884a22 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
- CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
+ CMD( MI_LOAD_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
@@ -151,8 +151,8 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
- CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
+ CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -564,7 +564,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
- &table->table[i];
+ &table->table[j];
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f2a7e1..e3ec9049081f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ u64 size = 0;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ drm_mm_node_allocated(&vma->node))
+ size += vma->node.size;
+ }
+
+ return size;
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (!i915_is_ggtt(vma->vm))
- seq_puts(m, " (pp");
+ seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+ i915_is_ggtt(vma->vm) ? "g" : "pp",
+ vma->node.start, vma->node.size);
+ if (i915_is_ggtt(vma->vm))
+ seq_printf(m, ", type: %u)", vma->ggtt_view.type);
else
- seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
- vma->node.start, vma->node.size,
- vma->ggtt_view.type);
+ seq_puts(m, ")");
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_vma *vma;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
@@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
int count, ret;
@@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
list_add(&obj->obj_exec_link, &stolen);
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_ggtt_size(obj); \
+ size += i915_gem_obj_total_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct file_stats {
struct drm_i915_file_private *file_priv;
- int count;
- size_t total, unbound;
- size_t global, shared;
- size_t active, inactive;
+ unsigned long count;
+ u64 total, unbound;
+ u64 global, shared;
+ u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
name, \
stats.count, \
stats.total, \
@@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
- size += i915_gem_obj_ggtt_size(vma->obj); \
+ size += i915_gem_obj_total_ggtt_size(vma->obj); \
++count; \
if (vma->obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
- size_t size, mappable_size, purgeable_size;
+ u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
@@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.bound_list, global_list);
- seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->active_list, mm_list);
- seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->inactive_list, mm_list);
- seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
}
- seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+ seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
++purgeable_count;
}
}
- seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
- seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
mappable_count, mappable_size);
- seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
- seq_printf(m, "%zu [%lu] gtt total\n",
+ seq_printf(m, "%llu [%llu] gtt total\n",
dev_priv->gtt.base.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
@@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
IS_BROADWELL(dev) || IS_GEN9(dev)) {
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
+ rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ if (IS_BROXTON(dev)) {
+ rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ }
+
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = rp_state_cap & 0xff;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return ironlake_drpc_info(m);
}
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->fbc.lock);
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv))
seq_puts(m, "FBC enabled\n");
- } else {
- seq_puts(m, "FBC disabled: ");
- switch (dev_priv->fbc.no_fbc_reason) {
- case FBC_OK:
- seq_puts(m, "FBC actived, but currently disabled in hardware");
- break;
- case FBC_UNSUPPORTED:
- seq_puts(m, "unsupported by this chipset");
- break;
- case FBC_NO_OUTPUT:
- seq_puts(m, "no outputs");
- break;
- case FBC_STOLEN_TOO_SMALL:
- seq_puts(m, "not enough stolen memory");
- break;
- case FBC_UNSUPPORTED_MODE:
- seq_puts(m, "mode not supported");
- break;
- case FBC_MODE_TOO_LARGE:
- seq_puts(m, "mode too large");
- break;
- case FBC_BAD_PLANE:
- seq_puts(m, "FBC unsupported on plane");
- break;
- case FBC_NOT_TILED:
- seq_puts(m, "scanout buffer not tiled");
- break;
- case FBC_MULTIPLE_PIPES:
- seq_puts(m, "multiple pipes are enabled");
- break;
- case FBC_MODULE_PARAM:
- seq_puts(m, "disabled per module param (default off)");
- break;
- case FBC_CHIP_DEFAULT:
- seq_puts(m, "disabled per chip default");
- break;
- default:
- seq_puts(m, "unknown reason");
- }
- seq_putc(m, '\n');
- }
+ else
+ seq_printf(m, "FBC disabled: %s\n",
+ intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+ if (INTEL_INFO(dev_priv)->gen >= 7)
+ seq_printf(m, "Compressing: %s\n",
+ yesno(I915_READ(FBC_STATUS2) &
+ FBC_COMPRESSION_MASK));
+ mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
*val = dev_priv->fbc.false_color;
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
+ mutex_lock(&dev_priv->fbc.lock);
reg = I915_READ(ILK_DPFC_CONTROL);
dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
(reg | FBC_CTL_FALSE_COLOR) :
(reg & ~FBC_CTL_FALSE_COLOR));
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev_priv->fbc.lock);
return 0;
}
@@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int gpu_freq, ia_freq;
+ unsigned int max_gpu_freq, min_gpu_freq;
- if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ if (!HAS_CORE_RING_FREQ(dev)) {
seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
+ if (IS_SKYLAKE(dev)) {
+ /* Convert GT frequency to 50 HZ units */
+ min_gpu_freq =
+ dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+ max_gpu_freq =
+ dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+ } else {
+ min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+ max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+ }
+
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- for (gpu_freq = dev_priv->rps.min_freq_softlimit;
- gpu_freq <= dev_priv->rps.max_freq_softlimit;
- gpu_freq++) {
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, gpu_freq),
+ intel_gpu_freq(dev_priv, (gpu_freq *
+ (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1848,8 +1866,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
+ struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ drm_for_each_fb(drm_fb, dev) {
+ fb = to_intel_framebuffer(drm_fb);
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
@@ -2248,7 +2268,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2479,13 +2499,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
return 0;
}
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ if (!HAS_RUNTIME_PM(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -2493,6 +2513,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+ seq_printf(m, "Usage count: %d\n",
+ atomic_read(&dev->dev->power.usage_count));
+#else
+ seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
return 0;
}
@@ -2536,6 +2562,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_2_LANES";
case POWER_DOMAIN_PORT_DDI_D_4_LANES:
return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+ return "PORT_DDI_E_2_LANES";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -2780,13 +2808,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
bool active;
+ struct intel_crtc_state *pipe_config;
int x, y;
+ pipe_config = to_intel_crtc_state(crtc->base.state);
+
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config->pipe_src_w,
- crtc->config->pipe_src_h);
- if (crtc->active) {
+ yesno(pipe_config->base.active),
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ if (pipe_config->base.active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3058,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n\n");
- if (intel_crtc->config->has_drrs) {
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
struct intel_panel *panel;
mutex_lock(&drrs->mutex);
@@ -3079,7 +3110,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
for_each_intel_crtc(dev, intel_crtc) {
drm_modeset_lock(&intel_crtc->base.mutex, NULL);
- if (intel_crtc->active) {
+ if (intel_crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
@@ -3616,53 +3647,40 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ int ret = 0;
drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config->pch_pfit.enabled) {
- crtc->config->pch_pfit.force_thru = true;
-
- intel_display_power_get(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
- intel_crtc_reset(crtc);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
}
- drm_modeset_unlock_all(dev);
-}
-
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
- drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->pch_pfit.force_thru) {
- crtc->config->pch_pfit.force_thru = false;
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- intel_crtc_reset(crtc);
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
- intel_display_power_put(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
- }
+ ret = drm_atomic_commit(state);
+out:
drm_modeset_unlock_all(dev);
+ WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ if (ret)
+ drm_atomic_state_free(state);
}
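The rewrite above folds the enable/undo pair into one helper that toggles the pfit force_thru bit through a proper atomic commit instead of poking the live crtc->config and calling intel_crtc_reset(). The core of that pattern, sketched with the generic DRM atomic helpers (error handling abbreviated):

        struct drm_atomic_state *state;
        struct drm_crtc_state *crtc_state;
        int ret;

        state = drm_atomic_state_alloc(dev);            /* staging copy */
        state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);

        crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
        /* ... modify the duplicated state, never the live config ... */

        ret = drm_atomic_commit(state);         /* check + swap atomically */
        if (ret)
                drm_atomic_state_free(state);   /* commit consumes it on success */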
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3700,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -3776,7 +3794,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->active)
+ if (crtc->base.state->active)
intel_wait_for_vblank(dev, pipe);
drm_modeset_unlock(&crtc->base.mutex);
@@ -3794,7 +3812,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, false);
hsw_enable_ips(crtc);
}
@@ -3980,24 +3998,14 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
{
char *input_buffer;
int status = 0;
- struct seq_file *m;
struct drm_device *dev;
struct drm_connector *connector;
struct list_head *connector_list;
struct intel_dp *intel_dp;
int val = 0;
- m = file->private_data;
- if (!m) {
- status = -ENODEV;
- return status;
- }
- dev = m->private;
+ dev = ((struct seq_file *)file->private_data)->private;
- if (!dev) {
- status = -ENODEV;
- return status;
- }
connector_list = &dev->mode_config.connector_list;
if (len == 0)
@@ -4021,9 +4029,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->connector_type ==
- DRM_MODE_CONNECTOR_DisplayPort &&
- connector->status == connector_status_connected &&
+ if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4061,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4102,9 +4105,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4144,9 +4144,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4183,8 +4180,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
- int num_levels = ilk_wm_max_level(dev) + 1;
int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
drm_modeset_lock_all(dev);
@@ -4193,9 +4197,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
/*
* - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9
+ * - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -4259,7 +4263,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4295,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[8] = { 0 };
- int num_levels = ilk_wm_max_level(dev) + 1;
+ int num_levels;
int level;
int ret;
char tmp[32];
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
+
if (len >= sizeof(tmp))
return -EINVAL;
@@ -5027,6 +5038,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5054,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
- {"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321ba634..ab37d1121be8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (!value)
return -ENODEV;
break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915.enable_hangcheck &&
+ intel_has_gpu_reset(dev);
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = HAS_RESOURCE_STREAMER(dev);
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
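Both new parameters are plain GETPARAM values, so userspace can probe them with the usual libdrm ioctl. A hedged sketch (assumes a libdrm and i915_drm.h new enough to define I915_PARAM_HAS_GPU_RESET):

        #include <fcntl.h>
        #include <stdio.h>
        #include <xf86drm.h>
        #include <i915_drm.h>

        int main(void)
        {
                int fd = open("/dev/dri/card0", O_RDWR);
                int value = 0;
                struct drm_i915_getparam gp = {
                        .param = I915_PARAM_HAS_GPU_RESET,
                        .value = &value,
                };

                if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                        printf("GPU reset available: %d\n", value);
                return 0;
        }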
@@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane, as its
+ * use is mutually exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
if (IS_BROXTON(dev)) {
- info->num_sprites[PIPE_A] = 3;
- info->num_sprites[PIPE_B] = 3;
- info->num_sprites[PIPE_C] = 2;
- } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+ info->num_sprites[PIPE_A] = 2;
+ info->num_sprites[PIPE_B] = 2;
+ info->num_sprites[PIPE_C] = 1;
+ } else if (IS_VALLEYVIEW(dev))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->dp_wq == NULL) {
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL) {
DRM_ERROR("Failed to create our dp workqueue.\n");
ret = -ENOMEM;
goto out_freewq;
@@ -1029,7 +1044,7 @@ out_gem_unload:
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
- destroy_workqueue(dev_priv->dp_wq);
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
@@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
- destroy_workqueue(dev_priv->dp_wq);
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@ const struct drm_ioctl_desc i915_ioctls[] = {
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp. For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 884b4f9b81c4..ab64d68388f2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
};
static const struct intel_device_info intel_skylake_info = {
- .is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
};
static const struct intel_device_info intel_skylake_gt3_info = {
- .is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
{0, 0, 0}
};
-#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
void intel_detect_pch(struct drm_device *dev)
{
@@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
-
- dev_priv->long_hpd_port_mask = 0;
- dev_priv->short_hpd_port_mask = 0;
- dev_priv->hpd_event_bits = 0;
-
- spin_unlock_irq(&dev_priv->irq_lock);
-
- cancel_work_sync(&dev_priv->dig_port_work);
- cancel_work_sync(&dev_priv->hotplug_work);
- cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
pci_power_t opregion_target_state;
int error;
@@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
* for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc)
- intel_crtc_control(crtc, false);
+ intel_display_suspend(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_suspend(dev);
@@ -683,15 +662,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
pci_disable_device(drm_dev->pdev);
/*
- * During hibernation on some GEN4 platforms the BIOS may try to access
+ * During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
* leave the device in D0 on those platforms and hope the BIOS will
- * power down the device properly. Platforms where this was seen:
- * Lenovo Thinkpad X301, X61s
+ * power down the device properly. The issue was seen on multiple old
+ * GENs with different BIOS vendors, so having an explicit blacklist
+ * is impractical; apply the workaround on everything pre-GEN6. The
+ * platforms where the issue was seen:
+ * Lenovo Thinkpad X301, X61s, X60, T60, X41
+ * Fujitsu FSC S7110
+ * Acer Aspire 1830T
*/
- if (!(hibernation &&
- drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
- INTEL_INFO(dev_priv)->gen == 4))
+ if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0;
@@ -748,7 +730,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
@@ -760,7 +742,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
drm_modeset_unlock_all(dev);
intel_dp_mst_resume(dev);
@@ -865,9 +847,6 @@ int i915_reset(struct drm_device *dev)
bool simulated;
int ret;
- if (!i915.reset)
- return 0;
-
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
@@ -959,8 +938,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- driver.driver_features &= ~(DRIVER_USE_AGP);
-
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1515,7 +1492,15 @@ static int intel_runtime_suspend(struct device *device)
* FIXME: We really should find a document that references the arguments
* used below!
*/
- if (IS_HASWELL(dev)) {
+ if (IS_BROADWELL(dev)) {
+ /*
+ * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+ * being detected, and the call we do at intel_runtime_resume()
+ * won't be able to restore them. Since PCI_D3hot matches the
+ * actual specification and appears to be working, use it.
+ */
+ intel_opregion_notify_adapter(dev, PCI_D3hot);
+ } else {
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
@@ -1524,16 +1509,6 @@ static int intel_runtime_suspend(struct device *device)
* the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
- } else {
- /*
- * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
- * being detected, and the call we do at intel_runtime_resume()
- * won't be able to restore them. Since PCI_D3hot matches the
- * actual specification and appears to be working, use it. Let's
- * assume the other non-Haswell platforms will stay the same as
- * Broadwell.
- */
- intel_opregion_notify_adapter(dev, PCI_D3hot);
}
assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1648,6 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
@@ -1688,7 +1662,6 @@ static struct drm_driver driver = {
.suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
- .device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1700,14 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
/*
- * If CONFIG_DRM_I915_KMS is set, default to KMS unless
- * explicitly disabled with the module pararmeter.
- *
- * Otherwise, just follow the parameter (defaulting to off).
- *
- * Allow optional vga_text_mode_force boot option to override
- * the default behavior.
+ * Enable KMS by default, unless explicitly overridden by
+ * either the i915.modeset parameter or the
+ * vga_text_mode_force boot option.
*/
-#if defined(CONFIG_DRM_I915_KMS)
- if (i915.modeset != 0)
- driver.driver_features |= DRIVER_MODESET;
-#endif
- if (i915.modeset == 1)
- driver.driver_features |= DRIVER_MODESET;
+ driver.driver_features |= DRIVER_MODESET;
+
+ if (i915.modeset == 0)
+ driver.driver_features &= ~DRIVER_MODESET;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1726,7 @@ static int __init i915_init(void)
* to the atomic ioctl and the atomic properties. Only plane operations on
* a single CRTC will actually work.
*/
- if (i915.nuclear_pageflip)
+ if (driver.driver_features & DRIVER_MODESET)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 542fac628b28..e1db8de52851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150522"
+#define DRIVER_DATE "20150731"
#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
@@ -182,6 +182,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
+ POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -206,17 +207,51 @@ enum intel_display_power_domain {
enum hpd_pin {
HPD_NONE = 0,
- HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
HPD_CRT,
HPD_SDVO_B,
HPD_SDVO_C,
+ HPD_PORT_A,
HPD_PORT_B,
HPD_PORT_C,
HPD_PORT_D,
+ HPD_PORT_E,
HPD_NUM_PINS
};
+#define for_each_hpd_pin(__pin) \
+ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
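A hedged usage sketch for the new iterator, walking every real pin while skipping the HPD_NONE sentinel (a fragment, assuming a dev_priv in scope with the i915_hotplug struct defined just below):

        enum hpd_pin pin;

        for_each_hpd_pin(pin) {
                /* starts at HPD_NONE + 1, so the sentinel is never touched */
                dev_priv->hotplug.stats[pin].count = 0;
                dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        }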
+struct i915_hotplug {
+ struct work_struct hotplug_work;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ struct delayed_work reenable_work;
+
+ struct intel_digital_port *irq_port[I915_MAX_PORTS];
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP handler could block the workqueue while trying to acquire
+ * a mode config mutex that userspace has taken, while userspace is
+ * waiting on the DP work to run -- which is stuck behind the non-DP
+ * one. A separate DP workqueue breaks the cycle.
+ */
+ struct workqueue_struct *dp_wq;
+};
+
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +278,12 @@ enum hpd_pin {
&dev->mode_config.plane_list, \
base.head)
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
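And a similar fragment for the per-CRTC plane iterator above: the trailing if-clause filters by pipe, so the body only ever sees planes wired to the given CRTC (dev and intel_crtc assumed in scope):

        struct intel_plane *intel_plane;
        int n = 0;

        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)
                n++;    /* counts only planes on intel_crtc->pipe */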
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@@ -333,7 +374,8 @@ struct intel_dpll_hw_state {
uint32_t cfgcr1, cfgcr2;
/* bxt */
- uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+ uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+ pcsdw12;
};
struct intel_shared_dpll_config {
@@ -343,7 +385,6 @@ struct intel_shared_dpll_config {
struct intel_shared_dpll {
struct intel_shared_dpll_config config;
- struct intel_shared_dpll_config *new_config;
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +486,7 @@ struct drm_i915_error_state {
struct timeval time;
char error_msg[128];
+ int iommu;
u32 reset_count;
u32 suspend_count;
@@ -559,9 +601,6 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- bool (*fbc_enabled)(struct drm_device *dev);
- void (*enable_fbc)(struct drm_crtc *crtc);
- void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/**
@@ -587,7 +626,8 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+ void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +638,6 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
- void (*off)(struct drm_crtc *crtc);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode);
@@ -608,7 +647,7 @@ struct drm_i915_display_funcs {
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -706,7 +745,7 @@ enum csr_state {
struct intel_csr {
const char *fw_path;
- __be32 *dmc_payload;
+ uint32_t *dmc_payload;
uint32_t dmc_fw_size;
uint32_t mmio_count;
uint32_t mmioaddr[8];
@@ -805,11 +844,15 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
/**
* struct intel_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
* @file_priv: filp associated with this context (NULL for global default
* context).
* @hang_stats: information about the role of this context in possible GPU
@@ -826,6 +869,8 @@ struct intel_context {
struct kref ref;
int user_handle;
uint8_t remap_slice;
+ struct drm_i915_private *i915;
+ int flags;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
@@ -852,9 +897,13 @@ enum fb_op_origin {
ORIGIN_CPU,
ORIGIN_CS,
ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
};
struct i915_fbc {
+ /* This is always the inner lock when overlapping with struct_mutex and
+ * it's the outer lock when overlapping with stolen_lock. */
+ struct mutex lock;
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
@@ -874,7 +923,7 @@ struct i915_fbc {
struct intel_fbc_work {
struct delayed_work work;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
struct drm_framebuffer *fb;
} *fbc_work;
@@ -890,7 +939,13 @@ struct i915_fbc {
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ FBC_ROTATION, /* rotation is not supported */
+ FBC_IN_DBG_MASTER, /* kernel debugger is active */
} no_fbc_reason;
+
+ bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+ void (*enable_fbc)(struct intel_crtc *crtc);
+ void (*disable_fbc)(struct drm_i915_private *dev_priv);
};
/**
@@ -1200,6 +1255,10 @@ struct intel_l3_parity {
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
+ /** Protects the usage of the GTT stolen memory allocator. This is
+ * always the inner lock when overlapping with struct_mutex. */
+ struct mutex stolen_lock;
+
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -1353,6 +1412,15 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B 0x05
+#define DDC_PIN_C 0x04
+#define DDC_PIN_D 0x06
+
struct ddi_vbt_port_info {
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1365,6 +1433,12 @@ struct ddi_vbt_port_info {
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
+
+ uint8_t alternate_aux_channel;
+ uint8_t alternate_ddc_pin;
+
+ uint8_t dp_boost_level;
+ uint8_t hdmi_boost_level;
};
enum psr_lines_to_wait {
@@ -1460,23 +1534,27 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
-struct vlv_wm_values {
- struct {
- uint16_t primary;
- uint16_t sprite[2];
- uint8_t cursor;
- } pipe[3];
+struct vlv_pipe_wm {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+};
- struct {
- uint16_t plane;
- uint8_t cursor;
- } sr;
+struct vlv_sr_wm {
+ uint16_t plane;
+ uint8_t cursor;
+};
+struct vlv_wm_values {
+ struct vlv_pipe_wm pipe[3];
+ struct vlv_sr_wm sr;
struct {
uint8_t cursor;
uint8_t sprite[2];
uint8_t primary;
} ddl[3];
+ uint8_t level;
+ bool cxsr;
};
struct skl_ddb_entry {
@@ -1610,6 +1688,18 @@ struct i915_virtual_gpu {
bool active;
};
+struct i915_execbuffer_params {
+ struct drm_device *dev;
+ struct drm_file *file;
+ uint32_t dispatch_flags;
+ uint32_t args_batch_start_offset;
+ uint32_t batch_obj_vm_offset;
+ struct intel_engine_cs *ring;
+ struct drm_i915_gem_object *batch_obj;
+ struct intel_context *ctx;
+ struct drm_i915_gem_request *request;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *objects;
@@ -1679,19 +1769,7 @@ struct drm_i915_private {
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
- struct work_struct hotplug_work;
- struct {
- unsigned long hpd_last_jiffies;
- int hpd_cnt;
- enum {
- HPD_ENABLED = 0,
- HPD_DISABLED = 1,
- HPD_MARK_DISABLED = 2
- } hpd_mark;
- } hpd_stats[HPD_NUM_PINS];
- u32 hpd_event_bits;
- struct delayed_work hotplug_reenable_work;
-
+ struct i915_hotplug hotplug;
struct i915_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
@@ -1717,7 +1795,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
- unsigned int cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq;
unsigned int hpll_freq;
/**
@@ -1768,9 +1846,6 @@ struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
- bool lvds_downclock_avail;
- /* indicates the reduced downclock for LVDS*/
- int lvds_downclock;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1798,7 +1873,7 @@ struct drm_i915_private {
struct drm_i915_gem_object *vlv_pctx;
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
@@ -1808,6 +1883,7 @@ struct drm_i915_private {
struct drm_property *force_audio_property;
/* hda/i915 audio component */
+ struct i915_audio_component *audio_component;
bool audio_component_registered;
uint32_t hw_context_size;
@@ -1853,33 +1929,17 @@ struct drm_i915_private {
struct skl_wm_values skl_hw;
struct vlv_wm_values vlv;
};
+
+ uint8_t max_level;
} wm;
struct i915_runtime_pm pm;
- struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
- u32 long_hpd_port_mask;
- u32 short_hpd_port_mask;
- struct work_struct dig_port_work;
-
- /*
- * if we get a HPD irq from DP and a HPD irq from non-DP
- * the non-DP HPD could block the workqueue on a mode config
- * mutex getting, that userspace may have taken. However
- * userspace is waiting on the DP workqueue to run which is
- * blocked behind the non-DP one.
- */
- struct workqueue_struct *dp_wq;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+ int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ struct list_head *vmas);
int (*init_rings)(struct drm_device *dev);
void (*cleanup_ring)(struct intel_engine_cs *ring);
void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2036,8 +2096,6 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
- unsigned int has_dma_mapping:1;
-
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
unsigned int pin_display;
@@ -2149,7 +2207,8 @@ struct drm_i915_gem_request {
struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
- /** Batch buffer related to this request if any */
+ /** Batch buffer related to this request if any (used for
+ error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
@@ -2187,8 +2246,12 @@ struct drm_i915_gem_request {
};
int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file);
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2392,6 +2455,9 @@ struct drm_i915_cmd_table {
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
+ (INTEL_DEVID(dev) & 0xf) == 0xe)
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
@@ -2401,6 +2467,14 @@ struct drm_i915_cmd_table {
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
+ INTEL_DEVID(dev) == 0x1913 || \
+ INTEL_DEVID(dev) == 0x1916 || \
+ INTEL_DEVID(dev) == 0x1921 || \
+ INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
+ INTEL_DEVID(dev) == 0x1915 || \
+ INTEL_DEVID(dev) == 0x191E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
#define SKL_REVID_A0 (0x0)
@@ -2467,9 +2541,6 @@ struct drm_i915_cmd_table {
*/
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@@ -2495,6 +2566,12 @@ struct drm_i915_cmd_table {
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+ INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
+ !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2534,7 +2611,6 @@ struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
- unsigned int lvds_downclock;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
@@ -2556,10 +2632,11 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ bool enable_guc_submission;
+ int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
- bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
@@ -2573,21 +2650,27 @@ extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
void i915_firmware_load_error_print(const char *fw_path, int err);
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
@@ -2595,7 +2678,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
@@ -2662,19 +2744,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+ struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ struct list_head *vmas);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2707,6 +2781,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+ struct drm_device *dev, const void *data, size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2781,9 +2857,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to);
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request **to_req);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring);
+ struct drm_i915_gem_request *req);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2812,11 +2889,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2825,7 +2897,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2860,16 +2931,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
- __i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+ struct drm_i915_gem_object *batch_obj,
+ bool flush_caches);
+#define i915_add_request(req) \
+ __i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+ __i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
@@ -2889,6 +2962,7 @@ int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
@@ -2912,8 +2986,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-void i915_gem_restore_fences(struct drm_device *dev);
-
unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view);
@@ -3008,15 +3080,27 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
- struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
@@ -3066,9 +3150,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
}
/* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3098,10 +3185,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
obj->tiling_mode != I915_TILING_NONE;
}
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
@@ -3116,7 +3199,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
@@ -3222,8 +3306,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
- bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -3303,15 +3386,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper = I915_READ(upper_reg); \
- u32 lower = I915_READ(lower_reg); \
- u32 tmp = I915_READ(upper_reg); \
- if (upper != tmp) { \
- upper = tmp; \
- lower = I915_READ(lower_reg); \
- WARN_ON(I915_READ(upper_reg) != upper); \
- } \
- (u64)upper << 32 | lower; })
+ u32 upper, lower, old_upper, loop = 0; \
+ upper = I915_READ(upper_reg); \
+ do { \
+ old_upper = upper; \
+ lower = I915_READ(lower_reg); \
+ upper = I915_READ(upper_reg); \
+ } while (upper != old_upper && loop++ < 2); \
+ (u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
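The new I915_READ64_2x32 above drops the old single-retry dance for a bounded loop: keep re-reading the high half until it is stable, so a carry between the two 32-bit reads cannot yield a torn value. A stand-alone sketch of the same pattern (mmio_read32() is an assumed accessor, not a driver function):

        #include <stdint.h>

        extern uint32_t mmio_read32(unsigned int reg);  /* assumed accessor */

        static uint64_t read64_2x32(unsigned int lo_reg, unsigned int hi_reg)
        {
                uint32_t lower, upper, old_upper;
                int loop = 0;

                upper = mmio_read32(hi_reg);
                do {
                        old_upper = upper;
                        lower = mmio_read32(lo_reg);
                        upper = mmio_read32(hi_reg);
                } while (upper != old_upper && loop++ < 2);  /* bounded retries */

                return ((uint64_t)upper << 32) | lower;
        }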
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 248fd1ac7b3a..4d631a946481 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@ static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
- struct drm_i915_fence_reg *fence,
- bool enable);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -66,18 +61,6 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
return obj->pin_display;
}
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- /* As we do not have an associated fence register, we will force
- * a tiling change if we ever need to acquire one.
- */
- obj->fence_dirty = false;
- obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -149,14 +132,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct drm_i915_gem_object *obj;
+ struct i915_gtt *ggtt = &dev_priv->gtt;
+ struct i915_vma *vma;
size_t pinned;
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (i915_gem_obj_is_pinned(obj))
- pinned += i915_gem_obj_ggtt_size(obj);
+ list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
+ list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->gtt.base.total;
@@ -213,7 +200,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->pages = st;
- obj->has_dma_mapping = true;
return 0;
}
@@ -265,8 +251,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
sg_free_table(obj->pages);
kfree(obj->pages);
-
- obj->has_dma_mapping = false;
}
static void
@@ -350,7 +334,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -371,7 +355,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
i915_gem_chipset_flush(dev);
out:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
@@ -804,7 +788,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(obj, ORIGIN_GTT);
while (remain > 0) {
/* Operation in this page
@@ -835,7 +819,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_flush:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -948,7 +932,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
i915_gem_object_pin_pages(obj);
@@ -1028,7 +1012,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
@@ -1149,23 +1133,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
return 0;
}
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
- int ret;
-
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
- ret = 0;
- if (req == req->ring->outstanding_lazy_request)
- ret = i915_add_request(req->ring);
-
- return ret;
-}
-
static void fake_irq(unsigned long data)
{
wake_up_process((struct task_struct *)data);
@@ -1337,6 +1304,33 @@ out:
return ret;
}
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_private;
+ struct drm_i915_file_private *file_priv;
+
+ WARN_ON(!req || !file || req->file_priv);
+
+ if (!req || !file)
+ return -EINVAL;
+
+ if (req->file_priv)
+ return -EINVAL;
+
+ dev_private = req->ring->dev->dev_private;
+ file_priv = file->driver_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ req->file_priv = file_priv;
+ list_add_tail(&req->client_list, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
+
+ req->pid = get_pid(task_pid(current));
+
+ return 0;
+}
+
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
@@ -1349,6 +1343,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
+
+ put_pid(request->pid);
+ request->pid = NULL;
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1368,8 +1365,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
- put_pid(request->pid);
-
i915_gem_request_unreference(request);
}
@@ -1418,10 +1413,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
@@ -1521,10 +1512,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL)
return 0;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
} else {
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1534,10 +1521,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL)
continue;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
}
}
@@ -1548,7 +1531,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
NULL, rps);
mutex_lock(&dev->struct_mutex);
-err:
for (i = 0; i < n; i++) {
if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]);
@@ -1616,6 +1598,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ if (write_domain != 0)
+ intel_fb_obj_invalidate(obj,
+ write_domain == I915_GEM_DOMAIN_GTT ?
+ ORIGIN_GTT : ORIGIN_CPU);
+
unref:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -2139,6 +2126,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ i915_gem_gtt_finish_object(obj);
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
@@ -2199,6 +2188,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sg_page_iter sg_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2236,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/
i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
goto err_pages;
+ }
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2276,6 +2268,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg_mark_end(sg);
obj->pages = st;
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret)
+ goto err_pages;
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
@@ -2300,10 +2296,10 @@ err_pages:
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
- if (PTR_ERR(page) == -ENOSPC)
- return -ENOMEM;
- else
- return PTR_ERR(page);
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2343,9 +2339,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
+ struct intel_engine_cs *ring;
+
+ ring = i915_gem_request_get_ring(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
@@ -2353,8 +2352,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
obj->active |= intel_ring_flag(ring);
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
- i915_gem_request_assign(&obj->last_read_req[ring->id],
- intel_ring_get_request(ring));
+ i915_gem_request_assign(&obj->last_read_req[ring->id], req);
list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
@@ -2366,7 +2364,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
i915_gem_request_assign(&obj->last_write_req, NULL);
- intel_fb_obj_flush(obj, true);
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
static void
@@ -2387,6 +2385,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
if (obj->active)
return;
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to, of course!)
+ */
+ list_move_tail(&obj->global_list,
+ &to_i915(obj->base.dev)->mm.bound_list);
+
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2466,24 +2471,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+ struct drm_i915_gem_object *obj,
+ bool flush_caches)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_request *request;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
int ret;
- request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
- return -ENOMEM;
+ return;
- if (i915.enable_execlists) {
- ringbuf = request->ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
+ ring = request->ring;
+ dev_priv = ring->dev->dev_private;
+ ringbuf = request->ringbuf;
+
+ /*
+ * To ensure that this call will not fail, space for its emissions
+ * should already have been reserved in the ring buffer. Let the ring
+ * know that it is time to use that space up.
+ */
+ intel_ring_reserved_space_use(ringbuf);
request_start = intel_ring_get_tail(ringbuf);
/*
@@ -2493,14 +2508,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (i915.enable_execlists) {
- ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
- if (ret)
- return ret;
- } else {
- ret = intel_ring_flush_all_caches(ring);
- if (ret)
- return ret;
+ if (flush_caches) {
+ if (i915.enable_execlists)
+ ret = logical_ring_flush_all_caches(request);
+ else
+ ret = intel_ring_flush_all_caches(request);
+ /* Not allowed to fail! */
+ WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
/* Record the position of the start of the request so that
@@ -2510,17 +2524,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
*/
request->postfix = intel_ring_get_tail(ringbuf);
- if (i915.enable_execlists) {
- ret = ring->emit_request(ringbuf, request);
- if (ret)
- return ret;
- } else {
- ret = ring->add_request(ring);
- if (ret)
- return ret;
+ if (i915.enable_execlists)
+ ret = ring->emit_request(request);
+ else {
+ ret = ring->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
+ /* Not allowed to fail! */
+ WARN(ret, "emit|add_request failed: %d!\n", ret);
request->head = request_start;
@@ -2532,33 +2544,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
*/
request->batch_obj = obj;
- if (!i915.enable_execlists) {
- /* Hold a reference to the current context so that we can inspect
- * it later in case a hangcheck error event fires.
- */
- request->ctx = ring->last_context;
- if (request->ctx)
- i915_gem_context_reference(request->ctx);
- }
-
request->emitted_jiffies = jiffies;
+ ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
- request->file_priv = NULL;
-
- if (file) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- spin_lock(&file_priv->mm.lock);
- request->file_priv = file_priv;
- list_add_tail(&request->client_list,
- &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-
- request->pid = get_pid(task_pid(current));
- }
trace_i915_gem_request_add(request);
- ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
@@ -2567,7 +2557,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
- return 0;
+ /* Sanity check that the reserved size was large enough. */
+ intel_ring_reserved_space_end(ringbuf);
}
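/*
 * Illustrative sketch (not part of the original change): the request
 * lifecycle that replaces ring->outstanding_lazy_request. A legacy
 * (non-execlists) caller now allocates a request explicitly, emits work
 * against it, and hands it to __i915_add_request(), which may no longer
 * fail because ring space was reserved at allocation time. All API names
 * come from this series; the wrapper function itself is hypothetical.
 */
static int example_submit(struct intel_engine_cs *ring,
			  struct intel_context *ctx,
			  struct drm_i915_gem_object *batch_obj)
{
	struct drm_i915_gem_request *req;
	int ret;

	/* Allocation also reserves ring space, so add_request cannot fail. */
	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	ret = i915_switch_context(req);
	if (ret) {
		/* Not yet submitted: cancel it, don't leak the reservation. */
		i915_gem_request_cancel(req);
		return ret;
	}

	/* Queue the work; this consumes the reserved space and cannot fail. */
	__i915_add_request(req, batch_obj, true);
	return 0;
}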
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2621,12 +2612,13 @@ void i915_gem_request_free(struct kref *req_ref)
typeof(*req), ref);
struct intel_context *ctx = req->ctx;
+ if (req->file_priv)
+ i915_gem_request_remove_from_client(req);
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = req->ring;
-
- if (ctx != ring->default_context)
- intel_lr_context_unpin(ring, ctx);
+ if (ctx != req->ring->default_context)
+ intel_lr_context_unpin(req);
}
i915_gem_context_unreference(ctx);
@@ -2636,36 +2628,63 @@ void i915_gem_request_free(struct kref *req_ref)
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
int ret;
- if (ring->outstanding_lazy_request)
- return 0;
+ if (!req_out)
+ return -EINVAL;
+
+ *req_out = NULL;
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
- kref_init(&req->ref);
- req->i915 = dev_priv;
-
ret = i915_gem_get_seqno(ring->dev, &req->seqno);
if (ret)
goto err;
+ kref_init(&req->ref);
+ req->i915 = dev_priv;
req->ring = ring;
+ req->ctx = ctx;
+ i915_gem_context_reference(req->ctx);
if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req, ctx);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
ret = intel_ring_alloc_request_extras(req);
- if (ret)
+ if (ret) {
+ i915_gem_context_unreference(req->ctx);
goto err;
+ }
+
+ /*
+ * Reserve space in the ring buffer for all the commands required to
+ * eventually emit this request. This is to guarantee that the
+ * i915_add_request() call can't fail. Note that the reserve may need
+ * to be redone if the request is not actually submitted straight
+ * away, e.g. because a GPU scheduler has deferred it.
+ */
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_reserve_space(req);
+ else
+ ret = intel_ring_reserve_space(req);
+ if (ret) {
+ /*
+ * At this point, the request is fully allocated even if not
+ * fully prepared. Thus it can be cleaned up using the proper
+ * free code.
+ */
+ i915_gem_request_cancel(req);
+ return ret;
+ }
- ring->outstanding_lazy_request = req;
+ *req_out = req;
return 0;
err:
@@ -2673,6 +2692,13 @@ err:
return ret;
}
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+ intel_ring_reserved_space_cancel(req->ringbuf);
+
+ i915_gem_request_unreference(req);
+}
+
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
@@ -2734,7 +2760,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
list_del(&submit_req->execlist_link);
if (submit_req->ctx != ring->default_context)
- intel_lr_context_unpin(ring, submit_req->ctx);
+ intel_lr_context_unpin(submit_req);
i915_gem_request_unreference(submit_req);
}
@@ -2755,30 +2781,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_request_retire(request);
}
-
- /* This may not have been flushed before the reset, so clean it now */
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
- /*
- * Commit delayed tiling changes if we have an object still
- * attached to the fence, otherwise just clear the fence.
- */
- if (reg->obj) {
- i915_gem_object_update_fence(reg->obj, reg,
- reg->obj->tiling_mode);
- } else {
- i915_gem_write_fence(dev, i, NULL);
- }
- }
}
void i915_gem_reset(struct drm_device *dev)
@@ -2940,7 +2942,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- int ret, i;
+ int i;
if (!obj->active)
return 0;
@@ -2955,10 +2957,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (list_empty(&req->list))
goto retire;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
retire:
@@ -3061,25 +3059,22 @@ out:
static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
- struct drm_i915_gem_request *req)
+ struct drm_i915_gem_request *from_req,
+ struct drm_i915_gem_request **to_req)
{
struct intel_engine_cs *from;
int ret;
- from = i915_gem_request_get_ring(req);
+ from = i915_gem_request_get_ring(from_req);
if (to == from)
return 0;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(from_req, true))
return 0;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- ret = __i915_wait_request(req,
+ ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
@@ -3087,16 +3082,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- i915_gem_object_retire_request(obj, req);
+ i915_gem_object_retire_request(obj, from_req);
} else {
int idx = intel_ring_sync_index(from, to);
- u32 seqno = i915_gem_request_get_seqno(req);
+ u32 seqno = i915_gem_request_get_seqno(from_req);
+
+ WARN_ON(!to_req);
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
- trace_i915_gem_ring_sync_to(from, to, req);
- ret = to->semaphore.sync_to(to, from, seqno);
+ if (*to_req == NULL) {
+ ret = i915_gem_request_alloc(to, to->default_context, to_req);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+ ret = to->semaphore.sync_to(*to_req, from, seqno);
if (ret)
return ret;
@@ -3116,11 +3119,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
*
* @obj: object which may be in use on another ring.
* @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ * This will be allocated and returned if a request is
+ * required but not passed in.
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
* rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
* into a buffer at any time, but multiple readers. To ensure each has
* a coherent view of memory, we must:
*
@@ -3131,11 +3137,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
* - If we are a write request (pending_write_domain is set), the new
* request must wait for outstanding read requests to complete.
*
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to)
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request **to_req)
{
const bool readonly = obj->base.pending_write_domain == 0;
struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3157,7 +3174,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
req[n++] = obj->last_read_req[i];
}
for (i = 0; i < n; i++) {
- ret = __i915_gem_object_sync(obj, to, req[i]);
+ ret = __i915_gem_object_sync(obj, to, req[i], to_req);
if (ret)
return ret;
}
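/*
 * Illustrative sketch (not part of the original change) of the to_req
 * contract documented above: syncing to a ring may allocate a request on
 * the caller's behalf, which the caller is then responsible for
 * submitting. The wrapper name is hypothetical;
 * i915_add_request_no_flush() is used elsewhere in this series.
 */
static int example_sync_to_ring(struct drm_i915_gem_object *obj,
				struct intel_engine_cs *to)
{
	struct drm_i915_gem_request *req = NULL;
	int ret;

	ret = i915_gem_object_sync(obj, to, &req);
	if (ret)
		return ret;

	/* A request only exists if semaphore commands were actually emitted. */
	if (req)
		i915_add_request_no_flush(req);

	return 0;
}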
@@ -3247,10 +3264,8 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list)) {
- i915_gem_gtt_finish_object(obj);
+ if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
- }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3270,354 +3285,27 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
if (!i915.enable_execlists) {
- ret = i915_switch_context(ring, ring->default_context);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
- }
-
- ret = intel_ring_idle(ring);
- if (ret)
- return ret;
- }
-
- WARN_ON(i915_verify_lists(dev));
- return 0;
-}
-
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fence_reg;
- int fence_pitch_shift;
- if (INTEL_INFO(dev)->gen >= 6) {
- fence_reg = FENCE_REG_SANDYBRIDGE_0;
- fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
- } else {
- fence_reg = FENCE_REG_965_0;
- fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
- }
-
- fence_reg += reg * 8;
+ ret = i915_switch_context(req);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
- /* To w/a incoherency with non-atomic 64-bit register updates,
- * we split the 64-bit update into two 32-bit writes. In order
- * for a partial fence not to be evaluated between writes, we
- * precede the update with write to turn off the fence register,
- * and only enable the fence as the last step.
- *
- * For extra levels of paranoia, we make sure each step lands
- * before applying the next step.
- */
- I915_WRITE(fence_reg, 0);
- POSTING_READ(fence_reg);
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint64_t val;
-
- /* Adjust fence size to match tiled area */
- if (obj->tiling_mode != I915_TILING_NONE) {
- uint32_t row_size = obj->stride *
- (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
- size = (size / row_size) * row_size;
+ i915_add_request_no_flush(req);
}
- val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
- 0xfffff000) << 32;
- val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- I915_WRITE(fence_reg + 4, val >> 32);
- POSTING_READ(fence_reg + 4);
-
- I915_WRITE(fence_reg + 0, val);
- POSTING_READ(fence_reg);
- } else {
- I915_WRITE(fence_reg + 4, 0);
- POSTING_READ(fence_reg + 4);
- }
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- int pitch_val;
- int tile_width;
-
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
-
- if (reg < 8)
- reg = FENCE_REG_830_0 + reg * 4;
- else
- reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
- I915_WRITE(reg, val);
- POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val;
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint32_t pitch_val;
-
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
- i915_gem_obj_ggtt_offset(obj), size);
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
-
- I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
- POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
- return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
- */
- if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
- mb();
-
- WARN(obj && (!obj->stride || !obj->tiling_mode),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- obj->stride, obj->tiling_mode);
-
- if (IS_GEN2(dev))
- i830_write_fence_reg(dev, reg, obj);
- else if (IS_GEN3(dev))
- i915_write_fence_reg(dev, reg, obj);
- else if (INTEL_INFO(dev)->gen >= 4)
- i965_write_fence_reg(dev, reg, obj);
-
- /* And similarly be paranoid that no direct access to this region
- * is reordered to before the fence is installed.
- */
- if (i915_gem_object_needs_mb(obj))
- mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
- struct drm_i915_fence_reg *fence)
-{
- return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
- struct drm_i915_fence_reg *fence,
- bool enable)
-{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
- if (enable) {
- obj->fence_reg = reg;
- fence->obj = obj;
- list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
- } else {
- obj->fence_reg = I915_FENCE_REG_NONE;
- fence->obj = NULL;
- list_del_init(&fence->lru_list);
- }
- obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->last_fenced_req) {
- int ret = i915_wait_request(obj->last_fenced_req);
- if (ret)
- return ret;
-
- i915_gem_request_assign(&obj->last_fenced_req, NULL);
- }
-
- return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct drm_i915_fence_reg *fence;
- int ret;
-
- ret = i915_gem_object_wait_fence(obj);
- if (ret)
- return ret;
-
- if (obj->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- fence = &dev_priv->fence_regs[obj->fence_reg];
-
- if (WARN_ON(fence->pin_count))
- return -EBUSY;
-
- i915_gem_object_fence_lost(obj);
- i915_gem_object_update_fence(obj, fence, false);
-
- return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *avail;
- int i;
-
- /* First try to find a free reg */
- avail = NULL;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- return reg;
-
- if (!reg->pin_count)
- avail = reg;
- }
-
- if (avail == NULL)
- goto deadlock;
-
- /* None available, try to steal one or wait for a user to finish */
- list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->pin_count)
- continue;
-
- return reg;
- }
-
-deadlock:
- /* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev))
- return ERR_PTR(-EAGAIN);
-
- return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- bool enable = obj->tiling_mode != I915_TILING_NONE;
- struct drm_i915_fence_reg *reg;
- int ret;
-
- /* Have we updated the tiling parameters upon the object and so
- * will need to serialise the write to the associated fence register?
- */
- if (obj->fence_dirty) {
- ret = i915_gem_object_wait_fence(obj);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
- /* Just update our place in the LRU if our fence is getting reused. */
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj->fence_reg];
- if (!obj->fence_dirty) {
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- return 0;
- }
- } else if (enable) {
- if (WARN_ON(!obj->map_and_fenceable))
- return -EINVAL;
-
- reg = i915_find_fence_reg(dev);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- ret = i915_gem_object_wait_fence(old);
- if (ret)
- return ret;
-
- i915_gem_object_fence_lost(old);
- }
- } else
- return 0;
-
- i915_gem_object_update_fence(obj, reg, enable);
-
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -3668,9 +3356,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- unsigned long start =
+ u64 start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
- unsigned long end =
+ u64 end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3726,7 +3414,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
ggtt_view ? ggtt_view->type : 0,
size,
flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3768,22 +3456,16 @@ search_free:
goto err_remove_node;
}
- ret = i915_gem_gtt_prepare_object(obj);
- if (ret)
- goto err_remove_node;
-
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
- goto err_finish_gtt;
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
return vma;
-err_finish_gtt:
- i915_gem_gtt_finish_object(obj);
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
@@ -3854,7 +3536,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3876,7 +3558,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3938,9 +3620,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
- if (write)
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4095,12 +3774,13 @@ int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_sync(obj, pipelined);
+ ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
@@ -4211,9 +3891,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
- if (write)
- intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4254,6 +3931,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
+ /*
+ * Note that the request might not have been submitted yet,
+ * in which case emitted_jiffies will be zero.
+ */
+ if (!request->emitted_jiffies)
+ continue;
+
target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4424,32 +4108,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
--vma->pin_count;
}
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
- WARN_ON(!ggtt_vma ||
- dev_priv->fence_regs[obj->fence_reg].pin_count >
- ggtt_vma->pin_count);
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- return true;
- } else
- return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
-}
-
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -4811,8 +4469,9 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4822,7 +4481,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
- ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
@@ -4968,7 +4627,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- int ret, i;
+ int ret, i, j;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
@@ -5005,27 +4664,55 @@ i915_gem_init_hw(struct drm_device *dev)
*/
init_unused_rings(dev);
+ BUG_ON(!dev_priv->ring[RCS].default_context);
+
+ ret = i915_ppgtt_init_hw(dev);
+ if (ret) {
+ DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+ goto out;
+ }
+
+ /* Need to do basic initialisation of all rings first: */
for_each_ring(ring, dev_priv, i) {
ret = ring->init_hw(ring);
if (ret)
goto out;
}
- for (i = 0; i < NUM_L3_SLICES(dev); i++)
- i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+ /* Now it is safe to go back round and do everything else: */
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_request *req;
- ret = i915_ppgtt_init_hw(dev);
- if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
- i915_gem_cleanup_ringbuffer(dev);
- }
+ WARN_ON(!ring->default_context);
- ret = i915_gem_context_enable(dev_priv);
- if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
- i915_gem_cleanup_ringbuffer(dev);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
- goto out;
+ if (ring->id == RCS) {
+ for (j = 0; j < NUM_L3_SLICES(dev); j++)
+ i915_gem_l3_remap(req, j);
+ }
+
+ ret = i915_ppgtt_init_ring(req);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
+
+ ret = i915_gem_context_enable(req);
+ if (ret && ret != -EIO) {
+ DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+ i915_gem_request_cancel(req);
+ i915_gem_cleanup_ringbuffer(dev);
+ goto out;
+ }
+
+ i915_add_request_no_flush(req);
}
out:
@@ -5092,7 +4779,7 @@ int i915_gem_init(struct drm_device *dev)
* for all other failure, such as an allocation failure, bail.
*/
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
- atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
@@ -5112,6 +4799,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
+
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev);
}
static void
@@ -5389,3 +5084,42 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
return false;
}
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ struct sg_table *sg;
+ size_t bytes;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR_OR_NULL(obj))
+ return obj;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto fail;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto fail;
+
+ i915_gem_object_pin_pages(obj);
+ sg = obj->pages;
+ bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+ i915_gem_object_unpin_pages(obj);
+
+ if (WARN_ON(bytes != size)) {
+ DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ return obj;
+
+fail:
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+}
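/*
 * Illustrative sketch (not part of the original change): typical use of
 * the new helper, e.g. uploading a firmware-style blob into a GEM object.
 * Note the mixed error contract inherited from i915_gem_alloc_object():
 * the result may be NULL (allocation failure) or an ERR_PTR (a later
 * step failed), hence IS_ERR_OR_NULL. The function name is hypothetical.
 */
static struct drm_i915_gem_object *
example_upload_blob(struct drm_device *dev, const void *blob, size_t len)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, blob, len);
	if (IS_ERR_OR_NULL(obj))
		DRM_DEBUG("blob upload failed\n");

	return obj;
}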
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..8e893b354bcc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref,
- typeof(*ctx), ref);
+ struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
trace_i915_context_free(ctx);
@@ -157,9 +156,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_object_create_stolen(dev, size);
- if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -197,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
+ ctx->i915 = dev_priv;
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
@@ -289,6 +287,7 @@ err_unpin:
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
@@ -409,32 +408,23 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_context_unreference(dctx);
}
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring;
- int ret, i;
-
- BUG_ON(!dev_priv->ring[RCS].default_context);
+ struct intel_engine_cs *ring = req->ring;
+ int ret;
if (i915.enable_execlists) {
- for_each_ring(ring, dev_priv, i) {
- if (ring->init_context) {
- ret = ring->init_context(ring,
- ring->default_context);
- if (ret) {
- DRM_ERROR("ring init context: %d\n",
- ret);
- return ret;
- }
- }
- }
+ if (ring->init_context == NULL)
+ return 0;
+ ret = ring->init_context(req);
} else
- for_each_ring(ring, dev_priv, i) {
- ret = i915_switch_context(ring, ring->default_context);
- if (ret)
- return ret;
- }
+ ret = i915_switch_context(req);
+
+ if (ret) {
+ DRM_ERROR("ring init context: %d\n", ret);
+ return ret;
+ }
return 0;
}
@@ -487,10 +477,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}
static inline int
-mi_set_context(struct intel_engine_cs *ring,
- struct intel_context *new_context,
- u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -505,13 +494,15 @@ mi_set_context(struct intel_engine_cs *ring,
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+ if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+ flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+ else if (INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
@@ -519,7 +510,7 @@ mi_set_context(struct intel_engine_cs *ring,
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
- ret = intel_ring_begin(ring, len);
+ ret = intel_ring_begin(req, len);
if (ret)
return ret;
@@ -542,7 +533,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -623,9 +614,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
return false;
}
-static int do_switch(struct intel_engine_cs *ring,
- struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
{
+ struct intel_context *to = req->ctx;
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
@@ -661,7 +653,7 @@ static int do_switch(struct intel_engine_cs *ring,
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
@@ -703,7 +695,7 @@ static int do_switch(struct intel_engine_cs *ring,
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
- ret = mi_set_context(ring, to, hw_flags);
+ ret = mi_set_context(req, hw_flags);
if (ret)
goto unpin_out;
@@ -712,7 +704,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
@@ -728,7 +720,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (!(to->remap_slice & (1<<i)))
continue;
- ret = i915_gem_l3_remap(ring, i);
+ ret = i915_gem_l3_remap(req, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -744,7 +736,7 @@ static int do_switch(struct intel_engine_cs *ring,
*/
if (from != NULL) {
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -768,7 +760,7 @@ done:
if (uninitialized) {
if (ring->init_context) {
- ret = ring->init_context(ring, to);
+ ret = ring->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
@@ -784,8 +776,7 @@ unpin_out:
/**
* i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
@@ -796,25 +787,25 @@ unpin_out:
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
-int i915_switch_context(struct intel_engine_cs *ring,
- struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
- if (to != ring->last_context) {
- i915_gem_context_reference(to);
+ if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+ if (req->ctx != ring->last_context) {
+ i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
- ring->last_context = to;
+ ring->last_context = req->ctx;
}
return 0;
}
- return do_switch(ring, to);
+ return do_switch(req);
}
static bool contexts_enabled(struct drm_device *dev)
@@ -900,6 +891,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
break;
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+ break;
default:
ret = -EINVAL;
break;
@@ -937,6 +931,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
ctx->hang_stats.ban_period_seconds = args->value;
break;
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ if (args->size) {
+ ret = -EINVAL;
+ } else {
+ ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+ ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+ }
+ break;
default:
ret = -EINVAL;
break;
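/*
 * Illustrative userspace sketch (not part of the original change),
 * assuming the drm_i915_gem_context_param uapi from i915_drm.h: setting
 * I915_CONTEXT_PARAM_NO_ZEROMAP asks execbuf to bias this context's
 * mappings away from GPU address zero (see the __EXEC_OBJECT_NEEDS_BIAS
 * hunk below). size must be zero, per the -EINVAL check above.
 */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_no_zeromap(int fd, unsigned int ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,	/* any other value is rejected */
		.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}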
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..e9c2bfd85b52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
return PTR_ERR(sg);
obj->pages = sg;
- obj->has_dma_mapping = true;
return 0;
}
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
- obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7fa14516cda..a953d4975b8c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
+ struct intel_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
obj = vma->obj;
entry = vma->exec_entry;
+ if (ctx->flags & CONTEXT_NO_ZEROMAP)
+ entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
@@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct eb_vmas *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct intel_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
@@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
@@ -887,10 +892,10 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(ring);
+ const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
@@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
}
if (flush_chipset)
- i915_gem_chipset_flush(ring->dev);
+ i915_gem_chipset_flush(req->ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(req);
}
static bool
@@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].flags & invalid_flags)
return -EINVAL;
+ if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+ return -EINVAL;
+
/* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
@@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
- struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -1024,17 +1032,17 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
+ obj->dirty = 1; /* be paranoid */
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- i915_vma_move_to_active(vma, ring);
+ i915_vma_move_to_active(vma, req);
if (obj->base.write_domain) {
- obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+ intel_fb_obj_invalidate(obj, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
- struct drm_file *file,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
- ring->gpu_caches_dirty = true;
+ params->ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj);
+ __i915_add_request(params->request, params->batch_obj, true);
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
@@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return -EINVAL;
}
- ret = intel_ring_begin(ring, 4 * 3);
+ ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;
@@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
}
static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
struct drm_clip_rect *box,
int DR1, int DR4)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring,
}
if (INTEL_INFO(ring->dev)->gen >= 4) {
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring,
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1186,17 +1193,15 @@ err:
}
int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags)
+ struct list_head *vmas)
{
struct drm_clip_rect *cliprects = NULL;
+ struct drm_device *dev = params->dev;
+ struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
- u64 exec_len;
+ u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
@@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;
- ret = i915_switch_context(ring, ctx);
+ ret = i915_switch_context(params->request);
if (ret)
goto error;
- WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
goto error;
@@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
+ ret = i915_reset_gen7_sol_offsets(dev, params->request);
if (ret)
goto error;
}
- exec_len = args->batch_len;
+ exec_len = args->batch_len;
+ exec_start = params->batch_obj_vm_offset +
+ params->args_batch_start_offset;
+
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(ring, &cliprects[i],
+ ret = i915_emit_box(params->request, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
- dispatch_flags);
+ params->dispatch_flags);
if (ret)
goto error;
}
} else {
- ret = ring->dispatch_execbuffer(ring,
+ ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
- dispatch_flags);
+ params->dispatch_flags);
if (ret)
return ret;
}
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+ i915_gem_execbuffer_retire_commands(params);
error:
kfree(cliprects);
@@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
+ struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+ struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset;
u32 dispatch_flags;
int ret;
bool need_relocs;
@@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+ if (!HAS_RESOURCE_STREAMER(dev)) {
+ DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+ return -EINVAL;
+ }
+ if (ring->id != RCS) {
+ DRM_DEBUG("RS is not available on %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ dispatch_flags |= I915_DISPATCH_RS;
+ }
+
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
vm = &dev_priv->gtt.base;
+ memset(&params_master, 0x00, sizeof(params_master));
+
eb = eb_create(args);
if (eb == NULL) {
i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
@@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
+ params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
@@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* command parser has accepted.
*/
dispatch_flags |= I915_DISPATCH_SECURE;
- exec_start = 0;
+ params->args_batch_start_offset = 0;
batch_obj = parsed_batch_obj;
}
}
@@ -1600,15 +1626,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+ params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
} else
- exec_start += i915_gem_obj_offset(batch_obj, vm);
+ params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
- ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start,
- dispatch_flags);
+ /* Allocate a request for this batch buffer nice and early. */
+ ret = i915_gem_request_alloc(ring, ctx, &params->request);
+ if (ret)
+ goto err_batch_unpin;
+
+ ret = i915_gem_request_add_to_client(params->request, file);
+ if (ret)
+ goto err_batch_unpin;
/*
+ * Save assorted stuff away to pass through to *_submission().
+ * NB: This data should be 'persistent' and not local as it will be
+ * kept around beyond the duration of the IOCTL once the GPU
+ * scheduler arrives.
+ */
+ params->dev = dev;
+ params->file = file;
+ params->ring = ring;
+ params->dispatch_flags = dispatch_flags;
+ params->batch_obj = batch_obj;
+ params->ctx = ctx;
+
+ ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
+ /*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
* batch vma for correctness. For less ugly and less fragility this
* needs to be adjusted to also track the ggtt batch vma properly as
@@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
+
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
eb_destroy(eb);
+ /*
+ * If the request was created but not successfully submitted then it
+ * must be freed again. If it was submitted then it is being tracked
+ * on the active request list and no cleanup is required here.
+ */
+ if (ret && params->request)
+ i915_gem_request_cancel(params->request);
+
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 000000000000..af1f8c461060
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore, on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore, fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for CPU access. Also note that some code wants an unfenced view; for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
+
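/*
 * Illustrative sketch (not part of the original change): the lazy
 * fencing API documented above. Code about to access a tiled object
 * through a GTT mmap takes a fence first; code that later needs an
 * unfenced view removes it. The wrapper name is hypothetical.
 */
static int example_detiled_access(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Attach (or reuse) a fence register so GTT access is detiled. */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		return ret;

	/* ... CPU access through the GTT mapping goes here ... */

	/* If an unfenced view is required afterwards, drop the fence: */
	return i915_gem_object_put_fence(obj);
}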
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint64_t val;
+
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
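+ /* The value assembled below packs, per the code: the last page of
+ * the fenced range into the upper 32 bits, the first page into the
+ * lower 32 bits, the pitch in 128 byte units (minus one) at the
+ * pitch shift, plus the Y-tiling and valid bits.
+ */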
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+ 0xfffff000) << 32;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ int pitch_val;
+ int tile_width;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
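+ /* e.g. a 2048 byte stride of 512 byte tiles gives ffs(4) - 1 = 2,
+ * i.e. log2 of the pitch measured in tiles.
+ */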
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ if (obj) {
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint32_t pitch_val;
+
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = i915_gem_obj_ggtt_offset(obj);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ mb();
+
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
+ if (IS_GEN2(dev))
+ i830_write_fence_reg(dev, reg, obj);
+ else if (IS_GEN3(dev))
+ i915_write_fence_reg(dev, reg, obj);
+ else if (INTEL_INFO(dev)->gen >= 4)
+ i965_write_fence_reg(dev, reg, obj);
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
+ obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->last_fenced_req) {
+ int ret = i915_wait_request(obj->last_fenced_req);
+ if (ret)
+ return ret;
+
+ i915_gem_request_assign(&obj->last_fenced_req, NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to remove the fence from
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_fence_reg *fence;
+ int ret;
+
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
+
+ fence = &dev_priv->fence_regs[obj->fence_reg];
+
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
+ i915_gem_object_fence_lost(obj);
+ i915_gem_object_update_fence(obj, fence, false);
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *avail;
+ int i;
+
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
+ }
+
+ if (avail == NULL)
+ goto deadlock;
+
+ /* None available, try to steal one or wait for a user to finish */
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ return reg;
+ }
+
+deadlock:
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(dev))
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
+ struct drm_i915_fence_reg *reg;
+ int ret;
+
+ /* Have we updated the tiling parameters upon the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
+ }
+ } else if (enable) {
+ if (WARN_ON(!obj->map_and_fenceable))
+ return -EINVAL;
+
+ reg = i915_find_fence_reg(dev);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ ret = i915_gem_object_wait_fence(old);
+ if (ret)
+ return ret;
+
+ i915_gem_object_fence_lost(old);
+ }
+ } else
+ return 0;
+
+ i915_gem_object_update_fence(obj, reg, enable);
+
+ return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence. It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish the two cases.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again. To be called
+ * after a GPU reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence; otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
+ }
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
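+
+/*
+ * Illustrative sketch (a hypothetical helper, not driver code): for a
+ * CPU copy, I915_BIT_6_SWIZZLE_9_10 means bit 6 of every byte address
+ * is XORed with bits 9 and 10 before indexing into the page:
+ *
+ *	static u32 swizzle_bit_6_9_10(u32 offset)
+ *	{
+ *		return offset ^ (((offset >> 3) ^ (offset >> 4)) & (1 << 6));
+ *	}
+ *
+ * The _9_10_11 and _9_10_17 variants XOR bit 11 or bit 17 in as well.
+ */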
+
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ /*
+ * On BDW+, swizzling is not used. We leave the CPU memory
+ * controller in charge of optimizing memory accesses without
+ * the extra address manipulation on the GPU side.
+ *
+ * VLV and CHV don't have GPU swizzling.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ if (dev_priv->preserve_bios_swizzle) {
+ if (I915_READ(DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated
+ * with identically sized DIMMs. We don't need to check
+ * the 3rd channel because no CPU with a GPU attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ } else if (IS_GEN5(dev)) {
+ /* On Ironlake, whatever the DRAM config, the GPU always
+ * does the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ }
+
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* The 965, G33, and newer have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+ char temp[64];
+ char *vaddr;
+ int i;
+
+ vaddr = kmap(page);
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+ kunmap(page);
+}
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct sg_page_iter sg_iter;
+ int i;
+
+ if (obj->bit_17 == NULL)
+ return;
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ char new_bit_17 = page_to_phys(page) >> 17;
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
+ }
+ i++;
+ }
+}
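+
+/*
+ * Sketch of the expected pairing, per the kerneldoc above and below
+ * (not an additional API):
+ *
+ *	i915_gem_object_save_bit_17_swizzle(obj);
+ *	... backing storage unpinned, pages may move or be swapped out ...
+ *	i915_gem_object_do_bit_17_swizzle(obj);
+ */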
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct sg_page_iter sg_iter;
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
+ }
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+ __set_bit(i, obj->bit_17);
+ else
+ __clear_bit(i, obj->bit_17);
+ i++;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa2883ac18..96054a560f4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -192,9 +192,8 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
{
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
@@ -301,75 +300,120 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-#define i915_dma_unmap_single(px, dev) \
- __i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
- struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+ struct i915_page_dma *p, gfp_t flags)
{
struct device *device = &dev->pdev->dev;
- dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ p->page = alloc_page(flags);
+ if (!p->page)
+ return -ENOMEM;
+
+ p->daddr = dma_map_page(device,
+ p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(device, p->daddr)) {
+ __free_page(p->page);
+ return -EINVAL;
+ }
+
+ return 0;
}
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px: Page table/dir/etc to get a DMA map for
- * @dev: drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
- i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+ return __setup_page_dma(dev, p, GFP_KERNEL);
+}
-static int i915_dma_map_page_single(struct page *page,
- struct drm_device *dev,
- dma_addr_t *daddr)
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
- struct device *device = &dev->pdev->dev;
+ if (WARN_ON(!p->page))
+ return;
- *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device, *daddr))
- return -ENOMEM;
+ dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(p->page);
+ memset(p, 0, sizeof(*p));
+}
- return 0;
+static void *kmap_page_dma(struct i915_page_dma *p)
+{
+ return kmap_atomic(p->page);
}
-static void unmap_and_free_pt(struct i915_page_table *pt,
- struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
- if (WARN_ON(!pt->page))
- return;
+ /* There are only a few exceptions for gen >= 6: chv and bxt.
+ * And we are not sure about the latter, so play safe for now.
+ */
+ if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
- i915_dma_unmap_single(pt, dev);
- __free_page(pt->page);
- kfree(pt->used_ptes);
- kfree(pt);
+ kunmap_atomic(vaddr);
}
-static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+ const uint64_t val)
{
- gen8_pte_t *pt_vaddr, scratch_pte;
int i;
+ uint64_t * const vaddr = kmap_page_dma(p);
- pt_vaddr = kmap_atomic(pt->page);
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true);
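+ /* a 4KiB page holds 512 64-bit entries (4096 / 8) */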
+ for (i = 0; i < 512; i++)
+ vaddr[i] = val;
+
+ kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+ const uint32_t val32)
+{
+ uint64_t v = val32;
+
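+ /* replicate the 32-bit value into both halves of the qword,
+ * e.g. 0x11223344 becomes 0x1122334411223344
+ */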
+ v = v << 32 | val32;
+
+ fill_page_dma(dev, p, v);
+}
+
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+ struct i915_page_scratch *sp;
+ int ret;
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (sp == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+ if (ret) {
+ kfree(sp);
+ return ERR_PTR(ret);
+ }
+
+ set_pages_uc(px_page(sp), 1);
+
+ return sp;
+}
- for (i = 0; i < GEN8_PTES; i++)
- pt_vaddr[i] = scratch_pte;
+static void free_scratch_page(struct drm_device *dev,
+ struct i915_page_scratch *sp)
+{
+ set_pages_wb(px_page(sp), 1);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ cleanup_px(dev, sp);
+ kfree(sp);
}
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
struct i915_page_table *pt;
const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@ static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- pt->page = alloc_page(GFP_KERNEL);
- if (!pt->page)
- goto fail_page;
-
- ret = i915_dma_map_single(pt, dev);
+ ret = setup_px(dev, pt);
if (ret)
- goto fail_dma;
+ goto fail_page_m;
return pt;
-fail_dma:
- __free_page(pt->page);
-fail_page:
+fail_page_m:
kfree(pt->used_ptes);
fail_bitmap:
kfree(pt);
@@ -406,18 +444,38 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void unmap_and_free_pd(struct i915_page_directory *pd,
- struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
- if (pd->page) {
- i915_dma_unmap_single(pd, dev);
- __free_page(pd->page);
- kfree(pd->used_pdes);
- kfree(pd);
- }
+ cleanup_px(dev, pt);
+ kfree(pt->used_ptes);
+ kfree(pt);
+}
+
+static void gen8_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt)
+{
+ gen8_pte_t scratch_pte;
+
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true);
+
+ fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt)
+{
+ gen6_pte_t scratch_pte;
+
+ WARN_ON(px_dma(vm->scratch_page) == 0);
+
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
+
+ fill32_px(vm->dev, pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -429,38 +487,52 @@ static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
sizeof(*pd->used_pdes), GFP_KERNEL);
if (!pd->used_pdes)
- goto free_pd;
-
- pd->page = alloc_page(GFP_KERNEL);
- if (!pd->page)
- goto free_bitmap;
+ goto fail_bitmap;
- ret = i915_dma_map_single(pd, dev);
+ ret = setup_px(dev, pd);
if (ret)
- goto free_page;
+ goto fail_page_m;
return pd;
-free_page:
- __free_page(pd->page);
-free_bitmap:
+fail_page_m:
kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
kfree(pd);
return ERR_PTR(ret);
}
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+ if (px_page(pd)) {
+ cleanup_px(dev, pd);
+ kfree(pd->used_pdes);
+ kfree(pd);
+ }
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd)
+{
+ gen8_pde_t scratch_pde;
+
+ scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+ fill_px(vm->dev, pd, scratch_pde);
+}
+
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
BUG_ON(entry >= 4);
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -476,16 +548,14 @@ static int gen8_write_pdp(struct intel_engine_cs *ring,
}
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
int i, ret;
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
- struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
- dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
- /* The page directory might be NULL, but we need to clear out
- * whatever the previous context might have used. */
- ret = gen8_write_pdp(ring, i, pd_daddr);
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ ret = gen8_write_pdp(req, i, pd_daddr);
if (ret)
return ret;
}
@@ -507,13 +577,12 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
- scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+ scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
I915_CACHE_LLC, use_scratch);
while (num_entries) {
struct i915_page_directory *pd;
struct i915_page_table *pt;
- struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
break;
@@ -525,25 +594,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
pt = pd->page_table[pde];
- if (WARN_ON(!pt->page))
+ if (WARN_ON(!px_page(pt)))
break;
- page_table = pt->page;
-
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES)
last_pte = GEN8_PTES;
- pt_vaddr = kmap_atomic(page_table);
+ pt_vaddr = kmap_px(pt);
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
num_entries--;
}
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt);
pte = 0;
if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (pt_vaddr == NULL) {
struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
struct i915_page_table *pt = pd->page_table[pde];
- struct page *page_table = pt->page;
-
- pt_vaddr = kmap_atomic(page_table);
+ pt_vaddr = kmap_px(pt);
}
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES) {
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
pdpe++;
@@ -595,58 +656,64 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pte = 0;
}
}
- if (pt_vaddr) {
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
- kunmap_atomic(pt_vaddr);
- }
-}
-
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
- struct i915_page_table *pt,
- struct drm_device *dev)
-{
- gen8_pde_t entry =
- gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
- *pde = entry;
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
- gen8_pde_t *page_directory;
- struct i915_page_table *pt;
- int i;
-
- page_directory = kmap_atomic(pd->page);
- pt = ppgtt->scratch_pt;
- for (i = 0; i < I915_PDES; i++)
- /* Map the PDE to the page table */
- __gen8_do_map_pt(page_directory + i, pt, vm->dev);
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(page_directory, PAGE_SIZE);
- kunmap_atomic(page_directory);
+ if (pt_vaddr)
+ kunmap_px(ppgtt, pt_vaddr);
}
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+ struct i915_page_directory *pd)
{
int i;
- if (!pd->page)
+ if (!px_page(pd))
return;
for_each_set_bit(i, pd->used_pdes, I915_PDES) {
if (WARN_ON(!pd->page_table[i]))
continue;
- unmap_and_free_pt(pd->page_table[i], dev);
+ free_pt(dev, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ vm->scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(vm->scratch_page))
+ return PTR_ERR(vm->scratch_page);
+
+ vm->scratch_pt = alloc_pt(dev);
+ if (IS_ERR(vm->scratch_pt)) {
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pt);
+ }
+
+ vm->scratch_pd = alloc_pd(dev);
+ if (IS_ERR(vm->scratch_pd)) {
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pd);
+ }
+
+ gen8_initialize_pt(vm, vm->scratch_pt);
+ gen8_initialize_pd(vm, vm->scratch_pd);
+
+ return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ free_pd(dev, vm->scratch_pd);
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+}
+
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (WARN_ON(!ppgtt->pdp.page_directory[i]))
continue;
- gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ gen8_free_page_tables(ppgtt->base.dev,
+ ppgtt->pdp.page_directory[i]);
+ free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
}
- unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ gen8_free_scratch(vm);
}
/**
@@ -698,24 +765,24 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
/* Don't reallocate page tables */
if (pt) {
/* Scratch is never allocated this way */
- WARN_ON(pt == ppgtt->scratch_pt);
+ WARN_ON(pt == ppgtt->base.scratch_pt);
continue;
}
- pt = alloc_pt_single(dev);
+ pt = alloc_pt(dev);
if (IS_ERR(pt))
goto unwind_out;
gen8_initialize_pt(&ppgtt->base, pt);
pd->page_table[pde] = pt;
- set_bit(pde, new_pts);
+ __set_bit(pde, new_pts);
}
return 0;
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- unmap_and_free_pt(pd->page_table[pde], dev);
+ free_pt(dev, pd->page_table[pde]);
return -ENOMEM;
}
@@ -756,27 +823,24 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
- /* FIXME: upper bound must not overflow 32 bits */
- WARN_ON((start + length) > (1ULL << 32));
-
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
if (pd)
continue;
- pd = alloc_pd_single(dev);
+ pd = alloc_pd(dev);
if (IS_ERR(pd))
goto unwind_out;
gen8_initialize_pd(&ppgtt->base, pd);
pdp->page_directory[pdpe] = pd;
- set_bit(pdpe, new_pds);
+ __set_bit(pdpe, new_pds);
}
return 0;
unwind_out:
for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
- unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+ free_pd(dev, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -830,6 +894,16 @@ err_out:
return -ENOMEM;
}
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
static int gen8_alloc_va_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
@@ -848,7 +922,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
* actually use the other side of the canonical address space.
*/
if (WARN_ON(start + length < start))
- return -ERANGE;
+ return -ENODEV;
+
+ if (WARN_ON(start + length > ppgtt->base.total))
+ return -ENODEV;
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
if (ret)
@@ -876,7 +953,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
- gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+ gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = gen8_clamp_pd(start, length);
uint64_t pd_start = start;
@@ -897,36 +974,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
gen8_pte_count(pd_start, pd_len));
/* Our pde is now pointing to the pagetable, pt */
- set_bit(pde, pd->used_pdes);
+ __set_bit(pde, pd->used_pdes);
/* Map the PDE to the page table */
- __gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+ page_directory[pde] = gen8_pde_encode(px_dma(pt),
+ I915_CACHE_LLC);
/* NB: We haven't yet mapped ptes to pages. At this
* point we're still relying on insert_entries() */
}
- if (!HAS_LLC(vm->dev))
- drm_clflush_virt_range(page_directory, PAGE_SIZE);
-
- kunmap_atomic(page_directory);
+ kunmap_px(ppgtt, page_directory);
- set_bit(pdpe, ppgtt->pdp.used_pdpes);
+ __set_bit(pdpe, ppgtt->pdp.used_pdpes);
}
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ mark_tlbs_dirty(ppgtt);
return 0;
err_out:
while (pdpe--) {
for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
- unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+ free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
- unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+ free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ mark_tlbs_dirty(ppgtt);
return ret;
}
@@ -939,16 +1016,11 @@ err_out:
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pt))
- return PTR_ERR(ppgtt->scratch_pt);
-
- ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pd))
- return PTR_ERR(ppgtt->scratch_pd);
+ int ret;
- gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
- gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+ ret = gen8_init_scratch(&ppgtt->base);
+ if (ret)
+ return ret;
ppgtt->base.start = 0;
ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pte, pde, temp;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
- dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+ const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
pd_entry = readl(ppgtt->pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -996,7 +1069,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
}
}
@@ -1031,7 +1105,7 @@ static void gen6_write_pde(struct i915_page_directory *pd,
container_of(pd, struct i915_hw_ppgtt, pd);
u32 pd_entry;
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+ BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return (ppgtt->pd.pd_offset / 64) << 16;
+ return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1087,8 +1162,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1120,7 +1197,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1129,8 +1206,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring)
+ struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1214,19 +1292,20 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > GEN6_PTES)
last_pte = GEN6_PTES;
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
@@ -1250,54 +1329,25 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+ pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
act_pt++;
act_pte = 0;
}
}
if (pt_vaddr)
- kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- /* If current vm != vm, */
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- gen6_pte_t *pt_vaddr, scratch_pte;
- int i;
-
- WARN_ON(vm->scratch.addr == 0);
-
- scratch_pte = vm->pte_encode(vm->scratch.addr,
- I915_CACHE_LLC, true, 0);
-
- pt_vaddr = kmap_atomic(pt->page);
-
- for (i = 0; i < GEN6_PTES; i++)
- pt_vaddr[i] = scratch_pte;
-
- kunmap_atomic(pt_vaddr);
+ kunmap_px(ppgtt, pt_vaddr);
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+ uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_table *pt;
- const uint32_t start_save = start, length_save = length;
+ uint32_t start, length, start_save, length_save;
uint32_t pde, temp;
int ret;
- WARN_ON(upper_32_bits(start));
+ if (WARN_ON(start_in + length_in > ppgtt->base.total))
+ return -ENODEV;
+
+ start = start_save = start_in;
+ length = length_save = length_in;
bitmap_zero(new_page_tables, I915_PDES);
@@ -1319,7 +1373,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* tables.
*/
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
- if (pt != ppgtt->scratch_pt) {
+ if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue;
}
@@ -1327,7 +1381,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt_single(dev);
+ pt = alloc_pt(dev);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -1336,7 +1390,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
gen6_initialize_pt(vm, pt);
ppgtt->pd.page_table[pde] = pt;
- set_bit(pde, new_page_tables);
+ __set_bit(pde, new_page_tables);
trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
@@ -1350,7 +1404,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
bitmap_set(tmp_bitmap, gen6_pte_index(start),
gen6_pte_count(start, length));
- if (test_and_clear_bit(pde, new_page_tables))
+ if (__test_and_clear_bit(pde, new_page_tables))
gen6_write_pde(&ppgtt->pd, pde, pt);
trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@ unwind_out:
for_each_set_bit(pde, new_page_tables, I915_PDES) {
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
- ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
- unmap_and_free_pt(pt, vm->dev);
+ ppgtt->pd.page_table[pde] = vm->scratch_pt;
+ free_pt(vm->dev, pt);
}
mark_tlbs_dirty(ppgtt);
return ret;
}
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ vm->scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(vm->scratch_page))
+ return PTR_ERR(vm->scratch_page);
+
+ vm->scratch_pt = alloc_pt(dev);
+ if (IS_ERR(vm->scratch_pt)) {
+ free_scratch_page(dev, vm->scratch_page);
+ return PTR_ERR(vm->scratch_pt);
+ }
+
+ gen6_initialize_pt(vm, vm->scratch_pt);
+
+ return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+ struct drm_device *dev = vm->dev;
+
+ free_pt(dev, vm->scratch_pt);
+ free_scratch_page(dev, vm->scratch_page);
+}
+
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
struct i915_page_table *pt;
uint32_t pde;
-
drm_mm_remove_node(&ppgtt->node);
gen6_for_all_pdes(pt, ppgtt, pde) {
- if (pt != ppgtt->scratch_pt)
- unmap_and_free_pt(pt, ppgtt->base.dev);
+ if (pt != vm->scratch_pt)
+ free_pt(ppgtt->base.dev, pt);
}
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
- unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+ gen6_free_scratch(vm);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
+ struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool retried = false;
@@ -1413,11 +1493,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
- ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->scratch_pt))
- return PTR_ERR(ppgtt->scratch_pt);
- gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+ ret = gen6_init_scratch(vm);
+ if (ret)
+ return ret;
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@ alloc:
return 0;
err_out:
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ gen6_free_scratch(vm);
return ret;
}
@@ -1464,7 +1543,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
- ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd.pd_offset =
+ ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+ ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -1515,23 +1594,21 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->node.start / PAGE_SIZE);
DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd.pd_offset << 10);
+ ppgtt->pd.base.ggtt_offset << 10);
return 0;
}
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
ppgtt->base.dev = dev;
- ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
}
+
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
int i915_ppgtt_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int i, ret = 0;
-
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
@@ -1573,16 +1645,23 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
else
MISSING_CASE(INTEL_INFO(dev)->gen);
- if (ppgtt) {
- for_each_ring(ring, dev_priv, i) {
- ret = ppgtt->switch_mm(ppgtt, ring);
- if (ret != 0)
- return ret;
- }
- }
+ return 0;
+}
- return ret;
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+ struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ if (i915.enable_execlists)
+ return 0;
+
+ if (!ppgtt)
+ return 0;
+
+ return ppgtt->switch_mm(ppgtt, req);
}
+
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
@@ -1723,9 +1802,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- if (obj->has_dma_mapping)
- return 0;
-
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
@@ -1846,7 +1922,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
@@ -1872,7 +1948,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1926,6 +2003,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);
+
+ /* Note the inconsistency here is due to absence of the
+ * aliasing ppgtt on gen4 and earlier. Though we always
+ * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+ * without the appgtt, we cannot honour that request and so
+ * must substitute it with a global binding. Since we do this
+ * behind the upper layers back, we need to explicitly set
+ * the bound flag ourselves.
+ */
+ vma->bound |= GLOBAL_BIND;
+
}
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +2060,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (!obj->has_dma_mapping)
- dma_unmap_sg(&dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
@@ -2099,7 +2185,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long gtt_size, mappable_size;
+ u64 gtt_size, mappable_size;
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
@@ -2129,42 +2215,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
vm->cleanup(vm);
}
-static int setup_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct page *page;
- dma_addr_t dma_addr;
-
- page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
- if (page == NULL)
- return -ENOMEM;
- set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
- dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, dma_addr))
- return -EINVAL;
-#else
- dma_addr = page_to_phys(page);
-#endif
- dev_priv->gtt.base.scratch.page = page;
- dev_priv->gtt.base.scratch.addr = dma_addr;
-
- return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct page *page = dev_priv->gtt.base.scratch.page;
-
- set_pages_wb(page, 1);
- pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(page);
-}
-
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2247,8 +2297,8 @@ static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_page_scratch *scratch_page;
phys_addr_t gtt_phys_addr;
- int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2270,14 +2320,17 @@ static int ggtt_probe_common(struct drm_device *dev,
return -ENOMEM;
}
- ret = setup_scratch_page(dev);
- if (ret) {
+ scratch_page = alloc_scratch_page(dev);
+ if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(dev_priv->gtt.gsm);
+ return PTR_ERR(scratch_page);
}
- return ret;
+ dev_priv->gtt.base.scratch_page = scratch_page;
+
+ return 0;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2354,13 +2407,13 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
}
static int gen8_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int gtt_size;
+ u64 gtt_size;
u16 snb_gmch_ctl;
int ret;
@@ -2402,10 +2455,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
}
static int gen6_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
@@ -2419,7 +2472,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
* a coarse sanity check.
*/
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
- DRM_ERROR("Unknown GMADR size (%lx)\n",
+ DRM_ERROR("Unknown GMADR size (%llx)\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
@@ -2449,14 +2502,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
iounmap(gtt->gsm);
- teardown_scratch_page(vm->dev);
+ free_scratch_page(vm->dev, vm->scratch_page);
}
static int i915_gmch_probe(struct drm_device *dev,
- size_t *gtt_total,
+ u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
- unsigned long *mappable_end)
+ u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -2513,17 +2566,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
+ gtt->base.dev = dev;
+
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
- gtt->base.dev = dev;
-
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %zdM\n",
+ DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
@@ -2546,6 +2599,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ struct i915_vma *vma;
+ bool flush;
i915_check_and_clear_faults(dev);
@@ -2555,16 +2610,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.total,
true);
+ /* Cache flush objects bound into GGTT and rebind them. */
+ vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
- if (!vma)
- continue;
+ flush = false;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (vma->vm != vm)
+ continue;
- i915_gem_clflush_object(obj, obj->pin_display);
- WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
- }
+ WARN_ON(i915_vma_bind(vma, obj->cache_level,
+ PIN_UPDATE));
+
+ flush = true;
+ }
+ if (flush)
+ i915_gem_clflush_object(obj, obj->pin_display);
+ }
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
@@ -2691,30 +2753,17 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
- unsigned long size, pages, rot_pages;
+ unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
struct sg_page_iter sg_iter;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
- unsigned int tile_pitch, tile_height;
- unsigned int width_pages, height_pages;
int ret = -ENOMEM;
- pages = obj->base.size / PAGE_SIZE;
-
- /* Calculate tiling geometry. */
- tile_height = intel_tile_height(dev, rot_info->pixel_format,
- rot_info->fb_modifier);
- tile_pitch = PAGE_SIZE / tile_height;
- width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
- height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
- rot_pages = width_pages * height_pages;
- size = rot_pages * PAGE_SIZE;
-
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+ sizeof(dma_addr_t));
if (!page_addr_list)
return ERR_PTR(ret);
@@ -2723,7 +2772,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
@@ -2735,13 +2784,15 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
}
/* Rotate the pages. */
- rotate_pages(page_addr_list, width_pages, height_pages, st);
+ rotate_pages(page_addr_list,
+ rot_info->width_pages, rot_info->height_pages,
+ st);
DRM_DEBUG_KMS(
- "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
- size, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, width_pages, height_pages,
- rot_pages);
+ "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+ obj->base.size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, rot_info->width_pages,
+ rot_info->height_pages, size_pages);
drm_free_large(page_addr_list);
@@ -2753,10 +2804,10 @@ err_st_alloc:
drm_free_large(page_addr_list);
DRM_DEBUG_KMS(
- "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
- size, ret, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, width_pages, height_pages,
- rot_pages);
+ "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+ obj->base.size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, rot_info->width_pages,
+ rot_info->height_pages, size_pages);
return ERR_PTR(ret);
}
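
rotate_pages() itself is not visible in this hunk; conceptually it emits the source pages into the sg_table in rotated order using the precomputed width_pages x height_pages geometry now carried in rot_info. A rough, illustrative sketch of that kind of index remapping — not the driver's actual implementation:

#include <linux/types.h>

/* Illustrative only: remap a width x height grid of page addresses
 * into column-major (rotated) order. dst must hold width * height
 * entries.
 */
static void rotate_page_grid(const dma_addr_t *src, dma_addr_t *dst,
			     unsigned int width, unsigned int height)
{
	unsigned int x, y;

	for (x = 0; x < width; x++)
		for (y = 0; y < height; y++)
			dst[x * height + y] = src[y * width + x];
}
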
@@ -2874,9 +2925,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
+ /* XXX: i915_vma_pin() will fix this +- hack */
+ vma->pin_count++;
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
+ vma->pin_count--;
if (ret)
return ret;
}
@@ -2901,9 +2955,10 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- if (view->type == I915_GGTT_VIEW_NORMAL ||
- view->type == I915_GGTT_VIEW_ROTATED) {
+ if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ return view->rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0d46dd20bf71..e1cfa292f9ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,6 +126,8 @@ struct intel_rotation_info {
unsigned int pitch;
uint32_t pixel_format;
uint64_t fb_modifier;
+ unsigned int width_pages, height_pages;
+ uint64_t size;
};
struct i915_ggtt_view {
@@ -205,19 +207,34 @@ struct i915_vma {
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
-struct i915_page_table {
+struct i915_page_dma {
struct page *page;
- dma_addr_t daddr;
+ union {
+ dma_addr_t daddr;
+
+ /* For gen6/gen7 only. This is the offset in the GGTT
+ * where the page directory entries for PPGTT begin
+ */
+ uint32_t ggtt_offset;
+ };
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+ struct i915_page_dma base;
+};
+
+struct i915_page_table {
+ struct i915_page_dma base;
unsigned long *used_ptes;
};
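
Every paging structure now embeds a struct i915_page_dma named base, which is what lets the px_page()/px_dma() accessors above work on any level. A small sketch, assuming only the definitions in this header:

/* Works unchanged for i915_page_table, i915_page_directory and
 * i915_page_scratch, since each embeds a struct i915_page_dma
 * named 'base'.
 */
static inline dma_addr_t example_pt_dma(struct i915_page_table *pt)
{
	return px_dma(pt);	/* expands to (&(pt)->base)->daddr */
}
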
struct i915_page_directory {
- struct page *page; /* NULL for GEN6-GEN7 */
- union {
- uint32_t pd_offset;
- dma_addr_t daddr;
- };
+ struct i915_page_dma base;
unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -233,13 +250,12 @@ struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
- unsigned long start; /* Start offset always 0 for dri2 */
- size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+ u64 start; /* Start offset always 0 for dri2 */
+ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- struct {
- dma_addr_t addr;
- struct page *page;
- } scratch;
+ struct i915_page_scratch *scratch_page;
+ struct i915_page_table *scratch_pt;
+ struct i915_page_directory *scratch_pd;
/**
* List of objects currently involved in rendering.
@@ -300,9 +316,9 @@ struct i915_address_space {
*/
struct i915_gtt {
struct i915_address_space base;
- size_t stolen_size; /* Total size of stolen memory */
- unsigned long mappable_end; /* End offset that we can CPU map */
+ size_t stolen_size; /* Total size of stolen memory */
+ u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -314,9 +330,9 @@ struct i915_gtt {
int mtrr;
/* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
- unsigned long *mappable_end);
+ u64 *mappable_end);
};
struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd;
};
- struct i915_page_table *scratch_pt;
- struct i915_page_directory *scratch_pd;
-
struct drm_i915_file_private *file_priv;
gen6_pte_t __iomem *pd_addr;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring);
+ struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+ return test_bit(n, ppgtt->pdp.used_pdpes) ?
+ px_dma(ppgtt->pdp.page_directory[n]) :
+ px_dma(ppgtt->base.scratch_pd);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a08578..5026a6267a88 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -73,6 +73,24 @@ free_gem:
return ret;
}
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient as the null state generator makes the final batch
+ * with two passes to build command and state separately. At this point
+ * the sizes of both are known and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val) \
+ do { \
+ if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
+ ret = -ENOSPC; \
+ goto err_out; \
+ } \
+ (batch)[(i)++] = (val); \
+	} while (0)
+
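
OUT_BATCH() jumps to the enclosing function's err_out label on overflow, so it is only usable where ret and that label exist (as render_state_setup() below shows). A self-contained userspace sketch of the same bounds-checked append pattern; the command values are placeholders:

#include <stdint.h>

#define BATCH_DWORDS (4096 / sizeof(uint32_t))

#define OUT(batch, i, val) \
	do { \
		if ((i) >= BATCH_DWORDS) { \
			ret = -1; \
			goto err_out; \
		} \
		(batch)[(i)++] = (val); \
	} while (0)

/* Fill a one-page batch, bailing out instead of overrunning it. */
static int fill_batch(uint32_t *batch)
{
	unsigned int i = 0;
	int ret = 0;

	OUT(batch, i, 0x00000000);	/* placeholder no-op command */
	OUT(batch, i, 0x05000000);	/* placeholder terminator */
	return ret;

err_out:
	return ret;
}
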
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@ static int render_state_setup(struct render_state *so)
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0)
- return -EINVAL;
+ rodata->batch[i + 1] != 0) {
+ ret = -EINVAL;
+ goto err_out;
+ }
d[i++] = s;
s = upper_32_bits(r);
@@ -108,6 +128,21 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+
+ while (i % CACHELINE_DWORDS)
+ OUT_BATCH(d, i, MI_NOOP);
+
+ so->aux_batch_offset = i * sizeof(u32);
+
+ OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+ so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+ /*
+	 * Since we are sending a length, we need to strictly conform to
+ * all requirements. For Gen2 this must be a multiple of 8.
+ */
+ so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
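
ALIGN(x, a) here rounds the size up to the next multiple of a (a power of two), so for instance a 20-byte aux batch is padded to 24. For reference, the usual bit trick it boils down to:

/* The usual power-of-two round-up that ALIGN() performs: */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
/* e.g. ALIGN_UP(20, 8) == 24, ALIGN_UP(24, 8) == 24 */
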
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@ static int render_state_setup(struct render_state *so)
}
return 0;
+
+err_out:
+ kunmap(page);
+ return ret;
}
+#undef OUT_BATCH
+
void i915_gem_render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
return 0;
}
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(ring, &so);
+ ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = ring->dispatch_execbuffer(ring,
- so.ggtt_offset,
- so.rodata->batch_items * 4,
- I915_DISPATCH_SECURE);
+ ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+ so.rodata->batch_items * 4,
+ I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ if (so.aux_batch_size > 8) {
+ ret = req->ring->dispatch_execbuffer(req,
+ (so.ggtt_offset +
+ so.aux_batch_offset),
+ so.aux_batch_size,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+ }
+
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
- ret = __i915_add_request(ring, NULL, so.obj);
- /* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961ed3fad..e641bb093a90 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -37,9 +37,11 @@ struct render_state {
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
+ u32 aux_batch_size;
+ u32 aux_batch_offset;
};
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..f361c4a56995 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,6 +42,31 @@
* for is a boon.
*/
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment)
+{
+ int ret;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->mm.stolen_lock);
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+ DRM_MM_SEARCH_DEFAULT);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
+
+ return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node)
+{
+ mutex_lock(&dev_priv->mm.stolen_lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
+}
+
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,150 +176,115 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int find_compression_threshold(struct drm_device *dev,
- struct drm_mm_node *node,
- int size,
- int fb_cpp)
+void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int compression_threshold = 1;
- int ret;
-
- /* HACK: This code depends on what we will do in *_enable_fbc. If that
- * code changes, this code needs to change as well.
- *
- * The enable_fbc code will attempt to use one of our 2 compression
- * thresholds, therefore, in that case, we only have 1 resort.
- */
- /* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
- size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret == 0)
- return compression_threshold;
-
-again:
- /* HW's ability to limit the CFB is 1:4 */
- if (compression_threshold > 4 ||
- (fb_cpp == 2 && compression_threshold == 2))
- return 0;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return;
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret && INTEL_INFO(dev)->gen <= 4) {
- return 0;
- } else if (ret) {
- compression_threshold <<= 1;
- goto again;
- } else {
- return compression_threshold;
- }
+ drm_mm_takedown(&dev_priv->mm.stolen);
}
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *uninitialized_var(compressed_llb);
- int ret;
-
- ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
- size, fb_cpp);
- if (!ret)
- goto err_llb;
- else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
- }
-
- dev_priv->fbc.threshold = ret;
-
- if (INTEL_INFO(dev_priv)->gen >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
- } else {
- compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
- if (!compressed_llb)
- goto err_fb;
-
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
- 4096, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- goto err_fb;
-
- dev_priv->fbc.compressed_llb = compressed_llb;
-
- I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
- I915_WRITE(FBC_LL_BASE,
- dev_priv->mm.stolen_base + compressed_llb->start);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN6_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_512K:
+ *size = 512 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_256K:
+ *size = 256 * 1024;
+ break;
+ case GEN6_STOLEN_RESERVED_128K:
+ *size = 128 * 1024;
+ break;
+ default:
+ *size = 1024 * 1024;
+ MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
}
-
- dev_priv->fbc.uncompressed_size = size;
-
- DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
- size);
-
- return 0;
-
-err_fb:
- kfree(compressed_llb);
- drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
- return -ENOSPC;
}
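
The three *_get_stolen_reserved() helpers differ only in their masks and size encodings, so the same decode could be written table-driven. A sketch of that alternative; the mask, shift, and table values below are illustrative placeholders, not the real register layout:

#include <linux/types.h>

/* Illustrative placeholders only, not the real GEN6_STOLEN_RESERVED
 * register layout.
 */
#define EXAMPLE_SIZE_SHIFT	4
#define EXAMPLE_SIZE_MASK	(0x3 << EXAMPLE_SIZE_SHIFT)

static const unsigned long example_reserved_sizes[] = {
	1024 * 1024,	/* field value 0 */
	512 * 1024,	/* field value 1 */
	256 * 1024,	/* field value 2 */
	128 * 1024,	/* field value 3 */
};

static unsigned long example_decode_reserved_size(u32 reg_val)
{
	u32 field = (reg_val & EXAMPLE_SIZE_MASK) >> EXAMPLE_SIZE_SHIFT;

	return example_reserved_sizes[field];	/* field is 0..3 by mask */
}

Spelling each case out against its named macro, as the driver does, keeps the register encoding auditable against the spec, which is presumably why the switch form was kept.
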
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return -ENODEV;
-
- if (size <= dev_priv->fbc.uncompressed_size)
- return 0;
-
- /* Release any current block */
- i915_gem_stolen_cleanup_compression(dev);
-
- return i915_setup_compression(dev, size, fb_cpp);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN7_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN7_STOLEN_RESERVED_256K:
+ *size = 256 * 1024;
+ break;
+ default:
+ *size = 1024 * 1024;
+ MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ }
}
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->fbc.uncompressed_size == 0)
- return;
-
- drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
- if (dev_priv->fbc.compressed_llb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_llb);
- kfree(dev_priv->fbc.compressed_llb);
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
}
-
- dev_priv->fbc.uncompressed_size = 0;
}
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ unsigned long *base, unsigned long *size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ unsigned long stolen_top;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
- i915_gem_stolen_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.stolen);
+ *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+ /* On these platforms, the register doesn't have a size field, so the
+ * size is the distance between the base and the top of the stolen
+ * memory. We also have the genuine case where base is zero and there's
+ * nothing reserved. */
+ if (*base == 0)
+ *size = 0;
+ else
+ *size = stolen_top - *base;
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp;
- int bios_reserved = 0;
+ unsigned long reserved_total, reserved_base, reserved_size;
+ unsigned long stolen_top;
+
+ mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (dev_priv->mm.stolen_base == 0)
return 0;
- DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
- dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
-
- if (INTEL_INFO(dev)->gen >= 8) {
- tmp = I915_READ(GEN7_BIOS_RESERVED);
- tmp >>= GEN8_BIOS_RESERVED_SHIFT;
- tmp &= GEN8_BIOS_RESERVED_MASK;
- bios_reserved = (1024*1024) << tmp;
- } else if (IS_GEN7(dev)) {
- tmp = I915_READ(GEN7_BIOS_RESERVED);
- bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
- 256*1024 : 1024*1024;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Assume the gen6 maximum for the older platforms. */
+ reserved_size = 1024 * 1024;
+ reserved_base = stolen_top - reserved_size;
+ break;
+ case 6:
+ gen6_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ case 7:
+ gen7_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ default:
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ bdw_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ else
+ gen8_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
+ }
+
+ /* It is possible for the reserved base to be zero, but the register
+ * field for size doesn't have a zero option. */
+ if (reserved_base == 0) {
+ reserved_size = 0;
+ reserved_base = stolen_top;
}
- if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ if (reserved_base < dev_priv->mm.stolen_base ||
+ reserved_base + reserved_size > stolen_top) {
+ DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+ reserved_base, reserved_base + reserved_size,
+ dev_priv->mm.stolen_base, stolen_top);
return 0;
+ }
+
+ /* It is possible for the reserved area to end before the end of stolen
+ * memory, so just consider the start. */
+ reserved_total = stolen_top - reserved_base;
+
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+ dev_priv->gtt.stolen_size >> 10,
+ (dev_priv->gtt.stolen_size - reserved_total) >> 10);
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
- bios_reserved);
+ reserved_total);
return 0;
}
@@ -386,8 +411,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
+ i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
@@ -416,7 +443,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
- obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
@@ -449,8 +475,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!stolen)
return NULL;
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
- 4096, DRM_MM_SEARCH_DEFAULT);
+ ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;
@@ -460,7 +485,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (obj)
return obj;
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
@@ -495,7 +520,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
stolen->start = stolen_offset;
stolen->size = size;
+ mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
@@ -505,7 +532,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
@@ -546,7 +573,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
err_vma:
i915_gem_vma_destroy(vma);
err_out:
- drm_mm_remove_node(stolen);
+ i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..8a6717cc265c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -31,201 +31,32 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled. However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y. So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip -- Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics. This
- * is called "Channel XOR Randomization" in the MCH documentation. The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
+/**
+ * DOC: buffer object tiling
*
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface to
+ * declare fence register requirements.
*
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are two
+ * exceptions:
*
- * When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an object out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
+ * - For X and Y tiling the hardware provides detilers for CPU access, so-called
+ *   fences. Since there's only a limited number of them the kernel must manage
+ * these, and therefore userspace must tell the kernel the object tiling if it
+ * wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ * depends upon the physical page frame number. When swapping such objects the
+ * page frame number might change and the kernel must be able to fix this up
+ *   and hence must know the tiling. Note that on a subset of platforms with
+ * asymmetric memory channel population the swizzling pattern changes in an
+ * unknown way, and for those the kernel simply forbids swapping completely.
*
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * Since neither of these applies to new tiling layouts on modern platforms like
+ * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
+ * Anything else can be handled in userspace entirely without the kernel's
+ * involvement.
*/
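
For context, the interface described above is reached from userspace roughly as follows; a minimal sketch assuming an open DRM fd and an existing GEM handle (header paths vary between kernel UAPI and libdrm installs):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* or <i915_drm.h> from libdrm */

/* Declare an object X-tiled so the kernel can manage fences for it.
 * fd is an open DRM device, handle a GEM object, stride its row pitch
 * in bytes.
 */
static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
		return -1;
	/* arg.swizzle_mode now reports the bit-6 swizzling in effect */
	return 0;
}
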
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
- /*
- * On BDW+, swizzling is not used. We leave the CPU memory
- * controller in charge of optimizing memory accesses without
- * the extra address manipulation GPU side.
- *
- * VLV and CHV don't have GPU swizzling.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
- if (dev_priv->preserve_bios_swizzle) {
- if (I915_READ(DISP_ARB_CTL) &
- DISP_TILE_SURFACE_SWIZZLING) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
- } else {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated
- * with identically sized dimms. We don't need to check
- * the 3rd channel because no cpu with gpu attached
- * ships in that configuration. Also, swizzling only
- * makes sense for 2 channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
- }
- } else if (IS_GEN5(dev)) {
- /* On Ironlake whatever DRAM config, GPU always do
- * same swizzling setup.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
- uint32_t dcc;
-
- /* On 9xx chipsets, channel interleave by the CPU is
- * determined by DCC. For single-channel, neither the CPU
- * nor the GPU do swizzling. For dual channel interleaved,
- * the GPU's interleave is bit 9 and 10 for X tiled, and bit
- * 9 for Y tiled. The CPU's interleave is independent, and
- * can be based on either bit 11 (haven't seen this yet) or
- * bit 17 (common).
- */
- dcc = I915_READ(DCC);
- switch (dcc & DCC_ADDRESSING_MODE_MASK) {
- case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- break;
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
- * tiled buffers.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* Bit 11 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
- swizzle_y = I915_BIT_6_SWIZZLE_9_11;
- } else {
- /* Bit 17 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
- swizzle_y = I915_BIT_6_SWIZZLE_9_17;
- }
- break;
- }
-
- /* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev)) {
- uint32_t ddc2 = I915_READ(DCC2);
-
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
- }
-
- if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- */
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
- }
-
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
/* Check pitch constraints for all chips & tiling formats */
static bool
@@ -313,8 +144,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
}
/**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@ err:
}
/**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
* Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -464,7 +315,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- args->phys_swizzle_mode = args->swizzle_mode;
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+ else
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
@@ -475,75 +329,3 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
return 0;
}
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
- char temp[64];
- char *vaddr;
- int i;
-
- vaddr = kmap(page);
-
- for (i = 0; i < PAGE_SIZE; i += 128) {
- memcpy(temp, &vaddr[i], 64);
- memcpy(&vaddr[i], &vaddr[i + 64], 64);
- memcpy(&vaddr[i + 64], temp, 64);
- }
-
- kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- int i;
-
- if (obj->bit_17 == NULL)
- return;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(page);
- set_page_dirty(page);
- }
- i++;
- }
-}
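
The swizzle bookkeeping being removed here reduces to one predicate: a page needs its 64-byte halves swapped when bit 17 of its current physical address no longer matches the recorded bit. A one-function sketch of that test:

/* A page needs its 64-byte halves swapped back when bit 17 of its
 * current physical address differs from the bit recorded before the
 * object was swapped out.
 */
static int needs_bit17_swizzle(unsigned long phys_addr, int recorded_bit17)
{
	return ((phys_addr >> 17) & 1) != (recorded_bit17 & 1);
}
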
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- int page_count = obj->base.size >> PAGE_SHIFT;
- int i;
-
- if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
- sizeof(long), GFP_KERNEL);
- if (obj->bit_17 == NULL) {
- DRM_ERROR("Failed to allocate memory for bit 17 "
- "record\n");
- return;
- }
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
- __set_bit(i, obj->bit_17);
- else
- __clear_bit(i, obj->bit_17);
- i++;
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f4e5a32a16e..8fd431bcdfd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -545,6 +545,26 @@ err:
return ret;
}
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+ struct page **pvec, int num_pages)
+{
+ int ret;
+
+ ret = st_set_pages(&obj->pages, pvec, num_pages);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret) {
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+ obj->pages = NULL;
+ }
+
+ return ret;
+}
+
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (obj->userptr.work != &work->work) {
ret = 0;
} else if (pinned == num_pages) {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
pinned = 0;
}
}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
}
}
} else {
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret == 0) {
obj->userptr.work = NULL;
pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
+ i915_gem_gtt_finish_object(obj);
+
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6f4256918f76..41d0739e6fdf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -369,6 +369,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+ err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@ static void i915_error_capture_msg(struct drm_device *dev,
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
+ error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+ error->iommu = intel_iommu_gfx_mapped;
+#endif
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
}
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 000000000000..ccdc6c8ac20b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS 0xc000
+#define GS_BOOTROM_SHIFT 1
+#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
+#define GS_UKERNEL_SHIFT 8
+#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
+#define GS_MIA_SHIFT 16
+#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE 0xc050
+#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
+#define GUC_WOPCM_OFFSET 0x80000 /* 512KB */
+
+#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0 0xc200
+#define DMA_ADDR_0_LOW 0xc300
+#define DMA_ADDR_0_HIGH 0xc304
+#define DMA_ADDR_1_LOW 0xc308
+#define DMA_ADDR_1_HIGH 0xc30c
+#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
+#define DMA_ADDRESS_SPACE_GTT (8 << 16)
+#define DMA_COPY_SIZE 0xc310
+#define DMA_CTRL 0xc314
+#define UOS_MOVE (1<<4)
+#define START_DMA (1<<0)
+#define DMA_GUC_WOPCM_OFFSET 0xc340
+
+#define GEN8_GT_PM_CONFIG 0x138140
+#define GEN9_GT_PM_CONFIG 0x13816c
+#define GEN8_GT_DOORBELL_ENABLE (1<<0)
+
+#define GEN8_GTCR 0x4274
+#define GEN8_GTCR_INVALIDATE (1<<0)
+
+#define GUC_ARAT_C6DIS 0xA178
+
+#define GUC_SHIM_CONTROL 0xc064
+#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
+#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
+#define GUC_ENABLE_MIA_CACHING (1<<2)
+#define GUC_GEN10_MSGCH_ENABLE (1<<4)
+#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9)
+#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10)
+#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
+#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
+ GUC_ENABLE_READ_CACHE_LOGIC | \
+ GUC_ENABLE_MIA_CACHING | \
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT 0xc4c8
+#define HOST2GUC_TRIGGER (1<<0)
+
+#define DRBMISC1 0x1984
+#define DOORBELL_ENABLE (1<<0)
+
+#define GEN8_DRBREGL(x) (0x1000 + (x) * 8)
+#define GEN8_DRB_VALID (1<<0)
+#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR 0x44054
+
+#define GUC_BCS_RCS_IER 0xC550
+#define GUC_VCS2_VCS1_IER 0xC554
+#define GUC_WD_VECS_IER 0xC558
+#define GUC_PM_P24C_IER 0xC55C
+
+#endif
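
As a usage sketch, the status fields defined above can be decoded like this during firmware load; plain kernel types are assumed and the return convention is illustrative:

/* Decode a GUC_STATUS read with the fields defined above. Returns 1
 * when the microkernel is up, 0 when still booting, -1 on a hard
 * failure.
 */
static int guc_ukernel_ready(u32 status)
{
	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED)
		return -1;	/* firmware signature rejected */
	if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_DPC_ERROR)
		return -1;	/* microkernel reported an error */
	return (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY;
}
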
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 176de6322e4d..97f3a5640289 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -35,107 +35,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-typedef struct _drm_i915_batchbuffer32 {
- int start; /* agp offset */
- int used; /* nr bytes in use */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* multipass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_batchbuffer32_t batchbuffer32;
- drm_i915_batchbuffer_t __user *batchbuffer;
-
- if (copy_from_user
- (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
- return -EFAULT;
-
- batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
- if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
- || __put_user(batchbuffer32.start, &batchbuffer->start)
- || __put_user(batchbuffer32.used, &batchbuffer->used)
- || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
- || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
- || __put_user(batchbuffer32.num_cliprects,
- &batchbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
- &batchbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
- (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
- u32 buf; /* pointer to userspace command buffer */
- int sz; /* nr bytes in buf */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* multipass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_cmdbuffer32_t cmdbuffer32;
- drm_i915_cmdbuffer_t __user *cmdbuffer;
-
- if (copy_from_user
- (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
- return -EFAULT;
-
- cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
- if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
- &cmdbuffer->buf)
- || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
- || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
- || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
- || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
- &cmdbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
- (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
- u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_irq_emit32_t req32;
- drm_i915_irq_emit_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
- (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
- int param;
+struct drm_i915_getparam32 {
+ s32 param;
+ /*
+ * We screwed up the generic ioctl struct here and used a variable-sized
+ * pointer. Use u32 in the compat struct to match the 32bit pointer
+ * userspace expects.
+ */
u32 value;
-} drm_i915_getparam32_t;
+};
static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_i915_getparam32_t req32;
+ struct drm_i915_getparam32 req32;
drm_i915_getparam_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
(unsigned long)request);
}
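
The layout problem the comment above describes is concrete: with a real pointer in the struct, LP64 kernels see a 16-byte object (4-byte int, 4 bytes of padding, 8-byte pointer) while 32-bit userspace passed 8 bytes. An illustrative sketch of the two layouts; the struct names are hypothetical:

#include <stdint.h>

/* What an LP64 kernel sees: 4-byte int, 4 bytes padding, 8-byte
 * pointer -- 16 bytes total.
 */
struct getparam_native {
	int param;
	int *value;
};

/* What 32-bit userspace actually passed: 8 bytes total, the pointer
 * stored as a plain 32-bit value.
 */
struct getparam_compat32 {
	int32_t param;
	uint32_t value;		/* 32-bit user pointer */
};

/* sizeof(struct getparam_native) == 16 on LP64 but
 * sizeof(struct getparam_compat32) == 8, hence the explicit
 * translation in compat_i915_getparam().
 */
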
-typedef struct drm_i915_mem_alloc32 {
- int region;
- int alignment;
- int size;
- u32 region_offset; /* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_mem_alloc32_t req32;
- drm_i915_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((void __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
- (unsigned long)request);
-}
-
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
- [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
- [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
- [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
- [DRM_I915_ALLOC] = compat_i915_alloc
};
/**
@@ -204,7 +84,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn = NULL;
int ret;
- if (nr < DRM_COMMAND_BASE)
+ if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
return drm_compat_ioctl(filp, cmd, arg);
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6bb72dca3ff..5a244ab9395b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -61,6 +61,13 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -564,8 +571,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- const struct drm_display_mode *mode =
- &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -620,7 +626,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &crtc->base.hwmode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -647,14 +653,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
unsigned long irqflags;
- if (!intel_crtc->active) {
+ if (WARN_ON(!mode->crtc_clock)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
@@ -796,7 +802,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return -EINVAL;
}
- if (!crtc->state->enable) {
+ if (!crtc->hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
return -EBUSY;
}
@@ -805,151 +811,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
- &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
- struct drm_connector *connector)
-{
- enum drm_connector_status old_status;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- old_status = connector->status;
-
- connector->status = connector->funcs->detect(connector, false);
- if (old_status == connector->status)
- return false;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
-
- return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, dig_port_work);
- u32 long_port_mask, short_port_mask;
- struct intel_digital_port *intel_dig_port;
- int i;
- u32 old_bits = 0;
-
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->long_hpd_port_mask;
- dev_priv->long_hpd_port_mask = 0;
- short_port_mask = dev_priv->short_hpd_port_mask;
- dev_priv->short_hpd_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- for (i = 0; i < I915_MAX_PORTS; i++) {
- bool valid = false;
- bool long_hpd = false;
- intel_dig_port = dev_priv->hpd_irq_port[i];
- if (!intel_dig_port || !intel_dig_port->hpd_pulse)
- continue;
-
- if (long_port_mask & (1 << i)) {
- valid = true;
- long_hpd = true;
- } else if (short_port_mask & (1 << i))
- valid = true;
-
- if (valid) {
- enum irqreturn ret;
-
- ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == IRQ_NONE) {
- /* fall back to old school hpd */
- old_bits |= (1 << intel_dig_port->base.hpd_pin);
- }
- }
- }
-
- if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hpd_event_bits |= old_bits;
- spin_unlock_irq(&dev_priv->irq_lock);
- schedule_work(&dev_priv->hotplug_work);
- }
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug_work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
- bool hpd_disabled = false;
- bool changed = false;
- u32 hpd_event_bits;
-
- mutex_lock(&mode_config->mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
-
- hpd_event_bits = dev_priv->hpd_event_bits;
- dev_priv->hpd_event_bits = 0;
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
- continue;
- intel_encoder = intel_connector->encoder;
- if (intel_encoder->hpd_pin > HPD_NONE &&
- dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
- connector->polled == DRM_CONNECTOR_POLL_HPD) {
- DRM_INFO("HPD interrupt storm detected on connector %s: "
- "switching from hotplug detection to polling\n",
- connector->name);
- dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT
- | DRM_CONNECTOR_POLL_DISCONNECT;
- hpd_disabled = true;
- }
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->name, intel_encoder->hpd_pin);
- }
- }
- /* if there were no outputs to poll, poll was disabled,
- * therefore make sure it's enabled when disabling HPD on
- * some connectors */
- if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
- mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
- msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
- }
-
- spin_unlock_irq(&dev_priv->irq_lock);
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
- continue;
- intel_encoder = intel_connector->encoder;
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
- if (intel_encoder->hot_plug)
- intel_encoder->hot_plug(intel_encoder);
- if (intel_hpd_irq_event(dev, connector))
- changed = true;
- }
- }
- mutex_unlock(&mode_config->mutex);
-
- if (changed)
- drm_kms_helper_hotplug_event(dev);
+ &crtc->hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1234,80 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
return ret;
}
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_A:
- case PORT_E:
- default:
- return -1;
+ return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
case PORT_B:
- return 0;
+ return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
- return 8;
+ return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
- return 16;
+ return val & PORTD_HOTPLUG_LONG_DETECT;
+ default:
+ return false;
}
}
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
- case PORT_A:
- case PORT_E:
- default:
- return -1;
case PORT_B:
- return 17;
+ return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
- return 19;
+ return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
- return 21;
+ return val & PORTD_HOTPLUG_LONG_DETECT;
+ case PORT_E:
+ return val & PORTE_HOTPLUG_LONG_DETECT;
+ default:
+ return false;
}
}
-static enum port get_port_from_pin(enum hpd_pin pin)
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
- switch (pin) {
- case HPD_PORT_B:
- return PORT_B;
- case HPD_PORT_C:
- return PORT_C;
- case HPD_PORT_D:
- return PORT_D;
+ switch (port) {
+ case PORT_B:
+ return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+ case PORT_C:
+ return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+ case PORT_D:
+ return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
- return PORT_A; /* no hpd */
+ return false;
}
}
-static void intel_hpd_irq_handler(struct drm_device *dev,
- u32 hotplug_trigger,
- u32 dig_hotplug_reg,
- const u32 hpd[HPD_NUM_PINS])
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+ u32 hotplug_trigger, u32 dig_hotplug_reg,
+ const u32 hpd[HPD_NUM_PINS],
+ bool long_pulse_detect(enum port port, u32 val))
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
enum port port;
- bool storm_detected = false;
- bool queue_dig = false, queue_hp = false;
- u32 dig_shift;
- u32 dig_port_mask = 0;
-
- if (!hotplug_trigger)
- return;
+ int i;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg);
+ *pin_mask = 0;
+ *long_mask = 0;
- spin_lock(&dev_priv->irq_lock);
- for (i = 1; i < HPD_NUM_PINS; i++) {
- if (!(hpd[i] & hotplug_trigger))
+ for_each_hpd_pin(i) {
+ if ((hpd[i] & hotplug_trigger) == 0)
continue;
- port = get_port_from_pin(i);
- if (port && dev_priv->hpd_irq_port[port]) {
- bool long_hpd;
-
- if (!HAS_GMCH_DISPLAY(dev_priv)) {
- dig_shift = pch_port_to_hotplug_shift(port);
- long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
- } else {
- dig_shift = i915_port_to_hotplug_shift(port);
- long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
- }
-
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
- port_name(port),
- long_hpd ? "long" : "short");
- /* for long HPD pulses we want to have the digital queue happen,
- but we still want HPD storm detection to function. */
- if (long_hpd) {
- dev_priv->long_hpd_port_mask |= (1 << port);
- dig_port_mask |= hpd[i];
- } else {
- /* for short HPD just trigger the digital queue */
- dev_priv->short_hpd_port_mask |= (1 << port);
- hotplug_trigger &= ~hpd[i];
- }
- queue_dig = true;
- }
- }
-
- for (i = 1; i < HPD_NUM_PINS; i++) {
- if (hpd[i] & hotplug_trigger &&
- dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
- /*
- * On GMCH platforms the interrupt mask bits only
- * prevent irq generation, not the setting of the
- * hotplug bits itself. So only WARN about unexpected
- * interrupts on saner platforms.
- */
- WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
- "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
- hotplug_trigger, i, hpd[i]);
-
- continue;
- }
+ *pin_mask |= BIT(i);
- if (!(hpd[i] & hotplug_trigger) ||
- dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+ if (!intel_hpd_pin_to_port(i, &port))
continue;
- if (!(dig_port_mask & hpd[i])) {
- dev_priv->hpd_event_bits |= (1 << i);
- queue_hp = true;
- }
-
- if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
- dev_priv->hpd_stats[i].hpd_last_jiffies
- + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
- dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
- dev_priv->hpd_stats[i].hpd_cnt = 0;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
- } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
- dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
- dev_priv->hpd_event_bits &= ~(1 << i);
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
- storm_detected = true;
- } else {
- dev_priv->hpd_stats[i].hpd_cnt++;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
- dev_priv->hpd_stats[i].hpd_cnt);
- }
+ if (long_pulse_detect(port, dig_hotplug_reg))
+ *long_mask |= BIT(i);
}
- if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock(&dev_priv->irq_lock);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask);
- /*
- * Our hotplug handler can grab modeset locks (by calling down into the
- * fb helpers). Hence it must not be run on our own dev-priv->wq work
- * queue for otherwise the flush_work in the pageflip code will
- * deadlock.
- */
- if (queue_dig)
- queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
- if (queue_hp)
- schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
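The refactor above replaces the per-platform bit-shift tables with predicate callbacks: intel_get_hpd_pins() records every triggered pin in *pin_mask and asks the platform's long_pulse_detect() helper which of those were long pulses. A minimal standalone sketch of that dispatch follows; the pin/port tables and bit layout are simplified stand-ins, not the driver's (in the real code pins map to ports via intel_hpd_pin_to_port()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, NUM_PORTS };

/* Stand-in per-pin trigger bits (the real tables are per platform). */
static const uint32_t hpd[NUM_PORTS] = {
	[PORT_B] = BIT(0), [PORT_C] = BIT(1), [PORT_D] = BIT(2),
};

/* Platform predicate: is this port reporting a long pulse?
 * (Hypothetical bit layout, for illustration only.) */
static bool long_detect(enum port port, uint32_t val)
{
	return val & BIT(16 + port);
}

static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			 uint32_t trigger, uint32_t dig_reg,
			 bool (*long_pulse)(enum port, uint32_t))
{
	*pin_mask = 0;
	*long_mask = 0;
	for (int i = 0; i < NUM_PORTS; i++) {
		if (!(hpd[i] & trigger))
			continue;
		*pin_mask |= BIT(i);
		if (long_pulse((enum port)i, dig_reg))
			*long_mask |= BIT(i);
	}
}

int main(void)
{
	uint32_t pins, longs;

	get_hpd_pins(&pins, &longs, BIT(0) | BIT(2),
		     BIT(16 + PORT_D), long_detect);
	printf("pins 0x%x long 0x%x\n", pins, longs); /* pins 0xa long 0x8 */
	return 0;
}

The payoff of the callback shape is visible in the hunks below: every interrupt handler feeds its own trigger register and table into the same loop instead of duplicating shift arithmetic.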
@@ -1755,28 +1532,35 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 pin_mask, long_mask;
- if (hotplug_status) {
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if (!hotplug_status)
+ return;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
- }
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ hotplug_trigger, hpd_status_g4x,
+ i9xx_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ hotplug_trigger, hpd_status_i915,
+ i9xx_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
}
@@ -1875,12 +1659,18 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ if (hotplug_trigger) {
+ u32 dig_hotplug_reg, pin_mask, long_mask;
- intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ dig_hotplug_reg, hpd_ibx,
+ pch_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1971,13 +1761,38 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- u32 dig_hotplug_reg;
+ u32 hotplug_trigger;
+
+ if (HAS_PCH_SPT(dev))
+ hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+ else
+ hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ if (hotplug_trigger) {
+ u32 dig_hotplug_reg, pin_mask, long_mask;
- intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ if (HAS_PCH_SPT(dev)) {
+ intel_get_hpd_pins(&pin_mask, &long_mask,
+ hotplug_trigger,
+ dig_hotplug_reg, hpd_spt,
+ pch_port_hotplug_long_detect);
+
+ /* detect PORTE HP event */
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+ if (pch_port_hotplug_long_detect(PORT_E,
+ dig_hotplug_reg))
+ long_mask |= 1 << HPD_PORT_E;
+ } else
+ intel_get_hpd_pins(&pin_mask, &long_mask,
+ hotplug_trigger,
+ dig_hotplug_reg, hpd_cpt,
+ pch_port_hotplug_long_detect);
+
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ }
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1991,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t hp_control;
- uint32_t hp_trigger;
+ u32 hp_control, hp_trigger;
+ u32 pin_mask, long_mask;
/* Get the status */
hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +2004,12 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
return;
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hp_control & BXT_HOTPLUG_CTL_MASK);
-
- /* Check for HPD storm and schedule bottom half */
- intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
- /*
- * FIXME: Save the hot plug status for bottom half before
- * clearing the sticky status bits, else the status will be
- * lost.
- */
-
/* Clear sticky bits in hpd status */
I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+ intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+ hpd_bxt, bxt_port_hotplug_long_detect);
+ intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
@@ -2446,7 +2253,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
- atomic_set_mask(I915_WEDGED, &error->reset_counter);
+ atomic_or(I915_WEDGED, &error->reset_counter);
}
/*
@@ -2574,7 +2381,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_report_and_clear_eir(dev);
if (wedged) {
- atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
/*
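Both hunks above migrate from the legacy atomic_set_mask() to the generic atomic_or(); the operation is the same read-modify-write OR of flag bits into an atomic counter, only the calling convention changes. A userspace-flavoured sketch of the idiom, with a GCC builtin standing in for the kernel's atomic_t and the I915_WEDGED value assumed to be bit 31 as in the driver headers of this era:

#include <stdint.h>
#include <stdio.h>

#define I915_WEDGED (1u << 31) /* assumed flag bit, mirrors i915_drv.h */

/* Kernel: atomic_or(I915_WEDGED, &error->reset_counter);
 * Userspace stand-in for the same atomic OR: */
static void set_wedged(uint32_t *reset_counter)
{
	__atomic_fetch_or(reset_counter, I915_WEDGED, __ATOMIC_RELAXED);
}

int main(void)
{
	uint32_t counter = 3;

	set_wedged(&counter);
	printf("0x%08x\n", counter); /* 0x80000003 */
	return 0;
}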
@@ -2706,18 +2513,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
- return list_entry(ring->request_list.prev,
- struct drm_i915_gem_request, list);
-}
-
static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
- i915_gem_request_completed(ring_last_request(ring), false));
+ i915_seqno_passed(seqno, ring->last_submitted_seqno));
}
static bool
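ring_idle() now compares the handler's sampled seqno against ring->last_submitted_seqno rather than walking back to the last request. The comparison helper, i915_seqno_passed(), is wrap-safe: it subtracts in 32-bit space and tests the sign, so it stays correct when the sequence counter rolls over. A minimal sketch of the idiom:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is at or after seq2" test, mirroring the driver's
 * i915_seqno_passed(): the signed difference stays non-negative across
 * a 32-bit wraparound as long as the two values are within 2^31. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));          /* 1 */
	printf("%d\n", seqno_passed(5, 0xfffffff0u)); /* 1: passed the wrap */
	printf("%d\n", seqno_passed(0xfffffff0u, 5)); /* 0 */
	return 0;
}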
@@ -2939,7 +2739,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring)) {
+ if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
@@ -3210,12 +3010,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+ } else if (HAS_PCH_SPT(dev)) {
+ hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+ for_each_intel_encoder(dev, intel_encoder)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+ enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
@@ -3233,6 +3038,13 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+ /* enable SPT PORTE hot plug */
+ if (HAS_PCH_SPT(dev)) {
+ hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug |= PORTE_HOTPLUG_ENABLE;
+ I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ }
}
static void bxt_hpd_irq_setup(struct drm_device *dev)
@@ -3244,7 +3056,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
/* Now, enable HPD */
for_each_intel_encoder(dev, intel_encoder) {
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
== HPD_ENABLED)
hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
}
@@ -4137,7 +3949,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
for_each_intel_encoder(dev, intel_encoder)
- if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
@@ -4277,46 +4089,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- hotplug_reenable_work.work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- int i;
-
- intel_runtime_pm_get(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
- struct drm_connector *connector;
-
- if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
- continue;
-
- dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (intel_connector->encoder->hpd_pin == i) {
- if (connector->polled != intel_connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->name);
- connector->polled = intel_connector->polled;
- if (!connector->polled)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- }
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- intel_runtime_pm_put(dev_priv);
-}
-
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4328,8 +4100,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+ intel_hpd_init_work(dev_priv);
+
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
@@ -4342,8 +4114,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
- INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
- intel_hpd_irq_reenable_work);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@ -4429,46 +4199,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
}
/**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
- int i;
-
- for (i = 1; i < HPD_NUM_PINS; i++) {
- dev_priv->hpd_stats[i].hpd_cnt = 0;
- dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
- }
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
- connector->polled = intel_connector->polled;
- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (intel_connector->mst_port)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked checks happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ac5a1b29ac0..5ae4b0aba564 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.semaphores = -1,
- .lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
- .nuclear_pageflip = 0,
.edp_vswing = 0,
+ .enable_guc_submission = false,
+ .guc_log_level = -1,
};
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
- "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
- "Use panel (LVDS/eDP) downclocking for power savings "
- "(default: false)");
-
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
@@ -104,7 +99,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
- "Force atomic modeset functionality; only planes work for now (default: false).");
-
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+ "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2030f602cbf8..83a0888756d6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -50,12 +50,17 @@
/* PCI config space */
-#define HPLLCC 0xc0 /* 855 only */
-#define GC_CLOCK_CONTROL_MASK (0xf << 0)
+#define HPLLCC 0xc0 /* 85x only */
+#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
-#define GC_CLOCK_166_250 (3 << 0)
+#define GC_CLOCK_133_266 (3 << 0)
+#define GC_CLOCK_133_200_2 (4 << 0)
+#define GC_CLOCK_133_266_2 (5 << 0)
+#define GC_CLOCK_166_266 (6 << 0)
+#define GC_CLOCK_166_250 (7 << 0)
+
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
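The corrected HPLLCC comment and mask reflect that the 85x clock-control field is three bits wide, so the mask shrinks to 0x7 and the table gains the missing encodings. A sketch decoding the select field into the two candidate core clocks; the MHz pairs are read straight off the define names above:

#include <stdint.h>
#include <stdio.h>

#define GC_CLOCK_CONTROL_MASK (0x7 << 0)

/* Low/high core clock in MHz per encoding, mirroring the defines. */
static const struct { int lo, hi; } gc_clock[] = {
	{ 133, 200 }, { 100, 200 }, { 100, 133 }, { 133, 266 },
	{ 133, 200 }, { 133, 266 }, { 166, 266 }, { 166, 250 },
};

static void decode_hpllcc(uint32_t hpllcc)
{
	unsigned int sel = hpllcc & GC_CLOCK_CONTROL_MASK;

	printf("core clock %d/%d MHz\n", gc_clock[sel].lo, gc_clock[sel].hi);
}

int main(void)
{
	decode_hpllcc(6); /* GC_CLOCK_166_266 -> core clock 166/266 MHz */
	return 0;
}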
@@ -155,6 +160,7 @@
#define GAM_ECOCHK 0x4090
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
+#define ECOCHK_DIS_TLB (1<<8)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
@@ -172,13 +178,22 @@
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
-#define GEN7_BIOS_RESERVED 0x1082C0
-#define GEN7_BIOS_RESERVED_1M (0 << 5)
-#define GEN7_BIOS_RESERVED_256K (1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT 7
-#define GEN7_BIOS_RESERVED_MASK 0x1
-#define GEN8_BIOS_RESERVED_MASK 0x3
-
+#define GEN6_STOLEN_RESERVED 0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
+#define GEN6_STOLEN_RESERVED_1M (0 << 4)
+#define GEN6_STOLEN_RESERVED_512K (1 << 4)
+#define GEN6_STOLEN_RESERVED_256K (2 << 4)
+#define GEN6_STOLEN_RESERVED_128K (3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK (1 << 5)
+#define GEN7_STOLEN_RESERVED_1M (0 << 5)
+#define GEN7_STOLEN_RESERVED_256K (1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK (3 << 7)
+#define GEN8_STOLEN_RESERVED_1M (0 << 7)
+#define GEN8_STOLEN_RESERVED_2M (1 << 7)
+#define GEN8_STOLEN_RESERVED_4M (2 << 7)
+#define GEN8_STOLEN_RESERVED_8M (3 << 7)
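The renamed GEN6_STOLEN_RESERVED_* block documents both the reserved-base address masks and a size field that moves and widens across generations: two bits at bit 4 on gen6, one bit at bit 5 on gen7, two bits at bit 7 on gen8. A sketch of decoding the gen8 size field, whose encoding conveniently reduces to 1M << field:

#include <stdint.h>
#include <stdio.h>

#define GEN8_STOLEN_RESERVED_SIZE_MASK (3u << 7)
#define GEN8_STOLEN_RESERVED_1M        (0u << 7)
#define GEN8_STOLEN_RESERVED_2M        (1u << 7)
#define GEN8_STOLEN_RESERVED_4M        (2u << 7)
#define GEN8_STOLEN_RESERVED_8M        (3u << 7)

/* Map the 2-bit gen8 field onto a size in bytes: 1M << field. */
static uint64_t gen8_reserved_size(uint32_t reg)
{
	unsigned int field = (reg & GEN8_STOLEN_RESERVED_SIZE_MASK) >> 7;

	return (1024ull * 1024) << field;
}

int main(void)
{
	unsigned long long mib =
		(unsigned long long)(gen8_reserved_size(GEN8_STOLEN_RESERVED_4M) >> 20);

	printf("%llu MiB\n", mib); /* prints: 4 MiB */
	return 0;
}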
/* VGA stuff */
@@ -316,6 +331,8 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
+#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
@@ -347,6 +364,8 @@
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
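MI_LOAD_REGISTER_MEM now takes a pair count, with a separate gen8 variant, because the MI DWord-length field encodes total DWords minus two: one header DWord plus two DWords per register/address pair (32-bit address), or three per pair on gen8 (64-bit address), giving 2*(x)-1 and 3*(x)-1. A quick arithmetic check of that encoding, assuming the (opcode << 23) | flags packing that MI_INSTR uses in this header:

#include <assert.h>
#include <stdio.h>

/* MI_INSTR as assumed from i915_reg.h: opcode in bits 28:23, the
 * DWord-length field (total DWords minus two) in the low bits. */
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
#define MI_LOAD_REGISTER_MEM(x)      MI_INSTR(0x29, 2*(x)-1)
#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)

int main(void)
{
	/* One reg/addr pair: header + reg + addr = 3 DWords -> length 1. */
	assert((MI_LOAD_REGISTER_MEM(1) & 0xff) == 1);
	/* Gen8 adds an upper-address DWord: 4 DWords -> length 2. */
	assert((MI_LOAD_REGISTER_MEM_GEN8(1) & 0xff) == 2);
	printf("length fields check out\n");
	return 0;
}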
@@ -356,6 +375,7 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_BATCH_RESOURCE_STREAMER (1<<10)
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
@@ -410,6 +430,7 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
@@ -426,6 +447,7 @@
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
+#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -449,7 +471,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@ enum skl_disp_power_wells {
#define _PORT_PLL_EBB_0_A 0x162034
#define _PORT_PLL_EBB_0_B 0x6C034
#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_MASK (0x07 << 13)
-#define PORT_PLL_P1(x) ((x) << 13)
-#define PORT_PLL_P2_MASK (0x1f << 8)
-#define PORT_PLL_P2(x) ((x) << 8)
+#define PORT_PLL_P1_SHIFT 13
+#define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT)
+#define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT)
+#define PORT_PLL_P2_SHIFT 8
+#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
+#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
@@ -1186,8 +1209,9 @@ enum skl_disp_power_wells {
/* PORT_PLL_0_A */
#define PORT_PLL_M2_MASK 0xFF
/* PORT_PLL_1_A */
-#define PORT_PLL_N_MASK (0x0F << 8)
-#define PORT_PLL_N(x) ((x) << 8)
+#define PORT_PLL_N_SHIFT 8
+#define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT)
+#define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT)
/* PORT_PLL_2_A */
#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
/* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@ enum skl_disp_power_wells {
/* PORT_PLL_8_A */
#define PORT_PLL_TARGET_CNT_MASK 0x3FF
/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_MASK 0xe
+#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
+#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) (x<<10)
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
@@ -1377,6 +1403,18 @@ enum skl_disp_power_wells {
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1 0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK 0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0 0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port) (8+3*(port))
+#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT 23
+
/*
* Fence registers
*/
@@ -1456,6 +1494,9 @@ enum skl_disp_power_wells {
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define RING_RESET_CTL(base) ((base)+0xd0)
+#define RESET_CTL_REQUEST_RESET (1 << 0)
+#define RESET_CTL_READY_TO_RESET (1 << 1)
#define HSW_GTT_CACHE_EN 0x4024
#define GTT_CACHE_EN_ALL 0xF0007FFF
@@ -1946,6 +1987,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
+#define FBC_STATUS2 0x43214
+#define FBC_COMPRESSION_MASK 0x7ff
+
#define FBC_LL_SIZE (1536)
/* Framebuffer compression for GM45+ */
@@ -2116,7 +2160,7 @@ enum skl_disp_power_wells {
#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@ enum skl_disp_power_wells {
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
-#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
+#define DPLL_SSC_REF_CLK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -2488,6 +2532,9 @@ enum skl_disp_power_wells {
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f)
+
#define TSC1 0x11001
#define TSE (1<<0)
#define TR1 0x11006
@@ -2718,8 +2765,10 @@ enum skl_disp_power_wells {
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP 0x138170
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
@@ -2767,7 +2816,8 @@ enum skl_disp_power_wells {
* valid. Now, docs explain in dwords what is in the context object. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@ enum skl_disp_power_wells {
#define DSPARB_BSTART_SHIFT 0
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
-
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define SKL_DFSM 0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+
#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
@@ -5791,6 +5871,7 @@ enum skl_disp_power_wells {
#define GEN8_L3SQCREG4 0xb118
#define GEN8_LQSC_RO_PERF_DIS (1<<27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
@@ -5868,6 +5949,7 @@ enum skl_disp_power_wells {
#define SDE_AUXC_CPT (1 << 26)
#define SDE_AUXB_CPT (1 << 25)
#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
@@ -5878,6 +5960,10 @@ enum skl_disp_power_wells {
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -5913,6 +5999,11 @@ enum skl_disp_power_wells {
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE (1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK (0x3 << 24)
+#define BXT_PORTA_HOTPLUG_NO_DETECT (0 << 24)
+#define BXT_PORTA_HOTPLUG_SHORT_DETECT (1 << 24)
+#define BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24)
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
@@ -5944,6 +6035,13 @@ enum skl_disp_power_wells {
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */
+#define PORTE_HOTPLUG_ENABLE (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
#define PCH_GPIOC 0xc5018
@@ -6047,6 +6145,9 @@ enum skl_disp_power_wells {
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
+#define GCP_COLOR_INDICATION (1 << 2)
+#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
+#define GCP_AV_MUTE (1 << 0)
#define _VIDEO_DIP_CTL_B 0xe1200
#define _VIDEO_DIP_DATA_B 0xe1208
@@ -6186,6 +6287,7 @@ enum skl_disp_power_wells {
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
@@ -6370,6 +6472,8 @@ enum skl_disp_power_wells {
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
+#define BXT_POWER_CYCLE_DELAY_SHIFT 4
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
@@ -6398,6 +6502,17 @@ enum skl_disp_power_wells {
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2 0xc7300
+#define _BXT_PP_CONTROL2 0xc7304
+#define _BXT_PP_ON_DELAYS2 0xc7308
+#define _BXT_PP_OFF_DELAYS2 0xc730c
+
+#define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
#define PCH_DPB_AUX_CH_DATA1 0xe4114
@@ -6698,6 +6813,7 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
@@ -6756,6 +6872,9 @@ enum skl_disp_power_wells {
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+#define GEN8_GARBCNTL 0xB004
+#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
+
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7282,7 @@ enum skl_disp_power_wells {
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
@@ -7265,12 +7385,6 @@ enum skl_disp_power_wells {
#define DC_STATE_EN 0x45504
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3)
-
-/*
-* SKL DC
-*/
-#define DC_STATE_EN 0x45504
-#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_UPTO_DC6 (2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
@@ -7822,4 +7936,13 @@ enum skl_disp_power_wells {
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register */
+#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register */
+#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register */
+#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register */
+#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register */
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index cf67f82f7b7f..1ccac618468e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on the platform that supports FBC*/
- intel_fbc_disable(dev);
+ intel_fbc_disable(dev_priv);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 247626885f49..55bd04c6b939 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
goto out;
}
- units = 0;
- div = 1000000ULL;
-
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
/* Special case for 320 MHz */
- if (czcount_30ns == 1) {
- div = 10000000ULL;
- units = 3125ULL;
- } else {
- /* chv counts are one less */
- czcount_30ns += 1;
- }
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+ czcount_30ns += 1;
+ div = 1000000ULL;
+ units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
}
- if (units == 0)
- units = DIV_ROUND_UP_ULL(30ULL * bias,
- (u64)czcount_30ns);
-
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
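The calc_residency() rework collapses the branches down to one special case: Cherryview at 320 MHz (czcount_30ns == 1) uses units = 3125 over div = 10^7, while every other clock derives units from 30 times the hardware bias divided by the incremented count. A standalone sketch of that unit selection, with bias and czcount_30ns treated as plain inputs (the driver reads them from hardware):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

struct residency_scale { uint64_t units, div; };

static struct residency_scale
calc_scale(bool is_chv, uint32_t czcount_30ns, uint64_t bias)
{
	struct residency_scale s;

	if (is_chv && czcount_30ns == 1) {
		/* CHV 320 MHz special case, as in the hunk above. */
		s.div = 10000000ull;
		s.units = 3125;
	} else {
		czcount_30ns += 1; /* counter reads one less than the count */
		s.div = 1000000ull;
		s.units = DIV_ROUND_UP_ULL(30ull * bias, czcount_30ns);
	}
	return s;
}

int main(void)
{
	struct residency_scale s = calc_scale(true, 1, 1);

	printf("units=%" PRIu64 " div=%" PRIu64 "\n", s.units, s.div);
	return 0;
}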
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5deb1e..2f34c47bd4bf 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct intel_engine_cs *from,
- struct intel_engine_cs *to,
+ TP_PROTO(struct drm_i915_gem_request *to_req,
+ struct intel_engine_cs *from,
struct drm_i915_gem_request *req),
- TP_ARGS(from, to, req),
+ TP_ARGS(to_req, from, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
- __entry->sync_to = to->id;
+ __entry->sync_to = to_req->ring->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
);
TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
- TP_ARGS(ring, invalidate, flush),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+ TP_ARGS(req, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
@@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ __entry->dev = req->ring->dev->primary->index;
+ __entry->ring = req->ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ __entry->dev = ctx->i915->dev->primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..e2531cf59266 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,162 +35,6 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
-{
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
- int nconnectors = dev->mode_config.num_connector;
- enum pipe nuclear_pipe = INVALID_PIPE;
- struct intel_crtc *nuclear_crtc = NULL;
- struct intel_crtc_state *crtc_state = NULL;
- int ret;
- int i;
- bool not_nuclear = false;
-
- /*
- * FIXME: At the moment, we only support "nuclear pageflip" on a
- * single CRTC. Cross-crtc updates will be added later.
- */
- for (i = 0; i < nplanes; i++) {
- struct intel_plane *plane = to_intel_plane(state->planes[i]);
- if (!plane)
- continue;
-
- if (nuclear_pipe == INVALID_PIPE) {
- nuclear_pipe = plane->pipe;
- } else if (nuclear_pipe != plane->pipe) {
- DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
- return -EINVAL;
- }
- }
-
- /*
- * FIXME: We only handle planes for now; make sure there are no CRTC's
- * or connectors involved.
- */
- state->allow_modeset = false;
- for (i = 0; i < ncrtcs; i++) {
- struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
- if (crtc)
- memset(&crtc->atomic, 0, sizeof(crtc->atomic));
- if (crtc && crtc->pipe != nuclear_pipe)
- not_nuclear = true;
- if (crtc && crtc->pipe == nuclear_pipe) {
- nuclear_crtc = crtc;
- crtc_state = to_intel_crtc_state(state->crtc_states[i]);
- }
- }
- for (i = 0; i < nconnectors; i++)
- if (state->connectors[i] != NULL)
- not_nuclear = true;
-
- if (not_nuclear) {
- DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
- return -EINVAL;
- }
-
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
-
- /* FIXME: move to crtc atomic check function once it is ready */
- ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
- if (ret)
- return ret;
-
- return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async)
-{
- int ret;
- int i;
-
- if (async) {
- DRM_DEBUG_KMS("i915 does not yet support async commit\n");
- return -EINVAL;
- }
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
-
- /* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
-
- if (INTEL_INFO(dev)->gen >= 9)
- skl_detach_scalers(to_intel_crtc(crtc));
- }
-
- drm_atomic_helper_commit_planes(dev, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
- return 0;
-}
-
/**
* intel_connector_atomic_get_property - fetch connector property value
* @connector: connector to fetch property for
@@ -298,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
struct intel_plane_state *plane_state = NULL;
- struct intel_crtc_scaler_state *scaler_state;
- struct drm_atomic_state *drm_state;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_atomic_state *drm_state = crtc_state->base.state;
int num_scalers_need;
int i, j;
- if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
- return 0;
-
- scaler_state = &crtc_state->scaler_state;
- drm_state = crtc_state->base.state;
-
num_scalers_need = hweight32(scaler_state->scaler_users);
DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -336,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
int *scaler_id;
+ const char *name;
+ int idx;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
if (i == SKL_CRTC_INDEX) {
+ name = "CRTC";
+ idx = intel_crtc->base.base.id;
+
/* panel fitter case: assign as a crtc scaler */
scaler_id = &scaler_state->scaler_id;
} else {
- if (!drm_state)
- continue;
+ name = "PLANE";
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
@@ -365,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
plane->base.id);
return PTR_ERR(state);
}
+
+ /*
+ * the plane is added after plane checks are run,
+ * but since this plane is unchanged just do the
+ * minimum required validation.
+ */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ intel_crtc->atomic.wait_for_flips = true;
+ crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
+ idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -383,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
for (j = 0; j < intel_crtc->num_scalers; j++) {
if (!scaler_state->scalers[j].in_use) {
scaler_state->scalers[j].in_use = 1;
- *scaler_id = scaler_state->scalers[j].id;
+ *scaler_id = j;
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe,
- i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
- plane_state->scaler_id,
- i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
- i == SKL_CRTC_INDEX ? intel_crtc->base.base.id :
- plane->base.id);
+ intel_crtc->pipe, *scaler_id, name, idx);
break;
}
}
}
if (WARN_ON(*scaler_id < 0)) {
- DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
- i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
- i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+ DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
continue;
}
@@ -421,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
return 0;
}
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll_config *shared_dpll)
+{
+ enum intel_dpll_id i;
+
+ /* Copy shared dpll state */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ shared_dpll[i] = pll->config;
+ }
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+ if (!state->dpll_set) {
+ state->dpll_set = true;
+
+ intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ state->shared_dpll);
+ }
+
+ return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+ struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+ drm_atomic_state_default_clear(&state->base);
+ state->dpll_set = false;
+}
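intel_atomic_state_alloc()/intel_atomic_state_clear() follow the standard DRM pattern for drivers that embed drm_atomic_state in a larger structure: allocate the container, initialise the base, and reset the driver-private fields (here dpll_set) whenever the core clears the state for a retry. A distilled sketch of the embedding, with the types reduced to the bare minimum:

#include <stdbool.h>
#include <stdlib.h>

/* Reduced stand-ins for the DRM types involved. */
struct drm_atomic_state { int dummy; };

struct intel_atomic_state {
	struct drm_atomic_state base; /* first member: base and container alias */
	bool dpll_set;                /* driver-private, filled lazily */
};

#define to_intel_atomic_state(s) ((struct intel_atomic_state *)(s))

static struct drm_atomic_state *state_alloc(void)
{
	struct intel_atomic_state *state = calloc(1, sizeof(*state));

	return state ? &state->base : NULL;
}

/* Called when the core wants to retry with a clean slate: the helper
 * resets the base; the driver must reset its own fields too. */
static void state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	/* drm_atomic_state_default_clear(&state->base) in the driver */
	state->dpll_set = false;
}

int main(void)
{
	struct drm_atomic_state *s = state_alloc();

	if (!s)
		return 1;
	state_clear(s);
	free(s);
	return 0;
}

The lazy duplication in intel_atomic_get_shared_dpll_state() above builds on the same fields: the shared-DPLL configs are copied into the container the first time a transaction touches them, and dpll_set gates the copy.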
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 86ba4b2c3a65..f1ab8e4b9c11 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane)
state->base.plane = plane;
state->base.rotation = BIT(DRM_ROTATE_0);
+ state->ckey.flags = I915_SET_COLORKEY_NONE;
return state;
}
@@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
+ struct drm_crtc_state *drm_crtc_state;
+ int ret;
- crtc = crtc ? crtc : plane->crtc;
+ crtc = crtc ? crtc : plane->state->crtc;
intel_crtc = to_intel_crtc(crtc);
/*
@@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- /* FIXME: temporary hack necessary while we still use the plane update
- * helper. */
- if (state->state) {
- crtc_state =
- intel_atomic_get_crtc_state(state->state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- } else {
- crtc_state = intel_crtc->config;
- }
+ drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!drm_crtc_state))
+ return -EINVAL;
+
+ crtc_state = to_intel_crtc_state(drm_crtc_state);
/*
* The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
- /*
- * Disabling a plane is always okay; we just need to update
- * fb tracking in a special way since cleanup_fb() won't
- * get called by the plane helpers.
- */
- if (state->fb == NULL && plane->state->fb != NULL) {
- /*
- * 'prepare' is never called when plane is being disabled, so
- * we need to handle frontbuffer tracking as a special case
- */
- intel_crtc->atomic.disabled_planes |=
- (1 << drm_plane_index(plane));
- }
-
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
- return intel_plane->check_plane(plane, intel_state);
+ intel_state->visible = false;
+ ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+ if (ret)
+ return ret;
+
+ return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
static void intel_plane_atomic_update(struct drm_plane *plane,
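The plane check hook now requires the CRTC state to already be part of the transaction: drm_atomic_get_existing_crtc_state() only looks up states added to the update and returns NULL otherwise, which is why the old fallback to intel_crtc->config is gone. A pared-down sketch of that contract (structures reduced to illustrate the lookup discipline, not the DRM API):

#include <stdio.h>

#define MAX_CRTCS 4

struct crtc_state { int active; };

/* A transaction carries at most one state slot per CRTC. */
struct atomic_state {
	struct crtc_state *crtc_states[MAX_CRTCS];
};

/* Return the state only if it is already part of this transaction;
 * never allocate and never fall back to the CRTC's live state. */
static struct crtc_state *
get_existing_crtc_state(struct atomic_state *s, int crtc_index)
{
	return s->crtc_states[crtc_index];
}

static int plane_check(struct atomic_state *s, int crtc_index)
{
	struct crtc_state *cs = get_existing_crtc_state(s, crtc_index);

	if (!cs)            /* the driver WARNs and bails out here */
		return -22; /* -EINVAL */
	return 0;
}

int main(void)
{
	struct crtc_state cs = { .active = 1 };
	struct atomic_state s = { .crtc_states = { [1] = &cs } };

	printf("%d %d\n", plane_check(&s, 1), plane_check(&s, 0)); /* 0 -22 */
	return 0;
}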
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3da9b8409f20..89c1a8ce1f98 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -41,7 +41,8 @@
*
* The disable sequences must be performed before disabling the transcoder or
* port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
*
* The codec and controller sequences could be done either parallel or serial,
* but generally the ELDV/PD change in the codec sequence indicates to the audio
@@ -399,6 +400,9 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ enum port port = intel_dig_port->port;
connector = drm_select_eld(encoder, mode);
if (!connector)
@@ -419,6 +423,9 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
if (dev_priv->display.audio_codec_enable)
dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
/**
@@ -428,13 +435,20 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
-void intel_audio_codec_disable(struct intel_encoder *encoder)
+void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ enum port port = intel_dig_port->port;
if (dev_priv->display.audio_codec_disable)
- dev_priv->display.audio_codec_disable(encoder);
+ dev_priv->display.audio_codec_disable(intel_encoder);
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
/**
@@ -525,12 +539,16 @@ static int i915_audio_component_bind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
+ struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
if (WARN_ON(acomp->ops || acomp->dev))
return -EEXIST;
+ drm_modeset_lock_all(dev_priv->dev);
acomp->ops = &i915_audio_component_ops;
acomp->dev = i915_dev;
+ dev_priv->audio_component = acomp;
+ drm_modeset_unlock_all(dev_priv->dev);
return 0;
}
@@ -539,9 +557,13 @@ static void i915_audio_component_unbind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
+ struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+ drm_modeset_lock_all(dev_priv->dev);
acomp->ops = NULL;
acomp->dev = NULL;
+ dev_priv->audio_component = NULL;
+ drm_modeset_unlock_all(dev_priv->dev);
}
static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c3291b..b3e437b3bb54 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
- const struct lvds_dvo_timing *b)
-{
- if (a->hactive_hi != b->hactive_hi ||
- a->hactive_lo != b->hactive_lo)
- return false;
-
- if (a->hsync_off_hi != b->hsync_off_hi ||
- a->hsync_off_lo != b->hsync_off_lo)
- return false;
-
- if (a->hsync_pulse_width != b->hsync_pulse_width)
- return false;
-
- if (a->hblank_hi != b->hblank_hi ||
- a->hblank_lo != b->hblank_lo)
- return false;
-
- if (a->vactive_hi != b->vactive_hi ||
- a->vactive_lo != b->vactive_lo)
- return false;
-
- if (a->vsync_off != b->vsync_off)
- return false;
-
- if (a->vsync_pulse_width != b->vsync_pulse_width)
- return false;
-
- if (a->vblank_hi != b->vblank_hi ||
- a->vblank_lo != b->vblank_lo)
- return false;
-
- return true;
-}
-
static const struct lvds_dvo_timing *
get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int i, downclock, drrs_mode;
+ int drrs_mode;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- /*
- * Iterate over the LVDS panel timing info to find the lowest clock
- * for the native resolution.
- */
- downclock = panel_dvo_timing->clock;
- for (i = 0; i < 16; i++) {
- const struct lvds_dvo_timing *dvo_timing;
-
- dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
- lvds_lfp_data_ptrs,
- i);
- if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
- dvo_timing->clock < downclock)
- downclock = dvo_timing->clock;
- }
-
- if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
- dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = downclock * 10;
- DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
- "Normal Clock %dKHz, downclock %dKHz\n",
- panel_fixed_mode->clock, 10*downclock);
- }
-
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
@@ -461,7 +401,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *p_mapping;
const struct bdb_general_definitions *p_defs;
- const union child_device_config *p_child;
+ const struct old_child_dev_config *child; /* legacy */
int i, child_device_num, count;
u16 block_size;
@@ -470,14 +410,14 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
- /* judge whether the size of child device meets the requirements.
- * If the child device size obtained from general definition block
- * is different with sizeof(struct child_device_config), skip the
- * parsing of sdvo device info
+
+ /*
+ * Only parse SDVO mappings when the general definitions block child
+ * device size matches that of the *legacy* child device config
+ * struct. Thus, SDVO mapping will be skipped for newer VBTs.
*/
- if (p_defs->child_dev_size != sizeof(*p_child)) {
- /* different child dev size . Ignore it */
- DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ if (p_defs->child_dev_size != sizeof(*child)) {
+ DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
return;
}
/* get the block size of general definitions */
@@ -487,37 +427,37 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
- p_child = child_device_ptr(p_defs, i);
- if (!p_child->old.device_type) {
+ child = &child_device_ptr(p_defs, i)->old;
+ if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->old.slave_addr != SLAVE_ADDR1 &&
- p_child->old.slave_addr != SLAVE_ADDR2) {
+ if (child->slave_addr != SLAVE_ADDR1 &&
+ child->slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
}
- if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
- p_child->old.dvo_port != DEVICE_PORT_DVOC) {
+ if (child->dvo_port != DEVICE_PORT_DVOB &&
+ child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
- " %s port\n",
- p_child->old.slave_addr,
- (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
- "SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
+ " %s port\n",
+ child->slave_addr,
+ (child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
if (!p_mapping->initialized) {
- p_mapping->dvo_port = p_child->old.dvo_port;
- p_mapping->slave_addr = p_child->old.slave_addr;
- p_mapping->dvo_wiring = p_child->old.dvo_wiring;
- p_mapping->ddc_pin = p_child->old.ddc_pin;
- p_mapping->i2c_pin = p_child->old.i2c_pin;
+ p_mapping->dvo_port = child->dvo_port;
+ p_mapping->slave_addr = child->slave_addr;
+ p_mapping->dvo_wiring = child->dvo_wiring;
+ p_mapping->ddc_pin = child->ddc_pin;
+ p_mapping->i2c_pin = child->i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
@@ -529,7 +469,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->old.slave2_addr) {
+ if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -946,6 +886,17 @@ err:
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
+static u8 translate_iboost(u8 val)
+{
+ static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+ if (val >= ARRAY_SIZE(mapping)) {
+ DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+ return 0;
+ }
+ return mapping[val];
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -954,23 +905,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
uint8_t hdmi_level_shift;
int i, j;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
- uint8_t aux_channel;
+ uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port and abort if more
* than one is found. */
- int dvo_ports[][2] = {
- {DVO_PORT_HDMIA, DVO_PORT_DPA},
- {DVO_PORT_HDMIB, DVO_PORT_DPB},
- {DVO_PORT_HDMIC, DVO_PORT_DPC},
- {DVO_PORT_HDMID, DVO_PORT_DPD},
- {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+ int dvo_ports[][3] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+ {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+ {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+ {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
/* Find the child device to use, abort if more than one found. */
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
- for (j = 0; j < 2; j++) {
+ for (j = 0; j < 3; j++) {
if (dvo_ports[port][j] == -1)
break;
@@ -988,6 +939,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
return;
aux_channel = child->raw[25];
+ ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1019,22 +971,53 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- if (child->common.ddc_pin == 0x05 && port != PORT_B)
+ if (port == PORT_E) {
+ info->alternate_ddc_pin = ddc_pin;
+ /* If DDI E shares its DDC pin with another port, then
+ * DVI/HDMI can't exist on the shared port, since both
+ * ports would use the same DDC pin and the system
+ * couldn't communicate with them separately. */
+ if (ddc_pin == DDC_PIN_B) {
+ dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+ dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+ } else if (ddc_pin == DDC_PIN_C) {
+ dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+ dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+ } else if (ddc_pin == DDC_PIN_D) {
+ dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+ dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+ }
+ } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
- if (child->common.ddc_pin == 0x04 && port != PORT_C)
+ else if (ddc_pin == DDC_PIN_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
- if (child->common.ddc_pin == 0x06 && port != PORT_D)
+ else if (ddc_pin == DDC_PIN_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
}
if (is_dp) {
- if (aux_channel == 0x40 && port != PORT_A)
+ if (port == PORT_E) {
+ info->alternate_aux_channel = aux_channel;
+ /* If DDI E shares its AUX channel with another port,
+ * then DP can't exist on the shared port, since both
+ * ports would use the same AUX channel and the system
+ * couldn't communicate with them separately. */
+ if (aux_channel == DP_AUX_A)
+ dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+ else if (aux_channel == DP_AUX_B)
+ dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+ else if (aux_channel == DP_AUX_C)
+ dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+ else if (aux_channel == DP_AUX_D)
+ dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+ }
+ else if (aux_channel == DP_AUX_A && port != PORT_A)
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- if (aux_channel == 0x10 && port != PORT_B)
+ else if (aux_channel == DP_AUX_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- if (aux_channel == 0x20 && port != PORT_C)
+ else if (aux_channel == DP_AUX_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- if (aux_channel == 0x30 && port != PORT_D)
+ else if (aux_channel == DP_AUX_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
}
@@ -1046,6 +1029,16 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
}
+
+ /* Parse the I_boost config for SKL and above */
+ if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+ info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+ DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+ port_name(port), info->dp_boost_level);
+ info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+ DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+ port_name(port), info->hdmi_boost_level);
+ }
}
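As a worked example of the I_boost decoding above (the input byte is hypothetical; the mapping is the one in translate_iboost() itself): the low nibble carries the (e)DP level and the high nibble the HDMI level.

	/* Sketch, not part of the patch: decoding iboost_level = 0x21 */
	u8 raw = 0x21;
	u8 dp   = translate_iboost(raw & 0xF);	/* low nibble 0x1 -> mapping[1] == 3 */
	u8 hdmi = translate_iboost(raw >> 4);	/* high nibble 0x2 -> mapping[2] == 7 */
	/* any nibble >= 3 logs the VBT warning and yields a boost of 0 */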
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
@@ -1075,17 +1068,39 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
int i, child_device_num, count;
- u16 block_size;
+ u8 expected_size;
+ u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- if (p_defs->child_dev_size < sizeof(*p_child)) {
- DRM_ERROR("General definiton block child device size is too small.\n");
+ if (bdb->version < 195) {
+ expected_size = sizeof(struct old_child_dev_config);
+ } else if (bdb->version == 195) {
+ expected_size = 37;
+ } else if (bdb->version <= 197) {
+ expected_size = 38;
+ } else {
+ expected_size = 38;
+ BUILD_BUG_ON(sizeof(*p_child) < 38);
+ DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+ bdb->version, expected_size);
+ }
+
+ /* The legacy-sized child device config is the minimum we need. */
+ if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ DRM_ERROR("Child device config size %u is too small.\n",
+ p_defs->child_dev_size);
return;
}
+
+ /* Flag an error for unexpected size, but continue anyway. */
+ if (p_defs->child_dev_size != expected_size)
+ DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+ p_defs->child_dev_size, expected_size, bdb->version);
+
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
@@ -1130,7 +1145,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
- memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+
+ /*
+ * Copy as much of the child device config as we know about
+ * (sizeof) and as is actually available (child_dev_size).
+ * Any access to the data must then depend on the VBT version.
+ */
+ memcpy(child_dev_ptr, p_child,
+ min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
}
return;
}
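The min_t() bound cuts both ways: it never reads past the child device entry the VBT actually provides (child_dev_size may be smaller than the union) and never writes past the struct we know about (the entry may be larger). A minimal sketch of the effect, assuming a BDB 195 VBT with 37-byte entries and a zero-allocated destination array (the allocation is not shown in this hunk):

	/* Sketch: bounded copy between differently-sized layouts */
	size_t n = min_t(size_t, 37 /* child_dev_size */, sizeof(*p_child));
	memcpy(child_dev_ptr, p_child, n);
	/* bytes past n in *child_dev_ptr stay zeroed; version-gated
	 * code must not interpret them for this VBT */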
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index af0b47652752..46cd5c7ebacd 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -203,9 +203,11 @@ struct bdb_general_features {
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
-/* We used to keep this struct but without any version control. We should avoid
+/*
+ * We used to keep this struct but without any version control. We should avoid
* using it in the future, but it should be safe to keep using it in the old
- * code. */
+ * code. Do not change; we rely on its size.
+ */
struct old_child_dev_config {
u16 handle;
u16 device_type;
@@ -231,6 +233,10 @@ struct old_child_dev_config {
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
struct common_child_dev_config {
u16 handle;
u16 device_type;
@@ -239,8 +245,13 @@ struct common_child_dev_config {
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
+ u8 obsolete;
+ u8 flags_1;
+ u8 not_common3[13];
+ u8 iboost_level;
} __packed;
+
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
@@ -747,11 +758,6 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_C 2
#define DVO_D 3
-/* define the PORT for DP output type */
-#define PORT_IDPB 7
-#define PORT_IDPC 8
-#define PORT_IDPD 9
-
/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
@@ -764,6 +770,8 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11
+#define DVO_PORT_HDMIE 12
#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
@@ -778,6 +786,13 @@ int intel_parse_bios(struct drm_device *dev);
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
+/*
+ * PMIC vs. SoC backlight support is specified in the pwm_blc
+ * field of the mipi_config block below.
+ */
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+
struct mipi_config {
u16 panel_id;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 521af2c069cb..af5e43bef4a4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -236,53 +236,6 @@ static void intel_enable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct drm_crtc *crtc;
- int old_dpms;
-
- /* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- old_dpms = connector->dpms;
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = encoder->base.crtc;
- if (!crtc) {
- encoder->connectors_active = false;
- return;
- }
-
- /* We need the pipe to run for anything but OFF. */
- if (mode == DRM_MODE_DPMS_OFF)
- encoder->connectors_active = false;
- else
- encoder->connectors_active = true;
-
- /* We call connector dpms manually below in case pipe dpms doesn't
- * change due to cloning. */
- if (mode < old_dpms) {
- /* From off to on, enable the pipe first. */
- intel_crtc_update_dpms(crtc);
-
- intel_crt_set_dpms(encoder, mode);
- } else {
- intel_crt_set_dpms(encoder, mode);
-
- intel_crtc_update_dpms(crtc);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -798,7 +751,7 @@ static void intel_crt_reset(struct drm_connector *connector)
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = intel_crt_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bcb41e61877d..d0f1b8d833cd 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -244,7 +244,7 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
void intel_csr_load_program(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- __be32 *payload = dev_priv->csr.dmc_payload;
+ u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@ void intel_csr_load_program(struct drm_device *dev)
fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM_BASE + i * 4,
- (u32 __force)payload[i]);
+ payload[i]);
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
char substepping = intel_get_substepping(dev);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
- __be32 *dmc_payload;
+ uint32_t *dmc_payload;
bool fw_loaded = false;
if (!fw) {
@@ -350,7 +350,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
}
csr->mmio_count = dmc_header->mmio_count;
for (i = 0; i < dmc_header->mmio_count; i++) {
- if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE &&
+ if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]);
@@ -375,20 +375,13 @@ static void finish_csr_load(const struct firmware *fw, void *context)
}
dmc_payload = csr->dmc_payload;
- for (i = 0; i < dmc_header->fw_size; i++) {
- uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
- /*
- * The firmware payload is an array of 32 bit words stored in
- * little-endian format in the firmware image and programmed
- * as 32 bit big-endian format to memory.
- */
- dmc_payload[i] = cpu_to_be32(*tmp);
- }
+ memcpy(dmc_payload, &fw->data[readcount], nbytes);
/* load csr program during system boot, as needed for DC states */
intel_csr_load_program(dev);
fw_loaded = true;
+ DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
out:
if (fw_loaded)
intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
return;
}
+ DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
/*
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@ void intel_csr_ucode_fini(struct drm_device *dev)
void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+ WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+ "CSR is not loaded.\n");
WARN(!I915_READ(CSR_PROGRAM_BASE),
"CSR program storage start is NULL\n");
WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cacb07b7a8f1..61575f67a626 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -31,6 +31,7 @@
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
+ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@ struct ddi_buf_trans {
* automatically adapt to HDMI connections as well
*/
static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0006000E },
- { 0x00D75FFF, 0x0005000A },
- { 0x00C30FFF, 0x00040006 },
- { 0x80AAAFFF, 0x000B0000 },
- { 0x00FFFFFF, 0x0005000A },
- { 0x00D75FFF, 0x000C0004 },
- { 0x80C30FFF, 0x000B0000 },
- { 0x00FFFFFF, 0x00040006 },
- { 0x80D75FFF, 0x000B0000 },
+ { 0x00FFFFFF, 0x0006000E, 0x0 },
+ { 0x00D75FFF, 0x0005000A, 0x0 },
+ { 0x00C30FFF, 0x00040006, 0x0 },
+ { 0x80AAAFFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x0005000A, 0x0 },
+ { 0x00D75FFF, 0x000C0004, 0x0 },
+ { 0x80C30FFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x00040006, 0x0 },
+ { 0x80D75FFF, 0x000B0000, 0x0 },
};
static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0007000E },
- { 0x00D75FFF, 0x000F000A },
- { 0x00C30FFF, 0x00060006 },
- { 0x00AAAFFF, 0x001E0000 },
- { 0x00FFFFFF, 0x000F000A },
- { 0x00D75FFF, 0x00160004 },
- { 0x00C30FFF, 0x001E0000 },
- { 0x00FFFFFF, 0x00060006 },
- { 0x00D75FFF, 0x001E0000 },
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000F000A, 0x0 },
+ { 0x00C30FFF, 0x00060006, 0x0 },
+ { 0x00AAAFFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x000F000A, 0x0 },
+ { 0x00D75FFF, 0x00160004, 0x0 },
+ { 0x00C30FFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x00060006, 0x0 },
+ { 0x00D75FFF, 0x001E0000, 0x0 },
};
static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV d db */
- { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
- { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
- { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
- { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
- { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
- { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
- { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
- { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
- { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
- { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
- { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
- { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
+ { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
+ { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
+ { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
+ { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
+ { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
+ { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
+ { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
+ { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
+ { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
+ { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
+ { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
};
static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
- { 0x00FFFFFF, 0x00000012 },
- { 0x00EBAFFF, 0x00020011 },
- { 0x00C71FFF, 0x0006000F },
- { 0x00AAAFFF, 0x000E000A },
- { 0x00FFFFFF, 0x00020011 },
- { 0x00DB6FFF, 0x0005000F },
- { 0x00BEEFFF, 0x000A000C },
- { 0x00FFFFFF, 0x0005000F },
- { 0x00DB6FFF, 0x000A000C },
+ { 0x00FFFFFF, 0x00000012, 0x0 },
+ { 0x00EBAFFF, 0x00020011, 0x0 },
+ { 0x00C71FFF, 0x0006000F, 0x0 },
+ { 0x00AAAFFF, 0x000E000A, 0x0 },
+ { 0x00FFFFFF, 0x00020011, 0x0 },
+ { 0x00DB6FFF, 0x0005000F, 0x0 },
+ { 0x00BEEFFF, 0x000A000C, 0x0 },
+ { 0x00FFFFFF, 0x0005000F, 0x0 },
+ { 0x00DB6FFF, 0x000A000C, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0007000E },
- { 0x00D75FFF, 0x000E000A },
- { 0x00BEFFFF, 0x00140006 },
- { 0x80B2CFFF, 0x001B0002 },
- { 0x00FFFFFF, 0x000E000A },
- { 0x00DB6FFF, 0x00160005 },
- { 0x80C71FFF, 0x001A0002 },
- { 0x00F7DFFF, 0x00180004 },
- { 0x80D75FFF, 0x001B0002 },
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000E000A, 0x0 },
+ { 0x00BEFFFF, 0x00140006, 0x0 },
+ { 0x80B2CFFF, 0x001B0002, 0x0 },
+ { 0x00FFFFFF, 0x000E000A, 0x0 },
+ { 0x00DB6FFF, 0x00160005, 0x0 },
+ { 0x80C71FFF, 0x001A0002, 0x0 },
+ { 0x00F7DFFF, 0x00180004, 0x0 },
+ { 0x80D75FFF, 0x001B0002, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0001000E },
- { 0x00D75FFF, 0x0004000A },
- { 0x00C30FFF, 0x00070006 },
- { 0x00AAAFFF, 0x000C0000 },
- { 0x00FFFFFF, 0x0004000A },
- { 0x00D75FFF, 0x00090004 },
- { 0x00C30FFF, 0x000C0000 },
- { 0x00FFFFFF, 0x00070006 },
- { 0x00D75FFF, 0x000C0000 },
+ { 0x00FFFFFF, 0x0001000E, 0x0 },
+ { 0x00D75FFF, 0x0004000A, 0x0 },
+ { 0x00C30FFF, 0x00070006, 0x0 },
+ { 0x00AAAFFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x0004000A, 0x0 },
+ { 0x00D75FFF, 0x00090004, 0x0 },
+ { 0x00C30FFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x00070006, 0x0 },
+ { 0x00D75FFF, 0x000C0000, 0x0 },
};
static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV df db */
- { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
- { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
- { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
- { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
- { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
- { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
- { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
- { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
- { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
- { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
+ { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
+ { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
+ { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
+ { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
+ { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
+ { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
+ { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
+ { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
+ { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
};
+/* Skylake H and S */
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00000018, 0x000000a2 },
- { 0x00004014, 0x0000009B },
- { 0x00006012, 0x00000088 },
- { 0x00008010, 0x00000087 },
- { 0x00000018, 0x0000009B },
- { 0x00004014, 0x00000088 },
- { 0x00006012, 0x00000087 },
- { 0x00000018, 0x00000088 },
- { 0x00004014, 0x00000087 },
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00009010, 0x000000C7, 0x0 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00002016, 0x000000DF, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
};
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x00000087, 0x0 },
+ { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00002016, 0x00000088, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x00007011, 0x00000087, 0x0 },
+ { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+ { 0x00007011, 0x000000C7, 0x0 },
+ { 0x00000018, 0x00000088, 0x0 },
+ { 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
- { 0x00000018, 0x000000a8 },
- { 0x00002016, 0x000000ab },
- { 0x00006012, 0x000000a2 },
- { 0x00008010, 0x00000088 },
- { 0x00000018, 0x000000ab },
- { 0x00004014, 0x000000a2 },
- { 0x00006012, 0x000000a6 },
- { 0x00000018, 0x000000a2 },
- { 0x00005013, 0x0000009c },
- { 0x00000018, 0x00000088 },
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00000018, 0x000000AB, 0x0 },
+ { 0x00007013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00002016, 0x000000AB, 0x0 },
+ { 0x00005013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
};
+/*
+ * Skylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000AB, 0x0 },
+ { 0x00007011, 0x000000A4, 0x0 },
+ { 0x00009010, 0x000000DF, 0x0 },
+ { 0x00000018, 0x000000AA, 0x0 },
+ { 0x00006013, 0x000000A4, 0x0 },
+ { 0x00007011, 0x0000009D, 0x0 },
+ { 0x00000018, 0x000000A0, 0x0 },
+ { 0x00006012, 0x000000DF, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+};
+/* Skylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000ac },
- { 0x00005012, 0x0000009d },
- { 0x00007011, 0x00000088 },
- { 0x00000018, 0x000000a1 },
- { 0x00000018, 0x00000098 },
- { 0x00004013, 0x00000088 },
- { 0x00006012, 0x00000087 },
- { 0x00000018, 0x000000df },
- { 0x00003015, 0x00000087 },
- { 0x00003015, 0x000000c7 },
- { 0x00000018, 0x000000c7 },
+ { 0x00000018, 0x000000AC, 0x0 },
+ { 0x00005012, 0x0000009D, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00000018, 0x00000098, 0x0 },
+ { 0x00004013, 0x00000088, 0x0 },
+ { 0x00006012, 0x00000087, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+ { 0x00003015, 0x00000087, 0x0 }, /* Default */
+ { 0x00003015, 0x000000C7, 0x0 },
+ { 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00005012, 0x000000DF, 0x0 },
+ { 0x00007011, 0x00000084, 0x0 },
+ { 0x00000018, 0x000000A4, 0x0 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00004013, 0x00000080, 0x0 },
+ { 0x00006013, 0x000000C7, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+ { 0x00003015, 0x000000C7, 0x0 }, /* Default */
+ { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
+ { 0x00000018, 0x000000C7, 0x0 },
};
struct bxt_ddi_buf_trans {
@@ -181,16 +261,16 @@ struct bxt_ddi_buf_trans {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
- { 52, 0, 0, 128, true }, /* 0: 400 0 */
- { 78, 0, 0, 85, false }, /* 1: 400 3.5 */
- { 104, 0, 0, 64, false }, /* 2: 400 6 */
- { 154, 0, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0, 0, 128, false }, /* 4: 600 0 */
- { 116, 0, 0, 85, false }, /* 5: 600 3.5 */
- { 154, 0, 0, 64, false }, /* 6: 600 6 */
- { 102, 0, 0, 128, false }, /* 7: 800 0 */
- { 154, 0, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, false }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
};
/* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
/* Idx NT mV diff db */
- { 52, 0, 0, 128, false }, /* 0: 400 0 */
- { 52, 0, 0, 85, false }, /* 1: 400 3.5 */
- { 52, 0, 0, 64, false }, /* 2: 400 6 */
- { 42, 0, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0, 0, 128, false }, /* 4: 600 0 */
- { 77, 0, 0, 85, false }, /* 5: 600 3.5 */
- { 77, 0, 0, 64, false }, /* 6: 600 6 */
- { 102, 0, 0, 128, false }, /* 7: 800 0 */
- { 102, 0, 0, 85, false }, /* 8: 800 3.5 */
+ { 52, 0x9A, 0, 128, false }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, false }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type);
+
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
enum port *port)
@@ -249,6 +332,77 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
return intel_dig_port->hdmi.hdmi_reg;
}
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+ int *n_entries)
+{
+ const struct ddi_buf_trans *ddi_translations;
+
+ if (IS_SKL_ULX(dev)) {
+ ddi_translations = skl_y_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+ } else if (IS_SKL_ULT(dev)) {
+ ddi_translations = skl_u_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ } else {
+ ddi_translations = skl_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+
+ return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+
+ if (IS_SKL_ULX(dev)) {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_y_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_y_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+ }
+ } else if (IS_SKL_ULT(dev)) {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_u_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_u_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ }
+ } else {
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations = skl_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ } else {
+ ddi_translations = skl_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ }
+ }
+
+ return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+ int *n_entries)
+{
+ const struct ddi_buf_trans *ddi_translations;
+
+ if (IS_SKL_ULX(dev)) {
+ ddi_translations = skl_y_ddi_translations_hdmi;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+ } else {
+ ddi_translations = skl_ddi_translations_hdmi;
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ }
+
+ return ddi_translations;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
@@ -261,6 +415,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
+ u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
@@ -280,19 +435,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
return;
} else if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
- ddi_translations_dp = skl_ddi_translations_dp;
- n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- if (dev_priv->edp_low_vswing) {
- ddi_translations_edp = skl_ddi_translations_edp;
- n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- } else {
- ddi_translations_edp = skl_ddi_translations_dp;
- n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- }
-
- ddi_translations_hdmi = skl_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_default_entry = 7;
+ ddi_translations_dp =
+ skl_get_buf_trans_dp(dev, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev, &n_edp_entries);
+ ddi_translations_hdmi =
+ skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+ hdmi_default_entry = 8;
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+ dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -353,7 +506,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
}
for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
- I915_WRITE(reg, ddi_translations[i].trans1);
+ I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
reg += 4;
@@ -368,7 +521,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
- I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
+ I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
reg += 4;
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
reg += 4;
@@ -625,11 +778,11 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
unsigned p, n2, r2;
};
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
unsigned budget;
@@ -703,9 +856,9 @@ static unsigned wrpll_get_budget_for_freq(int clock)
return budget;
}
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
- struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct hsw_wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
@@ -762,8 +915,7 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */
}
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
{
int refclk = LC_FREQ;
int n, p, r;
@@ -856,6 +1008,26 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (pipe_config->has_pch_encoder)
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->fdi_m_n);
+ else if (pipe_config->has_dp_encoder)
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
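As a worked example of the helper above (numbers hypothetical): a 12 bpc HDMI sink (pipe_bpp == 36) carrying a 297000 kHz mode runs the port at 1.5x the pixel rate, and the helper inverts that relationship.

	/* Sketch: dotclock readout for a 12 bpc HDMI sink */
	pipe_config->port_clock = 445500;	/* kHz */
	dotclock = 445500 * 2 / 3;		/* == 297000 kHz */
	/* a pixel_multiplier of 2 would further halve this to 148500 */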
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -902,12 +1074,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock;
- if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
@@ -929,10 +1096,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
- link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+ link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
break;
case PORT_CLK_SEL_WRPLL2:
- link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+ link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1121,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->port_clock = link_clock * 2;
- if (pipe_config->has_pch_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->fdi_m_n);
- else if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id dpll)
{
- /* FIXME formula not available in bspec */
- return 0;
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state *state;
+ intel_clock_t clock;
+
+ /* For DDI ports we always use a shared PLL. */
+ if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+ return 0;
+
+ pll = &dev_priv->shared_dplls[dpll];
+ state = &pll->config.hw_state;
+
+ clock.m1 = 2;
+ clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(100000, &clock);
}
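The readout above treats m2 as a fixed-point value: the integer part from pll0 lands above bit 22, and the fractional bits from pll2 are only OR-ed in when pll3 has PORT_PLL_M2_FRAC_ENABLE set. With the 270000 kHz entry from the bxt_dp_clk_val table below (m2_int 27, fraction disabled), that amounts to:

	/* Sketch: m2 as integer.22-bit-fraction fixed point */
	clock.m2 = 27 << 22;	/* integer part only */
	/* PORT_PLL_M2_FRAC_ENABLE clear -> no fractional bits OR-ed in */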
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
@@ -980,16 +1156,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t dpll = port;
- pipe_config->port_clock =
- bxt_calc_pll_link(dev_priv, dpll);
+ pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
- if (pipe_config->has_dp_encoder)
- pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else
- pipe_config->base.adjusted_mode.crtc_clock =
- pipe_config->port_clock;
+ ddi_dotclock_get(pipe_config);
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1180,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
{
uint64_t freq2k;
unsigned p, n2, r2;
- struct wrpll_rnp best = { 0, 0, 0 };
+ struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
freq2k = clock / 100;
- budget = wrpll_get_budget_for_freq(clock);
+ budget = hsw_wrpll_get_budget_for_freq(clock);
/* Special case handling for 540 pixel clock: bypass WR PLL entirely
* and directly pass the LC PLL to it. */
@@ -1060,8 +1229,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
n2++) {
for (p = P_MIN; p <= P_MAX; p += P_INC)
- wrpll_update_rnp(freq2k, budget,
- r2, n2, p, &best);
+ hsw_wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
}
}
@@ -1105,6 +1274,102 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return true;
}
+struct skl_wrpll_context {
+ uint64_t min_deviation; /* current minimal deviation */
+ uint64_t central_freq; /* chosen central freq */
+ uint64_t dco_freq; /* chosen dco freq */
+ unsigned int p; /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+ uint64_t central_freq,
+ uint64_t dco_freq,
+ unsigned int divider)
+{
+ uint64_t deviation;
+
+ deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+ central_freq);
+
+ /* positive deviation */
+ if (dco_freq >= central_freq) {
+ if (deviation < SKL_DCO_MAX_PDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+ /* negative deviation */
+ } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+}
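deviation is computed in units of 0.01% (hence the 10000 scale), so SKL_DCO_MAX_PDEVIATION/NDEVIATION of 100 and 600 are exactly the +1%/-6% window from the comment. A quick check with round numbers:

	/* Sketch: DCO candidate 9.05 GHz against a 9.0 GHz central freq */
	deviation = div64_u64(10000 * abs_diff(9050000000ULL, 9000000000ULL),
			      9000000000ULL);	/* == 55, i.e. 0.55% */
	/* dco >= central and 55 < 100, so the candidate is kept if it
	 * also beats the current min_deviation */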
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+ unsigned int *p0 /* out */,
+ unsigned int *p1 /* out */,
+ unsigned int *p2 /* out */)
+{
+ /* even dividers */
+ if (p % 2 == 0) {
+ unsigned int half = p / 2;
+
+ if (half == 1 || half == 2 || half == 3 || half == 5) {
+ *p0 = 2;
+ *p1 = 1;
+ *p2 = half;
+ } else if (half % 2 == 0) {
+ *p0 = 2;
+ *p1 = half / 2;
+ *p2 = 2;
+ } else if (half % 3 == 0) {
+ *p0 = 3;
+ *p1 = half / 3;
+ *p2 = 2;
+ } else if (half % 7 == 0) {
+ *p0 = 7;
+ *p1 = half / 7;
+ *p2 = 2;
+ }
+ } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = p / 3;
+ } else if (p == 5 || p == 7) {
+ *p0 = p;
+ *p1 = 1;
+ *p2 = 1;
+ } else if (p == 15) {
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = 5;
+ } else if (p == 21) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 3;
+ } else if (p == 35) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 5;
+ }
+}
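The branches mirror the even/odd divider lists used by the search below. Factoring p = 20, for instance, takes the even path:

	/* Sketch: p = 20 -> half = 10; 10 is not 1/2/3/5, but 10 % 2 == 0 */
	*p0 = 2;
	*p1 = 10 / 2;	/* 5 */
	*p2 = 2;	/* check: 2 * 5 * 2 == 20 */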
+
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
@@ -1115,150 +1380,145 @@ struct skl_wrpll_params {
uint32_t central_freq;
};
-static void
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
- struct skl_wrpll_params *wrpll_params)
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ uint64_t afe_clock,
+ uint64_t central_freq,
+ uint32_t p0, uint32_t p1, uint32_t p2)
{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
- uint32_t min_dco_deviation = 400;
- uint32_t min_dco_index = 3;
- uint32_t P0[4] = {1, 2, 3, 7};
- uint32_t P2[4] = {1, 2, 3, 5};
- bool found = false;
- uint32_t candidate_p = 0;
- uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
- uint32_t candidate_p2[3] = {0};
- uint32_t dco_central_freq_deviation[3];
- uint32_t i, P1, k, dco_count;
- bool retry_with_odd = false;
uint64_t dco_freq;
- /* Determine P0, P1 or P2 */
- for (dco_count = 0; dco_count < 3; dco_count++) {
- found = false;
- candidate_p =
- div64_u64(dco_central_freq[dco_count], afe_clock);
- if (retry_with_odd == false)
- candidate_p = (candidate_p % 2 == 0 ?
- candidate_p : candidate_p + 1);
-
- for (P1 = 1; P1 < candidate_p; P1++) {
- for (i = 0; i < 4; i++) {
- if (!(P0[i] != 1 || P1 == 1))
- continue;
-
- for (k = 0; k < 4; k++) {
- if (P1 != 1 && P2[k] != 2)
- continue;
-
- if (candidate_p == P0[i] * P1 * P2[k]) {
- /* Found possible P0, P1, P2 */
- found = true;
- candidate_p0[dco_count] = P0[i];
- candidate_p1[dco_count] = P1;
- candidate_p2[dco_count] = P2[k];
- goto found;
- }
-
- }
- }
- }
+ switch (central_freq) {
+ case 9600000000ULL:
+ params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ params->central_freq = 3;
+ }
-found:
- if (found) {
- dco_central_freq_deviation[dco_count] =
- div64_u64(10000 *
- abs_diff((candidate_p * afe_clock),
- dco_central_freq[dco_count]),
- dco_central_freq[dco_count]);
-
- if (dco_central_freq_deviation[dco_count] <
- min_dco_deviation) {
- min_dco_deviation =
- dco_central_freq_deviation[dco_count];
- min_dco_index = dco_count;
- }
- }
+ switch (p0) {
+ case 1:
+ params->pdiv = 0;
+ break;
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 7:
+ params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
- if (min_dco_index > 2 && dco_count == 2) {
- retry_with_odd = true;
- dco_count = 0;
- }
+ switch (p2) {
+ case 5:
+ params->kdiv = 0;
+ break;
+ case 2:
+ params->kdiv = 1;
+ break;
+ case 3:
+ params->kdiv = 2;
+ break;
+ case 1:
+ params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
}
- if (min_dco_index > 2) {
- WARN(1, "No valid values found for the given pixel clock\n");
- } else {
- wrpll_params->central_freq = dco_central_freq[min_dco_index];
+ params->qdiv_ratio = p1;
+ params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
- switch (dco_central_freq[min_dco_index]) {
- case 9600000000ULL:
- wrpll_params->central_freq = 0;
- break;
- case 9000000000ULL:
- wrpll_params->central_freq = 1;
- break;
- case 8400000000ULL:
- wrpll_params->central_freq = 3;
- }
+ dco_freq = p0 * p1 * p2 * afe_clock;
- switch (candidate_p0[min_dco_index]) {
- case 1:
- wrpll_params->pdiv = 0;
- break;
- case 2:
- wrpll_params->pdiv = 1;
- break;
- case 3:
- wrpll_params->pdiv = 2;
- break;
- case 7:
- wrpll_params->pdiv = 4;
- break;
- default:
- WARN(1, "Incorrect PDiv\n");
- }
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, 24) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
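The DCO programming splits dco_freq / 24 MHz into an integer part and a 15-bit binary fraction (the 0x8000 scale). For a hypothetical dco_freq of 8.1 GHz:

	/* Sketch: dco_freq = 8100000000 Hz */
	dco_integer  = div_u64(8100000000ULL, 24 * 1000000);	/* 337 */
	dco_fraction = div_u64((div_u64(8100000000ULL, 24)
				- 337 * 1000000) * 0x8000,
			       1000000);			/* 16384, i.e. 0.5 */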
- switch (candidate_p2[min_dco_index]) {
- case 5:
- wrpll_params->kdiv = 0;
- break;
- case 2:
- wrpll_params->kdiv = 1;
- break;
- case 3:
- wrpll_params->kdiv = 2;
- break;
- case 1:
- wrpll_params->kdiv = 3;
- break;
- default:
- WARN(1, "Incorrect KDiv\n");
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const int *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx;
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+
+ skl_wrpll_context_init(&ctx);
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ uint64_t dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+ * Skip the remaining dividers if we're sure we've
+ * found the definitive divider; a deviation of 0
+ * can't be improved upon.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
}
- wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
- wrpll_params->qdiv_mode =
- (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
- dco_freq = candidate_p0[min_dco_index] *
- candidate_p1[min_dco_index] *
- candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
/*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
+ * If a solution is found with an even divider, prefer
+ * this one.
*/
- wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
- wrpll_params->dco_fraction =
- div_u64(((div_u64(dco_freq, 24) -
- wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+ if (d == 0 && ctx.p)
+ break;
+ }
+ if (!ctx.p) {
+ DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ return false;
}
-}
+ /*
+ * gcc incorrectly concludes that these can be used without
+ * being initialized. To be fair, it's hard for it to guess.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+ p0, p1, p2);
+
+ return true;
+}
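Note the even-divider preference: once the even list yields any candidate, the odd list is never tried, even if it could shave the deviation further. Also note the clock argument is in Hz, which is why the caller below converts from kHz. A hedged usage sketch:

	/* Sketch: 162000 kHz pixel clock, converted to Hz at the call site */
	struct skl_wrpll_params params;
	if (!skl_ddi_calculate_wrpll(162000 * 1000, &params))
		return false;	/* nothing within the +1%/-6% DCO window */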
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1541,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+ if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ return false;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1293,17 +1554,14 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
- struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
+ switch (crtc_state->port_clock / 2) {
+ case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
- case DP_LINK_BW_2_7:
+ case 135000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
break;
- case DP_LINK_BW_5_4:
+ case 270000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
break;
}
@@ -1334,6 +1592,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
/* bxt clock parameters */
struct bxt_clk_div {
+ int clock;
uint32_t p1;
uint32_t p2;
uint32_t m2_int;
@@ -1343,14 +1602,14 @@ struct bxt_clk_div {
};
/* pre-calculated values for DP linkrates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
- /* 162 */ {4, 2, 32, 1677722, 1, 1},
- /* 270 */ {4, 1, 27, 0, 0, 1},
- /* 540 */ {2, 1, 27, 0, 0, 1},
- /* 216 */ {3, 2, 32, 1677722, 1, 1},
- /* 243 */ {4, 1, 24, 1258291, 1, 1},
- /* 324 */ {4, 1, 32, 1677722, 1, 1},
- /* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+ {162000, 4, 2, 32, 1677722, 1, 1},
+ {270000, 4, 1, 27, 0, 0, 1},
+ {540000, 2, 1, 27, 0, 0, 1},
+ {216000, 3, 2, 32, 1677722, 1, 1},
+ {243000, 4, 1, 24, 1258291, 1, 1},
+ {324000, 4, 1, 32, 1677722, 1, 1},
+ {432000, 3, 1, 32, 1677722, 1, 1}
};
static bool
@@ -1363,7 +1622,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+ uint32_t lanestagger;
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock;
@@ -1390,29 +1649,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
vco = best_clock.vco;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP) {
- struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int i;
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- clk_div = bxt_dp_clk_val[0];
- break;
- case DP_LINK_BW_2_7:
- clk_div = bxt_dp_clk_val[1];
- break;
- case DP_LINK_BW_5_4:
- clk_div = bxt_dp_clk_val[2];
- break;
- default:
- clk_div = bxt_dp_clk_val[0];
- DRM_ERROR("Unknown link rate\n");
+ clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (bxt_dp_clk_val[i].clock == clock) {
+ clk_div = bxt_dp_clk_val[i];
+ break;
+ }
}
vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
}
- dco_amp = 15;
- dcoampovr_en_h = 0;
- if (vco >= 6200000 && vco <= 6480000) {
+ if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
gain_ctl = 3;
@@ -1423,8 +1672,6 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
int_coef = 11;
gain_ctl = 3;
targ_cnt = 9;
- if (vco >= 4800000 && vco < 5400000)
- dcoampovr_en_h = 1;
} else if (vco == 5400000) {
prop_coef = 3;
int_coef = 8;
@@ -1466,10 +1713,13 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
crtc_state->dpll_hw_state.pll8 = targ_cnt;
- if (dcoampovr_en_h)
- crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+ crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
- crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+ crtc_state->dpll_hw_state.pll10 =
+ PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+ | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
crtc_state->dpll_hw_state.pcsdw12 =
LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2049,65 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+ enum port port, int type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct ddi_buf_trans *ddi_translations;
+ uint8_t iboost;
+ uint8_t dp_iboost, hdmi_iboost;
+ int n_entries;
+ u32 reg;
+
+ /* VBT may override standard boost values */
+ dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+ hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (dp_iboost) {
+ iboost = dp_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else if (type == INTEL_OUTPUT_EDP) {
+ if (dp_iboost) {
+ iboost = dp_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ if (hdmi_iboost) {
+ iboost = hdmi_iboost;
+ } else {
+ ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+ iboost = ddi_translations[port].i_boost;
+ }
+ } else {
+ return;
+ }
+
+ /* Make sure that the requested I_boost is valid */
+ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+ DRM_ERROR("Invalid I_boost value %u\n", iboost);
+ return;
+ }
+
+ reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ reg &= ~BALANCE_LEG_MASK(port);
+ reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+ if (iboost)
+ reg |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+ I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
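skl_ddi_set_iboost packs a 3-bit boost value per port plus a per-port disable bit into one shared register: clear the port's field and its disable bit first, then set exactly one of them. A compilable sketch of that read-modify-write shape; the shift and mask values here are invented for illustration and are not the hardware's actual layout:

#include <stdint.h>
#include <stdio.h>

/* invented layout: 3-bit field per port at bit 8, disable bits at 23+ */
#define BALANCE_LEG_SHIFT(port)   (8 + 3 * (port))
#define BALANCE_LEG_MASK(port)    (7u << BALANCE_LEG_SHIFT(port))
#define BALANCE_LEG_DISABLE_SHIFT 23

static uint32_t set_iboost(uint32_t reg, unsigned int port, uint8_t iboost)
{
	/* clear this port's field and its disable bit first */
	reg &= ~BALANCE_LEG_MASK(port);
	reg &= ~(1u << (BALANCE_LEG_DISABLE_SHIFT + port));

	if (iboost)
		reg |= (uint32_t)iboost << BALANCE_LEG_SHIFT(port);
	else
		reg |= 1u << (BALANCE_LEG_DISABLE_SHIFT + port);

	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_iboost(reg, 1, 0x3);  /* port B gets boost 0x3 */
	reg = set_iboost(reg, 0, 0);    /* port A: balance leg disabled */
	printf("reg = 0x%08x\n", (unsigned int)reg);
	return 0;
}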
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2167,73 @@ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
}
+static uint32_t translate_signal_level(int signal_levels)
+{
+ uint32_t level;
+
+ switch (signal_levels) {
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 0;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 1;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 2;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+ level = 3;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 5;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 6;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 7;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 8;
+ break;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 9;
+ break;
+ }
+
+ return level;
+}
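translate_signal_level maps each valid (voltage swing, pre-emphasis) pair to a buffer-translation index; the default case logs and deliberately falls through to level 0, so an unknown combination degrades to the safest entry instead of failing. The same mapping expressed as a table in a standalone sketch whose bit encoding of the pair is invented (the real DP_TRAIN_* bit positions differ):

#include <stdint.h>
#include <stdio.h>

/* invented encoding: swing in bits 0-1, pre-emphasis in bits 2-3 */
#define VSWING(x)  ((x) & 0x3)
#define PREEMPH(x) (((x) >> 2) & 0x3)

static uint32_t translate_level(int signal)
{
	/* -1 marks combinations the hardware does not support */
	static const int8_t table[4][4] = {
		{ 0,  1,  2,  3 },  /* swing 0, pre-emphasis 0..3 */
		{ 4,  5,  6, -1 },  /* swing 1 */
		{ 7,  8, -1, -1 },  /* swing 2 */
		{ 9, -1, -1, -1 },  /* swing 3 */
	};
	int8_t level = table[VSWING(signal)][PREEMPH(signal)];

	if (level < 0) {
		printf("unsupported swing/pre-emphasis 0x%x\n", signal);
		level = 0;  /* degrade to the safest entry */
	}
	return (uint32_t)level;
}

int main(void)
{
	printf("level = %u\n", (unsigned int)translate_level(0x5));
	return 0;
}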
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dport->base.base.dev;
+ struct intel_encoder *encoder = &dport->base;
+ uint8_t train_set = intel_dp->train_set[0];
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ enum port port = dport->port;
+ uint32_t level;
+
+ level = translate_signal_level(signal_levels);
+
+ if (IS_SKYLAKE(dev))
+ skl_ddi_set_iboost(dev, level, port, encoder->type);
+ else if (IS_BROXTON(dev))
+ bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+ return DDI_BUF_TRANS_SELECT(level);
+}
+
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2778,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PLL(port, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= (5 << 1);
+ temp |= pll->config.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(port, 9), temp);
temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2791,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
- /* Enable 10 bit clock */
- temp |= PORT_PLL_10BIT_CLK_ENABLE;
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ temp |= pll->config.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
/* Enable PLL */
@@ -2469,13 +2843,38 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
return false;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 &= PORT_PLL_M2_MASK;
+
hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 &= PORT_PLL_N_MASK;
+
hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+ PORT_PLL_INT_COEFF_MASK |
+ PORT_PLL_GAIN_CTL_MASK;
+
hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+ PORT_PLL_DCO_AMP_MASK;
+
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2885,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
return true;
}
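The masking added to bxt_ddi_pll_get_hw_state keeps only the bits the driver actually programs, so a later memcmp() between computed and read-back PLL state ignores status, reserved and read-only bits. A self-contained sketch of that mask-on-read pattern with an invented register layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* invented layout: two programmed fields plus a hardware status bit 31 */
#define FIELD_A_MASK 0x00ffu
#define FIELD_B_MASK 0x0f00u

struct hw_state {
	uint32_t reg;
};

static void read_hw_state(struct hw_state *state, uint32_t raw)
{
	/* keep only the bits software programs */
	state->reg = raw & (FIELD_A_MASK | FIELD_B_MASK);
}

int main(void)
{
	struct hw_state computed = { .reg = 0x0312 };
	struct hw_state readback;

	/* raw readout carries a status bit we never wrote */
	read_hw_state(&readback, 0x80000312u);

	printf("%s\n",
	       memcmp(&computed, &readback, sizeof(computed)) == 0 ?
	       "state matches" : "state mismatch");
	return 0;
}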
@@ -2510,7 +2910,6 @@ void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- int cdclk_freq;
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2918,10 @@ void intel_ddi_pll_init(struct drm_device *dev)
else
hsw_shared_dplls_init(dev_priv);
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
if (IS_SKYLAKE(dev)) {
+ int cdclk_freq;
+
+ cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq;
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3017,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, val);
}
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- int type = intel_dig_port->base.type;
-
- if (type != INTEL_OUTPUT_DISPLAYPORT &&
- type != INTEL_OUTPUT_EDP &&
- type != INTEL_OUTPUT_UNKNOWN) {
- return;
- }
-
- intel_dp_hot_plug(intel_encoder);
-}
-
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2793,10 +3178,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
- init_hdmi = true;
- init_dp = true;
+ return;
}
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3209,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
- intel_encoder->hot_plug = intel_ddi_hot_plug;
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hpd_irq_port[port] = intel_dig_port;
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f9810387..8cc9264f7809 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -86,9 +86,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- bool force_restore);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
- if (!connector->mst_port)
- return connector->encoder;
- else
- return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
typedef struct {
int min, max;
@@ -413,7 +401,7 @@ static const intel_limit_t intel_limits_chv = {
static const intel_limit_t intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6480000 },
+ .vco = { .min = 4800000, .max = 6700000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
/* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@ static const intel_limit_t intel_limits_bxt = {
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+ return drm_atomic_crtc_needs_modeset(state);
}
/**
@@ -561,15 +545,25 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk)
return limit;
}
+/*
+ * Platform-specific helpers to calculate the port PLL loopback (clock.m)
+ * and post-divider (clock.p) values, and the pre- (clock.vco) and
+ * post-divided fast (clock.dot) clock rates. This fast dot clock is fed
+ * to the port's IO logic. The helpers' return value is the rate of the
+ * clock that is fed to the display engine's pipe, which can be the above
+ * fast dot clock rate or a divided-down version of it.
+ */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
+ return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return;
+ return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
}
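To make the renamed helpers concrete, here is the i9xx-style derivation as a standalone program; the refclk and divider values in main() are made up for illustration, and this simplified DIV_ROUND_CLOSEST only handles non-negative operands:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))  /* non-negative only */

struct clk {
	int m1, m2, n, p1, p2;  /* raw divider fields */
	int m, p, vco, dot;     /* derived values */
};

static int i9xx_params(int refclk, struct clk *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	if (c->n + 2 == 0 || c->p == 0)
		return 0;
	c->vco = DIV_ROUND_CLOSEST(refclk * c->m, c->n + 2);
	c->dot = DIV_ROUND_CLOSEST(c->vco, c->p);
	return c->dot;  /* rate fed to the pipe, as in the helpers above */
}

int main(void)
{
	struct clk c = { .m1 = 10, .m2 = 8, .n = 2, .p1 = 2, .p2 = 5 };
	int dot = i9xx_params(96000, &c);  /* assumed 96 MHz refclk, in kHz */

	printf("vco = %d kHz, dot = %d kHz\n", c.vco, dot);
	return 0;
}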
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
- return;
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
return true;
}
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock;
- int err = target;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -657,18 +663,31 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
+ return limit->p2.p2_fast;
else
- clock.p2 = limit->p2.p2_slow;
+ return limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
+ return limit->p2.p2_slow;
else
- clock.p2 = limit->p2.p2_fast;
+ return limit->p2.p2_fast;
}
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ intel_clock_t clock;
+ int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- i9xx_clock(refclk, &clock);
+ i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -708,30 +727,14 @@ pnv_find_best_dpll(const intel_limit_t *limit,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
memset(best_clock, 0, sizeof(*best_clock));
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@ pnv_find_best_dpll(const intel_limit_t *limit,
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- pineview_clock(refclk, &clock);
+ pnv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -769,28 +772,17 @@ g4x_find_best_dpll(const intel_limit_t *limit,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int max_n;
- bool found;
+ bool found = false;
/* approximately equals target * 0.00585 */
int err_most = (target >> 8) + (target >> 9);
- found = false;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev))
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@ g4x_find_best_dpll(const intel_limit_t *limit,
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
- i9xx_clock(refclk, &clock);
+ i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
@@ -893,7 +885,7 @@ vlv_find_best_dpll(const intel_limit_t *limit,
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
- vlv_clock(refclk, &clock);
+ vlv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
@@ -956,7 +948,7 @@ chv_find_best_dpll(const intel_limit_t *limit,
clock.m2 = m2;
- chv_clock(refclk, &clock);
+ chv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
@@ -1026,7 +1018,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
line_mask = DSL_LINEMASK_GEN3;
line1 = I915_READ(reg) & line_mask;
- mdelay(5);
+ msleep(5);
line2 = I915_READ(reg) & line_mask;
return line1 == line2;
@@ -1106,6 +1098,9 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
+ case PORT_E:
+ bit = SDE_PORTE_HOTPLUG_SPT;
+ break;
default:
return true;
}
@@ -1694,7 +1689,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
int count = 0;
for_each_intel_crtc(dev, crtc)
- count += crtc->active &&
+ count += crtc->base.state->active &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
return count;
@@ -1775,7 +1770,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
- intel_num_dvo_pipes(dev) == 1) {
+ !intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1785,13 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- I915_WRITE(DPLL(pipe), 0);
+ I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
}
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- u32 val = 0;
+ u32 val;
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1800,9 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* Leave integrated clock source and reference clock enabled for pipe B.
* The latter is needed for VGA hotplug / manual detection.
*/
+ val = DPLL_VGA_MODE_DIS;
if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+ val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
@@ -1821,7 +1817,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
assert_pipe_disabled(dev_priv, pipe);
/* Set PLL en = 0 */
- val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+ val = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1939,13 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
/* PCH only available on ILK+ */
- BUG_ON(INTEL_INFO(dev)->gen < 5);
- if (WARN_ON(pll == NULL))
- return;
+ if (INTEL_INFO(dev)->gen < 5)
+ return;
- if (WARN_ON(pll->config.crtc_mask == 0))
+ if (pll == NULL)
+ return;
+
+ if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
return;
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2003,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
+ * Make the BPC in transcoder be consistent with
+ * that in pipeconf reg. For HDMI we must use 8bpc
+ * here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+ val |= PIPECONF_8BPC;
+ else
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2125,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
int reg;
u32 val;
+ DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
assert_planes_disabled(dev_priv, pipe);
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2186,8 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
int reg;
u32 val;
+ DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
/*
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
@@ -2211,28 +2218,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
intel_wait_for_pipe_off(crtc);
}
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane: plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* If the pipe isn't enabled, we can't pump pixels and may hang */
- assert_pipe_enabled(dev_priv, intel_crtc->pipe);
- to_intel_plane_state(plane->state)->visible = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
-}
-
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2287,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct intel_rotation_info *info = &view->rotation_info;
+ unsigned int tile_height, tile_pitch;
*view = i915_ggtt_view_normal;
@@ -2318,14 +2304,35 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
info->pitch = fb->pitches[0];
info->fb_modifier = fb->modifier[0];
+ tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+ fb->modifier[0]);
+ tile_pitch = PAGE_SIZE / tile_height;
+ info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+ info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+ info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
return 0;
}
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ return 256 * 1024;
+ else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv))
+ return 128 * 1024;
+ else if (INTEL_INFO(dev_priv)->gen >= 4)
+ return 4 * 1024;
+ else
+ return 0;
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2345,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
- if (INTEL_INFO(dev)->gen >= 9)
- alignment = 256 * 1024;
- else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- alignment = 128 * 1024;
- else if (INTEL_INFO(dev)->gen >= 4)
- alignment = 4 * 1024;
- else
- alignment = 64 * 1024;
+ alignment = intel_linear_alignment(dev_priv);
break;
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2390,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
- &view);
+ pipelined_request, &view);
if (ret)
goto err_interruptible;
@@ -2400,7 +2400,18 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
* a fence as the cost is not that onerous.
*/
ret = i915_gem_object_get_fence(obj);
- if (ret)
+ if (ret == -EDEADLK) {
+ /*
+ * -EDEADLK means there are no free fences and
+ * no pending flips.
+ *
+ * This is propagated to atomic, but it uses
+ * -EDEADLK to force a locking recovery, so
+ * change the returned error to -EBUSY.
+ */
+ ret = -EBUSY;
+ goto err_unpin;
+ } else if (ret)
goto err_unpin;
i915_gem_object_pin_fence(obj);
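The hunk above reserves -EDEADLK for the atomic locking-recovery path by remapping a "no fences left" -EDEADLK from the fence code to -EBUSY. The shape of that translation in a standalone sketch; get_fence() here is a stand-in, not the driver's function:

#include <errno.h>
#include <stdio.h>

static int get_fence(int fences_available)
{
	return fences_available ? 0 : -EDEADLK;  /* stand-in for the fence code */
}

static int pin_with_fence(int fences_available)
{
	int ret = get_fence(fences_available);

	/*
	 * -EDEADLK would make the atomic core start deadlock
	 * recovery; report resource exhaustion as -EBUSY instead.
	 */
	if (ret == -EDEADLK)
		return -EBUSY;
	return ret;
}

int main(void)
{
	printf("no fences free: %d\n", pin_with_fence(0));  /* -EBUSY */
	printf("fence pinned:   %d\n", pin_with_fence(1));  /* 0 */
	return 0;
}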
@@ -2435,7 +2446,8 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
unsigned int tiling_mode,
unsigned int cpp,
unsigned int pitch)
@@ -2451,12 +2463,13 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
return tile_rows * pitch * 8 + tiles * 4096;
} else {
+ unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
offset = *y * pitch + *x * cpp;
- *y = 0;
- *x = (offset & 4095) / cpp;
- return offset & -4096;
+ *y = (offset & alignment) / pitch;
+ *x = ((offset & alignment) - *y * pitch) / cpp;
+ return offset & ~alignment;
}
}
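The rewritten linear path rounds the byte offset down to the platform alignment and re-expresses the remainder as x/y inside the aligned block, instead of hard-coding 4096. A worked standalone version; the alignment, pitch and cpp values in main() are assumptions for illustration:

#include <stdio.h>

static unsigned long compute_page_offset(int *x, int *y,
					 unsigned int cpp,
					 unsigned int pitch,
					 unsigned int align /* power of two */)
{
	unsigned int alignment = align - 1;
	unsigned long offset = (unsigned long)*y * pitch + (unsigned long)*x * cpp;

	/* split the remainder back into coordinates */
	*y = (int)((offset & alignment) / pitch);
	*x = (int)(((offset & alignment) - (unsigned long)*y * pitch) / cpp);
	return offset & ~(unsigned long)alignment;  /* rounded-down base */
}

int main(void)
{
	int x = 100, y = 50;
	unsigned long base = compute_page_offset(&x, &y, 4, 8192, 4096);

	/* base + y * pitch + x * cpp reproduces the original offset */
	printf("base=%lu x=%d y=%d\n", base, x, y);
	return 0;
}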
@@ -2583,6 +2596,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_plane_state *plane_state = primary->state;
struct drm_framebuffer *fb;
if (!plane_config->fb)
@@ -2622,15 +2636,23 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ plane_state->src_x = plane_state->src_y = 0;
+ plane_state->src_w = fb->width << 16;
+ plane_state->src_h = fb->height << 16;
+
+ plane_state->crtc_x = plane_state->src_y = 0;
+ plane_state->crtc_w = fb->width;
+ plane_state->crtc_h = fb->height;
+
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
- primary->fb = fb;
- primary->state->crtc = &intel_crtc->base;
- primary->crtc = &intel_crtc->base;
- update_state_fb(primary);
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+ drm_framebuffer_reference(fb);
+ primary->fb = primary->state->fb = fb;
+ primary->crtc = primary->state->crtc = &intel_crtc->base;
+ intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+ obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2747,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2849,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2928,32 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
return i915_gem_obj_ggtt_offset_view(obj, view);
}
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
struct intel_crtc_scaler_state *scaler_state;
int i;
- if (!intel_crtc || !intel_crtc->config)
- return;
-
- dev = intel_crtc->base.dev;
- dev_priv = dev->dev_private;
scaler_state = &intel_crtc->config->scaler_state;
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
- if (!scaler_state->scalers[i].in_use) {
- I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
- I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
- I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
- DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, i);
- }
+ if (!scaler_state->scalers[i].in_use)
+ skl_detach_scaler(intel_crtc, i);
}
}
@@ -3132,8 +3156,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->fbc.disable_fbc)
+ dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -3176,24 +3200,8 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!crtc->active)
- return;
-
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
- intel_crtc_enable_planes(&crtc->base);
-}
-
void intel_prepare_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
-
/* no reset support for gen2 */
if (IS_GEN2(dev))
return;
@@ -3203,18 +3211,11 @@ void intel_prepare_reset(struct drm_device *dev)
return;
drm_modeset_lock_all(dev);
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
- }
+ intel_display_suspend(dev);
}
void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3259,7 @@ void intel_finish_reset(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
intel_hpd_init(dev_priv);
@@ -4200,34 +4201,16 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (pll == NULL)
- return;
-
- if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
- WARN(1, "bad %s crtc mask\n", pll->name);
- return;
- }
-
- pll->config.crtc_mask &= ~(1 << crtc->pipe);
- if (pll->config.crtc_mask == 0) {
- WARN_ON(pll->on);
- WARN_ON(pll->active);
- }
-
- crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
+ struct intel_shared_dpll_config *shared_dpll;
enum intel_dpll_id i;
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4219,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->new_config->crtc_mask);
+ WARN_ON(shared_dpll[i].crtc_mask);
goto found;
}
@@ -4256,7 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->new_config->crtc_mask);
+ WARN_ON(shared_dpll[i].crtc_mask);
goto found;
}
@@ -4265,15 +4248,15 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->new_config->crtc_mask == 0)
+ if (shared_dpll[i].crtc_mask == 0)
continue;
if (memcmp(&crtc_state->dpll_hw_state,
- &pll->new_config->hw_state,
- sizeof(pll->new_config->hw_state)) == 0) {
+ &shared_dpll[i].hw_state,
+ sizeof(crtc_state->dpll_hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
crtc->base.base.id, pll->name,
- pll->new_config->crtc_mask,
+ shared_dpll[i].crtc_mask,
pll->active);
goto found;
}
@@ -4282,7 +4265,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->new_config->crtc_mask == 0) {
+ if (shared_dpll[i].crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
@@ -4292,83 +4275,33 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
return NULL;
found:
- if (pll->new_config->crtc_mask == 0)
- pll->new_config->hw_state = crtc_state->dpll_hw_state;
+ if (shared_dpll[i].crtc_mask == 0)
+ shared_dpll[i].hw_state =
+ crtc_state->dpll_hw_state;
crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- pll->new_config->crtc_mask |= 1 << crtc->pipe;
+ shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
return pll;
}
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
- unsigned clear_pipes)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- pll->new_config = kmemdup(&pll->config, sizeof pll->config,
- GFP_KERNEL);
- if (!pll->new_config)
- goto cleanup;
-
- pll->new_config->crtc_mask &= ~clear_pipes;
- }
-
- return 0;
-
-cleanup:
- while (--i >= 0) {
- pll = &dev_priv->shared_dplls[i];
- kfree(pll->new_config);
- pll->new_config = NULL;
- }
-
- return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_shared_dpll_config *shared_dpll;
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- WARN_ON(pll->new_config == &pll->config);
-
- pll->config = *pll->new_config;
- kfree(pll->new_config);
- pll->new_config = NULL;
- }
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
+ if (!to_intel_atomic_state(state)->dpll_set)
+ return;
+ shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
-
- WARN_ON(pll->new_config == &pll->config);
-
- kfree(pll->new_config);
- pll->new_config = NULL;
+ pll->config = shared_dpll[i];
}
}
@@ -4386,62 +4319,16 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
- struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
- struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
- int force_detach)
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ unsigned scaler_user, int *scaler_id, unsigned int rotation,
+ int src_w, int src_h, int dst_w, int dst_h)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- int idx;
- int src_w, src_h, dst_w, dst_h;
- int *scaler_id;
- struct drm_framebuffer *fb;
- struct intel_crtc_scaler_state *scaler_state;
- unsigned int rotation;
-
- if (!intel_crtc || !crtc_state)
- return 0;
-
- scaler_state = &crtc_state->scaler_state;
-
- idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
- fb = intel_plane ? plane_state->base.fb : NULL;
-
- if (intel_plane) {
- src_w = drm_rect_width(&plane_state->src) >> 16;
- src_h = drm_rect_height(&plane_state->src) >> 16;
- dst_w = drm_rect_width(&plane_state->dst);
- dst_h = drm_rect_height(&plane_state->dst);
- scaler_id = &plane_state->scaler_id;
- rotation = plane_state->base.rotation;
- } else {
- struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = adjusted_mode->hdisplay;
- dst_h = adjusted_mode->vdisplay;
- scaler_id = &scaler_state->scaler_id;
- rotation = DRM_ROTATE_0;
- }
need_scaling = intel_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4344,14 @@ skl_update_scaler_users(
* update to free the scaler is done in plane/panel-fit programming.
* For this purpose crtc/plane_state->scaler_id isn't reset here.
*/
- if (force_detach || !need_scaling || (intel_plane &&
- (!fb || !plane_state->visible))) {
+ if (force_detach || !need_scaling) {
if (*scaler_id >= 0) {
- scaler_state->scaler_users &= ~(1 << idx);
+ scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
- DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
- "crtc_state = %p scaler_users = 0x%x\n",
- intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id :
- intel_crtc->base.base.id, crtc_state,
+ DRM_DEBUG_KMS("scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
}
@@ -4480,55 +4364,123 @@ skl_update_scaler_users(
src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
- DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+ DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
- intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
- intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
return -EINVAL;
}
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ DRM_DEBUG_KMS("scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
+
+ return 0;
+}
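skl_update_scaler only stages the bookkeeping: each crtc or plane user owns one bit in scaler_users, and freeing clears the bit and the staged id while the hardware detach happens later at commit. A minimal sketch of that staged ownership, with the state layout and sizes invented for illustration:

#include <stdio.h>

#define NUM_SCALERS 2

struct scaler_state {
	unsigned int scaler_users;   /* one bit per crtc/plane user */
	int in_use[NUM_SCALERS];
};

static void stage_scaler_update(struct scaler_state *state, unsigned int user,
				int *scaler_id, int need_scaling)
{
	if (!need_scaling) {
		/* staged free: hardware detach happens at commit time */
		if (*scaler_id >= 0) {
			state->scaler_users &= ~(1u << user);
			state->in_use[*scaler_id] = 0;
			*scaler_id = -1;
		}
		return;
	}
	/* mark the request; an actual scaler is assigned later */
	state->scaler_users |= 1u << user;
}

int main(void)
{
	struct scaler_state state = { 0 };
	int scaler_id = -1;

	stage_scaler_update(&state, 0, &scaler_id, 1);
	printf("users = 0x%x\n", state.scaler_users);            /* 0x1 */

	scaler_id = 0;
	state.in_use[0] = 1;
	stage_scaler_update(&state, 0, &scaler_id, 0);
	printf("users = 0x%x, id = %d\n", state.scaler_users, scaler_id);
	return 0;
}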
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc's scaler state
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &state->base.adjusted_mode;
+
+ DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+ return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+ &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ state->pipe_src_w, state->pipe_src_h,
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc's scaler state
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *intel_plane =
+ to_intel_plane(plane_state->base.plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int ret;
+
+ bool force_detach = !fb || !plane_state->visible;
+
+ DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+ intel_plane->base.base.id, intel_crtc->pipe,
+ drm_plane_index(&intel_plane->base));
+
+ ret = skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&intel_plane->base),
+ &plane_state->scaler_id,
+ plane_state->base.rotation,
+ drm_rect_width(&plane_state->src) >> 16,
+ drm_rect_height(&plane_state->src) >> 16,
+ drm_rect_width(&plane_state->dst),
+ drm_rect_height(&plane_state->dst));
+
+ if (ret || plane_state->scaler_id < 0)
+ return ret;
+
/* check colorkey */
- if (WARN_ON(intel_plane &&
- intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
- DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
- intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+ DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+ intel_plane->base.base.id);
return -EINVAL;
}
/* Check src format */
- if (intel_plane) {
- switch (fb->pixel_format) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- break;
- default:
- DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, fb->base.id, fb->pixel_format);
- return -EINVAL;
- }
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ break;
+ default:
+ DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ return -EINVAL;
}
- /* mark this plane as a scaler user in crtc_state */
- scaler_state->scaler_users |= (1 << idx);
- DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
- "crtc_state = %p scaler_users = 0x%x\n",
- intel_plane ? "PLANE" : "CRTC",
- intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
- src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
return 0;
}
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++)
+ skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4538,13 +4490,6 @@ static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
- /* To update pfit, first update scaler state */
- skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
- intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
- skl_detach_scalers(crtc);
- if (!enable)
- return;
-
if (crtc->config->pch_pfit.enabled) {
int id;
@@ -4584,20 +4529,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
}
}
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
-
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
- intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- intel_plane_restore(&intel_plane->base);
- }
-}
-
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4599,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
bool reenable_ips = false;
/* The clocks have to be on to load the palette. */
- if (!crtc->state->enable || !intel_crtc->active)
+ if (!crtc->state->active)
return;
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4686,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
*/
hsw_enable_ips(intel_crtc);
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
@@ -4810,13 +4737,11 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev)) {
intel_set_memory_cxsr(dev_priv, false);
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.crtc == intel_crtc)
- intel_fbc_disable(dev);
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->wm.vlv.cxsr = false;
+ intel_wait_for_vblank(dev, pipe);
+ }
/*
* FIXME IPS should be fine as long as one plane is
@@ -4827,46 +4752,83 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_plane *plane;
- intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_sprite_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
+ if (atomic->wait_vblank)
+ intel_wait_for_vblank(dev, crtc->pipe);
- intel_post_enable_primary(crtc);
+ intel_frontbuffer_flip(dev, atomic->fb_bits);
- /*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip to a NULL plane.
- */
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ if (atomic->disable_cxsr)
+ crtc->wm.cxsr_allowed = true;
+
+ if (crtc->atomic.update_wm_post)
+ intel_update_watermarks(&crtc->base);
+
+ if (atomic->update_fbc)
+ intel_fbc_update(dev_priv);
+
+ if (atomic->post_enable_primary)
+ intel_post_enable_primary(&crtc->base);
+
+ drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+ intel_update_sprite_watermarks(plane, &crtc->base,
+ 0, 0, 0, false, false);
+
+ memset(atomic, 0, sizeof(*atomic));
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct drm_plane *p;
+
+ /* Track fb's for any planes being disabled */
+ drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+ struct intel_plane *plane = to_intel_plane(p);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+ plane->frontbuffer_bit);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (atomic->wait_for_flips)
+ intel_crtc_wait_for_pending_flips(&crtc->base);
+
+ if (atomic->disable_fbc)
+ intel_fbc_disable_crtc(crtc);
+
+ if (crtc->atomic.disable_ips)
+ hsw_disable_ips(crtc);
+
+ if (atomic->pre_disable_primary)
+ intel_pre_disable_primary(&crtc->base);
+
+ if (atomic->disable_cxsr) {
+ crtc->wm.cxsr_allowed = false;
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
+ struct drm_plane *p;
int pipe = intel_crtc->pipe;
- intel_crtc_wait_for_pending_flips(crtc);
-
- intel_pre_disable_primary(crtc);
-
intel_crtc_dpms_overlay_disable(intel_crtc);
- for_each_intel_plane(dev, intel_plane) {
- if (intel_plane->pipe == pipe) {
- struct drm_crtc *from = intel_plane->base.crtc;
- intel_plane->disable_plane(&intel_plane->base,
- from ?: crtc, true);
- }
- }
+ drm_for_each_plane_mask(p, dev, plane_mask)
+ to_intel_plane(p)->disable_plane(p, crtc);
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4884,9 +4846,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
if (intel_crtc->config->has_pch_encoder)
@@ -4953,46 +4913,17 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
- /* We want to get the other_active_crtc only if there's only 1 other
- * active crtc. */
- for_each_intel_crtc(dev, crtc_it) {
- if (!crtc_it->active || crtc_it == crtc)
- continue;
-
- if (other_active_crtc)
- return;
-
- other_active_crtc = crtc_it;
- }
- if (!other_active_crtc)
- return;
-
- intel_wait_for_vblank(dev, other_active_crtc->pipe);
- intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5033,7 +4964,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_enable_pipe_clock(intel_crtc);
if (INTEL_INFO(dev)->gen == 9)
- skylake_pfit_update(intel_crtc, 1);
+ skylake_pfit_enable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_enable(intel_crtc);
else
@@ -5067,7 +4998,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
- haswell_mode_set_planes_workaround(intel_crtc);
+ hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+ if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+ intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ }
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5094,9 +5029,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
- if (!intel_crtc->active)
- return;
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -5135,18 +5067,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* disable PCH DPLL */
- intel_disable_shared_dpll(intel_crtc);
-
ironlake_fdi_pll_disable(intel_crtc);
}
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
}
static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5157,9 +5082,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- if (!intel_crtc->active)
- return;
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
@@ -5179,7 +5101,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (INTEL_INFO(dev)->gen == 9)
- skylake_pfit_update(intel_crtc, 0);
+ skylake_scaler_disable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_disable(intel_crtc);
else
@@ -5198,22 +5120,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (intel_crtc_to_shared_dpll(intel_crtc))
- intel_disable_shared_dpll(intel_crtc);
-}
-
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_put_shared_dpll(intel_crtc);
}
-
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -5249,6 +5157,8 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ case PORT_E:
+ return POWER_DOMAIN_PORT_DDI_E_2_LANES;
default:
WARN_ON_ONCE(1);
return POWER_DOMAIN_PORT_OTHER;
@@ -5295,6 +5205,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
unsigned long mask;
enum transcoder transcoder;
+ if (!crtc->state->active)
+ return 0;
+
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5309,45 +5222,131 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
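+/*
+ * Get all the power domains needed by the new crtc state first; the
+ * no-longer-needed ones are put only after the modeset, in
+ * modeset_put_power_domains(), to avoid any unnecessary toggling of
+ * the power wells.
+ */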
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains, new_domains, old_domains;
+
+ old_domains = intel_crtc->enabled_power_domains;
+ intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
+ domains = new_domains & ~old_domains;
+
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+
+ return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+ unsigned long domains)
+{
+ enum intel_display_power_domain domain;
+
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+}
+
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
- struct intel_crtc *crtc;
+ unsigned long put_domains[I915_MAX_PIPES] = {};
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- /*
- * First get all needed power domains, then put all unneeded, to avoid
- * any unnecessary toggling of the power wells.
- */
- for_each_intel_crtc(dev, crtc) {
- enum intel_display_power_domain domain;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (needs_modeset(crtc->state))
+ put_domains[to_intel_crtc(crtc)->pipe] =
+ modeset_get_crtc_power_domains(crtc);
+ }
- if (!crtc->base.state->enable)
- continue;
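+ /*
+ * Commit any cdclk change while the power domain references taken
+ * above are still held; the stale ones are dropped only afterwards.
+ */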
+ if (dev_priv->display.modeset_commit_cdclk) {
+ unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
- pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+ if (cdclk != dev_priv->cdclk_freq &&
+ !WARN_ON(!state->allow_modeset))
+ dev_priv->display.modeset_commit_cdclk(state);
+ }
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (put_domains[i])
+ modeset_put_power_domains(dev_priv, put_domains[i]);
+}
+
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_SKYLAKE(dev)) {
+ u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
- for_each_power_domain(domain, pipe_domains[crtc->pipe])
- intel_display_power_get(dev_priv, domain);
+ if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+ dev_priv->max_cdclk_freq = 675000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+ dev_priv->max_cdclk_freq = 540000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+ dev_priv->max_cdclk_freq = 450000;
+ else
+ dev_priv->max_cdclk_freq = 337500;
+ } else if (IS_BROADWELL(dev)) {
+ /*
+ * FIXME with extra cooling we can allow
+ * 540 MHz for ULX and 675 MHz for ULT.
+ * How can we know if extra cooling is
+ * available? PCI ID, VTB, something else?
+ */
+ if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULX(dev))
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULT(dev))
+ dev_priv->max_cdclk_freq = 540000;
+ else
+ dev_priv->max_cdclk_freq = 675000;
+ } else if (IS_CHERRYVIEW(dev)) {
+ dev_priv->max_cdclk_freq = 320000;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->max_cdclk_freq = 400000;
+ } else {
+ /* otherwise assume cdclk is fixed */
+ dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
}
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(state);
+ DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
+}
- for_each_intel_crtc(dev, crtc) {
- enum intel_display_power_domain domain;
+static void intel_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- for_each_power_domain(domain, crtc->enabled_power_domains)
- intel_display_power_put(dev_priv, domain);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->cdclk_freq);
- crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
- intel_display_set_init_power(dev_priv, false);
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev);
}
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
@@ -5463,7 +5462,7 @@ void broxton_set_cdclk(struct drm_device *dev, int frequency)
return;
}
- dev_priv->cdclk_freq = frequency;
+ intel_update_cdclk(dev);
}
void broxton_init_cdclk(struct drm_device *dev)
@@ -5638,6 +5637,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
+ struct drm_device *dev = dev_priv->dev;
u32 freq_select, pcu_ack;
DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5678,6 +5678,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev);
}
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5711,16 +5713,13 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
/* enable PG1 and Misc I/O */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
- /* DPLL0 already enabed !? */
- if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
- DRM_DEBUG_DRIVER("DPLL0 already running\n");
- return;
+ /* DPLL0 not enabled (happens on early BIOS versions) */
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+ /* enable DPLL0 */
+ required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+ skl_dpll0_enable(dev_priv, required_vco);
}
- /* enable DPLL0 */
- required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
- skl_dpll0_enable(dev_priv, required_vco);
-
/* set CDCLK to the frequency the BIOS chose */
skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
@@ -5748,22 +5747,6 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
return vco_freq[hpll_freq] * 1000;
}
-static void vlv_update_cdclk(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
@@ -5827,7 +5810,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- vlv_update_cdclk(dev);
+ intel_update_cdclk(dev);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5868,7 +5851,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- vlv_update_cdclk(dev);
+ intel_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5931,11 +5914,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
int max_pixclk = 0;
for_each_intel_crtc(dev, intel_crtc) {
- if (state)
- crtc_state =
- intel_atomic_get_crtc_state(state, intel_crtc);
- else
- crtc_state = intel_crtc->config;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -5949,39 +5928,32 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
return max_pixclk;
}
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int max_pixclk = intel_mode_max_pixclk(state->dev, state);
- int cdclk, i;
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev, state);
if (max_pixclk < 0)
return max_pixclk;
- if (IS_VALLEYVIEW(dev_priv))
- cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- else
- cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+ to_intel_atomic_state(state)->cdclk =
+ valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (cdclk == dev_priv->cdclk_freq)
- return 0;
+ return 0;
+}
- /* add all active pipes to the state */
- for_each_crtc(state->dev, crtc) {
- if (!crtc->state->enable)
- continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev, state);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ if (max_pixclk < 0)
+ return max_pixclk;
- /* disable/enable all currently active pipes while we change cdclk */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- if (crtc_state->enable)
- crtc_state->mode_changed = true;
+ to_intel_atomic_state(state)->cdclk =
+ broxton_calc_cdclk(dev_priv, max_pixclk);
return 0;
}
@@ -5998,7 +5970,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
- credits = PFI_CREDIT_31;
+ credits = PFI_CREDIT_63;
else
credits = PFI_CREDIT(15);
} else {
@@ -6022,41 +5994,31 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, NULL);
- int req_cdclk;
-
- /* The path in intel_mode_max_pixclk() with a NULL atomic state should
- * never fail. */
- if (WARN_ON(max_pixclk < 0))
- return;
-
- req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (req_cdclk != dev_priv->cdclk_freq) {
- /*
- * FIXME: We can end up here with all power domains off, yet
- * with a CDCLK frequency other than the minimum. To account
- * for this take the PIPE-A power domain, which covers the HW
- * blocks needed for the following programming. This can be
- * removed once it's guaranteed that we get here either with
- * the minimum CDCLK set, or the required power domains
- * enabled.
- */
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ /*
+ * FIXME: We can end up here with all power domains off, yet
+ * with a CDCLK frequency other than the minimum. To account
+ * for this take the PIPE-A power domain, which covers the HW
+ * blocks needed for the following programming. This can be
+ * removed once it's guaranteed that we get here either with
+ * the minimum CDCLK set, or the required power domains
+ * enabled.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
- cherryview_set_cdclk(dev, req_cdclk);
- else
- valleyview_set_cdclk(dev, req_cdclk);
+ if (IS_CHERRYVIEW(dev))
+ cherryview_set_cdclk(dev, req_cdclk);
+ else
+ valleyview_set_cdclk(dev, req_cdclk);
- vlv_program_pfi_credits(dev_priv);
+ vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
- }
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6068,9 +6030,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
bool is_dsi;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6119,7 +6079,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
- intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6146,9 +6105,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- WARN_ON(!crtc->state->enable);
-
- if (intel_crtc->active)
+ if (WARN_ON(intel_crtc->active))
return;
i9xx_set_pll_dividers(intel_crtc);
@@ -6208,9 +6165,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- if (!intel_crtc->active)
- return;
-
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
@@ -6247,91 +6201,89 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_watermarks(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void i9xx_crtc_off(struct drm_crtc *crtc)
-{
}
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
unsigned long domains;
- if (enable) {
- if (!intel_crtc->active) {
- domains = get_crtc_power_domains(crtc);
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
- intel_crtc->enabled_power_domains = domains;
-
- dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
- }
- } else {
- if (intel_crtc->active) {
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
+ if (!intel_crtc->active)
+ return;
- domains = intel_crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
- intel_crtc->enabled_power_domains = 0;
- }
+ if (to_intel_plane_state(crtc->primary->state)->visible) {
+ intel_crtc_wait_for_pending_flips(crtc);
+ intel_pre_disable_primary(crtc);
}
+
+ intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
+ dev_priv->display.crtc_disable(crtc);
+ intel_disable_shared_dpll(intel_crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
}
-/**
- * Sets the power management mode of the pipe and plane.
+/*
+ * Turn all crtcs off, but do not adjust state
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
*/
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
+int intel_display_suspend(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
- bool enable = false;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ unsigned crtc_mask = 0;
+ int ret = 0;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
- enable |= intel_encoder->connectors_active;
+ if (WARN_ON(!ctx))
+ return 0;
- intel_crtc_control(crtc, enable);
+ lockdep_assert_held(&ctx->ww_ctx);
+ state = drm_atomic_state_alloc(dev);
+ if (WARN_ON(!state))
+ return -ENOMEM;
- crtc->state->active = enable;
-}
+ state->acquire_ctx = ctx;
+ state->allow_modeset = true;
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_connector *connector;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ for_each_crtc(dev, crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
- /* crtc should still be enabled when we disable it. */
- WARN_ON(!crtc->state->enable);
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto free;
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
- dev_priv->display.off(crtc);
+ if (!crtc_state->active)
+ continue;
- drm_plane_helper_disable(crtc->primary);
+ crtc_state->active = false;
+ crtc_mask |= 1 << drm_crtc_index(crtc);
+ }
- /* Update computed state. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder || !connector->encoder->crtc)
- continue;
+ if (crtc_mask) {
+ ret = drm_atomic_commit(state);
- if (connector->encoder->crtc != crtc)
- continue;
+ if (!ret) {
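+ /*
+ * The commit turned the crtcs off; mark the sw state active
+ * again so that the paired intel_modeset_setup_hw_state()
+ * knows to re-enable them on resume.
+ */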
+ for_each_crtc(dev, crtc)
+ if (crtc_mask & (1 << drm_crtc_index(crtc)))
+ crtc->state->active = true;
- connector->dpms = DRM_MODE_DPMS_OFF;
- to_intel_encoder(connector->encoder)->connectors_active = false;
+ return ret;
+ }
}
+
+free:
+ if (ret)
+ DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_atomic_state_free(state);
+ return ret;
}
void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6294,42 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
- if (mode == DRM_MODE_DPMS_ON) {
- encoder->connectors_active = true;
-
- intel_crtc_update_dpms(encoder->base.crtc);
- } else {
- encoder->connectors_active = false;
-
- intel_crtc_update_dpms(encoder->base.crtc);
- }
-}
-
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
+ struct drm_crtc *crtc = connector->base.state->crtc;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ connector->base.name);
+
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc;
- bool encoder_enabled;
- enum pipe pipe;
+ struct drm_connector_state *conn_state = connector->base.state;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.base.id,
- connector->base.name);
+ I915_STATE_WARN(!crtc,
+ "connector enabled without attached crtc\n");
- /* there is no real hw state for MST connectors */
- if (connector->mst_port)
+ if (!crtc)
return;
- I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
- "wrong connector dpms state\n");
- I915_STATE_WARN(connector->base.encoder != &encoder->base,
- "active connector not linked to encoder\n");
-
- if (encoder) {
- I915_STATE_WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
+ I915_STATE_WARN(!crtc->state->active,
+ "connector is active, but attached crtc isn't\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
- if (I915_STATE_WARN_ON(!encoder->base.crtc))
- return;
+ if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
- crtc = encoder->base.crtc;
+ I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+ "atomic encoder doesn't match attached encoder\n");
- I915_STATE_WARN(!crtc->state->enable,
- "crtc not enabled\n");
- I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
- }
+ I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+ "attached encoder crtc differs from connector crtc\n");
+ } else {
+ I915_STATE_WARN(crtc && crtc->state->active,
+ "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+ "best encoder set without crtc!\n");
}
}
@@ -6429,26 +6361,6 @@ struct intel_connector *intel_connector_alloc(void)
return connector;
}
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
- /* All the simple cases only support two dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- if (connector->encoder)
- intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
- intel_modeset_check_state(connector->dev);
-}
-
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
@@ -6586,12 +6498,36 @@ retry:
return ret;
}
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *pipe_config)
+{
+ if (pipe_config->pipe_bpp > 24)
+ return false;
+
+ /* HSW can handle pixel rate up to cdclk? */
+ if (IS_HASWELL(dev_priv->dev))
+ return true;
+
+ /*
+ * We compare against max which means we must take
+ * the increased cdclk requirement into account when
+ * calculating the new cdclk.
+ *
+ * Should measure whether using a lower cdclk w/o IPS would be preferable.
+ */
+ return ilk_pipe_pixel_rate(pipe_config) <=
+ dev_priv->max_cdclk_freq * 95 / 100;
+}
+
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
pipe_config->ips_enabled = i915.enable_ips &&
- hsw_crtc_supports_ips(crtc) &&
- pipe_config->pipe_bpp <= 24;
+ hsw_crtc_supports_ips(crtc) &&
+ pipe_config_supports_ips(dev_priv, pipe_config);
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6536,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int ret;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- int clock_limit =
- dev_priv->display.get_display_clock_speed(dev);
+ int clock_limit = dev_priv->max_cdclk_freq;
/*
* Enable pixel doubling when the dot clock
@@ -6647,14 +6581,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
- /* FIXME: remove below call once atomic mode set is place and all crtc
- * related checks called from atomic_crtc_check function */
- ret = 0;
- DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
- crtc, pipe_config->base.state);
- ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
- return ret;
+ return 0;
}
static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6591,8 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
- if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
- WARN(1, "LCPLL1 not enabled\n");
+ if (!(lcpll1 & LCPLL_PLL_ENABLE))
return 24000; /* 24MHz is the cd freq with NSSC ref */
- }
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
@@ -6706,6 +6631,34 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
return 24000;
}
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+ int cdclk;
+
+ if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+ return 19200;
+
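+ /* DE PLL runs off the 19.2 MHz reference; the CD2X path adds a base /2 */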
+ cdclk = 19200 * pll_ratio / 2;
+
+ switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+ case BXT_CDCLK_CD2X_DIV_SEL_1:
+ return cdclk; /* 576MHz or 624MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ return cdclk * 2 / 3; /* 384MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_2:
+ return cdclk / 2; /* 288MHz */
+ case BXT_CDCLK_CD2X_DIV_SEL_4:
+ return cdclk / 4; /* 144MHz */
+ }
+
+ /* error case, behave as if the DE PLL isn't enabled */
+ return 19200;
+}
+
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6787,37 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
return 266667;
}
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
{
u16 hpllcc = 0;
+
+ /*
+ * 852GM/852GMV only supports 133 MHz and the HPLLCC
+ * encoding is different :(
+ * FIXME is this the right way to detect 852GM/852GMV?
+ */
+ if (dev->pdev->revision == 0x1)
+ return 133333;
+
+ pci_bus_read_config_word(dev->pdev->bus,
+ PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
/* Assume that the hardware is in the high speed state. This
* should be the default.
*/
switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
case GC_CLOCK_133_200:
+ case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
return 200000;
case GC_CLOCK_166_250:
return 250000;
case GC_CLOCK_100_133:
return 133333;
+ case GC_CLOCK_133_266:
+ case GC_CLOCK_133_266_2:
+ case GC_CLOCK_166_266:
+ return 266667;
}
/* Shouldn't happen */
@@ -6859,6 +6829,175 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133333;
}
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ static const unsigned int blb_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 6400000,
+ };
+ static const unsigned int pnv_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 2666667,
+ };
+ static const unsigned int cl_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 3333333,
+ [5] = 3566667,
+ [6] = 4266667,
+ };
+ static const unsigned int elk_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ };
+ static const unsigned int ctg_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 2666667,
+ [5] = 4266667,
+ };
+ const unsigned int *vco_table;
+ unsigned int vco;
+ uint8_t tmp = 0;
+
+ /* FIXME other chipsets? */
+ if (IS_GM45(dev))
+ vco_table = ctg_vco;
+ else if (IS_G4X(dev))
+ vco_table = elk_vco;
+ else if (IS_CRESTLINE(dev))
+ vco_table = cl_vco;
+ else if (IS_PINEVIEW(dev))
+ vco_table = pnv_vco;
+ else if (IS_G33(dev))
+ vco_table = blb_vco;
+ else
+ return 0;
+
+ tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+ vco = vco_table[tmp & 0x7];
+ if (vco == 0)
+ DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ else
+ DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+ return vco;
+}
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 12) & 0x1;
+
+ switch (vco) {
+ case 2666667:
+ case 4000000:
+ case 5333333:
+ return cdclk_sel ? 333333 : 222222;
+ case 3200000:
+ return cdclk_sel ? 320000 : 228571;
+ default:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+ return 222222;
+ }
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+ static const uint8_t div_3200[] = { 16, 10, 8 };
+ static const uint8_t div_4000[] = { 20, 12, 10 };
+ static const uint8_t div_5333[] = { 24, 16, 14 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+ return 200000;
+}
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+ static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ uint16_t tmp = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 4) & 0x7;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 4800000:
+ div_table = div_4800;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+ return 190476;
+}
+
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
@@ -7064,8 +7203,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
-static void vlv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
@@ -7074,8 +7213,8 @@ static void vlv_update_pll(struct intel_crtc *crtc,
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
*/
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
/* We should never disable this, set it here for state tracking */
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7317,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
mutex_unlock(&dev_priv->sb_lock);
}
-static void chv_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7457,11 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
};
if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, &pipe_config);
+ chv_compute_dpll(crtc, &pipe_config);
chv_prepare_pll(crtc, &pipe_config);
chv_enable_pll(crtc, &pipe_config);
} else {
- vlv_update_pll(crtc, &pipe_config);
+ vlv_compute_dpll(crtc, &pipe_config);
vlv_prepare_pll(crtc, &pipe_config);
vlv_enable_pll(crtc, &pipe_config);
}
@@ -7344,10 +7483,10 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
vlv_disable_pll(to_i915(dev), pipe);
}
-static void i9xx_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7560,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
}
}
-static void i8xx_update_pll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7723,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
mode->flags = pipe_config->base.adjusted_mode.flags;
+ mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+ mode->hsync = drm_mode_hsync(mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
}
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7802,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk, num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dsi = false;
+ intel_clock_t clock;
+ bool ok;
+ bool is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7822,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
encoder = to_intel_encoder(connector_state->best_encoder);
switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
@@ -7712,19 +7853,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target
- * clock. If the clocks don't match, we can't switch
- * the display clock by using the FP0/FP1. In such case
- * we will disable the LVDS downclock feature.
- */
- has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc_state,
- dev_priv->lvds_downclock,
- refclk, &clock,
- &reduced_clock);
- }
/* Compat-code for transition, will disappear. */
crtc_state->dpll.n = clock.n;
crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7862,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(crtc, crtc_state,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ i8xx_compute_dpll(crtc, crtc_state, NULL,
+ num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(crtc, crtc_state);
+ chv_compute_dpll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(crtc, crtc_state);
+ vlv_compute_dpll(crtc, crtc_state);
} else {
- i9xx_update_pll(crtc, crtc_state,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ i9xx_compute_dpll(crtc, crtc_state, NULL,
+ num_connectors);
}
return 0;
@@ -7804,10 +7930,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
- vlv_clock(refclk, &clock);
-
- /* clock.dot is the fast clock */
- pipe_config->port_clock = clock.dot / 5;
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
static void
@@ -7887,7 +8010,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
mutex_lock(&dev_priv->sb_lock);
@@ -7895,18 +8018,18 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
- chv_clock(refclk, &clock);
-
- /* clock.dot is the fast clock */
- pipe_config->port_clock = clock.dot / 5;
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8555,9 +8678,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
const intel_limit_t *limit;
- bool ret, is_lvds = false;
-
- is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+ bool ret;
refclk = ironlake_get_refclk(crtc_state);
@@ -8573,20 +8694,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
if (!ret)
return false;
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- *has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc_state,
- dev_priv->lvds_downclock,
- refclk, clock,
- reduced_clock);
- }
-
return true;
}
@@ -9294,6 +9401,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_update_cdclk(dev_priv->dev);
}
/*
@@ -9355,21 +9463,160 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+ broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max rate for new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
+ int max_pixel_rate = 0;
+
+ for_each_intel_crtc(state->dev, intel_crtc) {
+ int pixel_rate;
+
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->base.enable)
+ continue;
+
+ pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ max_pixel_rate = max(max_pixel_rate, pixel_rate);
+ }
+
+ return max_pixel_rate;
+}
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, NULL);
- int req_cdclk;
+ uint32_t val, data;
+ int ret;
- /* see the comment in valleyview_modeset_global_resources */
- if (WARN_ON(max_pixclk < 0))
+ if (WARN((I915_READ(LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
return;
- req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv,
+ BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("failed to inform pcode about cdclk change\n");
+ return;
+ }
+
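+ /*
+ * Park the CD clock on FCLK while LCPLL is reprogrammed, then
+ * switch back once the new frequency has locked in.
+ */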
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CLK_FREQ_MASK;
+
+ switch (cdclk) {
+ case 450000:
+ val |= LCPLL_CLK_FREQ_450;
+ data = 0;
+ break;
+ case 540000:
+ val |= LCPLL_CLK_FREQ_54O_BDW;
+ data = 1;
+ break;
+ case 337500:
+ val |= LCPLL_CLK_FREQ_337_5_BDW;
+ data = 2;
+ break;
+ case 675000:
+ val |= LCPLL_CLK_FREQ_675_BDW;
+ data = 3;
+ break;
+ default:
+ WARN(1, "invalid cdclk frequency\n");
+ return;
+ }
+
+ I915_WRITE(LCPLL_CTL, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev);
+
+ WARN(cdclk != dev_priv->cdclk_freq,
+ "cdclk requested %d kHz but got %d kHz\n",
+ cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ int max_pixclk = ilk_max_pixel_rate(state);
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ if (max_pixclk > 540000)
+ cdclk = 675000;
+ else if (max_pixclk > 450000)
+ cdclk = 540000;
+ else if (max_pixclk > 337500)
+ cdclk = 450000;
+ else
+ cdclk = 337500;
+
+ /*
+ * FIXME move the cdclk calculation to
+ * compute_config() so we can fail gracefully.
+ */
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ cdclk = dev_priv->max_cdclk_freq;
+ }
+
+ to_intel_atomic_state(state)->cdclk = cdclk;
+
+ return 0;
+}
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
- if (req_cdclk != dev_priv->cdclk_freq)
- broxton_set_cdclk(dev, req_cdclk);
+ broadwell_set_cdclk(dev, req_cdclk);
}
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@ -9886,7 +10133,7 @@ static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -9975,7 +10222,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
/*
* Algorithm gets a little messy:
@@ -9993,10 +10240,10 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -10015,9 +10262,6 @@ retry:
continue;
if (possible_crtc->state->enable)
continue;
- /* This can occur when applying the pipe A quirk on resume. */
- if (to_intel_crtc(possible_crtc)->new_enabled)
- continue;
crtc = possible_crtc;
break;
@@ -10028,20 +10272,17 @@ retry:
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
- goto fail_unlock;
+ goto fail;
}
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
- goto fail_unlock;
+ goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
- goto fail_unlock;
- intel_encoder->new_crtc = to_intel_crtc(crtc);
- to_intel_connector(connector)->new_encoder = intel_encoder;
+ goto fail;
intel_crtc = to_intel_crtc(crtc);
- intel_crtc->new_enabled = true;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -10097,7 +10338,7 @@ retry:
drm_mode_copy(&crtc_state->base.mode, mode);
- if (intel_set_mode(crtc, state, true)) {
+ if (drm_atomic_commit(state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -10109,9 +10350,7 @@ retry:
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
- fail:
- intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
drm_atomic_state_free(state);
state = NULL;
@@ -10157,10 +10396,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (IS_ERR(crtc_state))
goto fail;
- to_intel_connector(connector)->new_encoder = NULL;
- intel_encoder->new_crtc = NULL;
- intel_crtc->new_enabled = false;
-
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
@@ -10171,7 +10406,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (ret)
goto fail;
- ret = intel_set_mode(crtc, state, true);
+ ret = drm_atomic_commit(state);
if (ret)
goto fail;
@@ -10219,6 +10454,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
+ int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10259,9 +10495,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
if (IS_PINEVIEW(dev))
- pineview_clock(refclk, &clock);
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
else
- i9xx_clock(refclk, &clock);
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10287,7 +10523,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.p2 = 2;
}
- i9xx_clock(refclk, &clock);
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
}
/*
@@ -10295,7 +10531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
- pipe_config->port_clock = clock.dot;
+ pipe_config->port_clock = port_clock;
}
int intel_dotclock_calculate(int link_freq,
@@ -10384,42 +10620,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- /*
- * Since this is called by a timer, we should never get here in
- * the manual case.
- */
- if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
- int pipe = intel_crtc->pipe;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll = I915_READ(dpll_reg);
- dpll |= DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
- dpll = I915_READ(dpll_reg);
- if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
- DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
- }
-
-}
-
void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10437,20 +10637,12 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
if (!dev_priv->mm.busy)
return;
dev_priv->mm.busy = false;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- intel_decrease_pllclock(crtc);
- }
-
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
@@ -10482,24 +10674,23 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
- struct drm_device *dev = work->crtc->dev;
- enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_plane *primary = crtc->base.primary;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+ intel_unpin_fb_obj(work->old_fb, primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
- intel_fbc_update(dev);
-
if (work->flip_queued_req)
i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
- intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
drm_framebuffer_unreference(work->old_fb);
- BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
- atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+ BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+ atomic_dec(&crtc->unpin_work_count);
kfree(work);
}
@@ -10632,14 +10823,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -10659,7 +10851,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10667,14 +10858,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -10691,7 +10883,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10699,15 +10890,16 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -10730,7 +10922,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10738,15 +10929,16 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -10766,7 +10958,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10774,9 +10965,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@@ -10818,11 +11010,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
- ret = intel_ring_cacheline_align(ring);
+ ret = intel_ring_cacheline_align(req);
if (ret)
return ret;
- ret = intel_ring_begin(ring, len);
+ ret = intel_ring_begin(req, len);
if (ret)
return ret;
@@ -10861,7 +11053,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
- __intel_ring_advance(ring);
return 0;
}
@@ -10970,12 +11161,11 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- bool atomic_update;
u32 start_vbl_count;
intel_mark_page_flip_active(intel_crtc);
- atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+ intel_pipe_update_start(intel_crtc, &start_vbl_count);
if (INTEL_INFO(dev)->gen >= 9)
skl_do_mmio_flip(intel_crtc);
@@ -10983,8 +11173,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
/* use_mmio_flip() restricts MMIO flips to ilk+ */
ilk_do_mmio_flip(intel_crtc);
- if (atomic_update)
- intel_pipe_update_end(intel_crtc, start_vbl_count);
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11031,7 +11220,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
+ struct drm_i915_gem_request *req,
uint32_t flags)
{
return -ENODEV;
@@ -11117,6 +11306,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
bool mmio_flip;
+ struct drm_i915_gem_request *request = NULL;
int ret;
/*
@@ -11223,7 +11413,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
crtc->primary->state,
- mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+ mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
if (ret)
goto cleanup_pending;
@@ -11239,31 +11429,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
} else {
- if (obj->last_write_req) {
- ret = i915_gem_check_olr(obj->last_write_req);
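+ /*
+ * Pinning the fb above may already have allocated a request;
+ * only create a fresh one here if it did not.
+ */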
+ if (!request) {
+ ret = i915_gem_request_alloc(ring, ring->default_context, &request);
if (ret)
goto cleanup_unpin;
}
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- i915_gem_request_assign(&work->flip_queued_req,
- intel_ring_get_request(ring));
+ i915_gem_request_assign(&work->flip_queued_req, request);
}
+ if (request)
+ i915_add_request_no_flush(request);
+
work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
- INTEL_FRONTBUFFER_PRIMARY(pipe));
-
- intel_fbc_disable(dev);
- intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
+ intel_fbc_disable_crtc(intel_crtc);
+ intel_frontbuffer_flip_prepare(dev,
+ to_intel_plane(primary)->frontbuffer_bit);
+
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
@@ -11271,6 +11464,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
+ if (request)
+ i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
@@ -11289,8 +11484,35 @@ free_work:
kfree(work);
if (ret == -EIO) {
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+
out_hang:
- ret = intel_plane_restore(primary);
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+ plane_state = drm_atomic_get_plane_state(state, primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret) {
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (!ret)
+ ret = drm_atomic_commit(state);
+ }
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(state->acquire_ctx);
+ drm_atomic_state_clear(state);
+ goto retry;
+ }
+
+ if (ret)
+ drm_atomic_state_free(state);
+
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
@@ -11300,44 +11522,274 @@ out_hang:
return ret;
}
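/*
 * Editor's sketch, not part of the patch: the out_hang path above uses the
 * standard atomic retry idiom -- on -EDEADLK, back off the acquire context,
 * clear the state and rebuild from scratch. The same idiom in isolation
 * (commit_with_backoff is a hypothetical helper name):
 */
static int commit_with_backoff(struct drm_atomic_state *state)
{
	int ret;

	do {
		ret = drm_atomic_commit(state);
		if (ret != -EDEADLK)
			break;
		/* drop every modeset lock taken so far, then retry cleanly */
		drm_modeset_backoff(state->acquire_ctx);
		drm_atomic_state_clear(state);
	} while (1);

	return ret;
}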
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .atomic_begin = intel_begin_crtc_commit,
- .atomic_flush = intel_finish_crtc_commit,
-};
/**
- * intel_modeset_update_staged_output_state
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
*
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
+ * Returns true if the watermarks need recalculating, false otherwise.
*/
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+static bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ if (plane->state->crtc_w != state->crtc_w)
+ return true;
+
+ return false;
+}
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *plane = plane_state->plane;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->state);
+ int idx = intel_crtc->base.base.id, ret;
+ int i = drm_plane_index(plane);
+ bool mode_changed = needs_modeset(crtc_state);
+ bool was_crtc_enabled = crtc->state->active;
+ bool is_crtc_enabled = crtc_state->active;
+
+ bool turn_off, turn_on, visible, was_visible;
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+ plane->type != DRM_PLANE_TYPE_CURSOR) {
+ ret = skl_update_scaler_plane(
+ to_intel_crtc_state(crtc_state),
+ to_intel_plane_state(plane_state));
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Disabling a plane is always okay; we just need to update
+ * fb tracking in a special way since cleanup_fb() won't
+ * get called by the plane helpers.
+ */
+ if (old_plane_state->base.fb && !fb)
+ intel_crtc->atomic.disabled_planes |= 1 << i;
+
+ was_visible = old_plane_state->visible;
+ visible = to_intel_plane_state(plane_state)->visible;
+
+ if (!was_crtc_enabled && WARN_ON(was_visible))
+ was_visible = false;
+
+ if (!is_crtc_enabled && WARN_ON(visible))
+ visible = false;
+
+ if (!was_visible && !visible)
+ return 0;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+ plane->base.id, fb ? fb->base.id : -1);
+
+ DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+ plane->base.id, was_visible, visible,
+ turn_off, turn_on, mode_changed);
+
+ if (turn_on) {
+ intel_crtc->atomic.update_wm_pre = true;
+ /* must disable cxsr around plane enable/disable */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ intel_crtc->atomic.disable_cxsr = true;
+ /* to potentially re-enable cxsr */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_wm_post = true;
+ }
+ } else if (turn_off) {
+ intel_crtc->atomic.update_wm_post = true;
+ /* must disable cxsr around plane enable/disable */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (is_crtc_enabled)
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.disable_cxsr = true;
+ }
+ } else if (intel_wm_need_update(plane, plane_state)) {
+ intel_crtc->atomic.update_wm_pre = true;
+ }
+
+ if (visible)
+ intel_crtc->atomic.fb_bits |=
+ to_intel_plane(plane)->frontbuffer_bit;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ intel_crtc->atomic.wait_for_flips = true;
+ intel_crtc->atomic.pre_disable_primary = turn_off;
+ intel_crtc->atomic.post_enable_primary = turn_on;
+
+ if (turn_off) {
+ /*
+ * FIXME: Actually, if we still have any other plane
+ * enabled on the pipe we could leave IPS enabled, but
+ * for now let's consider that when we make the primary
+ * plane invisible by setting DSPCNTR to 0 in the
+ * update_primary_plane function, IPS needs to be
+ * disabled.
+ */
+ intel_crtc->atomic.disable_ips = true;
+
+ intel_crtc->atomic.disable_fbc = true;
+ }
+
+ /*
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
+ */
+
+ if (visible &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.crtc == intel_crtc &&
+ plane_state->rotation != BIT(DRM_ROTATE_0))
+ intel_crtc->atomic.disable_fbc = true;
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (turn_on && IS_BROADWELL(dev))
+ intel_crtc->atomic.wait_vblank = true;
+
+ intel_crtc->atomic.update_fbc |= visible || mode_changed;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ if (turn_off && !mode_changed) {
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ 1 << i;
+ }
+ }
+ return 0;
+}
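/*
 * Editor's note: the turn_off/turn_on derivation above reduces to a small
 * truth table (x = don't care); rows are illustrative, not patch text:
 *
 *   was_visible  visible  mode_changed | turn_off  turn_on
 *        0          0          x       |    -         -      (early return)
 *        1          0          x       |    1         0
 *        0          1          x       |    0         1
 *        1          1          0       |    0         0
 *        1          1          1       |    1         1      (full modeset)
 */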
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+ const struct intel_encoder *b)
+{
+ /* masks could be asymmetric, so check both ways */
+ return a == b || (a->cloneable & (1 << b->type) &&
+ b->cloneable & (1 << a->type));
+}
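/*
 * Editor's sketch with made-up encoder types: cloneable is a bitmask of
 * encoder types an encoder may share a crtc with, and the masks need not
 * be symmetric, hence the check in both directions above.
 */
enum { EXAMPLE_TYPE_DAC = 1, EXAMPLE_TYPE_DVO = 2 };

struct example_encoder {
	int type;
	unsigned int cloneable;
};

static bool example_cloneable_pair(const struct example_encoder *a,
				   const struct example_encoder *b)
{
	/* both sides must list the other's type for cloning to be legal */
	return (a->cloneable & (1 << b->type)) &&
	       (b->cloneable & (1 << a->type));
}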
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_encoder *source_encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ source_encoder =
+ to_intel_encoder(connector_state->best_encoder);
+ if (!encoders_cloneable(encoder, source_encoder))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_connector(dev, connector) {
- connector->new_encoder =
- to_intel_encoder(connector->base.encoder);
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (!check_single_encoder_cloning(state, crtc, encoder))
+ return false;
}
- for_each_intel_encoder(dev, encoder) {
- encoder->new_crtc =
- to_intel_crtc(encoder->base.crtc);
+ return true;
+}
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc_state);
+ struct drm_atomic_state *state = crtc_state->state;
+ int ret;
+ bool mode_changed = needs_modeset(crtc_state);
+
+ if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ return -EINVAL;
}
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = crtc->base.state->enable;
+ if (mode_changed && !crtc_state->active)
+ intel_crtc->atomic.update_wm_post = true;
+
+ if (mode_changed && crtc_state->enable &&
+ dev_priv->display.crtc_compute_clock &&
+ !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+ pipe_config);
+ if (ret)
+ return ret;
+ }
+
+ ret = 0;
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (mode_changed)
+ ret = skl_update_scaler_crtc(pipe_config);
+
+ if (!ret)
+ ret = intel_atomic_setup_scalers(dev, intel_crtc,
+ pipe_config);
}
+
+ return ret;
}
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .atomic_begin = intel_begin_crtc_commit,
+ .atomic_flush = intel_finish_crtc_commit,
+ .atomic_check = intel_crtc_atomic_check,
+};
+
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -11355,39 +11807,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
}
}
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
-
- for_each_intel_connector(state->dev, connector) {
- connector->base.encoder = connector->base.state->best_encoder;
- if (connector->base.encoder)
- connector->base.encoder->crtc =
- connector->base.state->crtc;
- }
-
- /* Update crtc of disabled encoders */
- for_each_intel_encoder(state->dev, encoder) {
- int num_connectors = 0;
-
- for_each_intel_connector(state->dev, connector)
- if (connector->base.encoder == &encoder->base)
- num_connectors++;
-
- if (num_connectors == 0)
- encoder->base.crtc = NULL;
- }
-
- for_each_intel_crtc(state->dev, crtc) {
- crtc->base.enabled = crtc->base.state->enable;
- crtc->config = to_intel_crtc_state(crtc->base.state);
- }
-}
-
static void
connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_state *pipe_config)
@@ -11523,17 +11942,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
if (IS_BROXTON(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+ DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ebb0,
+ pipe_config->dpll_hw_state.ebb4,
pipe_config->dpll_hw_state.pll0,
pipe_config->dpll_hw_state.pll1,
pipe_config->dpll_hw_state.pll2,
pipe_config->dpll_hw_state.pll3,
pipe_config->dpll_hw_state.pll6,
pipe_config->dpll_hw_state.pll8,
+ pipe_config->dpll_hw_state.pll9,
+ pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11590,56 +12012,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
}
}
-static bool encoders_cloneable(const struct intel_encoder *a,
- const struct intel_encoder *b)
-{
- /* masks could be asymmetric, so check both ways */
- return a == b || (a->cloneable & (1 << b->type) &&
- b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
-{
- struct intel_encoder *source_encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- source_encoder =
- to_intel_encoder(connector_state->best_encoder);
- if (!encoders_cloneable(encoder, source_encoder))
- return false;
- }
-
- return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (!check_single_encoder_cloning(state, crtc, encoder))
- return false;
- }
-
- return true;
-}
-
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -11693,6 +12065,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_dpll_hw_state dpll_hw_state;
enum intel_dpll_id shared_dpll;
uint32_t ddi_pll_sel;
+ bool force_thru;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
@@ -11704,6 +12077,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
ddi_pll_sel = crtc_state->ddi_pll_sel;
+ force_thru = crtc_state->pch_pfit.force_thru;
memset(crtc_state, 0, sizeof *crtc_state);
@@ -11712,13 +12086,14 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->ddi_pll_sel = ddi_pll_sel;
+ crtc_state->pch_pfit.force_thru = force_thru;
}
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
struct intel_crtc_state *pipe_config)
{
+ struct drm_atomic_state *state = pipe_config->base.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -11726,16 +12101,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
int i;
bool retry = true;
- if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return -EINVAL;
- }
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
-
clear_intel_crtc_state(pipe_config);
pipe_config->cpu_transcoder =
@@ -11823,94 +12188,33 @@ encoder_retry:
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering seems not to pass bits through correctly when it should, so
+ * only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
- return 0;
fail:
return ret;
}
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
- struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc)
- return true;
-
- return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
- return state->mode_changed || state->active_changed;
-}
-
static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_connector *connector;
int i;
- intel_shared_dpll_commit(dev_priv);
-
- for_each_intel_encoder(dev, intel_encoder) {
- if (!intel_encoder->base.crtc)
- continue;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc != intel_encoder->base.crtc)
- continue;
-
- if (crtc_state->enable && needs_modeset(crtc_state))
- intel_encoder->connectors_active = false;
-
- break;
- }
- }
-
- drm_atomic_helper_swap_state(state->dev, state);
- intel_modeset_fixup_state(state);
-
/* Double check state. */
- for_each_crtc(dev, crtc) {
- WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (!connector->encoder || !connector->encoder->crtc)
- continue;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc != connector->encoder->crtc)
- continue;
-
- if (crtc->state->enable && needs_modeset(crtc->state)) {
- struct drm_property *dpms_property =
- dev->mode_config.dpms_property;
-
- connector->dpms = DRM_MODE_DPMS_ON;
- drm_object_property_set_value(&connector->base,
- dpms_property,
- DRM_MODE_DPMS_ON);
-
- intel_encoder = to_intel_encoder(connector->encoder);
- intel_encoder->connectors_active = true;
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
- break;
- }
+ /* Update hwmode for vblank functions */
+ if (crtc->state->active)
+ crtc->hwmode = crtc->state->adjusted_mode;
+ else
+ crtc->hwmode.crtc_clock = 0;
}
-
}
static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11937,27 +12241,133 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
base.head) \
if (mask & (1 << (intel_crtc)->pipe))
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+ unsigned int m2, unsigned int n2,
+ bool exact)
+{
+ if (m == m2 && n == n2)
+ return true;
+
+ if (exact || !m || !n || !m2 || !n2)
+ return false;
+
+ BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+ if (m > m2) {
+ while (m > m2) {
+ m2 <<= 1;
+ n2 <<= 1;
+ }
+ } else if (m < m2) {
+ while (m < m2) {
+ m <<= 1;
+ n <<= 1;
+ }
+ }
+
+ return m == m2 && n == n2;
+}
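/*
 * Worked example (editor's note, values made up): with exact == false,
 * comparing 2/3 against 8/12 left-shifts the smaller pair until the m
 * values meet -- 2/3 -> 4/6 -> 8/12 -- and matches, while 2/3 against
 * 8/13 reaches 8/12 and fails the final equality. The BUILD_BUG_ON
 * above keeps the shifted values from overflowing a signed int.
 */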
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2,
+ bool adjust)
+{
+ if (m_n->tu == m2_n2->tu &&
+ intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+ m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+ intel_compare_m_n(m_n->link_m, m_n->link_n,
+ m2_n2->link_m, m2_n2->link_n, !adjust)) {
+ if (adjust)
+ *m2_n2 = *m_n;
+
+ return true;
+ }
+
+ return false;
+}
+
static bool
intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_state *current_config,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ bool adjust)
{
+ bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+ do { \
+ if (!adjust) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+ } while (0)
+
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
+ }
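/*
 * For reference (editor's expansion, not patch text), a use such as
 * PIPE_CONF_CHECK_I(fdi_lanes) expands to roughly:
 *
 *	if (current_config->fdi_lanes != pipe_config->fdi_lanes) {
 *		INTEL_ERR_OR_DBG_KMS("mismatch in fdi_lanes "
 *				     "(expected %i, found %i)\n",
 *				     current_config->fdi_lanes,
 *				     pipe_config->fdi_lanes);
 *		ret = false;
 *	}
 *
 * so with adjust set, a mismatch is logged at debug level only and the
 * compare keeps going instead of failing on the first field.
 */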
+
+#define PIPE_CONF_CHECK_M_N(name) \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name,\
+ adjust)) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
+ }
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name, adjust) && \
+ !intel_compare_link_m_n(&current_config->alt_name, \
+ &pipe_config->name, adjust)) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "or tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ current_config->alt_name.tu, \
+ current_config->alt_name.gmch_m, \
+ current_config->alt_name.gmch_n, \
+ current_config->alt_name.link_m, \
+ current_config->alt_name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
}
/* This is required for BDW+ where there is only one set of registers for
@@ -11968,30 +12378,30 @@ intel_pipe_config_compare(struct drm_device *dev,
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
if ((current_config->name != pipe_config->name) && \
(current_config->alt_name != pipe_config->name)) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i or %i, found %i)\n", \
current_config->name, \
current_config->alt_name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- DRM_ERROR("mismatch in " #name "(" #mask ") " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- DRM_ERROR("mismatch in " #name " " \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
- return false; \
+ ret = false; \
}
#define PIPE_CONF_QUIRK(quirk) \
@@ -12001,35 +12411,18 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
- PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
- PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
- PIPE_CONF_CHECK_I(fdi_m_n.link_m);
- PIPE_CONF_CHECK_I(fdi_m_n.link_n);
- PIPE_CONF_CHECK_I(fdi_m_n.tu);
+ PIPE_CONF_CHECK_M_N(fdi_m_n);
PIPE_CONF_CHECK_I(has_dp_encoder);
if (INTEL_INFO(dev)->gen < 8) {
- PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
- PIPE_CONF_CHECK_I(dp_m_n.link_m);
- PIPE_CONF_CHECK_I(dp_m_n.link_n);
- PIPE_CONF_CHECK_I(dp_m_n.tu);
-
- if (current_config->has_drrs) {
- PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
- PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
- PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
- PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
- PIPE_CONF_CHECK_I(dp_m2_n2.tu);
- }
- } else {
- PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
- PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
- }
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+
+ PIPE_CONF_CHECK_I(has_drrs);
+ if (current_config->has_drrs)
+ PIPE_CONF_CHECK_M_N(dp_m2_n2);
+ } else
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12071,21 +12464,11 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- /*
- * FIXME: BIOS likes to set up a cloned config with lvds+external
- * screen. Since we don't yet re-compute the pipe config when moving
- * just the lvds port away to another pipe the sw tracking won't match.
- *
- * Proper atomic modesets with recomputed global state will fix this.
- * Until then just don't check gmch state for inherited modes.
- */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
- }
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
@@ -12125,8 +12508,9 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
- return true;
+ return ret;
}
static void check_wm_state(struct drm_device *dev)
@@ -12180,17 +12564,23 @@ static void check_wm_state(struct drm_device *dev)
}
static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
- struct intel_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_connector_state *state = connector->state;
- for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
- intel_connector_check_state(connector);
+ intel_connector_check_state(to_intel_connector(connector));
- I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
- "connector's staged encoder doesn't match current encoder\n");
+ I915_STATE_WARN(state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
}
}
@@ -12202,124 +12592,106 @@ check_encoder_state(struct drm_device *dev)
for_each_intel_encoder(dev, encoder) {
bool enabled = false;
- bool active = false;
- enum pipe pipe, tracked_pipe;
+ enum pipe pipe;
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
- I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
- "encoder's stage crtc doesn't match current crtc\n");
- I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
- "encoder's active_connectors set, but no crtc\n");
-
for_each_intel_connector(dev, connector) {
- if (connector->base.encoder != &encoder->base)
+ if (connector->base.state->best_encoder != &encoder->base)
continue;
enabled = true;
- if (connector->base.dpms != DRM_MODE_DPMS_OFF)
- active = true;
+
+ I915_STATE_WARN(connector->base.state->crtc !=
+ encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
}
- /*
- * for MST connectors if we unplug the connector is gone
- * away but the encoder is still connected to a crtc
- * until a modeset happens in response to the hotplug.
- */
- if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
- continue;
I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
- I915_STATE_WARN(active && !encoder->base.crtc,
- "active encoder with no crtc\n");
-
- I915_STATE_WARN(encoder->connectors_active != active,
- "encoder's computed active state doesn't match tracked active state "
- "(expected %i, found %i)\n", active, encoder->connectors_active);
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != encoder->connectors_active,
- "encoder's hw state doesn't match sw tracking "
- "(expected %i, found %i)\n",
- encoder->connectors_active, active);
-
- if (!encoder->base.crtc)
- continue;
- tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- I915_STATE_WARN(active && pipe != tracked_pipe,
- "active encoder's pipe doesn't match"
- "(expected %i, found %i)\n",
- tracked_pipe, pipe);
+ if (!encoder->base.crtc) {
+ bool active;
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
+ }
}
}
static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
struct intel_encoder *encoder;
- struct intel_crtc_state pipe_config;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- for_each_intel_crtc(dev, crtc) {
- bool enabled = false;
- bool active = false;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config, *sw_config;
+ bool active;
- memset(&pipe_config, 0, sizeof(pipe_config));
+ if (!needs_modeset(crtc->state))
+ continue;
- DRM_DEBUG_KMS("[CRTC:%d]\n",
- crtc->base.base.id);
+ __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+ pipe_config = to_intel_crtc_state(old_crtc_state);
+ memset(pipe_config, 0, sizeof(*pipe_config));
+ pipe_config->base.crtc = crtc;
+ pipe_config->base.state = old_state;
- I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
- "active crtc, but not enabled in sw tracking\n");
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.id);
- for_each_intel_encoder(dev, encoder) {
- if (encoder->base.crtc != &crtc->base)
- continue;
- enabled = true;
- if (encoder->connectors_active)
- active = true;
- }
+ active = dev_priv->display.get_pipe_config(intel_crtc,
+ pipe_config);
- I915_STATE_WARN(active != crtc->active,
- "crtc's computed active state doesn't match tracked active state "
- "(expected %i, found %i)\n", active, crtc->active);
- I915_STATE_WARN(enabled != crtc->base.state->enable,
- "crtc's computed enabled state doesn't match tracked enabled state "
- "(expected %i, found %i)\n", enabled,
- crtc->base.state->enable);
+ /* hw state is inconsistent with the pipe quirk */
+ if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+ (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+ active = crtc->state->active;
- active = dev_priv->display.get_pipe_config(crtc,
- &pipe_config);
+ I915_STATE_WARN(crtc->state->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", crtc->state->active, active);
- /* hw state is inconsistent with the pipe quirk */
- if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
- (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
- active = crtc->active;
+ I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
- for_each_intel_encoder(dev, encoder) {
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
enum pipe pipe;
- if (encoder->base.crtc != &crtc->base)
- continue;
- if (encoder->get_hw_state(encoder, &pipe))
- encoder->get_config(encoder, &pipe_config);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != crtc->state->active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active, crtc->state->active);
+
+ I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
+
+ if (active)
+ encoder->get_config(encoder, pipe_config);
}
- I915_STATE_WARN(crtc->active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", crtc->active, active);
+ if (!crtc->state->active)
+ continue;
- if (active &&
- !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+ sw_config = to_intel_crtc_state(crtc->state);
+ if (!intel_pipe_config_compare(dev, sw_config,
+ pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(crtc, &pipe_config,
+ intel_dump_pipe_config(intel_crtc, pipe_config,
"[hw state]");
- intel_dump_pipe_config(crtc, crtc->config,
+ intel_dump_pipe_config(intel_crtc, sw_config,
"[sw state]");
}
}
@@ -12374,13 +12746,14 @@ check_shared_dpll_state(struct drm_device *dev)
}
}
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
{
check_wm_state(dev);
- check_connector_state(dev);
+ check_connector_state(dev, old_state);
check_encoder_state(dev);
- check_crtc_state(dev);
+ check_crtc_state(dev, old_state);
check_shared_dpll_state(dev);
}
@@ -12434,557 +12807,390 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct intel_crtc_state *pipe_config;
- int ret = 0;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ERR_PTR(ret);
-
- ret = drm_atomic_helper_check_modeset(state->dev, state);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Note this needs changes when we start tracking multiple modes
- * and crtcs. At that point we'll need to compute the whole config
- * (i.e. one pipe_config for each crtc) rather than just the one
- * for this crtc.
- */
- pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(pipe_config))
- return pipe_config;
-
- if (!pipe_config->base.enable)
- return pipe_config;
-
- ret = intel_modeset_pipe_config(crtc, state, pipe_config);
- if (ret)
- return ERR_PTR(ret);
-
- /* Check things that can only be changed through modeset */
- if (pipe_config->has_audio !=
- to_intel_crtc(crtc)->config->has_audio)
- pipe_config->base.mode_changed = true;
-
- /*
- * Note we have an issue here with infoframes: current code
- * only updates them on the full mode set path per hw
- * requirements. So here we should be checking for any
- * required changes and forcing a mode set.
- */
-
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
- ret = drm_atomic_helper_check_planes(state->dev, state);
- if (ret)
- return ERR_PTR(ret);
-
- return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned clear_pipes = 0;
+ struct intel_shared_dpll_config *shared_dpll = NULL;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *intel_crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int ret = 0;
int i;
if (!dev_priv->display.crtc_compute_clock)
- return 0;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- intel_crtc = to_intel_crtc(crtc);
- intel_crtc_state = to_intel_crtc_state(crtc_state);
-
- if (needs_modeset(crtc_state)) {
- clear_pipes |= 1 << intel_crtc->pipe;
- intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
- }
- }
-
- ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
- if (ret)
- goto done;
+ return;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc_state) || !crtc_state->enable)
- continue;
+ int dpll;
intel_crtc = to_intel_crtc(crtc);
intel_crtc_state = to_intel_crtc_state(crtc_state);
+ dpll = intel_crtc_state->shared_dpll;
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- intel_crtc_state);
- if (ret) {
- intel_shared_dpll_abort_config(dev_priv);
- goto done;
- }
- }
+ if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+ continue;
-done:
- return ret;
-}
+ intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- int ret;
+ if (!shared_dpll)
+ shared_dpll = intel_atomic_get_shared_dpll_state(state);
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
- ret = valleyview_modeset_global_pipes(state);
- if (ret)
- return ret;
+ shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
}
-
- ret = __intel_set_mode_setup_plls(state);
- if (ret)
- return ret;
-
- return 0;
}
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
- struct intel_crtc_state *pipe_config)
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
- struct drm_device *dev = modeset_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int ret = 0;
+ struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc;
+ struct intel_crtc_state *first_crtc_state = NULL;
+ struct intel_crtc_state *other_crtc_state = NULL;
+ enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
int i;
- ret = __intel_set_mode_checks(state);
- if (ret < 0)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
+ /* look at all crtc's that are going to be enabled during the modeset */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc_state))
+ intel_crtc = to_intel_crtc(crtc);
+
+ if (!crtc_state->active || !needs_modeset(crtc_state))
continue;
- if (!crtc_state->enable) {
- intel_crtc_disable(crtc);
- } else if (crtc->state->enable) {
- intel_crtc_disable_planes(crtc);
- dev_priv->display.crtc_disable(crtc);
+ if (first_crtc_state) {
+ other_crtc_state = to_intel_crtc_state(crtc_state);
+ break;
+ } else {
+ first_crtc_state = to_intel_crtc_state(crtc_state);
+ first_pipe = intel_crtc->pipe;
}
}
- /* crtc->mode is already used by the ->mode_set callbacks, hence we need
- * to set it here already despite that we pass it down the callchain.
- *
- * Note we'll need to fix this up when we start tracking multiple
- * pipes; here we assume a single modeset_pipe and only track the
- * single crtc and mode.
- */
- if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
- modeset_crtc->mode = pipe_config->base.mode;
-
- /*
- * Calculate and store various constants which
- * are later needed by vblank and swap-completion
- * timestamping. They are derived from true hwmode.
- */
- drm_calc_timestamping_constants(modeset_crtc,
- &pipe_config->base.adjusted_mode);
- }
-
- /* Only after disabling all output pipelines that will be changed can we
- * update the the output configuration. */
- intel_modeset_update_state(state);
+ /* No workaround needed? */
+ if (!first_crtc_state)
+ return 0;
- /* The state has been swaped above, so state actually contains the
- * old state now. */
+ /* w/a possibly needed, check how many crtc's are already enabled. */
+ for_each_intel_crtc(state->dev, intel_crtc) {
+ struct intel_crtc_state *pipe_config;
- modeset_update_crtc_power_domains(state);
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return PTR_ERR(pipe_config);
- drm_atomic_helper_commit_planes(dev, state);
+ pipe_config->hsw_workaround_pipe = INVALID_PIPE;
- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!pipe_config->base.active ||
+ needs_modeset(&pipe_config->base))
continue;
- update_scanline_offset(to_intel_crtc(crtc));
+ /* 2 or more enabled crtcs means no need for w/a */
+ if (enabled_pipe != INVALID_PIPE)
+ return 0;
- dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ enabled_pipe = intel_crtc->pipe;
}
- /* FIXME: add subpixel order */
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
+ if (enabled_pipe != INVALID_PIPE)
+ first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+ else if (other_crtc_state)
+ other_crtc_state->hsw_workaround_pipe = first_pipe;
return 0;
}
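/*
 * Editor's sketch (hypothetical helper, not in this patch): the consumer
 * of hsw_workaround_pipe would delay plane enabling roughly like this,
 * using the stock vblank wait helper:
 */
static void example_hsw_plane_delay(struct drm_crtc *running_crtc)
{
	/* let the already-running pipe complete two full frames first */
	drm_crtc_wait_one_vblank(running_crtc);
	drm_crtc_wait_one_vblank(running_crtc);
}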
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- bool force_restore)
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
- int ret;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
- ret = __intel_set_mode(crtc, pipe_config);
+ /* add all active pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- if (ret == 0 && force_restore) {
- intel_modeset_update_staged_output_state(crtc->dev);
- intel_modeset_check_state(crtc->dev);
- }
+ if (!crtc_state->active || needs_modeset(crtc_state))
+ continue;
- return ret;
-}
+ crtc_state->mode_changed = true;
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- bool force_restore)
-{
- struct intel_crtc_state *pipe_config;
- int ret = 0;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ break;
- pipe_config = intel_modeset_compute_config(crtc, state);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ break;
}
- ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
- if (ret)
- goto out;
-
-out:
return ret;
}
-void intel_crtc_restore_mode(struct drm_crtc *crtc)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_crtc_state *crtc_state;
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
- crtc->base.id);
- return;
- }
-
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-
- /* The force restore path in the HW readout code relies on the staged
- * config still keeping the user requested config while the actual
- * state has been overwritten by the configuration read from HW. We
- * need to copy the staged config to the atomic state, otherwise the
- * mode set will just reapply the state the HW is already in. */
- for_each_intel_encoder(dev, encoder) {
- if (&encoder->new_crtc->base != crtc)
- continue;
-
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder != encoder)
- continue;
-
- connector_state = drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state)) {
- DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
- connector->base.base.id,
- connector->base.name,
- PTR_ERR(connector_state));
- continue;
- }
-
- connector_state->crtc = crtc;
- connector_state->best_encoder = &encoder->base;
- }
- }
-
- crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(crtc_state)) {
- DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
- crtc->base.id, PTR_ERR(crtc_state));
- drm_atomic_state_free(state);
- return;
+ if (!check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ return -EINVAL;
}
- crtc_state->base.active = crtc_state->base.enable =
- to_intel_crtc(crtc)->new_enabled;
-
- drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
+ /*
+ * See if the config requires any additional preparation, e.g.
+ * to adjust global state with pipes off. We need to do this
+ * here so we can get the modeset_pipe updated config for the new
+ * mode set on this crtc. For other crtcs we need to use the
+ * adjusted_mode bits in the crtc directly.
+ */
+ if (dev_priv->display.modeset_calc_cdclk) {
+ unsigned int cdclk;
- intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
- crtc->primary->fb, crtc->x, crtc->y);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
- ret = intel_set_mode(crtc, state, false);
- if (ret)
- drm_atomic_state_free(state);
-}
+ cdclk = to_intel_atomic_state(state)->cdclk;
+ if (!ret && cdclk != dev_priv->cdclk_freq)
+ ret = intel_modeset_all_pipes(state);
-#undef for_each_intel_crtc_masked
+ if (ret < 0)
+ return ret;
+ } else
+ to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
- struct drm_mode_set *set)
-{
- int ro;
+ intel_modeset_clear_plls(state);
- for (ro = 0; ro < set->num_connectors; ro++)
- if (set->connectors[ro] == &connector->base)
- return true;
+ if (IS_HASWELL(dev))
+ return haswell_mode_set_planes_workaround(state);
- return false;
+ return 0;
}
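/*
 * Editor's summary of the cdclk handling above, restated as a sketch
 * (not patch code):
 *
 *	ret = dev_priv->display.modeset_calc_cdclk(state);
 *	if (!ret && to_intel_atomic_state(state)->cdclk != dev_priv->cdclk_freq)
 *		ret = intel_modeset_all_pipes(state);
 *
 * i.e. a display-clock change is promoted to a modeset on every active
 * pipe rather than being applied behind their backs.
 */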
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
- struct drm_mode_set *set,
- struct drm_atomic_state *state)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- struct intel_connector *connector;
- struct drm_connector *drm_connector;
- struct drm_connector_state *connector_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int i, ret;
-
- /* The upper layers ensure that we either disable a crtc or have a list
- * of connectors. For paranoia, double-check this. */
- WARN_ON(!set->fb && (set->num_connectors != 0));
- WARN_ON(set->fb && (set->num_connectors == 0));
+ int ret, i;
+ bool any_ms = false;
- for_each_intel_connector(dev, connector) {
- bool in_mode_set = intel_connector_in_mode_set(connector, set);
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
- if (!in_mode_set && connector->base.state->crtc != set->crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc_state);
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
+ /* Catch I915_MODE_FLAG_INHERITED */
+ if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+ crtc_state->mode_changed = true;
- if (in_mode_set) {
- int pipe = to_intel_crtc(set->crtc)->pipe;
- connector_state->best_encoder =
- &intel_find_encoder(connector, pipe)->base;
+ if (!crtc_state->enable) {
+ if (needs_modeset(crtc_state))
+ any_ms = true;
+ continue;
}
- if (connector->base.state->crtc != set->crtc)
+ if (!needs_modeset(crtc_state))
continue;
- /* If we disable the crtc, disable all its connectors. Also, if
- * the connector is on the changing crtc but not on the new
- * connector list, disable it. */
- if (!set->fb || !in_mode_set) {
- connector_state->best_encoder = NULL;
+ /* FIXME: For only active_changed we shouldn't need to do any
+ * state recomputation at all. */
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.base.id,
- connector->base.name);
- }
- }
- /* connector->new_encoder is now updated for all connectors. */
-
- for_each_connector_in_state(state, drm_connector, connector_state, i) {
- connector = to_intel_connector(drm_connector);
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
- if (!connector_state->best_encoder) {
- ret = drm_atomic_set_crtc_for_connector(connector_state,
- NULL);
- if (ret)
- return ret;
+ ret = intel_modeset_pipe_config(crtc, pipe_config);
+ if (ret)
+ return ret;
- continue;
+ if (i915.fastboot &&
+ intel_pipe_config_compare(state->dev,
+ to_intel_crtc_state(crtc->state),
+ pipe_config, true)) {
+ crtc_state->mode_changed = false;
}
- if (intel_connector_in_mode_set(connector, set)) {
- struct drm_crtc *crtc = connector->base.state->crtc;
-
- /* If this connector was in a previous crtc, add it
- * to the state. We might need to disable it. */
- if (crtc) {
- crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ if (needs_modeset(crtc_state)) {
+ any_ms = true;
- ret = drm_atomic_set_crtc_for_connector(connector_state,
- set->crtc);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
- /* Make sure the new CRTC will work with the encoder */
- if (!drm_encoder_crtc_ok(connector_state->best_encoder,
- connector_state->crtc)) {
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.base.id,
- connector->base.name,
- connector_state->crtc->base.id);
-
- if (connector_state->best_encoder != &connector->encoder->base)
- connector->encoder =
- to_intel_encoder(connector_state->best_encoder);
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ needs_modeset(crtc_state) ?
+ "[modeset]" : "[fastset]");
}
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- bool has_connectors;
+ if (any_ms) {
+ ret = intel_modeset_checks(state);
- ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
+ } else
+ to_intel_atomic_state(state)->cdclk =
+ to_i915(state->dev)->cdclk_freq;
+
+ return drm_atomic_helper_check_planes(state->dev, state);
+}
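/*
 * Editor's note on the fastboot compare above: with adjust == true,
 * intel_pipe_config_compare() downgrades mismatch logging to debug
 * level and lets the m/n checks rewrite the sw state when the ratios
 * differ only by a power-of-two factor, so a BIOS-inherited config can
 * take the "[fastset]" path instead of forcing a full modeset.
 */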
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+ int i;
+ bool any_ms = false;
- has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
- if (has_connectors != crtc_state->enable)
- crtc_state->enable =
- crtc_state->active = has_connectors;
+ if (async) {
+ DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ return -EINVAL;
}
- ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
- set->fb, set->x, set->y);
+ ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
- crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ drm_atomic_helper_swap_state(dev, state);
- if (set->mode)
- drm_mode_copy(&crtc_state->mode, set->mode);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (set->num_connectors)
- crtc_state->active = true;
+ if (!needs_modeset(crtc->state))
+ continue;
- return 0;
-}
+ any_ms = true;
+ intel_pre_plane_update(intel_crtc);
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
+ if (crtc_state->active) {
+ intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc->active = false;
+ intel_disable_shared_dpll(intel_crtc);
+ }
+ }
- return plane_state->visible;
-}
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_crtc_state(state);
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
- struct drm_device *dev;
- struct drm_atomic_state *state = NULL;
- struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
- int ret;
+ if (any_ms) {
+ intel_shared_dpll_commit(state);
- BUG_ON(!set);
- BUG_ON(!set->crtc);
- BUG_ON(!set->crtc->helper_private);
+ drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+ modeset_update_crtc_power_domains(state);
+ }
- /* Enforce sane interface api - has been abused by the fb helper. */
- BUG_ON(!set->mode && set->fb);
- BUG_ON(set->fb && set->num_connectors == 0);
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool modeset = needs_modeset(crtc->state);
- if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
- } else {
- DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- }
+ if (modeset && crtc->state->active) {
+ update_scanline_offset(to_intel_crtc(crtc));
+ dev_priv->display.crtc_enable(crtc);
+ }
- dev = set->crtc->dev;
+ if (!modeset)
+ intel_pre_plane_update(intel_crtc);
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+ intel_post_plane_update(intel_crtc);
+ }
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ /* FIXME: add subpixel order */
- ret = intel_modeset_stage_output_state(dev, set, state);
- if (ret)
- goto out;
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
- pipe_config = intel_modeset_compute_config(set->crtc, state);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
- }
+ if (any_ms)
+ intel_modeset_check_state(dev, state);
- intel_update_pipe_size(to_intel_crtc(set->crtc));
+ drm_atomic_state_free(state);
- primary_plane_was_visible = primary_plane_visible(set->crtc);
+ return 0;
+}
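/*
 * Editor's sketch of the resulting legacy-entry call flow (an assumption
 * based on the helpers used here, not text from the patch):
 *
 *	drm_atomic_helper_set_config(set)
 *	  -> drm_atomic_commit(state)
 *	       -> mode_config.funcs->atomic_check()   == intel_atomic_check
 *	       -> mode_config.funcs->atomic_commit()  == intel_atomic_commit
 */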
- ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (!ret) {
+ if (!crtc_state->active)
+ goto out;
+
+ crtc_state->mode_changed = true;
+ ret = drm_atomic_commit(state);
}
- if (ret) {
- DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
- set->crtc->base.id, ret);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(state->acquire_ctx);
+ goto retry;
}
-out:
if (ret)
+out:
drm_atomic_state_free(state);
- return ret;
}
+#undef for_each_intel_crtc_masked
+
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
- .set_config = intel_crtc_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13081,6 +13287,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_update_cdclk(dev);
+
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13092,28 +13300,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- /* Update watermarks on tiling changes. */
- if (!plane->state->fb || !state->fb ||
- plane->state->fb->modifier[0] != state->fb->modifier[0] ||
- plane->state->rotation != state->rotation)
- return true;
-
- return false;
-}
-
-/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -13132,27 +13318,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_plane->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- unsigned frontbuffer_bits = 0;
int ret = 0;
if (!obj)
return 0;
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
- break;
- case DRM_PLANE_TYPE_CURSOR:
- frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
- break;
- }
-
mutex_lock(&dev->struct_mutex);
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13162,11 +13334,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
}
if (ret == 0)
- i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+ i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -13213,7 +13385,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
dev = intel_crtc->base.dev;
dev_priv = dev->dev_private;
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- cdclk = dev_priv->display.get_display_clock_speed(dev);
+ cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
if (!crtc_clock || !cdclk)
return DRM_PLANE_HELPER_NO_SCALING;
@@ -13231,105 +13403,28 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
static int
intel_check_primary_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = state->base.crtc;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->base.fb;
- struct drm_rect *dest = &state->dst;
- struct drm_rect *src = &state->src;
- const struct drm_rect *clip = &state->clip;
- bool can_position = false;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int ret;
-
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
- crtc_state = state->base.state ?
- intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ bool can_position = false;
- if (INTEL_INFO(dev)->gen >= 9) {
- /* use scaler when colorkey is not required */
- if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
- min_scale = 1;
- max_scale = skl_max_scale(intel_crtc, crtc_state);
- }
+ /* use scaler when colorkey is not required */
+ if (INTEL_INFO(plane->dev)->gen >= 9 &&
+ state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
can_position = true;
}
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
- min_scale,
- max_scale,
- can_position, true,
- &state->visible);
- if (ret)
- return ret;
-
- if (intel_crtc->active) {
- struct intel_plane_state *old_state =
- to_intel_plane_state(plane->state);
-
- intel_crtc->atomic.wait_for_flips = true;
-
- /*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
- */
- if (state->visible &&
- INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.crtc == intel_crtc &&
- state->base.rotation != BIT(DRM_ROTATE_0)) {
- intel_crtc->atomic.disable_fbc = true;
- }
-
- if (state->visible && !old_state->visible) {
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_crtc->atomic.wait_vblank = true;
- }
-
- /*
- * FIXME: Actually if we will still have any other plane enabled
- * on the pipe we could let IPS enabled still, but for
- * now lets consider that when we make primary invisible
- * by setting DSPCNTR to 0 on update_primary_plane function
- * IPS needs to be disable.
- */
- if (!state->visible || !fb)
- intel_crtc->atomic.disable_ips = true;
-
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
- intel_crtc->atomic.update_fbc = true;
-
- if (intel_wm_need_update(plane, &state->base))
- intel_crtc->atomic.update_wm = true;
- }
-
- if (INTEL_INFO(dev)->gen >= 9) {
- ret = skl_update_scaler_users(intel_crtc, crtc_state,
- to_intel_plane(plane), state, 0);
- if (ret)
- return ret;
- }
-
- return 0;
+ return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+ &state->dst, &state->clip,
+ min_scale, max_scale,
+ can_position, true,
+ &state->visible);
}
static void
@@ -13350,20 +13445,19 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- if (intel_crtc->active) {
- if (state->visible)
- /* FIXME: kill this fastboot hack */
- intel_update_pipe_size(intel_crtc);
+ if (!crtc->state->active)
+ return;
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
- }
+ if (state->visible)
+ /* FIXME: kill this fastboot hack */
+ intel_update_pipe_size(intel_crtc);
+
+ dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
}
static void
intel_disable_primary_plane(struct drm_plane *plane,
- struct drm_crtc *crtc,
- bool force)
+ struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13371,96 +13465,30 @@ intel_disable_primary_plane(struct drm_plane *plane,
dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *p;
- unsigned fb_bits = 0;
- /* Track fb's for any planes being disabled */
- list_for_each_entry(p, &dev->mode_config.plane_list, head) {
- intel_plane = to_intel_plane(p);
-
- if (intel_crtc->atomic.disabled_planes &
- (1 << drm_plane_index(p))) {
- switch (p->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
- break;
- case DRM_PLANE_TYPE_CURSOR:
- fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
- break;
- }
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
- mutex_unlock(&dev->struct_mutex);
- }
- }
-
- if (intel_crtc->atomic.wait_for_flips)
- intel_crtc_wait_for_pending_flips(crtc);
-
- if (intel_crtc->atomic.disable_fbc)
- intel_fbc_disable(dev);
-
- if (intel_crtc->atomic.disable_ips)
- hsw_disable_ips(intel_crtc);
-
- if (intel_crtc->atomic.pre_disable_primary)
- intel_pre_disable_primary(crtc);
-
- if (intel_crtc->atomic.update_wm)
+ if (intel_crtc->atomic.update_wm_pre)
intel_update_watermarks(crtc);
- intel_runtime_pm_get(dev_priv);
-
/* Perform vblank evasion around commit operation */
- if (intel_crtc->active)
- intel_crtc->atomic.evade =
- intel_pipe_update_start(intel_crtc,
- &intel_crtc->atomic.start_vbl_count);
+ if (crtc->state->active)
+ intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
+ if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+ skl_detach_scalers(intel_crtc);
}
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *p;
-
- if (intel_crtc->atomic.evade)
- intel_pipe_update_end(intel_crtc,
- intel_crtc->atomic.start_vbl_count);
-
- intel_runtime_pm_put(dev_priv);
-
- if (intel_crtc->atomic.wait_vblank)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
- if (intel_crtc->atomic.update_fbc) {
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (intel_crtc->atomic.post_enable_primary)
- intel_post_enable_primary(crtc);
- drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
- if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
- intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
- false, false);
-
- memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+ if (crtc->state->active)
+ intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
}
/**
@@ -13495,7 +13523,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
struct intel_plane *primary;
struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
- int num_formats;
+ unsigned int num_formats;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL)
@@ -13516,10 +13544,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
}
primary->pipe = pipe;
primary->plane = pipe;
+ primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
primary->disable_plane = intel_disable_primary_plane;
- primary->ckey.flags = I915_SET_COLORKEY_NONE;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -13567,37 +13595,29 @@ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *
static int
intel_check_cursor_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->base.crtc;
- struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
- struct drm_rect *dest = &state->dst;
- struct drm_rect *src = &state->src;
- const struct drm_rect *clip = &state->clip;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc;
unsigned stride;
int ret;
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- src, dest, clip,
+ ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+ &state->dst, &state->clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &state->visible);
if (ret)
return ret;
-
/* if we want to turn off the cursor ignore width and height */
if (!obj)
- goto finish;
+ return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -13611,34 +13631,16 @@ intel_check_cursor_plane(struct drm_plane *plane,
if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
- ret = -EINVAL;
- }
-
-finish:
- if (intel_crtc->active) {
- if (plane->state->crtc_w != state->base.crtc_w)
- intel_crtc->atomic.update_wm = true;
-
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+ return -EINVAL;
}
- return ret;
+ return 0;
}
static void
intel_disable_cursor_plane(struct drm_plane *plane,
- struct drm_crtc *crtc,
- bool force)
+ struct drm_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!force) {
- plane->fb = NULL;
- intel_crtc->cursor_bo = NULL;
- intel_crtc->cursor_addr = 0;
- }
-
intel_crtc_update_cursor(crtc, false);
}
@@ -13671,9 +13673,9 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
-update:
- if (intel_crtc->active)
+update:
+ if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}
@@ -13698,6 +13700,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
@@ -13738,8 +13741,6 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
for (i = 0; i < intel_crtc->num_scalers; i++) {
intel_scaler = &scaler_state->scalers[i];
intel_scaler->in_use = 0;
- intel_scaler->id = i;
-
intel_scaler->mode = PS_SCALER_MODE_DYN;
}
@@ -13811,6 +13812,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_cntl = ~0;
intel_crtc->cursor_size = ~0;
+ intel_crtc->wm.cxsr_allowed = true;
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13945,8 +13948,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found ||
- (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+ if (found || IS_SKYLAKE(dev))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13959,6 +13961,15 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
intel_ddi_init(dev, PORT_D);
+ /*
+ * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+ */
+ if (IS_SKYLAKE(dev) &&
+ (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+ dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+ dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+ intel_ddi_init(dev, PORT_E);
+
} else if (HAS_PCH_SPLIT(dev)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -14022,18 +14033,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ if (!found && IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && IS_G4X(dev))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -14046,15 +14057,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ if (IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (SUPPORTS_INTEGRATED_DP(dev))
+ if (IS_G4X(dev))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (SUPPORTS_INTEGRATED_DP(dev) &&
+ if (IS_G4X(dev) &&
(I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
} else if (IS_GEN2(dev))
@@ -14099,9 +14110,27 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
return drm_gem_handle_create(file, &obj->base, handle);
}
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct drm_device *dev = fb->dev;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
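
The new .dirty hook above backs the DIRTYFB ioctl: a client that renders straight into a scanout buffer (fbdev, or a dumb-buffer KMS client) reports damage, and i915 answers by flushing frontbuffer tracking (ORIGIN_DIRTYFB feeds PSR/FBC invalidation); the clip rectangles themselves are ignored here. A hedged userspace sketch using libdrm, where fd and fb_id are assumed to come from the usual KMS setup:

	/* Report one damaged rectangle on framebuffer fb_id; i915's hook
	 * flushes frontbuffer tracking regardless of the clip contents. */
	drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };
	drmModeDirtyFB(fd, fb_id, &clip, 1);
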
+
static const struct drm_framebuffer_funcs intel_fb_funcs = {
.destroy = intel_user_framebuffer_destroy,
.create_handle = intel_user_framebuffer_create_handle,
+ .dirty = intel_user_framebuffer_dirty,
};
static
@@ -14296,7 +14325,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
return intel_framebuffer_create(dev, mode_cmd, obj);
}
-#ifndef CONFIG_DRM_I915_FBDEV
+#ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
@@ -14307,6 +14336,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fbdev_output_poll_changed,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
+ .atomic_state_alloc = intel_atomic_state_alloc,
+ .atomic_state_clear = intel_atomic_state_clear,
};
/* Set up chip specific display functions */
@@ -14333,7 +14364,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
skylake_update_primary_plane;
} else if (HAS_DDI(dev)) {
@@ -14344,7 +14374,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -14355,7 +14384,6 @@ static void intel_init_display(struct drm_device *dev)
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
@@ -14365,7 +14393,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
} else {
@@ -14375,7 +14402,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
}
@@ -14384,6 +14410,9 @@ static void intel_init_display(struct drm_device *dev)
if (IS_SKYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
+ else if (IS_BROXTON(dev))
+ dev_priv->display.get_display_clock_speed =
+ broxton_get_display_clock_speed;
else if (IS_BROADWELL(dev))
dev_priv->display.get_display_clock_speed =
broadwell_get_display_clock_speed;
@@ -14397,9 +14426,21 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
- IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ IS_GEN6(dev) || IS_IVYBRIDGE(dev))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
+ else if (IS_GM45(dev))
+ dev_priv->display.get_display_clock_speed =
+ gm45_get_display_clock_speed;
+ else if (IS_CRESTLINE(dev))
+ dev_priv->display.get_display_clock_speed =
+ i965gm_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
+ else if (IS_G33(dev) || IS_G4X(dev))
+ dev_priv->display.get_display_clock_speed =
+ g33_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
@@ -14417,10 +14458,12 @@ static void intel_init_display(struct drm_device *dev)
i865_get_display_clock_speed;
else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
- i855_get_display_clock_speed;
- else /* 852, 830 */
+ i85x_get_display_clock_speed;
+ else { /* 830 */
+ WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
+ }
if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14431,12 +14474,22 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ if (IS_BROADWELL(dev)) {
+ dev_priv->display.modeset_commit_cdclk =
+ broadwell_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broadwell_modeset_calc_cdclk;
+ }
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.modeset_global_resources =
- valleyview_modeset_global_resources;
+ dev_priv->display.modeset_commit_cdclk =
+ valleyview_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ valleyview_modeset_calc_cdclk;
} else if (IS_BROXTON(dev)) {
- dev_priv->display.modeset_global_resources =
- broxton_modeset_global_resources;
+ dev_priv->display.modeset_commit_cdclk =
+ broxton_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broxton_modeset_calc_cdclk;
}
switch (INTEL_INFO(dev)->gen) {
@@ -14655,13 +14708,9 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
+ intel_update_cdclk(dev);
intel_prepare_ddi(dev);
-
- if (IS_VALLEYVIEW(dev))
- vlv_update_cdclk(dev);
-
intel_init_clock_gating(dev);
-
intel_enable_gt_powersave(dev);
}
@@ -14691,6 +14740,24 @@ void intel_modeset_init(struct drm_device *dev)
if (INTEL_INFO(dev)->num_pipes == 0)
return;
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+ bios_lvds_use_ssc ? "en" : "dis",
+ dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+
intel_init_display(dev);
intel_init_audio(dev);
@@ -14741,13 +14808,15 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
/* Just in case the BIOS is doing something questionable. */
- intel_fbc_disable(dev);
+ intel_fbc_disable(dev_priv);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, false);
+ intel_modeset_setup_hw_state(dev);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
+ struct intel_initial_plane_config plane_config = {};
+
if (!crtc->active)
continue;
@@ -14758,15 +14827,14 @@ void intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- if (dev_priv->display.get_initial_plane_config) {
- dev_priv->display.get_initial_plane_config(crtc,
- &crtc->plane_config);
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &crtc->plane_config);
- }
+ dev_priv->display.get_initial_plane_config(crtc,
+ &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
}
}
@@ -14818,7 +14886,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
u32 reg;
+ bool enable;
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14827,6 +14897,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
+ drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
drm_crtc_vblank_on(&crtc->base);
}
@@ -14835,7 +14906,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
- struct intel_connector *connector;
bool plane;
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14847,30 +14917,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane;
to_intel_plane_state(crtc->base.primary->state)->visible = true;
crtc->plane = !plane;
- intel_crtc_disable_planes(&crtc->base);
- dev_priv->display.crtc_disable(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
-
- /* ... and break all links. */
- for_each_intel_connector(dev, connector) {
- if (connector->encoder->base.crtc != &crtc->base)
- continue;
-
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
- /* multiple connectors may have the same encoder:
- * handle them and break crtc link separately */
- for_each_intel_connector(dev, connector)
- if (connector->encoder->base.crtc == &crtc->base) {
- connector->encoder->base.crtc = NULL;
- connector->encoder->connectors_active = false;
- }
-
- WARN_ON(crtc->active);
- crtc->base.state->enable = false;
- crtc->base.state->active = false;
- crtc->base.enabled = false;
}
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14884,20 +14932,27 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- intel_crtc_update_dpms(&crtc->base);
+ enable = false;
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ enable = true;
+ break;
+ }
- if (crtc->active != crtc->base.state->enable) {
- struct intel_encoder *encoder;
+ if (!enable)
+ intel_crtc_disable_noatomic(&crtc->base);
+
+ if (crtc->active != crtc->base.state->active) {
/* This can happen either due to bugs in the get_hw_state
- * functions or because the pipe is force-enabled due to the
+ * functions or because of calls to intel_crtc_disable_noatomic,
+ * or because the pipe is force-enabled due to the
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
- crtc->base.state->enable = crtc->active;
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
@@ -14908,10 +14963,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* actually up, hence no need to break them. */
WARN_ON(crtc->active);
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- WARN_ON(encoder->connectors_active);
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
encoder->base.crtc = NULL;
- }
}
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14937,6 +14990,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
+ bool active = false;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
@@ -14944,7 +14998,15 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
- if (encoder->connectors_active && !has_active_crtc) {
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+
+ active = true;
+ break;
+ }
+
+ if (active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -14961,7 +15023,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
- encoder->connectors_active = false;
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
@@ -15010,10 +15071,31 @@ static bool primary_get_hw_state(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!crtc->active)
- return false;
+ return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
+
+static void readout_plane_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_plane *p;
+ struct intel_plane_state *plane_state;
+ bool active = crtc_state->base.active;
- return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+ for_each_intel_plane(crtc->base.dev, p) {
+ if (crtc->pipe != p->pipe)
+ continue;
+
+ plane_state = to_intel_plane_state(p->base.state);
+
+ if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+ plane_state->visible = primary_get_hw_state(crtc);
+ else {
+ if (active)
+ p->disable_plane(&p->base, &crtc->base);
+
+ plane_state->visible = false;
+ }
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15026,22 +15108,44 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
- struct drm_plane *primary = crtc->base.primary;
- struct intel_plane_state *plane_state;
-
+ __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
memset(crtc->config, 0, sizeof(*crtc->config));
-
- crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+ crtc->config->base.crtc = &crtc->base;
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
- crtc->base.state->enable = crtc->active;
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
- plane_state = to_intel_plane_state(primary->state);
- plane_state->visible = primary_get_hw_state(crtc);
+ memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+ if (crtc->base.state->active) {
+ intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+ intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * At this point some state updated by the connectors
+ * in their ->detect() callback has not run yet, so
+ * no recalculation can be done yet.
+ *
+ * Even if we could do a recalculation and modeset
+ * right now it would cause a double modeset if
+ * fbdev or userspace chooses a different initial mode.
+ *
+ * If that happens, someone indicated they wanted a
+ * mode change, which means it's safe to do a full
+ * recalculation.
+ */
+ crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ }
+
+ crtc->base.hwmode = crtc->config->base.adjusted_mode;
+ readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
@@ -15080,7 +15184,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
- encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
encoder->base.name,
@@ -15091,7 +15194,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
- connector->encoder->connectors_active = true;
connector->base.encoder = &connector->encoder->base;
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15104,10 +15206,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
}
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
- bool force_restore)
+/* Scan out the current hw modeset state,
+ * and sanitize it to match the current state
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
@@ -15117,21 +15220,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
- /*
- * Now that we have the config, copy it to each CRTC struct
- * Note that this could go away if we move to using crtc_config
- * checking everywhere.
- */
- for_each_intel_crtc(dev, crtc) {
- if (crtc->active && i915.fastboot) {
- intel_mode_from_pipe_config(&crtc->base.mode,
- crtc->config);
- DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
- crtc->base.base.id);
- drm_mode_debug_printmodeline(&crtc->base.mode);
- }
- }
-
/* HW state is read out, now we need to sanitize this mess. */
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
@@ -15158,34 +15246,77 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (IS_GEN9(dev))
+ if (IS_VALLEYVIEW(dev))
+ vlv_wm_get_hw_state(dev);
+ else if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
- if (force_restore) {
- i915_redisable_vga(dev);
+ for_each_intel_crtc(dev, crtc) {
+ unsigned long put_domains;
- /*
- * We need to use raw interfaces for restoring state to avoid
- * checking (bogus) intermediate states.
- */
- for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc =
- dev_priv->pipe_to_crtc_mapping[pipe];
+ put_domains = modeset_get_crtc_power_domains(&crtc->base);
+ if (WARN_ON(put_domains))
+ modeset_put_power_domains(dev_priv, put_domains);
+ }
+ intel_display_set_init_power(dev_priv, false);
+}
- intel_crtc_restore_mode(crtc);
- }
- } else {
- intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+ struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+ struct intel_connector *conn;
+ struct intel_plane *plane;
+ struct drm_crtc *crtc;
+ int ret;
+
+ if (!state)
+ return;
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* preserve complete old state, including dpll */
+ intel_atomic_get_shared_dpll_state(state);
+
+ for_each_crtc(dev, crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+ }
+
+ for_each_intel_plane(dev, plane) {
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+ if (ret)
+ goto err;
+ }
+
+ for_each_intel_connector(dev, conn) {
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+ if (ret)
+ goto err;
}
- intel_modeset_check_state(dev);
+ intel_modeset_setup_hw_state(dev);
+
+ i915_redisable_vga(dev);
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_free(state);
}
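
intel_display_resume() above leans on PTR_ERR_OR_ZERO() to collapse the ERR_PTR-or-valid-pointer returns of the drm_atomic_get_*_state() helpers into a plain int error code. For reference, the helper from include/linux/err.h is essentially:

	static inline int PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}
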
void intel_modeset_gem_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
@@ -15194,16 +15325,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
@@ -15222,14 +15343,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
ret = intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
c->primary->state,
- NULL);
+ NULL, NULL);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
+ c->primary->crtc = c->primary->state->crtc = NULL;
update_state_fb(c->primary);
+ c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
@@ -15266,13 +15389,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_kms_helper_poll_fini(dev);
- mutex_lock(&dev->struct_mutex);
-
intel_unregister_dsm_handler();
- intel_fbc_disable(dev);
-
- mutex_unlock(&dev->struct_mutex);
+ intel_fbc_disable(dev_priv);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..0a2e33fbf20d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,28 +48,28 @@
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
struct dp_link_dpll {
- int link_bw;
+ int clock;
struct dpll dpll;
};
static const struct dp_link_dpll gen4_dpll[] = {
- { DP_LINK_BW_1_62,
+ { 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
- { DP_LINK_BW_2_7,
+ { 270000,
{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
static const struct dp_link_dpll pch_dpll[] = {
- { DP_LINK_BW_1_62,
+ { 162000,
{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
- { DP_LINK_BW_2_7,
+ { 270000,
{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
static const struct dp_link_dpll vlv_dpll[] = {
- { DP_LINK_BW_1_62,
+ { 162000,
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
- { DP_LINK_BW_2_7,
+ { 270000,
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
@@ -83,19 +83,18 @@ static const struct dp_link_dpll chv_dpll[] = {
* m2 is stored in fixed point format using formula below
* (m2_int << 22) | m2_fraction
*/
- { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
+ { 162000, /* m2_int = 32, m2_fraction = 1677722 */
{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
- { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
+ { 270000, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
- { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
+ { 540000, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+ 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
- 243000, 270000, 324000, 405000,
- 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -565,7 +564,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROXTON(dev))
+ return BXT_PP_CONTROL(0);
+ else if (HAS_PCH_SPLIT(dev))
return PCH_PP_CONTROL;
else
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -575,7 +576,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROXTON(dev))
+ return BXT_PP_STATUS(0);
+ else if (HAS_PCH_SPLIT(dev))
return PCH_PP_STATUS;
else
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -708,7 +711,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return 0;
if (intel_dig_port->port == PORT_A) {
- return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+ return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
@@ -723,7 +727,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
- return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+ return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
@@ -842,8 +846,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
}
if (try == 3) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ static u32 last_status = -1;
+ const u32 status = I915_READ(ch_ctl);
+
+ if (status != last_status) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ status);
+ last_status = status;
+ }
+
ret = -EBUSY;
goto out;
}
@@ -1019,11 +1030,34 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
const char *name = NULL;
+ uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
int ret;
+ /* On SKL we don't have Aux for port E so we rely on VBT to set
+ * a proper alternate aux channel.
+ */
+ if (IS_SKYLAKE(dev) && port == PORT_E) {
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_B:
+ porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+ break;
+ case DP_AUX_C:
+ porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+ break;
+ case DP_AUX_D:
+ porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+ break;
+ case DP_AUX_A:
+ default:
+ porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+ }
+ }
+
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1041,6 +1075,10 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
+ case PORT_E:
+ intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+ name = "DPDDC-E";
+ break;
default:
BUG();
}
@@ -1054,7 +1092,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
intel_dp->aux.name = name;
@@ -1092,7 +1130,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
u32 ctrl1;
@@ -1104,7 +1142,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (link_clock / 2) {
+ switch (pipe_config->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
@@ -1137,20 +1175,20 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
-static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
memset(&pipe_config->dpll_hw_state, 0,
sizeof(pipe_config->dpll_hw_state));
- switch (link_bw) {
- case DP_LINK_BW_1_62:
+ switch (pipe_config->port_clock / 2) {
+ case 81000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
- case DP_LINK_BW_2_7:
+ case 135000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
- case DP_LINK_BW_5_4:
+ case 270000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
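
This hunk is part of the series-wide switch from DP_LINK_BW_* bandwidth codes to literal link rates in kHz; the case labels become port_clock / 2 (81000, 135000 and 270000 for RBR, HBR and HBR2 respectively). The two representations convert at 27000 kHz per code unit via the drm_dp helpers, sketched here:

	/* Sketch: round-tripping between bandwidth codes and kHz rates. */
	u8 code = drm_dp_link_rate_to_bw_code(162000);          /* DP_LINK_BW_1_62 (0x06) */
	int rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7); /* 270000 kHz */
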
@@ -1169,32 +1207,45 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+}
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
- if (IS_SKYLAKE(dev)) {
+ int size;
+
+ if (IS_BROXTON(dev)) {
+ *source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
- return ARRAY_SIZE(skl_rates);
- } else if (IS_CHERRYVIEW(dev)) {
- *source_rates = chv_rates;
- return ARRAY_SIZE(chv_rates);
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
}
- *source_rates = default_rates;
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (!intel_dp_source_supports_hbr2(dev))
+ size--;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
- return (DP_LINK_BW_5_4 >> 3) + 1;
- else
- return (DP_LINK_BW_2_7 >> 3) + 1;
+ return size;
}
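
intel_dp_source_rates() now returns a platform rate table plus a size, and hides HBR2 by shrinking the size by one, relying on the invariant noted in the comment that 540000 is the last entry of every table. A minimal sketch of the pattern (supports_hbr2 is a hypothetical stand-in for intel_dp_source_supports_hbr2()):

	/* With HBR2 unsupported, dropping the final element hides
	 * 540000 from everything that iterates the table. */
	static const int rates[] = { 162000, 270000, 540000 };
	int size = ARRAY_SIZE(rates);
	if (!supports_hbr2)	/* hypothetical flag */
		size--;		/* table now ends at 270000 */
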
static void
intel_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config, int link_bw)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
@@ -1216,7 +1267,7 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (divisor && count) {
for (i = 0; i < count; i++) {
- if (link_bw == divisor[i].link_bw) {
+ if (pipe_config->port_clock == divisor[i].clock) {
pipe_config->dpll = divisor[i].dpll;
pipe_config->clock_set = true;
break;
@@ -1374,7 +1425,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (INTEL_INFO(dev)->gen >= 9) {
int ret;
- ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+ ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
@@ -1399,7 +1450,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp)) {
- if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+ /* Get bpp from VBT only for panels that don't have bpp in EDID */
+ if (intel_connector->base.display_info.bpc == 0 &&
+ (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
@@ -1490,13 +1544,13 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+ skl_edp_set_pll_config(pipe_config);
else if (IS_BROXTON(dev))
/* handled in ddi */;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+ hsw_dp_set_ddi_pll_sel(pipe_config);
else
- intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+ intel_dp_set_clock(encoder, pipe_config);
return true;
}
@@ -1699,8 +1753,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
control = I915_READ(_pp_ctrl_reg(intel_dp));
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
+ if (!IS_BROXTON(dev)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
return control;
}
@@ -2612,7 +2668,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- WARN(encoder->connectors_active,
+ WARN(encoder->base.crtc,
"stealing pipe %c power sequencer from active eDP port %c\n",
pipe_name(pipe), port_name(port));
@@ -3414,92 +3470,6 @@ gen7_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(0);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(1);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return DDI_BUF_TRANS_SELECT(2);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
- return DDI_BUF_TRANS_SELECT(3);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(4);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(5);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return DDI_BUF_TRANS_SELECT(6);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(7);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return DDI_BUF_TRANS_SELECT(8);
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return DDI_BUF_TRANS_SELECT(9);
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_TRANS_SELECT(0);
- }
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- enum port port = dport->port;
- struct drm_device *dev = dport->base.base.dev;
- struct intel_encoder *encoder = &dport->base;
- uint8_t train_set = intel_dp->train_set[0];
- uint32_t level = 0;
-
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 0;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 1;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 2;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
- level = 3;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 4;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 5;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 6;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 7;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 8;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 9;
- break;
- }
-
- bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3507,22 +3477,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
- uint32_t signal_levels, mask;
+ uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROXTON(dev)) {
- signal_levels = 0;
- bxt_signal_levels(intel_dp);
- mask = 0;
- } else if (HAS_DDI(dev)) {
- signal_levels = hsw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
+ if (HAS_DDI(dev)) {
+ signal_levels = ddi_signal_levels(intel_dp);
+
+ if (IS_BROXTON(dev))
+ signal_levels = 0;
+ else
+ mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
signal_levels = chv_signal_levels(intel_dp);
- mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = vlv_signal_levels(intel_dp);
- mask = 0;
} else if (IS_GEN7(dev) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -3941,10 +3909,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support, Intel platforms that support HBR2 alone
+ * have support for TP3 hence that check is used along with dpcd check
+ * to ensure TP3 can be enabled.
+ * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
+ * supported but still not enabled.
+ */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
@@ -4034,43 +4007,67 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
return intel_dp->is_mst;
}
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
- int test_crc_count;
- int attempts = 6;
- int ret = 0;
- hsw_disable_ips(intel_crtc);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto out;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+ DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+ return;
}
- if (!(buf & DP_TEST_CRC_SUPPORTED)) {
- ret = -ENOTTY;
- goto out;
- }
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ buf & ~DP_TEST_SINK_START) < 0)
+ DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
+ hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u8 buf;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+ return -EIO;
+
+ if (!(buf & DP_TEST_CRC_SUPPORTED))
+ return -ENOTTY;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+
+ hsw_disable_ips(intel_crtc);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0) {
- ret = -EIO;
- goto out;
+ buf | DP_TEST_SINK_START) < 0) {
+ hsw_enable_ips(intel_crtc);
+ return -EIO;
}
+ return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u8 buf;
+ int test_crc_count;
+ int attempts = 6;
+ int ret;
+
+ ret = intel_dp_sink_crc_start(intel_dp);
+ if (ret)
+ return ret;
+
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
- goto out;
+ goto stop;
}
test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4079,7 +4076,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
- goto out;
+ goto stop;
}
intel_wait_for_vblank(dev, intel_crtc->pipe);
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
@@ -4087,25 +4084,13 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
if (attempts == 0) {
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
- goto out;
+ goto stop;
}
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
ret = -EIO;
- goto out;
- }
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0) {
- ret = -EIO;
- goto out;
- }
-out:
- hsw_enable_ips(intel_crtc);
+stop:
+ intel_dp_sink_crc_stop(intel_dp);
return ret;
}
@@ -4166,9 +4151,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
intel_dp->aux.i2c_defer_count);
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
+ struct edid *block = intel_connector->detect_edid;
+
+ /* We have to write the checksum
+ * of the last block read
+ */
+ block += intel_connector->detect_edid->extensions;
+
if (!drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_EDID_CHECKSUM,
- &intel_connector->detect_edid->checksum,
+ &block->checksum,
1))
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
@@ -4316,10 +4308,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!intel_encoder->connectors_active)
- return;
-
- if (WARN_ON(!intel_encoder->base.crtc))
+ if (!intel_encoder->base.crtc)
return;
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4900,7 +4889,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_detect,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4922,12 +4911,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
- return;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
@@ -4978,9 +4961,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp_probe_oui(intel_dp);
- if (!intel_dp_probe_mst(intel_dp))
+ if (!intel_dp_probe_mst(intel_dp)) {
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto mst_fail;
-
+ }
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4988,10 +4974,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (!intel_dp->is_mst) {
- /*
- * we'll check the link status via the normal hot plug path later -
- * but for short hpds we should check it now
- */
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5033,16 +5015,17 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
return -1;
}
-/* check the VBT to see whether the eDP is on DP-D port */
+/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i;
static const short port_mapping[] = {
- [PORT_B] = PORT_IDPB,
- [PORT_C] = PORT_IDPC,
- [PORT_D] = PORT_IDPD,
+ [PORT_B] = DVO_PORT_DPB,
+ [PORT_C] = DVO_PORT_DPC,
+ [PORT_D] = DVO_PORT_DPD,
+ [PORT_E] = DVO_PORT_DPE,
};
if (port == PORT_A)
@@ -5095,8 +5078,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
- u32 pp_on, pp_off, pp_div, pp;
- int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+ u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+ int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5104,7 +5087,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
if (final->t11_t12 != 0)
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROXTON(dev)) {
+ /*
+ * TODO: BXT has 2 sets of PPS registers.
+	 * The correct register set for Broxton needs to be identified
+	 * using the VBT; hardcoded to the first set for now.
+ */
+ pp_ctrl_reg = BXT_PP_CONTROL(0);
+ pp_on_reg = BXT_PP_ON_DELAYS(0);
+ pp_off_reg = BXT_PP_OFF_DELAYS(0);
+ } else if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5120,12 +5112,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
- pp = ironlake_get_pp_control(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
+ pp_ctl = ironlake_get_pp_control(intel_dp);
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
- pp_div = I915_READ(pp_div_reg);
+ if (!IS_BROXTON(dev)) {
+ I915_WRITE(pp_ctrl_reg, pp_ctl);
+ pp_div = I915_READ(pp_div_reg);
+ }
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5140,8 +5134,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ if (IS_BROXTON(dev)) {
+ u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+ BXT_POWER_CYCLE_DELAY_SHIFT;
+ if (tmp > 0)
+ cur.t11_t12 = (tmp - 1) * 1000;
+ else
+ cur.t11_t12 = 0;
+ } else {
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ }
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
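
The Broxton decode above implies the PP_CONTROL field stores the power-cycle delay as (delay/1000 + 1), with 0 meaning unset. A small self-contained check of the round trip against the encode side further down (the units are the driver's internal t11_t12 units; only the identity matters here):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int field = 5;                               /* read from PP_CONTROL */
            unsigned int t11_t12 = (field - 1) * 1000;            /* decode: 4000 */
            unsigned int again = DIV_ROUND_UP(t11_t12 + 1, 1000); /* encode: 5 */

            printf("field=%u t11_t12=%u re-encoded=%u\n", field, t11_t12, again);
            return 0;
    }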
@@ -5198,13 +5201,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
- int pp_on_reg, pp_off_reg, pp_div_reg;
+ int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROXTON(dev)) {
+ /*
+ * TODO: BXT has 2 sets of PPS registers.
+	 * The correct register set for Broxton needs to be identified
+	 * using the VBT; hardcoded to the first set for now.
+ */
+ pp_ctrl_reg = BXT_PP_CONTROL(0);
+ pp_on_reg = BXT_PP_ON_DELAYS(0);
+ pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+ } else if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
@@ -5230,9 +5243,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
+ if (IS_BROXTON(dev)) {
+ pp_div = I915_READ(pp_ctrl_reg);
+ pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+ << BXT_POWER_CYCLE_DELAY_SHIFT);
+ } else {
+ pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+ }
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -5249,11 +5269,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(pp_on_reg, pp_on);
I915_WRITE(pp_off_reg, pp_off);
- I915_WRITE(pp_div_reg, pp_div);
+ if (IS_BROXTON(dev))
+ I915_WRITE(pp_ctrl_reg, pp_div);
+ else
+ I915_WRITE(pp_div_reg, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(pp_on_reg),
I915_READ(pp_off_reg),
+ IS_BROXTON(dev) ?
+ (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(pp_div_reg));
}
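
On Broxton the cycle delay lives in PP_CONTROL next to unrelated control bits, so the write side above is deliberately a read-modify-write rather than a plain store. A condensed restatement with the patch's names:

    u32 val = I915_READ(pp_ctrl_reg);       /* BXT_PP_CONTROL(0) */
    val &= ~BXT_POWER_CYCLE_DELAY_MASK;     /* touch only the delay field */
    val |= DIV_ROUND_UP(seq->t11_t12 + 1, 1000) << BXT_POWER_CYCLE_DELAY_SHIFT;
    I915_WRITE(pp_ctrl_reg, val);           /* other PP_CONTROL bits preserved */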
@@ -5458,13 +5483,12 @@ unlock:
}
/**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
@@ -5489,26 +5513,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+ /* invalidate means busy screen hence upclock */
+ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
- }
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->drrs.mutex);
}
/**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. DRRS should then be upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
+ * other planes are dirty.
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
@@ -5532,10 +5557,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
- !dev_priv->drrs.busy_frontbuffer_bits)
+	/* flush means the screen was just updated, hence upclock */
+ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv->dev,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
+
+ /*
+	 * flush also means no more activity, hence schedule the downclock if
+	 * all other fbs are quiescent too
+ */
+ if (!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
msecs_to_jiffies(1000));
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5824,6 +5860,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
break;
+ case PORT_E:
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
default:
BUG();
}
@@ -5939,10 +5978,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
- intel_encoder->hot_plug = intel_dp_hot_plug;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hpd_irq_port[port] = intel_dig_port;
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
@@ -5958,7 +5996,7 @@ void intel_dp_mst_suspend(struct drm_device *dev)
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
@@ -5977,7 +6015,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
int i;
for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..3e4be5a3becd 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,6 +33,7 @@
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -97,6 +98,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n);
pipe_config->dp_m_n.tu = slots;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config);
+
return true;
}
@@ -168,6 +173,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
return;
}
+	/* MST encoders are bound to a crtc, not to a connector;
+ * force the mapping here for get_hw_state.
+ */
+ found->encoder = encoder;
+
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
intel_mst->port = found->port;
@@ -328,7 +338,7 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
@@ -357,6 +367,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+ return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +387,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
+ .atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
};
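
The legacy .best_encoder hook only sees the connector, so it cannot know which pipe an in-flight atomic commit will use; .atomic_best_encoder receives the new connector state and can index the per-pipe fake MST encoders directly. A sketch of the core of that mapping, names from the patch:

    static struct drm_encoder *
    pick_mst_encoder(struct intel_dp *intel_dp, struct drm_connector_state *state)
    {
            /* pick by the pipe in the *new* state, not the current one */
            struct intel_crtc *crtc = to_intel_crtc(state->crtc);

            return &intel_dp->mst_encoders[crtc->pipe]->base.base;
    }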
@@ -384,7 +405,7 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
- if (connector->encoder) {
+ if (connector->encoder && connector->base.state->crtc) {
enum pipe pipe;
if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
return false;
@@ -395,7 +416,7 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
@@ -403,7 +424,7 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
@@ -441,10 +462,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
- drm_reinit_primary_mode_group(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
return connector;
}
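
Registering the hot-added connector now happens under drm_modeset_lock_all() instead of bare mode_config.mutex, since the fbdev helper may reconfigure CRTCs; the registration itself stays outside the locks, as in the patch. The shape, as a sketch:

    drm_modeset_lock_all(dev);                      /* mutex plus every crtc lock */
    intel_connector_add_to_fbdev(intel_connector);  /* may touch crtc state */
    drm_modeset_unlock_all(dev);
    drm_connector_register(&intel_connector->base); /* outside the locks */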
@@ -454,19 +474,28 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+
/* need to nuke the connector */
- mutex_lock(&dev->mode_config.mutex);
- intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
+ if (connector->state->crtc) {
+ struct drm_mode_set set;
+ int ret;
+
+ memset(&set, 0, sizeof(set));
+		set.crtc = connector->state->crtc;
+
+ ret = drm_atomic_helper_set_config(&set);
+
+ WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+ }
+ drm_modeset_unlock_all(dev);
intel_connector->unregister(intel_connector);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
- mutex_unlock(&dev->mode_config.mutex);
-
- drm_reinit_primary_mode_group(dev);
+ drm_modeset_unlock_all(dev);
kfree(intel_connector);
DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 105928382e21..2b9e6f9775c5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -130,15 +130,9 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
- /*
- * The new crtc this encoder will be driven from. Only differs from
- * base->crtc while a modeset is in progress.
- */
- struct intel_crtc *new_crtc;
enum intel_output_type type;
unsigned int cloneable;
- bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *);
@@ -182,6 +176,10 @@ struct intel_panel {
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
+
+ /* PWM chip */
+ struct pwm_device *pwm;
+
struct backlight_device *device;
} backlight;
@@ -195,12 +193,6 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
- /*
- * The new encoder this connector will be driven. Only differs from
- * encoder while a modeset is in progress.
- */
- struct intel_encoder *new_encoder;
-
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_atomic_state {
+ struct drm_atomic_state base;
+
+ unsigned int cdclk;
+ bool dpll_set;
+ struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect src;
@@ -256,7 +256,7 @@ struct intel_plane_state {
* plane requiring a scaler:
* - During check_plane, its bit is set in
* crtc_state->scaler_state.scaler_users by calling helper function
- * update_scaler_users.
+ * update_scaler_plane.
* - scaler_id indicates the scaler it got assigned.
*
* plane doesn't require a scaler:
@@ -264,9 +264,11 @@ struct intel_plane_state {
* got disabled.
* - During check_plane, corresponding bit is reset in
* crtc_state->scaler_state.scaler_users by calling helper function
- * update_scaler_users.
+ * update_scaler_plane.
*/
int scaler_id;
+
+ struct drm_intel_sprite_colorkey ckey;
};
struct intel_initial_plane_config {
@@ -286,7 +288,6 @@ struct intel_initial_plane_config {
#define SKL_MAX_DST_H 4096
struct intel_scaler {
- int id;
int in_use;
uint32_t mode;
};
@@ -319,6 +320,9 @@ struct intel_crtc_scaler_state {
int scaler_id;
};
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -331,7 +335,6 @@ struct intel_crtc_state {
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
/* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@ struct intel_crtc_state {
int pbn;
struct intel_crtc_scaler_state scaler_state;
+
+ /* w/a for waiting 2 vblanks during crtc enable */
+ enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+ struct vlv_pipe_wm wm[3];
+ struct vlv_sr_wm sr[3];
+ uint8_t num_active_planes;
+ uint8_t num_levels;
+ uint8_t level;
+ bool cxsr;
};
struct intel_pipe_wm {
@@ -478,16 +493,13 @@ struct skl_pipe_wm {
* and thus can't be run with interrupts disabled.
*/
struct intel_crtc_atomic_commit {
- /* vblank evasion */
- bool evade;
- unsigned start_vbl_count;
-
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
+ bool disable_cxsr;
bool pre_disable_primary;
- bool update_wm;
+ bool update_wm_pre, update_wm_post;
unsigned disabled_planes;
/* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@ struct intel_crtc {
uint32_t cursor_size;
uint32_t cursor_base;
- struct intel_initial_plane_config plane_config;
struct intel_crtc_state *config;
- bool new_enabled;
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -544,14 +554,19 @@ struct intel_crtc {
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
+ /* allow CxSR on this pipe */
+ bool cxsr_allowed;
} wm;
int scanline_offset;
+ unsigned start_vbl_count;
struct intel_crtc_atomic_commit atomic;
/* scalers available on this crtc */
int num_scalers;
+
+ struct vlv_wm_state wm_state;
};
struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@ struct intel_plane_wm_parameters {
bool scaled;
u64 tiling;
unsigned int rotation;
+ uint16_t fifo_size;
};
struct intel_plane {
@@ -578,9 +594,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
int max_downscale;
-
- /* FIXME convert to properties */
- struct drm_intel_sprite_colorkey ckey;
+ uint32_t frontbuffer_bit;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@ struct intel_plane {
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc, bool force);
+ struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
@@ -629,6 +644,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
@@ -940,43 +956,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
void intel_frontbuffer_flip(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+ unsigned frontbuffer_bits);
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+ enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
@@ -994,15 +990,11 @@ int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
- struct intel_engine_cs *pipelined);
+ struct intel_engine_cs *pipelined,
+ struct drm_i915_gem_request **pipelined_request);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state);
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
-bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state);
-
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
@@ -1104,7 +1095,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
@@ -1114,7 +1106,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
- struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1194,6 +1185,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1207,7 +1199,7 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -1238,15 +1230,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif
/* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1309,13 @@ void intel_backlight_unregister(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+ unsigned frontbuffer_bits);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1369,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1382,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1395,11 +1392,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_tv_init(struct drm_device *dev);
/* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async);
int intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
@@ -1407,6 +1399,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b5a5558ecd63..32a6c7184ca4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -31,6 +31,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
@@ -261,11 +262,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
- DRM_DEBUG_KMS("\n");
-}
-
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
@@ -401,6 +397,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
intel_dsi_port_enable(encoder);
}
+
+ intel_panel_enable_backlight(intel_dsi->attached_connector);
}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ /* Panel Enable over CRC PMIC */
+ if (intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+ msleep(intel_dsi->panel_on_delay);
+
/* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
- tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+ tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
- intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
- DPLL_REFA_CLK_ENABLE_VLV;
+ intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
/* put device in ready state */
intel_dsi_device_ready(encoder);
- msleep(intel_dsi->panel_on_delay);
-
drm_panel_prepare(intel_dsi->panel);
for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ intel_panel_disable_backlight(intel_dsi->attached_connector);
+
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
+
+ /* Panel Disable over CRC PMIC */
+ if (intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
/* XXX: Logically this call belongs in the panel driver. */
drm_panel_remove(intel_dsi->panel);
}
+
+ /* dispose of the gpios */
+ if (intel_dsi->gpio_panel)
+ gpiod_put(intel_dsi->gpio_panel);
+
intel_encoder_destroy(encoder);
}
@@ -969,7 +982,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@ void intel_dsi_init(struct drm_device *dev)
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
/* XXX: very likely not all of these are needed */
- intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1036,11 +1048,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->unregister = intel_connector_unregister;
/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
- if (dev_priv->vbt.dsi.config->dual_link) {
- /* XXX: does dual link work on either pipe? */
- intel_encoder->crtc_mask = (1 << PIPE_A);
- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
- } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
@@ -1048,6 +1056,9 @@ void intel_dsi_init(struct drm_device *dev)
intel_dsi->ports = (1 << PORT_C);
}
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
@@ -1071,6 +1082,20 @@ void intel_dsi_init(struct drm_device *dev)
goto err;
}
+ /*
+	 * In case of BYT with a CRC PMIC, we need to use a GPIO for
+	 * panel control.
+ */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ intel_dsi->gpio_panel =
+ gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ DRM_ERROR("Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1129,7 @@ void intel_dsi_init(struct drm_device *dev)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
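
The CRC-PMIC panel line is driven through the gpiod consumer API; its whole lifecycle in this file is request, set, and put. A sketch of that lifecycle ("panel" is the con_id the patch looks up; error handling degrades to NULL exactly as above):

    struct gpio_desc *gpio;

    gpio = gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);  /* request, drive high */
    if (IS_ERR(gpio))
            gpio = NULL;                                  /* run without it */

    if (gpio)
            gpiod_set_value_cansleep(gpio, 1);  /* panel on; may sleep (PMIC bus) */
    /* ... panel in use ... */
    if (gpio)
            gpiod_set_value_cansleep(gpio, 0);  /* panel off */
    if (gpio)
            gpiod_put(gpio);                    /* release at encoder destroy */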
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 2784ac442368..42a68593e32a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -42,6 +42,9 @@ struct intel_dsi {
struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+ /* GPIO Desc for CRC based Panel control */
+ struct gpio_desc *gpio_panel;
+
struct intel_connector *attached_connector;
/* bit mask of ports being driven */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d20cf37b6901..c6a8975b128f 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -38,6 +38,27 @@
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+ int bpp;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ return bpp;
+}
+
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
@@ -46,8 +67,8 @@ struct dsi_mnp {
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
- 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
- 71, 35 /* 91 - 92 */
+ 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
#ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
u32 dsi_bit_clock_hz;
u32 dsi_clk;
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ bpp = dsi_pixel_format_bpp(pixel_format);
hactive = mode->hdisplay;
vactive = mode->vdisplay;
@@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
- u32 bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ u32 bpp = dsi_pixel_format_bpp(pixel_format);
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
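
A quick worked number for the comment's formula, with hypothetical round figures (the rounding behaviour of the real helper is ignored here):

    /* pclk = 148500 kHz, RGB888 -> bpp = 24, 4 lanes */
    unsigned int pclk = 148500, bpp = 24, lanes = 4;
    unsigned int dsi_clk_khz = pclk * bpp / lanes;   /* = 891000 kHz */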
@@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
#endif
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+ struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
- unsigned int m, n = 1, p;
- int ref_clk = 25000;
+ unsigned int m_min, m_max, p_min = 2, p_max = 6;
+ unsigned int m, n, p;
+ int ref_clk;
int delta = target_dsi_clk;
u32 m_seed;
@@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
return -ECHRNG;
}
- for (m = 62; m <= 92 && delta; m++) {
- for (p = 2; p <= 6 && delta; p++) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ ref_clk = 100000;
+ n = 4;
+ m_min = 70;
+ m_max = 96;
+ } else {
+ ref_clk = 25000;
+ n = 1;
+ m_min = 62;
+ m_max = 92;
+ }
+
+ for (m = m_min; m <= m_max && delta; m++) {
+ for (p = p_min; p <= p_max && delta; p++) {
/*
* Find the optimal m and p divisors with minimal delta
* +/- the required clock
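
The search that follows is a plain exhaustive scan: for each m and p in the platform's range it computes the clock the dividers would produce and keeps the pair with the smallest error, stopping early on an exact hit. A condensed sketch given the per-platform m_min/m_max, p_min/p_max, n, ref_clk and target from the hunk above, and assuming the usual dsi_clk = m * ref_clk / (n * p) divider relation (the exact formula is the driver's, not spelled out in this hunk):

    unsigned int best_m = 0, best_p = 0;
    int delta = target;                       /* worst case to start */

    for (unsigned int m = m_min; m <= m_max && delta; m++) {
            for (unsigned int p = p_min; p <= p_max && delta; p++) {
                    int calc = m * ref_clk / (n * p);    /* assumed relation */
                    int d = calc > target ? calc - target : target - calc;
                    if (d < delta) {
                            delta = d;
                            best_m = m;
                            best_p = p;
                    }
            }
    }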
@@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+ ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
@@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
- int bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
+ int bpp = dsi_pixel_format_bpp(pixel_format);
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ece5bd754f85..dc532bb61d22 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -196,50 +196,6 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- struct drm_crtc *crtc;
- struct intel_crtc_state *config;
-
- /* dvo supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = intel_dvo->base.base.crtc;
- if (!crtc) {
- intel_dvo->base.connectors_active = false;
- return;
- }
-
- /* We call connector dpms manually below in case pipe dpms doesn't
- * change due to cloning. */
- if (mode == DRM_MODE_DPMS_ON) {
- config = to_intel_crtc(crtc)->config;
-
- intel_dvo->base.connectors_active = true;
-
- intel_crtc_update_dpms(crtc);
-
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
- } else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
- intel_dvo->base.connectors_active = false;
-
- intel_crtc_update_dpms(crtc);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -387,7 +343,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = intel_dvo_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6abb83432d4d..1f97fb548c2a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,9 +41,8 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;
@@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
- if (IS_GEN4(dev)) {
+ if (IS_GEN4(dev_priv)) {
u32 fbc_ctl2;
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+ fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
}
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+ cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+ dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev)
}
}
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
@@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
POSTING_READ(MSG_FBC_REND_STATE);
}
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ int threshold = dev_priv->fbc.threshold;
dev_priv->fbc.enabled = true;
- dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+ dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
+ threshold++;
- switch (dev_priv->fbc.threshold) {
+ switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
}
intel_fbc_nuke(dev_priv);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev)
}
}
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ int threshold = dev_priv->fbc.threshold;
dev_priv->fbc.enabled = true;
- dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+ dpfc_ctl = 0;
+ if (IS_IVYBRIDGE(dev_priv))
+ dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
- dev_priv->fbc.threshold++;
+ threshold++;
- switch (dev_priv->fbc.threshold) {
+ switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+ I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+ I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
intel_fbc_nuke(dev_priv);
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
/**
* intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
return dev_priv->fbc.enabled;
}
@@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+ struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->fbc.lock);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
- if (work->crtc->primary->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc);
+ if (crtc_fb == work->fb) {
+ dev_priv->fbc.enable_fbc(work->crtc);
- dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
- dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
- dev_priv->fbc.y = work->crtc->y;
+ dev_priv->fbc.crtc = work->crtc;
+ dev_priv->fbc.fb_id = crtc_fb->base.id;
+ dev_priv->fbc.y = work->crtc->base.y;
}
dev_priv->fbc.fbc_work = NULL;
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->fbc.lock);
kfree(work);
}
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
if (dev_priv->fbc.fbc_work == NULL)
return;
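
intel_fbc_work_fn above uses the classic delayed-work double check: cancellation only clears dev_priv->fbc.fbc_work, so a work item that was already running recognises itself as stale and does nothing, and every item frees itself exactly once. Distilled from the hunk:

    mutex_lock(&dev_priv->fbc.lock);
    if (work == dev_priv->fbc.fbc_work) {      /* still the live request? */
            if (crtc_fb == work->fb)           /* fb unchanged since scheduling? */
                    dev_priv->fbc.enable_fbc(work->crtc);
            dev_priv->fbc.fbc_work = NULL;
    }
    mutex_unlock(&dev_priv->fbc.lock);
    kfree(work);                               /* stale or not, freed once */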
@@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL;
}
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
{
struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!dev_priv->display.enable_fbc)
- return;
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
intel_fbc_cancel_work(dev_priv);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc);
+ dev_priv->fbc.enable_fbc(crtc);
return;
}
work->crtc = crtc;
- work->fb = crtc->primary->fb;
+ work->fb = crtc->base.primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc)
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+ intel_fbc_cancel_work(dev_priv);
+
+ dev_priv->fbc.disable_fbc(dev_priv);
+ dev_priv->fbc.crtc = NULL;
+}
+
/**
* intel_fbc_disable - disable FBC
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
*
* This function disables FBC.
*/
-void intel_fbc_disable(struct drm_device *dev)
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
- intel_fbc_cancel_work(dev_priv);
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_disable(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- if (!dev_priv->display.disable_fbc)
+ if (!dev_priv->fbc.enable_fbc)
return;
- dev_priv->display.disable_fbc(dev);
- dev_priv->fbc.crtc = NULL;
+ mutex_lock(&dev_priv->fbc.lock);
+ if (dev_priv->fbc.crtc == crtc)
+ __intel_fbc_disable(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
}
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+ switch (reason) {
+ case FBC_OK:
+ return "FBC enabled but currently disabled in hardware";
+ case FBC_UNSUPPORTED:
+ return "unsupported by this chipset";
+ case FBC_NO_OUTPUT:
+ return "no output";
+ case FBC_STOLEN_TOO_SMALL:
+ return "not enough stolen memory";
+ case FBC_UNSUPPORTED_MODE:
+ return "mode incompatible with compression";
+ case FBC_MODE_TOO_LARGE:
+ return "mode too large for compression";
+ case FBC_BAD_PLANE:
+ return "FBC unsupported on plane";
+ case FBC_NOT_TILED:
+ return "framebuffer not tiled or fenced";
+ case FBC_MULTIPLE_PIPES:
+ return "more than one pipe active";
+ case FBC_MODULE_PARAM:
+ return "disabled per module param";
+ case FBC_CHIP_DEFAULT:
+ return "disabled per chip default";
+ case FBC_ROTATION:
+ return "rotation unsupported";
+ case FBC_IN_DBG_MASTER:
+ return "Kernel debugger is active";
+ default:
+ MISSING_CASE(reason);
+ return "unknown reason";
+ }
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
- return false;
+ return;
dev_priv->fbc.no_fbc_reason = reason;
- return true;
+ DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL, *tmp_crtc;
enum pipe pipe;
- bool pipe_a_only = false, one_pipe_only = false;
+ bool pipe_a_only = false;
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
pipe_a_only = true;
- else if (INTEL_INFO(dev_priv)->gen <= 4)
- one_pipe_only = true;
for_each_pipe(dev_priv, pipe) {
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (intel_crtc_active(tmp_crtc) &&
- to_intel_plane_state(tmp_crtc->primary->state)->visible) {
- if (one_pipe_only && crtc) {
- if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- return NULL;
- }
+ to_intel_plane_state(tmp_crtc->primary->state)->visible)
crtc = tmp_crtc;
- }
if (pipe_a_only)
break;
}
- if (!crtc || crtc->primary->fb == NULL) {
- if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
- DRM_DEBUG_KMS("no output, disabling\n");
+ if (!crtc || crtc->primary->fb == NULL)
return NULL;
- }
return crtc;
}
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+ int n_pipes = 0;
+ struct drm_crtc *crtc;
+
+ if (INTEL_INFO(dev_priv)->gen > 4)
+ return true;
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (intel_crtc_active(crtc) &&
+ to_intel_plane_state(crtc->primary->state)->visible)
+ n_pipes++;
+ }
+
+ return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
+{
+ int compression_threshold = 1;
+ int ret;
+
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our 2 compression
+ * thresholds, so in that case we only have one fallback.
+ */
+
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+ if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
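find_compression_threshold() first over-allocates (twice the requested size) to leave room for reallocation, then repeatedly halves the request while doubling the compression threshold, giving up beyond the hardware's 1:4 limit (1:2 for 16bpp framebuffers). A runnable model of the search, with the stolen allocator reduced to a plain size check and the gen <= 4 early-out omitted:

#include <stdio.h>

/* Stand-in for i915_gem_stolen_insert_node(): 0 on success. */
static int try_alloc(int size, int available)
{
	return size <= available ? 0 : -1;
}

static int find_threshold(int size, int fb_cpp, int available)
{
	int threshold = 1;

	size *= 2;				/* over-allocate first */
	if (try_alloc(size, available) == 0)
		return threshold;

	for (;;) {
		if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
			return 0;		/* HW compression limit is 1:4 */
		size /= 2;
		if (try_alloc(size, available) == 0)
			return threshold;
		threshold *= 2;
	}
}

int main(void)
{
	/* 8 MiB fb, 32bpp, 3 MiB of stolen memory: prints threshold = 4 */
	printf("threshold = %d\n", find_threshold(8 << 20, 4, 3 << 20));
	return 0;
}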
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+ int fb_cpp)
+{
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
+ goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+ }
+
+ dev_priv->fbc.threshold = ret;
+
+ if (INTEL_INFO(dev_priv)->gen >= 5)
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ else if (IS_GM45(dev_priv)) {
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ } else {
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+ 4096, 4096);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
+ }
+
+ dev_priv->fbc.uncompressed_size = size;
+
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
+
+ return 0;
+
+err_fb:
+ kfree(compressed_llb);
+ i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+ pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc.uncompressed_size == 0)
+ return;
+
+ i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+ if (dev_priv->fbc.compressed_llb) {
+ i915_gem_stolen_remove_node(dev_priv,
+ dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
+
+ dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_cleanup_cfb(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+ int fb_cpp)
+{
+ if (size <= dev_priv->fbc.uncompressed_size)
+ return 0;
+
+ /* Release any current block */
+ __intel_fbc_cleanup_cfb(dev_priv);
+
+ return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
/**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
*
* We need to enable/disable FBC on a global basis.
*/
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev)
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!HAS_FBC(dev))
- return;
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
/* disable framebuffer compression in vGPU */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;
if (i915.enable_fbc < 0) {
- if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
- DRM_DEBUG_KMS("disabled per chip default\n");
+ set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
goto out_disable;
}
if (!i915.enable_fbc) {
- if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
- DRM_DEBUG_KMS("fbc disabled per module param\n");
+ set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
goto out_disable;
}
@@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
- if (!crtc)
+ if (!crtc) {
+ set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
goto out_disable;
+ }
+
+ if (!multiple_pipes_ok(dev_priv)) {
+ set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+ goto out_disable;
+ }
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
@@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev)
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
goto out_disable;
}
- if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
max_width = 4096;
max_height = 4096;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev)
}
if (intel_crtc->config->pipe_src_w > max_width ||
intel_crtc->config->pipe_src_h > max_height) {
- if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
goto out_disable;
}
- if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+ if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
- if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not A, disabling compression\n");
+ set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
goto out_disable;
}
@@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
- if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
goto out_disable;
}
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
- if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
- DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+ set_no_fbc_reason(dev_priv, FBC_ROTATION);
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
+ if (in_dbg_master()) {
+ set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable;
+ }
- if (i915_gem_stolen_setup_compression(dev, obj->base.size,
- drm_format_plane_cpp(fb->pixel_format, 0))) {
- if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
+ set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
goto out_disable;
}
@@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev)
dev_priv->fbc.y == crtc->y)
return;
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@ void intel_fbc_update(struct drm_device *dev)
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
}
- intel_fbc_enable(crtc);
+ intel_fbc_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
/* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
}
- i915_gem_stolen_cleanup_compression(dev);
+ __intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ __intel_fbc_update(dev_priv);
+ mutex_unlock(&dev_priv->fbc.lock);
}
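The pattern above recurs throughout this patch: every public FBC entry point takes fbc.lock and delegates to a double-underscored variant, so internal paths that already hold the lock call the __ variant directly and WARN if the lock is missing. A compact sketch of the convention (hypothetical names, plain pthreads standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int enabled;

/* Internal variant: caller must already hold the lock. */
static void __update(void)
{
	enabled = !enabled;
	printf("state is now %d\n", enabled);
}

/* Public entry point: take the lock, delegate, release. */
static void update(void)
{
	pthread_mutex_lock(&lock);
	__update();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	update();		/* external caller */
	pthread_mutex_lock(&lock);
	__update();		/* internal path, lock already held */
	pthread_mutex_unlock(&lock);
	return 0;
}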
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_device *dev = dev_priv->dev;
unsigned int fbc_bits;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
+
if (origin == ORIGIN_GTT)
return;
+ mutex_lock(&dev_priv->fbc.lock);
+
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
- to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
if (dev_priv->fbc.busy_bits)
- intel_fbc_disable(dev);
+ __intel_fbc_disable(dev_priv);
+
+ mutex_unlock(&dev_priv->fbc.lock);
}
void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_device *dev = dev_priv->dev;
+ if (!dev_priv->fbc.enable_fbc)
+ return;
- if (!dev_priv->fbc.busy_bits)
+ if (origin == ORIGIN_GTT)
return;
+ mutex_lock(&dev_priv->fbc.lock);
+
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
- if (!dev_priv->fbc.busy_bits)
- intel_fbc_update(dev);
+ if (!dev_priv->fbc.busy_bits) {
+ __intel_fbc_disable(dev_priv);
+ __intel_fbc_update(dev_priv);
+ }
+
+ mutex_unlock(&dev_priv->fbc.lock);
}
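Together, invalidate and flush implement a small ownership protocol over the frontbuffer bits: rendering marks bits busy and forces FBC off, flushes clear them, and only when nothing is busy any more is FBC re-evaluated. A toy model of the bookkeeping (the plane bit values are made up):

#include <stdio.h>

static unsigned int busy_bits;

/* Rendering started on these planes: stop compressing. */
static void invalidate(unsigned int bits)
{
	busy_bits |= bits;
	if (busy_bits)
		printf("fbc disabled (busy 0x%x)\n", busy_bits);
}

/* Rendering finished: re-evaluate once nothing is busy. */
static void flush(unsigned int bits)
{
	busy_bits &= ~bits;
	if (!busy_bits)
		printf("fbc re-enabled\n");
}

int main(void)
{
	invalidate(0x1 | 0x2);
	flush(0x1);		/* plane 2 still busy: stay off */
	flush(0x2);		/* all idle: compress again */
	return 0;
}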
/**
@@ -702,6 +914,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
+ mutex_init(&dev_priv->fbc.lock);
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->display.fbc_enabled = ilk_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_fbc_enable;
- dev_priv->display.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+ dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->display.fbc_enabled = ilk_fbc_enabled;
- dev_priv->display.enable_fbc = ilk_fbc_enable;
- dev_priv->display.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+ dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+ dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (IS_GM45(dev_priv)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_fbc_enable;
- dev_priv->display.disable_fbc = g4x_fbc_disable;
+ dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+ dev_priv->fbc.disable_fbc = g4x_fbc_disable;
} else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_fbc_enable;
- dev_priv->display.disable_fbc = i8xx_fbc_disable;
+ dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+ dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+ dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6372cfc7d053..8c6a6fa46005 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,16 +55,8 @@ static int intel_fbdev_set_par(struct fb_info *info)
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
- true);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -81,15 +73,8 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
ret = drm_fb_helper_blank(blank, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -107,15 +92,8 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
ret = drm_fb_helper_pan_display(var, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
@@ -126,9 +104,9 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = intel_fbdev_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -177,7 +155,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
@@ -237,9 +215,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
obj = intel_fb->obj;
size = obj->base.size;
- info = framebuffer_alloc(0, &dev->pdev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unpin;
}
@@ -248,24 +226,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
- ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unpin;
- }
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unpin;
- }
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -277,7 +244,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
size);
if (!info->screen_base) {
ret = -ENOSPC;
- goto out_unpin;
+ goto out_destroy_fbi;
}
info->screen_size = size;
@@ -304,6 +271,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -484,18 +453,13 @@ retry:
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
- * usually contains. But since our current fastboot
+ * usually contains. But since our current
* code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly. We don't
- * use hwmode anywhere right now, so use it for this
- * since the fb helper layer wants a pointer to
- * something we own.
+ * into crtc->mode this works out correctly.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
- intel_mode_from_pipe_config(&encoder->crtc->hwmode,
- to_intel_crtc(encoder->crtc)->config);
- modes[i] = &encoder->crtc->hwmode;
+ modes[i] = &encoder->crtc->mode;
}
crtcs[i] = new_crtc;
@@ -550,16 +514,9 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
- if (ifbdev->helper.fbdev) {
- struct fb_info *info = ifbdev->helper.fbdev;
- unregister_framebuffer(info);
- iounmap(info->screen_base);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&ifbdev->helper);
+ drm_fb_helper_release_fbi(&ifbdev->helper);
drm_fb_helper_fini(&ifbdev->helper);
@@ -582,7 +539,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
@@ -590,20 +546,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
/* Find the largest fb */
for_each_crtc(dev, crtc) {
+ struct drm_i915_gem_object *obj =
+ intel_fb_obj(crtc->primary->state->fb);
intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->active || !crtc->primary->fb) {
+ if (!intel_crtc->active || !obj) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
- if (intel_crtc->plane_config.size > max_size) {
+ if (obj->base.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
- plane_config = &intel_crtc->plane_config;
- fb = to_intel_framebuffer(crtc->primary->fb);
- max_size = plane_config->size;
+ fb = to_intel_framebuffer(crtc->primary->state->fb);
+ max_size = obj->base.size;
}
}
@@ -638,7 +595,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, fb->base.pitches[0]);
- plane_config = NULL;
fb = NULL;
break;
}
@@ -659,7 +615,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
- plane_config = NULL;
fb = NULL;
break;
}
@@ -810,7 +765,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
- fb_set_suspend(info, state);
+ drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
}
@@ -825,11 +780,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
{
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct drm_fb_helper *fb_helper;
- if (!dev_priv->fbdev)
+ if (!ifbdev)
return;
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
- if (ret)
+ fb_helper = &ifbdev->helper;
+
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ if (ret) {
DRM_DEBUG("failed to restore crtc mode\n");
+ } else {
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 57095f54c1f2..ac85357010b4 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -65,84 +65,29 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- dpll = I915_READ(dpll_reg);
- if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll &= ~DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
-
- dpll = I915_READ(dpll_reg);
- if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
- }
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
- unsigned frontbuffer_bits,
- struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
- continue;
-
- intel_increase_pllclock(dev, pipe);
- }
-}
-
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring,
enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (!obj->frontbuffer_bits)
return;
- if (ring) {
+ if (origin == ORIGIN_CS) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
mutex_unlock(&dev_priv->fb_tracking.lock);
}
- intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Delay flushing when rings are still busy. */
mutex_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+ if (!frontbuffer_bits)
+ return;
intel_edp_drrs_flush(dev, frontbuffer_bits);
- intel_psr_flush(dev, frontbuffer_bits);
- intel_fbc_flush(dev_priv, frontbuffer_bits);
+ intel_psr_flush(dev, frontbuffer_bits, origin);
+ intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
* then any delayed flushes will be unblocked.
*/
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
+ bool retire, enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned frontbuffer_bits;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
mutex_unlock(&dev_priv->fb_tracking.lock);
}
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}
/**
@@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_psr_single_frame_update(dev);
+ intel_psr_single_frame_update(dev, frontbuffer_bits);
}
/**
@@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
@@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Remove stale busy bits due to the old buffer. */
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
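flip_prepare/flip_complete follow the same bit discipline: preparing a flip records the planes in flip_bits and drops their busy bits, since the new buffer supersedes any pending rendering on the old one, while completion masks out flips that were cancelled in the meantime before flushing. A minimal model (illustrative only):

#include <stdio.h>

static unsigned int busy_bits, flip_bits;

static void flip_prepare(unsigned int bits)
{
	flip_bits |= bits;
	busy_bits &= ~bits;	/* new buffer supersedes pending renders */
}

static void flip_complete(unsigned int bits)
{
	bits &= flip_bits;	/* mask any cancelled flips */
	flip_bits &= ~bits;
	if (bits)
		printf("flush planes 0x%x\n", bits);
}

int main(void)
{
	flip_prepare(0x3);
	flip_complete(0x1);	/* flushes plane 1 */
	flip_complete(0x4);	/* never prepared: masked to nothing */
	return 0;
}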
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 000000000000..18d7f20936c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8 11
+#define GFXCORE_FAMILY_GEN9 12
+#define GFXCORE_FAMILY_FORCE_ULONG 0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL 0
+#define GUC_CTX_PRIORITY_HIGH 1
+#define GUC_CTX_PRIORITY_NORMAL 2
+#define GUC_CTX_PRIORITY_LOW 3
+
+#define GUC_MAX_GPU_CONTEXTS 1024
+#define GUC_INVALID_CTX_ID (GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE 1
+#define WQ_STATUS_SUSPENDED 2
+#define WQ_STATUS_CMD_ERROR 3
+#define WQ_STATUS_ENGINE_ID_NOT_USED 4
+#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_TYPE_SHIFT 0
+#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT 10
+#define WQ_LEN_SHIFT 16
+#define WQ_NO_WCFLUSH_WAIT (1 << 27)
+#define WQ_PRESENT_WORKLOAD (1 << 28)
+#define WQ_WORKLOAD_SHIFT 29
+#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
+#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
+#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT 20
+#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED 1
+#define GUC_DOORBELL_DISABLED 0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
+
+/* The GuC control data is 9 DWORDs (GUC_CTL_CTXINFO through GUC_CTL_DEBUG) */
+#define GUC_CTL_CTXINFO 0
+#define GUC_CTL_CTXNUM_IN16_SHIFT 0
+#define GUC_CTL_BASE_ADDR_SHIFT 12
+#define GUC_CTL_ARAT_HIGH 1
+#define GUC_CTL_ARAT_LOW 2
+#define GUC_CTL_DEVICE_INFO 3
+#define GUC_CTL_GTTYPE_SHIFT 0
+#define GUC_CTL_COREFAMILY_SHIFT 7
+#define GUC_CTL_LOG_PARAMS 4
+#define GUC_LOG_VALID (1 << 0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
+#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
+#define GUC_LOG_CRASH_PAGES 1
+#define GUC_LOG_CRASH_SHIFT 4
+#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_SHIFT 6
+#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_SHIFT 9
+#define GUC_LOG_BUF_ADDR_SHIFT 12
+#define GUC_CTL_PAGE_FAULT_CONTROL 5
+#define GUC_CTL_WA 6
+#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
+#define GUC_CTL_FEATURE 7
+#define GUC_CTL_VCS2_ENABLED (1 << 0)
+#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
+#define GUC_CTL_FEATURE2 (1 << 2)
+#define GUC_CTL_POWER_GATING (1 << 3)
+#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
+#define GUC_CTL_PREEMPTION_LOG (1 << 5)
+#define GUC_CTL_ENABLE_SLPC (1 << 7)
+#define GUC_CTL_DEBUG 8
+#define GUC_LOG_VERBOSITY_SHIFT 0
+#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY_MAX 3
+
+#define GUC_CTL_MAX_DWORDS (GUC_CTL_DEBUG + 1)
+
+struct guc_doorbell_info {
+ u32 db_status;
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+ struct {
+ u32 db_status;
+ u32 cookie;
+ };
+ u64 value_qw;
+} __packed;
+
+#define GUC_MAX_DOORBELLS 256
+#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE (PAGE_SIZE)
+#define GUC_WQ_SIZE (PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+ u32 header;
+ u32 context_desc;
+ u32 ring_tail;
+ u32 fence_id;
+} __packed;
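The header dword of a guc_wq_item packs the item type, target engine and payload length using the WQ_*_SHIFT constants above. A sketch of how such a header could be composed; the engine and length values here are invented, and the encoding the submission code actually uses arrives in a later patch:

#include <stdint.h>
#include <stdio.h>

#define WQ_TYPE_INORDER		(0x3 << 0)
#define WQ_TARGET_SHIFT		10
#define WQ_LEN_SHIFT		16

/* Compose a header for an in-order item (field layout per the defines). */
static uint32_t guc_wq_header(uint32_t engine, uint32_t len_dwords)
{
	return WQ_TYPE_INORDER |
	       (engine << WQ_TARGET_SHIFT) |
	       (len_dwords << WQ_LEN_SHIFT);
}

int main(void)
{
	/* engine 0, 3-dword payload: prints header = 0x00030003 */
	printf("header = 0x%08x\n", (unsigned)guc_wq_header(0, 3));
	return 0;
}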
+
+struct guc_process_desc {
+ u32 context_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[30];
+} __packed;
+
+/* engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET 0
+#define GUC_ELC_ENGINE_OFFSET 29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+ u32 context_desc;
+ u32 context_id;
+ u32 ring_status;
+ u32 ring_lcra;
+ u32 ring_begin;
+ u32 ring_end;
+ u32 ring_next_free_location;
+ u32 ring_current_tail_pointer_value;
+ u8 engine_state_submit_value;
+ u8 engine_state_wait_value;
+ u16 pagefault_count;
+ u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and Driver */
+struct guc_context_desc {
+ u32 sched_common_area;
+ u32 context_id;
+ u32 pas_id;
+ u8 engines_used;
+ u64 db_trigger_cpu;
+ u32 db_trigger_uk;
+ u64 db_trigger_phy;
+ u16 db_id;
+
+ struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+ u8 attribute;
+
+ u32 priority;
+
+ u32 wq_sampled_tail_offset;
+ u32 wq_total_submit_enqueues;
+
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+
+ u32 engine_presence;
+
+ u32 reserved0[1];
+ u64 reserved1[1];
+
+ u64 desc_private;
+} __packed;
+
+/* This action will be programmed in C180 - SOFT_SCRATCH_0_REG */
+enum host2guc_action {
+ HOST2GUC_ACTION_DEFAULT = 0x0,
+ HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+ HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000)
+#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x))
+
+/* GuC will return status back to SOFT_SCRATCH_0_REG */
+enum guc2host_status {
+ GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+ GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+ GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+ GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
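Since every response sets all four GUC2HOST_RESPONSE_MASK bits, the host can tell a reply apart from the command value it wrote into the same scratch register. A small demonstration of the decoding macros, mirrored into a standalone program:

#include <stdint.h>
#include <stdio.h>

#define GUC2HOST_RESPONSE_MASK	((uint32_t)0xF0000000)
#define GUC2HOST_IS_RESPONSE(x)	((uint32_t)(x) >= GUC2HOST_RESPONSE_MASK)
#define GUC2HOST_STATUS(x)	(GUC2HOST_RESPONSE_MASK | (x))

int main(void)
{
	/* What the scratch register would hold after a doorbell failure. */
	uint32_t scratch = GUC2HOST_STATUS(0x10);

	if (GUC2HOST_IS_RESPONSE(scratch))
		printf("response, detail bits 0x%x\n",
		       (unsigned)(scratch & ~GUC2HOST_RESPONSE_MASK));
	else
		printf("no reply yet, register still holds the command\n");
	return 0;
}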
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e97731aab6dc..dcd336bcdfe7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
- return false;
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
- return false;
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
- return val & VIDEO_DIP_ENABLE;
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return false;
- return false;
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ return false;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
- return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
- VIDEO_DIP_ENABLE_VS_HSW);
+ return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
/*
@@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
+ }
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
@@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~VIDEO_DIP_ENABLE_VENDOR;
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ /*
+ * HDMI cloning is only supported on g4x, which doesn't
+ * support deep color or GCP infoframes anyway, so there is
+ * no need to worry about multiple HDMI sinks here.
+ */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return connector->display_info.bpc > 8;
+
+ return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ * phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pixels_per_group;
+
+ switch (pipe_bpp) {
+ case 30:
+ /* 4 pixels in 5 clocks */
+ pixels_per_group = 4;
+ break;
+ case 36:
+ /* 2 pixels in 3 clocks */
+ pixels_per_group = 2;
+ break;
+ case 48:
+ /* 1 pixel in 2 clocks */
+ pixels_per_group = 1;
+ break;
+ default:
+ /* phase information not relevant for 8bpc */
+ return false;
+ }
+
+ return mode->crtc_hdisplay % pixels_per_group == 0 &&
+ mode->crtc_htotal % pixels_per_group == 0 &&
+ mode->crtc_hblank_start % pixels_per_group == 0 &&
+ mode->crtc_hblank_end % pixels_per_group == 0 &&
+ mode->crtc_hsync_start % pixels_per_group == 0 &&
+ mode->crtc_hsync_end % pixels_per_group == 0 &&
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+ mode->crtc_htotal/2 % pixels_per_group == 0);
+}
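As a worked example of the divisibility rule: standard 1080p timings (hdisplay 1920, hsync 2008-2052, htotal 2200) are all even, so a 36bpp configuration (2 pixels per 3 clocks) may indicate default_phase. A trimmed-down check, with the hblank and interlace terms omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

/* Subset of gcp_default_phase_possible(): every horizontal timing value
 * must be a multiple of the pixel group size for the packing phase to
 * return to 0 at each boundary the spec calls out. */
static bool phase_ok(int hdisplay, int hsync_start, int hsync_end,
		     int htotal, int pixels_per_group)
{
	return hdisplay % pixels_per_group == 0 &&
	       hsync_start % pixels_per_group == 0 &&
	       hsync_end % pixels_per_group == 0 &&
	       htotal % pixels_per_group == 0;
}

int main(void)
{
	/* 1080p: 1920/2008/2052/2200, 36bpp packs 2 pixels per group */
	printf("%s\n", phase_ok(1920, 2008, 2052, 2200, 2) ? "yes" : "no");
	return 0;
}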
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ u32 reg, val = 0;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv->dev))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return false;
+
+ /* Indicate color depth whenever the sink supports deep color */
+ if (hdmi_sink_is_deep_color(encoder))
+ val |= GCP_COLOR_INDICATION;
+
+ /* Enable default_phase whenever the display mode is suitably aligned */
+ if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+ &crtc->config->base.adjusted_mode))
+ val |= GCP_DEFAULT_PHASE_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ return val != 0;
+}
+
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- }
+ WARN(val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
- VIDEO_DIP_ENABLE_GCP);
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
@@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
/* Set both together, unset both together: see the spec. */
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
- VIDEO_DIP_ENABLE_GCP);
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
- val &= ~VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- if (val & VIDEO_DIP_ENABLE) {
- val &= ~VIDEO_DIP_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- }
+ WARN(val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
- val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
- VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
assert_hdmi_port_disabled(intel_hdmi);
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
if (!enable) {
- I915_WRITE(reg, 0);
+ I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
- val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
- VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+ if (intel_hdmi_set_gcp_infoframe(encoder))
+ val |= VIDEO_DIP_ENABLE_GCP_HSW;
I915_WRITE(reg, val);
POSTING_READ(reg);
@@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
else
dotclock = pipe_config->port_clock;
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ WARN_ON(!crtc->config->has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
- u32 enable_bits = SDVO_ENABLE;
- if (intel_crtc->config->has_audio)
- enable_bits |= SDVO_AUDIO_ENABLE;
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
+
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
- /* HW workaround for IBX, we need to move the port to transcoder A
- * before disabling it, so restore the transcoder select bit here. */
- if (HAS_PCH_IBX(dev))
- enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
- /* HW workaround, need to toggle enable bit off and on for 12bpc, but
- * we do this anyway which shows more stable in testing.
+ /*
+ * HW workaround, need to write this twice for an issue
+ * that may result in the first write getting masked.
*/
- if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround, need to toggle enable bit off and on
+ * for 12bpc with pixel repeat.
+ *
+ * FIXME: BSpec says this should be done at the end of
+ * the modeset sequence, so not sure if this isn't too soon.
+ */
+ if (crtc->config->pipe_bpp > 24 &&
+ crtc->config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround, need to write this twice for an issue
+ * that may result in the first write getting masked.
+ */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
}
- temp |= enable_bits;
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (crtc->config->has_audio)
+ temp |= SDVO_AUDIO_ENABLE;
+
+ /*
+ * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+ *
+ * The procedure for 12bpc is as follows:
+ * 1. disable HDMI clock gating
+ * 2. enable HDMI with 8bpc
+ * 3. enable HDMI with 12bpc
+ * 4. enable HDMI clock gating
+ */
+
+ if (crtc->config->pipe_bpp > 24) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ I915_READ(TRANS_CHICKEN1(pipe)) |
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= SDVO_COLOR_FORMAT_8bpc;
+ }
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- /* HW workaround, need to write this twice for issue that may result
- * in first write getting masked.
- */
- if (HAS_PCH_SPLIT(dev)) {
+ if (crtc->config->pipe_bpp > 24) {
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= HDMI_COLOR_FORMAT_12bpc;
+
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- }
- if (intel_crtc->config->has_audio) {
- WARN_ON(!intel_crtc->config->has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
- intel_audio_codec_enable(encoder);
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ I915_READ(TRANS_CHICKEN1(pipe)) &
+ ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
+
+ if (crtc->config->has_audio)
+ intel_enable_hdmi_audio(encoder);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+
+ intel_hdmi->set_infoframes(&encoder->base, false, NULL);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
intel_disable_hdmi(encoder);
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
@@ -939,24 +1164,51 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
}
static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ int clock, bool respect_dvi_limit)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+ if (clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+ return MODE_CLOCK_HIGH;
+
+ /* BXT DPLL can't generate 223-240 MHz */
+ if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ /* CHV DPLL can't generate 216-240 MHz */
+ if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
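A self-contained userspace model of the check above, with illustrative clocks (all in kHz, as in the driver); the limit argument stands in for hdmi_port_clock_limit(), and the sample values in main() are made up for demonstration:

#include <stdio.h>
#include <stdbool.h>

/* Model of hdmi_port_clock_valid(): the 25000 kHz floor and the
 * BXT/CHV DPLL holes mirror the checks above. */
static const char *check_clock(int clock, int limit, bool bxt, bool chv)
{
	if (clock < 25000)
		return "MODE_CLOCK_LOW";
	if (clock > limit)
		return "MODE_CLOCK_HIGH";
	if (bxt && clock > 223333 && clock < 240000)
		return "MODE_CLOCK_RANGE";	/* BXT DPLL hole */
	if (chv && clock > 216000 && clock < 240000)
		return "MODE_CLOCK_RANGE";	/* CHV DPLL hole */
	return "MODE_OK";
}

int main(void)
{
	/* 594 MHz against a 300 MHz limit, and 230 MHz on BXT,
	 * which lands inside the DPLL hole. */
	printf("%s\n", check_clock(594000, 300000, false, false));
	printf("%s\n", check_clock(230000, 300000, true, false));
	return 0;
}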
+
+static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- int clock = mode->clock;
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ enum drm_mode_status status;
+ int clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
- if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
- true))
- return MODE_CLOCK_HIGH;
- if (clock < 20000)
- return MODE_CLOCK_LOW;
+ /* check if we can do 8bpc */
+ status = hdmi_port_clock_valid(hdmi, clock, true);
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
- return MODE_OK;
+ return status;
}
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+ int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+ int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
+ clock_8bpc *= 2;
+ clock_12bpc *= 2;
}
if (intel_hdmi->color_range)
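The 3/2 factor above follows from HDMI deep color: the TMDS clock scales with bpc, so a 12bpc stream needs 12/8 = 1.5x the 8bpc clock, and pixel doubling doubles both. A worked example with illustrative 480i numbers:

/* Worked example (values in kHz, illustrative):
 * 480i pixel clock 27000, DBLCLK so pixel_multiplier = 2:
 *   clock_8bpc  = 27000 * 2         = 54000
 *   clock_12bpc = 27000 * 3 / 2 * 2 = 81000
 * Both are then validated against hdmi_port_clock_valid(). */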
@@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
- clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(pipe_config) &&
- 0 /* FIXME 12bpc support totally broken */) {
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
+
+ pipe_config->port_clock = clock_8bpc;
}
if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->crtc_clock > portclock_limit) {
- DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+ if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+ false) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
return false;
}
@@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- intel_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
@@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- intel_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
@@ -1653,7 +1909,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1702,6 +1958,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
+ uint8_t alternate_ddc_pin;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -1735,6 +1992,26 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
+ case PORT_E:
+		/* On SKL, PORT E doesn't have a separate GMBUS pin.
+		 * We rely on VBT to set a proper alternate GMBUS pin. */
+ alternate_ddc_pin =
+ dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+ switch (alternate_ddc_pin) {
+ case DDC_PIN_B:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+ break;
+ case DDC_PIN_C:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+ break;
+ case DDC_PIN_D:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(alternate_ddc_pin);
+ }
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
@@ -1827,7 +2104,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- intel_encoder->enable = intel_enable_hdmi;
+ if (HAS_PCH_CPT(dev))
+ intel_encoder->enable = cpt_enable_hdmi;
+ else if (HAS_PCH_IBX(dev))
+ intel_encoder->enable = ibx_enable_hdmi;
+ else
+ intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 000000000000..53c0173a39fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin per a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen while a DisplayPort sink is connected. Hence, on platforms whose DP
+ * callback is handled by i915_digport_work_func(), hpd is never re-enabled,
+ * because it was never expected to be disabled in the first place. This is
+ * specific to DP sinks handled by that routine; any other display, such as
+ * HDMI or DVI, enabled on the same port goes through i915_hotplug_work_func(),
+ * where this logic is handled.
+ */
+
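A rough userspace model of the event_bits handoff described above (hypothetical code: a pthread mutex stands in for dev_priv->irq_lock, and scheduling of the work is elided):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t event_bits;

/* "Top half": publish fired pins under the lock. */
static void irq_handler(uint32_t pin_mask)
{
	pthread_mutex_lock(&irq_lock);
	event_bits |= pin_mask;
	pthread_mutex_unlock(&irq_lock);
	/* ...then schedule the hotplug work */
}

/* "Bottom half": snapshot and clear the bits under the lock,
 * then run the (slow) detect hooks outside it. */
static uint32_t hotplug_work_snapshot(void)
{
	uint32_t bits;

	pthread_mutex_lock(&irq_lock);
	bits = event_bits;
	event_bits = 0;
	pthread_mutex_unlock(&irq_lock);
	return bits;
}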
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ *port = PORT_A;
+ return true;
+ case HPD_PORT_B:
+ *port = PORT_B;
+ return true;
+ case HPD_PORT_C:
+ *port = PORT_C;
+ return true;
+ case HPD_PORT_D:
+ *port = PORT_D;
+ return true;
+ case HPD_PORT_E:
+ *port = PORT_E;
+ return true;
+ default:
+ return false; /* no hpd */
+ }
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
+#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+ enum hpd_pin pin)
+{
+ unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+ unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+ bool storm = false;
+
+ if (!time_in_range(jiffies, start, end)) {
+ dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+ dev_priv->hotplug.stats[pin].count = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+ } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+ dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+ DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ storm = true;
+ } else {
+ dev_priv->hotplug.stats[pin].count++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ dev_priv->hotplug.stats[pin].count);
+ }
+
+ return storm;
+}
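A self-contained model of the window counter above, with jiffies replaced by a plain millisecond timestamp; the threshold and period mirror the #defines, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000
#define STORM_THRESHOLD  5

struct pin_stats { long last_ms; int count; };

/* Model of intel_hpd_irq_storm_detect(): more than STORM_THRESHOLD
 * irqs inside a STORM_PERIOD_MS window counts as a storm; the
 * window restarts once "now" falls outside it. */
static bool storm_detect(struct pin_stats *s, long now_ms)
{
	if (now_ms < s->last_ms || now_ms >= s->last_ms + STORM_PERIOD_MS) {
		s->last_ms = now_ms;	/* window expired: restart */
		s->count = 0;
		return false;
	}
	if (s->count > STORM_THRESHOLD)
		return true;
	s->count++;
	return false;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };

	/* 8 interrupts 10 ms apart: the 7th and 8th report a storm */
	for (long t = 0; t < 80; t += 10)
		printf("t=%ld storm=%d\n", t, storm_detect(&s, t));
	return 0;
}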
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ enum hpd_pin pin;
+ bool hpd_disabled = false;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (!intel_encoder)
+ continue;
+
+ pin = intel_encoder->hpd_pin;
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ continue;
+
+ DRM_INFO("HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ connector->name);
+
+ dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+
+ /* Enable polling and queue hotplug re-enabling. */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+ msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+ }
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ hotplug.reenable_work.work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_hpd_pin(i) {
+ struct drm_connector *connector;
+
+ if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+ continue;
+
+ dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (intel_connector->encoder->hpd_pin == i) {
+ if (connector->polled != intel_connector->polled)
+ DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+ connector->name);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+ }
+ }
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ enum drm_connector_status old_status;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status == connector->status)
+ return false;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i;
+ u32 old_bits = 0;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ long_port_mask = dev_priv->hotplug.long_port_mask;
+ dev_priv->hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->hotplug.short_port_mask;
+ dev_priv->hotplug.short_port_mask = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hotplug.irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ enum irqreturn ret;
+
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= (1 << intel_dig_port->base.hpd_pin);
+ }
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.event_bits |= old_bits;
+ spin_unlock_irq(&dev_priv->irq_lock);
+ schedule_work(&dev_priv->hotplug.hotplug_work);
+ }
+}
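The dispatch loop above can be modeled in isolation; a sketch where pulse() and MAX_PORTS are hypothetical stand-ins for intel_dig_port->hpd_pulse() and I915_MAX_PORTS (the real code keys old_bits on hpd_pin, not the port index):

#include <stdbool.h>
#include <stdint.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };
#define MAX_PORTS 6	/* illustrative */

/* Hypothetical stub for intel_dig_port->hpd_pulse(). */
static enum irqreturn pulse(int port, bool long_hpd)
{
	(void)port; (void)long_hpd;
	return IRQ_HANDLED;
}

static uint32_t dispatch_ports(uint32_t long_mask, uint32_t short_mask)
{
	uint32_t old_bits = 0;

	for (int i = 0; i < MAX_PORTS; i++) {
		bool long_hpd = long_mask & (1u << i);

		if (!long_hpd && !(short_mask & (1u << i)))
			continue;
		/* a handler reporting IRQ_NONE falls back to the
		 * regular hotplug path, as with old_bits above */
		if (pulse(i, long_hpd) == IRQ_NONE)
			old_bits |= 1u << i;
	}
	return old_bits;
}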
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ bool changed = false;
+ u32 hpd_event_bits;
+
+ mutex_lock(&mode_config->mutex);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ hpd_event_bits = dev_priv->hotplug.event_bits;
+ dev_priv->hotplug.event_bits = 0;
+
+ /* Disable hotplug on connectors that hit an irq storm. */
+ intel_hpd_irq_storm_disable(dev_priv);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
+ intel_encoder = intel_connector->encoder;
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+ connector->name, intel_encoder->hpd_pin);
+ if (intel_encoder->hot_plug)
+ intel_encoder->hot_plug(intel_encoder);
+ if (intel_hpd_irq_event(dev, connector))
+ changed = true;
+ }
+ }
+ mutex_unlock(&mode_config->mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+ u32 pin_mask, u32 long_mask)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ enum port port;
+ bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ bool is_dig_port;
+
+ if (!pin_mask)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ for_each_hpd_pin(i) {
+ if (!(BIT(i) & pin_mask))
+ continue;
+
+ is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+ dev_priv->hotplug.irq_port[port];
+
+ if (is_dig_port) {
+ bool long_hpd = long_mask & BIT(i);
+
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ long_hpd ? "long" : "short");
+ /*
+ * For long HPD pulses we want to have the digital queue happen,
+ * but we still want HPD storm detection to function.
+ */
+ queue_dig = true;
+ if (long_hpd) {
+ dev_priv->hotplug.long_port_mask |= (1 << port);
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->hotplug.short_port_mask |= (1 << port);
+ continue;
+ }
+ }
+
+ if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+			 * hotplug bits themselves. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt on pin %d although disabled\n", i);
+ continue;
+ }
+
+ if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+ continue;
+
+ if (!is_dig_port) {
+ dev_priv->hotplug.event_bits |= BIT(i);
+ queue_hp = true;
+ }
+
+ if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+ dev_priv->hotplug.event_bits &= ~BIT(i);
+ storm_detected = true;
+ }
+ }
+
+ if (storm_detected)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock(&dev_priv->irq_lock);
+
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ if (queue_dig)
+ queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug.hotplug_work);
+}
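A worked trace of the handler, under the assumption that port B is a digital port with an hpd_pulse hook and port D is not:

/* Hypothetical walk through intel_hpd_irq_handler():
 *   pin_mask  = BIT(HPD_PORT_B) | BIT(HPD_PORT_D)
 *   long_mask = BIT(HPD_PORT_B)
 * HPD_PORT_B: digital port, long pulse -> long_port_mask gains
 *   PORT_B, queue_dig = true; storm stats are still updated.
 * HPD_PORT_D: not a digital port -> event_bits |= BIT(HPD_PORT_D),
 *   queue_hp = true; storm detection may clear the bit again.
 * Afterwards the dig port work runs on hotplug.dp_wq and the
 * regular hotplug work is scheduled on the system workqueue. */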
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_hpd_pin(i) {
+ dev_priv->hotplug.stats[i].count = 0;
+ dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ }
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (intel_connector->mst_port)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+ intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ dev_priv->hotplug.long_port_mask = 0;
+ dev_priv->hotplug.short_port_mask = 0;
+ dev_priv->hotplug.event_bits = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+ cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..72e0edd7bbde 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -135,6 +135,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_mocs.h"
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
@@ -190,9 +191,7 @@
#define GEN8_CTX_PRIVILEGE (1<<8)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
- const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
- ppgtt->pdp.page_directory[n]->daddr : \
- ppgtt->scratch_pd->daddr; \
+ const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
@@ -211,9 +210,9 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
{
+ struct intel_engine_cs *ring = rq->ring;
struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
@@ -294,55 +294,59 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
return desc;
}
-static void execlists_elsp_write(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj0,
- struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+ struct drm_i915_gem_request *rq1)
{
+ struct intel_engine_cs *ring = rq0->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint64_t temp = 0;
- uint32_t desc[4];
+ uint64_t desc[2];
- /* XXX: You must always write both descriptors in the order below. */
- if (ctx_obj1)
- temp = execlists_ctx_descriptor(ring, ctx_obj1);
- else
- temp = 0;
- desc[1] = (u32)(temp >> 32);
- desc[0] = (u32)temp;
+ if (rq1) {
+ desc[1] = execlists_ctx_descriptor(rq1);
+ rq1->elsp_submitted++;
+ } else {
+ desc[1] = 0;
+ }
- temp = execlists_ctx_descriptor(ring, ctx_obj0);
- desc[3] = (u32)(temp >> 32);
- desc[2] = (u32)temp;
+ desc[0] = execlists_ctx_descriptor(rq0);
+ rq0->elsp_submitted++;
+ /* You must always write both descriptors in the order below. */
spin_lock(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
- I915_WRITE_FW(RING_ELSP(ring), desc[1]);
- I915_WRITE_FW(RING_ELSP(ring), desc[0]);
- I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+ I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+ I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
- /* ELSP is a wo register, so use another nearby reg for posting instead */
+	/* ELSP is a write-only register, use another nearby reg for posting */
POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
spin_unlock(&dev_priv->uncore.lock);
}
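The ELSP expects the two 64-bit descriptors as four 32-bit writes in a fixed order; a minimal sketch of the split, with elsp_write() as a hypothetical stand-in for I915_WRITE_FW(RING_ELSP):

#include <stdint.h>

/* Hypothetical stub for the forcewaked register write. */
static void elsp_write(uint32_t v) { (void)v; }

static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

/* Sketch of the submission order above: descriptor 1 (zero when only
 * one request is submitted) goes first, upper half before lower half;
 * the final write of desc0's lower half triggers the context load. */
static void elsp_submit(uint64_t desc0, uint64_t desc1)
{
	elsp_write(upper_32(desc1));
	elsp_write(lower_32(desc1));
	elsp_write(upper_32(desc0));
	elsp_write(lower_32(desc0));	/* load happens after this */
}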
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
- struct drm_i915_gem_object *ring_obj,
- struct i915_hw_ppgtt *ppgtt,
- u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
{
+ struct intel_engine_cs *ring = rq->ring;
+ struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
+ BUG_ON(!ctx_obj);
+ WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+ WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
page = i915_gem_object_get_page(ctx_obj, 1);
reg_state = kmap_atomic(page);
- reg_state[CTX_RING_TAIL+1] = tail;
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
/* True PPGTT with dynamic page allocation: update PDP registers and
* point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
return 0;
}
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
- struct intel_context *to0, u32 tail0,
- struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+ struct drm_i915_gem_request *rq1)
{
- struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
- struct drm_i915_gem_object *ctx_obj1 = NULL;
- struct intel_ringbuffer *ringbuf1 = NULL;
-
- BUG_ON(!ctx_obj0);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
- WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
-
- execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
+ execlists_update_context(rq0);
- if (to1) {
- ringbuf1 = to1->engine[ring->id].ringbuf;
- ctx_obj1 = to1->engine[ring->id].state;
- BUG_ON(!ctx_obj1);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
- WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
+ if (rq1)
+ execlists_update_context(rq1);
- execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
- }
-
- execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+ execlists_elsp_write(rq0, rq1);
}
static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- execlists_submit_contexts(ring, req0->ctx, req0->tail,
- req1 ? req1->ctx : NULL,
- req1 ? req1->tail : 0);
-
- req0->elsp_submitted++;
- if (req1)
- req1->elsp_submitted++;
+ execlists_submit_requests(req0, req1);
}
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ continue;
+
if (status & GEN8_CTX_STATUS_PREEMPTED) {
if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
ring->next_context_status_buffer = write_pointer % 6;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- ((u32)ring->next_context_status_buffer & 0x07) << 8);
+ _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
}
-static int execlists_context_queue(struct intel_engine_cs *ring,
- struct intel_context *to,
- u32 tail,
- struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *ring = request->ring;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (to != ring->default_context)
- intel_lr_context_pin(ring, to);
+ if (request->ctx != ring->default_context)
+ intel_lr_context_pin(request);
- if (!request) {
- /*
- * If there isn't a request associated with this submission,
- * create one as a temporary holder.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
- request->ring = ring;
- request->ctx = to;
- kref_init(&request->ref);
- i915_gem_context_reference(request->ctx);
- } else {
- i915_gem_request_reference(request);
- WARN_ON(to != request->ctx);
- }
- request->tail = tail;
+ i915_gem_request_reference(request);
+
+ request->tail = request->ringbuf->tail;
spin_lock_irq(&ring->execlist_lock);
@@ -585,7 +553,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
struct drm_i915_gem_request,
execlist_link);
- if (to == tail_req->ctx) {
+ if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return 0;
}
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
@@ -614,8 +581,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(ringbuf, ctx,
- I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
@@ -623,12 +589,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- const unsigned other_rings = ~intel_ring_flag(ring);
+ const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -638,7 +602,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
@@ -655,59 +619,59 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf, ctx);
+ return logical_ring_invalidate_all_caches(req);
}
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
- struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
int ret;
- if (ctx != request->ring->default_context) {
- ret = intel_lr_context_pin(request->ring, ctx);
+ request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+ if (request->ctx != request->ring->default_context) {
+ ret = intel_lr_context_pin(request);
if (ret)
return ret;
}
- request->ringbuf = ctx->engine[request->ring->id].ringbuf;
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
-
return 0;
}
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
int bytes)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_i915_gem_request *request;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *ring = req->ring;
+ struct drm_i915_gem_request *target;
unsigned space;
int ret;
if (intel_ring_space(ringbuf) >= bytes)
return 0;
- list_for_each_entry(request, &ring->request_list, list) {
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
+ list_for_each_entry(target, &ring->request_list, list) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering.
*/
- if (request->ringbuf != ringbuf)
+ if (target->ringbuf != ringbuf)
continue;
/* Would completion of this request free enough space? */
- space = __intel_ring_space(request->postfix, ringbuf->tail,
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
ringbuf->size);
if (space >= bytes)
break;
}
- if (WARN_ON(&request->list == &ring->request_list))
+ if (WARN_ON(&target->list == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(request);
+ ret = i915_wait_request(target);
if (ret)
return ret;
@@ -717,7 +681,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = request->ring;
- intel_logical_ring_advance(ringbuf);
+ intel_logical_ring_advance(request->ringbuf);
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(ring, ctx, ringbuf->tail, request);
+ execlists_context_queue(request);
}
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
- if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
- if (ret)
- return ret;
- }
-
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
@@ -759,25 +713,50 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
-
- return 0;
}
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
{
- int ret;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int remain_actual = ringbuf->size - ringbuf->tail;
+ int ret, total_bytes, wait_bytes = 0;
+ bool need_wrap = false;
+
+ if (ringbuf->reserved_in_use)
+ total_bytes = bytes;
+ else
+ total_bytes = bytes + ringbuf->reserved_size;
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf, ctx);
- if (unlikely(ret))
- return ret;
+ if (unlikely(bytes > remain_usable)) {
+ /*
+ * Not enough space for the basic request. So need to flush
+ * out the remainder and then wait for base + reserved.
+ */
+ wait_bytes = remain_actual + total_bytes;
+ need_wrap = true;
+ } else {
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+			 * falls off the end. So only need to wait for the
+ * reserved size after flushing out the remainder.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+ }
}
- if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+ if (wait_bytes) {
+ ret = logical_ring_wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
+
+ if (need_wrap)
+ __wrap_ring_buffer(ringbuf);
}
return 0;
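The wrap/wait arithmetic above can be checked standalone; a model with illustrative sizes (not the driver's structs, and reserved_in_use omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

struct ring { int size, effective_size, tail, space, reserved_size; };

/* Model of logical_ring_prepare(): decide how many bytes to wait
 * for and whether the buffer must wrap first, mirroring the three
 * cases above. */
static int prepare(const struct ring *r, int bytes, bool *need_wrap)
{
	int remain_usable = r->effective_size - r->tail;
	int remain_actual = r->size - r->tail;
	int total = bytes + r->reserved_size;

	*need_wrap = false;
	if (bytes > remain_usable) {		/* request itself must wrap */
		*need_wrap = true;
		return remain_actual + total;
	}
	if (total > remain_usable) {		/* only the reserve wraps */
		*need_wrap = true;
		return remain_actual + r->reserved_size;
	}
	if (total > r->space)			/* fits, but must wait */
		return total;
	return 0;				/* fits in free space now */
}

int main(void)
{
	struct ring r = { 4096, 3968, 3900, 800, 160 };
	bool wrap;
	int wait = prepare(&r, 256, &wrap);

	/* 256 > 68 usable bytes: must wrap, wait = 196 + 416 = 612 */
	printf("wait=%d wrap=%d\n", wait, wrap);
	return 0;
}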
@@ -786,7 +765,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
/**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
*
- * @ringbuf: Logical ringbuffer.
+ * @request: The request to start some new work for
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
*
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
int ret;
+ WARN_ON(req == NULL);
+ dev_priv = req->ring->dev->dev_private;
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
- ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- /* Preallocate the olr before touching the ring */
- ret = i915_gem_request_alloc(ring, ctx);
+ ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
- ringbuf->space -= num_dwords * sizeof(uint32_t);
+ req->ringbuf->space -= num_dwords * sizeof(uint32_t);
return 0;
}
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+ /*
+ * The first call merely notes the reserve request and is common for
+ * all back ends. The subsequent localised _begin() call actually
+ * ensures that the reservation is available. Without the begin, if
+ * the request creator immediately submitted the request without
+ * adding any commands to it then there might not actually be
+ * sufficient room for the submission commands.
+ */
+ intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+ return intel_logical_ring_begin(request, 0);
+}
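The reserve/begin split means callers typically pair the two; a usage sketch of the assumed caller shape, not verbatim driver code:

/* Usage sketch: reserve completion-space first, then begin for the
 * commands the caller itself wants to emit. */
static int emit_some_work(struct drm_i915_gem_request *request)
{
	int ret;

	ret = intel_logical_ring_reserve_space(request);
	if (ret)
		return ret;

	ret = intel_logical_ring_begin(request, 4);	/* 4 dwords of our own */
	if (ret)
		return ret;

	/* ... intel_logical_ring_emit() the four dwords ... */
	return 0;
}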
+
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
@@ -839,16 +829,15 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
*
* Return: non-zero if the submission fails.
*/
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags)
+ struct list_head *vmas)
{
+ struct drm_device *dev = params->dev;
+ struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+ u64 exec_start;
int instp_mode;
u32 instp_mask;
int ret;
@@ -899,13 +888,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+ ret = execlists_move_to_gpu(params->request, vmas);
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ ret = intel_logical_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -918,14 +907,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode;
}
- ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+ exec_start = params->batch_obj_vm_offset +
+ args->batch_start_offset;
+
+ ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+ i915_gem_execbuffer_retire_commands(params);
return 0;
}
@@ -950,7 +942,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
ctx->engine[ring->id].state;
if (ctx_obj && (ctx != ring->default_context))
- intel_lr_context_unpin(ring, ctx);
+ intel_lr_context_unpin(req);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -978,16 +970,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = ringbuf->ring;
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -995,15 +986,15 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = rq->ring;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = rq->ringbuf;
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (ctx->engine[ring->id].pin_count++ == 0) {
+ if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
@@ -1012,6 +1003,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+
+ ctx_obj->dirty = true;
}
return ret;
@@ -1019,31 +1012,31 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
reset_pin_count:
- ctx->engine[ring->id].pin_count = 0;
+ rq->ctx->engine[ring->id].pin_count = 0;
return ret;
}
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = rq->ring;
+ struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--ctx->engine[ring->id].pin_count == 0) {
+ if (--rq->ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct intel_engine_cs *ring = req->ring;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1052,11 +1045,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf, ctx);
+ ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+ ret = intel_logical_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1070,13 +1063,361 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(ringbuf, ctx);
+ ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
return 0;
}
+#define wa_ctx_emit(batch, index, cmd) \
+ do { \
+ int __index = (index)++; \
+ if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+ return -ENOSPC; \
+ } \
+ batch[__index] = (cmd); \
+ } while (0)
+
+
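Note that on overflow wa_ctx_emit() returns -ENOSPC from the *enclosing* function, so it is only usable inside functions returning int, and index must be an lvalue since the macro post-increments it. A usage sketch with an assumed register/value pair:

/* Usage sketch: emit one LRI (register, value) pair; any overflow
 * returns -ENOSPC from this function via the macro itself. */
static int emit_lri(uint32_t *batch, uint32_t index,
		    uint32_t reg, uint32_t val)
{
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit(batch, index, reg);
	wa_ctx_emit(batch, index, val);
	return index;	/* next free slot */
}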
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
+ * PIPE_CONTROL instruction. This is required for the flush to happen correctly
+ * but there is a slight complication as this is applied in WA batch where the
+ * values are only initialized once so we cannot take register value at the
+ * beginning and reuse it further; hence we save its value to memory, upload a
+ * constant value with bit21 set and then we restore it back with the saved value.
+ * To simplify the WA, a constant value is formed by using the default value
+ * of this register. This shouldn't be a problem because we are only modifying
+ * it for a short period, and this batch is non-preemptible. We can of course
+ * use additional instructions that read the actual value of the register
+ * at that time and set our bit of interest but it makes the WA complicated.
+ *
+ * This WA is also required for Gen9 so extracting as a function avoids
+ * code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+ uint32_t *const batch,
+ uint32_t index)
+{
+ uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * WaDisableLSQCROPERFforOCL:skl
+ * This WA is implemented in skl_init_clock_gating() but since
+ * this batch updates GEN8_L3SQCREG4 with default value we need to
+ * set this bit here to retain the WA during flush.
+ */
+ if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+ l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+ wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, 0);
+
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, l3sqc4_flush);
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE));
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+
+ wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT));
+ wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+ wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, 0);
+
+ return index;
+}
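Schematically, the function above is a save-set-restore sequence around the flush:

/* 1. MI_STORE_REGISTER_MEM: save GEN8_L3SQCREG4 to scratch + 256
 * 2. MI_LOAD_REGISTER_IMM:  load default value with the flush bit set
 * 3. PIPE_CONTROL:          CS stall + DC flush while the bit is set
 * 4. MI_LOAD_REGISTER_MEM:  restore the value saved in step 1
 * The register therefore ends up unchanged even though a WA batch
 * cannot do a read-modify-write. */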
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t offset,
+ uint32_t start_alignment)
+{
+ return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t offset,
+ uint32_t size_alignment)
+{
+ wa_ctx->size = offset - wa_ctx->offset;
+
+ WARN(wa_ctx->size % size_alignment,
+ "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+ wa_ctx->size, size_alignment);
+ return 0;
+}
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ * @batch: page in which WA are loaded
+ * @offset: specifies the start of the batch; it should be cache-aligned, and
+ * is updated in place with the offset at which the batch ends. The batch size
+ * is counted in DWORDs, though the HW expects it in terms of cachelines.
+ * Typically we only have one indirect_ctx and one per_ctx batch buffer which
+ * are initialized at the beginning and shared across all contexts, but this
+ * field helps us to have multiple batches at different offsets and select
+ * them based on some criteria. At the moment this batch always starts at the
+ * beginning of the page and we don't yet have multiple wa_ctx batch buffers.
+ *
+ * The number of WAs applied is not known up front; this field is used to
+ * return the number of DWORDs written.
+ *
+ * Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ * added as padding to make it cacheline aligned. MI_BATCH_BUFFER_END is
+ * added to the per-ctx batch, and the two together make a complete batch
+ * buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ uint32_t scratch_addr;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:bdw,chv */
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+ if (IS_BROADWELL(ring->dev)) {
+ index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (index < 0)
+ return index;
+ }
+
+ /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+ /* Actual scratch location is at 128 bytes offset */
+ scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+
+ /* Pad to end of cacheline */
+ while (index % CACHELINE_DWORDS)
+ wa_ctx_emit(batch, index, MI_NOOP);
+
+ /*
+ * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+ * execution depends on the length specified in terms of cache lines
+ * in the register CTX_RCS_INDIRECT_CTX
+ */
+
+ return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ * @batch: page in which WA are loaded
+ * @offset: specifies the start of this batch, updated in place with the
+ * offset at which it ends.
+ * This batch is started immediately after indirect_ctx batch. Since we ensure
+ * that indirect_ctx ends on a cacheline this batch is aligned automatically.
+ *
+ * The number of DWORDs written is returned using this field.
+ *
+ * This batch is terminated with MI_BATCH_BUFFER_END, so we need not pad it to
+ * a cacheline: padding after MI_BATCH_BUFFER_END is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:bdw,chv */
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+ wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+ return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ int ret;
+ struct drm_device *dev = ring->dev;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaDisableCtxRestoreArbitration:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+ ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (ret < 0)
+ return ret;
+ index = ret;
+
+ /* Pad to end of cacheline */
+ while (index % CACHELINE_DWORDS)
+ wa_ctx_emit(batch, index, MI_NOOP);
+
+ return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+ struct i915_wa_ctx_bb *wa_ctx,
+ uint32_t *const batch,
+ uint32_t *offset)
+{
+ struct drm_device *dev = ring->dev;
+ uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+ wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+ wa_ctx_emit(batch, index,
+ _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+ wa_ctx_emit(batch, index, MI_NOOP);
+ }
+
+ /* WaDisableCtxRestoreArbitration:skl,bxt */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+ (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+ wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+ wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+ return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+ int ret;
+
+ ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+ if (!ring->wa_ctx.obj) {
+ DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+ ret);
+ drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+ if (ring->wa_ctx.obj) {
+ i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+ drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ ring->wa_ctx.obj = NULL;
+ }
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+ int ret;
+ uint32_t *batch;
+ uint32_t offset;
+ struct page *page;
+ struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+ WARN_ON(ring->id != RCS);
+
+ /* update this when WA for higher Gen are added */
+ if (INTEL_INFO(ring->dev)->gen > 9) {
+ DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+ INTEL_INFO(ring->dev)->gen);
+ return 0;
+ }
+
+ /* some WA perform writes to scratch page, ensure it is valid */
+ if (ring->scratch.obj == NULL) {
+ DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+ return -EINVAL;
+ }
+
+ ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+ return ret;
+ }
+
+ page = i915_gem_object_get_page(wa_ctx->obj, 0);
+ batch = kmap_atomic(page);
+ offset = 0;
+
+ if (INTEL_INFO(ring->dev)->gen == 8) {
+ ret = gen8_init_indirectctx_bb(ring,
+ &wa_ctx->indirect_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+
+ ret = gen8_init_perctx_bb(ring,
+ &wa_ctx->per_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+ } else if (INTEL_INFO(ring->dev)->gen == 9) {
+ ret = gen9_init_indirectctx_bb(ring,
+ &wa_ctx->indirect_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+
+ ret = gen9_init_perctx_bb(ring,
+ &wa_ctx->per_ctx,
+ batch,
+ &offset);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kunmap_atomic(batch);
+ if (ret)
+ lrc_destroy_wa_ctx_obj(ring);
+
+ return ret;
+}
+
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1137,19 +1478,64 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
return init_workarounds_ring(ring);
}
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+ struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+ struct intel_engine_cs *ring = req->ring;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+ int i, ret;
+
+ ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+ for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+ intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+ intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+ intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
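
The begin() budget above is easy to verify: each PDP needs two register writes (UDW and LDW), each LRI payload entry is an <offset, value> pair, and the header plus the trailing NOOP account for the extra two dwords. A quick sketch, assuming GEN8_LEGACY_PDPES is 4 as in the gen8 legacy ppgtt:

#include <stdio.h>

int main(void)
{
	const int GEN8_LEGACY_PDPES = 4;          /* assumption */
	int num_lri_cmds = GEN8_LEGACY_PDPES * 2; /* UDW + LDW per PDP */

	/* header + 8 <offset, value> pairs + MI_NOOP */
	printf("dwords = %d\n", num_lri_cmds * 2 + 2); /* -> 18 */
	return 0;
}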
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags)
{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ /* Don't rely on hw updating PDPs, especially in lite-restore.
+ * Ideally, we should set Force PD Restore in ctx descriptor,
+ * but we can't. Force Restore would be a second option, but
+ * it is unsafe in case of lite-restore (because the ctx is
+ * not idle). */
+ if (req->ctx->ppgtt &&
+ (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+ ret = intel_logical_ring_emit_pdps(req);
+ if (ret)
+ return ret;
+
+ req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+ }
+
+ ret = intel_logical_ring_begin(req, 4);
if (ret)
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+ (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
intel_logical_ring_emit(ringbuf, MI_NOOP);
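
The batch-start dword assembled above packs the address-space select into bit 8 and, new in this patch, the resource-streamer enable. A hedged sketch of the packing, where the opcode field and the RS bit position are assumptions rather than values confirmed by this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t MI_BB_START = 0x31u << 23; /* assumed opcode field */
	const uint32_t RS = 1u << 10;             /* assumed RS flag position */
	int ppgtt = 1, use_rs = 1;
	uint32_t cmd = MI_BB_START | ((uint32_t)ppgtt << 8) | (use_rs ? RS : 0);

	printf("0x%08x\n", cmd);
	return 0;
}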
@@ -1191,18 +1577,18 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 unused)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+ ret = intel_logical_ring_begin(request, 4);
if (ret)
return ret;
@@ -1232,11 +1618,11 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa;
@@ -1268,7 +1654,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
- ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+ ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
@@ -1302,9 +1688,9 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
- struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
@@ -1314,7 +1700,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
- ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+ ret = intel_logical_ring_begin(request, 8);
if (ret)
return ret;
@@ -1326,11 +1712,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+ intel_logical_ring_advance_and_submit(request);
/*
* Here we add two extra NOOPs as padding to avoid
@@ -1343,49 +1728,53 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
{
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
- struct drm_i915_file_private *file_priv = ctx->file_priv;
- struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
- ret = i915_gem_render_state_prepare(ring, &so);
+ ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = ring->emit_bb_start(ringbuf,
- ctx,
- so.ggtt_offset,
- I915_DISPATCH_SECURE);
+ ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+ I915_DISPATCH_SECURE);
+ if (ret)
+ goto out;
+
+ ret = req->ring->emit_bb_start(req,
+ (so.ggtt_offset + so.aux_batch_offset),
+ I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
- ret = __i915_add_request(ring, file, so.obj);
- /* intel_logical_ring_add_request moves object to inactive if it
- * fails */
out:
i915_gem_render_state_fini(&so);
return ret;
}
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
- ret = intel_logical_ring_workarounds_emit(ring, ctx);
+ ret = intel_logical_ring_workarounds_emit(req);
if (ret)
return ret;
- return intel_lr_context_render_state_init(ring, ctx);
+ ret = intel_rcs_context_init_mocs(req);
+ /*
+ * Failing to program the MOCS is non-fatal. The system will not
+ * run at peak performance, so log an error and carry on.
+ */
+ if (ret)
+ DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+ return intel_lr_context_render_state_init(req);
}
/**
@@ -1405,7 +1794,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1417,6 +1805,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
kunmap(sg_page(ring->status_page.obj->pages->sgl));
ring->status_page.obj = NULL;
}
+
+ lrc_destroy_wa_ctx_obj(ring);
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1476,11 +1866,28 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->emit_bb_start = gen8_emit_bb_start;
ring->dev = dev;
- ret = logical_ring_init(dev, ring);
+
+ ret = intel_init_pipe_control(ring);
if (ret)
return ret;
- return intel_init_pipe_control(ring);
+ ret = intel_init_workaround_bb(ring);
+ if (ret) {
+ /*
+ * We continue even if we fail to initialize the WA batch
+ * because we only expect rare glitches, nothing critical
+ * enough to prevent us from using the GPU.
+ */
+ DRM_ERROR("WA batch buffer initialization failed: %d\n",
+ ret);
+ }
+
+ ret = logical_ring_init(dev, ring);
+ if (ret)
+ lrc_destroy_wa_ctx_obj(ring);
+
+ return ret;
}
static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1735,7 +2142,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1760,15 +2168,27 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
if (ring->id == RCS) {
- /* TODO: according to BSpec, the register state context
- * for CHV does not have these. OTOH, these registers do
- * exist in CHV. I'm waiting for a clarification */
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+ if (ring->wa_ctx.obj) {
+ struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+ reg_state[CTX_RCS_INDIRECT_CTX+1] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+ CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+ reg_state[CTX_BB_PER_CTX_PTR+1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+ 0x01;
+ }
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
@@ -1973,13 +2393,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
lrc_setup_hardware_status_page(ring, ctx_obj);
else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
- ret = ring->init_context(ring, ctx);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ctx, &req);
+ if (ret)
+ return ret;
+
+ ret = ring->init_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
+ i915_gem_request_cancel(req);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
+
+ i915_add_request_no_flush(req);
}
ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d8b207..64f89f9982a2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -32,18 +32,19 @@
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
- struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 dispatch_flags);
+ struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 161ab26f81fb..881b5d13592e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
@@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
stat_reg = PP_STATUS;
}
- intel_panel_disable_backlight(intel_connector);
-
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
POSTING_READ(lvds_encoder->reg);
}
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+
+ intel_panel_disable_backlight(intel_connector);
+
+ intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+
+ intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+ intel_disable_lvds(encoder);
+}
+
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
+ intel_display_resume(dev);
drm_modeset_unlock_all(dev);
}
@@ -528,7 +549,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- pin = GMBUS_PIN_PANEL;
- if (!lvds_is_present_in_vbt(dev, &pin)) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
- }
-
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
@@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev)
}
}
+ pin = GMBUS_PIN_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+ if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
+ DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ }
+
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
@@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
- intel_encoder->disable = intel_disable_lvds;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_lvds;
+ intel_encoder->post_disable = pch_post_disable_lvds;
+ } else {
+ intel_encoder->disable = gmch_disable_lvds;
+ }
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev)
drm_mode_debug_printmodeline(scan);
fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode) {
- downclock_mode =
- intel_find_panel_downclock(dev,
- fixed_mode, connector);
- if (downclock_mode != NULL &&
- i915.lvds_downclock) {
- /* We found the downclock for LVDS. */
- dev_priv->lvds_downclock_avail = true;
- dev_priv->lvds_downclock =
- downclock_mode->clock;
- DRM_DEBUG_KMS("LVDS downclock is found"
- " in EDID. Normal clock %dKhz, "
- "downclock %dKhz\n",
- fixed_mode->clock,
- dev_priv->lvds_downclock);
- }
+ if (fixed_mode)
goto out;
- }
}
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 000000000000..6d3c6c0a5c62
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+ u32 control_value;
+ u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+ u32 size;
+ const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value) ((value) << 0)
+#define LE_TGT_CACHE(value) ((value) << 2)
+#define LE_LRUM(value) ((value) << 4)
+#define LE_AOM(value) ((value) << 6)
+#define LE_RSC(value) ((value) << 7)
+#define LE_SCC(value) ((value) << 8)
+#define LE_PFM(value) ((value) << 11)
+#define LE_SCF(value) ((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value) ((value) << 0)
+#define L3_SCC(value) ((value) << 1)
+#define L3_CACHEABILITY(value) ((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - the last two are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE 0
+#define LE_UC 1
+#define LE_WT 2
+#define LE_WB 3
+
+/* L3 caching options */
+#define L3_DIRECT 0
+#define L3_UC 1
+#define L3_RESERVED 2
+#define L3_WB 3
+
+/* Target cache */
+#define ELLC 0
+#define LLC 1
+#define LLC_ELLC 2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are programmed only to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon. For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with being uncached and the length
+ * MUST be less than 63 as the last two registers are reserved
+ * by the hardware. These tables are part of the kernel ABI and
+ * may only be updated incrementally by adding entries at the
+ * end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+ /* { 0x00000009, 0x0010 } */
+ { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+ /* { 0x00000038, 0x0030 } */
+ { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+ /* { 0x0000003b, 0x0030 } */
+ { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+ /* { 0x00000009, 0x0010 } */
+ { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+ /* { 0x00000038, 0x0030 } */
+ { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+ /* { 0x0000003b, 0x0030 } */
+ { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+ LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+ (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
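
The raw values in the /* { ... } */ comments above can be re-derived from the field macros. A minimal check for entry 2 (the writeback entry), with the macros copied from this file:

#include <stdio.h>

#define LE_CACHEABILITY(v) ((v) << 0)
#define LE_TGT_CACHE(v)    ((v) << 2)
#define LE_LRUM(v)         ((v) << 4)
#define L3_CACHEABILITY(v) ((v) << 4)

int main(void)
{
	/* LE_WB = 3, LLC_ELLC = 2, L3_WB = 3, per the defines above */
	unsigned int ctrl = LE_CACHEABILITY(3) | LE_TGT_CACHE(2) | LE_LRUM(3);
	unsigned int l3cc = L3_CACHEABILITY(3);

	printf("{ 0x%08x, 0x%04x }\n", ctrl, l3cc); /* { 0x0000003b, 0x0030 } */
	return 0;
}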
+
+/**
+ * get_mocs_settings()
+ * @dev: DRM device.
+ * @table: Output table that will be made to point at appropriate
+ * MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed
+ * for the platform. It returns the table via @table and reports
+ * whether any programming is required at all.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+ struct drm_i915_mocs_table *table)
+{
+ bool result = false;
+
+ if (IS_SKYLAKE(dev)) {
+ table->size = ARRAY_SIZE(skylake_mocs_table);
+ table->table = skylake_mocs_table;
+ result = true;
+ } else if (IS_BROXTON(dev)) {
+ table->size = ARRAY_SIZE(broxton_mocs_table);
+ table->table = broxton_mocs_table;
+ result = true;
+ } else {
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ }
+
+ return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req: Request to set up the MOCS table for.
+ * @table: The values to program into the control regs.
+ * @reg_base: The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+ const struct drm_i915_mocs_table *table,
+ u32 reg_base)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ unsigned int index;
+ int ret;
+
+ if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret) {
+ DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ return ret;
+ }
+
+ intel_logical_ring_emit(ringbuf,
+ MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+ for (index = 0; index < table->size; index++) {
+ intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+ intel_logical_ring_emit(ringbuf,
+ table->table[index].control_value);
+ }
+
+ /*
+ * Now set the unused entries to uncached. These entries are
+ * officially undefined and no contract for their contents or
+ * settings is given.
+ *
+ * Entry 0 in the table is uncached - so we are just writing
+ * that value to all the unused entries.
+ */
+ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+ intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+ intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
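
A user-space model of the stream laid out above may help: one LRI header, an <offset, value> pair per MOCS entry (defined entries first, then entry 0's value backfilled into the undefined ones), and a NOOP to even out the dword count. The 0xc800 base is an assumption standing in for GEN9_GFX_MOCS_0.

#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 62 /* GEN9_NUM_MOCS_ENTRIES */

int main(void)
{
	uint32_t cs[2 + 2 * NUM_ENTRIES];
	const uint32_t table[3] = { 0x9, 0x38, 0x3b }; /* control values above */
	const uint32_t reg_base = 0xc800;              /* assumed GFX base */
	unsigned int n = 0, i;

	cs[n++] = 62;                  /* stands in for MI_LOAD_REGISTER_IMM(62) */
	for (i = 0; i < 3; i++) {      /* defined entries */
		cs[n++] = reg_base + i * 4;
		cs[n++] = table[i];
	}
	for (; i < NUM_ENTRIES; i++) { /* undefined entries get entry 0 (UC) */
		cs[n++] = reg_base + i * 4;
		cs[n++] = table[0];
	}
	cs[n++] = 0;                   /* MI_NOOP */
	printf("%u dwords (budget %d)\n", n, 2 + 2 * NUM_ENTRIES); /* 126 */
	return 0;
}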
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req: Request to set up the MOCS table for.
+ * @table: The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table. This register set is programmed in pairs: each 32-bit
+ * register holds two 16-bit l3cc entries.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+ const struct drm_i915_mocs_table *table)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ unsigned int count;
+ unsigned int i;
+ u32 value;
+ u32 filler = (table->table[0].l3cc_value & 0xffff) |
+ ((table->table[0].l3cc_value & 0xffff) << 16);
+ int ret;
+
+ if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret) {
+ DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ return ret;
+ }
+
+ intel_logical_ring_emit(ringbuf,
+ MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+ for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+ value = (table->table[count].l3cc_value & 0xffff) |
+ ((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+ intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+ intel_logical_ring_emit(ringbuf, value);
+ }
+
+ if (table->size & 0x01) {
+ /* Odd table size - 1 left over */
+ value = (table->table[count].l3cc_value & 0xffff) |
+ ((table->table[0].l3cc_value & 0xffff) << 16);
+ } else {
+ value = filler;
+ }
+
+ /*
+ * Now set the rest of the table to uncached - use entry 0 as
+ * this will be uncached. Leave the last pair uninitialised as
+ * they are reserved by the hardware.
+ */
+ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+ intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+ intel_logical_ring_emit(ringbuf, value);
+
+ value = filler;
+ }
+
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
+ return 0;
+}
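
The pairing above packs two 16-bit l3cc entries per 32-bit LNCFCMOCS register, the even-indexed entry in bits 15:0 and its neighbour in bits 31:16. A small sketch using the skylake values from this file:

#include <stdint.h>
#include <stdio.h>

static uint32_t l3cc_pair(uint16_t lo, uint16_t hi)
{
	return (uint32_t)(lo & 0xffff) | ((uint32_t)(hi & 0xffff) << 16);
}

int main(void)
{
	/* skylake entries 0 and 1: 0x0010 (UC) and 0x0030 (WB) */
	printf("0x%08x\n", l3cc_pair(0x0010, 0x0030)); /* -> 0x00300010 */
	return 0;
}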
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS registers.
+ * @req: Request to set up the MOCS tables for.
+ *
+ * This function will emit a batch buffer with the values required for
+ * programming the MOCS register values for all the currently supported
+ * rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted together, ensuring they are all set up whenever a context is
+ * created. They have to be emitted at the start of the context, as
+ * setting the ELSP re-initializes some of these registers back to the
+ * hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+ struct drm_i915_mocs_table t;
+ int ret;
+
+ if (get_mocs_settings(req->ring->dev, &t)) {
+ /* Program the control registers */
+ ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+ if (ret)
+ return ret;
+
+ ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+ if (ret)
+ return ret;
+
+ /* Now program the l3cc registers */
+ ret = emit_mocs_l3cc_table(req, &t);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 000000000000..76e45b1748b3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous Gens the MOCS settings were a value set by userland as
+ * part of the batch. In Gen9 this has changed to a single table (per ring)
+ * that all batches now reference by index instead of programming the MOCS
+ * directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
+ * registers); the rest (the settings for the other rings) is not.
+ *
+ * This table needs to be set at system start-up because of the way the
+ * table interacts with the contexts and the GmmLib interface.
+ *
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 481337436f72..cb1c65739425 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,8 +25,6 @@
*
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/acpi.h>
#include <acpi/video.h>
@@ -53,6 +51,7 @@
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
+#define MBOX_ASLE_EXT (1<<4)
struct opregion_header {
u8 signature[16];
@@ -62,7 +61,10 @@ struct opregion_header {
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
- u8 reserved[164];
+ u32 driver_model;
+ u32 pcon;
+ u8 dver[32];
+ u8 rsvd[124];
} __packed;
/* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@ struct opregion_acpi {
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
- u8 rsvd2[60];
+ u32 did2[7]; /* extended supported display devices ID list */
+ u32 cpd2[7]; /* extended attached display devices list */
+ u8 rsvd2[4];
} __packed;
/* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@ struct opregion_asle {
u32 pcft; /* power conservation features */
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
- u8 rsvd[86];
+ u64 fdss;
+ u32 fdsp;
+ u32 stat;
+ u8 rsvd[70];
} __packed;
/* Driver readiness indicator */
@@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+ u32 did;
+
+ if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+ did = ioread32(&opregion->acpi->didl[i]);
+ } else {
+ i -= ARRAY_SIZE(opregion->acpi->didl);
+
+ if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+ return 0;
+
+ did = ioread32(&opregion->acpi->did2[i]);
+ }
+
+ return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+ if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+ iowrite32(val, &opregion->acpi->didl[i]);
+ } else {
+ i -= ARRAY_SIZE(opregion->acpi->didl);
+
+ if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+ return;
+
+ iowrite32(val, &opregion->acpi->did2[i]);
+ }
+}
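
get_did()/set_did() present the two arrays as one logical list: indices below ARRAY_SIZE(didl) go to the classic list and the remainder spill into did2. A sketch of the mapping, assuming the usual eight didl slots alongside the seven did2 slots declared above:

#include <stdio.h>

#define DIDL_SLOTS 8 /* assumed ARRAY_SIZE(acpi->didl) */
#define DID2_SLOTS 7 /* ARRAY_SIZE(acpi->did2), see the struct above */

int main(void)
{
	int i;

	for (i = 0; i < DIDL_SLOTS + DID2_SLOTS; i++) {
		if (i < DIDL_SLOTS)
			printf("output %2d -> didl[%d]\n", i, i);
		else
			printf("output %2d -> did2[%d]\n", i, i - DIDL_SLOTS);
	}
	return 0;
}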
+
static void intel_didl_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
- u32 temp;
+ u32 temp, max_outputs;
int i = 0;
handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- pr_warn("No ACPI video bus found\n");
+ DRM_ERROR("No ACPI video bus found\n");
return;
}
+ /*
+ * In theory, did2, the extended didl, gets added at opregion version
+ * 3.0. In practice, however, we're supposed to set it for earlier
+ * versions as well, since a BIOS that doesn't understand did2 should
+ * not look at it anyway. Use a variable so we can tweak this if a need
+ * arises later.
+ */
+ max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+ ARRAY_SIZE(opregion->acpi->did2);
+
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= 8) {
- dev_dbg(&dev->pdev->dev,
- "More than 8 outputs detected via ACPI\n");
+ if (i >= max_outputs) {
+ DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+ max_outputs);
return;
}
- status =
- acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
+ status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- iowrite32((u32)(device_id & 0x0f0f),
- &opregion->acpi->didl[i]);
- i++;
+ set_did(opregion, i++, (u32)(device_id & 0x0f0f));
}
}
end:
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- iowrite32(0, &opregion->acpi->didl[i]);
+ DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+ /* If fewer than max outputs, the list must be null terminated */
+ if (i < max_outputs)
+ set_did(opregion, i, 0);
return;
blind_set:
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
- if (i >= 8) {
- dev_dbg(&dev->pdev->dev,
- "More than 8 outputs in connector list\n");
+ if (i >= max_outputs) {
+ DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+ max_outputs);
return;
}
switch (connector->connector_type) {
@@ -698,9 +746,8 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- temp = ioread32(&opregion->acpi->didl[i]);
- iowrite32(temp | (1<<31) | output_type | i,
- &opregion->acpi->didl[i]);
+ temp = get_did(opregion, i);
+ set_did(opregion, i, temp | (1 << 31) | output_type | i);
i++;
}
goto end;
@@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev)
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are less than eight devices. */
do {
- disp_id = ioread32(&opregion->acpi->didl[i]);
+ disp_id = get_did(opregion, i);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
} while (++i < 8 && disp_id != 0);
}
@@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev)
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
+ BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25c8ec697da1..444542696a2c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *req,
void (*tail)(struct intel_overlay *))
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req,
- ring->outstanding_lazy_request);
- ret = i915_add_request(ring);
- if (ret)
- return ret;
+ i915_gem_request_assign(&overlay->last_flip_req, req);
+ i915_add_request(req);
overlay->flip_tail = tail;
ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- ret = intel_ring_begin(ring, 4);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 4);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
overlay->active = true;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, NULL);
+ return intel_overlay_do_wait_request(overlay, req, NULL);
}
/* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = intel_ring_begin(ring, 2);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 2);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req,
- ring->outstanding_lazy_request);
- return i915_add_request(ring);
+ i915_gem_request_assign(&overlay->last_flip_req, req);
+ i915_add_request(req);
+
+ return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = intel_ring_begin(ring, 6);
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 6);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+ return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
@@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
- ret = intel_ring_begin(ring, 2);
+ struct drm_i915_gem_request *req;
+
+ ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
+ ret = intel_ring_begin(req, 2);
+ if (ret) {
+ i915_gem_request_cancel(req);
+ return ret;
+ }
+
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay,
+ ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
&i915_ggtt_view_normal);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 55aad2322e10..e2ab3f6ed022 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -32,8 +32,11 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/pwm.h>
#include "intel_drv.h"
+#define CRC_PMIC_PWM_PERIOD_NS 21333
+
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
return I915_READ(BXT_BLC_PWM_DUTY1);
}
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+ int duty_ns;
+
+ duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+ return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
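
The percent <-> duty-cycle conversions here and in pwm_set_backlight() below both round up, so a set/get round trip can drift by one percent. A quick check with the 21333 ns CRC PMIC period:

#include <stdio.h>

#define PERIOD_NS 21333 /* CRC_PMIC_PWM_PERIOD_NS */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int level = 50; /* percent */
	int duty_ns = DIV_ROUND_UP(level * PERIOD_NS, 100); /* set path */
	int back = DIV_ROUND_UP(duty_ns * 100, PERIOD_NS);  /* get path */

	printf("set %d%% -> %d ns -> get %d%%\n", level, duty_ns, back);
	return 0; /* prints: set 50% -> 10667 ns -> get 51% */
}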
+
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(BXT_BLC_PWM_DUTY1, level);
}
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_panel *panel = &connector->panel;
+ int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
+
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
@@ -769,6 +789,16 @@ static void bxt_disable_backlight(struct intel_connector *connector)
I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
}
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ /* Disable the backlight */
+ pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+ usleep_range(2000, 3000);
+ pwm_disable(panel->backlight.pwm);
+}
+
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@ static void bxt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
}
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ pwm_enable(panel->backlight.pwm);
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
+static int pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ int retval;
+
+ /* Get the PWM chip for backlight control */
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+ if (IS_ERR(panel->backlight.pwm)) {
+ DRM_ERROR("Failed to own the pwm chip\n");
+ panel->backlight.pwm = NULL;
+ return -ENODEV;
+ }
+
+ retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+ CRC_PMIC_PWM_PERIOD_NS);
+ if (retval < 0) {
+ DRM_ERROR("Failed to configure the pwm chip\n");
+ pwm_put(panel->backlight.pwm);
+ panel->backlight.pwm = NULL;
+ return retval;
+ }
+
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ panel->backlight.level = DIV_ROUND_UP(
+ pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
+ /* dispose of the pwm */
+ if (panel->backlight.pwm)
+ pwm_put(panel->backlight.pwm);
+
panel->backlight.present = false;
}
@@ -1456,11 +1532,19 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
dev_priv->display.set_backlight = pch_set_backlight;
dev_priv->display.get_backlight = pch_get_backlight;
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.setup_backlight = vlv_setup_backlight;
- dev_priv->display.enable_backlight = vlv_enable_backlight;
- dev_priv->display.disable_backlight = vlv_disable_backlight;
- dev_priv->display.set_backlight = vlv_set_backlight;
- dev_priv->display.get_backlight = vlv_get_backlight;
+ if (dev_priv->vbt.has_mipi) {
+ dev_priv->display.setup_backlight = pwm_setup_backlight;
+ dev_priv->display.enable_backlight = pwm_enable_backlight;
+ dev_priv->display.disable_backlight = pwm_disable_backlight;
+ dev_priv->display.set_backlight = pwm_set_backlight;
+ dev_priv->display.get_backlight = pwm_get_backlight;
+ } else {
+ dev_priv->display.setup_backlight = vlv_setup_backlight;
+ dev_priv->display.enable_backlight = vlv_enable_backlight;
+ dev_priv->display.disable_backlight = vlv_disable_backlight;
+ dev_priv->display.set_backlight = vlv_set_backlight;
+ dev_priv->display.get_backlight = vlv_get_backlight;
+ }
} else if (IS_GEN4(dev)) {
dev_priv->display.setup_backlight = i965_setup_backlight;
dev_priv->display.enable_backlight = i965_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eadc15cddbeb..ddbb7ed0a193 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev)
/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaDisableKillLogic:bxt,skl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
}
static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@ static void skl_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
+ /* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+ * involving this register should also be added to the WA batch as required.
+ */
if (INTEL_REVID(dev) <= SKL_REVID_E0)
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaEnableGapsTsvCreditFix:skl */
+ if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+ }
}
static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- if (IS_CHERRYVIEW(dev))
- chv_set_memory_pm5(dev_priv, enable);
+ POSTING_READ(FW_BLC_SELF_VLV);
+ dev_priv->wm.vlv.cxsr = enable;
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
+ POSTING_READ(DSPFW3);
} else if (IS_I945G(dev) || IS_I945GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
+ POSTING_READ(FW_BLC_SELF);
} else if (IS_I915GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
+ POSTING_READ(INSTPM);
} else {
return;
}
@@ -923,223 +940,480 @@ static void vlv_write_wm_values(struct intel_crtc *crtc,
FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ /* zero (unused) WM1 watermarks */
+ I915_WRITE(DSPFW4, 0);
+ I915_WRITE(DSPFW5, 0);
+ I915_WRITE(DSPFW6, 0);
+ I915_WRITE(DSPHOWM1, 0);
- dev_priv->wm.vlv = *wm;
+ POSTING_READ(DSPFW1);
}
#undef FW_WM_VLV
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
- struct drm_plane *plane)
+enum vlv_wm_level {
+ VLV_WM_LEVEL_PM2,
+ VLV_WM_LEVEL_PM5,
+ VLV_WM_LEVEL_DDR_DVFS,
+};
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+ unsigned int pipe_htotal,
+ unsigned int horiz_pixels,
+ unsigned int bytes_per_pixel,
+ unsigned int latency)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int entries, prec_mult, drain_latency, pixel_size;
- int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
- const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
+ unsigned int ret;
- /*
- * FIXME the plane might have an fb
- * but be invisible (eg. due to clipping)
- */
- if (!intel_crtc->active || !plane->state->fb)
- return 0;
+ ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+ ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = DIV_ROUND_UP(ret, 64);
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return 0;
+ return ret;
+}
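
A worked example of the method-2 formula helps fix the units: latency is in 0.1 us steps and pixel_rate (crtc_clock) is in kHz. With assumed numbers (148500 kHz clock, htotal 2200, a 1920-wide 4-byte/px plane, 30 us latency) about two lines elapse during the latency window:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int bytes_per_pixel,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	return DIV_ROUND_UP(ret, 64);
}

int main(void)
{
	/* 2 lines elapse in 30 us; (2 + 1) * 1920 * 4 / 64 = 360 */
	printf("%u cachelines\n", vlv_wm_method2(148500, 2200, 1920, 4, 300));
	return 0;
}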
- pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
+static void vlv_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return 0;
+ /* all latencies in usec */
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+ dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
- prec_mult = high_precision;
- drain_latency = 64 * prec_mult * 4 / entries;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- if (drain_latency > DRAIN_LATENCY_MASK) {
- prec_mult /= 2;
- drain_latency = 64 * prec_mult * 4 / entries;
+ dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
-
- if (drain_latency > DRAIN_LATENCY_MASK)
- drain_latency = DRAIN_LATENCY_MASK;
-
- return drain_latency | (prec_mult == high_precision ?
- DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
}
-static int vlv_compute_wm(struct intel_crtc *crtc,
- struct intel_plane *plane,
- int fifo_size)
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+ struct intel_crtc *crtc,
+ const struct intel_plane_state *state,
+ int level)
{
- int clock, entries, pixel_size;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ int clock, htotal, pixel_size, width, wm;
- /*
- * FIXME the plane might have an fb
- * but be invisible (eg. due to clipping)
- */
- if (!crtc->active || !plane->base.state->fb)
+ if (dev_priv->wm.pri_latency[level] == 0)
+ return USHRT_MAX;
+
+ if (!state->visible)
return 0;
- pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
+ htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+ width = crtc->config->pipe_src_w;
+ if (WARN_ON(htotal == 0))
+ htotal = 1;
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ /*
+ * FIXME the formula gives values that are
+ * too big for the cursor FIFO, and hence we
+ * would never be able to use cursors. For
+ * now just hardcode the watermark.
+ */
+ wm = 63;
+ } else {
+ wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+ dev_priv->wm.pri_latency[level] * 10);
+ }
- /*
- * Set up the watermark such that we don't start issuing memory
- * requests until we are within PND's max deadline value (256us).
- * Idea being to be idle as long as possible while still taking
- * advatange of PND's deadline scheduling. The limit of 8
- * cachelines (used when the FIFO will anyway drain in less time
- * than 256us) should match what we would be done if trickle
- * feed were enabled.
- */
- return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+ return min_t(int, wm, USHRT_MAX);
}
-static bool vlv_compute_sr_wm(struct drm_device *dev,
- struct vlv_wm_values *wm)
+static void vlv_compute_fifo(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
- enum pipe pipe = INVALID_PIPE;
- int num_planes = 0;
- int fifo_size = 0;
+ struct drm_device *dev = crtc->base.dev;
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
+ unsigned int total_rate = 0;
+ const int fifo_size = 512 - 1;
+ int fifo_extra, fifo_left = fifo_size;
- wm->sr.cursor = wm->sr.plane = 0;
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
- crtc = single_enabled_crtc(dev);
- /* maxfifo not supported on pipe C */
- if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
- pipe = to_intel_crtc(crtc)->pipe;
- num_planes = !!wm->pipe[pipe].primary +
- !!wm->pipe[pipe].sprite[0] +
- !!wm->pipe[pipe].sprite[1];
- fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (state->visible) {
+ wm_state->num_active_planes++;
+ total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ }
}
- if (fifo_size == 0 || num_planes > 1)
- return false;
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
+ unsigned int rate;
+
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ plane->wm.fifo_size = 63;
+ continue;
+ }
+
+ if (!state->visible) {
+ plane->wm.fifo_size = 0;
+ continue;
+ }
+
+ rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ plane->wm.fifo_size = fifo_size * rate / total_rate;
+ fifo_left -= plane->wm.fifo_size;
+ }
- wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
- to_intel_plane(crtc->cursor), 0x3f);
+ fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
+
+ /* spread the remainder evenly */
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ int plane_extra;
+
+ if (fifo_left == 0)
+ break;
- list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
- if (plane->pipe != pipe)
+ /* give it all to the first plane if none are active */
+ if (plane->wm.fifo_size == 0 &&
+ wm_state->num_active_planes)
+ continue;
+
+ plane_extra = min(fifo_extra, fifo_left);
+ plane->wm.fifo_size += plane_extra;
+ fifo_left -= plane_extra;
+ }
+
+ WARN_ON(fifo_left != 0);
+}
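As a standalone illustration of the proportional split above (plane formats are assumed; this is not kernel code): two visible planes at 4 and 2 bytes per pixel divide the 511-entry FIFO 2:1, and the single leftover entry is handed out by the "spread the remainder" loop.

#include <stdio.h>

int main(void)
{
	const int fifo_size = 512 - 1;
	int cpp[2] = { 4, 2 };	/* assumed: XRGB8888 primary, YUYV sprite */
	int total = cpp[0] + cpp[1];
	int left = fifo_size;
	int i;

	for (i = 0; i < 2; i++) {
		int share = fifo_size * cpp[i] / total;	/* 340, then 170 */

		left -= share;
		printf("plane %d: %d FIFO entries\n", i, share);
	}

	/* 1 entry remains; it is spread in DIV_ROUND_UP(left, planes)
	 * sized chunks, so the first plane ends up with 341. */
	printf("left over: %d\n", left);
	return 0;
}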
+
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ int level;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
+ struct drm_device *dev = crtc->base.dev;
+ const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ struct intel_plane *plane;
+
+ wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+ wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ wm_state->wm[level].cursor = plane->wm.fifo_size -
+ wm_state->wm[level].cursor;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ wm_state->wm[level].primary = plane->wm.fifo_size -
+ wm_state->wm[level].primary;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+ wm_state->wm[level].sprite[sprite];
+ break;
+ }
+ }
+ }
+}
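The inversion deserves a note: the values computed so far are "FIFO entries the plane needs", while the hardware fields (by the convention assumed here) hold the fill level at which fetching starts, hence the fifo_size - wm flip:

/* e.g. a plane with fifo_size = 340 entries and a computed need of
 * wm = 96 entries is programmed as 340 - 96 = 244; numbers continue
 * the split example above and are purely illustrative. */
static inline int invert_wm_sketch(int fifo_size, int wm)
{
	return fifo_size - wm;
}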
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ struct intel_plane *plane;
+ int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int level;
+
+ memset(wm_state, 0, sizeof(*wm_state));
+
+ wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+ wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+
+ wm_state->num_active_planes = 0;
+
+ vlv_compute_fifo(crtc);
+
+ if (wm_state->num_active_planes != 1)
+ wm_state->cxsr = false;
+
+ if (wm_state->cxsr) {
+ for (level = 0; level < wm_state->num_levels; level++) {
+ wm_state->sr[level].plane = sr_fifo_size;
+ wm_state->sr[level].cursor = 63;
+ }
+ }
+
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ struct intel_plane_state *state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!state->visible)
+ continue;
+
+ /* normal watermarks */
+ for (level = 0; level < wm_state->num_levels; level++) {
+ int wm = vlv_compute_wm_level(plane, crtc, state, level);
+ int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+ /* hack */
+ if (WARN_ON(level == 0 && wm > max_wm))
+ wm = max_wm;
+
+ if (wm > plane->wm.fifo_size)
+ break;
+
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ wm_state->wm[level].cursor = wm;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ wm_state->wm[level].primary = wm;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ wm_state->wm[level].sprite[sprite] = wm;
+ break;
+ }
+ }
+
+ wm_state->num_levels = level;
+
+ if (!wm_state->cxsr)
continue;
- wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
- plane, fifo_size);
- if (wm->sr.plane != 0)
+ /* maxfifo watermarks */
+ switch (plane->base.type) {
+ int sprite, level;
+ case DRM_PLANE_TYPE_CURSOR:
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].cursor =
+ wm_state->wm[level].cursor;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].plane =
+ min(wm_state->sr[level].plane,
+ wm_state->wm[level].primary);
break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ for (level = 0; level < wm_state->num_levels; level++)
+ wm_state->sr[level].plane =
+ min(wm_state->sr[level].plane,
+ wm_state->wm[level].sprite[sprite]);
+ break;
+ }
}
- return true;
+ /* clear any (partially) filled invalid levels */
+ for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+ memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+ }
+
+ vlv_invert_wms(crtc);
}
-static void valleyview_update_wm(struct drm_crtc *crtc)
+#define VLV_FIFO(plane, value) \
+ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- bool cxsr_enabled;
- struct vlv_wm_values wm = dev_priv->wm.vlv;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane;
+ int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
- wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
- wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
- to_intel_plane(crtc->primary),
- vlv_get_fifo_size(dev, pipe, 0));
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ WARN_ON(plane->wm.fifo_size != 63);
+ continue;
+ }
- wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
- wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
- to_intel_plane(crtc->cursor),
- 0x3f);
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ sprite0_start = plane->wm.fifo_size;
+ else if (plane->plane == 0)
+ sprite1_start = sprite0_start + plane->wm.fifo_size;
+ else
+ fifo_size = sprite1_start + plane->wm.fifo_size;
+ }
- cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+ WARN_ON(fifo_size != 512 - 1);
- if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
- return;
+ DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+ pipe_name(crtc->pipe), sprite0_start,
+ sprite1_start, fifo_size);
- DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
- wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
- wm.sr.plane, wm.sr.cursor);
+ switch (crtc->pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
- /*
- * FIXME DDR DVFS introduces massive memory latencies which
- * are not known to system agent so any deadline specified
- * by the display may not be respected. To support DDR DVFS
- * the watermark code needs to be rewritten to essentially
- * bypass deadline mechanism and rely solely on the
- * watermarks. For now disable DDR DVFS.
- */
- if (IS_CHERRYVIEW(dev_priv))
- chv_set_memory_dvfs(dev_priv, false);
+ dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+ VLV_FIFO(SPRITEB, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+ VLV_FIFO(SPRITEB, sprite1_start));
- if (!cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, false);
+ dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+ VLV_FIFO(SPRITEB_HI, 0x1));
+ dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- vlv_write_wm_values(intel_crtc, &wm);
+ I915_WRITE(DSPARB, dsparb);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+ VLV_FIFO(SPRITED, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+ VLV_FIFO(SPRITED, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+ VLV_FIFO(SPRITED_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+ I915_WRITE(DSPARB, dsparb);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ case PIPE_C:
+ dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = I915_READ(DSPARB2);
+
+ dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+ VLV_FIFO(SPRITEF, 0xff));
+ dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+ VLV_FIFO(SPRITEF, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+ VLV_FIFO(SPRITEF_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+ I915_WRITE(DSPARB3, dsparb3);
+ I915_WRITE(DSPARB2, dsparb2);
+ break;
+ default:
+ break;
+ }
}
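The VLV_FIFO() helper defined just before this function leans on token pasting to select the right shift/mask pair per plane name. A self-contained illustration, with made-up shift/mask values standing in for the i915_reg.h definitions:

#define DSPARB_SPRITEA_SHIFT_VLV	0		/* assumed value */
#define DSPARB_SPRITEA_MASK_VLV		(0xff << 0)	/* assumed value */

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

/* VLV_FIFO(SPRITEA, 340) pastes to the SPRITEA pair and evaluates to
 * (340 << 0) & 0xff = 0x54: only the low 8 bits fit in DSPARB, which
 * is exactly why the function above also writes sprite0_start >> 8
 * into the separate *_HI fields of DSPARB2/DSPARB3. */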
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width,
- uint32_t sprite_height,
- int pixel_size,
- bool enabled, bool scaled)
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_crtcs = 0;
+
+ wm->level = to_i915(dev)->wm.max_level;
+ wm->cxsr = true;
+
+ for_each_intel_crtc(dev, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+
+ num_active_crtcs++;
+ wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+ }
+
+ if (num_active_crtcs != 1)
+ wm->cxsr = false;
+
+ if (num_active_crtcs > 1)
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct vlv_wm_state *wm_state = &crtc->wm_state;
+ enum pipe pipe = crtc->pipe;
+
+ if (!crtc->active)
+ continue;
+
+ wm->pipe[pipe] = wm_state->wm[wm->level];
+ if (wm->cxsr)
+ wm->sr = wm_state->sr[wm->level];
+
+ wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+ }
+}
+
+static void vlv_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- int sprite = to_intel_plane(plane)->plane;
- bool cxsr_enabled;
- struct vlv_wm_values wm = dev_priv->wm.vlv;
+ struct vlv_wm_values wm = {};
- if (enabled) {
- wm.ddl[pipe].sprite[sprite] =
- vlv_compute_drain_latency(crtc, plane);
+ vlv_compute_wm(intel_crtc);
+ vlv_merge_wm(dev, &wm);
- wm.pipe[pipe].sprite[sprite] =
- vlv_compute_wm(intel_crtc,
- to_intel_plane(plane),
- vlv_get_fifo_size(dev, pipe, sprite+1));
- } else {
- wm.ddl[pipe].sprite[sprite] = 0;
- wm.pipe[pipe].sprite[sprite] = 0;
+ if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+ /* FIXME should be part of crtc atomic commit */
+ vlv_pipe_set_fifo_size(intel_crtc);
+ return;
}
- cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
- if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
- return;
+ if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+ dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+ chv_set_memory_dvfs(dev_priv, false);
- DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
- "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
- sprite_name(pipe, sprite),
- wm.pipe[pipe].sprite[sprite],
- wm.sr.plane, wm.sr.cursor);
+ if (wm.level < VLV_WM_LEVEL_PM5 &&
+ dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+ chv_set_memory_pm5(dev_priv, false);
- if (!cxsr_enabled)
+ if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, false);
+ /* FIXME should be part of crtc atomic commit */
+ vlv_pipe_set_fifo_size(intel_crtc);
+
vlv_write_wm_values(intel_crtc, &wm);
- if (cxsr_enabled)
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+ pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+ wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+ if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, true);
+
+ if (wm.level >= VLV_WM_LEVEL_PM5 &&
+ dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+ chv_set_memory_pm5(dev_priv, true);
+
+ if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+ dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+ chv_set_memory_dvfs(dev_priv, true);
+
+ dev_priv->wm.vlv = wm;
}
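Worth spelling out the ordering in vlv_update_wm(): power states the new configuration no longer tolerates are torn down before the watermarks are written, and newly allowed states are enabled only afterwards, so the hardware never runs a deep state against stale watermarks. A minimal sketch of the pattern, with stub names rather than the real i915 API:

static void set_state_stub(const char *name, int enable)
{
	(void)name; (void)enable;	/* stand-in for chv_set_memory_*() etc. */
}

static void update_level_sketch(int old_level, int new_level)
{
	if (new_level < old_level)
		set_state_stub("deep state", 0);	/* downgrade first */

	set_state_stub("watermarks", 1);		/* program new WMs */

	if (new_level > old_level)
		set_state_stub("deep state", 1);	/* upgrade last */
}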
#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1434,23 +1708,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
}
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
- struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
- pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- if (intel_crtc->config->pch_pfit.enabled) {
+ if (pipe_config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+ uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+ pipe_w = pipe_config->pipe_src_w;
+ pipe_h = pipe_config->pipe_src_h;
- pipe_w = intel_crtc->config->pipe_src_w;
- pipe_h = intel_crtc->config->pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
@@ -1815,7 +2088,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
- dev_priv->display.get_display_clock_speed(dev_priv->dev));
+ dev_priv->cdclk_freq);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
@@ -2066,7 +2339,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
- p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
if (crtc->primary->state->fb)
p->pri.bytes_per_pixel =
@@ -2085,7 +2358,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ drm_for_each_legacy_plane(plane, dev) {
struct intel_plane *intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe) {
@@ -2215,6 +2488,7 @@ static void ilk_wm_merge(struct drm_device *dev,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
@@ -2255,7 +2529,8 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+ if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ intel_fbc_enabled(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3043,8 +3318,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
if (!to_intel_crtc(crtc)->active)
return 0;
- return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+ if (WARN_ON(p->pixel_rate == 0))
+ return 0;
+ return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}
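A quick sanity check of the linetime formula above, assuming pixel_rate in kHz and a result in 1/8 usec units: for a 1080p mode with crtc_htotal = 2200 and a 148500 kHz pixel clock (mode numbers are illustrative),

/* DIV_ROUND_UP(8 * 2200 * 1000, 148500) = DIV_ROUND_UP(17600000, 148500)
 * = 119, i.e. 119 / 8 ~= 14.9 usec per scanline, which matches
 * 2200 px / 148.5 MHz = 14.81 usec. */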
static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3962,159 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
}
}
+#define _FW_WM(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+ uint32_t tmp;
+
+ for_each_pipe(dev_priv, pipe) {
+ tmp = I915_READ(VLV_DDL(pipe));
+
+ wm->ddl[pipe].primary =
+ (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].cursor =
+ (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].sprite[0] =
+ (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].sprite[1] =
+ (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ }
+
+ tmp = I915_READ(DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+ tmp = I915_READ(DSPFW2);
+ wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+ tmp = I915_READ(DSPFW3);
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ tmp = I915_READ(DSPFW7_CHV);
+ wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = I915_READ(DSPFW8_CHV);
+ wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+ tmp = I915_READ(DSPFW9_CHV);
+ wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+ tmp = I915_READ(DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ } else {
+ tmp = I915_READ(DSPFW7);
+ wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = I915_READ(DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+ struct intel_plane *plane;
+ enum pipe pipe;
+ u32 val;
+
+ vlv_read_wm_values(dev_priv, wm);
+
+ for_each_intel_plane(dev, plane) {
+ switch (plane->base.type) {
+ int sprite;
+ case DRM_PLANE_TYPE_CURSOR:
+ plane->wm.fifo_size = 63;
+ break;
+ case DRM_PLANE_TYPE_PRIMARY:
+ plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ sprite = plane->plane;
+ plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ break;
+ }
+ }
+
+ wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (val & DSP_MAXFIFO_PM5_ENABLE)
+ wm->level = VLV_WM_LEVEL_PM5;
+
+ /*
+ * If DDR DVFS is disabled in the BIOS, Punit
+ * will never ack the request. So if that happens
+ * assume we don't have to enable/disable DDR DVFS
+ * dynamically. To test that just set the REQ_ACK
+ * bit to poke the Punit, but don't change the
+ * HIGH/LOW bits so that we don't actually change
+ * the current state.
+ */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
+ dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+ } else {
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ }
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ }
+
+ for_each_pipe(dev_priv, pipe)
+ DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+ wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+ DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
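The REQ_ACK dance above is a plain poke-and-poll handshake: set the request bit without touching the payload bits, then watch for the hardware to clear it. The shape of the pattern as a standalone sketch (wait_for() does the timed polling in i915; all names here are assumptions):

static int poke_and_poll_sketch(unsigned int (*read_reg)(void),
				void (*write_reg)(unsigned int),
				unsigned int req_bit, int timeout_ms)
{
	write_reg(read_reg() | req_bit);	/* poke; payload untouched */

	while (timeout_ms--) {
		if (!(read_reg() & req_bit))
			return 0;		/* hardware acked */
		/* msleep(1) here in real code */
	}

	return -1;	/* no ack: treat the feature as unavailable */
}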
+
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4513,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
if (val != dev_priv->rps.cur_freq) {
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (!IS_CHERRYVIEW(dev_priv))
gen6_set_rps_thresholds(dev_priv, val);
}
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
@@ -4250,12 +4680,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
- /* No RC6 before Ironlake */
- if (INTEL_INFO(dev)->gen < 5)
- return 0;
-
- /* RC6 is only on Ironlake mobile not on desktop */
- if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+ /* No RC6 before Ironlake and code is gone for ilk. */
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
/* Respect the kernel parameter if it is set */
@@ -4275,10 +4701,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
return enable_rc6 & mask;
}
- /* Disable RC6 on Ironlake */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
if (IS_IVYBRIDGE(dev))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
@@ -4297,25 +4719,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
u32 ddcc_status = 0;
int ret;
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- if (IS_SKYLAKE(dev)) {
- /* Store the frequency values in 16.66 MHZ units, which is
- the natural hardware unit for SKL */
- dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ if (IS_BROXTON(dev)) {
+ rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
}
+
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4327,6 +4750,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+ }
+
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
/* Preserve min/max settings in case of re-init */
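The SKL rescaling above is easy to sanity-check, assuming GEN9_FREQ_SCALER is 3 (i.e. 50 MHz / 16.66 MHz):

/* RP0 read from RP_STATE_CAP as 23 means 23 * 50 MHz = 1150 MHz; it is
 * stored as 23 * 3 = 69 in 16.66 MHz units, and 69 * 16.66 MHz ~= 1150
 * MHz again. __gen6_update_ring_freq() below divides by the same
 * scaler to recover the 50 MHz units the ring table expects. The
 * example frequency is illustrative. */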
@@ -4619,6 +5052,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
+ unsigned int max_gpu_freq, min_gpu_freq;
int scaling_factor = 180;
struct cpufreq_policy *policy;
@@ -4643,17 +5077,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+ if (IS_SKYLAKE(dev)) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+ max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+ } else {
+ min_gpu_freq = dev_priv->rps.min_freq;
+ max_gpu_freq = dev_priv->rps.max_freq;
+ }
+
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
- gpu_freq--) {
- int diff = dev_priv->rps.max_freq - gpu_freq;
+ for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+ int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_SKYLAKE(dev)) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_INFO(dev)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev)) {
@@ -4687,7 +5135,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+ if (!HAS_CORE_RING_FREQ(dev))
return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6250,8 @@ static void intel_gen6_powersave_work(struct work_struct *work)
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ if (IS_SKYLAKE(dev))
+ __gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -6686,13 +7135,15 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = valleyview_update_wm;
- dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+ vlv_setup_wm_latency(dev);
+
+ dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.update_wm = valleyview_update_wm;
- dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+ vlv_setup_wm_latency(dev);
+
+ dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ee0fa57ed19..a04b4dc5ed9b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
uint32_t max_sleep_time = 0x1f;
/* Lately it was identified that depending on panel idle frame count
* calculated at HW can be off by 1. So let's use what came
- * from VBT + 1 and at minimum 2 to be on the safe side.
+ * from VBT + 1.
+ * There are also other cases where the panel demands at least 4
+ * but VBT is not set. To cover these 2 cases let's use
+ * at least 5 when VBT isn't set, to be on the safe side.
*/
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
- dev_priv->vbt.psr.idle_frames + 1 : 2;
+ dev_priv->vbt.psr.idle_frames + 1 : 5;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ EDP_PSR_DEBUG_MASK_HPD);
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev)
/**
* intel_psr_single_frame_update - Single Frame Update
* @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
*
* Some platforms support a single frame update feature that is used to
* send and update only one frame on Remote Frame Buffer.
* So far it is only implemented for Valleyview and Cherryview because
* hardware requires this to be done before a page flip.
*/
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+ unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev)
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- val = I915_READ(VLV_PSRCTL(pipe));
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+ if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+ val = I915_READ(VLV_PSRCTL(pipe));
+ /*
+ * We need to set this bit before writing registers for a flip.
+ * This bit will be self-clear when it gets to the PSR active state.
+ */
+ I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+ }
mutex_unlock(&dev_priv->psr.lock);
}
@@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev)
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
*/
void intel_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- intel_psr_exit(dev);
-
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
+
mutex_unlock(&dev_priv->psr.lock);
}
@@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev,
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@ void intel_psr_invalidate(struct drm_device *dev,
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
+ int delay_ms = HAS_DDI(dev) ? 100 : 500;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@ void intel_psr_flush(struct drm_device *dev,
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_psr_exit(dev);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- /*
- * On Valleyview and Cherryview we don't use hardware tracking so
- * any plane updates or cursor moves don't result in a PSR
- * invalidating. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering. */
- if (!HAS_DDI(dev))
- intel_psr_exit(dev);
+ if (HAS_DDI(dev)) {
+ /*
+ * By definition every flush should mean invalidate + flush,
+ * however on core platforms let's minimize the
+ * disable/re-enable so we can avoid the invalidate when flip
+ * originated the flush.
+ */
+ if (frontbuffer_bits && origin != ORIGIN_FLIP)
+ intel_psr_exit(dev);
+ } else {
+ /*
+ * On Valleyview and Cherryview we don't use hardware tracking
+ * so any plane updates or cursor moves don't result in a PSR
+ * invalidating. Which means we need to manually fake this in
+ * software for all flushes.
+ */
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
+ }
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(delay_ms));
mutex_unlock(&dev_priv->psr.lock);
}
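The frontbuffer_bits bookkeeping used in the invalidate and flush paths above is simple per-pipe bit bucketing. An illustrative layout (the real INTEL_FRONTBUFFER_* macros live in i915_drv.h; 4 bits per pipe is an assumption made for this example):

#define FB_BITS_PER_PIPE	4
#define FB_ALL_MASK(pipe)	(0xfu << ((pipe) * FB_BITS_PER_PIPE))

/* With PSR active on pipe B, a flush carrying only pipe A bits, say
 * frontbuffer_bits = 0x3, is masked by FB_ALL_MASK(1) = 0xf0 down to
 * 0, so dirt on unrelated pipes never forces a PSR exit. */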
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3817a6f00d9e..6e6b8db996ef 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring)
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring)
}
static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 cmd;
int ret;
@@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
u32 cmd;
int ret;
@@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
-
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
}
static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
- u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+ u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(req);
if (ret)
return ret;
@@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
}
static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
- gen7_render_ring_cs_stall_wa(ring);
+ gen7_render_ring_cs_stall_wa(req);
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
}
static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- ret = gen8_emit_pipe_control(ring,
+ ret = gen8_emit_pipe_control(req,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD,
0);
@@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- return gen8_emit_pipe_control(ring, flags, scratch_addr);
+ return gen8_emit_pipe_control(req, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@ err:
return ret;
}
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
ring->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(ring);
+ ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_ring_begin(ring, (w->count * 2 + 2));
+ ret = intel_ring_begin(req, (w->count * 2 + 2));
if (ret)
return ret;
@@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
intel_ring_advance(ring);
ring->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(ring);
+ ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
- struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
- ret = intel_ring_workarounds_emit(ring, ctx);
+ ret = intel_ring_workarounds_emit(req);
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(ring);
+ ret = i915_gem_render_state_init(req);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
@@ -775,11 +780,11 @@ static int wa_add(struct drm_i915_private *dev_priv,
return 0;
}
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
- }
+ } while (0)
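The switch from a bare block to do { } while (0) in WA_REG is the standard statement-macro fix: the old expansion plus the caller's semicolon produced "{ ... };", which terminates an if early and leaves any else dangling. A tiny standalone demonstration:

#include <stdio.h>

#define GOOD_WA()	do { puts("apply workaround"); } while (0)

int main(void)
{
	int is_bdw = 1;

	if (is_bdw)
		GOOD_WA();	/* one statement; the ';' pairs with while */
	else
		puts("skip");	/* with the old "{ ... };" macro this
				 * else would not compile */

	return 0;
}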
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:chv */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
/* WaDisablePartialInstShootdown:chv */
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
- DISABLE_PIXEL_MASK_CAMMING);
+ /*
+ * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+ * but we do that in per ctx batchbuffer as there is an issue
+ * with this register not getting restored on ctx restore
+ */
}
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (INTEL_REVID(dev) == SKL_REVID_C0 ||
- INTEL_REVID(dev) == SKL_REVID_D0)
- /* WaBarrierPerformanceFixDisable:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/*
*Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
HDC_FORCE_NON_COHERENT);
}
+ if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+ INTEL_REVID(dev) == SKL_REVID_D0)
+ /* WaBarrierPerformanceFixDisable:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE |
+ HDC_BARRIER_PERFORMANCE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:skl */
+ if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
return skl_tune_iz_hashing(ring);
}
@@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring)
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
intel_fini_pipe_control(ring);
}
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
@@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return 0;
}
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
@@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return 0;
}
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
+ struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller,
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
- ret = intel_ring_begin(signaller, num_dwords);
+ ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
- u32 seqno = i915_gem_request_get_seqno(
- signaller->outstanding_lazy_request);
+ u32 seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller,
/**
* gen6_add_request - Update the semaphore mailbox registers
- *
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (ring->semaphore.signal)
- ret = ring->semaphore.signal(ring, 4);
+ ret = ring->semaphore.signal(req, 4);
else
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
*/
static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
+ struct intel_engine_cs *waiter = waiter_req->ring;
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
- ret = intel_ring_begin(waiter, 4);
+ ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
@@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter,
}
static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
+ struct intel_engine_cs *waiter = waiter_req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter,
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(waiter, 4);
+ ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
@@ -1392,8 +1418,9 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
@@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
* incoherence by flushing the 6 PIPE_NOTIFY buffers out to
* memory before requesting an interrupt.
*/
- ret = intel_ring_begin(ring, 32);
+ ret = intel_ring_begin(req, 32);
if (ret)
return ret;
@@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
}
static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring,
}
static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring,
- i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(req, 6);
if (ret)
return ret;
@@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(ring, 6 + 2);
+ ret = intel_ring_begin(req, 6 + 2);
if (ret)
return ret;
@@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
offset = cs_offset;
}
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
if (intel_ring_space(ringbuf) >= n)
return 0;
+ /* The whole point of reserving space is to not wait! */
+ WARN_ON(ringbuf->reserved_in_use);
+
list_for_each_entry(request, &ring->request_list, list) {
space = __intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size);
@@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
return 0;
}
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
- struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
- if (ringbuf->space < rem) {
- int ret = ring_wait_for_space(ring, rem);
- if (ret)
- return ret;
- }
-
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
@@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
-
- return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *req;
- int ret;
-
- /* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_request) {
- ret = i915_add_request(ring);
- if (ret)
- return ret;
- }
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
return 0;
}
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
- int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+ /*
+ * The first call merely notes the reserve request and is common for
+ * all back ends. The subsequent localised _begin() call actually
+ * ensures that the reservation is available. Without the begin, if
+ * the request creator immediately submitted the request without
+ * adding any commands to it then there might not actually be
+ * sufficient room for the submission commands.
+ */
+ intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+ return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+ WARN_ON(ringbuf->reserved_size);
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(ringbuf->reserved_in_use);
+
+ ringbuf->reserved_in_use = true;
+ ringbuf->reserved_tail = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+ WARN_ON(!ringbuf->reserved_in_use);
+ if (ringbuf->tail > ringbuf->reserved_tail) {
+ WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+ "request reserved size too small: %d vs %d!\n",
+ ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+ } else {
+ /*
+ * The ring was wrapped while the reserved space was in use.
+ * That means that some unknown amount of the ring tail was
+ * no-op filled and skipped. Thus simply adding the ring size
+ * to the tail and doing the above space check will not work.
+ * Rather than attempt to track how much of the tail was skipped,
+ * it is simpler to accept that the sanity check will occasionally
+ * be skipped as well.
+ */
+ }
+
+ ringbuf->reserved_size = 0;
+ ringbuf->reserved_in_use = false;
+}
+
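For orientation, a minimal sketch of the intended calling sequence for the reservation helpers above, assuming a request-creation flow like the one this series introduces (everything except the intel_ring_* calls is illustrative):

/* Hedged sketch only -- not the driver's actual request path. */
static int example_request_lifecycle(struct drm_i915_gem_request *req)
{
	int ret;

	/* Note the reservation and verify that it can be honoured. */
	ret = intel_ring_reserve_space(req);
	if (ret)
		return ret;

	/* ... the caller emits its own commands via intel_ring_begin() ... */

	/* On the add-request path, dip into the reserved words: */
	intel_ring_reserved_space_use(req->ringbuf);
	/* ... emit the seqno/interrupt commands here ... */
	intel_ring_reserved_space_end(req->ringbuf);

	/* Or, if the request is abandoned before submission: */
	/* intel_ring_reserved_space_cancel(req->ringbuf); */

	return 0;
}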
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int remain_actual = ringbuf->size - ringbuf->tail;
+ int ret, total_bytes, wait_bytes = 0;
+ bool need_wrap = false;
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = intel_wrap_ring_buffer(ring);
- if (unlikely(ret))
- return ret;
+ if (ringbuf->reserved_in_use)
+ total_bytes = bytes;
+ else
+ total_bytes = bytes + ringbuf->reserved_size;
+
+ if (unlikely(bytes > remain_usable)) {
+ /*
+ * Not enough space for the basic request. So need to flush
+ * out the remainder and then wait for base + reserved.
+ */
+ wait_bytes = remain_actual + total_bytes;
+ need_wrap = true;
+ } else {
+ if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So only need to wait for the
+ * reserved size after flushing out the remainder.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
+ need_wrap = true;
+ } else if (total_bytes > ringbuf->space) {
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
+ }
}
- if (unlikely(ringbuf->space < bytes)) {
- ret = ring_wait_for_space(ring, bytes);
+ if (wait_bytes) {
+ ret = ring_wait_for_space(ring, wait_bytes);
if (unlikely(ret))
return ret;
+
+ if (need_wrap)
+ __wrap_ring_buffer(ringbuf);
}
return 0;
}
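A worked example of the wrap/wait decision above, with purely illustrative numbers: suppose size = 4096, effective_size = 4032, tail = 4000, a request of bytes = 64 and a reserved_size of 160 that is not yet in use. Then total_bytes = 224, remain_usable = 32 and remain_actual = 96. Since bytes > remain_usable, the whole request must land after the wrap, so the code waits for remain_actual + total_bytes = 320 bytes and then wraps. Had bytes been 16 instead, only the reserved space would have fallen off the end, giving a wait of remain_actual + reserved_size = 256 followed by a wrap. With tail = 0, neither case triggers and at most a plain wait for total_bytes is needed.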
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
int ret;
+ WARN_ON(req == NULL);
+ ring = req->ring;
+ dev_priv = ring->dev->dev_private;
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
@@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
if (ret)
return ret;
- /* Preallocate the olr before touching the ring */
- ret = i915_gem_request_alloc(ring, ring->default_context);
- if (ret)
- return ret;
-
ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
@@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- ret = intel_ring_begin(ring, num_dwords);
+ ret = intel_ring_begin(req, num_dwords);
if (ret)
return ret;
@@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_request);
-
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
+ struct intel_engine_cs *ring = req->ring;
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
}
static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
bool ppgtt = USES_PPGTT(ring->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
}
static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
@@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
+ struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
uint32_t cmd;
int ret;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
}
int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
int ret;
if (!ring->gpu_caches_dirty)
return 0;
- ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
ring->gpu_caches_dirty = false;
return 0;
}
int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
@@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
- trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
ring->gpu_caches_dirty = false;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e539314ae87e..2e85fda94963 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -12,6 +12,7 @@
* workarounds!
*/
#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
+ int reserved_size;
+ int reserved_tail;
+ bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@ struct intel_ringbuffer {
struct intel_context;
struct drm_i915_reg_descriptor;
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies the batch starting position; also useful if we
+ * want multiple batches at different offsets based on some criteria.
+ * Not a requirement at the moment, but it provides an option for
+ * future use.
+ * size: size of the batch in dwords
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct drm_i915_gem_object *obj;
+};
+
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
@@ -143,6 +166,7 @@ struct intel_engine_cs {
struct i915_gem_batch_pool batch_pool;
struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring,
- struct intel_context *ctx);
+ int (*init_context)(struct drm_i915_gem_request *req);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct intel_engine_cs *ring,
+ int __must_check (*flush)(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_engine_cs *ring);
+ int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@ struct intel_engine_cs {
bool lazy_coherency);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
+ int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS 0x4
void (*cleanup)(struct intel_engine_cs *ring);
/* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@ struct intel_engine_cs {
};
/* AKA wait() */
- int (*sync_to)(struct intel_engine_cs *ring,
- struct intel_engine_cs *to,
+ int (*sync_to)(struct drm_i915_gem_request *to_req,
+ struct intel_engine_cs *from,
u32 seqno);
- int (*signal)(struct intel_engine_cs *signaller,
+ int (*signal)(struct drm_i915_gem_request *signaller_req,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
@@ -243,14 +267,11 @@ struct intel_engine_cs {
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
- int (*emit_request)(struct intel_ringbuffer *ringbuf,
- struct drm_i915_gem_request *request);
- int (*emit_flush)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_request)(struct drm_i915_gem_request *request);
+ int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains);
- int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+ int (*emit_bb_start)(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags);
/**
@@ -272,9 +293,12 @@ struct intel_engine_cs {
struct list_head request_list;
/**
- * Do we have some not yet emitted requests outstanding?
+ * Seqno of request most recently submitted to request_list.
+ * Used exclusively by the hang checker to avoid grabbing the lock
+ * while inspecting the request list.
*/
- struct drm_i915_gem_request *outstanding_lazy_request;
+ u32 last_submitted_seqno;
+
bool gpu_caches_dirty;
wait_queue_head_t irq_queue;
@@ -401,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
@@ -419,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -444,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
- BUG_ON(ring->outstanding_lazy_request == NULL);
- return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST 160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to emit its commands. The request
+ * creation code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a45385f4d66..af7fdb3bd663 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,22 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -281,6 +297,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
@@ -300,6 +317,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
@@ -835,12 +853,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
return enabled;
}
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- vlv_set_power_well(dev_priv, power_well, true);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +872,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
i915_redisable_vga_power_on(dev_priv->dev);
}
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_power_well_deinit(dev_priv);
vlv_set_power_well(dev_priv, power_well, false);
-
- vlv_power_sequencer_reset(dev_priv);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +911,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* display and the reference clock for VGA
* hotplug / manual detection.
*/
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +962,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
*/
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
} else {
phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1071,29 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ WARN_ON_ONCE(power_well->data != PIPE_A);
+
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
+ WARN_ON_ONCE(power_well->data != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
- if (power_well->data == PIPE_A) {
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
-
- i915_redisable_vga_power_on(dev_priv->dev);
- }
+ vlv_display_power_well_init(dev_priv);
}
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
+ WARN_ON_ONCE(power_well->data != PIPE_A);
- if (power_well->data == PIPE_A) {
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
- }
+ vlv_display_power_well_deinit(dev_priv);
chv_set_pipe_power_well(dev_priv, power_well, false);
-
- if (power_well->data == PIPE_A)
- vlv_power_sequencer_reset(dev_priv);
}
/**
@@ -1117,11 +1122,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++) {
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
- }
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -1155,11 +1157,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
- if (!--power_well->count && i915.disable_power_well) {
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
- power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
- }
+ if (!--power_well->count && i915.disable_power_well)
+ intel_power_well_disable(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aa2fd751609c..c98098e884cc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1508,51 +1508,6 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
- struct drm_crtc *crtc;
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
- /* dvo supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- crtc = intel_sdvo->base.base.crtc;
- if (!crtc) {
- intel_sdvo->base.connectors_active = false;
- return;
- }
-
- /* We set active outputs manually below in case pipe dpms doesn't change
- * due to cloning. */
- if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_sdvo, 0);
- if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
- intel_sdvo->base.connectors_active = false;
-
- intel_crtc_update_dpms(crtc);
- } else {
- intel_sdvo->base.connectors_active = true;
-
- intel_crtc_update_dpms(crtc);
-
- if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
- }
-
- intel_modeset_check_state(connector->dev);
-}
-
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@ done:
}
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = intel_sdvo_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8193a35388d7..9d8af2f8a875 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays. The value written to @start_vbl_count should be
* supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
*/
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
min = vblank_start - usecs_to_scanlines(mode, 100);
max = vblank_start - 1;
+ local_irq_disable();
+ *start_vbl_count = 0;
+
if (min <= 0 || max <= 0)
- return false;
+ return;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return false;
-
- local_irq_disable();
+ return;
trace_i915_pipe_update_start(crtc, min, max);
@@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
- return true;
}
/**
@@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
local_irq_enable();
- if (start_vbl_count != end_vbl_count)
+ if (start_vbl_count && start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
@@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(drm_plane->state)->ckey;
unsigned long surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
@@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
}
static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(dplane->state)->ckey;
sprctl = SP_ENABLE;
@@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
- pixel_size, true,
- src_w != crtc_w || src_h != crtc_h);
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_h--;
linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+ sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+ &x, &y,
obj->tiling_mode,
pixel_size,
fb->pitches[0]);
@@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
}
static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
-
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static void
@@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(plane->state)->ckey;
sprctl = SPRITE_ENABLE;
@@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
@@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ const struct drm_intel_sprite_colorkey *key =
+ &to_intel_plane_state(plane->state)->ckey;
dvscntr = DVS_ENABLE;
@@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
- intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ intel_gen4_compute_page_offset(dev_priv,
+ &x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
@@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
static int
intel_check_sprite_plane(struct drm_plane *plane,
+ struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
- struct intel_crtc_state *crtc_state;
+ struct drm_crtc *crtc = state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
@@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
int max_scale, min_scale;
bool can_scale;
int pixel_size;
- int ret;
-
- intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
- crtc_state = state->base.state ?
- intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
if (!fb) {
state->visible = false;
- goto finish;
+ return 0;
}
/* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* setup can_scale, min_scale, max_scale */
if (INTEL_INFO(dev)->gen >= 9) {
/* use scaler when colorkey is not required */
- if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
min_scale = 1;
max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
-
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
state->base.rotation);
@@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
crtc_x = dst->x1;
crtc_y = dst->y1;
@@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
-finish:
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- if (intel_crtc->active) {
- intel_crtc->atomic.fb_bits |=
- INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
- if (intel_wm_need_update(plane, &state->base))
- intel_crtc->atomic.update_wm = true;
-
- if (!state->visible) {
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |=
- (1 << drm_plane_index(plane));
- }
- }
-
- if (INTEL_INFO(dev)->gen >= 9) {
- ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
- state, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
- struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
plane->fb = fb;
- if (intel_crtc->active) {
- if (state->visible) {
- crtc_x = state->dst.x1;
- crtc_y = state->dst.y1;
- crtc_w = drm_rect_width(&state->dst);
- crtc_h = drm_rect_height(&state->dst);
- src_x = state->src.x1 >> 16;
- src_y = state->src.y1 >> 16;
- src_w = drm_rect_width(&state->src) >> 16;
- src_h = drm_rect_height(&state->src) >> 16;
- intel_plane->update_plane(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- } else {
- intel_plane->disable_plane(plane, crtc, false);
- }
+ if (!crtc->state->active)
+ return;
+
+ if (state->visible) {
+ intel_plane->update_plane(plane, crtc, fb,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst),
+ drm_rect_height(&state->dst),
+ state->src.x1 >> 16,
+ state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16);
+ } else {
+ intel_plane->disable_plane(plane, crtc);
}
}
@@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
- struct intel_plane *intel_plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
/* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,50 +960,41 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
- drm_modeset_lock_all(dev);
-
plane = drm_plane_find(dev, set->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
- intel_plane = to_intel_plane(plane);
+ drm_modeset_acquire_init(&ctx, 0);
- if (INTEL_INFO(dev)->gen >= 9) {
- /* plane scaling and colorkey are mutually exclusive */
- if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
- DRM_ERROR("colorkey not allowed with scaler\n");
- ret = -EINVAL;
- goto out_unlock;
- }
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
}
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret) {
+ to_intel_plane_state(plane_state)->ckey = *set;
+ ret = drm_atomic_commit(state);
+ }
- intel_plane->ckey = *set;
-
- /*
- * The only way this could fail would be due to
- * the current plane state being unsupportable already,
- * and we dont't consider that an error for the
- * colorkey ioctl. So just ignore any error.
- */
- intel_plane_restore(plane);
+ if (ret != -EDEADLK)
+ break;
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
-int intel_plane_restore(struct drm_plane *plane)
-{
- if (!plane->crtc || !plane->state->fb)
- return 0;
+ if (ret)
+ drm_atomic_state_free(state);
- return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->crtc_w, plane->state->crtc_h,
- plane->state->src_x, plane->state->src_y,
- plane->state->src_w, plane->state->src_h);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
}
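The loop above is the standard DRM atomic retry pattern: on -EDEADLK the atomic state must be cleared and the acquire context backed off (dropping all held locks) before retrying. A generic sketch of the pattern, with the actual update step left abstract:

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_atomic_update(state, &ctx);	/* illustrative callback */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);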
static const uint32_t ilk_plane_formats[] = {
@@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
+ intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
- intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
@@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- out:
+out:
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 8b9d325bda3c..0568ae6ec9dd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1509,7 +1509,7 @@ out:
}
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = intel_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..9d3c2e420d2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
+ unsigned size;
+ u64 offset;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
+ if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
+ /* We use the low bits to encode extra flags as the register should
+ * be naturally aligned (and those that are not so aligned merely
+ * limit the available flags for that register).
+ */
+ offset = entry->offset;
+ size = entry->size;
+ size |= reg->offset ^ offset;
+
intel_runtime_pm_get(dev_priv);
- switch (entry->size) {
+ switch (size) {
+ case 8 | 1:
+ reg->val = I915_READ64_2x32(offset, offset+4);
+ break;
case 8:
- reg->val = I915_READ64(reg->offset);
+ reg->val = I915_READ64(offset);
break;
case 4:
- reg->val = I915_READ(reg->offset);
+ reg->val = I915_READ(offset);
break;
case 2:
- reg->val = I915_READ16(reg->offset);
+ reg->val = I915_READ16(offset);
break;
case 1:
- reg->val = I915_READ8(reg->offset);
+ reg->val = I915_READ8(offset);
break;
default:
- MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}
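A hypothetical decode, to illustrate the flag encoding above (the offset is made up): for a whitelist entry with .offset = 0x2358 and .size = 8, a userspace request of reg->offset = 0x2359 still matches, since 0x2359 & -8 == 0x2358. The switch argument then becomes size = 8 | (0x2359 ^ 0x2358) = 8 | 1, selecting the I915_READ64_2x32() case, i.e. a 64-bit register read as two 32-bit halves.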
@@ -1455,20 +1467,80 @@ static int gen6_do_reset(struct drm_device *dev)
return ret;
}
-int intel_gpu_reset(struct drm_device *dev)
+static int wait_for_register(struct drm_i915_private *dev_priv,
+ const u32 reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
+{
+ return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *engine;
+ int i;
+
+ for_each_ring(engine, dev_priv, i) {
+ I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ if (wait_for_register(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700)) {
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+ goto not_ready;
+ }
+ }
+
+ return gen6_do_reset(dev);
+
+not_ready:
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+ return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
- if (INTEL_INFO(dev)->gen >= 6)
- return gen6_do_reset(dev);
+ if (!i915.reset)
+ return NULL;
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ return gen8_do_reset;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset;
else if (IS_GEN5(dev))
- return ironlake_do_reset(dev);
+ return ironlake_do_reset;
else if (IS_G4X(dev))
- return g4x_do_reset(dev);
+ return g4x_do_reset;
else if (IS_G33(dev))
- return g33_do_reset(dev);
+ return g33_do_reset;
else if (INTEL_INFO(dev)->gen >= 3)
- return i915_do_reset(dev);
+ return i915_do_reset;
else
+ return NULL;
+}
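The declaration of intel_get_gpu_reset() above returns a pointer to a function that takes a struct drm_device * and returns int. An equivalent, arguably easier-to-read spelling using a hypothetical typedef (not part of the patch) would be:

typedef int (*intel_reset_fn)(struct drm_device *);

static intel_reset_fn intel_get_gpu_reset(struct drm_device *dev);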
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ int (*reset)(struct drm_device *);
+
+ reset = intel_get_gpu_reset(dev);
+ if (reset == NULL)
return -ENODEV;
+
+ return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+ return intel_get_gpu_reset(dev) != NULL;
}
void intel_uncore_check_errors(struct drm_device *dev)
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a3ecf1069b76..644edf65dbe0 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,6 +75,11 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
},
};
+/*
+ * Resistance term: 133 Ohm cfg
+ * PREEMP config: 0.00
+ * TX/CK level: 10
+ */
static const struct dw_hdmi_phy_config imx_phy_config[] = {
/*pixelclk symbol term vlev */
{ 148500000, 0x800d, 0x0005, 0x01ad},
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 214eceefc981..e671ad369416 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+ imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74a9ce40ddc4..b4deb9cf9d71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -21,6 +21,7 @@
#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
+#include <linux/of_graph.h>
#include "imx-drm.h"
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *panel_node;
+ struct device_node *port;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
- panel_node = of_parse_phandle(np, "fsl,panel", 0);
- if (panel_node) {
- imxpd->panel = of_drm_find_panel(panel_node);
- if (!imxpd->panel)
- return -EPROBE_DEFER;
+ /* port@1 is the output port */
+ port = of_graph_get_port_by_id(np, 1);
+ if (port) {
+ struct device_node *endpoint, *remote;
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ if (remote)
+ imxpd->panel = of_drm_find_panel(remote);
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
+ }
}
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b7ddf0..4f2068fe5d88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
BUG_ON(pixels_current == pixels_prev);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
ret = mgag200_bo_reserve(pixels_1, true);
if (ret) {
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- return ret;
+ goto out_unref;
}
ret = mgag200_bo_reserve(pixels_2, true);
if (ret) {
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
mgag200_bo_unreserve(pixels_1);
- return ret;
+ goto out_unreserve1;
}
if (!handle) {
@@ -106,16 +110,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
}
}
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj) {
- mutex_unlock(&dev->struct_mutex);
- ret = -ENOENT;
- goto out1;
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
bo = gem_to_mga_bo(obj);
ret = mgag200_bo_reserve(bo, true);
if (ret) {
@@ -252,7 +246,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
if (ret)
mga_hide_cursor(mdev);
mgag200_bo_unreserve(pixels_1);
+out_unreserve1:
mgag200_bo_unreserve(pixels_2);
+out_unref:
+ drm_gem_object_unreference_unlocked(obj);
+
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 97745991544d..b0af77454d52 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -35,6 +35,7 @@ static const struct pci_device_id pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ { PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
{0,}
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index e9eea1d4e7c3..912151c36d59 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -180,6 +180,7 @@ enum mga_type {
G200_EV,
G200_EH,
G200_ER,
+ G200_EW3,
};
#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c36b8304042b..87de15ea1f93 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@ static void mga_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct mga_fbdev *mfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -110,7 +110,7 @@ static void mga_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct mga_fbdev *mfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -119,7 +119,7 @@ static void mga_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct mga_fbdev *mfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -166,8 +166,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_gem_object *gobj = NULL;
- struct device *device = &dev->pdev->dev;
- struct mgag200_bo *bo;
int ret;
void *sysram;
int size;
@@ -185,15 +183,14 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
- bo = gem_to_mga_bo(gobj);
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
- info = framebuffer_alloc(0, device);
- if (info == NULL)
- return -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
info->par = mfbdev;
@@ -208,14 +205,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
/* setup helper */
mfbdev->helper.fb = fb;
- mfbdev->helper.fbdev = info;
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
- ret = -ENOMEM;
- goto out;
- }
strcpy(info->fix.id, "mgadrmfb");
@@ -223,11 +212,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out;
- }
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
@@ -242,24 +226,15 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);
return 0;
-out:
- return ret;
}
static int mga_fbdev_destroy(struct drm_device *dev,
struct mga_fbdev *mfbdev)
{
- struct fb_info *info;
struct mga_framebuffer *mfb = &mfbdev->mfb;
- if (mfbdev->helper.fbdev) {
- info = mfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&mfbdev->helper);
+ drm_fb_helper_release_fbi(&mfbdev->helper);
if (mfb->obj) {
drm_gem_object_unreference_unlocked(mfb->obj);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index d3dcf54e6233..10535e3b75f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -101,6 +101,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
case G200_SE_B:
case G200_EV:
case G200_WB:
+ case G200_EW3:
data = 1;
clock = 2;
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b8375e..de06388069e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -82,12 +82,19 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
int orig;
int test1, test2;
int orig1, orig2;
+ unsigned int vram_size;
/* Probe */
orig = ioread16(mem);
iowrite16(0, mem);
- for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ vram_size = mdev->mc.vram_window;
+
+ if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
+ vram_size = vram_size - 0x400000;
+ }
+
+ for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
orig1 = ioread8(mem + offset);
orig2 = ioread8(mem + offset + 0x100);
@@ -345,23 +352,15 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
uint64_t *offset)
{
struct drm_gem_object *obj;
- int ret;
struct mgag200_bo *bo;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
bo = gem_to_mga_bo(obj);
*offset = mgag200_bo_mmap_offset(bo);
- drm_gem_object_unreference(obj);
- ret = 0;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ad4b9010dfb0..c99d3fe12881 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -104,6 +104,8 @@ static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+#define P_ARRAY_SIZE 9
+
static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
@@ -111,37 +113,97 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
+ unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+ unsigned int fvv;
+ unsigned int i;
+
+ if (mdev->unique_rev_id <= 0x03) {
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+ } else {
- m = n = p = 0;
- vcomax = 320000;
- vcomin = 160000;
- pllreffreq = 25000;
- delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
+ m = n = p = 0;
+ vcomax = 1600000;
+ vcomin = 800000;
+ pllreffreq = 25000;
- for (testp = 8; testp > 0; testp /= 2) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
+ if (clock < 25000)
+ clock = 25000;
- for (testn = 17; testn < 256; testn++) {
- for (testm = 1; testm < 32; testm++) {
- computed = (pllreffreq * testn) /
- (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm - 1;
- n = testn - 1;
- p = testp - 1;
+ clock = clock * 2;
+
+ delta = 0xFFFFFFFF;
+		/* Permitted delta is 0.5% per the VESA specification */
+ permitteddelta = clock * 5 / 1000;
+
+		for (i = 0; i < P_ARRAY_SIZE; i++) {
+ testp = pvalues_e4[i];
+
+ if ((clock * testp) > vcomax)
+ continue;
+ if ((clock * testp) < vcomin)
+ continue;
+
+ for (testn = 50; testn <= 256; testn++) {
+ for (testm = 1; testm <= 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
}
}
}
+
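+		/* derive the VCO frequency (ref * N / M) and scale it into the 4-bit code packed into bits 4-7 of p */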
+ fvv = pllreffreq * testn / testm;
+ fvv = (fvv - 800000) / 50000;
+
+ if (fvv > 15)
+ fvv = 15;
+
+ p |= (fvv << 4);
+ m |= 0x80;
+
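+		/* the search above ran on the doubled clock; restore the requested value */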
+ clock = clock / 2;
}
if (delta > permitteddelta) {
@@ -158,8 +220,8 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
- unsigned int testp, testm, testn;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn, testp2;
unsigned int p, m, n;
unsigned int computed;
int i, j, tmpcount, vcount;
@@ -167,32 +229,71 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
u8 tmp;
m = n = p = 0;
- vcomax = 550000;
- vcomin = 150000;
- pllreffreq = 48000;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
- for (testp = 1; testp < 9; testp++) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
+ if (mdev->type == G200_EW3) {
+
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 25000;
+
+ for (testp = 1; testp < 8; testp++) {
+ for (testp2 = 1; testp2 < 8; testp2++) {
+ if (testp < testp2)
+ continue;
+ if ((clock * testp * testp2) > vcomax)
+ continue;
+ if ((clock * testp * testp2) < vcomin)
+ continue;
+ for (testm = 1; testm < 26; testm++) {
+				for (testn = 32; testn < 2048; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp * testp2);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
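+						/* the feedback divider spans 11 bits: bits 8-10 of testn are folded into the m and p register fields */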
+ m = ((testn & 0x100) >> 1) |
+ (testm);
+ n = (testn & 0xFF);
+ p = ((testn & 0x600) >> 3) |
+ (testp2 << 3) |
+ (testp);
+ }
+ }
+ }
+ }
+ }
+ } else {
- for (testm = 1; testm < 17; testm++) {
- for (testn = 1; testn < 151; testn++) {
- computed = (pllreffreq * testn) /
- (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn - 1;
- m = (testm - 1) | ((n >> 1) & 0x80);
- p = testp - 1;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) |
+ ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
}
}
}
@@ -298,7 +399,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int delta, tmpdelta;
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
@@ -310,7 +411,6 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
pllreffreq = 50000;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
for (testp = 16; testp > 0; testp--) {
if (clock * testp > vcomax)
@@ -392,7 +492,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
- unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int delta, tmpdelta;
unsigned int testp, testm, testn;
unsigned int p, m, n;
unsigned int computed;
@@ -406,7 +506,6 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
pllreffreq = 33333;
delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
for (testp = 16; testp > 0; testp >>= 1) {
if (clock * testp > vcomax)
@@ -572,6 +671,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
return mga_g200se_set_plls(mdev, clock);
break;
case G200_WB:
+ case G200_EW3:
return mga_g200wb_set_plls(mdev, clock);
break;
case G200_EV:
@@ -823,6 +923,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
option2 = 0x00008000;
break;
case G200_WB:
+ case G200_EW3:
dacvalue[MGA1064_VREF_CTL] = 0x07;
option = 0x41049120;
option2 = 0x0000b000;
@@ -878,7 +979,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev) &&
((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
continue;
- if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ if ((mdev->type == G200_EV ||
+ mdev->type == G200_WB ||
+ mdev->type == G200_EH ||
+ mdev->type == G200_EW3) &&
(i >= 0x44) && (i <= 0x4e))
continue;
@@ -980,7 +1084,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
else
ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
ext_vga[4] = 0;
- if (mdev->type == G200_WB)
+ if (mdev->type == G200_WB || mdev->type == G200_EW3)
ext_vga[1] |= 0x88;
/* Set pixel clocks */
@@ -996,6 +1100,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (mdev->type == G200_ER)
WREG_ECRT(0x24, 0x5);
+ if (mdev->type == G200_EW3)
+ WREG_ECRT(0x34, 0x5);
+
if (mdev->type == G200_EV) {
WREG_ECRT(6, 0);
}
@@ -1208,7 +1315,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
WREG_SEQ(1, tmp | 0x20);
}
- if (mdev->type == G200_WB)
+ if (mdev->type == G200_WB || mdev->type == G200_EW3)
mga_g200wb_prepare(crtc);
WREG_CRT(17, 0);
@@ -1225,7 +1332,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
u8 tmp;
- if (mdev->type == G200_WB)
+ if (mdev->type == G200_WB || mdev->type == G200_EW3)
mga_g200wb_commit(crtc);
if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
@@ -1495,7 +1602,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (24400 * 1024))
return MODE_BANDWIDTH;
- } else if (mdev->unique_rev_id >= 0x02) {
+ } else if (mdev->unique_rev_id == 0x02) {
if (mode->hdisplay > 1920)
return MODE_VIRTUAL_X;
if (mode->vdisplay > 1200)
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d16964ea0ed4..05108b505fbf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -378,7 +378,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
int mgag200_bo_unpin(struct mgag200_bo *bo)
{
- int i, ret;
+ int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
return 0;
@@ -389,11 +389,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
+ return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
int mgag200_bo_push_sysram(struct mgag200_bo *bo)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0d93f5..8e6c7c638e24 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
+ select QCOM_SCM
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@ config DRM_MSM_DSI_PLL
help
Choose this option to enable DSI PLL driver which provides DSI
source clocks under common clock framework.
+
+config DRM_MSM_DSI_28NM_PHY
+ bool "Enable DSI 28nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+ bool "Enable DSI 20nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b94d6f0..0a543eb5e5d7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
@@ -10,6 +10,7 @@ msm-y := \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
+ hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
- dsi/dsi_phy.o \
+ dsi/phy/dsi_phy.o \
mdp/mdp5/mdp5_cmd_encoder.o
-msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
- dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e402796..0261f0d31612 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5cf318..48d133711487 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@ enum a3xx_tex_type {
A3XX_TEX_3D = 3,
};
+enum a3xx_tex_msaa {
+ A3XX_TPL1_MSAA1X = 0,
+ A3XX_TPL1_MSAA2X = 1,
+ A3XX_TPL1_MSAA4X = 2,
+ A3XX_TPL1_MSAA8X = 3,
+};
+
#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
@@ -2652,6 +2659,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
@@ -2695,6 +2703,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
}
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2750,6 +2759,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A3XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
}
#define REG_A3XX_TEX_CONST_2 0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
#define A3XX_TEX_CONST_2_INDX__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
{
@@ -2805,7 +2820,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A3XX_TEX_CONST_3 0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x00007fff
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf62583..ac55066db3b0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@ enum a4xx_depth_format {
DEPTH4_NONE = 0,
DEPTH4_16 = 1,
DEPTH4_24_8 = 2,
+ DEPTH4_32 = 3,
};
enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
{
@@ -439,7 +440,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
{
@@ -570,6 +571,15 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
}
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
@@ -811,6 +821,23 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+#define REG_A4XX_RB_STENCIL_INFO 0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
@@ -1433,6 +1460,7 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
{
return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
}
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
@@ -1470,6 +1498,76 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
+#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1492,6 +1590,82 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
+#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1693,6 +1867,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
{
return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
}
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A4XX_VFD_CONTROL_4 0x00002204
@@ -2489,6 +2675,8 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val)
#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+#define REG_A4XX_UNKNOWN_2352 0x00002352
+
#define REG_A4XX_TEX_SAMP_0 0x00000000
#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1fa552b..399a9e528139 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23bf9041..41904fed1350 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63785 bytes, from 2015-08-14 18:27:06)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@ enum vgt_event_type {
enum pc_di_primtype {
DI_PT_NONE = 0,
- DI_PT_POINTLIST_A2XX = 1,
+ DI_PT_POINTLIST_PSIZE = 1,
DI_PT_LINELIST = 2,
DI_PT_LINESTRIP = 3,
DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@ enum pc_di_primtype {
DI_PT_TRISTRIP = 6,
DI_PT_LINELOOP = 7,
DI_PT_RECTLIST = 8,
- DI_PT_POINTLIST_A3XX = 9,
+ DI_PT_POINTLIST = 9,
DI_PT_LINE_ADJ = 10,
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e2ff71..6edcd6f57e70 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
{
- if (!msm_dsi || !msm_dsi->panel)
+ if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
return NULL;
- return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+ return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
}
@@ -74,19 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi)
static struct msm_dsi *dsi_init(struct platform_device *pdev)
{
- struct msm_dsi *msm_dsi = NULL;
+ struct msm_dsi *msm_dsi;
int ret;
- if (!pdev) {
- ret = -ENXIO;
- goto fail;
- }
+ if (!pdev)
+ return ERR_PTR(-ENXIO);
msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
- if (!msm_dsi) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!msm_dsi)
+ return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
/* Init dsi host */
ret = msm_dsi_host_init(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
/* GET dsi PHY */
ret = dsi_get_phy(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
/* Register to dsi manager */
ret = msm_dsi_manager_register(msm_dsi);
if (ret)
- goto fail;
+ goto destroy_dsi;
return msm_dsi;
-fail:
- if (msm_dsi)
- dsi_destroy(msm_dsi);
-
+destroy_dsi:
+ dsi_destroy(msm_dsi);
return ERR_PTR(ret);
}
@@ -196,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
{
struct msm_drm_private *priv = dev->dev_private;
+ struct drm_bridge *ext_bridge;
int ret, i;
if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->encoders[i] = encoders[i];
}
- msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+ /*
+ * check if the dsi encoder output is connected to a panel or an
+ * external bridge. We create a connector only if we're connected to a
+ * drm_panel device. When we're connected to an external bridge, we
+ * assume that the drm_bridge driver will create the connector itself.
+ */
+ ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+ if (ext_bridge)
+ msm_dsi->connector =
+ msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ else
+ msm_dsi->connector =
+ msm_dsi_manager_connector_init(msm_dsi->id);
+
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+ dev_err(dev->dev,
+ "failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
}
@@ -242,10 +252,12 @@ fail:
msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
msm_dsi->bridge = NULL;
}
- if (msm_dsi->connector) {
+
+	/* don't destroy the connector if we didn't create it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+
+ msm_dsi->connector = NULL;
}
return ret;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697de4858..5f5a3732cdf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
#define DSI_1 1
#define DSI_MAX 2
-#define DSI_CLOCK_MASTER DSI_0
-#define DSI_CLOCK_SLAVE DSI_1
-
-#define DSI_LEFT DSI_0
-#define DSI_RIGHT DSI_1
-
-/* According to the current drm framework sequence, take the encoder of
- * DSI_1 as master encoder
- */
-#define DSI_ENCODER_MASTER DSI_1
-#define DSI_ENCODER_SLAVE DSI_0
-
enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
+ MSM_DSI_PHY_20NM,
MSM_DSI_PHY_MAX
};
@@ -65,13 +54,21 @@ struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
+ /* connector managed by us when we're connected to a drm_panel */
struct drm_connector *connector;
+ /* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
struct mipi_dsi_host *host;
struct msm_dsi_phy *phy;
+
+ /*
+	 * a panel or an external bridge is connected to the dsi bridge output;
+	 * only one of the two can be valid at a time
+ */
struct drm_panel *panel;
- unsigned long panel_flags;
+ struct drm_bridge *external_bridge;
+ unsigned long device_flags;
struct device *phy_dev;
bool phy_enabled;
@@ -86,6 +83,7 @@ struct msm_dsi {
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_phy_enable(int id,
const unsigned long bit_rate, const unsigned long esc_rate,
u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
/* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
/* dsi pll */
@@ -106,6 +109,8 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@ static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
{
return -ENODEV;
}
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+ return 0;
+}
#endif
/* dsi host */
@@ -140,6 +152,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
unsigned long *panel_flags);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
struct msm_dsi_phy;
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate);
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
u32 *clk_pre, u32 *clk_post);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea04bcbc..1d2e32f0817b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
#define REG_DSI_TRIG_DMA 0x0000008c
#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
#define REG_DSI_TIMEOUT_STATUS 0x000000bc
@@ -435,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_PHY_RESET 0x00000128
#define DSI_PHY_RESET_RESET 0x00000001
+#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
+
#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
@@ -830,6 +838,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
@@ -994,5 +1003,185 @@ static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 000000000000..5872d5e5934f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_cfg.h"
+
+/* DSI v2 is not yet supported */
+static const struct msm_dsi_config dsi_v2_cfg = {
+ .io_offset = 0,
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 2850000, 2850000, 100000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 7,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdda", 1250000, 1250000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ {"vcca", 1000000, 1000000, 10000, 100},
+ {"vdd", 1800000, 1800000, 100000, 100},
+ {"lab_reg", -1, -1, -1, -1},
+ {"ibb_reg", -1, -1, -1, -1},
+ },
+ }
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+ {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+ &msm8974_apq8084_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+ int i;
+
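+	/* find the entry matching this controller's major/minor version */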
+ for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+ if ((dsi_cfg_handlers[i].major == major) &&
+ (dsi_cfg_handlers[i].minor == minor)) {
+ cfg_hnd = &dsi_cfg_handlers[i];
+ break;
+ }
+ }
+
+ return cfg_hnd;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 000000000000..4cf887240177
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+
+#define DSI_6G_REG_SHIFT 4
+
+struct msm_dsi_config {
+ u32 io_offset;
+ struct dsi_reg_config reg_cfg;
+};
+
+struct msm_dsi_cfg_handler {
+ u32 major;
+ u32 minor;
+ const struct msm_dsi_config *cfg;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de0400923303..8d82973fe9db 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>
#include "dsi.h"
#include "dsi.xml.h"
-
-#define MSM_DSI_VER_MAJOR_V2 0x02
-#define MSM_DSI_VER_MAJOR_6G 0x03
-#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
-#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
-#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
-#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
-#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
-
-#define DSI_6G_REG_SHIFT 4
-
-struct dsi_config {
- u32 major;
- u32 minor;
- u32 io_offset;
- struct dsi_reg_config reg_cfg;
-};
-
-static const struct dsi_config dsi_cfgs[] = {
- {MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
- { /* 8974 v1 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_0,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8974 v2 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8974 v3 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8084 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_2,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
- { /* 8916 */
- .major = MSM_DSI_VER_MAJOR_6G,
- .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
- .io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 4,
- .regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 2850000, 2850000, 100000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- },
-};
+#include "dsi_cfg.h"
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@@ -194,7 +106,7 @@ struct msm_dsi_host {
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
- const struct dsi_config *cfg;
+ const struct msm_dsi_cfg_handler *cfg_hnd;
struct completion dma_comp;
struct completion video_comp;
@@ -212,8 +124,8 @@ struct msm_dsi_host {
struct drm_display_mode *mode;
- /* Panel info */
- struct device_node *panel_node;
+ /* connected device info */
+ struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
- return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+ return msm_readl(msm_host->ctrl_base + reg);
}
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
- msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+ msm_writel(data, msm_host->ctrl_base + reg);
}
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
-static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+ struct msm_dsi_host *msm_host)
{
- const struct dsi_config *cfg;
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct regulator *gdsc_reg;
- int i, ret;
+ int ret;
u32 major = 0, minor = 0;
gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
if (IS_ERR(gdsc_reg)) {
pr_err("%s: cannot get gdsc\n", __func__);
- goto fail;
+ goto exit;
}
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- regulator_put(gdsc_reg);
- goto fail;
+ goto put_gdsc;
}
ret = clk_prepare_enable(msm_host->ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
- regulator_disable(gdsc_reg);
- regulator_put(gdsc_reg);
- goto fail;
+ goto disable_gdsc;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
-
- clk_disable_unprepare(msm_host->ahb_clk);
- regulator_disable(gdsc_reg);
- regulator_put(gdsc_reg);
if (ret) {
pr_err("%s: Invalid version\n", __func__);
- goto fail;
+ goto disable_clks;
}
- for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
- cfg = dsi_cfgs + i;
- if ((cfg->major == major) && (cfg->minor == minor))
- return cfg;
- }
- pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+ cfg_hnd = msm_dsi_cfg_get(major, minor);
-fail:
- return NULL;
+ DBG("%s: Version %x:%x\n", __func__, major, minor);
+
+disable_clks:
+ clk_disable_unprepare(msm_host->ahb_clk);
+disable_gdsc:
+ regulator_disable(gdsc_reg);
+put_gdsc:
+ regulator_put(gdsc_reg);
+exit:
+ return cfg_hnd;
}
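
The rewrite above replaces the copy-pasted cleanup in each error branch with the kernel's usual reverse-order goto unwind; note that even the success path funnels through the full cleanup tail, because the regulator and clock are only needed long enough to read the version register. A self-contained toy of the same shape (userspace stand-ins; step() is a hypothetical helper returning 0 on success):

#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("%s\n", what);
	return fail;
}

static int example_unwind(void)
{
	int ret;

	ret = step("get regulator", 0);
	if (ret)
		goto exit;
	ret = step("enable regulator", 0);
	if (ret)
		goto put_reg;
	ret = step("enable clock", 0);
	if (ret)
		goto disable_reg;

	ret = step("read version register", 0);

	step("disable clock", 0);	/* success also unwinds from here */
disable_reg:
	step("disable regulator", 0);
put_reg:
	step("put regulator", 0);
exit:
	return ret;
}

int main(void)
{
	return example_unwind();
}
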
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
@@ -304,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i;
DBG("");
@@ -320,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int ret, i;
DBG("");
@@ -354,8 +263,8 @@ fail:
static int dsi_regulator_init(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
- int num = msm_host->cfg->reg_cfg.num;
+ const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+ int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i, ret;
for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u32 data = 0;
if (!enable) {
@@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
- if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
- (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
@@ -1257,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
- if (status) {
+ if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
}
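
Checking status against the five architected lane-error bits, rather than any nonzero readback, keeps reserved or latched status bits from being misreported as PHY errors. A possible consolidation of the check (DSI_DLN0_PHY_ERR_MASK is a hypothetical name; the individual defines come from the generated dsi.xml.h):

#define DSI_DLN0_PHY_ERR_MASK	(DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | \
				 DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | \
				 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | \
				 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | \
				 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)
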
@@ -1359,7 +1273,8 @@ static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
return PTR_ERR(msm_host->disp_en_gpio);
}
- msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
+ msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+ GPIOD_IN);
if (IS_ERR(msm_host->te_gpio)) {
DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- msm_host->panel_node = dsi->dev.of_node;
+ WARN_ON(dsi->dev.of_node != msm_host->device_node);
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- msm_host->panel_node = NULL;
+ msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
if (msm_host->dev)
@@ -1429,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
.transfer = dsi_host_transfer,
};
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *endpoint, *device_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
+ if (ret) {
+ dev_err(dev, "%s: host index not specified, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Get the first endpoint node. In our case, dsi has one output port
+ * to which the panel is connected. Don't return an error if a port
+ * isn't defined. It's possible that there is nothing connected to
+ * the dsi output.
+ */
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ dev_dbg(dev, "%s: no endpoint\n", __func__);
+ return 0;
+ }
+
+ /* Get panel node from the output port's endpoint data */
+ device_node = of_graph_get_remote_port_parent(endpoint);
+ if (!device_node) {
+ dev_err(dev, "%s: no valid device\n", __func__);
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(device_node);
+
+ msm_host->device_node = device_node;
+
+ return 0;
+}
+
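
One subtlety in dsi_host_parse_dt(): of_node_put(device_node) runs before the pointer is cached in msm_host->device_node, so the host keeps using the node without holding a reference. That is safe for always-resident FDT nodes, but a stricter variant would hold the reference for the pointer's lifetime. A sketch of that variant (example_parse_remote() is hypothetical; the matching put would move to dsi_host_detach()):

static int example_parse_remote(struct msm_dsi_host *msm_host,
				struct device_node *np)
{
	struct device_node *endpoint, *device_node;

	endpoint = of_graph_get_next_endpoint(np, NULL);
	if (!endpoint)
		return 0;	/* nothing connected to the DSI output */

	device_node = of_graph_get_remote_port_parent(endpoint);
	of_node_put(endpoint);
	if (!device_node)
		return -ENODEV;

	/* keep the reference as long as the pointer is cached ... */
	msm_host->device_node = device_node;
	return 0;
	/* ... and drop it on detach: of_node_put(msm_host->device_node); */
}
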
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "qcom,dsi-host-index", &msm_host->id);
+ msm_host->pdev = pdev;
+
+ ret = dsi_host_parse_dt(msm_host);
if (ret) {
- dev_err(&pdev->dev,
- "%s: host index not specified, ret=%d\n",
- __func__, ret);
+ pr_err("%s: failed to parse dt\n", __func__);
goto fail;
}
- msm_host->pdev = pdev;
ret = dsi_clk_init(msm_host);
if (ret) {
@@ -1466,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- msm_host->cfg = dsi_get_config(msm_host);
- if (!msm_host->cfg) {
+ msm_host->cfg_hnd = dsi_get_config(msm_host);
+ if (!msm_host->cfg_hnd) {
ret = -EINVAL;
pr_err("%s: get config failed\n", __func__);
goto fail;
}
+ /* fixup base address by io offset */
+ msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
ret = dsi_regulator_init(msm_host);
if (ret) {
pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct device_node *node;
int ret;
/* Register mipi dsi host */
@@ -1577,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* It makes sure panel is connected when fbcon detects
* connector status and gets the proper display mode to
* create framebuffer.
+ * Don't try to defer if there is nothing connected to the dsi
+ * output.

*/
- if (check_defer) {
- node = of_get_child_by_name(msm_host->pdev->dev.of_node,
- "panel");
- if (node) {
- if (!of_drm_find_panel(node))
+ if (check_defer && msm_host->device_node) {
+ if (!of_drm_find_panel(msm_host->device_node))
+ if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
- }
}
}
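
The deferral logic now keys off the OF-graph result: if the DT shows a downstream device but neither a panel nor a bridge driver has bound to it yet, registration is retried later; with nothing connected, probing proceeds. The predicate, pulled out as a hypothetical helper (at this kernel version both lookups return NULL on a miss, matching the ! tests above):

static bool example_should_defer(struct device_node *device_node)
{
	if (!device_node)
		return false;	/* nothing connected: don't block probe */

	return !of_drm_find_panel(device_node) &&
	       !of_drm_find_bridge(device_node);
}
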
@@ -1663,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int data_byte, rx_byte, dlen, end;
int short_response, diff, pkt_size, ret = 0;
char cmd;
@@ -1704,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return -EINVAL;
}
- if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
- (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
/* Clear the RDBK_DATA registers */
dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto fail_disable_reg;
}
+ ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+ if (ret) {
+ pr_err("%s: failed to set pinctrl default state, %d\n",
+ __func__, ret);
+ goto fail_disable_clk;
+ }
+
dsi_timing_setup(msm_host);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
return 0;
+fail_disable_clk:
+ dsi_clk_ctrl(msm_host, 0);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -1953,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
+ pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
msm_dsi_manager_phy_disable(msm_host->id);
dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_panel *panel;
- panel = of_drm_find_panel(msm_host->panel_node);
+ panel = of_drm_find_panel(msm_host->device_node);
if (panel_flags)
*panel_flags = msm_host->mode_flags;
return panel;
}
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return of_drm_find_bridge(msm_host->device_node);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac6612b6f8..0455ff75074a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
#include "msm_kms.h"
#include "dsi.h"
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as the master encoder.
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
struct msm_dsi_manager {
struct msm_dsi *dsi[DSI_MAX];
- bool is_dual_panel;
+ bool is_dual_dsi;
bool is_sync_needed;
- int master_panel_id;
+ int master_dsi_link_id;
};
static struct msm_dsi_manager msm_dsim_glb;
-#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
+#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi)
#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
-#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
+#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
{
@@ -38,23 +50,23 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
-static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
- /* We assume 2 dsi nodes have the same information of dual-panel and
+ /* We assume 2 dsi nodes have the same information of dual-dsi and
* sync-mode, and only one node specifies master in case of dual mode.
*/
- if (!msm_dsim->is_dual_panel)
- msm_dsim->is_dual_panel = of_property_read_bool(
- np, "qcom,dual-panel-mode");
+ if (!msm_dsim->is_dual_dsi)
+ msm_dsim->is_dual_dsi = of_property_read_bool(
+ np, "qcom,dual-dsi-mode");
- if (msm_dsim->is_dual_panel) {
- if (of_property_read_bool(np, "qcom,master-panel"))
- msm_dsim->master_panel_id = id;
+ if (msm_dsim->is_dual_dsi) {
+ if (of_property_read_bool(np, "qcom,master-dsi"))
+ msm_dsim->master_dsi_link_id = id;
if (!msm_dsim->is_sync_needed)
msm_dsim->is_sync_needed = of_property_read_bool(
- np, "qcom,sync-dual-panel");
+ np, "qcom,sync-dual-dsi");
}
return 0;
@@ -68,7 +80,7 @@ static int dsi_mgr_host_register(int id)
struct msm_dsi_pll *src_pll;
int ret;
- if (!IS_DUAL_PANEL()) {
+ if (!IS_DUAL_DSI()) {
ret = msm_dsi_host_register(msm_dsi->host, true);
if (ret)
return ret;
@@ -78,9 +90,9 @@ static int dsi_mgr_host_register(int id)
} else if (!other_dsi) {
ret = 0;
} else {
- struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+ struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
- struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+ struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
@@ -144,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect(
DBG("id=%d", id);
if (!msm_dsi->panel) {
msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
- &msm_dsi->panel_flags);
+ &msm_dsi->device_flags);
/* There is only 1 panel in the global panel list
- * for dual panel mode. Therefore slave dsi should get
+ * for dual DSI mode. Therefore slave dsi should get
* the drm_panel instance from master dsi, and
* keep using the panel flags got from the current DSI link.
*/
- if (!msm_dsi->panel && IS_DUAL_PANEL() &&
- !IS_MASTER_PANEL(id) && other_dsi)
+ if (!msm_dsi->panel && IS_DUAL_DSI() &&
+ !IS_MASTER_DSI_LINK(id) && other_dsi)
msm_dsi->panel = msm_dsi_host_get_panel(
other_dsi->host, NULL);
- if (msm_dsi->panel && IS_DUAL_PANEL())
+ if (msm_dsi->panel && IS_DUAL_DSI())
drm_object_attach_property(&connector->base,
connector->dev->mode_config.tile_property, 0);
- /* Set split display info to kms once dual panel is connected
- * to both hosts
+ /* Set split display info to kms once dual DSI panel is
+ * connected to both hosts.
*/
- if (msm_dsi->panel && IS_DUAL_PANEL() &&
+ if (msm_dsi->panel && IS_DUAL_DSI() &&
other_dsi && other_dsi->panel) {
- bool cmd_mode = !(msm_dsi->panel_flags &
+ bool cmd_mode = !(msm_dsi->device_flags &
MIPI_DSI_MODE_VIDEO);
struct drm_encoder *encoder = msm_dsi_get_encoder(
dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect(
kms->funcs->set_split_display(kms, encoder,
slave_enc, cmd_mode);
else
- pr_err("mdp does not support dual panel\n");
+ pr_err("mdp does not support dual DSI\n");
}
}
@@ -273,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
if (!num)
return 0;
- if (IS_DUAL_PANEL()) {
+ if (IS_DUAL_DSI()) {
/* report half resolution to user */
dsi_dual_connector_fix_modes(connector);
ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
- if (!panel || (is_dual_panel && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi) ||
+ (is_dual_dsi && (DSI_1 == id)))
return;
ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_on_fail;
}
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/
- ret = drm_panel_prepare(panel);
- if (ret) {
- pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
- goto panel_prep_fail;
+ if (panel) {
+ ret = drm_panel_prepare(panel);
+ if (ret) {
+ pr_err("%s: prepare panel %d failed, %d\n", __func__,
+ id, ret);
+ goto panel_prep_fail;
+ }
}
ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_en_fail;
}
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_enable(msm_dsi1->host);
if (ret) {
pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
- goto panel_en_fail;
+ if (panel) {
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+ ret);
+ goto panel_en_fail;
+ }
}
return;
panel_en_fail:
- if (is_dual_panel && msm_dsi1)
+ if (is_dual_dsi && msm_dsi1)
msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
- drm_panel_unprepare(panel);
+ if (panel)
+ drm_panel_unprepare(panel);
panel_prep_fail:
- if (is_dual_panel && msm_dsi1)
+ if (is_dual_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
- if (!panel || (is_dual_panel && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi) ||
+ (is_dual_dsi && (DSI_1 == id)))
return;
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+ if (panel) {
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+ ret);
+ }
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_disable(msm_dsi1->host);
if (ret)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
- ret = drm_panel_unprepare(panel);
- if (ret)
- pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+ if (panel) {
+ ret = drm_panel_unprepare(panel);
+ if (ret)
+ pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+ id, ret);
+ }
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
- if (is_dual_panel && msm_dsi1) {
+ if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_off(msm_dsi1->host);
if (ret)
pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
- bool is_dual_panel = IS_DUAL_PANEL();
+ bool is_dual_dsi = IS_DUAL_DSI();
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name,
@@ -471,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_panel && (DSI_1 == id))
+ if (is_dual_dsi && (DSI_1 == id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
- if (is_dual_panel && other_dsi)
+ if (is_dual_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
@@ -503,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.mode_set = dsi_mgr_bridge_mode_set,
};
-/* initialize connector */
+/* initialize connector when we're connected to a drm_panel */
struct drm_connector *msm_dsi_manager_connector_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@ fail:
return ERR_PTR(ret);
}
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_device *dev = msm_dsi->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *int_bridge, *ext_bridge;
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+
+ int_bridge = msm_dsi->bridge;
+ ext_bridge = msm_dsi->external_bridge =
+ msm_dsi_host_get_bridge(msm_dsi->host);
+
+ /*
+ * HACK: we may not know the external DSI bridge device's mode
+ * flags here. We'll get to know them only when the device
+ * attaches to the dsi host. For now, assume the bridge supports
+ * DSI video mode.
+ */
+ encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+
+ /* link the internal dsi bridge to the external bridge */
+ int_bridge->next = ext_bridge;
+ /* set the external bridge's encoder as dsi's encoder */
+ ext_bridge->encoder = encoder;
+
+ drm_bridge_attach(dev, ext_bridge);
+
+ /*
+ * We need the drm_connector created by the external bridge
+ * driver (or someone else) so we can feed it into our driver's
+ * priv->connector[] list, mainly for msm_fbdev_init().
+ */
+ connector_list = &dev->mode_config.connector_list;
+
+ list_for_each_entry(connector, connector_list, head) {
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id)
+ return connector;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
}
@@ -598,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id,
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi_phy *phy = msm_dsi->phy;
+ int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+ struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
int ret;
- ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+ ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
if (ret)
return ret;
+ /*
+ * Resetting the DSI PHY silently returns its PLL registers to their
+ * reset state, which confuses the clock driver and leads to wrong
+ * output rates on the link clocks. Restore the PLL state if this
+ * PLL is being used as a clock source.
+ */
+ if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
+ ret = msm_dsi_pll_restore_state(pll);
+ if (ret) {
+ pr_err("%s: failed to restore pll state\n", __func__);
+ msm_dsi_phy_disable(phy);
+ return ret;
+ }
+ }
+
msm_dsi->phy_enabled = true;
msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
@@ -616,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id)
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_phy *phy = msm_dsi->phy;
+ struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+
+ /* Save PLL status if it is a clock source */
+ if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
+ msm_dsi_pll_save_state(pll);
/* disable DSI phy
* In dual-dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
- if (IS_DUAL_PANEL() && mdsi && sdsi) {
+ if (IS_DUAL_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
@@ -713,9 +809,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
msm_dsim->dsi[id] = msm_dsi;
- ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+ ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
if (ret) {
- pr_err("%s: failed to parse dual panel info\n", __func__);
+ pr_err("%s: failed to parse dual DSI info\n", __func__);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f3ef48..5de505e627be 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 2d3b33ce1cc5..401ff58d6893 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -12,142 +12,8 @@
*/
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include "dsi.h"
-#include "dsi.xml.h"
-
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-struct dsi_phy_ops {
- int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
- const unsigned long bit_rate, const unsigned long esc_rate);
- int (*disable)(struct msm_dsi_phy *phy);
-};
-
-struct dsi_phy_cfg {
- enum msm_dsi_phy_type type;
- struct dsi_reg_config reg_cfg;
- struct dsi_phy_ops ops;
-};
-
-struct dsi_dphy_timing {
- u32 clk_pre;
- u32 clk_post;
- u32 clk_zero;
- u32 clk_trail;
- u32 clk_prepare;
- u32 hs_exit;
- u32 hs_zero;
- u32 hs_prepare;
- u32 hs_trail;
- u32 hs_rqst;
- u32 ta_go;
- u32 ta_sure;
- u32 ta_get;
-};
-
-struct msm_dsi_phy {
- struct platform_device *pdev;
- void __iomem *base;
- void __iomem *reg_base;
- int id;
-
- struct clk *ahb_clk;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
-
- struct dsi_dphy_timing timing;
- const struct dsi_phy_cfg *cfg;
-
- struct msm_dsi_pll *pll;
-};
-
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
- if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- for (i = 0; i < num; i++) {
- if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
- ret = regulator_set_voltage(s[i].consumer,
- regs[i].min_voltage, regs[i].max_voltage);
- if (ret < 0) {
- dev_err(dev,
- "regulator %d set voltage failed, %d\n",
- i, ret);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- int num = phy->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- dev_err(dev,
- "regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
+#include "dsi_phy.h"
#define S_DIV_ROUND_UP(n, d) \
(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
@@ -156,6 +22,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
s32 min_result, bool even)
{
s32 v;
+
v = (tmax - tmin) * percent;
v = S_DIV_ROUND_UP(v, 100) + tmin;
if (even && (v & 0x1))
@@ -164,7 +31,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
return max_t(s32, min_result, v);
}
-static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
s32 ui, s32 coeff, s32 pcnt)
{
s32 tmax, tmin, clk_z;
@@ -186,7 +53,7 @@ static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
timing->clk_zero = clk_z + 8 - temp;
}
-static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
const unsigned long bit_rate, const unsigned long esc_rate)
{
s32 ui, lpx;
@@ -256,9 +123,8 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
temp += 8 * ui + lpx;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
if (tmin > tmax) {
- temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
timing->clk_pre = temp >> 1;
- temp = (2 * tmax - tmin) * pcnt2;
} else {
timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
}
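
This hunk also fixes a double halving: the old code shifted the interpolated value right once in the call line and again when storing clk_pre, dividing by four instead of two, and left a dead temp assignment behind. For reference, linear_inter(tmax, tmin, percent, min_result, even) evaluates to roughly tmin + ceil((tmax - tmin) * percent / 100), forced even when requested and clamped below by min_result. A quick worked check, following the body visible above:

/* linear_inter(60, 20, 10, 0, false):
 *   v = (60 - 20) * 10 = 400
 *   v = DIV_ROUND_UP(400, 100) + 20 = 24
 *   result = max(0, 24) = 24, i.e. 10% of the way from tmin to tmax.
 *
 * clk_pre before/after the fix, with v = linear_inter(2 * tmax, tmin, ...):
 *   old: temp = v >> 1;  timing->clk_pre = temp >> 1;   -> v / 4
 *   new: temp = v;       timing->clk_pre = temp >> 1;   -> v / 2
 */
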
@@ -276,130 +142,119 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
return 0;
}
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+ u32 bit_mask)
{
- void __iomem *base = phy->reg_base;
+ int phy_id = phy->id;
+ u32 val;
- if (!enable) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
return;
- }
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
- dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ val = dsi_phy_read(phy->base + reg);
+
+ if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+ dsi_phy_write(phy->base + reg, val | bit_mask);
+ else
+ dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
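
msm_dsi_phy_set_src_pll() reduces the earlier open-coded GLBL_TEST_CTRL writes to one table-driven read-modify-write: src_pll_truthtable[phy_id][pll_id] says whether this PHY must set the selection bit to source its byte/pixel clocks from that PLL (for dsi_phy_20nm_cfgs below, { {false, true}, {false, true} } means either PHY sets the bit only when sourcing from PLL 1). The call site from the 20nm enable path later in this patch, for reference:

	msm_dsi_phy_set_src_pll(phy, src_pll_id,
		REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
		DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
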
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
- const unsigned long bit_rate, const unsigned long esc_rate)
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
- struct dsi_dphy_timing *timing = &phy->timing;
- int i;
- void __iomem *base = phy->base;
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ struct device *dev = &phy->pdev->dev;
+ int num = phy->cfg->reg_cfg.num;
+ int i, ret;
- DBG("");
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
- if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
- pr_err("%s: D-PHY timing calculation failed\n", __func__);
- return -EINVAL;
+ ret = devm_regulator_bulk_get(dev, num, s);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
}
- dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
-
- dsi_28nm_phy_regulator_ctrl(phy, true);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
- DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
- DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
- DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
- if (timing->clk_zero & BIT(8))
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
- DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
- DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
- DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
- DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
- DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
- DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
- DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
- DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
- dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
- DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
- dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
-
- for (i = 0; i < 4; i++) {
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ for (i = 0; i < num; i++) {
+ if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+ ret = regulator_set_voltage(s[i].consumer,
+ regs[i].min_voltage, regs[i].max_voltage);
+ if (ret < 0) {
+ dev_err(dev,
+ "regulator %d set voltage failed, %d\n",
+ i, ret);
+ return ret;
+ }
+ }
}
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
- dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+ return 0;
+}
- dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ int num = phy->cfg->reg_cfg.num;
+ int i;
- if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
- dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
- else
- dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
- return 0;
+ regulator_bulk_disable(num, s);
}
-static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
- dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
- dsi_28nm_phy_regulator_ctrl(phy, false);
+ struct regulator_bulk_data *s = phy->supplies;
+ const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+ struct device *dev = &phy->pdev->dev;
+ int num = phy->cfg->reg_cfg.num;
+ int ret, i;
- /*
- * Wait for the registers writes to complete in order to
- * ensure that the phy is completely disabled
- */
- wmb();
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_load(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ dev_err(dev,
+ "regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ dev_err(dev, "regulator enable failed, %d\n", ret);
+ goto fail;
+ }
return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
+ return ret;
}
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
+ struct device *dev = &phy->pdev->dev;
int ret;
- pm_runtime_get_sync(&phy->pdev->dev);
+ pm_runtime_get_sync(dev);
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
- pm_runtime_put_sync(&phy->pdev->dev);
+ dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ pm_runtime_put_sync(dev);
}
return ret;
@@ -411,92 +266,74 @@ static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
pm_runtime_put_sync(&phy->pdev->dev);
}
-static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
- [MSM_DSI_PHY_28NM_HPM] = {
- .type = MSM_DSI_PHY_28NM_HPM,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- .ops = {
- .enable = dsi_28nm_phy_enable,
- .disable = dsi_28nm_phy_disable,
- }
- },
- [MSM_DSI_PHY_28NM_LP] = {
- .type = MSM_DSI_PHY_28NM_LP,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
- },
- },
- .ops = {
- .enable = dsi_28nm_phy_enable,
- .disable = dsi_28nm_phy_disable,
- }
- },
-};
-
static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
- .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
+ .data = &dsi_phy_28nm_hpm_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-lp",
- .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
+ .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+ { .compatible = "qcom,dsi-phy-20nm",
+ .data = &dsi_phy_20nm_cfgs },
+#endif
{}
};
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
int ret;
- phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
+ match = of_match_node(dsi_phy_dt_match, dev->of_node);
if (!match)
return -ENODEV;
phy->cfg = match->data;
phy->pdev = pdev;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(dev->of_node,
"qcom,dsi-phy-index", &phy->id);
if (ret) {
- dev_err(&pdev->dev,
- "%s: PHY index not specified, ret=%d\n",
+ dev_err(dev, "%s: PHY index not specified, %d\n",
__func__, ret);
goto fail;
}
+ phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+ "qcom,dsi-phy-regulator-ldo-mode");
+
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
+ dev_err(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
- phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+ "DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev,
- "%s: failed to map phy regulator base\n", __func__);
+ dev_err(dev, "%s: failed to map phy regulator base\n",
+ __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
- dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
+ dev_err(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
- phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+ phy->ahb_clk = devm_clk_get(dev, "iface_clk");
if (IS_ERR(phy->ahb_clk)) {
- pr_err("%s: Unable to get ahb clk\n", __func__);
+ dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -510,7 +347,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (!phy->pll)
- dev_info(&pdev->dev,
+ dev_info(dev,
"%s: pll init failed, need separate pll clk driver\n",
__func__);
@@ -557,9 +394,10 @@ void __exit msm_dsi_phy_driver_unregister(void)
platform_driver_unregister(&dsi_phy_platform_driver);
}
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate)
{
+ struct device *dev = &phy->pdev->dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
@@ -567,30 +405,37 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
+ dev_err(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
return ret;
}
- return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
+ ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+ if (ret) {
+ dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ dsi_phy_regulator_disable(phy);
+ return ret;
+ }
+
+ return 0;
}
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
if (!phy || !phy->cfg->ops.disable)
- return -EINVAL;
+ return;
phy->cfg->ops.disable(phy);
- dsi_phy_regulator_disable(phy);
- return 0;
+ dsi_phy_regulator_disable(phy);
}
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
- u32 *clk_pre, u32 *clk_post)
+ u32 *clk_pre, u32 *clk_post)
{
if (!phy)
return;
+
if (clk_pre)
*clk_pre = phy->timing.clk_pre;
if (clk_post)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000000..0456b253239f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct msm_dsi_phy_ops {
+ int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+ void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+ enum msm_dsi_phy_type type;
+ struct dsi_reg_config reg_cfg;
+ struct msm_dsi_phy_ops ops;
+
+ /*
+ * Each cell {phy_id, pll_id} of the truth table indicates
+ * if the source PLL selection bit should be set for each PHY.
+ * Fill default H/W values in illegal cells, e.g. cell {0, 1}.
+ */
+ bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+
+struct msm_dsi_dphy_timing {
+ u32 clk_pre;
+ u32 clk_post;
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+};
+
+struct msm_dsi_phy {
+ struct platform_device *pdev;
+ void __iomem *base;
+ void __iomem *reg_base;
+ int id;
+
+ struct clk *ahb_clk;
+ struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+ struct msm_dsi_dphy_timing timing;
+ const struct msm_dsi_phy_cfg *cfg;
+
+ bool regulator_ldo_mode;
+
+ struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+ u32 bit_mask);
+
+#endif /* __DSI_PHY_H__ */
+
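
With the ops and config split out into dsi_phy.h, each process-node variant reduces to one file exporting a msm_dsi_phy_cfg plus a compatible entry in dsi_phy_dt_match. A sketch of the boilerplate for a hypothetical variant (names are invented; the ops would be static functions in the variant's own file, exactly as dsi_phy_20nm.c does below):

const struct msm_dsi_phy_cfg dsi_phy_example_cfgs = {
	.type = MSM_DSI_PHY_28NM_HPM,	/* hypothetical: reuse an existing type id */
	.src_pll_truthtable = { {true, true}, {false, true} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 1800000, 1800000, 100000, 100},
		},
	},
	.ops = {
		.enable = dsi_example_phy_enable,	/* hypothetical */
		.disable = dsi_example_phy_disable,	/* hypothetical */
	},
};
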
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000000..2e9ba118d50a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+ DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+ DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+ DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+ DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+ DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+ DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+ DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+ DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+ DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+ DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+ DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode) {
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+ return;
+ }
+
+ /* non-LDO mode */
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_20nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+ msm_dsi_phy_set_src_pll(phy, src_pll_id,
+ REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+ DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+ (i >> 1) * 0x40);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+ }
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+ dsi_20nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+ /* make sure everything is written before enable */
+ wmb();
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+ return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+ dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .type = MSM_DSI_PHY_20NM,
+ .src_pll_truthtable = { {false, true}, {false, true} },
+ .reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ {"vcca", 1000000, 1000000, 10000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_20nm_phy_enable,
+ .disable = dsi_20nm_phy_disable,
+ }
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000000..f1a7c7b46420
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ msm_dsi_phy_set_src_pll(phy, src_pll_id,
+ REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+ DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the PHY is completely disabled
+ */
+ wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+ .type = MSM_DSI_PHY_28NM_HPM,
+ .src_pll_truthtable = { {true, true}, {false, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+ .type = MSM_DSI_PHY_28NM_LP,
+ .src_pll_truthtable = { {true, true}, {true, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ },
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376fdd112..5104fc9f9a53 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- int ret;
-
- /*
- * Certain PLLs need to update the same VCO rate and registers
- * after resume in suspend/resume scenario.
- */
- if (pll->restore_state) {
- ret = pll->restore_state(pll);
- if (ret)
- goto error;
- }
- ret = dsi_pll_enable(pll);
-
-error:
- return ret;
+ return dsi_pll_enable(pll);
}
void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- if (pll->save_state)
- pll->save_state(pll);
-
dsi_pll_disable(pll);
}
@@ -134,6 +117,29 @@ void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
pll->destroy(pll);
}
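+/*
+ * Cache the PLL configuration (dividers, VCO rate) so it can be restored
+ * after suspend/resume; state_saved guards against restoring before
+ * anything has been saved.
+ */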
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+ if (pll->save_state) {
+ pll->save_state(pll);
+ pll->state_saved = true;
+ }
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+ int ret;
+
+ if (pll->restore_state && pll->state_saved) {
+ ret = pll->restore_state(pll);
+ if (ret)
+ return ret;
+
+ pll->state_saved = false;
+ }
+
+ return 0;
+}
+
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb241c039..063caa2c5740 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@ struct msm_dsi_pll {
struct clk_hw clk_hw;
bool pll_on;
+ bool state_saved;
unsigned long min_rate;
unsigned long max_rate;
@@ -82,8 +83,16 @@ void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
/*
* Initialization for Each PLL Type
*/
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+ struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index eb8ac3097ff5..598fdaff0a41 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -455,7 +455,7 @@ static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
cached_state->postdiv1 =
pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
- cached_state->vco_rate = __clk_get_rate(pll->clk_hw.clk);
+ cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}
static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
@@ -465,26 +465,21 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
void __iomem *base = pll_28nm->mmio;
int ret;
- if ((cached_state->vco_rate != 0) &&
- (cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) {
- ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
- cached_state->vco_rate, 0);
- if (ret) {
- dev_err(&pll_28nm->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
- return ret;
- }
-
- pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- cached_state->postdiv3);
- pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
- cached_state->postdiv1);
- pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
- cached_state->byte_mux);
-
- cached_state->vco_rate = 0;
+ ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ dev_err(&pll_28nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
}
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ cached_state->postdiv3);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ cached_state->postdiv1);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+ cached_state->byte_mux);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e2dd3d..06cbddfc914f 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dceb5e2..bef1d65fe28c 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7991069dd492..81200e9be382 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -373,7 +373,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
struct device *dev = &ctrl->pdev->dev;
int ret;
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd");
+ ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
if (IS_ERR(ctrl->panel_hpd_gpio)) {
ret = PTR_ERR(ctrl->panel_hpd_gpio);
ctrl->panel_hpd_gpio = NULL;
@@ -381,13 +381,7 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
return ret;
}
- ret = gpiod_direction_input(ctrl->panel_hpd_gpio);
- if (ret) {
- pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
+ ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
if (IS_ERR(ctrl->panel_en_gpio)) {
ret = PTR_ERR(ctrl->panel_en_gpio);
ctrl->panel_en_gpio = NULL;
@@ -395,13 +389,6 @@ static int edp_gpio_config(struct edp_ctrl *ctrl)
return ret;
}
- ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
- if (ret) {
- pr_err("%s: Set direction for panel_en failed, %d\n",
- __func__, ret);
- return ret;
- }
-
DBG("gpio on");
return 0;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 814536202efe..101b324cdeef 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
}
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
power_on ? "Enable" : "Disable", ctrl);
}
@@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
/* Process DDC: */
hdmi_i2c_irq(hdmi->i2c);
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
/* TODO audio.. */
return IRQ_HANDLED;
@@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi)
{
struct hdmi_phy *phy = hdmi->phy;
+ /*
+ * At this point HPD has been disabled; after flushing the
+ * workqueue it is safe to deinit HDCP.
+ */
+ if (hdmi->workq) {
+ flush_workqueue(hdmi->workq);
+ destroy_workqueue(hdmi->workq);
+ }
+ hdmi_hdcp_destroy(hdmi);
if (phy)
phy->funcs->destroy(phy);
@@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
{
struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
+ struct resource *res;
int i, ret;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
+ spin_lock_init(&hdmi->reg_lock);
/* not sure about which phy maps to which msm.. probably I miss some */
- if (config->phy_init)
+ if (config->phy_init) {
hdmi->phy = config->phy_init(hdmi);
- else
- hdmi->phy = ERR_PTR(-ENXIO);
- if (IS_ERR(hdmi->phy)) {
- ret = PTR_ERR(hdmi->phy);
- dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
- hdmi->phy = NULL;
- goto fail;
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
+ hdmi->phy = NULL;
+ goto fail;
+ }
}
hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
+ /* HDCP needs physical address of hdmi register */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ config->mmio_name);
+ hdmi->mmio_phy_addr = res->start;
+
+ hdmi->qfprom_mmio = msm_ioremap(pdev,
+ config->qfprom_mmio_name, "HDMI_QFPROM");
+ if (IS_ERR(hdmi->qfprom_mmio)) {
+ dev_info(&pdev->dev, "can't find qfprom resource\n");
+ hdmi->qfprom_mmio = NULL;
+ }
+
hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
config->hpd_reg_cnt, GFP_KERNEL);
if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
hdmi->i2c = hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
+ hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+ if (IS_ERR(hdmi->hdcp_ctrl)) {
+ dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+ hdmi->hdcp_ctrl = NULL;
+ }
+
return hdmi;
fail:
@@ -310,7 +347,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
.phy_init = hdmi_phy_8x74_init,
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+ .phy_init = NULL, /* nothing to do for this 20nm HDMI PHY */
+ HDMI_CFG(pwr_reg, 8x74),
+ HDMI_CFG(hpd_reg, 8x94),
+ HDMI_CFG(pwr_clk, 8x74),
+ HDMI_CFG(hpd_clk, 8x74),
+ .hpd_freq = hpd_clk_freq_8x74,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
- { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+ { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
{}
@@ -347,8 +396,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
snprintf(name2, sizeof(name2), "%s-gpio", name);
gpio = of_get_named_gpio(of_node, name2, 0);
if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
+ DBG("failed to get gpio: %s (%d)", name, gpio);
gpio = -1;
}
}
@@ -376,6 +424,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
hdmi_cfg->mmio_name = "core_physical";
+ hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
"8901_hdmi_mvs", "8901_mpp0"
};
config.phy_init = hdmi_phy_8x60_init;
- config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
}
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
hdmi_cfg = &config;
#endif
dev->platform_data = hdmi_cfg;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3622a5..d0e663192d01 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@ struct hdmi_audio {
int rate;
};
+struct hdmi_hdcp_ctrl;
+
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
@@ -51,6 +53,8 @@ struct hdmi {
unsigned long int pixclock;
void __iomem *mmio;
+ void __iomem *qfprom_mmio;
+ phys_addr_t mmio_phy_addr;
struct regulator **hpd_regs;
struct regulator **pwr_regs;
@@ -68,12 +72,25 @@ struct hdmi {
bool hdmi_mode; /* are we in hdmi mode? */
int irq;
+ struct workqueue_struct *workq;
+
+ struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+ /*
+ * spinlock to protect registers shared by different execution contexts:
+ * REG_HDMI_CTRL
+ * REG_HDMI_DDC_ARBITRATION
+ * REG_HDMI_HDCP_INT_CTRL
+ * REG_HDMI_HPD_CTRL
+ */
+ spinlock_t reg_lock;
};
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
const char *mmio_name;
+ const char *qfprom_mmio_name;
/* regulators that need to be on for hpd: */
const char **hpd_reg_names;
@@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
return msm_readl(hdmi->mmio + reg);
}
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
/*
* The phy appears to be different, for example between 8960 and 8x60,
* so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
struct hdmi_phy_funcs {
void (*destroy)(struct hdmi_phy *phy);
- void (*reset)(struct hdmi_phy *phy);
void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
void (*powerdown)(struct hdmi_phy *phy);
};
@@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c);
void hdmi_i2c_destroy(struct i2c_adapter *i2c);
struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f034808371..0b1b5586ff35 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288
+#define REG_HDMI_CEC_CTRL 0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA 0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294
+
#define REG_HDMI_CEC_STATUS 0x00000298
#define REG_HDMI_CEC_INT 0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f60134..df232e20c13e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi)
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
} else {
- hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d8267cf0..92b69ae8caf9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi_audio_update(hdmi);
}
- phy->funcs->powerup(phy, hdmi->pixclock);
+ if (phy)
+ phy->funcs->powerup(phy, hdmi->pixclock);
+
hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ if (hdmi->hdcp_ctrl)
+ hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
DBG("power down");
hdmi_set_mode(hdmi, false);
- phy->funcs->powerdown(phy);
+
+ if (phy)
+ phy->funcs->powerdown(phy);
if (hdmi->power_on) {
power_off(bridge);
@@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
hdmi->pixclock = mode->clock * 1000;
- hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93ff5473..a3b05ae52dae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@ struct hdmi_connector {
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
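+/*
+ * Pulse the PHY and PLL software resets; the *_LOW bits in PHY_CTRL
+ * indicate the active polarity of each reset line.
+ */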
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
static int gpio_config(struct hdmi *hdmi, bool on)
{
struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@ static int gpio_config(struct hdmi *hdmi, bool on)
int ret;
if (on) {
- ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
- goto error1;
+ if (config->ddc_clk_gpio != -1) {
+ ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+ if (ret) {
+ dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+ goto error1;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
}
- gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
- ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
- goto error2;
+ if (config->ddc_data_gpio != -1) {
+ ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+ if (ret) {
+ dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+ goto error2;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
}
- gpio_set_value_cansleep(config->ddc_data_gpio, 1);
ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
if (ret) {
@@ -94,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on)
}
DBG("gpio on");
} else {
- gpio_free(config->ddc_clk_gpio);
- gpio_free(config->ddc_data_gpio);
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+
gpio_free(config->hpd_gpio);
if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@ error5:
error4:
gpio_free(config->hpd_gpio);
error3:
- gpio_free(config->ddc_data_gpio);
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
error2:
- gpio_free(config->ddc_clk_gpio);
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
error1:
return ret;
}
@@ -138,9 +197,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- struct hdmi_phy *phy = hdmi->phy;
uint32_t hpd_ctrl;
int i, ret;
+ unsigned long flags;
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
hdmi_set_mode(hdmi, false);
- phy->funcs->reset(phy);
+ hdmi_phy_reset(hdmi);
hdmi_set_mode(hdmi, true);
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
HDMI_HPD_INT_CTRL_INT_EN);
/* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
@@ -200,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
return 0;
@@ -250,7 +311,6 @@ hotplug_work(struct work_struct *work)
void hdmi_connector_irq(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct msm_drm_private *priv = connector->dev->dev_private;
struct hdmi *hdmi = hdmi_connector->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
@@ -274,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- queue_work(priv->wq, &hdmi_connector->hpd_work);
+ queue_work(hdmi->workq, &hdmi_connector->hpd_work);
}
}
@@ -350,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
drm_mode_connector_update_edid_property(connector, edid);
if (edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 000000000000..1dc9c34eb0df
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC
+#define HDCP_KSV_LSB 0x000060D8
+#define HDCP_KSV_MSB 0x000060DC
+
+enum DS_TYPE { /* type of downstream device */
+ DS_UNKNOWN,
+ DS_RECEIVER,
+ DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+ HDCP_STATE_NO_AKSV,
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+ u32 reg_id;
+ u32 off;
+ char *name;
+ u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+ struct hdmi *hdmi;
+ u32 auth_retries;
+ bool tz_hdcp;
+ enum hdmi_hdcp_state hdcp_state;
+ struct work_struct hdcp_auth_work;
+ struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+ unsigned long auth_event;
+ wait_queue_head_t auth_event_queue;
+
+ u32 ksv_fifo_w_index;
+ /*
+ * store aksv from qfprom
+ */
+ u32 aksv_lsb;
+ u32 aksv_msb;
+ bool aksv_valid;
+ u32 ds_type;
+ u32 bksv_lsb;
+ u32 bksv_msb;
+ u8 dev_count;
+ u8 depth;
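+ /* one 5-byte KSV per downstream device, up to 127 devices */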
+ u8 ksv_list[5 * 127];
+ bool max_cascade_exceeded;
+ bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 5;
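+ /* DDC uses 8-bit addresses; the i2c core takes 7-bit, hence addr >> 1 */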
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = addr >> 1,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ DBG("Start DDC read");
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+ retry--;
+ if (rc == 2)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC read %d", rc);
+
+ return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 10;
+ u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ }
+ };
+
+ DBG("Start DDC write");
+ if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+ pr_err("%s: write size too big\n", __func__);
+ return -ERANGE;
+ }
+
+ buf[0] = offset;
+ memcpy(&buf[1], data, data_len);
+ msgs[0].buf = buf;
+ msgs[0].len = data_len + 1;
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+ retry--;
+ if (rc == 1)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC write %d", rc);
+
+ return rc;
+}
+
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+ u32 *pdata, u32 count)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+ u32 resp, phy_addr, idx = 0;
+ int i, ret = 0;
+
+ WARN_ON(!pdata || !preg || (count == 0));
+
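+ /*
+ * With TrustZone-managed HDCP, the HDCP registers may only be written
+ * through SCM calls, in batches of up to QCOM_SCM_HDCP_MAX_REQ_CNT.
+ */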
+ if (hdcp_ctrl->tz_hdcp) {
+ phy_addr = (u32)hdmi->mmio_phy_addr;
+
+ while (count) {
+ memset(scm_buf, 0, sizeof(scm_buf));
+ for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+ i++) {
+ scm_buf[i].addr = phy_addr + preg[idx];
+ scm_buf[i].val = pdata[idx];
+ idx++;
+ }
+ ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+ if (ret || resp) {
+ pr_err("%s: error: scm_call ret=%d resp=%u\n",
+ __func__, ret, resp);
+ ret = -EINVAL;
+ break;
+ }
+
+ count -= i;
+ }
+ } else {
+ for (i = 0; i < count; i++)
+ hdmi_write(hdmi, preg[i], pdata[i]);
+ }
+
+ return ret;
+}
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, hdcp_int_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+ hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+ if (!hdcp_int_status) {
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ return;
+ }
+ /* Clear Interrupts */
+ reg_val |= hdcp_int_status << 1;
+ /* Clear AUTH_FAIL_INFO as well */
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+ reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ DBG("hdcp irq %x", hdcp_int_status);
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+ pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+ if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+ __func__, reg_val);
+ if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+ else if (HDCP_STATE_AUTHENTICATING ==
+ hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+}
+
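+/*
+ * Sleep for up to @ms milliseconds, waking up early (and returning
+ * -ECANCELED) if the event @ev is signalled, e.g. on authentication abort.
+ */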
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+ int rc;
+
+ rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+ !!test_bit(ev, &hdcp_ctrl->auth_event),
+ msecs_to_jiffies(ms));
+ if (rc) {
+ pr_info("%s: msleep is canceled by event %d\n",
+ __func__, ev);
+ clear_bit(ev, &hdcp_ctrl->auth_event);
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ /* Fetch AKSV from QFPROM; this info should be public. */
+ hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+ hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+ /* check there are 20 ones in AKSV */
+ if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+ != 20) {
+ pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
+ __func__);
+ pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+ __func__, hdcp_ctrl->aksv_msb,
+ hdcp_ctrl->aksv_lsb);
+ return -EINVAL;
+ }
+ DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+ return 0;
+}
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, failure, nack0;
+ int rc = 0;
+
+ /* Check for any DDC transfer failures */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+ nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+ DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+ reg_val, failure, nack0);
+
+ if (failure) {
+ /*
+ * Indicates that the last HDCP HW DDC transfer failed.
+ * This occurs when a transfer is attempted with HDCP DDC
+ * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+ * matches HDCP_DDC_RETRY_CNT.
+ * Failure occurred, let's clear it.
+ */
+ DBG("DDC failure detected");
+
+ /* First, Disable DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+ HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+ /* ACK the Failure to Clear it */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+ reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+ /* Check if the FAILURE got Cleared */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+ pr_info("%s: Unable to clear HDCP DDC Failure\n",
+ __func__);
+
+ /* Re-Enable HDCP DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+ }
+
+ if (nack0) {
+ DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ /* Reset HDMI DDC software status */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* Reset HDMI DDC Controller */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* If previous msleep is aborted, skip this msleep */
+ if (!rc)
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+ DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ }
+
+ return rc;
+}
+
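+/*
+ * Poll until the hardware DDC engine has no HDCP transaction in flight,
+ * giving up after roughly two seconds or when authentication is aborted.
+ */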
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 hdcp_ddc_status, ddc_hw_status;
+ u32 xfer_done, xfer_req, hw_done;
+ bool hw_not_ready;
+ u32 timeout_count;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+ return 0;
+
+ /* Wait to be clean on DDC HW engine */
+ timeout_count = 100;
+ do {
+ hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+ xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+ xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+ hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+ hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+ if (!hw_not_ready)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_warn("%s: hw_ddc_clean failed\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ DBG("HDCP REAUTH WORK");
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that when we
+ * attempt a re-authentication, HW would clear the AN0_READY and
+ * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /* Disable HDCP interrupts */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Wait to be clean on DDC HW engine */
+ if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+ pr_info("%s: reauth work aborted\n", __func__);
+ return;
+ }
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ /* Enable HPD circuitry */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Only retry a defined number of times, then abort the current
+ * authentication process
+ */
+ if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->auth_retries = 0;
+ pr_info("%s: abort reauthentication!\n", __func__);
+
+ return;
+ }
+
+ DBG("Queue AUTH WORK");
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 reg_val;
+ unsigned long flags;
+ int rc;
+
+ if (!hdcp_ctrl->aksv_valid) {
+ rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: ASKV validation failed\n", __func__);
+ hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+ return -ENOTSUPP;
+ }
+ hdcp_ctrl->aksv_valid = true;
+ }
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ /* disable HDMI Encrypt */
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enabling Software DDC */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Write AKSV read from QFPROM to the HDCP registers.
+ * This step is needed for HDCP authentication and must be
+ * written before enabling HDCP.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+ /*
+ * HDCP setup prior to enabling HDCP_CTRL.
+ * Setup seed values for random number An.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+ /* Disable the RngCipher state */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+ reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+ DBG("HDCP_DEBUG_CTRL=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+ /*
+ * Ensure that all register writes are completed before
+ * enabling HDCP cipher
+ */
+ wmb();
+
+ /*
+ * Enable HDCP
+ * This needs to be done as early as possible in order for the
+ * hardware to make An available to read
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+ /*
+ * If we had stale values for the An ready bit, it should most
+ * likely be cleared now after enabling HDCP cipher
+ */
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+ if (!(link0_status &
+ (HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+ HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+ DBG("An not ready after enabling HDCP");
+
+ /* Clear any DDC failures from previous tries before enabling HDCP */
+ rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+ return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ DBG("hdcp auth failed, queue reauth work");
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ /*
+ * Disable software DDC before going into part3 to make sure
+ * there is no arbitration between software and hardware for DDC
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /* enable HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val |= HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+ hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * hdcp authenticating part 1
+ * Wait Key/An ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status, keys_state;
+ u32 timeout_count;
+ bool an_ready;
+
+ /* Wait for HDCP keys to be checked and validated */
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ keys_state = (link0_status >> 28) & 0x7;
+ if (keys_state == HDCP_KEYS_STATE_VALID)
+ break;
+
+ DBG("Keys not ready(%d). s=%d, l0=%0x08x",
+ timeout_count, keys_state, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait key state timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+ && (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+ if (an_ready)
+ break;
+
+ DBG("An not ready(%d). l0_status=0x%08x",
+ timeout_count, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait An timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_aksv_0, link0_aksv_1;
+ u32 link0_an[2];
+ u8 aksv[5];
+
+ /* Read An0 and An1 */
+ link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+ link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+ /* Read AKSV */
+ link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+ link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+ DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1);
+ /* Copy An and AKSV to byte arrays for transmission */
+ aksv[0] = link0_aksv_0 & 0xFF;
+ aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
+ aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+ aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+ aksv[4] = link0_aksv_1 & 0xFF;
+
+ /* Write An to offset 0x18 */
+ rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+ (u16)sizeof(link0_an));
+ if (rc) {
+ pr_err("%s:An write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+ /* Write AKSV to offset 0x10 */
+ rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+ if (rc) {
+ pr_err("%s:AKSV write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+ return 0;
+}
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u8 bksv[5];
+ u32 reg[2], data[2];
+
+ /* Read BKSV at offset 0x00 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+ if (rc) {
+ pr_err("%s:BKSV read failed\n", __func__);
+ return rc;
+ }
+
+ hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+ (bksv[2] << 16) | (bksv[3] << 24);
+ hdcp_ctrl->bksv_msb = bksv[4];
+ DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+ /* check there are 20 ones in BKSV */
+ if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+ != 20) {
+ pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+ pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+ bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+ return -EINVAL;
+ }
+
+ /* Write BKSV read from sink to HDCP registers */
+ reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+ data[0] = hdcp_ctrl->bksv_lsb;
+ reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+ data[1] = hdcp_ctrl->bksv_msb;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u8 bcaps;
+
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s:BCAPS read failed\n", __func__);
+ return rc;
+ }
+ DBG("BCAPS=%02x", bcaps);
+
+ /* receiver (0), repeater (1) */
+ hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+ /* Write BCAPS to the hardware */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = (u32)bcaps;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ int rc;
+
+ /* Wait for AKSV key and An ready */
+ rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait key and an ready failed\n", __func__);
+ return rc;
+ }
+
+ /* Read BCAPS and send to HDCP engine */
+ rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: read bcaps error, abort\n", __func__);
+ return rc;
+ }
+
+ /*
+ * 1.1_Features turned off by default.
+ * No need to write AInfo since 1.1_Features is disabled.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+ /* Send AKSV and An to sink */
+ rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:An/Aksv write failed\n", __func__);
+ return rc;
+ }
+
+ /* Read BKSV and send to HDCP engine*/
+ rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:BKSV Process failed\n", __func__);
+ return rc;
+ }
+
+ /* Enable HDCP interrupts and ack/clear any stale interrupts */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+}
+
+/* Read R0' from the sink and pass it to the HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ u8 buf[2];
+
+ /*
+ * HDCP Compliance Test case 1A-01:
+ * Wait here at least 100ms before reading R0'
+ */
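+ /* The 125 ms used below gives margin over the 100 ms minimum. */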
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+
+ /* Read R0' at offset 0x08 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+ if (rc) {
+ pr_err("%s:R0' read failed\n", __func__);
+ return rc;
+ }
+ DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+ /* Write R0' to HDCP registers and check to see if it is a match */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+ (((u32)buf[1]) << 8) | buf[0]);
+
+ return 0;
+}
+
+/* Wait for the authentication result: whether R0 and R0' match */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ int rc;
+
+ /* Wait for the HDCP IRQ; 10 seconds should be long enough */
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
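+ /* a non-zero return here means the result-ready event fired */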
+ if (!rc) {
+ pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+ pr_err("%s: Authentication Part I failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Enable HDCP Encryption */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+ HDMI_HDCP_CTRL_ENABLE |
+ HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+ return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+ u16 *pbstatus)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ bool max_devs_exceeded = false, max_cascade_exceeded = false;
+ u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+ u16 bstatus;
+ u8 buf[2];
+
+ /* Read BSTATUS at offset 0x41 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+ if (rc) {
+ pr_err("%s: BSTATUS read failed\n", __func__);
+ goto error;
+ }
+ *pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
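+ /*
+ * BStatus bit layout (HDCP 1.x):
+ *   bits  6:0  DEVICE_COUNT
+ *   bit   7    MAX_DEVS_EXCEEDED
+ *   bits 10:8  DEPTH (repeater cascade depth)
+ *   bit  11    MAX_CASCADE_EXCEEDED
+ */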
+ down_stream_devices = bstatus & 0x7F;
+ repeater_cascade_depth = (bstatus >> 8) & 0x7;
+ max_devs_exceeded = !!(bstatus & BIT(7));
+ max_cascade_exceeded = !!(bstatus & BIT(11));
+
+ if (down_stream_devices == 0) {
+ /*
+ * If no downstream devices are attached to the repeater
+ * then part II fails.
+ * todo: The other approach would be to continue PART II.
+ */
+ pr_err("%s: No downstream devices\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-05:
+ * Check whether the number of devices connected to the repeater
+ * exceeds the maximum allowed (MAX_DEVS_EXCEEDED, bit 7 of BStatus).
+ */
+ if (max_devs_exceeded) {
+ pr_err("%s: no. of devs connected exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-06:
+ * Check whether the repeater cascade depth exceeds the maximum
+ * allowed (MAX_CASCADE_EXCEEDED, bit 11 of BStatus).
+ */
+ if (max_cascade_exceeded) {
+ pr_err("%s: no. of cascade conn exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+error:
+ hdcp_ctrl->dev_count = down_stream_devices;
+ hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+ hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+ hdcp_ctrl->depth = repeater_cascade_depth;
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u32 timeout_count;
+ u16 bstatus;
+ u8 bcaps;
+
+ /*
+ * Wait until the READY bit is set in BCAPS; per the HDCP
+ * specification, the maximum permitted time to poll for the
+ * READY bit is five seconds.
+ */
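+ /* The loop below sleeps at most 100 * 20 ms = 2 s, within that budget. */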
+ timeout_count = 100;
+ do {
+ /* Read BCAPS at offset 0x40 */
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s: BCAPS read failed\n", __func__);
+ return rc;
+ }
+
+ if (bcaps & BIT(5))
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Wait KSV fifo ready timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+ if (rc) {
+ pr_err("%s: bstatus error\n", __func__);
+ return rc;
+ }
+
+ /* Write BSTATUS and BCAPS to HDCP registers */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = bcaps | (bstatus << 8);
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ if (rc) {
+ pr_err("%s: BSTATUS write failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * HDCP authentication part 2, second stage:
+ * - read the KSV FIFO from the sink
+ * - transfer V' from the sink to the HDCP engine
+ * - reset the SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ struct hdmi_hdcp_reg_data reg_data[] = {
+ {REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
+ {REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
+ {REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
+ {REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+ {REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+ };
+ struct hdmi_hdcp_reg_data *rd;
+ u32 size = ARRAY_SIZE(reg_data);
+ u32 reg[ARRAY_SIZE(reg_data)];
+ u32 data[ARRAY_SIZE(reg_data)];
+ int i;
+
+ for (i = 0; i < size; i++) {
+ rd = &reg_data[i];
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+ rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+ if (rc) {
+ pr_err("%s: Read %s failed\n", __func__, rd->name);
+ goto error;
+ }
+
+ DBG("%s =%x", rd->name, data[i]);
+ reg[i] = reg_data[i].reg_id;
+ }
+
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+ return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+ hdcp_ctrl->ksv_list, ksv_bytes);
+ if (rc)
+ pr_err("%s: KSV FIFO read failed\n", __func__);
+
+ return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ u32 reg[2], data[2];
+ int rc = 0;
+
+ reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+ data[0] = HDCP_REG_ENABLE;
+ reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+ data[1] = HDCP_REG_DISABLE;
+
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+ /*
+ * Read the KSV FIFO over DDC.
+ * The Key Selection Vector FIFO is used to pull downstream KSVs
+ * from HDCP repeaters.
+ * All bytes (DEVICE_COUNT * 5) must be read in a single,
+ * auto-incrementing access.
+ * All bytes read as 0x00 for HDCP receivers that are not
+ * HDCP repeaters (REPEATER == 0).
+ */
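+ /* Example (hypothetical): dev_count = 3 reads 15 bytes from offset 0x43. */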
+ timeout_count = 100;
+ do {
+ rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Recv ksv fifo timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: transfer V failed\n", __func__);
+ return rc;
+ }
+
+ /* Reset the SHA engine before writing the KSV FIFO */
+ rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: fail to reset sha engine\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the KSV FIFO to HDCP_SHA_DATA.
+ * This is done one byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we must poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * After the last byte is written, we must poll for
+ * HDCP_SHA_COMP_DONE to wait until the HW finishes.
+ */
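+/*
+ * Example cadence (hypothetical): dev_count = 13 gives 65 KSV bytes.
+ * Call 1 writes bytes 0..63 and returns -EAGAIN; call 2 sees BLOCK_DONE,
+ * writes byte 64 with the DONE bit set and returns -EAGAIN; call 3 sees
+ * COMP_DONE and returns 0.
+ */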
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int i;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes, last_byte = 0;
+ u8 *ksv_fifo = NULL;
+ u32 reg_val, data, reg;
+ int rc = 0;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ /* Check if we need to wait for HW completion */
+ if (hdcp_ctrl->ksv_fifo_w_index) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+ DBG("HDCP_SHA_STATUS=%08x", reg_val);
+ if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+ /* check COMP_DONE if last write */
+ if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+ DBG("COMP_DONE");
+ return 0;
+ } else {
+ return -EAGAIN;
+ }
+ } else {
+ /* check BLOCK_DONE if not last write */
+ if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+ return -EAGAIN;
+
+ DBG("BLOCK_DONE");
+ }
+ }
+
+ ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+ if (ksv_bytes <= 64)
+ last_byte = 1;
+ else
+ ksv_bytes = 64;
+
+ ksv_fifo = hdcp_ctrl->ksv_list;
+ ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+ for (i = 0; i < ksv_bytes; i++) {
+ /* Write KSV byte and set DONE bit[0] for the last byte */
+ reg_val = ksv_fifo[i] << 16;
+ if ((i == (ksv_bytes - 1)) && last_byte)
+ reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+ reg = REG_HDMI_HDCP_SHA_DATA;
+ data = reg_val;
+ rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ if (rc)
+ return rc;
+ }
+
+ hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+ /*
+ * Return -EAGAIN to notify the caller to wait for COMP_DONE or
+ * BLOCK_DONE.
+ */
+ return -EAGAIN;
+}
+
+/* Write the KSV FIFO into the HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+ hdcp_ctrl->ksv_fifo_w_index = 0;
+ timeout_count = 100;
+ do {
+ rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ if (rc != -EAGAIN)
+ return rc;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: Write KSV fifo timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 timeout_count = 100;
+
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_err("%s: HDCP V Match timedout", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_auth_work);
+ int rc;
+
+ rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: auth prepare failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ /* HDCP Part I */
+ rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: key exchange failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: receive r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: verify r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+ pr_info("%s: Authentication Part I successful\n", __func__);
+ if (hdcp_ctrl->ds_type == DS_RECEIVER)
+ goto end;
+
+ /* HDCP Part II */
+ rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+ if (rc)
+ pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+ if (rc == -ECANCELED) {
+ pr_info("%s: hdcp authentication canceled\n", __func__);
+ } else if (rc == -ENOTSUPP) {
+ pr_info("%s: hdcp is not supported\n", __func__);
+ } else if (rc) {
+ pr_err("%s: hdcp authentication failed\n", __func__);
+ hdmi_hdcp_auth_fail(hdcp_ctrl);
+ } else {
+ hdmi_hdcp_auth_done(hdcp_ctrl);
+ }
+}
+
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+ DBG("still active or activating or no askv. returning");
+ return;
+ }
+
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->auth_event = 0;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ hdcp_ctrl->auth_retries = 0;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+ DBG("hdcp inactive or no aksv. returning");
+ return;
+ }
+
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that, when we
+ * attempt a re-authentication, the HW clears the AN0_READY and
+ * AN1_READY bits in the HDMI_HDCP_LINK0_STATUS register.
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /*
+ * Disable HDCP interrupts.
+ * The state also needs to be set to inactive so that any ongoing
+ * reauth work knows that the HDCP session has been turned off.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Cancel any pending auth/reauth attempts.
+ * If one is ongoing, this will wait for it to finish.
+ * No more reauthentication attempts will be scheduled since we
+ * set the current state to inactive.
+ */
+ set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+ cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enable HPD circuitry */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+ DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+ if (!hdmi->qfprom_mmio) {
+ pr_err("%s: HDCP is not supported without qfprom\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+ if (!hdcp_ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+ INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+ init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+ hdcp_ctrl->hdmi = hdmi;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->aksv_valid = false;
+
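+ /*
+ * tz_hdcp selects the write path used by hdmi_hdcp_scm_wr():
+ * HDCP registers are programmed via the TrustZone SCM call when
+ * available, or by direct register writes otherwise.
+ */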
+ hdcp_ctrl->tz_hdcp = qcom_scm_hdcp_available();
+
+ return hdcp_ctrl;
+}
+
+void hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+ if (hdmi && hdmi->hdcp_ctrl) {
+ kfree(hdmi->hdcp_ctrl);
+ hdmi->hdcp_ctrl = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec636c6d..3a01cb5051e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
kfree(phy_8960);
}
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
- struct hdmi *hdmi = phy_8960->hdmi;
- unsigned int val;
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-}
-
static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -511,7 +460,6 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
.destroy = hdmi_phy_8960_destroy,
- .reset = hdmi_phy_8960_reset,
.powerup = hdmi_phy_8960_powerup,
.powerdown = hdmi_phy_8960_powerdown,
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c1af7c..cb01421ae1e4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
kfree(phy_8x60);
}
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
- struct hdmi *hdmi = phy_8x60->hdmi;
- unsigned int val;
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-}
-
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
.destroy = hdmi_phy_8x60_destroy,
- .reset = hdmi_phy_8x60_reset,
.powerup = hdmi_phy_8x60_powerup,
.powerdown = hdmi_phy_8x60_powerdown,
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cdacb2a..56ab8917ee9a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
struct hdmi_phy_8x74 {
struct hdmi_phy base;
- struct hdmi *hdmi;
void __iomem *mmio;
};
#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
kfree(phy_8x74);
}
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
- struct hdmi *hdmi = phy_8x74->hdmi;
- unsigned int val;
-
- /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
- val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-
- msleep(100);
-
- if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET);
- }
-
- if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
- /* pull high */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val | HDMI_PHY_CTRL_SW_RESET_PLL);
- } else {
- /* pull low */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
- }
-}
-
static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
@@ -117,7 +63,6 @@ static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
.destroy = hdmi_phy_8x74_destroy,
- .reset = hdmi_phy_8x74_reset,
.powerup = hdmi_phy_8x74_powerup,
.powerdown = hdmi_phy_8x74_powerdown,
};
@@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
phy->funcs = &hdmi_phy_8x74_funcs;
- phy_8x74->hdmi = hdmi;
-
/* for 8x74, the phy mmio is mapped separately: */
phy_8x74->mmio = msm_ioremap(hdmi->pdev,
"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f70872a..2aa23b98f8aa 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc487d683..74b86734fef5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c4bb9d9c7667..6ac9aa165768 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -334,13 +334,15 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
}
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -680,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
plane->crtc = crtc;
- mdp4_plane_install_properties(plane, &crtc->base);
-
return crtc;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7f0c55..5ed38cf548a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
#include "msm_drv.h"
#include "mdp4_kms.h"
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
{
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
@@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
- uint32_t status;
+ uint32_t status, enable;
- status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
VERB("status=%08x", status);
@@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), true);
+ mdp4_disable(mdp4_kms);
+
return 0;
}
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), false);
+ mdp4_disable(mdp4_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4acc2a87..077f7521a971 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
}
#ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
{
- struct device_node *n;
+ struct device_node *endpoint, *panel_node;
+ struct device_node *np = dev->dev->of_node;
struct drm_panel *panel = NULL;
- n = of_parse_phandle(dev->dev->of_node, name, 0);
- if (n) {
- panel = of_drm_find_panel(n);
- if (!panel)
- panel = ERR_PTR(-EPROBE_DEFER);
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ dev_err(dev->dev, "no valid endpoint\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (!panel_node) {
+ dev_err(dev->dev, "no valid panel node\n");
+ of_node_put(endpoint);
+ return ERR_PTR(-ENODEV);
+ }
+
+ of_node_put(endpoint);
+
+ panel = of_drm_find_panel(panel_node);
+ if (!panel) {
+ of_node_put(panel_node);
+ return ERR_PTR(-EPROBE_DEFER);
}
return panel;
}
#else
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
{
// ??? maybe use a module param to specify which panel is attached?
}
@@ -294,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
* Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
*/
- panel = detect_panel(dev, "qcom,lvds-panel");
+ panel = detect_panel(dev);
if (IS_ERR(panel)) {
ret = PTR_ERR(panel);
dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d6bdef..8a7f6e1e2bca 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
{
switch (pipe) {
case VG1:
case VG2:
case VG3:
case VG4:
- return true;
+ return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+ case RGB1:
+ case RGB2:
+ case RGB3:
+ return MDP_PIPE_CAP_SCALE;
default:
- return false;
+ return 0;
}
}
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
- uint32_t max_formats)
-{
- return mdp_get_formats(pixel_formats, max_formats,
- !pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c04843376c54..4cd6e721aa0a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- if (panel)
+ if (panel) {
drm_panel_disable(panel);
+ drm_panel_unprepare(panel);
+ }
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- if (panel)
+ if (panel) {
+ drm_panel_prepare(panel);
drm_panel_enable(panel);
+ }
setup_phy(encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..e9dee367b597 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@ struct mdp4_plane {
enum mdp4_pipe pipe;
+ uint32_t caps;
uint32_t nformats;
uint32_t formats[32];
@@ -74,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
}
/* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
// XXX
@@ -220,13 +221,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
- enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+ enum mdp4_frame_format frame_type;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
+ frame_type = mdp4_get_frame_format(fb);
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -380,9 +383,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];
+ mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
- mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
- ARRAY_SIZE(mdp4_plane->formats));
+ mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats),
+ !pipe_supports_yuv(mdp4_plane->caps));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e17527e2e5..3469f50d5590 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0
static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
}
@@ -431,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
}
@@ -499,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o
static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000040;
+ case 1: return 0x00000044;
+ case 2: return 0x00000048;
+ case 3: return 0x0000004c;
+ case 4: return 0x00000050;
+ case 5: return 0x00000054;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
{
switch (idx) {
@@ -803,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
}
#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
{
- return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+ return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
@@ -897,41 +935,41 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
}
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
{
- return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000
static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000020;
+ case 1: return 0x00000050;
+ case 2: return 0x00000080;
+ case 3: return 0x000000b0;
+ case 4: return 0x00000230;
+ case 5: return 0x00000260;
+ case 6: return 0x00000290;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
@@ -1260,6 +1311,13 @@ static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x000000
static inline uint32_t __offset_WB(uint32_t idx)
{
switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+ case 0: return (mdp5_cfg->wb.base[0]);
+ case 1: return (mdp5_cfg->wb.base[1]);
+ case 2: return (mdp5_cfg->wb.base[2]);
+ case 3: return (mdp5_cfg->wb.base[3]);
+ case 4: return (mdp5_cfg->wb.base[4]);
+#endif
default: return INVALID_IDX(idx);
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a7931b162..a1e26f23c7cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@ struct mdp5_cfg_handler {
/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
const struct mdp5_cfg_hw *mdp5_cfg = NULL;
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+ .name = "msm8x74v1",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ 0,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x21b00, 0x21c00, 0x21d00 },
+ },
+ .intf = {
+ .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
@@ -45,19 +114,27 @@ const struct mdp5_cfg_hw msm8x74_config = {
.pipe_vig = {
.count = 3,
.base = { 0x01200, 0x01600, 0x01a00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
.base = { 0x01e00, 0x02200, 0x02600 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 3,
@@ -65,7 +142,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
},
.ad = {
.count = 2,
- .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ .base = { 0x13100, 0x13300 },
},
.pp = {
.count = 3,
@@ -113,19 +190,27 @@ const struct mdp5_cfg_hw apq8084_config = {
.pipe_vig = {
.count = 4,
.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
.base = { 0x03200, 0x03600 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 4,
@@ -174,19 +259,27 @@ const struct mdp5_cfg_hw msm8x16_config = {
.pipe_vig = {
.count = 1,
.base = { 0x05000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
.base = { 0x15000, 0x17000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
.base = { 0x25000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x45000, 0x48000 },
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 1,
@@ -203,14 +296,91 @@ const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
+const struct mdp5_cfg_hw msm8x94_config = {
+ .name = "msm8x94",
+ .mdp = {
+ .count = 1,
+ .base = { 0x01000 },
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
+ .reserved = {
+ [1] = 1, [4] = 1, [7] = 1, [19] = 1,
+ [16] = 5, [17] = 5, [18] = 5, [22] = 5,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .flush_hw_mask = 0xf0ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x25000, 0x27000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x79000, 0x79800, 0x7a000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ },
+ .intf = {
+ .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers[] = {
- { .revision = 0, .config = { .hw = &msm8x74_config } },
- { .revision = 2, .config = { .hw = &msm8x74_config } },
+ { .revision = 0, .config = { .hw = &msm8x74v1_config } },
+ { .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 9, .config = { .hw = &msm8x94_config } },
};
-
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349abe59f2..efb918d9f68b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@ struct mdp5_sub_block {
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
uint32_t nb_stages; /* number of stages per blender */
+ uint32_t max_width; /* Maximum output resolution */
+ uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* pipe capabilities */
};
struct mdp5_ctl_block {
@@ -70,9 +77,9 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block mdp;
struct mdp5_smp_block smp;
struct mdp5_ctl_block ctl;
- struct mdp5_sub_block pipe_vig;
- struct mdp5_sub_block pipe_rgb;
- struct mdp5_sub_block pipe_dma;
+ struct mdp5_pipe_block pipe_vig;
+ struct mdp5_pipe_block pipe_rgb;
+ struct mdp5_pipe_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16fe7ea..8e6c9b598a57 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@ struct mdp5_cmd_encoder {
struct mdp5_interface intf;
bool enabled;
uint32_t bsc;
+
+ struct mdp5_ctl *ctl;
};
#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
@@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
pingpong_tearcheck_setup(encoder, mode);
- mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+ mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+ mdp5_cmd_enc->ctl);
}
static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
/* initialize command mode encoder */
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf)
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
encoder = &mdp5_cmd_enc->base;
+ mdp5_cmd_enc->ctl = ctl;
drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e559b1..7f9f4ac88029 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (mdp5_crtc->ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
- mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
- mdp5_ctl_release(mdp5_crtc->ctl);
+ mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
mdp5_crtc->ctl = NULL;
}
}
@@ -196,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
/*
* blend_setup() - blend all the planes of a CRTC
*
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, the border color is enabled as the
+ * base layer. Otherwise, all layers are blended according to the stage
+ * assigned to each of them in mdp5_crtc_atomic_check().
*/
static void blend_setup(struct drm_crtc *crtc)
{
@@ -210,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
- uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+ struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+ const struct mdp_format *format;
+ uint32_t lm = mdp5_crtc->lm;
+ uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
-#define blender(stage) ((stage) - STAGE_BASE)
+ uint8_t stage[STAGE_MAX + 1];
+ int i, plane_cnt = 0;
+#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -222,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc)
if (!mdp5_crtc->ctl)
goto out;
+ /* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
- enum mdp_mixer_stage_id stage =
- to_mdp5_plane_state(plane->state)->stage;
+ pstate = to_mdp5_plane_state(plane->state);
+ pstates[pstate->stage] = pstate;
+ stage[pstate->stage] = mdp5_plane_pipe(plane);
+ plane_cnt++;
+ }
- /*
- * Note: This cannot happen with current implementation but
- * we need to check this condition once z property is added
- */
- BUG_ON(stage > hw_cfg->lm.nb_stages);
+ /*
+ * If there is no base layer, enable border color.
+ * Although that is not possible with the current blend logic,
+ * keep this check here as a reminder.
+ */
+ if (!pstates[STAGE_BASE] && plane_cnt) {
+ ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+ DBG("Border Color is enabled");
+ }
- /* LM */
- mdp5_write(mdp5_kms,
- REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
- MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
- MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+ /* Program the blend op and FG/BG alpha for each staged plane */
+ for (i = STAGE0; i <= STAGE_MAX; i++) {
+ if (!pstates[i])
+ continue;
+
+ format = to_mdp_format(
+ msm_framebuffer_format(pstates[i]->base.fb));
+ plane = pstates[i]->base.plane;
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+ fg_alpha = pstates[i]->alpha;
+ bg_alpha = 0xFF - pstates[i]->alpha;
+ DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
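+ /*
+ * Default is constant-alpha blending: OUT = fg_alpha * FG + bg_alpha * BG.
+ * For formats with an alpha channel, the branches below switch to
+ * pixel-alpha blending (premultiplied or straight), further modulated
+ * by the plane's constant alpha when it is < 0xff. (Sketch of intent;
+ * the exact semantics are defined by the LM_BLEND_OP_MODE flags.)
+ */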
+ if (format->alpha_enable && pstates[i]->premultiplied) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ } else if (format->alpha_enable) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+ blender(i)), blend_op);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
- blender(stage)), 0xff);
+ blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
- blender(stage)), 0x00);
- /* CTL */
- blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
- DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
- pipe2name(mdp5_plane_pipe(plane)), stage);
+ blender(i)), bg_alpha);
}
- DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
- mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+ mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
out:
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -339,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
- struct plane_state pstates[STAGE3 + 1];
+ struct plane_state pstates[STAGE_MAX + 1];
+ const struct mdp5_cfg_hw *hw_cfg;
int cnt = 0, i;
DBG("%s: check", mdp5_crtc->name);
- /* request a free CTL, if none is already allocated for this CRTC */
- if (state->enable && !mdp5_crtc->ctl) {
- mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
- if (WARN_ON(!mdp5_crtc->ctl))
- return -EINVAL;
- }
-
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane(plane, state) {
struct drm_plane_state *pstate;
-
- if (cnt >= ARRAY_SIZE(pstates)) {
+ if (cnt >= hw_cfg->lm.nb_stages) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
@@ -369,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
*/
if (!pstate)
pstate = plane->state;
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
cnt++;
}
+ /* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
for (i = 0; i < cnt; i++) {
@@ -388,13 +422,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
DBG("%s: begin", mdp5_crtc->name);
}
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -691,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
complete_flip(crtc, file);
}
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +751,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
mdp_irq_update(&mdp5_kms->base);
- mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+ mdp5_crtc->ctl = ctl;
+ mdp5_ctl_set_pipeline(ctl, intf, lm);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +761,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -774,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
- mdp5_plane_install_properties(plane, &crtc->base);
-
return crtc;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f224a76..4e81ca4f964a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
/*
* CTL - MDP Control Pool Manager
*
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
*
* They are intended to be used for data path configuration.
* The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
*
* In certain use cases (high-resolution dual pipe), one single CTL can be
* shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
*/
+#define CTL_STAT_BUSY 0x1
+#define CTL_STAT_BOOKED 0x2
+
struct op_mode {
struct mdp5_interface intf;
@@ -46,8 +45,8 @@ struct mdp5_ctl {
u32 id;
int lm;
- /* whether this CTL has been allocated or not: */
- bool busy;
+ /* CTL status bitmask */
+ u32 status;
/* Operation Mode Configuration for the Pipeline */
struct op_mode pipeline;
@@ -61,7 +60,10 @@ struct mdp5_ctl {
bool cursor_on;
- struct drm_crtc *crtc;
+ /* True if the current CTL has FLUSH bits pending for single FLUSH. */
+ bool flush_pending;
+
+ struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};
struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@ struct mdp5_ctl_manager {
/* to filter out non-present bits in the current hardware config */
u32 flush_hw_mask;
+ /* status for single FLUSH */
+ bool single_flush_supported;
+ u32 single_flush_pending_mask;
+
/* pool of CTLs + lock to protect resource allocation (ctls[i].status) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
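For orientation (an illustrative sketch, not part of the patch): under the new status/booking scheme, a display interface acquires and binds a CTL roughly as follows, using the mdp5_ctl API declared in mdp5_ctl.h.

static struct mdp5_ctl *example_bind_ctl(struct mdp5_ctl_manager *ctl_mgr,
					 struct mdp5_interface *intf, int lm)
{
	struct mdp5_ctl *ctl;

	/* INTF 1/2 prefer the BOOKED CTL0/CTL1; other INTFs get an unbooked one */
	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
	if (!ctl)
		return NULL;

	/* bind the CTL to its interface and layer mixer */
	if (mdp5_ctl_set_pipeline(ctl, intf, lm))
		return NULL;

	return ctl;
}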
@@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+ struct mdp5_interface *intf, int lm)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+ dev_err(mdp5_kms->dev->dev,
+ "CTL %d is allocated by INTF %d, but used by INTF %d\n",
+ ctl->id, ctl->pipeline.intf.num, intf->num);
+ return -EINVAL;
+ }
+
+ ctl->lm = lm;
+
memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
- ctl->cursor_on = enable;
return 0;
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ default: return 0;
+ }
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ if (stage < STAGE6)
+ return 0;
+
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+ case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+ case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+ case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+ case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+ case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+ case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+ case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+ case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+ case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+ default: return 0;
+ }
+}
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags)
{
unsigned long flags;
+ u32 blend_cfg = 0, blend_ext_cfg = 0;
+ int i, start_stage;
+
+ if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+ start_stage = STAGE0;
+ blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ } else {
+ start_stage = STAGE_BASE;
+ }
+ for (i = start_stage; i < start_stage + stage_cnt; i++) {
+ blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
+ blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- else
- blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
- spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+ blend_cfg, blend_ext_cfg);
return 0;
}
@@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
return sw_mask;
}
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+ u32 *flush_id)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+ if (ctl->pair) {
+ DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+ ctl->flush_pending = true;
+ ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+ *flush_mask = 0;
+
+ if (ctl->pair->flush_pending) {
+ *flush_id = min_t(u32, ctl->id, ctl->pair->id);
+ *flush_mask = ctl_mgr->single_flush_pending_mask;
+
+ ctl->flush_pending = false;
+ ctl->pair->flush_pending = false;
+ ctl_mgr->single_flush_pending_mask = 0;
+
+ DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+ *flush_id);
+ }
+ }
+}
+
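To make the deferral above concrete, a paired (dual DSI) commit sequence plays out roughly as below; a sketch only, assuming ctl0/ctl1 were both returned by mdp5_ctlm_request() with BOOKED status.

static void example_dual_dsi_commit(struct mdp5_ctl *ctl0, struct mdp5_ctl *ctl1,
				    u32 mask0, u32 mask1)
{
	/* associate the two booked CTLs; also sets SPLIT_DPL_SINGLE_FLUSH_EN */
	mdp5_ctl_pair(ctl0, ctl1, true);

	/* first commit only accumulates: fix_for_single_flush() zeroes the
	 * mask, so no FLUSH register is written yet */
	mdp5_ctl_commit(ctl0, mask0);

	/* second commit writes (mask0 | mask1) once, to the FLUSH register
	 * of min(ctl0->id, ctl1->id), kicking both pipes together */
	mdp5_ctl_commit(ctl1, mask1);
}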
/**
* mdp5_ctl_commit() - Register Flush
*
@@ -400,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
+ u32 flush_id = ctl->id;
+ u32 curr_ctl_flush_mask;
pipeline->start_mask &= ~flush_mask;
@@ -415,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
flush_mask &= ctl_mgr->flush_hw_mask;
+ curr_ctl_flush_mask = flush_mask;
+
+ fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
@@ -426,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
refill_start_mask(ctl);
}
- return flush_mask;
+ return curr_ctl_flush_mask;
}
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- unsigned long flags;
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
- if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
- dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
- ctl->id, ctl->busy);
- return;
+/*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+ /* do nothing silently if hw doesn't support it */
+ if (!ctl_mgr->single_flush_supported)
+ return 0;
+
+ if (!enable) {
+ ctlx->pair = NULL;
+ ctly->pair = NULL;
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+ return 0;
+ } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+ dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ return -EINVAL;
+ } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+ dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ return -EINVAL;
}
- spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
- ctl->busy = false;
- spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ ctlx->pair = ctly;
+ ctly->pair = ctlx;
- DBG("CTL %d released", ctl->id);
-}
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+ MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
-int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
-{
- return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+ return 0;
}
/*
- * mdp5_ctl_request() - CTL dynamic allocation
+ * mdp5_ctl_request() - CTL allocation
*
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2; return an unbooked
+ * CTL for other INTFs. If no CTL is available in the preferred category,
+ * allocate one from the other category.
*
- * @return first free CTL
+ * @return NULL if no CTL is available.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
- struct drm_crtc *crtc)
+ int intf_num)
{
struct mdp5_ctl *ctl = NULL;
+ const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+ u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
unsigned long flags;
int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ /* search the preferred category first */
for (c = 0; c < ctl_mgr->nctl; c++)
- if (!ctl_mgr->ctls[c].busy)
- break;
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
- if (unlikely(c >= ctl_mgr->nctl)) {
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
- goto unlock;
- }
+ dev_warn(ctl_mgr->dev->dev,
+ "fall back to the other CTL category for INTF %d!\n", intf_num);
- ctl = &ctl_mgr->ctls[c];
+ match ^= CTL_STAT_BOOKED;
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
- ctl->lm = mdp5_crtc_get_lm(crtc);
- ctl->crtc = crtc;
- ctl->busy = true;
+ dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ goto unlock;
+
+found:
+ ctl = &ctl_mgr->ctls[c];
+ ctl->pipeline.intf.num = intf_num;
+ ctl->lm = -1;
+ ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
@@ -515,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
- void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+ int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -551,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
if (WARN_ON(!ctl_cfg->base[c])) {
dev_err(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->reg_offset = ctl_cfg->base[c];
- ctl->busy = false;
+ ctl->status = 0;
spin_lock_init(&ctl->hw_lock);
}
+
+ /*
+ * In the dual DSI case, CTL0 and CTL1 are always assigned to the two DSI
+ * interfaces to support the single FLUSH feature (both CTLs are flushed
+ * by a single write to CTL0's FLUSH register), keeping the two DSI pipes
+ * in sync.
+ * Single FLUSH is supported from hw rev v3.0.
+ */
+ if (rev >= 3) {
+ ctl_mgr->single_flush_supported = true;
+ /* Reserve CTL0/1 for INTF1/2 */
+ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+ ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+ }
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228c4f14..96148c6f863c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
*/
struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
- void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
@@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
* mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+ int lm);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
-
-/*
- * blend_cfg (LM blender config):
- *
- * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
- * are being blended according to their stage (z-order), through @blend_cfg arg.
- */
-static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
- enum mdp_mixer_stage_id stage)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
- case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
- case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
- case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
- case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
- case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
- case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
- case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
- case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
- case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
- default: return 0;
- }
-}
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
/*
* mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
*
- * @blend_cfg: see LM blender config definition below
+ * @stage: array containing the pipe number assigned to each stage
+ * @stage_cnt: number of valid entries in the @stage array
+ * @ctl_blend_op_flags: blender operation mode flags
*
* Note:
* CTL registers need to be flushed after calling this function
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags);
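As a usage illustration (hypothetical, not part of the patch), a caller staging two pipes with border output disabled might do the following; it assumes the CTL's layer mixer was already bound via mdp5_ctl_set_pipeline():

static int example_blend_two(struct mdp5_ctl *ctl, int lm)
{
	u8 stage[STAGE_MAX + 1];
	int ret;

	stage[STAGE_BASE] = SSPP_RGB0;	/* base layer */
	stage[STAGE0] = SSPP_VIG0;	/* blended on top */

	ret = mdp5_ctl_blend(ctl, stage, 2, 0);
	if (ret)
		return ret;

	/* the new blend configuration reaches the HW only after a flush */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_lm(lm));
	return 0;
}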
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08f3f1f..c9e32b08a7a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@ struct mdp5_encoder {
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
+
+ struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
@@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+ mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+ mdp5_encoder->ctl);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
int lm = mdp5_crtc_get_lm(encoder->crtc);
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
unsigned long flags;
@@ -294,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
int intf_num;
u32 data = 0;
@@ -316,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
/* Make sure clocks are on when connectors call this function. */
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
- MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+ mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
mdp5_disable(mdp5_kms);
return 0;
@@ -329,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf)
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
+ mdp5_encoder->ctl = ctl;
spin_lock_init(&mdp5_encoder->intf_lock);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6160dd..b1f73bee1368 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
{
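+	/*
+	 * irqmask ^ (irqmask & old_irqmask) == irqmask & ~old_irqmask:
+	 * ack stale status bits only for interrupts that are being newly
+	 * enabled, so previously latched events don't fire spuriously.
+	 */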
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
+ irqmask ^ (irqmask & old_irqmask));
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
@@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
- uint32_t status;
+ uint32_t status, enable;
- status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+ enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
+ status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
VERB("status=%08x", status);
@@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_enable(mdp5_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
+ mdp5_disable(mdp5_kms);
+
return 0;
}
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp5_enable(mdp5_kms);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
+ mdp5_disable(mdp5_kms);
}
/*
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..047cb0433ccb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
+ int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ mdp5_plane_complete_commit(plane, plane_state);
+ }
+
mdp5_disable(mdp5_kms);
}
@@ -164,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
- clk_disable_unprepare(mdp5_kms->lut_clk);
+ if (mdp5_kms->lut_clk)
+ clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
@@ -176,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
- clk_prepare_enable(mdp5_kms->lut_clk);
+ if (mdp5_kms->lut_clk)
+ clk_prepare_enable(mdp5_kms->lut_clk);
return 0;
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
enum mdp5_intf_type intf_type, int intf_num,
- enum mdp5_intf_mode intf_mode)
+ enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -196,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
if ((intf_type == INTF_DSI) &&
(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
- encoder = mdp5_cmd_encoder_init(dev, &intf);
+ encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
else
- encoder = mdp5_encoder_init(dev, &intf);
+ encoder = mdp5_encoder_init(dev, &intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
@@ -236,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+ struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+ struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
@@ -246,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->edp)
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
- MDP5_INTF_MODE_NONE);
+ MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -259,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->hdmi)
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
- MDP5_INTF_MODE_NONE);
+ MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -285,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->dsi[dsi_id])
break;
+ ctl = mdp5_ctlm_request(ctlm, intf_num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
MDP5_INTF_DSI_MODE_COMMAND :
MDP5_INTF_DSI_MODE_VIDEO;
dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
- intf_num, mode);
- if (IS_ERR(dsi_encs)) {
- ret = PTR_ERR(dsi_encs);
+ intf_num, mode, ctl);
+ if (IS_ERR(dsi_encs[i])) {
+ ret = PTR_ERR(dsi_encs[i]);
break;
}
}
@@ -314,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
- static const enum mdp5_pipe pub_planes[] = {
+ static const enum mdp5_pipe vig_planes[] = {
SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
};
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
@@ -337,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
struct drm_crtc *crtc;
plane = mdp5_plane_init(dev, crtcs[i], true,
- hw_cfg->pipe_rgb.base[i]);
+ hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -355,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct public planes: */
+ /* Construct video planes: */
for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
struct drm_plane *plane;
- plane = mdp5_plane_init(dev, pub_planes[i], false,
- hw_cfg->pipe_vig.base[i]);
+ plane = mdp5_plane_init(dev, vig_planes[i], false,
+ hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct %s plane: %d\n",
- pipe2name(pub_planes[i]), ret);
+ pipe2name(vig_planes[i]), ret);
+ goto fail;
+ }
+ }
+
+ /* DMA planes */
+ for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
+ struct drm_plane *plane;
+
+ plane = mdp5_plane_init(dev, dma_planes[i], false,
+ hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct %s plane: %d\n",
+ pipe2name(dma_planes[i]), ret);
goto fail;
}
}
@@ -476,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
if (ret)
- goto fail;
+ DBG("failed to get (optional) lut_clk clock");
ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
if (ret)
goto fail;
@@ -508,7 +560,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
if (IS_ERR(mdp5_kms->ctlm)) {
ret = PTR_ERR(mdp5_kms->ctlm);
mdp5_kms->ctlm = NULL;
@@ -564,6 +616,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = config->hw->lm.max_width;
+ dev->mode_config.max_height = config->hw->lm.max_height;
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..0bb62423586e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@ struct mdp5_kms {
struct mdp5_plane_state {
struct drm_plane_state base;
- /* "virtual" zpos.. we calculate actual mixer-stage at runtime
- * by sorting the attached planes by zpos and then assigning
- * mixer stage lowest to highest. Private planes get default
- * zpos of zero, and public planes a unique value that is
- * greater than zero. This way, things work out if a naive
- * userspace assigns planes to a crtc without setting zpos.
- */
- int zpos;
+ /* these mirror the corresponding DRM plane properties */
+ uint8_t premultiplied;
+ uint8_t zpos;
+ uint8_t alpha;
- /* the actual mixer stage, calculated in crtc->atomic_check()
- * NOTE: this should move to mdp5_crtc_state, when that exists
- */
+ /* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
/* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@ static inline uint32_t lm2ppdone(int lm)
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
int mdp5_irq_postinstall(struct msm_kms *kms);
void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,58 +197,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
-static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0:
- case SSPP_VIG1:
- case SSPP_VIG2:
- case SSPP_VIG3:
- return true;
- default:
- return false;
- }
-}
-
-static inline
-uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
- uint32_t max_formats)
-{
- return mdp_get_formats(pixel_formats, max_formats,
- !pipe_supports_yuv(pipe));
-}
-
-void mdp5_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+ enum mdp5_pipe pipe, bool private_plane,
+ uint32_t reg_offset, uint32_t caps);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf);
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf);
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
#else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(
- struct drm_device *dev, struct mdp5_interface *intf)
+static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..07fb62fea6dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -26,13 +26,12 @@ struct mdp5_plane {
spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
+ uint32_t caps;
uint32_t flush_mask; /* used to commit pipe registers */
uint32_t nformats;
uint32_t formats[32];
-
- bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
@@ -42,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
+
static void set_scanout_locked(struct drm_plane *plane,
struct drm_framebuffer *fb);
@@ -56,44 +56,132 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}
-static int mdp5_plane_disable(struct drm_plane *plane)
+static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- DBG("%s: disable", mdp5_plane->name);
-
- if (mdp5_kms) {
- /* Release the memory we requested earlier from the SMP: */
- mdp5_smp_release(mdp5_kms->smp, pipe);
- }
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
- return 0;
+ kfree(mdp5_plane);
}
-static void mdp5_plane_destroy(struct drm_plane *plane)
+static void mdp5_plane_install_rotation_property(struct drm_device *dev,
+ struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
+ if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
+ !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
+ return;
- kfree(mdp5_plane);
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&plane->base,
+ dev->mode_config.rotation_property,
+ 0);
}
/* helper to install properties which are common to planes and crtcs */
-void mdp5_plane_install_properties(struct drm_plane *plane,
+static void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
- // XXX
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+ prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+ if (!prop) { \
+ prop = drm_property_##fnc(dev, 0, #name, \
+ ##__VA_ARGS__); \
+ if (!prop) { \
+ dev_warn(dev->dev, \
+ "Create property %s failed\n", \
+ #name); \
+ return; \
+ } \
+ dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+ } \
+ drm_object_attach_property(&plane->base, prop, init_val); \
+ } while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_enum, name##_prop_enum_list, \
+ ARRAY_SIZE(name##_prop_enum_list))
+
+ INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+ mdp5_plane_install_rotation_property(dev, plane);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+}
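For reference, inside mdp5_plane_install_properties() the call INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1) expands to approximately:

prop = dev_priv->plane_property[PLANE_PROP_ZPOS];
if (!prop) {
	prop = drm_property_create_range(dev, 0, "zpos", 1, 255);
	if (!prop) {
		dev_warn(dev->dev, "Create property zpos failed\n");
		return;
	}
	dev_priv->plane_property[PLANE_PROP_ZPOS] = prop;
}
drm_object_attach_property(&plane->base, prop, 1);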
+
+static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ pstate = to_mdp5_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ pstate->name = (type)val; \
+ DBG("Set property %s %d", #name, (type)val); \
+ goto done; \
+ } \
+ } while (0)
+
+ SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ return ret;
+#undef SET_PROPERTY
}
-int mdp5_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
{
- // XXX
- return -EINVAL;
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ pstate = to_mdp5_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ *val = pstate->name; \
+ DBG("Get property %s %lld", #name, *val); \
+ goto done; \
+ } \
+ } while (0)
+
+ GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ return ret;
+#undef GET_PROPERTY
}
static void mdp5_plane_reset(struct drm_plane *plane)
@@ -106,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane)
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- mdp5_state->zpos = 0;
- } else {
- mdp5_state->zpos = 1 + drm_plane_index(plane);
- }
+ /* assign default blend parameters */
+ mdp5_state->alpha = 255;
+ mdp5_state->premultiplied = 0;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ mdp5_state->zpos = STAGE_BASE;
+ else
+ mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
mdp5_state->base.plane = plane;
plane->state = &mdp5_state->base;
@@ -149,7 +241,9 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
- .set_property = mdp5_plane_set_property,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_set_property = mdp5_plane_atomic_set_property,
+ .atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -182,10 +276,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *old_state = plane->state;
+ const struct mdp_format *format;
+ bool vflip, hflip;
DBG("%s: check (%d -> %d)", mdp5_plane->name,
plane_enabled(old_state), plane_enabled(state));
+ if (plane_enabled(state)) {
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format) &&
+ !pipe_supports_yuv(mdp5_plane->caps)) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support YUV\n");
+
+ return -EINVAL;
+ }
+
+ if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
+ (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+
+ return -EINVAL;
+ }
+
+ hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
+ vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+ if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
+ (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support flip\n");
+
+ return -EINVAL;
+ }
+ }
+
if (plane_enabled(state) && plane_enabled(old_state)) {
/* we cannot change SMP block configuration during scanout: */
bool full_modeset = false;
@@ -224,7 +352,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
- mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
@@ -365,16 +492,21 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
return 0;
}
-static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scalex_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasex_steps[2])
{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
uint32_t phasex_step;
unsigned int hsub;
int ret;
ret = calc_phase_step(src, dest, &phasex_step);
- if (ret)
+ if (ret) {
+ dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
+ }
hsub = drm_format_horz_chroma_subsampling(pixel_format);
@@ -384,16 +516,21 @@ static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
return 0;
}
-static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scaley_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasey_steps[2])
{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
uint32_t phasey_step;
unsigned int vsub;
int ret;
ret = calc_phase_step(src, dest, &phasey_step);
- if (ret)
+ if (ret) {
+ dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
+ }
vsub = drm_format_vert_chroma_subsampling(pixel_format);
@@ -403,28 +540,38 @@ static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
return 0;
}
-static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
-{
- uint32_t filter;
-
- filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
-
- return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
-}
-
-static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
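+/*
+ * Filter selection: luma (comp 0) uses the CA filter when upscaling and
+ * PCMN when downscaling; alpha/RGB (comp 3) use bilinear for upscaling.
+ * Chroma (comps 1/2) is compared against src/2 to account for the
+ * implicit 2x chroma upsampling of sub-sampled YUV formats.
+ */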
+static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
+ uint32_t src, uint32_t dest, bool hor)
{
- uint32_t filter;
-
- filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t y_filter = (src <= dest) ? SCALE_FILTER_CA : SCALE_FILTER_PCMN;
+ uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
+ SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ uint32_t value = 0;
+
+ if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
+ if (hor)
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
+ else
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
+ } else if (src != dest) {
+ if (hor)
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
+ else
+ value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
+ }
- return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) |
- MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+ return value;
}
static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -435,8 +582,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_w, uint32_t src_h)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *pstate = plane->state;
struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct device *dev = mdp5_kms->dev->dev;
enum mdp5_pipe pipe = mdp5_plane->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
@@ -444,6 +591,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ bool vflip, hflip;
unsigned long flags;
int ret;
@@ -468,7 +616,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
/* Request some memory from the SMP: */
ret = mdp5_smp_request(mdp5_kms->smp,
- mdp5_plane->pipe, fb->pixel_format, src_w);
+ mdp5_plane->pipe, format, src_w, false);
if (ret)
return ret;
@@ -480,29 +628,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
*/
mdp5_smp_configure(mdp5_kms->smp, pipe);
- /* SCALE is used to both scale and up-sample chroma components */
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+ if (ret)
+ return ret;
- if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
- /* TODO calc hdecm */
- ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
- if (ret) {
- dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
- src_w, crtc_w, ret);
- return ret;
- }
- config |= get_scalex_config(src_w, crtc_w);
- }
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+ if (ret)
+ return ret;
- if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
- /* TODO calc vdecm */
- ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
- if (ret) {
- dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
- src_h, crtc_h, ret);
- return ret;
- }
- config |= get_scaley_config(src_h, crtc_h);
- }
+ /* TODO calc hdecm, vdecm */
+
+ /* SCALE is used to both scale and up-sample chroma components */
+ config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
+ config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
+ vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
@@ -535,7 +677,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
- MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -545,29 +687,35 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
- phasex_step[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
- phasey_step[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
- phasex_step[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
- phasey_step[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
- MDP5_PIPE_DECIMATION_VERT(vdecm) |
- MDP5_PIPE_DECIMATION_HORZ(hdecm));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
-
- if (MDP_FORMAT_IS_YUV(format))
- csc_enable(mdp5_kms, pipe,
- mdp_get_default_csc_cfg(CSC_YUV2RGB));
- else
- csc_disable(mdp5_kms, pipe);
+ if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ phasex_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ phasey_step[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ phasex_step[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ phasey_step[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+ }
+
+ if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
set_scanout_locked(plane, fb);
@@ -602,9 +750,24 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask;
}
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+ if (!plane_enabled(plane->state)) {
+ DBG("%s: free SMP", mdp5_plane->name);
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
+}
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
+ uint32_t caps)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -621,9 +784,11 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->pipe = pipe;
mdp5_plane->name = pipe2name(pipe);
+ mdp5_plane->caps = caps;
- mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
- ARRAY_SIZE(mdp5_plane->formats));
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats),
+ !pipe_supports_yuv(mdp5_plane->caps));
mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
mdp5_plane->reg_offset = reg_offset;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..563cca972dcb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
* and CANNOT be re-allocated (e.g. MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ * free:
+ * The block is not allocated to any client.
+ *
+ * pending:
+ * The block is allocated to some client, but the assignment has
+ * not yet been programmed into the MDP5_MDP_SMP_ALLOC registers.
+ *
+ * configured:
+ * The block is allocated to some client, and assigned to that
+ * client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ * inuse:
+ * The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
*
* 1) mdp5_smp_request():
* When plane scanout is set up, calculate the required number of
- * blocks needed per client, and request. Blocks not inuse or
- * pending by any other client are added to client's pending
- * set.
+ * blocks needed per client, and request them. Blocks neither inuse nor
+ * configured nor pending by any other client are added to client's
+ * pending set.
+ * For shrinking, blocks in pending but not in configured can be freed
+ * directly, but those already in configured will be freed later by
+ * mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
+ * Current pending is copied to configured.
+ * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
+ * concurrently for the same pipe.
*
* 3) mdp5_smp_commit():
- * After next vblank, copy pending -> inuse. Optionally update
+ * After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
+ * 4) mdp5_smp_release():
+ * Must be called after the pipe is disabled and no longer uses any SMB.
+ *
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become its in-use blocks (and no-longer
* in-use blocks become available to other clients).
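[A minimal sketch of the lifecycle described above, using the entry points as declared in this series; the calling context and variable names are hypothetical, and locking and error paths are trimmed:]

    /* 1) at scanout setup: reserve blocks for this pipe/format (pending) */
    ret = mdp5_smp_request(smp, pipe, format, src_w, false);
    if (ret)
            return ret;

    /* 2) while programming hw, before FLUSH: pending -> configured,
     * and the MDP5_MDP_SMP_ALLOC registers are written
     */
    mdp5_smp_configure(smp, pipe);

    /* 3) after the next vblank: configured -> inuse, and newly
     * unused blocks are returned to the pool
     */
    mdp5_smp_commit(smp, pipe);

    /* 4) once the pipe is disabled and scanout has stopped: */
    mdp5_smp_release(smp, pipe);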
@@ -68,6 +90,8 @@
struct mdp5_smp {
struct drm_device *dev;
+ const struct mdp5_smp_block *cfg;
+
int blk_cnt;
int blk_size;
@@ -77,6 +101,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
+static void update_smp_state(struct mdp5_smp *smp,
+ u32 cid, mdp5_smp_state_t *assigned);
+
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@@ -112,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp,
u32 cid, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
- const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
int reserved;
unsigned long flags;
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- reserved = hw_cfg->smp.reserved[cid];
+ reserved = smp->cfg->reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
- /* don't clear in global smp_state until _commit() */
+
+ /* clear the block in global smp_state only if it is not also in
+ * configured; configured blocks are freed later, by _commit()
+ */
+ if (!test_bit(blk, ps->configured))
+ clear_bit(blk, smp->state);
}
}
@@ -179,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
* decimated width. I.e. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+ const struct mdp_format *format, u32 width, bool hdecim)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
+ u32 fmt = format->base.pixel_format;
nplanes = drm_format_num_planes(fmt);
hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -192,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
+ /* Newer MDPs have split/packing logic, which fetches sub-sampled
+ * U and V components (splits them from Y if necessary) and packs
+ * them together, writes to SMP using a single client.
+ */
+ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+ fmt = DRM_FORMAT_NV24;
+ nplanes = 2;
+
+ /* if decimation is enabled, HW decimates less on the
+ * sub-sampled chroma components
+ */
+ if (hdecim && (hsub > 1))
+ hsub = 1;
+ }
+
for (i = 0, nblks = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
@@ -223,10 +270,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- int i, nblks;
+ int i;
+ unsigned long flags;
+ int cnt = smp->blk_cnt;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ mdp5_smp_state_t assigned;
+ u32 cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+
+ /* clear hw assignment */
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+ update_smp_state(smp, CID_UNUSED, &assigned);
+
+ /* free to global pool */
+ bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+ bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+ /* clear client's info */
+ bitmap_zero(ps->pending, cnt);
+ bitmap_zero(ps->configured, cnt);
+ bitmap_zero(ps->inuse, cnt);
+
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ }
- for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
- smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
@@ -274,12 +344,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ /*
+ * if vblank has not happened since the last smp_configure,
+ * skip the configure for now
+ */
+ if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+ continue;
+
+ bitmap_copy(ps->configured, ps->pending, cnt);
+ bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned);
}
}
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int cnt = smp->blk_cnt;
@@ -295,7 +373,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other
* clients:
*/
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +384,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released);
}
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ bitmap_copy(ps->inuse, ps->configured, cnt);
}
}
@@ -327,6 +405,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
}
smp->dev = dev;
+ smp->cfg = cfg;
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..20b87e800ea3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
+ mdp5_smp_state_t configured;
mdp5_smp_state_t pending;
};
@@ -38,7 +39,8 @@ struct mdp5_smp;
struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+ const struct mdp_format *format, u32 width, bool hdecim);
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036c5bcb..4f792c4e40f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum mdp_chroma_samp_type {
- CHROMA_RGB = 0,
+ CHROMA_FULL = 0,
CHROMA_H2V1 = 1,
CHROMA_H1V2 = 2,
CHROMA_420 = 3,
@@ -65,6 +65,10 @@ enum mdp_mixer_stage_id {
STAGE1 = 3,
STAGE2 = 4,
STAGE3 = 5,
+ STAGE4 = 6,
+ STAGE5 = 7,
+ STAGE6 = 8,
+ STAGE_MAX = 8,
};
enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524dc1872..1c2caffc97e4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
},
};
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
.bpc_a = BPC ## a ## A, \
.bpc_r = BPC ## r, \
@@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
.cpp = c, \
.unpack_count = cnt, \
.fetch_type = fp, \
- .chroma_sample = cs \
+ .chroma_sample = cs, \
+ .is_yuv = yuv, \
}
#define BPC0A 0
@@ -95,30 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
static const struct mdp_format formats[] = {
/* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
- MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
/* --- RGB formats above / YUV formats below this line --- */
+ /* 2 plane YUV */
FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
- MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ /* 1 plane YUV */
+ FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ /* 3 plane YUV */
+ FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+ FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
};
/*
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c243f437..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms)
list_for_each_entry(irq, &mdp_kms->irq_list, node)
irqmask |= irq->irqmask;
- mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+ mdp_kms->cur_irq_mask = irqmask;
}
/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428cb74d0..46a94e7d50e2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@ struct mdp_kms;
struct mdp_kms_funcs {
struct msm_kms_funcs base;
- void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
};
struct mdp_kms {
@@ -42,6 +43,7 @@ struct mdp_kms {
bool in_irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ uint32_t cur_irq_mask; /* current irq mask */
};
#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
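[Illustrative only, not the actual mdp4/mdp5 callback: with the previous mask now passed alongside the new one, a set_irqmask() implementation can compute exactly which interrupt bits this update turns on or off:]

    static void example_set_irqmask(struct mdp_kms *mdp_kms,
                    uint32_t irqmask, uint32_t old_irqmask)
    {
            uint32_t newly_enabled = irqmask & ~old_irqmask;
            uint32_t newly_disabled = old_irqmask & ~irqmask;

            /* hardware-specific step, e.g. ack stale status bits for
             * newly_enabled interrupts before unmasking them, then
             * write irqmask to the interrupt-enable register
             */
            pr_debug("irq bits on: 0x%08x, off: 0x%08x\n",
                     newly_enabled, newly_disabled);
    }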
@@ -90,13 +92,27 @@ struct mdp_format {
uint8_t cpp, unpack_count;
enum mdp_fetch_type fetch_type;
enum mdp_chroma_samp_type chroma_sample;
+ bool is_yuv;
};
#define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP BIT(0)
+#define MDP_PIPE_CAP_VFLIP BIT(1)
+#define MDP_PIPE_CAP_SCALE BIT(2)
+#define MDP_PIPE_CAP_CSC BIT(3)
+#define MDP_PIPE_CAP_DECIMATION BIT(4)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+ return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+ (pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
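[For illustration, how a pipe might advertise these capabilities at init time; the flag combination is hypothetical, but the bit definitions, pipe_supports_yuv() and the extended mdp5_plane_init() signature are the ones introduced in this series:]

    /* a hypothetical full-featured pipe: flips, scaler and CSC */
    uint32_t caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                    MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;

    plane = mdp5_plane_init(dev, pipe, false, reg_offset, caps);

    /* YUV needs both a scaler (for chroma up-sampling) and a CSC block */
    WARN_ON(!pipe_supports_yuv(caps));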
enum csc_type {
CSC_RGB2RGB = 0,
CSC_YUV2RGB,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
timeout = ktime_add_ms(ktime_get(), 1000);
- ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
- if (ret) {
- WARN_ON(ret); // TODO unswap state back? or??
- commit_destroy(c);
- return ret;
- }
+ /* uninterruptible wait */
+ msm_wait_fence(dev, c->fence, &timeout, false);
complete_commit(c);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..0339c5d82d37 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr)
return val;
}
+struct vblank_event {
+ struct list_head node;
+ int crtc_id;
+ bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+ struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+ struct msm_vblank_ctrl, work);
+ struct msm_drm_private *priv = container_of(vbl_ctrl,
+ struct msm_drm_private, vblank_ctrl);
+ struct msm_kms *kms = priv->kms;
+ struct vblank_event *vbl_ev, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ list_del(&vbl_ev->node);
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+ if (vbl_ev->enable)
+ kms->funcs->enable_vblank(kms,
+ priv->crtcs[vbl_ev->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms,
+ priv->crtcs[vbl_ev->crtc_id]);
+
+ kfree(vbl_ev);
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+ int crtc_id, bool enable)
+{
+ struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+ struct vblank_event *vbl_ev;
+ unsigned long flags;
+
+ vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+ if (!vbl_ev)
+ return -ENOMEM;
+
+ vbl_ev->crtc_id = crtc_id;
+ vbl_ev->enable = enable;
+
+ spin_lock_irqsave(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+ queue_work(priv->wq, &vbl_ctrl->work);
+
+ return 0;
+}
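[The indirection above is presumably needed because drm's vblank enable/disable hooks can be called from atomic context while the kms callbacks may sleep: the request is recorded with GFP_ATOMIC under the spinlock, and the actual kms->funcs->enable_vblank()/disable_vblank() call is deferred to vblank_ctrl_worker() running on priv->wq.]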
+
/*
* DRM operations:
*/
@@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+ struct vblank_event *vbl_ev, *tmp;
+
+ /* We must cancel and clean up any pending vblank enable/disable
+ * work before drm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+ cancel_work_sync(&vbl_ctrl->work);
+ list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ list_del(&vbl_ev->node);
+ kfree(vbl_ev);
+ }
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
+ INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+ INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(dev);
@@ -331,10 +405,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
}
}
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
if (!kms)
return -ENXIO;
DBG("dev=%p, crtc=%d", dev, crtc_id);
- return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+ return vblank_ctrl_queue_work(priv, crtc_id, true);
}
static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
if (!kms)
return;
DBG("dev=%p, crtc=%d", dev, crtc_id);
- kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+ vblank_ctrl_queue_work(priv, crtc_id, false);
}
/*
@@ -637,8 +707,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
* Fences:
*/
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -667,7 +737,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
remaining_jiffies = timespec_to_jiffies(&ts);
}
- ret = wait_event_interruptible_timeout(priv->fence_event,
+ if (interruptible)
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+ else
+ ret = wait_event_timeout(priv->fence_event,
fence_completed(dev, fence),
remaining_jiffies);
@@ -853,7 +928,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return -EINVAL;
}
- return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+ return msm_wait_fence(dev, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..3be7a56b14f1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
+#include <linux/of_graph.h>
#include <asm/sizes.h>
#ifndef CONFIG_OF
@@ -64,6 +65,19 @@ struct msm_file_private {
int dummy;
};
+enum msm_mdp_plane_property {
+ PLANE_PROP_ZPOS,
+ PLANE_PROP_ALPHA,
+ PLANE_PROP_PREMULTIPLIED,
+ PLANE_PROP_MAX_NUM
+};
+
+struct msm_vblank_ctrl {
+ struct work_struct work;
+ struct list_head event_list;
+ spinlock_t lock;
+};
+
struct msm_drm_private {
struct msm_kms *kms;
@@ -128,6 +142,9 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+ /* Properties */
+ struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@@ -137,6 +154,8 @@ struct msm_drm_private {
*/
struct drm_mm mm;
} vram;
+
+ struct msm_vblank_ctrl vblank_ctrl;
};
struct msm_format {
@@ -164,8 +183,8 @@ int msm_atomic_commit(struct drm_device *dev,
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+ ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532df02d..f97a1964ef39 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = {
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
*/
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_mmap = msm_fbdev_mmap,
.fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
goto fail_unlock;
}
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -155,7 +155,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, "msm");
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto fail_unlock;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -191,7 +184,6 @@ fail_unlock:
fail:
if (ret) {
- framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@ void msm_fbdev_free(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
struct msm_fbdev *fbdev;
- struct fb_info *fbi;
DBG();
- fbi = helper->fbdev;
-
- /* only cleanup framebuffer if it is present */
- if (fbi) {
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
- ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ ret = msm_wait_fence(dev, fence, timeout, true);
}
/* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- BUG_ON(!msm_obj->sgt); /* should have already pinned! */
- return msm_obj->sgt;
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
+ return NULL;
+
+ return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2b765663c1a3..a34b437dbc8f 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -18,7 +18,6 @@ nouveau-y += $(nvkm-y)
ifdef CONFIG_X86
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
endif
-nouveau-y += nouveau_agp.o
nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c6361422a0b2..82bd4658aa58 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index af7249ca0f4b..78cb033bc015 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nvkm_timer *ptimer = nvxx_timer(device);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_object *device = &drm->device.object;
int i;
for (i = 0; i < 2; i++) {
@@ -80,17 +80,22 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (!nvkm_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (nvif_msec(&drm->device, 10,
+ if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+ break;
+ ) < 0)
return -EBUSY;
- if (!nvkm_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000001))
+
+ if (nvif_msec(&drm->device, 10,
+ if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+ break;
+ ) < 0)
return -EBUSY;
- if (!nvkm_timer_wait_eq(ptimer, 10000000,
- NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+
+ if (nvif_msec(&drm->device, 10,
+ if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+ break;
+ ) < 0)
return -EBUSY;
udelay(100);
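[The nvif_msec() construct used above is a polling helper: it re-executes its statement body until the body executes "break" or the given number of milliseconds elapses, and it evaluates to a negative value on timeout, which is why the old nvkm_timer_wait_eq() calls become "if (nvif_msec(...) < 0) return -EBUSY;" checks.]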
@@ -128,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
@@ -231,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &nouveau_drm(dev)->device;
- struct nvkm_gpio *gpio = nvxx_gpio(device);
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -265,10 +270,10 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
}
if (gpio) {
- saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
- saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+ saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+ saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
}
msleep(4);
@@ -320,8 +325,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
if (gpio) {
- gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
}
return sample;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7cfb0cbc9b6e..429ab5e3025a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
{
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
@@ -493,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
- nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
- nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
+ nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
+ nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
} else {
- nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
- nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
+ nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+ nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
}
}
#endif
@@ -624,8 +624,8 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
- struct nvkm_i2c_port *port = i2c->find(i2c, 2);
- struct nvkm_i2c_board_info info[] = {
+ struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+ struct nvkm_i2c_bus_probe info[] = {
{
{
.type = "sil164",
@@ -639,16 +639,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
};
int type;
- if (!nv_gf4_disp_arch(dev) || !port ||
- get_tmds_slave(encoder))
+ if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
return;
- type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
+ type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;
drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &port->adapter, &info[type].dev);
+ &bus->i2c, &info[type].dev);
}
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4131be5507ab..9e650081c357 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -47,7 +47,7 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- nvif_object_map(nvif_object(&drm->device));
+ nvif_object_map(&drm->device.object);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -101,7 +101,9 @@ nv04_display_create(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+ struct nvkm_i2c_bus *bus =
+ nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
+ nv_encoder->i2c = bus ? &bus->i2c : NULL;
}
/* Save previous state */
@@ -151,7 +153,7 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
kfree(disp);
- nvif_object_unmap(nvif_object(&drm->device));
+ nvif_object_unmap(&drm->device.object);
}
int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c910c5d5c662..6c9a1e89810f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -172,7 +172,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_bios *bios = nvxx_bios(&drm->device);
struct nvbios_init init = {
- .subdev = nv_subdev(bios),
+ .subdev = &bios->subdev,
.bios = bios,
.offset = table,
.outp = outp,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 42e07afc4c2b..956a833b8200 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
- struct nvkm_bios *bios = nvxx_bios(device);
+ struct nvif_object *device = &drm->device.object;
+ struct nvkm_bios *bios = nvxx_bios(&drm->device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -660,8 +660,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
- struct nvkm_timer *ptimer = nvxx_timer(device);
+ struct nvif_object *device = &drm->device.object;
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
@@ -678,10 +677,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
- nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -741,8 +740,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
- nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ nvif_msec(&drm->device, 650,
+ if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+ break;
+ );
+ nvif_msec(&drm->device, 650,
+ if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+ break;
+ );
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -765,7 +770,7 @@ static void
nv_save_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -784,7 +789,7 @@ void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 6c796178bf0c..3bded60c5596 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
static inline uint32_t NVReadCRTC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
uint32_t val;
if (head)
reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
static inline void NVWriteCRTC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
if (head)
reg += NV_PCRTC0_SIZE;
nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
uint32_t val;
if (head)
reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
static inline void NVWriteRAMDAC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
if (head)
reg += NV_PRAMDAC0_SIZE;
nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
static inline void NVWriteVgaCrtc(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
}
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
int head, uint8_t index)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
uint8_t val;
nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,7 +165,7 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t val;
@@ -181,7 +181,7 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
static inline void NVWritePRMVIO(struct drm_device *dev,
int head, uint32_t reg, uint8_t value)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
}
static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
}
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
static inline void NVWriteVgaAttr(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
int head, uint8_t index)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
uint8_t val;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
@@ -259,7 +259,7 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
static inline bool
nv_heads_tied(struct drm_device *dev)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_object *device = &nouveau_drm(dev)->device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->device.info.chipset == 0x11)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f2498571d09..aeebdd402478 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,8 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nvif_object *dev = &drm->device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -118,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format > 0xffff)
return -ERANGE;
- if (dev->info.chipset >= 0x30) {
+ if (drm->device.info.chipset >= 0x30) {
if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
return -ERANGE;
} else {
@@ -173,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv10_disable_plane(struct drm_plane *plane)
{
- struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
@@ -197,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
static void
nv10_set_params(struct nouveau_plane *plane)
{
- struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
+ struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -261,7 +262,7 @@ nv10_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
- int num_formats = ARRAY_SIZE(formats);
+ unsigned int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
@@ -346,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -426,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv04_disable_plane(struct drm_plane *plane)
{
- struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 70e95cf6fd19..5345eb5378a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
#include <drm/i2c/ch7006.h>
-static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = {
+static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
{
{
I2C_BOARD_INFO("ch7006", 0x75),
@@ -55,9 +55,13 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-
- return i2c->identify(i2c, i2c_index, "TV encoder",
- nv04_tv_encoder_info, NULL, NULL);
+ struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
+ if (bus) {
+ return nvkm_i2c_bus_probe(bus, "TV encoder",
+ nv04_tv_encoder_info,
+ NULL, NULL);
+ }
+ return -ENODEV;
}
@@ -205,7 +209,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
- struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+ struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
/* Ensure that we can talk to this encoder */
@@ -231,7 +235,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Run the slave-specific initialization */
ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &port->adapter,
+ &bus->i2c,
&nv04_tv_encoder_info[type].dev);
if (ret < 0)
goto fail_cleanup;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9720dda8385..b734195d80a0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -62,8 +62,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
- gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+ gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+ gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -74,8 +74,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -120,8 +120,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
return sample;
}
@@ -130,18 +130,10 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvkm_device *device = nvxx_device(&drm->device);
- /* Zotac FX5200 */
- if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) ||
- nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
- *pin_mask = 0xc;
- return false;
- }
-
- /* MSI nForce2 IGP */
- if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
- *pin_mask = 0xc;
+ if (device->quirk && device->quirk->tv_pin_mask) {
+ *pin_mask = device->quirk->tv_pin_mask;
return false;
}
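
The board-specific PCI-ID checks deleted above move behind the new device->quirk pointer; struct nvkm_device_quirk (declared later in this series, in nvkm/core/device.h) carries the tv_pin_mask. A minimal sketch of how such a quirk entry could look, assuming designated initialisers and a made-up variable name; only the field and the 0xc mask come from this diff:

    static const struct nvkm_device_quirk zotac_fx5200_quirk = {
            .tv_pin_mask = 0xc, /* mask previously hard-coded per PCI ID */
    };
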
@@ -395,8 +387,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
- gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 225894cdcac2..459910b6bb32 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -131,13 +131,13 @@ static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
struct nvif_device *device = &nouveau_drm(dev)->device;
- nvif_wr32(device, reg, val);
+ nvif_wr32(&device->object, reg, val);
}
static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
{
struct nvif_device *device = &nouveau_drm(dev)->device;
- return nvif_rd32(device, reg);
+ return nvif_rd32(&device->object, reg);
}
static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 64f8b2f687d2..95a64d89547c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -45,6 +45,11 @@
#define GM107_DISP 0x00009470
#define GM204_DISP 0x00009570
+#define NV31_MPEG 0x00003174
+#define G82_MPEG 0x00008274
+
+#define NV74_VP2 0x00007476
+
#define NV50_DISP_CURSOR 0x0000507a
#define G82_DISP_CURSOR 0x0000827a
#define GT214_DISP_CURSOR 0x0000857a
@@ -94,15 +99,40 @@
#define MAXWELL_A 0x0000b097
#define MAXWELL_B 0x0000b197
+#define NV74_BSP 0x000074b0
+
+#define GT212_MSVLD 0x000085b1
+#define IGT21A_MSVLD 0x000086b1
+#define G98_MSVLD 0x000088b1
+#define GF100_MSVLD 0x000090b1
+#define GK104_MSVLD 0x000095b1
+
+#define GT212_MSPDEC 0x000085b2
+#define G98_MSPDEC 0x000088b2
+#define GF100_MSPDEC 0x000090b2
+#define GK104_MSPDEC 0x000095b2
+
+#define GT212_MSPPP 0x000085b3
+#define G98_MSPPP 0x000088b3
+#define GF100_MSPPP 0x000090b3
+
+#define G98_SEC 0x000088b4
+
+#define GT212_DMA 0x000085b5
+#define FERMI_DMA 0x000090b5
+#define KEPLER_DMA_COPY_A 0x0000a0b5
+#define MAXWELL_DMA_COPY_A 0x0000b0b5
+
+#define FERMI_DECOMPRESS 0x000090b8
+
#define FERMI_COMPUTE_A 0x000090c0
#define FERMI_COMPUTE_B 0x000091c0
-
#define KEPLER_COMPUTE_A 0x0000a0c0
#define KEPLER_COMPUTE_B 0x0000a1c0
-
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
+#define NV74_CIPHER 0x000074c1
/*******************************************************************************
* client
@@ -126,32 +156,10 @@ struct nv_device_v0 {
__u8 version;
__u8 pad01[7];
__u64 device; /* device identifier, ~0 for client default */
-#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL
-#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL
-#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL
-#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
-#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
-#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
-#define NV_DEVICE_V0_DISABLE_GR 0x0000000100000000ULL
-#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
-#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
-#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
-#define NV_DEVICE_V0_DISABLE_CIPHER 0x0000001000000000ULL
-#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPPP 0x0000004000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE0 0x0000008000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE1 0x0000010000000000ULL
-#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSENC 0x0000040000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE2 0x0000080000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSVLD 0x0000100000000000ULL
-#define NV_DEVICE_V0_DISABLE_SEC 0x0000200000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPDEC 0x0000400000000000ULL
- __u64 disable; /* disable particular subsystems */
- __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
};
#define NV_DEVICE_V0_INFO 0x00
+#define NV_DEVICE_V0_TIME 0x01
struct nv_device_info_v0 {
__u8 version;
@@ -176,6 +184,14 @@ struct nv_device_info_v0 {
__u8 pad06[2];
__u64 ram_size;
__u64 ram_user;
+ char chip[16];
+ char name[64];
+};
+
+struct nv_device_time_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 time;
};
@@ -235,13 +251,13 @@ struct gf100_dma_v0 {
__u8 pad03[5];
};
-struct gf110_dma_v0 {
+struct gf119_dma_v0 {
__u8 version;
-#define GF110_DMA_V0_PAGE_LP 0x00
-#define GF110_DMA_V0_PAGE_SP 0x01
+#define GF119_DMA_V0_PAGE_LP 0x00
+#define GF119_DMA_V0_PAGE_SP 0x01
__u8 page;
-#define GF110_DMA_V0_KIND_PITCH 0x00
-#define GF110_DMA_V0_KIND_VM 0xff
+#define GF119_DMA_V0_KIND_PITCH 0x00
+#define GF119_DMA_V0_KIND_VM 0xff
__u8 kind;
__u8 pad03[5];
};
@@ -251,33 +267,74 @@ struct gf110_dma_v0 {
* perfmon
******************************************************************************/
-struct nvif_perfctr_v0 {
+#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
+
+struct nvif_perfmon_query_domain_v0 {
__u8 version;
- __u8 pad01[1];
- __u16 logic_op;
- __u8 pad04[4];
- char name[4][64];
+ __u8 id;
+ __u8 counter_nr;
+ __u8 iter;
+ __u16 signal_nr;
+ __u8 pad05[2];
+ char name[64];
};
-#define NVIF_PERFCTR_V0_QUERY 0x00
-#define NVIF_PERFCTR_V0_SAMPLE 0x01
-#define NVIF_PERFCTR_V0_READ 0x02
+struct nvif_perfmon_query_signal_v0 {
+ __u8 version;
+ __u8 domain;
+ __u16 iter;
+ __u8 signal;
+ __u8 source_nr;
+ __u8 pad05[2];
+ char name[64];
+};
-struct nvif_perfctr_query_v0 {
+struct nvif_perfmon_query_source_v0 {
__u8 version;
- __u8 pad01[3];
- __u32 iter;
+ __u8 domain;
+ __u8 signal;
+ __u8 iter;
+ __u8 pad04[4];
+ __u32 source;
+ __u32 mask;
char name[64];
};
-struct nvif_perfctr_sample {
+
+/*******************************************************************************
+ * perfdom
+ ******************************************************************************/
+
+struct nvif_perfdom_v0 {
+ __u8 version;
+ __u8 domain;
+ __u8 mode;
+ __u8 pad03[1];
+ struct {
+ __u8 signal[4];
+ __u64 source[4][8];
+ __u16 logic_op;
+ } ctr[4];
};
-struct nvif_perfctr_read_v0 {
+#define NVIF_PERFDOM_V0_INIT 0x00
+#define NVIF_PERFDOM_V0_SAMPLE 0x01
+#define NVIF_PERFDOM_V0_READ 0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
__u8 version;
__u8 pad01[7];
- __u32 ctr;
+ __u32 ctr[4];
__u32 clk;
+ __u8 pad04[4];
};
@@ -337,7 +394,16 @@ struct nv03_channel_dma_v0 {
__u8 version;
__u8 chid;
__u8 pad02[2];
- __u32 pushbuf;
+ __u32 offset;
+ __u64 pushbuf;
+};
+
+struct nv50_channel_dma_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad02[6];
+ __u64 vm;
+ __u64 pushbuf;
__u64 offset;
};
@@ -350,10 +416,20 @@ struct nv03_channel_dma_v0 {
struct nv50_channel_gpfifo_v0 {
__u8 version;
__u8 chid;
- __u8 pad01[6];
- __u32 pushbuf;
+ __u8 pad02[2];
__u32 ilength;
__u64 ioffset;
+ __u64 pushbuf;
+ __u64 vm;
+};
+
+struct fermi_channel_gpfifo_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad02[2];
+ __u32 ilength;
+ __u64 ioffset;
+ __u64 vm;
};
struct kepler_channel_gpfifo_a_v0 {
@@ -367,10 +443,9 @@ struct kepler_channel_gpfifo_a_v0 {
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
__u8 engine;
__u16 chid;
- __u8 pad04[4];
- __u32 pushbuf;
__u32 ilength;
__u64 ioffset;
+ __u64 vm;
};
/*******************************************************************************
@@ -491,8 +566,8 @@ struct nv50_disp_pior_pwr_v0 {
/* core */
struct nv50_disp_core_channel_dma_v0 {
__u8 version;
- __u8 pad01[3];
- __u32 pushbuf;
+ __u8 pad01[7];
+ __u64 pushbuf;
};
#define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -509,9 +584,9 @@ struct nv50_disp_cursor_v0 {
/* base */
struct nv50_disp_base_channel_dma_v0 {
__u8 version;
- __u8 pad01[2];
__u8 head;
- __u32 pushbuf;
+ __u8 pad02[6];
+ __u64 pushbuf;
};
#define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -519,9 +594,9 @@ struct nv50_disp_base_channel_dma_v0 {
/* overlay */
struct nv50_disp_overlay_channel_dma_v0 {
__u8 version;
- __u8 pad01[2];
__u8 head;
- __u32 pushbuf;
+ __u8 pad02[6];
+ __u64 pushbuf;
};
#define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
@@ -536,6 +611,20 @@ struct nv50_disp_overlay_v0 {
#define NV50_DISP_OVERLAY_V0_NTFY_UEVENT 0x00
/*******************************************************************************
+ * software
+ ******************************************************************************/
+
+#define NVSW_NTFY_UEVENT 0x00
+
+#define NV04_NVSW_GET_REF 0x00
+
+struct nv04_nvsw_get_ref_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 ref;
+};
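
GET_REF appears to expose the software object's reference value to its caller. A hedged sketch, assuming 'nvsw' is a valid struct nvif_object * bound to one of the NVSW classes:

    struct nv04_nvsw_get_ref_v0 args = {};
    int ret = nvif_object_mthd(nvsw, NV04_NVSW_GET_REF, &args, sizeof(args));
    /* on success, args.ref holds the current reference value */
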
+
+/*******************************************************************************
* fermi
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index eca648ef0f7a..4a7f6f7b836d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -4,36 +4,25 @@
#include <nvif/object.h>
struct nvif_client {
- struct nvif_object base;
- struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct nvif_object object;
const struct nvif_driver *driver;
+ u64 version;
+ u8 route;
bool super;
};
-static inline struct nvif_client *
-nvif_client(struct nvif_object *object)
-{
- while (object && object->parent != object)
- object = object->parent;
- return (void *)object;
-}
-
-int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
- const char *, u64, const char *, const char *,
+int nvif_client_init(const char *drv, const char *name, u64 device,
+ const char *cfg, const char *dbg,
struct nvif_client *);
void nvif_client_fini(struct nvif_client *);
-int nvif_client_new(const char *, const char *, u64, const char *,
- const char *, struct nvif_client **);
-void nvif_client_ref(struct nvif_client *, struct nvif_client **);
int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);
/*XXX*/
#include <core/client.h>
-#define nvxx_client(a) ({ \
- struct nvif_client *_client = nvif_client(nvif_object(a)); \
- nvkm_client(_client->base.priv); \
+#define nvxx_client(a) ({ \
+ struct nvif_client *_client = (a); \
+ (struct nvkm_client *)_client->object.priv; \
})
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 88553a741ab7..700a9b206726 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -5,26 +5,35 @@
#include <nvif/class.h>
struct nvif_device {
- struct nvif_object base;
- struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct nvif_object object;
struct nv_device_info_v0 info;
};
-static inline struct nvif_device *
-nvif_device(struct nvif_object *object)
-{
- while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
- object = object->parent;
- return (void *)object;
-}
-
-int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
- u32 handle, u32 oclass, void *, u32,
+int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
struct nvif_device *);
void nvif_device_fini(struct nvif_device *);
-int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
- void *, u32, struct nvif_device **);
-void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+u64 nvif_device_time(struct nvif_device *);
+
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop was terminated with 'break',
+ * where it will return the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({ \
+ struct nvif_device *_device = (d); \
+ u64 _nsecs = (n), _time0 = nvif_device_time(_device); \
+ s64 _taken = 0; \
+ \
+ do { \
+ cond \
+ } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
+ \
+ if (_taken >= _nsecs) \
+ _taken = -ETIMEDOUT; \
+ _taken; \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
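
A usage sketch matching the comment above: the condition block runs inside the loop, and 'break' terminates it early. The device pointer and the ready() predicate are illustrative:

    s64 taken = nvif_msec(device, 2,
            if (ready(device))
                    break;
    );
    /* taken >= 0: nanoseconds elapsed; taken == -ETIMEDOUT: never became ready */
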
/*XXX*/
#include <subdev/bios.h>
@@ -36,26 +45,30 @@ void nvif_device_ref(struct nvif_device *, struct nvif_device **);
#include <subdev/i2c.h>
#include <subdev/timer.h>
#include <subdev/therm.h>
+#include <subdev/pci.h>
-#define nvxx_device(a) nv_device(nvxx_object((a)))
-#define nvxx_bios(a) nvkm_bios(nvxx_device(a))
-#define nvxx_fb(a) nvkm_fb(nvxx_device(a))
-#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a))
-#define nvxx_bar(a) nvkm_bar(nvxx_device(a))
-#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a))
-#define nvxx_clk(a) nvkm_clk(nvxx_device(a))
-#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a))
-#define nvxx_timer(a) nvkm_timer(nvxx_device(a))
-#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d))
-#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c))
-#define nvxx_therm(a) nvkm_therm(nvxx_device(a))
+#define nvxx_device(a) ({ \
+ struct nvif_device *_device = (a); \
+ struct { \
+ struct nvkm_object object; \
+ struct nvkm_device *device; \
+ } *_udevice = _device->object.priv; \
+ _udevice->device; \
+})
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_mmu(a) nvxx_device(a)->mmu
+#define nvxx_bar(a) nvxx_device(a)->bar
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_therm(a) nvxx_device(a)->therm
#include <core/device.h>
#include <engine/fifo.h>
#include <engine/gr.h>
#include <engine/sw.h>
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
-#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a))
-#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
+#define nvxx_gr(a) nvxx_device(a)->gr
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e323b23d..b0ac0215ebf9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,11 +1,10 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
+#define NVIF_VERSION_LATEST 0x0000000000000000ULL
+
struct nvif_ioctl_v0 {
__u8 version;
-#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
-#define NVIF_IOCTL_V0_OWNER_ANY 0xff
- __u8 owner;
#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
@@ -20,17 +19,20 @@ struct nvif_ioctl_v0 {
#define NVIF_IOCTL_V0_NTFY_GET 0x0b
#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
__u8 type;
- __u8 path_nr;
+ __u8 pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
+#define NVIF_IOCTL_V0_OWNER_ANY 0xff
+ __u8 owner;
#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
- __u8 pad04[3];
__u8 route;
__u64 token;
- __u32 path[8]; /* in reverse */
+ __u64 object;
__u8 data[]; /* ioctl data (below) */
};
-struct nvif_ioctl_nop {
+struct nvif_ioctl_nop_v0 {
+ __u64 version;
};
struct nvif_ioctl_sclass_v0 {
@@ -38,7 +40,11 @@ struct nvif_ioctl_sclass_v0 {
__u8 version;
__u8 count;
__u8 pad02[6];
- __u32 oclass[];
+ struct nvif_ioctl_sclass_oclass_v0 {
+ __s32 oclass;
+ __s16 minver;
+ __s16 maxver;
+ } oclass[];
};
struct nvif_ioctl_new_v0 {
@@ -47,11 +53,17 @@ struct nvif_ioctl_new_v0 {
__u8 pad01[6];
__u8 route;
__u64 token;
+ __u64 object;
__u32 handle;
/* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff
-#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe
- __u32 oclass;
+#define NVIF_IOCTL_NEW_V0_CONTROL -1
+#define NVIF_IOCTL_NEW_V0_PERFMON -2
+#define NVIF_IOCTL_NEW_V0_PERFDOM -3
+#define NVIF_IOCTL_NEW_V0_SW_NV04 -4
+#define NVIF_IOCTL_NEW_V0_SW_NV10 -5
+#define NVIF_IOCTL_NEW_V0_SW_NV50 -6
+#define NVIF_IOCTL_NEW_V0_SW_GF100 -7
+ __s32 oclass;
__u8 data[]; /* class data (class.h) */
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b45e76..51e2eb580809 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -23,17 +23,11 @@ struct nvif_notify {
struct work_struct work;
};
-int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
- int (*func)(struct nvif_notify *), bool work, u8 type,
- void *data, u32 size, u32 reply, struct nvif_notify *);
+int nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
+ bool work, u8 type, void *data, u32 size, u32 reply,
+ struct nvif_notify *);
int nvif_notify_fini(struct nvif_notify *);
int nvif_notify_get(struct nvif_notify *);
int nvif_notify_put(struct nvif_notify *);
int nvif_notify(const void *, u32, const void *, u32);
-
-int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
- bool work, u8 type, void *data, u32 size, u32 reply,
- struct nvif_notify **);
-void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 04c874707b96..8d815967767f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -3,73 +3,73 @@
#include <nvif/os.h>
+struct nvif_sclass {
+ s32 oclass;
+ int minver;
+ int maxver;
+};
+
struct nvif_object {
- struct nvif_object *parent;
- struct nvif_object *object; /*XXX: hack for nvif_object() */
- struct kref refcount;
+ struct nvif_client *client;
u32 handle;
- u32 oclass;
- void *data;
- u32 size;
+ s32 oclass;
void *priv; /*XXX: hack */
- void (*dtor)(struct nvif_object *);
struct {
void __iomem *ptr;
u32 size;
} map;
};
-int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
- u32 handle, u32 oclass, void *, u32,
+int nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
struct nvif_object *);
void nvif_object_fini(struct nvif_object *);
-int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
- void *, u32, struct nvif_object **);
-void nvif_object_ref(struct nvif_object *, struct nvif_object **);
int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
-int nvif_object_sclass(struct nvif_object *, u32 *, int);
+int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
+void nvif_object_sclass_put(struct nvif_sclass **);
u32 nvif_object_rd(struct nvif_object *, int, u64);
void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
int nvif_object_map(struct nvif_object *);
void nvif_object_unmap(struct nvif_object *);
+#define nvif_handle(a) (unsigned long)(void *)(a)
#define nvif_object(a) (a)->object
-#define ioread8_native ioread8
-#define iowrite8_native iowrite8
-#define nvif_rd(a,b,c) ({ \
- struct nvif_object *_object = nvif_object(a); \
+#define nvif_rd(a,f,b,c) ({ \
+ struct nvif_object *_object = (a); \
u32 _data; \
if (likely(_object->map.ptr)) \
- _data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c)); \
+ _data = f((u8 __iomem *)_object->map.ptr + (c)); \
else \
- _data = nvif_object_rd(_object, (b) / 8, (c)); \
+ _data = nvif_object_rd(_object, (b), (c)); \
_data; \
})
-#define nvif_wr(a,b,c,d) ({ \
- struct nvif_object *_object = nvif_object(a); \
+#define nvif_wr(a,f,b,c,d) ({ \
+ struct nvif_object *_object = (a); \
if (likely(_object->map.ptr)) \
- iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c)); \
+ f((d), (u8 __iomem *)_object->map.ptr + (c)); \
else \
- nvif_object_wr(_object, (b) / 8, (c), (d)); \
+ nvif_object_wr(_object, (b), (c), (d)); \
})
-#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; })
-#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
-#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
-#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
-#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
-#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
+#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
+#define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); })
+#define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
#define nvif_mask(a,b,c,d) ({ \
- u32 _v = nvif_rd32(nvif_object(a), (b)); \
- nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \
- _v; \
+ struct nvif_object *__object = (a); \
+ u32 _addr = (b), _data = nvif_rd32(__object, _addr); \
+ nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \
+ _data; \
})
-#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
/*XXX*/
#include <core/object.h>
-#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv)
-
+#define nvxx_object(a) ({ \
+ struct nvif_object *_object = (a); \
+ (struct nvkm_object *)_object->priv; \
+})
#endif
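
As the tvnv17.h hunks earlier show, callers now pass a struct nvif_object pointer directly (e.g. &device->object) rather than any handle resolvable through the old per-struct object hack. An illustrative round-trip, with a made-up register offset:

    u32 val = nvif_rd32(&device->object, 0x000000);
    nvif_wr32(&device->object, 0x000000, val);
    nvif_mask(&device->object, 0x000000, 0x00000001, 0x00000000);
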
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee7ec72..3accc99d8e0b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -24,9 +24,15 @@
#include <linux/power_supply.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/agp_backend.h>
+#include <linux/reset.h>
+#include <linux/iommu.h>
#include <asm/unaligned.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
@@ -40,5 +46,4 @@
#define iowrite32_native iowrite32
#endif /* def __BIG_ENDIAN else */
#endif /* !ioread32_native */
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index a35b38244502..eaf5905a87a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,55 +1,52 @@
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
-#include <core/namedb.h>
+#include <core/object.h>
struct nvkm_client {
- struct nvkm_namedb namedb;
- struct nvkm_handle *root;
- struct nvkm_object *device;
+ struct nvkm_object object;
char name[32];
+ u64 device;
u32 debug;
- struct nvkm_vm *vm;
+
+ struct nvkm_client_notify *notify[16];
+ struct rb_root objroot;
+ struct rb_root dmaroot;
+
bool super;
void *data;
-
int (*ntfy)(const void *, u32, const void *, u32);
- struct nvkm_client_notify *notify[16];
+
+ struct nvkm_vm *vm;
};
-static inline struct nvkm_client *
-nv_client(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
- nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-static inline struct nvkm_client *
-nvkm_client(void *obj)
-{
- struct nvkm_object *client = nv_object(obj);
- while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
- client = client->parent;
- return (void *)client;
-}
-
-#define nvkm_client_create(n,c,oc,od,d) \
- nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
-
-int nvkm_client_create_(const char *name, u64 device, const char *cfg,
- const char *dbg, int, void **);
-#define nvkm_client_destroy(p) \
- nvkm_namedb_destroy(&(p)->base)
+bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
+void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
+struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
+int nvkm_client_new(const char *name, u64 device, const char *cfg,
+ const char *dbg, struct nvkm_client **);
+void nvkm_client_del(struct nvkm_client **);
int nvkm_client_init(struct nvkm_client *);
int nvkm_client_fini(struct nvkm_client *, bool suspend);
-const char *nvkm_client_name(void *obj);
int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
void *data, u32 size);
int nvkm_client_notify_del(struct nvkm_client *, int index);
int nvkm_client_notify_get(struct nvkm_client *, int index);
int nvkm_client_notify_put(struct nvkm_client *, int index);
+
+/* logging for client-facing objects */
+#define nvif_printk(o,l,p,f,a...) do { \
+ struct nvkm_object *_object = (o); \
+ struct nvkm_client *_client = _object->client; \
+ if (_client->debug >= NV_DBG_##l) \
+ printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \
+ _object->handle, _object->oclass, ##a); \
+} while(0)
+#define nvif_fatal(o,f,a...) nvif_printk((o), FATAL, CRIT, f, ##a)
+#define nvif_error(o,f,a...) nvif_printk((o), ERROR, ERR, f, ##a)
+#define nvif_debug(o,f,a...) nvif_printk((o), DEBUG, INFO, f, ##a)
+#define nvif_trace(o,f,a...) nvif_printk((o), TRACE, INFO, f, ##a)
+#define nvif_info(o,f,a...) nvif_printk((o), INFO, INFO, f, ##a)
+#define nvif_ioctl(o,f,a...) nvif_trace((o), "ioctl: "f, ##a)
#endif
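
Usage sketch for the client-facing logging helpers defined above; 'object' is any struct nvkm_object * and the messages are illustrative:

    nvif_ioctl(object, "new object class %08x\n", oclass);
    nvif_error(object, "method %08x failed\n", mthd);
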
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index d07cb860b56c..c59fd4e2ad5e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,18 +1,11 @@
#ifndef __NVKM_DEBUG_H__
#define __NVKM_DEBUG_H__
-extern int nv_info_debug_level;
-
#define NV_DBG_FATAL 0
#define NV_DBG_ERROR 1
#define NV_DBG_WARN 2
-#define NV_DBG_INFO nv_info_debug_level
+#define NV_DBG_INFO 3
#define NV_DBG_DEBUG 4
#define NV_DBG_TRACE 5
#define NV_DBG_PARANOIA 6
#define NV_DBG_SPAM 7
-
-#define NV_DBG_INFO_NORMAL 3
-#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
-
-#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 333db33a162c..8f760002e401 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,24 +1,84 @@
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
-#include <core/engine.h>
#include <core/event.h>
+#include <core/object.h>
+
+enum nvkm_devidx {
+ NVKM_SUBDEV_PCI,
+ NVKM_SUBDEV_VBIOS,
+ NVKM_SUBDEV_DEVINIT,
+ NVKM_SUBDEV_IBUS,
+ NVKM_SUBDEV_GPIO,
+ NVKM_SUBDEV_I2C,
+ NVKM_SUBDEV_FUSE,
+ NVKM_SUBDEV_MXM,
+ NVKM_SUBDEV_MC,
+ NVKM_SUBDEV_BUS,
+ NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_FB,
+ NVKM_SUBDEV_LTC,
+ NVKM_SUBDEV_INSTMEM,
+ NVKM_SUBDEV_MMU,
+ NVKM_SUBDEV_BAR,
+ NVKM_SUBDEV_PMU,
+ NVKM_SUBDEV_VOLT,
+ NVKM_SUBDEV_THERM,
+ NVKM_SUBDEV_CLK,
+
+ NVKM_ENGINE_DMAOBJ,
+ NVKM_ENGINE_IFB,
+ NVKM_ENGINE_FIFO,
+ NVKM_ENGINE_SW,
+ NVKM_ENGINE_GR,
+ NVKM_ENGINE_MPEG,
+ NVKM_ENGINE_ME,
+ NVKM_ENGINE_VP,
+ NVKM_ENGINE_CIPHER,
+ NVKM_ENGINE_BSP,
+ NVKM_ENGINE_MSPPP,
+ NVKM_ENGINE_CE0,
+ NVKM_ENGINE_CE1,
+ NVKM_ENGINE_CE2,
+ NVKM_ENGINE_VIC,
+ NVKM_ENGINE_MSENC,
+ NVKM_ENGINE_DISP,
+ NVKM_ENGINE_PM,
+ NVKM_ENGINE_MSVLD,
+ NVKM_ENGINE_SEC,
+ NVKM_ENGINE_MSPDEC,
+
+ NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+ NVKM_DEVICE_PCI,
+ NVKM_DEVICE_AGP,
+ NVKM_DEVICE_PCIE,
+ NVKM_DEVICE_TEGRA,
+};
struct nvkm_device {
- struct nvkm_engine engine;
+ const struct nvkm_device_func *func;
+ const struct nvkm_device_quirk *quirk;
+ struct device *dev;
+ enum nvkm_device_type type;
+ u64 handle;
+ const char *name;
+ const char *cfgopt;
+ const char *dbgopt;
+
struct list_head head;
+ struct mutex mutex;
+ int refcount;
- struct pci_dev *pdev;
- struct platform_device *platformdev;
- u64 handle;
+ void __iomem *pri;
struct nvkm_event event;
- const char *cfgopt;
- const char *dbgopt;
- const char *name;
- const char *cname;
u64 disable_mask;
+ u32 debug;
+ const struct nvkm_device_chip *chip;
enum {
NV_04 = 0x04,
NV_10 = 0x10,
@@ -35,67 +95,157 @@ struct nvkm_device {
u8 chiprev;
u32 crystal;
- struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
- struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
-
struct {
struct notifier_block nb;
} acpi;
+
+ struct nvkm_bar *bar;
+ struct nvkm_bios *bios;
+ struct nvkm_bus *bus;
+ struct nvkm_clk *clk;
+ struct nvkm_devinit *devinit;
+ struct nvkm_fb *fb;
+ struct nvkm_fuse *fuse;
+ struct nvkm_gpio *gpio;
+ struct nvkm_i2c *i2c;
+ struct nvkm_subdev *ibus;
+ struct nvkm_instmem *imem;
+ struct nvkm_ltc *ltc;
+ struct nvkm_mc *mc;
+ struct nvkm_mmu *mmu;
+ struct nvkm_subdev *mxm;
+ struct nvkm_pci *pci;
+ struct nvkm_pmu *pmu;
+ struct nvkm_therm *therm;
+ struct nvkm_timer *timer;
+ struct nvkm_volt *volt;
+
+ struct nvkm_engine *bsp;
+ struct nvkm_engine *ce[3];
+ struct nvkm_engine *cipher;
+ struct nvkm_disp *disp;
+ struct nvkm_dma *dma;
+ struct nvkm_fifo *fifo;
+ struct nvkm_gr *gr;
+ struct nvkm_engine *ifb;
+ struct nvkm_engine *me;
+ struct nvkm_engine *mpeg;
+ struct nvkm_engine *msenc;
+ struct nvkm_engine *mspdec;
+ struct nvkm_engine *msppp;
+ struct nvkm_engine *msvld;
+ struct nvkm_pm *pm;
+ struct nvkm_engine *sec;
+ struct nvkm_sw *sw;
+ struct nvkm_engine *vic;
+ struct nvkm_engine *vp;
+};
+
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+
+struct nvkm_device_func {
+ struct nvkm_device_pci *(*pci)(struct nvkm_device *);
+ struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
+ void *(*dtor)(struct nvkm_device *);
+ int (*preinit)(struct nvkm_device *);
+ int (*init)(struct nvkm_device *);
+ void (*fini)(struct nvkm_device *, bool suspend);
+ resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
+ resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ bool cpu_coherent;
+};
+
+struct nvkm_device_quirk {
+ u8 tv_pin_mask;
+ u8 tv_gpio;
+ bool War00C800_0;
+};
+
+struct nvkm_device_chip {
+ const char *name;
+
+ int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
+ int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
+ int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
+ int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
+ int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
+ int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
+ int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+ int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+ int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+ int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+ int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+ int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+ int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
+ int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+ int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+ int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
+ int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+ int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
+ int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
+ int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
+
+ int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
+ int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
+ int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+ int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
+ int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
+ int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
+ int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
};
struct nvkm_device *nvkm_device_find(u64 name);
int nvkm_device_list(u64 *name, int size);
-struct nvkm_device *nv_device(void *obj);
-
-static inline bool
-nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
-{
- struct nvkm_device *device = nv_device(object);
- return device->pdev->device == dev &&
- device->pdev->subsystem_vendor == ven &&
- device->pdev->subsystem_device == sub;
-}
-
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
- return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
- return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
- return nv_device_is_pci(device) ? &device->pdev->dev :
- &device->platformdev->dev;
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall);
-
-struct platform_device;
-
-enum nv_bus_type {
- NVKM_BUS_PCI,
- NVKM_BUS_PLATFORM,
+/* privileged register interface accessor macros */
+#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
+#define nvkm_rd16(d,a) ioread16_native((d)->pri + (a))
+#define nvkm_rd32(d,a) ioread32_native((d)->pri + (a))
+#define nvkm_wr08(d,a,v) iowrite8((v), (d)->pri + (a))
+#define nvkm_wr16(d,a,v) iowrite16_native((v), (d)->pri + (a))
+#define nvkm_wr32(d,a,v) iowrite32_native((v), (d)->pri + (a))
+#define nvkm_mask(d,a,m,v) ({ \
+ struct nvkm_device *_device = (d); \
+ u32 _addr = (a), _temp = nvkm_rd32(_device, _addr); \
+ nvkm_wr32(_device, _addr, (_temp & ~(m)) | (v)); \
+ _temp; \
+})
+
+void nvkm_device_del(struct nvkm_device **);
+
+struct nvkm_device_oclass {
+ int (*ctor)(struct nvkm_device *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+ struct nvkm_sclass base;
};
-#define nvkm_device_create(p,t,n,s,c,d,u) \
- nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d), \
- sizeof(**u), (void **)u)
-int nvkm_device_create_(void *, enum nv_bus_type type, u64 name,
- const char *sname, const char *cfg, const char *dbg,
- int, void **);
+extern const struct nvkm_sclass nvkm_udevice_sclass;
+
+/* device logging */
+#define nvdev_printk_(d,l,p,f,a...) do { \
+ struct nvkm_device *_device = (d); \
+ if (_device->debug >= (l)) \
+ dev_##p(_device->dev, f, ##a); \
+} while(0)
+#define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
+#define nvdev_fatal(d,f,a...) nvdev_printk((d), FATAL, crit, f, ##a)
+#define nvdev_error(d,f,a...) nvdev_printk((d), ERROR, err, f, ##a)
+#define nvdev_warn(d,f,a...) nvdev_printk((d), WARN, notice, f, ##a)
+#define nvdev_info(d,f,a...) nvdev_printk((d), INFO, info, f, ##a)
+#define nvdev_debug(d,f,a...) nvdev_printk((d), DEBUG, info, f, ##a)
+#define nvdev_trace(d,f,a...) nvdev_printk((d), TRACE, info, f, ##a)
+#define nvdev_spam(d,f,a...) nvdev_printk((d), SPAM, dbg, f, ##a)
#endif
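
A short sketch combining the new privileged register accessors with the device loggers; 0x000000 is NV_PMC_BOOT_0 on NVIDIA hardware, though here it serves only as an illustrative offset, as does the mask operation:

    u32 boot0 = nvkm_rd32(device, 0x000000);
    nvkm_mask(device, 0x000200, 0x00000001, 0x00000000);
    nvdev_info(device, "boot0 %08x\n", boot0);
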
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
deleted file mode 100644
index 60c5888b5df3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NVKM_DEVIDX_H__
-#define __NVKM_DEVIDX_H__
-enum nvkm_devidx {
- NVDEV_ENGINE_DEVICE,
- NVDEV_SUBDEV_VBIOS,
-
- /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
- * *any* of them are initialised. This subdev category is used
- * for any subdevs that the VBIOS init table parsing may call out
- * to during POST.
- */
- NVDEV_SUBDEV_DEVINIT,
- NVDEV_SUBDEV_IBUS,
- NVDEV_SUBDEV_GPIO,
- NVDEV_SUBDEV_I2C,
- NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
-
- /* This grouping of subdevs are initialised right after they've
- * been created, and are allowed to assume any subdevs in the
- * list above them exist and have been initialised.
- */
- NVDEV_SUBDEV_FUSE,
- NVDEV_SUBDEV_MXM,
- NVDEV_SUBDEV_MC,
- NVDEV_SUBDEV_BUS,
- NVDEV_SUBDEV_TIMER,
- NVDEV_SUBDEV_FB,
- NVDEV_SUBDEV_LTC,
- NVDEV_SUBDEV_INSTMEM,
- NVDEV_SUBDEV_MMU,
- NVDEV_SUBDEV_BAR,
- NVDEV_SUBDEV_PMU,
- NVDEV_SUBDEV_VOLT,
- NVDEV_SUBDEV_THERM,
- NVDEV_SUBDEV_CLK,
-
- NVDEV_ENGINE_FIRST,
- NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
- NVDEV_ENGINE_IFB,
- NVDEV_ENGINE_FIFO,
- NVDEV_ENGINE_SW,
- NVDEV_ENGINE_GR,
- NVDEV_ENGINE_MPEG,
- NVDEV_ENGINE_ME,
- NVDEV_ENGINE_VP,
- NVDEV_ENGINE_CIPHER,
- NVDEV_ENGINE_BSP,
- NVDEV_ENGINE_MSPPP,
- NVDEV_ENGINE_CE0,
- NVDEV_ENGINE_CE1,
- NVDEV_ENGINE_CE2,
- NVDEV_ENGINE_VIC,
- NVDEV_ENGINE_MSENC,
- NVDEV_ENGINE_DISP,
- NVDEV_ENGINE_PM,
- NVDEV_ENGINE_MSVLD,
- NVDEV_ENGINE_SEC,
- NVDEV_ENGINE_MSPDEC,
-
- NVDEV_SUBDEV_NR,
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
deleted file mode 100644
index 1bf2e8eb4268..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __NVKM_ENGCTX_H__
-#define __NVKM_ENGCTX_H__
-#include <core/gpuobj.h>
-
-#include <subdev/mmu.h>
-
-#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
-#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
-
-struct nvkm_engctx {
- struct nvkm_gpuobj gpuobj;
- struct nvkm_vma vma;
- struct list_head head;
- unsigned long save;
- u64 addr;
-};
-
-static inline struct nvkm_engctx *
-nv_engctx(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
- nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nvkm_engctx_create(p,e,c,g,s,a,f,d) \
- nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
- sizeof(**d), (void **)d)
-
-int nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, struct nvkm_object *,
- u32 size, u32 align, u32 flags,
- int length, void **data);
-void nvkm_engctx_destroy(struct nvkm_engctx *);
-int nvkm_engctx_init(struct nvkm_engctx *);
-int nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
-
-int _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_engctx_dtor(struct nvkm_object *);
-int _nvkm_engctx_init(struct nvkm_object *);
-int _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
-#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index faf0fd2f0638..48bf128456a1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,56 +1,49 @@
#ifndef __NVKM_ENGINE_H__
#define __NVKM_ENGINE_H__
+#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
#include <core/subdev.h>
-
-#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
-#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+struct nvkm_fifo_chan;
+struct nvkm_fb_tile;
struct nvkm_engine {
+ const struct nvkm_engine_func *func;
struct nvkm_subdev subdev;
- struct nvkm_oclass *cclass;
- struct nvkm_oclass *sclass;
-
- struct list_head contexts;
spinlock_t lock;
- void (*tile_prog)(struct nvkm_engine *, int region);
- int (*tlb_flush)(struct nvkm_engine *);
+ int usecount;
};
-static inline struct nvkm_engine *
-nv_engine(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
- nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-static inline int
-nv_engidx(struct nvkm_engine *engine)
-{
- return nv_subidx(&engine->subdev);
-}
-
-struct nvkm_engine *nvkm_engine(void *obj, int idx);
-
-#define nvkm_engine_create(p,e,c,d,i,f,r) \
- nvkm_engine_create_((p), (e), (c), (d), (i), (f), \
- sizeof(**r),(void **)r)
-
-#define nvkm_engine_destroy(p) \
- nvkm_subdev_destroy(&(p)->subdev)
-#define nvkm_engine_init(p) \
- nvkm_subdev_init(&(p)->subdev)
-#define nvkm_engine_fini(p,s) \
- nvkm_subdev_fini(&(p)->subdev, (s))
-
-int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, bool, const char *,
- const char *, int, void **);
+struct nvkm_engine_func {
+ void *(*dtor)(struct nvkm_engine *);
+ int (*oneinit)(struct nvkm_engine *);
+ int (*init)(struct nvkm_engine *);
+ int (*fini)(struct nvkm_engine *, bool suspend);
+ void (*intr)(struct nvkm_engine *);
+ void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
+
+ struct {
+ int (*sclass)(struct nvkm_oclass *, int index,
+ const struct nvkm_device_oclass **);
+ } base;
+
+ struct {
+ int (*cclass)(struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *,
+ struct nvkm_object **);
+ int (*sclass)(struct nvkm_oclass *, int index);
+ } fifo;
+
+ const struct nvkm_object_func *cclass;
+ struct nvkm_sclass sclass[];
+};
-#define _nvkm_engine_dtor _nvkm_subdev_dtor
-#define _nvkm_engine_init _nvkm_subdev_init
-#define _nvkm_engine_fini _nvkm_subdev_fini
+int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
+ int index, u32 pmc_enable, bool enable,
+ struct nvkm_engine *);
+int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
+ int index, u32 pmc_enable, bool enable,
+ struct nvkm_engine **);
+struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
+void nvkm_engine_unref(struct nvkm_engine **);
+void nvkm_engine_tile(struct nvkm_engine *, int region);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index e76f76f115e9..40429a82f792 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -10,12 +10,11 @@ struct nvkm_enum {
};
const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
-const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
struct nvkm_bitfield {
u32 mask;
const char *name;
};
-void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
+void nvkm_snprintbf(char *, int, const struct nvkm_bitfield *, u32 value);
#endif
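
Sketch of the snprintf-style replacement for nvkm_bitfield_print(); the bitfield table and status value are illustrative:

    static const struct nvkm_bitfield intr_names[] = {
            { 0x00000001, "FIFO" },
            {}
    };
    char msg[128];
    nvkm_snprintbf(msg, sizeof(msg), intr_names, stat);
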
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index e0187e7abb6e..d4f56eafb073 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,64 +1,40 @@
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
#include <core/object.h>
+#include <core/memory.h>
#include <core/mm.h>
struct nvkm_vma;
struct nvkm_vm;
#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE 0x00000002
#define NVOBJ_FLAG_HEAP 0x00000004
struct nvkm_gpuobj {
struct nvkm_object object;
- struct nvkm_object *parent;
+ const struct nvkm_gpuobj_func *func;
+ struct nvkm_gpuobj *parent;
+ struct nvkm_memory *memory;
struct nvkm_mm_node *node;
- struct nvkm_mm heap;
- u32 flags;
u64 addr;
u32 size;
-};
+ struct nvkm_mm heap;
-static inline struct nvkm_gpuobj *
-nv_gpuobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
- nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
+ void __iomem *map;
+};
-#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d) \
- nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
- sizeof(**d), (void **)d)
-#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
-#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
-int nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32 pclass,
- struct nvkm_object *, u32 size, u32 align,
- u32 flags, int length, void **);
-void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
+struct nvkm_gpuobj_func {
+ void *(*acquire)(struct nvkm_gpuobj *);
+ void (*release)(struct nvkm_gpuobj *);
+ u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
+ void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+};
-int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
- u32 align, u32 flags, struct nvkm_gpuobj **);
-int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
- struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
-int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
- struct nvkm_vma *);
+int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+ struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+ struct nvkm_vma *);
void nvkm_gpuobj_unmap(struct nvkm_vma *);
-
-static inline void
-nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
-{
- nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
-}
-
-void _nvkm_gpuobj_dtor(struct nvkm_object *);
-int _nvkm_gpuobj_init(struct nvkm_object *);
-int _nvkm_gpuobj_fini(struct nvkm_object *, bool);
-u32 _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
-void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
deleted file mode 100644
index 67f384d0916c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NVKM_HANDLE_H__
-#define __NVKM_HANDLE_H__
-#include <core/os.h>
-struct nvkm_object;
-
-struct nvkm_handle {
- struct nvkm_namedb *namedb;
- struct list_head node;
-
- struct list_head head;
- struct list_head tree;
- u32 name;
- u32 priv;
-
- u8 route;
- u64 token;
-
- struct nvkm_handle *parent;
- struct nvkm_object *object;
-};
-
-int nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
- struct nvkm_object *, struct nvkm_handle **);
-void nvkm_handle_destroy(struct nvkm_handle *);
-int nvkm_handle_init(struct nvkm_handle *);
-int nvkm_handle_fini(struct nvkm_handle *, bool suspend);
-
-struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
-
-struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
-struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
-struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
-void nvkm_handle_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
new file mode 100644
index 000000000000..9363b839a9da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_MEMORY_H__
+#define __NVKM_MEMORY_H__
+#include <core/os.h>
+struct nvkm_device;
+struct nvkm_vma;
+struct nvkm_vm;
+
+enum nvkm_memory_target {
+ NVKM_MEM_TARGET_INST,
+ NVKM_MEM_TARGET_VRAM,
+ NVKM_MEM_TARGET_HOST,
+};
+
+struct nvkm_memory {
+ const struct nvkm_memory_func *func;
+};
+
+struct nvkm_memory_func {
+ void *(*dtor)(struct nvkm_memory *);
+ enum nvkm_memory_target (*target)(struct nvkm_memory *);
+ u64 (*addr)(struct nvkm_memory *);
+ u64 (*size)(struct nvkm_memory *);
+ void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+ void __iomem *(*acquire)(struct nvkm_memory *);
+ void (*release)(struct nvkm_memory *);
+ u32 (*rd32)(struct nvkm_memory *, u64 offset);
+ void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
+ void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
+int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
+ u64 size, u32 align, bool zero, struct nvkm_memory **);
+void nvkm_memory_del(struct nvkm_memory **);
+#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_addr(p) (p)->func->addr(p)
+#define nvkm_memory_size(p) (p)->func->size(p)
+#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
+#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+
+/* accessor macros - kmap()/done() must bracket use of the other accessor
+ * macros to guarantee correct behaviour across all chipsets
+ */
+#define nvkm_kmap(o) (o)->func->acquire(o)
+#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_mo32(o,a,m,d) ({ \
+ u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
+ nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
+ _data; \
+})
+#define nvkm_done(o) (o)->func->release(o)
+#endif
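
An example honouring the kmap()/done() bracketing rule stated in the comment above; the memory object and offsets are illustrative:

    nvkm_kmap(memory);
    nvkm_wo32(memory, 0x00, 0x00000001);
    nvkm_mo32(memory, 0x04, 0x000000ff, 0x00000002);
    nvkm_done(memory);
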
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 096eb1a623ee..d92fd41e4056 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -27,7 +27,7 @@ struct nvkm_mm {
static inline bool
nvkm_mm_initialised(struct nvkm_mm *mm)
{
- return mm->block_size != 0;
+ return mm->heap_nodes;
}
int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@ int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
u32 size_min, u32 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
+void nvkm_mm_dump(struct nvkm_mm *, const char *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 4cfe16fcde9b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_NAMEDB_H__
-#define __NVKM_NAMEDB_H__
-#include <core/parent.h>
-struct nvkm_handle;
-
-struct nvkm_namedb {
- struct nvkm_parent parent;
- rwlock_t lock;
- struct list_head list;
-};
-
-static inline struct nvkm_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
- nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nvkm_namedb_create(p,e,c,v,s,m,d) \
- nvkm_namedb_create_((p), (e), (c), (v), (s), (m), \
- sizeof(**d), (void **)d)
-#define nvkm_namedb_init(p) \
- nvkm_parent_init(&(p)->parent)
-#define nvkm_namedb_fini(p,s) \
- nvkm_parent_fini(&(p)->parent, (s))
-#define nvkm_namedb_destroy(p) \
- nvkm_parent_destroy(&(p)->parent)
-
-int nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32 pclass,
- struct nvkm_oclass *, u64 engcls,
- int size, void **);
-
-int _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-#define _nvkm_namedb_dtor _nvkm_parent_dtor
-#define _nvkm_namedb_init _nvkm_parent_init
-#define _nvkm_namedb_fini _nvkm_parent_fini
-
-int nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
- struct nvkm_handle *);
-void nvkm_namedb_remove(struct nvkm_handle *);
-
-struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
-struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
-struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
-struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
-void nvkm_namedb_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 6e3cd3908400..dcd048b91fac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,203 +1,88 @@
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
#include <core/os.h>
-#include <core/printk.h>
-
-#define NV_PARENT_CLASS 0x80000000
-#define NV_NAMEDB_CLASS 0x40000000
-#define NV_CLIENT_CLASS 0x20000000
-#define NV_SUBDEV_CLASS 0x10000000
-#define NV_ENGINE_CLASS 0x08000000
-#define NV_MEMOBJ_CLASS 0x04000000
-#define NV_GPUOBJ_CLASS 0x02000000
-#define NV_ENGCTX_CLASS 0x01000000
-#define NV_OBJECT_CLASS 0x0000ffff
+#include <core/debug.h>
+struct nvkm_event;
+struct nvkm_gpuobj;
+struct nvkm_oclass;
struct nvkm_object {
- struct nvkm_oclass *oclass;
- struct nvkm_object *parent;
+ const struct nvkm_object_func *func;
+ struct nvkm_client *client;
struct nvkm_engine *engine;
- atomic_t refcount;
- atomic_t usecount;
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NVKM_OBJECT_MAGIC 0x75ef0bad
- struct list_head list;
- u32 _magic;
-#endif
-};
-
-static inline struct nvkm_object *
-nv_object(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (likely(obj)) {
- struct nvkm_object *object = obj;
- if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
- nv_assert("BAD CAST -> NvObject, invalid magic");
- }
-#endif
- return obj;
-}
-
-#define nvkm_object_create(p,e,c,s,d) \
- nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32, int size, void **);
-void nvkm_object_destroy(struct nvkm_object *);
-int nvkm_object_init(struct nvkm_object *);
-int nvkm_object_fini(struct nvkm_object *, bool suspend);
-
-int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-extern struct nvkm_ofuncs nvkm_object_ofuncs;
-
-/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
- * ".data". */
-struct nvkm_oclass {
+ s32 oclass;
u32 handle;
- struct nvkm_ofuncs * const ofuncs;
- struct nvkm_omthds * const omthds;
- struct lock_class_key lock_class_key;
-};
-
-#define nv_oclass(o) nv_object(o)->oclass
-#define nv_hclass(o) nv_oclass(o)->handle
-#define nv_iclass(o,i) (nv_hclass(o) & (i))
-#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
-static inline struct nvkm_object *
-nv_pclass(struct nvkm_object *parent, u32 oclass)
-{
- while (parent && !nv_iclass(parent, oclass))
- parent = parent->parent;
- return parent;
-}
+ struct list_head head;
+ struct list_head tree;
+ u8 route;
+ u64 token;
+ u64 object;
+ struct rb_node node;
+};
-struct nvkm_omthds {
- u32 start;
- u32 limit;
- int (*call)(struct nvkm_object *, u32, void *, u32);
+struct nvkm_object_func {
+ void *(*dtor)(struct nvkm_object *);
+ int (*init)(struct nvkm_object *);
+ int (*fini)(struct nvkm_object *, bool suspend);
+ int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
+ int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+ int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+ int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
+ int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
+ int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
+ int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
+ int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
+ int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
+ int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+ struct nvkm_gpuobj **);
+ int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
};
-struct nvkm_event;
-struct nvkm_ofuncs {
- int (*ctor)(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *data, u32 size,
+void nvkm_object_ctor(const struct nvkm_object_func *,
+ const struct nvkm_oclass *, struct nvkm_object *);
+int nvkm_object_new_(const struct nvkm_object_func *,
+ const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
- void (*dtor)(struct nvkm_object *);
- int (*init)(struct nvkm_object *);
- int (*fini)(struct nvkm_object *, bool suspend);
- int (*mthd)(struct nvkm_object *, u32, void *, u32);
- int (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
- int (* map)(struct nvkm_object *, u64 *, u32 *);
- u8 (*rd08)(struct nvkm_object *, u64 offset);
- u16 (*rd16)(struct nvkm_object *, u64 offset);
- u32 (*rd32)(struct nvkm_object *, u64 offset);
- void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
- void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
- void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
+int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+void nvkm_object_del(struct nvkm_object **);
+void *nvkm_object_dtor(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
+int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
+int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
+int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
+int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
+int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8 data);
+int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16 data);
+int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
+int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+ struct nvkm_gpuobj **);
+
+struct nvkm_sclass {
+ int minver;
+ int maxver;
+ s32 oclass;
+ const struct nvkm_object_func *func;
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
};
-static inline struct nvkm_ofuncs *
-nv_ofuncs(void *obj)
-{
- return nv_oclass(obj)->ofuncs;
-}
-
-int nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
-int nvkm_object_inc(struct nvkm_object *);
-int nvkm_object_dec(struct nvkm_object *, bool suspend);
-void nvkm_object_debug(void);
-
-static inline int
-nv_exec(void *obj, u32 mthd, void *data, u32 size)
-{
- struct nvkm_omthds *method = nv_oclass(obj)->omthds;
-
- while (method && method->call) {
- if (mthd >= method->start && mthd <= method->limit)
- return method->call(obj, mthd, data, size);
- method++;
- }
-
- return -EINVAL;
-}
-
-static inline int
-nv_call(void *obj, u32 mthd, u32 data)
-{
- return nv_exec(obj, mthd, &data, sizeof(data));
-}
-
-static inline u8
-nv_ro08(void *obj, u64 addr)
-{
- u8 data = nv_ofuncs(obj)->rd08(obj, addr);
- nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
- return data;
-}
-
-static inline u16
-nv_ro16(void *obj, u64 addr)
-{
- u16 data = nv_ofuncs(obj)->rd16(obj, addr);
- nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
- return data;
-}
-
-static inline u32
-nv_ro32(void *obj, u64 addr)
-{
- u32 data = nv_ofuncs(obj)->rd32(obj, addr);
- nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
- return data;
-}
-
-static inline void
-nv_wo08(void *obj, u64 addr, u8 data)
-{
- nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
- nv_ofuncs(obj)->wr08(obj, addr, data);
-}
-
-static inline void
-nv_wo16(void *obj, u64 addr, u16 data)
-{
- nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
- nv_ofuncs(obj)->wr16(obj, addr, data);
-}
-
-static inline void
-nv_wo32(void *obj, u64 addr, u32 data)
-{
- nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
- nv_ofuncs(obj)->wr32(obj, addr, data);
-}
-
-static inline u32
-nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
-{
- u32 temp = nv_ro32(obj, addr);
- nv_wo32(obj, addr, (temp & ~mask) | data);
- return temp;
-}
-
-static inline int
-nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
-{
- unsigned char c1, c2;
-
- while (len--) {
- c1 = nv_ro08(obj, addr++);
- c2 = *(str++);
- if (c1 != c2)
- return c1 - c2;
- }
- return 0;
-}
+struct nvkm_oclass {
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const void *priv;
+ const void *engn;
+ u32 handle;
+ u8 route;
+ u64 token;
+ u64 object;
+ struct nvkm_client *client;
+ struct nvkm_object *parent;
+ struct nvkm_engine *engine;
+};
#endif
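/* Illustrative sketch (not part of the patch): with the rework above,
 * per-class behaviour moves from nvkm_ofuncs/nvkm_omthds into a const
 * nvkm_object_func table, and construction becomes a plain ctor call on
 * an embedded nvkm_object. All "demo_*" names are hypothetical; the
 * ctor is shaped like nvkm_sclass.ctor above.
 */
#include <core/object.h>

struct demo_object {
	struct nvkm_object object;	/* embedded, not pointed to */
};

static void *
demo_dtor(struct nvkm_object *object)
{
	/* hand the allocation back for the core to kfree() */
	return container_of(object, struct demo_object, object);
}

static const struct nvkm_object_func
demo_object_func = {
	.dtor = demo_dtor,
};

static int
demo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
	 struct nvkm_object **pobject)
{
	struct demo_object *demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;
	nvkm_object_ctor(&demo_object_func, oclass, &demo->object);
	*pobject = &demo->object;
	return 0;
}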
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
new file mode 100644
index 000000000000..bd52236cc2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -0,0 +1,22 @@
+#ifndef __NVKM_OPROXY_H__
+#define __NVKM_OPROXY_H__
+#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
+#include <core/object.h>
+
+struct nvkm_oproxy {
+ const struct nvkm_oproxy_func *func;
+ struct nvkm_object base;
+ struct nvkm_object *object;
+};
+
+struct nvkm_oproxy_func {
+ void (*dtor[2])(struct nvkm_oproxy *);
+ int (*init[2])(struct nvkm_oproxy *);
+ int (*fini[2])(struct nvkm_oproxy *, bool suspend);
+};
+
+void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+ const struct nvkm_oclass *, struct nvkm_oproxy *);
+int nvkm_oproxy_new_(const struct nvkm_oproxy_func *,
+ const struct nvkm_oclass *, struct nvkm_oproxy **);
+#endif
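/* Illustrative sketch (not part of the patch): the paired hook arrays
 * in nvkm_oproxy_func appear to bracket the proxied object's own
 * operation -- index 0 before it, index 1 after. "demo_*" names are
 * hypothetical.
 */
static int
demo_oproxy_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
{
	/* runs after oproxy->object itself has been fini'd */
	return 0;
}

static const struct nvkm_oproxy_func
demo_oproxy_func = {
	.fini[1] = demo_oproxy_fini_1,
};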
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 532bfa8e3f72..80fdc146e816 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -4,6 +4,7 @@
const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+long nvkm_longopt(const char *optstr, const char *opt, long value);
int nvkm_dbgopt(const char *optstr, const char *sub);
/* compares unterminated string 'str' with zero-terminated string 'cmp' */
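/* Illustrative sketch (not part of the patch): the new nvkm_longopt()
 * reads a numeric option with a fallback default; the option name and
 * wrapper below are hypothetical.
 */
static long
demo_get_opt(const char *optstr)
{
	return nvkm_longopt(optstr, "NvDemoOpt", 4);
}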
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
deleted file mode 100644
index 837e4fe966a5..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef __NVKM_PARENT_H__
-#define __NVKM_PARENT_H__
-#include <core/object.h>
-
-struct nvkm_sclass {
- struct nvkm_sclass *sclass;
- struct nvkm_engine *engine;
- struct nvkm_oclass *oclass;
-};
-
-struct nvkm_parent {
- struct nvkm_object object;
-
- struct nvkm_sclass *sclass;
- u64 engine;
-
- int (*context_attach)(struct nvkm_object *, struct nvkm_object *);
- int (*context_detach)(struct nvkm_object *, bool suspend,
- struct nvkm_object *);
-
- int (*object_attach)(struct nvkm_object *parent,
- struct nvkm_object *object, u32 name);
- void (*object_detach)(struct nvkm_object *parent, int cookie);
-};
-
-static inline struct nvkm_parent *
-nv_parent(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
- nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-#define nvkm_parent_create(p,e,c,v,s,m,d) \
- nvkm_parent_create_((p), (e), (c), (v), (s), (m), \
- sizeof(**d), (void **)d)
-#define nvkm_parent_init(p) \
- nvkm_object_init(&(p)->object)
-#define nvkm_parent_fini(p,s) \
- nvkm_object_fini(&(p)->object, (s))
-
-int nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32 pclass,
- struct nvkm_oclass *, u64 engcls,
- int size, void **);
-void nvkm_parent_destroy(struct nvkm_parent *);
-
-void _nvkm_parent_dtor(struct nvkm_object *);
-#define _nvkm_parent_init nvkm_object_init
-#define _nvkm_parent_fini nvkm_object_fini
-
-int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
- struct nvkm_object **pengine,
- struct nvkm_oclass **poclass);
-int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
new file mode 100644
index 000000000000..78d41be20b8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_DEVICE_PCI_H__
+#define __NVKM_DEVICE_PCI_H__
+#include <core/device.h>
+
+struct nvkm_device_pci {
+ struct nvkm_device device;
+ struct pci_dev *pdev;
+ bool suspend;
+};
+
+int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device **);
+#endif
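/* Illustrative sketch (not part of the patch): a hypothetical
 * probe-time call into the new PCI backend constructor; the cfg/dbg
 * strings and subdev mask are placeholder values.
 */
static int
demo_pci_probe(struct pci_dev *pdev, struct nvkm_device **pdevice)
{
	return nvkm_device_pci_new(pdev, NULL /* cfg */, "info" /* dbg */,
				   true /* detect */, true /* mmio */,
				   ~0ULL /* all subdevs */, pdevice);
}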
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
deleted file mode 100644
index 83648177059f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __NVKM_PRINTK_H__
-#define __NVKM_PRINTK_H__
-#include <core/os.h>
-#include <core/debug.h>
-struct nvkm_object;
-
-void __printf(3, 4)
-nv_printk_(struct nvkm_object *, int, const char *, ...);
-
-#define nv_printk(o,l,f,a...) do { \
- if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
- nv_printk_(nv_object(o), NV_DBG_##l, f, ##a); \
-} while(0)
-
-#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
-#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
-#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
-#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
-#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
-#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
-#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
-
-#define nv_assert(f,a...) do { \
- if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
- nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a); \
- BUG_ON(1); \
-} while(0)
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index cc132eaa10cc..5ee6298991e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,19 +2,27 @@
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
+struct nvkm_ramht_data {
+ struct nvkm_gpuobj *inst;
+ int chid;
+ u32 handle;
+};
+
struct nvkm_ramht {
- struct nvkm_gpuobj gpuobj;
+ struct nvkm_device *device;
+ struct nvkm_gpuobj *parent;
+ struct nvkm_gpuobj *gpuobj;
+ int size;
int bits;
+ struct nvkm_ramht_data data[];
};
-int nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
+int nvkm_ramht_new(struct nvkm_device *, u32 size, u32 align,
+ struct nvkm_gpuobj *, struct nvkm_ramht **);
+void nvkm_ramht_del(struct nvkm_ramht **);
+int nvkm_ramht_insert(struct nvkm_ramht *, struct nvkm_object *,
+ int chid, int addr, u32 handle, u32 context);
void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
-int nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size,
- u32 align, struct nvkm_ramht **);
-
-static inline void
-nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
-{
- nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
-}
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *, int chid, u32 handle);
#endif
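/* Illustrative sketch (not part of the patch): RAMHT creation now hangs
 * off the device rather than parent/engine objects, and
 * nvkm_ramht_insert() appears to return a cookie for later
 * nvkm_ramht_remove(). Size, chid, handle and context values below are
 * made up.
 */
static int
demo_ramht(struct nvkm_device *device, struct nvkm_object *object)
{
	struct nvkm_ramht *ramht;
	int cookie, ret;

	ret = nvkm_ramht_new(device, 0x8000, 0, NULL, &ramht);
	if (ret)
		return ret;

	cookie = nvkm_ramht_insert(ramht, object, 0 /* chid */, 4 /* addr */,
				   0xbeef0001 /* handle */, 0 /* context */);
	if (cookie >= 0)
		nvkm_ramht_remove(ramht, cookie);

	nvkm_ramht_del(&ramht);
	return cookie < 0 ? cookie : 0;
}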
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 6fdc39116aac..3b5dc9c63069 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,119 +1,50 @@
#ifndef __NVKM_SUBDEV_H__
#define __NVKM_SUBDEV_H__
-#include <core/object.h>
-#include <core/devidx.h>
-
-#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
-#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+#include <core/device.h>
struct nvkm_subdev {
- struct nvkm_object object;
+ const struct nvkm_subdev_func *func;
+ struct nvkm_device *device;
+ enum nvkm_devidx index;
+ u32 pmc_enable;
struct mutex mutex;
- const char *name;
- void __iomem *mmio;
u32 debug;
- u32 unit;
- void (*intr)(struct nvkm_subdev *);
+ bool oneinit;
};
-static inline struct nvkm_subdev *
-nv_subdev(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
- nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
-
-static inline int
-nv_subidx(struct nvkm_subdev *subdev)
-{
- return nv_hclass(subdev) & 0xff;
-}
-
-struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
-
-#define nvkm_subdev_create(p,e,o,v,s,f,d) \
- nvkm_subdev_create_((p), (e), (o), (v), (s), (f), \
- sizeof(**d),(void **)d)
+struct nvkm_subdev_func {
+ void *(*dtor)(struct nvkm_subdev *);
+ int (*preinit)(struct nvkm_subdev *);
+ int (*oneinit)(struct nvkm_subdev *);
+ int (*init)(struct nvkm_subdev *);
+ int (*fini)(struct nvkm_subdev *, bool suspend);
+ void (*intr)(struct nvkm_subdev *);
+};
-int nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32 pclass,
- const char *sname, const char *fname,
- int size, void **);
-void nvkm_subdev_destroy(struct nvkm_subdev *);
+extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
+ int index, u32 pmc_enable, struct nvkm_subdev *);
+void nvkm_subdev_del(struct nvkm_subdev **);
+int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
-void nvkm_subdev_reset(struct nvkm_object *);
-
-void _nvkm_subdev_dtor(struct nvkm_object *);
-int _nvkm_subdev_init(struct nvkm_object *);
-int _nvkm_subdev_fini(struct nvkm_object *, bool suspend);
-
-#define s_printk(s,l,f,a...) do { \
- if ((s)->debug >= OS_DBG_##l) { \
- nv_printk((s)->base.parent, (s)->name, l, f, ##a); \
+void nvkm_subdev_intr(struct nvkm_subdev *);
+
+/* subdev logging */
+#define nvkm_printk_(s,l,p,f,a...) do { \
+ struct nvkm_subdev *_subdev = (s); \
+ if (_subdev->debug >= (l)) { \
+ dev_##p(_subdev->device->dev, "%s: "f, \
+ nvkm_subdev_name[_subdev->index], ##a); \
} \
} while(0)
-
-static inline u8
-nv_rd08(void *obj, u32 addr)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- u8 data = ioread8(subdev->mmio + addr);
- nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
- return data;
-}
-
-static inline u16
-nv_rd16(void *obj, u32 addr)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- u16 data = ioread16_native(subdev->mmio + addr);
- nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
- return data;
-}
-
-static inline u32
-nv_rd32(void *obj, u32 addr)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- u32 data = ioread32_native(subdev->mmio + addr);
- nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
- return data;
-}
-
-static inline void
-nv_wr08(void *obj, u32 addr, u8 data)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
- iowrite8(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr16(void *obj, u32 addr, u16 data)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
- iowrite16_native(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr32(void *obj, u32 addr, u32 data)
-{
- struct nvkm_subdev *subdev = nv_subdev(obj);
- nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
- iowrite32_native(data, subdev->mmio + addr);
-}
-
-static inline u32
-nv_mask(void *obj, u32 addr, u32 mask, u32 data)
-{
- u32 temp = nv_rd32(obj, addr);
- nv_wr32(obj, addr, (temp & ~mask) | data);
- return temp;
-}
+#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
+#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL, crit, f, ##a)
+#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR, err, f, ##a)
+#define nvkm_warn(s,f,a...) nvkm_printk((s), WARN, notice, f, ##a)
+#define nvkm_info(s,f,a...) nvkm_printk((s), INFO, info, f, ##a)
+#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG, info, f, ##a)
+#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE, info, f, ##a)
+#define nvkm_spam(s,f,a...) nvkm_printk((s), SPAM, dbg, f, ##a)
#endif
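/* Illustrative sketch (not part of the patch): a subdev now pairs a
 * const function table with nvkm_subdev_ctor() and logs through the
 * severity macros above. The register offset is made up, and
 * nvkm_rd32() is assumed from the reworked core/device.h.
 */
static void
demo_intr(struct nvkm_subdev *subdev)
{
	u32 stat = nvkm_rd32(subdev->device, 0x001100); /* made-up reg */
	if (stat)
		nvkm_error(subdev, "intr %08x\n", stat);
	else
		nvkm_trace(subdev, "spurious intr\n");
}

static const struct nvkm_subdev_func
demo_subdev_func = {
	.intr = demo_intr,
};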
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
new file mode 100644
index 000000000000..5aa2480da25f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -0,0 +1,35 @@
+#ifndef __NVKM_DEVICE_TEGRA_H__
+#define __NVKM_DEVICE_TEGRA_H__
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nvkm_device_tegra {
+ struct nvkm_device device;
+ struct platform_device *pdev;
+ int irq;
+
+ struct reset_control *rst;
+ struct clk *clk;
+ struct clk *clk_pwr;
+
+ struct regulator *vdd;
+
+ struct {
+ /*
+ * Protects accesses to mm from subsystems
+ */
+ struct mutex mutex;
+
+ struct nvkm_mm mm;
+ struct iommu_domain *domain;
+ unsigned long pgshift;
+ } iommu;
+
+ int gpu_speedo;
+};
+
+int nvkm_device_tegra_new(struct platform_device *,
+ const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index e489beef2b92..904820558fc0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,5 +1,5 @@
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_bsp_oclass;
+#include <engine/xtensa.h>
+int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e832f729e1b4..e2e22cd5305b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,16 +1,9 @@
#ifndef __NVKM_CE_H__
#define __NVKM_CE_H__
-#include <core/engine.h>
+#include <engine/falcon.h>
-void gt215_ce_intr(struct nvkm_subdev *);
-
-extern struct nvkm_oclass gt215_ce_oclass;
-extern struct nvkm_oclass gf100_ce0_oclass;
-extern struct nvkm_oclass gf100_ce1_oclass;
-extern struct nvkm_oclass gk104_ce0_oclass;
-extern struct nvkm_oclass gk104_ce1_oclass;
-extern struct nvkm_oclass gk104_ce2_oclass;
-extern struct nvkm_oclass gm204_ce0_oclass;
-extern struct nvkm_oclass gm204_ce1_oclass;
-extern struct nvkm_oclass gm204_ce2_oclass;
+int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 57c29e91bad5..03fa57a7c30a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,5 +1,5 @@
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
-extern struct nvkm_oclass g84_cipher_oclass;
+int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
deleted file mode 100644
index 5d4805e67e76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
-#define __NOUVEAU_SUBDEV_DEVICE_H__
-
-#include <core/device.h>
-
-struct platform_device;
-
-enum nv_bus_type {
- NOUVEAU_BUS_PCI,
- NOUVEAU_BUS_PLATFORM,
-};
-
-#define nouveau_device_create(p,t,n,s,c,d,u) \
- nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d), \
- sizeof(**u), (void **)u)
-
-int nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
- const char *sname, const char *cfg, const char *dbg,
- int, void **);
-
-int nv04_identify(struct nouveau_device *);
-int nv10_identify(struct nouveau_device *);
-int nv20_identify(struct nouveau_device *);
-int nv30_identify(struct nouveau_device *);
-int nv40_identify(struct nouveau_device *);
-int nv50_identify(struct nouveau_device *);
-int nvc0_identify(struct nouveau_device *);
-int nve0_identify(struct nouveau_device *);
-int gm100_identify(struct nouveau_device *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index a5e1ed81312f..efc74d03346b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,32 +1,35 @@
#ifndef __NVKM_DISP_H__
#define __NVKM_DISP_H__
+#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
#include <core/engine.h>
#include <core/event.h>
struct nvkm_disp {
- struct nvkm_engine base;
+ const struct nvkm_disp_func *func;
+ struct nvkm_engine engine;
+
+ struct nvkm_oproxy *client;
struct list_head outp;
+ struct list_head conn;
struct nvkm_event hpd;
struct nvkm_event vblank;
-};
-static inline struct nvkm_disp *
-nvkm_disp(void *obj)
-{
- return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP);
-}
+ struct {
+ int nr;
+ } head;
+};
-extern struct nvkm_oclass *nv04_disp_oclass;
-extern struct nvkm_oclass *nv50_disp_oclass;
-extern struct nvkm_oclass *g84_disp_oclass;
-extern struct nvkm_oclass *gt200_disp_oclass;
-extern struct nvkm_oclass *g94_disp_oclass;
-extern struct nvkm_oclass *gt215_disp_oclass;
-extern struct nvkm_oclass *gf110_disp_oclass;
-extern struct nvkm_oclass *gk104_disp_oclass;
-extern struct nvkm_oclass *gk110_disp_oclass;
-extern struct nvkm_oclass *gm107_disp_oclass;
-extern struct nvkm_oclass *gm204_disp_oclass;
+int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
new file mode 100644
index 000000000000..114bfb737a81
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DMA_H__
+#define __NVKM_DMA_H__
+#include <core/engine.h>
+struct nvkm_client;
+
+struct nvkm_dmaobj {
+ const struct nvkm_dmaobj_func *func;
+ struct nvkm_dma *dma;
+
+ struct nvkm_object object;
+ u32 target;
+ u32 access;
+ u64 start;
+ u64 limit;
+
+ struct rb_node rb;
+ u64 handle; /*XXX HANDLE MERGE */
+};
+
+struct nvkm_dma {
+ const struct nvkm_dma_func *func;
+ struct nvkm_engine engine;
+};
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+
+int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+#endif
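/* Illustrative sketch (not part of the patch): DMA objects are now
 * looked up per-client by handle through the engine itself.
 */
static u64
demo_dma_limit(struct nvkm_dma *dma, struct nvkm_client *client, u64 handle)
{
	struct nvkm_dmaobj *dmaobj = nvkm_dma_search(dma, client, handle);
	return dmaobj ? dmaobj->limit : 0;
}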
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
deleted file mode 100644
index c4fce8afcf83..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVKM_DMAOBJ_H__
-#define __NVKM_DMAOBJ_H__
-#include <core/engine.h>
-struct nvkm_gpuobj;
-
-struct nvkm_dmaobj {
- struct nvkm_object base;
- u32 target;
- u32 access;
- u64 start;
- u64 limit;
-};
-
-struct nvkm_dmaeng {
- struct nvkm_engine base;
-
- /* creates a "physical" dma object from a struct nvkm_dmaobj */
- int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **);
-};
-
-extern struct nvkm_oclass *nv04_dmaeng_oclass;
-extern struct nvkm_oclass *nv50_dmaeng_oclass;
-extern struct nvkm_oclass *gf100_dmaeng_oclass;
-extern struct nvkm_oclass *gf110_dmaeng_oclass;
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index bd38cf9130fc..81c0bc66a9f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,41 +1,18 @@
#ifndef __NVKM_FALCON_H__
#define __NVKM_FALCON_H__
-#include <core/engctx.h>
-
-struct nvkm_falcon_chan {
- struct nvkm_engctx base;
-};
-
-#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d) \
- nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_falcon_context_destroy(d) \
- nvkm_engctx_destroy(&(d)->base)
-#define nvkm_falcon_context_init(d) \
- nvkm_engctx_init(&(d)->base)
-#define nvkm_falcon_context_fini(d,s) \
- nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
-#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
-#define _nvkm_falcon_context_init _nvkm_engctx_init
-#define _nvkm_falcon_context_fini _nvkm_engctx_fini
-#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
-
-struct nvkm_falcon_data {
- bool external;
-};
-
+#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
+struct nvkm_fifo_chan;
struct nvkm_falcon {
- struct nvkm_engine base;
+ const struct nvkm_falcon_func *func;
+ struct nvkm_engine engine;
u32 addr;
u8 version;
u8 secret;
- struct nvkm_gpuobj *core;
+ struct nvkm_memory *core;
bool external;
struct {
@@ -51,31 +28,21 @@ struct nvkm_falcon {
} data;
};
-#define nv_falcon(priv) (&(priv)->base)
-
-#define nvkm_falcon_create(p,e,c,b,d,i,f,r) \
- nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
- sizeof(**r),(void **)r)
-#define nvkm_falcon_destroy(p) \
- nvkm_engine_destroy(&(p)->base)
-#define nvkm_falcon_init(p) ({ \
- struct nvkm_falcon *falcon = (p); \
- _nvkm_falcon_init(nv_object(falcon)); \
-})
-#define nvkm_falcon_fini(p,s) ({ \
- struct nvkm_falcon *falcon = (p); \
- _nvkm_falcon_fini(nv_object(falcon), (s)); \
-})
-
-int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32, bool, const char *,
- const char *, int, void **);
+int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+ int index, bool enable, u32 addr, struct nvkm_engine **);
-void nvkm_falcon_intr(struct nvkm_subdev *subdev);
-
-#define _nvkm_falcon_dtor _nvkm_engine_dtor
-int _nvkm_falcon_init(struct nvkm_object *);
-int _nvkm_falcon_fini(struct nvkm_object *, bool);
-u32 _nvkm_falcon_rd32(struct nvkm_object *, u64);
-void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_falcon_func {
+ struct {
+ u32 *data;
+ u32 size;
+ } code;
+ struct {
+ u32 *data;
+ u32 size;
+ } data;
+ u32 pmc_enable;
+ void (*init)(struct nvkm_falcon *);
+ void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+ struct nvkm_sclass sclass[];
+};
#endif
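/* Illustrative sketch (not part of the patch): a falcon engine is now
 * described by a const nvkm_falcon_func -- PMC enable bit, optional
 * hooks and the classes it exposes -- and instantiated through
 * nvkm_falcon_new_(). All values below are placeholders.
 */
static const struct nvkm_falcon_func
demo_falcon = {
	.pmc_enable = 0x00004000,
	.sclass = {
		{ -1, -1, 0x000090b7 }, /* hypothetical class */
		{}
	}
};

static int
demo_falcon_new(struct nvkm_device *device, int index,
		struct nvkm_engine **pengine)
{
	return nvkm_falcon_new_(&demo_falcon, device, index,
				true /* enable */, 0x084000 /* addr */,
				pengine);
}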
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 97cdeab8e44c..9e6644955d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,127 +1,67 @@
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
-#include <core/namedb.h>
+#include <core/engine.h>
+#include <core/event.h>
+
+#define NVKM_FIFO_CHID_NR 4096
+
+struct nvkm_fifo_engn {
+ struct nvkm_object *object;
+ int refcount;
+ int usecount;
+};
struct nvkm_fifo_chan {
- struct nvkm_namedb namedb;
- struct nvkm_dmaobj *pushdma;
- struct nvkm_gpuobj *pushgpu;
+ const struct nvkm_fifo_chan_func *func;
+ struct nvkm_fifo *fifo;
+ u64 engines;
+ struct nvkm_object object;
+
+ struct list_head head;
+ u16 chid;
+ struct nvkm_gpuobj *inst;
+ struct nvkm_gpuobj *push;
+ struct nvkm_vm *vm;
void __iomem *user;
u64 addr;
u32 size;
- u16 chid;
- atomic_t refcnt; /* NV04_NVSW_SET_REF */
-};
-
-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
- return (void *)nv_namedb(obj);
-}
-
-#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
- nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
- (m), sizeof(**d), (void **)d)
-#define nvkm_fifo_channel_init(p) \
- nvkm_namedb_init(&(p)->namedb)
-#define nvkm_fifo_channel_fini(p,s) \
- nvkm_namedb_fini(&(p)->namedb, (s))
-
-int nvkm_fifo_channel_create_(struct nvkm_object *,
- struct nvkm_object *,
- struct nvkm_oclass *,
- int bar, u32 addr, u32 size, u32 push,
- u64 engmask, int len, void **);
-void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
-#define _nvkm_fifo_channel_init _nvkm_namedb_init
-#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
-
-void _nvkm_fifo_channel_dtor(struct nvkm_object *);
-int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
-u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
-void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
-int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-
-#include <core/gpuobj.h>
-
-struct nvkm_fifo_base {
- struct nvkm_gpuobj gpuobj;
+ struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
};
-#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d) \
- nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
-#define nvkm_fifo_context_destroy(p) \
- nvkm_gpuobj_destroy(&(p)->gpuobj)
-#define nvkm_fifo_context_init(p) \
- nvkm_gpuobj_init(&(p)->gpuobj)
-#define nvkm_fifo_context_fini(p,s) \
- nvkm_gpuobj_fini(&(p)->gpuobj, (s))
-
-#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
-#define _nvkm_fifo_context_init _nvkm_gpuobj_init
-#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
-#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
-
-#include <core/engine.h>
-#include <core/event.h>
-
struct nvkm_fifo {
- struct nvkm_engine base;
+ const struct nvkm_fifo_func *func;
+ struct nvkm_engine engine;
- struct nvkm_event cevent; /* channel creation event */
- struct nvkm_event uevent; /* async user trigger */
-
- struct nvkm_object **channel;
+ DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+ int nr;
+ struct list_head chan;
spinlock_t lock;
- u16 min;
- u16 max;
- int (*chid)(struct nvkm_fifo *, struct nvkm_object *);
- void (*pause)(struct nvkm_fifo *, unsigned long *);
- void (*start)(struct nvkm_fifo *, unsigned long *);
+ struct nvkm_event uevent; /* async user trigger */
+ struct nvkm_event cevent; /* channel creation event */
};
-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
- return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
-
-#define nvkm_fifo_create(o,e,c,fc,lc,d) \
- nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
-#define nvkm_fifo_init(p) \
- nvkm_engine_init(&(p)->base)
-#define nvkm_fifo_fini(p,s) \
- nvkm_engine_fini(&(p)->base, (s))
-
-int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int min, int max,
- int size, void **);
-void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
-
-#define _nvkm_fifo_init _nvkm_engine_init
-#define _nvkm_fifo_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass *nv04_fifo_oclass;
-extern struct nvkm_oclass *nv10_fifo_oclass;
-extern struct nvkm_oclass *nv17_fifo_oclass;
-extern struct nvkm_oclass *nv40_fifo_oclass;
-extern struct nvkm_oclass *nv50_fifo_oclass;
-extern struct nvkm_oclass *g84_fifo_oclass;
-extern struct nvkm_oclass *gf100_fifo_oclass;
-extern struct nvkm_oclass *gk104_fifo_oclass;
-extern struct nvkm_oclass *gk20a_fifo_oclass;
-extern struct nvkm_oclass *gk208_fifo_oclass;
-extern struct nvkm_oclass *gm204_fifo_oclass;
-
-int nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
- struct nvkm_notify *);
-void nvkm_fifo_uevent(struct nvkm_fifo *);
-
-void nv04_fifo_intr(struct nvkm_subdev *);
-int nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
+void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
+ struct nvkm_fifo_chan **);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
+
+int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
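/* Illustrative sketch (not part of the patch): the inst/chid lookups
 * appear to return with the fifo lock held, the flags cookie being
 * handed back through nvkm_fifo_chan_put().
 */
static void
demo_report_fault(struct nvkm_fifo *fifo, u64 inst)
{
	unsigned long flags;
	struct nvkm_fifo_chan *chan;

	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
	if (chan) {
		/* safe to inspect chan->chid etc. while the lock is held */
		nvkm_fifo_chan_put(fifo, flags, &chan);
	}
}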
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 7cbe20280760..f126e54d2e30 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,88 +1,46 @@
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
-#include <core/engctx.h>
-
-struct nvkm_gr_chan {
- struct nvkm_engctx base;
-};
-
-#define nvkm_gr_context_create(p,e,c,g,s,a,f,d) \
- nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_gr_context_destroy(d) \
- nvkm_engctx_destroy(&(d)->base)
-#define nvkm_gr_context_init(d) \
- nvkm_engctx_init(&(d)->base)
-#define nvkm_gr_context_fini(d,s) \
- nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
-#define _nvkm_gr_context_init _nvkm_engctx_init
-#define _nvkm_gr_context_fini _nvkm_engctx_fini
-#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
-
#include <core/engine.h>
struct nvkm_gr {
- struct nvkm_engine base;
-
- /* Returns chipset-specific counts of units packed into an u64.
- */
- u64 (*units)(struct nvkm_gr *);
+ const struct nvkm_gr_func *func;
+ struct nvkm_engine engine;
};
-static inline struct nvkm_gr *
-nvkm_gr(void *obj)
-{
- return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
-}
-
-#define nvkm_gr_create(p,e,c,y,d) \
- nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
-#define nvkm_gr_destroy(d) \
- nvkm_engine_destroy(&(d)->base)
-#define nvkm_gr_init(d) \
- nvkm_engine_init(&(d)->base)
-#define nvkm_gr_fini(d,s) \
- nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_gr_dtor _nvkm_engine_dtor
-#define _nvkm_gr_init _nvkm_engine_init
-#define _nvkm_gr_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv04_gr_oclass;
-extern struct nvkm_oclass nv10_gr_oclass;
-extern struct nvkm_oclass nv20_gr_oclass;
-extern struct nvkm_oclass nv25_gr_oclass;
-extern struct nvkm_oclass nv2a_gr_oclass;
-extern struct nvkm_oclass nv30_gr_oclass;
-extern struct nvkm_oclass nv34_gr_oclass;
-extern struct nvkm_oclass nv35_gr_oclass;
-extern struct nvkm_oclass nv40_gr_oclass;
-extern struct nvkm_oclass nv50_gr_oclass;
-extern struct nvkm_oclass *gf100_gr_oclass;
-extern struct nvkm_oclass *gf108_gr_oclass;
-extern struct nvkm_oclass *gf104_gr_oclass;
-extern struct nvkm_oclass *gf110_gr_oclass;
-extern struct nvkm_oclass *gf117_gr_oclass;
-extern struct nvkm_oclass *gf119_gr_oclass;
-extern struct nvkm_oclass *gk104_gr_oclass;
-extern struct nvkm_oclass *gk20a_gr_oclass;
-extern struct nvkm_oclass *gk110_gr_oclass;
-extern struct nvkm_oclass *gk110b_gr_oclass;
-extern struct nvkm_oclass *gk208_gr_oclass;
-extern struct nvkm_oclass *gm107_gr_oclass;
-extern struct nvkm_oclass *gm204_gr_oclass;
-extern struct nvkm_oclass *gm206_gr_oclass;
-
-#include <core/enum.h>
-
-extern const struct nvkm_bitfield nv04_gr_nsource[];
-extern struct nvkm_ofuncs nv04_gr_ofuncs;
-bool nv04_gr_idle(void *obj);
-
-extern const struct nvkm_bitfield nv10_gr_intr_name[];
-extern const struct nvkm_bitfield nv10_gr_nstatus[];
-
-extern const struct nvkm_enum nv50_data_error_names[];
+u64 nvkm_gr_units(struct nvkm_gr *);
+int nvkm_gr_tlb_flush(struct nvkm_gr *);
+
+int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4e500b398064..257738eff9f6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,62 +1,9 @@
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
-#include <core/engctx.h>
-
-struct nvkm_mpeg_chan {
- struct nvkm_engctx base;
-};
-
-#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d) \
- nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_mpeg_context_destroy(d) \
- nvkm_engctx_destroy(&(d)->base)
-#define nvkm_mpeg_context_init(d) \
- nvkm_engctx_init(&(d)->base)
-#define nvkm_mpeg_context_fini(d,s) \
- nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
-#define _nvkm_mpeg_context_init _nvkm_engctx_init
-#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
-#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
-
#include <core/engine.h>
-
-struct nvkm_mpeg {
- struct nvkm_engine base;
-};
-
-#define nvkm_mpeg_create(p,e,c,d) \
- nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
-#define nvkm_mpeg_destroy(d) \
- nvkm_engine_destroy(&(d)->base)
-#define nvkm_mpeg_init(d) \
- nvkm_engine_init(&(d)->base)
-#define nvkm_mpeg_fini(d,s) \
- nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_dtor _nvkm_engine_dtor
-#define _nvkm_mpeg_init _nvkm_engine_init
-#define _nvkm_mpeg_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv31_mpeg_oclass;
-extern struct nvkm_oclass nv40_mpeg_oclass;
-extern struct nvkm_oclass nv44_mpeg_oclass;
-extern struct nvkm_oclass nv50_mpeg_oclass;
-extern struct nvkm_oclass g84_mpeg_oclass;
-extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
-extern struct nvkm_oclass nv31_mpeg_cclass;
-extern struct nvkm_oclass nv31_mpeg_sclass[];
-extern struct nvkm_oclass nv40_mpeg_sclass[];
-void nv31_mpeg_intr(struct nvkm_subdev *);
-void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
-int nv31_mpeg_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
-int nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv50_mpeg_intr(struct nvkm_subdev *);
-int nv50_mpeg_init(struct nvkm_object *);
+int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 54b7672eed9c..08516ca82e04 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,7 +1,8 @@
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_mspdec_oclass;
-extern struct nvkm_oclass gf100_mspdec_oclass;
-extern struct nvkm_oclass gk104_mspdec_oclass;
+#include <engine/falcon.h>
+int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index c6c69d0a8d01..85fd306021ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,6 +1,7 @@
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msppp_oclass;
-extern struct nvkm_oclass gf100_msppp_oclass;
+#include <engine/falcon.h>
+int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 1f193b7bd6c5..99757ed96f76 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,7 +1,9 @@
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msvld_oclass;
-extern struct nvkm_oclass gf100_msvld_oclass;
-extern struct nvkm_oclass gk104_msvld_oclass;
+#include <engine/falcon.h>
+int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 93181bbf0f63..240855ad8c8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -2,33 +2,24 @@
#define __NVKM_PM_H__
#include <core/engine.h>
-struct nvkm_perfdom;
-struct nvkm_perfctr;
struct nvkm_pm {
- struct nvkm_engine base;
+ const struct nvkm_pm_func *func;
+ struct nvkm_engine engine;
- struct nvkm_perfctx *context;
- void *profile_data;
+ struct nvkm_object *perfmon;
struct list_head domains;
+ struct list_head sources;
u32 sequence;
-
- /*XXX: temp for daemon backend */
- u32 pwr[8];
- u32 last;
};
-static inline struct nvkm_pm *
-nvkm_pm(void *obj)
-{
- return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM);
-}
-
-extern struct nvkm_oclass *nv40_pm_oclass;
-extern struct nvkm_oclass *nv50_pm_oclass;
-extern struct nvkm_oclass *g84_pm_oclass;
-extern struct nvkm_oclass *gt215_pm_oclass;
-extern struct nvkm_oclass gf100_pm_oclass;
-extern struct nvkm_oclass gk104_pm_oclass;
-extern struct nvkm_oclass gk110_pm_oclass;
+int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 44590a2a479d..7317ef4c0207 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,5 +1,5 @@
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_sec_oclass;
+#include <engine/falcon.h>
+int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index a529013c92ab..096e7dbd1e65 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,50 +1,18 @@
#ifndef __NVKM_SW_H__
#define __NVKM_SW_H__
-#include <core/engctx.h>
-
-struct nvkm_sw_chan {
- struct nvkm_engctx base;
-
- int (*flip)(void *);
- void *flip_data;
-};
-
-#define nvkm_sw_context_create(p,e,c,d) \
- nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
-#define nvkm_sw_context_destroy(d) \
- nvkm_engctx_destroy(&(d)->base)
-#define nvkm_sw_context_init(d) \
- nvkm_engctx_init(&(d)->base)
-#define nvkm_sw_context_fini(d,s) \
- nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
-#define _nvkm_sw_context_init _nvkm_engctx_init
-#define _nvkm_sw_context_fini _nvkm_engctx_fini
-
#include <core/engine.h>
struct nvkm_sw {
- struct nvkm_engine base;
-};
+ const struct nvkm_sw_func *func;
+ struct nvkm_engine engine;
-#define nvkm_sw_create(p,e,c,d) \
- nvkm_engine_create((p), (e), (c), true, "SW", "software", (d))
-#define nvkm_sw_destroy(d) \
- nvkm_engine_destroy(&(d)->base)
-#define nvkm_sw_init(d) \
- nvkm_engine_init(&(d)->base)
-#define nvkm_sw_fini(d,s) \
- nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_sw_dtor _nvkm_engine_dtor
-#define _nvkm_sw_init _nvkm_engine_init
-#define _nvkm_sw_fini _nvkm_engine_fini
+ struct list_head chan;
+};
-extern struct nvkm_oclass *nv04_sw_oclass;
-extern struct nvkm_oclass *nv10_sw_oclass;
-extern struct nvkm_oclass *nv50_sw_oclass;
-extern struct nvkm_oclass *gf100_sw_oclass;
+bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
-void nv04_sw_intr(struct nvkm_subdev *);
+int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 7851f18c5add..616ea91e03f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,5 +1,5 @@
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_vp_oclass;
+#include <engine/xtensa.h>
+int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 7a216cca2865..3128d21a5d1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,35 +1,23 @@
#ifndef __NVKM_XTENSA_H__
#define __NVKM_XTENSA_H__
+#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
#include <core/engine.h>
-struct nvkm_gpuobj;
struct nvkm_xtensa {
- struct nvkm_engine base;
-
+ const struct nvkm_xtensa_func *func;
u32 addr;
- struct nvkm_gpuobj *gpu_fw;
- u32 fifo_val;
- u32 unkd28;
-};
+ struct nvkm_engine engine;
-#define nvkm_xtensa_create(p,e,c,b,d,i,f,r) \
- nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f), \
- sizeof(**r),(void **)r)
+ struct nvkm_memory *gpu_fw;
+};
-int _nvkm_xtensa_engctx_ctor(struct nvkm_object *,
- struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
+int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
+ int index, bool enable, u32 addr, struct nvkm_engine **);
-void _nvkm_xtensa_intr(struct nvkm_subdev *);
-int nvkm_xtensa_create_(struct nvkm_object *,
- struct nvkm_object *,
- struct nvkm_oclass *, u32, bool,
- const char *, const char *,
- int, void **);
-#define _nvkm_xtensa_dtor _nvkm_engine_dtor
-int _nvkm_xtensa_init(struct nvkm_object *);
-int _nvkm_xtensa_fini(struct nvkm_object *, bool);
-u32 _nvkm_xtensa_rd32(struct nvkm_object *, u64);
-void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_xtensa_func {
+ u32 pmc_enable;
+ u32 fifo_val;
+ u32 unkd28;
+ struct nvkm_sclass sclass[];
+};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index c7a007b8bc10..d3071b5a4f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,33 +1,24 @@
#ifndef __NVKM_BAR_H__
#define __NVKM_BAR_H__
#include <core/subdev.h>
-struct nvkm_mem;
struct nvkm_vma;
struct nvkm_bar {
- struct nvkm_subdev base;
+ const struct nvkm_bar_func *func;
+ struct nvkm_subdev subdev;
- int (*alloc)(struct nvkm_bar *, struct nvkm_object *,
- struct nvkm_mem *, struct nvkm_object **);
-
- int (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
- struct nvkm_vma *);
- int (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
- struct nvkm_vma *);
- void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
- void (*flush)(struct nvkm_bar *);
+ spinlock_t lock;
/* whether the BAR supports being ioremapped WC or should be uncached */
bool iomap_uncached;
};
-static inline struct nvkm_bar *
-nvkm_bar(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
-}
+void nvkm_bar_flush(struct nvkm_bar *);
+struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
+int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
-extern struct nvkm_oclass nv50_bar_oclass;
-extern struct nvkm_oclass gf100_bar_oclass;
-extern struct nvkm_oclass gk20a_bar_oclass;
+int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index cef287e0bbf2..e39a1fea930b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -3,7 +3,7 @@
#include <core/subdev.h>
struct nvkm_bios {
- struct nvkm_subdev base;
+ struct nvkm_subdev subdev;
u32 size;
u8 *data;
@@ -19,14 +19,13 @@ struct nvkm_bios {
} version;
};
-static inline struct nvkm_bios *
-nvkm_bios(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
-}
-
u8 nvbios_checksum(const u8 *data, int size);
u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
+
+#define nvbios_rd08(b,o) (b)->data[(o)]
+#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
+#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
-extern struct nvkm_oclass nvkm_bios_oclass;
+int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 4107aa546a21..3f0c7c414026 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -4,8 +4,8 @@ static inline u16
bmp_version(struct nvkm_bios *bios)
{
if (bios->bmp_offset) {
- return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
- nv_ro08(bios, bios->bmp_offset + 6);
+ return nvbios_rd08(bios, bios->bmp_offset + 5) << 8 |
+ nvbios_rd08(bios, bios->bmp_offset + 6);
}
return 0x0000;
@@ -15,7 +15,7 @@ static inline u16
bmp_mem_init_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
- return nv_ro16(bios, bios->bmp_offset + 24);
+ return nvbios_rd16(bios, bios->bmp_offset + 24);
return 0x0000;
}
@@ -23,7 +23,7 @@ static inline u16
bmp_sdr_seq_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
- return nv_ro16(bios, bios->bmp_offset + 26);
+ return nvbios_rd16(bios, bios->bmp_offset + 26);
return 0x0000;
}
@@ -31,7 +31,7 @@ static inline u16
bmp_ddr_seq_table(struct nvkm_bios *bios)
{
if (bmp_version(bios) >= 0x0300)
- return nv_ro16(bios, bios->bmp_offset + 28);
+ return nvbios_rd16(bios, bios->bmp_offset + 28);
return 0x0000;
}
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 578a667eed3b..4dc1c8af840c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,5 +1,6 @@
#ifndef __NVBIOS_INIT_H__
#define __NVBIOS_INIT_H__
+
struct nvbios_init {
struct nvkm_subdev *subdev;
struct nvkm_bios *bios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 420426793880..3a9abd38aca8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -7,6 +7,11 @@ struct nvbios_ramcfg {
unsigned rammap_max;
union {
struct {
+ unsigned rammap_00_16_20:1;
+ unsigned rammap_00_16_40:1;
+ unsigned rammap_00_17_02:1;
+ };
+ struct {
unsigned rammap_10_04_02:1;
unsigned rammap_10_04_08:1;
};
@@ -32,15 +37,32 @@ struct nvbios_ramcfg {
unsigned ramcfg_ver;
unsigned ramcfg_hdr;
unsigned ramcfg_timing;
+ unsigned ramcfg_DLLoff;
+ unsigned ramcfg_RON;
union {
struct {
+ unsigned ramcfg_00_03_01:1;
+ unsigned ramcfg_00_03_02:1;
+ unsigned ramcfg_00_03_08:1;
+ unsigned ramcfg_00_03_10:1;
+ unsigned ramcfg_00_04_02:1;
+ unsigned ramcfg_00_04_04:1;
+ unsigned ramcfg_00_04_20:1;
+ unsigned ramcfg_00_05:8;
+ unsigned ramcfg_00_06:8;
+ unsigned ramcfg_00_07:8;
+ unsigned ramcfg_00_08:8;
+ unsigned ramcfg_00_09:8;
+ unsigned ramcfg_00_0a_0f:4;
+ unsigned ramcfg_00_0a_f0:4;
+ };
+ struct {
unsigned ramcfg_10_02_01:1;
unsigned ramcfg_10_02_02:1;
unsigned ramcfg_10_02_04:1;
unsigned ramcfg_10_02_08:1;
unsigned ramcfg_10_02_10:1;
unsigned ramcfg_10_02_20:1;
- unsigned ramcfg_10_DLLoff:1;
unsigned ramcfg_10_03_0f:4;
unsigned ramcfg_10_04_01:1;
unsigned ramcfg_10_05:8;
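
[editor's sketch] The anonymous-struct union above lets each ramcfg table revision expose bitfields named after its own version while sharing one struct nvbios_ramcfg. A hedged sketch of how a hypothetical v1.0 parser would fill its members (the byte offset and bit positions are invented for illustration):

#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>

static void
example_parse_v10(struct nvkm_bios *bios, u32 data, struct nvbios_ramcfg *p)
{
	u8 byte02 = nvbios_rd08(bios, data + 0x02); /* illustrative offset */
	p->ramcfg_10_02_01 = (byte02 & 0x01) >> 0;
	p->ramcfg_10_02_02 = (byte02 & 0x02) >> 1;
}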
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 609a905ec780..8d8ee13721ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -7,6 +7,8 @@ u32 nvbios_rammapTe(struct nvkm_bios *, u8 *ver, u8 *hdr,
u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+ struct nvbios_ramcfg *p);
u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
@@ -15,6 +17,8 @@ u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+ struct nvbios_ramcfg *p);
u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index fba83c04849e..6a04d9c07944 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -2,49 +2,23 @@
#define __NVKM_BUS_H__
#include <core/subdev.h>
-struct nvkm_bus_intr {
- u32 stat;
- u32 unit;
-};
-
struct nvkm_bus {
- struct nvkm_subdev base;
- int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
- u32 hwsq_size;
+ const struct nvkm_bus_func *func;
+ struct nvkm_subdev subdev;
};
-static inline struct nvkm_bus *
-nvkm_bus(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
-}
-
-#define nvkm_bus_create(p, e, o, d) \
- nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
- sizeof(**d), (void **)d)
-#define nvkm_bus_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_bus_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_bus_fini(p, s) \
- nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_bus_dtor _nvkm_subdev_dtor
-#define _nvkm_bus_init _nvkm_subdev_init
-#define _nvkm_bus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass *nv04_bus_oclass;
-extern struct nvkm_oclass *nv31_bus_oclass;
-extern struct nvkm_oclass *nv50_bus_oclass;
-extern struct nvkm_oclass *g94_bus_oclass;
-extern struct nvkm_oclass *gf100_bus_oclass;
-
/* interface to sequencer */
struct nvkm_hwsq;
-int nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **);
+int nvkm_hwsq_init(struct nvkm_subdev *, struct nvkm_hwsq **);
int nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
+
+int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
#endif
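
[editor's sketch] The sequencer interface above now hangs off a subdev rather than the bus object. A sketch of the intended flow, with invented register and delay values: build a script with the wr32/nsec helpers, then let nvkm_hwsq_fini() execute it when 'exec' is true.

#include <subdev/bus.h>

static int
example_hwsq(struct nvkm_subdev *subdev)
{
	struct nvkm_hwsq *hwsq;
	int ret = nvkm_hwsq_init(subdev, &hwsq);
	if (ret)
		return ret;
	nvkm_hwsq_wr32(hwsq, 0x004000, 0x00000001); /* illustrative reg */
	nvkm_hwsq_nsec(hwsq, 2000);                 /* settle delay */
	return nvkm_hwsq_fini(&hwsq, true);         /* true = execute */
}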
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index f5d303850d8c..8708f0a4e188 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -71,9 +71,10 @@ struct nvkm_domain {
};
struct nvkm_clk {
- struct nvkm_subdev base;
+ const struct nvkm_clk_func *func;
+ struct nvkm_subdev subdev;
- struct nvkm_domain *domains;
+ const struct nvkm_domain *domains;
struct nvkm_pstate bstate;
struct list_head states;
@@ -94,68 +95,27 @@ struct nvkm_clk {
bool allow_reclock;
- int (*read)(struct nvkm_clk *, enum nv_clk_src);
- int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
- int (*prog)(struct nvkm_clk *);
- void (*tidy)(struct nvkm_clk *);
-
/*XXX: die, these are here *only* to support the completely
- * bat-shit insane what-was-nvkm_hw.c code
+ * bat-shit insane what-was-nouveau_hw.c code
*/
int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
struct nvkm_pll_vals *pv);
int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
};
-static inline struct nvkm_clk *
-nvkm_clk(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
-}
-
-#define nvkm_clk_create(p,e,o,i,r,s,n,d) \
- nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
- (void **)d)
-#define nvkm_clk_destroy(p) ({ \
- struct nvkm_clk *clk = (p); \
- _nvkm_clk_dtor(nv_object(clk)); \
-})
-#define nvkm_clk_init(p) ({ \
- struct nvkm_clk *clk = (p); \
- _nvkm_clk_init(nv_object(clk)); \
-})
-#define nvkm_clk_fini(p,s) ({ \
- struct nvkm_clk *clk = (p); \
- _nvkm_clk_fini(nv_object(clk), (s)); \
-})
-
-int nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *,
- struct nvkm_domain *, struct nvkm_pstate *,
- int, bool, int, void **);
-void _nvkm_clk_dtor(struct nvkm_object *);
-int _nvkm_clk_init(struct nvkm_object *);
-int _nvkm_clk_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nv04_clk_oclass;
-extern struct nvkm_oclass nv40_clk_oclass;
-extern struct nvkm_oclass *nv50_clk_oclass;
-extern struct nvkm_oclass *g84_clk_oclass;
-extern struct nvkm_oclass *mcp77_clk_oclass;
-extern struct nvkm_oclass gt215_clk_oclass;
-extern struct nvkm_oclass gf100_clk_oclass;
-extern struct nvkm_oclass gk104_clk_oclass;
-extern struct nvkm_oclass gk20a_clk_oclass;
-
-int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
-int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
- struct nvkm_pll_vals *);
-int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
-int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
- int clk, struct nvkm_pll_vals *);
-
+int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+
+int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index d1bbe0d62b35..6c1407fd317b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,32 +1,31 @@
#ifndef __NVKM_DEVINIT_H__
#define __NVKM_DEVINIT_H__
#include <core/subdev.h>
+struct nvkm_devinit;
struct nvkm_devinit {
- struct nvkm_subdev base;
+ const struct nvkm_devinit_func *func;
+ struct nvkm_subdev subdev;
bool post;
- void (*meminit)(struct nvkm_devinit *);
- int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
- u32 (*mmio)(struct nvkm_devinit *, u32 addr);
};
-static inline struct nvkm_devinit *
-nvkm_devinit(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT);
-}
+u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
+int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
+void nvkm_devinit_meminit(struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
+int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
-extern struct nvkm_oclass *nv04_devinit_oclass;
-extern struct nvkm_oclass *nv05_devinit_oclass;
-extern struct nvkm_oclass *nv10_devinit_oclass;
-extern struct nvkm_oclass *nv1a_devinit_oclass;
-extern struct nvkm_oclass *nv20_devinit_oclass;
-extern struct nvkm_oclass *nv50_devinit_oclass;
-extern struct nvkm_oclass *g84_devinit_oclass;
-extern struct nvkm_oclass *g98_devinit_oclass;
-extern struct nvkm_oclass *gt215_devinit_oclass;
-extern struct nvkm_oclass *mcp89_devinit_oclass;
-extern struct nvkm_oclass *gf100_devinit_oclass;
-extern struct nvkm_oclass *gm107_devinit_oclass;
-extern struct nvkm_oclass *gm204_devinit_oclass;
+int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
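
[editor's sketch] A sketch of the POST entry point above: run devinit if required and collect the disable mask. Interpreting 'disable' as a mask of engines the VBIOS reports as absent is an assumption inferred from the u64 return of nvkm_devinit_disable():

#include <subdev/devinit.h>

static int
example_post(struct nvkm_devinit *init)
{
	u64 disable = 0;
	int ret = nvkm_devinit_post(init, &disable);
	if (ret)
		return ret;
	/* 'disable' is assumed to flag engines unavailable on this board */
	return 0;
}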
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 16da56cf43b0..85ab72c7f821 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -18,7 +18,7 @@
#define NV_MEM_TARGET_VM 3
#define NV_MEM_TARGET_GART 4
-#define NV_MEM_TYPE_VM 0x7f
+#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
struct nvkm_mem {
@@ -46,62 +46,47 @@ struct nvkm_fb_tile {
};
struct nvkm_fb {
- struct nvkm_subdev base;
-
- bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+ const struct nvkm_fb_func *func;
+ struct nvkm_subdev subdev;
struct nvkm_ram *ram;
- struct nvkm_mm vram;
- struct nvkm_mm tags;
-
struct {
struct nvkm_fb_tile region[16];
int regions;
- void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
- void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
- struct nvkm_fb_tile *);
- void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
- void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
} tile;
};
-static inline struct nvkm_fb *
-nvkm_fb(void *obj)
-{
- /* fbram uses this before device subdev pointer is valid */
- if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
- nv_subidx(obj) == NVDEV_SUBDEV_FB)
- return obj;
-
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB);
-}
-
-extern struct nvkm_oclass *nv04_fb_oclass;
-extern struct nvkm_oclass *nv10_fb_oclass;
-extern struct nvkm_oclass *nv1a_fb_oclass;
-extern struct nvkm_oclass *nv20_fb_oclass;
-extern struct nvkm_oclass *nv25_fb_oclass;
-extern struct nvkm_oclass *nv30_fb_oclass;
-extern struct nvkm_oclass *nv35_fb_oclass;
-extern struct nvkm_oclass *nv36_fb_oclass;
-extern struct nvkm_oclass *nv40_fb_oclass;
-extern struct nvkm_oclass *nv41_fb_oclass;
-extern struct nvkm_oclass *nv44_fb_oclass;
-extern struct nvkm_oclass *nv46_fb_oclass;
-extern struct nvkm_oclass *nv47_fb_oclass;
-extern struct nvkm_oclass *nv49_fb_oclass;
-extern struct nvkm_oclass *nv4e_fb_oclass;
-extern struct nvkm_oclass *nv50_fb_oclass;
-extern struct nvkm_oclass *g84_fb_oclass;
-extern struct nvkm_oclass *gt215_fb_oclass;
-extern struct nvkm_oclass *mcp77_fb_oclass;
-extern struct nvkm_oclass *mcp89_fb_oclass;
-extern struct nvkm_oclass *gf100_fb_oclass;
-extern struct nvkm_oclass *gk104_fb_oclass;
-extern struct nvkm_oclass *gk20a_fb_oclass;
-extern struct nvkm_oclass *gm107_fb_oclass;
+bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+
+int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -112,36 +97,35 @@ struct nvkm_ram_data {
u32 freq;
};
+enum nvkm_ram_type {
+ NVKM_RAM_TYPE_UNKNOWN = 0,
+ NVKM_RAM_TYPE_STOLEN,
+ NVKM_RAM_TYPE_SGRAM,
+ NVKM_RAM_TYPE_SDRAM,
+ NVKM_RAM_TYPE_DDR1,
+ NVKM_RAM_TYPE_DDR2,
+ NVKM_RAM_TYPE_DDR3,
+ NVKM_RAM_TYPE_GDDR2,
+ NVKM_RAM_TYPE_GDDR3,
+ NVKM_RAM_TYPE_GDDR4,
+ NVKM_RAM_TYPE_GDDR5
+};
+
struct nvkm_ram {
- struct nvkm_object base;
- enum {
- NV_MEM_TYPE_UNKNOWN = 0,
- NV_MEM_TYPE_STOLEN,
- NV_MEM_TYPE_SGRAM,
- NV_MEM_TYPE_SDRAM,
- NV_MEM_TYPE_DDR1,
- NV_MEM_TYPE_DDR2,
- NV_MEM_TYPE_DDR3,
- NV_MEM_TYPE_GDDR2,
- NV_MEM_TYPE_GDDR3,
- NV_MEM_TYPE_GDDR4,
- NV_MEM_TYPE_GDDR5
- } type;
- u64 stolen;
+ const struct nvkm_ram_func *func;
+ struct nvkm_fb *fb;
+ enum nvkm_ram_type type;
u64 size;
- u32 tags;
+
+#define NVKM_RAM_MM_SHIFT 12
+ struct nvkm_mm vram;
+ struct nvkm_mm tags;
+ u64 stolen;
int ranks;
int parts;
int part_mask;
- int (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
- u32 type, struct nvkm_mem **);
- void (*put)(struct nvkm_fb *, struct nvkm_mem **);
-
- int (*calc)(struct nvkm_fb *, u32 freq);
- int (*prog)(struct nvkm_fb *);
- void (*tidy)(struct nvkm_fb *);
u32 freq;
u32 mr[16];
u32 mr1_nuts;
@@ -151,4 +135,17 @@ struct nvkm_ram {
struct nvkm_ram_data xition;
struct nvkm_ram_data target;
};
+
+struct nvkm_ram_func {
+ void *(*dtor)(struct nvkm_ram *);
+ int (*init)(struct nvkm_ram *);
+
+ int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nvkm_mem **);
+ void (*put)(struct nvkm_ram *, struct nvkm_mem **);
+
+ int (*calc)(struct nvkm_ram *, u32 freq);
+ int (*prog)(struct nvkm_ram *);
+ void (*tidy)(struct nvkm_ram *);
+};
#endif
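
[editor's sketch] The nvkm_ram_func table above is the hook set a RAM backend fills in, replacing the function pointers that used to live in nvkm_ram itself. A minimal sketch of a backend implementing only .init; leaving the remaining hooks NULL is an assumption of this sketch, not a statement about which hooks are mandatory:

#include <subdev/fb.h>

static int
example_ram_init(struct nvkm_ram *ram)
{
	return 0; /* nothing to program in this sketch */
}

static const struct nvkm_ram_func
example_ram_func = {
	.init = example_ram_init,
	/* .dtor/.get/.put/.calc/.prog/.tidy omitted in this sketch */
};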
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index a1384786adc9..ae201e388487 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,28 +1,16 @@
#ifndef __NVKM_FUSE_H__
#define __NVKM_FUSE_H__
#include <core/subdev.h>
-#include <core/device.h>
struct nvkm_fuse {
- struct nvkm_subdev base;
+ const struct nvkm_fuse_func *func;
+ struct nvkm_subdev subdev;
+ spinlock_t lock;
};
-static inline struct nvkm_fuse *
-nvkm_fuse(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
-}
+u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
-#define nvkm_fuse_create(p, e, o, d) \
- nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
-
-int nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_fuse_dtor(struct nvkm_object *);
-int _nvkm_fuse_init(struct nvkm_object *);
-#define _nvkm_fuse_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_fuse_oclass;
-extern struct nvkm_oclass gf100_fuse_oclass;
-extern struct nvkm_oclass gm107_fuse_oclass;
+int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ca5099a81b5a..9b9c6d2f90b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -19,26 +19,21 @@ struct nvkm_gpio_ntfy_rep {
};
struct nvkm_gpio {
- struct nvkm_subdev base;
+ const struct nvkm_gpio_func *func;
+ struct nvkm_subdev subdev;
struct nvkm_event event;
-
- void (*reset)(struct nvkm_gpio *, u8 func);
- int (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
- struct dcb_gpio_func *);
- int (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
- int (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
};
-static inline struct nvkm_gpio *
-nvkm_gpio(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO);
-}
-
-extern struct nvkm_oclass *nv10_gpio_oclass;
-extern struct nvkm_oclass *nv50_gpio_oclass;
-extern struct nvkm_oclass *g94_gpio_oclass;
-extern struct nvkm_oclass *gf110_gpio_oclass;
-extern struct nvkm_oclass *gk104_gpio_oclass;
+void nvkm_gpio_reset(struct nvkm_gpio *, u8 func);
+int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
+ struct dcb_gpio_func *);
+int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
+int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
+
+int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
#endif
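
[editor's sketch] A hedged example of the GPIO helpers above: look up a function by DCB tag, then drive the line. Both the tag value passed in and the use of 0xff as an "any line" wildcard are assumptions made for illustration:

#include <subdev/gpio.h>

static int
example_gpio_assert(struct nvkm_gpio *gpio, u8 tag)
{
	struct dcb_gpio_func func;
	int ret = nvkm_gpio_find(gpio, 0, tag, 0xff, &func);
	if (ret)
		return ret;
	return nvkm_gpio_set(gpio, 0, tag, 0xff, 1); /* assert the line */
}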
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a2e33730f05e..6b6224dbd5bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -6,15 +6,6 @@
#include <subdev/bios.h>
#include <subdev/bios/i2c.h>
-#define NV_I2C_PORT(n) (0x00 + (n))
-#define NV_I2C_AUX(n) (0x10 + (n))
-#define NV_I2C_EXT(n) (0x20 + (n))
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
-#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
-#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-
struct nvkm_i2c_ntfy_req {
#define NVKM_I2C_PLUG 0x01
#define NVKM_I2C_UNPLUG 0x02
@@ -29,72 +20,79 @@ struct nvkm_i2c_ntfy_rep {
u8 mask;
};
-struct nvkm_i2c_port {
- struct nvkm_object base;
- struct i2c_adapter adapter;
- struct mutex mutex;
+struct nvkm_i2c_bus_probe {
+ struct i2c_board_info dev;
+ u8 udelay; /* set to 0 to use the standard delay */
+};
- struct list_head head;
- u8 index;
- int aux;
+struct nvkm_i2c_bus {
+ const struct nvkm_i2c_bus_func *func;
+ struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */ (n)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */ -1
+#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */ -2
+ int id;
- const struct nvkm_i2c_func *func;
+ struct mutex mutex;
+ struct list_head head;
+ struct i2c_adapter i2c;
};
-struct nvkm_i2c_func {
- void (*drive_scl)(struct nvkm_i2c_port *, int);
- void (*drive_sda)(struct nvkm_i2c_port *, int);
- int (*sense_scl)(struct nvkm_i2c_port *);
- int (*sense_sda)(struct nvkm_i2c_port *);
+int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_release(struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_probe(struct nvkm_i2c_bus *, const char *,
+ struct nvkm_i2c_bus_probe *,
+ bool (*)(struct nvkm_i2c_bus *,
+ struct i2c_board_info *, void *), void *);
- int (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8);
- int (*pattern)(struct nvkm_i2c_port *, int pattern);
- int (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh);
- int (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe);
-};
+struct nvkm_i2c_aux {
+ const struct nvkm_i2c_aux_func *func;
+ struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */ (n)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+ int id;
-struct nvkm_i2c_board_info {
- struct i2c_board_info dev;
- u8 udelay; /* set to 0 to use the standard delay */
+ struct mutex mutex;
+ struct list_head head;
+ struct i2c_adapter i2c;
+
+ u32 intr;
};
+void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
+int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+ u32 addr, u8 *data, u8 size);
+int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+ bool enhanced_framing);
+
struct nvkm_i2c {
- struct nvkm_subdev base;
- struct nvkm_event event;
+ const struct nvkm_i2c_func *func;
+ struct nvkm_subdev subdev;
- struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index);
- struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type);
- int (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout);
- void (*release_pad)(struct nvkm_i2c_port *);
- int (*acquire)(struct nvkm_i2c_port *, unsigned long timeout);
- void (*release)(struct nvkm_i2c_port *);
- int (*identify)(struct nvkm_i2c *, int index,
- const char *what, struct nvkm_i2c_board_info *,
- bool (*match)(struct nvkm_i2c_port *,
- struct i2c_board_info *, void *),
- void *);
-
- wait_queue_head_t wait;
- struct list_head ports;
+ struct list_head pad;
+ struct list_head bus;
+ struct list_head aux;
+
+ struct nvkm_event event;
};
-static inline struct nvkm_i2c *
-nvkm_i2c(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
-}
+struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
+struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
-extern struct nvkm_oclass *nv04_i2c_oclass;
-extern struct nvkm_oclass *nv4e_i2c_oclass;
-extern struct nvkm_oclass *nv50_i2c_oclass;
-extern struct nvkm_oclass *g94_i2c_oclass;
-extern struct nvkm_oclass *gf110_i2c_oclass;
-extern struct nvkm_oclass *gf117_i2c_oclass;
-extern struct nvkm_oclass *gk104_i2c_oclass;
-extern struct nvkm_oclass *gm204_i2c_oclass;
+int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int
-nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
+nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
{
u8 val;
struct i2c_msg msgs[] = {
@@ -102,7 +100,7 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
};
- int ret = i2c_transfer(&port->adapter, msgs, 2);
+ int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
if (ret != 2)
return -EIO;
@@ -110,14 +108,14 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
}
static inline int
-nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
+nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
{
u8 buf[2] = { reg, val };
struct i2c_msg msgs[] = {
{ .addr = addr, .flags = 0, .len = 2, .buf = buf },
};
- int ret = i2c_transfer(&port->adapter, msgs, 1);
+ int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
if (ret != 1)
return -EIO;
@@ -125,11 +123,30 @@ nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
}
static inline bool
-nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr)
+nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
{
- return nv_rdi2cr(port, addr, 0) >= 0;
+ return nvkm_rdi2cr(adap, addr, 0) >= 0;
}
-int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+static inline int
+nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+ int ret = nvkm_i2c_aux_acquire(aux);
+ if (ret == 0) {
+ ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+ nvkm_i2c_aux_release(aux);
+ }
+ return ret;
+}
+
+static inline int
+nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+ int ret = nvkm_i2c_aux_acquire(aux);
+ if (ret == 0) {
+ ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+ nvkm_i2c_aux_release(aux);
+ }
+ return ret;
+}
#endif
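
[editor's sketch] A sketch tying the pieces above together: find a bus and an AUX channel by id, probe an i2c address, and read a byte over DP AUX via the acquire/xfer/release wrapper. The 0x50 EDID address and DPCD offset 0x00000 (receiver revision) are conventional; the CCB index 0 is an assumption:

static void
example_i2c(struct nvkm_i2c *i2c)
{
	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(0));
	u8 dpcd_rev;

	if (bus && nvkm_i2c_bus_acquire(bus) == 0) {
		bool present = nvkm_probe_i2c(&bus->i2c, 0x50); /* EDID addr */
		nvkm_i2c_bus_release(bus);
		(void)present;
	}

	if (aux && nvkm_rdaux(aux, 0x00000, &dpcd_rev, 1) == 0) {
		/* dpcd_rev now holds DPCD 0x00000 */
	}
}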
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 2150d8af0040..9d512cd5a0a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -2,31 +2,7 @@
#define __NVKM_IBUS_H__
#include <core/subdev.h>
-struct nvkm_ibus {
- struct nvkm_subdev base;
-};
-
-static inline struct nvkm_ibus *
-nvkm_ibus(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
-}
-
-#define nvkm_ibus_create(p,e,o,d) \
- nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
- sizeof(**d), (void **)d)
-#define nvkm_ibus_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_ibus_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_ibus_fini(p,s) \
- nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_ibus_dtor _nvkm_subdev_dtor
-#define _nvkm_ibus_init _nvkm_subdev_init
-#define _nvkm_ibus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass gf100_ibus_oclass;
-extern struct nvkm_oclass gk104_ibus_oclass;
-extern struct nvkm_oclass gk20a_ibus_oclass;
+int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 1bcb763cfca0..28bc202f9753 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,49 +1,29 @@
#ifndef __NVKM_INSTMEM_H__
#define __NVKM_INSTMEM_H__
#include <core/subdev.h>
-
-struct nvkm_instobj {
- struct nvkm_object base;
- struct list_head head;
- u32 *suspend;
- u64 addr;
- u32 size;
-};
-
-static inline struct nvkm_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
- nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
-#endif
- return obj;
-}
+struct nvkm_memory;
struct nvkm_instmem {
- struct nvkm_subdev base;
- struct list_head list;
+ const struct nvkm_instmem_func *func;
+ struct nvkm_subdev subdev;
+ struct list_head list;
u32 reserved;
- int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
- u32 size, u32 align, struct nvkm_object **);
+
+ struct nvkm_memory *vbios;
+ struct nvkm_ramht *ramht;
+ struct nvkm_memory *ramro;
+ struct nvkm_memory *ramfc;
};
-static inline struct nvkm_instmem *
-nvkm_instmem(void *obj)
-{
- /* nv04/nv40 impls need to create objects in their constructor,
- * which is before the subdev pointer is valid
- */
- if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
- nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
- return obj;
+u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
+void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
+int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+ struct nvkm_memory **);
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
-}
-extern struct nvkm_oclass *nv04_instmem_oclass;
-extern struct nvkm_oclass *nv40_instmem_oclass;
-extern struct nvkm_oclass *nv50_instmem_oclass;
-extern struct nvkm_oclass *gk20a_instmem_oclass;
+int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
#endif
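
[editor's sketch] A sketch of the new allocation entry point above: a 4KiB, 256-byte-aligned, zero-filled instance-memory object. Release goes through the returned nvkm_memory object, whose interface lives in core/memory.h and is not shown here:

#include <subdev/instmem.h>

static int
example_instobj(struct nvkm_instmem *imem, struct nvkm_memory **pmemory)
{
	return nvkm_instobj_new(imem, 0x1000, 0x100, true, pmemory);
}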
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd5d29fc0565..c773b5e958b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,31 +1,36 @@
#ifndef __NVKM_LTC_H__
#define __NVKM_LTC_H__
#include <core/subdev.h>
-struct nvkm_mm_node;
+#include <core/mm.h>
#define NVKM_LTC_MAX_ZBC_CNT 16
struct nvkm_ltc {
- struct nvkm_subdev base;
+ const struct nvkm_ltc_func *func;
+ struct nvkm_subdev subdev;
- int (*tags_alloc)(struct nvkm_ltc *, u32 count,
- struct nvkm_mm_node **);
- void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **);
- void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count);
+ u32 ltc_nr;
+ u32 lts_nr;
+
+ u32 num_tags;
+ u32 tag_base;
+ struct nvkm_mm tags;
+ struct nvkm_mm_node *tag_ram;
int zbc_min;
int zbc_max;
- int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]);
- int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32);
+ u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
+ u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
};
-static inline struct nvkm_ltc *
-nvkm_ltc(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC);
-}
+int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
+void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
+void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+
+int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
+int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
-extern struct nvkm_oclass *gf100_ltc_oclass;
-extern struct nvkm_oclass *gk104_ltc_oclass;
-extern struct nvkm_oclass *gm107_ltc_oclass;
+int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
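
[editor's sketch] A sketch of the tag helpers above: reserve a run of compression tag lines, clear them, then release them. Reading offset/length out of the returned nvkm_mm_node is an assumption based on core/mm.h:

#include <subdev/ltc.h>

static int
example_ltc_tags(struct nvkm_ltc *ltc, u32 count)
{
	struct nvkm_mm_node *tags;
	int ret = nvkm_ltc_tags_alloc(ltc, count, &tags);
	if (ret)
		return ret;
	nvkm_ltc_tags_clear(ltc, tags->offset, tags->length);
	nvkm_ltc_tags_free(ltc, &tags);
	return 0;
}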
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 055bea7702a1..4de05e718f83 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -3,26 +3,19 @@
#include <core/subdev.h>
struct nvkm_mc {
- struct nvkm_subdev base;
- bool use_msi;
- unsigned int irq;
- void (*unk260)(struct nvkm_mc *, u32);
+ const struct nvkm_mc_func *func;
+ struct nvkm_subdev subdev;
};
-static inline struct nvkm_mc *
-nvkm_mc(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC);
-}
+void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_mc *);
+void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
-extern struct nvkm_oclass *nv04_mc_oclass;
-extern struct nvkm_oclass *nv40_mc_oclass;
-extern struct nvkm_oclass *nv44_mc_oclass;
-extern struct nvkm_oclass *nv4c_mc_oclass;
-extern struct nvkm_oclass *nv50_mc_oclass;
-extern struct nvkm_oclass *g94_mc_oclass;
-extern struct nvkm_oclass *g98_mc_oclass;
-extern struct nvkm_oclass *gf100_mc_oclass;
-extern struct nvkm_oclass *gf106_mc_oclass;
-extern struct nvkm_oclass *gk20a_mc_oclass;
+int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a5368776c31..dcd3deff27a4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -6,7 +6,7 @@ struct nvkm_device;
struct nvkm_mem;
struct nvkm_vm_pgt {
- struct nvkm_gpuobj *obj[2];
+ struct nvkm_memory *mem[2];
u32 refcount[2];
};
@@ -26,74 +26,23 @@ struct nvkm_vma {
struct nvkm_vm {
struct nvkm_mmu *mmu;
+
+ struct mutex mutex;
struct nvkm_mm mm;
struct kref refcount;
struct list_head pgd_list;
- atomic_t engref[NVDEV_SUBDEV_NR];
+ atomic_t engref[NVKM_SUBDEV_NR];
struct nvkm_vm_pgt *pgt;
u32 fpde;
u32 lpde;
};
-struct nvkm_mmu {
- struct nvkm_subdev base;
-
- u64 limit;
- u8 dma_bits;
- u32 pgt_bits;
- u8 spg_shift;
- u8 lpg_shift;
-
- int (*create)(struct nvkm_mmu *, u64 offset, u64 length,
- u64 mm_offset, struct nvkm_vm **);
-
- void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
- struct nvkm_gpuobj *pgt[2]);
- void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
- struct nvkm_mem *, u32 pte, u32 cnt,
- u64 phys, u64 delta);
- void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
- struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
- void (*flush)(struct nvkm_vm *);
-};
-
-static inline struct nvkm_mmu *
-nvkm_mmu(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
-}
-
-#define nvkm_mmu_create(p,e,o,i,f,d) \
- nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
-#define nvkm_mmu_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_mmu_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_mmu_fini(p,s) \
- nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_mmu_dtor _nvkm_subdev_dtor
-#define _nvkm_mmu_init _nvkm_subdev_init
-#define _nvkm_mmu_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv04_mmu_oclass;
-extern struct nvkm_oclass nv41_mmu_oclass;
-extern struct nvkm_oclass nv44_mmu_oclass;
-extern struct nvkm_oclass nv50_mmu_oclass;
-extern struct nvkm_oclass gf100_mmu_oclass;
-
-int nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
- struct nvkm_vm **);
-void nv04_mmu_dtor(struct nvkm_object *);
-
-int nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
- u32 block, struct nvkm_vm **);
int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
- struct nvkm_vm **);
+ struct lock_class_key *, struct nvkm_vm **);
int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int nvkm_vm_boot(struct nvkm_vm *, u64 size);
int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *);
void nvkm_vm_put(struct nvkm_vma *);
@@ -101,4 +50,19 @@ void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
void nvkm_vm_unmap(struct nvkm_vma *);
void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+
+struct nvkm_mmu {
+ const struct nvkm_mmu_func *func;
+ struct nvkm_subdev subdev;
+
+ u64 limit;
+ u8 dma_bits;
+ u8 lpg_shift;
+};
+
+int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
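
[editor's sketch] A sketch of the VMA lifecycle exposed above: reserve GPU virtual address space, map backing memory into it, then tear it down. The 12-bit page shift matches NVKM_RAM_MM_SHIFT in fb.h; NV_MEM_ACCESS_RW is assumed from the fb/mmu headers:

#include <subdev/mmu.h>

static int
example_vm_map(struct nvkm_vm *vm, struct nvkm_mem *mem, u64 size)
{
	struct nvkm_vma vma = {};
	int ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
	if (ret)
		return ret;
	nvkm_vm_map(&vma, mem);
	/* ... use the mapping ... */
	nvkm_vm_unmap(&vma);
	nvkm_vm_put(&vma);
	return 0;
}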
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index fba613477b1a..ed0250139dae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -2,33 +2,5 @@
#define __NVKM_MXM_H__
#include <core/subdev.h>
-#define MXM_SANITISE_DCB 0x00000001
-
-struct nvkm_mxm {
- struct nvkm_subdev base;
- u32 action;
- u8 *mxms;
-};
-
-static inline struct nvkm_mxm *
-nvkm_mxm(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
-}
-
-#define nvkm_mxm_create(p,e,o,d) \
- nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mxm_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_mxm_fini(p,s) \
- nvkm_subdev_fini(&(p)->base, (s))
-int nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void nvkm_mxm_destroy(struct nvkm_mxm *);
-
-#define _nvkm_mxm_dtor _nvkm_subdev_dtor
-#define _nvkm_mxm_init _nvkm_subdev_init
-#define _nvkm_mxm_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_mxm_oclass;
+int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
new file mode 100644
index 000000000000..5b3c054f3b55
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_PCI_H__
+#define __NVKM_PCI_H__
+#include <core/subdev.h>
+
+struct nvkm_pci {
+ const struct nvkm_pci_func *func;
+ struct nvkm_subdev subdev;
+ struct pci_dev *pdev;
+ int irq;
+
+ struct {
+ struct agp_bridge_data *bridge;
+ u32 mode;
+ u64 base;
+ u64 size;
+ int mtrr;
+ bool cma;
+ bool acquired;
+ } agp;
+
+ bool msi;
+};
+
+u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
+void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
+void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
+void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
+
+int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 755942352557..e61923d5e49c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -3,7 +3,8 @@
#include <core/subdev.h>
struct nvkm_pmu {
- struct nvkm_subdev base;
+ const struct nvkm_pmu_func *func;
+ struct nvkm_subdev subdev;
struct {
u32 base;
@@ -20,24 +21,20 @@ struct nvkm_pmu {
u32 message;
u32 data[2];
} recv;
-
- int (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
- void (*pgob)(struct nvkm_pmu *, bool);
};
-static inline struct nvkm_pmu *
-nvkm_pmu(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU);
-}
-
-extern struct nvkm_oclass *gt215_pmu_oclass;
-extern struct nvkm_oclass *gf100_pmu_oclass;
-extern struct nvkm_oclass *gf110_pmu_oclass;
-extern struct nvkm_oclass *gk104_pmu_oclass;
-extern struct nvkm_oclass *gk110_pmu_oclass;
-extern struct nvkm_oclass *gk208_pmu_oclass;
-extern struct nvkm_oclass *gk20a_pmu_oclass;
+int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
+ u32 message, u32 data0, u32 data1);
+void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
+
+int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 6662829b6db1..b268b96faece 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -2,6 +2,28 @@
#define __NVKM_THERM_H__
#include <core/subdev.h>
+#include <subdev/bios.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+enum nvkm_therm_thrs_direction {
+ NVKM_THERM_THRS_FALLING = 0,
+ NVKM_THERM_THRS_RISING = 1
+};
+
+enum nvkm_therm_thrs_state {
+ NVKM_THERM_THRS_LOWER = 0,
+ NVKM_THERM_THRS_HIGHER = 1
+};
+
+enum nvkm_therm_thrs {
+ NVKM_THERM_THRS_FANBOOST = 0,
+ NVKM_THERM_THRS_DOWNCLOCK = 1,
+ NVKM_THERM_THRS_CRITICAL = 2,
+ NVKM_THERM_THRS_SHUTDOWN = 3,
+ NVKM_THERM_THRS_NR
+};
+
enum nvkm_therm_fan_mode {
NVKM_THERM_CTRL_NONE = 0,
NVKM_THERM_CTRL_MANUAL = 1,
@@ -24,56 +46,54 @@ enum nvkm_therm_attr_type {
};
struct nvkm_therm {
- struct nvkm_subdev base;
+ const struct nvkm_therm_func *func;
+ struct nvkm_subdev subdev;
- int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
- int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
- int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
- int (*pwm_clock)(struct nvkm_therm *, int line);
+ /* automatic thermal management */
+ struct nvkm_alarm alarm;
+ spinlock_t lock;
+ struct nvbios_therm_trip_point *last_trip;
+ int mode;
+ int cstate;
+ int suspend;
+
+ /* bios */
+ struct nvbios_therm_sensor bios_sensor;
+
+ /* fan priv */
+ struct nvkm_fan *fan;
+
+ /* alarms priv */
+ struct {
+ spinlock_t alarm_program_lock;
+ struct nvkm_alarm therm_poll_alarm;
+ enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
+ } sensor;
+
+ /* what should be done if the card overheats */
+ struct {
+ void (*downclock)(struct nvkm_therm *, bool active);
+ void (*pause)(struct nvkm_therm *, bool active);
+ } emergency;
+
+ /* ic */
+ struct i2c_client *ic;
int (*fan_get)(struct nvkm_therm *);
int (*fan_set)(struct nvkm_therm *, int);
- int (*fan_sense)(struct nvkm_therm *);
-
- int (*temp_get)(struct nvkm_therm *);
int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
};
-static inline struct nvkm_therm *
-nvkm_therm(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM);
-}
-
-#define nvkm_therm_create(p,e,o,d) \
- nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_therm_destroy(p) ({ \
- struct nvkm_therm *therm = (p); \
- _nvkm_therm_dtor(nv_object(therm)); \
-})
-#define nvkm_therm_init(p) ({ \
- struct nvkm_therm *therm = (p); \
- _nvkm_therm_init(nv_object(therm)); \
-})
-#define nvkm_therm_fini(p,s) ({ \
- struct nvkm_therm *therm = (p); \
- _nvkm_therm_init(nv_object(therm), (s)); \
-})
-
-int nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_therm_dtor(struct nvkm_object *);
-int _nvkm_therm_init(struct nvkm_object *);
-int _nvkm_therm_fini(struct nvkm_object *, bool);
-
-int nvkm_therm_cstate(struct nvkm_therm *, int, int);
-
-extern struct nvkm_oclass nv40_therm_oclass;
-extern struct nvkm_oclass nv50_therm_oclass;
-extern struct nvkm_oclass g84_therm_oclass;
-extern struct nvkm_oclass gt215_therm_oclass;
-extern struct nvkm_oclass gf110_therm_oclass;
-extern struct nvkm_oclass gm107_therm_oclass;
+int nvkm_therm_temp_get(struct nvkm_therm *);
+int nvkm_therm_fan_sense(struct nvkm_therm *);
+int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+
+int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 4ad55082ef7a..62ed0880b0e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -9,53 +9,58 @@ struct nvkm_alarm {
};
static inline void
-nvkm_alarm_init(struct nvkm_alarm *alarm,
- void (*func)(struct nvkm_alarm *))
+nvkm_alarm_init(struct nvkm_alarm *alarm, void (*func)(struct nvkm_alarm *))
{
INIT_LIST_HEAD(&alarm->head);
alarm->func = func;
}
-bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
-void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
-void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
-
-#define NV_WAIT_DEFAULT 2000000000ULL
-#define nv_wait(o,a,m,v) \
- nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_ne(o,a,m,v) \
- nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_cb(o,c,d) \
- nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
-
struct nvkm_timer {
- struct nvkm_subdev base;
- u64 (*read)(struct nvkm_timer *);
- void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
- void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
-};
+ const struct nvkm_timer_func *func;
+ struct nvkm_subdev subdev;
-static inline struct nvkm_timer *
-nvkm_timer(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
-}
+ struct list_head alarms;
+ spinlock_t lock;
+};
-#define nvkm_timer_create(p,e,o,d) \
- nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \
- sizeof(**d), (void **)d)
-#define nvkm_timer_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_timer_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_timer_fini(p,s) \
- nvkm_subdev_fini(&(p)->base, (s))
+u64 nvkm_timer_read(struct nvkm_timer *);
+void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
-int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *,
- struct nvkm_oclass *, int size, void **);
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Returns -ETIMEDOUT unless the loop was terminated with 'break',
+ * in which case it returns the number of nanoseconds taken instead.
+ *
+ * NVKM_DELAY can be passed for 'cond' to disable the timeout warning,
+ * which is useful for unconditional delay loops.
+ */
+#define NVKM_DELAY _warn = false;
+#define nvkm_nsec(d,n,cond...) ({ \
+ struct nvkm_device *_device = (d); \
+ struct nvkm_timer *_tmr = _device->timer; \
+ u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr); \
+ s64 _taken = 0; \
+ bool _warn = true; \
+ \
+ do { \
+ cond \
+ } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
+ \
+ if (_taken >= _nsecs) { \
+ if (_warn) { \
+ dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \
+ __FILE__, __LINE__, __func__); \
+ } \
+ _taken = -ETIMEDOUT; \
+ } \
+ _taken; \
+})
+#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
+#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
-extern struct nvkm_oclass nv04_timer_oclass;
-extern struct nvkm_oclass gk20a_timer_oclass;
+int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
#endif
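
[editor's sketch] A usage example for the nvkm_msec() loop documented above: poll a register until a bit clears, giving up after two seconds. The register address and bit are invented; nvkm_rd32() is the usual device MMIO read helper and is assumed here:

static int
example_wait_idle(struct nvkm_device *device)
{
	s64 taken = nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002140) & 0x00000001))
			break;
	);
	return taken < 0 ? (int)taken : 0; /* -ETIMEDOUT on timeout */
}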
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad818e4..ce5636fe2a66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,30 +1,28 @@
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
-
-#include <core/os.h>
+#include <core/subdev.h>
/* access to various legacy io ports */
-u8 nv_rdport(void *obj, int head, u16 port);
-void nv_wrport(void *obj, int head, u16 port, u8 value);
+u8 nvkm_rdport(struct nvkm_device *, int head, u16 port);
+void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
/* VGA Sequencer */
-u8 nv_rdvgas(void *obj, int head, u8 index);
-void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+u8 nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
/* VGA Graphics */
-u8 nv_rdvgag(void *obj, int head, u8 index);
-void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+u8 nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
/* VGA CRTC */
-u8 nv_rdvgac(void *obj, int head, u8 index);
-void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+u8 nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
/* VGA indexed port access dispatcher */
-u8 nv_rdvgai(void *obj, int head, u16 port, u8 index);
-void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
-
-bool nv_lockvgac(void *obj, bool lock);
-u8 nv_rdvgaowner(void *obj);
-void nv_wrvgaowner(void *obj, u8);
+u8 nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
+void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
+bool nvkm_lockvgac(struct nvkm_device *, bool lock);
+u8 nvkm_rdvgaowner(struct nvkm_device *);
+void nvkm_wrvgaowner(struct nvkm_device *, u8);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index e3d7243fbb1d..5c8a3f1196de 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -2,19 +2,9 @@
#define __NVKM_VOLT_H__
#include <core/subdev.h>
-struct nvkm_voltage {
- u32 uv;
- u8 id;
-};
-
struct nvkm_volt {
- struct nvkm_subdev base;
-
- int (*vid_get)(struct nvkm_volt *);
- int (*get)(struct nvkm_volt *);
- int (*vid_set)(struct nvkm_volt *, u8 vid);
- int (*set)(struct nvkm_volt *, u32 uv);
- int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ const struct nvkm_volt_func *func;
+ struct nvkm_subdev subdev;
u8 vid_mask;
u8 vid_nr;
@@ -24,35 +14,9 @@ struct nvkm_volt {
} vid[256];
};
-static inline struct nvkm_volt *
-nvkm_volt(void *obj)
-{
- return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
-}
-
-#define nvkm_volt_create(p, e, o, d) \
- nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_volt_destroy(p) ({ \
- struct nvkm_volt *v = (p); \
- _nvkm_volt_dtor(nv_object(v)); \
-})
-#define nvkm_volt_init(p) ({ \
- struct nvkm_volt *v = (p); \
- _nvkm_volt_init(nv_object(v)); \
-})
-#define nvkm_volt_fini(p,s) \
- nvkm_subdev_fini((p), (s))
-
-int nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_volt_dtor(struct nvkm_object *);
-int _nvkm_volt_init(struct nvkm_object *);
-#define _nvkm_volt_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv40_volt_oclass;
-extern struct nvkm_oclass gk20a_volt_oclass;
+int nvkm_volt_get(struct nvkm_volt *);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
-int nvkm_voltgpio_init(struct nvkm_volt *);
-int nvkm_voltgpio_get(struct nvkm_volt *);
-int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d8b0891a141c..d336c2247d6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -51,7 +51,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
* device (ie. the one that belongs to the fd it
* opened)
*/
- if (nvif_device_init(&cli->base.base, NULL,
+ if (nvif_device_init(&cli->base.object,
NOUVEAU_ABI16_DEVICE, NV_DEVICE,
&args, sizeof(args),
&abi16->device) == 0)
@@ -69,28 +69,28 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+ struct nouveau_cli *cli = (void *)abi16->device.object.client;
mutex_unlock(&cli->mutex);
return ret;
}
-u16
+s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
switch (drm->device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
- return 0x006e;
+ return NVIF_IOCTL_NEW_V0_SW_NV04;
case NV_DEVICE_INFO_V0_CELSIUS:
case NV_DEVICE_INFO_V0_KELVIN:
case NV_DEVICE_INFO_V0_RANKINE:
case NV_DEVICE_INFO_V0_CURIE:
- return 0x016e;
+ return NVIF_IOCTL_NEW_V0_SW_NV10;
case NV_DEVICE_INFO_V0_TESLA:
- return 0x506e;
+ return NVIF_IOCTL_NEW_V0_SW_NV50;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
- return 0x906e;
+ return NVIF_IOCTL_NEW_V0_SW_GF100;
}
return 0x0000;
@@ -100,6 +100,7 @@ static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
struct nouveau_abi16_ntfy *ntfy)
{
+ nvif_object_fini(&ntfy->object);
nvkm_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
kfree(ntfy);
@@ -132,7 +133,8 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
- abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
+ abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
+ nouveau_channel_idle(chan->chan);
nouveau_channel_del(&chan->chan);
}
@@ -143,7 +145,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+ struct nouveau_cli *cli = (void *)abi16->device.object.client;
struct nouveau_abi16_chan *chan, *temp;
/* cleanup channels */
@@ -164,7 +166,6 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
- struct nvkm_timer *ptimer = nvxx_timer(device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
@@ -173,19 +174,19 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nv_device_is_pci(nvxx_device(device)))
+ if (nvxx_device(device)->func->pci)
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nv_device_is_pci(nvxx_device(device)))
+ if (nvxx_device(device)->func->pci)
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nv_device_is_pci(nvxx_device(device)))
+ if (!nvxx_device(device)->func->pci)
getparam->value = 3;
else
if (drm_pci_device_is_agp(dev))
@@ -206,7 +207,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
- getparam->value = ptimer->read(ptimer);
+ getparam->value = nvif_device_time(device);
break;
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
@@ -215,10 +216,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
- getparam->value = gr->units ? gr->units(gr) : 0;
+ getparam->value = nvkm_gr_units(gr);
break;
default:
- NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
+ NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -337,7 +338,7 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
struct nouveau_abi16_chan *chan;
list_for_each_entry(chan, &abi16->channels, head) {
- if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+ if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel))
return chan;
}
@@ -365,40 +366,91 @@ int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_new_v0 new;
- } args = {
- .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
- .ioctl.type = NVIF_IOCTL_V0_NEW,
- .ioctl.path_nr = 3,
- .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
- .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
- .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
- .new.route = NVDRM_OBJECT_ABI16,
- .new.handle = init->handle,
- .new.oclass = init->class,
- };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_abi16_chan *chan;
+ struct nouveau_abi16_ntfy *ntfy;
struct nvif_client *client;
- int ret;
+ struct nvif_sclass *sclass;
+ s32 oclass = 0;
+ int ret, i;
if (unlikely(!abi16))
return -ENOMEM;
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
- client = nvif_client(nvif_object(&abi16->device));
+ client = abi16->device.object.client;
+
+ chan = nouveau_abi16_chan(abi16, init->channel);
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+
+ ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+ if (ret < 0)
+ return nouveau_abi16_put(abi16, ret);
- /* compatibility with userspace that assumes 506e for all chipsets */
- if (init->class == 0x506e) {
- init->class = nouveau_abi16_swclass(drm);
- if (init->class == 0x906e)
- return nouveau_abi16_put(abi16, 0);
+ if ((init->class & 0x00ff) == 0x006e) {
+ /* nvsw: compatibility with older 0x*6e class identifier */
+ for (i = 0; !oclass && i < ret; i++) {
+ switch (sclass[i].oclass) {
+ case NVIF_IOCTL_NEW_V0_SW_NV04:
+ case NVIF_IOCTL_NEW_V0_SW_NV10:
+ case NVIF_IOCTL_NEW_V0_SW_NV50:
+ case NVIF_IOCTL_NEW_V0_SW_GF100:
+ oclass = sclass[i].oclass;
+ break;
+ default:
+ break;
+ }
+ }
+ } else
+ if ((init->class & 0x00ff) == 0x00b1) {
+ /* msvld: compatibility with incorrect version exposure */
+ for (i = 0; i < ret; i++) {
+ if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
+ oclass = sclass[i].oclass;
+ break;
+ }
+ }
+ } else
+ if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
+ /* mspdec: compatibility with incorrect version exposure */
+ for (i = 0; i < ret; i++) {
+ if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
+ oclass = sclass[i].oclass;
+ break;
+ }
+ }
+ } else
+ if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
+ /* msppp: compatibility with incorrect version exposure */
+ for (i = 0; i < ret; i++) {
+ if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
+ oclass = sclass[i].oclass;
+ break;
+ }
+ }
+ } else {
+ oclass = init->class;
}
- ret = nvif_client_ioctl(client, &args, sizeof(args));
+ nvif_object_sclass_put(&sclass);
+ if (!oclass)
+ return nouveau_abi16_put(abi16, -EINVAL);
+
+ ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+ if (!ntfy)
+ return nouveau_abi16_put(abi16, -ENOMEM);
+
+ list_add(&ntfy->head, &chan->notifiers);
+
+ client->route = NVDRM_OBJECT_ABI16;
+ ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
+ NULL, 0, &ntfy->object);
+ client->route = NVDRM_OBJECT_NVIF;
+
+ if (ret)
+ nouveau_abi16_ntfy_fini(chan, ntfy);
return nouveau_abi16_put(abi16, ret);
}
@@ -406,27 +458,13 @@ int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_notifierobj_alloc *info = data;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_new_v0 new;
- struct nv_dma_v0 ctxdma;
- } args = {
- .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
- .ioctl.type = NVIF_IOCTL_V0_NEW,
- .ioctl.path_nr = 3,
- .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
- .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
- .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
- .new.route = NVDRM_OBJECT_ABI16,
- .new.handle = info->handle,
- .new.oclass = NV_DMA_IN_MEMORY,
- };
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
struct nvif_device *device = &abi16->device;
struct nvif_client *client;
+ struct nv_dma_v0 args = {};
int ret;
if (unlikely(!abi16))
@@ -435,7 +473,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
/* completely unnecessary for these chipsets... */
if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
- client = nvif_client(nvif_object(&abi16->device));
+ client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
@@ -446,41 +484,43 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, -ENOMEM);
list_add(&ntfy->head, &chan->notifiers);
- ntfy->handle = info->handle;
ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
&ntfy->node);
if (ret)
goto done;
- args.ctxdma.start = ntfy->node->offset;
- args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+ args.start = ntfy->node->offset;
+ args.limit = ntfy->node->offset + ntfy->node->length - 1;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- args.ctxdma.target = NV_DMA_V0_TARGET_VM;
- args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
- args.ctxdma.start += chan->ntfy_vma.offset;
- args.ctxdma.limit += chan->ntfy_vma.offset;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
+ args.start += chan->ntfy_vma.offset;
+ args.limit += chan->ntfy_vma.offset;
} else
- if (drm->agp.stat == ENABLED) {
- args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
- args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
- args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
- args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
- client->super = true;
+ if (drm->agp.bridge) {
+ args.target = NV_DMA_V0_TARGET_AGP;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
+ args.start += drm->agp.base + chan->ntfy->bo.offset;
+ args.limit += drm->agp.base + chan->ntfy->bo.offset;
} else {
- args.ctxdma.target = NV_DMA_V0_TARGET_VM;
- args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
- args.ctxdma.start += chan->ntfy->bo.offset;
- args.ctxdma.limit += chan->ntfy->bo.offset;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
+ args.start += chan->ntfy->bo.offset;
+ args.limit += chan->ntfy->bo.offset;
}
- ret = nvif_client_ioctl(client, &args, sizeof(args));
+ client->route = NVDRM_OBJECT_ABI16;
+ client->super = true;
+ ret = nvif_object_init(&chan->chan->user, info->handle,
+ NV_DMA_IN_MEMORY, &args, sizeof(args),
+ &ntfy->object);
client->super = false;
+ client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
info->offset = ntfy->node->offset;
-
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -491,47 +531,28 @@ int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_del del;
- } args = {
- .ioctl.owner = NVDRM_OBJECT_ABI16,
- .ioctl.type = NVIF_IOCTL_V0_DEL,
- .ioctl.path_nr = 4,
- .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
- .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
- .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
- .ioctl.path[0] = fini->handle,
- };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_client *client;
- int ret;
+ int ret = -ENOENT;
if (unlikely(!abi16))
return -ENOMEM;
chan = nouveau_abi16_chan(abi16, fini->channel);
if (!chan)
- return nouveau_abi16_put(abi16, -ENOENT);
- client = nvif_client(nvif_object(&abi16->device));
+ return nouveau_abi16_put(abi16, -EINVAL);
/* synchronize with the user channel and destroy the gpu object */
nouveau_channel_idle(chan->chan);
- ret = nvif_client_ioctl(client, &args, sizeof(args));
- if (ret)
- return nouveau_abi16_put(abi16, ret);
-
- /* cleanup extra state if this object was a notifier */
list_for_each_entry(ntfy, &chan->notifiers, head) {
- if (ntfy->handle == fini->handle) {
- nvkm_mm_free(&chan->heap, &ntfy->node);
- list_del(&ntfy->head);
+ if (ntfy->object.handle == fini->handle) {
+ nouveau_abi16_ntfy_fini(chan, ntfy);
+ ret = 0;
break;
}
}
- return nouveau_abi16_put(abi16, 0);
+ return nouveau_abi16_put(abi16, ret);
}
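
The grobj_alloc rewrite above swaps the hand-built NVIF_IOCTL_V0_NEW request for the object API: the channel's advertised classes are queried with nvif_object_sclass_get(), the legacy 0x*6e/0xb1-0xb3 identifiers are translated to whatever the channel actually exposes, and the object is created through nvif_object_init() with client->route temporarily switched to NVDRM_OBJECT_ABI16. A minimal sketch of the translation step, assuming only what the hunk shows (a count-or-error return from nvif_object_sclass_get(), an .oclass field per entry, release via nvif_object_sclass_put()); the helper name is hypothetical:

	static s32
	abi16_match_oclass(struct nvif_object *parent, u8 low_byte)
	{
		struct nvif_sclass *sclass;
		s32 oclass = 0;
		int ret, i;

		/* ask the parent object which classes it exposes */
		ret = nvif_object_sclass_get(parent, &sclass);
		if (ret < 0)
			return ret;

		/* pick the first advertised class with a matching low byte */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == low_byte) {
				oclass = sclass[i].oclass;
				break;
			}
		}

		nvif_object_sclass_put(&sclass);
		return oclass;	/* 0 if nothing matched */
	}

Folding the four near-identical loops this way is only a reading aid; the driver keeps them separate because the nvsw case whitelists specific NVIF_IOCTL_NEW_V0_SW_* classes rather than matching a low byte.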
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 86eb1caf4957..6584557afa40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -13,9 +13,9 @@ int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
struct nouveau_abi16_ntfy {
+ struct nvif_object object;
struct list_head head;
struct nvkm_mm_node *node;
- u32 handle;
};
struct nouveau_abi16_chan {
@@ -37,7 +37,7 @@ struct nouveau_drm;
struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
int nouveau_abi16_put(struct nouveau_abi16 *, int);
void nouveau_abi16_fini(struct nouveau_abi16 *);
-u16 nouveau_abi16_swclass(struct nouveau_drm *);
+s32 nouveau_abi16_swclass(struct nouveau_drm *);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 622424692b3b..df2d9818aba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -372,12 +372,12 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
return len;
}
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
{
acpi_status status;
acpi_handle dhandle, rom_handle;
- dhandle = ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(dev);
if (!dhandle)
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 74acf0f87785..2f03653aff86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,7 +10,7 @@ void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline bool nouveau_is_optimus(void) { return false; };
@@ -18,7 +18,7 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
#endif
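
The prototype change above lets non-PCI callers reuse the ROM path: only the ACPI companion of the generic struct device matters, resolved the same way for PCI and platform devices. A minimal sketch of the first check, assuming nothing beyond the standard ACPI_HANDLE() accessor from <linux/acpi.h> used in the hunk (the wrapper itself is made up):

	#include <linux/acpi.h>

	static bool
	has_acpi_companion_for_rom(struct device *dev)
	{
		/* first test in nouveau_acpi_rom_supported() above */
		return ACPI_HANDLE(dev) != NULL;
	}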
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
deleted file mode 100644
index 0b5970955604..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ /dev/null
@@ -1,195 +0,0 @@
-#include <linux/module.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_agp.h"
-#include "nouveau_reg.h"
-
-#if __OS_HAS_AGP
-MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
-static int nouveau_agpmode = -1;
-module_param_named(agpmode, nouveau_agpmode, int, 0400);
-
-struct nouveau_agpmode_quirk {
- u16 hostbridge_vendor;
- u16 hostbridge_device;
- u16 chip_vendor;
- u16 chip_device;
- int mode;
-};
-
-static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
- /* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
- { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
-
- {},
-};
-
-static unsigned long
-get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
-{
- struct nvif_device *device = &drm->device;
- struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
- int agpmode = nouveau_agpmode;
- unsigned long mode = info->mode;
-
- /*
- * FW seems to be broken on nv18, it makes the card lock up
- * randomly.
- */
- if (device->info.chipset == 0x18)
- mode &= ~PCI_AGP_COMMAND_FW;
-
- /*
- * Go through the quirks list and adjust the agpmode accordingly.
- */
- while (agpmode == -1 && quirk->hostbridge_vendor) {
- if (info->id_vendor == quirk->hostbridge_vendor &&
- info->id_device == quirk->hostbridge_device &&
- nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
- nvxx_device(device)->pdev->device == quirk->chip_device) {
- agpmode = quirk->mode;
- NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
- agpmode);
- break;
- }
- ++quirk;
- }
-
- /*
- * AGP mode set in the command line.
- */
- if (agpmode > 0) {
- bool agpv3 = mode & 0x8;
- int rate = agpv3 ? agpmode / 4 : agpmode;
-
- mode = (mode & ~0x7) | (rate & 0x7);
- }
-
- return mode;
-}
-
-static bool
-nouveau_agp_enabled(struct nouveau_drm *drm)
-{
- struct drm_device *dev = drm->dev;
-
- if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
- return false;
-
- if (drm->agp.stat == UNKNOWN) {
- if (!nouveau_agpmode)
- return false;
-#ifdef __powerpc__
- /* Disable AGP by default on all PowerPC machines for
- * now -- At least some UniNorth-2 AGP bridges are
- * known to be broken: DMA from the host to the card
- * works just fine, but writeback from the card to the
- * host goes straight to memory untranslated bypassing
- * the GATT somehow, making them quite painful to deal
- * with...
- */
- if (nouveau_agpmode == -1)
- return false;
-#endif
- return true;
- }
-
- return (drm->agp.stat == ENABLED);
-}
-#endif
-
-void
-nouveau_agp_reset(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
- struct nvif_device *device = &drm->device;
- struct drm_device *dev = drm->dev;
- u32 save[2];
- int ret;
-
- if (!nouveau_agp_enabled(drm))
- return;
-
- /* First of all, disable fast writes, otherwise if it's
- * already enabled in the AGP bridge and we disable the card's
- * AGP controller we might be locking ourselves out of it. */
- if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
- dev->agp->mode) & PCI_AGP_COMMAND_FW) {
- struct drm_agp_info info;
- struct drm_agp_mode mode;
-
- ret = drm_agp_info(dev, &info);
- if (ret)
- return;
-
- mode.mode = get_agp_mode(drm, &info);
- mode.mode &= ~PCI_AGP_COMMAND_FW;
-
- ret = drm_agp_enable(dev, mode);
- if (ret)
- return;
- }
-
-
- /* clear busmaster bit, and disable AGP */
- save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
- nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
-
- /* reset PGRAPH, PFIFO and PTIMER */
- save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
- nvif_mask(device, 0x000200, 0x00011100, save[1]);
-
- /* and restore bustmaster bit (gives effect of resetting AGP) */
- nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
-#endif
-}
-
-void
-nouveau_agp_init(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
- struct drm_device *dev = drm->dev;
- struct drm_agp_info info;
- struct drm_agp_mode mode;
- int ret;
-
- if (!nouveau_agp_enabled(drm))
- return;
- drm->agp.stat = DISABLE;
-
- ret = drm_agp_acquire(dev);
- if (ret) {
- NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
- return;
- }
-
- ret = drm_agp_info(dev, &info);
- if (ret) {
- NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
- return;
- }
-
- /* see agp.h for the AGPSTAT_* modes available */
- mode.mode = get_agp_mode(drm, &info);
-
- ret = drm_agp_enable(dev, mode);
- if (ret) {
- NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
- return;
- }
-
- drm->agp.stat = ENABLED;
- drm->agp.base = info.aperture_base;
- drm->agp.size = info.aperture_size;
-#endif
-}
-
-void
-nouveau_agp_fini(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
- struct drm_device *dev = drm->dev;
- if (dev->agp && dev->agp->acquired)
- drm_agp_release(dev);
-#endif
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
deleted file mode 100644
index b55c08652963..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __NOUVEAU_AGP_H__
-#define __NOUVEAU_AGP_H__
-
-struct nouveau_drm;
-
-void nouveau_agp_reset(struct nouveau_drm *);
-void nouveau_agp_init(struct nouveau_drm *);
-void nouveau_agp_fini(struct nouveau_drm *);
-
-#endif
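
With nouveau_agp.c and its reset/init/fini entry points deleted, AGP bring-up moves behind the nvkm device, and the DRM layer's tri-state drm->agp.stat enum disappears; the hunks that follow in this merge test drm->agp.bridge instead, with base/size/cma filled in alongside it. A sketch of the new idiom, assuming only the fields visible in those hunks (the helper is illustrative):

	static bool
	nouveau_drm_uses_agp(struct nouveau_drm *drm)
	{
		/* NULL bridge now means "no AGP", replacing agp.stat == ENABLED */
		return drm->agp.bridge != NULL;
	}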
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index e566c5b53651..89eb46040b13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,7 +40,7 @@ static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;
@@ -51,7 +51,7 @@ static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int val = bd->props.brightness;
int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
@@ -71,7 +71,7 @@ static int
nv40_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
struct backlight_properties props;
struct backlight_device *bd;
@@ -97,7 +97,7 @@ nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val;
@@ -112,7 +112,7 @@ nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
@@ -133,7 +133,7 @@ nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int or = nv_encoder->or;
u32 div, val;
@@ -151,7 +151,7 @@ nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
int or = nv_encoder->or;
u32 div, val;
@@ -177,7 +177,7 @@ static int
nv50_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
@@ -193,9 +193,9 @@ nv50_backlight_init(struct drm_connector *connector)
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;
- if (device->info.chipset <= 0xa0 ||
- device->info.chipset == 0xaa ||
- device->info.chipset == 0xac)
+ if (drm->device.info.chipset <= 0xa0 ||
+ drm->device.info.chipset == 0xaa ||
+ drm->device.info.chipset == 0xac)
ops = &nv50_bl_ops;
else
ops = &nva3_bl_ops;
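
Every hunk in this file makes the same substitution: nvif_rd32()/nvif_wr32() now operate on the embedded struct nvif_object (&drm->device.object) while chipset and family checks stay on drm->device.info. A minimal sketch of the resulting shape, mirroring nv40_get_intensity() above (the helper name is hypothetical; the NV40_PMC_BACKLIGHT* symbols are the driver's own):

	static u32
	backlight_level(struct nouveau_drm *drm)
	{
		/* register access goes through the embedded object... */
		struct nvif_object *device = &drm->device.object;

		return (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
			NV40_PMC_BACKLIGHT_MASK) >> 16;
		/* ...while drm->device.info.chipset stays on the device */
	}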
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0190b69bbe25..4dca65a63b92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
@@ -318,7 +318,8 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_object *device = &drm->device.object;
/*
* The fp strap is normally dictated by the "User Strap" in
@@ -332,7 +333,7 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
- if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -634,7 +635,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
@@ -1481,22 +1482,20 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->dpconf.link_bw = 540000;
break;
}
- entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
- if (dcb->version < 0x41) {
- switch (entry->dpconf.link_nr) {
- case 0xf:
- entry->dpconf.link_nr = 4;
- break;
- case 0x3:
- entry->dpconf.link_nr = 2;
- break;
- default:
- entry->dpconf.link_nr = 1;
- break;
- }
+ switch ((conf & 0x0f000000) >> 24) {
+ case 0xf:
+ case 0x4:
+ entry->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ case 0x2:
+ entry->dpconf.link_nr = 2;
+ break;
+ default:
+ entry->dpconf.link_nr = 1;
+ break;
}
link = entry->dpconf.sor.link;
- entry->i2c_index += NV_I2C_AUX(0);
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
@@ -1892,11 +1891,12 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
idx = -1;
while ((conn = olddcb_conn(dev, ++idx))) {
if (conn[0] != 0xff) {
- NV_INFO(drm, "DCB conn %02d: ", idx);
if (olddcb_conntab(dev)[3] < 4)
- pr_cont("%04x\n", ROM16(conn[0]));
+ NV_INFO(drm, "DCB conn %02d: %04x\n",
+ idx, ROM16(conn[0]));
else
- pr_cont("%08x\n", ROM32(conn[0]));
+ NV_INFO(drm, "DCB conn %02d: %08x\n",
+ idx, ROM32(conn[0]));
}
}
dcb_fake_connectors(bios);
@@ -1915,7 +1915,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_object *device = &drm->device.object;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
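
The parse_dcb20_entry() hunk drops the dcb->version < 0x41 guard and decodes the DP lane-count nibble uniformly, accepting the raw 0x4/0x2 encodings alongside the legacy 0xf/0x3 masks, and no longer biases i2c_index by NV_I2C_AUX(0). The decode, restated as a standalone helper purely for reference (logic copied from the hunk):

	static u8
	dp_link_nr(u32 conf)
	{
		switch ((conf & 0x0f000000) >> 24) {
		case 0xf:	/* legacy mask encoding */
		case 0x4:	/* raw lane count, DCB 4.1 */
			return 4;
		case 0x3:	/* legacy mask encoding */
		case 0x2:	/* raw lane count, DCB 4.1 */
			return 2;
		default:
			return 1;
		}
	}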
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1658b7..15057b39491c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
- struct nvkm_fb_tile *tile = &pfb->tile.region[i];
- struct nvkm_engine *engine;
+ struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
if (tile->pitch)
- pfb->tile.fini(pfb, i, tile);
+ nvkm_fb_tile_fini(fb, i, tile);
if (pitch)
- pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
-
- pfb->tile.prog(pfb, i, tile);
+ nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
- if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
- engine->tile_prog(engine, i);
- if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
- engine->tile_prog(engine, i);
+ nvkm_fb_tile_prog(fb, i, tile);
}
static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
- for (i = 0; i < pfb->tile.regions; i++) {
+ for (i = 0; i < fb->tile.regions; i++) {
tile = nv10_bo_get_tile_region(dev, i);
if (pitch && !found) {
found = tile;
continue;
- } else if (tile && pfb->tile.region[i].pitch) {
+ } else if (tile && fb->tile.region[i].pitch) {
/* Kill an unused tile region. */
nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
}
@@ -214,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+ if (!nvxx_device(&drm->device)->func->cpu_coherent)
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
@@ -471,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_device(nv_device_base(device),
- ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+ dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
}
void
@@ -491,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_cpu(nv_device_base(device),
- ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
}
int
@@ -581,10 +576,9 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
{
#if __OS_HAS_AGP
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct drm_device *dev = drm->dev;
- if (drm->agp.stat == ENABLED) {
- return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+ if (drm->agp.bridge) {
+ return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
page_flags, dummy_read);
}
#endif
@@ -636,12 +630,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
- if (drm->agp.stat != ENABLED)
+ if (!drm->agp.bridge)
man->func = &nv04_gart_manager;
else
man->func = &ttm_bo_manager_func;
- if (drm->agp.stat == ENABLED) {
+ if (drm->agp.bridge) {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+ struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_fence *fence;
int ret;
@@ -1104,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static const struct {
const char *name;
int engine;
- u32 oclass;
+ s32 oclass;
int (*exec)(struct nouveau_channel *,
struct ttm_buffer_object *,
struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
if (chan == NULL)
continue;
- ret = nvif_object_init(chan->object, NULL,
+ ret = nvif_object_init(&chan->user,
mthd->oclass | (mthd->engine << 16),
mthd->oclass, NULL, 0,
&drm->ttm.copy);
@@ -1356,6 +1350,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_mem *node = mem->mm_node;
int ret;
@@ -1372,10 +1367,10 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
- if (drm->agp.stat == ENABLED) {
+ if (drm->agp.bridge) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+ mem->bus.is_iomem = !drm->agp.cma;
}
#endif
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+ mem->bus.base = device->func->resource_addr(device, 1);
mem->bus.is_iomem = true;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
struct nvkm_bar *bar = nvxx_bar(&drm->device);
+ int page_shift = 12;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ page_shift = node->page_shift;
- ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
- &node->bar_vma);
+ ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+ &node->bar_vma);
if (ret)
return ret;
+ nvkm_vm_map(&node->bar_vma, node);
mem->bus.offset = node->bar_vma.offset;
}
break;
@@ -1406,14 +1405,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
- struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_bar *bar = nvxx_bar(&drm->device);
struct nvkm_mem *node = mem->mm_node;
if (!node->bar_vma.node)
return;
- bar->unmap(bar, &node->bar_vma);
+ nvkm_vm_unmap(&node->bar_vma);
+ nvkm_vm_put(&node->bar_vma);
}
static int
@@ -1421,8 +1419,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvif_device *device = &drm->device;
- u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+ struct nvkm_device *device = nvxx_device(&drm->device);
+ u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
dev = drm->dev;
- pdev = nv_device_base(device);
+ pdev = device->dev;
/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
- if (!nv_device_is_cpu_coherent(device) &&
+ if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached)
return ttm_dma_populate(ttm_dma, dev->dev);
#if __OS_HAS_AGP
- if (drm->agp.stat == ENABLED) {
+ if (drm->agp.bridge) {
return ttm_agp_tt_populate(ttm);
}
#endif
@@ -1553,20 +1551,20 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
dev = drm->dev;
- pdev = nv_device_base(device);
+ pdev = device->dev;
/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
- if (!nv_device_is_cpu_coherent(device) &&
+ if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
return;
}
#if __OS_HAS_AGP
- if (drm->agp.stat == ENABLED) {
+ if (drm->agp.bridge) {
ttm_agp_tt_unpopulate(ttm);
return;
}
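
nouveau_bo.c picks up the nvkm rework wholesale: the fb method pointers (tile.init/fini/prog) and the explicit engine->tile_prog notifications become nvkm_fb_tile_* calls, DMA syncs use device->dev directly, and BAR mappings go through nvkm_bar_umap()/nvkm_vm_map(). A sketch of the tile-update sequence, assuming the signatures shown in the first hunk and that nvkm_fb_tile_prog() now notifies the engines itself (the wrapper name is invented):

	static void
	update_tile(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile,
		    u32 addr, u32 size, u32 pitch, u32 flags)
	{
		if (tile->pitch)
			nvkm_fb_tile_fini(fb, i, tile);
		if (pitch)
			nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
		/* presumably also kicks GR/MPEG, replacing engine->tile_prog */
		nvkm_fb_tile_prog(fb, i, tile);
	}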
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589babc506e..ff5e59db49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
#include <nvif/os.h>
#include <nvif/class.h>
+#include <nvif/ioctl.h>
/*XXX*/
#include <core/client.h>
@@ -42,20 +43,26 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)nvif_client(chan->object);
- struct nouveau_fence *fence = NULL;
- int ret;
+ if (likely(chan && chan->fence)) {
+ struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ ret = nouveau_fence_new(chan, false, &fence);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
+ }
- ret = nouveau_fence_new(chan, false, &fence);
- if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
- nouveau_fence_unref(&fence);
+ if (ret) {
+ NV_PRINTK(err, cli, "failed to idle channel "
+ "0x%08x [%s]\n",
+ chan->user.handle,
+ nvxx_client(&cli->base)->name);
+ return ret;
+ }
}
-
- if (ret)
- NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
- chan->object->handle, nvxx_client(&cli->base)->name);
- return ret;
+ return 0;
}
void
@@ -63,21 +70,18 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- if (chan->fence) {
- nouveau_channel_idle(chan);
+ if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
- }
nvif_object_fini(&chan->nvsw);
nvif_object_fini(&chan->gart);
nvif_object_fini(&chan->vram);
- nvif_object_ref(NULL, &chan->object);
+ nvif_object_fini(&chan->user);
nvif_object_fini(&chan->push.ctxdma);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
- nvif_device_ref(NULL, &chan->device);
kfree(chan);
}
*pchan = NULL;
@@ -87,7 +91,7 @@ static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 handle, u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_cli *cli = (void *)device->object.client;
struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
@@ -98,7 +102,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
if (!chan)
return -ENOMEM;
- nvif_device_ref(device, &chan->device);
+ chan->device = device;
chan->drm = drm;
/* allocate memory for dma push buffer */
@@ -146,7 +150,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nv_device_resource_start(nvxx_device(device), 1);
+ args.start = nvxx_device(device)->func->
+ resource_addr(nvxx_device(device), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +160,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.limit = device->info.ram_user - 1;
}
} else {
- if (chan->drm->agp.stat == ENABLED) {
+ if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
@@ -169,7 +174,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
}
}
- ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+ ret = nvif_object_init(&device->object, NVDRM_PUSH |
(handle & 0xffff), NV_DMA_FROM_MEMORY,
&args, sizeof(args), &chan->push.ctxdma);
if (ret) {
@@ -193,8 +198,9 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
const u16 *oclass = oclasses;
union {
struct nv50_channel_gpfifo_v0 nv50;
+ struct fermi_channel_gpfifo_v0 fermi;
struct kepler_channel_gpfifo_a_v0 kepler;
- } args, *retn;
+ } args;
struct nouveau_channel *chan;
u32 size;
int ret;
@@ -210,26 +216,36 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
args.kepler.version = 0;
args.kepler.engine = engine;
- args.kepler.pushbuf = chan->push.ctxdma.handle;
args.kepler.ilength = 0x02000;
args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+ args.kepler.vm = 0;
size = sizeof(args.kepler);
+ } else
+ if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
+ args.fermi.version = 0;
+ args.fermi.ilength = 0x02000;
+ args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
+ args.fermi.vm = 0;
+ size = sizeof(args.fermi);
} else {
args.nv50.version = 0;
- args.nv50.pushbuf = chan->push.ctxdma.handle;
args.nv50.ilength = 0x02000;
args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+ args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
+ args.nv50.vm = 0;
size = sizeof(args.nv50);
}
- ret = nvif_object_new(nvif_object(device), handle, *oclass++,
- &args, size, &chan->object);
+ ret = nvif_object_init(&device->object, handle, *oclass++,
+ &args, size, &chan->user);
if (ret == 0) {
- retn = chan->object->data;
- if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
- chan->chid = retn->kepler.chid;
+ if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+ chan->chid = args.kepler.chid;
+ else
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+ chan->chid = args.fermi.chid;
else
- chan->chid = retn->nv50.chid;
+ chan->chid = args.nv50.chid;
return ret;
}
} while (*oclass);
@@ -248,7 +264,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
NV03_CHANNEL_DMA,
0 };
const u16 *oclass = oclasses;
- struct nv03_channel_dma_v0 args, *retn;
+ struct nv03_channel_dma_v0 args;
struct nouveau_channel *chan;
int ret;
@@ -260,15 +276,14 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */
args.version = 0;
- args.pushbuf = chan->push.ctxdma.handle;
+ args.pushbuf = nvif_handle(&chan->push.ctxdma);
args.offset = chan->push.vma.offset;
do {
- ret = nvif_object_new(nvif_object(device), handle, *oclass++,
- &args, sizeof(args), &chan->object);
+ ret = nvif_object_init(&device->object, handle, *oclass++,
+ &args, sizeof(args), &chan->user);
if (ret == 0) {
- retn = chan->object->data;
- chan->chid = retn->chid;
+ chan->chid = args.chid;
return ret;
}
} while (ret && *oclass);
@@ -281,13 +296,12 @@ static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
- struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_cli *cli = (void *)chan->user.client;
struct nvkm_mmu *mmu = nvxx_mmu(device);
- struct nvkm_sw_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
- nvif_object_map(chan->object);
+ nvif_object_map(&chan->user);
/* allocate dma objects to cover all allowed vram, and gart */
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +317,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.limit = device->info.ram_user - 1;
}
- ret = nvif_object_init(chan->object, NULL, vram,
- NV_DMA_IN_MEMORY, &args,
- sizeof(args), &chan->vram);
+ ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
+ &args, sizeof(args), &chan->vram);
if (ret)
return ret;
@@ -315,7 +328,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.start = 0;
args.limit = cli->vm->mmu->limit - 1;
} else
- if (chan->drm->agp.stat == ENABLED) {
+ if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
@@ -328,15 +341,14 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.limit = mmu->limit - 1;
}
- ret = nvif_object_init(chan->object, NULL, gart,
- NV_DMA_IN_MEMORY, &args,
- sizeof(args), &chan->gart);
+ ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
+ &args, sizeof(args), &chan->gart);
if (ret)
return ret;
}
/* initialise dma tracking parameters */
- switch (chan->object->oclass & 0x00ff) {
+ switch (chan->user.oclass & 0x00ff) {
case 0x006b:
case 0x006e:
chan->user_put = 0x40;
@@ -368,15 +380,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
- ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+ ret = nvif_object_init(&chan->user, 0x006e,
+ NVIF_IOCTL_NEW_V0_SW_NV04,
NULL, 0, &chan->nvsw);
if (ret)
return ret;
- swch = (void *)nvxx_object(&chan->nvsw)->parent;
- swch->flip = nouveau_flip_complete;
- swch->flip_data = chan;
-
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
@@ -395,7 +404,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_cli *cli = (void *)device->object.client;
bool super;
int ret;
@@ -405,17 +414,17 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
if (ret) {
- NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+ NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, handle, pchan);
if (ret) {
- NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+ NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
goto done;
}
}
ret = nouveau_channel_init(*pchan, arg0, arg1);
if (ret) {
- NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+ NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8b3640f69e4f..2ed32414cb69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -37,7 +37,7 @@ struct nouveau_channel {
u32 user_get;
u32 user_put;
- struct nvif_object *object;
+ struct nvif_object user;
};
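
The channel's user-visible object is now embedded rather than pointed to: lifetime is nvif_object_init()/nvif_object_fini() instead of nvif_object_ref(), and handle/oclass/client are read straight off chan->user, as the nouveau_chan.c hunks above show. A small sketch of a consumer under those assumptions (the debug helper itself is made up):

	static void
	debug_channel(struct nouveau_channel *chan)
	{
		/* fields read directly from the embedded nvif_object */
		pr_debug("chan %d handle 0x%08x oclass 0x%04x client %p\n",
			 chan->chid, chan->user.handle, chan->user.oclass,
			 chan->user.client);
	}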
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3162040bc314..2e7cbe933533 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -125,9 +125,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
* is handled by the SOR itself, and not required for LVDS DDC.
*/
if (nv_connector->type == DCB_CONNECTOR_eDP) {
- panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
if (panel == 0) {
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
msleep(300);
}
}
@@ -148,7 +148,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
break;
} else
if (nv_encoder->i2c) {
- if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+ if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
break;
}
}
@@ -157,7 +157,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
* state to avoid confusing the SOR for other output types.
*/
if (!nv_encoder && panel == 0)
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
return nv_encoder;
}
@@ -241,7 +241,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_encoder *nv_partner;
- struct nvkm_i2c_port *i2c;
+ struct i2c_adapter *i2c;
int type;
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
@@ -259,7 +259,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
- nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+ nv_connector->edid = drm_get_edid(connector, i2c);
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
@@ -919,7 +919,7 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
-static void
+static int
nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
{
struct nouveau_encoder *nv_encoder = NULL;
@@ -930,15 +930,15 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
nv_encoder->dcb->type == DCB_OUTPUT_DP) {
if (mode == DRM_MODE_DPMS_ON) {
u8 data = DP_SET_POWER_D0;
- nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+ nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
usleep_range(1000, 2000);
} else {
u8 data = DP_SET_POWER_D3;
- nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+ nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
}
}
- drm_helper_connector_dpms(connector, mode);
+ return drm_helper_connector_dpms(connector, mode);
}
static const struct drm_connector_funcs
@@ -980,29 +980,29 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
}
static ssize_t
-nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
{
struct nouveau_connector *nv_connector =
- container_of(aux, typeof(*nv_connector), aux);
+ container_of(obj, typeof(*nv_connector), aux);
struct nouveau_encoder *nv_encoder;
- struct nvkm_i2c_port *port;
+ struct nvkm_i2c_aux *aux;
int ret;
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
- if (!nv_encoder || !(port = nv_encoder->i2c))
+ if (!nv_encoder || !(aux = nv_encoder->aux))
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
if (msg->size == 0)
return msg->size;
- ret = nvkm_i2c(port)->acquire(port, 0);
+ ret = nvkm_i2c_aux_acquire(aux);
if (ret)
return ret;
- ret = port->func->aux(port, false, msg->request, msg->address,
- msg->buffer, msg->size);
- nvkm_i2c(port)->release(port);
+ ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
+ msg->buffer, msg->size);
+ nvkm_i2c_aux_release(aux);
if (ret >= 0) {
msg->reply = ret;
return msg->size;
@@ -1256,8 +1256,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
- true, NV04_DISP_NTFY_CONN,
+ ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
+ NV04_DISP_NTFY_CONN,
&(struct nvif_notify_conn_req_v0) {
.mask = NVIF_NOTIFY_CONN_V0_ANY,
.conn = index,
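
The connector code completes the i2c split: DDC now goes through a plain struct i2c_adapter, while DP transactions use the new struct nvkm_i2c_aux handle with an acquire/xfer/release protocol (see nouveau_connector_aux_xfer() above) or the nvkm_rdaux()/nvkm_wraux() shorthands. A sketch of the write-side shorthand, using only calls visible in the hunks (the wrapper is illustrative; DP_SET_POWER comes from the DRM DP helpers):

	static int
	dp_sink_set_power(struct nouveau_encoder *nv_encoder, u8 state)
	{
		/* nvkm_wraux() replaces nv_wraux() and takes the encoder's
		 * aux handle rather than its i2c port, as in the dpms hunk */
		return nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &state, 1);
	}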
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8670d90cdc11..cc6c228e11c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -185,7 +185,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- ret = nvif_notify_init(&disp->disp, NULL,
+ ret = nvif_notify_init(&disp->disp,
nouveau_display_vblank_handler, false,
NV04_DISP_NTFY_VBLANK,
&(struct nvif_notify_head_req_v0) {
@@ -358,6 +358,7 @@ int
nouveau_display_init(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
int ret;
@@ -374,6 +375,8 @@ nouveau_display_init(struct drm_device *dev)
nvif_notify_get(&conn->hpd);
}
+ /* enable flip completion events */
+ nvif_notify_get(&drm->flip);
return ret;
}
@@ -381,6 +384,7 @@ void
nouveau_display_fini(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
int head;
@@ -388,6 +392,9 @@ nouveau_display_fini(struct drm_device *dev)
for (head = 0; head < dev->mode_config.num_crtc; head++)
drm_vblank_off(dev, head);
+ /* disable flip completion events */
+ nvif_notify_put(&drm->flip);
+
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
@@ -438,6 +445,7 @@ int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_device *device = nvxx_device(&drm->device);
struct nouveau_display *disp;
int ret;
@@ -450,7 +458,7 @@ nouveau_display_create(struct drm_device *dev)
drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+ dev->mode_config.fb_base = device->func->resource_addr(device, 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -494,7 +502,7 @@ nouveau_display_create(struct drm_device *dev)
int i;
for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nvif_object_init(nvif_object(&drm->device), NULL,
+ ret = nvif_object_init(&drm->device.object,
NVDRM_DISPLAY, oclass[i],
NULL, 0, &disp->disp);
}
@@ -711,7 +719,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
if (!chan)
return -ENODEV;
- cli = (void *)nvif_client(&chan->device->base);
+ cli = (void *)chan->user.client;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -847,10 +855,10 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
int
-nouveau_flip_complete(void *data)
+nouveau_flip_complete(struct nvif_notify *notify)
{
- struct nouveau_channel *chan = data;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
+ struct nouveau_channel *chan = drm->channel;
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
@@ -861,7 +869,7 @@ nouveau_flip_complete(void *data)
}
}
- return 0;
+ return NVIF_NOTIFY_KEEP;
}
int
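
Flip completion is rewired from the nvkm software-channel callback (swch->flip) to a client-side nvif_notify: nouveau_drm.c registers nouveau_flip_complete() against the nvsw object with NVSW_NTFY_UEVENT, and display init/fini arm and disarm it with nvif_notify_get()/nvif_notify_put(). A skeleton handler under those assumptions (the handler name here is a stand-in):

	static int
	example_flip_notify(struct nvif_notify *notify)
	{
		/* recover the owning drm from the embedded notify */
		struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);

		/* ... complete the pending flip on drm->channel ... */

		/* keep the notifier armed for the next event */
		return NVIF_NOTIFY_KEEP;
	}

Returning NVIF_NOTIFY_KEEP, rather than 0 from the old callback, is what keeps the event subscribed between flips.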
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 6d9245aa81a6..d168c63533c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -52,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
uint64_t val;
- val = nvif_rd32(chan, chan->user_get);
+ val = nvif_rd32(&chan->user, chan->user_get);
if (chan->user_get_hi)
- val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
+ val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -82,7 +82,7 @@ void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
- struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+ struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_bo *pb = chan->push.buffer;
struct nvkm_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
@@ -103,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
- nvif_wr32(chan, 0x8c, chan->dma.ib_put);
+ nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}
@@ -113,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
uint32_t cnt = 0, prev_get = 0;
while (chan->dma.ib_free < count) {
- uint32_t get = nvif_rd32(chan, 0x88);
+ uint32_t get = nvif_rd32(&chan->user, 0x88);
if (get != prev_get) {
prev_get = get;
cnt = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8da0a272c45a..aff3a9d0a1fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
- nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+ nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c3ef30b3a5ec..e17e15ec7d43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,8 +31,7 @@
#include "nouveau_crtc.h"
static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
- u8 *dpcd)
+nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
{
struct nouveau_drm *drm = nouveau_drm(dev);
u8 buf[3];
@@ -40,11 +39,11 @@ nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+ if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+ if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -55,15 +54,15 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
{
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c_port *auxch;
+ struct nvkm_i2c_aux *aux;
u8 *dpcd = nv_encoder->dp.dpcd;
int ret;
- auxch = nv_encoder->i2c;
- if (!auxch)
+ aux = nv_encoder->aux;
+ if (!aux)
return -ENODEV;
- ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
+ ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
if (ret)
return ret;
@@ -84,6 +83,6 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
NV_DEBUG(drm, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
- nouveau_dp_probe_oui(dev, auxch, dpcd);
+ nouveau_dp_probe_oui(dev, aux, dpcd);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..ccefb645fd55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -32,15 +32,15 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#include <core/option.h>
+#include <core/pci.h>
+#include <core/tegra.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
-#include "nouveau_agp.h"
#include "nouveau_vga.h"
#include "nouveau_sysfs.h"
#include "nouveau_hwmon.h"
@@ -105,14 +105,18 @@ nouveau_name(struct drm_device *dev)
}
static int
-nouveau_cli_create(u64 name, const char *sname,
+nouveau_cli_create(struct drm_device *dev, const char *sname,
int size, void **pcli)
{
struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+ int ret;
if (cli) {
- int ret = nvif_client_init(NULL, NULL, sname, name,
- nouveau_config, nouveau_debug,
- &cli->base);
+ snprintf(cli->name, sizeof(cli->name), "%s", sname);
+ cli->dev = dev;
+
+ ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
+ nouveau_config, nouveau_debug,
+ &cli->base);
if (ret == 0) {
mutex_init(&cli->mutex);
usif_client_init(cli);
@@ -128,17 +132,23 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
nvif_client_fini(&cli->base);
usif_client_fini(cli);
+ kfree(cli);
}
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
- nouveau_channel_del(&drm->channel);
+ nouveau_channel_idle(drm->channel);
nvif_object_fini(&drm->ntfy);
- nvkm_gpuobj_ref(NULL, &drm->notify);
+ nvkm_gpuobj_del(&drm->notify);
+ nvif_notify_fini(&drm->flip);
nvif_object_fini(&drm->nvsw);
- nouveau_channel_del(&drm->cechan);
+ nouveau_channel_del(&drm->channel);
+
+ nouveau_channel_idle(drm->cechan);
nvif_object_fini(&drm->ttm.copy);
+ nouveau_channel_del(&drm->cechan);
+
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
}
@@ -147,9 +157,9 @@ static void
nouveau_accel_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->device;
+ struct nvif_sclass *sclass;
u32 arg0, arg1;
- u32 sclass[16];
- int ret, i;
+ int ret, i, n;
if (nouveau_noaccel)
return;
@@ -158,12 +168,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
/*XXX: this is crap, but the fence/channel stuff is a little
* backwards in some places. this will be fixed.
*/
- ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+ ret = n = nvif_object_sclass_get(&device->object, &sclass);
if (ret < 0)
return;
- for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
- switch (sclass[i]) {
+ for (ret = -ENOSYS, i = 0; i < n; i++) {
+ switch (sclass[i].oclass) {
case NV03_CHANNEL_DMA:
ret = nv04_fence_create(drm);
break;
@@ -190,6 +200,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
}
+ nvif_object_sclass_put(&sclass);
if (ret) {
NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
nouveau_accel_fini(drm);
@@ -230,10 +241,9 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
- ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+ ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
if (ret == 0) {
- struct nvkm_sw_chan *swch;
ret = RING_SPACE(drm->channel, 2);
if (ret == 0) {
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -245,9 +255,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
OUT_RING (drm->channel, 0x001f0000);
}
}
- swch = (void *)nvxx_object(&drm->nvsw)->parent;
- swch->flip = nouveau_flip_complete;
- swch->flip_data = drm->channel;
+
+ ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
+ false, NVSW_NTFY_UEVENT, NULL, 0, 0,
+ &drm->flip);
+ if (ret == 0)
+ ret = nvif_notify_get(&drm->flip);
+ if (ret) {
+ nouveau_accel_fini(drm);
+ return;
+ }
}
if (ret) {
@@ -257,15 +274,15 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32,
- 0, 0, &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+ NULL, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+ ret = nvif_object_init(&drm->channel->user, NvNotify0,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
@@ -320,9 +337,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
- ret = nvkm_device_create(pdev, NVKM_BUS_PCI,
- nouveau_pci_name(pdev), pci_name(pdev),
- nouveau_config, nouveau_debug, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
+ true, true, ~0ULL, &device);
if (ret)
return ret;
@@ -330,7 +346,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
- nvkm_object_ref(NULL, (struct nvkm_object **)&device);
+ nvkm_device_del(&device);
return ret;
}
@@ -370,12 +386,10 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
- struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm;
int ret;
- ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
- (void **)&drm);
+ ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
if (ret)
return ret;
@@ -389,36 +403,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_get_hdmi_dev(drm);
- /* make sure AGP controller is in a consistent state before we
- * (possibly) execute vbios init tables (see nouveau_agp.h)
- */
- if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
- const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
- NV_DEVICE_V0_DISABLE_MMIO;
- /* dummy device object, doesn't init anything, but allows
- * agp code access to registers
- */
- ret = nvif_device_init(&drm->client.base.base, NULL,
- NVDRM_DEVICE, NV_DEVICE,
- &(struct nv_device_v0) {
- .device = ~0,
- .disable = ~enables,
- .debug0 = ~0,
- }, sizeof(struct nv_device_v0),
- &drm->device);
- if (ret)
- goto fail_device;
-
- nouveau_agp_reset(drm);
- nvif_device_fini(&drm->device);
- }
-
- ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
- NV_DEVICE,
+ ret = nvif_device_init(&drm->client.base.object,
+ NVDRM_DEVICE, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
- .disable = 0,
- .debug0 = 0,
}, sizeof(struct nv_device_v0),
&drm->device);
if (ret)
@@ -431,14 +419,13 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
* better fix is found - assuming there is one...
*/
if (drm->device.info.chipset == 0xc1)
- nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
+ nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
nouveau_vga_init(drm);
- nouveau_agp_init(drm);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
- 0x1000, &drm->client.vm);
+ 0x1000, NULL, &drm->client.vm);
if (ret)
goto fail_device;
@@ -485,7 +472,6 @@ fail_dispctor:
fail_bios:
nouveau_ttm_fini(drm);
fail_ttm:
- nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
fail_device:
nvif_device_fini(&drm->device);
@@ -511,7 +497,6 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_bios_takedown(dev);
nouveau_ttm_fini(drm);
- nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
nvif_device_fini(&drm->device);
@@ -526,15 +511,14 @@ nouveau_drm_device_remove(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
- struct nvkm_object *device;
+ struct nvkm_device *device;
dev->irq_enabled = false;
client = nvxx_client(&drm->client.base);
- device = client->device;
+ device = nvkm_device_find(client->device);
drm_put_dev(dev);
- nvkm_object_ref(NULL, &device);
- nvkm_object_debug();
+ nvkm_device_del(&device);
}
static void
@@ -596,7 +580,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
if (ret)
goto fail_client;
- nouveau_agp_fini(drm);
return 0;
fail_client:
@@ -621,13 +604,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- NV_INFO(drm, "re-enabling device...\n");
-
- nouveau_agp_reset(drm);
-
NV_INFO(drm, "resuming kernel object tree...\n");
nvif_client_resume(&drm->client.base);
- nouveau_agp_init(drm);
NV_INFO(drm, "resuming client object trees...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -727,7 +705,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
- nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
nouveau_switcheroo_optimus_dsm();
@@ -761,10 +738,9 @@ nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev, true);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
- nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
- nv_debug_level(NORMAL);
return ret;
}
@@ -825,8 +801,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
- (void **)&cli);
+ ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
if (ret)
goto out_suspend;
@@ -835,7 +810,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
- 0x1000, &cli->vm);
+ 0x1000, NULL, &cli->vm);
if (ret) {
nouveau_cli_destroy(cli);
goto out_suspend;
@@ -865,8 +840,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
pm_runtime_get_sync(dev->dev);
+ mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);
@@ -942,8 +919,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_USE_AGP |
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+ DRIVER_KMS_LEGACY_CONTEXT,
.load = nouveau_drm_load,
.unload = nouveau_drm_unload,
@@ -1053,18 +1030,16 @@ nouveau_drm_pci_driver = {
};
struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev, int size,
- void **pobject)
+nouveau_platform_device_create(struct platform_device *pdev,
+ struct nvkm_device **pdevice)
{
struct drm_device *drm;
int err;
- err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM,
- nouveau_platform_name(pdev),
- dev_name(&pdev->dev), nouveau_config,
- nouveau_debug, size, pobject);
+ err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug,
+ true, true, ~0ULL, pdevice);
if (err)
- return ERR_PTR(err);
+ goto err_free;
drm = drm_dev_alloc(&driver_platform, &pdev->dev);
if (!drm) {
@@ -1082,7 +1057,7 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
return drm;
err_free:
- nvkm_object_ref(NULL, (struct nvkm_object **)pobject);
+ nvkm_device_del(pdevice);
return ERR_PTR(err);
}
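
A note on the API shape that recurs throughout this file: nvif_object_init()
loses its separate parent-object/placeholder pair and now takes the parent
nvif_object pointer plus a handle directly. A minimal before/after sketch,
using only call sites visible in the hunks above (nothing here is new API
beyond what the patch itself exercises):

	/* old: channel's raw object pointer, NULL placeholder second */
	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0,
			       &drm->nvsw);

	/* new: parent is the nvif_object embedded in the channel, and
	 * the placeholder argument is gone */
	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0,
			       &drm->nvsw);

The same transformation applies to the nvif_device_init() and
nvif_notify_init() call sites elsewhere in this series.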
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index dd726523ca99..3c902c24a8dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 2
+#define DRIVER_MINOR 3
+#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@@ -30,6 +30,9 @@
* - allow concurrent access to bo's mapped read/write.
* 1.2.2:
* - add NOUVEAU_GEM_DOMAIN_COHERENT flag
+ * 1.3.0:
+ * - NVIF ABI modified, safe because only (current) users are test
+ * programs that get directly linked with NVKM.
*/
#include <nvif/client.h>
@@ -88,6 +91,8 @@ struct nouveau_cli {
void *abi16;
struct list_head objects;
struct list_head notifys;
+ char name[32];
+ struct drm_device *dev;
};
static inline struct nouveau_cli *
@@ -109,13 +114,10 @@ struct nouveau_drm {
struct list_head clients;
struct {
- enum {
- UNKNOWN = 0,
- DISABLE = 1,
- ENABLED = 2
- } stat;
+ struct agp_bridge_data *bridge;
u32 base;
u32 size;
+ bool cma;
} agp;
/* TTM interface support */
@@ -148,6 +150,7 @@ struct nouveau_drm {
struct nouveau_fbdev *fbcon;
struct nvif_object nvsw;
struct nvif_object ntfy;
+ struct nvif_notify flip;
/* nv10-nv40 tiling regions */
struct {
@@ -180,22 +183,22 @@ nouveau_drm(struct drm_device *dev)
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
-#define nouveau_platform_device_create(p, u) \
- nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev,
- int size, void **pobject);
+nouveau_platform_device_create(struct platform_device *, struct nvkm_device **);
void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
- nv_##l(_cli->base.base.priv, f, ##a); \
+ dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
-#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ NV_PRINTK(info, &(drm)->client, f, ##a); \
+} while(0)
extern int nouveau_modeset;
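
The logging rework above is worth spelling out: NV_PRINTK() now routes through
the standard dev_<level> helpers using the client name and drm_device stored
in struct nouveau_cli, and NV_DEBUG() becomes an info-level print gated on the
DRM core's debug mask rather than a nouveau-private debug level. Illustrative
call-site behaviour (the 0x2 value assumes the usual DRM_UT_DRIVER bit in the
standard DRM debug-mask layout):

	NV_ERROR(drm, "pushbuf validation failed: %d\n", ret);
	/* -> dev_err(cli->dev->dev, "DRM: pushbuf validation failed: ...")
	 *    since the master client is created with the name "DRM" */

	NV_DEBUG(drm, "probing outputs\n");
	/* -> emitted via dev_info() only when drm_debug & DRM_UT_DRIVER,
	 *    e.g. after loading with drm.debug=0x2 */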
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c57a37e8e1eb..b37da95105b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -41,7 +41,9 @@ struct nouveau_encoder {
struct dcb_output *dcb;
int or;
- struct nvkm_i2c_port *i2c;
+
+ struct i2c_adapter *i2c;
+ struct nvkm_i2c_aux *aux;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553abe4a..2791701685dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_fillrect(info, rect);
+ drm_fb_helper_cfb_fillrect(info, rect);
}
static void
@@ -116,7 +116,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_copyarea(info, image);
+ drm_fb_helper_cfb_copyarea(info, image);
}
static void
@@ -148,7 +148,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
+ drm_fb_helper_cfb_imageblit(info, image);
}
static int
@@ -197,9 +197,9 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
- struct pci_dev *pdev = dev->pdev;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- info = framebuffer_alloc(0, &pdev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unlock;
}
info->skip_vt_switch = 1;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- framebuffer_release(info);
- goto out_unlock;
- }
-
info->par = fbcon;
nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* setup helper */
fbcon->helper.fb = fb;
- fbcon->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
@@ -450,15 +441,9 @@ static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
- struct fb_info *info;
- if (fbcon->helper.fbdev) {
- info = fbcon->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbcon->helper);
+ drm_fb_helper_release_fbi(&fbcon->helper);
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
console_lock();
if (state == FBINFO_STATE_RUNNING)
nouveau_fbcon_accel_restore(dev);
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
if (state != FBINFO_STATE_RUNNING)
nouveau_fbcon_accel_save_disable(dev);
console_unlock();
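
The fbdev bring-up and teardown in this file collapse onto the drm_fb_helper
primitives. A condensed sketch of the paired lifecycle, matching the hunks
above (error paths trimmed):

	/* create: replaces framebuffer_alloc() + fb_alloc_cmap() and the
	 * manual helper->fbdev assignment */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* destroy: replaces unregister_framebuffer(), fb_dealloc_cmap()
	 * and framebuffer_release() */
	drm_fb_helper_unregister_fbi(&fbcon->helper);
	drm_fb_helper_release_fbi(&fbcon->helper);

The cfb_* drawing ops and fb_set_suspend() move to their
drm_fb_helper_cfb_* and drm_fb_helper_set_suspend() wrappers in the same
spirit.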
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c6d56bef5823..574c36b492ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -169,7 +169,7 @@ void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
- struct nouveau_cli *cli = (void *)nvif_client(chan->object);
+ struct nouveau_cli *cli = (void *)chan->user.client;
int ret;
INIT_LIST_HEAD(&fctx->flip);
@@ -188,13 +188,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- ret = nvif_notify_init(chan->object, NULL,
- nouveau_fence_wait_uevent_handler, false,
- G82_CHANNEL_DMA_V0_NTFY_UEVENT,
- &(struct nvif_notify_uevent_req) { },
- sizeof(struct nvif_notify_uevent_req),
- sizeof(struct nvif_notify_uevent_rep),
- &fctx->notify);
+ ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+ false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) { },
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &fctx->notify);
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d9241d8247fb..2e3a62d38fe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -85,7 +85,7 @@ int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
-int nouveau_flip_complete(void *chan);
+int nouveau_flip_complete(struct nvif_notify *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
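
The signature change above is the visible edge of the flip-completion rework:
completion is now delivered through an nvif_notify armed on the software-class
object, instead of patching callback pointers into the NVKM channel. The
registration, as performed in nouveau_accel_init() earlier in this series:

	int nouveau_flip_complete(struct nvif_notify *notify);

	ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete, false,
			       NVSW_NTFY_UEVENT, NULL, 0, 0, &drm->flip);
	if (ret == 0)
		ret = nvif_notify_get(&drm->flip);	/* arm it */

nouveau_accel_fini() correspondingly gains an nvif_notify_fini(&drm->flip).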
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af1ee517f372..2c9981512d27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -254,13 +254,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
+ NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -376,7 +376,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
if (++trycnt > 100000) {
- NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
+ NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -387,7 +387,7 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+ NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
ret = -ENOENT;
break;
}
@@ -399,7 +399,7 @@ retry:
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_PRINTK(error, cli, "multiple instances of buffer %d on "
+ NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
ret = -EINVAL;
@@ -420,7 +420,7 @@ retry:
}
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "fail reserve\n");
+ NV_PRINTK(err, cli, "fail reserve\n");
break;
}
}
@@ -438,7 +438,7 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &gart_list);
else {
- NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
+ NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &both_list);
ret = -EINVAL;
@@ -476,21 +476,21 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_PRINTK(error, cli, "fail set_domain\n");
+ NV_PRINTK(err, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "fail ttm_validate\n");
+ NV_PRINTK(err, cli, "fail ttm_validate\n");
return ret;
}
ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "fail post-validate sync\n");
+ NV_PRINTK(err, cli, "fail post-validate sync\n");
return ret;
}
@@ -537,14 +537,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "validate_init\n");
+ NV_PRINTK(err, cli, "validate_init\n");
return ret;
}
ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "validating bo list\n");
+ NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, NULL, NULL);
return ret;
}
@@ -600,7 +600,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_PRINTK(error, cli, "reloc bo index invalid\n");
+ NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -610,7 +610,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_PRINTK(error, cli, "reloc container bo index invalid\n");
+ NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -618,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_PRINTK(error, cli, "reloc outside of bo\n");
+ NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -627,7 +627,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_PRINTK(error, cli, "failed kmap for reloc\n");
+ NV_PRINTK(err, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -650,7 +650,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_wait(&nvbo->bo, true, false, false);
if (ret) {
- NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
+ NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -681,7 +681,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
+ if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
chan = temp->chan;
break;
}
@@ -696,19 +696,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -726,7 +726,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
+ NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -737,7 +737,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_PRINTK(error, cli, "validate: %d\n", ret);
+ NV_PRINTK(err, cli, "validate: %d\n", ret);
goto out_prevalid;
}
@@ -745,7 +745,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
+ NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -753,7 +753,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
+ NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -768,7 +768,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (drm->device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_PRINTK(error, cli, "cal_space: %d\n", ret);
+ NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
}
@@ -782,7 +782,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
+ NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -820,7 +820,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
+ NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 0dbe0060f86e..491c7149d197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->device);
- int temp = therm->temp_get(therm);
+ int temp = nvkm_therm_temp_get(therm);
if (temp < 0)
return temp;
@@ -348,7 +348,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->device);
- return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+ return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
NULL, 0);
@@ -571,7 +571,7 @@ nouveau_hwmon_init(struct drm_device *dev)
return -ENOMEM;
hwmon->dev = dev;
- if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+ if (!therm || !therm->attr_get || !therm->attr_set)
return -ENODEV;
hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -588,7 +588,7 @@ nouveau_hwmon_init(struct drm_device *dev)
goto error;
/* if the card has a working thermal sensor */
- if (therm->temp_get(therm) >= 0) {
+ if (nvkm_therm_temp_get(therm) >= 0) {
ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
if (ret)
goto error;
@@ -606,7 +606,7 @@ nouveau_hwmon_init(struct drm_device *dev)
}
/* if the card can read the fan rpm */
- if (therm->fan_sense(therm) >= 0) {
+ if (nvkm_therm_fan_sense(therm) >= 0) {
ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_fan_rpm_attrgroup);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index ca0ad9d1563d..55eb942847fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,10 +72,8 @@ nvkm_client_suspend(void *priv)
static void
nvkm_client_driver_fini(void *priv)
{
- struct nvkm_object *client = priv;
- nvkm_client_fini(nv_client(client), false);
- atomic_set(&client->refcount, 1);
- nvkm_object_ref(NULL, &client);
+ struct nvkm_client *client = priv;
+ nvkm_client_del(&client);
}
static int
@@ -113,7 +111,7 @@ nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
struct nvkm_client *client;
int ret;
- ret = nvkm_client_create(name, device, cfg, dbg, &client);
+ ret = nvkm_client_new(name, device, cfg, dbg, &client);
*ppriv = client;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..3eb665453165 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -19,223 +19,38 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
-#include <linux/iommu.h>
-#include <soc/tegra/fuse.h>
-#include <soc/tegra/pmc.h>
-
-#include "nouveau_drm.h"
#include "nouveau_platform.h"
-static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
-{
- int err;
-
- err = regulator_enable(gpu->vdd);
- if (err)
- goto err_power;
-
- err = clk_prepare_enable(gpu->clk);
- if (err)
- goto err_clk;
- err = clk_prepare_enable(gpu->clk_pwr);
- if (err)
- goto err_clk_pwr;
- clk_set_rate(gpu->clk_pwr, 204000000);
- udelay(10);
-
- reset_control_assert(gpu->rst);
- udelay(10);
-
- err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
- if (err)
- goto err_clamp;
- udelay(10);
-
- reset_control_deassert(gpu->rst);
- udelay(10);
-
- return 0;
-
-err_clamp:
- clk_disable_unprepare(gpu->clk_pwr);
-err_clk_pwr:
- clk_disable_unprepare(gpu->clk);
-err_clk:
- regulator_disable(gpu->vdd);
-err_power:
- return err;
-}
-
-static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
-{
- int err;
-
- reset_control_assert(gpu->rst);
- udelay(10);
-
- clk_disable_unprepare(gpu->clk_pwr);
- clk_disable_unprepare(gpu->clk);
- udelay(10);
-
- err = regulator_disable(gpu->vdd);
- if (err)
- return err;
-
- return 0;
-}
-
-static void nouveau_platform_probe_iommu(struct device *dev,
- struct nouveau_platform_gpu *gpu)
-{
- int err;
- unsigned long pgsize_bitmap;
-
- mutex_init(&gpu->iommu.mutex);
-
- if (iommu_present(&platform_bus_type)) {
- gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (IS_ERR(gpu->iommu.domain))
- goto error;
-
- /*
- * A IOMMU is only usable if it supports page sizes smaller
- * or equal to the system's PAGE_SIZE, with a preference if
- * both are equal.
- */
- pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
- if (pgsize_bitmap & PAGE_SIZE) {
- gpu->iommu.pgshift = PAGE_SHIFT;
- } else {
- gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
- if (gpu->iommu.pgshift == 0) {
- dev_warn(dev, "unsupported IOMMU page size\n");
- goto free_domain;
- }
- gpu->iommu.pgshift -= 1;
- }
-
- err = iommu_attach_device(gpu->iommu.domain, dev);
- if (err)
- goto free_domain;
-
- err = nvkm_mm_init(&gpu->iommu._mm, 0,
- (1ULL << 40) >> gpu->iommu.pgshift, 1);
- if (err)
- goto detach_device;
-
- gpu->iommu.mm = &gpu->iommu._mm;
- }
-
- return;
-
-detach_device:
- iommu_detach_device(gpu->iommu.domain, dev);
-
-free_domain:
- iommu_domain_free(gpu->iommu.domain);
-
-error:
- gpu->iommu.domain = NULL;
- gpu->iommu.pgshift = 0;
- dev_err(dev, "cannot initialize IOMMU MM\n");
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
- struct nouveau_platform_gpu *gpu)
-{
- if (gpu->iommu.domain) {
- nvkm_mm_fini(&gpu->iommu._mm);
- iommu_detach_device(gpu->iommu.domain, dev);
- iommu_domain_free(gpu->iommu.domain);
- }
-}
-
static int nouveau_platform_probe(struct platform_device *pdev)
{
- struct nouveau_platform_gpu *gpu;
- struct nouveau_platform_device *device;
+ struct nvkm_device *device;
struct drm_device *drm;
- int err;
-
- gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
- if (!gpu)
- return -ENOMEM;
-
- gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(gpu->vdd))
- return PTR_ERR(gpu->vdd);
-
- gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(gpu->rst))
- return PTR_ERR(gpu->rst);
-
- gpu->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(gpu->clk))
- return PTR_ERR(gpu->clk);
-
- gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(gpu->clk_pwr))
- return PTR_ERR(gpu->clk_pwr);
-
- nouveau_platform_probe_iommu(&pdev->dev, gpu);
-
- err = nouveau_platform_power_up(gpu);
- if (err)
- return err;
+ int ret;
drm = nouveau_platform_device_create(pdev, &device);
- if (IS_ERR(drm)) {
- err = PTR_ERR(drm);
- goto power_down;
- }
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
- device->gpu = gpu;
- device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
-
- err = drm_dev_register(drm, 0);
- if (err < 0)
- goto err_unref;
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0) {
+ drm_dev_unref(drm);
+ return ret;
+ }
return 0;
-
-err_unref:
- drm_dev_unref(drm);
-
-power_down:
- nouveau_platform_power_down(gpu);
- nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
- return err;
}
static int nouveau_platform_remove(struct platform_device *pdev)
{
- struct drm_device *drm_dev = platform_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_device *device = nvxx_device(&drm->device);
- struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
- int err;
-
- nouveau_drm_device_remove(drm_dev);
-
- err = nouveau_platform_power_down(gpu);
-
- nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
- return err;
+ struct drm_device *dev = platform_get_drvdata(pdev);
+ nouveau_drm_device_remove(dev);
+ return 0;
}
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id nouveau_platform_match[] = {
{ .compatible = "nvidia,gk20a" },
+ { .compatible = "nvidia,gm20b" },
{ }
};
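
The net effect of this file's rewrite: regulator, clock, reset and IOMMU
bring-up migrate into NVKM's Tegra device constructor, so the platform shim
shrinks to device creation plus DRM registration. Condensed flow (error
unwinding trimmed; see the full hunk above for the drm_dev_unref() on
failure):

	static int nouveau_platform_probe(struct platform_device *pdev)
	{
		struct nvkm_device *device;
		struct drm_device *drm;

		/* nvkm_device_tegra_new() now owns power/clock/reset/IOMMU */
		drm = nouveau_platform_device_create(pdev, &device);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		return drm_dev_register(drm, 0);
	}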
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 392874cf4725..f41056d0f5f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -19,54 +19,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#ifndef __NOUVEAU_PLATFORM_H__
#define __NOUVEAU_PLATFORM_H__
-
-#include "core/device.h"
-#include "core/mm.h"
-
-struct reset_control;
-struct clk;
-struct regulator;
-struct iommu_domain;
-struct platform_driver;
-
-struct nouveau_platform_gpu {
- struct reset_control *rst;
- struct clk *clk;
- struct clk *clk_pwr;
-
- struct regulator *vdd;
-
- struct {
- /*
- * Protects accesses to mm from subsystems
- */
- struct mutex mutex;
-
- struct nvkm_mm _mm;
- /*
- * Just points to _mm. We need this to avoid embedding
- * struct nvkm_mm in os.h
- */
- struct nvkm_mm *mm;
- struct iommu_domain *domain;
- unsigned long pgshift;
- } iommu;
-};
-
-struct nouveau_platform_device {
- struct nvkm_device device;
-
- struct nouveau_platform_gpu *gpu;
-
- int gpu_speedo;
-};
-
-#define nv_device_to_platform(d) \
- container_of(d, struct nouveau_platform_device, device)
+#include "nouveau_drm.h"
extern struct platform_driver nouveau_platform_driver;
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 1ec8f38ae69a..d12a5faee047 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
struct nvif_device *device = &drm->device;
if (sysfs && sysfs->ctrl.priv) {
- device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+ device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
nvif_object_fini(&sysfs->ctrl);
}
@@ -188,11 +188,11 @@ nouveau_sysfs_init(struct drm_device *dev)
if (!sysfs)
return -ENOMEM;
- ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+ ret = nvif_object_init(&device->object, NVDRM_CONTROL,
NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
- &sysfs->ctrl);
+ &sysfs->ctrl);
if (ret == 0)
- device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+ device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..3f0fb55cb473 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,8 +33,8 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
- man->priv = pfb;
+ struct nvkm_fb *fb = nvxx_fb(&drm->device);
+ man->priv = fb;
return 0;
}
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
nvkm_mem_node_cleanup(mem->mm_node);
- pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
+ ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}
static int
@@ -76,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+ struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_mem *node;
u32 size_nc = 0;
@@ -88,9 +88,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
- ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
- mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
return 0;
}
-static void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
- struct nvkm_fb *pfb = man->priv;
- struct nvkm_mm *mm = &pfb->vram;
- struct nvkm_mm_node *r;
- u32 total = 0, free = 0;
-
- mutex_lock(&nv_subdev(pfb)->mutex);
- list_for_each_entry(r, &mm->nodes, nl_entry) {
- printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
- prefix, r->type, ((u64)r->offset << 12),
- (((u64)r->offset + r->length) << 12));
-
- total += r->length;
- if (!r->type)
- free += r->length;
- }
- mutex_unlock(&nv_subdev(pfb)->mutex);
-
- printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
- prefix, (u64)total << 12, (u64)free << 12);
- printk(KERN_DEBUG "%s block: 0x%08x\n",
- prefix, mm->block_size << 12);
-}
-
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_init,
nouveau_vram_manager_fini,
nouveau_vram_manager_new,
nouveau_vram_manager_del,
- nouveau_vram_manager_debug
};
static int
@@ -175,15 +148,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TNT:
+ case NV_DEVICE_INFO_V0_CELSIUS:
+ case NV_DEVICE_INFO_V0_KELVIN:
+ case NV_DEVICE_INFO_V0_RANKINE:
+ case NV_DEVICE_INFO_V0_CURIE:
+ break;
case NV_DEVICE_INFO_V0_TESLA:
if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
+ NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+ drm->device.info.family);
break;
}
@@ -212,7 +194,7 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
- struct nv04_mmu_priv *priv = (void *)mmu;
+ struct nv04_mmu *priv = (void *)mmu;
struct nvkm_vm *vm = NULL;
nvkm_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
@@ -353,13 +335,22 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_pci *pci = device->pci;
struct drm_device *dev = drm->dev;
u32 bits;
int ret;
+ if (pci && pci->agp.bridge) {
+ drm->agp.bridge = pci->agp.bridge;
+ drm->agp.base = pci->agp.base;
+ drm->agp.size = pci->agp.size;
+ drm->agp.cma = pci->agp.cma;
+ }
+
bits = nvxx_mmu(&drm->device)->dma_bits;
- if (nv_device_is_pci(nvxx_device(&drm->device))) {
- if (drm->agp.stat == ENABLED ||
+ if (nvxx_device(&drm->device)->func->pci) {
+ if (drm->agp.bridge ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
@@ -399,11 +390,11 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
- nv_device_resource_len(nvxx_device(&drm->device), 1));
+ drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
/* GART init */
- if (drm->agp.stat != ENABLED) {
+ if (!drm->agp.bridge) {
drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
@@ -424,10 +415,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- mutex_lock(&drm->dev->struct_mutex);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
- mutex_unlock(&drm->dev->struct_mutex);
ttm_bo_device_release(&drm->ttm.bdev);
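
With AGP handling moved under NVKM's pci subdev, the DRM layer no longer
tracks an UNKNOWN/DISABLE/ENABLED enum; it mirrors the subdev's state once at
TTM init and tests the bridge pointer thereafter. Sketch of the pattern, taken
from the hunk above:

	struct nvkm_pci *pci = nvxx_device(&drm->device)->pci;

	if (pci && pci->agp.bridge) {	/* AGP owned by NVKM now */
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base   = pci->agp.base;
		drm->agp.size   = pci->agp.size;
		drm->agp.cma    = pci->agp.cma;
	}

	/* subsequent checks reduce to: if (drm->agp.bridge) ... */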
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c7592ec8ecb8..af89c3665b2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,13 +12,14 @@
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
- struct nvif_device *device = &nouveau_drm(priv)->device;
+ struct nouveau_drm *drm = nouveau_drm(priv);
+ struct nvif_object *device = &drm->device.object;
- if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
- device->info.chipset >= 0x4c)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+ drm->device.info.chipset >= 0x4c)
nvif_wr32(device, 0x088060, state);
else
- if (device->info.chipset >= 0x40)
+ if (drm->device.info.chipset >= 0x40)
nvif_wr32(device, 0x088054, state);
else
nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..789dc2993b0d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -171,39 +171,39 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nvif_object_init(chan->object, NULL, 0x0062,
+ ret = nvif_object_init(&chan->user, 0x0062,
device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
if (ret)
return ret;
- ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
&nfbdev->clip);
if (ret)
return ret;
- ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
&nfbdev->rop);
if (ret)
return ret;
- ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
&nfbdev->patt);
if (ret)
return ret;
- ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
&nfbdev->gdi);
if (ret)
return ret;
- ret = nvif_object_init(chan->object, NULL, 0x005f,
+ ret = nvif_object_init(&chan->user, 0x005f,
device->info.chipset >= 0x11 ? 0x009f : 0x005f,
NULL, 0, &nfbdev->blit);
if (ret)
return ret;
- if (RING_SPACE(chan, 49)) {
+ if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
nouveau_fbcon_gpu_lockup(info);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c2e05e64cd6f..f3d705d67738 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,8 +57,10 @@ nv04_fence_sync(struct nouveau_fence *fence,
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
- struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);;
- return atomic_read(&fifo->refcnt);
+ struct nv04_nvsw_get_ref_v0 args = {};
+ WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
+ &args, sizeof(args)));
+ return args.ref;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 5e1ea1cdce75..2c35213da275 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -50,7 +50,7 @@ nv10_fence_sync(struct nouveau_fence *fence,
u32
nv10_fence_read(struct nouveau_channel *chan)
{
- return nvif_rd32(chan, 0x0048);
+ return nvif_rd32(&chan->user, 0x0048);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 57860cfa1de5..80b6eb8b3d02 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -33,7 +33,7 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
+ struct nouveau_cli *cli = (void *)prev->user.client;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
u32 value;
@@ -89,7 +89,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+ ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..4ae87aed4505 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -60,35 +60,39 @@
struct nv50_chan {
struct nvif_object user;
+ struct nvif_device *device;
};
static int
-nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
- void *data, u32 size, struct nv50_chan *chan)
+nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size,
+ struct nv50_chan *chan)
{
const u32 handle = (oclass[0] << 16) | head;
- u32 sclass[8];
- int ret, i;
+ struct nvif_sclass *sclass;
+ int ret, i, n;
+
+ chan->device = device;
- ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
- WARN_ON(ret > ARRAY_SIZE(sclass));
+ ret = n = nvif_object_sclass_get(disp, &sclass);
if (ret < 0)
return ret;
while (oclass[0]) {
- for (i = 0; i < ARRAY_SIZE(sclass); i++) {
- if (sclass[i] == oclass[0]) {
- ret = nvif_object_init(disp, NULL, handle,
- oclass[0], data, size,
- &chan->user);
+ for (i = 0; i < n; i++) {
+ if (sclass[i].oclass == oclass[0]) {
+ ret = nvif_object_init(disp, handle, oclass[0],
+ data, size, &chan->user);
if (ret == 0)
nvif_object_map(&chan->user);
+ nvif_object_sclass_put(&sclass);
return ret;
}
}
oclass++;
}
+ nvif_object_sclass_put(&sclass);
return -ENOSYS;
}
@@ -113,10 +117,12 @@ nv50_pioc_destroy(struct nv50_pioc *pioc)
}
static int
-nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
- void *data, u32 size, struct nv50_pioc *pioc)
+nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size,
+ struct nv50_pioc *pioc)
{
- return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+ return nv50_chan_create(device, disp, oclass, head, data, size,
+ &pioc->base);
}
/******************************************************************************
@@ -128,12 +134,13 @@ struct nv50_curs {
};
static int
-nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
+ int head, struct nv50_curs *curs)
{
struct nv50_disp_cursor_v0 args = {
.head = head,
};
- static const u32 oclass[] = {
+ static const s32 oclass[] = {
GK104_DISP_CURSOR,
GF110_DISP_CURSOR,
GT214_DISP_CURSOR,
@@ -142,8 +149,8 @@ nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
0
};
- return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
- &curs->base);
+ return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+ &curs->base);
}
/******************************************************************************
@@ -155,12 +162,13 @@ struct nv50_oimm {
};
static int
-nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
+ int head, struct nv50_oimm *oimm)
{
struct nv50_disp_cursor_v0 args = {
.head = head,
};
- static const u32 oclass[] = {
+ static const s32 oclass[] = {
GK104_DISP_OVERLAY,
GF110_DISP_OVERLAY,
GT214_DISP_OVERLAY,
@@ -169,8 +177,8 @@ nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
0
};
- return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
- &oimm->base);
+ return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+ &oimm->base);
}
/******************************************************************************
@@ -194,37 +202,37 @@ struct nv50_dmac {
static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
+ struct nvif_device *device = dmac->base.device;
+
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
nv50_chan_destroy(&dmac->base);
if (dmac->ptr) {
- struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev;
- pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+ struct device *dev = nvxx_device(device)->dev;
+ dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
}
static int
-nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
- void *data, u32 size, u64 syncbuf,
+nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nvif_device *device = nvif_device(disp);
struct nv50_disp_core_channel_dma_v0 *args = data;
struct nvif_object pushbuf;
int ret;
mutex_init(&dmac->lock);
- dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
- PAGE_SIZE, &dmac->handle);
+ dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+ &dmac->handle, GFP_KERNEL);
if (!dmac->ptr)
return -ENOMEM;
- ret = nvif_object_init(nvif_object(device), NULL,
- args->pushbuf, NV_DMA_FROM_MEMORY,
- &(struct nv_dma_v0) {
+ ret = nvif_object_init(&device->object, 0xd0000000,
+ NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_PCI_US,
.access = NV_DMA_V0_ACCESS_RD,
.start = dmac->handle + 0x0000,
@@ -233,13 +241,15 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
if (ret)
return ret;
- ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+ args->pushbuf = nvif_handle(&pushbuf);
+
+ ret = nv50_chan_create(device, disp, oclass, head, data, size,
+ &dmac->base);
nvif_object_fini(&pushbuf);
if (ret)
return ret;
- ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
- NV_DMA_IN_MEMORY,
+ ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
@@ -250,8 +260,7 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
if (ret)
return ret;
- ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
- NV_DMA_IN_MEMORY,
+ ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
@@ -274,12 +283,13 @@ struct nv50_mast {
};
static int
-nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
+ u64 syncbuf, struct nv50_mast *core)
{
struct nv50_disp_core_channel_dma_v0 args = {
.pushbuf = 0xb0007d00,
};
- static const u32 oclass[] = {
+ static const s32 oclass[] = {
GM204_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
@@ -293,8 +303,8 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
0
};
- return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
- &core->base);
+ return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
+ syncbuf, &core->base);
}
/******************************************************************************
@@ -308,14 +318,14 @@ struct nv50_sync {
};
static int
-nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
- struct nv50_sync *base)
+nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
+ int head, u64 syncbuf, struct nv50_sync *base)
{
struct nv50_disp_base_channel_dma_v0 args = {
.pushbuf = 0xb0007c00 | head,
.head = head,
};
- static const u32 oclass[] = {
+ static const s32 oclass[] = {
GK110_DISP_BASE_CHANNEL_DMA,
GK104_DISP_BASE_CHANNEL_DMA,
GF110_DISP_BASE_CHANNEL_DMA,
@@ -326,7 +336,7 @@ nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
0
};
- return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
syncbuf, &base->base);
}
@@ -339,14 +349,14 @@ struct nv50_ovly {
};
static int
-nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
- struct nv50_ovly *ovly)
+nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
+ int head, u64 syncbuf, struct nv50_ovly *ovly)
{
struct nv50_disp_overlay_channel_dma_v0 args = {
.pushbuf = 0xb0007e00 | head,
.head = head,
};
- static const u32 oclass[] = {
+ static const s32 oclass[] = {
GK104_DISP_OVERLAY_CONTROL_DMA,
GF110_DISP_OVERLAY_CONTROL_DMA,
GT214_DISP_OVERLAY_CHANNEL_DMA,
@@ -356,7 +366,7 @@ nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
0
};
- return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
syncbuf, &ovly->base);
}
@@ -413,6 +423,7 @@ static u32 *
evo_wait(void *evoc, int nr)
{
struct nv50_dmac *dmac = evoc;
+ struct nvif_device *device = dmac->base.device;
u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
mutex_lock(&dmac->lock);
@@ -420,9 +431,12 @@ evo_wait(void *evoc, int nr)
dmac->ptr[put] = 0x20000000;
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
- if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ if (nvif_msec(device, 2000,
+ if (!nvif_rd32(&dmac->base.user, 0x0004))
+ break;
+ ) < 0) {
mutex_unlock(&dmac->lock);
- nv_error(nvxx_object(&dmac->base.user), "channel stalled\n");
+ printk(KERN_ERR "nouveau: evo channel stalled\n");
return NULL;
}
@@ -480,7 +494,10 @@ evo_sync(struct drm_device *dev)
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_kick(push, mast);
- if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync))
+ if (nvif_msec(device, 2000,
+ if (evo_sync_wait(disp->sync))
+ break;
+ ) >= 0)
return 0;
}
@@ -535,7 +552,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_kick(push, flip.chan);
}
- nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip);
+ nvif_msec(device, 2000,
+ if (nv50_display_flip_wait(&flip))
+ break;
+ );
}
int
@@ -563,7 +583,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (unlikely(push == NULL))
return -EBUSY;
- if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
+ if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
@@ -577,7 +597,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->addr);
OUT_RING (chan, sync->data);
} else
- if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
+ if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
@@ -979,7 +999,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
- if (show && nv_crtc->cursor.nvbo)
+ if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
@@ -1408,6 +1428,8 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
static int
nv50_crtc_create(struct drm_device *dev, int index)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_device *device = &drm->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
struct drm_crtc *crtc;
@@ -1452,13 +1474,13 @@ nv50_crtc_create(struct drm_device *dev, int index)
goto out;
/* allocate cursor resources */
- ret = nv50_curs_create(disp->disp, index, &head->curs);
+ ret = nv50_curs_create(device, disp->disp, index, &head->curs);
if (ret)
goto out;
/* allocate page flip / sync resources */
- ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
- &head->sync);
+ ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
+ &head->sync);
if (ret)
goto out;
@@ -1466,12 +1488,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
head->sync.data = 0x00000000;
/* allocate overlay resources */
- ret = nv50_oimm_create(disp->disp, index, &head->oimm);
+ ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
goto out;
- ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
- &head->ovly);
+ ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
+ &head->ovly);
if (ret)
goto out;
@@ -1678,6 +1700,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c_bus *bus;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
@@ -1687,7 +1710,10 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+
+ bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+ if (bus)
+ nv_encoder->i2c = &bus->i2c;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
@@ -2081,9 +2107,22 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nvkm_i2c_aux *aux =
+ nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+ if (aux) {
+ nv_encoder->i2c = &aux->i2c;
+ nv_encoder->aux = aux;
+ }
+ } else {
+ struct nvkm_i2c_bus *bus =
+ nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+ if (bus)
+ nv_encoder->i2c = &bus->i2c;
+ }
+
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
@@ -2234,18 +2273,22 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
- struct nvkm_i2c_port *ddc = NULL;
+ struct nvkm_i2c_bus *bus = NULL;
+ struct nvkm_i2c_aux *aux = NULL;
+ struct i2c_adapter *ddc;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
- ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+ bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+ ddc = bus ? &bus->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
case DCB_OUTPUT_DP:
- ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+ aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+ ddc = aux ? &aux->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
default:
@@ -2258,6 +2301,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->dcb = dcbe;
nv_encoder->or = ffs(dcbe->or) - 1;
nv_encoder->i2c = ddc;
+ nv_encoder->aux = aux;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
@@ -2295,7 +2339,7 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
union {
struct nv50_dma_v0 nv50;
struct gf100_dma_v0 gf100;
- struct gf110_dma_v0 gf110;
+ struct gf119_dma_v0 gf119;
};
} args = {};
struct nv50_fbdma *fbdma;
@@ -2331,15 +2375,15 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
args.gf100.kind = kind;
size += sizeof(args.gf100);
} else {
- args.gf110.page = GF110_DMA_V0_PAGE_LP;
- args.gf110.kind = kind;
- size += sizeof(args.gf110);
+ args.gf119.page = GF119_DMA_V0_PAGE_LP;
+ args.gf119.kind = kind;
+ size += sizeof(args.gf119);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nv50_head *head = nv50_head(crtc);
- int ret = nvif_object_init(&head->sync.base.base.user, NULL,
- name, NV_DMA_IN_MEMORY, &args, size,
+ int ret = nvif_object_init(&head->sync.base.base.user, name,
+ NV_DMA_IN_MEMORY, &args, size,
&fbdma->base[head->base.index]);
if (ret) {
nv50_fbdma_fini(fbdma);
@@ -2347,9 +2391,8 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
}
}
- ret = nvif_object_init(&mast->base.base.user, NULL, name,
- NV_DMA_IN_MEMORY, &args, size,
- &fbdma->core);
+ ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
+ &args, size, &fbdma->core);
if (ret) {
nv50_fbdma_fini(fbdma);
return ret;
@@ -2502,14 +2545,14 @@ nv50_display_create(struct drm_device *dev)
goto out;
/* allocate master evo channel */
- ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+ ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
&disp->mast);
if (ret)
goto out;
/* create crtc objects to represent the hw heads */
if (disp->disp->oclass >= GF110_DISP)
- crtcs = nvif_rd32(device, 0x022448);
+ crtcs = nvif_rd32(&device->object, 0x022448);
else
crtcs = 2;
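
The hunks above replace the old nvxx_wait()/nv_wait_cb() helpers with the nvif_msec() macro, which re-evaluates its body until the body executes break or the timeout expires, and returns a negative value on timeout. A minimal userspace sketch of the same poll-with-deadline pattern follows; poll_msec(), read_reg() and reg_clear() are illustrative stand-ins, not the kernel API:

    #include <stdio.h>
    #include <time.h>

    /* Illustrative stand-in for a device register read. */
    static unsigned int read_reg(void) { static int n; return n++ < 5 ? 1 : 0; }

    /* Poll until cond() returns nonzero or ms milliseconds elapse.
     * Returns >= 0 on success, -1 on timeout -- the same sign
     * convention the nvif_msec() callers above test against. */
    static long poll_msec(long ms, int (*cond)(void))
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond())
                break;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
                           (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed > ms)
                return -1;
        }
        return 0;  /* simplified: the kernel macro returns time remaining */
    }

    static int reg_clear(void) { return read_reg() == 0; }

    int main(void)
    {
        if (poll_msec(2000, reg_clear) < 0)
            fprintf(stderr, "channel stalled\n");
        return 0;
    }
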
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..e05499d6ed83 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -183,12 +183,12 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
&nfbdev->twod);
if (ret)
return ret;
- ret = RING_SPACE(chan, 59);
+ ret = RING_SPACE(chan, 58);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ FIRE_RING(chan);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a82d9ea7c6fd..f0d96e5da6b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+ ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
@@ -66,7 +66,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
u32 start = bo->bo.mem.start * PAGE_SIZE;
u32 limit = start + bo->bo.mem.size - 1;
- ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+ ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index a03db4368696..412c5be5a9ca 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -131,7 +131,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+ struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret, i;
@@ -213,7 +213,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
int
nv84_fence_create(struct nouveau_drm *drm)
{
- struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
+ struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
struct nv84_fence_priv *priv;
u32 domain;
int ret;
@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
- priv->base.contexts = pfifo->max + 1;
+ priv->base.contexts = fifo->nr;
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..c97395b4a312 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -156,7 +156,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct nouveau_channel *chan = drm->channel;
int ret, format;
- ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+ ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
&nfbdev->twod);
if (ret)
return ret;
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = RING_SPACE(chan, 60);
+ ret = RING_SPACE(chan, 58);
if (ret) {
WARN_ON(1);
nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 80b96844221e..1ee9294eca2e 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -29,29 +29,29 @@
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
- return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+ return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
}
int
nvif_client_suspend(struct nvif_client *client)
{
- return client->driver->suspend(client->base.priv);
+ return client->driver->suspend(client->object.priv);
}
int
nvif_client_resume(struct nvif_client *client)
{
- return client->driver->resume(client->base.priv);
+ return client->driver->resume(client->object.priv);
}
void
nvif_client_fini(struct nvif_client *client)
{
if (client->driver) {
- client->driver->fini(client->base.priv);
+ client->driver->fini(client->object.priv);
client->driver = NULL;
- client->base.parent = NULL;
- nvif_object_fini(&client->base);
+ client->object.client = NULL;
+ nvif_object_fini(&client->object);
}
}
@@ -68,63 +68,39 @@ nvif_drivers[] = {
};
int
-nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
- const char *name, u64 device, const char *cfg, const char *dbg,
- struct nvif_client *client)
+nvif_client_init(const char *driver, const char *name, u64 device,
+ const char *cfg, const char *dbg, struct nvif_client *client)
{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_nop_v0 nop;
+ } args = {};
int ret, i;
- ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+ ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
if (ret)
return ret;
- client->base.parent = &client->base;
- client->base.handle = ~0;
- client->object = &client->base;
+ client->object.client = client;
+ client->object.handle = ~0;
+ client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->super = true;
for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
if (!driver || !strcmp(client->driver->name, driver)) {
ret = client->driver->init(name, device, cfg, dbg,
- &client->base.priv);
+ &client->object.priv);
if (!ret || driver)
break;
}
}
+ if (ret == 0) {
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
+ client->version = args.nop.version;
+ }
+
if (ret)
nvif_client_fini(client);
return ret;
}
-
-static void
-nvif_client_del(struct nvif_client *client)
-{
- nvif_client_fini(client);
- kfree(client);
-}
-
-int
-nvif_client_new(const char *driver, const char *name, u64 device,
- const char *cfg, const char *dbg,
- struct nvif_client **pclient)
-{
- struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (client) {
- int ret = nvif_client_init(nvif_client_del, driver, name,
- device, cfg, dbg, client);
- if (ret) {
- kfree(client);
- client = NULL;
- }
- *pclient = client;
- return ret;
- }
- return -ENOMEM;
-}
-
-void
-nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
-{
- nvif_object_ref(&client->base, (struct nvif_object **)pclient);
-}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 6f72244c52cd..252d8c33215b 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -24,55 +24,32 @@
#include <nvif/device.h>
+u64
+nvif_device_time(struct nvif_device *device)
+{
+ struct nv_device_time_v0 args = {};
+ int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+ &args, sizeof(args));
+ WARN_ON_ONCE(ret != 0);
+ return args.time;
+}
+
void
nvif_device_fini(struct nvif_device *device)
{
- nvif_object_fini(&device->base);
+ nvif_object_fini(&device->object);
}
int
-nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
- u32 handle, u32 oclass, void *data, u32 size,
- struct nvif_device *device)
+nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
+ void *data, u32 size, struct nvif_device *device)
{
- int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
- data, size, &device->base);
+ int ret = nvif_object_init(parent, handle, oclass, data, size,
+ &device->object);
if (ret == 0) {
- device->object = &device->base;
device->info.version = 0;
- ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+ ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
&device->info, sizeof(device->info));
}
return ret;
}
-
-static void
-nvif_device_del(struct nvif_device *device)
-{
- nvif_device_fini(device);
- kfree(device);
-}
-
-int
-nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
- void *data, u32 size, struct nvif_device **pdevice)
-{
- struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (device) {
- int ret = nvif_device_init(parent, nvif_device_del, handle,
- oclass, data, size, device);
- if (ret) {
- kfree(device);
- device = NULL;
- }
- *pdevice = device;
- return ret;
- }
- return -ENOMEM;
-}
-
-void
-nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
-{
- nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
-}
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 8e34748709a0..b0787ff833ef 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -124,7 +124,7 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
}
if (!WARN_ON(notify == NULL)) {
- struct nvif_client *client = nvif_client(notify->object);
+ struct nvif_client *client = notify->object->client;
if (!WARN_ON(notify->size != size)) {
atomic_inc(&notify->putcnt);
if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
@@ -156,7 +156,7 @@ nvif_notify_fini(struct nvif_notify *notify)
if (ret >= 0 && object) {
ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
if (ret == 0) {
- nvif_object_ref(NULL, &notify->object);
+ notify->object = NULL;
kfree((void *)notify->data);
}
}
@@ -164,9 +164,9 @@ nvif_notify_fini(struct nvif_notify *notify)
}
int
-nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
- int (*func)(struct nvif_notify *), bool work, u8 event,
- void *data, u32 size, u32 reply, struct nvif_notify *notify)
+nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
+ bool work, u8 event, void *data, u32 size, u32 reply,
+ struct nvif_notify *notify)
{
struct {
struct nvif_ioctl_v0 ioctl;
@@ -175,11 +175,9 @@ nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
} *args;
int ret = -ENOMEM;
- notify->object = NULL;
- nvif_object_ref(object, &notify->object);
+ notify->object = object;
notify->flags = 0;
atomic_set(&notify->putcnt, 1);
- notify->dtor = dtor;
notify->func = func;
notify->data = NULL;
notify->size = reply;
@@ -211,38 +209,3 @@ done:
nvif_notify_fini(notify);
return ret;
}
-
-static void
-nvif_notify_del(struct nvif_notify *notify)
-{
- nvif_notify_fini(notify);
- kfree(notify);
-}
-
-void
-nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
-{
- BUG_ON(notify != NULL);
- if (*pnotify)
- (*pnotify)->dtor(*pnotify);
- *pnotify = notify;
-}
-
-int
-nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
- bool work, u8 type, void *data, u32 size, u32 reply,
- struct nvif_notify **pnotify)
-{
- struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
- if (notify) {
- int ret = nvif_notify_init(object, nvif_notify_del, func, work,
- type, data, size, reply, notify);
- if (ret) {
- kfree(notify);
- notify = NULL;
- }
- *pnotify = notify;
- return ret;
- }
- return -ENOMEM;
-}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 3ab4e2f8cc12..c3fb6a20f567 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -30,47 +30,71 @@
int
nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
{
- struct nvif_client *client = nvif_client(object);
+ struct nvif_client *client = object->client;
union {
struct nvif_ioctl_v0 v0;
} *args = data;
if (size >= sizeof(*args) && args->v0.version == 0) {
+ if (object != &client->object)
+ args->v0.object = nvif_handle(object);
+ else
+ args->v0.object = 0;
args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- args->v0.path_nr = 0;
- while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
- args->v0.path[args->v0.path_nr++] = object->handle;
- if (object->parent == object)
- break;
- object = object->parent;
- }
} else
return -ENOSYS;
- return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+ return client->driver->ioctl(client->object.priv, client->super,
+ data, size, hack);
+}
+
+void
+nvif_object_sclass_put(struct nvif_sclass **psclass)
+{
+ kfree(*psclass);
+ *psclass = NULL;
}
int
-nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_sclass_v0 sclass;
- } *args;
- u32 size = count * sizeof(args->sclass.oclass[0]);
- int ret;
+ } *args = NULL;
+ int ret, cnt = 0, i;
+ u32 size;
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
- return -ENOMEM;
- args->ioctl.version = 0;
- args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
- args->sclass.version = 0;
- args->sclass.count = count;
+ while (1) {
+ size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
+ if (!(args = kmalloc(size, GFP_KERNEL)))
+ return -ENOMEM;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+ args->sclass.version = 0;
+ args->sclass.count = cnt;
+
+ ret = nvif_object_ioctl(object, args, size, NULL);
+ if (ret == 0 && args->sclass.count <= cnt)
+ break;
+ cnt = args->sclass.count;
+ kfree(args);
+ if (ret != 0)
+ return ret;
+ }
+
+ *psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+ if (*psclass) {
+ for (i = 0; i < args->sclass.count; i++) {
+ (*psclass)[i].oclass = args->sclass.oclass[i].oclass;
+ (*psclass)[i].minver = args->sclass.oclass[i].minver;
+ (*psclass)[i].maxver = args->sclass.oclass[i].maxver;
+ }
+ ret = args->sclass.count;
+ } else {
+ ret = -ENOMEM;
+ }
- memcpy(args->sclass.oclass, oclass, size);
- ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
- ret = ret ? ret : args->sclass.count;
- memcpy(oclass, args->sclass.oclass, size);
kfree(args);
return ret;
}
@@ -145,7 +169,7 @@ void
nvif_object_unmap(struct nvif_object *object)
{
if (object->map.size) {
- struct nvif_client *client = nvif_client(object);
+ struct nvif_client *client = object->client;
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_unmap unmap;
@@ -167,7 +191,7 @@ nvif_object_unmap(struct nvif_object *object)
int
nvif_object_map(struct nvif_object *object)
{
- struct nvif_client *client = nvif_client(object);
+ struct nvif_client *client = object->client;
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_map_v0 map;
@@ -186,119 +210,65 @@ nvif_object_map(struct nvif_object *object)
return ret;
}
-struct ctor {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_new_v0 new;
-};
-
void
nvif_object_fini(struct nvif_object *object)
{
- struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
- if (object->parent) {
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_del del;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_DEL,
- };
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ };
- nvif_object_unmap(object);
- nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (object->data) {
- object->size = 0;
- object->data = NULL;
- kfree(ctor);
- }
- nvif_object_ref(NULL, &object->parent);
- }
+ if (!object->client)
+ return;
+
+ nvif_object_unmap(object);
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ object->client = NULL;
}
int
-nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
- u32 handle, u32 oclass, void *data, u32 size,
- struct nvif_object *object)
+nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
+ void *data, u32 size, struct nvif_object *object)
{
- struct ctor *ctor;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ } *args;
int ret = 0;
- object->parent = NULL;
- object->object = object;
- nvif_object_ref(parent, &object->parent);
- kref_init(&object->refcount);
+ object->client = NULL;
object->handle = handle;
object->oclass = oclass;
- object->data = NULL;
- object->size = 0;
- object->dtor = dtor;
object->map.ptr = NULL;
object->map.size = 0;
- if (object->parent) {
- if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+ if (parent) {
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
nvif_object_fini(object);
return -ENOMEM;
}
- object->data = ctor->new.data;
- object->size = size;
- memcpy(object->data, data, size);
- ctor->ioctl.version = 0;
- ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
- ctor->new.version = 0;
- ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
- ctor->new.token = (unsigned long)(void *)object;
- ctor->new.handle = handle;
- ctor->new.oclass = oclass;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_NEW;
+ args->new.version = 0;
+ args->new.route = parent->client->route;
+ args->new.token = nvif_handle(object);
+ args->new.object = nvif_handle(object);
+ args->new.handle = handle;
+ args->new.oclass = oclass;
- ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
- object->size, &object->priv);
+ memcpy(args->new.data, data, size);
+ ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
+ &object->priv);
+ memcpy(data, args->new.data, size);
+ kfree(args);
+ if (ret == 0)
+ object->client = parent->client;
}
if (ret)
nvif_object_fini(object);
return ret;
}
-
-static void
-nvif_object_del(struct nvif_object *object)
-{
- nvif_object_fini(object);
- kfree(object);
-}
-
-int
-nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
- void *data, u32 size, struct nvif_object **pobject)
-{
- struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
- if (object) {
- int ret = nvif_object_init(parent, nvif_object_del, handle,
- oclass, data, size, object);
- if (ret) {
- kfree(object);
- object = NULL;
- }
- *pobject = object;
- return ret;
- }
- return -ENOMEM;
-}
-
-static void
-nvif_object_put(struct kref *kref)
-{
- struct nvif_object *object =
- container_of(kref, typeof(*object), refcount);
- object->dtor(object);
-}
-
-void
-nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
-{
- if (object)
- kref_get(&object->refcount);
- if (*pobject)
- kref_put(&(*pobject)->refcount, nvif_object_put);
- *pobject = object;
-}
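
The rewritten nvif_object_sclass_get() above no longer takes a caller-sized array; it asks the ioctl for the element count, reallocates, and retries until the reply fits. A standalone sketch of that query-size-then-fetch loop, where query_items() is an invented stand-in for the sclass ioctl:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical back end: reports how many items exist and copies
     * at most 'cap' of them, mirroring the sclass ioctl's behaviour. */
    static int query_items(int *buf, int cap, int *count)
    {
        static const int items[] = { 1, 2, 3, 4 };
        *count = 4;
        memcpy(buf, items, (cap < 4 ? cap : 4) * sizeof(*buf));
        return 0;
    }

    /* Grow the buffer until the reported count fits, as the new
     * nvif_object_sclass_get() does. Returns count, negative on error. */
    static int fetch_items(int **pitems)
    {
        int cnt = 0, count, ret;
        int *buf = NULL;
        for (;;) {
            free(buf);
            if (!(buf = malloc((cnt ? cnt : 1) * sizeof(*buf))))
                return -1;
            ret = query_items(buf, cnt, &count);
            if (ret) { free(buf); return ret; }
            if (count <= cnt)
                break;
            cnt = count;
        }
        *pitems = buf;
        return count;
    }

    int main(void)
    {
        int *items = NULL;
        int n = fetch_items(&items);
        free(items);
        return n < 0;
    }
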
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb2069113..7f66963f305c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
nvkm-y := nvkm/core/client.o
-nvkm-y += nvkm/core/engctx.o
nvkm-y += nvkm/core/engine.o
nvkm-y += nvkm/core/enum.o
nvkm-y += nvkm/core/event.o
nvkm-y += nvkm/core/gpuobj.o
-nvkm-y += nvkm/core/handle.o
nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/memory.o
nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
nvkm-y += nvkm/core/notify.o
nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/oproxy.o
nvkm-y += nvkm/core/option.o
-nvkm-y += nvkm/core/parent.o
-nvkm-y += nvkm/core/printk.o
nvkm-y += nvkm/core/ramht.o
nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f8f295..297e1e953fa6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
*/
#include <core/client.h>
#include <core/device.h>
-#include <core/handle.h>
#include <core/notify.h>
#include <core/option.h>
@@ -91,7 +90,7 @@ int
nvkm_client_notify_new(struct nvkm_object *object,
struct nvkm_event *event, void *data, u32 size)
{
- struct nvkm_client *client = nvkm_client(object);
+ struct nvkm_client *client = object->client;
struct nvkm_client_notify *notify;
union {
struct nvif_notify_req_v0 v0;
@@ -111,11 +110,11 @@ nvkm_client_notify_new(struct nvkm_object *object,
if (!notify)
return -ENOMEM;
- nv_ioctl(client, "notify new size %d\n", size);
+ nvif_ioctl(object, "notify new size %d\n", size);
if (nvif_unpack(req->v0, 0, 0, true)) {
- nv_ioctl(client, "notify new vers %d reply %d route %02x "
- "token %llx\n", req->v0.version,
- req->v0.reply, req->v0.route, req->v0.token);
+ nvif_ioctl(object, "notify new vers %d reply %d route %02x "
+ "token %llx\n", req->v0.version,
+ req->v0.reply, req->v0.route, req->v0.token);
notify->version = req->v0.version;
notify->size = sizeof(notify->rep.v0);
notify->rep.v0.version = req->v0.version;
@@ -146,10 +145,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
} *args = data;
int ret;
- nv_ioctl(object, "client devlist size %d\n", size);
+ nvif_ioctl(object, "client devlist size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "client devlist vers %d count %d\n",
- args->v0.version, args->v0.count);
+ nvif_ioctl(object, "client devlist vers %d count %d\n",
+ args->v0.version, args->v0.count);
if (size == sizeof(args->v0.device[0]) * args->v0.count) {
ret = nvkm_device_list(args->v0.device, args->v0.count);
if (ret >= 0) {
@@ -176,91 +175,134 @@ nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return -EINVAL;
}
-static void
-nvkm_client_dtor(struct nvkm_object *object)
+static int
+nvkm_client_child_new(const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
{
- struct nvkm_client *client = (void *)object;
- int i;
- for (i = 0; i < ARRAY_SIZE(client->notify); i++)
- nvkm_client_notify_del(client, i);
- nvkm_object_ref(NULL, &client->device);
- nvkm_handle_destroy(client->root);
- nvkm_namedb_destroy(&client->namedb);
+ return oclass->base.ctor(oclass, data, size, pobject);
}
-static struct nvkm_oclass
-nvkm_client_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .dtor = nvkm_client_dtor,
- .mthd = nvkm_client_mthd,
- },
-};
-
-int
-nvkm_client_create_(const char *name, u64 devname, const char *cfg,
- const char *dbg, int length, void **pobject)
+static int
+nvkm_client_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
{
- struct nvkm_object *device;
- struct nvkm_client *client;
- int ret;
+ const struct nvkm_sclass *sclass;
+
+ switch (index) {
+ case 0: sclass = &nvkm_udevice_sclass; break;
+ default:
+ return -EINVAL;
+ }
- device = (void *)nvkm_device_find(devname);
- if (!device)
- return -ENODEV;
+ oclass->ctor = nvkm_client_child_new;
+ oclass->base = *sclass;
+ return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_client_object_func = {
+ .mthd = nvkm_client_mthd,
+ .sclass = nvkm_client_child_get,
+};
- ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass,
- NV_CLIENT_CLASS, NULL,
- (1ULL << NVDEV_ENGINE_DEVICE),
- length, pobject);
- client = *pobject;
- if (ret)
- return ret;
+void
+nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
+{
+ if (!RB_EMPTY_NODE(&object->node))
+ rb_erase(&object->node, &client->objroot);
+}
- ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client),
- &client->root);
- if (ret)
- return ret;
+bool
+nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
+{
+ struct rb_node **ptr = &client->objroot.rb_node;
+ struct rb_node *parent = NULL;
- /* prevent init/fini being called, OS is in charge of this */
- atomic_set(&nv_object(client)->usecount, 2);
+ while (*ptr) {
+ struct nvkm_object *this =
+ container_of(*ptr, typeof(*this), node);
+ parent = *ptr;
+ if (object->object < this->object)
+ ptr = &parent->rb_left;
+ else
+ if (object->object > this->object)
+ ptr = &parent->rb_right;
+ else
+ return false;
+ }
- nvkm_object_ref(device, &client->device);
- snprintf(client->name, sizeof(client->name), "%s", name);
- client->debug = nvkm_dbgopt(dbg, "CLIENT");
- return 0;
+ rb_link_node(&object->node, parent, ptr);
+ rb_insert_color(&object->node, &client->objroot);
+ return true;
}
-int
-nvkm_client_init(struct nvkm_client *client)
+struct nvkm_object *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
{
- int ret;
- nv_debug(client, "init running\n");
- ret = nvkm_handle_init(client->root);
- nv_debug(client, "init completed with %d\n", ret);
- return ret;
+ struct rb_node *node = client->objroot.rb_node;
+ while (node) {
+ struct nvkm_object *object =
+ container_of(node, typeof(*object), node);
+ if (handle < object->object)
+ node = node->rb_left;
+ else
+ if (handle > object->object)
+ node = node->rb_right;
+ else
+ return object;
+ }
+ return NULL;
}
int
nvkm_client_fini(struct nvkm_client *client, bool suspend)
{
+ struct nvkm_object *object = &client->object;
const char *name[2] = { "fini", "suspend" };
- int ret, i;
- nv_debug(client, "%s running\n", name[suspend]);
- nv_debug(client, "%s notify\n", name[suspend]);
+ int i;
+ nvif_debug(object, "%s notify\n", name[suspend]);
for (i = 0; i < ARRAY_SIZE(client->notify); i++)
nvkm_client_notify_put(client, i);
- nv_debug(client, "%s object\n", name[suspend]);
- ret = nvkm_handle_fini(client->root, suspend);
- nv_debug(client, "%s completed with %d\n", name[suspend], ret);
- return ret;
+ return nvkm_object_fini(&client->object, suspend);
+}
+
+int
+nvkm_client_init(struct nvkm_client *client)
+{
+ return nvkm_object_init(&client->object);
+}
+
+void
+nvkm_client_del(struct nvkm_client **pclient)
+{
+ struct nvkm_client *client = *pclient;
+ int i;
+ if (client) {
+ nvkm_client_fini(client, false);
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_del(client, i);
+ nvkm_object_dtor(&client->object);
+ kfree(*pclient);
+ *pclient = NULL;
+ }
}
-const char *
-nvkm_client_name(void *obj)
+int
+nvkm_client_new(const char *name, u64 device, const char *cfg,
+ const char *dbg, struct nvkm_client **pclient)
{
- const char *client_name = "unknown";
- struct nvkm_client *client = nvkm_client(obj);
- if (client)
- client_name = client->name;
- return client_name;
+ struct nvkm_oclass oclass = {};
+ struct nvkm_client *client;
+
+ if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
+ return -ENOMEM;
+ oclass.client = client;
+
+ nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+ snprintf(client->name, sizeof(client->name), "%s", name);
+ client->device = device;
+ client->debug = nvkm_dbgopt(dbg, "CLIENT");
+ client->objroot = RB_ROOT;
+ client->dmaroot = RB_ROOT;
+ return 0;
}
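
The client rework above drops the namedb in favour of a handle-keyed rbtree (nvkm_client_insert()/search()). The same descend-left-or-right walk can be sketched standalone; here a plain binary search tree stands in for <linux/rbtree.h>, and the struct and field names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for nvkm_object: keyed by a 64-bit handle. */
    struct object {
        unsigned long long handle;
        struct object *left, *right;
    };

    /* Insert keyed by handle; reject duplicates, as nvkm_client_insert()
     * returns false when the handle already exists. */
    static int insert(struct object **root, struct object *obj)
    {
        struct object **ptr = root;
        while (*ptr) {
            if (obj->handle < (*ptr)->handle)
                ptr = &(*ptr)->left;
            else if (obj->handle > (*ptr)->handle)
                ptr = &(*ptr)->right;
            else
                return 0;
        }
        *ptr = obj;
        return 1;
    }

    /* Descend on the key until found, as nvkm_client_search() does. */
    static struct object *search(struct object *root, unsigned long long handle)
    {
        while (root) {
            if (handle < root->handle)
                root = root->left;
            else if (handle > root->handle)
                root = root->right;
            else
                return root;
        }
        return NULL;
    }

    int main(void)
    {
        struct object *root = NULL, *obj = calloc(1, sizeof(*obj));
        obj->handle = 42;
        insert(&root, obj);
        printf("found: %d\n", search(root, 42) != NULL);
        free(obj);
        return 0;
    }
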
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
deleted file mode 100644
index fb2acbca75d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/engctx.h>
-#include <core/engine.h>
-#include <core/client.h>
-
-static inline int
-nvkm_engctx_exists(struct nvkm_object *parent,
- struct nvkm_engine *engine, void **pobject)
-{
- struct nvkm_engctx *engctx;
- struct nvkm_object *parctx;
-
- list_for_each_entry(engctx, &engine->contexts, head) {
- parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
- if (parctx == parent) {
- atomic_inc(&nv_object(engctx)->refcount);
- *pobject = engctx;
- return 1;
- }
- }
-
- return 0;
-}
-
-int
-nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
- struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
- u32 size, u32 align, u32 flags, int length, void **pobject)
-{
- struct nvkm_client *client = nvkm_client(parent);
- struct nvkm_engine *engine = nv_engine(engobj);
- struct nvkm_object *engctx;
- unsigned long save;
- int ret;
-
- /* check if this engine already has a context for the parent object,
- * and reference it instead of creating a new one
- */
- spin_lock_irqsave(&engine->lock, save);
- ret = nvkm_engctx_exists(parent, engine, pobject);
- spin_unlock_irqrestore(&engine->lock, save);
- if (ret)
- return ret;
-
- /* create the new context, supports creating both raw objects and
- * objects backed by instance memory
- */
- if (size) {
- ret = nvkm_gpuobj_create_(parent, engobj, oclass,
- NV_ENGCTX_CLASS, pargpu, size,
- align, flags, length, pobject);
- } else {
- ret = nvkm_object_create_(parent, engobj, oclass,
- NV_ENGCTX_CLASS, length, pobject);
- }
-
- engctx = *pobject;
- if (ret)
- return ret;
-
- /* must take the lock again and re-check a context doesn't already
- * exist (in case of a race) - the lock had to be dropped before as
- * it's not possible to allocate the object with it held.
- */
- spin_lock_irqsave(&engine->lock, save);
- ret = nvkm_engctx_exists(parent, engine, pobject);
- if (ret) {
- spin_unlock_irqrestore(&engine->lock, save);
- nvkm_object_ref(NULL, &engctx);
- return ret;
- }
-
- if (client->vm)
- atomic_inc(&client->vm->engref[nv_engidx(engine)]);
- list_add(&nv_engctx(engctx)->head, &engine->contexts);
- nv_engctx(engctx)->addr = ~0ULL;
- spin_unlock_irqrestore(&engine->lock, save);
- return 0;
-}
-
-void
-nvkm_engctx_destroy(struct nvkm_engctx *engctx)
-{
- struct nvkm_engine *engine = engctx->gpuobj.object.engine;
- struct nvkm_client *client = nvkm_client(engctx);
- unsigned long save;
-
- nvkm_gpuobj_unmap(&engctx->vma);
- spin_lock_irqsave(&engine->lock, save);
- list_del(&engctx->head);
- spin_unlock_irqrestore(&engine->lock, save);
-
- if (client->vm)
- atomic_dec(&client->vm->engref[nv_engidx(engine)]);
-
- if (engctx->gpuobj.size)
- nvkm_gpuobj_destroy(&engctx->gpuobj);
- else
- nvkm_object_destroy(&engctx->gpuobj.object);
-}
-
-int
-nvkm_engctx_init(struct nvkm_engctx *engctx)
-{
- struct nvkm_object *object = nv_object(engctx);
- struct nvkm_subdev *subdev = nv_subdev(object->engine);
- struct nvkm_object *parent;
- struct nvkm_subdev *pardev;
- int ret;
-
- ret = nvkm_gpuobj_init(&engctx->gpuobj);
- if (ret)
- return ret;
-
- parent = nv_pclass(object->parent, NV_PARENT_CLASS);
- pardev = nv_subdev(parent->engine);
- if (nv_parent(parent)->context_attach) {
- mutex_lock(&pardev->mutex);
- ret = nv_parent(parent)->context_attach(parent, object);
- mutex_unlock(&pardev->mutex);
- }
-
- if (ret) {
- nv_error(parent, "failed to attach %s context, %d\n",
- subdev->name, ret);
- return ret;
- }
-
- nv_debug(parent, "attached %s context\n", subdev->name);
- return 0;
-}
-
-int
-nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
-{
- struct nvkm_object *object = nv_object(engctx);
- struct nvkm_subdev *subdev = nv_subdev(object->engine);
- struct nvkm_object *parent;
- struct nvkm_subdev *pardev;
- int ret = 0;
-
- parent = nv_pclass(object->parent, NV_PARENT_CLASS);
- pardev = nv_subdev(parent->engine);
- if (nv_parent(parent)->context_detach) {
- mutex_lock(&pardev->mutex);
- ret = nv_parent(parent)->context_detach(parent, suspend, object);
- mutex_unlock(&pardev->mutex);
- }
-
- if (ret) {
- nv_error(parent, "failed to detach %s context, %d\n",
- subdev->name, ret);
- return ret;
- }
-
- nv_debug(parent, "detached %s context\n", subdev->name);
- return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
-}
-
-int
-_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_engctx *engctx;
- int ret;
-
- ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
- NVOBJ_FLAG_ZERO_ALLOC, &engctx);
- *pobject = nv_object(engctx);
- return ret;
-}
-
-void
-_nvkm_engctx_dtor(struct nvkm_object *object)
-{
- nvkm_engctx_destroy(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_init(struct nvkm_object *object)
-{
- return nvkm_engctx_init(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
-{
- return nvkm_engctx_fini(nv_engctx(object), suspend);
-}
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
- struct nvkm_engctx *engctx;
- unsigned long flags;
-
- spin_lock_irqsave(&engine->lock, flags);
- list_for_each_entry(engctx, &engine->contexts, head) {
- if (engctx->addr == addr) {
- engctx->save = flags;
- return nv_object(engctx);
- }
- }
- spin_unlock_irqrestore(&engine->lock, flags);
- return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
- if (object) {
- struct nvkm_engine *engine = nv_engine(object->engine);
- struct nvkm_engctx *engctx = nv_engctx(object);
- spin_unlock_irqrestore(&engine->lock, engctx->save);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 60820173c6aa..8a7bae7bd995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -25,51 +25,141 @@
#include <core/device.h>
#include <core/option.h>
+#include <subdev/fb.h>
+
+void
+nvkm_engine_unref(struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine = *pengine;
+ if (engine) {
+ mutex_lock(&engine->subdev.mutex);
+ if (--engine->usecount == 0)
+ nvkm_subdev_fini(&engine->subdev, false);
+ mutex_unlock(&engine->subdev.mutex);
+ *pengine = NULL;
+ }
+}
+
struct nvkm_engine *
-nvkm_engine(void *obj, int idx)
+nvkm_engine_ref(struct nvkm_engine *engine)
{
- obj = nvkm_subdev(obj, idx);
- if (obj && nv_iclass(obj, NV_ENGINE_CLASS))
- return nv_engine(obj);
- return NULL;
+ if (engine) {
+ mutex_lock(&engine->subdev.mutex);
+ if (++engine->usecount == 1) {
+ int ret = nvkm_subdev_init(&engine->subdev);
+ if (ret) {
+ engine->usecount--;
+ mutex_unlock(&engine->subdev.mutex);
+ return ERR_PTR(ret);
+ }
+ }
+ mutex_unlock(&engine->subdev.mutex);
+ }
+ return engine;
}
-int
-nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
- struct nvkm_oclass *oclass, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+void
+nvkm_engine_tile(struct nvkm_engine *engine, int region)
{
- struct nvkm_engine *engine;
- int ret;
+ struct nvkm_fb *fb = engine->subdev.device->fb;
+ if (engine->func->tile)
+ engine->func->tile(engine, region, &fb->tile.region[region]);
+}
- ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
- iname, fname, length, pobject);
- engine = *pobject;
- if (ret)
- return ret;
+static void
+nvkm_engine_intr(struct nvkm_subdev *subdev)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ if (engine->func->intr)
+ engine->func->intr(engine);
+}
- if (parent) {
- struct nvkm_device *device = nv_device(parent);
- int engidx = nv_engidx(engine);
+static int
+nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ if (engine->func->fini)
+ return engine->func->fini(engine, suspend);
+ return 0;
+}
- if (device->disable_mask & (1ULL << engidx)) {
- if (!nvkm_boolopt(device->cfgopt, iname, false)) {
- nv_debug(engine, "engine disabled by hw/fw\n");
- return -ENODEV;
- }
+static int
+nvkm_engine_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ struct nvkm_fb *fb = subdev->device->fb;
+ int ret = 0, i;
+ s64 time;
- nv_warn(engine, "ignoring hw/fw engine disable\n");
- }
+ if (!engine->usecount) {
+ nvkm_trace(subdev, "init skipped, engine has no users\n");
+ return ret;
+ }
- if (!nvkm_boolopt(device->cfgopt, iname, enable)) {
- if (!enable)
- nv_warn(engine, "disabled, %s=1 to enable\n", iname);
- return -ENODEV;
+ if (engine->func->oneinit && !engine->subdev.oneinit) {
+ nvkm_trace(subdev, "one-time init running...\n");
+ time = ktime_to_us(ktime_get());
+ ret = engine->func->oneinit(engine);
+ if (ret) {
+ nvkm_trace(subdev, "one-time init failed, %d\n", ret);
+ return ret;
}
+
+ engine->subdev.oneinit = true;
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+ }
+
+ if (engine->func->init)
+ ret = engine->func->init(engine);
+
+ for (i = 0; fb && i < fb->tile.regions; i++)
+ nvkm_engine_tile(engine, i);
+ return ret;
+}
+
+static void *
+nvkm_engine_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ if (engine->func->dtor)
+ return engine->func->dtor(engine);
+ return engine;
+}
+
+static const struct nvkm_subdev_func
+nvkm_engine_func = {
+ .dtor = nvkm_engine_dtor,
+ .init = nvkm_engine_init,
+ .fini = nvkm_engine_fini,
+ .intr = nvkm_engine_intr,
+};
+
+int
+nvkm_engine_ctor(const struct nvkm_engine_func *func,
+ struct nvkm_device *device, int index, u32 pmc_enable,
+ bool enable, struct nvkm_engine *engine)
+{
+ nvkm_subdev_ctor(&nvkm_engine_func, device, index,
+ pmc_enable, &engine->subdev);
+ engine->func = func;
+
+ if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+ nvkm_debug(&engine->subdev, "disabled\n");
+ return -ENODEV;
}
- INIT_LIST_HEAD(&engine->contexts);
spin_lock_init(&engine->lock);
return 0;
}
+
+int
+nvkm_engine_new_(const struct nvkm_engine_func *func,
+ struct nvkm_device *device, int index, u32 pmc_enable,
+ bool enable, struct nvkm_engine **pengine)
+{
+ if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_engine_ctor(func, device, index, pmc_enable,
+ enable, *pengine);
+}
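
nvkm_engine_ref()/unref() above gate subdev init/fini on a usecount taken under the subdev mutex, so an engine is powered up on first reference and shut down on the last. A compact userspace sketch of that idea, with pthreads for the lock and engine_start()/stop() as invented placeholders:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int usecount;

    static void engine_start(void) { puts("init"); }  /* placeholder */
    static void engine_stop(void)  { puts("fini"); }  /* placeholder */

    /* First reference initialises the engine, as nvkm_engine_ref() does. */
    static void engine_ref(void)
    {
        pthread_mutex_lock(&lock);
        if (++usecount == 1)
            engine_start();
        pthread_mutex_unlock(&lock);
    }

    /* Last reference shuts it down, as nvkm_engine_unref() does. */
    static void engine_unref(void)
    {
        pthread_mutex_lock(&lock);
        if (--usecount == 0)
            engine_stop();
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        engine_ref();   /* init runs */
        engine_ref();   /* no-op, engine already running */
        engine_unref();
        engine_unref(); /* fini runs */
        return 0;
    }
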
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index 4f92bfc13d6b..b9581feb24cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -38,29 +38,19 @@ nvkm_enum_find(const struct nvkm_enum *en, u32 value)
return NULL;
}
-const struct nvkm_enum *
-nvkm_enum_print(const struct nvkm_enum *en, u32 value)
-{
- en = nvkm_enum_find(en, value);
- if (en)
- pr_cont("%s", en->name);
- else
- pr_cont("(unknown enum 0x%08x)", value);
- return en;
-}
-
void
-nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value)
+nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
{
- while (bf->name) {
+ bool space = false;
+ while (size >= 1 && bf->name) {
if (value & bf->mask) {
- pr_cont(" %s", bf->name);
- value &= ~bf->mask;
+ int this = snprintf(data, size, "%s%s",
+ space ? " " : "", bf->name);
+ size -= this;
+ data += this;
+ space = true;
}
-
bf++;
}
-
- if (value)
- pr_cont(" (unknown bits 0x%08x)", value);
+ data[0] = '\0';
}
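
nvkm_snprintbf() above replaces pr_cont()-based bitfield printing with formatting into a caller-supplied buffer. A self-contained sketch of the same loop; the interrupt bit table is invented for illustration, and the truncation check is slightly stricter than the kernel version:

    #include <stdio.h>

    struct bitfield { unsigned int mask; const char *name; };

    /* Append the name of every set bitfield, space-separated. */
    static void snprintbf(char *data, int size,
                          const struct bitfield *bf, unsigned int value)
    {
        int space = 0;
        data[0] = '\0';
        while (size > 1 && bf->name) {
            if (value & bf->mask) {
                int n = snprintf(data, size, "%s%s",
                                 space ? " " : "", bf->name);
                if (n >= size)  /* truncated; stop cleanly */
                    break;
                data += n;
                size -= n;
                space = 1;
            }
            bf++;
        }
    }

    int main(void)
    {
        static const struct bitfield intr[] = {  /* invented example bits */
            { 0x00000001, "FAULT" },
            { 0x00000002, "STALL" },
            { 0, NULL },
        };
        char msg[64];
        snprintbf(msg, sizeof(msg), intr, 0x3);
        printf("%s\n", msg);  /* prints: FAULT STALL */
        return 0;
    }
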
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 2eba801aae6f..c3a790eb8d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -28,240 +28,205 @@
#include <subdev/bar.h>
#include <subdev/mmu.h>
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+/* fast-path, where backend is able to provide direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
- int i;
-
- if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
- for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
- }
-
- if (gpuobj->node)
- nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
+ return ioread32_native(gpuobj->map + offset);
+}
- if (gpuobj->heap.block_size)
- nvkm_mm_fini(&gpuobj->heap);
+static void
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+ iowrite32_native(data, gpuobj->map + offset);
+}
- nvkm_object_destroy(&gpuobj->object);
+/* accessor functions for gpuobjs allocated directly from instmem */
+static u32
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+ return nvkm_ro32(gpuobj->memory, offset);
}
-int
-nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pclass,
- struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
- int length, void **pobject)
+static void
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
- struct nvkm_instmem *imem = nvkm_instmem(parent);
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nvkm_gpuobj *gpuobj;
- struct nvkm_mm *heap = NULL;
- int ret, i;
- u64 addr;
+ nvkm_wo32(gpuobj->memory, offset, data);
+}
- *pobject = NULL;
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
+{
+ gpuobj->func = &nvkm_gpuobj_heap;
+ nvkm_done(gpuobj->memory);
+}
- if (pargpu) {
- while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
- if (nv_gpuobj(pargpu)->heap.block_size)
- break;
- pargpu = pargpu->parent;
- }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+ .release = nvkm_gpuobj_heap_release,
+ .rd32 = nvkm_gpuobj_rd32_fast,
+ .wr32 = nvkm_gpuobj_wr32_fast,
+};
- if (unlikely(pargpu == NULL)) {
- nv_error(parent, "no gpuobj heap\n");
- return -EINVAL;
- }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+ .release = nvkm_gpuobj_heap_release,
+ .rd32 = nvkm_gpuobj_heap_rd32,
+ .wr32 = nvkm_gpuobj_heap_wr32,
+};
- addr = nv_gpuobj(pargpu)->addr;
- heap = &nv_gpuobj(pargpu)->heap;
- atomic_inc(&parent->refcount);
- } else {
- ret = imem->alloc(imem, parent, size, align, &parent);
- pargpu = parent;
- if (ret)
- return ret;
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+ gpuobj->map = nvkm_kmap(gpuobj->memory);
+ if (likely(gpuobj->map))
+ gpuobj->func = &nvkm_gpuobj_heap_fast;
+ else
+ gpuobj->func = &nvkm_gpuobj_heap_slow;
+ return gpuobj->map;
+}
- addr = nv_memobj(pargpu)->addr;
- size = nv_memobj(pargpu)->size;
-
- if (bar && bar->alloc) {
- struct nvkm_instobj *iobj = (void *)parent;
- struct nvkm_mem **mem = (void *)(iobj + 1);
- struct nvkm_mem *node = *mem;
- if (!bar->alloc(bar, parent, node, &pargpu)) {
- nvkm_object_ref(NULL, &parent);
- parent = pargpu;
- }
- }
- }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+ .acquire = nvkm_gpuobj_heap_acquire,
+};
- ret = nvkm_object_create_(parent, engine, oclass, pclass |
- NV_GPUOBJ_CLASS, length, pobject);
- nvkm_object_ref(NULL, &parent);
- gpuobj = *pobject;
- if (ret)
- return ret;
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+ return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+}
- gpuobj->parent = pargpu;
- gpuobj->flags = flags;
- gpuobj->addr = addr;
- gpuobj->size = size;
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+ nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
- if (heap) {
- ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
- &gpuobj->node);
- if (ret)
- return ret;
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+ gpuobj->func = &nvkm_gpuobj_func;
+ nvkm_done(gpuobj->parent);
+}
- gpuobj->addr += gpuobj->node->offset;
- }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_fast = {
+ .release = nvkm_gpuobj_release,
+ .rd32 = nvkm_gpuobj_rd32_fast,
+ .wr32 = nvkm_gpuobj_wr32_fast,
+};
- if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
- ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
- if (ret)
- return ret;
- }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
+ .release = nvkm_gpuobj_release,
+ .rd32 = nvkm_gpuobj_rd32,
+ .wr32 = nvkm_gpuobj_wr32,
+};
- if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
- for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+ gpuobj->map = nvkm_kmap(gpuobj->parent);
+ if (likely(gpuobj->map)) {
+ gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
+ gpuobj->func = &nvkm_gpuobj_fast;
+ } else {
+ gpuobj->func = &nvkm_gpuobj_slow;
}
-
- return ret;
+ return gpuobj->map;
}
-struct nvkm_gpuobj_class {
- struct nvkm_object *pargpu;
- u64 size;
- u32 align;
- u32 flags;
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+ .acquire = nvkm_gpuobj_acquire,
};
static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+ struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
- struct nvkm_gpuobj_class *args = data;
- struct nvkm_gpuobj *object;
+ u32 offset;
int ret;
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
- args->size, args->align, args->flags,
- &object);
- *pobject = nv_object(object);
- if (ret)
- return ret;
+ if (parent) {
+ if (align >= 0) {
+ ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+ max(align, 1), &gpuobj->node);
+ } else {
+ ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+ -align, &gpuobj->node);
+ }
+ if (ret)
+ return ret;
- return 0;
-}
+ gpuobj->parent = parent;
+ gpuobj->func = &nvkm_gpuobj_func;
+ gpuobj->addr = parent->addr + gpuobj->node->offset;
+ gpuobj->size = gpuobj->node->length;
-void
-_nvkm_gpuobj_dtor(struct nvkm_object *object)
-{
- nvkm_gpuobj_destroy(nv_gpuobj(object));
-}
-
-int
-_nvkm_gpuobj_init(struct nvkm_object *object)
-{
- return nvkm_gpuobj_init(nv_gpuobj(object));
-}
+ if (zero) {
+ nvkm_kmap(gpuobj);
+ for (offset = 0; offset < gpuobj->size; offset += 4)
+ nvkm_wo32(gpuobj, offset, 0x00000000);
+ nvkm_done(gpuobj);
+ }
+ } else {
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+ abs(align), zero, &gpuobj->memory);
+ if (ret)
+ return ret;
-int
-_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
-{
- return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
-}
+ gpuobj->func = &nvkm_gpuobj_heap;
+ gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+ gpuobj->size = nvkm_memory_size(gpuobj->memory);
+ }
-u32
-_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
- struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
- if (gpuobj->node)
- addr += gpuobj->node->offset;
- return pfuncs->rd32(gpuobj->parent, addr);
+ return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
}
void
-_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
- struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
- if (gpuobj->node)
- addr += gpuobj->node->offset;
- pfuncs->wr32(gpuobj->parent, addr, data);
+ struct nvkm_gpuobj *gpuobj = *pgpuobj;
+ if (gpuobj) {
+ if (gpuobj->parent)
+ nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+ nvkm_mm_fini(&gpuobj->heap);
+ nvkm_memory_del(&gpuobj->memory);
+ kfree(*pgpuobj);
+ *pgpuobj = NULL;
+ }
}
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
- .handle = 0x00000000,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpuobj_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
- },
-};
-
int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
- u32 size, u32 align, u32 flags,
- struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+ struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_object *engine = parent;
- struct nvkm_gpuobj_class args = {
- .pargpu = pargpu,
- .size = size,
- .align = align,
- .flags = flags,
- };
-
- if (!nv_iclass(engine, NV_SUBDEV_CLASS))
- engine = &engine->engine->subdev.object;
- BUG_ON(engine == NULL);
-
- return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
- &args, sizeof(args),
- (struct nvkm_object **)pgpuobj);
-}
+ struct nvkm_gpuobj *gpuobj;
+ int ret;
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
-{
- struct nvkm_bar *bar = nvkm_bar(gpuobj);
- int ret = -EINVAL;
-
- if (bar && bar->umap) {
- struct nvkm_instobj *iobj = (void *)
- nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
- struct nvkm_mem **mem = (void *)(iobj + 1);
- ret = bar->umap(bar, *mem, access, vma);
- }
+ if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+ return -ENOMEM;
+ ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+ if (ret)
+ nvkm_gpuobj_del(pgpuobj);
return ret;
}
int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
- u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+ u32 access, struct nvkm_vma *vma)
{
- struct nvkm_instobj *iobj = (void *)
- nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
- struct nvkm_mem **mem = (void *)(iobj + 1);
- int ret;
-
- ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret)
- return ret;
-
- nvkm_vm_map(vma, *mem);
- return 0;
+ struct nvkm_memory *memory = gpuobj->memory;
+ int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+ if (ret == 0)
+ nvkm_memory_map(memory, vma, 0);
+ return ret;
}
void
@@ -278,39 +243,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma *vma)
* anywhere else.
*/
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
- struct nvkm_gpuobj *gpuobj = (void *)object;
- nvkm_object_ref(NULL, &gpuobj->parent);
- nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
- .handle = NV_GPUOBJ_CLASS,
- .ofuncs = &(struct nvkm_ofuncs) {
- .dtor = nvkm_gpudup_dtor,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
- },
-};
-
int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
- struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *gpuobj;
- int ret;
-
- ret = nvkm_object_create(parent, &parent->engine->subdev.object,
- &nvkm_gpudup_oclass, 0, &gpuobj);
- *pgpuobj = gpuobj;
- if (ret)
- return ret;
+ if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+ return -ENOMEM;
- nvkm_object_ref(nv_object(base), &gpuobj->parent);
- gpuobj->addr = base->addr;
- gpuobj->size = base->size;
+ (*pgpuobj)->addr = nvkm_memory_addr(memory);
+ (*pgpuobj)->size = nvkm_memory_size(memory);
return 0;
}
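
The rewritten gpuobj accessors above pick a function table at acquire time: a direct-pointer fast path when nvkm_kmap() succeeds, and an indirect fall-back otherwise. A tiny sketch of that acquire-time vtable swap; the types and names are illustrative, not the nvkm API:

    #include <stdio.h>

    struct buf;
    struct buf_func {
        unsigned int (*rd32)(struct buf *, unsigned int);
    };

    struct buf {
        const struct buf_func *func;
        unsigned int *map;        /* non-NULL when directly mappable */
        unsigned int backing[4];  /* slow-path storage */
    };

    static unsigned int rd32_fast(struct buf *b, unsigned int i)
    { return b->map[i]; }            /* direct pointer access */

    static unsigned int rd32_slow(struct buf *b, unsigned int i)
    { return b->backing[i]; }        /* indirect fall-back */

    static const struct buf_func buf_fast = { .rd32 = rd32_fast };
    static const struct buf_func buf_slow = { .rd32 = rd32_slow };

    /* Choose the accessor table once, at acquire time, as
     * nvkm_gpuobj_heap_acquire()/nvkm_gpuobj_acquire() do above. */
    static void buf_acquire(struct buf *b)
    {
        b->func = b->map ? &buf_fast : &buf_slow;
    }

    int main(void)
    {
        struct buf b = { .map = NULL, .backing = { 7 } };
        buf_acquire(&b);
        printf("%u\n", b.func->rd32(&b, 0));  /* 7 via the slow path */
        return 0;
    }
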
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
deleted file mode 100644
index dc7ff10ebe7b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/handle.h>
-#include <core/client.h>
-
-#define hprintk(h,l,f,a...) do { \
- struct nvkm_client *c = nvkm_client((h)->object); \
- struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
- nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
-} while(0)
-
-int
-nvkm_handle_init(struct nvkm_handle *handle)
-{
- struct nvkm_handle *item;
- int ret;
-
- hprintk(handle, TRACE, "init running\n");
- ret = nvkm_object_inc(handle->object);
- if (ret)
- return ret;
-
- hprintk(handle, TRACE, "init children\n");
- list_for_each_entry(item, &handle->tree, head) {
- ret = nvkm_handle_init(item);
- if (ret)
- goto fail;
- }
-
- hprintk(handle, TRACE, "init completed\n");
- return 0;
-fail:
- hprintk(handle, ERROR, "init failed with %d\n", ret);
- list_for_each_entry_continue_reverse(item, &handle->tree, head) {
- nvkm_handle_fini(item, false);
- }
-
- nvkm_object_dec(handle->object, false);
- return ret;
-}
-
-int
-nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
-{
- static char *name[2] = { "fini", "suspend" };
- struct nvkm_handle *item;
- int ret;
-
- hprintk(handle, TRACE, "%s children\n", name[suspend]);
- list_for_each_entry(item, &handle->tree, head) {
- ret = nvkm_handle_fini(item, suspend);
- if (ret && suspend)
- goto fail;
- }
-
- hprintk(handle, TRACE, "%s running\n", name[suspend]);
- if (handle->object) {
- ret = nvkm_object_dec(handle->object, suspend);
- if (ret && suspend)
- goto fail;
- }
-
- hprintk(handle, TRACE, "%s completed\n", name[suspend]);
- return 0;
-fail:
- hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
- list_for_each_entry_continue_reverse(item, &handle->tree, head) {
- int rret = nvkm_handle_init(item);
- if (rret)
- hprintk(handle, FATAL, "failed to restart, %d\n", rret);
- }
-
- return ret;
-}
-
-int
-nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
- struct nvkm_object *object, struct nvkm_handle **phandle)
-{
- struct nvkm_object *namedb;
- struct nvkm_handle *handle;
- int ret;
-
- namedb = parent;
- while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
- namedb = namedb->parent;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&handle->head);
- INIT_LIST_HEAD(&handle->tree);
- handle->name = _handle;
- handle->priv = ~0;
-
- ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
- if (ret) {
- kfree(handle);
- return ret;
- }
-
- if (nv_parent(parent)->object_attach) {
- ret = nv_parent(parent)->object_attach(parent, object, _handle);
- if (ret < 0) {
- nvkm_handle_destroy(handle);
- return ret;
- }
-
- handle->priv = ret;
- }
-
- if (object != namedb) {
- while (!nv_iclass(namedb, NV_CLIENT_CLASS))
- namedb = namedb->parent;
-
- handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
- if (handle->parent) {
- list_add(&handle->head, &handle->parent->tree);
- nvkm_namedb_put(handle->parent);
- }
- }
-
- hprintk(handle, TRACE, "created\n");
- *phandle = handle;
- return 0;
-}
-
-void
-nvkm_handle_destroy(struct nvkm_handle *handle)
-{
- struct nvkm_handle *item, *temp;
-
- hprintk(handle, TRACE, "destroy running\n");
- list_for_each_entry_safe(item, temp, &handle->tree, head) {
- nvkm_handle_destroy(item);
- }
- list_del(&handle->head);
-
- if (handle->priv != ~0) {
- struct nvkm_object *parent = handle->parent->object;
- nv_parent(parent)->object_detach(parent, handle->priv);
- }
-
- hprintk(handle, TRACE, "destroy completed\n");
- nvkm_namedb_remove(handle);
- kfree(handle);
-}
-
-struct nvkm_object *
-nvkm_handle_ref(struct nvkm_object *parent, u32 name)
-{
- struct nvkm_object *object = NULL;
- struct nvkm_handle *handle;
-
- while (!nv_iclass(parent, NV_NAMEDB_CLASS))
- parent = parent->parent;
-
- handle = nvkm_namedb_get(nv_namedb(parent), name);
- if (handle) {
- nvkm_object_ref(handle->object, &object);
- nvkm_namedb_put(handle);
- }
-
- return object;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
-{
- struct nvkm_namedb *namedb;
- if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nvkm_namedb_get_class(namedb, oclass);
- return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
-{
- struct nvkm_namedb *namedb;
- if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nvkm_namedb_get_vinst(namedb, vinst);
- return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
-{
- struct nvkm_namedb *namedb;
- if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
- return nvkm_namedb_get_cinst(namedb, cinst);
- return NULL;
-}
-
-void
-nvkm_handle_put(struct nvkm_handle *handle)
-{
- if (handle)
- nvkm_namedb_put(handle);
-}
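
The deleted init path above is the standard partial-failure unwind: on error, list_for_each_entry_continue_reverse() walks back over exactly the children that were already brought up. The same shape is rebuilt on plain objects in core/object.c later in this commit. Distilled into a self-contained sketch (node, node_init() and node_fini() are stand-ins for the real per-object hooks):

#include <linux/list.h>

struct node {
        struct list_head head;  /* link on the parent's child list */
        struct list_head tree;  /* this node's own children */
};

static int node_init(struct node *node);        /* per-node bring-up */
static void node_fini(struct node *node);       /* per-node teardown */

static int
node_init_children(struct node *parent)
{
        struct node *item;
        int ret;

        list_for_each_entry(item, &parent->tree, head) {
                ret = node_init(item);
                if (ret)
                        goto fail;
        }
        return 0;
fail:
        /* unwind only the children initialised before the failure */
        list_for_each_entry_continue_reverse(item, &parent->tree, head)
                node_fini(item);
        return ret;
}
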
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 4459ff5f4cb8..d87d6ab03cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,196 +24,154 @@
#include <core/ioctl.h>
#include <core/client.h>
#include <core/engine.h>
-#include <core/handle.h>
-#include <core/namedb.h>
#include <nvif/unpack.h>
#include <nvif/ioctl.h>
static int
-nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
union {
- struct nvif_ioctl_nop none;
+ struct nvif_ioctl_nop_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "nop size %d\n", size);
- if (nvif_unvers(args->none)) {
- nv_ioctl(object, "nop\n");
+ nvif_ioctl(object, "nop size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
+ args->v0.version = NVIF_VERSION_LATEST;
}
return ret;
}
static int
-nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_sclass_v0 v0;
} *args = data;
- int ret;
+ struct nvkm_oclass oclass;
+ int ret, i = 0;
- if (!nv_iclass(object, NV_PARENT_CLASS)) {
- nv_debug(object, "cannot have children (sclass)\n");
- return -ENODEV;
- }
-
- nv_ioctl(object, "sclass size %d\n", size);
+ nvif_ioctl(object, "sclass size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "sclass vers %d count %d\n",
- args->v0.version, args->v0.count);
- if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
- ret = nvkm_parent_lclass(object, args->v0.oclass,
- args->v0.count);
- if (ret >= 0) {
- args->v0.count = ret;
- ret = 0;
+ nvif_ioctl(object, "sclass vers %d count %d\n",
+ args->v0.version, args->v0.count);
+ if (size != args->v0.count * sizeof(args->v0.oclass[0]))
+ return -EINVAL;
+
+ while (object->func->sclass &&
+ object->func->sclass(object, i, &oclass) >= 0) {
+ if (i < args->v0.count) {
+ args->v0.oclass[i].oclass = oclass.base.oclass;
+ args->v0.oclass[i].minver = oclass.base.minver;
+ args->v0.oclass[i].maxver = oclass.base.maxver;
}
- } else {
- ret = -EINVAL;
+ i++;
}
+
+ args->v0.count = i;
}
return ret;
}
static int
-nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
{
union {
struct nvif_ioctl_new_v0 v0;
} *args = data;
- struct nvkm_client *client = nvkm_client(handle->object);
- struct nvkm_object *engctx = NULL;
+ struct nvkm_client *client = parent->client;
struct nvkm_object *object = NULL;
- struct nvkm_parent *parent;
- struct nvkm_object *engine;
- struct nvkm_oclass *oclass;
- u32 _handle, _oclass;
- int ret;
+ struct nvkm_oclass oclass;
+ int ret, i = 0;
- nv_ioctl(client, "new size %d\n", size);
+ nvif_ioctl(parent, "new size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- _handle = args->v0.handle;
- _oclass = args->v0.oclass;
+ nvif_ioctl(parent, "new vers %d handle %08x class %08x "
+ "route %02x token %llx object %016llx\n",
+ args->v0.version, args->v0.handle, args->v0.oclass,
+ args->v0.route, args->v0.token, args->v0.object);
} else
return ret;
- nv_ioctl(client, "new vers %d handle %08x class %08x "
- "route %02x token %llx\n",
- args->v0.version, _handle, _oclass,
- args->v0.route, args->v0.token);
-
- if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
- nv_debug(handle->object, "cannot have children (ctor)\n");
- ret = -ENODEV;
- goto fail_class;
+ if (!parent->func->sclass) {
+ nvif_ioctl(parent, "cannot have children\n");
+ return -EINVAL;
}
- parent = nv_parent(handle->object);
-
- /* check that parent supports the requested subclass */
- ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass);
- if (ret) {
- nv_debug(parent, "illegal class 0x%04x\n", _oclass);
- goto fail_class;
- }
-
- /* make sure engine init has been completed *before* any objects
- * it controls are created - the constructors may depend on
- * state calculated at init (ie. default context construction)
- */
- if (engine) {
- ret = nvkm_object_inc(engine);
+ do {
+ memset(&oclass, 0x00, sizeof(oclass));
+ oclass.client = client;
+ oclass.handle = args->v0.handle;
+ oclass.object = args->v0.object;
+ oclass.parent = parent;
+ ret = parent->func->sclass(parent, i++, &oclass);
if (ret)
- goto fail_class;
+ return ret;
+ } while (oclass.base.oclass != args->v0.oclass);
+
+ if (oclass.engine) {
+ oclass.engine = nvkm_engine_ref(oclass.engine);
+ if (IS_ERR(oclass.engine))
+ return PTR_ERR(oclass.engine);
}
- /* if engine requires it, create a context object to insert
- * between the parent and its children (eg. PGRAPH context)
- */
- if (engine && nv_engine(engine)->cclass) {
- ret = nvkm_object_ctor(&parent->object, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
- if (ret)
- goto fail_engctx;
- } else {
- nvkm_object_ref(&parent->object, &engctx);
+ ret = oclass.ctor(&oclass, data, size, &object);
+ nvkm_engine_unref(&oclass.engine);
+ if (ret == 0) {
+ ret = nvkm_object_init(object);
+ if (ret == 0) {
+ list_add(&object->head, &parent->tree);
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ object->object = args->v0.object;
+ if (nvkm_client_insert(client, object)) {
+ client->data = object;
+ return 0;
+ }
+ ret = -EEXIST;
+ }
+ nvkm_object_fini(object, false);
}
- /* finally, create new object and bind it to its handle */
- ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
- client->data = object;
- if (ret)
- goto fail_ctor;
-
- ret = nvkm_object_inc(object);
- if (ret)
- goto fail_init;
-
- ret = nvkm_handle_create(&parent->object, handle->name,
- _handle, object, &handle);
- if (ret)
- goto fail_handle;
-
- ret = nvkm_handle_init(handle);
- handle->route = args->v0.route;
- handle->token = args->v0.token;
- if (ret)
- nvkm_handle_destroy(handle);
-
-fail_handle:
- nvkm_object_dec(object, false);
-fail_init:
- nvkm_object_ref(NULL, &object);
-fail_ctor:
- nvkm_object_ref(NULL, &engctx);
-fail_engctx:
- if (engine)
- nvkm_object_dec(engine, false);
-fail_class:
+ nvkm_object_del(&object);
return ret;
}
static int
-nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_del none;
} *args = data;
int ret;
- nv_ioctl(object, "delete size %d\n", size);
+ nvif_ioctl(object, "delete size %d\n", size);
if (nvif_unvers(args->none)) {
- nv_ioctl(object, "delete\n");
- nvkm_handle_fini(handle, false);
- nvkm_handle_destroy(handle);
+ nvif_ioctl(object, "delete\n");
+ nvkm_object_fini(object, false);
+ nvkm_object_del(&object);
}
return ret;
}
static int
-nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
- struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_mthd_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "mthd size %d\n", size);
+ nvif_ioctl(object, "mthd size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "mthd vers %d mthd %02x\n",
- args->v0.version, args->v0.method);
- if (ret = -ENODEV, ofuncs->mthd)
- ret = ofuncs->mthd(object, args->v0.method, data, size);
+ nvif_ioctl(object, "mthd vers %d mthd %02x\n",
+ args->v0.version, args->v0.method);
+ ret = nvkm_object_mthd(object, args->v0.method, data, size);
}
return ret;
@@ -221,37 +179,34 @@ nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
static int
-nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
- struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_rd_v0 v0;
} *args = data;
+ union {
+ u8 b08;
+ u16 b16;
+ u32 b32;
+ } v;
int ret;
- nv_ioctl(object, "rd size %d\n", size);
+ nvif_ioctl(object, "rd size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
- args->v0.version, args->v0.size, args->v0.addr);
+ nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
+ args->v0.version, args->v0.size, args->v0.addr);
switch (args->v0.size) {
case 1:
- if (ret = -ENODEV, ofuncs->rd08) {
- args->v0.data = nv_ro08(object, args->v0.addr);
- ret = 0;
- }
+ ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
+ args->v0.data = v.b08;
break;
case 2:
- if (ret = -ENODEV, ofuncs->rd16) {
- args->v0.data = nv_ro16(object, args->v0.addr);
- ret = 0;
- }
+ ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
+ args->v0.data = v.b16;
break;
case 4:
- if (ret = -ENODEV, ofuncs->rd32) {
- args->v0.data = nv_ro32(object, args->v0.addr);
- ret = 0;
- }
+ ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
+ args->v0.data = v.b32;
break;
default:
ret = -EINVAL;
@@ -263,104 +218,81 @@ nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
- struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_wr_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "wr size %d\n", size);
+ nvif_ioctl(object, "wr size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
- args->v0.version, args->v0.size, args->v0.addr,
- args->v0.data);
- switch (args->v0.size) {
- case 1:
- if (ret = -ENODEV, ofuncs->wr08) {
- nv_wo08(object, args->v0.addr, args->v0.data);
- ret = 0;
- }
- break;
- case 2:
- if (ret = -ENODEV, ofuncs->wr16) {
- nv_wo16(object, args->v0.addr, args->v0.data);
- ret = 0;
- }
- break;
- case 4:
- if (ret = -ENODEV, ofuncs->wr32) {
- nv_wo32(object, args->v0.addr, args->v0.data);
- ret = 0;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ nvif_ioctl(object,
+ "wr vers %d size %d addr %016llx data %08x\n",
+ args->v0.version, args->v0.size, args->v0.addr,
+ args->v0.data);
+ } else
+ return ret;
+
+ switch (args->v0.size) {
+ case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
+ case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
+ case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
+ default:
+ break;
}
- return ret;
+ return -EINVAL;
}
static int
-nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
- struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_map_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "map size %d\n", size);
+ nvif_ioctl(object, "map size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "map vers %d\n", args->v0.version);
- if (ret = -ENODEV, ofuncs->map) {
- ret = ofuncs->map(object, &args->v0.handle,
- &args->v0.length);
- }
+ nvif_ioctl(object, "map vers %d\n", args->v0.version);
+ ret = nvkm_object_map(object, &args->v0.handle,
+ &args->v0.length);
}
return ret;
}
static int
-nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
union {
struct nvif_ioctl_unmap none;
} *args = data;
int ret;
- nv_ioctl(object, "unmap size %d\n", size);
+ nvif_ioctl(object, "unmap size %d\n", size);
if (nvif_unvers(args->none)) {
- nv_ioctl(object, "unmap\n");
+ nvif_ioctl(object, "unmap\n");
}
return ret;
}
static int
-nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_object *object = handle->object;
- struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
union {
struct nvif_ioctl_ntfy_new_v0 v0;
} *args = data;
struct nvkm_event *event;
int ret;
- nv_ioctl(object, "ntfy new size %d\n", size);
+ nvif_ioctl(object, "ntfy new size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "ntfy new vers %d event %02x\n",
- args->v0.version, args->v0.event);
- if (ret = -ENODEV, ofuncs->ntfy)
- ret = ofuncs->ntfy(object, args->v0.event, &event);
+ nvif_ioctl(object, "ntfy new vers %d event %02x\n",
+ args->v0.version, args->v0.event);
+ ret = nvkm_object_ntfy(object, args->v0.event, &event);
if (ret == 0) {
ret = nvkm_client_notify_new(object, event, data, size);
if (ret >= 0) {
@@ -374,19 +306,18 @@ nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = nvkm_client(handle->object);
- struct nvkm_object *object = handle->object;
+ struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_del_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "ntfy del size %d\n", size);
+ nvif_ioctl(object, "ntfy del size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "ntfy del vers %d index %d\n",
- args->v0.version, args->v0.index);
+ nvif_ioctl(object, "ntfy del vers %d index %d\n",
+ args->v0.version, args->v0.index);
ret = nvkm_client_notify_del(client, args->v0.index);
}
@@ -394,19 +325,18 @@ nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = nvkm_client(handle->object);
- struct nvkm_object *object = handle->object;
+ struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_get_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "ntfy get size %d\n", size);
+ nvif_ioctl(object, "ntfy get size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "ntfy get vers %d index %d\n",
- args->v0.version, args->v0.index);
+ nvif_ioctl(object, "ntfy get vers %d index %d\n",
+ args->v0.version, args->v0.index);
ret = nvkm_client_notify_get(client, args->v0.index);
}
@@ -414,19 +344,18 @@ nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = nvkm_client(handle->object);
- struct nvkm_object *object = handle->object;
+ struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_put_v0 v0;
} *args = data;
int ret;
- nv_ioctl(object, "ntfy put size %d\n", size);
+ nvif_ioctl(object, "ntfy put size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "ntfy put vers %d index %d\n",
- args->v0.version, args->v0.index);
+ nvif_ioctl(object, "ntfy put vers %d index %d\n",
+ args->v0.version, args->v0.index);
ret = nvkm_client_notify_put(client, args->v0.index);
}
@@ -435,7 +364,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
static struct {
int version;
- int (*func)(struct nvkm_handle *, void *, u32);
+ int (*func)(struct nvkm_object *, void *, u32);
}
nvkm_ioctl_v0[] = {
{ 0x00, nvkm_ioctl_nop },
@@ -454,40 +383,31 @@ nvkm_ioctl_v0[] = {
};
static int
-nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path,
+nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
void *data, u32 size, u8 owner, u8 *route, u64 *token)
{
- struct nvkm_handle *handle = parent;
- struct nvkm_namedb *namedb;
struct nvkm_object *object;
int ret;
- while ((object = parent->object), nr--) {
- nv_ioctl(object, "path 0x%08x\n", path[nr]);
- if (!nv_iclass(object, NV_PARENT_CLASS)) {
- nv_debug(object, "cannot have children (path)\n");
- return -EINVAL;
- }
-
- if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
- !(handle = nvkm_namedb_get(namedb, path[nr]))) {
- nv_debug(object, "handle 0x%08x not found\n", path[nr]);
- return -ENOENT;
- }
- nvkm_namedb_put(handle);
- parent = handle;
+ if (handle)
+ object = nvkm_client_search(client, handle);
+ else
+ object = &client->object;
+ if (unlikely(!object)) {
+ nvif_ioctl(&client->object, "object not found\n");
+ return -ENOENT;
}
- if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
- nv_ioctl(object, "object route != owner\n");
+ if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
+ nvif_ioctl(&client->object, "route != owner\n");
return -EACCES;
}
- *route = handle->route;
- *token = handle->token;
+ *route = object->route;
+ *token = object->token;
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
- ret = nvkm_ioctl_v0[type].func(handle, data, size);
+ ret = nvkm_ioctl_v0[type].func(object, data, size);
}
return ret;
@@ -497,25 +417,26 @@ int
nvkm_ioctl(struct nvkm_client *client, bool supervisor,
void *data, u32 size, void **hack)
{
+ struct nvkm_object *object = &client->object;
union {
struct nvif_ioctl_v0 v0;
} *args = data;
int ret;
client->super = supervisor;
- nv_ioctl(client, "size %d\n", size);
+ nvif_ioctl(object, "size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
- args->v0.version, args->v0.type, args->v0.path_nr,
- args->v0.owner);
- ret = nvkm_ioctl_path(client->root, args->v0.type,
- args->v0.path_nr, args->v0.path,
+ nvif_ioctl(object,
+ "vers %d type %02x object %016llx owner %02x\n",
+ args->v0.version, args->v0.type, args->v0.object,
+ args->v0.owner);
+ ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
data, size, args->v0.owner,
&args->v0.route, &args->v0.token);
}
- nv_ioctl(client, "return %d\n", ret);
+ nvif_ioctl(object, "return %d\n", ret);
if (hack) {
*hack = client->data;
client->data = NULL;
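
Despite the handle-to-object conversion, the top-level flow keeps its shape: nvkm_ioctl_path() resolves the target, checks the route byte against the caller, then dispatches through the nvkm_ioctl_v0[] table indexed by type. The table-dispatch idiom itself, reduced to a self-contained sketch (all names here are invented for illustration):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct obj;

static int op_nop(struct obj *obj, void *data, u32 size) { return 0; }
static int op_mthd(struct obj *obj, void *data, u32 size) { return 0; }

static struct {
        int version;
        int (*func)(struct obj *, void *, u32);
} ops_v0[] = {
        { 0x00, op_nop },       /* array index doubles as the ioctl type */
        { 0x01, op_mthd },
};

static int
dispatch(struct obj *obj, u32 type, void *data, u32 size)
{
        int ret = -EINVAL;
        if (type < ARRAY_SIZE(ops_v0)) {
                if (ops_v0[type].version == 0)
                        ret = ops_v0[type].func(obj, data, size);
        }
        return ret;
}
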
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
new file mode 100644
index 000000000000..8903c04c977e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/memory.h>
+#include <subdev/instmem.h>
+
+void
+nvkm_memory_ctor(const struct nvkm_memory_func *func,
+ struct nvkm_memory *memory)
+{
+ memory->func = func;
+}
+
+void
+nvkm_memory_del(struct nvkm_memory **pmemory)
+{
+ struct nvkm_memory *memory = *pmemory;
+ if (memory && !WARN_ON(!memory->func)) {
+ if (memory->func->dtor)
+ *pmemory = memory->func->dtor(memory);
+ kfree(*pmemory);
+ *pmemory = NULL;
+ }
+}
+
+int
+nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
+ u64 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
+{
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_memory *memory;
+ int ret = -ENOSYS;
+
+ if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+ return -ENOSYS;
+
+ ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+ if (ret)
+ return ret;
+
+ *pmemory = memory;
+ return 0;
+}
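
One detail of nvkm_memory_del() worth spelling out: it kfree()s whatever pointer the implementation's dtor returns, which lets a backend embed struct nvkm_memory inside a larger allocation and hand back the containing object. A hedged sketch of a backend using that convention (my_instobj and its func table are invented; a real implementation also fills the target/addr/size and mapping hooks):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <core/memory.h>

struct my_instobj {
        struct nvkm_memory memory;      /* freed via the pointer dtor returns */
        void *priv;                     /* backend-private state */
};

static void *
my_instobj_dtor(struct nvkm_memory *memory)
{
        struct my_instobj *iobj = container_of(memory, typeof(*iobj), memory);
        /* release backend resources, then return what kfree() should see */
        return iobj;
}

static const struct nvkm_memory_func
my_instobj_func = {
        .dtor = my_instobj_dtor,
};

static int
my_instobj_new(struct my_instobj **piobj)
{
        struct my_instobj *iobj = kzalloc(sizeof(*iobj), GFP_KERNEL);
        if (!iobj)
                return -ENOMEM;
        nvkm_memory_ctor(&my_instobj_func, &iobj->memory);
        *piobj = iobj;
        return 0;
}
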
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 7f458dfd5608..09a1eee8fd33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -26,7 +26,7 @@
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
-static void
+void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
struct nvkm_mm_node *node;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 6400767c5dba..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/handle.h>
-
-static struct nvkm_handle *
-nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
-{
- struct nvkm_handle *handle;
-
- list_for_each_entry(handle, &namedb->list, node) {
- if (handle->name == name)
- return handle;
- }
-
- return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
-{
- struct nvkm_handle *handle;
-
- list_for_each_entry(handle, &namedb->list, node) {
- if (nv_mclass(handle->object) == oclass)
- return handle;
- }
-
- return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
- struct nvkm_handle *handle;
-
- list_for_each_entry(handle, &namedb->list, node) {
- if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
- if (nv_gpuobj(handle->object)->addr == vinst)
- return handle;
- }
- }
-
- return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
- struct nvkm_handle *handle;
-
- list_for_each_entry(handle, &namedb->list, node) {
- if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
- if (nv_gpuobj(handle->object)->node &&
- nv_gpuobj(handle->object)->node->offset == cinst)
- return handle;
- }
- }
-
- return NULL;
-}
-
-int
-nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
- struct nvkm_object *object,
- struct nvkm_handle *handle)
-{
- int ret = -EEXIST;
- write_lock_irq(&namedb->lock);
- if (!nvkm_namedb_lookup(namedb, name)) {
- nvkm_object_ref(object, &handle->object);
- handle->namedb = namedb;
- list_add(&handle->node, &namedb->list);
- ret = 0;
- }
- write_unlock_irq(&namedb->lock);
- return ret;
-}
-
-void
-nvkm_namedb_remove(struct nvkm_handle *handle)
-{
- struct nvkm_namedb *namedb = handle->namedb;
- struct nvkm_object *object = handle->object;
- write_lock_irq(&namedb->lock);
- list_del(&handle->node);
- write_unlock_irq(&namedb->lock);
- nvkm_object_ref(NULL, &object);
-}
-
-struct nvkm_handle *
-nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
-{
- struct nvkm_handle *handle;
- read_lock(&namedb->lock);
- handle = nvkm_namedb_lookup(namedb, name);
- if (handle == NULL)
- read_unlock(&namedb->lock);
- return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
-{
- struct nvkm_handle *handle;
- read_lock(&namedb->lock);
- handle = nvkm_namedb_lookup_class(namedb, oclass);
- if (handle == NULL)
- read_unlock(&namedb->lock);
- return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
- struct nvkm_handle *handle;
- read_lock(&namedb->lock);
- handle = nvkm_namedb_lookup_vinst(namedb, vinst);
- if (handle == NULL)
- read_unlock(&namedb->lock);
- return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
- struct nvkm_handle *handle;
- read_lock(&namedb->lock);
- handle = nvkm_namedb_lookup_cinst(namedb, cinst);
- if (handle == NULL)
- read_unlock(&namedb->lock);
- return handle;
-}
-
-void
-nvkm_namedb_put(struct nvkm_handle *handle)
-{
- if (handle)
- read_unlock(&handle->namedb->lock);
-}
-
-int
-nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pclass,
- struct nvkm_oclass *sclass, u64 engcls,
- int length, void **pobject)
-{
- struct nvkm_namedb *namedb;
- int ret;
-
- ret = nvkm_parent_create_(parent, engine, oclass, pclass |
- NV_NAMEDB_CLASS, sclass, engcls,
- length, pobject);
- namedb = *pobject;
- if (ret)
- return ret;
-
- rwlock_init(&namedb->lock);
- INIT_LIST_HEAD(&namedb->list);
- return 0;
-}
-
-int
-_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_namedb *object;
- int ret;
-
- ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
- *pobject = nv_object(object);
- if (ret)
- return ret;
-
- return 0;
-}
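
One subtlety in the namedb code being deleted: nvkm_namedb_get() returned with the rwlock still read-held on success, and only dropped it on a failed lookup, so the handle stayed stable until the matching nvkm_namedb_put(). That hold-across-return idiom in isolation (db/entry names are generic; a sketch, not the original API):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct db {
        rwlock_t lock;
        struct list_head list;
};

struct entry {
        struct list_head node;
        u32 name;
};

/* returns with db->lock read-held when an entry is found */
static struct entry *
db_get(struct db *db, u32 name)
{
        struct entry *entry;
        read_lock(&db->lock);
        list_for_each_entry(entry, &db->list, node) {
                if (entry->name == name)
                        return entry;   /* caller must db_put() */
        }
        read_unlock(&db->lock);
        return NULL;
}

static void
db_put(struct db *db, struct entry *entry)
{
        if (entry)
                read_unlock(&db->lock);
}
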
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 979f3627d395..67aa7223dcd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -22,309 +22,243 @@
* Authors: Ben Skeggs
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/engine.h>
-#ifdef NVKM_OBJECT_MAGIC
-static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
-static DEFINE_SPINLOCK(_objlist_lock);
-#endif
-
int
-nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pclass,
- int size, void **pobject)
+nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
- struct nvkm_object *object;
-
- object = *pobject = kzalloc(size, GFP_KERNEL);
- if (!object)
- return -ENOMEM;
-
- nvkm_object_ref(parent, &object->parent);
- nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
- object->oclass = oclass;
- object->oclass->handle |= pclass;
- atomic_set(&object->refcount, 1);
- atomic_set(&object->usecount, 0);
-
-#ifdef NVKM_OBJECT_MAGIC
- object->_magic = NVKM_OBJECT_MAGIC;
- spin_lock(&_objlist_lock);
- list_add(&object->list, &_objlist);
- spin_unlock(&_objlist_lock);
-#endif
- return 0;
+ if (likely(object->func->mthd))
+ return object->func->mthd(object, mthd, data, size);
+ return -ENODEV;
}
int
-_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
+ struct nvkm_event **pevent)
{
- if (size != 0)
- return -ENOSYS;
- return nvkm_object_create(parent, engine, oclass, 0, pobject);
+ if (likely(object->func->ntfy))
+ return object->func->ntfy(object, mthd, pevent);
+ return -ENODEV;
}
-void
-nvkm_object_destroy(struct nvkm_object *object)
+int
+nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
-#ifdef NVKM_OBJECT_MAGIC
- spin_lock(&_objlist_lock);
- list_del(&object->list);
- spin_unlock(&_objlist_lock);
-#endif
- nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
- nvkm_object_ref(NULL, &object->parent);
- kfree(object);
+ if (likely(object->func->map))
+ return object->func->map(object, addr, size);
+ return -ENODEV;
}
int
-nvkm_object_init(struct nvkm_object *object)
+nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
{
- return 0;
+ if (likely(object->func->rd08))
+ return object->func->rd08(object, addr, data);
+ return -ENODEV;
}
int
-nvkm_object_fini(struct nvkm_object *object, bool suspend)
+nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
{
- return 0;
+ if (likely(object->func->rd16))
+ return object->func->rd16(object, addr, data);
+ return -ENODEV;
}
-struct nvkm_ofuncs
-nvkm_object_ofuncs = {
- .ctor = _nvkm_object_ctor,
- .dtor = nvkm_object_destroy,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
-};
-
int
-nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
- struct nvkm_ofuncs *ofuncs = oclass->ofuncs;
- struct nvkm_object *object = NULL;
- int ret;
-
- ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
- *pobject = object;
- if (ret < 0) {
- if (ret != -ENODEV) {
- nv_error(parent, "failed to create 0x%08x, %d\n",
- oclass->handle, ret);
- }
-
- if (object) {
- ofuncs->dtor(object);
- *pobject = NULL;
- }
-
- return ret;
- }
-
- if (ret == 0) {
- nv_trace(object, "created\n");
- atomic_set(&object->refcount, 1);
- }
-
- return 0;
+ if (likely(object->func->rd32))
+ return object->func->rd32(object, addr, data);
+ return -ENODEV;
}
-static void
-nvkm_object_dtor(struct nvkm_object *object)
+int
+nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
- nv_trace(object, "destroying\n");
- nv_ofuncs(object)->dtor(object);
+ if (likely(object->func->wr08))
+ return object->func->wr08(object, addr, data);
+ return -ENODEV;
}
-void
-nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref)
+int
+nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
- if (obj) {
- atomic_inc(&obj->refcount);
- nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
- }
+ if (likely(object->func->wr16))
+ return object->func->wr16(object, addr, data);
+ return -ENODEV;
+}
- if (*ref) {
- int dead = atomic_dec_and_test(&(*ref)->refcount);
- nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
- if (dead)
- nvkm_object_dtor(*ref);
- }
+int
+nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ if (likely(object->func->wr32))
+ return object->func->wr32(object, addr, data);
+ return -ENODEV;
+}
- *ref = obj;
+int
+nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ if (object->func->bind)
+ return object->func->bind(object, gpuobj, align, pgpuobj);
+ return -ENODEV;
}
int
-nvkm_object_inc(struct nvkm_object *object)
+nvkm_object_fini(struct nvkm_object *object, bool suspend)
{
- int ref = atomic_add_return(1, &object->usecount);
+ const char *action = suspend ? "suspend" : "fini";
+ struct nvkm_object *child;
+ s64 time;
int ret;
- nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
- if (ref != 1)
- return 0;
-
- nv_trace(object, "initialising...\n");
- if (object->parent) {
- ret = nvkm_object_inc(object->parent);
- if (ret) {
- nv_error(object, "parent failed, %d\n", ret);
- goto fail_parent;
- }
+ nvif_debug(object, "%s children...\n", action);
+ time = ktime_to_us(ktime_get());
+ list_for_each_entry(child, &object->tree, head) {
+ ret = nvkm_object_fini(child, suspend);
+ if (ret && suspend)
+ goto fail_child;
}
- if (object->engine) {
- mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nvkm_object_inc(&object->engine->subdev.object);
- mutex_unlock(&nv_subdev(object->engine)->mutex);
+ nvif_debug(object, "%s running...\n", action);
+ if (object->func->fini) {
+ ret = object->func->fini(object, suspend);
if (ret) {
- nv_error(object, "engine failed, %d\n", ret);
- goto fail_engine;
+ nvif_error(object, "%s failed with %d\n", action, ret);
+ if (suspend)
+ goto fail;
}
}
- ret = nv_ofuncs(object)->init(object);
- atomic_set(&object->usecount, 1);
- if (ret) {
- nv_error(object, "init failed, %d\n", ret);
- goto fail_self;
- }
-
- nv_trace(object, "initialised\n");
+ time = ktime_to_us(ktime_get()) - time;
+ nvif_debug(object, "%s completed in %lldus\n", action, time);
return 0;
-fail_self:
- if (object->engine) {
- mutex_lock(&nv_subdev(object->engine)->mutex);
- nvkm_object_dec(&object->engine->subdev.object, false);
- mutex_unlock(&nv_subdev(object->engine)->mutex);
+fail:
+ if (object->func->init) {
+ int rret = object->func->init(object);
+ if (rret)
+ nvif_fatal(object, "failed to restart, %d\n", rret);
+ }
+fail_child:
+ list_for_each_entry_continue_reverse(child, &object->tree, head) {
+ nvkm_object_init(child);
}
-fail_engine:
- if (object->parent)
- nvkm_object_dec(object->parent, false);
-fail_parent:
- atomic_dec(&object->usecount);
return ret;
}
-static int
-nvkm_object_decf(struct nvkm_object *object)
+int
+nvkm_object_init(struct nvkm_object *object)
{
+ struct nvkm_object *child;
+ s64 time;
int ret;
- nv_trace(object, "stopping...\n");
-
- ret = nv_ofuncs(object)->fini(object, false);
- atomic_set(&object->usecount, 0);
- if (ret)
- nv_warn(object, "failed fini, %d\n", ret);
-
- if (object->engine) {
- mutex_lock(&nv_subdev(object->engine)->mutex);
- nvkm_object_dec(&object->engine->subdev.object, false);
- mutex_unlock(&nv_subdev(object->engine)->mutex);
+ nvif_debug(object, "init running...\n");
+ time = ktime_to_us(ktime_get());
+ if (object->func->init) {
+ ret = object->func->init(object);
+ if (ret)
+ goto fail;
}
- if (object->parent)
- nvkm_object_dec(object->parent, false);
+ nvif_debug(object, "init children...\n");
+ list_for_each_entry(child, &object->tree, head) {
+ ret = nvkm_object_init(child);
+ if (ret)
+ goto fail_child;
+ }
- nv_trace(object, "stopped\n");
+ time = ktime_to_us(ktime_get()) - time;
+ nvif_debug(object, "init completed in %lldus\n", time);
return 0;
+
+fail_child:
+ list_for_each_entry_continue_reverse(child, &object->tree, head)
+ nvkm_object_fini(child, false);
+fail:
+ nvif_error(object, "init failed with %d\n", ret);
+ if (object->func->fini)
+ object->func->fini(object, false);
+ return ret;
}
-static int
-nvkm_object_decs(struct nvkm_object *object)
+void *
+nvkm_object_dtor(struct nvkm_object *object)
{
- int ret, rret;
-
- nv_trace(object, "suspending...\n");
-
- ret = nv_ofuncs(object)->fini(object, true);
- atomic_set(&object->usecount, 0);
- if (ret) {
- nv_error(object, "failed suspend, %d\n", ret);
- return ret;
+ struct nvkm_object *child, *ctemp;
+ void *data = object;
+ s64 time;
+
+ nvif_debug(object, "destroy children...\n");
+ time = ktime_to_us(ktime_get());
+ list_for_each_entry_safe(child, ctemp, &object->tree, head) {
+ nvkm_object_del(&child);
}
- if (object->engine) {
- mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nvkm_object_dec(&object->engine->subdev.object, true);
- mutex_unlock(&nv_subdev(object->engine)->mutex);
- if (ret) {
- nv_warn(object, "engine failed suspend, %d\n", ret);
- goto fail_engine;
- }
- }
-
- if (object->parent) {
- ret = nvkm_object_dec(object->parent, true);
- if (ret) {
- nv_warn(object, "parent failed suspend, %d\n", ret);
- goto fail_parent;
- }
- }
-
- nv_trace(object, "suspended\n");
- return 0;
+ nvif_debug(object, "destroy running...\n");
+ if (object->func->dtor)
+ data = object->func->dtor(object);
+ nvkm_engine_unref(&object->engine);
+ time = ktime_to_us(ktime_get()) - time;
+ nvif_debug(object, "destroy completed in %lldus...\n", time);
+ return data;
+}
-fail_parent:
- if (object->engine) {
- mutex_lock(&nv_subdev(object->engine)->mutex);
- rret = nvkm_object_inc(&object->engine->subdev.object);
- mutex_unlock(&nv_subdev(object->engine)->mutex);
- if (rret)
- nv_fatal(object, "engine failed to reinit, %d\n", rret);
+void
+nvkm_object_del(struct nvkm_object **pobject)
+{
+ struct nvkm_object *object = *pobject;
+ if (object && !WARN_ON(!object->func)) {
+ *pobject = nvkm_object_dtor(object);
+ nvkm_client_remove(object->client, object);
+ list_del(&object->head);
+ kfree(*pobject);
+ *pobject = NULL;
}
+}
-fail_engine:
- rret = nv_ofuncs(object)->init(object);
- if (rret)
- nv_fatal(object, "failed to reinit, %d\n", rret);
-
- return ret;
+void
+nvkm_object_ctor(const struct nvkm_object_func *func,
+ const struct nvkm_oclass *oclass, struct nvkm_object *object)
+{
+ object->func = func;
+ object->client = oclass->client;
+ object->engine = nvkm_engine_ref(oclass->engine);
+ object->oclass = oclass->base.oclass;
+ object->handle = oclass->handle;
+ INIT_LIST_HEAD(&object->head);
+ INIT_LIST_HEAD(&object->tree);
+ RB_CLEAR_NODE(&object->node);
+ WARN_ON(oclass->engine && !object->engine);
}
int
-nvkm_object_dec(struct nvkm_object *object, bool suspend)
+nvkm_object_new_(const struct nvkm_object_func *func,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- int ref = atomic_add_return(-1, &object->usecount);
- int ret;
-
- nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
-
- if (ref == 0) {
- if (suspend)
- ret = nvkm_object_decs(object);
- else
- ret = nvkm_object_decf(object);
-
- if (ret) {
- atomic_inc(&object->usecount);
- return ret;
- }
+ if (size == 0) {
+ if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(func, oclass, *pobject);
+ return 0;
}
-
- return 0;
+ return -ENOSYS;
}
-void
-nvkm_object_debug(void)
+static const struct nvkm_object_func
+nvkm_object_func = {
+};
+
+int
+nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
-#ifdef NVKM_OBJECT_MAGIC
- struct nvkm_object *object;
- if (!list_empty(&_objlist)) {
- nv_fatal(NULL, "*******************************************\n");
- nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
- nv_fatal(NULL, "*******************************************\n");
- list_for_each_entry(object, &_objlist, list) {
- nv_fatal(object, "%p/%p/%d/%d\n",
- object->parent, object->engine,
- atomic_read(&object->refcount),
- atomic_read(&object->usecount));
- }
- }
-#endif
+ const struct nvkm_object_func *func =
+ oclass->base.func ? oclass->base.func : &nvkm_object_func;
+ return nvkm_object_new_(func, oclass, data, size, pobject);
}
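
With the old ofuncs vtable gone, a class now fills in only the nvkm_object_func hooks it supports: the mthd/ntfy/map/rd*/wr*/bind wrappers above return -ENODEV for an absent hook, while a missing init/fini/dtor is simply a no-op. A minimal class on the new interface, assuming only signatures visible in this hunk (the my_object_* names are illustrative):

#include <core/object.h>

static int
my_object_init(struct nvkm_object *object)
{
        /* bring state up; children are initialised after this returns */
        return 0;
}

static int
my_object_fini(struct nvkm_object *object, bool suspend)
{
        /* children have already been finalised by the time this runs */
        return 0;
}

static const struct nvkm_object_func
my_object_func = {
        .init = my_object_init,
        .fini = my_object_fini,
};

static int
my_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
              struct nvkm_object **pobject)
{
        /* no class-specific arguments, so size must be zero */
        return nvkm_object_new_(&my_object_func, oclass, data, size, pobject);
}
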
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
new file mode 100644
index 000000000000..e31a0479add0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/oproxy.h>
+
+static int
+nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
+}
+
+static int
+nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
+ struct nvkm_event **pevent)
+{
+ return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
+}
+
+static int
+nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+ return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+}
+
+static int
+nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+ return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+ return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+ return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+ return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+ return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ return nvkm_object_bind(nvkm_oproxy(object)->object,
+ parent, align, pgpuobj);
+}
+
+static int
+nvkm_oproxy_sclass(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ oclass->parent = oproxy->object;
+ if (!oproxy->object->func->sclass)
+ return -ENODEV;
+ return oproxy->object->func->sclass(oproxy->object, index, oclass);
+}
+
+static int
+nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ int ret;
+
+ if (oproxy->func->fini[0]) {
+ ret = oproxy->func->fini[0](oproxy, suspend);
+ if (ret && suspend)
+ return ret;
+ }
+
+ if (oproxy->object->func->fini) {
+ ret = oproxy->object->func->fini(oproxy->object, suspend);
+ if (ret && suspend)
+ return ret;
+ }
+
+ if (oproxy->func->fini[1]) {
+ ret = oproxy->func->fini[1](oproxy, suspend);
+ if (ret && suspend)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_oproxy_init(struct nvkm_object *object)
+{
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ int ret;
+
+ if (oproxy->func->init[0]) {
+ ret = oproxy->func->init[0](oproxy);
+ if (ret)
+ return ret;
+ }
+
+ if (oproxy->object->func->init) {
+ ret = oproxy->object->func->init(oproxy->object);
+ if (ret)
+ return ret;
+ }
+
+ if (oproxy->func->init[1]) {
+ ret = oproxy->func->init[1](oproxy);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void *
+nvkm_oproxy_dtor(struct nvkm_object *object)
+{
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ if (oproxy->func->dtor[0])
+ oproxy->func->dtor[0](oproxy);
+ nvkm_object_del(&oproxy->object);
+ if (oproxy->func->dtor[1])
+ oproxy->func->dtor[1](oproxy);
+ return oproxy;
+}
+
+static const struct nvkm_object_func
+nvkm_oproxy_func = {
+ .dtor = nvkm_oproxy_dtor,
+ .init = nvkm_oproxy_init,
+ .fini = nvkm_oproxy_fini,
+ .mthd = nvkm_oproxy_mthd,
+ .ntfy = nvkm_oproxy_ntfy,
+ .map = nvkm_oproxy_map,
+ .rd08 = nvkm_oproxy_rd08,
+ .rd16 = nvkm_oproxy_rd16,
+ .rd32 = nvkm_oproxy_rd32,
+ .wr08 = nvkm_oproxy_wr08,
+ .wr16 = nvkm_oproxy_wr16,
+ .wr32 = nvkm_oproxy_wr32,
+ .bind = nvkm_oproxy_bind,
+ .sclass = nvkm_oproxy_sclass,
+};
+
+void
+nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
+ const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
+{
+ nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
+ oproxy->func = func;
+}
+
+int
+nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
+ const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
+{
+ if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_oproxy_ctor(func, oclass, *poproxy);
+ return 0;
+}
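
An oproxy forwards every hook to the wrapped oproxy->object and gives the wrapper paired callbacks: index 0 of init/fini/dtor runs before the inner object's hook, index 1 after. A sketch of a pass-through wrapper that only watches init (my_wrap_* names invented; how the inner object gets constructed is left out):

#include <core/oproxy.h>

static int
my_wrap_init_pre(struct nvkm_oproxy *oproxy)
{
        /* runs before the wrapped object's own init */
        return 0;
}

static const struct nvkm_oproxy_func
my_wrap_func = {
        .init[0] = my_wrap_init_pre,
};

static int
my_wrap_new(const struct nvkm_oclass *oclass, struct nvkm_object *inner,
            struct nvkm_object **pobject)
{
        struct nvkm_oproxy *oproxy;
        int ret = nvkm_oproxy_new_(&my_wrap_func, oclass, &oproxy);
        if (ret)
                return ret;
        oproxy->object = inner;         /* deleted by the oproxy dtor */
        *pobject = &oproxy->base;
        return 0;
}
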
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 19d153f8c8fd..3e62cf8cde08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -73,6 +73,24 @@ nvkm_boolopt(const char *optstr, const char *opt, bool value)
return value;
}
+long
+nvkm_longopt(const char *optstr, const char *opt, long value)
+{
+ long result = value;
+ int arglen;
+ char *s;
+
+ optstr = nvkm_stropt(optstr, opt, &arglen);
+ if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
+ int ret = kstrtol(s, 0, &value);
+ if (ret == 0)
+ result = value;
+ kfree(s);
+ }
+
+ return result;
+}
+
int
nvkm_dbgopt(const char *optstr, const char *sub)
{
@@ -95,7 +113,7 @@ nvkm_dbgopt(const char *optstr, const char *sub)
else if (!strncasecmpz(optstr, "warn", len))
level = NV_DBG_WARN;
else if (!strncasecmpz(optstr, "info", len))
- level = NV_DBG_INFO_NORMAL;
+ level = NV_DBG_INFO;
else if (!strncasecmpz(optstr, "debug", len))
level = NV_DBG_DEBUG;
else if (!strncasecmpz(optstr, "trace", len))
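
The nvkm_longopt() addition above mirrors nvkm_boolopt(): nvkm_stropt() locates opt inside optstr, the argument is duplicated and handed to kstrtol() with base 0 (so decimal, octal and 0x-prefixed hex all parse), and on any failure the caller's default comes back unchanged. For example (the option names here are made up):

        /* yields 0x200; falls back to 64 if "NvLimit" is absent/malformed */
        long limit = nvkm_longopt("NvDbg=1,NvLimit=0x200", "NvLimit", 64);
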
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
deleted file mode 100644
index dd56cd1eeb38..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/parent.h>
-#include <core/client.h>
-#include <core/engine.h>
-
-int
-nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
- struct nvkm_object **pengine,
- struct nvkm_oclass **poclass)
-{
- struct nvkm_sclass *sclass;
- struct nvkm_engine *engine;
- struct nvkm_oclass *oclass;
- u64 mask;
-
- sclass = nv_parent(parent)->sclass;
- while (sclass) {
- if ((sclass->oclass->handle & 0xffff) == handle) {
- *pengine = &parent->engine->subdev.object;
- *poclass = sclass->oclass;
- return 0;
- }
-
- sclass = sclass->sclass;
- }
-
- mask = nv_parent(parent)->engine;
- while (mask) {
- int i = __ffs64(mask);
-
- if (nv_iclass(parent, NV_CLIENT_CLASS))
- engine = nv_engine(nv_client(parent)->device);
- else
- engine = nvkm_engine(parent, i);
-
- if (engine) {
- oclass = engine->sclass;
- while (oclass->ofuncs) {
- if ((oclass->handle & 0xffff) == handle) {
- *pengine = nv_object(engine);
- *poclass = oclass;
- return 0;
- }
- oclass++;
- }
- }
-
- mask &= ~(1ULL << i);
- }
-
- return -EINVAL;
-}
-
-int
-nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
-{
- struct nvkm_sclass *sclass;
- struct nvkm_engine *engine;
- struct nvkm_oclass *oclass;
- int nr = -1, i;
- u64 mask;
-
- sclass = nv_parent(parent)->sclass;
- while (sclass) {
- if (++nr < size)
- lclass[nr] = sclass->oclass->handle & 0xffff;
- sclass = sclass->sclass;
- }
-
- mask = nv_parent(parent)->engine;
- while (i = __ffs64(mask), mask) {
- engine = nvkm_engine(parent, i);
- if (engine && (oclass = engine->sclass)) {
- while (oclass->ofuncs) {
- if (++nr < size)
- lclass[nr] = oclass->handle & 0xffff;
- oclass++;
- }
- }
-
- mask &= ~(1ULL << i);
- }
-
- return nr + 1;
-}
-
-int
-nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pclass,
- struct nvkm_oclass *sclass, u64 engcls,
- int size, void **pobject)
-{
- struct nvkm_parent *object;
- struct nvkm_sclass *nclass;
- int ret;
-
- ret = nvkm_object_create_(parent, engine, oclass, pclass |
- NV_PARENT_CLASS, size, pobject);
- object = *pobject;
- if (ret)
- return ret;
-
- while (sclass && sclass->ofuncs) {
- nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
- if (!nclass)
- return -ENOMEM;
-
- nclass->sclass = object->sclass;
- object->sclass = nclass;
- nclass->engine = engine ? nv_engine(engine) : NULL;
- nclass->oclass = sclass;
- sclass++;
- }
-
- object->engine = engcls;
- return 0;
-}
-
-void
-nvkm_parent_destroy(struct nvkm_parent *parent)
-{
- struct nvkm_sclass *sclass;
-
- while ((sclass = parent->sclass)) {
- parent->sclass = sclass->sclass;
- kfree(sclass);
- }
-
- nvkm_object_destroy(&parent->object);
-}
-
-
-void
-_nvkm_parent_dtor(struct nvkm_object *object)
-{
- nvkm_parent_destroy(nv_parent(object));
-}
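
The engine walk in the deleted nvkm_parent_sclass()/nvkm_parent_lclass() is a plain set-bit iteration over a u64 mask: __ffs64() yields the lowest set bit, that engine's class list is scanned, and the bit is cleared. The loop on its own (for_each_engine_bit() is an invented name):

#include <linux/bitops.h>
#include <linux/types.h>

static void
for_each_engine_bit(u64 mask, void (*fn)(int index))
{
        while (mask) {
                int i = __ffs64(mask);  /* index of the lowest set bit */
                fn(i);
                mask &= ~(1ULL << i);   /* clear it and continue */
        }
}
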
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
deleted file mode 100644
index 4a220eb91660..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/printk.h>
-#include <core/client.h>
-#include <core/device.h>
-
-int nv_info_debug_level = NV_DBG_INFO_NORMAL;
-
-void
-nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
-{
- static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
- const char *pfx;
- char mfmt[256];
- va_list args;
-
- switch (level) {
- case NV_DBG_FATAL:
- pfx = KERN_CRIT;
- break;
- case NV_DBG_ERROR:
- pfx = KERN_ERR;
- break;
- case NV_DBG_WARN:
- pfx = KERN_WARNING;
- break;
- case NV_DBG_INFO_NORMAL:
- pfx = KERN_INFO;
- break;
- case NV_DBG_DEBUG:
- case NV_DBG_PARANOIA:
- case NV_DBG_TRACE:
- case NV_DBG_SPAM:
- default:
- pfx = KERN_DEBUG;
- break;
- }
-
- if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
- struct nvkm_object *device;
- struct nvkm_object *subdev;
- char obuf[64], *ofmt = "";
-
- if (object->engine == NULL) {
- subdev = object;
- while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
- subdev = subdev->parent;
- } else {
- subdev = &object->engine->subdev.object;
- }
-
- device = subdev;
- if (device->parent)
- device = device->parent;
-
- if (object != subdev) {
- snprintf(obuf, sizeof(obuf), "[0x%08x]",
- nv_hclass(object));
- ofmt = obuf;
- }
-
- if (level > nv_subdev(subdev)->debug)
- return;
-
- snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
- name[level], nv_subdev(subdev)->name,
- nv_device(device)->name, ofmt, fmt);
- } else
- if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
- if (level > nv_client(object)->debug)
- return;
-
- snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
- name[level], nv_client(object)->name, fmt);
- } else {
- snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
- }
-
- va_start(args, fmt);
- vprintk(mfmt, args);
- va_end(args);
-}
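
With printk.c removed, the object-based nv_printk_() path gives way to the subdev-scoped helpers used throughout the remainder of this series (nvkm_trace(), nvkm_error(), and friends; see subdev.c below). A minimal sketch of the replacement style, assuming only a valid struct nvkm_subdev pointer; the wrapper function itself is hypothetical:

/* Hedged sketch: logging through the subdev helpers that replace
 * nv_printk_(); both calls appear verbatim in subdev.c below. */
static void
example_log(struct nvkm_subdev *subdev, int ret)
{
        nvkm_trace(subdev, "init running...\n");  /* gated by subdev->debug */
        if (ret)
                nvkm_error(subdev, "init failed, %d\n", ret);
}
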
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ebd4d15479bd..3216e157a8a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -22,8 +22,6 @@
#include <core/ramht.h>
#include <core/engine.h>
-#include <subdev/bar.h>
-
static u32
nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
{
@@ -35,72 +33,130 @@ nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
}
hash ^= chid << (ramht->bits - 4);
- hash = hash << 3;
return hash;
}
-int
-nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
{
- struct nvkm_bar *bar = nvkm_bar(ramht);
u32 co, ho;
co = ho = nvkm_ramht_hash(ramht, chid, handle);
do {
- if (!nv_ro32(ramht, co + 4)) {
- nv_wo32(ramht, co + 0, handle);
- nv_wo32(ramht, co + 4, context);
- if (bar)
- bar->flush(bar);
- return co;
+ if (ramht->data[co].chid == chid) {
+ if (ramht->data[co].handle == handle)
+ return ramht->data[co].inst;
}
- co += 8;
- if (co >= nv_gpuobj(ramht)->size)
+ if (++co >= ramht->size)
co = 0;
} while (co != ho);
- return -ENOMEM;
+ return NULL;
+}
+
+static int
+nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
+ int chid, int addr, u32 handle, u32 context)
+{
+ struct nvkm_ramht_data *data = &ramht->data[co];
+ u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
+ int ret;
+
+ nvkm_gpuobj_del(&data->inst);
+ data->chid = chid;
+ data->handle = handle;
+
+ if (object) {
+ ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
+ if (ret) {
+ if (ret != -ENODEV) {
+ data->chid = -1;
+ return ret;
+ }
+ data->inst = NULL;
+ }
+
+ if (data->inst) {
+ if (ramht->device->card_type >= NV_50)
+ inst = data->inst->node->offset;
+ else
+ inst = data->inst->addr;
+ }
+
+ if (addr < 0) context |= inst << -addr;
+ else context |= inst >> addr;
+ }
+
+ nvkm_kmap(ramht->gpuobj);
+ nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
+ nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
+ nvkm_done(ramht->gpuobj);
+ return co + 1;
}
void
nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
{
- struct nvkm_bar *bar = nvkm_bar(ramht);
- nv_wo32(ramht, cookie + 0, 0x00000000);
- nv_wo32(ramht, cookie + 4, 0x00000000);
- if (bar)
- bar->flush(bar);
+ if (--cookie >= 0)
+ nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
+}
+
+int
+nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
+ int chid, int addr, u32 handle, u32 context)
+{
+ u32 co, ho;
+
+ if (nvkm_ramht_search(ramht, chid, handle))
+ return -EEXIST;
+
+ co = ho = nvkm_ramht_hash(ramht, chid, handle);
+ do {
+ if (ramht->data[co].chid < 0) {
+ return nvkm_ramht_update(ramht, co, object, chid,
+ addr, handle, context);
+ }
+
+ if (++co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ return -ENOSPC;
}
-static struct nvkm_oclass
-nvkm_ramht_oclass = {
- .handle = 0x0000abcd,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = NULL,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
- },
-};
+void
+nvkm_ramht_del(struct nvkm_ramht **pramht)
+{
+ struct nvkm_ramht *ramht = *pramht;
+ if (ramht) {
+ nvkm_gpuobj_del(&ramht->gpuobj);
+ kfree(*pramht);
+ *pramht = NULL;
+ }
+}
int
-nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
- u32 size, u32 align, struct nvkm_ramht **pramht)
+nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+ struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
{
struct nvkm_ramht *ramht;
- int ret;
+ int ret, i;
- ret = nvkm_gpuobj_create(parent, parent->engine ?
- &parent->engine->subdev.object : parent, /* <nv50 ramht */
- &nvkm_ramht_oclass, 0, pargpu, size,
- align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
- *pramht = ramht;
- if (ret)
- return ret;
+ if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+ sizeof(*ramht->data), GFP_KERNEL)))
+ return -ENOMEM;
- ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
- return 0;
+ ramht->device = device;
+ ramht->parent = parent;
+ ramht->size = size >> 3;
+ ramht->bits = order_base_2(ramht->size);
+ for (i = 0; i < ramht->size; i++)
+ ramht->data[i].chid = -1;
+
+ ret = nvkm_gpuobj_new(ramht->device, size, align, true,
+ ramht->parent, &ramht->gpuobj);
+ if (ret)
+ nvkm_ramht_del(pramht);
+ return ret;
}
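
The reworked RAMHT above keeps a software shadow of every slot (chid, handle, inst), so nvkm_ramht_search() no longer reads the hash table back from the GPU object, and nvkm_ramht_insert() returns a cookie (slot index + 1) that nvkm_ramht_remove() later consumes. A minimal usage sketch against the signatures introduced in this hunk, assuming an already-probed device, a backing parent gpuobj, and an object to bind; the size, handle value and shift are illustrative only:

/* Hedged sketch: exercising the new RAMHT interface; error handling
 * is trimmed and all constants are invented for illustration. */
static int
example_ramht(struct nvkm_device *device, struct nvkm_gpuobj *parent,
              struct nvkm_object *object, int chid)
{
        struct nvkm_ramht *ramht;
        int cookie, ret;

        /* 0x8000-byte hash table, 16-byte aligned, backed by "parent". */
        ret = nvkm_ramht_new(device, 0x8000, 16, parent, &ramht);
        if (ret)
                return ret;

        /* Bind "object" and hash it in; addr < 0 shifts the bound
         * instance left into the context word (here by 4 bits). */
        cookie = nvkm_ramht_insert(ramht, object, chid, -4, 0xbeef0001, 0);
        if (cookie < 0) {
                nvkm_ramht_del(&ramht);
                return cookie;
        }

        /* Lookup hits the in-memory shadow, not VRAM. */
        WARN_ON(nvkm_ramht_search(ramht, chid, 0xbeef0001) == NULL);

        nvkm_ramht_remove(ramht, cookie);       /* frees the slot */
        nvkm_ramht_del(&ramht);                 /* drops gpuobj + shadow */
        return 0;
}
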
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c5fb3a793174..7de98470a2a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -25,96 +25,178 @@
#include <core/device.h>
#include <core/option.h>
-struct nvkm_subdev *
-nvkm_subdev(void *obj, int idx)
-{
- struct nvkm_object *object = nv_object(obj);
- while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
- object = object->parent;
- if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
- object = nv_device(obj)->subdev[idx];
- return object ? nv_subdev(object) : NULL;
-}
+static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
+
+const char *
+nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+ [NVKM_SUBDEV_BAR ] = "bar",
+ [NVKM_SUBDEV_VBIOS ] = "bios",
+ [NVKM_SUBDEV_BUS ] = "bus",
+ [NVKM_SUBDEV_CLK ] = "clk",
+ [NVKM_SUBDEV_DEVINIT] = "devinit",
+ [NVKM_SUBDEV_FB ] = "fb",
+ [NVKM_SUBDEV_FUSE ] = "fuse",
+ [NVKM_SUBDEV_GPIO ] = "gpio",
+ [NVKM_SUBDEV_I2C ] = "i2c",
+ [NVKM_SUBDEV_IBUS ] = "priv",
+ [NVKM_SUBDEV_INSTMEM] = "imem",
+ [NVKM_SUBDEV_LTC ] = "ltc",
+ [NVKM_SUBDEV_MC ] = "mc",
+ [NVKM_SUBDEV_MMU ] = "mmu",
+ [NVKM_SUBDEV_MXM ] = "mxm",
+ [NVKM_SUBDEV_PCI ] = "pci",
+ [NVKM_SUBDEV_PMU ] = "pmu",
+ [NVKM_SUBDEV_THERM ] = "therm",
+ [NVKM_SUBDEV_TIMER ] = "tmr",
+ [NVKM_SUBDEV_VOLT ] = "volt",
+ [NVKM_ENGINE_BSP ] = "bsp",
+ [NVKM_ENGINE_CE0 ] = "ce0",
+ [NVKM_ENGINE_CE1 ] = "ce1",
+ [NVKM_ENGINE_CE2 ] = "ce2",
+ [NVKM_ENGINE_CIPHER ] = "cipher",
+ [NVKM_ENGINE_DISP ] = "disp",
+ [NVKM_ENGINE_DMAOBJ ] = "dma",
+ [NVKM_ENGINE_FIFO ] = "fifo",
+ [NVKM_ENGINE_GR ] = "gr",
+ [NVKM_ENGINE_IFB ] = "ifb",
+ [NVKM_ENGINE_ME ] = "me",
+ [NVKM_ENGINE_MPEG ] = "mpeg",
+ [NVKM_ENGINE_MSENC ] = "msenc",
+ [NVKM_ENGINE_MSPDEC ] = "mspdec",
+ [NVKM_ENGINE_MSPPP ] = "msppp",
+ [NVKM_ENGINE_MSVLD ] = "msvld",
+ [NVKM_ENGINE_PM ] = "pm",
+ [NVKM_ENGINE_SEC ] = "sec",
+ [NVKM_ENGINE_SW ] = "sw",
+ [NVKM_ENGINE_VIC ] = "vic",
+ [NVKM_ENGINE_VP ] = "vp",
+};
void
-nvkm_subdev_reset(struct nvkm_object *subdev)
+nvkm_subdev_intr(struct nvkm_subdev *subdev)
{
- nv_trace(subdev, "resetting...\n");
- nv_ofuncs(subdev)->fini(subdev, false);
- nv_debug(subdev, "reset\n");
+ if (subdev->func->intr)
+ subdev->func->intr(subdev);
}
int
-nvkm_subdev_init(struct nvkm_subdev *subdev)
+nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
- int ret = nvkm_object_init(&subdev->object);
- if (ret)
- return ret;
+ struct nvkm_device *device = subdev->device;
+ const char *action = suspend ? "suspend" : "fini";
+ u32 pmc_enable = subdev->pmc_enable;
+ s64 time;
- nvkm_subdev_reset(&subdev->object);
- return 0;
-}
+ nvkm_trace(subdev, "%s running...\n", action);
+ time = ktime_to_us(ktime_get());
-int
-_nvkm_subdev_init(struct nvkm_object *object)
-{
- return nvkm_subdev_init(nv_subdev(object));
-}
+ if (subdev->func->fini) {
+ int ret = subdev->func->fini(subdev, suspend);
+ if (ret) {
+ nvkm_error(subdev, "%s failed, %d\n", action, ret);
+ if (suspend)
+ return ret;
+ }
+ }
-int
-nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
-{
- if (subdev->unit) {
- nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
- nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+ if (pmc_enable) {
+ nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+ nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+ nvkm_rd32(device, 0x000200);
}
- return nvkm_object_fini(&subdev->object, suspend);
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
+ return 0;
}
int
-_nvkm_subdev_fini(struct nvkm_object *object, bool suspend)
+nvkm_subdev_preinit(struct nvkm_subdev *subdev)
{
- return nvkm_subdev_fini(nv_subdev(object), suspend);
-}
+ s64 time;
-void
-nvkm_subdev_destroy(struct nvkm_subdev *subdev)
-{
- int subidx = nv_hclass(subdev) & 0xff;
- nv_device(subdev)->subdev[subidx] = NULL;
- nvkm_object_destroy(&subdev->object);
-}
+ nvkm_trace(subdev, "preinit running...\n");
+ time = ktime_to_us(ktime_get());
-void
-_nvkm_subdev_dtor(struct nvkm_object *object)
-{
- nvkm_subdev_destroy(nv_subdev(object));
+ if (subdev->func->preinit) {
+ int ret = subdev->func->preinit(subdev);
+ if (ret) {
+ nvkm_error(subdev, "preinit failed, %d\n", ret);
+ return ret;
+ }
+ }
+
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "preinit completed in %lldus\n", time);
+ return 0;
}
int
-nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pclass,
- const char *subname, const char *sysname,
- int size, void **pobject)
+nvkm_subdev_init(struct nvkm_subdev *subdev)
{
- struct nvkm_subdev *subdev;
+ s64 time;
int ret;
- ret = nvkm_object_create_(parent, engine, oclass, pclass |
- NV_SUBDEV_CLASS, size, pobject);
- subdev = *pobject;
- if (ret)
- return ret;
+ nvkm_trace(subdev, "init running...\n");
+ time = ktime_to_us(ktime_get());
+
+ if (subdev->func->oneinit && !subdev->oneinit) {
+ s64 time;
+ nvkm_trace(subdev, "one-time init running...\n");
+ time = ktime_to_us(ktime_get());
+ ret = subdev->func->oneinit(subdev);
+ if (ret) {
+ nvkm_error(subdev, "one-time init failed, %d\n", ret);
+ return ret;
+ }
- __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
- subdev->name = subname;
+ subdev->oneinit = true;
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+ }
- if (parent) {
- struct nvkm_device *device = nv_device(parent);
- subdev->debug = nvkm_dbgopt(device->dbgopt, subname);
- subdev->mmio = nv_subdev(device)->mmio;
+ if (subdev->func->init) {
+ ret = subdev->func->init(subdev);
+ if (ret) {
+ nvkm_error(subdev, "init failed, %d\n", ret);
+ return ret;
+ }
}
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "init completed in %lldus\n", time);
return 0;
}
+
+void
+nvkm_subdev_del(struct nvkm_subdev **psubdev)
+{
+ struct nvkm_subdev *subdev = *psubdev;
+ s64 time;
+
+ if (subdev && !WARN_ON(!subdev->func)) {
+ nvkm_trace(subdev, "destroy running...\n");
+ time = ktime_to_us(ktime_get());
+ if (subdev->func->dtor)
+ *psubdev = subdev->func->dtor(subdev);
+ time = ktime_to_us(ktime_get()) - time;
+ nvkm_trace(subdev, "destroy completed in %lldus\n", time);
+ kfree(*psubdev);
+ *psubdev = NULL;
+ }
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
+ struct nvkm_device *device, int index, u32 pmc_enable,
+ struct nvkm_subdev *subdev)
+{
+ const char *name = nvkm_subdev_name[index];
+ subdev->func = func;
+ subdev->device = device;
+ subdev->index = index;
+ subdev->pmc_enable = pmc_enable;
+
+ __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
+ subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+}
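
Subdev construction above now fills a caller-embedded structure instead of going through the old object machinery: nvkm_subdev_ctor() wires in the func table, device pointer, index-derived name and per-subdev debug level, and nvkm_subdev_del() runs ->dtor() (which hands back the allocation to kfree()) before clearing the pointer. A minimal sketch of an implementation against this interface; the "foo" subdev and its zero pmc_enable are invented for illustration:

/* Hedged sketch: a do-nothing subdev built on the new ctor/func-table
 * pattern; every "foo" name here is hypothetical. */
struct foo_subdev {
        struct nvkm_subdev base;
};

static int
foo_init(struct nvkm_subdev *subdev)
{
        return 0;       /* hardware setup would go here */
}

static void *
foo_dtor(struct nvkm_subdev *subdev)
{
        /* Hand the allocation back for nvkm_subdev_del() to kfree(). */
        return container_of(subdev, struct foo_subdev, base);
}

static const struct nvkm_subdev_func
foo_func = {
        .dtor = foo_dtor,
        .init = foo_init,
};

int
foo_new(struct nvkm_device *device, int index, struct nvkm_subdev **psubdev)
{
        struct foo_subdev *foo;
        if (!(foo = kzalloc(sizeof(*foo), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_subdev_ctor(&foo_func, device, index, 0, &foo->base);
        *psubdev = &foo->base;
        return 0;
}
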
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 6bd3d756f32c..36f724763fde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -6,7 +6,7 @@ include $(src)/nvkm/engine/ce/Kbuild
include $(src)/nvkm/engine/cipher/Kbuild
include $(src)/nvkm/engine/device/Kbuild
include $(src)/nvkm/engine/disp/Kbuild
-include $(src)/nvkm/engine/dmaobj/Kbuild
+include $(src)/nvkm/engine/dma/Kbuild
include $(src)/nvkm/engine/fifo/Kbuild
include $(src)/nvkm/engine/gr/Kbuild
include $(src)/nvkm/engine/mpeg/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index a0b1fd80fa93..3ef01071f073 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -22,72 +22,23 @@
* Authors: Ben Skeggs, Ilia Mirkin
*/
#include <engine/bsp.h>
-#include <engine/xtensa.h>
-#include <core/engctx.h>
-
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_sclass[] = {
- { 0x74b0, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * BSP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_xtensa_engctx_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
- },
+#include <nvif/class.h>
+
+static const struct nvkm_xtensa_func
+g84_bsp = {
+ .pmc_enable = 0x04008000,
+ .fifo_val = 0x1111,
+ .unkd28 = 0x90044,
+ .sclass = {
+ { -1, -1, NV74_BSP },
+ {}
+ }
};
-/*******************************************************************************
- * BSP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
- struct nvkm_xtensa *priv;
- int ret;
-
- ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
- "PBSP", "bsp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x04008000;
- nv_engine(priv)->cclass = &g84_bsp_cclass;
- nv_engine(priv)->sclass = g84_bsp_sclass;
- priv->fifo_val = 0x1111;
- priv->unkd28 = 0x90044;
- return 0;
+ return nvkm_xtensa_new_(&g84_bsp, device, index,
+ true, 0x103000, pengine);
}
-
-struct nvkm_oclass
-g84_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_bsp_ctor,
- .dtor = _nvkm_xtensa_dtor,
- .init = _nvkm_xtensa_init,
- .fini = _nvkm_xtensa_fini,
- .rd32 = _nvkm_xtensa_rd32,
- .wr32 = _nvkm_xtensa_wr32,
- },
-};
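
The conversion above sets the pattern repeated by the engine patches that follow: per-object ofuncs and ctor boilerplate collapse into one const func table, and each sclass entry is a { minver, maxver, oclass } triple, where -1 appears to act as a wildcard version bound (assumed semantics, inferred from the entries in this series). A sketch of how a probe path would instantiate the converted engine; the calling context is hypothetical:

/* Hedged sketch: instantiating the new-style BSP engine; the probe
 * function is invented, g84_bsp_new() is from the hunk above. */
static int
example_probe_bsp(struct nvkm_device *device, struct nvkm_engine **pengine)
{
        return g84_bsp_new(device, NVKM_ENGINE_BSP, pengine);
}
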
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index a558dfa4d76a..6226bcd98ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -24,9 +24,9 @@
*/
#ifdef GT215
-.section #gt215_pce_data
+.section #gt215_ce_data
#else
-.section #gf100_pce_data
+.section #gf100_ce_data
#endif
ctx_object: .b32 0
@@ -128,9 +128,9 @@ dispatch_dma:
.b16 0x800 0
#ifdef GT215
-.section #gt215_pce_code
+.section #gt215_ce_code
#else
-.section #gf100_pce_code
+.section #gf100_ce_code
#endif
main:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9af6e4e4585..05bb65608dfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pce_data[] = {
+uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_pce_data[] = {
0x00000800,
};
-uint32_t gf100_pce_code[] = {
+uint32_t gf100_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f42c0d0d6cee..972281d10f38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pce_data[] = {
+uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_pce_data[] = {
0x00000800,
};
-uint32_t gt215_pce_code[] = {
+uint32_t gt215_ce_code[] = {
/* 0x0000: main */
0x04fe04bd,
0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 2d2e549c2e34..92a9f35df1a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -21,146 +21,60 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/ce.h>
-#include <engine/falcon.h>
+#include "priv.h"
#include "fuc/gf100.fuc3.h"
-struct gf100_ce_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_ce0_sclass[] = {
- { 0x90b5, &nvkm_object_ofuncs },
- {},
-};
-
-static struct nvkm_oclass
-gf100_ce1_sclass[] = {
- { 0x90b8, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gf100_ce_context_ofuncs = {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
-};
-
-static struct nvkm_oclass
-gf100_ce0_cclass = {
- .handle = NV_ENGCTX(CE0, 0xc0),
- .ofuncs = &gf100_ce_context_ofuncs,
-};
-
-static struct nvkm_oclass
-gf100_ce1_cclass = {
- .handle = NV_ENGCTX(CE1, 0xc0),
- .ofuncs = &gf100_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-gf100_ce_init(struct nvkm_object *object)
+static void
+gf100_ce_init(struct nvkm_falcon *ce)
{
- struct gf100_ce_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
- return 0;
+ struct nvkm_device *device = ce->engine.subdev.device;
+ const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
+ nvkm_wr32(device, ce->addr + 0x084, index);
}
-static int
-gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_ce_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true,
- "PCE0", "ce0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+gf100_ce0 = {
+ .code.data = gf100_ce_code,
+ .code.size = sizeof(gf100_ce_code),
+ .data.data = gf100_ce_data,
+ .data.size = sizeof(gf100_ce_data),
+ .pmc_enable = 0x00000040,
+ .init = gf100_ce_init,
+ .intr = gt215_ce_intr,
+ .sclass = {
+ { -1, -1, FERMI_DMA },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = gt215_ce_intr;
- nv_engine(priv)->cclass = &gf100_ce0_cclass;
- nv_engine(priv)->sclass = gf100_ce0_sclass;
- nv_falcon(priv)->code.data = gf100_pce_code;
- nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
- nv_falcon(priv)->data.data = gf100_pce_data;
- nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
- return 0;
-}
+static const struct nvkm_falcon_func
+gf100_ce1 = {
+ .code.data = gf100_ce_code,
+ .code.size = sizeof(gf100_ce_code),
+ .data.data = gf100_ce_data,
+ .data.size = sizeof(gf100_ce_data),
+ .pmc_enable = 0x00000080,
+ .init = gf100_ce_init,
+ .intr = gt215_ce_intr,
+ .sclass = {
+ { -1, -1, FERMI_DECOMPRESS },
+ {}
+ }
+};
-static int
-gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gf100_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct gf100_ce_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true,
- "PCE1", "ce1", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = gt215_ce_intr;
- nv_engine(priv)->cclass = &gf100_ce1_cclass;
- nv_engine(priv)->sclass = gf100_ce1_sclass;
- nv_falcon(priv)->code.data = gf100_pce_code;
- nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
- nv_falcon(priv)->data.data = gf100_pce_data;
- nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
- return 0;
+ if (index == NVKM_ENGINE_CE0) {
+ return nvkm_falcon_new_(&gf100_ce0, device, index, true,
+ 0x104000, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE1) {
+ return nvkm_falcon_new_(&gf100_ce1, device, index, true,
+ 0x105000, pengine);
+ }
+ return -ENODEV;
}
-
-struct nvkm_oclass
-gf100_ce0_oclass = {
- .handle = NV_ENGINE(CE0, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ce0_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gf100_ce_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
-
-struct nvkm_oclass
-gf100_ce1_oclass = {
- .handle = NV_ENGINE(CE1, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ce1_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gf100_ce_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index a998932fae45..c541a1c012dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -21,153 +21,47 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/ce.h>
+#include "priv.h"
-#include <core/engctx.h>
+#include <nvif/class.h>
-struct gk104_ce_priv {
- struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_ce_sclass[] = {
- { 0xa0b5, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gk104_ce_context_ofuncs = {
- .ctor = _nvkm_engctx_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gk104_ce_cclass = {
- .handle = NV_ENGCTX(CE0, 0xc0),
- .ofuncs = &gk104_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gk104_ce_intr(struct nvkm_subdev *subdev)
+void
+gk104_ce_intr(struct nvkm_engine *ce)
{
- const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
- struct gk104_ce_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
+ const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x104908 + base);
if (stat) {
- nv_warn(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ nvkm_warn(subdev, "intr %08x\n", stat);
+ nvkm_wr32(device, 0x104908 + base, stat);
}
}
-static int
-gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE0", "ce0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = gk104_ce_intr;
- nv_engine(priv)->cclass = &gk104_ce_cclass;
- nv_engine(priv)->sclass = gk104_ce_sclass;
- return 0;
-}
-
-static int
-gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE1", "ce1", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = gk104_ce_intr;
- nv_engine(priv)->cclass = &gk104_ce_cclass;
- nv_engine(priv)->sclass = gk104_ce_sclass;
- return 0;
-}
+static const struct nvkm_engine_func
+gk104_ce = {
+ .intr = gk104_ce_intr,
+ .sclass = {
+ { -1, -1, KEPLER_DMA_COPY_A },
+ {}
+ }
+};
-static int
-gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gk104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct gk104_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE2", "ce2", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00200000;
- nv_subdev(priv)->intr = gk104_ce_intr;
- nv_engine(priv)->cclass = &gk104_ce_cclass;
- nv_engine(priv)->sclass = gk104_ce_sclass;
- return 0;
+ if (index == NVKM_ENGINE_CE0) {
+ return nvkm_engine_new_(&gk104_ce, device, index,
+ 0x00000040, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE1) {
+ return nvkm_engine_new_(&gk104_ce, device, index,
+ 0x00000080, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE2) {
+ return nvkm_engine_new_(&gk104_ce, device, index,
+ 0x00200000, true, pengine);
+ }
+ return -ENODEV;
}
-
-struct nvkm_oclass
-gk104_ce0_oclass = {
- .handle = NV_ENGINE(CE0, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_ce0_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
-
-struct nvkm_oclass
-gk104_ce1_oclass = {
- .handle = NV_ENGINE(CE1, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_ce1_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
-
-struct nvkm_oclass
-gk104_ce2_oclass = {
- .handle = NV_ENGINE(CE2, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_ce2_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
index 577eb2eead05..8eaa72a59f40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -21,153 +21,34 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/ce.h>
+#include "priv.h"
-#include <core/engctx.h>
+#include <nvif/class.h>
-struct gm204_ce_priv {
- struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_ce_sclass[] = {
- { 0xb0b5, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gm204_ce_context_ofuncs = {
- .ctor = _nvkm_engctx_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gm204_ce_cclass = {
- .handle = NV_ENGCTX(CE0, 0x24),
- .ofuncs = &gm204_ce_context_ofuncs,
+static const struct nvkm_engine_func
+gm204_ce = {
+ .intr = gk104_ce_intr,
+ .sclass = {
+ { -1, -1, MAXWELL_DMA_COPY_A },
+ {}
+ }
};
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gm204_ce_intr(struct nvkm_subdev *subdev)
+int
+gm204_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
- struct gm204_ce_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
- if (stat) {
- nv_warn(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+ if (index == NVKM_ENGINE_CE0) {
+ return nvkm_engine_new_(&gm204_ce, device, index,
+ 0x00000040, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE1) {
+ return nvkm_engine_new_(&gm204_ce, device, index,
+ 0x00000080, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE2) {
+ return nvkm_engine_new_(&gm204_ce, device, index,
+ 0x00200000, true, pengine);
}
+ return -ENODEV;
}
-
-static int
-gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gm204_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE0", "ce0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = gm204_ce_intr;
- nv_engine(priv)->cclass = &gm204_ce_cclass;
- nv_engine(priv)->sclass = gm204_ce_sclass;
- return 0;
-}
-
-static int
-gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gm204_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE1", "ce1", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = gm204_ce_intr;
- nv_engine(priv)->cclass = &gm204_ce_cclass;
- nv_engine(priv)->sclass = gm204_ce_sclass;
- return 0;
-}
-
-static int
-gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gm204_ce_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCE2", "ce2", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00200000;
- nv_subdev(priv)->intr = gm204_ce_intr;
- nv_engine(priv)->cclass = &gm204_ce_cclass;
- nv_engine(priv)->sclass = gm204_ce_sclass;
- return 0;
-}
-
-struct nvkm_oclass
-gm204_ce0_oclass = {
- .handle = NV_ENGINE(CE0, 0x24),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_ce0_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
-
-struct nvkm_oclass
-gm204_ce1_oclass = {
- .handle = NV_ENGINE(CE1, 0x24),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_ce1_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
-
-struct nvkm_oclass
-gm204_ce2_oclass = {
- .handle = NV_ENGINE(CE2, 0x24),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_ce2_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = _nvkm_engine_init,
- .fini = _nvkm_engine_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index d8bb4293bc11..402dcbcc2192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,50 +21,15 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/ce.h>
-#include <engine/falcon.h>
-#include <engine/fifo.h>
+#include "priv.h"
#include "fuc/gt215.fuc3.h"
#include <core/client.h>
-#include <core/device.h>
#include <core/enum.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
-struct gt215_ce_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_sclass[] = {
- { 0x85b5, &nvkm_object_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_cclass = {
- .handle = NV_ENGCTX(CE0, 0xa3),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
-
- },
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
static const struct nvkm_enum
gt215_ce_isr_error_name[] = {
@@ -75,78 +40,45 @@ gt215_ce_isr_error_name[] = {
};
void
-gt215_ce_intr(struct nvkm_subdev *subdev)
+gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_falcon *falcon = (void *)subdev;
- struct nvkm_object *engctx;
- u32 dispatch = nv_ro32(falcon, 0x01c);
- u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
- u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
- u32 addr = nv_ro32(falcon, 0x040) >> 16;
+ struct nvkm_subdev *subdev = &ce->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+ u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
+ u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_ro32(falcon, 0x044);
- int chid;
-
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(falcon, "DISPATCH_ERROR [");
- nvkm_enum_print(gt215_ce_isr_error_name, ssta);
- pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nvkm_client_name(engctx), subc,
- mthd, data);
- nv_wo32(falcon, 0x004, 0x00000040);
- stat &= ~0x00000040;
- }
+ u32 data = nvkm_rd32(device, 0x104044 + base);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gt215_ce_isr_error_name, ssta);
+
+ nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n", ssta,
+ en ? en->name : "", chan ? chan->chid : -1,
+ chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+}
- if (stat) {
- nv_error(falcon, "unhandled intr 0x%08x\n", stat);
- nv_wo32(falcon, 0x004, stat);
+static const struct nvkm_falcon_func
+gt215_ce = {
+ .code.data = gt215_ce_code,
+ .code.size = sizeof(gt215_ce_code),
+ .data.data = gt215_ce_data,
+ .data.size = sizeof(gt215_ce_data),
+ .pmc_enable = 0x00802000,
+ .intr = gt215_ce_intr,
+ .sclass = {
+ { -1, -1, GT212_DMA },
+ {}
}
+};
- nvkm_engctx_put(engctx);
-}
-
-static int
-gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gt215_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- bool enable = (nv_device(parent)->chipset != 0xaf);
- struct gt215_ce_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
- "PCE0", "ce0", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00802000;
- nv_subdev(priv)->intr = gt215_ce_intr;
- nv_engine(priv)->cclass = &gt215_ce_cclass;
- nv_engine(priv)->sclass = gt215_ce_sclass;
- nv_falcon(priv)->code.data = gt215_pce_code;
- nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
- nv_falcon(priv)->data.data = gt215_pce_data;
- nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
- return 0;
+ return nvkm_falcon_new_(&gt215_ce, device, index,
+ (device->chipset != 0xaf), 0x104000, pengine);
}
-
-struct nvkm_oclass
-gt215_ce_oclass = {
- .handle = NV_ENGINE(CE0, 0xa3),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_ce_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = _nvkm_falcon_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
new file mode 100644
index 000000000000..e2fa8b161943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_CE_PRIV_H__
+#define __NVKM_CE_PRIV_H__
+#include <engine/ce.h>
+
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gk104_ce_intr(struct nvkm_engine *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 13f30428a305..bfd01625ec7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -25,76 +25,47 @@
#include <engine/fifo.h>
#include <core/client.h>
-#include <core/engctx.h>
#include <core/enum.h>
+#include <core/gpuobj.h>
-struct g84_cipher_priv {
- struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <nvif/class.h>
static int
-g84_cipher_object_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+g84_cipher_oclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *obj;
- int ret;
-
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- return 0;
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+ align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+ nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static struct nvkm_ofuncs
-g84_cipher_ofuncs = {
- .ctor = g84_cipher_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
+static const struct nvkm_object_func
+g84_cipher_oclass_func = {
+ .bind = g84_cipher_oclass_bind,
};
-static struct nvkm_oclass
-g84_cipher_sclass[] = {
- { 0x74c1, &g84_cipher_ofuncs },
- {}
-};
+static int
+g84_cipher_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+ align, true, parent, pgpuobj);
-/*******************************************************************************
- * PCIPHER context
- ******************************************************************************/
+}
-static struct nvkm_oclass
+static const struct nvkm_object_func
g84_cipher_cclass = {
- .handle = NV_ENGCTX(CIPHER, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_engctx_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
- },
+ .bind = g84_cipher_cclass_bind,
};
-/*******************************************************************************
- * PCIPHER engine/subdev functions
- ******************************************************************************/
-
static const struct nvkm_bitfield
g84_cipher_intr_mask[] = {
{ 0x00000001, "INVALID_STATE" },
@@ -106,79 +77,59 @@ g84_cipher_intr_mask[] = {
};
static void
-g84_cipher_intr(struct nvkm_subdev *subdev)
+g84_cipher_intr(struct nvkm_engine *cipher)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct g84_cipher_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x102130);
- u32 mthd = nv_rd32(priv, 0x102190);
- u32 data = nv_rd32(priv, 0x102194);
- u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
- int chid;
-
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
+ struct nvkm_subdev *subdev = &cipher->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo *fifo = device->fifo;
+ struct nvkm_fifo_chan *chan;
+ u32 stat = nvkm_rd32(device, 0x102130);
+ u32 mthd = nvkm_rd32(device, 0x102190);
+ u32 data = nvkm_rd32(device, 0x102194);
+ u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff;
+ unsigned long flags;
+ char msg[128];
+
+ chan = nvkm_fifo_chan_inst(fifo, (u64)inst << 12, &flags);
if (stat) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(g84_cipher_intr_mask, stat);
- pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nvkm_client_name(engctx),
- mthd, data);
+ nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
+ nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] "
+ "mthd %04x data %08x\n", stat, msg,
+ chan ? chan->chid : -1, (u64)inst << 12,
+ chan ? chan->object.client->name : "unknown",
+ mthd, data);
}
+ nvkm_fifo_chan_put(fifo, flags, &chan);
- nv_wr32(priv, 0x102130, stat);
- nv_wr32(priv, 0x10200c, 0x10);
-
- nvkm_engctx_put(engctx);
+ nvkm_wr32(device, 0x102130, stat);
+ nvkm_wr32(device, 0x10200c, 0x10);
}
static int
-g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+g84_cipher_init(struct nvkm_engine *cipher)
{
- struct g84_cipher_priv *priv;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true,
- "PCIPHER", "cipher", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00004000;
- nv_subdev(priv)->intr = g84_cipher_intr;
- nv_engine(priv)->cclass = &g84_cipher_cclass;
- nv_engine(priv)->sclass = g84_cipher_sclass;
+ struct nvkm_device *device = cipher->subdev.device;
+ nvkm_wr32(device, 0x102130, 0xffffffff);
+ nvkm_wr32(device, 0x102140, 0xffffffbf);
+ nvkm_wr32(device, 0x10200c, 0x00000010);
return 0;
}
-static int
-g84_cipher_init(struct nvkm_object *object)
-{
- struct g84_cipher_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_engine_init(&priv->base);
- if (ret)
- return ret;
+static const struct nvkm_engine_func
+g84_cipher = {
+ .init = g84_cipher_init,
+ .intr = g84_cipher_intr,
+ .cclass = &g84_cipher_cclass,
+ .sclass = {
+ { -1, -1, NV74_CIPHER, &g84_cipher_oclass_func },
+ {}
+ }
+};
- nv_wr32(priv, 0x102130, 0xffffffff);
- nv_wr32(priv, 0x102140, 0xffffffbf);
- nv_wr32(priv, 0x10200c, 0x00000010);
- return 0;
+int
+g84_cipher_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&g84_cipher, device, index,
+ 0x00004000, true, pengine);
}
-
-struct nvkm_oclass
-g84_cipher_oclass = {
- .handle = NV_ENGINE(CIPHER, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_cipher_ctor,
- .dtor = _nvkm_engine_dtor,
- .init = g84_cipher_init,
- .fini = _nvkm_engine_fini,
- },
-};
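
g84_cipher above illustrates the new object .bind() hook that replaces gpuobj-subclass constructors: backing memory is allocated on demand and filled through the kmap/wo32/done accessors. A minimal sketch of a bind implementation in the same style; the 32-byte layout and "example" names are invented:

/* Hedged sketch: a .bind() hook modelled on g84_cipher_oclass_bind()
 * above; layout and names are illustrative only. */
static int
example_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
             int align, struct nvkm_gpuobj **pgpuobj)
{
        int ret = nvkm_gpuobj_new(object->engine->subdev.device, 32,
                                  align, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, object->oclass);      /* class id */
                nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
                nvkm_done(*pgpuobj);
        }
        return ret;
}

static const struct nvkm_object_func
example_object_func = {
        .bind = example_bind,
};
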
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index de1bf092b2b2..09032ba36000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,12 +1,6 @@
nvkm-y += nvkm/engine/device/acpi.o
nvkm-y += nvkm/engine/device/base.o
nvkm-y += nvkm/engine/device/ctrl.o
-nvkm-y += nvkm/engine/device/nv04.o
-nvkm-y += nvkm/engine/device/nv10.o
-nvkm-y += nvkm/engine/device/nv20.o
-nvkm-y += nvkm/engine/device/nv30.o
-nvkm-y += nvkm/engine/device/nv40.o
-nvkm-y += nvkm/engine/device/nv50.o
-nvkm-y += nvkm/engine/device/gf100.o
-nvkm-y += nvkm/engine/device/gk104.o
-nvkm-y += nvkm/engine/device/gm100.o
+nvkm-y += nvkm/engine/device/pci.o
+nvkm-y += nvkm/engine/device/tegra.o
+nvkm-y += nvkm/engine/device/user.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index f42706e1d5db..fdca90bc8f0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -40,21 +40,19 @@ nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
}
#endif
-int
-nvkm_acpi_fini(struct nvkm_device *device, bool suspend)
+void
+nvkm_acpi_fini(struct nvkm_device *device)
{
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&device->acpi.nb);
#endif
- return 0;
}
-int
+void
nvkm_acpi_init(struct nvkm_device *device)
{
#ifdef CONFIG_ACPI
device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
register_acpi_notifier(&device->acpi.nb);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 82dd359ddfa4..1bbe76e0740a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -3,6 +3,6 @@
#include <core/os.h>
struct nvkm_device;
-int nvkm_acpi_init(struct nvkm_device *);
-int nvkm_acpi_fini(struct nvkm_device *, bool);
+void nvkm_acpi_init(struct nvkm_device *);
+void nvkm_acpi_fini(struct nvkm_device *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 63d8e52f4b22..94a906b8cb88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,33 +24,33 @@
#include "priv.h"
#include "acpi.h"
-#include <core/client.h>
-#include <core/option.h>
#include <core/notify.h>
-#include <core/parent.h>
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
+#include <core/option.h>
-#include <nvif/class.h>
-#include <nvif/unpack.h>
+#include <subdev/bios.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
-struct nvkm_device *
-nvkm_device_find(u64 name)
+static struct nvkm_device *
+nvkm_device_find_locked(u64 handle)
{
- struct nvkm_device *device, *match = NULL;
- mutex_lock(&nv_devices_mutex);
+ struct nvkm_device *device;
list_for_each_entry(device, &nv_devices, head) {
- if (device->handle == name) {
- match = device;
- break;
- }
+ if (device->handle == handle)
+ return device;
}
+ return NULL;
+}
+
+struct nvkm_device *
+nvkm_device_find(u64 handle)
+{
+ struct nvkm_device *device;
+ mutex_lock(&nv_devices_mutex);
+ device = nvkm_device_find_locked(handle);
mutex_unlock(&nv_devices_mutex);
- return match;
+ return device;
}
int
@@ -67,280 +67,2272 @@ nvkm_device_list(u64 *name, int size)
return nr;
}
-/******************************************************************************
- * nvkm_devobj (0x0080): class implementation
- *****************************************************************************/
+static const struct nvkm_device_chip
+null_chipset = {
+ .name = "NULL",
+ .bios = nvkm_bios_new,
+};
+
+static const struct nvkm_device_chip
+nv4_chipset = {
+ .name = "NV04",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv04_devinit_new,
+ .fb = nv04_fb_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv04_fifo_new,
+ .gr = nv04_gr_new,
+ .sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv5_chipset = {
+ .name = "NV05",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv05_devinit_new,
+ .fb = nv04_fb_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv04_fifo_new,
+ .gr = nv04_gr_new,
+ .sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv10_chipset = {
+ .name = "NV10",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .gr = nv10_gr_new,
+};
+
+static const struct nvkm_device_chip
+nv11_chipset = {
+ .name = "NV11",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv10_fifo_new,
+ .gr = nv15_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv15_chipset = {
+ .name = "NV15",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv10_fifo_new,
+ .gr = nv15_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv17_chipset = {
+ .name = "NV17",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv17_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv18_chipset = {
+ .name = "NV18",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv17_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1a_chipset = {
+ .name = "nForce",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv1a_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv10_fifo_new,
+ .gr = nv15_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1f_chipset = {
+ .name = "nForce2",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv1a_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv17_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv20_chipset = {
+ .name = "NV20",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv20_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv20_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv25_chipset = {
+ .name = "NV25",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv25_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv25_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv28_chipset = {
+ .name = "NV28",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv25_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv25_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv2a_chipset = {
+ .name = "NV2A",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv25_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv2a_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv30_chipset = {
+ .name = "NV30",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv30_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv30_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv31_chipset = {
+ .name = "NV31",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv30_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv30_gr_new,
+ .mpeg = nv31_mpeg_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv34_chipset = {
+ .name = "NV34",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv10_devinit_new,
+ .fb = nv10_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv34_gr_new,
+ .mpeg = nv31_mpeg_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv35_chipset = {
+ .name = "NV35",
+ .bios = nvkm_bios_new,
+ .bus = nv04_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv35_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv35_gr_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv36_chipset = {
+ .name = "NV36",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv04_clk_new,
+ .devinit = nv20_devinit_new,
+ .fb = nv36_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv04_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv04_pci_new,
+ .timer = nv04_timer_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv17_fifo_new,
+ .gr = nv35_gr_new,
+ .mpeg = nv31_mpeg_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv40_chipset = {
+ .name = "NV40",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv40_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv40_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv40_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv41_chipset = {
+ .name = "NV41",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv41_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv40_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv42_chipset = {
+ .name = "NV42",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv41_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv40_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv43_chipset = {
+ .name = "NV43",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv41_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv40_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv44_chipset = {
+ .name = "NV44",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv44_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv45_chipset = {
+ .name = "NV45",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv40_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv04_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv46_chipset = {
+ .name = "G72",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv46_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv47_chipset = {
+ .name = "G70",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv47_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv49_chipset = {
+ .name = "G71",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv49_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4a_chipset = {
+ .name = "NV44A",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv44_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4b_chipset = {
+ .name = "G73",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv49_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv04_mc_new,
+ .mmu = nv41_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv40_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4c_chipset = {
+ .name = "C61",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv46_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4e_chipset = {
+ .name = "C51",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv4e_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv4e_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv50_chipset = {
+ .name = "G80",
+ .bar = nv50_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = nv50_bus_new,
+ .clk = nv50_clk_new,
+ .devinit = nv50_devinit_new,
+ .fb = nv50_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = nv50_gpio_new,
+ .i2c = nv50_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv50_pci_new,
+ .therm = nv50_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv50_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = nv50_fifo_new,
+ .gr = nv50_gr_new,
+ .mpeg = nv50_mpeg_new,
+ .pm = nv50_pm_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv63_chipset = {
+ .name = "C73",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv46_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv67_chipset = {
+ .name = "C67",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv46_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv68_chipset = {
+ .name = "C68",
+ .bios = nvkm_bios_new,
+ .bus = nv31_bus_new,
+ .clk = nv40_clk_new,
+ .devinit = nv1a_devinit_new,
+ .fb = nv46_fb_new,
+ .gpio = nv10_gpio_new,
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+ .mmu = nv44_mmu_new,
+ .pci = nv4c_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = nv04_disp_new,
+ .dma = nv04_dma_new,
+ .fifo = nv40_fifo_new,
+ .gr = nv44_gr_new,
+ .mpeg = nv44_mpeg_new,
+ .pm = nv40_pm_new,
+ .sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv84_chipset = {
+ .name = "G84",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = nv50_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = nv50_gpio_new,
+ .i2c = nv50_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv50_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = g84_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = g84_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv86_chipset = {
+ .name = "G86",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = nv50_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = nv50_gpio_new,
+ .i2c = nv50_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv50_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = g84_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = g84_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv92_chipset = {
+ .name = "G92",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = nv50_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = nv50_gpio_new,
+ .i2c = nv50_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv50_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = g84_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = g84_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv94_chipset = {
+ .name = "G94",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = g94_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = g84_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv96_chipset = {
+ .name = "G96",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = nv50_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = g94_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = g84_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv98_chipset = {
+ .name = "G98",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g98_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = g94_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = g84_gr_new,
+ .mspdec = g98_mspdec_new,
+ .msppp = g98_msppp_new,
+ .msvld = g98_msvld_new,
+ .pm = g84_pm_new,
+ .sec = g98_sec_new,
+ .sw = nv50_sw_new,
+};
-struct nvkm_devobj {
- struct nvkm_parent base;
- struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
+static const struct nvkm_device_chip
+nva0_chipset = {
+ .name = "GT200",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = g84_clk_new,
+ .devinit = g84_devinit_new,
+ .fb = g84_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = nv50_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .bsp = g84_bsp_new,
+ .cipher = g84_cipher_new,
+ .disp = gt200_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = gt200_gr_new,
+ .mpeg = g84_mpeg_new,
+ .pm = gt200_pm_new,
+ .sw = nv50_sw_new,
+ .vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nva3_chipset = {
+ .name = "GT215",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = gt215_clk_new,
+ .devinit = gt215_devinit_new,
+ .fb = gt215_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gt215_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gt215_ce_new,
+ .disp = gt215_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = gt215_gr_new,
+ .mpeg = g84_mpeg_new,
+ .mspdec = gt215_mspdec_new,
+ .msppp = gt215_msppp_new,
+ .msvld = gt215_msvld_new,
+ .pm = gt215_pm_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva5_chipset = {
+ .name = "GT216",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = gt215_clk_new,
+ .devinit = gt215_devinit_new,
+ .fb = gt215_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gt215_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gt215_ce_new,
+ .disp = gt215_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = gt215_gr_new,
+ .mspdec = gt215_mspdec_new,
+ .msppp = gt215_msppp_new,
+ .msvld = gt215_msvld_new,
+ .pm = gt215_pm_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva8_chipset = {
+ .name = "GT218",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = gt215_clk_new,
+ .devinit = gt215_devinit_new,
+ .fb = gt215_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gt215_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gt215_ce_new,
+ .disp = gt215_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = gt215_gr_new,
+ .mspdec = gt215_mspdec_new,
+ .msppp = gt215_msppp_new,
+ .msvld = gt215_msvld_new,
+ .pm = gt215_pm_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaa_chipset = {
+ .name = "MCP77/MCP78",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = mcp77_clk_new,
+ .devinit = g98_devinit_new,
+ .fb = mcp77_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = g94_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = gt200_gr_new,
+ .mspdec = g98_mspdec_new,
+ .msppp = g98_msppp_new,
+ .msvld = g98_msvld_new,
+ .pm = g84_pm_new,
+ .sec = g98_sec_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvac_chipset = {
+ .name = "MCP79/MCP7A",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = mcp77_clk_new,
+ .devinit = g98_devinit_new,
+ .fb = mcp77_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = g84_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .disp = g94_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = mcp79_gr_new,
+ .mspdec = g98_mspdec_new,
+ .msppp = g98_msppp_new,
+ .msvld = g98_msvld_new,
+ .pm = g84_pm_new,
+ .sec = g98_sec_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaf_chipset = {
+ .name = "MCP89",
+ .bar = g84_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = g94_bus_new,
+ .clk = gt215_clk_new,
+ .devinit = mcp89_devinit_new,
+ .fb = mcp89_fb_new,
+ .fuse = nv50_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .imem = nv50_instmem_new,
+ .mc = g98_mc_new,
+ .mmu = nv50_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gt215_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gt215_ce_new,
+ .disp = gt215_disp_new,
+ .dma = nv50_dma_new,
+ .fifo = g84_fifo_new,
+ .gr = mcp89_gr_new,
+ .mspdec = gt215_mspdec_new,
+ .msppp = gt215_msppp_new,
+ .msvld = mcp89_msvld_new,
+ .pm = gt215_pm_new,
+ .sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc0_chipset = {
+ .name = "GF100",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gf100_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .ce[1] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf100_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc1_chipset = {
+ .name = "GF108",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf108_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf108_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc3_chipset = {
+ .name = "GF106",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf104_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc4_chipset = {
+ .name = "GF104",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gf100_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .ce[1] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf104_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc8_chipset = {
+ .name = "GF110",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gf100_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .ce[1] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf110_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvce_chipset = {
+ .name = "GF114",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gf100_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .ce[1] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf104_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvcf_chipset = {
+ .name = "GF116",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = g94_gpio_new,
+ .i2c = g94_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gf100_pmu_new,
+ .therm = gt215_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gt215_disp_new,
+ .dma = gf100_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf104_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf100_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd7_chipset = {
+ .name = "GF117",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gf119_gpio_new,
+ .i2c = gf117_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gf119_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf117_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf117_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd9_chipset = {
+ .name = "GF119",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gf100_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gf100_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gf119_gpio_new,
+ .i2c = gf119_i2c_new,
+ .ibus = gf100_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gf100_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gf119_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gf119_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gf100_fifo_new,
+ .gr = gf119_gr_new,
+ .mspdec = gf100_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gf100_msvld_new,
+ .pm = gf117_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve4_chipset = {
+ .name = "GK104",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk104_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk104_fifo_new,
+ .gr = gk104_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .pm = gk104_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve6_chipset = {
+ .name = "GK106",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk104_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk104_fifo_new,
+ .gr = gk104_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .pm = gk104_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve7_chipset = {
+ .name = "GK107",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gf119_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk104_fifo_new,
+ .gr = gk104_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .pm = gk104_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvea_chipset = {
+ .name = "GK20A",
+ .bar = gk20a_bar_new,
+ .bus = gf100_bus_new,
+ .clk = gk20a_clk_new,
+ .fb = gk20a_fb_new,
+ .fuse = gf100_fuse_new,
+ .ibus = gk20a_ibus_new,
+ .imem = gk20a_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .pmu = gk20a_pmu_new,
+ .timer = gk20a_timer_new,
+ .volt = gk20a_volt_new,
+ .ce[2] = gk104_ce_new,
+ .dma = gf119_dma_new,
+ .fifo = gk20a_fifo_new,
+ .gr = gk20a_gr_new,
+ .pm = gk104_pm_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf0_chipset = {
+ .name = "GK110",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk110_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk110_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk104_fifo_new,
+ .gr = gk110_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf1_chipset = {
+ .name = "GK110B",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gf119_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gf100_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk110_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk110_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk104_fifo_new,
+ .gr = gk110b_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv106_chipset = {
+ .name = "GK208B",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk208_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk110_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk208_fifo_new,
+ .gr = gk208_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv108_chipset = {
+ .name = "GK208",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gf100_devinit_new,
+ .fb = gk104_fb_new,
+ .fuse = gf100_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gk104_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gk104_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gk208_pmu_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+ .volt = nv40_volt_new,
+ .ce[0] = gk104_ce_new,
+ .ce[1] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gk110_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk208_fifo_new,
+ .gr = gk208_gr_new,
+ .mspdec = gk104_mspdec_new,
+ .msppp = gf100_msppp_new,
+ .msvld = gk104_msvld_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv117_chipset = {
+ .name = "GM107",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gm107_devinit_new,
+ .fb = gm107_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gf119_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gm107_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gm107_pmu_new,
+ .therm = gm107_therm_new,
+ .timer = gk20a_timer_new,
+ .ce[0] = gk104_ce_new,
+ .ce[2] = gk104_ce_new,
+ .disp = gm107_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gk208_fifo_new,
+ .gr = gm107_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv124_chipset = {
+ .name = "GM204",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm204_devinit_new,
+ .fb = gm107_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm204_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gm107_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gm107_pmu_new,
+ .timer = gk20a_timer_new,
+ .ce[0] = gm204_ce_new,
+ .ce[1] = gm204_ce_new,
+ .ce[2] = gm204_ce_new,
+ .disp = gm204_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gm204_fifo_new,
+ .gr = gm204_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv126_chipset = {
+ .name = "GM206",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm204_devinit_new,
+ .fb = gm107_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm204_i2c_new,
+ .ibus = gk104_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gm107_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = nv40_pci_new,
+ .pmu = gm107_pmu_new,
+ .timer = gk20a_timer_new,
+ .ce[0] = gm204_ce_new,
+ .ce[1] = gm204_ce_new,
+ .ce[2] = gm204_ce_new,
+ .disp = gm204_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gm204_fifo_new,
+ .gr = gm206_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv12b_chipset = {
+ .name = "GM20B",
+ .bar = gk20a_bar_new,
+ .bus = gf100_bus_new,
+ .fb = gk20a_fb_new,
+ .fuse = gm107_fuse_new,
+ .ibus = gk20a_ibus_new,
+ .imem = gk20a_instmem_new,
+ .ltc = gm107_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .timer = gk20a_timer_new,
+ .ce[2] = gm204_ce_new,
+ .dma = gf119_dma_new,
+ .fifo = gm20b_fifo_new,
+ .gr = gm20b_gr_new,
+ .sw = gf100_sw_new,
};
static int
-nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
- struct nvkm_device *device = nv_device(object);
- struct nvkm_fb *pfb = nvkm_fb(device);
- struct nvkm_instmem *imem = nvkm_instmem(device);
- union {
- struct nv_device_info_v0 v0;
- } *args = data;
- int ret;
-
- nv_ioctl(object, "device info size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "device info vers %d\n", args->v0.version);
- } else
- return ret;
+ if (!WARN_ON(size != 0)) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
- switch (device->chipset) {
- case 0x01a:
- case 0x01f:
- case 0x04c:
- case 0x04e:
- case 0x063:
- case 0x067:
- case 0x068:
- case 0x0aa:
- case 0x0ac:
- case 0x0af:
- args->v0.platform = NV_DEVICE_INFO_V0_IGP;
- break;
+static const struct nvkm_event_func
+nvkm_device_event_func = {
+ .ctor = nvkm_device_event_ctor,
+};
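
The ctor above encodes a deliberately narrow contract: the device object publishes a single, payload-free event, so any subscription carrying data is rejected with -EINVAL. A hedged restatement of that contract in isolation; demo_event_ctor and struct demo_notify are placeholders, not the driver's notify API.

    #include <errno.h>

    struct demo_notify { int size, types, index; };

    static int demo_event_ctor(unsigned size, struct demo_notify *notify)
    {
            if (size != 0)
                    return -EINVAL;        /* no request payload is accepted */
            notify->size  = 0;             /* nothing will be copied back */
            notify->types = 1;             /* exactly one event type... */
            notify->index = 0;             /* ...at index 0 */
            return 0;
    }
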
+
+struct nvkm_subdev *
+nvkm_device_subdev(struct nvkm_device *device, int index)
+{
+ struct nvkm_engine *engine;
+
+ if (device->disable_mask & (1ULL << index))
+ return NULL;
+
+ switch (index) {
+#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+ _(BAR , device->bar , &device->bar->subdev);
+ _(VBIOS , device->bios , &device->bios->subdev);
+ _(BUS , device->bus , &device->bus->subdev);
+ _(CLK , device->clk , &device->clk->subdev);
+ _(DEVINIT, device->devinit, &device->devinit->subdev);
+ _(FB , device->fb , &device->fb->subdev);
+ _(FUSE , device->fuse , &device->fuse->subdev);
+ _(GPIO , device->gpio , &device->gpio->subdev);
+ _(I2C , device->i2c , &device->i2c->subdev);
+ _(IBUS , device->ibus , device->ibus);
+ _(INSTMEM, device->imem , &device->imem->subdev);
+ _(LTC , device->ltc , &device->ltc->subdev);
+ _(MC , device->mc , &device->mc->subdev);
+ _(MMU , device->mmu , &device->mmu->subdev);
+ _(MXM , device->mxm , device->mxm);
+ _(PCI , device->pci , &device->pci->subdev);
+ _(PMU , device->pmu , &device->pmu->subdev);
+ _(THERM , device->therm , &device->therm->subdev);
+ _(TIMER , device->timer , &device->timer->subdev);
+ _(VOLT , device->volt , &device->volt->subdev);
+#undef _
default:
- if (device->pdev) {
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
- args->v0.platform = NV_DEVICE_INFO_V0_AGP;
- else
- if (pci_is_pcie(device->pdev))
- args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
- else
- args->v0.platform = NV_DEVICE_INFO_V0_PCI;
- } else {
- args->v0.platform = NV_DEVICE_INFO_V0_SOC;
- }
+ engine = nvkm_device_engine(device, index);
+ if (engine)
+ return &engine->subdev;
break;
}
+ return NULL;
+}
- switch (device->card_type) {
- case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
- case NV_10:
- case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
- case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
- case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
- case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
- case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
- case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
- case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
- case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+struct nvkm_engine *
+nvkm_device_engine(struct nvkm_device *device, int index)
+{
+ if (device->disable_mask & (1ULL << index))
+ return NULL;
+
+ switch (index) {
+#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
+ _(BSP , device->bsp , device->bsp);
+ _(CE0 , device->ce[0] , device->ce[0]);
+ _(CE1 , device->ce[1] , device->ce[1]);
+ _(CE2 , device->ce[2] , device->ce[2]);
+ _(CIPHER , device->cipher , device->cipher);
+ _(DISP , device->disp , &device->disp->engine);
+ _(DMAOBJ , device->dma , &device->dma->engine);
+ _(FIFO , device->fifo , &device->fifo->engine);
+ _(GR , device->gr , &device->gr->engine);
+ _(IFB , device->ifb , device->ifb);
+ _(ME , device->me , device->me);
+ _(MPEG , device->mpeg , device->mpeg);
+ _(MSENC , device->msenc , device->msenc);
+ _(MSPDEC , device->mspdec , device->mspdec);
+ _(MSPPP , device->msppp , device->msppp);
+ _(MSVLD , device->msvld , device->msvld);
+ _(PM , device->pm , &device->pm->engine);
+ _(SEC , device->sec , device->sec);
+ _(SW , device->sw , &device->sw->engine);
+ _(VIC , device->vic , device->vic);
+ _(VP , device->vp , device->vp);
+#undef _
default:
- args->v0.family = 0;
+ WARN_ON(1);
break;
}
+ return NULL;
+}
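
Both lookup helpers above use the same local-macro trick: _() expands each line into a case that returns the unit's handle only when its pointer is non-NULL, so absent units, disabled units, and unknown indices all collapse to a NULL result. The shape of the trick in isolation, with placeholder names:

    struct handle { int id; };
    struct demo_dev { struct handle *foo, *bar; };

    static struct handle *demo_lookup(struct demo_dev *dev, int index)
    {
            switch (index) {
    #define _(n, p) case (n): if (p) return (p); break
            _(0, dev->foo);
            _(1, dev->bar);
    #undef _
            default:
                    break;
            }
            return NULL;                   /* absent, disabled, or unknown */
    }
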
+
+int
+nvkm_device_fini(struct nvkm_device *device, bool suspend)
+{
+ const char *action = suspend ? "suspend" : "fini";
+ struct nvkm_subdev *subdev;
+ int ret, i;
+ s64 time;
+
+ nvdev_trace(device, "%s running...\n", action);
+ time = ktime_to_us(ktime_get());
+
+ nvkm_acpi_fini(device);
+
+ for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+ if ((subdev = nvkm_device_subdev(device, i))) {
+ ret = nvkm_subdev_fini(subdev, suspend);
+ if (ret && suspend)
+ goto fail;
+ }
+ }
- args->v0.chipset = device->chipset;
- args->v0.revision = device->chiprev;
- if (pfb && pfb->ram)
- args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
- else
- args->v0.ram_size = args->v0.ram_user = 0;
- if (imem && args->v0.ram_size > 0)
- args->v0.ram_user = args->v0.ram_user - imem->reserved;
+ if (device->func->fini)
+ device->func->fini(device, suspend);
+
+ time = ktime_to_us(ktime_get()) - time;
+ nvdev_trace(device, "%s completed in %lldus...\n", action, time);
return 0;
+
+fail:
+ do {
+ if ((subdev = nvkm_device_subdev(device, i))) {
+ int rret = nvkm_subdev_init(subdev);
+ if (rret)
+ nvkm_fatal(subdev, "failed restart, %d\n", rret);
+ }
+ } while (++i < NVKM_SUBDEV_NR);
+
+ nvdev_trace(device, "%s failed with %d\n", action, ret);
+ return ret;
}
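
nvkm_device_fini() above walks the subdevs in reverse creation order and, when a suspend-time fini fails, restarts everything already stopped before propagating the error; a plain fini ignores per-subdev failures. The control flow reduced to its skeleton, with callback parameters standing in for the subdev list:

    static int demo_fini_all(int n, int (*fini)(int), int (*init)(int),
                             int suspend)
    {
            int ret = 0, i;

            for (i = n - 1; i >= 0; i--) {
                    ret = fini(i);
                    if (ret && suspend)
                            goto fail;
            }
            return 0;
    fail:
            do {                           /* restart units i..n-1 */
                    init(i);
            } while (++i < n);
            return ret;
    }
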
static int
-nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_device_preinit(struct nvkm_device *device)
{
- switch (mthd) {
- case NV_DEVICE_V0_INFO:
- return nvkm_devobj_info(object, data, size);
- default:
- break;
+ struct nvkm_subdev *subdev;
+ int ret, i;
+ s64 time;
+
+ nvdev_trace(device, "preinit running...\n");
+ time = ktime_to_us(ktime_get());
+
+ if (device->func->preinit) {
+ ret = device->func->preinit(device);
+ if (ret)
+ goto fail;
}
- return -EINVAL;
-}
-static u8
-nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
-{
- return nv_rd08(object->engine, addr);
-}
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+ if ((subdev = nvkm_device_subdev(device, i))) {
+ ret = nvkm_subdev_preinit(subdev);
+ if (ret)
+ goto fail;
+ }
+ }
-static u16
-nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
-{
- return nv_rd16(object->engine, addr);
-}
+ ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+ if (ret)
+ goto fail;
-static u32
-nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
-{
- return nv_rd32(object->engine, addr);
-}
+ time = ktime_to_us(ktime_get()) - time;
+ nvdev_trace(device, "preinit completed in %lldus\n", time);
+ return 0;
-static void
-nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- nv_wr08(object->engine, addr, data);
+fail:
+ nvdev_error(device, "preinit failed with %d\n", ret);
+ return ret;
}
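
Note the ordering above: per-subdev preinit runs before nvkm_devinit_post(), whose result populates device->disable_mask, and from then on both lookup helpers treat a set bit as "unit not present". A one-liner capturing the convention; demo_present is illustrative, not a driver symbol.

    static int demo_present(unsigned long long disable_mask, int index)
    {
            return !(disable_mask & (1ULL << index));  /* set bit => fused off */
    }
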
-static void
-nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
+int
+nvkm_device_init(struct nvkm_device *device)
{
- nv_wr16(object->engine, addr, data);
-}
+ struct nvkm_subdev *subdev;
+ int ret, i;
+ s64 time;
-static void
-nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- nv_wr32(object->engine, addr, data);
-}
+ ret = nvkm_device_preinit(device);
+ if (ret)
+ return ret;
-static int
-nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
- struct nvkm_device *device = nv_device(object);
- *addr = nv_device_resource_start(device, 0);
- *size = nv_device_resource_len(device, 0);
+ nvkm_device_fini(device, false);
+
+ nvdev_trace(device, "init running...\n");
+ time = ktime_to_us(ktime_get());
+
+ if (device->func->init) {
+ ret = device->func->init(device);
+ if (ret)
+ goto fail;
+ }
+
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+ if ((subdev = nvkm_device_subdev(device, i))) {
+ ret = nvkm_subdev_init(subdev);
+ if (ret)
+ goto fail_subdev;
+ }
+ }
+
+ nvkm_acpi_init(device);
+
+ time = ktime_to_us(ktime_get()) - time;
+ nvdev_trace(device, "init completed in %lldus\n", time);
return 0;
+
+fail_subdev:
+ do {
+ if ((subdev = nvkm_device_subdev(device, i)))
+ nvkm_subdev_fini(subdev, false);
+ } while (--i >= 0);
+
+fail:
+ nvdev_error(device, "init failed with %d\n", ret);
+ return ret;
}
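
The init path above is the mirror image of fini: subdevs start in creation order, and a failure at subdev i triggers a do/while unwind that also re-finis the unit that failed, which is harmless so long as fini tolerates a partially started unit. Skeleton form, with placeholder callbacks:

    static int demo_init_all(int n, int (*init)(int), void (*fini)(int))
    {
            int ret = 0, i;

            for (i = 0; i < n; i++) {
                    ret = init(i);
                    if (ret)
                            goto fail;
            }
            return 0;
    fail:
            do {                           /* includes the unit that failed */
                    fini(i);
            } while (--i >= 0);
            return ret;
    }
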
-static const u64 disable_map[] = {
- [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_CLK ] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_MMU] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_PMU] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_ENGINE_PM ] = NV_DEVICE_V0_DISABLE_CORE,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
- [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
- [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GR,
- [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
- [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
- [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
- [NVDEV_ENGINE_CIPHER] = NV_DEVICE_V0_DISABLE_CIPHER,
- [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
- [NVDEV_ENGINE_MSPPP] = NV_DEVICE_V0_DISABLE_MSPPP,
- [NVDEV_ENGINE_CE0] = NV_DEVICE_V0_DISABLE_CE0,
- [NVDEV_ENGINE_CE1] = NV_DEVICE_V0_DISABLE_CE1,
- [NVDEV_ENGINE_CE2] = NV_DEVICE_V0_DISABLE_CE2,
- [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
- [NVDEV_ENGINE_MSENC] = NV_DEVICE_V0_DISABLE_MSENC,
- [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
- [NVDEV_ENGINE_MSVLD] = NV_DEVICE_V0_DISABLE_MSVLD,
- [NVDEV_ENGINE_SEC] = NV_DEVICE_V0_DISABLE_SEC,
- [NVDEV_SUBDEV_NR] = 0,
-};
-
-static void
-nvkm_devobj_dtor(struct nvkm_object *object)
+void
+nvkm_device_del(struct nvkm_device **pdevice)
{
- struct nvkm_devobj *devobj = (void *)object;
+ struct nvkm_device *device = *pdevice;
int i;
+ if (device) {
+ mutex_lock(&nv_devices_mutex);
+ device->disable_mask = 0;
+ for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+ struct nvkm_subdev *subdev =
+ nvkm_device_subdev(device, i);
+ nvkm_subdev_del(&subdev);
+ }
- for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
- nvkm_object_ref(NULL, &devobj->subdev[i]);
+ nvkm_event_fini(&device->event);
- nvkm_parent_destroy(&devobj->base);
-}
+ if (device->pri)
+ iounmap(device->pri);
+ list_del(&device->head);
-static struct nvkm_oclass
-nvkm_devobj_oclass_super = {
- .handle = NV_DEVICE,
- .ofuncs = &(struct nvkm_ofuncs) {
- .dtor = nvkm_devobj_dtor,
- .init = _nvkm_parent_init,
- .fini = _nvkm_parent_fini,
- .mthd = nvkm_devobj_mthd,
- .map = nvkm_devobj_map,
- .rd08 = nvkm_devobj_rd08,
- .rd16 = nvkm_devobj_rd16,
- .rd32 = nvkm_devobj_rd32,
- .wr08 = nvkm_devobj_wr08,
- .wr16 = nvkm_devobj_wr16,
- .wr32 = nvkm_devobj_wr32,
+ if (device->func->dtor)
+ *pdevice = device->func->dtor(device);
+ mutex_unlock(&nv_devices_mutex);
+
+ kfree(*pdevice);
+ *pdevice = NULL;
}
-};
+}
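
nvkm_device_del() takes a pointer-to-pointer so the destructor can free the object and NULL the caller's reference in one step, removing a whole class of dangling-pointer bugs. The idiom in miniature; the demo names are illustrative.

    #include <stdlib.h>

    struct demo { int dummy; };

    static void demo_del(struct demo **pdemo)
    {
            struct demo *demo = *pdemo;

            if (demo) {
                    /* reverse-order teardown of members would go here */
                    free(demo);
                    *pdemo = NULL;         /* caller's pointer cannot dangle */
            }
    }

    /* usage: demo_del(&ptr); ptr is guaranteed NULL afterwards */
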
-static int
-nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nvkm_device_ctor(const struct nvkm_device_func *func,
+ const struct nvkm_device_quirk *quirk,
+ struct device *dev, enum nvkm_device_type type, u64 handle,
+ const char *name, const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device *device)
{
- union {
- struct nv_device_v0 v0;
- } *args = data;
- struct nvkm_client *client = nv_client(parent);
- struct nvkm_device *device;
- struct nvkm_devobj *devobj;
+ struct nvkm_subdev *subdev;
+ u64 mmio_base, mmio_size;
u32 boot0, strap;
- u64 disable, mmio_base, mmio_size;
void __iomem *map;
- int ret, i, c;
-
- nv_ioctl(parent, "create device size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create device v%d device %016llx "
- "disable %016llx debug0 %016llx\n",
- args->v0.version, args->v0.device,
- args->v0.disable, args->v0.debug0);
- } else
- return ret;
+ int ret = -EEXIST;
+ int i;
- /* give privileged clients register access */
- if (client->super)
- oclass = &nvkm_devobj_oclass_super;
+ mutex_lock(&nv_devices_mutex);
+ if (nvkm_device_find_locked(handle))
+ goto done;
- /* find the device subdev that matches what the client requested */
- device = nv_device(client->device);
- if (args->v0.device != ~0) {
- device = nvkm_device_find(args->v0.device);
- if (!device)
- return -ENODEV;
- }
+ device->func = func;
+ device->quirk = quirk;
+ device->dev = dev;
+ device->type = type;
+ device->handle = handle;
+ device->cfgopt = cfg;
+ device->dbgopt = dbg;
+ device->name = name;
+ list_add_tail(&device->head, &nv_devices);
+ device->debug = nvkm_dbgopt(device->dbgopt, "device");
- ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
- nvkm_control_oclass,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_FIFO) |
- (1ULL << NVDEV_ENGINE_DISP) |
- (1ULL << NVDEV_ENGINE_PM), &devobj);
- *pobject = nv_object(devobj);
+ ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
if (ret)
- return ret;
-
- mmio_base = nv_device_resource_start(device, 0);
- mmio_size = nv_device_resource_len(device, 0);
+ goto done;
- /* translate api disable mask into internal mapping */
- disable = args->v0.debug0;
- for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
- if (args->v0.disable & disable_map[i])
- disable |= (1ULL << i);
- }
+ mmio_base = device->func->resource_addr(device, 0);
+ mmio_size = device->func->resource_size(device, 0);
/* identify the chipset, and determine classes of subdev/engines */
- if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
- !device->card_type) {
+ if (detect) {
map = ioremap(mmio_base, 0x102000);
- if (map == NULL)
- return -ENOMEM;
+ if (ret = -ENOMEM, map == NULL)
+ goto done;
/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
@@ -397,31 +2389,83 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
device->card_type = NV_04;
}
- switch (device->card_type) {
- case NV_04: ret = nv04_identify(device); break;
- case NV_10:
- case NV_11: ret = nv10_identify(device); break;
- case NV_20: ret = nv20_identify(device); break;
- case NV_30: ret = nv30_identify(device); break;
- case NV_40: ret = nv40_identify(device); break;
- case NV_50: ret = nv50_identify(device); break;
- case NV_C0: ret = gf100_identify(device); break;
- case NV_E0: ret = gk104_identify(device); break;
- case GM100: ret = gm100_identify(device); break;
+ switch (device->chipset) {
+ case 0x004: device->chip = &nv4_chipset; break;
+ case 0x005: device->chip = &nv5_chipset; break;
+ case 0x010: device->chip = &nv10_chipset; break;
+ case 0x011: device->chip = &nv11_chipset; break;
+ case 0x015: device->chip = &nv15_chipset; break;
+ case 0x017: device->chip = &nv17_chipset; break;
+ case 0x018: device->chip = &nv18_chipset; break;
+ case 0x01a: device->chip = &nv1a_chipset; break;
+ case 0x01f: device->chip = &nv1f_chipset; break;
+ case 0x020: device->chip = &nv20_chipset; break;
+ case 0x025: device->chip = &nv25_chipset; break;
+ case 0x028: device->chip = &nv28_chipset; break;
+ case 0x02a: device->chip = &nv2a_chipset; break;
+ case 0x030: device->chip = &nv30_chipset; break;
+ case 0x031: device->chip = &nv31_chipset; break;
+ case 0x034: device->chip = &nv34_chipset; break;
+ case 0x035: device->chip = &nv35_chipset; break;
+ case 0x036: device->chip = &nv36_chipset; break;
+ case 0x040: device->chip = &nv40_chipset; break;
+ case 0x041: device->chip = &nv41_chipset; break;
+ case 0x042: device->chip = &nv42_chipset; break;
+ case 0x043: device->chip = &nv43_chipset; break;
+ case 0x044: device->chip = &nv44_chipset; break;
+ case 0x045: device->chip = &nv45_chipset; break;
+ case 0x046: device->chip = &nv46_chipset; break;
+ case 0x047: device->chip = &nv47_chipset; break;
+ case 0x049: device->chip = &nv49_chipset; break;
+ case 0x04a: device->chip = &nv4a_chipset; break;
+ case 0x04b: device->chip = &nv4b_chipset; break;
+ case 0x04c: device->chip = &nv4c_chipset; break;
+ case 0x04e: device->chip = &nv4e_chipset; break;
+ case 0x050: device->chip = &nv50_chipset; break;
+ case 0x063: device->chip = &nv63_chipset; break;
+ case 0x067: device->chip = &nv67_chipset; break;
+ case 0x068: device->chip = &nv68_chipset; break;
+ case 0x084: device->chip = &nv84_chipset; break;
+ case 0x086: device->chip = &nv86_chipset; break;
+ case 0x092: device->chip = &nv92_chipset; break;
+ case 0x094: device->chip = &nv94_chipset; break;
+ case 0x096: device->chip = &nv96_chipset; break;
+ case 0x098: device->chip = &nv98_chipset; break;
+ case 0x0a0: device->chip = &nva0_chipset; break;
+ case 0x0a3: device->chip = &nva3_chipset; break;
+ case 0x0a5: device->chip = &nva5_chipset; break;
+ case 0x0a8: device->chip = &nva8_chipset; break;
+ case 0x0aa: device->chip = &nvaa_chipset; break;
+ case 0x0ac: device->chip = &nvac_chipset; break;
+ case 0x0af: device->chip = &nvaf_chipset; break;
+ case 0x0c0: device->chip = &nvc0_chipset; break;
+ case 0x0c1: device->chip = &nvc1_chipset; break;
+ case 0x0c3: device->chip = &nvc3_chipset; break;
+ case 0x0c4: device->chip = &nvc4_chipset; break;
+ case 0x0c8: device->chip = &nvc8_chipset; break;
+ case 0x0ce: device->chip = &nvce_chipset; break;
+ case 0x0cf: device->chip = &nvcf_chipset; break;
+ case 0x0d7: device->chip = &nvd7_chipset; break;
+ case 0x0d9: device->chip = &nvd9_chipset; break;
+ case 0x0e4: device->chip = &nve4_chipset; break;
+ case 0x0e6: device->chip = &nve6_chipset; break;
+ case 0x0e7: device->chip = &nve7_chipset; break;
+ case 0x0ea: device->chip = &nvea_chipset; break;
+ case 0x0f0: device->chip = &nvf0_chipset; break;
+ case 0x0f1: device->chip = &nvf1_chipset; break;
+ case 0x106: device->chip = &nv106_chipset; break;
+ case 0x108: device->chip = &nv108_chipset; break;
+ case 0x117: device->chip = &nv117_chipset; break;
+ case 0x124: device->chip = &nv124_chipset; break;
+ case 0x126: device->chip = &nv126_chipset; break;
+ case 0x12b: device->chip = &nv12b_chipset; break;
default:
- ret = -EINVAL;
- break;
- }
-
- if (ret) {
- nv_error(device, "unknown chipset, 0x%08x\n", boot0);
- return ret;
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+ goto done;
}
- nv_info(device, "BOOT0 : 0x%08x\n", boot0);
- nv_info(device, "Chipset: %s (NV%02X)\n",
- device->cname, device->chipset);
- nv_info(device, "Family : NV%02X\n", device->card_type);
+ nvdev_info(device, "NVIDIA %s (%08x)\n",
+ device->chip->name, boot0);
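
The switch above replaces the old per-family identify() calls with a direct map from the chipset field of BOOT0 to a static chip table, and an unrecognised value now fails the constructor instead of binding with guesses. Reusing the placeholder struct chip and demo_chipset from the earlier sketch:

    static const struct chip *demo_pick_chip(unsigned chipset)
    {
            switch (chipset) {
            case 0x004: return &demo_chipset;  /* one case per known chip */
            default:    return NULL;           /* unknown: refuse to bind */
            }
    }
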
/* determine frequency of timing crystal */
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
@@ -436,300 +2480,89 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
case 0x00400000: device->crystal = 27000; break;
case 0x00400040: device->crystal = 25000; break;
}
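
The strap decode ending above selects the timing-crystal frequency (in kHz) from two bits of the strap register; only the 27000 and 25000 cases are visible in this hunk, the rest being elided by the diff context. A hedged decode restricted to what the hunk shows; the 0x00400040 mask is inferred from the visible case values and is an assumption.

    static int demo_crystal_khz(unsigned strap)
    {
            switch (strap & 0x00400040) {
            case 0x00400000: return 27000;     /* visible in the diff */
            case 0x00400040: return 25000;     /* visible in the diff */
            default:         return 0;         /* other straps elided here */
            }
    }
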
-
- nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
- } else
- if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
- device->cname = "NULL";
- device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
- }
-
- if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
- !nv_subdev(device)->mmio) {
- nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
- if (!nv_subdev(device)->mmio) {
- nv_error(device, "unable to map device registers\n");
- return -ENOMEM;
- }
- }
-
- /* ensure requested subsystems are available for use */
- for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
- if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
- continue;
-
- if (device->subdev[i]) {
- nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
- continue;
- }
-
- ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
- NULL, i, &devobj->subdev[i]);
- if (ret == -ENODEV)
- continue;
- if (ret)
- return ret;
-
- device->subdev[i] = devobj->subdev[i];
-
- /* note: can't init *any* subdevs until devinit has been run
- * due to not knowing exactly what the vbios init tables will
- * mess with. devinit also can't be run until all of its
- * dependencies have been created.
- *
- * this code delays init of any subdev until all of devinit's
- * dependencies have been created, and then initialises each
- * subdev in turn as they're created.
- */
- while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
- struct nvkm_object *subdev = devobj->subdev[c++];
- if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nvkm_object_inc(subdev);
- if (ret)
- return ret;
- atomic_dec(&nv_object(device)->usecount);
- } else
- if (subdev) {
- nvkm_subdev_reset(subdev);
- }
- }
- }
-
- return 0;
-}
-
-static struct nvkm_ofuncs
-nvkm_devobj_ofuncs = {
- .ctor = nvkm_devobj_ctor,
- .dtor = nvkm_devobj_dtor,
- .init = _nvkm_parent_init,
- .fini = _nvkm_parent_fini,
- .mthd = nvkm_devobj_mthd,
-};
-
-/******************************************************************************
- * nvkm_device: engine functions
- *****************************************************************************/
-
-struct nvkm_device *
-nv_device(void *obj)
-{
- struct nvkm_object *device = nv_object(obj);
- if (device->engine == NULL) {
- while (device && device->parent)
- device = device->parent;
} else {
- device = &nv_object(obj)->engine->subdev.object;
- if (device && device->parent)
- device = device->parent;
+ device->chip = &null_chipset;
}
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
- if (unlikely(!device))
- nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
-#endif
- return (void *)device;
-}
-static struct nvkm_oclass
-nvkm_device_sclass[] = {
- { 0x0080, &nvkm_devobj_ofuncs },
- {}
-};
+ if (!device->name)
+ device->name = device->chip->name;
-static int
-nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
-{
- if (!WARN_ON(size != 0)) {
- notify->size = 0;
- notify->types = 1;
- notify->index = 0;
- return 0;
- }
- return -EINVAL;
-}
-
-static const struct nvkm_event_func
-nvkm_device_event_func = {
- .ctor = nvkm_device_event_ctor,
-};
-
-static int
-nvkm_device_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_device *device = (void *)object;
- struct nvkm_object *subdev;
- int ret, i;
-
- for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
- if ((subdev = device->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nvkm_object_dec(subdev, suspend);
- if (ret && suspend)
- goto fail;
- }
- }
- }
-
- ret = nvkm_acpi_fini(device, suspend);
-fail:
- for (; ret && i < NVDEV_SUBDEV_NR; i++) {
- if ((subdev = device->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nvkm_object_inc(subdev);
- if (ret) {
- /* XXX */
- }
- }
+ if (mmio) {
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (!device->pri) {
+ nvdev_error(device, "unable to map PRI\n");
+ return -ENOMEM;
}
}
- return ret;
-}
-
-static int
-nvkm_device_init(struct nvkm_object *object)
-{
- struct nvkm_device *device = (void *)object;
- struct nvkm_object *subdev;
- int ret, i = 0;
-
- ret = nvkm_acpi_init(device);
- if (ret)
- goto fail;
-
- for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
- if ((subdev = device->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
- ret = nvkm_object_inc(subdev);
- if (ret)
- goto fail;
- } else {
- nvkm_subdev_reset(subdev);
- }
+ mutex_init(&device->mutex);
+
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+#define _(s,m) case s: \
+ if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \
+ ret = device->chip->m(device, (s), &device->m); \
+ if (ret) { \
+ subdev = nvkm_device_subdev(device, (s)); \
+ nvkm_subdev_del(&subdev); \
+ device->m = NULL; \
+ if (ret != -ENODEV) { \
+ nvdev_error(device, "%s ctor failed, %d\n", \
+ nvkm_subdev_name[s], ret); \
+ goto done; \
+ } \
+ } \
+ } \
+ break
+ switch (i) {
+ _(NVKM_SUBDEV_BAR , bar);
+ _(NVKM_SUBDEV_VBIOS , bios);
+ _(NVKM_SUBDEV_BUS , bus);
+ _(NVKM_SUBDEV_CLK , clk);
+ _(NVKM_SUBDEV_DEVINIT, devinit);
+ _(NVKM_SUBDEV_FB , fb);
+ _(NVKM_SUBDEV_FUSE , fuse);
+ _(NVKM_SUBDEV_GPIO , gpio);
+ _(NVKM_SUBDEV_I2C , i2c);
+ _(NVKM_SUBDEV_IBUS , ibus);
+ _(NVKM_SUBDEV_INSTMEM, imem);
+ _(NVKM_SUBDEV_LTC , ltc);
+ _(NVKM_SUBDEV_MC , mc);
+ _(NVKM_SUBDEV_MMU , mmu);
+ _(NVKM_SUBDEV_MXM , mxm);
+ _(NVKM_SUBDEV_PCI , pci);
+ _(NVKM_SUBDEV_PMU , pmu);
+ _(NVKM_SUBDEV_THERM , therm);
+ _(NVKM_SUBDEV_TIMER , timer);
+ _(NVKM_SUBDEV_VOLT , volt);
+ _(NVKM_ENGINE_BSP , bsp);
+ _(NVKM_ENGINE_CE0 , ce[0]);
+ _(NVKM_ENGINE_CE1 , ce[1]);
+ _(NVKM_ENGINE_CE2 , ce[2]);
+ _(NVKM_ENGINE_CIPHER , cipher);
+ _(NVKM_ENGINE_DISP , disp);
+ _(NVKM_ENGINE_DMAOBJ , dma);
+ _(NVKM_ENGINE_FIFO , fifo);
+ _(NVKM_ENGINE_GR , gr);
+ _(NVKM_ENGINE_IFB , ifb);
+ _(NVKM_ENGINE_ME , me);
+ _(NVKM_ENGINE_MPEG , mpeg);
+ _(NVKM_ENGINE_MSENC , msenc);
+ _(NVKM_ENGINE_MSPDEC , mspdec);
+ _(NVKM_ENGINE_MSPPP , msppp);
+ _(NVKM_ENGINE_MSVLD , msvld);
+ _(NVKM_ENGINE_PM , pm);
+ _(NVKM_ENGINE_SEC , sec);
+ _(NVKM_ENGINE_SW , sw);
+ _(NVKM_ENGINE_VIC , vic);
+ _(NVKM_ENGINE_VP , vp);
+ default:
+ WARN_ON(1);
+ continue;
}
+#undef _
}
ret = 0;
-fail:
- for (--i; ret && i >= 0; i--) {
- if ((subdev = device->subdev[i])) {
- if (!nv_iclass(subdev, NV_ENGINE_CLASS))
- nvkm_object_dec(subdev, false);
- }
- }
-
- if (ret)
- nvkm_acpi_fini(device, false);
- return ret;
-}
-
-static void
-nvkm_device_dtor(struct nvkm_object *object)
-{
- struct nvkm_device *device = (void *)object;
-
- nvkm_event_fini(&device->event);
-
- mutex_lock(&nv_devices_mutex);
- list_del(&device->head);
- mutex_unlock(&nv_devices_mutex);
-
- if (nv_subdev(device)->mmio)
- iounmap(nv_subdev(device)->mmio);
-
- nvkm_engine_destroy(&device->engine);
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
-{
- if (nv_device_is_pci(device)) {
- return pci_resource_start(device->pdev, bar);
- } else {
- struct resource *res;
- res = platform_get_resource(device->platformdev,
- IORESOURCE_MEM, bar);
- if (!res)
- return 0;
- return res->start;
- }
-}
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
-{
- if (nv_device_is_pci(device)) {
- return pci_resource_len(device->pdev, bar);
- } else {
- struct resource *res;
- res = platform_get_resource(device->platformdev,
- IORESOURCE_MEM, bar);
- if (!res)
- return 0;
- return resource_size(res);
- }
-}
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall)
-{
- if (nv_device_is_pci(device)) {
- return device->pdev->irq;
- } else {
- return platform_get_irq_byname(device->platformdev,
- stall ? "stall" : "nonstall");
- }
-}
-
-static struct nvkm_oclass
-nvkm_device_oclass = {
- .handle = NV_ENGINE(DEVICE, 0x00),
- .ofuncs = &(struct nvkm_ofuncs) {
- .dtor = nvkm_device_dtor,
- .init = nvkm_device_init,
- .fini = nvkm_device_fini,
- },
-};
-
-int
-nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
- const char *sname, const char *cfg, const char *dbg,
- int length, void **pobject)
-{
- struct nvkm_device *device;
- int ret = -EEXIST;
-
- mutex_lock(&nv_devices_mutex);
- list_for_each_entry(device, &nv_devices, head) {
- if (device->handle == name)
- goto done;
- }
-
- ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
- "DEVICE", "device", length, pobject);
- device = *pobject;
- if (ret)
- goto done;
-
- switch (type) {
- case NVKM_BUS_PCI:
- device->pdev = dev;
- break;
- case NVKM_BUS_PLATFORM:
- device->platformdev = dev;
- break;
- }
- device->handle = name;
- device->cfgopt = cfg;
- device->dbgopt = dbg;
- device->name = sname;
-
- nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
- nv_engine(device)->sclass = nvkm_device_sclass;
- list_add(&device->head, &nv_devices);
-
- ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
done:
mutex_unlock(&nv_devices_mutex);
return ret;
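
The rewritten constructor above replaces the old per-family identify() tables with a single macro-expanded switch: one case per subdevice index, each calling the chipset's optional constructor only when the chip description provides one and the caller's mask selects it, unwinding on failure. The following is a minimal, self-contained sketch of that X-macro dispatch pattern; the names (chip_desc, demo_chip, SUB_*) are illustrative and not part of nvkm.

#include <stdio.h>

enum { SUB_A, SUB_B, SUB_NR };

struct device;

struct chip_desc {
	int (*a)(struct device *);	/* optional ctor for SUB_A */
	int (*b)(struct device *);	/* optional ctor for SUB_B */
};

struct device {
	const struct chip_desc *chip;
	unsigned long long mask;	/* caller-selected subdevs */
};

static int ctor_a(struct device *dev) { puts("constructed A"); return 0; }

static const struct chip_desc demo_chip = { .a = ctor_a };	/* .b absent */

int main(void)
{
	struct device dev = { .chip = &demo_chip, .mask = ~0ULL };
	int i;

	for (i = 0; i < SUB_NR; i++) {
/* expands to: case s: if (ctor present and selected) call it; break */
#define _(s, m) case s:						\
	if (dev.chip->m && (dev.mask & (1ULL << (s))))		\
		dev.chip->m(&dev);				\
	break
		switch (i) {
		_(SUB_A, a);
		_(SUB_B, b);
		}
#undef _
	}
	return 0;
}

Chipsets that lack an engine simply leave the corresponding function pointer NULL, so no per-family switch statement is needed at construction time.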
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index 0b794b13cec3..cf8bc068e9b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,7 +21,7 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "priv.h"
+#include "ctrl.h"
#include <core/client.h>
#include <subdev/clk.h>
@@ -31,18 +31,18 @@
#include <nvif/unpack.h>
static int
-nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_info_v0 v0;
} *args = data;
- struct nvkm_clk *clk = nvkm_clk(object);
+ struct nvkm_clk *clk = ctrl->device->clk;
int ret;
- nv_ioctl(object, "control pstate info size %d\n", size);
+ nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "control pstate info vers %d\n",
- args->v0.version);
+ nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
+ args->v0.version);
} else
return ret;
@@ -64,24 +64,24 @@ nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_attr_v0 v0;
} *args = data;
- struct nvkm_clk *clk = nvkm_clk(object);
- struct nvkm_domain *domain;
+ struct nvkm_clk *clk = ctrl->device->clk;
+ const struct nvkm_domain *domain;
struct nvkm_pstate *pstate;
struct nvkm_cstate *cstate;
int i = 0, j = -1;
u32 lo, hi;
int ret;
- nv_ioctl(object, "control pstate attr size %d\n", size);
+ nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "control pstate attr vers %d state %d "
- "index %d\n",
- args->v0.version, args->v0.state, args->v0.index);
+ nvif_ioctl(&ctrl->object,
+ "control pstate attr vers %d state %d index %d\n",
+ args->v0.version, args->v0.state, args->v0.index);
if (!clk)
return -ENODEV;
if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
@@ -116,7 +116,7 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
args->v0.state = pstate->pstate;
} else {
- lo = max(clk->read(clk, domain->name), 0);
+ lo = max(nvkm_clk_read(clk, domain->name), 0);
hi = lo;
}
@@ -137,19 +137,19 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_user_v0 v0;
} *args = data;
- struct nvkm_clk *clk = nvkm_clk(object);
+ struct nvkm_clk *clk = ctrl->device->clk;
int ret;
- nv_ioctl(object, "control pstate user size %d\n", size);
+ nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "control pstate user vers %d ustate %d "
- "pwrsrc %d\n", args->v0.version,
- args->v0.ustate, args->v0.pwrsrc);
+ nvif_ioctl(&ctrl->object,
+ "control pstate user vers %d ustate %d pwrsrc %d\n",
+ args->v0.version, args->v0.ustate, args->v0.pwrsrc);
if (!clk)
return -ENODEV;
} else
@@ -168,32 +168,44 @@ nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
static int
nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
+ struct nvkm_control *ctrl = nvkm_control(object);
switch (mthd) {
case NVIF_CONTROL_PSTATE_INFO:
- return nvkm_control_mthd_pstate_info(object, data, size);
+ return nvkm_control_mthd_pstate_info(ctrl, data, size);
case NVIF_CONTROL_PSTATE_ATTR:
- return nvkm_control_mthd_pstate_attr(object, data, size);
+ return nvkm_control_mthd_pstate_attr(ctrl, data, size);
case NVIF_CONTROL_PSTATE_USER:
- return nvkm_control_mthd_pstate_user(object, data, size);
+ return nvkm_control_mthd_pstate_user(ctrl, data, size);
default:
break;
}
return -EINVAL;
}
-static struct nvkm_ofuncs
-nvkm_control_ofuncs = {
- .ctor = _nvkm_object_ctor,
- .dtor = nvkm_object_destroy,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
+static const struct nvkm_object_func
+nvkm_control = {
.mthd = nvkm_control_mthd,
};
-struct nvkm_oclass
-nvkm_control_oclass[] = {
- { .handle = NVIF_IOCTL_NEW_V0_CONTROL,
- .ofuncs = &nvkm_control_ofuncs
- },
- {}
+static int
+nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_control *ctrl;
+
+ if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &ctrl->object;
+ ctrl->device = device;
+
+ nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
+ return 0;
+}
+
+const struct nvkm_device_oclass
+nvkm_control_oclass = {
+ .base.oclass = NVIF_IOCTL_NEW_V0_CONTROL,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = nvkm_control_new,
};
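
nvkm_control_new() above follows a construction idiom worth noting: the embedded object is published through *pobject before construction finishes, so the generic caller can unwind through a single destruction path even if a later step fails. A hedged userspace sketch of the same idiom follows; the names (widget, widget_new) are invented for illustration.

#include <stdlib.h>

struct object { int handle; };

struct widget {
	struct object object;	/* embedded as the first member */
	int state;
};

static int widget_new(struct object **pobject)
{
	struct widget *w;

	if (!(w = calloc(1, sizeof(*w))))
		return -1;
	*pobject = &w->object;	/* publish before setup completes */
	w->object.handle = 1;
	w->state = 42;
	return 0;
}

int main(void)
{
	struct object *obj = NULL;

	if (widget_new(&obj) == 0)
		free(obj);	/* object is the first member, so the
				 * pointers coincide in this sketch */
	return 0;
}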
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
new file mode 100644
index 000000000000..20249d8e444d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -0,0 +1,12 @@
+#ifndef __NVKM_DEVICE_CTRL_H__
+#define __NVKM_DEVICE_CTRL_H__
+#define nvkm_control(p) container_of((p), struct nvkm_control, object)
+#include <core/device.h>
+
+struct nvkm_control {
+ struct nvkm_object object;
+ struct nvkm_device *device;
+};
+
+extern const struct nvkm_device_oclass nvkm_control_oclass;
+#endif
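
The nvkm_control(p) macro in the new header is the standard container_of() cast: method handlers receive a pointer to the embedded nvkm_object and recover the enclosing nvkm_control from it. A standalone sketch of the mechanism, using a userspace container_of built on offsetof and invented struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct object { int handle; };

struct control {
	struct object object;	/* embedded, as in nvkm_control */
	int device_id;
};

int main(void)
{
	struct control ctrl = { .object = { .handle = 1 }, .device_id = 42 };
	struct object *obj = &ctrl.object;	/* what a handler receives */
	struct control *back = container_of(obj, struct control, object);

	printf("device_id=%d\n", back->device_id);	/* prints 42 */
	return 0;
}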
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
deleted file mode 100644
index 82b38d7e9730..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mspdec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-gf100_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0xc0:
- device->cname = "GF100";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf100_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xc4:
- device->cname = "GF104";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xc3:
- device->cname = "GF106";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xce:
- device->cname = "GF114";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xcf:
- device->cname = "GF116";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf104_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xc1:
- device->cname = "GF108";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf108_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xc8:
- device->cname = "GF110";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf100_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf100_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf100_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf110_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gf100_ce1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xd9:
- device->cname = "GF119";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf119_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- case 0xd7:
- device->cname = "GF117";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gf110_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gf100_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gf100_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gf100_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gf100_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gf117_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gf100_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gf100_ce0_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gf110_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gf100_pm_oclass;
- break;
- default:
- nv_fatal(device, "unknown Fermi chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
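
The deleted gf100_identify() above, like the other per-family identify functions removed below, is superseded by the static per-chipset descriptors (nvc0_chipset and friends) selected from the boot0 register earlier in this diff. A reduced sketch of that data-driven shape, with made-up names standing in for the real descriptors:

#include <stdio.h>

struct chip_desc {
	const char *name;
	/* the real nvkm descriptors also carry per-subdev ctors */
};

static const struct chip_desc nvc0_demo = { .name = "GF100" };
static const struct chip_desc nve4_demo = { .name = "GK104" };

static const struct chip_desc *lookup(unsigned chipset)
{
	switch (chipset) {
	case 0x0c0: return &nvc0_demo;
	case 0x0e4: return &nve4_demo;
	default:    return NULL;	/* unknown chipset */
	}
}

int main(void)
{
	const struct chip_desc *chip = lookup(0x0c0);

	if (chip)
		printf("NVIDIA %s\n", chip->name);
	return 0;
}

Adding a new chipset then means adding one descriptor and one switch case, rather than another hundred-line oclass table per family.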
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
deleted file mode 100644
index 6a9483f65d83..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gk104_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0xe4:
- device->cname = "GK104";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
- break;
- case 0xe7:
- device->cname = "GK107";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gf110_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
- break;
- case 0xe6:
- device->cname = "GK106";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk104_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk104_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk104_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
- break;
- case 0xea:
- device->cname = "GK20A";
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk20a_clk_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk20a_gr_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk104_pm_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk20a_pmu_oclass;
- break;
- case 0xf0:
- device->cname = "GK110";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk110_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
- break;
- case 0xf1:
- device->cname = "GK110B";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gf106_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk110_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk104_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk110b_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = &gk110_pm_oclass;
- break;
- case 0x106:
- device->cname = "GK208B";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- break;
- case 0x108:
- device->cname = "GK208";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gk104_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gf110_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gf100_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gk104_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gk208_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gk110_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
- break;
- default:
- nv_fatal(device, "unknown Kepler chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
deleted file mode 100644
index 70abf1ec7c98..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gm100_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x117:
- device->cname = "GM107";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gf110_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
-
-#if 0
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
-#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gm107_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gk104_ce0_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_CE1 ] = &gk104_ce1_oclass;
-#endif
- device->oclass[NVDEV_ENGINE_CE2 ] = &gk104_ce2_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
-#endif
- break;
- case 0x124:
- device->cname = "GM204";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
-#if 0
- /* looks to be some non-trivial changes */
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- /* priv ring says no to 0x10eb14 writes */
- device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
-#endif
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
-#if 0
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
-#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm204_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
-#endif
- break;
- case 0x126:
- device->cname = "GM206";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
-#if 0
- /* looks to be some non-trivial changes */
- device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
- /* priv ring says no to 0x10eb14 writes */
- device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
-#endif
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
- device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
-#if 0
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
-#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = gm204_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = gm206_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
- device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
- device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
-#endif
- break;
- default:
- nv_fatal(device, "unknown Maxwell chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
deleted file mode 100644
index 5a2ae043b478..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv04_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x04:
- device->cname = "NV04";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x05:
- device->cname = "NV05";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv04_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv04_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- default:
- nv_fatal(device, "unknown RIVA chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
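
Every identify function removed in this series has the same shape as nv04_identify() above: switch on device->chipset, set a human-readable cname, fill the per-subdev/engine oclass pointer table, and return -EINVAL for an unrecognized chipset. Below is a minimal, self-contained sketch of that dispatch pattern; the types and implementation names (oclass, identify, nv04_devinit, and so on) are illustrative stand-ins, not the real nvkm interfaces.

/*
 * Simplified model of the per-chipset identify pattern: a switch on the
 * chipset ID fills a table of per-subsystem "object class" pointers,
 * which later construction code indexes to instantiate each subdev.
 */
#include <stdio.h>

enum subdev { SUBDEV_DEVINIT, SUBDEV_FB, ENGINE_GR, SUBDEV_NR };

struct oclass { const char *impl; };	/* stand-in for an oclass object */

static const struct oclass nv04_devinit = { "nv04_devinit" };
static const struct oclass nv05_devinit = { "nv05_devinit" };
static const struct oclass nv04_fb      = { "nv04_fb" };
static const struct oclass nv04_gr      = { "nv04_gr" };

struct device {
	int chipset;
	const char *cname;
	const struct oclass *oclass[SUBDEV_NR];
};

static int identify(struct device *dev)
{
	switch (dev->chipset) {
	case 0x04:
		dev->cname = "NV04";
		dev->oclass[SUBDEV_DEVINIT] = &nv04_devinit;
		dev->oclass[SUBDEV_FB]      = &nv04_fb;
		dev->oclass[ENGINE_GR]      = &nv04_gr;
		break;
	case 0x05:			/* NV05 differs only in devinit */
		dev->cname = "NV05";
		dev->oclass[SUBDEV_DEVINIT] = &nv05_devinit;
		dev->oclass[SUBDEV_FB]      = &nv04_fb;
		dev->oclass[ENGINE_GR]      = &nv04_gr;
		break;
	default:
		return -1;		/* unknown chipset, like -EINVAL */
	}
	return 0;
}

int main(void)
{
	struct device dev = { .chipset = 0x05 };

	if (identify(&dev) == 0)
		printf("%s: devinit=%s\n", dev.cname,
		       dev.oclass[SUBDEV_DEVINIT]->impl);
	return 0;
}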
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
deleted file mode 100644
index 94a1ca45e94a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv10_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x10:
- device->cname = "NV10";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x15:
- device->cname = "NV15";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x16:
- device->cname = "NV16";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x1a:
- device->cname = "nForce";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x11:
- device->cname = "NV11";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x17:
- device->cname = "NV17";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x1f:
- device->cname = "nForce2";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x18:
- device->cname = "NV18";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv10_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- default:
- nv_fatal(device, "unknown Celsius chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
deleted file mode 100644
index d5ec8937df68..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv20_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x20:
- device->cname = "NV20";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv20_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x25:
- device->cname = "NV25";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x28:
- device->cname = "NV28";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv25_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x2a:
- device->cname = "NV2A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv2a_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- default:
- nv_fatal(device, "unknown Kelvin chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
deleted file mode 100644
index dda09621e898..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-
-int
-nv30_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x30:
- device->cname = "NV30";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x35:
- device->cname = "NV35";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x31:
- device->cname = "NV31";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv30_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x36:
- device->cname = "NV36";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv35_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- case 0x34:
- device->cname = "NV34";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv04_clk_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv34_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- break;
- default:
- nv_fatal(device, "unknown Rankine chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
deleted file mode 100644
index c6301361d14f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/mmu.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv40_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x40:
- device->cname = "NV40";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x41:
- device->cname = "NV41";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x42:
- device->cname = "NV42";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x43:
- device->cname = "NV43";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x45:
- device->cname = "NV45";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv04_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x47:
- device->cname = "G70";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x49:
- device->cname = "G71";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x4b:
- device->cname = "G73";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv41_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x44:
- device->cname = "NV44";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x46:
- device->cname = "G72";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x4a:
- device->cname = "NV44A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x4c:
- device->cname = "C61";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x4e:
- device->cname = "C51";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv4e_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x63:
- device->cname = "C73";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x67:
- device->cname = "C67";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- case 0x68:
- device->cname = "C68";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv10_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv04_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &nv40_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv44_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv10_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv40_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv44_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv40_pm_oclass;
- break;
- default:
- nv_fatal(device, "unknown Curie chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
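
For contrast, here is a hedged sketch of the table-driven style that a device rework like this one typically moves toward: each chipset becomes one static descriptor and a single loop does the lookup, so adding a chipset no longer means copying a whole switch case. The descriptor fields and names below are simplified assumptions, not the actual replacement layout in nvkm.

/*
 * Table-driven alternative to switch-based identify: static per-chipset
 * descriptors plus one lookup routine.  Field set is illustrative; a
 * real descriptor would carry one entry per subdev/engine.
 */
#include <stddef.h>
#include <stdio.h>

struct chip_desc {
	int         chipset;
	const char *name;
	const char *gr_impl;	/* hypothetical implementation name */
};

static const struct chip_desc chips[] = {
	{ 0x40, "NV40", "nv40_gr" },
	{ 0x44, "NV44", "nv40_gr" },
	{ 0x4e, "C51",  "nv40_gr" },
};

static const struct chip_desc *lookup(int chipset)
{
	size_t i;

	for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		if (chips[i].chipset == chipset)
			return &chips[i];
	return NULL;		/* caller reports the unknown chipset */
}

int main(void)
{
	const struct chip_desc *c = lookup(0x44);

	if (c)
		printf("%s uses %s\n", c->name, c->gr_impl);
	return 0;
}

The design trade-off is that the switch form lets one case deviate freely (as the #if 0 stubs and per-case comments above do), while the table form keeps all chipset data declarative and uniform at the cost of needing a field for every possible subdev.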
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
deleted file mode 100644
index 249b84454612..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/vp.h>
-#include <engine/cipher.h>
-#include <engine/sec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv50_identify(struct nvkm_device *device)
-{
- switch (device->chipset) {
- case 0x50:
- device->cname = "G80";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = nv50_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = nv50_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = nv50_pm_oclass;
- break;
- case 0x84:
- device->cname = "G84";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0x86:
- device->cname = "G86";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0x92:
- device->cname = "G92";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g84_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0x94:
- device->cname = "G94";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0x96:
- device->cname = "G96";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g94_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0x98:
- device->cname = "G98";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0xa0:
- device->cname = "G200";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = g84_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g84_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = g84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &g84_vp_oclass;
- device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &g84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt200_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0xaa:
- device->cname = "MCP77/MCP78";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0xac:
- device->cname = "MCP79/MCP7A";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = mcp77_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &g84_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = g98_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = mcp77_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_SEC ] = &g98_sec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = g94_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = g84_pm_oclass;
- break;
- case 0xa3:
- device->cname = "GT215";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MPEG ] = &g84_mpeg_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
- break;
- case 0xa5:
- device->cname = "GT216";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
- break;
- case 0xa8:
- device->cname = "GT218";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = gt215_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = gt215_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
- break;
- case 0xaf:
- device->cname = "MCP89";
- device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
- device->oclass[NVDEV_SUBDEV_GPIO ] = g94_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = g94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_FUSE ] = &nv50_fuse_oclass;
- device->oclass[NVDEV_SUBDEV_CLK ] = &gt215_clk_oclass;
- device->oclass[NVDEV_SUBDEV_THERM ] = &gt215_therm_oclass;
- device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = mcp89_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = g98_mc_oclass;
- device->oclass[NVDEV_SUBDEV_BUS ] = g94_bus_oclass;
- device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = mcp89_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
- device->oclass[NVDEV_SUBDEV_MMU ] = &nv50_mmu_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PMU ] = gt215_pmu_oclass;
- device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
- device->oclass[NVDEV_ENGINE_FIFO ] = g84_fifo_oclass;
- device->oclass[NVDEV_ENGINE_SW ] = nv50_sw_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = &nv50_gr_oclass;
- device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
- device->oclass[NVDEV_ENGINE_MSVLD ] = &g98_msvld_oclass;
- device->oclass[NVDEV_ENGINE_MSPPP ] = &g98_msppp_oclass;
- device->oclass[NVDEV_ENGINE_CE0 ] = &gt215_ce_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = gt215_disp_oclass;
- device->oclass[NVDEV_ENGINE_PM ] = gt215_pm_oclass;
- break;
- default:
- nv_fatal(device, "unknown Tesla chipset\n");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
new file mode 100644
index 000000000000..e8eb14e438f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -0,0 +1,1686 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/pci.h>
+#include "priv.h"
+
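+/*
+ * PCI ID tables.  Each nvkm_device_pci_device entry maps an NVIDIA
+ * (vendor 0x10de) PCI device ID to a default board name and,
+ * optionally, to an nvkm_device_pci_vendor list that is matched
+ * against the PCI subsystem vendor/device IDs to refine the name
+ * and/or attach board-specific quirks.
+ */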
+struct nvkm_device_pci_device {
+ u16 device;
+ const char *name;
+ const struct nvkm_device_pci_vendor *vendor;
+};
+
+struct nvkm_device_pci_vendor {
+ u16 vendor;
+ u16 device;
+ const char *name;
+ const struct nvkm_device_quirk quirk;
+};
+
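+/*
+ * Each vendor list below is terminated by an empty sentinel entry.
+ * Entries with a NULL name carry only a quirk (for example the
+ * .tv_gpio and .tv_pin_mask overrides that follow), which suggests
+ * the board name is then taken from the generic device-ID entry.
+ */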
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0189[] = {
+ /* Apple iMac G4 NV18 */
+ { 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_01f0[] = {
+ /* MSI nForce2 IGP */
+ { 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0322[] = {
+ /* Zotac FX5200 */
+ { 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
+ { 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_05e7[] = {
+ { 0x10de, 0x0595, "Tesla T10 Processor" },
+ { 0x10de, 0x068f, "Tesla T10 Processor" },
+ { 0x10de, 0x0697, "Tesla M1060" },
+ { 0x10de, 0x0714, "Tesla M1060" },
+ { 0x10de, 0x0743, "Tesla M1060" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0609[] = {
+ { 0x106b, 0x00a7, "GeForce 8800 GS" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_062e[] = {
+ { 0x106b, 0x0605, "GeForce GT 130" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0649[] = {
+ { 0x1043, 0x202d, "GeForce GT 220M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0652[] = {
+ { 0x152d, 0x0850, "GeForce GT 240M LE" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0654[] = {
+ { 0x1043, 0x14a2, "GeForce GT 320M" },
+ { 0x1043, 0x14d2, "GeForce GT 320M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0655[] = {
+ { 0x106b, 0x0633, "GeForce GT 120" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0656[] = {
+ { 0x106b, 0x0693, "GeForce GT 120" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d1[] = {
+ { 0x10de, 0x0771, "Tesla C2050" },
+ { 0x10de, 0x0772, "Tesla C2070" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d2[] = {
+ { 0x10de, 0x088f, "Tesla X2070" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06de[] = {
+ { 0x10de, 0x0773, "Tesla S2050" },
+ { 0x10de, 0x082f, "Tesla M2050" },
+ { 0x10de, 0x0840, "Tesla X2070" },
+ { 0x10de, 0x0842, "Tesla M2050" },
+ { 0x10de, 0x0846, "Tesla M2050" },
+ { 0x10de, 0x0866, "Tesla M2050" },
+ { 0x10de, 0x0907, "Tesla M2050" },
+ { 0x10de, 0x091e, "Tesla M2050" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06e8[] = {
+ { 0x103c, 0x360b, "GeForce 9200M GE" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06f9[] = {
+ { 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06ff[] = {
+ { 0x10de, 0x0711, "HICx8 + Graphics" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0866[] = {
+ { 0x106b, 0x00b1, "GeForce 9400M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0872[] = {
+ { 0x1043, 0x1c42, "GeForce G205M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0873[] = {
+ { 0x1043, 0x1c52, "GeForce G205M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a6e[] = {
+ { 0x17aa, 0x3607, "Second Generation ION" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a70[] = {
+ { 0x17aa, 0x3605, "Second Generation ION" },
+ { 0x17aa, 0x3617, "Second Generation ION" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a73[] = {
+ { 0x17aa, 0x3607, "Second Generation ION" },
+ { 0x17aa, 0x3610, "Second Generation ION" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a74[] = {
+ { 0x17aa, 0x903a, "GeForce G210" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a75[] = {
+ { 0x17aa, 0x3605, "Second Generation ION" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a7a[] = {
+ { 0x1462, 0xaa51, "GeForce 405" },
+ { 0x1462, 0xaa58, "GeForce 405" },
+ { 0x1462, 0xac71, "GeForce 405" },
+ { 0x1462, 0xac82, "GeForce 405" },
+ { 0x1642, 0x3980, "GeForce 405" },
+ { 0x17aa, 0x3950, "GeForce 405M" },
+ { 0x17aa, 0x397d, "GeForce 405M" },
+ { 0x1b0a, 0x90b4, "GeForce 405" },
+ { 0x1bfd, 0x0003, "GeForce 405" },
+ { 0x1bfd, 0x8006, "GeForce 405" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dd8[] = {
+ { 0x10de, 0x0914, "Quadro 2000D" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0de9[] = {
+ { 0x1025, 0x0692, "GeForce GT 620M" },
+ { 0x1025, 0x0725, "GeForce GT 620M" },
+ { 0x1025, 0x0728, "GeForce GT 620M" },
+ { 0x1025, 0x072b, "GeForce GT 620M" },
+ { 0x1025, 0x072e, "GeForce GT 620M" },
+ { 0x1025, 0x0753, "GeForce GT 620M" },
+ { 0x1025, 0x0754, "GeForce GT 620M" },
+ { 0x17aa, 0x3977, "GeForce GT 640M LE" },
+ { 0x1b0a, 0x2210, "GeForce GT 635M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dea[] = {
+ { 0x17aa, 0x365a, "GeForce 615" },
+ { 0x17aa, 0x365b, "GeForce 615" },
+ { 0x17aa, 0x365e, "GeForce 615" },
+ { 0x17aa, 0x3660, "GeForce 615" },
+ { 0x17aa, 0x366c, "GeForce 615" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0df4[] = {
+ { 0x152d, 0x0952, "GeForce GT 630M" },
+ { 0x152d, 0x0953, "GeForce GT 630M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fd2[] = {
+ { 0x1028, 0x0595, "GeForce GT 640M LE" },
+ { 0x1028, 0x05b2, "GeForce GT 640M LE" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe3[] = {
+ { 0x103c, 0x2b16, "GeForce GT 745A" },
+ { 0x17aa, 0x3675, "GeForce GT 745A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_104b[] = {
+ { 0x1043, 0x844c, "GeForce GT 625" },
+ { 0x1043, 0x846b, "GeForce GT 625" },
+ { 0x1462, 0xb590, "GeForce GT 625" },
+ { 0x174b, 0x0625, "GeForce GT 625" },
+ { 0x174b, 0xa625, "GeForce GT 625" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1058[] = {
+ { 0x103c, 0x2af1, "GeForce 610" },
+ { 0x17aa, 0x3682, "GeForce 800A" },
+ { 0x17aa, 0x3692, "GeForce 705A" },
+ { 0x17aa, 0x3695, "GeForce 800A" },
+ { 0x17aa, 0x36a8, "GeForce 800A" },
+ { 0x17aa, 0x36ac, "GeForce 800A" },
+ { 0x17aa, 0x36ad, "GeForce 800A" },
+ { 0x705a, 0x3682, "GeForce 800A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_105b[] = {
+ { 0x103c, 0x2afb, "GeForce 705A" },
+ { 0x17aa, 0x36a1, "GeForce 800A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1091[] = {
+ { 0x10de, 0x088e, "Tesla X2090" },
+ { 0x10de, 0x0891, "Tesla X2090" },
+ { 0x10de, 0x0974, "Tesla X2090" },
+ { 0x10de, 0x098d, "Tesla X2090" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1096[] = {
+ { 0x10de, 0x0911, "Tesla C2050" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1140[] = {
+ { 0x1019, 0x999f, "GeForce GT 720M" },
+ { 0x1025, 0x0600, "GeForce GT 620M" },
+ { 0x1025, 0x0606, "GeForce GT 620M" },
+ { 0x1025, 0x064a, "GeForce GT 620M" },
+ { 0x1025, 0x064c, "GeForce GT 620M" },
+ { 0x1025, 0x067a, "GeForce GT 620M" },
+ { 0x1025, 0x0680, "GeForce GT 620M" },
+ { 0x1025, 0x0686, "GeForce 710M" },
+ { 0x1025, 0x0689, "GeForce 710M" },
+ { 0x1025, 0x068b, "GeForce 710M" },
+ { 0x1025, 0x068d, "GeForce 710M" },
+ { 0x1025, 0x068e, "GeForce 710M" },
+ { 0x1025, 0x0691, "GeForce 710M" },
+ { 0x1025, 0x0692, "GeForce GT 620M" },
+ { 0x1025, 0x0694, "GeForce GT 620M" },
+ { 0x1025, 0x0702, "GeForce GT 620M" },
+ { 0x1025, 0x0719, "GeForce GT 620M" },
+ { 0x1025, 0x0725, "GeForce GT 620M" },
+ { 0x1025, 0x0728, "GeForce GT 620M" },
+ { 0x1025, 0x072b, "GeForce GT 620M" },
+ { 0x1025, 0x072e, "GeForce GT 620M" },
+ { 0x1025, 0x0732, "GeForce GT 620M" },
+ { 0x1025, 0x0763, "GeForce GT 720M" },
+ { 0x1025, 0x0773, "GeForce 710M" },
+ { 0x1025, 0x0774, "GeForce 710M" },
+ { 0x1025, 0x0776, "GeForce GT 720M" },
+ { 0x1025, 0x077a, "GeForce 710M" },
+ { 0x1025, 0x077b, "GeForce 710M" },
+ { 0x1025, 0x077c, "GeForce 710M" },
+ { 0x1025, 0x077d, "GeForce 710M" },
+ { 0x1025, 0x077e, "GeForce 710M" },
+ { 0x1025, 0x077f, "GeForce 710M" },
+ { 0x1025, 0x0781, "GeForce GT 720M" },
+ { 0x1025, 0x0798, "GeForce GT 720M" },
+ { 0x1025, 0x0799, "GeForce GT 720M" },
+ { 0x1025, 0x079b, "GeForce GT 720M" },
+ { 0x1025, 0x079c, "GeForce GT 720M" },
+ { 0x1025, 0x0807, "GeForce GT 720M" },
+ { 0x1025, 0x0821, "GeForce 820M" },
+ { 0x1025, 0x0823, "GeForce GT 720M" },
+ { 0x1025, 0x0830, "GeForce GT 720M" },
+ { 0x1025, 0x0833, "GeForce GT 720M" },
+ { 0x1025, 0x0837, "GeForce GT 720M" },
+ { 0x1025, 0x083e, "GeForce 820M" },
+ { 0x1025, 0x0841, "GeForce 710M" },
+ { 0x1025, 0x0853, "GeForce 820M" },
+ { 0x1025, 0x0854, "GeForce 820M" },
+ { 0x1025, 0x0855, "GeForce 820M" },
+ { 0x1025, 0x0856, "GeForce 820M" },
+ { 0x1025, 0x0857, "GeForce 820M" },
+ { 0x1025, 0x0858, "GeForce 820M" },
+ { 0x1025, 0x0863, "GeForce 820M" },
+ { 0x1025, 0x0868, "GeForce 820M" },
+ { 0x1025, 0x0869, "GeForce 810M" },
+ { 0x1025, 0x0873, "GeForce 820M" },
+ { 0x1025, 0x0878, "GeForce 820M" },
+ { 0x1025, 0x087b, "GeForce 820M" },
+ { 0x1025, 0x087f, "GeForce 820M" },
+ { 0x1025, 0x0881, "GeForce 820M" },
+ { 0x1025, 0x0885, "GeForce 820M" },
+ { 0x1025, 0x088a, "GeForce 820M" },
+ { 0x1025, 0x089b, "GeForce 820M" },
+ { 0x1025, 0x0921, "GeForce 820M" },
+ { 0x1025, 0x092e, "GeForce 810M" },
+ { 0x1025, 0x092f, "GeForce 820M" },
+ { 0x1025, 0x0932, "GeForce 820M" },
+ { 0x1025, 0x093a, "GeForce 820M" },
+ { 0x1025, 0x093c, "GeForce 820M" },
+ { 0x1025, 0x093f, "GeForce 820M" },
+ { 0x1025, 0x0941, "GeForce 820M" },
+ { 0x1025, 0x0945, "GeForce 820M" },
+ { 0x1025, 0x0954, "GeForce 820M" },
+ { 0x1025, 0x0965, "GeForce 820M" },
+ { 0x1028, 0x054d, "GeForce GT 630M" },
+ { 0x1028, 0x054e, "GeForce GT 630M" },
+ { 0x1028, 0x0554, "GeForce GT 620M" },
+ { 0x1028, 0x0557, "GeForce GT 620M" },
+ { 0x1028, 0x0562, "GeForce GT625M" },
+ { 0x1028, 0x0565, "GeForce GT 630M" },
+ { 0x1028, 0x0568, "GeForce GT 630M" },
+ { 0x1028, 0x0590, "GeForce GT 630M" },
+ { 0x1028, 0x0592, "GeForce GT625M" },
+ { 0x1028, 0x0594, "GeForce GT625M" },
+ { 0x1028, 0x0595, "GeForce GT625M" },
+ { 0x1028, 0x05a2, "GeForce GT625M" },
+ { 0x1028, 0x05b1, "GeForce GT625M" },
+ { 0x1028, 0x05b3, "GeForce GT625M" },
+ { 0x1028, 0x05da, "GeForce GT 630M" },
+ { 0x1028, 0x05de, "GeForce GT 720M" },
+ { 0x1028, 0x05e0, "GeForce GT 720M" },
+ { 0x1028, 0x05e8, "GeForce GT 630M" },
+ { 0x1028, 0x05f4, "GeForce GT 720M" },
+ { 0x1028, 0x060f, "GeForce GT 720M" },
+ { 0x1028, 0x062f, "GeForce GT 720M" },
+ { 0x1028, 0x064e, "GeForce 820M" },
+ { 0x1028, 0x0652, "GeForce 820M" },
+ { 0x1028, 0x0653, "GeForce 820M" },
+ { 0x1028, 0x0655, "GeForce 820M" },
+ { 0x1028, 0x065e, "GeForce 820M" },
+ { 0x1028, 0x0662, "GeForce 820M" },
+ { 0x1028, 0x068d, "GeForce 820M" },
+ { 0x1028, 0x06ad, "GeForce 820M" },
+ { 0x1028, 0x06ae, "GeForce 820M" },
+ { 0x1028, 0x06af, "GeForce 820M" },
+ { 0x1028, 0x06b0, "GeForce 820M" },
+ { 0x1028, 0x06c0, "GeForce 820M" },
+ { 0x1028, 0x06c1, "GeForce 820M" },
+ { 0x103c, 0x18ef, "GeForce GT 630M" },
+ { 0x103c, 0x18f9, "GeForce GT 630M" },
+ { 0x103c, 0x18fb, "GeForce GT 630M" },
+ { 0x103c, 0x18fd, "GeForce GT 630M" },
+ { 0x103c, 0x18ff, "GeForce GT 630M" },
+ { 0x103c, 0x218a, "GeForce 820M" },
+ { 0x103c, 0x21bb, "GeForce 820M" },
+ { 0x103c, 0x21bc, "GeForce 820M" },
+ { 0x103c, 0x220e, "GeForce 820M" },
+ { 0x103c, 0x2210, "GeForce 820M" },
+ { 0x103c, 0x2212, "GeForce 820M" },
+ { 0x103c, 0x2214, "GeForce 820M" },
+ { 0x103c, 0x2218, "GeForce 820M" },
+ { 0x103c, 0x225b, "GeForce 820M" },
+ { 0x103c, 0x225d, "GeForce 820M" },
+ { 0x103c, 0x226d, "GeForce 820M" },
+ { 0x103c, 0x226f, "GeForce 820M" },
+ { 0x103c, 0x22d2, "GeForce 820M" },
+ { 0x103c, 0x22d9, "GeForce 820M" },
+ { 0x103c, 0x2335, "GeForce 820M" },
+ { 0x103c, 0x2337, "GeForce 820M" },
+ { 0x103c, 0x2aef, "GeForce GT 720A" },
+ { 0x103c, 0x2af9, "GeForce 710A" },
+ { 0x1043, 0x10dd, "NVS 5200M" },
+ { 0x1043, 0x10ed, "NVS 5200M" },
+ { 0x1043, 0x11fd, "GeForce GT 720M" },
+ { 0x1043, 0x124d, "GeForce GT 720M" },
+ { 0x1043, 0x126d, "GeForce GT 720M" },
+ { 0x1043, 0x131d, "GeForce GT 720M" },
+ { 0x1043, 0x13fd, "GeForce GT 720M" },
+ { 0x1043, 0x14c7, "GeForce GT 720M" },
+ { 0x1043, 0x1507, "GeForce GT 620M" },
+ { 0x1043, 0x15ad, "GeForce 820M" },
+ { 0x1043, 0x15ed, "GeForce 820M" },
+ { 0x1043, 0x160d, "GeForce 820M" },
+ { 0x1043, 0x163d, "GeForce 820M" },
+ { 0x1043, 0x165d, "GeForce 820M" },
+ { 0x1043, 0x166d, "GeForce 820M" },
+ { 0x1043, 0x16cd, "GeForce 820M" },
+ { 0x1043, 0x16dd, "GeForce 820M" },
+ { 0x1043, 0x170d, "GeForce 820M" },
+ { 0x1043, 0x176d, "GeForce 820M" },
+ { 0x1043, 0x178d, "GeForce 820M" },
+ { 0x1043, 0x179d, "GeForce 820M" },
+ { 0x1043, 0x2132, "GeForce GT 620M" },
+ { 0x1043, 0x2136, "NVS 5200M" },
+ { 0x1043, 0x21ba, "GeForce GT 720M" },
+ { 0x1043, 0x21fa, "GeForce GT 720M" },
+ { 0x1043, 0x220a, "GeForce GT 720M" },
+ { 0x1043, 0x221a, "GeForce GT 720M" },
+ { 0x1043, 0x223a, "GeForce GT 710M" },
+ { 0x1043, 0x224a, "GeForce GT 710M" },
+ { 0x1043, 0x227a, "GeForce 820M" },
+ { 0x1043, 0x228a, "GeForce 820M" },
+ { 0x1043, 0x22fa, "GeForce 820M" },
+ { 0x1043, 0x232a, "GeForce 820M" },
+ { 0x1043, 0x233a, "GeForce 820M" },
+ { 0x1043, 0x235a, "GeForce 820M" },
+ { 0x1043, 0x236a, "GeForce 820M" },
+ { 0x1043, 0x238a, "GeForce 820M" },
+ { 0x1043, 0x8595, "GeForce GT 720M" },
+ { 0x1043, 0x85ea, "GeForce GT 720M" },
+ { 0x1043, 0x85eb, "GeForce 820M" },
+ { 0x1043, 0x85ec, "GeForce 820M" },
+ { 0x1043, 0x85ee, "GeForce GT 720M" },
+ { 0x1043, 0x85f3, "GeForce 820M" },
+ { 0x1043, 0x860e, "GeForce 820M" },
+ { 0x1043, 0x861a, "GeForce 820M" },
+ { 0x1043, 0x861b, "GeForce 820M" },
+ { 0x1043, 0x8628, "GeForce 820M" },
+ { 0x1043, 0x8643, "GeForce 820M" },
+ { 0x1043, 0x864c, "GeForce 820M" },
+ { 0x1043, 0x8652, "GeForce 820M" },
+ { 0x1043, 0x8660, "GeForce 820M" },
+ { 0x1043, 0x8661, "GeForce 820M" },
+ { 0x105b, 0x0dac, "GeForce GT 720M" },
+ { 0x105b, 0x0dad, "GeForce GT 720M" },
+ { 0x105b, 0x0ef3, "GeForce GT 720M" },
+ { 0x10cf, 0x17f5, "GeForce GT 720M" },
+ { 0x1179, 0xfa01, "GeForce 710M" },
+ { 0x1179, 0xfa02, "GeForce 710M" },
+ { 0x1179, 0xfa03, "GeForce 710M" },
+ { 0x1179, 0xfa05, "GeForce 710M" },
+ { 0x1179, 0xfa11, "GeForce 710M" },
+ { 0x1179, 0xfa13, "GeForce 710M" },
+ { 0x1179, 0xfa18, "GeForce 710M" },
+ { 0x1179, 0xfa19, "GeForce 710M" },
+ { 0x1179, 0xfa21, "GeForce 710M" },
+ { 0x1179, 0xfa23, "GeForce 710M" },
+ { 0x1179, 0xfa2a, "GeForce 710M" },
+ { 0x1179, 0xfa32, "GeForce 710M" },
+ { 0x1179, 0xfa33, "GeForce 710M" },
+ { 0x1179, 0xfa36, "GeForce 710M" },
+ { 0x1179, 0xfa38, "GeForce 710M" },
+ { 0x1179, 0xfa42, "GeForce 710M" },
+ { 0x1179, 0xfa43, "GeForce 710M" },
+ { 0x1179, 0xfa45, "GeForce 710M" },
+ { 0x1179, 0xfa47, "GeForce 710M" },
+ { 0x1179, 0xfa49, "GeForce 710M" },
+ { 0x1179, 0xfa58, "GeForce 710M" },
+ { 0x1179, 0xfa59, "GeForce 710M" },
+ { 0x1179, 0xfa88, "GeForce 710M" },
+ { 0x1179, 0xfa89, "GeForce 710M" },
+ { 0x144d, 0xb092, "GeForce GT 620M" },
+ { 0x144d, 0xc0d5, "GeForce GT 630M" },
+ { 0x144d, 0xc0d7, "GeForce GT 620M" },
+ { 0x144d, 0xc0e2, "NVS 5200M" },
+ { 0x144d, 0xc0e3, "NVS 5200M" },
+ { 0x144d, 0xc0e4, "NVS 5200M" },
+ { 0x144d, 0xc10d, "GeForce 820M" },
+ { 0x144d, 0xc652, "GeForce GT 620M" },
+ { 0x144d, 0xc709, "GeForce 710M" },
+ { 0x144d, 0xc711, "GeForce 710M" },
+ { 0x144d, 0xc736, "GeForce 710M" },
+ { 0x144d, 0xc737, "GeForce 710M" },
+ { 0x144d, 0xc745, "GeForce 820M" },
+ { 0x144d, 0xc750, "GeForce 820M" },
+ { 0x1462, 0x10b8, "GeForce GT 710M" },
+ { 0x1462, 0x10e9, "GeForce GT 720M" },
+ { 0x1462, 0x1116, "GeForce 820M" },
+ { 0x1462, 0xaa33, "GeForce 720M" },
+ { 0x1462, 0xaaa2, "GeForce GT 720M" },
+ { 0x1462, 0xaaa3, "GeForce 820M" },
+ { 0x1462, 0xacb2, "GeForce GT 720M" },
+ { 0x1462, 0xacc1, "GeForce GT 720M" },
+ { 0x1462, 0xae61, "GeForce 720M" },
+ { 0x1462, 0xae65, "GeForce GT 720M" },
+ { 0x1462, 0xae6a, "GeForce 820M" },
+ { 0x1462, 0xae71, "GeForce GT 720M" },
+ { 0x14c0, 0x0083, "GeForce 820M" },
+ { 0x152d, 0x0926, "GeForce 620M" },
+ { 0x152d, 0x0982, "GeForce GT 630M" },
+ { 0x152d, 0x0983, "GeForce GT 630M" },
+ { 0x152d, 0x1005, "GeForce GT820M" },
+ { 0x152d, 0x1012, "GeForce 710M" },
+ { 0x152d, 0x1019, "GeForce 820M" },
+ { 0x152d, 0x1030, "GeForce GT 630M" },
+ { 0x152d, 0x1055, "GeForce 710M" },
+ { 0x152d, 0x1067, "GeForce GT 720M" },
+ { 0x152d, 0x1092, "GeForce 820M" },
+ { 0x17aa, 0x2200, "NVS 5200M" },
+ { 0x17aa, 0x2213, "GeForce GT 720M" },
+ { 0x17aa, 0x2220, "GeForce GT 720M" },
+ { 0x17aa, 0x309c, "GeForce GT 720A" },
+ { 0x17aa, 0x30b4, "GeForce 820A" },
+ { 0x17aa, 0x30b7, "GeForce 720A" },
+ { 0x17aa, 0x30e4, "GeForce 820A" },
+ { 0x17aa, 0x361b, "GeForce 820A" },
+ { 0x17aa, 0x361c, "GeForce 820A" },
+ { 0x17aa, 0x361d, "GeForce 820A" },
+ { 0x17aa, 0x3656, "GeForce GT620M" },
+ { 0x17aa, 0x365a, "GeForce 705M" },
+ { 0x17aa, 0x365e, "GeForce 800M" },
+ { 0x17aa, 0x3661, "GeForce 820A" },
+ { 0x17aa, 0x366c, "GeForce 800M" },
+ { 0x17aa, 0x3685, "GeForce 800M" },
+ { 0x17aa, 0x3686, "GeForce 800M" },
+ { 0x17aa, 0x3687, "GeForce 705A" },
+ { 0x17aa, 0x3696, "GeForce 820A" },
+ { 0x17aa, 0x369b, "GeForce 820A" },
+ { 0x17aa, 0x369c, "GeForce 820A" },
+ { 0x17aa, 0x369d, "GeForce 820A" },
+ { 0x17aa, 0x369e, "GeForce 820A" },
+ { 0x17aa, 0x36a6, "GeForce 820A" },
+ { 0x17aa, 0x36a7, "GeForce 820A" },
+ { 0x17aa, 0x36a9, "GeForce 820A" },
+ { 0x17aa, 0x36af, "GeForce 820A" },
+ { 0x17aa, 0x36b0, "GeForce 820A" },
+ { 0x17aa, 0x36b6, "GeForce 820A" },
+ { 0x17aa, 0x3800, "GeForce GT 720M" },
+ { 0x17aa, 0x3801, "GeForce GT 720M" },
+ { 0x17aa, 0x3802, "GeForce GT 720M" },
+ { 0x17aa, 0x3803, "GeForce GT 720M" },
+ { 0x17aa, 0x3804, "GeForce GT 720M" },
+ { 0x17aa, 0x3806, "GeForce GT 720M" },
+ { 0x17aa, 0x3808, "GeForce GT 720M" },
+ { 0x17aa, 0x380d, "GeForce 820M" },
+ { 0x17aa, 0x380e, "GeForce 820M" },
+ { 0x17aa, 0x380f, "GeForce 820M" },
+ { 0x17aa, 0x3811, "GeForce 820M" },
+ { 0x17aa, 0x3812, "GeForce 820M" },
+ { 0x17aa, 0x3813, "GeForce 820M" },
+ { 0x17aa, 0x3816, "GeForce 820M" },
+ { 0x17aa, 0x3817, "GeForce 820M" },
+ { 0x17aa, 0x3818, "GeForce 820M" },
+ { 0x17aa, 0x381a, "GeForce 820M" },
+ { 0x17aa, 0x381c, "GeForce 820M" },
+ { 0x17aa, 0x381d, "GeForce 820M" },
+ { 0x17aa, 0x3901, "GeForce 610M" },
+ { 0x17aa, 0x3902, "GeForce 710M" },
+ { 0x17aa, 0x3903, "GeForce 710M" },
+ { 0x17aa, 0x3904, "GeForce GT 625M" },
+ { 0x17aa, 0x3905, "GeForce GT 720M" },
+ { 0x17aa, 0x3907, "GeForce 820M" },
+ { 0x17aa, 0x3910, "GeForce GT 720M" },
+ { 0x17aa, 0x3912, "GeForce GT 720M" },
+ { 0x17aa, 0x3913, "GeForce 820M" },
+ { 0x17aa, 0x3915, "GeForce 820M" },
+ { 0x17aa, 0x3983, "GeForce 610M" },
+ { 0x17aa, 0x5001, "GeForce 610M" },
+ { 0x17aa, 0x5003, "GeForce GT 720M" },
+ { 0x17aa, 0x5005, "GeForce 705M" },
+ { 0x17aa, 0x500d, "GeForce GT 620M" },
+ { 0x17aa, 0x5014, "GeForce 710M" },
+ { 0x17aa, 0x5017, "GeForce 710M" },
+ { 0x17aa, 0x5019, "GeForce 710M" },
+ { 0x17aa, 0x501a, "GeForce 710M" },
+ { 0x17aa, 0x501f, "GeForce GT 720M" },
+ { 0x17aa, 0x5025, "GeForce 710M" },
+ { 0x17aa, 0x5027, "GeForce 710M" },
+ { 0x17aa, 0x502a, "GeForce 710M" },
+ { 0x17aa, 0x502b, "GeForce GT 720M" },
+ { 0x17aa, 0x502d, "GeForce 710M" },
+ { 0x17aa, 0x502e, "GeForce GT 720M" },
+ { 0x17aa, 0x502f, "GeForce GT 720M" },
+ { 0x17aa, 0x5030, "GeForce 705M" },
+ { 0x17aa, 0x5031, "GeForce 705M" },
+ { 0x17aa, 0x5032, "GeForce 820M" },
+ { 0x17aa, 0x5033, "GeForce 820M" },
+ { 0x17aa, 0x503e, "GeForce 710M" },
+ { 0x17aa, 0x503f, "GeForce 820M" },
+ { 0x17aa, 0x5040, "GeForce 820M" },
+ { 0x1854, 0x0177, "GeForce 710M" },
+ { 0x1854, 0x0180, "GeForce 710M" },
+ { 0x1854, 0x0190, "GeForce GT 720M" },
+ { 0x1854, 0x0192, "GeForce GT 720M" },
+ { 0x1854, 0x0224, "GeForce 820M" },
+ { 0x1b0a, 0x20dd, "GeForce GT 620M" },
+ { 0x1b0a, 0x20df, "GeForce GT 620M" },
+ { 0x1b0a, 0x210e, "GeForce 820M" },
+ { 0x1b0a, 0x2202, "GeForce GT 720M" },
+ { 0x1b0a, 0x90d7, "GeForce 820M" },
+ { 0x1b0a, 0x90dd, "GeForce 820M" },
+ { 0x1b50, 0x5530, "GeForce 820M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1185[] = {
+ { 0x10de, 0x106f, "GeForce GTX 760" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1189[] = {
+ { 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1199[] = {
+ { 0x1458, 0xd001, "GeForce GTX 760" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e3[] = {
+ { 0x17aa, 0x3683, "GeForce GTX 760A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11fc[] = {
+ { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
+ { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+ { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1247[] = {
+ { 0x1043, 0x212a, "GeForce GT 635M" },
+ { 0x1043, 0x212b, "GeForce GT 635M" },
+ { 0x1043, 0x212c, "GeForce GT 635M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_124d[] = {
+ { 0x1462, 0x10cc, "GeForce GT 635M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1290[] = {
+ { 0x103c, 0x2afa, "GeForce 730A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1292[] = {
+ { 0x17aa, 0x3675, "GeForce GT 740A" },
+ { 0x17aa, 0x367c, "GeForce GT 740A" },
+ { 0x17aa, 0x3684, "GeForce GT 740A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1295[] = {
+ { 0x103c, 0x2b0d, "GeForce 710A" },
+ { 0x103c, 0x2b0f, "GeForce 710A" },
+ { 0x103c, 0x2b20, "GeForce 810A" },
+ { 0x103c, 0x2b21, "GeForce 810A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1299[] = {
+ { 0x17aa, 0x369b, "GeForce 920A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1340[] = {
+ { 0x103c, 0x2b2b, "GeForce 830A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1341[] = {
+ { 0x17aa, 0x3697, "GeForce 840A" },
+ { 0x17aa, 0x3699, "GeForce 840A" },
+ { 0x17aa, 0x369c, "GeForce 840A" },
+ { 0x17aa, 0x36af, "GeForce 840A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1346[] = {
+ { 0x17aa, 0x30ba, "GeForce 930A" },
+ { 0x17aa, 0x362c, "GeForce 930A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1347[] = {
+ { 0x17aa, 0x36b9, "GeForce 940A" },
+ { 0x17aa, 0x36ba, "GeForce 940A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137a[] = {
+ { 0x17aa, 0x2225, "Quadro K620M" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137d[] = {
+ { 0x17aa, 0x3699, "GeForce 940A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1391[] = {
+ { 0x17aa, 0x3697, "GeForce GTX 850A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1392[] = {
+ { 0x1028, 0x066a, "GeForce GPU" },
+ { 0x1043, 0x861e, "GeForce GTX 750 Ti" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139a[] = {
+ { 0x17aa, 0x36b9, "GeForce GTX 950A" },
+ {}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139b[] = {
+ { 0x1028, 0x06a3, "GeForce GTX 860M" },
+ { 0x19da, 0xc248, "GeForce GTX 750 Ti" },
+ {}
+};
+
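+/*
+ * Master table of NVIDIA (0x10de) PCI device IDs.  Entries that
+ * reference one of the vendor lists above either refine the name for
+ * specific OEM boards or, where the name here is NULL, take the name
+ * entirely from the subsystem match.
+ */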
+static const struct nvkm_device_pci_device
+nvkm_device_pci_10de[] = {
+ { 0x0020, "RIVA TNT" },
+ { 0x0028, "RIVA TNT2/TNT2 Pro" },
+ { 0x0029, "RIVA TNT2 Ultra" },
+ { 0x002c, "Vanta/Vanta LT" },
+ { 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
+ { 0x0040, "GeForce 6800 Ultra" },
+ { 0x0041, "GeForce 6800" },
+ { 0x0042, "GeForce 6800 LE" },
+ { 0x0043, "GeForce 6800 XE" },
+ { 0x0044, "GeForce 6800 XT" },
+ { 0x0045, "GeForce 6800 GT" },
+ { 0x0046, "GeForce 6800 GT" },
+ { 0x0047, "GeForce 6800 GS" },
+ { 0x0048, "GeForce 6800 XT" },
+ { 0x004e, "Quadro FX 4000" },
+ { 0x0090, "GeForce 7800 GTX" },
+ { 0x0091, "GeForce 7800 GTX" },
+ { 0x0092, "GeForce 7800 GT" },
+ { 0x0093, "GeForce 7800 GS" },
+ { 0x0095, "GeForce 7800 SLI" },
+ { 0x0098, "GeForce Go 7800" },
+ { 0x0099, "GeForce Go 7800 GTX" },
+ { 0x009d, "Quadro FX 4500" },
+ { 0x00a0, "Aladdin TNT2" },
+ { 0x00c0, "GeForce 6800 GS" },
+ { 0x00c1, "GeForce 6800" },
+ { 0x00c2, "GeForce 6800 LE" },
+ { 0x00c3, "GeForce 6800 XT" },
+ { 0x00c8, "GeForce Go 6800" },
+ { 0x00c9, "GeForce Go 6800 Ultra" },
+ { 0x00cc, "Quadro FX Go1400" },
+ { 0x00cd, "Quadro FX 3450/4000 SDI" },
+ { 0x00ce, "Quadro FX 1400" },
+ { 0x00f1, "GeForce 6600 GT" },
+ { 0x00f2, "GeForce 6600" },
+ { 0x00f3, "GeForce 6200" },
+ { 0x00f4, "GeForce 6600 LE" },
+ { 0x00f5, "GeForce 7800 GS" },
+ { 0x00f6, "GeForce 6800 GS" },
+ { 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
+ { 0x00f9, "GeForce 6800 Ultra" },
+ { 0x00fa, "GeForce PCX 5750" },
+ { 0x00fb, "GeForce PCX 5900" },
+ { 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
+ { 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
+ { 0x00fe, "Quadro FX 1300" },
+ { 0x0100, "GeForce 256" },
+ { 0x0101, "GeForce DDR" },
+ { 0x0103, "Quadro" },
+ { 0x0110, "GeForce2 MX/MX 400" },
+ { 0x0111, "GeForce2 MX 100/200" },
+ { 0x0112, "GeForce2 Go" },
+ { 0x0113, "Quadro2 MXR/EX/Go" },
+ { 0x0140, "GeForce 6600 GT" },
+ { 0x0141, "GeForce 6600" },
+ { 0x0142, "GeForce 6600 LE" },
+ { 0x0143, "GeForce 6600 VE" },
+ { 0x0144, "GeForce Go 6600" },
+ { 0x0145, "GeForce 6610 XL" },
+ { 0x0146, "GeForce Go 6600 TE/6200 TE" },
+ { 0x0147, "GeForce 6700 XL" },
+ { 0x0148, "GeForce Go 6600" },
+ { 0x0149, "GeForce Go 6600 GT" },
+ { 0x014a, "Quadro NVS 440" },
+ { 0x014c, "Quadro FX 540M" },
+ { 0x014d, "Quadro FX 550" },
+ { 0x014e, "Quadro FX 540" },
+ { 0x014f, "GeForce 6200" },
+ { 0x0150, "GeForce2 GTS/GeForce2 Pro" },
+ { 0x0151, "GeForce2 Ti" },
+ { 0x0152, "GeForce2 Ultra" },
+ { 0x0153, "Quadro2 Pro" },
+ { 0x0160, "GeForce 6500" },
+ { 0x0161, "GeForce 6200 TurboCache(TM)" },
+ { 0x0162, "GeForce 6200SE TurboCache(TM)" },
+ { 0x0163, "GeForce 6200 LE" },
+ { 0x0164, "GeForce Go 6200" },
+ { 0x0165, "Quadro NVS 285" },
+ { 0x0166, "GeForce Go 6400" },
+ { 0x0167, "GeForce Go 6200" },
+ { 0x0168, "GeForce Go 6400" },
+ { 0x0169, "GeForce 6250" },
+ { 0x016a, "GeForce 7100 GS" },
+ { 0x0170, "GeForce4 MX 460" },
+ { 0x0171, "GeForce4 MX 440" },
+ { 0x0172, "GeForce4 MX 420" },
+ { 0x0173, "GeForce4 MX 440-SE" },
+ { 0x0174, "GeForce4 440 Go" },
+ { 0x0175, "GeForce4 420 Go" },
+ { 0x0176, "GeForce4 420 Go 32M" },
+ { 0x0177, "GeForce4 460 Go" },
+ { 0x0178, "Quadro4 550 XGL" },
+ { 0x0179, "GeForce4 440 Go 64M" },
+ { 0x017a, "Quadro NVS 400" },
+ { 0x017c, "Quadro4 500 GoGL" },
+ { 0x017d, "GeForce4 410 Go 16M" },
+ { 0x0181, "GeForce4 MX 440 with AGP8X" },
+ { 0x0182, "GeForce4 MX 440SE with AGP8X" },
+ { 0x0183, "GeForce4 MX 420 with AGP8X" },
+ { 0x0185, "GeForce4 MX 4000" },
+ { 0x0188, "Quadro4 580 XGL" },
+ { 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
+ { 0x018a, "Quadro NVS 280 SD" },
+ { 0x018b, "Quadro4 380 XGL" },
+ { 0x018c, "Quadro NVS 50 PCI" },
+ { 0x0191, "GeForce 8800 GTX" },
+ { 0x0193, "GeForce 8800 GTS" },
+ { 0x0194, "GeForce 8800 Ultra" },
+ { 0x0197, "Tesla C870" },
+ { 0x019d, "Quadro FX 5600" },
+ { 0x019e, "Quadro FX 4600" },
+ { 0x01a0, "GeForce2 Integrated GPU" },
+ { 0x01d0, "GeForce 7350 LE" },
+ { 0x01d1, "GeForce 7300 LE" },
+ { 0x01d2, "GeForce 7550 LE" },
+ { 0x01d3, "GeForce 7300 SE/7200 GS" },
+ { 0x01d6, "GeForce Go 7200" },
+ { 0x01d7, "GeForce Go 7300" },
+ { 0x01d8, "GeForce Go 7400" },
+ { 0x01da, "Quadro NVS 110M" },
+ { 0x01db, "Quadro NVS 120M" },
+ { 0x01dc, "Quadro FX 350M" },
+ { 0x01dd, "GeForce 7500 LE" },
+ { 0x01de, "Quadro FX 350" },
+ { 0x01df, "GeForce 7300 GS" },
+ { 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
+ { 0x0200, "GeForce3" },
+ { 0x0201, "GeForce3 Ti 200" },
+ { 0x0202, "GeForce3 Ti 500" },
+ { 0x0203, "Quadro DCC" },
+ { 0x0211, "GeForce 6800" },
+ { 0x0212, "GeForce 6800 LE" },
+ { 0x0215, "GeForce 6800 GT" },
+ { 0x0218, "GeForce 6800 XT" },
+ { 0x0221, "GeForce 6200" },
+ { 0x0222, "GeForce 6200 A-LE" },
+ { 0x0240, "GeForce 6150" },
+ { 0x0241, "GeForce 6150 LE" },
+ { 0x0242, "GeForce 6100" },
+ { 0x0244, "GeForce Go 6150" },
+ { 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
+ { 0x0247, "GeForce Go 6100" },
+ { 0x0250, "GeForce4 Ti 4600" },
+ { 0x0251, "GeForce4 Ti 4400" },
+ { 0x0253, "GeForce4 Ti 4200" },
+ { 0x0258, "Quadro4 900 XGL" },
+ { 0x0259, "Quadro4 750 XGL" },
+ { 0x025b, "Quadro4 700 XGL" },
+ { 0x0280, "GeForce4 Ti 4800" },
+ { 0x0281, "GeForce4 Ti 4200 with AGP8X" },
+ { 0x0282, "GeForce4 Ti 4800 SE" },
+ { 0x0286, "GeForce4 4200 Go" },
+ { 0x0288, "Quadro4 980 XGL" },
+ { 0x0289, "Quadro4 780 XGL" },
+ { 0x028c, "Quadro4 700 GoGL" },
+ { 0x0290, "GeForce 7900 GTX" },
+ { 0x0291, "GeForce 7900 GT/GTO" },
+ { 0x0292, "GeForce 7900 GS" },
+ { 0x0293, "GeForce 7950 GX2" },
+ { 0x0294, "GeForce 7950 GX2" },
+ { 0x0295, "GeForce 7950 GT" },
+ { 0x0297, "GeForce Go 7950 GTX" },
+ { 0x0298, "GeForce Go 7900 GS" },
+ { 0x0299, "Quadro NVS 510M" },
+ { 0x029a, "Quadro FX 2500M" },
+ { 0x029b, "Quadro FX 1500M" },
+ { 0x029c, "Quadro FX 5500" },
+ { 0x029d, "Quadro FX 3500" },
+ { 0x029e, "Quadro FX 1500" },
+ { 0x029f, "Quadro FX 4500 X2" },
+ { 0x02e0, "GeForce 7600 GT" },
+ { 0x02e1, "GeForce 7600 GS" },
+ { 0x02e2, "GeForce 7300 GT" },
+ { 0x02e3, "GeForce 7900 GS" },
+ { 0x02e4, "GeForce 7950 GT" },
+ { 0x0301, "GeForce FX 5800 Ultra" },
+ { 0x0302, "GeForce FX 5800" },
+ { 0x0308, "Quadro FX 2000" },
+ { 0x0309, "Quadro FX 1000" },
+ { 0x0311, "GeForce FX 5600 Ultra" },
+ { 0x0312, "GeForce FX 5600" },
+ { 0x0314, "GeForce FX 5600XT" },
+ { 0x031a, "GeForce FX Go5600" },
+ { 0x031b, "GeForce FX Go5650" },
+ { 0x031c, "Quadro FX Go700" },
+ { 0x0320, "GeForce FX 5200" },
+ { 0x0321, "GeForce FX 5200 Ultra" },
+ { 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
+ { 0x0323, "GeForce FX 5200LE" },
+ { 0x0324, "GeForce FX Go5200" },
+ { 0x0325, "GeForce FX Go5250" },
+ { 0x0326, "GeForce FX 5500" },
+ { 0x0327, "GeForce FX 5100" },
+ { 0x0328, "GeForce FX Go5200 32M/64M" },
+ { 0x032a, "Quadro NVS 55/280 PCI" },
+ { 0x032b, "Quadro FX 500/FX 600" },
+ { 0x032c, "GeForce FX Go53xx" },
+ { 0x032d, "GeForce FX Go5100" },
+ { 0x0330, "GeForce FX 5900 Ultra" },
+ { 0x0331, "GeForce FX 5900" },
+ { 0x0332, "GeForce FX 5900XT" },
+ { 0x0333, "GeForce FX 5950 Ultra" },
+ { 0x0334, "GeForce FX 5900ZT" },
+ { 0x0338, "Quadro FX 3000" },
+ { 0x033f, "Quadro FX 700" },
+ { 0x0341, "GeForce FX 5700 Ultra" },
+ { 0x0342, "GeForce FX 5700" },
+ { 0x0343, "GeForce FX 5700LE" },
+ { 0x0344, "GeForce FX 5700VE" },
+ { 0x0347, "GeForce FX Go5700" },
+ { 0x0348, "GeForce FX Go5700" },
+ { 0x034c, "Quadro FX Go1000" },
+ { 0x034e, "Quadro FX 1100" },
+ { 0x038b, "GeForce 7650 GS" },
+ { 0x0390, "GeForce 7650 GS" },
+ { 0x0391, "GeForce 7600 GT" },
+ { 0x0392, "GeForce 7600 GS" },
+ { 0x0393, "GeForce 7300 GT" },
+ { 0x0394, "GeForce 7600 LE" },
+ { 0x0395, "GeForce 7300 GT" },
+ { 0x0397, "GeForce Go 7700" },
+ { 0x0398, "GeForce Go 7600" },
+ { 0x0399, "GeForce Go 7600 GT" },
+ { 0x039c, "Quadro FX 560M" },
+ { 0x039e, "Quadro FX 560" },
+ { 0x03d0, "GeForce 6150SE nForce 430" },
+ { 0x03d1, "GeForce 6100 nForce 405" },
+ { 0x03d2, "GeForce 6100 nForce 400" },
+ { 0x03d5, "GeForce 6100 nForce 420" },
+ { 0x03d6, "GeForce 7025 / nForce 630a" },
+ { 0x0400, "GeForce 8600 GTS" },
+ { 0x0401, "GeForce 8600 GT" },
+ { 0x0402, "GeForce 8600 GT" },
+ { 0x0403, "GeForce 8600 GS" },
+ { 0x0404, "GeForce 8400 GS" },
+ { 0x0405, "GeForce 9500M GS" },
+ { 0x0406, "GeForce 8300 GS" },
+ { 0x0407, "GeForce 8600M GT" },
+ { 0x0408, "GeForce 9650M GS" },
+ { 0x0409, "GeForce 8700M GT" },
+ { 0x040a, "Quadro FX 370" },
+ { 0x040b, "Quadro NVS 320M" },
+ { 0x040c, "Quadro FX 570M" },
+ { 0x040d, "Quadro FX 1600M" },
+ { 0x040e, "Quadro FX 570" },
+ { 0x040f, "Quadro FX 1700" },
+ { 0x0410, "GeForce GT 330" },
+ { 0x0420, "GeForce 8400 SE" },
+ { 0x0421, "GeForce 8500 GT" },
+ { 0x0422, "GeForce 8400 GS" },
+ { 0x0423, "GeForce 8300 GS" },
+ { 0x0424, "GeForce 8400 GS" },
+ { 0x0425, "GeForce 8600M GS" },
+ { 0x0426, "GeForce 8400M GT" },
+ { 0x0427, "GeForce 8400M GS" },
+ { 0x0428, "GeForce 8400M G" },
+ { 0x0429, "Quadro NVS 140M" },
+ { 0x042a, "Quadro NVS 130M" },
+ { 0x042b, "Quadro NVS 135M" },
+ { 0x042c, "GeForce 9400 GT" },
+ { 0x042d, "Quadro FX 360M" },
+ { 0x042e, "GeForce 9300M G" },
+ { 0x042f, "Quadro NVS 290" },
+ { 0x0531, "GeForce 7150M / nForce 630M" },
+ { 0x0533, "GeForce 7000M / nForce 610M" },
+ { 0x053a, "GeForce 7050 PV / nForce 630a" },
+ { 0x053b, "GeForce 7050 PV / nForce 630a" },
+ { 0x053e, "GeForce 7025 / nForce 630a" },
+ { 0x05e0, "GeForce GTX 295" },
+ { 0x05e1, "GeForce GTX 280" },
+ { 0x05e2, "GeForce GTX 260" },
+ { 0x05e3, "GeForce GTX 285" },
+ { 0x05e6, "GeForce GTX 275" },
+ { 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
+ { 0x05ea, "GeForce GTX 260" },
+ { 0x05eb, "GeForce GTX 295" },
+ { 0x05ed, "Quadroplex 2200 D2" },
+ { 0x05f8, "Quadroplex 2200 S4" },
+ { 0x05f9, "Quadro CX" },
+ { 0x05fd, "Quadro FX 5800" },
+ { 0x05fe, "Quadro FX 4800" },
+ { 0x05ff, "Quadro FX 3800" },
+ { 0x0600, "GeForce 8800 GTS 512" },
+ { 0x0601, "GeForce 9800 GT" },
+ { 0x0602, "GeForce 8800 GT" },
+ { 0x0603, "GeForce GT 230" },
+ { 0x0604, "GeForce 9800 GX2" },
+ { 0x0605, "GeForce 9800 GT" },
+ { 0x0606, "GeForce 8800 GS" },
+ { 0x0607, "GeForce GTS 240" },
+ { 0x0608, "GeForce 9800M GTX" },
+ { 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
+ { 0x060a, "GeForce GTX 280M" },
+ { 0x060b, "GeForce 9800M GT" },
+ { 0x060c, "GeForce 8800M GTX" },
+ { 0x060d, "GeForce 8800 GS" },
+ { 0x060f, "GeForce GTX 285M" },
+ { 0x0610, "GeForce 9600 GSO" },
+ { 0x0611, "GeForce 8800 GT" },
+ { 0x0612, "GeForce 9800 GTX/9800 GTX+" },
+ { 0x0613, "GeForce 9800 GTX+" },
+ { 0x0614, "GeForce 9800 GT" },
+ { 0x0615, "GeForce GTS 250" },
+ { 0x0617, "GeForce 9800M GTX" },
+ { 0x0618, "GeForce GTX 260M" },
+ { 0x0619, "Quadro FX 4700 X2" },
+ { 0x061a, "Quadro FX 3700" },
+ { 0x061b, "Quadro VX 200" },
+ { 0x061c, "Quadro FX 3600M" },
+ { 0x061d, "Quadro FX 2800M" },
+ { 0x061e, "Quadro FX 3700M" },
+ { 0x061f, "Quadro FX 3800M" },
+ { 0x0621, "GeForce GT 230" },
+ { 0x0622, "GeForce 9600 GT" },
+ { 0x0623, "GeForce 9600 GS" },
+ { 0x0625, "GeForce 9600 GSO 512" },
+ { 0x0626, "GeForce GT 130" },
+ { 0x0627, "GeForce GT 140" },
+ { 0x0628, "GeForce 9800M GTS" },
+ { 0x062a, "GeForce 9700M GTS" },
+ { 0x062b, "GeForce 9800M GS" },
+ { 0x062c, "GeForce 9800M GTS" },
+ { 0x062d, "GeForce 9600 GT" },
+ { 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
+ { 0x0630, "GeForce 9700 S" },
+ { 0x0631, "GeForce GTS 160M" },
+ { 0x0632, "GeForce GTS 150M" },
+ { 0x0635, "GeForce 9600 GSO" },
+ { 0x0637, "GeForce 9600 GT" },
+ { 0x0638, "Quadro FX 1800" },
+ { 0x063a, "Quadro FX 2700M" },
+ { 0x0640, "GeForce 9500 GT" },
+ { 0x0641, "GeForce 9400 GT" },
+ { 0x0643, "GeForce 9500 GT" },
+ { 0x0644, "GeForce 9500 GS" },
+ { 0x0645, "GeForce 9500 GS" },
+ { 0x0646, "GeForce GT 120" },
+ { 0x0647, "GeForce 9600M GT" },
+ { 0x0648, "GeForce 9600M GS" },
+ { 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
+ { 0x064a, "GeForce 9700M GT" },
+ { 0x064b, "GeForce 9500M G" },
+ { 0x064c, "GeForce 9650M GT" },
+ { 0x0651, "GeForce G 110M" },
+ { 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
+ { 0x0653, "GeForce GT 120M" },
+ { 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
+ { 0x0655, NULL, nvkm_device_pci_10de_0655 },
+ { 0x0656, NULL, nvkm_device_pci_10de_0656 },
+ { 0x0658, "Quadro FX 380" },
+ { 0x0659, "Quadro FX 580" },
+ { 0x065a, "Quadro FX 1700M" },
+ { 0x065b, "GeForce 9400 GT" },
+ { 0x065c, "Quadro FX 770M" },
+ { 0x06c0, "GeForce GTX 480" },
+ { 0x06c4, "GeForce GTX 465" },
+ { 0x06ca, "GeForce GTX 480M" },
+ { 0x06cd, "GeForce GTX 470" },
+ { 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
+ { 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
+ { 0x06d8, "Quadro 6000" },
+ { 0x06d9, "Quadro 5000" },
+ { 0x06da, "Quadro 5000M" },
+ { 0x06dc, "Quadro 6000" },
+ { 0x06dd, "Quadro 4000" },
+ { 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
+ { 0x06df, "Tesla M2070-Q" },
+ { 0x06e0, "GeForce 9300 GE" },
+ { 0x06e1, "GeForce 9300 GS" },
+ { 0x06e2, "GeForce 8400" },
+ { 0x06e3, "GeForce 8400 SE" },
+ { 0x06e4, "GeForce 8400 GS" },
+ { 0x06e5, "GeForce 9300M GS" },
+ { 0x06e6, "GeForce G100" },
+ { 0x06e7, "GeForce 9300 SE" },
+ { 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
+ { 0x06e9, "GeForce 9300M GS" },
+ { 0x06ea, "Quadro NVS 150M" },
+ { 0x06eb, "Quadro NVS 160M" },
+ { 0x06ec, "GeForce G 105M" },
+ { 0x06ef, "GeForce G 103M" },
+ { 0x06f1, "GeForce G105M" },
+ { 0x06f8, "Quadro NVS 420" },
+ { 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
+ { 0x06fa, "Quadro NVS 450" },
+ { 0x06fb, "Quadro FX 370M" },
+ { 0x06fd, "Quadro NVS 295" },
+ { 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
+ { 0x07e0, "GeForce 7150 / nForce 630i" },
+ { 0x07e1, "GeForce 7100 / nForce 630i" },
+ { 0x07e2, "GeForce 7050 / nForce 630i" },
+ { 0x07e3, "GeForce 7050 / nForce 610i" },
+ { 0x07e5, "GeForce 7050 / nForce 620i" },
+ { 0x0840, "GeForce 8200M" },
+ { 0x0844, "GeForce 9100M G" },
+ { 0x0845, "GeForce 8200M G" },
+ { 0x0846, "GeForce 9200" },
+ { 0x0847, "GeForce 9100" },
+ { 0x0848, "GeForce 8300" },
+ { 0x0849, "GeForce 8200" },
+ { 0x084a, "nForce 730a" },
+ { 0x084b, "GeForce 9200" },
+ { 0x084c, "nForce 980a/780a SLI" },
+ { 0x084d, "nForce 750a SLI" },
+ { 0x084f, "GeForce 8100 / nForce 720a" },
+ { 0x0860, "GeForce 9400" },
+ { 0x0861, "GeForce 9400" },
+ { 0x0862, "GeForce 9400M G" },
+ { 0x0863, "GeForce 9400M" },
+ { 0x0864, "GeForce 9300" },
+ { 0x0865, "ION" },
+ { 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
+ { 0x0867, "GeForce 9400" },
+ { 0x0868, "nForce 760i SLI" },
+ { 0x0869, "GeForce 9400" },
+ { 0x086a, "GeForce 9400" },
+ { 0x086c, "GeForce 9300 / nForce 730i" },
+ { 0x086d, "GeForce 9200" },
+ { 0x086e, "GeForce 9100M G" },
+ { 0x086f, "GeForce 8200M G" },
+ { 0x0870, "GeForce 9400M" },
+ { 0x0871, "GeForce 9200" },
+ { 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
+ { 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
+ { 0x0874, "ION" },
+ { 0x0876, "ION" },
+ { 0x087a, "GeForce 9400" },
+ { 0x087d, "ION" },
+ { 0x087e, "ION LE" },
+ { 0x087f, "ION LE" },
+ { 0x08a0, "GeForce 320M" },
+ { 0x08a2, "GeForce 320M" },
+ { 0x08a3, "GeForce 320M" },
+ { 0x08a4, "GeForce 320M" },
+ { 0x08a5, "GeForce 320M" },
+ { 0x0a20, "GeForce GT 220" },
+ { 0x0a22, "GeForce 315" },
+ { 0x0a23, "GeForce 210" },
+ { 0x0a26, "GeForce 405" },
+ { 0x0a27, "GeForce 405" },
+ { 0x0a28, "GeForce GT 230M" },
+ { 0x0a29, "GeForce GT 330M" },
+ { 0x0a2a, "GeForce GT 230M" },
+ { 0x0a2b, "GeForce GT 330M" },
+ { 0x0a2c, "NVS 5100M" },
+ { 0x0a2d, "GeForce GT 320M" },
+ { 0x0a32, "GeForce GT 415" },
+ { 0x0a34, "GeForce GT 240M" },
+ { 0x0a35, "GeForce GT 325M" },
+ { 0x0a38, "Quadro 400" },
+ { 0x0a3c, "Quadro FX 880M" },
+ { 0x0a60, "GeForce G210" },
+ { 0x0a62, "GeForce 205" },
+ { 0x0a63, "GeForce 310" },
+ { 0x0a64, "Second Generation ION" },
+ { 0x0a65, "GeForce 210" },
+ { 0x0a66, "GeForce 310" },
+ { 0x0a67, "GeForce 315" },
+ { 0x0a68, "GeForce G105M" },
+ { 0x0a69, "GeForce G105M" },
+ { 0x0a6a, "NVS 2100M" },
+ { 0x0a6c, "NVS 3100M" },
+ { 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
+ { 0x0a6f, "Second Generation ION" },
+ { 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
+ { 0x0a71, "GeForce 305M" },
+ { 0x0a72, "GeForce 310M" },
+ { 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
+ { 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
+ { 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
+ { 0x0a76, "Second Generation ION" },
+ { 0x0a78, "Quadro FX 380 LP" },
+ { 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
+ { 0x0a7c, "Quadro FX 380M" },
+ { 0x0ca0, "GeForce GT 330" },
+ { 0x0ca2, "GeForce GT 320" },
+ { 0x0ca3, "GeForce GT 240" },
+ { 0x0ca4, "GeForce GT 340" },
+ { 0x0ca5, "GeForce GT 220" },
+ { 0x0ca7, "GeForce GT 330" },
+ { 0x0ca8, "GeForce GTS 260M" },
+ { 0x0ca9, "GeForce GTS 250M" },
+ { 0x0cac, "GeForce GT 220" },
+ { 0x0caf, "GeForce GT 335M" },
+ { 0x0cb0, "GeForce GTS 350M" },
+ { 0x0cb1, "GeForce GTS 360M" },
+ { 0x0cbc, "Quadro FX 1800M" },
+ { 0x0dc0, "GeForce GT 440" },
+ { 0x0dc4, "GeForce GTS 450" },
+ { 0x0dc5, "GeForce GTS 450" },
+ { 0x0dc6, "GeForce GTS 450" },
+ { 0x0dcd, "GeForce GT 555M" },
+ { 0x0dce, "GeForce GT 555M" },
+ { 0x0dd1, "GeForce GTX 460M" },
+ { 0x0dd2, "GeForce GT 445M" },
+ { 0x0dd3, "GeForce GT 435M" },
+ { 0x0dd6, "GeForce GT 550M" },
+ { 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
+ { 0x0dda, "Quadro 2000M" },
+ { 0x0de0, "GeForce GT 440" },
+ { 0x0de1, "GeForce GT 430" },
+ { 0x0de2, "GeForce GT 420" },
+ { 0x0de3, "GeForce GT 635M" },
+ { 0x0de4, "GeForce GT 520" },
+ { 0x0de5, "GeForce GT 530" },
+ { 0x0de7, "GeForce GT 610" },
+ { 0x0de8, "GeForce GT 620M" },
+ { 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
+ { 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
+ { 0x0deb, "GeForce GT 555M" },
+ { 0x0dec, "GeForce GT 525M" },
+ { 0x0ded, "GeForce GT 520M" },
+ { 0x0dee, "GeForce GT 415M" },
+ { 0x0def, "NVS 5400M" },
+ { 0x0df0, "GeForce GT 425M" },
+ { 0x0df1, "GeForce GT 420M" },
+ { 0x0df2, "GeForce GT 435M" },
+ { 0x0df3, "GeForce GT 420M" },
+ { 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
+ { 0x0df5, "GeForce GT 525M" },
+ { 0x0df6, "GeForce GT 550M" },
+ { 0x0df7, "GeForce GT 520M" },
+ { 0x0df8, "Quadro 600" },
+ { 0x0df9, "Quadro 500M" },
+ { 0x0dfa, "Quadro 1000M" },
+ { 0x0dfc, "NVS 5200M" },
+ { 0x0e22, "GeForce GTX 460" },
+ { 0x0e23, "GeForce GTX 460 SE" },
+ { 0x0e24, "GeForce GTX 460" },
+ { 0x0e30, "GeForce GTX 470M" },
+ { 0x0e31, "GeForce GTX 485M" },
+ { 0x0e3a, "Quadro 3000M" },
+ { 0x0e3b, "Quadro 4000M" },
+ { 0x0f00, "GeForce GT 630" },
+ { 0x0f01, "GeForce GT 620" },
+ { 0x0f02, "GeForce GT 730" },
+ { 0x0fc0, "GeForce GT 640" },
+ { 0x0fc1, "GeForce GT 640" },
+ { 0x0fc2, "GeForce GT 630" },
+ { 0x0fc6, "GeForce GTX 650" },
+ { 0x0fc8, "GeForce GT 740" },
+ { 0x0fc9, "GeForce GT 730" },
+ { 0x0fcd, "GeForce GT 755M" },
+ { 0x0fce, "GeForce GT 640M LE" },
+ { 0x0fd1, "GeForce GT 650M" },
+ { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
+ { 0x0fd3, "GeForce GT 640M LE" },
+ { 0x0fd4, "GeForce GTX 660M" },
+ { 0x0fd5, "GeForce GT 650M" },
+ { 0x0fd8, "GeForce GT 640M" },
+ { 0x0fd9, "GeForce GT 645M" },
+ { 0x0fdf, "GeForce GT 740M" },
+ { 0x0fe0, "GeForce GTX 660M" },
+ { 0x0fe1, "GeForce GT 730M" },
+ { 0x0fe2, "GeForce GT 745M" },
+ { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
+ { 0x0fe4, "GeForce GT 750M" },
+ { 0x0fe9, "GeForce GT 750M" },
+ { 0x0fea, "GeForce GT 755M" },
+ { 0x0fec, "GeForce 710A" },
+ { 0x0fef, "GRID K340" },
+ { 0x0ff2, "GRID K1" },
+ { 0x0ff3, "Quadro K420" },
+ { 0x0ff6, "Quadro K1100M" },
+ { 0x0ff8, "Quadro K500M" },
+ { 0x0ff9, "Quadro K2000D" },
+ { 0x0ffa, "Quadro K600" },
+ { 0x0ffb, "Quadro K2000M" },
+ { 0x0ffc, "Quadro K1000M" },
+ { 0x0ffd, "NVS 510" },
+ { 0x0ffe, "Quadro K2000" },
+ { 0x0fff, "Quadro 410" },
+ { 0x1001, "GeForce GTX TITAN Z" },
+ { 0x1004, "GeForce GTX 780" },
+ { 0x1005, "GeForce GTX TITAN" },
+ { 0x1007, "GeForce GTX 780" },
+ { 0x1008, "GeForce GTX 780 Ti" },
+ { 0x100a, "GeForce GTX 780 Ti" },
+ { 0x100c, "GeForce GTX TITAN Black" },
+ { 0x1021, "Tesla K20Xm" },
+ { 0x1022, "Tesla K20c" },
+ { 0x1023, "Tesla K40m" },
+ { 0x1024, "Tesla K40c" },
+ { 0x1026, "Tesla K20s" },
+ { 0x1027, "Tesla K40st" },
+ { 0x1028, "Tesla K20m" },
+ { 0x1029, "Tesla K40s" },
+ { 0x102a, "Tesla K40t" },
+ { 0x102d, "Tesla K80" },
+ { 0x103a, "Quadro K6000" },
+ { 0x103c, "Quadro K5200" },
+ { 0x1040, "GeForce GT 520" },
+ { 0x1042, "GeForce 510" },
+ { 0x1048, "GeForce 605" },
+ { 0x1049, "GeForce GT 620" },
+ { 0x104a, "GeForce GT 610" },
+ { 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
+ { 0x104c, "GeForce GT 705" },
+ { 0x1050, "GeForce GT 520M" },
+ { 0x1051, "GeForce GT 520MX" },
+ { 0x1052, "GeForce GT 520M" },
+ { 0x1054, "GeForce 410M" },
+ { 0x1055, "GeForce 410M" },
+ { 0x1056, "NVS 4200M" },
+ { 0x1057, "NVS 4200M" },
+ { 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
+ { 0x1059, "GeForce 610M" },
+ { 0x105a, "GeForce 610M" },
+ { 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
+ { 0x107c, "NVS 315" },
+ { 0x107d, "NVS 310" },
+ { 0x1080, "GeForce GTX 580" },
+ { 0x1081, "GeForce GTX 570" },
+ { 0x1082, "GeForce GTX 560 Ti" },
+ { 0x1084, "GeForce GTX 560" },
+ { 0x1086, "GeForce GTX 570" },
+ { 0x1087, "GeForce GTX 560 Ti" },
+ { 0x1088, "GeForce GTX 590" },
+ { 0x1089, "GeForce GTX 580" },
+ { 0x108b, "GeForce GTX 580" },
+ { 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
+ { 0x1094, "Tesla M2075" },
+ { 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
+ { 0x109a, "Quadro 5010M" },
+ { 0x109b, "Quadro 7000" },
+ { 0x10c0, "GeForce 9300 GS" },
+ { 0x10c3, "GeForce 8400GS" },
+ { 0x10c5, "GeForce 405" },
+ { 0x10d8, "NVS 300" },
+ { 0x1140, NULL, nvkm_device_pci_10de_1140 },
+ { 0x1180, "GeForce GTX 680" },
+ { 0x1183, "GeForce GTX 660 Ti" },
+ { 0x1184, "GeForce GTX 770" },
+ { 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
+ { 0x1187, "GeForce GTX 760" },
+ { 0x1188, "GeForce GTX 690" },
+ { 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
+ { 0x118a, "GRID K520" },
+ { 0x118e, "GeForce GTX 760 (192-bit)" },
+ { 0x118f, "Tesla K10" },
+ { 0x1193, "GeForce GTX 760 Ti OEM" },
+ { 0x1194, "Tesla K8" },
+ { 0x1195, "GeForce GTX 660" },
+ { 0x1198, "GeForce GTX 880M" },
+ { 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
+ { 0x119a, "GeForce GTX 860M" },
+ { 0x119d, "GeForce GTX 775M" },
+ { 0x119e, "GeForce GTX 780M" },
+ { 0x119f, "GeForce GTX 780M" },
+ { 0x11a0, "GeForce GTX 680M" },
+ { 0x11a1, "GeForce GTX 670MX" },
+ { 0x11a2, "GeForce GTX 675MX" },
+ { 0x11a3, "GeForce GTX 680MX" },
+ { 0x11a7, "GeForce GTX 675MX" },
+ { 0x11b4, "Quadro K4200" },
+ { 0x11b6, "Quadro K3100M" },
+ { 0x11b7, "Quadro K4100M" },
+ { 0x11b8, "Quadro K5100M" },
+ { 0x11ba, "Quadro K5000" },
+ { 0x11bc, "Quadro K5000M" },
+ { 0x11bd, "Quadro K4000M" },
+ { 0x11be, "Quadro K3000M" },
+ { 0x11bf, "GRID K2" },
+ { 0x11c0, "GeForce GTX 660" },
+ { 0x11c2, "GeForce GTX 650 Ti BOOST" },
+ { 0x11c3, "GeForce GTX 650 Ti" },
+ { 0x11c4, "GeForce GTX 645" },
+ { 0x11c5, "GeForce GT 740" },
+ { 0x11c6, "GeForce GTX 650 Ti" },
+ { 0x11c8, "GeForce GTX 650" },
+ { 0x11cb, "GeForce GT 740" },
+ { 0x11e0, "GeForce GTX 770M" },
+ { 0x11e1, "GeForce GTX 765M" },
+ { 0x11e2, "GeForce GTX 765M" },
+ { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
+ { 0x11fa, "Quadro K4000" },
+ { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+ { 0x1200, "GeForce GTX 560 Ti" },
+ { 0x1201, "GeForce GTX 560" },
+ { 0x1203, "GeForce GTX 460 SE v2" },
+ { 0x1205, "GeForce GTX 460 v2" },
+ { 0x1206, "GeForce GTX 555" },
+ { 0x1207, "GeForce GT 645" },
+ { 0x1208, "GeForce GTX 560 SE" },
+ { 0x1210, "GeForce GTX 570M" },
+ { 0x1211, "GeForce GTX 580M" },
+ { 0x1212, "GeForce GTX 675M" },
+ { 0x1213, "GeForce GTX 670M" },
+ { 0x1241, "GeForce GT 545" },
+ { 0x1243, "GeForce GT 545" },
+ { 0x1244, "GeForce GTX 550 Ti" },
+ { 0x1245, "GeForce GTS 450" },
+ { 0x1246, "GeForce GT 550M" },
+ { 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
+ { 0x1248, "GeForce GT 555M" },
+ { 0x1249, "GeForce GTS 450" },
+ { 0x124b, "GeForce GT 640" },
+ { 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
+ { 0x1251, "GeForce GTX 560M" },
+ { 0x1280, "GeForce GT 635" },
+ { 0x1281, "GeForce GT 710" },
+ { 0x1282, "GeForce GT 640" },
+ { 0x1284, "GeForce GT 630" },
+ { 0x1286, "GeForce GT 720" },
+ { 0x1287, "GeForce GT 730" },
+ { 0x1288, "GeForce GT 720" },
+ { 0x1289, "GeForce GT 710" },
+ { 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
+ { 0x1291, "GeForce GT 735M" },
+ { 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
+ { 0x1293, "GeForce GT 730M" },
+ { 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
+ { 0x1296, "GeForce 825M" },
+ { 0x1298, "GeForce GT 720M" },
+ { 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
+ { 0x129a, "GeForce 910M" },
+ { 0x12b9, "Quadro K610M" },
+ { 0x12ba, "Quadro K510M" },
+ { 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
+ { 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
+ { 0x1344, "GeForce 845M" },
+ { 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
+ { 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
+ { 0x137a, NULL, nvkm_device_pci_10de_137a },
+ { 0x137d, NULL, nvkm_device_pci_10de_137d },
+ { 0x1380, "GeForce GTX 750 Ti" },
+ { 0x1381, "GeForce GTX 750" },
+ { 0x1382, "GeForce GTX 745" },
+ { 0x1390, "GeForce 845M" },
+ { 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
+ { 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
+ { 0x1393, "GeForce 840M" },
+ { 0x1398, "GeForce 845M" },
+ { 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
+ { 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
+ { 0x139c, "GeForce 940M" },
+ { 0x13b3, "Quadro K2200M" },
+ { 0x13ba, "Quadro K2200" },
+ { 0x13bb, "Quadro K620" },
+ { 0x13bc, "Quadro K1200" },
+ { 0x13c0, "GeForce GTX 980" },
+ { 0x13c2, "GeForce GTX 970" },
+ { 0x13d7, "GeForce GTX 980M" },
+ { 0x13d8, "GeForce GTX 970M" },
+ { 0x13d9, "GeForce GTX 965M" },
+ { 0x1401, "GeForce GTX 960" },
+ { 0x1617, "GeForce GTX 980M" },
+ { 0x1618, "GeForce GTX 970M" },
+ { 0x1619, "GeForce GTX 965M" },
+ { 0x17c2, "GeForce GTX TITAN X" },
+ { 0x17c8, "GeForce GTX 980 Ti" },
+ { 0x17f0, "Quadro M6000" },
+ {}
+};
+
+static struct nvkm_device_pci *
+nvkm_device_pci(struct nvkm_device *device)
+{
+ return container_of(device, struct nvkm_device_pci, device);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+ struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+ return pci_resource_start(pdev->pdev, bar);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+{
+ struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+ return pci_resource_len(pdev->pdev, bar);
+}
+
+static void
+nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
+{
+ struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+ if (suspend) {
+ pci_disable_device(pdev->pdev);
+ pdev->suspend = true;
+ }
+}
+
+static int
+nvkm_device_pci_preinit(struct nvkm_device *device)
+{
+ struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+ if (pdev->suspend) {
+ int ret = pci_enable_device(pdev->pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev->pdev);
+ pdev->suspend = false;
+ }
+ return 0;
+}
+
+static void *
+nvkm_device_pci_dtor(struct nvkm_device *device)
+{
+ struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+ pci_disable_device(pdev->pdev);
+ return pdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_pci_func = {
+ .pci = nvkm_device_pci,
+ .dtor = nvkm_device_pci_dtor,
+ .preinit = nvkm_device_pci_preinit,
+ .fini = nvkm_device_pci_fini,
+ .resource_addr = nvkm_device_pci_resource_addr,
+ .resource_size = nvkm_device_pci_resource_size,
+ .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+};
+
+int
+nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device **pdevice)
+{
+ const struct nvkm_device_quirk *quirk = NULL;
+ const struct nvkm_device_pci_device *pcid;
+ const struct nvkm_device_pci_vendor *pciv;
+ const char *name = NULL;
+ struct nvkm_device_pci *pdev;
+ int ret;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
+
+ switch (pci_dev->vendor) {
+ case 0x10de: pcid = nvkm_device_pci_10de; break;
+ default:
+ pcid = NULL;
+ break;
+ }
+
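+ /*
+ * Find the matching device entry, preferring a subsystem-specific
+ * (per-board) match for its name and quirks over the generic entry.
+ */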
+ while (pcid && pcid->device) {
+ if (pcid->device == pci_dev->device) {
+ pciv = pcid->vendor;
+ while (pciv && pciv->vendor) {
+ if (pciv->vendor == pci_dev->subsystem_vendor &&
+ pciv->device == pci_dev->subsystem_device) {
+ quirk = &pciv->quirk;
+ name = pciv->name;
+ break;
+ }
+ pciv++;
+ }
+ if (!name)
+ name = pcid->name;
+ break;
+ }
+ pcid++;
+ }
+
+ if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
+ pci_disable_device(pci_dev);
+ return -ENOMEM;
+ }
+ *pdevice = &pdev->device;
+ pdev->pdev = pci_dev;
+
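+ /*
+ * The 64-bit handle packs the PCI domain, bus, slot and function;
+ * the bus type is PCIE when a PCIe capability is present, AGP when
+ * an AGP capability is present, and plain PCI otherwise.
+ */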
+ return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 8d3590e7bd87..ed3ad2c30e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -2,15 +2,49 @@
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
-extern struct nvkm_oclass nvkm_control_oclass[];
+#include <subdev/bar.h>
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/clk.h>
+#include <subdev/devinit.h>
+#include <subdev/fb.h>
+#include <subdev/fuse.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/ltc.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/mxm.h>
+#include <subdev/pci.h>
+#include <subdev/pmu.h>
+#include <subdev/therm.h>
+#include <subdev/timer.h>
+#include <subdev/volt.h>
-int nv04_identify(struct nvkm_device *);
-int nv10_identify(struct nvkm_device *);
-int nv20_identify(struct nvkm_device *);
-int nv30_identify(struct nvkm_device *);
-int nv40_identify(struct nvkm_device *);
-int nv50_identify(struct nvkm_device *);
-int gf100_identify(struct nvkm_device *);
-int gk104_identify(struct nvkm_device *);
-int gm100_identify(struct nvkm_device *);
+#include <engine/bsp.h>
+#include <engine/ce.h>
+#include <engine/cipher.h>
+#include <engine/disp.h>
+#include <engine/dma.h>
+#include <engine/fifo.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/msvld.h>
+#include <engine/pm.h>
+#include <engine/sec.h>
+#include <engine/sw.h>
+#include <engine/vp.h>
+
+int nvkm_device_ctor(const struct nvkm_device_func *,
+ const struct nvkm_device_quirk *,
+ struct device *, enum nvkm_device_type, u64 handle,
+ const char *name, const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device *);
+int nvkm_device_init(struct nvkm_device *);
+int nvkm_device_fini(struct nvkm_device *, bool suspend);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
new file mode 100644
index 000000000000..da57c8a60608
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/tegra.h>
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+#include "priv.h"
+
+static int
+nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
+{
+ int ret;
+
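+ /*
+ * Power-up sequence: enable the rail and clocks, then cycle reset
+ * around unclamping the 3D power partition.
+ */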
+ ret = regulator_enable(tdev->vdd);
+ if (ret)
+ goto err_power;
+
+ ret = clk_prepare_enable(tdev->clk);
+ if (ret)
+ goto err_clk;
+ ret = clk_prepare_enable(tdev->clk_pwr);
+ if (ret)
+ goto err_clk_pwr;
+ clk_set_rate(tdev->clk_pwr, 204000000);
+ udelay(10);
+
+ reset_control_assert(tdev->rst);
+ udelay(10);
+
+ ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+ if (ret)
+ goto err_clamp;
+ udelay(10);
+
+ reset_control_deassert(tdev->rst);
+ udelay(10);
+
+ return 0;
+
+err_clamp:
+ clk_disable_unprepare(tdev->clk_pwr);
+err_clk_pwr:
+ clk_disable_unprepare(tdev->clk);
+err_clk:
+ regulator_disable(tdev->vdd);
+err_power:
+ return ret;
+}
+
+static int
+nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
+{
+ reset_control_assert(tdev->rst);
+ udelay(10);
+
+ clk_disable_unprepare(tdev->clk_pwr);
+ clk_disable_unprepare(tdev->clk);
+ udelay(10);
+
+ return regulator_disable(tdev->vdd);
+}
+
+static void
+nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ struct device *dev = &tdev->pdev->dev;
+ unsigned long pgsize_bitmap;
+ int ret;
+
+ mutex_init(&tdev->iommu.mutex);
+
+ if (iommu_present(&platform_bus_type)) {
+ tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tdev->iommu.domain)
+ goto error;
+
+ /*
+ * An IOMMU is only usable if it supports page sizes smaller
+ * than or equal to the system's PAGE_SIZE, preferring the
+ * case where both are equal.
+ */
+ pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+ if (pgsize_bitmap & PAGE_SIZE) {
+ tdev->iommu.pgshift = PAGE_SHIFT;
+ } else {
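+ /*
+ * Fall back to the largest supported page size smaller
+ * than the system's PAGE_SIZE.
+ */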
+ tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+ if (tdev->iommu.pgshift == 0) {
+ dev_warn(dev, "unsupported IOMMU page size\n");
+ goto free_domain;
+ }
+ tdev->iommu.pgshift -= 1;
+ }
+
+ ret = iommu_attach_device(tdev->iommu.domain, dev);
+ if (ret)
+ goto free_domain;
+
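+ /* Manage a 40-bit IOVA space, allocated in IOMMU-page units. */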
+ ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+ (1ULL << 40) >> tdev->iommu.pgshift, 1);
+ if (ret)
+ goto detach_device;
+ }
+
+ return;
+
+detach_device:
+ iommu_detach_device(tdev->iommu.domain, dev);
+
+free_domain:
+ iommu_domain_free(tdev->iommu.domain);
+
+error:
+ tdev->iommu.domain = NULL;
+ tdev->iommu.pgshift = 0;
+ dev_err(dev, "cannot initialize IOMMU MM\n");
+#endif
+}
+
+static void
+nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ if (tdev->iommu.domain) {
+ nvkm_mm_fini(&tdev->iommu.mm);
+ iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
+ iommu_domain_free(tdev->iommu.domain);
+ }
+#endif
+}
+
+static struct nvkm_device_tegra *
+nvkm_device_tegra(struct nvkm_device *device)
+{
+ return container_of(device, struct nvkm_device_tegra, device);
+}
+
+static struct resource *
+nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+{
+ struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+ return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+ struct resource *res = nvkm_device_tegra_resource(device, bar);
+ return res ? res->start : 0;
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+{
+ struct resource *res = nvkm_device_tegra_resource(device, bar);
+ return res ? resource_size(res) : 0;
+}
+
+static irqreturn_t
+nvkm_device_tegra_intr(int irq, void *arg)
+{
+ struct nvkm_device_tegra *tdev = arg;
+ struct nvkm_mc *mc = tdev->device.mc;
+ bool handled = false;
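+ /* Disarm MC interrupts while dispatching, then re-arm. */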
+ if (likely(mc)) {
+ nvkm_mc_intr_unarm(mc);
+ nvkm_mc_intr(mc, &handled);
+ nvkm_mc_intr_rearm(mc);
+ }
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void
+nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
+{
+ struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+ if (tdev->irq) {
+ free_irq(tdev->irq, tdev);
+ tdev->irq = 0;
+ }
+}
+
+static int
+nvkm_device_tegra_init(struct nvkm_device *device)
+{
+ struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+ int irq, ret;
+
+ irq = platform_get_irq_byname(tdev->pdev, "stall");
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, nvkm_device_tegra_intr,
+ IRQF_SHARED, "nvkm", tdev);
+ if (ret)
+ return ret;
+
+ tdev->irq = irq;
+ return 0;
+}
+
+static void *
+nvkm_device_tegra_dtor(struct nvkm_device *device)
+{
+ struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+ nvkm_device_tegra_power_down(tdev);
+ nvkm_device_tegra_remove_iommu(tdev);
+ return tdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_tegra_func = {
+ .tegra = nvkm_device_tegra,
+ .dtor = nvkm_device_tegra_dtor,
+ .init = nvkm_device_tegra_init,
+ .fini = nvkm_device_tegra_fini,
+ .resource_addr = nvkm_device_tegra_resource_addr,
+ .resource_size = nvkm_device_tegra_resource_size,
+ .cpu_coherent = false,
+};
+
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+ const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device **pdevice)
+{
+ struct nvkm_device_tegra *tdev;
+ int ret;
+
+ if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+ return -ENOMEM;
+ *pdevice = &tdev->device;
+ tdev->pdev = pdev;
+ tdev->irq = -1;
+
+ tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(tdev->vdd))
+ return PTR_ERR(tdev->vdd);
+
+ tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+ if (IS_ERR(tdev->rst))
+ return PTR_ERR(tdev->rst);
+
+ tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+ if (IS_ERR(tdev->clk))
+ return PTR_ERR(tdev->clk);
+
+ tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+ if (IS_ERR(tdev->clk_pwr))
+ return PTR_ERR(tdev->clk_pwr);
+
+ nvkm_device_tegra_probe_iommu(tdev);
+
+ ret = nvkm_device_tegra_power_up(tdev);
+ if (ret)
+ return ret;
+
+ tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+ NVKM_DEVICE_TEGRA, pdev->id, NULL,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &tdev->device);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#else
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+ const char *cfg, const char *dbg,
+ bool detect, bool mmio, u64 subdev_mask,
+ struct nvkm_device **pdevice)
+{
+ return -ENOSYS;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
new file mode 100644
index 000000000000..1ae48f27029d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
+#include "priv.h"
+#include "ctrl.h"
+
+#include <core/client.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nvkm_udevice {
+ struct nvkm_object object;
+ struct nvkm_device *device;
+};
+
+static int
+nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
+{
+ struct nvkm_object *object = &udev->object;
+ struct nvkm_device *device = udev->device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_instmem *imem = device->imem;
+ union {
+ struct nv_device_info_v0 v0;
+ } *args = data;
+ int ret;
+
+ nvif_ioctl(object, "device info size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "device info vers %d\n", args->v0.version);
+ } else
+ return ret;
+
+ switch (device->chipset) {
+ case 0x01a:
+ case 0x01f:
+ case 0x04c:
+ case 0x04e:
+ case 0x063:
+ case 0x067:
+ case 0x068:
+ case 0x0aa:
+ case 0x0ac:
+ case 0x0af:
+ args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+ break;
+ default:
+ switch (device->type) {
+ case NVKM_DEVICE_PCI:
+ args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+ break;
+ case NVKM_DEVICE_AGP:
+ args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+ break;
+ case NVKM_DEVICE_PCIE:
+ args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+ break;
+ case NVKM_DEVICE_TEGRA:
+ args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ break;
+ }
+
+ switch (device->card_type) {
+ case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+ case NV_10:
+ case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+ case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+ case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+ case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+ case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+ case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+ case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+ case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ default:
+ args->v0.family = 0;
+ break;
+ }
+
+ args->v0.chipset = device->chipset;
+ args->v0.revision = device->chiprev;
+ if (fb && fb->ram)
+ args->v0.ram_size = args->v0.ram_user = fb->ram->size;
+ else
+ args->v0.ram_size = args->v0.ram_user = 0;
+ if (imem && args->v0.ram_size > 0)
+ args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
+ strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
+ strncpy(args->v0.name, device->name, sizeof(args->v0.name));
+ return 0;
+}
+
+static int
+nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
+{
+ struct nvkm_device *device = udev->device;
+ union {
+ struct nv_device_time_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ args->v0.time = nvkm_timer_read(device->timer);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ switch (mthd) {
+ case NV_DEVICE_V0_INFO:
+ return nvkm_udevice_info(udev, data, size);
+ case NV_DEVICE_V0_TIME:
+ return nvkm_udevice_time(udev, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ *data = nvkm_rd08(udev->device, addr);
+ return 0;
+}
+
+static int
+nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ *data = nvkm_rd16(udev->device, addr);
+ return 0;
+}
+
+static int
+nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ *data = nvkm_rd32(udev->device, addr);
+ return 0;
+}
+
+static int
+nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ nvkm_wr08(udev->device, addr, data);
+ return 0;
+}
+
+static int
+nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ nvkm_wr16(udev->device, addr, data);
+ return 0;
+}
+
+static int
+nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ nvkm_wr32(udev->device, addr, data);
+ return 0;
+}
+
+static int
+nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ struct nvkm_device *device = udev->device;
+ *addr = device->func->resource_addr(device, 0);
+ *size = device->func->resource_size(device, 0);
+ return 0;
+}
+
+static int
+nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ struct nvkm_device *device = udev->device;
+ int ret = 0;
+
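+ /*
+ * Dropping the last reference performs the real fini; if a
+ * suspend-time fini fails, restore the refcount so state
+ * remains consistent.
+ */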
+ mutex_lock(&device->mutex);
+ if (!--device->refcount) {
+ ret = nvkm_device_fini(device, suspend);
+ if (ret && suspend) {
+ device->refcount++;
+ goto done;
+ }
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+ return ret;
+}
+
+static int
+nvkm_udevice_init(struct nvkm_object *object)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ struct nvkm_device *device = udev->device;
+ int ret = 0;
+
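+ /* Taking the first reference performs the real init. */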
+ mutex_lock(&device->mutex);
+ if (!device->refcount++) {
+ ret = nvkm_device_init(device);
+ if (ret) {
+ device->refcount--;
+ goto done;
+ }
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+ return ret;
+}
+
+static int
+nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
+ const struct nvkm_device_oclass *sclass = oclass->priv;
+ return sclass->ctor(udev->device, oclass, data, size, pobject);
+}
+
+static int
+nvkm_udevice_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_udevice *udev = nvkm_udevice(object);
+ struct nvkm_device *device = udev->device;
+ struct nvkm_engine *engine;
+ u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_FIFO) |
+ (1ULL << NVKM_ENGINE_DISP) |
+ (1ULL << NVKM_ENGINE_PM);
+ const struct nvkm_device_oclass *sclass = NULL;
+ int i;
+
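+ /*
+ * Search the whitelisted engines, in mask order, for the class
+ * at the requested index.
+ */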
+ for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
+ if (!(engine = nvkm_device_engine(device, i)) ||
+ !(engine->func->base.sclass))
+ continue;
+ oclass->engine = engine;
+
+ index -= engine->func->base.sclass(oclass, index, &sclass);
+ }
+
+ if (!sclass) {
+ switch (index) {
+ case 0: sclass = &nvkm_control_oclass; break;
+ default:
+ return -EINVAL;
+ }
+ oclass->base = sclass->base;
+ }
+
+ oclass->ctor = nvkm_udevice_child_new;
+ oclass->priv = sclass;
+ return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_udevice_super = {
+ .init = nvkm_udevice_init,
+ .fini = nvkm_udevice_fini,
+ .mthd = nvkm_udevice_mthd,
+ .map = nvkm_udevice_map,
+ .rd08 = nvkm_udevice_rd08,
+ .rd16 = nvkm_udevice_rd16,
+ .rd32 = nvkm_udevice_rd32,
+ .wr08 = nvkm_udevice_wr08,
+ .wr16 = nvkm_udevice_wr16,
+ .wr32 = nvkm_udevice_wr32,
+ .sclass = nvkm_udevice_child_get,
+};
+
+static const struct nvkm_object_func
+nvkm_udevice = {
+ .init = nvkm_udevice_init,
+ .fini = nvkm_udevice_fini,
+ .mthd = nvkm_udevice_mthd,
+ .sclass = nvkm_udevice_child_get,
+};
+
+int
+nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
+ struct nvkm_client *client = oclass->client;
+ struct nvkm_object *parent = &client->object;
+ const struct nvkm_object_func *func;
+ struct nvkm_udevice *udev;
+ int ret;
+
+ nvif_ioctl(parent, "create device size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create device v%d device %016llx\n",
+ args->v0.version, args->v0.device);
+ } else
+ return ret;
+
+ /* give privileged clients register access */
+ if (client->super)
+ func = &nvkm_udevice_super;
+ else
+ func = &nvkm_udevice;
+
+ if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(func, oclass, &udev->object);
+ *pobject = &udev->object;
+
+ /* find the device that matches what the client requested */
+ if (args->v0.device != ~0)
+ udev->device = nvkm_device_find(args->v0.device);
+ else
+ udev->device = nvkm_device_find(client->device);
+ if (!udev->device)
+ return -ENODEV;
+
+ return 0;
+}
+
+const struct nvkm_sclass
+nvkm_udevice_sclass = {
+ .oclass = NV_DEVICE,
+ .minver = 0,
+ .maxver = 0,
+ .ctor = nvkm_udevice_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 16a4e2a37008..04f60452011e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,29 +1,93 @@
nvkm-y += nvkm/engine/disp/base.o
-nvkm-y += nvkm/engine/disp/conn.o
-nvkm-y += nvkm/engine/disp/outp.o
-nvkm-y += nvkm/engine/disp/outpdp.o
nvkm-y += nvkm/engine/disp/nv04.o
nvkm-y += nvkm/engine/disp/nv50.o
nvkm-y += nvkm/engine/disp/g84.o
nvkm-y += nvkm/engine/disp/g94.o
nvkm-y += nvkm/engine/disp/gt200.o
nvkm-y += nvkm/engine/disp/gt215.o
-nvkm-y += nvkm/engine/disp/gf110.o
+nvkm-y += nvkm/engine/disp/gf119.o
nvkm-y += nvkm/engine/disp/gk104.o
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm204.o
+
+nvkm-y += nvkm/engine/disp/outp.o
+nvkm-y += nvkm/engine/disp/outpdp.o
nvkm-y += nvkm/engine/disp/dacnv50.o
+nvkm-y += nvkm/engine/disp/piornv50.o
+nvkm-y += nvkm/engine/disp/sornv50.o
+nvkm-y += nvkm/engine/disp/sorg94.o
+nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm204.o
nvkm-y += nvkm/engine/disp/dport.o
+
+nvkm-y += nvkm/engine/disp/conn.o
+
nvkm-y += nvkm/engine/disp/hdagt215.o
-nvkm-y += nvkm/engine/disp/hdagf110.o
+nvkm-y += nvkm/engine/disp/hdagf119.o
+
nvkm-y += nvkm/engine/disp/hdmig84.o
nvkm-y += nvkm/engine/disp/hdmigt215.o
-nvkm-y += nvkm/engine/disp/hdmigf110.o
+nvkm-y += nvkm/engine/disp/hdmigf119.o
nvkm-y += nvkm/engine/disp/hdmigk104.o
-nvkm-y += nvkm/engine/disp/piornv50.o
-nvkm-y += nvkm/engine/disp/sornv50.o
-nvkm-y += nvkm/engine/disp/sorg94.o
-nvkm-y += nvkm/engine/disp/sorgf110.o
-nvkm-y += nvkm/engine/disp/sorgm204.o
+
nvkm-y += nvkm/engine/disp/vga.o
+
+nvkm-y += nvkm/engine/disp/rootnv04.o
+nvkm-y += nvkm/engine/disp/rootnv50.o
+nvkm-y += nvkm/engine/disp/rootg84.o
+nvkm-y += nvkm/engine/disp/rootg94.o
+nvkm-y += nvkm/engine/disp/rootgt200.o
+nvkm-y += nvkm/engine/disp/rootgt215.o
+nvkm-y += nvkm/engine/disp/rootgf119.o
+nvkm-y += nvkm/engine/disp/rootgk104.o
+nvkm-y += nvkm/engine/disp/rootgk110.o
+nvkm-y += nvkm/engine/disp/rootgm107.o
+nvkm-y += nvkm/engine/disp/rootgm204.o
+
+nvkm-y += nvkm/engine/disp/channv50.o
+nvkm-y += nvkm/engine/disp/changf119.o
+
+nvkm-y += nvkm/engine/disp/dmacnv50.o
+nvkm-y += nvkm/engine/disp/dmacgf119.o
+
+nvkm-y += nvkm/engine/disp/basenv50.o
+nvkm-y += nvkm/engine/disp/baseg84.o
+nvkm-y += nvkm/engine/disp/basegt200.o
+nvkm-y += nvkm/engine/disp/basegt215.o
+nvkm-y += nvkm/engine/disp/basegf119.o
+nvkm-y += nvkm/engine/disp/basegk104.o
+nvkm-y += nvkm/engine/disp/basegk110.o
+
+nvkm-y += nvkm/engine/disp/corenv50.o
+nvkm-y += nvkm/engine/disp/coreg84.o
+nvkm-y += nvkm/engine/disp/coreg94.o
+nvkm-y += nvkm/engine/disp/coregt200.o
+nvkm-y += nvkm/engine/disp/coregt215.o
+nvkm-y += nvkm/engine/disp/coregf119.o
+nvkm-y += nvkm/engine/disp/coregk104.o
+nvkm-y += nvkm/engine/disp/coregk110.o
+nvkm-y += nvkm/engine/disp/coregm107.o
+nvkm-y += nvkm/engine/disp/coregm204.o
+
+nvkm-y += nvkm/engine/disp/ovlynv50.o
+nvkm-y += nvkm/engine/disp/ovlyg84.o
+nvkm-y += nvkm/engine/disp/ovlygt200.o
+nvkm-y += nvkm/engine/disp/ovlygt215.o
+nvkm-y += nvkm/engine/disp/ovlygf119.o
+nvkm-y += nvkm/engine/disp/ovlygk104.o
+
+nvkm-y += nvkm/engine/disp/piocnv50.o
+nvkm-y += nvkm/engine/disp/piocgf119.o
+
+nvkm-y += nvkm/engine/disp/cursnv50.o
+nvkm-y += nvkm/engine/disp/cursg84.o
+nvkm-y += nvkm/engine/disp/cursgt215.o
+nvkm-y += nvkm/engine/disp/cursgf119.o
+nvkm-y += nvkm/engine/disp/cursgk104.o
+
+nvkm-y += nvkm/engine/disp/oimmnv50.o
+nvkm-y += nvkm/engine/disp/oimmg84.o
+nvkm-y += nvkm/engine/disp/oimmgt215.o
+nvkm-y += nvkm/engine/disp/oimmgf119.o
+nvkm-y += nvkm/engine/disp/oimmgk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 23d1b5c0dc16..44b67719f64d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -25,7 +25,9 @@
#include "conn.h"
#include "outp.h"
+#include <core/client.h>
#include <core/notify.h>
+#include <core/oproxy.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -33,7 +35,21 @@
#include <nvif/event.h>
#include <nvif/unpack.h>
-int
+static void
+nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+ disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+ disp->func->head.vblank_init(disp, head);
+}
+
+static int
nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
@@ -56,6 +72,13 @@ nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
return ret;
}
+static const struct nvkm_event_func
+nvkm_disp_vblank_func = {
+ .ctor = nvkm_disp_vblank_ctor,
+ .init = nvkm_disp_vblank_init,
+ .fini = nvkm_disp_vblank_fini,
+};
+
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
@@ -100,7 +123,7 @@ nvkm_disp_hpd_func = {
int
nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
- struct nvkm_disp *disp = (void *)object->engine;
+ struct nvkm_disp *disp = nvkm_disp(object->engine);
switch (type) {
case NV04_DISP_NTFY_VBLANK:
*event = &disp->vblank;
@@ -114,127 +137,303 @@ nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
return -EINVAL;
}
-int
-_nvkm_disp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
- struct nvkm_disp *disp = (void *)object;
- struct nvkm_output *outp;
+ struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
+ mutex_lock(&disp->engine.subdev.mutex);
+ if (disp->client == oproxy)
+ disp->client = NULL;
+ mutex_unlock(&disp->engine.subdev.mutex);
+}
+
+static const struct nvkm_oproxy_func
+nvkm_disp_class = {
+ .dtor[1] = nvkm_disp_class_del,
+};
+
+static int
+nvkm_disp_class_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ const struct nvkm_disp_oclass *sclass = oclass->engn;
+ struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+ struct nvkm_oproxy *oproxy;
int ret;
- list_for_each_entry(outp, &disp->outp, head) {
- ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
- if (ret && suspend)
- goto fail_outp;
+ ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
+ if (ret)
+ return ret;
+ *pobject = &oproxy->base;
+
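+ /*
+ * Only a single client may own the display class at once; the
+ * oproxy destructor releases the claim.
+ */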
+ mutex_lock(&disp->engine.subdev.mutex);
+ if (disp->client) {
+ mutex_unlock(&disp->engine.subdev.mutex);
+ return -EBUSY;
}
+ disp->client = oproxy;
+ mutex_unlock(&disp->engine.subdev.mutex);
- return nvkm_engine_fini(&disp->base, suspend);
+ return sclass->ctor(disp, oclass, data, size, &oproxy->object);
+}
-fail_outp:
- list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
- nv_ofuncs(outp)->init(nv_object(outp));
+static const struct nvkm_device_oclass
+nvkm_disp_sclass = {
+ .ctor = nvkm_disp_class_new,
+};
+
+static int
+nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
+ const struct nvkm_device_oclass **class)
+{
+ struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+ if (index == 0) {
+ const struct nvkm_disp_oclass *root = disp->func->root(disp);
+ oclass->base = root->base;
+ oclass->engn = root;
+ *class = &nvkm_disp_sclass;
+ return 0;
}
+ return 1;
+}
- return ret;
+static void
+nvkm_disp_intr(struct nvkm_engine *engine)
+{
+ struct nvkm_disp *disp = nvkm_disp(engine);
+ disp->func->intr(disp);
}
-int
-_nvkm_disp_init(struct nvkm_object *object)
+static int
+nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
- struct nvkm_disp *disp = (void *)object;
+ struct nvkm_disp *disp = nvkm_disp(engine);
+ struct nvkm_connector *conn;
struct nvkm_output *outp;
- int ret;
-
- ret = nvkm_engine_init(&disp->base);
- if (ret)
- return ret;
list_for_each_entry(outp, &disp->outp, head) {
- ret = nv_ofuncs(outp)->init(nv_object(outp));
- if (ret)
- goto fail_outp;
+ nvkm_output_fini(outp);
}
- return ret;
+ list_for_each_entry(conn, &disp->conn, head) {
+ nvkm_connector_fini(conn);
+ }
+
+ return 0;
+}
+
+static int
+nvkm_disp_init(struct nvkm_engine *engine)
+{
+ struct nvkm_disp *disp = nvkm_disp(engine);
+ struct nvkm_connector *conn;
+ struct nvkm_output *outp;
-fail_outp:
- list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
- nv_ofuncs(outp)->fini(nv_object(outp), false);
+ list_for_each_entry(conn, &disp->conn, head) {
+ nvkm_connector_init(conn);
}
- return ret;
+ list_for_each_entry(outp, &disp->outp, head) {
+ nvkm_output_init(outp);
+ }
+
+ return 0;
}
-void
-_nvkm_disp_dtor(struct nvkm_object *object)
+static void *
+nvkm_disp_dtor(struct nvkm_engine *engine)
{
- struct nvkm_disp *disp = (void *)object;
- struct nvkm_output *outp, *outt;
+ struct nvkm_disp *disp = nvkm_disp(engine);
+ struct nvkm_connector *conn;
+ struct nvkm_output *outp;
+ void *data = disp;
+
+ if (disp->func->dtor)
+ data = disp->func->dtor(disp);
nvkm_event_fini(&disp->vblank);
nvkm_event_fini(&disp->hpd);
- if (disp->outp.next) {
- list_for_each_entry_safe(outp, outt, &disp->outp, head) {
- nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
- }
+ while (!list_empty(&disp->outp)) {
+ outp = list_first_entry(&disp->outp, typeof(*outp), head);
+ list_del(&outp->head);
+ nvkm_output_del(&outp);
}
- nvkm_engine_destroy(&disp->base);
+ while (!list_empty(&disp->conn)) {
+ conn = list_first_entry(&disp->conn, typeof(*conn), head);
+ list_del(&conn->head);
+ nvkm_connector_del(&conn);
+ }
+
+ return data;
}
+static const struct nvkm_engine_func
+nvkm_disp = {
+ .dtor = nvkm_disp_dtor,
+ .init = nvkm_disp_init,
+ .fini = nvkm_disp_fini,
+ .intr = nvkm_disp_intr,
+ .base.sclass = nvkm_disp_class_get,
+};
+
int
-nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int heads, const char *intname,
- const char *extname, int length, void **pobject)
+nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
+ int index, int heads, struct nvkm_disp *disp)
{
- struct nvkm_disp_impl *impl = (void *)oclass;
- struct nvkm_bios *bios = nvkm_bios(parent);
- struct nvkm_disp *disp;
- struct nvkm_oclass **sclass;
- struct nvkm_object *object;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_output *outp, *outt, *pair;
+ struct nvkm_connector *conn;
+ struct nvbios_connE connE;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
u32 data;
int ret, i;
- ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
- extname, length, pobject);
- disp = *pobject;
+ INIT_LIST_HEAD(&disp->outp);
+ INIT_LIST_HEAD(&disp->conn);
+ disp->func = func;
+ disp->head.nr = heads;
+
+ ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
+ true, &disp->engine);
if (ret)
return ret;
- INIT_LIST_HEAD(&disp->outp);
-
/* create output objects for each display path in the vbios */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ const struct nvkm_disp_func_outp *outps;
+ int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
break;
- data = dcbE.location << 4 | dcbE.type;
+ outp = NULL;
+
+ switch (dcbE.location) {
+ case 0: outps = &disp->func->outp.internal; break;
+ case 1: outps = &disp->func->outp.external; break;
+ default:
+ nvkm_warn(&disp->engine.subdev,
+ "dcb %d locn %d unknown\n", i, dcbE.location);
+ continue;
+ }
- oclass = nvkm_output_oclass;
- sclass = impl->outp;
- while (sclass && sclass[0]) {
- if (sclass[0]->handle == data) {
- oclass = sclass[0];
- break;
+ switch (dcbE.type) {
+ case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
+ case DCB_OUTPUT_TV : ctor = outps->tv ; break;
+ case DCB_OUTPUT_TMDS : ctor = outps->tmds; break;
+ case DCB_OUTPUT_LVDS : ctor = outps->lvds; break;
+ case DCB_OUTPUT_DP : ctor = outps->dp ; break;
+ default:
+ nvkm_warn(&disp->engine.subdev,
+ "dcb %d type %d unknown\n", i, dcbE.type);
+ continue;
+ }
+
+ if (ctor)
+ ret = ctor(disp, i, &dcbE, &outp);
+ else
+ ret = -ENODEV;
+
+ if (ret) {
+ if (ret == -ENODEV) {
+ nvkm_debug(&disp->engine.subdev,
+ "dcb %d %d/%d not supported\n",
+ i, dcbE.location, dcbE.type);
+ continue;
}
- sclass++;
+ nvkm_error(&disp->engine.subdev,
+ "failed to create output %d\n", i);
+ nvkm_output_del(&outp);
+ continue;
}
- nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
+ list_add_tail(&outp->head, &disp->outp);
hpd = max(hpd, (u8)(dcbE.connector + 1));
}
+ /* create connector objects based on the outputs we support */
+ list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+ /* bios data *should* give us the most useful information */
+ data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
+ &connE);
+
+ /* no bios connector data... */
+ if (!data) {
+ /* heuristic: anything with the same ccb index is
+ * considered to be on the same connector; any
+ * output path without an associated ccb entry is
+ * put on its own connector
+ */
+ int ccb_index = outp->info.i2c_index;
+ if (ccb_index != 0xf) {
+ list_for_each_entry(pair, &disp->outp, head) {
+ if (pair->info.i2c_index == ccb_index) {
+ outp->conn = pair->conn;
+ break;
+ }
+ }
+ }
+
+ /* connector shared with another output path */
+ if (outp->conn)
+ continue;
+
+ memset(&connE, 0x00, sizeof(connE));
+ connE.type = DCB_CONNECTOR_NONE;
+ i = -1;
+ } else {
+ i = outp->info.connector;
+ }
+
+ /* check that we haven't already created this connector */
+ list_for_each_entry(conn, &disp->conn, head) {
+ if (conn->index == outp->info.connector) {
+ outp->conn = conn;
+ break;
+ }
+ }
+
+ if (outp->conn)
+ continue;
+
+ /* apparently we need to create a new one! */
+ ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
+ if (ret) {
+ nvkm_error(&disp->engine.subdev,
+ "failed to create output %d conn: %d\n",
+ outp->index, ret);
+ nvkm_connector_del(&outp->conn);
+ list_del(&outp->head);
+ nvkm_output_del(&outp);
+ continue;
+ }
+
+ list_add_tail(&outp->conn->head, &disp->conn);
+ }
+
ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
if (ret)
return ret;
- ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
+ ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
if (ret)
return ret;
return 0;
}
+
+int
+nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
+ int index, int heads, struct nvkm_disp **pdisp)
+{
+ if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_disp_ctor(func, device, index, heads, *pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
new file mode 100644
index 000000000000..6d17630a3dee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_base_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00c4, 0x610800 },
+ { 0x00c8, 0x61080c },
+ { 0x00cc, 0x610818 },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x00fc, 0x610824 },
+ { 0x0100, 0x610894 },
+ { 0x0104, 0x61082c },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_base_chan_mthd = {
+ .name = "Base",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &g84_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_base_oclass = {
+ .base.oclass = G82_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &g84_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
new file mode 100644
index 000000000000..ebcb925e9d90
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x661080 },
+ { 0x0084, 0x661084 },
+ { 0x0088, 0x661088 },
+ { 0x008c, 0x66108c },
+ { 0x0090, 0x661090 },
+ { 0x0094, 0x661094 },
+ { 0x00a0, 0x6610a0 },
+ { 0x00a4, 0x6610a4 },
+ { 0x00c0, 0x6610c0 },
+ { 0x00c4, 0x6610c4 },
+ { 0x00c8, 0x6610c8 },
+ { 0x00cc, 0x6610cc },
+ { 0x00e0, 0x6610e0 },
+ { 0x00e4, 0x6610e4 },
+ { 0x00e8, 0x6610e8 },
+ { 0x00ec, 0x6610ec },
+ { 0x00fc, 0x6610fc },
+ { 0x0100, 0x661100 },
+ { 0x0104, 0x661104 },
+ { 0x0108, 0x661108 },
+ { 0x010c, 0x66110c },
+ { 0x0110, 0x661110 },
+ { 0x0114, 0x661114 },
+ { 0x0118, 0x661118 },
+ { 0x011c, 0x66111c },
+ { 0x0130, 0x661130 },
+ { 0x0134, 0x661134 },
+ { 0x0138, 0x661138 },
+ { 0x013c, 0x66113c },
+ { 0x0140, 0x661140 },
+ { 0x0144, 0x661144 },
+ { 0x0148, 0x661148 },
+ { 0x014c, 0x66114c },
+ { 0x0150, 0x661150 },
+ { 0x0154, 0x661154 },
+ { 0x0158, 0x661158 },
+ { 0x015c, 0x66115c },
+ { 0x0160, 0x661160 },
+ { 0x0164, 0x661164 },
+ { 0x0168, 0x661168 },
+ { 0x016c, 0x66116c },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_image = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0400, 0x661400 },
+ { 0x0404, 0x661404 },
+ { 0x0408, 0x661408 },
+ { 0x040c, 0x66140c },
+ { 0x0410, 0x661410 },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+gf119_disp_base_chan_mthd = {
+ .name = "Base",
+ .addr = 0x001000,
+ .prev = -0x020000,
+ .data = {
+ { "Global", 1, &gf119_disp_base_mthd_base },
+ { "Image", 2, &gf119_disp_base_mthd_image },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_base_oclass = {
+ .base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gf119_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
new file mode 100644
index 000000000000..780a1d973634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_base_oclass = {
+ .base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gf119_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
new file mode 100644
index 000000000000..d8bdd246c8ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_base_oclass = {
+ .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gf119_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
new file mode 100644
index 000000000000..93451e46570c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_base_oclass = {
+ .base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &g84_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
new file mode 100644
index 000000000000..08e2b1fa3806
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_base_oclass = {
+ .base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &g84_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
new file mode 100644
index 000000000000..1fd89edefc26
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv50_disp_base_channel_dma_v0 v0;
+ } *args = data;
+ struct nvkm_object *parent = oclass->parent;
+ struct nv50_disp *disp = root->disp;
+ int head, ret;
+ u64 push;
+
+ nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create disp base channel dma vers %d "
+ "pushbuf %016llx head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > disp->base.head.nr)
+ return -EINVAL;
+ push = args->v0.pushbuf;
+ head = args->v0.head;
+ } else
+ return ret;
+
+ return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+ head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x0100, 0x610894 },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000000,
+ .data = {
+ { 0x0800, 0x6108f0 },
+ { 0x0804, 0x6108fc },
+ { 0x0808, 0x61090c },
+ { 0x080c, 0x610914 },
+ { 0x0810, 0x610904 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_base_chan_mthd = {
+ .name = "Base",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_base_oclass = {
+ .base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &nv50_disp_base_chan_mthd,
+ .chid = 1,
+};
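
nv50_disp_base_new() above is the shared constructor those per-chipset tables point at: nvif_unpack() both validates the versioned ioctl payload and sets ret, the head index is range-checked, and the remaining work is delegated to nv50_disp_dmac_new_() with the channel ID offset by head. A rough stand-alone analogue of the versioned-unpack step, with hypothetical names and deliberately simplified size checking:

#include <stdio.h>
#include <string.h>

struct args_v0 {                         /* stand-in for the _v0 structs */
        unsigned char version;
        unsigned char head;
        unsigned long long pushbuf;
};

/* crude analogue of nvif_unpack(): accept the blob only if it is exactly
 * one v0 struct carrying the expected version */
static int unpack_v0(const void *data, size_t size, struct args_v0 *v0)
{
        if (size != sizeof(*v0))
                return -1;
        memcpy(v0, data, size);
        return v0->version == 0 ? 0 : -1;
}

int main(void)
{
        struct args_v0 in = { .version = 0, .head = 1, .pushbuf = 0x1000 };
        struct args_v0 out;

        if (unpack_v0(&in, sizeof(in), &out) == 0)
                printf("vers %d pushbuf %016llx head %d\n",
                       out.version, out.pushbuf, out.head);
        return 0;
}
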
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
new file mode 100644
index 000000000000..17a3d835cb42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+static void
+gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+ struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
+ nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+}
+
+static void
+gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+ struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+ nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
+}
+
+const struct nvkm_event_func
+gf119_disp_chan_uevent = {
+ .ctor = nv50_disp_chan_uevent_ctor,
+ .init = gf119_disp_chan_uevent_init,
+ .fini = gf119_disp_chan_uevent_fini,
+};
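
The two hooks above arm and disarm one completion-interrupt bit per channel: 0x61008c is written to clear the pending bit, and the matching enable bit in 0x610090 is flipped with nvkm_mask(), a read-modify-write helper. A self-contained sketch of that bit discipline, with plain variables standing in for the MMIO registers:

#include <stdio.h>
#include <stdint.h>

static uint32_t pending, enable;         /* stand-ins for 0x61008c/0x610090 */

/* read-modify-write, as nvkm_mask() does over MMIO */
static uint32_t mask32(uint32_t *reg, uint32_t mask, uint32_t val)
{
        uint32_t old = *reg;
        *reg = (old & ~mask) | (val & mask);
        return old;
}

static void uevent_init(int index)
{
        pending = 1u << index;                        /* write-1-to-clear */
        mask32(&enable, 1u << index, 1u << index);    /* then enable */
}

static void uevent_fini(int index)
{
        mask32(&enable, 1u << index, 0);              /* disable first */
        pending = 1u << index;                        /* clear leftovers */
}

int main(void)
{
        uevent_init(3);
        printf("enable %08x\n", enable);   /* 00000008 */
        uevent_fini(3);
        printf("enable %08x\n", enable);   /* 00000000 */
        return 0;
}
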
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
new file mode 100644
index 000000000000..01803c0679b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <engine/dma.h>
+
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+static void
+nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
+ const struct nv50_disp_mthd_list *list, int inst)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int i;
+
+ for (i = 0; list->data[i].mthd; i++) {
+ if (list->data[i].addr) {
+ u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
+ u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
+ u32 mthd = list->data[i].mthd + (list->mthd * inst);
+ const char *name = list->data[i].name;
+ char mods[16];
+
+ if (prev != next)
+ snprintf(mods, sizeof(mods), "-> %08x", next);
+ else
+ snprintf(mods, sizeof(mods), "%13c", ' ');
+
+ nvkm_printk_(subdev, debug, info,
+ "\t%04x: %08x %s%s%s\n",
+ mthd, prev, mods, name ? " // " : "",
+ name ? name : "");
+ }
+ }
+}
+
+void
+nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+{
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ const struct nv50_disp_chan_mthd *mthd = chan->mthd;
+ const struct nv50_disp_mthd_list *list;
+ int i, j;
+
+ if (debug > subdev->debug)
+ return;
+
+ for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+ u32 base = chan->head * mthd->addr;
+ for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
+ const char *cname = mthd->name;
+ const char *sname = "";
+ char cname_[16], sname_[16];
+
+ if (mthd->addr) {
+ snprintf(cname_, sizeof(cname_), "%s %d",
+ mthd->name, chan->chid);
+ cname = cname_;
+ }
+
+ if (mthd->data[i].nr > 1) {
+ snprintf(sname_, sizeof(sname_), " - %s %d",
+ mthd->data[i].name, j);
+ sname = sname_;
+ }
+
+ nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
+ nv50_disp_mthd_list(disp, debug, base, mthd->prev,
+ list, j);
+ }
+ }
+}
+
+static void
+nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+ struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
+ nvkm_wr32(device, 0x610020, 0x00000001 << index);
+}
+
+static void
+nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+ struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_wr32(device, 0x610020, 0x00000001 << index);
+ nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
+}
+
+void
+nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
+{
+ struct nvif_notify_uevent_rep {
+ } rep;
+
+ nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
+}
+
+int
+nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ union {
+ struct nvif_notify_uevent_req none;
+ } *args = data;
+ int ret;
+
+ if (nvif_unvers(args->none)) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+ notify->index = chan->chid;
+ return 0;
+ }
+
+ return ret;
+}
+
+const struct nvkm_event_func
+nv50_disp_chan_uevent = {
+ .ctor = nv50_disp_chan_uevent_ctor,
+ .init = nv50_disp_chan_uevent_init,
+ .fini = nv50_disp_chan_uevent_fini,
+};
+
+int
+nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+ return 0;
+}
+
+int
+nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+ return 0;
+}
+
+int
+nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
+ struct nvkm_event **pevent)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ switch (type) {
+ case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
+ *pevent = &disp->uevent;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int
+nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ *addr = device->func->resource_addr(device, 0) +
+ 0x640000 + (chan->chid * 0x1000);
+ *size = 0x001000;
+ return 0;
+}
+
+static int
+nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
+ return chan->func->child_new(chan, oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_chan_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ if (chan->func->child_get) {
+ int ret = chan->func->child_get(chan, index, oclass);
+ if (ret == 0)
+ oclass->ctor = nv50_disp_chan_child_new;
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int
+nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ chan->func->fini(chan);
+ return 0;
+}
+
+static int
+nv50_disp_chan_init(struct nvkm_object *object)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_chan_dtor(struct nvkm_object *object)
+{
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ if (chan->chid >= 0)
+ disp->chan[chan->chid] = NULL;
+ return chan->func->dtor ? chan->func->dtor(chan) : chan;
+}
+
+static const struct nvkm_object_func
+nv50_disp_chan = {
+ .dtor = nv50_disp_chan_dtor,
+ .init = nv50_disp_chan_init,
+ .fini = nv50_disp_chan_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+ .ntfy = nv50_disp_chan_ntfy,
+ .map = nv50_disp_chan_map,
+ .sclass = nv50_disp_chan_child_get,
+};
+
+int
+nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid, int head,
+ const struct nvkm_oclass *oclass,
+ struct nv50_disp_chan *chan)
+{
+ struct nv50_disp *disp = root->disp;
+
+ nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
+ chan->func = func;
+ chan->mthd = mthd;
+ chan->root = root;
+ chan->chid = chid;
+ chan->head = head;
+
+ if (disp->chan[chan->chid]) {
+ chan->chid = -1;
+ return -EBUSY;
+ }
+ disp->chan[chan->chid] = chan;
+ return 0;
+}
+
+int
+nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid, int head,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nv50_disp_chan *chan;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->object;
+
+ return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+}
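
Nearly every method above recovers the channel from its embedded nvkm_object through the nv50_disp_chan() container_of() wrapper, which is what lets the generic object layer call back without knowing the outer type. The embedding trick in isolation:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct object {                          /* generic base, as nvkm_object */
        const char *name;
};

struct chan {                            /* derived type embeds the base */
        int chid;
        struct object object;
};

#define to_chan(p) container_of((p), struct chan, object)

int main(void)
{
        struct chan c = { .chid = 2, .object = { .name = "base" } };
        struct object *obj = &c.object;  /* what the callbacks are handed */

        printf("%s: chid %d\n", obj->name, to_chan(obj)->chid);
        return 0;
}
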
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
new file mode 100644
index 000000000000..aee374884c96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -0,0 +1,127 @@
+#ifndef __NV50_DISP_CHAN_H__
+#define __NV50_DISP_CHAN_H__
+#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include "nv50.h"
+
+struct nv50_disp_chan {
+ const struct nv50_disp_chan_func *func;
+ const struct nv50_disp_chan_mthd *mthd;
+ struct nv50_disp_root *root;
+ int chid;
+ int head;
+
+ struct nvkm_object object;
+};
+
+struct nv50_disp_chan_func {
+ void *(*dtor)(struct nv50_disp_chan *);
+ int (*init)(struct nv50_disp_chan *);
+ void (*fini)(struct nv50_disp_chan *);
+ int (*child_get)(struct nv50_disp_chan *, int index,
+ struct nvkm_oclass *);
+ int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+};
+
+int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid, int head,
+ const struct nvkm_oclass *, struct nv50_disp_chan *);
+int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid, int head,
+ const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
+
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+ struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
+
+extern const struct nvkm_event_func gf119_disp_chan_uevent;
+
+struct nv50_disp_mthd_list {
+ u32 mthd;
+ u32 addr;
+ struct {
+ u32 mthd;
+ u32 addr;
+ const char *name;
+ } data[];
+};
+
+struct nv50_disp_chan_mthd {
+ const char *name;
+ u32 addr;
+ s32 prev;
+ struct {
+ const char *name;
+ int nr;
+ const struct nv50_disp_mthd_list *mthd;
+ } data[];
+};
+
+void nv50_disp_chan_mthd(struct nv50_disp_chan *, int debug);
+
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
+
+extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
+extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
+extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
+
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
+extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+
+struct nv50_disp_pioc_oclass {
+ int (*ctor)(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const struct nv50_disp_chan_func *func;
+ const struct nv50_disp_chan_mthd *mthd;
+ int chid;
+};
+
+extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+
+int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+#endif
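
Both table types declared here are sentinel-terminated: nv50_disp_chan_mthd() walks nv50_disp_chan_mthd.data[] until the list pointer is NULL, and each nv50_disp_mthd_list ends at an all-zero entry. A compilable sketch of the two-level walk (method/register values invented for illustration):

#include <stdio.h>

struct mthd_list {
        unsigned mthd, addr;
        struct { unsigned mthd, addr; } data[8];  /* zero entry terminates */
};

struct chan_mthd {
        const char *name;
        struct {
                const char *name;
                int nr;
                const struct mthd_list *mthd;     /* NULL terminates */
        } data[4];
};

static const struct mthd_list base_list = {
        .mthd = 0x0000, .addr = 0x000000,
        .data = { { 0x0080, 0x000000 }, { 0x0084, 0x0008c4 }, {} },
};

static const struct chan_mthd base_chan = {
        .name = "Base",
        .data = { { "Global", 1, &base_list }, {} },
};

int main(void)
{
        const struct mthd_list *list;
        int i, j;

        for (i = 0; (list = base_chan.data[i].mthd) != NULL; i++) {
                printf("%s - %s:\n", base_chan.name, base_chan.data[i].name);
                for (j = 0; list->data[j].mthd; j++)
                        printf("\t%04x -> %06x\n",
                               list->data[j].mthd, list->data[j].addr);
        }
        return 0;
}
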
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index cf03e0240ced..c6910d644a3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -33,15 +33,15 @@ static int
nvkm_connector_hpd(struct nvkm_notify *notify)
{
struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
- struct nvkm_disp *disp = nvkm_disp(conn);
- struct nvkm_gpio *gpio = nvkm_gpio(conn);
+ struct nvkm_disp *disp = conn->disp;
+ struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
const struct nvkm_gpio_ntfy_rep *line = notify->data;
struct nvif_notify_conn_rep_v0 rep;
int index = conn->index;

- DBG("HPD: %d\n", line->mask);
+ CONN_DBG(conn, "HPD: %d", line->mask);

- if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+ if (!nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
else
rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
@@ -51,78 +51,58 @@ nvkm_connector_hpd(struct nvkm_notify *notify)
return NVKM_NOTIFY_KEEP;
}

-int
-_nvkm_connector_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_connector_fini(struct nvkm_connector *conn)
{
- struct nvkm_connector *conn = (void *)object;
nvkm_notify_put(&conn->hpd);
- return nvkm_object_fini(&conn->base, suspend);
}

-int
-_nvkm_connector_init(struct nvkm_object *object)
+void
+nvkm_connector_init(struct nvkm_connector *conn)
{
- struct nvkm_connector *conn = (void *)object;
- int ret = nvkm_object_init(&conn->base);
- if (ret == 0)
- nvkm_notify_get(&conn->hpd);
- return ret;
+ nvkm_notify_get(&conn->hpd);
}

void
-_nvkm_connector_dtor(struct nvkm_object *object)
+nvkm_connector_del(struct nvkm_connector **pconn)
{
- struct nvkm_connector *conn = (void *)object;
- nvkm_notify_fini(&conn->hpd);
- nvkm_object_destroy(&conn->base);
+ struct nvkm_connector *conn = *pconn;
+ if (conn) {
+ nvkm_notify_fini(&conn->hpd);
+ kfree(*pconn);
+ *pconn = NULL;
+ }
}

-int
-nvkm_connector_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass,
- struct nvbios_connE *info, int index,
- int length, void **pobject)
+static void
+nvkm_connector_ctor(struct nvkm_disp *disp, int index,
+ struct nvbios_connE *info, struct nvkm_connector *conn)
{
static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
- struct nvkm_disp *disp = nvkm_disp(parent);
- struct nvkm_gpio *gpio = nvkm_gpio(parent);
- struct nvkm_connector *conn;
- struct nvkm_output *outp;
+ struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
struct dcb_gpio_func func;
int ret;

- list_for_each_entry(outp, &disp->outp, head) {
- if (outp->conn && outp->conn->index == index) {
- atomic_inc(&nv_object(outp->conn)->refcount);
- *pobject = outp->conn;
- return 1;
- }
- }
-
- ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
- conn = *pobject;
- if (ret)
- return ret;
-
- conn->info = *info;
+ conn->disp = disp;
conn->index = index;
+ conn->info = *info;

- DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
- info->type, info->location, info->hpd, info->dp,
- info->di, info->sr, info->lcdid);
+ CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
+ info->type, info->location, info->hpd, info->dp,
+ info->di, info->sr, info->lcdid);

if ((info->hpd = ffs(info->hpd))) {
if (--info->hpd >= ARRAY_SIZE(hpd)) {
- ERR("hpd %02x unknown\n", info->hpd);
- return 0;
+ CONN_ERR(conn, "hpd %02x unknown", info->hpd);
+ return;
}
info->hpd = hpd[info->hpd];

- ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
if (ret) {
- ERR("func %02x lookup failed, %d\n", info->hpd, ret);
- return 0;
+ CONN_ERR(conn, "func %02x lookup failed, %d",
+ info->hpd, ret);
+ return;
}

ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
@@ -134,41 +114,19 @@ nvkm_connector_create_(struct nvkm_object *parent,
sizeof(struct nvkm_gpio_ntfy_rep),
&conn->hpd);
if (ret) {
- ERR("func %02x failed, %d\n", info->hpd, ret);
+ CONN_ERR(conn, "func %02x failed, %d", info->hpd, ret);
} else {
- DBG("func %02x (HPD)\n", info->hpd);
+ CONN_DBG(conn, "func %02x (HPD)", info->hpd);
}
}
-
- return 0;
}

int
-_nvkm_connector_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *info, u32 index,
- struct nvkm_object **pobject)
+nvkm_connector_new(struct nvkm_disp *disp, int index,
+ struct nvbios_connE *info, struct nvkm_connector **pconn)
{
- struct nvkm_connector *conn;
- int ret;
-
- ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
- *pobject = nv_object(conn);
- if (ret)
- return ret;
-
+ if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_connector_ctor(disp, index, info, *pconn);
return 0;
}
-
-struct nvkm_oclass *
-nvkm_connector_oclass = &(struct nvkm_connector_impl) {
- .base = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_connector_ctor,
- .dtor = _nvkm_connector_dtor,
- .init = _nvkm_connector_init,
- .fini = _nvkm_connector_fini,
- },
- },
-}.base;
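
nvkm_connector_del() above takes a pointer-to-pointer so one call can both free the connector and NULL the caller's reference, making repeated or unordered teardown harmless. The idiom reduced to stand-alone C:

#include <stdlib.h>

struct conn {
        int index;
};

static void conn_del(struct conn **pconn)
{
        struct conn *conn = *pconn;
        if (conn) {                      /* safe on NULL and repeat calls */
                free(conn);
                *pconn = NULL;
        }
}

int main(void)
{
        struct conn *c = calloc(1, sizeof(*c));
        conn_del(&c);                    /* frees and NULLs the reference */
        conn_del(&c);                    /* harmless no-op */
        return 0;
}
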
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index c87a061f7f7d..ed32fe7f1864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,58 +1,33 @@
#ifndef __NVKM_DISP_CONN_H__
#define __NVKM_DISP_CONN_H__
-#include <core/object.h>
-#include <core/notify.h>
+#include <engine/disp.h>
+#include <core/notify.h>
#include <subdev/bios.h>
#include <subdev/bios/conn.h>

struct nvkm_connector {
- struct nvkm_object base;
- struct list_head head;
-
- struct nvbios_connE info;
+ struct nvkm_disp *disp;
int index;
+ struct nvbios_connE info;
struct nvkm_notify hpd;
-};
-#define nvkm_connector_create(p,e,c,b,i,d) \
- nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_connector_destroy(d) ({ \
- struct nvkm_connector *disp = (d); \
- _nvkm_connector_dtor(nv_object(disp)); \
-})
-#define nvkm_connector_init(d) ({ \
- struct nvkm_connector *disp = (d); \
- _nvkm_connector_init(nv_object(disp)); \
-})
-#define nvkm_connector_fini(d,s) ({ \
- struct nvkm_connector *disp = (d); \
- _nvkm_connector_fini(nv_object(disp), (s)); \
-})
-
-int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, struct nvbios_connE *,
- int, int, void **);
-
-int _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_connector_dtor(struct nvkm_object *);
-int _nvkm_connector_init(struct nvkm_object *);
-int _nvkm_connector_fini(struct nvkm_object *, bool);
-
-struct nvkm_connector_impl {
- struct nvkm_oclass base;
+ struct list_head head;
};

-#ifndef MSG
-#define MSG(l,f,a...) do { \
- struct nvkm_connector *_conn = (void *)conn; \
- nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index, \
- _conn->info.location, _conn->info.type, ##a); \
+int nvkm_connector_new(struct nvkm_disp *, int index, struct nvbios_connE *,
+ struct nvkm_connector **);
+void nvkm_connector_del(struct nvkm_connector **);
+void nvkm_connector_init(struct nvkm_connector *);
+void nvkm_connector_fini(struct nvkm_connector *);
+
+#define CONN_MSG(c,l,f,a...) do { \
+ struct nvkm_connector *_conn = (c); \
+ nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n", \
+ _conn->index, _conn->info.location, _conn->info.type, ##a); \
} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a)
+#define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a)
+#define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a)
#endif
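
The CONN_MSG() macro above stamps each message with the connector's index, location and type before forwarding to the subdev logger, and GNU ##a token pasting swallows the trailing comma when no varargs are supplied. A user-space reduction of the same macro shape, with fprintf() standing in for nvkm_debug()/nvkm_error():

#include <stdio.h>

struct conn {
        int index, location, type;
};

#define CONN_MSG(c, lvl, f, a...) do {                                      \
        const struct conn *_conn = (c);                                     \
        fprintf(stderr, "%s: conn %02x:%02x%02x: " f "\n", lvl,             \
                _conn->index, _conn->location, _conn->type, ##a);           \
} while (0)
#define CONN_DBG(c, f, a...) CONN_MSG((c), "debug", f, ##a)
#define CONN_ERR(c, f, a...) CONN_MSG((c), "error", f, ##a)

int main(void)
{
        struct conn c = { 0x01, 0x00, 0x46 };

        CONN_DBG(&c, "HPD: %d", 4);      /* with varargs */
        CONN_ERR(&c, "hpd unknown");     /* ##a drops the trailing comma */
        return 0;
}
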
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
new file mode 100644
index 000000000000..1baa5c34b327
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610bc4 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x085c, 0x610c5c },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0878, 0x610c50 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x089c, 0x610c68 },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ { 0x0910, 0x610c70 },
+ { 0x0914, 0x610c78 },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_core_chan_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &g84_disp_core_mthd_head },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_core_oclass = {
+ .base.oclass = G82_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &nv50_disp_core_func,
+ .mthd = &g84_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
new file mode 100644
index 000000000000..019379a3a01c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g94_disp_core_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610794 },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+g94_disp_core_chan_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &g84_disp_core_mthd_dac },
+ { "SOR", 4, &g94_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &g84_disp_core_mthd_head },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+g94_disp_core_oclass = {
+ .base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &nv50_disp_core_func,
+ .mthd = &g94_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
new file mode 100644
index 000000000000..6b1dc703dac7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x660080 },
+ { 0x0084, 0x660084 },
+ { 0x0088, 0x660088 },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_dac = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0180, 0x660180 },
+ { 0x0184, 0x660184 },
+ { 0x0188, 0x660188 },
+ { 0x0190, 0x660190 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_sor = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0200, 0x660200 },
+ { 0x0204, 0x660204 },
+ { 0x0208, 0x660208 },
+ { 0x0210, 0x660210 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_pior = {
+ .mthd = 0x0020,
+ .addr = 0x000020,
+ .data = {
+ { 0x0300, 0x660300 },
+ { 0x0304, 0x660304 },
+ { 0x0308, 0x660308 },
+ { 0x0310, 0x660310 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_core_chan_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = -0x020000,
+ .data = {
+ { "Global", 1, &gf119_disp_core_mthd_base },
+ { "DAC", 3, &gf119_disp_core_mthd_dac },
+ { "SOR", 8, &gf119_disp_core_mthd_sor },
+ { "PIOR", 4, &gf119_disp_core_mthd_pior },
+ { "HEAD", 4, &gf119_disp_core_mthd_head },
+ {}
+ }
+};
+
+static void
+gf119_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
+ nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core fini: %08x\n",
+ nvkm_rd32(device, 0x610490));
+ }
+
+ /* disable error reporting and completion notification */
+ nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
+}
+
+static int
+gf119_disp_core_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610494, chan->push);
+ nvkm_wr32(device, 0x610498, 0x00010000);
+ nvkm_wr32(device, 0x61049c, 0x00000001);
+ nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_core_func = {
+ .init = gf119_disp_core_init,
+ .fini = gf119_disp_core_fini,
+ .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_core_oclass = {
+ .base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gf119_disp_core_chan_mthd,
+ .chid = 0,
+};
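
Both gf119_disp_core_init() and _fini() above lean on nvkm_msec() to poll 0x610490 until the busy bits clear, giving up after 2000ms with -EBUSY. A rough stand-alone equivalent of that bounded-poll idiom, using clock_gettime() in place of the device timer (the fake rd32() clears the bit after a few reads so the example terminates):

#include <stdio.h>
#include <time.h>

static unsigned status = 0x80000000;     /* stands in for reg 0x610490 */

static unsigned rd32(void)
{
        static int reads;
        if (++reads > 3)                 /* pretend the hw goes idle */
                status &= ~0x80000000;
        return status;
}

/* poll until the busy bit clears or the timeout expires, like nvkm_msec() */
static int wait_idle(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (!(rd32() & 0x80000000))
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000 +
                 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);
        return -1;                       /* caller would map this to -EBUSY */
}

int main(void)
{
        if (wait_idle(2000) < 0)
                puts("core init: timeout");
        else
                puts("core init: channel idle");
        return 0;
}
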
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
new file mode 100644
index 000000000000..088ab222e823
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_core_mthd_head = {
+ .mthd = 0x0300,
+ .addr = 0x000300,
+ .data = {
+ { 0x0400, 0x660400 },
+ { 0x0404, 0x660404 },
+ { 0x0408, 0x660408 },
+ { 0x040c, 0x66040c },
+ { 0x0410, 0x660410 },
+ { 0x0414, 0x660414 },
+ { 0x0418, 0x660418 },
+ { 0x041c, 0x66041c },
+ { 0x0420, 0x660420 },
+ { 0x0424, 0x660424 },
+ { 0x0428, 0x660428 },
+ { 0x042c, 0x66042c },
+ { 0x0430, 0x660430 },
+ { 0x0434, 0x660434 },
+ { 0x0438, 0x660438 },
+ { 0x0440, 0x660440 },
+ { 0x0444, 0x660444 },
+ { 0x0448, 0x660448 },
+ { 0x044c, 0x66044c },
+ { 0x0450, 0x660450 },
+ { 0x0454, 0x660454 },
+ { 0x0458, 0x660458 },
+ { 0x045c, 0x66045c },
+ { 0x0460, 0x660460 },
+ { 0x0468, 0x660468 },
+ { 0x046c, 0x66046c },
+ { 0x0470, 0x660470 },
+ { 0x0474, 0x660474 },
+ { 0x047c, 0x66047c },
+ { 0x0480, 0x660480 },
+ { 0x0484, 0x660484 },
+ { 0x0488, 0x660488 },
+ { 0x048c, 0x66048c },
+ { 0x0490, 0x660490 },
+ { 0x0494, 0x660494 },
+ { 0x0498, 0x660498 },
+ { 0x04a0, 0x6604a0 },
+ { 0x04b0, 0x6604b0 },
+ { 0x04b8, 0x6604b8 },
+ { 0x04bc, 0x6604bc },
+ { 0x04c0, 0x6604c0 },
+ { 0x04c4, 0x6604c4 },
+ { 0x04c8, 0x6604c8 },
+ { 0x04d0, 0x6604d0 },
+ { 0x04d4, 0x6604d4 },
+ { 0x04e0, 0x6604e0 },
+ { 0x04e4, 0x6604e4 },
+ { 0x04e8, 0x6604e8 },
+ { 0x04ec, 0x6604ec },
+ { 0x04f0, 0x6604f0 },
+ { 0x04f4, 0x6604f4 },
+ { 0x04f8, 0x6604f8 },
+ { 0x04fc, 0x6604fc },
+ { 0x0500, 0x660500 },
+ { 0x0504, 0x660504 },
+ { 0x0508, 0x660508 },
+ { 0x050c, 0x66050c },
+ { 0x0510, 0x660510 },
+ { 0x0514, 0x660514 },
+ { 0x0518, 0x660518 },
+ { 0x051c, 0x66051c },
+ { 0x0520, 0x660520 },
+ { 0x0524, 0x660524 },
+ { 0x052c, 0x66052c },
+ { 0x0530, 0x660530 },
+ { 0x054c, 0x66054c },
+ { 0x0550, 0x660550 },
+ { 0x0554, 0x660554 },
+ { 0x0558, 0x660558 },
+ { 0x055c, 0x66055c },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+gk104_disp_core_chan_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = -0x020000,
+ .data = {
+ { "Global", 1, &gf119_disp_core_mthd_base },
+ { "DAC", 3, &gf119_disp_core_mthd_dac },
+ { "SOR", 8, &gf119_disp_core_mthd_sor },
+ { "PIOR", 4, &gf119_disp_core_mthd_pior },
+ { "HEAD", 4, &gk104_disp_core_mthd_head },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_core_oclass = {
+ .base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
new file mode 100644
index 000000000000..df0f45c20108
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_core_oclass = {
+ .base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
new file mode 100644
index 000000000000..9e27f8fd98b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm107_disp_core_oclass = {
+ .base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
new file mode 100644
index 000000000000..222f4a822f4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm204_disp_core_oclass = {
+ .base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
new file mode 100644
index 000000000000..b234547708fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_core_oclass = {
+ .base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &nv50_disp_core_func,
+ .mthd = &g84_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
new file mode 100644
index 000000000000..8f5ba2018975
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_core_oclass = {
+ .base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &nv50_disp_core_func,
+ .mthd = &g94_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
new file mode 100644
index 000000000000..db4a9b3e0e09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv50_disp_core_channel_dma_v0 v0;
+ } *args = data;
+ struct nvkm_object *parent = oclass->parent;
+ u64 push;
+ int ret;
+
+ nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create disp core channel dma vers %d "
+ "pushbuf %016llx\n",
+ args->v0.version, args->v0.pushbuf);
+ push = args->v0.pushbuf;
+ } else
+ return ret;
+
+ return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
+ push, oclass, pobject);
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x610bb8 },
+ { 0x0088, 0x610b9c },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610828 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610b70 },
+ {}
+ }
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_pior = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0700, 0x610b80 },
+ {}
+ }
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_core_chan_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv50_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv50_disp_core_mthd_head },
+ {}
+ }
+};
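/* The tables above appear to exist for the debug paths that log channel
 * method state: each list maps an EVO method to the register that shadows
 * it, with `mthd` and `addr` as per-instance strides, so DAC1's copy of
 * method 0x0400 sits at 0x0400 + 1 * 0x0080 and its register at
 * 0x610b58 + 1 * 0x000008.  A compilable sketch of walking such a table;
 * the types here are local stand-ins, not the nvkm structures:
 *
 * #include <stdio.h>
 *
 * struct mthd_pair { unsigned mthd, addr; };
 * struct mthd_list { unsigned mthd, addr; struct mthd_pair data[4]; };
 *
 * static void dump_list(const char *name, int nr, const struct mthd_list *l)
 * {
 *	for (int i = 0; i < nr; i++)
 *		for (const struct mthd_pair *p = l->data; p->mthd; p++)
 *			printf("%s%d mthd %04x -> reg %06x\n", name, i,
 *			       p->mthd + i * l->mthd, p->addr + i * l->addr);
 * }
 *
 * int main(void)
 * {
 *	// two-entry stand-in for nv50_disp_core_mthd_dac above
 *	static const struct mthd_list dac = {
 *		.mthd = 0x0080, .addr = 0x000008,
 *		.data = { { 0x0400, 0x610b58 }, { 0x0404, 0x610bdc }, {} },
 *	};
 *	dump_list("DAC", 3, &dac);
 *	return 0;
 * }
 */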
+
+static void
+nv50_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
+ nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core fini: %08x\n",
+ nvkm_rd32(device, 0x610200));
+ }
+
+ /* disable error reporting and completion notifications */
+ nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
+}
+
+static int
+nv50_disp_core_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
+ nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
+ if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
+ nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610204, chan->push);
+ nvkm_wr32(device, 0x610208, 0x00010000);
+ nvkm_wr32(device, 0x61020c, 0x00000000);
+ nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_core_func = {
+ .init = nv50_disp_core_init,
+ .fini = nv50_disp_core_fini,
+ .bind = nv50_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_core_oclass = {
+ .base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &nv50_disp_core_func,
+ .mthd = &nv50_disp_core_chan_mthd,
+ .chid = 0,
+};
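
nv50_disp_core_init()/fini() above lean on nvkm_msec() to poll 0x610200 until the channel leaves its busy state, with a negative result meaning the timeout expired. A rough user-space analogue of that poll-until-or-timeout construct (simplified: it takes a condition rather than a statement body, and all names are local to this sketch):

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* evaluates to >= 0 if cond became true in time, < 0 on timeout */
#define POLL_MSEC(ms, cond) ({                                         \
	long long _end = now_ns() + (ms) * 1000000LL, _ret = -1;       \
	do {                                                           \
		if (cond) { _ret = _end - now_ns(); break; }           \
	} while (now_ns() < _end);                                     \
	_ret;                                                          \
})

static unsigned fake_reg = 0x80000000;

int main(void)
{
	fake_reg = 0;	/* pretend the channel went idle */
	if (POLL_MSEC(2000, !(fake_reg & 0x80000000)) < 0) {
		fprintf(stderr, "core init: %08x\n", fake_reg);
		return 1;
	}
	puts("channel idle");
	return 0;
}
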
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
new file mode 100644
index 000000000000..dd99fc7060b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_curs_oclass = {
+ .base.oclass = G82_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 7,
+};
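
Unlike the DMA channels, the cursor is a PIO channel with a fixed id per head: base 7 on nv50/g84 here, moving to 13 on gf119 in the next file, with the head index added on top in nv50_disp_curs_new() (cursnv50.c below). A trivial sketch of the resulting channel ids:

#include <stdio.h>

static int curs_chid(int base, int head)
{
	return base + head;	/* mirrors `chid + head` in nv50_disp_curs_new() */
}

int main(void)
{
	for (int head = 0; head < 2; head++)
		printf("g84 head %d -> cursor chid %d\n",
		       head, curs_chid(7, head));
	return 0;
}
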
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index f042e7d8321d..2a1574e06ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -21,17 +21,17 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"

-struct nvkm_oclass *
-g94_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x94),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
- .intr = nv50_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_curs_oclass = {
+ .base.oclass = GF110_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 8d2a8f457778..28e8f06c9472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -21,18 +21,17 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"

-struct nvkm_oclass *
-gf106_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0xc3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
- .intr = gf100_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
- .unk260 = gf100_mc_unk260,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_curs_oclass = {
+ .base.oclass = GK104_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
new file mode 100644
index 000000000000..d8a4b9ca139c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_curs_oclass = {
+ .base.oclass = GT214_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
new file mode 100644
index 000000000000..225858e62cf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv50_disp_cursor_v0 v0;
+ } *args = data;
+ struct nvkm_object *parent = oclass->parent;
+ struct nv50_disp *disp = root->disp;
+ int head, ret;
+
+ nvif_ioctl(parent, "create disp cursor size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > disp->base.head.nr)
+ return -EINVAL;
+ head = args->v0.head;
+ } else
+ return ret;
+
+ return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_curs_oclass = {
+ .base.oclass = NV50_DISP_CURSOR,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 7,
+};
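
nv50_disp_curs_new() above is the usual shape of a versioned-ioctl constructor in this series: nvif_unpack() accepts any argument revision the handler knows, and the v0 layout is validated (here, the head index) before use. A self-contained sketch of that version-gated parsing; the struct layout and error values are illustrative, not the real nvif ABI:

#include <stdio.h>
#include <string.h>

struct curs_args_v0 { unsigned char version, head, pad[6]; };

static int curs_new(const void *data, size_t size, int nr_heads)
{
	struct curs_args_v0 v0;

	if (size < sizeof(v0))
		return -22;		/* -EINVAL: payload too small */
	memcpy(&v0, data, sizeof(v0));
	if (v0.version != 0)
		return -38;		/* -ENOSYS: unknown revision */
	if (v0.head >= nr_heads)
		return -22;		/* head out of range */
	printf("cursor on head %d\n", v0.head);
	return 0;
}

int main(void)
{
	const struct curs_args_v0 args = { .version = 0, .head = 1 };
	return curs_new(&args, sizeof(args), 2);
}
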
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 0f7d1ec4d37e..9bfa9e7dc161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -33,6 +33,7 @@
int
nv50_dac_power(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
const u32 doff = outp->or * 0x800;
union {
struct nv50_disp_dac_pwr_v0 v0;
@@ -40,12 +41,12 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
u32 stat;
int ret;

- nv_ioctl(object, "disp dac pwr size %d\n", size);
+ nvif_ioctl(object, "disp dac pwr size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
- "vsync %d hsync %d\n",
- args->v0.version, args->v0.state, args->v0.data,
- args->v0.vsync, args->v0.hsync);
+ nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
+ "vsync %d hsync %d\n",
+ args->v0.version, args->v0.state, args->v0.data,
+ args->v0.vsync, args->v0.hsync);
stat = 0x00000040 * !args->v0.state;
stat |= 0x00000010 * !args->v0.data;
stat |= 0x00000004 * !args->v0.vsync;
@@ -53,15 +54,23 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
} else
return ret;

- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+ break;
+ );
+ nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+ break;
+ );
return 0;
}

int
nv50_dac_sense(NV50_DISP_MTHD_V1)
{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
union {
struct nv50_disp_dac_load_v0 v0;
} *args = data;
@@ -69,31 +78,49 @@ nv50_dac_sense(NV50_DISP_MTHD_V1)
u32 loadval;
int ret;

- nv_ioctl(object, "disp dac load size %d\n", size);
+ nvif_ioctl(object, "disp dac load size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp dac load vers %d data %08x\n",
- args->v0.version, args->v0.data);
+ nvif_ioctl(object, "disp dac load vers %d data %08x\n",
+ args->v0.version, args->v0.data);
if (args->v0.data & 0xfff00000)
return -EINVAL;
loadval = args->v0.data;
} else
return ret;

- nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+ break;
+ );

- nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
mdelay(9);
udelay(500);
- loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+ loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);

- nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+ break;
+ );

- nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
+ nvkm_debug(subdev, "DAC%d sense: %08x\n", outp->or, loadval);
if (!(loadval & 0x80000000))
return -ETIMEDOUT;

args->v0.load = (loadval & 0x38000000) >> 27;
return 0;
}
+
+static const struct nvkm_output_func
+nv50_dac_output_func = {
+};
+
+int
+nv50_dac_output_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_new_(&nv50_dac_output_func, disp,
+ index, dcbE, poutp);
+}
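
The sense path above drives a test level onto the DAC, waits roughly 9.5 ms (mdelay(9) + udelay(500)), then reads 0x61a00c back: bit 31 reports that the measurement completed, and bits 29:27 are handed to the caller as the load status. The decode step, isolated into a sketch:

#include <stdio.h>

static int dac_sense_decode(unsigned loadval, unsigned *load)
{
	if (!(loadval & 0x80000000))
		return -110;		/* -ETIMEDOUT: measurement never completed */
	*load = (loadval & 0x38000000) >> 27;
	return 0;
}

int main(void)
{
	unsigned load;
	if (dac_sense_decode(0xb8000000, &load) == 0)
		printf("DAC load: %u (non-zero suggests a connected display)\n",
		       load);
	return 0;
}
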
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
new file mode 100644
index 000000000000..876b14549a58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+int
+gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+{
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+ chan->base.chid, -9, handle,
+ chan->base.chid << 27 | 0x00000001);
+}
+
+static void
+gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+ nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_dmac_func = {
+ .init = gf119_disp_dmac_init,
+ .fini = gf119_disp_dmac_fini,
+ .bind = gf119_disp_dmac_bind,
+};
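
gf119_disp_dmac_bind() above packs the channel id into the top bits of the RAMHT context word with bit 0 as a valid flag; judging by the -9 shift argument to nvkm_ramht_insert(), the hash-table code is expected to fold the bound object's instance address into the remaining bits, which this sketch does not attempt to reproduce. Only the packing the code shows explicitly:

#include <stdio.h>

static unsigned gf119_ramht_ctxt(unsigned chid)
{
	/* matches `chan->base.chid << 27 | 0x00000001` in the bind above */
	return (chid << 27) | 0x00000001;
}

int main(void)
{
	for (unsigned chid = 0; chid < 3; chid++)
		printf("ch %u ctxt %08x\n", chid, gf119_ramht_ctxt(chid));
	return 0;
}
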
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
new file mode 100644
index 000000000000..9c6645a357b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/oproxy.h>
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/dma.h>
+
+struct nv50_disp_dmac_object {
+ struct nvkm_oproxy oproxy;
+ struct nv50_disp_root *root;
+ int hash;
+};
+
+static void
+nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
+{
+ struct nv50_disp_dmac_object *object =
+ container_of(base, typeof(*object), oproxy);
+ nvkm_ramht_remove(object->root->ramht, object->hash);
+}
+
+static const struct nvkm_oproxy_func
+nv50_disp_dmac_child_func_ = {
+ .dtor[0] = nv50_disp_dmac_child_del_,
+};
+
+static int
+nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
+ const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+ struct nv50_disp_root *root = chan->base.root;
+ struct nvkm_device *device = root->disp->base.engine.subdev.device;
+ const struct nvkm_device_oclass *sclass = oclass->priv;
+ struct nv50_disp_dmac_object *object;
+ int ret;
+
+ if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
+ object->root = root;
+ *pobject = &object->oproxy.base;
+
+ ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
+ if (ret)
+ return ret;
+
+ object->hash = chan->func->bind(chan, object->oproxy.object,
+ oclass->handle);
+ if (object->hash < 0)
+ return object->hash;
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
+ struct nvkm_oclass *sclass)
+{
+ struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const struct nvkm_device_oclass *oclass = NULL;
+
+ sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+ if (sclass->engine && sclass->engine->func->base.sclass) {
+ sclass->engine->func->base.sclass(sclass, index, &oclass);
+ if (oclass) {
+ sclass->priv = oclass;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void
+nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
+{
+ struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+ chan->func->fini(chan);
+}
+
+static int
+nv50_disp_dmac_init_(struct nv50_disp_chan *base)
+{
+ struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+ return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
+{
+ return nv50_disp_dmac(base);
+}
+
+static const struct nv50_disp_chan_func
+nv50_disp_dmac_func_ = {
+ .dtor = nv50_disp_dmac_dtor_,
+ .init = nv50_disp_dmac_init_,
+ .fini = nv50_disp_dmac_fini_,
+ .child_get = nv50_disp_dmac_child_get_,
+ .child_new = nv50_disp_dmac_child_new_,
+};
+
+int
+nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid, int head, u64 push,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_device *device = root->disp->base.engine.subdev.device;
+ struct nvkm_client *client = oclass->client;
+ struct nvkm_dmaobj *dmaobj;
+ struct nv50_disp_dmac *chan;
+ int ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->func = func;
+
+ ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+ chid, head, oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ dmaobj = nvkm_dma_search(device->dma, client, push);
+ if (!dmaobj)
+ return -ENOENT;
+
+ if (dmaobj->limit - dmaobj->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ chan->push = 0x00000001 | dmaobj->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ chan->push = 0x00000003 | dmaobj->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+{
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+ chan->base.chid, -10, handle,
+ chan->base.chid << 28 |
+ chan->base.chid);
+}
+
+static void
+nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+ nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ }
+
+ /* disable error reporting and completion notifications */
+ nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+}
+
+static int
+nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+ nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+ nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_dmac_func = {
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .bind = nv50_disp_dmac_bind,
+};
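
nv50_disp_dmac_new_() above accepts only DMA objects spanning exactly one 4KiB page and encodes the push-buffer register value as the page address shifted down by 8, tagged with a target code in the low bits (1 = VRAM, 3 = non-snooped PCI). The rule, extracted into a checkable sketch (error values illustrative):

#include <stdio.h>

enum target { TGT_VRAM, TGT_PCI_NOSNOOP };

static int disp_push_val(unsigned long long start, unsigned long long limit,
			 enum target tgt, unsigned *push)
{
	if (limit - start != 0xfff)	/* must cover exactly 4KiB */
		return -22;		/* -EINVAL */
	switch (tgt) {
	case TGT_VRAM:
		*push = 0x00000001 | (unsigned)(start >> 8);
		break;
	case TGT_PCI_NOSNOOP:
		*push = 0x00000003 | (unsigned)(start >> 8);
		break;
	default:
		return -22;
	}
	return 0;
}

int main(void)
{
	unsigned push;
	if (!disp_push_val(0x100000, 0x100fff, TGT_VRAM, &push))
		printf("0x610204 <- %08x\n", push);	/* nv50 push reg, per init above */
	return 0;
}
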
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
new file mode 100644
index 000000000000..c748ca23ab70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -0,0 +1,91 @@
+#ifndef __NV50_DISP_DMAC_H__
+#define __NV50_DISP_DMAC_H__
+#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
+#include "channv50.h"
+
+struct nv50_disp_dmac {
+ const struct nv50_disp_dmac_func *func;
+ struct nv50_disp_chan base;
+ u32 push;
+};
+
+struct nv50_disp_dmac_func {
+ int (*init)(struct nv50_disp_dmac *);
+ void (*fini)(struct nv50_disp_dmac *);
+ int (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
+};
+
+int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid, int head, u64 push,
+ const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
+int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func nv50_disp_core_func;
+
+extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+
+struct nv50_disp_dmac_oclass {
+ int (*ctor)(const struct nv50_disp_dmac_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const struct nv50_disp_dmac_func *func;
+ const struct nv50_disp_chan_mthd *mthd;
+ int chid;
+};
+
+int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **);
+int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **);
+int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
+ const struct nv50_disp_chan_mthd *,
+ struct nv50_disp_root *, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+#endif
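
The ladder of extern declarations above exists so each chipset's root object can assemble its channel list from the newest classes it supports, falling back to an older generation's class where nothing changed. A toy selection table in that spirit; the thresholds and names are illustrative only, not how nvkm actually dispatches:

#include <stdio.h>

struct oclass_name { const char *core, *base, *ovly; };

static const struct oclass_name
nv50_classes  = { "nv50_core",  "nv50_base",  "nv50_ovly"  },
g84_classes   = { "g84_core",   "g84_base",   "g84_ovly"   },
gt215_classes = { "gt215_core", "gt215_base", "gt215_ovly" };

static const struct oclass_name *classes_for(unsigned chipset)
{
	if (chipset >= 0xa3) return &gt215_classes;
	if (chipset >= 0x84) return &g84_classes;
	return &nv50_classes;
}

int main(void)
{
	printf("chipset 0x86 -> %s\n", classes_for(0x86)->core);
	return 0;
}
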
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 68347661adca..74e2f7c6c07e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -48,12 +48,12 @@ struct dp_state {
static int
dp_set_link_config(struct dp_state *dp)
{
- struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
struct nvkm_output_dp *outp = dp->outp;
- struct nvkm_disp *disp = nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(disp);
+ struct nvkm_disp *disp = outp->base.disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_init init = {
- .subdev = nv_subdev(disp),
+ .subdev = subdev,
.bios = bios,
.offset = 0x0000,
.outp = &outp->base.info,
@@ -64,33 +64,33 @@ dp_set_link_config(struct dp_state *dp)
u8 sink[2];
int ret;

- DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+ OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw);

/* set desired link configuration on the source */
if ((lnkcmp = dp->outp->info.lnkcmp)) {
if (outp->version < 0x30) {
- while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+ while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp))
lnkcmp += 4;
- init.offset = nv_ro16(bios, lnkcmp + 2);
+ init.offset = nvbios_rd16(bios, lnkcmp + 2);
} else {
- while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+ while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp))
lnkcmp += 3;
- init.offset = nv_ro16(bios, lnkcmp + 1);
+ init.offset = nvbios_rd16(bios, lnkcmp + 1);
}

nvbios_exec(&init);
}

- ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
- outp->dpcd[DPCD_RC02] &
- DPCD_RC02_ENHANCED_FRAME_CAP);
+ ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+ outp->dpcd[DPCD_RC02] &
+ DPCD_RC02_ENHANCED_FRAME_CAP);
if (ret) {
if (ret < 0)
- ERR("lnk_ctl failed with %d\n", ret);
+ OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret);
return ret;
}

- impl->lnk_pwr(outp, dp->link_nr);
+ outp->func->lnk_pwr(outp, dp->link_nr);

/* set desired link configuration on the sink */
sink[0] = dp->link_bw / 27000;
@@ -98,29 +98,27 @@ dp_set_link_config(struct dp_state *dp)
if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

- return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
+ return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
}

static void
dp_set_training_pattern(struct dp_state *dp, u8 pattern)
{
- struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
struct nvkm_output_dp *outp = dp->outp;
u8 sink_tp;

- DBG("training pattern %d\n", pattern);
- impl->pattern(outp, pattern);
+ OUTP_DBG(&outp->base, "training pattern %d", pattern);
+ outp->func->pattern(outp, pattern);

- nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+ nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1);
sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
sink_tp |= pattern;
- nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+ nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1);
}

static int
dp_link_train_commit(struct dp_state *dp, bool pc)
{
- struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
struct nvkm_output_dp *outp = dp->outp;
int ret, i;

@@ -146,16 +144,17 @@ dp_link_train_commit(struct dp_state *dp, bool pc)
dp->conf[i] = (lpre << 3) | lvsw;
dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

- DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
- impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+ OUTP_DBG(&outp->base, "config lane %d %02x %02x",
+ i, dp->conf[i], lpc2);
+ outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
}

- ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+ ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4);
if (ret)
return ret;

if (pc) {
- ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+ ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2);
if (ret)
return ret;
}
@@ -174,17 +173,18 @@ dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
else
udelay(delay);

- ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
+ ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6);
if (ret)
return ret;

if (pc) {
- ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+ ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1);
if (ret)
dp->pc2stat = 0x00;
- DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+ OUTP_DBG(&outp->base, "status %6ph pc2 %02x",
+ dp->stat, dp->pc2stat);
} else {
- DBG("status %6ph\n", dp->stat);
+ OUTP_DBG(&outp->base, "status %6ph", dp->stat);
}

return 0;
@@ -260,11 +260,11 @@ static void
dp_link_train_init(struct dp_state *dp, bool spread)
{
struct nvkm_output_dp *outp = dp->outp;
- struct nvkm_disp *disp = nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(disp);
+ struct nvkm_disp *disp = outp->base.disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvbios_init init = {
- .subdev = nv_subdev(disp),
- .bios = bios,
+ .subdev = subdev,
+ .bios = subdev->device->bios,
.outp = &outp->base.info,
.crtc = -1,
.execute = 1,
@@ -286,11 +286,11 @@ static void
dp_link_train_fini(struct dp_state *dp)
{
struct nvkm_output_dp *outp = dp->outp;
- struct nvkm_disp *disp = nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(disp);
+ struct nvkm_disp *disp = outp->base.disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvbios_init init = {
- .subdev = nv_subdev(disp),
- .bios = bios,
+ .subdev = subdev,
+ .bios = subdev->device->bios,
.outp = &outp->base.info,
.crtc = -1,
.execute = 1,
@@ -322,7 +322,7 @@ void
nvkm_dp_train(struct work_struct *w)
{
struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nv50_disp *disp = nv50_disp(outp->base.disp);
const struct dp_rates *cfg = nvkm_dp_rates;
struct dp_state _dp = {
.outp = outp,
@@ -330,11 +330,11 @@ nvkm_dp_train(struct work_struct *w)
u32 datarate = 0;
int ret;

- if (!outp->base.info.location && priv->sor.magic)
- priv->sor.magic(&outp->base);
+ if (!outp->base.info.location && disp->func->sor.magic)
+ disp->func->sor.magic(&outp->base);

/* bring capabilities within encoder limits */
- if (nv_mclass(priv) < GF110_DISP)
+ if (disp->base.engine.subdev.device->chipset < 0xd0)
outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -386,12 +386,12 @@ nvkm_dp_train(struct work_struct *w)
/* finish link training and execute post-train script from vbios */
dp_set_training_pattern(dp, 0);
if (ret < 0)
- ERR("link training failed\n");
+ OUTP_ERR(&outp->base, "link training failed");

dp_link_train_fini(dp);

/* signal completion and enable link interrupt handling */
- DBG("training complete\n");
+ OUTP_DBG(&outp->base, "training complete");
atomic_set(&outp->lt.done, 1);
wake_up(&outp->lt.wait);
nvkm_notify_get(&outp->irq);
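
dp_set_link_config() above programs the sink with two DPCD bytes: LINK_BW_SET (0x100) in 270 Mbps units, hence the `dp->link_bw / 27000` with link_bw held in the same units the code's "%d KB/s" log uses (2.7 Gbps is 270000), and LANE_COUNT_SET (0x101) carrying the lane count plus the enhanced-framing flag. The encoding on its own:

#include <stdio.h>

#define DPCD_LC01_ENHANCED_FRAME_EN 0x80

static void dp_sink_config(unsigned link_bw, unsigned link_nr,
			   int enhanced, unsigned char sink[2])
{
	sink[0] = link_bw / 27000;	/* 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps */
	sink[1] = link_nr;
	if (enhanced)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
}

int main(void)
{
	unsigned char sink[2];
	dp_sink_config(270000, 4, 1, sink);	/* 2.7 Gbps x4, enhanced framing */
	printf("DPCD 0x100/0x101 <- %02x %02x\n", sink[0], sink[1]);
	return 0;
}
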
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a0dcf534cb20..3e3e592cd09f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -22,251 +22,34 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_dac = {
- .mthd = 0x0080,
- .addr = 0x000008,
- .data = {
- { 0x0400, 0x610b58 },
- { 0x0404, 0x610bdc },
- { 0x0420, 0x610bc4 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_head = {
- .mthd = 0x0400,
- .addr = 0x000540,
- .data = {
- { 0x0800, 0x610ad8 },
- { 0x0804, 0x610ad0 },
- { 0x0808, 0x610a48 },
- { 0x080c, 0x610a78 },
- { 0x0810, 0x610ac0 },
- { 0x0814, 0x610af8 },
- { 0x0818, 0x610b00 },
- { 0x081c, 0x610ae8 },
- { 0x0820, 0x610af0 },
- { 0x0824, 0x610b08 },
- { 0x0828, 0x610b10 },
- { 0x082c, 0x610a68 },
- { 0x0830, 0x610a60 },
- { 0x0834, 0x000000 },
- { 0x0838, 0x610a40 },
- { 0x0840, 0x610a24 },
- { 0x0844, 0x610a2c },
- { 0x0848, 0x610aa8 },
- { 0x084c, 0x610ab0 },
- { 0x085c, 0x610c5c },
- { 0x0860, 0x610a84 },
- { 0x0864, 0x610a90 },
- { 0x0868, 0x610b18 },
- { 0x086c, 0x610b20 },
- { 0x0870, 0x610ac8 },
- { 0x0874, 0x610a38 },
- { 0x0878, 0x610c50 },
- { 0x0880, 0x610a58 },
- { 0x0884, 0x610a9c },
- { 0x089c, 0x610c68 },
- { 0x08a0, 0x610a70 },
- { 0x08a4, 0x610a50 },
- { 0x08a8, 0x610ae0 },
- { 0x08c0, 0x610b28 },
- { 0x08c4, 0x610b30 },
- { 0x08c8, 0x610b40 },
- { 0x08d4, 0x610b38 },
- { 0x08d8, 0x610b48 },
- { 0x08dc, 0x610b50 },
- { 0x0900, 0x610a18 },
- { 0x0904, 0x610ab8 },
- { 0x0910, 0x610c70 },
- { 0x0914, 0x610c78 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_core_mthd_chan = {
- .name = "Core",
- .addr = 0x000000,
- .data = {
- { "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 2, &nv50_disp_core_mthd_sor },
- { "PIOR", 3, &nv50_disp_core_mthd_pior },
- { "HEAD", 2, &g84_disp_core_mthd_head },
- {}
- }
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_base_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x0008c4 },
- { 0x0088, 0x0008d0 },
- { 0x008c, 0x0008dc },
- { 0x0090, 0x0008e4 },
- { 0x0094, 0x610884 },
- { 0x00a0, 0x6108a0 },
- { 0x00a4, 0x610878 },
- { 0x00c0, 0x61086c },
- { 0x00c4, 0x610800 },
- { 0x00c8, 0x61080c },
- { 0x00cc, 0x610818 },
- { 0x00e0, 0x610858 },
- { 0x00e4, 0x610860 },
- { 0x00e8, 0x6108ac },
- { 0x00ec, 0x6108b4 },
- { 0x00fc, 0x610824 },
- { 0x0100, 0x610894 },
- { 0x0104, 0x61082c },
- { 0x0110, 0x6108bc },
- { 0x0114, 0x61088c },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_base_mthd_chan = {
- .name = "Base",
- .addr = 0x000540,
- .data = {
- { "Global", 1, &g84_disp_base_mthd_base },
- { "Image", 2, &nv50_disp_base_mthd_image },
- {}
- }
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_ovly_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x6109a0 },
- { 0x0088, 0x6109c0 },
- { 0x008c, 0x6109c8 },
- { 0x0090, 0x6109b4 },
- { 0x0094, 0x610970 },
- { 0x00a0, 0x610998 },
- { 0x00a4, 0x610964 },
- { 0x00c0, 0x610958 },
- { 0x00e0, 0x6109a8 },
- { 0x00e4, 0x6109d0 },
- { 0x00e8, 0x6109d8 },
- { 0x0100, 0x61094c },
- { 0x0104, 0x610984 },
- { 0x0108, 0x61098c },
- { 0x0800, 0x6109f8 },
- { 0x0808, 0x610a08 },
- { 0x080c, 0x610a10 },
- { 0x0810, 0x610a00 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_ovly_mthd_chan = {
- .name = "Overlay",
- .addr = 0x000540,
- .data = {
- { "Global", 1, &g84_disp_ovly_mthd_base },
- {}
- }
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_disp_sclass[] = {
- { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
- { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
- { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
- { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
- { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-g84_disp_main_oclass[] = {
- { G82_DISP, &nv50_disp_main_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+g84_disp = {
+ .intr = nv50_disp_intr,
+ .uevent = &nv50_disp_chan_uevent,
+ .super = nv50_disp_intr_supervisor,
+ .root = &g84_disp_root_oclass,
+ .head.vblank_init = nv50_disp_vblank_init,
+ .head.vblank_fini = nv50_disp_vblank_fini,
+ .head.scanoutpos = nv50_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.external.tmds = nv50_pior_output_new,
+ .outp.external.dp = nv50_pior_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 2,
+ .sor.power = nv50_sor_power,
+ .sor.hdmi = g84_hdmi_ctrl,
+ .pior.nr = 3,
+ .pior.power = nv50_pior_power,
+};
+
+int
+g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = g84_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nv50_disp_intr;
- INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = g84_disp_sclass;
- priv->head.nr = 2;
- priv->dac.nr = 3;
- priv->sor.nr = 2;
- priv->pior.nr = 3;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = g84_hdmi_ctrl;
- priv->pior.power = nv50_pior_power;
- return 0;
+ return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-g84_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x82),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &nv50_disp_vblank_func,
- .base.outp = nv50_disp_outp_sclass,
- .mthd.core = &g84_disp_core_mthd_chan,
- .mthd.base = &g84_disp_base_mthd_chan,
- .mthd.ovly = &g84_disp_ovly_mthd_chan,
- .mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
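
The g84.c rewrite above is the shape every chipset file takes after this series: the nvkm_oclass/ctor boilerplate collapses into one static function table plus a two-line *_new() that hands the table and head count to a shared constructor. A reduced model of that registration flow; the types and the shared constructor here are stand-ins, not the nvkm definitions:

#include <stdio.h>

struct disp_func {
	const char *root;
	int dac_nr, sor_nr, pior_nr;
};

/* stand-in for nv50_disp_new_(): consumes a chipset's function table */
static int disp_new_(const struct disp_func *func, int heads)
{
	printf("disp: root=%s heads=%d dac=%d sor=%d pior=%d\n",
	       func->root, heads, func->dac_nr, func->sor_nr, func->pior_nr);
	return 0;
}

static const struct disp_func g84_disp = {
	.root = "g84_disp_root", .dac_nr = 3, .sor_nr = 2, .pior_nr = 3,
};

int g84_disp_new(int *pdisp)
{
	(void)pdisp;
	return disp_new_(&g84_disp, 2);	/* two heads, as in the real g84_disp_new() */
}

int main(void)
{
	return g84_disp_new(0);
}
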
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 1ab0d0ae3cc8..7a7af3b478f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -22,118 +22,35 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-#include "outpdp.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g94_disp_core_mthd_sor = {
- .mthd = 0x0040,
- .addr = 0x000008,
- .data = {
- { 0x0600, 0x610794 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-g94_disp_core_mthd_chan = {
- .name = "Core",
- .addr = 0x000000,
- .data = {
- { "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &g84_disp_core_mthd_dac },
- { "SOR", 4, &g94_disp_core_mthd_sor },
- { "PIOR", 3, &nv50_disp_core_mthd_pior },
- { "HEAD", 2, &g84_disp_core_mthd_head },
- {}
- }
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+g94_disp = {
+ .intr = nv50_disp_intr,
+ .uevent = &nv50_disp_chan_uevent,
+ .super = nv50_disp_intr_supervisor,
+ .root = &g94_disp_root_oclass,
+ .head.vblank_init = nv50_disp_vblank_init,
+ .head.vblank_fini = nv50_disp_vblank_fini,
+ .head.scanoutpos = nv50_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = g94_sor_dp_new,
+ .outp.external.tmds = nv50_pior_output_new,
+ .outp.external.dp = nv50_pior_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hdmi = g84_hdmi_ctrl,
+ .pior.nr = 3,
+ .pior.power = nv50_pior_power,
};

-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g94_disp_sclass[] = {
- { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
- { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
- { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
- { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-g94_disp_main_oclass[] = {
- { GT206_DISP, &nv50_disp_main_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = g94_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nv50_disp_intr;
- INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = g94_disp_sclass;
- priv->head.nr = 2;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->pior.nr = 3;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = g84_hdmi_ctrl;
- priv->pior.power = nv50_pior_power;
- return 0;
+ return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-g94_disp_outp_sclass[] = {
- &nv50_pior_dp_impl.base.base,
- &g94_sor_dp_impl.base.base,
- NULL
-};
-
-struct nvkm_oclass *
-g94_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x88),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g94_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &nv50_disp_vblank_func,
- .base.outp = g94_disp_outp_sclass,
- .mthd.core = &g94_disp_core_mthd_chan,
- .mthd.base = &g84_disp_base_mthd_chan,
- .mthd.ovly = &g84_disp_ovly_mthd_chan,
- .mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
deleted file mode 100644
index 9ef6728c528d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/disp.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/pll.h>
-#include <subdev/devinit.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static void
-gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
- struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
- nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
- nv_wr32(priv, 0x61008c, 0x00000001 << index);
-}
-
-static void
-gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
- struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
- nv_wr32(priv, 0x61008c, 0x00000001 << index);
- nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
-}
-
-const struct nvkm_event_func
-gf110_disp_chan_uevent = {
- .ctor = nv50_disp_chan_uevent_ctor,
- .init = gf110_disp_chan_uevent_init,
- .fini = gf110_disp_chan_uevent_fini,
-};
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_dmac_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 name)
-{
- struct nv50_disp_base *base = (void *)parent->parent;
- struct nv50_disp_chan *chan = (void *)parent;
- u32 addr = nv_gpuobj(object)->node->offset;
- u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
- return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
-}
-
-static void
-gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
- struct nv50_disp_base *base = (void *)parent->parent;
- nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-gf110_disp_dmac_init(struct nvkm_object *object)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *dmac = (void *)object;
- int chid = dmac->base.chid;
- int ret;
-
- ret = nv50_disp_chan_init(&dmac->base);
- if (ret)
- return ret;
-
- /* enable error reporting */
- nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
- /* initialise channel for dma command submission */
- nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
- nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
- nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
- nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
- nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
-
- /* wait for it to go inactive */
- if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
- nv_error(dmac, "init: 0x%08x\n",
- nv_rd32(priv, 0x610490 + (chid * 0x10)));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *dmac = (void *)object;
- int chid = dmac->base.chid;
-
- /* deactivate channel */
- nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
- nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
- if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
- nv_error(dmac, "fini: 0x%08x\n",
- nv_rd32(priv, 0x610490 + (chid * 0x10)));
- if (suspend)
- return -EBUSY;
- }
-
- /* disable error reporting and completion notification */
- nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
- nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
- return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x660080 },
- { 0x0084, 0x660084 },
- { 0x0088, 0x660088 },
- { 0x008c, 0x000000 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_dac = {
- .mthd = 0x0020,
- .addr = 0x000020,
- .data = {
- { 0x0180, 0x660180 },
- { 0x0184, 0x660184 },
- { 0x0188, 0x660188 },
- { 0x0190, 0x660190 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_sor = {
- .mthd = 0x0020,
- .addr = 0x000020,
- .data = {
- { 0x0200, 0x660200 },
- { 0x0204, 0x660204 },
- { 0x0208, 0x660208 },
- { 0x0210, 0x660210 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_pior = {
- .mthd = 0x0020,
- .addr = 0x000020,
- .data = {
- { 0x0300, 0x660300 },
- { 0x0304, 0x660304 },
- { 0x0308, 0x660308 },
- { 0x0310, 0x660310 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_head = {
- .mthd = 0x0300,
- .addr = 0x000300,
- .data = {
- { 0x0400, 0x660400 },
- { 0x0404, 0x660404 },
- { 0x0408, 0x660408 },
- { 0x040c, 0x66040c },
- { 0x0410, 0x660410 },
- { 0x0414, 0x660414 },
- { 0x0418, 0x660418 },
- { 0x041c, 0x66041c },
- { 0x0420, 0x660420 },
- { 0x0424, 0x660424 },
- { 0x0428, 0x660428 },
- { 0x042c, 0x66042c },
- { 0x0430, 0x660430 },
- { 0x0434, 0x660434 },
- { 0x0438, 0x660438 },
- { 0x0440, 0x660440 },
- { 0x0444, 0x660444 },
- { 0x0448, 0x660448 },
- { 0x044c, 0x66044c },
- { 0x0450, 0x660450 },
- { 0x0454, 0x660454 },
- { 0x0458, 0x660458 },
- { 0x045c, 0x66045c },
- { 0x0460, 0x660460 },
- { 0x0468, 0x660468 },
- { 0x046c, 0x66046c },
- { 0x0470, 0x660470 },
- { 0x0474, 0x660474 },
- { 0x0480, 0x660480 },
- { 0x0484, 0x660484 },
- { 0x048c, 0x66048c },
- { 0x0490, 0x660490 },
- { 0x0494, 0x660494 },
- { 0x0498, 0x660498 },
- { 0x04b0, 0x6604b0 },
- { 0x04b8, 0x6604b8 },
- { 0x04bc, 0x6604bc },
- { 0x04c0, 0x6604c0 },
- { 0x04c4, 0x6604c4 },
- { 0x04c8, 0x6604c8 },
- { 0x04d0, 0x6604d0 },
- { 0x04d4, 0x6604d4 },
- { 0x04e0, 0x6604e0 },
- { 0x04e4, 0x6604e4 },
- { 0x04e8, 0x6604e8 },
- { 0x04ec, 0x6604ec },
- { 0x04f0, 0x6604f0 },
- { 0x04f4, 0x6604f4 },
- { 0x04f8, 0x6604f8 },
- { 0x04fc, 0x6604fc },
- { 0x0500, 0x660500 },
- { 0x0504, 0x660504 },
- { 0x0508, 0x660508 },
- { 0x050c, 0x66050c },
- { 0x0510, 0x660510 },
- { 0x0514, 0x660514 },
- { 0x0518, 0x660518 },
- { 0x051c, 0x66051c },
- { 0x052c, 0x66052c },
- { 0x0530, 0x660530 },
- { 0x054c, 0x66054c },
- { 0x0550, 0x660550 },
- { 0x0554, 0x660554 },
- { 0x0558, 0x660558 },
- { 0x055c, 0x66055c },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_core_mthd_chan = {
- .name = "Core",
- .addr = 0x000000,
- .data = {
- { "Global", 1, &gf110_disp_core_mthd_base },
- { "DAC", 3, &gf110_disp_core_mthd_dac },
- { "SOR", 8, &gf110_disp_core_mthd_sor },
- { "PIOR", 4, &gf110_disp_core_mthd_pior },
- { "HEAD", 4, &gf110_disp_core_mthd_head },
- {}
- }
-};
-
-static int
-gf110_disp_core_init(struct nvkm_object *object)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *mast = (void *)object;
- int ret;
-
- ret = nv50_disp_chan_init(&mast->base);
- if (ret)
- return ret;
-
- /* enable error reporting */
- nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
-
- /* initialise channel for dma command submission */
- nv_wr32(priv, 0x610494, mast->push);
- nv_wr32(priv, 0x610498, 0x00010000);
- nv_wr32(priv, 0x61049c, 0x00000001);
- nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x640000, 0x00000000);
- nv_wr32(priv, 0x610490, 0x01000013);
-
- /* wait for it to go inactive */
- if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
- nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *mast = (void *)object;
-
- /* deactivate channel */
- nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
- nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
- if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
- nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
- if (suspend)
- return -EBUSY;
- }
-
- /* disable error reporting and completion notification */
- nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
- nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
-
- return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-gf110_disp_core_ofuncs = {
- .base.ctor = nv50_disp_core_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = gf110_disp_core_init,
- .base.fini = gf110_disp_core_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 0,
- .attach = gf110_disp_dmac_object_attach,
- .detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x661080 },
- { 0x0084, 0x661084 },
- { 0x0088, 0x661088 },
- { 0x008c, 0x66108c },
- { 0x0090, 0x661090 },
- { 0x0094, 0x661094 },
- { 0x00a0, 0x6610a0 },
- { 0x00a4, 0x6610a4 },
- { 0x00c0, 0x6610c0 },
- { 0x00c4, 0x6610c4 },
- { 0x00c8, 0x6610c8 },
- { 0x00cc, 0x6610cc },
- { 0x00e0, 0x6610e0 },
- { 0x00e4, 0x6610e4 },
- { 0x00e8, 0x6610e8 },
- { 0x00ec, 0x6610ec },
- { 0x00fc, 0x6610fc },
- { 0x0100, 0x661100 },
- { 0x0104, 0x661104 },
- { 0x0108, 0x661108 },
- { 0x010c, 0x66110c },
- { 0x0110, 0x661110 },
- { 0x0114, 0x661114 },
- { 0x0118, 0x661118 },
- { 0x011c, 0x66111c },
- { 0x0130, 0x661130 },
- { 0x0134, 0x661134 },
- { 0x0138, 0x661138 },
- { 0x013c, 0x66113c },
- { 0x0140, 0x661140 },
- { 0x0144, 0x661144 },
- { 0x0148, 0x661148 },
- { 0x014c, 0x66114c },
- { 0x0150, 0x661150 },
- { 0x0154, 0x661154 },
- { 0x0158, 0x661158 },
- { 0x015c, 0x66115c },
- { 0x0160, 0x661160 },
- { 0x0164, 0x661164 },
- { 0x0168, 0x661168 },
- { 0x016c, 0x66116c },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_image = {
- .mthd = 0x0020,
- .addr = 0x000020,
- .data = {
- { 0x0400, 0x661400 },
- { 0x0404, 0x661404 },
- { 0x0408, 0x661408 },
- { 0x040c, 0x66140c },
- { 0x0410, 0x661410 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-gf110_disp_base_mthd_chan = {
- .name = "Base",
- .addr = 0x001000,
- .data = {
- { "Global", 1, &gf110_disp_base_mthd_base },
- { "Image", 2, &gf110_disp_base_mthd_image },
- {}
- }
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_base_ofuncs = {
- .base.ctor = nv50_disp_base_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = gf110_disp_dmac_init,
- .base.fini = gf110_disp_dmac_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 1,
- .attach = gf110_disp_dmac_object_attach,
- .detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_ovly_mthd_base = {
- .mthd = 0x0000,
- .data = {
- { 0x0080, 0x665080 },
- { 0x0084, 0x665084 },
- { 0x0088, 0x665088 },
- { 0x008c, 0x66508c },
- { 0x0090, 0x665090 },
- { 0x0094, 0x665094 },
- { 0x00a0, 0x6650a0 },
- { 0x00a4, 0x6650a4 },
- { 0x00b0, 0x6650b0 },
- { 0x00b4, 0x6650b4 },
- { 0x00b8, 0x6650b8 },
- { 0x00c0, 0x6650c0 },
- { 0x00e0, 0x6650e0 },
- { 0x00e4, 0x6650e4 },
- { 0x00e8, 0x6650e8 },
- { 0x0100, 0x665100 },
- { 0x0104, 0x665104 },
- { 0x0108, 0x665108 },
- { 0x010c, 0x66510c },
- { 0x0110, 0x665110 },
- { 0x0118, 0x665118 },
- { 0x011c, 0x66511c },
- { 0x0120, 0x665120 },
- { 0x0124, 0x665124 },
- { 0x0130, 0x665130 },
- { 0x0134, 0x665134 },
- { 0x0138, 0x665138 },
- { 0x013c, 0x66513c },
- { 0x0140, 0x665140 },
- { 0x0144, 0x665144 },
- { 0x0148, 0x665148 },
- { 0x014c, 0x66514c },
- { 0x0150, 0x665150 },
- { 0x0154, 0x665154 },
- { 0x0158, 0x665158 },
- { 0x015c, 0x66515c },
- { 0x0160, 0x665160 },
- { 0x0164, 0x665164 },
- { 0x0168, 0x665168 },
- { 0x016c, 0x66516c },
- { 0x0400, 0x665400 },
- { 0x0408, 0x665408 },
- { 0x040c, 0x66540c },
- { 0x0410, 0x665410 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_ovly_mthd_chan = {
- .name = "Overlay",
- .addr = 0x001000,
- .data = {
- { "Global", 1, &gf110_disp_ovly_mthd_base },
- {}
- }
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_ovly_ofuncs = {
- .base.ctor = nv50_disp_ovly_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = gf110_disp_dmac_init,
- .base.fini = gf110_disp_dmac_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 5,
- .attach = gf110_disp_dmac_object_attach,
- .detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_pioc_init(struct nvkm_object *object)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_pioc *pioc = (void *)object;
- int chid = pioc->base.chid;
- int ret;
-
- ret = nv50_disp_chan_init(&pioc->base);
- if (ret)
- return ret;
-
- /* enable error reporting */
- nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
- /* activate channel */
- nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
- if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
- nv_error(pioc, "init: 0x%08x\n",
- nv_rd32(priv, 0x610490 + (chid * 0x10)));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_pioc *pioc = (void *)object;
- int chid = pioc->base.chid;
-
- nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
- if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
- nv_error(pioc, "timeout: 0x%08x\n",
- nv_rd32(priv, 0x610490 + (chid * 0x10)));
- if (suspend)
- return -EBUSY;
- }
-
- /* disable error reporting and completion notification */
- nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
- nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
- return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_oimm_ofuncs = {
- .base.ctor = nv50_disp_oimm_ctor,
- .base.dtor = nv50_disp_pioc_dtor,
- .base.init = gf110_disp_pioc_init,
- .base.fini = gf110_disp_pioc_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 9,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_curs_ofuncs = {
- .base.ctor = nv50_disp_curs_ctor,
- .base.dtor = nv50_disp_pioc_dtor,
- .base.init = gf110_disp_pioc_init,
- .base.fini = gf110_disp_pioc_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 13,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
- const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
- const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
- const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
- union {
- struct nv04_disp_scanoutpos_v0 v0;
- } *args = data;
- int ret;
-
- nv_ioctl(object, "disp scanoutpos size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
- args->v0.vblanke = (blanke & 0xffff0000) >> 16;
- args->v0.hblanke = (blanke & 0x0000ffff);
- args->v0.vblanks = (blanks & 0xffff0000) >> 16;
- args->v0.hblanks = (blanks & 0x0000ffff);
- args->v0.vtotal = ( total & 0xffff0000) >> 16;
- args->v0.htotal = ( total & 0x0000ffff);
- args->v0.time[0] = ktime_to_ns(ktime_get());
- args->v0.vline = /* vline read locks hline */
- nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->v0.time[1] = ktime_to_ns(ktime_get());
- args->v0.hline =
- nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
- } else
- return ret;
-
- return 0;
-}
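[Editor's note] The two ktime_get() reads above deliberately bracket the vline read (which also latches hline), so a consumer can bound when the scanline counter was sampled. A small sketch of one way a caller might use that data; the struct here only mimics the fields involved (the real layout is struct nv04_disp_scanoutpos_v0) and sample_timestamp_ns() is an illustrative helper:

#include <stdint.h>
#include <stdio.h>

struct scanoutpos_sample {
	int64_t time[2];   /* ns timestamps bracketing the vline read */
	uint16_t vline;    /* scanline at sample time */
};

/* Midpoint of the bracketing timestamps approximates the sample time. */
static int64_t sample_timestamp_ns(const struct scanoutpos_sample *s)
{
	return s->time[0] + (s->time[1] - s->time[0]) / 2;
}

int main(void)
{
	struct scanoutpos_sample s = { .time = { 1000, 3000 }, .vline = 540 };
	printf("line %u sampled at ~%lldns\n", s.vline,
	       (long long)sample_timestamp_ns(&s));
	return 0;
}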
-
-static int
-gf110_disp_main_init(struct nvkm_object *object)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_base *base = (void *)object;
- int ret, i;
- u32 tmp;
-
- ret = nvkm_parent_init(&base->base);
- if (ret)
- return ret;
-
-	/* The segments of code below, which copy values from one register
-	 * to another, appear to inform EVO of the display capabilities or
-	 * something similar.
-	 */
-
- /* ... CRTC caps */
- for (i = 0; i < priv->head.nr; i++) {
- tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
- nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
- tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
- nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
- tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
- nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
- }
-
- /* ... DAC caps */
- for (i = 0; i < priv->dac.nr; i++) {
- tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
- nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
- }
-
- /* ... SOR caps */
- for (i = 0; i < priv->sor.nr; i++) {
- tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
- nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
- }
-
- /* steal display away from vbios, or something like that */
- if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
- nv_wr32(priv, 0x6100ac, 0x00000100);
- nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
- nv_error(priv, "timeout acquiring display\n");
- return -EBUSY;
- }
- }
-
- /* point at display engine memory area (hash table, objects) */
- nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
-
- /* enable supervisor interrupts, disable everything else */
- nv_wr32(priv, 0x610090, 0x00000000);
- nv_wr32(priv, 0x6100a0, 0x00000000);
- nv_wr32(priv, 0x6100b0, 0x00000307);
-
- /* disable underflow reporting, preventing an intermittent issue
- * on some gk104 boards where the production vbios left this
- * setting enabled by default.
- *
- * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
- */
- for (i = 0; i < priv->head.nr; i++)
- nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
-
- return 0;
-}
-
-static int
-gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_base *base = (void *)object;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x6100b0, 0x00000000);
-
- return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-gf110_disp_main_ofuncs = {
- .ctor = nv50_disp_main_ctor,
- .dtor = nv50_disp_main_dtor,
- .init = gf110_disp_main_init,
- .fini = gf110_disp_main_fini,
- .mthd = nv50_disp_main_mthd,
- .ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-gf110_disp_main_oclass[] = {
- { GF110_DISP, &gf110_disp_main_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-gf110_disp_sclass[] = {
- { GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
- { GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
- { GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
- { GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
- { GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-const struct nvkm_event_func
-gf110_disp_vblank_func = {
- .ctor = nvkm_disp_vblank_ctor,
- .init = gf110_disp_vblank_init,
- .fini = gf110_disp_vblank_fini,
-};
-
-static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
- u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
- struct nvbios_outp *info)
-{
- struct nvkm_bios *bios = nvkm_bios(priv);
- struct nvkm_output *outp;
- u16 mask, type;
-
- if (or < 4) {
- type = DCB_OUTPUT_ANALOG;
- mask = 0;
- } else {
- or -= 4;
- switch (ctrl & 0x00000f00) {
- case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
- case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
- case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
- case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
- case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
- case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
- default:
- nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
- return 0x0000;
- }
- }
-
- mask = 0x00c0 & (mask << 6);
- mask |= 0x0001 << or;
- mask |= 0x0100 << head;
-
- list_for_each_entry(outp, &priv->base.outp, head) {
- if ((outp->info.hasht & 0xff) == type &&
- (outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
- ver, hdr, cnt, len, info);
- if (!*data)
- return NULL;
- return outp;
- }
- }
-
- return NULL;
-}
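[Editor's note] The mask that exec_lookup() builds selects a DCB output by connector type, OR index, SOR sublink and head. A standalone sketch of that match, with the bit layout read off the code above (bits 0..3 the OR, bits 6..7 the sublink mask, bits 8..11 the head; hasht's low byte carries the type) — illustrative only, the authoritative encoding lives in the DCB parsing code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool outp_matches(uint16_t hasht, uint16_t hashm, uint16_t type,
			 int or, int head, uint16_t link_mask)
{
	uint16_t mask = 0;

	mask |= 0x00c0 & (link_mask << 6);  /* which SOR sublink(s) */
	mask |= 0x0001 << or;               /* which output resource */
	mask |= 0x0100 << head;             /* which CRTC/head */

	return (hasht & 0xff) == type && (hashm & mask) == mask;
}

int main(void)
{
	/* TMDS (type 2 in DCB terms) on SOR0 link A, driven by head 0 */
	printf("match: %d\n", outp_matches(0x0002, 0x0141, 0x0002, 0, 0, 1));
	return 0;
}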
-
-static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
-{
- struct nvkm_bios *bios = nvkm_bios(priv);
- struct nvkm_output *outp;
- struct nvbios_outp info;
- u8 ver, hdr, cnt, len;
- u32 data, ctrl = 0;
- int or;
-
- for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
- ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
- if (ctrl & (1 << head))
- break;
- }
-
- if (or == 8)
- return NULL;
-
- outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
- if (outp) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = info.script[id],
- .outp = &outp->info,
- .crtc = head,
- .execute = 1,
- };
-
- nvbios_exec(&init);
- }
-
- return outp;
-}
-
-static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
-{
- struct nvkm_bios *bios = nvkm_bios(priv);
- struct nvkm_output *outp;
- struct nvbios_outp info1;
- struct nvbios_ocfg info2;
- u8 ver, hdr, cnt, len;
- u32 data, ctrl = 0;
- int or;
-
- for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
- ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
- if (ctrl & (1 << head))
- break;
- }
-
- if (or == 8)
- return NULL;
-
- outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
- if (!outp)
- return NULL;
-
- switch (outp->info.type) {
- case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
- if (pclk >= 165000)
- *conf |= 0x0100;
- break;
- case DCB_OUTPUT_LVDS:
- *conf = priv->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
- default:
- *conf = 0x00ff;
- break;
- }
-
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
- if (data && id < 0xff) {
- data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
- if (data) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = bios,
- .offset = data,
- .outp = &outp->info,
- .crtc = head,
- .execute = 1,
- };
-
- nvbios_exec(&init);
- }
- }
-
- return outp;
-}
-
-static void
-gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
-{
- exec_script(priv, head, 1);
-}
-
-static void
-gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
-{
- struct nvkm_output *outp = exec_script(priv, head, 2);
-
- /* see note in nv50_disp_intr_unk20_0() */
- if (outp && outp->info.type == DCB_OUTPUT_DP) {
- struct nvkm_output_dp *outpdp = (void *)outp;
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = nvkm_bios(priv),
- .outp = &outp->info,
- .crtc = head,
- .offset = outpdp->info.script[4],
- .execute = 1,
- };
-
- nvbios_exec(&init);
- atomic_set(&outpdp->lt.done, 0);
- }
-}
-
-static void
-gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
-{
- struct nvkm_devinit *devinit = nvkm_devinit(priv);
- u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- if (pclk)
- devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
- nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
-}
-
-static void
-gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
- struct dcb_output *outp)
-{
- const int or = ffs(outp->or) - 1;
- const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
- const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
- const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
- const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
- const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
- const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
- const u32 hoff = (head * 0x800);
- const u32 soff = ( or * 0x800);
- const u32 loff = (link * 0x080) + soff;
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
- u32 clksor = nv_rd32(priv, 0x612300 + soff);
- u32 datarate, link_nr, link_bw, bits;
- u64 ratio, value;
-
- link_nr = hweight32(dpctrl & 0x000f0000);
- link_bw = (clksor & 0x007c0000) >> 18;
- link_bw *= 27000;
-
- /* symbols/hblank - algorithm taken from comments in tegra driver */
- value = vblanke + vactive - vblanks - 7;
- value = value * link_bw;
- do_div(value, pclk);
- value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
- nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
-
- /* symbols/vblank - algorithm taken from comments in tegra driver */
- value = vblanks - vblanke - 25;
- value = value * link_bw;
- do_div(value, pclk);
- value = value - ((36 / link_nr) + 3) - 1;
- nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
-
- /* watermark */
- if ((conf & 0x3c0) == 0x180) bits = 30;
- else if ((conf & 0x3c0) == 0x140) bits = 24;
- else bits = 18;
- datarate = (pclk * bits) / 8;
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(priv, 0x616610 + hoff, value);
-}
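[Editor's note] The watermark computation above is fixed-point: ratio is (roughly) the fraction of link symbols the pixel data occupies, scaled by 100000, and the final value appears to derive a fill level per 64-symbol transfer unit, with bit 27 acting as an enable. A self-contained recreation of the arithmetic so the fixed-point flow is visible; the numbers are illustrative (1080p60, 24bpp, 4 lanes at 2.7Gbps), not taken from any real board:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t symbol = 100000;  /* fixed-point scale */
	const uint32_t TU = 64;          /* transfer unit size */
	uint32_t pclk = 148500, bits = 24;
	uint32_t link_nr = 4, link_bw = 270000;  /* same units as pclk */
	uint64_t ratio, value;
	uint32_t datarate = (pclk * bits) / 8;   /* payload rate */

	ratio = (uint64_t)datarate * symbol;
	ratio /= (uint64_t)link_nr * link_bw;    /* occupancy, x100000 */

	value = (symbol - ratio) * TU;           /* idle symbols per TU... */
	value *= ratio;                          /* ...weighted by occupancy */
	value /= symbol;
	value /= symbol;
	value += 5;
	value |= 0x08000000;                     /* enable bit, per the code */

	/* prints 0x08000014 for these inputs */
	printf("watermark = 0x%08llx\n", (unsigned long long)value);
	return 0;
}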
-
-static void
-gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
-{
- struct nvkm_output *outp;
- u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- u32 conf, addr, data;
-
- outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
- if (!outp)
- return;
-
- /* see note in nv50_disp_intr_unk20_2() */
- if (outp->info.type == DCB_OUTPUT_DP) {
- u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
- switch ((sync & 0x000003c0) >> 6) {
- case 6: pclk = pclk * 30; break;
- case 5: pclk = pclk * 24; break;
- case 2:
- default:
- pclk = pclk * 18;
- break;
- }
-
- if (nvkm_output_dp_train(outp, pclk, true))
- ERR("link not trained before attach\n");
- } else {
- if (priv->sor.magic)
- priv->sor.magic(outp);
- }
-
- exec_clkcmp(priv, head, 0, pclk, &conf);
-
- if (outp->info.type == DCB_OUTPUT_ANALOG) {
- addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
- data = 0x00000000;
- } else {
- addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
- data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
- switch (outp->info.type) {
- case DCB_OUTPUT_TMDS:
- nv_mask(priv, addr, 0x007c0000, 0x00280000);
- break;
- case DCB_OUTPUT_DP:
- gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
- break;
- default:
- break;
- }
- }
-
- nv_mask(priv, addr, 0x00000707, data);
-}
-
-static void
-gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
-{
- u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
- u32 conf;
-
- exec_clkcmp(priv, head, 1, pclk, &conf);
-}
-
-void
-gf110_disp_intr_supervisor(struct work_struct *work)
-{
- struct nv50_disp_priv *priv =
- container_of(work, struct nv50_disp_priv, supervisor);
- struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
- u32 mask[4];
- int head;
-
- nv_debug(priv, "supervisor %d\n", ffs(priv->super));
- for (head = 0; head < priv->head.nr; head++) {
- mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
- nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
- }
-
- if (priv->super & 0x00000001) {
- nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
- for (head = 0; head < priv->head.nr; head++) {
- if (!(mask[head] & 0x00001000))
- continue;
- nv_debug(priv, "supervisor 1.0 - head %d\n", head);
- gf110_disp_intr_unk1_0(priv, head);
- }
- } else
- if (priv->super & 0x00000002) {
- for (head = 0; head < priv->head.nr; head++) {
- if (!(mask[head] & 0x00001000))
- continue;
- nv_debug(priv, "supervisor 2.0 - head %d\n", head);
- gf110_disp_intr_unk2_0(priv, head);
- }
- for (head = 0; head < priv->head.nr; head++) {
- if (!(mask[head] & 0x00010000))
- continue;
- nv_debug(priv, "supervisor 2.1 - head %d\n", head);
- gf110_disp_intr_unk2_1(priv, head);
- }
- for (head = 0; head < priv->head.nr; head++) {
- if (!(mask[head] & 0x00001000))
- continue;
- nv_debug(priv, "supervisor 2.2 - head %d\n", head);
- gf110_disp_intr_unk2_2(priv, head);
- }
- } else
- if (priv->super & 0x00000004) {
- for (head = 0; head < priv->head.nr; head++) {
- if (!(mask[head] & 0x00001000))
- continue;
- nv_debug(priv, "supervisor 3.0 - head %d\n", head);
- gf110_disp_intr_unk4_0(priv, head);
- }
- }
-
- for (head = 0; head < priv->head.nr; head++)
- nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
- nv_wr32(priv, 0x6101d0, 0x80000000);
-}

-
-static void
-gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
-{
- const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
- u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
-
- nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
-
- if (chid == 0) {
- switch (mthd & 0xffc) {
- case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
- impl->mthd.core);
- break;
- default:
- break;
- }
- } else
- if (chid <= 4) {
- switch (mthd & 0xffc) {
- case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
- impl->mthd.base);
- break;
- default:
- break;
- }
- } else
- if (chid <= 8) {
- switch (mthd & 0xffc) {
- case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
- impl->mthd.ovly);
- break;
- default:
- break;
- }
- }
-
- nv_wr32(priv, 0x61009c, (1 << chid));
- nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
-}
-
-void
-gf110_disp_intr(struct nvkm_subdev *subdev)
-{
- struct nv50_disp_priv *priv = (void *)subdev;
- u32 intr = nv_rd32(priv, 0x610088);
- int i;
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(priv, 0x61008c);
- while (stat) {
- int chid = __ffs(stat); stat &= ~(1 << chid);
- nv50_disp_chan_uevent_send(priv, chid);
- nv_wr32(priv, 0x61008c, 1 << chid);
- }
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(priv, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0)
- gf110_disp_intr_error(priv, chid);
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(priv, 0x6100ac);
- if (stat & 0x00000007) {
- priv->super = (stat & 0x00000007);
- schedule_work(&priv->supervisor);
- nv_wr32(priv, 0x6100ac, priv->super);
- stat &= ~0x00000007;
- }
-
- if (stat) {
- nv_info(priv, "unknown intr24 0x%08x\n", stat);
- nv_wr32(priv, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- for (i = 0; i < priv->head.nr; i++) {
- u32 mask = 0x01000000 << i;
- if (mask & intr) {
- u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
- if (stat & 0x00000001)
- nvkm_disp_vblank(&priv->base, i);
- nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
- nv_rd32(priv, 0x6100c0 + (i * 0x800));
- }
- }
-}
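[Editor's note] The completion path above drains a pending-bit register one channel at a time: find the lowest set bit, service it, clear it, repeat. The same idiom in a standalone sketch, using __builtin_ctz in place of the kernel's __ffs(); the 0x61008c write in the driver is the hardware-side acknowledgement this sketch omits:

#include <stdint.h>
#include <stdio.h>

static void drain_pending(uint32_t stat)
{
	while (stat) {
		int chid = __builtin_ctz(stat);  /* lowest pending channel */
		stat &= ~(1u << chid);           /* clear our local copy */
		printf("uevent for channel %d\n", chid);
	}
}

int main(void)
{
	drain_pending(0x00000213);  /* channels 0, 1, 4 and 9 pending */
	return 0;
}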
-
-static int
-gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_disp_priv *priv;
- int heads = nv_rd32(parent, 0x022448);
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gf110_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = gf110_disp_intr;
- INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
- priv->sclass = gf110_disp_sclass;
- priv->head.nr = heads;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gf110_hda_eld;
- priv->sor.hdmi = gf110_hdmi_ctrl;
- return 0;
-}
-
-struct nvkm_oclass *
-gf110_disp_outp_sclass[] = {
- &gf110_sor_dp_impl.base.base,
- NULL
-};
-
-struct nvkm_oclass *
-gf110_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x90),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf110_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &gf110_disp_vblank_func,
- .base.outp = gf110_disp_outp_sclass,
- .mthd.core = &gf110_disp_core_mthd_chan,
- .mthd.base = &gf110_disp_base_mthd_chan,
- .mthd.ovly = &gf110_disp_ovly_mthd_chan,
- .mthd.prev = -0x020000,
- .head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
new file mode 100644
index 000000000000..186fd3ac78f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+
+void
+gf119_disp_vblank_init(struct nv50_disp *disp, int head)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+void
+gf119_disp_vblank_fini(struct nv50_disp *disp, int head)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+static struct nvkm_output *
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
+ u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
+ struct nvkm_output *outp;
+ u16 mask, type;
+
+ if (or < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ or -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
+ return NULL;
+ }
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << or;
+ mask |= 0x0100 << head;
+
+ list_for_each_entry(outp, &disp->base.outp, head) {
+ if ((outp->info.hasht & 0xff) == type &&
+ (outp->info.hashm & mask) == mask) {
+ *data = nvbios_outp_match(bios, outp->info.hasht,
+ outp->info.hashm,
+ ver, hdr, cnt, len, info);
+ if (!*data)
+ return NULL;
+ return outp;
+ }
+ }
+
+ return NULL;
+}
+
+static struct nvkm_output *
+exec_script(struct nv50_disp *disp, int head, int id)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_output *outp;
+ struct nvbios_outp info;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ int or;
+
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (or == 8)
+ return NULL;
+
+ outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return outp;
+}
+
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_output *outp;
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u32 data, ctrl = 0;
+ int or;
+
+ for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+ ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
+ if (ctrl & (1 << head))
+ break;
+ }
+
+ if (or == 8)
+ return NULL;
+
+ outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+ if (!outp)
+ return NULL;
+
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ *conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ *conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ *conf = disp->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ *conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ *conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data && id < 0xff) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = bios,
+ .offset = data,
+ .outp = &outp->info,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ }
+
+ return outp;
+}
+
+static void
+gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
+{
+ exec_script(disp, head, 1);
+}
+
+static void
+gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_output *outp = exec_script(disp, head, 2);
+
+ /* see note in nv50_disp_intr_unk20_0() */
+ if (outp && outp->info.type == DCB_OUTPUT_DP) {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ struct nvbios_init init = {
+ .subdev = subdev,
+ .bios = subdev->device->bios,
+ .outp = &outp->info,
+ .crtc = head,
+ .offset = outpdp->info.script[4],
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ atomic_set(&outpdp->lt.done, 0);
+ }
+}
+
+static void
+gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_devinit *devinit = device->devinit;
+ u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+ if (pclk)
+ nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
+ nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
+ struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const int or = ffs(outp->or) - 1;
+ const u32 ctrl = nvkm_rd32(device, 0x660200 + (or * 0x020));
+ const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
+ const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
+ const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
+ const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
+ const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+ u32 clksor = nvkm_rd32(device, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
+
+ link_nr = hweight32(dpctrl & 0x000f0000);
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
+ /* symbols/hblank - algorithm taken from comments in tegra driver */
+ value = vblanke + vactive - vblanks - 7;
+ value = value * link_bw;
+ do_div(value, pclk);
+ value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
+ nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
+
+ /* symbols/vblank - algorithm taken from comments in tegra driver */
+ value = vblanks - vblanke - 25;
+ value = value * link_bw;
+ do_div(value, pclk);
+ value = value - ((36 / link_nr) + 3) - 1;
+ nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
+
+ /* watermark */
+ if ((conf & 0x3c0) == 0x180) bits = 30;
+ else if ((conf & 0x3c0) == 0x140) bits = 24;
+ else bits = 18;
+ datarate = (pclk * bits) / 8;
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nvkm_wr32(device, 0x616610 + hoff, value);
+}
+
+static void
+gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_output *outp;
+ u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf, addr, data;
+
+ outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
+ if (!outp)
+ return;
+
+ /* see note in nv50_disp_intr_unk20_2() */
+ if (outp->info.type == DCB_OUTPUT_DP) {
+ u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
+ switch ((sync & 0x000003c0) >> 6) {
+ case 6: pclk = pclk * 30; break;
+ case 5: pclk = pclk * 24; break;
+ case 2:
+ default:
+ pclk = pclk * 18;
+ break;
+ }
+
+ if (nvkm_output_dp_train(outp, pclk, true))
+ OUTP_ERR(outp, "link not trained before attach");
+ } else {
+ if (disp->func->sor.magic)
+ disp->func->sor.magic(outp);
+ }
+
+ exec_clkcmp(disp, head, 0, pclk, &conf);
+
+ if (outp->info.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+ data = 0x00000000;
+ } else {
+ addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+ data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ nvkm_mask(device, addr, 0x007c0000, 0x00280000);
+ break;
+ case DCB_OUTPUT_DP:
+ gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nvkm_mask(device, addr, 0x00000707, data);
+}
+
+static void
+gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+ u32 conf;
+
+ exec_clkcmp(disp, head, 1, pclk, &conf);
+}
+
+void
+gf119_disp_intr_supervisor(struct work_struct *work)
+{
+ struct nv50_disp *disp =
+ container_of(work, struct nv50_disp, supervisor);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask[4];
+ int head;
+
+ nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
+ for (head = 0; head < disp->base.head.nr; head++) {
+ mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
+ nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
+ }
+
+ if (disp->super & 0x00000001) {
+ nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+ for (head = 0; head < disp->base.head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
+ gf119_disp_intr_unk1_0(disp, head);
+ }
+ } else
+ if (disp->super & 0x00000002) {
+ for (head = 0; head < disp->base.head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
+ gf119_disp_intr_unk2_0(disp, head);
+ }
+ for (head = 0; head < disp->base.head.nr; head++) {
+ if (!(mask[head] & 0x00010000))
+ continue;
+ nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
+ gf119_disp_intr_unk2_1(disp, head);
+ }
+ for (head = 0; head < disp->base.head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
+ gf119_disp_intr_unk2_2(disp, head);
+ }
+ } else
+ if (disp->super & 0x00000004) {
+ for (head = 0; head < disp->base.head.nr; head++) {
+ if (!(mask[head] & 0x00001000))
+ continue;
+ nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
+ gf119_disp_intr_unk4_0(disp, head);
+ }
+ }
+
+ for (head = 0; head < disp->base.head.nr; head++)
+ nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
+ nvkm_wr32(device, 0x6101d0, 0x80000000);
+}
+
+static void
+gf119_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+ u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
+ u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+
+ nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+ if (chid < ARRAY_SIZE(disp->chan)) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nvkm_wr32(device, 0x61009c, (1 << chid));
+ nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
+void
+gf119_disp_intr(struct nv50_disp *disp)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x610088);
+ int i;
+
+ if (intr & 0x00000001) {
+ u32 stat = nvkm_rd32(device, 0x61008c);
+ while (stat) {
+ int chid = __ffs(stat); stat &= ~(1 << chid);
+ nv50_disp_chan_uevent_send(disp, chid);
+ nvkm_wr32(device, 0x61008c, 1 << chid);
+ }
+ intr &= ~0x00000001;
+ }
+
+ if (intr & 0x00000002) {
+ u32 stat = nvkm_rd32(device, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0)
+ gf119_disp_intr_error(disp, chid);
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nvkm_rd32(device, 0x6100ac);
+ if (stat & 0x00000007) {
+ disp->super = (stat & 0x00000007);
+ schedule_work(&disp->supervisor);
+ nvkm_wr32(device, 0x6100ac, disp->super);
+ stat &= ~0x00000007;
+ }
+
+ if (stat) {
+ nvkm_warn(subdev, "intr24 %08x\n", stat);
+ nvkm_wr32(device, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < disp->base.head.nr; i++) {
+ u32 mask = 0x01000000 << i;
+ if (mask & intr) {
+ u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
+ if (stat & 0x00000001)
+ nvkm_disp_vblank(&disp->base, i);
+ nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
+ nvkm_rd32(device, 0x6100c0 + (i * 0x800));
+ }
+ }
+}
+
+int
+gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+ int index, struct nvkm_disp **pdisp)
+{
+ u32 heads = nvkm_rd32(device, 0x022448);
+ return nv50_disp_new_(func, device, index, heads, pdisp);
+}
+
+static const struct nv50_disp_func
+gf119_disp = {
+ .intr = gf119_disp_intr,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gf119_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gf119_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gf119_hdmi_ctrl,
+};
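[Editor's note] The nv50_disp_func table above (and the gk104/gk110/gm107 ones later in this patch) all follow the same ops-table pattern: per-chip behaviour is a const structure of function pointers and limits, consumed by one shared constructor. A toy sketch of the pattern, with illustrative names only:

#include <stdio.h>

struct disp_func {
	void (*intr)(void);        /* per-chip interrupt handler */
	int dac_nr, sor_nr;        /* per-chip resource counts */
};

static void example_intr(void)
{
	puts("intr");
}

/* One table per chip; the chips here differ only in their entries. */
static const struct disp_func example_disp = {
	.intr   = example_intr,
	.dac_nr = 3,
	.sor_nr = 4,
};

/* A single generic constructor consumes whichever table it is given. */
static int disp_new(const struct disp_func *func)
{
	func->intr();              /* dispatch through the table */
	return func->dac_nr + func->sor_nr;
}

int main(void)
{
	printf("outputs: %d\n", disp_new(&example_disp));
	return 0;
}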
+
+int
+gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gf119_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6f4019ab4e65..a86384b8e388 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -22,247 +22,32 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_core_mthd_head = {
- .mthd = 0x0300,
- .addr = 0x000300,
- .data = {
- { 0x0400, 0x660400 },
- { 0x0404, 0x660404 },
- { 0x0408, 0x660408 },
- { 0x040c, 0x66040c },
- { 0x0410, 0x660410 },
- { 0x0414, 0x660414 },
- { 0x0418, 0x660418 },
- { 0x041c, 0x66041c },
- { 0x0420, 0x660420 },
- { 0x0424, 0x660424 },
- { 0x0428, 0x660428 },
- { 0x042c, 0x66042c },
- { 0x0430, 0x660430 },
- { 0x0434, 0x660434 },
- { 0x0438, 0x660438 },
- { 0x0440, 0x660440 },
- { 0x0444, 0x660444 },
- { 0x0448, 0x660448 },
- { 0x044c, 0x66044c },
- { 0x0450, 0x660450 },
- { 0x0454, 0x660454 },
- { 0x0458, 0x660458 },
- { 0x045c, 0x66045c },
- { 0x0460, 0x660460 },
- { 0x0468, 0x660468 },
- { 0x046c, 0x66046c },
- { 0x0470, 0x660470 },
- { 0x0474, 0x660474 },
- { 0x047c, 0x66047c },
- { 0x0480, 0x660480 },
- { 0x0484, 0x660484 },
- { 0x0488, 0x660488 },
- { 0x048c, 0x66048c },
- { 0x0490, 0x660490 },
- { 0x0494, 0x660494 },
- { 0x0498, 0x660498 },
- { 0x04a0, 0x6604a0 },
- { 0x04b0, 0x6604b0 },
- { 0x04b8, 0x6604b8 },
- { 0x04bc, 0x6604bc },
- { 0x04c0, 0x6604c0 },
- { 0x04c4, 0x6604c4 },
- { 0x04c8, 0x6604c8 },
- { 0x04d0, 0x6604d0 },
- { 0x04d4, 0x6604d4 },
- { 0x04e0, 0x6604e0 },
- { 0x04e4, 0x6604e4 },
- { 0x04e8, 0x6604e8 },
- { 0x04ec, 0x6604ec },
- { 0x04f0, 0x6604f0 },
- { 0x04f4, 0x6604f4 },
- { 0x04f8, 0x6604f8 },
- { 0x04fc, 0x6604fc },
- { 0x0500, 0x660500 },
- { 0x0504, 0x660504 },
- { 0x0508, 0x660508 },
- { 0x050c, 0x66050c },
- { 0x0510, 0x660510 },
- { 0x0514, 0x660514 },
- { 0x0518, 0x660518 },
- { 0x051c, 0x66051c },
- { 0x0520, 0x660520 },
- { 0x0524, 0x660524 },
- { 0x052c, 0x66052c },
- { 0x0530, 0x660530 },
- { 0x054c, 0x66054c },
- { 0x0550, 0x660550 },
- { 0x0554, 0x660554 },
- { 0x0558, 0x660558 },
- { 0x055c, 0x66055c },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_core_mthd_chan = {
- .name = "Core",
- .addr = 0x000000,
- .data = {
- { "Global", 1, &gf110_disp_core_mthd_base },
- { "DAC", 3, &gf110_disp_core_mthd_dac },
- { "SOR", 8, &gf110_disp_core_mthd_sor },
- { "PIOR", 4, &gf110_disp_core_mthd_pior },
- { "HEAD", 4, &gk104_disp_core_mthd_head },
- {}
- }
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_ovly_mthd_base = {
- .mthd = 0x0000,
- .data = {
- { 0x0080, 0x665080 },
- { 0x0084, 0x665084 },
- { 0x0088, 0x665088 },
- { 0x008c, 0x66508c },
- { 0x0090, 0x665090 },
- { 0x0094, 0x665094 },
- { 0x00a0, 0x6650a0 },
- { 0x00a4, 0x6650a4 },
- { 0x00b0, 0x6650b0 },
- { 0x00b4, 0x6650b4 },
- { 0x00b8, 0x6650b8 },
- { 0x00c0, 0x6650c0 },
- { 0x00c4, 0x6650c4 },
- { 0x00e0, 0x6650e0 },
- { 0x00e4, 0x6650e4 },
- { 0x00e8, 0x6650e8 },
- { 0x0100, 0x665100 },
- { 0x0104, 0x665104 },
- { 0x0108, 0x665108 },
- { 0x010c, 0x66510c },
- { 0x0110, 0x665110 },
- { 0x0118, 0x665118 },
- { 0x011c, 0x66511c },
- { 0x0120, 0x665120 },
- { 0x0124, 0x665124 },
- { 0x0130, 0x665130 },
- { 0x0134, 0x665134 },
- { 0x0138, 0x665138 },
- { 0x013c, 0x66513c },
- { 0x0140, 0x665140 },
- { 0x0144, 0x665144 },
- { 0x0148, 0x665148 },
- { 0x014c, 0x66514c },
- { 0x0150, 0x665150 },
- { 0x0154, 0x665154 },
- { 0x0158, 0x665158 },
- { 0x015c, 0x66515c },
- { 0x0160, 0x665160 },
- { 0x0164, 0x665164 },
- { 0x0168, 0x665168 },
- { 0x016c, 0x66516c },
- { 0x0400, 0x665400 },
- { 0x0404, 0x665404 },
- { 0x0408, 0x665408 },
- { 0x040c, 0x66540c },
- { 0x0410, 0x665410 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_ovly_mthd_chan = {
- .name = "Overlay",
- .addr = 0x001000,
- .data = {
- { "Global", 1, &gk104_disp_ovly_mthd_base },
- {}
- }
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gk104_disp = {
+ .intr = gf119_disp_intr,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gk104_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gf119_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
};
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_disp_sclass[] = {
- { GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
- { GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-gk104_disp_main_oclass[] = {
- { GK104_DISP, &gf110_disp_main_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int heads = nv_rd32(parent, 0x022448);
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gk104_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = gf110_disp_intr;
- INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
- priv->sclass = gk104_disp_sclass;
- priv->head.nr = heads;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gf110_hda_eld;
- priv->sor.hdmi = gk104_hdmi_ctrl;
- return 0;
+ return gf119_disp_new_(&gk104_disp, device, index, pdisp);
}
-
-struct nvkm_oclass *
-gk104_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x91),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &gf110_disp_vblank_func,
- .base.outp = gf110_disp_outp_sclass,
- .mthd.core = &gk104_disp_core_mthd_chan,
- .mthd.base = &gf110_disp_base_mthd_chan,
- .mthd.ovly = &gk104_disp_ovly_mthd_chan,
- .mthd.prev = -0x020000,
- .head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index daa4b460a6ba..0d574c7e594a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -22,82 +22,32 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk110_disp_sclass[] = {
- { GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-gk110_disp_main_oclass[] = {
- { GK110_DISP, &gf110_disp_main_ofuncs },
- {}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gk110_disp = {
+ .intr = gf119_disp_intr,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gk110_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gf119_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
};
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int heads = nv_rd32(parent, 0x022448);
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gk110_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = gf110_disp_intr;
- INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
- priv->sclass = gk110_disp_sclass;
- priv->head.nr = heads;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gf110_hda_eld;
- priv->sor.hdmi = gk104_hdmi_ctrl;
- return 0;
+ return gf119_disp_new_(&gk110_disp, device, index, pdisp);
}
-
-struct nvkm_oclass *
-gk110_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x92),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk110_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &gf110_disp_vblank_func,
- .base.outp = gf110_disp_outp_sclass,
- .mthd.core = &gk104_disp_core_mthd_chan,
- .mthd.base = &gf110_disp_base_mthd_chan,
- .mthd.ovly = &gk104_disp_ovly_mthd_chan,
- .mthd.prev = -0x020000,
- .head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
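The gk110 conversion above shows the shape of the whole series: the old per-chipset constructor, which poked values into a priv object at runtime, becomes a static const nv50_disp_func table plus a one-line _new() wrapper. Below is a minimal standalone sketch of that pattern; every name in it (chip_disp_func, disp_new_, gk110ish_*) is invented for illustration and this is not the kernel code.

#include <stdio.h>
#include <stdlib.h>

struct disp;

/* Per-chipset behaviour and parameters, collected in one const table. */
struct chip_disp_func {
	void (*intr)(struct disp *);
	int dac_nr;
	int sor_nr;
};

struct disp {
	const struct chip_disp_func *func;
	int heads;
};

static void gk110ish_intr(struct disp *disp)
{
	printf("intr: %d dacs, %d sors\n",
	       disp->func->dac_nr, disp->func->sor_nr);
}

/* One table per chipset; const, shared, and trivially diffable. */
static const struct chip_disp_func gk110ish_disp = {
	.intr   = gk110ish_intr,
	.dac_nr = 3,
	.sor_nr = 4,
};

/* Generic constructor: no per-chip code, it just records the table. */
static int disp_new_(const struct chip_disp_func *func, int heads,
		     struct disp **pdisp)
{
	struct disp *disp = calloc(1, sizeof(*disp));
	if (!disp)
		return -1;
	disp->func = func;
	disp->heads = heads;
	*pdisp = disp;
	return 0;
}

int main(void)
{
	struct disp *disp;
	if (disp_new_(&gk110ish_disp, 4, &disp) == 0) {
		disp->func->intr(disp);
		free(disp);
	}
	return 0;
}

The payoff is that chipset differences become const data the compiler can keep in .rodata, and adding a chipset means writing one table and one wrapper instead of copying a constructor.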
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 881cc94385a1..b6944142d616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -22,82 +22,32 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_disp_sclass[] = {
- { GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-gm107_disp_main_oclass[] = {
- { GM107_DISP, &gf110_disp_main_ofuncs },
- {}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gm107_disp = {
+ .intr = gf119_disp_intr,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gm107_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gf119_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
};
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int heads = nv_rd32(parent, 0x022448);
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gm107_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = gf110_disp_intr;
- INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
- priv->sclass = gm107_disp_sclass;
- priv->head.nr = heads;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gf110_hda_eld;
- priv->sor.hdmi = gk104_hdmi_ctrl;
- return 0;
+ return gf119_disp_new_(&gm107_disp, device, index, pdisp);
}
-
-struct nvkm_oclass *
-gm107_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x07),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm107_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &gf110_disp_vblank_func,
- .base.outp = gf110_disp_outp_sclass,
- .mthd.core = &gk104_disp_core_mthd_chan,
- .mthd.base = &gf110_disp_base_mthd_chan,
- .mthd.ovly = &gk104_disp_ovly_mthd_chan,
- .mthd.prev = -0x020000,
- .head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 67004f8302b3..30f1987b5b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -22,90 +22,33 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-#include "outpdp.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_disp_sclass[] = {
- { GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
- { GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
- { GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
- { GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
- {}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gm204_disp = {
+ .intr = gf119_disp_intr,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gm204_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm204_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm204_sor_magic,
};
-static struct nvkm_oclass
-gm204_disp_main_oclass[] = {
- { GM204_DISP, &gf110_disp_main_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int heads = nv_rd32(parent, 0x022448);
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, heads,
- "PDISP", "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gm204_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = gf110_disp_intr;
- INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
- priv->sclass = gm204_disp_sclass;
- priv->head.nr = heads;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gf110_hda_eld;
- priv->sor.hdmi = gf110_hdmi_ctrl;
- priv->sor.magic = gm204_sor_magic;
- return 0;
+ return gf119_disp_new_(&gm204_disp, device, index, pdisp);
}
-
-struct nvkm_oclass *
-gm204_disp_outp_sclass[] = {
- &gm204_sor_dp_impl.base.base,
- NULL
-};
-
-struct nvkm_oclass *
-gm204_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x07),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &gf110_disp_vblank_func,
- .base.outp = gm204_disp_outp_sclass,
- .mthd.core = &gk104_disp_core_mthd_chan,
- .mthd.base = &gf110_disp_base_mthd_chan,
- .mthd.ovly = &gk104_disp_ovly_mthd_chan,
- .mthd.prev = -0x020000,
- .head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
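gm204 is the only table in this series with a .sor.magic entry, which illustrates the other half of the convention: slots a chipset does not implement simply stay NULL, and generic callers (see the nv50_disp_outp_*_ wrappers later in this patch) test the pointer and fall back to -ENODEV. A small hypothetical sketch of that guard, with all names invented:

#include <errno.h>
#include <stdio.h>

struct outp_func {
	int (*dp_new)(int index);	/* may be NULL on older chips */
};

static int newer_dp_new(int index)
{
	printf("dp output %d created\n", index);
	return 0;
}

static const struct outp_func older_chip = { .dp_new = NULL };
static const struct outp_func newer_chip = { .dp_new = newer_dp_new };

static int outp_dp_new(const struct outp_func *func, int index)
{
	if (func->dp_new)
		return func->dp_new(index);
	return -ENODEV;	/* feature absent on this chipset */
}

int main(void)
{
	printf("older: %d\n", outp_dp_new(&older_chip, 0));
	printf("newer: %d\n", outp_dp_new(&newer_chip, 0));
	return 0;
}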
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index a45307213f4b..6bc3bf096001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -22,127 +22,34 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gt200_disp_ovly_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x6109a0 },
- { 0x0088, 0x6109c0 },
- { 0x008c, 0x6109c8 },
- { 0x0090, 0x6109b4 },
- { 0x0094, 0x610970 },
- { 0x00a0, 0x610998 },
- { 0x00a4, 0x610964 },
- { 0x00b0, 0x610c98 },
- { 0x00b4, 0x610ca4 },
- { 0x00b8, 0x610cac },
- { 0x00c0, 0x610958 },
- { 0x00e0, 0x6109a8 },
- { 0x00e4, 0x6109d0 },
- { 0x00e8, 0x6109d8 },
- { 0x0100, 0x61094c },
- { 0x0104, 0x610984 },
- { 0x0108, 0x61098c },
- { 0x0800, 0x6109f8 },
- { 0x0808, 0x610a08 },
- { 0x080c, 0x610a10 },
- { 0x0810, 0x610a00 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-gt200_disp_ovly_mthd_chan = {
- .name = "Overlay",
- .addr = 0x000540,
- .data = {
- { "Global", 1, &gt200_disp_ovly_mthd_base },
- {}
- }
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt200_disp_sclass[] = {
- { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
- { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
- { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
- { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
- {}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gt200_disp = {
+ .intr = nv50_disp_intr,
+ .uevent = &nv50_disp_chan_uevent,
+ .super = nv50_disp_intr_supervisor,
+ .root = &gt200_disp_root_oclass,
+ .head.vblank_init = nv50_disp_vblank_init,
+ .head.vblank_fini = nv50_disp_vblank_fini,
+ .head.scanoutpos = nv50_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.external.tmds = nv50_pior_output_new,
+ .outp.external.dp = nv50_pior_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 2,
+ .sor.power = nv50_sor_power,
+ .sor.hdmi = g84_hdmi_ctrl,
+ .pior.nr = 3,
+ .pior.power = nv50_pior_power,
};
-static struct nvkm_oclass
-gt200_disp_main_oclass[] = {
- { GT200_DISP, &nv50_disp_main_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gt200_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nv50_disp_intr;
- INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = gt200_disp_sclass;
- priv->head.nr = 2;
- priv->dac.nr = 3;
- priv->sor.nr = 2;
- priv->pior.nr = 3;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hdmi = g84_hdmi_ctrl;
- priv->pior.power = nv50_pior_power;
- return 0;
+ return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-gt200_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x83),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt200_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &nv50_disp_vblank_func,
- .base.outp = nv50_disp_outp_sclass,
- .mthd.core = &g84_disp_core_mthd_chan,
- .mthd.base = &g84_disp_base_mthd_chan,
- .mthd.ovly = &gt200_disp_ovly_mthd_chan,
- .mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
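The large block deleted from gt200.c above (gt200_disp_ovly_mthd_base and its wrapper) is pure debug data: a sentinel-terminated list of (EVO method, shadow register) pairs that the driver walks when dumping channel state; the rework relocates it rather than dropping the capability. A sketch of how such a table is typically consumed, with the register read faked and the names invented:

#include <stdint.h>
#include <stdio.h>

struct mthd_map { uint16_t mthd; uint32_t addr; };

static const struct mthd_map ovly_map[] = {
	{ 0x0080, 0x000000 },
	{ 0x0084, 0x6109a0 },
	{ 0x0088, 0x6109c0 },
	{ 0, 0 }	/* sentinel, like the {} terminator above */
};

static uint32_t rd32(uint32_t addr)
{
	return addr ^ 0xdeadbeef;	/* stand-in for an MMIO read */
}

static void dump_methods(const struct mthd_map *map)
{
	for (int i = 0; map[i].mthd; i++)
		printf("\t0x%04x: 0x%08x\n", map[i].mthd, rd32(map[i].addr));
}

int main(void)
{
	dump_methods(ovly_map);
	return 0;
}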
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 55f0d3ac591e..94026288ab4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -22,83 +22,36 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_disp_sclass[] = {
- { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
- { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
- { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
- { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
- { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
- {}
-};
-
-static struct nvkm_oclass
-gt215_disp_main_oclass[] = {
- { GT214_DISP, &nv50_disp_main_ofuncs },
- {}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gt215_disp = {
+ .intr = nv50_disp_intr,
+ .uevent = &nv50_disp_chan_uevent,
+ .super = nv50_disp_intr_supervisor,
+ .root = &gt215_disp_root_oclass,
+ .head.vblank_init = nv50_disp_vblank_init,
+ .head.vblank_fini = nv50_disp_vblank_fini,
+ .head.scanoutpos = nv50_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = g94_sor_dp_new,
+ .outp.external.tmds = nv50_pior_output_new,
+ .outp.external.dp = nv50_pior_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gt215_hda_eld,
+ .sor.hdmi = gt215_hdmi_ctrl,
+ .pior.nr = 3,
+ .pior.power = nv50_pior_power,
};
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = gt215_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nv50_disp_intr;
- INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = gt215_disp_sclass;
- priv->head.nr = 2;
- priv->dac.nr = 3;
- priv->sor.nr = 4;
- priv->pior.nr = 3;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->sor.hda_eld = gt215_hda_eld;
- priv->sor.hdmi = gt215_hdmi_ctrl;
- priv->pior.power = nv50_pior_power;
- return 0;
+ return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-gt215_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x85),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &nv50_disp_vblank_func,
- .base.outp = g94_disp_outp_sclass,
- .mthd.core = &g94_disp_core_mthd_chan,
- .mthd.base = &g84_disp_base_mthd_chan,
- .mthd.ovly = &g84_disp_ovly_mthd_chan,
- .mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index b9813d246ba5..af99efbd63f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -33,8 +33,9 @@
#include <nvif/unpack.h>
int
-gf110_hda_eld(NV50_DISP_MTHD_V1)
+gf119_hda_eld(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
union {
struct nv50_disp_sor_hda_eld_v0 v0;
} *args = data;
@@ -42,9 +43,10 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
const u32 hoff = head * 0x800;
int ret, i;
- nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ nvif_ioctl(object, "disp sor hda eld size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ nvif_ioctl(object, "disp sor hda eld vers %d\n",
+ args->v0.version);
if (size > 0x60)
return -E2BIG;
} else
@@ -52,21 +54,29 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
if (size && args->v0.data[0]) {
if (outp->info.type == DCB_OUTPUT_DP) {
- nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001);
- nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x616618 + hoff, 0x8000000c, 0x80000001);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+ if (!(tmp & 0x80000000))
+ break;
+ );
}
- nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
+ nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
- nv_wr32(priv, 0x10ec00 + soff, (i << 8));
- nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
+ nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else {
if (outp->info.type == DCB_OUTPUT_DP) {
- nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000);
- nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x616618 + hoff, 0x80000001, 0x80000000);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+ if (!(tmp & 0x80000000))
+ break;
+ );
}
- nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
+ nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
}
return 0;
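From here on the series also replaces nv_wait() with nvkm_msec(device, 2000, ...): a bounded poll that keeps re-reading a register until a bit clears or the time budget expires. In-tree this is a macro that runs an arbitrary statement body and evaluates to a negative value on timeout; the userspace stand-in below takes a predicate instead and is only meant to show the shape, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll pred() until it returns true or timeout_ms elapses. */
static bool poll_msec(uint64_t timeout_ms, bool (*pred)(void))
{
	uint64_t start = now_ms();
	do {
		if (pred())
			return true;
	} while (now_ms() - start < timeout_ms);
	return false;	/* timed out, like nvkm_msec going negative */
}

static volatile uint32_t fake_reg = 0x80000000;

static bool busy_bit_clear(void)
{
	/* stands in for: !(nvkm_rd32(device, reg) & 0x80000000) */
	fake_reg &= ~0x80000000;	/* pretend the hardware finished */
	return !(fake_reg & 0x80000000);
}

int main(void)
{
	printf("done: %d\n", poll_msec(2000, busy_bit_clear));
	return 0;
}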
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 891d1e7bf7d2..c1590b746f13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -33,15 +33,17 @@
int
gt215_hda_eld(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
union {
struct nv50_disp_sor_hda_eld_v0 v0;
} *args = data;
const u32 soff = outp->or * 0x800;
int ret, i;
- nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ nvif_ioctl(object, "disp sor hda eld size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ nvif_ioctl(object, "disp sor hda eld vers %d\n",
+ args->v0.version);
if (size > 0x60)
return -E2BIG;
} else
@@ -49,20 +51,28 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
if (size && args->v0.data[0]) {
if (outp->info.type == DCB_OUTPUT_DP) {
- nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
- nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+ if (!(tmp & 0x80000000))
+ break;
+ );
}
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
for (; i < 0x60; i++)
- nv_wr32(priv, 0x61c440 + soff, (i << 8));
- nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+ nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
} else {
if (outp->info.type == DCB_OUTPUT_DP) {
- nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000);
- nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x61c1e0 + soff, 0x80000001, 0x80000000);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+ if (!(tmp & 0x80000000))
+ break;
+ );
}
- nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
+ nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
}
return 0;
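Both ELD paths, gf119 and gt215 alike, stream the buffer through a single index/data port, byte i encoded as (i << 8) | byte, then zero-pad the rest of the 0x60-byte window. The access pattern, sketched against a shadow array standing in for the hardware buffer (all names here are invented):

#include <stdint.h>
#include <stdio.h>

static uint8_t eld_shadow[0x60];	/* stands in for the hw buffer */

static void wr_eld_port(uint32_t v)
{
	/* index in bits 15:8, data in bits 7:0 */
	eld_shadow[(v >> 8) & 0xff] = v & 0xff;
}

static void eld_upload(const uint8_t *eld, size_t size)
{
	size_t i;
	for (i = 0; i < size && i < sizeof(eld_shadow); i++)
		wr_eld_port((uint32_t)(i << 8) | eld[i]);
	for (; i < sizeof(eld_shadow); i++)
		wr_eld_port((uint32_t)(i << 8));	/* zero-pad the rest */
}

int main(void)
{
	const uint8_t eld[] = { 0x10, 0x00, 0x08 };
	eld_upload(eld, sizeof(eld));
	printf("%02x %02x %02x %02x\n",
	       eld_shadow[0], eld_shadow[1], eld_shadow[2], eld_shadow[3]);
	return 0;
}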
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index 621cb0b7ff19..ee9e800a8f06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -31,6 +31,7 @@
int
g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
const u32 hoff = (head * 0x800);
union {
struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
u32 ctrl;
int ret;
- nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
- "max_ac_packet %d rekey %d\n",
- args->v0.version, args->v0.state,
- args->v0.max_ac_packet, args->v0.rekey);
+ nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
return -EINVAL;
ctrl = 0x40000000 * !!args->v0.state;
@@ -54,38 +55,38 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
return ret;
if (!(ctrl & 0x40000000)) {
- nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
return 0;
}
/* AVI InfoFrame */
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
- nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
- nv_wr32(priv, 0x616530 + hoff, 0x00000000);
- nv_wr32(priv, 0x616534 + hoff, 0x00000000);
- nv_wr32(priv, 0x616538 + hoff, 0x00000000);
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x616528 + hoff, 0x000d0282);
+ nvkm_wr32(device, 0x61652c + hoff, 0x0000006f);
+ nvkm_wr32(device, 0x616530 + hoff, 0x00000000);
+ nvkm_wr32(device, 0x616534 + hoff, 0x00000000);
+ nvkm_wr32(device, 0x616538 + hoff, 0x00000000);
+ nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
/* Audio InfoFrame */
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
- nv_wr32(priv, 0x61650c + hoff, 0x00000071);
- nv_wr32(priv, 0x616510 + hoff, 0x00000000);
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
+ nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
+ nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
+ nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
- nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+ nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
/* ??? */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+ nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
+ nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
return 0;
}
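The mechanical bulk of these HDMI hunks is the accessor swap: nv_rd32()/nv_wr32()/nv_mask() accepted any nouveau object and walked up to the device, while nvkm_rd32()/nvkm_wr32()/nvkm_mask() take the nvkm_device handle that each function now fetches once at the top. A userspace sketch of the read-modify-write mask helper, with an array posing as the register file:

#include <stdint.h>
#include <stdio.h>

struct fake_device { uint32_t regs[16]; };

static uint32_t rd32(struct fake_device *d, uint32_t addr)
{
	return d->regs[addr / 4];
}

static void wr32(struct fake_device *d, uint32_t addr, uint32_t data)
{
	d->regs[addr / 4] = data;
}

/* Clear the bits in 'mask', then OR in 'data': the nvkm_mask shape. */
static uint32_t mask32(struct fake_device *d, uint32_t addr,
		       uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32(d, addr);
	wr32(d, addr, (temp & ~mask) | data);
	return temp;	/* old value, occasionally useful to callers */
}

int main(void)
{
	struct fake_device dev = { .regs = { [1] = 0x40000123 } };
	mask32(&dev, 4, 0x40000000, 0x00000000);	/* drop the enable bit */
	printf("reg: %08x\n", rd32(&dev, 4));
	return 0;
}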
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
index c28449061bbd..b5af025d3b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -29,8 +29,9 @@
#include <nvif/unpack.h>
int
-gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
+gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
const u32 hoff = (head * 0x800);
union {
struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
u32 ctrl;
int ret;
- nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
- "max_ac_packet %d rekey %d\n",
- args->v0.version, args->v0.state,
- args->v0.max_ac_packet, args->v0.rekey);
+ nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
return -EINVAL;
ctrl = 0x40000000 * !!args->v0.state;
@@ -53,27 +54,27 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
return ret;
if (!(ctrl & 0x40000000)) {
- nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
return 0;
}
/* AVI InfoFrame */
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
- nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
- nv_wr32(priv, 0x616724 + hoff, 0x00000000);
- nv_wr32(priv, 0x616728 + hoff, 0x00000000);
- nv_wr32(priv, 0x61672c + hoff, 0x00000000);
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x61671c + hoff, 0x000d0282);
+ nvkm_wr32(device, 0x616720 + hoff, 0x0000006f);
+ nvkm_wr32(device, 0x616724 + hoff, 0x00000000);
+ nvkm_wr32(device, 0x616728 + hoff, 0x00000000);
+ nvkm_wr32(device, 0x61672c + hoff, 0x00000000);
+ nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
/* ??? InfoFrame? */
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
+ nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
/* HDMI_CTRL */
- nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+ nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index ca34ff81ad7f..110dc19e4f67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -31,6 +31,7 @@
int
gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
const u32 hoff = (head * 0x800);
const u32 hdmi = (head * 0x400);
union {
@@ -39,12 +40,12 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
u32 ctrl;
int ret;
- nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
- "max_ac_packet %d rekey %d\n",
- args->v0.version, args->v0.state,
- args->v0.max_ac_packet, args->v0.rekey);
+ nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
return -EINVAL;
ctrl = 0x40000000 * !!args->v0.state;
@@ -54,30 +55,30 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
return ret;
if (!(ctrl & 0x40000000)) {
- nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
- nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
return 0;
}
/* AVI InfoFrame */
- nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x690008 + hdmi, 0x000d0282);
- nv_wr32(priv, 0x69000c + hdmi, 0x0000006f);
- nv_wr32(priv, 0x690010 + hdmi, 0x00000000);
- nv_wr32(priv, 0x690014 + hdmi, 0x00000000);
- nv_wr32(priv, 0x690018 + hdmi, 0x00000000);
- nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
+ nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
+ nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
+ nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
+ nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
+ nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);
/* ??? InfoFrame? */
- nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x6900cc + hdmi, 0x00000010);
- nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
+ nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
/* ??? */
- nv_wr32(priv, 0x690080 + hdmi, 0x82000000);
+ nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
/* HDMI_CTRL */
- nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+ nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
return 0;
}
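Every infoframe update in these hunks follows the same fence: clear the enable bit, rewrite the header and payload words, then set the enable bit again, so the hardware never scans out a half-written packet. The ordering, reduced to a self-contained sketch with invented register offsets:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];

static void wr32(uint32_t a, uint32_t v) { regs[a / 4] = v; }

static void mask32(uint32_t a, uint32_t m, uint32_t v)
{
	regs[a / 4] = (regs[a / 4] & ~m) | v;
}

/* Disable, rewrite, re-enable: the ordering is the point. */
static void infoframe_update(uint32_t base, const uint32_t *words, int n)
{
	int i;
	mask32(base, 0x00000001, 0x00000000);	/* stop transmitting it */
	for (i = 0; i < n; i++)
		wr32(base + 8 + i * 4, words[i]);	/* header + payload */
	mask32(base, 0x00000001, 0x00000001);	/* re-arm atomically */
}

int main(void)
{
	const uint32_t avi[] = { 0x000d0282, 0x0000006f, 0, 0, 0 };
	infoframe_update(0x00, avi, 5);
	printf("enabled: %u, header: %08x\n", regs[0] & 1, regs[2]);
	return 0;
}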
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index b641c167dcfa..61237dbfa35a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -32,6 +32,7 @@
int
gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
const u32 soff = outp->or * 0x800;
union {
struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -39,12 +40,12 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
u32 ctrl;
int ret;
- nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
- "max_ac_packet %d rekey %d\n",
- args->v0.version, args->v0.state,
- args->v0.max_ac_packet, args->v0.rekey);
+ nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
return -EINVAL;
ctrl = 0x40000000 * !!args->v0.state;
@@ -55,38 +56,38 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
return ret;
if (!(ctrl & 0x40000000)) {
- nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
return 0;
}
/* AVI InfoFrame */
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
- nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
- nv_wr32(priv, 0x61c530 + soff, 0x00000000);
- nv_wr32(priv, 0x61c534 + soff, 0x00000000);
- nv_wr32(priv, 0x61c538 + soff, 0x00000000);
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x61c528 + soff, 0x000d0282);
+ nvkm_wr32(device, 0x61c52c + soff, 0x0000006f);
+ nvkm_wr32(device, 0x61c530 + soff, 0x00000000);
+ nvkm_wr32(device, 0x61c534 + soff, 0x00000000);
+ nvkm_wr32(device, 0x61c538 + soff, 0x00000000);
+ nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
/* Audio InfoFrame */
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
- nv_wr32(priv, 0x61c50c + soff, 0x00000071);
- nv_wr32(priv, 0x61c510 + soff, 0x00000000);
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
+ nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
+ nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
+ nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
- nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+ nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
/* ??? */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+ nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
+ nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index ff09b2659c17..67254ce6f83f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -23,183 +23,63 @@
*/
#include "priv.h"
-#include <core/client.h>
-#include <core/device.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv04_disp_priv {
- struct nvkm_disp base;
-};
-
-static int
-nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
- void *data, u32 size, int head)
+static const struct nvkm_disp_oclass *
+nv04_disp_root(struct nvkm_disp *disp)
{
- const u32 hoff = head * 0x2000;
- union {
- struct nv04_disp_scanoutpos_v0 v0;
- } *args = data;
- u32 line;
- int ret;
-
- nv_ioctl(object, "disp scanoutpos size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
- args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
- args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
- args->v0.vblanke = args->v0.vtotal - 1;
-
- args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
- args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
- args->v0.hblanke = args->v0.htotal - 1;
-
- /*
- * If output is vga instead of digital then vtotal/htotal is
- * invalid so we have to give up and trigger the timestamping
- * fallback in the drm core.
- */
- if (!args->v0.vtotal || !args->v0.htotal)
- return -ENOTSUPP;
-
- args->v0.time[0] = ktime_to_ns(ktime_get());
- line = nv_rd32(priv, 0x600868 + hoff);
- args->v0.time[1] = ktime_to_ns(ktime_get());
- args->v0.hline = (line & 0xffff0000) >> 16;
- args->v0.vline = (line & 0x0000ffff);
- } else
- return ret;
-
- return 0;
+ return &nv04_disp_root_oclass;
}
-static int
-nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- union {
- struct nv04_disp_mthd_v0 v0;
- } *args = data;
- struct nv04_disp_priv *priv = (void *)object->engine;
- int head, ret;
-
- nv_ioctl(object, "disp mthd size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
- args->v0.version, args->v0.method, args->v0.head);
- mthd = args->v0.method;
- head = args->v0.head;
- } else
- return ret;
-
- if (head < 0 || head >= 2)
- return -ENXIO;
-
- switch (mthd) {
- case NV04_DISP_SCANOUTPOS:
- return nv04_disp_scanoutpos(object, priv, data, size, head);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static struct nvkm_ofuncs
-nv04_disp_ofuncs = {
- .ctor = _nvkm_object_ctor,
- .dtor = nvkm_object_destroy,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
- .mthd = nv04_disp_mthd,
- .ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv04_disp_sclass[] = {
- { NV04_DISP, &nv04_disp_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
static void
-nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_disp *disp, int head)
{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
+ struct nvkm_device *device = disp->engine.subdev.device;
+ nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001);
}
static void
-nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_fini(struct nvkm_disp *disp, int head)
{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
+ struct nvkm_device *device = disp->engine.subdev.device;
+ nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000);
}
-static const struct nvkm_event_func
-nv04_disp_vblank_func = {
- .ctor = nvkm_disp_vblank_ctor,
- .init = nv04_disp_vblank_init,
- .fini = nv04_disp_vblank_fini,
-};
-
static void
-nv04_disp_intr(struct nvkm_subdev *subdev)
+nv04_disp_intr(struct nvkm_disp *disp)
{
- struct nv04_disp_priv *priv = (void *)subdev;
- u32 crtc0 = nv_rd32(priv, 0x600100);
- u32 crtc1 = nv_rd32(priv, 0x602100);
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 crtc0 = nvkm_rd32(device, 0x600100);
+ u32 crtc1 = nvkm_rd32(device, 0x602100);
u32 pvideo;
if (crtc0 & 0x00000001) {
- nvkm_disp_vblank(&priv->base, 0);
- nv_wr32(priv, 0x600100, 0x00000001);
+ nvkm_disp_vblank(disp, 0);
+ nvkm_wr32(device, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nvkm_disp_vblank(&priv->base, 1);
- nv_wr32(priv, 0x602100, 0x00000001);
+ nvkm_disp_vblank(disp, 1);
+ nvkm_wr32(device, 0x602100, 0x00000001);
}
- if (nv_device(priv)->chipset >= 0x10 &&
- nv_device(priv)->chipset <= 0x40) {
- pvideo = nv_rd32(priv, 0x8100);
+ if (device->chipset >= 0x10 && device->chipset <= 0x40) {
+ pvideo = nvkm_rd32(device, 0x8100);
if (pvideo & ~0x11)
- nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
- nv_wr32(priv, 0x8100, pvideo);
+ nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
+ nvkm_wr32(device, 0x8100, pvideo);
}
}
-static int
-nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv04_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_disp_func
+nv04_disp = {
+ .intr = nv04_disp_intr,
+ .root = nv04_disp_root,
+ .head.vblank_init = nv04_disp_vblank_init,
+ .head.vblank_fini = nv04_disp_vblank_fini,
+};
- nv_engine(priv)->sclass = nv04_disp_sclass;
- nv_subdev(priv)->intr = nv04_disp_intr;
- return 0;
+int
+nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-nv04_disp_oclass = &(struct nvkm_disp_impl) {
- .base.handle = NV_ENGINE(DISP, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .vblank = &nv04_disp_vblank_func,
-}.base;
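The reworked nv04_disp_intr() above reads one status word per CRTC, dispatches the vblank, and acks by writing the bit back (the hardware register is write-one-to-clear). A sketch of that per-head loop follows; the register layout is invented, and the array stand-in clears the bit directly where real hardware needs the W1C write:

#include <stdint.h>
#include <stdio.h>

#define HEADS 2

static uint32_t intr_status[HEADS] = { 0x00000001, 0x00000000 };

static void disp_vblank(int head)
{
	printf("vblank on head %d\n", head);
}

static void disp_intr(void)
{
	for (int head = 0; head < HEADS; head++) {
		uint32_t stat = intr_status[head];	/* nvkm_rd32(...) */
		if (stat & 0x00000001) {
			disp_vblank(head);
			/* real hw: write the bit back to clear it */
			intr_status[head] &= ~0x00000001;
		}
	}
}

int main(void)
{
	disp_intr();
	return 0;
}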
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 8ba808df24ad..32e73a975b58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -22,1291 +22,158 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
+#include "rootnv50.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
#include <core/enum.h>
-#include <core/handle.h>
-#include <core/ramht.h>
-#include <engine/dmaobj.h>
+#include <core/gpuobj.h>
#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <nvif/class.h>
-#include <nvif/event.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_chan_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int head,
- int length, void **pobject)
-{
- const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
- struct nv50_disp_base *base = (void *)parent;
- struct nv50_disp_chan *chan;
- int chid = impl->chid + head;
- int ret;
-
- if (base->chan & (1 << chid))
- return -EBUSY;
- base->chan |= (1 << chid);
-
- ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
- (1ULL << NVDEV_ENGINE_DMAOBJ),
- length, pobject);
- chan = *pobject;
- if (ret)
- return ret;
- chan->chid = chid;
-
- nv_parent(chan)->object_attach = impl->attach;
- nv_parent(chan)->object_detach = impl->detach;
- return 0;
-}
-
-static void
-nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
-{
- struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
- base->chan &= ~(1 << chan->chid);
- nvkm_namedb_destroy(&chan->base);
-}
-
-static void
-nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
- struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
- nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
- nv_wr32(priv, 0x610020, 0x00000001 << index);
-}
-
-static void
-nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
- struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
- nv_wr32(priv, 0x610020, 0x00000001 << index);
- nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
-}
-
-void
-nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
-{
- struct nvif_notify_uevent_rep {
- } rep;
-
- nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
-}
-
-int
-nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
-{
- struct nv50_disp_dmac *dmac = (void *)object;
- union {
- struct nvif_notify_uevent_req none;
- } *args = data;
- int ret;
-
- if (nvif_unvers(args->none)) {
- notify->size = sizeof(struct nvif_notify_uevent_rep);
- notify->types = 1;
- notify->index = dmac->base.chid;
- return 0;
- }
-
- return ret;
-}
-
-const struct nvkm_event_func
-nv50_disp_chan_uevent = {
- .ctor = nv50_disp_chan_uevent_ctor,
- .init = nv50_disp_chan_uevent_init,
- .fini = nv50_disp_chan_uevent_fini,
-};
-
-int
-nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
- struct nvkm_event **pevent)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- switch (type) {
- case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
- *pevent = &priv->uevent;
- return 0;
- default:
- break;
- }
- return -EINVAL;
-}
-
-int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+static const struct nvkm_disp_oclass *
+nv50_disp_root_(struct nvkm_disp *base)
{
- struct nv50_disp_chan *chan = (void *)object;
- *addr = nv_device_resource_start(nv_device(object), 0) +
- 0x640000 + (chan->chid * 0x1000);
- *size = 0x001000;
- return 0;
+ return nv50_disp(base)->func->root;
}
-u32
-nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_chan *chan = (void *)object;
- return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
-}
-
-void
-nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_chan *chan = (void *)object;
- nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
-}
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
static int
-nv50_disp_dmac_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 name)
-{
- struct nv50_disp_base *base = (void *)parent->parent;
- struct nv50_disp_chan *chan = (void *)parent;
- u32 addr = nv_gpuobj(object)->node->offset;
- u32 chid = chan->chid;
- u32 data = (chid << 28) | (addr << 10) | chid;
- return nvkm_ramht_insert(base->ramht, chid, name, data);
-}
-
-static void
-nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
+nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb, struct nvkm_output **poutp)
{
- struct nv50_disp_base *base = (void *)parent->parent;
- nvkm_ramht_remove(base->ramht, cookie);
+ struct nv50_disp *disp = nv50_disp(base);
+ return disp->func->outp.internal.crt(base, index, dcb, poutp);
}
static int
-nv50_disp_dmac_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 pushbuf, int head,
- int length, void **pobject)
-{
- struct nv50_disp_dmac *dmac;
- int ret;
-
- ret = nv50_disp_chan_create_(parent, engine, oclass, head,
- length, pobject);
- dmac = *pobject;
- if (ret)
- return ret;
-
- dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
- if (!dmac->pushdma)
- return -ENOENT;
-
- switch (nv_mclass(dmac->pushdma)) {
- case 0x0002:
- case 0x003d:
- if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
- return -EINVAL;
-
- switch (dmac->pushdma->target) {
- case NV_MEM_TARGET_VRAM:
- dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-void
-nv50_disp_dmac_dtor(struct nvkm_object *object)
+nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb,
+ struct nvkm_output **poutp)
{
- struct nv50_disp_dmac *dmac = (void *)object;
- nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
- nv50_disp_chan_destroy(&dmac->base);
+ struct nv50_disp *disp = nv50_disp(base);
+ return disp->func->outp.internal.tmds(base, index, dcb, poutp);
}
static int
-nv50_disp_dmac_init(struct nvkm_object *object)
+nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb,
+ struct nvkm_output **poutp)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *dmac = (void *)object;
- int chid = dmac->base.chid;
- int ret;
-
- ret = nv50_disp_chan_init(&dmac->base);
- if (ret)
- return ret;
-
- /* enable error reporting */
- nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
-
- /* initialise channel for dma command submission */
- nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
- nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
- nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
- nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
- nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
-
- /* wait for it to go inactive */
- if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
- nv_error(dmac, "init timeout, 0x%08x\n",
- nv_rd32(priv, 0x610200 + (chid * 0x10)));
- return -EBUSY;
- }
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ return disp->func->outp.internal.lvds(base, index, dcb, poutp);
}
static int
-nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
+nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb, struct nvkm_output **poutp)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *dmac = (void *)object;
- int chid = dmac->base.chid;
-
- /* deactivate channel */
- nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
- nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
- if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
- nv_error(dmac, "fini timeout, 0x%08x\n",
- nv_rd32(priv, 0x610200 + (chid * 0x10)));
- if (suspend)
- return -EBUSY;
- }
-
- /* disable error reporting and completion notifications */
- nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
-
- return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static void
-nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
- const struct nv50_disp_mthd_list *list, int inst)
-{
- struct nvkm_object *disp = nv_object(priv);
- int i;
-
- for (i = 0; list->data[i].mthd; i++) {
- if (list->data[i].addr) {
- u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
- u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
- u32 mthd = list->data[i].mthd + (list->mthd * inst);
- const char *name = list->data[i].name;
- char mods[16];
-
- if (prev != next)
- snprintf(mods, sizeof(mods), "-> 0x%08x", next);
- else
- snprintf(mods, sizeof(mods), "%13c", ' ');
-
- nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
- mthd, prev, mods, name ? " // " : "",
- name ? name : "");
- }
- }
-}
-
-void
-nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
- const struct nv50_disp_mthd_chan *chan)
-{
- struct nvkm_object *disp = nv_object(priv);
- const struct nv50_disp_impl *impl = (void *)disp->oclass;
- const struct nv50_disp_mthd_list *list;
- int i, j;
-
- if (debug > nv_subdev(priv)->debug)
- return;
-
- for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
- u32 base = head * chan->addr;
- for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
- const char *cname = chan->name;
- const char *sname = "";
- char cname_[16], sname_[16];
-
- if (chan->addr) {
- snprintf(cname_, sizeof(cname_), "%s %d",
- chan->name, head);
- cname = cname_;
- }
-
- if (chan->data[i].nr > 1) {
- snprintf(sname_, sizeof(sname_), " - %s %d",
- chan->data[i].name, j);
- sname = sname_;
- }
-
- nv_printk_(disp, debug, "%s%s:\n", cname, sname);
- nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
- list, j);
- }
- }
-}
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x610bb8 },
- { 0x0088, 0x610b9c },
- { 0x008c, 0x000000 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_dac = {
- .mthd = 0x0080,
- .addr = 0x000008,
- .data = {
- { 0x0400, 0x610b58 },
- { 0x0404, 0x610bdc },
- { 0x0420, 0x610828 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_sor = {
- .mthd = 0x0040,
- .addr = 0x000008,
- .data = {
- { 0x0600, 0x610b70 },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_pior = {
- .mthd = 0x0040,
- .addr = 0x000008,
- .data = {
- { 0x0700, 0x610b80 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_head = {
- .mthd = 0x0400,
- .addr = 0x000540,
- .data = {
- { 0x0800, 0x610ad8 },
- { 0x0804, 0x610ad0 },
- { 0x0808, 0x610a48 },
- { 0x080c, 0x610a78 },
- { 0x0810, 0x610ac0 },
- { 0x0814, 0x610af8 },
- { 0x0818, 0x610b00 },
- { 0x081c, 0x610ae8 },
- { 0x0820, 0x610af0 },
- { 0x0824, 0x610b08 },
- { 0x0828, 0x610b10 },
- { 0x082c, 0x610a68 },
- { 0x0830, 0x610a60 },
- { 0x0834, 0x000000 },
- { 0x0838, 0x610a40 },
- { 0x0840, 0x610a24 },
- { 0x0844, 0x610a2c },
- { 0x0848, 0x610aa8 },
- { 0x084c, 0x610ab0 },
- { 0x0860, 0x610a84 },
- { 0x0864, 0x610a90 },
- { 0x0868, 0x610b18 },
- { 0x086c, 0x610b20 },
- { 0x0870, 0x610ac8 },
- { 0x0874, 0x610a38 },
- { 0x0880, 0x610a58 },
- { 0x0884, 0x610a9c },
- { 0x08a0, 0x610a70 },
- { 0x08a4, 0x610a50 },
- { 0x08a8, 0x610ae0 },
- { 0x08c0, 0x610b28 },
- { 0x08c4, 0x610b30 },
- { 0x08c8, 0x610b40 },
- { 0x08d4, 0x610b38 },
- { 0x08d8, 0x610b48 },
- { 0x08dc, 0x610b50 },
- { 0x0900, 0x610a18 },
- { 0x0904, 0x610ab8 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_core_mthd_chan = {
- .name = "Core",
- .addr = 0x000000,
- .data = {
- { "Global", 1, &nv50_disp_core_mthd_base },
- { "DAC", 3, &nv50_disp_core_mthd_dac },
- { "SOR", 2, &nv50_disp_core_mthd_sor },
- { "PIOR", 3, &nv50_disp_core_mthd_pior },
- { "HEAD", 2, &nv50_disp_core_mthd_head },
- {}
- }
-};
-
-int
-nv50_disp_core_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv50_disp_core_channel_dma_v0 v0;
- } *args = data;
- struct nv50_disp_dmac *mast;
- int ret;
-
- nv_ioctl(parent, "create disp core channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create disp core channel dma vers %d "
- "pushbuf %08x\n",
- args->v0.version, args->v0.pushbuf);
- } else
- return ret;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
- 0, sizeof(*mast), (void **)&mast);
- *pobject = nv_object(mast);
- if (ret)
- return ret;
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ if (disp->func->outp.internal.dp)
+ return disp->func->outp.internal.dp(base, index, dcb, poutp);
+ return -ENODEV;
}
static int
-nv50_disp_core_init(struct nvkm_object *object)
+nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb,
+ struct nvkm_output **poutp)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *mast = (void *)object;
- int ret;
-
- ret = nv50_disp_chan_init(&mast->base);
- if (ret)
- return ret;
-
- /* enable error reporting */
- nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
-
- /* attempt to unstick channel from some unknown state */
- if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
- nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
- if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
- nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
-
- /* initialise channel for dma command submission */
- nv_wr32(priv, 0x610204, mast->push);
- nv_wr32(priv, 0x610208, 0x00010000);
- nv_wr32(priv, 0x61020c, 0x00000000);
- nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x640000, 0x00000000);
- nv_wr32(priv, 0x610200, 0x01000013);
-
- /* wait for it to go inactive */
- if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
- nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
- return -EBUSY;
- }
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ if (disp->func->outp.external.tmds)
+ return disp->func->outp.external.tmds(base, index, dcb, poutp);
+ return -ENODEV;
}
static int
-nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_dmac *mast = (void *)object;
-
- /* deactivate channel */
- nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
- nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
- if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
- nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
- if (suspend)
- return -EBUSY;
- }
-
- /* disable error reporting and completion notifications */
- nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
-
- return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_core_ofuncs = {
- .base.ctor = nv50_disp_core_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = nv50_disp_core_init,
- .base.fini = nv50_disp_core_fini,
- .base.map = nv50_disp_chan_map,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 0,
- .attach = nv50_disp_dmac_object_attach,
- .detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x0008c4 },
- { 0x0088, 0x0008d0 },
- { 0x008c, 0x0008dc },
- { 0x0090, 0x0008e4 },
- { 0x0094, 0x610884 },
- { 0x00a0, 0x6108a0 },
- { 0x00a4, 0x610878 },
- { 0x00c0, 0x61086c },
- { 0x00e0, 0x610858 },
- { 0x00e4, 0x610860 },
- { 0x00e8, 0x6108ac },
- { 0x00ec, 0x6108b4 },
- { 0x0100, 0x610894 },
- { 0x0110, 0x6108bc },
- { 0x0114, 0x61088c },
- {}
- }
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_image = {
- .mthd = 0x0400,
- .addr = 0x000000,
- .data = {
- { 0x0800, 0x6108f0 },
- { 0x0804, 0x6108fc },
- { 0x0808, 0x61090c },
- { 0x080c, 0x610914 },
- { 0x0810, 0x610904 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_base_mthd_chan = {
- .name = "Base",
- .addr = 0x000540,
- .data = {
- { "Global", 1, &nv50_disp_base_mthd_base },
- { "Image", 2, &nv50_disp_base_mthd_image },
- {}
- }
-};
-
-int
-nv50_disp_base_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
+ struct dcb_output *dcb, struct nvkm_output **poutp)
{
- union {
- struct nv50_disp_base_channel_dma_v0 v0;
- } *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- nv_ioctl(parent, "create disp base channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create disp base channel dma vers %d "
- "pushbuf %08x head %d\n",
- args->v0.version, args->v0.pushbuf, args->v0.head);
- if (args->v0.head > priv->head.nr)
- return -EINVAL;
- } else
- return ret;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
- args->v0.head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ if (disp->func->outp.external.dp)
+ return disp->func->outp.external.dp(base, index, dcb, poutp);
+ return -ENODEV;
}
-struct nv50_disp_chan_impl
-nv50_disp_base_ofuncs = {
- .base.ctor = nv50_disp_base_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = nv50_disp_dmac_init,
- .base.fini = nv50_disp_dmac_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 1,
- .attach = nv50_disp_dmac_object_attach,
- .detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-nv50_disp_ovly_mthd_base = {
- .mthd = 0x0000,
- .addr = 0x000000,
- .data = {
- { 0x0080, 0x000000 },
- { 0x0084, 0x0009a0 },
- { 0x0088, 0x0009c0 },
- { 0x008c, 0x0009c8 },
- { 0x0090, 0x6109b4 },
- { 0x0094, 0x610970 },
- { 0x00a0, 0x610998 },
- { 0x00a4, 0x610964 },
- { 0x00c0, 0x610958 },
- { 0x00e0, 0x6109a8 },
- { 0x00e4, 0x6109d0 },
- { 0x00e8, 0x6109d8 },
- { 0x0100, 0x61094c },
- { 0x0104, 0x610984 },
- { 0x0108, 0x61098c },
- { 0x0800, 0x6109f8 },
- { 0x0808, 0x610a08 },
- { 0x080c, 0x610a10 },
- { 0x0810, 0x610a00 },
- {}
- }
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_ovly_mthd_chan = {
- .name = "Overlay",
- .addr = 0x000540,
- .data = {
- { "Global", 1, &nv50_disp_ovly_mthd_base },
- {}
- }
-};
-
-int
-nv50_disp_ovly_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv50_disp_overlay_channel_dma_v0 v0;
- } *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create disp overlay channel dma vers %d "
- "pushbuf %08x head %d\n",
- args->v0.version, args->v0.pushbuf, args->v0.head);
- if (args->v0.head > priv->head.nr)
- return -EINVAL;
- } else
- return ret;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
- args->v0.head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_ovly_ofuncs = {
- .base.ctor = nv50_disp_ovly_ctor,
- .base.dtor = nv50_disp_dmac_dtor,
- .base.init = nv50_disp_dmac_init,
- .base.fini = nv50_disp_dmac_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 3,
- .attach = nv50_disp_dmac_object_attach,
- .detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_pioc_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int head,
- int length, void **pobject)
-{
- return nv50_disp_chan_create_(parent, engine, oclass, head,
- length, pobject);
-}
-
-void
-nv50_disp_pioc_dtor(struct nvkm_object *object)
-{
- struct nv50_disp_pioc *pioc = (void *)object;
- nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
-nv50_disp_pioc_init(struct nvkm_object *object)
+static void
+nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_pioc *pioc = (void *)object;
- int chid = pioc->base.chid;
- int ret;
-
- ret = nv50_disp_chan_init(&pioc->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
- if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
- nv_error(pioc, "timeout0: 0x%08x\n",
- nv_rd32(priv, 0x610200 + (chid * 0x10)));
- return -EBUSY;
- }
-
- nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
- if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
- nv_error(pioc, "timeout1: 0x%08x\n",
- nv_rd32(priv, 0x610200 + (chid * 0x10)));
- return -EBUSY;
- }
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ disp->func->head.vblank_fini(disp, head);
}
-static int
-nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_pioc *pioc = (void *)object;
- int chid = pioc->base.chid;
-
- nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
- if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
- nv_error(pioc, "timeout: 0x%08x\n",
- nv_rd32(priv, 0x610200 + (chid * 0x10)));
- if (suspend)
- return -EBUSY;
- }
-
- return nv50_disp_chan_fini(&pioc->base, suspend);
+ struct nv50_disp *disp = nv50_disp(base);
+ disp->func->head.vblank_init(disp, head);
}
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-int
-nv50_disp_oimm_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void
+nv50_disp_intr_(struct nvkm_disp *base)
{
- union {
- struct nv50_disp_overlay_v0 v0;
- } *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- nv_ioctl(parent, "create disp overlay size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create disp overlay vers %d head %d\n",
- args->v0.version, args->v0.head);
- if (args->v0.head > priv->head.nr)
- return -EINVAL;
- } else
- return ret;
-
- ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ disp->func->intr(disp);
}
-struct nv50_disp_chan_impl
-nv50_disp_oimm_ofuncs = {
- .base.ctor = nv50_disp_oimm_ctor,
- .base.dtor = nv50_disp_pioc_dtor,
- .base.init = nv50_disp_pioc_init,
- .base.fini = nv50_disp_pioc_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 5,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-int
-nv50_disp_curs_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void *
+nv50_disp_dtor_(struct nvkm_disp *base)
{
- union {
- struct nv50_disp_cursor_v0 v0;
- } *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- nv_ioctl(parent, "create disp cursor size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create disp cursor vers %d head %d\n",
- args->v0.version, args->v0.head);
- if (args->v0.head > priv->head.nr)
- return -EINVAL;
- } else
- return ret;
-
- ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
+ struct nv50_disp *disp = nv50_disp(base);
+ nvkm_event_fini(&disp->uevent);
+ return disp;
}
-struct nv50_disp_chan_impl
-nv50_disp_curs_ofuncs = {
- .base.ctor = nv50_disp_curs_ctor,
- .base.dtor = nv50_disp_pioc_dtor,
- .base.init = nv50_disp_pioc_init,
- .base.fini = nv50_disp_pioc_fini,
- .base.ntfy = nv50_disp_chan_ntfy,
- .base.map = nv50_disp_chan_map,
- .base.rd32 = nv50_disp_chan_rd32,
- .base.wr32 = nv50_disp_chan_wr32,
- .chid = 7,
+static const struct nvkm_disp_func
+nv50_disp_ = {
+ .dtor = nv50_disp_dtor_,
+ .intr = nv50_disp_intr_,
+ .root = nv50_disp_root_,
+ .outp.internal.crt = nv50_disp_outp_internal_crt_,
+ .outp.internal.tmds = nv50_disp_outp_internal_tmds_,
+ .outp.internal.lvds = nv50_disp_outp_internal_lvds_,
+ .outp.internal.dp = nv50_disp_outp_internal_dp_,
+ .outp.external.tmds = nv50_disp_outp_external_tmds_,
+ .outp.external.dp = nv50_disp_outp_external_dp_,
+ .head.vblank_init = nv50_disp_vblank_init_,
+ .head.vblank_fini = nv50_disp_vblank_fini_,
};
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
int
-nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+ int index, int heads, struct nvkm_disp **pdisp)
{
- const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
- const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
- const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540));
- union {
- struct nv04_disp_scanoutpos_v0 v0;
- } *args = data;
+ struct nv50_disp *disp;
int ret;
- nv_ioctl(object, "disp scanoutpos size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
- args->v0.vblanke = (blanke & 0xffff0000) >> 16;
- args->v0.hblanke = (blanke & 0x0000ffff);
- args->v0.vblanks = (blanks & 0xffff0000) >> 16;
- args->v0.hblanks = (blanks & 0x0000ffff);
- args->v0.vtotal = ( total & 0xffff0000) >> 16;
- args->v0.htotal = ( total & 0x0000ffff);
- args->v0.time[0] = ktime_to_ns(ktime_get());
- args->v0.vline = /* vline read locks hline */
- nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->v0.time[1] = ktime_to_ns(ktime_get());
- args->v0.hline =
- nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
- } else
- return ret;
-
- return 0;
-}
-
-int
-nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
- union {
- struct nv50_disp_mthd_v0 v0;
- struct nv50_disp_mthd_v1 v1;
- } *args = data;
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nvkm_output *outp = NULL;
- struct nvkm_output *temp;
- u16 type, mask = 0;
- int head, ret;
-
- if (mthd != NV50_DISP_MTHD)
- return -EINVAL;
-
- nv_ioctl(object, "disp mthd size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
- args->v0.version, args->v0.method, args->v0.head);
- mthd = args->v0.method;
- head = args->v0.head;
- } else
- if (nvif_unpack(args->v1, 1, 1, true)) {
- nv_ioctl(object, "disp mthd vers %d mthd %02x "
- "type %04x mask %04x\n",
- args->v1.version, args->v1.method,
- args->v1.hasht, args->v1.hashm);
- mthd = args->v1.method;
- type = args->v1.hasht;
- mask = args->v1.hashm;
- head = ffs((mask >> 8) & 0x0f) - 1;
- } else
- return ret;
-
- if (head < 0 || head >= priv->head.nr)
- return -ENXIO;
-
- if (mask) {
- list_for_each_entry(temp, &priv->base.outp, head) {
- if ((temp->info.hasht == type) &&
- (temp->info.hashm & mask) == mask) {
- outp = temp;
- break;
- }
- }
- if (outp == NULL)
- return -ENXIO;
- }
-
- switch (mthd) {
- case NV50_DISP_SCANOUTPOS:
- return impl->head.scanoutpos(object, priv, data, size, head);
- default:
- break;
- }
-
- switch (mthd * !!outp) {
- case NV50_DISP_MTHD_V1_DAC_PWR:
- return priv->dac.power(object, priv, data, size, head, outp);
- case NV50_DISP_MTHD_V1_DAC_LOAD:
- return priv->dac.sense(object, priv, data, size, head, outp);
- case NV50_DISP_MTHD_V1_SOR_PWR:
- return priv->sor.power(object, priv, data, size, head, outp);
- case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
- if (!priv->sor.hda_eld)
- return -ENODEV;
- return priv->sor.hda_eld(object, priv, data, size, head, outp);
- case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
- if (!priv->sor.hdmi)
- return -ENODEV;
- return priv->sor.hdmi(object, priv, data, size, head, outp);
- case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
- union {
- struct nv50_disp_sor_lvds_script_v0 v0;
- } *args = data;
- nv_ioctl(object, "disp sor lvds script size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor lvds script "
- "vers %d name %04x\n",
- args->v0.version, args->v0.script);
- priv->sor.lvdsconf = args->v0.script;
- return 0;
- } else
- return ret;
- }
- break;
- case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
- struct nvkm_output_dp *outpdp = (void *)outp;
- union {
- struct nv50_disp_sor_dp_pwr_v0 v0;
- } *args = data;
- nv_ioctl(object, "disp sor dp pwr size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
- args->v0.version, args->v0.state);
- if (args->v0.state == 0) {
- nvkm_notify_put(&outpdp->irq);
- ((struct nvkm_output_dp_impl *)nv_oclass(outp))
- ->lnk_pwr(outpdp, 0);
- atomic_set(&outpdp->lt.done, 0);
- return 0;
- } else
- if (args->v0.state != 0) {
- nvkm_output_dp_train(&outpdp->base, 0, true);
- return 0;
- }
- } else
- return ret;
- }
- break;
- case NV50_DISP_MTHD_V1_PIOR_PWR:
- if (!priv->pior.power)
- return -ENODEV;
- return priv->pior.power(object, priv, data, size, head, outp);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-int
-nv50_disp_main_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_base *base;
- int ret;
+ if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_WORK(&disp->supervisor, func->super);
+ disp->func = func;
+ *pdisp = &disp->base;
- ret = nvkm_parent_create(parent, engine, oclass, 0,
- priv->sclass, 0, &base);
- *pobject = nv_object(base);
+ ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
if (ret)
return ret;
- return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
- &base->ramht);
+ return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
}
void
-nv50_disp_main_dtor(struct nvkm_object *object)
-{
- struct nv50_disp_base *base = (void *)object;
- nvkm_ramht_ref(NULL, &base->ramht);
- nvkm_parent_destroy(&base->base);
-}
-
-static int
-nv50_disp_main_init(struct nvkm_object *object)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_base *base = (void *)object;
- int ret, i;
- u32 tmp;
-
- ret = nvkm_parent_init(&base->base);
- if (ret)
- return ret;
-
- /* The below segments of code copying values from one register to
- * another appear to inform EVO of the display capabilities or
- * something similar. NFI what the 0x614004 caps are for..
- */
- tmp = nv_rd32(priv, 0x614004);
- nv_wr32(priv, 0x610184, tmp);
-
- /* ... CRTC caps */
- for (i = 0; i < priv->head.nr; i++) {
- tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
- nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
- tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
- nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
- tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
- nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
- tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
- nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
- }
-
- /* ... DAC caps */
- for (i = 0; i < priv->dac.nr; i++) {
- tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
- nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
- }
-
- /* ... SOR caps */
- for (i = 0; i < priv->sor.nr; i++) {
- tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
- nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
- }
-
- /* ... PIOR caps */
- for (i = 0; i < priv->pior.nr; i++) {
- tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
- nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
- }
-
- /* steal display away from vbios, or something like that */
- if (nv_rd32(priv, 0x610024) & 0x00000100) {
- nv_wr32(priv, 0x610024, 0x00000100);
- nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
- nv_error(priv, "timeout acquiring display\n");
- return -EBUSY;
- }
- }
-
- /* point at display engine memory area (hash table, objects) */
- nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
-
- /* enable supervisor interrupts, disable everything else */
- nv_wr32(priv, 0x61002c, 0x00000370);
- nv_wr32(priv, 0x610028, 0x00000000);
- return 0;
-}
-
-static int
-nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv50_disp_base *base = (void *)object;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x610024, 0x00000000);
- nv_wr32(priv, 0x610020, 0x00000000);
-
- return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-nv50_disp_main_ofuncs = {
- .ctor = nv50_disp_main_ctor,
- .dtor = nv50_disp_main_dtor,
- .init = nv50_disp_main_init,
- .fini = nv50_disp_main_fini,
- .mthd = nv50_disp_main_mthd,
- .ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv50_disp_main_oclass[] = {
- { NV50_DISP, &nv50_disp_main_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-nv50_disp_sclass[] = {
- { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
- { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
- { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
- { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
- { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
- {}
-};
-
-/*******************************************************************************
- * Display context, tracks instmem allocation and prevents more than one
- * client using the display hardware at any time.
- ******************************************************************************/
-
-static int
-nv50_disp_data_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_disp_priv *priv = (void *)engine;
- struct nvkm_engctx *ectx;
- int ret = -EBUSY;
-
- /* no context needed for channel objects... */
- if (nv_mclass(parent) != NV_DEVICE) {
- atomic_inc(&parent->refcount);
- *pobject = parent;
- return 1;
- }
-
- /* allocate display hardware to client */
- mutex_lock(&nv_subdev(priv)->mutex);
- if (list_empty(&nv_engine(priv)->contexts)) {
- ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
- 0x10000, NVOBJ_FLAG_HEAP, &ectx);
- *pobject = nv_object(ectx);
- }
- mutex_unlock(&nv_subdev(priv)->mutex);
- return ret;
-}
-
-struct nvkm_oclass
-nv50_disp_cclass = {
- .handle = NV_ENGCTX(DISP, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_disp_data_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
- },
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_mask(disp, 0x61002c, (4 << head), 0);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x61002c, (4 << head), 0);
}
-static void
-nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
+void
+nv50_disp_vblank_init(struct nv50_disp *disp, int head)
{
- struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
- nv_mask(disp, 0x61002c, (4 << head), (4 << head));
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
}
-const struct nvkm_event_func
-nv50_disp_vblank_func = {
- .ctor = nvkm_disp_vblank_ctor,
- .init = nv50_disp_vblank_init,
- .fini = nv50_disp_vblank_fini,
-};
-
static const struct nvkm_enum
nv50_disp_intr_error_type[] = {
{ 3, "ILLEGAL_MTHD" },
@@ -1323,70 +190,46 @@ nv50_disp_intr_error_code[] = {
};
static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
- struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
- u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
- u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
+ u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
u32 code = (addr & 0x00ff0000) >> 16;
u32 type = (addr & 0x00007000) >> 12;
u32 mthd = (addr & 0x00000ffc);
const struct nvkm_enum *ec, *et;
- char ecunk[6], etunk[6];
et = nvkm_enum_find(nv50_disp_intr_error_type, type);
- if (!et)
- snprintf(etunk, sizeof(etunk), "UNK%02X", type);
-
ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
- if (!ec)
- snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
- nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
- et ? et->name : etunk, ec ? ec->name : ecunk,
- chid, mthd, data);
+ nvkm_error(subdev,
+ "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
+ type, et ? et->name : "", code, ec ? ec->name : "",
+ chid, mthd, data);
- if (chid == 0) {
- switch (mthd) {
- case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
- impl->mthd.core);
- break;
- default:
- break;
- }
- } else
- if (chid <= 2) {
- switch (mthd) {
- case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
- impl->mthd.base);
- break;
- default:
- break;
- }
- } else
- if (chid <= 4) {
+ if (chid < ARRAY_SIZE(disp->chan)) {
switch (mthd) {
case 0x0080:
- nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
- impl->mthd.ovly);
+ nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}
- nv_wr32(priv, 0x610020, 0x00010000 << chid);
- nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+ nvkm_wr32(device, 0x610020, 0x00010000 << chid);
+ nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *info)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_output *outp;
u16 mask, type;
@@ -1403,7 +246,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
default:
- nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
return NULL;
}
or -= 4;
@@ -1412,9 +255,9 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
type = 0x0010;
mask = 0;
switch (ctrl & 0x00000f00) {
- case 0x00000000: type |= priv->pior.type[or]; break;
+ case 0x00000000: type |= disp->pior.type[or]; break;
default:
- nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+ nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
return NULL;
}
}
@@ -1423,7 +266,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;
- list_for_each_entry(outp, &priv->base.outp, head) {
+ list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
*data = nvbios_outp_match(bios, outp->info.hasht,
@@ -1439,9 +282,11 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
}
static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
+exec_script(struct nv50_disp *disp, int head, int id)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct nvkm_output *outp;
struct nvbios_outp info;
u8 ver, hdr, cnt, len;
@@ -1450,27 +295,27 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
- ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+ ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
/* SOR */
if (!(ctrl & (1 << head))) {
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
+ if (device->chipset < 0x90 ||
+ device->chipset == 0x92 ||
+ device->chipset == 0xa0) {
reg = 0x610b74;
} else {
reg = 0x610798;
}
- for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
- ctrl = nv_rd32(priv, reg + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+ ctrl = nvkm_rd32(device, reg + (i * 8));
i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
- ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+ ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
i += 8;
}
@@ -1478,10 +323,10 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
return NULL;
i--;
- outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+ outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
if (outp) {
struct nvbios_init init = {
- .subdev = nv_subdev(priv),
+ .subdev = subdev,
.bios = bios,
.offset = info.script[id],
.outp = &outp->info,
@@ -1496,9 +341,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
}
static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct nvkm_output *outp;
struct nvbios_outp info1;
struct nvbios_ocfg info2;
@@ -1508,27 +355,27 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
- ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+ ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
/* SOR */
if (!(ctrl & (1 << head))) {
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
+ if (device->chipset < 0x90 ||
+ device->chipset == 0x92 ||
+ device->chipset == 0xa0) {
reg = 0x610b70;
} else {
reg = 0x610794;
}
- for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
- ctrl = nv_rd32(priv, reg + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+ ctrl = nvkm_rd32(device, reg + (i * 8));
i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
- ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+ for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+ ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
i += 8;
}
@@ -1536,7 +383,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
return NULL;
i--;
- outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+ outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
if (!outp)
return NULL;
@@ -1548,7 +395,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = priv->sor.lvdsconf;
+ *conf = disp->sor.lvdsconf;
break;
case DCB_OUTPUT_DP:
*conf = (ctrl & 0x00000f00) >> 8;
@@ -1568,7 +415,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
struct nvbios_init init = {
- .subdev = nv_subdev(priv),
+ .subdev = subdev,
.bios = bios,
.offset = data,
.outp = &outp->info,
@@ -1584,15 +431,16 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
}
static void
-nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
- exec_script(priv, head, 1);
+ exec_script(disp, head, 1);
}
static void
-nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
{
- struct nvkm_output *outp = exec_script(priv, head, 2);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_output *outp = exec_script(disp, head, 2);
/* the binary driver does this outside of the supervisor handling
* (after the third supervisor from a detach). we (currently?)
@@ -1608,10 +456,10 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
* in a blank screen (SOR_PWR off/on can restore it)
*/
if (outp && outp->info.type == DCB_OUTPUT_DP) {
- struct nvkm_output_dp *outpdp = (void *)outp;
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
struct nvbios_init init = {
- .subdev = nv_subdev(priv),
- .bios = nvkm_bios(priv),
+ .subdev = subdev,
+ .bios = subdev->device->bios,
.outp = &outp->info,
.crtc = head,
.offset = outpdp->info.script[4],
@@ -1624,29 +472,32 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
}
static void
-nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
{
- struct nvkm_devinit *devinit = nvkm_devinit(priv);
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_devinit *devinit = device->devinit;
+ u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
if (pclk)
- devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+ nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
}
static void
-nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
+nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
struct dcb_output *outp, u32 pclk)
{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
const u32 soff = ( or * 0x800);
const u32 loff = (link * 0x080) + soff;
- const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
const u32 symbol = 100000;
- const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
- const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
- const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
- u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
- u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
+ const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
+ const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
+ u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+ u32 clksor = nvkm_rd32(device, 0x614300 + soff);
int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
int TU, VTUi, VTUf, VTUa;
u64 link_data_rate, link_ratio, unk;
@@ -1662,14 +513,14 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
value = value * link_bw;
do_div(value, pclk);
value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
- nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
+ nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
/* symbols/vblank - algorithm taken from comments in tegra driver */
value = vblanks - vblanke - 25;
value = value * link_bw;
do_div(value, pclk);
value = value - ((36 / link_nr) + 3) - 1;
- nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
+ nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
/* watermark / activesym */
if ((ctrl & 0xf0000) == 0x60000) bits = 30;
@@ -1734,7 +585,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
}
if (!bestTU) {
- nv_error(priv, "unable to find suitable dp config\n");
+ nvkm_error(subdev, "unable to find suitable dp config\n");
return;
}
@@ -1745,22 +596,23 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
do_div(unk, symbol);
unk += 6;
- nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
- nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
bestVTUf << 16 |
bestVTUi << 8 | unk);
}
static void
-nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
struct nvkm_output *outp;
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
u32 hval, hreg = 0x614200 + (head * 0x800);
u32 oval, oreg;
u32 mask, conf;
- outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+ outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
if (!outp)
return;
@@ -1787,10 +639,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
u32 ctrl, datarate;
if (outp->info.location == 0) {
- ctrl = nv_rd32(priv, 0x610794 + soff);
+ ctrl = nvkm_rd32(device, 0x610794 + soff);
soff = 1;
} else {
- ctrl = nv_rd32(priv, 0x610b80 + soff);
+ ctrl = nvkm_rd32(device, 0x610b80 + soff);
soff = 2;
}
@@ -1804,10 +656,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
}
if (nvkm_output_dp_train(outp, datarate / soff, true))
- ERR("link not trained before attach\n");
+ OUTP_ERR(outp, "link not trained before attach");
}
- exec_clkcmp(priv, head, 0, pclk, &conf);
+ exec_clkcmp(disp, head, 0, pclk, &conf);
if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
@@ -1817,7 +669,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
} else
if (!outp->info.location) {
if (outp->info.type == DCB_OUTPUT_DP)
- nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
+ nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
hval = 0x00000000;
@@ -1829,8 +681,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
mask = 0x00000707;
}
- nv_mask(priv, hreg, 0x0000000f, hval);
- nv_mask(priv, oreg, mask, oval);
+ nvkm_mask(device, hreg, 0x0000000f, hval);
+ nvkm_mask(device, oreg, mask, oval);
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -1842,10 +694,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
* programmed for DisplayPort.
*/
static void
-nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
struct dcb_output *outp)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
const int link = !(outp->sorconf.link & 1);
const int or = ffs(outp->or) - 1;
const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1854,166 +707,136 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
u8 ver, hdr;
if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
- nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
}
static void
-nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
struct nvkm_output *outp;
- u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
u32 conf;
- outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+ outp = exec_clkcmp(disp, head, 1, pclk, &conf);
if (!outp)
return;
if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
- nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
+ nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
}
void
nv50_disp_intr_supervisor(struct work_struct *work)
{
- struct nv50_disp_priv *priv =
- container_of(work, struct nv50_disp_priv, supervisor);
- struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
- u32 super = nv_rd32(priv, 0x610030);
+ struct nv50_disp *disp =
+ container_of(work, struct nv50_disp, supervisor);
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 super = nvkm_rd32(device, 0x610030);
int head;
- nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+ nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
- if (priv->super & 0x00000010) {
- nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
- for (head = 0; head < priv->head.nr; head++) {
+ if (disp->super & 0x00000010) {
+ nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+ for (head = 0; head < disp->base.head.nr; head++) {
if (!(super & (0x00000020 << head)))
continue;
if (!(super & (0x00000080 << head)))
continue;
- nv50_disp_intr_unk10_0(priv, head);
+ nv50_disp_intr_unk10_0(disp, head);
}
} else
- if (priv->super & 0x00000020) {
- for (head = 0; head < priv->head.nr; head++) {
+ if (disp->super & 0x00000020) {
+ for (head = 0; head < disp->base.head.nr; head++) {
if (!(super & (0x00000080 << head)))
continue;
- nv50_disp_intr_unk20_0(priv, head);
+ nv50_disp_intr_unk20_0(disp, head);
}
- for (head = 0; head < priv->head.nr; head++) {
+ for (head = 0; head < disp->base.head.nr; head++) {
if (!(super & (0x00000200 << head)))
continue;
- nv50_disp_intr_unk20_1(priv, head);
+ nv50_disp_intr_unk20_1(disp, head);
}
- for (head = 0; head < priv->head.nr; head++) {
+ for (head = 0; head < disp->base.head.nr; head++) {
if (!(super & (0x00000080 << head)))
continue;
- nv50_disp_intr_unk20_2(priv, head);
+ nv50_disp_intr_unk20_2(disp, head);
}
} else
- if (priv->super & 0x00000040) {
- for (head = 0; head < priv->head.nr; head++) {
+ if (disp->super & 0x00000040) {
+ for (head = 0; head < disp->base.head.nr; head++) {
if (!(super & (0x00000080 << head)))
continue;
- nv50_disp_intr_unk40_0(priv, head);
+ nv50_disp_intr_unk40_0(disp, head);
}
}
- nv_wr32(priv, 0x610030, 0x80000000);
+ nvkm_wr32(device, 0x610030, 0x80000000);
}
void
-nv50_disp_intr(struct nvkm_subdev *subdev)
+nv50_disp_intr(struct nv50_disp *disp)
{
- struct nv50_disp_priv *priv = (void *)subdev;
- u32 intr0 = nv_rd32(priv, 0x610020);
- u32 intr1 = nv_rd32(priv, 0x610024);
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x610020);
+ u32 intr1 = nvkm_rd32(device, 0x610024);
while (intr0 & 0x001f0000) {
u32 chid = __ffs(intr0 & 0x001f0000) - 16;
- nv50_disp_intr_error(priv, chid);
+ nv50_disp_intr_error(disp, chid);
intr0 &= ~(0x00010000 << chid);
}
while (intr0 & 0x0000001f) {
u32 chid = __ffs(intr0 & 0x0000001f);
- nv50_disp_chan_uevent_send(priv, chid);
+ nv50_disp_chan_uevent_send(disp, chid);
intr0 &= ~(0x00000001 << chid);
}
if (intr1 & 0x00000004) {
- nvkm_disp_vblank(&priv->base, 0);
- nv_wr32(priv, 0x610024, 0x00000004);
- intr1 &= ~0x00000004;
+ nvkm_disp_vblank(&disp->base, 0);
+ nvkm_wr32(device, 0x610024, 0x00000004);
}
if (intr1 & 0x00000008) {
- nvkm_disp_vblank(&priv->base, 1);
- nv_wr32(priv, 0x610024, 0x00000008);
- intr1 &= ~0x00000008;
+ nvkm_disp_vblank(&disp->base, 1);
+ nvkm_wr32(device, 0x610024, 0x00000008);
}
if (intr1 & 0x00000070) {
- priv->super = (intr1 & 0x00000070);
- schedule_work(&priv->supervisor);
- nv_wr32(priv, 0x610024, priv->super);
- intr1 &= ~0x00000070;
- }
-}
+ disp->super = (intr1 & 0x00000070);
+ schedule_work(&disp->supervisor);
+ nvkm_wr32(device, 0x610024, disp->super);
+ }
+}
+
+static const struct nv50_disp_func
+nv50_disp = {
+ .intr = nv50_disp_intr,
+ .uevent = &nv50_disp_chan_uevent,
+ .super = nv50_disp_intr_supervisor,
+ .root = &nv50_disp_root_oclass,
+ .head.vblank_init = nv50_disp_vblank_init,
+ .head.vblank_fini = nv50_disp_vblank_fini,
+ .head.scanoutpos = nv50_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.external.tmds = nv50_pior_output_new,
+ .outp.external.dp = nv50_pior_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 2,
+ .sor.power = nv50_sor_power,
+ .pior.nr = 3,
+ .pior.power = nv50_pior_power,
+};
-static int
-nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- struct nv50_disp_priv *priv;
- int ret;
-
- ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
- "display", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
- if (ret)
- return ret;
-
- nv_engine(priv)->sclass = nv50_disp_main_oclass;
- nv_engine(priv)->cclass = &nv50_disp_cclass;
- nv_subdev(priv)->intr = nv50_disp_intr;
- INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
- priv->sclass = nv50_disp_sclass;
- priv->head.nr = 2;
- priv->dac.nr = 3;
- priv->sor.nr = 2;
- priv->pior.nr = 3;
- priv->dac.power = nv50_dac_power;
- priv->dac.sense = nv50_dac_sense;
- priv->sor.power = nv50_sor_power;
- priv->pior.power = nv50_pior_power;
- return 0;
+ return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
}
-
-struct nvkm_oclass *
-nv50_disp_outp_sclass[] = {
- &nv50_pior_dp_impl.base.base,
- NULL
-};
-
-struct nvkm_oclass *
-nv50_disp_oclass = &(struct nv50_disp_impl) {
- .base.base.handle = NV_ENGINE(DISP, 0x50),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_disp_ctor,
- .dtor = _nvkm_disp_dtor,
- .init = _nvkm_disp_init,
- .fini = _nvkm_disp_fini,
- },
- .base.vblank = &nv50_disp_vblank_func,
- .base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv50_disp_core_mthd_chan,
- .mthd.base = &nv50_disp_base_mthd_chan,
- .mthd.ovly = &nv50_disp_ovly_mthd_chan,
- .mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
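
The pattern that recurs throughout this rewrite: struct nv50_disp embeds its nvkm_disp base, the nv50_disp() macro (defined in nv50.h below) recovers the derived type with container_of(), and every hook then dispatches through a const per-chipset function table instead of per-object ofuncs. The following is a minimal, self-contained userspace sketch of that embed/downcast/dispatch idiom — the names base_disp, derived_disp, disp_func and to_derived() are illustrative stand-ins, not the kernel's types:

#include <stdio.h>
#include <stddef.h>

/* stand-ins for nvkm_disp / nv50_disp / nv50_disp_func */
struct base_disp { int index; };

struct derived_disp;

struct disp_func {
	void (*intr)(struct derived_disp *);
};

struct derived_disp {
	const struct disp_func *func;
	struct base_disp base;	/* embedded base, as in struct nv50_disp */
};

/* same shape as the nv50_disp() macro: container_of on the embedded base */
#define to_derived(p) \
	((struct derived_disp *)((char *)(p) - offsetof(struct derived_disp, base)))

static void derived_intr(struct derived_disp *disp)
{
	printf("intr on disp %d\n", disp->base.index);
}

static const struct disp_func derived_func = {
	.intr = derived_intr,
};

/* thin wrapper, like nv50_disp_intr_(): downcast, then dispatch */
static void base_intr(struct base_disp *base)
{
	struct derived_disp *disp = to_derived(base);
	disp->func->intr(disp);
}

int main(void)
{
	struct derived_disp disp = { .func = &derived_func, .base = { .index = 0 } };
	base_intr(&disp.base);
	return 0;
}

Keeping the table const and per-chipset lets many chipsets share one set of wrapper functions while varying only the table contents, which is exactly what the nv50_disp_/nv50_disp pair above does.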
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index b4ed620070fa..aecebd8717e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,17 +1,18 @@
#ifndef __NV50_DISP_H__
#define __NV50_DISP_H__
+#define nv50_disp(p) container_of((p), struct nv50_disp, base)
#include "priv.h"
struct nvkm_output;
struct nvkm_output_dp;
#define NV50_DISP_MTHD_ struct nvkm_object *object, \
- struct nv50_disp_priv *priv, void *data, u32 size
+ struct nv50_disp *disp, void *data, u32 size
#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
-struct nv50_disp_priv {
+struct nv50_disp {
+ const struct nv50_disp_func *func;
struct nvkm_disp base;
- struct nvkm_oclass *sclass;
struct work_struct supervisor;
u32 super;
@@ -19,208 +20,98 @@ struct nv50_disp_priv {
struct nvkm_event uevent;
struct {
- int nr;
- } head;
- struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
- int (*sense)(NV50_DISP_MTHD_V1);
- } dac;
- struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
- int (*hda_eld)(NV50_DISP_MTHD_V1);
- int (*hdmi)(NV50_DISP_MTHD_V1);
u32 lvdsconf;
- void (*magic)(struct nvkm_output *);
} sor;
+
struct {
- int nr;
- int (*power)(NV50_DISP_MTHD_V1);
u8 type[3];
} pior;
-};
-struct nv50_disp_impl {
- struct nvkm_disp_impl base;
- struct {
- const struct nv50_disp_mthd_chan *core;
- const struct nv50_disp_mthd_chan *base;
- const struct nv50_disp_mthd_chan *ovly;
- int prev;
- } mthd;
- struct {
- int (*scanoutpos)(NV50_DISP_MTHD_V0);
- } head;
+ struct nv50_disp_chan *chan[17];
};
-int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
+int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
-int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
int nv50_dac_power(NV50_DISP_MTHD_V1);
int nv50_dac_sense(NV50_DISP_MTHD_V1);
int gt215_hda_eld(NV50_DISP_MTHD_V1);
-int gf110_hda_eld(NV50_DISP_MTHD_V1);
+int gf119_hda_eld(NV50_DISP_MTHD_V1);
int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gf119_hdmi_ctrl(NV50_DISP_MTHD_V1);
int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
int nv50_sor_power(NV50_DISP_MTHD_V1);
int nv50_pior_power(NV50_DISP_MTHD_V1);
-#include <core/parent.h>
-
-struct nv50_disp_base {
- struct nvkm_parent base;
- struct nvkm_ramht *ramht;
- u32 chan;
-};
-
-struct nv50_disp_chan_impl {
- struct nvkm_ofuncs base;
- int chid;
- int (*attach)(struct nvkm_object *, struct nvkm_object *, u32);
- void (*detach)(struct nvkm_object *, int);
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+ int index, int heads, struct nvkm_disp **);
+int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+ int index, struct nvkm_disp **);
+
+struct nv50_disp_func_outp {
+ int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (* tv)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (* dp)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
};
-#include <core/namedb.h>
+struct nv50_disp_func {
+ void (*intr)(struct nv50_disp *);
-struct nv50_disp_chan {
- struct nvkm_namedb base;
- int chid;
-};
+ const struct nvkm_event_func *uevent;
+ void (*super)(struct work_struct *);
-int nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-int nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
-u32 nv50_disp_chan_rd32(struct nvkm_object *, u64);
-void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
- struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
-
-extern const struct nvkm_event_func gf110_disp_chan_uevent;
-
-#define nv50_disp_chan_init(a) \
- nvkm_namedb_init(&(a)->base)
-#define nv50_disp_chan_fini(a,b) \
- nvkm_namedb_fini(&(a)->base, (b))
-
-struct nv50_disp_dmac {
- struct nv50_disp_chan base;
- struct nvkm_dmaobj *pushdma;
- u32 push;
-};
+ const struct nvkm_disp_oclass *root;
-void nv50_disp_dmac_dtor(struct nvkm_object *);
+ struct {
+ void (*vblank_init)(struct nv50_disp *, int head);
+ void (*vblank_fini)(struct nv50_disp *, int head);
+ int (*scanoutpos)(NV50_DISP_MTHD_V0);
+ } head;
-struct nv50_disp_pioc {
- struct nv50_disp_chan base;
-};
+ struct {
+ const struct nv50_disp_func_outp internal;
+ const struct nv50_disp_func_outp external;
+ } outp;
-void nv50_disp_pioc_dtor(struct nvkm_object *);
+ struct {
+ int nr;
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*sense)(NV50_DISP_MTHD_V1);
+ } dac;
-struct nv50_disp_mthd_list {
- u32 mthd;
- u32 addr;
struct {
- u32 mthd;
- u32 addr;
- const char *name;
- } data[];
-};
+ int nr;
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*hda_eld)(NV50_DISP_MTHD_V1);
+ int (*hdmi)(NV50_DISP_MTHD_V1);
+ void (*magic)(struct nvkm_output *);
+ } sor;
-struct nv50_disp_mthd_chan {
- const char *name;
- u32 addr;
struct {
- const char *name;
int nr;
- const struct nv50_disp_mthd_list *mthd;
- } data[];
+ int (*power)(NV50_DISP_MTHD_V1);
+ } pior;
};
-extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
-int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
-extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
-int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
-int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
-int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
-int nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv50_disp_main_dtor(struct nvkm_object *);
-extern struct nvkm_omthds nv50_disp_main_omthds[];
-extern struct nvkm_oclass nv50_disp_cclass;
-void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
- const struct nv50_disp_mthd_chan *);
+void nv50_disp_vblank_init(struct nv50_disp *, int);
+void nv50_disp_vblank_fini(struct nv50_disp *, int);
+void nv50_disp_intr(struct nv50_disp *);
void nv50_disp_intr_supervisor(struct work_struct *);
-void nv50_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func nv50_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
-extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
-extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
-
-extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
-
-extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
-extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
-extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
-extern struct nvkm_oclass gf110_disp_cclass;
-void gf110_disp_intr_supervisor(struct work_struct *);
-void gf110_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func gf110_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
-
-extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
-extern struct nvkm_oclass *nv50_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl g94_sor_dp_impl;
-int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
-extern struct nvkm_oclass *g94_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
-int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-extern struct nvkm_oclass *gf110_disp_outp_sclass[];
-
-void gm204_sor_magic(struct nvkm_output *outp);
-extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
+void gf119_disp_vblank_init(struct nv50_disp *, int);
+void gf119_disp_vblank_fini(struct nv50_disp *, int);
+void gf119_disp_intr(struct nv50_disp *);
+void gf119_disp_intr_supervisor(struct work_struct *);
#endif
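
One ordering detail of nv50_disp_new_() is easy to miss in the diff: *pdisp is published before nvkm_disp_ctor() runs, so if construction fails the caller's normal teardown path can still find and free the partially built object through the base pointer. A hedged sketch of the same allocate/publish/construct ordering, with hypothetical names (disp_new, disp_ctor) standing in for the kernel functions:

#include <stdlib.h>

struct base_disp { int heads; };

struct derived_disp {
	struct base_disp base;	/* first member, so freeing the base frees all */
	int super;
};

/* stand-in for nvkm_disp_ctor(); may fail after the caller already
 * holds a pointer to the embedded base */
static int disp_ctor(struct base_disp *base, int heads)
{
	if (heads <= 0)
		return -1;
	base->heads = heads;
	return 0;
}

static int disp_new(int heads, struct base_disp **pdisp)
{
	struct derived_disp *disp;

	if (!(disp = calloc(1, sizeof(*disp))))
		return -1;		/* -ENOMEM in the kernel */
	*pdisp = &disp->base;		/* publish first, as nv50_disp_new_() does */
	return disp_ctor(&disp->base, heads);
}

int main(void)
{
	struct base_disp *disp = NULL;
	int ret = disp_new(2, &disp);

	/* safe on success or failure: the pointer was published first */
	free(disp);
	return ret ? 1 : 0;
}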
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index c0aac7e20d45..54a4ae8d66c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Ilia Mirkin
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,19 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ilia Mirkin
+ * Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"
-struct nvkm_oclass *
-nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x4c),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv44_mc_init,
- .fini = _nvkm_mc_fini,
- },
- .intr = nv04_mc_intr,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_oimm_oclass = {
+ .base.oclass = G82_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 5,
+};
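
The new per-chipset files that follow (oimmgf119.c, oimmgk104.c, oimmgt215.c) are near-duplicates of this one: the shared nv50_disp_oimm_new() constructor is reused, while the exposed class id, the pioc func table, and the channel index vary per chipset. A small sketch of that descriptor-table style, assuming made-up class ids (0x0001, 0x0002) in place of the real G82_DISP_OVERLAY/GF110_DISP_OVERLAY values and trimmed stand-in types:

#include <stdio.h>

/* illustrative shape of nv50_disp_pioc_oclass; fields trimmed */
struct pioc_oclass {
	unsigned oclass;	/* exposed class id (placeholders below) */
	int (*ctor)(int chid);	/* shared constructor */
	int chid;		/* channel index, a per-chipset difference */
};

static int oimm_new(int chid)
{
	printf("new overlay-immediate channel, chid %d\n", chid);
	return 0;
}

static const struct pioc_oclass g84_oimm = {
	.oclass = 0x0001,	/* placeholder for G82_DISP_OVERLAY */
	.ctor = oimm_new,
	.chid = 5,
};

static const struct pioc_oclass gf119_oimm = {
	.oclass = 0x0002,	/* placeholder for GF110_DISP_OVERLAY */
	.ctor = oimm_new,
	.chid = 9,
};

int main(void)
{
	g84_oimm.ctor(g84_oimm.chid);
	gf119_oimm.ctor(gf119_oimm.chid);
	return 0;
}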
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
new file mode 100644
index 000000000000..c658db54afc5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_oimm_oclass = {
+ .base.oclass = GF110_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
new file mode 100644
index 000000000000..b1fde8c125d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_oimm_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+ .chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
new file mode 100644
index 000000000000..f4e7eb3d1177
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_oimm_oclass = {
+ .base.oclass = GT214_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
new file mode 100644
index 000000000000..cd888a1e443c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv50_disp_overlay_v0 v0;
+ } *args = data;
+ struct nvkm_object *parent = oclass->parent;
+ struct nv50_disp *disp = root->disp;
+ int head, ret;
+
+ nvif_ioctl(parent, "create disp overlay size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > disp->base.head.nr)
+ return -EINVAL;
+ head = args->v0.head;
+ } else
+ return ret;
+
+ return nv50_disp_chan_new_(func, mthd, root, chid + head,
+ head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_oimm_oclass = {
+ .base.oclass = NV50_DISP_OVERLAY,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+ .chid = 5,
+};
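
nv50_disp_oimm_new() above validates a versioned argument blob before touching it: unpack, check the interface revision, then range-check the head index. A minimal stand-alone equivalent of that gate, assuming a v0-style layout; the real nvif_unpack() is a macro that also performs the size check and assigns ret:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

struct overlay_args_v0 {
	uint8_t version;
	uint8_t head;
	uint8_t pad[6];
};

static int oimm_new(const void *data, size_t size, int nr_heads)
{
	struct overlay_args_v0 args;

	if (size < sizeof(args))
		return -ENOSYS;         /* blob too small for v0 layout */
	memcpy(&args, data, sizeof(args));
	if (args.version != 0)
		return -ENOSYS;         /* unknown interface revision */
	if (args.head >= nr_heads)
		return -EINVAL;         /* head index out of range */
	printf("create overlay on head %d\n", args.head);
	return 0;
}

int main(void)
{
	struct overlay_args_v0 args = { .version = 0, .head = 1 };
	return oimm_new(&args, sizeof(args), 2);
}
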
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 9224bcbf0159..bbe5ec0dedb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,121 +22,66 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
-#include "priv.h"
#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/i2c.h>
-int
-_nvkm_output_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_output_fini(struct nvkm_output *outp)
{
- struct nvkm_output *outp = (void *)object;
- nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
- return nvkm_object_fini(&outp->base, suspend);
+ if (outp->func->fini)
+ outp->func->fini(outp);
}
-int
-_nvkm_output_init(struct nvkm_object *object)
+void
+nvkm_output_init(struct nvkm_output *outp)
{
- struct nvkm_output *outp = (void *)object;
- int ret = nvkm_object_init(&outp->base);
- if (ret == 0)
- nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
- return 0;
+ if (outp->func->init)
+ outp->func->init(outp);
}
void
-_nvkm_output_dtor(struct nvkm_object *object)
+nvkm_output_del(struct nvkm_output **poutp)
{
- struct nvkm_output *outp = (void *)object;
- list_del(&outp->head);
- nvkm_object_ref(NULL, (void *)&outp->conn);
- nvkm_object_destroy(&outp->base);
+ struct nvkm_output *outp = *poutp;
+ if (outp && !WARN_ON(!outp->func)) {
+ if (outp->func->dtor)
+ *poutp = outp->func->dtor(outp);
+ kfree(*poutp);
+ *poutp = NULL;
+ }
}
-int
-nvkm_output_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass,
- struct dcb_output *dcbE, int index,
- int length, void **pobject)
+void
+nvkm_output_ctor(const struct nvkm_output_func *func, struct nvkm_disp *disp,
+ int index, struct dcb_output *dcbE, struct nvkm_output *outp)
{
- struct nvkm_disp *disp = nvkm_disp(parent);
- struct nvkm_bios *bios = nvkm_bios(parent);
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvbios_connE connE;
- struct nvkm_output *outp;
- u8 ver, hdr;
- u32 data;
- int ret;
+ struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
- ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
- outp = *pobject;
- if (ret)
- return ret;
-
- outp->info = *dcbE;
+ outp->func = func;
+ outp->disp = disp;
outp->index = index;
+ outp->info = *dcbE;
+ outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
outp->or = ffs(outp->info.or) - 1;
- DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
- dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
- dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
- dcbE->bus, dcbE->heads);
-
- if (outp->info.type != DCB_OUTPUT_DP)
- outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
- else
- outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
- outp->edid = outp->port;
-
- data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
- if (!data) {
- DBG("vbios connector data not found\n");
- memset(&connE, 0x00, sizeof(connE));
- connE.type = DCB_CONNECTOR_NONE;
- }
-
- ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
- &connE, outp->info.connector,
- (struct nvkm_object **)&outp->conn);
- if (ret < 0) {
- ERR("error %d creating connector, disabling\n", ret);
- return ret;
- }
-
- list_add_tail(&outp->head, &disp->outp);
- return 0;
+ OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
+ "edid %x bus %d head %x",
+ outp->info.type, outp->info.location, outp->info.or,
+ outp->info.type >= 2 ? outp->info.sorconf.link : 0,
+ outp->info.connector, outp->info.i2c_index,
+ outp->info.bus, outp->info.heads);
}
int
-_nvkm_output_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *dcbE, u32 index,
- struct nvkm_object **pobject)
+nvkm_output_new_(const struct nvkm_output_func *func,
+ struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_output **poutp)
{
- struct nvkm_output *outp;
- int ret;
-
- ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
- *pobject = nv_object(outp);
- if (ret)
- return ret;
+ if (!(*poutp = kzalloc(sizeof(**poutp), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_output_ctor(func, disp, index, dcbE, *poutp);
return 0;
}
-
-struct nvkm_oclass *
-nvkm_output_oclass = &(struct nvkm_output_impl) {
- .base = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_output_ctor,
- .dtor = _nvkm_output_dtor,
- .init = _nvkm_output_init,
- .fini = _nvkm_output_fini,
- },
- },
-}.base;
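
The outp.c rework splits construction in two: nvkm_output_ctor() initializes caller-provided memory, so subclasses that embed struct nvkm_output can allocate the larger object themselves, while nvkm_output_new_() is the convenience path that kzallocs and delegates. A stand-alone sketch of that two-phase pattern under hypothetical names:

#include <stdlib.h>
#include <errno.h>

struct output {
	int index;
};

static void output_ctor(int index, struct output *outp)
{
	outp->index = index;            /* init caller-provided memory */
}

static int output_new(int index, struct output **poutp)
{
	if (!(*poutp = calloc(1, sizeof(**poutp))))
		return -ENOMEM;
	output_ctor(index, *poutp);     /* delegate to the in-place ctor */
	return 0;
}

struct output_dp {                      /* subclass embeds the base */
	struct output base;
	int aux_id;
};

static int output_dp_new(int index, struct output **poutp)
{
	struct output_dp *dp = calloc(1, sizeof(*dp));
	if (!dp)
		return -ENOMEM;
	output_ctor(index, &dp->base);  /* base init inside larger object */
	dp->aux_id = index;
	*poutp = &dp->base;
	return 0;
}

int main(void)
{
	struct output *a = NULL, *b = NULL;
	int ret = output_new(0, &a) || output_dp_new(1, &b);
	free(a);                        /* base is first member, so ok */
	free(b);
	return ret ? 1 : 0;
}
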
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index d9253d26c31b..2590fec67ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,61 +1,55 @@
#ifndef __NVKM_DISP_OUTP_H__
#define __NVKM_DISP_OUTP_H__
-#include <core/object.h>
+#include <engine/disp.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
struct nvkm_output {
- struct nvkm_object base;
- struct list_head head;
-
- struct dcb_output info;
+ const struct nvkm_output_func *func;
+ struct nvkm_disp *disp;
int index;
- int or;
+ struct dcb_output info;
- struct nvkm_i2c_port *port;
- struct nvkm_i2c_port *edid;
+ // whatever (if anything) is pointed at by the dcb device entry
+ struct nvkm_i2c_bus *i2c;
+ int or;
+ struct list_head head;
struct nvkm_connector *conn;
};
-#define nvkm_output_create(p,e,c,b,i,d) \
- nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_destroy(d) ({ \
- struct nvkm_output *_outp = (d); \
- _nvkm_output_dtor(nv_object(_outp)); \
-})
-#define nvkm_output_init(d) ({ \
- struct nvkm_output *_outp = (d); \
- _nvkm_output_init(nv_object(_outp)); \
-})
-#define nvkm_output_fini(d,s) ({ \
- struct nvkm_output *_outp = (d); \
- _nvkm_output_fini(nv_object(_outp), (s)); \
-})
-
-int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, struct dcb_output *,
- int, int, void **);
-
-int _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_output_dtor(struct nvkm_object *);
-int _nvkm_output_init(struct nvkm_object *);
-int _nvkm_output_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_impl {
- struct nvkm_oclass base;
+struct nvkm_output_func {
+ void *(*dtor)(struct nvkm_output *);
+ void (*init)(struct nvkm_output *);
+ void (*fini)(struct nvkm_output *);
};
-#ifndef MSG
-#define MSG(l,f,a...) do { \
- struct nvkm_output *_outp = (void *)outp; \
- nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index, \
- _outp->info.hasht, _outp->info.hashm, ##a); \
+void nvkm_output_ctor(const struct nvkm_output_func *, struct nvkm_disp *,
+ int index, struct dcb_output *, struct nvkm_output *);
+int nvkm_output_new_(const struct nvkm_output_func *, struct nvkm_disp *,
+ int index, struct dcb_output *, struct nvkm_output **);
+void nvkm_output_del(struct nvkm_output **);
+void nvkm_output_init(struct nvkm_output *);
+void nvkm_output_fini(struct nvkm_output *);
+
+int nv50_dac_output_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int nv50_sor_output_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+
+u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
+
+void gm204_sor_magic(struct nvkm_output *outp);
+
+#define OUTP_MSG(o,l,f,a...) do { \
+ struct nvkm_output *_outp = (o); \
+ nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n", \
+ _outp->index, _outp->info.hasht, _outp->info.hashm, ##a); \
} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define OUTP_ERR(o,f,a...) OUTP_MSG((o), error, f, ##a)
+#define OUTP_DBG(o,f,a...) OUTP_MSG((o), debug, f, ##a)
+#define OUTP_TRACE(o,f,a...) OUTP_MSG((o), trace, f, ##a)
#endif
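
The new OUTP_MSG family takes the output as an explicit first parameter, where the old MSG macro silently captured whatever local happened to be named "outp". A stand-alone illustration of the same variadic-macro shape, using printf in place of the kernel log helpers (names hypothetical):

#include <stdio.h>

struct output {
	int index;
	unsigned hasht, hashm;
};

#define OUT_MSG(o, f, a...) do {                                       \
	struct output *_o = (o);                                       \
	printf("outp %02x:%04x:%04x: " f "\n",                         \
	       _o->index, _o->hasht, _o->hashm, ##a);                  \
} while (0)

#define OUT_DBG(o, f, a...) OUT_MSG((o), f, ##a)

int main(void)
{
	struct output outp = { .index = 2, .hasht = 0x0084, .hashm = 0x0306 };
	OUT_DBG(&outp, "type %02x", 0x06);
	return 0;
}
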
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 0bde0fa5b59d..3b7a9e7a1ea8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -33,16 +33,17 @@
int
nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
{
- struct nvkm_output_dp *outp = (void *)base;
+ struct nvkm_output_dp *outp = nvkm_output_dp(base);
bool retrain = true;
u8 link[2], stat[3];
u32 linkrate;
int ret, i;
/* check that the link is trained at a high enough rate */
- ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+ ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
if (ret) {
- DBG("failed to read link config, assuming no sink\n");
+ OUTP_DBG(&outp->base,
+ "failed to read link config, assuming no sink");
goto done;
}
@@ -50,14 +51,15 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
datarate = (datarate + 9) / 10; /* -> decakilobits */
if (linkrate < datarate) {
- DBG("link not trained at sufficient rate\n");
+ OUTP_DBG(&outp->base, "link not trained at sufficient rate");
goto done;
}
/* check that link is still trained */
- ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+ ret = nvkm_rdaux(outp->aux, DPCD_LS02, stat, 3);
if (ret) {
- DBG("failed to read link status, assuming no sink\n");
+ OUTP_DBG(&outp->base,
+ "failed to read link status, assuming no sink");
goto done;
}
@@ -67,13 +69,14 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
!(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
- DBG("lane %d not equalised\n", lane);
+ OUTP_DBG(&outp->base,
+ "lane %d not equalised", lane);
goto done;
}
}
retrain = false;
} else {
- DBG("no inter-lane alignment\n");
+ OUTP_DBG(&outp->base, "no inter-lane alignment");
}
done:
@@ -102,150 +105,138 @@ done:
}
static void
-nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
{
- struct nvkm_i2c_port *port = outp->base.edid;
- if (present) {
+ struct nvkm_i2c_aux *aux = outp->aux;
+
+ if (enable) {
if (!outp->present) {
- nvkm_i2c(port)->acquire_pad(port, 0);
- DBG("aux power -> always\n");
+ OUTP_DBG(&outp->base, "aux power -> always");
+ nvkm_i2c_aux_monitor(aux, true);
outp->present = true;
}
- nvkm_output_dp_train(&outp->base, 0, true);
- } else {
- if (outp->present) {
- nvkm_i2c(port)->release_pad(port);
- DBG("aux power -> demand\n");
- outp->present = false;
+
+ if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
+ sizeof(outp->dpcd))) {
+ nvkm_output_dp_train(&outp->base, 0, true);
+ return;
}
- atomic_set(&outp->lt.done, 0);
}
-}
-static void
-nvkm_output_dp_detect(struct nvkm_output_dp *outp)
-{
- struct nvkm_i2c_port *port = outp->base.edid;
- int ret = nvkm_i2c(port)->acquire_pad(port, 0);
- if (ret == 0) {
- ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
- outp->dpcd, sizeof(outp->dpcd));
- nvkm_output_dp_enable(outp, ret == 0);
- nvkm_i2c(port)->release_pad(port);
+ if (outp->present) {
+ OUTP_DBG(&outp->base, "aux power -> demand");
+ nvkm_i2c_aux_monitor(aux, false);
+ outp->present = false;
}
+
+ atomic_set(&outp->lt.done, 0);
}
static int
nvkm_output_dp_hpd(struct nvkm_notify *notify)
{
- struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
- struct nvkm_output_dp *outp;
- struct nvkm_disp *disp = nvkm_disp(conn);
const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), hpd);
+ struct nvkm_connector *conn = outp->base.conn;
+ struct nvkm_disp *disp = outp->base.disp;
struct nvif_notify_conn_rep_v0 rep = {};
- list_for_each_entry(outp, &disp->outp, base.head) {
- if (outp->base.conn == conn &&
- outp->info.type == DCB_OUTPUT_DP) {
- DBG("HPD: %d\n", line->mask);
- nvkm_output_dp_detect(outp);
+ OUTP_DBG(&outp->base, "HPD: %d", line->mask);
+ nvkm_output_dp_enable(outp, true);
- if (line->mask & NVKM_I2C_UNPLUG)
- rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
- if (line->mask & NVKM_I2C_PLUG)
- rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+ if (line->mask & NVKM_I2C_UNPLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+ if (line->mask & NVKM_I2C_PLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
- nvkm_event_send(&disp->hpd, rep.mask, conn->index,
- &rep, sizeof(rep));
- return NVKM_NOTIFY_KEEP;
- }
- }
-
- WARN_ON(1);
- return NVKM_NOTIFY_DROP;
+ nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
+ return NVKM_NOTIFY_KEEP;
}
static int
nvkm_output_dp_irq(struct nvkm_notify *notify)
{
- struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
- struct nvkm_disp *disp = nvkm_disp(outp);
const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+ struct nvkm_connector *conn = outp->base.conn;
+ struct nvkm_disp *disp = outp->base.disp;
struct nvif_notify_conn_rep_v0 rep = {
.mask = NVIF_NOTIFY_CONN_V0_IRQ,
};
- int index = outp->base.info.connector;
- DBG("IRQ: %d\n", line->mask);
+ OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
nvkm_output_dp_train(&outp->base, 0, true);
- nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+ nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
return NVKM_NOTIFY_DROP;
}
-int
-_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_output_dp_fini(struct nvkm_output *base)
{
- struct nvkm_output_dp *outp = (void *)object;
+ struct nvkm_output_dp *outp = nvkm_output_dp(base);
+ nvkm_notify_put(&outp->hpd);
nvkm_notify_put(&outp->irq);
+ flush_work(&outp->lt.work);
nvkm_output_dp_enable(outp, false);
- return nvkm_output_fini(&outp->base, suspend);
}
-int
-_nvkm_output_dp_init(struct nvkm_object *object)
+static void
+nvkm_output_dp_init(struct nvkm_output *base)
{
- struct nvkm_output_dp *outp = (void *)object;
- nvkm_output_dp_detect(outp);
- return nvkm_output_init(&outp->base);
+ struct nvkm_output_dp *outp = nvkm_output_dp(base);
+ nvkm_notify_put(&outp->base.conn->hpd);
+ nvkm_output_dp_enable(outp, true);
+ nvkm_notify_get(&outp->hpd);
}
-void
-_nvkm_output_dp_dtor(struct nvkm_object *object)
+static void *
+nvkm_output_dp_dtor(struct nvkm_output *base)
{
- struct nvkm_output_dp *outp = (void *)object;
+ struct nvkm_output_dp *outp = nvkm_output_dp(base);
+ nvkm_notify_fini(&outp->hpd);
nvkm_notify_fini(&outp->irq);
- nvkm_output_destroy(&outp->base);
+ return outp;
}
+static const struct nvkm_output_func
+nvkm_output_dp_func = {
+ .dtor = nvkm_output_dp_dtor,
+ .init = nvkm_output_dp_init,
+ .fini = nvkm_output_dp_fini,
+};
+
int
-nvkm_output_dp_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass,
- struct dcb_output *info, int index,
- int length, void **pobject)
+nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
+ struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_i2c_aux *aux, struct nvkm_output_dp *outp)
{
- struct nvkm_bios *bios = nvkm_bios(parent);
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvkm_output_dp *outp;
+ struct nvkm_device *device = disp->engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_i2c *i2c = device->i2c;
u8 hdr, cnt, len;
u32 data;
int ret;
- ret = nvkm_output_create_(parent, engine, oclass, info, index,
- length, pobject);
- outp = *pobject;
- if (ret)
- return ret;
-
- nvkm_notify_fini(&outp->base.conn->hpd);
-
- /* access to the aux channel is not optional... */
- if (!outp->base.edid) {
- ERR("aux channel not found\n");
+ nvkm_output_ctor(&nvkm_output_dp_func, disp, index, dcbE, &outp->base);
+ outp->func = func;
+ outp->aux = aux;
+ if (!outp->aux) {
+ OUTP_ERR(&outp->base, "no aux");
return -ENODEV;
}
- /* nor is the bios data for this output... */
+ /* bios data is not optional */
data = nvbios_dpout_match(bios, outp->base.info.hasht,
outp->base.info.hashm, &outp->version,
&hdr, &cnt, &len, &outp->info);
if (!data) {
- ERR("no bios dp data\n");
+ OUTP_ERR(&outp->base, "no bios dp data");
return -ENODEV;
}
- DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+ OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
+ outp->version, hdr, cnt, len);
/* link training */
INIT_WORK(&outp->lt.work, nvkm_dp_train);
@@ -256,13 +247,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
&(struct nvkm_i2c_ntfy_req) {
.mask = NVKM_I2C_IRQ,
- .port = outp->base.edid->index,
+ .port = outp->aux->id,
},
sizeof(struct nvkm_i2c_ntfy_req),
sizeof(struct nvkm_i2c_ntfy_rep),
&outp->irq);
if (ret) {
- ERR("error monitoring aux irq event: %d\n", ret);
+ OUTP_ERR(&outp->base, "error monitoring aux irq: %d", ret);
return ret;
}
@@ -270,13 +261,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
&(struct nvkm_i2c_ntfy_req) {
.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
- .port = outp->base.edid->index,
+ .port = outp->aux->id,
},
sizeof(struct nvkm_i2c_ntfy_req),
sizeof(struct nvkm_i2c_ntfy_rep),
- &outp->base.conn->hpd);
+ &outp->hpd);
if (ret) {
- ERR("error monitoring aux hpd events: %d\n", ret);
+ OUTP_ERR(&outp->base, "error monitoring aux hpd: %d", ret);
return ret;
}
@@ -284,18 +275,17 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
}
int
-_nvkm_output_dp_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *info, u32 index,
- struct nvkm_object **pobject)
+nvkm_output_dp_new_(const struct nvkm_output_dp_func *func,
+ struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_output **poutp)
{
+ struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+ struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbE->i2c_index);
struct nvkm_output_dp *outp;
- int ret;
- ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
- *pobject = nv_object(outp);
- if (ret)
- return ret;
+ if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+ return -ENOMEM;
+ *poutp = &outp->base;
- return 0;
+ return nvkm_output_dp_ctor(func, disp, index, dcbE, aux, outp);
}
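
The reworked nvkm_output_dp_enable() above folds detection into enable: a successful DPCD read over aux is treated as "sink present" and only then is link training kicked off. A stand-alone mock of that gate, under the assumption that the aux read returning 0 means the sink answered (rdaux here stands in for the real helper):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int rdaux(uint8_t *buf, size_t len)      /* pretend DPCD read */
{
	memset(buf, 0x12, len);                 /* fake DPCD rev 1.2 */
	return 0;                               /* 0 = sink answered */
}

static void train(void) { printf("train link\n"); }

static void dp_enable(int enable)
{
	static uint8_t dpcd[16];
	if (enable && !rdaux(dpcd, sizeof(dpcd))) {
		train();                        /* sink answered: train */
		return;
	}
	printf("aux power -> demand\n");        /* no sink / disabled */
}

int main(void)
{
	dp_enable(1);
	return 0;
}
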
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 70c77aec4850..731136d660b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,5 +1,14 @@
#ifndef __NVKM_DISP_OUTP_DP_H__
#define __NVKM_DISP_OUTP_DP_H__
+#define nvkm_output_dp(p) container_of((p), struct nvkm_output_dp, base)
+#ifndef MSG
+#define MSG(l,f,a...) \
+ nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f, \
+ outp->base.index, outp->base.info.hasht, \
+ outp->base.info.hashm, ##a)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
#include "outp.h"
#include <core/notify.h>
@@ -7,12 +16,16 @@
#include <subdev/bios/dp.h>
struct nvkm_output_dp {
+ const struct nvkm_output_dp_func *func;
struct nvkm_output base;
struct nvbios_dpout info;
u8 version;
+ struct nvkm_i2c_aux *aux;
+
struct nvkm_notify irq;
+ struct nvkm_notify hpd;
bool present;
u8 dpcd[16];
@@ -23,34 +36,7 @@ struct nvkm_output_dp {
} lt;
};
-#define nvkm_output_dp_create(p,e,c,b,i,d) \
- nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_dp_destroy(d) ({ \
- struct nvkm_output_dp *_outp = (d); \
- _nvkm_output_dp_dtor(nv_object(_outp)); \
-})
-#define nvkm_output_dp_init(d) ({ \
- struct nvkm_output_dp *_outp = (d); \
- _nvkm_output_dp_init(nv_object(_outp)); \
-})
-#define nvkm_output_dp_fini(d,s) ({ \
- struct nvkm_output_dp *_outp = (d); \
- _nvkm_output_dp_fini(nv_object(_outp), (s)); \
-})
-
-int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, struct dcb_output *,
- int, int, void **);
-
-int _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_output_dp_dtor(struct nvkm_object *);
-int _nvkm_output_dp_init(struct nvkm_object *);
-int _nvkm_output_dp_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_dp_impl {
- struct nvkm_output_impl base;
+struct nvkm_output_dp_func {
int (*pattern)(struct nvkm_output_dp *, int);
int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
@@ -58,4 +44,25 @@ struct nvkm_output_dp_impl {
};
int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+ int index, struct dcb_output *, struct nvkm_i2c_aux *,
+ struct nvkm_output_dp *);
+int nvkm_output_dp_new_(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+ int index, struct dcb_output *,
+ struct nvkm_output **);
+
+int nv50_pior_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+
+int g94_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+
+int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+
+int gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
#endif
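
The nvkm_output_dp() macro added at the top of this header is a container_of() downcast from the embedded base object to the DP subclass, replacing the old unchecked void-pointer casts. A stand-alone version under hypothetical names, with container_of written out as in the kernel:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct output { int index; };

struct output_dp {
	struct output base;             /* embedded base object */
	int aux_id;
};

#define output_dp(p) container_of((p), struct output_dp, base)

int main(void)
{
	struct output_dp dp = { .base.index = 1, .aux_id = 7 };
	struct output *base = &dp.base; /* what generic code passes around */
	printf("aux %d\n", output_dp(base)->aux_id);
	return 0;
}
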
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
new file mode 100644
index 000000000000..db6234eebc61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_ovly_chan_mthd = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &g84_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_ovly_oclass = {
+ .base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &g84_disp_ovly_chan_mthd,
+ .chid = 3,
+};
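
Each { method, register } pair in the table above appears to bind a channel method offset to the register its state lands in, which the core can walk when dumping channel state for debugging. A minimal stand-alone lookup over a table of that shape (names and the use described here are an editorial assumption, not confirmed by this diff):

#include <stdio.h>
#include <stdint.h>

struct mthd_map { uint16_t mthd; uint32_t addr; };

static const struct mthd_map ovly_map[] = {
	{ 0x0080, 0x000000 },
	{ 0x0084, 0x6109a0 },
	{ 0x0088, 0x6109c0 },
	{ 0 }                           /* terminator, as in the tables */
};

static uint32_t mthd_to_addr(const struct mthd_map *map, uint16_t mthd)
{
	for (; map->mthd; map++)        /* stop at the zero terminator */
		if (map->mthd == mthd)
			return map->addr;
	return 0;
}

int main(void)
{
	printf("mthd 0x0084 -> %06x\n", mthd_to_addr(ovly_map, 0x0084));
	return 0;
}
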
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
new file mode 100644
index 000000000000..5985879abd23
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_ovly_chan_mthd = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .prev = -0x020000,
+ .data = {
+ { "Global", 1, &gf119_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_ovly_oclass = {
+ .base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &gf119_disp_dmac_func,
+ .mthd = &gf119_disp_ovly_chan_mthd,
+ .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
new file mode 100644
index 000000000000..2e2dc0641ef2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .data = {
+ { 0x0080, 0x665080 },
+ { 0x0084, 0x665084 },
+ { 0x0088, 0x665088 },
+ { 0x008c, 0x66508c },
+ { 0x0090, 0x665090 },
+ { 0x0094, 0x665094 },
+ { 0x00a0, 0x6650a0 },
+ { 0x00a4, 0x6650a4 },
+ { 0x00b0, 0x6650b0 },
+ { 0x00b4, 0x6650b4 },
+ { 0x00b8, 0x6650b8 },
+ { 0x00c0, 0x6650c0 },
+ { 0x00c4, 0x6650c4 },
+ { 0x00e0, 0x6650e0 },
+ { 0x00e4, 0x6650e4 },
+ { 0x00e8, 0x6650e8 },
+ { 0x0100, 0x665100 },
+ { 0x0104, 0x665104 },
+ { 0x0108, 0x665108 },
+ { 0x010c, 0x66510c },
+ { 0x0110, 0x665110 },
+ { 0x0118, 0x665118 },
+ { 0x011c, 0x66511c },
+ { 0x0120, 0x665120 },
+ { 0x0124, 0x665124 },
+ { 0x0130, 0x665130 },
+ { 0x0134, 0x665134 },
+ { 0x0138, 0x665138 },
+ { 0x013c, 0x66513c },
+ { 0x0140, 0x665140 },
+ { 0x0144, 0x665144 },
+ { 0x0148, 0x665148 },
+ { 0x014c, 0x66514c },
+ { 0x0150, 0x665150 },
+ { 0x0154, 0x665154 },
+ { 0x0158, 0x665158 },
+ { 0x015c, 0x66515c },
+ { 0x0160, 0x665160 },
+ { 0x0164, 0x665164 },
+ { 0x0168, 0x665168 },
+ { 0x016c, 0x66516c },
+ { 0x0400, 0x665400 },
+ { 0x0404, 0x665404 },
+ { 0x0408, 0x665408 },
+ { 0x040c, 0x66540c },
+ { 0x0410, 0x665410 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+gk104_disp_ovly_chan_mthd = {
+ .name = "Overlay",
+ .addr = 0x001000,
+ .prev = -0x020000,
+ .data = {
+ { "Global", 1, &gk104_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_ovly_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &gf119_disp_dmac_func,
+ .mthd = &gk104_disp_ovly_chan_mthd,
+ .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
new file mode 100644
index 000000000000..f858053db83d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gt200_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x6109a0 },
+ { 0x0088, 0x6109c0 },
+ { 0x008c, 0x6109c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00b0, 0x610c98 },
+ { 0x00b4, 0x610ca4 },
+ { 0x00b8, 0x610cac },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+gt200_disp_ovly_chan_mthd = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &gt200_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_ovly_oclass = {
+ .base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &gt200_disp_ovly_chan_mthd,
+ .chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
new file mode 100644
index 000000000000..c947e1e16a37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_ovly_oclass = {
+ .base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &g84_disp_ovly_chan_mthd,
+ .chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
new file mode 100644
index 000000000000..6fa296c047b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+ struct nv50_disp_root *root, int chid,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nv50_disp_overlay_channel_dma_v0 v0;
+ } *args = data;
+ struct nvkm_object *parent = oclass->parent;
+ struct nv50_disp *disp = root->disp;
+ int head, ret;
+ u64 push;
+
+ nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create disp overlay channel dma vers %d "
+ "pushbuf %016llx head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > disp->base.head.nr)
+ return -EINVAL;
+ push = args->v0.pushbuf;
+ head = args->v0.head;
+ } else
+ return ret;
+
+ return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+ head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0009a0 },
+ { 0x0088, 0x0009c0 },
+ { 0x008c, 0x0009c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_ovly_chan_mthd = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_ovly_oclass = {
+ .base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &nv50_disp_dmac_func,
+ .mthd = &nv50_disp_ovly_chan_mthd,
+ .chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
new file mode 100644
index 000000000000..a625a9876e34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->chid;
+
+ nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+ nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_chan_func
+gf119_disp_pioc_func = {
+ .init = gf119_disp_pioc_init,
+ .fini = gf119_disp_pioc_fini,
+};
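
gf119_disp_pioc_init()/fini() above use nvkm_msec() to busy-wait on a channel-state field with a timeout: init waits for the 0x00030000 field to read "active" (0x00010000), fini for it to clear. A stand-alone poll helper with the same shape, assuming a monotonic-clock timeout; the real code reads hardware registers rather than a variable:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint32_t fake_chan_stat = 0x00010000;    /* pretend hardware */

static uint32_t rd32(void) { return fake_chan_stat; }

/* poll until cond(rd32()) holds or ~timeout_ms elapses; <0 on timeout */
static int poll_msec(int timeout_ms, int (*cond)(uint32_t))
{
	struct timespec t0, t1;
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (;;) {
		if (cond(rd32()))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &t1);
		if ((t1.tv_sec - t0.tv_sec) * 1000 +
		    (t1.tv_nsec - t0.tv_nsec) / 1000000 > timeout_ms)
			return -1;
	}
}

static int chan_active(uint32_t stat)
{
	return (stat & 0x00030000) == 0x00010000;   /* as in init above */
}

int main(void)
{
	printf("init %s\n", poll_msec(2000, chan_active) ? "timeout" : "ok");
	return 0;
}
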
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
new file mode 100644
index 000000000000..9d2618dacf20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->chid;
+
+ nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+ nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ }
+}
+
+static int
+nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->chid;
+
+ nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+ nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+ nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_chan_func
+nv50_disp_pioc_func = {
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 2a1d8871bf82..ab524bde7795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,8 +21,8 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
#include "outpdp.h"
+#include "nv50.h"
#include <core/client.h>
#include <subdev/i2c.h>
@@ -31,140 +31,101 @@
#include <nvif/class.h>
#include <nvif/unpack.h>
-/******************************************************************************
- * TMDS
- *****************************************************************************/
-
-static int
-nv50_pior_tmds_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *info, u32 index,
- struct nvkm_object **pobject)
+int
+nv50_pior_power(NV50_DISP_MTHD_V1)
{
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvkm_output *outp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = outp->or * 0x800;
+ union {
+ struct nv50_disp_pior_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl, type;
int ret;
- ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
- *pobject = nv_object(outp);
- if (ret)
+ nvif_ioctl(object, "disp pior pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+ args->v0.version, args->v0.state, args->v0.type);
+ if (args->v0.type > 0x0f)
+ return -EINVAL;
+ ctrl = !!args->v0.state;
+ type = args->v0.type;
+ } else
return ret;
- outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+ break;
+ );
+ nvkm_mask(device, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+ break;
+ );
+ disp->pior.type[outp->or] = type;
return 0;
}
-struct nvkm_output_impl
-nv50_pior_tmds_impl = {
- .base.handle = DCB_OUTPUT_TMDS | 0x0100,
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_pior_tmds_ctor,
- .dtor = _nvkm_output_dtor,
- .init = _nvkm_output_init,
- .fini = _nvkm_output_fini,
- },
-};
-
/******************************************************************************
- * DisplayPort
+ * TMDS
*****************************************************************************/
+static const struct nvkm_output_func
+nv50_pior_output_func = {
+};
-static int
-nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+int
+nv50_pior_output_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
{
- struct nvkm_i2c_port *port = outp->base.edid;
- if (port && port->func->pattern)
- return port->func->pattern(port, pattern);
- return port ? 0 : -ENODEV;
+ return nvkm_output_new_(&nv50_pior_output_func, disp,
+ index, dcbE, poutp);
}
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
static int
-nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+nv50_pior_output_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
return 0;
}
static int
-nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
- struct nvkm_i2c_port *port = outp->base.edid;
- if (port && port->func->lnk_ctl)
- return port->func->lnk_ctl(port, nr, bw, ef);
- return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+nv50_pior_output_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nvkm_i2c_port *port = outp->base.edid;
- if (port && port->func->drv_ctl)
- return port->func->drv_ctl(port, ln, vs, pe);
- return port ? 0 : -ENODEV;
+ return 0;
}
static int
-nv50_pior_dp_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *info, u32 index,
- struct nvkm_object **pobject)
+nv50_pior_output_dp_lnk_ctl(struct nvkm_output_dp *outp,
+ int nr, int bw, bool ef)
{
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvkm_output_dp *outp;
- int ret;
-
- ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
- *pobject = nv_object(outp);
+ int ret = nvkm_i2c_aux_lnk_ctl(outp->aux, nr, bw, ef);
if (ret)
return ret;
-
- outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
- outp->base.info.extdev));
- return 0;
+ return 1;
}
-struct nvkm_output_dp_impl
-nv50_pior_dp_impl = {
- .base.base.handle = DCB_OUTPUT_DP | 0x0010,
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_pior_dp_ctor,
- .dtor = _nvkm_output_dp_dtor,
- .init = _nvkm_output_dp_init,
- .fini = _nvkm_output_dp_fini,
- },
- .pattern = nv50_pior_dp_pattern,
- .lnk_pwr = nv50_pior_dp_lnk_pwr,
- .lnk_ctl = nv50_pior_dp_lnk_ctl,
- .drv_ctl = nv50_pior_dp_drv_ctl,
+static const struct nvkm_output_dp_func
+nv50_pior_output_dp_func = {
+ .pattern = nv50_pior_output_dp_pattern,
+ .lnk_pwr = nv50_pior_output_dp_lnk_pwr,
+ .lnk_ctl = nv50_pior_output_dp_lnk_ctl,
};
-/******************************************************************************
- * General PIOR handling
- *****************************************************************************/
-
int
-nv50_pior_power(NV50_DISP_MTHD_V1)
+nv50_pior_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_output **poutp)
{
- const u32 soff = outp->or * 0x800;
- union {
- struct nv50_disp_pior_pwr_v0 v0;
- } *args = data;
- u32 ctrl, type;
- int ret;
+ struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+ struct nvkm_i2c_aux *aux =
+ nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
+ struct nvkm_output_dp *outp;
- nv_ioctl(object, "disp pior pwr size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
- args->v0.version, args->v0.state, args->v0.type);
- if (args->v0.type > 0x0f)
- return -EINVAL;
- ctrl = !!args->v0.state;
- type = args->v0.type;
- } else
- return ret;
+ if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+ return -ENOMEM;
+ *poutp = &outp->base;
- nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
- nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
- priv->pior.type[outp->or] = type;
- return 0;
+ return nvkm_output_dp_ctor(&nv50_pior_output_dp_func, disp,
+ index, dcbE, aux, outp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 961ce8bb2135..c2452957fc57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,42 +1,52 @@
#ifndef __NVKM_DISP_PRIV_H__
#define __NVKM_DISP_PRIV_H__
#include <engine/disp.h>
+#include "outp.h"
+#include "outpdp.h"
-struct nvkm_disp_impl {
- struct nvkm_oclass base;
- struct nvkm_oclass **outp;
- struct nvkm_oclass **conn;
- const struct nvkm_event_func *vblank;
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
+ int index, int heads, struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
+ int index, int heads, struct nvkm_disp **);
+void nvkm_disp_vblank(struct nvkm_disp *, int head);
+
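+/* One constructor per DCB output type; a chip fills in only the entries
+ * it supports, and the base layer presumably dispatches on the dcb_output
+ * type when creating outputs.
+ */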
+struct nvkm_disp_func_outp {
+ int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (* tv)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+ int (* dp)(struct nvkm_disp *, int index, struct dcb_output *,
+ struct nvkm_output **);
+};
+
+struct nvkm_disp_func {
+ void *(*dtor)(struct nvkm_disp *);
+ void (*intr)(struct nvkm_disp *);
+
+ const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
+
+ struct {
+ void (*vblank_init)(struct nvkm_disp *, int head);
+ void (*vblank_fini)(struct nvkm_disp *, int head);
+ } head;
+
+ struct {
+ const struct nvkm_disp_func_outp internal;
+ const struct nvkm_disp_func_outp external;
+ } outp;
};
-#define nvkm_disp_create(p,e,c,h,i,x,d) \
- nvkm_disp_create_((p), (e), (c), (h), (i), (x), \
- sizeof(**d), (void **)d)
-#define nvkm_disp_destroy(d) ({ \
- struct nvkm_disp *disp = (d); \
- _nvkm_disp_dtor(nv_object(disp)); \
-})
-#define nvkm_disp_init(d) ({ \
- struct nvkm_disp *disp = (d); \
- _nvkm_disp_init(nv_object(disp)); \
-})
-#define nvkm_disp_fini(d,s) ({ \
- struct nvkm_disp *disp = (d); \
- _nvkm_disp_fini(nv_object(disp), (s)); \
-})
-
-int nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int heads,
- const char *, const char *, int, void **);
-void _nvkm_disp_dtor(struct nvkm_object *);
-int _nvkm_disp_init(struct nvkm_object *);
-int _nvkm_disp_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass *nvkm_output_oclass;
-extern struct nvkm_oclass *nvkm_connector_oclass;
-
-int nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
- struct nvkm_notify *);
-void nvkm_disp_vblank(struct nvkm_disp *, int head);
int nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+
+extern const struct nvkm_disp_oclass nv04_disp_root_oclass;
+
+struct nvkm_disp_oclass {
+ int (*ctor)(struct nvkm_disp *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+ struct nvkm_sclass base;
+};
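+
+/* Illustrative sketch only: a chip's root object would be created
+ * through the hooks above, along the lines of
+ *
+ *	const struct nvkm_disp_oclass *root = disp->func->root(disp);
+ *	int ret = root->ctor(disp, oclass, data, size, &object);
+ */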
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 6820176e5f78..721e4f74d1fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,37 +21,38 @@
*
* Authors: Ben Skeggs
*/
-#include "gf100.h"
+#include "rootnv50.h"
+#include "dmacnv50.h"
-static int
-gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_pm_priv *priv;
- int ret;
-
- ret = nvkm_pm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+#include <nvif/class.h>
- ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
- if (ret)
- return ret;
+static const struct nv50_disp_root_func
+g84_disp_root = {
+ .init = nv50_disp_root_init,
+ .fini = nv50_disp_root_fini,
+ .dmac = {
+ &g84_disp_core_oclass,
+ &g84_disp_base_oclass,
+ &g84_disp_ovly_oclass,
+ },
+ .pioc = {
+ &g84_disp_oimm_oclass,
+ &g84_disp_curs_oclass,
+ },
+};
- nv_engine(priv)->cclass = &nvkm_pm_cclass;
- nv_engine(priv)->sclass = nvkm_pm_sclass;
- return 0;
+static int
+g84_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&g84_disp_root, disp, oclass,
+ data, size, pobject);
}
-struct nvkm_oclass
-gk110_pm_oclass = {
- .handle = NV_ENGINE(PM, 0xf0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk110_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = gf100_pm_fini,
- },
+const struct nvkm_disp_oclass
+g84_disp_root_oclass = {
+ .base.oclass = G82_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = g84_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
new file mode 100644
index 000000000000..9493f6edf62b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g94_disp_root = {
+ .init = nv50_disp_root_init,
+ .fini = nv50_disp_root_fini,
+ .dmac = {
+ &g94_disp_core_oclass,
+ &gt200_disp_base_oclass,
+ &gt200_disp_ovly_oclass,
+ },
+ .pioc = {
+ &g84_disp_oimm_oclass,
+ &g84_disp_curs_oclass,
+ },
+};
+
+static int
+g94_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&g94_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g94_disp_root_oclass = {
+ .base.oclass = GT206_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = g94_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
new file mode 100644
index 000000000000..8591726871ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 total = nvkm_rd32(device, 0x640414 + (head * 0x300));
+ const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
+ const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
+ int ret;
+
+ nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp scanoutpos vers %d\n",
+ args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
+ return ret;
+
+ return 0;
+}
+
+void
+gf119_disp_root_fini(struct nv50_disp_root *root)
+{
+ struct nvkm_device *device = root->disp->base.engine.subdev.device;
+ /* disable all interrupts */
+ nvkm_wr32(device, 0x6100b0, 0x00000000);
+}
+
+int
+gf119_disp_root_init(struct nv50_disp_root *root)
+{
+ struct nv50_disp *disp = root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ u32 tmp;
+ int i;
+
+	/* The register-to-register copies below appear to inform EVO of
+	 * the display capabilities, or something similar.
+	 */
+
+ /* ... CRTC caps */
+ for (i = 0; i < disp->base.head.nr; i++) {
+ tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+ nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+ nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+ nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < disp->func->dac.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+ nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < disp->func->sor.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+ nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+	/* take the display engine away from the VBIOS; the exact handshake is unclear */
+ if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
+ nvkm_wr32(device, 0x6100ac, 0x00000100);
+ nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+ break;
+ ) < 0)
+ return -EBUSY;
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nvkm_wr32(device, 0x610090, 0x00000000);
+ nvkm_wr32(device, 0x6100a0, 0x00000000);
+ nvkm_wr32(device, 0x6100b0, 0x00000307);
+
+ /* disable underflow reporting, preventing an intermittent issue
+ * on some gk104 boards where the production vbios left this
+ * setting enabled by default.
+ *
+ * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+ */
+ for (i = 0; i < disp->base.head.nr; i++)
+ nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
+ return 0;
+}
+
+static const struct nv50_disp_root_func
+gf119_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gf119_disp_core_oclass,
+ &gf119_disp_base_oclass,
+ &gf119_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gf119_disp_oimm_oclass,
+ &gf119_disp_curs_oclass,
+ },
+};
+
+static int
+gf119_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gf119_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gf119_disp_root_oclass = {
+ .base.oclass = GF110_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gf119_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
new file mode 100644
index 000000000000..0bfdb1d1c6ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk104_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gk104_disp_core_oclass,
+ &gk104_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gk104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gk104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk104_disp_root_oclass = {
+ .base.oclass = GK104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gk104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
new file mode 100644
index 000000000000..1e8dbed8a67c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk110_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gk110_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gk110_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gk110_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk110_disp_root_oclass = {
+ .base.oclass = GK110_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gk110_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
new file mode 100644
index 000000000000..44c55be69e99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm107_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gm107_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gm107_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gm107_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm107_disp_root_oclass = {
+ .base.oclass = GM107_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gm107_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
new file mode 100644
index 000000000000..168bffe0643c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm204_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gm204_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm204_disp_root_oclass = {
+ .base.oclass = GM204_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gm204_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
new file mode 100644
index 000000000000..124a0c24f92c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt200_disp_root = {
+ .init = nv50_disp_root_init,
+ .fini = nv50_disp_root_fini,
+ .dmac = {
+ &gt200_disp_core_oclass,
+ &gt200_disp_base_oclass,
+ &gt200_disp_ovly_oclass,
+ },
+ .pioc = {
+ &g84_disp_oimm_oclass,
+ &g84_disp_curs_oclass,
+ },
+};
+
+static int
+gt200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gt200_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt200_disp_root_oclass = {
+ .base.oclass = GT200_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gt200_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
new file mode 100644
index 000000000000..dff52f30668b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt215_disp_root = {
+ .init = nv50_disp_root_init,
+ .fini = nv50_disp_root_fini,
+ .dmac = {
+ &gt215_disp_core_oclass,
+ &gt215_disp_base_oclass,
+ &gt215_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gt215_disp_oimm_oclass,
+ &gt215_disp_curs_oclass,
+ },
+};
+
+static int
+gt215_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gt215_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt215_disp_root_oclass = {
+ .base.oclass = GT214_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gt215_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
new file mode 100644
index 000000000000..62d3fb66d0ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
+#include "priv.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv04_disp_root {
+ struct nvkm_object object;
+ struct nvkm_disp *disp;
+};
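+
+/* nv04_disp_root() above recovers this wrapper from its embedded
+ * nvkm_object, which is the handle the object core hands back to the
+ * mthd/ntfy hooks.
+ */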
+
+static int
+nv04_disp_scanoutpos(struct nv04_disp_root *root,
+ void *data, u32 size, int head)
+{
+ struct nvkm_device *device = root->disp->engine.subdev.device;
+ struct nvkm_object *object = &root->object;
+ const u32 hoff = head * 0x2000;
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
+ u32 line;
+ int ret;
+
+ nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp scanoutpos vers %d\n",
+ args->v0.version);
+ args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
+ args->v0.vtotal = nvkm_rd32(device, 0x680804 + hoff) & 0xffff;
+ args->v0.vblanke = args->v0.vtotal - 1;
+
+ args->v0.hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0xffff;
+ args->v0.htotal = nvkm_rd32(device, 0x680824 + hoff) & 0xffff;
+ args->v0.hblanke = args->v0.htotal - 1;
+
+		/*
+		 * If the output is VGA instead of digital then vtotal/htotal
+		 * are invalid, so we have to give up and trigger the
+		 * timestamping fallback in the drm core.
+		 */
+ if (!args->v0.vtotal || !args->v0.htotal)
+ return -ENOTSUPP;
+
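+		/* sample the line counter between two timestamps so the
+		 * caller can bound how stale the reading is
+		 */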
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ line = nvkm_rd32(device, 0x600868 + hoff);
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline = (line & 0xffff0000) >> 16;
+ args->v0.vline = (line & 0x0000ffff);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ struct nv04_disp_root *root = nv04_disp_root(object);
+ union {
+ struct nv04_disp_mthd_v0 v0;
+ } *args = data;
+ int head, ret;
+
+ nvif_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ return ret;
+
+ if (head < 0 || head >= 2)
+ return -ENXIO;
+
+ switch (mthd) {
+ case NV04_DISP_SCANOUTPOS:
+ return nv04_disp_scanoutpos(root, data, size, head);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static struct nvkm_object_func
+nv04_disp_root = {
+ .mthd = nv04_disp_mthd,
+ .ntfy = nvkm_disp_ntfy,
+};
+
+static int
+nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nv04_disp_root *root;
+
+ if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+ return -ENOMEM;
+ root->disp = disp;
+ *pobject = &root->object;
+
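+	/* publish the object before construction so the caller can
+	 * presumably unwind it if a later step fails
+	 */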
+ nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
+ return 0;
+}
+
+const struct nvkm_disp_oclass
+nv04_disp_root_oclass = {
+ .base.oclass = NV04_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = nv04_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
new file mode 100644
index 000000000000..06fb24d88702
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
+ const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
+ const u32 total = nvkm_rd32(device, 0x610afc + (head * 0x540));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
+ int ret;
+
+ nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp scanoutpos vers %d\n",
+ args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
+ return ret;
+
+ return 0;
+}
+
+int
+nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ union {
+ struct nv50_disp_mthd_v0 v0;
+ struct nv50_disp_mthd_v1 v1;
+ } *args = data;
+ struct nv50_disp_root *root = nv50_disp_root(object);
+ struct nv50_disp *disp = root->disp;
+ const struct nv50_disp_func *func = disp->func;
+ struct nvkm_output *outp = NULL;
+ struct nvkm_output *temp;
+ u16 type, mask = 0;
+ int head, ret;
+
+ if (mthd != NV50_DISP_MTHD)
+ return -EINVAL;
+
+ nvif_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ if (nvif_unpack(args->v1, 1, 1, true)) {
+ nvif_ioctl(object, "disp mthd vers %d mthd %02x "
+ "type %04x mask %04x\n",
+ args->v1.version, args->v1.method,
+ args->v1.hasht, args->v1.hashm);
+ mthd = args->v1.method;
+ type = args->v1.hasht;
+ mask = args->v1.hashm;
+ head = ffs((mask >> 8) & 0x0f) - 1;
+ } else
+ return ret;
+
+ if (head < 0 || head >= disp->base.head.nr)
+ return -ENXIO;
+
+ if (mask) {
+ list_for_each_entry(temp, &disp->base.outp, head) {
+ if ((temp->info.hasht == type) &&
+ (temp->info.hashm & mask) == mask) {
+ outp = temp;
+ break;
+ }
+ }
+ if (outp == NULL)
+ return -ENXIO;
+ }
+
+ switch (mthd) {
+ case NV50_DISP_SCANOUTPOS:
+ return func->head.scanoutpos(object, disp, data, size, head);
+ default:
+ break;
+ }
+
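+	/* if no output was resolved above, outp == NULL and the expression
+	 * below evaluates to zero, so none of the per-output method cases
+	 * can match
+	 */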
+ switch (mthd * !!outp) {
+ case NV50_DISP_MTHD_V1_DAC_PWR:
+ return func->dac.power(object, disp, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_DAC_LOAD:
+ return func->dac.sense(object, disp, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_PWR:
+ return func->sor.power(object, disp, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+ if (!func->sor.hda_eld)
+ return -ENODEV;
+ return func->sor.hda_eld(object, disp, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+ if (!func->sor.hdmi)
+ return -ENODEV;
+ return func->sor.hdmi(object, disp, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+ union {
+ struct nv50_disp_sor_lvds_script_v0 v0;
+ } *args = data;
+ nvif_ioctl(object, "disp sor lvds script size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp sor lvds script "
+ "vers %d name %04x\n",
+ args->v0.version, args->v0.script);
+ disp->sor.lvdsconf = args->v0.script;
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+ struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+ union {
+ struct nv50_disp_sor_dp_pwr_v0 v0;
+ } *args = data;
+ nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (args->v0.state == 0) {
+ nvkm_notify_put(&outpdp->irq);
+ outpdp->func->lnk_pwr(outpdp, 0);
+ atomic_set(&outpdp->lt.done, 0);
+ return 0;
+			} else {
+ nvkm_output_dp_train(&outpdp->base, 0, true);
+ return 0;
+ }
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_PIOR_PWR:
+ if (!func->pior.power)
+ return -ENODEV;
+ return func->pior.power(object, disp, data, size, head, outp);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
+ struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+ oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+ struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+ oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_child_get_(struct nvkm_object *object, int index,
+ struct nvkm_oclass *sclass)
+{
+ struct nv50_disp_root *root = nv50_disp_root(object);
+
+ if (index < ARRAY_SIZE(root->func->dmac)) {
+ sclass->base = root->func->dmac[index]->base;
+ sclass->priv = root->func->dmac[index];
+ sclass->ctor = nv50_disp_root_dmac_new_;
+ return 0;
+ }
+
+ index -= ARRAY_SIZE(root->func->dmac);
+
+ if (index < ARRAY_SIZE(root->func->pioc)) {
+ sclass->base = root->func->pioc[index]->base;
+ sclass->priv = root->func->pioc[index];
+ sclass->ctor = nv50_disp_root_pioc_new_;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
+{
+ struct nv50_disp_root *root = nv50_disp_root(object);
+ root->func->fini(root);
+ return 0;
+}
+
+static int
+nv50_disp_root_init_(struct nvkm_object *object)
+{
+ struct nv50_disp_root *root = nv50_disp_root(object);
+ return root->func->init(root);
+}
+
+static void *
+nv50_disp_root_dtor_(struct nvkm_object *object)
+{
+ struct nv50_disp_root *root = nv50_disp_root(object);
+ nvkm_ramht_del(&root->ramht);
+ nvkm_gpuobj_del(&root->instmem);
+ return root;
+}
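+
+/* the pointer returned from .dtor is handed back to the object core,
+ * which presumably kfree()s it once the subclass resources above have
+ * been released
+ */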
+
+static const struct nvkm_object_func
+nv50_disp_root_ = {
+ .dtor = nv50_disp_root_dtor_,
+ .init = nv50_disp_root_init_,
+ .fini = nv50_disp_root_fini_,
+ .mthd = nv50_disp_root_mthd_,
+ .ntfy = nvkm_disp_ntfy,
+ .sclass = nv50_disp_root_child_get_,
+};
+
+int
+nv50_disp_root_new_(const struct nv50_disp_root_func *func,
+ struct nvkm_disp *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nv50_disp *disp = nv50_disp(base);
+ struct nv50_disp_root *root;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ int ret;
+
+ if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &root->object;
+
+ nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
+ root->func = func;
+ root->disp = disp;
+
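+	/* 64KiB of instance memory backs the display's hash table and
+	 * objects; a RAMHT is layered on top of it below
+	 */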
+ ret = nvkm_gpuobj_new(disp->base.engine.subdev.device, 0x10000, 0x10000,
+ false, NULL, &root->instmem);
+ if (ret)
+ return ret;
+
+ return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
+}
+
+void
+nv50_disp_root_fini(struct nv50_disp_root *root)
+{
+ struct nvkm_device *device = root->disp->base.engine.subdev.device;
+ /* disable all interrupts */
+ nvkm_wr32(device, 0x610024, 0x00000000);
+ nvkm_wr32(device, 0x610020, 0x00000000);
+}
+
+int
+nv50_disp_root_init(struct nv50_disp_root *root)
+{
+ struct nv50_disp *disp = root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ u32 tmp;
+ int i;
+
+	/* The register-to-register copies below appear to inform EVO of
+	 * the display capabilities, or something similar.  What the
+	 * 0x614004 caps are for is unknown.
+	 */
+ tmp = nvkm_rd32(device, 0x614004);
+ nvkm_wr32(device, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < disp->base.head.nr; i++) {
+ tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
+ nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
+ tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+ nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
+ tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+ nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
+ tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+ nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < disp->func->dac.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+ nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < disp->func->sor.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+ nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... PIOR caps */
+ for (i = 0; i < disp->func->pior.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
+ nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+	/* take the display engine away from the VBIOS; the exact handshake is unclear */
+ if (nvkm_rd32(device, 0x610024) & 0x00000100) {
+ nvkm_wr32(device, 0x610024, 0x00000100);
+ nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+ break;
+ ) < 0)
+ return -EBUSY;
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nvkm_wr32(device, 0x61002c, 0x00000370);
+ nvkm_wr32(device, 0x610028, 0x00000000);
+ return 0;
+}
+
+static const struct nv50_disp_root_func
+nv50_disp_root = {
+ .init = nv50_disp_root_init,
+ .fini = nv50_disp_root_fini,
+ .dmac = {
+ &nv50_disp_core_oclass,
+ &nv50_disp_base_oclass,
+ &nv50_disp_ovly_oclass,
+ },
+ .pioc = {
+ &nv50_disp_oimm_oclass,
+ &nv50_disp_curs_oclass,
+ },
+};
+
+static int
+nv50_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&nv50_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+nv50_disp_root_oclass = {
+ .base.oclass = NV50_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = nv50_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
new file mode 100644
index 000000000000..5b2c903ce9ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -0,0 +1,43 @@
+#ifndef __NV50_DISP_ROOT_H__
+#define __NV50_DISP_ROOT_H__
+#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
+#include "nv50.h"
+#include "channv50.h"
+#include "dmacnv50.h"
+
+struct nv50_disp_root {
+ const struct nv50_disp_root_func *func;
+ struct nv50_disp *disp;
+ struct nvkm_object object;
+
+ struct nvkm_gpuobj *instmem;
+ struct nvkm_ramht *ramht;
+};
+
+struct nv50_disp_root_func {
+ int (*init)(struct nv50_disp_root *);
+ void (*fini)(struct nv50_disp_root *);
+ const struct nv50_disp_dmac_oclass *dmac[3];
+ const struct nv50_disp_pioc_oclass *pioc[2];
+};
+
+int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+int nv50_disp_root_init(struct nv50_disp_root *);
+void nv50_disp_root_fini(struct nv50_disp_root *);
+
+int gf119_disp_root_init(struct nv50_disp_root *);
+void gf119_disp_root_fini(struct nv50_disp_root *);
+
+extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
+extern const struct nvkm_disp_oclass g84_disp_root_oclass;
+extern const struct nvkm_disp_oclass g94_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt215_disp_root_oclass;
+extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 8918da7ffdf2..1bb9d661e9b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -24,7 +24,6 @@
#include "nv50.h"
#include "outpdp.h"
-#include <core/device.h>
#include <subdev/timer.h>
static inline u32
@@ -39,12 +38,33 @@ g94_sor_loff(struct nvkm_output_dp *outp)
return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
-static inline u32
-g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+/*******************************************************************************
+ * TMDS/LVDS
+ ******************************************************************************/
+static const struct nvkm_output_func
+g94_sor_output_func = {
+};
+
+int
+g94_sor_output_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_new_(&g94_sor_output_func, disp,
+ index, dcbE, poutp);
+}
+
+/*******************************************************************************
+ * DisplayPort
+ ******************************************************************************/
+u32
+g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
{
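+	/* per-chipset tables mapping a logical DP lane to the bit offset
+	 * of its field in the drive-control registers
+	 */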
+ static const u8 gm100[] = { 0, 8, 16, 24 };
static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 g94[] = { 16, 8, 0, 24 };
- if (nv_device(priv)->chipset == 0xaf)
+ static const u8 g94[] = { 16, 8, 0, 24 };
+ if (device->chipset >= 0x110)
+ return gm100[lane];
+ if (device->chipset == 0xaf)
return mcp89[lane];
return g94[lane];
}
@@ -52,33 +72,36 @@ g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
static int
g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
const u32 loff = g94_sor_loff(outp);
- nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
+ nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
return 0;
}
int
g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
const u32 soff = g94_sor_soff(outp);
const u32 loff = g94_sor_loff(outp);
u32 mask = 0, i;
for (i = 0; i < nr; i++)
- mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3);
-
- nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
- nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
- nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+ mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
+
+ nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+ nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+ break;
+ );
return 0;
}
static int
g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
const u32 soff = g94_sor_soff(outp);
const u32 loff = g94_sor_loff(outp);
u32 dpctrl = 0x00000000;
@@ -90,17 +113,17 @@ g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
if (bw > 0x06)
clksor |= 0x00040000;
- nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
- nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
+ nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
return 0;
}
static int
g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(priv);
- const u32 shift = g94_sor_dp_lane_map(priv, ln);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ const u32 shift = g94_sor_dp_lane_map(device, ln);
const u32 loff = g94_sor_loff(outp);
u32 addr, data[3];
u8 ver, hdr, cnt, len;
@@ -109,37 +132,37 @@ g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
addr = nvbios_dpout_match(bios, outp->base.info.hasht,
outp->base.info.hashm,
- &ver, &hdr, &cnt, &len, &info);
+ &ver, &hdr, &cnt, &len, &info);
if (!addr)
return -ENODEV;
addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
- &ver, &hdr, &cnt, &len, &ocfg);
+ &ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
- data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
- data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
- data[2] = nv_rd32(priv, 0x61c130 + loff);
+ data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nvkm_rd32(device, 0x61c130 + loff);
if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
- nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
- nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
- nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+ nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nvkm_wr32(device, 0x61c130 + loff, data[2]);
return 0;
}
-struct nvkm_output_dp_impl
-g94_sor_dp_impl = {
- .base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_output_dp_ctor,
- .dtor = _nvkm_output_dp_dtor,
- .init = _nvkm_output_dp_init,
- .fini = _nvkm_output_dp_fini,
- },
+static const struct nvkm_output_dp_func
+g94_sor_dp_func = {
.pattern = g94_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
.lnk_ctl = g94_sor_dp_lnk_ctl,
.drv_ctl = g94_sor_dp_drv_ctl,
};
+
+int
+g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 52fbe4880e13..b4b41b135643 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -25,39 +25,32 @@
#include "outpdp.h"
static inline u32
-gf110_sor_soff(struct nvkm_output_dp *outp)
+gf119_sor_soff(struct nvkm_output_dp *outp)
{
return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-gf110_sor_loff(struct nvkm_output_dp *outp)
+gf119_sor_loff(struct nvkm_output_dp *outp)
{
- return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
-}
-
-static inline u32
-gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
-{
- static const u8 gf110[] = { 16, 8, 0, 24 };
- return gf110[lane];
+ return gf119_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
static int
-gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
- const u32 loff = gf110_sor_loff(outp);
- nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 loff = gf119_sor_loff(outp);
+ nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
int
-gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
- const u32 soff = gf110_sor_soff(outp);
- const u32 loff = gf110_sor_loff(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 soff = gf119_sor_soff(outp);
+ const u32 loff = gf119_sor_loff(outp);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
@@ -66,19 +59,19 @@ gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
if (ef)
dpctrl |= 0x00004000;
- nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
- nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+ nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
return 0;
}
static int
-gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(priv);
- const u32 shift = gf110_sor_dp_lane_map(priv, ln);
- const u32 loff = gf110_sor_loff(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ const u32 shift = g94_sor_dp_lane_map(device, ln);
+ const u32 loff = gf119_sor_loff(outp);
u32 addr, data[4];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
@@ -95,30 +88,30 @@ gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
if (!addr)
return -EINVAL;
- data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
- data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
- data[2] = nv_rd32(priv, 0x61c130 + loff);
+ data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nvkm_rd32(device, 0x61c130 + loff);
if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
- nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
- nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
- nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
- data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
- nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+ nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nvkm_wr32(device, 0x61c130 + loff, data[2]);
+ data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+ nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
return 0;
}
-struct nvkm_output_dp_impl
-gf110_sor_dp_impl = {
- .base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_output_dp_ctor,
- .dtor = _nvkm_output_dp_dtor,
- .init = _nvkm_output_dp_init,
- .fini = _nvkm_output_dp_fini,
- },
- .pattern = gf110_sor_dp_pattern,
+static const struct nvkm_output_dp_func
+gf119_sor_dp_func = {
+ .pattern = gf119_sor_dp_pattern,
.lnk_pwr = g94_sor_dp_lnk_pwr,
- .lnk_ctl = gf110_sor_dp_lnk_ctl,
- .drv_ctl = gf110_sor_dp_drv_ctl,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
+ .drv_ctl = gf119_sor_dp_drv_ctl,
};
+
+int
+gf119_sor_dp_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gf119_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 1e40dfe11319..029e5f16c2a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -41,17 +41,17 @@ gm204_sor_loff(struct nvkm_output_dp *outp)
void
gm204_sor_magic(struct nvkm_output *outp)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->disp->engine.subdev.device;
const u32 soff = outp->or * 0x100;
const u32 data = outp->or + 1;
if (outp->info.sorconf.link & 1)
- nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+ nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
if (outp->info.sorconf.link & 2)
- nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+ nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
}
static inline u32
-gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
{
return lane * 0x08;
}
@@ -59,30 +59,33 @@ gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
static int
gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
const u32 soff = gm204_sor_soff(outp);
const u32 data = 0x01010101 * pattern;
if (outp->base.info.sorconf.link & 1)
- nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
else
- nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
return 0;
}
static int
gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
const u32 soff = gm204_sor_soff(outp);
const u32 loff = gm204_sor_loff(outp);
u32 mask = 0, i;
for (i = 0; i < nr; i++)
- mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+ mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
- nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
- nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
- nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+ nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+ break;
+ );
return 0;
}
@@ -90,9 +93,9 @@ static int
gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
- struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
- struct nvkm_bios *bios = nvkm_bios(priv);
- const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ const u32 shift = gm204_sor_dp_lane_map(device, ln);
const u32 loff = gm204_sor_loff(outp);
u32 addr, data[4];
u8 ver, hdr, cnt, len;
@@ -109,31 +112,32 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
&ver, &hdr, &cnt, &len, &ocfg);
if (!addr)
return -EINVAL;
+ ocfg.tx_pu &= 0x0f;
- data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
- data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
- data[2] = nv_rd32(priv, 0x61c130 + loff);
- if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
- data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
- nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
- nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
- nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
- data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
- nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+ data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nvkm_rd32(device, 0x61c130 + loff);
+ if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0)
+ data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8);
+ nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nvkm_wr32(device, 0x61c130 + loff, data[2]);
+ data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+ nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
return 0;
}
-struct nvkm_output_dp_impl
-gm204_sor_dp_impl = {
- .base.base.handle = DCB_OUTPUT_DP,
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_output_dp_ctor,
- .dtor = _nvkm_output_dp_dtor,
- .init = _nvkm_output_dp_init,
- .fini = _nvkm_output_dp_fini,
- },
+static const struct nvkm_output_dp_func
+gm204_sor_dp_func = {
.pattern = gm204_sor_dp_pattern,
.lnk_pwr = gm204_sor_dp_lnk_pwr,
- .lnk_ctl = gf110_sor_dp_lnk_ctl,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm204_sor_dp_drv_ctl,
};
+
+int
+gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+ struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+}
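
gm204_sor_dp_lnk_pwr() above shows the other recurring conversion in this merge: the old nv_wait(priv, reg, mask, value) helper becomes an open-coded nvkm_msec(device, 2000, ...) loop that keeps re-reading the register until the condition breaks out or the 2000ms budget expires. A simplified, self-contained model of that poll-with-timeout idiom (the real macro takes a statement block and is built on the nvkm timer subdev; the callback form below is an invention for illustration):

/* Hedged sketch of the nvkm_msec() poll-until-condition idiom. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static int poll_msec(uint32_t budget_ms, bool (*done)(void *ctx), void *ctx)
{
	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (done(ctx))
			return 0;		/* condition met in time */
		clock_gettime(CLOCK_MONOTONIC, &now);
		int64_t ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms >= budget_ms)
			return -1;		/* timed out; nvkm_msec likewise
						 * reports timeout as negative */
	}
}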
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index b229a311c78c..29e0d2a9a839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -33,6 +33,7 @@
int
nv50_sor_power(NV50_DISP_MTHD_V1)
{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
union {
struct nv50_disp_sor_pwr_v0 v0;
} *args = data;
@@ -40,17 +41,39 @@ nv50_sor_power(NV50_DISP_MTHD_V1)
u32 stat;
int ret;
- nv_ioctl(object, "disp sor pwr size %d\n", size);
+ nvif_ioctl(object, "disp sor pwr size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "disp sor pwr vers %d state %d\n",
- args->v0.version, args->v0.state);
+ nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
stat = !!args->v0.state;
} else
return ret;
- nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
- nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
- nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+ break;
+ );
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+ break;
+ );
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
return 0;
}
+
+static const struct nvkm_output_func
+nv50_sor_output_func = {
+};
+
+int
+nv50_sor_output_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_new_(&nv50_sor_output_func, disp,
+ index, dcbE, poutp);
+}
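
nv50_sor_power() above also illustrates the nvif argument convention used throughout these handlers: method arguments arrive as an anonymous union of versioned structs, nvif_unpack() checks that the blob is large enough and that its leading version byte falls in the supported range, and only then does the handler log and consume the fields. A rough standalone model of that check (the struct layout and two-argument version range are simplifications of the real macro, not its actual interface):

/* Hedged sketch of versioned-argument unpacking, loosely modelled on
 * nvif_unpack().  The struct layout here is invented. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct sor_pwr_v0 { uint8_t version; uint8_t state; uint8_t pad02[6]; };

static bool unpack_v0(const void *data, uint32_t size,
		      uint8_t vers_lo, uint8_t vers_hi,
		      struct sor_pwr_v0 *out)
{
	if (size < sizeof(*out))
		return false;			/* blob too small for v0 */
	memcpy(out, data, sizeof(*out));
	/* accept only the versions this handler understands */
	return out->version >= vers_lo && out->version <= vers_hi;
}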
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index c4622c7388d0..8bff95c6343f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -23,131 +23,119 @@
*/
#include <subdev/vga.h>
-#include <core/device.h>
-
u8
-nv_rdport(void *obj, int head, u16 port)
+nvkm_rdport(struct nvkm_device *device, int head, u16 port)
{
- struct nvkm_device *device = nv_device(obj);
-
if (device->card_type >= NV_50)
- return nv_rd08(obj, 0x601000 + port);
+ return nvkm_rd08(device, 0x601000 + port);
if (port == 0x03c0 || port == 0x03c1 || /* AR */
port == 0x03c2 || port == 0x03da || /* INP0 */
port == 0x03d4 || port == 0x03d5) /* CR */
- return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+ return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
if (port == 0x03c2 || port == 0x03cc || /* MISC */
port == 0x03c4 || port == 0x03c5 || /* SR */
port == 0x03ce || port == 0x03cf) { /* GR */
if (device->card_type < NV_40)
head = 0; /* CR44 selects head */
- return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+ return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
}
- nv_error(obj, "unknown vga port 0x%04x\n", port);
return 0x00;
}
void
-nv_wrport(void *obj, int head, u16 port, u8 data)
+nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
{
- struct nvkm_device *device = nv_device(obj);
-
if (device->card_type >= NV_50)
- nv_wr08(obj, 0x601000 + port, data);
+ nvkm_wr08(device, 0x601000 + port, data);
else
if (port == 0x03c0 || port == 0x03c1 || /* AR */
port == 0x03c2 || port == 0x03da || /* INP0 */
port == 0x03d4 || port == 0x03d5) /* CR */
- nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+ nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
else
if (port == 0x03c2 || port == 0x03cc || /* MISC */
port == 0x03c4 || port == 0x03c5 || /* SR */
port == 0x03ce || port == 0x03cf) { /* GR */
if (device->card_type < NV_40)
head = 0; /* CR44 selects head */
- nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
- } else
- nv_error(obj, "unknown vga port 0x%04x\n", port);
+ nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
+ }
}
u8
-nv_rdvgas(void *obj, int head, u8 index)
+nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
{
- nv_wrport(obj, head, 0x03c4, index);
- return nv_rdport(obj, head, 0x03c5);
+ nvkm_wrport(device, head, 0x03c4, index);
+ return nvkm_rdport(device, head, 0x03c5);
}
void
-nv_wrvgas(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
{
- nv_wrport(obj, head, 0x03c4, index);
- nv_wrport(obj, head, 0x03c5, value);
+ nvkm_wrport(device, head, 0x03c4, index);
+ nvkm_wrport(device, head, 0x03c5, value);
}
u8
-nv_rdvgag(void *obj, int head, u8 index)
+nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
{
- nv_wrport(obj, head, 0x03ce, index);
- return nv_rdport(obj, head, 0x03cf);
+ nvkm_wrport(device, head, 0x03ce, index);
+ return nvkm_rdport(device, head, 0x03cf);
}
void
-nv_wrvgag(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
{
- nv_wrport(obj, head, 0x03ce, index);
- nv_wrport(obj, head, 0x03cf, value);
+ nvkm_wrport(device, head, 0x03ce, index);
+ nvkm_wrport(device, head, 0x03cf, value);
}
u8
-nv_rdvgac(void *obj, int head, u8 index)
+nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
{
- nv_wrport(obj, head, 0x03d4, index);
- return nv_rdport(obj, head, 0x03d5);
+ nvkm_wrport(device, head, 0x03d4, index);
+ return nvkm_rdport(device, head, 0x03d5);
}
void
-nv_wrvgac(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
{
- nv_wrport(obj, head, 0x03d4, index);
- nv_wrport(obj, head, 0x03d5, value);
+ nvkm_wrport(device, head, 0x03d4, index);
+ nvkm_wrport(device, head, 0x03d5, value);
}
u8
-nv_rdvgai(void *obj, int head, u16 port, u8 index)
+nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
{
- if (port == 0x03c4) return nv_rdvgas(obj, head, index);
- if (port == 0x03ce) return nv_rdvgag(obj, head, index);
- if (port == 0x03d4) return nv_rdvgac(obj, head, index);
- nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+ if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
+ if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
+ if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
return 0x00;
}
void
-nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
{
- if (port == 0x03c4) nv_wrvgas(obj, head, index, value);
- else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
- else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
- else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+ if (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
+ else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
+ else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
}
bool
-nv_lockvgac(void *obj, bool lock)
+nvkm_lockvgac(struct nvkm_device *device, bool lock)
{
- struct nvkm_device *dev = nv_device(obj);
-
- bool locked = !nv_rdvgac(obj, 0, 0x1f);
+ bool locked = !nvkm_rdvgac(device, 0, 0x1f);
u8 data = lock ? 0x99 : 0x57;
- if (dev->card_type < NV_50)
- nv_wrvgac(obj, 0, 0x1f, data);
+ if (device->card_type < NV_50)
+ nvkm_wrvgac(device, 0, 0x1f, data);
else
- nv_wrvgac(obj, 0, 0x3f, data);
- if (dev->chipset == 0x11) {
- if (!(nv_rd32(obj, 0x001084) & 0x10000000))
- nv_wrvgac(obj, 1, 0x1f, data);
+ nvkm_wrvgac(device, 0, 0x3f, data);
+ if (device->chipset == 0x11) {
+ if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
+ nvkm_wrvgac(device, 1, 0x1f, data);
}
return locked;
}
@@ -171,16 +159,16 @@ nv_lockvgac(void *obj, bool lock)
* other values are treated as literal values to set
*/
u8
-nv_rdvgaowner(void *obj)
+nvkm_rdvgaowner(struct nvkm_device *device)
{
- if (nv_device(obj)->card_type < NV_50) {
- if (nv_device(obj)->chipset == 0x11) {
- u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+ if (device->card_type < NV_50) {
+ if (device->chipset == 0x11) {
+ u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
if (tied == 0) {
- u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
- u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
- u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
- u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+ u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
+ u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
+ u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
+ u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
if (slA && !tvA) return 0x00;
if (slB && !tvB) return 0x03;
if (slA) return 0x00;
@@ -190,30 +178,28 @@ nv_rdvgaowner(void *obj)
return 0x04;
}
- return nv_rdvgac(obj, 0, 0x44);
+ return nvkm_rdvgac(device, 0, 0x44);
}
- nv_error(obj, "rdvgaowner after nv4x\n");
return 0x00;
}
void
-nv_wrvgaowner(void *obj, u8 select)
+nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
{
- if (nv_device(obj)->card_type < NV_50) {
+ if (device->card_type < NV_50) {
u8 owner = (select == 1) ? 3 : select;
- if (nv_device(obj)->chipset == 0x11) {
+ if (device->chipset == 0x11) {
/* workaround hw lockup bug */
- nv_rdvgac(obj, 0, 0x1f);
- nv_rdvgac(obj, 1, 0x1f);
+ nvkm_rdvgac(device, 0, 0x1f);
+ nvkm_rdvgac(device, 1, 0x1f);
}
- nv_wrvgac(obj, 0, 0x44, owner);
+ nvkm_wrvgac(device, 0, 0x44, owner);
- if (nv_device(obj)->chipset == 0x11) {
- nv_wrvgac(obj, 0, 0x2e, owner);
- nv_wrvgac(obj, 0, 0x2e, owner);
+ if (device->chipset == 0x11) {
+ nvkm_wrvgac(device, 0, 0x2e, owner);
+ nvkm_wrvgac(device, 0, 0x2e, owner);
}
- } else
- nv_error(obj, "wrvgaowner after nv4x\n");
+ }
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
new file mode 100644
index 000000000000..c4a2ce9b0d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/engine/dma/base.o
+nvkm-y += nvkm/engine/dma/nv04.o
+nvkm-y += nvkm/engine/dma/nv50.o
+nvkm-y += nvkm/engine/dma/gf100.o
+nvkm-y += nvkm/engine/dma/gf119.o
+
+nvkm-y += nvkm/engine/dma/user.o
+nvkm-y += nvkm/engine/dma/usernv04.o
+nvkm-y += nvkm/engine/dma/usernv50.o
+nvkm-y += nvkm/engine/dma/usergf100.o
+nvkm-y += nvkm/engine/dma/usergf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
new file mode 100644
index 000000000000..9769fc0d5351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/client.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
+{
+ struct rb_node *node = client->dmaroot.rb_node;
+ while (node) {
+ struct nvkm_dmaobj *dmaobj =
+ container_of(node, typeof(*dmaobj), rb);
+ if (object < dmaobj->handle)
+ node = node->rb_left;
+ else
+ if (object > dmaobj->handle)
+ node = node->rb_right;
+ else
+ return dmaobj;
+ }
+ return NULL;
+}
+
+static int
+nvkm_dma_oclass_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_dma *dma = nvkm_dma(oclass->engine);
+ struct nvkm_dmaobj *dmaobj = NULL;
+ struct nvkm_client *client = oclass->client;
+ struct rb_node **ptr = &client->dmaroot.rb_node;
+ struct rb_node *parent = NULL;
+ int ret;
+
+ ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
+ if (dmaobj)
+ *pobject = &dmaobj->object;
+ if (ret)
+ return ret;
+
+ dmaobj->handle = oclass->object;
+
+ while (*ptr) {
+ struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
+ parent = *ptr;
+ if (dmaobj->handle < obj->handle)
+ ptr = &parent->rb_left;
+ else
+ if (dmaobj->handle > obj->handle)
+ ptr = &parent->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&dmaobj->rb, parent, ptr);
+ rb_insert_color(&dmaobj->rb, &client->dmaroot);
+ return 0;
+}
+
+static const struct nvkm_device_oclass
+nvkm_dma_oclass_base = {
+ .ctor = nvkm_dma_oclass_new,
+};
+
+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+ oclass, data, size, pobject);
+}
+
+static const struct nvkm_sclass
+nvkm_dma_sclass[] = {
+ { 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+ { 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+ { 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+};
+
+static int
+nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
+ const struct nvkm_device_oclass **class)
+{
+ const int count = ARRAY_SIZE(nvkm_dma_sclass);
+ if (index < count) {
+ const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
+ sclass->base = oclass[0];
+ sclass->engn = oclass;
+ *class = &nvkm_dma_oclass_base;
+ return index;
+ }
+ return count;
+}
+
+static int
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
+{
+ const int count = ARRAY_SIZE(nvkm_dma_sclass);
+ if (index < count) {
+ oclass->base = nvkm_dma_sclass[index];
+ return index;
+ }
+ return count;
+}
+
+static void *
+nvkm_dma_dtor(struct nvkm_engine *engine)
+{
+ return nvkm_dma(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_dma = {
+ .dtor = nvkm_dma_dtor,
+ .base.sclass = nvkm_dma_oclass_base_get,
+ .fifo.sclass = nvkm_dma_oclass_fifo_get,
+};
+
+int
+nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
+ int index, struct nvkm_dma **pdma)
+{
+ struct nvkm_dma *dma;
+
+ if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
+ return -ENOMEM;
+ dma->func = func;
+
+ return nvkm_engine_ctor(&nvkm_dma, device, index,
+ 0, true, &dma->engine);
+}
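
base.c above keeps each client's DMA objects in a per-client red-black tree keyed by object handle: nvkm_dma_search() descends left or right on handle comparison, and nvkm_dma_oclass_new() performs the usual rb_link_node()/rb_insert_color() insertion, returning -EEXIST for a duplicate handle. The same compare-and-descend logic, restated over a plain unbalanced binary search tree so it stands alone outside the kernel's rbtree API:

/* Hedged sketch: the handle-keyed lookup from nvkm_dma_search(),
 * over a plain BST instead of struct rb_node (no rebalancing). */
#include <stddef.h>
#include <stdint.h>

struct dmaobj_node {
	uint64_t handle;
	struct dmaobj_node *left, *right;
};

static struct dmaobj_node *
search(struct dmaobj_node *node, uint64_t handle)
{
	while (node) {
		if (handle < node->handle)
			node = node->left;	/* smaller keys live left */
		else if (handle > node->handle)
			node = node->right;	/* larger keys live right */
		else
			return node;		/* exact handle match */
	}
	return NULL;				/* handle not bound */
}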
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
new file mode 100644
index 000000000000..efec5d322179
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf100_dma = {
+ .class_new = gf100_dmaobj_new,
+};
+
+int
+gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+ return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
new file mode 100644
index 000000000000..34c766039aed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf119_dma = {
+ .class_new = gf119_dmaobj_new,
+};
+
+int
+gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+ return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
new file mode 100644
index 000000000000..30747a0ce488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv04_dma = {
+ .class_new = nv04_dmaobj_new,
+};
+
+int
+nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+ return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
new file mode 100644
index 000000000000..77aca7b71c83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv50_dma = {
+ .class_new = nv50_dmaobj_new,
+};
+
+int
+nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+ return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
new file mode 100644
index 000000000000..deb37ee55c0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_PRIV_H__
+#define __NVKM_DMA_PRIV_H__
+#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
+#include <engine/dma.h>
+
+struct nvkm_dmaobj_func {
+ int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
+ struct nvkm_gpuobj **);
+};
+
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
+ int index, struct nvkm_dma **);
+
+struct nvkm_dma_func {
+ int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_dmaobj **);
+};
+#endif
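
priv.h above defines nvkm_dma(p) via container_of(), the standard kernel idiom for recovering the wrapping structure from a pointer to one of its embedded members — here, from the struct nvkm_engine embedded in struct nvkm_dma. A self-contained model of the pointer arithmetic involved (the structs below are stand-ins):

/* Hedged sketch of the container_of() downcast behind nvkm_dma(p). */
#include <stddef.h>

struct engine { int index; };
struct dma { const void *func; struct engine engine; };

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dma *to_dma(struct engine *e)
{
	/* step back from the embedded member to the enclosing struct */
	return container_of_sketch(e, struct dma, engine);
}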
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index a2b60d86baba..45ab062661a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "user.h"
#include <core/client.h>
-#include <core/device.h>
+#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
@@ -32,56 +32,56 @@
#include <nvif/unpack.h>
static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **pgpuobj)
+nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- const struct nvkm_dmaeng_impl *impl = (void *)
- nv_oclass(nv_object(dmaobj)->engine);
- int ret = 0;
-
- if (nv_object(dmaobj) == parent) { /* ctor bind */
- if (nv_mclass(parent->parent) == NV_DEVICE) {
- /* delayed, or no, binding */
- return 0;
- }
- ret = impl->bind(dmaobj, parent, pgpuobj);
- if (ret == 0)
- nvkm_object_ref(NULL, &parent);
- return ret;
- }
+ struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+ return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
+}
- return impl->bind(dmaobj, parent, pgpuobj);
+static void *
+nvkm_dmaobj_dtor(struct nvkm_object *base)
+{
+ struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+ if (!RB_EMPTY_NODE(&dmaobj->rb))
+ rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
+ return dmaobj;
}
+static const struct nvkm_object_func
+nvkm_dmaobj_func = {
+ .dtor = nvkm_dmaobj_dtor,
+ .bind = nvkm_dmaobj_bind,
+};
+
int
-nvkm_dmaobj_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void **pdata, u32 *psize,
- int length, void **pobject)
+nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
+ const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
+ struct nvkm_dmaobj *dmaobj)
{
union {
struct nv_dma_v0 v0;
} *args = *pdata;
- struct nvkm_instmem *instmem = nvkm_instmem(parent);
- struct nvkm_client *client = nvkm_client(parent);
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_dmaobj *dmaobj;
+ struct nvkm_device *device = dma->engine.subdev.device;
+ struct nvkm_client *client = oclass->client;
+ struct nvkm_object *parent = oclass->parent;
+ struct nvkm_instmem *instmem = device->imem;
+ struct nvkm_fb *fb = device->fb;
void *data = *pdata;
u32 size = *psize;
int ret;
- ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
- dmaobj = *pobject;
- if (ret)
- return ret;
+ nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
+ dmaobj->func = func;
+ dmaobj->dma = dma;
+ RB_CLEAR_NODE(&dmaobj->rb);
- nv_ioctl(parent, "create dma size %d\n", *psize);
+ nvif_ioctl(parent, "create dma size %d\n", *psize);
if (nvif_unpack(args->v0, 0, 0, true)) {
- nv_ioctl(parent, "create dma vers %d target %d access %d "
- "start %016llx limit %016llx\n",
- args->v0.version, args->v0.target, args->v0.access,
- args->v0.start, args->v0.limit);
+ nvif_ioctl(parent, "create dma vers %d target %d access %d "
+ "start %016llx limit %016llx\n",
+ args->v0.version, args->v0.target, args->v0.access,
+ args->v0.start, args->v0.limit);
dmaobj->target = args->v0.target;
dmaobj->access = args->v0.access;
dmaobj->start = args->v0.start;
@@ -101,7 +101,7 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
break;
case NV_DMA_V0_TARGET_VRAM:
if (!client->super) {
- if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
+ if (dmaobj->limit >= fb->ram->size - instmem->reserved)
return -EACCES;
if (device->card_type >= NV_50)
return -EACCES;
@@ -142,23 +142,3 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
return ret;
}
-
-int
-_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- const struct nvkm_dmaeng_impl *impl = (void *)oclass;
- struct nvkm_dmaeng *dmaeng;
- int ret;
-
- ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
- "dmaobj", &dmaeng);
- *pobject = nv_object(dmaeng);
- if (ret)
- return ret;
-
- nv_engine(dmaeng)->sclass = impl->sclass;
- dmaeng->bind = nvkm_dmaobj_bind;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
new file mode 100644
index 000000000000..69a7f1034024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_USER_H__
+#define __NVKM_DMA_USER_H__
+#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
+#include "priv.h"
+
+int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
+ const struct nvkm_oclass *, void **data, u32 *size,
+ struct nvkm_dmaobj *);
+
+int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+ struct nvkm_dmaobj **);
+int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+ struct nvkm_dmaobj **);
+int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+ struct nvkm_dmaobj **);
+int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+ struct nvkm_dmaobj **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
new file mode 100644
index 000000000000..13e341cc4e32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf100_dmaobj {
+ struct nvkm_dmaobj base;
+ u32 flags0;
+ u32 flags5;
+};
+
+static int
+gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
+ struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+ int ret;
+
+ ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+ nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+ nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+ nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+ upper_32_bits(dmaobj->base.start));
+ nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+ nvkm_done(*pgpuobj);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf100_dmaobj_func = {
+ .bind = gf100_dmaobj_bind,
+};
+
+int
+gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+ union {
+ struct gf100_dma_v0 v0;
+ } *args;
+ struct nvkm_object *parent = oclass->parent;
+ struct gf100_dmaobj *dmaobj;
+ u32 kind, user, unkn;
+ int ret;
+
+ if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pdmaobj = &dmaobj->base;
+
+ ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
+ &data, &size, &dmaobj->base);
+ if (ret)
+ return ret;
+
+ args = data;
+
+ nvif_ioctl(parent, "create gf100 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent,
+ "create gf100 dma vers %d priv %d kind %02x\n",
+ args->v0.version, args->v0.priv, args->v0.kind);
+ kind = args->v0.kind;
+ user = args->v0.priv;
+ unkn = 0;
+ } else
+ if (size == 0) {
+ if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+ kind = GF100_DMA_V0_KIND_PITCH;
+ user = GF100_DMA_V0_PRIV_US;
+ unkn = 2;
+ } else {
+ kind = GF100_DMA_V0_KIND_VM;
+ user = GF100_DMA_V0_PRIV_VM;
+ unkn = 0;
+ }
+ } else
+ return ret;
+
+ if (user > 2)
+ return -EINVAL;
+ dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
+ dmaobj->flags5 |= (unkn << 16);
+
+ switch (dmaobj->base.target) {
+ case NV_MEM_TARGET_VM:
+ dmaobj->flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ dmaobj->flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ dmaobj->flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmaobj->flags0 |= 0x00030000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->base.access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ dmaobj->flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ dmaobj->flags0 |= 0x00080000;
+ break;
+ }
+
+ return 0;
+}
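
usergf100.c composes the first word of the on-GPU DMA object by ORing fields into flags0: the class id in the low bits, the priv level at bit 20, the memory kind at bit 22, then a target code (0x00010000 for VRAM, 0x00020000 for PCI, ...) and access bits on top. The packing made explicit, as a sketch mirroring the code above rather than hardware documentation:

/* Hedged sketch of the gf100 dmaobj flags0 packing shown above. */
#include <stdint.h>

static uint32_t gf100_flags0(uint32_t oclass, uint32_t user, uint32_t kind,
			     uint32_t target_bits, uint32_t access_bits)
{
	uint32_t flags0 = oclass;	/* class id in the low bits */
	flags0 |= user << 20;		/* priv level (rejected if > 2) */
	flags0 |= kind << 22;		/* memory kind from the args */
	flags0 |= target_bits;		/* 0x00010000 == VRAM, ... */
	flags0 |= access_bits;		/* 0x00040000 == RO, 0x00080000 == RW */
	return flags0;
}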
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
new file mode 100644
index 000000000000..0e1af8b4db84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf119_dmaobj {
+ struct nvkm_dmaobj base;
+ u32 flags0;
+};
+
+static int
+gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
+ struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+ int ret;
+
+ ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+ nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
+ nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf119_dmaobj_func = {
+ .bind = gf119_dmaobj_bind,
+};
+
+int
+gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+ union {
+ struct gf119_dma_v0 v0;
+ } *args;
+ struct nvkm_object *parent = oclass->parent;
+ struct gf119_dmaobj *dmaobj;
+ u32 kind, page;
+ int ret;
+
+ if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pdmaobj = &dmaobj->base;
+
+ ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
+ &data, &size, &dmaobj->base);
+ if (ret)
+ return ret;
+
+ args = data;
+
+ nvif_ioctl(parent, "create gf119 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent,
+ "create gf100 dma vers %d page %d kind %02x\n",
+ args->v0.version, args->v0.page, args->v0.kind);
+ kind = args->v0.kind;
+ page = args->v0.page;
+ } else
+ if (size == 0) {
+ if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+ kind = GF119_DMA_V0_KIND_PITCH;
+ page = GF119_DMA_V0_PAGE_SP;
+ } else {
+ kind = GF119_DMA_V0_KIND_VM;
+ page = GF119_DMA_V0_PAGE_LP;
+ }
+ } else
+ return ret;
+
+ if (page > 1)
+ return -EINVAL;
+ dmaobj->flags0 = (kind << 20) | (page << 6);
+
+ switch (dmaobj->base.target) {
+ case NV_MEM_TARGET_VRAM:
+ dmaobj->flags0 |= 0x00000009;
+ break;
+ case NV_MEM_TARGET_VM:
+ case NV_MEM_TARGET_PCI:
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ /* XXX: don't currently know how to construct a real one
+ * of these. we only use them to represent pushbufs
+ * on these chipsets, and the classes that use them
+ * deal with the target themselves.
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
new file mode 100644
index 000000000000..c95942ef8216
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
+#include "user.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu/nv04.h>
+
+#include <nvif/class.h>
+
+struct nv04_dmaobj {
+ struct nvkm_dmaobj base;
+ bool clone;
+ u32 flags0;
+ u32 flags2;
+};
+
+static int
+nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
+ struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+ u64 offset = dmaobj->base.start & 0xfffff000;
+ u64 adjust = dmaobj->base.start & 0x00000fff;
+ u32 length = dmaobj->base.limit - dmaobj->base.start;
+ int ret;
+
+ if (dmaobj->clone) {
+ struct nv04_mmu *mmu = nv04_mmu(device->mmu);
+ struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+ if (!dmaobj->base.start)
+ return nvkm_gpuobj_wrap(pgt, pgpuobj);
+ nvkm_kmap(pgt);
+ offset = nvkm_ro32(pgt, 8 + (offset >> 10));
+ offset &= 0xfffff000;
+ nvkm_done(pgt);
+ }
+
+ ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
+ nvkm_wo32(*pgpuobj, 0x04, length);
+ nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
+ nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+ nvkm_done(*pgpuobj);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv04_dmaobj_func = {
+ .bind = nv04_dmaobj_bind,
+};
+
+int
+nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+ struct nvkm_device *device = dma->engine.subdev.device;
+ struct nv04_dmaobj *dmaobj;
+ int ret;
+
+ if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pdmaobj = &dmaobj->base;
+
+ ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
+ &data, &size, &dmaobj->base);
+ if (ret)
+ return ret;
+
+ if (dmaobj->base.target == NV_MEM_TARGET_VM) {
+ if (device->mmu->func == &nv04_mmu)
+ dmaobj->clone = true;
+ dmaobj->base.target = NV_MEM_TARGET_PCI;
+ dmaobj->base.access = NV_MEM_ACCESS_RW;
+ }
+
+ dmaobj->flags0 = oclass->base.oclass;
+ switch (dmaobj->base.target) {
+ case NV_MEM_TARGET_VRAM:
+ dmaobj->flags0 |= 0x00003000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ dmaobj->flags0 |= 0x00023000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmaobj->flags0 |= 0x00033000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->base.access) {
+ case NV_MEM_ACCESS_RO:
+ dmaobj->flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ dmaobj->flags0 |= 0x00008000;
+ case NV_MEM_ACCESS_RW:
+ dmaobj->flags2 |= 0x00000002;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
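
nv04_dmaobj_bind() above splits the DMA start address into a page-aligned offset (start & 0xfffff000) and a sub-page adjust (start & 0x00000fff), then stores the adjust in bits 31:20 of the object's first word. A quick worked example with an invented start address:

/* Hedged sketch of the nv04 start-address split, with made-up input. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start  = 0x12345678;		/* example DMA start */
	uint64_t offset = start & 0xfffff000;	/* == 0x12345000 */
	uint64_t adjust = start & 0x00000fff;	/* == 0x678 */
	/* word 0 carries the adjust in bits 31:20 */
	printf("offset=%#llx adjust=%#llx adjust<<20=%#llx\n",
	       (unsigned long long)offset,
	       (unsigned long long)adjust,
	       (unsigned long long)(adjust << 20));
	return 0;
}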
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
new file mode 100644
index 000000000000..5b7ce313ea14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv50_dmaobj {
+ struct nvkm_dmaobj base;
+ u32 flags0;
+ u32 flags5;
+};
+
+static int
+nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
+ struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+ int ret;
+
+ ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+ nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+ nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+ nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+ upper_32_bits(dmaobj->base.start));
+ nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+ nvkm_done(*pgpuobj);
+ }
+
+ return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv50_dmaobj_func = {
+ .bind = nv50_dmaobj_bind,
+};
+
+int
+nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+ union {
+ struct nv50_dma_v0 v0;
+ } *args;
+ struct nvkm_object *parent = oclass->parent;
+ struct nv50_dmaobj *dmaobj;
+ u32 user, part, comp, kind;
+ int ret;
+
+ if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pdmaobj = &dmaobj->base;
+
+ ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
+ &data, &size, &dmaobj->base);
+ if (ret)
+ return ret;
+
+ args = data;
+
+ nvif_ioctl(parent, "create nv50 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+ "comp %d kind %02x\n", args->v0.version,
+ args->v0.priv, args->v0.part, args->v0.comp,
+ args->v0.kind);
+ user = args->v0.priv;
+ part = args->v0.part;
+ comp = args->v0.comp;
+ kind = args->v0.kind;
+ } else
+ if (size == 0) {
+ if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+ user = NV50_DMA_V0_PRIV_US;
+ part = NV50_DMA_V0_PART_256;
+ comp = NV50_DMA_V0_COMP_NONE;
+ kind = NV50_DMA_V0_KIND_PITCH;
+ } else {
+ user = NV50_DMA_V0_PRIV_VM;
+ part = NV50_DMA_V0_PART_VM;
+ comp = NV50_DMA_V0_COMP_VM;
+ kind = NV50_DMA_V0_KIND_VM;
+ }
+ } else
+ return ret;
+
+ if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+ return -EINVAL;
+ dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
+ oclass->base.oclass;
+ dmaobj->flags5 = (part << 16);
+
+ switch (dmaobj->base.target) {
+ case NV_MEM_TARGET_VM:
+ dmaobj->flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ dmaobj->flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ dmaobj->flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmaobj->flags0 |= 0x00030000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->base.access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ dmaobj->flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ dmaobj->flags0 |= 0x00080000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
deleted file mode 100644
index 7529632dbedb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-nvkm-y += nvkm/engine/dmaobj/base.o
-nvkm-y += nvkm/engine/dmaobj/nv04.o
-nvkm-y += nvkm/engine/dmaobj/nv50.o
-nvkm-y += nvkm/engine/dmaobj/gf100.o
-nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
deleted file mode 100644
index f880e5167e45..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_dmaobj_priv {
- struct nvkm_dmaobj base;
- u32 flags0;
- u32 flags5;
-};
-
-static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **pgpuobj)
-{
- struct gf100_dmaobj_priv *priv = (void *)dmaobj;
- int ret;
-
- if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
- switch (nv_mclass(parent->parent)) {
- case GT214_DISP_CORE_CHANNEL_DMA:
- case GT214_DISP_BASE_CHANNEL_DMA:
- case GT214_DISP_OVERLAY_CHANNEL_DMA:
- break;
- default:
- return -EINVAL;
- }
- } else
- return 0;
-
- ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
- upper_32_bits(priv->base.start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, priv->flags5);
- }
-
- return ret;
-}
-
-static int
-gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_dmaeng *dmaeng = (void *)engine;
- union {
- struct gf100_dma_v0 v0;
- } *args;
- struct gf100_dmaobj_priv *priv;
- u32 kind, user, unkn;
- int ret;
-
- ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
- args = data;
-
- nv_ioctl(parent, "create gf100 dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
- args->v0.version, args->v0.priv, args->v0.kind);
- kind = args->v0.kind;
- user = args->v0.priv;
- unkn = 0;
- } else
- if (size == 0) {
- if (priv->base.target != NV_MEM_TARGET_VM) {
- kind = GF100_DMA_V0_KIND_PITCH;
- user = GF100_DMA_V0_PRIV_US;
- unkn = 2;
- } else {
- kind = GF100_DMA_V0_KIND_VM;
- user = GF100_DMA_V0_PRIV_VM;
- unkn = 0;
- }
- } else
- return ret;
-
- if (user > 2)
- return -EINVAL;
- priv->flags0 |= (kind << 22) | (user << 20);
- priv->flags5 |= (unkn << 16);
-
- switch (priv->base.target) {
- case NV_MEM_TARGET_VM:
- priv->flags0 |= 0x00000000;
- break;
- case NV_MEM_TARGET_VRAM:
- priv->flags0 |= 0x00010000;
- break;
- case NV_MEM_TARGET_PCI:
- priv->flags0 |= 0x00020000;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- priv->flags0 |= 0x00030000;
- break;
- default:
- return -EINVAL;
- }
-
- switch (priv->base.access) {
- case NV_MEM_ACCESS_VM:
- break;
- case NV_MEM_ACCESS_RO:
- priv->flags0 |= 0x00040000;
- break;
- case NV_MEM_ACCESS_WO:
- case NV_MEM_ACCESS_RW:
- priv->flags0 |= 0x00080000;
- break;
- }
-
- return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf100_dmaobj_ofuncs = {
- .ctor = gf100_dmaobj_ctor,
- .dtor = _nvkm_dmaobj_dtor,
- .init = _nvkm_dmaobj_init,
- .fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf100_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
- {}
-};
-
-struct nvkm_oclass *
-gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
- .base.handle = NV_ENGINE(DMAOBJ, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_dmaeng_ctor,
- .dtor = _nvkm_dmaeng_dtor,
- .init = _nvkm_dmaeng_init,
- .fini = _nvkm_dmaeng_fini,
- },
- .sclass = gf100_dmaeng_sclass,
- .bind = gf100_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
deleted file mode 100644
index bf8f0f20976c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf110_dmaobj_priv {
- struct nvkm_dmaobj base;
- u32 flags0;
-};
-
-static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **pgpuobj)
-{
- struct gf110_dmaobj_priv *priv = (void *)dmaobj;
- int ret;
-
- if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
- switch (nv_mclass(parent->parent)) {
- case GF110_DISP_CORE_CHANNEL_DMA:
- case GK104_DISP_CORE_CHANNEL_DMA:
- case GK110_DISP_CORE_CHANNEL_DMA:
- case GM107_DISP_CORE_CHANNEL_DMA:
- case GM204_DISP_CORE_CHANNEL_DMA:
- case GF110_DISP_BASE_CHANNEL_DMA:
- case GK104_DISP_BASE_CHANNEL_DMA:
- case GK110_DISP_BASE_CHANNEL_DMA:
- case GF110_DISP_OVERLAY_CONTROL_DMA:
- case GK104_DISP_OVERLAY_CONTROL_DMA:
- break;
- default:
- return -EINVAL;
- }
- } else
- return 0;
-
- ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, priv->flags0);
- nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
- nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
- nv_wo32(*pgpuobj, 0x0c, 0x00000000);
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, 0x00000000);
- }
-
- return ret;
-}
-
-static int
-gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_dmaeng *dmaeng = (void *)engine;
- union {
- struct gf110_dma_v0 v0;
- } *args;
- struct gf110_dmaobj_priv *priv;
- u32 kind, page;
- int ret;
-
- ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
- args = data;
-
- nv_ioctl(parent, "create gf110 dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
-	nv_ioctl(parent, "create gf110 dma vers %d page %d kind %02x\n",
- args->v0.version, args->v0.page, args->v0.kind);
- kind = args->v0.kind;
- page = args->v0.page;
- } else
- if (size == 0) {
- if (priv->base.target != NV_MEM_TARGET_VM) {
- kind = GF110_DMA_V0_KIND_PITCH;
- page = GF110_DMA_V0_PAGE_SP;
- } else {
- kind = GF110_DMA_V0_KIND_VM;
- page = GF110_DMA_V0_PAGE_LP;
- }
- } else
- return ret;
-
- if (page > 1)
- return -EINVAL;
- priv->flags0 = (kind << 20) | (page << 6);
-
- switch (priv->base.target) {
- case NV_MEM_TARGET_VRAM:
- priv->flags0 |= 0x00000009;
- break;
- case NV_MEM_TARGET_VM:
- case NV_MEM_TARGET_PCI:
- case NV_MEM_TARGET_PCI_NOSNOOP:
- /* XXX: don't currently know how to construct a real one
- * of these. we only use them to represent pushbufs
- * on these chipsets, and the classes that use them
- * deal with the target themselves.
- */
- break;
- default:
- return -EINVAL;
- }
-
- return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf110_dmaobj_ofuncs = {
- .ctor = gf110_dmaobj_ctor,
- .dtor = _nvkm_dmaobj_dtor,
- .init = _nvkm_dmaobj_init,
- .fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf110_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
- {}
-};
-
-struct nvkm_oclass *
-gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
- .base.handle = NV_ENGINE(DMAOBJ, 0xd0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_dmaeng_ctor,
- .dtor = _nvkm_dmaeng_dtor,
- .init = _nvkm_dmaeng_init,
- .fini = _nvkm_dmaeng_fini,
- },
- .sclass = gf110_dmaeng_sclass,
- .bind = gf110_dmaobj_bind,
-}.base;
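
Every constructor in these deleted files shares the same nvif argument-handling idiom: try to unpack a versioned args struct, fall back to defaults when the request carries no payload, and reject anything else. A plain-C approximation follows, assuming a hypothetical dma_v0 layout; the real nvif_unpack() also negotiates version ranges.

#include <string.h>
#include <errno.h>

struct dma_v0 { unsigned char version, page, kind; };

static int unpack_args(const void *data, unsigned size,
		       unsigned *page, unsigned *kind)
{
	struct dma_v0 v0;

	if (size >= sizeof(v0)) {	/* explicit, versioned arguments */
		memcpy(&v0, data, sizeof(v0));
		if (v0.version != 0)
			return -ENOSYS;
		*page = v0.page;
		*kind = v0.kind;
		return 0;
	}
	if (size == 0) {		/* no payload: use defaults, as above */
		*page = 0;
		*kind = 0;
		return 0;
	}
	return -ENOSYS;			/* garbage-sized request */
}
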
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
deleted file mode 100644
index b4379c2a2fb5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
-
-#include <nvif/class.h>
-
-struct nv04_dmaobj_priv {
- struct nvkm_dmaobj base;
- bool clone;
- u32 flags0;
- u32 flags2;
-};
-
-static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **pgpuobj)
-{
- struct nv04_dmaobj_priv *priv = (void *)dmaobj;
- struct nvkm_gpuobj *gpuobj;
- u64 offset = priv->base.start & 0xfffff000;
- u64 adjust = priv->base.start & 0x00000fff;
- u32 length = priv->base.limit - priv->base.start;
- int ret;
-
- if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
- switch (nv_mclass(parent->parent)) {
- case NV03_CHANNEL_DMA:
- case NV10_CHANNEL_DMA:
- case NV17_CHANNEL_DMA:
- case NV40_CHANNEL_DMA:
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (priv->clone) {
- struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
- struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
- if (!dmaobj->start)
- return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
- offset = nv_ro32(pgt, 8 + (offset >> 10));
- offset &= 0xfffff000;
- }
-
- ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
- *pgpuobj = gpuobj;
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
- nv_wo32(*pgpuobj, 0x04, length);
- nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
- nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
- }
-
- return ret;
-}
-
-static int
-nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_dmaeng *dmaeng = (void *)engine;
- struct nv04_mmu_priv *mmu = nv04_mmu(engine);
- struct nv04_dmaobj_priv *priv;
- int ret;
-
- ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
- *pobject = nv_object(priv);
- if (ret || (ret = -ENOSYS, size))
- return ret;
-
- if (priv->base.target == NV_MEM_TARGET_VM) {
- if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
- priv->clone = true;
- priv->base.target = NV_MEM_TARGET_PCI;
- priv->base.access = NV_MEM_ACCESS_RW;
- }
-
- priv->flags0 = nv_mclass(priv);
- switch (priv->base.target) {
- case NV_MEM_TARGET_VRAM:
- priv->flags0 |= 0x00003000;
- break;
- case NV_MEM_TARGET_PCI:
- priv->flags0 |= 0x00023000;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- priv->flags0 |= 0x00033000;
- break;
- default:
- return -EINVAL;
- }
-
- switch (priv->base.access) {
- case NV_MEM_ACCESS_RO:
- priv->flags0 |= 0x00004000;
- break;
- case NV_MEM_ACCESS_WO:
- priv->flags0 |= 0x00008000;
- case NV_MEM_ACCESS_RW:
- priv->flags2 |= 0x00000002;
- break;
- default:
- return -EINVAL;
- }
-
- return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv04_dmaobj_ofuncs = {
- .ctor = nv04_dmaobj_ctor,
- .dtor = _nvkm_dmaobj_dtor,
- .init = _nvkm_dmaobj_init,
- .fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv04_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
- {}
-};
-
-struct nvkm_oclass *
-nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
- .base.handle = NV_ENGINE(DMAOBJ, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_dmaeng_ctor,
- .dtor = _nvkm_dmaeng_dtor,
- .init = _nvkm_dmaeng_init,
- .fini = _nvkm_dmaeng_fini,
- },
- .sclass = nv04_dmaeng_sclass,
- .bind = nv04_dmaobj_bind,
-}.base;
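
nv04_dmaobj_bind() above splits the DMA start address into a page-aligned offset plus a sub-page "adjust" and writes a four-word context DMA object. An illustrative sketch of that layout, not a drop-in for the driver:

#include <stdint.h>

struct nv04_ctxdma { uint32_t w[4]; };

static void nv04_fill_ctxdma(struct nv04_ctxdma *obj, uint64_t start,
			     uint64_t limit, uint32_t flags0, uint32_t flags2)
{
	uint64_t offset = start & 0xfffff000;	/* page-aligned base */
	uint64_t adjust = start & 0x00000fff;	/* sub-page byte offset */

	obj->w[0] = flags0 | (adjust << 20);	/* adjust in bits 31:20 */
	obj->w[1] = limit - start;		/* length in bytes */
	obj->w[2] = flags2 | offset;
	obj->w[3] = flags2 | offset;
}
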
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
deleted file mode 100644
index 4d3c828fe0e6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv50_dmaobj_priv {
- struct nvkm_dmaobj base;
- u32 flags0;
- u32 flags5;
-};
-
-static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
- struct nvkm_gpuobj **pgpuobj)
-{
- struct nv50_dmaobj_priv *priv = (void *)dmaobj;
- int ret;
-
- if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
- switch (nv_mclass(parent->parent)) {
- case NV40_CHANNEL_DMA:
- case NV50_CHANNEL_GPFIFO:
- case G82_CHANNEL_GPFIFO:
- case NV50_DISP_CORE_CHANNEL_DMA:
- case G82_DISP_CORE_CHANNEL_DMA:
- case GT206_DISP_CORE_CHANNEL_DMA:
- case GT200_DISP_CORE_CHANNEL_DMA:
- case GT214_DISP_CORE_CHANNEL_DMA:
- case NV50_DISP_BASE_CHANNEL_DMA:
- case G82_DISP_BASE_CHANNEL_DMA:
- case GT200_DISP_BASE_CHANNEL_DMA:
- case GT214_DISP_BASE_CHANNEL_DMA:
- case NV50_DISP_OVERLAY_CHANNEL_DMA:
- case G82_DISP_OVERLAY_CHANNEL_DMA:
- case GT200_DISP_OVERLAY_CHANNEL_DMA:
- case GT214_DISP_OVERLAY_CHANNEL_DMA:
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
- upper_32_bits(priv->base.start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, priv->flags5);
- }
-
- return ret;
-}
-
-static int
-nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_dmaeng *dmaeng = (void *)engine;
- union {
- struct nv50_dma_v0 v0;
- } *args;
- struct nv50_dmaobj_priv *priv;
- u32 user, part, comp, kind;
- int ret;
-
- ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
- args = data;
-
- nv_ioctl(parent, "create nv50 dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
- "comp %d kind %02x\n", args->v0.version,
- args->v0.priv, args->v0.part, args->v0.comp,
- args->v0.kind);
- user = args->v0.priv;
- part = args->v0.part;
- comp = args->v0.comp;
- kind = args->v0.kind;
- } else
- if (size == 0) {
- if (priv->base.target != NV_MEM_TARGET_VM) {
- user = NV50_DMA_V0_PRIV_US;
- part = NV50_DMA_V0_PART_256;
- comp = NV50_DMA_V0_COMP_NONE;
- kind = NV50_DMA_V0_KIND_PITCH;
- } else {
- user = NV50_DMA_V0_PRIV_VM;
- part = NV50_DMA_V0_PART_VM;
- comp = NV50_DMA_V0_COMP_VM;
- kind = NV50_DMA_V0_KIND_VM;
- }
- } else
- return ret;
-
- if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
- return -EINVAL;
- priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
- priv->flags5 = (part << 16);
-
- switch (priv->base.target) {
- case NV_MEM_TARGET_VM:
- priv->flags0 |= 0x00000000;
- break;
- case NV_MEM_TARGET_VRAM:
- priv->flags0 |= 0x00010000;
- break;
- case NV_MEM_TARGET_PCI:
- priv->flags0 |= 0x00020000;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- priv->flags0 |= 0x00030000;
- break;
- default:
- return -EINVAL;
- }
-
- switch (priv->base.access) {
- case NV_MEM_ACCESS_VM:
- break;
- case NV_MEM_ACCESS_RO:
- priv->flags0 |= 0x00040000;
- break;
- case NV_MEM_ACCESS_WO:
- case NV_MEM_ACCESS_RW:
- priv->flags0 |= 0x00080000;
- break;
- default:
- return -EINVAL;
- }
-
- return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv50_dmaobj_ofuncs = {
- .ctor = nv50_dmaobj_ctor,
- .dtor = _nvkm_dmaobj_dtor,
- .init = _nvkm_dmaobj_init,
- .fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv50_dmaeng_sclass[] = {
- { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
- {}
-};
-
-struct nvkm_oclass *
-nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
- .base.handle = NV_ENGINE(DMAOBJ, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_dmaeng_ctor,
- .dtor = _nvkm_dmaeng_dtor,
- .init = _nvkm_dmaeng_init,
- .fini = _nvkm_dmaeng_fini,
- },
- .sclass = nv50_dmaeng_sclass,
- .bind = nv50_dmaobj_bind,
-}.base;
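
The nv50 constructor validates four unpacked fields against hard bounds before shifting them into flags0/flags5. A hedged sketch with a made-up helper name; the bounds and shift positions mirror nv50_dmaobj_ctor() above.

#include <stdint.h>
#include <errno.h>

static int nv50_pack_flags(uint32_t user, uint32_t part, uint32_t comp,
			   uint32_t kind, uint32_t *flags0, uint32_t *flags5)
{
	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
		return -EINVAL;
	*flags0 = (comp << 29) | (kind << 22) | (user << 20);
	*flags5 = part << 16;
	return 0;
}
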
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
deleted file mode 100644
index 44ae8a0ca65c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __NVKM_DMAOBJ_PRIV_H__
-#define __NVKM_DMAOBJ_PRIV_H__
-#include <engine/dmaobj.h>
-
-#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
- nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
-
-int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void **, u32 *,
- int, void **);
-#define _nvkm_dmaobj_dtor nvkm_object_destroy
-#define _nvkm_dmaobj_init nvkm_object_init
-#define _nvkm_dmaobj_fini nvkm_object_fini
-
-int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
-#define _nvkm_dmaeng_init _nvkm_engine_init
-#define _nvkm_dmaeng_fini _nvkm_engine_fini
-
-struct nvkm_dmaeng_impl {
- struct nvkm_oclass base;
- struct nvkm_oclass *sclass;
- int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
- struct nvkm_gpuobj **);
-};
-#endif
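
The nvkm_dmaobj_create() wrapper in this deleted header relies on a common nvkm idiom: callers pass a typed pointer-to-pointer, the macro forwards sizeof(**d) so one generic constructor can allocate the full derived object, and the void ** cast lets it serve every subclass. A self-contained userspace sketch of that idiom (names are illustrative):

#include <stdlib.h>
#include <errno.h>

static int object_create_(size_t size, void **pobject)
{
	*pobject = calloc(1, size);	/* zeroed, derived-type sized */
	return *pobject ? 0 : -ENOMEM;
}

#define object_create(d) object_create_(sizeof(**(d)), (void **)(d))

struct my_priv { int base_stub; int extra; };

static int demo(void)
{
	struct my_priv *priv;
	int ret = object_create(&priv);	/* allocates sizeof(struct my_priv) */

	free(priv);			/* sketch only; nvkm refcounts instead */
	return ret;
}
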
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 30958c19e61d..74000602fbb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -21,40 +21,95 @@
*/
#include <engine/falcon.h>
-#include <core/device.h>
+#include <core/gpuobj.h>
#include <subdev/timer.h>
+#include <engine/fifo.h>
-void
-nvkm_falcon_intr(struct nvkm_subdev *subdev)
+static int
+nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
- struct nvkm_falcon *falcon = (void *)subdev;
- u32 dispatch = nv_ro32(falcon, 0x01c);
- u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
+ int c = 0;
+
+ while (falcon->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = falcon->func->sclass[index];
+ return index;
+ }
+ }
+
+ return c;
+}
+
+static int
+nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+ align, true, parent, pgpuobj);
+}
+
+static const struct nvkm_object_func
+nvkm_falcon_cclass = {
+ .bind = nvkm_falcon_cclass_bind,
+};
+
+static void
+nvkm_falcon_intr(struct nvkm_engine *engine)
+{
+ struct nvkm_falcon *falcon = nvkm_falcon(engine);
+ struct nvkm_subdev *subdev = &falcon->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = falcon->addr;
+ u32 dest = nvkm_rd32(device, base + 0x01c);
+ u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
+ u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+
+ if (intr & 0x00000040) {
+ if (falcon->func->intr) {
+ falcon->func->intr(falcon, chan);
+ nvkm_wr32(device, base + 0x004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+ }
if (intr & 0x00000010) {
- nv_debug(falcon, "ucode halted\n");
- nv_wo32(falcon, 0x004, 0x00000010);
+ nvkm_debug(subdev, "ucode halted\n");
+ nvkm_wr32(device, base + 0x004, 0x00000010);
intr &= ~0x00000010;
}
if (intr) {
- nv_error(falcon, "unhandled intr 0x%08x\n", intr);
- nv_wo32(falcon, 0x004, intr);
+ nvkm_error(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, base + 0x004, intr);
}
-}
-u32
-_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_falcon *falcon = (void *)object;
- return nv_rd32(falcon, falcon->addr + addr);
+ nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
-_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
- struct nvkm_falcon *falcon = (void *)object;
- nv_wr32(falcon, falcon->addr + addr, data);
+ struct nvkm_falcon *falcon = nvkm_falcon(engine);
+ struct nvkm_device *device = falcon->engine.subdev.device;
+ const u32 base = falcon->addr;
+
+ if (!suspend) {
+ nvkm_memory_del(&falcon->core);
+ if (falcon->external) {
+ vfree(falcon->data.data);
+ vfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
+
+ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
+ return 0;
}
static void *
@@ -67,51 +122,66 @@ vmemdup(const void *src, size_t len)
return p;
}
-int
-_nvkm_falcon_init(struct nvkm_object *object)
+static int
+nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
- struct nvkm_device *device = nv_device(object);
- struct nvkm_falcon *falcon = (void *)object;
- const struct firmware *fw;
- char name[32] = "internal";
- int ret, i;
+ struct nvkm_falcon *falcon = nvkm_falcon(engine);
+ struct nvkm_subdev *subdev = &falcon->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = falcon->addr;
u32 caps;
- /* enable engine, and determine its capabilities */
- ret = nvkm_engine_init(&falcon->base);
- if (ret)
- return ret;
-
+ /* determine falcon capabilities */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
falcon->version = 0;
falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
} else {
- caps = nv_ro32(falcon, 0x12c);
+ caps = nvkm_rd32(device, base + 0x12c);
falcon->version = (caps & 0x0000000f);
falcon->secret = (caps & 0x00000030) >> 4;
}
- caps = nv_ro32(falcon, 0x108);
+ caps = nvkm_rd32(device, base + 0x108);
falcon->code.limit = (caps & 0x000001ff) << 8;
falcon->data.limit = (caps & 0x0003fe00) >> 1;
- nv_debug(falcon, "falcon version: %d\n", falcon->version);
- nv_debug(falcon, "secret level: %d\n", falcon->secret);
- nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
- nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+ nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
+ nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
+ nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
+ nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
+ return 0;
+}
+
+static int
+nvkm_falcon_init(struct nvkm_engine *engine)
+{
+ struct nvkm_falcon *falcon = nvkm_falcon(engine);
+ struct nvkm_subdev *subdev = &falcon->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char name[32] = "internal";
+ const u32 base = falcon->addr;
+ int ret, i;
/* wait for 'uc halted' to be signalled before continuing */
if (falcon->secret && falcon->version < 4) {
- if (!falcon->version)
- nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
- else
- nv_wait(falcon, 0x180, 0x80000000, 0);
- nv_wo32(falcon, 0x004, 0x00000010);
+ if (!falcon->version) {
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, base + 0x008) & 0x00000010)
+ break;
+ );
+ } else {
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
+ break;
+ );
+ }
+ nvkm_wr32(device, base + 0x004, 0x00000010);
}
/* disable all interrupts */
- nv_wo32(falcon, 0x014, 0xffffffff);
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
/* no default ucode provided by the engine implementation, try and
* locate a "self-bootstrapping" firmware image for the engine
@@ -120,7 +190,7 @@ _nvkm_falcon_init(struct nvkm_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret == 0) {
falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
@@ -139,10 +209,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
- nv_error(falcon, "unable to load firmware data\n");
- return ret;
+ nvkm_error(subdev, "unable to load firmware data\n");
+ return -ENODEV;
}
falcon->data.data = vmemdup(fw->data, fw->size);
@@ -154,10 +224,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
- nv_error(falcon, "unable to load firmware code\n");
- return ret;
+ nvkm_error(subdev, "unable to load firmware code\n");
+ return -ENODEV;
}
falcon->code.data = vmemdup(fw->data, fw->size);
@@ -167,111 +237,117 @@ _nvkm_falcon_init(struct nvkm_object *object)
return -ENOMEM;
}
- nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
- "static code/data segments" : "self-bootstrapping");
+ nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
+ "static code/data segments" : "self-bootstrapping");
/* ensure any "self-bootstrapping" firmware image is in vram */
if (!falcon->data.data && !falcon->core) {
- ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size,
- 256, 0, &falcon->core);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ falcon->code.size, 256, false,
+ &falcon->core);
if (ret) {
- nv_error(falcon, "core allocation failed, %d\n", ret);
+ nvkm_error(subdev, "core allocation failed, %d\n", ret);
return ret;
}
+ nvkm_kmap(falcon->core);
for (i = 0; i < falcon->code.size; i += 4)
- nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ nvkm_done(falcon->core);
}
/* upload firmware bootloader (or the full code segments) */
if (falcon->core) {
+ u64 addr = nvkm_memory_addr(falcon->core);
if (device->card_type < NV_C0)
- nv_wo32(falcon, 0x618, 0x04000000);
+ nvkm_wr32(device, base + 0x618, 0x04000000);
else
- nv_wo32(falcon, 0x618, 0x00000114);
- nv_wo32(falcon, 0x11c, 0);
- nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
- nv_wo32(falcon, 0x114, 0);
- nv_wo32(falcon, 0x118, 0x00006610);
+ nvkm_wr32(device, base + 0x618, 0x00000114);
+ nvkm_wr32(device, base + 0x11c, 0);
+ nvkm_wr32(device, base + 0x110, addr >> 8);
+ nvkm_wr32(device, base + 0x114, 0);
+ nvkm_wr32(device, base + 0x118, 0x00006610);
} else {
if (falcon->code.size > falcon->code.limit ||
falcon->data.size > falcon->data.limit) {
- nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+ nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
return -EINVAL;
}
if (falcon->version < 3) {
- nv_wo32(falcon, 0xff8, 0x00100000);
+ nvkm_wr32(device, base + 0xff8, 0x00100000);
for (i = 0; i < falcon->code.size / 4; i++)
- nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+ nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
} else {
- nv_wo32(falcon, 0x180, 0x01000000);
+ nvkm_wr32(device, base + 0x180, 0x01000000);
for (i = 0; i < falcon->code.size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wo32(falcon, 0x188, i >> 6);
- nv_wo32(falcon, 0x184, falcon->code.data[i]);
+ nvkm_wr32(device, base + 0x188, i >> 6);
+ nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
}
}
}
/* upload data segment (if necessary), zeroing the remainder */
if (falcon->version < 3) {
- nv_wo32(falcon, 0xff8, 0x00000000);
+ nvkm_wr32(device, base + 0xff8, 0x00000000);
for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
- nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+ nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
for (; i < falcon->data.limit; i += 4)
- nv_wo32(falcon, 0xff4, 0x00000000);
+ nvkm_wr32(device, base + 0xff4, 0x00000000);
} else {
- nv_wo32(falcon, 0x1c0, 0x01000000);
+ nvkm_wr32(device, base + 0x1c0, 0x01000000);
for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
- nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+ nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
for (; i < falcon->data.limit / 4; i++)
- nv_wo32(falcon, 0x1c4, 0x00000000);
+ nvkm_wr32(device, base + 0x1c4, 0x00000000);
}
/* start it running */
- nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
- nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
- nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
- nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+ nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+ nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
+ nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
+ nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
+
+ if (falcon->func->init)
+ falcon->func->init(falcon);
return 0;
}
-int
-_nvkm_falcon_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_falcon_dtor(struct nvkm_engine *engine)
{
- struct nvkm_falcon *falcon = (void *)object;
-
- if (!suspend) {
- nvkm_gpuobj_ref(NULL, &falcon->core);
- if (falcon->external) {
- vfree(falcon->data.data);
- vfree(falcon->code.data);
- falcon->code.data = NULL;
- }
- }
-
- nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
- nv_wo32(falcon, 0x014, 0xffffffff);
-
- return nvkm_engine_fini(&falcon->base, suspend);
+ return nvkm_falcon(engine);
}
+static const struct nvkm_engine_func
+nvkm_falcon = {
+ .dtor = nvkm_falcon_dtor,
+ .oneinit = nvkm_falcon_oneinit,
+ .init = nvkm_falcon_init,
+ .fini = nvkm_falcon_fini,
+ .intr = nvkm_falcon_intr,
+ .fifo.sclass = nvkm_falcon_oclass_get,
+ .cclass = &nvkm_falcon_cclass,
+};
+
int
-nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 addr, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func,
+ struct nvkm_device *device, int index, bool enable,
+ u32 addr, struct nvkm_engine **pengine)
{
struct nvkm_falcon *falcon;
- int ret;
-
- ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
- fname, length, pobject);
- falcon = *pobject;
- if (ret)
- return ret;
+ if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
+ return -ENOMEM;
+ falcon->func = func;
falcon->addr = addr;
- return 0;
+ falcon->code.data = func->code.data;
+ falcon->code.size = func->code.size;
+ falcon->data.data = func->data.data;
+ falcon->data.size = func->data.size;
+ *pengine = &falcon->engine;
+
+ return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+ enable, &falcon->engine);
}
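
The rewritten falcon code replaces nv_wait() with nvkm_msec(), a macro that wraps its body in a timed poll and, as the fifo code later in this diff shows, returns a negative value on timeout. A plain-C approximation of the pattern, assuming a POSIX busy-wait where the real macro consults the timer subdev:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool poll_until_set(volatile uint32_t *reg, uint32_t mask, int ms)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	int64_t deadline = (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec
			 + (int64_t)ms * 1000000;

	do {
		if (*reg & mask)	/* the condition from the macro body */
			return true;
		clock_gettime(CLOCK_MONOTONIC, &ts);
	} while ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec < deadline);
	return false;			/* nvkm_msec() returns < 0 on timeout */
}
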
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 42891cb71ea3..74993c144a84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,6 +7,24 @@ nvkm-y += nvkm/engine/fifo/nv50.o
nvkm-y += nvkm/engine/fifo/g84.o
nvkm-y += nvkm/engine/fifo/gf100.o
nvkm-y += nvkm/engine/fifo/gk104.o
-nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm20b.o
+
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/channv50.o
+nvkm-y += nvkm/engine/fifo/chang84.o
+
+nvkm-y += nvkm/engine/fifo/dmanv04.o
+nvkm-y += nvkm/engine/fifo/dmanv10.o
+nvkm-y += nvkm/engine/fifo/dmanv17.o
+nvkm-y += nvkm/engine/fifo/dmanv40.o
+nvkm-y += nvkm/engine/fifo/dmanv50.o
+nvkm-y += nvkm/engine/fifo/dmag84.o
+
+nvkm-y += nvkm/engine/fifo/gpfifonv50.o
+nvkm-y += nvkm/engine/fifo/gpfifog84.o
+nvkm-y += nvkm/engine/fifo/gpfifogf100.o
+nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fa223f88d25e..1fbbfbe6ca9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,156 +21,108 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/fifo.h>
+#include "priv.h"
+#include "chan.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <core/notify.h>
-#include <engine/dmaobj.h>
-#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
-static int
-nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
- if (size == 0) {
- notify->size = 0;
- notify->types = 1;
- notify->index = 0;
- return 0;
- }
- return -ENOSYS;
+ return fifo->func->pause(fifo, flags);
}
-static const struct nvkm_event_func
-nvkm_fifo_event_func = {
- .ctor = nvkm_fifo_event_ctor,
-};
-
-int
-nvkm_fifo_channel_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass,
- int bar, u32 addr, u32 size, u32 pushbuf,
- u64 engmask, int len, void **ptr)
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
- struct nvkm_device *device = nv_device(engine);
- struct nvkm_fifo *priv = (void *)engine;
- struct nvkm_fifo_chan *chan;
- struct nvkm_dmaeng *dmaeng;
- unsigned long flags;
- int ret;
-
- /* create base object class */
- ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
- engmask, len, ptr);
- chan = *ptr;
- if (ret)
- return ret;
+ return fifo->func->start(fifo, flags);
+}
- /* validate dma object representing push buffer */
- chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
- if (!chan->pushdma)
- return -ENOENT;
-
- dmaeng = (void *)chan->pushdma->base.engine;
- switch (chan->pushdma->base.oclass->handle) {
- case NV_DMA_FROM_MEMORY:
- case NV_DMA_IN_MEMORY:
- break;
- default:
- return -EINVAL;
+void
+nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
+ struct nvkm_fifo_chan **pchan)
+{
+ struct nvkm_fifo_chan *chan = *pchan;
+ if (likely(chan)) {
+ *pchan = NULL;
+ spin_unlock_irqrestore(&fifo->lock, flags);
}
+}
- ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
- if (ret)
- return ret;
-
- /* find a free fifo channel */
- spin_lock_irqsave(&priv->lock, flags);
- for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
- if (!priv->channel[chan->chid]) {
- priv->channel[chan->chid] = nv_object(chan);
- break;
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->lock, flags);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->inst->addr == inst) {
+ list_del(&chan->head);
+ list_add(&chan->head, &fifo->chan);
+ *rflags = flags;
+ return chan;
}
}
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (chan->chid == priv->max) {
- nv_error(priv, "no free channels\n");
- return -ENOSPC;
- }
-
- chan->addr = nv_device_resource_start(device, bar) +
- addr + size * chan->chid;
- chan->size = size;
- nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
- return 0;
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return NULL;
}
-void
-nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
- struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
+ struct nvkm_fifo_chan *chan;
unsigned long flags;
-
- if (chan->user)
- iounmap(chan->user);
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->channel[chan->chid] = NULL;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- nvkm_gpuobj_ref(NULL, &chan->pushgpu);
- nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
- nvkm_namedb_destroy(&chan->namedb);
+ spin_lock_irqsave(&fifo->lock, flags);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->chid == chid) {
+ list_del(&chan->head);
+ list_add(&chan->head, &fifo->chan);
+ *rflags = flags;
+ return chan;
+ }
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return NULL;
}
-void
-_nvkm_fifo_channel_dtor(struct nvkm_object *object)
+static int
+nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
- struct nvkm_fifo_chan *chan = (void *)object;
- nvkm_fifo_channel_destroy(chan);
+ if (size == 0) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -ENOSYS;
}
-int
-_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
- struct nvkm_fifo_chan *chan = (void *)object;
- *addr = chan->addr;
- *size = chan->size;
- return 0;
-}
+static const struct nvkm_event_func
+nvkm_fifo_event_func = {
+ .ctor = nvkm_fifo_event_ctor,
+};
-u32
-_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
+static void
+nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo_chan *chan = (void *)object;
- if (unlikely(!chan->user)) {
- chan->user = ioremap(chan->addr, chan->size);
- if (WARN_ON_ONCE(chan->user == NULL))
- return 0;
- }
- return ioread32_native(chan->user + addr);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ fifo->func->uevent_fini(fifo);
}
-void
-_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static void
+nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo_chan *chan = (void *)object;
- if (unlikely(!chan->user)) {
- chan->user = ioremap(chan->addr, chan->size);
- if (WARN_ON_ONCE(chan->user == NULL))
- return;
- }
- iowrite32_native(data, chan->user + addr);
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ fifo->func->uevent_init(fifo);
}
-int
+static int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
{
@@ -188,6 +140,13 @@ nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
return ret;
}
+static const struct nvkm_event_func
+nvkm_fifo_uevent_func = {
+ .ctor = nvkm_fifo_uevent_ctor,
+ .init = nvkm_fifo_uevent_init,
+ .fini = nvkm_fifo_uevent_fini,
+};
+
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
@@ -196,87 +155,123 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}
-int
-_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
- struct nvkm_event **event)
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
- struct nvkm_fifo *fifo = (void *)object->engine;
- switch (type) {
- case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
- if (nv_mclass(object) >= G82_CHANNEL_DMA) {
- *event = &fifo->uevent;
- return 0;
- }
- break;
- default:
- break;
- }
- return -EINVAL;
+ const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ return sclass->ctor(fifo, oclass, data, size, pobject);
}
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+ .ctor = nvkm_fifo_class_new,
+};
+
static int
-nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+ const struct nvkm_device_oclass **class)
{
- int engidx = nv_hclass(priv) & 0xff;
-
- while (object && object->parent) {
- if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
- (nv_hclass(object->parent) & 0xff) == engidx)
- return nvkm_fifo_chan(object)->chid;
- object = object->parent;
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ const struct nvkm_fifo_chan_oclass *sclass;
+ int c = 0;
+
+ while ((sclass = fifo->func->chan[c])) {
+ if (c++ == index) {
+ oclass->base = sclass->base;
+ oclass->engn = sclass;
+ *class = &nvkm_fifo_class;
+ return 0;
+ }
}
- return -1;
+ return c;
}
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
+static void
+nvkm_fifo_intr(struct nvkm_engine *engine)
{
- struct nvkm_fifo_chan *chan = NULL;
- unsigned long flags;
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ fifo->func->intr(fifo);
+}
- spin_lock_irqsave(&fifo->lock, flags);
- if (chid >= fifo->min && chid <= fifo->max)
- chan = (void *)fifo->channel[chid];
- spin_unlock_irqrestore(&fifo->lock, flags);
+static int
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ if (fifo->func->fini)
+ fifo->func->fini(fifo);
+ return 0;
+}
- return nvkm_client_name(chan);
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ if (fifo->func->oneinit)
+ return fifo->func->oneinit(fifo);
+ return 0;
}
-void
-nvkm_fifo_destroy(struct nvkm_fifo *priv)
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
{
- kfree(priv->channel);
- nvkm_event_fini(&priv->uevent);
- nvkm_event_fini(&priv->cevent);
- nvkm_engine_destroy(&priv->base);
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ fifo->func->init(fifo);
+ return 0;
}
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ void *data = fifo;
+ if (fifo->func->dtor)
+ data = fifo->func->dtor(fifo);
+ nvkm_event_fini(&fifo->cevent);
+ nvkm_event_fini(&fifo->uevent);
+ return data;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+ .dtor = nvkm_fifo_dtor,
+ .oneinit = nvkm_fifo_oneinit,
+ .init = nvkm_fifo_init,
+ .fini = nvkm_fifo_fini,
+ .intr = nvkm_fifo_intr,
+ .base.sclass = nvkm_fifo_class_get,
+};
+
int
-nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass,
- int min, int max, int length, void **pobject)
+nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ int index, int nr, struct nvkm_fifo *fifo)
{
- struct nvkm_fifo *priv;
int ret;
- ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
- "fifo", length, pobject);
- priv = *pobject;
- if (ret)
- return ret;
+ fifo->func = func;
+ INIT_LIST_HEAD(&fifo->chan);
+ spin_lock_init(&fifo->lock);
- priv->min = min;
- priv->max = max;
- priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
- if (!priv->channel)
- return -ENOMEM;
+	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
+ fifo->nr = NVKM_FIFO_CHID_NR;
+ else
+ fifo->nr = nr;
+ bitmap_clear(fifo->mask, 0, fifo->nr);
- ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
+ true, &fifo->engine);
if (ret)
return ret;
- priv->chid = nvkm_fifo_chid;
- spin_lock_init(&priv->lock);
- return 0;
+ if (func->uevent_init) {
+ ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
+ &fifo->uevent);
+ if (ret)
+ return ret;
+ }
+
+ return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
}
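
nvkm_fifo_chan_inst() and nvkm_fifo_chan_chid() above return with fifo->lock still held on a hit (after moving the channel to the list head as a cheap MRU cache for the interrupt path), and nvkm_fifo_chan_put() releases it. A reduced sketch of that lookup-with-lock-held idiom, with a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stddef.h>

struct chan { unsigned long inst; struct chan *next; };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct chan *chans;

static struct chan *chan_get(unsigned long inst)
{
	pthread_mutex_lock(&lock);
	for (struct chan *c = chans; c; c = c->next) {
		if (c->inst == inst)
			return c;	/* hit: lock stays held for caller */
	}
	pthread_mutex_unlock(&lock);	/* miss: drop it before returning */
	return NULL;
}

static void chan_put(struct chan **pchan)
{
	if (*pchan) {			/* only a hit still holds the lock */
		*pchan = NULL;
		pthread_mutex_unlock(&lock);
	}
}
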
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 000000000000..dc6d4678f228
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+struct nvkm_fifo_chan_object {
+ struct nvkm_oproxy oproxy;
+ struct nvkm_fifo_chan *chan;
+ int hash;
+};
+
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.object->engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ const char *name = nvkm_subdev_name[engine->subdev.index];
+ int ret = 0;
+
+ if (--engn->usecount)
+ return 0;
+
+ if (chan->func->engine_fini) {
+ ret = chan->func->engine_fini(chan, engine, suspend);
+ if (ret) {
+ nvif_error(&chan->object,
+ "detach %s failed, %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ if (engn->object) {
+ ret = nvkm_object_fini(engn->object, suspend);
+ if (ret && suspend)
+ return ret;
+ }
+
+ nvif_trace(&chan->object, "detached %s\n", name);
+ return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.object->engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ const char *name = nvkm_subdev_name[engine->subdev.index];
+ int ret;
+
+ if (engn->usecount++)
+ return 0;
+
+ if (engn->object) {
+ ret = nvkm_object_init(engn->object);
+ if (ret)
+ return ret;
+ }
+
+ if (chan->func->engine_init) {
+ ret = chan->func->engine_init(chan, engine);
+ if (ret) {
+ nvif_error(&chan->object,
+ "attach %s failed, %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ nvif_trace(&chan->object, "attached %s\n", name);
+ return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+ struct nvkm_fifo_chan_object *object =
+ container_of(base, typeof(*object), oproxy);
+ struct nvkm_engine *engine = object->oproxy.base.engine;
+ struct nvkm_fifo_chan *chan = object->chan;
+ struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+
+ if (chan->func->object_dtor)
+ chan->func->object_dtor(chan, object->hash);
+
+ if (!--engn->refcount) {
+ if (chan->func->engine_dtor)
+ chan->func->engine_dtor(chan, engine);
+ nvkm_object_del(&engn->object);
+ if (chan->vm)
+ atomic_dec(&chan->vm->engref[engine->subdev.index]);
+ }
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+ .dtor[0] = nvkm_fifo_chan_child_del,
+ .init[0] = nvkm_fifo_chan_child_init,
+ .fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_engine *engine = oclass->engine;
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+ struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ struct nvkm_fifo_chan_object *object;
+ int ret = 0;
+
+ if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+ object->chan = chan;
+ *pobject = &object->oproxy.base;
+
+ if (!engn->refcount++) {
+ struct nvkm_oclass cclass = {
+ .client = oclass->client,
+ .engine = oclass->engine,
+ };
+
+ if (chan->vm)
+ atomic_inc(&chan->vm->engref[engine->subdev.index]);
+
+ if (engine->func->fifo.cclass) {
+ ret = engine->func->fifo.cclass(chan, &cclass,
+ &engn->object);
+ } else
+ if (engine->func->cclass) {
+ ret = nvkm_object_new_(engine->func->cclass, &cclass,
+ NULL, 0, &engn->object);
+ }
+ if (ret)
+ return ret;
+
+ if (chan->func->engine_ctor) {
+ ret = chan->func->engine_ctor(chan, oclass->engine,
+ engn->object);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+ .base = oclass->base,
+ .engn = oclass->engn,
+ .handle = oclass->handle,
+ .object = oclass->object,
+ .client = oclass->client,
+ .parent = engn->object ?
+ engn->object :
+ oclass->parent,
+ .engine = engine,
+ }, data, size, &object->oproxy.object);
+ if (ret)
+ return ret;
+
+ if (chan->func->object_ctor) {
+ object->hash =
+ chan->func->object_ctor(chan, object->oproxy.object);
+ if (object->hash < 0)
+ return object->hash;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ struct nvkm_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_engine *engine;
+ u64 mask = chan->engines;
+ int ret, i, c;
+
+ for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
+ if (!(engine = nvkm_device_engine(device, i)))
+ continue;
+ oclass->engine = engine;
+ oclass->base.oclass = 0;
+
+ if (engine->func->fifo.sclass) {
+ ret = engine->func->fifo.sclass(oclass, index);
+ if (oclass->base.oclass) {
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_fifo_chan_child_new;
+ return 0;
+ }
+
+ index -= ret;
+ continue;
+ }
+
+ while (engine->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = engine->func->sclass[index];
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_fifo_chan_child_new;
+ return 0;
+ }
+ }
+ index -= c;
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+ struct nvkm_event **pevent)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (chan->func->ntfy)
+ return chan->func->ntfy(chan, type, pevent);
+ return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ *addr = chan->addr;
+ *size = chan->size;
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (!chan->user)
+ return -ENOMEM;
+ }
+ if (unlikely(addr + 4 > chan->size))
+ return -EINVAL;
+ *data = ioread32_native(chan->user + addr);
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (!chan->user)
+ return -ENOMEM;
+ }
+ if (unlikely(addr + 4 > chan->size))
+ return -EINVAL;
+ iowrite32_native(data, chan->user + addr);
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ chan->func->fini(chan);
+ return 0;
+}
+
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ chan->func->init(chan);
+ return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ struct nvkm_fifo *fifo = chan->fifo;
+ void *data = chan->func->dtor(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!list_empty(&chan->head)) {
+ __clear_bit(chan->chid, fifo->mask);
+ list_del(&chan->head);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ if (chan->user)
+ iounmap(chan->user);
+
+ nvkm_vm_ref(NULL, &chan->vm, NULL);
+
+ nvkm_gpuobj_del(&chan->push);
+ nvkm_gpuobj_del(&chan->inst);
+ return data;
+}
+
+static const struct nvkm_object_func
+nvkm_fifo_chan_func = {
+ .dtor = nvkm_fifo_chan_dtor,
+ .init = nvkm_fifo_chan_init,
+ .fini = nvkm_fifo_chan_fini,
+ .ntfy = nvkm_fifo_chan_ntfy,
+ .map = nvkm_fifo_chan_map,
+ .rd32 = nvkm_fifo_chan_rd32,
+ .wr32 = nvkm_fifo_chan_wr32,
+ .sclass = nvkm_fifo_chan_child_get,
+};
+
+int
+nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
+ struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
+ u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_fifo_chan *chan)
+{
+ struct nvkm_client *client = oclass->client;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_dmaobj *dmaobj;
+ unsigned long flags;
+ int ret;
+
+ nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
+ chan->func = func;
+ chan->fifo = fifo;
+ chan->engines = engines;
+ INIT_LIST_HEAD(&chan->head);
+
+ /* instance memory */
+ ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
+ if (ret)
+ return ret;
+
+ /* allocate push buffer ctxdma instance */
+ if (push) {
+ dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
+ if (!dmaobj)
+ return -ENOENT;
+
+ ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+ &chan->push);
+ if (ret)
+ return ret;
+ }
+
+ /* channel address space */
+ if (!vm && mmu) {
+ if (!client->vm || client->vm->mmu == mmu) {
+ ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -ENOENT;
+ }
+
+ /* allocate channel id */
+ spin_lock_irqsave(&fifo->lock, flags);
+ chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
+ if (chan->chid >= NVKM_FIFO_CHID_NR) {
+ spin_unlock_irqrestore(&fifo->lock, flags);
+ return -ENOSPC;
+ }
+ list_add(&chan->head, &fifo->chan);
+ __set_bit(chan->chid, fifo->mask);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+
+ /* determine address of this channel's user registers */
+ chan->addr = device->func->resource_addr(device, bar) +
+ base + user * chan->chid;
+ chan->size = user;
+
+ nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+ return 0;
+}
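
nvkm_fifo_chan_ctor() above hands out channel ids from a fixed-size bitmap under fifo->lock, and the dtor clears the bit again. The same scheme in miniature, with open-coded bit operations standing in for find_first_zero_bit()/__set_bit() and the locking omitted:

#define CHID_NR 64
static unsigned long long chid_mask;	/* bit set => channel id in use */

static int chid_alloc(void)
{
	for (int i = 0; i < CHID_NR; i++) {
		if (!(chid_mask & (1ULL << i))) {
			chid_mask |= 1ULL << i;	/* __set_bit() equivalent */
			return i;
		}
	}
	return -1;			/* the driver returns -ENOSPC here */
}

static void chid_free(int chid)
{
	chid_mask &= ~(1ULL << chid);	/* __clear_bit(), as in the dtor */
}
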
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 000000000000..55dc415c5c08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FIFO_CHAN_H__
+#define __NVKM_FIFO_CHAN_H__
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#include "priv.h"
+
+struct nvkm_fifo_chan_func {
+ void *(*dtor)(struct nvkm_fifo_chan *);
+ void (*init)(struct nvkm_fifo_chan *);
+ void (*fini)(struct nvkm_fifo_chan *);
+ int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
+ int (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+ struct nvkm_object *);
+ void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+ int (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+ int (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+ bool suspend);
+ int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
+ void (*object_dtor)(struct nvkm_fifo_chan *, int);
+};
+
+int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
+ u32 size, u32 align, bool zero, u64 vm, u64 push,
+ u64 engines, int bar, u32 base, u32 user,
+ const struct nvkm_oclass *, struct nvkm_fifo_chan *);
+
+struct nvkm_fifo_chan_oclass {
+ int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+ struct nvkm_sclass base;
+};
+
+int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+#endif
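
The nvkm_fifo_chan() macro defined at the top of this new header is the standard container_of() downcast: given a pointer to the embedded nvkm_object, it recovers the enclosing channel. A self-contained illustration of why the pointer arithmetic works:

#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct object { int handle; };
struct fifo_chan { int chid; struct object object; };

static struct fifo_chan *to_chan(struct object *obj)
{
	/* subtract the member offset to get back to the containing struct */
	return my_container_of(obj, struct fifo_chan, object);
}
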
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 000000000000..04305241ceed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+int
+g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+ struct nvkm_event **pevent)
+{
+ switch (type) {
+ case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+ *pevent = &chan->fifo->uevent;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+g84_fifo_chan_engine(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_GR : return 0;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return 1;
+ case NVKM_ENGINE_CE0 : return 2;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return 3;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return 4;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return 5;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static int
+g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : return -1;
+ case NVKM_ENGINE_GR : return 0x0020;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return 0x0040;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return 0x0060;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return 0x0080;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return 0x00a0;
+ case NVKM_ENGINE_CE0 : return 0x00c0;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+static int
+g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 engn, save;
+ int offset;
+ bool done;
+
+ offset = g84_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+
+ engn = g84_fifo_chan_engine(engine);
+ save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+ nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+ done = nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) >= 0;
+ nvkm_wr32(device, 0x002520, save);
+ if (!done) {
+ nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ if (suspend)
+ return -EBUSY;
+ }
+
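+	/* clear this engine's context pointers from the channel's engine table */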
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+int
+g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ u64 limit, start;
+ int offset;
+
+ offset = g84_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+ limit = engn->addr + engn->size - 1;
+ start = engn->addr;
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+ nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+static int
+g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ int engn = engine->subdev.index;
+
+ if (g84_fifo_chan_engine_addr(engine) < 0)
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+int
+g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ u32 handle = object->handle;
+ u32 context;
+
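+	/* the context value selects the target engine for RAMHT lookups */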
+ switch (object->engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context = 0x00000000; break;
+ case NVKM_ENGINE_GR : context = 0x00100000; break;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
+ case NVKM_ENGINE_ME :
+ case NVKM_ENGINE_CE0 : context = 0x00300000; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC :
+ case NVKM_ENGINE_VIC : context = 0x00500000; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+static void
+g84_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = chan->ramfc->addr >> 8;
+ u32 chid = chan->base.chid;
+
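+	/* channel table entry: valid bit | RAMFC address, then rebuild the runlist */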
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+ nv50_fifo_runlist_update(fifo);
+}
+
+static const struct nvkm_fifo_chan_func
+g84_fifo_chan_func = {
+ .dtor = nv50_fifo_chan_dtor,
+ .init = g84_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .ntfy = g84_fifo_chan_ntfy,
+ .engine_ctor = g84_fifo_chan_engine_ctor,
+ .engine_dtor = nv50_fifo_chan_engine_dtor,
+ .engine_init = g84_fifo_chan_engine_init,
+ .engine_fini = g84_fifo_chan_engine_fini,
+ .object_ctor = g84_fifo_chan_object_ctor,
+ .object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+ const struct nvkm_oclass *oclass,
+ struct nv50_fifo_chan *chan)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret;
+
+ ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
+ 0x10000, 0x1000, false, vm, push,
+ (1ULL << NVKM_ENGINE_BSP) |
+ (1ULL << NVKM_ENGINE_CE0) |
+ (1ULL << NVKM_ENGINE_CIPHER) |
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_ME) |
+ (1ULL << NVKM_ENGINE_MPEG) |
+ (1ULL << NVKM_ENGINE_MSPDEC) |
+ (1ULL << NVKM_ENGINE_MSPPP) |
+ (1ULL << NVKM_ENGINE_MSVLD) |
+ (1ULL << NVKM_ENGINE_SEC) |
+ (1ULL << NVKM_ENGINE_SW) |
+ (1ULL << NVKM_ENGINE_VIC) |
+ (1ULL << NVKM_ENGINE_VP),
+ 0, 0xc00000, 0x2000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
+ &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+ &chan->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
+ &chan->cache);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+ if (ret)
+ return ret;
+
+ return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 000000000000..7d697e2dce1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,24 @@
+#ifndef __GF100_FIFO_CHAN_H__
+#define __GF100_FIFO_CHAN_H__
+#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
+#include "chan.h"
+#include "gf100.h"
+
+struct gf100_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct gf100_fifo *fifo;
+
+ struct list_head head;
+ bool killed;
+
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+
+ struct {
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vma vma;
+ } engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 000000000000..97bdddb7644a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,29 @@
+#ifndef __GK104_FIFO_CHAN_H__
+#define __GK104_FIFO_CHAN_H__
+#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
+#include "chan.h"
+#include "gk104.h"
+
+struct gk104_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct gk104_fifo *fifo;
+ int engine;
+
+ struct list_head head;
+ bool killed;
+
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+
+ struct {
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vma vma;
+ } engn[NVKM_SUBDEV_NR];
+};
+
+int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+
+extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 000000000000..3361a1fd0343
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,24 @@
+#ifndef __NV04_FIFO_CHAN_H__
+#define __NV04_FIFO_CHAN_H__
+#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
+#include "chan.h"
+#include "nv04.h"
+
+struct nv04_fifo_chan {
+ struct nvkm_fifo_chan base;
+ struct nv04_fifo *fifo;
+ u32 ramfc;
+ struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
+void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
+
+extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 000000000000..25b60aff40e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+static int
+nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : return -1;
+ case NVKM_ENGINE_GR : return 0x0000;
+ case NVKM_ENGINE_MPEG : return 0x0060;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
+static int
+nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int offset, ret = 0;
+ u32 me;
+
+ offset = nv50_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+	 * Newer chipsets don't seem to suffer from this issue, and there's
+	 * also an "ignore these engines" bitmask reg we can use if we hit
+	 * the issue there.
+ */
+ me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ if (suspend)
+ ret = -EBUSY;
+ }
+ nvkm_wr32(device, 0x00b860, me);
+
+ if (ret == 0) {
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ }
+
+ return ret;
+}
+
+static int
+nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ u64 limit, start;
+ int offset;
+
+ offset = nv50_fifo_chan_engine_addr(engine);
+ if (offset < 0)
+ return 0;
+ limit = engn->addr + engn->size - 1;
+ start = engn->addr;
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+ nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+ return 0;
+}
+
+void
+nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ int engn = engine->subdev.index;
+
+ if (nv50_fifo_chan_engine_addr(engine) < 0)
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+void
+nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ u32 handle = object->handle;
+ u32 context;
+
+ switch (object->engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context = 0x00000000; break;
+ case NVKM_ENGINE_GR : context = 0x00100000; break;
+ case NVKM_ENGINE_MPEG : context = 0x00200000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+void
+nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 chid = chan->base.chid;
+
+ /* remove channel from runlist, fifo will unload context */
+ nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+ nv50_fifo_runlist_update(fifo);
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
+}
+
+static void
+nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ struct nv50_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = chan->ramfc->addr >> 12;
+ u32 chid = chan->base.chid;
+
+ nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+ nv50_fifo_runlist_update(fifo);
+}
+
+void *
+nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
+{
+ struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+ nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+ nvkm_ramht_del(&chan->ramht);
+ nvkm_gpuobj_del(&chan->pgd);
+ nvkm_gpuobj_del(&chan->eng);
+ nvkm_gpuobj_del(&chan->cache);
+ nvkm_gpuobj_del(&chan->ramfc);
+ return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+nv50_fifo_chan_func = {
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv50_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .engine_ctor = nv50_fifo_chan_engine_ctor,
+ .engine_dtor = nv50_fifo_chan_engine_dtor,
+ .engine_init = nv50_fifo_chan_engine_init,
+ .engine_fini = nv50_fifo_chan_engine_fini,
+ .object_ctor = nv50_fifo_chan_object_ctor,
+ .object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+ const struct nvkm_oclass *oclass,
+ struct nv50_fifo_chan *chan)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret;
+
+ ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
+ 0x10000, 0x1000, false, vm, push,
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_SW) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_MPEG),
+ 0, 0xc00000, 0x2000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
+ &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+ &chan->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+ if (ret)
+ return ret;
+
+ return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 000000000000..4b9da469b704
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,35 @@
+#ifndef __NV50_FIFO_CHAN_H__
+#define __NV50_FIFO_CHAN_H__
+#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
+#include "chan.h"
+#include "nv50.h"
+
+struct nv50_fifo_chan {
+ struct nv50_fifo *fifo;
+ struct nvkm_fifo_chan base;
+
+ struct nvkm_gpuobj *ramfc;
+ struct nvkm_gpuobj *cache;
+ struct nvkm_gpuobj *eng;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_ramht *ramht;
+ struct nvkm_vm *vm;
+
+ struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+ const struct nvkm_oclass *, struct nv50_fifo_chan *);
+void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
+
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+ const struct nvkm_oclass *, struct nv50_fifo_chan *);
+
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
new file mode 100644
index 000000000000..a5ca52c7b74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv50_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
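+	/* nvif_unpack() is a macro that assigns 'ret'; the bare "return ret"
+	 * below hands back the status it set
+	 */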
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ "pushbuf %llx offset %016llx\n",
+ args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+
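+	/* seed the RAMFC; both 64-bit words at 0x08/0x10 take the requested
+	 * offset (the channel's initial DMA get/put pair)
+	 */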
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+ nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_dma_oclass = {
+ .base.oclass = G82_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = g84_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 000000000000..bfcc6408a772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+void
+nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+ nvkm_ramht_remove(imem->ramht, cookie);
+}
+
+static int
+nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+ u32 context = 0x80000000 | chan->base.chid << 24;
+ u32 handle = object->handle;
+ int hash;
+
+ switch (object->engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context |= 0x00000000; break;
+ case NVKM_ENGINE_GR : context |= 0x00010000; break;
+ case NVKM_ENGINE_MPEG : context |= 0x00020000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+ handle, context);
+ mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ return hash;
+}
+
+void
+nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_memory *fctx = device->imem->ramfc;
+ const struct nv04_fifo_ramfc *c;
+ unsigned long flags;
+ u32 mask = fifo->base.nr - 1;
+ u32 data = chan->ramfc;
+ u32 chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
+ if (chid == chan->base.chid) {
+ nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
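+		/* this channel is resident: save PFIFO register state back
+		 * into its RAMFC, then reset the cache pointers
+		 */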
+ c = fifo->ramfc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
+ u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+ nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+
+ c = fifo->ramfc;
+ do {
+ nvkm_wr32(device, c->regp, 0x00000000);
+ } while ((++c)->bits);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ /* restore normal operation, after disabling dma mode */
+ nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void
+nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = 1 << chan->base.chid;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void *
+nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
+ const struct nv04_fifo_ramfc *c = fifo->ramfc;
+
+ nvkm_kmap(imem->ramfc);
+ do {
+ nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+ nvkm_done(imem->ramfc);
+ return chan;
+}
+
+const struct nvkm_fifo_chan_func
+nv04_fifo_dma_func = {
+ .dtor = nv04_fifo_dma_dtor,
+ .init = nv04_fifo_dma_init,
+ .fini = nv04_fifo_dma_fini,
+ .object_ctor = nv04_fifo_dma_object_ctor,
+ .object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_SW),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 32;
+
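+	/* 0x00 DMA_PUT, 0x04 DMA_GET, 0x08 DMA_INSTANCE, 0x10 DMA_FETCH */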
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+ .base.oclass = NV03_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv04_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 000000000000..34f68e5bd040
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_SW),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 32;
+
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+ .base.oclass = NV10_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv10_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 000000000000..ed7cc9f2b540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
+ (1ULL << NVKM_ENGINE_SW),
+ 0, 0x800000, 0x10000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 64;
+
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+ .base.oclass = NV17_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv17_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 000000000000..043b6c325949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
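+	/* report the PFIFO register (*reg) and RAMFC offset (*ctx) that
+	 * hold this engine's context pointer
+	 */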
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW:
+ return false;
+ case NVKM_ENGINE_GR:
+ *reg = 0x0032e0;
+ *ctx = 0x38;
+ return true;
+ case NVKM_ENGINE_MPEG:
+ *reg = 0x00330c;
+ *ctx = 0x54;
+ return true;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+}
+
+static int
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ unsigned long flags;
+ u32 reg, ctx;
+ int chid;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+ chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+ if (chid == chan->base.chid)
+ nvkm_wr32(device, reg, 0x00000000);
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+ nvkm_done(imem->ramfc);
+
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+ return 0;
+}
+
+static int
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nv04_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ unsigned long flags;
+ u32 inst, reg, ctx;
+ int chid;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+ inst = chan->engn[engine->subdev.index]->addr >> 4;
+
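+	/* write the live register if this channel is resident; always
+	 * update the RAMFC copy
+	 */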
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+ chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+ if (chid == chan->base.chid)
+ nvkm_wr32(device, reg, inst);
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
+ nvkm_done(imem->ramfc);
+
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+ return 0;
+}
+
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ const int engn = engine->subdev.index;
+ u32 reg, ctx;
+
+ if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+ return 0;
+
+ return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_object *object)
+{
+ struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+ struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+ u32 context = chan->base.chid << 23;
+ u32 handle = object->handle;
+ int hash;
+
+ switch (object->engine->subdev.index) {
+ case NVKM_ENGINE_DMAOBJ:
+ case NVKM_ENGINE_SW : context |= 0x00000000; break;
+ case NVKM_ENGINE_GR : context |= 0x00100000; break;
+ case NVKM_ENGINE_MPEG : context |= 0x00200000; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+ handle, context);
+ mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ return hash;
+}
+
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+ .dtor = nv04_fifo_dma_dtor,
+ .init = nv04_fifo_dma_init,
+ .fini = nv04_fifo_dma_fini,
+ .engine_ctor = nv40_fifo_dma_engine_ctor,
+ .engine_dtor = nv40_fifo_dma_engine_dtor,
+ .engine_init = nv40_fifo_dma_engine_init,
+ .engine_fini = nv40_fifo_dma_engine_fini,
+ .object_ctor = nv40_fifo_dma_object_ctor,
+ .object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nv04_fifo_chan *chan = NULL;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+ "offset %08x\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+ 0x1000, 0x1000, false, 0, args->v0.pushbuf,
+ (1ULL << NVKM_ENGINE_DMAOBJ) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_MPEG) |
+ (1ULL << NVKM_ENGINE_SW),
+ 0, 0xc00000, 0x1000, oclass, &chan->base);
+ chan->fifo = fifo;
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ chan->ramfc = chan->base.chid * 128;
+
+ nvkm_kmap(imem->ramfc);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+ nvkm_done(imem->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+ .base.oclass = NV40_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv40_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
new file mode 100644
index 000000000000..6b3b15f12c39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv50_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ int ret;
+
+ nvif_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ "pushbuf %llx offset %016llx\n",
+ args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.offset);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+ nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_dma_oclass = {
+ .base.oclass = NV50_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index a04920b3cf84..ff7b529764fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -22,466 +22,41 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-#include "nv04.h"
-
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent->parent;
- struct nvkm_gpuobj *ectx = (void *)object;
- u64 limit = ectx->addr + ectx->size - 1;
- u64 start = ectx->addr;
- u32 addr;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0020; break;
- case NVDEV_ENGINE_VP :
- case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
- case NVDEV_ENGINE_MSPPP :
- case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
- case NVDEV_ENGINE_BSP :
- case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
- case NVDEV_ENGINE_CIPHER:
- case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
- case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
- default:
- return -EINVAL;
- }
-
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
- nv_wo32(base->eng, addr + 0x00, 0x00190000);
- nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
- nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
- nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
- upper_32_bits(start));
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
- return 0;
-}
-
-static int
-g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *object)
-{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_priv *priv = (void *)parent->engine;
- struct nv50_fifo_base *base = (void *)parent->parent;
- struct nv50_fifo_chan *chan = (void *)parent;
- u32 addr, save, engn;
- bool done;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
- case NVDEV_ENGINE_VP :
- case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
- case NVDEV_ENGINE_MSPPP :
- case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
- case NVDEV_ENGINE_BSP :
- case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
- case NVDEV_ENGINE_CIPHER:
- case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break;
- case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break;
- default:
- return -EINVAL;
- }
-
- save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
- nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
- done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
- nv_wr32(priv, 0x002520, save);
- if (!done) {
- nv_error(priv, "channel %d [%s] unload timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- if (suspend)
- return -EBUSY;
- }
-
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
- return 0;
-}
-
-static int
-g84_fifo_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 handle)
-{
- struct nv50_fifo_chan *chan = (void *)parent;
- u32 context;
-
- if (nv_iclass(object, NV_GPUOBJ_CLASS))
- context = nv_gpuobj(object)->node->offset >> 4;
- else
- context = 0x00000004; /* just non-zero */
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_DMAOBJ:
- case NVDEV_ENGINE_SW : context |= 0x00000000; break;
- case NVDEV_ENGINE_GR : context |= 0x00100000; break;
- case NVDEV_ENGINE_MPEG :
- case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
- case NVDEV_ENGINE_ME :
- case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
- case NVDEV_ENGINE_VP :
- case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
- case NVDEV_ENGINE_CIPHER:
- case NVDEV_ENGINE_SEC :
- case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
- case NVDEV_ENGINE_BSP :
- case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
- default:
- return -EINVAL;
- }
-
- return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-static int
-g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent;
- struct nv50_fifo_chan *chan;
- int ret;
-
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG) |
- (1ULL << NVDEV_ENGINE_ME) |
- (1ULL << NVDEV_ENGINE_VP) |
- (1ULL << NVDEV_ENGINE_CIPHER) |
- (1ULL << NVDEV_ENGINE_SEC) |
- (1ULL << NVDEV_ENGINE_BSP) |
- (1ULL << NVDEV_ENGINE_MSVLD) |
- (1ULL << NVDEV_ENGINE_MSPDEC) |
- (1ULL << NVDEV_ENGINE_MSPPP) |
- (1ULL << NVDEV_ENGINE_CE0) |
- (1ULL << NVDEV_ENGINE_VIC), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
- if (ret)
- return ret;
-
- nv_parent(chan)->context_attach = g84_fifo_context_attach;
- nv_parent(chan)->context_detach = g84_fifo_context_detach;
- nv_parent(chan)->object_attach = g84_fifo_object_attach;
- nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x3c, 0x003f6078);
- nv_wo32(base->ramfc, 0x44, 0x01003fff);
- nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
- nv_wo32(base->ramfc, 0x4c, 0xffffffff);
- nv_wo32(base->ramfc, 0x60, 0x7fffffff);
- nv_wo32(base->ramfc, 0x78, 0x00000000);
- nv_wo32(base->ramfc, 0x7c, 0x30000001);
- nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj.node->offset >> 4));
- nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
- nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
- bar->flush(bar);
- return 0;
-}
-
-static int
-g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv50_channel_gpfifo_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent;
- struct nv50_fifo_chan *chan;
- u64 ioffset, ilength;
- int ret;
-
- nv_ioctl(parent, "create channel gpfifo size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
- "ioffset %016llx ilength %08x\n",
- args->v0.version, args->v0.pushbuf, args->v0.ioffset,
- args->v0.ilength);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG) |
- (1ULL << NVDEV_ENGINE_ME) |
- (1ULL << NVDEV_ENGINE_VP) |
- (1ULL << NVDEV_ENGINE_CIPHER) |
- (1ULL << NVDEV_ENGINE_SEC) |
- (1ULL << NVDEV_ENGINE_BSP) |
- (1ULL << NVDEV_ENGINE_MSVLD) |
- (1ULL << NVDEV_ENGINE_MSPDEC) |
- (1ULL << NVDEV_ENGINE_MSPPP) |
- (1ULL << NVDEV_ENGINE_CE0) |
- (1ULL << NVDEV_ENGINE_VIC), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
- if (ret)
- return ret;
-
- nv_parent(chan)->context_attach = g84_fifo_context_attach;
- nv_parent(chan)->context_detach = g84_fifo_context_detach;
- nv_parent(chan)->object_attach = g84_fifo_object_attach;
- nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
- ioffset = args->v0.ioffset;
- ilength = order_base_2(args->v0.ilength / 8);
-
- nv_wo32(base->ramfc, 0x3c, 0x403f6078);
- nv_wo32(base->ramfc, 0x44, 0x01003fff);
- nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
- nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
- nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
- nv_wo32(base->ramfc, 0x60, 0x7fffffff);
- nv_wo32(base->ramfc, 0x78, 0x00000000);
- nv_wo32(base->ramfc, 0x7c, 0x30000001);
- nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj.node->offset >> 4));
- nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
- nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
- bar->flush(bar);
- return 0;
-}
-
-static int
-g84_fifo_chan_init(struct nvkm_object *object)
-{
- struct nv50_fifo_priv *priv = (void *)object->engine;
- struct nv50_fifo_base *base = (void *)object->parent;
- struct nv50_fifo_chan *chan = (void *)object;
- struct nvkm_gpuobj *ramfc = base->ramfc;
- u32 chid = chan->base.chid;
- int ret;
-
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
- nv50_fifo_playlist_update(priv);
- return 0;
-}
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_dma = {
- .ctor = g84_fifo_chan_ctor_dma,
- .dtor = nv50_fifo_chan_dtor,
- .init = g84_fifo_chan_init,
- .fini = nv50_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_ind = {
- .ctor = g84_fifo_chan_ctor_ind,
- .dtor = nv50_fifo_chan_dtor,
- .init = g84_fifo_chan_init,
- .fini = nv50_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-g84_fifo_sclass[] = {
- { G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
- { G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_fifo_base *base;
- int ret;
-
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
- 0, &base->pgd);
- if (ret)
- return ret;
-
- ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
- 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
- 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static struct nvkm_oclass
-g84_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_fifo_context_ctor,
- .dtor = nv50_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
+#include "channv50.h"
static void
-g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
}
static void
-g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-g84_fifo_uevent_func = {
- .ctor = nvkm_fifo_uevent_ctor,
- .init = g84_fifo_uevent_init,
- .fini = g84_fifo_uevent_fini,
+g84_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
+}
+
+static const struct nvkm_fifo_func
+g84_fifo = {
+ .dtor = nv50_fifo_dtor,
+ .oneinit = nv50_fifo_oneinit,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .uevent_init = g84_fifo_uevent_init,
+ .uevent_fini = g84_fifo_uevent_fini,
+ .chan = {
+ &g84_fifo_dma_oclass,
+ &g84_fifo_gpfifo_oclass,
+ NULL
+ },
};
-static int
-g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- struct nv50_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[0]);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[1]);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &g84_fifo_cclass;
- nv_engine(priv)->sclass = g84_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- return 0;
+ return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
}
-
-struct nvkm_oclass *
-g84_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_fifo_ctor,
- .dtor = nv50_fifo_dtor,
- .init = nv50_fifo_init,
- .fini = _nvkm_fifo_fini,
- },
-};
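The conversion above is the template for the rest of this series: the per-object nvkm_oclass/nvkm_ofuncs machinery is replaced by one shared, const function table (nvkm_fifo_func) plus a thin per-chipset constructor that merely binds it. A self-contained sketch of that shape, using made-up stand-in names rather than the real nvkm API:

/* Sketch of the ops-table pattern adopted above: a single const
 * function table shared by every instance, plus a thin constructor
 * that binds it.  All names below are illustrative stand-ins, not
 * the real nvkm API.
 */
#include <stdio.h>
#include <stdlib.h>

struct fifo;

struct fifo_func {
	void (*init)(struct fifo *);
	void (*fini)(struct fifo *);
};

struct fifo {
	const struct fifo_func *func;
	int index;
};

static void demo_init(struct fifo *fifo)
{
	printf("init fifo %d\n", fifo->index);
}

static void demo_fini(struct fifo *fifo)
{
	printf("fini fifo %d\n", fifo->index);
}

/* One static table describes the implementation... */
static const struct fifo_func demo_fifo = {
	.init = demo_init,
	.fini = demo_fini,
};

/* ...and the per-chipset constructor only allocates and binds it. */
static int demo_fifo_new(int index, struct fifo **pfifo)
{
	struct fifo *fifo;

	if (!(fifo = calloc(1, sizeof(*fifo))))
		return -1;
	fifo->func = &demo_fifo;
	fifo->index = index;
	*pfifo = fifo;
	return 0;
}

int main(void)
{
	struct fifo *fifo;

	if (demo_fifo_new(0, &fifo))
		return 1;
	fifo->func->init(fifo);
	fifo->func->fini(fifo);
	free(fifo);
	return 0;
}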
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b745252f2261..ff6fcbda615b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,365 +21,72 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/fifo.h>
+#include "gf100.h"
+#include "changf100.h"
#include <core/client.h>
-#include <core/engctx.h>
#include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_fifo_priv {
- struct nvkm_fifo base;
-
- struct work_struct fault;
- u64 mask;
-
- struct {
- struct nvkm_gpuobj *mem[2];
- int active;
- wait_queue_head_t wait;
- } runlist;
-
- struct {
- struct nvkm_gpuobj *mem;
- struct nvkm_vma bar;
- } user;
- int spoon_nr;
-};
-
-struct gf100_fifo_base {
- struct nvkm_fifo_base base;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-};
-
-struct gf100_fifo_chan {
- struct nvkm_fifo_chan base;
- enum {
- STOPPED,
- RUNNING,
- KILLED
- } state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
static void
-gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
+gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
- struct nvkm_bar *bar = nvkm_bar(priv);
- struct nvkm_gpuobj *cur;
- int i, p;
-
- mutex_lock(&nv_subdev(priv)->mutex);
- cur = priv->runlist.mem[priv->runlist.active];
- priv->runlist.active = !priv->runlist.active;
-
- for (i = 0, p = 0; i < 128; i++) {
- struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
- if (chan && chan->state == RUNNING) {
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000004);
- p += 8;
- }
- }
- bar->flush(bar);
-
- nv_wr32(priv, 0x002270, cur->addr >> 12);
- nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
-
- if (wait_event_timeout(priv->runlist.wait,
- !(nv_rd32(priv, 0x00227c) & 0x00100000),
- msecs_to_jiffies(2000)) == 0)
- nv_error(priv, "runlist update timeout\n");
- mutex_unlock(&nv_subdev(priv)->mutex);
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
-static int
-gf100_fifo_context_attach(struct nvkm_object *parent,
- struct nvkm_object *object)
-{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gf100_fifo_base *base = (void *)parent->parent;
- struct nvkm_engctx *ectx = (void *)object;
- u32 addr;
- int ret;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
- case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
- default:
- return -EINVAL;
- }
-
- if (!ectx->vma.node) {
- ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
- NV_MEM_ACCESS_RW, &ectx->vma);
- if (ret)
- return ret;
-
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
- }
-
- nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
- nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
- bar->flush(bar);
- return 0;
-}
-
-static int
-gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *object)
+static void
+gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gf100_fifo_priv *priv = (void *)parent->engine;
- struct gf100_fifo_base *base = (void *)parent->parent;
- struct gf100_fifo_chan *chan = (void *)parent;
- u32 addr;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
- case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
- default:
- return -EINVAL;
- }
-
- nv_wr32(priv, 0x002634, chan->base.chid);
- if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- if (suspend)
- return -EBUSY;
- }
-
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
- return 0;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-static int
-gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+void
+gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
- union {
- struct nv50_channel_gpfifo_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gf100_fifo_priv *priv = (void *)engine;
- struct gf100_fifo_base *base = (void *)parent;
struct gf100_fifo_chan *chan;
- u64 usermem, ioffset, ilength;
- int ret, i;
-
- nv_ioctl(parent, "create channel gpfifo size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
- "ioffset %016llx ilength %08x\n",
- args->v0.version, args->v0.pushbuf, args->v0.ioffset,
- args->v0.ilength);
- } else
- return ret;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_memory *cur;
+ int nr = 0;
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
- priv->user.bar.offset, 0x1000,
- args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_CE0) |
- (1ULL << NVDEV_ENGINE_CE1) |
- (1ULL << NVDEV_ENGINE_MSVLD) |
- (1ULL << NVDEV_ENGINE_MSPDEC) |
- (1ULL << NVDEV_ENGINE_MSPPP), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
+ mutex_lock(&subdev->mutex);
+ cur = fifo->runlist.mem[fifo->runlist.active];
+ fifo->runlist.active = !fifo->runlist.active;
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->context_attach = gf100_fifo_context_attach;
- nv_parent(chan)->context_detach = gf100_fifo_context_detach;
-
- usermem = chan->base.chid * 0x1000;
- ioffset = args->v0.ioffset;
- ilength = order_base_2(args->v0.ilength / 8);
-
- for (i = 0; i < 0x1000; i += 4)
- nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
- nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
- nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
- nv_wo32(base, 0x10, 0x0000face);
- nv_wo32(base, 0x30, 0xfffff902);
- nv_wo32(base, 0x48, lower_32_bits(ioffset));
- nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
- nv_wo32(base, 0x54, 0x00000002);
- nv_wo32(base, 0x84, 0x20400000);
- nv_wo32(base, 0x94, 0x30000001);
- nv_wo32(base, 0x9c, 0x00000100);
- nv_wo32(base, 0xa4, 0x1f1f1f1f);
- nv_wo32(base, 0xa8, 0x1f1f1f1f);
- nv_wo32(base, 0xac, 0x0000001f);
- nv_wo32(base, 0xb8, 0xf8000000);
- nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
- bar->flush(bar);
- return 0;
-}
-
-static int
-gf100_fifo_chan_init(struct nvkm_object *object)
-{
- struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
- struct gf100_fifo_priv *priv = (void *)object->engine;
- struct gf100_fifo_chan *chan = (void *)object;
- u32 chid = chan->base.chid;
- int ret;
-
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
-
- if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
- nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
- gf100_fifo_runlist_update(priv);
- }
-
- return 0;
-}
-
-static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
-
-static int
-gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
- struct gf100_fifo_priv *priv = (void *)object->engine;
- struct gf100_fifo_chan *chan = (void *)object;
- u32 chid = chan->base.chid;
-
- if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
- nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
- gf100_fifo_runlist_update(priv);
+ nvkm_kmap(cur);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+ nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+ nr++;
}
+ nvkm_done(cur);
- gf100_fifo_intr_engine(priv);
-
- nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
- return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-gf100_fifo_ofuncs = {
- .ctor = gf100_fifo_chan_ctor,
- .dtor = _nvkm_fifo_channel_dtor,
- .init = gf100_fifo_chan_init,
- .fini = gf100_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gf100_fifo_sclass[] = {
- { FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_fifo_base *base;
- int ret;
-
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_HEAP, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
- &base->pgd);
- if (ret)
- return ret;
-
- nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
- nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
- nv_wo32(base, 0x0208, 0xffffffff);
- nv_wo32(base, 0x020c, 0x000000ff);
+ nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+ nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
- ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-gf100_fifo_context_dtor(struct nvkm_object *object)
-{
- struct gf100_fifo_base *base = (void *)object;
- nvkm_vm_ref(NULL, &base->vm, base->pgd);
- nvkm_gpuobj_ref(NULL, &base->pgd);
- nvkm_fifo_context_destroy(&base->base);
+ if (wait_event_timeout(fifo->runlist.wait,
+ !(nvkm_rd32(device, 0x00227c) & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
+ nvkm_error(subdev, "runlist update timeout\n");
+ mutex_unlock(&subdev->mutex);
}
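gf100_fifo_runlist_update() above is a classic double-buffer: each call fills the runlist buffer the hardware is not currently reading, flips the active index, and submits. A minimal stand-alone sketch of the same dance, with the register write and completion wait stubbed out (all names illustrative):

/* Double-buffered runlist update: write into the buffer the hardware
 * is NOT currently reading, flip, then hand it to the hardware.  The
 * register write and wait_event_timeout() are stubbed; illustrative
 * only.
 */
#include <stdio.h>

#define NR_CHAN 4

struct runlist {
	unsigned int mem[2][NR_CHAN]; /* two buffers of channel ids */
	int active;
};

static void hw_submit(const unsigned int *buf, int nr)
{
	printf("submit %d entries at %p\n", nr, (const void *)buf);
}

static void runlist_update(struct runlist *rl, const int *chans, int nr)
{
	unsigned int *cur;
	int i;

	/* Take the buffer not submitted last time, then flip. */
	cur = rl->mem[rl->active];
	rl->active = !rl->active;

	for (i = 0; i < nr; i++)
		cur[i] = chans[i];

	/* Point the hardware at the freshly written buffer; the real
	 * code then waits for the pending bit to clear. */
	hw_submit(cur, nr);
}

int main(void)
{
	struct runlist rl = { .active = 0 };
	int chans[] = { 0, 2, 3 };

	runlist_update(&rl, chans, 3);
	return 0;
}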
-static struct nvkm_oclass
-gf100_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fifo_context_ctor,
- .dtor = gf100_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
static inline int
-gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
switch (engn) {
- case NVDEV_ENGINE_GR : engn = 0; break;
- case NVDEV_ENGINE_MSVLD : engn = 1; break;
- case NVDEV_ENGINE_MSPPP : engn = 2; break;
- case NVDEV_ENGINE_MSPDEC: engn = 3; break;
- case NVDEV_ENGINE_CE0 : engn = 4; break;
- case NVDEV_ENGINE_CE1 : engn = 5; break;
+ case NVKM_ENGINE_GR : engn = 0; break;
+ case NVKM_ENGINE_MSVLD : engn = 1; break;
+ case NVKM_ENGINE_MSPPP : engn = 2; break;
+ case NVKM_ENGINE_MSPDEC: engn = 3; break;
+ case NVKM_ENGINE_CE0 : engn = 4; break;
+ case NVKM_ENGINE_CE1 : engn = 5; break;
default:
return -1;
}
@@ -388,95 +95,73 @@ gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
}
static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+
switch (engn) {
- case 0: engn = NVDEV_ENGINE_GR; break;
- case 1: engn = NVDEV_ENGINE_MSVLD; break;
- case 2: engn = NVDEV_ENGINE_MSPPP; break;
- case 3: engn = NVDEV_ENGINE_MSPDEC; break;
- case 4: engn = NVDEV_ENGINE_CE0; break;
- case 5: engn = NVDEV_ENGINE_CE1; break;
+ case 0: engn = NVKM_ENGINE_GR; break;
+ case 1: engn = NVKM_ENGINE_MSVLD; break;
+ case 2: engn = NVKM_ENGINE_MSPPP; break;
+ case 3: engn = NVKM_ENGINE_MSPDEC; break;
+ case 4: engn = NVKM_ENGINE_CE0; break;
+ case 5: engn = NVKM_ENGINE_CE1; break;
default:
return NULL;
}
- return nvkm_engine(priv, engn);
+ return nvkm_device_engine(device, engn);
}
static void
gf100_fifo_recover_work(struct work_struct *work)
{
- struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
- struct nvkm_object *engine;
+ struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
unsigned long flags;
u32 engn, engm = 0;
u64 mask, todo;
- spin_lock_irqsave(&priv->base.lock, flags);
- mask = priv->mask;
- priv->mask = 0ULL;
- spin_unlock_irqrestore(&priv->base.lock, flags);
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ mask = fifo->mask;
+ fifo->mask = 0ULL;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
- engm |= 1 << gf100_fifo_engidx(priv, engn);
- nv_mask(priv, 0x002630, engm, engm);
+ engm |= 1 << gf100_fifo_engidx(fifo, engn);
+ nvkm_mask(device, 0x002630, engm, engm);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
- if ((engine = (void *)nvkm_engine(priv, engn))) {
- nv_ofuncs(engine)->fini(engine, false);
- WARN_ON(nv_ofuncs(engine)->init(engine));
+ if ((engine = nvkm_device_engine(device, engn))) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
}
}
- gf100_fifo_runlist_update(priv);
- nv_wr32(priv, 0x00262c, engm);
- nv_mask(priv, 0x002630, engm, 0x00000000);
+ gf100_fifo_runlist_update(fifo);
+ nvkm_wr32(device, 0x00262c, engm);
+ nvkm_mask(device, 0x002630, engm, 0x00000000);
}
static void
-gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
+gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
struct gf100_fifo_chan *chan)
{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
- unsigned long flags;
- nv_error(priv, "%s engine fault on channel %d, recovering...\n",
- nv_subdev(engine)->name, chid);
+ nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+ nvkm_subdev_name[engine->subdev.index], chid);
+ assert_spin_locked(&fifo->base.lock);
- nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
- chan->state = KILLED;
+ nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+ list_del_init(&chan->head);
+ chan->killed = true;
- spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engine);
- spin_unlock_irqrestore(&priv->base.lock, flags);
- schedule_work(&priv->fault);
-}
-
-static int
-gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
- struct gf100_fifo_chan *chan = NULL;
- struct nvkm_handle *bind;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- if (likely(chid >= priv->base.min && chid <= priv->base.max))
- chan = (void *)priv->base.channel[chid];
- if (unlikely(!chan))
- goto out;
-
- bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
- if (likely(bind)) {
- if (!mthd || !nv_call(bind->object, mthd, data))
- ret = 0;
- nvkm_namedb_put(bind);
- }
-
-out:
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return ret;
+ fifo->mask |= 1ULL << engine->subdev.index;
+ schedule_work(&fifo->fault);
}
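gf100_fifo_recover() stays cheap in the interrupt path: it only ORs the faulting engine into fifo->mask under the fifo lock and schedules deferred work; gf100_fifo_recover_work() then snapshots and clears the mask before doing the slow engine resets. A reduced sketch of that hand-off, with a pthread mutex standing in for the spinlock and the work run inline (illustrative only):

/* Fault-recovery hand-off: the IRQ path records the engine in a mask
 * and defers; the worker snapshots-and-clears the mask so new faults
 * queue a fresh run.  pthread mutex stands in for the spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t fault_mask;

static void recover(int engn)
{
	pthread_mutex_lock(&lock);
	fault_mask |= 1ULL << engn;	/* cheap work in the IRQ path */
	pthread_mutex_unlock(&lock);
	/* the real code calls schedule_work() here */
}

static void recover_work(void)
{
	uint64_t mask;

	pthread_mutex_lock(&lock);
	mask = fault_mask;
	fault_mask = 0;
	pthread_mutex_unlock(&lock);

	while (mask) {
		int engn = __builtin_ctzll(mask);	/* like __ffs64() */
		printf("resetting engine %d\n", engn);
		mask &= ~(1ULL << engn);	/* note: 1ULL, 64-bit safe */
	}
}

int main(void)
{
	recover(0);
	recover(5);
	recover_work();
	return 0;
}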
static const struct nvkm_enum
@@ -486,14 +171,17 @@ gf100_fifo_sched_reason[] = {
};
static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
struct gf100_fifo_chan *chan;
+ unsigned long flags;
u32 engn;
+ spin_lock_irqsave(&fifo->base.lock, flags);
for (engn = 0; engn < 6; engn++) {
- u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
u32 busy = (stat & 0x80000000);
u32 save = (stat & 0x00100000); /* maybe? */
u32 unk0 = (stat & 0x00040000);
@@ -502,32 +190,36 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
(void)save;
if (busy && unk0 && unk1) {
- if (!(chan = (void *)priv->base.channel[chid]))
- continue;
- if (!(engine = gf100_fifo_engine(priv, engn)))
- continue;
- gf100_fifo_recover(priv, engine, chan);
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->base.chid == chid) {
+ engine = gf100_fifo_engine(fifo, engn);
+ if (!engine)
+ break;
+ gf100_fifo_recover(fifo, engine, chan);
+ break;
+ }
+ }
}
}
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static void
-gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
- u32 intr = nv_rd32(priv, 0x00254c);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
u32 code = intr & 0x000000ff;
const struct nvkm_enum *en;
- char enunk[6] = "";
en = nvkm_enum_find(gf100_fifo_sched_reason, code);
- if (!en)
- snprintf(enunk, sizeof(enunk), "UNK%02x", code);
- nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
switch (code) {
case 0x0a:
- gf100_fifo_intr_sched_ctxsw(priv);
+ gf100_fifo_intr_sched_ctxsw(fifo);
break;
default:
break;
@@ -536,17 +228,17 @@ gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
- { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
- { 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
- { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
- { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
- { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
- { 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
- { 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
+ { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+ { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PCOUNTER" },
- { 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
- { 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
- { 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
+ { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
+ { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
{ 0x17, "PDAEMON" },
{}
};
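The fault decoding relies on sentinel-terminated nvkm_enum tables: the bare {} entry ends the table, and a failed lookup returns NULL so the caller prints an empty name instead. A stand-alone sketch with local stand-ins for nvkm_enum and nvkm_enum_find:

/* Sentinel-terminated lookup table: the {} entry (name == NULL) ends
 * the table, and the find helper returns NULL for unknown values.
 * Local stand-ins for nvkm_enum/nvkm_enum_find; illustrative only.
 */
#include <stdio.h>

struct demo_enum {
	unsigned int value;
	const char *name;
};

static const struct demo_enum fault_engine[] = {
	{ 0x00, "PGRAPH" },
	{ 0x07, "PFIFO" },
	{ 0x17, "PDAEMON" },
	{}	/* sentinel: terminates the table */
};

static const struct demo_enum *
demo_enum_find(const struct demo_enum *en, unsigned int value)
{
	while (en->name) {
		if (en->value == value)
			return en;
		en++;
	}
	return NULL;
}

int main(void)
{
	const struct demo_enum *en = demo_enum_find(fault_engine, 0x07);

	printf("unit 07 [%s]\n", en ? en->name : "");
	return 0;
}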
@@ -594,79 +286,65 @@ gf100_fifo_fault_gpcclient[] = {
};
static void
-gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
- u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
- u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
- u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
- u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
u32 gpc = (stat & 0x1f000000) >> 24;
u32 client = (stat & 0x00001f00) >> 8;
u32 write = (stat & 0x00000080);
u32 hub = (stat & 0x00000040);
u32 reason = (stat & 0x0000000f);
- struct nvkm_object *engctx = NULL, *object;
- struct nvkm_engine *engine = NULL;
const struct nvkm_enum *er, *eu, *ec;
- char erunk[6] = "";
- char euunk[6] = "";
- char ecunk[6] = "";
- char gpcid[3] = "";
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char gpcid[8] = "";
er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
- if (!er)
- snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
+ if (hub) {
+ ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
+ } else {
+ ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
+ snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+ }
+
if (eu) {
switch (eu->data2) {
- case NVDEV_SUBDEV_BAR:
- nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ case NVKM_SUBDEV_BAR:
+ nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
break;
- case NVDEV_SUBDEV_INSTMEM:
- nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
break;
- case NVDEV_ENGINE_IFB:
- nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_engine(priv, eu->data2);
- if (engine)
- engctx = nvkm_engctx_get(engine, inst);
+ engine = nvkm_device_engine(device, eu->data2);
break;
}
- } else {
- snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
}
- if (hub) {
- ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
- } else {
- ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
- snprintf(gpcid, sizeof(gpcid), "%d", gpc);
- }
+ chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
- if (!ec)
- snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
- nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
- "channel 0x%010llx [%s]\n", write ? "write" : "read",
- (u64)vahi << 32 | valo, er ? er->name : erunk,
- eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
- ec ? ec->name : ecunk, (u64)inst << 12,
- nvkm_client_name(engctx));
-
- object = engctx;
- while (object) {
- switch (nv_mclass(object)) {
- case FERMI_CHANNEL_GPFIFO:
- gf100_fifo_recover(priv, engine, (void *)object);
- break;
- }
- object = object->parent;
- }
+ nvkm_error(subdev,
+ "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+ "reason %02x [%s] on channel %d [%010llx %s]\n",
+ write ? "write" : "read", (u64)vahi << 32 | valo,
+ unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+ reason, er ? er->name : "", chan ? chan->chid : -1,
+ (u64)inst << 12,
+ chan ? chan->object.client->name : "unknown");
- nvkm_engctx_put(engctx);
+ if (engine && chan)
+ gf100_fifo_recover(fifo, engine, (void *)chan);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
static const struct nvkm_bitfield
@@ -678,290 +356,288 @@ gf100_fifo_pbdma_intr[] = {
};
static void
-gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
- u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
- u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+ u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+ u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
- u32 show = stat;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+	u32 show = stat;
+ char msg[128];
if (stat & 0x00800000) {
- if (!gf100_fifo_swmthd(priv, chid, mthd, data))
- show &= ~0x00800000;
+ if (device->sw) {
+ if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+ show &= ~0x00800000;
+ }
}
if (show) {
- nv_error(priv, "PBDMA%d:", unit);
- nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
- pr_cont("\n");
- nv_error(priv,
- "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid,
- nvkm_client_name_for_fifo_chid(&priv->base, chid),
- subc, mthd, data);
- }
-
- nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
- nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+ nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n",
+ unit, show, msg, chid, chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+ }
+
+ nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static void
-gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
- u32 intr = nv_rd32(priv, 0x002a00);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x002a00);
if (intr & 0x10000000) {
- wake_up(&priv->runlist.wait);
- nv_wr32(priv, 0x002a00, 0x10000000);
+ wake_up(&fifo->runlist.wait);
+ nvkm_wr32(device, 0x002a00, 0x10000000);
intr &= ~0x10000000;
}
if (intr) {
- nv_error(priv, "RUNLIST 0x%08x\n", intr);
- nv_wr32(priv, 0x002a00, intr);
+ nvkm_error(subdev, "RUNLIST %08x\n", intr);
+ nvkm_wr32(device, 0x002a00, intr);
}
}
static void
-gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
+gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
- u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
- u32 inte = nv_rd32(priv, 0x002628);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+ u32 inte = nvkm_rd32(device, 0x002628);
u32 unkn;
- nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+ nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
- nvkm_fifo_uevent(&priv->base);
+ nvkm_fifo_uevent(&fifo->base);
ints &= ~1;
}
if (ints) {
- nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
- nv_mask(priv, 0x002628, ints, 0);
+ nvkm_error(subdev, "ENGINE %d %d %01x",
+ engn, unkn, ints);
+ nvkm_mask(device, 0x002628, ints, 0);
}
}
}
-static void
-gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
+void
+gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
- u32 mask = nv_rd32(priv, 0x0025a4);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = nvkm_rd32(device, 0x0025a4);
while (mask) {
u32 unit = __ffs(mask);
- gf100_fifo_intr_engine_unit(priv, unit);
+ gf100_fifo_intr_engine_unit(fifo, unit);
mask &= ~(1 << unit);
}
}
static void
-gf100_fifo_intr(struct nvkm_subdev *subdev)
+gf100_fifo_intr(struct nvkm_fifo *base)
{
- struct gf100_fifo_priv *priv = (void *)subdev;
- u32 mask = nv_rd32(priv, 0x002140);
- u32 stat = nv_rd32(priv, 0x002100) & mask;
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
if (stat & 0x00000001) {
- u32 intr = nv_rd32(priv, 0x00252c);
- nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
- nv_wr32(priv, 0x002100, 0x00000001);
+ u32 intr = nvkm_rd32(device, 0x00252c);
+ nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000100) {
- gf100_fifo_intr_sched(priv);
- nv_wr32(priv, 0x002100, 0x00000100);
+ gf100_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
- u32 intr = nv_rd32(priv, 0x00256c);
- nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
- nv_wr32(priv, 0x002100, 0x00010000);
+ u32 intr = nvkm_rd32(device, 0x00256c);
+ nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
if (stat & 0x01000000) {
- u32 intr = nv_rd32(priv, 0x00258c);
- nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
- nv_wr32(priv, 0x002100, 0x01000000);
+ u32 intr = nvkm_rd32(device, 0x00258c);
+ nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x01000000);
stat &= ~0x01000000;
}
if (stat & 0x10000000) {
- u32 mask = nv_rd32(priv, 0x00259c);
+ u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gf100_fifo_intr_fault(priv, unit);
- nv_wr32(priv, 0x00259c, (1 << unit));
+ gf100_fifo_intr_fault(fifo, unit);
+ nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
- u32 mask = nv_rd32(priv, 0x0025a0);
+ u32 mask = nvkm_rd32(device, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- gf100_fifo_intr_pbdma(priv, unit);
- nv_wr32(priv, 0x0025a0, (1 << unit));
+ gf100_fifo_intr_pbdma(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- gf100_fifo_intr_runlist(priv);
+ gf100_fifo_intr_runlist(fifo);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
- gf100_fifo_intr_engine(priv);
+ gf100_fifo_intr_engine(fifo);
stat &= ~0x80000000;
}
if (stat) {
- nv_error(priv, "INTR 0x%08x\n", stat);
- nv_mask(priv, 0x002140, stat, 0x00000000);
- nv_wr32(priv, 0x002100, stat);
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
}
}
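gf100_fifo_intr() above follows the usual peel-off pattern: read STAT masked by the enable register, handle and acknowledge each known bit, clear it from the local copy, and finally disable whatever is left so an unknown source cannot storm. A toy version with simulated registers (illustrative only):

/* Interrupt peel-off: handle each known STAT bit, clear it from the
 * local copy, then mask off any leftovers.  Registers are simulated.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2] = { 0x40000101, 0x7fffffff }; /* STAT, INTR_EN */

static uint32_t rd32(int r)             { return regs[r]; }
static void     wr32(int r, uint32_t v) { regs[r] = v; }

static void fifo_intr(void)
{
	uint32_t mask = rd32(1);
	uint32_t stat = rd32(0) & mask;

	if (stat & 0x00000100) {
		printf("SCHED interrupt\n");
		stat &= ~0x00000100;
	}
	if (stat & 0x40000000) {
		printf("RUNLIST interrupt\n");
		stat &= ~0x40000000;
	}
	if (stat) {
		/* Unknown bits: report once, then disable them. */
		printf("INTR %08x\n", stat);
		wr32(1, mask & ~stat);
	}
}

int main(void)
{
	fifo_intr();
	return 0;
}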
-static void
-gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
-{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gf100_fifo_uevent_func = {
- .ctor = nvkm_fifo_uevent_ctor,
- .init = gf100_fifo_uevent_init,
- .fini = gf100_fifo_uevent_fini,
-};
-
static int
-gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_fifo_oneinit(struct nvkm_fifo *base)
{
- struct gf100_fifo_priv *priv;
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
- ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- INIT_WORK(&priv->fault, gf100_fifo_recover_work);
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->runlist.mem[0]);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+ false, &fifo->runlist.mem[0]);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
- &priv->runlist.mem[1]);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+ false, &fifo->runlist.mem[1]);
if (ret)
return ret;
- init_waitqueue_head(&priv->runlist.wait);
+ init_waitqueue_head(&fifo->runlist.wait);
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
- &priv->user.mem);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+ 0x1000, false, &fifo->user.mem);
if (ret)
return ret;
- ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
+ ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
if (ret)
return ret;
- ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = gf100_fifo_intr;
- nv_engine(priv)->cclass = &gf100_fifo_cclass;
- nv_engine(priv)->sclass = gf100_fifo_sclass;
+ nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
return 0;
}
static void
-gf100_fifo_dtor(struct nvkm_object *object)
+gf100_fifo_fini(struct nvkm_fifo *base)
{
- struct gf100_fifo_priv *priv = (void *)object;
-
- nvkm_gpuobj_unmap(&priv->user.bar);
- nvkm_gpuobj_ref(NULL, &priv->user.mem);
- nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
- nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
-
- nvkm_fifo_destroy(&priv->base);
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ flush_work(&fifo->fault);
}
-static int
-gf100_fifo_init(struct nvkm_object *object)
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
{
- struct gf100_fifo_priv *priv = (void *)object;
- int ret, i;
-
- ret = nvkm_fifo_init(&priv->base);
- if (ret)
- return ret;
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int i;
- nv_wr32(priv, 0x000204, 0xffffffff);
- nv_wr32(priv, 0x002204, 0xffffffff);
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ nvkm_wr32(device, 0x002204, 0xffffffff);
- priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
- nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+ fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
+ nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
/* assign engines to PBDMAs */
- if (priv->spoon_nr >= 3) {
- nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
- nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
- nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */
- nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
- nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
- nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+ if (fifo->spoon_nr >= 3) {
+ nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+ nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
+ nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+ nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+ nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
}
/* PBDMA[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ for (i = 0; i < fifo->spoon_nr; i++) {
+ nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
- nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
- nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+ nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
- nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x7fffffff);
- nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
- return 0;
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
+ nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
+}
+
+static void *
+gf100_fifo_dtor(struct nvkm_fifo *base)
+{
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ nvkm_vm_put(&fifo->user.bar);
+ nvkm_memory_del(&fifo->user.mem);
+ nvkm_memory_del(&fifo->runlist.mem[0]);
+ nvkm_memory_del(&fifo->runlist.mem[1]);
+ return fifo;
}
-struct nvkm_oclass *
-gf100_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fifo_ctor,
- .dtor = gf100_fifo_dtor,
- .init = gf100_fifo_init,
- .fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gf100_fifo = {
+ .dtor = gf100_fifo_dtor,
+ .oneinit = gf100_fifo_oneinit,
+ .init = gf100_fifo_init,
+ .fini = gf100_fifo_fini,
+ .intr = gf100_fifo_intr,
+ .uevent_init = gf100_fifo_uevent_init,
+ .uevent_fini = gf100_fifo_uevent_fini,
+ .chan = {
+ &gf100_fifo_gpfifo_oclass,
+ NULL
},
};
+
+int
+gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ struct gf100_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fifo->chan);
+ INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+}
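Note how the new constructor no longer touches the GPU: one-time allocation moved into .oneinit, which the core runs once before first use, while .init only programs registers and so can safely re-run on resume. A reduced sketch of that lifecycle split, with a hypothetical dispatcher in place of the nvkm core:

/* oneinit/init split: oneinit runs exactly once for long-lived
 * allocation, init re-runs on every resume to (re)program hardware.
 * The dispatcher below is a made-up stand-in for the nvkm core.
 */
#include <stdio.h>
#include <stdbool.h>

struct eng {
	bool oneinit_done;
};

static int eng_oneinit(struct eng *eng)
{
	printf("allocating long-lived objects\n");
	return 0;
}

static void eng_init(struct eng *eng)
{
	printf("programming registers\n");
}

/* The framework guarantees oneinit precedes the first init. */
static int eng_start(struct eng *eng)
{
	if (!eng->oneinit_done) {
		int ret = eng_oneinit(eng);
		if (ret)
			return ret;
		eng->oneinit_done = true;
	}
	eng_init(eng);
	return 0;
}

int main(void)
{
	struct eng eng = { .oneinit_done = false };

	eng_start(&eng);	/* first init: allocates, then programs */
	eng_start(&eng);	/* resume: programs only */
	return 0;
}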
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 000000000000..c649ca9b53e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,31 @@
+#ifndef __GF100_FIFO_H__
+#define __GF100_FIFO_H__
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gf100_fifo {
+ struct nvkm_fifo base;
+
+ struct list_head chan;
+
+ struct work_struct fault;
+ u64 mask;
+
+ struct {
+ struct nvkm_memory *mem[2];
+ int active;
+ wait_queue_head_t wait;
+ } runlist;
+
+ struct {
+ struct nvkm_memory *mem;
+ struct nvkm_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+void gf100_fifo_intr_engine(struct gf100_fifo *);
+void gf100_fifo_runlist_update(struct gf100_fifo *);
+#endif
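The gf100_fifo(p) macro above is the standard container_of upcast: gf100_fifo embeds nvkm_fifo as its base member, and the macro recovers the derived pointer from a base pointer. A stand-alone sketch of the idiom:

/* container_of idiom: the derived struct embeds the base, and
 * container_of() recovers the derived pointer from a base pointer.
 * Stand-alone definitions; illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int index; };

struct derived {
	struct base base;
	int extra;
};

#define to_derived(p) container_of((p), struct derived, base)

int main(void)
{
	struct derived d = { .base = { .index = 7 }, .extra = 42 };
	struct base *b = &d.base;

	printf("extra = %d\n", to_derived(b)->extra);
	return 0;
}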
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..98970a0b7a66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,486 +22,121 @@
* Authors: Ben Skeggs
*/
#include "gk104.h"
+#include "changk104.h"
#include <core/client.h>
-#include <core/engctx.h>
#include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
- u64 subdev;
- u64 mask;
-} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_CE2)),
- _(NVDEV_ENGINE_MSPDEC , 0),
- _(NVDEV_ENGINE_MSPPP , 0),
- _(NVDEV_ENGINE_MSVLD , 0),
- _(NVDEV_ENGINE_CE0 , 0),
- _(NVDEV_ENGINE_CE1 , 0),
- _(NVDEV_ENGINE_MSENC , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
-struct gk104_fifo_engn {
- struct nvkm_gpuobj *runlist[2];
- int cur_runlist;
- wait_queue_head_t wait;
-};
-
-struct gk104_fifo_priv {
- struct nvkm_fifo base;
-
- struct work_struct fault;
- u64 mask;
-
- struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
- struct {
- struct nvkm_gpuobj *mem;
- struct nvkm_vma bar;
- } user;
- int spoon_nr;
-};
-
-struct gk104_fifo_base {
- struct nvkm_fifo_base base;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-};
-
-struct gk104_fifo_chan {
- struct nvkm_fifo_chan base;
- u32 engine;
- enum {
- STOPPED,
- RUNNING,
- KILLED
- } state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static void
-gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
-{
- struct nvkm_bar *bar = nvkm_bar(priv);
- struct gk104_fifo_engn *engn = &priv->engine[engine];
- struct nvkm_gpuobj *cur;
- int i, p;
-
- mutex_lock(&nv_subdev(priv)->mutex);
- cur = engn->runlist[engn->cur_runlist];
- engn->cur_runlist = !engn->cur_runlist;
-
- for (i = 0, p = 0; i < priv->base.max; i++) {
- struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
- if (chan && chan->state == RUNNING && chan->engine == engine) {
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000000);
- p += 8;
- }
- }
- bar->flush(bar);
-
- nv_wr32(priv, 0x002270, cur->addr >> 12);
- nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
-
- if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
- (engine * 0x08)) & 0x00100000),
- msecs_to_jiffies(2000)) == 0)
- nv_error(priv, "runlist %d update timeout\n", engine);
- mutex_unlock(&nv_subdev(priv)->mutex);
-}
-static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
- struct nvkm_object *object)
+void
+gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gk104_fifo_base *base = (void *)parent->parent;
- struct nvkm_engctx *ectx = (void *)object;
- u32 addr;
- int ret;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW :
- return 0;
- case NVDEV_ENGINE_CE0:
- case NVDEV_ENGINE_CE1:
- case NVDEV_ENGINE_CE2:
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
- return 0;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
- default:
- return -EINVAL;
- }
-
- if (!ectx->vma.node) {
- ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
- NV_MEM_ACCESS_RW, &ectx->vma);
- if (ret)
- return ret;
-
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
- }
-
- nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
- nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
- bar->flush(bar);
- return 0;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *object)
+void
+gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gk104_fifo_priv *priv = (void *)parent->engine;
- struct gk104_fifo_base *base = (void *)parent->parent;
- struct gk104_fifo_chan *chan = (void *)parent;
- u32 addr;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_CE0 :
- case NVDEV_ENGINE_CE1 :
- case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
- case NVDEV_ENGINE_GR : addr = 0x0210; break;
- case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
- case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
- case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
- default:
- return -EINVAL;
- }
-
- nv_wr32(priv, 0x002634, chan->base.chid);
- if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
- nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- if (suspend)
- return -EBUSY;
- }
-
- if (addr) {
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
- }
-
- return 0;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
- union {
- struct kepler_channel_gpfifo_a_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct gk104_fifo_priv *priv = (void *)engine;
- struct gk104_fifo_base *base = (void *)parent;
+ struct gk104_fifo_engn *engn = &fifo->engine[engine];
struct gk104_fifo_chan *chan;
- u64 usermem, ioffset, ilength;
- int ret, i;
-
- nv_ioctl(parent, "create channel gpfifo size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
- "ioffset %016llx ilength %08x engine %08x\n",
- args->v0.version, args->v0.pushbuf, args->v0.ioffset,
- args->v0.ilength, args->v0.engine);
- } else
- return ret;
-
- for (i = 0; i < FIFO_ENGINE_NR; i++) {
- if (args->v0.engine & (1 << i)) {
- if (nvkm_engine(parent, fifo_engine[i].subdev)) {
- args->v0.engine = (1 << i);
- break;
- }
- }
- }
-
- if (i == FIFO_ENGINE_NR) {
- nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
- return -ENODEV;
- }
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
- priv->user.bar.offset, 0x200,
- args->v0.pushbuf,
- fifo_engine[i].mask, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->context_attach = gk104_fifo_context_attach;
- nv_parent(chan)->context_detach = gk104_fifo_context_detach;
- chan->engine = i;
-
- usermem = chan->base.chid * 0x200;
- ioffset = args->v0.ioffset;
- ilength = order_base_2(args->v0.ilength / 8);
-
- for (i = 0; i < 0x200; i += 4)
- nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
- nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
- nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
- nv_wo32(base, 0x10, 0x0000face);
- nv_wo32(base, 0x30, 0xfffff902);
- nv_wo32(base, 0x48, lower_32_bits(ioffset));
- nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
- nv_wo32(base, 0x84, 0x20400000);
- nv_wo32(base, 0x94, 0x30000001);
- nv_wo32(base, 0x9c, 0x00000100);
- nv_wo32(base, 0xac, 0x0000001f);
- nv_wo32(base, 0xe8, chan->base.chid);
- nv_wo32(base, 0xb8, 0xf8000000);
- nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
- bar->flush(bar);
- return 0;
-}
-
-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
-{
- struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
- struct gk104_fifo_priv *priv = (void *)object->engine;
- struct gk104_fifo_chan *chan = (void *)object;
- u32 chid = chan->base.chid;
- int ret;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_memory *cur;
+ int nr = 0;
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
- nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
-
- if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- gk104_fifo_runlist_update(priv, chan->engine);
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- }
-
- return 0;
-}
-
-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
- struct gk104_fifo_priv *priv = (void *)object->engine;
- struct gk104_fifo_chan *chan = (void *)object;
- u32 chid = chan->base.chid;
+ mutex_lock(&subdev->mutex);
+ cur = engn->runlist[engn->cur_runlist];
+ engn->cur_runlist = !engn->cur_runlist;
- if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
- nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
- gk104_fifo_runlist_update(priv, chan->engine);
+ nvkm_kmap(cur);
+ list_for_each_entry(chan, &engn->chan, head) {
+ nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+ nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+ nr++;
}
+ nvkm_done(cur);
- nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
- return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
- .ctor = gk104_fifo_chan_ctor,
- .dtor = _nvkm_fifo_channel_dtor,
- .init = gk104_fifo_chan_init,
- .fini = gk104_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gk104_fifo_sclass[] = {
- { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_fifo_base *base;
- int ret;
-
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
- &base->pgd);
- if (ret)
- return ret;
-
- nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
- nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
- nv_wo32(base, 0x0208, 0xffffffff);
- nv_wo32(base, 0x020c, 0x000000ff);
-
- ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
- struct gk104_fifo_base *base = (void *)object;
- nvkm_vm_ref(NULL, &base->vm, base->pgd);
- nvkm_gpuobj_ref(NULL, &base->pgd);
- nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gk104_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_fifo_context_ctor,
- .dtor = gk104_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
-};
+ nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+ nvkm_wr32(device, 0x002274, (engine << 20) | nr);
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static inline int
-gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
-{
- switch (engn) {
- case NVDEV_ENGINE_GR :
- case NVDEV_ENGINE_CE2 : engn = 0; break;
- case NVDEV_ENGINE_MSVLD : engn = 1; break;
- case NVDEV_ENGINE_MSPPP : engn = 2; break;
- case NVDEV_ENGINE_MSPDEC: engn = 3; break;
- case NVDEV_ENGINE_CE0 : engn = 4; break;
- case NVDEV_ENGINE_CE1 : engn = 5; break;
- case NVDEV_ENGINE_MSENC : engn = 6; break;
- default:
- return -1;
- }
-
- return engn;
+ if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
+ (engine * 0x08)) & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
+ nvkm_error(subdev, "runlist %d update timeout\n", engine);
+ mutex_unlock(&subdev->mutex);
}
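Unlike gf100, which submits a single runlist with 0x01f00000 (all engines selected), gk104 keeps one runlist per engine and encodes the target in the upper bits of the 0x002274 write. A toy encoder for that word, with the field layout read off the writes above and the helper name made up:

/* Toy encode/decode of the 0x002274 runlist submit word as written
 * above: gk104 puts the target engine in bits 20+, the entry count in
 * the low bits.  Field layout inferred from this diff; helper made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t submit_encode(unsigned int engine, unsigned int nr)
{
	return ((uint32_t)engine << 20) | nr;
}

int main(void)
{
	uint32_t word = submit_encode(3, 17);	/* engine 3, 17 entries */

	printf("word=%08x engine=%u nr=%u\n",
	       word, (word >> 20) & 0x1f, word & 0xffff);
	return 0;
}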
static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
+gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
- if (engn >= ARRAY_SIZE(fifo_engine))
- return NULL;
- return nvkm_engine(priv, fifo_engine[engn].subdev);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 subdevs = gk104_fifo_engine_subdev(engn);
+ if (subdevs)
+ return nvkm_device_engine(device, __ffs(subdevs));
+ return NULL;
}
static void
gk104_fifo_recover_work(struct work_struct *work)
{
- struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
- struct nvkm_object *engine;
+ struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
unsigned long flags;
u32 engn, engm = 0;
u64 mask, todo;
- spin_lock_irqsave(&priv->base.lock, flags);
- mask = priv->mask;
- priv->mask = 0ULL;
- spin_unlock_irqrestore(&priv->base.lock, flags);
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ mask = fifo->mask;
+ fifo->mask = 0ULL;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
- engm |= 1 << gk104_fifo_engidx(priv, engn);
- nv_mask(priv, 0x002630, engm, engm);
+ engm |= 1 << gk104_fifo_subdev_engine(engn);
+ nvkm_mask(device, 0x002630, engm, engm);
for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
- if ((engine = (void *)nvkm_engine(priv, engn))) {
- nv_ofuncs(engine)->fini(engine, false);
- WARN_ON(nv_ofuncs(engine)->init(engine));
+ if ((engine = nvkm_device_engine(device, engn))) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
}
- gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
+ gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
}
- nv_wr32(priv, 0x00262c, engm);
- nv_mask(priv, 0x002630, engm, 0x00000000);
+ nvkm_wr32(device, 0x00262c, engm);
+ nvkm_mask(device, 0x002630, engm, 0x00000000);
}
static void
-gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
+gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
struct gk104_fifo_chan *chan)
{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
- unsigned long flags;
- nv_error(priv, "%s engine fault on channel %d, recovering...\n",
- nv_subdev(engine)->name, chid);
+ nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+ nvkm_subdev_name[engine->subdev.index], chid);
+ assert_spin_locked(&fifo->base.lock);
- nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
- chan->state = KILLED;
+ nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+ list_del_init(&chan->head);
+ chan->killed = true;
- spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engine);
- spin_unlock_irqrestore(&priv->base.lock, flags);
- schedule_work(&priv->fault);
-}
-
-static int
-gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
- struct gk104_fifo_chan *chan = NULL;
- struct nvkm_handle *bind;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- if (likely(chid >= priv->base.min && chid <= priv->base.max))
- chan = (void *)priv->base.channel[chid];
- if (unlikely(!chan))
- goto out;
-
- bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
- if (likely(bind)) {
- if (!mthd || !nv_call(bind->object, mthd, data))
- ret = 0;
- nvkm_namedb_put(bind);
- }
-
-out:
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return ret;
+ fifo->mask |= 1ULL << engine->subdev.index;
+ schedule_work(&fifo->fault);
}
static const struct nvkm_enum
@@ -516,18 +151,16 @@ gk104_fifo_bind_reason[] = {
};
static void
-gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
- u32 intr = nv_rd32(priv, 0x00252c);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00252c);
u32 code = intr & 0x000000ff;
- const struct nvkm_enum *en;
- char enunk[6] = "";
-
- en = nvkm_enum_find(gk104_fifo_bind_reason, code);
- if (!en)
- snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gk104_fifo_bind_reason, code);
- nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
+ nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
static const struct nvkm_enum
@@ -537,14 +170,17 @@ gk104_fifo_sched_reason[] = {
};
static void
-gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
struct gk104_fifo_chan *chan;
+ unsigned long flags;
u32 engn;
- for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
- u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
u32 busy = (stat & 0x80000000);
u32 next = (stat & 0x07ff0000) >> 16;
u32 chsw = (stat & 0x00008000);
@@ -555,32 +191,35 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
(void)save;
if (busy && chsw) {
- if (!(chan = (void *)priv->base.channel[chid]))
- continue;
- if (!(engine = gk104_fifo_engine(priv, engn)))
- continue;
- gk104_fifo_recover(priv, engine, chan);
+ list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+ if (chan->base.chid == chid) {
+ engine = gk104_fifo_engine(fifo, engn);
+ if (!engine)
+ break;
+ gk104_fifo_recover(fifo, engine, chan);
+ break;
+ }
+ }
}
}
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static void
-gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
- u32 intr = nv_rd32(priv, 0x00254c);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
u32 code = intr & 0x000000ff;
- const struct nvkm_enum *en;
- char enunk[6] = "";
-
- en = nvkm_enum_find(gk104_fifo_sched_reason, code);
- if (!en)
- snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gk104_fifo_sched_reason, code);
- nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
switch (code) {
case 0x0a:
- gk104_fifo_intr_sched_ctxsw(priv);
+ gk104_fifo_intr_sched_ctxsw(fifo);
break;
default:
break;
@@ -588,38 +227,42 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
}
static void
-gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
- u32 stat = nv_rd32(priv, 0x00256c);
- nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
- nv_wr32(priv, 0x00256c, stat);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00256c);
+ nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+ nvkm_wr32(device, 0x00256c, stat);
}
static void
-gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
- u32 stat = nv_rd32(priv, 0x00259c);
- nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00259c);
+ nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
- { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
- { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
- { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
- { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
- { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
- { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
- { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
- { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
- { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
+ { 0x00, "GR", NULL, NVKM_ENGINE_GR },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
+ { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
+ { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
+ { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PERF" },
- { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
- { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
- { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
+ { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
{ 0x17, "PMU" },
- { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
- { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
+ { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
{}
};
@@ -708,80 +351,65 @@ gk104_fifo_fault_gpcclient[] = {
};
static void
-gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
- u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
- u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
- u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
- u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
u32 gpc = (stat & 0x1f000000) >> 24;
u32 client = (stat & 0x00001f00) >> 8;
u32 write = (stat & 0x00000080);
u32 hub = (stat & 0x00000040);
u32 reason = (stat & 0x0000000f);
- struct nvkm_object *engctx = NULL, *object;
- struct nvkm_engine *engine = NULL;
const struct nvkm_enum *er, *eu, *ec;
- char erunk[6] = "";
- char euunk[6] = "";
- char ecunk[6] = "";
- char gpcid[3] = "";
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char gpcid[8] = "";
er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
- if (!er)
- snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
+ if (hub) {
+ ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
+ } else {
+ ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
+ snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+ }
+
if (eu) {
switch (eu->data2) {
- case NVDEV_SUBDEV_BAR:
- nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ case NVKM_SUBDEV_BAR:
+ nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
break;
- case NVDEV_SUBDEV_INSTMEM:
- nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
break;
- case NVDEV_ENGINE_IFB:
- nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_engine(priv, eu->data2);
- if (engine)
- engctx = nvkm_engctx_get(engine, inst);
+ engine = nvkm_device_engine(device, eu->data2);
break;
}
- } else {
- snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
}
- if (hub) {
- ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
- } else {
- ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
- snprintf(gpcid, sizeof(gpcid), "%d", gpc);
- }
+ chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
- if (!ec)
- snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
- nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
- "channel 0x%010llx [%s]\n", write ? "write" : "read",
- (u64)vahi << 32 | valo, er ? er->name : erunk,
- eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
- ec ? ec->name : ecunk, (u64)inst << 12,
- nvkm_client_name(engctx));
-
- object = engctx;
- while (object) {
- switch (nv_mclass(object)) {
- case KEPLER_CHANNEL_GPFIFO_A:
- case MAXWELL_CHANNEL_GPFIFO_A:
- gk104_fifo_recover(priv, engine, (void *)object);
- break;
- }
- object = object->parent;
- }
+ nvkm_error(subdev,
+ "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+ "reason %02x [%s] on channel %d [%010llx %s]\n",
+ write ? "write" : "read", (u64)vahi << 32 | valo,
+ unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+ reason, er ? er->name : "", chan ? chan->chid : -1,
+ (u64)inst << 12,
+ chan ? chan->object.client->name : "unknown");
- nvkm_engctx_put(engctx);
+ if (engine && chan)
+ gk104_fifo_recover(fifo, engine, (void *)chan);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -819,35 +447,42 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
};
static void
-gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
- u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
- u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
- u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+ u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+ u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ char msg[128];
if (stat & 0x00800000) {
- if (!gk104_fifo_swmthd(priv, chid, mthd, data))
- show &= ~0x00800000;
- nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ if (device->sw) {
+ if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+ show &= ~0x00800000;
+ }
+ nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
}
if (show) {
- nv_error(priv, "PBDMA%d:", unit);
- nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
- pr_cont("\n");
- nv_error(priv,
- "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid,
- nvkm_client_name_for_fifo_chid(&priv->base, chid),
- subc, mthd, data);
+ nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n",
+ unit, show, msg, chid, chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
- nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+ nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
@@ -860,280 +495,266 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
};
static void
-gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
- u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
- u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
- u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+ u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+ char msg[128];
if (stat) {
- nv_error(priv, "PBDMA%d:", unit);
- nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
- pr_cont("\n");
- nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
- nv_rd32(priv, 0x040150 + (unit * 0x2000)),
- nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+ nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+ unit, stat, msg, chid,
+ nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+ nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
}
- nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+ nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
static void
-gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
- u32 mask = nv_rd32(priv, 0x002a00);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 mask = nvkm_rd32(device, 0x002a00);
while (mask) {
u32 engn = __ffs(mask);
- wake_up(&priv->engine[engn].wait);
- nv_wr32(priv, 0x002a00, 1 << engn);
+ wake_up(&fifo->engine[engn].wait);
+ nvkm_wr32(device, 0x002a00, 1 << engn);
mask &= ~(1 << engn);
}
}
static void
-gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
- nvkm_fifo_uevent(&priv->base);
+ nvkm_fifo_uevent(&fifo->base);
}
-static void
-gk104_fifo_intr(struct nvkm_subdev *subdev)
+void
+gk104_fifo_intr(struct nvkm_fifo *base)
{
- struct gk104_fifo_priv *priv = (void *)subdev;
- u32 mask = nv_rd32(priv, 0x002140);
- u32 stat = nv_rd32(priv, 0x002100) & mask;
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
if (stat & 0x00000001) {
- gk104_fifo_intr_bind(priv);
- nv_wr32(priv, 0x002100, 0x00000001);
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000010) {
- nv_error(priv, "PIO_ERROR\n");
- nv_wr32(priv, 0x002100, 0x00000010);
+ nvkm_error(subdev, "PIO_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x00000010);
stat &= ~0x00000010;
}
if (stat & 0x00000100) {
- gk104_fifo_intr_sched(priv);
- nv_wr32(priv, 0x002100, 0x00000100);
+ gk104_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
- gk104_fifo_intr_chsw(priv);
- nv_wr32(priv, 0x002100, 0x00010000);
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
if (stat & 0x00800000) {
- nv_error(priv, "FB_FLUSH_TIMEOUT\n");
- nv_wr32(priv, 0x002100, 0x00800000);
+ nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+ nvkm_wr32(device, 0x002100, 0x00800000);
stat &= ~0x00800000;
}
if (stat & 0x01000000) {
- nv_error(priv, "LB_ERROR\n");
- nv_wr32(priv, 0x002100, 0x01000000);
+ nvkm_error(subdev, "LB_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x01000000);
stat &= ~0x01000000;
}
if (stat & 0x08000000) {
- gk104_fifo_intr_dropped_fault(priv);
- nv_wr32(priv, 0x002100, 0x08000000);
+ gk104_fifo_intr_dropped_fault(fifo);
+ nvkm_wr32(device, 0x002100, 0x08000000);
stat &= ~0x08000000;
}
if (stat & 0x10000000) {
- u32 mask = nv_rd32(priv, 0x00259c);
+ u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gk104_fifo_intr_fault(priv, unit);
- nv_wr32(priv, 0x00259c, (1 << unit));
+ gk104_fifo_intr_fault(fifo, unit);
+ nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
- u32 mask = nv_rd32(priv, 0x0025a0);
+ u32 mask = nvkm_rd32(device, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- gk104_fifo_intr_pbdma_0(priv, unit);
- gk104_fifo_intr_pbdma_1(priv, unit);
- nv_wr32(priv, 0x0025a0, (1 << unit));
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- gk104_fifo_intr_runlist(priv);
+ gk104_fifo_intr_runlist(fifo);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
- nv_wr32(priv, 0x002100, 0x80000000);
- gk104_fifo_intr_engine(priv);
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
stat &= ~0x80000000;
}
if (stat) {
- nv_error(priv, "INTR 0x%08x\n", stat);
- nv_mask(priv, 0x002140, stat, 0x00000000);
- nv_wr32(priv, 0x002100, stat);
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
}
}
-static void
-gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+void
+gk104_fifo_fini(struct nvkm_fifo *base)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ flush_work(&fifo->fault);
+	/* allow mmu fault interrupts, even when we're not using the fifo */
+ nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
-static void
-gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+int
+gk104_fifo_oneinit(struct nvkm_fifo *base)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
- nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int ret, i;
-static const struct nvkm_event_func
-gk104_fifo_uevent_func = {
- .ctor = nvkm_fifo_uevent_ctor,
- .init = gk104_fifo_uevent_init,
- .fini = gk104_fifo_uevent_fini,
-};
+ for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x8000, 0x1000, false,
+ &fifo->engine[i].runlist[0]);
+ if (ret)
+ return ret;
-int
-gk104_fifo_fini(struct nvkm_object *object, bool suspend)
-{
- struct gk104_fifo_priv *priv = (void *)object;
- int ret;
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x8000, 0x1000, false,
+ &fifo->engine[i].runlist[1]);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&fifo->engine[i].wait);
+ INIT_LIST_HEAD(&fifo->engine[i].chan);
+ }
- ret = nvkm_fifo_fini(&priv->base, suspend);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ fifo->base.nr * 0x200, 0x1000, true,
+ &fifo->user.mem);
if (ret)
return ret;
- /* allow mmu fault interrupts, even when we're not using fifo */
- nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+ ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
+ &fifo->user.bar);
+ if (ret)
+ return ret;
+
+ nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
return 0;
}
-int
-gk104_fifo_init(struct nvkm_object *object)
+void
+gk104_fifo_init(struct nvkm_fifo *base)
{
- struct gk104_fifo_priv *priv = (void *)object;
- int ret, i;
-
- ret = nvkm_fifo_init(&priv->base);
- if (ret)
- return ret;
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int i;
/* enable all available PBDMA units */
- nv_wr32(priv, 0x000204, 0xffffffff);
- priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
- nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
+ nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
/* PBDMA[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ for (i = 0; i < fifo->spoon_nr; i++) {
+ nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
/* PBDMA[n].HCE */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+ for (i = 0; i < fifo->spoon_nr; i++) {
+ nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
}
- nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
- nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x7fffffff);
- return 0;
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
}
-void
-gk104_fifo_dtor(struct nvkm_object *object)
+void *
+gk104_fifo_dtor(struct nvkm_fifo *base)
{
- struct gk104_fifo_priv *priv = (void *)object;
+ struct gk104_fifo *fifo = gk104_fifo(base);
int i;
- nvkm_gpuobj_unmap(&priv->user.bar);
- nvkm_gpuobj_ref(NULL, &priv->user.mem);
+ nvkm_vm_put(&fifo->user.bar);
+ nvkm_memory_del(&fifo->user.mem);
- for (i = 0; i < FIFO_ENGINE_NR; i++) {
- nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
- nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+ for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+ nvkm_memory_del(&fifo->engine[i].runlist[1]);
+ nvkm_memory_del(&fifo->engine[i].runlist[0]);
}
- nvkm_fifo_destroy(&priv->base);
+ return fifo;
}
int
-gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ int index, int nr, struct nvkm_fifo **pfifo)
{
- struct gk104_fifo_impl *impl = (void *)oclass;
- struct gk104_fifo_priv *priv;
- int ret, i;
+ struct gk104_fifo *fifo;
- ret = nvkm_fifo_create(parent, engine, oclass, 0,
- impl->channels - 1, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- INIT_WORK(&priv->fault, gk104_fifo_recover_work);
-
- for (i = 0; i < FIFO_ENGINE_NR; i++) {
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].runlist[0]);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].runlist[1]);
- if (ret)
- return ret;
-
- init_waitqueue_head(&priv->engine[i].wait);
- }
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
- if (ret)
- return ret;
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+ *pfifo = &fifo->base;
- ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
- if (ret)
- return ret;
-
- ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = gk104_fifo_intr;
- nv_engine(priv)->cclass = &gk104_fifo_cclass;
- nv_engine(priv)->sclass = gk104_fifo_sclass;
- return 0;
+ return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}
-struct nvkm_oclass *
-gk104_fifo_oclass = &(struct gk104_fifo_impl) {
- .base.handle = NV_ENGINE(FIFO, 0xe0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_fifo_ctor,
- .dtor = gk104_fifo_dtor,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk104_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gk104_fifo_gpfifo_oclass,
+ NULL
},
- .channels = 4096,
-}.base;
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 318d30d6ee1a..5afd9b5ec5d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,18 +1,77 @@
-#ifndef __NVKM_FIFO_NVE0_H__
-#define __NVKM_FIFO_NVE0_H__
-#include <engine/fifo.h>
-
-int gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void gk104_fifo_dtor(struct nvkm_object *);
-int gk104_fifo_init(struct nvkm_object *);
-int gk104_fifo_fini(struct nvkm_object *, bool);
-
-struct gk104_fifo_impl {
- struct nvkm_oclass base;
- u32 channels;
+#ifndef __GK104_FIFO_H__
+#define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gk104_fifo_engn {
+ struct nvkm_memory *runlist[2];
+ int cur_runlist;
+ wait_queue_head_t wait;
+ struct list_head chan;
+};
+
+struct gk104_fifo {
+ struct nvkm_fifo base;
+
+ struct work_struct fault;
+ u64 mask;
+
+ struct gk104_fifo_engn engine[7];
+ struct {
+ struct nvkm_memory *mem;
+ struct nvkm_vma bar;
+ } user;
+ int spoon_nr;
};
-extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
+int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+ int index, int nr, struct nvkm_fifo **);
+void *gk104_fifo_dtor(struct nvkm_fifo *);
+int gk104_fifo_oneinit(struct nvkm_fifo *);
+void gk104_fifo_init(struct nvkm_fifo *);
+void gk104_fifo_fini(struct nvkm_fifo *);
+void gk104_fifo_intr(struct nvkm_fifo *);
+void gk104_fifo_uevent_init(struct nvkm_fifo *);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *);
+void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
+
+static inline u64
+gk104_fifo_engine_subdev(int engine)
+{
+ switch (engine) {
+ case 0: return (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_SW) |
+ (1ULL << NVKM_ENGINE_CE2);
+ case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
+ case 2: return (1ULL << NVKM_ENGINE_MSPPP);
+ case 3: return (1ULL << NVKM_ENGINE_MSVLD);
+ case 4: return (1ULL << NVKM_ENGINE_CE0);
+ case 5: return (1ULL << NVKM_ENGINE_CE1);
+ case 6: return (1ULL << NVKM_ENGINE_MSENC);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static inline int
+gk104_fifo_subdev_engine(int subdev)
+{
+ switch (subdev) {
+ case NVKM_ENGINE_GR:
+ case NVKM_ENGINE_SW:
+ case NVKM_ENGINE_CE2 : return 0;
+ case NVKM_ENGINE_MSPDEC: return 1;
+ case NVKM_ENGINE_MSPPP : return 2;
+ case NVKM_ENGINE_MSVLD : return 3;
+ case NVKM_ENGINE_CE0 : return 4;
+ case NVKM_ENGINE_CE1 : return 5;
+ case NVKM_ENGINE_MSENC : return 6;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 927092217a06..ce01c1a7d41c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -22,15 +22,25 @@
* Authors: Ben Skeggs
*/
#include "gk104.h"
+#include "changk104.h"
-struct nvkm_oclass *
-gk208_fifo_oclass = &(struct gk104_fifo_impl) {
- .base.handle = NV_ENGINE(FIFO, 0x08),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_fifo_ctor,
- .dtor = gk104_fifo_dtor,
- .init = gk104_fifo_init,
- .fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gk208_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gk104_fifo_gpfifo_oclass,
+ NULL
},
- .channels = 1024,
-}.base;
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b30dc87a1357..b47fe98f4181 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -20,15 +20,25 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "gk104.h"
+#include "changk104.h"
-struct nvkm_oclass *
-gk20a_fifo_oclass = &(struct gk104_fifo_impl) {
- .base.handle = NV_ENGINE(FIFO, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_fifo_ctor,
- .dtor = gk104_fifo_dtor,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk20a_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gk104_fifo_gpfifo_oclass,
+ NULL
},
- .channels = 128,
-}.base;
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
index 749d525dd8e3..2db629f1bf7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -22,36 +22,25 @@
* Authors: Ben Skeggs
*/
#include "gk104.h"
+#include "changk104.h"
-#include <nvif/class.h>
-
-static struct nvkm_oclass
-gm204_fifo_sclass[] = {
- { MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
- {}
+static const struct nvkm_fifo_func
+gm204_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gm204_fifo_gpfifo_oclass,
+ NULL
+ },
};
-static int
-gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
- if (ret == 0) {
- struct gk104_fifo_priv *priv = (void *)*pobject;
- nv_engine(priv)->sclass = gm204_fifo_sclass;
- }
- return ret;
+ return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
}
-
-struct nvkm_oclass *
-gm204_fifo_oclass = &(struct gk104_fifo_impl) {
- .base.handle = NV_ENGINE(FIFO, 0x24),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_fifo_ctor,
- .dtor = gk104_fifo_dtor,
- .init = gk104_fifo_init,
- .fini = _nvkm_fifo_fini,
- },
- .channels = 4096,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 000000000000..ae6375d9760f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_fifo_func
+gm20b_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gm204_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 000000000000..820132363f68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ "pushbuf %llx ioffset %016llx "
+ "ilength %08x\n",
+ args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.ioffset, args->v0.ilength);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+ nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+ .base.oclass = G82_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = g84_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 000000000000..e7cbc139c1d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_SW : return 0;
+ case NVKM_ENGINE_GR : return 0x0210;
+ case NVKM_ENGINE_CE0 : return 0x0230;
+ case NVKM_ENGINE_CE1 : return 0x0240;
+ case NVKM_ENGINE_MSPDEC: return 0x0250;
+ case NVKM_ENGINE_MSPPP : return 0x0260;
+ case NVKM_ENGINE_MSVLD : return 0x0270;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ int ret = 0;
+
+ nvkm_wr32(device, 0x002634, chan->base.chid);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+ chan->base.chid, chan->base.object.client->name);
+ ret = -EBUSY;
+ if (suspend)
+ return ret;
+ }
+
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ nvkm_done(inst);
+ }
+
+ return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+
+ if (offset) {
+ u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_done(inst);
+ }
+
+ return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ int engn = engine->subdev.index;
+ int ret;
+
+ if (!gf100_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
+ ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ if (ret)
+ return ret;
+
+ return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 coff = chan->base.chid * 8;
+
+ if (!list_empty(&chan->head) && !chan->killed) {
+ list_del_init(&chan->head);
+ nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
+ gf100_fifo_runlist_update(fifo);
+ }
+
+ gf100_fifo_intr_engine(fifo);
+
+ nvkm_wr32(device, 0x003000 + coff, 0x00000000);
+}
+
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 addr = chan->base.inst->addr >> 12;
+ u32 coff = chan->base.chid * 8;
+
+ nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
+
+ if (list_empty(&chan->head) && !chan->killed) {
+ list_add_tail(&chan->head, &fifo->chan);
+ nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
+ gf100_fifo_runlist_update(fifo);
+ }
+}
+
+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+ struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+ nvkm_gpuobj_del(&chan->pgd);
+ return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+ .dtor = gf100_fifo_gpfifo_dtor,
+ .init = gf100_fifo_gpfifo_init,
+ .fini = gf100_fifo_gpfifo_fini,
+ .ntfy = g84_fifo_chan_ntfy,
+ .engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+ .engine_init = gf100_fifo_gpfifo_engine_init,
+ .engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ union {
+ struct fermi_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct gf100_fifo *fifo = gf100_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_object *parent = oclass->parent;
+ struct gf100_fifo_chan *chan;
+ u64 usermem, ioffset, ilength;
+ int ret, i;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
+
+	/* allocate the channel */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+ 0x1000, 0x1000, true, args->v0.vm, 0,
+ (1ULL << NVKM_ENGINE_CE0) |
+ (1ULL << NVKM_ENGINE_CE1) |
+ (1ULL << NVKM_ENGINE_GR) |
+ (1ULL << NVKM_ENGINE_MSPDEC) |
+ (1ULL << NVKM_ENGINE_MSPPP) |
+ (1ULL << NVKM_ENGINE_MSVLD) |
+ (1ULL << NVKM_ENGINE_SW),
+ 1, fifo->user.bar.offset, 0x1000,
+ oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+
+ /* page directory */
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+ nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+ nvkm_done(chan->base.inst);
+
+ ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ if (ret)
+ return ret;
+
+ /* clear channel control registers */
+
+ usermem = chan->base.chid * 0x1000;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(fifo->user.mem);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+ nvkm_done(fifo->user.mem);
+ usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+ nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+ nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+ nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->base.inst);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+ .base.oclass = FERMI_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gf100_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 000000000000..0b817540a9e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_client *client = chan->base.object.client;
+
+ nvkm_wr32(device, 0x002634, chan->base.chid);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+ chan->base.chid, client->name);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+ switch (engine->subdev.index) {
+ case NVKM_ENGINE_SW :
+ case NVKM_ENGINE_CE0 :
+ case NVKM_ENGINE_CE1 :
+ case NVKM_ENGINE_CE2 : return 0x0000;
+ case NVKM_ENGINE_GR : return 0x0210;
+ case NVKM_ENGINE_MSPDEC: return 0x0250;
+ case NVKM_ENGINE_MSPPP : return 0x0260;
+ case NVKM_ENGINE_MSVLD : return 0x0270;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine, bool suspend)
+{
+ const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+ int ret;
+
+ ret = gk104_fifo_gpfifo_kick(chan);
+ if (ret && suspend)
+ return ret;
+
+ if (offset) {
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ nvkm_done(inst);
+ }
+
+ return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct nvkm_gpuobj *inst = chan->base.inst;
+
+ if (offset) {
+ u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_done(inst);
+ }
+
+ return 0;
+}
+
+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+ struct nvkm_engine *engine,
+ struct nvkm_object *object)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ int engn = engine->subdev.index;
+ int ret;
+
+ if (!gk104_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
+ ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ if (ret)
+ return ret;
+
+ return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 coff = chan->base.chid * 8;
+
+ if (!list_empty(&chan->head)) {
+ list_del_init(&chan->head);
+ nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
+ gk104_fifo_runlist_update(fifo, chan->engine);
+ }
+
+ nvkm_wr32(device, 0x800000 + coff, 0x00000000);
+}
+
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo *fifo = chan->fifo;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u32 addr = chan->base.inst->addr >> 12;
+ u32 coff = chan->base.chid * 8;
+
+ nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+ nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
+
+ if (list_empty(&chan->head) && !chan->killed) {
+ list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+ gk104_fifo_runlist_update(fifo, chan->engine);
+ nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+ }
+}
+
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+ nvkm_gpuobj_del(&chan->pgd);
+ return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = g84_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gk104_fifo_gpfifo_engine_init,
+ .engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ union {
+ struct kepler_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_object *parent = oclass->parent;
+ struct gk104_fifo_chan *chan;
+ u64 usermem, ioffset, ilength;
+ u32 engines;
+ int ret, i;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ "ioffset %016llx ilength %08x engine %08x\n",
+ args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.ilength, args->v0.engine);
+ } else
+ return ret;
+
+ /* determine which downstream engines are present */
+ for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+ u64 subdevs = gk104_fifo_engine_subdev(i);
+ if (!nvkm_device_engine(device, __ffs64(subdevs)))
+ continue;
+ engines |= (1 << i);
+ }
+
+ /* if this is an engine mask query, we're done */
+ if (!args->v0.engine) {
+ args->v0.engine = engines;
+ return nvkm_object_new(oclass, NULL, 0, pobject);
+ }
+
+	/* check that we support a requested engine - note that the user
+	 * argument is a mask in order to allow the user to request (for
+	 * example) *any* copy engine, without caring which one is used.
+	 */
+ args->v0.engine &= engines;
+ if (!args->v0.engine) {
+ nvif_ioctl(parent, "no supported engine\n");
+ return -ENODEV;
+ }
+
+ /* allocate the channel */
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+ chan->fifo = fifo;
+ chan->engine = __ffs(args->v0.engine);
+ INIT_LIST_HEAD(&chan->head);
+
+ ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+ 0x1000, 0x1000, true, args->v0.vm, 0,
+ gk104_fifo_engine_subdev(chan->engine),
+ 1, fifo->user.bar.offset, 0x200,
+ oclass, &chan->base);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+
+ /* page directory */
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+ nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+ nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+ nvkm_done(chan->base.inst);
+
+ ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ if (ret)
+ return ret;
+
+ /* clear channel control registers */
+ usermem = chan->base.chid * 0x200;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(fifo->user.mem);
+ for (i = 0; i < 0x200; i += 4)
+ nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+ nvkm_done(fifo->user.mem);
+ usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+ /* RAMFC */
+ nvkm_kmap(chan->base.inst);
+ nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+ nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+ nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+ (ilength << 16));
+ nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+ nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+ nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->base.inst);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+ .base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
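
The constructor above doubles as a capability query: passing engine == 0 makes the kernel fill the field with the supported-engine mask and return a dummy object, while a non-zero value is treated as a mask of acceptable engines from which the lowest set bit is chosen. Below is a minimal user-space sketch of that selection step; the names are illustrative only and this is not the kernel or nvif API:

#include <stdio.h>
#include <stdint.h>

/* mirror of: args->v0.engine &= engines; chan->engine = __ffs(...) */
static int pick_engine(uint32_t requested, uint32_t present)
{
	uint32_t usable = requested & present;
	if (!usable)
		return -1;               /* the driver returns -ENODEV here */
	return __builtin_ctz(usable);    /* user-space stand-in for __ffs() */
}

int main(void)
{
	uint32_t present = 0x2d;         /* hypothetical engine mask */
	/* "any copy engine" style request: bits 2 and 3 acceptable */
	printf("engine index = %d\n", pick_engine(0x0c, present));
	return 0;
}
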
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
new file mode 100644
index 000000000000..6511d6e21ecc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gm204_fifo_gpfifo_oclass = {
+ .base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 000000000000..a8c69f878221
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ "pushbuf %llx ioffset %016llx "
+ "ilength %08x\n",
+ args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.ioffset, args->v0.ilength);
+ if (!args->v0.pushbuf)
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ oclass, chan);
+ if (ret)
+ return ret;
+
+ args->v0.chid = chan->base.chid;
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+ .base.oclass = NV50_CHANNEL_GPFIFO,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_fifo_gpfifo_new,
+};
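
The value written at RAMFC offset 0x54 above packs the high bits of the ring address together with the ring size, encoded as log2 of the number of 8-byte GPFIFO entries (order_base_2(ilength / 8)). A compilable sketch of that encoding follows, assuming a power-of-two ring size larger than one entry for brevity:

#include <stdio.h>
#include <stdint.h>

/* user-space stand-in for the kernel's order_base_2() helper */
static unsigned order_base_2(unsigned n)
{
	/* smallest k with 2^k >= n (n > 1 assumed for brevity) */
	return 32 - __builtin_clz(n - 1);
}

int main(void)
{
	uint64_t ioffset = 0x20100000ull;  /* example ring address */
	uint32_t ilength = 0x10000;        /* example 64 KiB ring, in bytes */
	uint32_t field = (uint32_t)(ioffset >> 32) |
			 (order_base_2(ilength / 8) << 16);
	printf("RAMFC+0x54 = 0x%08x\n", field);  /* 0x000d0000 here */
	return 0;
}
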
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 043e4296084c..ad707ff176cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -22,20 +22,17 @@
* Authors: Ben Skeggs
*/
#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
-#include <core/handle.h>
#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
#include <subdev/timer.h>
+#include <engine/sw.h>
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv04_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv04_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -47,268 +44,19 @@ nv04_ramfc[] = {
{}
};
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-int
-nv04_fifo_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 handle)
-{
- struct nv04_fifo_priv *priv = (void *)parent->engine;
- struct nv04_fifo_chan *chan = (void *)parent;
- u32 context, chid = chan->base.chid;
- int ret;
-
- if (nv_iclass(object, NV_GPUOBJ_CLASS))
- context = nv_gpuobj(object)->addr >> 4;
- else
- context = 0x00000004; /* just non-zero */
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_DMAOBJ:
- case NVDEV_ENGINE_SW:
- context |= 0x00000000;
- break;
- case NVDEV_ENGINE_GR:
- context |= 0x00010000;
- break;
- case NVDEV_ENGINE_MPEG:
- context |= 0x00020000;
- break;
- default:
- return -EINVAL;
- }
-
- context |= 0x80000000; /* valid */
- context |= chid << 24;
-
- mutex_lock(&nv_subdev(priv)->mutex);
- ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
- mutex_unlock(&nv_subdev(priv)->mutex);
- return ret;
-}
-
-void
-nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
-{
- struct nv04_fifo_priv *priv = (void *)parent->engine;
- mutex_lock(&nv_subdev(priv)->mutex);
- nvkm_ramht_remove(priv->ramht, cookie);
- mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-int
-nv04_fifo_context_attach(struct nvkm_object *parent,
- struct nvkm_object *object)
-{
- nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
- return 0;
-}
-
-static int
-nv04_fifo_chan_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nv04_fifo_priv *priv = (void *)engine;
- struct nv04_fifo_chan *chan;
- int ret;
-
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->object_attach = nv04_fifo_object_attach;
- nv_parent(chan)->object_detach = nv04_fifo_object_detach;
- nv_parent(chan)->context_attach = nv04_fifo_context_attach;
- chan->ramfc = chan->base.chid * 32;
-
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
- nv_wo32(priv->ramfc, chan->ramfc + 0x10,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- return 0;
-}
-
-void
-nv04_fifo_chan_dtor(struct nvkm_object *object)
-{
- struct nv04_fifo_priv *priv = (void *)object->engine;
- struct nv04_fifo_chan *chan = (void *)object;
- struct ramfc_desc *c = priv->ramfc_desc;
-
- do {
- nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
- } while ((++c)->bits);
-
- nvkm_fifo_channel_destroy(&chan->base);
-}
-
-int
-nv04_fifo_chan_init(struct nvkm_object *object)
-{
- struct nv04_fifo_priv *priv = (void *)object->engine;
- struct nv04_fifo_chan *chan = (void *)object;
- u32 mask = 1 << chan->base.chid;
- unsigned long flags;
- int ret;
-
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return 0;
-}
-
-int
-nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv04_fifo_priv *priv = (void *)object->engine;
- struct nv04_fifo_chan *chan = (void *)object;
- struct nvkm_gpuobj *fctx = priv->ramfc;
- struct ramfc_desc *c;
- unsigned long flags;
- u32 data = chan->ramfc;
- u32 chid;
-
- /* prevent fifo context switches */
- spin_lock_irqsave(&priv->base.lock, flags);
- nv_wr32(priv, NV03_PFIFO_CACHES, 0);
-
- /* if this channel is active, replace it with a null context */
- chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
- if (chid == chan->base.chid) {
- nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
- c = priv->ramfc_desc;
- do {
- u32 rm = ((1ULL << c->bits) - 1) << c->regs;
- u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
- u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
- u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
- nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
- } while ((++c)->bits);
-
- c = priv->ramfc_desc;
- do {
- nv_wr32(priv, c->regp, 0x00000000);
- } while ((++c)->bits);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- /* restore normal operation, after disabling dma mode */
- nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
- nv_wr32(priv, NV03_PFIFO_CACHES, 1);
- spin_unlock_irqrestore(&priv->base.lock, flags);
-
- return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv04_fifo_ofuncs = {
- .ctor = nv04_fifo_chan_ctor,
- .dtor = nv04_fifo_chan_dtor,
- .init = nv04_fifo_chan_init,
- .fini = nv04_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv04_fifo_sclass[] = {
- { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-int
-nv04_fifo_context_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv04_fifo_base *base;
- int ret;
-
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static struct nvkm_oclass
-nv04_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fifo_context_ctor,
- .dtor = _nvkm_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
void
-nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__acquires(priv->base.lock)
+nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
+__acquires(fifo->base.lock)
{
- struct nv04_fifo_priv *priv = (void *)pfifo;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
unsigned long flags;
- spin_lock_irqsave(&priv->base.lock, flags);
+ spin_lock_irqsave(&fifo->base.lock, flags);
*pflags = flags;
- nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
- nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
/* in some cases the puller may be left in an inconsistent state
* if you try to stop it while it's busy translating handles.
@@ -319,28 +67,31 @@ __acquires(priv->base.lock)
* to avoid this, we invalidate the most recently calculated
* instance.
*/
- if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
- nv_warn(priv, "timeout idling puller\n");
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+ if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+ break;
+ );
- if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+ if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
- nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
- nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
void
-nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__releases(priv->base.lock)
+nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
+__releases(fifo->base.lock)
{
- struct nv04_fifo_priv *priv = (void *)pfifo;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
unsigned long flags = *pflags;
- nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
- nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
- spin_unlock_irqrestore(&priv->base.lock, flags);
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static const char *
@@ -354,61 +105,40 @@ nv_dma_state_err(u32 state)
}
static bool
-nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
- struct nv04_fifo_chan *chan = NULL;
- struct nvkm_handle *bind;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
+ struct nvkm_sw *sw = device->sw;
+ const int subc = (addr & 0x0000e000) >> 13;
+ const int mthd = (addr & 0x00001ffc);
+ const u32 mask = 0x0000000f << (subc * 4);
+ u32 engine = nvkm_rd32(device, 0x003280);
bool handled = false;
- unsigned long flags;
- u32 engine;
-
- spin_lock_irqsave(&priv->base.lock, flags);
- if (likely(chid >= priv->base.min && chid <= priv->base.max))
- chan = (void *)priv->base.channel[chid];
- if (unlikely(!chan))
- goto out;
switch (mthd) {
- case 0x0000:
- bind = nvkm_namedb_get(nv_namedb(chan), data);
- if (unlikely(!bind))
- break;
-
- if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
- engine = 0x0000000f << (subc * 4);
- chan->subc[subc] = data;
- handled = true;
-
- nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
- }
-
- nvkm_namedb_put(bind);
+ case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+ nvkm_wr32(device, 0x003280, (engine &= ~mask));
+ case 0x0180 ... 0x01fc: /* handle -> instance */
+ data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+ case 0x0100 ... 0x017c:
+ case 0x0200 ... 0x1ffc: /* pass method down to sw */
+ if (!(engine & mask) && sw)
+ handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
break;
default:
- engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
- if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
- break;
-
- bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
- if (likely(bind)) {
- if (!nv_call(bind->object, mthd, data))
- handled = true;
- nvkm_namedb_put(bind);
- }
break;
}
-out:
- spin_unlock_irqrestore(&priv->base.lock, flags);
return handled;
}
static void
-nv04_fifo_cache_error(struct nvkm_device *device,
- struct nv04_fifo_priv *priv, u32 chid, u32 get)
+nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ u32 pull0 = nvkm_rd32(device, 0x003250);
u32 mthd, data;
int ptr;
@@ -420,216 +150,214 @@ nv04_fifo_cache_error(struct nvkm_device *device,
ptr = (get & 0x7ff) >> 2;
if (device->card_type < NV_40) {
- mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+ mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
} else {
- mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+ mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
}
- if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- const char *client_name =
- nvkm_client_name_for_fifo_chid(&priv->base, chid);
- nv_error(priv,
- "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
- chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
+ if (!(pull0 & 0x00000100) ||
+ !nv04_fifo_swmthd(device, chid, mthd, data)) {
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ nvkm_error(subdev, "CACHE_ERROR - "
+ "ch %d [%s] subc %d mthd %04x data %08x\n",
+ chid, chan ? chan->object.client->name : "unknown",
+ (mthd >> 13) & 7, mthd & 0x1ffc, data);
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
- nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
static void
-nv04_fifo_dma_pusher(struct nvkm_device *device,
- struct nv04_fifo_priv *priv, u32 chid)
+nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
- const char *client_name;
- u32 dma_get = nv_rd32(priv, 0x003244);
- u32 dma_put = nv_rd32(priv, 0x003240);
- u32 push = nv_rd32(priv, 0x003220);
- u32 state = nv_rd32(priv, 0x003228);
-
- client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 dma_get = nvkm_rd32(device, 0x003244);
+ u32 dma_put = nvkm_rd32(device, 0x003240);
+ u32 push = nvkm_rd32(device, 0x003220);
+ u32 state = nvkm_rd32(device, 0x003228);
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *name;
+ chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+ name = chan ? chan->object.client->name : "unknown";
if (device->card_type == NV_50) {
- u32 ho_get = nv_rd32(priv, 0x003328);
- u32 ho_put = nv_rd32(priv, 0x003320);
- u32 ib_get = nv_rd32(priv, 0x003334);
- u32 ib_put = nv_rd32(priv, 0x003330);
-
- nv_error(priv,
- "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
- chid, client_name, ho_get, dma_get, ho_put, dma_put,
- ib_get, ib_put, state, nv_dma_state_err(state), push);
+ u32 ho_get = nvkm_rd32(device, 0x003328);
+ u32 ho_put = nvkm_rd32(device, 0x003320);
+ u32 ib_get = nvkm_rd32(device, 0x003334);
+ u32 ib_put = nvkm_rd32(device, 0x003330);
+
+ nvkm_error(subdev, "DMA_PUSHER - "
+ "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+ "ib_put %08x state %08x (err: %s) push %08x\n",
+ chid, name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state),
+ push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(priv, 0x003364, 0x00000000);
+ nvkm_wr32(device, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(priv, 0x003244, dma_put);
- nv_wr32(priv, 0x003328, ho_put);
+ nvkm_wr32(device, 0x003244, dma_put);
+ nvkm_wr32(device, 0x003328, ho_put);
} else
if (ib_get != ib_put)
- nv_wr32(priv, 0x003334, ib_put);
+ nvkm_wr32(device, 0x003334, ib_put);
} else {
- nv_error(priv,
- "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
- chid, client_name, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
+ nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+ "state %08x (err: %s) push %08x\n",
+ chid, name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
if (dma_get != dma_put)
- nv_wr32(priv, 0x003244, dma_put);
+ nvkm_wr32(device, 0x003244, dma_put);
}
+ nvkm_fifo_chan_put(&fifo->base, flags, &chan);
- nv_wr32(priv, 0x003228, 0x00000000);
- nv_wr32(priv, 0x003220, 0x00000001);
- nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ nvkm_wr32(device, 0x003228, 0x00000000);
+ nvkm_wr32(device, 0x003220, 0x00000001);
+ nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
void
-nv04_fifo_intr(struct nvkm_subdev *subdev)
+nv04_fifo_intr(struct nvkm_fifo *base)
{
- struct nvkm_device *device = nv_device(subdev);
- struct nv04_fifo_priv *priv = (void *)subdev;
- u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
- u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+ u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
u32 reassign, chid, get, sem;
- reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
- nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+ reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
- chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
- get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
+ get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
- nv04_fifo_cache_error(device, priv, chid, get);
+ nv04_fifo_cache_error(fifo, chid, get);
stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
- nv04_fifo_dma_pusher(device, priv, chid);
+ nv04_fifo_dma_pusher(fifo, chid);
stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (stat & NV_PFIFO_INTR_SEMAPHORE) {
stat &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
- sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+ sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (device->card_type == NV_50) {
if (stat & 0x00000010) {
stat &= ~0x00000010;
- nv_wr32(priv, 0x002100, 0x00000010);
+ nvkm_wr32(device, 0x002100, 0x00000010);
}
if (stat & 0x40000000) {
- nv_wr32(priv, 0x002100, 0x40000000);
- nvkm_fifo_uevent(&priv->base);
+ nvkm_wr32(device, 0x002100, 0x40000000);
+ nvkm_fifo_uevent(&fifo->base);
stat &= ~0x40000000;
}
}
if (stat) {
- nv_warn(priv, "unknown intr 0x%08x\n", stat);
- nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
- nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
+ nvkm_warn(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
}
- nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
-}
-
-static int
-nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
- struct nv04_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nvkm_ramht_ref(imem->ramht, &priv->ramht);
- nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
- nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv04_fifo_cclass;
- nv_engine(priv)->sclass = nv04_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- priv->ramfc_desc = nv04_ramfc;
- return 0;
+ nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
void
-nv04_fifo_dtor(struct nvkm_object *object)
+nv04_fifo_init(struct nvkm_fifo *base)
{
- struct nv04_fifo_priv *priv = (void *)object;
- nvkm_gpuobj_ref(NULL, &priv->ramfc);
- nvkm_gpuobj_ref(NULL, &priv->ramro);
- nvkm_ramht_ref(NULL, &priv->ramht);
- nvkm_fifo_destroy(&priv->base);
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
int
-nv04_fifo_init(struct nvkm_object *object)
+nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+ struct nvkm_fifo **pfifo)
{
- struct nv04_fifo_priv *priv = (void *)object;
+ struct nv04_fifo *fifo;
int ret;
- ret = nvkm_fifo_init(&priv->base);
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->ramfc = ramfc;
+ *pfifo = &fifo->base;
+
+ ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
if (ret)
return ret;
- nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((priv->ramht->bits - 9) << 16) |
- (priv->ramht->gpuobj.addr >> 8));
- nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
- nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ set_bit(nr - 1, fifo->base.mask); /* inactive channel */
return 0;
}
-struct nvkm_oclass *
-nv04_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fifo_ctor,
- .dtor = nv04_fifo_dtor,
- .init = nv04_fifo_init,
- .fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv04_fifo = {
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv04_fifo_dma_oclass,
+ NULL
},
};
+
+int
+nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv04_fifo, device, index, 16,
+ nv04_fifo_ramfc, pfifo);
+}
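
The nv04_fifo_ramfc table kept at the top of this file describes, entry by entry, how a live PFIFO register field maps into the per-channel RAMFC image: a field of width `bits` at register `regp` bit `regs`, stored at RAMFC byte offset `ctxp` bit `ctxs`. A user-space sketch of the descriptor-driven save step this layout implies (the shape matches the removed fini path above; the field values below are examples, not hardware state):

#include <stdio.h>
#include <stdint.h>

struct ramfc_desc {
	unsigned bits, ctxs, ctxp, regs;
	uint32_t regp;
};

/* repack one described register field into a RAMFC context word */
static uint32_t save_field(const struct ramfc_desc *c,
			   uint32_t regval, uint32_t ctxval)
{
	uint32_t mask = (uint32_t)((1ULL << c->bits) - 1);
	uint32_t rv = (regval >> c->regs) & mask;
	ctxval &= ~(mask << c->ctxs);
	return ctxval | (rv << c->ctxs);
}

int main(void)
{
	/* 16-bit DMA_INSTANCE at register bit 0 -> RAMFC+0x08 bit 0 */
	struct ramfc_desc d = { 16, 0, 0x08, 0, 0x322c };
	printf("ramfc word = 0x%08x\n", save_field(&d, 0xdead1234, 0));
	return 0;
}
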
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e0e0c47cb4ca..03f60004bf7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,137 +1,9 @@
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
-#include <engine/fifo.h>
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
+#include "priv.h"
-#define NV04_PFIFO_DELAY_0 0x00002040
-#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
-#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
-#define NV03_PFIFO_INTR_0 0x00002100
-#define NV03_PFIFO_INTR_EN_0 0x00002140
-# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
-# define NV_PFIFO_INTR_RUNOUT (1<<4)
-# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
-# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
-# define NV_PFIFO_INTR_DMA_PT (1<<16)
-# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
-# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
-#define NV03_PFIFO_RAMHT 0x00002210
-#define NV03_PFIFO_RAMFC 0x00002214
-#define NV03_PFIFO_RAMRO 0x00002218
-#define NV40_PFIFO_RAMFC 0x00002220
-#define NV03_PFIFO_CACHES 0x00002500
-#define NV04_PFIFO_MODE 0x00002504
-#define NV04_PFIFO_DMA 0x00002508
-#define NV04_PFIFO_SIZE 0x0000250c
-#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
-#define NV50_PFIFO_CTX_TABLE__SIZE 128
-#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
-#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
-#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
-#define NV03_PFIFO_CACHE0_PULL0 0x00003040
-#define NV04_PFIFO_CACHE0_PULL0 0x00003050
-#define NV04_PFIFO_CACHE0_PULL1 0x00003054
-#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
-#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
-#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
-#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
-#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
-#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
-#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
-#define NV03_PFIFO_CACHE1_PUT 0x00003210
-#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
-#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
-# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
-# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
-# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
-# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
-# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
-# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
-#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
-#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
-#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
-#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
-#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
-#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
-#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
-#define NV03_PFIFO_CACHE1_PULL0 0x00003240
-#define NV04_PFIFO_CACHE1_PULL0 0x00003250
-# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
-# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
-#define NV03_PFIFO_CACHE1_PULL1 0x00003250
-#define NV04_PFIFO_CACHE1_PULL1 0x00003254
-#define NV04_PFIFO_CACHE1_HASH 0x00003258
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
-#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
-#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
-#define NV03_PFIFO_CACHE1_GET 0x00003270
-#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
-#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
-#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
-#define NV40_PFIFO_UNK32E4 0x000032E4
-#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
-#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
-#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
-#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
-
-struct ramfc_desc {
+struct nv04_fifo_ramfc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
@@ -139,37 +11,13 @@ struct ramfc_desc {
unsigned regp;
};
-struct nv04_fifo_priv {
+struct nv04_fifo {
struct nvkm_fifo base;
- struct ramfc_desc *ramfc_desc;
- struct nvkm_ramht *ramht;
- struct nvkm_gpuobj *ramro;
- struct nvkm_gpuobj *ramfc;
-};
-
-struct nv04_fifo_base {
- struct nvkm_fifo_base base;
-};
-
-struct nv04_fifo_chan {
- struct nvkm_fifo_chan base;
- u32 subc[8];
- u32 ramfc;
+ const struct nv04_fifo_ramfc *ramfc;
};
-int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
-void nv04_fifo_object_detach(struct nvkm_object *, int);
-
-void nv04_fifo_chan_dtor(struct nvkm_object *);
-int nv04_fifo_chan_init(struct nvkm_object *);
-int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
-
-int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-void nv04_fifo_dtor(struct nvkm_object *);
-int nv04_fifo_init(struct nvkm_object *);
-void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
-void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+ int index, int nr, const struct nv04_fifo_ramfc *,
+ struct nvkm_fifo **);
+void nv04_fifo_init(struct nvkm_fifo *);
#endif
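
The nv04_fifo() macro above is the usual container_of() downcast: the chipset structure embeds struct nvkm_fifo as its `base` member, and callbacks that receive the base pointer recover the outer object from it. A self-contained illustration with stand-in types (not the real kernel definitions):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_fifo { int nr; };

struct nv04_fifo {
	struct nvkm_fifo base;   /* embedded base object */
	const void *ramfc;
};
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)

int main(void)
{
	struct nv04_fifo fifo = { .base = { .nr = 16 } };
	struct nvkm_fifo *base = &fifo.base;  /* what callbacks receive */
	printf("outer recovered: %d\n", nv04_fifo(base) == &fifo);
	return 0;
}
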
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 48ce4af6f543..f9a87deb2b3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -22,17 +22,11 @@
* Authors: Ben Skeggs
*/
#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv10_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv10_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -45,134 +39,21 @@ nv10_ramfc[] = {
{}
};
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv10_fifo_chan_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nv04_fifo_priv *priv = (void *)engine;
- struct nv04_fifo_chan *chan;
- int ret;
-
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->object_attach = nv04_fifo_object_attach;
- nv_parent(chan)->object_detach = nv04_fifo_object_detach;
- nv_parent(chan)->context_attach = nv04_fifo_context_attach;
- chan->ramfc = chan->base.chid * 32;
-
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
- nv_wo32(priv->ramfc, chan->ramfc + 0x14,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- return 0;
-}
-
-static struct nvkm_ofuncs
-nv10_fifo_ofuncs = {
- .ctor = nv10_fifo_chan_ctor,
- .dtor = nv04_fifo_chan_dtor,
- .init = nv04_fifo_chan_init,
- .fini = nv04_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv10_fifo_sclass[] = {
- { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv10_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x10),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fifo_context_ctor,
- .dtor = _nvkm_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv10_fifo = {
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv10_fifo_dma_oclass,
+ NULL
},
};
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
- struct nv04_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nvkm_ramht_ref(imem->ramht, &priv->ramht);
- nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
- nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv10_fifo_cclass;
- nv_engine(priv)->sclass = nv10_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- priv->ramfc_desc = nv10_ramfc;
- return 0;
+ return nv04_fifo_new_(&nv10_fifo, device, index, 32,
+ nv10_fifo_ramfc, pfifo);
}
-
-struct nvkm_oclass *
-nv10_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x10),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_fifo_ctor,
- .dtor = nv04_fifo_dtor,
- .init = nv04_fifo_init,
- .fini = _nvkm_fifo_fini,
- },
-};
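
With the per-chipset boilerplate gone, nv10 (like nv17 and nv40 below) reduces to a function table, a RAMFC layout, and a one-line call into the shared nv04_fifo_new_() constructor that varies only the channel count. A compilable sketch of that parameterized-constructor pattern, with simplified stand-in types:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fifo_func { const char *name; };

struct fifo {
	const struct fifo_func *func;  /* per-chipset hooks */
	int nr;                        /* channel count */
	const int *ramfc;              /* register<->context layout */
};

static int fifo_new_(const struct fifo_func *func, int nr,
		     const int *ramfc, struct fifo **pfifo)
{
	struct fifo *fifo = calloc(1, sizeof(*fifo));
	if (!fifo)
		return -ENOMEM;
	fifo->func = func;
	fifo->nr = nr;
	fifo->ramfc = ramfc;
	*pfifo = fifo;
	return 0;
}

static const struct fifo_func nv10_fifo = { "nv10" };
static const int nv10_ramfc[] = { 0 };

int main(void)
{
	struct fifo *fifo;
	if (!fifo_new_(&nv10_fifo, 32, nv10_ramfc, &fifo))
		printf("%s: %d channels\n", fifo->func->name, fifo->nr);
	free(fifo);
	return 0;
}
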
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 4a20a6fd3887..f6d383a21222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -22,17 +22,14 @@
* Authors: Ben Skeggs
*/
#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
-#include <core/client.h>
-#include <core/engctx.h>
#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv17_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv17_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,166 +47,51 @@ nv17_ramfc[] = {
{}
};
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv17_fifo_chan_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void
+nv17_fifo_init(struct nvkm_fifo *base)
{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nv04_fifo_priv *priv = (void *)engine;
- struct nv04_fifo_chan *chan;
- int ret;
-
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
- &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->object_attach = nv04_fifo_object_attach;
- nv_parent(chan)->object_detach = nv04_fifo_object_detach;
- nv_parent(chan)->context_attach = nv04_fifo_context_attach;
- chan->ramfc = chan->base.chid * 64;
-
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
- nv_wo32(priv->ramfc, chan->ramfc + 0x14,
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- return 0;
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+ 0x00010000);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
-static struct nvkm_ofuncs
-nv17_fifo_ofuncs = {
- .ctor = nv17_fifo_chan_ctor,
- .dtor = nv04_fifo_chan_dtor,
- .init = nv04_fifo_chan_init,
- .fini = nv04_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv17_fifo_sclass[] = {
- { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv17_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x17),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fifo_context_ctor,
- .dtor = _nvkm_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv17_fifo = {
+ .init = nv17_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv17_fifo_dma_oclass,
+ NULL
},
};
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
- struct nv04_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nvkm_ramht_ref(imem->ramht, &priv->ramht);
- nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
- nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv17_fifo_cclass;
- nv_engine(priv)->sclass = nv17_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- priv->ramfc_desc = nv17_ramfc;
- return 0;
-}
-
-static int
-nv17_fifo_init(struct nvkm_object *object)
-{
- struct nv04_fifo_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fifo_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((priv->ramht->bits - 9) << 16) |
- (priv->ramht->gpuobj.addr >> 8));
- nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
- nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(priv, NV03_PFIFO_CACHES, 1);
- return 0;
+ return nv04_fifo_new_(&nv17_fifo, device, index, 32,
+ nv17_fifo_ramfc, pfifo);
}
-
-struct nvkm_oclass *
-nv17_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x17),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv17_fifo_ctor,
- .dtor = nv04_fifo_dtor,
- .init = nv17_fifo_init,
- .fini = _nvkm_fifo_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 5bfc96265f3b..8c7ba32763c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -22,19 +22,15 @@
* Authors: Ben Skeggs
*/
#include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
-#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/fb.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv40_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv40_fifo_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -60,297 +56,72 @@ nv40_ramfc[] = {
{}
};
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv40_fifo_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 handle)
-{
- struct nv04_fifo_priv *priv = (void *)parent->engine;
- struct nv04_fifo_chan *chan = (void *)parent;
- u32 context, chid = chan->base.chid;
- int ret;
-
- if (nv_iclass(object, NV_GPUOBJ_CLASS))
- context = nv_gpuobj(object)->addr >> 4;
- else
- context = 0x00000004; /* just non-zero */
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_DMAOBJ:
- case NVDEV_ENGINE_SW:
- context |= 0x00000000;
- break;
- case NVDEV_ENGINE_GR:
- context |= 0x00100000;
- break;
- case NVDEV_ENGINE_MPEG:
- context |= 0x00200000;
- break;
- default:
- return -EINVAL;
- }
-
- context |= chid << 23;
-
- mutex_lock(&nv_subdev(priv)->mutex);
- ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
- mutex_unlock(&nv_subdev(priv)->mutex);
- return ret;
-}
-
-static int
-nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
-{
- struct nv04_fifo_priv *priv = (void *)parent->engine;
- struct nv04_fifo_chan *chan = (void *)parent;
- unsigned long flags;
- u32 reg, ctx;
-
- switch (nv_engidx(engctx->engine)) {
- case NVDEV_ENGINE_SW:
- return 0;
- case NVDEV_ENGINE_GR:
- reg = 0x32e0;
- ctx = 0x38;
- break;
- case NVDEV_ENGINE_MPEG:
- reg = 0x330c;
- ctx = 0x54;
- break;
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&priv->base.lock, flags);
- nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
- nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
- if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
- nv_wr32(priv, reg, nv_engctx(engctx)->addr);
- nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
-
- nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return 0;
-}
-
-static int
-nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *engctx)
+static void
+nv40_fifo_init(struct nvkm_fifo *base)
{
- struct nv04_fifo_priv *priv = (void *)parent->engine;
- struct nv04_fifo_chan *chan = (void *)parent;
- unsigned long flags;
- u32 reg, ctx;
-
- switch (nv_engidx(engctx->engine)) {
- case NVDEV_ENGINE_SW:
- return 0;
- case NVDEV_ENGINE_GR:
- reg = 0x32e0;
- ctx = 0x38;
- break;
- case NVDEV_ENGINE_MPEG:
- reg = 0x330c;
- ctx = 0x54;
- break;
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&priv->base.lock, flags);
- nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
- if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
- nv_wr32(priv, reg, 0x00000000);
- nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
-
- nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&priv->base.lock, flags);
- return 0;
-}
-
-static int
-nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nv04_fifo_priv *priv = (void *)engine;
- struct nv04_fifo_chan *chan;
- int ret;
-
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x1000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->context_attach = nv40_fifo_context_attach;
- nv_parent(chan)->context_detach = nv40_fifo_context_detach;
- nv_parent(chan)->object_attach = nv40_fifo_object_attach;
- nv_parent(chan)->object_detach = nv04_fifo_object_detach;
- chan->ramfc = chan->base.chid * 128;
-
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
- nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
- return 0;
-}
-
-static struct nvkm_ofuncs
-nv40_fifo_ofuncs = {
- .ctor = nv40_fifo_chan_ctor,
- .dtor = nv04_fifo_chan_dtor,
- .init = nv04_fifo_chan_init,
- .fini = nv04_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv40_fifo_sclass[] = {
- { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv40_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fifo_context_ctor,
- .dtor = _nvkm_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
- struct nv04_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nvkm_ramht_ref(imem->ramht, &priv->ramht);
- nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
- nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv40_fifo_cclass;
- nv_engine(priv)->sclass = nv40_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- priv->ramfc_desc = nv40_ramfc;
- return 0;
-}
-
-static int
-nv40_fifo_init(struct nvkm_object *object)
-{
- struct nv04_fifo_priv *priv = (void *)object;
- struct nvkm_fb *pfb = nvkm_fb(object);
- int ret;
-
- ret = nvkm_fifo_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x002040, 0x000000ff);
- nv_wr32(priv, 0x002044, 0x2101ffff);
- nv_wr32(priv, 0x002058, 0x00000001);
-
- nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((priv->ramht->bits - 9) << 16) |
- (priv->ramht->gpuobj.addr >> 8));
- nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-
- switch (nv_device(priv)->chipset) {
+ struct nv04_fifo *fifo = nv04_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, 0x002040, 0x000000ff);
+ nvkm_wr32(device, 0x002044, 0x2101ffff);
+ nvkm_wr32(device, 0x002058, 0x00000001);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(priv, 0x002230, 0x00000001);
+ nvkm_wr32(device, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
case 0x48:
- nv_wr32(priv, 0x002220, 0x00030002);
+ nvkm_wr32(device, 0x002220, 0x00030002);
break;
default:
- nv_wr32(priv, 0x002230, 0x00000000);
- nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
- priv->ramfc->addr) >> 16) |
- 0x00030000);
+ nvkm_wr32(device, 0x002230, 0x00000000);
+ nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+ nvkm_memory_addr(ramfc)) >> 16) |
+ 0x00030000);
break;
}
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
- nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
- nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(priv, NV03_PFIFO_CACHES, 1);
- return 0;
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
-struct nvkm_oclass *
-nv40_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_fifo_ctor,
- .dtor = nv04_fifo_dtor,
- .init = nv40_fifo_init,
- .fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv40_fifo = {
+ .init = nv40_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv40_fifo_dma_oclass,
+ NULL
},
};
+
+int
+nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return nv04_fifo_new_(&nv40_fifo, device, index, 32,
+ nv40_fifo_ramfc, pfifo);
+}
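The rewrite above collapses nv40's per-chipset object-class boilerplate into a single const function table handed to a shared constructor. Note the bookkeeping change it carries: the old ctor took a highest channel id of 31 and wrote priv->base.max into NV03_PFIFO_CACHE1_PUSH1, while the new path takes a channel count of 32 and writes fifo->base.nr - 1, the same register value. A minimal sketch of the pattern; my_fifo_func and my_fifo_new_ are illustrative names, not part of the nvkm API:

#include <stdlib.h>

struct my_fifo;

struct my_fifo_func {
	void (*init)(struct my_fifo *);
	void (*intr)(struct my_fifo *);
};

struct my_fifo {
	const struct my_fifo_func *func;
	int nr;	/* channel count; the highest valid chid is nr - 1 */
};

static int
my_fifo_new_(const struct my_fifo_func *func, int nr, struct my_fifo **pfifo)
{
	struct my_fifo *fifo;

	if (!(fifo = calloc(1, sizeof(*fifo))))
		return -1;	/* stands in for -ENOMEM */
	fifo->func = func;	/* one static table per chipset */
	fifo->nr = nr;		/* nv40 passes 32; PUSH1 gets nr - 1 == 31 */
	*pfifo = fifo;
	return 0;
}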
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index f25f0fd0655d..66eb12c2b5ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -22,513 +22,126 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
-#include "nv04.h"
+#include "channv50.h"
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
+#include <core/gpuobj.h>
static void
-nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
{
- struct nvkm_bar *bar = nvkm_bar(priv);
- struct nvkm_gpuobj *cur;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_memory *cur;
int i, p;
- cur = priv->playlist[priv->cur_playlist];
- priv->cur_playlist = !priv->cur_playlist;
-
- for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
- if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
- nv_wo32(cur, p++ * 4, i);
- }
-
- bar->flush(bar);
-
- nv_wr32(priv, 0x0032f4, cur->addr >> 12);
- nv_wr32(priv, 0x0032ec, p);
- nv_wr32(priv, 0x002500, 0x00000101);
-}
-
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
-{
- mutex_lock(&nv_subdev(priv)->mutex);
- nv50_fifo_playlist_update_locked(priv);
- mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent->parent;
- struct nvkm_gpuobj *ectx = (void *)object;
- u64 limit = ectx->addr + ectx->size - 1;
- u64 start = ectx->addr;
- u32 addr;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0000; break;
- case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
- default:
- return -EINVAL;
- }
-
- nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
- nv_wo32(base->eng, addr + 0x00, 0x00190000);
- nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
- nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
- nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
- upper_32_bits(start));
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
- return 0;
-}
-
-static int
-nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
- struct nvkm_object *object)
-{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_priv *priv = (void *)parent->engine;
- struct nv50_fifo_base *base = (void *)parent->parent;
- struct nv50_fifo_chan *chan = (void *)parent;
- u32 addr, me;
- int ret = 0;
-
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_SW : return 0;
- case NVDEV_ENGINE_GR : addr = 0x0000; break;
- case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
- default:
- return -EINVAL;
- }
-
- /* HW bug workaround:
- *
- * PFIFO will hang forever if the connected engines don't report
- * that they've processed the context switch request.
- *
- * In order for the kickoff to work, we need to ensure all the
- * connected engines are in a state where they can answer.
- *
-	 * Newer chipsets don't seem to suffer from this issue, and there's
-	 * also an "ignore these engines" bitmask register we can use if we
-	 * hit the issue there.
- */
- me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
-
- /* do the kickoff... */
- nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
- if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
- nv_error(priv, "channel %d [%s] unload timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- if (suspend)
- ret = -EBUSY;
- }
- nv_wr32(priv, 0x00b860, me);
-
- if (ret == 0) {
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
- }
-
- return ret;
-}
-
-static int
-nv50_fifo_object_attach(struct nvkm_object *parent,
- struct nvkm_object *object, u32 handle)
-{
- struct nv50_fifo_chan *chan = (void *)parent;
- u32 context;
-
- if (nv_iclass(object, NV_GPUOBJ_CLASS))
- context = nv_gpuobj(object)->node->offset >> 4;
- else
- context = 0x00000004; /* just non-zero */
+ cur = fifo->runlist[fifo->cur_runlist];
+ fifo->cur_runlist = !fifo->cur_runlist;
- switch (nv_engidx(object->engine)) {
- case NVDEV_ENGINE_DMAOBJ:
- case NVDEV_ENGINE_SW : context |= 0x00000000; break;
- case NVDEV_ENGINE_GR : context |= 0x00100000; break;
- case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
- default:
- return -EINVAL;
+ nvkm_kmap(cur);
+ for (i = 0, p = 0; i < fifo->base.nr; i++) {
+ if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
+ nvkm_wo32(cur, p++ * 4, i);
}
+ nvkm_done(cur);
- return nvkm_ramht_insert(chan->ramht, 0, handle, context);
+ nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
+ nvkm_wr32(device, 0x0032ec, p);
+ nvkm_wr32(device, 0x002500, 0x00000101);
}
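The new accessor style also changes how instance memory is written: each run of nvkm_wo32() calls is bracketed by nvkm_kmap()/nvkm_done(), and the old explicit bar->flush(bar) disappears into the nvkm_done() step. A rough userspace model of that bracketing, with stand-in names (mem_kmap/mem_wo32/mem_done are not the real helpers):

#include <stdint.h>

struct mem { uint32_t word[128]; int mapped; };

static void mem_kmap(struct mem *m) { m->mapped = 1; }	/* map for CPU access */

static void
mem_wo32(struct mem *m, unsigned int off, uint32_t v)
{
	if (m->mapped)			/* writes only valid inside the bracket */
		m->word[off / 4] = v;
}

static void mem_done(struct mem *m) { m->mapped = 0; }	/* unmap; flush point */

static int
runlist_fill(struct mem *cur, const int *chid, int n)
{
	int i;

	mem_kmap(cur);
	for (i = 0; i < n; i++)
		mem_wo32(cur, i * 4, chid[i]);
	mem_done(cur);	/* replaces the separate bar->flush() of the old code */
	return n;
}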
void
-nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
+nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
- struct nv50_fifo_chan *chan = (void *)parent;
- nvkm_ramht_remove(chan->ramht, cookie);
+ mutex_lock(&fifo->base.engine.subdev.mutex);
+ nv50_fifo_runlist_update_locked(fifo);
+ mutex_unlock(&fifo->base.engine.subdev.mutex);
}
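nv50_fifo_runlist_update() shows the usual locked/unlocked split plus a double-buffered ("ping-pong") runlist: the driver alternates between two buffers so it never rewrites in place the list last handed to the hardware, then points the hardware at the freshly written one. A hedged sketch of the same shape; names are illustrative, and the real code writes the buffer address and entry count to registers 0x0032f4/0x0032ec:

#include <pthread.h>

struct runlist {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	unsigned int buf[2][128];
	int cur;		/* which buffer the hardware currently reads */
};

static void
runlist_commit_locked(struct runlist *rl, const unsigned int *chid, int n)
{
	int next = !rl->cur;
	int i;

	for (i = 0; i < n; i++)	/* fill the buffer the HW is not reading */
		rl->buf[next][i] = chid[i];
	rl->cur = next;	/* the driver then writes address + count to the HW */
}

static void
runlist_commit(struct runlist *rl, const unsigned int *chid, int n)
{
	pthread_mutex_lock(&rl->lock);
	runlist_commit_locked(rl, chid, n);
	pthread_mutex_unlock(&rl->lock);
}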
-static int
-nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_fifo_oneinit(struct nvkm_fifo *base)
{
- union {
- struct nv03_channel_dma_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent;
- struct nv50_fifo_chan *chan;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
- nv_ioctl(parent, "create channel dma size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
- "offset %016llx\n", args->v0.version,
- args->v0.pushbuf, args->v0.offset);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nv_parent(chan)->context_attach = nv50_fifo_context_attach;
- nv_parent(chan)->context_detach = nv50_fifo_context_detach;
- nv_parent(chan)->object_attach = nv50_fifo_object_attach;
- nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
- ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+ false, &fifo->runlist[0]);
if (ret)
return ret;
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nv_wo32(base->ramfc, 0x3c, 0x003f6078);
- nv_wo32(base->ramfc, 0x44, 0x01003fff);
- nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
- nv_wo32(base->ramfc, 0x4c, 0xffffffff);
- nv_wo32(base->ramfc, 0x60, 0x7fffffff);
- nv_wo32(base->ramfc, 0x78, 0x00000000);
- nv_wo32(base->ramfc, 0x7c, 0x30000001);
- nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj.node->offset >> 4));
- bar->flush(bar);
- return 0;
+ return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+ false, &fifo->runlist[1]);
}
-static int
-nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+void
+nv50_fifo_init(struct nvkm_fifo *base)
{
- union {
- struct nv50_channel_gpfifo_v0 v0;
- } *args = data;
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_fifo_base *base = (void *)parent;
- struct nv50_fifo_chan *chan;
- u64 ioffset, ilength;
- int ret;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ int i;
- nv_ioctl(parent, "create channel gpfifo size %d\n", size);
- if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
- "ioffset %016llx ilength %08x\n",
- args->v0.version, args->v0.pushbuf, args->v0.ioffset,
- args->v0.ilength);
- } else
- return ret;
-
- ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->v0.pushbuf,
- (1ULL << NVDEV_ENGINE_DMAOBJ) |
- (1ULL << NVDEV_ENGINE_SW) |
- (1ULL << NVDEV_ENGINE_GR) |
- (1ULL << NVDEV_ENGINE_MPEG), &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+ nvkm_wr32(device, 0x002044, 0x01003fff);
- args->v0.chid = chan->base.chid;
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0xbfffffff);
- nv_parent(chan)->context_attach = nv50_fifo_context_attach;
- nv_parent(chan)->context_detach = nv50_fifo_context_detach;
- nv_parent(chan)->object_attach = nv50_fifo_object_attach;
- nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
- ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
- &chan->ramht);
- if (ret)
- return ret;
-
- ioffset = args->v0.ioffset;
- ilength = order_base_2(args->v0.ilength / 8);
+ for (i = 0; i < 128; i++)
+ nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+ nv50_fifo_runlist_update_locked(fifo);
- nv_wo32(base->ramfc, 0x3c, 0x403f6078);
- nv_wo32(base->ramfc, 0x44, 0x01003fff);
- nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
- nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
- nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
- nv_wo32(base->ramfc, 0x60, 0x7fffffff);
- nv_wo32(base->ramfc, 0x78, 0x00000000);
- nv_wo32(base->ramfc, 0x7c, 0x30000001);
- nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj.node->offset >> 4));
- bar->flush(bar);
- return 0;
+ nvkm_wr32(device, 0x003200, 0x00000001);
+ nvkm_wr32(device, 0x003250, 0x00000001);
+ nvkm_wr32(device, 0x002500, 0x00000001);
}
-void
-nv50_fifo_chan_dtor(struct nvkm_object *object)
+void *
+nv50_fifo_dtor(struct nvkm_fifo *base)
{
- struct nv50_fifo_chan *chan = (void *)object;
- nvkm_ramht_ref(NULL, &chan->ramht);
- nvkm_fifo_channel_destroy(&chan->base);
-}
-
-static int
-nv50_fifo_chan_init(struct nvkm_object *object)
-{
- struct nv50_fifo_priv *priv = (void *)object->engine;
- struct nv50_fifo_base *base = (void *)object->parent;
- struct nv50_fifo_chan *chan = (void *)object;
- struct nvkm_gpuobj *ramfc = base->ramfc;
- u32 chid = chan->base.chid;
- int ret;
-
- ret = nvkm_fifo_channel_init(&chan->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
- nv50_fifo_playlist_update(priv);
- return 0;
+ struct nv50_fifo *fifo = nv50_fifo(base);
+ nvkm_memory_del(&fifo->runlist[1]);
+ nvkm_memory_del(&fifo->runlist[0]);
+ return fifo;
}
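nv50_fifo_dtor() follows the new convention that a subclass destructor releases what it owns and returns the allocation itself, so generic teardown code can kfree() the correct pointer. A condensed sketch of that convention with invented names:

#include <stdlib.h>

struct base;
struct ops { void *(*dtor)(struct base *); };
struct base { const struct ops *ops; };
struct sub { struct base base; int *extra; };

static void *
sub_dtor(struct base *b)
{
	struct sub *s = (struct sub *)b;	/* container_of() in the kernel */

	free(s->extra);		/* release what the subclass owns */
	return s;		/* hand the allocation back to the caller */
}

static void
base_del(struct base *b)
{
	void *data = b->ops->dtor ? b->ops->dtor(b) : b;
	free(data);	/* common code frees whatever the dtor returned */
}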
int
-nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv50_fifo_priv *priv = (void *)object->engine;
- struct nv50_fifo_chan *chan = (void *)object;
- u32 chid = chan->base.chid;
-
- /* remove channel from playlist, fifo will unload context */
- nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
- nv50_fifo_playlist_update(priv);
- nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
-
- return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_dma = {
- .ctor = nv50_fifo_chan_ctor_dma,
- .dtor = nv50_fifo_chan_dtor,
- .init = nv50_fifo_chan_init,
- .fini = nv50_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_ind = {
- .ctor = nv50_fifo_chan_ctor_ind,
- .dtor = nv50_fifo_chan_dtor,
- .init = nv50_fifo_chan_init,
- .fini = nv50_fifo_chan_fini,
- .map = _nvkm_fifo_channel_map,
- .rd32 = _nvkm_fifo_channel_rd32,
- .wr32 = _nvkm_fifo_channel_wr32,
- .ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv50_fifo_sclass[] = {
- { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
- { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
- {}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fifo **pfifo)
{
- struct nv50_fifo_base *base;
+ struct nv50_fifo *fifo;
int ret;
- ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
- 0x1000, NVOBJ_FLAG_HEAP, &base);
- *pobject = nv_object(base);
- if (ret)
- return ret;
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ *pfifo = &fifo->base;
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
- &base->pgd);
- if (ret)
- return ret;
-
- ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
+ ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
if (ret)
return ret;
+ set_bit(0, fifo->base.mask); /* PIO channel */
+ set_bit(127, fifo->base.mask); /* inactive channel */
return 0;
}
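nv50_fifo_new_() reserves channel ids 0 (the PIO channel) and 127 (the inactive channel) in fifo->base.mask before any user channels exist, which suggests chids are later handed out by scanning the bitmap for a clear bit. That allocator is not visible in this hunk, so the sketch below is an assumption about its shape; set_chid/alloc_chid stand in for the kernel's set_bit/find_first_zero_bit:

#include <stdbool.h>

#define NR_CHID 128
static unsigned char chid_mask[NR_CHID / 8];

static void set_chid(int c)  { chid_mask[c / 8] |=  (1u << (c % 8)); }
static bool test_chid(int c) { return chid_mask[c / 8] & (1u << (c % 8)); }

static int
alloc_chid(void)
{
	int c;

	for (c = 0; c < NR_CHID; c++) {	/* like find_first_zero_bit() */
		if (!test_chid(c)) {
			set_chid(c);
			return c;
		}
	}
	return -1;	/* no free channel */
}

static void
reserve_special_chids(void)
{
	set_chid(0);	/* PIO channel, as in nv50_fifo_new_() */
	set_chid(127);	/* inactive channel */
}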
-void
-nv50_fifo_context_dtor(struct nvkm_object *object)
-{
- struct nv50_fifo_base *base = (void *)object;
- nvkm_vm_ref(NULL, &base->vm, base->pgd);
- nvkm_gpuobj_ref(NULL, &base->pgd);
- nvkm_gpuobj_ref(NULL, &base->eng);
- nvkm_gpuobj_ref(NULL, &base->ramfc);
- nvkm_gpuobj_ref(NULL, &base->cache);
- nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-nv50_fifo_cclass = {
- .handle = NV_ENGCTX(FIFO, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fifo_context_ctor,
- .dtor = nv50_fifo_context_dtor,
- .init = _nvkm_fifo_context_init,
- .fini = _nvkm_fifo_context_fini,
- .rd32 = _nvkm_fifo_context_rd32,
- .wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv50_fifo = {
+ .dtor = nv50_fifo_dtor,
+ .oneinit = nv50_fifo_oneinit,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .chan = {
+ &nv50_fifo_dma_oclass,
+ &nv50_fifo_gpfifo_oclass,
+ NULL
},
};
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_fifo_priv *priv;
- int ret;
-
- ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[0]);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
- &priv->playlist[1]);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nv04_fifo_intr;
- nv_engine(priv)->cclass = &nv50_fifo_cclass;
- nv_engine(priv)->sclass = nv50_fifo_sclass;
- priv->base.pause = nv04_fifo_pause;
- priv->base.start = nv04_fifo_start;
- return 0;
-}
-
-void
-nv50_fifo_dtor(struct nvkm_object *object)
-{
- struct nv50_fifo_priv *priv = (void *)object;
-
- nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
- nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
-
- nvkm_fifo_destroy(&priv->base);
-}
-
int
-nv50_fifo_init(struct nvkm_object *object)
+nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- struct nv50_fifo_priv *priv = (void *)object;
- int ret, i;
-
- ret = nvkm_fifo_init(&priv->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
- nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(priv, 0x00250c, 0x6f3cfc34);
- nv_wr32(priv, 0x002044, 0x01003fff);
-
- nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0xbfffffff);
-
- for (i = 0; i < 128; i++)
- nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
- nv50_fifo_playlist_update_locked(priv);
-
- nv_wr32(priv, 0x003200, 0x00000001);
- nv_wr32(priv, 0x003250, 0x00000001);
- nv_wr32(priv, 0x002500, 0x00000001);
- return 0;
+ return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
}
-
-struct nvkm_oclass *
-nv50_fifo_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(FIFO, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fifo_ctor,
- .dtor = nv50_fifo_dtor,
- .init = nv50_fifo_init,
- .fini = _nvkm_fifo_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 09ed93c66567..8ab53948cbb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,36 +1,19 @@
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__
-#include <engine/fifo.h>
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
+#include "priv.h"
-struct nv50_fifo_priv {
+struct nv50_fifo {
struct nvkm_fifo base;
- struct nvkm_gpuobj *playlist[2];
- int cur_playlist;
+ struct nvkm_memory *runlist[2];
+ int cur_runlist;
};
-struct nv50_fifo_base {
- struct nvkm_fifo_base base;
- struct nvkm_gpuobj *ramfc;
- struct nvkm_gpuobj *cache;
- struct nvkm_gpuobj *eng;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-};
-
-struct nv50_fifo_chan {
- struct nvkm_fifo_chan base;
- u32 subc[8];
- struct nvkm_ramht *ramht;
-};
-
-void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
-
-void nv50_fifo_object_detach(struct nvkm_object *, int);
-void nv50_fifo_chan_dtor(struct nvkm_object *);
-int nv50_fifo_chan_fini(struct nvkm_object *, bool);
-
-void nv50_fifo_context_dtor(struct nvkm_object *);
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+ int index, struct nvkm_fifo **);
-void nv50_fifo_dtor(struct nvkm_object *);
-int nv50_fifo_init(struct nvkm_object *);
+void *nv50_fifo_dtor(struct nvkm_fifo *);
+int nv50_fifo_oneinit(struct nvkm_fifo *);
+void nv50_fifo_init(struct nvkm_fifo *);
+void nv50_fifo_runlist_update(struct nv50_fifo *);
#endif
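nv50.h now derives the chipset structure from the generic one with a container_of() macro, the standard kernel downcast idiom: given a pointer to an embedded member, recover the structure containing it. A self-contained rendition; nv50_fifo_like and its fields are invented for the demo:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_fifo_like { int nr; };

struct nv50_fifo_like {
	struct nvkm_fifo_like base;
	int cur_runlist;
};

#define nv50_fifo_like(p) container_of((p), struct nv50_fifo_like, base)

int
main(void)
{
	struct nv50_fifo_like f = { .base = { .nr = 128 }, .cur_runlist = 1 };
	struct nvkm_fifo_like *b = &f.base;	/* the generic view */

	printf("%d\n", nv50_fifo_like(b)->cur_runlist);	/* back to subclass */
	return 0;
}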
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 000000000000..cb1432e9be08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
+ int index, int nr, struct nvkm_fifo *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+
+struct nvkm_fifo_func {
+ void *(*dtor)(struct nvkm_fifo *);
+ int (*oneinit)(struct nvkm_fifo *);
+ void (*init)(struct nvkm_fifo *);
+ void (*fini)(struct nvkm_fifo *);
+ void (*intr)(struct nvkm_fifo *);
+ void (*pause)(struct nvkm_fifo *, unsigned long *);
+ void (*start)(struct nvkm_fifo *, unsigned long *);
+ void (*uevent_init)(struct nvkm_fifo *);
+ void (*uevent_fini)(struct nvkm_fifo *);
+ const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+void nv04_fifo_intr(struct nvkm_fifo *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+#endif
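nvkm_fifo_func ends with a flexible array of channel-class pointers, and every chipset table in this patch (nv40_fifo and nv50_fifo above) terminates it with NULL so consumers can iterate without a count. Statically initializing a flexible array member is a GCC extension the kernel build relies on. A compilable miniature with stand-in types:

#include <stdio.h>

struct chan_oclass { const char *name; };

struct fifo_func {
	const char *chip;
	const struct chan_oclass *chan[];	/* NULL-terminated list */
};

static const struct chan_oclass dma_oclass    = { "dma" };
static const struct chan_oclass gpfifo_oclass = { "gpfifo" };

/* static init of a flexible array member: GCC extension */
static const struct fifo_func demo_fifo = {
	.chip = "demo",
	.chan = { &dma_oclass, &gpfifo_oclass, NULL },
};

int
main(void)
{
	int i;

	for (i = 0; demo_fifo.chan[i]; i++)	/* stop at the sentinel */
		printf("%s: %s channels\n", demo_fifo.chip,
		       demo_fifo.chan[i]->name);
	return 0;
}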
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 000000000000..92d56221197b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,132 @@
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+#endif
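regsnv04.h packs several multi-bit fields (TRIG, SIZE, MAX_REQS) into the single NV04_PFIFO_CACHE1_DMA_FETCH register. The helper below uses mask values copied from the header to show how such a value is composed and a field extracted; dma_fetch_compose mirrors only the TRIG/SIZE/MAX_REQS portion of what the old nv40 channel setup OR'd into RAMFC:

#include <stdint.h>

#define DMA_FETCH_TRIG_128_BYTES 0x00000078
#define DMA_FETCH_SIZE_128_BYTES 0x00006000
#define DMA_FETCH_MAX_REQS_MASK  0x001F0000
#define DMA_FETCH_MAX_REQS_8     0x00080000

static uint32_t
dma_fetch_compose(void)
{
	/* fields occupy disjoint bit ranges, so they simply OR together */
	return DMA_FETCH_TRIG_128_BYTES |
	       DMA_FETCH_SIZE_128_BYTES |
	       DMA_FETCH_MAX_REQS_8;
}

static unsigned int
dma_fetch_max_reqs(uint32_t v)
{
	/* mask off the field, then shift it down to its numeric value */
	return (v & DMA_FETCH_MAX_REQS_MASK) >> 16;
}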
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2e1b92f71d9e..9ad0d0e78a96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,21 +1,8 @@
-nvkm-y += nvkm/engine/gr/ctxnv40.o
-nvkm-y += nvkm/engine/gr/ctxnv50.o
-nvkm-y += nvkm/engine/gr/ctxgf100.o
-nvkm-y += nvkm/engine/gr/ctxgf108.o
-nvkm-y += nvkm/engine/gr/ctxgf104.o
-nvkm-y += nvkm/engine/gr/ctxgf110.o
-nvkm-y += nvkm/engine/gr/ctxgf117.o
-nvkm-y += nvkm/engine/gr/ctxgf119.o
-nvkm-y += nvkm/engine/gr/ctxgk104.o
-nvkm-y += nvkm/engine/gr/ctxgk20a.o
-nvkm-y += nvkm/engine/gr/ctxgk110.o
-nvkm-y += nvkm/engine/gr/ctxgk110b.o
-nvkm-y += nvkm/engine/gr/ctxgk208.o
-nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/base.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
+nvkm-y += nvkm/engine/gr/nv15.o
+nvkm-y += nvkm/engine/gr/nv17.o
nvkm-y += nvkm/engine/gr/nv20.o
nvkm-y += nvkm/engine/gr/nv25.o
nvkm-y += nvkm/engine/gr/nv2a.o
@@ -23,18 +10,43 @@ nvkm-y += nvkm/engine/gr/nv30.o
nvkm-y += nvkm/engine/gr/nv34.o
nvkm-y += nvkm/engine/gr/nv35.o
nvkm-y += nvkm/engine/gr/nv40.o
+nvkm-y += nvkm/engine/gr/nv44.o
nvkm-y += nvkm/engine/gr/nv50.o
+nvkm-y += nvkm/engine/gr/g84.o
+nvkm-y += nvkm/engine/gr/gt200.o
+nvkm-y += nvkm/engine/gr/mcp79.o
+nvkm-y += nvkm/engine/gr/gt215.o
+nvkm-y += nvkm/engine/gr/mcp89.o
nvkm-y += nvkm/engine/gr/gf100.o
-nvkm-y += nvkm/engine/gr/gf108.o
nvkm-y += nvkm/engine/gr/gf104.o
+nvkm-y += nvkm/engine/gr/gf108.o
nvkm-y += nvkm/engine/gr/gf110.o
nvkm-y += nvkm/engine/gr/gf117.o
nvkm-y += nvkm/engine/gr/gf119.o
nvkm-y += nvkm/engine/gr/gk104.o
-nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gk110.o
nvkm-y += nvkm/engine/gr/gk110b.o
nvkm-y += nvkm/engine/gr/gk208.o
+nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm204.o
nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm20b.o
+
+nvkm-y += nvkm/engine/gr/ctxnv40.o
+nvkm-y += nvkm/engine/gr/ctxnv50.o
+nvkm-y += nvkm/engine/gr/ctxgf100.o
+nvkm-y += nvkm/engine/gr/ctxgf104.o
+nvkm-y += nvkm/engine/gr/ctxgf108.o
+nvkm-y += nvkm/engine/gr/ctxgf110.o
+nvkm-y += nvkm/engine/gr/ctxgf117.o
+nvkm-y += nvkm/engine/gr/ctxgf119.o
+nvkm-y += nvkm/engine/gr/ctxgk104.o
+nvkm-y += nvkm/engine/gr/ctxgk110.o
+nvkm-y += nvkm/engine/gr/ctxgk110b.o
+nvkm-y += nvkm/engine/gr/ctxgk208.o
+nvkm-y += nvkm/engine/gr/ctxgk20a.o
+nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
new file mode 100644
index 000000000000..090765ff070d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <engine/fifo.h>
+
+static void
+nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ if (gr->func->tile)
+ gr->func->tile(gr, region, tile);
+}
+
+u64
+nvkm_gr_units(struct nvkm_gr *gr)
+{
+ if (gr->func->units)
+ return gr->func->units(gr);
+ return 0;
+}
+
+int
+nvkm_gr_tlb_flush(struct nvkm_gr *gr)
+{
+ if (gr->func->tlb_flush)
+ return gr->func->tlb_flush(gr);
+ return -ENODEV;
+}
+
+static int
+nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+ struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+ int c = 0;
+
+ if (gr->func->object_get) {
+ int ret = gr->func->object_get(gr, index, &oclass->base);
+ if (oclass->base.oclass)
+ return index;
+ return ret;
+ }
+
+ while (gr->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = gr->func->sclass[index];
+ return index;
+ }
+ }
+
+ return c;
+}
+
+static int
+nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+ if (gr->func->chan_new)
+ return gr->func->chan_new(gr, chan, oclass, pobject);
+ return 0;
+}
+
+static void
+nvkm_gr_intr(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ gr->func->intr(gr);
+}
+
+static int
+nvkm_gr_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ if (gr->func->oneinit)
+ return gr->func->oneinit(gr);
+ return 0;
+}
+
+static int
+nvkm_gr_init(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ return gr->func->init(gr);
+}
+
+static void *
+nvkm_gr_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ if (gr->func->dtor)
+ return gr->func->dtor(gr);
+ return gr;
+}
+
+static const struct nvkm_engine_func
+nvkm_gr = {
+ .dtor = nvkm_gr_dtor,
+ .oneinit = nvkm_gr_oneinit,
+ .init = nvkm_gr_init,
+ .intr = nvkm_gr_intr,
+ .tile = nvkm_gr_tile,
+ .fifo.cclass = nvkm_gr_cclass_new,
+ .fifo.sclass = nvkm_gr_oclass_get,
+};
+
+int
+nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
+ int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+{
+ gr->func = func;
+ return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
+ enable, &gr->engine);
+}
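base.c wraps every gr->func hook so optional per-chipset operations degrade to sensible defaults: units() falls back to 0, tlb_flush() to -ENODEV, tile() and chan_new() to no-ops, while init() and intr() are called unconditionally. A condensed sketch of that dispatch pattern with stand-in names:

#include <errno.h>
#include <stdint.h>

struct gr;

struct gr_func {
	uint64_t (*units)(struct gr *);		/* optional */
	int (*tlb_flush)(struct gr *);		/* optional */
	int (*init)(struct gr *);		/* mandatory */
};

struct gr { const struct gr_func *func; };

static uint64_t
gr_units(struct gr *gr)
{
	return gr->func->units ? gr->func->units(gr) : 0;
}

static int
gr_tlb_flush(struct gr *gr)
{
	return gr->func->tlb_flush ? gr->func->tlb_flush(gr) : -ENODEV;
}

static int
gr_init(struct gr *gr)
{
	return gr->func->init(gr);	/* no check: every chipset provides it */
}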
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 57e2c5b13123..56f392d3d4fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -23,7 +23,6 @@
*/
#include "ctxgf100.h"
-#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
@@ -1005,6 +1004,7 @@ void
gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
int shift, int buffer)
{
+ struct nvkm_device *device = info->gr->base.engine.subdev.device;
if (info->data) {
if (shift >= 0) {
info->mmio->addr = addr;
@@ -1021,29 +1021,29 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
return;
}
- nv_wr32(info->priv, addr, data);
+ nvkm_wr32(device, addr, data);
}
void
gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
- mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
}
void
gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1053,13 +1053,13 @@ gf100_grctx_generate_pagepool(struct gf100_grctx *info)
void
gf100_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct gf100_gr_priv *priv = info->priv;
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
- const u32 attrib = impl->attrib_nr;
- const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
int gpc, tpc;
u32 bo = 0;
@@ -1067,91 +1067,95 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_refn(info, 0x419848, 0x10000000, s, b);
mmio_wr32(info, 0x405830, (attrib << 16));
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
mmio_skip(info, o, (attrib << 16) | ++bo);
mmio_wr32(info, o, (attrib << 16) | --bo);
- bo += impl->attrib_nr_max;
+ bo += grctx->attrib_nr_max;
}
}
}
void
-gf100_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf100_grctx_generate_unkn(struct gf100_gr *gr)
{
}
void
-gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+gf100_grctx_generate_tpcid(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int gpc, tpc, id;
for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- if (tpc < priv->tpc_nr[gpc]) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ if (tpc < gr->tpc_nr[gpc]) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
id++;
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
}
}
}
void
-gf100_grctx_generate_r406028(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406028(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 tmp[GPC_MAX / 8] = {}, i = 0;
- for (i = 0; i < priv->gpc_nr; i++)
- tmp[i / 8] |= priv->tpc_nr[i] << ((i % 8) * 4);
+ for (i = 0; i < gr->gpc_nr; i++)
+ tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
for (i = 0; i < 4; i++) {
- nv_wr32(priv, 0x406028 + (i * 4), tmp[i]);
- nv_wr32(priv, 0x405870 + (i * 4), tmp[i]);
+ nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
+ nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
}
}
void
-gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u8 tpcnr[GPC_MAX], data[TPC_MAX];
int gpc, tpc, i;
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
memset(data, 0x1f, sizeof(data));
gpc = -1;
- for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < gr->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
data[tpc] = gpc;
}
for (i = 0; i < 4; i++)
- nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
}
void
-gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 data[6] = {}, data2[2] = {};
u8 tpcnr[GPC_MAX];
u8 shift, ntpcv;
int gpc, tpc, i;
/* calculate first set of magics */
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
gpc = -1;
- for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < gr->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@@ -1163,7 +1167,7 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
/* and the second... */
shift = 0;
- ntpcv = priv->tpc_total;
+ ntpcv = gr->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@@ -1176,202 +1180,211 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* GPC_BROADCAST */
- nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr);
+ nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+ nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr | data2[0]);
- nv_wr32(priv, 0x419be4, data2[1]);
+ nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr | data2[0]);
+ nvkm_wr32(device, 0x419be4, data2[1]);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+ nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
/* UNK78xx */
- nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr);
+ nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+ nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
}
void
-gf100_grctx_generate_r406800(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406800(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u64 tpc_mask = 0, tpc_set = 0;
u8 tpcnr[GPC_MAX];
int gpc, tpc;
int i, a, b;
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (gpc = 0; gpc < priv->gpc_nr; gpc++)
- tpc_mask |= ((1ULL << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+ tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (priv->tpc_total - 1)) / 32;
+ a = (i * (gr->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1ULL << ((gpc * 8) + tpc);
}
- nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
- nv_wr32(priv, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
- if (priv->gpc_nr > 4) {
- nv_wr32(priv, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
- nv_wr32(priv, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
+ nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
+ nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
+ if (gr->gpc_nr > 4) {
+ nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
+ nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
}
}
}
void
-gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
-
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-
- gf100_gr_mmio(priv, oclass->hub);
- gf100_gr_mmio(priv, oclass->gpc);
- gf100_gr_mmio(priv, oclass->zcull);
- gf100_gr_mmio(priv, oclass->tpc);
- gf100_gr_mmio(priv, oclass->ppc);
-
- nv_wr32(priv, 0x404154, 0x00000000);
-
- oclass->bundle(info);
- oclass->pagepool(info);
- oclass->attrib(info);
- oclass->unkn(priv);
-
- gf100_grctx_generate_tpcid(priv);
- gf100_grctx_generate_r406028(priv);
- gf100_grctx_generate_r4060a8(priv);
- gf100_grctx_generate_r418bb8(priv);
- gf100_grctx_generate_r406800(priv);
-
- gf100_gr_icmd(priv, oclass->icmd);
- nv_wr32(priv, 0x404154, 0x00000400);
- gf100_gr_mthd(priv, oclass->mthd);
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+
+ nvkm_mc_unk260(device->mc, 0);
+
+ gf100_gr_mmio(gr, grctx->hub);
+ gf100_gr_mmio(gr, grctx->gpc);
+ gf100_gr_mmio(gr, grctx->zcull);
+ gf100_gr_mmio(gr, grctx->tpc);
+ gf100_gr_mmio(gr, grctx->ppc);
+
+ nvkm_wr32(device, 0x404154, 0x00000000);
+
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
+
+ gf100_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gf100_grctx_generate_r4060a8(gr);
+ gf100_grctx_generate_r418bb8(gr);
+ gf100_grctx_generate_r406800(gr);
+
+ gf100_gr_icmd(gr, grctx->icmd);
+ nvkm_wr32(device, 0x404154, 0x00000400);
+ gf100_gr_mthd(gr, grctx->mthd);
+ nvkm_mc_unk260(device->mc, 1);
}
int
-gf100_grctx_generate(struct gf100_gr_priv *priv)
+gf100_grctx_generate(struct gf100_gr *gr)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- struct nvkm_bar *bar = nvkm_bar(priv);
- struct nvkm_gpuobj *chan;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_memory *chan;
struct gf100_grctx info;
int ret, i;
+ u64 addr;
/* allocate memory for a "channel", which we'll use to generate
* the default context values
*/
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
+ 0x1000, true, &chan);
if (ret) {
- nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+ nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
return ret;
}
+ addr = nvkm_memory_addr(chan);
+
/* PGD pointer */
- nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
- nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
- nv_wo32(chan, 0x0208, 0xffffffff);
- nv_wo32(chan, 0x020c, 0x000000ff);
+ nvkm_kmap(chan);
+ nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
+ nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
+ nvkm_wo32(chan, 0x0208, 0xffffffff);
+ nvkm_wo32(chan, 0x020c, 0x000000ff);
/* PGT[0] pointer */
- nv_wo32(chan, 0x1000, 0x00000000);
- nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+ nvkm_wo32(chan, 0x1000, 0x00000000);
+ nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
/* identity-map the whole "channel" into its own vm */
- for (i = 0; i < chan->size / 4096; i++) {
- u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
- nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
- nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+ for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
+ u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
+ nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+ nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
}
/* context pointer (virt) */
- nv_wo32(chan, 0x0210, 0x00080004);
- nv_wo32(chan, 0x0214, 0x00000000);
+ nvkm_wo32(chan, 0x0210, 0x00080004);
+ nvkm_wo32(chan, 0x0214, 0x00000000);
+ nvkm_done(chan);
- bar->flush(bar);
-
- nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
- nv_wr32(priv, 0x100cbc, 0x80000001);
- nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+ nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
+ nvkm_wr32(device, 0x100cbc, 0x80000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
/* setup default state for mmio list construction */
- info.priv = priv;
- info.data = priv->mmio_data;
- info.mmio = priv->mmio_list;
+ info.gr = gr;
+ info.data = gr->mmio_data;
+ info.mmio = gr->mmio_list;
info.addr = 0x2000 + (i * 8);
info.buffer_nr = 0;
/* make channel current */
- if (priv->firmware) {
- nv_wr32(priv, 0x409840, 0x00000030);
- nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
- nv_wr32(priv, 0x409504, 0x00000003);
- if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
- nv_error(priv, "load_ctx timeout\n");
-
- nv_wo32(chan, 0x8001c, 1);
- nv_wo32(chan, 0x80020, 0);
- nv_wo32(chan, 0x80028, 0);
- nv_wo32(chan, 0x8002c, 0);
- bar->flush(bar);
+ if (gr->firmware) {
+ nvkm_wr32(device, 0x409840, 0x00000030);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409504, 0x00000003);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) & 0x00000010)
+ break;
+ );
+
+ nvkm_kmap(chan);
+ nvkm_wo32(chan, 0x8001c, 1);
+ nvkm_wo32(chan, 0x80020, 0);
+ nvkm_wo32(chan, 0x80028, 0);
+ nvkm_wo32(chan, 0x8002c, 0);
+ nvkm_done(chan);
} else {
- nv_wr32(priv, 0x409840, 0x80000000);
- nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
- nv_wr32(priv, 0x409504, 0x00000001);
- if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000))
- nv_error(priv, "HUB_SET_CHAN timeout\n");
+ nvkm_wr32(device, 0x409840, 0x80000000);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409504, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) & 0x80000000)
+ break;
+ );
}
- oclass->main(priv, &info);
+ grctx->main(gr, &info);
/* trigger a context unload by unsetting the "next channel valid" bit
* and faking a context switch interrupt
*/
- nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
- nv_wr32(priv, 0x409000, 0x00000100);
- if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
- nv_error(priv, "grctx template channel unload timeout\n");
+ nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
+ nvkm_wr32(device, 0x409000, 0x00000100);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+ break;
+ ) < 0) {
ret = -EBUSY;
goto done;
}
- priv->data = kmalloc(priv->size, GFP_KERNEL);
- if (priv->data) {
- for (i = 0; i < priv->size; i += 4)
- priv->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+ gr->data = kmalloc(gr->size, GFP_KERNEL);
+ if (gr->data) {
+ nvkm_kmap(chan);
+ for (i = 0; i < gr->size; i += 4)
+ gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+ nvkm_done(chan);
ret = 0;
} else {
ret = -ENOMEM;
}
done:
- nvkm_gpuobj_ref(NULL, &chan);
+ nvkm_memory_del(&chan);
return ret;
}
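The grctx generator above replaces the old nv_wait() calls with nvkm_msec(), a poll-with-timeout construct whose body runs repeatedly until it breaks or the time budget expires; the context-unload check relies on it returning a negative value on timeout. A plain-C approximation under that assumption; read_reg is a hypothetical register accessor, and the loop busy-waits for brevity:

#include <stdint.h>
#include <time.h>

extern uint32_t read_reg(uint32_t addr);	/* assumed, like nvkm_rd32() */

static int
wait_bit_clear(uint32_t addr, uint32_t bit, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(read_reg(addr) & bit))
			return 0;		/* condition met */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000L +
		    (now.tv_nsec - start.tv_nsec) / 1000000L > timeout_ms)
			return -1;	/* timed out, like nvkm_msec() < 0 */
	}
}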
-struct nvkm_oclass *
-gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf100_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@@ -1387,4 +1400,4 @@ gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3676a3342bc5..3c64040ec5a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -3,7 +3,7 @@
#include "gf100.h"
struct gf100_grctx {
- struct gf100_gr_priv *priv;
+ struct gf100_gr *gr;
struct gf100_gr_data *data;
struct gf100_gr_mmio *mmio;
int buffer_nr;
@@ -19,12 +19,11 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
-struct gf100_grctx_oclass {
- struct nvkm_oclass base;
+struct gf100_grctx_func {
/* main context generation function */
- void (*main)(struct gf100_gr_priv *, struct gf100_grctx *);
+ void (*main)(struct gf100_gr *, struct gf100_grctx *);
/* context-specific modify-on-first-load list generation function */
- void (*unkn)(struct gf100_gr_priv *);
+ void (*unkn)(struct gf100_gr *);
/* mmio context data */
const struct gf100_gr_pack *hub;
const struct gf100_gr_pack *gpc;
@@ -50,60 +49,61 @@ struct gf100_grctx_oclass {
u32 alpha_nr;
};
-static inline const struct gf100_grctx_oclass *
-gf100_grctx_impl(struct gf100_gr_priv *priv)
-{
- return (void *)nv_engine(priv)->cclass;
-}
-
-extern struct nvkm_oclass *gf100_grctx_oclass;
-int gf100_grctx_generate(struct gf100_gr_priv *);
-void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gf100_grctx;
+int gf100_grctx_generate(struct gf100_gr *);
+void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gf100_grctx_generate_bundle(struct gf100_grctx *);
void gf100_grctx_generate_pagepool(struct gf100_grctx *);
void gf100_grctx_generate_attrib(struct gf100_grctx *);
-void gf100_grctx_generate_unkn(struct gf100_gr_priv *);
-void gf100_grctx_generate_tpcid(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406028(struct gf100_gr_priv *);
-void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406800(struct gf100_gr_priv *);
-
-extern struct nvkm_oclass *gf108_grctx_oclass;
+void gf100_grctx_generate_unkn(struct gf100_gr *);
+void gf100_grctx_generate_tpcid(struct gf100_gr *);
+void gf100_grctx_generate_r406028(struct gf100_gr *);
+void gf100_grctx_generate_r4060a8(struct gf100_gr *);
+void gf100_grctx_generate_r418bb8(struct gf100_gr *);
+void gf100_grctx_generate_r406800(struct gf100_gr *);
+
+extern const struct gf100_grctx_func gf108_grctx;
void gf108_grctx_generate_attrib(struct gf100_grctx *);
-void gf108_grctx_generate_unkn(struct gf100_gr_priv *);
+void gf108_grctx_generate_unkn(struct gf100_gr *);
-extern struct nvkm_oclass *gf104_grctx_oclass;
-extern struct nvkm_oclass *gf110_grctx_oclass;
+extern const struct gf100_grctx_func gf104_grctx;
+extern const struct gf100_grctx_func gf110_grctx;
-extern struct nvkm_oclass *gf117_grctx_oclass;
+extern const struct gf100_grctx_func gf117_grctx;
void gf117_grctx_generate_attrib(struct gf100_grctx *);
-extern struct nvkm_oclass *gf119_grctx_oclass;
+extern const struct gf100_grctx_func gf119_grctx;
-extern struct nvkm_oclass *gk104_grctx_oclass;
-extern struct nvkm_oclass *gk20a_grctx_oclass;
-void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk104_grctx;
+extern const struct gf100_grctx_func gk20a_grctx;
+void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
-void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
-void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+void gk104_grctx_generate_unkn(struct gf100_gr *);
+void gk104_grctx_generate_r418bb8(struct gf100_gr *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
+
+void gm107_grctx_generate_bundle(struct gf100_grctx *);
+void gm107_grctx_generate_pagepool(struct gf100_grctx *);
+void gm107_grctx_generate_attrib(struct gf100_grctx *);
-extern struct nvkm_oclass *gk110_grctx_oclass;
-extern struct nvkm_oclass *gk110b_grctx_oclass;
-extern struct nvkm_oclass *gk208_grctx_oclass;
+extern const struct gf100_grctx_func gk110_grctx;
+extern const struct gf100_grctx_func gk110b_grctx;
+extern const struct gf100_grctx_func gk208_grctx;
-extern struct nvkm_oclass *gm107_grctx_oclass;
+extern const struct gf100_grctx_func gm107_grctx;
void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
-extern struct nvkm_oclass *gm204_grctx_oclass;
-void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gm204_grctx;
+void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm204_grctx_generate_tpcid(struct gf100_gr *);
+void gm204_grctx_generate_405b60(struct gf100_gr *);
-extern struct nvkm_oclass *gm206_grctx_oclass;
+extern const struct gf100_grctx_func gm206_grctx;
+extern const struct gf100_grctx_func gm20b_grctx;
/* context init value lists */
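
The header rework above drops the nvkm_oclass wrapper entirely: each chipset now exports a bare const table of callbacks plus tuning values, reached via gr->func->grctx instead of casting the engine's cclass. A minimal sketch of that pattern, with simplified stand-in types (not the nvkm definitions):

#include <stdio.h>

struct gr;                              /* opaque device state */

struct grctx_func {
	void (*main)(struct gr *);      /* context image generator */
	unsigned attrib_nr;             /* per-chipset tuning value */
};

struct gr {
	const struct grctx_func *grctx; /* stands in for gr->func->grctx */
};

static void gf100_main(struct gr *gr)
{
	(void)gr;
	printf("generate gf100-style context image\n");
}

static const struct grctx_func gf100_funcs = {  /* illustrative name */
	.main      = gf100_main,
	.attrib_nr = 0x218,
};

int main(void)
{
	struct gr gr = { .grctx = &gf100_funcs };
	gr.grctx->main(&gr);            /* dispatch via the const table */
	return 0;
}
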
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index c5a8d55e2cac..54fd74e9cca0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -79,17 +79,8 @@ gf104_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xc3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf104_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@@ -105,4 +96,4 @@ gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 87c844a5f34b..505cdcbfc085 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -730,18 +730,18 @@ gf108_grctx_pack_tpc[] = {
void
gf108_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct gf100_gr_priv *priv = info->priv;
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
- const u32 alpha = impl->alpha_nr;
- const u32 beta = impl->attrib_nr;
- const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 beta = grctx->attrib_nr;
+ const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
- u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, tpc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -749,43 +749,35 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x405830, (beta << 16) | alpha);
mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
const u32 a = alpha;
const u32 b = beta;
const u32 t = timeslice_mode;
const u32 o = TPC_UNIT(gpc, tpc, 0x500);
mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
- bo += impl->attrib_nr_max;
+ bo += grctx->attrib_nr_max;
mmio_wr32(info, o + 0x44, (a << 16) | ao);
- ao += impl->alpha_nr_max;
+ ao += grctx->alpha_nr_max;
}
}
}
void
-gf108_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf108_grctx_generate_unkn(struct gf100_gr *gr)
{
- nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
- nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
- nv_mask(priv, 0x419814, 0x00000004, 0x00000004);
- nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
- nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
- nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+ nvkm_mask(device, 0x419814, 0x00000004, 0x00000004);
+ nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+ nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
-struct nvkm_oclass *
-gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xc1),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf108_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf108_grctx_pack_hub,
@@ -803,4 +795,4 @@ gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
-}.base;
+};
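
The offset bookkeeping in gf108_grctx_generate_attrib above carves one VRAM allocation of 0x20 * (attrib_nr_max + alpha_nr_max) bytes per TPC into beta (attrib) circular buffers for every TPC first, then alpha buffers; gf117 and gm107 later apply the same scheme scaled by ppc_tpc_nr. A standalone rerun of that arithmetic, with made-up example counts:

#include <stdio.h>

int main(void)
{
	const unsigned attrib_nr_max = 0x324, alpha_nr_max = 0x324;
	const unsigned gpc_nr = 4, tpc_nr[4] = { 2, 2, 1, 2 };
	unsigned tpc_total = 0, bo = 0, ao, gpc, tpc;

	for (gpc = 0; gpc < gpc_nr; gpc++)
		tpc_total += tpc_nr[gpc];
	ao = bo + attrib_nr_max * tpc_total;   /* alphas follow betas */

	for (gpc = 0; gpc < gpc_nr; gpc++) {
		for (tpc = 0; tpc < tpc_nr[gpc]; tpc++) {
			printf("gpc%u tpc%u: beta@%#x alpha@%#x\n",
			       gpc, tpc, bo, ao);
			bo += attrib_nr_max;
			ao += alpha_nr_max;
		}
	}
	return 0;
}
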
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b3acd931b978..7df398b53f8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -330,17 +330,8 @@ gf110_grctx_pack_gpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xc8),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf110_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@@ -356,4 +347,4 @@ gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 9bbe2c97552e..b5b875928aba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -182,18 +182,18 @@ gf117_grctx_pack_ppc[] = {
void
gf117_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct gf100_gr_priv *priv = info->priv;
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
- const u32 alpha = impl->alpha_nr;
- const u32 beta = impl->attrib_nr;
- const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 beta = grctx->attrib_nr;
+ const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
- u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -201,68 +201,60 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x405830, (beta << 16) | alpha);
mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
- const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
- const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc];
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+ const u32 a = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
const u32 t = timeslice_mode;
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
- bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, (a << 16) | ao);
- ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
}
}
}
void
-gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+ nvkm_mc_unk260(device->mc, 0);
- gf100_gr_mmio(priv, oclass->hub);
- gf100_gr_mmio(priv, oclass->gpc);
- gf100_gr_mmio(priv, oclass->zcull);
- gf100_gr_mmio(priv, oclass->tpc);
- gf100_gr_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(gr, grctx->hub);
+ gf100_gr_mmio(gr, grctx->gpc);
+ gf100_gr_mmio(gr, grctx->zcull);
+ gf100_gr_mmio(gr, grctx->tpc);
+ gf100_gr_mmio(gr, grctx->ppc);
- nv_wr32(priv, 0x404154, 0x00000000);
+ nvkm_wr32(device, 0x404154, 0x00000000);
- oclass->bundle(info);
- oclass->pagepool(info);
- oclass->attrib(info);
- oclass->unkn(priv);
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
- gf100_grctx_generate_tpcid(priv);
- gf100_grctx_generate_r406028(priv);
- gf100_grctx_generate_r4060a8(priv);
- gk104_grctx_generate_r418bb8(priv);
- gf100_grctx_generate_r406800(priv);
+ gf100_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gf100_grctx_generate_r4060a8(gr);
+ gk104_grctx_generate_r418bb8(gr);
+ gf100_grctx_generate_r406800(gr);
for (i = 0; i < 8; i++)
- nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
- gf100_gr_icmd(priv, oclass->icmd);
- nv_wr32(priv, 0x404154, 0x00000400);
- gf100_gr_mthd(priv, oclass->mthd);
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+ gf100_gr_icmd(gr, grctx->icmd);
+ nvkm_wr32(device, 0x404154, 0x00000400);
+ gf100_gr_mthd(gr, grctx->mthd);
+ nvkm_mc_unk260(device->mc, 1);
}
-struct nvkm_oclass *
-gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xd7),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf117_grctx = {
.main = gf117_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gf117_grctx_pack_hub,
@@ -281,4 +273,4 @@ gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x324,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 8d8761443809..605185b078be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -498,17 +498,8 @@ gf119_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xd9),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gf119_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf119_grctx_pack_hub,
@@ -526,4 +517,4 @@ gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b12f6a9fd926..a843e3689c3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -843,27 +843,27 @@ gk104_grctx_pack_ppc[] = {
void
gk104_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
- const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
- impl->bundle_size / 0x20);
- const u32 token_limit = impl->bundle_token_limit;
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+ const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+ grctx->bundle_size / 0x20);
+ const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
- mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -872,31 +872,33 @@ gk104_grctx_generate_pagepool(struct gf100_grctx *info)
}
void
-gk104_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gk104_grctx_generate_unkn(struct gf100_gr *gr)
{
- nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
- nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
- nv_mask(priv, 0x41be08, 0x00000004, 0x00000004);
- nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
- nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
- nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+ nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
+ nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+ nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
void
-gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 data[6] = {}, data2[2] = {};
u8 tpcnr[GPC_MAX];
u8 shift, ntpcv;
int gpc, tpc, i;
/* calculate first set of magics */
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
gpc = -1;
- for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < gr->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@@ -908,7 +910,7 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
/* and the second... */
shift = 0;
- ntpcv = priv->tpc_total;
+ ntpcv = gr->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@@ -921,86 +923,79 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* GPC_BROADCAST */
- nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr);
+ nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+ nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(priv, 0x41bfd0, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr | data2[0]);
- nv_wr32(priv, 0x41bfe4, data2[1]);
+ nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr | data2[0]);
+ nvkm_wr32(device, 0x41bfe4, data2[1]);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+ nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
/* UNK78xx */
- nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
- priv->magic_not_rop_nr);
+ nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+ gr->magic_not_rop_nr);
for (i = 0; i < 6; i++)
- nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+ nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
}
void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
{
- const u32 fbp_count = nv_rd32(priv, 0x120074);
- nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
- nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 fbp_count = nvkm_rd32(device, 0x120074);
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
void
-gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+ nvkm_mc_unk260(device->mc, 0);
- gf100_gr_mmio(priv, oclass->hub);
- gf100_gr_mmio(priv, oclass->gpc);
- gf100_gr_mmio(priv, oclass->zcull);
- gf100_gr_mmio(priv, oclass->tpc);
- gf100_gr_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(gr, grctx->hub);
+ gf100_gr_mmio(gr, grctx->gpc);
+ gf100_gr_mmio(gr, grctx->zcull);
+ gf100_gr_mmio(gr, grctx->tpc);
+ gf100_gr_mmio(gr, grctx->ppc);
- nv_wr32(priv, 0x404154, 0x00000000);
+ nvkm_wr32(device, 0x404154, 0x00000000);
- oclass->bundle(info);
- oclass->pagepool(info);
- oclass->attrib(info);
- oclass->unkn(priv);
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
- gf100_grctx_generate_tpcid(priv);
- gf100_grctx_generate_r406028(priv);
- gk104_grctx_generate_r418bb8(priv);
- gf100_grctx_generate_r406800(priv);
+ gf100_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+ gf100_grctx_generate_r406800(gr);
for (i = 0; i < 8; i++)
- nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
- nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(priv);
- nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+ gk104_grctx_generate_rop_active_fbps(gr);
+ nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
- gf100_gr_icmd(priv, oclass->icmd);
- nv_wr32(priv, 0x404154, 0x00000400);
- gf100_gr_mthd(priv, oclass->mthd);
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+ gf100_gr_icmd(gr, grctx->icmd);
+ nvkm_wr32(device, 0x404154, 0x00000400);
+ gf100_gr_mthd(gr, grctx->mthd);
+ nvkm_mc_unk260(device->mc, 1);
- nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
- nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
+ nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
+ nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
}
-struct nvkm_oclass *
-gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xe4),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gk104_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk104_grctx_pack_hub,
@@ -1021,4 +1016,4 @@ gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
-}.base;
+};
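
The "second set of magics" in gk104_grctx_generate_r418bb8 scales tpc_total left until bit 4 is set, then derives the modulo table written to 0x41bfd0/0x41bfe4. A standalone recomputation of the parts visible in the hunk above; the data2[1] loop bounds are elided by the diff context and assumed here, and the TPC count is an arbitrary example:

#include <stdio.h>

int main(void)
{
	unsigned tpc_total = 7;            /* arbitrary example count */
	unsigned ntpcv = tpc_total, shift = 0, data2_1 = 0, i;

	while (!(ntpcv & (1 << 4))) {      /* scale until bit 4 is set */
		ntpcv <<= 1;
		shift++;
	}

	for (i = 1; i < 5; i++)            /* bounds assumed, not shown */
		data2_1 |= ((1u << (i + 5)) % ntpcv) << ((i - 1) * 5);

	printf("shift=%u ntpcv=%u data2[1]=%#010x\n", shift, ntpcv, data2_1);
	return 0;
}
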
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index b3f58be04e9c..7b95ec2fe453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,17 +808,8 @@ gk110_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xf0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gk110_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
@@ -839,4 +830,4 @@ gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index b11c26794fde..048b1152da44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -69,17 +69,8 @@ gk110b_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xf1),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gk110b_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
@@ -100,4 +91,4 @@ gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 6e8ce9fc311a..67b7a1b43617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -530,17 +530,8 @@ gk208_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-struct nvkm_oclass *
-gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0x08),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gk208_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk208_grctx_pack_hub,
@@ -561,4 +552,4 @@ gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 2f241f6f0f0a..ddaa16a71c84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,34 +20,60 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "ctxgf100.h"
+#include "gf100.h"
-static const struct gf100_gr_pack
-gk20a_grctx_pack_mthd[] = {
- { gk104_grctx_init_a097_0, 0xa297 },
- { gf100_grctx_init_902d_0, 0x902d },
- {}
-};
+#include <subdev/mc.h>
+
+static void
+gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ int idle_timeout_save;
+ int i;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ gf100_gr_wait_idle(gr);
+
+ idle_timeout_save = nvkm_rd32(device, 0x404154);
+ nvkm_wr32(device, 0x404154, 0x00000000);
+
+ grctx->attrib(info);
+
+ grctx->unkn(gr);
+
+ gf100_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+ gf100_grctx_generate_r406800(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-struct nvkm_oclass *
-gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
- .main = gk104_grctx_generate_main,
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ gk104_grctx_generate_rop_active_fbps(gr);
+
+ nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000);
+
+ gf100_gr_wait_idle(gr);
+
+ nvkm_wr32(device, 0x404154, idle_timeout_save);
+ gf100_gr_wait_idle(gr);
+
+ gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_wait_idle(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ grctx->pagepool(info);
+ grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gk20a_grctx = {
+ .main = gk20a_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
- .hub = gk104_grctx_pack_hub,
- .gpc = gk104_grctx_pack_gpc,
- .zcull = gf100_grctx_pack_zcull,
- .tpc = gk104_grctx_pack_tpc,
- .ppc = gk104_grctx_pack_ppc,
- .icmd = gk104_grctx_pack_icmd,
- .mthd = gk20a_grctx_pack_mthd,
.bundle = gk104_grctx_generate_bundle,
.bundle_size = 0x1800,
.bundle_min_gpm_fifo_depth = 0x62,
@@ -59,4 +85,4 @@ gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x240,
.alpha_nr_max = 0x648 + (0x648 / 2),
.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fbeaae3ae6ce..95f59e3169f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -863,27 +863,27 @@ gm107_grctx_pack_ppc[] = {
void
gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
- const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
- impl->bundle_size / 0x20);
- const u32 token_limit = impl->bundle_token_limit;
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+ const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+ grctx->bundle_size / 0x20);
+ const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
- mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
+ mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
- const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -895,17 +895,17 @@ gm107_grctx_generate_pagepool(struct gf100_grctx *info)
void
gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
- struct gf100_gr_priv *priv = info->priv;
- const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv);
- const u32 alpha = impl->alpha_nr;
- const u32 attrib = impl->attrib_nr;
- const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int max_batches = 0xffff;
u32 bo = 0;
- u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc, n = 0;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -914,97 +914,90 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
- const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc];
- const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_wr32(info, o + 0xc0, bs);
mmio_wr32(info, o + 0xf4, bo);
- bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
- ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
}
}
}
-static void
-gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm107_grctx_generate_tpcid(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int gpc, tpc, id;
for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- if (tpc < priv->tpc_nr[gpc]) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ if (tpc < gr->tpc_nr[gpc]) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
id++;
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
}
}
}
static void
-gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
- gf100_gr_mmio(priv, oclass->hub);
- gf100_gr_mmio(priv, oclass->gpc);
- gf100_gr_mmio(priv, oclass->zcull);
- gf100_gr_mmio(priv, oclass->tpc);
- gf100_gr_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(gr, grctx->hub);
+ gf100_gr_mmio(gr, grctx->gpc);
+ gf100_gr_mmio(gr, grctx->zcull);
+ gf100_gr_mmio(gr, grctx->tpc);
+ gf100_gr_mmio(gr, grctx->ppc);
- nv_wr32(priv, 0x404154, 0x00000000);
+ nvkm_wr32(device, 0x404154, 0x00000000);
- oclass->bundle(info);
- oclass->pagepool(info);
- oclass->attrib(info);
- oclass->unkn(priv);
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
- gm107_grctx_generate_tpcid(priv);
- gf100_grctx_generate_r406028(priv);
- gk104_grctx_generate_r418bb8(priv);
- gf100_grctx_generate_r406800(priv);
+ gm107_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+ gf100_grctx_generate_r406800(gr);
- nv_wr32(priv, 0x4064d0, 0x00000001);
+ nvkm_wr32(device, 0x4064d0, 0x00000001);
for (i = 1; i < 8; i++)
- nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
- nv_wr32(priv, 0x406500, 0x00000001);
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000001);
- nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(priv);
+ gk104_grctx_generate_rop_active_fbps(gr);
- gf100_gr_icmd(priv, oclass->icmd);
- nv_wr32(priv, 0x404154, 0x00000400);
- gf100_gr_mthd(priv, oclass->mthd);
+ gf100_gr_icmd(gr, grctx->icmd);
+ nvkm_wr32(device, 0x404154, 0x00000400);
+ gf100_gr_mthd(gr, grctx->mthd);
- nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
- nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
- nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
- nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
+ nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
}
-struct nvkm_oclass *
-gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0x08),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gm107_grctx = {
.main = gm107_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
@@ -1025,4 +1018,4 @@ gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0xaa0,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
index ea8e66151aa8..170cbfdbe1ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -918,17 +918,18 @@ gm204_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_tpcid(struct gf100_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int gpc, tpc, id;
for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- if (tpc < priv->tpc_nr[gpc]) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ if (tpc < gr->tpc_nr[gpc]) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
id++;
}
}
@@ -936,101 +937,95 @@ gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
}
static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
{
- const u32 fbp_count = nv_rd32(priv, 0x12006c);
- nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
- nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
-static void
-gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_405b60(struct gf100_gr *gr)
{
- const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
- u32 dist[TPC_MAX] = {};
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+ u32 dist[TPC_MAX / 4] = {};
u32 gpcs[GPC_MAX] = {};
u8 tpcnr[GPC_MAX];
int tpc, gpc, i;
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
/* won't result in the same distribution as the binary driver where
* some of the gpcs have more tpcs than others, but this shall do
* for the moment. the code for earlier gpus has this issue too.
*/
- for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+ for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while(!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
gpcs[gpc] |= i << (tpc * 8);
}
for (i = 0; i < dist_nr; i++)
- nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
- for (i = 0; i < priv->gpc_nr; i++)
- nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+ nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < gr->gpc_nr; i++)
+ nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
}
void
-gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
- struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 tmp;
int i;
- gf100_gr_mmio(priv, oclass->hub);
- gf100_gr_mmio(priv, oclass->gpc);
- gf100_gr_mmio(priv, oclass->zcull);
- gf100_gr_mmio(priv, oclass->tpc);
- gf100_gr_mmio(priv, oclass->ppc);
+ gf100_gr_mmio(gr, grctx->hub);
+ gf100_gr_mmio(gr, grctx->gpc);
+ gf100_gr_mmio(gr, grctx->zcull);
+ gf100_gr_mmio(gr, grctx->tpc);
+ gf100_gr_mmio(gr, grctx->ppc);
- nv_wr32(priv, 0x404154, 0x00000000);
+ nvkm_wr32(device, 0x404154, 0x00000000);
- oclass->bundle(info);
- oclass->pagepool(info);
- oclass->attrib(info);
- oclass->unkn(priv);
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
- gm204_grctx_generate_tpcid(priv);
- gf100_grctx_generate_r406028(priv);
- gk104_grctx_generate_r418bb8(priv);
+ gm204_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
for (i = 0; i < 8; i++)
- nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
- nv_wr32(priv, 0x406500, 0x00000000);
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000000);
- nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gm204_grctx_generate_rop_active_fbps(priv);
+ gm204_grctx_generate_rop_active_fbps(gr);
- for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
- tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
- nv_wr32(priv, 0x4041c4, tmp);
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+ nvkm_wr32(device, 0x4041c4, tmp);
- gm204_grctx_generate_405b60(priv);
+ gm204_grctx_generate_405b60(gr);
- gf100_gr_icmd(priv, oclass->icmd);
- nv_wr32(priv, 0x404154, 0x00000800);
- gf100_gr_mthd(priv, oclass->mthd);
+ gf100_gr_icmd(gr, grctx->icmd);
+ nvkm_wr32(device, 0x404154, 0x00000800);
+ gf100_gr_mthd(gr, grctx->mthd);
- nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
- nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+ nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+ nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
}
-struct nvkm_oclass *
-gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0x24),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gm204_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
@@ -1051,4 +1046,4 @@ gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
-}.base;
+};
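
gm204_grctx_generate_405b60 above assigns global TPC ids round-robin across GPCs, packing (gpc, tpc) pairs four to a 32-bit word for 0x405b60 and per-GPC id bytes for 0x405ba0. A standalone rerun of that distribution with example GPC/TPC counts:

#include <stdio.h>

int main(void)
{
	const unsigned gpc_nr = 2, tpc_total = 7;
	const unsigned tpc_nr[2] = { 4, 3 };
	unsigned dist[2] = { 0 }, gpcs[2] = { 0 }, tpcnr[2];
	int gpc = -1, tpc, i;

	for (i = 0; i < (int)gpc_nr; i++)
		tpcnr[i] = tpc_nr[i];

	for (i = 0; i < (int)tpc_total; i++) {
		do {                       /* round-robin across GPCs */
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;

		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
		gpcs[gpc] |= (unsigned)i << (tpc * 8);
	}

	for (i = 0; i < 2; i++)            /* DIV_ROUND_UP(7, 4) words */
		printf("0x405b60[%d] = %#010x\n", i, dist[i]);
	for (i = 0; i < (int)gpc_nr; i++)
		printf("0x405ba0[%d] = %#010x\n", i, gpcs[i]);
	return 0;
}
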
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
index 91ec41617943..d6be6034c2c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -49,17 +49,8 @@ gm206_grctx_pack_gpc[] = {
{}
};
-struct nvkm_oclass *
-gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
- .base.handle = NV_ENGCTX(GR, 0x26),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_context_ctor,
- .dtor = gf100_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+const struct gf100_grctx_func
+gm206_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
@@ -80,4 +71,4 @@ gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
new file mode 100644
index 000000000000..670260402538
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+gm20b_grctx_generate_r406028(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 tpc_per_gpc = 0;
+ int i;
+
+ for (i = 0; i < gr->gpc_nr; i++)
+ tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
+
+ nvkm_wr32(device, 0x406028, tpc_per_gpc);
+ nvkm_wr32(device, 0x405870, tpc_per_gpc);
+}
+
+static void
+gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ int idle_timeout_save;
+ int i, tmp;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ gf100_gr_wait_idle(gr);
+
+ idle_timeout_save = nvkm_rd32(device, 0x404154);
+ nvkm_wr32(device, 0x404154, 0x00000000);
+
+ grctx->attrib(info);
+
+ grctx->unkn(gr);
+
+ gm204_grctx_generate_tpcid(gr);
+ gm20b_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ gk104_grctx_generate_rop_active_fbps(gr);
+ nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
+
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+ nvkm_wr32(device, 0x4041c4, tmp);
+
+ gm204_grctx_generate_405b60(gr);
+
+ gf100_gr_wait_idle(gr);
+
+ nvkm_wr32(device, 0x404154, idle_timeout_save);
+ gf100_gr_wait_idle(gr);
+
+ gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_wait_idle(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ grctx->pagepool(info);
+ grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gm20b_grctx = {
+ .main = gm20b_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .bundle_min_gpm_fifo_depth = 0x182,
+ .bundle_token_limit = 0x1c0,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
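
gm20b_grctx_generate_r406028 above packs each GPC's TPC count into a 4-bit field of a single register value. A quick illustration with example counts:

#include <stdio.h>

int main(void)
{
	const unsigned gpc_nr = 2, tpc_nr[2] = { 1, 2 };
	unsigned tpc_per_gpc = 0, i;

	for (i = 0; i < gpc_nr; i++)
		tpc_per_gpc |= tpc_nr[i] << (4 * i);

	printf("0x406028 <- %#x\n", tpc_per_gpc);   /* 0x21 here */
	return 0;
}
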
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index dc31462afe65..80a6b017af64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -111,7 +111,6 @@
#include "ctxnv40.h"
#include "nv40.h"
-#include <core/device.h>
/* TODO:
* - get vs count from 0x1540
@@ -583,13 +582,13 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
- nv_wo32(obj, offset * 4, 0x3f800000);
+ nvkm_wo32(obj, offset * 4, 0x3f800000);
for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
for (i = 0; i < vs_nr_b0 * 6; i += 6)
- nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+ nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
for (i = 0; i < vs_nr_b1 * 4; i += 4)
- nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+ nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
}
@@ -675,7 +674,7 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
struct nvkm_grctx ctx = {
.device = device,
.mode = NVKM_GRCTX_PROG,
- .data = ctxprog,
+ .ucode = ctxprog,
.ctxprog_max = 256,
};
@@ -684,9 +683,9 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
nv40_grctx_generate(&ctx);
- nv_wr32(device, 0x400324, 0);
+ nvkm_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(device, 0x400328, ctxprog[i]);
+ nvkm_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
kfree(ctxprog);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961956af..50e808e9f926 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@ struct nvkm_grctx {
NVKM_GRCTX_PROG,
NVKM_GRCTX_VALS
} mode;
- void *data;
+ u32 *ucode;
+ struct nvkm_gpuobj *data;
u32 ctxprog_max;
u32 ctxprog_len;
@@ -22,7 +23,7 @@ struct nvkm_grctx {
static inline void
cp_out(struct nvkm_grctx *ctx, u32 inst)
{
- u32 *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->ucode;
if (ctx->mode != NVKM_GRCTX_PROG)
return;
@@ -56,7 +57,7 @@ cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length)
static inline void
cp_name(struct nvkm_grctx *ctx, int name)
{
- u32 *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->ucode;
int i;
if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,6 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
- nv_wo32(ctx->data, reg * 4, val);
+ nvkm_wo32(ctx->data, reg * 4, val);
}
#endif
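
The ctxnv40.h change splits the old untyped data pointer in two: a u32 *ucode buffer that cp_out()/cp_name() append ctxprog opcodes to, and a gpuobj that gr_def() pokes context values into, selected by ctx->mode. A minimal sketch of the emit side with simplified stand-in types; the bounds check here is illustrative, not necessarily what the real cp_out() does on overflow:

#include <stdint.h>
#include <stdio.h>

enum grctx_mode { GRCTX_PROG, GRCTX_VALS };     /* simplified stand-ins */

struct grctx {
	enum grctx_mode mode;
	uint32_t *ucode;        /* ctxprog words under construction */
	uint32_t ctxprog_len;
	uint32_t ctxprog_max;
};

/* Append one ctxprog opcode, as the reworked cp_out() does against
 * ctx->ucode; no-op in VALS mode. */
static void cp_out(struct grctx *ctx, uint32_t inst)
{
	if (ctx->mode != GRCTX_PROG)
		return;
	if (ctx->ctxprog_len >= ctx->ctxprog_max)
		return;
	ctx->ucode[ctx->ctxprog_len++] = inst;
}

int main(void)
{
	uint32_t buf[256];
	struct grctx ctx = {
		.mode = GRCTX_PROG, .ucode = buf, .ctxprog_max = 256,
	};

	cp_out(&ctx, 0x00600005);   /* arbitrary example opcode */
	printf("ctxprog_len=%u word0=%#010x\n", ctx.ctxprog_len, buf[0]);
	return 0;
}
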
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 9c9528d2cd90..1e13278cf306 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -107,7 +107,6 @@
#include "ctxnv40.h"
-#include <core/device.h>
#include <subdev/fb.h>
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -269,7 +268,7 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
struct nvkm_grctx ctx = {
.device = device,
.mode = NVKM_GRCTX_PROG,
- .data = ctxprog,
+ .ucode = ctxprog,
.ctxprog_max = 512,
};
@@ -277,9 +276,9 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
return -ENOMEM;
nv50_grctx_generate(&ctx);
- nv_wr32(device, 0x400324, 0);
+ nvkm_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(device, 0x400328, ctxprog[i]);
+ nvkm_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
kfree(ctxprog);
return 0;
@@ -299,7 +298,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
struct nvkm_device *device = ctx->device;
int i, j;
int offset, base;
- u32 units = nv_rd32 (ctx->device, 0x1540);
+ u32 units = nvkm_rd32(device, 0x1540);
/* 0800: DISPATCH */
cp_ctx(ctx, 0x400808, 7);
@@ -570,7 +569,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
else if (device->chipset < 0xa0)
gr_def(ctx, 0x407d08, 0x00390040);
else {
- if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
+ if (device->fb->ram->type != NVKM_RAM_TYPE_GDDR5)
gr_def(ctx, 0x407d08, 0x003d0040);
else
gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,9 +783,10 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
static void
dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NVKM_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS) {
for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ }
ctx->ctxvals_pos += num;
}
@@ -1156,9 +1156,10 @@ nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx)
static void
xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NVKM_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS) {
for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+ nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+ }
ctx->ctxvals_pos += num << 3;
}
@@ -1190,7 +1191,7 @@ nv50_gr_construct_xfer1(struct nvkm_grctx *ctx)
int i;
int offset;
int size = 0;
- u32 units = nv_rd32 (ctx->device, 0x1540);
+ u32 units = nvkm_rd32(device, 0x1540);
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
ctx->ctxvals_base = offset;
@@ -3273,7 +3274,7 @@ nv50_gr_construct_xfer2(struct nvkm_grctx *ctx)
struct nvkm_device *device = ctx->device;
int i;
u32 offset;
- u32 units = nv_rd32 (ctx->device, 0x1540);
+ u32 units = nvkm_rd32(device, 0x1540);
int size = 0;
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
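
The two emit helpers touched above differ only in stride: dd_emit() writes num consecutive words, while xf_emit() spaces its writes eight words apart (i << 3) and advances ctxvals_pos by num << 3. A tiny standalone illustration of the two cursor movements, against an example buffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t img[64] = { 0 };
	unsigned pos = 0, i, num = 3;

	for (i = 0; i < num; i++)          /* dd_emit-style: packed */
		img[pos + i] = 0x11111111;
	pos += num;

	for (i = 0; i < num; i++)          /* xf_emit-style: stride 8 */
		img[pos + (i << 3)] = 0x22222222;
	pos += num << 3;

	printf("cursor ends at word %u\n", pos);   /* 3 + 24 = 27 */
	return 0;
}
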
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
new file mode 100644
index 000000000000..ce913300539f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+static const struct nvkm_bitfield nv50_gr_status[] = {
+ { 0x00000001, "BUSY" }, /* set when any bit is set */
+ { 0x00000002, "DISPATCH" },
+ { 0x00000004, "UNK2" },
+ { 0x00000008, "UNK3" },
+ { 0x00000010, "UNK4" },
+ { 0x00000020, "UNK5" },
+ { 0x00000040, "M2MF" },
+ { 0x00000080, "UNK7" },
+ { 0x00000100, "CTXPROG" },
+ { 0x00000200, "VFETCH" },
+ { 0x00000400, "CCACHE_PREGEOM" },
+ { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
+ { 0x00001000, "VCLIP" },
+ { 0x00002000, "RATTR_APLANE" },
+ { 0x00004000, "TRAST" },
+ { 0x00008000, "CLIPID" },
+ { 0x00010000, "ZCULL" },
+ { 0x00020000, "ENG2D" },
+ { 0x00040000, "RMASK" },
+ { 0x00080000, "TPC_RAST" },
+ { 0x00100000, "TPC_PROP" },
+ { 0x00200000, "TPC_TEX" },
+ { 0x00400000, "TPC_GEOM" },
+ { 0x00800000, "TPC_MP" },
+ { 0x01000000, "ROP" },
+ {}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_0[] = {
+ { 0x01, "VFETCH" },
+ { 0x02, "CCACHE" },
+ { 0x04, "PREGEOM" },
+ { 0x08, "POSTGEOM" },
+ { 0x10, "VATTR" },
+ { 0x20, "STRMOUT" },
+ { 0x40, "VCLIP" },
+ {}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_1[] = {
+ { 0x01, "TPC_RAST" },
+ { 0x02, "TPC_PROP" },
+ { 0x04, "TPC_TEX" },
+ { 0x08, "TPC_GEOM" },
+ { 0x10, "TPC_MP" },
+ {}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_2[] = {
+ { 0x01, "RATTR" },
+ { 0x02, "APLANE" },
+ { 0x04, "TRAST" },
+ { 0x08, "CLIPID" },
+ { 0x10, "ZCULL" },
+ { 0x20, "ENG2D" },
+ { 0x40, "RMASK" },
+ { 0x80, "ROP" },
+ {}
+};
+
+static void
+nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
+ const struct nvkm_bitfield *units, u32 status)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ u32 stat = status;
+ u8 mask = 0x00;
+ char msg[64];
+ int i;
+
+ for (i = 0; units[i].name && status; i++) {
+ if ((status & 7) == 1)
+ mask |= (1 << i);
+ status >>= 3;
+ }
+
+ nvkm_snprintbf(msg, sizeof(msg), units, mask);
+ nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
+}
+
+int
+g84_gr_tlb_flush(struct nvkm_gr *base)
+{
+ struct nv50_gr *gr = nv50_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_timer *tmr = device->timer;
+ bool idle, timeout = false;
+ unsigned long flags;
+ char status[128];
+ u64 start;
+ u32 tmp;
+
+ spin_lock_irqsave(&gr->lock, flags);
+ nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
+
+ start = nvkm_timer_read(tmr);
+ do {
+ idle = true;
+
+ for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+ } while (!idle &&
+ !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
+
+ if (timeout) {
+ nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
+
+ tmp = nvkm_rd32(device, 0x400700);
+ nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
+ nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
+
+ nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
+ nvkm_rd32(device, 0x400380));
+ nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
+ nvkm_rd32(device, 0x400384));
+ nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
+ nvkm_rd32(device, 0x400388));
+ }
+
+ nvkm_wr32(device, 0x100c80, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ );
+ nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return timeout ? -EBUSY : 0;
+}
+
+static const struct nvkm_gr_func
+g84_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .tlb_flush = g84_gr_tlb_flush,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, 0x8297, &nv50_gr_object },
+ {}
+ }
+};
+
+int
+g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&g84_gr, device, index, pgr);
+}
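[Editor's note] g84_gr_tlb_flush() above treats the PGRAPH_VSTATUS words (0x400380/0x400384/0x400388) as packed 3-bit fields, one per unit, where a field value of 1 means busy; both the idle poll and nvkm_gr_vstatus_print() depend on that layout. A standalone sketch of the decode, assuming only the encoding the code itself implies:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Each unit occupies 3 bits; per the driver's polling loop, a field value
 * of 1 marks that unit busy. Any other value is treated as idle. */
static bool vstatus_busy(uint32_t status)
{
        for (; status; status >>= 3) {
                if ((status & 7) == 1)
                        return true;
        }
        return false;
}

/* Collapse the 3-bit fields into a one-bit-per-unit mask, as
 * nvkm_gr_vstatus_print() does before formatting the bitfield names. */
static uint8_t vstatus_mask(uint32_t status)
{
        uint8_t mask = 0;
        int i;

        for (i = 0; status; i++, status >>= 3) {
                if ((status & 7) == 1)
                        mask |= 1 << i;
        }
        return mask;
}

int main(void)
{
        uint32_t status = 0x00000041; /* fields 0 and 2 hold the value 1 */

        printf("busy=%d mask=%02x\n", vstatus_busy(status), vstatus_mask(status));
        return 0;
}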
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..f1358a564e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,13 +26,12 @@
#include "fuc/os.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
#include <core/option.h>
-#include <engine/fifo.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
+#include <subdev/pmu.h>
#include <subdev/timer.h>
+#include <engine/fifo.h>
#include <nvif/class.h>
#include <nvif/unpack.h>
@@ -42,35 +41,36 @@
******************************************************************************/
static void
-gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
{
- if (priv->zbc_color[zbc].format) {
- nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
- nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
- nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
- nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
- }
- nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
- nv_wr32(priv, 0x405820, zbc);
- nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ if (gr->zbc_color[zbc].format) {
+ nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
+ nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
+ nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
+ nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
+ }
+ nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
+ nvkm_wr32(device, 0x405820, zbc);
+ nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
}
static int
-gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
const u32 ds[4], const u32 l2[4])
{
- struct nvkm_ltc *ltc = nvkm_ltc(priv);
+ struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
int zbc = -ENOSPC, i;
for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
- if (priv->zbc_color[i].format) {
- if (priv->zbc_color[i].format != format)
+ if (gr->zbc_color[i].format) {
+ if (gr->zbc_color[i].format != format)
continue;
- if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
- priv->zbc_color[i].ds)))
+ if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
+ gr->zbc_color[i].ds)))
continue;
- if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
- priv->zbc_color[i].l2))) {
+ if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
+ gr->zbc_color[i].l2))) {
WARN_ON(1);
return -EINVAL;
}
@@ -83,38 +83,39 @@ gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
if (zbc < 0)
return zbc;
- memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
- memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
- priv->zbc_color[zbc].format = format;
- ltc->zbc_color_get(ltc, zbc, l2);
- gf100_gr_zbc_clear_color(priv, zbc);
+ memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
+ memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
+ gr->zbc_color[zbc].format = format;
+ nvkm_ltc_zbc_color_get(ltc, zbc, l2);
+ gf100_gr_zbc_clear_color(gr, zbc);
return zbc;
}
static void
-gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
{
- if (priv->zbc_depth[zbc].format)
- nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
- nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
- nv_wr32(priv, 0x405820, zbc);
- nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ if (gr->zbc_depth[zbc].format)
+ nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
+ nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
+ nvkm_wr32(device, 0x405820, zbc);
+ nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
}
static int
-gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
const u32 ds, const u32 l2)
{
- struct nvkm_ltc *ltc = nvkm_ltc(priv);
+ struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
int zbc = -ENOSPC, i;
for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
- if (priv->zbc_depth[i].format) {
- if (priv->zbc_depth[i].format != format)
+ if (gr->zbc_depth[i].format) {
+ if (gr->zbc_depth[i].format != format)
continue;
- if (priv->zbc_depth[i].ds != ds)
+ if (gr->zbc_depth[i].ds != ds)
continue;
- if (priv->zbc_depth[i].l2 != l2) {
+ if (gr->zbc_depth[i].l2 != l2) {
WARN_ON(1);
return -EINVAL;
}
@@ -127,11 +128,11 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
if (zbc < 0)
return zbc;
- priv->zbc_depth[zbc].format = format;
- priv->zbc_depth[zbc].ds = ds;
- priv->zbc_depth[zbc].l2 = l2;
- ltc->zbc_depth_get(ltc, zbc, l2);
- gf100_gr_zbc_clear_depth(priv, zbc);
+ gr->zbc_depth[zbc].format = format;
+ gr->zbc_depth[zbc].ds = ds;
+ gr->zbc_depth[zbc].l2 = l2;
+ nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
+ gf100_gr_zbc_clear_depth(gr, zbc);
return zbc;
}
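[Editor's note] Both _get helpers above implement one slot policy for the LTC ZBC table: scan ltc->zbc_min..zbc_max for an entry that already matches, return -EINVAL if an entry with the same format and clear value disagrees on the L2 value, otherwise claim a free slot (format == 0) and fall back to -ENOSPC when the table is full. A simplified sketch of that policy (one ds/l2 word per slot, first-free allocation):

#include <stdint.h>
#include <stdio.h>

#define ZBC_SLOTS 16

struct zbc_slot {
        int      format; /* 0 = slot unused */
        uint32_t ds, l2;
};

/* Find-or-allocate a ZBC slot: reuse an exact match, reject a conflicting
 * L2 value for an otherwise identical entry, else take a free slot. */
static int zbc_get(struct zbc_slot *tbl, int format, uint32_t ds, uint32_t l2)
{
        int free = -1, i;

        for (i = 0; i < ZBC_SLOTS; i++) {
                if (tbl[i].format) {
                        if (tbl[i].format != format || tbl[i].ds != ds)
                                continue;
                        if (tbl[i].l2 != l2)
                                return -2;  /* -EINVAL in the driver */
                        return i;           /* existing matching entry */
                } else if (free < 0) {
                        free = i;           /* remember a free slot */
                }
        }
        if (free < 0)
                return -1;                  /* -ENOSPC in the driver */
        tbl[free].format = format;
        tbl[free].ds = ds;
        tbl[free].l2 = l2;
        return free;
}

int main(void)
{
        struct zbc_slot tbl[ZBC_SLOTS] = {{ 0, 0, 0 }};

        /* Prints "0 1 0": the third call reuses the first slot. */
        printf("%d %d %d\n", zbc_get(tbl, 1, 0, 0),
               zbc_get(tbl, 2, 0x3f800000, 0x3f800000),
               zbc_get(tbl, 1, 0, 0));
        return 0;
}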
@@ -142,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
static int
gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{
- struct gf100_gr_priv *priv = (void *)object->engine;
+ struct gf100_gr *gr = (void *)object->engine;
union {
struct fermi_a_zbc_color_v0 v0;
} *args = data;
@@ -169,7 +170,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
- ret = gf100_gr_zbc_color_get(priv, args->v0.format,
+ ret = gf100_gr_zbc_color_get(gr, args->v0.format,
args->v0.ds,
args->v0.l2);
if (ret >= 0) {
@@ -188,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
static int
gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{
- struct gf100_gr_priv *priv = (void *)object->engine;
+ struct gf100_gr *gr = (void *)object->engine;
union {
struct fermi_a_zbc_depth_v0 v0;
} *args = data;
@@ -197,7 +198,7 @@ gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
if (nvif_unpack(args->v0, 0, 0, false)) {
switch (args->v0.format) {
case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
- ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
+ ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
args->v0.ds,
args->v0.l2);
return (ret >= 0) ? 0 : -ENOSPC;
@@ -223,106 +224,176 @@ gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return -EINVAL;
}
-struct nvkm_ofuncs
-gf100_fermi_ofuncs = {
- .ctor = _nvkm_object_ctor,
- .dtor = nvkm_object_destroy,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
+const struct nvkm_object_func
+gf100_fermi = {
.mthd = gf100_fermi_mthd,
};
-static int
-gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
- void *pdata, u32 size)
+static void
+gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
{
- struct gf100_gr_priv *priv = (void *)object->engine;
- if (size >= sizeof(u32)) {
- u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
- nv_wr32(priv, 0x419e44, data);
- nv_wr32(priv, 0x419e4c, data);
- return 0;
+ nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
+ nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
+}
+
+static bool
+gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
+{
+ switch (class & 0x00ff) {
+ case 0x97:
+ case 0xc0:
+ switch (mthd) {
+ case 0x1528:
+ gf100_gr_mthd_set_shader_exceptions(device, data);
+ return true;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
}
- return -EINVAL;
+ return false;
}
-struct nvkm_omthds
-gf100_gr_9097_omthds[] = {
- { 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
- {}
-};
+static int
+gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ int c = 0;
-struct nvkm_omthds
-gf100_gr_90c0_omthds[] = {
- { 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
- {}
-};
+ while (gr->func->sclass[c].oclass) {
+ if (c++ == index) {
+ *sclass = gr->func->sclass[index];
+ return index;
+ }
+ }
-struct nvkm_oclass
-gf100_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
- { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
+ return c;
+}
/*******************************************************************************
* PGRAPH context
******************************************************************************/
-int
-gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *args, u32 size,
- struct nvkm_object **pobject)
+static int
+gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_vm *vm = nvkm_client(parent)->vm;
- struct gf100_gr_priv *priv = (void *)engine;
- struct gf100_gr_data *data = priv->mmio_data;
- struct gf100_gr_mmio *mmio = priv->mmio_list;
- struct gf100_gr_chan *chan;
+ struct gf100_gr_chan *chan = gf100_gr_chan(object);
+ struct gf100_gr *gr = chan->gr;
int ret, i;
- /* allocate memory for context, and fill with default values */
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
- priv->size, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+ align, false, parent, pgpuobj);
if (ret)
return ret;
+ nvkm_kmap(*pgpuobj);
+ for (i = 0; i < gr->size; i += 4)
+ nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
+
+ if (!gr->firmware) {
+ nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
+ nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+ } else {
+ nvkm_wo32(*pgpuobj, 0xf4, 0);
+ nvkm_wo32(*pgpuobj, 0xf8, 0);
+ nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
+ nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(*pgpuobj, 0x1c, 1);
+ nvkm_wo32(*pgpuobj, 0x20, 0);
+ nvkm_wo32(*pgpuobj, 0x28, 0);
+ nvkm_wo32(*pgpuobj, 0x2c, 0);
+ }
+ nvkm_done(*pgpuobj);
+ return 0;
+}
+
+static void *
+gf100_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct gf100_gr_chan *chan = gf100_gr_chan(object);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+ if (chan->data[i].vma.node) {
+ nvkm_vm_unmap(&chan->data[i].vma);
+ nvkm_vm_put(&chan->data[i].vma);
+ }
+ nvkm_memory_del(&chan->data[i].mem);
+ }
+
+ if (chan->mmio_vma.node) {
+ nvkm_vm_unmap(&chan->mmio_vma);
+ nvkm_vm_put(&chan->mmio_vma);
+ }
+ nvkm_memory_del(&chan->mmio);
+ return chan;
+}
+
+static const struct nvkm_object_func
+gf100_gr_chan = {
+ .dtor = gf100_gr_chan_dtor,
+ .bind = gf100_gr_chan_bind,
+};
+
+static int
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ struct gf100_gr_data *data = gr->mmio_data;
+ struct gf100_gr_mmio *mmio = gr->mmio_list;
+ struct gf100_gr_chan *chan;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int ret, i;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ *pobject = &chan->object;
+
/* allocate memory for a "mmio list" buffer that's used by the HUB
* fuc to modify some per-context register settings on first load
* of the context.
*/
- ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
- &chan->mmio);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+ false, &chan->mmio);
if (ret)
return ret;
- ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
- NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
- &chan->mmio_vma);
+ ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
+ NV_MEM_ACCESS_SYS, &chan->mmio_vma);
if (ret)
return ret;
+ nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+
/* allocate buffers referenced by mmio list */
- for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
- ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
- data->align, 0, &chan->data[i].mem);
+ for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ data->size, data->align, false,
+ &chan->data[i].mem);
if (ret)
return ret;
- ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
- &chan->data[i].vma);
+ ret = nvkm_vm_get(fifoch->vm,
+ nvkm_memory_size(chan->data[i].mem), 12,
+ data->access, &chan->data[i].vma);
if (ret)
return ret;
+ nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
data++;
}
/* finally, fill in the mmio list and point the context at it */
- for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+ nvkm_kmap(chan->mmio);
+ for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
u32 addr = mmio->addr;
u32 data = mmio->data;
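[Editor's note] The gf100_gr_chan_new() path above fills the per-channel "mmio list" as flat (address, value) u32 pairs, two nvkm_wo32() writes per entry, which the HUB fuc walks on first context load; the context header then records the pair count and where the list lives in the channel's VM. A sketch of building such a list, assuming only that pair layout:

#include <stdint.h>
#include <stdio.h>

#define MMIO_MAX 64 /* pairs; the driver sizes the buffer at 0x1000 bytes */

struct mmio_list {
        uint32_t words[MMIO_MAX * 2];
        int      nr; /* number of u32 words written, i.e. 2 per entry */
};

/* Append one (register, value) pair, mirroring the two nvkm_wo32()
 * writes per entry in gf100_gr_chan_new(). */
static void mmio_emit(struct mmio_list *l, uint32_t addr, uint32_t data)
{
        l->words[l->nr++] = addr;
        l->words[l->nr++] = data;
}

int main(void)
{
        struct mmio_list l = { .nr = 0 };

        mmio_emit(&l, 0x408004, 0x00000000);
        mmio_emit(&l, 0x418810, 0x80000000);

        /* The context header stores the entry count as nr / 2. */
        printf("entries: %d\n", l.nr / 2);
        return 0;
}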
@@ -331,49 +402,14 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
data |= info >> mmio->shift;
}
- nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
- nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+ nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+ nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
mmio++;
}
-
- for (i = 0; i < priv->size; i += 4)
- nv_wo32(chan, i, priv->data[i / 4]);
-
- if (!priv->firmware) {
- nv_wo32(chan, 0x00, chan->mmio_nr / 2);
- nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
- } else {
- nv_wo32(chan, 0xf4, 0);
- nv_wo32(chan, 0xf8, 0);
- nv_wo32(chan, 0x10, chan->mmio_nr / 2);
- nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
- nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
- nv_wo32(chan, 0x1c, 1);
- nv_wo32(chan, 0x20, 0);
- nv_wo32(chan, 0x28, 0);
- nv_wo32(chan, 0x2c, 0);
- }
-
+ nvkm_done(chan->mmio);
return 0;
}
-void
-gf100_gr_context_dtor(struct nvkm_object *object)
-{
- struct gf100_gr_chan *chan = (void *)object;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
- nvkm_gpuobj_unmap(&chan->data[i].vma);
- nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
- }
-
- nvkm_gpuobj_unmap(&chan->mmio_vma);
- nvkm_gpuobj_ref(NULL, &chan->mmio);
-
- nvkm_gr_context_destroy(&chan->base);
-}
-
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -635,7 +671,7 @@ gf100_gr_pack_mmio[] = {
******************************************************************************/
void
-gf100_gr_zbc_init(struct gf100_gr_priv *priv)
+gf100_gr_zbc_init(struct gf100_gr *gr)
{
const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -645,27 +681,62 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
0x00000000, 0x00000000, 0x00000000, 0x00000000 };
const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
- struct nvkm_ltc *ltc = nvkm_ltc(priv);
+ struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
int index;
- if (!priv->zbc_color[0].format) {
- gf100_gr_zbc_color_get(priv, 1, & zero[0], &zero[4]);
- gf100_gr_zbc_color_get(priv, 2, & one[0], &one[4]);
- gf100_gr_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
- gf100_gr_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
- gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
- gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+ if (!gr->zbc_color[0].format) {
+ gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]);
+ gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]);
+ gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]);
+ gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]);
+ gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
+ gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
}
for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
- gf100_gr_zbc_clear_color(priv, index);
+ gf100_gr_zbc_clear_color(gr, index);
for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
- gf100_gr_zbc_clear_depth(priv, index);
+ gf100_gr_zbc_clear_depth(gr, index);
+}
+
+/*
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+ bool gr_enabled, ctxsw_active, gr_busy;
+
+ do {
+ /*
+ * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+ * up-to-date
+ */
+ nvkm_rd32(device, 0x400700);
+
+ gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
+ ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
+ gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
+
+ if (!gr_enabled || (!gr_busy && !ctxsw_active))
+ return 0;
+ } while (time_before(jiffies, end_jiffies));
+
+ nvkm_error(subdev,
+ "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+ gr_enabled, ctxsw_active, gr_busy);
+ return -EAGAIN;
}
void
-gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_gr_pack *pack;
const struct gf100_gr_init *init;
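[Editor's note] gf100_gr_wait_idle() above is a bounded poll: it reads 0x400700 first so FIFO_ENGINE_STATUS (0x2640) reads back fresh, then declares GR idle once it is either disabled in the MC enable register (0x200) or both not busy and not context-switching, giving up after 2000ms. The shape of that loop as a standalone sketch (read_reg() is a hypothetical stand-in for nvkm_rd32(); the driver takes its deadline from jiffies + msecs_to_jiffies(2000)):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <stdio.h>

/* Hypothetical register read; stands in for nvkm_rd32(). Returns an
 * "idle" snapshot here so the example terminates immediately. */
static uint32_t read_reg(uint32_t addr)
{
        (void)addr;
        return 0;
}

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until GR idles or a 2000ms deadline passes, mirroring the
 * gr_enabled / ctxsw_active / gr_busy test in gf100_gr_wait_idle(). */
static int wait_idle(void)
{
        uint64_t deadline = now_ms() + 2000;

        do {
                read_reg(0x400700); /* flush so 0x2640 reads back fresh */

                bool gr_enabled   = read_reg(0x200) & 0x1000;
                bool ctxsw_active = read_reg(0x2640) & 0x8000;
                bool gr_busy      = read_reg(0x40060c) & 0x1;

                if (!gr_enabled || (!gr_busy && !ctxsw_active))
                        return 0;
        } while (now_ms() < deadline);

        return -1; /* -EAGAIN in the driver */
}

int main(void)
{
        printf("wait_idle: %d\n", wait_idle());
        return 0;
}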
@@ -673,43 +744,54 @@ gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
u32 next = init->addr + init->count * init->pitch;
u32 addr = init->addr;
while (addr < next) {
- nv_wr32(priv, addr, init->data);
+ nvkm_wr32(device, addr, init->data);
addr += init->pitch;
}
}
}
void
-gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_gr_pack *pack;
const struct gf100_gr_init *init;
u32 data = 0;
- nv_wr32(priv, 0x400208, 0x80000000);
+ nvkm_wr32(device, 0x400208, 0x80000000);
pack_for_each_init(init, pack, p) {
u32 next = init->addr + init->count * init->pitch;
u32 addr = init->addr;
if ((pack == p && init == p->init) || data != init->data) {
- nv_wr32(priv, 0x400204, init->data);
+ nvkm_wr32(device, 0x400204, init->data);
data = init->data;
}
while (addr < next) {
- nv_wr32(priv, 0x400200, addr);
- nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+ nvkm_wr32(device, 0x400200, addr);
+ /*
+ * Wait for GR to go idle after submitting a
+ * GO_IDLE bundle
+ */
+ if ((addr & 0xffff) == 0xe100)
+ gf100_gr_wait_idle(gr);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+ break;
+ );
addr += init->pitch;
}
}
- nv_wr32(priv, 0x400208, 0x00000000);
+ nvkm_wr32(device, 0x400208, 0x00000000);
}
void
-gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_gr_pack *pack;
const struct gf100_gr_init *init;
u32 data = 0;
@@ -720,79 +802,75 @@ gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
u32 addr = init->addr;
if ((pack == p && init == p->init) || data != init->data) {
- nv_wr32(priv, 0x40448c, init->data);
+ nvkm_wr32(device, 0x40448c, init->data);
data = init->data;
}
while (addr < next) {
- nv_wr32(priv, 0x404488, ctrl | (addr << 14));
+ nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
addr += init->pitch;
}
}
}
u64
-gf100_gr_units(struct nvkm_gr *gr)
+gf100_gr_units(struct nvkm_gr *base)
{
- struct gf100_gr_priv *priv = (void *)gr;
+ struct gf100_gr *gr = gf100_gr(base);
u64 cfg;
- cfg = (u32)priv->gpc_nr;
- cfg |= (u32)priv->tpc_total << 8;
- cfg |= (u64)priv->rop_nr << 32;
+ cfg = (u32)gr->gpc_nr;
+ cfg |= (u32)gr->tpc_total << 8;
+ cfg |= (u64)gr->rop_nr << 32;
return cfg;
}
-static const struct nvkm_enum gk104_sked_error[] = {
- { 7, "CONSTANT_BUFFER_SIZE" },
- { 9, "LOCAL_MEMORY_SIZE_POS" },
- { 10, "LOCAL_MEMORY_SIZE_NEG" },
- { 11, "WARP_CSTACK_SIZE" },
- { 12, "TOTAL_TEMP_SIZE" },
- { 13, "REGISTER_COUNT" },
- { 18, "TOTAL_THREADS" },
- { 20, "PROGRAM_OFFSET" },
- { 21, "SHARED_MEMORY_SIZE" },
- { 25, "SHARED_CONFIG_TOO_SMALL" },
- { 26, "TOTAL_REGISTER_COUNT" },
+static const struct nvkm_bitfield gk104_sked_error[] = {
+ { 0x00000080, "CONSTANT_BUFFER_SIZE" },
+ { 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
+ { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
+ { 0x00000800, "WARP_CSTACK_SIZE" },
+ { 0x00001000, "TOTAL_TEMP_SIZE" },
+ { 0x00002000, "REGISTER_COUNT" },
+ { 0x00040000, "TOTAL_THREADS" },
+ { 0x00100000, "PROGRAM_OFFSET" },
+ { 0x00200000, "SHARED_MEMORY_SIZE" },
+ { 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
+ { 0x04000000, "TOTAL_REGISTER_COUNT" },
{}
};
-static const struct nvkm_enum gf100_gpc_rop_error[] = {
- { 1, "RT_PITCH_OVERRUN" },
- { 4, "RT_WIDTH_OVERRUN" },
- { 5, "RT_HEIGHT_OVERRUN" },
- { 7, "ZETA_STORAGE_TYPE_MISMATCH" },
- { 8, "RT_STORAGE_TYPE_MISMATCH" },
- { 10, "RT_LINEAR_MISMATCH" },
+static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
+ { 0x00000002, "RT_PITCH_OVERRUN" },
+ { 0x00000010, "RT_WIDTH_OVERRUN" },
+ { 0x00000020, "RT_HEIGHT_OVERRUN" },
+ { 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
+ { 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
+ { 0x00000400, "RT_LINEAR_MISMATCH" },
{}
};
static void
-gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ char error[128];
u32 trap[4];
- int i;
- trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
- trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
- trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
- trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+ trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
+ trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
+ trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
+ trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));
- nv_error(priv, "GPC%d/PROP trap:", gpc);
- for (i = 0; i <= 29; ++i) {
- if (!(trap[0] & (1 << i)))
- continue;
- pr_cont(" ");
- nvkm_enum_print(gf100_gpc_rop_error, i);
- }
- pr_cont("\n");
+ nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);
- nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n",
- trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f,
- trap[3] & 0xff);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
+ "format = %x, storage type = %x\n",
+ gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
+ (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
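[Editor's note] The error-table hunks above convert nvkm_enum tables keyed by bit index into nvkm_bitfield tables keyed by mask: each { n, "NAME" } entry becomes { 1u << n, "NAME" } (7 becomes 0x00000080, 18 becomes 0x00040000, and so on), which is what lets nvkm_snprintbf() format a whole status word in one call. A quick standalone check of that correspondence, using a few entries from the gk104_sked_error table:

#include <stdint.h>
#include <stdio.h>

/* Bit indices from the old nvkm_enum table and the masks that replace
 * them in the new nvkm_bitfield table; each mask must equal 1u << index. */
static const struct { int index; uint32_t mask; } sked[] = {
        {  7, 0x00000080 }, /* CONSTANT_BUFFER_SIZE */
        {  9, 0x00000200 }, /* LOCAL_MEMORY_SIZE_POS */
        { 18, 0x00040000 }, /* TOTAL_THREADS */
        { 26, 0x04000000 }, /* TOTAL_REGISTER_COUNT */
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(sked) / sizeof(sked[0]); i++)
                printf("%2d -> %08x %s\n", sked[i].index, sked[i].mask,
                       (1u << sked[i].index) == sked[i].mask ? "ok" : "MISMATCH");
        return 0;
}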
@@ -815,401 +893,418 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
};
static void
-gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
- u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
- u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));
-
- nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
- nvkm_bitfield_print(gf100_mp_global_error, gerr);
- if (werr) {
- pr_cont(" ");
- nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
- }
- pr_cont("\n");
-
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
+ u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
+ const struct nvkm_enum *warp;
+ char glob[128];
+
+ nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
+ warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
+
+ nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+ "global %08x [%s] warp %04x [%s]\n",
+ gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
}
static void
-gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
{
- u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));
if (stat & 0x00000001) {
- u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
- nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+ u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
+ nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
stat &= ~0x00000001;
}
if (stat & 0x00000002) {
- gf100_gr_trap_mp(priv, gpc, tpc);
+ gf100_gr_trap_mp(gr, gpc, tpc);
stat &= ~0x00000002;
}
if (stat & 0x00000004) {
- u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
- nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+ u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
+ nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
stat &= ~0x00000004;
}
if (stat & 0x00000008) {
- u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
- nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+ u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
+ nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
stat &= ~0x00000008;
}
if (stat) {
- nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+ nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
}
}
static void
-gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
{
- u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
int tpc;
if (stat & 0x00000001) {
- gf100_gr_trap_gpc_rop(priv, gpc);
+ gf100_gr_trap_gpc_rop(gr, gpc);
stat &= ~0x00000001;
}
if (stat & 0x00000002) {
- u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
- nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
+ nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
stat &= ~0x00000002;
}
if (stat & 0x00000004) {
- u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
- nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
- nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
+ nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
stat &= ~0x00000004;
}
if (stat & 0x00000008) {
- u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
- nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
+ nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
stat &= ~0x00000009;
}
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
u32 mask = 0x00010000 << tpc;
if (stat & mask) {
- gf100_gr_trap_tpc(priv, gpc, tpc);
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+ gf100_gr_trap_tpc(gr, gpc, tpc);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
stat &= ~mask;
}
}
if (stat) {
- nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+ nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
}
}
static void
-gf100_gr_trap_intr(struct gf100_gr_priv *priv)
+gf100_gr_trap_intr(struct gf100_gr *gr)
{
- u32 trap = nv_rd32(priv, 0x400108);
- int rop, gpc, i;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 trap = nvkm_rd32(device, 0x400108);
+ int rop, gpc;
if (trap & 0x00000001) {
- u32 stat = nv_rd32(priv, 0x404000);
- nv_error(priv, "DISPATCH 0x%08x\n", stat);
- nv_wr32(priv, 0x404000, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000001);
+ u32 stat = nvkm_rd32(device, 0x404000);
+ nvkm_error(subdev, "DISPATCH %08x\n", stat);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000001);
trap &= ~0x00000001;
}
if (trap & 0x00000002) {
- u32 stat = nv_rd32(priv, 0x404600);
- nv_error(priv, "M2MF 0x%08x\n", stat);
- nv_wr32(priv, 0x404600, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000002);
+ u32 stat = nvkm_rd32(device, 0x404600);
+ nvkm_error(subdev, "M2MF %08x\n", stat);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000002);
trap &= ~0x00000002;
}
if (trap & 0x00000008) {
- u32 stat = nv_rd32(priv, 0x408030);
- nv_error(priv, "CCACHE 0x%08x\n", stat);
- nv_wr32(priv, 0x408030, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000008);
+ u32 stat = nvkm_rd32(device, 0x408030);
+ nvkm_error(subdev, "CCACHE %08x\n", stat);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000008);
trap &= ~0x00000008;
}
if (trap & 0x00000010) {
- u32 stat = nv_rd32(priv, 0x405840);
- nv_error(priv, "SHADER 0x%08x\n", stat);
- nv_wr32(priv, 0x405840, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000010);
+ u32 stat = nvkm_rd32(device, 0x405840);
+ nvkm_error(subdev, "SHADER %08x\n", stat);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000010);
trap &= ~0x00000010;
}
if (trap & 0x00000040) {
- u32 stat = nv_rd32(priv, 0x40601c);
- nv_error(priv, "UNK6 0x%08x\n", stat);
- nv_wr32(priv, 0x40601c, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000040);
+ u32 stat = nvkm_rd32(device, 0x40601c);
+ nvkm_error(subdev, "UNK6 %08x\n", stat);
+ nvkm_wr32(device, 0x40601c, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000040);
trap &= ~0x00000040;
}
if (trap & 0x00000080) {
- u32 stat = nv_rd32(priv, 0x404490);
- nv_error(priv, "MACRO 0x%08x\n", stat);
- nv_wr32(priv, 0x404490, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x00000080);
+ u32 stat = nvkm_rd32(device, 0x404490);
+ nvkm_error(subdev, "MACRO %08x\n", stat);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x00000080);
trap &= ~0x00000080;
}
if (trap & 0x00000100) {
- u32 stat = nv_rd32(priv, 0x407020);
+ u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
+ char sked[128];
- nv_error(priv, "SKED:");
- for (i = 0; i <= 29; ++i) {
- if (!(stat & (1 << i)))
- continue;
- pr_cont(" ");
- nvkm_enum_print(gk104_sked_error, i);
- }
- pr_cont("\n");
+ nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
+ nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);
- if (stat & 0x3fffffff)
- nv_wr32(priv, 0x407020, 0x40000000);
- nv_wr32(priv, 0x400108, 0x00000100);
+ if (stat)
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x400108, 0x00000100);
trap &= ~0x00000100;
}
if (trap & 0x01000000) {
- u32 stat = nv_rd32(priv, 0x400118);
- for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+ u32 stat = nvkm_rd32(device, 0x400118);
+ for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
u32 mask = 0x00000001 << gpc;
if (stat & mask) {
- gf100_gr_trap_gpc(priv, gpc);
- nv_wr32(priv, 0x400118, mask);
+ gf100_gr_trap_gpc(gr, gpc);
+ nvkm_wr32(device, 0x400118, mask);
stat &= ~mask;
}
}
- nv_wr32(priv, 0x400108, 0x01000000);
+ nvkm_wr32(device, 0x400108, 0x01000000);
trap &= ~0x01000000;
}
if (trap & 0x02000000) {
- for (rop = 0; rop < priv->rop_nr; rop++) {
- u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
- u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
- nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
+ u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
+ nvkm_error(subdev, "ROP%d %08x %08x\n",
rop, statz, statc);
- nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
}
- nv_wr32(priv, 0x400108, 0x02000000);
+ nvkm_wr32(device, 0x400108, 0x02000000);
trap &= ~0x02000000;
}
if (trap) {
- nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
- nv_wr32(priv, 0x400108, trap);
+ nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
+ nvkm_wr32(device, 0x400108, trap);
}
}
static void
-gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
+gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
{
- nv_error(priv, "%06x - done 0x%08x\n", base,
- nv_rd32(priv, base + 0x400));
- nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
- nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
- nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
- nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ nvkm_error(subdev, "%06x - done %08x\n", base,
+ nvkm_rd32(device, base + 0x400));
+ nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+ nvkm_rd32(device, base + 0x800),
+ nvkm_rd32(device, base + 0x804),
+ nvkm_rd32(device, base + 0x808),
+ nvkm_rd32(device, base + 0x80c));
+ nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+ nvkm_rd32(device, base + 0x810),
+ nvkm_rd32(device, base + 0x814),
+ nvkm_rd32(device, base + 0x818),
+ nvkm_rd32(device, base + 0x81c));
}
void
-gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_debug(struct gf100_gr *gr)
{
- u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
u32 gpc;
- gf100_gr_ctxctl_debug_unit(priv, 0x409000);
+ gf100_gr_ctxctl_debug_unit(gr, 0x409000);
for (gpc = 0; gpc < gpcnr; gpc++)
- gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+ gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
}
static void
-gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_isr(struct gf100_gr *gr)
{
- u32 stat = nv_rd32(priv, 0x409c18);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x409c18);
if (stat & 0x00000001) {
- u32 code = nv_rd32(priv, 0x409814);
+ u32 code = nvkm_rd32(device, 0x409814);
if (code == E_BAD_FWMTHD) {
- u32 class = nv_rd32(priv, 0x409808);
- u32 addr = nv_rd32(priv, 0x40980c);
+ u32 class = nvkm_rd32(device, 0x409808);
+ u32 addr = nvkm_rd32(device, 0x40980c);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
- u32 data = nv_rd32(priv, 0x409810);
+ u32 data = nvkm_rd32(device, 0x409810);
- nv_error(priv, "FECS MTHD subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- subc, class, mthd, data);
+ nvkm_error(subdev, "FECS MTHD subc %d class %04x "
+ "mthd %04x data %08x\n",
+ subc, class, mthd, data);
- nv_wr32(priv, 0x409c20, 0x00000001);
+ nvkm_wr32(device, 0x409c20, 0x00000001);
stat &= ~0x00000001;
} else {
- nv_error(priv, "FECS ucode error %d\n", code);
+ nvkm_error(subdev, "FECS ucode error %d\n", code);
}
}
if (stat & 0x00080000) {
- nv_error(priv, "FECS watchdog timeout\n");
- gf100_gr_ctxctl_debug(priv);
- nv_wr32(priv, 0x409c20, 0x00080000);
+ nvkm_error(subdev, "FECS watchdog timeout\n");
+ gf100_gr_ctxctl_debug(gr);
+ nvkm_wr32(device, 0x409c20, 0x00080000);
stat &= ~0x00080000;
}
if (stat) {
- nv_error(priv, "FECS 0x%08x\n", stat);
- gf100_gr_ctxctl_debug(priv);
- nv_wr32(priv, 0x409c20, stat);
+ nvkm_error(subdev, "FECS %08x\n", stat);
+ gf100_gr_ctxctl_debug(gr);
+ nvkm_wr32(device, 0x409c20, stat);
}
}
static void
-gf100_gr_intr(struct nvkm_subdev *subdev)
+gf100_gr_intr(struct nvkm_gr *base)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct nvkm_handle *handle;
- struct gf100_gr_priv *priv = (void *)subdev;
- u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
- u32 stat = nv_rd32(priv, 0x400100);
- u32 addr = nv_rd32(priv, 0x400704);
+ struct gf100_gr *gr = gf100_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
+ u32 stat = nvkm_rd32(device, 0x400100);
+ u32 addr = nvkm_rd32(device, 0x400704);
u32 mthd = (addr & 0x00003ffc);
u32 subc = (addr & 0x00070000) >> 16;
- u32 data = nv_rd32(priv, 0x400708);
- u32 code = nv_rd32(priv, 0x400110);
+ u32 data = nvkm_rd32(device, 0x400708);
+ u32 code = nvkm_rd32(device, 0x400110);
u32 class;
- int chid;
+ const char *name = "unknown";
+ int chid = -1;
- if (nv_device(priv)->card_type < NV_E0 || subc < 4)
- class = nv_rd32(priv, 0x404200 + (subc * 4));
+ chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+ if (chan) {
+ name = chan->object.client->name;
+ chid = chan->chid;
+ }
+
+ if (device->card_type < NV_E0 || subc < 4)
+ class = nvkm_rd32(device, 0x404200 + (subc * 4));
else
class = 0x0000;
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
if (stat & 0x00000001) {
/*
* notifier interrupt, only needed for cyclestats
* can be safely ignored
*/
- nv_wr32(priv, 0x400100, 0x00000001);
+ nvkm_wr32(device, 0x400100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000010) {
- handle = nvkm_handle_get_class(engctx, class);
- if (!handle || nv_call(handle->object, mthd, data)) {
- nv_error(priv,
- "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nvkm_client_name(engctx),
- subc, class, mthd, data);
+ if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
+ nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
+ "subc %d class %04x mthd %04x data %08x\n",
+ chid, inst << 12, name, subc,
+ class, mthd, data);
}
- nvkm_handle_put(handle);
- nv_wr32(priv, 0x400100, 0x00000010);
+ nvkm_wr32(device, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
if (stat & 0x00000020) {
- nv_error(priv,
- "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nvkm_client_name(engctx), subc,
- class, mthd, data);
- nv_wr32(priv, 0x400100, 0x00000020);
+ nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
+ "subc %d class %04x mthd %04x data %08x\n",
+ chid, inst << 12, name, subc, class, mthd, data);
+ nvkm_wr32(device, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
if (stat & 0x00100000) {
- nv_error(priv, "DATA_ERROR [");
- nvkm_enum_print(nv50_data_error_names, code);
- pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 12, nvkm_client_name(engctx), subc,
- class, mthd, data);
- nv_wr32(priv, 0x400100, 0x00100000);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(nv50_data_error_names, code);
+ nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
+ "subc %d class %04x mthd %04x data %08x\n",
+ code, en ? en->name : "", chid, inst << 12,
+ name, subc, class, mthd, data);
+ nvkm_wr32(device, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
- nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
- nvkm_client_name(engctx));
- gf100_gr_trap_intr(priv);
- nv_wr32(priv, 0x400100, 0x00200000);
+ nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
+ chid, inst << 12, name);
+ gf100_gr_trap_intr(gr);
+ nvkm_wr32(device, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
if (stat & 0x00080000) {
- gf100_gr_ctxctl_isr(priv);
- nv_wr32(priv, 0x400100, 0x00080000);
+ gf100_gr_ctxctl_isr(gr);
+ nvkm_wr32(device, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
if (stat) {
- nv_error(priv, "unknown stat 0x%08x\n", stat);
- nv_wr32(priv, 0x400100, stat);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_wr32(device, 0x400100, stat);
}
- nv_wr32(priv, 0x400500, 0x00010001);
- nvkm_engctx_put(engctx);
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
void
-gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
+gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int i;
- nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+ nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
for (i = 0; i < data->size / 4; i++)
- nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+ nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
- nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+ nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
for (i = 0; i < code->size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(priv, fuc_base + 0x0188, i >> 6);
- nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+ nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
+ nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
}
/* code must be padded to 0x40 words */
for (; i & 0x3f; i++)
- nv_wr32(priv, fuc_base + 0x0184, 0);
+ nvkm_wr32(device, fuc_base + 0x0184, 0);
}
static void
-gf100_gr_init_csdata(struct gf100_gr_priv *priv,
+gf100_gr_init_csdata(struct gf100_gr *gr,
const struct gf100_gr_pack *pack,
u32 falcon, u32 starstar, u32 base)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_gr_pack *iter;
const struct gf100_gr_init *init;
u32 addr = ~0, prev = ~0, xfer = 0;
u32 star, temp;
- nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
- star = nv_rd32(priv, falcon + 0x01c4);
- temp = nv_rd32(priv, falcon + 0x01c4);
+ nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
+ star = nvkm_rd32(device, falcon + 0x01c4);
+ temp = nvkm_rd32(device, falcon + 0x01c4);
if (temp > star)
star = temp;
- nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
+ nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);
pack_for_each_init(init, iter, pack) {
u32 head = init->addr - base;
@@ -1218,7 +1313,7 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
if (head != prev + 4 || xfer >= 32) {
if (xfer) {
u32 data = ((--xfer << 26) | addr);
- nv_wr32(priv, falcon + 0x01c4, data);
+ nvkm_wr32(device, falcon + 0x01c4, data);
star += 4;
}
addr = head;
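[Editor's note] gf100_gr_init_csdata() above coalesces consecutive registers into falcon transfer words: by the (--xfer << 26) | addr encoding, bits 25:0 carry the starting offset and bits 31:26 carry the run length minus one, so a run of up to 32 registers packs into a single word. A sketch of that encoding under the layout the patch implies:

#include <stdint.h>
#include <stdio.h>

/* Assumed falcon transfer-word layout, inferred from the patch:
 * bits 25:0 hold the starting address, bits 31:26 hold (count - 1),
 * so a run of up to 32 consecutive registers fits in one word. */
static uint32_t encode_xfer(uint32_t addr, uint32_t count)
{
        return ((count - 1) << 26) | addr;
}

int main(void)
{
        /* A run of 4 registers starting at 0x419800, as csdata would pack it. */
        uint32_t word = encode_xfer(0x419800, 4);

        printf("%08x (count=%u addr=%06x)\n", word,
               (word >> 26) + 1, word & 0x03ffffff);
        return 0;
}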
@@ -1230,157 +1325,166 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
}
}
- nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
- nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
- nv_wr32(priv, falcon + 0x01c4, star + 4);
+ nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
+ nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
+ nvkm_wr32(device, falcon + 0x01c4, star + 4);
}
int
-gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
- struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
- struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
int i;
- if (priv->firmware) {
+ if (gr->firmware) {
/* load fuc microcode */
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
- gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
- &priv->fuc409d);
- gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
- &priv->fuc41ad);
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+ nvkm_mc_unk260(device->mc, 0);
+ gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
+ gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+ nvkm_mc_unk260(device->mc, 1);
/* start both of them running */
- nv_wr32(priv, 0x409840, 0xffffffff);
- nv_wr32(priv, 0x41a10c, 0x00000000);
- nv_wr32(priv, 0x40910c, 0x00000000);
- nv_wr32(priv, 0x41a100, 0x00000002);
- nv_wr32(priv, 0x409100, 0x00000002);
- if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
- nv_warn(priv, "0x409800 wait failed\n");
-
- nv_wr32(priv, 0x409840, 0xffffffff);
- nv_wr32(priv, 0x409500, 0x7fffffff);
- nv_wr32(priv, 0x409504, 0x00000021);
-
- nv_wr32(priv, 0x409840, 0xffffffff);
- nv_wr32(priv, 0x409500, 0x00000000);
- nv_wr32(priv, 0x409504, 0x00000010);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x10 timeout\n");
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x41a10c, 0x00000000);
+ nvkm_wr32(device, 0x40910c, 0x00000000);
+ nvkm_wr32(device, 0x41a100, 0x00000002);
+ nvkm_wr32(device, 0x409100, 0x00000002);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) & 0x00000001)
+ break;
+ ) < 0)
return -EBUSY;
- }
- priv->size = nv_rd32(priv, 0x409800);
- nv_wr32(priv, 0x409840, 0xffffffff);
- nv_wr32(priv, 0x409500, 0x00000000);
- nv_wr32(priv, 0x409504, 0x00000016);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x16 timeout\n");
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x7fffffff);
+ nvkm_wr32(device, 0x409504, 0x00000021);
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000010);
+ if (nvkm_msec(device, 2000,
+ if ((gr->size = nvkm_rd32(device, 0x409800)))
+ break;
+ ) < 0)
return -EBUSY;
- }
- nv_wr32(priv, 0x409840, 0xffffffff);
- nv_wr32(priv, 0x409500, 0x00000000);
- nv_wr32(priv, 0x409504, 0x00000025);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x25 timeout\n");
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000016);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
+ return -EBUSY;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000025);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
return -EBUSY;
- }
- if (nv_device(priv)->chipset >= 0xe0) {
- nv_wr32(priv, 0x409800, 0x00000000);
- nv_wr32(priv, 0x409500, 0x00000001);
- nv_wr32(priv, 0x409504, 0x00000030);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x30 timeout\n");
+ if (device->chipset >= 0xe0) {
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000030);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
return -EBUSY;
- }
- nv_wr32(priv, 0x409810, 0xb00095c8);
- nv_wr32(priv, 0x409800, 0x00000000);
- nv_wr32(priv, 0x409500, 0x00000001);
- nv_wr32(priv, 0x409504, 0x00000031);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x31 timeout\n");
+ nvkm_wr32(device, 0x409810, 0xb00095c8);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000031);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
return -EBUSY;
- }
- nv_wr32(priv, 0x409810, 0x00080420);
- nv_wr32(priv, 0x409800, 0x00000000);
- nv_wr32(priv, 0x409500, 0x00000001);
- nv_wr32(priv, 0x409504, 0x00000032);
- if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
- nv_error(priv, "fuc09 req 0x32 timeout\n");
+ nvkm_wr32(device, 0x409810, 0x00080420);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000032);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
return -EBUSY;
- }
- nv_wr32(priv, 0x409614, 0x00000070);
- nv_wr32(priv, 0x409614, 0x00000770);
- nv_wr32(priv, 0x40802c, 0x00000001);
+ nvkm_wr32(device, 0x409614, 0x00000070);
+ nvkm_wr32(device, 0x409614, 0x00000770);
+ nvkm_wr32(device, 0x40802c, 0x00000001);
}
- if (priv->data == NULL) {
- int ret = gf100_grctx_generate(priv);
+ if (gr->data == NULL) {
+ int ret = gf100_grctx_generate(gr);
if (ret) {
- nv_error(priv, "failed to construct context\n");
+ nvkm_error(subdev, "failed to construct context\n");
return ret;
}
}
return 0;
} else
- if (!oclass->fecs.ucode) {
+ if (!gr->func->fecs.ucode) {
return -ENOSYS;
}
/* load HUB microcode */
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
- nv_wr32(priv, 0x4091c0, 0x01000000);
- for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
- nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
+ nvkm_mc_unk260(device->mc, 0);
+ nvkm_wr32(device, 0x4091c0, 0x01000000);
+ for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
+ nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
- nv_wr32(priv, 0x409180, 0x01000000);
- for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) {
+ nvkm_wr32(device, 0x409180, 0x01000000);
+ for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x409188, i >> 6);
- nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
+ nvkm_wr32(device, 0x409188, i >> 6);
+ nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
}
/* load GPC microcode */
- nv_wr32(priv, 0x41a1c0, 0x01000000);
- for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
- nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]);
+ nvkm_wr32(device, 0x41a1c0, 0x01000000);
+ for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
+ nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
- nv_wr32(priv, 0x41a180, 0x01000000);
- for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) {
+ nvkm_wr32(device, 0x41a180, 0x01000000);
+ for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x41a188, i >> 6);
- nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
+ nvkm_wr32(device, 0x41a188, i >> 6);
+ nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
}
- nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+ nvkm_mc_unk260(device->mc, 1);
/* load register lists */
- gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
- gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
- gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
- gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
+ gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
+ gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+ gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
+ gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
/* start HUB ucode running, it'll init the GPCs */
- nv_wr32(priv, 0x40910c, 0x00000000);
- nv_wr32(priv, 0x409100, 0x00000002);
- if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
- nv_error(priv, "HUB_INIT timed out\n");
- gf100_gr_ctxctl_debug(priv);
+ nvkm_wr32(device, 0x40910c, 0x00000000);
+ nvkm_wr32(device, 0x409100, 0x00000002);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) & 0x80000000)
+ break;
+ ) < 0) {
+ gf100_gr_ctxctl_debug(gr);
return -EBUSY;
}
- priv->size = nv_rd32(priv, 0x409804);
- if (priv->data == NULL) {
- int ret = gf100_grctx_generate(priv);
+ gr->size = nvkm_rd32(device, 0x409804);
+ if (gr->data == NULL) {
+ int ret = gf100_grctx_generate(gr);
if (ret) {
- nv_error(priv, "failed to construct context\n");
+ nvkm_error(subdev, "failed to construct context\n");
return ret;
}
}
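[Editor's note] The firmware path above talks to the FECS falcon through a fixed handshake: arm the result register (0x409840 = 0xffffffff), write the argument to 0x409500 and the command to 0x409504, then poll 0x409800 until the falcon posts a reply, bounding each wait at 2000ms; that is what each nvkm_msec() block in the hunk encodes. A standalone sketch of one such request (register roles inferred from the patch; the register I/O is stubbed):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register I/O; stands in for nvkm_wr32()/nvkm_rd32().
 * The read stub returns nonzero so the example reply arrives at once. */
static void wr32(uint32_t addr, uint32_t data)
{
        printf("wr32 %06x <- %08x\n", addr, data);
}

static uint32_t rd32(uint32_t addr)
{
        (void)addr;
        return 0x00000001;
}

/* One fuc09 request: arm the result register, post argument + command,
 * poll for a nonzero reply within a bounded number of iterations (the
 * driver uses nvkm_msec(device, 2000, ...) instead of a loop counter). */
static int fecs_request(uint32_t arg, uint32_t cmd, uint32_t *reply)
{
        int spins = 1000000;

        wr32(0x409840, 0xffffffff); /* arm/reset the result register */
        wr32(0x409500, arg);        /* request argument */
        wr32(0x409504, cmd);        /* request command; starts the fuc */

        while (spins--) {
                uint32_t r = rd32(0x409800);
                if (r) {
                        *reply = r;
                        return 0;
                }
        }
        return -1; /* -EBUSY in the driver */
}

int main(void)
{
        uint32_t reply;

        if (fecs_request(0, 0x10, &reply) == 0)
                printf("fuc09 req 0x10 -> %08x\n", reply); /* e.g. ctx size */
        return 0;
}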
@@ -1388,143 +1492,160 @@ gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
return 0;
}
-int
-gf100_gr_init(struct nvkm_object *object)
+static int
+gf100_gr_oneinit(struct nvkm_gr *base)
{
- struct gf100_gr_oclass *oclass = (void *)object->oclass;
- struct gf100_gr_priv *priv = (void *)object;
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
- u32 data[TPC_MAX / 8] = {};
- u8 tpcnr[GPC_MAX];
- int gpc, tpc, rop;
- int ret, i;
+ struct gf100_gr *gr = gf100_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int ret, i, j;
+
+ nvkm_pmu_pgob(device->pmu, false);
- ret = nvkm_gr_init(&priv->base);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+ &gr->unk4188b4);
if (ret)
return ret;
- nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
- nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
-
- gf100_gr_mmio(priv, oclass->mmio);
-
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
- data[i / 8] |= tpc << ((i % 8) * 4);
- }
-
- nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
- nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
- nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
- nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
- priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- priv->tpc_total);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
- }
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+ &gr->unk4188b8);
+ if (ret)
+ return ret;
- if (nv_device(priv)->chipset != 0xd7)
- nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
- else
- nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-
- nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
- nv_wr32(priv, 0x400500, 0x00010001);
-
- nv_wr32(priv, 0x400100, 0xffffffff);
- nv_wr32(priv, 0x40013c, 0xffffffff);
-
- nv_wr32(priv, 0x409c24, 0x000f0000);
- nv_wr32(priv, 0x404000, 0xc0000000);
- nv_wr32(priv, 0x404600, 0xc0000000);
- nv_wr32(priv, 0x408030, 0xc0000000);
- nv_wr32(priv, 0x40601c, 0xc0000000);
- nv_wr32(priv, 0x404490, 0xc0000000);
- nv_wr32(priv, 0x406018, 0xc0000000);
- nv_wr32(priv, 0x405840, 0xc0000000);
- nv_wr32(priv, 0x405844, 0x00ffffff);
- nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ nvkm_kmap(gr->unk4188b4);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+ nvkm_done(gr->unk4188b4);
+
+ nvkm_kmap(gr->unk4188b8);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+ nvkm_done(gr->unk4188b8);
+
+ gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+ gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
+ for (i = 0; i < gr->gpc_nr; i++) {
+ gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+ gr->tpc_total += gr->tpc_nr[i];
+ gr->ppc_nr[i] = gr->func->ppc_nr;
+ for (j = 0; j < gr->ppc_nr[i]; j++) {
+ u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+ gr->ppc_tpc_nr[i][j] = hweight8(mask);
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ /*XXX: these need figuring out... though it might not even matter */
+ switch (device->chipset) {
+ case 0xc0:
+ if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+ gr->magic_not_rop_nr = 0x07;
+ } else
+ if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+ gr->magic_not_rop_nr = 0x05;
+ } else
+ if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+ gr->magic_not_rop_nr = 0x06;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ gr->magic_not_rop_nr = 0x03;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ gr->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc1: /* 2/0/0/0, 1 */
+ gr->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc8: /* 4/4/3/4, 5 */
+ gr->magic_not_rop_nr = 0x06;
+ break;
+ case 0xce: /* 4/4/0/0, 4 */
+ gr->magic_not_rop_nr = 0x03;
+ break;
+ case 0xcf: /* 4/0/0/0, 3 */
+ gr->magic_not_rop_nr = 0x03;
+ break;
+ case 0xd7:
+ case 0xd9: /* 1/0/0/0, 1 */
+ case 0xea: /* gk20a */
+ case 0x12b: /* gm20b */
+ gr->magic_not_rop_nr = 0x01;
+ break;
}
- nv_wr32(priv, 0x400108, 0xffffffff);
- nv_wr32(priv, 0x400138, 0xffffffff);
- nv_wr32(priv, 0x400118, 0xffffffff);
- nv_wr32(priv, 0x400130, 0xffffffff);
- nv_wr32(priv, 0x40011c, 0xffffffff);
- nv_wr32(priv, 0x400134, 0xffffffff);
-
- nv_wr32(priv, 0x400054, 0x34ce3464);
-
- gf100_gr_zbc_init(priv);
+ return 0;
+}
- return gf100_gr_init_ctxctl(priv);
+int
+gf100_gr_init_(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+ return gr->func->init(gr);
}
-static void
+void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
kfree(fuc->data);
fuc->data = NULL;
}
+void *
+gf100_gr_dtor(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+
+ if (gr->func->dtor)
+ gr->func->dtor(gr);
+ kfree(gr->data);
+
+ gf100_gr_dtor_fw(&gr->fuc409c);
+ gf100_gr_dtor_fw(&gr->fuc409d);
+ gf100_gr_dtor_fw(&gr->fuc41ac);
+ gf100_gr_dtor_fw(&gr->fuc41ad);
+
+ nvkm_memory_del(&gr->unk4188b8);
+ nvkm_memory_del(&gr->unk4188b4);
+ return gr;
+}
+
+static const struct nvkm_gr_func
+gf100_gr_ = {
+ .dtor = gf100_gr_dtor,
+ .oneinit = gf100_gr_oneinit,
+ .init = gf100_gr_init_,
+ .intr = gf100_gr_intr,
+ .units = gf100_gr_units,
+ .chan_new = gf100_gr_chan_new,
+ .object_get = gf100_gr_object_get,
+};
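
[Note: the rewrite drops the per-object nvkm_ofuncs vtable in favour of a static const function table installed at construction time, with the dtor returning the allocation so the caller can free it. A minimal sketch of that pattern; the demo_* names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct gr;

/* Chip-independent operations, supplied once as a static const table. */
struct gr_func {
	int (*init)(struct gr *);
	void *(*dtor)(struct gr *);
};

struct gr {
	const struct gr_func *func;
	int data;
};

static int demo_init(struct gr *gr) { gr->data = 42; return 0; }
static void *demo_dtor(struct gr *gr) { return gr; }

static const struct gr_func demo = {
	.init = demo_init,
	.dtor = demo_dtor,
};

int main(void)
{
	struct gr *gr = calloc(1, sizeof(*gr));
	if (!gr)
		return 1;
	gr->func = &demo;
	gr->func->init(gr);
	printf("data = %d\n", gr->data);
	free(gr->func->dtor(gr));	/* dtor hands back what to free */
	return 0;
}

]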
+
int
-gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
+gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
- struct nvkm_device *device = nv_device(priv);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
const struct firmware *fw;
- char f[32];
+ char f[64];
+ char cname[16];
int ret;
+ int i;
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
- ret = request_firmware(&fw, f, nv_device_base(device));
+ /* Convert device name to lowercase */
+ strncpy(cname, device->chip->name, sizeof(cname));
+ cname[sizeof(cname) - 1] = '\0';
+ i = strlen(cname);
+ while (i) {
+ --i;
+ cname[i] = tolower(cname[i]);
+ }
+
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+ ret = request_firmware(&fw, f, device->dev);
if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, nv_device_base(device));
- if (ret) {
- nv_error(priv, "failed to load %s\n", fwname);
- return ret;
- }
+ nvkm_error(subdev, "failed to load %s\n", fwname);
+ return ret;
}
fuc->size = fw->size;
@@ -1533,126 +1654,150 @@ gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
return (fuc->data != NULL) ? 0 : -ENOMEM;
}
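
[Note: firmware lookup changes from "nouveau/nv%02x_%s" keyed on the chipset id to "nvidia/<chip>/<fw>.bin" keyed on the lowercased chip name, and the fallback path is dropped. A self-contained sketch of the path construction, assuming a chip name string such as "GK20A":

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void fw_path(char *out, size_t len, const char *chip, const char *fw)
{
	char cname[16];
	size_t i;

	/* Copy and lowercase the chip name, as the hunk above does. */
	strncpy(cname, chip, sizeof(cname));
	cname[sizeof(cname) - 1] = '\0';
	for (i = 0; cname[i]; i++)
		cname[i] = tolower((unsigned char)cname[i]);
	snprintf(out, len, "nvidia/%s/%s.bin", cname, fw);
}

int main(void)
{
	char f[64];

	fw_path(f, sizeof(f), "GK20A", "fecs_inst");
	printf("%s\n", f);	/* nvidia/gk20a/fecs_inst.bin */
	return 0;
}

]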
-void
-gf100_gr_dtor(struct nvkm_object *object)
+int
+gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
+ int index, struct gf100_gr *gr)
{
- struct gf100_gr_priv *priv = (void *)object;
+ int ret;
- kfree(priv->data);
+ gr->func = func;
+ gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
+ func->fecs.ucode == NULL);
- gf100_gr_dtor_fw(&priv->fuc409c);
- gf100_gr_dtor_fw(&priv->fuc409d);
- gf100_gr_dtor_fw(&priv->fuc41ac);
- gf100_gr_dtor_fw(&priv->fuc41ad);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+ gr->firmware || func->fecs.ucode != NULL,
+ &gr->base);
+ if (ret)
+ return ret;
- nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
- nvkm_gpuobj_ref(NULL, &priv->unk4188b4);
+ if (gr->firmware) {
+ nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
+ if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+ gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+ gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+ gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+ return -ENODEV;
+ }
- nvkm_gr_destroy(&priv->base);
+ return 0;
}
int
-gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *bclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
{
- struct gf100_gr_oclass *oclass = (void *)bclass;
- struct nvkm_device *device = nv_device(parent);
- struct gf100_gr_priv *priv;
- bool use_ext_fw, enable;
- int ret, i, j;
+ struct gf100_gr *gr;
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ *pgr = &gr->base;
+ return gf100_gr_ctor(func, device, index, gr);
+}
- use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
- oclass->fecs.ucode == NULL);
- enable = use_ext_fw || oclass->fecs.ucode != NULL;
+int
+gf100_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, rop;
+ int i;
- ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
- nv_subdev(priv)->unit = 0x08001000;
- nv_subdev(priv)->intr = gf100_gr_intr;
+ gf100_gr_mmio(gr, gr->func->mmio);
- priv->base.units = gf100_gr_units;
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
- if (use_ext_fw) {
- nv_info(priv, "using external firmware\n");
- if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
- gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
- gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
- gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
- return -ENODEV;
- priv->firmware = true;
+ data[i / 8] |= tpc << ((i % 8) * 4);
}
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
- &priv->unk4188b4);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
- &priv->unk4188b8);
- if (ret)
- return ret;
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(priv->unk4188b4, i, 0x00000010);
- nv_wo32(priv->unk4188b8, i, 0x00000010);
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
- priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
- for (i = 0; i < priv->gpc_nr; i++) {
- priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
- priv->tpc_total += priv->tpc_nr[i];
- priv->ppc_nr[i] = oclass->ppc_nr;
- for (j = 0; j < priv->ppc_nr[i]; j++) {
- u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
- priv->ppc_tpc_nr[i][j] = hweight8(mask);
+ if (device->chipset != 0xd7)
+ nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+ else
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+ nvkm_wr32(device, 0x409c24, 0x000f0000);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x40601c, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+ nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
- /*XXX: these need figuring out... though it might not even matter */
- switch (nv_device(priv)->chipset) {
- case 0xc0:
- if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
- priv->magic_not_rop_nr = 0x07;
- } else
- if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
- priv->magic_not_rop_nr = 0x05;
- } else
- if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
- priv->magic_not_rop_nr = 0x06;
- }
- break;
- case 0xc3: /* 450, 4/0/0/0, 2 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xc4: /* 460, 3/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc1: /* 2/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc8: /* 4/4/3/4, 5 */
- priv->magic_not_rop_nr = 0x06;
- break;
- case 0xce: /* 4/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xcf: /* 4/0/0/0, 3 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xd7:
- case 0xd9: /* 1/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
}
- nv_engine(priv)->cclass = *oclass->cclass;
- nv_engine(priv)->sclass = oclass->sclass;
- return 0;
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ nvkm_wr32(device, 0x400054, 0x34ce3464);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
}
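
[Note: the screen-tile mapping loop in gf100_gr_init() deals TPCs to GPCs round-robin, skipping exhausted GPCs, and packs each assigned TPC index into successive 4-bit fields of the words written to GPC_BCAST(0x0980..0x098c). A userspace model of the packing; the per-GPC counts are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t tpc_nr[4] = { 3, 4, 4, 4 };	/* e.g. a 15-TPC layout */
	uint8_t tpcnr[4];
	uint32_t data[2] = { 0 };
	int total = 15, gpc = -1, tpc, i;

	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (i = 0; i < total; i++) {
		do {
			gpc = (gpc + 1) % 4;	/* next GPC with TPCs left */
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;

		/* 4 bits per entry, 8 entries per 32-bit word */
		data[i / 8] |= (uint32_t)tpc << ((i % 8) * 4);
	}
	for (i = 0; i < 2; i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}

]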
#include "fuc/hubgf100.fuc3.h"
@@ -1675,18 +1820,24 @@ gf100_gr_gpccs_ucode = {
.data.size = sizeof(gf100_grgpc_data),
};
-struct nvkm_oclass *
-gf100_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf100_grctx_oclass,
- .sclass = gf100_gr_sclass,
+static const struct gf100_gr_func
+gf100_gr = {
+ .init = gf100_gr_init,
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+ .grctx = &gf100_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf100_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..4611961b1187 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -21,11 +21,14 @@
*
* Authors: Ben Skeggs
*/
-#ifndef __NVC0_GR_H__
-#define __NVC0_GR_H__
-#include <engine/gr.h>
+#ifndef __GF100_GR_H__
+#define __GF100_GR_H__
+#define gf100_gr(p) container_of((p), struct gf100_gr, base)
+#include "priv.h"
+#include <core/gpuobj.h>
#include <subdev/ltc.h>
+#include <subdev/mmu.h>
#define GPC_MAX 32
#define TPC_MAX (GPC_MAX * 8)
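
[Note: the new gf100_gr(p) macro is the usual container_of() downcast from the embedded nvkm_gr base to the wrapping gf100_gr. A minimal model of the mechanism (the kernel version additionally type-checks the member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int x; };
struct gr { int magic; struct base base; };

int main(void)
{
	struct gr g = { .magic = 0xc0, .base = { 1 } };
	struct base *b = &g.base;

	/* Recover the outer object from a pointer to its member. */
	struct gr *back = container_of(b, struct gr, base);
	printf("magic = 0x%x\n", back->magic);	/* 0xc0 */
	return 0;
}

]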
@@ -67,7 +70,8 @@ struct gf100_gr_zbc_depth {
u32 l2;
};
-struct gf100_gr_priv {
+struct gf100_gr {
+ const struct gf100_gr_func *func;
struct nvkm_gr base;
struct gf100_gr_fuc fuc409c;
@@ -76,6 +80,15 @@ struct gf100_gr_priv {
struct gf100_gr_fuc fuc41ad;
bool firmware;
+ /*
+ * Used if the register packs are loaded from NVIDIA fw instead of
+ * using hardcoded arrays.
+ */
+ struct gf100_gr_pack *fuc_sw_nonctx;
+ struct gf100_gr_pack *fuc_sw_ctx;
+ struct gf100_gr_pack *fuc_bundle;
+ struct gf100_gr_pack *fuc_method;
+
struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
@@ -86,8 +99,8 @@ struct gf100_gr_priv {
u8 ppc_nr[GPC_MAX];
u8 ppc_tpc_nr[GPC_MAX][4];
- struct nvkm_gpuobj *unk4188b4;
- struct nvkm_gpuobj *unk4188b8;
+ struct nvkm_memory *unk4188b4;
+ struct nvkm_memory *unk4188b8;
struct gf100_gr_data mmio_data[4];
struct gf100_gr_mmio mmio_list[4096/8];
@@ -97,48 +110,65 @@ struct gf100_gr_priv {
u8 magic_not_rop_nr;
};
+int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
+ int, struct gf100_gr *);
+int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+ int, struct nvkm_gr **);
+void *gf100_gr_dtor(struct nvkm_gr *);
+
+struct gf100_gr_func {
+ void (*dtor)(struct gf100_gr *);
+ int (*init)(struct gf100_gr *);
+ void (*init_gpc_mmu)(struct gf100_gr *);
+ void (*set_hww_esr_report_mask)(struct gf100_gr *);
+ const struct gf100_gr_pack *mmio;
+ struct {
+ struct gf100_gr_ucode *ucode;
+ } fecs;
+ struct {
+ struct gf100_gr_ucode *ucode;
+ } gpccs;
+ int ppc_nr;
+ const struct gf100_grctx_func *grctx;
+ struct nvkm_sclass sclass[];
+};
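
[Note: each per-chip gf100_gr_func now carries its supported classes in a {}-terminated sclass[] flexible array instead of a separate oclass list. A sketch of scanning such a sentinel-terminated table; the class numbers are illustrative:

#include <stdio.h>

struct sclass { int minver, maxver; unsigned oclass; };

/* A zero oclass in the empty trailing initializer acts as sentinel. */
static const struct sclass demo[] = {
	{ -1, -1, 0x902d },	/* e.g. FERMI_TWOD_A */
	{ -1, -1, 0x9039 },	/* e.g. FERMI_MEMORY_TO_MEMORY_FORMAT_A */
	{ 0 }
};

int main(void)
{
	const struct sclass *s;

	for (s = demo; s->oclass; s++)
		printf("class 0x%04x [%d..%d]\n",
		       s->oclass, s->minver, s->maxver);
	return 0;
}

]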
+
+int gf100_gr_init(struct gf100_gr *);
+
+int gk104_gr_init(struct gf100_gr *);
+
+int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+ int, struct nvkm_gr **);
+void gk20a_gr_dtor(struct gf100_gr *);
+int gk20a_gr_init(struct gf100_gr *);
+
+int gm204_gr_init(struct gf100_gr *);
+
+#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+
struct gf100_gr_chan {
- struct nvkm_gr_chan base;
+ struct nvkm_object object;
+ struct gf100_gr *gr;
- struct nvkm_gpuobj *mmio;
+ struct nvkm_memory *mmio;
struct nvkm_vma mmio_vma;
int mmio_nr;
+
struct {
- struct nvkm_gpuobj *mem;
+ struct nvkm_memory *mem;
struct nvkm_vma vma;
} data[4];
};
-int gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void gf100_gr_context_dtor(struct nvkm_object *);
-
-void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
+void gf100_gr_ctxctl_debug(struct gf100_gr *);
+void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
+int gf100_gr_ctor_fw(struct gf100_gr *, const char *,
+ struct gf100_gr_fuc *);
u64 gf100_gr_units(struct nvkm_gr *);
-int gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
-void gf100_gr_dtor(struct nvkm_object *);
-int gf100_gr_init(struct nvkm_object *);
-void gf100_gr_zbc_init(struct gf100_gr_priv *);
-
-int gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
-int gk104_gr_init(struct nvkm_object *);
-
-int gm204_gr_init(struct nvkm_object *);
+void gf100_gr_zbc_init(struct gf100_gr *);
-extern struct nvkm_ofuncs gf100_fermi_ofuncs;
-
-extern struct nvkm_oclass gf100_gr_sclass[];
-extern struct nvkm_omthds gf100_gr_9097_omthds[];
-extern struct nvkm_omthds gf100_gr_90c0_omthds[];
-extern struct nvkm_oclass gf110_gr_sclass[];
-extern struct nvkm_oclass gk110_gr_sclass[];
-extern struct nvkm_oclass gm204_gr_sclass[];
+extern const struct nvkm_object_func gf100_fermi;
struct gf100_gr_init {
u32 addr;
@@ -167,24 +197,11 @@ extern struct gf100_gr_ucode gf100_gr_gpccs_ucode;
extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
-struct gf100_gr_oclass {
- struct nvkm_oclass base;
- struct nvkm_oclass **cclass;
- struct nvkm_oclass *sclass;
- const struct gf100_gr_pack *mmio;
- struct {
- struct gf100_gr_ucode *ucode;
- } fecs;
- struct {
- struct gf100_gr_ucode *ucode;
- } gpccs;
- int ppc_nr;
-};
-
-void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-int gf100_gr_init_ctxctl(struct gf100_gr_priv *);
+int gf100_gr_wait_idle(struct gf100_gr *);
+void gf100_gr_mmio(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
+int gf100_gr_init_ctxctl(struct gf100_gr *);
/* register init value lists */
@@ -260,7 +277,7 @@ extern const struct gf100_gr_init gm107_gr_init_tex_0[];
extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
-void gm107_gr_init_bios(struct gf100_gr_priv *);
+void gm107_gr_init_bios(struct gf100_gr *);
extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 20d3b85db3b5..8f253e0a22f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <nvif/class.h>
+
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -110,18 +112,24 @@ gf104_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gf104_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xc3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf104_grctx_oclass,
- .sclass = gf100_gr_sclass,
+static const struct gf100_gr_func
+gf104_gr = {
+ .init = gf100_gr_init,
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+ .grctx = &gf104_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8df73421c78c..815a5aafa245 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -27,20 +27,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf108_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
- { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -117,18 +103,25 @@ gf108_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gf108_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xc1),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf108_grctx_oclass,
- .sclass = gf108_gr_sclass,
+static const struct gf100_gr_func
+gf108_gr = {
+ .init = gf100_gr_init,
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+ .grctx = &gf108_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_B, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf108_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index ef76e2dd1d31..d13187409d68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -27,21 +27,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gf110_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
- { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -99,18 +84,26 @@ gf110_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gf110_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xc8),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf110_grctx_oclass,
- .sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf110_gr = {
+ .init = gf100_gr_init,
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+ .grctx = &gf110_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_B, &gf100_fermi },
+ { -1, -1, FERMI_C, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 871ac5f806f6..28483d8bf3d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <nvif/class.h>
+
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -118,19 +120,27 @@ gf117_gr_gpccs_ucode = {
.data.size = sizeof(gf117_grgpc_data),
};
-struct nvkm_oclass *
-gf117_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xd7),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf117_grctx_oclass,
- .sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf117_gr = {
+ .init = gf100_gr_init,
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,
.ppc_nr = 1,
-}.base;
+ .grctx = &gf117_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_B, &gf100_fermi },
+ { -1, -1, FERMI_C, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf117_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index e6dd651e2636..9811a72e0313 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <nvif/class.h>
+
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -173,18 +175,26 @@ gf119_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gf119_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xd9),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gf100_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gf119_grctx_oclass,
- .sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf119_gr = {
+ .init = gf100_gr_init,
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+ .grctx = &gf119_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+ { -1, -1, FERMI_A, &gf100_fermi },
+ { -1, -1, FERMI_B, &gf100_fermi },
+ { -1, -1, FERMI_C, &gf100_fermi },
+ { -1, -1, FERMI_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gf119_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 46f7844eca70..abf54928a1a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,24 +24,9 @@
#include "gf100.h"
#include "ctxgf100.h"
-#include <subdev/pmu.h>
-
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
- { KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -193,132 +178,112 @@ gk104_gr_pack_mmio[] = {
******************************************************************************/
int
-gk104_gr_init(struct nvkm_object *object)
+gk104_gr_init(struct gf100_gr *gr)
{
- struct gf100_gr_oclass *oclass = (void *)object->oclass;
- struct gf100_gr_priv *priv = (void *)object;
- struct nvkm_pmu *pmu = nvkm_pmu(priv);
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
- int ret, i;
-
- if (pmu)
- pmu->pgob(pmu, false);
+ int i;
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
+ nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
- nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
- nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+ gf100_gr_mmio(gr, gr->func->mmio);
- gf100_gr_mmio(priv, oclass->mmio);
-
- nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
memset(data, 0x00, sizeof(data));
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
data[i / 8] |= tpc << ((i % 8) * 4);
}
- nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
- nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
- nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
- nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
- priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- priv->tpc_total);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
- nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
- nv_wr32(priv, 0x400500, 0x00010001);
-
- nv_wr32(priv, 0x400100, 0xffffffff);
- nv_wr32(priv, 0x40013c, 0xffffffff);
-
- nv_wr32(priv, 0x409ffc, 0x00000000);
- nv_wr32(priv, 0x409c14, 0x00003e3e);
- nv_wr32(priv, 0x409c24, 0x000f0001);
- nv_wr32(priv, 0x404000, 0xc0000000);
- nv_wr32(priv, 0x404600, 0xc0000000);
- nv_wr32(priv, 0x408030, 0xc0000000);
- nv_wr32(priv, 0x404490, 0xc0000000);
- nv_wr32(priv, 0x406018, 0xc0000000);
- nv_wr32(priv, 0x407020, 0x40000000);
- nv_wr32(priv, 0x405840, 0xc0000000);
- nv_wr32(priv, 0x405844, 0x00ffffff);
- nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+ nvkm_wr32(device, 0x409ffc, 0x00000000);
+ nvkm_wr32(device, 0x409c14, 0x00003e3e);
+ nvkm_wr32(device, 0x409c24, 0x000f0001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+ nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
}
- nv_wr32(priv, 0x400108, 0xffffffff);
- nv_wr32(priv, 0x400138, 0xffffffff);
- nv_wr32(priv, 0x400118, 0xffffffff);
- nv_wr32(priv, 0x400130, 0xffffffff);
- nv_wr32(priv, 0x40011c, 0xffffffff);
- nv_wr32(priv, 0x400134, 0xffffffff);
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
- nv_wr32(priv, 0x400054, 0x34ce3464);
+ nvkm_wr32(device, 0x400054, 0x34ce3464);
- gf100_gr_zbc_init(priv);
+ gf100_gr_zbc_init(gr);
- return gf100_gr_init_ctxctl(priv);
-}
-
-int
-gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_pmu *pmu = nvkm_pmu(parent);
- if (pmu)
- pmu->pgob(pmu, false);
- return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+ return gf100_gr_init_ctxctl(gr);
}
#include "fuc/hubgk104.fuc3.h"
@@ -341,19 +306,25 @@ gk104_gr_gpccs_ucode = {
.data.size = sizeof(gk104_grgpc_data),
};
-struct nvkm_oclass *
-gk104_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xe4),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gk104_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gk104_grctx_oclass,
- .sclass = gk104_gr_sclass,
+static const struct gf100_gr_func
+gk104_gr = {
+ .init = gk104_gr_init,
.mmio = gk104_gr_pack_mmio,
.fecs.ucode = &gk104_gr_fecs_ucode,
.gpccs.ucode = &gk104_gr_gpccs_ucode,
.ppc_nr = 1,
-}.base;
+ .grctx = &gk104_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+ { -1, -1, KEPLER_A, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gk104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4cd8e5546af..32aa2946e7b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -29,19 +29,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gk110_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
- { KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -193,19 +180,25 @@ gk110_gr_gpccs_ucode = {
.data.size = sizeof(gk110_grgpc_data),
};
-struct nvkm_oclass *
-gk110_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xf0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gk104_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gk110_grctx_oclass,
- .sclass = gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110_gr = {
+ .init = gk104_gr_init,
.mmio = gk110_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
.ppc_nr = 2,
-}.base;
+ .grctx = &gk110_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, KEPLER_B, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gk110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 9ff9eab0ccaf..22f88afbf35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <nvif/class.h>
+
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -98,19 +100,25 @@ gk110b_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gk110b_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xf1),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gk104_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gk110b_grctx_oclass,
- .sclass = gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110b_gr = {
+ .init = gk104_gr_init,
.mmio = gk110b_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
.ppc_nr = 2,
-}.base;
+ .grctx = &gk110b_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, KEPLER_B, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 85f44a3d5d11..ee7554fc87dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -29,19 +29,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk208_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
- { KEPLER_B, &gf100_fermi_ofuncs },
- { KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -172,19 +159,25 @@ gk208_gr_gpccs_ucode = {
.data.size = sizeof(gk208_grgpc_data),
};
-struct nvkm_oclass *
-gk208_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0x08),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gk104_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gk208_grctx_oclass,
- .sclass = gk208_gr_sclass,
+static const struct gf100_gr_func
+gk208_gr = {
+ .init = gk104_gr_init,
.mmio = gk208_gr_pack_mmio,
.fecs.ucode = &gk208_gr_fecs_ucode,
.gpccs.ucode = &gk208_gr_gpccs_ucode,
.ppc_nr = 1,
-}.base;
+ .grctx = &gk208_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, KEPLER_B, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gk208_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 40ff5eb9180c..b8758d3b8b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,28 +22,335 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/timer.h>
+
#include <nvif/class.h>
-static struct nvkm_oclass
-gk20a_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
- { KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
+static void
+gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
+{
+ vfree(pack);
+}
+
+struct gk20a_fw_av
+{
+ u32 addr;
+ u32 data;
+};
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+{
+ struct gf100_gr_init *init;
+ struct gf100_gr_pack *pack;
+ const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+ int i;
+
+ pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+ if (!pack)
+ return ERR_PTR(-ENOMEM);
+
+ init = (void *)(pack + 2);
+
+ pack[0].init = init;
+
+ for (i = 0; i < nent; i++) {
+ struct gf100_gr_init *ent = &init[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+
+ ent->addr = av->addr;
+ ent->data = av->data;
+ ent->count = 1;
+ ent->pitch = 1;
+ }
+
+ return pack;
+}
+
+struct gk20a_fw_aiv
+{
+ u32 addr;
+ u32 index;
+ u32 data;
};
-struct nvkm_oclass *
-gk20a_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gk104_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gk20a_grctx_oclass,
- .sclass = gk20a_gr_sclass,
- .mmio = gk104_gr_pack_mmio,
+static struct gf100_gr_pack *
+gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+{
+ struct gf100_gr_init *init;
+ struct gf100_gr_pack *pack;
+ const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+ int i;
+
+ pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+ if (!pack)
+ return ERR_PTR(-ENOMEM);
+
+ init = (void *)(pack + 2);
+
+ pack[0].init = init;
+
+ for (i = 0; i < nent; i++) {
+ struct gf100_gr_init *ent = &init[i];
+ struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+
+ ent->addr = av->addr;
+ ent->data = av->data;
+ ent->count = 1;
+ ent->pitch = 1;
+ }
+
+ return pack;
+}
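
[Note: gk20a_gr_av_to_init() and gk20a_gr_aiv_to_init() reinterpret the raw firmware blob as fixed-size records and expand each into a one-register gf100_gr_init entry (count = pitch = 1). A userspace model of the av variant with a made-up two-entry blob:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_av { uint32_t addr, data; };
struct init { uint32_t addr, data; uint8_t count, pitch; };

int main(void)
{
	const struct fw_av blob[] = {
		{ 0x419e44, 0x1ffffe }, { 0x419e4c, 0x7f }
	};
	const size_t nent = sizeof(blob) / sizeof(blob[0]);
	struct init *init = calloc(nent + 1, sizeof(*init)); /* +1 sentinel */
	size_t i;

	if (!init)
		return 1;
	for (i = 0; i < nent; i++) {
		init[i].addr  = blob[i].addr;
		init[i].data  = blob[i].data;
		init[i].count = 1;	/* one register per entry */
		init[i].pitch = 1;
	}
	for (i = 0; i < nent; i++)
		printf("wr 0x%06x <- 0x%08x\n", init[i].addr, init[i].data);
	free(init);
	return 0;
}

]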
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+{
+ struct gf100_gr_init *init;
+ struct gf100_gr_pack *pack;
+	/* We don't expect to initialize more than 16 classes here... */
+ static const unsigned int max_classes = 16;
+ const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+ int i, classidx = 0;
+ u32 prevclass = 0;
+
+ pack = vzalloc((sizeof(*pack) * max_classes) +
+ (sizeof(*init) * (nent + 1)));
+ if (!pack)
+ return ERR_PTR(-ENOMEM);
+
+ init = (void *)(pack + max_classes);
+
+ for (i = 0; i < nent; i++) {
+ struct gf100_gr_init *ent = &init[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+ u32 class = av->addr & 0xffff;
+ u32 addr = (av->addr & 0xffff0000) >> 14;
+
+ if (prevclass != class) {
+ pack[classidx].init = ent;
+ pack[classidx].type = class;
+ prevclass = class;
+ if (++classidx >= max_classes) {
+ vfree(pack);
+ return ERR_PTR(-ENOSPC);
+ }
+ }
+
+ ent->addr = addr;
+ ent->data = av->data;
+ ent->count = 1;
+ ent->pitch = 1;
+ }
+
+ return pack;
+}
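
[Note: in gk20a_gr_av_to_method() the packed addr field carries the object class in its low 16 bits and the method number above them; shifting right by 14 rather than 16 converts the method index into a byte offset (methods are in units of 4 bytes, so >> 16 << 2). A worked decode with an invented entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t av_addr = 0x008190c0;	/* made-up firmware entry */
	uint32_t class = av_addr & 0xffff;
	uint32_t mthd  = (av_addr & 0xffff0000) >> 14;

	printf("class 0x%04x mthd 0x%04x\n", class, mthd);
	/* prints: class 0x90c0 mthd 0x0204 */
	return 0;
}

]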
+
+static int
+gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "FECS mem scrubbing timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void
+gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_wr32(device, 0x419e44, 0x1ffffe);
+ nvkm_wr32(device, 0x419e4c, 0x7f);
+}
+
+int
+gk20a_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc;
+ int ret, i;
+
+ /* Clear SCC RAM */
+ nvkm_wr32(device, 0x40802c, 0x1);
+
+ gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+ ret = gk20a_gr_wait_mem_scrubbing(gr);
+ if (ret)
+ return ret;
+
+ ret = gf100_gr_wait_idle(gr);
+ if (ret)
+ return ret;
+
+ /* MMU debug buffer */
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+
+ if (gr->func->init_gpc_mmu)
+ gr->func->init_gpc_mmu(gr);
+
+ /* Set the PE as stream master */
+ nvkm_mask(device, 0x503018, 0x1, 0x1);
+
+ /* Zcull init */
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+ /* Enable FIFO access */
+ nvkm_wr32(device, 0x400500, 0x00010001);
+
+ /* Enable interrupts */
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+ /* Enable FECS error interrupts */
+ nvkm_wr32(device, 0x409c24, 0x000f0000);
+
+ /* Enable hardware warning exceptions */
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+
+ if (gr->func->set_hww_esr_report_mask)
+ gr->func->set_hww_esr_report_mask(gr);
+
+ /* Enable TPC exceptions per GPC */
+ nvkm_wr32(device, 0x419d0c, 0x2);
+ nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);
+
+ /* Reset and enable all exceptions */
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
+}
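
[Note: the TPC exception enable written to 0x41ac94 above builds a one-bit-per-TPC mask, caps it at 8 bits, and shifts it into the upper half-word. Worked with an assumed five-TPC part:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int tpc_total = 5;	/* made-up count */
	uint32_t mask = (((1u << tpc_total) - 1) & 0xff) << 16;

	printf("0x%08x\n", mask);	/* 0x001f0000 */
	return 0;
}

]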
+
+void
+gk20a_gr_dtor(struct gf100_gr *gr)
+{
+ gk20a_gr_init_dtor(gr->fuc_method);
+ gk20a_gr_init_dtor(gr->fuc_bundle);
+ gk20a_gr_init_dtor(gr->fuc_sw_ctx);
+ gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
+}
+
+int
+gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
+{
+ struct gf100_gr_fuc fuc;
+ struct gf100_gr *gr;
+ int ret;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ *pgr = &gr->base;
+
+ ret = gf100_gr_ctor(func, device, index, gr);
+ if (ret)
+ return ret;
+
+ ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
+ if (ret)
+ return ret;
+ gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
+ gf100_gr_dtor_fw(&fuc);
+ if (IS_ERR(gr->fuc_sw_nonctx))
+ return PTR_ERR(gr->fuc_sw_nonctx);
+
+ ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
+ if (ret)
+ return ret;
+ gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
+ gf100_gr_dtor_fw(&fuc);
+ if (IS_ERR(gr->fuc_sw_ctx))
+ return PTR_ERR(gr->fuc_sw_ctx);
+
+ ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
+ if (ret)
+ return ret;
+ gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
+ gf100_gr_dtor_fw(&fuc);
+ if (IS_ERR(gr->fuc_bundle))
+ return PTR_ERR(gr->fuc_bundle);
+
+ ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
+ if (ret)
+ return ret;
+ gr->fuc_method = gk20a_gr_av_to_method(&fuc);
+ gf100_gr_dtor_fw(&fuc);
+ if (IS_ERR(gr->fuc_method))
+ return PTR_ERR(gr->fuc_method);
+
+ return 0;
+}
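
[Note: the error handling in gk20a_gr_new_() relies on the kernel's ERR_PTR/IS_ERR convention: each converter returns either a valid pack pointer or a small negative errno encoded in the pointer value itself. A userspace model of the convention:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	/* The top MAX_ERRNO values of the address space encode errors. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12);	/* -ENOMEM */

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* prints -12 */
	return 0;
}

]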
+
+static const struct gf100_gr_func
+gk20a_gr = {
+ .dtor = gk20a_gr_dtor,
+ .init = gk20a_gr_init,
+ .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
.ppc_nr = 1,
-}.base;
+ .grctx = &gk20a_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+ { -1, -1, KEPLER_C, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5ebd459bc24..56e960212e5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -30,19 +30,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
- { MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -292,7 +279,7 @@ gm107_gr_pack_mmio[] = {
******************************************************************************/
void
-gm107_gr_init_bios(struct gf100_gr_priv *priv)
+gm107_gr_init_bios(struct gf100_gr *gr)
{
static const struct {
u32 ctrl;
@@ -304,7 +291,8 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
{ 0x419af0, 0x419af4 },
{ 0x419af8, 0x419afc },
};
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_bios *bios = device->bios;
struct nvbios_P0260E infoE;
struct nvbios_P0260X infoX;
int E = -1, X;
@@ -312,124 +300,119 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
if (X = -1, E < ARRAY_SIZE(regs)) {
- nv_wr32(priv, regs[E].ctrl, infoE.data);
+ nvkm_wr32(device, regs[E].ctrl, infoE.data);
while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
- nv_wr32(priv, regs[E].data, infoX.data);
+ nvkm_wr32(device, regs[E].data, infoX.data);
}
}
}
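
The "if (X = -1, E < ARRAY_SIZE(regs))" test above uses the comma operator: the assignment resets the inner index X on every outer iteration, and only the right-hand comparison decides the branch. A two-line standalone demo of the semantics:

#include <stdio.h>

/* The comma operator evaluates the left expression for its side effect,
 * then yields the right expression as the condition's value. */
int main(void)
{
        int X, E = 2;
        const int N = 4;

        if (X = -1, E < N)                    /* X is reset, E < N decides */
                printf("X=%d, E=%d taken\n", X, E);  /* X=-1, E=2 taken */
        return 0;
}
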
int
-gm107_gr_init(struct nvkm_object *object)
+gm107_gr_init(struct gf100_gr *gr)
{
- struct gf100_gr_oclass *oclass = (void *)object->oclass;
- struct gf100_gr_priv *priv = (void *)object;
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, ppc, rop;
- int ret, i;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
+ int i;
- nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
- nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
- nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
- gf100_gr_mmio(priv, oclass->mmio);
+ gf100_gr_mmio(gr, gr->func->mmio);
- gm107_gr_init_bios(priv);
+ gm107_gr_init_bios(gr);
- nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
memset(data, 0x00, sizeof(data));
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
data[i / 8] |= tpc << ((i % 8) * 4);
}
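
The loop just above distributes global TPC indices round-robin across GPCs, skipping GPCs whose TPCs are exhausted, and packs each TPC's index within its own GPC as a 4-bit nibble, eight per 32-bit word, ready for the GPC_BCAST(0x0980..0x098c) writes that follow. A standalone trace of the same packing with a hypothetical 2-GPC topology:

#include <stdio.h>
#include <string.h>

/* Standalone model of the TPC round-robin packing loop (hypothetical
 * topology: 2 GPCs with 3 and 2 TPCs).  Each global TPC index i goes to
 * the next GPC that still has TPCs left; the TPC's index *within* that
 * GPC is packed as a nibble, eight nibbles per 32-bit word. */
int main(void)
{
        const int gpc_nr = 2;
        const int tpc_nr[2] = { 3, 2 };       /* per-GPC TPC counts */
        const int tpc_total = 5;
        int tpcnr[2];
        unsigned int data[1] = { 0 };         /* 5 nibbles fit one word */
        int i, gpc, tpc;

        memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
        for (i = 0, gpc = -1; i < tpc_total; i++) {
                do {                          /* next GPC with TPCs left */
                        gpc = (gpc + 1) % gpc_nr;
                } while (!tpcnr[gpc]);
                tpc = tpc_nr[gpc] - tpcnr[gpc]--;  /* local index in GPC */
                data[i / 8] |= (unsigned int)tpc << ((i % 8) * 4);
        }
        printf("data[0] = 0x%08x\n", data[0]);     /* 0x00021100 */
        return 0;
}
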
- nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
- nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
- nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
- nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
- priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- priv->tpc_total);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
- nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
- nv_wr32(priv, 0x400500, 0x00010001);
-
- nv_wr32(priv, 0x400100, 0xffffffff);
- nv_wr32(priv, 0x40013c, 0xffffffff);
- nv_wr32(priv, 0x400124, 0x00000002);
- nv_wr32(priv, 0x409c24, 0x000e0000);
-
- nv_wr32(priv, 0x404000, 0xc0000000);
- nv_wr32(priv, 0x404600, 0xc0000000);
- nv_wr32(priv, 0x408030, 0xc0000000);
- nv_wr32(priv, 0x404490, 0xc0000000);
- nv_wr32(priv, 0x406018, 0xc0000000);
- nv_wr32(priv, 0x407020, 0x40000000);
- nv_wr32(priv, 0x405840, 0xc0000000);
- nv_wr32(priv, 0x405844, 0x00ffffff);
- nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
- nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000e0000);
+
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
+ nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
}
- nv_wr32(priv, 0x400108, 0xffffffff);
- nv_wr32(priv, 0x400138, 0xffffffff);
- nv_wr32(priv, 0x400118, 0xffffffff);
- nv_wr32(priv, 0x400130, 0xffffffff);
- nv_wr32(priv, 0x40011c, 0xffffffff);
- nv_wr32(priv, 0x400134, 0xffffffff);
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
- nv_wr32(priv, 0x400054, 0x2c350f63);
+ nvkm_wr32(device, 0x400054, 0x2c350f63);
- gf100_gr_zbc_init(priv);
+ gf100_gr_zbc_init(gr);
- return gf100_gr_init_ctxctl(priv);
+ return gf100_gr_init_ctxctl(gr);
}
#include "fuc/hubgm107.fuc5.h"
@@ -452,19 +435,25 @@ gm107_gr_gpccs_ucode = {
.data.size = sizeof(gm107_grgpc_data),
};
-struct nvkm_oclass *
-gm107_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0x07),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gm107_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gm107_grctx_oclass,
- .sclass = gm107_gr_sclass,
+static const struct gf100_gr_func
+gm107_gr = {
+ .init = gm107_gr_init,
.mmio = gm107_gr_pack_mmio,
.fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,
.ppc_nr = 2,
-}.base;
+ .grctx = &gm107_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, MAXWELL_A, &gf100_fermi },
+ { -1, -1, MAXWELL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gm107_gr, device, index, pgr);
+}
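
The new .sclass arrays embedded in gf100_gr_func replace the freestanding nvkm_oclass lists: each entry pairs a version range (-1/-1 is taken here to mean "any version", an assumption) with an object class from nvif/class.h, and a zeroed entry terminates the table. A sketch of scanning such a table, with illustrative field names rather than the real nvkm_sclass layout:

#include <stdio.h>

/* Illustrative table scan; field names are assumptions, not the real
 * struct nvkm_sclass.  Class numbers are the nvif/class.h values. */
struct sclass {
        int minver, maxver;                   /* -1/-1: any version */
        int oclass;
};

static const struct sclass gm107_classes[] = {
        { -1, -1, 0x902d },                   /* FERMI_TWOD_A */
        { -1, -1, 0xb097 },                   /* MAXWELL_A */
        {}                                    /* zeroed sentinel */
};

int main(void)
{
        const struct sclass *s;

        for (s = gm107_classes; s->oclass; s++)
                printf("class 0x%04x (ver %d..%d)\n",
                       s->oclass, s->minver, s->maxver);
        return 0;
}
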
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index fdb1dcf16a59..90381dde451a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -27,19 +27,6 @@
#include <nvif/class.h>
/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gm204_gr_sclass[] = {
- { FERMI_TWOD_A, &nvkm_object_ofuncs },
- { KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
- { MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
- { MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
- {}
-};
-
-/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@@ -243,144 +230,144 @@ gm204_gr_data[] = {
******************************************************************************/
static int
-gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gm204_gr_init_ctxctl(struct gf100_gr *gr)
{
return 0;
}
int
-gm204_gr_init(struct nvkm_object *object)
+gm204_gr_init(struct gf100_gr *gr)
{
- struct gf100_gr_oclass *oclass = (void *)object->oclass;
- struct gf100_gr_priv *priv = (void *)object;
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
- u32 data[TPC_MAX / 8] = {};
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {}, tmp;
u8 tpcnr[GPC_MAX];
int gpc, tpc, ppc, rop;
- int ret, i;
- u32 tmp;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
+ int i;
- tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
- nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
- nv_wr32(priv, 0x418890, 0x00000000);
- nv_wr32(priv, 0x418894, 0x00000000);
- nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
- nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
- nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+ tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+ nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+ nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
/*XXX: belongs in fb */
- nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
- nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
- nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
- gf100_gr_mmio(priv, oclass->mmio);
+ gf100_gr_mmio(gr, gr->func->mmio);
- gm107_gr_init_bios(priv);
+ gm107_gr_init_bios(gr);
- nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
memset(data, 0x00, sizeof(data));
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
- gpc = (gpc + 1) % priv->gpc_nr;
+ gpc = (gpc + 1) % gr->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
data[i / 8] |= tpc << ((i % 8) * 4);
}
- nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
- nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
- nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
- nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
- priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- priv->tpc_total);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
- nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
- nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
-
- nv_wr32(priv, 0x400500, 0x00010001);
- nv_wr32(priv, 0x400100, 0xffffffff);
- nv_wr32(priv, 0x40013c, 0xffffffff);
- nv_wr32(priv, 0x400124, 0x00000002);
- nv_wr32(priv, 0x409c24, 0x000e0000);
- nv_wr32(priv, 0x405848, 0xc0000000);
- nv_wr32(priv, 0x40584c, 0x00000001);
- nv_wr32(priv, 0x404000, 0xc0000000);
- nv_wr32(priv, 0x404600, 0xc0000000);
- nv_wr32(priv, 0x408030, 0xc0000000);
- nv_wr32(priv, 0x404490, 0xc0000000);
- nv_wr32(priv, 0x406018, 0xc0000000);
- nv_wr32(priv, 0x407020, 0x40000000);
- nv_wr32(priv, 0x405840, 0xc0000000);
- nv_wr32(priv, 0x405844, 0x00ffffff);
- nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
- nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
- nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000e0000);
+ nvkm_wr32(device, 0x405848, 0xc0000000);
+ nvkm_wr32(device, 0x40584c, 0x00000001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+ nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
- nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
}
- nv_wr32(priv, 0x400108, 0xffffffff);
- nv_wr32(priv, 0x400138, 0xffffffff);
- nv_wr32(priv, 0x400118, 0xffffffff);
- nv_wr32(priv, 0x400130, 0xffffffff);
- nv_wr32(priv, 0x40011c, 0xffffffff);
- nv_wr32(priv, 0x400134, 0xffffffff);
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
- nv_wr32(priv, 0x400054, 0x2c350f63);
+ nvkm_wr32(device, 0x400054, 0x2c350f63);
- gf100_gr_zbc_init(priv);
+ gf100_gr_zbc_init(gr);
- return gm204_gr_init_ctxctl(priv);
+ return gm204_gr_init_ctxctl(gr);
}
-struct nvkm_oclass *
-gm204_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0x24),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gm204_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gm204_grctx_oclass,
- .sclass = gm204_gr_sclass,
+static const struct gf100_gr_func
+gm204_gr = {
+ .init = gm204_gr_init,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
-}.base;
+ .grctx = &gm204_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, MAXWELL_B, &gf100_fermi },
+ { -1, -1, MAXWELL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gm204_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
index 04b9733d146a..341dc560acbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -24,17 +24,25 @@
#include "gf100.h"
#include "ctxgf100.h"
-struct nvkm_oclass *
-gm206_gr_oclass = &(struct gf100_gr_oclass) {
- .base.handle = NV_ENGINE(GR, 0x26),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_gr_ctor,
- .dtor = gf100_gr_dtor,
- .init = gm204_gr_init,
- .fini = _nvkm_gr_fini,
- },
- .cclass = &gm206_grctx_oclass,
- .sclass = gm204_gr_sclass,
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gm206_gr = {
+ .init = gm204_gr_init,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
-}.base;
+ .grctx = &gm206_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, MAXWELL_B, &gf100_fermi },
+ { -1, -1, MAXWELL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(&gm206_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
new file mode 100644
index 000000000000..65b6e3d1e90d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static void
+gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 val;
+
+ /* TODO this needs to be removed once secure boot works */
+ if (1) {
+ nvkm_wr32(device, 0x100ce4, 0xffffffff);
+ }
+
+ /* TODO update once secure boot works */
+ val = nvkm_rd32(device, 0x100c80);
+ val &= 0xf000087f;
+ nvkm_wr32(device, 0x418880, val);
+ nvkm_wr32(device, 0x418890, 0);
+ nvkm_wr32(device, 0x418894, 0);
+
+ nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+ nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+
+ nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
+}
+
+static void
+gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_wr32(device, 0x419e44, 0xdffffe);
+ nvkm_wr32(device, 0x419e4c, 0x5);
+}
+
+static const struct gf100_gr_func
+gm20b_gr = {
+ .dtor = gk20a_gr_dtor,
+ .init = gk20a_gr_init,
+ .init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+ .set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+ .ppc_nr = 1,
+ .grctx = &gm20b_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, MAXWELL_B, &gf100_fermi },
+ { -1, -1, MAXWELL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+}
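
gm20b reuses gk20a's init and dtor wholesale and only supplies its own .init_gpc_mmu and HWW-mask hooks; the shared init path is assumed to invoke such hooks only when the variant fills them in. A compact sketch of that optional-hook composition:

#include <stdio.h>

/* Sketch of the optional-hook pattern behind gf100_gr_func: a shared init
 * routine calls a chip hook only when the variant provides one.  Names
 * are illustrative, not the real driver structures. */
struct gr;

struct gr_func {
        void (*init_gpc_mmu)(struct gr *);    /* optional per-chip hook */
        const char *name;
};

struct gr {
        const struct gr_func *func;
};

static void gm20b_init_gpc_mmu(struct gr *gr)
{
        printf("gm20b MMU setup\n");
}

static const struct gr_func gm20b = { gm20b_init_gpc_mmu, "gm20b" };
static const struct gr_func gk20a = { NULL, "gk20a" };

static void gr_init(struct gr *gr)
{
        if (gr->func->init_gpc_mmu)           /* skip when hook is absent */
                gr->func->init_gpc_mmu(gr);
        printf("%s: common init done\n", gr->func->name);
}

int main(void)
{
        struct gr a = { &gk20a }, b = { &gm20b };

        gr_init(&a);
        gr_init(&b);
        return 0;
}
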
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
new file mode 100644
index 000000000000..2e68919f00b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt200_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .tlb_flush = g84_gr_tlb_flush,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, 0x8397, &nv50_gr_object },
+ {}
+ }
+};
+
+int
+gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&gt200_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
new file mode 100644
index 000000000000..2bf7aac360cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt215_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .tlb_flush = g84_gr_tlb_flush,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, 0x8597, &nv50_gr_object },
+ { -1, -1, 0x85c0, &nv50_gr_object },
+ {}
+ }
+};
+
+int
+gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&gt215_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
new file mode 100644
index 000000000000..95d5219faf93
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp79_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, 0x8397, &nv50_gr_object },
+ {}
+ }
+};
+
+int
+mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
new file mode 100644
index 000000000000..027b58e5976b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp89_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .tlb_flush = g84_gr_tlb_flush,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, 0x85c0, &nv50_gr_object },
+ { -1, -1, 0x8697, &nv50_gr_object },
+ {}
+ }
+};
+
+int
+mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 2614510c28d0..85c5b7fea5f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -21,13 +21,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <engine/gr.h>
+#include "priv.h"
#include "regs.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
@@ -346,25 +346,23 @@ nv04_gr_ctx_regs[] = {
NV04_PGRAPH_DEBUG_3
};
-struct nv04_gr_priv {
+#define nv04_gr(p) container_of((p), struct nv04_gr, base)
+
+struct nv04_gr {
struct nvkm_gr base;
struct nv04_gr_chan *chan[16];
spinlock_t lock;
};
+#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
+
struct nv04_gr_chan {
- struct nvkm_object base;
+ struct nvkm_object object;
+ struct nv04_gr *gr;
int chid;
u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};
-
-static inline struct nv04_gr_priv *
-nv04_gr_priv(struct nv04_gr_chan *chan)
-{
- return (void *)nv_object(chan)->engine;
-}
-
/*******************************************************************************
* Graphics object classes
******************************************************************************/
@@ -444,35 +442,34 @@ nv04_gr_priv(struct nv04_gr_chan *chan)
*/
static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
- struct nv04_gr_priv *priv = (void *)object->engine;
- int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+ int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
u32 tmp;
- tmp = nv_ro32(object, 0x00);
+ tmp = nvkm_rd32(device, 0x700000 + inst);
tmp &= ~mask;
tmp |= value;
- nv_wo32(object, 0x00, tmp);
+ nvkm_wr32(device, 0x700000 + inst, tmp);
- nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
- nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+ nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
+ nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
}
static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
int class, op, valid = 1;
u32 tmp, ctx1;
- ctx1 = nv_ro32(object, 0x00);
+ ctx1 = nvkm_rd32(device, 0x700000 + inst);
class = ctx1 & 0xff;
op = (ctx1 >> 15) & 7;
- tmp = nv_ro32(object, 0x0c);
+ tmp = nvkm_rd32(device, 0x70000c + inst);
tmp &= ~mask;
tmp |= value;
- nv_wo32(object, 0x0c, tmp);
+ nvkm_wr32(device, 0x70000c + inst, tmp);
/* check for valid surf2d/surf_dst/surf_color */
if (!(tmp & 0x02000000))
@@ -504,527 +501,567 @@ nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
break;
}
- nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+ nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
}
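
nv04_gr_set_ctx1() and nv04_gr_set_ctx_val() are read-modify-write helpers over instance memory, which on these chips is visible through the 0x700000 window (PRAMIN, as the diff's own nvkm_rd32(device, 0x700000 + inst) shows), so "inst" becomes a plain MMIO offset. A userspace model with an array standing in for the register file:

#include <stdio.h>
#include <stdint.h>

/* Userspace model of nv04_gr_set_ctx1()'s read-modify-write: the masked
 * field is cleared and replaced in the object's ctx1 word.  The array
 * stands in for the BAR mapping; the mirroring into the live context
 * registers is omitted. */
static uint32_t mmio[0x800000 / 4];

static uint32_t rd32(uint32_t addr) { return mmio[addr / 4]; }
static void wr32(uint32_t addr, uint32_t data) { mmio[addr / 4] = data; }

static void set_ctx1(uint32_t inst, uint32_t mask, uint32_t value)
{
        uint32_t tmp = rd32(0x700000 + inst);

        tmp &= ~mask;                         /* clear the field ... */
        tmp |= value;                         /* ... then set new bits */
        wr32(0x700000 + inst, tmp);
}

int main(void)
{
        wr32(0x700000 + 0x1000, 0x00000042);  /* pretend ctx1, class 0x42 */
        set_ctx1(0x1000, 0x00038000, 2 << 15);     /* set_operation(2) */
        printf("ctx1 = 0x%08x\n", rd32(0x700000 + 0x1000));  /* 0x00010042 */
        return 0;
}
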
-static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
{
- u32 class = nv_ro32(object, 0) & 0xff;
- u32 data = *(u32 *)args;
+ u8 class = nvkm_rd32(device, 0x700000) & 0x000000ff;
if (data > 5)
- return 1;
+ return false;
/* Old versions of the objects only accept first three operations. */
if (data > 2 && class < 0x40)
- return 1;
- nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+ return false;
+ nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
- nv04_gr_set_ctx_val(object, 0, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0, 0);
+ return true;
}
-static int
-nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
{
- struct nv04_gr_priv *priv = (void *)object->engine;
- u32 data = *(u32 *)args;
u32 min = data & 0xffff, max;
u32 w = data >> 16;
if (min & 0x8000)
/* too large */
- return 1;
+ return false;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
- nv_wr32(priv, 0x40053c, min);
- nv_wr32(priv, 0x400544, max);
- return 0;
+ nvkm_wr32(device, 0x40053c, min);
+ nvkm_wr32(device, 0x400544, max);
+ return true;
}
-static int
-nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
{
- struct nv04_gr_priv *priv = (void *)object->engine;
- u32 data = *(u32 *)args;
u32 min = data & 0xffff, max;
u32 w = data >> 16;
if (min & 0x8000)
/* too large */
- return 1;
+ return false;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
- nv_wr32(priv, 0x400540, min);
- nv_wr32(priv, 0x400548, max);
- return 0;
+ nvkm_wr32(device, 0x400540, min);
+ nvkm_wr32(device, 0x400548, max);
+ return true;
}
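
The two clip methods decode one 32-bit word as a 16-bit minimum plus a 16-bit width that may be negative (hence the manual sign extension), then clamp max to 18 bits. A worked standalone version of the decode:

#include <stdio.h>
#include <stdint.h>

/* Model of the surf3d clip decode above: low 16 bits are the minimum,
 * high 16 bits a possibly-negative width; max = min + w, kept to 18
 * bits.  Returns 0 on the "too large" reject path. */
static int clip(uint32_t data, uint32_t *pmin, uint32_t *pmax)
{
        uint32_t min = data & 0xffff;
        uint32_t w = data >> 16;

        if (min & 0x8000)                     /* min >= 0x8000: rejected */
                return 0;
        if (w & 0x8000)                       /* sign-extend negative width */
                w |= 0xffff0000;
        *pmin = min;
        *pmax = (min + w) & 0x3ffff;
        return 1;
}

int main(void)
{
        uint32_t min, max;

        if (clip(0xfff00100, &min, &max))     /* min=0x100, w=-16 */
                printf("min=0x%05x max=0x%05x\n", min, max);
        /* prints min=0x00100 max=0x000f0 */
        return 0;
}
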
-static u16
-nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size)
+static u8
+nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
{
- struct nvkm_instmem *imem = nvkm_instmem(object);
- u32 inst = *(u32 *)args << 4;
- return nv_ro32(imem, inst);
+ return nvkm_rd32(device, 0x700000 + (inst << 4));
}
-static int
-nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx1(object, 0x00004000, 0);
- nv04_gr_set_ctx_val(object, 0x02000000, 0);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+ return true;
case 0x42:
- nv04_gr_set_ctx1(object, 0x00004000, 0);
- nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx1(object, 0x00004000, 0);
- nv04_gr_set_ctx_val(object, 0x02000000, 0);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+ return true;
case 0x42:
- nv04_gr_set_ctx1(object, 0x00004000, 0);
- nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+ return true;
case 0x52:
- nv04_gr_set_ctx1(object, 0x00004000, 0x00004000);
- nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x08000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+ return true;
case 0x18:
- nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x08000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+ return true;
case 0x44:
- nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x10000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
+ return true;
case 0x43:
- nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x20000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
+ return true;
case 0x12:
- nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x40000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
+ return true;
case 0x72:
- nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x02000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+ return true;
case 0x58:
- nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x04000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+ return true;
case 0x59:
- nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x02000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+ return true;
case 0x5a:
- nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx_val(object, 0x04000000, 0);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+ return true;
case 0x5b:
- nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
- return 0;
+ nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx1(object, 0x2000, 0);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x2000, 0);
+ return true;
case 0x19:
- nv04_gr_set_ctx1(object, 0x2000, 0x2000);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
+ return true;
}
- return 1;
+ return false;
}
-static int
-nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
{
- switch (nv04_gr_mthd_bind_class(object, args, size)) {
+ switch (nv04_gr_mthd_bind_class(device, data)) {
case 0x30:
- nv04_gr_set_ctx1(object, 0x1000, 0);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x1000, 0);
+ return true;
/* Yes, for some reason even the old versions of objects
* accept 0x57 and not 0x17. Consistency be damned.
*/
case 0x57:
- nv04_gr_set_ctx1(object, 0x1000, 0x1000);
- return 0;
+ nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
+ return true;
}
- return 1;
+ return false;
}
-static struct nvkm_omthds
-nv03_gr_gdi_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_patt },
- { 0x0188, 0x0188, nv04_gr_mthd_bind_rop },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
-
-static struct nvkm_omthds
-nv04_gr_gdi_omthds[] = {
- { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_patt; break;
+ case 0x0188: func = nv04_gr_mthd_bind_rop; break;
+ case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv01_gr_blit_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
- { 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
- { 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_blit_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
- { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+ case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+ case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_iifc_omthds[] = {
- { 0x0188, 0x0188, nv01_gr_mthd_bind_chroma },
- { 0x018c, 0x018c, nv01_gr_mthd_bind_clip },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_patt },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_rop },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 },
- { 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 },
- { 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf },
- { 0x03e4, 0x03e4, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+ case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+ case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv01_gr_ifc_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
- { 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x018c: func = nv01_gr_mthd_bind_clip; break;
+ case 0x0190: func = nv04_gr_mthd_bind_patt; break;
+ case 0x0194: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
+ case 0x03e4: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_ifc_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
- { 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+ case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+ case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv03_gr_sifc_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+ case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+ case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_sifc_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
- { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv03_gr_sifm_omthds[] = {
- { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
- { 0x0304, 0x0304, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+ case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_sifm_omthds[] = {
- { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
- { 0x0304, 0x0304, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x0304: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_surf3d_omthds[] = {
- { 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h },
- { 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v },
- {}
-};
+static bool
+nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x0304: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv03_gr_ttri_omthds[] = {
- { 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta },
- {}
-};
+static bool
+nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
+ case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv01_gr_prim_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
- { 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+ case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
+ case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static struct nvkm_omthds
-nv04_gr_prim_omthds[] = {
- { 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
- { 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
- { 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
- { 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
- { 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
- { 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
- { 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
- {}
-};
+static bool
+nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+ case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
-static int
-nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static bool
+nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
- struct nvkm_gpuobj *obj;
- int ret;
+ bool (*func)(struct nvkm_device *, u32, u32);
+ switch (mthd) {
+ case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+ case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+ case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+ case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+ case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+ case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+ case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+ default:
+ return false;
+ }
+ return func(device, inst, data);
+}
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
+static bool
+nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+ bool (*func)(struct nvkm_device *, u32, u32, u32);
+ switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
+ case 0x1c ... 0x1e:
+ func = nv01_gr_mthd_prim; break;
+ case 0x1f: func = nv01_gr_mthd_blit; break;
+ case 0x21: func = nv01_gr_mthd_ifc; break;
+ case 0x36: func = nv03_gr_mthd_sifc; break;
+ case 0x37: func = nv03_gr_mthd_sifm; break;
+ case 0x48: func = nv03_gr_mthd_ttri; break;
+ case 0x4a: func = nv04_gr_mthd_gdi; break;
+ case 0x4b: func = nv03_gr_mthd_gdi; break;
+ case 0x53: func = nv04_gr_mthd_surf3d; break;
+ case 0x5c ... 0x5e:
+ func = nv04_gr_mthd_prim; break;
+ case 0x5f: func = nv04_gr_mthd_blit; break;
+ case 0x60: func = nv04_gr_mthd_iifc; break;
+ case 0x61: func = nv04_gr_mthd_ifc; break;
+ case 0x76: func = nv04_gr_mthd_sifc; break;
+ case 0x77: func = nv04_gr_mthd_sifm; break;
+ default:
+ return false;
+ }
+ return func(device, inst, mthd, data);
+}
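/* How the dispatcher above finds its handler: nv04_gr_object_bind()
 * below writes object->oclass into word 0 of the object's instance
 * memory, and nv04_gr_mthd() reads the low byte of that word back
 * through the PRAMIN window at 0x700000 + inst.  A minimal sketch of
 * that round trip (example_class_of is hypothetical, not part of the
 * patch):
 */
static u8
example_class_of(struct nvkm_device *device, u32 inst)
{
	/* low byte of instance word 0 == graphics object class */
	return nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
}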
- nv_wo32(obj, 0x00, nv_mclass(obj));
+static int
+nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
+{
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+ false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, object->oclass);
#ifdef __BIG_ENDIAN
- nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+ nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000);
#endif
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- return 0;
+ nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
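/* nvkm_kmap()/nvkm_wo32()/nvkm_done() is the accessor triplet the new
 * code uses for all instance-memory writes: map the backing memory,
 * write 32-bit words at byte offsets, then flush and unmap.  A minimal
 * usage sketch (example_clear is hypothetical, not part of the patch):
 */
static void
example_clear(struct nvkm_gpuobj *gpuobj, u32 base, int words)
{
	int i;
	nvkm_kmap(gpuobj);
	for (i = 0; i < words; i++)
		nvkm_wo32(gpuobj, base + i * 4, 0x00000000);
	nvkm_done(gpuobj);
}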
-struct nvkm_ofuncs
-nv04_gr_ofuncs = {
- .ctor = nv04_gr_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv04_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
- { 0x0017, &nv04_gr_ofuncs }, /* chroma */
- { 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
- { 0x0019, &nv04_gr_ofuncs }, /* clip */
- { 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
- { 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
- { 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
- { 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
- { 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
- { 0x0030, &nv04_gr_ofuncs }, /* null */
- { 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
- { 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
- { 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
- { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
- { 0x0042, &nv04_gr_ofuncs }, /* surf2d */
- { 0x0043, &nv04_gr_ofuncs }, /* rop */
- { 0x0044, &nv04_gr_ofuncs }, /* pattern */
- { 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
- { 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
- { 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
- { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
- { 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
- { 0x0054, &nv04_gr_ofuncs }, /* ttri */
- { 0x0055, &nv04_gr_ofuncs }, /* mtri */
- { 0x0057, &nv04_gr_ofuncs }, /* chroma */
- { 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
- { 0x0059, &nv04_gr_ofuncs }, /* surf_src */
- { 0x005a, &nv04_gr_ofuncs }, /* surf_color */
- { 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
- { 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
- { 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
- { 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
- { 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
- { 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
- { 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
- { 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
- { 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
- { 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
- { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
- { 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
- { 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
- {},
+const struct nvkm_object_func
+nv04_gr_object = {
+ .bind = nv04_gr_object_bind,
};
@@ -1032,13 +1069,14 @@ nv04_gr_sclass[] = {
/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/
static struct nv04_gr_chan *
-nv04_gr_channel(struct nv04_gr_priv *priv)
+nv04_gr_channel(struct nv04_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
struct nv04_gr_chan *chan = NULL;
- if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
- int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
- if (chid < ARRAY_SIZE(priv->chan))
- chan = priv->chan[chid];
+ if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+ int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
+ if (chid < ARRAY_SIZE(gr->chan))
+ chan = gr->chan[chid];
}
return chan;
}
@@ -1046,55 +1084,52 @@ nv04_gr_channel(struct nv04_gr_priv *priv)
static int
nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
- struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+ struct nvkm_device *device = chan->gr->base.engine.subdev.device;
int i;
for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
- nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]);
+ nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);
- nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
- nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+ nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+ nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
return 0;
}
static int
nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
- struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+ struct nvkm_device *device = chan->gr->base.engine.subdev.device;
int i;
for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
- chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]);
+ chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);
- nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
- nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+ nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
return 0;
}
static void
-nv04_gr_context_switch(struct nv04_gr_priv *priv)
+nv04_gr_context_switch(struct nv04_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
struct nv04_gr_chan *prev = NULL;
struct nv04_gr_chan *next = NULL;
- unsigned long flags;
int chid;
- spin_lock_irqsave(&priv->lock, flags);
- nv04_gr_idle(priv);
+ nv04_gr_idle(&gr->base);
/* If previous context is valid, we need to save it */
- prev = nv04_gr_channel(priv);
+ prev = nv04_gr_channel(gr);
if (prev)
nv04_gr_unload_context(prev);
/* load context for next channel */
- chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
- next = priv->chan[chid];
+ chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+ next = gr->chan[chid];
if (next)
nv04_gr_load_context(next, chid);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
@@ -1109,98 +1144,85 @@ static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
return NULL;
}
-static int
-nv04_gr_context_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void *
+nv04_gr_chan_dtor(struct nvkm_object *object)
{
- struct nvkm_fifo_chan *fifo = (void *)parent;
- struct nv04_gr_priv *priv = (void *)engine;
- struct nv04_gr_chan *chan;
+ struct nv04_gr_chan *chan = nv04_gr_chan(object);
+ struct nv04_gr *gr = chan->gr;
unsigned long flags;
- int ret;
-
- ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->chan[fifo->chid]) {
- *pobject = nv_object(priv->chan[fifo->chid]);
- atomic_inc(&(*pobject)->refcount);
- spin_unlock_irqrestore(&priv->lock, flags);
- nvkm_object_destroy(&chan->base);
- return 1;
- }
- *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
- priv->chan[fifo->chid] = chan;
- chan->chid = fifo->chid;
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ spin_lock_irqsave(&gr->lock, flags);
+ gr->chan[chan->chid] = NULL;
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return chan;
}
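/* Lifetime note on the converted object model: the channel is a plain
 * kzalloc()ed struct wrapped by nvkm_object_ctor(), and a .dtor hands
 * its allocation back for the object core to free; roughly (assumed
 * teardown shape in the nvkm core, shown for illustration):
 *
 *	void *data = object->func->dtor(object);  // returns chan here
 *	kfree(data);
 *
 * which is why nv04_gr_chan_dtor() ends with "return chan" rather than
 * freeing anything itself.
 */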
-static void
-nv04_gr_context_dtor(struct nvkm_object *object)
+static int
+nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
- struct nv04_gr_priv *priv = (void *)object->engine;
- struct nv04_gr_chan *chan = (void *)object;
+ struct nv04_gr_chan *chan = nv04_gr_chan(object);
+ struct nv04_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- priv->chan[chan->chid] = NULL;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- nvkm_object_destroy(&chan->base);
+ spin_lock_irqsave(&gr->lock, flags);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (nv04_gr_channel(gr) == chan)
+ nv04_gr_unload_context(chan);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return 0;
}
+static const struct nvkm_object_func
+nv04_gr_chan = {
+ .dtor = nv04_gr_chan_dtor,
+ .fini = nv04_gr_chan_fini,
+};
+
static int
-nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- struct nv04_gr_priv *priv = (void *)object->engine;
- struct nv04_gr_chan *chan = (void *)object;
+ struct nv04_gr *gr = nv04_gr(base);
+ struct nv04_gr_chan *chan;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (nv04_gr_channel(priv) == chan)
- nv04_gr_unload_context(chan);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
- return nvkm_object_fini(&chan->base, suspend);
-}
+ *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-static struct nvkm_oclass
-nv04_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_gr_context_ctor,
- .dtor = nv04_gr_context_dtor,
- .init = nvkm_object_init,
- .fini = nv04_gr_context_fini,
- },
-};
+ spin_lock_irqsave(&gr->lock, flags);
+ gr->chan[chan->chid] = chan;
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return 0;
+}
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
bool
-nv04_gr_idle(void *obj)
+nv04_gr_idle(struct nvkm_gr *gr)
{
- struct nvkm_gr *gr = nvkm_gr(obj);
+ struct nvkm_subdev *subdev = &gr->engine.subdev;
+ struct nvkm_device *device = subdev->device;
u32 mask = 0xffffffff;
- if (nv_device(obj)->card_type == NV_40)
+ if (device->card_type == NV_40)
mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
- if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
- nv_error(gr, "idle timed out with status 0x%08x\n",
- nv_rd32(gr, NV04_PGRAPH_STATUS));
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "idle timed out with status %08x\n",
+ nvkm_rd32(device, NV04_PGRAPH_STATUS));
return false;
}
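/* nvkm_msec(device, t, body) above polls `body` for up to t
 * milliseconds, a break inside the body ends the poll early, and the
 * macro evaluates negative only on timeout; it replaces the old
 * nv_wait() helper.  An open-coded equivalent of the wait, under those
 * assumed semantics (example_wait_idle is hypothetical):
 */
static bool
example_wait_idle(struct nvkm_device *device, u32 mask)
{
	int us = 2000 * 1000;	/* ~2000ms budget, illustrative only */
	while (nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask) {
		if (us-- <= 0)
			return false;	/* timed out, still busy */
		udelay(1);
	}
	return true;		/* PGRAPH idle */
}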
@@ -1247,136 +1269,159 @@ nv04_gr_nsource[] = {
};
static void
-nv04_gr_intr(struct nvkm_subdev *subdev)
+nv04_gr_intr(struct nvkm_gr *base)
{
- struct nv04_gr_priv *priv = (void *)subdev;
- struct nv04_gr_chan *chan = NULL;
- struct nvkm_namedb *namedb = NULL;
- struct nvkm_handle *handle = NULL;
- u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
- u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ struct nv04_gr *gr = nv04_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+ u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+ u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
u32 chid = (addr & 0x0f000000) >> 24;
u32 subc = (addr & 0x0000e000) >> 13;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
- u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+ u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
+ u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
u32 show = stat;
+ char msg[128], src[128], sta[128];
+ struct nv04_gr_chan *chan;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- chan = priv->chan[chid];
- if (chan)
- namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&gr->lock, flags);
+ chan = gr->chan[chid];
if (stat & NV_PGRAPH_INTR_NOTIFY) {
if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
- handle = nvkm_namedb_get_vinst(namedb, inst);
- if (handle && !nv_call(handle->object, mthd, data))
+ if (!nv04_gr_mthd(device, inst, mthd, data))
show &= ~NV_PGRAPH_INTR_NOTIFY;
}
}
if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv04_gr_context_switch(priv);
+ nv04_gr_context_switch(gr);
}
- nv_wr32(priv, NV03_PGRAPH_INTR, stat);
- nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+ nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(nv04_gr_intr_name, show);
- pr_cont(" nsource:");
- nvkm_bitfield_print(nv04_gr_nsource, nsource);
- pr_cont(" nstatus:");
- nvkm_bitfield_print(nv04_gr_nstatus, nstatus);
- pr_cont("\n");
- nv_error(priv,
- "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nvkm_client_name(chan), subc, class, mthd,
- data);
+ nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
+ nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+ nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
+ nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+ "nstatus %08x [%s] ch %d [%s] subc %d "
+ "class %04x mthd %04x data %08x\n",
+ show, msg, nsource, src, nstatus, sta, chid,
+ chan ? chan->object.client->name : "unknown",
+ subc, class, mthd, data);
}
- nvkm_namedb_put(handle);
+ spin_unlock_irqrestore(&gr->lock, flags);
}
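/* nvkm_snprintbf() renders a bitfield into a caller-supplied buffer so
 * the whole report can go out as a single nvkm_error() line, replacing
 * the old nvkm_bitfield_print()/pr_cont() sequence whose pieces could
 * interleave with other console output.  Call shape used above:
 *
 *	char msg[128];
 *	nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
 */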
static int
-nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_gr_init(struct nvkm_gr *base)
{
- struct nv04_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv04_gr_intr;
- nv_engine(priv)->cclass = &nv04_gr_cclass;
- nv_engine(priv)->sclass = nv04_gr_sclass;
- spin_lock_init(&priv->lock);
- return 0;
-}
-
-static int
-nv04_gr_init(struct nvkm_object *object)
-{
- struct nvkm_engine *engine = nv_engine(object);
- struct nv04_gr_priv *priv = (void *)engine;
- int ret;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
+ struct nv04_gr *gr = nv04_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
/* Enable PGRAPH interrupts */
- nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
- nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
- nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
- /*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
+ nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
+ /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
/*1231C000 blob, 001 haiku*/
/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
- nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
/*0x72111100 blob , 01 haiku*/
- /*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
- nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+ /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
/*haiku same*/
- /*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
- nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+ /*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
/*haiku and blob 10d4*/
- nv_wr32(priv, NV04_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
- nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+ nvkm_wr32(device, NV04_PGRAPH_STATE , 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
+ nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
/* These don't belong here, they're part of a per-channel context */
- nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
- nv_wr32(priv, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
return 0;
}
-struct nvkm_oclass
-nv04_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_gr_ctor,
- .dtor = _nvkm_gr_dtor,
- .init = nv04_gr_init,
- .fini = _nvkm_gr_fini,
- },
+static const struct nvkm_gr_func
+nv04_gr = {
+ .init = nv04_gr_init,
+ .intr = nv04_gr_intr,
+ .chan_new = nv04_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
+ { -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x001c, &nv04_gr_object }, /* line */
+ { -1, -1, 0x001d, &nv04_gr_object }, /* tri */
+ { -1, -1, 0x001e, &nv04_gr_object }, /* rect */
+ { -1, -1, 0x001f, &nv04_gr_object },
+ { -1, -1, 0x0021, &nv04_gr_object },
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0036, &nv04_gr_object },
+ { -1, -1, 0x0037, &nv04_gr_object },
+ { -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+ { -1, -1, 0x0048, &nv04_gr_object },
+ { -1, -1, 0x004a, &nv04_gr_object },
+ { -1, -1, 0x004b, &nv04_gr_object },
+ { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x0053, &nv04_gr_object },
+ { -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
+ { -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
+ { -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
+ { -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
+ { -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
+ { -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
+ { -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
+ { -1, -1, 0x005c, &nv04_gr_object }, /* line */
+ { -1, -1, 0x005d, &nv04_gr_object }, /* tri */
+ { -1, -1, 0x005e, &nv04_gr_object }, /* rect */
+ { -1, -1, 0x005f, &nv04_gr_object },
+ { -1, -1, 0x0060, &nv04_gr_object },
+ { -1, -1, 0x0061, &nv04_gr_object },
+ { -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
+ { -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
+ { -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0076, &nv04_gr_object },
+ { -1, -1, 0x0077, &nv04_gr_object },
+ {}
+ }
};
+
+int
+nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ struct nv04_gr *gr;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ spin_lock_init(&gr->lock);
+ *pgr = &gr->base;
+
+ return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
+ true, &gr->base);
+}
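/* With nv04_gr_new() in place, the engine is instantiated from the
 * per-chipset device tables rather than through an nvkm_oclass; a
 * plausible hookup entry (field name mirrors nvkm convention, shown
 * for illustration only):
 *
 *	.gr = nv04_gr_new,
 */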
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 389904eb603f..4542867fa9e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -21,13 +21,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <engine/gr.h>
+#include "nv10.h"
#include "regs.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
#include <subdev/fb.h>
struct pipe_state {
@@ -386,14 +386,19 @@ static int nv17_gr_ctx_regs[] = {
0x00400a04,
};
-struct nv10_gr_priv {
+#define nv10_gr(p) container_of((p), struct nv10_gr, base)
+
+struct nv10_gr {
struct nvkm_gr base;
struct nv10_gr_chan *chan[32];
spinlock_t lock;
};
+#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
+
struct nv10_gr_chan {
- struct nvkm_object base;
+ struct nvkm_object object;
+ struct nv10_gr *gr;
int chid;
int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@@ -402,214 +407,151 @@ struct nv10_gr_chan {
};
-static inline struct nv10_gr_priv *
-nv10_gr_priv(struct nv10_gr_chan *chan)
-{
- return (void *)nv_object(chan)->engine;
-}
-
/*******************************************************************************
* Graphics object classes
******************************************************************************/
-#define PIPE_SAVE(priv, state, addr) \
+#define PIPE_SAVE(gr, state, addr) \
do { \
int __i; \
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+ state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
} while (0)
-#define PIPE_RESTORE(priv, state, addr) \
+#define PIPE_RESTORE(gr, state, addr) \
do { \
int __i; \
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
} while (0)
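/* Note on the converted macros: the first parameter is no longer used;
 * both bodies reference a local `device` variable captured from the
 * caller's scope, which is why later hunks pass PIPE_SAVE(device, ...)
 * in some places and PIPE_SAVE(gr, ...) in others with identical
 * effect.  A caller only needs `device` in scope:
 *
 *	struct nvkm_device *device = gr->base.engine.subdev.device;
 *	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
 */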
-static struct nvkm_oclass
-nv10_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs }, /* clip */
- { 0x0030, &nv04_gr_ofuncs }, /* null */
- { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs }, /* rop */
- { 0x0044, &nv04_gr_ofuncs }, /* pattern */
- { 0x004a, &nv04_gr_ofuncs }, /* gdi */
- { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_gr_ofuncs }, /* blit */
- { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs }, /* blit */
- { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
- { 0x0094, &nv04_gr_ofuncs }, /* ttri */
- { 0x0095, &nv04_gr_ofuncs }, /* mtri */
- { 0x0056, &nv04_gr_ofuncs }, /* celcius */
- {},
-};
-
-static struct nvkm_oclass
-nv15_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs }, /* clip */
- { 0x0030, &nv04_gr_ofuncs }, /* null */
- { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs }, /* rop */
- { 0x0044, &nv04_gr_ofuncs }, /* pattern */
- { 0x004a, &nv04_gr_ofuncs }, /* gdi */
- { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_gr_ofuncs }, /* blit */
- { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs }, /* blit */
- { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
- { 0x0094, &nv04_gr_ofuncs }, /* ttri */
- { 0x0095, &nv04_gr_ofuncs }, /* mtri */
- { 0x0096, &nv04_gr_ofuncs }, /* celcius */
- {},
-};
-
-static int
-nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static void
+nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
- struct nv10_gr_chan *chan = (void *)object->parent;
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+ struct nvkm_gr *gr = &chan->gr->base;
struct pipe_state *pipe = &chan->pipe_state;
u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
u32 xfmode0, xfmode1;
- u32 data = *(u32 *)args;
int i;
chan->lma_window[(mthd - 0x1638) / 4] = data;
if (mthd != 0x1644)
- return 0;
+ return;
- nv04_gr_idle(priv);
+ nv04_gr_idle(gr);
- PIPE_SAVE(priv, pipe_0x0040, 0x0040);
- PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(device, pipe_0x0040, 0x0040);
+ PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
- PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+ PIPE_RESTORE(device, chan->lma_window, 0x6790);
- nv04_gr_idle(priv);
+ nv04_gr_idle(gr);
- xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+ xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
- PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
- PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
- PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+ PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
- nv04_gr_idle(priv);
+ nv04_gr_idle(gr);
- nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
for (i = 0; i < 4; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
for (i = 0; i < 4; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
for (i = 0; i < 3; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
for (i = 0; i < 3; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
- nv04_gr_idle(priv);
+ PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
- PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+ nv04_gr_idle(gr);
- nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(device, pipe_0x0040, 0x0040);
- PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
- PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
- PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
- PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
- nv04_gr_idle(priv);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- return 0;
+ nv04_gr_idle(gr);
}
-static int
-nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static void
+nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
- struct nv10_gr_chan *chan = (void *)object->parent;
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nvkm_device *device = chan->object.engine->subdev.device;
+ struct nvkm_gr *gr = &chan->gr->base;
- nv04_gr_idle(priv);
+ nv04_gr_idle(gr);
- nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
- nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
- return 0;
+ nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+ nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
}
-static struct nvkm_omthds
-nv17_celcius_omthds[] = {
- { 0x1638, 0x1638, nv17_gr_mthd_lma_window },
- { 0x163c, 0x163c, nv17_gr_mthd_lma_window },
- { 0x1640, 0x1640, nv17_gr_mthd_lma_window },
- { 0x1644, 0x1644, nv17_gr_mthd_lma_window },
- { 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
- {}
-};
+static bool
+nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
+{
+ void (*func)(struct nv10_gr_chan *, u32, u32);
+ switch (mthd) {
+ case 0x1638 ... 0x1644:
+ func = nv17_gr_mthd_lma_window; break;
+ case 0x1658: func = nv17_gr_mthd_lma_enable; break;
+ default:
+ return false;
+ }
+ func(chan, mthd, data);
+ return true;
+}
-static struct nvkm_oclass
-nv17_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs }, /* clip */
- { 0x0030, &nv04_gr_ofuncs }, /* null */
- { 0x0039, &nv04_gr_ofuncs }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs }, /* rop */
- { 0x0044, &nv04_gr_ofuncs }, /* pattern */
- { 0x004a, &nv04_gr_ofuncs }, /* gdi */
- { 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
- { 0x005f, &nv04_gr_ofuncs }, /* blit */
- { 0x0062, &nv04_gr_ofuncs }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs }, /* blit */
- { 0x0093, &nv04_gr_ofuncs }, /* surf3d */
- { 0x0094, &nv04_gr_ofuncs }, /* ttri */
- { 0x0095, &nv04_gr_ofuncs }, /* mtri */
- { 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
- {},
-};
+static bool
+nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
+{
+ bool (*func)(struct nv10_gr_chan *, u32, u32);
+ switch (class) {
+ case 0x99: func = nv17_gr_mthd_celcius; break;
+ default:
+ return false;
+ }
+ return func(chan, mthd, data);
+}
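/* Only the nv17 celsius class (0x99) retains software methods on this
 * family; every other class falls through and leaves the error
 * interrupt asserted.  Extending the dispatch for another class would
 * follow the same shape, e.g. for the nv10 celsius class 0x56 (handler
 * name hypothetical):
 *
 *	case 0x56: func = example_mthd_celsius_nv10; break;
 */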
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static struct nv10_gr_chan *
-nv10_gr_channel(struct nv10_gr_priv *priv)
+nv10_gr_channel(struct nv10_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
struct nv10_gr_chan *chan = NULL;
- if (nv_rd32(priv, 0x400144) & 0x00010000) {
- int chid = nv_rd32(priv, 0x400148) >> 24;
- if (chid < ARRAY_SIZE(priv->chan))
- chan = priv->chan[chid];
+ if (nvkm_rd32(device, 0x400144) & 0x00010000) {
+ int chid = nvkm_rd32(device, 0x400148) >> 24;
+ if (chid < ARRAY_SIZE(gr->chan))
+ chan = gr->chan[chid];
}
return chan;
}
@@ -617,75 +559,78 @@ nv10_gr_channel(struct nv10_gr_priv *priv)
static void
nv10_gr_save_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
struct pipe_state *pipe = &chan->pipe_state;
-
- PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
- PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
- PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
- PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
- PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
- PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
- PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
- PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
- PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
}
static void
nv10_gr_load_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
struct pipe_state *pipe = &chan->pipe_state;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 xfmode0, xfmode1;
int i;
- nv04_gr_idle(priv);
+ nv04_gr_idle(&gr->base);
/* XXX check haiku comments */
- xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
- nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
for (i = 0; i < 4; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
for (i = 0; i < 4; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
for (i = 0; i < 3; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
for (i = 0; i < 3; i++)
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
- PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
- nv04_gr_idle(priv);
+ PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
+ nv04_gr_idle(&gr->base);
/* restore XFMODE */
- nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
- PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
- PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
- PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
- PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
- PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
- PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
- PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
- PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
- PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
- nv04_gr_idle(priv);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+ nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
+ nv04_gr_idle(&gr->base);
}
static void
nv10_gr_create_pipe(struct nv10_gr_chan *chan)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct pipe_state *pipe_state = &chan->pipe_state;
u32 *pipe_state_addr;
int i;
@@ -698,7 +643,7 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
u32 *__end_addr = pipe_state->pipe_##addr + \
ARRAY_SIZE(pipe_state->pipe_##addr); \
if (pipe_state_addr != __end_addr) \
- nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \
+ nvkm_error(subdev, "incomplete pipe init for 0x%x : %p/%p\n", \
addr, pipe_state_addr, __end_addr); \
} while (0)
#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
@@ -838,33 +783,36 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
}
static int
-nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
int i;
for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
if (nv10_gr_ctx_regs[i] == reg)
return i;
}
- nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg);
+ nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
return -1;
}
static int
-nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
int i;
for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
if (nv17_gr_ctx_regs[i] == reg)
return i;
}
- nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg);
+ nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
return -1;
}
static void
nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
u32 ctx_user, ctx_switch[5];
int i, subchan = -1;
@@ -876,7 +824,7 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
/* Look for a celsius object */
for (i = 0; i < 8; i++) {
- int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+ int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
if (class == 0x56 || class == 0x96 || class == 0x99) {
subchan = i;
@@ -888,159 +836,183 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
return;
/* Save the current ctx object */
- ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+ ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
for (i = 0; i < 5; i++)
- ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+ ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
/* Save the FIFO state */
- st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
- st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
- st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
- fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+ st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
+ st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
+ st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
+ fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
for (i = 0; i < ARRAY_SIZE(fifo); i++)
- fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+ fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
/* Switch to the celsius subchannel */
for (i = 0; i < 5; i++)
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
- nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
- nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
+ nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+ nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
/* Inject NV10TCL_DMA_VTXBUF */
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
- nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+ nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Restore the FIFO state */
for (i = 0; i < ARRAY_SIZE(fifo); i++)
- nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+ nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
/* Restore the current ctx object */
for (i = 0; i < 5; i++)
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
- nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
}
static int
nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 inst;
int i;
for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
- nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]);
+ nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
- if (nv_device(priv)->card_type >= NV_11 &&
- nv_device(priv)->chipset >= 0x17) {
+ if (device->card_type >= NV_11 && device->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
- nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]);
+ nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
}
nv10_gr_load_pipe(chan);
- inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+ inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
nv10_gr_load_dma_vtxbuf(chan, chid, inst);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
- nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+ nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
return 0;
}
static int
nv10_gr_unload_context(struct nv10_gr_chan *chan)
{
- struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+ struct nv10_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int i;
for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
- chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]);
+ chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
- if (nv_device(priv)->card_type >= NV_11 &&
- nv_device(priv)->chipset >= 0x17) {
+ if (device->card_type >= NV_11 && device->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
- chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]);
+ chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
}
nv10_gr_save_pipe(chan);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
return 0;
}
static void
-nv10_gr_context_switch(struct nv10_gr_priv *priv)
+nv10_gr_context_switch(struct nv10_gr *gr)
{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
struct nv10_gr_chan *prev = NULL;
struct nv10_gr_chan *next = NULL;
- unsigned long flags;
int chid;
- spin_lock_irqsave(&priv->lock, flags);
- nv04_gr_idle(priv);
+ nv04_gr_idle(&gr->base);
/* If previous context is valid, we need to save it */
- prev = nv10_gr_channel(priv);
+ prev = nv10_gr_channel(gr);
if (prev)
nv10_gr_unload_context(prev);
/* load context for next channel */
- chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- next = priv->chan[chid];
+ chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ next = gr->chan[chid];
if (next)
nv10_gr_load_context(next, chid);
+}
+
+static int
+nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nv10_gr_chan *chan = nv10_gr_chan(object);
+ struct nv10_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ unsigned long flags;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&gr->lock, flags);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (nv10_gr_channel(gr) == chan)
+ nv10_gr_unload_context(chan);
+ nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return 0;
+}
+
+static void *
+nv10_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct nv10_gr_chan *chan = nv10_gr_chan(object);
+ struct nv10_gr *gr = chan->gr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gr->lock, flags);
+ gr->chan[chan->chid] = NULL;
+ spin_unlock_irqrestore(&gr->lock, flags);
+ return chan;
}
+static const struct nvkm_object_func
+nv10_gr_chan = {
+ .dtor = nv10_gr_chan_dtor,
+ .fini = nv10_gr_chan_fini,
+};
+
#define NV_WRITE_CTX(reg, val) do { \
- int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \
+ int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
if (offset > 0) \
chan->nv10[offset] = val; \
} while (0)
#define NV17_WRITE_CTX(reg, val) do { \
- int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \
+ int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
if (offset > 0) \
chan->nv17[offset] = val; \
} while (0)
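/* NV_WRITE_CTX()/NV17_WRITE_CTX() only seed the software copy of the
 * channel context; nothing reaches the hardware until
 * nv10_gr_load_context() replays chan->nv10[]/chan->nv17[] into the
 * real registers on a channel switch.  Staging example from the code
 * below:
 *
 *	NV_WRITE_CTX(0x00400e88, 0x08000000);	// staged, not yet written
 */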
-static int
-nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- struct nvkm_fifo_chan *fifo = (void *)parent;
- struct nv10_gr_priv *priv = (void *)engine;
+ struct nv10_gr *gr = nv10_gr(base);
struct nv10_gr_chan *chan;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
- int ret;
-
- ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->chan[fifo->chid]) {
- *pobject = nv_object(priv->chan[fifo->chid]);
- atomic_inc(&(*pobject)->refcount);
- spin_unlock_irqrestore(&priv->lock, flags);
- nvkm_object_destroy(&chan->base);
- return 1;
- }
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
NV_WRITE_CTX(0x00400e88, 0x08000000);
NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -1049,12 +1021,11 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
- if (nv_device(priv)->card_type >= NV_11 &&
- nv_device(priv)->chipset >= 0x17) {
+ if (device->card_type >= NV_11 && device->chipset >= 0x17) {
/* is it really needed ??? */
NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
- nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
- NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+ nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
NV17_WRITE_CTX(0x00400ec0, 0x00000080);
@@ -1064,74 +1035,32 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv10_gr_create_pipe(chan);
- priv->chan[fifo->chid] = chan;
- chan->chid = fifo->chid;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&gr->lock, flags);
+ gr->chan[chan->chid] = chan;
+ spin_unlock_irqrestore(&gr->lock, flags);
return 0;
}
-static void
-nv10_gr_context_dtor(struct nvkm_object *object)
-{
- struct nv10_gr_priv *priv = (void *)object->engine;
- struct nv10_gr_chan *chan = (void *)object;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->chan[chan->chid] = NULL;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- nvkm_object_destroy(&chan->base);
-}
-
-static int
-nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv10_gr_priv *priv = (void *)object->engine;
- struct nv10_gr_chan *chan = (void *)object;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (nv10_gr_channel(priv) == chan)
- nv10_gr_unload_context(chan);
- nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return nvkm_object_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv10_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x10),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_gr_context_ctor,
- .dtor = nv10_gr_context_dtor,
- .init = nvkm_object_init,
- .fini = nv10_gr_context_fini,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static void
-nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
+void
+nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
- struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
- struct nvkm_fifo *pfifo = nvkm_fifo(engine);
- struct nv10_gr_priv *priv = (void *)engine;
+ struct nv10_gr *gr = nv10_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
unsigned long flags;
- pfifo->pause(pfifo, &flags);
- nv04_gr_idle(priv);
+ nvkm_fifo_pause(fifo, &flags);
+ nv04_gr_idle(&gr->base);
- nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
- pfifo->start(pfifo, &flags);
+ nvkm_fifo_start(fifo, &flags);
}
const struct nvkm_bitfield nv10_gr_intr_name[] = {
@@ -1148,168 +1077,145 @@ const struct nvkm_bitfield nv10_gr_nstatus[] = {
{}
};
-static void
-nv10_gr_intr(struct nvkm_subdev *subdev)
+void
+nv10_gr_intr(struct nvkm_gr *base)
{
- struct nv10_gr_priv *priv = (void *)subdev;
- struct nv10_gr_chan *chan = NULL;
- struct nvkm_namedb *namedb = NULL;
- struct nvkm_handle *handle = NULL;
- u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
- u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ struct nv10_gr *gr = nv10_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+ u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+ u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
u32 chid = (addr & 0x01f00000) >> 20;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+ u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
u32 show = stat;
+ char msg[128], src[128], sta[128];
+ struct nv10_gr_chan *chan;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- chan = priv->chan[chid];
- if (chan)
- namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&gr->lock, flags);
+ chan = gr->chan[chid];
if (stat & NV_PGRAPH_INTR_ERROR) {
if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
- handle = nvkm_namedb_get_class(namedb, class);
- if (handle && !nv_call(handle->object, mthd, data))
+ if (!nv10_gr_mthd(chan, class, mthd, data))
show &= ~NV_PGRAPH_INTR_ERROR;
}
}
if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv10_gr_context_switch(priv);
+ nv10_gr_context_switch(gr);
}
- nv_wr32(priv, NV03_PGRAPH_INTR, stat);
- nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+ nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(nv10_gr_intr_name, show);
- pr_cont(" nsource:");
- nvkm_bitfield_print(nv04_gr_nsource, nsource);
- pr_cont(" nstatus:");
- nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
- pr_cont("\n");
- nv_error(priv,
- "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nvkm_client_name(chan), subc, class, mthd,
- data);
+ nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+ nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+ nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+ nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+ "nstatus %08x [%s] ch %d [%s] subc %d "
+ "class %04x mthd %04x data %08x\n",
+ show, msg, nsource, src, nstatus, sta, chid,
+ chan ? chan->object.client->name : "unknown",
+ subc, class, mthd, data);
}
- nvkm_namedb_put(handle);
+ spin_unlock_irqrestore(&gr->lock, flags);
}
-static int
-nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv10_gr_init(struct nvkm_gr *base)
{
- struct nv10_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv10_gr_intr;
- nv_engine(priv)->cclass = &nv10_gr_cclass;
-
- if (nv_device(priv)->chipset <= 0x10)
- nv_engine(priv)->sclass = nv10_gr_sclass;
- else
- if (nv_device(priv)->chipset < 0x17 ||
- nv_device(priv)->card_type < NV_11)
- nv_engine(priv)->sclass = nv15_gr_sclass;
- else
- nv_engine(priv)->sclass = nv17_gr_sclass;
-
- nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
- spin_lock_init(&priv->lock);
- return 0;
-}
-
-static void
-nv10_gr_dtor(struct nvkm_object *object)
-{
- struct nv10_gr_priv *priv = (void *)object;
- nvkm_gr_destroy(&priv->base);
-}
-
-static int
-nv10_gr_init(struct nvkm_object *object)
-{
- struct nvkm_engine *engine = nv_engine(object);
- struct nvkm_fb *pfb = nvkm_fb(object);
- struct nv10_gr_priv *priv = (void *)engine;
- int ret, i;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
- /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
- nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
-
- if (nv_device(priv)->card_type >= NV_11 &&
- nv_device(priv)->chipset >= 0x17) {
- nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
- nv_wr32(priv, 0x400a10, 0x03ff3fb6);
- nv_wr32(priv, 0x400838, 0x002f8684);
- nv_wr32(priv, 0x40083c, 0x00115f3f);
- nv_wr32(priv, 0x4006b0, 0x40000020);
+ struct nv10_gr *gr = nv10_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ /* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
+
+ if (device->card_type >= NV_11 && device->chipset >= 0x17) {
+ nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
+ nvkm_wr32(device, 0x400838, 0x002f8684);
+ nvkm_wr32(device, 0x40083c, 0x00115f3f);
+ nvkm_wr32(device, 0x4006b0, 0x40000020);
} else {
- nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
}
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->tile.regions; i++)
- engine->tile_prog(engine, i);
-
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
- nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
- nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+ nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
return 0;
}
-static int
-nv10_gr_fini(struct nvkm_object *object, bool suspend)
+int
+nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
{
- struct nv10_gr_priv *priv = (void *)object;
- return nvkm_gr_fini(&priv->base, suspend);
+ struct nv10_gr *gr;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ spin_lock_init(&gr->lock);
+ *pgr = &gr->base;
+
+ return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
}
-struct nvkm_oclass
-nv10_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x10),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_gr_ctor,
- .dtor = nv10_gr_dtor,
- .init = nv10_gr_init,
- .fini = nv10_gr_fini,
- },
+static const struct nvkm_gr_func
+nv10_gr = {
+ .init = nv10_gr_init,
+ .intr = nv10_gr_intr,
+ .tile = nv10_gr_tile,
+ .chan_new = nv10_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+ { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+ { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+ { -1, -1, 0x0056, &nv04_gr_object }, /* celsius */
+ {}
+ }
};
+
+int
+nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv10_gr_new_(&nv10_gr, device, index, pgr);
+}
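
The nv10_gr_new()/nv10_gr_new_() split above is what lets nv15 and nv17 reuse a single constructor with their own function tables. A minimal, self-contained sketch of the pattern (all demo_* names are invented for illustration; this is not nouveau API):

#include <stdio.h>
#include <stdlib.h>

struct demo_gr;

struct demo_gr_func {
	int (*init)(struct demo_gr *);
	const char *name;
};

struct demo_gr {
	const struct demo_gr_func *func;	/* per-variant ops, fixed at ctor time */
};

/* shared two-step constructor, analogous to nv10_gr_new_() */
static int
demo_gr_new_(const struct demo_gr_func *func, struct demo_gr **pgr)
{
	struct demo_gr *gr;

	if (!(gr = calloc(1, sizeof(*gr))))
		return -1;
	gr->func = func;
	*pgr = gr;
	return 0;
}

static int
demo_nv15_init(struct demo_gr *gr)
{
	printf("%s: init\n", gr->func->name);
	return 0;
}

/* a per-variant table, analogous to the static nv15_gr func struct */
static const struct demo_gr_func
demo_nv15_gr = {
	.init = demo_nv15_init,
	.name = "demo-nv15",
};

int
main(void)
{
	struct demo_gr *gr;

	if (demo_gr_new_(&demo_nv15_gr, &gr) == 0) {
		gr->func->init(gr);
		free(gr);
	}
	return 0;
}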
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
new file mode 100644
index 000000000000..d7c3d86cc99d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -0,0 +1,13 @@
+#ifndef __NV10_GR_H__
+#define __NV10_GR_H__
+#include "priv.h"
+
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+ struct nvkm_gr **);
+int nv10_gr_init(struct nvkm_gr *);
+void nv10_gr_intr(struct nvkm_gr *);
+void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *, struct nvkm_object **);
+#endif
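
nv10.h is paired with a container_of()-based downcast (the nv20_gr() macro in nv20.h below shows the shape) so a struct nvkm_gr pointer can be converted back to the chipset struct that embeds it. A standalone sketch of that idiom, with the macro reduced to its core (the kernel's container_of() adds type checking on top):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct outer {
	int extra;
	struct base base;	/* embedded base object, as in struct nv20_gr */
};

int
main(void)
{
	struct outer o = { .extra = 42, .base = { .id = 7 } };
	struct base *b = &o.base;	/* what generic code passes around */
	struct outer *back = container_of(b, struct outer, base);

	printf("extra=%d id=%d\n", back->extra, back->base.id);
	return 0;
}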
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
new file mode 100644
index 000000000000..3e2c6856b4c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv15_gr = {
+ .init = nv10_gr_init,
+ .intr = nv10_gr_intr,
+ .tile = nv10_gr_tile,
+ .chan_new = nv10_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+ { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+ { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+ { -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+ {}
+ }
+};
+
+int
+nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv10_gr_new_(&nv15_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
new file mode 100644
index 000000000000..12437d085a73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv17_gr = {
+ .init = nv10_gr_init,
+ .intr = nv10_gr_intr,
+ .tile = nv10_gr_tile,
+ .chan_new = nv10_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+ { -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+ { -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+ { -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+ { -1, -1, 0x0099, &nv04_gr_object }, /* celsius */
+ {}
+ }
+};
+
+int
+nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv10_gr_new_(&nv17_gr, device, index, pgr);
+}
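
Each func table above ends its sclass list with an empty entry; consumers scan until that zeroed terminator. A hedged user-space sketch of such a lookup (entry layout simplified, demo_* names invented):

#include <stdio.h>

struct demo_sclass {
	int minver, maxver;
	unsigned oclass;
	const void *ops;
};

static const int demo_ops;	/* stand-in for the nv04_gr_object funcs */

static const struct demo_sclass demo_table[] = {
	{ -1, -1, 0x0012, &demo_ops },	/* beta1 */
	{ -1, -1, 0x0019, &demo_ops },	/* clip */
	{}				/* zeroed sentinel terminates the scan */
};

static const struct demo_sclass *
demo_lookup(unsigned oclass)
{
	const struct demo_sclass *e;

	for (e = demo_table; e->oclass; e++) {
		if (e->oclass == oclass)
			return e;
	}
	return NULL;
}

int
main(void)
{
	printf("0x0019 %sfound\n", demo_lookup(0x0019) ? "" : "not ");
	return 0;
}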
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 1713ffb669e8..5caef65d3c6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -2,375 +2,374 @@
#include "regs.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
/*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
******************************************************************************/
-static struct nvkm_oclass
-nv20_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
- { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
- { 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
- { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
- { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
- {},
-};
+int
+nv20_gr_chan_init(struct nvkm_object *object)
+{
+ struct nv20_gr_chan *chan = nv20_gr_chan(object);
+ struct nv20_gr *gr = chan->gr;
+ u32 inst = nvkm_memory_addr(chan->inst);
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
+ nvkm_kmap(gr->ctxtab);
+ nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
+ nvkm_done(gr->ctxtab);
+ return 0;
+}
+
+int
+nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nv20_gr_chan *chan = nv20_gr_chan(object);
+ struct nv20_gr *gr = chan->gr;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 inst = nvkm_memory_addr(chan->inst);
+ int chid = -1;
+
+ nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
+ if (nvkm_rd32(device, 0x400144) & 0x00010000)
+ chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
+ if (chan->chid == chid) {
+ nvkm_wr32(device, 0x400784, inst >> 4);
+ nvkm_wr32(device, 0x400788, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x400700))
+ break;
+ );
+ nvkm_wr32(device, 0x400144, 0x10000000);
+ nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
+ }
+ nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
+
+ nvkm_kmap(gr->ctxtab);
+ nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+ nvkm_done(gr->ctxtab);
+ return 0;
+}
+
+void *
+nv20_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct nv20_gr_chan *chan = nv20_gr_chan(object);
+ nvkm_memory_del(&chan->inst);
+ return chan;
+}
+
+static const struct nvkm_object_func
+nv20_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
+};
static int
-nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x033c, 0xffff0000);
- nv_wo32(chan, 0x03a0, 0x0fff0000);
- nv_wo32(chan, 0x03a4, 0x0fff0000);
- nv_wo32(chan, 0x047c, 0x00000101);
- nv_wo32(chan, 0x0490, 0x00000111);
- nv_wo32(chan, 0x04a8, 0x44400000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+ nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+ nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05a4, 0x4b7fffff);
- nv_wo32(chan, 0x05fc, 0x00000001);
- nv_wo32(chan, 0x0604, 0x00004000);
- nv_wo32(chan, 0x0610, 0x00000001);
- nv_wo32(chan, 0x0618, 0x00040000);
- nv_wo32(chan, 0x061c, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+ nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+ nvkm_wo32(chan->inst, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+ nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+ nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x281c, 0x3f800000);
- nv_wo32(chan, 0x2830, 0x3f800000);
- nv_wo32(chan, 0x285c, 0x40000000);
- nv_wo32(chan, 0x2860, 0x3f800000);
- nv_wo32(chan, 0x2864, 0x3f000000);
- nv_wo32(chan, 0x286c, 0x40000000);
- nv_wo32(chan, 0x2870, 0x3f800000);
- nv_wo32(chan, 0x2878, 0xbf800000);
- nv_wo32(chan, 0x2880, 0xbf800000);
- nv_wo32(chan, 0x34a4, 0x000fe000);
- nv_wo32(chan, 0x3530, 0x000003f8);
- nv_wo32(chan, 0x3540, 0x002fe000);
+ nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x285c, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x286c, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
+ nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
+ nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(chan, i, 0x001c527c);
- return 0;
-}
-
-int
-nv20_gr_context_init(struct nvkm_object *object)
-{
- struct nv20_gr_priv *priv = (void *)object->engine;
- struct nv20_gr_chan *chan = (void *)object;
- int ret;
-
- ret = nvkm_gr_context_init(&chan->base);
- if (ret)
- return ret;
-
- nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+ nvkm_wo32(chan->inst, i, 0x001c527c);
+ nvkm_done(chan->inst);
return 0;
}
-int
-nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv20_gr_priv *priv = (void *)object->engine;
- struct nv20_gr_chan *chan = (void *)object;
- int chid = -1;
-
- nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
- if (nv_rd32(priv, 0x400144) & 0x00010000)
- chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
- if (chan->chid == chid) {
- nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
- nv_wr32(priv, 0x400788, 0x00000002);
- nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
- nv_wr32(priv, 0x400144, 0x10000000);
- nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
- }
- nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
-
- nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
- return nvkm_gr_context_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv20_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x20),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv20_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
void
-nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv20_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
- struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
- struct nvkm_fifo *pfifo = nvkm_fifo(engine);
- struct nv20_gr_priv *priv = (void *)engine;
+ struct nv20_gr *gr = nv20_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
unsigned long flags;
- pfifo->pause(pfifo, &flags);
- nv04_gr_idle(priv);
+ nvkm_fifo_pause(fifo, &flags);
+ nv04_gr_idle(&gr->base);
- nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);
- if (nv_device(engine)->chipset != 0x34) {
- nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ if (device->chipset != 0x34) {
+ nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
}
- pfifo->start(pfifo, &flags);
+ nvkm_fifo_start(fifo, &flags);
}
void
-nv20_gr_intr(struct nvkm_subdev *subdev)
+nv20_gr_intr(struct nvkm_gr *base)
{
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct nvkm_handle *handle;
- struct nv20_gr_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
- u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ struct nv20_gr *gr = nv20_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo_chan *chan;
+ u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+ u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+ u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
u32 chid = (addr & 0x01f00000) >> 20;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+ u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
u32 show = stat;
+ char msg[128], src[128], sta[128];
+ unsigned long flags;
- engctx = nvkm_engctx_get(engine, chid);
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- handle = nvkm_handle_get_class(engctx, class);
- if (handle && !nv_call(handle->object, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- nvkm_handle_put(handle);
- }
- }
+ chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);
- nv_wr32(priv, NV03_PGRAPH_INTR, stat);
- nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+ nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(nv10_gr_intr_name, show);
- pr_cont(" nsource:");
- nvkm_bitfield_print(nv04_gr_nsource, nsource);
- pr_cont(" nstatus:");
- nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
- pr_cont("\n");
- nv_error(priv,
- "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, nvkm_client_name(engctx), subc, class, mthd,
- data);
+ nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+ nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+ nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+ nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+ "nstatus %08x [%s] ch %d [%s] subc %d "
+ "class %04x mthd %04x data %08x\n",
+ show, msg, nsource, src, nstatus, sta, chid,
+ chan ? chan->object.client->name : "unknown",
+ subc, class, mthd, data);
}
- nvkm_engctx_put(engctx);
-}
-
-static int
-nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv20_gr_cclass;
- nv_engine(priv)->sclass = nv20_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
+ nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-void
-nv20_gr_dtor(struct nvkm_object *object)
+int
+nv20_gr_oneinit(struct nvkm_gr *base)
{
- struct nv20_gr_priv *priv = (void *)object;
- nvkm_gpuobj_ref(NULL, &priv->ctxtab);
- nvkm_gr_destroy(&priv->base);
+ struct nv20_gr *gr = nv20_gr(base);
+ return nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 32 * 4, 16,
+ true, &gr->ctxtab);
}
int
-nv20_gr_init(struct nvkm_object *object)
+nv20_gr_init(struct nvkm_gr *base)
{
- struct nvkm_engine *engine = nv_engine(object);
- struct nv20_gr_priv *priv = (void *)engine;
- struct nvkm_fb *pfb = nvkm_fb(object);
+ struct nv20_gr *gr = nv20_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
u32 tmp, vramsz;
- int ret, i;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
+ int i;
- nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+ nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ nvkm_memory_addr(gr->ctxtab) >> 4);
- if (nv_device(priv)->chipset == 0x20) {
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+ if (device->chipset == 0x20) {
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
for (i = 0; i < 15; i++)
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
- nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x400700))
+ break;
+ );
} else {
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
for (i = 0; i < 32; i++)
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
- nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x400700))
+ break;
+ );
}
- nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
- nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
- nv_wr32(priv, 0x40009C , 0x00000040);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nvkm_wr32(device, 0x40009C , 0x00000040);
- if (nv_device(priv)->chipset >= 0x25) {
- nv_wr32(priv, 0x400890, 0x00a8cfff);
- nv_wr32(priv, 0x400610, 0x304B1FB6);
- nv_wr32(priv, 0x400B80, 0x1cbd3883);
- nv_wr32(priv, 0x400B84, 0x44000000);
- nv_wr32(priv, 0x400098, 0x40000080);
- nv_wr32(priv, 0x400B88, 0x000000ff);
+ if (device->chipset >= 0x25) {
+ nvkm_wr32(device, 0x400890, 0x00a8cfff);
+ nvkm_wr32(device, 0x400610, 0x304B1FB6);
+ nvkm_wr32(device, 0x400B80, 0x1cbd3883);
+ nvkm_wr32(device, 0x400B84, 0x44000000);
+ nvkm_wr32(device, 0x400098, 0x40000080);
+ nvkm_wr32(device, 0x400B88, 0x000000ff);
} else {
- nv_wr32(priv, 0x400880, 0x0008c7df);
- nv_wr32(priv, 0x400094, 0x00000005);
- nv_wr32(priv, 0x400B80, 0x45eae20e);
- nv_wr32(priv, 0x400B84, 0x24000000);
- nv_wr32(priv, 0x400098, 0x00000040);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nvkm_wr32(device, 0x400880, 0x0008c7df);
+ nvkm_wr32(device, 0x400094, 0x00000005);
+ nvkm_wr32(device, 0x400B80, 0x45eae20e);
+ nvkm_wr32(device, 0x400B84, 0x24000000);
+ nvkm_wr32(device, 0x400098, 0x00000040);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
}
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->tile.regions; i++)
- engine->tile_prog(engine, i);
+ nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));
- nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
-
- tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
- nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
- tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
- nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
+ nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
- nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
- nv_wr32(priv, 0x400820, 0);
- nv_wr32(priv, 0x400824, 0);
- nv_wr32(priv, 0x400864, vramsz - 1);
- nv_wr32(priv, 0x400868, vramsz - 1);
+ vramsz = device->func->resource_size(device, 1) - 1;
+ nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204));
+ nvkm_wr32(device, 0x400820, 0);
+ nvkm_wr32(device, 0x400824, 0);
+ nvkm_wr32(device, 0x400864, vramsz - 1);
+ nvkm_wr32(device, 0x400868, vramsz - 1);
/* interesting.. the below overwrites some of the tile setup above.. */
- nv_wr32(priv, 0x400B20, 0x00000000);
- nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+ nvkm_wr32(device, 0x400B20, 0x00000000);
+ nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);
- nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
- nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
- nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
- nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+ nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
return 0;
}
-struct nvkm_oclass
-nv20_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x20),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv20_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv20_gr_init,
- .fini = _nvkm_gr_fini,
- },
+void *
+nv20_gr_dtor(struct nvkm_gr *base)
+{
+ struct nv20_gr *gr = nv20_gr(base);
+ nvkm_memory_del(&gr->ctxtab);
+ return gr;
+}
+
+int
+nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
+{
+ struct nv20_gr *gr;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ *pgr = &gr->base;
+
+ return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv20_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv20_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv20_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+ { -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
+ { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ {}
+ }
};
+
+int
+nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv20_gr, device, index, pgr);
+}
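
nv20_gr_chan_fini() and nv20_gr_init() above replace nv_wait() with the nvkm_msec() bracket: poll a register until it reads zero or the timeout (2000 ms here) expires. A runnable user-space analogue of that timed-poll idiom, with the register read faked:

#include <stdio.h>
#include <time.h>

static unsigned
fake_rd32(void)
{
	static int busy = 3;
	return busy-- > 0 ? 0xffffffff : 0;	/* "busy" for three reads, then idle */
}

static int
poll_idle_msec(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!fake_rd32())
			return 0;		/* condition met: engine idle */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;		/* timed out */
	}
}

int
main(void)
{
	printf("poll: %s\n", poll_idle_msec(2000) ? "timeout" : "idle");
	return 0;
}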
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ac4dc048fed1..cdf4501e3798 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,26 +1,33 @@
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
-#include <engine/gr.h>
+#define nv20_gr(p) container_of((p), struct nv20_gr, base)
+#include "priv.h"
-struct nv20_gr_priv {
+struct nv20_gr {
struct nvkm_gr base;
- struct nvkm_gpuobj *ctxtab;
+ struct nvkm_memory *ctxtab;
};
-struct nv20_gr_chan {
- struct nvkm_gr_chan base;
- int chid;
-};
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
+ int, struct nvkm_gr **);
+void *nv20_gr_dtor(struct nvkm_gr *);
+int nv20_gr_oneinit(struct nvkm_gr *);
+int nv20_gr_init(struct nvkm_gr *);
+void nv20_gr_intr(struct nvkm_gr *);
+void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
-extern struct nvkm_oclass nv25_gr_sclass[];
-int nv20_gr_context_init(struct nvkm_object *);
-int nv20_gr_context_fini(struct nvkm_object *, bool);
+int nv30_gr_init(struct nvkm_gr *);
-void nv20_gr_tile_prog(struct nvkm_engine *, int);
-void nv20_gr_intr(struct nvkm_subdev *);
+#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
-void nv20_gr_dtor(struct nvkm_object *);
-int nv20_gr_init(struct nvkm_object *);
+struct nv20_gr_chan {
+ struct nvkm_object object;
+ struct nv20_gr *gr;
+ int chid;
+ struct nvkm_memory *inst;
+};
-int nv30_gr_init(struct nvkm_object *);
+void *nv20_gr_chan_dtor(struct nvkm_object *);
+int nv20_gr_chan_init(struct nvkm_object *);
+int nv20_gr_chan_fini(struct nvkm_object *, bool);
#endif
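
Channel construction in the files below brackets every instance-memory write with nvkm_kmap()/nvkm_wo32()/nvkm_done(): map once, issue 32-bit writes at byte offsets, unmap. A minimal user-space analogue, assuming the memory object is just a plain buffer (demo_* names invented):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_memory {
	uint8_t *buf;
	size_t size;
	int mapped;
};

static void demo_kmap(struct demo_memory *m) { m->mapped = 1; }
static void demo_done(struct demo_memory *m) { m->mapped = 0; }

static void
demo_wo32(struct demo_memory *m, size_t offset, uint32_t data)
{
	if (m->mapped && offset + 4 <= m->size)
		memcpy(m->buf + offset, &data, 4);	/* 32-bit write at byte offset */
}

int
main(void)
{
	struct demo_memory inst = { .buf = calloc(1, 0x40), .size = 0x40 };
	uint32_t word0;
	size_t i;

	demo_kmap(&inst);
	demo_wo32(&inst, 0x0000, 0x00000001 | (3 << 24));	/* chid in the top byte */
	for (i = 0x10; i <= 0x1c; i += 4)			/* ranged fill, as above */
		demo_wo32(&inst, i, 0x00030303);
	demo_done(&inst);

	memcpy(&word0, inst.buf, 4);
	printf("word0 = 0x%08x\n", word0);
	free(inst.buf);
	return 0;
}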
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index bc362519cebb..6c4a00819b4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,158 +1,134 @@
#include "nv20.h"
#include "regs.h"
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
/*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
******************************************************************************/
-struct nvkm_oclass
-nv25_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
- { 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
- { 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
- { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
- { 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
- {},
+static const struct nvkm_object_func
+nv25_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
};
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
static int
-nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x3724, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x035c, 0xffff0000);
- nv_wo32(chan, 0x03c0, 0x0fff0000);
- nv_wo32(chan, 0x03c4, 0x0fff0000);
- nv_wo32(chan, 0x049c, 0x00000101);
- nv_wo32(chan, 0x04b0, 0x00000111);
- nv_wo32(chan, 0x04c8, 0x00000080);
- nv_wo32(chan, 0x04cc, 0xffff0000);
- nv_wo32(chan, 0x04d0, 0x00000001);
- nv_wo32(chan, 0x04e4, 0x44400000);
- nv_wo32(chan, 0x04fc, 0x4b800000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x049c, 0x00000101);
+ nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
+ nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
+ nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
+ nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
+ nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05e0, 0x4b7fffff);
- nv_wo32(chan, 0x0620, 0x00000080);
- nv_wo32(chan, 0x0624, 0x30201000);
- nv_wo32(chan, 0x0628, 0x70605040);
- nv_wo32(chan, 0x062c, 0xb0a09080);
- nv_wo32(chan, 0x0630, 0xf0e0d0c0);
- nv_wo32(chan, 0x0664, 0x00000001);
- nv_wo32(chan, 0x066c, 0x00004000);
- nv_wo32(chan, 0x0678, 0x00000001);
- nv_wo32(chan, 0x0680, 0x00040000);
- nv_wo32(chan, 0x0684, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x0620, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0624, 0x30201000);
+ nvkm_wo32(chan->inst, 0x0628, 0x70605040);
+ nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
+ nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
+ nvkm_wo32(chan->inst, 0x0664, 0x00000001);
+ nvkm_wo32(chan->inst, 0x066c, 0x00004000);
+ nvkm_wo32(chan->inst, 0x0678, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0680, 0x00040000);
+ nvkm_wo32(chan->inst, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+ nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+ nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x2704, 0x3f800000);
- nv_wo32(chan, 0x2718, 0x3f800000);
- nv_wo32(chan, 0x2744, 0x40000000);
- nv_wo32(chan, 0x2748, 0x3f800000);
- nv_wo32(chan, 0x274c, 0x3f000000);
- nv_wo32(chan, 0x2754, 0x40000000);
- nv_wo32(chan, 0x2758, 0x3f800000);
- nv_wo32(chan, 0x2760, 0xbf800000);
- nv_wo32(chan, 0x2768, 0xbf800000);
- nv_wo32(chan, 0x308c, 0x000fe000);
- nv_wo32(chan, 0x3108, 0x000003f8);
- nv_wo32(chan, 0x3468, 0x002fe000);
+ nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2744, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x2754, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
+ nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
+ nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(chan, i, 0x001c527c);
+ nvkm_wo32(chan->inst, i, 0x001c527c);
+ nvkm_done(chan->inst);
return 0;
}
-static struct nvkm_oclass
-nv25_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x25),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv25_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
+static const struct nvkm_gr_func
+nv25_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv20_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv25_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+ { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv25_gr_cclass;
- nv_engine(priv)->sclass = nv25_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
+int
+nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv25_gr, device, index, pgr);
}
-
-struct nvkm_oclass
-nv25_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x25),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv25_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv20_gr_init,
- .fini = _nvkm_gr_fini,
- },
-};
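
The rewritten interrupt handlers decode status words through nvkm_snprintbf() before logging a single line. A hedged sketch of that bit-to-name rendering, assuming set-bit names are emitted space-separated (table and names invented):

#include <stdio.h>
#include <string.h>

struct demo_bitfield {
	unsigned mask;
	const char *name;
};

static const struct demo_bitfield demo_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00100000, "ERROR" },
	{ 0x01000000, "CONTEXT_SWITCH" },
	{}
};

static void
demo_snprintbf(char *buf, size_t len,
	       const struct demo_bitfield *bf, unsigned val)
{
	buf[0] = '\0';
	for (; bf->mask; bf++) {
		if (!(val & bf->mask))
			continue;
		if (buf[0])
			strncat(buf, " ", len - strlen(buf) - 1);
		strncat(buf, bf->name, len - strlen(buf) - 1);
	}
}

int
main(void)
{
	char msg[128];

	demo_snprintbf(msg, sizeof(msg), demo_intr, 0x01100000);
	printf("intr [%s]\n", msg);	/* -> intr [ERROR CONTEXT_SWITCH] */
	return 0;
}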
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 22a5096e283d..3cad26dbc2b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,125 +1,125 @@
#include "nv20.h"
#include "regs.h"
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
/*******************************************************************************
* PGRAPH context
******************************************************************************/
+static const struct nvkm_object_func
+nv2a_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
+};
+
static int
-nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x033c, 0xffff0000);
- nv_wo32(chan, 0x03a0, 0x0fff0000);
- nv_wo32(chan, 0x03a4, 0x0fff0000);
- nv_wo32(chan, 0x047c, 0x00000101);
- nv_wo32(chan, 0x0490, 0x00000111);
- nv_wo32(chan, 0x04a8, 0x44400000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+ nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+ nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05a4, 0x4b7fffff);
- nv_wo32(chan, 0x05fc, 0x00000001);
- nv_wo32(chan, 0x0604, 0x00004000);
- nv_wo32(chan, 0x0610, 0x00000001);
- nv_wo32(chan, 0x0618, 0x00040000);
- nv_wo32(chan, 0x061c, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+ nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+ nvkm_wo32(chan->inst, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+ nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+ nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x269c, 0x3f800000);
- nv_wo32(chan, 0x26b0, 0x3f800000);
- nv_wo32(chan, 0x26dc, 0x40000000);
- nv_wo32(chan, 0x26e0, 0x3f800000);
- nv_wo32(chan, 0x26e4, 0x3f000000);
- nv_wo32(chan, 0x26ec, 0x40000000);
- nv_wo32(chan, 0x26f0, 0x3f800000);
- nv_wo32(chan, 0x26f8, 0xbf800000);
- nv_wo32(chan, 0x2700, 0xbf800000);
- nv_wo32(chan, 0x3024, 0x000fe000);
- nv_wo32(chan, 0x30a0, 0x000003f8);
- nv_wo32(chan, 0x33fc, 0x002fe000);
+ nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
+ nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
+ nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
+ nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
+ nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(chan, i, 0x001c527c);
+ nvkm_wo32(chan->inst, i, 0x001c527c);
+ nvkm_done(chan->inst);
return 0;
}
-static struct nvkm_oclass
-nv2a_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x2a),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv2a_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
+static const struct nvkm_gr_func
+nv2a_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv20_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv2a_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+ { -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv2a_gr_cclass;
- nv_engine(priv)->sclass = nv25_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
+int
+nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv2a_gr, device, index, pgr);
}
-
-struct nvkm_oclass
-nv2a_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x2a),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv2a_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv20_gr_init,
- .fini = _nvkm_gr_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index dcc84eb54fb6..69de8c6259fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,231 +1,198 @@
#include "nv20.h"
#include "regs.h"
-#include <core/device.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
#include <subdev/fb.h>
/*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
******************************************************************************/
-static struct nvkm_oclass
-nv30_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
- {},
+static const struct nvkm_object_func
+nv30_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
};
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
static int
-nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x0410, 0x00000101);
- nv_wo32(chan, 0x0424, 0x00000111);
- nv_wo32(chan, 0x0428, 0x00000060);
- nv_wo32(chan, 0x0444, 0x00000080);
- nv_wo32(chan, 0x0448, 0xffff0000);
- nv_wo32(chan, 0x044c, 0x00000001);
- nv_wo32(chan, 0x0460, 0x44400000);
- nv_wo32(chan, 0x048c, 0xffff0000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x0410, 0x00000101);
+ nvkm_wo32(chan->inst, 0x0424, 0x00000111);
+ nvkm_wo32(chan->inst, 0x0428, 0x00000060);
+ nvkm_wo32(chan->inst, 0x0444, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x044c, 0x00000001);
+ nvkm_wo32(chan->inst, 0x0460, 0x44400000);
+ nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04ec, 0x00011100);
+ nvkm_wo32(chan->inst, i, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x0550, 0x4b7fffff);
- nv_wo32(chan, 0x058c, 0x00000080);
- nv_wo32(chan, 0x0590, 0x30201000);
- nv_wo32(chan, 0x0594, 0x70605040);
- nv_wo32(chan, 0x0598, 0xb8a89888);
- nv_wo32(chan, 0x059c, 0xf8e8d8c8);
- nv_wo32(chan, 0x05b0, 0xb0000000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x058c, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0590, 0x30201000);
+ nvkm_wo32(chan->inst, 0x0594, 0x70605040);
+ nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
+ nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
+ nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x085c, 0x00040000);
- nv_wo32(chan, 0x0860, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x00080008);
+ nvkm_wo32(chan->inst, 0x085c, 0x00040000);
+ nvkm_wo32(chan->inst, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 1, 0x0436086c);
- nv_wo32(chan, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+ nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x344c, 0x3f800000);
- nv_wo32(chan, 0x3808, 0x3f800000);
- nv_wo32(chan, 0x381c, 0x3f800000);
- nv_wo32(chan, 0x3848, 0x40000000);
- nv_wo32(chan, 0x384c, 0x3f800000);
- nv_wo32(chan, 0x3850, 0x3f000000);
- nv_wo32(chan, 0x3858, 0x40000000);
- nv_wo32(chan, 0x385c, 0x3f800000);
- nv_wo32(chan, 0x3864, 0xbf800000);
- nv_wo32(chan, 0x386c, 0xbf800000);
+ nvkm_wo32(chan->inst, i, 0x0000ffff);
+ nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3848, 0x40000000);
+ nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x3858, 0x40000000);
+ nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
+ nvkm_done(chan->inst);
return 0;
}
-static struct nvkm_oclass
-nv30_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x30),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv30_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv30_gr_cclass;
- nv_engine(priv)->sclass = nv30_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
-}
-
int
-nv30_gr_init(struct nvkm_object *object)
+nv30_gr_init(struct nvkm_gr *base)
{
- struct nvkm_engine *engine = nv_engine(object);
- struct nv20_gr_priv *priv = (void *)engine;
- struct nvkm_fb *pfb = nvkm_fb(object);
- int ret, i;
-
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
-
- nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(priv, 0x400890, 0x01b463ff);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
- nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
- nv_wr32(priv, 0x400B80, 0x1003d888);
- nv_wr32(priv, 0x400B84, 0x0c000000);
- nv_wr32(priv, 0x400098, 0x00000000);
- nv_wr32(priv, 0x40009C, 0x0005ad00);
- nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
- nv_wr32(priv, 0x4000a0, 0x00000000);
- nv_wr32(priv, 0x4000a4, 0x00000008);
- nv_wr32(priv, 0x4008a8, 0xb784a400);
- nv_wr32(priv, 0x400ba0, 0x002f8685);
- nv_wr32(priv, 0x400ba4, 0x00231f3f);
- nv_wr32(priv, 0x4008a4, 0x40000020);
-
- if (nv_device(priv)->chipset == 0x34) {
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
- nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
- nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+ struct nv20_gr *gr = nv20_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ nvkm_memory_addr(gr->ctxtab) >> 4);
+
+ nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nvkm_wr32(device, 0x400890, 0x01b463ff);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ nvkm_wr32(device, 0x400B80, 0x1003d888);
+ nvkm_wr32(device, 0x400B84, 0x0c000000);
+ nvkm_wr32(device, 0x400098, 0x00000000);
+ nvkm_wr32(device, 0x40009C, 0x0005ad00);
+ nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+ nvkm_wr32(device, 0x4000a0, 0x00000000);
+ nvkm_wr32(device, 0x4000a4, 0x00000008);
+ nvkm_wr32(device, 0x4008a8, 0xb784a400);
+ nvkm_wr32(device, 0x400ba0, 0x002f8685);
+ nvkm_wr32(device, 0x400ba4, 0x00231f3f);
+ nvkm_wr32(device, 0x4008a4, 0x40000020);
+
+ if (device->chipset == 0x34) {
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002);
}
- nv_wr32(priv, 0x4000c0, 0x00000016);
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->tile.regions; i++)
- engine->tile_prog(engine, i);
+ nvkm_wr32(device, 0x4000c0, 0x00000016);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(priv, 0x0040075c , 0x00000001);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nvkm_wr32(device, 0x0040075c , 0x00000001);
/* begin RAM config */
- /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
- nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
- if (nv_device(priv)->chipset != 0x34) {
- nv_wr32(priv, 0x400750, 0x00EA0000);
- nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x400750, 0x00EA0004);
- nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+ /* vramsz = pci_resource_len(gr->dev->pdev, 1) - 1; */
+ nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+ if (device->chipset != 0x34) {
+ nvkm_wr32(device, 0x400750, 0x00EA0000);
+ nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x400750, 0x00EA0004);
+ nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204));
}
+
return 0;
}
-struct nvkm_oclass
-nv30_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x30),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv30_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv30_gr_init,
- .fini = _nvkm_gr_fini,
- },
+static const struct nvkm_gr_func
+nv30_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv30_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv30_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+ { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+ { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+ { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ {}
+ }
};
+
+int
+nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv30_gr, device, index, pgr);
+}
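
The nv30.c conversion above is representative of the whole series: the per-channel context image moves from an object wrapper written with nv_wo32() to a bare nvkm_memory allocation written through an explicit map/write/unmap sequence. A minimal sketch of that accessor pattern, with a made-up size and a single illustrative offset rather than the real nv30 context layout:

/* Sketch only: nvkm_kmap() maps the instance memory for CPU access,
 * nvkm_wo32() writes a 32-bit word at a byte offset, and nvkm_done()
 * flushes and unmaps.  Size/offset values here are placeholders. */
static int
example_ctx_fill(struct nvkm_device *device, int chid,
		 struct nvkm_memory **pinst)
{
	int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				  0x1000, 16, true, pinst);
	if (ret)
		return ret;

	nvkm_kmap(*pinst);
	nvkm_wo32(*pinst, 0x0028, 0x00000001 | (chid << 24));
	nvkm_done(*pinst);
	return 0;
}
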
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 985b7f3306ae..2207dac23981 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,159 +1,135 @@
#include "nv20.h"
#include "regs.h"
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
/*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
******************************************************************************/
-static struct nvkm_oclass
-nv34_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
- {},
+static const struct nvkm_object_func
+nv34_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
};
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
static int
-nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x040c, 0x01000101);
- nv_wo32(chan, 0x0420, 0x00000111);
- nv_wo32(chan, 0x0424, 0x00000060);
- nv_wo32(chan, 0x0440, 0x00000080);
- nv_wo32(chan, 0x0444, 0xffff0000);
- nv_wo32(chan, 0x0448, 0x00000001);
- nv_wo32(chan, 0x045c, 0x44400000);
- nv_wo32(chan, 0x0480, 0xffff0000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x040c, 0x01000101);
+ nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+ nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+ nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+ nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+ nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04e0, 0x00011100);
+ nvkm_wo32(chan->inst, i, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x0544, 0x4b7fffff);
- nv_wo32(chan, 0x057c, 0x00000080);
- nv_wo32(chan, 0x0580, 0x30201000);
- nv_wo32(chan, 0x0584, 0x70605040);
- nv_wo32(chan, 0x0588, 0xb8a89888);
- nv_wo32(chan, 0x058c, 0xf8e8d8c8);
- nv_wo32(chan, 0x05a0, 0xb0000000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x057c, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0580, 0x30201000);
+ nvkm_wo32(chan->inst, 0x0584, 0x70605040);
+ nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
+ nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
+ nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x0850, 0x00040000);
- nv_wo32(chan, 0x0854, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x00080008);
+ nvkm_wo32(chan->inst, 0x0850, 0x00040000);
+ nvkm_wo32(chan->inst, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 1, 0x0436086c);
- nv_wo32(chan, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+ nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x2ae0, 0x3f800000);
- nv_wo32(chan, 0x2e9c, 0x3f800000);
- nv_wo32(chan, 0x2eb0, 0x3f800000);
- nv_wo32(chan, 0x2edc, 0x40000000);
- nv_wo32(chan, 0x2ee0, 0x3f800000);
- nv_wo32(chan, 0x2ee4, 0x3f000000);
- nv_wo32(chan, 0x2eec, 0x40000000);
- nv_wo32(chan, 0x2ef0, 0x3f800000);
- nv_wo32(chan, 0x2ef8, 0xbf800000);
- nv_wo32(chan, 0x2f00, 0xbf800000);
+ nvkm_wo32(chan->inst, i, 0x0000ffff);
+ nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
+ nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
+ nvkm_done(chan->inst);
return 0;
}
-static struct nvkm_oclass
-nv34_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x34),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv34_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
+static const struct nvkm_gr_func
+nv34_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv30_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv34_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+ { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+ { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+ { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv34_gr_cclass;
- nv_engine(priv)->sclass = nv34_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
+int
+nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv34_gr, device, index, pgr);
}
-
-struct nvkm_oclass
-nv34_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x34),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv34_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv30_gr_init,
- .fini = _nvkm_gr_fini,
- },
-};
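
nv34.c gets the same treatment as nv30.c; the only substantive differences are the context image size (0x46dc rather than 0x5f48) and the rankine class id (0x0697). Note also that the per-chipset sclass arrays have changed shape: each entry now carries two leading version fields (both -1 here, which I read as "unversioned"), the hardware class, and a pointer to an nvkm_object_func, with an empty {} entry terminating the list. A toy lookup over a table of that shape, using a local stand-in struct rather than the real nvkm_sclass definition:

/* Local stand-in for the sclass entry layout; field names are my
 * reading of the tables above, not the canonical nvkm definition. */
struct example_sclass {
	int minver, maxver;	/* -1/-1 throughout these tables */
	u32 oclass;		/* hardware class, e.g. 0x0397 */
	const struct nvkm_object_func *func;
};

static const struct nvkm_object_func *
example_find(const struct example_sclass *tbl, u32 oclass)
{
	for (; tbl->oclass; tbl++)	/* empty {} entry terminates */
		if (tbl->oclass == oclass)
			return tbl->func;
	return NULL;
}
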
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 707625f19ff5..740df0f52c38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,159 +1,135 @@
#include "nv20.h"
#include "regs.h"
+#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <engine/fifo/chan.h>
/*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
******************************************************************************/
-static struct nvkm_oclass
-nv35_gr_sclass[] = {
- { 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
- { 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
- { 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
- { 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
- { 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
- { 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
- {},
+static const struct nvkm_object_func
+nv35_gr_chan = {
+ .dtor = nv20_gr_chan_dtor,
+ .init = nv20_gr_chan_init,
+ .fini = nv20_gr_chan_fini,
};
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
static int
-nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
+ struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ chan->chid = fifoch->chid;
+ *pobject = &chan->object;
+
+ ret = nvkm_memory_new(gr->base.engine.subdev.device,
+ NVKM_MEM_TARGET_INST, 0x577c, 16, true,
+ &chan->inst);
if (ret)
return ret;
- chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x040c, 0x00000101);
- nv_wo32(chan, 0x0420, 0x00000111);
- nv_wo32(chan, 0x0424, 0x00000060);
- nv_wo32(chan, 0x0440, 0x00000080);
- nv_wo32(chan, 0x0444, 0xffff0000);
- nv_wo32(chan, 0x0448, 0x00000001);
- nv_wo32(chan, 0x045c, 0x44400000);
- nv_wo32(chan, 0x0488, 0xffff0000);
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(chan->inst, 0x040c, 0x00000101);
+ nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+ nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+ nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+ nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+ nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+ nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+ nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04e8, 0x00011100);
+ nvkm_wo32(chan->inst, i, 0x0fff0000);
+ nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x054c, 0x4b7fffff);
- nv_wo32(chan, 0x0588, 0x00000080);
- nv_wo32(chan, 0x058c, 0x30201000);
- nv_wo32(chan, 0x0590, 0x70605040);
- nv_wo32(chan, 0x0594, 0xb8a89888);
- nv_wo32(chan, 0x0598, 0xf8e8d8c8);
- nv_wo32(chan, 0x05ac, 0xb0000000);
+ nvkm_wo32(chan->inst, i, 0x07ff0000);
+ nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
+ nvkm_wo32(chan->inst, 0x0588, 0x00000080);
+ nvkm_wo32(chan->inst, 0x058c, 0x30201000);
+ nvkm_wo32(chan->inst, 0x0590, 0x70605040);
+ nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
+ nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
+ nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x0860, 0x00040000);
- nv_wo32(chan, 0x0864, 0x00010000);
+ nvkm_wo32(chan->inst, i, 0x00080008);
+ nvkm_wo32(chan->inst, 0x0860, 0x00040000);
+ nvkm_wo32(chan->inst, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 4, 0x0436086c);
- nv_wo32(chan, i + 8, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x3450, 0x3f800000);
- nv_wo32(chan, 0x380c, 0x3f800000);
- nv_wo32(chan, 0x3820, 0x3f800000);
- nv_wo32(chan, 0x384c, 0x40000000);
- nv_wo32(chan, 0x3850, 0x3f800000);
- nv_wo32(chan, 0x3854, 0x3f000000);
- nv_wo32(chan, 0x385c, 0x40000000);
- nv_wo32(chan, 0x3860, 0x3f800000);
- nv_wo32(chan, 0x3868, 0xbf800000);
- nv_wo32(chan, 0x3870, 0xbf800000);
+ nvkm_wo32(chan->inst, i, 0x0000ffff);
+ nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x384c, 0x40000000);
+ nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
+ nvkm_wo32(chan->inst, 0x385c, 0x40000000);
+ nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
+ nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
+ nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
+ nvkm_done(chan->inst);
return 0;
}
-static struct nvkm_oclass
-nv35_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x35),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv35_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = nv20_gr_context_init,
- .fini = nv20_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
-nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv20_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
- if (ret)
- return ret;
+static const struct nvkm_gr_func
+nv35_gr = {
+ .dtor = nv20_gr_dtor,
+ .oneinit = nv20_gr_oneinit,
+ .init = nv30_gr_init,
+ .intr = nv20_gr_intr,
+ .tile = nv20_gr_tile,
+ .chan_new = nv35_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv04_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+ { -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+ { -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+ { -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+ { -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv20_gr_intr;
- nv_engine(priv)->cclass = &nv35_gr_cclass;
- nv_engine(priv)->sclass = nv35_gr_sclass;
- nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
- return 0;
+int
+nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv20_gr_new_(&nv35_gr, device, index, pgr);
}
-
-struct nvkm_oclass
-nv35_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x35),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv35_gr_ctor,
- .dtor = nv20_gr_dtor,
- .init = nv30_gr_init,
- .fini = _nvkm_gr_fini,
- },
-};
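
nv35.c follows the identical pattern with its own context size (0x577c) and rankine variant (0x0497). Across all three files the first word the driver writes, at byte offset 0x0028 into the image, encodes the owning channel: bit 0 appears to act as a valid/enable flag and the channel id lands in the top byte. A worked example of that encoding:

/* Channel-id encoding used at offset 0x28 in the context images
 * above; treating bit 0 as a valid flag is my assumption. */
u32 chid = 5;
u32 word = 0x00000001 | (chid << 24);	/* == 0x05000001 */
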
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 7e1937980e3f..ffa902ece872 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -25,26 +25,15 @@
#include "regs.h"
#include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
-struct nv40_gr_priv {
- struct nvkm_gr base;
- u32 size;
-};
-
-struct nv40_gr_chan {
- struct nvkm_gr_chan base;
-};
-
-static u64
+u64
nv40_gr_units(struct nvkm_gr *gr)
{
- struct nv40_gr_priv *priv = (void *)gr;
-
- return nv_rd32(priv, 0x1540);
+ return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}
/*******************************************************************************
@@ -52,80 +41,29 @@ nv40_gr_units(struct nvkm_gr *gr)
******************************************************************************/
static int
-nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *obj;
- int ret;
-
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 20, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
+ false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+ nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
- nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+ nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
- nv_wo32(obj, 0x0c, 0x00000000);
- nv_wo32(obj, 0x10, 0x00000000);
- return 0;
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static struct nvkm_ofuncs
-nv40_gr_ofuncs = {
- .ctor = nv40_gr_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv40_gr_sclass[] = {
- { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
- { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
- { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
- { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
- { 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
- {},
-};
-
-static struct nvkm_oclass
-nv44_gr_sclass[] = {
- { 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
- { 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
- { 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
- { 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
- { 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
- { 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
- { 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
- { 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
- { 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
- { 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
- { 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
- { 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
- { 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
- { 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
- { 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
- { 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
- {},
+const struct nvkm_object_func
+nv40_gr_object = {
+ .bind = nv40_gr_object_bind,
};
/*******************************************************************************
@@ -133,361 +71,334 @@ nv44_gr_sclass[] = {
******************************************************************************/
static int
-nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nv40_gr_priv *priv = (void *)engine;
- struct nv40_gr_chan *chan;
- int ret;
-
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
- nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
- return 0;
+ struct nv40_gr_chan *chan = nv40_gr_chan(object);
+ struct nv40_gr *gr = chan->gr;
+ int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+ align, true, parent, pgpuobj);
+ if (ret == 0) {
+ chan->inst = (*pgpuobj)->addr;
+ nvkm_kmap(*pgpuobj);
+ nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
static int
-nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
- struct nv40_gr_priv *priv = (void *)object->engine;
- struct nv40_gr_chan *chan = (void *)object;
- u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+ struct nv40_gr_chan *chan = nv40_gr_chan(object);
+ struct nv40_gr *gr = chan->gr;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 inst = 0x01000000 | chan->inst >> 4;
int ret = 0;
- nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
- if (nv_rd32(priv, 0x40032c) == inst) {
+ if (nvkm_rd32(device, 0x40032c) == inst) {
if (suspend) {
- nv_wr32(priv, 0x400720, 0x00000000);
- nv_wr32(priv, 0x400784, inst);
- nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
- nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
- if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
- u32 insn = nv_rd32(priv, 0x400308);
- nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+ nvkm_wr32(device, 0x400720, 0x00000000);
+ nvkm_wr32(device, 0x400784, inst);
+ nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+ nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+ break;
+ ) < 0) {
+ u32 insn = nvkm_rd32(device, 0x400308);
+ nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
ret = -EBUSY;
}
}
- nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+ nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
}
- if (nv_rd32(priv, 0x400330) == inst)
- nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+ if (nvkm_rd32(device, 0x400330) == inst)
+ nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
- nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
return ret;
}
-static struct nvkm_oclass
-nv40_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = nv40_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
+static void *
+nv40_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct nv40_gr_chan *chan = nv40_gr_chan(object);
+ unsigned long flags;
+ spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+ list_del(&chan->head);
+ spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+ return chan;
+}
+
+static const struct nvkm_object_func
+nv40_gr_chan = {
+ .dtor = nv40_gr_chan_dtor,
+ .fini = nv40_gr_chan_fini,
+ .bind = nv40_gr_chan_bind,
};
+int
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+ struct nv40_gr *gr = nv40_gr(base);
+ struct nv40_gr_chan *chan;
+ unsigned long flags;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ *pobject = &chan->object;
+
+ spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+ list_add(&chan->head, &gr->chan);
+ spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+ return 0;
+}
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static void
-nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
- struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
- struct nvkm_fifo *pfifo = nvkm_fifo(engine);
- struct nv40_gr_priv *priv = (void *)engine;
+ struct nv40_gr *gr = nv40_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
unsigned long flags;
- pfifo->pause(pfifo, &flags);
- nv04_gr_idle(priv);
+ nvkm_fifo_pause(fifo, &flags);
+ nv04_gr_idle(&gr->base);
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
- case 0x4e:
- nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
- switch (nv_device(priv)->chipset) {
+ nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (device->chipset) {
case 0x40:
case 0x45:
- nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
- nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+ nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
break;
case 0x41:
case 0x42:
case 0x43:
- nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
- nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+ nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
break;
default:
break;
}
break;
- case 0x44:
- case 0x4a:
- nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
- break;
- case 0x46:
- case 0x4c:
case 0x47:
case 0x49:
case 0x4b:
- case 0x63:
- case 0x67:
- case 0x68:
- nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
- nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
- switch (nv_device(priv)->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
- nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
- break;
- default:
- break;
- }
+ nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+ nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
break;
default:
+ WARN_ON(1);
break;
}
- pfifo->start(pfifo, &flags);
+ nvkm_fifo_start(fifo, &flags);
}
-static void
-nv40_gr_intr(struct nvkm_subdev *subdev)
+void
+nv40_gr_intr(struct nvkm_gr *base)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct nvkm_handle *handle = NULL;
- struct nv40_gr_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
- u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
- u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
- u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ struct nv40_gr *gr = nv40_gr(base);
+ struct nv40_gr_chan *temp, *chan = NULL;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+ u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+ u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
+ u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+ u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
u32 show = stat;
- int chid;
-
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
+ char msg[128], src[128], sta[128];
+ unsigned long flags;
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- handle = nvkm_handle_get_class(engctx, class);
- if (handle && !nv_call(handle->object, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- nvkm_handle_put(handle);
+ spin_lock_irqsave(&gr->base.engine.lock, flags);
+ list_for_each_entry(temp, &gr->chan, head) {
+ if (temp->inst >> 4 == inst) {
+ chan = temp;
+ list_del(&chan->head);
+ list_add(&chan->head, &gr->chan);
+ break;
}
+ }
+ if (stat & NV_PGRAPH_INTR_ERROR) {
if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- nv_mask(priv, 0x402000, 0, 0);
+ nvkm_mask(device, 0x402000, 0, 0);
}
}
- nv_wr32(priv, NV03_PGRAPH_INTR, stat);
- nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+ nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+ nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(nv10_gr_intr_name, show);
- pr_cont(" nsource:");
- nvkm_bitfield_print(nv04_gr_nsource, nsource);
- pr_cont(" nstatus:");
- nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
- pr_cont("\n");
- nv_error(priv,
- "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst << 4, nvkm_client_name(engctx), subc,
- class, mthd, data);
+ nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+ nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+ nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+ nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+ "nstatus %08x [%s] ch %d [%08x %s] subc %d "
+ "class %04x mthd %04x data %08x\n",
+ show, msg, nsource, src, nstatus, sta,
+ chan ? chan->fifo->chid : -1, inst << 4,
+ chan ? chan->fifo->object.client->name : "unknown",
+ subc, class, mthd, data);
}
- nvkm_engctx_put(engctx);
+ spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}
-static int
-nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv40_gr_init(struct nvkm_gr *base)
{
- struct nv40_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00001000;
- nv_subdev(priv)->intr = nv40_gr_intr;
- nv_engine(priv)->cclass = &nv40_gr_cclass;
- if (nv44_gr_class(priv))
- nv_engine(priv)->sclass = nv44_gr_sclass;
- else
- nv_engine(priv)->sclass = nv40_gr_sclass;
- nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
-
- priv->base.units = nv40_gr_units;
- return 0;
-}
-
-static int
-nv40_gr_init(struct nvkm_object *object)
-{
- struct nvkm_engine *engine = nv_engine(object);
- struct nvkm_fb *pfb = nvkm_fb(object);
- struct nv40_gr_priv *priv = (void *)engine;
+ struct nv40_gr *gr = nv40_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int ret, i, j;
u32 vramsz;
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
-
/* generate and upload context program */
- ret = nv40_grctx_init(nv_device(priv), &priv->size);
+ ret = nv40_grctx_init(device, &gr->size);
if (ret)
return ret;
/* No context present currently */
- nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+ nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
- nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+ nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
- nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
- nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nvkm_wr32(device, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- j = nv_rd32(priv, 0x1540) & 0xff;
+ j = nvkm_rd32(device, 0x1540) & 0xff;
if (j) {
for (i = 0; !(j & 1); j >>= 1, i++)
;
- nv_wr32(priv, 0x405000, i);
+ nvkm_wr32(device, 0x405000, i);
}
- if (nv_device(priv)->chipset == 0x40) {
- nv_wr32(priv, 0x4009b0, 0x83280fff);
- nv_wr32(priv, 0x4009b4, 0x000000a0);
+ if (device->chipset == 0x40) {
+ nvkm_wr32(device, 0x4009b0, 0x83280fff);
+ nvkm_wr32(device, 0x4009b4, 0x000000a0);
} else {
- nv_wr32(priv, 0x400820, 0x83280eff);
- nv_wr32(priv, 0x400824, 0x000000a0);
+ nvkm_wr32(device, 0x400820, 0x83280eff);
+ nvkm_wr32(device, 0x400824, 0x000000a0);
}
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x40:
case 0x45:
- nv_wr32(priv, 0x4009b8, 0x0078e366);
- nv_wr32(priv, 0x4009bc, 0x0000014c);
+ nvkm_wr32(device, 0x4009b8, 0x0078e366);
+ nvkm_wr32(device, 0x4009bc, 0x0000014c);
break;
case 0x41:
case 0x42: /* pciid also 0x00Cx */
/* case 0x0120: XXX (pciid) */
- nv_wr32(priv, 0x400828, 0x007596ff);
- nv_wr32(priv, 0x40082c, 0x00000108);
+ nvkm_wr32(device, 0x400828, 0x007596ff);
+ nvkm_wr32(device, 0x40082c, 0x00000108);
break;
case 0x43:
- nv_wr32(priv, 0x400828, 0x0072cb77);
- nv_wr32(priv, 0x40082c, 0x00000108);
+ nvkm_wr32(device, 0x400828, 0x0072cb77);
+ nvkm_wr32(device, 0x40082c, 0x00000108);
break;
case 0x44:
case 0x46: /* G72 */
case 0x4a:
case 0x4c: /* G7x-based C51 */
case 0x4e:
- nv_wr32(priv, 0x400860, 0);
- nv_wr32(priv, 0x400864, 0);
+ nvkm_wr32(device, 0x400860, 0);
+ nvkm_wr32(device, 0x400864, 0);
break;
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
- nv_wr32(priv, 0x400828, 0x07830610);
- nv_wr32(priv, 0x40082c, 0x0000016A);
+ nvkm_wr32(device, 0x400828, 0x07830610);
+ nvkm_wr32(device, 0x40082c, 0x0000016A);
break;
default:
break;
}
- nv_wr32(priv, 0x400b38, 0x2ffff800);
- nv_wr32(priv, 0x400b3c, 0x00006000);
+ nvkm_wr32(device, 0x400b38, 0x2ffff800);
+ nvkm_wr32(device, 0x400b3c, 0x00006000);
/* Tiling related stuff. */
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x44:
case 0x4a:
- nv_wr32(priv, 0x400bc4, 0x1003d888);
- nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+ nvkm_wr32(device, 0x400bc4, 0x1003d888);
+ nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
break;
case 0x46:
- nv_wr32(priv, 0x400bc4, 0x0000e024);
- nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+ nvkm_wr32(device, 0x400bc4, 0x0000e024);
+ nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
break;
case 0x4c:
case 0x4e:
case 0x67:
- nv_wr32(priv, 0x400bc4, 0x1003d888);
- nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+ nvkm_wr32(device, 0x400bc4, 0x1003d888);
+ nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
break;
default:
break;
}
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->tile.regions; i++)
- engine->tile_prog(engine, i);
-
/* begin RAM config */
- vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
- switch (nv_device(priv)->chipset) {
+ vramsz = device->func->resource_size(device, 1) - 1;
+ switch (device->chipset) {
case 0x40:
- nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
- nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
- nv_wr32(priv, 0x400820, 0);
- nv_wr32(priv, 0x400824, 0);
- nv_wr32(priv, 0x400864, vramsz);
- nv_wr32(priv, 0x400868, vramsz);
+ nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+ nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
+ nvkm_wr32(device, 0x400820, 0);
+ nvkm_wr32(device, 0x400824, 0);
+ nvkm_wr32(device, 0x400864, vramsz);
+ nvkm_wr32(device, 0x400868, vramsz);
break;
default:
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x41:
case 0x42:
case 0x43:
@@ -495,33 +406,70 @@ nv40_gr_init(struct nvkm_object *object)
case 0x4e:
case 0x44:
case 0x4a:
- nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+ nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
break;
default:
- nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+ nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
break;
}
- nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
- nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
- nv_wr32(priv, 0x400840, 0);
- nv_wr32(priv, 0x400844, 0);
- nv_wr32(priv, 0x4008A0, vramsz);
- nv_wr32(priv, 0x4008A4, vramsz);
+ nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
+ nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
+ nvkm_wr32(device, 0x400840, 0);
+ nvkm_wr32(device, 0x400844, 0);
+ nvkm_wr32(device, 0x4008A0, vramsz);
+ nvkm_wr32(device, 0x4008A4, vramsz);
break;
}
return 0;
}
-struct nvkm_oclass
-nv40_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_gr_ctor,
- .dtor = _nvkm_gr_dtor,
- .init = nv40_gr_init,
- .fini = _nvkm_gr_fini,
- },
+int
+nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
+{
+ struct nv40_gr *gr;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ *pgr = &gr->base;
+ INIT_LIST_HEAD(&gr->chan);
+
+ return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv40_gr = {
+ .init = nv40_gr_init,
+ .intr = nv40_gr_intr,
+ .tile = nv40_gr_tile,
+ .units = nv40_gr_units,
+ .chan_new = nv40_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv40_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+ { -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+ { -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+ { -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+ { -1, -1, 0x4097, &nv40_gr_object }, /* curie */
+ {}
+ }
};
+
+int
+nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv40_gr_new_(&nv40_gr, device, index, pgr);
+}
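
The nv40.c rewrite is larger because this family has no static context table: channels are now tracked on a driver-private list (gr->chan, under the engine lock), and the interrupt handler resolves the faulting context by matching the reported instance address against chan->inst, rotating the hit to the list head as a simple most-recently-used optimization. The old nv_wait() poll in the suspend path is replaced by the nvkm_msec() macro, which re-evaluates its statement body until the body executes break or the time budget (2000, read as milliseconds per the name) expires, yielding a negative value on timeout. The same idiom as in nv40_gr_chan_fini() above, annotated:

/* Sketch of the nvkm_msec() polling idiom; register 0x400300 bit 0
 * as the ctxprog-busy indication is taken from the hunk above. */
if (nvkm_msec(device, 2000,
	if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
		break;		/* ctxprog went idle: success */
) < 0)
	ret = -EBUSY;		/* timed out, fail the suspend */
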
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index d852bd6de571..2812ed11f877 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,22 +1,45 @@
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
-#include <engine/gr.h>
+#define nv40_gr(p) container_of((p), struct nv40_gr, base)
+#include "priv.h"
-#include <core/device.h>
-struct nvkm_gpuobj;
+struct nv40_gr {
+ struct nvkm_gr base;
+ u32 size;
+ struct list_head chan;
+};
+
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+ struct nvkm_gr **);
+int nv40_gr_init(struct nvkm_gr *);
+void nv40_gr_intr(struct nvkm_gr *);
+u64 nv40_gr_units(struct nvkm_gr *);
+
+#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+
+struct nv40_gr_chan {
+ struct nvkm_object object;
+ struct nv40_gr *gr;
+ struct nvkm_fifo_chan *fifo;
+ u32 inst;
+ struct list_head head;
+};
+
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv40_gr_object;
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
static inline int
-nv44_gr_class(void *priv)
+nv44_gr_class(struct nvkm_device *device)
{
- struct nvkm_device *device = nv_device(priv);
-
if ((device->chipset & 0xf0) == 0x60)
return 1;
- return !(0x0baf & (1 << (device->chipset & 0x0f)));
+ return !(0x0aaf & (1 << (device->chipset & 0x0f)));
}
int nv40_grctx_init(struct nvkm_device *, u32 *size);
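
Note the behavioral change hiding in the nv44_gr_class() hunk: the mask shrinks from 0x0baf to 0x0aaf, clearing bit 8 and thereby moving chipset 0x48 into the nv44 group. The test itself indexes the mask by the low nibble of the chipset id:

/* Worked examples for nv44_gr_class() (0x0aaf sets bits 0-3, 5, 7,
 * 9 and 11; a set bit means "plain" nv40-class, a clear bit or a
 * 0x60-family chipset means the nv44 class using object 0x4497):
 *   chipset 0x40 -> bit 0 set in mask   -> returns 0 (nv40-class)
 *   chipset 0x44 -> bit 4 clear in mask -> returns 1 (nv44-class)
 *   chipset 0x63 -> 0x60 family         -> returns 1 */
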
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
new file mode 100644
index 000000000000..45ff80254eb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+#include "regs.h"
+
+#include <subdev/fb.h>
+#include <engine/fifo.h>
+
+static void
+nv44_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
+{
+ struct nv40_gr *gr = nv40_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
+ unsigned long flags;
+
+ nvkm_fifo_pause(fifo, &flags);
+ nv04_gr_idle(&gr->base);
+
+ switch (device->chipset) {
+ case 0x44:
+ case 0x4a:
+ nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+ break;
+ case 0x46:
+ case 0x4c:
+ case 0x63:
+ case 0x67:
+ case 0x68:
+ nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
+ case 0x4e:
+ nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+ nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ nvkm_fifo_start(fifo, &flags);
+}
+
+static const struct nvkm_gr_func
+nv44_gr = {
+ .init = nv40_gr_init,
+ .intr = nv40_gr_intr,
+ .tile = nv44_gr_tile,
+ .units = nv40_gr_units,
+ .chan_new = nv40_gr_chan_new,
+ .sclass = {
+ { -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+ { -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+ { -1, -1, 0x0030, &nv40_gr_object }, /* null */
+ { -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+ { -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+ { -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+ { -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+ { -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+ { -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+ { -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+ { -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+ { -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+ { -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+ { -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+ { -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+ { -1, -1, 0x4497, &nv40_gr_object }, /* curie */
+ {}
+ }
+};
+
+int
+nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv40_gr_new_(&nv44_gr, device, index, pgr);
+}
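
The sclass[] table in nv44_gr above is terminated by the empty {} entry rather than carrying an explicit length, so lookup code walks it until the zeroed sentinel. A self-contained sketch of that convention; the struct layout, lookup helper and names are invented for the example, not the nvkm API:

#include <stdio.h>

struct sclass {
	int minver, maxver;	/* the -1, -1 columns in the tables above */
	unsigned oclass;
	const char *name;
};

static const struct sclass nv44_sclass[] = {
	{ -1, -1, 0x0030, "null" },
	{ -1, -1, 0x4497, "curie" },
	{ 0 }	/* sentinel: oclass == 0 ends the walk (written {} above) */
};

/* Scan until the sentinel; no length field needed. */
static const struct sclass *
sclass_find(const struct sclass *s, unsigned oclass)
{
	for (; s->oclass; s++)
		if (s->oclass == oclass)
			return s;
	return NULL;
}

int main(void)
{
	const struct sclass *s = sclass_find(nv44_sclass, 0x4497);
	printf("0x4497 -> %s\n", s ? s->name : "not found");
	return 0;
}
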
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 270d7cd63fc7..b19b912d5787 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -24,27 +24,13 @@
#include "nv50.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
-#include <subdev/timer.h>
-struct nv50_gr_priv {
- struct nvkm_gr base;
- spinlock_t lock;
- u32 size;
-};
-
-struct nv50_gr_chan {
- struct nvkm_gr_chan base;
-};
-
-static u64
+u64
nv50_gr_units(struct nvkm_gr *gr)
{
- struct nv50_gr_priv *priv = (void *)gr;
-
- return nv_rd32(priv, 0x1540);
+ return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}
/*******************************************************************************
@@ -52,86 +38,25 @@ nv50_gr_units(struct nvkm_gr *gr)
******************************************************************************/
static int
-nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *obj;
- int ret;
-
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- return 0;
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+ align, false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+ nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static struct nvkm_ofuncs
-nv50_gr_ofuncs = {
- .ctor = nv50_gr_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_gr_sclass[] = {
- { 0x0030, &nv50_gr_ofuncs },
- { 0x502d, &nv50_gr_ofuncs },
- { 0x5039, &nv50_gr_ofuncs },
- { 0x5097, &nv50_gr_ofuncs },
- { 0x50c0, &nv50_gr_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-g84_gr_sclass[] = {
- { 0x0030, &nv50_gr_ofuncs },
- { 0x502d, &nv50_gr_ofuncs },
- { 0x5039, &nv50_gr_ofuncs },
- { 0x50c0, &nv50_gr_ofuncs },
- { 0x8297, &nv50_gr_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-gt200_gr_sclass[] = {
- { 0x0030, &nv50_gr_ofuncs },
- { 0x502d, &nv50_gr_ofuncs },
- { 0x5039, &nv50_gr_ofuncs },
- { 0x50c0, &nv50_gr_ofuncs },
- { 0x8397, &nv50_gr_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-gt215_gr_sclass[] = {
- { 0x0030, &nv50_gr_ofuncs },
- { 0x502d, &nv50_gr_ofuncs },
- { 0x5039, &nv50_gr_ofuncs },
- { 0x50c0, &nv50_gr_ofuncs },
- { 0x8597, &nv50_gr_ofuncs },
- { 0x85c0, &nv50_gr_ofuncs },
- {}
-};
-
-static struct nvkm_oclass
-mcp89_gr_sclass[] = {
- { 0x0030, &nv50_gr_ofuncs },
- { 0x502d, &nv50_gr_ofuncs },
- { 0x5039, &nv50_gr_ofuncs },
- { 0x50c0, &nv50_gr_ofuncs },
- { 0x85c0, &nv50_gr_ofuncs },
- { 0x8697, &nv50_gr_ofuncs },
- {}
+const struct nvkm_object_func
+nv50_gr_object = {
+ .bind = nv50_gr_object_bind,
};
/*******************************************************************************
@@ -139,160 +64,43 @@ mcp89_gr_sclass[] = {
******************************************************************************/
static int
-nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nv50_gr_priv *priv = (void *)engine;
- struct nv50_gr_chan *chan;
- int ret;
-
- ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
- return 0;
+ struct nv50_gr *gr = nv50_gr_chan(object)->gr;
+ int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+ align, true, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static struct nvkm_oclass
-nv50_gr_cclass = {
- .handle = NV_ENGCTX(GR, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_gr_context_ctor,
- .dtor = _nvkm_gr_context_dtor,
- .init = _nvkm_gr_context_init,
- .fini = _nvkm_gr_context_fini,
- .rd32 = _nvkm_gr_context_rd32,
- .wr32 = _nvkm_gr_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PGRAPH engine/subdev functions
- ******************************************************************************/
-
-static const struct nvkm_bitfield nv50_pgr_status[] = {
- { 0x00000001, "BUSY" }, /* set when any bit is set */
- { 0x00000002, "DISPATCH" },
- { 0x00000004, "UNK2" },
- { 0x00000008, "UNK3" },
- { 0x00000010, "UNK4" },
- { 0x00000020, "UNK5" },
- { 0x00000040, "M2MF" },
- { 0x00000080, "UNK7" },
- { 0x00000100, "CTXPROG" },
- { 0x00000200, "VFETCH" },
- { 0x00000400, "CCACHE_PREGEOM" },
- { 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
- { 0x00001000, "VCLIP" },
- { 0x00002000, "RATTR_APLANE" },
- { 0x00004000, "TRAST" },
- { 0x00008000, "CLIPID" },
- { 0x00010000, "ZCULL" },
- { 0x00020000, "ENG2D" },
- { 0x00040000, "RMASK" },
- { 0x00080000, "TPC_RAST" },
- { 0x00100000, "TPC_PROP" },
- { 0x00200000, "TPC_TEX" },
- { 0x00400000, "TPC_GEOM" },
- { 0x00800000, "TPC_MP" },
- { 0x01000000, "ROP" },
- {}
-};
-
-static const char *const nv50_pgr_vstatus_0[] = {
- "VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
- NULL
-};
-
-static const char *const nv50_pgr_vstatus_1[] = {
- "TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
-};
-
-static const char *const nv50_pgr_vstatus_2[] = {
- "RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
- "ROP", NULL
+static const struct nvkm_object_func
+nv50_gr_chan = {
+ .bind = nv50_gr_chan_bind,
};
-static void
-nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
- const char *const units[], u32 status)
+int
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- int i;
-
- nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+ struct nv50_gr *gr = nv50_gr(base);
+ struct nv50_gr_chan *chan;
- for (i = 0; units[i] && status; i++) {
- if ((status & 7) == 1)
- pr_cont(" %s", units[i]);
- status >>= 3;
- }
- if (status)
- pr_cont(" (invalid: 0x%x)", status);
- pr_cont("\n");
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
+ chan->gr = gr;
+ *pobject = &chan->object;
+ return 0;
}
-static int
-g84_gr_tlb_flush(struct nvkm_engine *engine)
-{
- struct nvkm_timer *ptimer = nvkm_timer(engine);
- struct nv50_gr_priv *priv = (void *)engine;
- bool idle, timeout = false;
- unsigned long flags;
- u64 start;
- u32 tmp;
-
- spin_lock_irqsave(&priv->lock, flags);
- nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
-
- start = ptimer->read(ptimer);
- do {
- idle = true;
-
- for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
-
- for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
-
- for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
- } while (!idle &&
- !(timeout = ptimer->read(ptimer) - start > 2000000000));
-
- if (timeout) {
- nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
-
- tmp = nv_rd32(priv, 0x400700);
- nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
- nvkm_bitfield_print(nv50_pgr_status, tmp);
- pr_cont("\n");
-
- nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
- nv_rd32(priv, 0x400380));
- nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
- nv_rd32(priv, 0x400384));
- nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
- nv_rd32(priv, 0x400388));
- }
-
-
- nv_wr32(priv, 0x100c80, 0x00000001);
- if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
- nv_error(priv, "vm flush timeout\n");
- nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&priv->lock, flags);
- return timeout ? -EBUSY : 0;
-}
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
{ 0x01, "STACK_UNDERFLOW" },
@@ -427,157 +235,172 @@ static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
};
static void
-nv50_priv_prop_trap(struct nv50_gr_priv *priv,
- u32 ustatus_addr, u32 ustatus, u32 tp)
+nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
- u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
- u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
- u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
- u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
- u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
- u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
- u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
+ u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
+ u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
+ u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
+ u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
+ u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
+ u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
+ char msg[128];
/* CUDA memory: l[], g[] or stack. */
if (ustatus & 0x00000080) {
if (e18 & 0x80000000) {
/* g[] read fault? */
- nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+ nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
tp, e14, e10 | ((e18 >> 24) & 0x1f));
e18 &= ~0x1f000000;
} else if (e18 & 0xc) {
/* g[] write fault? */
- nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+ nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
tp, e14, e10 | ((e18 >> 7) & 0x1f));
e18 &= ~0x00000f80;
} else {
- nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
tp, e14, e10);
}
ustatus &= ~0x00000080;
}
if (ustatus) {
- nv_error(priv, "TRAP_PROP - TP %d -", tp);
- nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
- pr_cont(" - Address %02x%08x\n", e14, e10);
+ nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
+ nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
+ "Address %02x%08x\n",
+ tp, ustatus, msg, e14, e10);
}
- nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
tp, e0c, e18, e1c, e20, e24);
}
static void
-nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
+nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
- u32 units = nv_rd32(priv, 0x1540);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 units = nvkm_rd32(device, 0x1540);
u32 addr, mp10, status, pc, oplow, ophigh;
+ char msg[128];
int i;
int mps = 0;
for (i = 0; i < 4; i++) {
if (!(units & 1 << (i+24)))
continue;
- if (nv_device(priv)->chipset < 0xa0)
+ if (device->chipset < 0xa0)
addr = 0x408200 + (tpid << 12) + (i << 7);
else
addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(priv, addr + 0x10);
- status = nv_rd32(priv, addr + 0x14);
+ mp10 = nvkm_rd32(device, addr + 0x10);
+ status = nvkm_rd32(device, addr + 0x14);
if (!status)
continue;
if (display) {
- nv_rd32(priv, addr + 0x20);
- pc = nv_rd32(priv, addr + 0x24);
- oplow = nv_rd32(priv, addr + 0x70);
- ophigh = nv_rd32(priv, addr + 0x74);
- nv_error(priv, "TRAP_MP_EXEC - "
- "TP %d MP %d:", tpid, i);
- nvkm_bitfield_print(nv50_mp_exec_errors, status);
- pr_cont(" at %06x warp %d, opcode %08x %08x\n",
- pc&0xffffff, pc >> 24,
- oplow, ophigh);
+ nvkm_rd32(device, addr + 0x20);
+ pc = nvkm_rd32(device, addr + 0x24);
+ oplow = nvkm_rd32(device, addr + 0x70);
+ ophigh = nvkm_rd32(device, addr + 0x74);
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_mp_exec_errors, status);
+ nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
+ "%08x [%s] at %06x warp %d, "
+ "opcode %08x %08x\n",
+ tpid, i, status, msg, pc & 0xffffff,
+ pc >> 24, oplow, ophigh);
}
- nv_wr32(priv, addr + 0x10, mp10);
- nv_wr32(priv, addr + 0x14, 0);
+ nvkm_wr32(device, addr + 0x10, mp10);
+ nvkm_wr32(device, addr + 0x14, 0);
mps++;
}
if (!mps && display)
- nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+ nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
"No MPs claiming errors?\n", tpid);
}
static void
-nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
+nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
u32 ustatus_new, int display, const char *name)
{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 units = nvkm_rd32(device, 0x1540);
int tps = 0;
- u32 units = nv_rd32(priv, 0x1540);
int i, r;
+ char msg[128];
u32 ustatus_addr, ustatus;
for (i = 0; i < 16; i++) {
if (!(units & (1 << i)))
continue;
- if (nv_device(priv)->chipset < 0xa0)
+ if (device->chipset < 0xa0)
ustatus_addr = ustatus_old + (i << 12);
else
ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+ ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
if (!ustatus)
continue;
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
if (display) {
- nv_error(priv, "magic set %d:\n", i);
+ nvkm_error(subdev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- nv_error(priv, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(priv, r));
+ nvkm_error(subdev, "\t%08x: %08x\n", r,
+ nvkm_rd32(device, r));
if (ustatus) {
- nv_error(priv, "%s - TP%d:", name, i);
- nvkm_bitfield_print(nv50_tex_traps,
- ustatus);
- pr_cont("\n");
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_tex_traps, ustatus);
+ nvkm_error(subdev,
+ "%s - TP%d: %08x [%s]\n",
+ name, i, ustatus, msg);
ustatus = 0;
}
}
break;
case 7: /* MP error */
if (ustatus & 0x04030000) {
- nv50_priv_mp_trap(priv, i, display);
+ nv50_gr_mp_trap(gr, i, display);
ustatus &= ~0x04030000;
}
if (ustatus && display) {
- nv_error(priv, "%s - TP%d:", name, i);
- nvkm_bitfield_print(nv50_mpc_traps, ustatus);
- pr_cont("\n");
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_mpc_traps, ustatus);
+ nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
+ name, i, ustatus, msg);
ustatus = 0;
}
break;
case 8: /* PROP error */
if (display)
- nv50_priv_prop_trap(
- priv, ustatus_addr, ustatus, i);
+ nv50_gr_prop_trap(
+ gr, ustatus_addr, ustatus, i);
ustatus = 0;
break;
}
if (ustatus) {
if (display)
- nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
}
- nv_wr32(priv, ustatus_addr, 0xc0000000);
+ nvkm_wr32(device, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- nv_warn(priv, "%s - No TPs claiming errors?\n", name);
+ nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}
static int
-nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
- int chid, u64 inst, struct nvkm_object *engctx)
+nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
+ int chid, u64 inst, const char *name)
{
- u32 status = nv_rd32(priv, 0x400108);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 status = nvkm_rd32(device, 0x400108);
u32 ustatus;
+ char msg[128];
if (!status && display) {
- nv_error(priv, "TRAP: no units reporting traps?\n");
+ nvkm_error(subdev, "TRAP: no units reporting traps?\n");
return 1;
}
@@ -585,71 +408,72 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
* COND, QUERY. If you get a trap from it, the command is still stuck
* in DISPATCH and you need to do something about it. */
if (status & 0x001) {
- ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+ ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
if (!ustatus && display) {
- nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+ nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
}
- nv_wr32(priv, 0x400500, 0x00000000);
+ nvkm_wr32(device, 0x400500, 0x00000000);
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
- u32 addr = nv_rd32(priv, 0x400808);
+ u32 addr = nvkm_rd32(device, 0x400808);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 datal = nv_rd32(priv, 0x40080c);
- u32 datah = nv_rd32(priv, 0x400810);
- u32 class = nv_rd32(priv, 0x400814);
- u32 r848 = nv_rd32(priv, 0x400848);
+ u32 datal = nvkm_rd32(device, 0x40080c);
+ u32 datah = nvkm_rd32(device, 0x400810);
+ u32 class = nvkm_rd32(device, 0x400814);
+ u32 r848 = nvkm_rd32(device, 0x400848);
- nv_error(priv, "TRAP DISPATCH_FAULT\n");
+ nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv,
- "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
- chid, inst,
- nvkm_client_name(engctx), subc,
- class, mthd, datah, datal, addr, r848);
+ nvkm_error(subdev,
+ "ch %d [%010llx %s] subc %d "
+ "class %04x mthd %04x data %08x%08x "
+ "400808 %08x 400848 %08x\n",
+ chid, inst, name, subc, class, mthd,
+ datah, datal, addr, r848);
} else
if (display) {
- nv_error(priv, "no stuck command?\n");
+ nvkm_error(subdev, "no stuck command?\n");
}
- nv_wr32(priv, 0x400808, 0);
- nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
- nv_wr32(priv, 0x400848, 0);
+ nvkm_wr32(device, 0x400808, 0);
+ nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
+ nvkm_wr32(device, 0x400848, 0);
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- u32 addr = nv_rd32(priv, 0x40084c);
+ u32 addr = nvkm_rd32(device, 0x40084c);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, 0x40085c);
- u32 class = nv_rd32(priv, 0x400814);
+ u32 data = nvkm_rd32(device, 0x40085c);
+ u32 class = nvkm_rd32(device, 0x400814);
- nv_error(priv, "TRAP DISPATCH_QUERY\n");
+ nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
- nv_error(priv,
- "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
- chid, inst,
- nvkm_client_name(engctx), subc,
- class, mthd, data, addr);
+ nvkm_error(subdev,
+ "ch %d [%010llx %s] subc %d "
+ "class %04x mthd %04x data %08x "
+ "40084c %08x\n", chid, inst, name,
+ subc, class, mthd, data, addr);
} else
if (display) {
- nv_error(priv, "no stuck command?\n");
+ nvkm_error(subdev, "no stuck command?\n");
}
- nv_wr32(priv, 0x40084c, 0);
+ nvkm_wr32(device, 0x40084c, 0);
ustatus &= ~0x00000002;
}
if (ustatus && display) {
- nv_error(priv, "TRAP_DISPATCH (unknown "
- "0x%08x)\n", ustatus);
+ nvkm_error(subdev, "TRAP_DISPATCH "
+ "(unknown %08x)\n", ustatus);
}
- nv_wr32(priv, 0x400804, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x001);
+ nvkm_wr32(device, 0x400804, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x001);
status &= ~0x001;
if (!status)
return 0;
@@ -657,81 +481,91 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
/* M2MF: Memory to memory copy engine. */
if (status & 0x002) {
- u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+ u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
if (display) {
- nv_error(priv, "TRAP_M2MF");
- nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
- pr_cont("\n");
- nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
- nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
- nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
-
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_gr_trap_m2mf, ustatus);
+ nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
+ ustatus, msg);
+ nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
+ nvkm_rd32(device, 0x406804),
+ nvkm_rd32(device, 0x406808),
+ nvkm_rd32(device, 0x40680c),
+ nvkm_rd32(device, 0x406810));
}
/* No sane way found yet -- just reset the bugger. */
- nv_wr32(priv, 0x400040, 2);
- nv_wr32(priv, 0x400040, 0);
- nv_wr32(priv, 0x406800, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x002);
+ nvkm_wr32(device, 0x400040, 2);
+ nvkm_wr32(device, 0x400040, 0);
+ nvkm_wr32(device, 0x406800, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x002);
status &= ~0x002;
}
/* VFETCH: Fetches data from vertex buffers. */
if (status & 0x004) {
- u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+ u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
if (display) {
- nv_error(priv, "TRAP_VFETCH");
- nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
- pr_cont("\n");
- nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
- nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
- nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_gr_trap_vfetch, ustatus);
+ nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
+ ustatus, msg);
+ nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
+ nvkm_rd32(device, 0x400c00),
+ nvkm_rd32(device, 0x400c08),
+ nvkm_rd32(device, 0x400c0c),
+ nvkm_rd32(device, 0x400c10));
}
- nv_wr32(priv, 0x400c04, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x004);
+ nvkm_wr32(device, 0x400c04, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x004);
status &= ~0x004;
}
/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
if (status & 0x008) {
- ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+ ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
if (display) {
- nv_error(priv, "TRAP_STRMOUT");
- nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
- pr_cont("\n");
- nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
- nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
- nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
-
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_gr_trap_strmout, ustatus);
+ nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
+ ustatus, msg);
+ nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nvkm_rd32(device, 0x401804),
+ nvkm_rd32(device, 0x401808),
+ nvkm_rd32(device, 0x40180c),
+ nvkm_rd32(device, 0x401810));
}
/* No sane way found yet -- just reset the bugger. */
- nv_wr32(priv, 0x400040, 0x80);
- nv_wr32(priv, 0x400040, 0);
- nv_wr32(priv, 0x401800, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x008);
+ nvkm_wr32(device, 0x400040, 0x80);
+ nvkm_wr32(device, 0x400040, 0);
+ nvkm_wr32(device, 0x401800, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x008);
status &= ~0x008;
}
/* CCACHE: Handles code and c[] caches and fills them. */
if (status & 0x010) {
- ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+ ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
if (display) {
- nv_error(priv, "TRAP_CCACHE");
- nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
- pr_cont("\n");
- nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
- " %08x %08x %08x\n",
- nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
- nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
- nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
- nv_rd32(priv, 0x40501c));
-
+ nvkm_snprintbf(msg, sizeof(msg),
+ nv50_gr_trap_ccache, ustatus);
+ nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
+ ustatus, msg);
+ nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
+ "%08x %08x %08x\n",
+ nvkm_rd32(device, 0x405000),
+ nvkm_rd32(device, 0x405004),
+ nvkm_rd32(device, 0x405008),
+ nvkm_rd32(device, 0x40500c),
+ nvkm_rd32(device, 0x405010),
+ nvkm_rd32(device, 0x405014),
+ nvkm_rd32(device, 0x40501c));
}
- nv_wr32(priv, 0x405018, 0xc0000000);
- nv_wr32(priv, 0x400108, 0x010);
+ nvkm_wr32(device, 0x405018, 0xc0000000);
+ nvkm_wr32(device, 0x400108, 0x010);
status &= ~0x010;
}
@@ -739,239 +573,174 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
- ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+ ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
if (display)
- nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
- nv_wr32(priv, 0x402000, 0xc0000000);
+ nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
+ nvkm_wr32(device, 0x402000, 0xc0000000);
/* no status modification on purpose */
}
/* TEXTURE: CUDA texturing units */
if (status & 0x040) {
- nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+ nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
"TRAP_TEXTURE");
- nv_wr32(priv, 0x400108, 0x040);
+ nvkm_wr32(device, 0x400108, 0x040);
status &= ~0x040;
}
/* MP: CUDA execution engines. */
if (status & 0x080) {
- nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+ nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
"TRAP_MP");
- nv_wr32(priv, 0x400108, 0x080);
+ nvkm_wr32(device, 0x400108, 0x080);
status &= ~0x080;
}
/* PROP: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
- nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+ nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
"TRAP_PROP");
- nv_wr32(priv, 0x400108, 0x100);
+ nvkm_wr32(device, 0x400108, 0x100);
status &= ~0x100;
}
if (status) {
if (display)
- nv_error(priv, "TRAP: unknown 0x%08x\n", status);
- nv_wr32(priv, 0x400108, status);
+ nvkm_error(subdev, "TRAP: unknown %08x\n", status);
+ nvkm_wr32(device, 0x400108, status);
}
return 1;
}
-static void
-nv50_gr_intr(struct nvkm_subdev *subdev)
+void
+nv50_gr_intr(struct nvkm_gr *base)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct nvkm_handle *handle = NULL;
- struct nv50_gr_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x400100);
- u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
- u32 addr = nv_rd32(priv, 0x400704);
+ struct nv50_gr *gr = nv50_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo_chan *chan;
+ u32 stat = nvkm_rd32(device, 0x400100);
+ u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
+ u32 addr = nvkm_rd32(device, 0x400704);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(priv, 0x400708);
- u32 class = nv_rd32(priv, 0x400814);
+ u32 data = nvkm_rd32(device, 0x400708);
+ u32 class = nvkm_rd32(device, 0x400814);
u32 show = stat, show_bitfield = stat;
- int chid;
-
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000010) {
- handle = nvkm_handle_get_class(engctx, class);
- if (handle && !nv_call(handle->object, mthd, data))
- show &= ~0x00000010;
- nvkm_handle_put(handle);
+ const struct nvkm_enum *en;
+ unsigned long flags;
+ const char *name = "unknown";
+ char msg[128];
+ int chid = -1;
+
+ chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+ if (chan) {
+ name = chan->object.client->name;
+ chid = chan->chid;
}
if (show & 0x00100000) {
- u32 ecode = nv_rd32(priv, 0x400110);
- nv_error(priv, "DATA_ERROR ");
- nvkm_enum_print(nv50_data_error_names, ecode);
- pr_cont("\n");
+ u32 ecode = nvkm_rd32(device, 0x400110);
+ en = nvkm_enum_find(nv50_data_error_names, ecode);
+ nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
+ ecode, en ? en->name : "");
show_bitfield &= ~0x00100000;
}
if (stat & 0x00200000) {
- if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
- engctx))
+ if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
show &= ~0x00200000;
show_bitfield &= ~0x00200000;
}
- nv_wr32(priv, 0x400100, stat);
- nv_wr32(priv, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, stat);
+ nvkm_wr32(device, 0x400500, 0x00010001);
if (show) {
show &= show_bitfield;
- if (show) {
- nv_error(priv, "%s", "");
- nvkm_bitfield_print(nv50_gr_intr_name, show);
- pr_cont("\n");
- }
- nv_error(priv,
- "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nvkm_client_name(engctx),
- subc, class, mthd, data);
+ nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
+ nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
+ "class %04x mthd %04x data %08x\n",
+ stat, msg, chid, (u64)inst << 12, name,
+ subc, class, mthd, data);
}
- if (nv_rd32(priv, 0x400824) & (1 << 31))
- nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+ if (nvkm_rd32(device, 0x400824) & (1 << 31))
+ nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
- nvkm_engctx_put(engctx);
+ nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
-static int
-nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_gr_init(struct nvkm_gr *base)
{
- struct nv50_gr_priv *priv;
- int ret;
-
- ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00201000;
- nv_subdev(priv)->intr = nv50_gr_intr;
- nv_engine(priv)->cclass = &nv50_gr_cclass;
-
- priv->base.units = nv50_gr_units;
-
- switch (nv_device(priv)->chipset) {
- case 0x50:
- nv_engine(priv)->sclass = nv50_gr_sclass;
- break;
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- nv_engine(priv)->sclass = g84_gr_sclass;
- break;
- case 0xa0:
- case 0xaa:
- case 0xac:
- nv_engine(priv)->sclass = gt200_gr_sclass;
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_engine(priv)->sclass = gt215_gr_sclass;
- break;
- case 0xaf:
- nv_engine(priv)->sclass = mcp89_gr_sclass;
- break;
-
- }
-
- /* unfortunate hw bug workaround... */
- if (nv_device(priv)->chipset != 0x50 &&
- nv_device(priv)->chipset != 0xac)
- nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
-
- spin_lock_init(&priv->lock);
- return 0;
-}
-
-static int
-nv50_gr_init(struct nvkm_object *object)
-{
- struct nv50_gr_priv *priv = (void *)object;
+ struct nv50_gr *gr = nv50_gr(base);
+ struct nvkm_device *device = gr->base.engine.subdev.device;
int ret, units, i;
- ret = nvkm_gr_init(&priv->base);
- if (ret)
- return ret;
-
/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
- nv_wr32(priv, 0x40008c, 0x00000004);
+ nvkm_wr32(device, 0x40008c, 0x00000004);
/* reset/enable traps and interrupts */
- nv_wr32(priv, 0x400804, 0xc0000000);
- nv_wr32(priv, 0x406800, 0xc0000000);
- nv_wr32(priv, 0x400c04, 0xc0000000);
- nv_wr32(priv, 0x401800, 0xc0000000);
- nv_wr32(priv, 0x405018, 0xc0000000);
- nv_wr32(priv, 0x402000, 0xc0000000);
-
- units = nv_rd32(priv, 0x001540);
+ nvkm_wr32(device, 0x400804, 0xc0000000);
+ nvkm_wr32(device, 0x406800, 0xc0000000);
+ nvkm_wr32(device, 0x400c04, 0xc0000000);
+ nvkm_wr32(device, 0x401800, 0xc0000000);
+ nvkm_wr32(device, 0x405018, 0xc0000000);
+ nvkm_wr32(device, 0x402000, 0xc0000000);
+
+ units = nvkm_rd32(device, 0x001540);
for (i = 0; i < 16; i++) {
if (!(units & (1 << i)))
continue;
- if (nv_device(priv)->chipset < 0xa0) {
- nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
- nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
- nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+ if (device->chipset < 0xa0) {
+ nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
+ nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
+ nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
} else {
- nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
- nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
- nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+ nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
+ nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
+ nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
}
}
- nv_wr32(priv, 0x400108, 0xffffffff);
- nv_wr32(priv, 0x400138, 0xffffffff);
- nv_wr32(priv, 0x400100, 0xffffffff);
- nv_wr32(priv, 0x40013c, 0xffffffff);
- nv_wr32(priv, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400500, 0x00010001);
/* upload context program, initialise ctxctl defaults */
- ret = nv50_grctx_init(nv_device(priv), &priv->size);
+ ret = nv50_grctx_init(device, &gr->size);
if (ret)
return ret;
- nv_wr32(priv, 0x400824, 0x00000000);
- nv_wr32(priv, 0x400828, 0x00000000);
- nv_wr32(priv, 0x40082c, 0x00000000);
- nv_wr32(priv, 0x400830, 0x00000000);
- nv_wr32(priv, 0x40032c, 0x00000000);
- nv_wr32(priv, 0x400330, 0x00000000);
+ nvkm_wr32(device, 0x400824, 0x00000000);
+ nvkm_wr32(device, 0x400828, 0x00000000);
+ nvkm_wr32(device, 0x40082c, 0x00000000);
+ nvkm_wr32(device, 0x400830, 0x00000000);
+ nvkm_wr32(device, 0x40032c, 0x00000000);
+ nvkm_wr32(device, 0x400330, 0x00000000);
/* some unknown zcull magic */
- switch (nv_device(priv)->chipset & 0xf0) {
+ switch (device->chipset & 0xf0) {
case 0x50:
case 0x80:
case 0x90:
- nv_wr32(priv, 0x402ca8, 0x00000800);
+ nvkm_wr32(device, 0x402ca8, 0x00000800);
break;
case 0xa0:
default:
- if (nv_device(priv)->chipset == 0xa0 ||
- nv_device(priv)->chipset == 0xaa ||
- nv_device(priv)->chipset == 0xac) {
- nv_wr32(priv, 0x402ca8, 0x00000802);
+ if (device->chipset == 0xa0 ||
+ device->chipset == 0xaa ||
+ device->chipset == 0xac) {
+ nvkm_wr32(device, 0x402ca8, 0x00000802);
} else {
- nv_wr32(priv, 0x402cc0, 0x00000000);
- nv_wr32(priv, 0x402ca8, 0x00000002);
+ nvkm_wr32(device, 0x402cc0, 0x00000000);
+ nvkm_wr32(device, 0x402ca8, 0x00000002);
}
break;
@@ -979,21 +748,47 @@ nv50_gr_init(struct nvkm_object *object)
/* zero out zcull regions */
for (i = 0; i < 8; i++) {
- nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
- nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
- nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
- nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
+ nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
}
+
return 0;
}
-struct nvkm_oclass
-nv50_gr_oclass = {
- .handle = NV_ENGINE(GR, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_gr_ctor,
- .dtor = _nvkm_gr_dtor,
- .init = nv50_gr_init,
- .fini = _nvkm_gr_fini,
- },
+int
+nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
+{
+ struct nv50_gr *gr;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ spin_lock_init(&gr->lock);
+ *pgr = &gr->base;
+
+ return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv50_gr = {
+ .init = nv50_gr_init,
+ .intr = nv50_gr_intr,
+ .chan_new = nv50_gr_chan_new,
+ .units = nv50_gr_units,
+ .sclass = {
+ { -1, -1, 0x0030, &nv50_gr_object },
+ { -1, -1, 0x502d, &nv50_gr_object },
+ { -1, -1, 0x5039, &nv50_gr_object },
+ { -1, -1, 0x5097, &nv50_gr_object },
+ { -1, -1, 0x50c0, &nv50_gr_object },
+ {}
+ }
};
+
+int
+nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return nv50_gr_new_(&nv50_gr, device, index, pgr);
+}
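
The rewritten trap handlers above replace the nvkm_bitfield_print()/pr_cont() pairs with nvkm_snprintbf(), which renders a status word into a caller-supplied buffer so each report becomes a single log line. A user-space stand-in for that decode, assuming a simple {mask, name} table; only STACK_UNDERFLOW appears in the hunk above, the second entry is hypothetical:

#include <stdio.h>
#include <string.h>

struct bitfield {
	unsigned mask;
	const char *name;
};

static const struct bitfield mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "EXAMPLE_BIT" },	/* hypothetical, for the demo */
	{ 0 }
};

/* Append the name of every set bit to buf, separated by " | ". */
static void
snprintbf(char *buf, size_t size, const struct bitfield *bf, unsigned value)
{
	buf[0] = '\0';
	for (; bf->mask; bf++) {
		if (!(value & bf->mask))
			continue;
		if (buf[0])
			strncat(buf, " | ", size - strlen(buf) - 1);
		strncat(buf, bf->name, size - strlen(buf) - 1);
	}
}

int main(void)
{
	char msg[128];

	snprintbf(msg, sizeof(msg), mp_exec_errors, 0x03);
	printf("TRAP_MP_EXEC %08x [%s]\n", 0x03, msg);
	return 0;
}
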
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index bcf786f6b731..45eec83a5969 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,8 +1,34 @@
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
-#include <engine/gr.h>
-struct nvkm_device;
-struct nvkm_gpuobj;
+#define nv50_gr(p) container_of((p), struct nv50_gr, base)
+#include "priv.h"
+
+struct nv50_gr {
+ struct nvkm_gr base;
+ const struct nv50_gr_func *func;
+ spinlock_t lock;
+ u32 size;
+};
+
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+ struct nvkm_gr **);
+int nv50_gr_init(struct nvkm_gr *);
+void nv50_gr_intr(struct nvkm_gr *);
+u64 nv50_gr_units(struct nvkm_gr *);
+
+int g84_gr_tlb_flush(struct nvkm_gr *);
+
+#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+
+struct nv50_gr_chan {
+ struct nvkm_object object;
+ struct nv50_gr *gr;
+};
+
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv50_gr_object;
int nv50_grctx_init(struct nvkm_device *, u32 *size);
void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
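
The nv50_gr()/nv50_gr_chan() macros above downcast from an embedded base object back to the enclosing structure via container_of(). That is plain offsetof arithmetic and can be shown without kernel headers; the *_like names below are invented for the sketch:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_gr_like { int dummy; };

struct nv50_gr_like {
	struct nvkm_gr_like base;	/* embedded base object */
	unsigned size;
};

/* Same shape as the nv50_gr(p) macro in the header above. */
#define nv50_gr_like(p) container_of((p), struct nv50_gr_like, base)

int main(void)
{
	struct nv50_gr_like gr = { .size = 0x1234 };
	struct nvkm_gr_like *base = &gr.base;	/* what a callback receives */

	printf("size = 0x%x\n", nv50_gr_like(base)->size);
	return 0;
}
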
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
new file mode 100644
index 000000000000..a234590be88e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_GR_PRIV_H__
+#define __NVKM_GR_PRIV_H__
+#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
+#include <engine/gr.h>
+#include <core/enum.h>
+struct nvkm_fb_tile;
+struct nvkm_fifo_chan;
+
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
+ int index, u32 pmc_enable, bool enable,
+ struct nvkm_gr *);
+
+bool nv04_gr_idle(struct nvkm_gr *);
+
+struct nvkm_gr_func {
+ void *(*dtor)(struct nvkm_gr *);
+ int (*oneinit)(struct nvkm_gr *);
+ int (*init)(struct nvkm_gr *);
+ void (*intr)(struct nvkm_gr *);
+ void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
+ int (*tlb_flush)(struct nvkm_gr *);
+ int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *, struct nvkm_object **);
+ int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
+ /* Returns chipset-specific counts of units packed into a u64.
+ */
+ u64 (*units)(struct nvkm_gr *);
+ struct nvkm_sclass sclass[];
+};
+
+extern const struct nvkm_bitfield nv04_gr_nsource[];
+extern const struct nvkm_object_func nv04_gr_object;
+
+extern const struct nvkm_bitfield nv10_gr_intr_name[];
+extern const struct nvkm_bitfield nv10_gr_nstatus[];
+
+extern const struct nvkm_enum nv50_data_error_names[];
+#endif
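
nvkm_gr_func above is the per-chipset vtable: an implementation fills in only the hooks it supports and callers probe a pointer before calling, which is how chipsets without, say, a tlb_flush simply omit it. A minimal sketch of that convention with invented names:

#include <stdio.h>

struct gr;

struct gr_func {
	int (*init)(struct gr *);
	int (*tlb_flush)(struct gr *);	/* optional, may be NULL */
};

struct gr {
	const struct gr_func *func;
};

static int demo_init(struct gr *gr)
{
	(void)gr;
	printf("init\n");
	return 0;
}

static const struct gr_func demo_func = {
	.init = demo_init,
	/* .tlb_flush left NULL: this "chipset" has no flush hook */
};

int main(void)
{
	struct gr gr = { .func = &demo_func };

	gr.func->init(&gr);
	if (gr.func->tlb_flush)		/* probe optional hooks */
		gr.func->tlb_flush(&gr);
	else
		printf("no tlb_flush hook\n");
	return 0;
}
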
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 0df889fa2611..34ff0014a6c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,74 +21,24 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/mpeg.h>
-
-struct g84_mpeg_priv {
- struct nvkm_mpeg base;
-};
-
-struct g84_mpeg_chan {
- struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_sclass[] = {
- { 0x8274, &nv50_mpeg_ofuncs },
- {}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_cclass = {
- .handle = NV_ENGCTX(MPEG, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_mpeg_context_ctor,
- .dtor = _nvkm_mpeg_context_dtor,
- .init = _nvkm_mpeg_context_init,
- .fini = _nvkm_mpeg_context_fini,
- .rd32 = _nvkm_mpeg_context_rd32,
- .wr32 = _nvkm_mpeg_context_wr32,
- },
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+g84_mpeg = {
+ .init = nv50_mpeg_init,
+ .intr = nv50_mpeg_intr,
+ .cclass = &nv50_mpeg_cclass,
+ .sclass = {
+ { -1, -1, G82_MPEG, &nv31_mpeg_object },
+ {}
+ }
};
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
- struct g84_mpeg_priv *priv;
- int ret;
-
- ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nv50_mpeg_intr;
- nv_engine(priv)->cclass = &g84_mpeg_cclass;
- nv_engine(priv)->sclass = g84_mpeg_sclass;
- return 0;
+ return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
+ true, pmpeg);
}
-
-struct nvkm_oclass
-g84_mpeg_oclass = {
- .handle = NV_ENGINE(MPEG, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_mpeg_ctor,
- .dtor = _nvkm_mpeg_dtor,
- .init = nv50_mpeg_init,
- .fini = _nvkm_mpeg_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index b5bef0718359..d4d8942b1347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -24,281 +24,271 @@
#include "nv31.h"
#include <core/client.h>
-#include <core/handle.h>
-#include <engine/fifo.h>
-#include <subdev/instmem.h>
+#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
/*******************************************************************************
* MPEG object classes
******************************************************************************/
static int
-nv31_mpeg_object_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_gpuobj *obj;
- int ret;
-
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 20, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- return 0;
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+ false, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+ nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+ nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static int
-nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+const struct nvkm_object_func
+nv31_mpeg_object = {
+ .bind = nv31_mpeg_object_bind,
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static void *
+nv31_mpeg_chan_dtor(struct nvkm_object *object)
{
- struct nvkm_instmem *imem = nvkm_instmem(object);
- struct nv31_mpeg_priv *priv = (void *)object->engine;
- u32 inst = *(u32 *)arg << 4;
- u32 dma0 = nv_ro32(imem, inst + 0);
- u32 dma1 = nv_ro32(imem, inst + 4);
- u32 dma2 = nv_ro32(imem, inst + 8);
+ struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
+ struct nv31_mpeg *mpeg = chan->mpeg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
+ if (mpeg->chan == chan)
+ mpeg->chan = NULL;
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+ return chan;
+}
+
+static const struct nvkm_object_func
+nv31_mpeg_chan = {
+ .dtor = nv31_mpeg_chan_dtor,
+};
+
+int
+nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
+ struct nv31_mpeg_chan *chan;
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
+ chan->mpeg = mpeg;
+ chan->fifo = fifoch;
+ *pobject = &chan->object;
+
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
+ if (!mpeg->chan) {
+ mpeg->chan = chan;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+ return ret;
+}
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+void
+nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
+{
+ struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+ struct nvkm_device *device = mpeg->engine.subdev.device;
+
+ nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
+ nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
+ nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+static bool
+nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
+{
+ u32 inst = data << 4;
+ u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
+ u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
+ u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
u32 size = dma1 + 1;
/* only allow linear DMA objects */
if (!(dma0 & 0x00002000))
- return -EINVAL;
+ return false;
if (mthd == 0x0190) {
/* DMA_CMD */
- nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
- nv_wr32(priv, 0x00b334, base);
- nv_wr32(priv, 0x00b324, size);
+ nvkm_mask(device, 0x00b300, 0x00010000,
+ (dma0 & 0x00030000) ? 0x00010000 : 0);
+ nvkm_wr32(device, 0x00b334, base);
+ nvkm_wr32(device, 0x00b324, size);
} else
if (mthd == 0x01a0) {
/* DMA_DATA */
- nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
- nv_wr32(priv, 0x00b360, base);
- nv_wr32(priv, 0x00b364, size);
+ nvkm_mask(device, 0x00b300, 0x00020000,
+ (dma0 & 0x00030000) ? 0x00020000 : 0);
+ nvkm_wr32(device, 0x00b360, base);
+ nvkm_wr32(device, 0x00b364, size);
} else {
/* DMA_IMAGE, VRAM only */
if (dma0 & 0x00030000)
- return -EINVAL;
+ return false;
- nv_wr32(priv, 0x00b370, base);
- nv_wr32(priv, 0x00b374, size);
+ nvkm_wr32(device, 0x00b370, base);
+ nvkm_wr32(device, 0x00b374, size);
}
- return 0;
+ return true;
}
-struct nvkm_ofuncs
-nv31_mpeg_ofuncs = {
- .ctor = nv31_mpeg_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_omthds
-nv31_mpeg_omthds[] = {
- { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
- { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
- { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
- {}
-};
-
-struct nvkm_oclass
-nv31_mpeg_sclass[] = {
- { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
- {}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static int
-nv31_mpeg_context_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static bool
+nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
{
- struct nv31_mpeg_priv *priv = (void *)engine;
- struct nv31_mpeg_chan *chan;
- unsigned long flags;
- int ret;
-
- ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&nv_engine(priv)->lock, flags);
- if (priv->chan) {
- spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
- nvkm_object_destroy(&chan->base);
- *pobject = NULL;
- return -EBUSY;
+ struct nvkm_device *device = mpeg->engine.subdev.device;
+ switch (mthd) {
+ case 0x190:
+ case 0x1a0:
+ case 0x1b0:
+ return mpeg->func->mthd_dma(device, mthd, data);
+ default:
+ break;
}
- priv->chan = chan;
- spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
- return 0;
+ return false;
}
static void
-nv31_mpeg_context_dtor(struct nvkm_object *object)
+nv31_mpeg_intr(struct nvkm_engine *engine)
{
- struct nv31_mpeg_priv *priv = (void *)object->engine;
- struct nv31_mpeg_chan *chan = (void *)object;
- unsigned long flags;
-
- spin_lock_irqsave(&nv_engine(priv)->lock, flags);
- priv->chan = NULL;
- spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
- nvkm_object_destroy(&chan->base);
-}
-
-struct nvkm_oclass
-nv31_mpeg_cclass = {
- .handle = NV_ENGCTX(MPEG, 0x31),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv31_mpeg_context_ctor,
- .dtor = nv31_mpeg_context_dtor,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
- },
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-void
-nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
-{
- struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
- struct nv31_mpeg_priv *priv = (void *)engine;
-
- nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
- nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
- nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
-}
-
-void
-nv31_mpeg_intr(struct nvkm_subdev *subdev)
-{
- struct nv31_mpeg_priv *priv = (void *)subdev;
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_handle *handle;
- struct nvkm_object *engctx;
- u32 stat = nv_rd32(priv, 0x00b100);
- u32 type = nv_rd32(priv, 0x00b230);
- u32 mthd = nv_rd32(priv, 0x00b234);
- u32 data = nv_rd32(priv, 0x00b238);
+ struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00b100);
+ u32 type = nvkm_rd32(device, 0x00b230);
+ u32 mthd = nvkm_rd32(device, 0x00b234);
+ u32 data = nvkm_rd32(device, 0x00b238);
u32 show = stat;
unsigned long flags;
- spin_lock_irqsave(&nv_engine(priv)->lock, flags);
- engctx = nv_object(priv->chan);
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
- nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+ nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
show &= ~0x01000000;
}
- if (type == 0x00000010 && engctx) {
- handle = nvkm_handle_get_class(engctx, 0x3174);
- if (handle && !nv_call(handle->object, mthd, data))
+ if (type == 0x00000010) {
+ if (!nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
- nvkm_handle_put(handle);
}
}
- nv_wr32(priv, 0x00b100, stat);
- nv_wr32(priv, 0x00b230, 0x00000001);
+ nvkm_wr32(device, 0x00b100, stat);
+ nvkm_wr32(device, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- pfifo->chid(pfifo, engctx),
- nvkm_client_name(engctx), stat, type, mthd, data);
+ nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
+ mpeg->chan ? mpeg->chan->fifo->chid : -1,
+ mpeg->chan ? mpeg->chan->object.client->name :
+ "unknown", stat, type, mthd, data);
}
- spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-}
-
-static int
-nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv31_mpeg_priv *priv;
- int ret;
-
- ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nv31_mpeg_intr;
- nv_engine(priv)->cclass = &nv31_mpeg_cclass;
- nv_engine(priv)->sclass = nv31_mpeg_sclass;
- nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
- return 0;
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
}
int
-nv31_mpeg_init(struct nvkm_object *object)
+nv31_mpeg_init(struct nvkm_engine *mpeg)
{
- struct nvkm_engine *engine = nv_engine(object);
- struct nv31_mpeg_priv *priv = (void *)object;
- struct nvkm_fb *pfb = nvkm_fb(object);
- int ret, i;
-
- ret = nvkm_mpeg_init(&priv->base);
- if (ret)
- return ret;
+ struct nvkm_subdev *subdev = &mpeg->subdev;
+ struct nvkm_device *device = subdev->device;
/* VPE init */
- nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
- nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
- for (i = 0; i < pfb->tile.regions; i++)
- engine->tile_prog(engine, i);
+ nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+ nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
/* PMPEG init */
- nv_wr32(priv, 0x00b32c, 0x00000000);
- nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b220, 0x00000031);
- nv_wr32(priv, 0x00b300, 0x02001ec1);
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(priv, 0x00b100, 0xffffffff);
- nv_wr32(priv, 0x00b140, 0xffffffff);
-
- if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
- nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+ nvkm_wr32(device, 0x00b32c, 0x00000000);
+ nvkm_wr32(device, 0x00b314, 0x00000100);
+ nvkm_wr32(device, 0x00b220, 0x00000031);
+ nvkm_wr32(device, 0x00b300, 0x02001ec1);
+ nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+
+ nvkm_wr32(device, 0x00b100, 0xffffffff);
+ nvkm_wr32(device, 0x00b140, 0xffffffff);
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "timeout %08x\n",
+ nvkm_rd32(device, 0x00b200));
return -EBUSY;
}
return 0;
}
-struct nvkm_oclass
-nv31_mpeg_oclass = {
- .handle = NV_ENGINE(MPEG, 0x31),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv31_mpeg_ctor,
- .dtor = _nvkm_mpeg_dtor,
- .init = nv31_mpeg_init,
- .fini = _nvkm_mpeg_fini,
- },
+static void *
+nv31_mpeg_dtor(struct nvkm_engine *engine)
+{
+ return nv31_mpeg(engine);
+}
+
+static const struct nvkm_engine_func
+nv31_mpeg_ = {
+ .dtor = nv31_mpeg_dtor,
+ .init = nv31_mpeg_init,
+ .intr = nv31_mpeg_intr,
+ .tile = nv31_mpeg_tile,
+ .fifo.cclass = nv31_mpeg_chan_new,
+ .sclass = {
+ { -1, -1, NV31_MPEG, &nv31_mpeg_object },
+ {}
+ }
};
+
+int
+nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
+ int index, struct nvkm_engine **pmpeg)
+{
+ struct nv31_mpeg *mpeg;
+
+ if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+ return -ENOMEM;
+ mpeg->func = func;
+ *pmpeg = &mpeg->engine;
+
+ return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+ true, &mpeg->engine);
+}
+
+static const struct nv31_mpeg_func
+nv31_mpeg = {
+ .mthd_dma = nv31_mpeg_mthd_dma,
+};
+
+int
+nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+ return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+}
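
nv31_mpeg_init() above converts the old nv_wait() into nvkm_msec(device, 2000, ...), which re-evaluates its body until it breaks out or roughly two seconds elapse, yielding a negative value on timeout. A user-space approximation of that poll-with-deadline idiom; clock_gettime stands in for the device timer and a plain variable for the register:

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll until (*reg & mask) clears or timeout_ms elapses; <0 on timeout. */
static int
wait_clear(const volatile unsigned *reg, unsigned mask, int timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	while (*reg & mask) {
		if (now_ms() > deadline)
			return -1;
	}
	return 0;
}

int main(void)
{
	volatile unsigned fake_reg = 0x00000001;	/* busy bit stuck on */

	if (wait_clear(&fake_reg, 0x00000001, 100) < 0)
		printf("timeout %08x\n", fake_reg);
	return 0;
}
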
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 782b796d7458..d3bb34fcdebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,13 +1,30 @@
#ifndef __NV31_MPEG_H__
#define __NV31_MPEG_H__
+#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
+#include "priv.h"
#include <engine/mpeg.h>
-struct nv31_mpeg_chan {
- struct nvkm_object base;
+struct nv31_mpeg {
+ const struct nv31_mpeg_func *func;
+ struct nvkm_engine engine;
+ struct nv31_mpeg_chan *chan;
};
-struct nv31_mpeg_priv {
- struct nvkm_mpeg base;
- struct nv31_mpeg_chan *chan;
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
+ int index, struct nvkm_engine **);
+
+struct nv31_mpeg_func {
+ bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
};
+
+#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+
+struct nv31_mpeg_chan {
+ struct nvkm_object object;
+ struct nv31_mpeg *mpeg;
+ struct nvkm_fifo_chan *fifo;
+};
+
+int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
#endif
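
The nv31_mpeg() and nv31_mpeg_chan() macros above are instances of the container_of() subclassing idiom this refactor leans on throughout: the chip-specific structure embeds the generic object by value, and any code holding a pointer to the embedded member can recover the enclosing structure. A self-contained illustration (simplified container_of() without the kernel's type checking; the names are illustrative, not from the driver):

#include <stddef.h>	/* offsetof() */

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { int index; };		/* stand-in for nvkm_engine */

struct mpeg_impl {
	const void *func;		/* per-variant hooks, like nv31_mpeg_func */
	struct engine engine;		/* embedded base, as in struct nv31_mpeg */
};

static const void *impl_func(struct engine *base)
{
	/* base points at .engine; step back to the enclosing mpeg_impl */
	struct mpeg_impl *impl = container_of(base, struct mpeg_impl, engine);
	return impl->func;
}
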
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 9508bf9e140f..16de5bd94b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -25,110 +25,53 @@
#include <subdev/instmem.h>
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+bool
+nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
- struct nvkm_instmem *imem = nvkm_instmem(object);
- struct nv31_mpeg_priv *priv = (void *)object->engine;
- u32 inst = *(u32 *)arg << 4;
- u32 dma0 = nv_ro32(imem, inst + 0);
- u32 dma1 = nv_ro32(imem, inst + 4);
- u32 dma2 = nv_ro32(imem, inst + 8);
+ struct nvkm_instmem *imem = device->imem;
+ u32 inst = data << 4;
+ u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
+ u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
+ u32 dma2 = nvkm_instmem_rd32(imem, inst + 8);
u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
u32 size = dma1 + 1;
/* only allow linear DMA objects */
if (!(dma0 & 0x00002000))
- return -EINVAL;
+ return false;
if (mthd == 0x0190) {
/* DMA_CMD */
- nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
- nv_wr32(priv, 0x00b334, base);
- nv_wr32(priv, 0x00b324, size);
+ nvkm_mask(device, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+ nvkm_wr32(device, 0x00b334, base);
+ nvkm_wr32(device, 0x00b324, size);
} else
if (mthd == 0x01a0) {
/* DMA_DATA */
- nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
- nv_wr32(priv, 0x00b360, base);
- nv_wr32(priv, 0x00b364, size);
+ nvkm_mask(device, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+ nvkm_wr32(device, 0x00b360, base);
+ nvkm_wr32(device, 0x00b364, size);
} else {
/* DMA_IMAGE, VRAM only */
if (dma0 & 0x00030000)
- return -EINVAL;
+ return false;
- nv_wr32(priv, 0x00b370, base);
- nv_wr32(priv, 0x00b374, size);
+ nvkm_wr32(device, 0x00b370, base);
+ nvkm_wr32(device, 0x00b374, size);
}
- return 0;
+ return true;
}
-static struct nvkm_omthds
-nv40_mpeg_omthds[] = {
- { 0x0190, 0x0190, nv40_mpeg_mthd_dma },
- { 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
- { 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
- {}
+static const struct nv31_mpeg_func
+nv40_mpeg = {
+ .mthd_dma = nv40_mpeg_mthd_dma,
};
-struct nvkm_oclass
-nv40_mpeg_sclass[] = {
- { 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
- {}
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static void
-nv40_mpeg_intr(struct nvkm_subdev *subdev)
+int
+nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
- struct nv31_mpeg_priv *priv = (void *)subdev;
- u32 stat;
-
- if ((stat = nv_rd32(priv, 0x00b100)))
- nv31_mpeg_intr(subdev);
-
- if ((stat = nv_rd32(priv, 0x00b800))) {
- nv_error(priv, "PMSRCH 0x%08x\n", stat);
- nv_wr32(priv, 0x00b800, stat);
- }
+ return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
}
-
-static int
-nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv31_mpeg_priv *priv;
- int ret;
-
- ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nv40_mpeg_intr;
- nv_engine(priv)->cclass = &nv31_mpeg_cclass;
- nv_engine(priv)->sclass = nv40_mpeg_sclass;
- nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
- return 0;
-}
-
-struct nvkm_oclass
-nv40_mpeg_oclass = {
- .handle = NV_ENGINE(MPEG, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_mpeg_ctor,
- .dtor = _nvkm_mpeg_dtor,
- .init = nv31_mpeg_init,
- .fini = _nvkm_mpeg_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 4720ac884468..d433cfa4a8ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,165 +21,197 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/mpeg.h>
+#define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
+#include "priv.h"
#include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
#include <engine/fifo.h>
-struct nv44_mpeg_priv {
- struct nvkm_mpeg base;
-};
+#include <nvif/class.h>
-struct nv44_mpeg_chan {
- struct nvkm_mpeg_chan base;
+struct nv44_mpeg {
+ struct nvkm_engine engine;
+ struct list_head chan;
};
/*******************************************************************************
* PMPEG context
******************************************************************************/
+#define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
+
+struct nv44_mpeg_chan {
+ struct nvkm_object object;
+ struct nv44_mpeg *mpeg;
+ struct nvkm_fifo_chan *fifo;
+ struct list_head head;
+ u32 inst;
+};
static int
-nv44_mpeg_context_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nv44_mpeg_chan *chan;
- int ret;
-
- ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4,
- 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
- return 0;
+ struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+ int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,
+ align, true, parent, pgpuobj);
+ if (ret == 0) {
+ chan->inst = (*pgpuobj)->addr;
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x78, 0x02001ec1);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
static int
-nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend)
+nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
{
- struct nv44_mpeg_priv *priv = (void *)object->engine;
- struct nv44_mpeg_chan *chan = (void *)object;
- u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+ struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+ struct nv44_mpeg *mpeg = chan->mpeg;
+ struct nvkm_device *device = mpeg->engine.subdev.device;
+ u32 inst = 0x80000000 | (chan->inst >> 4);
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(priv, 0x00b318) == inst)
- nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000000);
+ if (nvkm_rd32(device, 0x00b318) == inst)
+ nvkm_mask(device, 0x00b318, 0x80000000, 0x00000000);
+ nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
return 0;
}
-static struct nvkm_oclass
-nv44_mpeg_cclass = {
- .handle = NV_ENGCTX(MPEG, 0x44),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv44_mpeg_context_ctor,
- .dtor = _nvkm_mpeg_context_dtor,
- .init = _nvkm_mpeg_context_init,
- .fini = nv44_mpeg_context_fini,
- .rd32 = _nvkm_mpeg_context_rd32,
- .wr32 = _nvkm_mpeg_context_wr32,
- },
+static void *
+nv44_mpeg_chan_dtor(struct nvkm_object *object)
+{
+ struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+ struct nv44_mpeg *mpeg = chan->mpeg;
+ unsigned long flags;
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
+ list_del(&chan->head);
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+ return chan;
+}
+
+static const struct nvkm_object_func
+nv44_mpeg_chan = {
+ .dtor = nv44_mpeg_chan_dtor,
+ .fini = nv44_mpeg_chan_fini,
+ .bind = nv44_mpeg_chan_bind,
};
+static int
+nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
+ struct nv44_mpeg_chan *chan;
+ unsigned long flags;
+
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nv44_mpeg_chan, oclass, &chan->object);
+ chan->mpeg = mpeg;
+ chan->fifo = fifoch;
+ *pobject = &chan->object;
+
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
+ list_add(&chan->head, &mpeg->chan);
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+ return 0;
+}
+
/*******************************************************************************
* PMPEG engine/subdev functions
******************************************************************************/
+static bool
+nv44_mpeg_mthd(struct nvkm_device *device, u32 mthd, u32 data)
+{
+ switch (mthd) {
+ case 0x190:
+ case 0x1a0:
+ case 0x1b0:
+ return nv40_mpeg_mthd_dma(device, mthd, data);
+ default:
+ break;
+ }
+ return false;
+}
+
static void
-nv44_mpeg_intr(struct nvkm_subdev *subdev)
+nv44_mpeg_intr(struct nvkm_engine *engine)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct nvkm_handle *handle;
- struct nv44_mpeg_priv *priv = (void *)subdev;
- u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
- u32 stat = nv_rd32(priv, 0x00b100);
- u32 type = nv_rd32(priv, 0x00b230);
- u32 mthd = nv_rd32(priv, 0x00b234);
- u32 data = nv_rd32(priv, 0x00b238);
+ struct nv44_mpeg *mpeg = nv44_mpeg(engine);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nv44_mpeg_chan *temp, *chan = NULL;
+ unsigned long flags;
+ u32 inst = nvkm_rd32(device, 0x00b318) & 0x000fffff;
+ u32 stat = nvkm_rd32(device, 0x00b100);
+ u32 type = nvkm_rd32(device, 0x00b230);
+ u32 mthd = nvkm_rd32(device, 0x00b234);
+ u32 data = nvkm_rd32(device, 0x00b238);
u32 show = stat;
- int chid;
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
+ spin_lock_irqsave(&mpeg->engine.lock, flags);
+ list_for_each_entry(temp, &mpeg->chan, head) {
+ if (temp->inst >> 4 == inst) {
+ chan = temp;
+ list_del(&chan->head);
+ list_add(&chan->head, &mpeg->chan);
+ break;
+ }
+ }
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
- nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+ nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
show &= ~0x01000000;
}
if (type == 0x00000010) {
- handle = nvkm_handle_get_class(engctx, 0x3174);
- if (handle && !nv_call(handle->object, mthd, data))
+ if (!nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
- nvkm_handle_put(handle);
}
}
- nv_wr32(priv, 0x00b100, stat);
- nv_wr32(priv, 0x00b230, 0x00000001);
+ nvkm_wr32(device, 0x00b100, stat);
+ nvkm_wr32(device, 0x00b230, 0x00000001);
if (show) {
- nv_error(priv,
- "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst << 4, nvkm_client_name(engctx), stat,
- type, mthd, data);
+ nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
+ chan ? chan->fifo->chid : -1, inst << 4,
+ chan ? chan->object.client->name : "unknown",
+ stat, type, mthd, data);
}
- nvkm_engctx_put(engctx);
+ spin_unlock_irqrestore(&mpeg->engine.lock, flags);
}
-static void
-nv44_mpeg_me_intr(struct nvkm_subdev *subdev)
-{
- struct nv44_mpeg_priv *priv = (void *)subdev;
- u32 stat;
-
- if ((stat = nv_rd32(priv, 0x00b100)))
- nv44_mpeg_intr(subdev);
-
- if ((stat = nv_rd32(priv, 0x00b800))) {
- nv_error(priv, "PMSRCH 0x%08x\n", stat);
- nv_wr32(priv, 0x00b800, stat);
+static const struct nvkm_engine_func
+nv44_mpeg = {
+ .init = nv31_mpeg_init,
+ .intr = nv44_mpeg_intr,
+ .tile = nv31_mpeg_tile,
+ .fifo.cclass = nv44_mpeg_chan_new,
+ .sclass = {
+ { -1, -1, NV31_MPEG, &nv31_mpeg_object },
+ {}
}
-}
+};
-static int
-nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
- struct nv44_mpeg_priv *priv;
- int ret;
-
- ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nv44_mpeg_me_intr;
- nv_engine(priv)->cclass = &nv44_mpeg_cclass;
- nv_engine(priv)->sclass = nv40_mpeg_sclass;
- nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
- return 0;
-}
+ struct nv44_mpeg *mpeg;
-struct nvkm_oclass
-nv44_mpeg_oclass = {
- .handle = NV_ENGINE(MPEG, 0x44),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv44_mpeg_ctor,
- .dtor = _nvkm_mpeg_dtor,
- .init = nv31_mpeg_init,
- .fini = _nvkm_mpeg_fini,
- },
-};
+ if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_LIST_HEAD(&mpeg->chan);
+ *pmpeg = &mpeg->engine;
+
+ return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
+ true, &mpeg->engine);
+}
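
The nv44 interrupt path above no longer resolves channels through engctx handles; instead, channels add themselves to mpeg->chan at creation, and the handler matches the hardware instance address against that list under the engine spinlock, re-inserting a hit at the head as a small most-recently-used optimisation. A condensed sketch of that lookup with simplified types (the real handler keeps the lock held for the rest of the IRQ work, hence the asymmetric unlock here):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct chan {
	struct list_head head;
	u32 inst;		/* instance address captured at bind time */
};

static struct chan *
chan_lookup(struct list_head *chans, spinlock_t *lock,
	    unsigned long *flags, u32 inst)
{
	struct chan *chan;

	spin_lock_irqsave(lock, *flags);
	list_for_each_entry(chan, chans, head) {
		if (chan->inst >> 4 == inst) {
			/* list_del() + list_add() in the handler == move to head */
			list_move(&chan->head, chans);
			return chan;	/* returned with the lock still held */
		}
	}
	spin_unlock_irqrestore(lock, *flags);
	return NULL;
}
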
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index b3463f3739ce..c3a85dffc782 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,98 +21,35 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/mpeg.h>
+#include "priv.h"
-#include <subdev/bar.h>
+#include <core/gpuobj.h>
#include <subdev/timer.h>
-struct nv50_mpeg_priv {
- struct nvkm_mpeg base;
-};
-
-struct nv50_mpeg_chan {
- struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static int
-nv50_mpeg_object_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_gpuobj *obj;
- int ret;
-
- ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
- 16, 16, 0, &obj);
- *pobject = nv_object(obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- return 0;
-}
-
-struct nvkm_ofuncs
-nv50_mpeg_ofuncs = {
- .ctor = nv50_mpeg_object_ctor,
- .dtor = _nvkm_gpuobj_dtor,
- .init = _nvkm_gpuobj_init,
- .fini = _nvkm_gpuobj_fini,
- .rd32 = _nvkm_gpuobj_rd32,
- .wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_mpeg_sclass[] = {
- { 0x3174, &nv50_mpeg_ofuncs },
- {}
-};
+#include <nvif/class.h>
/*******************************************************************************
* PMPEG context
******************************************************************************/
-int
-nv50_mpeg_context_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static int
+nv50_mpeg_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_bar *bar = nvkm_bar(parent);
- struct nv50_mpeg_chan *chan;
- int ret;
-
- ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- nv_wo32(chan, 0x0070, 0x00801ec1);
- nv_wo32(chan, 0x007c, 0x0000037c);
- bar->flush(bar);
- return 0;
+ int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4,
+ align, true, parent, pgpuobj);
+ if (ret == 0) {
+ nvkm_kmap(*pgpuobj);
+ nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
+ nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
+ nvkm_done(*pgpuobj);
+ }
+ return ret;
}
-static struct nvkm_oclass
+const struct nvkm_object_func
nv50_mpeg_cclass = {
- .handle = NV_ENGCTX(MPEG, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_mpeg_context_ctor,
- .dtor = _nvkm_mpeg_context_dtor,
- .init = _nvkm_mpeg_context_init,
- .fini = _nvkm_mpeg_context_fini,
- .rd32 = _nvkm_mpeg_context_rd32,
- .wr32 = _nvkm_mpeg_context_wr32,
- },
+ .bind = nv50_mpeg_cclass_bind,
};
/*******************************************************************************
@@ -120,106 +57,79 @@ nv50_mpeg_cclass = {
******************************************************************************/
void
-nv50_mpeg_intr(struct nvkm_subdev *subdev)
+nv50_mpeg_intr(struct nvkm_engine *mpeg)
{
- struct nv50_mpeg_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, 0x00b100);
- u32 type = nv_rd32(priv, 0x00b230);
- u32 mthd = nv_rd32(priv, 0x00b234);
- u32 data = nv_rd32(priv, 0x00b238);
+ struct nvkm_subdev *subdev = &mpeg->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00b100);
+ u32 type = nvkm_rd32(device, 0x00b230);
+ u32 mthd = nvkm_rd32(device, 0x00b234);
+ u32 data = nvkm_rd32(device, 0x00b238);
u32 show = stat;
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
- nv_wr32(priv, 0x00b308, 0x00000100);
+ nvkm_wr32(device, 0x00b308, 0x00000100);
show &= ~0x01000000;
}
}
if (show) {
- nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- stat, type, mthd, data);
- }
-
- nv_wr32(priv, 0x00b100, stat);
- nv_wr32(priv, 0x00b230, 0x00000001);
-}
-
-static void
-nv50_vpe_intr(struct nvkm_subdev *subdev)
-{
- struct nv50_mpeg_priv *priv = (void *)subdev;
-
- if (nv_rd32(priv, 0x00b100))
- nv50_mpeg_intr(subdev);
-
- if (nv_rd32(priv, 0x00b800)) {
- u32 stat = nv_rd32(priv, 0x00b800);
- nv_info(priv, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(priv, 0xb800, stat);
+ nvkm_info(subdev, "%08x %08x %08x %08x\n",
+ stat, type, mthd, data);
}
-}
-static int
-nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_mpeg_priv *priv;
- int ret;
-
- ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00400002;
- nv_subdev(priv)->intr = nv50_vpe_intr;
- nv_engine(priv)->cclass = &nv50_mpeg_cclass;
- nv_engine(priv)->sclass = nv50_mpeg_sclass;
- return 0;
+ nvkm_wr32(device, 0x00b100, stat);
+ nvkm_wr32(device, 0x00b230, 0x00000001);
}
int
-nv50_mpeg_init(struct nvkm_object *object)
+nv50_mpeg_init(struct nvkm_engine *mpeg)
{
- struct nv50_mpeg_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_mpeg_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x00b32c, 0x00000000);
- nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b0e0, 0x0000001a);
-
- nv_wr32(priv, 0x00b220, 0x00000044);
- nv_wr32(priv, 0x00b300, 0x00801ec1);
- nv_wr32(priv, 0x00b390, 0x00000000);
- nv_wr32(priv, 0x00b394, 0x00000000);
- nv_wr32(priv, 0x00b398, 0x00000000);
- nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(priv, 0x00b100, 0xffffffff);
- nv_wr32(priv, 0x00b140, 0xffffffff);
-
- if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
- nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+ struct nvkm_subdev *subdev = &mpeg->subdev;
+ struct nvkm_device *device = subdev->device;
+
+ nvkm_wr32(device, 0x00b32c, 0x00000000);
+ nvkm_wr32(device, 0x00b314, 0x00000100);
+ nvkm_wr32(device, 0x00b0e0, 0x0000001a);
+
+ nvkm_wr32(device, 0x00b220, 0x00000044);
+ nvkm_wr32(device, 0x00b300, 0x00801ec1);
+ nvkm_wr32(device, 0x00b390, 0x00000000);
+ nvkm_wr32(device, 0x00b394, 0x00000000);
+ nvkm_wr32(device, 0x00b398, 0x00000000);
+ nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+
+ nvkm_wr32(device, 0x00b100, 0xffffffff);
+ nvkm_wr32(device, 0x00b140, 0xffffffff);
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "timeout %08x\n",
+ nvkm_rd32(device, 0x00b200));
return -EBUSY;
}
return 0;
}
-struct nvkm_oclass
-nv50_mpeg_oclass = {
- .handle = NV_ENGINE(MPEG, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_mpeg_ctor,
- .dtor = _nvkm_mpeg_dtor,
- .init = nv50_mpeg_init,
- .fini = _nvkm_mpeg_fini,
- },
+static const struct nvkm_engine_func
+nv50_mpeg = {
+ .init = nv50_mpeg_init,
+ .intr = nv50_mpeg_intr,
+ .cclass = &nv50_mpeg_cclass,
+ .sclass = {
+ { -1, -1, NV31_MPEG, &nv31_mpeg_object },
+ {}
+ }
};
+
+int
+nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+ return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
+ true, pmpeg);
+}
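
One detail worth noting in the bind hooks (both nv44_mpeg_chan_bind() and nv50_mpeg_cclass_bind() above): the removed code wrote context words with nv_wo32() and then flushed BAR explicitly via bar->flush(bar), whereas the new accessors bracket the writes with nvkm_kmap()/nvkm_done(), which appears to subsume that flush. The shape of the pattern, restated with comments (offsets and values are the ones the nv50 path writes; ctx_bind_sketch is not a driver function):

static int
ctx_bind_sketch(struct nvkm_device *device, struct nvkm_gpuobj *parent,
		int align, struct nvkm_gpuobj **pgpuobj)
{
	/* 128 words, zero-filled, allocated inside the channel's parent */
	int ret = nvkm_gpuobj_new(device, 128 * 4, align, true, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);			/* map backing store */
		nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);	/* default PMPEG state */
		nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
		nvkm_done(*pgpuobj);			/* unmap; flush handled here */
	}
	return ret;
}
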
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
new file mode 100644
index 000000000000..d5753103ff63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_MPEG_PRIV_H__
+#define __NVKM_MPEG_PRIV_H__
+#include <engine/mpeg.h>
+struct nvkm_fifo_chan;
+
+int nv31_mpeg_init(struct nvkm_engine *);
+void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
+extern const struct nvkm_object_func nv31_mpeg_object;
+
+bool nv40_mpeg_mthd_dma(struct nvkm_device *, u32, u32);
+
+int nv50_mpeg_init(struct nvkm_engine *);
+void nv50_mpeg_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func nv50_mpeg_cclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index c59c83a67315..1a7151146e9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,3 +1,5 @@
+nvkm-y += nvkm/engine/mspdec/base.o
nvkm-y += nvkm/engine/mspdec/g98.o
+nvkm-y += nvkm/engine/mspdec/gt215.o
nvkm-y += nvkm/engine/mspdec/gf100.o
nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
new file mode 100644
index 000000000000..80211f76093b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+}
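
With nvkm_mspdec_new_() fixing the falcon's register base at 0x085000, each variant file shrinks to a const nvkm_falcon_func naming its PMC enable mask, init hook, and class list, plus a one-line constructor. A sketch of what a hypothetical additional variant would look like under this scheme (the gxyz names are invented; GK104_MSPDEC stands in for whatever class id the variant exposes):

static const struct nvkm_falcon_func
gxyz_mspdec = {
	.pmc_enable = 0x00020000,	/* PMC enable bit(s) for this unit */
	.init = gf100_mspdec_init,	/* init hooks are shared, as gk104 shows */
	.sclass = {
		{ -1, -1, GK104_MSPDEC },
		{}
	}
};

int
gxyz_mspdec_new(struct nvkm_device *device, int index,
		struct nvkm_engine **pengine)
{
	return nvkm_mspdec_new_(&gxyz_mspdec, device, index, pengine);
}
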
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 2174577793a4..1f1a99e927b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,89 +21,31 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct g98_mspdec_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_sclass[] = {
- { 0x88b2, &nvkm_object_ofuncs },
- { 0x85b2, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_cclass = {
- .handle = NV_ENGCTX(MSPDEC, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-g98_mspdec_init(struct nvkm_object *object)
+void
+g98_mspdec_init(struct nvkm_falcon *mspdec)
{
- struct g98_mspdec_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x085010, 0x0000ffd2);
- nv_wr32(priv, 0x08501c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = mspdec->engine.subdev.device;
+ nvkm_wr32(device, 0x085010, 0x0000ffd2);
+ nvkm_wr32(device, 0x08501c, 0x0000fff2);
}
-static int
-g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct g98_mspdec_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
- "PMSPDEC", "mspdec", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+g98_mspdec = {
+ .pmc_enable = 0x01020000,
+ .init = g98_mspdec_init,
+ .sclass = {
+ { -1, -1, G98_MSPDEC },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x01020000;
- nv_engine(priv)->cclass = &g98_mspdec_cclass;
- nv_engine(priv)->sclass = g98_mspdec_sclass;
- return 0;
+int
+g98_mspdec_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
}
-
-struct nvkm_oclass
-g98_mspdec_oclass = {
- .handle = NV_ENGINE(MSPDEC, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g98_mspdec_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = g98_mspdec_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index c814a5f65eb0..371fd6c3c663 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,89 +21,31 @@
*
* Authors: Maarten Lankhorst
*/
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct gf100_mspdec_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_sclass[] = {
- { 0x90b2, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_cclass = {
- .handle = NV_ENGCTX(MSPDEC, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-gf100_mspdec_init(struct nvkm_object *object)
+void
+gf100_mspdec_init(struct nvkm_falcon *mspdec)
{
- struct gf100_mspdec_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x085010, 0x0000fff2);
- nv_wr32(priv, 0x08501c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = mspdec->engine.subdev.device;
+ nvkm_wr32(device, 0x085010, 0x0000fff2);
+ nvkm_wr32(device, 0x08501c, 0x0000fff2);
}
-static int
-gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_mspdec_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
- "PMSPDEC", "mspdec", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+gf100_mspdec = {
+ .pmc_enable = 0x00020000,
+ .init = gf100_mspdec_init,
+ .sclass = {
+ { -1, -1, GF100_MSPDEC },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00020000;
- nv_subdev(priv)->intr = nvkm_falcon_intr;
- nv_engine(priv)->cclass = &gf100_mspdec_cclass;
- nv_engine(priv)->sclass = gf100_mspdec_sclass;
- return 0;
+int
+gf100_mspdec_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
}
-
-struct nvkm_oclass
-gf100_mspdec_oclass = {
- .handle = NV_ENGINE(MSPDEC, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_mspdec_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gf100_mspdec_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 979920650dbd..de804a15bfd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,89 +21,23 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
-
-struct gk104_mspdec_priv {
- struct nvkm_falcon base;
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_mspdec = {
+ .pmc_enable = 0x00020000,
+ .init = gf100_mspdec_init,
+ .sclass = {
+ { -1, -1, GK104_MSPDEC },
+ {}
+ }
};
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_sclass[] = {
- { 0x95b2, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_cclass = {
- .handle = NV_ENGCTX(MSPDEC, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_mspdec_init(struct nvkm_object *object)
+int
+gk104_mspdec_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct gk104_mspdec_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x085010, 0x0000fff2);
- nv_wr32(priv, 0x08501c, 0x0000fff2);
- return 0;
-}
-
-static int
-gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_mspdec_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
- "PMSPDEC", "mspdec", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00020000;
- nv_subdev(priv)->intr = nvkm_falcon_intr;
- nv_engine(priv)->cclass = &gk104_mspdec_cclass;
- nv_engine(priv)->sclass = gk104_mspdec_sclass;
- return 0;
+ return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
}
-
-struct nvkm_oclass
-gk104_mspdec_oclass = {
- .handle = NV_ENGINE(MSPDEC, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_mspdec_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gk104_mspdec_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index b7613059da08..835631713c95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -19,26 +19,25 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include "nv04.h"
+#include "priv.h"
-void
-nv40_mc_msi_rearm(struct nvkm_mc *pmc)
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_mspdec = {
+ .pmc_enable = 0x01020000,
+ .init = g98_mspdec_init,
+ .sclass = {
+ { -1, -1, GT212_MSPDEC },
+ {}
+ }
+};
+
+int
+gt215_mspdec_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088068, 0xff);
+ return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
}
-
-struct nvkm_oclass *
-nv40_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x40),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv04_mc_init,
- .fini = _nvkm_mc_fini,
- },
- .intr = nv04_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
new file mode 100644
index 000000000000..d518af4bc9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSPDEC_PRIV_H__
+#define __NVKM_MSPDEC_PRIV_H__
+#include <engine/mspdec.h>
+
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+ int index, struct nvkm_engine **);
+
+void g98_mspdec_init(struct nvkm_falcon *);
+
+void gf100_mspdec_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 4576a9eee39d..3ea7eafb408f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,2 +1,4 @@
+nvkm-y += nvkm/engine/msppp/base.o
nvkm-y += nvkm/engine/msppp/g98.o
+nvkm-y += nvkm/engine/msppp/gt215.o
nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
new file mode 100644
index 000000000000..bfae5e60e925
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ int index, struct nvkm_engine **pengine)
+{
+ return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 7a602a2dec94..73f633ae2ee7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,89 +21,31 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct g98_msppp_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_sclass[] = {
- { 0x88b3, &nvkm_object_ofuncs },
- { 0x85b3, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_cclass = {
- .handle = NV_ENGCTX(MSPPP, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-g98_msppp_init(struct nvkm_object *object)
+void
+g98_msppp_init(struct nvkm_falcon *msppp)
{
- struct g98_msppp_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x086010, 0x0000ffd2);
- nv_wr32(priv, 0x08601c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = msppp->engine.subdev.device;
+ nvkm_wr32(device, 0x086010, 0x0000ffd2);
+ nvkm_wr32(device, 0x08601c, 0x0000fff2);
}
-static int
-g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct g98_msppp_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
- "PMSPPP", "msppp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+g98_msppp = {
+ .pmc_enable = 0x00400002,
+ .init = g98_msppp_init,
+ .sclass = {
+ { -1, -1, G98_MSPPP },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00400002;
- nv_engine(priv)->cclass = &g98_msppp_cclass;
- nv_engine(priv)->sclass = g98_msppp_sclass;
- return 0;
+int
+g98_msppp_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
}
-
-struct nvkm_oclass
-g98_msppp_oclass = {
- .handle = NV_ENGINE(MSPPP, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g98_msppp_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = g98_msppp_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 6047baee1f75..c42c0c07e2db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,89 +21,31 @@
*
* Authors: Maarten Lankhorst
*/
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct gf100_msppp_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_sclass[] = {
- { 0x90b3, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_cclass = {
- .handle = NV_ENGCTX(MSPPP, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-gf100_msppp_init(struct nvkm_object *object)
+static void
+gf100_msppp_init(struct nvkm_falcon *msppp)
{
- struct gf100_msppp_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x086010, 0x0000fff2);
- nv_wr32(priv, 0x08601c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = msppp->engine.subdev.device;
+ nvkm_wr32(device, 0x086010, 0x0000fff2);
+ nvkm_wr32(device, 0x08601c, 0x0000fff2);
}
-static int
-gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_msppp_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
- "PMSPPP", "msppp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+gf100_msppp = {
+ .pmc_enable = 0x00000002,
+ .init = gf100_msppp_init,
+ .sclass = {
+ { -1, -1, GF100_MSPPP },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00000002;
- nv_subdev(priv)->intr = nvkm_falcon_intr;
- nv_engine(priv)->cclass = &gf100_msppp_cclass;
- nv_engine(priv)->sclass = gf100_msppp_sclass;
- return 0;
+int
+gf100_msppp_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
}
-
-struct nvkm_oclass
-gf100_msppp_oclass = {
- .handle = NV_ENGINE(MSPPP, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_msppp_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gf100_msppp_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
new file mode 100644
index 000000000000..00e7795f1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msppp = {
+ .pmc_enable = 0x00400002,
+ .init = g98_msppp_init,
+ .sclass = {
+ { -1, -1, GT212_MSPPP },
+ {}
+ }
+};
+
+int
+gt215_msppp_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
new file mode 100644
index 000000000000..37a91f9d9181
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_MSPPP_PRIV_H__
+#define __NVKM_MSPPP_PRIV_H__
+#include <engine/msppp.h>
+
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+ int index, struct nvkm_engine **);
+
+void g98_msppp_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index 0c9811009e28..28c8ecd27b6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,3 +1,6 @@
+nvkm-y += nvkm/engine/msvld/base.o
nvkm-y += nvkm/engine/msvld/g98.o
+nvkm-y += nvkm/engine/msvld/gt215.o
+nvkm-y += nvkm/engine/msvld/mcp89.o
nvkm-y += nvkm/engine/msvld/gf100.o
nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
new file mode 100644
index 000000000000..745bbb653dc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ int index, struct nvkm_engine **pengine)
+{
+ return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index c8a6b4ef52a1..47e2929bfaf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,90 +21,31 @@
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct g98_msvld_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_sclass[] = {
- { 0x88b1, &nvkm_object_ofuncs },
- { 0x85b1, &nvkm_object_ofuncs },
- { 0x86b1, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_cclass = {
- .handle = NV_ENGCTX(MSVLD, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-g98_msvld_init(struct nvkm_object *object)
+void
+g98_msvld_init(struct nvkm_falcon *msvld)
{
- struct g98_msvld_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x084010, 0x0000ffd2);
- nv_wr32(priv, 0x08401c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = msvld->engine.subdev.device;
+ nvkm_wr32(device, 0x084010, 0x0000ffd2);
+ nvkm_wr32(device, 0x08401c, 0x0000fff2);
}
-static int
-g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct g98_msvld_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
- "PMSVLD", "msvld", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+g98_msvld = {
+ .pmc_enable = 0x04008000,
+ .init = g98_msvld_init,
+ .sclass = {
+ { -1, -1, G98_MSVLD },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x04008000;
- nv_engine(priv)->cclass = &g98_msvld_cclass;
- nv_engine(priv)->sclass = g98_msvld_sclass;
- return 0;
+int
+g98_msvld_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
}
-
-struct nvkm_oclass
-g98_msvld_oclass = {
- .handle = NV_ENGINE(MSVLD, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g98_msvld_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = g98_msvld_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index b8d1e0f521ef..1ac581ba9f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,89 +21,31 @@
*
* Authors: Maarten Lankhorst
*/
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
-struct gf100_msvld_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_sclass[] = {
- { 0x90b1, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_cclass = {
- .handle = NV_ENGCTX(MSVLD, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
-static int
-gf100_msvld_init(struct nvkm_object *object)
+void
+gf100_msvld_init(struct nvkm_falcon *msvld)
{
- struct gf100_msvld_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x084010, 0x0000fff2);
- nv_wr32(priv, 0x08401c, 0x0000fff2);
- return 0;
+ struct nvkm_device *device = msvld->engine.subdev.device;
+ nvkm_wr32(device, 0x084010, 0x0000fff2);
+ nvkm_wr32(device, 0x08401c, 0x0000fff2);
}
-static int
-gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_msvld_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
- "PMSVLD", "msvld", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_falcon_func
+gf100_msvld = {
+ .pmc_enable = 0x00008000,
+ .init = gf100_msvld_init,
+ .sclass = {
+ { -1, -1, GF100_MSVLD },
+ {}
+ }
+};
- nv_subdev(priv)->unit = 0x00008000;
- nv_subdev(priv)->intr = nvkm_falcon_intr;
- nv_engine(priv)->cclass = &gf100_msvld_cclass;
- nv_engine(priv)->sclass = gf100_msvld_sclass;
- return 0;
+int
+gf100_msvld_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
}
-
-struct nvkm_oclass
-gf100_msvld_oclass = {
- .handle = NV_ENGINE(MSVLD, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_msvld_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gf100_msvld_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index a0b0927834df..4bba16e0f560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,89 +21,23 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/msvld.h>
-#include <engine/falcon.h>
-
-struct gk104_msvld_priv {
- struct nvkm_falcon base;
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_msvld = {
+ .pmc_enable = 0x00008000,
+ .init = gf100_msvld_init,
+ .sclass = {
+ { -1, -1, GK104_MSVLD },
+ {}
+ }
};
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_sclass[] = {
- { 0x95b1, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_cclass = {
- .handle = NV_ENGCTX(MSVLD, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_msvld_init(struct nvkm_object *object)
+int
+gk104_msvld_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct gk104_msvld_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x084010, 0x0000fff2);
- nv_wr32(priv, 0x08401c, 0x0000fff2);
- return 0;
-}
-
-static int
-gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_msvld_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
- "PMSVLD", "msvld", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00008000;
- nv_subdev(priv)->intr = nvkm_falcon_intr;
- nv_engine(priv)->cclass = &gk104_msvld_cclass;
- nv_engine(priv)->sclass = gk104_msvld_sclass;
- return 0;
+ return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
}
-
-struct nvkm_oclass
-gk104_msvld_oclass = {
- .handle = NV_ENGINE(MSVLD, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_msvld_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = gk104_msvld_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
new file mode 100644
index 000000000000..e17cb5605b2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msvld = {
+ .pmc_enable = 0x04008000,
+ .init = g98_msvld_init,
+ .sclass = {
+ { -1, -1, GT212_MSVLD },
+ {}
+ }
+};
+
+int
+gt215_msvld_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
new file mode 100644
index 000000000000..511800f6a43b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+mcp89_msvld = {
+ .pmc_enable = 0x04008000,
+ .init = g98_msvld_init,
+ .sclass = {
+ { -1, -1, IGT21A_MSVLD },
+ {}
+ }
+};
+
+int
+mcp89_msvld_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
new file mode 100644
index 000000000000..9dc1da67d929
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSVLD_PRIV_H__
+#define __NVKM_MSVLD_PRIV_H__
+#include <engine/msvld.h>
+
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+ int index, struct nvkm_engine **);
+
+void g98_msvld_init(struct nvkm_falcon *);
+
+void gf100_msvld_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index 413b6091e256..1614d385fb0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,9 +1,10 @@
nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/daemon.o
nvkm-y += nvkm/engine/pm/nv40.o
nvkm-y += nvkm/engine/pm/nv50.o
nvkm-y += nvkm/engine/pm/g84.o
+nvkm-y += nvkm/engine/pm/gt200.o
nvkm-y += nvkm/engine/pm/gt215.o
nvkm-y += nvkm/engine/pm/gf100.o
+nvkm-y += nvkm/engine/pm/gf108.o
+nvkm-y += nvkm/engine/pm/gf117.o
nvkm-y += nvkm/engine/pm/gk104.o
-nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..0db9be202c42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -24,368 +24,751 @@
#include "priv.h"
#include <core/client.h>
-#include <core/device.h>
#include <core/option.h>
#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
-#define QUAD_MASK 0x0f
-#define QUAD_FREE 0x01
+static u8
+nvkm_pm_count_perfdom(struct nvkm_pm *pm)
+{
+ struct nvkm_perfdom *dom;
+ u8 domain_nr = 0;
-static struct nvkm_perfsig *
-nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size)
+ list_for_each_entry(dom, &pm->domains, head)
+ domain_nr++;
+ return domain_nr;
+}
+
+static u16
+nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
- char path[64];
+ u16 signal_nr = 0;
int i;
- if (name[0] != '/') {
+ if (dom) {
for (i = 0; i < dom->signal_nr; i++) {
- if ( dom->signal[i].name &&
- !strncmp(name, dom->signal[i].name, size))
- return &dom->signal[i];
- }
- } else {
- for (i = 0; i < dom->signal_nr; i++) {
- snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
- if (!strncmp(name, path, size))
- return &dom->signal[i];
+ if (dom->signal[i].name)
+ signal_nr++;
}
}
+ return signal_nr;
+}
+static struct nvkm_perfdom *
+nvkm_perfdom_find(struct nvkm_pm *pm, int di)
+{
+ struct nvkm_perfdom *dom;
+ int tmp = 0;
+
+ list_for_each_entry(dom, &pm->domains, head) {
+ if (tmp++ == di)
+ return dom;
+ }
return NULL;
}
struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size,
- struct nvkm_perfdom **pdom)
+nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
struct nvkm_perfdom *dom = *pdom;
- struct nvkm_perfsig *sig;
if (dom == NULL) {
- list_for_each_entry(dom, &ppm->domains, head) {
- sig = nvkm_perfsig_find_(dom, name, size);
- if (sig) {
- *pdom = dom;
- return sig;
- }
- }
+ dom = nvkm_perfdom_find(pm, di);
+ if (dom == NULL)
+ return NULL;
+ *pdom = dom;
+ }
+ if (!dom->signal[si].name)
return NULL;
- }
+ return &dom->signal[si];
+}
- return nvkm_perfsig_find_(dom, name, size);
+static u8
+nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
+{
+ u8 source_nr = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
+ if (sig->source[i])
+ source_nr++;
+ }
+ return source_nr;
}
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name,
- struct nvkm_perfdom **pdom)
+static struct nvkm_perfsrc *
+nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
- struct nvkm_perfsig *sig;
- struct nvkm_perfctr *ctr;
+ struct nvkm_perfsrc *src;
+ bool found = false;
+ int tmp = 1; /* source IDs start from 1 */
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
+ if (sig->source[i] == si) {
+ found = true;
+ break;
+ }
+ }
- sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom);
- if (!sig)
- return NULL;
+ if (found) {
+ list_for_each_entry(src, &pm->sources, head) {
+ if (tmp++ == si)
+ return src;
+ }
+ }
- ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
- if (ctr) {
- ctr->signal[0] = sig;
- ctr->logic_op = 0xaaaa;
+ return NULL;
+}
+
+static int
+nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
+{
+ struct nvkm_subdev *subdev = &pm->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_perfdom *dom = NULL;
+ struct nvkm_perfsig *sig;
+ struct nvkm_perfsrc *src;
+ u32 mask, value;
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+ sig = nvkm_perfsig_find(pm, ctr->domain,
+ ctr->signal[i], &dom);
+ if (!sig)
+ return -EINVAL;
+
+ src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+ if (!src)
+ return -EINVAL;
+
+ /* set enable bit if needed */
+ mask = value = 0x00000000;
+ if (src->enable)
+ mask = value = 0x80000000;
+ mask |= (src->mask << src->shift);
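+ /* the low 32 bits of ctr->source identify the source; the
+ * upper 32 bits carry the mux value chosen by the client */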
+ value |= ((ctr->source[i][j] >> 32) << src->shift);
+
+ /* enable the source */
+ nvkm_mask(device, src->addr, mask, value);
+ nvkm_debug(subdev,
+ "enabled source %08x %08x %08x\n",
+ src->addr, mask, value);
+ }
}
+ return 0;
+}
- return ctr;
+static int
+nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
+{
+ struct nvkm_subdev *subdev = &pm->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_perfdom *dom = NULL;
+ struct nvkm_perfsig *sig;
+ struct nvkm_perfsrc *src;
+ u32 mask;
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+ sig = nvkm_perfsig_find(pm, ctr->domain,
+ ctr->signal[i], &dom);
+ if (!sig)
+ return -EINVAL;
+
+ src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+ if (!src)
+ return -EINVAL;
+
+ /* unset enable bit if needed */
+ mask = 0x00000000;
+ if (src->enable)
+ mask = 0x80000000;
+ mask |= (src->mask << src->shift);
+
+ /* disable the source */
+ nvkm_mask(device, src->addr, mask, 0);
+ nvkm_debug(subdev, "disabled source %08x %08x\n",
+ src->addr, mask);
+ }
+ }
+ return 0;
}
/*******************************************************************************
- * Perfmon object classes
+ * Perfdom object classes
******************************************************************************/
static int
-nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
union {
- struct nvif_perfctr_query_v0 v0;
+ struct nvif_perfdom_init none;
} *args = data;
- struct nvkm_device *device = nv_device(object);
- struct nvkm_pm *ppm = (void *)object->engine;
- struct nvkm_perfdom *dom = NULL, *chk;
- const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
- const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
- const char *name;
- int tmp = 0, di, si;
+ struct nvkm_object *object = &dom->object;
+ struct nvkm_pm *pm = dom->perfmon->pm;
+ int ret, i;
+
+ nvif_ioctl(object, "perfdom init size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nvif_ioctl(object, "perfdom init\n");
+ } else
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ if (dom->ctr[i]) {
+ dom->func->init(pm, dom, dom->ctr[i]);
+
+ /* enable sources */
+ nvkm_perfsrc_enable(pm, dom->ctr[i]);
+ }
+ }
+
+ /* start next batch of counters for sampling */
+ dom->func->next(pm, dom);
+ return 0;
+}
+
+static int
+nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+ union {
+ struct nvif_perfdom_sample none;
+ } *args = data;
+ struct nvkm_object *object = &dom->object;
+ struct nvkm_pm *pm = dom->perfmon->pm;
int ret;
- nv_ioctl(object, "perfctr query size %d\n", size);
+ nvif_ioctl(object, "perfdom sample size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nvif_ioctl(object, "perfdom sample\n");
+ } else
+ return ret;
+ pm->sequence++;
+
+ /* sample previous batch of counters */
+ list_for_each_entry(dom, &pm->domains, head)
+ dom->func->next(pm, dom);
+
+ return 0;
+}
+
+static int
+nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+ union {
+ struct nvif_perfdom_read_v0 v0;
+ } *args = data;
+ struct nvkm_object *object = &dom->object;
+ struct nvkm_pm *pm = dom->perfmon->pm;
+ int ret, i;
+
+ nvif_ioctl(object, "perfdom read size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "perfctr query vers %d iter %08x\n",
- args->v0.version, args->v0.iter);
- di = (args->v0.iter & 0xff000000) >> 24;
- si = (args->v0.iter & 0x00ffffff) - 1;
+ nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
} else
return ret;
- list_for_each_entry(chk, &ppm->domains, head) {
- if (tmp++ == di) {
- dom = chk;
- break;
- }
+ for (i = 0; i < 4; i++) {
+ if (dom->ctr[i])
+ dom->func->read(pm, dom, dom->ctr[i]);
}
- if (dom == NULL || si >= (int)dom->signal_nr)
- return -EINVAL;
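+ /* a zero clock counter means no sample has been captured yet */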
+ if (!dom->clk)
+ return -EAGAIN;
- if (si >= 0) {
- if (raw || !(name = dom->signal[si].name)) {
- snprintf(args->v0.name, sizeof(args->v0.name),
- "/%s/%02x", dom->name, si);
- } else {
- strncpy(args->v0.name, name, sizeof(args->v0.name));
+ for (i = 0; i < 4; i++)
+ if (dom->ctr[i])
+ args->v0.ctr[i] = dom->ctr[i]->ctr;
+ args->v0.clk = dom->clk;
+ return 0;
+}
+
+static int
+nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ struct nvkm_perfdom *dom = nvkm_perfdom(object);
+ switch (mthd) {
+ case NVIF_PERFDOM_V0_INIT:
+ return nvkm_perfdom_init(dom, data, size);
+ case NVIF_PERFDOM_V0_SAMPLE:
+ return nvkm_perfdom_sample(dom, data, size);
+ case NVIF_PERFDOM_V0_READ:
+ return nvkm_perfdom_read(dom, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void *
+nvkm_perfdom_dtor(struct nvkm_object *object)
+{
+ struct nvkm_perfdom *dom = nvkm_perfdom(object);
+ struct nvkm_pm *pm = dom->perfmon->pm;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct nvkm_perfctr *ctr = dom->ctr[i];
+ if (ctr) {
+ nvkm_perfsrc_disable(pm, ctr);
+ if (ctr->head.next)
+ list_del(&ctr->head);
}
+ kfree(ctr);
}
- do {
- while (++si < dom->signal_nr) {
- if (all || dom->signal[si].name) {
- args->v0.iter = (di << 24) | ++si;
- return 0;
- }
+ return dom;
+}
+
+static int
+nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
+ struct nvkm_perfsig *signal[4], u64 source[4][8],
+ u16 logic_op, struct nvkm_perfctr **pctr)
+{
+ struct nvkm_perfctr *ctr;
+ int i, j;
+
+ if (!dom)
+ return -EINVAL;
+
+ ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+ if (!ctr)
+ return -ENOMEM;
+
+ ctr->domain = domain;
+ ctr->logic_op = logic_op;
+ ctr->slot = slot;
+ for (i = 0; i < 4; i++) {
+ if (signal[i]) {
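+ /* store each signal as an index into the domain's table */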
+ ctr->signal[i] = signal[i] - dom->signal;
+ for (j = 0; j < 8; j++)
+ ctr->source[i][j] = source[i][j];
}
- si = -1;
- di = di + 1;
- dom = list_entry(dom->head.next, typeof(*dom), head);
- } while (&dom->head != &ppm->domains);
+ }
+ list_add_tail(&ctr->head, &dom->list);
- args->v0.iter = 0xffffffff;
return 0;
}
+static const struct nvkm_object_func
+nvkm_perfdom = {
+ .dtor = nvkm_perfdom_dtor,
+ .mthd = nvkm_perfdom_mthd,
+};
+
static int
-nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
{
union {
- struct nvif_perfctr_sample none;
+ struct nvif_perfdom_v0 v0;
} *args = data;
- struct nvkm_pm *ppm = (void *)object->engine;
- struct nvkm_perfctr *ctr, *tmp;
+ struct nvkm_pm *pm = perfmon->pm;
+ struct nvkm_object *parent = oclass->parent;
+ struct nvkm_perfdom *sdom = NULL;
+ struct nvkm_perfctr *ctr[4] = {};
struct nvkm_perfdom *dom;
+ int c, s, m;
int ret;
- nv_ioctl(object, "perfctr sample size %d\n", size);
- if (nvif_unvers(args->none)) {
- nv_ioctl(object, "perfctr sample\n");
+ nvif_ioctl(parent, "create perfdom size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
+ args->v0.version, args->v0.domain, args->v0.mode);
} else
return ret;
- ppm->sequence++;
-
- list_for_each_entry(dom, &ppm->domains, head) {
- /* sample previous batch of counters */
- if (dom->quad != QUAD_MASK) {
- dom->func->next(ppm, dom);
- tmp = NULL;
- while (!list_empty(&dom->list)) {
- ctr = list_first_entry(&dom->list,
- typeof(*ctr), head);
- if (ctr->slot < 0) break;
- if ( tmp && tmp == ctr) break;
- if (!tmp) tmp = ctr;
- dom->func->read(ppm, dom, ctr);
- ctr->slot = -1;
- list_move_tail(&ctr->head, &dom->list);
- }
- }
- dom->quad = QUAD_MASK;
-
- /* setup next batch of counters for sampling */
- list_for_each_entry(ctr, &dom->list, head) {
- ctr->slot = ffs(dom->quad) - 1;
- if (ctr->slot < 0)
- break;
- dom->quad &= ~(QUAD_FREE << ctr->slot);
- dom->func->init(ppm, dom, ctr);
+ for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
+ struct nvkm_perfsig *sig[4] = {};
+ u64 src[4][8] = {};
+
+ for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
+ sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
+ args->v0.ctr[c].signal[s],
+ &sdom);
+ if (args->v0.ctr[c].signal[s] && !sig[s])
+ return -EINVAL;
+
+ for (m = 0; m < 8; m++) {
+ src[s][m] = args->v0.ctr[c].source[s][m];
+ if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
+ src[s][m]))
+ return -EINVAL;
+ }
}
- if (dom->quad != QUAD_MASK)
- dom->func->next(ppm, dom);
+ ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
+ args->v0.ctr[c].logic_op, &ctr[c]);
+ if (ret)
+ return ret;
}
+ if (!sdom)
+ return -EINVAL;
+
+ if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
+ dom->perfmon = perfmon;
+ *pobject = &dom->object;
+
+ dom->func = sdom->func;
+ dom->addr = sdom->addr;
+ dom->mode = args->v0.mode;
+ for (c = 0; c < ARRAY_SIZE(ctr); c++)
+ dom->ctr[c] = ctr[c];
return 0;
}
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
static int
-nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
+ void *data, u32 size)
{
union {
- struct nvif_perfctr_read_v0 v0;
+ struct nvif_perfmon_query_domain_v0 v0;
} *args = data;
- struct nvkm_perfctr *ctr = (void *)object;
- int ret;
+ struct nvkm_object *object = &perfmon->object;
+ struct nvkm_pm *pm = perfmon->pm;
+ struct nvkm_perfdom *dom;
+ u8 domain_nr;
+ int di, ret;
- nv_ioctl(object, "perfctr read size %d\n", size);
+ nvif_ioctl(object, "perfmon query domain size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+ nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
+ args->v0.version, args->v0.iter);
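+ /* iterators are 1-based; an iterator of 0 primes the query,
+ * so di starts at -1 and only the next iterator is returned */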
+ di = (args->v0.iter & 0xff) - 1;
} else
return ret;
- if (!ctr->clk)
- return -EAGAIN;
+ domain_nr = nvkm_pm_count_perfdom(pm);
+ if (di >= (int)domain_nr)
+ return -EINVAL;
+
+ if (di >= 0) {
+ dom = nvkm_perfdom_find(pm, di);
+ if (dom == NULL)
+ return -EINVAL;
+
+ args->v0.id = di;
+ args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
+ strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+
+ /* Currently only global counters (PCOUNTER) are implemented
+ * but this will be different for local counters (MP). */
+ args->v0.counter_nr = 4;
+ }
- args->v0.clk = ctr->clk;
- args->v0.ctr = ctr->ctr;
+ if (++di < domain_nr) {
+ args->v0.iter = ++di;
+ return 0;
+ }
+
+ args->v0.iter = 0xff;
return 0;
}
static int
-nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
+ void *data, u32 size)
{
- switch (mthd) {
- case NVIF_PERFCTR_V0_QUERY:
- return nvkm_perfctr_query(object, data, size);
- case NVIF_PERFCTR_V0_SAMPLE:
- return nvkm_perfctr_sample(object, data, size);
- case NVIF_PERFCTR_V0_READ:
- return nvkm_perfctr_read(object, data, size);
- default:
- break;
+ union {
+ struct nvif_perfmon_query_signal_v0 v0;
+ } *args = data;
+ struct nvkm_object *object = &perfmon->object;
+ struct nvkm_pm *pm = perfmon->pm;
+ struct nvkm_device *device = pm->engine.subdev.device;
+ struct nvkm_perfdom *dom;
+ struct nvkm_perfsig *sig;
+ const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
+ const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
+ int ret, si;
+
+ nvif_ioctl(object, "perfmon query signal size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nvif_ioctl(object,
+ "perfmon query signal vers %d dom %d iter %04x\n",
+ args->v0.version, args->v0.domain, args->v0.iter);
+ si = (args->v0.iter & 0xffff) - 1;
+ } else
+ return ret;
+
+ dom = nvkm_perfdom_find(pm, args->v0.domain);
+ if (dom == NULL || si >= (int)dom->signal_nr)
+ return -EINVAL;
+
+ if (si >= 0) {
+ sig = &dom->signal[si];
+ if (raw || !sig->name) {
+ snprintf(args->v0.name, sizeof(args->v0.name),
+ "/%s/%02x", dom->name, si);
+ } else {
+ strncpy(args->v0.name, sig->name,
+ sizeof(args->v0.name));
+ }
+
+ args->v0.signal = si;
+ args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
}
- return -EINVAL;
-}
-static void
-nvkm_perfctr_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfctr *ctr = (void *)object;
- if (ctr->head.next)
- list_del(&ctr->head);
- nvkm_object_destroy(&ctr->base);
+ while (++si < dom->signal_nr) {
+ if (all || dom->signal[si].name) {
+ args->v0.iter = ++si;
+ return 0;
+ }
+ }
+
+ args->v0.iter = 0xffff;
+ return 0;
}
static int
-nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
+ void *data, u32 size)
{
union {
- struct nvif_perfctr_v0 v0;
+ struct nvif_perfmon_query_source_v0 v0;
} *args = data;
- struct nvkm_pm *ppm = (void *)engine;
+ struct nvkm_object *object = &perfmon->object;
+ struct nvkm_pm *pm = perfmon->pm;
struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig[4] = {};
- struct nvkm_perfctr *ctr;
- int ret, i;
+ struct nvkm_perfsig *sig;
+ struct nvkm_perfsrc *src;
+ u8 source_nr = 0;
+ int si, ret;
- nv_ioctl(parent, "create perfctr size %d\n", size);
+ nvif_ioctl(object, "perfmon query source size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
- nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
- args->v0.version, args->v0.logic_op);
+ nvif_ioctl(object,
+ "perfmon source vers %d dom %d sig %02x iter %02x\n",
+ args->v0.version, args->v0.domain, args->v0.signal,
+ args->v0.iter);
+ si = (args->v0.iter & 0xff) - 1;
} else
return ret;
- for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
- sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i],
- strnlen(args->v0.name[i],
- sizeof(args->v0.name[i])),
- &dom);
- if (!sig[i])
+ sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
+ if (!sig)
+ return -EINVAL;
+
+ source_nr = nvkm_perfsig_count_perfsrc(sig);
+ if (si >= (int)source_nr)
+ return -EINVAL;
+
+ if (si >= 0) {
+ src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
+ if (!src)
return -EINVAL;
+
+ args->v0.source = sig->source[si];
+ args->v0.mask = src->mask;
+ strncpy(args->v0.name, src->name, sizeof(args->v0.name));
}
- ret = nvkm_object_create(parent, engine, oclass, 0, &ctr);
- *pobject = nv_object(ctr);
- if (ret)
- return ret;
+ if (++si < source_nr) {
+ args->v0.iter = ++si;
+ return 0;
+ }
- ctr->slot = -1;
- ctr->logic_op = args->v0.logic_op;
- ctr->signal[0] = sig[0];
- ctr->signal[1] = sig[1];
- ctr->signal[2] = sig[2];
- ctr->signal[3] = sig[3];
- if (dom)
- list_add_tail(&ctr->head, &dom->list);
+ args->v0.iter = 0xff;
return 0;
}
-static struct nvkm_ofuncs
-nvkm_perfctr_ofuncs = {
- .ctor = nvkm_perfctr_ctor,
- .dtor = nvkm_perfctr_dtor,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
- .mthd = nvkm_perfctr_mthd,
-};
+static int
+nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+ switch (mthd) {
+ case NVIF_PERFMON_V0_QUERY_DOMAIN:
+ return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
+ case NVIF_PERFMON_V0_QUERY_SIGNAL:
+ return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
+ case NVIF_PERFMON_V0_QUERY_SOURCE:
+ return nvkm_perfmon_mthd_query_source(perfmon, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
+ return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
+}
+
+static int
+nvkm_perfmon_child_get(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ if (index == 0) {
+ oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
+ oclass->base.minver = 0;
+ oclass->base.maxver = 0;
+ oclass->ctor = nvkm_perfmon_child_new;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void *
+nvkm_perfmon_dtor(struct nvkm_object *object)
+{
+ struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+ struct nvkm_pm *pm = perfmon->pm;
+ mutex_lock(&pm->engine.subdev.mutex);
+ if (pm->perfmon == &perfmon->object)
+ pm->perfmon = NULL;
+ mutex_unlock(&pm->engine.subdev.mutex);
+ return perfmon;
+}
-struct nvkm_oclass
-nvkm_pm_sclass[] = {
- { .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
- .ofuncs = &nvkm_perfctr_ofuncs,
- },
- {},
+static struct nvkm_object_func
+nvkm_perfmon = {
+ .dtor = nvkm_perfmon_dtor,
+ .mthd = nvkm_perfmon_mthd,
+ .sclass = nvkm_perfmon_child_get,
};
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-static void
-nvkm_perfctx_dtor(struct nvkm_object *object)
+static int
+nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
{
- struct nvkm_pm *ppm = (void *)object->engine;
- mutex_lock(&nv_subdev(ppm)->mutex);
- nvkm_engctx_destroy(&ppm->context->base);
- ppm->context = NULL;
- mutex_unlock(&nv_subdev(ppm)->mutex);
+ struct nvkm_perfmon *perfmon;
+
+ if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
+ perfmon->pm = pm;
+ *pobject = &perfmon->object;
+ return 0;
}
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
static int
-nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
{
- struct nvkm_pm *ppm = (void *)engine;
- struct nvkm_perfctx *ctx;
+ struct nvkm_pm *pm = nvkm_pm(oclass->engine);
int ret;
- ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
- *pobject = nv_object(ctx);
+ ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
if (ret)
return ret;
- mutex_lock(&nv_subdev(ppm)->mutex);
- if (ppm->context == NULL)
- ppm->context = ctx;
- mutex_unlock(&nv_subdev(ppm)->mutex);
+ mutex_lock(&pm->engine.subdev.mutex);
+ if (pm->perfmon == NULL)
+ pm->perfmon = *pobject;
+ ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
+ mutex_unlock(&pm->engine.subdev.mutex);
+ return ret;
+}
- if (ctx != ppm->context)
- return -EBUSY;
+static const struct nvkm_device_oclass
+nvkm_pm_oclass = {
+ .base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = nvkm_pm_oclass_new,
+};
- return 0;
+static int
+nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
+ const struct nvkm_device_oclass **class)
+{
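+ /* a single oclass (PERFMON) is exposed by the pm engine */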
+ if (index == 0) {
+ oclass->base = nvkm_pm_oclass.base;
+ *class = &nvkm_pm_oclass;
+ return index;
+ }
+ return 1;
}
-struct nvkm_oclass
-nvkm_pm_cclass = {
- .handle = NV_ENGCTX(PM, 0x00),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nvkm_perfctx_ctor,
- .dtor = nvkm_perfctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- },
-};
+int
+nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
+ const struct nvkm_specsrc *spec)
+{
+ const struct nvkm_specsrc *ssrc;
+ const struct nvkm_specmux *smux;
+ struct nvkm_perfsrc *src;
+ u8 source_nr = 0;
+
+ if (!spec) {
+ /* No sources are defined for this signal. */
+ return 0;
+ }
+
+ ssrc = spec;
+ while (ssrc->name) {
+ smux = ssrc->mux;
+ while (smux->name) {
+ bool found = false;
+ u8 source_id = 0;
+ u32 len;
+
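+ /* reuse an existing perfsrc if this (addr, shift) pair is
+ * already on the global source list */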
+ list_for_each_entry(src, &pm->sources, head) {
+ if (src->addr == ssrc->addr &&
+ src->shift == smux->shift) {
+ found = true;
+ break;
+ }
+ source_id++;
+ }
+
+ if (!found) {
+ src = kzalloc(sizeof(*src), GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ src->addr = ssrc->addr;
+ src->mask = smux->mask;
+ src->shift = smux->shift;
+ src->enable = smux->enable;
+
+ len = strlen(ssrc->name) +
+ strlen(smux->name) + 2;
+ src->name = kzalloc(len, GFP_KERNEL);
+ if (!src->name) {
+ kfree(src);
+ return -ENOMEM;
+ }
+ snprintf(src->name, len, "%s_%s", ssrc->name,
+ smux->name);
+
+ list_add_tail(&src->head, &pm->sources);
+ }
+
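+ /* record the source as a 1-based index into the global list */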
+ sig->source[source_nr++] = source_id + 1;
+ smux++;
+ }
+ ssrc++;
+ }
+
+ return 0;
+}
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
int
-nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
+nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
u32 base, u32 size_unit, u32 size_domain,
const struct nvkm_specdom *spec)
{
const struct nvkm_specdom *sdom;
const struct nvkm_specsig *ssig;
struct nvkm_perfdom *dom;
- int i;
+ int ret, i;
for (i = 0; i == 0 || mask; i++) {
u32 addr = base + (i * size_unit);
@@ -408,16 +791,20 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
"%s/%02x", name, (int)(sdom - spec));
}
- list_add_tail(&dom->head, &ppm->domains);
+ list_add_tail(&dom->head, &pm->domains);
INIT_LIST_HEAD(&dom->list);
dom->func = sdom->func;
dom->addr = addr;
- dom->quad = QUAD_MASK;
dom->signal_nr = sdom->signal_nr;
ssig = (sdom++)->signal;
while (ssig->name) {
- dom->signal[ssig->signal].name = ssig->name;
+ struct nvkm_perfsig *sig =
+ &dom->signal[ssig->signal];
+ sig->name = ssig->name;
+ ret = nvkm_perfsrc_new(pm, sig, ssig->source);
+ if (ret)
+ return ret;
ssig++;
}
@@ -430,47 +817,49 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
return 0;
}
-int
-_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_pm *ppm = (void *)object;
- return nvkm_engine_fini(&ppm->base, suspend);
-}
-
-int
-_nvkm_pm_init(struct nvkm_object *object)
+static int
+nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
- struct nvkm_pm *ppm = (void *)object;
- return nvkm_engine_init(&ppm->base);
+ struct nvkm_pm *pm = nvkm_pm(engine);
+ if (pm->func->fini)
+ pm->func->fini(pm);
+ return 0;
}
-void
-_nvkm_pm_dtor(struct nvkm_object *object)
+static void *
+nvkm_pm_dtor(struct nvkm_engine *engine)
{
- struct nvkm_pm *ppm = (void *)object;
- struct nvkm_perfdom *dom, *tmp;
+ struct nvkm_pm *pm = nvkm_pm(engine);
+ struct nvkm_perfdom *dom, *next_dom;
+ struct nvkm_perfsrc *src, *next_src;
- list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+ list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
list_del(&dom->head);
kfree(dom);
}
- nvkm_engine_destroy(&ppm->base);
+ list_for_each_entry_safe(src, next_src, &pm->sources, head) {
+ list_del(&src->head);
+ kfree(src->name);
+ kfree(src);
+ }
+
+ return pm;
}
+static const struct nvkm_engine_func
+nvkm_pm = {
+ .dtor = nvkm_pm_dtor,
+ .fini = nvkm_pm_fini,
+ .base.sclass = nvkm_pm_oclass_get,
+};
+
int
-nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
+ int index, struct nvkm_pm *pm)
{
- struct nvkm_pm *ppm;
- int ret;
-
- ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
- "pm", length, pobject);
- ppm = *pobject;
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&ppm->domains);
- return 0;
+ pm->func = func;
+ INIT_LIST_HEAD(&pm->domains);
+ INIT_LIST_HEAD(&pm->sources);
+ return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
deleted file mode 100644
index a7a5f3a3c91b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-static void
-pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- u32 mask = 0x00000000;
- u32 ctrl = 0x00000001;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
- mask |= 1 << (ctr->signal[i] - dom->signal);
-
- nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
- nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
- nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
-}
-
-static void
-pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- ctr->ctr = ppm->pwr[ctr->slot];
- ctr->clk = ppm->pwr[ppm->last];
-}
-
-static void
-pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
-{
- int i;
-
- for (i = 0; i <= ppm->last; i++) {
- ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
- nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
- }
-}
-
-static const struct nvkm_funcdom
-pwr_perfctr_func = {
- .init = pwr_perfctr_init,
- .read = pwr_perfctr_read,
- .next = pwr_perfctr_next,
-};
-
-const struct nvkm_specdom
-gt215_pm_pwr[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "pwr_gr_idle" },
- { 0x04, "pwr_bsp_idle" },
- { 0x05, "pwr_vp_idle" },
- { 0x06, "pwr_ppp_idle" },
- { 0x13, "pwr_ce0_idle" },
- {}
- }, &pwr_perfctr_func },
- {}
-};
-
-const struct nvkm_specdom
-gf100_pm_pwr[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "pwr_gr_idle" },
- { 0x04, "pwr_bsp_idle" },
- { 0x05, "pwr_vp_idle" },
- { 0x06, "pwr_ppp_idle" },
- { 0x13, "pwr_ce0_idle" },
- { 0x14, "pwr_ce1_idle" },
- {}
- }, &pwr_perfctr_func },
- {}
-};
-
-const struct nvkm_specdom
-gk104_pm_pwr[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "pwr_gr_idle" },
- { 0x04, "pwr_bsp_idle" },
- { 0x05, "pwr_vp_idle" },
- { 0x06, "pwr_ppp_idle" },
- { 0x13, "pwr_ce0_idle" },
- { 0x14, "pwr_ce1_idle" },
- { 0x15, "pwr_ce2_idle" },
- {}
- }, &pwr_perfctr_func },
- {}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index d54c6705ba17..6e441ddafd86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -23,15 +23,121 @@
*/
#include "nv40.h"
+const struct nvkm_specsrc
+g84_vfetch_sources[] = {
+ { 0x400c0c, (const struct nvkm_specmux[]) {
+ { 0x3, 0, "unk0" },
+ {}
+ }, "pgraph_vfetch_unk0c" },
+ {}
+};
+
+static const struct nvkm_specsrc
+g84_prop_sources[] = {
+ { 0x408e50, (const struct nvkm_specmux[]) {
+ { 0x1f, 0, "sel", true },
+ {}
+ }, "pgraph_tpc0_prop_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+g84_crop_sources[] = {
+ { 0x407008, (const struct nvkm_specmux[]) {
+ { 0xf, 0, "sel0", true },
+ { 0x7, 16, "sel1", true },
+ {}
+ }, "pgraph_rop0_crop_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+g84_tex_sources[] = {
+ { 0x408808, (const struct nvkm_specmux[]) {
+ { 0xfffff, 0, "unk0" },
+ {}
+ }, "pgraph_tpc0_tex_unk08" },
+ {}
+};
+
static const struct nvkm_specdom
g84_pm[] = {
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0xbd, "pc01_gr_idle" },
+ { 0x5e, "pc01_strmout_00" },
+ { 0x5f, "pc01_strmout_01" },
+ { 0xd2, "pc01_trast_00" },
+ { 0xd3, "pc01_trast_01" },
+ { 0xd4, "pc01_trast_02" },
+ { 0xd5, "pc01_trast_03" },
+ { 0xd8, "pc01_trast_04" },
+ { 0xd9, "pc01_trast_05" },
+ { 0x5c, "pc01_vattr_00" },
+ { 0x5d, "pc01_vattr_01" },
+ { 0x66, "pc01_vfetch_00", g84_vfetch_sources },
+ { 0x67, "pc01_vfetch_01", g84_vfetch_sources },
+ { 0x68, "pc01_vfetch_02", g84_vfetch_sources },
+ { 0x69, "pc01_vfetch_03", g84_vfetch_sources },
+ { 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
+ { 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
+ { 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
+ { 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
+ { 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
+ { 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
+ { 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
+ { 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
+ { 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
+ { 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
+ { 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
+ { 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
+ { 0x76, "pc01_vfetch_10", g84_vfetch_sources },
+ { 0x77, "pc01_vfetch_11", g84_vfetch_sources },
+ { 0x78, "pc01_vfetch_12", g84_vfetch_sources },
+ { 0x79, "pc01_vfetch_13", g84_vfetch_sources },
+ { 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
+ { 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
+ { 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
+ { 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
+ { 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
+ { 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
+ { 0x07, "pc01_zcull_00", nv50_zcull_sources },
+ { 0x08, "pc01_zcull_01", nv50_zcull_sources },
+ { 0x09, "pc01_zcull_02", nv50_zcull_sources },
+ { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+ { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+ { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+ { 0xa4, "pc01_unk00" },
+ { 0xec, "pc01_trailer" },
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
+ { 0xa0, (const struct nvkm_specsig[]) {
+ { 0x30, "pc02_crop_00", g84_crop_sources },
+ { 0x31, "pc02_crop_01", g84_crop_sources },
+ { 0x32, "pc02_crop_02", g84_crop_sources },
+ { 0x33, "pc02_crop_03", g84_crop_sources },
+ { 0x00, "pc02_prop_00", g84_prop_sources },
+ { 0x01, "pc02_prop_01", g84_prop_sources },
+ { 0x02, "pc02_prop_02", g84_prop_sources },
+ { 0x03, "pc02_prop_03", g84_prop_sources },
+ { 0x04, "pc02_prop_04", g84_prop_sources },
+ { 0x05, "pc02_prop_05", g84_prop_sources },
+ { 0x06, "pc02_prop_06", g84_prop_sources },
+ { 0x07, "pc02_prop_07", g84_prop_sources },
+ { 0x48, "pc02_tex_00", g84_tex_sources },
+ { 0x49, "pc02_tex_01", g84_tex_sources },
+ { 0x4a, "pc02_tex_02", g84_tex_sources },
+ { 0x4b, "pc02_tex_03", g84_tex_sources },
+ { 0x1a, "pc02_tex_04", g84_tex_sources },
+ { 0x1b, "pc02_tex_05", g84_tex_sources },
+ { 0x1c, "pc02_tex_06", g84_tex_sources },
+ { 0x44, "pc02_zrop_00", nv50_zrop_sources },
+ { 0x45, "pc02_zrop_01", nv50_zrop_sources },
+ { 0x46, "pc02_zrop_02", nv50_zrop_sources },
+ { 0x47, "pc02_zrop_03", nv50_zrop_sources },
+ { 0x8c, "pc02_trailer" },
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,14 +158,8 @@ g84_pm[] = {
{}
};
-struct nvkm_oclass *
-g84_pm_oclass = &(struct nv40_pm_oclass) {
- .base.handle = NV_ENGINE(PM, 0x84),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = _nvkm_pm_fini,
- },
- .doms = g84_pm,
-}.base;
+int
+g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return nv40_pm_new_(g84_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 008fed73dd82..d2901e9a7808 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -23,62 +23,146 @@
*/
#include "gf100.h"
+const struct nvkm_specsrc
+gf100_pbfb_sources[] = {
+ { 0x10f100, (const struct nvkm_specmux[]) {
+ { 0x1, 0, "unk0" },
+ { 0x3f, 4, "unk4" },
+ {}
+ }, "pbfb_broadcast_pm_unk100" },
+ {}
+};
+
+const struct nvkm_specsrc
+gf100_pmfb_sources[] = {
+ { 0x140028, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ { 0x7, 16, "unk16" },
+ { 0x3, 24, "unk24" },
+ { 0x2, 29, "unk29" },
+ {}
+ }, "pmfb0_pm_unk28" },
+ {}
+};
+
+static const struct nvkm_specsrc
+gf100_l1_sources[] = {
+ { 0x5044a8, (const struct nvkm_specmux[]) {
+ { 0x3f, 0, "sel", true },
+ {}
+ }, "pgraph_gpc0_tpc0_l1_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+gf100_tex_sources[] = {
+ { 0x5042c0, (const struct nvkm_specmux[]) {
+ { 0xf, 0, "sel0", true },
+ { 0x7, 8, "sel1", true },
+ {}
+ }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+ {}
+};
+
+static const struct nvkm_specsrc
+gf100_unk400_sources[] = {
+ { 0x50440c, (const struct nvkm_specmux[]) {
+ { 0x3f, 0, "sel", true },
+ {}
+ }, "pgraph_gpc0_tpc0_unk400_pm_mux" },
+ {}
+};
+
static const struct nvkm_specdom
gf100_pm_hub[] = {
{}
};
-static const struct nvkm_specdom
+const struct nvkm_specdom
gf100_pm_gpc[] = {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0x00, "gpc00_l1_00", gf100_l1_sources },
+ { 0x01, "gpc00_l1_01", gf100_l1_sources },
+ { 0x02, "gpc00_l1_02", gf100_l1_sources },
+ { 0x03, "gpc00_l1_03", gf100_l1_sources },
+ { 0x05, "gpc00_l1_04", gf100_l1_sources },
+ { 0x06, "gpc00_l1_05", gf100_l1_sources },
+ { 0x0a, "gpc00_tex_00", gf100_tex_sources },
+ { 0x0b, "gpc00_tex_01", gf100_tex_sources },
+ { 0x0c, "gpc00_tex_02", gf100_tex_sources },
+ { 0x0d, "gpc00_tex_03", gf100_tex_sources },
+ { 0x0e, "gpc00_tex_04", gf100_tex_sources },
+ { 0x0f, "gpc00_tex_05", gf100_tex_sources },
+ { 0x10, "gpc00_tex_06", gf100_tex_sources },
+ { 0x11, "gpc00_tex_07", gf100_tex_sources },
+ { 0x12, "gpc00_tex_08", gf100_tex_sources },
+ { 0x26, "gpc00_unk400_00", gf100_unk400_sources },
+ {}
+ }, &gf100_perfctr_func },
{}
};
-static const struct nvkm_specdom
+const struct nvkm_specdom
gf100_pm_part[] = {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
+ { 0x10, "part00_pbfb_01", gf100_pbfb_sources },
+ { 0x21, "part00_pmfb_00", gf100_pmfb_sources },
+ { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+ { 0x00, "part00_pmfb_02", gf100_pmfb_sources },
+ { 0x02, "part00_pmfb_03", gf100_pmfb_sources },
+ { 0x01, "part00_pmfb_04", gf100_pmfb_sources },
+ { 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
+ { 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
+ { 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
+ { 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
+ { 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
+ { 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
+ { 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
+ {}
+ }, &gf100_perfctr_func },
{}
};
static void
-gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
- struct gf100_pm_priv *priv = (void *)ppm;
- struct gf100_pm_cntr *cntr = (void *)ctr;
+ struct nvkm_device *device = pm->engine.subdev.device;
u32 log = ctr->logic_op;
u32 src = 0x00000000;
int i;
- for (i = 0; i < 4 && ctr->signal[i]; i++)
- src |= (ctr->signal[i] - dom->signal) << (i * 8);
+ for (i = 0; i < 4; i++)
+ src |= ctr->signal[i] << (i * 8);
- nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
- nv_wr32(priv, dom->addr + 0x100, 0x00000000);
- nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
- nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
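+ /* fold the client-requested counter mode into the control word */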
+ nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
+ nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
+ nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
+ nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
}
static void
-gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
- struct gf100_pm_priv *priv = (void *)ppm;
- struct gf100_pm_cntr *cntr = (void *)ctr;
-
- switch (cntr->base.slot) {
- case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
- case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
- case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
- case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+ struct nvkm_device *device = pm->engine.subdev.device;
+
+ switch (ctr->slot) {
+ case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
+ case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
+ case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
+ case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
}
- cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+ dom->clk = nvkm_rd32(device, dom->addr + 0x070);
}
static void
-gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
- struct gf100_pm_priv *priv = (void *)ppm;
- nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
- nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+ struct nvkm_device *device = pm->engine.subdev.device;
+ nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+ nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
}
const struct nvkm_funcdom
@@ -88,72 +172,72 @@ gf100_perfctr_func = {
.next = gf100_perfctr_next,
};
-int
-gf100_pm_fini(struct nvkm_object *object, bool suspend)
+static void
+gf100_pm_fini(struct nvkm_pm *pm)
{
- struct gf100_pm_priv *priv = (void *)object;
- nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
- nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
- return nvkm_pm_fini(&priv->base, suspend);
+ struct nvkm_device *device = pm->engine.subdev.device;
+ nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
}
-static int
-gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_pm_func
+gf100_pm_ = {
+ .fini = gf100_pm_fini,
+};
+
+int
+gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
+ int index, struct nvkm_pm **ppm)
{
- struct gf100_pm_priv *priv;
+ struct nvkm_pm *pm;
u32 mask;
int ret;
- ret = nvkm_pm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+ return -ENOMEM;
- ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr);
+ ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
if (ret)
return ret;
/* HUB */
- ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
- gf100_pm_hub);
+ ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
+ func->doms_hub);
if (ret)
return ret;
/* GPC */
- mask = (1 << nv_rd32(priv, 0x022430)) - 1;
- mask &= ~nv_rd32(priv, 0x022504);
- mask &= ~nv_rd32(priv, 0x022584);
+ mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
+ mask &= ~nvkm_rd32(device, 0x022504);
+ mask &= ~nvkm_rd32(device, 0x022584);
- ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
- 0x1000, 0x200, gf100_pm_gpc);
+ ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
+ 0x1000, 0x200, func->doms_gpc);
if (ret)
return ret;
/* PART */
- mask = (1 << nv_rd32(priv, 0x022438)) - 1;
- mask &= ~nv_rd32(priv, 0x022548);
- mask &= ~nv_rd32(priv, 0x0225c8);
+ mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
+ mask &= ~nvkm_rd32(device, 0x022548);
+ mask &= ~nvkm_rd32(device, 0x0225c8);
- ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
- 0x1000, 0x200, gf100_pm_part);
+ ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
+ 0x1000, 0x200, func->doms_part);
if (ret)
return ret;
- nv_engine(priv)->cclass = &nvkm_pm_cclass;
- nv_engine(priv)->sclass = nvkm_pm_sclass;
- priv->base.last = 7;
return 0;
}
-struct nvkm_oclass
-gf100_pm_oclass = {
- .handle = NV_ENGINE(PM, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = gf100_pm_fini,
- },
+static const struct gf100_pm_func
+gf100_pm = {
+ .doms_gpc = gf100_pm_gpc,
+ .doms_hub = gf100_pm_hub,
+ .doms_part = gf100_pm_part,
};
+
+int
+gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return gf100_pm_new_(&gf100_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 6a01fc7fec6f..56d0344853ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -2,14 +2,18 @@
#define __NVKM_PM_NVC0_H__
#include "priv.h"
-struct gf100_pm_priv {
- struct nvkm_pm base;
+struct gf100_pm_func {
+ const struct nvkm_specdom *doms_hub;
+ const struct nvkm_specdom *doms_gpc;
+ const struct nvkm_specdom *doms_part;
};
-struct gf100_pm_cntr {
- struct nvkm_perfctr base;
-};
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
+ int index, struct nvkm_pm **);
extern const struct nvkm_funcdom gf100_perfctr_func;
-int gf100_pm_fini(struct nvkm_object *, bool);
+extern const struct nvkm_specdom gf100_pm_gpc[];
+
+extern const struct nvkm_specsrc gf100_pbfb_sources[];
+extern const struct nvkm_specsrc gf100_pmfb_sources[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
new file mode 100644
index 000000000000..49b24c98a7f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specdom
+gf108_pm_hub[] = {
+ {}
+};
+
+static const struct nvkm_specdom
+gf108_pm_part[] = {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0x14, "part00_pbfb_00", gf100_pbfb_sources },
+ { 0x15, "part00_pbfb_01", gf100_pbfb_sources },
+ { 0x20, "part00_pbfb_02", gf100_pbfb_sources },
+ { 0x21, "part00_pbfb_03", gf100_pbfb_sources },
+ { 0x01, "part00_pmfb_00", gf100_pmfb_sources },
+ { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+ { 0x05, "part00_pmfb_02", gf100_pmfb_sources},
+ { 0x07, "part00_pmfb_03", gf100_pmfb_sources },
+ { 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
+ { 0x12, "part00_pmfb_05", gf100_pmfb_sources },
+ { 0x13, "part00_pmfb_06", gf100_pmfb_sources },
+ { 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
+ { 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
+ { 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
+ { 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
+ { 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
+ {}
+ }, &gf100_perfctr_func },
+ {}
+};
+
+static const struct gf100_pm_func
+gf108_pm = {
+ .doms_gpc = gf100_pm_gpc,
+ .doms_hub = gf108_pm_hub,
+ .doms_part = gf108_pm_part,
+};
+
+int
+gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return gf100_pm_new_(&gf108_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
new file mode 100644
index 000000000000..9170025fc988
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specsrc
+gf117_pmfb_sources[] = {
+ { 0x140028, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ { 0x7, 16, "unk16" },
+ { 0x3, 24, "unk24" },
+ { 0x2, 28, "unk28" },
+ {}
+ }, "pmfb0_pm_unk28" },
+ { 0x14125c, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pmfb0_subp0_pm_unk25c" },
+ {}
+};
+
+static const struct nvkm_specdom
+gf117_pm_hub[] = {
+ {}
+};
+
+static const struct nvkm_specdom
+gf117_pm_part[] = {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+ { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+ { 0x12, "part00_pmfb_00", gf117_pmfb_sources },
+ { 0x15, "part00_pmfb_01", gf117_pmfb_sources },
+ { 0x16, "part00_pmfb_02", gf117_pmfb_sources },
+ { 0x18, "part00_pmfb_03", gf117_pmfb_sources },
+ { 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
+ { 0x23, "part00_pmfb_05", gf117_pmfb_sources },
+ { 0x24, "part00_pmfb_06", gf117_pmfb_sources },
+ { 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
+ { 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
+ { 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
+ { 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
+ { 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
+ {}
+ }, &gf100_perfctr_func },
+ {}
+};
+
+static const struct gf100_pm_func
+gf117_pm = {
+ .doms_gpc = gf100_pm_gpc,
+ .doms_hub = gf117_pm_hub,
+ .doms_part = gf117_pm_part,
+};
+
+int
+gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return gf100_pm_new_(&gf117_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 75b9ff3d1a2c..07f946d26ac6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -23,6 +23,52 @@
*/
#include "gf100.h"
+static const struct nvkm_specsrc
+gk104_pmfb_sources[] = {
+ { 0x140028, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ { 0x7, 16, "unk16" },
+ { 0x3, 24, "unk24" },
+ { 0x2, 28, "unk28" },
+ {}
+ }, "pmfb0_pm_unk28" },
+ { 0x14125c, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pmfb0_subp0_pm_unk25c" },
+ { 0x14165c, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pmfb0_subp1_pm_unk25c" },
+ { 0x141a5c, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pmfb0_subp2_pm_unk25c" },
+ { 0x141e5c, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pmfb0_subp3_pm_unk25c" },
+ {}
+};
+
+static const struct nvkm_specsrc
+gk104_tex_sources[] = {
+ { 0x5042c0, (const struct nvkm_specmux[]) {
+ { 0xf, 0, "sel0", true },
+ { 0x7, 8, "sel1", true },
+ {}
+ }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+ { 0x5042c8, (const struct nvkm_specmux[]) {
+ { 0x1f, 0, "sel", true },
+ {}
+ }, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
+ { 0x5042b8, (const struct nvkm_specmux[]) {
+ { 0xff, 0, "sel", true },
+ {}
+ }, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
+ {}
+};
+
static const struct nvkm_specdom
gk104_pm_hub[] = {
{ 0x60, (const struct nvkm_specsig[]) {
@@ -69,12 +115,51 @@ gk104_pm_gpc[] = {
{ 0xc7, "gpc00_user_0" },
{}
}, &gf100_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &gf100_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ { 0x00, "gpc02_tex_00", gk104_tex_sources },
+ { 0x01, "gpc02_tex_01", gk104_tex_sources },
+ { 0x02, "gpc02_tex_02", gk104_tex_sources },
+ { 0x03, "gpc02_tex_03", gk104_tex_sources },
+ { 0x04, "gpc02_tex_04", gk104_tex_sources },
+ { 0x05, "gpc02_tex_05", gk104_tex_sources },
+ { 0x06, "gpc02_tex_06", gk104_tex_sources },
+ { 0x07, "gpc02_tex_07", gk104_tex_sources },
+ { 0x08, "gpc02_tex_08", gk104_tex_sources },
+ { 0x0a, "gpc02_tex_0a", gk104_tex_sources },
+ { 0x0b, "gpc02_tex_0b", gk104_tex_sources },
+ { 0x0d, "gpc02_tex_0c", gk104_tex_sources },
+ { 0x0c, "gpc02_tex_0d", gk104_tex_sources },
+ { 0x0e, "gpc02_tex_0e", gk104_tex_sources },
+ { 0x0f, "gpc02_tex_0f", gk104_tex_sources },
+ { 0x10, "gpc02_tex_10", gk104_tex_sources },
+ { 0x11, "gpc02_tex_11", gk104_tex_sources },
+ { 0x12, "gpc02_tex_12", gk104_tex_sources },
+ {}
+ }, &gf100_perfctr_func },
{}
};
static const struct nvkm_specdom
gk104_pm_part[] = {
{ 0x60, (const struct nvkm_specsig[]) {
+ { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+ { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+ { 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
+ { 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
+ { 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
+ { 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
+ { 0x10, "part00_pmfb_04", gk104_pmfb_sources },
+ { 0x12, "part00_pmfb_05", gk104_pmfb_sources },
+ { 0x15, "part00_pmfb_06", gk104_pmfb_sources },
+ { 0x16, "part00_pmfb_07", gk104_pmfb_sources },
+ { 0x18, "part00_pmfb_08", gk104_pmfb_sources },
+ { 0x21, "part00_pmfb_09", gk104_pmfb_sources },
+ { 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
+ { 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
+ { 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
{ 0x47, "part00_user_0" },
{}
}, &gf100_perfctr_func },
@@ -85,64 +170,15 @@ gk104_pm_part[] = {
{}
};
-static int
-gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_pm_priv *priv;
- u32 mask;
- int ret;
-
- ret = nvkm_pm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- /* PDAEMON */
- ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
- if (ret)
- return ret;
-
- /* HUB */
- ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
- gk104_pm_hub);
- if (ret)
- return ret;
-
- /* GPC */
- mask = (1 << nv_rd32(priv, 0x022430)) - 1;
- mask &= ~nv_rd32(priv, 0x022504);
- mask &= ~nv_rd32(priv, 0x022584);
-
- ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
- 0x1000, 0x200, gk104_pm_gpc);
- if (ret)
- return ret;
-
- /* PART */
- mask = (1 << nv_rd32(priv, 0x022438)) - 1;
- mask &= ~nv_rd32(priv, 0x022548);
- mask &= ~nv_rd32(priv, 0x0225c8);
-
- ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
- 0x1000, 0x200, gk104_pm_part);
- if (ret)
- return ret;
+static const struct gf100_pm_func
+gk104_pm = {
+ .doms_gpc = gk104_pm_gpc,
+ .doms_hub = gk104_pm_hub,
+ .doms_part = gk104_pm_part,
+};
- nv_engine(priv)->cclass = &nvkm_pm_cclass;
- nv_engine(priv)->sclass = nvkm_pm_sclass;
- priv->base.last = 7;
- return 0;
+int
+gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return gf100_pm_new_(&gk104_pm, device, index, ppm);
}
-
-struct nvkm_oclass
-gk104_pm_oclass = {
- .handle = NV_ENGINE(PM, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = gf100_pm_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
new file mode 100644
index 000000000000..5cf5dd536fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Nouveau project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "nv40.h"
+
+const struct nvkm_specsrc
+gt200_crop_sources[] = {
+ { 0x407008, (const struct nvkm_specmux[]) {
+ { 0xf, 0, "sel0", true },
+ { 0x1f, 16, "sel1", true },
+ {}
+ }, "pgraph_rop0_crop_pm_mux" },
+ {}
+};
+
+const struct nvkm_specsrc
+gt200_prop_sources[] = {
+ { 0x408750, (const struct nvkm_specmux[]) {
+ { 0x3f, 0, "sel", true },
+ {}
+ }, "pgraph_tpc0_prop_pm_mux" },
+ {}
+};
+
+const struct nvkm_specsrc
+gt200_tex_sources[] = {
+ { 0x408508, (const struct nvkm_specmux[]) {
+ { 0xfffff, 0, "unk0" },
+ {}
+ }, "pgraph_tpc0_tex_unk08" },
+ {}
+};
+
+static const struct nvkm_specdom
+gt200_pm[] = {
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0xc9, "pc01_gr_idle" },
+ { 0x84, "pc01_strmout_00" },
+ { 0x85, "pc01_strmout_01" },
+ { 0xde, "pc01_trast_00" },
+ { 0xdf, "pc01_trast_01" },
+ { 0xe0, "pc01_trast_02" },
+ { 0xe1, "pc01_trast_03" },
+ { 0xe4, "pc01_trast_04" },
+ { 0xe5, "pc01_trast_05" },
+ { 0x82, "pc01_vattr_00" },
+ { 0x83, "pc01_vattr_01" },
+ { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+ { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+ { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+ { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+ { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+ { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+ { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+ { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+ { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+ { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+ { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+ { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+ { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+ { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+ { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+ { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+ { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+ { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+ { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+ { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+ { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+ { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+ { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+ { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+ { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+ { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+ { 0x07, "pc01_zcull_00", nv50_zcull_sources },
+ { 0x08, "pc01_zcull_01", nv50_zcull_sources },
+ { 0x09, "pc01_zcull_02", nv50_zcull_sources },
+ { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+ { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+ { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+
+ { 0xb0, "pc01_unk00" },
+ { 0xec, "pc01_trailer" },
+ {}
+ }, &nv40_perfctr_func },
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0x55, "pc02_crop_00", gt200_crop_sources },
+ { 0x56, "pc02_crop_01", gt200_crop_sources },
+ { 0x57, "pc02_crop_02", gt200_crop_sources },
+ { 0x58, "pc02_crop_03", gt200_crop_sources },
+ { 0x00, "pc02_prop_00", gt200_prop_sources },
+ { 0x01, "pc02_prop_01", gt200_prop_sources },
+ { 0x02, "pc02_prop_02", gt200_prop_sources },
+ { 0x03, "pc02_prop_03", gt200_prop_sources },
+ { 0x04, "pc02_prop_04", gt200_prop_sources },
+ { 0x05, "pc02_prop_05", gt200_prop_sources },
+ { 0x06, "pc02_prop_06", gt200_prop_sources },
+ { 0x07, "pc02_prop_07", gt200_prop_sources },
+ { 0x78, "pc02_tex_00", gt200_tex_sources },
+ { 0x79, "pc02_tex_01", gt200_tex_sources },
+ { 0x7a, "pc02_tex_02", gt200_tex_sources },
+ { 0x7b, "pc02_tex_03", gt200_tex_sources },
+ { 0x32, "pc02_tex_04", gt200_tex_sources },
+ { 0x33, "pc02_tex_05", gt200_tex_sources },
+ { 0x34, "pc02_tex_06", gt200_tex_sources },
+ { 0x74, "pc02_zrop_00", nv50_zrop_sources },
+ { 0x75, "pc02_zrop_01", nv50_zrop_sources },
+ { 0x76, "pc02_zrop_02", nv50_zrop_sources },
+ { 0x77, "pc02_zrop_03", nv50_zrop_sources },
+ { 0xec, "pc02_trailer" },
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ { 0x20, (const struct nvkm_specsig[]) {
+ {}
+ }, &nv40_perfctr_func },
+ {}
+};
+
+int
+gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return nv40_pm_new_(gt200_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index d065bfc59bbf..c9227ad41b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -23,15 +23,94 @@
*/
#include "nv40.h"
+static const struct nvkm_specsrc
+gt215_zcull_sources[] = {
+ { 0x402ca4, (const struct nvkm_specmux[]) {
+ { 0x7fff, 0, "unk0" },
+ { 0xff, 24, "unk24" },
+ {}
+ }, "pgraph_zcull_pm_unka4" },
+ {}
+};
+
static const struct nvkm_specdom
gt215_pm[] = {
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0xcb, "pc01_gr_idle" },
+ { 0x86, "pc01_strmout_00" },
+ { 0x87, "pc01_strmout_01" },
+ { 0xe0, "pc01_trast_00" },
+ { 0xe1, "pc01_trast_01" },
+ { 0xe2, "pc01_trast_02" },
+ { 0xe3, "pc01_trast_03" },
+ { 0xe6, "pc01_trast_04" },
+ { 0xe7, "pc01_trast_05" },
+ { 0x84, "pc01_vattr_00" },
+ { 0x85, "pc01_vattr_01" },
+ { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+ { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+ { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+ { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+ { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+ { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+ { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+ { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+ { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+ { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+ { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+ { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+ { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+ { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+ { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+ { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+ { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+ { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+ { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+ { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+ { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+ { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+ { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+ { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+ { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+ { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+ { 0x07, "pc01_zcull_00", gt215_zcull_sources },
+ { 0x08, "pc01_zcull_01", gt215_zcull_sources },
+ { 0x09, "pc01_zcull_02", gt215_zcull_sources },
+ { 0x0a, "pc01_zcull_03", gt215_zcull_sources },
+ { 0x0b, "pc01_zcull_04", gt215_zcull_sources },
+ { 0x0c, "pc01_zcull_05", gt215_zcull_sources },
+ { 0xb2, "pc01_unk00" },
+ { 0xec, "pc01_trailer" },
{}
}, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
+ { 0xe0, (const struct nvkm_specsig[]) {
+ { 0x64, "pc02_crop_00", gt200_crop_sources },
+ { 0x65, "pc02_crop_01", gt200_crop_sources },
+ { 0x66, "pc02_crop_02", gt200_crop_sources },
+ { 0x67, "pc02_crop_03", gt200_crop_sources },
+ { 0x00, "pc02_prop_00", gt200_prop_sources },
+ { 0x01, "pc02_prop_01", gt200_prop_sources },
+ { 0x02, "pc02_prop_02", gt200_prop_sources },
+ { 0x03, "pc02_prop_03", gt200_prop_sources },
+ { 0x04, "pc02_prop_04", gt200_prop_sources },
+ { 0x05, "pc02_prop_05", gt200_prop_sources },
+ { 0x06, "pc02_prop_06", gt200_prop_sources },
+ { 0x07, "pc02_prop_07", gt200_prop_sources },
+ { 0x80, "pc02_tex_00", gt200_tex_sources },
+ { 0x81, "pc02_tex_01", gt200_tex_sources },
+ { 0x82, "pc02_tex_02", gt200_tex_sources },
+ { 0x83, "pc02_tex_03", gt200_tex_sources },
+ { 0x3a, "pc02_tex_04", gt200_tex_sources },
+ { 0x3b, "pc02_tex_05", gt200_tex_sources },
+ { 0x3c, "pc02_tex_06", gt200_tex_sources },
+ { 0x7c, "pc02_zrop_00", nv50_zrop_sources },
+ { 0x7d, "pc02_zrop_01", nv50_zrop_sources },
+ { 0x7e, "pc02_zrop_02", nv50_zrop_sources },
+ { 0x7f, "pc02_zrop_03", nv50_zrop_sources },
+ { 0xcc, "pc02_trailer" },
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,32 +131,8 @@ gt215_pm[] = {
{}
};
-static int
-gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **object)
+int
+gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
{
- int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object);
- if (ret == 0) {
- struct nv40_pm_priv *priv = (void *)*object;
- ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
- gt215_pm_pwr);
- if (ret)
- return ret;
-
- priv->base.last = 3;
- }
- return ret;
+ return nv40_pm_new_(gt215_pm, device, index, ppm);
}
-
-struct nvkm_oclass *
-gt215_pm_oclass = &(struct nv40_pm_oclass) {
- .base.handle = NV_ENGINE(PM, 0xa3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = _nvkm_pm_fini,
- },
- .doms = gt215_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index ff22f06b22b8..4bef72a9d106 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -24,46 +24,44 @@
#include "nv40.h"
static void
-nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
- struct nv40_pm_priv *priv = (void *)ppm;
- struct nv40_pm_cntr *cntr = (void *)ctr;
+ struct nvkm_device *device = pm->engine.subdev.device;
u32 log = ctr->logic_op;
u32 src = 0x00000000;
int i;
- for (i = 0; i < 4 && ctr->signal[i]; i++)
- src |= (ctr->signal[i] - dom->signal) << (i * 8);
+ for (i = 0; i < 4; i++)
+ src |= ctr->signal[i] << (i * 8);
- nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
- nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
- nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+ nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
+ nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
+ nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
}
static void
-nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
- struct nv40_pm_priv *priv = (void *)ppm;
- struct nv40_pm_cntr *cntr = (void *)ctr;
+ struct nvkm_device *device = pm->engine.subdev.device;
- switch (cntr->base.slot) {
- case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
- case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
- case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
- case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+ switch (ctr->slot) {
+ case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
+ case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
+ case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
+ case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
}
- cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+ dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
}
static void
-nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
- struct nv40_pm_priv *priv = (void *)ppm;
- if (priv->sequence != ppm->sequence) {
- nv_wr32(priv, 0x400084, 0x00000020);
- priv->sequence = ppm->sequence;
+ struct nvkm_device *device = pm->engine.subdev.device;
+ struct nv40_pm *nv40pm = nv40_pm(pm);
+ if (nv40pm->sequence != pm->sequence) {
+ nvkm_wr32(device, 0x400084, 0x00000020);
+ nv40pm->sequence = pm->sequence;
}
}
@@ -74,6 +72,28 @@ nv40_perfctr_func = {
.next = nv40_perfctr_next,
};
+static const struct nvkm_pm_func
+nv40_pm_ = {
+};
+
+int
+nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
+ int index, struct nvkm_pm **ppm)
+{
+ struct nv40_pm *pm;
+ int ret;
+
+ if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+ return -ENOMEM;
+ *ppm = &pm->base;
+
+ ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+ if (ret)
+ return ret;
+
+ return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
+}
+
static const struct nvkm_specdom
nv40_pm[] = {
{ 0x20, (const struct nvkm_specsig[]) {
@@ -95,36 +115,7 @@ nv40_pm[] = {
};
int
-nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
{
- struct nv40_pm_oclass *mclass = (void *)oclass;
- struct nv40_pm_priv *priv;
- int ret;
-
- ret = nvkm_pm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
- if (ret)
- return ret;
-
- nv_engine(priv)->cclass = &nvkm_pm_cclass;
- nv_engine(priv)->sclass = nvkm_pm_sclass;
- return 0;
+ return nv40_pm_new_(nv40_pm, device, index, ppm);
}
-
-struct nvkm_oclass *
-nv40_pm_oclass = &(struct nv40_pm_oclass) {
- .base.handle = NV_ENGINE(PM, 0x40),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = _nvkm_pm_fini,
- },
- .doms = nv40_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 2338e150420e..da481abe8f7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,24 +1,14 @@
#ifndef __NVKM_PM_NV40_H__
#define __NVKM_PM_NV40_H__
+#define nv40_pm(p) container_of((p), struct nv40_pm, base)
#include "priv.h"
-struct nv40_pm_oclass {
- struct nvkm_oclass base;
- const struct nvkm_specdom *doms;
-};
-
-struct nv40_pm_priv {
+struct nv40_pm {
struct nvkm_pm base;
u32 sequence;
};
-int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **pobject);
-
-struct nv40_pm_cntr {
- struct nvkm_perfctr base;
-};
-
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
+ int index, struct nvkm_pm **);
extern const struct nvkm_funcdom nv40_perfctr_func;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index 6af83b5d1b11..cc5a41d4c6f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -23,35 +23,153 @@
*/
#include "nv40.h"
+const struct nvkm_specsrc
+nv50_zcull_sources[] = {
+ { 0x402ca4, (const struct nvkm_specmux[]) {
+ { 0x7fff, 0, "unk0" },
+ {}
+ }, "pgraph_zcull_pm_unka4" },
+ {}
+};
+
+const struct nvkm_specsrc
+nv50_zrop_sources[] = {
+ { 0x40708c, (const struct nvkm_specmux[]) {
+ { 0xf, 0, "sel0", true },
+ { 0xf, 16, "sel1", true },
+ {}
+ }, "pgraph_rop0_zrop_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+nv50_prop_sources[] = {
+ { 0x40be50, (const struct nvkm_specmux[]) {
+ { 0x1f, 0, "sel", true },
+ {}
+ }, "pgraph_tpc3_prop_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+nv50_crop_sources[] = {
+ { 0x407008, (const struct nvkm_specmux[]) {
+ { 0x7, 0, "sel0", true },
+ { 0x7, 16, "sel1", true },
+ {}
+ }, "pgraph_rop0_crop_pm_mux" },
+ {}
+};
+
+static const struct nvkm_specsrc
+nv50_tex_sources[] = {
+ { 0x40b808, (const struct nvkm_specmux[]) {
+ { 0x3fff, 0, "unk0" },
+ {}
+ }, "pgraph_tpc3_tex_unk08" },
+ {}
+};
+
+static const struct nvkm_specsrc
+nv50_vfetch_sources[] = {
+ { 0x400c0c, (const struct nvkm_specmux[]) {
+ { 0x1, 0, "unk0" },
+ {}
+ }, "pgraph_vfetch_unk0c" },
+ {}
+};
+
static const struct nvkm_specdom
nv50_pm[] = {
- { 0x040, (const struct nvkm_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x100, (const struct nvkm_specsig[]) {
- { 0xc8, "gr_idle" },
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0xc8, "pc01_gr_idle" },
+ { 0x7f, "pc01_strmout_00" },
+ { 0x80, "pc01_strmout_01" },
+ { 0xdc, "pc01_trast_00" },
+ { 0xdd, "pc01_trast_01" },
+ { 0xde, "pc01_trast_02" },
+ { 0xdf, "pc01_trast_03" },
+ { 0xe2, "pc01_trast_04" },
+ { 0xe3, "pc01_trast_05" },
+ { 0x7c, "pc01_vattr_00" },
+ { 0x7d, "pc01_vattr_01" },
+ { 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
+ { 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
+ { 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
+ { 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
+ { 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
+ { 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
+ { 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
+ { 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
+ { 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
+ { 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
+ { 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
+ { 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
+ { 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
+ { 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
+ { 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
+ { 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
+ { 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
+ { 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
+ { 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
+ { 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
+ { 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
+ { 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
+ { 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
+ { 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
+ { 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
+ { 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
+ { 0x20, "pc01_zcull_00", nv50_zcull_sources },
+ { 0x21, "pc01_zcull_01", nv50_zcull_sources },
+ { 0x22, "pc01_zcull_02", nv50_zcull_sources },
+ { 0x23, "pc01_zcull_03", nv50_zcull_sources },
+ { 0x24, "pc01_zcull_04", nv50_zcull_sources },
+ { 0x25, "pc01_zcull_05", nv50_zcull_sources },
+ { 0xae, "pc01_unk00" },
+ { 0xee, "pc01_trailer" },
{}
}, &nv40_perfctr_func },
- { 0x100, (const struct nvkm_specsig[]) {
+ { 0xf0, (const struct nvkm_specsig[]) {
+ { 0x52, "pc02_crop_00", nv50_crop_sources },
+ { 0x53, "pc02_crop_01", nv50_crop_sources },
+ { 0x54, "pc02_crop_02", nv50_crop_sources },
+ { 0x55, "pc02_crop_03", nv50_crop_sources },
+ { 0x00, "pc02_prop_00", nv50_prop_sources },
+ { 0x01, "pc02_prop_01", nv50_prop_sources },
+ { 0x02, "pc02_prop_02", nv50_prop_sources },
+ { 0x03, "pc02_prop_03", nv50_prop_sources },
+ { 0x04, "pc02_prop_04", nv50_prop_sources },
+ { 0x05, "pc02_prop_05", nv50_prop_sources },
+ { 0x06, "pc02_prop_06", nv50_prop_sources },
+ { 0x07, "pc02_prop_07", nv50_prop_sources },
+ { 0x70, "pc02_tex_00", nv50_tex_sources },
+ { 0x71, "pc02_tex_01", nv50_tex_sources },
+ { 0x72, "pc02_tex_02", nv50_tex_sources },
+ { 0x73, "pc02_tex_03", nv50_tex_sources },
+ { 0x40, "pc02_tex_04", nv50_tex_sources },
+ { 0x41, "pc02_tex_05", nv50_tex_sources },
+ { 0x42, "pc02_tex_06", nv50_tex_sources },
+ { 0x6c, "pc02_zrop_00", nv50_zrop_sources },
+ { 0x6d, "pc02_zrop_01", nv50_zrop_sources },
+ { 0x6e, "pc02_zrop_02", nv50_zrop_sources },
+ { 0x6f, "pc02_zrop_03", nv50_zrop_sources },
+ { 0xee, "pc02_trailer" },
{}
}, &nv40_perfctr_func },
- { 0x020, (const struct nvkm_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
- { 0x040, (const struct nvkm_specsig[]) {
+ { 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{}
};
-struct nvkm_oclass *
-nv50_pm_oclass = &(struct nv40_pm_oclass) {
- .base.handle = NV_ENGINE(PM, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_pm_ctor,
- .dtor = _nvkm_pm_dtor,
- .init = _nvkm_pm_init,
- .fini = _nvkm_pm_fini,
- },
- .doms = nv50_pm,
-}.base;
+int
+nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+ return nv40_pm_new_(nv50_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 1e6eff2a6d79..d7b81cbf82b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,58 +1,85 @@
#ifndef __NVKM_PM_PRIV_H__
#define __NVKM_PM_PRIV_H__
+#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
#include <engine/pm.h>
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
+ int index, struct nvkm_pm *);
+
+struct nvkm_pm_func {
+ void (*fini)(struct nvkm_pm *);
+};
+
struct nvkm_perfctr {
- struct nvkm_object base;
struct list_head head;
- struct nvkm_perfsig *signal[4];
+ u8 domain;
+ u8 signal[4];
+ u64 source[4][8];
int slot;
u32 logic_op;
- u32 clk;
u32 ctr;
};
-extern struct nvkm_oclass nvkm_pm_sclass[];
+struct nvkm_specmux {
+ u32 mask;
+ u8 shift;
+ const char *name;
+ bool enable;
+};
-#include <core/engctx.h>
+struct nvkm_specsrc {
+ u32 addr;
+ const struct nvkm_specmux *mux;
+ const char *name;
+};
-struct nvkm_perfctx {
- struct nvkm_engctx base;
+struct nvkm_perfsrc {
+ struct list_head head;
+ char *name;
+ u32 addr;
+ u32 mask;
+ u8 shift;
+ bool enable;
};
-extern struct nvkm_oclass nvkm_pm_cclass;
+extern const struct nvkm_specsrc nv50_zcull_sources[];
+extern const struct nvkm_specsrc nv50_zrop_sources[];
+extern const struct nvkm_specsrc g84_vfetch_sources[];
+extern const struct nvkm_specsrc gt200_crop_sources[];
+extern const struct nvkm_specsrc gt200_prop_sources[];
+extern const struct nvkm_specsrc gt200_tex_sources[];
struct nvkm_specsig {
u8 signal;
const char *name;
+ const struct nvkm_specsrc *source;
};
struct nvkm_perfsig {
const char *name;
+ u8 source[8];
};
-struct nvkm_perfdom;
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
-
struct nvkm_specdom {
u16 signal_nr;
const struct nvkm_specsig *signal;
const struct nvkm_funcdom *func;
};
-extern const struct nvkm_specdom gt215_pm_pwr[];
-extern const struct nvkm_specdom gf100_pm_pwr[];
-extern const struct nvkm_specdom gk104_pm_pwr[];
+#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
struct nvkm_perfdom {
+ struct nvkm_object object;
+ struct nvkm_perfmon *perfmon;
struct list_head head;
struct list_head list;
const struct nvkm_funcdom *func;
+ struct nvkm_perfctr *ctr[4];
char name[32];
u32 addr;
- u8 quad;
- u32 signal_nr;
+ u8 mode;
+ u32 clk;
+ u16 signal_nr;
struct nvkm_perfsig signal[];
};
@@ -67,24 +94,10 @@ struct nvkm_funcdom {
int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
const struct nvkm_specdom *);
-#define nvkm_pm_create(p,e,o,d) \
- nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pm_dtor(p) ({ \
- struct nvkm_pm *c = (p); \
- _nvkm_pm_dtor(nv_object(c)); \
-})
-#define nvkm_pm_init(p) ({ \
- struct nvkm_pm *c = (p); \
- _nvkm_pm_init(nv_object(c)); \
-})
-#define nvkm_pm_fini(p,s) ({ \
- struct nvkm_pm *c = (p); \
- _nvkm_pm_fini(nv_object(c), (s)); \
-})
-
-int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_pm_dtor(struct nvkm_object *);
-int _nvkm_pm_init(struct nvkm_object *);
-int _nvkm_pm_fini(struct nvkm_object *, bool);
+#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
+
+struct nvkm_perfmon {
+ struct nvkm_object object;
+ struct nvkm_pm *pm;
+};
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 06ee06071104..66b147bd58eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
/*
- * fuc microcode for g98 psec engine
+ * fuc microcode for g98 sec engine
* Copyright (C) 2010 Marcin Kościelnicki
*
* This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-.section #g98_psec_data
+.section #g98_sec_data
ctx_dma:
ctx_dma_query: .b32 0
@@ -94,7 +94,7 @@ sec_dtable:
.align 0x100
-.section #g98_psec_code
+.section #g98_sec_code
// $r0 is always set to 0 in our code - this allows some space savings.
clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 5d65c4fbb087..eca62221f299 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_psec_data[] = {
+uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_psec_data[] = {
0x00000000,
};
-uint32_t g98_psec_code[] = {
+uint32_t g98_sec_code[] = {
0x17f004bd,
0x0010fe35,
0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 9d5c1b8b1f8c..995c2c5ec150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -22,47 +22,14 @@
* Authors: Ben Skeggs
*/
#include <engine/sec.h>
-#include <engine/falcon.h>
+#include <engine/fifo.h>
#include "fuc/g98.fuc0s.h"
#include <core/client.h>
#include <core/enum.h>
-#include <engine/fifo.h>
-
-struct g98_sec_priv {
- struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <core/gpuobj.h>
-static struct nvkm_oclass
-g98_sec_sclass[] = {
- { 0x88b4, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PSEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_cclass = {
- .handle = NV_ENGCTX(SEC, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_falcon_context_ctor,
- .dtor = _nvkm_falcon_context_dtor,
- .init = _nvkm_falcon_context_init,
- .fini = _nvkm_falcon_context_fini,
- .rd32 = _nvkm_falcon_context_rd32,
- .wr32 = _nvkm_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PSEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
static const struct nvkm_enum g98_sec_isr_error_name[] = {
{ 0x0000, "ILLEGAL_MTHD" },
@@ -73,77 +40,44 @@ static const struct nvkm_enum g98_sec_isr_error_name[] = {
};
static void
-g98_sec_intr(struct nvkm_subdev *subdev)
+g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
- struct nvkm_engine *engine = nv_engine(subdev);
- struct nvkm_object *engctx;
- struct g98_sec_priv *priv = (void *)subdev;
- u32 disp = nv_rd32(priv, 0x08701c);
- u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
- u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
- u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x087040) >> 16;
+ struct nvkm_subdev *subdev = &sec->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 ssta = nvkm_rd32(device, 0x087040) & 0x0000ffff;
+ u32 addr = nvkm_rd32(device, 0x087040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x087044);
- int chid;
-
- engctx = nvkm_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
- nvkm_enum_print(g98_sec_isr_error_name, ssta);
- pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, nvkm_client_name(engctx),
- subc, mthd, data);
- nv_wr32(priv, 0x087004, 0x00000040);
- stat &= ~0x00000040;
- }
+ u32 data = nvkm_rd32(device, 0x087044);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(g98_sec_isr_error_name, ssta);
+
+ nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n", ssta,
+ en ? en->name : "UNKNOWN", chan ? chan->chid : -1,
+ chan ? chan->inst->addr : 0,
+ chan ? chan->object.client->name : "unknown",
+ subc, mthd, data);
+}
- if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x087004, stat);
+static const struct nvkm_falcon_func
+g98_sec = {
+ .code.data = g98_sec_code,
+ .code.size = sizeof(g98_sec_code),
+ .data.data = g98_sec_data,
+ .data.size = sizeof(g98_sec_data),
+ .pmc_enable = 0x00004000,
+ .intr = g98_sec_intr,
+ .sclass = {
+ { -1, -1, G98_SEC },
+ {}
}
+};
- nvkm_engctx_put(engctx);
-}
-
-static int
-g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g98_sec_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
{
- struct g98_sec_priv *priv;
- int ret;
-
- ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
- "PSEC", "sec", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00004000;
- nv_subdev(priv)->intr = g98_sec_intr;
- nv_engine(priv)->cclass = &g98_sec_cclass;
- nv_engine(priv)->sclass = g98_sec_sclass;
- nv_falcon(priv)->code.data = g98_psec_code;
- nv_falcon(priv)->code.size = sizeof(g98_psec_code);
- nv_falcon(priv)->data.data = g98_psec_data;
- nv_falcon(priv)->data.size = sizeof(g98_psec_data);
- return 0;
+ return nvkm_falcon_new_(&g98_sec, device, index,
+ true, 0x087000, pengine);
}
-
-struct nvkm_oclass
-g98_sec_oclass = {
- .handle = NV_ENGINE(SEC, 0x98),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g98_sec_ctor,
- .dtor = _nvkm_falcon_dtor,
- .init = _nvkm_falcon_init,
- .fini = _nvkm_falcon_fini,
- .rd32 = _nvkm_falcon_rd32,
- .wr32 = _nvkm_falcon_wr32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index bdc3a05907d5..1c291e6fcf96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,9 @@
+nvkm-y += nvkm/engine/sw/base.o
nvkm-y += nvkm/engine/sw/nv04.o
nvkm-y += nvkm/engine/sw/nv10.o
nvkm-y += nvkm/engine/sw/nv50.o
nvkm-y += nvkm/engine/sw/gf100.o
+
+nvkm-y += nvkm/engine/sw/chan.o
+
+nvkm-y += nvkm/engine/sw/nvsw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
new file mode 100644
index 000000000000..53c1f7e75b54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <engine/fifo.h>
+
+bool
+nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
+{
+ struct nvkm_sw_chan *chan;
+ bool handled = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sw->engine.lock, flags);
+ list_for_each_entry(chan, &sw->chan, head) {
+ if (chan->fifo->chid == chid) {
+ handled = nvkm_sw_chan_mthd(chan, subc, mthd, data);
+ list_del(&chan->head);
+ list_add(&chan->head, &sw->chan);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sw->engine.lock, flags);
+ return handled;
+}
+
+static int
+nvkm_sw_oclass_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_sw_chan *chan = nvkm_sw_chan(oclass->parent);
+ const struct nvkm_sw_chan_sclass *sclass = oclass->engn;
+ return sclass->ctor(chan, oclass, data, size, pobject);
+}
+
+static int
+nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+ struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+ int c = 0;
+
+ while (sw->func->sclass[c].ctor) {
+ if (c++ == index) {
+ oclass->engn = &sw->func->sclass[index];
+ oclass->base = sw->func->sclass[index].base;
+ oclass->base.ctor = nvkm_sw_oclass_new;
+ return index;
+ }
+ }
+
+ return c;
+}
+
+static int
+nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+ return sw->func->chan_new(sw, fifoch, oclass, pobject);
+}
+
+static void *
+nvkm_sw_dtor(struct nvkm_engine *engine)
+{
+ return nvkm_sw(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_sw = {
+ .dtor = nvkm_sw_dtor,
+ .fifo.cclass = nvkm_sw_cclass_get,
+ .fifo.sclass = nvkm_sw_oclass_get,
+};
+
+int
+nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
+ int index, struct nvkm_sw **psw)
+{
+ struct nvkm_sw *sw;
+
+ if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_LIST_HEAD(&sw->chan);
+ sw->func = func;
+
+ return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
new file mode 100644
index 000000000000..d082f4f73a80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "chan.h"
+
+#include <core/notify.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+bool
+nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
+{
+ switch (mthd) {
+ case 0x0000:
+ return true;
+ case 0x0500:
+ nvkm_event_send(&chan->event, 1, 0, NULL, 0);
+ return true;
+ default:
+ if (chan->func->mthd)
+ return chan->func->mthd(chan, subc, mthd, data);
+ break;
+ }
+ return false;
+}
+
+static int
+nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
+{
+ union {
+ struct nvif_notify_uevent_req none;
+ } *req = data;
+ int ret;
+
+ if (nvif_unvers(req->none)) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+ notify->index = 0;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_event_func
+nvkm_sw_chan_event = {
+ .ctor = nvkm_sw_chan_event_ctor,
+};
+
+static void *
+nvkm_sw_chan_dtor(struct nvkm_object *object)
+{
+ struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
+ struct nvkm_sw *sw = chan->sw;
+ unsigned long flags;
+ void *data = chan;
+
+ if (chan->func->dtor)
+ data = chan->func->dtor(chan);
+ nvkm_event_fini(&chan->event);
+
+ spin_lock_irqsave(&sw->engine.lock, flags);
+ list_del(&chan->head);
+ spin_unlock_irqrestore(&sw->engine.lock, flags);
+ return data;
+}
+
+static const struct nvkm_object_func
+nvkm_sw_chan = {
+ .dtor = nvkm_sw_chan_dtor,
+};
+
+int
+nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
+ struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+ struct nvkm_sw_chan *chan)
+{
+ unsigned long flags;
+
+ nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
+ chan->func = func;
+ chan->sw = sw;
+ chan->fifo = fifo;
+ spin_lock_irqsave(&sw->engine.lock, flags);
+ list_add(&chan->head, &sw->chan);
+ spin_unlock_irqrestore(&sw->engine.lock, flags);
+
+ return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
new file mode 100644
index 000000000000..6608bf6c6842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_SW_CHAN_H__
+#define __NVKM_SW_CHAN_H__
+#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
+#include "priv.h"
+#include <core/event.h>
+
+struct nvkm_sw_chan {
+ const struct nvkm_sw_chan_func *func;
+ struct nvkm_object object;
+ struct nvkm_sw *sw;
+ struct nvkm_fifo_chan *fifo;
+ struct list_head head;
+
+ struct nvkm_event event;
+};
+
+struct nvkm_sw_chan_func {
+ void *(*dtor)(struct nvkm_sw_chan *);
+ bool (*mthd)(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+};
+
+int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
+ struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+ struct nvkm_sw_chan *);
+bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 533d5d8ed363..b01ef7eca906 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -23,119 +23,133 @@
*/
#include "nv50.h"
+#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
/*******************************************************************************
- * software object classes
+ * software context
******************************************************************************/
static int
-gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- u64 data = *(u32 *)args;
- if (mthd == 0x0400) {
- chan->vblank.offset &= 0x00ffffffffULL;
- chan->vblank.offset |= data << 32;
- } else {
- chan->vblank.offset &= 0xff00000000ULL;
- chan->vblank.offset |= data;
- }
- return 0;
+ struct nv50_sw_chan *chan =
+ container_of(notify, typeof(*chan), vblank.notify[notify->index]);
+ struct nvkm_sw *sw = chan->base.sw;
+ struct nvkm_device *device = sw->engine.subdev.device;
+ u32 inst = chan->base.fifo->inst->addr >> 12;
+
+ nvkm_wr32(device, 0x001718, 0x80000000 | inst);
+ nvkm_bar_flush(device->bar);
+ nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
+ nvkm_wr32(device, 0x060014, chan->vblank.value);
+
+ return NVKM_NOTIFY_DROP;
}
-static int
-gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
+static bool
+gf100_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
- u32 data = *(u32 *)args;
-
+ struct nv50_sw_chan *chan = nv50_sw_chan(base);
+ struct nvkm_engine *engine = chan->base.object.engine;
+ struct nvkm_device *device = engine->subdev.device;
switch (mthd) {
- case 0x600:
- nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
- break;
- case 0x644:
- if (data & ~0x1ffffe)
- return -EINVAL;
- nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
+ case 0x0400:
+ chan->vblank.offset &= 0x00ffffffffULL;
+ chan->vblank.offset |= (u64)data << 32;
+ return true;
+ case 0x0404:
+ chan->vblank.offset &= 0xff00000000ULL;
+ chan->vblank.offset |= data;
+ return true;
+ case 0x0408:
+ chan->vblank.value = data;
+ return true;
+ case 0x040c:
+ if (data < device->disp->vblank.index_nr) {
+ nvkm_notify_get(&chan->vblank.notify[data]);
+ return true;
+ }
break;
- case 0x6ac:
- nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
+ case 0x600: /* MP.PM_UNK000 */
+ nvkm_wr32(device, 0x419e00, data);
+ return true;
+ case 0x644: /* MP.TRAP_WARP_ERROR_EN */
+ if (!(data & ~0x001ffffe)) {
+ nvkm_wr32(device, 0x419e44, data);
+ return true;
+ }
break;
+ case 0x6ac: /* MP.PM_UNK0AC */
+ nvkm_wr32(device, 0x419eac, data);
+ return true;
default:
- return -EINVAL;
+ break;
}
- return 0;
+ return false;
}
-static struct nvkm_omthds
-gf100_sw_omthds[] = {
- { 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset },
- { 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset },
- { 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
- { 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
- { 0x0500, 0x0500, nv50_sw_mthd_flip },
- { 0x0600, 0x0600, gf100_sw_mthd_mp_control },
- { 0x0644, 0x0644, gf100_sw_mthd_mp_control },
- { 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
- {}
+static const struct nvkm_sw_chan_func
+gf100_sw_chan = {
+ .dtor = nv50_sw_chan_dtor,
+ .mthd = gf100_sw_chan_mthd,
};
-static struct nvkm_oclass
-gf100_sw_sclass[] = {
- { 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
- {}
-};
-
-/*******************************************************************************
- * software context
- ******************************************************************************/
-
static int
-gf100_sw_vblsem_release(struct nvkm_notify *notify)
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
{
- struct nv50_sw_chan *chan =
- container_of(notify, typeof(*chan), vblank.notify[notify->index]);
- struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
- struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_disp *disp = sw->engine.subdev.device->disp;
+ struct nv50_sw_chan *chan;
+ int ret, i;
- nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
- bar->flush(bar);
- nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
- nv_wr32(priv, 0x060014, chan->vblank.value);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
- return NVKM_NOTIFY_DROP;
-}
+ ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,
+ &chan->base);
+ if (ret)
+ return ret;
-static struct nv50_sw_cclass
-gf100_sw_cclass = {
- .base.handle = NV_ENGCTX(SW, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_sw_context_ctor,
- .dtor = nv50_sw_context_dtor,
- .init = _nvkm_sw_context_init,
- .fini = _nvkm_sw_context_fini,
- },
- .vblank = gf100_sw_vblsem_release,
-};
+ for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+ ret = nvkm_notify_init(NULL, &disp->vblank,
+ gf100_sw_chan_vblsem_release, false,
+ &(struct nvif_notify_head_req_v0) {
+ .head = i,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &chan->vblank.notify[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
-struct nvkm_oclass *
-gf100_sw_oclass = &(struct nv50_sw_oclass) {
- .base.handle = NV_ENGINE(SW, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_sw_ctor,
- .dtor = _nvkm_sw_dtor,
- .init = _nvkm_sw_init,
- .fini = _nvkm_sw_fini,
- },
- .cclass = &gf100_sw_cclass.base,
- .sclass = gf100_sw_sclass,
-}.base;
+static const struct nvkm_sw_func
+gf100_sw = {
+ .chan_new = gf100_sw_chan_new,
+ .sclass = {
+ { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } },
+ {}
+ }
+};
+
+int
+gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+ return nvkm_sw_new_(&gf100_sw, device, index, psw);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 897024421d36..445217ffa791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,15 +21,18 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/sw.h>
-#include <engine/fifo.h>
+#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
-struct nv04_sw_priv {
- struct nvkm_sw base;
-};
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+#include <nvif/unpack.h>
struct nv04_sw_chan {
struct nvkm_sw_chan base;
+ atomic_t ref;
};
/*******************************************************************************
@@ -37,103 +40,99 @@ struct nv04_sw_chan {
******************************************************************************/
static int
-nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
{
- struct nvkm_object *channel = (void *)nv_engctx(object->parent);
- struct nvkm_fifo_chan *fifo = (void *)channel->parent;
- atomic_set(&fifo->refcnt, *(u32*)data);
- return 0;
+ struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
+ union {
+ struct nv04_nvsw_get_ref_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ args->v0.ref = atomic_read(&chan->ref);
+ }
+
+ return ret;
}
static int
-nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
+nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
{
- struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent);
- if (chan->base.flip)
- return chan->base.flip(chan->base.flip_data);
+ switch (mthd) {
+ case NV04_NVSW_GET_REF:
+ return nv04_nvsw_mthd_get_ref(nvsw, data, size);
+ default:
+ break;
+ }
return -EINVAL;
}
-static struct nvkm_omthds
-nv04_sw_omthds[] = {
- { 0x0150, 0x0150, nv04_sw_set_ref },
- { 0x0500, 0x0500, nv04_sw_flip },
- {}
+static const struct nvkm_nvsw_func
+nv04_nvsw = {
+ .mthd = nv04_nvsw_mthd,
};
-static struct nvkm_oclass
-nv04_sw_sclass[] = {
- { 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds },
- {}
-};
+static int
+nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
+}
/*******************************************************************************
* software context
******************************************************************************/
-static int
-nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static bool
+nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
{
- struct nv04_sw_chan *chan;
- int ret;
+ struct nv04_sw_chan *chan = nv04_sw_chan(base);
- ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
+ switch (mthd) {
+ case 0x0150:
+ atomic_set(&chan->ref, data);
+ return true;
+ default:
+ break;
+ }
- return 0;
+ return false;
}
-static struct nvkm_oclass
-nv04_sw_cclass = {
- .handle = NV_ENGCTX(SW, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_sw_context_ctor,
- .dtor = _nvkm_sw_context_dtor,
- .init = _nvkm_sw_context_init,
- .fini = _nvkm_sw_context_fini,
- },
+static const struct nvkm_sw_chan_func
+nv04_sw_chan = {
+ .mthd = nv04_sw_chan_mthd,
};
-/*******************************************************************************
- * software engine/subdev functions
- ******************************************************************************/
-
-void
-nv04_sw_intr(struct nvkm_subdev *subdev)
-{
- nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
-}
-
static int
-nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- struct nv04_sw_priv *priv;
- int ret;
+ struct nv04_sw_chan *chan;
- ret = nvkm_sw_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ atomic_set(&chan->ref, 0);
+ *pobject = &chan->base.object;
- nv_engine(priv)->cclass = &nv04_sw_cclass;
- nv_engine(priv)->sclass = nv04_sw_sclass;
- nv_subdev(priv)->intr = nv04_sw_intr;
- return 0;
+ return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
}
-struct nvkm_oclass *
-nv04_sw_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(SW, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_sw_ctor,
- .dtor = _nvkm_sw_dtor,
- .init = _nvkm_sw_init,
- .fini = _nvkm_sw_fini,
- },
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static const struct nvkm_sw_func
+nv04_sw = {
+ .chan_new = nv04_sw_chan_new,
+ .sclass = {
+ { nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
+ {}
+ }
};
+
+int
+nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+ return nvkm_sw_new_(&nv04_sw, device, index, psw);
+}
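
The old nvkm_oclass/ofuncs boilerplate above collapses into a const function table plus a one-line constructor. As a minimal sketch of what a further chipset variant now needs (the nvxx_ names are hypothetical; the shape copies nv04_sw):

/* Sketch only: nvxx_ is a hypothetical chipset prefix; nvxx_sw_chan_new
 * is assumed to be a channel constructor shaped like nv04_sw_chan_new. */
#include "priv.h"
#include "chan.h"
#include "nvsw.h"

#include <nvif/ioctl.h>

static const struct nvkm_sw_func
nvxx_sw = {
	.chan_new = nvxx_sw_chan_new,
	.sclass = {
		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
		{}	/* zeroed sentinel terminates the class list */
	}
};

int
nvxx_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
{
	return nvkm_sw_new_(&nvxx_sw, device, index, psw);
}
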
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index c61153a3fb8b..adf70d92b244 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,102 +21,48 @@
*
* Authors: Ben Skeggs
*/
-#include <engine/sw.h>
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
-struct nv10_sw_priv {
- struct nvkm_sw base;
-};
-
-struct nv10_sw_chan {
- struct nvkm_sw_chan base;
-};
+#include <nvif/ioctl.h>
/*******************************************************************************
- * software object classes
+ * software context
******************************************************************************/
-static int
-nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
- if (chan->base.flip)
- return chan->base.flip(chan->base.flip_data);
- return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv10_sw_omthds[] = {
- { 0x0500, 0x0500, nv10_sw_flip },
- {}
-};
-
-static struct nvkm_oclass
-nv10_sw_sclass[] = {
- { 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
- {}
+static const struct nvkm_sw_chan_func
+nv10_sw_chan = {
};
-/*******************************************************************************
- * software context
- ******************************************************************************/
-
static int
-nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- struct nv10_sw_chan *chan;
- int ret;
+ struct nvkm_sw_chan *chan;
- ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->object;
- return 0;
+ return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);
}
-static struct nvkm_oclass
-nv10_sw_cclass = {
- .handle = NV_ENGCTX(SW, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_sw_context_ctor,
- .dtor = _nvkm_sw_context_dtor,
- .init = _nvkm_sw_context_init,
- .fini = _nvkm_sw_context_fini,
- },
-};
-
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
-static int
-nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv10_sw_priv *priv;
- int ret;
-
- ret = nvkm_sw_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_sw_func
+nv10_sw = {
+ .chan_new = nv10_sw_chan_new,
+ .sclass = {
+ { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } },
+ {}
+ }
+};
- nv_engine(priv)->cclass = &nv10_sw_cclass;
- nv_engine(priv)->sclass = nv10_sw_sclass;
- nv_subdev(priv)->intr = nv04_sw_intr;
- return 0;
+int
+nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+ return nvkm_sw_new_(&nv10_sw, device, index, psw);
}
-
-struct nvkm_oclass *
-nv10_sw_oclass = &(struct nvkm_oclass) {
- .handle = NV_ENGINE(SW, 0x10),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_sw_ctor,
- .dtor = _nvkm_sw_dtor,
- .init = _nvkm_sw_init,
- .fini = _nvkm_sw_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 401fcd73086b..a381196af69d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -23,153 +23,98 @@
*/
#include "nv50.h"
-#include <core/device.h>
-#include <core/handle.h>
-#include <core/namedb.h>
+#include <core/gpuobj.h>
#include <engine/disp.h>
+#include <engine/fifo/chan.h>
#include <subdev/bar.h>
#include <nvif/event.h>
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
- struct nvkm_handle *handle;
- int ret = -EINVAL;
-
- handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
- if (!handle)
- return -ENOENT;
-
- if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
- struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
- chan->vblank.ctxdma = gpuobj->node->offset >> 4;
- ret = 0;
- }
- nvkm_namedb_put(handle);
- return ret;
-}
-
-static int
-nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- chan->vblank.offset = *(u32 *)args;
- return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- chan->vblank.value = *(u32 *)args;
- return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
- void *args, u32 size)
-{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- u32 head = *(u32 *)args;
- if (head >= nvkm_disp(chan)->vblank.index_nr)
- return -EINVAL;
-
- nvkm_notify_get(&chan->vblank.notify[head]);
- return 0;
-}
-
-int
-nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
- if (chan->base.flip)
- return chan->base.flip(chan->base.flip_data);
- return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv50_sw_omthds[] = {
- { 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
- { 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
- { 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
- { 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
- { 0x0500, 0x0500, nv50_sw_mthd_flip },
- {}
-};
-
-static struct nvkm_oclass
-nv50_sw_sclass[] = {
- { 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
- {}
-};
+#include <nvif/ioctl.h>
/*******************************************************************************
* software context
******************************************************************************/
static int
-nv50_sw_vblsem_release(struct nvkm_notify *notify)
+nv50_sw_chan_vblsem_release(struct nvkm_notify *notify)
{
struct nv50_sw_chan *chan =
container_of(notify, typeof(*chan), vblank.notify[notify->index]);
- struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
- struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_sw *sw = chan->base.sw;
+ struct nvkm_device *device = sw->engine.subdev.device;
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
- bar->flush(bar);
+ nvkm_wr32(device, 0x001704, chan->base.fifo->inst->addr >> 12);
+ nvkm_wr32(device, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ nvkm_bar_flush(device->bar);
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
+ if (device->chipset == 0x50) {
+ nvkm_wr32(device, 0x001570, chan->vblank.offset);
+ nvkm_wr32(device, 0x001574, chan->vblank.value);
} else {
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
+ nvkm_wr32(device, 0x060010, chan->vblank.offset);
+ nvkm_wr32(device, 0x060014, chan->vblank.value);
}
return NVKM_NOTIFY_DROP;
}
-void
-nv50_sw_context_dtor(struct nvkm_object *object)
+static bool
+nv50_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
{
- struct nv50_sw_chan *chan = (void *)object;
- int i;
+ struct nv50_sw_chan *chan = nv50_sw_chan(base);
+ struct nvkm_engine *engine = chan->base.object.engine;
+ struct nvkm_device *device = engine->subdev.device;
+ switch (mthd) {
+ case 0x018c: chan->vblank.ctxdma = data; return true;
+ case 0x0400: chan->vblank.offset = data; return true;
+ case 0x0404: chan->vblank.value = data; return true;
+ case 0x0408:
+ if (data < device->disp->vblank.index_nr) {
+ nvkm_notify_get(&chan->vblank.notify[data]);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+void *
+nv50_sw_chan_dtor(struct nvkm_sw_chan *base)
+{
+ struct nv50_sw_chan *chan = nv50_sw_chan(base);
+ int i;
for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
nvkm_notify_fini(&chan->vblank.notify[i]);
-
- nvkm_sw_context_destroy(&chan->base);
+ return chan;
}
-int
-nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_sw_chan_func
+nv50_sw_chan = {
+ .dtor = nv50_sw_chan_dtor,
+ .mthd = nv50_sw_chan_mthd,
+};
+
+static int
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+ const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
- struct nvkm_disp *pdisp = nvkm_disp(parent);
- struct nv50_sw_cclass *pclass = (void *)oclass;
+ struct nvkm_disp *disp = sw->engine.subdev.device->disp;
struct nv50_sw_chan *chan;
int ret, i;
- ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
- *pobject = nv_object(chan);
+ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &chan->base.object;
+
+ ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);
if (ret)
return ret;
- for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
- ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank,
- false,
+ for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+ ret = nvkm_notify_init(NULL, &disp->vblank,
+ nv50_sw_chan_vblsem_release, false,
&(struct nvif_notify_head_req_v0) {
.head = i,
},
@@ -180,55 +125,24 @@ nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return ret;
}
- chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
return 0;
}
-static struct nv50_sw_cclass
-nv50_sw_cclass = {
- .base.handle = NV_ENGCTX(SW, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_sw_context_ctor,
- .dtor = nv50_sw_context_dtor,
- .init = _nvkm_sw_context_init,
- .fini = _nvkm_sw_context_fini,
- },
- .vblank = nv50_sw_vblsem_release,
-};
-
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
+static const struct nvkm_sw_func
+nv50_sw = {
+ .chan_new = nv50_sw_chan_new,
+ .sclass = {
+ { nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } },
+ {}
+ }
+};
+
int
-nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
{
- struct nv50_sw_oclass *pclass = (void *)oclass;
- struct nv50_sw_priv *priv;
- int ret;
-
- ret = nvkm_sw_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_engine(priv)->cclass = pclass->cclass;
- nv_engine(priv)->sclass = pclass->sclass;
- nv_subdev(priv)->intr = nv04_sw_intr;
- return 0;
+ return nvkm_sw_new_(&nv50_sw, device, index, psw);
}
-
-struct nvkm_oclass *
-nv50_sw_oclass = &(struct nv50_sw_oclass) {
- .base.handle = NV_ENGINE(SW, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_sw_ctor,
- .dtor = _nvkm_sw_dtor,
- .init = _nvkm_sw_init,
- .fini = _nvkm_sw_fini,
- },
- .cclass = &nv50_sw_cclass.base,
- .sclass = nv50_sw_sclass,
-}.base;
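
Note how nv50_sw_chan_vblsem_release() recovers its channel from the notify pointer alone: the nvkm_notify objects are embedded in an array inside nv50_sw_chan, so container_of() on the indexed element walks back to the owner. A minimal sketch of the idiom:

/* Sketch of the embedded-notify idiom used above; NVKM_NOTIFY_DROP
 * stops further deliveries until the notify is re-armed. */
static int
nvxx_vblsem_release(struct nvkm_notify *notify)
{
	struct nv50_sw_chan *chan =
		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
	/* program the semaphore release for chan here */
	return NVKM_NOTIFY_DROP;
}
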
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index d8adc1108467..25cdfdef2d46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,45 +1,20 @@
#ifndef __NVKM_SW_NV50_H__
#define __NVKM_SW_NV50_H__
-#include <engine/sw.h>
+#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
#include <core/notify.h>
-struct nv50_sw_oclass {
- struct nvkm_oclass base;
- struct nvkm_oclass *cclass;
- struct nvkm_oclass *sclass;
-};
-
-struct nv50_sw_priv {
- struct nvkm_sw base;
-};
-
-int nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-struct nv50_sw_cclass {
- struct nvkm_oclass base;
- int (*vblank)(struct nvkm_notify *);
-};
-
struct nv50_sw_chan {
struct nvkm_sw_chan base;
struct {
struct nvkm_notify notify[4];
- u32 channel;
u32 ctxdma;
u64 offset;
u32 value;
} vblank;
};
-int nv50_sw_context_ctor(struct nvkm_object *,
- struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv50_sw_context_dtor(struct nvkm_object *);
-
-int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
+void *nv50_sw_chan_dtor(struct nvkm_sw_chan *);
#endif
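
The nv50_sw_chan() define above is the downcast convention used throughout this rework: a derived structure embeds its base as a member named base, and a container_of() wrapper recovers the derived type inside the implementation hooks. A sketch with a hypothetical type:

/* Sketch: hypothetical derived channel using the same downcast macro. */
struct nvxx_chan {
	struct nvkm_sw_chan base;	/* base object embedded by value */
	atomic_t ref;			/* chip-specific state follows */
};
#define nvxx_chan(p) container_of((p), struct nvxx_chan, base)

static bool
nvxx_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
{
	struct nvxx_chan *chan = nvxx_chan(base);
	if (mthd == 0x0150) {
		atomic_set(&chan->ref, data);
		return true;	/* handled */
	}
	return false;		/* unknown method, caller reports it */
}
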
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
new file mode 100644
index 000000000000..66cf986b9572
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nvsw.h"
+#include "chan.h"
+
+#include <nvif/class.h>
+
+static int
+nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+ struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+ if (nvsw->func->mthd)
+ return nvsw->func->mthd(nvsw, mthd, data, size);
+ return -ENODEV;
+}
+
+static int
+nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
+ struct nvkm_event **pevent)
+{
+ struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+ switch (mthd) {
+ case NVSW_NTFY_UEVENT:
+ *pevent = &nvsw->chan->event;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_nvsw_ = {
+ .mthd = nvkm_nvsw_mthd_,
+ .ntfy = nvkm_nvsw_ntfy_,
+};
+
+int
+nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_nvsw *nvsw;
+
+ if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &nvsw->object;
+
+ nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
+ nvsw->func = func;
+ nvsw->chan = chan;
+ return 0;
+}
+
+static const struct nvkm_nvsw_func
+nvkm_nvsw = {
+};
+
+int
+nvkm_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nvkm_nvsw_new_(&nvkm_nvsw, chan, oclass, data, size, pobject);
+}
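
nvkm_nvsw_new_() is the shared base constructor: it installs the generic nvkm_object_func and records the per-chip hook table, so a variant only supplies an nvkm_nvsw_func. A sketch of a hypothetical variant adding one method (NVXX_GET_REF and its helper are illustrative, mirroring the NV04_NVSW_GET_REF handling earlier in this diff):

/* Sketch: NVXX_GET_REF and nvxx_nvsw_mthd_get_ref are hypothetical. */
static int
nvxx_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
{
	switch (mthd) {
	case NVXX_GET_REF:
		return nvxx_nvsw_mthd_get_ref(nvsw, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static const struct nvkm_nvsw_func
nvxx_nvsw = {
	.mthd = nvxx_nvsw_mthd,
};

static int
nvxx_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
	      void *data, u32 size, struct nvkm_object **pobject)
{
	return nvkm_nvsw_new_(&nvxx_nvsw, chan, oclass, data, size, pobject);
}
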
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
new file mode 100644
index 000000000000..943ef4c10091
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_NVSW_H__
+#define __NVKM_NVSW_H__
+#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
+#include "priv.h"
+
+struct nvkm_nvsw {
+ struct nvkm_object object;
+ const struct nvkm_nvsw_func *func;
+ struct nvkm_sw_chan *chan;
+};
+
+struct nvkm_nvsw_func {
+ int (*mthd)(struct nvkm_nvsw *, u32 mthd, void *data, u32 size);
+};
+
+int nvkm_nvsw_new_(const struct nvkm_nvsw_func *, struct nvkm_sw_chan *,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **pobject);
+int nvkm_nvsw_new(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **pobject);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
new file mode 100644
index 000000000000..0ef1318dc2fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_SW_PRIV_H__
+#define __NVKM_SW_PRIV_H__
+#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
+#include <engine/sw.h>
+struct nvkm_sw_chan;
+
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
+ int index, struct nvkm_sw **);
+
+struct nvkm_sw_chan_sclass {
+ int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
+ struct nvkm_sclass base;
+};
+
+struct nvkm_sw_func {
+ int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+ const struct nvkm_oclass *, struct nvkm_object **);
+ const struct nvkm_sw_chan_sclass sclass[];
+};
+#endif
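
The sclass[] array is flexible and ends with a zeroed sentinel; consumers walk it until the ctor pointer is NULL. The real walker lives outside this hunk (in the sw base code), so this is only a sketch of the expected iteration:

/* Sketch only: the in-tree consumer is not part of this hunk. */
static int
nvxx_sw_count_sclass(const struct nvkm_sw_func *func)
{
	int c = 0;
	while (func->sclass[c].ctor)	/* {} sentinel has a NULL ctor */
		c++;
	return c;
}
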
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 45f4e186befc..4188c77ac927 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -22,72 +22,23 @@
* Authors: Ben Skeggs, Ilia Mirkin
*/
#include <engine/vp.h>
-#include <engine/xtensa.h>
-#include <core/engctx.h>
-
-/*******************************************************************************
- * VP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_sclass[] = {
- { 0x7476, &nvkm_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PVP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_cclass = {
- .handle = NV_ENGCTX(VP, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_xtensa_engctx_ctor,
- .dtor = _nvkm_engctx_dtor,
- .init = _nvkm_engctx_init,
- .fini = _nvkm_engctx_fini,
- .rd32 = _nvkm_engctx_rd32,
- .wr32 = _nvkm_engctx_wr32,
- },
+#include <nvif/class.h>
+
+static const struct nvkm_xtensa_func
+g84_vp = {
+ .pmc_enable = 0x01020000,
+ .fifo_val = 0x111,
+ .unkd28 = 0x9c544,
+ .sclass = {
+ { -1, -1, NV74_VP2 },
+ {}
+ }
};
-/*******************************************************************************
- * PVP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
- struct nvkm_xtensa *priv;
- int ret;
-
- ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
- "PVP", "vp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x01020000;
- nv_engine(priv)->cclass = &g84_vp_cclass;
- nv_engine(priv)->sclass = g84_vp_sclass;
- priv->fifo_val = 0x111;
- priv->unkd28 = 0x9c544;
- return 0;
+ return nvkm_xtensa_new_(&g84_vp, device, index,
+ true, 0x00f000, pengine);
}
-
-struct nvkm_oclass
-g84_vp_oclass = {
- .handle = NV_ENGINE(VP, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_vp_ctor,
- .dtor = _nvkm_xtensa_dtor,
- .init = _nvkm_xtensa_init,
- .fini = _nvkm_xtensa_fini,
- .rd32 = _nvkm_xtensa_rd32,
- .wr32 = _nvkm_xtensa_wr32,
- },
-};
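
The per-engine magic numbers that the old ctor assigned at runtime (unit mask, FIFO_CTRL value, the unknown 0xd28 word) are now read-only data, so a further VP variant is purely declarative. Sketch with hypothetical values:

/* Sketch: nvxx_ names and register values are hypothetical. */
static const struct nvkm_xtensa_func
nvxx_vp = {
	.pmc_enable = 0x01020000,
	.fifo_val = 0x111,
	.unkd28 = 0x9c544,
	.sclass = {
		{ -1, -1, NV74_VP2 },
		{}
	}
};

int
nvxx_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
	return nvkm_xtensa_new_(&nvxx_vp, device, index, true, 0x00f000, pengine);
}
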
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index cea90df533d9..a3d4f5bcec7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -20,153 +20,173 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <engine/xtensa.h>
-#include <core/device.h>
-#include <core/engctx.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
-u32
-_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
{
- struct nvkm_xtensa *xtensa = (void *)object;
- return nv_rd32(xtensa, xtensa->addr + addr);
-}
+ struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
+ int c = 0;
-void
-_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_xtensa *xtensa = (void *)object;
- nv_wr32(xtensa, xtensa->addr + addr, data);
+ while (xtensa->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = xtensa->func->sclass[index];
+ return index;
+ }
+ }
+
+ return c;
}
-int
-_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static int
+nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+ int align, struct nvkm_gpuobj **pgpuobj)
{
- struct nvkm_engctx *engctx;
- int ret;
-
- ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &engctx);
- *pobject = nv_object(engctx);
- return ret;
+ return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
+ true, parent, pgpuobj);
}
-void
-_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
+static const struct nvkm_object_func
+nvkm_xtensa_cclass = {
+ .bind = nvkm_xtensa_cclass_bind,
+};
+
+static void
+nvkm_xtensa_intr(struct nvkm_engine *engine)
{
- struct nvkm_xtensa *xtensa = (void *)subdev;
- u32 unk104 = nv_ro32(xtensa, 0xd04);
- u32 intr = nv_ro32(xtensa, 0xc20);
- u32 chan = nv_ro32(xtensa, 0xc28);
- u32 unk10c = nv_ro32(xtensa, 0xd0c);
+ struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+ struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = xtensa->addr;
+ u32 unk104 = nvkm_rd32(device, base + 0xd04);
+ u32 intr = nvkm_rd32(device, base + 0xc20);
+ u32 chan = nvkm_rd32(device, base + 0xc28);
+ u32 unk10c = nvkm_rd32(device, base + 0xd0c);
if (intr & 0x10)
- nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
- nv_wo32(xtensa, 0xc20, intr);
- intr = nv_ro32(xtensa, 0xc20);
+ nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
+ nvkm_wr32(device, base + 0xc20, intr);
+ intr = nvkm_rd32(device, base + 0xc20);
if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
- nv_debug(xtensa, "Enabling FIFO_CTRL\n");
- nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
+ nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
+ nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
}
}
-int
-nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 addr, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
+static int
+nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
{
- struct nvkm_xtensa *xtensa;
- int ret;
+ struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+ struct nvkm_device *device = xtensa->engine.subdev.device;
+ const u32 base = xtensa->addr;
- ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
- fname, length, pobject);
- xtensa = *pobject;
- if (ret)
- return ret;
+ nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
+ nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
- nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
- xtensa->addr = addr;
+ if (!suspend)
+ nvkm_memory_del(&xtensa->gpu_fw);
return 0;
}
-int
-_nvkm_xtensa_init(struct nvkm_object *object)
+static int
+nvkm_xtensa_init(struct nvkm_engine *engine)
{
- struct nvkm_device *device = nv_device(object);
- struct nvkm_xtensa *xtensa = (void *)object;
+ struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+ struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = xtensa->addr;
const struct firmware *fw;
char name[32];
int i, ret;
+ u64 addr, size;
u32 tmp;
- ret = nvkm_engine_init(&xtensa->base);
- if (ret)
- return ret;
-
if (!xtensa->gpu_fw) {
snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
xtensa->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
- nv_warn(xtensa, "unable to load firmware %s\n", name);
+ nvkm_warn(subdev, "unable to load firmware %s\n", name);
return ret;
}
if (fw->size > 0x40000) {
- nv_warn(xtensa, "firmware %s too large\n", name);
+ nvkm_warn(subdev, "firmware %s too large\n", name);
release_firmware(fw);
return -EINVAL;
}
- ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x40000, 0x1000, false,
&xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
return ret;
}
- nv_debug(xtensa, "Loading firmware to address: 0x%llx\n",
- xtensa->gpu_fw->addr);
-
+ nvkm_kmap(xtensa->gpu_fw);
for (i = 0; i < fw->size / 4; i++)
- nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+ nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+ nvkm_done(xtensa->gpu_fw);
release_firmware(fw);
}
- nv_wo32(xtensa, 0xd10, 0x1fffffff); /* ?? */
- nv_wo32(xtensa, 0xd08, 0x0fffffff); /* ?? */
+ addr = nvkm_memory_addr(xtensa->gpu_fw);
+ size = nvkm_memory_size(xtensa->gpu_fw);
- nv_wo32(xtensa, 0xd28, xtensa->unkd28); /* ?? */
- nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
- nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+ nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
+ nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
- nv_wo32(xtensa, 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */
- nv_wo32(xtensa, 0xcc4, 0x1c); /* XT_REGION_SETUP */
- nv_wo32(xtensa, 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */
+ nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
+ nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+ nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
- tmp = nv_rd32(xtensa, 0x0);
- nv_wo32(xtensa, 0xde0, tmp); /* SCRATCH_H2X */
+ nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
+ nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
+ nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
- nv_wo32(xtensa, 0xce8, 0xf); /* XT_REGION_SETUP */
+ tmp = nvkm_rd32(device, 0x0);
+ nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
- nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
- nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+ nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
+
+ nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+ nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
return 0;
}
-int
-_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_xtensa_dtor(struct nvkm_engine *engine)
{
- struct nvkm_xtensa *xtensa = (void *)object;
+ return nvkm_xtensa(engine);
+}
- nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */
- nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */
+static const struct nvkm_engine_func
+nvkm_xtensa = {
+ .dtor = nvkm_xtensa_dtor,
+ .init = nvkm_xtensa_init,
+ .fini = nvkm_xtensa_fini,
+ .intr = nvkm_xtensa_intr,
+ .fifo.sclass = nvkm_xtensa_oclass_get,
+ .cclass = &nvkm_xtensa_cclass,
+};
- if (!suspend)
- nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);
+int
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
+ struct nvkm_device *device, int index, bool enable,
+ u32 addr, struct nvkm_engine **pengine)
+{
+ struct nvkm_xtensa *xtensa;
+
+ if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
+ return -ENOMEM;
+ xtensa->func = func;
+ xtensa->addr = addr;
+ *pengine = &xtensa->engine;
- return nvkm_engine_fini(&xtensa->base, suspend);
+ return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+ enable, &xtensa->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index a1bb3e48739c..ee2c38f50ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -13,6 +13,7 @@ include $(src)/nvkm/subdev/ltc/Kbuild
include $(src)/nvkm/subdev/mc/Kbuild
include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
+include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1ab554a0b5e0..1e138b337955 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,5 @@
nvkm-y += nvkm/subdev/bar/base.o
nvkm-y += nvkm/subdev/bar/nv50.o
+nvkm-y += nvkm/subdev/bar/g84.o
nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 3502d00122ef..a9433ad45b1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -23,122 +23,61 @@
*/
#include "priv.h"
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-
-struct nvkm_barobj {
- struct nvkm_object base;
- struct nvkm_vma vma;
- void __iomem *iomem;
-};
-
-static int
-nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+void
+nvkm_bar_flush(struct nvkm_bar *bar)
{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_bar *bar = nvkm_bar(device);
- struct nvkm_mem *mem = data;
- struct nvkm_barobj *barobj;
- int ret;
-
- ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
- *pobject = nv_object(barobj);
- if (ret)
- return ret;
-
- ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
- if (ret)
- return ret;
-
- barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
- (u32)barobj->vma.offset, mem->size << 12);
- if (!barobj->iomem) {
- nv_warn(bar, "PRAMIN ioremap failed\n");
- return -ENOMEM;
- }
-
- return 0;
+ if (bar && bar->func->flush)
+ bar->func->flush(bar);
}
-static void
-nvkm_barobj_dtor(struct nvkm_object *object)
+struct nvkm_vm *
+nvkm_bar_kmap(struct nvkm_bar *bar)
{
- struct nvkm_bar *bar = nvkm_bar(object);
- struct nvkm_barobj *barobj = (void *)object;
- if (barobj->vma.node) {
- if (barobj->iomem)
- iounmap(barobj->iomem);
- bar->unmap(bar, &barobj->vma);
- }
- nvkm_object_destroy(&barobj->base);
+ /* disallow kmap() until after vm has been bootstrapped */
+ if (bar && bar->func->kmap && bar->subdev.oneinit)
+ return bar->func->kmap(bar);
+ return NULL;
}
-static u32
-nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
+int
+nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
{
- struct nvkm_barobj *barobj = (void *)object;
- return ioread32_native(barobj->iomem + addr);
+ return bar->func->umap(bar, size, type, vma);
}
-static void
-nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
{
- struct nvkm_barobj *barobj = (void *)object;
- iowrite32_native(data, barobj->iomem + addr);
+ struct nvkm_bar *bar = nvkm_bar(subdev);
+ return bar->func->oneinit(bar);
}
-static struct nvkm_oclass
-nvkm_barobj_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nvkm_barobj_ctor,
- .dtor = nvkm_barobj_dtor,
- .init = nvkm_object_init,
- .fini = nvkm_object_fini,
- .rd32 = nvkm_barobj_rd32,
- .wr32 = nvkm_barobj_wr32,
- },
-};
-
-int
-nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
- struct nvkm_mem *mem, struct nvkm_object **pobject)
+static int
+nvkm_bar_init(struct nvkm_subdev *subdev)
{
- struct nvkm_object *gpuobj;
- int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object,
- &nvkm_barobj_oclass, mem, 0, &gpuobj);
- if (ret == 0)
- *pobject = gpuobj;
- return ret;
+ struct nvkm_bar *bar = nvkm_bar(subdev);
+ return bar->func->init(bar);
}
-int
-nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_bar_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_bar *bar;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
- "bar", length, pobject);
- bar = *pobject;
- if (ret)
- return ret;
-
- return 0;
+ struct nvkm_bar *bar = nvkm_bar(subdev);
+ return bar->func->dtor(bar);
}
-void
-nvkm_bar_destroy(struct nvkm_bar *bar)
-{
- nvkm_subdev_destroy(&bar->base);
-}
+static const struct nvkm_subdev_func
+nvkm_bar = {
+ .dtor = nvkm_bar_dtor,
+ .oneinit = nvkm_bar_oneinit,
+ .init = nvkm_bar_init,
+};
void
-_nvkm_bar_dtor(struct nvkm_object *object)
+nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
+ int index, struct nvkm_bar *bar)
{
- struct nvkm_bar *bar = (void *)object;
- nvkm_bar_destroy(bar);
+ nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+ bar->func = func;
+ spin_lock_init(&bar->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
new file mode 100644
index 000000000000..ef717136c838
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+void
+g84_bar_flush(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ unsigned long flags;
+ spin_lock_irqsave(&bar->lock, flags);
+ nvkm_wr32(device, 0x070000, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
+ break;
+ );
+ spin_unlock_irqrestore(&bar->lock, flags);
+}
+
+static const struct nvkm_bar_func
+g84_bar_func = {
+ .dtor = nv50_bar_dtor,
+ .oneinit = nv50_bar_oneinit,
+ .init = nv50_bar_init,
+ .kmap = nv50_bar_kmap,
+ .umap = nv50_bar_umap,
+ .flush = g84_bar_flush,
+};
+
+int
+g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+}
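
g84_bar_flush() replaces the old nv_wait() with nvkm_msec(): the statement body is polled until it executes break or the 2000ms budget expires, and the macro evaluates negative on timeout (nv50_bar_init below checks exactly that). Sketch of the idiom on its own:

/* Sketch of the nvkm_msec() polling idiom used by g84_bar_flush. */
static int
nvxx_wait_flush(struct nvkm_device *device)
{
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
			break;
	) < 0)
		return -EBUSY;	/* flush did not settle in time */
	return 0;
}
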
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 12a1aebd9a96..c794b2c2d21e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -21,101 +21,60 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "gf100.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
-struct gf100_bar_priv_vm {
- struct nvkm_gpuobj *mem;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-};
-
-struct gf100_bar_priv {
- struct nvkm_bar base;
- spinlock_t lock;
- struct gf100_bar_priv_vm bar[2];
-};
-
-static int
-gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
- struct nvkm_vma *vma)
-{
- struct gf100_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
- if (ret)
- return ret;
-
- nvkm_vm_map(vma, mem);
- return 0;
-}
-
-static int
-gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
- struct nvkm_vma *vma)
+static struct nvkm_vm *
+gf100_bar_kmap(struct nvkm_bar *base)
{
- struct gf100_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
- mem->page_shift, flags, vma);
- if (ret)
- return ret;
-
- nvkm_vm_map(vma, mem);
- return 0;
+ return gf100_bar(base)->bar[0].vm;
}
-static void
-gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+int
+gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
{
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
+ struct gf100_bar *bar = gf100_bar(base);
+ return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
}
static int
-gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
- int bar_nr)
+gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
+ struct lock_class_key *key, int bar_nr)
{
- struct nvkm_device *device = nv_device(&priv->base);
+ struct nvkm_device *device = bar->base.subdev.device;
struct nvkm_vm *vm;
resource_size_t bar_len;
int ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
&bar_vm->mem);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
- &bar_vm->pgd);
+ ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
if (ret)
return ret;
- bar_len = nv_device_resource_len(device, bar_nr);
+ bar_len = device->func->resource_size(device, bar_nr);
- ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+ ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+ atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
/*
* Bootstrap page table lookup.
*/
if (bar_nr == 3) {
- ret = nvkm_gpuobj_new(nv_object(priv), NULL,
- (bar_len >> 12) * 8, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &vm->pgt[0].obj[0]);
- vm->pgt[0].refcount[0] = 1;
- if (ret)
+ ret = nvkm_vm_boot(vm, bar_len);
+ if (ret) {
+ nvkm_vm_ref(NULL, &vm, NULL);
return ret;
+ }
}
ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
@@ -123,97 +82,101 @@ gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
if (ret)
return ret;
- nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
- nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
- nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
- nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+ nvkm_kmap(bar_vm->mem);
+ nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+ nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+ nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+ nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+ nvkm_done(bar_vm->mem);
return 0;
}
int
-gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_bar_oneinit(struct nvkm_bar *base)
{
- struct nvkm_device *device = nv_device(parent);
- struct gf100_bar_priv *priv;
- bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+ static struct lock_class_key bar1_lock;
+ static struct lock_class_key bar3_lock;
+ struct gf100_bar *bar = gf100_bar(base);
int ret;
- ret = nvkm_bar_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
/* BAR3 */
- if (has_bar3) {
- ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3);
+ if (bar->base.func->kmap) {
+ ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
if (ret)
return ret;
}
/* BAR1 */
- ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1);
+ ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
if (ret)
return ret;
- if (has_bar3) {
- priv->base.alloc = nvkm_bar_alloc;
- priv->base.kmap = gf100_bar_kmap;
- }
- priv->base.umap = gf100_bar_umap;
- priv->base.unmap = gf100_bar_unmap;
- priv->base.flush = g84_bar_flush;
- spin_lock_init(&priv->lock);
return 0;
}
-void
-gf100_bar_dtor(struct nvkm_object *object)
+int
+gf100_bar_init(struct nvkm_bar *base)
{
- struct gf100_bar_priv *priv = (void *)object;
+ struct gf100_bar *bar = gf100_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
+ u32 addr;
+
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
- nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
- nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd);
- nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
+ addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
+ nvkm_wr32(device, 0x001704, 0x80000000 | addr);
- if (priv->bar[0].vm) {
- nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
- nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
+ if (bar->bar[0].mem) {
+ addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+ nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
}
- nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
- nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
- nvkm_bar_destroy(&priv->base);
+ return 0;
}
-int
-gf100_bar_init(struct nvkm_object *object)
+void *
+gf100_bar_dtor(struct nvkm_bar *base)
{
- struct gf100_bar_priv *priv = (void *)object;
- int ret;
+ struct gf100_bar *bar = gf100_bar(base);
- ret = nvkm_bar_init(&priv->base);
- if (ret)
- return ret;
+ nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
+ nvkm_gpuobj_del(&bar->bar[1].pgd);
+ nvkm_memory_del(&bar->bar[1].mem);
- nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
- nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+ if (bar->bar[0].vm) {
+ nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
+ nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
+ }
+ nvkm_gpuobj_del(&bar->bar[0].pgd);
+ nvkm_memory_del(&bar->bar[0].mem);
+ return bar;
+}
- nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
- if (priv->bar[0].mem)
- nv_wr32(priv, 0x001714,
- 0xc0000000 | priv->bar[0].mem->addr >> 12);
+int
+gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+ int index, struct nvkm_bar **pbar)
+{
+ struct gf100_bar *bar;
+ if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_bar_ctor(func, device, index, &bar->base);
+ *pbar = &bar->base;
return 0;
}
-struct nvkm_oclass
-gf100_bar_oclass = {
- .handle = NV_SUBDEV(BAR, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_bar_ctor,
- .dtor = gf100_bar_dtor,
- .init = gf100_bar_init,
- .fini = _nvkm_bar_fini,
- },
+static const struct nvkm_bar_func
+gf100_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .init = gf100_bar_init,
+ .kmap = gf100_bar_kmap,
+ .umap = gf100_bar_umap,
+ .flush = g84_bar_flush,
};
+
+int
+gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+}
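
nvkm_vm_new() now takes a lock_class_key, so BAR1 and BAR3, although built by the same code path, get distinct lockdep classes and nested use does not raise false positives. The idiom, sketched:

/* Sketch: static keys give each VM mutex its own lockdep class. */
static int
nvxx_bar_vms(struct nvkm_device *device, u64 len1, u64 len3,
	     struct nvkm_vm **vm1, struct nvkm_vm **vm3)
{
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar3_lock;
	int ret = nvkm_vm_new(device, 0, len1, 0, &bar1_lock, vm1);
	if (ret)
		return ret;
	return nvkm_vm_new(device, 0, len3, 0, &bar3_lock, vm3);
}
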
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
new file mode 100644
index 000000000000..f7dea69640d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -0,0 +1,23 @@
+#ifndef __GF100_BAR_H__
+#define __GF100_BAR_H__
+#define gf100_bar(p) container_of((p), struct gf100_bar, base)
+#include "priv.h"
+
+struct gf100_bar_vm {
+ struct nvkm_memory *mem;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+};
+
+struct gf100_bar {
+ struct nvkm_bar base;
+ struct gf100_bar_vm bar[2];
+};
+
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+ int, struct nvkm_bar **);
+void *gf100_bar_dtor(struct nvkm_bar *);
+int gf100_bar_oneinit(struct nvkm_bar *);
+int gf100_bar_init(struct nvkm_bar *);
+int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 148f739a276e..9232fab4274c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,32 +19,22 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gk20a_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .init = gf100_bar_init,
+ .umap = gf100_bar_umap,
+ .flush = g84_bar_flush,
+};
int
-gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
- struct nvkm_bar *bar;
- int ret;
-
- ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject);
- if (ret)
- return ret;
-
- bar = (struct nvkm_bar *)*pobject;
- bar->iomap_uncached = true;
- return 0;
+ int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+ if (ret == 0)
+ (*pbar)->iomap_uncached = true;
+ return ret;
}
-
-struct nvkm_oclass
-gk20a_bar_oclass = {
- .handle = NV_SUBDEV(BAR, 0xea),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_bar_ctor,
- .dtor = gf100_bar_dtor,
- .init = gf100_bar_init,
- .fini = _nvkm_bar_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb91dcc..370dcd8ff7b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv50.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
-struct nv50_bar_priv {
- struct nvkm_bar base;
- spinlock_t lock;
- struct nvkm_gpuobj *mem;
- struct nvkm_gpuobj *pad;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *bar1_vm;
- struct nvkm_gpuobj *bar1;
- struct nvkm_vm *bar3_vm;
- struct nvkm_gpuobj *bar3;
-};
-
-static int
-nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
- struct nvkm_vma *vma)
-{
- struct nv50_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
- if (ret)
- return ret;
-
- nvkm_vm_map(vma, mem);
- return 0;
-}
-
-static int
-nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
- struct nvkm_vma *vma)
+struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *base)
{
- struct nv50_bar_priv *priv = (void *)bar;
- int ret;
-
- ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
- if (ret)
- return ret;
-
- nvkm_vm_map(vma, mem);
- return 0;
+ return nv50_bar(base)->bar3_vm;
}
-static void
-nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+int
+nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
{
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
+ struct nv50_bar *bar = nv50_bar(base);
+ return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
}
static void
-nv50_bar_flush(struct nvkm_bar *bar)
-{
- struct nv50_bar_priv *priv = (void *)bar;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- nv_wr32(priv, 0x00330c, 0x00000001);
- if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
- nv_warn(priv, "flush timeout\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void
-g84_bar_flush(struct nvkm_bar *bar)
+nv50_bar_flush(struct nvkm_bar *base)
{
- struct nv50_bar_priv *priv = (void *)bar;
+ struct nv50_bar *bar = nv50_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- nv_wr32(bar, 0x070000, 0x00000001);
- if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
- nv_warn(priv, "flush timeout\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&bar->base.lock, flags);
+ nvkm_wr32(device, 0x00330c, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
+ break;
+ );
+ spin_unlock_irqrestore(&bar->base.lock, flags);
}
-static int
-nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_bar_oneinit(struct nvkm_bar *base)
{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_object *heap;
+ struct nv50_bar *bar = nv50_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
+ static struct lock_class_key bar1_lock;
+ static struct lock_class_key bar3_lock;
struct nvkm_vm *vm;
- struct nv50_bar_priv *priv;
u64 start, limit;
int ret;
- ret = nvkm_bar_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
- NVOBJ_FLAG_HEAP, &priv->mem);
- heap = nv_object(priv->mem);
+ ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
+ &bar->pad);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), heap,
- (device->chipset == 0x50) ? 0x1400 : 0x0200,
- 0, 0, &priv->pad);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
if (ret)
return ret;
/* BAR3 */
start = 0x0100000000ULL;
- limit = start + nv_device_resource_len(device, 3);
+ limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &vm);
+ ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+ atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
- ret = nvkm_gpuobj_new(nv_object(priv), heap,
- ((limit-- - start) >> 12) * 8, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
- vm->pgt[0].refcount[0] = 1;
+ ret = nvkm_vm_boot(vm, limit-- - start);
if (ret)
return ret;
- ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+ ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+ ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
if (ret)
return ret;
- nv_wo32(priv->bar3, 0x00, 0x7fc00000);
- nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
- nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
- nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
- upper_32_bits(start));
- nv_wo32(priv->bar3, 0x10, 0x00000000);
- nv_wo32(priv->bar3, 0x14, 0x00000000);
+ nvkm_kmap(bar->bar3);
+ nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
+ nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
+ nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
+ nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(bar->bar3, 0x10, 0x00000000);
+ nvkm_wo32(bar->bar3, 0x14, 0x00000000);
+ nvkm_done(bar->bar3);
/* BAR1 */
start = 0x0000000000ULL;
- limit = start + nv_device_resource_len(device, 1);
+ limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &vm);
+ ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+ atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
- ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+ ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+ ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
if (ret)
return ret;
- nv_wo32(priv->bar1, 0x00, 0x7fc00000);
- nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
- nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
- nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
- upper_32_bits(start));
- nv_wo32(priv->bar1, 0x10, 0x00000000);
- nv_wo32(priv->bar1, 0x14, 0x00000000);
-
- priv->base.alloc = nvkm_bar_alloc;
- priv->base.kmap = nv50_bar_kmap;
- priv->base.umap = nv50_bar_umap;
- priv->base.unmap = nv50_bar_unmap;
- if (device->chipset == 0x50)
- priv->base.flush = nv50_bar_flush;
- else
- priv->base.flush = g84_bar_flush;
- spin_lock_init(&priv->lock);
+ nvkm_kmap(bar->bar1);
+ nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
+ nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
+ nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
+ nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(bar->bar1, 0x10, 0x00000000);
+ nvkm_wo32(bar->bar1, 0x14, 0x00000000);
+ nvkm_done(bar->bar1);
return 0;
}
-static void
-nv50_bar_dtor(struct nvkm_object *object)
+int
+nv50_bar_init(struct nvkm_bar *base)
{
- struct nv50_bar_priv *priv = (void *)object;
- nvkm_gpuobj_ref(NULL, &priv->bar1);
- nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
- nvkm_gpuobj_ref(NULL, &priv->bar3);
- if (priv->bar3_vm) {
- nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
- nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
- }
- nvkm_gpuobj_ref(NULL, &priv->pgd);
- nvkm_gpuobj_ref(NULL, &priv->pad);
- nvkm_gpuobj_ref(NULL, &priv->mem);
- nvkm_bar_destroy(&priv->base);
-}
-
-static int
-nv50_bar_init(struct nvkm_object *object)
-{
- struct nv50_bar_priv *priv = (void *)object;
- int ret, i;
-
- ret = nvkm_bar_init(&priv->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
- nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(priv, 0x100c80, 0x00060001);
- if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
- nv_error(priv, "vm flush timeout\n");
+ struct nv50_bar *bar = nv50_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
+ int i;
+
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x100c80, 0x00060001);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
return -EBUSY;
- }
- nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
- nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
- nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
- nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+ nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+ nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
for (i = 0; i < 8; i++)
- nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
+ nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
return 0;
}
-static int
-nv50_bar_fini(struct nvkm_object *object, bool suspend)
+void *
+nv50_bar_dtor(struct nvkm_bar *base)
{
- struct nv50_bar_priv *priv = (void *)object;
- return nvkm_bar_fini(&priv->base, suspend);
+ struct nv50_bar *bar = nv50_bar(base);
+ nvkm_gpuobj_del(&bar->bar1);
+ nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
+ nvkm_gpuobj_del(&bar->bar3);
+ if (bar->bar3_vm) {
+ nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
+ nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+ }
+ nvkm_gpuobj_del(&bar->pgd);
+ nvkm_gpuobj_del(&bar->pad);
+ nvkm_gpuobj_del(&bar->mem);
+ return bar;
}
-struct nvkm_oclass
-nv50_bar_oclass = {
- .handle = NV_SUBDEV(BAR, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_bar_ctor,
- .dtor = nv50_bar_dtor,
- .init = nv50_bar_init,
- .fini = nv50_bar_fini,
- },
+int
+nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+ int index, u32 pgd_addr, struct nvkm_bar **pbar)
+{
+ struct nv50_bar *bar;
+ if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_bar_ctor(func, device, index, &bar->base);
+ bar->pgd_addr = pgd_addr;
+ *pbar = &bar->base;
+ return 0;
+}
+
+static const struct nvkm_bar_func
+nv50_bar_func = {
+ .dtor = nv50_bar_dtor,
+ .oneinit = nv50_bar_oneinit,
+ .init = nv50_bar_init,
+ .kmap = nv50_bar_kmap,
+ .umap = nv50_bar_umap,
+ .flush = nv50_bar_flush,
};
+
+int
+nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+}
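
The vm-flush wait above moves from the old nv_wait() helper to the nvkm_msec()
macro: the statement given as its last argument is re-evaluated until it breaks
out, and a negative result means the time budget lapsed, which nv50_bar_init()
maps to -EBUSY. A minimal user-space sketch of that bounded-poll pattern, with
the MMIO read reduced to a volatile pointer and the clock to clock_gettime()
(illustration only, not the kernel implementation):

#include <stdint.h>
#include <time.h>

static int64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Poll until (*reg & mask) clears or timeout_ms elapses; <0 on timeout. */
static int
poll_bit_clear(volatile uint32_t *reg, uint32_t mask, int timeout_ms)
{
	int64_t deadline = now_ns() + (int64_t)timeout_ms * 1000000LL;

	while (*reg & mask) {
		if (now_ns() >= deadline)
			return -1;	/* caller maps this to -EBUSY */
	}
	return 0;
}
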
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
new file mode 100644
index 000000000000..1eb764f22a49
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_BAR_H__
+#define __NV50_BAR_H__
+#define nv50_bar(p) container_of((p), struct nv50_bar, base)
+#include "priv.h"
+
+struct nv50_bar {
+ struct nvkm_bar base;
+ u32 pgd_addr;
+ struct nvkm_gpuobj *mem;
+ struct nvkm_gpuobj *pad;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *bar1_vm;
+ struct nvkm_gpuobj *bar1;
+ struct nvkm_vm *bar3_vm;
+ struct nvkm_gpuobj *bar3;
+};
+
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+ int, u32 pgd_addr, struct nvkm_bar **);
+void *nv50_bar_dtor(struct nvkm_bar *);
+int nv50_bar_oneinit(struct nvkm_bar *);
+int nv50_bar_init(struct nvkm_bar *);
+struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
+int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+#endif
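
The nv50_bar() macro at the top of this new header is the standard
container_of() downcast: given a pointer to the embedded base member, it
recovers the enclosing nv50_bar. A self-contained sketch of the mechanism
(the kernel macro also type-checks the member; this reduction keeps only the
pointer arithmetic):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct derived { int extra; struct base base; };

int main(void)
{
	struct derived d = { .extra = 42, .base = { .id = 7 } };
	struct base *b = &d.base;	/* what generic code passes around */
	struct derived *back = container_of(b, struct derived, base);
	printf("%d\n", back->extra);	/* prints 42 */
	return 0;
}
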
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index aa85f61b48c2..d834ef20db5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,30 +1,19 @@
#ifndef __NVKM_BAR_PRIV_H__
#define __NVKM_BAR_PRIV_H__
+#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
#include <subdev/bar.h>
-#define nvkm_bar_create(p,e,o,d) \
- nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_bar_init(p) \
- nvkm_subdev_init(&(p)->base)
-#define nvkm_bar_fini(p,s) \
- nvkm_subdev_fini(&(p)->base, (s))
+void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
+ int, struct nvkm_bar *);
-int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void nvkm_bar_destroy(struct nvkm_bar *);
-
-void _nvkm_bar_dtor(struct nvkm_object *);
-#define _nvkm_bar_init _nvkm_subdev_init
-#define _nvkm_bar_fini _nvkm_subdev_fini
-
-int nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
- struct nvkm_mem *, struct nvkm_object **);
+struct nvkm_bar_func {
+ void *(*dtor)(struct nvkm_bar *);
+ int (*oneinit)(struct nvkm_bar *);
+ int (*init)(struct nvkm_bar *);
+ struct nvkm_vm *(*kmap)(struct nvkm_bar *);
+ int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+ void (*flush)(struct nvkm_bar *);
+};
void g84_bar_flush(struct nvkm_bar *);
-
-int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void gf100_bar_dtor(struct nvkm_object *);
-int gf100_bar_init(struct nvkm_object *);
#endif
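
The reworked priv.h drops the create/init/fini macros and the nvkm_oclass
plumbing in favour of a constant ops table (struct nvkm_bar_func) that each
implementation hands to nvkm_bar_ctor(). A stripped-down sketch of that
constructor-plus-vtable shape, with hypothetical bar/bar_func types standing
in for the nvkm ones:

struct bar;

struct bar_func {			/* shape mirrors struct nvkm_bar_func */
	void *(*dtor)(struct bar *);
	int   (*init)(struct bar *);
};

struct bar {
	const struct bar_func *func;	/* set once at construction */
	int index;
};

static void
bar_ctor(const struct bar_func *func, int index, struct bar *bar)
{
	bar->func = func;
	bar->index = index;
}

static int
bar_init(struct bar *bar)
{
	/* generic code dispatches through the per-implementation table */
	return bar->func->init ? bar->func->init(bar) : 0;
}
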
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 08eb03fbc203..43f0ba1fba7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -33,14 +33,14 @@ nvbios_M0203Te(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 2 && bit_M.length > 0x04)
- data = nv_ro16(bios, bit_M.offset + 0x03);
+ data = nvbios_rd16(bios, bit_M.offset + 0x03);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *cnt = nv_ro08(bios, data + 0x03);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *cnt = nvbios_rd08(bios, data + 0x03);
return data;
default:
break;
@@ -59,8 +59,8 @@ nvbios_M0203Tp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->type = nv_ro08(bios, data + 0x04);
- info->pointer = nv_ro16(bios, data + 0x05);
+ info->type = nvbios_rd08(bios, data + 0x04);
+ info->pointer = nvbios_rd16(bios, data + 0x05);
break;
default:
break;
@@ -89,9 +89,9 @@ nvbios_M0203Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->type = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
- info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
- info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+ info->type = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
+ info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
+ info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
return data;
default:
break;
@@ -103,12 +103,13 @@ u32
nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
struct nvbios_M0203E *info)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_M0203T M0203T;
u8 cnt, len, idx = 0xff;
u32 data;
if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
- nv_warn(bios, "M0203T not found\n");
+ nvkm_warn(subdev, "M0203T not found\n");
return 0x00000000;
}
@@ -119,7 +120,7 @@ nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
continue;
return data;
default:
- nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+ nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
return 0x00000000;
}
}
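
This file is the first of many below that swap the nv_ro08/16/32 object
methods for the nvbios_rd08/16/32 accessors. Per the implementations removed
from base.c later in this diff, the VBIOS image is a plain byte array and
multi-byte fields are little-endian (get_unaligned_le16/le32), so an
equivalent standalone reduction looks like:

#include <stdint.h>

static inline uint8_t
vbios_rd08(const uint8_t *data, uint32_t addr)
{
	return data[addr];
}

static inline uint16_t
vbios_rd16(const uint8_t *data, uint32_t addr)
{
	return (uint16_t)(data[addr] | data[addr + 1] << 8);
}

static inline uint32_t
vbios_rd32(const uint8_t *data, uint32_t addr)
{
	return data[addr] | data[addr + 1] << 8 | data[addr + 2] << 16 |
	       (uint32_t)data[addr + 3] << 24;
}
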
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index e1a8ad5f3066..293a6af1b1d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -34,16 +34,16 @@ nvbios_M0205Te(struct nvkm_bios *bios,
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 2 && bit_M.length > 0x08)
- data = nv_ro32(bios, bit_M.offset + 0x05);
+ data = nvbios_rd32(bios, bit_M.offset + 0x05);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *ssz = nv_ro08(bios, data + 0x03);
- *snr = nv_ro08(bios, data + 0x04);
- *cnt = nv_ro08(bios, data + 0x05);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *ssz = nvbios_rd08(bios, data + 0x03);
+ *snr = nvbios_rd08(bios, data + 0x04);
+ *cnt = nvbios_rd08(bios, data + 0x05);
return data;
default:
break;
@@ -63,7 +63,7 @@ nvbios_M0205Tp(struct nvkm_bios *bios,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->freq = nv_ro16(bios, data + 0x06);
+ info->freq = nvbios_rd16(bios, data + 0x06);
break;
default:
break;
@@ -96,7 +96,7 @@ nvbios_M0205Ep(struct nvkm_bios *bios, int idx,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->type = nv_ro08(bios, data + 0x00) & 0x0f;
+ info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
return data;
default:
break;
@@ -126,7 +126,7 @@ nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->data = nv_ro08(bios, data + 0x00);
+ info->data = nvbios_rd08(bios, data + 0x00);
return data;
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index 3026920c3358..95d49a526472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -34,16 +34,16 @@ nvbios_M0209Te(struct nvkm_bios *bios,
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 2 && bit_M.length > 0x0c)
- data = nv_ro32(bios, bit_M.offset + 0x09);
+ data = nvbios_rd32(bios, bit_M.offset + 0x09);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *ssz = nv_ro08(bios, data + 0x03);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *ssz = nvbios_rd08(bios, data + 0x03);
*snr = 1;
- *cnt = nv_ro08(bios, data + 0x04);
+ *cnt = nvbios_rd08(bios, data + 0x04);
return data;
default:
break;
@@ -78,12 +78,12 @@ nvbios_M0209Ep(struct nvkm_bios *bios, int idx,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6;
- info->bits = nv_ro08(bios, data + 0x00) & 0x3f;
- info->modulo = nv_ro08(bios, data + 0x01);
- info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
- info->v02_07 = nv_ro08(bios, data + 0x02) & 0x07;
- info->v03 = nv_ro08(bios, data + 0x03);
+ info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
+ info->bits = nvbios_rd08(bios, data + 0x00) & 0x3f;
+ info->modulo = nvbios_rd08(bios, data + 0x01);
+ info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+ info->v02_07 = nvbios_rd08(bios, data + 0x02) & 0x07;
+ info->v03 = nvbios_rd08(bios, data + 0x03);
return data;
default:
break;
@@ -122,7 +122,7 @@ nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
u32 mask = (1ULL << M0209E.bits) - 1;
u16 off = bits / 8;
u8 mod = bits % 8;
- info->data[i] = nv_ro32(bios, data + off);
+ info->data[i] = nvbios_rd32(bios, data + off);
info->data[i] = info->data[i] >> mod;
info->data[i] = info->data[i] & mask;
}
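
nvbios_M0209Sp() above reads each sub-entry as a bit window: a 32-bit
little-endian load at the byte containing the field, shifted by the bit
remainder and masked to the field width. The same extraction as a standalone
helper (assuming, like the original, that the window plus its shift fits in
the 32-bit load):

#include <stdint.h>

static uint32_t
le32(const uint8_t *p)	/* stands in for nvbios_rd32() */
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t
extract_bits(const uint8_t *data, uint32_t bit_off, unsigned bits)
{
	uint32_t mask = bits >= 32 ? 0xffffffffu : (1u << bits) - 1;
	uint32_t off = bit_off / 8;	/* byte holding the first bit */
	unsigned mod = bit_off % 8;	/* shift within that byte */

	return (le32(&data[off]) >> mod) & mask;
}
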
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index b72edcf849b6..3f7db3eb3ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -34,15 +34,15 @@ nvbios_P0260Te(struct nvkm_bios *bios,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length > 0x63)
- data = nv_ro32(bios, bit_P.offset + 0x60);
+ data = nvbios_rd32(bios, bit_P.offset + 0x60);
if (data) {
- *ver = nv_ro08(bios, data + 0);
+ *ver = nvbios_rd08(bios, data + 0);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, data + 1);
- *cnt = nv_ro08(bios, data + 2);
+ *hdr = nvbios_rd08(bios, data + 1);
+ *cnt = nvbios_rd08(bios, data + 2);
*len = 4;
- *xnr = nv_ro08(bios, data + 3);
+ *xnr = nvbios_rd08(bios, data + 3);
*xsz = 4;
return data;
default:
@@ -72,7 +72,7 @@ nvbios_P0260Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->data = nv_ro32(bios, data);
+ info->data = nvbios_rd32(bios, data);
return data;
default:
break;
@@ -98,7 +98,7 @@ nvbios_P0260Xp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
- info->data = nv_ro32(bios, data);
+ info->data = nvbios_rd32(bios, data);
return data;
default:
break;
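
The switch (!!data * *ver) construction seen in this parser (and throughout
the bios subdev) folds two checks into one dispatch: !!data collapses the
table pointer to 0 or 1, so the switch value is 0 whenever the lookup failed
and the version byte otherwise, sending both "not found" and "unknown
version" to the default path. Reduced to its skeleton:

#include <stdint.h>
#include <string.h>

struct info { uint32_t data; };

static uint16_t
parse(uint16_t data, uint8_t ver, struct info *info)
{
	memset(info, 0x00, sizeof(*info));
	switch (!!data * ver) {
	case 0x10:		/* table found and laid out as version 1.0 */
		info->data = 0;	/* field reads would go here */
		return data;
	default:		/* table missing, or version unknown */
		break;
	}
	return 0x0000;
}
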
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8db204f92ed3..79536897efaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -53,6 +53,20 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
}
int
+nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
+{
+ unsigned char c1, c2;
+
+ while (len--) {
+ c1 = nvbios_rd08(bios, addr++);
+ c2 = *(str++);
+ if (c1 != c2)
+ return c1 - c2;
+ }
+ return 0;
+}
+
+int
nvbios_extend(struct nvkm_bios *bios, u32 length)
{
if (bios->size < length) {
@@ -69,62 +83,29 @@ nvbios_extend(struct nvkm_bios *bios, u32 length)
return 0;
}
-static u8
-nvkm_bios_rd08(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_bios *bios = (void *)object;
- return bios->data[addr];
-}
-
-static u16
-nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bios_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_bios *bios = (void *)object;
- return get_unaligned_le16(&bios->data[addr]);
-}
-
-static u32
-nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_bios *bios = (void *)object;
- return get_unaligned_le32(&bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- struct nvkm_bios *bios = (void *)object;
- bios->data[addr] = data;
-}
-
-static void
-nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- struct nvkm_bios *bios = (void *)object;
- put_unaligned_le16(data, &bios->data[addr]);
+ struct nvkm_bios *bios = nvkm_bios(subdev);
+ kfree(bios->data);
+ return bios;
}
-static void
-nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_bios *bios = (void *)object;
- put_unaligned_le32(data, &bios->data[addr]);
-}
+static const struct nvkm_subdev_func
+nvkm_bios = {
+ .dtor = nvkm_bios_dtor,
+};
-static int
-nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
struct bit_entry bit_i;
int ret;
- ret = nvkm_subdev_create(parent, engine, oclass, 0,
- "VBIOS", "bios", &bios);
- *pobject = nv_object(bios);
- if (ret)
- return ret;
+ if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
ret = nvbios_shadow(bios);
if (ret)
@@ -134,73 +115,33 @@ nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
"\xff\x7f""NV\0", 5);
if (bios->bmp_offset) {
- nv_info(bios, "BMP version %x.%x\n",
- bmp_version(bios) >> 8,
- bmp_version(bios) & 0xff);
+ nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
+ bmp_version(bios) >> 8,
+ bmp_version(bios) & 0xff);
}
bios->bit_offset = nvbios_findstr(bios->data, bios->size,
"\xff\xb8""BIT", 5);
if (bios->bit_offset)
- nv_info(bios, "BIT signature found\n");
+ nvkm_debug(&bios->subdev, "BIT signature found\n");
/* determine the vbios version number */
if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
- bios->version.major = nv_ro08(bios, bit_i.offset + 3);
- bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
- bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
- bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
- bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
+ bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
+ bios->version.chip = nvbios_rd08(bios, bit_i.offset + 2);
+ bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
+ bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
+ bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
} else
if (bmp_version(bios)) {
- bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
- bios->version.chip = nv_ro08(bios, bios->bmp_offset + 12);
- bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
- bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+ bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
+ bios->version.chip = nvbios_rd08(bios, bios->bmp_offset + 12);
+ bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
+ bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
}
- nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
- bios->version.major, bios->version.chip,
- bios->version.minor, bios->version.micro, bios->version.patch);
-
+ nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
+ bios->version.major, bios->version.chip,
+ bios->version.minor, bios->version.micro, bios->version.patch);
return 0;
}
-
-static void
-nvkm_bios_dtor(struct nvkm_object *object)
-{
- struct nvkm_bios *bios = (void *)object;
- kfree(bios->data);
- nvkm_subdev_destroy(&bios->base);
-}
-
-static int
-nvkm_bios_init(struct nvkm_object *object)
-{
- struct nvkm_bios *bios = (void *)object;
- return nvkm_subdev_init(&bios->base);
-}
-
-static int
-nvkm_bios_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_bios *bios = (void *)object;
- return nvkm_subdev_fini(&bios->base, suspend);
-}
-
-struct nvkm_oclass
-nvkm_bios_oclass = {
- .handle = NV_SUBDEV(VBIOS, 0x00),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nvkm_bios_ctor,
- .dtor = nvkm_bios_dtor,
- .init = nvkm_bios_init,
- .fini = nvkm_bios_fini,
- .rd08 = nvkm_bios_rd08,
- .rd16 = nvkm_bios_rd16,
- .rd32 = nvkm_bios_rd32,
- .wr08 = nvkm_bios_wr08,
- .wr16 = nvkm_bios_wr16,
- .wr32 = nvkm_bios_wr32,
- },
-};
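
Besides moving the bios subdev onto the new nvkm_subdev_func scheme, the
base.c changes add nvbios_memcmp(), used by the DCB v1.5 probe further down
(the "DEV_REC" signature). It is an ordinary memcmp over the shadowed image;
a standalone equivalent:

#include <stdint.h>

static int
vbios_memcmp(const uint8_t *image, uint32_t addr, const char *str,
	     uint32_t len)
{
	while (len--) {
		unsigned char c1 = image[addr++];
		unsigned char c2 = (unsigned char)*(str++);
		if (c1 != c2)
			return c1 - c2;
	}
	return 0;
}
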
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index eab540496cdf..070ff33f8d5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -28,18 +28,18 @@ int
bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
{
if (likely(bios->bit_offset)) {
- u8 entries = nv_ro08(bios, bios->bit_offset + 10);
+ u8 entries = nvbios_rd08(bios, bios->bit_offset + 10);
u32 entry = bios->bit_offset + 12;
while (entries--) {
- if (nv_ro08(bios, entry + 0) == id) {
- bit->id = nv_ro08(bios, entry + 0);
- bit->version = nv_ro08(bios, entry + 1);
- bit->length = nv_ro16(bios, entry + 2);
- bit->offset = nv_ro16(bios, entry + 4);
+ if (nvbios_rd08(bios, entry + 0) == id) {
+ bit->id = nvbios_rd08(bios, entry + 0);
+ bit->version = nvbios_rd08(bios, entry + 1);
+ bit->length = nvbios_rd16(bios, entry + 2);
+ bit->offset = nvbios_rd16(bios, entry + 4);
return 0;
}
- entry += nv_ro08(bios, bios->bit_offset + 9);
+ entry += nvbios_rd08(bios, bios->bit_offset + 9);
}
return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 12e958533f46..3756ec91a88d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -34,17 +34,17 @@ nvbios_boostTe(struct nvkm_bios *bios,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- boost = nv_ro16(bios, bit_P.offset + 0x30);
+ boost = nvbios_rd16(bios, bit_P.offset + 0x30);
if (boost) {
- *ver = nv_ro08(bios, boost + 0);
+ *ver = nvbios_rd08(bios, boost + 0);
switch (*ver) {
case 0x11:
- *hdr = nv_ro08(bios, boost + 1);
- *cnt = nv_ro08(bios, boost + 5);
- *len = nv_ro08(bios, boost + 2);
- *snr = nv_ro08(bios, boost + 4);
- *ssz = nv_ro08(bios, boost + 3);
+ *hdr = nvbios_rd08(bios, boost + 1);
+ *cnt = nvbios_rd08(bios, boost + 5);
+ *len = nvbios_rd08(bios, boost + 2);
+ *snr = nvbios_rd08(bios, boost + 4);
+ *ssz = nvbios_rd08(bios, boost + 3);
return boost;
default:
break;
@@ -78,9 +78,9 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
- info->min = nv_ro16(bios, data + 0x02) * 1000;
- info->max = nv_ro16(bios, data + 0x04) * 1000;
+ info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+ info->min = nvbios_rd16(bios, data + 0x02) * 1000;
+ info->max = nvbios_rd16(bios, data + 0x04) * 1000;
}
return data;
}
@@ -117,10 +117,10 @@ nvbios_boostSp(struct nvkm_bios *bios, int idx,
data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->domain = nv_ro08(bios, data + 0x00);
- info->percent = nv_ro08(bios, data + 0x01);
- info->min = nv_ro16(bios, data + 0x02) * 1000;
- info->max = nv_ro16(bios, data + 0x04) * 1000;
+ info->domain = nvbios_rd08(bios, data + 0x00);
+ info->percent = nvbios_rd08(bios, data + 0x01);
+ info->min = nvbios_rd16(bios, data + 0x02) * 1000;
+ info->max = nvbios_rd16(bios, data + 0x04) * 1000;
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 706a1650a4f2..276823426332 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -30,12 +30,12 @@ nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
- u32 data = nv_ro16(bios, dcb + 0x14);
+ u32 data = nvbios_rd16(bios, dcb + 0x14);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *cnt = nv_ro08(bios, data + 2);
- *len = nv_ro08(bios, data + 3);
+ *ver = nvbios_rd08(bios, data + 0);
+ *hdr = nvbios_rd08(bios, data + 1);
+ *cnt = nvbios_rd08(bios, data + 2);
+ *len = nvbios_rd08(bios, data + 3);
return data;
}
}
@@ -77,18 +77,18 @@ nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
switch (!!data * *ver) {
case 0x30:
case 0x40:
- info->type = nv_ro08(bios, data + 0x00);
- info->location = nv_ro08(bios, data + 0x01) & 0x0f;
- info->hpd = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
- info->dp = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+ info->type = nvbios_rd08(bios, data + 0x00);
+ info->location = nvbios_rd08(bios, data + 0x01) & 0x0f;
+ info->hpd = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
+ info->dp = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
if (*len < 4)
return data;
- info->hpd |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
- info->dp |= nv_ro08(bios, data + 0x02) & 0x0c;
- info->di = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
- info->hpd |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
- info->sr = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
- info->lcdid = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+ info->hpd |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
+ info->dp |= nvbios_rd08(bios, data + 0x02) & 0x0c;
+ info->di = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
+ info->hpd |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
+ info->sr = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+ info->lcdid = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
return data;
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 16f7ad8a4f80..32e01624a162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -34,17 +34,17 @@ nvbios_cstepTe(struct nvkm_bios *bios,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- cstep = nv_ro16(bios, bit_P.offset + 0x34);
+ cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
if (cstep) {
- *ver = nv_ro08(bios, cstep + 0);
+ *ver = nvbios_rd08(bios, cstep + 0);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, cstep + 1);
- *cnt = nv_ro08(bios, cstep + 3);
- *len = nv_ro08(bios, cstep + 2);
- *xnr = nv_ro08(bios, cstep + 5);
- *xsz = nv_ro08(bios, cstep + 4);
+ *hdr = nvbios_rd08(bios, cstep + 1);
+ *cnt = nvbios_rd08(bios, cstep + 3);
+ *len = nvbios_rd08(bios, cstep + 2);
+ *xnr = nvbios_rd08(bios, cstep + 5);
+ *xsz = nvbios_rd08(bios, cstep + 4);
return cstep;
default:
break;
@@ -75,8 +75,8 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
- info->index = nv_ro08(bios, data + 0x03);
+ info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+ info->index = nvbios_rd08(bios, data + 0x03);
}
return data;
}
@@ -113,10 +113,10 @@ nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->freq = nv_ro16(bios, data + 0x00) * 1000;
- info->unkn[0] = nv_ro08(bios, data + 0x02);
- info->unkn[1] = nv_ro08(bios, data + 0x03);
- info->voltage = nv_ro08(bios, data + 0x04);
+ info->freq = nvbios_rd16(bios, data + 0x00) * 1000;
+ info->unkn[0] = nvbios_rd08(bios, data + 0x02);
+ info->unkn[1] = nvbios_rd08(bios, data + 0x03);
+ info->voltage = nvbios_rd08(bios, data + 0x04);
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8d78140f9401..8304b806f2a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -24,38 +24,37 @@
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
-#include <core/device.h>
-
u16
dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct nvkm_device *device = nv_device(bios);
+ struct nvkm_subdev *subdev = &bios->subdev;
+ struct nvkm_device *device = subdev->device;
u16 dcb = 0x0000;
if (device->card_type > NV_04)
- dcb = nv_ro16(bios, 0x36);
+ dcb = nvbios_rd16(bios, 0x36);
if (!dcb) {
- nv_warn(bios, "DCB table not found\n");
+ nvkm_warn(subdev, "DCB table not found\n");
return dcb;
}
- *ver = nv_ro08(bios, dcb);
+ *ver = nvbios_rd08(bios, dcb);
if (*ver >= 0x42) {
- nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
+ nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
return 0x0000;
} else
if (*ver >= 0x30) {
- if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
- *hdr = nv_ro08(bios, dcb + 1);
- *cnt = nv_ro08(bios, dcb + 2);
- *len = nv_ro08(bios, dcb + 3);
+ if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
+ *hdr = nvbios_rd08(bios, dcb + 1);
+ *cnt = nvbios_rd08(bios, dcb + 2);
+ *len = nvbios_rd08(bios, dcb + 3);
return dcb;
}
} else
if (*ver >= 0x20) {
- if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
- u16 i2c = nv_ro16(bios, dcb + 2);
+ if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
+ u16 i2c = nvbios_rd16(bios, dcb + 2);
*hdr = 8;
*cnt = (i2c - dcb) / 8;
*len = 8;
@@ -63,8 +62,8 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
} else
if (*ver >= 0x15) {
- if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
- u16 i2c = nv_ro16(bios, dcb + 2);
+ if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
+ u16 i2c = nvbios_rd16(bios, dcb + 2);
*hdr = 4;
*cnt = (i2c - dcb) / 10;
*len = 10;
@@ -88,11 +87,11 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
*
* v1.1 (NV5+, maybe some NV4) is entirely unhelpful
*/
- nv_warn(bios, "DCB contains no useful data\n");
+ nvkm_debug(subdev, "DCB contains no useful data\n");
return 0x0000;
}
- nv_warn(bios, "DCB header validation failed\n");
+ nvkm_warn(subdev, "DCB header validation failed\n");
return 0x0000;
}
@@ -126,7 +125,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
memset(outp, 0x00, sizeof(*outp));
if (dcb) {
if (*ver >= 0x20) {
- u32 conn = nv_ro32(bios, dcb + 0x00);
+ u32 conn = nvbios_rd32(bios, dcb + 0x00);
outp->or = (conn & 0x0f000000) >> 24;
outp->location = (conn & 0x00300000) >> 20;
outp->bus = (conn & 0x000f0000) >> 16;
@@ -140,7 +139,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
}
if (*ver >= 0x40) {
- u32 conf = nv_ro32(bios, dcb + 0x04);
+ u32 conf = nvbios_rd32(bios, dcb + 0x04);
switch (outp->type) {
case DCB_OUTPUT_DP:
switch (conf & 0x00e00000) {
@@ -156,20 +155,19 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
break;
}
- outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
- if (*ver < 0x41) {
- switch (outp->dpconf.link_nr) {
- case 0x0f:
- outp->dpconf.link_nr = 4;
- break;
- case 0x03:
- outp->dpconf.link_nr = 2;
- break;
- case 0x01:
- default:
- outp->dpconf.link_nr = 1;
- break;
- }
+ switch ((conf & 0x0f000000) >> 24) {
+ case 0xf:
+ case 0x4:
+ outp->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ case 0x2:
+ outp->dpconf.link_nr = 2;
+ break;
+ case 0x1:
+ default:
+ outp->dpconf.link_nr = 1;
+ break;
}
/* fall-through... */
@@ -215,14 +213,14 @@ dcb_outp_foreach(struct nvkm_bios *bios, void *data,
u16 outp;
while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
- if (nv_ro32(bios, outp) == 0x00000000)
+ if (nvbios_rd32(bios, outp) == 0x00000000)
break; /* seen on an NV11 with DCB v1.5 */
- if (nv_ro32(bios, outp) == 0xffffffff)
+ if (nvbios_rd32(bios, outp) == 0xffffffff)
break; /* seen on an NV17 with DCB v2.0 */
- if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+ if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
continue;
- if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+ if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
break;
ret = exec(bios, data, idx, outp);
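
dcb_outp_foreach() above keeps its sentinel handling: all-zero and all-ones
words terminate the walk (the NV11/NV17 quirks in the comments), unused slots
are skipped, and an explicit end-of-list marker stops it. A sketch of that
loop over an in-memory table; ENTRY_UNUSED/ENTRY_EOL are assumed stand-ins
for the DCB_OUTPUT_* constants, and the first entry byte (little-endian) is
compared as the type field:

#include <stdint.h>

enum { ENTRY_EOL = 0x0e, ENTRY_UNUSED = 0x0f };	/* assumed values */

static int
foreach_outp(const uint32_t *tbl, int n,
	     int (*exec)(void *priv, int idx, uint32_t ent), void *priv)
{
	for (int idx = 0; idx < n; idx++) {
		uint32_t ent = tbl[idx];
		if (ent == 0x00000000 || ent == 0xffffffff)
			break;		/* terminator quirks */
		if ((uint8_t)ent == ENTRY_UNUSED)
			continue;	/* hole in the table, keep walking */
		if ((uint8_t)ent == ENTRY_EOL)
			break;		/* explicit end-of-list */
		int ret = exec(priv, idx, ent);
		if (ret)
			return ret;
	}
	return 0;
}
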
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 262c410b7ee2..a5e92135cd77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -33,17 +33,17 @@ nvbios_disp_table(struct nvkm_bios *bios,
if (!bit_entry(bios, 'U', &U)) {
if (U.version == 1) {
- u16 data = nv_ro16(bios, U.offset);
+ u16 data = nvbios_rd16(bios, U.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x20:
case 0x21:
case 0x22:
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *cnt = nv_ro08(bios, data + 0x03);
- *sub = nv_ro08(bios, data + 0x04);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *cnt = nvbios_rd08(bios, data + 0x03);
+ *sub = nvbios_rd08(bios, data + 0x04);
return data;
default:
break;
@@ -72,7 +72,7 @@ nvbios_disp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub,
{
u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
if (data && *len >= 2) {
- info->data = nv_ro16(bios, data + 0);
+ info->data = nvbios_rd16(bios, data + 0);
return data;
}
return 0x0000;
@@ -85,7 +85,7 @@ nvbios_outp_entry(struct nvkm_bios *bios, u8 idx,
struct nvbios_disp info;
u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
if (data) {
- *cnt = nv_ro08(bios, info.data + 0x05);
+ *cnt = nvbios_rd08(bios, info.data + 0x05);
*len = 0x06;
data = info.data;
}
@@ -98,15 +98,15 @@ nvbios_outp_parse(struct nvkm_bios *bios, u8 idx,
{
u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
if (data && *hdr >= 0x0a) {
- info->type = nv_ro16(bios, data + 0x00);
- info->mask = nv_ro32(bios, data + 0x02);
+ info->type = nvbios_rd16(bios, data + 0x00);
+ info->mask = nvbios_rd32(bios, data + 0x02);
if (*ver <= 0x20) /* match any link */
info->mask |= 0x00c0;
- info->script[0] = nv_ro16(bios, data + 0x06);
- info->script[1] = nv_ro16(bios, data + 0x08);
+ info->script[0] = nvbios_rd16(bios, data + 0x06);
+ info->script[1] = nvbios_rd16(bios, data + 0x08);
info->script[2] = 0x0000;
if (*hdr >= 0x0c)
- info->script[2] = nv_ro16(bios, data + 0x0a);
+ info->script[2] = nvbios_rd16(bios, data + 0x0a);
return data;
}
return 0x0000;
@@ -141,9 +141,9 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
- info->match = nv_ro16(bios, data + 0x00);
- info->clkcmp[0] = nv_ro16(bios, data + 0x02);
- info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+ info->match = nvbios_rd16(bios, data + 0x00);
+ info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
+ info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
}
return data;
}
@@ -164,8 +164,8 @@ u16
nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
{
while (cmp) {
- if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
- return nv_ro16(bios, cmp + 0x02);
+ if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
+ return nvbios_rd16(bios, cmp + 0x02);
cmp += 0x04;
}
return 0x0000;
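
nvbios_oclk_match() walks (threshold, result) pairs and returns the first
result whose threshold the requested clock meets; the stored thresholds
appear to be in 10 kHz units given the khz / 10 comparison. A bounded sketch
(the original iterates by raw table pointer with no explicit count):

#include <stdint.h>
#include <stddef.h>

struct clkcmp { uint16_t thresh_10khz, result; };

static uint16_t
oclk_match(const struct clkcmp *cmp, size_t n, uint32_t khz)
{
	for (size_t i = 0; i < n; i++) {
		if (khz / 10 >= cmp[i].thresh_10khz)
			return cmp[i].result;
	}
	return 0x0000;
}
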
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 95970faae6c8..05332476354a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -32,17 +32,17 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'd', &d)) {
if (d.version == 1 && d.length >= 2) {
- u16 data = nv_ro16(bios, d.offset);
+ u16 data = nvbios_rd16(bios, d.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x21:
case 0x30:
case 0x40:
case 0x41:
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *cnt = nv_ro08(bios, data + 0x03);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *cnt = nvbios_rd08(bios, data + 0x03);
return data;
default:
break;
@@ -60,17 +60,17 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
{
u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
if (data && idx < *cnt) {
- u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+ u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
switch (*ver * !!outp) {
case 0x21:
case 0x30:
- *hdr = nv_ro08(bios, data + 0x04);
- *len = nv_ro08(bios, data + 0x05);
- *cnt = nv_ro08(bios, outp + 0x04);
+ *hdr = nvbios_rd08(bios, data + 0x04);
+ *len = nvbios_rd08(bios, data + 0x05);
+ *cnt = nvbios_rd08(bios, outp + 0x04);
break;
case 0x40:
case 0x41:
- *hdr = nv_ro08(bios, data + 0x04);
+ *hdr = nvbios_rd08(bios, data + 0x04);
*cnt = 0;
*len = 0;
break;
@@ -91,31 +91,31 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
if (data && *ver) {
- info->type = nv_ro16(bios, data + 0x00);
- info->mask = nv_ro16(bios, data + 0x02);
+ info->type = nvbios_rd16(bios, data + 0x00);
+ info->mask = nvbios_rd16(bios, data + 0x02);
switch (*ver) {
case 0x21:
case 0x30:
- info->flags = nv_ro08(bios, data + 0x05);
- info->script[0] = nv_ro16(bios, data + 0x06);
- info->script[1] = nv_ro16(bios, data + 0x08);
- info->lnkcmp = nv_ro16(bios, data + 0x0a);
+ info->flags = nvbios_rd08(bios, data + 0x05);
+ info->script[0] = nvbios_rd16(bios, data + 0x06);
+ info->script[1] = nvbios_rd16(bios, data + 0x08);
+ info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
if (*len >= 0x0f) {
- info->script[2] = nv_ro16(bios, data + 0x0c);
- info->script[3] = nv_ro16(bios, data + 0x0e);
+ info->script[2] = nvbios_rd16(bios, data + 0x0c);
+ info->script[3] = nvbios_rd16(bios, data + 0x0e);
}
if (*len >= 0x11)
- info->script[4] = nv_ro16(bios, data + 0x10);
+ info->script[4] = nvbios_rd16(bios, data + 0x10);
break;
case 0x40:
case 0x41:
- info->flags = nv_ro08(bios, data + 0x04);
- info->script[0] = nv_ro16(bios, data + 0x05);
- info->script[1] = nv_ro16(bios, data + 0x07);
- info->lnkcmp = nv_ro16(bios, data + 0x09);
- info->script[2] = nv_ro16(bios, data + 0x0b);
- info->script[3] = nv_ro16(bios, data + 0x0d);
- info->script[4] = nv_ro16(bios, data + 0x0f);
+ info->flags = nvbios_rd08(bios, data + 0x04);
+ info->script[0] = nvbios_rd16(bios, data + 0x05);
+ info->script[1] = nvbios_rd16(bios, data + 0x07);
+ info->lnkcmp = nvbios_rd16(bios, data + 0x09);
+ info->script[2] = nvbios_rd16(bios, data + 0x0b);
+ info->script[3] = nvbios_rd16(bios, data + 0x0d);
+ info->script[4] = nvbios_rd16(bios, data + 0x0f);
break;
default:
data = 0x0000;
@@ -147,8 +147,9 @@ nvbios_dpcfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
if (*ver >= 0x40) {
outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
*hdr = *hdr + (*len * * cnt);
- *len = nv_ro08(bios, outp + 0x06);
- *cnt = nv_ro08(bios, outp + 0x07);
+ *len = nvbios_rd08(bios, outp + 0x06);
+ *cnt = nvbios_rd08(bios, outp + 0x07) *
+ nvbios_rd08(bios, outp + 0x05);
}
if (idx < *cnt)
@@ -167,17 +168,17 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
if (data) {
switch (*ver) {
case 0x21:
- info->dc = nv_ro08(bios, data + 0x02);
- info->pe = nv_ro08(bios, data + 0x03);
- info->tx_pu = nv_ro08(bios, data + 0x04);
+ info->dc = nvbios_rd08(bios, data + 0x02);
+ info->pe = nvbios_rd08(bios, data + 0x03);
+ info->tx_pu = nvbios_rd08(bios, data + 0x04);
break;
case 0x30:
case 0x40:
case 0x41:
- info->pc = nv_ro08(bios, data + 0x00);
- info->dc = nv_ro08(bios, data + 0x01);
- info->pe = nv_ro08(bios, data + 0x02);
- info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
+ info->pc = nvbios_rd08(bios, data + 0x00);
+ info->dc = nvbios_rd08(bios, data + 0x01);
+ info->pe = nvbios_rd08(bios, data + 0x02);
+ info->tx_pu = nvbios_rd08(bios, data + 0x03);
break;
default:
data = 0x0000;
@@ -196,17 +197,15 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u16 data;
if (*ver >= 0x30) {
- /*XXX: there's a second set of these on at least 4.1, that
- * i've witnessed nvidia using instead of the first
- * on gm204. figure out what/why
- */
const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
+ if (*ver >= 0x40 && *hdr >= 0x12)
+ idx += nvbios_rd08(bios, outp + 0x11) * 40;
} else {
while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
ver, hdr, cnt, len))) {
- if (nv_ro08(bios, data + 0x00) == vs &&
- nv_ro08(bios, data + 0x01) == pe)
+ if (nvbios_rd08(bios, data + 0x00) == vs &&
+ nvbios_rd08(bios, data + 0x01) == pe)
break;
}
}
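
In nvbios_dpcfg_match() the v3.0+ path computes the entry index directly:
ten entries per pre-cursor group, with the vsoff[] table giving the starting
row of each voltage-swing level. The shrinking offsets (0, 4, 7, 9, i.e.
groups of 4, 3, 2, 1) would match DP's rule that higher swing permits fewer
pre-emphasis levels, though that reading of the layout is inferred rather
than documented. The computation in isolation:

#include <stdint.h>

static int
dpcfg_index(uint8_t pc, uint8_t vs, uint8_t pe)
{
	static const uint8_t vsoff[] = { 0, 4, 7, 9 };

	return (pc * 10) + vsoff[vs] + pe;	/* vs must be 0..3 */
}
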
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index a8503a1854c4..c9e6f6ff7c50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -35,14 +35,14 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
return 0x0000;
- extdev = nv_ro16(bios, dcb + 18);
+ extdev = nvbios_rd16(bios, dcb + 18);
if (!extdev)
return 0x0000;
- *ver = nv_ro08(bios, extdev + 0);
- *hdr = nv_ro08(bios, extdev + 1);
- *cnt = nv_ro08(bios, extdev + 2);
- *len = nv_ro08(bios, extdev + 3);
+ *ver = nvbios_rd08(bios, extdev + 0);
+ *hdr = nvbios_rd08(bios, extdev + 1);
+ *cnt = nvbios_rd08(bios, extdev + 2);
+ *len = nvbios_rd08(bios, extdev + 3);
return extdev + *hdr;
}
@@ -60,9 +60,9 @@ static void
extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
struct nvbios_extdev_func *entry)
{
- entry->type = nv_ro08(bios, offset + 0);
- entry->addr = nv_ro08(bios, offset + 1);
- entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+ entry->type = nvbios_rd08(bios, offset + 0);
+ entry->addr = nvbios_rd08(bios, offset + 1);
+ entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 8dba70d9d9a9..43006db6fd58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -33,15 +33,15 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length >= 0x5a)
- fan = nv_ro16(bios, bit_P.offset + 0x58);
+ fan = nvbios_rd16(bios, bit_P.offset + 0x58);
if (fan) {
- *ver = nv_ro08(bios, fan + 0);
+ *ver = nvbios_rd08(bios, fan + 0);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, fan + 1);
- *len = nv_ro08(bios, fan + 2);
- *cnt = nv_ro08(bios, fan + 3);
+ *hdr = nvbios_rd08(bios, fan + 1);
+ *len = nvbios_rd08(bios, fan + 2);
+ *cnt = nvbios_rd08(bios, fan + 3);
return fan;
default:
break;
@@ -69,7 +69,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
if (data) {
- u8 type = nv_ro08(bios, data + 0x00);
+ u8 type = nvbios_rd08(bios, data + 0x00);
switch (type) {
case 0:
fan->type = NVBIOS_THERM_FAN_TOGGLE;
@@ -83,10 +83,10 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
fan->type = NVBIOS_THERM_FAN_UNK;
}
- fan->min_duty = nv_ro08(bios, data + 0x02);
- fan->max_duty = nv_ro08(bios, data + 0x03);
+ fan->min_duty = nvbios_rd08(bios, data + 0x02);
+ fan->max_duty = nvbios_rd08(bios, data + 0x03);
- fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
+ fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;
}
return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 8ce154d88f51..2107b558437a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -33,22 +33,22 @@ dcb_gpio_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
if (*ver >= 0x30 && *hdr >= 0x0c)
- data = nv_ro16(bios, dcb + 0x0a);
+ data = nvbios_rd16(bios, dcb + 0x0a);
else
- if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
- data = nv_ro16(bios, dcb - 0x0f);
+ if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
+ data = nvbios_rd16(bios, dcb - 0x0f);
if (data) {
- *ver = nv_ro08(bios, data + 0x00);
+ *ver = nvbios_rd08(bios, data + 0x00);
if (*ver < 0x30) {
*hdr = 3;
- *cnt = nv_ro08(bios, data + 0x02);
- *len = nv_ro08(bios, data + 0x01);
+ *cnt = nvbios_rd08(bios, data + 0x02);
+ *len = nvbios_rd08(bios, data + 0x01);
} else
if (*ver <= 0x41) {
- *hdr = nv_ro08(bios, data + 0x01);
- *cnt = nv_ro08(bios, data + 0x02);
- *len = nv_ro08(bios, data + 0x03);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *cnt = nvbios_rd08(bios, data + 0x02);
+ *len = nvbios_rd08(bios, data + 0x03);
} else {
data = 0x0000;
}
@@ -81,7 +81,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
if (data) {
if (*ver < 0x40) {
- u16 info = nv_ro16(bios, data);
+ u16 info = nvbios_rd16(bios, data);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x001f) >> 0,
.func = (info & 0x07e0) >> 5,
@@ -91,7 +91,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
};
} else
if (*ver < 0x41) {
- u32 info = nv_ro32(bios, data);
+ u32 info = nvbios_rd32(bios, data);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x0000001f) >> 0,
.func = (info & 0x0000ff00) >> 8,
@@ -100,8 +100,8 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
.param = !!(info & 0x80000000),
};
} else {
- u32 info = nv_ro32(bios, data + 0);
- u8 info1 = nv_ro32(bios, data + 4);
+ u32 info = nvbios_rd32(bios, data + 0);
+ u8 info1 = nvbios_rd32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
.line = (info & 0x0000003f) >> 0,
.func = (info & 0x0000ff00) >> 8,
@@ -131,8 +131,8 @@ dcb_gpio_match(struct nvkm_bios *bios, int idx, u8 func, u8 line,
/* DCB 2.2, fixed TVDAC GPIO data */
if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
- u8 conf = nv_ro08(bios, data - 5);
- u8 addr = nv_ro08(bios, data - 4);
+ u8 conf = nvbios_rd08(bios, data - 5);
+ u8 addr = nvbios_rd08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
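
The GPIO parser decodes each packed DCB entry with mask-and-shift into a
designated-initializer compound literal, as in the v3.0 branch above. The
same decode for the two fields shown, in isolation (the real dcb_gpio_func
carries further fields, log levels and param, omitted here):

#include <stdint.h>

struct gpio_func {		/* trimmed-down dcb_gpio_func */
	uint8_t line, func;
};

static struct gpio_func
gpio_parse_v30(uint16_t info)
{
	return (struct gpio_func) {
		.line = (info & 0x001f) >> 0,	/* bits 4:0 */
		.func = (info & 0x07e0) >> 5,	/* bits 10:5 */
	};
}
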
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index c4e1f085ee10..0fc60be32727 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -32,21 +32,21 @@ dcb_i2c_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
if (*ver >= 0x15)
- i2c = nv_ro16(bios, dcb + 2);
+ i2c = nvbios_rd16(bios, dcb + 2);
if (*ver >= 0x30)
- i2c = nv_ro16(bios, dcb + 4);
+ i2c = nvbios_rd16(bios, dcb + 4);
}
if (i2c && *ver >= 0x42) {
- nv_warn(bios, "ccb %02x not supported\n", *ver);
+ nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
return 0x0000;
}
if (i2c && *ver >= 0x30) {
- *ver = nv_ro08(bios, i2c + 0);
- *hdr = nv_ro08(bios, i2c + 1);
- *cnt = nv_ro08(bios, i2c + 2);
- *len = nv_ro08(bios, i2c + 3);
+ *ver = nvbios_rd08(bios, i2c + 0);
+ *hdr = nvbios_rd08(bios, i2c + 1);
+ *cnt = nvbios_rd08(bios, i2c + 2);
+ *len = nvbios_rd08(bios, i2c + 3);
} else {
*ver = *ver; /* use DCB version */
*hdr = 0;
@@ -70,13 +70,14 @@ dcb_i2c_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
int
dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
if (ver >= 0x41) {
- u32 ent_value = nv_ro32(bios, ent);
- u8 i2c_port = (ent_value >> 27) & 0x1f;
- u8 dpaux_port = (ent_value >> 22) & 0x1f;
+ u32 ent_value = nvbios_rd32(bios, ent);
+ u8 i2c_port = (ent_value >> 0) & 0x1f;
+ u8 dpaux_port = (ent_value >> 5) & 0x1f;
/* value 0x1f means unused according to DCB 4.x spec */
if (i2c_port == 0x1f && dpaux_port == 0x1f)
info->type = DCB_I2C_UNUSED;
@@ -84,9 +85,9 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
info->type = DCB_I2C_PMGR;
} else
if (ver >= 0x30) {
- info->type = nv_ro08(bios, ent + 0x03);
+ info->type = nvbios_rd08(bios, ent + 0x03);
} else {
- info->type = nv_ro08(bios, ent + 0x03) & 0x07;
+ info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
if (info->type == 0x07)
info->type = DCB_I2C_UNUSED;
}
@@ -98,27 +99,27 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
switch (info->type) {
case DCB_I2C_NV04_BIT:
- info->drive = nv_ro08(bios, ent + 0);
- info->sense = nv_ro08(bios, ent + 1);
+ info->drive = nvbios_rd08(bios, ent + 0);
+ info->sense = nvbios_rd08(bios, ent + 1);
return 0;
case DCB_I2C_NV4E_BIT:
- info->drive = nv_ro08(bios, ent + 1);
+ info->drive = nvbios_rd08(bios, ent + 1);
return 0;
case DCB_I2C_NVIO_BIT:
- info->drive = nv_ro08(bios, ent + 0) & 0x0f;
- if (nv_ro08(bios, ent + 1) & 0x01)
- info->share = nv_ro08(bios, ent + 1) >> 1;
+ info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
+ if (nvbios_rd08(bios, ent + 1) & 0x01)
+ info->share = nvbios_rd08(bios, ent + 1) >> 1;
return 0;
case DCB_I2C_NVIO_AUX:
- info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
- if (nv_ro08(bios, ent + 1) & 0x01)
+ info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
+ if (nvbios_rd08(bios, ent + 1) & 0x01)
info->share = info->auxch;
return 0;
case DCB_I2C_PMGR:
- info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+ info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
if (info->drive == 0x1f)
info->drive = DCB_I2C_UNUSED;
- info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+ info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
if (info->auxch == 0x1f)
info->auxch = DCB_I2C_UNUSED;
info->share = info->auxch;
@@ -126,7 +127,7 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
case DCB_I2C_UNUSED:
return 0;
default:
- nv_warn(bios, "unknown i2c type %d\n", info->type);
+ nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
info->type = DCB_I2C_UNUSED;
return 0;
}
@@ -136,21 +137,21 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
/* BMP (from v4.0) has i2c info in the structure, it's in a
* fixed location on earlier VBIOS
*/
- if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+ if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
ent = 0x0048;
else
ent = 0x0036 + bios->bmp_offset;
if (idx == 0) {
- info->drive = nv_ro08(bios, ent + 4);
+ info->drive = nvbios_rd08(bios, ent + 4);
if (!info->drive) info->drive = 0x3f;
- info->sense = nv_ro08(bios, ent + 5);
+ info->sense = nvbios_rd08(bios, ent + 5);
if (!info->sense) info->sense = 0x3e;
} else
if (idx == 1) {
- info->drive = nv_ro08(bios, ent + 6);
+ info->drive = nvbios_rd08(bios, ent + 6);
if (!info->drive) info->drive = 0x37;
- info->sense = nv_ro08(bios, ent + 7);
+ info->sense = nvbios_rd08(bios, ent + 7);
if (!info->sense) info->sense = 0x36;
}
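
Note the functional fix in dcb_i2c_parse() above: for DCB 4.1 entries the
i2c and aux port numbers are taken from bits 4:0 and 9:5 of the entry word
rather than from the top of the word, with 0x1f still meaning unused per the
spec comment. The corrected decode in isolation:

#include <stdint.h>
#include <stdbool.h>

#define PORT_UNUSED 0x1f	/* per the DCB 4.x spec comment above */

/* Returns false when both ports are marked unused. */
static bool
dcb41_ports(uint32_t ent, uint8_t *i2c_port, uint8_t *dpaux_port)
{
	*i2c_port   = (ent >> 0) & 0x1f;
	*dpaux_port = (ent >> 5) & 0x1f;
	return !(*i2c_port == PORT_UNUSED && *dpaux_port == PORT_UNUSED);
}
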
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 1815540a0e8b..74b14cf09308 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -29,20 +29,21 @@
static bool
nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_pcirT pcir;
struct nvbios_npdeT npde;
u8 ver;
u16 hdr;
u32 data;
- switch ((data = nv_ro16(bios, image->base + 0x00))) {
+ switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
case 0xaa55:
case 0xbb77:
case 0x4e56: /* NV */
break;
default:
- nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
- image->base, data);
+ nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
+ image->base, data);
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..65af31441e9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -31,18 +31,18 @@
#include <subdev/bios/init.h>
#include <subdev/bios/ramcfg.h>
-#include <core/device.h>
#include <subdev/devinit.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/vga.h>
#define bioslog(lvl, fmt, args...) do { \
- nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \
- init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args); \
+ nvkm_printk(init->subdev, lvl, info, "0x%04x[%c]: "fmt, \
+ init->offset, init_exec(init) ? \
+ '0' + (init->nested - 1) : ' ', ##args); \
} while(0)
#define cont(fmt, args...) do { \
- if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE) \
+ if (init->subdev->debug >= NV_DBG_TRACE) \
printk(fmt, ##args); \
} while(0)
#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
@@ -141,7 +141,7 @@ init_conn(struct nvbios_init *init)
static inline u32
init_nvreg(struct nvbios_init *init, u32 reg)
{
- struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+ struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
/* C51 (at least) sometimes has the lower bits set which the VBIOS
* interprets to mean that access needs to go through certain IO
@@ -154,7 +154,7 @@ init_nvreg(struct nvbios_init *init, u32 reg)
/* GF8+ display scripts need register addresses mangled a bit to
* select a specific CRTC/OR
*/
- if (nv_device(init->bios)->card_type >= NV_50) {
+ if (init->bios->subdev.device->card_type >= NV_50) {
if (reg & 0x80000000) {
reg += init_crtc(init) * 0x800;
reg &= ~0x80000000;
@@ -173,35 +173,36 @@ init_nvreg(struct nvbios_init *init, u32 reg)
if (reg & ~0x00fffffc)
warn("unknown bits in register 0x%08x\n", reg);
- if (devinit->mmio)
- reg = devinit->mmio(devinit, reg);
- return reg;
+ return nvkm_devinit_mmio(devinit, reg);
}
static u32
init_rd32(struct nvbios_init *init, u32 reg)
{
+ struct nvkm_device *device = init->bios->subdev.device;
reg = init_nvreg(init, reg);
if (reg != ~0 && init_exec(init))
- return nv_rd32(init->subdev, reg);
+ return nvkm_rd32(device, reg);
return 0x00000000;
}
static void
init_wr32(struct nvbios_init *init, u32 reg, u32 val)
{
+ struct nvkm_device *device = init->bios->subdev.device;
reg = init_nvreg(init, reg);
if (reg != ~0 && init_exec(init))
- nv_wr32(init->subdev, reg, val);
+ nvkm_wr32(device, reg, val);
}
static u32
init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
{
+ struct nvkm_device *device = init->bios->subdev.device;
reg = init_nvreg(init, reg);
if (reg != ~0 && init_exec(init)) {
- u32 tmp = nv_rd32(init->subdev, reg);
- nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+ u32 tmp = nvkm_rd32(device, reg);
+ nvkm_wr32(device, reg, (tmp & ~mask) | val);
return tmp;
}
return 0x00000000;
@@ -211,7 +212,7 @@ static u8
init_rdport(struct nvbios_init *init, u16 port)
{
if (init_exec(init))
- return nv_rdport(init->subdev, init->crtc, port);
+ return nvkm_rdport(init->subdev->device, init->crtc, port);
return 0x00;
}
@@ -219,7 +220,7 @@ static void
init_wrport(struct nvbios_init *init, u16 port, u8 value)
{
if (init_exec(init))
- nv_wrport(init->subdev, init->crtc, port, value);
+ nvkm_wrport(init->subdev->device, init->crtc, port, value);
}
static u8
@@ -228,7 +229,7 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
struct nvkm_subdev *subdev = init->subdev;
if (init_exec(init)) {
int head = init->crtc < 0 ? 0 : init->crtc;
- return nv_rdvgai(subdev, head, port, index);
+ return nvkm_rdvgai(subdev->device, head, port, index);
}
return 0x00;
}
@@ -236,80 +237,80 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
static void
init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
{
+ struct nvkm_device *device = init->subdev->device;
+
/* force head 0 for updates to cr44, it only exists on first head */
- if (nv_device(init->subdev)->card_type < NV_50) {
+ if (device->card_type < NV_50) {
if (port == 0x03d4 && index == 0x44)
init->crtc = 0;
}
if (init_exec(init)) {
int head = init->crtc < 0 ? 0 : init->crtc;
- nv_wrvgai(init->subdev, head, port, index, value);
+ nvkm_wrvgai(device, head, port, index, value);
}
/* select head 1 if cr44 write selected it */
- if (nv_device(init->subdev)->card_type < NV_50) {
+ if (device->card_type < NV_50) {
if (port == 0x03d4 && index == 0x44 && value == 3)
init->crtc = 1;
}
}
-static struct nvkm_i2c_port *
+static struct i2c_adapter *
init_i2c(struct nvbios_init *init, int index)
{
- struct nvkm_i2c *i2c = nvkm_i2c(init->bios);
+ struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+ struct nvkm_i2c_bus *bus;
if (index == 0xff) {
- index = NV_I2C_DEFAULT(0);
+ index = NVKM_I2C_BUS_PRI;
if (init->outp && init->outp->i2c_upper_default)
- index = NV_I2C_DEFAULT(1);
- } else
- if (index < 0) {
- if (!init->outp) {
- if (init_exec(init))
- error("script needs output for i2c\n");
- return NULL;
- }
-
- if (index == -2 && init->outp->location) {
- index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
- return i2c->find_type(i2c, index);
- }
-
- index = init->outp->i2c_index;
- if (init->outp->type == DCB_OUTPUT_DP)
- index += NV_I2C_AUX(0);
+ index = NVKM_I2C_BUS_SEC;
}
- return i2c->find(i2c, index);
+ bus = nvkm_i2c_bus_find(i2c, index);
+ return bus ? &bus->i2c : NULL;
}
static int
init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
{
- struct nvkm_i2c_port *port = init_i2c(init, index);
- if (port && init_exec(init))
- return nv_rdi2cr(port, addr, reg);
+ struct i2c_adapter *adap = init_i2c(init, index);
+ if (adap && init_exec(init))
+ return nvkm_rdi2cr(adap, addr, reg);
return -ENODEV;
}
static int
init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
{
- struct nvkm_i2c_port *port = init_i2c(init, index);
- if (port && init_exec(init))
- return nv_wri2cr(port, addr, reg, val);
+ struct i2c_adapter *adap = init_i2c(init, index);
+ if (adap && init_exec(init))
+ return nvkm_wri2cr(adap, addr, reg, val);
return -ENODEV;
}
+static struct nvkm_i2c_aux *
+init_aux(struct nvbios_init *init)
+{
+ struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+ if (!init->outp) {
+ if (init_exec(init))
+ error("script needs output for aux\n");
+ return NULL;
+ }
+ return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
+}
+
static u8
init_rdauxr(struct nvbios_init *init, u32 addr)
{
- struct nvkm_i2c_port *port = init_i2c(init, -2);
+ struct nvkm_i2c_aux *aux = init_aux(init);
u8 data;
- if (port && init_exec(init)) {
- int ret = nv_rdaux(port, addr, &data, 1);
+ if (aux && init_exec(init)) {
+ int ret = nvkm_rdaux(aux, addr, &data, 1);
if (ret == 0)
return data;
trace("auxch read failed with %d\n", ret);
@@ -321,9 +322,9 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
- struct nvkm_i2c_port *port = init_i2c(init, -2);
- if (port && init_exec(init)) {
- int ret = nv_wraux(port, addr, &data, 1);
+ struct nvkm_i2c_aux *aux = init_aux(init);
+ if (aux && init_exec(init)) {
+ int ret = nvkm_wraux(aux, addr, &data, 1);
if (ret)
trace("auxch write failed with %d\n", ret);
return ret;
@@ -334,9 +335,9 @@ init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
static void
init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
{
- struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
- if (devinit->pll_set && init_exec(init)) {
- int ret = devinit->pll_set(devinit, id, freq);
+ struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
+ if (init_exec(init)) {
+ int ret = nvkm_devinit_pll_set(devinit, id, freq);
if (ret)
warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
}
@@ -371,7 +372,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name)
u16 len, data = init_table(bios, &len);
if (data) {
if (len >= offset + 2) {
- data = nv_ro16(bios, data + offset);
+ data = nvbios_rd16(bios, data + offset);
if (data)
return data;
@@ -407,12 +408,12 @@ init_script(struct nvkm_bios *bios, int index)
return 0x0000;
data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
- return nv_ro16(bios, data + (index * 2));
+ return nvbios_rd16(bios, data + (index * 2));
}
data = init_script_table(&init);
if (data)
- return nv_ro16(bios, data + (index * 2));
+ return nvbios_rd16(bios, data + (index * 2));
return 0x0000;
}
@@ -422,7 +423,7 @@ init_unknown_script(struct nvkm_bios *bios)
{
u16 len, data = init_table(bios, &len);
if (data && len >= 16)
- return nv_ro16(bios, data + 14);
+ return nvbios_rd16(bios, data + 14);
return 0x0000;
}
@@ -454,9 +455,9 @@ init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
struct nvkm_bios *bios = init->bios;
u16 table = init_xlat_table(init);
if (table) {
- u16 data = nv_ro16(bios, table + (index * 2));
+ u16 data = nvbios_rd16(bios, table + (index * 2));
if (data)
- return nv_ro08(bios, data + offset);
+ return nvbios_rd08(bios, data + offset);
warn("xlat table pointer %d invalid\n", index);
}
return 0x00;
@@ -472,9 +473,9 @@ init_condition_met(struct nvbios_init *init, u8 cond)
struct nvkm_bios *bios = init->bios;
u16 table = init_condition_table(init);
if (table) {
- u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
- u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
- u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+ u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
+ u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
+ u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
cond, reg, msk, val);
return (init_rd32(init, reg) & msk) == val;
@@ -488,10 +489,10 @@ init_io_condition_met(struct nvbios_init *init, u8 cond)
struct nvkm_bios *bios = init->bios;
u16 table = init_io_condition_table(init);
if (table) {
- u16 port = nv_ro16(bios, table + (cond * 5) + 0);
- u8 index = nv_ro08(bios, table + (cond * 5) + 2);
- u8 mask = nv_ro08(bios, table + (cond * 5) + 3);
- u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+ u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
+ u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
+ u8 mask = nvbios_rd08(bios, table + (cond * 5) + 3);
+ u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
cond, port, index, mask, value);
return (init_rdvgai(init, port, index) & mask) == value;
@@ -505,15 +506,15 @@ init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
struct nvkm_bios *bios = init->bios;
u16 table = init_io_flag_condition_table(init);
if (table) {
- u16 port = nv_ro16(bios, table + (cond * 9) + 0);
- u8 index = nv_ro08(bios, table + (cond * 9) + 2);
- u8 mask = nv_ro08(bios, table + (cond * 9) + 3);
- u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
- u16 data = nv_ro16(bios, table + (cond * 9) + 5);
- u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
- u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+ u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
+ u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
+ u8 mask = nvbios_rd08(bios, table + (cond * 9) + 3);
+ u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
+ u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
+ u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
+ u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
- return (nv_ro08(bios, data + ioval) & dmask) == value;
+ return (nvbios_rd08(bios, data + ioval) & dmask) == value;
}
return false;
}
@@ -573,7 +574,7 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
static void
init_reserved(struct nvbios_init *init)
{
- u8 opcode = nv_ro08(init->bios, init->offset);
+ u8 opcode = nvbios_rd08(init->bios, init->offset);
u8 length, i;
switch (opcode) {
@@ -587,7 +588,7 @@ init_reserved(struct nvbios_init *init)
trace("RESERVED 0x%02x\t", opcode);
for (i = 1; i < length; i++)
- cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+ cont(" 0x%02x", nvbios_rd08(init->bios, init->offset + i));
cont("\n");
init->offset += length;
}
@@ -611,12 +612,12 @@ static void
init_io_restrict_prog(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 index = nv_ro08(bios, init->offset + 3);
- u8 mask = nv_ro08(bios, init->offset + 4);
- u8 shift = nv_ro08(bios, init->offset + 5);
- u8 count = nv_ro08(bios, init->offset + 6);
- u32 reg = nv_ro32(bios, init->offset + 7);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 3);
+ u8 mask = nvbios_rd08(bios, init->offset + 4);
+ u8 shift = nvbios_rd08(bios, init->offset + 5);
+ u8 count = nvbios_rd08(bios, init->offset + 6);
+ u32 reg = nvbios_rd32(bios, init->offset + 7);
u8 conf, i;
trace("IO_RESTRICT_PROG\tR[0x%06x] = "
@@ -626,7 +627,7 @@ init_io_restrict_prog(struct nvbios_init *init)
conf = (init_rdvgai(init, port, index) & mask) >> shift;
for (i = 0; i < count; i++) {
- u32 data = nv_ro32(bios, init->offset);
+ u32 data = nvbios_rd32(bios, init->offset);
if (i == conf) {
trace("\t0x%08x *\n", data);
@@ -648,7 +649,7 @@ static void
init_repeat(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 count = nv_ro08(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 1);
u16 repeat = init->repeat;
trace("REPEAT\t0x%02x\n", count);
@@ -674,13 +675,13 @@ static void
init_io_restrict_pll(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 index = nv_ro08(bios, init->offset + 3);
- u8 mask = nv_ro08(bios, init->offset + 4);
- u8 shift = nv_ro08(bios, init->offset + 5);
- s8 iofc = nv_ro08(bios, init->offset + 6);
- u8 count = nv_ro08(bios, init->offset + 7);
- u32 reg = nv_ro32(bios, init->offset + 8);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 3);
+ u8 mask = nvbios_rd08(bios, init->offset + 4);
+ u8 shift = nvbios_rd08(bios, init->offset + 5);
+ s8 iofc = nvbios_rd08(bios, init->offset + 6);
+ u8 count = nvbios_rd08(bios, init->offset + 7);
+ u32 reg = nvbios_rd32(bios, init->offset + 8);
u8 conf, i;
trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
@@ -690,7 +691,7 @@ init_io_restrict_pll(struct nvbios_init *init)
conf = (init_rdvgai(init, port, index) & mask) >> shift;
for (i = 0; i < count; i++) {
- u32 freq = nv_ro16(bios, init->offset) * 10;
+ u32 freq = nvbios_rd16(bios, init->offset) * 10;
if (i == conf) {
trace("\t%dkHz *\n", freq);
@@ -730,12 +731,12 @@ static void
init_copy(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u8 shift = nv_ro08(bios, init->offset + 5);
- u8 smask = nv_ro08(bios, init->offset + 6);
- u16 port = nv_ro16(bios, init->offset + 7);
- u8 index = nv_ro08(bios, init->offset + 9);
- u8 mask = nv_ro08(bios, init->offset + 10);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u8 shift = nvbios_rd08(bios, init->offset + 5);
+ u8 smask = nvbios_rd08(bios, init->offset + 6);
+ u16 port = nvbios_rd16(bios, init->offset + 7);
+ u8 index = nvbios_rd08(bios, init->offset + 9);
+ u8 mask = nvbios_rd08(bios, init->offset + 10);
u8 data;
trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
@@ -769,7 +770,7 @@ static void
init_io_flag_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 cond = nv_ro08(bios, init->offset + 1);
+ u8 cond = nvbios_rd08(bios, init->offset + 1);
trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
init->offset += 2;
@@ -787,8 +788,8 @@ init_dp_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
struct nvbios_dpout info;
- u8 cond = nv_ro08(bios, init->offset + 1);
- u8 unkn = nv_ro08(bios, init->offset + 2);
+ u8 cond = nvbios_rd08(bios, init->offset + 1);
+ u8 unkn = nvbios_rd08(bios, init->offset + 2);
u8 ver, hdr, cnt, len;
u16 data;
@@ -834,7 +835,7 @@ static void
init_io_mask_or(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
u8 or = init_or(init);
u8 data;
@@ -853,7 +854,7 @@ static void
init_io_or(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
u8 or = init_or(init);
u8 data;
@@ -872,8 +873,8 @@ static void
init_andn_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 mask = nv_ro32(bios, init->offset + 5);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 mask = nvbios_rd32(bios, init->offset + 5);
trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
init->offset += 9;
@@ -889,8 +890,8 @@ static void
init_or_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 mask = nv_ro32(bios, init->offset + 5);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 mask = nvbios_rd32(bios, init->offset + 5);
trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
init->offset += 9;
@@ -906,19 +907,19 @@ static void
init_idx_addr_latched(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 creg = nv_ro32(bios, init->offset + 1);
- u32 dreg = nv_ro32(bios, init->offset + 5);
- u32 mask = nv_ro32(bios, init->offset + 9);
- u32 data = nv_ro32(bios, init->offset + 13);
- u8 count = nv_ro08(bios, init->offset + 17);
+ u32 creg = nvbios_rd32(bios, init->offset + 1);
+ u32 dreg = nvbios_rd32(bios, init->offset + 5);
+ u32 mask = nvbios_rd32(bios, init->offset + 9);
+ u32 data = nvbios_rd32(bios, init->offset + 13);
+ u8 count = nvbios_rd08(bios, init->offset + 17);
trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
init->offset += 18;
while (count--) {
- u8 iaddr = nv_ro08(bios, init->offset + 0);
- u8 idata = nv_ro08(bios, init->offset + 1);
+ u8 iaddr = nvbios_rd08(bios, init->offset + 0);
+ u8 idata = nvbios_rd08(bios, init->offset + 1);
trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
init->offset += 2;
@@ -936,12 +937,12 @@ static void
init_io_restrict_pll2(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 index = nv_ro08(bios, init->offset + 3);
- u8 mask = nv_ro08(bios, init->offset + 4);
- u8 shift = nv_ro08(bios, init->offset + 5);
- u8 count = nv_ro08(bios, init->offset + 6);
- u32 reg = nv_ro32(bios, init->offset + 7);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 3);
+ u8 mask = nvbios_rd08(bios, init->offset + 4);
+ u8 shift = nvbios_rd08(bios, init->offset + 5);
+ u8 count = nvbios_rd08(bios, init->offset + 6);
+ u32 reg = nvbios_rd32(bios, init->offset + 7);
u8 conf, i;
trace("IO_RESTRICT_PLL2\t"
@@ -951,7 +952,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
conf = (init_rdvgai(init, port, index) & mask) >> shift;
for (i = 0; i < count; i++) {
- u32 freq = nv_ro32(bios, init->offset);
+ u32 freq = nvbios_rd32(bios, init->offset);
if (i == conf) {
trace("\t%dkHz *\n", freq);
init_prog_pll(init, reg, freq);
@@ -971,8 +972,8 @@ static void
init_pll2(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 freq = nv_ro32(bios, init->offset + 5);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 freq = nvbios_rd32(bios, init->offset + 5);
trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
init->offset += 9;
@@ -988,17 +989,17 @@ static void
init_i2c_byte(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
- u8 count = nv_ro08(bios, init->offset + 3);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+ u8 count = nvbios_rd08(bios, init->offset + 3);
trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
init->offset += 4;
while (count--) {
- u8 reg = nv_ro08(bios, init->offset + 0);
- u8 mask = nv_ro08(bios, init->offset + 1);
- u8 data = nv_ro08(bios, init->offset + 2);
+ u8 reg = nvbios_rd08(bios, init->offset + 0);
+ u8 mask = nvbios_rd08(bios, init->offset + 1);
+ u8 data = nvbios_rd08(bios, init->offset + 2);
int val;
trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
@@ -1019,16 +1020,16 @@ static void
init_zm_i2c_byte(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
- u8 count = nv_ro08(bios, init->offset + 3);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+ u8 count = nvbios_rd08(bios, init->offset + 3);
trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
init->offset += 4;
while (count--) {
- u8 reg = nv_ro08(bios, init->offset + 0);
- u8 data = nv_ro08(bios, init->offset + 1);
+ u8 reg = nvbios_rd08(bios, init->offset + 0);
+ u8 data = nvbios_rd08(bios, init->offset + 1);
trace("\t[0x%02x] = 0x%02x\n", reg, data);
init->offset += 2;
@@ -1045,28 +1046,28 @@ static void
init_zm_i2c(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
- u8 count = nv_ro08(bios, init->offset + 3);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+ u8 count = nvbios_rd08(bios, init->offset + 3);
u8 data[256], i;
trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
init->offset += 4;
for (i = 0; i < count; i++) {
- data[i] = nv_ro08(bios, init->offset);
+ data[i] = nvbios_rd08(bios, init->offset);
trace("\t0x%02x\n", data[i]);
init->offset++;
}
if (init_exec(init)) {
- struct nvkm_i2c_port *port = init_i2c(init, index);
+ struct i2c_adapter *adap = init_i2c(init, index);
struct i2c_msg msg = {
.addr = addr, .flags = 0, .len = count, .buf = data,
};
int ret;
- if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+ if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
warn("i2c wr failed, %d\n", ret);
}
}
@@ -1079,10 +1080,10 @@ static void
init_tmds(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 tmds = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2);
- u8 mask = nv_ro08(bios, init->offset + 3);
- u8 data = nv_ro08(bios, init->offset + 4);
+ u8 tmds = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2);
+ u8 mask = nvbios_rd08(bios, init->offset + 3);
+ u8 data = nvbios_rd08(bios, init->offset + 4);
u32 reg = init_tmds_reg(init, tmds);
trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1105,16 +1106,16 @@ static void
init_zm_tmds_group(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 tmds = nv_ro08(bios, init->offset + 1);
- u8 count = nv_ro08(bios, init->offset + 2);
+ u8 tmds = nvbios_rd08(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 2);
u32 reg = init_tmds_reg(init, tmds);
trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
init->offset += 3;
while (count--) {
- u8 addr = nv_ro08(bios, init->offset + 0);
- u8 data = nv_ro08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 0);
+ u8 data = nvbios_rd08(bios, init->offset + 1);
trace("\t[0x%02x] = 0x%02x\n", addr, data);
init->offset += 2;
@@ -1132,10 +1133,10 @@ static void
init_cr_idx_adr_latch(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 addr0 = nv_ro08(bios, init->offset + 1);
- u8 addr1 = nv_ro08(bios, init->offset + 2);
- u8 base = nv_ro08(bios, init->offset + 3);
- u8 count = nv_ro08(bios, init->offset + 4);
+ u8 addr0 = nvbios_rd08(bios, init->offset + 1);
+ u8 addr1 = nvbios_rd08(bios, init->offset + 2);
+ u8 base = nvbios_rd08(bios, init->offset + 3);
+ u8 count = nvbios_rd08(bios, init->offset + 4);
u8 save0;
trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
@@ -1143,7 +1144,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
save0 = init_rdvgai(init, 0x03d4, addr0);
while (count--) {
- u8 data = nv_ro08(bios, init->offset);
+ u8 data = nvbios_rd08(bios, init->offset);
trace("\t\t[0x%02x] = 0x%02x\n", base, data);
init->offset += 1;
@@ -1162,9 +1163,9 @@ static void
init_cr(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 addr = nv_ro08(bios, init->offset + 1);
- u8 mask = nv_ro08(bios, init->offset + 2);
- u8 data = nv_ro08(bios, init->offset + 3);
+ u8 addr = nvbios_rd08(bios, init->offset + 1);
+ u8 mask = nvbios_rd08(bios, init->offset + 2);
+ u8 data = nvbios_rd08(bios, init->offset + 3);
u8 val;
trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
@@ -1182,8 +1183,8 @@ static void
init_zm_cr(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 addr = nv_ro08(bios, init->offset + 1);
- u8 data = nv_ro08(bios, init->offset + 2);
+ u8 addr = nvbios_rd08(bios, init->offset + 1);
+ u8 data = nvbios_rd08(bios, init->offset + 2);
trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data);
init->offset += 3;
@@ -1199,14 +1200,14 @@ static void
init_zm_cr_group(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 count = nv_ro08(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 1);
trace("ZM_CR_GROUP\n");
init->offset += 2;
while (count--) {
- u8 addr = nv_ro08(bios, init->offset + 0);
- u8 data = nv_ro08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 0);
+ u8 data = nvbios_rd08(bios, init->offset + 1);
trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
init->offset += 2;
@@ -1223,8 +1224,8 @@ static void
init_condition_time(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 cond = nv_ro08(bios, init->offset + 1);
- u8 retry = nv_ro08(bios, init->offset + 2);
+ u8 cond = nvbios_rd08(bios, init->offset + 1);
+ u8 retry = nvbios_rd08(bios, init->offset + 2);
u8 wait = min((u16)retry * 50, 100);
trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
@@ -1250,7 +1251,7 @@ static void
init_ltime(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 msec = nv_ro16(bios, init->offset + 1);
+ u16 msec = nvbios_rd16(bios, init->offset + 1);
trace("LTIME\t0x%04x\n", msec);
init->offset += 3;
@@ -1267,14 +1268,14 @@ static void
init_zm_reg_sequence(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 base = nv_ro32(bios, init->offset + 1);
- u8 count = nv_ro08(bios, init->offset + 5);
+ u32 base = nvbios_rd32(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 5);
trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
init->offset += 6;
while (count--) {
- u32 data = nv_ro32(bios, init->offset);
+ u32 data = nvbios_rd32(bios, init->offset);
trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
init->offset += 4;
@@ -1285,6 +1286,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
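+ * Programs the PLL at 'reg' to a frequency read indirectly from the
+ * VBIOS image: a 16-bit value stored in MHz at 'addr', converted to
+ * kHz before programming.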
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u16 addr = nvbios_rd16(bios, init->offset + 5);
+ u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
+
+ trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+ reg, addr, freq);
+ init->offset += 7;
+
+ init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
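+ * Writes the 32-bit value found at offset 'addr' within the VBIOS
+ * image directly to register 'reg', without a read/modify cycle.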
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u16 addr = nvbios_rd16(bios, init->offset + 5);
+ u32 data = nvbios_rd32(bios, addr);
+
+ trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+ reg, addr, data);
+ init->offset += 7;
+
+ init_wr32(init, reg, data);
+}
+
+/**
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
@@ -1292,7 +1331,7 @@ static void
init_sub_direct(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 addr = nv_ro16(bios, init->offset + 1);
+ u16 addr = nvbios_rd16(bios, init->offset + 1);
u16 save;
trace("SUB_DIRECT\t0x%04x\n", addr);
@@ -1318,7 +1357,7 @@ static void
init_jump(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 offset = nv_ro16(bios, init->offset + 1);
+ u16 offset = nvbios_rd16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
@@ -1336,11 +1375,11 @@ static void
init_i2c_if(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2);
- u8 reg = nv_ro08(bios, init->offset + 3);
- u8 mask = nv_ro08(bios, init->offset + 4);
- u8 data = nv_ro08(bios, init->offset + 5);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2);
+ u8 reg = nvbios_rd08(bios, init->offset + 3);
+ u8 mask = nvbios_rd08(bios, init->offset + 4);
+ u8 data = nvbios_rd08(bios, init->offset + 5);
u8 value;
trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
@@ -1363,12 +1402,12 @@ static void
init_copy_nv_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 sreg = nv_ro32(bios, init->offset + 1);
- u8 shift = nv_ro08(bios, init->offset + 5);
- u32 smask = nv_ro32(bios, init->offset + 6);
- u32 sxor = nv_ro32(bios, init->offset + 10);
- u32 dreg = nv_ro32(bios, init->offset + 14);
- u32 dmask = nv_ro32(bios, init->offset + 18);
+ u32 sreg = nvbios_rd32(bios, init->offset + 1);
+ u8 shift = nvbios_rd08(bios, init->offset + 5);
+ u32 smask = nvbios_rd32(bios, init->offset + 6);
+ u32 sxor = nvbios_rd32(bios, init->offset + 10);
+ u32 dreg = nvbios_rd32(bios, init->offset + 14);
+ u32 dmask = nvbios_rd32(bios, init->offset + 18);
u32 data;
trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
@@ -1389,9 +1428,9 @@ static void
init_zm_index_io(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 index = nv_ro08(bios, init->offset + 3);
- u8 data = nv_ro08(bios, init->offset + 4);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 3);
+ u8 data = nvbios_rd08(bios, init->offset + 4);
trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
init->offset += 5;
@@ -1406,14 +1445,14 @@ init_zm_index_io(struct nvbios_init *init)
static void
init_compute_mem(struct nvbios_init *init)
{
- struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+ struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
trace("COMPUTE_MEM\n");
init->offset += 1;
init_exec_force(init, true);
- if (init_exec(init) && devinit->meminit)
- devinit->meminit(devinit);
+ if (init_exec(init))
+ nvkm_devinit_meminit(devinit);
init_exec_force(init, false);
}
@@ -1425,9 +1464,9 @@ static void
init_reset(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 data1 = nv_ro32(bios, init->offset + 5);
- u32 data2 = nv_ro32(bios, init->offset + 9);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 data1 = nvbios_rd32(bios, init->offset + 5);
+ u32 data2 = nvbios_rd32(bios, init->offset + 9);
u32 savepci19;
trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
@@ -1475,14 +1514,14 @@ init_configure_mem(struct nvbios_init *init)
mdata = init_configure_mem_clk(init);
sdata = bmp_sdr_seq_table(bios);
- if (nv_ro08(bios, mdata) & 0x01)
+ if (nvbios_rd08(bios, mdata) & 0x01)
sdata = bmp_ddr_seq_table(bios);
mdata += 6; /* skip to data */
data = init_rdvgai(init, 0x03c4, 0x01);
init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
- for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
+ for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
switch (addr) {
case 0x10021c: /* CKE_NORMAL */
case 0x1002d0: /* CMD_REFRESH */
@@ -1490,7 +1529,7 @@ init_configure_mem(struct nvbios_init *init)
data = 0x00000001;
break;
default:
- data = nv_ro32(bios, mdata);
+ data = nvbios_rd32(bios, mdata);
mdata += 4;
if (data == 0xffffffff)
continue;
@@ -1525,12 +1564,12 @@ init_configure_clk(struct nvbios_init *init)
mdata = init_configure_mem_clk(init);
/* NVPLL */
- clock = nv_ro16(bios, mdata + 4) * 10;
+ clock = nvbios_rd16(bios, mdata + 4) * 10;
init_prog_pll(init, 0x680500, clock);
/* MPLL */
- clock = nv_ro16(bios, mdata + 2) * 10;
- if (nv_ro08(bios, mdata) & 0x01)
+ clock = nvbios_rd16(bios, mdata + 2) * 10;
+ if (nvbios_rd08(bios, mdata) & 0x01)
clock *= 2;
init_prog_pll(init, 0x680504, clock);
@@ -1571,9 +1610,9 @@ static void
init_io(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 mask = nv_ro16(bios, init->offset + 3);
- u8 data = nv_ro16(bios, init->offset + 4);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 mask = nvbios_rd16(bios, init->offset + 3);
+ u8 data = nvbios_rd16(bios, init->offset + 4);
u8 value;
trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
@@ -1583,7 +1622,7 @@ init_io(struct nvbios_init *init)
* needed some day.. it's almost certainly wrong, but, it also
* somehow makes things work...
*/
- if (nv_device(init->bios)->card_type >= NV_50 &&
+ if (bios->subdev.device->card_type >= NV_50 &&
port == 0x03c3 && data == 0x01) {
init_mask(init, 0x614100, 0xf0800000, 0x00800000);
init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
@@ -1611,7 +1650,7 @@ static void
init_sub(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
+ u8 index = nvbios_rd08(bios, init->offset + 1);
u16 addr, save;
trace("SUB\t0x%02x\n", index);
@@ -1638,8 +1677,8 @@ static void
init_ram_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 mask = nv_ro08(bios, init->offset + 1);
- u8 value = nv_ro08(bios, init->offset + 2);
+ u8 mask = nvbios_rd08(bios, init->offset + 1);
+ u8 value = nvbios_rd08(bios, init->offset + 2);
trace("RAM_CONDITION\t"
"(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
@@ -1657,9 +1696,9 @@ static void
init_nv_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 mask = nv_ro32(bios, init->offset + 5);
- u32 data = nv_ro32(bios, init->offset + 9);
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 mask = nvbios_rd32(bios, init->offset + 5);
+ u32 data = nvbios_rd32(bios, init->offset + 9);
trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
init->offset += 13;
@@ -1675,15 +1714,15 @@ static void
init_macro(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 macro = nv_ro08(bios, init->offset + 1);
+ u8 macro = nvbios_rd08(bios, init->offset + 1);
u16 table;
trace("MACRO\t0x%02x\n", macro);
table = init_macro_table(init);
if (table) {
- u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
- u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+ u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
+ u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
init_wr32(init, addr, data);
}
@@ -1704,6 +1743,24 @@ init_resume(struct nvbios_init *init)
}
/**
+ * INIT_STRAP_CONDITION - opcode 0x73
+ *
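+ * Tests the straps register (R[0x101000]) against a mask/value pair
+ * and disables further script execution when they do not match.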
+ */
+static void
+init_strap_condition(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 mask = nvbios_rd32(bios, init->offset + 1);
+ u32 value = nvbios_rd32(bios, init->offset + 5);
+
+ trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, value);
+ init->offset += 9;
+
+ if ((init_rd32(init, 0x101000) & mask) != value)
+ init_exec_set(init, false);
+}
+
+/**
* INIT_TIME - opcode 0x74
*
*/
@@ -1711,7 +1768,7 @@ static void
init_time(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 usec = nv_ro16(bios, init->offset + 1);
+ u16 usec = nvbios_rd16(bios, init->offset + 1);
trace("TIME\t0x%04x\n", usec);
init->offset += 3;
@@ -1732,7 +1789,7 @@ static void
init_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 cond = nv_ro08(bios, init->offset + 1);
+ u8 cond = nvbios_rd08(bios, init->offset + 1);
trace("CONDITION\t0x%02x\n", cond);
init->offset += 2;
@@ -1749,7 +1806,7 @@ static void
init_io_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 cond = nv_ro08(bios, init->offset + 1);
+ u8 cond = nvbios_rd08(bios, init->offset + 1);
trace("IO_CONDITION\t0x%02x\n", cond);
init->offset += 2;
@@ -1759,6 +1816,23 @@ init_io_condition(struct nvbios_init *init)
}
/**
+ * INIT_ZM_REG16 - opcode 0x77
+ *
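+ * Like INIT_ZM_REG (opcode 0x7a), but with a 16-bit immediate that is
+ * zero-extended into the 32-bit register write.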
+ */
+static void
+init_zm_reg16(struct nvbios_init *init)
+{
+ struct nvkm_bios *bios = init->bios;
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u16 data = nvbios_rd16(bios, init->offset + 5);
+
+ trace("ZM_REG\tR[0x%06x] = 0x%04x\n", addr, data);
+ init->offset += 7;
+
+ init_wr32(init, addr, data);
+}
+
+/**
* INIT_INDEX_IO - opcode 0x78
*
*/
@@ -1766,10 +1840,10 @@ static void
init_index_io(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u16 port = nv_ro16(bios, init->offset + 1);
- u8 index = nv_ro16(bios, init->offset + 3);
- u8 mask = nv_ro08(bios, init->offset + 4);
- u8 data = nv_ro08(bios, init->offset + 5);
+ u16 port = nvbios_rd16(bios, init->offset + 1);
+ u8 index = nvbios_rd16(bios, init->offset + 3);
+ u8 mask = nvbios_rd08(bios, init->offset + 4);
+ u8 data = nvbios_rd08(bios, init->offset + 5);
u8 value;
trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1788,8 +1862,8 @@ static void
init_pll(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 reg = nv_ro32(bios, init->offset + 1);
- u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+ u32 reg = nvbios_rd32(bios, init->offset + 1);
+ u32 freq = nvbios_rd16(bios, init->offset + 5) * 10;
trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
init->offset += 7;
@@ -1805,8 +1879,8 @@ static void
init_zm_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u32 data = nv_ro32(bios, init->offset + 5);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u32 data = nvbios_rd32(bios, init->offset + 5);
trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
init->offset += 9;
@@ -1825,7 +1899,7 @@ static void
init_ram_restrict_pll(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 type = nv_ro08(bios, init->offset + 1);
+ u8 type = nvbios_rd08(bios, init->offset + 1);
u8 count = init_ram_restrict_group_count(init);
u8 strap = init_ram_restrict(init);
u8 cconf;
@@ -1834,7 +1908,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
init->offset += 2;
for (cconf = 0; cconf < count; cconf++) {
- u32 freq = nv_ro32(bios, init->offset);
+ u32 freq = nvbios_rd32(bios, init->offset);
if (cconf == strap) {
trace("%dkHz *\n", freq);
@@ -1854,13 +1928,13 @@ init_ram_restrict_pll(struct nvbios_init *init)
static void
init_gpio(struct nvbios_init *init)
{
- struct nvkm_gpio *gpio = nvkm_gpio(init->bios);
+ struct nvkm_gpio *gpio = init->bios->subdev.device->gpio;
trace("GPIO\n");
init->offset += 1;
- if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio, DCB_GPIO_UNUSED);
+ if (init_exec(init))
+ nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
}
/**
@@ -1871,9 +1945,9 @@ static void
init_ram_restrict_zm_reg_group(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u8 incr = nv_ro08(bios, init->offset + 5);
- u8 num = nv_ro08(bios, init->offset + 6);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u8 incr = nvbios_rd08(bios, init->offset + 5);
+ u8 num = nvbios_rd08(bios, init->offset + 6);
u8 count = init_ram_restrict_group_count(init);
u8 index = init_ram_restrict(init);
u8 i, j;
@@ -1885,7 +1959,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
for (i = 0; i < num; i++) {
trace("\tR[0x%06x] = {\n", addr);
for (j = 0; j < count; j++) {
- u32 data = nv_ro32(bios, init->offset);
+ u32 data = nvbios_rd32(bios, init->offset);
if (j == index) {
trace("\t\t0x%08x *\n", data);
@@ -1909,8 +1983,8 @@ static void
init_copy_zm_reg(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 sreg = nv_ro32(bios, init->offset + 1);
- u32 dreg = nv_ro32(bios, init->offset + 5);
+ u32 sreg = nvbios_rd32(bios, init->offset + 1);
+ u32 dreg = nvbios_rd32(bios, init->offset + 5);
trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
init->offset += 9;
@@ -1926,14 +2000,14 @@ static void
init_zm_reg_group(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u8 count = nv_ro08(bios, init->offset + 5);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 5);
trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
init->offset += 6;
while (count--) {
- u32 data = nv_ro32(bios, init->offset);
+ u32 data = nvbios_rd32(bios, init->offset);
trace("\t0x%08x\n", data);
init_wr32(init, addr, data);
init->offset += 4;
@@ -1948,13 +2022,13 @@ static void
init_xlat(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 saddr = nv_ro32(bios, init->offset + 1);
- u8 sshift = nv_ro08(bios, init->offset + 5);
- u8 smask = nv_ro08(bios, init->offset + 6);
- u8 index = nv_ro08(bios, init->offset + 7);
- u32 daddr = nv_ro32(bios, init->offset + 8);
- u32 dmask = nv_ro32(bios, init->offset + 12);
- u8 shift = nv_ro08(bios, init->offset + 16);
+ u32 saddr = nvbios_rd32(bios, init->offset + 1);
+ u8 sshift = nvbios_rd08(bios, init->offset + 5);
+ u8 smask = nvbios_rd08(bios, init->offset + 6);
+ u8 index = nvbios_rd08(bios, init->offset + 7);
+ u32 daddr = nvbios_rd32(bios, init->offset + 8);
+ u32 dmask = nvbios_rd32(bios, init->offset + 12);
+ u8 shift = nvbios_rd08(bios, init->offset + 16);
u32 data;
trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
@@ -1976,9 +2050,9 @@ static void
init_zm_mask_add(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u32 mask = nv_ro32(bios, init->offset + 5);
- u32 add = nv_ro32(bios, init->offset + 9);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u32 mask = nvbios_rd32(bios, init->offset + 5);
+ u32 add = nvbios_rd32(bios, init->offset + 9);
u32 data;
trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
@@ -1997,15 +2071,15 @@ static void
init_auxch(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u8 count = nv_ro08(bios, init->offset + 5);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 5);
trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
init->offset += 6;
while (count--) {
- u8 mask = nv_ro08(bios, init->offset + 0);
- u8 data = nv_ro08(bios, init->offset + 1);
+ u8 mask = nvbios_rd08(bios, init->offset + 0);
+ u8 data = nvbios_rd08(bios, init->offset + 1);
trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
mask = init_rdauxr(init, addr) & mask;
init_wrauxr(init, addr, mask | data);
@@ -2021,14 +2095,14 @@ static void
init_zm_auxch(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u32 addr = nv_ro32(bios, init->offset + 1);
- u8 count = nv_ro08(bios, init->offset + 5);
+ u32 addr = nvbios_rd32(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 5);
trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
init->offset += 6;
while (count--) {
- u8 data = nv_ro08(bios, init->offset + 0);
+ u8 data = nvbios_rd08(bios, init->offset + 0);
trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
init_wrauxr(init, addr, data);
init->offset += 1;
@@ -2043,21 +2117,21 @@ static void
init_i2c_long_if(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- u8 index = nv_ro08(bios, init->offset + 1);
- u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
- u8 reglo = nv_ro08(bios, init->offset + 3);
- u8 reghi = nv_ro08(bios, init->offset + 4);
- u8 mask = nv_ro08(bios, init->offset + 5);
- u8 data = nv_ro08(bios, init->offset + 6);
- struct nvkm_i2c_port *port;
+ u8 index = nvbios_rd08(bios, init->offset + 1);
+ u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+ u8 reglo = nvbios_rd08(bios, init->offset + 3);
+ u8 reghi = nvbios_rd08(bios, init->offset + 4);
+ u8 mask = nvbios_rd08(bios, init->offset + 5);
+ u8 data = nvbios_rd08(bios, init->offset + 6);
+ struct i2c_adapter *adap;
trace("I2C_LONG_IF\t"
"I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
index, addr, reglo, reghi, mask, data);
init->offset += 7;
- port = init_i2c(init, index);
- if (port) {
+ adap = init_i2c(init, index);
+ if (adap) {
u8 i[2] = { reghi, reglo };
u8 o[1] = {};
struct i2c_msg msg[] = {
@@ -2066,7 +2140,7 @@ init_i2c_long_if(struct nvbios_init *init)
};
int ret;
- ret = i2c_transfer(&port->adapter, msg, 2);
+ ret = i2c_transfer(adap, msg, 2);
if (ret == 2 && ((o[0] & mask) == data))
return;
}
@@ -2082,9 +2156,9 @@ static void
init_gpio_ne(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
- struct nvkm_gpio *gpio = nvkm_gpio(bios);
+ struct nvkm_gpio *gpio = bios->subdev.device->gpio;
struct dcb_gpio_func func;
- u8 count = nv_ro08(bios, init->offset + 1);
+ u8 count = nvbios_rd08(bios, init->offset + 1);
u8 idx = 0, ver, len;
u16 data, i;
@@ -2092,21 +2166,21 @@ init_gpio_ne(struct nvbios_init *init)
init->offset += 2;
for (i = init->offset; i < init->offset + count; i++)
- cont("0x%02x ", nv_ro08(bios, i));
+ cont("0x%02x ", nvbios_rd08(bios, i));
cont("\n");
while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
if (func.func != DCB_GPIO_UNUSED) {
for (i = init->offset; i < init->offset + count; i++) {
- if (func.func == nv_ro08(bios, i))
+ if (func.func == nvbios_rd08(bios, i))
break;
}
trace("\tFUNC[0x%02x]", func.func);
if (i == (init->offset + count)) {
cont(" *");
- if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio, func.func);
+ if (init_exec(init))
+ nvkm_gpio_reset(gpio, func.func);
}
cont("\n");
}
@@ -2145,6 +2219,8 @@ static struct nvbios_init_opcode {
[0x56] = { init_condition_time },
[0x57] = { init_ltime },
[0x58] = { init_zm_reg_sequence },
+ [0x59] = { init_pll_indirect },
+ [0x5a] = { init_zm_reg_indirect },
[0x5b] = { init_sub_direct },
[0x5c] = { init_jump },
[0x5e] = { init_i2c_if },
@@ -2162,9 +2238,11 @@ static struct nvbios_init_opcode {
[0x6f] = { init_macro },
[0x71] = { init_done },
[0x72] = { init_resume },
+ [0x73] = { init_strap_condition },
[0x74] = { init_time },
[0x75] = { init_condition },
[0x76] = { init_io_condition },
+ [0x77] = { init_zm_reg16 },
[0x78] = { init_index_io },
[0x79] = { init_pll },
[0x7a] = { init_zm_reg },
@@ -2192,7 +2270,7 @@ nvbios_exec(struct nvbios_init *init)
{
init->nested++;
while (init->offset) {
- u8 opcode = nv_ro08(init->bios, init->offset);
+ u8 opcode = nvbios_rd08(init->bios, init->offset);
if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
error("unknown opcode 0x%02x\n", opcode);
return -EINVAL;
@@ -2207,13 +2285,13 @@ nvbios_exec(struct nvbios_init *init)
int
nvbios_init(struct nvkm_subdev *subdev, bool execute)
{
- struct nvkm_bios *bios = nvkm_bios(subdev);
+ struct nvkm_bios *bios = subdev->device->bios;
int ret = 0;
int i = -1;
u16 data;
if (execute)
- nv_info(bios, "running init tables\n");
+ nvkm_debug(subdev, "running init tables\n");
while (!ret && (data = (init_script(bios, ++i)))) {
struct nvbios_init init = {
.subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index c4087df4f85e..3ddf0939ded3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -28,17 +28,18 @@
u16
mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct bit_entry x;
if (bit_entry(bios, 'x', &x)) {
- nv_debug(bios, "BIT 'x' table not present\n");
+ nvkm_debug(subdev, "BIT 'x' table not present\n");
return 0x0000;
}
*ver = x.version;
*hdr = x.length;
if (*ver != 1 || *hdr < 3) {
- nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+ nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
return 0x0000;
}
@@ -73,23 +74,24 @@ static u8 g98_sor_map[16] = {
u8
mxm_sor_map(struct nvkm_bios *bios, u8 conn)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, hdr;
u16 mxm = mxm_table(bios, &ver, &hdr);
if (mxm && hdr >= 6) {
- u16 map = nv_ro16(bios, mxm + 4);
+ u16 map = nvbios_rd16(bios, mxm + 4);
if (map) {
- ver = nv_ro08(bios, map);
+ ver = nvbios_rd08(bios, map);
if (ver == 0x10) {
- if (conn < nv_ro08(bios, map + 3)) {
- map += nv_ro08(bios, map + 1);
+ if (conn < nvbios_rd08(bios, map + 3)) {
+ map += nvbios_rd08(bios, map + 1);
map += conn;
- return nv_ro08(bios, map);
+ return nvbios_rd08(bios, map);
}
return 0x00;
}
- nv_warn(bios, "unknown sor map v%02x\n", ver);
+ nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
}
}
@@ -102,30 +104,31 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
if (bios->version.chip == 0x98)
return g98_sor_map[conn];
- nv_warn(bios, "missing sor map\n");
+ nvkm_warn(subdev, "missing sor map\n");
return 0x00;
}
u8
mxm_ddc_map(struct nvkm_bios *bios, u8 port)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
u8 ver, hdr;
u16 mxm = mxm_table(bios, &ver, &hdr);
if (mxm && hdr >= 8) {
- u16 map = nv_ro16(bios, mxm + 6);
+ u16 map = nvbios_rd16(bios, mxm + 6);
if (map) {
- ver = nv_ro08(bios, map);
+ ver = nvbios_rd08(bios, map);
if (ver == 0x10) {
- if (port < nv_ro08(bios, map + 3)) {
- map += nv_ro08(bios, map + 1);
+ if (port < nvbios_rd08(bios, map + 3)) {
+ map += nvbios_rd08(bios, map + 1);
map += port;
- return nv_ro08(bios, map);
+ return nvbios_rd08(bios, map);
}
return 0x00;
}
- nv_warn(bios, "unknown ddc map v%02x\n", ver);
+ nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index fd7dd718b2bf..955df29635c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -32,12 +32,13 @@ nvbios_npdeTe(struct nvkm_bios *bios, u32 base)
u8 ver; u16 hdr;
u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
if (data = (data + hdr + 0x0f) & ~0x0f, data) {
- switch (nv_ro32(bios, data + 0x00)) {
+ switch (nvbios_rd32(bios, data + 0x00)) {
case 0x4544504e: /* NPDE */
break;
default:
- nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
- data, nv_ro32(bios, data + 0x00));
+ nvkm_debug(&bios->subdev,
+ "%08x: NPDE signature (%08x) unknown\n",
+ data, nvbios_rd32(bios, data + 0x00));
data = 0;
break;
}
@@ -51,8 +52,8 @@ nvbios_npdeTp(struct nvkm_bios *bios, u32 base, struct nvbios_npdeT *info)
u32 data = nvbios_npdeTe(bios, base);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->image_size = nv_ro16(bios, data + 0x08) * 512;
- info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+ info->image_size = nvbios_rd16(bios, data + 0x08) * 512;
+ info->last = nvbios_rd08(bios, data + 0x0a) & 0x80;
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index df5978753ae8..67cb3aeb2da7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -27,19 +27,20 @@
u32
nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
{
- u32 data = nv_ro16(bios, base + 0x18);
+ u32 data = nvbios_rd16(bios, base + 0x18);
if (data) {
data += base;
- switch (nv_ro32(bios, data + 0x00)) {
+ switch (nvbios_rd32(bios, data + 0x00)) {
case 0x52494350: /* PCIR */
case 0x53494752: /* RGIS */
case 0x5344504e: /* NPDS */
- *hdr = nv_ro16(bios, data + 0x0a);
- *ver = nv_ro08(bios, data + 0x0c);
+ *hdr = nvbios_rd16(bios, data + 0x0a);
+ *ver = nvbios_rd08(bios, data + 0x0c);
break;
default:
- nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
- data, nv_ro32(bios, data + 0x00));
+ nvkm_debug(&bios->subdev,
+ "%08x: PCIR signature (%08x) unknown\n",
+ data, nvbios_rd32(bios, data + 0x00));
data = 0;
break;
}
@@ -54,15 +55,15 @@ nvbios_pcirTp(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr,
u32 data = nvbios_pcirTe(bios, base, ver, hdr);
memset(info, 0x00, sizeof(*info));
if (data) {
- info->vendor_id = nv_ro16(bios, data + 0x04);
- info->device_id = nv_ro16(bios, data + 0x06);
- info->class_code[0] = nv_ro08(bios, data + 0x0d);
- info->class_code[1] = nv_ro08(bios, data + 0x0e);
- info->class_code[2] = nv_ro08(bios, data + 0x0f);
- info->image_size = nv_ro16(bios, data + 0x10) * 512;
- info->image_rev = nv_ro16(bios, data + 0x12);
- info->image_type = nv_ro08(bios, data + 0x14);
- info->last = nv_ro08(bios, data + 0x15) & 0x80;
+ info->vendor_id = nvbios_rd16(bios, data + 0x04);
+ info->device_id = nvbios_rd16(bios, data + 0x06);
+ info->class_code[0] = nvbios_rd08(bios, data + 0x0d);
+ info->class_code[1] = nvbios_rd08(bios, data + 0x0e);
+ info->class_code[2] = nvbios_rd08(bios, data + 0x0f);
+ info->image_size = nvbios_rd16(bios, data + 0x10) * 512;
+ info->image_rev = nvbios_rd16(bios, data + 0x12);
+ info->image_type = nvbios_rd08(bios, data + 0x14);
+ info->last = nvbios_rd08(bios, data + 0x15) & 0x80;
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 382ae9cdbf58..aa7e33b42b30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -25,8 +25,6 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/perf.h>
-#include <core/device.h>
-
u16
nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
@@ -36,22 +34,22 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version <= 2) {
- perf = nv_ro16(bios, bit_P.offset + 0);
+ perf = nvbios_rd16(bios, bit_P.offset + 0);
if (perf) {
- *ver = nv_ro08(bios, perf + 0);
- *hdr = nv_ro08(bios, perf + 1);
+ *ver = nvbios_rd08(bios, perf + 0);
+ *hdr = nvbios_rd08(bios, perf + 1);
if (*ver >= 0x40 && *ver < 0x41) {
- *cnt = nv_ro08(bios, perf + 5);
- *len = nv_ro08(bios, perf + 2);
- *snr = nv_ro08(bios, perf + 4);
- *ssz = nv_ro08(bios, perf + 3);
+ *cnt = nvbios_rd08(bios, perf + 5);
+ *len = nvbios_rd08(bios, perf + 2);
+ *snr = nvbios_rd08(bios, perf + 4);
+ *ssz = nvbios_rd08(bios, perf + 3);
return perf;
} else
if (*ver >= 0x20 && *ver < 0x40) {
- *cnt = nv_ro08(bios, perf + 2);
- *len = nv_ro08(bios, perf + 3);
- *snr = nv_ro08(bios, perf + 4);
- *ssz = nv_ro08(bios, perf + 5);
+ *cnt = nvbios_rd08(bios, perf + 2);
+ *len = nvbios_rd08(bios, perf + 3);
+ *snr = nvbios_rd08(bios, perf + 4);
+ *ssz = nvbios_rd08(bios, perf + 5);
return perf;
}
}
@@ -59,13 +57,13 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
}
if (bios->bmp_offset) {
- if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
- perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+ if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
+ perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
if (perf) {
- *hdr = nv_ro08(bios, perf + 0);
- *ver = nv_ro08(bios, perf + 1);
- *cnt = nv_ro08(bios, perf + 2);
- *len = nv_ro08(bios, perf + 3);
+ *hdr = nvbios_rd08(bios, perf + 0);
+ *ver = nvbios_rd08(bios, perf + 1);
+ *cnt = nvbios_rd08(bios, perf + 2);
+ *len = nvbios_rd08(bios, perf + 3);
*snr = 0;
*ssz = 0;
return perf;
@@ -98,55 +96,55 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
{
u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
- info->pstate = nv_ro08(bios, perf + 0x00);
+ info->pstate = nvbios_rd08(bios, perf + 0x00);
switch (!!perf * *ver) {
case 0x12:
case 0x13:
case 0x14:
- info->core = nv_ro32(bios, perf + 0x01) * 10;
- info->memory = nv_ro32(bios, perf + 0x05) * 20;
- info->fanspeed = nv_ro08(bios, perf + 0x37);
+ info->core = nvbios_rd32(bios, perf + 0x01) * 10;
+ info->memory = nvbios_rd32(bios, perf + 0x05) * 20;
+ info->fanspeed = nvbios_rd08(bios, perf + 0x37);
if (*hdr > 0x38)
- info->voltage = nv_ro08(bios, perf + 0x38);
+ info->voltage = nvbios_rd08(bios, perf + 0x38);
break;
case 0x21:
case 0x23:
case 0x24:
- info->fanspeed = nv_ro08(bios, perf + 0x04);
- info->voltage = nv_ro08(bios, perf + 0x05);
- info->shader = nv_ro16(bios, perf + 0x06) * 1000;
+ info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+ info->voltage = nvbios_rd08(bios, perf + 0x05);
+ info->shader = nvbios_rd16(bios, perf + 0x06) * 1000;
info->core = info->shader + (signed char)
- nv_ro08(bios, perf + 0x08) * 1000;
- switch (nv_device(bios)->chipset) {
+ nvbios_rd08(bios, perf + 0x08) * 1000;
+ switch (bios->subdev.device->chipset) {
case 0x49:
case 0x4b:
- info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+ info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
break;
default:
- info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+ info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
break;
}
break;
case 0x25:
- info->fanspeed = nv_ro08(bios, perf + 0x04);
- info->voltage = nv_ro08(bios, perf + 0x05);
- info->core = nv_ro16(bios, perf + 0x06) * 1000;
- info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
- info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
+ info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+ info->voltage = nvbios_rd08(bios, perf + 0x05);
+ info->core = nvbios_rd16(bios, perf + 0x06) * 1000;
+ info->shader = nvbios_rd16(bios, perf + 0x0a) * 1000;
+ info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000;
break;
case 0x30:
- info->script = nv_ro16(bios, perf + 0x02);
+ info->script = nvbios_rd16(bios, perf + 0x02);
case 0x35:
- info->fanspeed = nv_ro08(bios, perf + 0x06);
- info->voltage = nv_ro08(bios, perf + 0x07);
- info->core = nv_ro16(bios, perf + 0x08) * 1000;
- info->shader = nv_ro16(bios, perf + 0x0a) * 1000;
- info->memory = nv_ro16(bios, perf + 0x0c) * 1000;
- info->vdec = nv_ro16(bios, perf + 0x10) * 1000;
- info->disp = nv_ro16(bios, perf + 0x14) * 1000;
+ info->fanspeed = nvbios_rd08(bios, perf + 0x06);
+ info->voltage = nvbios_rd08(bios, perf + 0x07);
+ info->core = nvbios_rd16(bios, perf + 0x08) * 1000;
+ info->shader = nvbios_rd16(bios, perf + 0x0a) * 1000;
+ info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000;
+ info->vdec = nvbios_rd16(bios, perf + 0x10) * 1000;
+ info->disp = nvbios_rd16(bios, perf + 0x14) * 1000;
break;
case 0x40:
- info->voltage = nv_ro08(bios, perf + 0x02);
+ info->voltage = nvbios_rd08(bios, perf + 0x02);
break;
default:
return 0x0000;
@@ -175,7 +173,7 @@ nvbios_perfSp(struct nvkm_bios *bios, u32 perfE, int idx,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x40:
- info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+ info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff) * 1000;
break;
default:
break;
@@ -193,7 +191,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
return -ENODEV;
if (ver >= 0x20 && ver < 0x40 && hdr > 6)
- fan->pwm_divisor = nv_ro16(bios, perf + 6);
+ fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
else
fan->pwm_divisor = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index ebd402e19dbf..125ec2ed6c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -27,7 +27,6 @@
#include <subdev/bios/pll.h>
#include <subdev/vga.h>
-#include <core/device.h>
struct pll_mapping {
u8 type;
@@ -84,20 +83,20 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
struct bit_entry bit_C;
if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
- u16 data = nv_ro16(bios, bit_C.offset + 8);
+ u16 data = nvbios_rd16(bios, bit_C.offset + 8);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *len = nv_ro08(bios, data + 2);
- *cnt = nv_ro08(bios, data + 3);
+ *ver = nvbios_rd08(bios, data + 0);
+ *hdr = nvbios_rd08(bios, data + 1);
+ *len = nvbios_rd08(bios, data + 2);
+ *cnt = nvbios_rd08(bios, data + 3);
return data;
}
}
if (bmp_version(bios) >= 0x0524) {
- u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+ u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
if (data) {
- *ver = nv_ro08(bios, data + 0);
+ *ver = nvbios_rd08(bios, data + 0);
*hdr = 1;
*cnt = 1;
*len = 0x18;
@@ -112,7 +111,8 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
static struct pll_mapping *
pll_map(struct nvkm_bios *bios)
{
- switch (nv_device(bios)->card_type) {
+ struct nvkm_device *device = bios->subdev.device;
+ switch (device->card_type) {
case NV_04:
case NV_10:
case NV_11:
@@ -123,12 +123,12 @@ pll_map(struct nvkm_bios *bios)
case NV_40:
return nv40_pll_mapping;
case NV_50:
- if (nv_device(bios)->chipset == 0x50)
+ if (device->chipset == 0x50)
return nv50_pll_mapping;
else
- if (nv_device(bios)->chipset < 0xa3 ||
- nv_device(bios)->chipset == 0xaa ||
- nv_device(bios)->chipset == 0xac)
+ if (device->chipset < 0xa3 ||
+ device->chipset == 0xaa ||
+ device->chipset == 0xac)
return g84_pll_mapping;
default:
return NULL;
@@ -146,8 +146,8 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
if (data && *ver >= 0x30) {
data += hdr;
while (cnt--) {
- if (nv_ro32(bios, data + 3) == reg) {
- *type = nv_ro08(bios, data + 0);
+ if (nvbios_rd32(bios, data + 3) == reg) {
+ *type = nvbios_rd08(bios, data + 0);
return data;
}
data += *len;
@@ -161,7 +161,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
u16 addr = (data += hdr);
*type = map->type;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg)
+ if (nvbios_rd32(bios, data) == map->reg)
return data;
data += *len;
}
@@ -188,8 +188,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
if (data && *ver >= 0x30) {
data += hdr;
while (cnt--) {
- if (nv_ro08(bios, data + 0) == type) {
- *reg = nv_ro32(bios, data + 3);
+ if (nvbios_rd08(bios, data + 0) == type) {
+ *reg = nvbios_rd32(bios, data + 3);
return data;
}
data += *len;
@@ -203,7 +203,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
u16 addr = (data += hdr);
*reg = map->reg;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg)
+ if (nvbios_rd32(bios, data) == map->reg)
return data;
data += *len;
}
@@ -222,6 +222,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
int
nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
+ struct nvkm_device *device = subdev->device;
u8 ver, len;
u32 reg = type;
u16 data;
@@ -245,12 +247,12 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
break;
case 0x10:
case 0x11:
- info->vco1.min_freq = nv_ro32(bios, data + 0);
- info->vco1.max_freq = nv_ro32(bios, data + 4);
- info->vco2.min_freq = nv_ro32(bios, data + 8);
- info->vco2.max_freq = nv_ro32(bios, data + 12);
- info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
- info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+ info->vco1.min_freq = nvbios_rd32(bios, data + 0);
+ info->vco1.max_freq = nvbios_rd32(bios, data + 4);
+ info->vco2.min_freq = nvbios_rd32(bios, data + 8);
+ info->vco2.max_freq = nvbios_rd32(bios, data + 12);
+ info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
+ info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
info->vco1.max_inputfreq = INT_MAX;
info->vco2.max_inputfreq = INT_MAX;
@@ -291,82 +293,82 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
break;
case 0x20:
case 0x21:
- info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
- info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
- info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
- info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
- info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
- info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
- info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
- info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
- info->vco1.min_n = nv_ro08(bios, data + 20);
- info->vco1.max_n = nv_ro08(bios, data + 21);
- info->vco1.min_m = nv_ro08(bios, data + 22);
- info->vco1.max_m = nv_ro08(bios, data + 23);
- info->vco2.min_n = nv_ro08(bios, data + 24);
- info->vco2.max_n = nv_ro08(bios, data + 25);
- info->vco2.min_m = nv_ro08(bios, data + 26);
- info->vco2.max_m = nv_ro08(bios, data + 27);
-
- info->max_p = nv_ro08(bios, data + 29);
+ info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+ info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+ info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
+ info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
+ info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+ info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+ info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
+ info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
+ info->vco1.min_n = nvbios_rd08(bios, data + 20);
+ info->vco1.max_n = nvbios_rd08(bios, data + 21);
+ info->vco1.min_m = nvbios_rd08(bios, data + 22);
+ info->vco1.max_m = nvbios_rd08(bios, data + 23);
+ info->vco2.min_n = nvbios_rd08(bios, data + 24);
+ info->vco2.max_n = nvbios_rd08(bios, data + 25);
+ info->vco2.min_m = nvbios_rd08(bios, data + 26);
+ info->vco2.max_m = nvbios_rd08(bios, data + 27);
+
+ info->max_p = nvbios_rd08(bios, data + 29);
info->max_p_usable = info->max_p;
if (bios->version.chip < 0x60)
info->max_p_usable = 0x6;
- info->bias_p = nv_ro08(bios, data + 30);
+ info->bias_p = nvbios_rd08(bios, data + 30);
if (len > 0x22)
- info->refclk = nv_ro32(bios, data + 31);
+ info->refclk = nvbios_rd32(bios, data + 31);
break;
case 0x30:
- data = nv_ro16(bios, data + 1);
-
- info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
- info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
- info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
- info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
- info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
- info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
- info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
- info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
- info->vco1.min_n = nv_ro08(bios, data + 16);
- info->vco1.max_n = nv_ro08(bios, data + 17);
- info->vco1.min_m = nv_ro08(bios, data + 18);
- info->vco1.max_m = nv_ro08(bios, data + 19);
- info->vco2.min_n = nv_ro08(bios, data + 20);
- info->vco2.max_n = nv_ro08(bios, data + 21);
- info->vco2.min_m = nv_ro08(bios, data + 22);
- info->vco2.max_m = nv_ro08(bios, data + 23);
- info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
- info->bias_p = nv_ro08(bios, data + 27);
- info->refclk = nv_ro32(bios, data + 28);
+ data = nvbios_rd16(bios, data + 1);
+
+ info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+ info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+ info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+ info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+ info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
+ info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
+ info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+ info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+ info->vco1.min_n = nvbios_rd08(bios, data + 16);
+ info->vco1.max_n = nvbios_rd08(bios, data + 17);
+ info->vco1.min_m = nvbios_rd08(bios, data + 18);
+ info->vco1.max_m = nvbios_rd08(bios, data + 19);
+ info->vco2.min_n = nvbios_rd08(bios, data + 20);
+ info->vco2.max_n = nvbios_rd08(bios, data + 21);
+ info->vco2.min_m = nvbios_rd08(bios, data + 22);
+ info->vco2.max_m = nvbios_rd08(bios, data + 23);
+ info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
+ info->bias_p = nvbios_rd08(bios, data + 27);
+ info->refclk = nvbios_rd32(bios, data + 28);
break;
case 0x40:
- info->refclk = nv_ro16(bios, data + 9) * 1000;
- data = nv_ro16(bios, data + 1);
-
- info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
- info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
- info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
- info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
- info->vco1.min_m = nv_ro08(bios, data + 8);
- info->vco1.max_m = nv_ro08(bios, data + 9);
- info->vco1.min_n = nv_ro08(bios, data + 10);
- info->vco1.max_n = nv_ro08(bios, data + 11);
- info->min_p = nv_ro08(bios, data + 12);
- info->max_p = nv_ro08(bios, data + 13);
+ info->refclk = nvbios_rd16(bios, data + 9) * 1000;
+ data = nvbios_rd16(bios, data + 1);
+
+ info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+ info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+ info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
+ info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
+ info->vco1.min_m = nvbios_rd08(bios, data + 8);
+ info->vco1.max_m = nvbios_rd08(bios, data + 9);
+ info->vco1.min_n = nvbios_rd08(bios, data + 10);
+ info->vco1.max_n = nvbios_rd08(bios, data + 11);
+ info->min_p = nvbios_rd08(bios, data + 12);
+ info->max_p = nvbios_rd08(bios, data + 13);
break;
default:
- nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+ nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
return -EINVAL;
}
if (!info->refclk) {
- info->refclk = nv_device(bios)->crystal;
+ info->refclk = device->crystal;
if (bios->version.chip == 0x51) {
- u32 sel_clk = nv_rd32(bios, 0x680524);
+ u32 sel_clk = nvkm_rd32(device, 0x680524);
if ((info->reg == 0x680508 && sel_clk & 0x20) ||
(info->reg == 0x680520 && sel_clk & 0x80)) {
- if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+ if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
info->refclk = 200000;
else
info->refclk = 25000;
@@ -380,8 +382,8 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
* with an empty limit table (seen on nv18)
*/
if (!info->vco1.max_freq) {
- info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
- info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+ info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
+ info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
if (bmp_version(bios) < 0x0506) {
info->vco1.max_freq = 256000;
info->vco1.min_freq = 128000;
@@ -393,7 +395,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
info->vco1.max_n = 0xff;
info->vco1.min_m = 0x1;
- if (nv_device(bios)->crystal == 13500) {
+ if (device->crystal == 13500) {
/* nv05 does this, nv11 doesn't, nv10 unknown */
if (bios->version.chip < 0x11)
info->vco1.min_m = 0x7;
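
The pll.c hunks above are representative of the whole conversion: every nv_roXX(bios, ...) read of the shadowed VBIOS image becomes nvbios_rdXX(bios, ...), dropping the nvkm_object indirection. In sketch form, the shape of such accessors looks roughly like this (a hypothetical simplification assuming nouveau's struct nvkm_bios with data/size populated; the real nvbios_rd* helpers also extend the shadow on demand, as the shadow.c changes further below show):

/* Sketch only: little-endian reads from an already-shadowed image. */
static inline u8
sketch_rd08(struct nvkm_bios *bios, u32 addr)
{
	if (addr < bios->size)
		return bios->data[addr];
	return 0x00;
}

static inline u16
sketch_rd16(struct nvkm_bios *bios, u32 addr)
{
	return sketch_rd08(bios, addr) | sketch_rd08(bios, addr + 1) << 8;
}

static inline u32
sketch_rd32(struct nvkm_bios *bios, u32 addr)
{
	return sketch_rd16(bios, addr) | (u32)sketch_rd16(bios, addr + 2) << 16;
}
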
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 20c5ce0cd573..441ec451b788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -49,12 +49,12 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'p', &bit_p)) {
if (bit_p.version == 2 && bit_p.length >= 4)
- data = nv_ro32(bios, bit_p.offset + 0x00);
+ data = nvbios_rd32(bios, bit_p.offset + 0x00);
if ((data = weirdo_pointer(bios, data))) {
- *ver = nv_ro08(bios, data + 0x00); /* maybe? */
- *hdr = nv_ro08(bios, data + 0x01);
- *len = nv_ro08(bios, data + 0x02);
- *cnt = nv_ro08(bios, data + 0x03);
+ *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *len = nvbios_rd08(bios, data + 0x02);
+ *cnt = nvbios_rd08(bios, data + 0x03);
}
}
@@ -95,8 +95,8 @@ nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
default:
- info->type = nv_ro08(bios, data + 0x00);
- info->data = nv_ro32(bios, data + 0x02);
+ info->type = nvbios_rd08(bios, data + 0x00);
+ info->data = nvbios_rd32(bios, data + 0x02);
break;
}
return data;
@@ -112,21 +112,21 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
if ( pmuE.type == type &&
(data = weirdo_pointer(bios, pmuE.data))) {
- info->init_addr_pmu = nv_ro32(bios, data + 0x08);
- info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+ info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
+ info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
info->boot_addr = data + 0x30;
- info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
- nv_ro32(bios, data + 0x18);
- info->boot_size = nv_ro32(bios, data + 0x1c) -
- nv_ro32(bios, data + 0x18);
+ info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
+ nvbios_rd32(bios, data + 0x18);
+ info->boot_size = nvbios_rd32(bios, data + 0x1c) -
+ nvbios_rd32(bios, data + 0x18);
info->code_addr = info->boot_addr + info->boot_size;
info->code_addr_pmu = info->boot_addr_pmu +
info->boot_size;
- info->code_size = nv_ro32(bios, data + 0x20);
+ info->code_size = nvbios_rd32(bios, data + 0x20);
info->data_addr = data + 0x30 +
- nv_ro32(bios, data + 0x24);
- info->data_addr_pmu = nv_ro32(bios, data + 0x28);
- info->data_size = nv_ro32(bios, data + 0x2c);
+ nvbios_rd32(bios, data + 0x24);
+ info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
+ info->data_size = nvbios_rd32(bios, data + 0x2c);
return true;
}
}
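
The nvbios_pmuRm changes read the PMU image layout from a run of 32-bit header fields; the arithmetic is easier to follow pulled out on its own. A hedged sketch, with offsets exactly as read above (the field meanings are nouveau's reverse engineering, per the "maybe?" comments in this table, so treat the names as illustrative):

/* Sketch: derive the boot/code segment placement used by
 * nvbios_pmuRm.  'data' points at the PMU image header. */
struct sketch_pmu_seg {
	u32 img;	/* offset within the VBIOS image */
	u32 pmu;	/* load address on the PMU itself */
	u32 size;
};

static void
sketch_pmu_layout(struct nvkm_bios *bios, u32 data,
		  struct sketch_pmu_seg *boot, struct sketch_pmu_seg *code)
{
	boot->img  = data + 0x30;
	boot->pmu  = nvbios_rd32(bios, data + 0x10) +
		     nvbios_rd32(bios, data + 0x18);
	boot->size = nvbios_rd32(bios, data + 0x1c) -
		     nvbios_rd32(bios, data + 0x18);
	code->img  = boot->img + boot->size;
	code->pmu  = boot->pmu + boot->size;
	code->size = nvbios_rd32(bios, data + 0x20);
}
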
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 95e4fa1531d6..e0ec2a6b7b79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,5 +1,6 @@
#ifndef __NVKM_BIOS_PRIV_H__
#define __NVKM_BIOS_PRIV_H__
+#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
#include <subdev/bios.h>
struct nvbios_source {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index a17b221119b2..d5222af10b96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -29,7 +29,7 @@
static u8
nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
{
- return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
+ return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
}
u8
@@ -39,9 +39,9 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
- return nv_ro08(bios, bit_M.offset + 2);
+ return nvbios_rd08(bios, bit_M.offset + 2);
if (bit_M.version == 2 && bit_M.length >= 3)
- return nv_ro08(bios, bit_M.offset + 0);
+ return nvbios_rd08(bios, bit_M.offset + 0);
}
return 0x00;
@@ -50,7 +50,7 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
u8
nvbios_ramcfg_index(struct nvkm_subdev *subdev)
{
- struct nvkm_bios *bios = nvkm_bios(subdev);
+ struct nvkm_bios *bios = subdev->device->bios;
u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
@@ -59,7 +59,7 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
- xlat = nv_ro16(bios, bit_M.offset + 3);
+ xlat = nvbios_rd16(bios, bit_M.offset + 3);
if (bit_M.version == 2 && bit_M.length >= 3) {
/*XXX: is M ever shorter than this?
* if not - what is xlat used for now?
@@ -68,11 +68,11 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
if (bit_M.length >= 7 &&
nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
return M0203E.group;
- xlat = nv_ro16(bios, bit_M.offset + 1);
+ xlat = nvbios_rd16(bios, bit_M.offset + 1);
}
}
if (xlat)
- strap = nv_ro08(bios, xlat + strap);
+ strap = nvbios_rd08(bios, xlat + strap);
return strap;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index 8b17bb4b220c..f0e1fc74a52e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -34,18 +34,18 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- rammap = nv_ro16(bios, bit_P.offset + 4);
+ rammap = nvbios_rd16(bios, bit_P.offset + 4);
if (rammap) {
- *ver = nv_ro08(bios, rammap + 0);
+ *ver = nvbios_rd08(bios, rammap + 0);
switch (*ver) {
case 0x10:
case 0x11:
- *hdr = nv_ro08(bios, rammap + 1);
- *cnt = nv_ro08(bios, rammap + 5);
- *len = nv_ro08(bios, rammap + 2);
- *snr = nv_ro08(bios, rammap + 4);
- *ssz = nv_ro08(bios, rammap + 3);
+ *hdr = nvbios_rd08(bios, rammap + 1);
+ *cnt = nvbios_rd08(bios, rammap + 5);
+ *len = nvbios_rd08(bios, rammap + 2);
+ *snr = nvbios_rd08(bios, rammap + 4);
+ *ssz = nvbios_rd08(bios, rammap + 3);
return rammap;
default:
break;
@@ -72,6 +72,21 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
return 0x0000;
}
+/* Pretend a performance mode is also a rammap entry; this helps coalesce
+ * entries later on. */
+u32
+nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+ struct nvbios_ramcfg *p)
+{
+ memset(p, 0x00, sizeof(*p));
+
+ p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
+ p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
+ p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
+
+ return data;
+}
+
u32
nvbios_rammapEp(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
@@ -82,18 +97,18 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
p->rammap_hdr = *hdr;
switch (!!data * *ver) {
case 0x10:
- p->rammap_min = nv_ro16(bios, data + 0x00);
- p->rammap_max = nv_ro16(bios, data + 0x02);
- p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
- p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
+ p->rammap_min = nvbios_rd16(bios, data + 0x00);
+ p->rammap_max = nvbios_rd16(bios, data + 0x02);
+ p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+ p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
break;
case 0x11:
- p->rammap_min = nv_ro16(bios, data + 0x00);
- p->rammap_max = nv_ro16(bios, data + 0x02);
- p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
- p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
- p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
- temp = nv_ro32(bios, data + 0x09);
+ p->rammap_min = nvbios_rd16(bios, data + 0x00);
+ p->rammap_max = nvbios_rd16(bios, data + 0x02);
+ p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+ p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
+ p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+ temp = nvbios_rd32(bios, data + 0x09);
p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
@@ -102,10 +117,10 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
- p->rammap_11_0d = nv_ro08(bios, data + 0x0d);
- p->rammap_11_0e = nv_ro08(bios, data + 0x0e);
- p->rammap_11_0f = nv_ro08(bios, data + 0x0f);
- p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+ p->rammap_11_0d = nvbios_rd08(bios, data + 0x0d);
+ p->rammap_11_0e = nvbios_rd08(bios, data + 0x0e);
+ p->rammap_11_0f = nvbios_rd08(bios, data + 0x0f);
+ p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
break;
default:
data = 0;
@@ -141,6 +156,36 @@ nvbios_rammapSe(struct nvkm_bios *bios, u32 data,
}
u32
+nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+ struct nvbios_ramcfg *p)
+{
+ data += (idx * size);
+
+ if (size < 11)
+ return 0x00000000;
+
+ p->ramcfg_ver = 0;
+ p->ramcfg_timing = nvbios_rd08(bios, data + 0x01);
+ p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
+ p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
+ p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
+ p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+	p->ramcfg_RON = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 4;
+ p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+ p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
+ p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
+ p->ramcfg_00_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+ p->ramcfg_00_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+ p->ramcfg_00_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+ p->ramcfg_00_08 = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+ p->ramcfg_00_09 = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
+ p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
+ p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
+
+ return data;
+}
+
+u32
nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
@@ -150,58 +195,58 @@ nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
p->ramcfg_hdr = *hdr;
switch (!!data * *ver) {
case 0x10:
- p->ramcfg_timing = nv_ro08(bios, data + 0x01);
- p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0;
- p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1;
- p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
- p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
- p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
- p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
- p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
- p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
- p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
- p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
- p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
- p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
- p->ramcfg_10_08 = (nv_ro08(bios, data + 0x08) & 0xff) >> 0;
- p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0;
- p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4;
+ p->ramcfg_timing = nvbios_rd08(bios, data + 0x01);
+ p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
+ p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
+ p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+ p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+ p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+ p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
+ p->ramcfg_DLLoff = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
+ p->ramcfg_10_05 = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+ p->ramcfg_10_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+ p->ramcfg_10_07 = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+ p->ramcfg_10_08 = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+ p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
+ p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
break;
case 0x11:
- p->ramcfg_timing = nv_ro08(bios, data + 0x00);
- p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
- p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
- p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
- p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
- p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
- p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
- p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
- p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
- p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
- p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
- p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
- p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
- p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
- p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
- p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
- p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
- p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
- p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
- p->ramcfg_11_04 = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
- p->ramcfg_11_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
- p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
- p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
- p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
- p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
- p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
- p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
- p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
- p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
- p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
- p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
- p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
- p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
- p->ramcfg_11_09 = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+ p->ramcfg_timing = nvbios_rd08(bios, data + 0x00);
+ p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
+ p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
+ p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
+ p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
+ p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
+ p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
+ p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
+ p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
+ p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
+ p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+ p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+ p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+ p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
+ p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
+ p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
+ p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
+ p->ramcfg_11_04 = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
+ p->ramcfg_11_06 = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+ p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
+ p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
+ p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
+ p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
+ p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
+ p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
+ p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+ p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
+ p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
+ p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
+ p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+ p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
+ p->ramcfg_11_09 = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
break;
default:
data = 0;
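
Every parser in rammap.c (and in timing.c and volt.c below) unpacks named bitfields from single BIOS bytes with the same mask-and-shift idiom. Condensed into one hypothetical helper (sketch_field is not part of the nouveau API; it is just the open-coded expression above given a name):

/* Sketch: extract (byte & mask) >> shift from the BIOS image. */
static inline u8
sketch_field(struct nvkm_bios *bios, u32 addr, u8 mask, u8 shift)
{
	return (nvbios_rd08(bios, addr) & mask) >> shift;
}

/* e.g. ramcfg_11_01_20 above is sketch_field(bios, data + 0x01, 0x20, 5) */
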
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 8c2b7cba5cff..792f017525f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -23,13 +23,11 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/image.h>
struct shadow {
- struct nvkm_oclass base;
u32 skip;
const struct nvbios_source *func;
void *data;
@@ -38,9 +36,8 @@ struct shadow {
};
static bool
-shadow_fetch(struct nvkm_bios *bios, u32 upto)
+shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
{
- struct shadow *mthd = (void *)nv_object(bios)->oclass;
const u32 limit = (upto + 3) & ~3;
const u32 start = bios->size;
void *data = mthd->data;
@@ -51,65 +48,35 @@ shadow_fetch(struct nvkm_bios *bios, u32 upto)
return bios->size >= limit;
}
-static u8
-shadow_rd08(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_bios *bios = (void *)object;
- if (shadow_fetch(bios, addr + 1))
- return bios->data[addr];
- return 0x00;
-}
-
-static u16
-shadow_rd16(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_bios *bios = (void *)object;
- if (shadow_fetch(bios, addr + 2))
- return get_unaligned_le16(&bios->data[addr]);
- return 0x0000;
-}
-
-static u32
-shadow_rd32(struct nvkm_object *object, u64 addr)
-{
- struct nvkm_bios *bios = (void *)object;
- if (shadow_fetch(bios, addr + 4))
- return get_unaligned_le32(&bios->data[addr]);
- return 0x00000000;
-}
-
-static struct nvkm_oclass
-shadow_class = {
- .handle = NV_SUBDEV(VBIOS, 0x00),
- .ofuncs = &(struct nvkm_ofuncs) {
- .rd08 = shadow_rd08,
- .rd16 = shadow_rd16,
- .rd32 = shadow_rd32,
- },
-};
-
static int
-shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
+shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_image image;
int score = 1;
+ if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+ nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
+ return 0;
+ }
+
if (!nvbios_image(bios, idx, &image)) {
- nv_debug(bios, "image %d invalid\n", idx);
+ nvkm_debug(subdev, "image %d invalid\n", idx);
return 0;
}
- nv_debug(bios, "%08x: type %02x, %d bytes\n",
- image.base, image.type, image.size);
+ nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
+ image.base, image.type, image.size);
- if (!shadow_fetch(bios, image.size)) {
- nv_debug(bios, "%08x: fetch failed\n", image.base);
+ if (!shadow_fetch(bios, mthd, image.size)) {
+ nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
switch (image.type) {
case 0x00:
if (nvbios_checksum(&bios->data[image.base], image.size)) {
- nv_debug(bios, "%08x: checksum failed\n", image.base);
+ nvkm_debug(subdev, "%08x: checksum failed\n",
+ image.base);
if (mthd->func->rw)
score += 1;
score += 1;
@@ -123,28 +90,17 @@ shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
}
if (!image.last)
- score += shadow_image(bios, idx + 1, mthd);
+ score += shadow_image(bios, idx + 1, offset + image.size, mthd);
return score;
}
static int
-shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
-{
- struct nvkm_oclass *oclass = nv_object(bios)->oclass;
- int score;
- nv_object(bios)->oclass = &mthd->base;
- score = shadow_image(bios, 0, mthd);
- nv_object(bios)->oclass = oclass;
- return score;
-
-}
-
-static int
shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
{
const struct nvbios_source *func = mthd->func;
+ struct nvkm_subdev *subdev = &bios->subdev;
if (func->name) {
- nv_debug(bios, "trying %s...\n", name ? name : func->name);
+ nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
if (func->init) {
mthd->data = func->init(bios, name);
if (IS_ERR(mthd->data)) {
@@ -152,10 +108,10 @@ shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
return 0;
}
}
- mthd->score = shadow_score(bios, mthd);
+ mthd->score = shadow_image(bios, 0, 0, mthd);
if (func->fini)
func->fini(mthd->data);
- nv_debug(bios, "scored %d\n", mthd->score);
+ nvkm_debug(subdev, "scored %d\n", mthd->score);
mthd->data = bios->data;
mthd->size = bios->size;
bios->data = NULL;
@@ -178,7 +134,7 @@ shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
shadow_fw_init(struct nvkm_bios *bios, const char *name)
{
- struct device *dev = &nv_device(bios)->pdev->dev;
+ struct device *dev = bios->subdev.device->dev;
const struct firmware *fw;
int ret = request_firmware(&fw, name, dev);
if (ret)
@@ -198,22 +154,24 @@ shadow_fw = {
int
nvbios_shadow(struct nvkm_bios *bios)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
+ struct nvkm_device *device = subdev->device;
struct shadow mthds[] = {
- { shadow_class, 0, &nvbios_of },
- { shadow_class, 0, &nvbios_ramin },
- { shadow_class, 0, &nvbios_rom },
- { shadow_class, 0, &nvbios_acpi_fast },
- { shadow_class, 4, &nvbios_acpi_slow },
- { shadow_class, 1, &nvbios_pcirom },
- { shadow_class, 1, &nvbios_platform },
- { shadow_class }
- }, *mthd = mthds, *best = NULL;
+ { 0, &nvbios_of },
+ { 0, &nvbios_ramin },
+ { 0, &nvbios_rom },
+ { 0, &nvbios_acpi_fast },
+ { 4, &nvbios_acpi_slow },
+ { 1, &nvbios_pcirom },
+ { 1, &nvbios_platform },
+ {}
+ }, *mthd, *best = NULL;
const char *optarg;
char *source;
int optlen;
/* handle user-specified bios source */
- optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+ optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
if (source) {
/* try to match one of the built-in methods */
@@ -234,7 +192,7 @@ nvbios_shadow(struct nvkm_bios *bios)
}
if (!best->score) {
- nv_error(bios, "%s invalid\n", source);
+ nvkm_error(subdev, "%s invalid\n", source);
kfree(source);
source = NULL;
}
@@ -259,12 +217,12 @@ nvbios_shadow(struct nvkm_bios *bios)
}
if (!best->score) {
- nv_fatal(bios, "unable to locate usable image\n");
+ nvkm_error(subdev, "unable to locate usable image\n");
return -EINVAL;
}
- nv_info(bios, "using image from %s\n", best->func ?
- best->func->name : source);
+ nvkm_debug(subdev, "using image from %s\n", best->func ?
+ best->func->name : source);
bios->data = best->data;
bios->size = best->size;
kfree(source);
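
With the oclass-swapping shadow_score() gone, nvbios_shadow() drives image scoring directly through shadow_image(). Stripped of the user-override handling and early-exit details above, the selection loop amounts to something like this sketch (names follow struct shadow in this file; error handling and the skip heuristics are elided):

/* Sketch: score every probe method and keep the best image. */
static struct shadow *
sketch_pick(struct nvkm_bios *bios, struct shadow *mthds)
{
	struct shadow *mthd, *best = NULL;

	for (mthd = mthds; mthd->func; mthd++) {
		shadow_method(bios, mthd, NULL);
		if (!best || mthd->score > best->score)
			best = mthd;
	}
	return best;
}
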
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index f9d0eb5647fa..8fecb5ff22a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,14 +22,12 @@
*/
#include "priv.h"
-#include <core/device.h>
-
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
#else
static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
{
return false;
}
@@ -90,7 +88,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
- if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+ if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
return ERR_PTR(-ENODEV);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4c19a7dba803..bd60d7dd09f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -21,8 +21,7 @@
*
*/
#include "priv.h"
-
-#include <core/device.h>
+#include <core/pci.h>
#if defined(__powerpc__)
struct priv {
@@ -44,7 +43,8 @@ of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
of_init(struct nvkm_bios *bios, const char *name)
{
- struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev = device->func->pci(device)->pdev;
struct device_node *dn;
struct priv *priv;
if (!(dn = pci_device_to_OF_node(pdev)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1b045483dc87..9b91da09dc5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -22,7 +22,7 @@
*/
#include "priv.h"
-#include <core/device.h>
+#include <core/pci.h>
struct priv {
struct pci_dev *pdev;
@@ -53,10 +53,16 @@ pcirom_fini(void *data)
static void *
pcirom_init(struct nvkm_bios *bios, const char *name)
{
- struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct nvkm_device *device = bios->subdev.device;
struct priv *priv = NULL;
+ struct pci_dev *pdev;
int ret;
+ if (device->func->pci)
+ pdev = device->func->pci(device)->pdev;
+ else
+ return ERR_PTR(-ENODEV);
+
if (!(ret = pci_enable_rom(pdev))) {
if (ret = -ENOMEM,
(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
@@ -85,10 +91,16 @@ nvbios_pcirom = {
static void *
platform_init(struct nvkm_bios *bios, const char *name)
{
- struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct nvkm_device *device = bios->subdev.device;
+ struct pci_dev *pdev;
struct priv *priv;
int ret = -ENOMEM;
+ if (device->func->pci)
+ pdev = device->func->pci(device)->pdev;
+ else
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
if (ret = -ENODEV,
(priv->rom = pci_platform_rom(pdev, &priv->size)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index abe8ae4d3a9f..0f537c22804c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -22,8 +22,6 @@
*/
#include "priv.h"
-#include <core/device.h>
-
struct priv {
struct nvkm_bios *bios;
u32 bar0;
@@ -32,10 +30,11 @@ struct priv {
static u32
pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
+ struct nvkm_device *device = bios->subdev.device;
u32 i;
if (offset + length <= 0x00100000) {
for (i = offset; i < offset + length; i += 4)
- *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+ *(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
return length;
}
return 0;
@@ -46,7 +45,8 @@ pramin_fini(void *data)
{
struct priv *priv = data;
if (priv) {
- nv_wr32(priv->bios, 0x001700, priv->bar0);
+ struct nvkm_device *device = priv->bios->subdev.device;
+ nvkm_wr32(device, 0x001700, priv->bar0);
kfree(priv);
}
}
@@ -54,21 +54,23 @@ pramin_fini(void *data)
static void *
pramin_init(struct nvkm_bios *bios, const char *name)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
+ struct nvkm_device *device = subdev->device;
struct priv *priv = NULL;
u64 addr = 0;
/* PRAMIN always potentially available prior to nv50 */
- if (nv_device(bios)->card_type < NV_50)
+ if (device->card_type < NV_50)
return NULL;
/* we can't get the bios image pointer without PDISP */
- if (nv_device(bios)->card_type >= GM100)
- addr = nv_rd32(bios, 0x021c04);
+ if (device->card_type >= GM100)
+ addr = nvkm_rd32(device, 0x021c04);
else
- if (nv_device(bios)->card_type >= NV_C0)
- addr = nv_rd32(bios, 0x022500);
+ if (device->card_type >= NV_C0)
+ addr = nvkm_rd32(device, 0x022500);
if (addr & 0x00000001) {
- nv_debug(bios, "... display disabled\n");
+ nvkm_debug(subdev, "... display disabled\n");
return ERR_PTR(-ENODEV);
}
@@ -76,32 +78,32 @@ pramin_init(struct nvkm_bios *bios, const char *name)
* important as we don't want to be touching vram on an
* uninitialised board
*/
- addr = nv_rd32(bios, 0x619f04);
+ addr = nvkm_rd32(device, 0x619f04);
if (!(addr & 0x00000008)) {
- nv_debug(bios, "... not enabled\n");
+ nvkm_debug(subdev, "... not enabled\n");
return ERR_PTR(-ENODEV);
}
if ( (addr & 0x00000003) != 1) {
- nv_debug(bios, "... not in vram\n");
+ nvkm_debug(subdev, "... not in vram\n");
return ERR_PTR(-ENODEV);
}
/* some alternate method inherited from xf86-video-nv... */
addr = (addr & 0xffffff00) << 8;
if (!addr) {
- addr = (u64)nv_rd32(bios, 0x001700) << 16;
+ addr = (u64)nvkm_rd32(device, 0x001700) << 16;
addr += 0xf0000;
}
/* modify bar0 PRAMIN window to cover the bios image */
if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
- nv_error(bios, "... out of memory\n");
+ nvkm_error(subdev, "... out of memory\n");
return ERR_PTR(-ENOMEM);
}
priv->bios = bios;
- priv->bar0 = nv_rd32(bios, 0x001700);
- nv_wr32(bios, 0x001700, addr >> 16);
+ priv->bar0 = nvkm_rd32(device, 0x001700);
+ nvkm_wr32(device, 0x001700, addr >> 16);
return priv;
}
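
pramin_init() repositions the BAR0 PRAMIN window (register 0x001700, in 64KiB units) over the BIOS image, and pramin_fini() restores it. The same bracket, condensed into one hedged sketch (register semantics follow the code above; this is not a public API):

/* Sketch: peek one word of vram through the PRAMIN window,
 * saving and restoring the window position around the access. */
static u32
sketch_pramin_peek(struct nvkm_device *device, u64 addr)
{
	u32 save = nvkm_rd32(device, 0x001700);
	u32 data;

	nvkm_wr32(device, 0x001700, addr >> 16);
	data = nvkm_rd32(device, 0x700000 + (addr & 0xffff));
	nvkm_wr32(device, 0x001700, save);
	return data;
}
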
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index 6ec3b237925e..ffa4b395220a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -22,15 +22,16 @@
*/
#include "priv.h"
-#include <core/device.h>
+#include <subdev/pci.h>
static u32
prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
+ struct nvkm_device *device = data;
u32 i;
if (offset + length <= 0x00100000) {
for (i = offset; i < offset + length; i += 4)
- *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+ *(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);
return length;
}
return 0;
@@ -39,25 +40,18 @@ prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void
prom_fini(void *data)
{
- struct nvkm_bios *bios = data;
- if (nv_device(bios)->card_type < NV_50)
- nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
- else
- nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+ struct nvkm_device *device = data;
+ nvkm_pci_rom_shadow(device->pci, true);
}
static void *
prom_init(struct nvkm_bios *bios, const char *name)
{
- if (nv_device(bios)->card_type < NV_50) {
- if (nv_device(bios)->card_type == NV_40 &&
- nv_device(bios)->chipset >= 0x4c)
- return ERR_PTR(-ENODEV);
- nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
- } else {
- nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
- }
- return bios;
+ struct nvkm_device *device = bios->subdev.device;
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ return ERR_PTR(-ENODEV);
+ nvkm_pci_rom_shadow(device->pci, false);
+ return device;
}
const struct nvbios_source
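
shadowrom.c now delegates the chipset-specific shadow enable/disable registers to nvkm_pci_rom_shadow(). The access pattern it brackets is, in sketch form (offset 0x300000 as in prom_read above; error paths elided):

/* Sketch: un-shadow the PROM around a read, then re-shadow it. */
static u32
sketch_prom_peek(struct nvkm_device *device, u32 offset)
{
	u32 data;

	nvkm_pci_rom_shadow(device->pci, false);	/* expose PROM */
	data = nvkm_rd32(device, 0x300000 + offset);
	nvkm_pci_rom_shadow(device->pci, true);		/* restore shadow */
	return data;
}
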
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index 249ff6d583df..a54cfec0550d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,8 +25,6 @@
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
-#include <core/device.h>
-
static u16
therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
@@ -35,24 +33,24 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- therm = nv_ro16(bios, bit_P.offset + 12);
+ therm = nvbios_rd16(bios, bit_P.offset + 12);
else if (bit_P.version == 2)
- therm = nv_ro16(bios, bit_P.offset + 16);
+ therm = nvbios_rd16(bios, bit_P.offset + 16);
else
- nv_error(bios,
- "unknown offset for thermal in BIT P %d\n",
- bit_P.version);
+ nvkm_error(&bios->subdev,
+ "unknown offset for thermal in BIT P %d\n",
+ bit_P.version);
}
/* exit now if we haven't found the thermal table */
if (!therm)
return 0x0000;
- *ver = nv_ro08(bios, therm + 0);
- *hdr = nv_ro08(bios, therm + 1);
- *len = nv_ro08(bios, therm + 2);
- *cnt = nv_ro08(bios, therm + 3);
- return therm + nv_ro08(bios, therm + 1);
+ *ver = nvbios_rd08(bios, therm + 0);
+ *hdr = nvbios_rd08(bios, therm + 1);
+ *len = nvbios_rd08(bios, therm + 2);
+ *cnt = nvbios_rd08(bios, therm + 3);
+ return therm + nvbios_rd08(bios, therm + 1);
}
static u16
@@ -83,9 +81,9 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
sensor_section = -1;
i = 0;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
- s16 value = nv_ro16(bios, entry + 1);
+ s16 value = nvbios_rd16(bios, entry + 1);
- switch (nv_ro08(bios, entry + 0)) {
+ switch (nvbios_rd08(bios, entry + 0)) {
case 0x0:
thrs_section = value;
if (value > 0)
@@ -94,7 +92,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
case 0x01:
sensor_section++;
if (sensor_section == 0) {
- offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+ offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
sensor->offset_constant = offset;
}
break;
@@ -165,9 +163,9 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
fan->nr_fan_trip = 0;
fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
- s16 value = nv_ro16(bios, entry + 1);
+ s16 value = nvbios_rd16(bios, entry + 1);
- switch (nv_ro08(bios, entry + 0)) {
+ switch (nvbios_rd08(bios, entry + 0)) {
case 0x22:
fan->min_duty = value & 0xff;
fan->max_duty = (value & 0xff00) >> 8;
@@ -198,14 +196,14 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
case 0x46:
if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
- fan->linear_min_temp = nv_ro08(bios, entry + 1);
- fan->linear_max_temp = nv_ro08(bios, entry + 2);
+ fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
+ fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
break;
}
}
/* starting from fermi, fan management is always linear */
- if (nv_device(bios)->card_type >= NV_C0 &&
+ if (bios->subdev.device->card_type >= NV_C0 &&
fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 763fd29a58f2..99f6432ac0af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -34,27 +34,27 @@ nvbios_timingTe(struct nvkm_bios *bios,
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 1)
- timing = nv_ro16(bios, bit_P.offset + 4);
+ timing = nvbios_rd16(bios, bit_P.offset + 4);
else
if (bit_P.version == 2)
- timing = nv_ro16(bios, bit_P.offset + 8);
+ timing = nvbios_rd16(bios, bit_P.offset + 8);
if (timing) {
- *ver = nv_ro08(bios, timing + 0);
+ *ver = nvbios_rd08(bios, timing + 0);
switch (*ver) {
case 0x10:
- *hdr = nv_ro08(bios, timing + 1);
- *cnt = nv_ro08(bios, timing + 2);
- *len = nv_ro08(bios, timing + 3);
+ *hdr = nvbios_rd08(bios, timing + 1);
+ *cnt = nvbios_rd08(bios, timing + 2);
+ *len = nvbios_rd08(bios, timing + 3);
*snr = 0;
*ssz = 0;
return timing;
case 0x20:
- *hdr = nv_ro08(bios, timing + 1);
- *cnt = nv_ro08(bios, timing + 5);
- *len = nv_ro08(bios, timing + 2);
- *snr = nv_ro08(bios, timing + 4);
- *ssz = nv_ro08(bios, timing + 3);
+ *hdr = nvbios_rd08(bios, timing + 1);
+ *cnt = nvbios_rd08(bios, timing + 5);
+ *len = nvbios_rd08(bios, timing + 2);
+ *snr = nvbios_rd08(bios, timing + 4);
+ *ssz = nvbios_rd08(bios, timing + 3);
return timing;
default:
break;
@@ -90,18 +90,20 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
p->timing_hdr = *hdr;
switch (!!data * *ver) {
case 0x10:
- p->timing_10_WR = nv_ro08(bios, data + 0x00);
- p->timing_10_WTR = nv_ro08(bios, data + 0x01);
- p->timing_10_CL = nv_ro08(bios, data + 0x02);
- p->timing_10_RC = nv_ro08(bios, data + 0x03);
- p->timing_10_RFC = nv_ro08(bios, data + 0x05);
- p->timing_10_RAS = nv_ro08(bios, data + 0x07);
- p->timing_10_RP = nv_ro08(bios, data + 0x09);
- p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
- p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
- p->timing_10_RRD = nv_ro08(bios, data + 0x0c);
- p->timing_10_13 = nv_ro08(bios, data + 0x0d);
- p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
+ p->timing_10_WR = nvbios_rd08(bios, data + 0x00);
+ p->timing_10_WTR = nvbios_rd08(bios, data + 0x01);
+ p->timing_10_CL = nvbios_rd08(bios, data + 0x02);
+ p->timing_10_RC = nvbios_rd08(bios, data + 0x03);
+ p->timing_10_RFC = nvbios_rd08(bios, data + 0x05);
+ p->timing_10_RAS = nvbios_rd08(bios, data + 0x07);
+ p->timing_10_RP = nvbios_rd08(bios, data + 0x09);
+ p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
+ p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
+ p->timing_10_RRD = nvbios_rd08(bios, data + 0x0c);
+ p->timing_10_13 = nvbios_rd08(bios, data + 0x0d);
+ p->timing_10_ODT = nvbios_rd08(bios, data + 0x0e) & 0x07;
+ if (p->ramcfg_ver >= 0x10)
+ p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
p->timing_10_24 = 0xff;
p->timing_10_21 = 0;
@@ -112,45 +114,45 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
switch (min_t(u8, *hdr, 25)) {
case 25:
- p->timing_10_24 = nv_ro08(bios, data + 0x18);
+ p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
case 24:
case 23:
case 22:
- p->timing_10_21 = nv_ro08(bios, data + 0x15);
+ p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
case 21:
- p->timing_10_20 = nv_ro08(bios, data + 0x14);
+ p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
case 20:
- p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+ p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
case 19:
- p->timing_10_18 = nv_ro08(bios, data + 0x12);
+ p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
case 18:
case 17:
- p->timing_10_16 = nv_ro08(bios, data + 0x10);
+ p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
}
break;
case 0x20:
- p->timing[0] = nv_ro32(bios, data + 0x00);
- p->timing[1] = nv_ro32(bios, data + 0x04);
- p->timing[2] = nv_ro32(bios, data + 0x08);
- p->timing[3] = nv_ro32(bios, data + 0x0c);
- p->timing[4] = nv_ro32(bios, data + 0x10);
- p->timing[5] = nv_ro32(bios, data + 0x14);
- p->timing[6] = nv_ro32(bios, data + 0x18);
- p->timing[7] = nv_ro32(bios, data + 0x1c);
- p->timing[8] = nv_ro32(bios, data + 0x20);
- p->timing[9] = nv_ro32(bios, data + 0x24);
- p->timing[10] = nv_ro32(bios, data + 0x28);
- p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
- p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
- p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
- p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
- temp = nv_ro16(bios, data + 0x2c);
+ p->timing[0] = nvbios_rd32(bios, data + 0x00);
+ p->timing[1] = nvbios_rd32(bios, data + 0x04);
+ p->timing[2] = nvbios_rd32(bios, data + 0x08);
+ p->timing[3] = nvbios_rd32(bios, data + 0x0c);
+ p->timing[4] = nvbios_rd32(bios, data + 0x10);
+ p->timing[5] = nvbios_rd32(bios, data + 0x14);
+ p->timing[6] = nvbios_rd32(bios, data + 0x18);
+ p->timing[7] = nvbios_rd32(bios, data + 0x1c);
+ p->timing[8] = nvbios_rd32(bios, data + 0x20);
+ p->timing[9] = nvbios_rd32(bios, data + 0x24);
+ p->timing[10] = nvbios_rd32(bios, data + 0x28);
+ p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
+ p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
+ p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
+ p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
+ temp = nvbios_rd16(bios, data + 0x2c);
p->timing_20_2c_003f = (temp & 0x003f) >> 0;
p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
- p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
- p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
- temp = nv_ro16(bios, data + 0x31);
+ p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
+ p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
+ temp = nvbios_rd16(bios, data + 0x31);
p->timing_20_31_0007 = (temp & 0x0007) >> 0;
p->timing_20_31_0078 = (temp & 0x0078) >> 3;
p->timing_20_31_0780 = (temp & 0x0780) >> 7;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index e95b69faa82e..2f13db745948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -33,15 +33,15 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2) {
- vmap = nv_ro16(bios, bit_P.offset + 0x20);
+ vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
if (vmap) {
- *ver = nv_ro08(bios, vmap + 0);
+ *ver = nvbios_rd08(bios, vmap + 0);
switch (*ver) {
case 0x10:
case 0x20:
- *hdr = nv_ro08(bios, vmap + 1);
- *cnt = nv_ro08(bios, vmap + 3);
- *len = nv_ro08(bios, vmap + 2);
+ *hdr = nvbios_rd08(bios, vmap + 1);
+ *cnt = nvbios_rd08(bios, vmap + 3);
+ *len = nvbios_rd08(bios, vmap + 2);
return vmap;
default:
break;
@@ -88,23 +88,23 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
switch (!!vmap * *ver) {
case 0x10:
info->link = 0xff;
- info->min = nv_ro32(bios, vmap + 0x00);
- info->max = nv_ro32(bios, vmap + 0x04);
- info->arg[0] = nv_ro32(bios, vmap + 0x08);
- info->arg[1] = nv_ro32(bios, vmap + 0x0c);
- info->arg[2] = nv_ro32(bios, vmap + 0x10);
+ info->min = nvbios_rd32(bios, vmap + 0x00);
+ info->max = nvbios_rd32(bios, vmap + 0x04);
+ info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
+ info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
+ info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nv_ro08(bios, vmap + 0x00);
- info->link = nv_ro08(bios, vmap + 0x01);
- info->min = nv_ro32(bios, vmap + 0x02);
- info->max = nv_ro32(bios, vmap + 0x06);
- info->arg[0] = nv_ro32(bios, vmap + 0x0a);
- info->arg[1] = nv_ro32(bios, vmap + 0x0e);
- info->arg[2] = nv_ro32(bios, vmap + 0x12);
- info->arg[3] = nv_ro32(bios, vmap + 0x16);
- info->arg[4] = nv_ro32(bios, vmap + 0x1a);
- info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+ info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->link = nvbios_rd08(bios, vmap + 0x01);
+ info->min = nvbios_rd32(bios, vmap + 0x02);
+ info->max = nvbios_rd32(bios, vmap + 0x06);
+ info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
+ info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
+ info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
+ info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
+ info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
+ info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
break;
}
return vmap;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 8454ab7c4a3d..615804c3887b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -33,30 +33,30 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- volt = nv_ro16(bios, bit_P.offset + 0x0c);
+ volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
else
if (bit_P.version == 1)
- volt = nv_ro16(bios, bit_P.offset + 0x10);
+ volt = nvbios_rd16(bios, bit_P.offset + 0x10);
if (volt) {
- *ver = nv_ro08(bios, volt + 0);
+ *ver = nvbios_rd08(bios, volt + 0);
switch (*ver) {
case 0x12:
*hdr = 5;
- *cnt = nv_ro08(bios, volt + 2);
- *len = nv_ro08(bios, volt + 1);
+ *cnt = nvbios_rd08(bios, volt + 2);
+ *len = nvbios_rd08(bios, volt + 1);
return volt;
case 0x20:
- *hdr = nv_ro08(bios, volt + 1);
- *cnt = nv_ro08(bios, volt + 2);
- *len = nv_ro08(bios, volt + 3);
+ *hdr = nvbios_rd08(bios, volt + 1);
+ *cnt = nvbios_rd08(bios, volt + 2);
+ *len = nvbios_rd08(bios, volt + 3);
return volt;
case 0x30:
case 0x40:
case 0x50:
- *hdr = nv_ro08(bios, volt + 1);
- *cnt = nv_ro08(bios, volt + 3);
- *len = nv_ro08(bios, volt + 2);
+ *hdr = nvbios_rd08(bios, volt + 1);
+ *cnt = nvbios_rd08(bios, volt + 3);
+ *len = nvbios_rd08(bios, volt + 2);
return volt;
}
}
@@ -73,28 +73,28 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
- info->vidmask = nv_ro08(bios, volt + 0x04);
+ info->vidmask = nvbios_rd08(bios, volt + 0x04);
break;
case 0x20:
- info->vidmask = nv_ro08(bios, volt + 0x05);
+ info->vidmask = nvbios_rd08(bios, volt + 0x05);
break;
case 0x30:
- info->vidmask = nv_ro08(bios, volt + 0x04);
+ info->vidmask = nvbios_rd08(bios, volt + 0x04);
break;
case 0x40:
- info->base = nv_ro32(bios, volt + 0x04);
- info->step = nv_ro16(bios, volt + 0x08);
- info->vidmask = nv_ro08(bios, volt + 0x0b);
+ info->base = nvbios_rd32(bios, volt + 0x04);
+ info->step = nvbios_rd16(bios, volt + 0x08);
+ info->vidmask = nvbios_rd08(bios, volt + 0x0b);
/*XXX*/
info->min = 0;
info->max = info->base;
break;
case 0x50:
- info->vidmask = nv_ro08(bios, volt + 0x06);
- info->min = nv_ro32(bios, volt + 0x0a);
- info->max = nv_ro32(bios, volt + 0x0e);
- info->base = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
- info->step = nv_ro16(bios, volt + 0x16);
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->min = nvbios_rd32(bios, volt + 0x0a);
+ info->max = nvbios_rd32(bios, volt + 0x0e);
+ info->base = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
+ info->step = nvbios_rd16(bios, volt + 0x16);
break;
}
return volt;
@@ -121,12 +121,12 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
switch (!!volt * *ver) {
case 0x12:
case 0x20:
- info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
- info->vid = nv_ro08(bios, volt + 0x01);
+ info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+ info->vid = nvbios_rd08(bios, volt + 0x01);
break;
case 0x30:
- info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
- info->vid = nv_ro08(bios, volt + 0x01) >> 2;
+ info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+ info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
case 0x50:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index 63a5e1b5cb3c..250fc42d8608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -30,12 +30,12 @@ dcb_xpiod_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
if (data && *ver >= 0x40 && *hdr >= 0x06) {
- u16 xpio = nv_ro16(bios, data + 0x04);
+ u16 xpio = nvbios_rd16(bios, data + 0x04);
if (xpio) {
- *ver = nv_ro08(bios, data + 0x00);
- *hdr = nv_ro08(bios, data + 0x01);
- *cnt = nv_ro08(bios, data + 0x02);
- *len = nv_ro08(bios, data + 0x03);
+ *ver = nvbios_rd08(bios, data + 0x00);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *cnt = nvbios_rd08(bios, data + 0x02);
+ *len = nvbios_rd08(bios, data + 0x03);
return xpio;
}
}
@@ -48,12 +48,12 @@ dcb_xpio_table(struct nvkm_bios *bios, u8 idx,
{
u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
if (data && idx < *cnt) {
- u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+ u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
if (xpio) {
- *ver = nv_ro08(bios, data + 0x00);
- *hdr = nv_ro08(bios, data + 0x01);
- *cnt = nv_ro08(bios, data + 0x02);
- *len = nv_ro08(bios, data + 0x03);
+ *ver = nvbios_rd08(bios, data + 0x00);
+ *hdr = nvbios_rd08(bios, data + 0x01);
+ *cnt = nvbios_rd08(bios, data + 0x02);
+ *len = nvbios_rd08(bios, data + 0x03);
return xpio;
}
}
@@ -66,9 +66,9 @@ dcb_xpio_parse(struct nvkm_bios *bios, u8 idx,
{
u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
if (data && *len >= 6) {
- info->type = nv_ro08(bios, data + 0x04);
- info->addr = nv_ro08(bios, data + 0x05);
- info->flags = nv_ro08(bios, data + 0x06);
+ info->type = nvbios_rd08(bios, data + 0x04);
+ info->addr = nvbios_rd08(bios, data + 0x05);
+ info->flags = nvbios_rd08(bios, data + 0x06);
}
return 0x0000;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 83d80b13f149..5fa9e91835c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,3 +1,4 @@
+nvkm-y += nvkm/subdev/bus/base.o
nvkm-y += nvkm/subdev/bus/hwsq.o
nvkm-y += nvkm/subdev/bus/nv04.o
nvkm-y += nvkm/subdev/bus/nv31.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index 0698764354a2..dc5a10f18bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -1,7 +1,5 @@
/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
+ * Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,42 +19,46 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Dave Airlie
- * Alex Deucher
- * Jerome Glisse
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "priv.h"
-/* this file defines the CHIP_ and family flags used in the pciids,
- * its is common between kms and non-kms because duplicating it and
- * changing one place is fail.
- */
-#ifndef AMDGPU_FAMILY_H
-#define AMDGPU_FAMILY_H
-/*
- * Supported ASIC types
- */
-enum amdgpu_asic_type {
- CHIP_BONAIRE = 0,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
- CHIP_TOPAZ,
- CHIP_TONGA,
- CHIP_CARRIZO,
- CHIP_LAST,
-};
+static void
+nvkm_bus_intr(struct nvkm_subdev *subdev)
+{
+ struct nvkm_bus *bus = nvkm_bus(subdev);
+ bus->func->intr(bus);
+}
-/*
- * Chip flags
- */
-enum amdgpu_chip_flags {
- AMDGPU_ASIC_MASK = 0x0000ffffUL,
- AMDGPU_FLAGS_MASK = 0xffff0000UL,
- AMDGPU_IS_MOBILITY = 0x00010000UL,
- AMDGPU_IS_APU = 0x00020000UL,
- AMDGPU_IS_PX = 0x00040000UL,
- AMDGPU_EXP_HW_SUPPORT = 0x00080000UL,
+static int
+nvkm_bus_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_bus *bus = nvkm_bus(subdev);
+ bus->func->init(bus);
+ return 0;
+}
+
+static void *
+nvkm_bus_dtor(struct nvkm_subdev *subdev)
+{
+ return nvkm_bus(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_bus = {
+ .dtor = nvkm_bus_dtor,
+ .init = nvkm_bus_init,
+ .intr = nvkm_bus_intr,
};
-#endif
+int
+nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
+ int index, struct nvkm_bus **pbus)
+{
+ struct nvkm_bus *bus;
+ if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+ bus->func = func;
+ return 0;
+}
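
bus/base.c introduces the common constructor that chip-specific implementations (g94 and gf100 below) feed their nvkm_bus_func tables into. A minimal sketch of how a new chip would plug in; the sketch_* names are hypothetical, but the pattern is exactly what g94_bus_new uses below:

/* Sketch: minimal per-chip bus implementation on top of
 * nvkm_bus_new_(). */
static void
sketch_bus_init(struct nvkm_bus *bus)
{
	/* per-chip interrupt enabling would go here */
}

static void
sketch_bus_intr(struct nvkm_bus *bus)
{
	/* per-chip interrupt decoding would go here */
}

static const struct nvkm_bus_func
sketch_bus = {
	.init = sketch_bus_init,
	.intr = sketch_bus_intr,
};

int
sketch_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
{
	return nvkm_bus_new_(&sketch_bus, device, index, pbus);
}
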
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index cbe699e82593..9700b5c01cc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,37 +22,43 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
#include <subdev/timer.h>
static int
-g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
{
- struct nv50_bus_priv *priv = (void *)pbus;
+ struct nvkm_device *device = bus->subdev.device;
int i;
- nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
- nv_wr32(pbus, 0x001304, 0x00000000);
- nv_wr32(pbus, 0x001318, 0x00000000);
+ nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+ nvkm_wr32(device, 0x001304, 0x00000000);
+ nvkm_wr32(device, 0x001318, 0x00000000);
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x080000 + (i * 4), data[i]);
- nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
- nv_wr32(pbus, 0x00130c, 0x00000001);
+ nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
+ nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+ nvkm_wr32(device, 0x00130c, 0x00000001);
- return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ return 0;
}
-struct nvkm_oclass *
-g94_bus_oclass = &(struct nv04_bus_impl) {
- .base.handle = NV_SUBDEV(BUS, 0x94),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_bus_ctor,
- .dtor = _nvkm_bus_dtor,
- .init = nv50_bus_init,
- .fini = _nvkm_bus_fini,
- },
+static const struct nvkm_bus_func
+g94_bus = {
+ .init = nv50_bus_init,
.intr = nv50_bus_intr,
.hwsq_exec = g94_bus_hwsq_exec,
.hwsq_size = 128,
-}.base;
+};
+
+int
+g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+ return nvkm_bus_new_(&g94_bus, device, index, pbus);
+}
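
The timeout in g94_bus_hwsq_exec now uses the nvkm_msec() polling macro instead of nv_wait(): the statement body is evaluated repeatedly until it executes break or the time budget expires, and the macro returns a negative value on timeout. Usage in isolation, with the same register and condition as above:

/* Sketch: wait up to 2s for the hwsq-busy bit to clear. */
static int
sketch_wait_hwsq_idle(struct nvkm_device *device)
{
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
			break;
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
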
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index ebc63ba968d4..e0930d5fdfb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,59 +22,54 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
static void
-gf100_bus_intr(struct nvkm_subdev *subdev)
+gf100_bus_intr(struct nvkm_bus *bus)
{
- struct nvkm_bus *pbus = nvkm_bus(subdev);
- u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ struct nvkm_subdev *subdev = &bus->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x0000000e) {
- u32 addr = nv_rd32(pbus, 0x009084);
- u32 data = nv_rd32(pbus, 0x009088);
+ u32 addr = nvkm_rd32(device, 0x009084);
+ u32 data = nvkm_rd32(device, 0x009088);
- nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc),
- (stat & 0x00000002) ? "!ENGINE " : "",
- (stat & 0x00000004) ? "IBUS " : "",
- (stat & 0x00000008) ? "TIMEOUT " : "");
+ nvkm_error(subdev,
+ "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc),
+ (stat & 0x00000002) ? "!ENGINE " : "",
+ (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000008) ? "TIMEOUT " : "");
- nv_wr32(pbus, 0x009084, 0x00000000);
- nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+ nvkm_wr32(device, 0x009084, 0x00000000);
+ nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
stat &= ~0x0000000e;
}
if (stat) {
- nv_error(pbus, "unknown intr 0x%08x\n", stat);
- nv_mask(pbus, 0x001140, stat, 0x00000000);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
-static int
-gf100_bus_init(struct nvkm_object *object)
+static void
+gf100_bus_init(struct nvkm_bus *bus)
{
- struct nv04_bus_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_bus_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x001100, 0xffffffff);
- nv_wr32(priv, 0x001140, 0x0000000e);
- return 0;
+ struct nvkm_device *device = bus->subdev.device;
+ nvkm_wr32(device, 0x001100, 0xffffffff);
+ nvkm_wr32(device, 0x001140, 0x0000000e);
}
-struct nvkm_oclass *
-gf100_bus_oclass = &(struct nv04_bus_impl) {
- .base.handle = NV_SUBDEV(BUS, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_bus_ctor,
- .dtor = _nvkm_bus_dtor,
- .init = gf100_bus_init,
- .fini = _nvkm_bus_fini,
- },
+static const struct nvkm_bus_func
+gf100_bus = {
+ .init = gf100_bus_init,
.intr = gf100_bus_intr,
-}.base;
+};
+
+int
+gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+ return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 7622b41619a0..79f1cf513b36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <subdev/bus.h>
+#include "priv.h"
struct nvkm_hwsq {
- struct nvkm_bus *pbus;
+ struct nvkm_subdev *subdev;
u32 addr;
u32 data;
struct {
@@ -41,13 +41,13 @@ hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
}
int
-nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq)
+nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
{
struct nvkm_hwsq *hwsq;
hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
if (hwsq) {
- hwsq->pbus = pbus;
+ hwsq->subdev = subdev;
hwsq->addr = ~0;
hwsq->data = ~0;
memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
@@ -63,21 +63,23 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
struct nvkm_hwsq *hwsq = *phwsq;
int ret = 0, i;
if (hwsq) {
- struct nvkm_bus *pbus = hwsq->pbus;
+ struct nvkm_subdev *subdev = hwsq->subdev;
+ struct nvkm_bus *bus = subdev->device->bus;
hwsq->c.size = (hwsq->c.size + 4) / 4;
- if (hwsq->c.size <= pbus->hwsq_size) {
+ if (hwsq->c.size <= bus->func->hwsq_size) {
if (exec)
- ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
- hwsq->c.size);
+ ret = bus->func->hwsq_exec(bus,
+ (u32 *)hwsq->c.data,
+ hwsq->c.size);
if (ret)
- nv_error(pbus, "hwsq exec failed: %d\n", ret);
+ nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
} else {
- nv_error(pbus, "hwsq ucode too large\n");
+ nvkm_error(subdev, "hwsq ucode too large\n");
ret = -ENOSPC;
}
for (i = 0; ret && i < hwsq->c.size; i++)
- nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+ nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
*phwsq = NULL;
kfree(hwsq);
@@ -88,7 +90,7 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
void
nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
- nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+ nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
if (hwsq->data != data) {
if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
@@ -113,7 +115,7 @@ nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
void
nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
{
- nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+ nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
flag += 0x80;
if (data >= 0)
flag += 0x20;
@@ -125,7 +127,7 @@ nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
void
nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
{
- nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+ nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}
@@ -138,6 +140,6 @@ nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
shift++;
}
- nv_debug(hwsq->pbus, " DELAY = %d ns\n", nsec);
+ nvkm_debug(hwsq->subdev, " DELAY = %d ns\n", nsec);
hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
}
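With the change above, the hwsq state machine is keyed to a generic nvkm_subdev rather than to the bus itself; it reaches the bus lazily through subdev->device->bus and calls through the new func table, so callers no longer need a bus pointer at all. A sketch of the caller-side flow under the reworked API (register and delay values are illustrative only):

static int
example_hwsq_run(struct nvkm_subdev *subdev)
{
	struct nvkm_hwsq *hwsq;
	int ret = nvkm_hwsq_init(subdev, &hwsq);
	if (ret)
		return ret;
	nvkm_hwsq_wr32(hwsq, 0x001700, 0x00000040);	/* illustrative */
	nvkm_hwsq_nsec(hwsq, 2000);			/* settle time */
	/* fini validates size against bus->func->hwsq_size, then runs */
	return nvkm_hwsq_fini(&hwsq, true);
}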
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index ebf709c27e3a..8117ec5a1468 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -59,10 +59,9 @@ hwsq_reg(u32 addr)
static inline int
hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
{
- struct nvkm_bus *pbus = nvkm_bus(subdev);
int ret;
- ret = nvkm_hwsq_init(pbus, &ram->hwsq);
+ ret = nvkm_hwsq_init(subdev, &ram->hwsq);
if (ret)
return ret;
@@ -85,8 +84,9 @@ hwsq_exec(struct hwsq *ram, bool exec)
static inline u32
hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
{
+ struct nvkm_device *device = ram->subdev->device;
if (reg->sequence != ram->sequence)
- reg->data = nv_rd32(ram->subdev, reg->addr);
+ reg->data = nvkm_rd32(device, reg->addr);
return reg->data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 19c8e50eeff7..c80b96789c31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,73 +22,55 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
static void
-nv04_bus_intr(struct nvkm_subdev *subdev)
+nv04_bus_intr(struct nvkm_bus *bus)
{
- struct nvkm_bus *pbus = nvkm_bus(subdev);
- u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ struct nvkm_subdev *subdev = &bus->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x00000001) {
- nv_error(pbus, "BUS ERROR\n");
+ nvkm_error(subdev, "BUS ERROR\n");
stat &= ~0x00000001;
- nv_wr32(pbus, 0x001100, 0x00000001);
+ nvkm_wr32(device, 0x001100, 0x00000001);
}
if (stat & 0x00000110) {
- subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO);
- if (subdev && subdev->intr)
- subdev->intr(subdev);
+ struct nvkm_gpio *gpio = device->gpio;
+ if (gpio)
+ nvkm_subdev_intr(&gpio->subdev);
stat &= ~0x00000110;
- nv_wr32(pbus, 0x001100, 0x00000110);
+ nvkm_wr32(device, 0x001100, 0x00000110);
}
if (stat) {
- nv_error(pbus, "unknown intr 0x%08x\n", stat);
- nv_mask(pbus, 0x001140, stat, 0x00000000);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
-static int
-nv04_bus_init(struct nvkm_object *object)
+static void
+nv04_bus_init(struct nvkm_bus *bus)
{
- struct nv04_bus_priv *priv = (void *)object;
-
- nv_wr32(priv, 0x001100, 0xffffffff);
- nv_wr32(priv, 0x001140, 0x00000111);
-
- return nvkm_bus_init(&priv->base);
+ struct nvkm_device *device = bus->subdev.device;
+ nvkm_wr32(device, 0x001100, 0xffffffff);
+ nvkm_wr32(device, 0x001140, 0x00000111);
}
+static const struct nvkm_bus_func
+nv04_bus = {
+ .init = nv04_bus_init,
+ .intr = nv04_bus_intr,
+};
+
int
-nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
{
- struct nv04_bus_impl *impl = (void *)oclass;
- struct nv04_bus_priv *priv;
- int ret;
-
- ret = nvkm_bus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->intr = impl->intr;
- priv->base.hwsq_exec = impl->hwsq_exec;
- priv->base.hwsq_size = impl->hwsq_size;
- return 0;
+ return nvkm_bus_new_(&nv04_bus, device, index, pbus);
}
-
-struct nvkm_oclass *
-nv04_bus_oclass = &(struct nv04_bus_impl) {
- .base.handle = NV_SUBDEV(BUS, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_bus_ctor,
- .dtor = _nvkm_bus_dtor,
- .init = nv04_bus_init,
- .fini = _nvkm_bus_fini,
- },
- .intr = nv04_bus_intr,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
deleted file mode 100644
index 3ddc8f91b1e3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_BUS_NV04_H__
-#define __NVKM_BUS_NV04_H__
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
- struct nvkm_bus base;
-};
-
-int nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-int nv50_bus_init(struct nvkm_object *);
-void nv50_bus_intr(struct nvkm_subdev *);
-
-struct nv04_bus_impl {
- struct nvkm_oclass base;
- void (*intr)(struct nvkm_subdev *);
- int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
- u32 hwsq_size;
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index c5739bce8052..5153d89e1f0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,70 +22,67 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
+#include <subdev/therm.h>
static void
-nv31_bus_intr(struct nvkm_subdev *subdev)
+nv31_bus_intr(struct nvkm_bus *bus)
{
- struct nvkm_bus *pbus = nvkm_bus(subdev);
- u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
- u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+ struct nvkm_subdev *subdev = &bus->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+ u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
if (gpio) {
- subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO);
- if (subdev && subdev->intr)
- subdev->intr(subdev);
+ struct nvkm_gpio *gpio = device->gpio;
+ if (gpio)
+ nvkm_subdev_intr(&gpio->subdev);
}
if (stat & 0x00000008) { /* NV41- */
- u32 addr = nv_rd32(pbus, 0x009084);
- u32 data = nv_rd32(pbus, 0x009088);
+ u32 addr = nvkm_rd32(device, 0x009084);
+ u32 data = nvkm_rd32(device, 0x009088);
- nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc));
+ nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
stat &= ~0x00000008;
- nv_wr32(pbus, 0x001100, 0x00000008);
+ nvkm_wr32(device, 0x001100, 0x00000008);
}
if (stat & 0x00070000) {
- subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
- if (subdev && subdev->intr)
- subdev->intr(subdev);
+ struct nvkm_therm *therm = device->therm;
+ if (therm)
+ nvkm_subdev_intr(&therm->subdev);
stat &= ~0x00070000;
- nv_wr32(pbus, 0x001100, 0x00070000);
+ nvkm_wr32(device, 0x001100, 0x00070000);
}
if (stat) {
- nv_error(pbus, "unknown intr 0x%08x\n", stat);
- nv_mask(pbus, 0x001140, stat, 0x00000000);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
-static int
-nv31_bus_init(struct nvkm_object *object)
+static void
+nv31_bus_init(struct nvkm_bus *bus)
{
- struct nv04_bus_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_bus_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x001100, 0xffffffff);
- nv_wr32(priv, 0x001140, 0x00070008);
- return 0;
+ struct nvkm_device *device = bus->subdev.device;
+ nvkm_wr32(device, 0x001100, 0xffffffff);
+ nvkm_wr32(device, 0x001140, 0x00070008);
}
-struct nvkm_oclass *
-nv31_bus_oclass = &(struct nv04_bus_impl) {
- .base.handle = NV_SUBDEV(BUS, 0x31),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_bus_ctor,
- .dtor = _nvkm_bus_dtor,
- .init = nv31_bus_init,
- .fini = _nvkm_bus_fini,
- },
+static const struct nvkm_bus_func
+nv31_bus = {
+ .init = nv31_bus_init,
.intr = nv31_bus_intr,
-}.base;
+};
+
+int
+nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+ return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+}
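The gpio/therm hand-offs above illustrate the new cross-subdev interrupt forwarding: instead of resolving a subdev by NVDEV_SUBDEV_* index and invoking its ->intr hook by hand, handlers use the typed pointers cached on nvkm_device and route through nvkm_subdev_intr(), keeping the NULL check in one obvious place. The shape of the idiom, as a sketch:

/* Sketch: forward a stray bus interrupt to the therm subdev, if the
 * device has one; mirrors the nv31/nv50 handlers in this diff. */
static void
example_forward_therm(struct nvkm_device *device)
{
	struct nvkm_therm *therm = device->therm;
	if (therm)
		nvkm_subdev_intr(&therm->subdev);
}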
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 1987863d71ee..19e10fdc9291 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,83 +22,84 @@
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
+#include <subdev/therm.h>
#include <subdev/timer.h>
static int
-nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
{
- struct nv50_bus_priv *priv = (void *)pbus;
+ struct nvkm_device *device = bus->subdev.device;
int i;
- nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
- nv_wr32(pbus, 0x001304, 0x00000000);
+ nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+ nvkm_wr32(device, 0x001304, 0x00000000);
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x001400 + (i * 4), data[i]);
- nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
- nv_wr32(pbus, 0x00130c, 0x00000003);
+ nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
+ nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+ nvkm_wr32(device, 0x00130c, 0x00000003);
- return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+
+ return 0;
}
void
-nv50_bus_intr(struct nvkm_subdev *subdev)
+nv50_bus_intr(struct nvkm_bus *bus)
{
- struct nvkm_bus *pbus = nvkm_bus(subdev);
- u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+ struct nvkm_subdev *subdev = &bus->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x00000008) {
- u32 addr = nv_rd32(pbus, 0x009084);
- u32 data = nv_rd32(pbus, 0x009088);
+ u32 addr = nvkm_rd32(device, 0x009084);
+ u32 data = nvkm_rd32(device, 0x009088);
- nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
- (addr & 0x00000002) ? "write" : "read", data,
- (addr & 0x00fffffc));
+ nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+ (addr & 0x00000002) ? "write" : "read", data,
+ (addr & 0x00fffffc));
stat &= ~0x00000008;
- nv_wr32(pbus, 0x001100, 0x00000008);
+ nvkm_wr32(device, 0x001100, 0x00000008);
}
if (stat & 0x00010000) {
- subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
- if (subdev && subdev->intr)
- subdev->intr(subdev);
+ struct nvkm_therm *therm = device->therm;
+ if (therm)
+ nvkm_subdev_intr(&therm->subdev);
stat &= ~0x00010000;
- nv_wr32(pbus, 0x001100, 0x00010000);
+ nvkm_wr32(device, 0x001100, 0x00010000);
}
if (stat) {
- nv_error(pbus, "unknown intr 0x%08x\n", stat);
- nv_mask(pbus, 0x001140, stat, 0);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, 0x001140, stat, 0);
}
}
-int
-nv50_bus_init(struct nvkm_object *object)
+void
+nv50_bus_init(struct nvkm_bus *bus)
{
- struct nv04_bus_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_bus_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x001100, 0xffffffff);
- nv_wr32(priv, 0x001140, 0x00010008);
- return 0;
+ struct nvkm_device *device = bus->subdev.device;
+ nvkm_wr32(device, 0x001100, 0xffffffff);
+ nvkm_wr32(device, 0x001140, 0x00010008);
}
-struct nvkm_oclass *
-nv50_bus_oclass = &(struct nv04_bus_impl) {
- .base.handle = NV_SUBDEV(BUS, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_bus_ctor,
- .dtor = _nvkm_bus_dtor,
- .init = nv50_bus_init,
- .fini = _nvkm_bus_fini,
- },
+static const struct nvkm_bus_func
+nv50_bus = {
+ .init = nv50_bus_init,
.intr = nv50_bus_intr,
.hwsq_exec = nv50_bus_hwsq_exec,
.hwsq_size = 64,
-}.base;
+};
+
+int
+nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+ return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
new file mode 100644
index 000000000000..a130f2c642d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_BUS_PRIV_H__
+#define __NVKM_BUS_PRIV_H__
+#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
+#include <subdev/bus.h>
+
+struct nvkm_bus_func {
+ void (*init)(struct nvkm_bus *);
+ void (*intr)(struct nvkm_bus *);
+ int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+ u32 hwsq_size;
+};
+
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+ struct nvkm_bus **);
+
+void nv50_bus_init(struct nvkm_bus *);
+void nv50_bus_intr(struct nvkm_bus *);
+#endif
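The new priv.h is the whole contract a chipset implementation has to satisfy: one const func table and a one-line constructor, with optional reuse of existing hooks. A sketch for a hypothetical chipset (the gxyz name is invented for illustration):

/* Hypothetical chipset reusing the nv50 hooks, exactly as g94 does
 * above; only the table and constructor are new code. */
static const struct nvkm_bus_func
gxyz_bus = {
	.init = nv50_bus_init,
	.intr = nv50_bus_intr,
};

int
gxyz_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
{
	return nvkm_bus_new_(&gxyz_bus, device, index, pbus);
}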
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688c9602..ed7717bcc3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/clk/mcp77.o
nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
+
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d82e0cd..dc8682c91cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#include "priv.h"
+
#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
#include <subdev/therm.h>
#include <subdev/volt.h>
-#include <core/device.h>
#include <core/option.h>
/******************************************************************************
@@ -40,7 +40,7 @@ static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
u8 pstate, u8 domain, u32 input)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
+ struct nvkm_bios *bios = clk->subdev.device->bios;
struct nvbios_boostE boostE;
u8 ver, hdr, cnt, len;
u16 data;
@@ -77,8 +77,10 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
- struct nvkm_therm *ptherm = nvkm_therm(clk);
- struct nvkm_volt *volt = nvkm_volt(clk);
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_therm *therm = device->therm;
+ struct nvkm_volt *volt = device->volt;
struct nvkm_cstate *cstate;
int ret;
@@ -88,41 +90,41 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
cstate = &pstate->base;
}
- if (ptherm) {
- ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
+ if (therm) {
+ ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise fan speed: %d\n", ret);
+ nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
return ret;
}
}
if (volt) {
- ret = volt->set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise voltage: %d\n", ret);
+ nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
}
}
- ret = clk->calc(clk, cstate);
+ ret = clk->func->calc(clk, cstate);
if (ret == 0) {
- ret = clk->prog(clk);
- clk->tidy(clk);
+ ret = clk->func->prog(clk);
+ clk->func->tidy(clk);
}
if (volt) {
- ret = volt->set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower voltage: %d\n", ret);
+ nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
- if (ptherm) {
- ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
+ if (therm) {
+ ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
}
- return 0;
+ return ret;
}
static void
@@ -135,8 +137,8 @@ nvkm_cstate_del(struct nvkm_cstate *cstate)
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
- struct nvkm_domain *domain = clk->domains;
+ struct nvkm_bios *bios = clk->subdev.device->bios;
+ const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
u8 ver, hdr;
@@ -172,7 +174,8 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
- struct nvkm_fb *pfb = nvkm_fb(clk);
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_ram *ram = subdev->device->fb->ram;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -181,17 +184,17 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
break;
}
- nv_debug(clk, "setting performance state %d\n", pstatei);
+ nvkm_debug(subdev, "setting performance state %d\n", pstatei);
clk->pstate = pstatei;
- if (pfb->ram && pfb->ram->calc) {
+ if (ram && ram->func->calc) {
int khz = pstate->base.domain[nv_clk_src_mem];
do {
- ret = pfb->ram->calc(pfb, khz);
+ ret = ram->func->calc(ram, khz);
if (ret == 0)
- ret = pfb->ram->prog(pfb);
+ ret = ram->func->prog(ram);
} while (ret > 0);
- pfb->ram->tidy(pfb);
+ ram->func->tidy(ram);
}
return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@ static void
nvkm_pstate_work(struct work_struct *work)
{
struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
+ struct nvkm_subdev *subdev = &clk->subdev;
int pstate;
if (!atomic_xchg(&clk->waiting, 0))
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
- clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+ clk->astate, clk->tstate, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
}
- nv_trace(clk, "-> %d\n", pstate);
+ nvkm_trace(subdev, "-> %d\n", pstate);
if (pstate != clk->pstate) {
int ret = nvkm_pstate_prog(clk, pstate);
if (ret) {
- nv_error(clk, "error setting pstate %d: %d\n",
- pstate, ret);
+ nvkm_error(subdev, "error setting pstate %d: %d\n",
+ pstate, ret);
}
}
@@ -246,8 +250,9 @@ nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
- struct nvkm_domain *clock = clk->domains - 1;
+ const struct nvkm_domain *clock = clk->domains - 1;
struct nvkm_cstate *cstate;
+ struct nvkm_subdev *subdev = &clk->subdev;
char info[3][32] = { "", "", "" };
char name[4] = "--";
int i = -1;
@@ -261,12 +266,12 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
if (hi == 0)
continue;
- nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+ nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
list_for_each_entry(cstate, &pstate->list, head) {
u32 freq = cstate->domain[clock->name];
lo = min(lo, freq);
hi = max(hi, freq);
- nv_debug(clk, "%10d KHz\n", freq);
+ nvkm_debug(subdev, "%10d KHz\n", freq);
}
if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
}
}
- nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+ nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
static void
@@ -301,8 +306,8 @@ nvkm_pstate_del(struct nvkm_pstate *pstate)
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
- struct nvkm_bios *bios = nvkm_bios(clk);
- struct nvkm_domain *domain = clk->domains - 1;
+ struct nvkm_bios *bios = clk->subdev.device->bios;
+ const struct nvkm_domain *domain = clk->domains - 1;
struct nvkm_pstate *pstate;
struct nvkm_cstate *cstate;
struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@ nvkm_clk_pwrsrc(struct nvkm_notify *notify)
*****************************************************************************/
int
-_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
+nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+{
+ return clk->func->read(clk, src);
+}
+
+static int
+nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_clk *clk = (void *)object;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
nvkm_notify_put(&clk->pwrsrc_ntfy);
- return nvkm_subdev_fini(&clk->base, suspend);
+ flush_work(&clk->work);
+ if (clk->func->fini)
+ clk->func->fini(clk);
+ return 0;
}
-int
-_nvkm_clk_init(struct nvkm_object *object)
+static int
+nvkm_clk_init(struct nvkm_subdev *subdev)
{
- struct nvkm_clk *clk = (void *)object;
- struct nvkm_domain *clock = clk->domains;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
+ const struct nvkm_domain *clock = clk->domains;
int ret;
- ret = nvkm_subdev_init(&clk->base);
- if (ret)
- return ret;
-
memset(&clk->bstate, 0x00, sizeof(clk->bstate));
INIT_LIST_HEAD(&clk->bstate.list);
clk->bstate.pstate = 0xff;
while (clock->name != nv_clk_src_max) {
- ret = clk->read(clk, clock->name);
+ ret = nvkm_clk_read(clk, clock->name);
if (ret < 0) {
- nv_error(clk, "%02x freq unknown\n", clock->name);
+ nvkm_error(subdev, "%02x freq unknown\n", clock->name);
return ret;
}
clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@ _nvkm_clk_init(struct nvkm_object *object)
nvkm_pstate_info(clk, &clk->bstate);
+ if (clk->func->init)
+ return clk->func->init(clk);
+
clk->astate = clk->state_nr - 1;
clk->tstate = 0;
clk->dstate = 0;
@@ -513,61 +526,63 @@ _nvkm_clk_init(struct nvkm_object *object)
return 0;
}
-void
-_nvkm_clk_dtor(struct nvkm_object *object)
+static void *
+nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_clk *clk = (void *)object;
+ struct nvkm_clk *clk = nvkm_clk(subdev);
struct nvkm_pstate *pstate, *temp;
nvkm_notify_fini(&clk->pwrsrc_ntfy);
+ /* Early return if the pstates have been provided statically */
+ if (clk->func->pstates)
+ return clk;
+
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
nvkm_pstate_del(pstate);
}
- nvkm_subdev_destroy(&clk->base);
+ return clk;
}
+static const struct nvkm_subdev_func
+nvkm_clk = {
+ .dtor = nvkm_clk_dtor,
+ .init = nvkm_clk_init,
+ .fini = nvkm_clk_fini,
+};
+
int
-nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
- struct nvkm_pstate *pstates, int nb_pstates,
- bool allow_reclock, int length, void **object)
+nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk *clk)
{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_clk *clk;
int ret, idx, arglen;
const char *mode;
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
- "clock", length, object);
- clk = *object;
- if (ret)
- return ret;
-
+ nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+ clk->func = func;
INIT_LIST_HEAD(&clk->states);
- clk->domains = clocks;
+ clk->domains = func->domains;
clk->ustate_ac = -1;
clk->ustate_dc = -1;
+ clk->allow_reclock = allow_reclock;
INIT_WORK(&clk->work, nvkm_pstate_work);
init_waitqueue_head(&clk->wait);
atomic_set(&clk->waiting, 0);
/* If no pstates are provided, try and fetch them from the BIOS */
- if (!pstates) {
+ if (!func->pstates) {
idx = 0;
do {
ret = nvkm_pstate_new(clk, idx++);
} while (ret == 0);
} else {
- for (idx = 0; idx < nb_pstates; idx++)
- list_add_tail(&pstates[idx].head, &clk->states);
- clk->state_nr = nb_pstates;
+ for (idx = 0; idx < func->nr_pstates; idx++)
+ list_add_tail(&func->pstates[idx].head, &clk->states);
+ clk->state_nr = func->nr_pstates;
}
- clk->allow_reclock = allow_reclock;
-
ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
NULL, 0, 0, &clk->pwrsrc_ntfy);
if (ret)
@@ -589,3 +604,12 @@ nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
return 0;
}
+
+int
+nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk **pclk)
+{
+ if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+}
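Note the split at the end of the hunk: nvkm_clk_ctor() initialises an embedded base, while nvkm_clk_new_() allocates a bare nvkm_clk and delegates to it. Implementations with private programming state take the first path; a sketch of that shape (struct myclk is illustrative, mirroring the gf100/gk104 constructors later in this diff):

struct myclk {
	struct nvkm_clk base;
	u32 eng_state[16];	/* illustrative per-chipset state */
};

static int
myclk_new(const struct nvkm_clk_func *func, struct nvkm_device *device,
	  int index, struct nvkm_clk **pclk)
{
	struct myclk *clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;	/* hand the embedded base to the core */
	return nvkm_clk_ctor(func, device, index, false, &clk->base);
}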
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b9769d64..347da9ee20f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
*/
#include "nv50.h"
-static struct nvkm_domain
-g84_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0xff },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+g84_clk = {
+ .read = nv50_clk_read,
+ .calc = nv50_clk_calc,
+ .prog = nv50_clk_prog,
+ .tidy = nv50_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0xff },
+ { nv_clk_src_max }
+ }
};
-struct nvkm_oclass *
-g84_clk_oclass = &(struct nv50_clk_oclass) {
- .base.handle = NV_SUBDEV(CLK, 0x84),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
- .domains = g84_domains,
-}.base;
+int
+g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ return nv50_clk_new_(&g84_clk, device, index,
+ (device->chipset == 0xa0), pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d54b02..a52b7e7fce41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define gf100_clk(p) container_of((p), struct gf100_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
@@ -38,29 +38,29 @@ struct gf100_clk_info {
u32 coef;
};
-struct gf100_clk_priv {
+struct gf100_clk {
struct nvkm_clk base;
struct gf100_clk_info eng[16];
};
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
{
- struct nvkm_clk *clk = &priv->base;
- u32 ssrc = nv_rd32(priv, dsrc);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc);
if (!(ssrc & 0x00000100))
- return clk->read(clk, nv_clk_src_sppll0);
- return clk->read(clk, nv_clk_src_sppll1);
+ return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
+ return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
}
static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
{
- struct nvkm_clk *clk = &priv->base;
- u32 ctrl = nv_rd32(priv, pll + 0x00);
- u32 coef = nv_rd32(priv, pll + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0x00);
+ u32 coef = nvkm_rd32(device, pll + 0x04);
u32 P = (coef & 0x003f0000) >> 16;
u32 N = (coef & 0x0000ff00) >> 8;
u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
switch (pll) {
case 0x00e800:
case 0x00e820:
- sclk = nv_device(priv)->crystal;
+ sclk = device->crystal;
P = 1;
break;
case 0x132000:
- sclk = clk->read(clk, nv_clk_src_mpllsrc);
+ sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
break;
case 0x132020:
- sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+ sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
break;
case 0x137000:
case 0x137020:
case 0x137040:
case 0x1370e0:
- sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
break;
default:
return 0;
@@ -95,46 +95,48 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
}
static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
- u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
- u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+ u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
switch (ssrc & 0x00000003) {
case 0:
if ((ssrc & 0x00030000) != 0x00030000)
- return nv_device(priv)->crystal;
+ return device->crystal;
return 108000;
case 2:
return 100000;
case 3:
if (sctl & 0x80000000) {
- u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sclk = read_vco(clk, dsrc + (doff * 4));
u32 sdiv = (sctl & 0x0000003f) + 2;
return (sclk * 2) / sdiv;
}
- return read_vco(priv, dsrc + (doff * 4));
+ return read_vco(clk, dsrc + (doff * 4));
default:
return 0;
}
}
static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
- u32 ssel = nv_rd32(priv, 0x137100);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+ u32 ssel = nvkm_rd32(device, 0x137100);
u32 sclk, sdiv;
- if (ssel & (1 << clk)) {
- if (clk < 7)
- sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ if (ssel & (1 << idx)) {
+ if (idx < 7)
+ sclk = read_pll(clk, 0x137000 + (idx * 0x20));
else
- sclk = read_pll(priv, 0x1370e0);
+ sclk = read_pll(clk, 0x1370e0);
sdiv = ((sctl & 0x00003f00) >> 8) + 2;
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = ((sctl & 0x0000003f) >> 0) + 2;
}
@@ -145,10 +147,11 @@ read_clk(struct gf100_clk_priv *priv, int clk)
}
static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nvkm_device *device = nv_device(clk);
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
@@ -156,47 +159,47 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case nv_clk_src_href:
return 100000;
case nv_clk_src_sppll0:
- return read_pll(priv, 0x00e800);
+ return read_pll(clk, 0x00e800);
case nv_clk_src_sppll1:
- return read_pll(priv, 0x00e820);
+ return read_pll(clk, 0x00e820);
case nv_clk_src_mpllsrcref:
- return read_div(priv, 0, 0x137320, 0x137330);
+ return read_div(clk, 0, 0x137320, 0x137330);
case nv_clk_src_mpllsrc:
- return read_pll(priv, 0x132020);
+ return read_pll(clk, 0x132020);
case nv_clk_src_mpll:
- return read_pll(priv, 0x132000);
+ return read_pll(clk, 0x132000);
case nv_clk_src_mdiv:
- return read_div(priv, 0, 0x137300, 0x137310);
+ return read_div(clk, 0, 0x137300, 0x137310);
case nv_clk_src_mem:
- if (nv_rd32(priv, 0x1373f0) & 0x00000002)
- return clk->read(clk, nv_clk_src_mpll);
- return clk->read(clk, nv_clk_src_mdiv);
+ if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
+ return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
+ return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
case nv_clk_src_gpc:
- return read_clk(priv, 0x00);
+ return read_clk(clk, 0x00);
case nv_clk_src_rop:
- return read_clk(priv, 0x01);
+ return read_clk(clk, 0x01);
case nv_clk_src_hubk07:
- return read_clk(priv, 0x02);
+ return read_clk(clk, 0x02);
case nv_clk_src_hubk06:
- return read_clk(priv, 0x07);
+ return read_clk(clk, 0x07);
case nv_clk_src_hubk01:
- return read_clk(priv, 0x08);
+ return read_clk(clk, 0x08);
case nv_clk_src_copy:
- return read_clk(priv, 0x09);
+ return read_clk(clk, 0x09);
case nv_clk_src_daemon:
- return read_clk(priv, 0x0c);
+ return read_clk(clk, 0x0c);
case nv_clk_src_vdec:
- return read_clk(priv, 0x0e);
+ return read_clk(clk, 0x0e);
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -207,7 +210,7 @@ calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -229,28 +232,29 @@ calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
/* otherwise, calculate the closest divider */
- sclk = read_vco(priv, 0x137160 + (clk * 4));
- if (clk < 7)
- sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ sclk = read_vco(clk, 0x137160 + (idx * 4));
+ if (idx < 7)
+ sclk = calc_div(clk, idx, sclk, freq, ddiv);
return sclk;
}
static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll limits;
int N, M, P, ret;
- ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
if (ret)
return 0;
- limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
if (!limits.refclk)
return 0;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -259,10 +263,9 @@ calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct gf100_clk_priv *priv,
- struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
- struct gf100_clk_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &clk->eng[idx];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@ calc_clk(struct gf100_clk_priv *priv,
return 0;
/* first possible path, using only dividers */
- clk0 = calc_src(priv, clk, freq, &src0, &div0);
- clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+ clk0 = calc_src(clk, idx, freq, &src0, &div0);
+ clk0 = calc_div(clk, idx, clk0, freq, &div1D);
/* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x00004387 & (1 << clk))) {
- if (clk <= 7)
- clk1 = calc_pll(priv, clk, freq, &info->coef);
+ if (clk0 != freq && (0x00004387 & (1 << idx))) {
+ if (idx <= 7)
+ clk1 = calc_pll(clk, idx, freq, &info->coef);
else
clk1 = cstate->domain[nv_clk_src_hubk06];
- clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ clk1 = calc_div(clk, idx, clk1, freq, &div1P);
}
/* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@ calc_clk(struct gf100_clk_priv *priv,
info->mdiv |= 0x80000000;
info->mdiv |= div1P << 8;
}
- info->ssel = (1 << clk);
+ info->ssel = (1 << idx);
info->freq = clk1;
}
@@ -311,81 +314,96 @@ calc_clk(struct gf100_clk_priv *priv,
}
static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
int ret;
- if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
- (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
- (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
- (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
- (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
- (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
- (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
- (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+ (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
return ret;
return 0;
}
static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- if (clk < 7 && !info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
- nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ if (idx < 7 && !info->ssel) {
+ nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+ nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
}
static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
- nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
- nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+ break;
+ );
}
static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- const u32 addr = 0x137000 + (clk * 0x20);
- if (clk <= 7) {
- nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 addr = 0x137000 + (idx * 0x20);
+ if (idx <= 7) {
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
if (info->coef) {
- nv_wr32(priv, addr + 0x04, info->coef);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
- nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
- nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_wr32(device, addr + 0x04, info->coef);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+ break;
+ );
+ nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
}
}
}
static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137100, (1 << clk), info->ssel);
- nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+ if (tmp == info->ssel)
+ break;
+ );
}
}
static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
- struct gf100_clk_info *info = &priv->eng[clk];
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+ struct gf100_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}
static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *base)
{
- struct gf100_clk_priv *priv = (void *)clk;
+ struct gf100_clk *clk = gf100_clk(base);
struct {
- void (*exec)(struct gf100_clk_priv *, int);
+ void (*exec)(struct gf100_clk *, int);
} stage[] = {
{ gf100_clk_prog_0 }, /* div programming */
{ gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
int i, j;
for (i = 0; i < ARRAY_SIZE(stage); i++) {
- for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
- if (!priv->eng[j].freq)
+ for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+ if (!clk->eng[j].freq)
continue;
- stage[i].exec(priv, j);
+ stage[i].exec(clk, j);
}
}
@@ -407,56 +425,42 @@ gf100_clk_prog(struct nvkm_clk *clk)
}
static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *base)
{
- struct gf100_clk_priv *priv = (void *)clk;
- memset(priv->eng, 0x00, sizeof(priv->eng));
+ struct gf100_clk *clk = gf100_clk(base);
+ memset(clk->eng, 0x00, sizeof(clk->eng));
}
-static struct nvkm_domain
-gf100_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_hubk06 , 0x00 },
- { nv_clk_src_hubk01 , 0x01 },
- { nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
- { nv_clk_src_rop , 0x04 },
- { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0x06 },
- { nv_clk_src_daemon , 0x0a },
- { nv_clk_src_hubk07 , 0x0b },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gf100_clk = {
+ .read = gf100_clk_read,
+ .calc = gf100_clk_calc,
+ .prog = gf100_clk_prog,
+ .tidy = gf100_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_hubk06 , 0x00 },
+ { nv_clk_src_hubk01 , 0x01 },
+ { nv_clk_src_copy , 0x02 },
+ { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_rop , 0x04 },
+ { nv_clk_src_mem , 0x05, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x0a },
+ { nv_clk_src_hubk07 , 0x0b },
+ { nv_clk_src_max }
+ }
};
-static int
-gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gf100_clk_priv *priv;
- int ret;
+ struct gf100_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gf100_clk_read;
- priv->base.calc = gf100_clk_calc;
- priv->base.prog = gf100_clk_prog;
- priv->base.tidy = gf100_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
}
-
-struct nvkm_oclass
-gf100_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
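The gf100_clk() macro at the top of this file is the downcast half of the embedding pattern: the generic layer always hands back the nvkm_clk it was given, and container_of() recovers the wrapper around it. That is sound only because every gf100 clk object is allocated as a struct gf100_clk, which gf100_clk_new() above guarantees. Sketched in isolation:

/* Sketch of the container_of() downcast used by every hook above. */
static int
example_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);	/* base -> wrapper */
	if (src == nv_clk_src_gpc)
		return read_clk(clk, 0x00);		/* file-local helper */
	return -EINVAL;
}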
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310bdfbb..396f7e4dad0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define gk104_clk(p) container_of((p), struct gk104_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@ struct gk104_clk_info {
u32 coef;
};
-struct gk104_clk_priv {
+struct gk104_clk {
struct nvkm_clk base;
struct gk104_clk_info eng[16];
};
-static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
-static u32 read_pll(struct gk104_clk_priv *, u32);
+static u32 read_div(struct gk104_clk *, int, u32, u32);
+static u32 read_pll(struct gk104_clk *, u32);
static u32
-read_vco(struct gk104_clk_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk *clk, u32 dsrc)
{
- u32 ssrc = nv_rd32(priv, dsrc);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc);
if (!(ssrc & 0x00000100))
- return read_pll(priv, 0x00e800);
- return read_pll(priv, 0x00e820);
+ return read_pll(clk, 0x00e800);
+ return read_pll(clk, 0x00e820);
}
static u32
-read_pll(struct gk104_clk_priv *priv, u32 pll)
+read_pll(struct gk104_clk *clk, u32 pll)
{
- u32 ctrl = nv_rd32(priv, pll + 0x00);
- u32 coef = nv_rd32(priv, pll + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0x00);
+ u32 coef = nvkm_rd32(device, pll + 0x04);
u32 P = (coef & 0x003f0000) >> 16;
u32 N = (coef & 0x0000ff00) >> 8;
u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
switch (pll) {
case 0x00e800:
case 0x00e820:
- sclk = nv_device(priv)->crystal;
+ sclk = device->crystal;
P = 1;
break;
case 0x132000:
- sclk = read_pll(priv, 0x132020);
+ sclk = read_pll(clk, 0x132020);
P = (coef & 0x10000000) ? 2 : 1;
break;
case 0x132020:
- sclk = read_div(priv, 0, 0x137320, 0x137330);
- fN = nv_rd32(priv, pll + 0x10) >> 16;
+ sclk = read_div(clk, 0, 0x137320, 0x137330);
+ fN = nvkm_rd32(device, pll + 0x10) >> 16;
break;
case 0x137000:
case 0x137020:
case 0x137040:
case 0x1370e0:
- sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+ sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
break;
default:
return 0;
@@ -101,70 +103,73 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
}
static u32
-read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
- u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
- u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+ u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
switch (ssrc & 0x00000003) {
case 0:
if ((ssrc & 0x00030000) != 0x00030000)
- return nv_device(priv)->crystal;
+ return device->crystal;
return 108000;
case 2:
return 100000;
case 3:
if (sctl & 0x80000000) {
- u32 sclk = read_vco(priv, dsrc + (doff * 4));
+ u32 sclk = read_vco(clk, dsrc + (doff * 4));
u32 sdiv = (sctl & 0x0000003f) + 2;
return (sclk * 2) / sdiv;
}
- return read_vco(priv, dsrc + (doff * 4));
+ return read_vco(clk, dsrc + (doff * 4));
default:
return 0;
}
}
static u32
-read_mem(struct gk104_clk_priv *priv)
+read_mem(struct gk104_clk *clk)
{
- switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
- case 1: return read_pll(priv, 0x132020);
- case 2: return read_pll(priv, 0x132000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
+ case 1: return read_pll(clk, 0x132020);
+ case 2: return read_pll(clk, 0x132000);
default:
return 0;
}
}
static u32
-read_clk(struct gk104_clk_priv *priv, int clk)
+read_clk(struct gk104_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
u32 sclk, sdiv;
- if (clk < 7) {
- u32 ssel = nv_rd32(priv, 0x137100);
- if (ssel & (1 << clk)) {
- sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+ if (idx < 7) {
+ u32 ssel = nvkm_rd32(device, 0x137100);
+ if (ssel & (1 << idx)) {
+ sclk = read_pll(clk, 0x137000 + (idx * 0x20));
sdiv = 1;
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = 0;
}
} else {
- u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+ u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
if ((ssrc & 0x00000003) == 0x00000003) {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
if (ssrc & 0x00000100) {
if (ssrc & 0x40000000)
- sclk = read_pll(priv, 0x1370e0);
+ sclk = read_pll(clk, 0x1370e0);
sdiv = 1;
} else {
sdiv = 0;
}
} else {
- sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+ sclk = read_div(clk, idx, 0x137160, 0x1371d0);
sdiv = 0;
}
}
@@ -181,10 +186,11 @@ read_clk(struct gk104_clk_priv *priv, int clk)
}
static int
-gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nvkm_device *device = nv_device(clk);
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
@@ -192,29 +198,29 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case nv_clk_src_href:
return 100000;
case nv_clk_src_mem:
- return read_mem(priv);
+ return read_mem(clk);
case nv_clk_src_gpc:
- return read_clk(priv, 0x00);
+ return read_clk(clk, 0x00);
case nv_clk_src_rop:
- return read_clk(priv, 0x01);
+ return read_clk(clk, 0x01);
case nv_clk_src_hubk07:
- return read_clk(priv, 0x02);
+ return read_clk(clk, 0x02);
case nv_clk_src_hubk06:
- return read_clk(priv, 0x07);
+ return read_clk(clk, 0x07);
case nv_clk_src_hubk01:
- return read_clk(priv, 0x08);
+ return read_clk(clk, 0x08);
case nv_clk_src_daemon:
- return read_clk(priv, 0x0c);
+ return read_clk(clk, 0x0c);
case nv_clk_src_vdec:
- return read_clk(priv, 0x0e);
+ return read_clk(clk, 0x0e);
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static u32
-calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
u32 div = min((ref * 2) / freq, (u32)65);
if (div < 2)
@@ -225,7 +231,7 @@ calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
}
static u32
-calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
u32 sclk;
@@ -247,28 +253,29 @@ calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
}
/* otherwise, calculate the closest divider */
- sclk = read_vco(priv, 0x137160 + (clk * 4));
- if (clk < 7)
- sclk = calc_div(priv, clk, sclk, freq, ddiv);
+ sclk = read_vco(clk, 0x137160 + (idx * 4));
+ if (idx < 7)
+ sclk = calc_div(clk, idx, sclk, freq, ddiv);
return sclk;
}
static u32
-calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll limits;
int N, M, P, ret;
- ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+ ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
if (ret)
return 0;
- limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+ limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
if (!limits.refclk)
return 0;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
if (ret <= 0)
return 0;
@@ -277,10 +284,10 @@ calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
}
static int
-calc_clk(struct gk104_clk_priv *priv,
- struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk *clk,
+ struct nvkm_cstate *cstate, int idx, int dom)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
u32 freq = cstate->domain[dom];
u32 src0, div0, div1D, div1P = 0;
u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@ calc_clk(struct gk104_clk_priv *priv,
return 0;
/* first possible path, using only dividers */
- clk0 = calc_src(priv, clk, freq, &src0, &div0);
- clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+ clk0 = calc_src(clk, idx, freq, &src0, &div0);
+ clk0 = calc_div(clk, idx, clk0, freq, &div1D);
/* see if we can get any closer using PLLs */
- if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
- if (clk <= 7)
- clk1 = calc_pll(priv, clk, freq, &info->coef);
+ if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
+ if (idx <= 7)
+ clk1 = calc_pll(clk, idx, freq, &info->coef);
else
clk1 = cstate->domain[nv_clk_src_hubk06];
- clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+ clk1 = calc_div(clk, idx, clk1, freq, &div1P);
}
/* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@ calc_clk(struct gk104_clk_priv *priv,
info->mdiv |= 0x80000000;
info->mdiv |= div1P << 8;
}
- info->ssel = (1 << clk);
+ info->ssel = (1 << idx);
info->dsrc = 0x40000100;
info->freq = clk1;
}
@@ -329,98 +336,115 @@ calc_clk(struct gk104_clk_priv *priv,
}
static int
-gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
int ret;
- if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
- (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
- (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
- (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
- (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
- (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
- (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+ if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+ (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+ (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+ (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+ (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+ (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+ (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
return ret;
return 0;
}
static void
-gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (!info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
- nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+ nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+ nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
}
static void
-gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
- nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
- nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+ break;
+ );
}
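
This is the first of many nv_wait() -> nvkm_msec() conversions in the series: the fixed mask/value poll becomes an open-coded condition. nvkm_msec()/nvkm_usec() are statement-expression macros from <subdev/timer.h> that run their body until it executes "break;" or the timeout elapses, evaluating to a negative value on timeout. A rough stand-in for orientation (not the real macro, which takes a statement body and measures time via the timer subdev, not udelay()):

	/* Illustrative approximation of nvkm_msec(); takes a condition
	 * expression where the in-tree macro takes a statement body.
	 * Assumes <linux/delay.h> and <linux/errno.h>. */
	#define demo_msec(device, ms, cond) ({                              \
		long _ret = -ETIMEDOUT;                                     \
		unsigned long _us;                                          \
		(void)(device);	/* real macro reads the ptimer */           \
		for (_us = 0; _us < (ms) * 1000UL; _us += 10) {             \
			if (cond) {	/* caller's exit condition */       \
				_ret = 0;                                   \
				break;                                      \
			}                                                   \
			udelay(10);                                         \
		}                                                           \
		_ret;                                                       \
	})

	/* e.g. the wait in gk104_clk_prog_1_0() above would read:
	 *	demo_msec(device, 2000,
	 *		  !(nvkm_rd32(device, 0x137100) & (1 << idx)))
	 */
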
static void
-gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}
static void
-gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
- const u32 addr = 0x137000 + (clk * 0x20);
- nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 addr = 0x137000 + (idx * 0x20);
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
if (info->coef) {
- nv_wr32(priv, addr + 0x04, info->coef);
- nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
- nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
- nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_wr32(device, addr + 0x04, info->coef);
+ nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+ break;
+ );
+ nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
}
}
static void
-gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel)
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
else
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
+ nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
}
static void
-gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137100, (1 << clk), info->ssel);
- nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+ nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+ if (tmp == info->ssel)
+ break;
+ );
}
}
static void
-gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
- struct gk104_clk_info *info = &priv->eng[clk];
+ struct gk104_clk_info *info = &clk->eng[idx];
+ struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
- nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+ nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
}
}
static int
-gk104_clk_prog(struct nvkm_clk *clk)
+gk104_clk_prog(struct nvkm_clk *base)
{
- struct gk104_clk_priv *priv = (void *)clk;
+ struct gk104_clk *clk = gk104_clk(base);
struct {
u32 mask;
- void (*exec)(struct gk104_clk_priv *, int);
+ void (*exec)(struct gk104_clk *, int);
} stage[] = {
{ 0x007f, gk104_clk_prog_0 }, /* div programming */
{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@ gk104_clk_prog(struct nvkm_clk *clk)
int i, j;
for (i = 0; i < ARRAY_SIZE(stage); i++) {
- for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+ for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
if (!(stage[i].mask & (1 << j)))
continue;
- if (!priv->eng[j].freq)
+ if (!clk->eng[j].freq)
continue;
- stage[i].exec(priv, j);
+ stage[i].exec(clk, j);
}
}
@@ -446,55 +470,41 @@ gk104_clk_prog(struct nvkm_clk *clk)
}
static void
-gk104_clk_tidy(struct nvkm_clk *clk)
+gk104_clk_tidy(struct nvkm_clk *base)
{
- struct gk104_clk_priv *priv = (void *)clk;
- memset(priv->eng, 0x00, sizeof(priv->eng));
+ struct gk104_clk *clk = gk104_clk(base);
+ memset(clk->eng, 0x00, sizeof(clk->eng));
}
-static struct nvkm_domain
-gk104_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
- { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_mem , 0x03, 0, "memory", 500 },
- { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_hubk01 , 0x05 },
- { nv_clk_src_vdec , 0x06 },
- { nv_clk_src_daemon , 0x07 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gk104_clk = {
+ .read = gk104_clk_read,
+ .calc = gk104_clk_calc,
+ .prog = gk104_clk_prog,
+ .tidy = gk104_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_mem , 0x03, 0, "memory", 500 },
+ { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+ { nv_clk_src_hubk01 , 0x05 },
+ { nv_clk_src_vdec , 0x06 },
+ { nv_clk_src_daemon , 0x07 },
+ { nv_clk_src_max }
+ }
};
-static int
-gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gk104_clk_priv *priv;
- int ret;
+ struct gk104_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gk104_clk_read;
- priv->base.calc = gk104_clk_calc;
- priv->base.prog = gk104_clk_prog;
- priv->base.tidy = gk104_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-gk104_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
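
The tail of this file shows the general shape of the port: the nvkm_oclass/ctor boilerplate becomes a const nvkm_clk_func table plus a small *_new() that allocates the wrapper and hands it to nvkm_clk_ctor(). A minimal template of that idiom, with a placeholder "xx99" chipset and stubbed callbacks (a sketch assuming the same "priv.h" context as the files in this diff, not a real implementation):

	#define xx99_clk(p) container_of((p), struct xx99_clk, base)

	struct xx99_clk {
		struct nvkm_clk base;
		/* implementation-private state */
	};

	static int
	xx99_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
	{
		return -EINVAL;	/* stub */
	}

	static int
	xx99_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
	{
		return 0;	/* stub */
	}

	static int
	xx99_clk_prog(struct nvkm_clk *base)
	{
		return 0;	/* stub */
	}

	static void
	xx99_clk_tidy(struct nvkm_clk *base)
	{
	}

	static const struct nvkm_clk_func
	xx99_clk_func = {
		.read = xx99_clk_read,
		.calc = xx99_clk_calc,
		.prog = xx99_clk_prog,
		.tidy = xx99_clk_tidy,
		.domains = {
			{ nv_clk_src_crystal, 0xff },
			{ nv_clk_src_max }
		}
	};

	int
	xx99_clk_new(struct nvkm_device *device, int index,
		     struct nvkm_clk **pclk)
	{
		struct xx99_clk *clk;

		if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
			return -ENOMEM;
		*pclk = &clk->base;
		return nvkm_clk_ctor(&xx99_clk_func, device, index, true,
				     &clk->base);
	}
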
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c532742b08..254094ab7fb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,14 +22,11 @@
* Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
*
*/
-#include <subdev/clk.h>
-#include <subdev/timer.h>
-
-#include <core/device.h>
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+#include "priv.h"
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#include <core/tegra.h>
+#include <subdev/timer.h>
#define MHZ (1000 * 1000)
@@ -117,41 +114,42 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
.min_pl = 1, .max_pl = 32,
};
-struct gk20a_clk_priv {
+struct gk20a_clk {
struct nvkm_clk base;
const struct gk20a_clk_pllg_params *params;
u32 m, n, pl;
u32 parent_rate;
};
-#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
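
The per-file to_gk20a_clk() macro gives way to the uniform pattern of defining the downcast macro before the includes: gk20a_clk(p) recovers the wrapper from the embedded nvkm_clk via container_of(). A freestanding illustration of why that pointer arithmetic is safe:

	/* Userspace illustration of the container_of downcast idiom. */
	#include <stddef.h>
	#include <assert.h>

	#define demo_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base { int x; };
	struct wrapper { struct base base; int extra; };

	int main(void)
	{
		struct wrapper w = { .extra = 42 };
		struct base *b = &w.base;	/* what callbacks receive */
		assert(demo_container_of(b, struct wrapper, base) == &w);
		return 0;
	}
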
static void
-gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 val;
- val = nv_rd32(priv, GPCPLL_COEFF);
- priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
- priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
+ clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+ clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
static u32
-gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk *clk)
{
u32 rate;
u32 divider;
- rate = priv->parent_rate * priv->n;
- divider = priv->m * pl_to_div[priv->pl];
+ rate = clk->parent_rate * clk->n;
+ divider = clk->m * pl_to_div[clk->pl];
do_div(rate, divider);
return rate / 2;
}
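
Spelled out, the function computes rate = parent_rate * n / (m * pl_to_div[pl]) / 2; the final halving is because the PLL produces gpc2clk, twice the GPC clock. A worked example with made-up coefficients (do_div() replaced by plain 64-bit division):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t parent_rate = 12000000;	/* hypothetical 12 MHz ref */
		uint32_t n = 100, m = 1, pl_div = 2;	/* made-up M/N/PL */
		uint64_t rate = parent_rate * n / (m * pl_div);
		printf("%llu Hz\n", (unsigned long long)(rate / 2)); /* 300 MHz */
		return 0;
	}
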
static int
-gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
u32 target_clk_f, ref_clk_f, target_freq;
u32 min_vco_f, max_vco_f;
u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
u32 pl;
target_clk_f = rate * 2 / MHZ;
- ref_clk_f = priv->parent_rate / MHZ;
+ ref_clk_f = clk->parent_rate / MHZ;
- max_vco_f = priv->params->max_vco;
- min_vco_f = priv->params->min_vco;
- best_m = priv->params->max_m;
- best_n = priv->params->min_n;
- best_pl = priv->params->min_pl;
+ max_vco_f = clk->params->max_vco;
+ min_vco_f = clk->params->min_vco;
+ best_m = clk->params->max_m;
+ best_n = clk->params->min_n;
+ best_pl = clk->params->min_pl;
target_vco_f = target_clk_f + target_clk_f / 50;
if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
/* min_pl <= high_pl <= max_pl */
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
- high_pl = min(high_pl, priv->params->max_pl);
- high_pl = max(high_pl, priv->params->min_pl);
+ high_pl = min(high_pl, clk->params->max_pl);
+ high_pl = max(high_pl, clk->params->min_pl);
/* min_pl <= low_pl <= max_pl */
low_pl = min_vco_f / target_vco_f;
- low_pl = min(low_pl, priv->params->max_pl);
- low_pl = max(low_pl, priv->params->min_pl);
+ low_pl = min(low_pl, clk->params->max_pl);
+ low_pl = max(low_pl, clk->params->min_pl);
/* Find Indices of high_pl and low_pl */
for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
}
}
- nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
- pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+ nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+ pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
/* Select lowest possible VCO */
for (pl = low_pl; pl <= high_pl; pl++) {
target_vco_f = target_clk_f * pl_to_div[pl];
- for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+ for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
u_f = ref_clk_f / m;
- if (u_f < priv->params->min_u)
+ if (u_f < clk->params->min_u)
break;
- if (u_f > priv->params->max_u)
+ if (u_f > clk->params->max_u)
continue;
n = (target_vco_f * m) / ref_clk_f;
n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
- if (n > priv->params->max_n)
+ if (n > clk->params->max_n)
break;
for (; n <= n2; n++) {
- if (n < priv->params->min_n)
+ if (n < clk->params->min_n)
continue;
- if (n > priv->params->max_n)
+ if (n > clk->params->max_n)
break;
vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@ found_match:
WARN_ON(best_delta == ~0);
if (best_delta != 0)
- nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
- target_clk_f);
+ nvkm_debug(subdev,
+ "no best match for target @ %dMHz on gpc_pll",
+ target_clk_f);
- priv->m = best_m;
- priv->n = best_n;
- priv->pl = best_pl;
+ clk->m = best_m;
+ clk->n = best_n;
+ clk->pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+ target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
- nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+ nvkm_debug(subdev,
+ "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
return 0;
}
static int
-gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 val;
int ramp_timeout;
/* get old coefficients */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
/* do nothing if NDIV is the same */
if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
return 0;
/* setup */
- nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
- nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
/* pll slowdown mode */
- nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
/* new ndiv ready for ramp */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
udelay(1);
- nv_wr32(priv, GPCPLL_COEFF, val);
+ nvkm_wr32(device, GPCPLL_COEFF, val);
/* dynamic ramp to new ndiv */
- val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
udelay(1);
- nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+ nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
udelay(1);
- val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+ val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
break;
}
/* exit slowdown mode */
- nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
- nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
if (ramp_timeout <= 0) {
- nv_error(priv, "gpcpll dynamic ramp timeout\n");
+ nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
return -ETIMEDOUT;
}
@@ -322,149 +324,147 @@ gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
}
static void
-_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk *clk)
{
- nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
- nv_rd32(priv, GPCPLL_CFG);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nvkm_rd32(device, GPCPLL_CFG);
}
static void
-_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk *clk)
{
- nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
- nv_rd32(priv, GPCPLL_CFG);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 val, cfg;
u32 m_old, pl_old, n_lo;
/* get old coefficients */
- val = nv_rd32(priv, GPCPLL_COEFF);
+ val = nvkm_rd32(device, GPCPLL_COEFF);
m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
/* do NDIV slide if there is no change in M and PL */
- cfg = nv_rd32(priv, GPCPLL_CFG);
- if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+ cfg = nvkm_rd32(device, GPCPLL_CFG);
+ if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
(cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(priv, priv->n);
+ return gk20a_pllg_slide(clk, clk->n);
}
/* slide down to NDIV_LO */
- n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
- priv->parent_rate / MHZ);
+ n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
+ clk->parent_rate / MHZ);
if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret = gk20a_pllg_slide(priv, n_lo);
+ int ret = gk20a_pllg_slide(clk, n_lo);
if (ret)
return ret;
}
	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
- nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
/* put PLL in bypass before programming it */
- val = nv_rd32(priv, SEL_VCO);
+ val = nvkm_rd32(device, SEL_VCO);
val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
udelay(2);
- nv_wr32(priv, SEL_VCO, val);
+ nvkm_wr32(device, SEL_VCO, val);
/* get out from IDDQ */
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_IDDQ) {
val &= ~GPCPLL_CFG_IDDQ;
- nv_wr32(priv, GPCPLL_CFG, val);
- nv_rd32(priv, GPCPLL_CFG);
+ nvkm_wr32(device, GPCPLL_CFG, val);
+ nvkm_rd32(device, GPCPLL_CFG);
udelay(2);
}
- _gk20a_pllg_disable(priv);
+ _gk20a_pllg_disable(clk);
- nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
- priv->pl);
+ nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
+ clk->m, clk->n, clk->pl);
- n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
- priv->parent_rate / MHZ);
- val = priv->m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
- val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
- nv_wr32(priv, GPCPLL_COEFF, val);
+ n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
+ clk->parent_rate / MHZ);
+ val = clk->m << GPCPLL_COEFF_M_SHIFT;
+ val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
+ val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+ nvkm_wr32(device, GPCPLL_COEFF, val);
- _gk20a_pllg_enable(priv);
+ _gk20a_pllg_enable(clk);
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
val &= ~GPCPLL_CFG_LOCK_DET_OFF;
- nv_wr32(priv, GPCPLL_CFG, val);
+ nvkm_wr32(device, GPCPLL_CFG, val);
}
- if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
- GPCPLL_CFG_LOCK)) {
- nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+ if (nvkm_usec(device, 300,
+ if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+ break;
+ ) < 0)
return -ETIMEDOUT;
- }
/* switch to VCO mode */
- nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
/* restore out divider 1:1 */
- val = nv_rd32(priv, GPC2CLK_OUT);
+ val = nvkm_rd32(device, GPC2CLK_OUT);
val &= ~GPC2CLK_OUT_VCODIV_MASK;
udelay(2);
- nv_wr32(priv, GPC2CLK_OUT, val);
+ nvkm_wr32(device, GPC2CLK_OUT, val);
/* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+ return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk)
{
int err;
- err = _gk20a_pllg_program_mnp(priv, true);
+ err = _gk20a_pllg_program_mnp(clk, true);
if (err)
- err = _gk20a_pllg_program_mnp(priv, false);
+ err = _gk20a_pllg_program_mnp(clk, false);
return err;
}
static void
-gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk *clk)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 val;
/* slide to VCO min */
- val = nv_rd32(priv, GPCPLL_CFG);
+ val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_ENABLE) {
u32 coeff, m, n_lo;
- coeff = nv_rd32(priv, GPCPLL_COEFF);
+ coeff = nvkm_rd32(device, GPCPLL_COEFF);
m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
- priv->parent_rate / MHZ);
- gk20a_pllg_slide(priv, n_lo);
+ n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
+ clk->parent_rate / MHZ);
+ gk20a_pllg_slide(clk, n_lo);
}
/* put PLL in bypass before disabling it */
- nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
- _gk20a_pllg_disable(priv);
+ _gk20a_pllg_disable(clk);
}
#define GK20A_CLK_GPC_MDIV 1000
-static struct nvkm_domain
-gk20a_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
- { nv_clk_src_max }
-};
-
static struct nvkm_pstate
gk20a_pstates[] = {
{
@@ -560,87 +560,99 @@ gk20a_pstates[] = {
};
static int
-gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(clk)->crystal;
+ return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(priv);
- return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+ gk20a_pllg_read_mnp(clk);
+ return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
}
static int
-gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
- return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+ return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV);
}
static int
-gk20a_clk_prog(struct nvkm_clk *clk)
+gk20a_clk_prog(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)clk;
+ struct gk20a_clk *clk = gk20a_clk(base);
- return gk20a_pllg_program_mnp(priv);
+ return gk20a_pllg_program_mnp(clk);
}
static void
-gk20a_clk_tidy(struct nvkm_clk *clk)
+gk20a_clk_tidy(struct nvkm_clk *base)
{
}
-static int
-gk20a_clk_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_clk_fini(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_clk_fini(&priv->base, false);
-
- gk20a_pllg_disable(priv);
-
- return ret;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ gk20a_pllg_disable(clk);
}
static int
-gk20a_clk_init(struct nvkm_object *object)
+gk20a_clk_init(struct nvkm_clk *base)
{
- struct gk20a_clk_priv *priv = (void *)object;
+ struct gk20a_clk *clk = gk20a_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
int ret;
- nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
-
- ret = nvkm_clk_init(&priv->base);
- if (ret)
- return ret;
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
- ret = gk20a_clk_prog(&priv->base);
+ ret = gk20a_clk_prog(&clk->base);
if (ret) {
- nv_error(priv, "cannot initialize clock\n");
+ nvkm_error(subdev, "cannot initialize clock\n");
return ret;
}
return 0;
}
-static int
-gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+gk20a_clk = {
+ .init = gk20a_clk_init,
+ .fini = gk20a_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gk20a_clk_calc,
+ .prog = gk20a_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gk20a_pstates,
+ .nr_pstates = ARRAY_SIZE(gk20a_pstates),
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+ }
+};
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gk20a_clk_priv *priv;
- struct nouveau_platform_device *plat;
- int ret;
- int i;
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gk20a_clk *clk;
+ int ret, i;
+
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
/* Finish initializing the pstates */
for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
gk20a_pstates[i].pstate = i + 1;
}
- ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
- gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
- true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ clk->params = &gk20a_pllg_params;
+ clk->parent_rate = clk_get_rate(tdev->clk);
- priv->params = &gk20a_pllg_params;
-
- plat = nv_device_to_platform(nv_device(parent));
- priv->parent_rate = clk_get_rate(plat->gpu->clk);
- nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
-
- priv->base.read = gk20a_clk_read;
- priv->base.calc = gk20a_clk_calc;
- priv->base.prog = gk20a_clk_prog;
- priv->base.tidy = gk20a_clk_tidy;
- return 0;
+ ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
+ nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
+ clk->parent_rate / MHZ);
+ return ret;
}
-
-struct nvkm_oclass
-gk20a_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xea),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_clk_ctor,
- .dtor = _nvkm_subdev_dtor,
- .init = gk20a_clk_init,
- .fini = gk20a_clk_fini,
- },
-};
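
gk20a also shows where the Tegra reference clock now comes from: the constructor queries the device's Tegra wrapper directly instead of going through nouveau_platform_device. A hedged sketch of that dependency (clk_get_rate() is the standard common-clock call; tdev->clk is the GPU clock handle the Tegra glue is assumed to have acquired; the error policy below is illustrative, not from the diff):

	#include <linux/clk.h>

	static int demo_parent_rate(struct nvkm_device *device, u32 *khz)
	{
		struct nvkm_device_tegra *tdev = device->func->tegra(device);
		unsigned long hz = clk_get_rate(tdev->clk); /* CCF query */

		if (!hz)
			return -EINVAL;	/* assumed policy */
		*khz = hz / 1000;
		return 0;
	}
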
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..c233e3f653ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
* Authors: Ben Skeggs
* Roy Spliet
*/
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
#include "gt215.h"
#include "pll.h"
-#include <core/device.h>
#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-struct gt215_clk_priv {
+struct gt215_clk {
struct nvkm_clk base;
struct gt215_clk_info eng[nv_clk_src_max];
};
-static u32 read_clk(struct gt215_clk_priv *, int, bool);
-static u32 read_pll(struct gt215_clk_priv *, int, u32);
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
static u32
-read_vco(struct gt215_clk_priv *priv, int clk)
+read_vco(struct gt215_clk *clk, int idx)
{
- u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
switch (sctl & 0x00000030) {
case 0x00000000:
- return nv_device(priv)->crystal;
+ return device->crystal;
case 0x00000020:
- return read_pll(priv, 0x41, 0x00e820);
+ return read_pll(clk, 0x41, 0x00e820);
case 0x00000030:
- return read_pll(priv, 0x42, 0x00e8a0);
+ return read_pll(clk, 0x42, 0x00e8a0);
default:
return 0;
}
}
static u32
-read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
+ struct nvkm_device *device = clk->base.subdev.device;
u32 sctl, sdiv, sclk;
/* refclk for the 0xe8xx plls is a fixed frequency */
- if (clk >= 0x40) {
- if (nv_device(priv)->chipset == 0xaf) {
+ if (idx >= 0x40) {
+ if (device->chipset == 0xaf) {
/* no joke.. seriously.. sigh.. */
- return nv_rd32(priv, 0x00471c) * 1000;
+ return nvkm_rd32(device, 0x00471c) * 1000;
}
- return nv_device(priv)->crystal;
+ return device->crystal;
}
- sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+ sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
if (!ignore_en && !(sctl & 0x00000100))
return 0;
@@ -83,7 +85,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
switch (sctl & 0x00003000) {
case 0x00000000:
if (!(sctl & 0x00000200))
- return nv_device(priv)->crystal;
+ return device->crystal;
return 0;
case 0x00002000:
if (sctl & 0x00000040)
@@ -94,7 +96,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
if (!(sctl & 0x00000001))
return 0;
- sclk = read_vco(priv, clk);
+ sclk = read_vco(clk, idx);
sdiv = ((sctl & 0x003f0000) >> 16) + 2;
return (sclk * 2) / sdiv;
default:
@@ -103,14 +105,15 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
}
static u32
-read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
- u32 ctrl = nv_rd32(priv, pll + 0);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, pll + 0);
u32 sclk = 0, P = 1, N = 1, M = 1;
if (!(ctrl & 0x00000008)) {
if (ctrl & 0x00000001) {
- u32 coef = nv_rd32(priv, pll + 4);
+ u32 coef = nvkm_rd32(device, pll + 4);
M = (coef & 0x000000ff) >> 0;
N = (coef & 0x0000ff00) >> 8;
P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
if ((pll & 0x00ff00) == 0x00e800)
P = 1;
- sclk = read_clk(priv, 0x00 + clk, false);
+ sclk = read_clk(clk, 0x00 + idx, false);
}
} else {
- sclk = read_clk(priv, 0x10 + clk, false);
+ sclk = read_clk(clk, 0x10 + idx, false);
}
if (M * P)
@@ -134,41 +137,43 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
}
static int
-gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct gt215_clk_priv *priv = (void *)clk;
+ struct gt215_clk *clk = gt215_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 hsrc;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_core:
case nv_clk_src_core_intm:
- return read_pll(priv, 0x00, 0x4200);
+ return read_pll(clk, 0x00, 0x4200);
case nv_clk_src_shader:
- return read_pll(priv, 0x01, 0x4220);
+ return read_pll(clk, 0x01, 0x4220);
case nv_clk_src_mem:
- return read_pll(priv, 0x02, 0x4000);
+ return read_pll(clk, 0x02, 0x4000);
case nv_clk_src_disp:
- return read_clk(priv, 0x20, false);
+ return read_clk(clk, 0x20, false);
case nv_clk_src_vdec:
- return read_clk(priv, 0x21, false);
+ return read_clk(clk, 0x21, false);
case nv_clk_src_daemon:
- return read_clk(priv, 0x25, false);
+ return read_clk(clk, 0x25, false);
case nv_clk_src_host:
- hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
+ hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
switch (hsrc) {
case 0:
- return read_clk(priv, 0x1d, false);
+ return read_clk(clk, 0x1d, false);
case 2:
case 3:
return 277000;
default:
- nv_error(clk, "unknown HOST clock source %d\n", hsrc);
+ nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
return -EINVAL;
}
default:
- nv_error(clk, "invalid clock source %d\n", src);
+ nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
}
@@ -176,11 +181,12 @@ gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
}
int
-gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
struct gt215_clk_info *info)
{
- struct gt215_clk_priv *priv = (void *)clock;
- u32 oclk, sclk, sdiv, diff;
+ struct gt215_clk *clk = gt215_clk(base);
+ u32 oclk, sclk, sdiv;
+ s32 diff;
info->clk = 0;
@@ -195,7 +201,7 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
info->clk = 0x00002140;
return khz;
default:
- sclk = read_vco(priv, clk);
+ sclk = read_vco(clk, idx);
sdiv = min((sclk * 2) / khz, (u32)65);
oclk = (sclk * 2) / sdiv;
diff = ((khz + 3000) - oclk);
@@ -223,11 +229,11 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
}
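
The default branch above is the same half-step divider scheme as on gk104, with the error measured against a target biased up by 3 MHz. Worked numbers for a hypothetical VCO (the min-against-65 clamp is omitted here since 6 is well inside range):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sclk = 1000000;	/* hypothetical 1 GHz VCO, kHz */
		unsigned int khz  = 300000;	/* requested 300 MHz */
		unsigned int sdiv = (sclk * 2) / khz;	/* 6 */
		unsigned int oclk = (sclk * 2) / sdiv;	/* 333333 kHz */
		int diff = (int)((khz + 3000) - oclk);	/* biased error: -30333 */
		printf("sdiv=%u oclk=%u diff=%d\n", sdiv, oclk, diff);
		return 0;
	}
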
int
-gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
struct gt215_clk_info *info)
{
- struct nvkm_bios *bios = nvkm_bios(clock);
- struct gt215_clk_priv *priv = (void *)clock;
+ struct gt215_clk *clk = gt215_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll limits;
int P, N, M, diff;
int ret;
@@ -236,22 +242,22 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
	/* If we can get within [-2, 3) MHz of a divider, we'll disable the
* PLL and use the divider instead. */
- ret = gt215_clk_info(clock, clk, khz, info);
+ ret = gt215_clk_info(&clk->base, idx, khz, info);
diff = khz - ret;
if (!pll || (diff >= -2000 && diff < 3000)) {
goto out;
}
/* Try with PLL */
- ret = nvbios_pll_parse(bios, pll, &limits);
+ ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
if (ret)
return ret;
- ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
+ ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
if (ret != limits.refclk)
return -EINVAL;
- ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+ ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
if (ret >= 0) {
info->pll = (P << 16) | (N << 8) | M;
}
@@ -262,22 +268,22 @@ out:
}
static int
-calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
- int clk, u32 pll, int idx)
+calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
+ int idx, u32 pll, int dom)
{
- int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
- &priv->eng[idx]);
+ int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
+ &clk->eng[dom]);
if (ret >= 0)
return 0;
return ret;
}
static int
-calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
+calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
int ret = 0;
u32 kHz = cstate->domain[nv_clk_src_host];
- struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
+ struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
if (kHz == 277000) {
info->clk = 0;
@@ -287,7 +293,7 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
info->host_out = NVA3_HOST_CLK;
- ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
+ ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
if (ret >= 0)
return 0;
@@ -297,21 +303,33 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
/* halt and idle execution engines */
- nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
- nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+ nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
/* Wait until the interrupt handler is finished */
- if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x000100))
+ break;
+ ) < 0)
return -EBUSY;
- if (pfifo)
- pfifo->pause(pfifo, flags);
+ if (fifo)
+ nvkm_fifo_pause(fifo, flags);
- if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002504) & 0x00000010)
+ break;
+ ) < 0)
return -EIO;
- if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
+ if (tmp == 0x0000003f)
+ break;
+ ) < 0)
return -EIO;
return 0;
@@ -320,86 +338,94 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
- struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_fifo *fifo = device->fifo;
- if (pfifo && flags)
- pfifo->start(pfifo, flags);
+ if (fifo && flags)
+ nvkm_fifo_start(fifo, flags);
- nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
- nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+ nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}
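
gt215_clk_pre()/gt215_clk_post() bracket every reclock: execution engines are halted, the interrupt handler drained, and the FIFO paused before any clock register is touched, then everything is restarted. The prog() implementations below use them in this shape (a reduced sketch; the -EBUSY case mirrors gt215_clk_prog() further down, where pre() fails before the FIFO was ever paused):

	static int demo_prog(struct nvkm_clk *base)
	{
		unsigned long flags;
		unsigned long *f = &flags;
		int ret;

		ret = gt215_clk_pre(base, f);
		if (ret)
			goto out;

		/* ... program PLLs and dividers here ... */
	out:
		if (ret == -EBUSY)
			f = NULL;	/* FIFO never paused; don't unpause */
		gt215_clk_post(base, f);
		return ret;
	}
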
static void
-disable_clk_src(struct gt215_clk_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk *clk, u32 src)
{
- nv_mask(priv, src, 0x00000100, 0x00000000);
- nv_mask(priv, src, 0x00000001, 0x00000000);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, src, 0x00000100, 0x00000000);
+ nvkm_mask(device, src, 0x00000001, 0x00000000);
}
static void
-prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- const u32 src0 = 0x004120 + (clk * 4);
- const u32 src1 = 0x004160 + (clk * 4);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ const u32 src0 = 0x004120 + (idx * 4);
+ const u32 src1 = 0x004160 + (idx * 4);
const u32 ctrl = pll + 0;
const u32 coef = pll + 4;
u32 bypass;
if (info->pll) {
/* Always start from a non-PLL clock */
- bypass = nv_rd32(priv, ctrl) & 0x00000008;
+ bypass = nvkm_rd32(device, ctrl) & 0x00000008;
if (!bypass) {
- nv_mask(priv, src1, 0x00000101, 0x00000101);
- nv_mask(priv, ctrl, 0x00000008, 0x00000008);
+ nvkm_mask(device, src1, 0x00000101, 0x00000101);
+ nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
udelay(20);
}
- nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
- nv_wr32(priv, coef, info->pll);
- nv_mask(priv, ctrl, 0x00000015, 0x00000015);
- nv_mask(priv, ctrl, 0x00000010, 0x00000000);
- if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
- nv_mask(priv, ctrl, 0x00000010, 0x00000010);
- nv_mask(priv, src0, 0x00000101, 0x00000000);
+ nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+ nvkm_wr32(device, coef, info->pll);
+ nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, ctrl) & 0x00020000)
+ break;
+ ) < 0) {
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+ nvkm_mask(device, src0, 0x00000101, 0x00000000);
return;
}
- nv_mask(priv, ctrl, 0x00000010, 0x00000010);
- nv_mask(priv, ctrl, 0x00000008, 0x00000000);
- disable_clk_src(priv, src1);
+ nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+ nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
+ disable_clk_src(clk, src1);
} else {
- nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
- nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+ nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+ nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
udelay(20);
- nv_mask(priv, ctrl, 0x00000001, 0x00000000);
- disable_clk_src(priv, src0);
+ nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
+ disable_clk_src(clk, src0);
}
}
static void
-prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk *clk, int idx, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}
static void
-prog_host(struct gt215_clk_priv *priv)
+prog_host(struct gt215_clk *clk)
{
- struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
- u32 hsrc = (nv_rd32(priv, 0xc040));
+ struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 hsrc = (nvkm_rd32(device, 0xc040));
switch (info->host_out) {
case NVA3_HOST_277:
if ((hsrc & 0x30000000) == 0) {
- nv_wr32(priv, 0xc040, hsrc | 0x20000000);
- disable_clk_src(priv, 0x4194);
+ nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
+ disable_clk_src(clk, 0x4194);
}
break;
case NVA3_HOST_CLK:
- prog_clk(priv, 0x1d, nv_clk_src_host);
+ prog_clk(clk, 0x1d, nv_clk_src_host);
if ((hsrc & 0x30000000) >= 0x20000000) {
- nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
+ nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
}
break;
default:
@@ -407,44 +433,45 @@ prog_host(struct gt215_clk_priv *priv)
}
/* This seems to be a clock gating factor on idle, always set to 64 */
- nv_wr32(priv, 0xc044, 0x3e);
+ nvkm_wr32(device, 0xc044, 0x3e);
}
static void
-prog_core(struct gt215_clk_priv *priv, int idx)
+prog_core(struct gt215_clk *clk, int dom)
{
- struct gt215_clk_info *info = &priv->eng[idx];
- u32 fb_delay = nv_rd32(priv, 0x10002c);
+ struct gt215_clk_info *info = &clk->eng[dom];
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 fb_delay = nvkm_rd32(device, 0x10002c);
if (fb_delay < info->fb_delay)
- nv_wr32(priv, 0x10002c, info->fb_delay);
+ nvkm_wr32(device, 0x10002c, info->fb_delay);
- prog_pll(priv, 0x00, 0x004200, idx);
+ prog_pll(clk, 0x00, 0x004200, dom);
if (fb_delay > info->fb_delay)
- nv_wr32(priv, 0x10002c, info->fb_delay);
+ nvkm_wr32(device, 0x10002c, info->fb_delay);
}
static int
-gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct gt215_clk_priv *priv = (void *)clk;
- struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk *clk = gt215_clk(base);
+ struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
int ret;
- if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
- (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
- (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
- (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
- (ret = calc_host(priv, cstate)))
+ if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+ (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+ (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+ (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
+ (ret = calc_host(clk, cstate)))
return ret;
/* XXX: Should be reading the highest bit in the VBIOS clock to decide
* whether to use a PLL or not... but using a PLL defeats the purpose */
if (core->pll) {
- ret = gt215_clk_info(clk, 0x10,
+ ret = gt215_clk_info(&clk->base, 0x10,
cstate->domain[nv_clk_src_core_intm],
- &priv->eng[nv_clk_src_core_intm]);
+ &clk->eng[nv_clk_src_core_intm]);
if (ret < 0)
return ret;
}
@@ -453,81 +480,67 @@ gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
}
static int
-gt215_clk_prog(struct nvkm_clk *clk)
+gt215_clk_prog(struct nvkm_clk *base)
{
- struct gt215_clk_priv *priv = (void *)clk;
- struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+ struct gt215_clk *clk = gt215_clk(base);
+ struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
int ret = 0;
unsigned long flags;
unsigned long *f = &flags;
- ret = gt215_clk_pre(clk, f);
+ ret = gt215_clk_pre(&clk->base, f);
if (ret)
goto out;
if (core->pll)
- prog_core(priv, nv_clk_src_core_intm);
+ prog_core(clk, nv_clk_src_core_intm);
- prog_core(priv, nv_clk_src_core);
- prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
- prog_clk(priv, 0x20, nv_clk_src_disp);
- prog_clk(priv, 0x21, nv_clk_src_vdec);
- prog_host(priv);
+ prog_core(clk, nv_clk_src_core);
+ prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
+ prog_clk(clk, 0x20, nv_clk_src_disp);
+ prog_clk(clk, 0x21, nv_clk_src_vdec);
+ prog_host(clk);
out:
if (ret == -EBUSY)
f = NULL;
- gt215_clk_post(clk, f);
+ gt215_clk_post(&clk->base, f);
return ret;
}
static void
-gt215_clk_tidy(struct nvkm_clk *clk)
+gt215_clk_tidy(struct nvkm_clk *base)
{
}
-static struct nvkm_domain
-gt215_domain[] = {
- { nv_clk_src_crystal , 0xff },
- { nv_clk_src_core , 0x00, 0, "core", 1000 },
- { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
- { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
- { nv_clk_src_vdec , 0x03 },
- { nv_clk_src_disp , 0x04 },
- { nv_clk_src_host , 0x05 },
- { nv_clk_src_core_intm, 0x06 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+gt215_clk = {
+ .read = gt215_clk_read,
+ .calc = gt215_clk_calc,
+ .prog = gt215_clk_prog,
+ .tidy = gt215_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal , 0xff },
+ { nv_clk_src_core , 0x00, 0, "core", 1000 },
+ { nv_clk_src_shader , 0x01, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0x02, 0, "memory", 1000 },
+ { nv_clk_src_vdec , 0x03 },
+ { nv_clk_src_disp , 0x04 },
+ { nv_clk_src_host , 0x05 },
+ { nv_clk_src_core_intm, 0x06 },
+ { nv_clk_src_max }
+ }
};
-static int
-gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct gt215_clk_priv *priv;
- int ret;
+ struct gt215_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = gt215_clk_read;
- priv->base.calc = gt215_clk_calc;
- priv->base.prog = gt215_clk_prog;
- priv->base.tidy = gt215_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-gt215_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0xa3),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9cd4d37..8865b59fe575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
#ifndef __NVKM_CLK_NVA3_H__
#define __NVKM_CLK_NVA3_H__
-#include <subdev/clk.h>
+#include "priv.h"
struct gt215_clk_info {
u32 clk;
@@ -13,6 +13,6 @@ struct gt215_clk_info {
};
int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
-int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
-void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+int gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b146c7..1c21b8b53b78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
*
* Authors: Ben Skeggs
*/
+#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
#include "gt215.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
-struct mcp77_clk_priv {
+struct mcp77_clk {
struct nvkm_clk base;
enum nv_clk_src csrc, ssrc, vsrc;
u32 cctrl, sctrl;
@@ -39,27 +39,29 @@ struct mcp77_clk_priv {
};
static u32
-read_div(struct nvkm_clk *clk)
+read_div(struct mcp77_clk *clk)
{
- return nv_rd32(clk, 0x004600);
+ struct nvkm_device *device = clk->base.subdev.device;
+ return nvkm_rd32(device, 0x004600);
}
static u32
-read_pll(struct nvkm_clk *clk, u32 base)
+read_pll(struct mcp77_clk *clk, u32 base)
{
- u32 ctrl = nv_rd32(clk, base + 0);
- u32 coef = nv_rd32(clk, base + 4);
- u32 ref = clk->read(clk, nv_clk_src_href);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, base + 0);
+ u32 coef = nvkm_rd32(device, base + 4);
+ u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
u32 post_div = 0;
u32 clock = 0;
int N1, M1;
switch (base){
case 0x4020:
- post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+ post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
break;
case 0x4028:
- post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+ post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
break;
default:
break;
@@ -76,59 +78,61 @@ read_pll(struct nvkm_clk *clk, u32 base)
}
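
read_pll() here follows the usual coefficient readback: when the PLL is enabled, the output is ref * N1 / M1 scaled down by the post divider taken from 0x4070 (core PLL) or 0x4040 (shader PLL). The tail of the function falls outside this hunk, so the exact expression is an assumption; generic PLL arithmetic for orientation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ref = 100000;	/* href, kHz (see mcp77_clk_read) */
		unsigned int N1 = 60, M1 = 2, post_div = 2;	/* made up */
		unsigned int clock = ref * N1 / M1 / post_div;	/* 1500000 kHz */
		printf("%u kHz\n", clock);
		return 0;
	}
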
static int
-mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct mcp77_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(clk, 0x00c054);
+ struct mcp77_clk *clk = mcp77_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c054);
u32 P = 0;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /* PCIE reference clock */
case nv_clk_src_hclkm4:
- return clk->read(clk, nv_clk_src_href) * 4;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
case nv_clk_src_hclkm2d3:
- return clk->read(clk, nv_clk_src_href) * 2 / 3;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
case nv_clk_src_host:
switch (mast & 0x000c0000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
case 0x00040000: break;
- case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
- case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+ case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+ case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
}
break;
case nv_clk_src_core:
- P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
switch (mast & 0x00000003) {
- case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000001: return 0;
- case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+ case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
case 0x00000003: return read_pll(clk, 0x004028) >> P;
}
break;
case nv_clk_src_cclk:
if ((mast & 0x03000000) != 0x03000000)
- return clk->read(clk, nv_clk_src_core);
+ return nvkm_clk_read(&clk->base, nv_clk_src_core);
if ((mast & 0x00000200) == 0x00000000)
- return clk->read(clk, nv_clk_src_core);
+ return nvkm_clk_read(&clk->base, nv_clk_src_core);
switch (mast & 0x00000c00) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
- case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
- case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
+ case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+ case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
default: return 0;
}
case nv_clk_src_shader:
- P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
switch (mast & 0x00000030) {
case 0x00000000:
if (mast & 0x00000040)
- return clk->read(clk, nv_clk_src_href) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000010: break;
case 0x00000020: return read_pll(clk, 0x004028) >> P;
case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
switch (mast & 0x00400000) {
case 0x00400000:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
break;
default:
return 500000 >> P;
@@ -153,29 +157,28 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return 0;
}
static u32
-calc_pll(struct mcp77_clk_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk *clk, u32 reg,
u32 clock, int *N, int *M, int *P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
- struct nvkm_clk *clk = &priv->base;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return 0;
pll.vco2.max_freq = 0;
- pll.refclk = clk->read(clk, nv_clk_src_href);
+ pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
if (!pll.refclk)
return 0;
- return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+ return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
}
static inline u32
@@ -197,26 +200,27 @@ calc_P(u32 src, u32 target, int *div)
}
static int
-mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct mcp77_clk_priv *priv = (void *)clk;
+ struct mcp77_clk *clk = mcp77_clk(base);
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
+ struct nvkm_subdev *subdev = &clk->base.subdev;
u32 out = 0, clock = 0;
int N, M, P1, P2 = 0;
int divs = 0;
/* cclk: find suitable source, disable PLL if we can */
- if (core < clk->read(clk, nv_clk_src_hclkm4))
- out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+ if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
+ out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
/* Calculate clock * 2, so shader clock can use it too */
- clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+ clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
if (abs(core - out) <= abs(core - (clock >> 1))) {
- priv->csrc = nv_clk_src_hclkm4;
- priv->cctrl = divs << 16;
+ clk->csrc = nv_clk_src_hclkm4;
+ clk->cctrl = divs << 16;
} else {
/* NVCTRL is actually used _after_ NVPOST, and after what we
* call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
P1 = 2;
}
- priv->csrc = nv_clk_src_core;
- priv->ccoef = (N << 8) | M;
+ clk->csrc = nv_clk_src_core;
+ clk->ccoef = (N << 8) | M;
- priv->cctrl = (P2 + 1) << 16;
- priv->cpost = (1 << P1) << 16;
+ clk->cctrl = (P2 + 1) << 16;
+ clk->cpost = (1 << P1) << 16;
}
/* sclk: nvpll + divisor, href or spll */
out = 0;
- if (shader == clk->read(clk, nv_clk_src_href)) {
- priv->ssrc = nv_clk_src_href;
+ if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
+ clk->ssrc = nv_clk_src_href;
} else {
- clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
- if (priv->csrc == nv_clk_src_core)
+ clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
+ if (clk->csrc == nv_clk_src_core)
out = calc_P((core << 1), shader, &divs);
if (abs(shader - out) <=
abs(shader - clock) &&
(divs + P2) <= 7) {
- priv->ssrc = nv_clk_src_core;
- priv->sctrl = (divs + P2) << 16;
+ clk->ssrc = nv_clk_src_core;
+ clk->sctrl = (divs + P2) << 16;
} else {
- priv->ssrc = nv_clk_src_shader;
- priv->scoef = (N << 8) | M;
- priv->sctrl = P1 << 16;
+ clk->ssrc = nv_clk_src_shader;
+ clk->scoef = (N << 8) | M;
+ clk->sctrl = P1 << 16;
}
}
@@ -258,172 +262,162 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
out = calc_P(core, vdec, &divs);
clock = calc_P(500000, vdec, &P1);
if(abs(vdec - out) <= abs(vdec - clock)) {
- priv->vsrc = nv_clk_src_cclk;
- priv->vdiv = divs << 16;
+ clk->vsrc = nv_clk_src_cclk;
+ clk->vdiv = divs << 16;
} else {
- priv->vsrc = nv_clk_src_vdec;
- priv->vdiv = P1 << 16;
+ clk->vsrc = nv_clk_src_vdec;
+ clk->vdiv = P1 << 16;
}
/* Print strategy! */
- nv_debug(priv, "nvpll: %08x %08x %08x\n",
- priv->ccoef, priv->cpost, priv->cctrl);
- nv_debug(priv, " spll: %08x %08x %08x\n",
- priv->scoef, priv->spost, priv->sctrl);
- nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
- if (priv->csrc == nv_clk_src_hclkm4)
- nv_debug(priv, "core: hrefm4\n");
+ nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
+ clk->ccoef, clk->cpost, clk->cctrl);
+ nvkm_debug(subdev, " spll: %08x %08x %08x\n",
+ clk->scoef, clk->spost, clk->sctrl);
+ nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
+ if (clk->csrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "core: hrefm4\n");
else
- nv_debug(priv, "core: nvpll\n");
+ nvkm_debug(subdev, "core: nvpll\n");
- if (priv->ssrc == nv_clk_src_hclkm4)
- nv_debug(priv, "shader: hrefm4\n");
- else if (priv->ssrc == nv_clk_src_core)
- nv_debug(priv, "shader: nvpll\n");
+ if (clk->ssrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "shader: hrefm4\n");
+ else if (clk->ssrc == nv_clk_src_core)
+ nvkm_debug(subdev, "shader: nvpll\n");
else
- nv_debug(priv, "shader: spll\n");
+ nvkm_debug(subdev, "shader: spll\n");
- if (priv->vsrc == nv_clk_src_hclkm4)
- nv_debug(priv, "vdec: 500MHz\n");
+ if (clk->vsrc == nv_clk_src_hclkm4)
+ nvkm_debug(subdev, "vdec: 500MHz\n");
else
- nv_debug(priv, "vdec: core\n");
+ nvkm_debug(subdev, "vdec: core\n");
return 0;
}
static int
-mcp77_clk_prog(struct nvkm_clk *clk)
+mcp77_clk_prog(struct nvkm_clk *base)
{
- struct mcp77_clk_priv *priv = (void *)clk;
+ struct mcp77_clk *clk = mcp77_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
u32 pllmask = 0, mast;
unsigned long flags;
unsigned long *f = &flags;
int ret = 0;
- ret = gt215_clk_pre(clk, f);
+ ret = gt215_clk_pre(&clk->base, f);
if (ret)
goto out;
/* First switch to safe clocks: href */
- mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+ mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
mast &= ~0x00400e73;
mast |= 0x03000000;
- switch (priv->csrc) {
+ switch (clk->csrc) {
case nv_clk_src_hclkm4:
- nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+ nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
mast |= 0x00000002;
break;
case nv_clk_src_core:
- nv_wr32(clk, 0x402c, priv->ccoef);
- nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
- nv_wr32(clk, 0x4040, priv->cpost);
+ nvkm_wr32(device, 0x402c, clk->ccoef);
+ nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+ nvkm_wr32(device, 0x4040, clk->cpost);
pllmask |= (0x3 << 8);
mast |= 0x00000003;
break;
default:
- nv_warn(priv,"Reclocking failed: unknown core clock\n");
+ nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
goto resume;
}
- switch (priv->ssrc) {
+ switch (clk->ssrc) {
case nv_clk_src_href:
- nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+ nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
/* mast |= 0x00000000; */
break;
case nv_clk_src_core:
- nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+ nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
mast |= 0x00000020;
break;
case nv_clk_src_shader:
- nv_wr32(clk, 0x4024, priv->scoef);
- nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
- nv_wr32(clk, 0x4070, priv->spost);
+ nvkm_wr32(device, 0x4024, clk->scoef);
+ nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+ nvkm_wr32(device, 0x4070, clk->spost);
pllmask |= (0x3 << 12);
mast |= 0x00000030;
break;
default:
- nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+ nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
goto resume;
}
- if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
- nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+ if (tmp == pllmask)
+ break;
+ ) < 0)
goto resume;
- }
- switch (priv->vsrc) {
+ switch (clk->vsrc) {
case nv_clk_src_cclk:
mast |= 0x00400000;
default:
- nv_wr32(clk, 0x4600, priv->vdiv);
+ nvkm_wr32(device, 0x4600, clk->vdiv);
}
- nv_wr32(clk, 0xc054, mast);
+ nvkm_wr32(device, 0xc054, mast);
resume:
/* Disable some PLLs and dividers when unused */
- if (priv->csrc != nv_clk_src_core) {
- nv_wr32(clk, 0x4040, 0x00000000);
- nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+ if (clk->csrc != nv_clk_src_core) {
+ nvkm_wr32(device, 0x4040, 0x00000000);
+ nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
}
- if (priv->ssrc != nv_clk_src_shader) {
- nv_wr32(clk, 0x4070, 0x00000000);
- nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+ if (clk->ssrc != nv_clk_src_shader) {
+ nvkm_wr32(device, 0x4070, 0x00000000);
+ nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
}
out:
if (ret == -EBUSY)
f = NULL;
- gt215_clk_post(clk, f);
+ gt215_clk_post(&clk->base, f);
return ret;
}
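
[editor's note] The nvkm_msec() construct introduced in the hunk above replaces the old nv_wait(): its statement body is retried until it executes break or the time budget (2000 ms here) expires, in which case the whole expression evaluates negative. A rough, self-contained userspace model of those semantics — the macro name and the loop granularity below are illustrative, not the in-tree implementation:

#include <stdio.h>
#include <stdint.h>

/* Crude stand-in for nvkm_msec(): retry `cond` up to `budget` iterations,
 * yielding a negative value on timeout, non-negative on success. */
#define poll_msec(budget, cond) ({            \
	long _t = (budget), _ret = -1;        \
	while (_t-- > 0) {                    \
		if (cond) {                   \
			_ret = _t;            \
			break;                \
		}                             \
	}                                     \
	_ret;                                 \
})

int main(void)
{
	uint32_t reg = 0x00000300, pllmask = 0x00000300;

	if (poll_msec(2000, (reg & pllmask) == pllmask) < 0)
		puts("Reclocking failed: unstable PLLs");
	else
		puts("PLLs locked");
	return 0;
}
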
static void
-mcp77_clk_tidy(struct nvkm_clk *clk)
+mcp77_clk_tidy(struct nvkm_clk *base)
{
}
-static struct nvkm_domain
-mcp77_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+mcp77_clk = {
+ .read = mcp77_clk_read,
+ .calc = mcp77_clk_calc,
+ .prog = mcp77_clk_prog,
+ .tidy = mcp77_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
+ { nv_clk_src_max }
+ }
};
-static int
-mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct mcp77_clk_priv *priv;
- int ret;
+ struct mcp77_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ *pclk = &clk->base;
- priv->base.read = mcp77_clk_read;
- priv->base.calc = mcp77_clk_calc;
- priv->base.prog = mcp77_clk_prog;
- priv->base.tidy = mcp77_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass *
-mcp77_clk_oclass = &(struct nvkm_oclass) {
- .handle = NV_SUBDEV(CLK, 0xaa),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = mcp77_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
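
[editor's note] The recurring change in these clk hunks is the object model: the old code downcast with `struct mcp77_clk_priv *priv = (void *)clk;`, while the new code recovers the per-chip structure from its embedded base via a container_of() macro such as mcp77_clk(p). A minimal, self-contained illustration of that pattern — the type names here are invented for the demo:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_clk { int index; };

struct chip_clk {
	struct base_clk base;   /* embedded base object */
	int csrc;               /* per-chip state, like mcp77_clk */
};

#define chip_clk(p) container_of((p), struct chip_clk, base)

int main(void)
{
	struct chip_clk c = { .base = { .index = 7 }, .csrc = 3 };
	struct base_clk *b = &c.base;   /* what the vtable hands us */

	/* recovers the full chip object from the base pointer */
	printf("csrc=%d\n", chip_clk(b)->csrc);
	return 0;
}
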
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb575228..b280f85e8827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#include "priv.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit/nv04.h>
-struct nv04_clk_priv {
- struct nvkm_clk base;
-};
-
int
nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
int clk, struct nvkm_pll_vals *pv)
{
int N1, M1, N2, M2, P;
- int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
+ int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
if (ret) {
pv->refclk = info->refclk;
pv->N1 = N1;
@@ -52,8 +48,9 @@ nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
int
nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
{
- struct nvkm_devinit *devinit = nvkm_devinit(clk);
- int cv = nvkm_bios(clk)->version.chip;
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_devinit *devinit = device->devinit;
+ int cv = device->bios->version.chip;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
@@ -67,37 +64,20 @@ nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
return 0;
}
-static struct nvkm_domain
-nv04_domain[] = {
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+nv04_clk = {
+ .domains = {
+ { nv_clk_src_max }
+ }
};
-static int
-nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct nv04_clk_priv *priv;
- int ret;
-
- ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.pll_calc = nv04_clk_pll_calc;
- priv->base.pll_prog = nv04_clk_pll_prog;
- return 0;
+ int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+ if (ret == 0) {
+ (*pclk)->pll_calc = nv04_clk_pll_calc;
+ (*pclk)->pll_prog = nv04_clk_pll_prog;
+ }
+ return ret;
}
-
-struct nvkm_oclass
-nv04_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
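
[editor's note] nv04_clk_new() above shows the new constructor convention: a shared nvkm_clk_new_()/nvkm_clk_ctor() takes a const function table, and the chip code only patches the optional per-object hooks (pll_calc/pll_prog) afterwards. A hedged, standalone sketch of that shape — the names and error handling are simplified for the demo:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_func { int (*read)(void); };

struct clk {
	const struct clk_func *func;   /* shared, immutable per-chip ops */
	int (*pll_calc)(int khz);      /* optional per-object hook */
};

static int clk_new_(const struct clk_func *func, struct clk **pclk)
{
	if (!(*pclk = calloc(1, sizeof(**pclk))))
		return -ENOMEM;
	(*pclk)->func = func;
	return 0;
}

static int demo_pll_calc(int khz) { return khz; }

static const struct clk_func nv04_like = { .read = NULL };

int main(void)
{
	struct clk *clk;
	int ret = clk_new_(&nv04_like, &clk);

	if (ret == 0) {
		clk->pll_calc = demo_pll_calc;   /* mirrors nv04_clk_new() */
		printf("%d kHz\n", clk->pll_calc(400000));
		free(clk);
	}
	return ret;
}
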
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed838130c89d..2ab9b9b84018 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/clk.h>
+#define nv40_clk(p) container_of((p), struct nv40_clk, base)
+#include "priv.h"
#include "pll.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
-struct nv40_clk_priv {
+struct nv40_clk {
struct nvkm_clk base;
u32 ctrl;
u32 npll_ctrl;
@@ -36,64 +36,56 @@ struct nv40_clk_priv {
u32 spll;
};
-static struct nvkm_domain
-nv40_domain[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_max }
-};
-
static u32
-read_pll_1(struct nv40_clk_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk *clk, u32 reg)
{
- u32 ctrl = nv_rd32(priv, reg + 0x00);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, reg + 0x00);
int P = (ctrl & 0x00070000) >> 16;
int N = (ctrl & 0x0000ff00) >> 8;
int M = (ctrl & 0x000000ff) >> 0;
- u32 ref = 27000, clk = 0;
+ u32 ref = 27000, khz = 0;
if (ctrl & 0x80000000)
- clk = ref * N / M;
+ khz = ref * N / M;
- return clk >> P;
+ return khz >> P;
}
static u32
-read_pll_2(struct nv40_clk_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk *clk, u32 reg)
{
- u32 ctrl = nv_rd32(priv, reg + 0x00);
- u32 coef = nv_rd32(priv, reg + 0x04);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 ctrl = nvkm_rd32(device, reg + 0x00);
+ u32 coef = nvkm_rd32(device, reg + 0x04);
int N2 = (coef & 0xff000000) >> 24;
int M2 = (coef & 0x00ff0000) >> 16;
int N1 = (coef & 0x0000ff00) >> 8;
int M1 = (coef & 0x000000ff) >> 0;
int P = (ctrl & 0x00070000) >> 16;
- u32 ref = 27000, clk = 0;
+ u32 ref = 27000, khz = 0;
if ((ctrl & 0x80000000) && M1) {
- clk = ref * N1 / M1;
+ khz = ref * N1 / M1;
if ((ctrl & 0x40000100) == 0x40000000) {
if (M2)
- clk = clk * N2 / M2;
+ khz = khz * N2 / M2;
else
- clk = 0;
+ khz = 0;
}
}
- return clk >> P;
+ return khz >> P;
}
static u32
-read_clk(struct nv40_clk_priv *priv, u32 src)
+read_clk(struct nv40_clk *clk, u32 src)
{
switch (src) {
case 3:
- return read_pll_2(priv, 0x004000);
+ return read_pll_2(clk, 0x004000);
case 2:
- return read_pll_1(priv, 0x004008);
+ return read_pll_1(clk, 0x004008);
default:
break;
}
@@ -102,46 +94,48 @@ read_clk(struct nv40_clk_priv *priv, u32 src)
}
static int
-nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nv40_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(priv, 0x00c040);
+ struct nv40_clk *clk = nv40_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /*XXX: PCIE/AGP differ*/
case nv_clk_src_core:
- return read_clk(priv, (mast & 0x00000003) >> 0);
+ return read_clk(clk, (mast & 0x00000003) >> 0);
case nv_clk_src_shader:
- return read_clk(priv, (mast & 0x00000030) >> 4);
+ return read_clk(clk, (mast & 0x00000030) >> 4);
case nv_clk_src_mem:
- return read_pll_2(priv, 0x4020);
+ return read_pll_2(clk, 0x4020);
default:
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return -EINVAL;
}
static int
-nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
int *N1, int *M1, int *N2, int *M2, int *log2P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return ret;
- if (clk < pll.vco1.max_freq)
+ if (khz < pll.vco1.max_freq)
pll.vco2.max_freq = 0;
- ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+ ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
if (ret == 0)
return -ERANGE;
@@ -149,93 +143,90 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
}
static int
-nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct nv40_clk_priv *priv = (void *)clk;
+ struct nv40_clk *clk = nv40_clk(base);
int gclk = cstate->domain[nv_clk_src_core];
int sclk = cstate->domain[nv_clk_src_shader];
int N1, M1, N2, M2, log2P;
int ret;
/* core/geometric clock */
- ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+ ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
&N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
if (N2 == M2) {
- priv->npll_ctrl = 0x80000100 | (log2P << 16);
- priv->npll_coef = (N1 << 8) | M1;
+ clk->npll_ctrl = 0x80000100 | (log2P << 16);
+ clk->npll_coef = (N1 << 8) | M1;
} else {
- priv->npll_ctrl = 0xc0000000 | (log2P << 16);
- priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+ clk->npll_ctrl = 0xc0000000 | (log2P << 16);
+ clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
}
/* use the second pll for shader/rop clock, if it differs from core */
if (sclk && sclk != gclk) {
- ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+ ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
&N1, &M1, NULL, NULL, &log2P);
if (ret < 0)
return ret;
- priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
- priv->ctrl = 0x00000223;
+ clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+ clk->ctrl = 0x00000223;
} else {
- priv->spll = 0x00000000;
- priv->ctrl = 0x00000333;
+ clk->spll = 0x00000000;
+ clk->ctrl = 0x00000333;
}
return 0;
}
static int
-nv40_clk_prog(struct nvkm_clk *clk)
+nv40_clk_prog(struct nvkm_clk *base)
{
- struct nv40_clk_priv *priv = (void *)clk;
- nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
- nv_wr32(priv, 0x004004, priv->npll_coef);
- nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
- nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+ struct nv40_clk *clk = nv40_clk(base);
+ struct nvkm_device *device = clk->base.subdev.device;
+ nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+ nvkm_wr32(device, 0x004004, clk->npll_coef);
+ nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+ nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
mdelay(5);
- nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+ nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
return 0;
}
static void
-nv40_clk_tidy(struct nvkm_clk *clk)
+nv40_clk_tidy(struct nvkm_clk *obj)
{
}
-static int
-nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+nv40_clk = {
+ .read = nv40_clk_read,
+ .calc = nv40_clk_calc,
+ .prog = nv40_clk_prog,
+ .tidy = nv40_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
+ }
+};
+
+int
+nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
- struct nv40_clk_priv *priv;
- int ret;
+ struct nv40_clk *clk;
- ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
- NULL, 0, true, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ clk->base.pll_calc = nv04_clk_pll_calc;
+ clk->base.pll_prog = nv04_clk_pll_prog;
+ *pclk = &clk->base;
- priv->base.pll_calc = nv04_clk_pll_calc;
- priv->base.pll_prog = nv04_clk_pll_prog;
- priv->base.read = nv40_clk_read;
- priv->base.calc = nv40_clk_calc;
- priv->base.prog = nv40_clk_prog;
- priv->base.tidy = nv40_clk_tidy;
- return 0;
+ return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
}
-
-struct nvkm_oclass
-nv40_clk_oclass = {
- .handle = NV_SUBDEV(CLK, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
-};
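
[editor's note] read_pll_1() above computes khz = (ref * N / M) >> P from a single control word, against a 27 MHz crystal reference; the rename from `clk` to `khz` in the hunk only clarifies the unit. A self-contained check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint32_t pll_khz(uint32_t ctrl)
{
	int P = (ctrl & 0x00070000) >> 16;
	int N = (ctrl & 0x0000ff00) >> 8;
	int M = (ctrl & 0x000000ff) >> 0;
	uint32_t ref = 27000, khz = 0;

	if (ctrl & 0x80000000)   /* PLL enabled */
		khz = ref * N / M;
	return khz >> P;
}

int main(void)
{
	/* enabled, P=1, N=0x54 (84), M=2: 27000*84/2 >> 1 = 567000 kHz */
	printf("%u kHz\n", pll_khz(0x80015402));
	return 0;
}
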
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6347ce..5841f297973c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
#include "pll.h"
#include "seq.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
static u32
-read_div(struct nv50_clk_priv *priv)
+read_div(struct nv50_clk *clk)
{
- switch (nv_device(priv)->chipset) {
+ struct nvkm_device *device = clk->base.subdev.device;
+ switch (device->chipset) {
case 0x50: /* it exists, but only has bit 31, not the dividers.. */
case 0x84:
case 0x86:
case 0x98:
case 0xa0:
- return nv_rd32(priv, 0x004700);
+ return nvkm_rd32(device, 0x004700);
case 0x92:
case 0x94:
case 0x96:
- return nv_rd32(priv, 0x004800);
+ return nvkm_rd32(device, 0x004800);
default:
return 0x00000000;
}
}
static u32
-read_pll_src(struct nv50_clk_priv *priv, u32 base)
+read_pll_src(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
- u32 rsel = nv_rd32(priv, 0x00e18c);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+ u32 rsel = nvkm_rd32(device, 0x00e18c);
int P, N, M, id;
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0xa0:
switch (base) {
@@ -65,11 +66,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x4008: id = !!(rsel & 0x00000008); break;
case 0x4030: id = 0; break;
default:
- nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "ref: bad pll %06x\n", base);
return 0;
}
- coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+ coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
ref *= (coef & 0x01000000) ? 2 : 4;
P = (coef & 0x00070000) >> 16;
N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x84:
case 0x86:
case 0x92:
- coef = nv_rd32(priv, 0x00e81c);
+ coef = nvkm_rd32(device, 0x00e81c);
P = (coef & 0x00070000) >> 16;
N = (coef & 0x0000ff00) >> 8;
M = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
case 0x94:
case 0x96:
case 0x98:
- rsel = nv_rd32(priv, 0x00c050);
+ rsel = nvkm_rd32(device, 0x00c050);
switch (base) {
case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
case 0x4030: rsel = 3; break;
default:
- nv_error(priv, "ref: bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "ref: bad pll %06x\n", base);
return 0;
}
switch (rsel) {
case 0: id = 1; break;
- case 1: return clk->read(clk, nv_clk_src_crystal);
- case 2: return clk->read(clk, nv_clk_src_href);
+ case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+ case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 3: id = 0; break;
}
- coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
- P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ coef = nvkm_rd32(device, 0x00e81c + (id * 0x28));
+ P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
P += (coef & 0x00070000) >> 16;
N = (coef & 0x0000ff00) >> 8;
M = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
}
static u32
-read_pll_ref(struct nv50_clk_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 src, mast = nv_rd32(priv, 0x00c040);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 src, mast = nvkm_rd32(device, 0x00c040);
switch (base) {
case 0x004028:
@@ -140,33 +142,33 @@ read_pll_ref(struct nv50_clk_priv *priv, u32 base)
src = !!(mast & 0x02000000);
break;
case 0x00e810:
- return clk->read(clk, nv_clk_src_crystal);
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
default:
- nv_error(priv, "bad pll 0x%06x\n", base);
+ nvkm_error(subdev, "bad pll %06x\n", base);
return 0;
}
if (src)
- return clk->read(clk, nv_clk_src_href);
+ return nvkm_clk_read(&clk->base, nv_clk_src_href);
- return read_pll_src(priv, base);
+ return read_pll_src(clk, base);
}
static u32
-read_pll(struct nv50_clk_priv *priv, u32 base)
+read_pll(struct nv50_clk *clk, u32 base)
{
- struct nvkm_clk *clk = &priv->base;
- u32 mast = nv_rd32(priv, 0x00c040);
- u32 ctrl = nv_rd32(priv, base + 0);
- u32 coef = nv_rd32(priv, base + 4);
- u32 ref = read_pll_ref(priv, base);
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
+ u32 ctrl = nvkm_rd32(device, base + 0);
+ u32 coef = nvkm_rd32(device, base + 4);
+ u32 ref = read_pll_ref(clk, base);
u32 freq = 0;
int N1, N2, M1, M2;
if (base == 0x004028 && (mast & 0x00100000)) {
/* wtf, appears to only disable post-divider on gt200 */
- if (nv_device(priv)->chipset != 0xa0)
- return clk->read(clk, nv_clk_src_dom6);
+ if (device->chipset != 0xa0)
+ return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
}
N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@ read_pll(struct nv50_clk_priv *priv, u32 base)
return freq;
}
-static int
-nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+int
+nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
- struct nv50_clk_priv *priv = (void *)clk;
- u32 mast = nv_rd32(priv, 0x00c040);
+ struct nv50_clk *clk = nv50_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mast = nvkm_rd32(device, 0x00c040);
u32 P = 0;
switch (src) {
case nv_clk_src_crystal:
- return nv_device(priv)->crystal;
+ return device->crystal;
case nv_clk_src_href:
return 100000; /* PCIE reference clock */
case nv_clk_src_hclk:
- return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+ return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
case nv_clk_src_hclkm3:
- return clk->read(clk, nv_clk_src_hclk) * 3;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
case nv_clk_src_hclkm3d2:
- return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
case nv_clk_src_host:
switch (mast & 0x30000000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 0x10000000: break;
case 0x20000000: /* !0x50 */
- case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+ case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
}
break;
case nv_clk_src_core:
if (!(mast & 0x00100000))
- P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
switch (mast & 0x00000003) {
- case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
- case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
- case 0x00000002: return read_pll(priv, 0x004020) >> P;
- case 0x00000003: return read_pll(priv, 0x004028) >> P;
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
+ case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
+ case 0x00000002: return read_pll(clk, 0x004020) >> P;
+ case 0x00000003: return read_pll(clk, 0x004028) >> P;
}
break;
case nv_clk_src_shader:
- P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+ P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
switch (mast & 0x00000030) {
case 0x00000000:
if (mast & 0x00000080)
- return clk->read(clk, nv_clk_src_host) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000010: break;
- case 0x00000020: return read_pll(priv, 0x004028) >> P;
- case 0x00000030: return read_pll(priv, 0x004020) >> P;
+ case 0x00000020: return read_pll(clk, 0x004028) >> P;
+ case 0x00000030: return read_pll(clk, 0x004020) >> P;
}
break;
case nv_clk_src_mem:
- P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
- if (nv_rd32(priv, 0x004008) & 0x00000200) {
+ P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+ if (nvkm_rd32(device, 0x004008) & 0x00000200) {
switch (mast & 0x0000c000) {
case 0x00000000:
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00008000:
case 0x0000c000:
- return clk->read(clk, nv_clk_src_href) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
}
} else {
- return read_pll(priv, 0x004008) >> P;
+ return read_pll(clk, 0x004008) >> P;
}
break;
case nv_clk_src_vdec:
- P = (read_div(priv) & 0x00000700) >> 8;
- switch (nv_device(priv)->chipset) {
+ P = (read_div(clk) & 0x00000700) >> 8;
+ switch (device->chipset) {
case 0x84:
case 0x86:
case 0x92:
@@ -259,51 +263,51 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case 0xa0:
switch (mast & 0x00000c00) {
case 0x00000000:
- if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
- return clk->read(clk, nv_clk_src_core) >> P;
- return clk->read(clk, nv_clk_src_crystal) >> P;
+ if (device->chipset == 0xa0) /* wtf?? */
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000400:
return 0;
case 0x00000800:
if (mast & 0x01000000)
- return read_pll(priv, 0x004028) >> P;
- return read_pll(priv, 0x004030) >> P;
+ return read_pll(clk, 0x004028) >> P;
+ return read_pll(clk, 0x004030) >> P;
case 0x00000c00:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
}
break;
case 0x98:
switch (mast & 0x00000c00) {
case 0x00000000:
- return clk->read(clk, nv_clk_src_core) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
case 0x00000400:
return 0;
case 0x00000800:
- return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
case 0x00000c00:
- return clk->read(clk, nv_clk_src_mem) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
}
break;
}
break;
case nv_clk_src_dom6:
- switch (nv_device(priv)->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0xa0:
- return read_pll(priv, 0x00e810) >> 2;
+ return read_pll(clk, 0x00e810) >> 2;
case 0x84:
case 0x86:
case 0x92:
case 0x94:
case 0x96:
case 0x98:
- P = (read_div(priv) & 0x00000007) >> 0;
+ P = (read_div(clk) & 0x00000007) >> 0;
switch (mast & 0x0c000000) {
- case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
case 0x04000000: break;
- case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+ case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
case 0x0c000000:
- return clk->read(clk, nv_clk_src_hclkm3) >> P;
+ return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
}
break;
default:
@@ -313,27 +317,27 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
break;
}
- nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
return -EINVAL;
}
static u32
-calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvbios_pll pll;
int ret;
- ret = nvbios_pll_parse(bios, reg, &pll);
+ ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
if (ret)
return 0;
pll.vco2.max_freq = 0;
- pll.refclk = read_pll_ref(priv, reg);
+ pll.refclk = read_pll_ref(clk, reg);
if (!pll.refclk)
return 0;
- return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+ return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
}
static inline u32
@@ -360,11 +364,13 @@ clk_same(u32 a, u32 b)
return ((a / 1000) == (b / 1000));
}
-static int
-nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+int
+nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
- struct nv50_clk_priv *priv = (void *)clk;
- struct nv50_clk_hwsq *hwsq = &priv->hwsq;
+ struct nv50_clk *clk = nv50_clk(base);
+ struct nv50_clk_hwsq *hwsq = &clk->hwsq;
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
int freq, out;
/* prepare a hwsq script from which we'll perform the reclock */
- out = clk_init(hwsq, nv_subdev(clk));
+ out = clk_init(hwsq, subdev);
if (out)
return out;
@@ -393,15 +399,15 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
freq = calc_div(core, vdec, &P1);
/* see how close we can get using xpll/hclk as a source */
- if (nv_device(priv)->chipset != 0x98)
- out = read_pll(priv, 0x004030);
+ if (device->chipset != 0x98)
+ out = read_pll(clk, 0x004030);
else
- out = clk->read(clk, nv_clk_src_hclkm3d2);
+ out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
out = calc_div(out, vdec, &P2);
/* select whichever gets us closest */
if (abs(vdec - freq) <= abs(vdec - out)) {
- if (nv_device(priv)->chipset != 0x98)
+ if (device->chipset != 0x98)
mastv |= 0x00000c00;
divsv |= P1 << 8;
} else {
@@ -417,14 +423,14 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
* of the host clock frequency
*/
if (dom6) {
- if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+ if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
mastv |= 0x00000000;
} else
- if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+ if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
mastv |= 0x08000000;
} else {
- freq = clk->read(clk, nv_clk_src_hclk) * 3;
- freq = calc_div(freq, dom6, &P1);
+ freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
+ calc_div(freq, dom6, &P1);
mastv |= 0x0c000000;
divsv |= P1;
@@ -444,13 +450,13 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
* sclk to hclk) before reprogramming
*/
- if (nv_device(priv)->chipset < 0x92)
+ if (device->chipset < 0x92)
clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
else
clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
/* core: for the moment at least, always use nvpll */
- freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+ freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
if (freq == 0)
return -ERANGE;
@@ -468,7 +474,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
clk_mask(hwsq, mast, 0x00100033, 0x00000023);
} else {
- freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
if (freq == 0)
return -ERANGE;
@@ -485,77 +491,71 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
return 0;
}
-static int
-nv50_clk_prog(struct nvkm_clk *clk)
+int
+nv50_clk_prog(struct nvkm_clk *base)
{
- struct nv50_clk_priv *priv = (void *)clk;
- return clk_exec(&priv->hwsq, true);
+ struct nv50_clk *clk = nv50_clk(base);
+ return clk_exec(&clk->hwsq, true);
}
-static void
-nv50_clk_tidy(struct nvkm_clk *clk)
+void
+nv50_clk_tidy(struct nvkm_clk *base)
{
- struct nv50_clk_priv *priv = (void *)clk;
- clk_exec(&priv->hwsq, false);
+ struct nv50_clk *clk = nv50_clk(base);
+ clk_exec(&clk->hwsq, false);
}
int
-nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+ int index, bool allow_reclock, struct nvkm_clk **pclk)
{
- struct nv50_clk_oclass *pclass = (void *)oclass;
- struct nv50_clk_priv *priv;
+ struct nv50_clk *clk;
int ret;
- ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
- NULL, 0, false, &priv);
- *pobject = nv_object(priv);
+ if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ return -ENOMEM;
+ ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+ *pclk = &clk->base;
if (ret)
return ret;
- priv->hwsq.r_fifo = hwsq_reg(0x002504);
- priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
- priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
- priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
- priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
- switch (nv_device(priv)->chipset) {
+ clk->hwsq.r_fifo = hwsq_reg(0x002504);
+ clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
+ clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
+ clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+ clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+ switch (device->chipset) {
case 0x92:
case 0x94:
case 0x96:
- priv->hwsq.r_divs = hwsq_reg(0x004800);
+ clk->hwsq.r_divs = hwsq_reg(0x004800);
break;
default:
- priv->hwsq.r_divs = hwsq_reg(0x004700);
+ clk->hwsq.r_divs = hwsq_reg(0x004700);
break;
}
- priv->hwsq.r_mast = hwsq_reg(0x00c040);
-
- priv->base.read = nv50_clk_read;
- priv->base.calc = nv50_clk_calc;
- priv->base.prog = nv50_clk_prog;
- priv->base.tidy = nv50_clk_tidy;
+ clk->hwsq.r_mast = hwsq_reg(0x00c040);
return 0;
}
-static struct nvkm_domain
-nv50_domains[] = {
- { nv_clk_src_crystal, 0xff },
- { nv_clk_src_href , 0xff },
- { nv_clk_src_core , 0xff, 0, "core", 1000 },
- { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
- { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
- { nv_clk_src_max }
+static const struct nvkm_clk_func
+nv50_clk = {
+ .read = nv50_clk_read,
+ .calc = nv50_clk_calc,
+ .prog = nv50_clk_prog,
+ .tidy = nv50_clk_tidy,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_mem , 0xff, 0, "memory", 1000 },
+ { nv_clk_src_max }
+ }
};
-struct nvkm_oclass *
-nv50_clk_oclass = &(struct nv50_clk_oclass) {
- .base.handle = NV_SUBDEV(CLK, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_clk_ctor,
- .dtor = _nvkm_clk_dtor,
- .init = _nvkm_clk_init,
- .fini = _nvkm_clk_fini,
- },
- .domains = nv50_domains,
-}.base;
+int
+nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+}
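
[editor's note] clk_same(), visible in the nv50_clk_calc() context above, compares kHz values at MHz granularity, which is how dom6 gets to ride an existing href/hclk source when it is within rounding distance of the target. A standalone demonstration:

#include <stdio.h>

static int clk_same(unsigned a, unsigned b)
{
	return (a / 1000) == (b / 1000);   /* kHz compared per whole MHz */
}

int main(void)
{
	printf("%d\n", clk_same(100000, 100999));   /* 1: both 100 MHz */
	printf("%d\n", clk_same(100000, 101000));   /* 0: 100 vs 101 MHz */
	return 0;
}
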
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a32f10..d3c7fb6efa16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
+#ifndef __NV50_CLK_H__
+#define __NV50_CLK_H__
+#define nv50_clk(p) container_of((p), struct nv50_clk, base)
+#include "priv.h"
+
#include <subdev/bus/hwsq.h>
-#include <subdev/clk.h>
struct nv50_clk_hwsq {
struct hwsq base;
@@ -12,17 +14,15 @@ struct nv50_clk_hwsq {
struct hwsq_reg r_mast;
};
-struct nv50_clk_priv {
+struct nv50_clk {
struct nvkm_clk base;
struct nv50_clk_hwsq hwsq;
};
-int nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-struct nv50_clk_oclass {
- struct nvkm_oclass base;
- struct nvkm_domain *domains;
-};
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool, struct nvkm_clk **);
+int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int nv50_clk_prog(struct nvkm_clk *);
+void nv50_clk_tidy(struct nvkm_clk *);
#endif
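
[editor's note] The reworked nv50.h now exports nv50_clk_new_() and the shared read/calc/prog/tidy entry points, so later chips can assemble their own nvkm_clk_func around them. An illustrative user of those symbols — this is a sketch, not the in-tree g84 implementation:

#include "nv50.h"

static const struct nvkm_clk_func
g84_like_clk = {
	.read = nv50_clk_read,
	.calc = nv50_clk_calc,
	.prog = nv50_clk_prog,
	.tidy = nv50_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_max }
	}
};

int
g84_like_clk_new(struct nvkm_device *device, int index,
		 struct nvkm_clk **pclk)
{
	return nv50_clk_new_(&g84_like_clk, device, index, true, pclk);
}
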
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e78d632..c6fccd600db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@ gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
}
if (unlikely(best_err == ~0)) {
- nv_error(subdev, "unable to find matching pll values\n");
+ nvkm_error(subdev, "unable to find matching pll values\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f2292895a1a8..5ad67879e703 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@ getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- struct nvkm_bios *bios = nvkm_bios(subdev);
+ struct nvkm_bios *bios = subdev->device->bios;
int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
int minM = info->vco1.min_m, maxM = info->vco1.max_m;
int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@ getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
* "clk" parameter in kHz
* returns calculated clock
*/
- int chip_version = nvkm_bios(subdev)->version.chip;
+ int chip_version = subdev->device->bios->version.chip;
int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@ nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
}
if (!ret)
- nv_error(subdev, "unable to compute acceptable pll values\n");
+ nvkm_error(subdev, "unable to compute acceptable pll values\n");
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 000000000000..51eafc00c8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_CLK_PRIV_H__
+#define __NVKM_CLK_PRIV_H__
+#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
+#include <subdev/clk.h>
+
+struct nvkm_clk_func {
+ int (*init)(struct nvkm_clk *);
+ void (*fini)(struct nvkm_clk *);
+ int (*read)(struct nvkm_clk *, enum nv_clk_src);
+ int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+ int (*prog)(struct nvkm_clk *);
+ void (*tidy)(struct nvkm_clk *);
+ struct nvkm_pstate *pstates;
+ int nr_pstates;
+ struct nvkm_domain domains[];
+};
+
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool allow_reclock, struct nvkm_clk *);
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+ bool allow_reclock, struct nvkm_clk **);
+
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+ struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+#endif
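
[editor's note] Every old clk->read(clk, ...) call in the chip files became nvkm_clk_read(&clk->base, ...). The wrapper itself lives in clk/base.c, outside the hunks shown here; judging from the func table in priv.h above, its presumed shape is a thin dispatch through the stored function pointer (an assumption — this patch does not show it, and it assumes struct nvkm_clk keeps a `func` member):

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);   /* dispatch via the const table */
}
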
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index b0d7c5f40db1..5f25402f6b09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -23,74 +23,108 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/vga.h>
-int
-_nvkm_devinit_fini(struct nvkm_object *object, bool suspend)
+u32
+nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
{
- struct nvkm_devinit *devinit = (void *)object;
+ if (init->func->mmio)
+ addr = init->func->mmio(init, addr);
+ return addr;
+}
- /* force full reinit on resume */
- if (suspend)
- devinit->post = true;
+int
+nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
+{
+ return init->func->pll_set(init, type, khz);
+}
- /* unlock the extended vga crtc regs */
- nv_lockvgac(devinit, false);
+void
+nvkm_devinit_meminit(struct nvkm_devinit *init)
+{
+ if (init->func->meminit)
+ init->func->meminit(init);
+}
- return nvkm_subdev_fini(&devinit->base, suspend);
+u64
+nvkm_devinit_disable(struct nvkm_devinit *init)
+{
+ if (init && init->func->disable)
+ return init->func->disable(init);
+ return 0;
}
int
-_nvkm_devinit_init(struct nvkm_object *object)
+nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
{
- struct nvkm_devinit_impl *impl = (void *)object->oclass;
- struct nvkm_devinit *devinit = (void *)object;
- int ret;
+ int ret = 0;
+ if (init && init->func->post)
+ ret = init->func->post(init, init->post);
+ *disable = nvkm_devinit_disable(init);
+ return ret;
+}
- ret = nvkm_subdev_init(&devinit->base);
- if (ret)
- return ret;
+static int
+nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_devinit *init = nvkm_devinit(subdev);
+ /* force full reinit on resume */
+ if (suspend)
+ init->post = true;
+ return 0;
+}
+
+static int
+nvkm_devinit_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_devinit *init = nvkm_devinit(subdev);
- ret = impl->post(&devinit->base, devinit->post);
- if (ret)
- return ret;
+ if (init->func->preinit)
+ init->func->preinit(init);
- if (impl->disable)
- nv_device(devinit)->disable_mask |= impl->disable(devinit);
+ /* unlock the extended vga crtc regs */
+ nvkm_lockvgac(subdev->device, false);
return 0;
}
-void
-_nvkm_devinit_dtor(struct nvkm_object *object)
+static int
+nvkm_devinit_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_devinit *init = nvkm_devinit(subdev);
+ if (init->func->init)
+ init->func->init(init);
+ return 0;
+}
+
+static void *
+nvkm_devinit_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_devinit *devinit = (void *)object;
+ struct nvkm_devinit *init = nvkm_devinit(subdev);
+ void *data = init;
- /* lock crtc regs */
- nv_lockvgac(devinit, true);
+ if (init->func->dtor)
+ data = init->func->dtor(init);
- nvkm_subdev_destroy(&devinit->base);
+ /* lock crtc regs */
+ nvkm_lockvgac(subdev->device, true);
+ return data;
}
-int
-nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int size, void **pobject)
+static const struct nvkm_subdev_func
+nvkm_devinit = {
+ .dtor = nvkm_devinit_dtor,
+ .preinit = nvkm_devinit_preinit,
+ .init = nvkm_devinit_init,
+ .fini = nvkm_devinit_fini,
+};
+
+void
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_devinit *init)
{
- struct nvkm_devinit_impl *impl = (void *)oclass;
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_devinit *devinit;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
- "init", size, pobject);
- devinit = *pobject;
- if (ret)
- return ret;
-
- devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
- devinit->meminit = impl->meminit;
- devinit->pll_set = impl->pll_set;
- devinit->mmio = impl->mmio;
- return 0;
+ nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+ init->func = func;
+ init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
}
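
[editor's note] nvkm_devinit_post() above now returns the engine-disable mask through an out-pointer instead of devinit poking nv_device()->disable_mask itself. A hedged sketch of the expected call site in the device core — the surrounding function and the destination of the mask are hypothetical:

static int
device_post(struct nvkm_device *device, u64 *mask)
{
	u64 disable = 0;
	int ret = nvkm_devinit_post(device->devinit, &disable);

	if (ret == 0)
		*mask |= disable;   /* caller folds this into its own state */
	return ret;
}
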
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 36684c3f9e9c..6c5bbff12eb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,7 +23,6 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <core/device.h>
#include <subdev/fb/regsnv04.h>
#define NV04_PFB_DEBUG_0 0x00100080
@@ -48,8 +47,8 @@
static inline struct io_mapping *
fbmem_init(struct nvkm_device *dev)
{
- return io_mapping_create_wc(nv_device_resource_start(dev, 1),
- nv_device_resource_len(dev, 1));
+ return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
+ dev->func->resource_size(dev, 1));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index ca776ce75f4f..e895289bf3c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -27,40 +27,42 @@
#include <subdev/bios/init.h>
static u64
-g84_devinit_disable(struct nvkm_devinit *devinit)
+g84_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r001540 = nv_rd32(priv, 0x001540);
- u32 r00154c = nv_rd32(priv, 0x00154c);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r001540 = nvkm_rd32(device, 0x001540);
+ u32 r00154c = nvkm_rd32(device, 0x00154c);
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_MPEG);
- disable |= (1ULL << NVDEV_ENGINE_VP);
- disable |= (1ULL << NVDEV_ENGINE_BSP);
- disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+ disable |= (1ULL << NVKM_ENGINE_MPEG);
+ disable |= (1ULL << NVKM_ENGINE_VP);
+ disable |= (1ULL << NVKM_ENGINE_BSP);
+ disable |= (1ULL << NVKM_ENGINE_CIPHER);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVKM_ENGINE_BSP);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+ disable |= (1ULL << NVKM_ENGINE_CIPHER);
return disable;
}
-struct nvkm_oclass *
-g84_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x84),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+g84_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = g84_devinit_disable,
- .post = nvbios_init,
-}.base;
+};
+
+int
+g84_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+}
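
[editor's note] The g84/g98/gf100/gm107 disable() hooks all follow the same pattern: read one or two strap registers and translate the relevant feature bits into a 64-bit engine-disable mask. A standalone model of that decode — the engine IDs are placeholders, not the NVKM_ENGINE_* values:

#include <stdint.h>
#include <stdio.h>

enum { ENG_MPEG, ENG_VP, ENG_DISP };

static uint64_t decode_disable(uint32_t r001540, uint32_t r00154c)
{
	uint64_t disable = 0;

	if (!(r001540 & 0x40000000))   /* video engines fused off */
		disable |= (1ULL << ENG_MPEG) | (1ULL << ENG_VP);
	if (!(r00154c & 0x00000004))   /* display absent */
		disable |= (1ULL << ENG_DISP);
	return disable;
}

int main(void)
{
	/* all straps clear: every modelled engine is disabled (0x7) */
	printf("%#llx\n", (unsigned long long)decode_disable(0, 0));
	return 0;
}
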
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index d29bacee65ee..a9d45844df5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -27,39 +27,41 @@
#include <subdev/bios/init.h>
static u64
-g98_devinit_disable(struct nvkm_devinit *devinit)
+g98_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r001540 = nv_rd32(priv, 0x001540);
- u32 r00154c = nv_rd32(priv, 0x00154c);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r001540 = nvkm_rd32(device, 0x001540);
+ u32 r00154c = nvkm_rd32(device, 0x00154c);
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
- disable |= (1ULL << NVDEV_ENGINE_MSVLD);
- disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+ disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+ disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ disable |= (1ULL << NVKM_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+ disable |= (1ULL << NVKM_ENGINE_MSVLD);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVDEV_ENGINE_SEC);
+ disable |= (1ULL << NVKM_ENGINE_SEC);
return disable;
}
-struct nvkm_oclass *
-g98_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x98),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+g98_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = g98_devinit_disable,
- .post = nvbios_init,
-}.base;
+};
+
+int
+g98_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index c61102f70805..22b0140e28c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -29,19 +29,19 @@
#include <subdev/clk/pll.h>
int
-gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int N, fN, M, P;
int ret;
- ret = nvbios_pll_parse(bios, type, &info);
+ ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
- ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
@@ -50,12 +50,12 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
- nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
- nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
- nv_wr32(priv, info.reg + 0x10, fN << 16);
+ nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
+ nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
+ nvkm_wr32(device, info.reg + 0x10, fN << 16);
break;
default:
- nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+ nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
@@ -64,64 +64,44 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
}
static u64
-gf100_devinit_disable(struct nvkm_devinit *devinit)
+gf100_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r022500 = nv_rd32(priv, 0x022500);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r022500 = nvkm_rd32(device, 0x022500);
u64 disable = 0ULL;
if (r022500 & 0x00000001)
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
if (r022500 & 0x00000002) {
- disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
- disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+ disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+ disable |= (1ULL << NVKM_ENGINE_MSPPP);
}
if (r022500 & 0x00000004)
- disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+ disable |= (1ULL << NVKM_ENGINE_MSVLD);
if (r022500 & 0x00000008)
- disable |= (1ULL << NVDEV_ENGINE_MSENC);
+ disable |= (1ULL << NVKM_ENGINE_MSENC);
if (r022500 & 0x00000100)
- disable |= (1ULL << NVDEV_ENGINE_CE0);
+ disable |= (1ULL << NVKM_ENGINE_CE0);
if (r022500 & 0x00000200)
- disable |= (1ULL << NVDEV_ENGINE_CE1);
+ disable |= (1ULL << NVKM_ENGINE_CE1);
return disable;
}
+static const struct nvkm_devinit_func
+gf100_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
+ .pll_set = gf100_devinit_pll_set,
+ .disable = gf100_devinit_disable,
+};
+
int
-gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
{
- struct nvkm_devinit_impl *impl = (void *)oclass;
- struct nv50_devinit_priv *priv;
- u64 disable;
- int ret;
-
- ret = nvkm_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- disable = impl->disable(&priv->base);
- if (disable & (1ULL << NVDEV_ENGINE_DISP))
- priv->base.post = true;
-
- return 0;
+ return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
}
-
-struct nvkm_oclass *
-gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
- .pll_set = gf100_devinit_pll_set,
- .disable = gf100_devinit_disable,
- .post = nvbios_init,
-}.base;
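
[editor's note] gf100_devinit_pll_set() above programs the VPLLs by packing the integer coefficients as (P << 16) | (N << 8) | M and writing the fractional-N part, shifted into the high half, to a separate register. A standalone check of that packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int N = 0x44, fN = 0x1000, M = 0x02, P = 0x01;
	uint32_t coef = (P << 16) | (N << 8) | M;
	uint32_t frac = (uint32_t)fN << 16;

	/* prints coef=0x00014402 frac=0x10000000 */
	printf("coef=%#010x frac=%#010x\n",
	       (unsigned)coef, (unsigned)frac);
	return 0;
}
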
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 87ca0ece37b4..2be98bd78214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -27,33 +27,35 @@
#include <subdev/bios/init.h>
u64
-gm107_devinit_disable(struct nvkm_devinit *devinit)
+gm107_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r021c00 = nv_rd32(priv, 0x021c00);
- u32 r021c04 = nv_rd32(priv, 0x021c04);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r021c00 = nvkm_rd32(device, 0x021c00);
+ u32 r021c04 = nvkm_rd32(device, 0x021c04);
u64 disable = 0ULL;
if (r021c00 & 0x00000001)
- disable |= (1ULL << NVDEV_ENGINE_CE0);
+ disable |= (1ULL << NVKM_ENGINE_CE0);
if (r021c00 & 0x00000004)
- disable |= (1ULL << NVDEV_ENGINE_CE2);
+ disable |= (1ULL << NVKM_ENGINE_CE2);
if (r021c04 & 0x00000001)
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
return disable;
}
-struct nvkm_oclass *
-gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x07),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+gm107_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
- .post = nvbios_init,
-}.base;
+};
+
+int
+gm107_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 1076fcf0d716..2b9c3f11b7a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -28,69 +28,74 @@
#include <subdev/bios/pmu.h>
static void
-pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_device *device = init->base.subdev.device;
+ struct nvkm_bios *bios = device->bios;
int i;
- nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+ nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
for (i = 0; i < len; i += 4) {
if ((i & 0xff) == 0)
- nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
- nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+ nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
+ nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
}
while (i & 0xff) {
- nv_wr32(priv, 0x10a184, 0x00000000);
+ nvkm_wr32(device, 0x10a184, 0x00000000);
i += 4;
}
}
static void
-pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_device *device = init->base.subdev.device;
+ struct nvkm_bios *bios = device->bios;
int i;
- nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
for (i = 0; i < len; i += 4)
- nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+ nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
}
static u32
-pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
{
- nv_wr32(priv, 0x10a1c0, argp);
- nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
- return nv_rd32(priv, 0x10a1c4);
+ struct nvkm_device *device = init->base.subdev.device;
+ nvkm_wr32(device, 0x10a1c0, argp);
+ nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
+ return nvkm_rd32(device, 0x10a1c4);
}
static void
-pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+pmu_exec(struct nv50_devinit *init, u32 init_addr)
{
- nv_wr32(priv, 0x10a104, init_addr);
- nv_wr32(priv, 0x10a10c, 0x00000000);
- nv_wr32(priv, 0x10a100, 0x00000002);
+ struct nvkm_device *device = init->base.subdev.device;
+ nvkm_wr32(device, 0x10a104, init_addr);
+ nvkm_wr32(device, 0x10a10c, 0x00000000);
+ nvkm_wr32(device, 0x10a100, 0x00000002);
}
static int
-pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+pmu_load(struct nv50_devinit *init, u8 type, bool post,
u32 *init_addr_pmu, u32 *args_addr_pmu)
{
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pmuR pmu;
if (!nvbios_pmuRm(bios, type, &pmu)) {
- nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+ nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
return -EINVAL;
}
if (!post)
return 0;
- pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
- pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
- pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+ pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+ pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+ pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
if (init_addr_pmu) {
*init_addr_pmu = pmu.init_addr_pmu;
@@ -98,75 +103,79 @@ pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
return 0;
}
- return pmu_exec(priv, pmu.init_addr_pmu), 0;
+ return pmu_exec(init, pmu.init_addr_pmu), 0;
}
static int
-gm204_devinit_post(struct nvkm_subdev *subdev, bool post)
+gm204_devinit_post(struct nvkm_devinit *base, bool post)
{
- struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev);
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nv50_devinit *init = nv50_devinit(base);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct bit_entry bit_I;
- u32 init, args;
+ u32 exec, args;
int ret;
if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
bit_I.length < 0x1c) {
- nv_error(priv, "VBIOS PMU init data not found\n");
+ nvkm_error(subdev, "VBIOS PMU init data not found\n");
return -EINVAL;
}
/* reset PMU and load init table parser ucode */
if (post) {
- nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
- nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
- nv_rd32(priv, 0x000200);
- while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+ nvkm_rd32(device, 0x000200);
+ while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
}
}
- ret = pmu_load(priv, 0x04, post, &init, &args);
+ ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret)
return ret;
/* upload first chunk of init data */
if (post) {
- u32 pmu = pmu_args(priv, args + 0x08, 0x08);
- u32 img = nv_ro16(bios, bit_I.offset + 0x14);
- u32 len = nv_ro16(bios, bit_I.offset + 0x16);
- pmu_data(priv, pmu, img, len);
+ u32 pmu = pmu_args(init, args + 0x08, 0x08);
+ u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
+ u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
+ pmu_data(init, pmu, img, len);
}
/* upload second chunk of init data */
if (post) {
- u32 pmu = pmu_args(priv, args + 0x08, 0x10);
- u32 img = nv_ro16(bios, bit_I.offset + 0x18);
- u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
- pmu_data(priv, pmu, img, len);
+ u32 pmu = pmu_args(init, args + 0x08, 0x10);
+ u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
+ u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
+ pmu_data(init, pmu, img, len);
}
/* execute init tables */
if (post) {
- nv_wr32(priv, 0x10a040, 0x00005000);
- pmu_exec(priv, init);
- while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+ nvkm_wr32(device, 0x10a040, 0x00005000);
+ pmu_exec(init, exec);
+ while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
}
}
/* load and execute some other ucode image (bios therm?) */
- return pmu_load(priv, 0x01, post, NULL, NULL);
+ return pmu_load(init, 0x01, post, NULL, NULL);
}
-struct nvkm_oclass *
-gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x07),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+gm204_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = gm204_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
- .post = gm204_devinit_post,
-}.base;
+};
+
+int
+gm204_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+}
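
For orientation, the gm204 POST path above boils down to a fixed register handshake: reset the PMU through PMC enable (0x000200), upload the boot and code segments with pmu_code() and the data segment with pmu_data(), then start execution with pmu_exec() and poll 0x10a040 until the parser signals completion. A condensed sketch of that sequence, with the bit_I argument plumbing and most error paths omitted (so not a drop-in replacement for the function above):

	/* condensed sketch of the handshake in gm204_devinit_post() */
	static int
	gm204_post_sketch(struct nv50_devinit *init)
	{
		struct nvkm_device *device = init->base.subdev.device;
		u32 exec, args;
		int ret;

		/* pulse the PMU reset bit in PMC enable, wait for idle */
		nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
		nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
		nvkm_rd32(device, 0x000200);
		while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
		}

		/* upload the init-table parser ucode (fuc type 0x04) */
		ret = pmu_load(init, 0x04, true, &exec, &args);
		if (ret)
			return ret;

		/* kick the parser and wait for its completion bit */
		nvkm_wr32(device, 0x10a040, 0x00005000);
		pmu_exec(init, exec);
		while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
		}
		return 0;
	}
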
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 6a3e8d4efed7..9a8522fa9c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -29,32 +29,32 @@
#include <subdev/clk/pll.h>
int
-gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int N, fN, M, P;
int ret;
- ret = nvbios_pll_parse(bios, type, &info);
+ ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
- ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
- nv_wr32(priv, info.reg + 0, 0x50000610);
- nv_mask(priv, info.reg + 4, 0x003fffff,
- (P << 16) | (M << 8) | N);
- nv_wr32(priv, info.reg + 8, fN);
+ nvkm_wr32(device, info.reg + 0, 0x50000610);
+ nvkm_mask(device, info.reg + 4, 0x003fffff,
+ (P << 16) | (M << 8) | N);
+ nvkm_wr32(device, info.reg + 8, fN);
break;
default:
- nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+ nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
@@ -63,24 +63,24 @@ gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
}
static u64
-gt215_devinit_disable(struct nvkm_devinit *devinit)
+gt215_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r001540 = nv_rd32(priv, 0x001540);
- u32 r00154c = nv_rd32(priv, 0x00154c);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r001540 = nvkm_rd32(device, 0x001540);
+ u32 r00154c = nvkm_rd32(device, 0x00154c);
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
- disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+ disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+ disable |= (1ULL << NVKM_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+ disable |= (1ULL << NVKM_ENGINE_MSVLD);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVDEV_ENGINE_CE0);
+ disable |= (1ULL << NVKM_ENGINE_CE0);
return disable;
}
@@ -99,9 +99,10 @@ gt215_devinit_mmio_part[] = {
};
static u32
-gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
+gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
+ struct nv50_devinit *init = nv50_devinit(base);
+ struct nvkm_device *device = init->base.subdev.device;
u32 *mmio = gt215_devinit_mmio_part;
/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
@@ -113,7 +114,7 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
*
* the binary driver avoids touching these registers at all, however,
* the video bios doesn't care and does what the scripts say. it's
- * presumed that the io-port access to priv registers isn't effected
+ * presumed that the io-port access to init registers isn't affected
* by the screw-up bug mentioned above.
*
* really, a new opcode should've been invented to handle these
@@ -122,9 +123,9 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
while (mmio[0]) {
if (addr >= mmio[0] && addr <= mmio[1]) {
u32 part = (addr / mmio[2]) & 7;
- if (!priv->r001540)
- priv->r001540 = nv_rd32(priv, 0x001540);
- if (part >= hweight8((priv->r001540 >> 16) & 0xff))
+ if (!init->r001540)
+ init->r001540 = nvkm_rd32(device, 0x001540);
+ if (part >= hweight8((init->r001540 >> 16) & 0xff))
return ~0;
return addr;
}
@@ -134,17 +135,19 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
return addr;
}
-struct nvkm_oclass *
-gt215_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0xa3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+gt215_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
+ .mmio = gt215_devinit_mmio,
.pll_set = gt215_devinit_pll_set,
.disable = gt215_devinit_disable,
- .mmio = gt215_devinit_mmio,
- .post = nvbios_init,
-}.base;
+};
+
+int
+gt215_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+}
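
The gt215 .mmio hook above either passes an address through unchanged or returns ~0 to veto accesses to fill-unit registers belonging to partitions that the fuse register 0x001540 reports as absent. The consumer of the hook, presumably the VBIOS init-table interpreter, is not part of this diff; the sketch below shows the assumed calling convention (including the assumption that nvkm_devinit carries a "func" pointer to the table):

	/* hypothetical consumer sketch: how an init-table interpreter
	 * might apply the per-chipset mmio filter before a register write
	 */
	static void
	init_wr32_sketch(struct nvkm_devinit *init, u32 addr, u32 data)
	{
		if (init->func->mmio)
			addr = init->func->mmio(init, addr);
		if (addr != ~0)	/* ~0 means "partition absent, skip it" */
			nvkm_wr32(init->subdev.device, addr, data);
	}
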
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 55cf48bbca1c..ce4f718e98a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -27,40 +27,42 @@
#include <subdev/bios/init.h>
static u64
-mcp89_devinit_disable(struct nvkm_devinit *devinit)
+mcp89_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r001540 = nv_rd32(priv, 0x001540);
- u32 r00154c = nv_rd32(priv, 0x00154c);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r001540 = nvkm_rd32(device, 0x001540);
+ u32 r00154c = nvkm_rd32(device, 0x00154c);
u64 disable = 0;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
- disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+ disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+ disable |= (1ULL << NVKM_ENGINE_MSPPP);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVDEV_ENGINE_DISP);
+ disable |= (1ULL << NVKM_ENGINE_DISP);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+ disable |= (1ULL << NVKM_ENGINE_MSVLD);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVDEV_ENGINE_VIC);
+ disable |= (1ULL << NVKM_ENGINE_VIC);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVDEV_ENGINE_CE0);
+ disable |= (1ULL << NVKM_ENGINE_CE0);
return disable;
}
-struct nvkm_oclass *
-mcp89_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0xaf),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+mcp89_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
.pll_set = gt215_devinit_pll_set,
.disable = mcp89_devinit_disable,
- .post = nvbios_init,
-}.base;
+};
+
+int
+mcp89_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 03a0da834244..c8d455346fcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -33,25 +33,26 @@
#include <subdev/vga.h>
static void
-nv04_devinit_meminit(struct nvkm_devinit *devinit)
+nv04_devinit_meminit(struct nvkm_devinit *init)
{
- struct nv04_devinit_priv *priv = (void *)devinit;
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
u32 patt = 0xdeadbeef;
struct io_mapping *fb;
int i;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv));
+ fb = fbmem_init(device);
if (!fb) {
- nv_error(priv, "failed to map fb\n");
+ nvkm_error(subdev, "failed to map fb\n");
return;
}
/* Sequencer and refresh off */
- nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
- nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+ nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
+ nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
- nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+ nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
@@ -62,49 +63,49 @@ nv04_devinit_meminit(struct nvkm_devinit *devinit)
fbmem_poke(fb, 0x400000, patt + 1);
if (fbmem_peek(fb, 0) == patt + 1) {
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_TYPE,
NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
- nv_mask(priv, NV04_PFB_DEBUG_0,
+ nvkm_mask(device, NV04_PFB_DEBUG_0,
NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
for (i = 0; i < 4; i++)
fbmem_poke(fb, 4 * i, patt);
if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
} else
if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
} else
if (fbmem_peek(fb, 0) != patt) {
if (fbmem_readback(fb, 0x800000, patt))
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
else
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
- nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+ nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
} else
if (!fbmem_readback(fb, 0x800000, patt)) {
- nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
}
/* Refresh on, sequencer on */
- nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
- nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+ nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+ nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
fbmem_fini(fb);
}
@@ -139,11 +140,12 @@ powerctrl_1_shift(int chip_version, int reg)
}
void
-setPLL_single(struct nvkm_devinit *devinit, u32 reg,
+setPLL_single(struct nvkm_devinit *init, u32 reg,
struct nvkm_pll_vals *pv)
{
- int chip_version = nvkm_bios(devinit)->version.chip;
- uint32_t oldpll = nv_rd32(devinit, reg);
+ struct nvkm_device *device = init->subdev.device;
+ int chip_version = device->bios->version.chip;
+ uint32_t oldpll = nvkm_rd32(device, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
uint32_t saved_powerctrl_1 = 0;
@@ -153,30 +155,30 @@ setPLL_single(struct nvkm_devinit *devinit, u32 reg,
return; /* already set */
if (shift_powerctrl_1 >= 0) {
- saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
- nv_wr32(devinit, 0x001584,
+ saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+ nvkm_wr32(device, 0x001584,
(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
1 << shift_powerctrl_1);
}
if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
/* upclock -- write new post divider first */
- nv_wr32(devinit, reg, pv->log2P << 16 | (oldpll & 0xffff));
+ nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
else
/* downclock -- write new NM first */
- nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
+ nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
if ((chip_version < 0x17 || chip_version == 0x1a) &&
chip_version != 0x11)
/* wait a bit on older chips */
msleep(64);
- nv_rd32(devinit, reg);
+ nvkm_rd32(device, reg);
/* then write the other half as well */
- nv_wr32(devinit, reg, pll);
+ nvkm_wr32(device, reg, pll);
if (shift_powerctrl_1 >= 0)
- nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+ nvkm_wr32(device, 0x001584, saved_powerctrl_1);
}
static uint32_t
@@ -193,14 +195,15 @@ new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
}
void
-setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
+setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
struct nvkm_pll_vals *pv)
{
- int chip_version = nvkm_bios(devinit)->version.chip;
+ struct nvkm_device *device = init->subdev.device;
+ int chip_version = device->bios->version.chip;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
- uint32_t oldpll1 = nv_rd32(devinit, reg1);
- uint32_t oldpll2 = !nv3035 ? nv_rd32(devinit, reg2) : 0;
+ uint32_t oldpll1 = nvkm_rd32(device, reg1);
+ uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
uint32_t oldramdac580 = 0, ramdac580 = 0;
@@ -215,7 +218,7 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
pll2 = 0;
}
if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
- oldramdac580 = nv_rd32(devinit, 0x680580);
+ oldramdac580 = nvkm_rd32(device, 0x680580);
ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
if (oldramdac580 != ramdac580)
oldpll1 = ~0; /* force mismatch */
@@ -231,8 +234,8 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
return; /* already set */
if (shift_powerctrl_1 >= 0) {
- saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
- nv_wr32(devinit, 0x001584,
+ saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+ nvkm_wr32(device, 0x001584,
(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
1 << shift_powerctrl_1);
}
@@ -251,26 +254,26 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
shift_c040 += 2;
}
- savedc040 = nv_rd32(devinit, 0xc040);
+ savedc040 = nvkm_rd32(device, 0xc040);
if (shift_c040 != 14)
- nv_wr32(devinit, 0xc040, savedc040 & ~(3 << shift_c040));
+ nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
}
if (oldramdac580 != ramdac580)
- nv_wr32(devinit, 0x680580, ramdac580);
+ nvkm_wr32(device, 0x680580, ramdac580);
if (!nv3035)
- nv_wr32(devinit, reg2, pll2);
- nv_wr32(devinit, reg1, pll1);
+ nvkm_wr32(device, reg2, pll2);
+ nvkm_wr32(device, reg1, pll1);
if (shift_powerctrl_1 >= 0)
- nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+ nvkm_wr32(device, 0x001584, saved_powerctrl_1);
if (chip_version >= 0x40)
- nv_wr32(devinit, 0xc040, savedc040);
+ nvkm_wr32(device, 0xc040, savedc040);
}
void
-setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
+setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
struct nvkm_pll_vals *pv)
{
/* When setting PLLs, there is a merry game of disabling and enabling
@@ -280,10 +283,10 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
* combined herein. Without luck it deviates from each card's formula
* so as to not work on any :)
*/
-
+ struct nvkm_device *device = init->subdev.device;
uint32_t Preg = NMNMreg - 4;
bool mpll = Preg == 0x4020;
- uint32_t oldPval = nv_rd32(devinit, Preg);
+ uint32_t oldPval = nvkm_rd32(device, Preg);
uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
0xc << 28 | pv->log2P << 16;
@@ -292,7 +295,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
uint32_t maskc040 = ~(3 << 14), savedc040;
bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
- if (nv_rd32(devinit, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+ if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
return;
if (Preg == 0x4000)
@@ -304,7 +307,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
struct nvbios_pll info;
uint8_t Pval2;
- if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info))
+ if (nvbios_pll_parse(device->bios, Preg, &info))
return;
Pval2 = pv->log2P + info.bias_p;
@@ -312,47 +315,48 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
Pval2 = info.max_p;
Pval |= 1 << 28 | Pval2 << 20;
- saved4600 = nv_rd32(devinit, 0x4600);
- nv_wr32(devinit, 0x4600, saved4600 | 8 << 28);
+ saved4600 = nvkm_rd32(device, 0x4600);
+ nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
}
if (single_stage)
Pval |= mpll ? 1 << 12 : 1 << 8;
- nv_wr32(devinit, Preg, oldPval | 1 << 28);
- nv_wr32(devinit, Preg, Pval & ~(4 << 28));
+ nvkm_wr32(device, Preg, oldPval | 1 << 28);
+ nvkm_wr32(device, Preg, Pval & ~(4 << 28));
if (mpll) {
Pval |= 8 << 20;
- nv_wr32(devinit, 0x4020, Pval & ~(0xc << 28));
- nv_wr32(devinit, 0x4038, Pval & ~(0xc << 28));
+ nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
+ nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
}
- savedc040 = nv_rd32(devinit, 0xc040);
- nv_wr32(devinit, 0xc040, savedc040 & maskc040);
+ savedc040 = nvkm_rd32(device, 0xc040);
+ nvkm_wr32(device, 0xc040, savedc040 & maskc040);
- nv_wr32(devinit, NMNMreg, NMNM);
+ nvkm_wr32(device, NMNMreg, NMNM);
if (NMNMreg == 0x4024)
- nv_wr32(devinit, 0x403c, NMNM);
+ nvkm_wr32(device, 0x403c, NMNM);
- nv_wr32(devinit, Preg, Pval);
+ nvkm_wr32(device, Preg, Pval);
if (mpll) {
Pval &= ~(8 << 20);
- nv_wr32(devinit, 0x4020, Pval);
- nv_wr32(devinit, 0x4038, Pval);
- nv_wr32(devinit, 0x4600, saved4600);
+ nvkm_wr32(device, 0x4020, Pval);
+ nvkm_wr32(device, 0x4038, Pval);
+ nvkm_wr32(device, 0x4600, saved4600);
}
- nv_wr32(devinit, 0xc040, savedc040);
+ nvkm_wr32(device, 0xc040, savedc040);
if (mpll) {
- nv_wr32(devinit, 0x4020, Pval & ~(1 << 28));
- nv_wr32(devinit, 0x4038, Pval & ~(1 << 28));
+ nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
+ nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
}
}
int
nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
- struct nvkm_bios *bios = nvkm_bios(devinit);
+ struct nvkm_subdev *subdev = &devinit->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_pll_vals pv;
struct nvbios_pll info;
int cv = bios->version.chip;
@@ -363,8 +367,7 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
if (ret)
return ret;
- ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
- &N1, &M1, &N2, &M2, &P);
+ ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
if (!ret)
return -EINVAL;
@@ -388,83 +391,76 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
}
int
-nv04_devinit_fini(struct nvkm_object *object, bool suspend)
+nv04_devinit_post(struct nvkm_devinit *init, bool execute)
{
- struct nv04_devinit_priv *priv = (void *)object;
- int ret;
+ return nvbios_init(&init->subdev, execute);
+}
- /* make i2c busses accessible */
- nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
+void
+nv04_devinit_preinit(struct nvkm_devinit *base)
+{
+ struct nv04_devinit *init = nv04_devinit(base);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_device *device = subdev->device;
- ret = nvkm_devinit_fini(&priv->base, suspend);
- if (ret)
- return ret;
+ /* make i2c busses accessible */
+ nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
/* unslave crtcs */
- if (priv->owner < 0)
- priv->owner = nv_rdvgaowner(priv);
- nv_wrvgaowner(priv, 0);
- return 0;
-}
-
-int
-nv04_devinit_init(struct nvkm_object *object)
-{
- struct nv04_devinit_priv *priv = (void *)object;
-
- if (!priv->base.post) {
- u32 htotal = nv_rdvgac(priv, 0, 0x06);
- htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
- htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
- htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
- htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
+ if (init->owner < 0)
+ init->owner = nvkm_rdvgaowner(device);
+ nvkm_wrvgaowner(device, 0);
+
+ if (!init->base.post) {
+ u32 htotal = nvkm_rdvgac(device, 0, 0x06);
+ htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
+ htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
+ htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
+ htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
if (!htotal) {
- nv_info(priv, "adaptor not initialised\n");
- priv->base.post = true;
+ nvkm_debug(subdev, "adaptor not initialised\n");
+ init->base.post = true;
}
}
-
- return nvkm_devinit_init(&priv->base);
}
-void
-nv04_devinit_dtor(struct nvkm_object *object)
+void *
+nv04_devinit_dtor(struct nvkm_devinit *base)
{
- struct nv04_devinit_priv *priv = (void *)object;
-
+ struct nv04_devinit *init = nv04_devinit(base);
/* restore vga owner saved at first init */
- nv_wrvgaowner(priv, priv->owner);
-
- nvkm_devinit_destroy(&priv->base);
+ nvkm_wrvgaowner(init->base.subdev.device, init->owner);
+ return init;
}
int
-nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_devinit_new_(const struct nvkm_devinit_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
{
- struct nv04_devinit_priv *priv;
- int ret;
+ struct nv04_devinit *init;
- ret = nvkm_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+ return -ENOMEM;
+ *pinit = &init->base;
- priv->owner = -1;
+ nvkm_devinit_ctor(func, device, index, &init->base);
+ init->owner = -1;
return 0;
}
-struct nvkm_oclass *
-nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_devinit_ctor,
- .dtor = nv04_devinit_dtor,
- .init = nv04_devinit_init,
- .fini = nv04_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv04_devinit = {
+ .dtor = nv04_devinit_dtor,
+ .preinit = nv04_devinit_preinit,
+ .post = nv04_devinit_post,
.meminit = nv04_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv04_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 7c63abf11e22..4a87c8c2bce8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,19 +1,19 @@
-#ifndef __NVKM_DEVINIT_NV04_H__
-#define __NVKM_DEVINIT_NV04_H__
+#ifndef __NV04_DEVINIT_H__
+#define __NV04_DEVINIT_H__
+#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
#include "priv.h"
struct nvkm_pll_vals;
-struct nv04_devinit_priv {
+struct nv04_devinit {
struct nvkm_devinit base;
int owner;
};
-int nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv04_devinit_dtor(struct nvkm_object *);
-int nv04_devinit_init(struct nvkm_object *);
-int nv04_devinit_fini(struct nvkm_object *, bool);
+int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+ int, struct nvkm_devinit **);
+void *nv04_devinit_dtor(struct nvkm_devinit *);
+void nv04_devinit_preinit(struct nvkm_devinit *);
+void nv04_devinit_fini(struct nvkm_devinit *);
int nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
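
One header detail worth flagging: the old code downcast from nvkm_devinit to the chipset struct with bare (void *) casts, while the new nv04_devinit(p) macro above goes through container_of(), which is checked at compile time. A minimal illustration of what the macro expands to at a use site:

	/* what nv04_devinit(p) buys over the old "(void *)devinit" cast */
	static int
	example_owner(struct nvkm_devinit *base)
	{
		/* container_of() recovers the wrapper from its embedded
		 * base member, and fails to compile if the types mismatch
		 */
		struct nv04_devinit *init =
			container_of(base, struct nv04_devinit, base);
		return init->owner; /* illustrative use of a wrapper field */
	}
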
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index def8649216c2..9891eadca1ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -32,7 +32,7 @@
#include <subdev/vga.h>
static void
-nv05_devinit_meminit(struct nvkm_devinit *devinit)
+nv05_devinit_meminit(struct nvkm_devinit *init)
{
static const u8 default_config_tab[][2] = {
{ 0x24, 0x00 },
@@ -44,8 +44,9 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
{ 0x06, 0x00 },
{ 0x00, 0x00 }
};
- struct nv04_devinit_priv *priv = (void *)devinit;
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct io_mapping *fb;
u32 patt = 0xdeadbeef;
u16 data;
@@ -53,88 +54,90 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
int i, v;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv));
+ fb = fbmem_init(device);
if (!fb) {
- nv_error(priv, "failed to map fb\n");
+ nvkm_error(subdev, "failed to map fb\n");
return;
}
- strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+ strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
if ((data = bmp_mem_init_table(bios))) {
- ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
- ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+ ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
+ ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
} else {
ramcfg[0] = default_config_tab[strap][0];
ramcfg[1] = default_config_tab[strap][1];
}
/* Sequencer off */
- nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+ nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
- if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+ if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
goto out;
- nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+ nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
/* If present load the hardcoded scrambling table */
if (data) {
for (i = 0, data += 0x10; i < 8; i++, data += 4) {
- u32 scramble = nv_ro32(bios, data);
- nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+ u32 scramble = nvbios_rd32(bios, data);
+ nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
}
}
/* Set memory type/width/length defaults depending on the straps */
- nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+ nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
if (ramcfg[1] & 0x80)
- nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+ nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
- nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
- nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+ nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+ nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
/* Probe memory bus width */
for (i = 0; i < 4; i++)
fbmem_poke(fb, 4 * i, patt);
if (fbmem_peek(fb, 0xc) != patt)
- nv_mask(priv, NV04_PFB_BOOT_0,
+ nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
/* Probe memory length */
- v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+ v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
(!fbmem_readback(fb, 0x1000000, ++patt) ||
!fbmem_readback(fb, 0, ++patt)))
- nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
!fbmem_readback(fb, 0x800000, ++patt))
- nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
if (!fbmem_readback(fb, 0x400000, ++patt))
- nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
out:
/* Sequencer on */
- nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+ nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
fbmem_fini(fb);
}
-struct nvkm_oclass *
-nv05_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x05),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_devinit_ctor,
- .dtor = nv04_devinit_dtor,
- .init = nv04_devinit_init,
- .fini = nv04_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv05_devinit = {
+ .dtor = nv04_devinit_dtor,
+ .preinit = nv04_devinit_preinit,
+ .post = nv04_devinit_post,
.meminit = nv05_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv05_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 7aabc1bf0640..570822f83acf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -30,33 +30,33 @@
#include <subdev/bios/init.h>
static void
-nv10_devinit_meminit(struct nvkm_devinit *devinit)
+nv10_devinit_meminit(struct nvkm_devinit *init)
{
- struct nv04_devinit_priv *priv = (void *)devinit;
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
static const int mem_width[] = { 0x10, 0x00, 0x20 };
int mem_width_count;
uint32_t patt = 0xdeadbeef;
struct io_mapping *fb;
int i, j, k;
- if (nv_device(priv)->card_type >= NV_11 &&
- nv_device(priv)->chipset >= 0x17)
+ if (device->card_type >= NV_11 && device->chipset >= 0x17)
mem_width_count = 3;
else
mem_width_count = 2;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv));
+ fb = fbmem_init(device);
if (!fb) {
- nv_error(priv, "failed to map fb\n");
+ nvkm_error(subdev, "failed to map fb\n");
return;
}
- nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+ nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
/* Probe memory bus width */
for (i = 0; i < mem_width_count; i++) {
- nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+ nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
for (j = 0; j < 4; j++) {
for (k = 0; k < 4; k++)
@@ -75,7 +75,7 @@ mem_width_found:
/* Probe amount of installed memory */
for (i = 0; i < 4; i++) {
- int off = nv_rd32(priv, 0x10020c) - 0x100000;
+ int off = nvkm_rd32(device, 0x10020c) - 0x100000;
fbmem_poke(fb, off, patt);
fbmem_poke(fb, 0, 0);
@@ -90,22 +90,24 @@ mem_width_found:
}
/* IC missing - disable the upper half memory space. */
- nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+ nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
amount_found:
fbmem_fini(fb);
}
-struct nvkm_oclass *
-nv10_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x10),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_devinit_ctor,
- .dtor = nv04_devinit_dtor,
- .init = nv04_devinit_init,
- .fini = nv04_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv10_devinit = {
+ .dtor = nv04_devinit_dtor,
+ .preinit = nv04_devinit_preinit,
+ .post = nv04_devinit_post,
.meminit = nv10_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv10_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 9f36fff5a1c3..fefafec7e2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -26,15 +26,17 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
-struct nvkm_oclass *
-nv1a_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x1a),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_devinit_ctor,
- .dtor = nv04_devinit_dtor,
- .init = nv04_devinit_init,
- .fini = nv04_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv1a_devinit = {
+ .dtor = nv04_devinit_dtor,
+ .preinit = nv04_devinit_preinit,
+ .post = nv04_devinit_post,
.pll_set = nv04_devinit_pll_set,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv1a_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 02fcfd921c42..4ef04e0d8826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -30,48 +30,50 @@
#include <subdev/bios/init.h>
static void
-nv20_devinit_meminit(struct nvkm_devinit *devinit)
+nv20_devinit_meminit(struct nvkm_devinit *init)
{
- struct nv04_devinit_priv *priv = (void *)devinit;
- struct nvkm_device *device = nv_device(priv);
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
uint32_t amount, off;
struct io_mapping *fb;
/* Map the framebuffer aperture */
- fb = fbmem_init(nv_device(priv));
+ fb = fbmem_init(device);
if (!fb) {
- nv_error(priv, "failed to map fb\n");
+ nvkm_error(subdev, "failed to map fb\n");
return;
}
- nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+ nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
/* Allow full addressing */
- nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+ nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
- amount = nv_rd32(priv, 0x10020c);
+ amount = nvkm_rd32(device, 0x10020c);
for (off = amount; off > 0x2000000; off -= 0x2000000)
fbmem_poke(fb, off - 4, off);
- amount = nv_rd32(priv, 0x10020c);
+ amount = nvkm_rd32(device, 0x10020c);
if (amount != fbmem_peek(fb, amount - 4))
/* IC missing - disable the upper half memory space. */
- nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+ nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
fbmem_fini(fb);
}
-struct nvkm_oclass *
-nv20_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x20),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_devinit_ctor,
- .dtor = nv04_devinit_dtor,
- .init = nv04_devinit_init,
- .fini = nv04_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv20_devinit = {
+ .dtor = nv04_devinit_dtor,
+ .preinit = nv04_devinit_preinit,
+ .post = nv04_devinit_post,
.meminit = nv20_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv20_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 26b7cb13e167..337c2c692dc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -29,47 +29,48 @@
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
-#include <subdev/ibus.h>
#include <subdev/vga.h>
int
-nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- struct nvkm_bios *bios = nvkm_bios(priv);
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct nvbios_pll info;
int N1, M1, N2, M2, P;
int ret;
ret = nvbios_pll_parse(bios, type, &info);
if (ret) {
- nv_error(devinit, "failed to retrieve pll data, %d\n", ret);
+ nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
return ret;
}
- ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, &N1, &M1, &N2, &M2, &P);
+ ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
if (!ret) {
- nv_error(devinit, "failed pll calculation\n");
+ nvkm_error(subdev, "failed pll calculation\n");
return ret;
}
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
- nv_wr32(priv, info.reg + 0, 0x10000611);
- nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
- nv_mask(priv, info.reg + 8, 0x7fff00ff, (P << 28) |
- (M2 << 16) | N2);
+ nvkm_wr32(device, info.reg + 0, 0x10000611);
+ nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+ nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P << 28) |
+ (M2 << 16) | N2);
break;
case PLL_MEMORY:
- nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
- (info.bias_p << 19) |
- (P << 16));
- nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+ nvkm_mask(device, info.reg + 0, 0x01ff0000,
+ (P << 22) |
+ (info.bias_p << 19) |
+ (P << 16));
+ nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
break;
default:
- nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
- nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+ nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
+ nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
break;
}
@@ -77,57 +78,68 @@ nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
}
static u64
-nv50_devinit_disable(struct nvkm_devinit *devinit)
+nv50_devinit_disable(struct nvkm_devinit *init)
{
- struct nv50_devinit_priv *priv = (void *)devinit;
- u32 r001540 = nv_rd32(priv, 0x001540);
+ struct nvkm_device *device = init->subdev.device;
+ u32 r001540 = nvkm_rd32(device, 0x001540);
u64 disable = 0ULL;
if (!(r001540 & 0x40000000))
- disable |= (1ULL << NVDEV_ENGINE_MPEG);
+ disable |= (1ULL << NVKM_ENGINE_MPEG);
return disable;
}
-int
-nv50_devinit_init(struct nvkm_object *object)
+void
+nv50_devinit_preinit(struct nvkm_devinit *base)
{
- struct nvkm_bios *bios = nvkm_bios(object);
- struct nvkm_ibus *ibus = nvkm_ibus(object);
- struct nv50_devinit_priv *priv = (void *)object;
- struct nvbios_outp info;
- struct dcb_output outp;
- u8 ver = 0xff, hdr, cnt, len;
- int ret, i = 0;
+ struct nv50_devinit *init = nv50_devinit(base);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_device *device = subdev->device;
- if (!priv->base.post) {
- if (!nv_rdvgac(priv, 0, 0x00) &&
- !nv_rdvgac(priv, 0, 0x1a)) {
- nv_info(priv, "adaptor not initialised\n");
- priv->base.post = true;
- }
+ /* our heuristics can't detect whether the board has had its
+ * devinit scripts executed or not when the display engine is
+ * missing; assume it's a secondary gpu which requires post
+ */
+ if (!init->base.post) {
+ u64 disable = nvkm_devinit_disable(&init->base);
+ if (disable & (1ULL << NVKM_ENGINE_DISP))
+ init->base.post = true;
}
- /* some boards appear to require certain priv register timeouts
- * to be bumped before runing devinit scripts. not a clue why
- * the vbios engineers didn't make the scripts just work...
+ /* magic to detect whether or not x86 vbios code has executed
+ * the devinit scripts to initialise the board
*/
- if (priv->base.post && ibus)
- nv_ofuncs(ibus)->init(nv_object(ibus));
+ if (!init->base.post) {
+ if (!nvkm_rdvgac(device, 0, 0x00) &&
+ !nvkm_rdvgac(device, 0, 0x1a)) {
+ nvkm_debug(subdev, "adaptor not initialised\n");
+ init->base.post = true;
+ }
+ }
+}
- ret = nvkm_devinit_init(&priv->base);
- if (ret)
- return ret;
+void
+nv50_devinit_init(struct nvkm_devinit *base)
+{
+ struct nv50_devinit *init = nv50_devinit(base);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvbios_outp info;
+ struct dcb_output outp;
+ u8 ver = 0xff, hdr, cnt, len;
+ int i = 0;
/* if we ran the init tables, we have to execute the first script
* pointer of each dcb entry's display encoder table in order
* to properly initialise each encoder.
*/
- while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+ while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
&ver, &hdr, &cnt, &len, &info)) {
- struct nvbios_init init = {
- .subdev = nv_subdev(priv),
+ struct nvbios_init exec = {
+ .subdev = subdev,
.bios = bios,
.offset = info.script[0],
.outp = &outp,
@@ -135,40 +147,39 @@ nv50_devinit_init(struct nvkm_object *object)
.execute = 1,
};
- nvbios_exec(&init);
+ nvbios_exec(&exec);
}
i++;
}
-
- return 0;
}
int
-nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_devinit_new_(const struct nvkm_devinit_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
{
- struct nv50_devinit_priv *priv;
- int ret;
+ struct nv50_devinit *init;
- ret = nvkm_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+ return -ENOMEM;
+ *pinit = &init->base;
+ nvkm_devinit_ctor(func, device, index, &init->base);
return 0;
}
-struct nvkm_oclass *
-nv50_devinit_oclass = &(struct nvkm_devinit_impl) {
- .base.handle = NV_SUBDEV(DEVINIT, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_devinit_ctor,
- .dtor = _nvkm_devinit_dtor,
- .init = nv50_devinit_init,
- .fini = _nvkm_devinit_fini,
- },
+static const struct nvkm_devinit_func
+nv50_devinit = {
+ .preinit = nv50_devinit_preinit,
+ .init = nv50_devinit_init,
+ .post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = nv50_devinit_disable,
- .post = nvbios_init,
-}.base;
+};
+
+int
+nv50_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9243521c80ac..5de70a8486b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,16 +1,17 @@
-#ifndef __NVKM_DEVINIT_NV50_H__
-#define __NVKM_DEVINIT_NV50_H__
+#ifndef __NV50_DEVINIT_H__
+#define __NV50_DEVINIT_H__
+#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
#include "priv.h"
-struct nv50_devinit_priv {
+struct nv50_devinit {
struct nvkm_devinit base;
u32 r001540;
};
-int nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-int nv50_devinit_init(struct nvkm_object *);
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+ int, struct nvkm_devinit **);
+void nv50_devinit_preinit(struct nvkm_devinit *);
+void nv50_devinit_init(struct nvkm_devinit *);
int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index bb51a95d8012..e1f6ae58f1d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,34 +1,21 @@
#ifndef __NVKM_DEVINIT_PRIV_H__
#define __NVKM_DEVINIT_PRIV_H__
+#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
#include <subdev/devinit.h>
-struct nvkm_devinit_impl {
- struct nvkm_oclass base;
+struct nvkm_devinit_func {
+ void *(*dtor)(struct nvkm_devinit *);
+ void (*preinit)(struct nvkm_devinit *);
+ void (*init)(struct nvkm_devinit *);
+ int (*post)(struct nvkm_devinit *, bool post);
+ u32 (*mmio)(struct nvkm_devinit *, u32);
void (*meminit)(struct nvkm_devinit *);
int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
u64 (*disable)(struct nvkm_devinit *);
- u32 (*mmio)(struct nvkm_devinit *, u32);
- int (*post)(struct nvkm_subdev *, bool);
};
-#define nvkm_devinit_create(p,e,o,d) \
- nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_devinit_destroy(p) ({ \
- struct nvkm_devinit *d = (p); \
- _nvkm_devinit_dtor(nv_object(d)); \
-})
-#define nvkm_devinit_init(p) ({ \
- struct nvkm_devinit *d = (p); \
- _nvkm_devinit_init(nv_object(d)); \
-})
-#define nvkm_devinit_fini(p,s) ({ \
- struct nvkm_devinit *d = (p); \
- _nvkm_devinit_fini(nv_object(d), (s)); \
-})
+void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
+ int index, struct nvkm_devinit *);
-int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_devinit_dtor(struct nvkm_object *);
-int _nvkm_devinit_init(struct nvkm_object *);
-int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
+int nv04_devinit_post(struct nvkm_devinit *, bool);
#endif
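
The slimmed-down priv.h above leaves nvkm_devinit_func as the entire per-chipset surface: optional dtor/preinit/init/post/mmio/meminit/pll_set/disable hooks plus a shared nvkm_devinit_ctor(). The common code that dispatches these hooks lives outside this hunk; a rough sketch of how such a dispatcher would look, inferred only from the table above (an assumption, not the actual base.c):

	/* speculative sketch of the common dispatch, inferred from the
	 * function table above; the real devinit base.c is not in this diff
	 */
	static int
	devinit_init_sketch(struct nvkm_devinit *init)
	{
		if (init->func->preinit)
			init->func->preinit(init);	/* pre-POST fixups  */
		if (init->func->init)
			init->func->init(init);		/* post-POST setup  */
		/* .post runs (or skips) the VBIOS devinit scripts */
		if (init->func->post)
			return init->func->post(init, init->post);
		return 0;
	}
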
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d6be4c6c5408..08105701af7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,8 @@ nvkm-y += nvkm/subdev/fb/gf100.o
nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
+
+nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
nvkm-y += nvkm/subdev/fb/ramnv10.o
nvkm-y += nvkm/subdev/fb/ramnv1a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 61fde43dab71..a719b9becb73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -22,144 +22,151 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include "ram.h"
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+
+bool
+nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
+{
+ return fb->func->memtype_valid(fb, memtype);
+}
+
+void
+nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+ fb->func->tile.fini(fb, region, tile);
+}
+
+void
+nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
+{
+ fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
+}
+
+void
+nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ if (fb->func->tile.prog) {
+ fb->func->tile.prog(fb, region, tile);
+ if (device->gr)
+ nvkm_engine_tile(&device->gr->engine, region);
+ if (device->mpeg)
+ nvkm_engine_tile(device->mpeg, region);
+ }
+}
int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
- const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+ struct nvkm_subdev *subdev = &bios->subdev;
+ struct nvkm_device *device = subdev->device;
+ const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
struct nvbios_M0203E M0203E;
u8 ver, hdr;
if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
switch (M0203E.type) {
- case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
- case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
- case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
- case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+ case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+ case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+ case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
+ case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
default:
- nv_warn(bios, "M0203E type %02x\n", M0203E.type);
- return NV_MEM_TYPE_UNKNOWN;
+ nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
+ return NVKM_RAM_TYPE_UNKNOWN;
}
}
- nv_warn(bios, "M0203E not matched!\n");
- return NV_MEM_TYPE_UNKNOWN;
+ nvkm_warn(subdev, "M0203E not matched!\n");
+ return NVKM_RAM_TYPE_UNKNOWN;
}
-int
-_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_fb_intr(struct nvkm_subdev *subdev)
{
- struct nvkm_fb *pfb = (void *)object;
- int ret;
+ struct nvkm_fb *fb = nvkm_fb(subdev);
+ if (fb->func->intr)
+ fb->func->intr(fb);
+}
- if (pfb->ram) {
- ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
- if (ret && suspend)
+static int
+nvkm_fb_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fb *fb = nvkm_fb(subdev);
+ if (fb->func->ram_new) {
+ int ret = fb->func->ram_new(fb, &fb->ram);
+ if (ret) {
+ nvkm_error(subdev, "vram setup failed, %d\n", ret);
return ret;
+ }
}
-
- return nvkm_subdev_fini(&pfb->base, suspend);
+ return 0;
}
-int
-_nvkm_fb_init(struct nvkm_object *object)
+static int
+nvkm_fb_init(struct nvkm_subdev *subdev)
{
- struct nvkm_fb *pfb = (void *)object;
+ struct nvkm_fb *fb = nvkm_fb(subdev);
int ret, i;
- ret = nvkm_subdev_init(&pfb->base);
- if (ret)
- return ret;
-
- if (pfb->ram) {
- ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
+ if (fb->ram) {
+ ret = nvkm_ram_init(fb->ram);
if (ret)
return ret;
}
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+ for (i = 0; i < fb->tile.regions; i++)
+ fb->func->tile.prog(fb, i, &fb->tile.region[i]);
+ if (fb->func->init)
+ fb->func->init(fb);
return 0;
}
-void
-_nvkm_fb_dtor(struct nvkm_object *object)
+static void *
+nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_fb *pfb = (void *)object;
+ struct nvkm_fb *fb = nvkm_fb(subdev);
int i;
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
- nvkm_mm_fini(&pfb->tags);
+ for (i = 0; i < fb->tile.regions; i++)
+ fb->func->tile.fini(fb, i, &fb->tile.region[i]);
- if (pfb->ram) {
- nvkm_mm_fini(&pfb->vram);
- nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
- }
+ nvkm_ram_del(&fb->ram);
- nvkm_subdev_destroy(&pfb->base);
+ if (fb->func->dtor)
+ return fb->func->dtor(fb);
+ return fb;
}
-int
-nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
-{
- struct nvkm_fb_impl *impl = (void *)oclass;
- static const char *name[] = {
- [NV_MEM_TYPE_UNKNOWN] = "unknown",
- [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
- [NV_MEM_TYPE_SGRAM ] = "SGRAM",
- [NV_MEM_TYPE_SDRAM ] = "SDRAM",
- [NV_MEM_TYPE_DDR1 ] = "DDR1",
- [NV_MEM_TYPE_DDR2 ] = "DDR2",
- [NV_MEM_TYPE_DDR3 ] = "DDR3",
- [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
- [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
- [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
- [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
- };
- struct nvkm_object *ram;
- struct nvkm_fb *pfb;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
- length, pobject);
- pfb = *pobject;
- if (ret)
- return ret;
-
- pfb->memtype_valid = impl->memtype;
-
- if (!impl->ram)
- return 0;
-
- ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
- if (ret) {
- nv_fatal(pfb, "error detecting memory configuration!!\n");
- return ret;
- }
-
- pfb->ram = (void *)ram;
+static const struct nvkm_subdev_func
+nvkm_fb = {
+ .dtor = nvkm_fb_dtor,
+ .oneinit = nvkm_fb_oneinit,
+ .init = nvkm_fb_init,
+ .intr = nvkm_fb_intr,
+};
- if (!nvkm_mm_initialised(&pfb->vram)) {
- ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
- if (ret)
- return ret;
- }
-
- if (!nvkm_mm_initialised(&pfb->tags)) {
- ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
- ++pfb->ram->tags : 0, 1);
- if (ret)
- return ret;
- }
+void
+nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb *fb)
+{
+ nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+ fb->func = func;
+ fb->tile.regions = fb->func->tile.regions;
+}
- nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]);
- nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20));
- nv_info(pfb, " ZCOMP: %d tags\n", pfb->ram->tags);
+int
+nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
+{
+ if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_fb_ctor(func, device, index, *pfb);
return 0;
}
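
The fb/base.c rewrite follows the same recipe at the subdev level: the _nvkm_fb_init/_fini/_dtor object hooks become a static nvkm_subdev_func, and VRAM detection moves out of the constructor into a .oneinit hook that calls the chipset's ram_new(). A sketch of what a minimal chipset fb now needs to supply ("gxyz" is made up again, and the nvkm_fb_func member names are inferred from the wrappers in base.c above):

	/* sketch: minimal per-chipset fb after the refactor; member names
	 * are inferred from the fb->func->... calls in base.c above
	 */
	static const struct nvkm_fb_func
	gxyz_fb = {
		.ram_new       = nv50_ram_new,		/* probed in .oneinit */
		.memtype_valid = nv50_fb_memtype_valid,
	};

	int
	gxyz_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
	{
		return nvkm_fb_new_(&gxyz_fb, device, index, pfb);
	}
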
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 6c968d1e98b3..9c28392d07e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -22,17 +22,16 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
+#include "ram.h"
-struct nvkm_oclass *
-g84_fb_oclass = &(struct nv50_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x84),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv50_fb_memtype_valid,
- .base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+g84_fb = {
+ .ram_new = nv50_ram_new,
.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nv50_fb_new_(&g84_fb, device, index, pfb);
+}
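
For the nv50 family the dispatch is two-level: each chip file (g84 here) fills in a small nv50_fb_func holding only a RAM constructor and its chip-specific trap value, and the shared nv50.c code exposes the generic nvkm_fb_func hooks that forward to it. A stand-in sketch of the forwarding, not the real structures:

#include <stdio.h>

struct chip_funcs {
	int (*ram_new)(void);
	unsigned int trap;          /* chip-specific 0x100c90 value */
};

struct family_fb {
	const struct chip_funcs *func;
};

/* generic hook registered with the base layer; forwards to the chip */
static int family_ram_new(struct family_fb *fb)
{
	return fb->func->ram_new();
}

static int demo_ram_new(void) { return 0; }

static const struct chip_funcs demo_chip = {
	.ram_new = demo_ram_new,
	.trap    = 0x001d07ff,      /* g84's value from the hunk above */
};

int main(void)
{
	struct family_fb fb = { .func = &demo_chip };
	printf("ram_new -> %d, trap = 0x%08x\n",
	       family_ram_new(&fb), fb.func->trap);
	return 0;
}
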
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 15b462ae33cb..79b523aa52aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
* Roy Spliet <rspliet@eclipso.eu>
*/
-#include "priv.h"
+#include "ram.h"
struct ramxlat {
int id;
@@ -42,9 +42,9 @@ ramxlat(const struct ramxlat *xlat, int id)
static const struct ramxlat
ramgddr3_cl_lo[] = {
- { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+ { 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
/* the below are mentioned in some, but not all, gddr3 docs */
- { 12, 4 }, { 13, 5 }, { 14, 6 },
+ { 13, 9 }, { 14, 6 },
/* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
/* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
* { 15, 11 }, */
@@ -61,24 +61,25 @@ ramgddr3_cl_hi[] = {
static const struct ramxlat
ramgddr3_wr_lo[] = {
{ 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
- { 11, 0 },
+	{ 11, 0 }, { 13, 1 },
/* the below are mentioned in some, but not all, gddr3 docs */
- { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
+ { 4, 1 }, { 6, 3 }, { 12, 1 },
{ -1 }
};
int
nvkm_gddr3_calc(struct nvkm_ram *ram)
{
- int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+ int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
switch (ram->next->bios.timing_ver) {
case 0x10:
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_DLLoff;
+ DLL = !ram->next->bios.ramcfg_DLLoff;
ODT = ram->next->bios.timing_10_ODT;
+ RON = ram->next->bios.ramcfg_RON;
break;
case 0x20:
CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
@@ -89,6 +90,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
ODT = (ram->mr[1] & 0x004) >> 2 |
(ram->mr[1] & 0x040) >> 5 |
(ram->mr[1] & 0x200) >> 7;
+ RON = !(ram->mr[1] & 0x300) >> 8;
break;
default:
return -ENOSYS;
@@ -107,7 +109,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
ram->mr[1] &= ~0x3fc;
ram->mr[1] |= (ODT & 0x03) << 2;
- ram->mr[1] |= (ODT & 0x03) << 8;
+ ram->mr[1] |= (RON & 0x03) << 8;
ram->mr[1] |= (WR & 0x03) << 4;
ram->mr[1] |= (WR & 0x04) << 5;
ram->mr[1] |= !DLL << 6;
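
Two details in the gddr3 hunks deserve a note. The MR1 packing now places a drive-strength field (RON) in bits 9:8, where the old code duplicated ODT. Also, in the timing version 0x20 branch, RON = !(ram->mr[1] & 0x300) >> 8 parses as (!(...)) >> 8 since unary ! binds tighter than >>, so RON always evaluates to 0 there; whether that is intended is not clear from the patch alone. A standalone sketch of the packing, with made-up field values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mr1 = 0;
	int ODT = 1, RON = 2, WR = 5, DLL = 1;   /* made-up example values */

	mr1 &= ~0x3fc;                /* clear the fields being rewritten */
	mr1 |= (ODT & 0x03) << 2;
	mr1 |= (RON & 0x03) << 8;     /* RON lands in bits 9:8 (was ODT) */
	mr1 |= (WR  & 0x03) << 4;
	mr1 |= (WR  & 0x04) << 5;
	mr1 |= !DLL << 6;             /* DLL disable flag */
	printf("MR1 = 0x%03x\n", mr1);           /* 0x294 for these inputs */

	/* precedence note: !x >> 8 is (!x) >> 8, i.e. always 0 */
	printf("%d\n", !(0x300 & 0x300) >> 8);
	return 0;
}
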
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index f6f9eee1dcd0..24f83b09e6a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,7 +21,7 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "priv.h"
+#include "ram.h"
/* binary driver only executes this path if the condition (a) is true
* for any configuration (combination of rammap+ramcfg+timing) that
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index d51aa0237baf..008bb9849f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -22,101 +22,90 @@
* Authors: Ben Skeggs
*/
#include "gf100.h"
-
-#include <core/device.h>
+#include "ram.h"
extern const u8 gf100_pte_storage_type_map[256];
bool
-gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
u8 memtype = (tile_flags & 0x0000ff00) >> 8;
return likely((gf100_pte_storage_type_map[memtype] != 0xff));
}
-static void
-gf100_fb_intr(struct nvkm_subdev *subdev)
+void
+gf100_fb_intr(struct nvkm_fb *base)
{
- struct gf100_fb_priv *priv = (void *)subdev;
- u32 intr = nv_rd32(priv, 0x000100);
- if (intr & 0x08000000) {
- nv_debug(priv, "PFFB intr\n");
- intr &= ~0x08000000;
- }
- if (intr & 0x00002000) {
- nv_debug(priv, "PBFB intr\n");
- intr &= ~0x00002000;
- }
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_subdev *subdev = &fb->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x000100);
+ if (intr & 0x08000000)
+ nvkm_debug(subdev, "PFFB intr\n");
+ if (intr & 0x00002000)
+ nvkm_debug(subdev, "PBFB intr\n");
}
-int
-gf100_fb_init(struct nvkm_object *object)
+void
+gf100_fb_init(struct nvkm_fb *base)
{
- struct gf100_fb_priv *priv = (void *)object;
- int ret;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
+ if (fb->r100c10_page)
+ nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
- if (priv->r100c10_page)
- nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
-
- nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
- return 0;
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}
-void
-gf100_fb_dtor(struct nvkm_object *object)
+void *
+gf100_fb_dtor(struct nvkm_fb *base)
{
- struct nvkm_device *device = nv_device(object);
- struct gf100_fb_priv *priv = (void *)object;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
- if (priv->r100c10_page) {
- dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+ if (fb->r100c10_page) {
+ dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- __free_page(priv->r100c10_page);
+ __free_page(fb->r100c10_page);
}
- nvkm_fb_destroy(&priv->base);
+ return fb;
}
int
-gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
{
- struct nvkm_device *device = nv_device(parent);
- struct gf100_fb_priv *priv;
- int ret;
-
- ret = nvkm_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c10_page) {
- priv->r100c10 = dma_map_page(nv_device_base(device),
- priv->r100c10_page, 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(nv_device_base(device), priv->r100c10))
+ struct gf100_fb *fb;
+
+ if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_fb_ctor(func, device, index, &fb->base);
+ *pfb = &fb->base;
+
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
return -EFAULT;
}
- nv_subdev(priv)->intr = gf100_fb_intr;
return 0;
}
-struct nvkm_oclass *
-gf100_fb_oclass = &(struct nvkm_fb_impl) {
- .base.handle = NV_SUBDEV(FB, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fb_ctor,
- .dtor = gf100_fb_dtor,
- .init = gf100_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .memtype = gf100_fb_memtype_valid,
- .ram = &gf100_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gf100_fb = {
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .intr = gf100_fb_intr,
+ .ram_new = gf100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gf100_fb, device, index, pfb);
+}
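
gf100_fb_new_() above keeps a DMA-mapped scratch page (r100c10) for the lifetime of the subdev and gf100_fb_dtor() tears it down. A condensed kernel-style sketch of that lifecycle (not a standalone program; the mapping-failure path is made explicit here for illustration):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int scratch_map(struct device *dev, struct page **page,
		       dma_addr_t *addr)
{
	*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!*page)
		return -ENOMEM;
	*addr = dma_map_page(dev, *page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *addr)) {
		__free_page(*page);
		*page = NULL;
		return -EFAULT;
	}
	return 0;
}

static void scratch_unmap(struct device *dev, struct page *page,
			  dma_addr_t addr)
{
	if (!page)                    /* allocation was allowed to fail */
		return;
	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(page);
}
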
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 0af4da259471..2160e5a39c9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,28 +1,17 @@
#ifndef __NVKM_RAM_NVC0_H__
#define __NVKM_RAM_NVC0_H__
+#define gf100_fb(p) container_of((p), struct gf100_fb, base)
#include "priv.h"
-#include "nv50.h"
-struct gf100_fb_priv {
+struct gf100_fb {
struct nvkm_fb base;
struct page *r100c10_page;
dma_addr_t r100c10;
};
-int gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void gf100_fb_dtor(struct nvkm_object *);
-int gf100_fb_init(struct nvkm_object *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
-
-#define gf100_ram_create(p,e,o,m,d) \
- gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
-int gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u32, int, void **);
-int gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
- struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-
-int gk104_ram_init(struct nvkm_object*);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
+ int index, struct nvkm_fb **);
+void *gf100_fb_dtor(struct nvkm_fb *);
+void gf100_fb_init(struct nvkm_fb *);
+void gf100_fb_intr(struct nvkm_fb *);
#endif
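
The new gf100_fb(p) macro is the usual container_of() downcast: given a pointer to the embedded nvkm_fb base, it recovers the enclosing gf100_fb. A self-contained userspace demonstration of the same mechanics, with a local container_of definition:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base    { int index; };
struct wrapper { int extra; struct base base; };

int main(void)
{
	struct wrapper w = { .extra = 42 };
	struct base *b = &w.base;             /* what generic code sees */
	struct wrapper *back = container_of(b, struct wrapper, base);

	printf("%d\n", back->extra);          /* prints 42 */
	return 0;
}
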
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 1c08317665bb..0edb3c316f5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -22,16 +22,19 @@
* Authors: Ben Skeggs
*/
#include "gf100.h"
+#include "ram.h"
-struct nvkm_oclass *
-gk104_fb_oclass = &(struct nvkm_fb_impl) {
- .base.handle = NV_SUBDEV(FB, 0xe0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fb_ctor,
- .dtor = gf100_fb_dtor,
- .init = gf100_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .memtype = gf100_fb_memtype_valid,
- .ram = &gk104_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gk104_fb = {
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .intr = gf100_fb_intr,
+ .ram_new = gk104_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gk104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a5d7857d3898..81447eb4c948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,50 +19,23 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include "priv.h"
-struct gk20a_fb_priv {
- struct nvkm_fb base;
-};
-
-static int
-gk20a_fb_init(struct nvkm_object *object)
+static void
+gk20a_fb_init(struct nvkm_fb *fb)
{
- struct gk20a_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
- return 0;
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}
-static int
-gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk20a_fb_priv *priv;
- int ret;
-
- ret = nvkm_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_fb_func
+gk20a_fb = {
+ .init = gk20a_fb_init,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
- return 0;
+int
+gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
}
-
-struct nvkm_oclass *
-gk20a_fb_oclass = &(struct nvkm_fb_impl) {
- .base.handle = NV_SUBDEV(FB, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = gk20a_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .memtype = gf100_fb_memtype_valid,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 843f9356b360..2a91df8655dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -22,16 +22,19 @@
* Authors: Ben Skeggs
*/
#include "gf100.h"
+#include "ram.h"
-struct nvkm_oclass *
-gm107_fb_oclass = &(struct nvkm_fb_impl) {
- .base.handle = NV_SUBDEV(FB, 0x07),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fb_ctor,
- .dtor = gf100_fb_dtor,
- .init = gf100_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .memtype = gf100_fb_memtype_valid,
- .ram = &gm107_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gm107_fb = {
+ .dtor = gf100_fb_dtor,
+ .init = gf100_fb_init,
+ .intr = gf100_fb_intr,
+ .ram_new = gm107_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm107_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dd9b8a0a3c8e..ebb30608d5ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -22,17 +22,16 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
+#include "ram.h"
-struct nvkm_oclass *
-gt215_fb_oclass = &(struct nv50_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0xa3),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv50_fb_memtype_valid,
- .base.ram = &gt215_ram_oclass,
+static const struct nv50_fb_func
+gt215_fb = {
+ .ram_new = gt215_ram_new,
.trap = 0x000d0fff,
-}.base.base;
+};
+
+int
+gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nv50_fb_new_(&gt215_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 7be4a47ef4ad..73b3b86a2826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -22,17 +22,16 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
+#include "ram.h"
-struct nvkm_oclass *
-mcp77_fb_oclass = &(struct nv50_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0xaa),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv50_fb_memtype_valid,
- .base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp77_fb = {
+ .ram_new = mcp77_ram_new,
.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 2d00656faef5..6d11e32ec7ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -22,17 +22,16 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
+#include "ram.h"
-struct nvkm_oclass *
-mcp89_fb_oclass = &(struct nv50_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0xaf),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv50_fb_memtype_valid,
- .base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp89_fb = {
+ .ram_new = mcp77_ram_new,
.trap = 0x089d1fff,
-}.base.base;
+};
+
+int
+mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c063dec7d03a..8ff2e5db4571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,67 +21,39 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
#include "regsnv04.h"
bool
-nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
{
if (!(tile_flags & 0xff00))
return true;
-
return false;
}
-static int
-nv04_fb_init(struct nvkm_object *object)
+static void
+nv04_fb_init(struct nvkm_fb *fb)
{
- struct nv04_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
+ struct nvkm_device *device = fb->subdev.device;
/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
* nvidia reading PFB_CFG_0, then writing back its original value.
* (which was 0x701114 in this case)
*/
- nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
- return 0;
+ nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
}
+static const struct nvkm_fb_func
+nv04_fb = {
+ .init = nv04_fb_init,
+ .ram_new = nv04_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
int
-nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- struct nv04_fb_impl *impl = (void *)oclass;
- struct nv04_fb_priv *priv;
- int ret;
-
- ret = nvkm_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.tile.regions = impl->tile.regions;
- priv->base.tile.init = impl->tile.init;
- priv->base.tile.comp = impl->tile.comp;
- priv->base.tile.fini = impl->tile.fini;
- priv->base.tile.prog = impl->tile.prog;
- return 0;
+ return nvkm_fb_new_(&nv04_fb, device, index, pfb);
}
-
-struct nvkm_oclass *
-nv04_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x04),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv04_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv04_ram_oclass,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
deleted file mode 100644
index caa0d03aaacc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_FB_NV04_H__
-#define __NVKM_FB_NV04_H__
-#include "priv.h"
-
-struct nv04_fb_priv {
- struct nvkm_fb base;
-};
-
-int nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-struct nv04_fb_impl {
- struct nvkm_fb_impl base;
- struct {
- int regions;
- void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
- void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
- struct nvkm_fb_tile *);
- void (*fini)(struct nvkm_fb *, int i,
- struct nvkm_fb_tile *);
- void (*prog)(struct nvkm_fb *, int i,
- struct nvkm_fb_tile *);
- } tile;
-};
-
-void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int nv30_fb_init(struct nvkm_object *);
-void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
-
-void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
- struct nvkm_fb_tile *);
-
-int nv41_fb_init(struct nvkm_object *);
-void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int nv44_fb_init(struct nvkm_object *);
-void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
- u32 pitch, u32 flags, struct nvkm_fb_tile *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index f3530e4a6760..e8c44f5a3d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,10 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
void
-nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x80000000 | addr;
@@ -35,7 +36,7 @@ nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
-nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
@@ -44,27 +45,27 @@ nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
}
void
-nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
- nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
- nv_rd32(pfb, 0x100240 + (i * 0x10));
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+ nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+ nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+ nvkm_rd32(device, 0x100240 + (i * 0x10));
}
-struct nvkm_oclass *
-nv10_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x10),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = _nvkm_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv10_ram_oclass,
+static const struct nvkm_fb_func
+nv10_fb = {
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+ .ram_new = nv10_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 83bcb73caf0a..2ae0beb87567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,21 +23,21 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
-struct nvkm_oclass *
-nv1a_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x1a),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = _nvkm_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv1a_ram_oclass,
+static const struct nvkm_fb_func
+nv1a_fb = {
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+ .ram_new = nv1a_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index e37084b8d05e..126865dfe777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,28 +23,29 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
void
-nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
if (flags & 4) {
- pfb->tile.comp(pfb, i, size, flags, tile);
+ fb->func->tile.comp(fb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
-nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ u32 tags = round_up(tiles / fb->ram->parts, 0x40);
+ if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -56,39 +57,39 @@ nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
void
-nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&pfb->tags, &tile->tag);
+ nvkm_mm_free(&fb->ram->tags, &tile->tag);
}
void
-nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
- nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
- nv_rd32(pfb, 0x100240 + (i * 0x10));
- nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+ nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+ nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+ nvkm_rd32(device, 0x100240 + (i * 0x10));
+ nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
}
-struct nvkm_oclass *
-nv20_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x20),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = _nvkm_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv20_fb = {
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv20_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv20_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+}
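
The nv20 compression hunks move the tag heap from fb->tags to fb->ram->tags, but the sizing arithmetic is unchanged: one tag per 0x40-byte tile, rounded up per memory partition. A standalone sketch with hypothetical numbers (the kernel's round_up() is the power-of-two form; the macro below is equivalent for these inputs):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define round_up(x, y)     (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	uint32_t size  = 4 << 20;  /* hypothetical 4 MiB surface */
	uint32_t parts = 4;        /* hypothetical partition count */
	uint32_t tiles = DIV_ROUND_UP(size, 0x40);
	uint32_t tags  = round_up(tiles / parts, 0x40);

	printf("tiles=%u tags=%u\n", tiles, tags);   /* 65536, 16384 */
	return 0;
}
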
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index bc9f54f38fba..c56746d2a502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,15 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
static void
-nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ u32 tags = round_up(tiles / fb->ram->parts, 0x40);
+ if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -41,20 +42,19 @@ nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nvkm_oclass *
-nv25_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x25),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = _nvkm_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv25_fb = {
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv25_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv20_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 09ebb9477e00..2a7c4831b821 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,20 +23,19 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
+#include "ram.h"
void
-nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) {
tile->addr = (0 << 4);
} else {
- if (pfb->tile.comp) /* z compression */
- pfb->tile.comp(pfb, i, size, flags, tile);
+ if (fb->func->tile.comp) /* z compression */
+ fb->func->tile.comp(fb, i, size, flags, tile);
tile->addr = (1 << 4);
}
@@ -47,12 +46,12 @@ nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
static void
-nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ u32 tags = round_up(tiles / fb->ram->parts, 0x40);
+ if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -64,23 +63,24 @@ nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
static int
-calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nvkm_fb *fb, int k, int i, int j)
{
- struct nvkm_device *device = nv_device(priv);
+ struct nvkm_device *device = fb->subdev.device;
int b = (device->chipset > 0x30 ?
- nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+ nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
+ (4 * (i ^ 1)) :
0) & 0xf;
return 2 * (b & 0x8 ? b - 0x10 : b);
}
static int
-calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nvkm_fb *fb, int l, int k, int i)
{
int j, x = 0;
for (j = 0; j < 4; j++) {
- int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+ int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
}
@@ -88,16 +88,11 @@ calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
return x;
}
-int
-nv30_fb_init(struct nvkm_object *object)
+void
+nv30_fb_init(struct nvkm_fb *fb)
{
- struct nvkm_device *device = nv_device(object);
- struct nv04_fb_priv *priv = (void *)object;
- int ret, i, j;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
+ struct nvkm_device *device = fb->subdev.device;
+ int i, j;
/* Init the memory timing regs at 0x10037c/0x1003ac */
if (device->chipset == 0x30 ||
@@ -105,36 +100,34 @@ nv30_fb_init(struct nvkm_object *object)
device->chipset == 0x35) {
/* Related to ROP count */
int n = (device->chipset == 0x31 ? 2 : 4);
- int l = nv_rd32(priv, 0x1003d0);
+ int l = nvkm_rd32(device, 0x1003d0);
for (i = 0; i < n; i++) {
for (j = 0; j < 3; j++)
- nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
- calc_ref(priv, l, 0, j));
+ nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
+ calc_ref(fb, l, 0, j));
for (j = 0; j < 2; j++)
- nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
- calc_ref(priv, l, 1, j));
+ nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
+ calc_ref(fb, l, 1, j));
}
}
-
- return 0;
}
-struct nvkm_oclass *
-nv30_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x30),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv30_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv30_fb = {
+ .init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv30_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv20_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+}
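
calc_bias() above extracts a nibble from register 0x122c and treats it as a 4-bit two's-complement value scaled by two; calc_ref() then applies that bias per byte lane and clamps. A standalone sketch of the sign-extension step:

#include <stdio.h>

/* decode a 4-bit two's-complement field, scaled by 2, as calc_bias()
 * does with the nibble it extracts */
static int bias(int b)
{
	return 2 * (b & 0x8 ? b - 0x10 : b);
}

int main(void)
{
	int b;
	for (b = 0; b < 16; b++)
		printf("0x%x -> %3d\n", b, bias(b));  /* 0..14, then -16..-2 */
	return 0;
}
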
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index c01dc1839ea4..1604b3789ad1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,15 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
static void
-nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ u32 tags = round_up(tiles / fb->ram->parts, 0x40);
+ if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -42,20 +43,20 @@ nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nvkm_oclass *
-nv35_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x35),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv30_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv35_fb = {
+ .init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv35_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv20_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index cad75a1cef22..80cc0a6e3416 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,15 +23,16 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
static void
-nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x40);
- if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ u32 tags = round_up(tiles / fb->ram->parts, 0x40);
+ if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -42,20 +43,20 @@ nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
}
-struct nvkm_oclass *
-nv36_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x36),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv30_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv36_fb = {
+ .init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv36_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv20_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index dbe5c1910c2c..deec46a310f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,16 +23,17 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
void
-nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x80);
- u32 tags = round_up(tiles / pfb->ram->parts, 0x100);
+ u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -42,34 +43,26 @@ nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
}
}
-static int
-nv40_fb_init(struct nvkm_object *object)
+static void
+nv40_fb_init(struct nvkm_fb *fb)
{
- struct nv04_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
- return 0;
+ nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
}
-struct nvkm_oclass *
-nv40_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x40),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv40_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv40_ram_oclass,
+static const struct nvkm_fb_func
+nv40_fb = {
+ .init = nv40_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+ .ram_new = nv40_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+}
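
nv40_fb_init() collapses to a single nvkm_mask() call. Assuming the conventional read-modify-write semantics (read, write (old & ~mask) | data, return the old value), an open-coded userspace equivalent looks like:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_10033c = 0x0000ffff;       /* fake register backing */

static uint32_t rd32(void)       { return reg_10033c; }
static void     wr32(uint32_t v) { reg_10033c = v; }

/* read-modify-write in the style of nvkm_mask(): returns the old value */
static uint32_t mask32(uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32();
	wr32((temp & ~mask) | data);
	return temp;
}

int main(void)
{
	uint32_t old = mask32(0x00008000, 0x00000000); /* clear bit 15 */
	printf("old=0x%08x new=0x%08x\n", old, reg_10033c);
	return 0;
}
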
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
deleted file mode 100644
index 602182661820..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __NVKM_FB_NV40_H__
-#define __NVKM_FB_NV40_H__
-#include "priv.h"
-
-struct nv40_ram {
- struct nvkm_ram base;
- u32 ctrl;
- u32 coef;
-};
-
-int nv40_ram_calc(struct nvkm_fb *, u32);
-int nv40_ram_prog(struct nvkm_fb *);
-void nv40_ram_tidy(struct nvkm_fb *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index d9e1a40a2955..79e57dd5a00f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,46 +23,40 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
void
-nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
- nv_rd32(pfb, 0x100600 + (i * 0x10));
- nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+ nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+ nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+ nvkm_rd32(device, 0x100600 + (i * 0x10));
+ nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
}
-int
-nv41_fb_init(struct nvkm_object *object)
+void
+nv41_fb_init(struct nvkm_fb *fb)
{
- struct nv04_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x100800, 0x00000001);
- return 0;
+ nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
}
-struct nvkm_oclass *
-nv41_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x41),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv41_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv41_fb = {
+ .init = nv41_fb_init,
.tile.regions = 12,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+ .ram_new = nv41_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 20b97c83c4af..06246cce5ec4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,10 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
static void
-nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001; /* mode = vram */
@@ -36,42 +37,36 @@ nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
}
void
-nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
- nv_rd32(pfb, 0x100600 + (i * 0x10));
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+ nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+ nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+ nvkm_rd32(device, 0x100600 + (i * 0x10));
}
-int
-nv44_fb_init(struct nvkm_object *object)
+void
+nv44_fb_init(struct nvkm_fb *fb)
{
- struct nv04_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100800, 0x00000001);
- return 0;
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100850, 0x80000000);
+ nvkm_wr32(device, 0x100800, 0x00000001);
}
-struct nvkm_oclass *
-nv44_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x44),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv44_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv44_fb = {
+ .init = nv44_fb_init,
.tile.regions = 12,
.tile.init = nv44_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+ .ram_new = nv44_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 5bfac38cdf24..3598a1aa65be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,10 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
void
-nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
@@ -39,19 +40,19 @@ nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-struct nvkm_oclass *
-nv46_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x46),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv44_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv46_fb = {
+ .init = nv44_fb_init,
.tile.regions = 15,
.tile.init = nv46_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+ .ram_new = nv44_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index d3b3988d1d49..c505e4429314 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,22 +23,23 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
-struct nvkm_oclass *
-nv47_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x47),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv41_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv47_fb = {
+ .init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+ .ram_new = nv41_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 236e36c5054e..7b91b9f170e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,22 +23,23 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
-struct nvkm_oclass *
-nv49_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x49),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv41_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv49_ram_oclass,
+static const struct nvkm_fb_func
+nv49_fb = {
+ .init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+ .ram_new = nv49_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 1352b6a73fb0..4e98210c1b1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,21 +23,22 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
-struct nvkm_oclass *
-nv4e_fb_oclass = &(struct nv04_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x4e),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_fb_ctor,
- .dtor = _nvkm_fb_dtor,
- .init = nv44_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv4e_ram_oclass,
+static const struct nvkm_fb_func
+nv4e_fb = {
+ .init = nv44_fb_init,
.tile.regions = 12,
.tile.init = nv46_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+ .ram_new = nv44_ram_new,
+ .memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0480ce52aa06..f5edfadb5b46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -22,11 +22,11 @@
* Authors: Ben Skeggs
*/
#include "nv50.h"
+#include "ram.h"
#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
#include <core/enum.h>
+#include <engine/fifo.h>
int
nv50_fb_memtype[0x80] = {
@@ -40,130 +40,139 @@ nv50_fb_memtype[0x80] = {
1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
-bool
-nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
+static int
+nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ return fb->func->ram_new(&fb->base, pram);
+}
+
+static bool
+nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
{
return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
}
static const struct nvkm_enum vm_dispatch_subclients[] = {
- { 0x00000000, "GRCTX", NULL },
- { 0x00000001, "NOTIFY", NULL },
- { 0x00000002, "QUERY", NULL },
- { 0x00000003, "COND", NULL },
- { 0x00000004, "M2M_IN", NULL },
- { 0x00000005, "M2M_OUT", NULL },
- { 0x00000006, "M2M_NOTIFY", NULL },
+ { 0x00000000, "GRCTX" },
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "QUERY" },
+ { 0x00000003, "COND" },
+ { 0x00000004, "M2M_IN" },
+ { 0x00000005, "M2M_OUT" },
+ { 0x00000006, "M2M_NOTIFY" },
{}
};
static const struct nvkm_enum vm_ccache_subclients[] = {
- { 0x00000000, "CB", NULL },
- { 0x00000001, "TIC", NULL },
- { 0x00000002, "TSC", NULL },
+ { 0x00000000, "CB" },
+ { 0x00000001, "TIC" },
+ { 0x00000002, "TSC" },
{}
};
static const struct nvkm_enum vm_prop_subclients[] = {
- { 0x00000000, "RT0", NULL },
- { 0x00000001, "RT1", NULL },
- { 0x00000002, "RT2", NULL },
- { 0x00000003, "RT3", NULL },
- { 0x00000004, "RT4", NULL },
- { 0x00000005, "RT5", NULL },
- { 0x00000006, "RT6", NULL },
- { 0x00000007, "RT7", NULL },
- { 0x00000008, "ZETA", NULL },
- { 0x00000009, "LOCAL", NULL },
- { 0x0000000a, "GLOBAL", NULL },
- { 0x0000000b, "STACK", NULL },
- { 0x0000000c, "DST2D", NULL },
+ { 0x00000000, "RT0" },
+ { 0x00000001, "RT1" },
+ { 0x00000002, "RT2" },
+ { 0x00000003, "RT3" },
+ { 0x00000004, "RT4" },
+ { 0x00000005, "RT5" },
+ { 0x00000006, "RT6" },
+ { 0x00000007, "RT7" },
+ { 0x00000008, "ZETA" },
+ { 0x00000009, "LOCAL" },
+ { 0x0000000a, "GLOBAL" },
+ { 0x0000000b, "STACK" },
+ { 0x0000000c, "DST2D" },
{}
};
static const struct nvkm_enum vm_pfifo_subclients[] = {
- { 0x00000000, "PUSHBUF", NULL },
- { 0x00000001, "SEMAPHORE", NULL },
+ { 0x00000000, "PUSHBUF" },
+ { 0x00000001, "SEMAPHORE" },
{}
};
static const struct nvkm_enum vm_bar_subclients[] = {
- { 0x00000000, "FB", NULL },
- { 0x00000001, "IN", NULL },
+ { 0x00000000, "FB" },
+ { 0x00000001, "IN" },
{}
};
static const struct nvkm_enum vm_client[] = {
- { 0x00000000, "STRMOUT", NULL },
+ { 0x00000000, "STRMOUT" },
{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
- { 0x00000004, "PFIFO_WRITE", NULL },
+ { 0x00000004, "PFIFO_WRITE" },
{ 0x00000005, "CCACHE", vm_ccache_subclients },
- { 0x00000006, "PMSPPP", NULL },
- { 0x00000007, "CLIPID", NULL },
- { 0x00000008, "PFIFO_READ", NULL },
- { 0x00000009, "VFETCH", NULL },
- { 0x0000000a, "TEXTURE", NULL },
+ { 0x00000006, "PMSPPP" },
+ { 0x00000007, "CLIPID" },
+ { 0x00000008, "PFIFO_READ" },
+ { 0x00000009, "VFETCH" },
+ { 0x0000000a, "TEXTURE" },
{ 0x0000000b, "PROP", vm_prop_subclients },
- { 0x0000000c, "PVP", NULL },
- { 0x0000000d, "PBSP", NULL },
- { 0x0000000e, "PCRYPT", NULL },
- { 0x0000000f, "PCOUNTER", NULL },
- { 0x00000011, "PDAEMON", NULL },
+ { 0x0000000c, "PVP" },
+ { 0x0000000d, "PBSP" },
+ { 0x0000000e, "PCRYPT" },
+ { 0x0000000f, "PCOUNTER" },
+ { 0x00000011, "PDAEMON" },
{}
};
static const struct nvkm_enum vm_engine[] = {
- { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
- { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
- { 0x00000004, "PEEPHOLE", NULL },
- { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
+ { 0x00000000, "PGRAPH" },
+ { 0x00000001, "PVP" },
+ { 0x00000004, "PEEPHOLE" },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients },
{ 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
- { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
- { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
- { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER },
- { 0x0000000b, "PCOUNTER", NULL },
- { 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 },
- { 0x0000000e, "PDAEMON", NULL },
+ { 0x00000008, "PMSPPP" },
+ { 0x00000008, "PMPEG" },
+ { 0x00000009, "PBSP" },
+ { 0x0000000a, "PCRYPT" },
+ { 0x0000000b, "PCOUNTER" },
+ { 0x0000000c, "SEMAPHORE_BG" },
+ { 0x0000000d, "PCE0" },
+ { 0x0000000e, "PDAEMON" },
{}
};
static const struct nvkm_enum vm_fault[] = {
- { 0x00000000, "PT_NOT_PRESENT", NULL },
- { 0x00000001, "PT_TOO_SHORT", NULL },
- { 0x00000002, "PAGE_NOT_PRESENT", NULL },
- { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
- { 0x00000004, "PAGE_READ_ONLY", NULL },
- { 0x00000006, "NULL_DMAOBJ", NULL },
- { 0x00000007, "WRONG_MEMTYPE", NULL },
- { 0x0000000b, "VRAM_LIMIT", NULL },
- { 0x0000000f, "DMAOBJ_LIMIT", NULL },
+ { 0x00000000, "PT_NOT_PRESENT" },
+ { 0x00000001, "PT_TOO_SHORT" },
+ { 0x00000002, "PAGE_NOT_PRESENT" },
+ { 0x00000003, "PAGE_SYSTEM_ONLY" },
+ { 0x00000004, "PAGE_READ_ONLY" },
+ { 0x00000006, "NULL_DMAOBJ" },
+ { 0x00000007, "WRONG_MEMTYPE" },
+ { 0x0000000b, "VRAM_LIMIT" },
+ { 0x0000000f, "DMAOBJ_LIMIT" },
{}
};
static void
-nv50_fb_intr(struct nvkm_subdev *subdev)
+nv50_fb_intr(struct nvkm_fb *base)
{
- struct nvkm_device *device = nv_device(subdev);
- struct nvkm_engine *engine;
- struct nv50_fb_priv *priv = (void *)subdev;
- const struct nvkm_enum *en, *cl;
- struct nvkm_object *engctx = NULL;
- u32 trap[6], idx, chan;
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_subdev *subdev = &fb->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo *fifo = device->fifo;
+ struct nvkm_fifo_chan *chan;
+ const struct nvkm_enum *en, *re, *cl, *sc;
+ u32 trap[6], idx, inst;
u8 st0, st1, st2, st3;
+ unsigned long flags;
int i;
- idx = nv_rd32(priv, 0x100c90);
+ idx = nvkm_rd32(device, 0x100c90);
if (!(idx & 0x80000000))
return;
idx &= 0x00ffffff;
for (i = 0; i < 6; i++) {
- nv_wr32(priv, 0x100c90, idx | i << 24);
- trap[i] = nv_rd32(priv, 0x100c94);
+ nvkm_wr32(device, 0x100c90, idx | i << 24);
+ trap[i] = nvkm_rd32(device, 0x100c94);
}
- nv_wr32(priv, 0x100c90, idx | 0x80000000);
+ nvkm_wr32(device, 0x100c90, idx | 0x80000000);
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
@@ -178,143 +187,103 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
st2 = (trap[0] & 0x00ff0000) >> 16;
st3 = (trap[0] & 0xff000000) >> 24;
}
- chan = (trap[2] << 16) | trap[1];
+ inst = ((trap[2] << 16) | trap[1]) << 12;
en = nvkm_enum_find(vm_engine, st0);
-
- if (en && en->data2) {
- const struct nvkm_enum *orig_en = en;
- while (en->name && en->value == st0 && en->data2) {
- engine = nvkm_engine(subdev, en->data2);
- /*XXX: clean this up */
- if (!engine && en->data2 == NVDEV_ENGINE_BSP)
- engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
- if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
- engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
- if (!engine && en->data2 == NVDEV_ENGINE_VP)
- engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
- if (engine) {
- engctx = nvkm_engctx_get(engine, chan);
- if (engctx)
- break;
- }
- en++;
- }
- if (!engctx)
- en = orig_en;
- }
-
- nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
- (trap[5] & 0x00000100) ? "read" : "write",
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
- nvkm_client_name(engctx));
-
- nvkm_engctx_put(engctx);
-
- if (en)
- pr_cont("%s/", en->name);
- else
- pr_cont("%02x/", st0);
-
+ re = nvkm_enum_find(vm_fault , st1);
cl = nvkm_enum_find(vm_client, st2);
- if (cl)
- pr_cont("%s/", cl->name);
- else
- pr_cont("%02x/", st2);
-
- if (cl && cl->data) cl = nvkm_enum_find(cl->data, st3);
- else if (en && en->data) cl = nvkm_enum_find(en->data, st3);
- else cl = NULL;
- if (cl)
- pr_cont("%s", cl->name);
- else
- pr_cont("%02x", st3);
-
- pr_cont(" reason: ");
- en = nvkm_enum_find(vm_fault, st1);
- if (en)
- pr_cont("%s\n", en->name);
- else
- pr_cont("0x%08x\n", st1);
+ if (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
+ else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
+ else sc = NULL;
+
+ chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
+ nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
+ "engine %02x [%s] client %02x [%s] "
+ "subclient %02x [%s] reason %08x [%s]\n",
+ (trap[5] & 0x00000100) ? "read" : "write",
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+ chan ? chan->chid : -1, inst,
+ chan ? chan->object.client->name : "unknown",
+ st0, en ? en->name : "",
+ st2, cl ? cl->name : "", st3, sc ? sc->name : "",
+ st1, re ? re->name : "");
+ nvkm_fifo_chan_put(fifo, flags, &chan);
}
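
For reference, the handler above decodes four status bytes packed into trap[0] and rebuilds the channel instance address by shifting the two register halves back into a page-aligned value. A standalone sketch of that unpacking, with invented example register values:

        /* Sketch only: the bitfield extraction nv50_fb_intr() performs. */
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t trap0 = 0x04030201;             /* example value */
                uint32_t trap1 = 0x00000567, trap2 = 0x00000001;
                uint8_t st0 = (trap0 & 0x000000ff) >>  0;
                uint8_t st1 = (trap0 & 0x0000ff00) >>  8;
                uint8_t st2 = (trap0 & 0x00ff0000) >> 16;
                uint8_t st3 = (trap0 & 0xff000000) >> 24;
                uint32_t inst = ((trap2 << 16) | trap1) << 12; /* page-shifted */
                printf("%02x %02x %02x %02x inst=%08x\n",
                       st0, st1, st2, st3, inst);
                return 0;
        }
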
-int
-nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void
+nv50_fb_init(struct nvkm_fb *base)
{
- struct nvkm_device *device = nv_device(parent);
- struct nv50_fb_priv *priv;
- int ret;
-
- ret = nvkm_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c08_page) {
- priv->r100c08 = dma_map_page(nv_device_base(device),
- priv->r100c08_page, 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(nv_device_base(device), priv->r100c08))
- return -EFAULT;
- } else {
- nv_warn(priv, "failed 0x100c08 page alloc\n");
- }
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
- nv_subdev(priv)->intr = nv50_fb_intr;
- return 0;
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ nvkm_wr32(device, 0x100c90, fb->func->trap);
}
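
The >> 8 in the 0x100c08 write above suggests the register holds the scratch page's bus address in 256-byte units; that is an inference from the shift, not documented hardware behaviour. A toy illustration of the encoding under that assumption:

        /* Assumption: 0x100c08 stores address bits [39:8]. Sketch only. */
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t bus_addr = 0x123456700ull;        /* page-aligned example */
                uint32_t reg = (uint32_t)(bus_addr >> 8);  /* 256-byte units */
                printf("%08x -> %#llx\n", reg,
                       (unsigned long long)((uint64_t)reg << 8));
                return 0;
        }
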
-void
-nv50_fb_dtor(struct nvkm_object *object)
+static void *
+nv50_fb_dtor(struct nvkm_fb *base)
{
- struct nvkm_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
- if (priv->r100c08_page) {
- dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+ if (fb->r100c08_page) {
+ dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
+ __free_page(fb->r100c08_page);
}
- nvkm_fb_destroy(&priv->base);
+ return fb;
}
+static const struct nvkm_fb_func
+nv50_fb_ = {
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .intr = nv50_fb_intr,
+ .ram_new = nv50_fb_ram_new,
+ .memtype_valid = nv50_fb_memtype_valid,
+};
+
int
-nv50_fb_init(struct nvkm_object *object)
+nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
{
- struct nv50_fb_impl *impl = (void *)object->oclass;
- struct nv50_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_fb_init(&priv->base);
- if (ret)
- return ret;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+ struct nv50_fb *fb;
+
+ if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+ fb->func = func;
+ *pfb = &fb->base;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ } else {
+ nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
+ }
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- nv_wr32(priv, 0x100c90, impl->trap);
return 0;
}
-struct nvkm_oclass *
-nv50_fb_oclass = &(struct nv50_fb_impl) {
- .base.base.handle = NV_SUBDEV(FB, 0x50),
- .base.base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nvkm_fb_fini,
- },
- .base.memtype = nv50_fb_memtype_valid,
- .base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+nv50_fb = {
+ .ram_new = nv50_ram_new,
.trap = 0x000707ff,
-}.base.base;
+};
+
+int
+nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return nv50_fb_new_(&nv50_fb, device, index, pfb);
+}
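
This closes the pattern used throughout the series: a parameterized *_new_() constructor plus a thin per-chipset wrapper supplying a const parameter block. Sketch of the shape (illustrative names only):

        /* Sketch: the trailing-underscore constructor convention. */
        #include <stdio.h>

        struct params { unsigned trap; };

        static int fb_new_(const struct params *p)
        {
                printf("trap=%#x\n", p->trap);
                return 0;
        }

        static const struct params nv50_params = { .trap = 0x000707ff };

        static int nv50_new(void) { return fb_new_(&nv50_params); }

        int main(void) { return nv50_new(); }
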
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index f3cde3f1f511..faa88c8c66fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,31 +1,21 @@
#ifndef __NVKM_FB_NV50_H__
#define __NVKM_FB_NV50_H__
+#define nv50_fb(p) container_of((p), struct nv50_fb, base)
#include "priv.h"
-struct nv50_fb_priv {
+struct nv50_fb {
+ const struct nv50_fb_func *func;
struct nvkm_fb base;
struct page *r100c08_page;
dma_addr_t r100c08;
};
-int nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv50_fb_dtor(struct nvkm_object *);
-int nv50_fb_init(struct nvkm_object *);
-
-struct nv50_fb_impl {
- struct nvkm_fb_impl base;
+struct nv50_fb_func {
+ int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
u32 trap;
};
-#define nv50_ram_create(p,e,o,d) \
- nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
-int nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-int nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+ struct nvkm_fb **pfb);
extern int nv50_fb_memtype[0x80];
#endif
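
The new nv50_fb(p) macro above is a plain container_of() downcast from the embedded base to the containing structure. A self-contained demonstration with a portable definition (my_container_of stands in for the kernel macro):

        /* Sketch: recover the containing struct from a member pointer. */
        #include <stddef.h>
        #include <stdio.h>

        #define my_container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct base { int x; };
        struct derived { int tag; struct base base; };

        int main(void)
        {
                struct derived d = { .tag = 42 };
                struct base *b = &d.base;                       /* upcast */
                struct derived *back =
                        my_container_of(b, struct derived, base);
                printf("%d\n", back->tag);                      /* 42 */
                return 0;
        }
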
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 485c4b64819a..62b9feb531dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,73 +1,62 @@
#ifndef __NVKM_FB_PRIV_H__
#define __NVKM_FB_PRIV_H__
+#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
#include <subdev/fb.h>
struct nvkm_bios;
-#define nvkm_ram_create(p,e,o,d) \
- nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
-#define nvkm_ram_destroy(p) \
- nvkm_object_destroy(&(p)->base)
-#define nvkm_ram_init(p) \
- nvkm_object_init(&(p)->base)
-#define nvkm_ram_fini(p,s) \
- nvkm_object_fini(&(p)->base, (s))
+struct nvkm_fb_func {
+ void *(*dtor)(struct nvkm_fb *);
+ void (*init)(struct nvkm_fb *);
+ void (*intr)(struct nvkm_fb *);
-#define nvkm_ram_create_(p,e,o,s,d) \
- nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
-#define _nvkm_ram_dtor nvkm_object_destroy
-#define _nvkm_ram_init nvkm_object_init
-#define _nvkm_ram_fini nvkm_object_fini
+ struct {
+ int regions;
+ void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+ void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *);
+ void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+ void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+ } tile;
-extern struct nvkm_oclass nv04_ram_oclass;
-extern struct nvkm_oclass nv10_ram_oclass;
-extern struct nvkm_oclass nv1a_ram_oclass;
-extern struct nvkm_oclass nv20_ram_oclass;
-extern struct nvkm_oclass nv40_ram_oclass;
-extern struct nvkm_oclass nv41_ram_oclass;
-extern struct nvkm_oclass nv44_ram_oclass;
-extern struct nvkm_oclass nv49_ram_oclass;
-extern struct nvkm_oclass nv4e_ram_oclass;
-extern struct nvkm_oclass nv50_ram_oclass;
-extern struct nvkm_oclass gt215_ram_oclass;
-extern struct nvkm_oclass mcp77_ram_oclass;
-extern struct nvkm_oclass gf100_ram_oclass;
-extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gm107_ram_oclass;
+ int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
-int nvkm_sddr2_calc(struct nvkm_ram *ram);
-int nvkm_sddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
+ bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+};
-#define nvkm_fb_create(p,e,c,d) \
- nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
-#define nvkm_fb_destroy(p) ({ \
- struct nvkm_fb *pfb = (p); \
- _nvkm_fb_dtor(nv_object(pfb)); \
-})
-#define nvkm_fb_init(p) ({ \
- struct nvkm_fb *pfb = (p); \
- _nvkm_fb_init(nv_object(pfb)); \
-})
-#define nvkm_fb_fini(p,s) ({ \
- struct nvkm_fb *pfb = (p); \
- _nvkm_fb_fini(nv_object(pfb), (s)); \
-})
+void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
+ int index, struct nvkm_fb *);
+int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
+ int index, struct nvkm_fb **);
+int nvkm_fb_bios_memtype(struct nvkm_bios *);
-int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_fb_dtor(struct nvkm_object *);
-int _nvkm_fb_init(struct nvkm_object *);
-int _nvkm_fb_fini(struct nvkm_object *, bool);
+bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-struct nvkm_fb_impl {
- struct nvkm_oclass base;
- struct nvkm_oclass *ram;
- bool (*memtype)(struct nvkm_fb *, u32);
-};
+void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv30_fb_init(struct nvkm_fb *);
+void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
+ struct nvkm_fb_tile *);
+
+void nv41_fb_init(struct nvkm_fb *);
+void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv44_fb_init(struct nvkm_fb *);
+void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nvkm_fb_tile *);
-int nvkm_fb_bios_memtype(struct nvkm_bios *);
+bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
new file mode 100644
index 000000000000..c17d559dbfbe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+int
+nvkm_ram_init(struct nvkm_ram *ram)
+{
+ if (ram->func->init)
+ return ram->func->init(ram);
+ return 0;
+}
+
+void
+nvkm_ram_del(struct nvkm_ram **pram)
+{
+ struct nvkm_ram *ram = *pram;
+ if (ram && !WARN_ON(!ram->func)) {
+ if (ram->func->dtor)
+ *pram = ram->func->dtor(ram);
+ nvkm_mm_fini(&ram->tags);
+ nvkm_mm_fini(&ram->vram);
+ kfree(*pram);
+ *pram = NULL;
+ }
+}
+
+int
+nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+ enum nvkm_ram_type type, u64 size, u32 tags,
+ struct nvkm_ram *ram)
+{
+ static const char *name[] = {
+ [NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
+ [NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
+ [NVKM_RAM_TYPE_SGRAM ] = "SGRAM",
+ [NVKM_RAM_TYPE_SDRAM ] = "SDRAM",
+ [NVKM_RAM_TYPE_DDR1 ] = "DDR1",
+ [NVKM_RAM_TYPE_DDR2 ] = "DDR2",
+ [NVKM_RAM_TYPE_DDR3 ] = "DDR3",
+ [NVKM_RAM_TYPE_GDDR2 ] = "GDDR2",
+ [NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
+ [NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
+ [NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
+ };
+ struct nvkm_subdev *subdev = &fb->subdev;
+ int ret;
+
+ nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
+ ram->func = func;
+ ram->fb = fb;
+ ram->type = type;
+ ram->size = size;
+
+ if (!nvkm_mm_initialised(&ram->vram)) {
+ ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (!nvkm_mm_initialised(&ram->tags)) {
+ ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+ if (ret)
+ return ret;
+
+ nvkm_debug(subdev, "%d compression tags\n", tags);
+ }
+
+ return 0;
+}
+
+int
+nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+ enum nvkm_ram_type type, u64 size, u32 tags,
+ struct nvkm_ram **pram)
+{
+ if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+}
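
Two idioms carry this new file: optional hooks are guarded at the call site rather than requiring stubs, and the destructor hook returns the pointer that should actually be freed, so a containing structure's allocation is released instead of just the embedded base. A userspace analogue (all names invented):

        /* Sketch: optional hooks plus a dtor that returns the block to free. */
        #include <stdlib.h>

        struct thing;
        struct thing_func {
                int (*init)(struct thing *);    /* optional */
                void *(*dtor)(struct thing *);  /* optional; returns allocation */
        };
        struct thing { const struct thing_func *func; };

        static int thing_init(struct thing *t)
        {
                if (t->func->init)              /* hook may be absent */
                        return t->func->init(t);
                return 0;
        }

        static void thing_del(struct thing **pt)
        {
                struct thing *t = *pt;
                if (t) {
                        void *block = t;
                        if (t->func->dtor)
                                block = t->func->dtor(t); /* subclass block */
                        free(block);
                        *pt = NULL;
                }
        }

        int main(void)
        {
                static const struct thing_func funcs = { 0 };
                struct thing *t = calloc(1, sizeof(*t));
                if (!t)
                        return 1;
                t->func = &funcs;
                thing_init(t);
                thing_del(&t);
                return 0;
        }
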
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
new file mode 100644
index 000000000000..f816cbf2ced3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_FB_RAM_PRIV_H__
+#define __NVKM_FB_RAM_PRIV_H__
+#include "priv.h"
+
+int nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+ enum nvkm_ram_type, u64 size, u32 tags,
+ struct nvkm_ram *);
+int nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+ enum nvkm_ram_type, u64 size, u32 tags,
+ struct nvkm_ram **);
+void nvkm_ram_del(struct nvkm_ram **);
+int nvkm_ram_init(struct nvkm_ram *);
+
+extern const struct nvkm_ram_func nv04_ram_func;
+
+int nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+ struct nvkm_ram *);
+int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
+
+int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+ u32, struct nvkm_ram *);
+int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+
+int gk104_ram_init(struct nvkm_ram *ram);
+
+/* RAM type-specific MR calculation routines */
+int nvkm_sddr2_calc(struct nvkm_ram *);
+int nvkm_sddr3_calc(struct nvkm_ram *);
+int nvkm_gddr3_calc(struct nvkm_ram *);
+int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);
+
+int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index f343682b1387..9ef9d6aa3721 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,10 +1,11 @@
#ifndef __NVKM_FBRAM_FUC_H__
#define __NVKM_FBRAM_FUC_H__
+#include <subdev/fb.h>
#include <subdev/pmu.h>
struct ramfuc {
struct nvkm_memx *memx;
- struct nvkm_fb *pfb;
+ struct nvkm_fb *fb;
int sequence;
};
@@ -54,17 +55,14 @@ ramfuc_reg(u32 addr)
}
static inline int
-ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
+ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
{
- struct nvkm_pmu *pmu = nvkm_pmu(pfb);
- int ret;
-
- ret = nvkm_memx_init(pmu, &ram->memx);
+ int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
if (ret)
return ret;
ram->sequence++;
- ram->pfb = pfb;
+ ram->fb = fb;
return 0;
}
@@ -72,9 +70,9 @@ static inline int
ramfuc_exec(struct ramfuc *ram, bool exec)
{
int ret = 0;
- if (ram->pfb) {
+ if (ram->fb) {
ret = nvkm_memx_fini(&ram->memx, exec);
- ram->pfb = NULL;
+ ram->fb = NULL;
}
return ret;
}
@@ -82,8 +80,9 @@ ramfuc_exec(struct ramfuc *ram, bool exec)
static inline u32
ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
{
+ struct nvkm_device *device = ram->fb->subdev.device;
if (reg->sequence != ram->sequence)
- reg->data = nv_rd32(ram->pfb, reg->addr);
+ reg->data = nvkm_rd32(device, reg->addr);
return reg->data;
}
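
ramfuc_rd32() above caches register contents per reclock sequence, so hardware is read at most once per sequence; in the real code it is the write path that stamps reg->sequence. A simplified standalone analogue in which the read itself stamps it:

        /* Simplified sketch of the sequence-cached register read. */
        #include <stdint.h>
        #include <stdio.h>

        struct reg { int sequence; uint32_t data; };
        struct ctx { int sequence; };

        static uint32_t hw_read(void) { return 0x1234; } /* MMIO stand-in */

        static uint32_t cached_rd32(struct ctx *c, struct reg *r)
        {
                if (r->sequence != c->sequence) {  /* stale: refresh once */
                        r->data = hw_read();
                        r->sequence = c->sequence;
                }
                return r->data;
        }

        int main(void)
        {
                struct ctx c = { .sequence = 1 };
                struct reg r = { 0 };
                printf("%x %x\n", cached_rd32(&c, &r), cached_rd32(&c, &r));
                return 0;
        }
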
@@ -144,11 +143,9 @@ ramfuc_train(struct ramfuc *ram)
}
static inline int
-ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
+ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
{
- struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-
- return nvkm_memx_train_result(pmu, result, rsize);
+ return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index de9f39569943..772425ca5a9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include "gf100.h"
+#define gf100_ram(p) container_of((p), struct gf100_ram, base)
+#include "ram.h"
#include "ramfuc.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
@@ -108,9 +108,10 @@ static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nvkm_fb *pfb = nvkm_fb(ram);
- u32 part = nv_rd32(pfb, 0x022438), i;
- u32 mask = nv_rd32(pfb, 0x022554);
+ struct nvkm_fb *fb = ram->base.fb;
+ struct nvkm_device *device = fb->subdev.device;
+ u32 part = nvkm_rd32(device, 0x022438), i;
+ u32 mask = nvkm_rd32(device, 0x022554);
u32 addr = 0x110974;
ram_wr32(fuc, 0x10f910, magic);
@@ -124,12 +125,14 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
}
static int
-gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gf100_ram_calc(struct nvkm_ram *base, u32 freq)
{
- struct nvkm_clk *clk = nvkm_clk(pfb);
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct gf100_ram *ram = (void *)pfb->ram;
+ struct gf100_ram *ram = gf100_ram(base);
struct gf100_ramfuc *fuc = &ram->fuc;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_clk *clk = device->clk;
+ struct nvkm_bios *bios = device->bios;
struct nvbios_ramcfg cfg;
u8 ver, cnt, len, strap;
struct {
@@ -145,37 +148,37 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
&cnt, &ramcfg.size, &cfg);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
- nv_error(pfb, "invalid/missing rammap entry\n");
+ nvkm_error(subdev, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(nv_subdev(pfb));
+ strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
- nv_error(pfb, "invalid ramcfg strap\n");
+ nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
- nv_error(pfb, "invalid/missing ramcfg entry\n");
+ nvkm_error(subdev, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
/* lookup memory timings, if bios says they're present */
- strap = nv_ro08(bios, ramcfg.data + 0x01);
+ strap = nvbios_rd08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
&cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
- nv_error(pfb, "invalid/missing timing entry\n");
+ nvkm_error(subdev, "invalid/missing timing entry\n");
return -EINVAL;
}
} else {
timing.data = 0;
}
- ret = ram_init(fuc, pfb);
+ ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
@@ -184,9 +187,9 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
/* determine target mclk configuration */
if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
- ref = clk->read(clk, nv_clk_src_sppll0);
+ ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
else
- ref = clk->read(clk, nv_clk_src_sppll1);
+ ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
out = (ref * 2) / (div + 2);
mode = freq != out;
@@ -210,10 +213,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
if (mode == 1 && from == 0) {
/* calculate refpll */
- ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
- ram->mempll.refclk, &N1, NULL, &M1, &P);
+ ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
+ &N1, NULL, &M1, &P);
if (ret <= 0) {
- nv_error(pfb, "unable to calc refpll\n");
+ nvkm_error(subdev, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
}
@@ -225,10 +228,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
/* calculate mempll */
- ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+ ret = gt215_pll_calc(subdev, &ram->mempll, freq,
&N1, NULL, &M1, &P);
if (ret <= 0) {
- nv_error(pfb, "unable to calc refpll\n");
+ nvkm_error(subdev, "unable to calc refpll\n");
+ /* note: message inherited from the refpll path above */
return ret ? ret : -ERANGE;
}
@@ -402,49 +405,48 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
static int
-gf100_ram_prog(struct nvkm_fb *pfb)
+gf100_ram_prog(struct nvkm_ram *base)
{
- struct nvkm_device *device = nv_device(pfb);
- struct gf100_ram *ram = (void *)pfb->ram;
- struct gf100_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+ struct gf100_ram *ram = gf100_ram(base);
+ struct nvkm_device *device = ram->base.fb->subdev.device;
+ ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
-gf100_ram_tidy(struct nvkm_fb *pfb)
+gf100_ram_tidy(struct nvkm_ram *base)
{
- struct gf100_ram *ram = (void *)pfb->ram;
- struct gf100_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, false);
+ struct gf100_ram *ram = gf100_ram(base);
+ ram_exec(&ram->fuc, false);
}
extern const u8 gf100_pte_storage_type_map[256];
void
-gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
- struct nvkm_ltc *ltc = nvkm_ltc(pfb);
+ struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
struct nvkm_mem *mem = *pmem;
*pmem = NULL;
if (unlikely(mem == NULL))
return;
- mutex_lock(&pfb->base.mutex);
+ mutex_lock(&ram->fb->subdev.mutex);
if (mem->tag)
- ltc->tags_free(ltc, &mem->tag);
- __nv50_ram_put(pfb, mem);
- mutex_unlock(&pfb->base.mutex);
+ nvkm_ltc_tags_free(ltc, &mem->tag);
+ __nv50_ram_put(ram, mem);
+ mutex_unlock(&ram->fb->subdev.mutex);
kfree(mem);
}
int
-gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nvkm_mem **pmem)
{
- struct nvkm_mm *mm = &pfb->vram;
+ struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
+ struct nvkm_mm *mm = &ram->vram;
struct nvkm_mm_node *r;
struct nvkm_mem *mem;
int type = (memtype & 0x0ff);
@@ -452,9 +454,9 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
const bool comp = gf100_pte_storage_type_map[type] != type;
int ret;
- size >>= 12;
- align >>= 12;
- ncmin >>= 12;
+ size >>= NVKM_RAM_MM_SHIFT;
+ align >>= NVKM_RAM_MM_SHIFT;
+ ncmin >>= NVKM_RAM_MM_SHIFT;
if (!ncmin)
ncmin = size;
@@ -465,14 +467,12 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
INIT_LIST_HEAD(&mem->regions);
mem->size = size;
- mutex_lock(&pfb->base.mutex);
+ mutex_lock(&ram->fb->subdev.mutex);
if (comp) {
- struct nvkm_ltc *ltc = nvkm_ltc(pfb);
-
/* compression only works with lpages */
- if (align == (1 << (17 - 12))) {
+ if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
int n = size >> 5;
- ltc->tags_alloc(ltc, n, &mem->tag);
+ nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
}
if (unlikely(!mem->tag))
@@ -486,178 +486,173 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
else
ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
if (ret) {
- mutex_unlock(&pfb->base.mutex);
- pfb->ram->put(pfb, &mem);
+ mutex_unlock(&ram->fb->subdev.mutex);
+ ram->func->put(ram, &mem);
return ret;
}
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
- mutex_unlock(&pfb->base.mutex);
+ mutex_unlock(&ram->fb->subdev.mutex);
r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
- mem->offset = (u64)r->offset << 12;
+ mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
*pmem = mem;
return 0;
}
-int
-gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 maskaddr, int size,
- void **pobject)
+static int
+gf100_ram_init(struct nvkm_ram *base)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct nvkm_ram *ram;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(pfb, 0x022438);
- u32 pmask = nv_rd32(pfb, maskaddr);
- u32 bsize = nv_rd32(pfb, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
+ static const u8 train0[] = {
+ 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+ 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+ };
+ static const u32 train1[] = {
+ 0x00000000, 0xffffffff,
+ 0x55555555, 0xaaaaaaaa,
+ 0x33333333, 0xcccccccc,
+ 0xf0f0f0f0, 0x0f0f0f0f,
+ 0x00ff00ff, 0xff00ff00,
+ 0x0000ffff, 0xffff0000,
+ };
+ struct gf100_ram *ram = gf100_ram(base);
+ struct nvkm_device *device = ram->base.fb->subdev.device;
+ int i;
- ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
- ram = *pobject;
- if (ret)
- return ret;
+ switch (ram->base.type) {
+ case NVKM_RAM_TYPE_GDDR5:
+ break;
+ default:
+ return 0;
+ }
- nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
- nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+ /* prepare for ddr link training, and load training patterns */
+ for (i = 0; i < 0x30; i++) {
+ nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
+ nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
+ nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
+ nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
+ nvkm_wr32(device, 0x10f918, train1[i % 12]);
+ nvkm_wr32(device, 0x10f91c, train1[i % 12]);
+ nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
+ nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
+ nvkm_wr32(device, 0x10f918, train1[i % 12]);
+ nvkm_wr32(device, 0x10f91c, train1[i % 12]);
+ }
- ram->type = nvkm_fb_bios_memtype(bios);
- ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+ return 0;
+}
+
+static const struct nvkm_ram_func
+gf100_ram_func = {
+ .init = gf100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gf100_ram_calc,
+ .prog = gf100_ram_prog,
+ .tidy = gf100_ram_tidy,
+};
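
As elsewhere in this series, per-instance hook assignment (the removed ram->base.calc = ... lines in the old ctor) is replaced by one shared, read-only ops table. A minimal sketch of that design choice:

        /* Sketch: one const ops table instead of per-object hook pointers. */
        #include <stdio.h>

        struct ops { void (*tidy)(void); };

        static void my_tidy(void) { puts("tidy"); }

        /* shared and read-only; every instance points at the same table */
        static const struct ops my_ops = { .tidy = my_tidy };

        struct obj { const struct ops *ops; };

        int main(void)
        {
                struct obj o = { .ops = &my_ops };
                o.ops->tidy();
                return 0;
        }
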
+
+int
+gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+ u32 maskaddr, struct nvkm_ram *ram)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u32 parts = nvkm_rd32(device, 0x022438);
+ u32 pmask = nvkm_rd32(device, maskaddr);
+ u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
+ u64 psize, size = 0;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
+ bool uniform = true;
+ int ret, i;
+
+ nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
+ nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
/* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
- ram->size += (u64)psize << 20;
+ for (i = 0; i < parts; i++) {
+ if (pmask & (1 << i))
+ continue;
+
+ psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
}
+
+ nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
+ size += psize;
}
+ ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+ if (ret)
+ return ret;
+
+ nvkm_mm_fini(&ram->vram);
+
/* if all controllers have the same amount attached, there's no holes */
if (uniform) {
- offset = rsvd_head;
- length = (ram->size >> 12) - rsvd_head - rsvd_tail;
- ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
} else {
/* otherwise, address lowest common amount from 0GiB */
- ret = nvkm_mm_init(&pfb->vram, rsvd_head,
- (bsize << 8) * parts - rsvd_head, 1);
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ((bsize * parts) - rsvd_head) >>
+ NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
/* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
-
- ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+ ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
+ NVKM_RAM_MM_SHIFT,
+ (size - (bsize * parts) - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
if (ret)
- nvkm_mm_fini(&pfb->vram);
- }
-
- if (ret)
- return ret;
-
- ram->get = gf100_ram_get;
- ram->put = gf100_ram_put;
- return 0;
-}
-
-static int
-gf100_ram_init(struct nvkm_object *object)
-{
- struct nvkm_fb *pfb = (void *)object->parent;
- struct gf100_ram *ram = (void *)object;
- int ret, i;
-
- ret = nvkm_ram_init(&ram->base);
- if (ret)
- return ret;
-
- /* prepare for ddr link training, and load training patterns */
- switch (ram->base.type) {
- case NV_MEM_TYPE_GDDR5: {
- static const u8 train0[] = {
- 0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
- 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
- };
- static const u32 train1[] = {
- 0x00000000, 0xffffffff,
- 0x55555555, 0xaaaaaaaa,
- 0x33333333, 0xcccccccc,
- 0xf0f0f0f0, 0x0f0f0f0f,
- 0x00ff00ff, 0xff00ff00,
- 0x0000ffff, 0xffff0000,
- };
-
- for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
- nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
- nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
- nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
- nv_wr32(pfb, 0x10f918, train1[i % 12]);
- nv_wr32(pfb, 0x10f91c, train1[i % 12]);
- nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
- nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
- nv_wr32(pfb, 0x10f918, train1[i % 12]);
- nv_wr32(pfb, 0x10f91c, train1[i % 12]);
- }
- } break;
- default:
- break;
+ return ret;
}
+ ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
return 0;
}
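
When partitions differ in size, the rewritten ctor builds two VRAM regions: the lowest common per-partition amount addressed from zero, and the remainder re-addressed from 8 GiB plus the common size. Standalone arithmetic with invented example sizes (two partitions of 1 GiB and 2 GiB):

        /* Sketch: the non-uniform VRAM layout computed above. */
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint64_t GiB = 1ull << 30;
                const uint64_t rsvd_head = 256ull * 1024;   /* vga memory */
                const uint64_t rsvd_tail = 1024ull * 1024;  /* vbios etc */
                uint64_t parts = 2;
                uint64_t bsize = 1 * GiB;                   /* smallest partition */
                uint64_t size  = 3 * GiB;                   /* 1 GiB + 2 GiB */

                /* lowest common amount, addressed from zero */
                uint64_t a_off = rsvd_head;
                uint64_t a_len = bsize * parts - rsvd_head;

                /* the rest, re-addressed from 8 GiB + common size */
                uint64_t b_off = 0x0200000000ull + bsize;
                uint64_t b_len = size - bsize * parts - rsvd_tail;

                printf("A: off=%#llx len=%#llx\n",
                       (unsigned long long)a_off, (unsigned long long)a_len);
                printf("B: off=%#llx len=%#llx\n",
                       (unsigned long long)b_off, (unsigned long long)b_len);
                return 0;
        }
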
-static int
-gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct gf100_ram *ram;
int ret;
- ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
- *pobject = nv_object(ram);
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
+
+ ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
if (ret)
return ret;
ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
if (ret) {
- nv_error(ram, "mclk refpll data not found\n");
+ nvkm_error(subdev, "mclk refpll data not found\n");
return ret;
}
ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
if (ret) {
- nv_error(ram, "mclk pll data not found\n");
+ nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
- switch (ram->base.type) {
- case NV_MEM_TYPE_GDDR5:
- ram->base.calc = gf100_ram_calc;
- ram->base.prog = gf100_ram_prog;
- ram->base.tidy = gf100_ram_tidy;
- break;
- default:
- nv_warn(ram, "reclocking of this ram type unsupported\n");
- return 0;
- }
-
ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -718,14 +713,3 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
return 0;
}
-
-struct nvkm_oclass
-gf100_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = gf100_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1ef15c3e6a81..989355622aac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
+#define gk104_ram(p) container_of((p), struct gk104_ram, base)
+#include "ram.h"
#include "ramfuc.h"
-#include "gf100.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
@@ -229,8 +229,9 @@ static void
gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
u32 _mask, u32 _data, u32 _copy)
{
- struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
+ struct nvkm_fb *fb = ram->base.fb;
struct ramfuc *fuc = &ram->fuc.base;
+ struct nvkm_device *device = fb->subdev.device;
u32 addr = 0x110000 + (reg->addr & 0xfff);
u32 mask = _mask | _copy;
u32 data = (_data & _mask) | (reg->data & _copy);
@@ -238,7 +239,7 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
for (i = 0; i < 16; i++, addr += 0x1000) {
if (ram->pnuts & (1 << i)) {
- u32 prev = nv_rd32(priv, addr);
+ u32 prev = nvkm_rd32(device, addr);
u32 next = (prev & ~mask) | data;
nvkm_memx_wr32(fuc->memx, addr, next);
}
@@ -248,9 +249,8 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
static int
-gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
{
- struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
@@ -674,9 +674,8 @@ gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
******************************************************************************/
static int
-gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
{
- struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
@@ -926,9 +925,9 @@ gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
******************************************************************************/
static int
-gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
+gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
{
- struct gk104_ram *ram = (void *)pfb->ram;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_ram_data *cfg;
u32 mhz = khz / 1000;
@@ -941,19 +940,19 @@ gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
}
}
- nv_error(ram, "ramcfg data for %dMHz not found\n", mhz);
+ nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
return -EINVAL;
}
static int
-gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
+gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
{
- struct gk104_ram *ram = (void *)pfb->ram;
struct gk104_ramfuc *fuc = &ram->fuc;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
int refclk, i;
int ret;
- ret = ram_init(fuc, pfb);
+ ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
@@ -973,11 +972,11 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
refclk = fuc->mempll.refclk;
/* calculate refpll coefficients */
- ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+ ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
&ram->fN1, &ram->M1, &ram->P1);
fuc->mempll.refclk = ret;
if (ret <= 0) {
- nv_error(pfb, "unable to calc refpll\n");
+ nvkm_error(subdev, "unable to calc refpll\n");
return -EINVAL;
}
@@ -990,10 +989,10 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
fuc->mempll.min_p = 1;
fuc->mempll.max_p = 2;
- ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
+ ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
&ram->N2, NULL, &ram->M2, &ram->P2);
if (ret <= 0) {
- nv_error(pfb, "unable to calc mempll\n");
+ nvkm_error(subdev, "unable to calc mempll\n");
return -EINVAL;
}
}
@@ -1005,15 +1004,15 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
ram->base.freq = next->freq;
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR3:
+ case NVKM_RAM_TYPE_DDR3:
ret = nvkm_sddr3_calc(&ram->base);
if (ret == 0)
- ret = gk104_ram_calc_sddr3(pfb, next->freq);
+ ret = gk104_ram_calc_sddr3(ram, next->freq);
break;
- case NV_MEM_TYPE_GDDR5:
+ case NVKM_RAM_TYPE_GDDR5:
ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
- ret = gk104_ram_calc_gddr5(pfb, next->freq);
+ ret = gk104_ram_calc_gddr5(ram, next->freq);
break;
default:
ret = -ENOSYS;
@@ -1024,21 +1023,22 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
}
static int
-gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc(struct nvkm_ram *base, u32 freq)
{
- struct nvkm_clk *clk = nvkm_clk(pfb);
- struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ram *ram = gk104_ram(base);
+ struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
struct nvkm_ram_data *xits = &ram->base.xition;
struct nvkm_ram_data *copy;
int ret;
if (ram->base.next == NULL) {
- ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+ ret = gk104_ram_calc_data(ram,
+ nvkm_clk_read(clk, nv_clk_src_mem),
&ram->base.former);
if (ret)
return ret;
- ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
+ ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
if (ret)
return ret;
@@ -1062,13 +1062,13 @@ gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram->base.next = &ram->base.target;
}
- return gk104_ram_calc_xits(pfb, ram->base.next);
+ return gk104_ram_calc_xits(ram, ram->base.next);
}
static void
-gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
{
- struct gk104_ram *ram = (void *)pfb->ram;
+ struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_ram_data *cfg;
u32 mhz = freq / 1000;
u32 mask, data;
@@ -1090,31 +1090,31 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_09_01ff;
mask |= 0x000001ff;
}
- nv_mask(pfb, 0x10f468, mask, data);
+ nvkm_mask(device, 0x10f468, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
data |= cfg->bios.rammap_11_0a_0400;
mask |= 0x00000001;
}
- nv_mask(pfb, 0x10f420, mask, data);
+ nvkm_mask(device, 0x10f420, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
data |= cfg->bios.rammap_11_0a_0800;
mask |= 0x00000001;
}
- nv_mask(pfb, 0x10f430, mask, data);
+ nvkm_mask(device, 0x10f430, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
data |= cfg->bios.rammap_11_0b_01f0;
mask |= 0x0000001f;
}
- nv_mask(pfb, 0x10f400, mask, data);
+ nvkm_mask(device, 0x10f400, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
data |= cfg->bios.rammap_11_0b_0200 << 9;
mask |= 0x00000200;
}
- nv_mask(pfb, 0x10f410, mask, data);
+ nvkm_mask(device, 0x10f410, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
data |= cfg->bios.rammap_11_0d << 16;
@@ -1124,7 +1124,7 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_0f << 8;
mask |= 0x0000ff00;
}
- nv_mask(pfb, 0x10f440, mask, data);
+ nvkm_mask(device, 0x10f440, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
data |= cfg->bios.rammap_11_0e << 8;
@@ -1138,15 +1138,15 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
data |= cfg->bios.rammap_11_0b_0400 << 5;
mask |= 0x00000020;
}
- nv_mask(pfb, 0x10f444, mask, data);
+ nvkm_mask(device, 0x10f444, mask, data);
}
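
The if (mask = 0, data = 0, cond) lines above are deliberate: the comma operator resets both accumulators on every test, and only the final operand controls the branch. A two-assignment demonstration:

        /* Sketch: comma-operator reset in a condition. Valid C. */
        #include <stdio.h>

        int main(void)
        {
                unsigned mask = 0xff, data = 0xff;
                int flag = 1;

                if (mask = 0, data = 0, flag) { /* resets happen either way */
                        data |= 0x1;
                        mask |= 0x1;
                }
                printf("mask=%x data=%x\n", mask, data);
                return 0;
        }
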
static int
-gk104_ram_prog(struct nvkm_fb *pfb)
+gk104_ram_prog(struct nvkm_ram *base)
{
- struct nvkm_device *device = nv_device(pfb);
- struct gk104_ram *ram = (void *)pfb->ram;
+ struct gk104_ram *ram = gk104_ram(base);
struct gk104_ramfuc *fuc = &ram->fuc;
+ struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_ram_data *next = ram->base.next;
if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
@@ -1154,20 +1154,19 @@ gk104_ram_prog(struct nvkm_fb *pfb)
return (ram->base.next == &ram->base.xition);
}
- gk104_ram_prog_0(pfb, 1000);
+ gk104_ram_prog_0(ram, 1000);
ram_exec(fuc, true);
- gk104_ram_prog_0(pfb, next->freq);
+ gk104_ram_prog_0(ram, next->freq);
return (ram->base.next == &ram->base.xition);
}
static void
-gk104_ram_tidy(struct nvkm_fb *pfb)
+gk104_ram_tidy(struct nvkm_ram *base)
{
- struct gk104_ram *ram = (void *)pfb->ram;
- struct gk104_ramfuc *fuc = &ram->fuc;
+ struct gk104_ram *ram = gk104_ram(base);
ram->base.next = NULL;
- ram_exec(fuc, false);
+ ram_exec(&ram->fuc, false);
}
struct gk104_ram_train {
@@ -1183,10 +1182,10 @@ struct gk104_ram_train {
};
static int
-gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
+gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
struct gk104_ram_train *train)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_bios *bios = ram->fb->subdev.device->bios;
struct nvbios_M0205E M0205E;
struct nvbios_M0205S M0205S;
struct nvbios_M0209E M0209E;
@@ -1244,33 +1243,35 @@ gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
}
static int
-gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
+gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
{
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
int i, j;
if ((train->mask & 0x03d3) != 0x03d3) {
- nv_warn(pfb, "missing link training data\n");
+ nvkm_warn(subdev, "missing link training data\n");
return -EINVAL;
}
for (i = 0; i < 0x30; i++) {
for (j = 0; j < 8; j += 4) {
- nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
- nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
+ nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
+ nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
train->type08.data[i] << 4 |
train->type06.data[i]);
- nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
- nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
+ nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
+ nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
train->type09.data[i] << 4 |
train->type07.data[i]);
- nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
+ nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
}
}
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 0x100; i++) {
- nv_wr32(pfb, 0x10f968 + j, i);
- nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
+ nvkm_wr32(device, 0x10f968 + j, i);
+ nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
}
}
@@ -1278,23 +1279,24 @@ gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
}
static int
-gk104_ram_train_init(struct nvkm_fb *pfb)
+gk104_ram_train_init(struct nvkm_ram *ram)
{
- u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+ u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
struct gk104_ram_train *train;
- int ret = -ENOMEM, i;
+ int ret, i;
- if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
- for (i = 0; i < 0x100; i++) {
- ret = gk104_ram_train_type(pfb, i, ramcfg, train);
- if (ret && ret != -ENOENT)
- break;
- }
+ if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
+ return -ENOMEM;
+
+ for (i = 0; i < 0x100; i++) {
+ ret = gk104_ram_train_type(ram, i, ramcfg, train);
+ if (ret && ret != -ENOENT)
+ break;
}
- switch (pfb->ram->type) {
- case NV_MEM_TYPE_GDDR5:
- ret = gk104_ram_train_init_0(pfb, train);
+ switch (ram->type) {
+ case NVKM_RAM_TYPE_GDDR5:
+ ret = gk104_ram_train_init_0(ram, train);
break;
default:
ret = 0;
@@ -1306,18 +1308,14 @@ gk104_ram_train_init(struct nvkm_fb *pfb)
}
int
-gk104_ram_init(struct nvkm_object *object)
+gk104_ram_init(struct nvkm_ram *ram)
{
- struct nvkm_fb *pfb = (void *)object->parent;
- struct gk104_ram *ram = (void *)object;
- struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
u8 ver, hdr, cnt, len, snr, ssz;
u32 data, save;
- int ret, i;
-
- ret = nvkm_ram_init(&ram->base);
- if (ret)
- return ret;
+ int i;
/* run a bunch of tables from rammap table. there's actually
* individual pointers for each rammap entry too, but, nvidia
@@ -1334,33 +1332,32 @@ gk104_ram_init(struct nvkm_object *object)
if (!data || hdr < 0x15)
return -EINVAL;
- cnt = nv_ro08(bios, data + 0x14); /* guess at count */
- data = nv_ro32(bios, data + 0x10); /* guess u32... */
- save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
+ cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
+ data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+ save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
for (i = 0; i < cnt; i++, data += 4) {
if (i != save >> 4) {
- nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+ nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
nvbios_exec(&(struct nvbios_init) {
- .subdev = nv_subdev(pfb),
+ .subdev = subdev,
.bios = bios,
- .offset = nv_ro32(bios, data),
+ .offset = nvbios_rd32(bios, data),
.execute = 1,
});
}
}
- nv_mask(pfb, 0x10f65c, 0x000000f0, save);
- nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
- nv_wr32(pfb, 0x10ecc0, 0xffffffff);
- nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
+ nvkm_mask(device, 0x10f65c, 0x000000f0, save);
+ nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
+ nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+ nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
- return gk104_ram_train_init(pfb);
+ return gk104_ram_train_init(ram);
}
static int
gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
{
- struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
- struct nvkm_bios *bios = nvkm_bios(pfb);
+ struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
struct nvkm_ram_data *cfg;
struct nvbios_ramcfg *d = &ram->diff;
struct nvbios_ramcfg *p, *n;
@@ -1426,63 +1423,64 @@ done:
return ret;
}
-static void
-gk104_ram_dtor(struct nvkm_object *object)
+static void *
+gk104_ram_dtor(struct nvkm_ram *base)
{
- struct gk104_ram *ram = (void *)object;
+ struct gk104_ram *ram = gk104_ram(base);
struct nvkm_ram_data *cfg, *tmp;
list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
kfree(cfg);
}
- nvkm_ram_destroy(&ram->base);
+ return ram;
}
-static int
-gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gk104_ram_func = {
+ .dtor = gk104_ram_dtor,
+ .init = gk104_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gk104_ram_calc,
+ .prog = gk104_ram_prog,
+ .tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_gpio *gpio = device->gpio;
struct dcb_gpio_func func;
struct gk104_ram *ram;
int ret, i;
- u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+ u8 ramcfg = nvbios_ramcfg_index(subdev);
u32 tmp;
- ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
- *pobject = nv_object(ram);
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
+
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
if (ret)
return ret;
INIT_LIST_HEAD(&ram->cfg);
- switch (ram->base.type) {
- case NV_MEM_TYPE_DDR3:
- case NV_MEM_TYPE_GDDR5:
- ram->base.calc = gk104_ram_calc;
- ram->base.prog = gk104_ram_prog;
- ram->base.tidy = gk104_ram_tidy;
- break;
- default:
- nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
- break;
- }
-
/* calculate a mask of differently configured memory partitions,
* because, of course reclocking wasn't complicated enough
* already without having to treat some of them differently to
* the others....
*/
- ram->parts = nv_rd32(pfb, 0x022438);
- ram->pmask = nv_rd32(pfb, 0x022554);
+ ram->parts = nvkm_rd32(device, 0x022438);
+ ram->pmask = nvkm_rd32(device, 0x022554);
ram->pnuts = 0;
for (i = 0, tmp = 0; i < ram->parts; i++) {
if (!(ram->pmask & (1 << i))) {
- u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+ u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
if (tmp && tmp != cfg1) {
ram->pnuts |= (1 << i);
continue;
@@ -1505,7 +1503,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
for (i = 0; !ret; i++) {
ret = gk104_ram_ctor_data(ram, ramcfg, i);
if (ret && ret != -ENOENT) {
- nv_error(pfb, "failed to parse ramcfg data\n");
+ nvkm_error(subdev, "failed to parse ramcfg data\n");
return ret;
}
}
@@ -1513,25 +1511,25 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
/* parse bios data for both pll's */
ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
if (ret) {
- nv_error(pfb, "mclk refpll data not found\n");
+ nvkm_error(subdev, "mclk refpll data not found\n");
return ret;
}
ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
if (ret) {
- nv_error(pfb, "mclk pll data not found\n");
+ nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
/* lookup memory voltage gpios */
- ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
if (ret == 0) {
ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
}
- ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
if (ret == 0) {
ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
@@ -1588,7 +1586,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
switch (ram->base.type) {
- case NV_MEM_TYPE_GDDR5:
+ case NVKM_RAM_TYPE_GDDR5:
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
@@ -1600,7 +1598,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
break;
- case NV_MEM_TYPE_DDR3:
+ case NVKM_RAM_TYPE_DDR3:
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
break;
@@ -1626,14 +1624,3 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
return 0;
}
-
-struct nvkm_oclass
-gk104_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_ram_ctor,
- .dtor = gk104_ram_dtor,
- .init = gk104_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index a298b39f55c5..43d807f6ca71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,35 +21,20 @@
*
* Authors: Ben Skeggs
*/
-#include "gf100.h"
+#include "ram.h"
-struct gm107_ram {
- struct nvkm_ram base;
+static const struct nvkm_ram_func
+gm107_ram_func = {
+ .init = gk104_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
};
-static int
-gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct gm107_ram *ram;
- int ret;
+ if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+ return -ENOMEM;
- ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
-
- return 0;
+ return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
}
-
-struct nvkm_oclass
-gm107_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm107_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = gk104_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 24176401b49b..5c08ae8023fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -22,11 +22,10 @@
* Authors: Ben Skeggs
* Roy Spliet <rspliet@eclipso.eu>
*/
-
+#define gt215_ram(p) container_of((p), struct gt215_ram, base)
+#include "ram.h"
#include "ramfuc.h"
-#include "nv50.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0205.h>
@@ -154,14 +153,14 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
* Link training for (at least) DDR3
*/
int
-gt215_link_train(struct nvkm_fb *pfb)
+gt215_link_train(struct gt215_ram *ram)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct gt215_ram *ram = (void *)pfb->ram;
- struct nvkm_clk *clk = nvkm_clk(pfb);
struct gt215_ltrain *train = &ram->ltrain;
- struct nvkm_device *device = nv_device(pfb);
struct gt215_ramfuc *fuc = &ram->fuc;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_clk *clk = device->clk;
u32 *result, r1700;
int ret, i;
struct nvbios_M0205T M0205T = { 0 };
@@ -182,27 +181,29 @@ gt215_link_train(struct nvkm_fb *pfb)
/* Clock speeds for training and back */
nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
- if (M0205T.freq == 0)
+ if (M0205T.freq == 0) {
+ kfree(result);
return -ENOENT;
+ }
- clk_current = clk->read(clk, nv_clk_src_mem);
+ clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
ret = gt215_clk_pre(clk, f);
if (ret)
goto out;
/* First: clock up/down */
- ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+ ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
if (ret)
goto out;
/* Do this *after* calc, eliminates write in script */
- nv_wr32(pfb, 0x111400, 0x00000000);
+ nvkm_wr32(device, 0x111400, 0x00000000);
/* XXX: Magic writes that improve train reliability? */
- nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
- nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
- nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
- nv_wr32(pfb, 0x100c04, 0x00000400);
+ nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
+ nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
+ nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
+ nvkm_wr32(device, 0x100c04, 0x00000400);
/* Now the training script */
r1700 = ram_rd32(fuc, 0x001700);
@@ -235,22 +236,22 @@ gt215_link_train(struct nvkm_fb *pfb)
ram_exec(fuc, true);
- ram->base.calc(pfb, clk_current);
+ ram->base.func->calc(&ram->base, clk_current);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
- nv_mask(pfb, 0x616308, 0x10, 0x10);
- nv_mask(pfb, 0x616b08, 0x10, 0x10);
+ nvkm_mask(device, 0x616308, 0x10, 0x10);
+ nvkm_mask(device, 0x616b08, 0x10, 0x10);
gt215_clk_post(clk, f);
- ram_train_result(pfb, result, 64);
+ ram_train_result(ram->base.fb, result, 64);
for (i = 0; i < 64; i++)
- nv_debug(pfb, "Train: %08x", result[i]);
+ nvkm_debug(subdev, "Train: %08x", result[i]);
gt215_link_train_calc(result, train);
- nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
- train->r_1111e0, train->r_111400);
+ nvkm_debug(subdev, "Train: %08x %08x %08x", train->r_100720,
+ train->r_1111e0, train->r_111400);
kfree(result);
@@ -265,11 +266,12 @@ out:
train->state = NVA3_TRAIN_UNSUPPORTED;
gt215_clk_post(clk, f);
+ kfree(result);
return ret;
}
int
-gt215_link_train_init(struct nvkm_fb *pfb)
+gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -277,9 +279,9 @@ gt215_link_train_init(struct nvkm_fb *pfb)
0x33333333, 0x55555555, 0x77777777, 0x66666666,
0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
};
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct gt215_ram *ram = (void *)pfb->ram;
struct gt215_ltrain *train = &ram->ltrain;
+ struct nvkm_device *device = ram->base.fb->subdev.device;
+ struct nvkm_bios *bios = device->bios;
struct nvkm_mem *mem;
struct nvbios_M0205E M0205E;
u8 ver, hdr, cnt, len;
@@ -298,48 +300,47 @@ gt215_link_train_init(struct nvkm_fb *pfb)
train->state = NVA3_TRAIN_ONCE;
- ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+ ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
+ &ram->ltrain.mem);
if (ret)
return ret;
mem = ram->ltrain.mem;
- nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
- nv_wr32(pfb, 0x1005a8, 0x0000ffff);
- nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+ nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+ nvkm_wr32(device, 0x1005a8, 0x0000ffff);
+ nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
- nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+ nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
+ nvkm_wr32(device, 0x10f900, pattern[i % 16]);
}
for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
- nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
+ nvkm_wr32(device, 0x10f920, pattern[i % 16]);
}
/* And upload the pattern */
- r001700 = nv_rd32(pfb, 0x1700);
- nv_wr32(pfb, 0x1700, mem->offset >> 16);
+ r001700 = nvkm_rd32(device, 0x1700);
+ nvkm_wr32(device, 0x1700, mem->offset >> 16);
for (i = 0; i < 16; i++)
- nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+ nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
for (i = 0; i < 16; i++)
- nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
- nv_wr32(pfb, 0x1700, r001700);
+ nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
+ nvkm_wr32(device, 0x1700, r001700);
- train->r_100720 = nv_rd32(pfb, 0x100720);
- train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
- train->r_111400 = nv_rd32(pfb, 0x111400);
+ train->r_100720 = nvkm_rd32(device, 0x100720);
+ train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
+ train->r_111400 = nvkm_rd32(device, 0x111400);
return 0;
}
void
-gt215_link_train_fini(struct nvkm_fb *pfb)
+gt215_link_train_fini(struct gt215_ram *ram)
{
- struct gt215_ram *ram = (void *)pfb->ram;
-
if (ram->ltrain.mem)
- pfb->ram->put(pfb, &ram->ltrain.mem);
+ ram->base.func->put(&ram->base, &ram->ltrain.mem);
}
/*
@@ -347,24 +348,25 @@ gt215_link_train_fini(struct nvkm_fb *pfb)
*/
#define T(t) cfg->timing_10_##t
static int
-gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
+gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
{
- struct gt215_ram *ram = (void *)pfb->ram;
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
int tUNK_base, tUNK_40_0, prevCL;
u32 cur2, cur3, cur7, cur8;
- cur2 = nv_rd32(pfb, 0x100228);
- cur3 = nv_rd32(pfb, 0x10022c);
- cur7 = nv_rd32(pfb, 0x10023c);
- cur8 = nv_rd32(pfb, 0x100240);
+ cur2 = nvkm_rd32(device, 0x100228);
+ cur3 = nvkm_rd32(device, 0x10022c);
+ cur7 = nvkm_rd32(device, 0x10023c);
+ cur8 = nvkm_rd32(device, 0x100240);
switch ((!T(CWL)) * ram->base.type) {
- case NV_MEM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_DDR2:
T(CWL) = T(CL) - 1;
break;
- case NV_MEM_TYPE_GDDR3:
+ case NVKM_RAM_TYPE_GDDR3:
T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
break;
}
@@ -402,8 +404,8 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
timing[8] = cur8 & 0xffffff00;
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
- case NV_MEM_TYPE_GDDR3:
+ case NVKM_RAM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_GDDR3:
tUNK_40_0 = prevCL - (cur8 & 0xff);
if (tUNK_40_0 > 0)
timing[8] |= T(CL);
@@ -412,11 +414,11 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
break;
}
- nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
- timing[0], timing[1], timing[2], timing[3]);
- nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
- timing[4], timing[5], timing[6], timing[7]);
- nv_debug(pfb, " 240: %08x\n", timing[8]);
+ nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
+ timing[0], timing[1], timing[2], timing[3]);
+ nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
+ timing[4], timing[5], timing[6], timing[7]);
+ nvkm_debug(subdev, " 240: %08x\n", timing[8]);
return 0;
}
#undef T
@@ -466,13 +468,13 @@ gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
static void
gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
{
- struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
+ struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
struct dcb_gpio_func func;
u32 reg, sh, gpio_val;
int ret;
- if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
- ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+ ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
if (ret)
return;
@@ -487,12 +489,14 @@ gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
}
static int
-gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gt215_ram_calc(struct nvkm_ram *base, u32 freq)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ram *ram = gt215_ram(base);
struct gt215_ramfuc *fuc = &ram->fuc;
struct gt215_ltrain *train = &ram->ltrain;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct gt215_clk_info mclk;
struct nvkm_ram_data *next;
u8 ver, hdr, cnt, len, strap;
@@ -508,28 +512,27 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram->base.next = next;
if (ram->ltrain.state == NVA3_TRAIN_ONCE)
- gt215_link_train(pfb);
+ gt215_link_train(ram);
/* lookup memory config data relevant to the target frequency */
- i = 0;
data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x05) {
- nv_error(pfb, "invalid/missing rammap entry\n");
+ nvkm_error(subdev, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(nv_subdev(pfb));
+ strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
- nv_error(pfb, "invalid ramcfg strap\n");
+ nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
&ver, &hdr, &next->bios);
if (!data || ver != 0x10 || hdr < 0x09) {
- nv_error(pfb, "invalid/missing ramcfg entry\n");
+ nvkm_error(subdev, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
@@ -539,20 +542,20 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
&ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x17) {
- nv_error(pfb, "invalid/missing timing entry\n");
+ nvkm_error(subdev, "invalid/missing timing entry\n");
return -EINVAL;
}
}
- ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
+ ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
if (ret < 0) {
- nv_error(pfb, "failed mclk calculation\n");
+ nvkm_error(subdev, "failed mclk calculation\n");
return ret;
}
- gt215_ram_timing_calc(pfb, timing);
+ gt215_ram_timing_calc(ram, timing);
- ret = ram_init(fuc, pfb);
+ ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
@@ -562,13 +565,13 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram->base.mr[2] = ram_rd32(fuc, mr[2]);
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_DDR2:
ret = nvkm_sddr2_calc(&ram->base);
break;
- case NV_MEM_TYPE_DDR3:
+ case NVKM_RAM_TYPE_DDR3:
ret = nvkm_sddr3_calc(&ram->base);
break;
- case NV_MEM_TYPE_GDDR3:
+ case NVKM_RAM_TYPE_GDDR3:
ret = nvkm_gddr3_calc(&ram->base);
break;
default:
@@ -579,7 +582,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
if (ret)
return ret;
- /* XXX: where the fuck does 750MHz come from? */
+ /* XXX: 750MHz seems rather arbitrary */
if (freq <= 750000) {
r004018 = 0x10000000;
r100760 = 0x22222222;
@@ -590,7 +593,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
r100da0 = 0x00000000;
}
- if (!next->bios.ramcfg_10_DLLoff)
+ if (!next->bios.ramcfg_DLLoff)
r004018 |= 0x00004000;
/* pll2pll requires to switch to a safe clock first */
@@ -623,18 +626,18 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_nsec(fuc, 2000);
if (!next->bios.ramcfg_10_02_10) {
- if (ram->base.type == NV_MEM_TYPE_GDDR3)
+ if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
else
ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
}
/* If we're disabling the DLL, do it now */
- switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
- case NV_MEM_TYPE_DDR3:
+ switch (next->bios.ramcfg_DLLoff * ram->base.type) {
+ case NVKM_RAM_TYPE_DDR3:
nvkm_sddr3_dll_disable(fuc, ram->base.mr);
break;
- case NV_MEM_TYPE_GDDR3:
+ case NVKM_RAM_TYPE_GDDR3:
nvkm_gddr3_dll_disable(fuc, ram->base.mr);
break;
}
@@ -650,7 +653,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_wr32(fuc, 0x1002dc, 0x00000001);
ram_nsec(fuc, 2000);
- if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+ if (device->chipset == 0xa3 && freq <= 500000)
ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
/* Fiddle with clocks */
@@ -708,7 +711,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
}
- if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+ if (device->chipset == 0xa3 && freq > 500000) {
ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
}
@@ -752,11 +755,11 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
if (next->bios.ramcfg_10_02_04) {
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR3:
- if (nv_device(pfb)->chipset != 0xa8)
+ case NVKM_RAM_TYPE_DDR3:
+ if (device->chipset != 0xa8)
r111100 |= 0x00000004;
/* no break */
- case NV_MEM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_DDR2:
r111100 |= 0x08000000;
break;
default:
@@ -764,12 +767,12 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
} else {
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_DDR2:
r111100 |= 0x1a800000;
unk714 |= 0x00000010;
break;
- case NV_MEM_TYPE_DDR3:
- if (nv_device(pfb)->chipset == 0xa8) {
+ case NVKM_RAM_TYPE_DDR3:
+ if (device->chipset == 0xa8) {
r111100 |= 0x08000000;
} else {
r111100 &= ~0x00000004;
@@ -777,7 +780,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
unk714 |= 0x00000010;
break;
- case NV_MEM_TYPE_GDDR3:
+ case NVKM_RAM_TYPE_GDDR3:
r111100 |= 0x30000000;
unk714 |= 0x00000020;
break;
@@ -810,16 +813,16 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
gt215_ram_fbvref(fuc, 1);
/* Reset DLL */
- if (!next->bios.ramcfg_10_DLLoff)
+ if (!next->bios.ramcfg_DLLoff)
nvkm_sddr2_dll_reset(fuc);
- if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+ if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
ram_nsec(fuc, 31000);
} else {
ram_nsec(fuc, 14000);
}
- if (ram->base.type == NV_MEM_TYPE_DDR3) {
+ if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
ram_wr32(fuc, 0x100264, 0x1);
ram_nsec(fuc, 2000);
}
@@ -855,24 +858,24 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
}
static int
-gt215_ram_prog(struct nvkm_fb *pfb)
+gt215_ram_prog(struct nvkm_ram *base)
{
- struct nvkm_device *device = nv_device(pfb);
- struct gt215_ram *ram = (void *)pfb->ram;
+ struct gt215_ram *ram = gt215_ram(base);
struct gt215_ramfuc *fuc = &ram->fuc;
+ struct nvkm_device *device = ram->base.fb->subdev.device;
bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
if (exec) {
- nv_mask(pfb, 0x001534, 0x2, 0x2);
+ nvkm_mask(device, 0x001534, 0x2, 0x2);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
- nv_mask(pfb, 0x002504, 0x1, 0x0);
- nv_mask(pfb, 0x001534, 0x2, 0x0);
+ nvkm_mask(device, 0x002504, 0x1, 0x0);
+ nvkm_mask(device, 0x001534, 0x2, 0x0);
- nv_mask(pfb, 0x616308, 0x10, 0x10);
- nv_mask(pfb, 0x616b08, 0x10, 0x10);
+ nvkm_mask(device, 0x616308, 0x10, 0x10);
+ nvkm_mask(device, 0x616b08, 0x10, 0x10);
} else {
ram_exec(fuc, false);
}
@@ -880,69 +883,56 @@ gt215_ram_prog(struct nvkm_fb *pfb)
}
static void
-gt215_ram_tidy(struct nvkm_fb *pfb)
+gt215_ram_tidy(struct nvkm_ram *base)
{
- struct gt215_ram *ram = (void *)pfb->ram;
- struct gt215_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, false);
+ struct gt215_ram *ram = gt215_ram(base);
+ ram_exec(&ram->fuc, false);
}
static int
-gt215_ram_init(struct nvkm_object *object)
+gt215_ram_init(struct nvkm_ram *base)
{
- struct nvkm_fb *pfb = (void *)object->parent;
- struct gt215_ram *ram = (void *)object;
- int ret;
-
- ret = nvkm_ram_init(&ram->base);
- if (ret)
- return ret;
-
- gt215_link_train_init(pfb);
+ struct gt215_ram *ram = gt215_ram(base);
+ gt215_link_train_init(ram);
return 0;
}
-static int
-gt215_ram_fini(struct nvkm_object *object, bool suspend)
+static void *
+gt215_ram_dtor(struct nvkm_ram *base)
{
- struct nvkm_fb *pfb = (void *)object->parent;
-
- if (!suspend)
- gt215_link_train_fini(pfb);
-
- return 0;
+ struct gt215_ram *ram = gt215_ram(base);
+ gt215_link_train_fini(ram);
+ return ram;
}
-static int
-gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 datasize,
- struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gt215_ram_func = {
+ .dtor = gt215_ram_dtor,
+ .init = gt215_ram_init,
+ .get = nv50_ram_get,
+ .put = nv50_ram_put,
+ .calc = gt215_ram_calc,
+ .prog = gt215_ram_prog,
+ .tidy = gt215_ram_tidy,
+};
+
+int
+gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+ struct nvkm_gpio *gpio = fb->subdev.device->gpio;
struct dcb_gpio_func func;
struct gt215_ram *ram;
- int ret, i;
u32 reg, shift;
+ int ret, i;
+
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
- ret = nv50_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
if (ret)
return ret;
- switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
- case NV_MEM_TYPE_DDR3:
- case NV_MEM_TYPE_GDDR3:
- ram->base.calc = gt215_ram_calc;
- ram->base.prog = gt215_ram_prog;
- ram->base.tidy = gt215_ram_tidy;
- break;
- default:
- nv_warn(ram, "reclocking of this ram type unsupported\n");
- return 0;
- }
-
ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
@@ -992,7 +982,7 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
}
- ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
if (ret == 0) {
nv50_gpio_location(func.line, &reg, &shift);
ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
@@ -1000,13 +990,3 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return 0;
}
-
-struct nvkm_oclass
-gt215_ram_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = gt215_ram_init,
- .fini = gt215_ram_fini,
- },
-};
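
The gt215_ram(p) macro introduced at the top of this file is the standard
container_of() downcast: given a pointer to the embedded nvkm_ram base, it
recovers the enclosing gt215_ram. In isolation (a sketch, not tree code):

    #include <linux/kernel.h>   /* container_of() */

    struct base { int x; };

    struct wrapper {
            struct base base;   /* must be embedded by value, not pointed to */
            int extra;
    };

    static struct wrapper *to_wrapper(struct base *b)
    {
            /* subtract the offset of 'base' within 'wrapper' from b */
            return container_of(b, struct wrapper, base);
    }
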
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index abc18e89a97c..0a0e44b75577 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,81 +21,67 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
+#include "ram.h"
-struct mcp77_ram_priv {
+struct mcp77_ram {
struct nvkm_ram base;
u64 poller_base;
};
static int
-mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 datasize,
- struct nvkm_object **pobject)
+mcp77_ram_init(struct nvkm_ram *base)
{
- u32 rsvd_head = ( 256 * 1024); /* vga memory */
- u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct mcp77_ram_priv *priv;
- int ret;
-
- ret = nvkm_ram_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.type = NV_MEM_TYPE_STOLEN;
- priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
- priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
+ struct mcp77_ram *ram = mcp77_ram(base);
+ struct nvkm_device *device = ram->base.fb->subdev.device;
+ u32 dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
+ u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
+ u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
- rsvd_tail += 0x1000;
- priv->poller_base = priv->base.size - rsvd_tail;
-
- ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
- (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
- 1);
- if (ret)
- return ret;
-
- priv->base.get = nv50_ram_get;
- priv->base.put = nv50_ram_put;
+ /* Enable NISO poller for various clients and set their associated
+ * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ */
+ nvkm_wr32(device, 0x100c18, dniso);
+ nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
+ nvkm_wr32(device, 0x100c1c, hostnb);
+ nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
+ nvkm_wr32(device, 0x100c24, flush);
+ nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
return 0;
}
-static int
-mcp77_ram_init(struct nvkm_object *object)
+static const struct nvkm_ram_func
+mcp77_ram_func = {
+ .init = mcp77_ram_init,
+ .get = nv50_ram_get,
+ .put = nv50_ram_put,
+};
+
+int
+mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(object);
- struct mcp77_ram_priv *priv = (void *)object;
+ struct nvkm_device *device = fb->subdev.device;
+ u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
+ u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
+ u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
+ struct mcp77_ram *ram;
int ret;
- u64 dniso, hostnb, flush;
- ret = nvkm_ram_init(&priv->base);
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
+
+ ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+ size, 0, &ram->base);
if (ret)
return ret;
- dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
- hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
- flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+ ram->poller_base = size - rsvd_tail;
+ ram->base.stolen = base;
+ nvkm_mm_fini(&ram->base.vram);
- /* Enable NISO poller for various clients and set their associated
- * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
- */
- nv_wr32(pfb, 0x100c18, dniso);
- nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
- nv_wr32(pfb, 0x100c1c, hostnb);
- nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
- nv_wr32(pfb, 0x100c24, flush);
- nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
- return 0;
+ return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
}
-
-struct nvkm_oclass
-mcp77_ram_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = mcp77_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = mcp77_ram_init,
- .fini = _nvkm_ram_fini,
- },
-};
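
The new mcp77_ram_new() above carves the stolen window by hand: 256KiB
reserved at the head for VGA, 1MiB plus one 4KiB page at the tail for the
VBIOS and the NISO poller scratch, with the allocator spanning only the
remainder. The same arithmetic in isolation (a sketch; NVKM_RAM_MM_SHIFT is
assumed to be the 4KiB page shift used by the allocator):

    #include <linux/types.h>

    #define MM_SHIFT 12                             /* 4KiB allocator pages */

    static u64 carve_stolen(u64 size, u64 *poller_base)
    {
            u32 rsvd_head = 256 * 1024;             /* vga memory */
            u32 rsvd_tail = 1024 * 1024 + 0x1000;   /* vbios + poller page */

            *poller_base = size - rsvd_tail;        /* scratch sits at the tail */
            /* number of allocatable pages handed to nvkm_mm_init() */
            return (size - rsvd_head - rsvd_tail) >> MM_SHIFT;
    }
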
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 855de1617229..6f053a03d61c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,59 +21,45 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ram.h"
#include "regsnv04.h"
-static int
-nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
- u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
- int ret;
+const struct nvkm_ram_func
+nv04_ram_func = {
+};
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
+int
+nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
+ u64 size;
+ enum nvkm_ram_type type;
if (boot0 & 0x00000100) {
- ram->size = ((boot0 >> 12) & 0xf) * 2 + 2;
- ram->size *= 1024 * 1024;
+ size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ size *= 1024 * 1024;
} else {
switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- ram->size = 32 * 1024 * 1024;
+ size = 32 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- ram->size = 16 * 1024 * 1024;
+ size = 16 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- ram->size = 8 * 1024 * 1024;
+ size = 8 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- ram->size = 4 * 1024 * 1024;
+ size = 4 * 1024 * 1024;
break;
}
}
if ((boot0 & 0x00000038) <= 0x10)
- ram->type = NV_MEM_TYPE_SGRAM;
+ type = NVKM_RAM_TYPE_SGRAM;
else
- ram->type = NV_MEM_TYPE_SDRAM;
+ type = NVKM_RAM_TYPE_SDRAM;
- return 0;
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
}
-
-struct nvkm_oclass
-nv04_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
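
nv04_ram_new() decodes the framebuffer size from a single read of
NV04_PFB_BOOT_0: either an extended 4-bit field (in 2MiB steps, offset by
2MiB) or the legacy two-bit amount code. As one function (a sketch; the
numeric encodings are the usual regsnv04.h values and should be checked
against that header):

    #include <linux/types.h>

    static u64 nv04_vram_size(u32 boot0)
    {
            if (boot0 & 0x00000100)     /* extended size field present */
                    return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024ULL * 1024;

            switch (boot0 & 0x00000003) {           /* RAM_AMOUNT code */
            case 0x00000001: return  4 * 1024 * 1024;
            case 0x00000002: return  8 * 1024 * 1024;
            case 0x00000003: return 16 * 1024 * 1024;
            default:         return 32 * 1024 * 1024;
            }
    }
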
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 3b8a1eda5b64..dfd155c98dbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,39 +21,20 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ram.h"
-static int
-nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
- u32 cfg0 = nv_rd32(pfb, 0x100200);
- int ret;
-
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
+ struct nvkm_device *device = fb->subdev.device;
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ u32 cfg0 = nvkm_rd32(device, 0x100200);
+ enum nvkm_ram_type type;
if (cfg0 & 0x00000001)
- ram->type = NV_MEM_TYPE_DDR1;
+ type = NVKM_RAM_TYPE_DDR1;
else
- ram->type = NV_MEM_TYPE_SDRAM;
+ type = NVKM_RAM_TYPE_SDRAM;
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- return 0;
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
}
-
-struct nvkm_oclass
-nv10_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv10_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index fbae05db4ffd..3c6a8710e812 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,33 +21,21 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ram.h"
-#include <core/device.h>
-
-static int
-nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
struct pci_dev *bridge;
u32 mem, mib;
- int ret;
bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
if (!bridge) {
- nv_fatal(pfb, "no bridge device\n");
+ nvkm_error(&fb->subdev, "no bridge device\n");
return -ENODEV;
}
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
-
- if (nv_device(pfb)->chipset == 0x1a) {
+ if (fb->subdev.device->chipset == 0x1a) {
pci_read_config_dword(bridge, 0x7c, &mem);
mib = ((mem >> 6) & 31) + 1;
} else {
@@ -55,18 +43,6 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
mib = ((mem >> 4) & 127) + 1;
}
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->size = mib * 1024 * 1024;
- return 0;
+ return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+ mib * 1024 * 1024, 0, pram);
}
-
-struct nvkm_oclass
-nv1a_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv1a_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
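
The nv1a probe sizes stolen memory out of the host bridge's PCI config space
rather than a PFB register. Generalized (a sketch: the 0x1a offsets match the
hunk above; pci_dev_put() is added here for reference balance, and the
non-0x1a config offset is not visible in this hunk, so the parameters are
treated as caller-supplied):

    #include <linux/pci.h>

    static int igp_stolen_mib(unsigned int cfg, int shift, u32 mask, u32 *mib)
    {
            /* host bridge lives at bus 0, device 0, function 1 */
            struct pci_dev *bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
            u32 mem;

            if (!bridge)
                    return -ENODEV;
            pci_read_config_dword(bridge, cfg, &mem);
            *mib = ((mem >> shift) & mask) + 1;     /* e.g. 0x7c, 6, 31 on 0x1a */
            pci_dev_put(bridge);
            return 0;
    }
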
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index d9e7187bd235..747e47c10cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,42 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ram.h"
-static int
-nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
- u32 pbus1218 = nv_rd32(pfb, 0x001218);
+ struct nvkm_device *device = fb->subdev.device;
+ u32 pbus1218 = nvkm_rd32(device, 0x001218);
+ u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
+ u32 tags = nvkm_rd32(device, 0x100320);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+ case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+ case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+ case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
+ }
+
+ ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
if (ret)
return ret;
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
- }
- ram->size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
- ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->tags = nv_rd32(pfb, 0x100320);
+ (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
-
-struct nvkm_oclass
-nv20_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv20_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 3d31fa45c1a6..56f8cffc2560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,9 +21,8 @@
*
* Authors: Ben Skeggs
*/
-#include "nv40.h"
+#include "ramnv40.h"
-#include <core/device.h>
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/init.h>
@@ -31,23 +30,23 @@
#include <subdev/clk/pll.h>
#include <subdev/timer.h>
-int
-nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
+static int
+nv40_ram_calc(struct nvkm_ram *base, u32 freq)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct nv40_ram *ram = (void *)pfb->ram;
+ struct nv40_ram *ram = nv40_ram(base);
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll pll;
int N1, M1, N2, M2;
int log2P, ret;
ret = nvbios_pll_parse(bios, 0x04, &pll);
if (ret) {
- nv_error(pfb, "mclk pll data not found\n");
+ nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
- ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
- &N1, &M1, &N2, &M2, &log2P);
+ ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
@@ -64,11 +63,13 @@ nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
return 0;
}
-int
-nv40_ram_prog(struct nvkm_fb *pfb)
+static int
+nv40_ram_prog(struct nvkm_ram *base)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct nv40_ram *ram = (void *)pfb->ram;
+ struct nv40_ram *ram = nv40_ram(base);
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
@@ -76,12 +77,12 @@ nv40_ram_prog(struct nvkm_fb *pfb)
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
- u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+ u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
u32 cnt = 0;
do {
- if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
- nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
- sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+ if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
+ nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+ sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
if (!(sr1[i] & 0x20))
crtc_mask |= (1 << i);
break;
@@ -94,55 +95,66 @@ nv40_ram_prog(struct nvkm_fb *pfb)
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
- nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
- nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+ if (!(tmp & 0x00010000))
+ break;
+ );
+
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+ if ( (tmp & 0x00010000))
+ break;
+ );
+
+ nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+ nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
}
/* prepare ram for reclocking */
- nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
- nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
- nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
- nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
- nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+ nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+ nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+ nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+ nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+ nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
/* change the PLL of each memory partition */
- nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
- switch (nv_device(pfb)->chipset) {
+ nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+ switch (device->chipset) {
case 0x40:
case 0x45:
case 0x41:
case 0x42:
case 0x47:
- nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
- nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
- nv_wr32(pfb, 0x004048, ram->coef);
- nv_wr32(pfb, 0x004030, ram->coef);
+ nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
+ nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
+ nvkm_wr32(device, 0x004048, ram->coef);
+ nvkm_wr32(device, 0x004030, ram->coef);
case 0x43:
case 0x49:
case 0x4b:
- nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
- nv_wr32(pfb, 0x00403c, ram->coef);
+ nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
+ nvkm_wr32(device, 0x00403c, ram->coef);
default:
- nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
- nv_wr32(pfb, 0x004024, ram->coef);
+ nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
+ nvkm_wr32(device, 0x004024, ram->coef);
break;
}
udelay(100);
- nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+ nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
/* re-enable normal operation of memory controller */
- nv_wr32(pfb, 0x1002dc, 0x00000000);
- nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+ nvkm_wr32(device, 0x1002dc, 0x00000000);
+ nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
udelay(100);
/* execute memory reset script from vbios */
if (!bit_entry(bios, 'M', &M)) {
struct nvbios_init init = {
- .subdev = nv_subdev(pfb),
+ .subdev = subdev,
.bios = bios,
- .offset = nv_ro16(bios, M.offset + 0x00),
+ .offset = nvbios_rd16(bios, M.offset + 0x00),
.execute = 1,
};
@@ -155,58 +167,64 @@ nv40_ram_prog(struct nvkm_fb *pfb)
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
- nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
- nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
- nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+ if ( (tmp & 0x00010000))
+ break;
+ );
+
+ nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+ nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
}
return 0;
}
-void
-nv40_ram_tidy(struct nvkm_fb *pfb)
+static void
+nv40_ram_tidy(struct nvkm_ram *base)
{
}
-static int
-nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+nv40_ram_func = {
+ .calc = nv40_ram_calc,
+ .prog = nv40_ram_prog,
+ .tidy = nv40_ram_tidy,
+};
+
+int
+nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
+ u32 tags, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
struct nv40_ram *ram;
- u32 pbus1218 = nv_rd32(pfb, 0x001218);
- int ret;
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
+ return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+}
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
+int
+nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ u32 pbus1218 = nvkm_rd32(device, 0x001218);
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ u32 tags = nvkm_rd32(device, 0x100320);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
+ int ret;
switch (pbus1218 & 0x00000300) {
- case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+ case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+ case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+ case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
}
- ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->base.tags = nv_rd32(pfb, 0x100320);
- ram->base.calc = nv40_ram_calc;
- ram->base.prog = nv40_ram_prog;
- ram->base.tidy = nv40_ram_tidy;
+ ret = nv40_ram_new_(fb, type, size, tags, pram);
+ if (ret)
+ return ret;
+
+ (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
-
-
-struct nvkm_oclass
-nv40_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
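
The open-coded nvkm_msec() loops above replace the old nv_wait() helper: the
macro polls its statement body until a break and evaluates negative on
timeout. A typical wrapper, mirroring the vblank waits in this file (a sketch
that assumes the nvkm headers this file already includes, and the
negative-on-timeout convention seen elsewhere in nvkm):

    /* wait up to 2ms for head 0 to report vblank; <0 means the poll expired */
    static int wait_vblank(struct nvkm_device *device)
    {
            s64 taken = nvkm_msec(device, 2000,
                    if (nvkm_rd32(device, 0x600808) & 0x00010000)
                            break;
            );
            return taken < 0 ? -ETIMEDOUT : 0;
    }
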
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
new file mode 100644
index 000000000000..8a0524566b48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -0,0 +1,14 @@
+#ifndef __NV40_FB_RAM_H__
+#define __NV40_FB_RAM_H__
+#define nv40_ram(p) container_of((p), struct nv40_ram, base)
+#include "ram.h"
+
+struct nv40_ram {
+ struct nvkm_ram base;
+ u32 ctrl;
+ u32 coef;
+};
+
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+ struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 33c612b1355f..114828be292e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,46 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv40.h"
+#include "ramnv40.h"
-static int
-nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nv40_ram *ram;
- u32 pfb474 = nv_rd32(pfb, 0x100474);
+ struct nvkm_device *device = fb->subdev.device;
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ u32 tags = nvkm_rd32(device, 0x100320);
+ u32 fb474 = nvkm_rd32(device, 0x100474);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ if (fb474 & 0x00000004)
+ type = NVKM_RAM_TYPE_GDDR3;
+ if (fb474 & 0x00000002)
+ type = NVKM_RAM_TYPE_DDR2;
+ if (fb474 & 0x00000001)
+ type = NVKM_RAM_TYPE_DDR1;
+
+ ret = nv40_ram_new_(fb, type, size, tags, pram);
if (ret)
return ret;
- if (pfb474 & 0x00000004)
- ram->base.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- ram->base.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- ram->base.type = NV_MEM_TYPE_DDR1;
-
- ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->base.tags = nv_rd32(pfb, 0x100320);
- ram->base.calc = nv40_ram_calc;
- ram->base.prog = nv40_ram_prog;
- ram->base.tidy = nv40_ram_tidy;
+ (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
-
-struct nvkm_oclass
-nv41_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv41_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index f575a7246403..bc56fbf1c788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,44 +21,22 @@
*
* Authors: Ben Skeggs
*/
-#include "nv40.h"
+#include "ramnv40.h"
-static int
-nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nv40_ram *ram;
- u32 pfb474 = nv_rd32(pfb, 0x100474);
- int ret;
+ struct nvkm_device *device = fb->subdev.device;
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ u32 fb474 = nvkm_rd32(device, 0x100474);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
+ if (fb474 & 0x00000004)
+ type = NVKM_RAM_TYPE_GDDR3;
+ if (fb474 & 0x00000002)
+ type = NVKM_RAM_TYPE_DDR2;
+ if (fb474 & 0x00000001)
+ type = NVKM_RAM_TYPE_DDR1;
- if (pfb474 & 0x00000004)
- ram->base.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- ram->base.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- ram->base.type = NV_MEM_TYPE_DDR1;
-
- ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->base.calc = nv40_ram_calc;
- ram->base.prog = nv40_ram_prog;
- ram->base.tidy = nv40_ram_tidy;
- return 0;
+ return nv40_ram_new_(fb, type, size, 0, pram);
}
-
-struct nvkm_oclass
-nv44_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv44_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
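
nv41 and nv44 share the same 0x100474 strap decode; because the checks above
run 0x4, 0x2, 0x1 with later assignments overwriting earlier ones, the lowest
set bit effectively wins. Written with the priority explicit (a sketch with a
local enum standing in for nvkm_ram_type):

    #include <linux/types.h>

    enum ram_type { UNKNOWN, DDR1, DDR2, GDDR3 };

    static enum ram_type fb474_type(u32 fb474)
    {
            if (fb474 & 0x00000001)     /* DDR1 takes precedence */
                    return DDR1;
            if (fb474 & 0x00000002)
                    return DDR2;
            if (fb474 & 0x00000004)
                    return GDDR3;
            return UNKNOWN;
    }
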
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index 51b44cdb2732..c01f4b1022b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,46 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv40.h"
+#include "ramnv40.h"
-static int
-nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nv40_ram *ram;
- u32 pfb914 = nv_rd32(pfb, 0x100914);
+ struct nvkm_device *device = fb->subdev.device;
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ u32 tags = nvkm_rd32(device, 0x100320);
+ u32 fb914 = nvkm_rd32(device, 0x100914);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
-
- switch (pfb914 & 0x00000003) {
- case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+ switch (fb914 & 0x00000003) {
+ case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
+ case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
+ case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
case 0x00000003: break;
}
- ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- ram->base.tags = nv_rd32(pfb, 0x100320);
- ram->base.calc = nv40_ram_calc;
- ram->base.prog = nv40_ram_prog;
- ram->base.tidy = nv40_ram_tidy;
+ ret = nv40_ram_new_(fb, type, size, tags, pram);
+ if (ret)
+ return ret;
+
+ (*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
-
-struct nvkm_oclass
-nv49_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv49_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index f3ed1c60d730..fa3c2e06203d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,34 +21,13 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ram.h"
-static int
-nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
- int ret;
-
- ret = nvkm_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
- if (ret)
- return ret;
-
- ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- ram->type = NV_MEM_TYPE_STOLEN;
- return 0;
+ struct nvkm_device *device = fb->subdev.device;
+ u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+ return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
+ size, 0, pram);
}
-
-struct nvkm_oclass
-nv4e_ram_oclass = {
- .handle = 0,
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv4e_ram_create,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index d2c81dd635dc..9197e0ef5cdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,14 +21,16 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#define nv50_ram(p) container_of((p), struct nv50_ram, base)
+#include "ram.h"
#include "ramseq.h"
+#include "nv50.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk/pll.h>
@@ -38,11 +40,20 @@ struct nv50_ramseq {
struct hwsq_reg r_0x004008;
struct hwsq_reg r_0x00400c;
struct hwsq_reg r_0x00c040;
+ struct hwsq_reg r_0x100200;
struct hwsq_reg r_0x100210;
+ struct hwsq_reg r_0x10021c;
struct hwsq_reg r_0x1002d0;
struct hwsq_reg r_0x1002d4;
struct hwsq_reg r_0x1002dc;
- struct hwsq_reg r_0x100da0[8];
+ struct hwsq_reg r_0x10053c;
+ struct hwsq_reg r_0x1005a0;
+ struct hwsq_reg r_0x1005a4;
+ struct hwsq_reg r_0x100710;
+ struct hwsq_reg r_0x100714;
+ struct hwsq_reg r_0x100718;
+ struct hwsq_reg r_0x10071c;
+ struct hwsq_reg r_0x100da0;
struct hwsq_reg r_0x100e20;
struct hwsq_reg r_0x100e24;
struct hwsq_reg r_0x611200;
@@ -55,64 +66,181 @@ struct nv50_ram {
struct nv50_ramseq hwsq;
};
-#define QFX5800NVA0 1
+#define T(t) cfg->timing_10_##t
+static int
+nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
+{
+ struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 cur2, cur4, cur7, cur8;
+ u8 unkt3b;
+
+ cur2 = nvkm_rd32(device, 0x100228);
+ cur4 = nvkm_rd32(device, 0x100230);
+ cur7 = nvkm_rd32(device, 0x10023c);
+ cur8 = nvkm_rd32(device, 0x100240);
+
+ switch ((!T(CWL)) * ram->base.type) {
+ case NVKM_RAM_TYPE_DDR2:
+ T(CWL) = T(CL) - 1;
+ break;
+ case NVKM_RAM_TYPE_GDDR3:
+ T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+ break;
+ }
+
+ /* XXX: N=1 is not proper statistics */
+ if (device->chipset == 0xa0) {
+ unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
+ timing[6] = (0x2d + T(CL) - T(CWL) +
+ ram->base.next->bios.rammap_00_16_40) << 16 |
+ T(CWL) << 8 |
+ (0x2f + T(CL) - T(CWL));
+ } else {
+ unkt3b = 0x16;
+ timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
+ max_t(s8, T(CWL) - 2, 1) << 8 |
+ (0x2e + T(CL) - T(CWL));
+ }
+
+ timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+ timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+ max_t(u8, T(18), 1) << 16 |
+ (T(WTR) + 1 + T(CWL)) << 8 |
+ (3 + T(CL) - T(CWL));
+ timing[2] = (T(CWL) - 1) << 24 |
+ (T(RRD) << 16) |
+ (T(RCDWR) << 8) |
+ T(RCDRD);
+ timing[3] = (unkt3b - 2 + T(CL)) << 24 |
+ unkt3b << 16 |
+ (T(CL) - 1) << 8 |
+ (T(CL) - 1);
+ timing[4] = (cur4 & 0xffff0000) |
+ T(13) << 8 |
+ T(13);
+ timing[5] = T(RFC) << 24 |
+ max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+ T(RP);
+ /* Timing 6 is already done above */
+ timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
+ timing[8] = (cur8 & 0xffffff00);
+
+ /* XXX: P.version == 1 only has DDR2 and GDDR3? */
+ if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
+ timing[5] |= (T(CL) + 3) << 8;
+ timing[8] |= (T(CL) - 4);
+ } else
+ if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
+ timing[5] |= (T(CL) + 2) << 8;
+ timing[8] |= (T(CL) - 2);
+ }
+
+ nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
+ timing[0], timing[1], timing[2], timing[3]);
+ nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
+ timing[4], timing[5], timing[6], timing[7]);
+ nvkm_debug(subdev, " 240: %08x\n", timing[8]);
+ return 0;
+}
+#undef T
+
+static void
+nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
+{
+ ram_mask(hwsq, mr[0], 0x100, 0x100);
+ ram_mask(hwsq, mr[0], 0x100, 0x000);
+ ram_nsec(hwsq, 24000);
+}
static int
-nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
+nv50_ram_calc(struct nvkm_ram *base, u32 freq)
{
- struct nvkm_bios *bios = nvkm_bios(pfb);
- struct nv50_ram *ram = (void *)pfb->ram;
+ struct nv50_ram *ram = nv50_ram(base);
struct nv50_ramseq *hwsq = &ram->hwsq;
+ struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_perfE perfE;
struct nvbios_pll mpll;
- struct {
- u32 data;
- u8 size;
- } ramcfg, timing;
- u8 ver, hdr, cnt, len, strap;
+ struct nvkm_ram_data *next;
+ u8 ver, hdr, cnt, len, strap, size;
+ u32 data;
+ u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
int N1, M1, N2, M2, P;
int ret, i;
+ u32 timing[9];
+
+ next = &ram->base.target;
+ next->freq = freq;
+ ram->base.next = next;
/* lookup closest matching performance table entry for frequency */
i = 0;
do {
- ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
- &ramcfg.size, &perfE);
- if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
- (ramcfg.size < 2)) {
- nv_error(pfb, "invalid/missing perftab entry\n");
+ data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+ &size, &perfE);
+ if (!data || (ver < 0x25 || ver >= 0x40) ||
+ (size < 2)) {
+ nvkm_error(subdev, "invalid/missing perftab entry\n");
return -EINVAL;
}
} while (perfE.memory < freq);
+ nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
+
/* locate specific data set for the attached memory */
- strap = nvbios_ramcfg_index(nv_subdev(pfb));
+ strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
- nv_error(pfb, "invalid ramcfg strap\n");
+ nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
- ramcfg.data += hdr + (strap * ramcfg.size);
+ data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
+ &next->bios);
+ if (!data) {
+ nvkm_error(subdev, "invalid/missing rammap entry\n");
+ return -EINVAL;
+ }
/* lookup memory timings, if bios says they're present */
- strap = nv_ro08(bios, ramcfg.data + 0x01);
- if (strap != 0xff) {
- timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
- &cnt, &len);
- if (!timing.data || ver != 0x10 || hdr < 0x12) {
- nv_error(pfb, "invalid/missing timing entry "
+ if (next->bios.ramcfg_timing != 0xff) {
+ data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
+ &ver, &hdr, &cnt, &len, &next->bios);
+ if (!data || ver != 0x10 || hdr < 0x12) {
+ nvkm_error(subdev, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
- strap, timing.data, ver, hdr);
+ strap, data, ver, hdr);
return -EINVAL;
}
- } else {
- timing.data = 0;
}
- ret = ram_init(hwsq, nv_subdev(pfb));
+ nv50_ram_timing_calc(ram, timing);
+
+ ret = ram_init(hwsq, subdev);
if (ret)
return ret;
+ /* Determine ram-specific MR values */
+ ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
+ ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
+ ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
+
+ switch (ram->base.type) {
+ case NVKM_RAM_TYPE_GDDR3:
+ ret = nvkm_gddr3_calc(&ram->base);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Always disable this bit during reclock */
+ ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
+
ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
ram_wr32(hwsq, 0x611200, 0x00003300);
@@ -120,6 +248,7 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
ram_nsec(hwsq, 8000);
ram_setf(hwsq, 0x10, 0x00); /* disable fb */
ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+ ram_nsec(hwsq, 2000);
ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -129,97 +258,149 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
ret = nvbios_pll_parse(bios, 0x004008, &mpll);
mpll.vco2.max_freq = 0;
- if (ret == 0) {
- ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+ if (ret >= 0) {
+ ret = nv04_pll_calc(subdev, &mpll, freq,
&N1, &M1, &N2, &M2, &P);
- if (ret == 0)
+ if (ret <= 0)
ret = -EINVAL;
}
if (ret < 0)
return ret;
+ /* XXX: 750MHz seems rather arbitrary */
+ if (freq <= 750000) {
+ r100da0 = 0x00000010;
+ r004008 = 0x90000000;
+ } else {
+ r100da0 = 0x00000000;
+ r004008 = 0x80000000;
+ }
+
+ r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
+
ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
- ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+ /* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
+ * it have a different rammap bit from DLLoff? */
+ ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
+ next->bios.rammap_00_16_40 << 14);
ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
- ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
- (P << 22) | (P << 16));
-#if QFX5800NVA0
- for (i = 0; i < 8; i++)
- ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
-#endif
- ram_nsec(hwsq, 96000); /*XXX*/
+ ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
+ if (subdev->device->chipset >= 0x96)
+ ram_wr32(hwsq, 0x100da0, r100da0);
+ ram_nsec(hwsq, 64000); /*XXX*/
+ ram_nsec(hwsq, 32000); /*XXX*/
+
ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+ ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
ram_nsec(hwsq, 12000);
switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
+ case NVKM_RAM_TYPE_DDR2:
ram_nuke(hwsq, mr[0]); /* force update */
ram_mask(hwsq, mr[0], 0x000, 0x000);
break;
- case NV_MEM_TYPE_GDDR3:
- ram_mask(hwsq, mr[2], 0x000, 0x000);
+ case NVKM_RAM_TYPE_GDDR3:
+ ram_nuke(hwsq, mr[1]); /* force update */
+ ram_wr32(hwsq, mr[1], ram->base.mr[1]);
ram_nuke(hwsq, mr[0]); /* force update */
- ram_mask(hwsq, mr[0], 0x000, 0x000);
+ ram_wr32(hwsq, mr[0], ram->base.mr[0]);
break;
default:
break;
}
- ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
- ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
-
- ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
-
-#if QFX5800NVA0
- ram_nuke(hwsq, 0x100e24);
- ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
- ram_nuke(hwsq, 0x100e20);
- ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
-#endif
+ ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
+ ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
+ ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
+ ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
+ ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
+ ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
+ ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
+ ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
+ ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
+
+ if (!next->bios.ramcfg_00_03_02)
+ ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
+ ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
+
+ /* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
+ unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000101;
+ unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
+ unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100;
+ unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
+
+ if ( next->bios.ramcfg_00_03_01)
+ unk71c |= 0x00000100;
+ if ( next->bios.ramcfg_00_03_02)
+ unk710 |= 0x00000100;
+ if (!next->bios.ramcfg_00_03_08) {
+ unk710 |= 0x1;
+ unk714 |= 0x20;
+ }
+ if ( next->bios.ramcfg_00_04_04)
+ unk714 |= 0x70000000;
+ if ( next->bios.ramcfg_00_04_20)
+ unk718 |= 0x00000100;
+
+ ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
+ ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
+ ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
+ ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
+
+ if (next->bios.rammap_00_16_20) {
+ ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
+ next->bios.ramcfg_00_06 << 8 |
+ next->bios.ramcfg_00_05);
+ ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
+ next->bios.ramcfg_00_08);
+ ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
+ } else {
+ ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
+ }
+ ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
- ram_mask(hwsq, mr[0], 0x100, 0x100);
- ram_mask(hwsq, mr[0], 0x100, 0x000);
+ /* Reset DLL */
+ if (!next->bios.ramcfg_DLLoff)
+ nvkm_sddr2_dll_reset(hwsq);
ram_setf(hwsq, 0x10, 0x01); /* enable fb */
ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
ram_wr32(hwsq, 0x611200, 0x00003330);
ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+
+ if (next->bios.rammap_00_17_02)
+ ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
+ if (!next->bios.rammap_00_16_40)
+ ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
+ if (next->bios.ramcfg_00_03_02)
+ ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
+
return 0;
}
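
Editor's note: the timing writes above use ram_mask() with a full 0xffffffff mask, i.e. a plain register write expressed through the read-modify-write helper so it is recorded in the same hwsq script as the surrounding operations. A minimal sketch of the masking semantics these calls assume (illustrative only, not the hwsq implementation):

    /* Illustrative read-modify-write semantics assumed by ram_mask();
     * with mask == 0xffffffff this degenerates to a full write. */
    static u32 rmw32(u32 cur, u32 mask, u32 data)
    {
            return (cur & ~mask) | data;
    }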
static int
-nv50_ram_prog(struct nvkm_fb *pfb)
+nv50_ram_prog(struct nvkm_ram *base)
{
- struct nvkm_device *device = nv_device(pfb);
- struct nv50_ram *ram = (void *)pfb->ram;
- struct nv50_ramseq *hwsq = &ram->hwsq;
-
- ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+ struct nv50_ram *ram = nv50_ram(base);
+ struct nvkm_device *device = ram->base.fb->subdev.device;
+ ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
-nv50_ram_tidy(struct nvkm_fb *pfb)
+nv50_ram_tidy(struct nvkm_ram *base)
{
- struct nv50_ram *ram = (void *)pfb->ram;
- struct nv50_ramseq *hwsq = &ram->hwsq;
- ram_exec(hwsq, false);
+ struct nv50_ram *ram = nv50_ram(base);
+ ram_exec(&ram->hwsq, false);
}
void
-__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
+__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
{
struct nvkm_mm_node *this;
@@ -227,14 +408,14 @@ __nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
list_del(&this->rl_entry);
- nvkm_mm_free(&pfb->vram, &this);
+ nvkm_mm_free(&ram->vram, &this);
}
- nvkm_mm_free(&pfb->tags, &mem->tag);
+ nvkm_mm_free(&ram->tags, &mem->tag);
}
void
-nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
struct nvkm_mem *mem = *pmem;
@@ -242,19 +423,19 @@ nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
if (unlikely(mem == NULL))
return;
- mutex_lock(&pfb->base.mutex);
- __nv50_ram_put(pfb, mem);
- mutex_unlock(&pfb->base.mutex);
+ mutex_lock(&ram->fb->subdev.mutex);
+ __nv50_ram_put(ram, mem);
+ mutex_unlock(&ram->fb->subdev.mutex);
kfree(mem);
}
int
-nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nvkm_mem **pmem)
{
- struct nvkm_mm *heap = &pfb->vram;
- struct nvkm_mm *tags = &pfb->tags;
+ struct nvkm_mm *heap = &ram->vram;
+ struct nvkm_mm *tags = &ram->tags;
struct nvkm_mm_node *r;
struct nvkm_mem *mem;
int comp = (memtype & 0x300) >> 8;
@@ -262,17 +443,17 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
int back = (memtype & 0x800);
int min, max, ret;
- max = (size >> 12);
- min = ncmin ? (ncmin >> 12) : max;
- align >>= 12;
+ max = (size >> NVKM_RAM_MM_SHIFT);
+ min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
+ align >>= NVKM_RAM_MM_SHIFT;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
- mutex_lock(&pfb->base.mutex);
+ mutex_lock(&ram->fb->subdev.mutex);
if (comp) {
- if (align == 16) {
+ if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
int n = (max >> 4) * comp;
ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@@ -295,34 +476,45 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
else
ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
if (ret) {
- mutex_unlock(&pfb->base.mutex);
- pfb->ram->put(pfb, &mem);
+ mutex_unlock(&ram->fb->subdev.mutex);
+ ram->func->put(ram, &mem);
return ret;
}
list_add_tail(&r->rl_entry, &mem->regions);
max -= r->length;
} while (max);
- mutex_unlock(&pfb->base.mutex);
+ mutex_unlock(&ram->fb->subdev.mutex);
r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
- mem->offset = (u64)r->offset << 12;
+ mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
*pmem = mem;
return 0;
}
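
Editor's note: replacing the literal 12 with NVKM_RAM_MM_SHIFT makes the unit explicit — the allocator tracks VRAM in pages of 1 << NVKM_RAM_MM_SHIFT bytes. The rewritten compression test still means "64 KiB alignment" when the shift is 12, since 1 << (16 - 12) = 16 pages of 4 KiB is 64 KiB. A worked sketch of the conversions, assuming a shift of 12:

    /* Hypothetical helpers, assuming NVKM_RAM_MM_SHIFT == 12 (4 KiB pages). */
    static inline u32 bytes_to_pages(u64 bytes) { return bytes >> 12; }
    static inline u64 pages_to_bytes(u32 pages) { return (u64)pages << 12; }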
+static const struct nvkm_ram_func
+nv50_ram_func = {
+ .get = nv50_ram_get,
+ .put = nv50_ram_put,
+ .calc = nv50_ram_calc,
+ .prog = nv50_ram_prog,
+ .tidy = nv50_ram_tidy,
+};
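
Editor's note: all five operations now live behind one const vtable; core code dispatches through ram->func, as the error path in nv50_ram_get above already shows with ram->func->put(). A hedged sketch of a caller-side wrapper in that style (the wrapper name is illustrative; the ->func->get() call shape is taken from this patch):

    /* Illustrative dispatch through the new vtable. */
    static int
    ram_alloc(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
              u32 memtype, struct nvkm_mem **pmem)
    {
            return ram->func->get(ram, size, align, ncmin, memtype, pmem);
    }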
+
static u32
-nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
+nv50_fb_vram_rblock(struct nvkm_ram *ram)
{
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
int colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
u32 r0, r4, rt, rblock_size;
- r0 = nv_rd32(pfb, 0x100200);
- r4 = nv_rd32(pfb, 0x100204);
- rt = nv_rd32(pfb, 0x100250);
- nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
- r0, r4, rt, nv_rd32(pfb, 0x001540));
+ r0 = nvkm_rd32(device, 0x100200);
+ r4 = nvkm_rd32(device, 0x100204);
+ rt = nvkm_rd32(device, 0x100250);
+ nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
+ r0, r4, rt, nvkm_rd32(device, 0x001540));
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -335,103 +527,94 @@ nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
predicted += rowsize << rowbitsb;
if (predicted != ram->size) {
- nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
- (u32)(ram->size >> 20));
+ nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
+ (u32)(ram->size >> 20));
}
rblock_size = rowsize;
if (rt & 1)
rblock_size *= 3;
- nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+ nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
return rblock_size;
}
int
-nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nv50_ram_ctor(const struct nvkm_ram_func *func,
+ struct nvkm_fb *fb, struct nvkm_ram *ram)
{
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nvkm_bios *bios = nvkm_bios(parent);
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ram *ram;
+ struct nvkm_device *device = fb->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u64 size = nvkm_rd32(device, 0x10020c);
+ u32 tags = nvkm_rd32(device, 0x100320);
+ enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
- ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
- ram = *pobject;
- if (ret)
- return ret;
-
- ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
-
- ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
- ram->parts = hweight8(ram->part_mask);
-
- switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
- case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+ switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
+ case 0: type = NVKM_RAM_TYPE_DDR1; break;
case 1:
- if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- ram->type = NV_MEM_TYPE_DDR3;
+ if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
+ type = NVKM_RAM_TYPE_DDR3;
else
- ram->type = NV_MEM_TYPE_DDR2;
+ type = NVKM_RAM_TYPE_DDR2;
break;
- case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
- case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
- case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
+ case 2: type = NVKM_RAM_TYPE_GDDR3; break;
+ case 3: type = NVKM_RAM_TYPE_GDDR4; break;
+ case 4: type = NVKM_RAM_TYPE_GDDR5; break;
default:
break;
}
- ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail),
- nv50_fb_vram_rblock(pfb, ram) >> 12);
+ size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
+
+ ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
if (ret)
return ret;
- ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
- ram->tags = nv_rd32(pfb, 0x100320);
- ram->get = nv50_ram_get;
- ram->put = nv50_ram_put;
- return 0;
+ ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
+ ram->parts = hweight8(ram->part_mask);
+ ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
+ nvkm_mm_fini(&ram->vram);
+
+ return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+ nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}
-static int
-nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 datasize,
- struct nvkm_object **pobject)
+int
+nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nv50_ram *ram;
int ret, i;
- ret = nv50_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
+ *pram = &ram->base;
+
+ ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
if (ret)
return ret;
- switch (ram->base.type) {
- case NV_MEM_TYPE_DDR2:
- case NV_MEM_TYPE_GDDR3:
- ram->base.calc = nv50_ram_calc;
- ram->base.prog = nv50_ram_prog;
- ram->base.tidy = nv50_ram_tidy;
- break;
- default:
- nv_warn(ram, "reclocking of this ram type unsupported\n");
- return 0;
- }
-
ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+ ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+ ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
- for (i = 0; i < 8; i++)
- ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+ ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
+ ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
+ ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
+ ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
+ ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
+ ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
+ ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
+ ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
@@ -453,13 +636,3 @@ nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return 0;
}
-
-struct nvkm_oclass
-nv50_ram_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_ram_ctor,
- .dtor = _nvkm_ram_dtor,
- .init = _nvkm_ram_init,
- .fini = _nvkm_ram_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index afab42df28d4..86bf67456b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -65,7 +65,7 @@ nvkm_sddr2_calc(struct nvkm_ram *ram)
case 0x10:
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_DLLoff;
+ DLL = !ram->next->bios.ramcfg_DLLoff;
ODT = ram->next->bios.timing_10_ODT & 3;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 10844355c3f3..b4edc97dc8c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -53,7 +53,7 @@ static const struct ramxlat
ramddr3_wr[] = {
{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
/* the below are mentioned in some, but not all, ddr3 docs */
- { 14, 7 }, { 16, 0 },
+ { 14, 7 }, { 15, 7 }, { 16, 0 },
{ -1 }
};
@@ -61,7 +61,7 @@ static const struct ramxlat
ramddr3_cwl[] = {
{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
/* the below are mentioned in some, but not all, ddr3 docs */
- { 9, 4 },
+ { 9, 4 }, { 10, 5 },
{ -1 }
};
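
Editor's note: both tables are sentinel-terminated maps from JEDEC timing values to mode-register bitfield encodings; the new { 15, 7 } and { 10, 5 } entries extend coverage for parts whose datasheets list those values. A minimal sketch of the linear lookup such tables imply (illustrative; the in-tree helper in this file may differ in detail):

    /* Sketch of a sentinel-terminated lookup; -EINVAL (from
     * <linux/errno.h>) when the timing value has no encoding. */
    struct xlat { int id; int enc; };

    static int xlat_enc(const struct xlat *t, int id)
    {
            for (; t->id >= 0; t++) {
                    if (t->id == id)
                            return t->enc;
            }
            return -EINVAL;
    }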
@@ -79,7 +79,7 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_DLLoff;
+ DLL = !ram->next->bios.ramcfg_DLLoff;
ODT = ram->next->bios.timing_10_ODT;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index b7b7193bbce7..f4144979a79c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,31 +21,34 @@
*
* Authors: Martin Peres
*/
-#include <subdev/fuse.h>
+#include "priv.h"
-int
-_nvkm_fuse_init(struct nvkm_object *object)
+u32
+nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
- struct nvkm_fuse *fuse = (void *)object;
- return nvkm_subdev_init(&fuse->base);
+ return fuse->func->read(fuse, addr);
}
-void
-_nvkm_fuse_dtor(struct nvkm_object *object)
+static void *
+nvkm_fuse_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_fuse *fuse = (void *)object;
- nvkm_subdev_destroy(&fuse->base);
+ return nvkm_fuse(subdev);
}
+static const struct nvkm_subdev_func
+nvkm_fuse = {
+ .dtor = nvkm_fuse_dtor,
+};
+
int
-nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fuse **pfuse)
{
struct nvkm_fuse *fuse;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE",
- "fuse", length, pobject);
- fuse = *pobject;
- return ret;
+ if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+ fuse->func = func;
+ spin_lock_init(&fuse->lock);
+ return 0;
}
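
Editor's note: with the read hook moved behind nvkm_fuse_func, consumers call the one exported accessor instead of an object rd32 method. A hedged usage sketch (the offset is a placeholder, and device->fuse assumes the subdev was constructed):

    /* Illustrative consumer; 0x000 is a placeholder fuse offset. */
    static u32 read_fuse_word(struct nvkm_device *device)
    {
            return nvkm_fuse_read(device->fuse, 0x000);
    }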
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 393ef3a0faaf..13671fedc805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -23,56 +23,31 @@
*/
#include "priv.h"
-struct gf100_fuse_priv {
- struct nvkm_fuse base;
-
- spinlock_t fuse_enable_lock;
-};
-
static u32
-gf100_fuse_rd32(struct nvkm_object *object, u64 addr)
+gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
- struct gf100_fuse_priv *priv = (void *)object;
+ struct nvkm_device *device = fuse->subdev.device;
unsigned long flags;
u32 fuse_enable, unk, val;
/* racy if another part of nvkm starts writing to these regs */
- spin_lock_irqsave(&priv->fuse_enable_lock, flags);
- fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
- unk = nv_mask(priv, 0x21000, 0x1, 0x1);
- val = nv_rd32(priv, 0x21100 + addr);
- nv_wr32(priv, 0x21000, unk);
- nv_wr32(priv, 0x22400, fuse_enable);
- spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+ spin_lock_irqsave(&fuse->lock, flags);
+ fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
+ unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
+ val = nvkm_rd32(device, 0x021100 + addr);
+ nvkm_wr32(device, 0x021000, unk);
+ nvkm_wr32(device, 0x022400, fuse_enable);
+ spin_unlock_irqrestore(&fuse->lock, flags);
return val;
}
+static const struct nvkm_fuse_func
+gf100_fuse = {
+ .read = gf100_fuse_read,
+};
-static int
-gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
{
- struct gf100_fuse_priv *priv;
- int ret;
-
- ret = nvkm_fuse_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- spin_lock_init(&priv->fuse_enable_lock);
- return 0;
+ return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
}
-
-struct nvkm_oclass
-gf100_fuse_oclass = {
- .handle = NV_SUBDEV(FUSE, 0xC0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_fuse_ctor,
- .dtor = _nvkm_fuse_dtor,
- .init = _nvkm_fuse_init,
- .fini = _nvkm_fuse_fini,
- .rd32 = gf100_fuse_rd32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 0b256aa4960f..9aff4ea04506 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,40 +23,20 @@
*/
#include "priv.h"
-struct gm107_fuse_priv {
- struct nvkm_fuse base;
-};
-
static u32
-gm107_fuse_rd32(struct nvkm_object *object, u64 addr)
+gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
- struct gf100_fuse_priv *priv = (void *)object;
- return nv_rd32(priv, 0x21100 + addr);
+ struct nvkm_device *device = fuse->subdev.device;
+ return nvkm_rd32(device, 0x021100 + addr);
}
+static const struct nvkm_fuse_func
+gm107_fuse = {
+ .read = gm107_fuse_read,
+};
-static int
-gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
{
- struct gm107_fuse_priv *priv;
- int ret;
-
- ret = nvkm_fuse_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
-
- return ret;
+ return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
}
-
-struct nvkm_oclass
-gm107_fuse_oclass = {
- .handle = NV_SUBDEV(FUSE, 0x117),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm107_fuse_ctor,
- .dtor = _nvkm_fuse_dtor,
- .init = _nvkm_fuse_init,
- .fini = _nvkm_fuse_fini,
- .rd32 = gm107_fuse_rd32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 0d2afc426100..514c193db25d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -23,54 +23,29 @@
*/
#include "priv.h"
-struct nv50_fuse_priv {
- struct nvkm_fuse base;
-
- spinlock_t fuse_enable_lock;
-};
-
static u32
-nv50_fuse_rd32(struct nvkm_object *object, u64 addr)
+nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
- struct nv50_fuse_priv *priv = (void *)object;
+ struct nvkm_device *device = fuse->subdev.device;
unsigned long flags;
u32 fuse_enable, val;
/* racy if another part of nvkm starts writing to this reg */
- spin_lock_irqsave(&priv->fuse_enable_lock, flags);
- fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
- val = nv_rd32(priv, 0x21000 + addr);
- nv_wr32(priv, 0x1084, fuse_enable);
- spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+ spin_lock_irqsave(&fuse->lock, flags);
+ fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
+ val = nvkm_rd32(device, 0x021000 + addr);
+ nvkm_wr32(device, 0x001084, fuse_enable);
+ spin_unlock_irqrestore(&fuse->lock, flags);
return val;
}
+static const struct nvkm_fuse_func
+nv50_fuse = {
+ .read = nv50_fuse_read,
+};
-static int
-nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
{
- struct nv50_fuse_priv *priv;
- int ret;
-
- ret = nvkm_fuse_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- spin_lock_init(&priv->fuse_enable_lock);
- return 0;
+ return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
}
-
-struct nvkm_oclass
-nv50_fuse_oclass = {
- .handle = NV_SUBDEV(FUSE, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_fuse_ctor,
- .dtor = _nvkm_fuse_dtor,
- .init = _nvkm_fuse_init,
- .fini = _nvkm_fuse_fini,
- .rd32 = nv50_fuse_rd32,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 7e050f789384..b0390b540ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,7 +1,12 @@
#ifndef __NVKM_FUSE_PRIV_H__
#define __NVKM_FUSE_PRIV_H__
+#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
#include <subdev/fuse.h>
-int _nvkm_fuse_init(struct nvkm_object *object);
-void _nvkm_fuse_dtor(struct nvkm_object *object);
+struct nvkm_fuse_func {
+ u32 (*read)(struct nvkm_fuse *, u32 addr);
+};
+
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
+ int index, struct nvkm_fuse **);
#endif
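
Editor's note: the nvkm_fuse() macro is the usual container_of() upcast from the embedded subdev, which is also what the dtor above returns so generic code can kfree() the full object. A sketch of that contract (names illustrative):

    /* Sketch of the embed/upcast pattern the macro encodes. */
    struct outer { int priv; struct nvkm_subdev subdev; };

    static void *outer_dtor(struct nvkm_subdev *subdev)
    {
            return container_of(subdev, struct outer, subdev);
    }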
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index ea42a9ed1821..e52c5e87f242 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -2,5 +2,5 @@ nvkm-y += nvkm/subdev/gpio/base.o
nvkm-y += nvkm/subdev/gpio/nv10.o
nvkm-y += nvkm/subdev/gpio/nv50.o
nvkm-y += nvkm/subdev/gpio/g94.o
-nvkm-y += nvkm/subdev/gpio/gf110.o
+nvkm-y += nvkm/subdev/gpio/gf119.o
nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index dea58161ba46..d45ec99f0e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,28 +23,33 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <core/notify.h>
static int
nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
{
- const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
+ return gpio->func->drive(gpio, line, dir, out);
}
static int
nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
{
- const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- return impl->sense ? impl->sense(gpio, line) : -ENODEV;
+ return gpio->func->sense(gpio, line);
}
-static int
+void
+nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
+{
+ if (gpio->func->reset)
+ gpio->func->reset(gpio, func);
+}
+
+int
nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
struct dcb_gpio_func *func)
{
- struct nvkm_bios *bios = nvkm_bios(gpio);
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
u8 ver, len;
u16 data;
@@ -56,11 +61,11 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
return 0;
/* Apple iMac G4 NV18 */
- if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+ if (device->quirk && device->quirk->tv_gpio) {
if (tag == DCB_GPIO_TVDAC0) {
*func = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
- .line = 4,
+ .line = device->quirk->tv_gpio,
.log[0] = 0,
.log[1] = 1,
};
@@ -71,7 +76,7 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
return -ENOENT;
}
-static int
+int
nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
{
struct dcb_gpio_func func;
@@ -87,7 +92,7 @@ nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
return ret;
}
-static int
+int
nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
{
struct dcb_gpio_func func;
@@ -107,16 +112,14 @@ static void
nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
- const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- impl->intr_mask(gpio, type, 1 << index, 0);
+ gpio->func->intr_mask(gpio, type, 1 << index, 0);
}
static void
nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
{
struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
- const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- impl->intr_mask(gpio, type, 1 << index, 1 << index);
+ gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
}
static int
@@ -133,16 +136,22 @@ nvkm_gpio_intr_ctor(struct nvkm_object *object, void *data, u32 size,
return -EINVAL;
}
+static const struct nvkm_event_func
+nvkm_gpio_intr_func = {
+ .ctor = nvkm_gpio_intr_ctor,
+ .init = nvkm_gpio_intr_init,
+ .fini = nvkm_gpio_intr_fini,
+};
+
static void
nvkm_gpio_intr(struct nvkm_subdev *subdev)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
- const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
u32 hi, lo, i;
- impl->intr_stat(gpio, &hi, &lo);
+ gpio->func->intr_stat(gpio, &hi, &lo);
- for (i = 0; (hi | lo) && i < impl->lines; i++) {
+ for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
struct nvkm_gpio_ntfy_rep rep = {
.mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
(NVKM_GPIO_LO * !!(lo & (1 << i))),
@@ -151,24 +160,15 @@ nvkm_gpio_intr(struct nvkm_subdev *subdev)
}
}
-static const struct nvkm_event_func
-nvkm_gpio_intr_func = {
- .ctor = nvkm_gpio_intr_ctor,
- .init = nvkm_gpio_intr_init,
- .fini = nvkm_gpio_intr_fini,
-};
-
-int
-_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
{
- const struct nvkm_gpio_impl *impl = (void *)object->oclass;
- struct nvkm_gpio *gpio = nvkm_gpio(object);
- u32 mask = (1 << impl->lines) - 1;
-
- impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
- impl->intr_stat(gpio, &mask, &mask);
+ struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+ u32 mask = (1 << gpio->func->lines) - 1;
- return nvkm_subdev_fini(&gpio->base, suspend);
+ gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+ gpio->func->intr_stat(gpio, &mask, &mask);
+ return 0;
}
static struct dmi_system_id gpio_reset_ids[] = {
@@ -182,70 +182,43 @@ static struct dmi_system_id gpio_reset_ids[] = {
{ }
};
-int
-_nvkm_gpio_init(struct nvkm_object *object)
+static int
+nvkm_gpio_init(struct nvkm_subdev *subdev)
{
- struct nvkm_gpio *gpio = nvkm_gpio(object);
- int ret;
-
- ret = nvkm_subdev_init(&gpio->base);
- if (ret)
- return ret;
-
- if (gpio->reset && dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio, DCB_GPIO_UNUSED);
-
- return ret;
+ struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+ if (dmi_check_system(gpio_reset_ids))
+ nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+ return 0;
}
-void
-_nvkm_gpio_dtor(struct nvkm_object *object)
+static void *
+nvkm_gpio_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_gpio *gpio = (void *)object;
+ struct nvkm_gpio *gpio = nvkm_gpio(subdev);
nvkm_event_fini(&gpio->event);
- nvkm_subdev_destroy(&gpio->base);
+ return gpio;
}
-int
-nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
-{
- const struct nvkm_gpio_impl *impl = (void *)oclass;
- struct nvkm_gpio *gpio;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
- "gpio", length, pobject);
- gpio = *pobject;
- if (ret)
- return ret;
-
- gpio->find = nvkm_gpio_find;
- gpio->set = nvkm_gpio_set;
- gpio->get = nvkm_gpio_get;
- gpio->reset = impl->reset;
-
- ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
- &gpio->event);
- if (ret)
- return ret;
-
- nv_subdev(gpio)->intr = nvkm_gpio_intr;
- return 0;
-}
+static const struct nvkm_subdev_func
+nvkm_gpio = {
+ .dtor = nvkm_gpio_dtor,
+ .init = nvkm_gpio_init,
+ .fini = nvkm_gpio_fini,
+ .intr = nvkm_gpio_intr,
+};
int
-_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gpio **pgpio)
{
struct nvkm_gpio *gpio;
- int ret;
- ret = nvkm_gpio_create(parent, engine, oclass, &gpio);
- *pobject = nv_object(gpio);
- if (ret)
- return ret;
+ if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
+ return -ENOMEM;
- return 0;
+ nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+ gpio->func = func;
+
+ return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
+ &gpio->event);
}
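
Editor's note: the interrupt path above folds the hi/lo status words into a per-line NVKM_GPIO_HI/NVKM_GPIO_LO report mask using the multiply-by-bool idiom. A small worked sketch (line values hypothetical):

    /* Hypothetical: line 3 went high, line 5 went low. */
    u32 hi = 1 << 3, lo = 1 << 5;
    int i = 3;
    u32 mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
               (NVKM_GPIO_LO * !!(lo & (1 << i))); /* == NVKM_GPIO_HI */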
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 12b3e01fca8e..6dcda55fb865 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -26,21 +26,23 @@
void
g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
- u32 intr0 = nv_rd32(gpio, 0x00e054);
- u32 intr1 = nv_rd32(gpio, 0x00e074);
- u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
- u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x00e054);
+ u32 intr1 = nvkm_rd32(device, 0x00e074);
+ u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
- nv_wr32(gpio, 0x00e054, intr0);
- nv_wr32(gpio, 0x00e074, intr1);
+ nvkm_wr32(device, 0x00e054, intr0);
+ nvkm_wr32(device, 0x00e074, intr1);
}
void
g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
- u32 inte0 = nv_rd32(gpio, 0x00e050);
- u32 inte1 = nv_rd32(gpio, 0x00e070);
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x00e050);
+ u32 inte1 = nvkm_rd32(device, 0x00e070);
if (type & NVKM_GPIO_LO)
inte0 = (inte0 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
inte1 = (inte1 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte1 = (inte1 & ~mask) | data;
- nv_wr32(gpio, 0x00e050, inte0);
- nv_wr32(gpio, 0x00e070, inte1);
+ nvkm_wr32(device, 0x00e050, inte0);
+ nvkm_wr32(device, 0x00e070, inte1);
}
-struct nvkm_oclass *
-g94_gpio_oclass = &(struct nvkm_gpio_impl) {
- .base.handle = NV_SUBDEV(GPIO, 0x94),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpio_ctor,
- .dtor = _nvkm_gpio_dtor,
- .init = _nvkm_gpio_init,
- .fini = _nvkm_gpio_fini,
- },
+static const struct nvkm_gpio_func
+g94_gpio = {
.lines = 32,
.intr_stat = g94_gpio_intr_stat,
.intr_mask = g94_gpio_intr_mask,
.drive = nv50_gpio_drive,
.sense = nv50_gpio_sense,
.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index 2c3bb255d1f8..bb7400dfaef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -24,15 +24,16 @@
#include "priv.h"
void
-gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
- struct nvkm_bios *bios = nvkm_bios(gpio);
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
u8 ver, len;
u16 entry;
int ent = -1;
while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
- u32 data = nv_ro32(bios, entry);
+ u32 data = nvbios_rd32(bios, entry);
u8 line = (data & 0x0000003f);
u8 defs = !!(data & 0x00000080);
u8 func = (data & 0x0000ff00) >> 8;
@@ -43,42 +44,43 @@ gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
(match != DCB_GPIO_UNUSED && match != func))
continue;
- gpio->set(gpio, 0, func, line, defs);
+ nvkm_gpio_set(gpio, 0, func, line, defs);
- nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
+ nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
if (unk1--)
- nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
+ nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
}
}
int
-gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
+ struct nvkm_device *device = gpio->subdev.device;
u32 data = ((dir ^ 1) << 13) | (out << 12);
- nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
- nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000, data);
+ nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
return 0;
}
int
-gf110_gpio_sense(struct nvkm_gpio *gpio, int line)
+gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
{
- return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
+ struct nvkm_device *device = gpio->subdev.device;
+ return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
}
-struct nvkm_oclass *
-gf110_gpio_oclass = &(struct nvkm_gpio_impl) {
- .base.handle = NV_SUBDEV(GPIO, 0xd0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpio_ctor,
- .dtor = _nvkm_gpio_dtor,
- .init = _nvkm_gpio_init,
- .fini = _nvkm_gpio_fini,
- },
+static const struct nvkm_gpio_func
+gf119_gpio = {
.lines = 32,
.intr_stat = g94_gpio_intr_stat,
.intr_mask = g94_gpio_intr_mask,
- .drive = gf110_gpio_drive,
- .sense = gf110_gpio_sense,
- .reset = gf110_gpio_reset,
-}.base;
+ .drive = gf119_gpio_drive,
+ .sense = gf119_gpio_sense,
+ .reset = gf119_gpio_reset,
+};
+
+int
+gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 42fd2faaaa4f..3f45afd17d5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -26,21 +26,23 @@
static void
gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
- u32 intr0 = nv_rd32(gpio, 0x00dc00);
- u32 intr1 = nv_rd32(gpio, 0x00dc80);
- u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
- u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x00dc00);
+ u32 intr1 = nvkm_rd32(device, 0x00dc80);
+ u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
+ u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
- nv_wr32(gpio, 0x00dc00, intr0);
- nv_wr32(gpio, 0x00dc80, intr1);
+ nvkm_wr32(device, 0x00dc00, intr0);
+ nvkm_wr32(device, 0x00dc80, intr1);
}
void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
- u32 inte0 = nv_rd32(gpio, 0x00dc08);
- u32 inte1 = nv_rd32(gpio, 0x00dc88);
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte0 = nvkm_rd32(device, 0x00dc08);
+ u32 inte1 = nvkm_rd32(device, 0x00dc88);
if (type & NVKM_GPIO_LO)
inte0 = (inte0 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
inte1 = (inte1 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte1 = (inte1 & ~mask) | data;
- nv_wr32(gpio, 0x00dc08, inte0);
- nv_wr32(gpio, 0x00dc88, inte1);
+ nvkm_wr32(device, 0x00dc08, inte0);
+ nvkm_wr32(device, 0x00dc88, inte1);
}
-struct nvkm_oclass *
-gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
- .base.handle = NV_SUBDEV(GPIO, 0xe0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpio_ctor,
- .dtor = _nvkm_gpio_dtor,
- .init = _nvkm_gpio_init,
- .fini = _nvkm_gpio_fini,
- },
+static const struct nvkm_gpio_func
+gk104_gpio = {
.lines = 32,
.intr_stat = gk104_gpio_intr_stat,
.intr_mask = gk104_gpio_intr_mask,
- .drive = gf110_gpio_drive,
- .sense = gf110_gpio_sense,
- .reset = gf110_gpio_reset,
-}.base;
+ .drive = gf119_gpio_drive,
+ .sense = gf119_gpio_sense,
+ .reset = gf119_gpio_reset,
+};
+
+int
+gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 2b295154247e..ae3499b48330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -28,19 +28,20 @@
static int
nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
{
+ struct nvkm_device *device = gpio->subdev.device;
if (line < 2) {
line = line * 16;
- line = nv_rd32(gpio, 0x600818) >> line;
+ line = nvkm_rd32(device, 0x600818) >> line;
return !!(line & 0x0100);
} else
if (line < 10) {
line = (line - 2) * 4;
- line = nv_rd32(gpio, 0x60081c) >> line;
+ line = nvkm_rd32(device, 0x60081c) >> line;
return !!(line & 0x04);
} else
if (line < 14) {
line = (line - 10) * 4;
- line = nv_rd32(gpio, 0x600850) >> line;
+ line = nvkm_rd32(device, 0x600850) >> line;
return !!(line & 0x04);
}
@@ -50,6 +51,7 @@ nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
static int
nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
+ struct nvkm_device *device = gpio->subdev.device;
u32 reg, mask, data;
if (line < 2) {
@@ -73,43 +75,44 @@ nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
return -EINVAL;
}
- nv_mask(gpio, reg, mask << line, data << line);
+ nvkm_mask(device, reg, mask << line, data << line);
return 0;
}
static void
nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
- u32 intr = nv_rd32(gpio, 0x001104);
- u32 stat = nv_rd32(gpio, 0x001144) & intr;
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr = nvkm_rd32(device, 0x001104);
+ u32 stat = nvkm_rd32(device, 0x001144) & intr;
*lo = (stat & 0xffff0000) >> 16;
*hi = (stat & 0x0000ffff);
- nv_wr32(gpio, 0x001104, intr);
+ nvkm_wr32(device, 0x001104, intr);
}
static void
nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
- u32 inte = nv_rd32(gpio, 0x001144);
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte = nvkm_rd32(device, 0x001144);
if (type & NVKM_GPIO_LO)
inte = (inte & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte = (inte & ~mask) | data;
- nv_wr32(gpio, 0x001144, inte);
+ nvkm_wr32(device, 0x001144, inte);
}
-struct nvkm_oclass *
-nv10_gpio_oclass = &(struct nvkm_gpio_impl) {
- .base.handle = NV_SUBDEV(GPIO, 0x10),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpio_ctor,
- .dtor = _nvkm_gpio_dtor,
- .init = _nvkm_gpio_init,
- .fini = _nvkm_gpio_fini,
- },
+static const struct nvkm_gpio_func
+nv10_gpio = {
.lines = 16,
.intr_stat = nv10_gpio_intr_stat,
.intr_mask = nv10_gpio_intr_mask,
.drive = nv10_gpio_drive,
.sense = nv10_gpio_sense,
-}.base;
+};
+
+int
+nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 6a031035bd27..8996649209ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -26,14 +26,15 @@
void
nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
- struct nvkm_bios *bios = nvkm_bios(gpio);
+ struct nvkm_device *device = gpio->subdev.device;
+ struct nvkm_bios *bios = device->bios;
u8 ver, len;
u16 entry;
int ent = -1;
while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
static const u32 regs[] = { 0xe100, 0xe28c };
- u32 data = nv_ro32(bios, entry);
+ u32 data = nvbios_rd32(bios, entry);
u8 line = (data & 0x0000001f);
u8 func = (data & 0x0000ff00) >> 8;
u8 defs = !!(data & 0x01000000);
@@ -47,9 +48,9 @@ nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
(match != DCB_GPIO_UNUSED && match != func))
continue;
- gpio->set(gpio, 0, func, line, defs);
+ nvkm_gpio_set(gpio, 0, func, line, defs);
- nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
+ nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
}
}
@@ -69,60 +70,63 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
int
nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
+ struct nvkm_device *device = gpio->subdev.device;
u32 reg, shift;
if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
+ nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
return 0;
}
int
nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
{
+ struct nvkm_device *device = gpio->subdev.device;
u32 reg, shift;
if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- return !!(nv_rd32(gpio, reg) & (4 << shift));
+ return !!(nvkm_rd32(device, reg) & (4 << shift));
}
static void
nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
- u32 intr = nv_rd32(gpio, 0x00e054);
- u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 intr = nvkm_rd32(device, 0x00e054);
+ u32 stat = nvkm_rd32(device, 0x00e050) & intr;
*lo = (stat & 0xffff0000) >> 16;
*hi = (stat & 0x0000ffff);
- nv_wr32(gpio, 0x00e054, intr);
+ nvkm_wr32(device, 0x00e054, intr);
}
static void
nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
- u32 inte = nv_rd32(gpio, 0x00e050);
+ struct nvkm_device *device = gpio->subdev.device;
+ u32 inte = nvkm_rd32(device, 0x00e050);
if (type & NVKM_GPIO_LO)
inte = (inte & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte = (inte & ~mask) | data;
- nv_wr32(gpio, 0x00e050, inte);
+ nvkm_wr32(device, 0x00e050, inte);
}
-struct nvkm_oclass *
-nv50_gpio_oclass = &(struct nvkm_gpio_impl) {
- .base.handle = NV_SUBDEV(GPIO, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_gpio_ctor,
- .dtor = _nvkm_gpio_dtor,
- .init = _nvkm_gpio_init,
- .fini = _nvkm_gpio_fini,
- },
+static const struct nvkm_gpio_func
+nv50_gpio = {
.lines = 16,
.intr_stat = nv50_gpio_intr_stat,
.intr_mask = nv50_gpio_intr_mask,
.drive = nv50_gpio_drive,
.sense = nv50_gpio_sense,
.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+ return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 382f8d44e140..371bcdbbe0d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,33 +1,9 @@
#ifndef __NVKM_GPIO_PRIV_H__
#define __NVKM_GPIO_PRIV_H__
+#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
#include <subdev/gpio.h>
-#define nvkm_gpio_create(p,e,o,d) \
- nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_gpio_destroy(p) ({ \
- struct nvkm_gpio *gpio = (p); \
- _nvkm_gpio_dtor(nv_object(gpio)); \
-})
-#define nvkm_gpio_init(p) ({ \
- struct nvkm_gpio *gpio = (p); \
- _nvkm_gpio_init(nv_object(gpio)); \
-})
-#define nvkm_gpio_fini(p,s) ({ \
- struct nvkm_gpio *gpio = (p); \
- _nvkm_gpio_fini(nv_object(gpio), (s)); \
-})
-
-int nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-int _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_gpio_dtor(struct nvkm_object *);
-int _nvkm_gpio_init(struct nvkm_object *);
-int _nvkm_gpio_fini(struct nvkm_object *, bool);
-
-struct nvkm_gpio_impl {
- struct nvkm_oclass base;
+struct nvkm_gpio_func {
int lines;
/* read and ack pending interrupts, returning only data
@@ -51,6 +27,9 @@ struct nvkm_gpio_impl {
void (*reset)(struct nvkm_gpio *, u8);
};
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
+ int index, struct nvkm_gpio **);
+
void nv50_gpio_reset(struct nvkm_gpio *, u8);
int nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
int nv50_gpio_sense(struct nvkm_gpio *, int);
@@ -58,7 +37,7 @@ int nv50_gpio_sense(struct nvkm_gpio *, int);
void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
-void gf110_gpio_reset(struct nvkm_gpio *, u8);
-int gf110_gpio_drive(struct nvkm_gpio *, int, int, int);
-int gf110_gpio_sense(struct nvkm_gpio *, int);
+void gf119_gpio_reset(struct nvkm_gpio *, u8);
+int gf119_gpio_drive(struct nvkm_gpio *, int, int, int);
+int gf119_gpio_sense(struct nvkm_gpio *, int);
#endif
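
Editor's note: porting a chipset now means filling in a const nvkm_gpio_func and forwarding to nvkm_gpio_new_(), reusing the shared hooks declared above. A hedged template (the "gxxx" chipset name is a stand-in):

    /* Template for a hypothetical 'gxxx' chipset reusing nv50/g94 hooks. */
    static const struct nvkm_gpio_func
    gxxx_gpio = {
            .lines     = 32,
            .intr_stat = g94_gpio_intr_stat,
            .intr_mask = g94_gpio_intr_mask,
            .drive     = nv50_gpio_drive,
            .sense     = nv50_gpio_sense,
            .reset     = nv50_gpio_reset,
    };

    int
    gxxx_gpio_new(struct nvkm_device *device, int index,
                  struct nvkm_gpio **pgpio)
    {
            return nvkm_gpio_new_(&gxxx_gpio, device, index, pgpio);
    }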
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index d68307409980..1f730613c237 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,16 +1,30 @@
nvkm-y += nvkm/subdev/i2c/base.o
-nvkm-y += nvkm/subdev/i2c/anx9805.o
-nvkm-y += nvkm/subdev/i2c/aux.o
-nvkm-y += nvkm/subdev/i2c/bit.o
-nvkm-y += nvkm/subdev/i2c/pad.o
-nvkm-y += nvkm/subdev/i2c/padnv04.o
-nvkm-y += nvkm/subdev/i2c/padg94.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
nvkm-y += nvkm/subdev/i2c/nv04.o
nvkm-y += nvkm/subdev/i2c/nv4e.o
nvkm-y += nvkm/subdev/i2c/nv50.o
nvkm-y += nvkm/subdev/i2c/g94.o
-nvkm-y += nvkm/subdev/i2c/gf110.o
nvkm-y += nvkm/subdev/i2c/gf117.o
+nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
nvkm-y += nvkm/subdev/i2c/gm204.o
+
+nvkm-y += nvkm/subdev/i2c/pad.o
+nvkm-y += nvkm/subdev/i2c/padnv04.o
+nvkm-y += nvkm/subdev/i2c/padnv4e.o
+nvkm-y += nvkm/subdev/i2c/padnv50.o
+nvkm-y += nvkm/subdev/i2c/padg94.o
+nvkm-y += nvkm/subdev/i2c/padgf119.o
+nvkm-y += nvkm/subdev/i2c/padgm204.o
+
+nvkm-y += nvkm/subdev/i2c/bus.o
+nvkm-y += nvkm/subdev/i2c/busnv04.o
+nvkm-y += nvkm/subdev/i2c/busnv4e.o
+nvkm-y += nvkm/subdev/i2c/busnv50.o
+nvkm-y += nvkm/subdev/i2c/busgf119.o
+nvkm-y += nvkm/subdev/i2c/bit.o
+
+nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgm204.o
+
+nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index d17dd1cf3c34..b7b01c3f7037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,272 +21,258 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "port.h"
+#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
+#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
+#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
+#include "aux.h"
+#include "bus.h"
+
+struct anx9805_pad {
+ struct nvkm_i2c_pad base;
+ struct nvkm_i2c_bus *bus;
+ u8 addr;
+};
-struct anx9805_i2c_port {
- struct nvkm_i2c_port base;
- u32 addr;
- u32 ctrl;
+struct anx9805_bus {
+ struct nvkm_i2c_bus base;
+ struct anx9805_pad *pad;
+ u8 addr;
};
static int
-anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh)
+anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
{
- struct anx9805_i2c_port *chan = (void *)port;
- struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
- u8 tmp, i;
-
- DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
+ struct anx9805_bus *bus = anx9805_bus(base);
+ struct anx9805_pad *pad = bus->pad;
+ struct i2c_adapter *adap = &pad->bus->i2c;
+ struct i2c_msg *msg = msgs;
+ int ret = -ETIMEDOUT;
+ int i, j, cnt = num;
+ u8 seg = 0x00, off = 0x00, tmp;
- nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
- nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
- nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
- nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+ tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
+ nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
+ nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+ nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
+ mdelay(5);
- i = 0;
- while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
- mdelay(5);
- if (i++ == 100) {
- nv_error(port, "link training timed out\n");
- return -ETIMEDOUT;
+ while (cnt--) {
+ if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+ nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
+ nvkm_wri2cr(adap, bus->addr, 0x41, seg);
+ nvkm_wri2cr(adap, bus->addr, 0x42, off);
+ nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
+ nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
+ nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
+ for (i = 0; i < msg->len; i++) {
+ j = 0;
+ while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
+ mdelay(5);
+ if (j++ == 32)
+ goto done;
+ }
+ msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
+ }
+ } else
+ if (!(msg->flags & I2C_M_RD)) {
+ if (msg->addr == 0x50 && msg->len == 0x01) {
+ off = msg->buf[0];
+ } else
+ if (msg->addr == 0x30 && msg->len == 0x01) {
+ seg = msg->buf[0];
+ } else
+ goto done;
+ } else {
+ goto done;
}
+ msg++;
}
- if (tmp & 0x70) {
- nv_error(port, "link training failed: 0x%02x\n", tmp);
- return -EIO;
+ ret = num;
+done:
+ nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
+ return ret;
+}
+
+static const struct nvkm_i2c_bus_func
+anx9805_bus_func = {
+ .xfer = anx9805_bus_xfer,
+};
+
+static int
+anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+ struct nvkm_i2c_bus **pbus)
+{
+ struct anx9805_pad *pad = anx9805_pad(base);
+ struct anx9805_bus *bus;
+ int ret;
+
+ if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &bus->base;
+ bus->pad = pad;
+
+ ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
+ if (ret)
+ return ret;
+
+ switch (pad->addr) {
+ case 0x39: bus->addr = 0x3d; break;
+ case 0x3b: bus->addr = 0x3f; break;
+ default:
+ return -ENOSYS;
}
- return 1;
+ return 0;
}
+struct anx9805_aux {
+ struct nvkm_i2c_aux base;
+ struct anx9805_pad *pad;
+ u8 addr;
+};
+
static int
-anx9805_aux(struct nvkm_i2c_port *port, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
{
- struct anx9805_i2c_port *chan = (void *)port;
- struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
+ struct anx9805_aux *aux = anx9805_aux(base);
+ struct anx9805_pad *pad = aux->pad;
+ struct i2c_adapter *adap = &pad->bus->i2c;
int i, ret = -ETIMEDOUT;
u8 buf[16] = {};
u8 tmp;
- DBG("%02x %05x %d\n", type, addr, size);
+ AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
- tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
- nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
- nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
- nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+ tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
+ nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
+ nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+ nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
- nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+ nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
if (!(type & 1)) {
memcpy(buf, data, size);
- DBG("%16ph", buf);
+ AUX_DBG(&aux->base, "%16ph", buf);
for (i = 0; i < size; i++)
- nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
+ nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
}
- nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
- nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
- nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
- nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
- nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+ nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+ nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
+ nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
+ nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
+ nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
i = 0;
- while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+ while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
mdelay(5);
if (i++ == 32)
goto done;
}
- if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+ if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
ret = -EIO;
goto done;
}
if (type & 1) {
for (i = 0; i < size; i++)
- buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
- DBG("%16ph", buf);
+ buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
+ AUX_DBG(&aux->base, "%16ph", buf);
memcpy(data, buf, size);
}
ret = 0;
done:
- nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+ nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
return ret;
}
-static const struct nvkm_i2c_func
-anx9805_aux_func = {
- .aux = anx9805_aux,
- .lnk_ctl = anx9805_train,
-};
-
static int
-anx9805_aux_chan_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
+ int link_nr, int link_bw, bool enh)
{
- struct nvkm_i2c_port *mast = (void *)parent;
- struct anx9805_i2c_port *chan;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_aux_algo, &anx9805_aux_func,
- &chan);
- *pobject = nv_object(chan);
- if (ret)
- return ret;
-
- switch ((oclass->handle & 0xff00) >> 8) {
- case 0x0d:
- chan->addr = 0x38;
- chan->ctrl = 0x39;
- break;
- case 0x0e:
- chan->addr = 0x3c;
- chan->ctrl = 0x3b;
- break;
- default:
- BUG_ON(1);
- }
-
- if (mast->adapter.algo == &i2c_bit_algo) {
- struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
- algo->udelay = max(algo->udelay, 40);
- }
-
- return 0;
-}
-
-static struct nvkm_ofuncs
-anx9805_aux_ofuncs = {
- .ctor = anx9805_aux_chan_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
-};
+ struct anx9805_aux *aux = anx9805_aux(base);
+ struct anx9805_pad *pad = aux->pad;
+ struct i2c_adapter *adap = &pad->bus->i2c;
+ u8 tmp, i;
-static int
-anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
- struct anx9805_i2c_port *port = adap->algo_data;
- struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
- struct i2c_msg *msg = msgs;
- int ret = -ETIMEDOUT;
- int i, j, cnt = num;
- u8 seg = 0x00, off = 0x00, tmp;
+ AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
+ link_nr, link_bw, enh);
- tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
- nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
- nv_wri2cr(mast, port->ctrl, 0x07, tmp);
- nv_wri2cr(mast, port->addr, 0x43, 0x05);
- mdelay(5);
+ nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
+ nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+ nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
+ nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
- while (cnt--) {
- if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
- nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
- nv_wri2cr(mast, port->addr, 0x41, seg);
- nv_wri2cr(mast, port->addr, 0x42, off);
- nv_wri2cr(mast, port->addr, 0x44, msg->len);
- nv_wri2cr(mast, port->addr, 0x45, 0x00);
- nv_wri2cr(mast, port->addr, 0x43, 0x01);
- for (i = 0; i < msg->len; i++) {
- j = 0;
- while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
- mdelay(5);
- if (j++ == 32)
- goto done;
- }
- msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
- }
- } else
- if (!(msg->flags & I2C_M_RD)) {
- if (msg->addr == 0x50 && msg->len == 0x01) {
- off = msg->buf[0];
- } else
- if (msg->addr == 0x30 && msg->len == 0x01) {
- seg = msg->buf[0];
- } else
- goto done;
- } else {
- goto done;
+ i = 0;
+ while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
+ mdelay(5);
+ if (i++ == 100) {
+ AUX_ERR(&aux->base, "link training timeout");
+ return -ETIMEDOUT;
}
- msg++;
}
- ret = num;
-done:
- nv_wri2cr(mast, port->addr, 0x43, 0x00);
- return ret;
-}
+ if (tmp & 0x70) {
+ AUX_ERR(&aux->base, "link training failed");
+ return -EIO;
+ }
-static u32
-anx9805_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return 0;
}
-static const struct i2c_algorithm
-anx9805_i2c_algo = {
- .master_xfer = anx9805_xfer,
- .functionality = anx9805_func
-};
-
-static const struct nvkm_i2c_func
-anx9805_i2c_func = {
+static const struct nvkm_i2c_aux_func
+anx9805_aux_func = {
+ .xfer = anx9805_aux_xfer,
+ .lnk_ctl = anx9805_aux_lnk_ctl,
};
static int
-anx9805_ddc_port_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+ struct nvkm_i2c_aux **pbus)
{
- struct nvkm_i2c_port *mast = (void *)parent;
- struct anx9805_i2c_port *port;
+ struct anx9805_pad *pad = anx9805_pad(base);
+ struct anx9805_aux *aux;
int ret;
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &anx9805_i2c_algo, &anx9805_i2c_func, &port);
- *pobject = nv_object(port);
+ if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &aux->base;
+ aux->pad = pad;
+
+ ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
if (ret)
return ret;
- switch ((oclass->handle & 0xff00) >> 8) {
- case 0x0d:
- port->addr = 0x3d;
- port->ctrl = 0x39;
- break;
- case 0x0e:
- port->addr = 0x3f;
- port->ctrl = 0x3b;
- break;
+ switch (pad->addr) {
+ case 0x39: aux->addr = 0x38; break;
+ case 0x3b: aux->addr = 0x3c; break;
default:
- BUG_ON(1);
- }
-
- if (mast->adapter.algo == &i2c_bit_algo) {
- struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
- algo->udelay = max(algo->udelay, 40);
+ return -ENOSYS;
}
return 0;
}
-static struct nvkm_ofuncs
-anx9805_ddc_ofuncs = {
- .ctor = anx9805_ddc_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
+static const struct nvkm_i2c_pad_func
+anx9805_pad_func = {
+ .bus_new_4 = anx9805_bus_new,
+ .aux_new_6 = anx9805_aux_new,
};
-struct nvkm_oclass
-nvkm_anx9805_sclass[] = {
- { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
- { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
- { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
- { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
- {}
-};
+int
+anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
+ struct nvkm_i2c_pad **ppad)
+{
+ struct anx9805_pad *pad;
+
+ if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
+ return -ENOMEM;
+ *ppad = &pad->base;
+
+ nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
+ pad->bus = bus;
+ pad->addr = addr;
+ return 0;
+}
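
Editor's note: the external encoder is now modelled as a pad hanging off the DDC bus it is wired to, with its bus and aux channels created on demand through the pad vtable. A hedged construction sketch (the id is a placeholder; 0x39 is one of the two slave addresses the code above accepts):

    /* Illustrative: attach an ANX9805 pad to an existing DDC bus. */
    static int
    attach_anx9805(struct nvkm_i2c_bus *ddc, int id,
                   struct nvkm_i2c_pad **ppad)
    {
            return anx9805_pad_new(ddc, id, 0x39, ppad);
    }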
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 1c18860f80d1..f0851d57df2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,50 +21,17 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
-
-int
-nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
- struct nvkm_i2c *i2c = nvkm_i2c(port);
- if (port->func->aux) {
- int ret = i2c->acquire(port, 0);
- if (ret == 0) {
- ret = port->func->aux(port, true, 9, addr, data, size);
- i2c->release(port);
- }
- return ret;
- }
- return -ENODEV;
-}
-
-int
-nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
- struct nvkm_i2c *i2c = nvkm_i2c(port);
- if (port->func->aux) {
- int ret = i2c->acquire(port, 0);
- if (ret == 0) {
- ret = port->func->aux(port, true, 8, addr, data, size);
- i2c->release(port);
- }
- return ret;
- }
- return -ENODEV;
-}
+#include "aux.h"
+#include "pad.h"
static int
-aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nvkm_i2c_port *port = adap->algo_data;
- struct nvkm_i2c *i2c = nvkm_i2c(port);
+ struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
- if (!port->func->aux)
- return -ENODEV;
-
- ret = i2c->acquire(port, 0);
+ ret = nvkm_i2c_aux_acquire(aux);
if (ret)
return ret;
@@ -84,9 +51,9 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
+ ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
if (ret < 0) {
- i2c->release(port);
+ nvkm_i2c_aux_release(aux);
return ret;
}
@@ -97,17 +64,111 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
msg++;
}
- i2c->release(port);
+ nvkm_i2c_aux_release(aux);
return num;
}
static u32
-aux_func(struct i2c_adapter *adap)
+nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm nvkm_i2c_aux_algo = {
- .master_xfer = aux_xfer,
- .functionality = aux_func
+const struct i2c_algorithm
+nvkm_i2c_aux_i2c_algo = {
+ .master_xfer = nvkm_i2c_aux_i2c_xfer,
+ .functionality = nvkm_i2c_aux_i2c_func
};
+
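+/* keep the pad in AUX mode while a sink is being monitored for hotplug/IRQ
+ * events, and power it down otherwise */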
+void
+nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
+{
+ struct nvkm_i2c_pad *pad = aux->pad;
+ AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
+ if (monitor)
+ nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
+ else
+ nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_i2c_pad *pad = aux->pad;
+ AUX_TRACE(aux, "release");
+ nvkm_i2c_pad_release(pad);
+ mutex_unlock(&aux->mutex);
+}
+
+int
+nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_i2c_pad *pad = aux->pad;
+ int ret;
+ AUX_TRACE(aux, "acquire");
+ mutex_lock(&aux->mutex);
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+ if (ret)
+ mutex_unlock(&aux->mutex);
+ return ret;
+}
+
+int
+nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
+ u32 addr, u8 *data, u8 size)
+{
+ return aux->func->xfer(aux, retry, type, addr, data, size);
+}
+
+int
+nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
+{
+ if (aux->func->lnk_ctl)
+ return aux->func->lnk_ctl(aux, nr, bw, ef);
+ return -ENODEV;
+}
+
+void
+nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
+{
+ struct nvkm_i2c_aux *aux = *paux;
+ if (aux && !WARN_ON(!aux->func)) {
+ AUX_TRACE(aux, "dtor");
+ list_del(&aux->head);
+ i2c_del_adapter(&aux->i2c);
+ kfree(*paux);
+ *paux = NULL;
+ }
+}
+
+int
+nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int id,
+ struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = pad->i2c->subdev.device;
+
+ aux->func = func;
+ aux->pad = pad;
+ aux->id = id;
+ mutex_init(&aux->mutex);
+ list_add_tail(&aux->head, &pad->i2c->aux);
+ AUX_TRACE(aux, "ctor");
+
+ snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
+ dev_name(device->dev), id);
+ aux->i2c.owner = THIS_MODULE;
+ aux->i2c.dev.parent = device->dev;
+ aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
+ return i2c_add_adapter(&aux->i2c);
+}
+
+int
+nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int id,
+ struct nvkm_i2c_aux **paux)
+{
+ if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_i2c_aux_ctor(func, pad, id, *paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
new file mode 100644
index 000000000000..35a892e4a4c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_I2C_AUX_H__
+#define __NVKM_I2C_AUX_H__
+#include "pad.h"
+
+struct nvkm_i2c_aux_func {
+ int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+ u32 addr, u8 *data, u8 size);
+ int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+ bool enhanced_framing);
+};
+
+int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int id, struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int id, struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+ u32 addr, u8 *data, u8 size);
+
+int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+
+#define AUX_MSG(b,l,f,a...) do { \
+ struct nvkm_i2c_aux *_aux = (b); \
+ nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a); \
+} while(0)
+#define AUX_ERR(b,f,a...) AUX_MSG((b), error, f, ##a)
+#define AUX_DBG(b,f,a...) AUX_MSG((b), debug, f, ##a)
+#define AUX_TRACE(b,f,a...) AUX_MSG((b), trace, f, ##a)
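+
+/* Sketch of typical use by an output driver (hypothetical caller; the
+ * channel would come from nvkm_i2c_aux_find(), and type 9 is the native
+ * read opcode the old nv_rdaux() path used):
+ *
+ *	u8 dpcd0;
+ *	if (!nvkm_i2c_aux_acquire(aux)) {
+ *		nvkm_i2c_aux_xfer(aux, true, 9, 0x0000, &dpcd0, 1);
+ *		nvkm_i2c_aux_release(aux);
+ *	}
+ */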
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
new file mode 100644
index 000000000000..954f5b76bfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
+#include "aux.h"
+
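+/* each AUX channel occupies a 0x50-byte register stride selected by 'ch' */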
+struct g94_i2c_aux {
+ struct nvkm_i2c_aux base;
+ int ch;
+};
+
+static void
+g94_i2c_aux_fini(struct g94_i2c_aux *aux)
+{
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+g94_i2c_aux_init(struct g94_i2c_aux *aux)
+{
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+ g94_i2c_aux_fini(aux);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+static int
+g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct g94_i2c_aux *aux = g94_i2c_aux(obj);
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ const u32 base = aux->ch * 0x50;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ret, i;
+
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+ ret = g94_i2c_aux_init(aux);
+ if (ret < 0)
+ goto out;
+
+ stat = nvkm_rd32(device, 0x00e4e8 + base);
+ if (!(stat & 0x10000000)) {
+ AUX_TRACE(&aux->base, "sink not detected");
+ ret = -ENXIO;
+ goto out;
+ }
+
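+	/* writes: stage the payload (up to 16 bytes) in the TX data registers */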
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+ nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nvkm_wr32(device, 0x00e4e0 + base, addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+ for (retries = 0; !ret && retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
+ nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "timeout %08x", ctrl);
+ ret = -EIO;
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+ ret = 1;
+
+ /* read status, and check if transaction completed ok */
+ stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
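+		/* 0x8/0x2 in bits 19:16 look like defer/retry statuses
+		 * (assumption): honour them only if the caller asked to retry */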
+ if ((stat & 0x000f0000) == 0x00080000 ||
+ (stat & 0x000f0000) == 0x00020000)
+ ret = retry ? 0 : 1;
+ if ((stat & 0x00000100))
+ ret = -ETIMEDOUT;
+ if ((stat & 0x00000e00))
+ ret = -EIO;
+
+ AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
+ AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ g94_i2c_aux_fini(aux);
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux_func = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ struct g94_i2c_aux *aux;
+
+ if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+ return -ENOMEM;
+ *paux = &aux->base;
+
+ nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ aux->ch = drive;
+ aux->base.intr = 1 << aux->ch;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
new file mode 100644
index 000000000000..bed231b56dbd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#include "aux.h"
+
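+/* same transfer logic as auxg94.c, with the AUX register block moved
+ * from 0x00e4xx to 0x00d9xx */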
+struct gm204_i2c_aux {
+ struct nvkm_i2c_aux base;
+ int ch;
+};
+
+static void
+gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+{
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+{
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+ gm204_i2c_aux_fini(aux);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+static int
+gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+ struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+ const u32 base = aux->ch * 0x50;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ret, i;
+
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+ ret = gm204_i2c_aux_init(aux);
+ if (ret < 0)
+ goto out;
+
+ stat = nvkm_rd32(device, 0x00d958 + base);
+ if (!(stat & 0x10000000)) {
+ AUX_TRACE(&aux->base, "sink not detected");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+ nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nvkm_rd32(device, 0x00d954 + base);
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nvkm_wr32(device, 0x00d950 + base, addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+ for (retries = 0; !ret && retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
+ nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nvkm_rd32(device, 0x00d954 + base);
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR(&aux->base, "timeout %08x", ctrl);
+ ret = -EIO;
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+ ret = 1;
+
+ /* read status, and check if transaction completed ok */
+ stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
+ if ((stat & 0x000f0000) == 0x00080000 ||
+ (stat & 0x000f0000) == 0x00020000)
+ ret = retry ? 0 : 1;
+ if ((stat & 0x00000100))
+ ret = -ETIMEDOUT;
+ if ((stat & 0x00000e00))
+ ret = -EIO;
+
+ AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
+ AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ gm204_i2c_aux_fini(aux);
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+gm204_i2c_aux_func = {
+ .xfer = gm204_i2c_aux_xfer,
+};
+
+int
+gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ struct gm204_i2c_aux *aux;
+
+ if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+ return -ENOMEM;
+ *paux = &aux->base;
+
+ nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+ aux->ch = drive;
+ aux->base.intr = 1 << aux->ch;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 9200f122c02c..243a71ff0a0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,328 +22,91 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include "aux.h"
+#include "bus.h"
#include "pad.h"
-#include <core/device.h>
#include <core/notify.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
-/******************************************************************************
- * interface to linux i2c bit-banging algorithm
- *****************************************************************************/
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
-#define CSTMSEL true
-#else
-#define CSTMSEL false
-#endif
-
-static int
-nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
+static struct nvkm_i2c_pad *
+nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
{
- struct i2c_algo_bit_data *bit = adap->algo_data;
- struct nvkm_i2c_port *port = bit->data;
- return nvkm_i2c(port)->acquire(port, bit->timeout);
-}
+ struct nvkm_i2c_pad *pad;
-static void
-nvkm_i2c_post_xfer(struct i2c_adapter *adap)
-{
- struct i2c_algo_bit_data *bit = adap->algo_data;
- struct nvkm_i2c_port *port = bit->data;
- return nvkm_i2c(port)->release(port);
-}
-
-static void
-nvkm_i2c_setscl(void *data, int state)
-{
- struct nvkm_i2c_port *port = data;
- port->func->drive_scl(port, state);
-}
-
-static void
-nvkm_i2c_setsda(void *data, int state)
-{
- struct nvkm_i2c_port *port = data;
- port->func->drive_sda(port, state);
-}
-
-static int
-nvkm_i2c_getscl(void *data)
-{
- struct nvkm_i2c_port *port = data;
- return port->func->sense_scl(port);
-}
-
-static int
-nvkm_i2c_getsda(void *data)
-{
- struct nvkm_i2c_port *port = data;
- return port->func->sense_sda(port);
-}
-
-/******************************************************************************
- * base i2c "port" class implementation
- *****************************************************************************/
-
-int
-_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_i2c_port *port = (void *)object;
- struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
- nv_ofuncs(pad)->fini(nv_object(pad), suspend);
- return nvkm_object_fini(&port->base, suspend);
-}
-
-void
-_nvkm_i2c_port_dtor(struct nvkm_object *object)
-{
- struct nvkm_i2c_port *port = (void *)object;
- i2c_del_adapter(&port->adapter);
- nvkm_object_destroy(&port->base);
-}
-
-int
-nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u8 index,
- const struct i2c_algorithm *algo,
- const struct nvkm_i2c_func *func,
- int size, void **pobject)
-{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvkm_i2c_port *port;
- int ret;
-
- ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
- port = *pobject;
- if (ret)
- return ret;
-
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nvkm-%s-%d", device->name, index);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = nv_device_base(device);
- port->index = index;
- port->aux = -1;
- port->func = func;
- mutex_init(&port->mutex);
-
- if ( algo == &nvkm_i2c_bit_algo &&
- !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
- struct i2c_algo_bit_data *bit;
-
- bit = kzalloc(sizeof(*bit), GFP_KERNEL);
- if (!bit)
- return -ENOMEM;
-
- bit->udelay = 10;
- bit->timeout = usecs_to_jiffies(2200);
- bit->data = port;
- bit->pre_xfer = nvkm_i2c_pre_xfer;
- bit->post_xfer = nvkm_i2c_post_xfer;
- bit->setsda = nvkm_i2c_setsda;
- bit->setscl = nvkm_i2c_setscl;
- bit->getsda = nvkm_i2c_getsda;
- bit->getscl = nvkm_i2c_getscl;
-
- port->adapter.algo_data = bit;
- ret = i2c_bit_add_bus(&port->adapter);
- } else {
- port->adapter.algo_data = port;
- port->adapter.algo = algo;
- ret = i2c_add_adapter(&port->adapter);
+ list_for_each_entry(pad, &i2c->pad, head) {
+ if (pad->id == id)
+ return pad;
}
- if (ret == 0)
- list_add_tail(&port->head, &i2c->ports);
- return ret;
+ return NULL;
}
-/******************************************************************************
- * base i2c subdev class implementation
- *****************************************************************************/
-
-static struct nvkm_i2c_port *
-nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
+struct nvkm_i2c_bus *
+nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
{
- struct nvkm_bios *bios = nvkm_bios(i2c);
- struct nvkm_i2c_port *port;
+ struct nvkm_bios *bios = i2c->subdev.device->bios;
+ struct nvkm_i2c_bus *bus;
- if (index == NV_I2C_DEFAULT(0) ||
- index == NV_I2C_DEFAULT(1)) {
+ if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
u8 ver, hdr, cnt, len;
u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
if (i2c && ver >= 0x30) {
- u8 auxidx = nv_ro08(bios, i2c + 4);
- if (index == NV_I2C_DEFAULT(0))
- index = (auxidx & 0x0f) >> 0;
+ u8 auxidx = nvbios_rd08(bios, i2c + 4);
+ if (id == NVKM_I2C_BUS_PRI)
+ id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
else
- index = (auxidx & 0xf0) >> 4;
+ id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
} else {
- index = 2;
+ id = NVKM_I2C_BUS_CCB(2);
}
}
- list_for_each_entry(port, &i2c->ports, head) {
- if (port->index == index)
- return port;
+ list_for_each_entry(bus, &i2c->bus, head) {
+ if (bus->id == id)
+ return bus;
}
return NULL;
}
-static struct nvkm_i2c_port *
-nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
+struct nvkm_i2c_aux *
+nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
{
- struct nvkm_i2c_port *port;
+ struct nvkm_i2c_aux *aux;
- list_for_each_entry(port, &i2c->ports, head) {
- if (nv_hclass(port) == type)
- return port;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ if (aux->id == id)
+ return aux;
}
return NULL;
}
static void
-nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
-{
- struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
- struct nvkm_i2c *i2c = nvkm_i2c(port);
-
- if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
- nv_ofuncs(pad)->fini(nv_object(pad), false);
- wake_up_all(&i2c->wait);
- }
-}
-
-static int
-nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
-{
- struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-
- if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
- struct nvkm_object *owner = (void *)pad->port;
- do {
- if (owner == (void *)port)
- return 0;
- owner = owner->parent;
- } while(owner);
- nvkm_i2c_release_pad(port);
- return -EBUSY;
- }
-
- pad->next = port;
- nv_ofuncs(pad)->init(nv_object(pad));
- return 0;
-}
-
-static int
-nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
-{
- struct nvkm_i2c *i2c = nvkm_i2c(port);
-
- if (timeout) {
- if (wait_event_timeout(i2c->wait,
- nvkm_i2c_try_acquire_pad(port) == 0,
- timeout) == 0)
- return -EBUSY;
- } else {
- wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
- }
-
- return 0;
-}
-
-static void
-nvkm_i2c_release(struct nvkm_i2c_port *port)
-__releases(pad->mutex)
-{
- nvkm_i2c(port)->release_pad(port);
- mutex_unlock(&port->mutex);
-}
-
-static int
-nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
-__acquires(pad->mutex)
-{
- int ret;
- mutex_lock(&port->mutex);
- if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
- mutex_unlock(&port->mutex);
- return ret;
-}
-
-static int
-nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
- struct nvkm_i2c_board_info *info,
- bool (*match)(struct nvkm_i2c_port *,
- struct i2c_board_info *, void *), void *data)
-{
- struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
- int i;
-
- if (!port) {
- nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
- return -ENODEV;
- }
-
- nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
- for (i = 0; info[i].dev.addr; i++) {
- u8 orig_udelay = 0;
-
- if ((port->adapter.algo == &i2c_bit_algo) &&
- (info[i].udelay != 0)) {
- struct i2c_algo_bit_data *algo = port->adapter.algo_data;
- nv_debug(i2c, "using custom udelay %d instead of %d\n",
- info[i].udelay, algo->udelay);
- orig_udelay = algo->udelay;
- algo->udelay = info[i].udelay;
- }
-
- if (nv_probe_i2c(port, info[i].dev.addr) &&
- (!match || match(port, &info[i].dev, data))) {
- nv_info(i2c, "detected %s: %s\n", what,
- info[i].dev.type);
- return i;
- }
-
- if (orig_udelay) {
- struct i2c_algo_bit_data *algo = port->adapter.algo_data;
- algo->udelay = orig_udelay;
- }
- }
-
- nv_debug(i2c, "no devices found.\n");
- return -ENODEV;
-}
-
-static void
-nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
{
struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
- struct nvkm_i2c_port *port = i2c->find(i2c, index);
- const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
- if (port && port->aux >= 0)
- impl->aux_mask(i2c, type, 1 << port->aux, 0);
+ struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+ if (aux)
+ i2c->func->aux_mask(i2c, type, aux->intr, 0);
}
static void
-nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
{
struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
- struct nvkm_i2c_port *port = i2c->find(i2c, index);
- const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
- if (port && port->aux >= 0)
- impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
+ struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+ if (aux)
+ i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
}
static int
nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+ struct nvkm_notify *notify)
{
struct nvkm_i2c_ntfy_req *req = data;
if (!WARN_ON(size != sizeof(*req))) {
@@ -355,38 +118,6 @@ nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
return -EINVAL;
}
-static void
-nvkm_i2c_intr(struct nvkm_subdev *subdev)
-{
- struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
- struct nvkm_i2c *i2c = nvkm_i2c(subdev);
- struct nvkm_i2c_port *port;
- u32 hi, lo, rq, tx, e;
-
- if (impl->aux_stat) {
- impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
- if (hi || lo || rq || tx) {
- list_for_each_entry(port, &i2c->ports, head) {
- if (e = 0, port->aux < 0)
- continue;
-
- if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
- if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
- if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
- if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
- if (e) {
- struct nvkm_i2c_ntfy_rep rep = {
- .mask = e,
- };
- nvkm_event_send(&i2c->event, rep.mask,
- port->index, &rep,
- sizeof(rep));
- }
- }
- }
- }
-}
-
static const struct nvkm_event_func
nvkm_i2c_intr_func = {
.ctor = nvkm_i2c_intr_ctor,
@@ -394,229 +125,272 @@ nvkm_i2c_intr_func = {
.fini = nvkm_i2c_intr_fini,
};
-int
-_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_intr(struct nvkm_subdev *subdev)
{
- struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
- struct nvkm_i2c *i2c = (void *)object;
- struct nvkm_i2c_port *port;
- u32 mask;
- int ret;
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_aux *aux;
+ u32 hi, lo, rq, tx;
+
+ if (!i2c->func->aux_stat)
+ return;
+
+ i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
+ if (!hi && !lo && !rq && !tx)
+ return;
- list_for_each_entry(port, &i2c->ports, head) {
- ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
- if (ret && suspend)
- goto fail;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ u32 mask = 0;
+ if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
+ if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
+ if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
+ if (tx & aux->intr) mask |= NVKM_I2C_DONE;
+ if (mask) {
+ struct nvkm_i2c_ntfy_rep rep = {
+ .mask = mask,
+ };
+ nvkm_event_send(&i2c->event, rep.mask, aux->id,
+ &rep, sizeof(rep));
+ }
}
+}
+
+static int
+nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_pad *pad;
+ u32 mask;
- if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
- impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
- impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
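+	/* disable all AUX interrupts, and consume any already-pending status */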
+ if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
+ i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
+ i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
}
- return nvkm_subdev_fini(&i2c->base, suspend);
-fail:
- list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
- nv_ofuncs(port)->init(nv_object(port));
+ list_for_each_entry(pad, &i2c->pad, head) {
+ nvkm_i2c_pad_fini(pad);
}
- return ret;
+ return 0;
}
-int
-_nvkm_i2c_init(struct nvkm_object *object)
+static int
+nvkm_i2c_init(struct nvkm_subdev *subdev)
{
- struct nvkm_i2c *i2c = (void *)object;
- struct nvkm_i2c_port *port;
- int ret;
-
- ret = nvkm_subdev_init(&i2c->base);
- if (ret == 0) {
- list_for_each_entry(port, &i2c->ports, head) {
- ret = nv_ofuncs(port)->init(nv_object(port));
- if (ret)
- goto fail;
- }
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_pad *pad;
+
+ list_for_each_entry(pad, &i2c->pad, head) {
+ nvkm_i2c_pad_init(pad);
}
- return ret;
-fail:
- list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
- nv_ofuncs(port)->fini(nv_object(port), false);
+ list_for_each_entry(bus, &i2c->bus, head) {
+ nvkm_i2c_bus_init(bus);
}
- return ret;
+ return 0;
}
-void
-_nvkm_i2c_dtor(struct nvkm_object *object)
+static void *
+nvkm_i2c_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_i2c *i2c = (void *)object;
- struct nvkm_i2c_port *port, *temp;
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
nvkm_event_fini(&i2c->event);
- list_for_each_entry_safe(port, temp, &i2c->ports, head) {
- nvkm_object_ref(NULL, (struct nvkm_object **)&port);
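+	/* aux channels and busses hold pointers to their pad: tear them down first */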
+ while (!list_empty(&i2c->aux)) {
+ struct nvkm_i2c_aux *aux =
+ list_first_entry(&i2c->aux, typeof(*aux), head);
+ nvkm_i2c_aux_del(&aux);
}
- nvkm_subdev_destroy(&i2c->base);
-}
-
-static struct nvkm_oclass *
-nvkm_i2c_extdev_sclass[] = {
- nvkm_anx9805_sclass,
-};
+ while (!list_empty(&i2c->bus)) {
+ struct nvkm_i2c_bus *bus =
+ list_first_entry(&i2c->bus, typeof(*bus), head);
+ nvkm_i2c_bus_del(&bus);
+ }
-static void
-nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
- struct dcb_i2c_entry *info)
-{
- const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
- struct nvkm_oclass *oclass;
- struct nvkm_object *parent;
- struct nvkm_object *object;
- int ret, pad;
-
- if (info->share != DCB_I2C_UNUSED) {
- pad = info->share;
- oclass = impl->pad_s;
- } else {
- if (type != DCB_I2C_NVIO_AUX)
- pad = 0x100 + info->drive;
- else
- pad = 0x100 + info->auxch;
- oclass = impl->pad_x;
+ while (!list_empty(&i2c->pad)) {
+ struct nvkm_i2c_pad *pad =
+ list_first_entry(&i2c->pad, typeof(*pad), head);
+ nvkm_i2c_pad_del(&pad);
}
- ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
- NULL, pad, &parent);
- if (ret < 0)
- return;
+ return i2c;
+}
- oclass = impl->sclass;
- do {
- ret = -EINVAL;
- if (oclass->handle == type) {
- ret = nvkm_object_ctor(parent, NULL, oclass,
- info, index, &object);
- }
- } while (ret && (++oclass)->handle);
+static const struct nvkm_subdev_func
+nvkm_i2c = {
+ .dtor = nvkm_i2c_dtor,
+ .init = nvkm_i2c_init,
+ .fini = nvkm_i2c_fini,
+ .intr = nvkm_i2c_intr,
+};
- nvkm_object_ref(NULL, &parent);
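+/* external encoder drivers, matched against the DCB external-device type */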
+static const struct nvkm_i2c_drv {
+ u8 bios;
+ u8 addr;
+ int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
+ struct nvkm_i2c_pad **);
}
+nvkm_i2c_drv[] = {
+ { 0x0d, 0x39, anx9805_pad_new },
+ { 0x0e, 0x3b, anx9805_pad_new },
+ {}
+};
int
-nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
+ int index, struct nvkm_i2c **pi2c)
{
- struct nvkm_bios *bios = nvkm_bios(parent);
+ struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c;
- struct nvkm_object *object;
- struct dcb_i2c_entry info;
- int ret, i, j, index = -1;
- struct dcb_output outp;
- u8 ver, hdr;
- u32 data;
-
- ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
- *pobject = nv_object(i2c);
- if (ret)
- return ret;
-
- nv_subdev(i2c)->intr = nvkm_i2c_intr;
- i2c->find = nvkm_i2c_find;
- i2c->find_type = nvkm_i2c_find_type;
- i2c->acquire_pad = nvkm_i2c_acquire_pad;
- i2c->release_pad = nvkm_i2c_release_pad;
- i2c->acquire = nvkm_i2c_acquire;
- i2c->release = nvkm_i2c_release;
- i2c->identify = nvkm_i2c_identify;
- init_waitqueue_head(&i2c->wait);
- INIT_LIST_HEAD(&i2c->ports);
-
- while (!dcb_i2c_parse(bios, ++index, &info)) {
- switch (info.type) {
- case DCB_I2C_NV04_BIT:
- case DCB_I2C_NV4E_BIT:
- case DCB_I2C_NVIO_BIT:
- nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
- info.type, &info);
- break;
- case DCB_I2C_NVIO_AUX:
- nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
- info.type, &info);
- break;
- case DCB_I2C_PMGR:
- if (info.drive != DCB_I2C_UNUSED) {
- nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
- DCB_I2C_NVIO_BIT, &info);
- }
- if (info.auxch != DCB_I2C_UNUSED) {
- nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
- DCB_I2C_NVIO_AUX, &info);
- }
- break;
- case DCB_I2C_UNUSED:
- default:
+ struct dcb_i2c_entry ccbE;
+ struct dcb_output dcbE;
+ u8 ver, hdr;
+ int ret, i;
+
+ if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+ i2c->func = func;
+ INIT_LIST_HEAD(&i2c->pad);
+ INIT_LIST_HEAD(&i2c->bus);
+ INIT_LIST_HEAD(&i2c->aux);
+
+ i = -1;
+ while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
+ struct nvkm_i2c_pad *pad = NULL;
+ struct nvkm_i2c_bus *bus = NULL;
+ struct nvkm_i2c_aux *aux = NULL;
+
+ nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
+ "sense %02x share %02x auxch %02x\n", i, ccbE.type,
+ ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
+
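+		/* shared (hybrid) pads are created once, then reused by every
+		 * ccb entry that references them */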
+ if (ccbE.share != DCB_I2C_UNUSED) {
+ const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
+ if (!(pad = nvkm_i2c_pad_find(i2c, id)))
+ ret = func->pad_s_new(i2c, id, &pad);
+ else
+ ret = 0;
+ } else {
+ ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
+ }
+
+ if (ret) {
+ nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
+ nvkm_i2c_pad_del(&pad);
+ continue;
+ }
+
+ if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
+ ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
+ ccbE.drive,
+ ccbE.sense, &bus);
+ } else
+ if (pad->func->bus_new_4 &&
+ ( ccbE.type == DCB_I2C_NV4E_BIT ||
+ ccbE.type == DCB_I2C_NVIO_BIT ||
+ (ccbE.type == DCB_I2C_PMGR &&
+ ccbE.drive != DCB_I2C_UNUSED))) {
+ ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
+ ccbE.drive, &bus);
+ }
+
+ if (ret) {
+ nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
+ nvkm_i2c_bus_del(&bus);
+ }
+
+ if (pad->func->aux_new_6 &&
+ ( ccbE.type == DCB_I2C_NVIO_AUX ||
+ (ccbE.type == DCB_I2C_PMGR &&
+ ccbE.auxch != DCB_I2C_UNUSED))) {
+ ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
+ ccbE.auxch, &aux);
+ } else {
+ ret = 0;
+ }
+
+ if (ret) {
+ nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
+ nvkm_i2c_aux_del(&aux);
+ }
+
+ if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
+ nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
continue;
}
}
- /* in addition to the busses specified in the i2c table, there
- * may be ddc/aux channels hiding behind external tmds/dp/etc
- * transmitters.
- */
- index = NV_I2C_EXT(0);
i = -1;
- while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
- if (!outp.location || !outp.extdev)
+ while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
+ const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_pad *pad;
+
+ /* internal outputs handled by native i2c busses (above) */
+ if (!dcbE.location)
continue;
- switch (outp.type) {
- case DCB_OUTPUT_TMDS:
- info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
- break;
- case DCB_OUTPUT_DP:
- info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
- break;
- default:
+ /* we need an i2c bus to talk to the external encoder */
+ bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
+ if (!bus) {
+ nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
continue;
}
- ret = -ENODEV;
- j = -1;
- while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
- parent = nv_object(i2c->find(i2c, outp.i2c_index));
- oclass = nvkm_i2c_extdev_sclass[j];
- do {
- if (oclass->handle != info.type)
- continue;
- ret = nvkm_object_ctor(parent, NULL, oclass,
- NULL, index++, &object);
- } while (ret && (++oclass)->handle);
+ /* ... and a driver for it */
+ while (drv->pad_new) {
+ if (drv->bios == dcbE.extdev)
+ break;
+ drv++;
}
- }
- ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
- if (ret)
- return ret;
-
- return 0;
-}
+ if (!drv->pad_new) {
+ nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
+ i, dcbE.extdev);
+ continue;
+ }
-int
-_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_i2c *i2c;
- int ret;
+ /* find/create an instance of the driver */
+ pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
+ if (!pad) {
+ const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
+ ret = drv->pad_new(bus, id, drv->addr, &pad);
+ if (ret) {
+ nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
+ i, ret);
+ nvkm_i2c_pad_del(&pad);
+ continue;
+ }
+ }
- ret = nvkm_i2c_create(parent, engine, oclass, &i2c);
- *pobject = nv_object(i2c);
- if (ret)
- return ret;
+ /* create any i2c bus / aux channel required by the output */
+ if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
+ const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
+ struct nvkm_i2c_aux *aux = NULL;
+ ret = pad->func->aux_new_6(pad, id, 0, &aux);
+ if (ret) {
+ nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
+ i, ret);
+ nvkm_i2c_aux_del(&aux);
+ }
+ } else
+ if (pad->func->bus_new_4) {
+ const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
+ struct nvkm_i2c_bus *bus = NULL;
+ ret = pad->func->bus_new_4(pad, id, 0, &bus);
+ if (ret) {
+ nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
+ i, ret);
+ nvkm_i2c_bus_del(&bus);
+ }
+ }
+ }
- return 0;
+ return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 861a453d2a67..cdce11bbabe5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -9,7 +9,7 @@
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -21,7 +21,7 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "bus.h"
#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
#define T_TIMEOUT 2200000
@@ -29,205 +29,188 @@
#define T_HOLD 5000
static inline void
-i2c_drive_scl(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
{
- port->func->drive_scl(port, state);
+ bus->func->drive_scl(bus, state);
}
static inline void
-i2c_drive_sda(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
{
- port->func->drive_sda(port, state);
+ bus->func->drive_sda(bus, state);
}
static inline int
-i2c_sense_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
{
- return port->func->sense_scl(port);
+ return bus->func->sense_scl(bus);
}
static inline int
-i2c_sense_sda(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
{
- return port->func->sense_sda(port);
+ return bus->func->sense_sda(bus);
}
static void
-i2c_delay(struct nvkm_i2c_port *port, u32 nsec)
+nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
{
udelay((nsec + 500) / 1000);
}
static bool
-i2c_raise_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
{
u32 timeout = T_TIMEOUT / T_RISEFALL;
- i2c_drive_scl(port, 1);
+ nvkm_i2c_drive_scl(bus, 1);
do {
- i2c_delay(port, T_RISEFALL);
- } while (!i2c_sense_scl(port) && --timeout);
+ nvkm_i2c_delay(bus, T_RISEFALL);
+ } while (!nvkm_i2c_sense_scl(bus) && --timeout);
return timeout != 0;
}
static int
-i2c_start(struct nvkm_i2c_port *port)
+i2c_start(struct nvkm_i2c_bus *bus)
{
int ret = 0;
- if (!i2c_sense_scl(port) ||
- !i2c_sense_sda(port)) {
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 1);
- if (!i2c_raise_scl(port))
+ if (!nvkm_i2c_sense_scl(bus) ||
+ !nvkm_i2c_sense_sda(bus)) {
+ nvkm_i2c_drive_scl(bus, 0);
+ nvkm_i2c_drive_sda(bus, 1);
+ if (!nvkm_i2c_raise_scl(bus))
ret = -EBUSY;
}
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_HOLD);
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_drive_sda(bus, 0);
+ nvkm_i2c_delay(bus, T_HOLD);
+ nvkm_i2c_drive_scl(bus, 0);
+ nvkm_i2c_delay(bus, T_HOLD);
return ret;
}
static void
-i2c_stop(struct nvkm_i2c_port *port)
+i2c_stop(struct nvkm_i2c_bus *bus)
{
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_RISEFALL);
-
- i2c_drive_scl(port, 1);
- i2c_delay(port, T_HOLD);
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_drive_scl(bus, 0);
+ nvkm_i2c_drive_sda(bus, 0);
+ nvkm_i2c_delay(bus, T_RISEFALL);
+
+ nvkm_i2c_drive_scl(bus, 1);
+ nvkm_i2c_delay(bus, T_HOLD);
+ nvkm_i2c_drive_sda(bus, 1);
+ nvkm_i2c_delay(bus, T_HOLD);
}
static int
-i2c_bitw(struct nvkm_i2c_port *port, int sda)
+i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
{
- i2c_drive_sda(port, sda);
- i2c_delay(port, T_RISEFALL);
+ nvkm_i2c_drive_sda(bus, sda);
+ nvkm_i2c_delay(bus, T_RISEFALL);
- if (!i2c_raise_scl(port))
+ if (!nvkm_i2c_raise_scl(bus))
return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_delay(bus, T_HOLD);
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_drive_scl(bus, 0);
+ nvkm_i2c_delay(bus, T_HOLD);
return 0;
}
static int
-i2c_bitr(struct nvkm_i2c_port *port)
+i2c_bitr(struct nvkm_i2c_bus *bus)
{
int sda;
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_RISEFALL);
+ nvkm_i2c_drive_sda(bus, 1);
+ nvkm_i2c_delay(bus, T_RISEFALL);
- if (!i2c_raise_scl(port))
+ if (!nvkm_i2c_raise_scl(bus))
return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_delay(bus, T_HOLD);
- sda = i2c_sense_sda(port);
+ sda = nvkm_i2c_sense_sda(bus);
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
+ nvkm_i2c_drive_scl(bus, 0);
+ nvkm_i2c_delay(bus, T_HOLD);
return sda;
}
static int
-i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last)
+nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
{
int i, bit;
*byte = 0;
for (i = 7; i >= 0; i--) {
- bit = i2c_bitr(port);
+ bit = i2c_bitr(bus);
if (bit < 0)
return bit;
*byte |= bit << i;
}
- return i2c_bitw(port, last ? 1 : 0);
+ return i2c_bitw(bus, last ? 1 : 0);
}
static int
-i2c_put_byte(struct nvkm_i2c_port *port, u8 byte)
+nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
{
int i, ret;
for (i = 7; i >= 0; i--) {
- ret = i2c_bitw(port, !!(byte & (1 << i)));
+ ret = i2c_bitw(bus, !!(byte & (1 << i)));
if (ret < 0)
return ret;
}
- ret = i2c_bitr(port);
+ ret = i2c_bitr(bus);
if (ret == 1) /* nack */
ret = -EIO;
return ret;
}
static int
-i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg)
+i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
if (msg->flags & I2C_M_RD)
addr |= 1;
- return i2c_put_byte(port, addr);
+ return nvkm_i2c_put_byte(bus, addr);
}
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
{
- struct nvkm_i2c_port *port = adap->algo_data;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
- ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
- if (ret)
- return ret;
-
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
- ret = i2c_start(port);
+ ret = i2c_start(bus);
if (ret == 0)
- ret = i2c_addr(port, msg);
+ ret = i2c_addr(bus, msg);
if (msg->flags & I2C_M_RD) {
while (!ret && remaining--)
- ret = i2c_get_byte(port, ptr++, !remaining);
+ ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
} else {
while (!ret && remaining--)
- ret = i2c_put_byte(port, *ptr++);
+ ret = nvkm_i2c_put_byte(bus, *ptr++);
}
msg++;
}
- i2c_stop(port);
- nvkm_i2c(port)->release(port);
+ i2c_stop(bus);
return (ret < 0) ? ret : num;
}
#else
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
{
return -ENODEV;
}
#endif
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nvkm_i2c_bit_algo = {
- .master_xfer = i2c_bit_xfer,
- .functionality = i2c_bit_func
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
new file mode 100644
index 000000000000..807a2b67bd64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "bus.h"
+#include "pad.h"
+
+#include <core/option.h>
+
+/*******************************************************************************
+ * i2c-algo-bit
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
+{
+ struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+ return nvkm_i2c_bus_acquire(bus);
+}
+
+static void
+nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
+{
+ struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+ return nvkm_i2c_bus_release(bus);
+}
+
+static void
+nvkm_i2c_bus_setscl(void *data, int state)
+{
+ struct nvkm_i2c_bus *bus = data;
+ bus->func->drive_scl(bus, state);
+}
+
+static void
+nvkm_i2c_bus_setsda(void *data, int state)
+{
+ struct nvkm_i2c_bus *bus = data;
+ bus->func->drive_sda(bus, state);
+}
+
+static int
+nvkm_i2c_bus_getscl(void *data)
+{
+ struct nvkm_i2c_bus *bus = data;
+ return bus->func->sense_scl(bus);
+}
+
+static int
+nvkm_i2c_bus_getsda(void *data)
+{
+ struct nvkm_i2c_bus *bus = data;
+ return bus->func->sense_sda(bus);
+}
+
+/*******************************************************************************
+ * !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+ int ret;
+
+ ret = nvkm_i2c_bus_acquire(bus);
+ if (ret)
+ return ret;
+
+ ret = bus->func->xfer(bus, msgs, num);
+ nvkm_i2c_bus_release(bus);
+ return ret;
+}
+
+static u32
+nvkm_i2c_bus_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+nvkm_i2c_bus_algo = {
+ .master_xfer = nvkm_i2c_bus_xfer,
+ .functionality = nvkm_i2c_bus_func,
+};
+
+/*******************************************************************************
+ * nvkm_i2c_bus base
+ ******************************************************************************/
+void
+nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
+{
+ BUS_TRACE(bus, "init");
+ if (bus->func->init)
+ bus->func->init(bus);
+}
+
+void
+nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
+{
+ struct nvkm_i2c_pad *pad = bus->pad;
+ BUS_TRACE(bus, "release");
+ nvkm_i2c_pad_release(pad);
+ mutex_unlock(&bus->mutex);
+}
+
+int
+nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
+{
+ struct nvkm_i2c_pad *pad = bus->pad;
+ int ret;
+ BUS_TRACE(bus, "acquire");
+ mutex_lock(&bus->mutex);
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+ if (ret)
+ mutex_unlock(&bus->mutex);
+ return ret;
+}
+
+int
+nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
+ struct nvkm_i2c_bus_probe *info,
+ bool (*match)(struct nvkm_i2c_bus *,
+ struct i2c_board_info *, void *), void *data)
+{
+ int i;
+
+ BUS_DBG(bus, "probing %ss", what);
+ for (i = 0; info[i].dev.addr; i++) {
+ u8 orig_udelay = 0;
+
+ if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
+ struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+ BUS_DBG(bus, "%dms delay instead of %dms",
+ info[i].udelay, algo->udelay);
+ orig_udelay = algo->udelay;
+ algo->udelay = info[i].udelay;
+ }
+
+ if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
+ (!match || match(bus, &info[i].dev, data))) {
+ BUS_DBG(bus, "detected %s: %s",
+ what, info[i].dev.type);
+ return i;
+ }
+
+ if (orig_udelay) {
+ struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+ algo->udelay = orig_udelay;
+ }
+ }
+
+ BUS_DBG(bus, "no devices found.");
+ return -ENODEV;
+}
+
+void
+nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
+{
+ struct nvkm_i2c_bus *bus = *pbus;
+ if (bus && !WARN_ON(!bus->func)) {
+ BUS_TRACE(bus, "dtor");
+ list_del(&bus->head);
+ i2c_del_adapter(&bus->i2c);
+ kfree(bus->i2c.algo_data);
+ kfree(*pbus);
+ *pbus = NULL;
+ }
+}
+
+int
+nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
+ struct nvkm_i2c_pad *pad, int id,
+ struct nvkm_i2c_bus *bus)
+{
+ struct nvkm_device *device = pad->i2c->subdev.device;
+ struct i2c_algo_bit_data *bit;
+#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+ const bool internal = false;
+#else
+ const bool internal = true;
+#endif
+ int ret;
+
+ bus->func = func;
+ bus->pad = pad;
+ bus->id = id;
+ mutex_init(&bus->mutex);
+ list_add_tail(&bus->head, &pad->i2c->bus);
+ BUS_TRACE(bus, "ctor");
+
+ snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
+ dev_name(device->dev), id);
+ bus->i2c.owner = THIS_MODULE;
+ bus->i2c.dev.parent = device->dev;
+
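+	/* bit-banged busses normally go through i2c-algo-bit; the NvI2C
+	 * config option forces the internal algorithm instead */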
+ if ( bus->func->drive_scl &&
+ !nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
+ if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
+ return -ENOMEM;
+ bit->udelay = 10;
+ bit->timeout = usecs_to_jiffies(2200);
+ bit->data = bus;
+ bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
+ bit->post_xfer = nvkm_i2c_bus_post_xfer;
+ bit->setscl = nvkm_i2c_bus_setscl;
+ bit->setsda = nvkm_i2c_bus_setsda;
+ bit->getscl = nvkm_i2c_bus_getscl;
+ bit->getsda = nvkm_i2c_bus_getsda;
+ bus->i2c.algo_data = bit;
+ ret = i2c_bit_add_bus(&bus->i2c);
+ } else {
+ bus->i2c.algo = &nvkm_i2c_bus_algo;
+ ret = i2c_add_adapter(&bus->i2c);
+ }
+
+ return ret;
+}
+
+int
+nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
+ struct nvkm_i2c_pad *pad, int id,
+ struct nvkm_i2c_bus **pbus)
+{
+ if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
new file mode 100644
index 000000000000..e1be14c23e54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -0,0 +1,37 @@
+#ifndef __NVKM_I2C_BUS_H__
+#define __NVKM_I2C_BUS_H__
+#include "pad.h"
+
+struct nvkm_i2c_bus_func {
+ void (*init)(struct nvkm_i2c_bus *);
+ void (*drive_scl)(struct nvkm_i2c_bus *, int state);
+ void (*drive_sda)(struct nvkm_i2c_bus *, int state);
+ int (*sense_scl)(struct nvkm_i2c_bus *);
+ int (*sense_sda)(struct nvkm_i2c_bus *);
+ int (*xfer)(struct nvkm_i2c_bus *, struct i2c_msg *, int num);
+};
+
+int nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+ int id, struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+ int id, struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+
+int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
+
+int nv04_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, u8,
+ struct nvkm_i2c_bus **);
+
+int nv4e_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int nv50_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int gf119_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+
+#define BUS_MSG(b,l,f,a...) do { \
+ struct nvkm_i2c_bus *_bus = (b); \
+ nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a); \
+} while(0)
+#define BUS_ERR(b,f,a...) BUS_MSG((b), error, f, ##a)
+#define BUS_DBG(b,f,a...) BUS_MSG((b), debug, f, ##a)
+#define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a)
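+
+/* Sketch of typical use (hypothetical caller; the bus would come from
+ * nvkm_i2c_bus_find(), and i2c_transfer() acquires/releases the pad
+ * internally through the adapter hooks):
+ *
+ *	u8 b;
+ *	struct i2c_msg msg = { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &b };
+ *	int ret = i2c_transfer(&bus->i2c, &msg, 1);
+ */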
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
new file mode 100644
index 000000000000..96bbdda0f439
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
+#include "bus.h"
+
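+/* per-bus port register: bit 0 drives SCL, bit 1 drives SDA,
+ * bits 4/5 sense them */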
+struct gf119_i2c_bus {
+ struct nvkm_i2c_bus base;
+ u32 addr;
+};
+
+static void
+gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+ struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
+}
+
+static void
+gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+ struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
+}
+
+static int
+gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+ struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
+}
+
+static int
+gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+ struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
+}
+
+static void
+gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+ struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_wr32(device, bus->addr, 0x00000007);
+}
+
+static const struct nvkm_i2c_bus_func
+gf119_i2c_bus_func = {
+ .init = gf119_i2c_bus_init,
+ .drive_scl = gf119_i2c_bus_drive_scl,
+ .drive_sda = gf119_i2c_bus_drive_sda,
+ .sense_scl = gf119_i2c_bus_sense_scl,
+ .sense_sda = gf119_i2c_bus_sense_sda,
+ .xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+ struct nvkm_i2c_bus **pbus)
+{
+ struct gf119_i2c_bus *bus;
+
+ if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &bus->base;
+
+ nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
+ bus->addr = 0x00d014 + (drive * 0x20);
+ return 0;
+}
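
In the gf119 implementation each bus owns one register at 0x00d014 +
drive * 0x20: writing bit 0x1 drives SCL and bit 0x2 drives SDA, while
reading bits 0x10 and 0x20 senses the lines; init writes 0x7 to release
both lines (what bit 0x4 does is not spelled out in this patch). A
hypothetical helper, not part of the patch, sketching the open-drain
protocol these callbacks imply (assumes <linux/delay.h> for udelay()
and that struct nvkm_i2c_bus exposes its func pointer, as other nvkm
objects do):

    static int
    example_gf119_read_sda(struct nvkm_i2c_bus *bus)
    {
    	bus->func->drive_sda(bus, 1);     /* release SDA (bit 0x2) */
    	udelay(5);                        /* arbitrary settle time */
    	return bus->func->sense_sda(bus); /* sample bit 0x20 */
    }
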
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
new file mode 100644
index 000000000000..a58db159231f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv04_i2c_bus {
+ struct nvkm_i2c_bus base;
+ u8 drive;
+ u8 sense;
+};
+
+static void
+nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ u8 val = nvkm_rdvgac(device, 0, bus->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ u8 val = nvkm_rdvgac(device, 0, bus->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+ struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
+}
+
+static int
+nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+ struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
+}
+
+static const struct nvkm_i2c_bus_func
+nv04_i2c_bus_func = {
+ .drive_scl = nv04_i2c_bus_drive_scl,
+ .drive_sda = nv04_i2c_bus_drive_sda,
+ .sense_scl = nv04_i2c_bus_sense_scl,
+ .sense_sda = nv04_i2c_bus_sense_sda,
+ .xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
+ struct nvkm_i2c_bus **pbus)
+{
+ struct nv04_i2c_bus *bus;
+
+ if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &bus->base;
+
+ nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
+ bus->drive = drive;
+ bus->sense = sense;
+ return 0;
+}
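
The nv04 bus bit-bangs through VGA CRTC registers via nvkm_rdvgac() and
nvkm_wrvgac(): bit 0x20 drives SCL, bit 0x10 drives SDA, and bit 0x01
is kept set on every write. The masks 0xdf and 0xef are simply the byte
complements of those bits; drive_scl() restated for clarity:

    u8 val = nvkm_rdvgac(device, 0, bus->drive);
    val = state ? (val | 0x20) : (val & (u8)~0x20); /* 0xdf == ~0x20 */
    nvkm_wrvgac(device, 0, bus->drive, val | 0x01); /* keep 0x01 set */
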
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
new file mode 100644
index 000000000000..cdd73dcb1197
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
+#include "bus.h"
+
+struct nv4e_i2c_bus {
+ struct nvkm_i2c_bus base;
+ u32 addr;
+};
+
+static void
+nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+ struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+ struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
+}
+
+static const struct nvkm_i2c_bus_func
+nv4e_i2c_bus_func = {
+ .drive_scl = nv4e_i2c_bus_drive_scl,
+ .drive_sda = nv4e_i2c_bus_drive_sda,
+ .sense_scl = nv4e_i2c_bus_sense_scl,
+ .sense_sda = nv4e_i2c_bus_sense_sda,
+ .xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+ struct nvkm_i2c_bus **pbus)
+{
+ struct nv4e_i2c_bus *bus;
+
+ if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &bus->base;
+
+ nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
+ bus->addr = 0x600800 + drive;
+ return 0;
+}
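
nv4e packs drive and enable bits into mask fields. nvkm_mask() is a
read-modify-write helper: it reads the register, clears the mask bits,
sets the data bits, and writes the result back. Restating the SCL
callback under that semantic:

    /* drive_scl(1): reg = (reg & ~0x2f) | 0x21;   release SCL  */
    /* drive_scl(0): reg = (reg & ~0x2f) | 0x01;   pull SCL low */

Bit 0x01 stays set in both cases, presumably the same pad-enable bit
that nv04 maintains.
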
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
new file mode 100644
index 000000000000..8db8399381ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv50_i2c_bus {
+ struct nvkm_i2c_bus base;
+ u32 addr;
+ u32 data;
+};
+
+static void
+nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ if (state) bus->data |= 0x01;
+ else bus->data &= 0xfe;
+ nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static void
+nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+ struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ if (state) bus->data |= 0x02;
+ else bus->data &= 0xfd;
+ nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static int
+nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+ struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
+}
+
+static int
+nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+ struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
+}
+
+static void
+nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+ struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+ struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+ nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
+}
+
+static const struct nvkm_i2c_bus_func
+nv50_i2c_bus_func = {
+ .init = nv50_i2c_bus_init,
+ .drive_scl = nv50_i2c_bus_drive_scl,
+ .drive_sda = nv50_i2c_bus_drive_sda,
+ .sense_scl = nv50_i2c_bus_sense_scl,
+ .sense_sda = nv50_i2c_bus_sense_sda,
+ .xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+ struct nvkm_i2c_bus **pbus)
+{
+ static const u32 addr[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+ };
+ struct nv50_i2c_bus *bus;
+
+ if (drive >= ARRAY_SIZE(addr)) {
+ nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
+ return -ENODEV;
+ }
+
+ if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+ return -ENOMEM;
+ *pbus = &bus->base;
+
+ nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
+ bus->addr = addr[drive];
+ bus->data = 0x00000007;
+ return 0;
+}
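
nv50 keeps a software shadow of the register in bus->data and rewrites
the whole word on every drive call; init re-arms the shadow to 0x7. The
constructor also validates the DCB drive index against a fixed 10-entry
address table, so a caller sees two distinct failures (sketch):

    struct nvkm_i2c_bus *bus;
    int ret = nv50_i2c_bus_new(pad, id, drive, &bus);
    if (ret) /* -ENODEV if drive >= 10, -ENOMEM on allocation failure */
    	return ret;
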
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index 2a2dd47b9835..bb2a31d88161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,26 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
void
g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
- u32 intr = nv_rd32(i2c, 0x00e06c);
- u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
+ struct nvkm_device *device = i2c->subdev.device;
+ u32 intr = nvkm_rd32(device, 0x00e06c);
+ u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
}
- nv_wr32(i2c, 0x00e06c, intr);
+ nvkm_wr32(device, 0x00e06c, intr);
}
void
g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
- u32 temp = nv_rd32(i2c, 0x00e068), i;
+ struct nvkm_device *device = i2c->subdev.device;
+ u32 temp = nvkm_rd32(device, 0x00e068), i;
for (i = 0; i < 8; i++) {
if (mask & (1 << i)) {
if (!(data & (1 << i))) {
@@ -50,230 +53,20 @@ g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
temp |= type << (i * 4);
}
}
- nv_wr32(i2c, 0x00e068, temp);
-}
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(aux, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
-
- return 0;
-}
-
-int
-g94_aux(struct nvkm_i2c_port *base, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
-{
- struct nvkm_i2c *aux = nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ch = port->addr;
- int ret, i;
-
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
- ret = auxch_init(aux, ch);
- if (ret)
- goto out;
-
- stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
- }
- }
-
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
- /* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- ret = -EIO;
- goto out;
- }
- } while (ctrl & 0x00010000);
- ret = 1;
-
- /* read status, and check if transaction completed ok */
- stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
- if ((stat & 0x000f0000) == 0x00080000 ||
- (stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
- if ((stat & 0x00000100))
- ret = -ETIMEDOUT;
- if ((stat & 0x00000e00))
- ret = -EIO;
-
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
-
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
- }
-
-out:
- auxch_fini(aux, ch);
- return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+ nvkm_wr32(device, 0x00e068, temp);
}
static const struct nvkm_i2c_func
-g94_i2c_func = {
- .drive_scl = nv50_i2c_drive_scl,
- .drive_sda = nv50_i2c_drive_sda,
- .sense_scl = nv50_i2c_sense_scl,
- .sense_sda = nv50_i2c_sense_sda,
-};
-
-static int
-g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
-{
- struct dcb_i2c_entry *info = data;
- struct nv50_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- if (info->drive >= nv50_i2c_addr_nr)
- return -EINVAL;
-
- port->state = 7;
- port->addr = nv50_i2c_addr[info->drive];
- return 0;
-}
-
-static const struct nvkm_i2c_func
-g94_aux_func = {
- .aux = g94_aux,
+g94_i2c = {
+ .pad_x_new = g94_i2c_pad_x_new,
+ .pad_s_new = g94_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = g94_aux_stat,
+ .aux_mask = g94_aux_mask,
};
int
-g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- struct dcb_i2c_entry *info = data;
- struct nv50_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_aux_algo, &g94_aux_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- port->base.aux = info->auxch;
- port->addr = info->auxch;
- return 0;
+ return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
}
-
-static struct nvkm_oclass
-g94_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g94_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = nv50_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g94_aux_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-g94_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0x94),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = g94_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &g94_i2c_pad_oclass,
- .aux = 4,
- .aux_stat = g94_aux_stat,
- .aux_mask = g94_aux_mask,
-}.base;
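
The large g94 AUX transfer path removed above is presumably relocated
to the dedicated aux code introduced elsewhere in this series, leaving
this file with only interrupt plumbing and the chip descriptor. The
aux_stat() helper unpacks one nibble per AUX channel, with bits 1/2/4/8
meaning hi/lo/rq/tx respectively. A worked decode:

    /* stat == 0x00000081:
     *   nibble 0 (ch 0) == 0x1  ->  *hi |= 1 << 0
     *   nibble 1 (ch 1) == 0x8  ->  *tx |= 1 << 1
     * result: *hi = 0x01, *lo = 0x00, *rq = 0x00, *tx = 0x02 */
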
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
deleted file mode 100644
index 4d4ac6638140..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-static int
-gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00000010);
-}
-
-static int
-gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00000020);
-}
-
-static const struct nvkm_i2c_func
-gf110_i2c_func = {
- .drive_scl = nv50_i2c_drive_scl,
- .drive_sda = nv50_i2c_drive_sda,
- .sense_scl = gf110_i2c_sense_scl,
- .sense_sda = gf110_i2c_sense_sda,
-};
-
-int
-gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
-{
- struct dcb_i2c_entry *info = data;
- struct nv50_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- port->state = 0x00000007;
- port->addr = 0x00d014 + (info->drive * 0x20);
- return 0;
-}
-
-struct nvkm_oclass
-gf110_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf110_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = nv50_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g94_aux_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0xd0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = gf110_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &g94_i2c_pad_oclass,
- .aux = 4,
- .aux_stat = g94_aux_stat,
- .aux_mask = g94_aux_mask,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index e290b40f2d13..ae4aad3fcd2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,18 +21,16 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
-struct nvkm_oclass *
-gf117_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0xd7),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = gf110_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &nv04_i2c_pad_oclass,
-}.base;
+static const struct nvkm_i2c_func
+gf117_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+};
+
+int
+gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
new file mode 100644
index 000000000000..6f2b02af42c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "pad.h"
+
+static const struct nvkm_i2c_func
+gf119_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
+ .aux = 4,
+ .aux_stat = g94_aux_stat,
+ .aux_mask = g94_aux_mask,
+};
+
+int
+gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+}
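
After the rework, each chip file reduces to a const function table plus
a one-line constructor. A hypothetical new chip (all "gxxx" names are
invented for illustration) would follow the same pattern:

    static const struct nvkm_i2c_func
    gxxx_i2c = {
    	.pad_x_new = gf119_i2c_pad_x_new, /* reuse an existing pad ctor */
    };

    int
    gxxx_i2c_new(struct nvkm_device *device, int index,
    	     struct nvkm_i2c **pi2c)
    {
    	return nvkm_i2c_new_(&gxxx_i2c, device, index, pi2c);
    }
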
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 1a464903a992..f9f6bf4b66c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,26 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
void
gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
- u32 intr = nv_rd32(i2c, 0x00dc60);
- u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
+ struct nvkm_device *device = i2c->subdev.device;
+ u32 intr = nvkm_rd32(device, 0x00dc60);
+ u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
}
- nv_wr32(i2c, 0x00dc60, intr);
+ nvkm_wr32(device, 0x00dc60, intr);
}
void
gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
- u32 temp = nv_rd32(i2c, 0x00dc68), i;
+ struct nvkm_device *device = i2c->subdev.device;
+ u32 temp = nvkm_rd32(device, 0x00dc68), i;
for (i = 0; i < 8; i++) {
if (mask & (1 << i)) {
if (!(data & (1 << i))) {
@@ -50,22 +53,20 @@ gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
temp |= type << (i * 4);
}
}
- nv_wr32(i2c, 0x00dc68, temp);
+ nvkm_wr32(device, 0x00dc68, temp);
}
-struct nvkm_oclass *
-gk104_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0xe0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = gf110_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &g94_i2c_pad_oclass,
+static const struct nvkm_i2c_func
+gk104_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gf119_i2c_pad_s_new,
.aux = 4,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+ return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index ab64237b3842..ff9f7d62f6be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,199 +21,20 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
- nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
- const u32 unksel = 1; /* nfi which to use, or if it matters.. */
- const u32 ureq = unksel ? 0x00100000 : 0x00200000;
- const u32 urep = unksel ? 0x01000000 : 0x02000000;
- u32 ctrl, timeout;
-
- /* wait up to 1ms for any previous transaction to be done... */
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
- return -EBUSY;
- }
- } while (ctrl & 0x03010000);
-
- /* set some magic, and wait up to 1ms for it to appear */
- nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("magic wait 0x%08x\n", ctrl);
- auxch_fini(aux, ch);
- return -EBUSY;
- }
- } while ((ctrl & 0x03000000) != urep);
-
- return 0;
-}
-
-int
-gm204_aux(struct nvkm_i2c_port *base, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
-{
- struct nvkm_i2c *aux = nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- u32 ctrl, stat, timeout, retries;
- u32 xbuf[4] = {};
- int ch = port->addr;
- int ret, i;
-
- AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
- ret = auxch_init(aux, ch);
- if (ret)
- goto out;
-
- stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
- if (!(stat & 0x10000000)) {
- AUX_DBG("sink not detected\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (!(type & 1)) {
- memcpy(xbuf, data, size);
- for (i = 0; i < 16; i += 4) {
- AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
- nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
- }
- }
-
- ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
- ctrl &= ~0x0001f0ff;
- ctrl |= type << 12;
- ctrl |= size - 1;
- nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
-
- /* (maybe) retry transaction a number of times on failure... */
- for (retries = 0; !ret && retries < 32; retries++) {
- /* reset, and delay a while if this is a retry */
- nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
- nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
- if (retries)
- udelay(400);
-
- /* transaction request, wait up to 1ms for it to complete */
- nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
-
- timeout = 1000;
- do {
- ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
- udelay(1);
- if (!timeout--) {
- AUX_ERR("tx req timeout 0x%08x\n", ctrl);
- ret = -EIO;
- goto out;
- }
- } while (ctrl & 0x00010000);
- ret = 1;
-
- /* read status, and check if transaction completed ok */
- stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
- if ((stat & 0x000f0000) == 0x00080000 ||
- (stat & 0x000f0000) == 0x00020000)
- ret = retry ? 0 : 1;
- if ((stat & 0x00000100))
- ret = -ETIMEDOUT;
- if ((stat & 0x00000e00))
- ret = -EIO;
-
- AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
- }
-
- if (type & 1) {
- for (i = 0; i < 16; i += 4) {
- xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
- AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
- }
- memcpy(data, xbuf, size);
- }
-
-out:
- auxch_fini(aux, ch);
- return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
-}
+#include "priv.h"
+#include "pad.h"
static const struct nvkm_i2c_func
-gm204_aux_func = {
- .aux = gm204_aux,
+gm204_i2c = {
+ .pad_x_new = gf119_i2c_pad_x_new,
+ .pad_s_new = gm204_i2c_pad_s_new,
+ .aux = 8,
+ .aux_stat = gk104_aux_stat,
+ .aux_mask = gk104_aux_mask,
};
int
-gm204_aux_port_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- struct dcb_i2c_entry *info = data;
- struct nv50_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- port->base.aux = info->auxch;
- port->addr = info->auxch;
- return 0;
+ return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
}
-
-struct nvkm_oclass
-gm204_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf110_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = nv50_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_aux_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0x24),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = gm204_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
- .pad_s = &gm204_i2c_pad_oclass,
- .aux = 8,
- .aux_stat = gk104_aux_stat,
- .aux_mask = gk104_aux_mask,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 4cdf1c489353..18776f49355c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -22,107 +22,15 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv04_i2c_priv {
- struct nvkm_i2c base;
-};
-
-struct nv04_i2c_port {
- struct nvkm_i2c_port base;
- u8 drive;
- u8 sense;
-};
-
-static void
-nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
- struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv04_i2c_port *port = (void *)base;
- u8 val = nv_rdvgac(priv, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static void
-nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
- struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv04_i2c_port *port = (void *)base;
- u8 val = nv_rdvgac(priv, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static int
-nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
- struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv04_i2c_port *port = (void *)base;
- return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
-}
-
-static int
-nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
- struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv04_i2c_port *port = (void *)base;
- return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
-}
+#include "pad.h"
static const struct nvkm_i2c_func
-nv04_i2c_func = {
- .drive_scl = nv04_i2c_drive_scl,
- .drive_sda = nv04_i2c_drive_sda,
- .sense_scl = nv04_i2c_sense_scl,
- .sense_sda = nv04_i2c_sense_sda,
+nv04_i2c = {
+ .pad_x_new = nv04_i2c_pad_new,
};
-static int
-nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+int
+nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- struct dcb_i2c_entry *info = data;
- struct nv04_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- port->drive = info->drive;
- port->sense = info->sense;
- return 0;
+ return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
}
-
-static struct nvkm_oclass
-nv04_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = nv04_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 046fe5e2ea19..6b762f7cee9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -22,99 +22,15 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv4e_i2c_priv {
- struct nvkm_i2c base;
-};
-
-struct nv4e_i2c_port {
- struct nvkm_i2c_port base;
- u32 addr;
-};
-
-static void
-nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
- struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv4e_i2c_port *port = (void *)base;
- nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
-}
-
-static void
-nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
- struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv4e_i2c_port *port = (void *)base;
- nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
-}
-
-static int
-nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
- struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv4e_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00040000);
-}
-
-static int
-nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
- struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv4e_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00080000);
-}
+#include "pad.h"
static const struct nvkm_i2c_func
-nv4e_i2c_func = {
- .drive_scl = nv4e_i2c_drive_scl,
- .drive_sda = nv4e_i2c_drive_sda,
- .sense_scl = nv4e_i2c_sense_scl,
- .sense_sda = nv4e_i2c_sense_sda,
+nv4e_i2c = {
+ .pad_x_new = nv4e_i2c_pad_new,
};
-static int
-nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+int
+nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- struct dcb_i2c_entry *info = data;
- struct nv4e_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- port->addr = 0x600800 + info->drive;
- return 0;
+ return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
}
-
-static struct nvkm_oclass
-nv4e_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv4e_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = _nvkm_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0x4e),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = nv4e_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index fba5b26a5682..75640ab97d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,113 +21,16 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
-
-void
-nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(priv, port->addr, port->state);
-}
-
-void
-nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(priv, port->addr, port->state);
-}
-
-int
-nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00000001);
-}
-
-int
-nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
- struct nv50_i2c_port *port = (void *)base;
- return !!(nv_rd32(priv, port->addr) & 0x00000002);
-}
+#include "priv.h"
+#include "pad.h"
static const struct nvkm_i2c_func
-nv50_i2c_func = {
- .drive_scl = nv50_i2c_drive_scl,
- .drive_sda = nv50_i2c_drive_sda,
- .sense_scl = nv50_i2c_sense_scl,
- .sense_sda = nv50_i2c_sense_sda,
-};
-
-const u32 nv50_i2c_addr[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
+nv50_i2c = {
+ .pad_x_new = nv50_i2c_pad_new,
};
-const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
-
-static int
-nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
-{
- struct dcb_i2c_entry *info = data;
- struct nv50_i2c_port *port;
- int ret;
-
- ret = nvkm_i2c_port_create(parent, engine, oclass, index,
- &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
- *pobject = nv_object(port);
- if (ret)
- return ret;
-
- if (info->drive >= nv50_i2c_addr_nr)
- return -EINVAL;
-
- port->state = 0x00000007;
- port->addr = nv50_i2c_addr[info->drive];
- return 0;
-}
int
-nv50_i2c_port_init(struct nvkm_object *object)
+nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object);
- struct nv50_i2c_port *port = (void *)object;
- nv_wr32(priv, port->addr, port->state);
- return nvkm_i2c_port_init(&port->base);
+ return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
}
-
-static struct nvkm_oclass
-nv50_i2c_sclass[] = {
- { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_i2c_port_ctor,
- .dtor = _nvkm_i2c_port_dtor,
- .init = nv50_i2c_port_init,
- .fini = _nvkm_i2c_port_fini,
- },
- },
- {}
-};
-
-struct nvkm_oclass *
-nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
- .base.handle = NV_SUBDEV(I2C, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_ctor,
- .dtor = _nvkm_i2c_dtor,
- .init = _nvkm_i2c_init,
- .fini = _nvkm_i2c_fini,
- },
- .sclass = nv50_i2c_sclass,
- .pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
deleted file mode 100644
index b3139e721b02..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NV50_I2C_H__
-#define __NV50_I2C_H__
-#include "priv.h"
-
-struct nv50_i2c_priv {
- struct nvkm_i2c base;
-};
-
-struct nv50_i2c_port {
- struct nvkm_i2c_port base;
- u32 addr;
- u32 state;
-};
-
-extern const u32 nv50_i2c_addr[];
-extern const int nv50_i2c_addr_nr;
-int nv50_i2c_port_init(struct nvkm_object *);
-int nv50_i2c_sense_scl(struct nvkm_i2c_port *);
-int nv50_i2c_sense_sda(struct nvkm_i2c_port *);
-void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
-void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
-
-int g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void g94_i2c_acquire(struct nvkm_i2c_port *);
-void g94_i2c_release(struct nvkm_i2c_port *);
-
-int gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index a242eeb67829..2c5fcb9c504b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Red Hat Inc.
+ * Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,65 +19,98 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "pad.h"
-int
-_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
- struct nvkm_i2c_pad *pad = (void *)object;
- DBG("-> NULL\n");
- pad->port = NULL;
- return nvkm_object_fini(&pad->base, suspend);
+ PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
+ (mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
+ if (pad->func->mode)
+ pad->func->mode(pad, mode);
}
-int
-_nvkm_i2c_pad_init(struct nvkm_object *object)
+void
+nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
- struct nvkm_i2c_pad *pad = (void *)object;
- DBG("-> PORT:%02x\n", pad->next->index);
- pad->port = pad->next;
- return nvkm_object_init(&pad->base);
+ PAD_TRACE(pad, "mode %d", mode);
+ mutex_lock(&pad->mutex);
+ nvkm_i2c_pad_mode_locked(pad, mode);
+ pad->mode = mode;
+ mutex_unlock(&pad->mutex);
}
-int
-nvkm_i2c_pad_create_(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int index,
- int size, void **pobject)
+void
+nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
{
- struct nvkm_i2c *i2c = nvkm_i2c(parent);
- struct nvkm_i2c_port *port;
- struct nvkm_i2c_pad *pad;
- int ret;
+ PAD_TRACE(pad, "release");
+ if (pad->mode == NVKM_I2C_PAD_OFF)
+ nvkm_i2c_pad_mode_locked(pad, pad->mode);
+ mutex_unlock(&pad->mutex);
+}
- list_for_each_entry(port, &i2c->ports, head) {
- pad = nvkm_i2c_pad(port);
- if (pad->index == index) {
- atomic_inc(&nv_object(pad)->refcount);
- *pobject = pad;
- return 1;
+int
+nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+{
+ PAD_TRACE(pad, "acquire");
+ mutex_lock(&pad->mutex);
+ if (pad->mode != mode) {
+ if (pad->mode != NVKM_I2C_PAD_OFF) {
+ mutex_unlock(&pad->mutex);
+ return -EBUSY;
}
+ nvkm_i2c_pad_mode_locked(pad, mode);
}
+ return 0;
+}
+
+void
+nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
+{
+ PAD_TRACE(pad, "fini");
+ nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
+}
- ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
- pad = *pobject;
- if (ret)
- return ret;
+void
+nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
+{
+ PAD_TRACE(pad, "init");
+ nvkm_i2c_pad_mode_locked(pad, pad->mode);
+}
- pad->index = index;
- return 0;
+void
+nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
+{
+ struct nvkm_i2c_pad *pad = *ppad;
+ if (pad) {
+ PAD_TRACE(pad, "dtor");
+ list_del(&pad->head);
+ kfree(pad);
+ pad = NULL;
+ }
+}
+
+void
+nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+ int id, struct nvkm_i2c_pad *pad)
+{
+ pad->func = func;
+ pad->i2c = i2c;
+ pad->id = id;
+ pad->mode = NVKM_I2C_PAD_OFF;
+ mutex_init(&pad->mutex);
+ list_add_tail(&pad->head, &i2c->pad);
+ PAD_TRACE(pad, "ctor");
}
int
-_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
+nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+ int id, struct nvkm_i2c_pad **ppad)
{
- struct nvkm_i2c_pad *pad;
- int ret;
- ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
- *pobject = nv_object(pad);
- return ret;
+ if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
+ return 0;
}
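
The new pad code replaces the old init/fini object hooks with an
explicit ownership protocol: nvkm_i2c_pad_acquire() returns with the
pad mutex held on success, fails with -EBUSY if the pad is already
switched to a different active mode, and nvkm_i2c_pad_release() drops
the lock, switching the hardware back off when the pad's resting mode
is OFF. A sketch of the intended bracket around a transfer, as an
assumption about how the bus and aux consumers in this series use it:

    int ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
    if (ret)
    	return ret; /* -EBUSY: pad currently owned in another mode */
    /* ...perform the bit-banged transfer over the pad... */
    nvkm_i2c_pad_release(pad);
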
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index f3422cc6f8db..9eeb992944c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,56 +1,67 @@
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
-#include "priv.h"
+#include <subdev/i2c.h>
struct nvkm_i2c_pad {
- struct nvkm_object base;
- int index;
- struct nvkm_i2c_port *port;
- struct nvkm_i2c_port *next;
+ const struct nvkm_i2c_pad_func *func;
+ struct nvkm_i2c *i2c;
+#define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */ (n)
+#define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */ ((n) + 0x100)
+#define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200)
+ int id;
+
+ enum nvkm_i2c_pad_mode {
+ NVKM_I2C_PAD_OFF,
+ NVKM_I2C_PAD_I2C,
+ NVKM_I2C_PAD_AUX,
+ } mode;
+ struct mutex mutex;
+ struct list_head head;
+};
+
+struct nvkm_i2c_pad_func {
+ int (*bus_new_0)(struct nvkm_i2c_pad *, int id, u8 drive, u8 sense,
+ struct nvkm_i2c_bus **);
+ int (*bus_new_4)(struct nvkm_i2c_pad *, int id, u8 drive,
+ struct nvkm_i2c_bus **);
+
+ int (*aux_new_6)(struct nvkm_i2c_pad *, int id, u8 drive,
+ struct nvkm_i2c_aux **);
+
+ void (*mode)(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
};
-static inline struct nvkm_i2c_pad *
-nvkm_i2c_pad(struct nvkm_i2c_port *port)
-{
- struct nvkm_object *pad = nv_object(port);
- while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS))
- pad = pad->parent;
- return (void *)pad;
-}
-
-#define nvkm_i2c_pad_create(p,e,o,i,d) \
- nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
-#define nvkm_i2c_pad_destroy(p) ({ \
- struct nvkm_i2c_pad *_p = (p); \
- _nvkm_i2c_pad_dtor(nv_object(_p)); \
-})
-#define nvkm_i2c_pad_init(p) ({ \
- struct nvkm_i2c_pad *_p = (p); \
- _nvkm_i2c_pad_init(nv_object(_p)); \
-})
-#define nvkm_i2c_pad_fini(p,s) ({ \
- struct nvkm_i2c_pad *_p = (p); \
- _nvkm_i2c_pad_fini(nv_object(_p), (s)); \
-})
-
-int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int index, int, void **);
-
-int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-#define _nvkm_i2c_pad_dtor nvkm_object_destroy
-int _nvkm_i2c_pad_init(struct nvkm_object *);
-int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
-
-#ifndef MSG
-#define MSG(l,f,a...) do { \
- struct nvkm_i2c_pad *_pad = (void *)pad; \
- nv_##l(_pad, "PAD:%c:%02x: "f, \
- _pad->index >= 0x100 ? 'X' : 'S', \
- _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
+void nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+ int id, struct nvkm_i2c_pad *);
+int nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+ int id, struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_del(struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_init(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_fini(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+int nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+void nvkm_i2c_pad_release(struct nvkm_i2c_pad *);
+
+void g94_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+
+int nv04_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
+
+#define PAD_MSG(p,l,f,a...) do { \
+ struct nvkm_i2c_pad *_pad = (p); \
+ nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a); \
} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
+#define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
+#define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
#endif
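
Pad ids now encode their provenance in the upper bits, which makes the
log output self-describing:

    /* NVKM_I2C_PAD_HYBRID(3) == 0x003   hw pad index
     * NVKM_I2C_PAD_CCB(2)    == 0x102   ccb table entry
     * NVKM_I2C_PAD_EXT(1)    == 0x201   dcb external encoder
     *
     * PAD_MSG prints the id as %04x, e.g. "pad 0102: ..." */
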
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index e9832f7a7e38..5904bc5f2d2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,64 +22,55 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
+#include "aux.h"
+#include "bus.h"
-struct g94_i2c_pad {
- struct nvkm_i2c_pad base;
- int addr;
-};
-
-static int
-g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
- struct g94_i2c_pad *pad = (void *)object;
- nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
- return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
-
-static int
-g94_i2c_pad_init(struct nvkm_object *object)
+void
+g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
- struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
- struct g94_i2c_pad *pad = (void *)object;
+ struct nvkm_subdev *subdev = &pad->i2c->subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
- switch (nv_oclass(pad->base.next)->handle) {
- case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
- nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
+ switch (mode) {
+ case NVKM_I2C_PAD_OFF:
+ nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
+ break;
+ case NVKM_I2C_PAD_I2C:
+ nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
+ nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+ break;
+ case NVKM_I2C_PAD_AUX:
+ nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
+ nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
break;
- case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
default:
- nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
+ WARN_ON(1);
break;
}
-
- nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
- return nvkm_i2c_pad_init(&pad->base);
}
-static int
-g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
-{
- struct g94_i2c_pad *pad;
- int ret;
-
- ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
- *pobject = nv_object(pad);
- if (ret)
- return ret;
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_s_func = {
+ .bus_new_4 = nv50_i2c_bus_new,
+ .aux_new_6 = g94_i2c_aux_new,
+ .mode = g94_i2c_pad_mode,
+};
- pad->addr = index * 0x50;;
- return 0;
+int
+g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
}
-struct nvkm_oclass
-g94_i2c_pad_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g94_i2c_pad_ctor,
- .dtor = _nvkm_i2c_pad_dtor,
- .init = g94_i2c_pad_init,
- .fini = g94_i2c_pad_fini,
- },
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_x_func = {
+ .bus_new_4 = nv50_i2c_bus_new,
+ .aux_new_6 = g94_i2c_aux_new,
};
+
+int
+g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
+}
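
The g94 mode callback condenses the old init/fini pair into one switch,
with base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50: register
0x00e500 + base selects the pad function and bit 0 of 0x00e50c + base
gates the pad off (the exact hardware semantics are not stated in the
patch). Summarised:

    /* OFF: set bit 0 of 0x00e50c+base             (pad disabled)
     * I2C: 0x00e500+base: 0x0000c003 -> 0x0000c001; clear bit 0
     * AUX: 0x00e500+base: 0x0000c003 -> 0x00000002; clear bit 0 */

The g94_i2c_pad_x_func variant omits .mode, so mode switching becomes a
no-op for those pads; gm204 below applies the same logic at
0x00d970/0x00d97c.
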
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
new file mode 100644
index 000000000000..d53212f1aa52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "aux.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_s_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+ .aux_new_6 = g94_i2c_aux_new,
+ .mode = g94_i2c_pad_mode,
+};
+
+int
+gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_x_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+ .aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index be590405444d..24a4d760c67b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -22,64 +22,55 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
+#include "aux.h"
+#include "bus.h"
-struct gm204_i2c_pad {
- struct nvkm_i2c_pad base;
- int addr;
-};
-
-static int
-gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
-{
- struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
- struct gm204_i2c_pad *pad = (void *)object;
- nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
- return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
-
-static int
-gm204_i2c_pad_init(struct nvkm_object *object)
+static void
+gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
- struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
- struct gm204_i2c_pad *pad = (void *)object;
+ struct nvkm_subdev *subdev = &pad->i2c->subdev;
+ struct nvkm_device *device = subdev->device;
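+	/* hybrid pad control registers repeat at a 0x50-byte stride */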
+ const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
- switch (nv_oclass(pad->base.next)->handle) {
- case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
- nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+ switch (mode) {
+ case NVKM_I2C_PAD_OFF:
+ nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
+ break;
+ case NVKM_I2C_PAD_I2C:
+ nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
+ nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+ break;
+ case NVKM_I2C_PAD_AUX:
+ nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
+ nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
break;
- case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
default:
- nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+ WARN_ON(1);
break;
}
-
- nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
- return nvkm_i2c_pad_init(&pad->base);
}
-static int
-gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 index,
- struct nvkm_object **pobject)
-{
- struct gm204_i2c_pad *pad;
- int ret;
-
- ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
- *pobject = nv_object(pad);
- if (ret)
- return ret;
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_s_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+ .aux_new_6 = gm204_i2c_aux_new,
+ .mode = gm204_i2c_pad_mode,
+};
- pad->addr = index * 0x50;;
- return 0;
+int
+gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
}
-struct nvkm_oclass
-gm204_i2c_pad_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm204_i2c_pad_ctor,
- .dtor = _nvkm_i2c_pad_dtor,
- .init = gm204_i2c_pad_init,
- .fini = gm204_i2c_pad_fini,
- },
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_x_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+ .aux_new_6 = gm204_i2c_aux_new,
};
+
+int
+gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 22c7daaad3a0..310046ad9c61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -22,13 +22,15 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
+#include "bus.h"
-struct nvkm_oclass
-nv04_i2c_pad_oclass = {
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_i2c_pad_ctor,
- .dtor = _nvkm_i2c_pad_dtor,
- .init = _nvkm_i2c_pad_init,
- .fini = _nvkm_i2c_pad_fini,
- },
+static const struct nvkm_i2c_pad_func
+nv04_i2c_pad_func = {
+ .bus_new_0 = nv04_i2c_bus_new,
};
+
+int
+nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
new file mode 100644
index 000000000000..dda6fc0b089d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv4e_i2c_pad_func = {
+ .bus_new_4 = nv4e_i2c_bus_new,
+};
+
+int
+nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
new file mode 100644
index 000000000000..a03f25b1914f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv50_i2c_pad_func = {
+ .bus_new_4 = nv50_i2c_bus_new,
+};
+
+int
+nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+ return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
deleted file mode 100644
index 586f53dad813..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __NVKM_I2C_PORT_H__
-#define __NVKM_I2C_PORT_H__
-#include "priv.h"
-
-#ifndef MSG
-#define MSG(l,f,a...) do { \
- struct nvkm_i2c_port *_port = (void *)port; \
- nv_##l(_port, "PORT:%02x: "f, _port->index, ##a); \
-} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index 6586e1567fcf..bf655a66ef40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,69 +1,14 @@
#ifndef __NVKM_I2C_PRIV_H__
#define __NVKM_I2C_PRIV_H__
+#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
#include <subdev/i2c.h>
-extern struct nvkm_oclass nv04_i2c_pad_oclass;
-extern struct nvkm_oclass g94_i2c_pad_oclass;
-extern struct nvkm_oclass gm204_i2c_pad_oclass;
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
+ int index, struct nvkm_i2c **);
-#define nvkm_i2c_port_create(p,e,o,i,a,f,d) \
- nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f), \
- sizeof(**d), (void **)d)
-#define nvkm_i2c_port_destroy(p) ({ \
- struct nvkm_i2c_port *port = (p); \
- _nvkm_i2c_port_dtor(nv_object(i2c)); \
-})
-#define nvkm_i2c_port_init(p) \
- nvkm_object_init(&(p)->base)
-#define nvkm_i2c_port_fini(p,s) \
- nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, u8,
- const struct i2c_algorithm *,
- const struct nvkm_i2c_func *,
- int, void **);
-void _nvkm_i2c_port_dtor(struct nvkm_object *);
-#define _nvkm_i2c_port_init nvkm_object_init
-int _nvkm_i2c_port_fini(struct nvkm_object *, bool);
-
-#define nvkm_i2c_create(p,e,o,d) \
- nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_i2c_destroy(p) ({ \
- struct nvkm_i2c *i2c = (p); \
- _nvkm_i2c_dtor(nv_object(i2c)); \
-})
-#define nvkm_i2c_init(p) ({ \
- struct nvkm_i2c *i2c = (p); \
- _nvkm_i2c_init(nv_object(i2c)); \
-})
-#define nvkm_i2c_fini(p,s) ({ \
- struct nvkm_i2c *i2c = (p); \
- _nvkm_i2c_fini(nv_object(i2c), (s)); \
-})
-
-int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-int _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void _nvkm_i2c_dtor(struct nvkm_object *);
-int _nvkm_i2c_init(struct nvkm_object *);
-int _nvkm_i2c_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nvkm_anx9805_sclass[];
-extern struct nvkm_oclass gf110_i2c_sclass[];
-
-extern const struct i2c_algorithm nvkm_i2c_bit_algo;
-extern const struct i2c_algorithm nvkm_i2c_aux_algo;
-
-struct nvkm_i2c_impl {
- struct nvkm_oclass base;
-
- /* supported i2c port classes */
- struct nvkm_oclass *sclass;
- struct nvkm_oclass *pad_x;
- struct nvkm_oclass *pad_s;
+struct nvkm_i2c_func {
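+	/* pad_x: plain i2c pads; pad_s: hybrid pads that can switch to aux */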
+ int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
+ int (*pad_s_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
/* number of native dp aux channels present */
int aux;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 8e578f802f66..37a0496f7ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -23,55 +23,54 @@
*/
#include <subdev/ibus.h>
-struct gf100_ibus_priv {
- struct nvkm_ibus base;
-};
-
static void
-gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
- u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
- u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
- nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
+ u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
+ nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
-gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
- u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
- u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
- nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
+ u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
+ nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
-gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
- u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
- u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
- nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
+ u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
+ u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
+ nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
-gf100_ibus_intr(struct nvkm_subdev *subdev)
+gf100_ibus_intr(struct nvkm_subdev *ibus)
{
- struct gf100_ibus_priv *priv = (void *)subdev;
- u32 intr0 = nv_rd32(priv, 0x121c58);
- u32 intr1 = nv_rd32(priv, 0x121c5c);
- u32 hubnr = nv_rd32(priv, 0x121c70);
- u32 ropnr = nv_rd32(priv, 0x121c74);
- u32 gpcnr = nv_rd32(priv, 0x121c78);
+ struct nvkm_device *device = ibus->device;
+ u32 intr0 = nvkm_rd32(device, 0x121c58);
+ u32 intr1 = nvkm_rd32(device, 0x121c5c);
+ u32 hubnr = nvkm_rd32(device, 0x121c70);
+ u32 ropnr = nvkm_rd32(device, 0x121c74);
+ u32 gpcnr = nvkm_rd32(device, 0x121c78);
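+	/* intr0 carries one bit per HUB (15:8) and ROP (31:16); intr1 one per GPC */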
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gf100_ibus_intr_hub(priv, i);
+ gf100_ibus_intr_hub(ibus, i);
intr0 &= ~stat;
}
}
@@ -79,7 +78,7 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gf100_ibus_intr_rop(priv, i);
+ gf100_ibus_intr_rop(ibus, i);
intr0 &= ~stat;
}
}
@@ -87,36 +86,24 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gf100_ibus_intr_gpc(priv, i);
+ gf100_ibus_intr_gpc(ibus, i);
intr1 &= ~stat;
}
}
}
-static int
-gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf100_ibus_priv *priv;
- int ret;
-
- ret = nvkm_ibus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_subdev_func
+gf100_ibus = {
+ .intr = gf100_ibus_intr,
+};
- nv_subdev(priv)->intr = gf100_ibus_intr;
+int
+gf100_ibus_new(struct nvkm_device *device, int index,
+ struct nvkm_subdev **pibus)
+{
+ struct nvkm_subdev *ibus;
+ if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
return 0;
}
-
-struct nvkm_oclass
-gf100_ibus_oclass = {
- .handle = NV_SUBDEV(IBUS, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ibus_ctor,
- .dtor = _nvkm_ibus_dtor,
- .init = _nvkm_ibus_init,
- .fini = _nvkm_ibus_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 7b6e9a6cd7b2..ba33609f643c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -23,55 +23,54 @@
*/
#include <subdev/ibus.h>
-struct gk104_ibus_priv {
- struct nvkm_ibus base;
-};
-
static void
-gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
- u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
- u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
- nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
+ u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
+ nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
-gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
- u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
- u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
- nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
+ u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
+ nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
-gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
{
- u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
- u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
- u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
- nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
- nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+ struct nvkm_device *device = ibus->device;
+ u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
+ u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
+ u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
+ nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
-gk104_ibus_intr(struct nvkm_subdev *subdev)
+gk104_ibus_intr(struct nvkm_subdev *ibus)
{
- struct gk104_ibus_priv *priv = (void *)subdev;
- u32 intr0 = nv_rd32(priv, 0x120058);
- u32 intr1 = nv_rd32(priv, 0x12005c);
- u32 hubnr = nv_rd32(priv, 0x120070);
- u32 ropnr = nv_rd32(priv, 0x120074);
- u32 gpcnr = nv_rd32(priv, 0x120078);
+ struct nvkm_device *device = ibus->device;
+ u32 intr0 = nvkm_rd32(device, 0x120058);
+ u32 intr1 = nvkm_rd32(device, 0x12005c);
+ u32 hubnr = nvkm_rd32(device, 0x120070);
+ u32 ropnr = nvkm_rd32(device, 0x120074);
+ u32 gpcnr = nvkm_rd32(device, 0x120078);
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gk104_ibus_intr_hub(priv, i);
+ gk104_ibus_intr_hub(ibus, i);
intr0 &= ~stat;
}
}
@@ -79,7 +78,7 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gk104_ibus_intr_rop(priv, i);
+ gk104_ibus_intr_rop(ibus, i);
intr0 &= ~stat;
}
}
@@ -87,53 +86,40 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gk104_ibus_intr_gpc(priv, i);
+ gk104_ibus_intr_gpc(ibus, i);
intr1 &= ~stat;
}
}
}
static int
-gk104_ibus_init(struct nvkm_object *object)
+gk104_ibus_init(struct nvkm_subdev *ibus)
{
- struct gk104_ibus_priv *priv = (void *)object;
- int ret = nvkm_ibus_init(&priv->base);
- if (ret == 0) {
- nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
- nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
- nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
- nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
- nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
- nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
- nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
- }
- return ret;
+ struct nvkm_device *device = ibus->device;
+ nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
+ nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
+ nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
+ nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
+ nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
+ nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
+ nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
+ return 0;
}
-static int
-gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk104_ibus_priv *priv;
- int ret;
-
- ret = nvkm_ibus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_subdev_func
+gk104_ibus = {
+ .preinit = gk104_ibus_init,
+ .init = gk104_ibus_init,
+ .intr = gk104_ibus_intr,
+};
- nv_subdev(priv)->intr = gk104_ibus_intr;
+int
+gk104_ibus_new(struct nvkm_device *device, int index,
+ struct nvkm_subdev **pibus)
+{
+ struct nvkm_subdev *ibus;
+ if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
return 0;
}
-
-struct nvkm_oclass
-gk104_ibus_oclass = {
- .handle = NV_SUBDEV(IBUS, 0xe0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk104_ibus_ctor,
- .dtor = _nvkm_ibus_dtor,
- .init = gk104_ibus_init,
- .fini = _nvkm_ibus_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..3484079e885a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -22,81 +22,68 @@
#include <subdev/ibus.h>
#include <subdev/timer.h>
-struct gk20a_ibus_priv {
- struct nvkm_ibus base;
-};
-
static void
-gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
+gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
{
- nv_mask(priv, 0x137250, 0x3f, 0);
+ struct nvkm_device *device = ibus->device;
+ nvkm_mask(device, 0x137250, 0x3f, 0);
- nv_mask(priv, 0x000200, 0x20, 0);
+ nvkm_mask(device, 0x000200, 0x20, 0);
usleep_range(20, 30);
- nv_mask(priv, 0x000200, 0x20, 0x20);
-
- nv_wr32(priv, 0x12004c, 0x4);
- nv_wr32(priv, 0x122204, 0x2);
- nv_rd32(priv, 0x122204);
+ nvkm_mask(device, 0x000200, 0x20, 0x20);
+
+ nvkm_wr32(device, 0x12004c, 0x4);
+ nvkm_wr32(device, 0x122204, 0x2);
+ nvkm_rd32(device, 0x122204);
+
+ /*
+ * Bug: increase clock timeout to avoid operation failure at high
+ * gpcclk rate.
+ */
+ nvkm_wr32(device, 0x122354, 0x800);
+ nvkm_wr32(device, 0x128328, 0x800);
+ nvkm_wr32(device, 0x124320, 0x800);
}
static void
-gk20a_ibus_intr(struct nvkm_subdev *subdev)
+gk20a_ibus_intr(struct nvkm_subdev *ibus)
{
- struct gk20a_ibus_priv *priv = (void *)subdev;
- u32 status0 = nv_rd32(priv, 0x120058);
+ struct nvkm_device *device = ibus->device;
+ u32 status0 = nvkm_rd32(device, 0x120058);
if (status0 & 0x7) {
- nv_debug(priv, "resetting priv ring\n");
- gk20a_ibus_init_priv_ring(priv);
+ nvkm_debug(ibus, "resetting ibus ring\n");
+ gk20a_ibus_init_ibus_ring(ibus);
}
/* Acknowledge interrupt */
- nv_mask(priv, 0x12004c, 0x2, 0x2);
-
- if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
- nv_warn(priv, "timeout waiting for ringmaster ack\n");
+ nvkm_mask(device, 0x12004c, 0x2, 0x2);
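+	/* wait up to 2ms for the ringmaster to acknowledge */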
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
-gk20a_ibus_init(struct nvkm_object *object)
+gk20a_ibus_init(struct nvkm_subdev *ibus)
{
- struct gk20a_ibus_priv *priv = (void *)object;
- int ret;
-
- ret = _nvkm_ibus_init(object);
- if (ret)
- return ret;
-
- gk20a_ibus_init_priv_ring(priv);
-
+ gk20a_ibus_init_ibus_ring(ibus);
return 0;
}
-static int
-gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk20a_ibus_priv *priv;
- int ret;
-
- ret = nvkm_ibus_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_subdev_func
+gk20a_ibus = {
+ .init = gk20a_ibus_init,
+ .intr = gk20a_ibus_intr,
+};
- nv_subdev(priv)->intr = gk20a_ibus_intr;
+int
+gk20a_ibus_new(struct nvkm_device *device, int index,
+ struct nvkm_subdev **pibus)
+{
+ struct nvkm_subdev *ibus;
+ if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
return 0;
}
-
-struct nvkm_oclass
-gk20a_ibus_oclass = {
- .handle = NV_SUBDEV(IBUS, 0xea),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_ibus_ctor,
- .dtor = _nvkm_ibus_dtor,
- .init = gk20a_ibus_init,
- .fini = _nvkm_ibus_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index d16358cc6cbb..895ba74057d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,124 +23,291 @@
*/
#include "priv.h"
-#include <core/engine.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
+#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
-void
-_nvkm_instobj_dtor(struct nvkm_object *object)
+struct nvkm_instobj {
+ struct nvkm_memory memory;
+ struct nvkm_memory *parent;
+ struct nvkm_instmem *imem;
+ struct list_head head;
+ u32 *suspend;
+ void __iomem *map;
+};
+
+static enum nvkm_memory_target
+nvkm_instobj_target(struct nvkm_memory *memory)
+{
+ memory = nvkm_instobj(memory)->parent;
+ return nvkm_memory_target(memory);
+}
+
+static u64
+nvkm_instobj_addr(struct nvkm_memory *memory)
+{
+ memory = nvkm_instobj(memory)->parent;
+ return nvkm_memory_addr(memory);
+}
+
+static u64
+nvkm_instobj_size(struct nvkm_memory *memory)
+{
+ memory = nvkm_instobj(memory)->parent;
+ return nvkm_memory_size(memory);
+}
+
+static void
+nvkm_instobj_release(struct nvkm_memory *memory)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
+ nvkm_bar_flush(iobj->imem->subdev.device->bar);
+}
+
+static void __iomem *
+nvkm_instobj_acquire(struct nvkm_memory *memory)
+{
+ return nvkm_instobj(memory)->map;
+}
+
+static u32
+nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+ return ioread32_native(nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- struct nvkm_instmem *imem = nvkm_instmem(object);
- struct nvkm_instobj *iobj = (void *)object;
+ iowrite32_native(data, nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+ memory = nvkm_instobj(memory)->parent;
+ nvkm_memory_map(memory, vma, offset);
+}
- mutex_lock(&nv_subdev(imem)->mutex);
+static void *
+nvkm_instobj_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
list_del(&iobj->head);
- mutex_unlock(&nv_subdev(imem)->mutex);
+ nvkm_memory_del(&iobj->parent);
+ return iobj;
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func = {
+ .dtor = nvkm_instobj_dtor,
+ .target = nvkm_instobj_target,
+ .addr = nvkm_instobj_addr,
+ .size = nvkm_instobj_size,
+ .acquire = nvkm_instobj_acquire,
+ .release = nvkm_instobj_release,
+ .rd32 = nvkm_instobj_rd32,
+ .wr32 = nvkm_instobj_wr32,
+ .map = nvkm_instobj_map,
+};
+
+static void
+nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+ memory = nvkm_instobj(memory)->parent;
+ nvkm_memory_boot(memory, vm);
+}
+
+static void
+nvkm_instobj_release_slow(struct nvkm_memory *memory)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
+ nvkm_instobj_release(memory);
+ nvkm_done(iobj->parent);
+}
+
+static void __iomem *
+nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
+ iobj->map = nvkm_kmap(iobj->parent);
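+	/* once mapped, switch the object over to the direct accessors */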
+ if (iobj->map)
+ memory->func = &nvkm_instobj_func;
+ return iobj->map;
+}
+
+static u32
+nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
+ return nvkm_ro32(iobj->parent, offset);
+}
- return nvkm_object_destroy(&iobj->base);
+static void
+nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+ struct nvkm_instobj *iobj = nvkm_instobj(memory);
+ return nvkm_wo32(iobj->parent, offset, data);
}
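+/* indirect accessors, used until a CPU mapping of the object exists */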
+const struct nvkm_memory_func
+nvkm_instobj_func_slow = {
+ .dtor = nvkm_instobj_dtor,
+ .target = nvkm_instobj_target,
+ .addr = nvkm_instobj_addr,
+ .size = nvkm_instobj_size,
+ .boot = nvkm_instobj_boot,
+ .acquire = nvkm_instobj_acquire_slow,
+ .release = nvkm_instobj_release_slow,
+ .rd32 = nvkm_instobj_rd32_slow,
+ .wr32 = nvkm_instobj_wr32_slow,
+ .map = nvkm_instobj_map,
+};
+
int
-nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nvkm_instmem *imem = nvkm_instmem(parent);
+ struct nvkm_memory *memory = NULL;
struct nvkm_instobj *iobj;
+ u32 offset;
int ret;
- ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
- length, pobject);
- iobj = *pobject;
+ ret = imem->func->memory_new(imem, size, align, zero, &memory);
if (ret)
- return ret;
+ goto done;
- mutex_lock(&imem->base.mutex);
- list_add(&iobj->head, &imem->list);
- mutex_unlock(&imem->base.mutex);
- return 0;
+ if (!imem->func->persistent) {
+ if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+ iobj->parent = memory;
+ iobj->imem = imem;
+ list_add_tail(&iobj->head, &imem->list);
+ memory = &iobj->memory;
+ }
+
+ if (!imem->func->zero && zero) {
+ void __iomem *map = nvkm_kmap(memory);
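+		/* no CPU mapping available; fall back to 32-bit writes */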
+ if (unlikely(!map)) {
+ for (offset = 0; offset < size; offset += 4)
+ nvkm_wo32(memory, offset, 0x00000000);
+ } else {
+ memset_io(map, 0x00, size);
+ }
+ nvkm_done(memory);
+ }
+
+done:
+ if (ret)
+ nvkm_memory_del(&memory);
+ *pmemory = memory;
+ return ret;
}
/******************************************************************************
* instmem subdev base implementation
*****************************************************************************/
-static int
-nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
- u32 size, u32 align, struct nvkm_object **pobject)
+u32
+nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
- struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
- struct nvkm_instobj_args args = { .size = size, .align = align };
- return nvkm_object_ctor(parent, &parent->engine->subdev.object,
- impl->instobj, &args, sizeof(args), pobject);
+ return imem->func->rd32(imem, addr);
}
-int
-_nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
+{
+ return imem->func->wr32(imem, addr, data);
+}
+
+static int
+nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_instmem *imem = (void *)object;
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
- int i, ret = 0;
+ int i;
+
+ if (imem->func->fini)
+ imem->func->fini(imem);
if (suspend) {
- mutex_lock(&imem->base.mutex);
list_for_each_entry(iobj, &imem->list, head) {
- iobj->suspend = vmalloc(iobj->size);
- if (!iobj->suspend) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < iobj->size; i += 4)
- iobj->suspend[i / 4] = nv_ro32(iobj, i);
+ struct nvkm_memory *memory = iobj->parent;
+ u64 size = nvkm_memory_size(memory);
+
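+			/* snapshot contents into system memory across suspend */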
+ iobj->suspend = vmalloc(size);
+ if (!iobj->suspend)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i += 4)
+ iobj->suspend[i / 4] = nvkm_ro32(memory, i);
}
- mutex_unlock(&imem->base.mutex);
- if (ret)
- return ret;
}
- return nvkm_subdev_fini(&imem->base, suspend);
+ return 0;
}
-int
-_nvkm_instmem_init(struct nvkm_object *object)
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
- struct nvkm_instmem *imem = (void *)object;
- struct nvkm_instobj *iobj;
- int ret, i;
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ if (imem->func->oneinit)
+ return imem->func->oneinit(imem);
+ return 0;
+}
- ret = nvkm_subdev_init(&imem->base);
- if (ret)
- return ret;
+static int
+nvkm_instmem_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ struct nvkm_instobj *iobj;
+ int i;
- mutex_lock(&imem->base.mutex);
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- nv_wo32(iobj, i, iobj->suspend[i / 4]);
+ struct nvkm_memory *memory = iobj->parent;
+ u64 size = nvkm_memory_size(memory);
+ for (i = 0; i < size; i += 4)
+ nvkm_wo32(memory, i, iobj->suspend[i / 4]);
vfree(iobj->suspend);
iobj->suspend = NULL;
}
}
- mutex_unlock(&imem->base.mutex);
+
return 0;
}
-int
-nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_instmem *imem;
- int ret;
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ if (imem->func->dtor)
+ return imem->func->dtor(imem);
+ return imem;
+}
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM",
- "instmem", length, pobject);
- imem = *pobject;
- if (ret)
- return ret;
+static const struct nvkm_subdev_func
+nvkm_instmem = {
+ .dtor = nvkm_instmem_dtor,
+ .oneinit = nvkm_instmem_oneinit,
+ .init = nvkm_instmem_init,
+ .fini = nvkm_instmem_fini,
+};
+void
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_instmem *imem)
+{
+ nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+ imem->func = func;
INIT_LIST_HEAD(&imem->list);
- imem->alloc = nvkm_instmem_alloc;
- return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d9ebfc..cd7feb1b25f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
* to use more "relaxed" allocation parameters when using the DMA API, since we
* never need a kernel mapping.
*/
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
+#include "priv.h"
-#include <subdev/fb.h>
+#include <core/memory.h>
#include <core/mm.h>
-#include <core/device.h>
+#include <core/tegra.h>
+#include <subdev/fb.h>
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
-#include "priv.h"
-
-struct gk20a_instobj_priv {
- struct nvkm_instobj base;
- /* Must be second member here - see nouveau_gpuobj_map_vm() */
- struct nvkm_mem *mem;
- /* Pointed by mem */
- struct nvkm_mem _mem;
+struct gk20a_instobj {
+ struct nvkm_memory memory;
+ struct gk20a_instmem *imem;
+ struct nvkm_mem mem;
};
/*
* Used for objects allocated using the DMA API
*/
struct gk20a_instobj_dma {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
void *cpuaddr;
dma_addr_t handle;
@@ -73,14 +68,15 @@ struct gk20a_instobj_dma {
* Used for objects flattened using the IOMMU API
*/
struct gk20a_instobj_iommu {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
-	/* array of base.mem->size pages */
+	/* array of base.mem.size pages */
struct page *pages[];
};
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
struct nvkm_instmem base;
+ unsigned long lock_flags;
spinlock_t lock;
u64 addr;
@@ -94,6 +90,42 @@ struct gk20a_instmem_priv {
struct dma_attrs attrs;
};
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+ return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+ return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
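+/* accesses go through the PRAMIN window, so no direct mapping exists;
+ * the window lock is held from acquire() until release() */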
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
+{
+ struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+ unsigned long flags;
+ spin_lock_irqsave(&imem->lock, flags);
+ imem->lock_flags = flags;
+ return NULL;
+}
+
+static void
+gk20a_instobj_release(struct nvkm_memory *memory)
+{
+ struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+ spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
/*
* Use PRAMIN to read/write data and avoid coherency issues.
* PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@ struct gk20a_instmem_priv {
*/
static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
- unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
u32 data;
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
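+	/* retarget the 1MiB PRAMIN window before touching the aperture */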
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- data = nv_rd32(priv, 0x700000 + addr);
- spin_unlock_irqrestore(&priv->lock, flags);
+ data = nvkm_rd32(device, 0x700000 + addr);
return data;
}
static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
- unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+ u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
+
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- nv_wr32(priv, 0x700000 + addr, data);
- spin_unlock_irqrestore(&priv->lock, flags);
+ nvkm_wr32(device, 0x700000 + addr, data);
+}
+
+static void
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ nvkm_vm_map_at(vma, offset, &node->mem);
}
static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
{
struct gk20a_instobj_dma *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
- struct device *dev = nv_device_base(nv_device(priv));
+ struct gk20a_instmem *imem = _node->imem;
+ struct device *dev = imem->base.subdev.device->dev;
if (unlikely(!node->cpuaddr))
return;
- dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
- node->handle, &priv->attrs);
+ dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+ node->handle, &imem->attrs);
}
static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
{
struct gk20a_instobj_iommu *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instmem *imem = _node->imem;
struct nvkm_mm_node *r;
int i;
- if (unlikely(list_empty(&_node->mem->regions)))
+ if (unlikely(list_empty(&_node->mem.regions)))
return;
- r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+ r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
rl_entry);
/* clear bit 34 to unmap pages */
- r->offset &= ~BIT(34 - priv->iommu_pgshift);
+ r->offset &= ~BIT(34 - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
- for (i = 0; i < _node->mem->size; i++) {
- iommu_unmap(priv->domain,
- (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+ for (i = 0; i < _node->mem.size; i++) {
+ iommu_unmap(imem->domain,
+ (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
__free_page(node->pages[i]);
}
/* Release area from GPU address space */
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
}
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
+static void *
+gk20a_instobj_dtor(struct nvkm_memory *memory)
{
- struct gk20a_instobj_priv *node = (void *)object;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct gk20a_instmem *imem = node->imem;
- if (priv->domain)
+ if (imem->domain)
gk20a_instobj_dtor_iommu(node);
else
gk20a_instobj_dtor_dma(node);
- nvkm_instobj_destroy(&node->base);
+ return node;
}
+static const struct nvkm_memory_func
+gk20a_instobj_func = {
+ .dtor = gk20a_instobj_dtor,
+ .target = gk20a_instobj_target,
+ .addr = gk20a_instobj_addr,
+ .size = gk20a_instobj_size,
+ .acquire = gk20a_instobj_acquire,
+ .release = gk20a_instobj_release,
+ .rd32 = gk20a_instobj_rd32,
+ .wr32 = gk20a_instobj_wr32,
+ .map = gk20a_instobj_map,
+};
+
static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_dma *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct device *dev = nv_device_base(nv_device(parent));
- int ret;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
+ struct device *dev = subdev->device->dev;
- ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
- (void **)&node);
+ if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+ return -ENOMEM;
*_node = &node->base;
- if (ret)
- return ret;
node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &priv->attrs);
+ &imem->attrs);
if (!node->cpuaddr) {
- nv_error(priv, "cannot allocate DMA memory\n");
+ nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
}
/* alignment check */
if (unlikely(node->handle & (align - 1)))
- nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
- &node->handle, align);
+ nvkm_warn(subdev,
+ "memory not aligned as requested: %pad (0x%x)\n",
+ &node->handle, align);
/* present memory for being mapped using small pages */
node->r.type = 12;
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
- node->base._mem.offset = node->handle;
+ node->base.mem.offset = node->handle;
- INIT_LIST_HEAD(&node->base._mem.regions);
- list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+ INIT_LIST_HEAD(&node->base.mem.regions);
+ list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
return 0;
}
static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_iommu *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_mm_node *r;
int ret;
int i;
- ret = nvkm_instobj_create_(parent, engine, oclass,
- sizeof(*node) + sizeof(node->pages[0]) * npages,
- (void **)&node);
+ if (!(node = kzalloc(sizeof(*node) +
+			     sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+ return -ENOMEM;
*_node = &node->base;
- if (ret)
- return ret;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
node->pages[i] = p;
}
- mutex_lock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
/* Reserve area from GPU address space */
- ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
- align >> priv->iommu_pgshift, &r);
- mutex_unlock(priv->mm_mutex);
+ ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+ align >> imem->iommu_pgshift, &r);
+ mutex_unlock(imem->mm_mutex);
if (ret) {
- nv_error(priv, "virtual space is full!\n");
+ nvkm_error(subdev, "virtual space is full!\n");
goto free_pages;
}
/* Map into GPU address space */
for (i = 0; i < npages; i++) {
struct page *p = node->pages[i];
- u32 offset = (r->offset + i) << priv->iommu_pgshift;
+ u32 offset = (r->offset + i) << imem->iommu_pgshift;
- ret = iommu_map(priv->domain, offset, page_to_phys(p),
+ ret = iommu_map(imem->domain, offset, page_to_phys(p),
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
- nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+ nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
while (i-- > 0) {
offset -= PAGE_SIZE;
- iommu_unmap(priv->domain, offset, PAGE_SIZE);
+ iommu_unmap(imem->domain, offset, PAGE_SIZE);
}
goto release_area;
}
}
/* Bit 34 tells that an address is to be resolved through the IOMMU */
- r->offset |= BIT(34 - priv->iommu_pgshift);
+ r->offset |= BIT(34 - imem->iommu_pgshift);
- node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+ node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
- INIT_LIST_HEAD(&node->base._mem.regions);
- list_add_tail(&r->rl_entry, &node->base._mem.regions);
+ INIT_LIST_HEAD(&node->base.mem.regions);
+ list_add_tail(&r->rl_entry, &node->base.mem.regions);
return 0;
release_area:
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
free_pages:
for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@ free_pages:
}
static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 _size,
- struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nvkm_instobj_args *args = data;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct gk20a_instobj_priv *node;
- u32 size, align;
+ struct gk20a_instmem *imem = gk20a_instmem(base);
+ struct gk20a_instobj *node = NULL;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
int ret;
- nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
- priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+ nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+ imem->domain ? "IOMMU" : "DMA", size, align);
/* Round size and align to page bounds */
- size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
- align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+ size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+ align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
- if (priv->domain)
- ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
- size >> PAGE_SHIFT, align, &node);
+ if (imem->domain)
+ ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+ align, &node);
else
- ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
- size >> PAGE_SHIFT, align, &node);
- *pobject = nv_object(node);
+ ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+ align, &node);
+ *pmemory = node ? &node->memory : NULL;
if (ret)
return ret;
- node->mem = &node->_mem;
+ nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
+ node->imem = imem;
/* present memory for being mapped using small pages */
- node->mem->size = size >> 12;
- node->mem->memtype = 0;
- node->mem->page_shift = 12;
-
- node->base.addr = node->mem->offset;
- node->base.size = size;
+ node->mem.size = size >> 12;
+ node->mem.memtype = 0;
+ node->mem.page_shift = 12;
- nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
- size, align, node->mem->offset);
+ nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+ size, align, node->mem.offset);
return 0;
}
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_instobj_ctor,
- .dtor = gk20a_instobj_dtor,
- .init = _nvkm_instobj_init,
- .fini = _nvkm_instobj_fini,
- .rd32 = gk20a_instobj_rd32,
- .wr32 = gk20a_instobj_wr32,
- },
-};
-
-
-
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_instmem_fini(struct nvkm_instmem *base)
{
- struct gk20a_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nvkm_instmem_fini(&priv->base, suspend);
+ gk20a_instmem(base)->addr = ~0ULL;
}
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk20a_instmem_priv *priv;
- struct nouveau_platform_device *plat;
- int ret;
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+ .fini = gk20a_instmem_fini,
+ .memory_new = gk20a_instobj_new,
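+	/* dma/iommu-backed objects keep their contents across suspend */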
+ .persistent = true,
+ .zero = false,
+};
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+ struct nvkm_instmem **pimem)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gk20a_instmem *imem;
- spin_lock_init(&priv->lock);
+ if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+ spin_lock_init(&imem->lock);
+ *pimem = &imem->base;
- plat = nv_device_to_platform(nv_device(parent));
- if (plat->gpu->iommu.domain) {
- priv->domain = plat->gpu->iommu.domain;
- priv->mm = plat->gpu->iommu.mm;
- priv->iommu_pgshift = plat->gpu->iommu.pgshift;
- priv->mm_mutex = &plat->gpu->iommu.mutex;
+ if (tdev->iommu.domain) {
+ imem->domain = tdev->iommu.domain;
+ imem->mm = &tdev->iommu.mm;
+ imem->iommu_pgshift = tdev->iommu.pgshift;
+ imem->mm_mutex = &tdev->iommu.mutex;
- nv_info(priv, "using IOMMU\n");
+ nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- init_dma_attrs(&priv->attrs);
+ init_dma_attrs(&imem->attrs);
/*
* We will access instmem through PRAMIN and thus do not need a
* consistent CPU pointer or kernel mapping
*/
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
- nv_info(priv, "using DMA API\n");
+ nvkm_info(&imem->base.subdev, "using DMA API\n");
}
return 0;
}
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
- .base.handle = NV_SUBDEV(INSTMEM, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_instmem_ctor,
- .dtor = _nvkm_instmem_dtor,
- .init = _nvkm_instmem_init,
- .fini = gk20a_instmem_fini,
- },
- .instobj = &gk20a_instobj_oclass.base,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..6133c8bb2d42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,165 +21,207 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
+#include "priv.h"
+#include <core/memory.h>
#include <core/ramht.h>
+struct nv04_instmem {
+ struct nvkm_instmem base;
+ struct nvkm_mm heap;
+};
+
/******************************************************************************
* instmem object implementation
*****************************************************************************/
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
-static u32
-nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
+struct nv04_instobj {
+ struct nvkm_memory memory;
+ struct nv04_instmem *imem;
+ struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
+{
+ return nv04_instobj(memory)->node->offset;
+}
+
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- return nv_ro32(priv, node->mem->offset + addr);
+ return nv04_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv04_instobj_acquire(struct nvkm_memory *memory)
+{
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
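+	/* PRAMIN is exposed through BAR0 at offset 0x700000 */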
+ return device->pri + 0x700000 + iobj->node->offset;
}
static void
-nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv04_instobj_release(struct nvkm_memory *memory)
+{
+}
+
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- nv_wo32(priv, node->mem->offset + addr, data);
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}
static void
-nv04_instobj_dtor(struct nvkm_object *object)
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- nvkm_mm_free(&priv->heap, &node->mem);
- nvkm_instobj_destroy(&node->base);
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}
+static void *
+nv04_instobj_dtor(struct nvkm_memory *memory)
+{
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ mutex_lock(&iobj->imem->base.subdev.mutex);
+ nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+ mutex_unlock(&iobj->imem->base.subdev.mutex);
+ return iobj;
+}
+
+static const struct nvkm_memory_func
+nv04_instobj_func = {
+ .dtor = nv04_instobj_dtor,
+ .target = nv04_instobj_target,
+ .size = nv04_instobj_size,
+ .addr = nv04_instobj_addr,
+ .acquire = nv04_instobj_acquire,
+ .release = nv04_instobj_release,
+ .rd32 = nv04_instobj_rd32,
+ .wr32 = nv04_instobj_wr32,
+};
+
static int
-nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct nv04_instobj_priv *node;
- struct nvkm_instobj_args *args = data;
+ struct nv04_instmem *imem = nv04_instmem(base);
+ struct nv04_instobj *iobj;
int ret;
- if (!args->align)
- args->align = 1;
+ if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pmemory = &iobj->memory;
- ret = nvkm_instobj_create(parent, engine, oclass, &node);
- *pobject = nv_object(node);
- if (ret)
- return ret;
-
- ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
- args->align, &node->mem);
- if (ret)
- return ret;
+ nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+ iobj->imem = imem;
- node->base.addr = node->mem->offset;
- node->base.size = node->mem->length;
- return 0;
+ mutex_lock(&imem->base.subdev.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+ align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.subdev.mutex);
+ return ret;
}
-struct nvkm_instobj_impl
-nv04_instobj_oclass = {
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_instobj_ctor,
- .dtor = nv04_instobj_dtor,
- .init = _nvkm_instobj_init,
- .fini = _nvkm_instobj_fini,
- .rd32 = nv04_instobj_rd32,
- .wr32 = nv04_instobj_wr32,
- },
-};
-
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static u32
-nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
- return nv_rd32(object, 0x700000 + addr);
+ return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- return nv_wr32(object, 0x700000 + addr, data);
-}
-
-void
-nv04_instmem_dtor(struct nvkm_object *object)
+nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
- struct nv04_instmem_priv *priv = (void *)object;
- nvkm_gpuobj_ref(NULL, &priv->ramfc);
- nvkm_gpuobj_ref(NULL, &priv->ramro);
- nvkm_ramht_ref(NULL, &priv->ramht);
- nvkm_gpuobj_ref(NULL, &priv->vbios);
- nvkm_mm_fini(&priv->heap);
- if (priv->iomem)
- iounmap(priv->iomem);
- nvkm_instmem_destroy(&priv->base);
+ nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}
static int
-nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_instmem_oneinit(struct nvkm_instmem *base)
{
- struct nv04_instmem_priv *priv;
+ struct nv04_instmem *imem = nv04_instmem(base);
+ struct nvkm_device *device = imem->base.subdev.device;
int ret;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
/* PRAMIN aperture maps over the end of VRAM, reserve it */
- priv->base.reserved = 512 * 1024;
+ imem->base.reserved = 512 * 1024;
- ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+ &imem->base.vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+ ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
if (ret)
return ret;
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
+ &imem->base.ramfc);
if (ret)
return ret;
/* 0x18800-0x18a00: reserve for RAMRO */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
- &priv->ramro);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
+ &imem->base.ramro);
if (ret)
return ret;
return 0;
}
-struct nvkm_oclass *
-nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
- .base.handle = NV_SUBDEV(INSTMEM, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_instmem_ctor,
- .dtor = nv04_instmem_dtor,
- .init = _nvkm_instmem_init,
- .fini = _nvkm_instmem_fini,
- .rd32 = nv04_instmem_rd32,
- .wr32 = nv04_instmem_wr32,
- },
- .instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv04_instmem_dtor(struct nvkm_instmem *base)
+{
+ struct nv04_instmem *imem = nv04_instmem(base);
+ nvkm_memory_del(&imem->base.ramfc);
+ nvkm_memory_del(&imem->base.ramro);
+ nvkm_ramht_del(&imem->base.ramht);
+ nvkm_memory_del(&imem->base.vbios);
+ nvkm_mm_fini(&imem->heap);
+ return imem;
+}
+
+static const struct nvkm_instmem_func
+nv04_instmem = {
+ .dtor = nv04_instmem_dtor,
+ .oneinit = nv04_instmem_oneinit,
+ .rd32 = nv04_instmem_rd32,
+ .wr32 = nv04_instmem_wr32,
+ .memory_new = nv04_instobj_new,
+ .persistent = false,
+ .zero = false,
+};
+
+int
+nv04_instmem_new(struct nvkm_device *device, int index,
+ struct nvkm_instmem **pimem)
+{
+ struct nv04_instmem *imem;
+
+ if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+ *pimem = &imem->base;
+ return 0;
+}
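
The nv04 hunks above replace nvkm_object subclassing (and its unchecked (void *) casts) with a const function table, nvkm_memory_func, reached through a container_of() downcast macro. Below is a minimal standalone sketch of that vtable-plus-container_of pattern; every name, field, and value in it is an invented stand-in for illustration, not the real nvkm definitions, and the code is not part of this patch.

/*
 * Sketch only: simplified stand-ins for the nvkm_memory/nv04_instobj
 * relationship shown in the hunks above.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct memory;

struct memory_func {
	unsigned (*rd32)(struct memory *, unsigned long long offset);
	void (*wr32)(struct memory *, unsigned long long offset, unsigned data);
};

struct memory {
	const struct memory_func *func;
};

/* Implementation-private object embedding the generic one. */
struct instobj {
	struct memory memory;
	unsigned base;
};

#define instobj(p) container_of((p), struct instobj, memory)

static unsigned
instobj_rd32(struct memory *memory, unsigned long long offset)
{
	/* Downcast via container_of() instead of an unchecked cast. */
	struct instobj *iobj = instobj(memory);
	return iobj->base + (unsigned)offset;
}

static void
instobj_wr32(struct memory *memory, unsigned long long offset, unsigned data)
{
	printf("wr32 %llx <- %08x\n", instobj(memory)->base + offset, data);
}

static const struct memory_func
instobj_func = {
	.rd32 = instobj_rd32,
	.wr32 = instobj_wr32,
};

int
main(void)
{
	struct instobj iobj = { .memory = { &instobj_func }, .base = 0x700000 };
	struct memory *memory = &iobj.memory;

	/* Callers only ever see the generic interface. */
	memory->func->wr32(memory, 0x10, memory->func->rd32(memory, 0x10));
	return 0;
}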
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted file mode 100644
index 42b6c928047c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NV04_INSTMEM_H__
-#define __NV04_INSTMEM_H__
-#include "priv.h"
-
-#include <core/mm.h>
-
-extern struct nvkm_instobj_impl nv04_instobj_oclass;
-
-struct nv04_instmem_priv {
- struct nvkm_instmem base;
-
- void __iomem *iomem;
- struct nvkm_mm heap;
-
- struct nvkm_gpuobj *vbios;
- struct nvkm_ramht *ramht;
- struct nvkm_gpuobj *ramro;
- struct nvkm_gpuobj *ramfc;
-};
-
-static inline struct nv04_instmem_priv *
-nv04_instmem(void *obj)
-{
- return (void *)nvkm_instmem(obj);
-}
-
-struct nv04_instobj_priv {
- struct nvkm_instobj base;
- struct nvkm_mm_node *mem;
-};
-
-void nv04_instmem_dtor(struct nvkm_object *);
-
-int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
- u32 size, u32 align, struct nvkm_object **pobject);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b8588fc0e..c0543875e490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
+#include "priv.h"
+#include <core/memory.h>
#include <core/ramht.h>
#include <engine/gr/nv40.h>
+struct nv40_instmem {
+ struct nvkm_instmem base;
+ struct nvkm_mm heap;
+ void __iomem *iomem;
+};
+
/******************************************************************************
- * instmem subdev implementation
+ * instmem object implementation
*****************************************************************************/
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+
+struct nv40_instobj {
+ struct nvkm_memory memory;
+ struct nv40_instmem *imem;
+ struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
+{
+ return nv40_instobj(memory)->node->offset;
+}
+
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
+{
+ return nv40_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv40_instobj_acquire(struct nvkm_memory *memory)
+{
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ return iobj->imem->iomem + iobj->node->offset;
+}
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
+{
+}
static u32
-nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- struct nv04_instmem_priv *priv = (void *)object;
- return ioread32_native(priv->iomem + addr);
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}
static void
-nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void *
+nv40_instobj_dtor(struct nvkm_memory *memory)
{
- struct nv04_instmem_priv *priv = (void *)object;
- iowrite32_native(data, priv->iomem + addr);
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ mutex_lock(&iobj->imem->base.subdev.mutex);
+ nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+ mutex_unlock(&iobj->imem->base.subdev.mutex);
+ return iobj;
}
+static const struct nvkm_memory_func
+nv40_instobj_func = {
+ .dtor = nv40_instobj_dtor,
+ .target = nv40_instobj_target,
+ .size = nv40_instobj_size,
+ .addr = nv40_instobj_addr,
+ .acquire = nv40_instobj_acquire,
+ .release = nv40_instobj_release,
+ .rd32 = nv40_instobj_rd32,
+ .wr32 = nv40_instobj_wr32,
+};
+
static int
-nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nvkm_device *device = nv_device(parent);
- struct nv04_instmem_priv *priv;
- int ret, bar, vs;
+ struct nv40_instmem *imem = nv40_instmem(base);
+ struct nv40_instobj *iobj;
+ int ret;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pmemory = &iobj->memory;
- /* map bar */
- if (nv_device_resource_len(device, 2))
- bar = 2;
- else
- bar = 3;
+ nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+ iobj->imem = imem;
- priv->iomem = ioremap(nv_device_resource_start(device, bar),
- nv_device_resource_len(device, bar));
- if (!priv->iomem) {
- nv_error(priv, "unable to map PRAMIN BAR\n");
- return -EFAULT;
- }
+ mutex_lock(&imem->base.subdev.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+ align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.subdev.mutex);
+ return ret;
+}
+
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
+{
+ return ioread32_native(nv40_instmem(base)->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
+{
+ iowrite32_native(data, nv40_instmem(base)->iomem + addr);
+}
+
+static int
+nv40_instmem_oneinit(struct nvkm_instmem *base)
+{
+ struct nv40_instmem *imem = nv40_instmem(base);
+ struct nvkm_device *device = imem->base.subdev.device;
+ int ret, vs;
/* PRAMIN aperture maps over the end of vram, reserve enough space
* to fit graphics contexts for every channel, the magics come
* from engine/gr/nv40.c
*/
- vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
- if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
- else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
- else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs;
- else priv->base.reserved = 0x4a40 * vs;
- priv->base.reserved += 16 * 1024;
- priv->base.reserved *= 32; /* per-channel */
- priv->base.reserved += 512 * 1024; /* pci(e)gart table */
- priv->base.reserved += 512 * 1024; /* object storage */
-
- priv->base.reserved = round_up(priv->base.reserved, 4096);
-
- ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
+ if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+ else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
+ else if (nv44_gr_class(device)) imem->base.reserved = 0x4980 * vs;
+ else imem->base.reserved = 0x4a40 * vs;
+ imem->base.reserved += 16 * 1024;
+ imem->base.reserved *= 32; /* per-channel */
+ imem->base.reserved += 512 * 1024; /* pci(e)gart table */
+ imem->base.reserved += 512 * 1024; /* object storage */
+ imem->base.reserved = round_up(imem->base.reserved, 4096);
+
+ ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+ &imem->base.vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+ ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
- &priv->ramro);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
+ &imem->base.ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+ &imem->base.ramfc);
if (ret)
return ret;
return 0;
}
-struct nvkm_oclass *
-nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
- .base.handle = NV_SUBDEV(INSTMEM, 0x40),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_instmem_ctor,
- .dtor = nv04_instmem_dtor,
- .init = _nvkm_instmem_init,
- .fini = _nvkm_instmem_fini,
- .rd32 = nv40_instmem_rd32,
- .wr32 = nv40_instmem_wr32,
- },
- .instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv40_instmem_dtor(struct nvkm_instmem *base)
+{
+ struct nv40_instmem *imem = nv40_instmem(base);
+ nvkm_memory_del(&imem->base.ramfc);
+ nvkm_memory_del(&imem->base.ramro);
+ nvkm_ramht_del(&imem->base.ramht);
+ nvkm_memory_del(&imem->base.vbios);
+ nvkm_mm_fini(&imem->heap);
+ if (imem->iomem)
+ iounmap(imem->iomem);
+ return imem;
+}
+
+static const struct nvkm_instmem_func
+nv40_instmem = {
+ .dtor = nv40_instmem_dtor,
+ .oneinit = nv40_instmem_oneinit,
+ .rd32 = nv40_instmem_rd32,
+ .wr32 = nv40_instmem_wr32,
+ .memory_new = nv40_instobj_new,
+ .persistent = false,
+ .zero = false,
+};
+
+int
+nv40_instmem_new(struct nvkm_device *device, int index,
+ struct nvkm_instmem **pimem)
+{
+ struct nv40_instmem *imem;
+ int bar;
+
+ if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+ *pimem = &imem->base;
+
+ /* map bar */
+ if (device->func->resource_size(device, 2))
+ bar = 2;
+ else
+ bar = 3;
+
+ imem->iomem = ioremap(device->func->resource_addr(device, bar),
+ device->func->resource_size(device, bar));
+ if (!imem->iomem) {
+ nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
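
Both nv04_instobj_new() and nv40_instobj_new() above take the subdev mutex around nvkm_mm_head() and default a zero alignment to 1 ("align ? align : 1"). A toy version of that locked-allocation pattern, using pthreads and a bump allocator in place of nvkm_mm (names and the allocator itself are invented for the demo, and it assumes power-of-two alignment):

/* Sketch only: a mutex-guarded heap standing in for imem->heap. */
#include <pthread.h>
#include <stdio.h>

struct heap {
	pthread_mutex_t mutex;
	unsigned long next;
	unsigned long limit;
};

static int
heap_alloc(struct heap *heap, unsigned long size, unsigned long align,
	   unsigned long *offset)
{
	int ret = -1;

	align = align ? align : 1;	/* mirrors "align ? align : 1" */

	pthread_mutex_lock(&heap->mutex);
	/* Power-of-two alignment assumed, as with hardware apertures. */
	unsigned long base = (heap->next + align - 1) & ~(align - 1);
	if (base + size <= heap->limit) {
		heap->next = base + size;
		*offset = base;
		ret = 0;
	}
	pthread_mutex_unlock(&heap->mutex);
	return ret;
}

int
main(void)
{
	struct heap heap = { PTHREAD_MUTEX_INITIALIZER, 0, 512 * 1024 };
	unsigned long offset;

	if (!heap_alloc(&heap, 0x1000, 0, &offset))
		printf("allocated at %#lx\n", offset);
	return 0;
}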
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 8404143f93ee..6d512c062ae3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,149 +21,229 @@
*
* Authors: Ben Skeggs
*/
+#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"
+#include <core/memory.h>
+#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <subdev/mmu.h>
-struct nv50_instmem_priv {
+struct nv50_instmem {
struct nvkm_instmem base;
+ unsigned long lock_flags;
spinlock_t lock;
u64 addr;
};
-struct nv50_instobj_priv {
- struct nvkm_instobj base;
- struct nvkm_mem *mem;
-};
-
/******************************************************************************
* instmem object implementation
*****************************************************************************/
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
-static u32
-nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
+struct nv50_instobj {
+ struct nvkm_memory memory;
+ struct nv50_instmem *imem;
+ struct nvkm_mem *mem;
+ struct nvkm_vma bar;
+ void *map;
+};
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_VRAM;
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+ return nv50_instobj(memory)->mem->offset;
+}
+
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
+{
+ return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+}
+
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u64 size = nvkm_memory_size(memory);
+ void __iomem *map;
+ int ret;
+
+ iobj->map = ERR_PTR(-ENOMEM);
+
+ ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+ if (ret == 0) {
+ map = ioremap(device->func->resource_addr(device, 3) +
+ (u32)iobj->bar.offset, size);
+ if (map) {
+ nvkm_memory_map(memory, &iobj->bar, 0);
+ iobj->map = map;
+ } else {
+ nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+ nvkm_vm_put(&iobj->bar);
+ }
+ } else {
+ nvkm_warn(subdev, "PRAMIN exhausted\n");
+ }
+}
+
+static void
+nv50_instobj_release(struct nvkm_memory *memory)
{
- struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv50_instobj_priv *node = (void *)object;
+ struct nv50_instmem *imem = nv50_instobj(memory)->imem;
+ spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
+static void __iomem *
+nv50_instobj_acquire(struct nvkm_memory *memory)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_bar *bar = imem->base.subdev.device->bar;
+ struct nvkm_vm *vm;
unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+ if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
+ nvkm_memory_boot(memory, vm);
+ if (!IS_ERR_OR_NULL(iobj->map))
+ return iobj->map;
+
+ spin_lock_irqsave(&imem->lock, flags);
+ imem->lock_flags = flags;
+ return NULL;
+}
+
+static u32
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
u32 data;
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- data = nv_rd32(priv, 0x700000 + addr);
- spin_unlock_irqrestore(&priv->lock, flags);
+ data = nvkm_rd32(device, 0x700000 + addr);
return data;
}
static void
-nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv50_instobj_priv *node = (void *)object;
- unsigned long flags;
- u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+ u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
}
- nv_wr32(priv, 0x700000 + addr, data);
- spin_unlock_irqrestore(&priv->lock, flags);
+ nvkm_wr32(device, 0x700000 + addr, data);
}
static void
-nv50_instobj_dtor(struct nvkm_object *object)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
- struct nv50_instobj_priv *node = (void *)object;
- struct nvkm_fb *pfb = nvkm_fb(object);
- pfb->ram->put(pfb, &node->mem);
- nvkm_instobj_destroy(&node->base);
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ nvkm_vm_map_at(vma, offset, iobj->mem);
}
+static void *
+nv50_instobj_dtor(struct nvkm_memory *memory)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
+ if (!IS_ERR_OR_NULL(iobj->map)) {
+ nvkm_vm_put(&iobj->bar);
+ iounmap(iobj->map);
+ }
+ ram->func->put(ram, &iobj->mem);
+ return iobj;
+}
+
+static const struct nvkm_memory_func
+nv50_instobj_func = {
+ .dtor = nv50_instobj_dtor,
+ .target = nv50_instobj_target,
+ .size = nv50_instobj_size,
+ .addr = nv50_instobj_addr,
+ .boot = nv50_instobj_boot,
+ .acquire = nv50_instobj_acquire,
+ .release = nv50_instobj_release,
+ .rd32 = nv50_instobj_rd32,
+ .wr32 = nv50_instobj_wr32,
+ .map = nv50_instobj_map,
+};
+
static int
-nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+ struct nvkm_memory **pmemory)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_instobj_args *args = data;
- struct nv50_instobj_priv *node;
+ struct nv50_instmem *imem = nv50_instmem(base);
+ struct nv50_instobj *iobj;
+ struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
int ret;
- args->size = max((args->size + 4095) & ~4095, (u32)4096);
- args->align = max((args->align + 4095) & ~4095, (u32)4096);
+ if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+ return -ENOMEM;
+ *pmemory = &iobj->memory;
- ret = nvkm_instobj_create(parent, engine, oclass, &node);
- *pobject = nv_object(node);
- if (ret)
- return ret;
+ nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+ iobj->imem = imem;
+
+ size = max((size + 4095) & ~4095, (u32)4096);
+ align = max((align + 4095) & ~4095, (u32)4096);
- ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+ ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
if (ret)
return ret;
- node->base.addr = node->mem->offset;
- node->base.size = node->mem->size << 12;
- node->mem->page_shift = 12;
+ iobj->mem->page_shift = 12;
return 0;
}
-static struct nvkm_instobj_impl
-nv50_instobj_oclass = {
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_instobj_ctor,
- .dtor = nv50_instobj_dtor,
- .init = _nvkm_instobj_init,
- .fini = _nvkm_instobj_fini,
- .rd32 = nv50_instobj_rd32,
- .wr32 = nv50_instobj_wr32,
- },
-};
-
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
-static int
-nv50_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_instmem_fini(struct nvkm_instmem *base)
{
- struct nv50_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nvkm_instmem_fini(&priv->base, suspend);
+ nv50_instmem(base)->addr = ~0ULL;
}
-static int
-nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_instmem_priv *priv;
- int ret;
+static const struct nvkm_instmem_func
+nv50_instmem = {
+ .fini = nv50_instmem_fini,
+ .memory_new = nv50_instobj_new,
+ .persistent = false,
+ .zero = false,
+};
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+int
+nv50_instmem_new(struct nvkm_device *device, int index,
+ struct nvkm_instmem **pimem)
+{
+ struct nv50_instmem *imem;
- spin_lock_init(&priv->lock);
+ if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+ spin_lock_init(&imem->lock);
+ *pimem = &imem->base;
return 0;
}
-
-struct nvkm_oclass *
-nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
- .base.handle = NV_SUBDEV(INSTMEM, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_instmem_ctor,
- .dtor = _nvkm_instmem_dtor,
- .init = _nvkm_instmem_init,
- .fini = nv50_instmem_fini,
- },
- .instobj = &nv50_instobj_oclass.base,
-}.base;
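
nv50_instobj_rd32()/wr32() above access VRAM through a sliding 1MiB PRAMIN window: the object offset is split into a window base (offset & 0xffffff00000) programmed via register 0x001700 when it changes, and a 20-bit offset (offset & 0xfffff) within the window at 0x700000. A standalone demonstration of just that arithmetic, with a made-up offset:

/* Sketch only: the window-select arithmetic, no hardware access. */
#include <stdio.h>

int
main(void)
{
	unsigned long long vram = 0x12345678ULL;	   /* hypothetical   */
	unsigned long long base = vram & 0xffffff00000ULL; /* window select  */
	unsigned long long addr = vram & 0x000000fffffULL; /* within window  */

	/* The hardware is pointed at the window via base >> 16 (0x001700),
	 * then the word is accessed at PRAMIN (0x700000) + addr. */
	printf("window %#llx (reg value %#llx), access %#llx\n",
	       base, base >> 16, 0x700000 + addr);
	return 0;
}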
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b10e292e5607..ace4471864a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,54 +1,20 @@
#ifndef __NVKM_INSTMEM_PRIV_H__
#define __NVKM_INSTMEM_PRIV_H__
+#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
#include <subdev/instmem.h>
-struct nvkm_instobj_impl {
- struct nvkm_oclass base;
+struct nvkm_instmem_func {
+ void *(*dtor)(struct nvkm_instmem *);
+ int (*oneinit)(struct nvkm_instmem *);
+ void (*fini)(struct nvkm_instmem *);
+ u32 (*rd32)(struct nvkm_instmem *, u32 addr);
+ void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
+ int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
+ bool zero, struct nvkm_memory **);
+ bool persistent;
+ bool zero;
};
-struct nvkm_instobj_args {
- u32 size;
- u32 align;
-};
-
-#define nvkm_instobj_create(p,e,o,d) \
- nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instobj_destroy(p) ({ \
- struct nvkm_instobj *iobj = (p); \
- _nvkm_instobj_dtor(nv_object(iobj)); \
-})
-#define nvkm_instobj_init(p) \
- nvkm_object_init(&(p)->base)
-#define nvkm_instobj_fini(p,s) \
- nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_instobj_dtor(struct nvkm_object *);
-#define _nvkm_instobj_init nvkm_object_init
-#define _nvkm_instobj_fini nvkm_object_fini
-
-struct nvkm_instmem_impl {
- struct nvkm_oclass base;
- struct nvkm_oclass *instobj;
-};
-
-#define nvkm_instmem_create(p,e,o,d) \
- nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instmem_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_instmem_init(p) ({ \
- struct nvkm_instmem *imem = (p); \
- _nvkm_instmem_init(nv_object(imem)); \
-})
-#define nvkm_instmem_fini(p,s) ({ \
- struct nvkm_instmem *imem = (p); \
- _nvkm_instmem_fini(nv_object(imem), (s)); \
-})
-
-int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-#define _nvkm_instmem_dtor _nvkm_subdev_dtor
-int _nvkm_instmem_init(struct nvkm_object *);
-int _nvkm_instmem_fini(struct nvkm_object *, bool);
+void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
+ int index, struct nvkm_instmem *);
#endif
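
The new nvkm_instmem_func above treats several hooks as optional: nv50_instmem, for instance, fills in only .fini and .memory_new, which implies the base code tests a pointer before dispatching (the same "if (mc->func->init)" convention visible in mc/base.c further down). A minimal sketch of that optional-hook convention, with simplified stand-in types:

/* Sketch only: NULL vtable entries are legal and checked before use. */
#include <stdio.h>

struct obj;

struct obj_func {
	int (*oneinit)(struct obj *);	/* optional */
	void (*fini)(struct obj *);	/* optional */
};

struct obj {
	const struct obj_func *func;
};

static int
obj_oneinit(struct obj *obj)
{
	if (obj->func->oneinit)
		return obj->func->oneinit(obj);
	return 0;			/* nothing to do is not an error */
}

static const struct obj_func
no_hooks = { NULL, NULL };

int
main(void)
{
	struct obj obj = { &no_hooks };
	printf("oneinit: %d\n", obj_oneinit(&obj));
	return 0;
}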
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 2fb87fbfd11c..930d25b6e63c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,102 +23,110 @@
*/
#include "priv.h"
-static int
+#include <subdev/fb.h>
+
+int
nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
{
- struct nvkm_ltc_priv *priv = (void *)ltc;
- int ret;
-
- ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
+ int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
if (ret)
*pnode = NULL;
-
return ret;
}
-static void
+void
nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
{
- struct nvkm_ltc_priv *priv = (void *)ltc;
- nvkm_mm_free(&priv->tags, pnode);
+ nvkm_mm_free(&ltc->tags, pnode);
}
-static void
+void
nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
{
- const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
- struct nvkm_ltc_priv *priv = (void *)ltc;
const u32 limit = first + count - 1;
- BUG_ON((first > limit) || (limit >= priv->num_tags));
+ BUG_ON((first > limit) || (limit >= ltc->num_tags));
- impl->cbc_clear(priv, first, limit);
- impl->cbc_wait(priv);
+ ltc->func->cbc_clear(ltc, first, limit);
+ ltc->func->cbc_wait(ltc);
}
-static int
+int
nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
{
- const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
- struct nvkm_ltc_priv *priv = (void *)ltc;
- memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
- impl->zbc_clear_color(priv, index, color);
+ memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
+ ltc->func->zbc_clear_color(ltc, index, color);
return index;
}
-static int
+int
nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
{
- const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
- struct nvkm_ltc_priv *priv = (void *)ltc;
- priv->zbc_depth[index] = depth;
- impl->zbc_clear_depth(priv, index, depth);
+ ltc->zbc_depth[index] = depth;
+ ltc->func->zbc_clear_depth(ltc, index, depth);
return index;
}
-int
-_nvkm_ltc_init(struct nvkm_object *object)
+static void
+nvkm_ltc_intr(struct nvkm_subdev *subdev)
{
- const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
- struct nvkm_ltc_priv *priv = (void *)object;
- int ret, i;
+ struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+ ltc->func->intr(ltc);
+}
- ret = nvkm_subdev_init(&priv->base.base);
- if (ret)
- return ret;
+static int
+nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+ return ltc->func->oneinit(ltc);
+}
+
+static int
+nvkm_ltc_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+ int i;
- for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
- impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
- impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+ for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+ ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
+ ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
}
+ ltc->func->init(ltc);
return 0;
}
+static void *
+nvkm_ltc_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+ struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+ nvkm_mm_fini(&ltc->tags);
+ if (ram)
+ nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+ return ltc;
+}
+
+static const struct nvkm_subdev_func
+nvkm_ltc = {
+ .dtor = nvkm_ltc_dtor,
+ .oneinit = nvkm_ltc_oneinit,
+ .init = nvkm_ltc_init,
+ .intr = nvkm_ltc_intr,
+};
+
int
-nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_ltc **pltc)
{
- const struct nvkm_ltc_impl *impl = (void *)oclass;
- struct nvkm_ltc_priv *priv;
- int ret;
+ struct nvkm_ltc *ltc;
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG",
- "l2c", length, pobject);
- priv = *pobject;
- if (ret)
- return ret;
-
- memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
- memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
-
- priv->base.base.intr = impl->intr;
- priv->base.tags_alloc = nvkm_ltc_tags_alloc;
- priv->base.tags_free = nvkm_ltc_tags_free;
- priv->base.tags_clear = nvkm_ltc_tags_clear;
- priv->base.zbc_min = 1; /* reserve 0 for disabled */
- priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
- priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
- priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+ if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+ ltc->func = func;
+ ltc->zbc_min = 1; /* reserve 0 for disabled */
+ ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 7fb5ea0314cb..45ac765b753e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -28,38 +28,47 @@
#include <subdev/timer.h>
void
-gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
- nv_wr32(priv, 0x17e8cc, start);
- nv_wr32(priv, 0x17e8d0, limit);
- nv_wr32(priv, 0x17e8c8, 0x00000004);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_wr32(device, 0x17e8cc, start);
+ nvkm_wr32(device, 0x17e8d0, limit);
+ nvkm_wr32(device, 0x17e8c8, 0x00000004);
}
void
-gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
+ struct nvkm_device *device = ltc->subdev.device;
int c, s;
- for (c = 0; c < priv->ltc_nr; c++) {
- for (s = 0; s < priv->lts_nr; s++)
- nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+ for (c = 0; c < ltc->ltc_nr; c++) {
+ for (s = 0; s < ltc->lts_nr; s++) {
+ const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, addr))
+ break;
+ );
+ }
}
}
void
-gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
- nv_mask(priv, 0x17ea44, 0x0000000f, i);
- nv_wr32(priv, 0x17ea48, color[0]);
- nv_wr32(priv, 0x17ea4c, color[1]);
- nv_wr32(priv, 0x17ea50, color[2]);
- nv_wr32(priv, 0x17ea54, color[3]);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+ nvkm_wr32(device, 0x17ea48, color[0]);
+ nvkm_wr32(device, 0x17ea4c, color[1]);
+ nvkm_wr32(device, 0x17ea50, color[2]);
+ nvkm_wr32(device, 0x17ea54, color[3]);
}
void
-gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
- nv_mask(priv, 0x17ea44, 0x0000000f, i);
- nv_wr32(priv, 0x17ea58, depth);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+ nvkm_wr32(device, 0x17ea58, depth);
}
static const struct nvkm_bitfield
@@ -81,88 +90,60 @@ gf100_ltc_lts_intr_name[] = {
};
static void
-gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
{
- u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
- u32 intr = nv_rd32(priv, base + 0x020);
+ struct nvkm_subdev *subdev = &ltc->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
+ u32 intr = nvkm_rd32(device, base + 0x020);
u32 stat = intr & 0x0000ffff;
+ char msg[128];
if (stat) {
- nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
- nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
- pr_cont("\n");
+ nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+ nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
}
- nv_wr32(priv, base + 0x020, intr);
+ nvkm_wr32(device, base + 0x020, intr);
}
void
-gf100_ltc_intr(struct nvkm_subdev *subdev)
+gf100_ltc_intr(struct nvkm_ltc *ltc)
{
- struct nvkm_ltc_priv *priv = (void *)subdev;
+ struct nvkm_device *device = ltc->subdev.device;
u32 mask;
- mask = nv_rd32(priv, 0x00017c);
+ mask = nvkm_rd32(device, 0x00017c);
while (mask) {
- u32 lts, ltc = __ffs(mask);
- for (lts = 0; lts < priv->lts_nr; lts++)
- gf100_ltc_lts_intr(priv, ltc, lts);
- mask &= ~(1 << ltc);
+ u32 s, c = __ffs(mask);
+ for (s = 0; s < ltc->lts_nr; s++)
+ gf100_ltc_lts_intr(ltc, c, s);
+ mask &= ~(1 << c);
}
}
-static int
-gf100_ltc_init(struct nvkm_object *object)
-{
- struct nvkm_ltc_priv *priv = (void *)object;
- u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
- int ret;
-
- ret = nvkm_ltc_init(priv);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
- nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
- nv_wr32(priv, 0x17e8d4, priv->tag_base);
- nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
- return 0;
-}
-
-void
-gf100_ltc_dtor(struct nvkm_object *object)
-{
- struct nvkm_fb *pfb = nvkm_fb(object);
- struct nvkm_ltc_priv *priv = (void *)object;
-
- nvkm_mm_fini(&priv->tags);
- if (pfb->ram)
- nvkm_mm_free(&pfb->vram, &priv->tag_ram);
-
- nvkm_ltc_destroy(priv);
-}
-
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
-gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
+gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
+ struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
u32 tag_size, tag_margin, tag_align;
int ret;
/* No VRAM, no tags for now. */
- if (!pfb->ram) {
- priv->num_tags = 0;
+ if (!ram) {
+ ltc->num_tags = 0;
goto mm_init;
}
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
- priv->num_tags = (pfb->ram->size >> 17) / 4;
- if (priv->num_tags > (1 << 17))
- priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
- priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+ ltc->num_tags = (ram->size >> 17) / 4;
+ if (ltc->num_tags > (1 << 17))
+ ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+ ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
- tag_align = priv->ltc_nr * 0x800;
+ tag_align = ltc->ltc_nr * 0x800;
tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
/* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -173,72 +154,71 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
*
* For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
*/
- tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin;
+ tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
- &priv->tag_ram);
+ ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
+ &ltc->tag_ram);
if (ret) {
- priv->num_tags = 0;
+ ltc->num_tags = 0;
} else {
- u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
+ u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
tag_base += tag_align - 1;
- ret = do_div(tag_base, tag_align);
+ do_div(tag_base, tag_align);
- priv->tag_base = tag_base;
+ ltc->tag_base = tag_base;
}
mm_init:
- ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
- return ret;
+ return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
}
int
-gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+gf100_ltc_oneinit(struct nvkm_ltc *ltc)
{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ltc_priv *priv;
- u32 parts, mask;
- int ret, i;
-
- ret = nvkm_ltc_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- parts = nv_rd32(priv, 0x022438);
- mask = nv_rd32(priv, 0x022554);
+ struct nvkm_device *device = ltc->subdev.device;
+ const u32 parts = nvkm_rd32(device, 0x022438);
+ const u32 mask = nvkm_rd32(device, 0x022554);
+ const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
+ int i;
+
for (i = 0; i < parts; i++) {
if (!(mask & (1 << i)))
- priv->ltc_nr++;
+ ltc->ltc_nr++;
}
- priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+ ltc->lts_nr = slice;
+
+ return gf100_ltc_oneinit_tag_ram(ltc);
+}
- ret = gf100_ltc_init_tag_ram(pfb, priv);
- if (ret)
- return ret;
+static void
+gf100_ltc_init(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
- nv_subdev(priv)->intr = gf100_ltc_intr;
- return 0;
+ nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+ nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
-struct nvkm_oclass *
-gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
- .base.handle = NV_SUBDEV(LTC, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ltc_ctor,
- .dtor = gf100_ltc_dtor,
- .init = gf100_ltc_init,
- .fini = _nvkm_ltc_fini,
- },
+static const struct nvkm_ltc_func
+gf100_ltc = {
+ .oneinit = gf100_ltc_oneinit,
+ .init = gf100_ltc_init,
.intr = gf100_ltc_intr,
.cbc_clear = gf100_ltc_cbc_clear,
.cbc_wait = gf100_ltc_cbc_wait,
.zbc = 16,
.zbc_clear_color = gf100_ltc_zbc_clear_color,
.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+}
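
gf100_ltc_cbc_wait() above replaces nv_wait() with the nvkm_msec() idiom: poll a register until it reads zero or a 2000ms budget expires. A self-contained approximation of that loop, where clock_gettime() and a stub read stand in for the device timer and MMIO (both invented for the demo):

/* Sketch only: poll-until-zero with a timeout, userspace stand-ins. */
#include <stdio.h>
#include <time.h>

static unsigned
rd32_stub(unsigned addr)
{
	(void)addr;
	return 0;			/* pretend the unit drained */
}

static int
wait_idle(unsigned addr, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!rd32_stub(addr))	/* same condition as cbc_wait */
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
			       (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed > timeout_ms)
			return -1;
	}
}

int
main(void)
{
	printf("idle: %d\n", wait_idle(0x1410c8, 2000));
	return 0;
}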
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index d53959b5ec67..839e6b4c597b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -23,37 +23,32 @@
*/
#include "priv.h"
-static int
-gk104_ltc_init(struct nvkm_object *object)
+static void
+gk104_ltc_init(struct nvkm_ltc *ltc)
{
- struct nvkm_ltc_priv *priv = (void *)object;
- u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
- int ret;
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
- ret = nvkm_ltc_init(priv);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
- nv_wr32(priv, 0x17e000, priv->ltc_nr);
- nv_wr32(priv, 0x17e8d4, priv->tag_base);
- nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
- return 0;
+ nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+ nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
-struct nvkm_oclass *
-gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
- .base.handle = NV_SUBDEV(LTC, 0xe4),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_ltc_ctor,
- .dtor = gf100_ltc_dtor,
- .init = gk104_ltc_init,
- .fini = _nvkm_ltc_fini,
- },
+static const struct nvkm_ltc_func
+gk104_ltc = {
+ .oneinit = gf100_ltc_oneinit,
+ .init = gk104_ltc_init,
.intr = gf100_ltc_intr,
.cbc_clear = gf100_ltc_cbc_clear,
.cbc_wait = gf100_ltc_cbc_wait,
.zbc = 16,
.zbc_clear_color = gf100_ltc_zbc_clear_color,
.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 6b3f6f4ce107..389331bb63ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -27,127 +27,121 @@
#include <subdev/timer.h>
static void
-gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
- nv_wr32(priv, 0x17e270, start);
- nv_wr32(priv, 0x17e274, limit);
- nv_wr32(priv, 0x17e26c, 0x00000004);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_wr32(device, 0x17e270, start);
+ nvkm_wr32(device, 0x17e274, limit);
+ nvkm_wr32(device, 0x17e26c, 0x00000004);
}
static void
-gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
+ struct nvkm_device *device = ltc->subdev.device;
int c, s;
- for (c = 0; c < priv->ltc_nr; c++) {
- for (s = 0; s < priv->lts_nr; s++)
- nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+ for (c = 0; c < ltc->ltc_nr; c++) {
+ for (s = 0; s < ltc->lts_nr; s++) {
+ const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, addr))
+ break;
+ );
+ }
}
}
static void
-gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
- nv_mask(priv, 0x17e338, 0x0000000f, i);
- nv_wr32(priv, 0x17e33c, color[0]);
- nv_wr32(priv, 0x17e340, color[1]);
- nv_wr32(priv, 0x17e344, color[2]);
- nv_wr32(priv, 0x17e348, color[3]);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_mask(device, 0x17e338, 0x0000000f, i);
+ nvkm_wr32(device, 0x17e33c, color[0]);
+ nvkm_wr32(device, 0x17e340, color[1]);
+ nvkm_wr32(device, 0x17e344, color[2]);
+ nvkm_wr32(device, 0x17e348, color[3]);
}
static void
-gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
- nv_mask(priv, 0x17e338, 0x0000000f, i);
- nv_wr32(priv, 0x17e34c, depth);
+ struct nvkm_device *device = ltc->subdev.device;
+ nvkm_mask(device, 0x17e338, 0x0000000f, i);
+ nvkm_wr32(device, 0x17e34c, depth);
}
static void
-gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
{
- u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
- u32 stat = nv_rd32(priv, base + 0x00c);
+ struct nvkm_subdev *subdev = &ltc->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+ u32 stat = nvkm_rd32(device, base + 0x00c);
if (stat) {
- nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
- nv_wr32(priv, base + 0x00c, stat);
+ nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
+ nvkm_wr32(device, base + 0x00c, stat);
}
}
static void
-gm107_ltc_intr(struct nvkm_subdev *subdev)
+gm107_ltc_intr(struct nvkm_ltc *ltc)
{
- struct nvkm_ltc_priv *priv = (void *)subdev;
+ struct nvkm_device *device = ltc->subdev.device;
u32 mask;
- mask = nv_rd32(priv, 0x00017c);
+ mask = nvkm_rd32(device, 0x00017c);
while (mask) {
- u32 lts, ltc = __ffs(mask);
- for (lts = 0; lts < priv->lts_nr; lts++)
- gm107_ltc_lts_isr(priv, ltc, lts);
- mask &= ~(1 << ltc);
+ u32 s, c = __ffs(mask);
+ for (s = 0; s < ltc->lts_nr; s++)
+ gm107_ltc_lts_isr(ltc, c, s);
+ mask &= ~(1 << c);
}
}
static int
-gm107_ltc_init(struct nvkm_object *object)
+gm107_ltc_oneinit(struct nvkm_ltc *ltc)
{
- struct nvkm_ltc_priv *priv = (void *)object;
- u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
- int ret;
-
- ret = nvkm_ltc_init(priv);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x17e27c, priv->ltc_nr);
- nv_wr32(priv, 0x17e278, priv->tag_base);
- nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
- return 0;
-}
+ struct nvkm_device *device = ltc->subdev.device;
+ const u32 parts = nvkm_rd32(device, 0x022438);
+ const u32 mask = nvkm_rd32(device, 0x021c14);
+ const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
+ int i;
-static int
-gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_fb *pfb = nvkm_fb(parent);
- struct nvkm_ltc_priv *priv;
- u32 parts, mask;
- int ret, i;
-
- ret = nvkm_ltc_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- parts = nv_rd32(priv, 0x022438);
- mask = nv_rd32(priv, 0x021c14);
for (i = 0; i < parts; i++) {
if (!(mask & (1 << i)))
- priv->ltc_nr++;
+ ltc->ltc_nr++;
}
- priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
+ ltc->lts_nr = slice;
+
+ return gf100_ltc_oneinit_tag_ram(ltc);
+}
- ret = gf100_ltc_init_tag_ram(pfb, priv);
- if (ret)
- return ret;
+static void
+gm107_ltc_init(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
- return 0;
+ nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e278, ltc->tag_base);
+ nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
-struct nvkm_oclass *
-gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
- .base.handle = NV_SUBDEV(LTC, 0xff),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm107_ltc_ctor,
- .dtor = gf100_ltc_dtor,
- .init = gm107_ltc_init,
- .fini = _nvkm_ltc_fini,
- },
+static const struct nvkm_ltc_func
+gm107_ltc = {
+ .oneinit = gm107_ltc_oneinit,
+ .init = gm107_ltc_init,
.intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 09537d7b6783..4e05037cc99f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,69 +1,29 @@
#ifndef __NVKM_LTC_PRIV_H__
#define __NVKM_LTC_PRIV_H__
+#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
#include <subdev/ltc.h>
-#include <core/mm.h>
-struct nvkm_fb;
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
+ int index, struct nvkm_ltc **);
-struct nvkm_ltc_priv {
- struct nvkm_ltc base;
- u32 ltc_nr;
- u32 lts_nr;
+struct nvkm_ltc_func {
+ int (*oneinit)(struct nvkm_ltc *);
+ void (*init)(struct nvkm_ltc *);
+ void (*intr)(struct nvkm_ltc *);
- u32 num_tags;
- u32 tag_base;
- struct nvkm_mm tags;
- struct nvkm_mm_node *tag_ram;
-
- u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
- u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
-};
-
-#define nvkm_ltc_create(p,e,o,d) \
- nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_ltc_destroy(p) ({ \
- struct nvkm_ltc_priv *_priv = (p); \
- _nvkm_ltc_dtor(nv_object(_priv)); \
-})
-#define nvkm_ltc_init(p) ({ \
- struct nvkm_ltc_priv *_priv = (p); \
- _nvkm_ltc_init(nv_object(_priv)); \
-})
-#define nvkm_ltc_fini(p,s) ({ \
- struct nvkm_ltc_priv *_priv = (p); \
- _nvkm_ltc_fini(nv_object(_priv), (s)); \
-})
-
-int nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-
-#define _nvkm_ltc_dtor _nvkm_subdev_dtor
-int _nvkm_ltc_init(struct nvkm_object *);
-#define _nvkm_ltc_fini _nvkm_subdev_fini
-
-int gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void gf100_ltc_dtor(struct nvkm_object *);
-int gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
-int gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
-void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-
-struct nvkm_ltc_impl {
- struct nvkm_oclass base;
- void (*intr)(struct nvkm_subdev *);
-
- void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
- void (*cbc_wait)(struct nvkm_ltc_priv *);
+ void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit);
+ void (*cbc_wait)(struct nvkm_ltc *);
int zbc;
- void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
- void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+ void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
+ void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
};
-void gf100_ltc_intr(struct nvkm_subdev *);
-void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
-void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
-void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
-void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+int gf100_ltc_oneinit(struct nvkm_ltc *);
+int gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *);
+void gf100_ltc_intr(struct nvkm_ltc *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 721643f04bb5..bef325dcb4d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,11 +1,7 @@
nvkm-y += nvkm/subdev/mc/base.o
nvkm-y += nvkm/subdev/mc/nv04.o
-nvkm-y += nvkm/subdev/mc/nv40.o
nvkm-y += nvkm/subdev/mc/nv44.o
-nvkm-y += nvkm/subdev/mc/nv4c.o
nvkm-y += nvkm/subdev/mc/nv50.o
-nvkm-y += nvkm/subdev/mc/g94.o
nvkm-y += nvkm/subdev/mc/g98.o
nvkm-y += nvkm/subdev/mc/gf100.o
-nvkm-y += nvkm/subdev/mc/gf106.o
nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 5b051a26653e..954fbbe56c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -23,147 +23,101 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <core/option.h>
-static inline void
-nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data)
+void
+nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
{
- const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc);
- if (impl->unk260)
- impl->unk260(pmc, data);
+ if (mc->func->unk260)
+ mc->func->unk260(mc, data);
}
-static inline u32
-nvkm_mc_intr_mask(struct nvkm_mc *pmc)
+void
+nvkm_mc_intr_unarm(struct nvkm_mc *mc)
{
- u32 intr = nv_rd32(pmc, 0x000100);
- if (intr == 0xffffffff) /* likely fallen off the bus */
- intr = 0x00000000;
- return intr;
+ return mc->func->intr_unarm(mc);
}
-static irqreturn_t
-nvkm_mc_intr(int irq, void *arg)
+void
+nvkm_mc_intr_rearm(struct nvkm_mc *mc)
{
- struct nvkm_mc *pmc = arg;
- const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
- const struct nvkm_mc_intr *map = oclass->intr;
- struct nvkm_subdev *unit;
- u32 intr;
+ return mc->func->intr_rearm(mc);
+}
- nv_wr32(pmc, 0x000140, 0x00000000);
- nv_rd32(pmc, 0x000140);
- intr = nvkm_mc_intr_mask(pmc);
- if (pmc->use_msi)
- oclass->msi_rearm(pmc);
+static u32
+nvkm_mc_intr_mask(struct nvkm_mc *mc)
+{
+ u32 intr = mc->func->intr_mask(mc);
+ if (WARN_ON_ONCE(intr == 0xffffffff))
+ intr = 0; /* likely fallen off the bus */
+ return intr;
+}
- if (intr) {
- u32 stat = intr = nvkm_mc_intr_mask(pmc);
- while (map->stat) {
- if (intr & map->stat) {
- unit = nvkm_subdev(pmc, map->unit);
- if (unit && unit->intr)
- unit->intr(unit);
- stat &= ~map->stat;
- }
- map++;
+void
+nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_subdev *subdev;
+ const struct nvkm_mc_intr *map = mc->func->intr;
+ u32 stat, intr;
+
+ stat = intr = nvkm_mc_intr_mask(mc);
+ while (map->stat) {
+ if (intr & map->stat) {
+ subdev = nvkm_device_subdev(device, map->unit);
+ if (subdev)
+ nvkm_subdev_intr(subdev);
+ stat &= ~map->stat;
}
-
- if (stat)
- nv_error(pmc, "unknown intr 0x%08x\n", stat);
+ map++;
}
- nv_wr32(pmc, 0x000140, 0x00000001);
- return intr ? IRQ_HANDLED : IRQ_NONE;
+ if (stat)
+ nvkm_error(&mc->subdev, "intr %08x\n", stat);
+ *handled = intr != 0;
}
-int
-_nvkm_mc_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_mc *pmc = (void *)object;
- nv_wr32(pmc, 0x000140, 0x00000000);
- return nvkm_subdev_fini(&pmc->base, suspend);
+ struct nvkm_mc *mc = nvkm_mc(subdev);
+ nvkm_mc_intr_unarm(mc);
+ return 0;
}
-int
-_nvkm_mc_init(struct nvkm_object *object)
+static int
+nvkm_mc_init(struct nvkm_subdev *subdev)
{
- struct nvkm_mc *pmc = (void *)object;
- int ret = nvkm_subdev_init(&pmc->base);
- if (ret)
- return ret;
- nv_wr32(pmc, 0x000140, 0x00000001);
+ struct nvkm_mc *mc = nvkm_mc(subdev);
+ if (mc->func->init)
+ mc->func->init(mc);
+ nvkm_mc_intr_rearm(mc);
return 0;
}
-void
-_nvkm_mc_dtor(struct nvkm_object *object)
+static void *
+nvkm_mc_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_device *device = nv_device(object);
- struct nvkm_mc *pmc = (void *)object;
- free_irq(pmc->irq, pmc);
- if (pmc->use_msi)
- pci_disable_msi(device->pdev);
- nvkm_subdev_destroy(&pmc->base);
+ return nvkm_mc(subdev);
}
+static const struct nvkm_subdev_func
+nvkm_mc = {
+ .dtor = nvkm_mc_dtor,
+ .init = nvkm_mc_init,
+ .fini = nvkm_mc_fini,
+};
+
int
-nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *bclass, int length, void **pobject)
+nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc **pmc)
{
- const struct nvkm_mc_oclass *oclass = (void *)bclass;
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_mc *pmc;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
- "master", length, pobject);
- pmc = *pobject;
- if (ret)
- return ret;
-
- pmc->unk260 = nvkm_mc_unk260;
-
- if (nv_device_is_pci(device)) {
- switch (device->pdev->device & 0x0ff0) {
- case 0x00f0:
- case 0x02e0:
- /* BR02? NFI how these would be handled yet exactly */
- break;
- default:
- switch (device->chipset) {
- case 0xaa:
- /* reported broken, nv also disable it */
- break;
- default:
- pmc->use_msi = true;
- break;
- }
- }
-
- pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
- pmc->use_msi);
-
- if (pmc->use_msi && oclass->msi_rearm) {
- pmc->use_msi = pci_enable_msi(device->pdev) == 0;
- if (pmc->use_msi) {
- nv_info(pmc, "MSI interrupts enabled\n");
- oclass->msi_rearm(pmc);
- }
- } else {
- pmc->use_msi = false;
- }
- }
-
- ret = nv_device_get_irq(device, true);
- if (ret < 0)
- return ret;
- pmc->irq = ret;
+ struct nvkm_mc *mc;
- ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc);
- if (ret < 0)
- return ret;
+ if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+ mc->func = func;
return 0;
}
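
nvkm_mc_intr() above keeps the table-driven dispatch: read the pending mask once into both stat and intr, walk a { bitmask, unit } table (terminated by an empty entry, as in g98_mc_intr[] below), clear each matched bit from stat, and report whatever is left as unknown. A standalone sketch of that walk, with invented masks and unit numbers:

/* Sketch only: table-driven interrupt fan-out with leftover reporting. */
#include <stdio.h>

struct mc_intr {
	unsigned stat;
	int unit;
};

static const struct mc_intr
map[] = {
	{ 0x00000100, 1 },	/* e.g. FIFO */
	{ 0x00001000, 2 },	/* e.g. GR */
	{},			/* terminator, matching the tables above */
};

int
main(void)
{
	unsigned stat, intr;
	const struct mc_intr *p;

	stat = intr = 0x00001104;	/* pretend pending status */
	for (p = map; p->stat; p++) {
		if (intr & p->stat) {
			printf("dispatch unit %d\n", p->unit);
			stat &= ~p->stat;
		}
	}
	if (stat)
		printf("unknown intr %08x\n", stat);
	return 0;
}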
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 8ab7f1272a14..7344ad659105 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,38 +21,40 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
static const struct nvkm_mc_intr
g98_mc_intr[] = {
- { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
- { 0x00000001, NVDEV_ENGINE_MSPPP },
- { 0x00000100, NVDEV_ENGINE_FIFO },
- { 0x00001000, NVDEV_ENGINE_GR },
- { 0x00004000, NVDEV_ENGINE_SEC }, /* NV84:NVA3 */
- { 0x00008000, NVDEV_ENGINE_MSVLD },
- { 0x00020000, NVDEV_ENGINE_MSPDEC },
- { 0x00040000, NVDEV_SUBDEV_PMU }, /* NVA3:NVC0 */
- { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
- { 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x00400000, NVDEV_ENGINE_CE0 }, /* NVA3- */
- { 0x10000000, NVDEV_SUBDEV_BUS },
- { 0x80000000, NVDEV_ENGINE_SW },
- { 0x0042d101, NVDEV_SUBDEV_FB },
+ { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
+ { 0x00000001, NVKM_ENGINE_MSPPP },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00004000, NVKM_ENGINE_SEC }, /* NV84:NVA3 */
+ { 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00020000, NVKM_ENGINE_MSPDEC },
+ { 0x00040000, NVKM_SUBDEV_PMU }, /* NVA3:NVC0 */
+ { 0x00080000, NVKM_SUBDEV_THERM }, /* NVA3:NVC0 */
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
+ { 0x00400000, NVKM_ENGINE_CE0 }, /* NVA3- */
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x80000000, NVKM_ENGINE_SW },
+ { 0x0042d101, NVKM_SUBDEV_FB },
{},
};
-struct nvkm_oclass *
-g98_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x98),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
+static const struct nvkm_mc_func
+g98_mc = {
+ .init = nv50_mc_init,
.intr = g98_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
-}.base;
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+};
+
+int
+g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&g98_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 2425984b045e..122fe69e83e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,56 +21,77 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
const struct nvkm_mc_intr
gf100_mc_intr[] = {
- { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
- { 0x00000001, NVDEV_ENGINE_MSPPP },
- { 0x00000020, NVDEV_ENGINE_CE0 },
- { 0x00000040, NVDEV_ENGINE_CE1 },
- { 0x00000080, NVDEV_ENGINE_CE2 },
- { 0x00000100, NVDEV_ENGINE_FIFO },
- { 0x00001000, NVDEV_ENGINE_GR },
- { 0x00002000, NVDEV_SUBDEV_FB },
- { 0x00008000, NVDEV_ENGINE_MSVLD },
- { 0x00040000, NVDEV_SUBDEV_THERM },
- { 0x00020000, NVDEV_ENGINE_MSPDEC },
- { 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x01000000, NVDEV_SUBDEV_PMU },
- { 0x02000000, NVDEV_SUBDEV_LTC },
- { 0x08000000, NVDEV_SUBDEV_FB },
- { 0x10000000, NVDEV_SUBDEV_BUS },
- { 0x40000000, NVDEV_SUBDEV_IBUS },
- { 0x80000000, NVDEV_ENGINE_SW },
+ { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
+ { 0x00000001, NVKM_ENGINE_MSPPP },
+ { 0x00000020, NVKM_ENGINE_CE0 },
+ { 0x00000040, NVKM_ENGINE_CE1 },
+ { 0x00000080, NVKM_ENGINE_CE2 },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00002000, NVKM_SUBDEV_FB },
+ { 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00040000, NVKM_SUBDEV_THERM },
+ { 0x00020000, NVKM_ENGINE_MSPDEC },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
+ { 0x01000000, NVKM_SUBDEV_PMU },
+ { 0x02000000, NVKM_SUBDEV_LTC },
+ { 0x08000000, NVKM_SUBDEV_FB },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x80000000, NVKM_ENGINE_SW },
{},
};
-static void
-gf100_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+gf100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000140, 0x00000000);
+ nvkm_wr32(device, 0x000144, 0x00000000);
+ nvkm_rd32(device, 0x000140);
+}
+
+void
+gf100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000140, 0x00000001);
+ nvkm_wr32(device, 0x000144, 0x00000001);
+}
+
+u32
+gf100_mc_intr_mask(struct nvkm_mc *mc)
{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr32(priv, 0x088704, 0x00000000);
+ struct nvkm_device *device = mc->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x000100);
+ u32 intr1 = nvkm_rd32(device, 0x000104);
+ return intr0 | intr1;
}
void
-gf100_mc_unk260(struct nvkm_mc *pmc, u32 data)
+gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
{
- nv_wr32(pmc, 0x000260, data);
+ nvkm_wr32(mc->subdev.device, 0x000260, data);
}
-struct nvkm_oclass *
-gf100_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
+static const struct nvkm_mc_func
+gf100_mc = {
+ .init = nv50_mc_init,
.intr = gf100_mc_intr,
- .msi_rearm = gf100_mc_msi_rearm,
+ .intr_unarm = gf100_mc_intr_unarm,
+ .intr_rearm = gf100_mc_intr_rearm,
+ .intr_mask = gf100_mc_intr_mask,
.unk260 = gf100_mc_unk260,
-}.base;
+};
+
+int
+gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 43b27742956d..d92efb33bcc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,17 +21,19 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
-struct nvkm_oclass *
-gk20a_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
+static const struct nvkm_mc_func
+gk20a_mc = {
+ .init = nv50_mc_init,
.intr = gf100_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
-}.base;
+ .intr_unarm = gf100_mc_intr_unarm,
+ .intr_rearm = gf100_mc_intr_rearm,
+ .intr_mask = gf100_mc_intr_mask,
+};
+
+int
+gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 32713827b4dc..d282ec1555f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,58 +21,63 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
const struct nvkm_mc_intr
nv04_mc_intr[] = {
- { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
- { 0x00000100, NVDEV_ENGINE_FIFO },
- { 0x00001000, NVDEV_ENGINE_GR },
- { 0x00010000, NVDEV_ENGINE_DISP },
- { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
- { 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
- { 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
- { 0x10000000, NVDEV_SUBDEV_BUS },
- { 0x80000000, NVDEV_ENGINE_SW },
+ { 0x00000001, NVKM_ENGINE_MPEG }, /* NV17- MPEG/ME */
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00010000, NVKM_ENGINE_DISP },
+ { 0x00020000, NVKM_ENGINE_VP }, /* NV40- */
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x01000000, NVKM_ENGINE_DISP }, /* NV04- PCRTC0 */
+ { 0x02000000, NVKM_ENGINE_DISP }, /* NV11- PCRTC1 */
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x80000000, NVKM_ENGINE_SW },
{}
};
-int
-nv04_mc_init(struct nvkm_object *object)
+void
+nv04_mc_intr_unarm(struct nvkm_mc *mc)
{
- struct nv04_mc_priv *priv = (void *)object;
-
- nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
- nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
-
- return nvkm_mc_init(&priv->base);
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000140, 0x00000000);
+ nvkm_rd32(device, 0x000140);
}
-int
-nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+void
+nv04_mc_intr_rearm(struct nvkm_mc *mc)
{
- struct nv04_mc_priv *priv;
- int ret;
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000140, 0x00000001);
+}
- ret = nvkm_mc_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+u32
+nv04_mc_intr_mask(struct nvkm_mc *mc)
+{
+ return nvkm_rd32(mc->subdev.device, 0x000100);
+}
- return 0;
+void
+nv04_mc_init(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
+ nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
}
-struct nvkm_oclass *
-nv04_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x04),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv04_mc_init,
- .fini = _nvkm_mc_fini,
- },
+static const struct nvkm_mc_func
+nv04_mc = {
+ .init = nv04_mc_init,
.intr = nv04_mc_intr,
-}.base;
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
deleted file mode 100644
index 411de3d08ab6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NVKM_MC_NV04_H__
-#define __NVKM_MC_NV04_H__
-#include "priv.h"
-
-struct nv04_mc_priv {
- struct nvkm_mc base;
-};
-
-int nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-
-extern const struct nvkm_mc_intr nv04_mc_intr[];
-int nv04_mc_init(struct nvkm_object *);
-void nv40_mc_msi_rearm(struct nvkm_mc *);
-int nv44_mc_init(struct nvkm_object *object);
-int nv50_mc_init(struct nvkm_object *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
-extern const struct nvkm_mc_intr gf100_mc_intr[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 2c7f7c701a2b..9a3ac9965be0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,33 +21,33 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
-int
-nv44_mc_init(struct nvkm_object *object)
+void
+nv44_mc_init(struct nvkm_mc *mc)
{
- struct nv04_mc_priv *priv = (void *)object;
- u32 tmp = nv_rd32(priv, 0x10020c);
-
- nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+ struct nvkm_device *device = mc->subdev.device;
+ u32 tmp = nvkm_rd32(device, 0x10020c);
- nv_wr32(priv, 0x001700, tmp);
- nv_wr32(priv, 0x001704, 0);
- nv_wr32(priv, 0x001708, 0);
- nv_wr32(priv, 0x00170c, tmp);
+ nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
- return nvkm_mc_init(&priv->base);
+ nvkm_wr32(device, 0x001700, tmp);
+ nvkm_wr32(device, 0x001704, 0);
+ nvkm_wr32(device, 0x001708, 0);
+ nvkm_wr32(device, 0x00170c, tmp);
}
-struct nvkm_oclass *
-nv44_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x44),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv44_mc_init,
- .fini = _nvkm_mc_fini,
- },
+static const struct nvkm_mc_func
+nv44_mc = {
+ .init = nv44_mc_init,
.intr = nv04_mc_intr,
- .msi_rearm = nv40_mc_msi_rearm,
-}.base;
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 40e3019e1fde..5f27d7b8fddd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,52 +21,44 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
const struct nvkm_mc_intr
nv50_mc_intr[] = {
- { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
- { 0x00000001, NVDEV_ENGINE_MPEG },
- { 0x00000100, NVDEV_ENGINE_FIFO },
- { 0x00001000, NVDEV_ENGINE_GR },
- { 0x00004000, NVDEV_ENGINE_CIPHER }, /* NV84- */
- { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
- { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
- { 0x00100000, NVDEV_SUBDEV_TIMER },
- { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x10000000, NVDEV_SUBDEV_BUS },
- { 0x80000000, NVDEV_ENGINE_SW },
- { 0x0002d101, NVDEV_SUBDEV_FB },
+ { 0x04000000, NVKM_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
+ { 0x00000001, NVKM_ENGINE_MPEG },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00004000, NVKM_ENGINE_CIPHER }, /* NV84- */
+ { 0x00008000, NVKM_ENGINE_BSP }, /* NV84- */
+ { 0x00020000, NVKM_ENGINE_VP }, /* NV84- */
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
+ { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x80000000, NVKM_ENGINE_SW },
+ { 0x0002d101, NVKM_SUBDEV_FB },
{},
};
-static void
-nv50_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+nv50_mc_init(struct nvkm_mc *mc)
{
- struct nvkm_device *device = nv_device(pmc);
- pci_write_config_byte(device->pdev, 0x68, 0xff);
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
}
+static const struct nvkm_mc_func
+nv50_mc = {
+ .init = nv50_mc_init,
+ .intr = nv50_mc_intr,
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+};
+
int
-nv50_mc_init(struct nvkm_object *object)
+nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
- struct nv04_mc_priv *priv = (void *)object;
- nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
- return nvkm_mc_init(&priv->base);
+ return nvkm_mc_new_(&nv50_mc, device, index, pmc);
}
-
-struct nvkm_oclass *
-nv50_mc_oclass = &(struct nvkm_mc_oclass) {
- .base.handle = NV_SUBDEV(MC, 0x50),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mc_ctor,
- .dtor = _nvkm_mc_dtor,
- .init = nv50_mc_init,
- .fini = _nvkm_mc_fini,
- },
- .intr = nv50_mc_intr,
- .msi_rearm = nv50_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d2cad07afd1a..307f6c692287 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,36 +1,42 @@
#ifndef __NVKM_MC_PRIV_H__
#define __NVKM_MC_PRIV_H__
+#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
-#define nvkm_mc_create(p,e,o,d) \
- nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mc_destroy(p) ({ \
- struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc)); \
-})
-#define nvkm_mc_init(p) ({ \
- struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc)); \
-})
-#define nvkm_mc_fini(p,s) ({ \
- struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s)); \
-})
-
-int nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
-void _nvkm_mc_dtor(struct nvkm_object *);
-int _nvkm_mc_init(struct nvkm_object *);
-int _nvkm_mc_fini(struct nvkm_object *, bool);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
+ int index, struct nvkm_mc **);
struct nvkm_mc_intr {
u32 stat;
u32 unit;
};
-struct nvkm_mc_oclass {
- struct nvkm_oclass base;
+struct nvkm_mc_func {
+ void (*init)(struct nvkm_mc *);
const struct nvkm_mc_intr *intr;
- void (*msi_rearm)(struct nvkm_mc *);
+ /* disable reporting of interrupts to host */
+ void (*intr_unarm)(struct nvkm_mc *);
+ /* enable reporting of interrupts to host */
+ void (*intr_rearm)(struct nvkm_mc *);
+ /* retrieve pending interrupt mask (NV_PMC_INTR) */
+ u32 (*intr_mask)(struct nvkm_mc *);
void (*unk260)(struct nvkm_mc *, u32);
};
+void nv04_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv04_mc_intr[];
+void nv04_mc_intr_unarm(struct nvkm_mc *);
+void nv04_mc_intr_rearm(struct nvkm_mc *);
+u32 nv04_mc_intr_mask(struct nvkm_mc *);
+
+void nv44_mc_init(struct nvkm_mc *);
+
+void nv50_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv50_mc_intr[];
+
+extern const struct nvkm_mc_intr gf100_mc_intr[];
+void gf100_mc_intr_unarm(struct nvkm_mc *);
+void gf100_mc_intr_rearm(struct nvkm_mc *);
+u32 gf100_mc_intr_mask(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec04e24..e04a2296ecd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,10 +21,10 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/mmu.h>
-#include <subdev/fb.h>
+#include "priv.h"
#include <core/gpuobj.h>
+#include <subdev/fb.h>
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
@@ -32,12 +32,12 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_mm_node *r;
- int big = vma->node->type != mmu->spg_shift;
+ int big = vma->node->type != mmu->func->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
- u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
delta = 0;
@@ -46,14 +46,14 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
u32 num = r->length >> bits;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
end = max;
len = end - pte;
- mmu->map(vma, pgt, node, pte, len, phys, delta);
+ mmu->func->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@@ -67,7 +67,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
}
}
- mmu->flush(vm);
+ mmu->func->flush(vm);
}
static void
@@ -76,20 +76,20 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
{
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->spg_shift;
+ int big = vma->node->type != mmu->func->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->func->pgt_bits - bits);
unsigned m, sglen;
u32 end, len;
int i;
struct scatterlist *sg;
for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
sglen = sg_dma_len(sg) >> PAGE_SHIFT;
end = pte + sglen;
@@ -100,7 +100,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
for (m = 0; m < len; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+ mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
num--;
pte++;
@@ -115,7 +115,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
for (; m < sglen; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+ mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
num--;
pte++;
if (num == 0)
@@ -125,7 +125,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
}
finish:
- mmu->flush(vm);
+ mmu->func->flush(vm);
}
static void
@@ -135,24 +135,24 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
dma_addr_t *list = mem->pages;
- int big = vma->node->type != mmu->spg_shift;
+ int big = vma->node->type != mmu->func->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
end = max;
len = end - pte;
- mmu->map_sg(vma, pgt, mem, pte, len, list);
+ mmu->func->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
@@ -163,7 +163,7 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
}
}
- mmu->flush(vm);
+ mmu->func->flush(vm);
}
void
@@ -183,24 +183,24 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->spg_shift;
+ int big = vma->node->type != mmu->func->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->pgt_bits - bits);
+ u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (mmu->func->pgt_bits - bits);
u32 end, len;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
end = max;
len = end - pte;
- mmu->unmap(pgt, pte, len);
+ mmu->func->unmap(vma, pgt, pte, len);
num -= len;
pte += len;
@@ -210,7 +210,7 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
}
}
- mmu->flush(vm);
+ mmu->func->flush(vm);
}
void
@@ -225,7 +225,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgd *vpgd;
struct nvkm_vm_pgt *vpgt;
- struct nvkm_gpuobj *pgt;
+ struct nvkm_memory *pgt;
u32 pde;
for (pde = fpde; pde <= lpde; pde++) {
@@ -233,16 +233,14 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
if (--vpgt->refcount[big])
continue;
- pgt = vpgt->obj[big];
- vpgt->obj[big] = NULL;
+ pgt = vpgt->mem[big];
+ vpgt->mem[big] = NULL;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
- mutex_unlock(&nv_subdev(mmu)->mutex);
- nvkm_gpuobj_ref(NULL, &pgt);
- mutex_lock(&nv_subdev(mmu)->mutex);
+ nvkm_memory_del(&pgt);
}
}
@@ -252,34 +250,23 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
struct nvkm_vm_pgd *vpgd;
- struct nvkm_gpuobj *pgt;
- int big = (type != mmu->spg_shift);
+ int big = (type != mmu->func->spg_shift);
u32 pgt_size;
int ret;
- pgt_size = (1 << (mmu->pgt_bits + 12)) >> type;
+ pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
pgt_size *= 8;
- mutex_unlock(&nv_subdev(mmu)->mutex);
- ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &pgt);
- mutex_lock(&nv_subdev(mmu)->mutex);
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ pgt_size, 0x1000, true, &vpgt->mem[big]);
if (unlikely(ret))
return ret;
- /* someone beat us to filling the PDE while we didn't have the lock */
- if (unlikely(vpgt->refcount[big]++)) {
- mutex_unlock(&nv_subdev(mmu)->mutex);
- nvkm_gpuobj_ref(NULL, &pgt);
- mutex_lock(&nv_subdev(mmu)->mutex);
- return 0;
- }
-
- vpgt->obj[big] = pgt;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
+ vpgt->refcount[big]++;
return 0;
}
@@ -293,20 +280,20 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
u32 fpde, lpde, pde;
int ret;
- mutex_lock(&nv_subdev(mmu)->mutex);
+ mutex_lock(&vm->mutex);
ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
&vma->node);
if (unlikely(ret != 0)) {
- mutex_unlock(&nv_subdev(mmu)->mutex);
+ mutex_unlock(&vm->mutex);
return ret;
}
- fpde = (vma->node->offset >> mmu->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+ fpde = (vma->node->offset >> mmu->func->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
for (pde = fpde; pde <= lpde; pde++) {
struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != mmu->spg_shift);
+ int big = (vma->node->type != mmu->func->spg_shift);
if (likely(vpgt->refcount[big])) {
vpgt->refcount[big]++;
@@ -318,11 +305,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
if (pde != fpde)
nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&nv_subdev(mmu)->mutex);
+ mutex_unlock(&vm->mutex);
return ret;
}
}
- mutex_unlock(&nv_subdev(mmu)->mutex);
+ mutex_unlock(&vm->mutex);
vma->vm = NULL;
nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -334,27 +321,49 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
void
nvkm_vm_put(struct nvkm_vma *vma)
{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_mmu *mmu;
+ struct nvkm_vm *vm;
u32 fpde, lpde;
if (unlikely(vma->node == NULL))
return;
- fpde = (vma->node->offset >> mmu->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+ vm = vma->vm;
+ mmu = vm->mmu;
+
+ fpde = (vma->node->offset >> mmu->func->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
- mutex_lock(&nv_subdev(mmu)->mutex);
- nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+ mutex_lock(&vm->mutex);
+ nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&nv_subdev(mmu)->mutex);
+ mutex_unlock(&vm->mutex);
nvkm_vm_ref(NULL, &vma->vm, NULL);
}
int
+nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+{
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_memory *pgt;
+ int ret;
+
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
+ if (ret == 0) {
+ vm->pgt[0].refcount[0] = 1;
+ vm->pgt[0].mem[0] = pgt;
+ nvkm_memory_boot(pgt, vm);
+ }
+
+ return ret;
+}
+
+int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- u32 block, struct nvkm_vm **pvm)
+ u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
+ static struct lock_class_key _key;
struct nvkm_vm *vm;
u64 mm_length = (offset + length) - mm_offset;
int ret;
@@ -363,11 +372,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
if (!vm)
return -ENOMEM;
+ __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
INIT_LIST_HEAD(&vm->pgd_list);
vm->mmu = mmu;
kref_init(&vm->refcount);
- vm->fpde = offset >> (mmu->pgt_bits + 12);
- vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);
+ vm->fpde = offset >> (mmu->func->pgt_bits + 12);
+ vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
if (!vm->pgt) {
@@ -390,10 +400,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
- struct nvkm_vm **pvm)
+ struct lock_class_key *key, struct nvkm_vm **pvm)
{
- struct nvkm_mmu *mmu = nvkm_mmu(device);
- return mmu->create(mmu, offset, length, mm_offset, pvm);
+ struct nvkm_mmu *mmu = device->mmu;
+ if (!mmu->func->create)
+ return -EINVAL;
+ return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}
static int
@@ -410,38 +422,33 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
if (!vpgd)
return -ENOMEM;
- nvkm_gpuobj_ref(pgd, &vpgd->obj);
+ vpgd->obj = pgd;
- mutex_lock(&nv_subdev(mmu)->mutex);
+ mutex_lock(&vm->mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
- mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&nv_subdev(mmu)->mutex);
+ mutex_unlock(&vm->mutex);
return 0;
}
static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
- struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgd *vpgd, *tmp;
- struct nvkm_gpuobj *pgd = NULL;
if (!mpgd)
return;
- mutex_lock(&nv_subdev(mmu)->mutex);
+ mutex_lock(&vm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
if (vpgd->obj == mpgd) {
- pgd = vpgd->obj;
list_del(&vpgd->head);
kfree(vpgd);
break;
}
}
- mutex_unlock(&nv_subdev(mmu)->mutex);
-
- nvkm_gpuobj_ref(NULL, &pgd);
+ mutex_unlock(&vm->mutex);
}
static void
@@ -478,3 +485,58 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
*ptr = ref;
return 0;
}
+
+static int
+nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ if (mmu->func->oneinit)
+ return mmu->func->oneinit(mmu);
+ return 0;
+}
+
+static int
+nvkm_mmu_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ if (mmu->func->init)
+ mmu->func->init(mmu);
+ return 0;
+}
+
+static void *
+nvkm_mmu_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ if (mmu->func->dtor)
+ return mmu->func->dtor(mmu);
+ return mmu;
+}
+
+static const struct nvkm_subdev_func
+nvkm_mmu = {
+ .dtor = nvkm_mmu_dtor,
+ .oneinit = nvkm_mmu_oneinit,
+ .init = nvkm_mmu_init,
+};
+
+void
+nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mmu *mmu)
+{
+ nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+ mmu->func = func;
+ mmu->limit = func->limit;
+ mmu->dma_bits = func->dma_bits;
+ mmu->lpg_shift = func->lpg_shift;
+}
+
+int
+nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mmu **pmmu)
+{
+ if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mmu_ctor(func, device, index, *pmmu);
+ return 0;
+}
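
The pde/pte arithmetic that recurs throughout this file is easier to follow with concrete numbers. A worked sketch, assuming the gf100 parameters introduced later in this series (pgt_bits = 27 - 12, 4 KiB small pages) and ignoring the vm->fpde bias and the per-mapping "bits" shift, which is zero for small pages:

/* Worked example of the PDE/PTE split used by nvkm_vm_map_at. */
#include <stdio.h>

int
main(void)
{
	const unsigned int pgt_bits = 27 - 12; /* one PDE spans 2^27 bytes */
	unsigned long long va = 0x48001000ULL; /* example virtual address */
	unsigned int offset = va >> 12;        /* VA in 4 KiB pages */
	unsigned int pde = offset >> pgt_bits;
	unsigned int pte = offset & ((1 << pgt_bits) - 1);

	/* 0x48001000 >> 12 = 0x48001; pde = 0x48001 >> 15 = 9;
	 * pte = 0x48001 & 0x7fff = 1 */
	printf("va %#llx -> pde %u, pte %u\n", va, pde, pte);
	return 0;
}
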
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda37f068..7ac507c927bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,19 +21,14 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <core/gpuobj.h>
-struct gf100_mmu_priv {
- struct nvkm_mmu base;
-};
-
-
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
@@ -75,17 +70,19 @@ const u8 gf100_pte_storage_type_map[256] =
static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
{
u32 pde[2] = { 0, 0 };
if (pgt[0])
- pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+ pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
if (pgt[1])
- pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+ pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
- nv_wo32(pgd, (index * 8) + 0, pde[0]);
- nv_wo32(pgd, (index * 8) + 4, pde[1]);
+ nvkm_kmap(pgd);
+ nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
+ nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
+ nvkm_done(pgd);
}
static inline u64
@@ -103,7 +100,7 @@ gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
}
static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u64 next = 1 << (vma->node->type - 8);
@@ -112,126 +109,113 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
pte <<= 3;
if (mem->tag) {
- struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
+ struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
u32 tag = mem->tag->offset + (delta >> 17);
phys |= (u64)tag << (32 + 12);
next |= (u64)1 << (32 + 12);
- ltc->tags_clear(ltc, tag, cnt);
+ nvkm_ltc_tags_clear(ltc, tag, cnt);
}
+ nvkm_kmap(pgt);
while (cnt--) {
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
phys += next;
pte += 8;
}
+ nvkm_done(pgt);
}
static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
/* compressed storage types are invalid for system memory */
u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
+ nvkm_kmap(pgt);
pte <<= 3;
while (cnt--) {
u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
}
+ nvkm_done(pgt);
}
static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
+ nvkm_kmap(pgt);
pte <<= 3;
while (cnt--) {
- nv_wo32(pgt, pte + 0, 0x00000000);
- nv_wo32(pgt, pte + 4, 0x00000000);
+ nvkm_wo32(pgt, pte + 0, 0x00000000);
+ nvkm_wo32(pgt, pte + 4, 0x00000000);
pte += 8;
}
+ nvkm_done(pgt);
}
static void
gf100_vm_flush(struct nvkm_vm *vm)
{
- struct gf100_mmu_priv *priv = (void *)vm->mmu;
- struct nvkm_bar *bar = nvkm_bar(priv);
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_device *device = mmu->subdev.device;
struct nvkm_vm_pgd *vpgd;
u32 type;
- bar->flush(bar);
-
type = 0x00000001; /* PAGE_ALL */
- if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
+ if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
- mutex_lock(&nv_subdev(priv)->mutex);
+ mutex_lock(&mmu->subdev.mutex);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
/* looks like maybe a "free flush slots" counter, the
	 * faster you write to 0x100cbc the more it decreases
*/
- if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
- nv_error(priv, "vm timeout 0: 0x%08x %d\n",
- nv_rd32(priv, 0x100c80), type);
- }
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+ break;
+ );
- nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
- nv_wr32(priv, 0x100cbc, 0x80000000 | type);
+ nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+ nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* wait for flush to be queued? */
- if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
- nv_error(priv, "vm timeout 1: 0x%08x %d\n",
- nv_rd32(priv, 0x100c80), type);
- }
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
}
- mutex_unlock(&nv_subdev(priv)->mutex);
+ mutex_unlock(&mmu->subdev.mutex);
}
static int
gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- struct nvkm_vm **pvm)
+ struct lock_class_key *key, struct nvkm_vm **pvm)
{
- return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
+ return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
}
-static int
-gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_mmu_func
+gf100_mmu = {
+ .limit = (1ULL << 40),
+ .dma_bits = 40,
+ .pgt_bits = 27 - 12,
+ .spg_shift = 12,
+ .lpg_shift = 17,
+ .create = gf100_vm_create,
+ .map_pgt = gf100_vm_map_pgt,
+ .map = gf100_vm_map,
+ .map_sg = gf100_vm_map_sg,
+ .unmap = gf100_vm_unmap,
+ .flush = gf100_vm_flush,
+};
+
+int
+gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- struct gf100_mmu_priv *priv;
- int ret;
-
- ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.limit = 1ULL << 40;
- priv->base.dma_bits = 40;
- priv->base.pgt_bits = 27 - 12;
- priv->base.spg_shift = 12;
- priv->base.lpg_shift = 17;
- priv->base.create = gf100_vm_create;
- priv->base.map_pgt = gf100_vm_map_pgt;
- priv->base.map = gf100_vm_map;
- priv->base.map_sg = gf100_vm_map_sg;
- priv->base.unmap = gf100_vm_unmap;
- priv->base.flush = gf100_vm_flush;
- return 0;
+ return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
}
-
-struct nvkm_oclass
-gf100_mmu_oclass = {
- .handle = NV_SUBDEV(MMU, 0xc0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf100_mmu_ctor,
- .dtor = _nvkm_mmu_dtor,
- .init = _nvkm_mmu_init,
- .fini = _nvkm_mmu_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index fe93ea2711c9..37927c3fdc3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -23,7 +23,6 @@
*/
#include "nv04.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
@@ -34,30 +33,34 @@
******************************************************************************/
static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = 0x00008 + (pte * 4);
+ nvkm_kmap(pgt);
while (cnt) {
u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
u32 phys = (u32)*list++;
while (cnt && page--) {
- nv_wo32(pgt, pte, phys | 3);
+ nvkm_wo32(pgt, pte, phys | 3);
phys += NV04_PDMA_PAGE;
pte += 4;
cnt -= 1;
}
}
+ nvkm_done(pgt);
}
static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte = 0x00008 + (pte * 4);
+ nvkm_kmap(pgt);
while (cnt--) {
- nv_wo32(pgt, pte, 0x00000000);
+ nvkm_wo32(pgt, pte, 0x00000000);
pte += 4;
}
+ nvkm_done(pgt);
}
static void
@@ -66,86 +69,81 @@ nv04_vm_flush(struct nvkm_vm *vm)
}
/*******************************************************************************
- * VM object
- ******************************************************************************/
-
-int
-nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
- struct nvkm_vm **pvm)
-{
- return -EINVAL;
-}
-
-/*******************************************************************************
* MMU subdev
******************************************************************************/
static int
-nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_mmu_oneinit(struct nvkm_mmu *base)
{
- struct nv04_mmu_priv *priv;
- struct nvkm_gpuobj *dma;
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ struct nvkm_memory *dma;
int ret;
- ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
- "pcigart", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.create = nv04_vm_create;
- priv->base.limit = NV04_PDMA_SIZE;
- priv->base.dma_bits = 32;
- priv->base.pgt_bits = 32 - 12;
- priv->base.spg_shift = 12;
- priv->base.lpg_shift = 12;
- priv->base.map_sg = nv04_vm_map_sg;
- priv->base.unmap = nv04_vm_unmap;
- priv->base.flush = nv04_vm_flush;
-
- ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
- &priv->vm);
+ ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
+ &mmu->vm);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
- dma = priv->vm->pgt[0].obj[0];
- priv->vm->pgt[0].refcount[0] = 1;
+ 16, true, &dma);
+ mmu->vm->pgt[0].mem[0] = dma;
+ mmu->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
- nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
- nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+ nvkm_kmap(dma);
+ nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+ nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+ nvkm_done(dma);
return 0;
}
-void
-nv04_mmu_dtor(struct nvkm_object *object)
+void *
+nv04_mmu_dtor(struct nvkm_mmu *base)
{
- struct nv04_mmu_priv *priv = (void *)object;
- if (priv->vm) {
- nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
- nvkm_vm_ref(NULL, &priv->vm, NULL);
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ if (mmu->vm) {
+ nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
+ nvkm_vm_ref(NULL, &mmu->vm, NULL);
}
- if (priv->nullp) {
- pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
- priv->nullp, priv->null);
+ if (mmu->nullp) {
+ dma_free_coherent(device->dev, 16 * 1024,
+ mmu->nullp, mmu->null);
}
- nvkm_mmu_destroy(&priv->base);
+ return mmu;
+}
+
+int
+nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mmu **pmmu)
+{
+ struct nv04_mmu *mmu;
+ if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
+ return -ENOMEM;
+ *pmmu = &mmu->base;
+ nvkm_mmu_ctor(func, device, index, &mmu->base);
+ return 0;
}
-struct nvkm_oclass
-nv04_mmu_oclass = {
- .handle = NV_SUBDEV(MMU, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_mmu_ctor,
- .dtor = nv04_mmu_dtor,
- .init = _nvkm_mmu_init,
- .fini = _nvkm_mmu_fini,
- },
+const struct nvkm_mmu_func
+nv04_mmu = {
+ .oneinit = nv04_mmu_oneinit,
+ .dtor = nv04_mmu_dtor,
+ .limit = NV04_PDMA_SIZE,
+ .dma_bits = 32,
+ .pgt_bits = 32 - 12,
+ .spg_shift = 12,
+ .lpg_shift = 12,
+ .map_sg = nv04_vm_map_sg,
+ .unmap = nv04_vm_unmap,
+ .flush = nv04_vm_flush,
};
+
+int
+nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+}
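
A sketch of the PDMA table layout the nv04 code above builds: an 8-byte DMA-object header written at oneinit, then one 32-bit PTE per 4 KiB page starting at offset 0x00008, each stored as phys | 3. The array and values here are illustrative stand-ins, not the real instmem object:

#include <stdio.h>

#define NV04_PDMA_PAGE 0x1000

int
main(void)
{
	unsigned int table[16] = { 0 };
	unsigned int phys = 0x02000000;       /* example bus address */
	unsigned int pte = 2;                 /* third page of the GART */
	unsigned int off = (0x00008 + pte * 4) / 4;

	table[0] = 0x0002103d;                /* PCI, RW, PT, !LN */
	table[1] = (128u << 20) - 1;          /* NV04_PDMA_SIZE - 1 */
	table[off] = phys | 3;                /* present + RW */

	printf("word %u = %#x\n", off, table[off]);
	return 0;
}
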
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
index 7bf6f4b38f1d..363e33b296d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -1,19 +1,18 @@
#ifndef __NV04_MMU_PRIV__
#define __NV04_MMU_PRIV__
+#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
+#include "priv.h"
-#include <subdev/mmu.h>
-
-struct nv04_mmu_priv {
+struct nv04_mmu {
struct nvkm_mmu base;
struct nvkm_vm *vm;
dma_addr_t null;
void *nullp;
};
-static inline struct nv04_mmu_priv *
-nv04_mmu(void *obj)
-{
- return (void *)nvkm_mmu(obj);
-}
+int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+ int index, struct nvkm_mmu **);
+void *nv04_mmu_dtor(struct nvkm_mmu *);
+extern const struct nvkm_mmu_func nv04_mmu;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab11660..c6a26f907009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
*/
#include "nv04.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>
@@ -36,45 +35,50 @@
******************************************************************************/
static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
+ nvkm_kmap(pgt);
while (cnt) {
u32 page = PAGE_SIZE / NV41_GART_PAGE;
u64 phys = (u64)*list++;
while (cnt && page--) {
- nv_wo32(pgt, pte, (phys >> 7) | 1);
+ nvkm_wo32(pgt, pte, (phys >> 7) | 1);
phys += NV41_GART_PAGE;
pte += 4;
cnt -= 1;
}
}
+ nvkm_done(pgt);
}
static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
+ nvkm_kmap(pgt);
while (cnt--) {
- nv_wo32(pgt, pte, 0x00000000);
+ nvkm_wo32(pgt, pte, 0x00000000);
pte += 4;
}
+ nvkm_done(pgt);
}
static void
nv41_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_mmu_priv *priv = (void *)vm->mmu;
-
- mutex_lock(&nv_subdev(priv)->mutex);
- nv_wr32(priv, 0x100810, 0x00000022);
- if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
- nv_warn(priv, "flush timeout, 0x%08x\n",
- nv_rd32(priv, 0x100810));
- }
- nv_wr32(priv, 0x100810, 0x00000000);
- mutex_unlock(&nv_subdev(priv)->mutex);
+ struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+ struct nvkm_device *device = mmu->base.subdev.device;
+
+ mutex_lock(&mmu->base.subdev.mutex);
+ nvkm_wr32(device, 0x100810, 0x00000022);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100810) & 0x00000020)
+ break;
+ );
+ nvkm_wr32(device, 0x100810, 0x00000000);
+ mutex_unlock(&mmu->base.subdev.mutex);
}
/*******************************************************************************
@@ -82,76 +86,56 @@ nv41_vm_flush(struct nvkm_vm *vm)
******************************************************************************/
static int
-nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv41_mmu_oneinit(struct nvkm_mmu *base)
{
- struct nvkm_device *device = nv_device(parent);
- struct nv04_mmu_priv *priv;
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
int ret;
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
- !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
- return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
- data, size, pobject);
- }
-
- ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
- "pciegart", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.create = nv04_vm_create;
- priv->base.limit = NV41_GART_SIZE;
- priv->base.dma_bits = 39;
- priv->base.pgt_bits = 32 - 12;
- priv->base.spg_shift = 12;
- priv->base.lpg_shift = 12;
- priv->base.map_sg = nv41_vm_map_sg;
- priv->base.unmap = nv41_vm_unmap;
- priv->base.flush = nv41_vm_flush;
-
- ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
- &priv->vm);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(nv_object(priv), NULL,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
- priv->vm->pgt[0].refcount[0] = 1;
+ ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
+ &mmu->vm);
if (ret)
return ret;
- return 0;
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+ &mmu->vm->pgt[0].mem[0]);
+ mmu->vm->pgt[0].refcount[0] = 1;
+ return ret;
}
-static int
-nv41_mmu_init(struct nvkm_object *object)
+static void
+nv41_mmu_init(struct nvkm_mmu *base)
{
- struct nv04_mmu_priv *priv = (void *)object;
- struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
- int ret;
-
- ret = nvkm_mmu_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
- nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
- nv_wr32(priv, 0x100820, 0x00000000);
- return 0;
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+ nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+ nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x100820, 0x00000000);
}
-struct nvkm_oclass
-nv41_mmu_oclass = {
- .handle = NV_SUBDEV(MMU, 0x41),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv41_mmu_ctor,
- .dtor = nv04_mmu_dtor,
- .init = nv41_mmu_init,
- .fini = _nvkm_mmu_fini,
- },
+static const struct nvkm_mmu_func
+nv41_mmu = {
+ .dtor = nv04_mmu_dtor,
+ .oneinit = nv41_mmu_oneinit,
+ .init = nv41_mmu_init,
+ .limit = NV41_GART_SIZE,
+ .dma_bits = 39,
+ .pgt_bits = 32 - 12,
+ .spg_shift = 12,
+ .lpg_shift = 12,
+ .map_sg = nv41_vm_map_sg,
+ .unmap = nv41_vm_unmap,
+ .flush = nv41_vm_flush,
};
+
+int
+nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->type == NVKM_DEVICE_AGP ||
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+ return nv04_mmu_new(device, index, pmmu);
+
+ return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+}
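
The nv41 GART entry format above stores the bus address shifted down by 7 with bit 0 as a present bit. A small round-trip check of that encoding, with an example page-aligned address assumed:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned long long phys = 0x12345000ULL;
	unsigned int pte = (unsigned int)(phys >> 7) | 1;
	unsigned long long back = (unsigned long long)(pte & ~1u) << 7;

	assert(back == phys);
	printf("pte %#x -> phys %#llx\n", pte, back);
	return 0;
}
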
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index b90ded1887aa..a648c2395545 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -23,7 +23,6 @@
*/
#include "nv04.h"
-#include <core/device.h>
#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>
@@ -36,16 +35,16 @@
******************************************************************************/
static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
dma_addr_t *list, u32 pte, u32 cnt)
{
u32 base = (pte << 2) & ~0x0000000f;
u32 tmp[4];
- tmp[0] = nv_ro32(pgt, base + 0x0);
- tmp[1] = nv_ro32(pgt, base + 0x4);
- tmp[2] = nv_ro32(pgt, base + 0x8);
- tmp[3] = nv_ro32(pgt, base + 0xc);
+ tmp[0] = nvkm_ro32(pgt, base + 0x0);
+ tmp[1] = nvkm_ro32(pgt, base + 0x4);
+ tmp[2] = nvkm_ro32(pgt, base + 0x8);
+ tmp[3] = nvkm_ro32(pgt, base + 0xc);
while (cnt--) {
u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -75,24 +74,25 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
}
}
- nv_wo32(pgt, base + 0x0, tmp[0]);
- nv_wo32(pgt, base + 0x4, tmp[1]);
- nv_wo32(pgt, base + 0x8, tmp[2]);
- nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+ nvkm_wo32(pgt, base + 0x0, tmp[0]);
+ nvkm_wo32(pgt, base + 0x4, tmp[1]);
+ nvkm_wo32(pgt, base + 0x8, tmp[2]);
+ nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
- struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
+ struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
u32 tmp[4];
int i;
+ nvkm_kmap(pgt);
if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, priv->null, list, pte, part);
+ nv44_vm_fill(pgt, mmu->null, list, pte, part);
pte += part;
list += part;
cnt -= part;
@@ -101,51 +101,57 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
while (cnt >= 4) {
for (i = 0; i < 4; i++)
tmp[i] = *list++ >> 12;
- nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
- nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
- nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
- nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+ nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
cnt -= 4;
}
if (cnt)
- nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+ nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+ nvkm_done(pgt);
}
static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
- struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
+ struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
+ nvkm_kmap(pgt);
if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+ nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
pte += part;
cnt -= part;
}
while (cnt >= 4) {
- nv_wo32(pgt, pte++ * 4, 0x00000000);
- nv_wo32(pgt, pte++ * 4, 0x00000000);
- nv_wo32(pgt, pte++ * 4, 0x00000000);
- nv_wo32(pgt, pte++ * 4, 0x00000000);
+ nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+ nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+ nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+ nvkm_wo32(pgt, pte++ * 4, 0x00000000);
cnt -= 4;
}
if (cnt)
- nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+ nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+ nvkm_done(pgt);
}
static void
nv44_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_mmu_priv *priv = (void *)vm->mmu;
- nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
- nv_wr32(priv, 0x100808, 0x00000020);
- if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
- nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
- nv_wr32(priv, 0x100808, 0x00000000);
+ struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+ nvkm_wr32(device, 0x100808, 0x00000020);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
+ nvkm_wr32(device, 0x100808, 0x00000000);
}
/*******************************************************************************
@@ -153,95 +159,78 @@ nv44_vm_flush(struct nvkm_vm *vm)
******************************************************************************/
static int
-nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv44_mmu_oneinit(struct nvkm_mmu *base)
{
- struct nvkm_device *device = nv_device(parent);
- struct nv04_mmu_priv *priv;
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
int ret;
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
- !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
- return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
- data, size, pobject);
- }
-
- ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
- "pciegart", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.create = nv04_vm_create;
- priv->base.limit = NV44_GART_SIZE;
- priv->base.dma_bits = 39;
- priv->base.pgt_bits = 32 - 12;
- priv->base.spg_shift = 12;
- priv->base.lpg_shift = 12;
- priv->base.map_sg = nv44_vm_map_sg;
- priv->base.unmap = nv44_vm_unmap;
- priv->base.flush = nv44_vm_flush;
-
- priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
- if (!priv->nullp) {
- nv_error(priv, "unable to allocate dummy pages\n");
- return -ENOMEM;
+ mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+ &mmu->null, GFP_KERNEL);
+ if (!mmu->nullp) {
+ nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
+ mmu->null = 0;
}
- ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
- &priv->vm);
+ ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
+ &mmu->vm);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
- 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->vm->pgt[0].obj[0]);
- priv->vm->pgt[0].refcount[0] = 1;
- if (ret)
- return ret;
-
- return 0;
+ 512 * 1024, true,
+ &mmu->vm->pgt[0].mem[0]);
+ mmu->vm->pgt[0].refcount[0] = 1;
+ return ret;
}
-static int
-nv44_mmu_init(struct nvkm_object *object)
+static void
+nv44_mmu_init(struct nvkm_mmu *base)
{
- struct nv04_mmu_priv *priv = (void *)object;
- struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
+ struct nv04_mmu *mmu = nv04_mmu(base);
+ struct nvkm_device *device = mmu->base.subdev.device;
+ struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
u32 addr;
- int ret;
-
- ret = nvkm_mmu_init(&priv->base);
- if (ret)
- return ret;
/* calculate vram address of this PRAMIN block, object must be
* allocated on 512KiB alignment, and not exceed a total size
* of 512KiB for this to work correctly
*/
- addr = nv_rd32(priv, 0x10020c);
- addr -= ((gart->addr >> 19) + 1) << 19;
-
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100818, priv->null);
- nv_wr32(priv, 0x100804, NV44_GART_SIZE);
- nv_wr32(priv, 0x100850, 0x00008000);
- nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
- nv_wr32(priv, 0x100820, 0x00000000);
- nv_wr32(priv, 0x10082c, 0x00000001);
- nv_wr32(priv, 0x100800, addr | 0x00000010);
- return 0;
+ addr = nvkm_rd32(device, 0x10020c);
+ addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+
+ nvkm_wr32(device, 0x100850, 0x80000000);
+ nvkm_wr32(device, 0x100818, mmu->null);
+ nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+ nvkm_wr32(device, 0x100850, 0x00008000);
+ nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+ nvkm_wr32(device, 0x100820, 0x00000000);
+ nvkm_wr32(device, 0x10082c, 0x00000001);
+ nvkm_wr32(device, 0x100800, addr | 0x00000010);
}
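
A worked instance of the PRAMIN address math above, under assumed values: with 256MiB of VRAM (so the read of register 0x10020c returns 0x10000000) and the GART page table placed 512KiB-aligned at instance address 0x00080000, the calculation resolves like this:

	u32 vram = 0x10000000;                        /* nvkm_rd32(device, 0x10020c) */
	u64 gart = 0x00080000;                        /* nvkm_memory_addr(gart) */
	u32 addr = vram - (((gart >> 19) + 1) << 19);
	/* ((0x80000 >> 19) + 1) << 19 == 0x100000, so addr == 0x0ff00000 */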
-struct nvkm_oclass
-nv44_mmu_oclass = {
- .handle = NV_SUBDEV(MMU, 0x44),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv44_mmu_ctor,
- .dtor = nv04_mmu_dtor,
- .init = nv44_mmu_init,
- .fini = _nvkm_mmu_fini,
- },
+static const struct nvkm_mmu_func
+nv44_mmu = {
+ .dtor = nv04_mmu_dtor,
+ .oneinit = nv44_mmu_oneinit,
+ .init = nv44_mmu_init,
+ .limit = NV44_GART_SIZE,
+ .dma_bits = 39,
+ .pgt_bits = 32 - 12,
+ .spg_shift = 12,
+ .lpg_shift = 12,
+ .map_sg = nv44_vm_map_sg,
+ .unmap = nv44_vm_unmap,
+ .flush = nv44_vm_flush,
};
+
+int
+nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->type == NVKM_DEVICE_AGP ||
+ !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+ return nv04_mmu_new(device, index, pmmu);
+
+ return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+}
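
The hunks above are typical of this series' wait-loop conversion: the old nv_wait()/nv_wait_ne() helpers become open-coded nvkm_msec() polling loops, where a break inside the body signals success and a negative result means timeout. A minimal usage sketch, mirroring the idiom above (the register offset is a placeholder):

static int
example_wait_idle(struct nvkm_device *device)
{
	/* poll a (hypothetical) status register until bit 0 clears,
	 * giving up after 2000ms; nvkm_msec() evaluates negative on
	 * timeout, and the break exits early on success */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x000000) & 0x00000001))
			break;
	) < 0)
		return -ETIMEDOUT;
	return 0;
}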
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index b83550fa7f96..a1f8d65f0276 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,31 +21,28 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
+#include "priv.h"
-#include <core/engine.h>
#include <core/gpuobj.h>
-
-struct nv50_mmu_priv {
- struct nvkm_mmu base;
-};
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
{
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
if (pgt[0]) {
- phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
- coverage = (pgt[0]->size >> 3) << 12;
+ /* present, 4KiB pages */
+ phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+ coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
} else
if (pgt[1]) {
- phys = 0x00000001 | pgt[1]->addr; /* present */
- coverage = (pgt[1]->size >> 3) << 16;
+ /* present, 64KiB pages */
+ phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+ coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
}
if (phys & 1) {
@@ -57,8 +54,10 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
phys |= 0x20;
}
- nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
- nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+ nvkm_kmap(pgd);
+ nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+ nvkm_done(pgd);
}
static inline u64
@@ -75,17 +74,18 @@ vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
}
static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
u32 comp = (mem->memtype & 0x180) >> 7;
u32 block, target;
int i;
/* IGPs don't have real VRAM, re-target to stolen system memory */
target = 0;
- if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
- phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
+ if (ram->stolen) {
+ phys += ram->stolen;
target = 3;
}
@@ -93,6 +93,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
pte <<= 3;
cnt <<= 3;
+ nvkm_kmap(pgt);
while (cnt) {
u32 offset_h = upper_32_bits(phys);
u32 offset_l = lower_32_bits(phys);
@@ -113,129 +114,118 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
}
while (block) {
- nv_wo32(pgt, pte + 0, offset_l);
- nv_wo32(pgt, pte + 4, offset_h);
+ nvkm_wo32(pgt, pte + 0, offset_l);
+ nvkm_wo32(pgt, pte + 4, offset_h);
pte += 8;
block -= 8;
}
}
+ nvkm_done(pgt);
}
static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
pte <<= 3;
+ nvkm_kmap(pgt);
while (cnt--) {
u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
}
+ nvkm_done(pgt);
}
static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte <<= 3;
+ nvkm_kmap(pgt);
while (cnt--) {
- nv_wo32(pgt, pte + 0, 0x00000000);
- nv_wo32(pgt, pte + 4, 0x00000000);
+ nvkm_wo32(pgt, pte + 0, 0x00000000);
+ nvkm_wo32(pgt, pte + 4, 0x00000000);
pte += 8;
}
+ nvkm_done(pgt);
}
static void
nv50_vm_flush(struct nvkm_vm *vm)
{
- struct nv50_mmu_priv *priv = (void *)vm->mmu;
- struct nvkm_bar *bar = nvkm_bar(priv);
- struct nvkm_engine *engine;
+ struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_subdev *subdev = &mmu->subdev;
+ struct nvkm_device *device = subdev->device;
int i, vme;
- bar->flush(bar);
-
- mutex_lock(&nv_subdev(priv)->mutex);
- for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+ mutex_lock(&subdev->mutex);
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
if (!atomic_read(&vm->engref[i]))
continue;
/* unfortunate hw bug workaround... */
- engine = nvkm_engine(priv, i);
- if (engine && engine->tlb_flush) {
- engine->tlb_flush(engine);
- continue;
+ if (i == NVKM_ENGINE_GR && device->gr) {
+ int ret = nvkm_gr_tlb_flush(device->gr);
+ if (ret != -ENODEV)
+ continue;
}
switch (i) {
- case NVDEV_ENGINE_GR : vme = 0x00; break;
- case NVDEV_ENGINE_VP :
- case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
- case NVDEV_SUBDEV_BAR : vme = 0x06; break;
- case NVDEV_ENGINE_MSPPP :
- case NVDEV_ENGINE_MPEG : vme = 0x08; break;
- case NVDEV_ENGINE_BSP :
- case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
- case NVDEV_ENGINE_CIPHER:
- case NVDEV_ENGINE_SEC : vme = 0x0a; break;
- case NVDEV_ENGINE_CE0 : vme = 0x0d; break;
+ case NVKM_ENGINE_GR : vme = 0x00; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
+ case NVKM_SUBDEV_BAR : vme = 0x06; break;
+ case NVKM_ENGINE_MSPPP :
+ case NVKM_ENGINE_MPEG : vme = 0x08; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : vme = 0x09; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : vme = 0x0a; break;
+ case NVKM_ENGINE_CE0 : vme = 0x0d; break;
default:
continue;
}
- nv_wr32(priv, 0x100c80, (vme << 16) | 1);
- if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
- nv_error(priv, "vm flush timeout: engine %d\n", vme);
+ nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
}
- mutex_unlock(&nv_subdev(priv)->mutex);
+ mutex_unlock(&subdev->mutex);
}
static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
- u64 mm_offset, struct nvkm_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+ struct lock_class_key *key, struct nvkm_vm **pvm)
{
- u32 block = (1 << (mmu->pgt_bits + 12));
+ u32 block = (1 << (mmu->func->pgt_bits + 12));
if (block > length)
block = length;
- return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
+ return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
}
-static int
-nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_mmu_func
+nv50_mmu = {
+ .limit = (1ULL << 40),
+ .dma_bits = 40,
+ .pgt_bits = 29 - 12,
+ .spg_shift = 12,
+ .lpg_shift = 16,
+ .create = nv50_vm_create,
+ .map_pgt = nv50_vm_map_pgt,
+ .map = nv50_vm_map,
+ .map_sg = nv50_vm_map_sg,
+ .unmap = nv50_vm_unmap,
+ .flush = nv50_vm_flush,
+};
+
+int
+nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- struct nv50_mmu_priv *priv;
- int ret;
-
- ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.limit = 1ULL << 40;
- priv->base.dma_bits = 40;
- priv->base.pgt_bits = 29 - 12;
- priv->base.spg_shift = 12;
- priv->base.lpg_shift = 16;
- priv->base.create = nv50_vm_create;
- priv->base.map_pgt = nv50_vm_map_pgt;
- priv->base.map = nv50_vm_map;
- priv->base.map_sg = nv50_vm_map_sg;
- priv->base.unmap = nv50_vm_unmap;
- priv->base.flush = nv50_vm_flush;
- return 0;
+ return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
}
-
-struct nvkm_oclass
-nv50_mmu_oclass = {
- .handle = NV_SUBDEV(MMU, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_mmu_ctor,
- .dtor = _nvkm_mmu_dtor,
- .init = _nvkm_mmu_init,
- .fini = _nvkm_mmu_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 000000000000..27cedc60b507
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_MMU_PRIV_H__
+#define __NVKM_MMU_PRIV_H__
+#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
+#include <subdev/mmu.h>
+
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
+ int index, struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+ int index, struct nvkm_mmu **);
+
+struct nvkm_mmu_func {
+ void *(*dtor)(struct nvkm_mmu *);
+ int (*oneinit)(struct nvkm_mmu *);
+ void (*init)(struct nvkm_mmu *);
+
+ u64 limit;
+ u8 dma_bits;
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ int (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
+ struct lock_class_key *, struct nvkm_vm **);
+
+ void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+ struct nvkm_memory *pgt[2]);
+ void (*map)(struct nvkm_vma *, struct nvkm_memory *,
+ struct nvkm_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
+ void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
+ struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+ void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+ u32 pte, u32 cnt);
+ void (*flush)(struct nvkm_vm *);
+};
+
+int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
+ struct lock_class_key *, struct nvkm_vm **);
+#endif
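
priv.h completes the new object model for MMU: each chip file supplies a const nvkm_mmu_func table plus a tiny _new() wrapper, and the common ctor/new_ helpers declared above live in base.c (not shown in this diff). As a rough sketch of what such a constructor typically looks like, modelled on the PCI and PMU equivalents later in this series (the body is an assumption, not the tree's exact code):

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	/* allocate the subdev, then delegate to the common ctor */
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}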
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 0ca9dcabb6d3..9700a7625012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -23,14 +23,13 @@
*/
#include "mxms.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/mxm.h>
#include <subdev/i2c.h>
static bool
-mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
+mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
u8 offset, u8 size, u8 *data)
{
struct i2c_msg msgs[] = {
@@ -38,27 +37,28 @@ mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
};
- return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+ return i2c_transfer(&bus->i2c, msgs, 2) == 2;
}
static bool
mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
{
- struct nvkm_bios *bios = nvkm_bios(mxm);
- struct nvkm_i2c *i2c = nvkm_i2c(mxm);
- struct nvkm_i2c_port *port = NULL;
+ struct nvkm_device *device = mxm->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_i2c *i2c = device->i2c;
+ struct nvkm_i2c_bus *bus = NULL;
u8 i2cidx, mxms[6], addr, size;
i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
if (i2cidx < 0x0f)
- port = i2c->find(i2c, i2cidx);
- if (!port)
+ bus = nvkm_i2c_bus_find(i2c, i2cidx);
+ if (!bus)
return false;
addr = 0x54;
- if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+ if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
addr = 0x56;
- if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+ if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
return false;
}
@@ -67,7 +67,7 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
mxm->mxms = kmalloc(size, GFP_KERNEL);
if (mxm->mxms &&
- mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+ mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
return true;
kfree(mxm->mxms);
@@ -79,7 +79,8 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
static bool
mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
{
- struct nvkm_device *device = nv_device(mxm);
+ struct nvkm_subdev *subdev = &mxm->subdev;
+ struct nvkm_device *device = subdev->device;
static char muid[] = {
0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -94,7 +95,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
acpi_handle handle;
int rev;
- handle = ACPI_HANDLE(nv_device_base(device));
+ handle = ACPI_HANDLE(device->dev);
if (!handle)
return false;
@@ -106,7 +107,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
rev = (version & 0xf0) << 4 | (version & 0x0f);
obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
if (!obj) {
- nv_debug(mxm, "DSM MXMS failed\n");
+ nvkm_debug(subdev, "DSM MXMS failed\n");
return false;
}
@@ -114,7 +115,8 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
mxm->mxms = kmemdup(obj->buffer.pointer,
obj->buffer.length, GFP_KERNEL);
} else if (obj->type == ACPI_TYPE_INTEGER) {
- nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+ nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
+ obj->integer.value);
}
ACPI_FREE(obj);
@@ -129,6 +131,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
static u8
wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
{
+ struct nvkm_subdev *subdev = &mxm->subdev;
u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -137,18 +140,18 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
if (ACPI_FAILURE(status)) {
- nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+ nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
return 0x00;
}
obj = retn.pointer;
if (obj->type == ACPI_TYPE_INTEGER) {
version = obj->integer.value;
- nv_debug(mxm, "WMMX MXMI version %d.%d\n",
- (version >> 4), version & 0x0f);
+ nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
+ (version >> 4), version & 0x0f);
} else {
version = 0;
- nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+ nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
}
kfree(obj);
@@ -158,6 +161,7 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
static bool
mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
{
+ struct nvkm_subdev *subdev = &mxm->subdev;
u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -165,7 +169,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
acpi_status status;
if (!wmi_has_guid(WMI_WMMX_GUID)) {
- nv_debug(mxm, "WMMX GUID not found\n");
+ nvkm_debug(subdev, "WMMX GUID not found\n");
return false;
}
@@ -177,7 +181,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
if (ACPI_FAILURE(status)) {
- nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+ nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
return false;
}
@@ -211,7 +215,7 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
{
struct mxm_shadow_h *shadow = _mxm_shadow;
do {
- nv_debug(mxm, "checking %s\n", shadow->name);
+ nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
if (shadow->exec(mxm, version)) {
if (mxms_valid(mxm))
return 0;
@@ -222,33 +226,33 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
return -ENOENT;
}
+static const struct nvkm_subdev_func
+nvkm_mxm = {
+};
+
int
-nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
{
- struct nvkm_device *device = nv_device(parent);
- struct nvkm_bios *bios = nvkm_bios(device);
+ struct nvkm_bios *bios = device->bios;
struct nvkm_mxm *mxm;
u8 ver, len;
u16 data;
- int ret;
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
- length, pobject);
- mxm = *pobject;
- if (ret)
- return ret;
+ if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
data = mxm_table(bios, &ver, &len);
- if (!data || !(ver = nv_ro08(bios, data))) {
- nv_debug(mxm, "no VBIOS data, nothing to do\n");
+ if (!data || !(ver = nvbios_rd08(bios, data))) {
+ nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
return 0;
}
- nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+ nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
if (mxm_shadow(mxm, ver)) {
- nv_info(mxm, "failed to locate valid SIS\n");
+ nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
#if 0
/* we should, perhaps, fall back to some kind of limited
* mode here if the x86 vbios hasn't already done the
@@ -261,8 +265,8 @@ nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
#endif
}
- nv_info(mxm, "MXMS Version %d.%d\n",
- mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+ nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
+ mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
mxms_foreach(mxm, 0, NULL, NULL);
if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
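
mxm_shadow_rom_fetch() above is the classic combined I2C transaction: one write message selects the register offset, one read message fetches the data, and i2c_transfer() returns the number of messages completed. A self-contained sketch of the same pattern (names are illustrative):

#include <linux/i2c.h>

static bool
example_i2c_read(struct i2c_adapter *adap, u8 addr, u8 offset,
		 u8 size, u8 *data)
{
	struct i2c_msg msgs[] = {
		/* select the offset, then read 'size' bytes back */
		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data },
	};
	return i2c_transfer(adap, msgs, 2) == 2;
}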
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index a9b1d63fed58..45a2f8e784f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -47,7 +47,7 @@ mxms_version(struct nvkm_mxm *mxm)
break;
}
- nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+ nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
return 0x0000;
}
@@ -71,7 +71,7 @@ mxms_checksum(struct nvkm_mxm *mxm)
while (size--)
sum += *mxms++;
if (sum) {
- nv_debug(mxm, "checksum invalid\n");
+ nvkm_debug(&mxm->subdev, "checksum invalid\n");
return false;
}
return true;
@@ -82,7 +82,7 @@ mxms_valid(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
if (*(u32 *)mxms != 0x5f4d584d) {
- nv_debug(mxm, "signature invalid\n");
+ nvkm_debug(&mxm->subdev, "signature invalid\n");
return false;
}
@@ -96,6 +96,7 @@ bool
mxms_foreach(struct nvkm_mxm *mxm, u8 types,
bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
{
+ struct nvkm_subdev *subdev = &mxm->subdev;
u8 *mxms = mxms_data(mxm);
u8 *desc = mxms + mxms_headerlen(mxm);
u8 *fini = desc + mxms_structlen(mxm) - 1;
@@ -140,29 +141,28 @@ mxms_foreach(struct nvkm_mxm *mxm, u8 types,
entries = desc[1] & 0x07;
break;
default:
- nv_debug(mxm, "unknown descriptor type %d\n", type);
+ nvkm_debug(subdev, "unknown descriptor type %d\n", type);
return false;
}
- if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
- static const char * mxms_desc_name[] = {
+ if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
+ static const char * mxms_desc[] = {
"ODS", "SCCS", "TS", "IPS",
"GSD", "VSS", "BCS", "FCS",
};
u8 *dump = desc;
+ char data[32], *ptr;
int i, j;
- nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
- for (j = headerlen - 1; j >= 0; j--)
- pr_cont("%02x", dump[j]);
- pr_cont("\n");
+ for (j = headerlen - 1, ptr = data; j >= 0; j--)
+ ptr += sprintf(ptr, "%02x", dump[j]);
dump += headerlen;
+ nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
for (i = 0; i < entries; i++, dump += recordlen) {
- nv_debug(mxm, " ");
- for (j = recordlen - 1; j >= 0; j--)
- pr_cont("%02x", dump[j]);
- pr_cont("\n");
+ for (j = recordlen - 1, ptr = data; j >= 0; j--)
+ ptr += sprintf(ptr, "%02x", dump[j]);
+ nvkm_debug(subdev, " %s\n", data);
}
}
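
The debug-dump rewrite above drops pr_cont() in favour of composing each row into a stack buffer, so every descriptor reaches the log as a single prefixed nvkm_debug() line. A sketch of the idiom; the 32-byte buffer is an assumption that rows stay at or below 15 bytes (2 hex chars per byte plus a terminating NUL):

static void
example_dump_row(struct nvkm_subdev *subdev, const u8 *row, int len)
{
	char line[32], *ptr = line;
	/* print bytes high-to-low, matching the loops above */
	while (len-- > 0)
		ptr += sprintf(ptr, "%02x", row[len]);
	nvkm_debug(subdev, "%s\n", line);
}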
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 4ef804012d06..333e0c01545a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,6 +1,6 @@
#ifndef __NVMXM_MXMS_H__
#define __NVMXM_MXMS_H__
-#include <subdev/mxm.h>
+#include "priv.h"
struct mxms_odev {
u8 outp_type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index f20e4ca87e17..db14fad2ddfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -28,10 +28,6 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/mxm.h>
-struct nv50_mxm_priv {
- struct nvkm_mxm base;
-};
-
struct context {
u32 *outp;
struct mxms_odev desc;
@@ -53,7 +49,7 @@ mxm_match_tmds_partner(struct nvkm_mxm *mxm, u8 *data, void *info)
static bool
mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
{
- struct nvkm_bios *bios = nvkm_bios(mxm);
+ struct nvkm_bios *bios = mxm->subdev.device->bios;
struct context *ctx = info;
u64 desc = *(u64 *)data;
@@ -107,8 +103,8 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
* if one isn't found, disable it.
*/
if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
- nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
- idx, ctx.outp[0], ctx.outp[1]);
+ nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
+ idx, ctx.outp[0], ctx.outp[1]);
ctx.outp[0] |= 0x0000000f;
return 0;
}
@@ -180,20 +176,22 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
static bool
mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
{
+ struct nvkm_subdev *subdev = &mxm->subdev;
u64 desc = *(u64 *)data;
if ((desc & 0xf0) != 0xf0)
- nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+ nvkm_info(subdev, "unmatched output device %016llx\n", desc);
return true;
}
static void
mxm_dcb_sanitise(struct nvkm_mxm *mxm)
{
- struct nvkm_bios *bios = nvkm_bios(mxm);
+ struct nvkm_subdev *subdev = &mxm->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
if (dcb == 0x0000 || ver != 0x40) {
- nv_debug(mxm, "unsupported DCB version\n");
+ nvkm_debug(subdev, "unsupported DCB version\n");
return;
}
@@ -201,31 +199,20 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}
-static int
-nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
{
- struct nv50_mxm_priv *priv;
+ struct nvkm_mxm *mxm;
int ret;
- ret = nvkm_mxm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_mxm_new_(device, index, &mxm);
+ if (mxm)
+ *pmxm = &mxm->subdev;
if (ret)
return ret;
- if (priv->base.action & MXM_SANITISE_DCB)
- mxm_dcb_sanitise(&priv->base);
+ if (mxm->action & MXM_SANITISE_DCB)
+ mxm_dcb_sanitise(mxm);
+
return 0;
}
-
-struct nvkm_oclass
-nv50_mxm_oclass = {
- .handle = NV_SUBDEV(MXM, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_mxm_ctor,
- .dtor = _nvkm_mxm_dtor,
- .init = _nvkm_mxm_init,
- .fini = _nvkm_mxm_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
new file mode 100644
index 000000000000..7d970157aed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_MXM_PRIV_H__
+#define __NVKM_MXM_PRIV_H__
+#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
+#include <subdev/mxm.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nvkm_mxm {
+ struct nvkm_subdev subdev;
+ u32 action;
+ u8 *mxms;
+};
+
+int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
new file mode 100644
index 000000000000..99672c3d0bad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -0,0 +1,7 @@
+nvkm-y += nvkm/subdev/pci/agp.o
+nvkm-y += nvkm/subdev/pci/base.o
+nvkm-y += nvkm/subdev/pci/nv04.o
+nvkm-y += nvkm/subdev/pci/nv40.o
+nvkm-y += nvkm/subdev/pci/nv4c.o
+nvkm-y += nvkm/subdev/pci/nv50.o
+nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
new file mode 100644
index 000000000000..814cb51cc873
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "agp.h"
+#ifdef __NVKM_PCI_AGP_H__
+#include <core/option.h>
+
+struct nvkm_device_agp_quirk {
+ u16 hostbridge_vendor;
+ u16 hostbridge_device;
+ u16 chip_vendor;
+ u16 chip_device;
+ int mode;
+};
+
+static const struct nvkm_device_agp_quirk
+nvkm_device_agp_quirks[] = {
+ /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+ {},
+};
+
+void
+nvkm_agp_fini(struct nvkm_pci *pci)
+{
+ if (pci->agp.acquired) {
+ agp_backend_release(pci->agp.bridge);
+ pci->agp.acquired = false;
+ }
+}
+
+/* Ensure the AGP controller is in a consistent state, in case we need
+ * to execute the VBIOS DEVINIT scripts.
+ */
+void
+nvkm_agp_preinit(struct nvkm_pci *pci)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ u32 mode = nvkm_pci_rd32(pci, 0x004c);
+ u32 save[2];
+
+ /* First of all, disable fast writes, otherwise if they're already
+ * enabled in the AGP bridge and we disable the card's AGP
+ * controller we might lock ourselves out of it.
+ */
+ if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
+ mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
+ agp_enable(pci->agp.bridge, mode);
+ }
+
+ /* clear busmaster bit, and disable AGP */
+ save[0] = nvkm_pci_rd32(pci, 0x0004);
+ nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
+ nvkm_pci_wr32(pci, 0x004c, 0x00000000);
+
+ /* reset PGRAPH, PFIFO and PTIMER */
+ save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00011100, save[1]);
+
+ /* and restore busmaster bit (has the effect of resetting AGP) */
+ nvkm_pci_wr32(pci, 0x0004, save[0]);
+}
+
+int
+nvkm_agp_init(struct nvkm_pci *pci)
+{
+ if (!agp_backend_acquire(pci->pdev)) {
+ nvkm_error(&pci->subdev, "failed to acquire agp\n");
+ return -ENODEV;
+ }
+
+ agp_enable(pci->agp.bridge, pci->agp.mode);
+ pci->agp.acquired = true;
+ return 0;
+}
+
+void
+nvkm_agp_dtor(struct nvkm_pci *pci)
+{
+ arch_phys_wc_del(pci->agp.mtrr);
+}
+
+void
+nvkm_agp_ctor(struct nvkm_pci *pci)
+{
+ const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
+ struct nvkm_subdev *subdev = &pci->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct agp_kern_info info;
+ int mode = -1;
+
+#ifdef __powerpc__
+ /* Disable AGP by default on all PowerPC machines for now -- At
+ * least some UniNorth-2 AGP bridges are known to be broken:
+ * DMA from the host to the card works just fine, but writeback
+ * from the card to the host goes straight to memory
+ * untranslated bypassing that GATT somehow, making them quite
+ * painful to deal with...
+ */
+ mode = 0;
+#endif
+ mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
+
+ /* acquire bridge temporarily, so that we can copy its info */
+ if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
+ nvkm_warn(subdev, "failed to acquire agp\n");
+ return;
+ }
+ agp_copy_info(pci->agp.bridge, &info);
+ agp_backend_release(pci->agp.bridge);
+
+ pci->agp.mode = info.mode;
+ pci->agp.base = info.aper_base;
+ pci->agp.size = info.aper_size * 1024 * 1024;
+ pci->agp.cma = info.cant_use_aperture;
+ pci->agp.mtrr = -1;
+
+ /* determine if bridge + chipset combination needs a workaround */
+ while (quirk->hostbridge_vendor) {
+ if (info.device->vendor == quirk->hostbridge_vendor &&
+ info.device->device == quirk->hostbridge_device &&
+ pci->pdev->vendor == quirk->chip_vendor &&
+ pci->pdev->device == quirk->chip_device) {
+ nvkm_info(subdev, "forcing default agp mode to %dX, "
+ "use NvAGP=<mode> to override\n",
+ quirk->mode);
+ mode = quirk->mode;
+ break;
+ }
+ quirk++;
+ }
+
+ /* apply quirk / user-specified mode */
+ if (mode >= 1) {
+ if (pci->agp.mode & 0x00000008)
+ mode /= 4; /* AGPv3 */
+ pci->agp.mode &= ~0x00000007;
+ pci->agp.mode |= (mode & 0x7);
+ } else
+ if (mode == 0) {
+ pci->agp.bridge = NULL;
+ return;
+ }
+
+ /* fast writes appear to be broken on nv18; they make the card
+ * lock up randomly.
+ */
+ if (device->chipset == 0x18)
+ pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
+
+ pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
new file mode 100644
index 000000000000..df2dd08363ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -0,0 +1,18 @@
+#include "priv.h"
+#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
+#ifndef __NVKM_PCI_AGP_H__
+#define __NVKM_PCI_AGP_H__
+
+void nvkm_agp_ctor(struct nvkm_pci *);
+void nvkm_agp_dtor(struct nvkm_pci *);
+void nvkm_agp_preinit(struct nvkm_pci *);
+int nvkm_agp_init(struct nvkm_pci *);
+void nvkm_agp_fini(struct nvkm_pci *);
+#endif
+#else
+static inline void nvkm_agp_ctor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_dtor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_preinit(struct nvkm_pci *pci) {}
+static inline int nvkm_agp_init(struct nvkm_pci *pci) { return -ENOSYS; }
+static inline void nvkm_agp_fini(struct nvkm_pci *pci) {}
+#endif
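
Taken together, agp.c and agp.h implement a neat conditional-compilation trick: agp.c is always compiled, but its body is wrapped in #ifdef __NVKM_PCI_AGP_H__, and the header only defines that guard when AGP support is actually configured; otherwise the static inline stubs take over and the real code compiles to nothing. A generic sketch of the pattern, with illustrative names:

/* feature.h */
#if defined(CONFIG_FOO) || (defined(CONFIG_FOO_MODULE) && defined(MODULE))
#ifndef __FEATURE_H__
#define __FEATURE_H__
void feature_init(void);
#endif
#else
static inline void feature_init(void) {}
#endif

/* feature.c (always listed in the Kbuild) */
#include "feature.h"
#ifdef __FEATURE_H__
void
feature_init(void)
{
	/* real implementation, only built when the guard exists */
}
#endif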
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
new file mode 100644
index 000000000000..d1c148e51922
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "agp.h"
+
+#include <core/option.h>
+#include <core/pci.h>
+#include <subdev/mc.h>
+
+u32
+nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+ return pci->func->rd32(pci, addr);
+}
+
+void
+nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+ pci->func->wr08(pci, addr, data);
+}
+
+void
+nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+ pci->func->wr32(pci, addr, data);
+}
+
+void
+nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
+{
+ u32 data = nvkm_pci_rd32(pci, 0x0050);
+ if (shadow)
+ data |= 0x00000001;
+ else
+ data &= ~0x00000001;
+ nvkm_pci_wr32(pci, 0x0050, data);
+}
+
+static irqreturn_t
+nvkm_pci_intr(int irq, void *arg)
+{
+ struct nvkm_pci *pci = arg;
+ struct nvkm_mc *mc = pci->subdev.device->mc;
+ bool handled = false;
+ if (likely(mc)) {
+ nvkm_mc_intr_unarm(mc);
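+ /* rearm MSI before dispatch: MSI appears edge-triggered, so this
+ * presumably keeps an interrupt raised mid-handler from being lost */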
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+ nvkm_mc_intr(mc, &handled);
+ nvkm_mc_intr_rearm(mc);
+ }
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_pci *pci = nvkm_pci(subdev);
+
+ if (pci->irq >= 0) {
+ free_irq(pci->irq, pci);
+ pci->irq = -1;
+ }
+
+ if (pci->agp.bridge)
+ nvkm_agp_fini(pci);
+
+ return 0;
+}
+
+static int
+nvkm_pci_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pci *pci = nvkm_pci(subdev);
+ if (pci->agp.bridge)
+ nvkm_agp_preinit(pci);
+ return 0;
+}
+
+static int
+nvkm_pci_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pci *pci = nvkm_pci(subdev);
+ struct pci_dev *pdev = pci->pdev;
+ int ret;
+
+ if (pci->agp.bridge) {
+ ret = nvkm_agp_init(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+ if (ret)
+ return ret;
+
+ pci->irq = pdev->irq;
+ return ret;
+}
+
+static void *
+nvkm_pci_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pci *pci = nvkm_pci(subdev);
+ nvkm_agp_dtor(pci);
+ if (pci->msi)
+ pci_disable_msi(pci->pdev);
+ return nvkm_pci(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pci_func = {
+ .dtor = nvkm_pci_dtor,
+ .preinit = nvkm_pci_preinit,
+ .init = nvkm_pci_init,
+ .fini = nvkm_pci_fini,
+};
+
+int
+nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+ int index, struct nvkm_pci **ppci)
+{
+ struct nvkm_pci *pci;
+
+ if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+ pci->func = func;
+ pci->pdev = device->func->pci(device)->pdev;
+ pci->irq = -1;
+
+ if (device->type == NVKM_DEVICE_AGP)
+ nvkm_agp_ctor(pci);
+
+ switch (pci->pdev->device & 0x0ff0) {
+ case 0x00f0:
+ case 0x02e0:
+ /* BR02? not yet clear exactly how these should be handled */
+ break;
+ default:
+ switch (device->chipset) {
+ case 0xaa:
+ /* reported broken, nv also disables it */
+ break;
+ default:
+ pci->msi = true;
+ break;
+ }
+ }
+
+ pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+ if (pci->msi && func->msi_rearm) {
+ pci->msi = pci_enable_msi(pci->pdev) == 0;
+ if (pci->msi)
+ nvkm_debug(&pci->subdev, "MSI enabled\n");
+ } else {
+ pci->msi = false;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
new file mode 100644
index 000000000000..86f8226532d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gf100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ nvkm_pci_wr08(pci, 0x0704, 0xff);
+}
+
+static const struct nvkm_pci_func
+gf100_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = gf100_pci_msi_rearm,
+};
+
+int
+gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
new file mode 100644
index 000000000000..5b1ed42cb90b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static u32
+nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ return nvkm_rd32(device, 0x001800 + addr);
+}
+
+static void
+nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ nvkm_wr08(device, 0x001800 + addr, data);
+}
+
+static void
+nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ nvkm_wr32(device, 0x001800 + addr, data);
+}
+
+static const struct nvkm_pci_func
+nv04_pci_func = {
+ .rd32 = nv04_pci_rd32,
+ .wr08 = nv04_pci_wr08,
+ .wr32 = nv04_pci_wr32,
+};
+
+int
+nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
new file mode 100644
index 000000000000..090a187f165f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+u32
+nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ return nvkm_rd32(device, 0x088000 + addr);
+}
+
+void
+nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ nvkm_wr08(device, 0x088000 + addr, data);
+}
+
+void
+nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ nvkm_wr32(device, 0x088000 + addr, data);
+}
+
+static void
+nv40_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ nvkm_pci_wr08(pci, 0x0068, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv40_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = nv40_pci_msi_rearm,
+};
+
+int
+nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
new file mode 100644
index 000000000000..1f1b26b5fa72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pci_func
+nv4c_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+};
+
+int
+nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
new file mode 100644
index 000000000000..3e167d4a381f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+
+/* MSI re-arm through the PRI appears to be broken on the original G80,
+ * so we access it via alternate PCI config space mechanisms.
+ */
+static void
+nv50_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ struct nvkm_device *device = pci->subdev.device;
+ struct pci_dev *pdev = device->func->pci(device)->pdev;
+ pci_write_config_byte(pdev, 0x68, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv50_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = nv50_pci_msi_rearm,
+};
+
+int
+nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&nv50_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
new file mode 100644
index 000000000000..d22c2c117106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -0,0 +1,19 @@
+#ifndef __NVKM_PCI_PRIV_H__
+#define __NVKM_PCI_PRIV_H__
+#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
+#include <subdev/pci.h>
+
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
+ int index, struct nvkm_pci **);
+
+struct nvkm_pci_func {
+ u32 (*rd32)(struct nvkm_pci *, u16 addr);
+ void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
+ void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
+ void (*msi_rearm)(struct nvkm_pci *);
+};
+
+u32 nv40_pci_rd32(struct nvkm_pci *, u16);
+void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
+void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
+#endif
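
With priv.h in place, each chip variant only supplies rd32/wr08/wr32 hooks that expose PCI config space through a chip-specific MMIO window (0x001800 on nv04, 0x088000 on nv40 and later), so common code stays bus-agnostic. A minimal usage sketch; offset 0x0000 is the standard vendor/device ID dword:

static u32
example_pci_ids(struct nvkm_pci *pci)
{
	/* [15:0] vendor id, [31:16] device id */
	return nvkm_pci_rd32(pci, 0x0000);
}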
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 7081d6a9b95f..88b643b8664e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -2,8 +2,9 @@ nvkm-y += nvkm/subdev/pmu/base.o
nvkm-y += nvkm/subdev/pmu/memx.o
nvkm-y += nvkm/subdev/pmu/gt215.o
nvkm-y += nvkm/subdev/pmu/gf100.o
-nvkm-y += nvkm/subdev/pmu/gf110.o
+nvkm-y += nvkm/subdev/pmu/gf119.o
nvkm-y += nvkm/subdev/pmu/gk104.o
nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
+nvkm-y += nvkm/subdev/pmu/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 054b2d2eec35..27a79c0c3888 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,21 +28,25 @@
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
- const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
- if (impl->pgob)
- impl->pgob(pmu, enable);
+ if (pmu->func->pgob)
+ pmu->func->pgob(pmu, enable);
}
-static int
+int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
u32 process, u32 message, u32 data0, u32 data1)
{
- struct nvkm_subdev *subdev = nv_subdev(pmu);
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
u32 addr;
/* wait for a free slot in the fifo */
- addr = nv_rd32(pmu, 0x10a4a0);
- if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
+ addr = nvkm_rd32(device, 0x10a4a0);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x10a4b0);
+ if (tmp != (addr ^ 8))
+ break;
+ ) < 0)
return -EBUSY;
/* we currently only support a single process at a time waiting
@@ -57,20 +61,20 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
/* acquire data segment access */
do {
- nv_wr32(pmu, 0x10a580, 0x00000001);
- } while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+ nvkm_wr32(device, 0x10a580, 0x00000001);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000001);
/* write the packet */
- nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
pmu->send.base));
- nv_wr32(pmu, 0x10a1c4, process);
- nv_wr32(pmu, 0x10a1c4, message);
- nv_wr32(pmu, 0x10a1c4, data0);
- nv_wr32(pmu, 0x10a1c4, data1);
- nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+ nvkm_wr32(device, 0x10a1c4, process);
+ nvkm_wr32(device, 0x10a1c4, message);
+ nvkm_wr32(device, 0x10a1c4, data0);
+ nvkm_wr32(device, 0x10a1c4, data1);
+ nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
/* release data segment access */
- nv_wr32(pmu, 0x10a580, 0x00000000);
+ nvkm_wr32(device, 0x10a580, 0x00000000);
/* wait for reply, if requested */
if (reply) {
@@ -87,29 +91,31 @@ static void
nvkm_pmu_recv(struct work_struct *work)
{
struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
u32 process, message, data0, data1;
/* nothing to do if GET == PUT */
- u32 addr = nv_rd32(pmu, 0x10a4cc);
- if (addr == nv_rd32(pmu, 0x10a4c8))
+ u32 addr = nvkm_rd32(device, 0x10a4cc);
+ if (addr == nvkm_rd32(device, 0x10a4c8))
return;
/* acquire data segment access */
do {
- nv_wr32(pmu, 0x10a580, 0x00000002);
- } while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+ nvkm_wr32(device, 0x10a580, 0x00000002);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000002);
/* read the packet */
- nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+ nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
pmu->recv.base));
- process = nv_rd32(pmu, 0x10a1c4);
- message = nv_rd32(pmu, 0x10a1c4);
- data0 = nv_rd32(pmu, 0x10a1c4);
- data1 = nv_rd32(pmu, 0x10a1c4);
- nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+ process = nvkm_rd32(device, 0x10a1c4);
+ message = nvkm_rd32(device, 0x10a1c4);
+ data0 = nvkm_rd32(device, 0x10a1c4);
+ data1 = nvkm_rd32(device, 0x10a1c4);
+ nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
/* release data segment access */
- nv_wr32(pmu, 0x10a580, 0x00000000);
+ nvkm_wr32(device, 0x10a580, 0x00000000);
/* wake process if it's waiting on a synchronous reply */
if (pmu->recv.process) {
@@ -126,143 +132,149 @@ nvkm_pmu_recv(struct work_struct *work)
/* right now there are no other expected responses from the engine,
* so assume that any unexpected message is an error.
*/
- nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
- (char)((process & 0x000000ff) >> 0),
- (char)((process & 0x0000ff00) >> 8),
- (char)((process & 0x00ff0000) >> 16),
- (char)((process & 0xff000000) >> 24),
- process, message, data0, data1);
+ nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+ (char)((process & 0x000000ff) >> 0),
+ (char)((process & 0x0000ff00) >> 8),
+ (char)((process & 0x00ff0000) >> 16),
+ (char)((process & 0xff000000) >> 24),
+ process, message, data0, data1);
}
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
- struct nvkm_pmu *pmu = (void *)subdev;
- u32 disp = nv_rd32(pmu, 0x10a01c);
- u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ struct nvkm_device *device = pmu->subdev.device;
+ u32 disp = nvkm_rd32(device, 0x10a01c);
+ u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
if (intr & 0x00000020) {
- u32 stat = nv_rd32(pmu, 0x10a16c);
+ u32 stat = nvkm_rd32(device, 0x10a16c);
if (stat & 0x80000000) {
- nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
- stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
- nv_wr32(pmu, 0x10a16c, 0x00000000);
+ nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+ stat & 0x00ffffff,
+ nvkm_rd32(device, 0x10a168));
+ nvkm_wr32(device, 0x10a16c, 0x00000000);
intr &= ~0x00000020;
}
}
if (intr & 0x00000040) {
schedule_work(&pmu->recv.work);
- nv_wr32(pmu, 0x10a004, 0x00000040);
+ nvkm_wr32(device, 0x10a004, 0x00000040);
intr &= ~0x00000040;
}
if (intr & 0x00000080) {
- nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
- nv_rd32(pmu, 0x10a7a4));
- nv_wr32(pmu, 0x10a004, 0x00000080);
+ nvkm_info(subdev, "wr32 %06x %08x\n",
+ nvkm_rd32(device, 0x10a7a0),
+ nvkm_rd32(device, 0x10a7a4));
+ nvkm_wr32(device, 0x10a004, 0x00000080);
intr &= ~0x00000080;
}
if (intr) {
- nv_error(pmu, "intr 0x%08x\n", intr);
- nv_wr32(pmu, 0x10a004, intr);
+ nvkm_error(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x10a004, intr);
}
}
-int
-_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_pmu *pmu = (void *)object;
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ struct nvkm_device *device = pmu->subdev.device;
- nv_wr32(pmu, 0x10a014, 0x00000060);
+ nvkm_wr32(device, 0x10a014, 0x00000060);
flush_work(&pmu->recv.work);
-
- return nvkm_subdev_fini(&pmu->base, suspend);
+ return 0;
}
-int
-_nvkm_pmu_init(struct nvkm_object *object)
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
{
- const struct nvkm_pmu_impl *impl = (void *)object->oclass;
- struct nvkm_pmu *pmu = (void *)object;
- int ret, i;
-
- ret = nvkm_subdev_init(&pmu->base);
- if (ret)
- return ret;
-
- nv_subdev(pmu)->intr = nvkm_pmu_intr;
- pmu->message = nvkm_pmu_send;
- pmu->pgob = nvkm_pmu_pgob;
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ struct nvkm_device *device = pmu->subdev.device;
+ int i;
/* prevent previous ucode from running, wait for idle, reset */
- nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
- nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
- nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
- nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
- nv_rd32(pmu, 0x000200);
- nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
+ nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x10a04c))
+ break;
+ );
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+ nvkm_rd32(device, 0x000200);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+ break;
+ );
/* upload data segment */
- nv_wr32(pmu, 0x10a1c0, 0x01000000);
- for (i = 0; i < impl->data.size / 4; i++)
- nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+ nvkm_wr32(device, 0x10a1c0, 0x01000000);
+ for (i = 0; i < pmu->func->data.size / 4; i++)
+ nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
/* upload code segment */
- nv_wr32(pmu, 0x10a180, 0x01000000);
- for (i = 0; i < impl->code.size / 4; i++) {
+ nvkm_wr32(device, 0x10a180, 0x01000000);
+ for (i = 0; i < pmu->func->code.size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(pmu, 0x10a188, i >> 6);
- nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+ nvkm_wr32(device, 0x10a188, i >> 6);
+ nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
}
/* start it running */
- nv_wr32(pmu, 0x10a10c, 0x00000000);
- nv_wr32(pmu, 0x10a104, 0x00000000);
- nv_wr32(pmu, 0x10a100, 0x00000002);
+ nvkm_wr32(device, 0x10a10c, 0x00000000);
+ nvkm_wr32(device, 0x10a104, 0x00000000);
+ nvkm_wr32(device, 0x10a100, 0x00000002);
/* wait for valid host->pmu ring configuration */
- if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4d0))
+ break;
+ ) < 0)
return -EBUSY;
- pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
- pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+ pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+ pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
/* wait for valid pmu->host ring configuration */
- if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x10a4dc))
+ break;
+ ) < 0)
return -EBUSY;
- pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
- pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+ pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+ pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
- nv_wr32(pmu, 0x10a010, 0x000000e0);
+ nvkm_wr32(device, 0x10a010, 0x000000e0);
return 0;
}
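
The nvkm_msec() construct used above replaces the old nv_wait()/nv_wait_ne() helpers: its body is re-evaluated until it executes break (success) or the timeout expires, in which case the expression yields a negative value; the init path uses it to wait for 0x10a04c to clear and for the engine at 0x10a10c to go idle. A rough standalone analogue of that poll-with-timeout shape, with every name here (fake_rd32, poll_idle) invented for illustration:

    /* Minimal sketch of a poll-with-timeout loop in the style of nvkm_msec().
     * This is not the nvkm implementation, only the same control shape. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t fake_rd32(uint32_t addr) { (void)addr; return 0; }

    /* Returns 0 once the masked bits clear, -1 on timeout. */
    static int poll_idle(uint32_t addr, uint32_t mask, int timeout_ms)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        long deadline = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L + timeout_ms;
        for (;;) {
            if (!(fake_rd32(addr) & mask))
                return 0;                  /* condition met, like "break" */
            clock_gettime(CLOCK_MONOTONIC, &ts);
            if (ts.tv_sec * 1000L + ts.tv_nsec / 1000000L > deadline)
                return -1;                 /* mirrors nvkm_msec() < 0 */
        }
    }

    int main(void)
    {
        printf("idle: %d\n", poll_idle(0x10a10c, 0x6, 2000));
        return 0;
    }
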
-int
-nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_pmu *pmu;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
- "pmu", length, pobject);
- pmu = *pobject;
- if (ret)
- return ret;
-
- INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
- init_waitqueue_head(&pmu->recv.wait);
- return 0;
+ return nvkm_pmu(subdev);
}
+static const struct nvkm_subdev_func
+nvkm_pmu = {
+ .dtor = nvkm_pmu_dtor,
+ .init = nvkm_pmu_init,
+ .fini = nvkm_pmu_fini,
+ .intr = nvkm_pmu_intr,
+};
+
int
-_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+ int index, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
- int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
- *pobject = nv_object(pmu);
- return ret;
+ if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+ pmu->func = func;
+ INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
+ init_waitqueue_head(&pmu->recv.wait);
+ return 0;
}
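
The constructor above is the new-world shape for nvkm subdevs: allocate zeroed, publish through the out-pointer, run the common nvkm_subdev_ctor(), then record the per-chip function table and do one-time init of the message-handling work. A hedged standalone analogue, all names invented:

    #include <stdlib.h>

    struct widget_func { int (*init)(void *); };

    struct widget {
        const struct widget_func *func;   /* per-variant ops, set once */
        int index;
    };

    /* Mirrors the nvkm_pmu_new_() shape: allocate zeroed, publish via
     * *pw before further setup, record the ops table, 0 or -ENOMEM. */
    static int widget_new_(const struct widget_func *func, int index,
                           struct widget **pw)
    {
        struct widget *w;
        if (!(w = *pw = calloc(1, sizeof(*w))))
            return -12;                   /* -ENOMEM on Linux */
        w->func = func;
        w->index = index;
        return 0;
    }

    int main(void)
    {
        static const struct widget_func demo = { 0 };
        struct widget *w;
        return widget_new_(&demo, 0, &w);
    }
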
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
index ae9c3f18ae01..2f28c7e26a14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
@@ -32,7 +32,7 @@
#include "macros.fuc"
-.section #gf110_pmu_data
+.section #gf119_pmu_data
#define INCLUDE_PROC
#include "kernel.fuc"
#include "arith.fuc"
@@ -56,7 +56,7 @@
#undef INCLUDE_DATA
.align 256
-.section #gf110_pmu_code
+.section #gf119_pmu_code
#define INCLUDE_CODE
#include "kernel.fuc"
#include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index a0c499e4543c..31552af9b06e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf110_pmu_data[] = {
+uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf110_pmu_data[] = {
0x00000000,
};
-uint32_t gf110_pmu_code[] = {
+uint32_t gf119_pmu_code[] = {
0x034d0ef5,
/* 0x0004: rd32 */
0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 78a4ea0101f1..aeb8ccd891fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,17 +24,16 @@
#include "priv.h"
#include "fuc/gf100.fuc3.h"
-struct nvkm_oclass *
-gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xc0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = _nvkm_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
+static const struct nvkm_pmu_func
+gf100_pmu = {
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
-}.base;
+};
+
+int
+gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+}
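
Each chipset file now shrinks to a static const nvkm_pmu_func pointing at its generated firmware arrays plus a one-line *_new() wrapper. Note that code.size/data.size are byte counts (sizeof of a u32 array), which is why the upload loops earlier iterate size / 4 words. A minimal sketch of the same descriptor shape, with demo_* arrays standing in for the generated fuc headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for generated fuc arrays. */
    static uint32_t demo_code[] = { 0x1, 0x2 };
    static uint32_t demo_data[] = { 0x3 };

    struct fw_func {
        struct { uint32_t *data; uint32_t size; } code, data;
    };

    /* One static const descriptor per chipset, as gf100_pmu does above;
     * the sizes are byte counts via sizeof. */
    static const struct fw_func demo_fw = {
        .code = { demo_code, sizeof(demo_code) },
        .data = { demo_data, sizeof(demo_data) },
    };

    int main(void)
    {
        printf("%u code words\n", demo_fw.code.size / 4);  /* bytes / 4 */
        return 0;
    }
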
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 6b3a23839ff0..fbc88d8ecd4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -22,19 +22,18 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
-struct nvkm_oclass *
-gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xd0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = _nvkm_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
- .code.data = gf110_pmu_code,
- .code.size = sizeof(gf110_pmu_code),
- .data.data = gf110_pmu_data,
- .data.size = sizeof(gf110_pmu_data),
-}.base;
+static const struct nvkm_pmu_func
+gf119_pmu = {
+ .code.data = gf119_pmu_code,
+ .code.size = sizeof(gf119_pmu_code),
+ .data.data = gf119_pmu_data,
+ .data.size = sizeof(gf119_pmu_data),
+};
+
+int
+gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 28fdb8ea9ed8..e33f5c03b9ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,47 +21,97 @@
*
* Authors: Ben Skeggs
*/
-#define gf110_pmu_code gk104_pmu_code
-#define gf110_pmu_data gk104_pmu_data
+#define gf119_pmu_code gk104_pmu_code
+#define gf119_pmu_data gk104_pmu_data
#include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
+
+#include <core/option.h>
+#include <subdev/timer.h>
+
+static void
+magic_(struct nvkm_device *device, u32 ctrl, int size)
+{
+ nvkm_wr32(device, 0x00c800, 0x00000000);
+ nvkm_wr32(device, 0x00c808, 0x00000000);
+ nvkm_wr32(device, 0x00c800, ctrl);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
+ while (size--)
+ nvkm_wr32(device, 0x00c804, 0x00000000);
+ break;
+ }
+ );
+ nvkm_wr32(device, 0x00c800, 0x00000000);
+}
+
+static void
+magic(struct nvkm_device *device, u32 ctrl)
+{
+ magic_(device, 0x8000a41f | ctrl, 6);
+ magic_(device, 0x80000421 | ctrl, 1);
+}
static void
gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
- nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
- nv_rd32(pmu, 0x000200);
- nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+ struct nvkm_device *device = pmu->subdev.device;
+
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+ nvkm_rd32(device, 0x000200);
+ nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
msleep(50);
- nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
- nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+ nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
msleep(50);
- nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
+
+ nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+ nvkm_rd32(device, 0x000200);
- nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
- nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
- nv_rd32(pmu, 0x000200);
+ if (nvkm_boolopt(device->cfgopt, "War00C800_0",
+ device->quirk ? device->quirk->War00C800_0 : false)) {
+ nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+ switch (device->chipset) {
+ case 0xe4:
+ magic(device, 0x04000000);
+ magic(device, 0x06000000);
+ magic(device, 0x0c000000);
+ magic(device, 0x0e000000);
+ break;
+ case 0xe6:
+ magic(device, 0x02000000);
+ magic(device, 0x04000000);
+ magic(device, 0x0a000000);
+ break;
+ case 0xe7:
+ magic(device, 0x02000000);
+ break;
+ default:
+ break;
+ }
+ }
}
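
The workaround above is gated twice: a "War00C800_0" boolean config option whose default comes from a per-board quirk table, then a chipset switch selecting which magic() sequences to emit. The same gating shape in miniature (names and output invented; this is not the actual register sequence):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hedged sketch of quirk gating: an opt-in flag selects the
     * workaround, a chipset switch picks its flavour. */
    static void apply_workaround(int chipset, bool opt_in)
    {
        if (!opt_in)
            return;
        switch (chipset) {
        case 0xe4: puts("emit four sequences"); break;
        case 0xe6: puts("emit three sequences"); break;
        case 0xe7: puts("emit one sequence");   break;
        default:   break;                 /* other chips: nothing */
        }
    }

    int main(void) { apply_workaround(0xe4, true); return 0; }
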
-struct nvkm_oclass *
-gk104_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xe4),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = _nvkm_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
+static const struct nvkm_pmu_func
+gk104_pmu = {
.code.data = gk104_pmu_code,
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
.pgob = gk104_pmu_pgob,
-}.base;
+};
+
+int
+gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 89bb94b0af8b..ae255247c9d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -21,16 +21,17 @@
*
* Authors: Ben Skeggs
*/
-#define gf110_pmu_code gk110_pmu_code
-#define gf110_pmu_data gk110_pmu_data
+#define gf119_pmu_code gk110_pmu_code
+#define gf119_pmu_data gk110_pmu_data
#include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
#include <subdev/timer.h>
void
gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
+ struct nvkm_device *device = pmu->subdev.device;
static const struct {
u32 addr;
u32 data;
@@ -54,42 +55,44 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
};
int i;
- nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
- nv_rd32(pmu, 0x000200);
- nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+ nvkm_rd32(device, 0x000200);
+ nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
msleep(50);
- nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
- nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+ nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
for (i = 0; i < ARRAY_SIZE(magic); i++) {
- nv_wr32(pmu, magic[i].addr, magic[i].data);
- nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+ nvkm_wr32(device, magic[i].addr, magic[i].data);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
+ break;
+ );
}
- nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
- nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+ nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
- nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
- nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
- nv_rd32(pmu, 0x000200);
+ nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+ nvkm_rd32(device, 0x000200);
}
-struct nvkm_oclass *
-gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xf0),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = _nvkm_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
+static const struct nvkm_pmu_func
+gk110_pmu = {
.code.data = gk110_pmu_code,
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b14134ef9ea5..3b4917637902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -24,18 +24,17 @@
#include "priv.h"
#include "fuc/gk208.fuc5.h"
-struct nvkm_oclass *
-gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0x00),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = _nvkm_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
+static const struct nvkm_pmu_func
+gk208_pmu = {
.code.data = gk208_pmu_code,
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 594f746e68f2..6689d0290a7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,6 +19,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
#include "priv.h"
#include <subdev/clk.h>
@@ -35,7 +36,7 @@ struct gk20a_pmu_dvfs_data {
unsigned int avg_load;
};
-struct gk20a_pmu_priv {
+struct gk20a_pmu {
struct nvkm_pmu base;
struct nvkm_alarm alarm;
struct gk20a_pmu_dvfs_data *data;
@@ -48,28 +49,28 @@ struct gk20a_pmu_dvfs_dev_status {
};
static int
-gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
{
- struct nvkm_clk *clk = nvkm_clk(priv);
+ struct nvkm_clk *clk = pmu->base.subdev.device->clk;
return nvkm_clk_astate(clk, *state, 0, false);
}
static int
-gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
{
- struct nvkm_clk *clk = nvkm_clk(priv);
+ struct nvkm_clk *clk = pmu->base.subdev.device->clk;
*state = clk->pstate;
return 0;
}
static int
-gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
int *state, int load)
{
- struct gk20a_pmu_dvfs_data *data = priv->data;
- struct nvkm_clk *clk = nvkm_clk(priv);
+ struct gk20a_pmu_dvfs_data *data = pmu->data;
+ struct nvkm_clk *clk = pmu->base.subdev.device->clk;
int cur_level, level;
/* For GK20A, the performance level is directly mapped to pstate */
@@ -84,7 +85,8 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
level = min(clk->state_nr - 1, level);
}
- nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);
+ nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+ cur_level, level);
*state = level;
@@ -95,30 +97,35 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
}
static int
-gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
- status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
- status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
+ struct nvkm_device *device = pmu->base.subdev.device;
+ status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
+ status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
return 0;
}
static void
-gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
+gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
- nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
- nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+ struct nvkm_device *device = pmu->base.subdev.device;
+ nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+ nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
}
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
- struct gk20a_pmu_priv *priv =
- container_of(alarm, struct gk20a_pmu_priv, alarm);
- struct gk20a_pmu_dvfs_data *data = priv->data;
+ struct gk20a_pmu *pmu =
+ container_of(alarm, struct gk20a_pmu, alarm);
+ struct gk20a_pmu_dvfs_data *data = pmu->data;
struct gk20a_pmu_dvfs_dev_status status;
- struct nvkm_clk *clk = nvkm_clk(priv);
- struct nvkm_volt *volt = nvkm_volt(priv);
+ struct nvkm_subdev *subdev = &pmu->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_clk *clk = device->clk;
+ struct nvkm_timer *tmr = device->timer;
+ struct nvkm_volt *volt = device->volt;
u32 utilization = 0;
int state, ret;
@@ -129,9 +136,9 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
if (!clk || !volt)
goto resched;
- ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
+ ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
if (ret) {
- nv_warn(priv, "failed to get device status\n");
+ nvkm_warn(subdev, "failed to get device status\n");
goto resched;
}
@@ -140,56 +147,52 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
data->avg_load = (data->p_smooth * data->avg_load) + utilization;
data->avg_load /= data->p_smooth + 1;
- nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
- utilization, data->avg_load);
+ nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+ utilization, data->avg_load);
- ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
+ ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
if (ret) {
- nv_warn(priv, "failed to get current state\n");
+ nvkm_warn(subdev, "failed to get current state\n");
goto resched;
}
- if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
- nv_trace(priv, "set new state to %d\n", state);
- gk20a_pmu_dvfs_target(priv, &state);
+ if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
+ nvkm_trace(subdev, "set new state to %d\n", state);
+ gk20a_pmu_dvfs_target(pmu, &state);
}
resched:
- gk20a_pmu_dvfs_reset_dev_status(priv);
- nvkm_timer_alarm(priv, 100000000, alarm);
+ gk20a_pmu_dvfs_reset_dev_status(pmu);
+ nvkm_timer_alarm(tmr, 100000000, alarm);
}
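
The avg_load update in the worker above is an integer exponentially-weighted moving average, avg = (p_smooth * avg + sample) / (p_smooth + 1); with p_smooth = 1 (the value in gk20a_dvfs_data below) each new sample and the accumulated history get equal weight. A worked example:

    #include <stdio.h>

    int main(void)
    {
        int p_smooth = 1, avg = 0;
        int samples[] = { 100, 100, 40 };
        for (int i = 0; i < 3; i++) {
            avg = (p_smooth * avg + samples[i]) / (p_smooth + 1);
            printf("sample=%d avg=%d\n", samples[i], avg);
        }
        /* prints avg = 50, 75, 57: history decays by half each step */
        return 0;
    }
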
static int
-gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
+gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_pmu *pmu = (void *)object;
- struct gk20a_pmu_priv *priv = (void *)pmu;
-
- nvkm_timer_alarm_cancel(priv, &priv->alarm);
+ struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+ nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
+ return 0;
+}
- return nvkm_subdev_fini(&pmu->base, suspend);
+static void *
+gk20a_pmu_dtor(struct nvkm_subdev *subdev)
+{
+ return gk20a_pmu(subdev);
}
static int
-gk20a_pmu_init(struct nvkm_object *object)
+gk20a_pmu_init(struct nvkm_subdev *subdev)
{
- struct nvkm_pmu *pmu = (void *)object;
- struct gk20a_pmu_priv *priv = (void *)pmu;
- int ret;
-
- ret = nvkm_subdev_init(&pmu->base);
- if (ret)
- return ret;
-
- pmu->pgob = nvkm_pmu_pgob;
+ struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+ struct nvkm_device *device = pmu->base.subdev.device;
/* init pwr perf counter */
- nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
- nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
- nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+ nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+ nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+ nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
- nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);
- return ret;
+ nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
+ return 0;
}
static struct gk20a_pmu_dvfs_data
@@ -199,32 +202,26 @@ gk20a_dvfs_data= {
.p_smooth = 1,
};
-static int
-gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gk20a_pmu_priv *priv;
- int ret;
+static const struct nvkm_subdev_func
+gk20a_pmu = {
+ .init = gk20a_pmu_init,
+ .fini = gk20a_pmu_fini,
+ .dtor = gk20a_pmu_dtor,
+};
- ret = nvkm_pmu_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+int
+gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ static const struct nvkm_pmu_func func = {};
+ struct gk20a_pmu *pmu;
- priv->data = &gk20a_dvfs_data;
+ if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+ return -ENOMEM;
+ pmu->base.func = &func;
+ *ppmu = &pmu->base;
- nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
+ nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+ pmu->data = &gk20a_dvfs_data;
+ nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
return 0;
}
-
-struct nvkm_oclass *
-gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xea),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = gk20a_pmu_init,
- .fini = gk20a_pmu_fini,
- },
-}.base;
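
gk20a embeds the generic struct nvkm_pmu as "base" and recovers its wrapper with the gk20a_pmu() container_of() macro defined at the top of the file; this pattern replaces the old (void *) downcasts throughout the series. A standalone illustration with invented types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int index; };
    struct derived { struct base base; int extra; };

    int main(void)
    {
        struct derived d = { .base = { 7 }, .extra = 42 };
        struct base *b = &d.base;                 /* what callbacks get */
        struct derived *back = container_of(b, struct derived, base);
        printf("%d %d\n", back->base.index, back->extra);  /* 7 42 */
        return 0;
    }
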
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
new file mode 100644
index 000000000000..31b8692b4641
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#define gk208_pmu_code gm107_pmu_code
+#define gk208_pmu_data gm107_pmu_data
+#include "fuc/gk208.fuc5.h"
+
+static const struct nvkm_pmu_func
+gm107_pmu = {
+ .code.data = gm107_pmu_code,
+ .code.size = sizeof(gm107_pmu_code),
+ .data.data = gm107_pmu_data,
+ .data.size = sizeof(gm107_pmu_data),
+};
+
+int
+gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+}
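
gm107 reuses the gk208 firmware image by #define-renaming the generated arrays before including the fuc header, the same trick the gk104/gk110 files above use for gf119. A toy illustration of the rename-then-include pattern, where the array definition stands in for a generated header:

    #include <stdio.h>

    /* Rename takes effect before the "generated" definition below, so
     * the array is actually emitted under the gm107_* name. */
    #define gk208_demo_code gm107_demo_code
    static unsigned gk208_demo_code[] = { 1, 2, 3 };

    int main(void)
    {
        printf("%zu words\n", sizeof(gm107_demo_code) / sizeof(unsigned));
        return 0;
    }
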
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 30aaeb21de41..8ba7fa4ca75b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,26 +24,25 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"
-static int
-gt215_pmu_init(struct nvkm_object *object)
+static void
+gt215_pmu_reset(struct nvkm_pmu *pmu)
{
- struct nvkm_pmu *pmu = (void *)object;
- nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
- nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
- return nvkm_pmu_init(pmu);
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
}
-struct nvkm_oclass *
-gt215_pmu_oclass = &(struct nvkm_pmu_impl) {
- .base.handle = NV_SUBDEV(PMU, 0xa3),
- .base.ofuncs = &(struct nvkm_ofuncs) {
- .ctor = _nvkm_pmu_ctor,
- .dtor = _nvkm_pmu_dtor,
- .init = gt215_pmu_init,
- .fini = _nvkm_pmu_fini,
- },
+static const struct nvkm_pmu_func
+gt215_pmu = {
+ .reset = gt215_pmu_reset,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
-}.base;
+};
+
+int
+gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index b75c5b885980..e6f74168238c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -2,8 +2,6 @@
#define __NVKM_PMU_MEMX_H__
#include "priv.h"
-#include <core/device.h>
-
struct nvkm_memx {
struct nvkm_pmu *pmu;
u32 base;
@@ -18,13 +16,13 @@ struct nvkm_memx {
static void
memx_out(struct nvkm_memx *memx)
{
- struct nvkm_pmu *pmu = memx->pmu;
+ struct nvkm_device *device = memx->pmu->subdev.device;
int i;
if (memx->c.mthd) {
- nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+ nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
for (i = 0; i < memx->c.size; i++)
- nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+ nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
memx->c.mthd = 0;
memx->c.size = 0;
}
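
memx_out() above flushes a buffered command as a single header word, (size << 16) | mthd, followed by size payload words written to the data port at 0x10a1c4. A sketch of that packing (pure arithmetic, no hardware; the method id is a placeholder):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mthd = 0x03, size = 2;     /* hypothetical method, 2 args */
        uint32_t header = (size << 16) | mthd;
        printf("header=%08x size=%u mthd=%02x\n",
               header, header >> 16, header & 0xffff);
        return 0;
    }
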
@@ -44,12 +42,13 @@ memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
+ struct nvkm_device *device = pmu->subdev.device;
struct nvkm_memx *memx;
u32 reply[2];
int ret;
- ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
- MEMX_INFO_DATA, 0);
+ ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_DATA, 0);
if (ret)
return ret;
@@ -62,9 +61,9 @@ nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
/* acquire data segment access */
do {
- nv_wr32(pmu, 0x10a580, 0x00000003);
- } while (nv_rd32(pmu, 0x10a580) != 0x00000003);
- nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+ nvkm_wr32(device, 0x10a580, 0x00000003);
+ } while (nvkm_rd32(device, 0x10a580) != 0x00000003);
+ nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
return 0;
}
@@ -73,23 +72,25 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
struct nvkm_memx *memx = *pmemx;
struct nvkm_pmu *pmu = memx->pmu;
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = subdev->device;
u32 finish, reply[2];
/* flush the cache... */
memx_out(memx);
/* release data segment access */
- finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
- nv_wr32(pmu, 0x10a580, 0x00000000);
+ finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
+ nvkm_wr32(device, 0x10a580, 0x00000000);
/* call MEMX process to execute the script, and wait for reply */
if (exec) {
- pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
- memx->base, finish);
+ nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+ memx->base, finish);
}
- nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
- reply[0], reply[1]);
+ nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+ reply[0], reply[1]);
kfree(memx);
return 0;
}
@@ -97,7 +98,7 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
- nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+ nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}
@@ -105,8 +106,8 @@ void
nvkm_memx_wait(struct nvkm_memx *memx,
u32 addr, u32 mask, u32 data, u32 nsec)
{
- nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
- addr, mask, data, nsec);
+ nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+ addr, mask, data, nsec);
memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
memx_out(memx); /* fuc can't handle multiple */
}
@@ -114,7 +115,7 @@ nvkm_memx_wait(struct nvkm_memx *memx,
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
- nv_debug(memx->pmu, " DELAY = %d ns\n", nsec);
+ nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
memx_out(memx); /* fuc can't handle multiple */
}
@@ -122,16 +123,17 @@ nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
- struct nvkm_pmu *pmu = memx->pmu;
+ struct nvkm_subdev *subdev = &memx->pmu->subdev;
+ struct nvkm_device *device = subdev->device;
u32 heads, x, y, px = 0;
int i, head_sync;
- if (nv_device(pmu)->chipset < 0xd0) {
- heads = nv_rd32(pmu, 0x610050);
+ if (device->chipset < 0xd0) {
+ heads = nvkm_rd32(device, 0x610050);
for (i = 0; i < 2; i++) {
/* Heuristic: sync to head with biggest resolution */
if (heads & (2 << (i << 3))) {
- x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+ x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
y = (x & 0xffff0000) >> 16;
x &= 0x0000ffff;
if ((x * y) > px) {
@@ -143,11 +145,11 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
}
if (px == 0) {
- nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+ nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
return;
}
- nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+ nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
memx_out(memx); /* fuc can't handle multiple */
}
@@ -155,18 +157,19 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
void
nvkm_memx_train(struct nvkm_memx *memx)
{
- nv_debug(memx->pmu, " MEM TRAIN\n");
+ nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
+ struct nvkm_device *device = pmu->subdev.device;
u32 reply[2], base, size, i;
int ret;
- ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
- MEMX_INFO_TRAIN, 0);
+ ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_TRAIN, 0);
if (ret)
return ret;
@@ -176,10 +179,10 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
return -ENOMEM;
/* read the packet */
- nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+ nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
for (i = 0; i < size; i++)
- res[i] = nv_rd32(pmu, 0x10a1c4);
+ res[i] = nvkm_rd32(device, 0x10a1c4);
return 0;
}
@@ -187,14 +190,14 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
void
nvkm_memx_block(struct nvkm_memx *memx)
{
- nv_debug(memx->pmu, " HOST BLOCKED\n");
+ nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
memx_cmd(memx, MEMX_ENTER, 0, NULL);
}
void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
- nv_debug(memx->pmu, " HOST UNBLOCKED\n");
+ nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 799e7c8b88f5..f38c88fae3d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,38 +1,20 @@
#ifndef __NVKM_PMU_PRIV_H__
#define __NVKM_PMU_PRIV_H__
+#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
#include <subdev/pmu.h>
#include <subdev/pmu/fuc/os.h>
-#define nvkm_pmu_create(p, e, o, d) \
- nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pmu_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
-#define nvkm_pmu_init(p) ({ \
- struct nvkm_pmu *_pmu = (p); \
- _nvkm_pmu_init(nv_object(_pmu)); \
-})
-#define nvkm_pmu_fini(p,s) ({ \
- struct nvkm_pmu *_pmu = (p); \
- _nvkm_pmu_fini(nv_object(_pmu), (s)); \
-})
+int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
+ int index, struct nvkm_pmu **);
-int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, int, void **);
+struct nvkm_pmu_func {
+ void (*reset)(struct nvkm_pmu *);
-int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-#define _nvkm_pmu_dtor _nvkm_subdev_dtor
-int _nvkm_pmu_init(struct nvkm_object *);
-int _nvkm_pmu_fini(struct nvkm_object *, bool);
-void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
-
-struct nvkm_pmu_impl {
- struct nvkm_oclass base;
struct {
u32 *data;
u32 size;
} code;
+
struct {
u32 *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 5837cf1292d9..135758ba3e28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,5 +9,5 @@ nvkm-y += nvkm/subdev/therm/nv40.o
nvkm-y += nvkm/subdev/therm/nv50.o
nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
-nvkm-y += nvkm/subdev/therm/gf110.o
+nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index ec327cb64a0d..949dc6101a58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -23,21 +23,26 @@
*/
#include "priv.h"
-#include <core/device.h>
+int
+nvkm_therm_temp_get(struct nvkm_therm *therm)
+{
+ if (therm->func->temp_get)
+ return therm->func->temp_get(therm);
+ return -ENODEV;
+}
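
nvkm_therm_temp_get() above is the new pattern for optional per-chip operations: dispatch through the func table if the hook exists, otherwise fall back to -ENODEV. The same shape in miniature, with invented names:

    #include <stdio.h>

    struct therm_ops { int (*temp_get)(void); };

    static int demo_temp(void) { return 42; }

    static int temp_get(const struct therm_ops *ops)
    {
        if (ops->temp_get)
            return ops->temp_get();
        return -19;                    /* -ENODEV on Linux */
    }

    int main(void)
    {
        struct therm_ops with = { demo_temp }, without = { 0 };
        printf("%d %d\n", temp_get(&with), temp_get(&without)); /* 42 -19 */
        return 0;
    }
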
static int
nvkm_therm_update_trip(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
+ struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
*cur_trip = NULL,
- *last_trip = priv->last_trip;
- u8 temp = therm->temp_get(therm);
+ *last_trip = therm->last_trip;
+ u8 temp = therm->func->temp_get(therm);
u16 duty, i;
/* look for the trip point corresponding to the current temperature */
cur_trip = NULL;
- for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+ for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
if (temp >= trip[i].temp)
cur_trip = &trip[i];
}
@@ -49,10 +54,10 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
if (cur_trip) {
duty = cur_trip->fan_duty;
- priv->last_trip = cur_trip;
+ therm->last_trip = cur_trip;
} else {
duty = 0;
- priv->last_trip = NULL;
+ therm->last_trip = NULL;
}
return duty;
@@ -61,51 +66,50 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
static int
nvkm_therm_update_linear(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- u8 linear_min_temp = priv->fan->bios.linear_min_temp;
- u8 linear_max_temp = priv->fan->bios.linear_max_temp;
- u8 temp = therm->temp_get(therm);
+ u8 linear_min_temp = therm->fan->bios.linear_min_temp;
+ u8 linear_max_temp = therm->fan->bios.linear_max_temp;
+ u8 temp = therm->func->temp_get(therm);
u16 duty;
/* handle the non-linear part first */
if (temp < linear_min_temp)
- return priv->fan->bios.min_duty;
+ return therm->fan->bios.min_duty;
else if (temp > linear_max_temp)
- return priv->fan->bios.max_duty;
+ return therm->fan->bios.max_duty;
/* we are in the linear zone */
duty = (temp - linear_min_temp);
- duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+ duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
duty /= (linear_max_temp - linear_min_temp);
- duty += priv->fan->bios.min_duty;
+ duty += therm->fan->bios.min_duty;
return duty;
}
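
With the defaults installed later in this diff (min_duty 0, max_duty 100, linear range 40..85 degC), the linear zone above gives, for example, duty = (60 - 40) * (100 - 0) / (85 - 40) = 44% at 60 degC, integer division included. A hedged re-statement of the curve:

    #include <stdio.h>

    static int linear_duty(int temp, int tmin, int tmax, int dmin, int dmax)
    {
        if (temp < tmin) return dmin;     /* below the linear zone */
        if (temp > tmax) return dmax;     /* above the linear zone */
        return dmin + (temp - tmin) * (dmax - dmin) / (tmax - tmin);
    }

    int main(void)
    {
        printf("%d%%\n", linear_duty(60, 40, 85, 0, 100));  /* 44% */
        return 0;
    }
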
static void
nvkm_therm_update(struct nvkm_therm *therm, int mode)
{
- struct nvkm_timer *ptimer = nvkm_timer(therm);
- struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_timer *tmr = subdev->device->timer;
unsigned long flags;
bool immd = true;
bool poll = true;
int duty = -1;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&therm->lock, flags);
if (mode < 0)
- mode = priv->mode;
- priv->mode = mode;
+ mode = therm->mode;
+ therm->mode = mode;
switch (mode) {
case NVKM_THERM_CTRL_MANUAL:
- ptimer->alarm_cancel(ptimer, &priv->alarm);
+ nvkm_timer_alarm_cancel(tmr, &therm->alarm);
duty = nvkm_therm_fan_get(therm);
if (duty < 0)
duty = 100;
poll = false;
break;
case NVKM_THERM_CTRL_AUTO:
- switch(priv->fan->bios.fan_mode) {
+ switch (therm->fan->bios.fan_mode) {
case NVBIOS_THERM_FAN_TRIP:
duty = nvkm_therm_update_trip(therm);
break;
@@ -113,8 +117,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
duty = nvkm_therm_update_linear(therm);
break;
case NVBIOS_THERM_FAN_OTHER:
- if (priv->cstate)
- duty = priv->cstate;
+ if (therm->cstate)
+ duty = therm->cstate;
poll = false;
break;
}
@@ -122,29 +126,29 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
break;
case NVKM_THERM_CTRL_NONE:
default:
- ptimer->alarm_cancel(ptimer, &priv->alarm);
+ nvkm_timer_alarm_cancel(tmr, &therm->alarm);
poll = false;
}
- if (list_empty(&priv->alarm.head) && poll)
- ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (list_empty(&therm->alarm.head) && poll)
+ nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
+ spin_unlock_irqrestore(&therm->lock, flags);
if (duty >= 0) {
- nv_debug(therm, "FAN target request: %d%%\n", duty);
+ nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
nvkm_therm_fan_set(therm, immd, duty);
}
}
int
-nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
+nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
{
- struct nvkm_therm_priv *priv = (void *)ptherm;
- if (!dir || (dir < 0 && fan < priv->cstate) ||
- (dir > 0 && fan > priv->cstate)) {
- nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
- priv->cstate = fan;
- nvkm_therm_update(ptherm, -1);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ if (!dir || (dir < 0 && fan < therm->cstate) ||
+ (dir > 0 && fan > therm->cstate)) {
+ nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
+ therm->cstate = fan;
+ nvkm_therm_update(therm, -1);
}
return 0;
}
@@ -152,16 +156,16 @@ nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
static void
nvkm_therm_alarm(struct nvkm_alarm *alarm)
{
- struct nvkm_therm_priv *priv =
- container_of(alarm, struct nvkm_therm_priv, alarm);
- nvkm_therm_update(&priv->base, -1);
+ struct nvkm_therm *therm =
+ container_of(alarm, struct nvkm_therm, alarm);
+ nvkm_therm_update(therm, -1);
}
int
nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_device *device = nv_device(therm);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
static const char *name[] = {
"disabled",
"manual",
@@ -171,51 +175,49 @@ nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
/* The default PPWR ucode on fermi interferes with fan management */
if ((mode >= ARRAY_SIZE(name)) ||
(mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
- !nvkm_subdev(device, NVDEV_SUBDEV_PMU)))
+ !device->pmu))
return -EINVAL;
/* do not allow automatic fan management if the thermal sensor is
* not available */
- if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+ if (mode == NVKM_THERM_CTRL_AUTO &&
+ therm->func->temp_get(therm) < 0)
return -EINVAL;
- if (priv->mode == mode)
+ if (therm->mode == mode)
return 0;
- nv_info(therm, "fan management: %s\n", name[mode]);
+ nvkm_debug(subdev, "fan management: %s\n", name[mode]);
nvkm_therm_update(therm, mode);
return 0;
}
int
-nvkm_therm_attr_get(struct nvkm_therm *therm,
- enum nvkm_therm_attr_type type)
+nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
{
- struct nvkm_therm_priv *priv = (void *)therm;
-
switch (type) {
case NVKM_THERM_ATTR_FAN_MIN_DUTY:
- return priv->fan->bios.min_duty;
+ return therm->fan->bios.min_duty;
case NVKM_THERM_ATTR_FAN_MAX_DUTY:
- return priv->fan->bios.max_duty;
+ return therm->fan->bios.max_duty;
case NVKM_THERM_ATTR_FAN_MODE:
- return priv->mode;
+ return therm->mode;
case NVKM_THERM_ATTR_THRS_FAN_BOOST:
- return priv->bios_sensor.thrs_fan_boost.temp;
+ return therm->bios_sensor.thrs_fan_boost.temp;
case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
- return priv->bios_sensor.thrs_fan_boost.hysteresis;
+ return therm->bios_sensor.thrs_fan_boost.hysteresis;
case NVKM_THERM_ATTR_THRS_DOWN_CLK:
- return priv->bios_sensor.thrs_down_clock.temp;
+ return therm->bios_sensor.thrs_down_clock.temp;
case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
- return priv->bios_sensor.thrs_down_clock.hysteresis;
+ return therm->bios_sensor.thrs_down_clock.hysteresis;
case NVKM_THERM_ATTR_THRS_CRITICAL:
- return priv->bios_sensor.thrs_critical.temp;
+ return therm->bios_sensor.thrs_critical.temp;
case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
- return priv->bios_sensor.thrs_critical.hysteresis;
+ return therm->bios_sensor.thrs_critical.hysteresis;
case NVKM_THERM_ATTR_THRS_SHUTDOWN:
- return priv->bios_sensor.thrs_shutdown.temp;
+ return therm->bios_sensor.thrs_shutdown.temp;
case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
- return priv->bios_sensor.thrs_shutdown.hysteresis;
+ return therm->bios_sensor.thrs_shutdown.hysteresis;
}
return -EINVAL;
@@ -225,143 +227,156 @@ int
nvkm_therm_attr_set(struct nvkm_therm *therm,
enum nvkm_therm_attr_type type, int value)
{
- struct nvkm_therm_priv *priv = (void *)therm;
-
switch (type) {
case NVKM_THERM_ATTR_FAN_MIN_DUTY:
if (value < 0)
value = 0;
- if (value > priv->fan->bios.max_duty)
- value = priv->fan->bios.max_duty;
- priv->fan->bios.min_duty = value;
+ if (value > therm->fan->bios.max_duty)
+ value = therm->fan->bios.max_duty;
+ therm->fan->bios.min_duty = value;
return 0;
case NVKM_THERM_ATTR_FAN_MAX_DUTY:
if (value < 0)
value = 0;
- if (value < priv->fan->bios.min_duty)
- value = priv->fan->bios.min_duty;
- priv->fan->bios.max_duty = value;
+ if (value < therm->fan->bios.min_duty)
+ value = therm->fan->bios.min_duty;
+ therm->fan->bios.max_duty = value;
return 0;
case NVKM_THERM_ATTR_FAN_MODE:
return nvkm_therm_fan_mode(therm, value);
case NVKM_THERM_ATTR_THRS_FAN_BOOST:
- priv->bios_sensor.thrs_fan_boost.temp = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_fan_boost.temp = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
- priv->bios_sensor.thrs_fan_boost.hysteresis = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_fan_boost.hysteresis = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_DOWN_CLK:
- priv->bios_sensor.thrs_down_clock.temp = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_down_clock.temp = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
- priv->bios_sensor.thrs_down_clock.hysteresis = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_down_clock.hysteresis = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_CRITICAL:
- priv->bios_sensor.thrs_critical.temp = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_critical.temp = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
- priv->bios_sensor.thrs_critical.hysteresis = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_critical.hysteresis = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_SHUTDOWN:
- priv->bios_sensor.thrs_shutdown.temp = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_shutdown.temp = value;
+ therm->func->program_alarms(therm);
return 0;
case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
- priv->bios_sensor.thrs_shutdown.hysteresis = value;
- priv->sensor.program_alarms(therm);
+ therm->bios_sensor.thrs_shutdown.hysteresis = value;
+ therm->func->program_alarms(therm);
return 0;
}
return -EINVAL;
}
-int
-_nvkm_therm_init(struct nvkm_object *object)
+static void
+nvkm_therm_intr(struct nvkm_subdev *subdev)
{
- struct nvkm_therm *therm = (void *)object;
- struct nvkm_therm_priv *priv = (void *)therm;
- int ret;
-
- ret = nvkm_subdev_init(&therm->base);
- if (ret)
- return ret;
-
- if (priv->suspend >= 0) {
- /* restore the pwm value only when on manual or auto mode */
- if (priv->suspend > 0)
- nvkm_therm_fan_set(therm, true, priv->fan->percent);
-
- nvkm_therm_fan_mode(therm, priv->suspend);
- }
- nvkm_therm_sensor_init(therm);
- nvkm_therm_fan_init(therm);
- return 0;
+ struct nvkm_therm *therm = nvkm_therm(subdev);
+ if (therm->func->intr)
+ therm->func->intr(therm);
}
-int
-_nvkm_therm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_therm *therm = (void *)object;
- struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_therm *therm = nvkm_therm(subdev);
+
+ if (therm->func->fini)
+ therm->func->fini(therm);
nvkm_therm_fan_fini(therm, suspend);
nvkm_therm_sensor_fini(therm, suspend);
+
if (suspend) {
- priv->suspend = priv->mode;
- priv->mode = NVKM_THERM_CTRL_NONE;
+ therm->suspend = therm->mode;
+ therm->mode = NVKM_THERM_CTRL_NONE;
}
- return nvkm_subdev_fini(&therm->base, suspend);
-}
-
-int
-nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
-{
- struct nvkm_therm_priv *priv;
- int ret;
-
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
- "therm", length, pobject);
- priv = *pobject;
- if (ret)
- return ret;
-
- nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->sensor.alarm_program_lock);
-
- priv->base.fan_get = nvkm_therm_fan_user_get;
- priv->base.fan_set = nvkm_therm_fan_user_set;
- priv->base.fan_sense = nvkm_therm_fan_sense;
- priv->base.attr_get = nvkm_therm_attr_get;
- priv->base.attr_set = nvkm_therm_attr_set;
- priv->mode = priv->suspend = -1; /* undefined */
return 0;
}
-int
-nvkm_therm_preinit(struct nvkm_therm *therm)
+static int
+nvkm_therm_oneinit(struct nvkm_subdev *subdev)
{
+ struct nvkm_therm *therm = nvkm_therm(subdev);
nvkm_therm_sensor_ctor(therm);
nvkm_therm_ic_ctor(therm);
nvkm_therm_fan_ctor(therm);
-
nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
nvkm_therm_sensor_preinit(therm);
return 0;
}
-void
-_nvkm_therm_dtor(struct nvkm_object *object)
+static int
+nvkm_therm_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_therm *therm = nvkm_therm(subdev);
+
+ therm->func->init(therm);
+
+ if (therm->suspend >= 0) {
+ /* restore the pwm value only when on manual or auto mode */
+ if (therm->suspend > 0)
+ nvkm_therm_fan_set(therm, true, therm->fan->percent);
+
+ nvkm_therm_fan_mode(therm, therm->suspend);
+ }
+
+ nvkm_therm_sensor_init(therm);
+ nvkm_therm_fan_init(therm);
+ return 0;
+}
+
+static void *
+nvkm_therm_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_therm *therm = nvkm_therm(subdev);
+ kfree(therm->fan);
+ return therm;
+}
+
+static const struct nvkm_subdev_func
+nvkm_therm = {
+ .dtor = nvkm_therm_dtor,
+ .oneinit = nvkm_therm_oneinit,
+ .init = nvkm_therm_init,
+ .fini = nvkm_therm_fini,
+ .intr = nvkm_therm_intr,
+};
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+ int index, struct nvkm_therm **ptherm)
{
- struct nvkm_therm_priv *priv = (void *)object;
- kfree(priv->fan);
- nvkm_subdev_destroy(&priv->base.base);
+ struct nvkm_therm *therm;
+
+ if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+ therm->func = func;
+
+ nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
+ spin_lock_init(&therm->lock);
+ spin_lock_init(&therm->sensor.alarm_program_lock);
+
+ therm->fan_get = nvkm_therm_fan_user_get;
+ therm->fan_set = nvkm_therm_fan_user_set;
+ therm->attr_get = nvkm_therm_attr_get;
+ therm->attr_set = nvkm_therm_attr_set;
+ therm->mode = therm->suspend = -1; /* undefined */
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 434fa745ca40..91198d79393a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -32,8 +32,8 @@ static int
nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
{
struct nvkm_therm *therm = fan->parent;
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_timer *ptimer = nvkm_timer(priv);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_timer *tmr = subdev->device->timer;
unsigned long flags;
int ret = 0;
int duty;
@@ -45,7 +45,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
target = max_t(u8, target, fan->bios.min_duty);
target = min_t(u8, target, fan->bios.max_duty);
if (fan->percent != target) {
- nv_debug(therm, "FAN target: %d\n", target);
+ nvkm_debug(subdev, "FAN target: %d\n", target);
fan->percent = target;
}
@@ -70,7 +70,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
duty = target;
}
- nv_debug(therm, "FAN update: %d\n", duty);
+ nvkm_debug(subdev, "FAN update: %d\n", duty);
ret = fan->set(therm, duty);
if (ret) {
spin_unlock_irqrestore(&fan->lock, flags);
@@ -95,7 +95,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
else
delay = bump_period;
- ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+ nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
}
return ret;
@@ -111,48 +111,51 @@ nvkm_fan_alarm(struct nvkm_alarm *alarm)
int
nvkm_therm_fan_get(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- return priv->fan->get(therm);
+ return therm->fan->get(therm);
}
int
nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- return nvkm_fan_update(priv->fan, immediate, percent);
+ return nvkm_fan_update(therm->fan, immediate, percent);
}
int
nvkm_therm_fan_sense(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_timer *ptimer = nvkm_timer(therm);
- struct nvkm_gpio *gpio = nvkm_gpio(therm);
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_timer *tmr = device->timer;
+ struct nvkm_gpio *gpio = device->gpio;
u32 cycles, cur, prev;
u64 start, end, tach;
- if (priv->fan->tach.func == DCB_GPIO_UNUSED)
+ if (therm->func->fan_sense)
+ return therm->func->fan_sense(therm);
+
+ if (therm->fan->tach.func == DCB_GPIO_UNUSED)
return -ENODEV;
/* Time a complete rotation and extrapolate to RPM:
* When the fan spins, it changes the value of GPIO FAN_SENSE.
* We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
*/
- start = ptimer->read(ptimer);
- prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+ start = nvkm_timer_read(tmr);
+ prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+ therm->fan->tach.line);
cycles = 0;
do {
usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
- cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+ cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+ therm->fan->tach.line);
if (prev != cur) {
if (!start)
- start = ptimer->read(ptimer);
+ start = nvkm_timer_read(tmr);
cycles++;
prev = cur;
}
- } while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
- end = ptimer->read(ptimer);
+ } while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
+ end = nvkm_timer_read(tmr);
if (cycles == 5) {
tach = (u64)60000000000ULL;
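
The loop above counts GPIO transitions (four per rotation, per the comment) with a 250ms timeout; once roughly one rotation has been timed, RPM is just 60e9 divided by the elapsed nanoseconds for that turn. A worked example with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t elapsed_ns = 20000000;                /* 20 ms per rotation */
        uint64_t rpm = 60000000000ULL / elapsed_ns;
        printf("%llu RPM\n", (unsigned long long)rpm); /* 3000 */
        return 0;
    }
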
@@ -171,9 +174,7 @@ nvkm_therm_fan_user_get(struct nvkm_therm *therm)
int
nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
{
- struct nvkm_therm_priv *priv = (void *)therm;
-
- if (priv->mode != NVKM_THERM_CTRL_MANUAL)
+ if (therm->mode != NVKM_THERM_CTRL_MANUAL)
return -EINVAL;
return nvkm_therm_fan_set(therm, true, percent);
@@ -182,29 +183,25 @@ nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
static void
nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
-
- priv->fan->bios.pwm_freq = 0;
- priv->fan->bios.min_duty = 0;
- priv->fan->bios.max_duty = 100;
- priv->fan->bios.bump_period = 500;
- priv->fan->bios.slow_down_period = 2000;
- priv->fan->bios.linear_min_temp = 40;
- priv->fan->bios.linear_max_temp = 85;
+ therm->fan->bios.pwm_freq = 0;
+ therm->fan->bios.min_duty = 0;
+ therm->fan->bios.max_duty = 100;
+ therm->fan->bios.bump_period = 500;
+ therm->fan->bios.slow_down_period = 2000;
+ therm->fan->bios.linear_min_temp = 40;
+ therm->fan->bios.linear_max_temp = 85;
}
static void
nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
+ if (therm->fan->bios.min_duty > 100)
+ therm->fan->bios.min_duty = 100;
+ if (therm->fan->bios.max_duty > 100)
+ therm->fan->bios.max_duty = 100;
- if (priv->fan->bios.min_duty > 100)
- priv->fan->bios.min_duty = 100;
- if (priv->fan->bios.max_duty > 100)
- priv->fan->bios.max_duty = 100;
-
- if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
- priv->fan->bios.min_duty = priv->fan->bios.max_duty;
+ if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
+ therm->fan->bios.min_duty = therm->fan->bios.max_duty;
}
int
@@ -216,29 +213,28 @@ nvkm_therm_fan_init(struct nvkm_therm *therm)
int
nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+ struct nvkm_timer *tmr = therm->subdev.device->timer;
if (suspend)
- ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+ nvkm_timer_alarm_cancel(tmr, &therm->fan->alarm);
return 0;
}
int
nvkm_therm_fan_ctor(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_gpio *gpio = nvkm_gpio(therm);
- struct nvkm_bios *bios = nvkm_bios(therm);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gpio *gpio = device->gpio;
+ struct nvkm_bios *bios = device->bios;
struct dcb_gpio_func func;
int ret;
/* attempt to locate a drivable fan, and determine control method */
- ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+ ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
if (ret == 0) {
/* FIXME: is this really the place to perform such checks? */
if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
- nv_debug(therm, "GPIO_FAN is in input mode\n");
+ nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
ret = -EINVAL;
} else {
ret = nvkm_fanpwm_create(therm, &func);
@@ -254,28 +250,29 @@ nvkm_therm_fan_ctor(struct nvkm_therm *therm)
return ret;
}
- nv_info(therm, "FAN control: %s\n", priv->fan->type);
+ nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);
/* read the current speed, it is useful when resuming */
- priv->fan->percent = nvkm_therm_fan_get(therm);
+ therm->fan->percent = nvkm_therm_fan_get(therm);
/* attempt to detect a tachometer connection */
- ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+ ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
+ &therm->fan->tach);
if (ret)
- priv->fan->tach.func = DCB_GPIO_UNUSED;
+ therm->fan->tach.func = DCB_GPIO_UNUSED;
/* initialise fan bump/slow update handling */
- priv->fan->parent = therm;
- nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm);
- spin_lock_init(&priv->fan->lock);
+ therm->fan->parent = therm;
+ nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
+ spin_lock_init(&therm->fan->lock);
/* other random init... */
nvkm_therm_fan_set_defaults(therm);
- nvbios_perf_fan_parse(bios, &priv->fan->perf);
- if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
- nv_debug(therm, "parsing the fan table failed\n");
- if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
- nv_error(therm, "parsing both fan tables failed\n");
+ nvbios_perf_fan_parse(bios, &therm->fan->perf);
+ if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
+ nvkm_debug(subdev, "parsing the fan table failed\n");
+ if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
+ nvkm_error(subdev, "parsing both fan tables failed\n");
}
nvkm_therm_fan_safety_checks(therm);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index 534e5970ec9c..8ae300f911b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -38,11 +38,10 @@ nvkm_fannil_set(struct nvkm_therm *therm, int percent)
int
nvkm_fannil_create(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
struct nvkm_fan *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- tpriv->fan = priv;
+ therm->fan = priv;
if (!priv)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index bde5ceaeb70a..340f37a299dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -24,13 +24,12 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/fan.h>
#include <subdev/gpio.h>
-struct nvkm_fanpwm_priv {
+struct nvkm_fanpwm {
struct nvkm_fan base;
struct dcb_gpio_func func;
};
@@ -38,76 +37,74 @@ struct nvkm_fanpwm_priv {
static int
nvkm_fanpwm_get(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
- struct nvkm_gpio *gpio = nvkm_gpio(therm);
- int card_type = nv_device(therm)->card_type;
+ struct nvkm_fanpwm *fan = (void *)therm->fan;
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_gpio *gpio = device->gpio;
+ int card_type = device->card_type;
u32 divs, duty;
int ret;
- ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+ ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
if (ret == 0 && divs) {
divs = max(divs, duty);
- if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ if (card_type <= NV_40 || (fan->func.log[0] & 1))
duty = divs - duty;
return (duty * 100) / divs;
}
- return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+ return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
}
static int
nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
- int card_type = nv_device(therm)->card_type;
+ struct nvkm_fanpwm *fan = (void *)therm->fan;
+ int card_type = therm->subdev.device->card_type;
u32 divs, duty;
int ret;
- divs = priv->base.perf.pwm_divisor;
- if (priv->base.bios.pwm_freq) {
+ divs = fan->base.perf.pwm_divisor;
+ if (fan->base.bios.pwm_freq) {
divs = 1;
- if (therm->pwm_clock)
- divs = therm->pwm_clock(therm, priv->func.line);
- divs /= priv->base.bios.pwm_freq;
+ if (therm->func->pwm_clock)
+ divs = therm->func->pwm_clock(therm, fan->func.line);
+ divs /= fan->base.bios.pwm_freq;
}
duty = ((divs * percent) + 99) / 100;
- if (card_type <= NV_40 || (priv->func.log[0] & 1))
+ if (card_type <= NV_40 || (fan->func.log[0] & 1))
duty = divs - duty;
- ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+ ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
if (ret == 0)
- ret = therm->pwm_ctrl(therm, priv->func.line, true);
+ ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
return ret;
}
int
nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
- struct nvkm_device *device = nv_device(therm);
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_bios *bios = nvkm_bios(therm);
- struct nvkm_fanpwm_priv *priv;
- struct nvbios_therm_fan fan;
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_fanpwm *fan;
+ struct nvbios_therm_fan info = {};
u32 divs, duty;
- nvbios_fan_parse(bios, &fan);
+ nvbios_fan_parse(bios, &info);
if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
- !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
- therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+ !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
+ therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
return -ENODEV;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- tpriv->fan = &priv->base;
- if (!priv)
+ fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+ therm->fan = &fan->base;
+ if (!fan)
return -ENOMEM;
- priv->base.type = "PWM";
- priv->base.get = nvkm_fanpwm_get;
- priv->base.set = nvkm_fanpwm_set;
- priv->func = *func;
+ fan->base.type = "PWM";
+ fan->base.get = nvkm_fanpwm_get;
+ fan->base.set = nvkm_fanpwm_set;
+ fan->func = *func;
return 0;
}
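
Two details of the PWM math above are worth spelling out: the duty is computed with a rounding-up division, so any non-zero percentage yields a non-zero duty, and on NV40-and-earlier boards (or when the GPIO log bit is set) the PWM polarity is inverted. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t duty_from_percent(uint32_t divs, uint32_t percent, int inverted)
{
        uint32_t duty = ((divs * percent) + 99) / 100;  /* ceil(divs * percent / 100) */

        if (inverted)                   /* reversed polarity: drive the complement */
                duty = divs - duty;
        return duty;
}

int main(void)
{
        printf("%u\n", duty_from_percent(100, 33, 0));  /* 33 */
        printf("%u\n", duty_from_percent(100, 33, 1));  /* 67 */
        return 0;
}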
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 4ce041e81371..59701b7a6597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -26,7 +26,7 @@
#include <subdev/gpio.h>
#include <subdev/timer.h>
-struct nvkm_fantog_priv {
+struct nvkm_fantog {
struct nvkm_fan base;
struct nvkm_alarm alarm;
spinlock_t lock;
@@ -36,83 +36,81 @@ struct nvkm_fantog_priv {
};
static void
-nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent)
+nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
{
- struct nvkm_therm_priv *tpriv = (void *)priv->base.parent;
- struct nvkm_timer *ptimer = nvkm_timer(tpriv);
- struct nvkm_gpio *gpio = nvkm_gpio(tpriv);
+ struct nvkm_therm *therm = fan->base.parent;
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_timer *tmr = device->timer;
+ struct nvkm_gpio *gpio = device->gpio;
unsigned long flags;
int duty;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&fan->lock, flags);
if (percent < 0)
- percent = priv->percent;
- priv->percent = percent;
+ percent = fan->percent;
+ fan->percent = percent;
- duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
- gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+ duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
- if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
- u64 next_change = (percent * priv->period_us) / 100;
+ if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+ u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
- next_change = priv->period_us - next_change;
- ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+ next_change = fan->period_us - next_change;
+ nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&fan->lock, flags);
}
static void
nvkm_fantog_alarm(struct nvkm_alarm *alarm)
{
- struct nvkm_fantog_priv *priv =
- container_of(alarm, struct nvkm_fantog_priv, alarm);
- nvkm_fantog_update(priv, -1);
+ struct nvkm_fantog *fan =
+ container_of(alarm, struct nvkm_fantog, alarm);
+ nvkm_fantog_update(fan, -1);
}
static int
nvkm_fantog_get(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
- return priv->percent;
+ struct nvkm_fantog *fan = (void *)therm->fan;
+ return fan->percent;
}
static int
nvkm_fantog_set(struct nvkm_therm *therm, int percent)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
- if (therm->pwm_ctrl)
- therm->pwm_ctrl(therm, priv->func.line, false);
- nvkm_fantog_update(priv, percent);
+ struct nvkm_fantog *fan = (void *)therm->fan;
+ if (therm->func->pwm_ctrl)
+ therm->func->pwm_ctrl(therm, fan->func.line, false);
+ nvkm_fantog_update(fan, percent);
return 0;
}
int
nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
- struct nvkm_therm_priv *tpriv = (void *)therm;
- struct nvkm_fantog_priv *priv;
+ struct nvkm_fantog *fan;
int ret;
- if (therm->pwm_ctrl) {
- ret = therm->pwm_ctrl(therm, func->line, false);
+ if (therm->func->pwm_ctrl) {
+ ret = therm->func->pwm_ctrl(therm, func->line, false);
if (ret)
return ret;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- tpriv->fan = &priv->base;
- if (!priv)
+ fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+ therm->fan = &fan->base;
+ if (!fan)
return -ENOMEM;
- priv->base.type = "toggle";
- priv->base.get = nvkm_fantog_get;
- priv->base.set = nvkm_fantog_set;
- nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm);
- priv->period_us = 100000; /* 10Hz */
- priv->percent = 100;
- priv->func = *func;
- spin_lock_init(&priv->lock);
+ fan->base.type = "toggle";
+ fan->base.get = nvkm_fantog_get;
+ fan->base.set = nvkm_fantog_set;
+ nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
+ fan->period_us = 100000; /* 10Hz */
+ fan->percent = 100;
+ fan->func = *func;
+ spin_lock_init(&fan->lock);
return 0;
}
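
fantog drives fans that expose only an on/off GPIO by flipping it from a timer alarm, in effect a software PWM with period_us = 100000 (10 Hz). After each flip, the next alarm is scheduled so the on and off intervals within one period match the requested percentage. A sketch of that interval computation:

#include <stdint.h>
#include <stdio.h>

static uint64_t next_toggle_us(uint32_t percent, uint32_t period_us, int now_on)
{
        uint64_t next = ((uint64_t)percent * period_us) / 100; /* on-time */

        if (!now_on)
                next = period_us - next;                       /* off-time */
        return next;
}

int main(void)
{
        /* 40% at 10 Hz: 40 ms on, then 60 ms off */
        printf("%llu %llu\n",
               (unsigned long long)next_toggle_us(40, 100000, 1),
               (unsigned long long)next_toggle_us(40, 100000, 0));
        return 0;
}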
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 85b5d0c18c0b..86e81930d8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -26,17 +26,13 @@
#include <subdev/fuse.h>
-struct g84_therm_priv {
- struct nvkm_therm_priv base;
-};
-
int
g84_temp_get(struct nvkm_therm *therm)
{
- struct nvkm_fuse *fuse = nvkm_fuse(therm);
+ struct nvkm_device *device = therm->subdev.device;
- if (nv_ro32(fuse, 0x1a8) == 1)
- return nv_rd32(therm, 0x20400);
+ if (nvkm_fuse_read(device->fuse, 0x1a8) == 1)
+ return nvkm_rd32(device, 0x20400);
else
return -ENODEV;
}
@@ -44,12 +40,12 @@ g84_temp_get(struct nvkm_therm *therm)
void
g84_sensor_setup(struct nvkm_therm *therm)
{
- struct nvkm_fuse *fuse = nvkm_fuse(therm);
+ struct nvkm_device *device = therm->subdev.device;
/* enable temperature reading for cards with insane defaults */
- if (nv_ro32(fuse, 0x1a8) == 1) {
- nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
- nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
+ if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
+ nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
+ nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
mdelay(20); /* wait for the temperature to stabilize */
}
}
@@ -57,36 +53,40 @@ g84_sensor_setup(struct nvkm_therm *therm)
static void
g84_therm_program_alarms(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
unsigned long flags;
- spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+ spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
- nv_wr32(therm, 0x20000, 0x000003ff);
+ nvkm_wr32(device, 0x20000, 0x000003ff);
/* shutdown: The computer should be shutdown when reached */
- nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
- nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+ nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
+ nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);
/* THRS_1 : fan boost*/
- nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+ nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);
/* THRS_2 : critical */
- nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+ nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);
/* THRS_4 : down clock */
- nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
-
- nv_debug(therm,
- "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
- sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
- sensor->thrs_down_clock.temp,
- sensor->thrs_down_clock.hysteresis,
- sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
- sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+ nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
+ spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
+
+ nvkm_debug(subdev,
+ "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp,
+ sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp,
+ sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp,
+ sensor->thrs_shutdown.hysteresis);
}
@@ -97,24 +97,25 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
const struct nvbios_therm_threshold *thrs,
enum nvkm_therm_thrs thrs_name)
{
+ struct nvkm_device *device = therm->subdev.device;
enum nvkm_therm_thrs_direction direction;
enum nvkm_therm_thrs_state prev_state, new_state;
int temp, cur;
prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
- temp = nv_rd32(therm, thrs_reg);
+ temp = nvkm_rd32(device, thrs_reg);
/* program the next threshold */
if (temp == thrs->temp) {
- nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+ nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
new_state = NVKM_THERM_THRS_HIGHER;
} else {
- nv_wr32(therm, thrs_reg, thrs->temp);
+ nvkm_wr32(device, thrs_reg, thrs->temp);
new_state = NVKM_THERM_THRS_LOWER;
}
/* fix the state (in case someone reprogrammed the alarms) */
- cur = therm->temp_get(therm);
+ cur = therm->func->temp_get(therm);
if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
new_state = NVKM_THERM_THRS_HIGHER;
else if (new_state == NVKM_THERM_THRS_HIGHER &&
@@ -135,17 +136,17 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
}
static void
-g84_therm_intr(struct nvkm_subdev *subdev)
+g84_therm_intr(struct nvkm_therm *therm)
{
- struct nvkm_therm *therm = nvkm_therm(subdev);
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
unsigned long flags;
uint32_t intr;
- spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+ spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
- intr = nv_rd32(therm, 0x20100) & 0x3ff;
+ intr = nvkm_rd32(device, 0x20100) & 0x3ff;
/* THRS_4: downclock */
if (intr & 0x002) {
@@ -180,87 +181,66 @@ g84_therm_intr(struct nvkm_subdev *subdev)
}
if (intr)
- nv_error(therm, "unhandled intr 0x%08x\n", intr);
+ nvkm_error(subdev, "intr %08x\n", intr);
/* ACK everything */
- nv_wr32(therm, 0x20100, 0xffffffff);
- nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+ nvkm_wr32(device, 0x20100, 0xffffffff);
+ nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
}
-static int
-g84_therm_init(struct nvkm_object *object)
+void
+g84_therm_fini(struct nvkm_therm *therm)
{
- struct g84_therm_priv *priv = (void *)object;
- int ret;
+ struct nvkm_device *device = therm->subdev.device;
- ret = nvkm_therm_init(&priv->base.base);
- if (ret)
- return ret;
+ /* Disable PTherm IRQs */
+ nvkm_wr32(device, 0x20000, 0x00000000);
- g84_sensor_setup(&priv->base.base);
- return 0;
+ /* ACK all PTherm IRQs */
+ nvkm_wr32(device, 0x20100, 0xffffffff);
+ nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
}
-static int
-g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void
+g84_therm_init(struct nvkm_therm *therm)
{
- struct g84_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
- priv->base.base.pwm_get = nv50_fan_pwm_get;
- priv->base.base.pwm_set = nv50_fan_pwm_set;
- priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = g84_temp_get;
- priv->base.sensor.program_alarms = g84_therm_program_alarms;
- nv_subdev(priv)->intr = g84_therm_intr;
-
- /* init the thresholds */
- nvkm_therm_sensor_set_threshold_state(&priv->base.base,
- NVKM_THERM_THRS_SHUTDOWN,
- NVKM_THERM_THRS_LOWER);
- nvkm_therm_sensor_set_threshold_state(&priv->base.base,
- NVKM_THERM_THRS_FANBOOST,
- NVKM_THERM_THRS_LOWER);
- nvkm_therm_sensor_set_threshold_state(&priv->base.base,
- NVKM_THERM_THRS_CRITICAL,
- NVKM_THERM_THRS_LOWER);
- nvkm_therm_sensor_set_threshold_state(&priv->base.base,
- NVKM_THERM_THRS_DOWNCLOCK,
- NVKM_THERM_THRS_LOWER);
-
- return nvkm_therm_preinit(&priv->base.base);
+ g84_sensor_setup(therm);
}
+static const struct nvkm_therm_func
+g84_therm = {
+ .init = g84_therm_init,
+ .fini = g84_therm_fini,
+ .intr = g84_therm_intr,
+ .pwm_ctrl = nv50_fan_pwm_ctrl,
+ .pwm_get = nv50_fan_pwm_get,
+ .pwm_set = nv50_fan_pwm_set,
+ .pwm_clock = nv50_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .program_alarms = g84_therm_program_alarms,
+};
+
int
-g84_therm_fini(struct nvkm_object *object, bool suspend)
+g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
{
- /* Disable PTherm IRQs */
- nv_wr32(object, 0x20000, 0x00000000);
+ struct nvkm_therm *therm;
+ int ret;
- /* ACK all PTherm IRQs */
- nv_wr32(object, 0x20100, 0xffffffff);
- nv_wr32(object, 0x1100, 0x10000); /* PBUS */
+ ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+ *ptherm = therm;
+ if (ret)
+ return ret;
- return _nvkm_therm_fini(object, suspend);
+ /* init the thresholds */
+ nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
+ NVKM_THERM_THRS_LOWER);
+ nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
+ NVKM_THERM_THRS_LOWER);
+ return 0;
}
-
-struct nvkm_oclass
-g84_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0x84),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = g84_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = g84_therm_init,
- .fini = g84_therm_fini,
- },
-};
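
The g84 threshold handling emulates hysteresis in software: the hardware compares against a single register, so the driver alternates that register between T and T - hysteresis each time the interrupt fires, then re-reads the live temperature to repair the state in case the alarms were reprogrammed behind its back. A sketch of the pattern (the second repair condition is cut off by the hunk boundary above and is inferred here):

enum thrs_state { THRS_LOWER, THRS_HIGHER };

static enum thrs_state
update_threshold(int *reg, int temp, int hyst, int cur)
{
        enum thrs_state st;

        if (*reg == temp) {             /* fired while heating up */
                *reg = temp - hyst;
                st = THRS_HIGHER;
        } else {                        /* fired while cooling down */
                *reg = temp;
                st = THRS_LOWER;
        }

        /* repair the state in case someone reprogrammed the alarms */
        if (st == THRS_LOWER && cur > temp)
                st = THRS_HIGHER;
        else if (st == THRS_HIGHER && cur < temp - hyst)
                st = THRS_LOWER;
        return st;
}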
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
deleted file mode 100644
index 46b7e656a752..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gf110_therm_priv {
- struct nvkm_therm_priv base;
-};
-
-static int
-pwm_info(struct nvkm_therm *therm, int line)
-{
- u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
-
- switch (gpio & 0x000000c0) {
- case 0x00000000: /* normal mode, possibly pwm forced off by us */
- case 0x00000040: /* nvio special */
- switch (gpio & 0x0000001f) {
- case 0x00: return 2;
- case 0x19: return 1;
- case 0x1c: return 0;
- case 0x1e: return 2;
- default:
- break;
- }
- default:
- break;
- }
-
- nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
- return -ENODEV;
-}
-
-static int
-gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
-{
- u32 data = enable ? 0x00000040 : 0x00000000;
- int indx = pwm_info(therm, line);
- if (indx < 0)
- return indx;
- else if (indx < 2)
- nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
- /* nothing to do for indx == 2, it seems hardwired to PTHERM */
- return 0;
-}
-
-static int
-gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
-{
- int indx = pwm_info(therm, line);
- if (indx < 0)
- return indx;
- else if (indx < 2) {
- if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
- *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
- *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
- return 0;
- }
- } else if (indx == 2) {
- *divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
- *duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int
-gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
-{
- int indx = pwm_info(therm, line);
- if (indx < 0)
- return indx;
- else if (indx < 2) {
- nv_wr32(therm, 0x00e114 + (indx * 8), divs);
- nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
- } else if (indx == 2) {
- nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
- nv_wr32(therm, 0x0200dc, duty | 0x40000000);
- }
- return 0;
-}
-
-static int
-gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
-{
- int indx = pwm_info(therm, line);
- if (indx < 0)
- return 0;
- else if (indx < 2)
- return (nv_device(therm)->crystal * 1000) / 20;
- else
- return nv_device(therm)->crystal * 1000 / 10;
-}
-
-int
-gf110_therm_init(struct nvkm_object *object)
-{
- struct gf110_therm_priv *priv = (void *)object;
- int ret;
-
- ret = nvkm_therm_init(&priv->base.base);
- if (ret)
- return ret;
-
- /* enable fan tach, count revolutions per-second */
- nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
- if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
- nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
- nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
- nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
- }
- nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
- return 0;
-}
-
-static int
-gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gf110_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- g84_sensor_setup(&priv->base.base);
-
- priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
- priv->base.base.pwm_get = gf110_fan_pwm_get;
- priv->base.base.pwm_set = gf110_fan_pwm_set;
- priv->base.base.pwm_clock = gf110_fan_pwm_clock;
- priv->base.base.temp_get = g84_temp_get;
- priv->base.base.fan_sense = gt215_therm_fan_sense;
- priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
- return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gf110_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0xd0),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gf110_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = gf110_therm_init,
- .fini = g84_therm_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
new file mode 100644
index 000000000000..06dcfd6ee966
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static int
+pwm_info(struct nvkm_therm *therm, int line)
+{
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));
+
+ switch (gpio & 0x000000c0) {
+ case 0x00000000: /* normal mode, possibly pwm forced off by us */
+ case 0x00000040: /* nvio special */
+ switch (gpio & 0x0000001f) {
+ case 0x00: return 2;
+ case 0x19: return 1;
+ case 0x1c: return 0;
+ case 0x1e: return 2;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
+ return -ENODEV;
+}
+
+static int
+gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ u32 data = enable ? 0x00000040 : 0x00000000;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+ else if (indx < 2)
+ nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
+ /* nothing to do for indx == 2, it seems hardwired to PTHERM */
+ return 0;
+}
+
+static int
+gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+ else if (indx < 2) {
+ if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
+ *divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
+ *duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
+ return 0;
+ }
+ } else if (indx == 2) {
+ *divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
+ *duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return indx;
+ else if (indx < 2) {
+ nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
+ nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
+ } else if (indx == 2) {
+ nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
+ nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
+ }
+ return 0;
+}
+
+static int
+gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ int indx = pwm_info(therm, line);
+ if (indx < 0)
+ return 0;
+ else if (indx < 2)
+ return (device->crystal * 1000) / 20;
+ else
+ return device->crystal * 1000 / 10;
+}
+
+void
+gf119_therm_init(struct nvkm_therm *therm)
+{
+ struct nvkm_device *device = therm->subdev.device;
+
+ g84_sensor_setup(therm);
+
+ /* enable fan tach, count revolutions per-second */
+ nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
+ if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
+ nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
+ nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+ nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
+ }
+ nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
+}
+
+static const struct nvkm_therm_func
+gf119_therm = {
+ .init = gf119_therm_init,
+ .fini = g84_therm_fini,
+ .pwm_ctrl = gf119_fan_pwm_ctrl,
+ .pwm_get = gf119_fan_pwm_get,
+ .pwm_set = gf119_fan_pwm_set,
+ .pwm_clock = gf119_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .fan_sense = gt215_therm_fan_sense,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gf119_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+}
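
pwm_info() above decodes the per-line GPIO control register: bits 7:6 select the mode (normal or the "nvio special" mode) and bits 4:0 the function code, of which only a handful map onto the three PWM sources, index 2 being the one that appears hardwired to PTHERM. A compact sketch of the decode:

#include <stdint.h>

static int pwm_index(uint32_t gpio)
{
        switch (gpio & 0x000000c0) {
        case 0x00000000:                /* normal mode */
        case 0x00000040:                /* nvio special */
                switch (gpio & 0x0000001f) {
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
                case 0x1e: return 2;
                }
        }
        return -1;                      /* unknown PWM source */
}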
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 2fd110f09878..86848ece4d89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -23,12 +23,6 @@
*/
#include "priv.h"
-#include <core/device.h>
-
-struct gm107_therm_priv {
- struct nvkm_therm_priv base;
-};
-
static int
gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
@@ -39,55 +33,43 @@ gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
static int
gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
- *divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
- *duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
+ struct nvkm_device *device = therm->subdev.device;
+ *divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
+ *duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
return 0;
}
static int
gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
- nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
- nv_wr32(therm, 0x10eb14, duty | 0x80000000);
+ struct nvkm_device *device = therm->subdev.device;
+ nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
+ nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
return 0;
}
static int
gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
- return nv_device(therm)->crystal * 1000;
+ return therm->subdev.device->crystal * 1000;
}
-static int
-gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gm107_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_therm_func
+gm107_therm = {
+ .init = gf119_therm_init,
+ .fini = g84_therm_fini,
+ .pwm_ctrl = gm107_fan_pwm_ctrl,
+ .pwm_get = gm107_fan_pwm_get,
+ .pwm_set = gm107_fan_pwm_set,
+ .pwm_clock = gm107_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .fan_sense = gt215_therm_fan_sense,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
- priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl;
- priv->base.base.pwm_get = gm107_fan_pwm_get;
- priv->base.base.pwm_set = gm107_fan_pwm_set;
- priv->base.base.pwm_clock = gm107_fan_pwm_clock;
- priv->base.base.temp_get = g84_temp_get;
- priv->base.base.fan_sense = gt215_therm_fan_sense;
- priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
- return nvkm_therm_preinit(&priv->base.base);
+int
+gm107_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
}
-
-struct nvkm_oclass
-gm107_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0x117),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gm107_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = gf110_therm_init,
- .fini = g84_therm_fini,
- },
-};
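
gm107 is the tersest example of the conversion this series performs: the per-chip nvkm_oclass boilerplate (ctor/dtor/init/fini) collapses into one static const nvkm_therm_func table plus a two-line constructor. A standalone sketch of the pattern (names illustrative; crystal frequency assumed to be in kHz, as elsewhere in nouveau):

#include <stdio.h>

struct therm_ops {
        int (*pwm_clock)(int crystal_khz);
};

static int gm107_clock(int crystal_khz)
{
        return crystal_khz * 1000;      /* PWM source clock in Hz */
}

static const struct therm_ops gm107_ops = {
        .pwm_clock = gm107_clock,
};

int main(void)
{
        printf("%d Hz\n", gm107_ops.pwm_clock(27000));  /* 27 MHz crystal */
        return 0;
}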
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index e99be20332f2..c08097f2aff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -23,78 +23,53 @@
*/
#include "priv.h"
-#include <core/device.h>
#include <subdev/gpio.h>
-struct gt215_therm_priv {
- struct nvkm_therm_priv base;
-};
-
int
gt215_therm_fan_sense(struct nvkm_therm *therm)
{
- u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
- u32 ctrl = nv_rd32(therm, 0x00e720);
+ struct nvkm_device *device = therm->subdev.device;
+ u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
+ u32 ctrl = nvkm_rd32(device, 0x00e720);
if (ctrl & 0x00000001)
return tach * 60 / 2;
return -ENODEV;
}
-static int
-gt215_therm_init(struct nvkm_object *object)
+static void
+gt215_therm_init(struct nvkm_therm *therm)
{
- struct gt215_therm_priv *priv = (void *)object;
- struct dcb_gpio_func *tach = &priv->base.fan->tach;
- int ret;
-
- ret = nvkm_therm_init(&priv->base.base);
- if (ret)
- return ret;
+ struct nvkm_device *device = therm->subdev.device;
+ struct dcb_gpio_func *tach = &therm->fan->tach;
- g84_sensor_setup(&priv->base.base);
+ g84_sensor_setup(therm);
/* enable fan tach, count revolutions per-second */
- nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+ nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
if (tach->func != DCB_GPIO_UNUSED) {
- nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
- nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
- nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+ nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+ nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
+ nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
}
- nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
- return 0;
+ nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
}
-static int
-gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct gt215_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static const struct nvkm_therm_func
+gt215_therm = {
+ .init = gt215_therm_init,
+ .fini = g84_therm_fini,
+ .pwm_ctrl = nv50_fan_pwm_ctrl,
+ .pwm_get = nv50_fan_pwm_get,
+ .pwm_set = nv50_fan_pwm_set,
+ .pwm_clock = nv50_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .fan_sense = gt215_therm_fan_sense,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
- priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
- priv->base.base.pwm_get = nv50_fan_pwm_get;
- priv->base.base.pwm_set = nv50_fan_pwm_set;
- priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = g84_temp_get;
- priv->base.base.fan_sense = gt215_therm_fan_sense;
- priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
- return nvkm_therm_preinit(&priv->base.base);
+int
+gt215_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
}
-
-struct nvkm_oclass
-gt215_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0xa3),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gt215_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = gt215_therm_init,
- .fini = g84_therm_fini,
- },
-};
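
gt215_therm_fan_sense() converts the tachometer count to RPM as tach * 60 / 2: the counter holds pulses per second, and a standard fan tachometer emits two pulses per revolution, so a reading of 40 pulses/s comes out as 1200 RPM. A one-liner for reference:

static int tach_to_rpm(unsigned int pulses_per_second)
{
        return pulses_per_second * 60 / 2;      /* two pulses per revolution */
}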
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 09fc4605e853..6e0ddc1bb583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -27,16 +27,16 @@
#include <subdev/i2c.h>
static bool
-probe_monitoring_device(struct nvkm_i2c_port *i2c,
+probe_monitoring_device(struct nvkm_i2c_bus *bus,
struct i2c_board_info *info, void *data)
{
- struct nvkm_therm_priv *priv = data;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nvkm_therm *therm = data;
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
struct i2c_client *client;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(&i2c->adapter, info);
+ client = i2c_new_device(&bus->i2c, info);
if (!client)
return false;
@@ -46,15 +46,15 @@ probe_monitoring_device(struct nvkm_i2c_port *i2c,
return false;
}
- nv_info(priv,
- "Found an %s at address 0x%x (controlled by lm_sensors, "
- "temp offset %+i C)\n",
- info->type, info->addr, sensor->offset_constant);
- priv->ic = client;
+ nvkm_debug(&therm->subdev,
+ "Found an %s at address 0x%x (controlled by lm_sensors, "
+ "temp offset %+i C)\n",
+ info->type, info->addr, sensor->offset_constant);
+ therm->ic = client;
return true;
}
-static struct nvkm_i2c_board_info
+static struct nvkm_i2c_bus_probe
nv_board_infos[] = {
{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
@@ -82,38 +82,43 @@ nv_board_infos[] = {
void
nvkm_therm_ic_ctor(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_bios *bios = nvkm_bios(therm);
- struct nvkm_i2c *i2c = nvkm_i2c(therm);
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_i2c *i2c = device->i2c;
+ struct nvkm_i2c_bus *bus;
struct nvbios_extdev_func extdev_entry;
+ bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+ if (!bus)
+ return;
+
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
- struct nvkm_i2c_board_info board[] = {
+ struct nvkm_i2c_bus_probe board[] = {
{ { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
{ }
};
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device, therm);
- if (priv->ic)
+ nvkm_i2c_bus_probe(bus, "monitoring device", board,
+ probe_monitoring_device, therm);
+ if (therm->ic)
return;
}
if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
- struct nvkm_i2c_board_info board[] = {
+ struct nvkm_i2c_bus_probe board[] = {
{ { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
{ }
};
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- board, probe_monitoring_device, therm);
- if (priv->ic)
+ nvkm_i2c_bus_probe(bus, "monitoring device", board,
+ probe_monitoring_device, therm);
+ if (therm->ic)
return;
}
/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
- i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
- nv_board_infos, probe_monitoring_device, therm);
+ nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
+ probe_monitoring_device, therm);
}
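
The constructor above tries three sources in order: an LM89/LM90 entry from the VBIOS external-device table, an ADT7473 entry, and finally the static nv_board_infos[] list, stopping as soon as therm->ic is set. A sketch of that first-match probe loop (probe_one() is a stand-in for the probe_monitoring_device() callback, not the driver's API):

#include <stdbool.h>
#include <stddef.h>

struct candidate {
        const char *name;
        unsigned short addr;
};

static bool probe_one(const struct candidate *c)
{
        (void)c;
        return false;   /* pretend nothing answered */
}

static const struct candidate *
probe_list(const struct candidate *list, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (probe_one(&list[i]))
                        return &list[i];
        return NULL;    /* caller falls through to the next source */
}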
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 8496fffd4688..6326fdc5a48d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -24,26 +24,17 @@
*/
#include "priv.h"
-#include <core/device.h>
-
-struct nv40_therm_priv {
- struct nvkm_therm_priv base;
-};
-
enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
static enum nv40_sensor_style
nv40_sensor_style(struct nvkm_therm *therm)
{
- struct nvkm_device *device = nv_device(therm);
-
- switch (device->chipset) {
+ switch (therm->subdev.device->chipset) {
case 0x43:
case 0x44:
case 0x4a:
case 0x47:
return OLD_STYLE;
-
case 0x46:
case 0x49:
case 0x4b:
@@ -61,18 +52,19 @@ nv40_sensor_style(struct nvkm_therm *therm)
static int
nv40_sensor_setup(struct nvkm_therm *therm)
{
+ struct nvkm_device *device = therm->subdev.device;
enum nv40_sensor_style style = nv40_sensor_style(therm);
/* enable ADC readout and disable the ALARM threshold */
if (style == NEW_STYLE) {
- nv_mask(therm, 0x15b8, 0x80000000, 0);
- nv_wr32(therm, 0x15b0, 0x80003fff);
+ nvkm_mask(device, 0x15b8, 0x80000000, 0);
+ nvkm_wr32(device, 0x15b0, 0x80003fff);
mdelay(20); /* wait for the temperature to stabilize */
- return nv_rd32(therm, 0x15b4) & 0x3fff;
+ return nvkm_rd32(device, 0x15b4) & 0x3fff;
} else if (style == OLD_STYLE) {
- nv_wr32(therm, 0x15b0, 0xff);
+ nvkm_wr32(device, 0x15b0, 0xff);
mdelay(20); /* wait for the temperature to stabilize */
- return nv_rd32(therm, 0x15b4) & 0xff;
+ return nvkm_rd32(device, 0x15b4) & 0xff;
} else
return -ENODEV;
}
@@ -80,17 +72,17 @@ nv40_sensor_setup(struct nvkm_therm *therm)
static int
nv40_temp_get(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
enum nv40_sensor_style style = nv40_sensor_style(therm);
int core_temp;
if (style == NEW_STYLE) {
- nv_wr32(therm, 0x15b0, 0x80003fff);
- core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+ nvkm_wr32(device, 0x15b0, 0x80003fff);
+ core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
} else if (style == OLD_STYLE) {
- nv_wr32(therm, 0x15b0, 0xff);
- core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+ nvkm_wr32(device, 0x15b0, 0xff);
+ core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
} else
return -ENODEV;
@@ -113,11 +105,13 @@ nv40_temp_get(struct nvkm_therm *therm)
static int
nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
u32 mask = enable ? 0x80000000 : 0x0000000;
- if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
- else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+ if (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
+ else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
else {
- nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
}
return 0;
@@ -126,8 +120,10 @@ nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
static int
nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
if (line == 2) {
- u32 reg = nv_rd32(therm, 0x0010f0);
+ u32 reg = nvkm_rd32(device, 0x0010f0);
if (reg & 0x80000000) {
*duty = (reg & 0x7fff0000) >> 16;
*divs = (reg & 0x00007fff);
@@ -135,14 +131,14 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
}
} else
if (line == 9) {
- u32 reg = nv_rd32(therm, 0x0015f4);
+ u32 reg = nvkm_rd32(device, 0x0015f4);
if (reg & 0x80000000) {
- *divs = nv_rd32(therm, 0x0015f8);
+ *divs = nvkm_rd32(device, 0x0015f8);
*duty = (reg & 0x7fffffff);
return 0;
}
} else {
- nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
}
@@ -152,14 +148,16 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
static int
nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
if (line == 2) {
- nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
+ nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
} else
if (line == 9) {
- nv_wr32(therm, 0x0015f8, divs);
- nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
+ nvkm_wr32(device, 0x0015f8, divs);
+ nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
} else {
- nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+ nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
return -ENODEV;
}
@@ -167,59 +165,40 @@ nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
}
void
-nv40_therm_intr(struct nvkm_subdev *subdev)
+nv40_therm_intr(struct nvkm_therm *therm)
{
- struct nvkm_therm *therm = nvkm_therm(subdev);
- uint32_t stat = nv_rd32(therm, 0x1100);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_device *device = subdev->device;
+ uint32_t stat = nvkm_rd32(device, 0x1100);
/* processing */
/* ack all IRQs */
- nv_wr32(therm, 0x1100, 0x70000);
+ nvkm_wr32(device, 0x1100, 0x70000);
- nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+ nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
}
-static int
-nv40_therm_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static void
+nv40_therm_init(struct nvkm_therm *therm)
{
- struct nv40_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
- priv->base.base.pwm_get = nv40_fan_pwm_get;
- priv->base.base.pwm_set = nv40_fan_pwm_set;
- priv->base.base.temp_get = nv40_temp_get;
- priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
- nv_subdev(priv)->intr = nv40_therm_intr;
- return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv40_therm_init(struct nvkm_object *object)
-{
- struct nvkm_therm *therm = (void *)object;
-
nv40_sensor_setup(therm);
-
- return _nvkm_therm_init(object);
}
-struct nvkm_oclass
-nv40_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = nv40_therm_init,
- .fini = _nvkm_therm_fini,
- },
+static const struct nvkm_therm_func
+nv40_therm = {
+ .init = nv40_therm_init,
+ .intr = nv40_therm_intr,
+ .pwm_ctrl = nv40_fan_pwm_ctrl,
+ .pwm_get = nv40_fan_pwm_get,
+ .pwm_set = nv40_fan_pwm_set,
+ .temp_get = nv40_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
};
+
+int
+nv40_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+}
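
The nv40 PWM registers pack both fields into one word: on line 2, bit 31 enables PWM, bits 30:16 hold the duty and bits 14:0 the divider, which is why a single masked write of (duty << 16) | divs programs the whole thing. A sketch of the pack/unpack:

#include <stdint.h>

static uint32_t nv40_line2_pack(uint32_t divs, uint32_t duty)
{
        return ((duty & 0x7fff) << 16) | (divs & 0x7fff);
}

static void nv40_line2_unpack(uint32_t reg, uint32_t *divs, uint32_t *duty)
{
        *duty = (reg & 0x7fff0000) >> 16;
        *divs =  reg & 0x00007fff;
}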
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 1ef59e8922d4..9b57b433d4cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -24,15 +24,11 @@
*/
#include "priv.h"
-#include <core/device.h>
-
-struct nv50_therm_priv {
- struct nvkm_therm_priv base;
-};
-
static int
pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
{
+ struct nvkm_subdev *subdev = &therm->subdev;
+
if (*line == 0x04) {
*ctrl = 0x00e100;
*line = 4;
@@ -48,7 +44,7 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
*line = 0;
*indx = 0;
} else {
- nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+ nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
return -ENODEV;
}
@@ -58,23 +54,25 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
int
nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
+ struct nvkm_device *device = therm->subdev.device;
u32 data = enable ? 0x00000001 : 0x00000000;
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
if (ret == 0)
- nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+ nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
return ret;
}
int
nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
+ struct nvkm_device *device = therm->subdev.device;
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
if (ret)
return ret;
- if (nv_rd32(therm, ctrl) & (1 << line)) {
- *divs = nv_rd32(therm, 0x00e114 + (id * 8));
- *duty = nv_rd32(therm, 0x00e118 + (id * 8));
+ if (nvkm_rd32(device, ctrl) & (1 << line)) {
+ *divs = nvkm_rd32(device, 0x00e114 + (id * 8));
+ *duty = nvkm_rd32(device, 0x00e118 + (id * 8));
return 0;
}
@@ -84,36 +82,36 @@ nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
int
nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
+ struct nvkm_device *device = therm->subdev.device;
int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
if (ret)
return ret;
- nv_wr32(therm, 0x00e114 + (id * 8), divs);
- nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+ nvkm_wr32(device, 0x00e114 + (id * 8), divs);
+ nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
return 0;
}
int
nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
- int chipset = nv_device(therm)->chipset;
- int crystal = nv_device(therm)->crystal;
+ struct nvkm_device *device = therm->subdev.device;
int pwm_clock;
/* determine the PWM source clock */
- if (chipset > 0x50 && chipset < 0x94) {
- u8 pwm_div = nv_rd32(therm, 0x410c);
- if (nv_rd32(therm, 0xc040) & 0x800000) {
+ if (device->chipset > 0x50 && device->chipset < 0x94) {
+ u8 pwm_div = nvkm_rd32(device, 0x410c);
+ if (nvkm_rd32(device, 0xc040) & 0x800000) {
/* Use the HOST clock (100 MHz)
* Where does this constant (2.4) come from? */
pwm_clock = (100000000 >> pwm_div) * 10 / 24;
} else {
/* Where does this constant (20) come from? */
- pwm_clock = (crystal * 1000) >> pwm_div;
+ pwm_clock = (device->crystal * 1000) >> pwm_div;
pwm_clock /= 20;
}
} else {
- pwm_clock = (crystal * 1000) / 20;
+ pwm_clock = (device->crystal * 1000) / 20;
}
return pwm_clock;
@@ -122,18 +120,19 @@ nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
static void
nv50_sensor_setup(struct nvkm_therm *therm)
{
- nv_mask(therm, 0x20010, 0x40000000, 0x0);
+ struct nvkm_device *device = therm->subdev.device;
+ nvkm_mask(device, 0x20010, 0x40000000, 0x0);
mdelay(20); /* wait for the temperature to stabilize */
}
static int
nv50_temp_get(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
int core_temp;
- core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
+ core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;
/* if the slope or the offset is unset, do not use the sensor */
if (!sensor->slope_div || !sensor->slope_mult ||
@@ -151,48 +150,27 @@ nv50_temp_get(struct nvkm_therm *therm)
return core_temp;
}
-static int
-nv50_therm_ctor(struct nvkm_object *parent,
- struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nv50_therm_priv *priv;
- int ret;
-
- ret = nvkm_therm_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
- priv->base.base.pwm_get = nv50_fan_pwm_get;
- priv->base.base.pwm_set = nv50_fan_pwm_set;
- priv->base.base.pwm_clock = nv50_fan_pwm_clock;
- priv->base.base.temp_get = nv50_temp_get;
- priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
- nv_subdev(priv)->intr = nv40_therm_intr;
-
- return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv50_therm_init(struct nvkm_object *object)
+static void
+nv50_therm_init(struct nvkm_therm *therm)
{
- struct nvkm_therm *therm = (void *)object;
-
nv50_sensor_setup(therm);
-
- return _nvkm_therm_init(object);
}
-struct nvkm_oclass
-nv50_therm_oclass = {
- .handle = NV_SUBDEV(THERM, 0x50),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv50_therm_ctor,
- .dtor = _nvkm_therm_dtor,
- .init = nv50_therm_init,
- .fini = _nvkm_therm_fini,
- },
+static const struct nvkm_therm_func
+nv50_therm = {
+ .init = nv50_therm_init,
+ .intr = nv40_therm_intr,
+ .pwm_ctrl = nv50_fan_pwm_ctrl,
+ .pwm_get = nv50_fan_pwm_get,
+ .pwm_set = nv50_fan_pwm_set,
+ .pwm_clock = nv50_fan_pwm_clock,
+ .temp_get = nv50_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
};
+
+int
+nv50_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+}
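
nv50_fan_pwm_clock() picks the PWM source clock by chipset: on 0x51..0x93 the divider in 0x410c applies, scaling either the 100 MHz host clock by the unexplained factor 2.4 or the crystal by the equally unexplained factor 20; everything else just uses crystal/20. A standalone sketch (crystal in kHz, host_clock mirroring the 0xc040 bit):

#include <stdint.h>

static int nv50_pwm_clock(int chipset, int crystal_khz,
                          int host_clock, uint8_t pwm_div)
{
        if (chipset > 0x50 && chipset < 0x94) {
                if (host_clock)         /* 100 MHz host clock, scaled by 2.4 */
                        return (100000000 >> pwm_div) * 10 / 24;
                return ((crystal_khz * 1000) >> pwm_div) / 20;
        }
        return (crystal_khz * 1000) / 20;
}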
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 916a149efe6e..235a5d8daff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -1,5 +1,6 @@
#ifndef __NVTHERM_PRIV_H__
#define __NVTHERM_PRIV_H__
+#define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
/*
* Copyright 2012 The Nouveau community
*
@@ -28,8 +29,9 @@
#include <subdev/bios/extdev.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
-#include <subdev/bios/therm.h>
-#include <subdev/timer.h>
+
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
+ int index, struct nvkm_therm **);
struct nvkm_fan {
struct nvkm_therm *parent;
@@ -48,59 +50,6 @@ struct nvkm_fan {
struct dcb_gpio_func tach;
};
-enum nvkm_therm_thrs_direction {
- NVKM_THERM_THRS_FALLING = 0,
- NVKM_THERM_THRS_RISING = 1
-};
-
-enum nvkm_therm_thrs_state {
- NVKM_THERM_THRS_LOWER = 0,
- NVKM_THERM_THRS_HIGHER = 1
-};
-
-enum nvkm_therm_thrs {
- NVKM_THERM_THRS_FANBOOST = 0,
- NVKM_THERM_THRS_DOWNCLOCK = 1,
- NVKM_THERM_THRS_CRITICAL = 2,
- NVKM_THERM_THRS_SHUTDOWN = 3,
- NVKM_THERM_THRS_NR
-};
-
-struct nvkm_therm_priv {
- struct nvkm_therm base;
-
- /* automatic thermal management */
- struct nvkm_alarm alarm;
- spinlock_t lock;
- struct nvbios_therm_trip_point *last_trip;
- int mode;
- int cstate;
- int suspend;
-
- /* bios */
- struct nvbios_therm_sensor bios_sensor;
-
- /* fan priv */
- struct nvkm_fan *fan;
-
- /* alarms priv */
- struct {
- spinlock_t alarm_program_lock;
- struct nvkm_alarm therm_poll_alarm;
- enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
- void (*program_alarms)(struct nvkm_therm *);
- } sensor;
-
- /* what should be done if the card overheats */
- struct {
- void (*downclock)(struct nvkm_therm *, bool active);
- void (*pause)(struct nvkm_therm *, bool active);
- } emergency;
-
- /* ic */
- struct i2c_client *ic;
-};
-
int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
@@ -117,8 +66,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
int nvkm_therm_fan_user_get(struct nvkm_therm *);
int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
-int nvkm_therm_fan_sense(struct nvkm_therm *);
-
int nvkm_therm_preinit(struct nvkm_therm *);
int nvkm_therm_sensor_init(struct nvkm_therm *);
@@ -134,18 +81,37 @@ void nvkm_therm_sensor_event(struct nvkm_therm *, enum nvkm_therm_thrs,
enum nvkm_therm_thrs_direction);
void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
-void nv40_therm_intr(struct nvkm_subdev *);
+struct nvkm_therm_func {
+ void (*init)(struct nvkm_therm *);
+ void (*fini)(struct nvkm_therm *);
+ void (*intr)(struct nvkm_therm *);
+
+ int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
+ int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
+ int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
+ int (*pwm_clock)(struct nvkm_therm *, int line);
+
+ int (*temp_get)(struct nvkm_therm *);
+
+ int (*fan_sense)(struct nvkm_therm *);
+
+ void (*program_alarms)(struct nvkm_therm *);
+};
+
+void nv40_therm_intr(struct nvkm_therm *);
+
int nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
int nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
int nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
int nv50_fan_pwm_clock(struct nvkm_therm *, int);
+
int g84_temp_get(struct nvkm_therm *);
void g84_sensor_setup(struct nvkm_therm *);
-int g84_therm_fini(struct nvkm_object *, bool suspend);
+void g84_therm_fini(struct nvkm_therm *);
int gt215_therm_fan_sense(struct nvkm_therm *);
-int gf110_therm_init(struct nvkm_object *);
+void gf119_therm_init(struct nvkm_therm *);
int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
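
The new nvkm_therm(p) macro at the top of priv.h is the usual container_of() accessor: given the embedded subdev pointer that the common code now passes around, it recovers the enclosing nvkm_therm. A self-contained illustration of what it expands to:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };
struct therm  { int mode; struct subdev subdev; };

int main(void)
{
        struct therm t = { .mode = 1 };
        struct subdev *s = &t.subdev;

        printf("mode = %d\n", container_of(s, struct therm, subdev)->mode);
        return 0;
}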
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index aa13744f3854..b9703c02d8ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -26,29 +26,25 @@
static void
nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
+ therm->bios_sensor.offset_constant = 0;
- priv->bios_sensor.offset_constant = 0;
+ therm->bios_sensor.thrs_fan_boost.temp = 90;
+ therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
- priv->bios_sensor.thrs_fan_boost.temp = 90;
- priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+ therm->bios_sensor.thrs_down_clock.temp = 95;
+ therm->bios_sensor.thrs_down_clock.hysteresis = 3;
- priv->bios_sensor.thrs_down_clock.temp = 95;
- priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+ therm->bios_sensor.thrs_critical.temp = 105;
+ therm->bios_sensor.thrs_critical.hysteresis = 5;
- priv->bios_sensor.thrs_critical.temp = 105;
- priv->bios_sensor.thrs_critical.hysteresis = 5;
-
- priv->bios_sensor.thrs_shutdown.temp = 135;
- priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
+ therm->bios_sensor.thrs_shutdown.temp = 135;
+ therm->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
}
-
static void
nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *s = &priv->bios_sensor;
+ struct nvbios_therm_sensor *s = &therm->bios_sensor;
/* enforce a minimum hysteresis on thresholds */
s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
@@ -63,8 +59,7 @@ nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *therm,
enum nvkm_therm_thrs thrs,
enum nvkm_therm_thrs_state st)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- priv->sensor.alarm_state[thrs] = st;
+ therm->sensor.alarm_state[thrs] = st;
}
/* must be called with alarm_program_lock taken ! */
@@ -72,8 +67,7 @@ enum nvkm_therm_thrs_state
nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
enum nvkm_therm_thrs thrs)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- return priv->sensor.alarm_state[thrs];
+ return therm->sensor.alarm_state[thrs];
}
static void
@@ -87,22 +81,23 @@ void
nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
enum nvkm_therm_thrs_direction dir)
{
- struct nvkm_therm_priv *priv = (void *)therm;
+ struct nvkm_subdev *subdev = &therm->subdev;
bool active;
const char *thresolds[] = {
"fanboost", "downclock", "critical", "shutdown"
};
- int temperature = therm->temp_get(therm);
+ int temperature = therm->func->temp_get(therm);
if (thrs < 0 || thrs > 3)
return;
if (dir == NVKM_THERM_THRS_FALLING)
- nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ nvkm_info(subdev,
+ "temperature (%i C) went below the '%s' threshold\n",
+ temperature, thresolds[thrs]);
else
- nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
+ temperature, thresolds[thrs]);
active = (dir == NVKM_THERM_THRS_RISING);
switch (thrs) {
@@ -113,12 +108,12 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
}
break;
case NVKM_THERM_THRS_DOWNCLOCK:
- if (priv->emergency.downclock)
- priv->emergency.downclock(therm, active);
+ if (therm->emergency.downclock)
+ therm->emergency.downclock(therm, active);
break;
case NVKM_THERM_THRS_CRITICAL:
- if (priv->emergency.pause)
- priv->emergency.pause(therm, active);
+ if (therm->emergency.pause)
+ therm->emergency.pause(therm, active);
break;
case NVKM_THERM_THRS_SHUTDOWN:
if (active) {
@@ -145,7 +140,7 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
{
enum nvkm_therm_thrs_direction direction;
enum nvkm_therm_thrs_state prev_state, new_state;
- int temp = therm->temp_get(therm);
+ int temp = therm->func->temp_get(therm);
prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
@@ -166,19 +161,19 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
static void
alarm_timer_callback(struct nvkm_alarm *alarm)
{
- struct nvkm_therm_priv *priv =
- container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm);
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
- struct nvkm_timer *ptimer = nvkm_timer(priv);
- struct nvkm_therm *therm = &priv->base;
+ struct nvkm_therm *therm =
+ container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+ struct nvkm_timer *tmr = therm->subdev.device->timer;
unsigned long flags;
- spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+ spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
NVKM_THERM_THRS_FANBOOST);
- nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+ nvkm_therm_threshold_hyst_polling(therm,
+ &sensor->thrs_down_clock,
NVKM_THERM_THRS_DOWNCLOCK);
nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
@@ -187,46 +182,45 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
NVKM_THERM_THRS_SHUTDOWN);
- spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+ spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
- if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
- ptimer->alarm(ptimer, 1000000000ULL, alarm);
+ if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+ nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}
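
Worth noting how the polling loop above keeps itself alive: alarm_timer_callback re-arms the same nvkm_alarm one second out, but only while temp_get() keeps returning a valid reading and the alarm is not already queued. A minimal standalone sketch of that self-rearming shape (plain C with a simulated one-slot timer; poll_fn and schedule are hypothetical names, not the nvkm API):

#include <stdio.h>

struct alarm {
        void (*func)(struct alarm *);
        unsigned long long timestamp;   /* absolute deadline, ns */
};

static unsigned long long now_ns;       /* simulated clock */
static struct alarm *pending;           /* one-slot "timer" */

static void schedule(struct alarm *a, unsigned long long ns)
{
        /* the real code also checks list_empty() to avoid
         * double-queueing the same alarm */
        a->timestamp = now_ns + ns;
        pending = a;                    /* re-arm */
}

static int temp_get(void) { return 42; } /* stand-in sensor read */

static void poll_fn(struct alarm *a)
{
        printf("t=%llu ns: temp=%d C\n", now_ns, temp_get());
        if (temp_get() >= 0)            /* sensor still valid? */
                schedule(a, 1000000000ULL); /* poll again in 1s */
}

int main(void)
{
        struct alarm a = { .func = poll_fn };
        int i;

        schedule(&a, 0);
        for (i = 0; i < 3 && pending; i++) {
                now_ns = pending->timestamp; /* jump to deadline */
                pending = NULL;
                a.func(&a);
        }
        return 0;
}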
void
nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-
- nv_debug(therm,
- "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
- sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
- sensor->thrs_down_clock.temp,
- sensor->thrs_down_clock.hysteresis,
- sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
- sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
-
- alarm_timer_callback(&priv->sensor.therm_poll_alarm);
+ struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+
+ nvkm_debug(&therm->subdev,
+ "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+ sensor->thrs_fan_boost.temp,
+ sensor->thrs_fan_boost.hysteresis,
+ sensor->thrs_down_clock.temp,
+ sensor->thrs_down_clock.hysteresis,
+ sensor->thrs_critical.temp,
+ sensor->thrs_critical.hysteresis,
+ sensor->thrs_shutdown.temp,
+ sensor->thrs_shutdown.hysteresis);
+
+ alarm_timer_callback(&therm->sensor.therm_poll_alarm);
}
int
nvkm_therm_sensor_init(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- priv->sensor.program_alarms(therm);
+ therm->func->program_alarms(therm);
return 0;
}
int
nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+ struct nvkm_timer *tmr = therm->subdev.device->timer;
if (suspend)
- ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+ nvkm_timer_alarm_cancel(tmr, &therm->sensor.therm_poll_alarm);
return 0;
}
@@ -235,24 +229,24 @@ nvkm_therm_sensor_preinit(struct nvkm_therm *therm)
{
const char *sensor_avail = "yes";
- if (therm->temp_get(therm) < 0)
+ if (therm->func->temp_get(therm) < 0)
sensor_avail = "no";
- nv_info(therm, "internal sensor: %s\n", sensor_avail);
+ nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
}
int
nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
{
- struct nvkm_therm_priv *priv = (void *)therm;
- struct nvkm_bios *bios = nvkm_bios(therm);
+ struct nvkm_subdev *subdev = &therm->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
- nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+ nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
nvkm_therm_temp_set_defaults(therm);
if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
- &priv->bios_sensor))
- nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+ &therm->bios_sensor))
+ nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
nvkm_therm_temp_safety_checks(therm);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index d1d38b4ba30a..e436f0ffe3f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,3 +1,5 @@
nvkm-y += nvkm/subdev/timer/base.o
nvkm-y += nvkm/subdev/timer/nv04.o
+nvkm-y += nvkm/subdev/timer/nv40.o
+nvkm-y += nvkm/subdev/timer/nv41.o
nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d894061ced52..d4dae1f12d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,73 +21,131 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/timer.h>
+#include "priv.h"
-bool
-nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+u64
+nvkm_timer_read(struct nvkm_timer *tmr)
{
- struct nvkm_timer *ptimer = nvkm_timer(obj);
- u64 time0;
-
- time0 = ptimer->read(ptimer);
- do {
- if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
- if ((nv_rd32(obj, addr) & mask) == data)
- return true;
- } else {
- if ((nv_ro32(obj, addr) & mask) == data)
- return true;
- }
- } while (ptimer->read(ptimer) - time0 < nsec);
+ return tmr->func->read(tmr);
+}
+
+void
+nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
+{
+ struct nvkm_alarm *alarm, *atemp;
+ unsigned long flags;
+ LIST_HEAD(exec);
+
+ /* move any due alarms off the pending list */
+ spin_lock_irqsave(&tmr->lock, flags);
+ list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
+ if (alarm->timestamp <= nvkm_timer_read(tmr))
+ list_move_tail(&alarm->head, &exec);
+ }
- return false;
+ /* reschedule interrupt for next alarm time */
+ if (!list_empty(&tmr->alarms)) {
+ alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ } else {
+ tmr->func->alarm_fini(tmr);
+ }
+ spin_unlock_irqrestore(&tmr->lock, flags);
+
+ /* execute any pending alarm handlers */
+ list_for_each_entry_safe(alarm, atemp, &exec, head) {
+ list_del_init(&alarm->head);
+ alarm->func(alarm);
+ }
}
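
The trigger path above is careful about lock scope: due alarms are spliced onto a local exec list while tmr->lock is held, and their handlers only run after the lock is dropped, which is what lets a handler (such as the therm poll) re-arm itself without deadlocking. A compact sketch of that splice-under-lock, run-unlocked shape, using a plain singly linked list and pthreads rather than the kernel primitives:

#include <pthread.h>
#include <stdio.h>

struct alarm {
        unsigned long long timestamp;
        void (*func)(struct alarm *);
        struct alarm *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct alarm *alarms;            /* pending, soonest first */

static void trigger(unsigned long long now)
{
        struct alarm *exec = NULL, **tail = &exec, *a;

        /* under the lock: only *move* due entries to a private list */
        pthread_mutex_lock(&lock);
        while (alarms && alarms->timestamp <= now) {
                a = alarms;
                alarms = a->next;
                a->next = NULL;
                *tail = a;
                tail = &a->next;
        }
        pthread_mutex_unlock(&lock);

        /* outside the lock: run handlers, which may re-arm freely */
        while ((a = exec)) {
                exec = a->next;
                a->func(a);
        }
}

static void hello(struct alarm *a)
{
        printf("due: %llu\n", a->timestamp);
}

int main(void)
{
        struct alarm a = { 10, hello, NULL };

        alarms = &a;
        trigger(20);
        return 0;
}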
-bool
-nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+void
+nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
{
- struct nvkm_timer *ptimer = nvkm_timer(obj);
- u64 time0;
-
- time0 = ptimer->read(ptimer);
- do {
- if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
- if ((nv_rd32(obj, addr) & mask) != data)
- return true;
- } else {
- if ((nv_ro32(obj, addr) & mask) != data)
- return true;
+ struct nvkm_alarm *list;
+ unsigned long flags;
+
+ alarm->timestamp = nvkm_timer_read(tmr) + nsec;
+
+ /* append new alarm to list, in soonest-alarm-first order */
+ spin_lock_irqsave(&tmr->lock, flags);
+ if (!nsec) {
+ if (!list_empty(&alarm->head))
+ list_del(&alarm->head);
+ } else {
+ list_for_each_entry(list, &tmr->alarms, head) {
+ if (list->timestamp > alarm->timestamp)
+ break;
}
- } while (ptimer->read(ptimer) - time0 < nsec);
+ list_add_tail(&alarm->head, &list->head);
+ }
+ spin_unlock_irqrestore(&tmr->lock, flags);
- return false;
+ /* process pending alarms */
+ nvkm_timer_alarm_trigger(tmr);
}
-bool
-nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
+void
+nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
{
- struct nvkm_timer *ptimer = nvkm_timer(obj);
- u64 time0;
+ unsigned long flags;
+ spin_lock_irqsave(&tmr->lock, flags);
+ list_del_init(&alarm->head);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+}
- time0 = ptimer->read(ptimer);
- do {
- if (func(data) == true)
- return true;
- } while (ptimer->read(ptimer) - time0 < nsec);
+static void
+nvkm_timer_intr(struct nvkm_subdev *subdev)
+{
+ struct nvkm_timer *tmr = nvkm_timer(subdev);
+ tmr->func->intr(tmr);
+}
- return false;
+static int
+nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_timer *tmr = nvkm_timer(subdev);
+ tmr->func->alarm_fini(tmr);
+ return 0;
}
-void
-nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm)
+static int
+nvkm_timer_init(struct nvkm_subdev *subdev)
{
- struct nvkm_timer *ptimer = nvkm_timer(obj);
- ptimer->alarm(ptimer, nsec, alarm);
+ struct nvkm_timer *tmr = nvkm_timer(subdev);
+ if (tmr->func->init)
+ tmr->func->init(tmr);
+ tmr->func->time(tmr, ktime_to_ns(ktime_get()));
+ nvkm_timer_alarm_trigger(tmr);
+ return 0;
}
-void
-nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm)
+static void *
+nvkm_timer_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_timer *ptimer = nvkm_timer(obj);
- ptimer->alarm_cancel(ptimer, alarm);
+ return nvkm_timer(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_timer = {
+ .dtor = nvkm_timer_dtor,
+ .init = nvkm_timer_init,
+ .fini = nvkm_timer_fini,
+ .intr = nvkm_timer_intr,
+};
+
+int
+nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
+ int index, struct nvkm_timer **ptmr)
+{
+ struct nvkm_timer *tmr;
+
+ if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+ tmr->func = func;
+ INIT_LIST_HEAD(&tmr->alarms);
+ spin_lock_init(&tmr->lock);
+ return 0;
}
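
nvkm_timer_alarm keeps tmr->alarms ordered soonest-first, so alarm_init() only ever needs to program the hardware for the head entry; insertion walks to the first entry due later and links in front of it. The same ordering logic as a freestanding sketch (a pointer-to-pointer walk instead of list_head, but the <= comparison preserves FIFO order among equal timestamps just as list_add_tail does):

#include <stdio.h>

struct alarm {
        unsigned long long timestamp;
        struct alarm *next;
};

/* walk to the first entry due later than us and link in front of
 * it; mirrors the list_for_each_entry + list_add_tail idiom in
 * nvkm_timer_alarm above */
static void alarm_insert(struct alarm **head, struct alarm *a)
{
        struct alarm **p = head;

        while (*p && (*p)->timestamp <= a->timestamp)
                p = &(*p)->next;
        a->next = *p;
        *p = a;
}

int main(void)
{
        struct alarm a = { 30 }, b = { 10 }, c = { 20 }, *head = NULL, *p;

        alarm_insert(&head, &a);
        alarm_insert(&head, &b);
        alarm_insert(&head, &c);
        for (p = head; p; p = p->next)
                printf("%llu\n", p->timestamp);  /* 10 20 30 */
        return 0;
}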
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 80e38063dd9b..9ed5f64912d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,36 +21,19 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
-static int
-gk20a_timer_init(struct nvkm_object *object)
-{
- struct nv04_timer_priv *priv = (void *)object;
- u32 hi = upper_32_bits(priv->suspend_time);
- u32 lo = lower_32_bits(priv->suspend_time);
- int ret;
-
- ret = nvkm_timer_init(&priv->base);
- if (ret)
- return ret;
-
- nv_debug(priv, "time low : 0x%08x\n", lo);
- nv_debug(priv, "time high : 0x%08x\n", hi);
+static const struct nvkm_timer_func
+gk20a_timer = {
+ .intr = nv04_timer_intr,
+ .read = nv04_timer_read,
+ .time = nv04_timer_time,
+ .alarm_init = nv04_timer_alarm_init,
+ .alarm_fini = nv04_timer_alarm_fini,
+};
- /* restore the time before suspend */
- nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
- nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
- return 0;
+int
+gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+ return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
}
-
-struct nvkm_oclass
-gk20a_timer_oclass = {
- .handle = NV_SUBDEV(TIMER, 0xff),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_timer_ctor,
- .dtor = nv04_timer_dtor,
- .init = gk20a_timer_init,
- .fini = nv04_timer_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 6b7facbe59a2..7b9ce87f0617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,165 +21,92 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "priv.h"
+#include "regsnv04.h"
-#include <core/device.h>
-
-static u64
-nv04_timer_read(struct nvkm_timer *ptimer)
+void
+nv04_timer_time(struct nvkm_timer *tmr, u64 time)
{
- struct nv04_timer_priv *priv = (void *)ptimer;
- u32 hi, lo;
+ struct nvkm_subdev *subdev = &tmr->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 hi = upper_32_bits(time);
+ u32 lo = lower_32_bits(time);
- do {
- hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
- lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
- } while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+ nvkm_debug(subdev, "time low : %08x\n", lo);
+ nvkm_debug(subdev, "time high : %08x\n", hi);
- return ((u64)hi << 32 | lo);
+ nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
+ nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
}
-static void
-nv04_timer_alarm_trigger(struct nvkm_timer *ptimer)
+u64
+nv04_timer_read(struct nvkm_timer *tmr)
{
- struct nv04_timer_priv *priv = (void *)ptimer;
- struct nvkm_alarm *alarm, *atemp;
- unsigned long flags;
- LIST_HEAD(exec);
-
- /* move any due alarms off the pending list */
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
- if (alarm->timestamp <= ptimer->read(ptimer))
- list_move_tail(&alarm->head, &exec);
- }
+ struct nvkm_device *device = tmr->subdev.device;
+ u32 hi, lo;
- /* reschedule interrupt for next alarm time */
- if (!list_empty(&priv->alarms)) {
- alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
- nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
- nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
- } else {
- nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ do {
+ hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
+ lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
+ } while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
- /* execute any pending alarm handlers */
- list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del_init(&alarm->head);
- alarm->func(alarm);
- }
+ return ((u64)hi << 32 | lo);
}
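
nv04_timer_read stitches a 64-bit timestamp out of two 32-bit registers, and the do/while guards against the low word carrying into the high word between the two reads. The retry idiom in isolation, with plain variables standing in for the TIME_0/TIME_1 registers:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the NV04_PTIMER_TIME_0/1 registers */
static volatile uint32_t time_lo, time_hi;

static uint64_t timer_read(void)
{
        uint32_t hi, lo;

        /* if the low word carried into the high word between the
         * two reads, hi no longer matches -- retry until stable */
        do {
                hi = time_hi;
                lo = time_lo;
        } while (hi != time_hi);

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        time_hi = 1;
        time_lo = 0x80000000;
        printf("0x%016llx\n", (unsigned long long)timer_read());
        return 0;
}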
-static void
-nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_fini(struct nvkm_timer *tmr)
{
- struct nv04_timer_priv *priv = (void *)ptimer;
- struct nvkm_alarm *list;
- unsigned long flags;
-
- alarm->timestamp = ptimer->read(ptimer) + time;
-
- /* append new alarm to list, in soonest-alarm-first order */
- spin_lock_irqsave(&priv->lock, flags);
- if (!time) {
- if (!list_empty(&alarm->head))
- list_del(&alarm->head);
- } else {
- list_for_each_entry(list, &priv->alarms, head) {
- if (list->timestamp > alarm->timestamp)
- break;
- }
- list_add_tail(&alarm->head, &list->head);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* process pending alarms */
- nv04_timer_alarm_trigger(ptimer);
+ struct nvkm_device *device = tmr->subdev.device;
+ nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
}
-static void
-nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
{
- struct nv04_timer_priv *priv = (void *)ptimer;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- list_del_init(&alarm->head);
- spin_unlock_irqrestore(&priv->lock, flags);
+ struct nvkm_device *device = tmr->subdev.device;
+ nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
+ nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
}
-static void
-nv04_timer_intr(struct nvkm_subdev *subdev)
+void
+nv04_timer_intr(struct nvkm_timer *tmr)
{
- struct nv04_timer_priv *priv = (void *)subdev;
- u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+ struct nvkm_subdev *subdev = &tmr->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
- nv04_timer_alarm_trigger(&priv->base);
- nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+ nvkm_timer_alarm_trigger(tmr);
+ nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
stat &= ~0x00000001;
}
if (stat) {
- nv_error(priv, "unknown stat 0x%08x\n", stat);
- nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
}
}
-int
-nv04_timer_fini(struct nvkm_object *object, bool suspend)
-{
- struct nv04_timer_priv *priv = (void *)object;
- if (suspend)
- priv->suspend_time = nv04_timer_read(&priv->base);
- nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
- return nvkm_timer_fini(&priv->base, suspend);
-}
-
-static int
-nv04_timer_init(struct nvkm_object *object)
+static void
+nv04_timer_init(struct nvkm_timer *tmr)
{
- struct nvkm_device *device = nv_device(object);
- struct nv04_timer_priv *priv = (void *)object;
- u32 m = 1, f, n, d, lo, hi;
- int ret;
-
- ret = nvkm_timer_init(&priv->base);
- if (ret)
- return ret;
+ struct nvkm_subdev *subdev = &tmr->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 f = 0; /*XXX: nvclk */
+ u32 n, d;
/* aim for 31.25MHz, which gives us nanosecond timestamps */
d = 1000000 / 32;
-
- /* determine base clock for timer source */
-#if 0 /*XXX*/
- if (device->chipset < 0x40) {
- n = nvkm_hw_get_clock(device, PLL_CORE);
- } else
-#endif
- if (device->chipset <= 0x40) {
- /*XXX: figure this out */
- f = -1;
- n = 0;
- } else {
- f = device->crystal;
- n = f;
- while (n < (d * 2)) {
- n += (n / m);
- m++;
- }
-
- nv_wr32(priv, 0x009220, m - 1);
- }
-
- if (!n) {
- nv_warn(priv, "unknown input clock freq\n");
- if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
- !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
- nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
- nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
+ n = f;
+
+ if (!f) {
+ n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+ d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+ if (!n || !d) {
+ n = 1;
+ d = 1;
}
- return 0;
+ nvkm_warn(subdev, "unknown input clock freq\n");
}
/* reduce ratio to acceptable values */
@@ -198,65 +125,27 @@ nv04_timer_init(struct nvkm_object *object)
d >>= 1;
}
- /* restore the time before suspend */
- lo = priv->suspend_time;
- hi = (priv->suspend_time >> 32);
+ nvkm_debug(subdev, "input frequency : %dHz\n", f);
+ nvkm_debug(subdev, "numerator : %08x\n", n);
+ nvkm_debug(subdev, "denominator : %08x\n", d);
+ nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
- nv_debug(priv, "input frequency : %dHz\n", f);
- nv_debug(priv, "input multiplier: %d\n", m);
- nv_debug(priv, "numerator : 0x%08x\n", n);
- nv_debug(priv, "denominator : 0x%08x\n", d);
- nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
- nv_debug(priv, "time low : 0x%08x\n", lo);
- nv_debug(priv, "time high : 0x%08x\n", hi);
-
- nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
- nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
- nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
- nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
- nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
- nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
- return 0;
+ nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+ nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
}
-void
-nv04_timer_dtor(struct nvkm_object *object)
-{
- struct nv04_timer_priv *priv = (void *)object;
- return nvkm_timer_destroy(&priv->base);
-}
+static const struct nvkm_timer_func
+nv04_timer = {
+ .init = nv04_timer_init,
+ .intr = nv04_timer_intr,
+ .read = nv04_timer_read,
+ .time = nv04_timer_time,
+ .alarm_init = nv04_timer_alarm_init,
+ .alarm_fini = nv04_timer_alarm_fini,
+};
int
-nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
{
- struct nv04_timer_priv *priv;
- int ret;
-
- ret = nvkm_timer_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.base.intr = nv04_timer_intr;
- priv->base.read = nv04_timer_read;
- priv->base.alarm = nv04_timer_alarm;
- priv->base.alarm_cancel = nv04_timer_alarm_cancel;
- priv->suspend_time = 0;
-
- INIT_LIST_HEAD(&priv->alarms);
- spin_lock_init(&priv->lock);
- return 0;
+ return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
}
-
-struct nvkm_oclass
-nv04_timer_oclass = {
- .handle = NV_SUBDEV(TIMER, 0x04),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv04_timer_ctor,
- .dtor = nv04_timer_dtor,
- .init = nv04_timer_init,
- .fini = nv04_timer_fini,
- }
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
deleted file mode 100644
index 89996a9826b1..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __NVKM_TIMER_NV04_H__
-#define __NVKM_TIMER_NV04_H__
-#include "priv.h"
-
-#define NV04_PTIMER_INTR_0 0x009100
-#define NV04_PTIMER_INTR_EN_0 0x009140
-#define NV04_PTIMER_NUMERATOR 0x009200
-#define NV04_PTIMER_DENOMINATOR 0x009210
-#define NV04_PTIMER_TIME_0 0x009400
-#define NV04_PTIMER_TIME_1 0x009410
-#define NV04_PTIMER_ALARM_0 0x009420
-
-struct nv04_timer_priv {
- struct nvkm_timer base;
- struct list_head alarms;
- spinlock_t lock;
- u64 suspend_time;
-};
-
-int nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
- struct nvkm_oclass *, void *, u32,
- struct nvkm_object **);
-void nv04_timer_dtor(struct nvkm_object *);
-int nv04_timer_fini(struct nvkm_object *, bool);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
new file mode 100644
index 000000000000..bb99a152f26e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv40_timer_init(struct nvkm_timer *tmr)
+{
+ struct nvkm_subdev *subdev = &tmr->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 f = 0; /*XXX: figure this out */
+ u32 n, d;
+
+ /* aim for 31.25MHz, which gives us nanosecond timestamps */
+ d = 1000000 / 32;
+ n = f;
+
+ if (!f) {
+ n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+ d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+ if (!n || !d) {
+ n = 1;
+ d = 1;
+ }
+ nvkm_warn(subdev, "unknown input clock freq\n");
+ }
+
+ /* reduce ratio to acceptable values */
+ while (((n % 5) == 0) && ((d % 5) == 0)) {
+ n /= 5;
+ d /= 5;
+ }
+
+ while (((n % 2) == 0) && ((d % 2) == 0)) {
+ n /= 2;
+ d /= 2;
+ }
+
+ while (n > 0xffff || d > 0xffff) {
+ n >>= 1;
+ d >>= 1;
+ }
+
+ nvkm_debug(subdev, "input frequency : %dHz\n", f);
+ nvkm_debug(subdev, "numerator : %08x\n", n);
+ nvkm_debug(subdev, "denominator : %08x\n", d);
+ nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
+
+ nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+ nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv40_timer = {
+ .init = nv40_timer_init,
+ .intr = nv04_timer_intr,
+ .read = nv04_timer_read,
+ .time = nv04_timer_time,
+ .alarm_init = nv04_timer_alarm_init,
+ .alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+ return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+}
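
All the PTIMER init paths share the same arithmetic: seed the denominator per the driver's "aim for 31.25MHz" comment (d = 1000000 / 32), set the numerator from the input clock, strip common factors of 5 and 2, then shift both down until they fit the 16-bit NUMERATOR/DENOMINATOR registers. Just the reduction, extracted into a testable helper (the 27MHz crystal value is illustrative):

#include <stdio.h>

static void timer_ratio(unsigned int f, unsigned int *pn, unsigned int *pd)
{
        unsigned int n = f, d = 1000000 / 32;

        /* reduce ratio to acceptable values, as in nv04/nv40/nv41 */
        while ((n % 5) == 0 && (d % 5) == 0) { n /= 5; d /= 5; }
        while ((n % 2) == 0 && (d % 2) == 0) { n /= 2; d /= 2; }
        while (n > 0xffff || d > 0xffff) { n >>= 1; d >>= 1; }

        *pn = n;
        *pd = d;
}

int main(void)
{
        unsigned int n, d;

        timer_ratio(27000000, &n, &d);  /* 27MHz crystal, say */
        printf("numerator=%#x denominator=%#x\n", n, d);
        return 0;
}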
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
new file mode 100644
index 000000000000..3cf9ec1b1b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv41_timer_init(struct nvkm_timer *tmr)
+{
+ struct nvkm_subdev *subdev = &tmr->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 f = device->crystal;
+ u32 m = 1, n, d;
+
+ /* aim for 31.25MHz, which gives us nanosecond timestamps */
+ d = 1000000 / 32;
+ n = f;
+
+ while (n < (d * 2)) {
+ n += (n / m);
+ m++;
+ }
+
+ /* reduce ratio to acceptable values */
+ while (((n % 5) == 0) && ((d % 5) == 0)) {
+ n /= 5;
+ d /= 5;
+ }
+
+ while (((n % 2) == 0) && ((d % 2) == 0)) {
+ n /= 2;
+ d /= 2;
+ }
+
+ while (n > 0xffff || d > 0xffff) {
+ n >>= 1;
+ d >>= 1;
+ }
+
+ nvkm_debug(subdev, "input frequency : %dHz\n", f);
+ nvkm_debug(subdev, "input multiplier: %d\n", m);
+ nvkm_debug(subdev, "numerator : %08x\n", n);
+ nvkm_debug(subdev, "denominator : %08x\n", d);
+ nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
+
+ nvkm_wr32(device, 0x009220, m - 1);
+ nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+ nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv41_timer = {
+ .init = nv41_timer_init,
+ .intr = nv04_timer_intr,
+ .read = nv04_timer_read,
+ .time = nv04_timer_time,
+ .alarm_init = nv04_timer_alarm_init,
+ .alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+ return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 08e29a3da188..f820ca2aeda4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,26 @@
#ifndef __NVKM_TIMER_PRIV_H__
#define __NVKM_TIMER_PRIV_H__
+#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
#include <subdev/timer.h>
+
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
+ int index, struct nvkm_timer **);
+
+struct nvkm_timer_func {
+ void (*init)(struct nvkm_timer *);
+ void (*intr)(struct nvkm_timer *);
+ u64 (*read)(struct nvkm_timer *);
+ void (*time)(struct nvkm_timer *, u64 time);
+ void (*alarm_init)(struct nvkm_timer *, u32 time);
+ void (*alarm_fini)(struct nvkm_timer *);
+};
+
+void nvkm_timer_alarm_trigger(struct nvkm_timer *);
+
+void nv04_timer_fini(struct nvkm_timer *);
+void nv04_timer_intr(struct nvkm_timer *);
+void nv04_timer_time(struct nvkm_timer *, u64);
+u64 nv04_timer_read(struct nvkm_timer *);
+void nv04_timer_alarm_init(struct nvkm_timer *, u32);
+void nv04_timer_alarm_fini(struct nvkm_timer *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
new file mode 100644
index 000000000000..10bef85b485e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -0,0 +1,7 @@
+#define NV04_PTIMER_INTR_0 0x009100
+#define NV04_PTIMER_INTR_EN_0 0x009140
+#define NV04_PTIMER_NUMERATOR 0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0 0x009400
+#define NV04_PTIMER_TIME_1 0x009410
+#define NV04_PTIMER_ALARM_0 0x009420
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 39f15803f2d4..4752dbd33923 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,49 +21,45 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/volt.h>
+#include "priv.h"
+
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
-static int
+int
nvkm_volt_get(struct nvkm_volt *volt)
{
- if (volt->vid_get) {
- int ret = volt->vid_get(volt), i;
- if (ret >= 0) {
- for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].vid == ret)
- return volt->vid[i].uv;
- }
- ret = -EINVAL;
+ int ret = volt->func->vid_get(volt), i;
+ if (ret >= 0) {
+ for (i = 0; i < volt->vid_nr; i++) {
+ if (volt->vid[i].vid == ret)
+ return volt->vid[i].uv;
}
- return ret;
+ ret = -EINVAL;
}
- return -ENODEV;
+ return ret;
}
static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
- if (volt->vid_set) {
- int i, ret = -EINVAL;
- for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->vid_set(volt, volt->vid[i].vid);
- nv_debug(volt, "set %duv: %d\n", uv, ret);
- break;
- }
+ struct nvkm_subdev *subdev = &volt->subdev;
+ int i, ret = -EINVAL;
+ for (i = 0; i < volt->vid_nr; i++) {
+ if (volt->vid[i].uv == uv) {
+ ret = volt->func->vid_set(volt, volt->vid[i].vid);
+ nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ break;
}
- return ret;
}
- return -ENODEV;
+ return ret;
}
static int
nvkm_volt_map(struct nvkm_volt *volt, u8 id)
{
- struct nvkm_bios *bios = nvkm_bios(volt);
+ struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
u16 vmap;
@@ -82,10 +78,15 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
return id ? id * 10000 : -ENODEV;
}
-static int
+int
nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
{
- int ret = nvkm_volt_map(volt, id);
+ int ret;
+
+ if (volt->func->set_id)
+ return volt->func->set_id(volt, id, condition);
+
+ ret = nvkm_volt_map(volt, id);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
@@ -134,51 +135,41 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
}
-int
-_nvkm_volt_init(struct nvkm_object *object)
+static int
+nvkm_volt_init(struct nvkm_subdev *subdev)
{
- struct nvkm_volt *volt = (void *)object;
- int ret;
-
- ret = nvkm_subdev_init(&volt->base);
- if (ret)
- return ret;
-
- ret = volt->get(volt);
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+ int ret = nvkm_volt_get(volt);
if (ret < 0) {
if (ret != -ENODEV)
- nv_debug(volt, "current voltage unknown\n");
+ nvkm_debug(subdev, "current voltage unknown\n");
return 0;
}
-
- nv_info(volt, "GPU voltage: %duv\n", ret);
+ nvkm_debug(subdev, "current voltage: %duv\n", ret);
return 0;
}
-void
-_nvkm_volt_dtor(struct nvkm_object *object)
+static void *
+nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_volt *volt = (void *)object;
- nvkm_subdev_destroy(&volt->base);
+ return nvkm_volt(subdev);
}
-int
-nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, int length, void **pobject)
-{
- struct nvkm_bios *bios = nvkm_bios(parent);
- struct nvkm_volt *volt;
- int ret, i;
+static const struct nvkm_subdev_func
+nvkm_volt = {
+ .dtor = nvkm_volt_dtor,
+ .init = nvkm_volt_init,
+};
- ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT",
- "voltage", length, pobject);
- volt = *pobject;
- if (ret)
- return ret;
+void
+nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
+ int index, struct nvkm_volt *volt)
+{
+ struct nvkm_bios *bios = device->bios;
+ int i;
- volt->get = nvkm_volt_get;
- volt->set = nvkm_volt_set;
- volt->set_id = nvkm_volt_set_id;
+ nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+ volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
if (bios)
@@ -186,19 +177,18 @@ nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
- nv_debug(volt, "VID %02x: %duv\n",
- volt->vid[i].vid, volt->vid[i].uv);
- }
-
- /*XXX: this is an assumption.. there probably exists boards
- * out there with i2c-connected voltage controllers too..
- */
- ret = nvkm_voltgpio_init(volt);
- if (ret == 0) {
- volt->vid_get = nvkm_voltgpio_get;
- volt->vid_set = nvkm_voltgpio_set;
+ nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
+ volt->vid[i].vid, volt->vid[i].uv);
}
}
+}
- return ret;
+int
+nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
+ int index, struct nvkm_volt **pvolt)
+{
+ if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_volt_ctor(func, device, index, *pvolt);
+ return 0;
}
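
The reworked base code treats volt->vid[] as a small two-way table: nvkm_volt_get maps the raw VID reported by vid_get() back to microvolts, while nvkm_volt_set does the reverse lookup and insists on an exact microvolt match. Both lookups in isolation (the table values are made up):

#include <stdio.h>

struct vid_entry { unsigned char vid; unsigned int uv; };

static const struct vid_entry vid[] = {
        { 0, 800000 }, { 1, 900000 }, { 2, 1000000 },
};
#define VID_NR (sizeof(vid) / sizeof(vid[0]))

/* raw VID -> microvolts (the nvkm_volt_get direction) */
static int vid_to_uv(int raw)
{
        unsigned int i;

        for (i = 0; i < VID_NR; i++)
                if (vid[i].vid == raw)
                        return vid[i].uv;
        return -1;      /* -EINVAL in the driver */
}

/* microvolts -> VID (the nvkm_volt_set direction, exact match) */
static int uv_to_vid(unsigned int uv)
{
        unsigned int i;

        for (i = 0; i < VID_NR; i++)
                if (vid[i].uv == uv)
                        return vid[i].vid;
        return -1;
}

int main(void)
{
        printf("vid 1 -> %duv\n", vid_to_uv(1));
        printf("900000uv -> vid %d\n", uv_to_vid(900000));
        return 0;
}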
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 871fd51011db..fd56c6476064 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,10 +19,10 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/volt.h>
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
+#include "priv.h"
+
+#include <core/tegra.h>
struct cvb_coef {
int c0;
@@ -33,7 +33,7 @@ struct cvb_coef {
int c5;
};
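
The c0..c5 coefficients parameterize a CVB (characterized voltage) curve evaluated against the chip's speedo value; the evaluation itself sits outside these hunks. As a rough illustration only -- the quadratic-in-speedo form below and the scaling-free arithmetic are assumptions, and the real driver also folds in fixed-point scaling and the thermal terms c3..c5:

#include <stdio.h>

struct cvb_coef { int c0, c1, c2, c3, c4, c5; };

/* ASSUMED shape: quadratic in speedo, Horner form, no scaling */
static int cvb_uv(const struct cvb_coef *c, int speedo)
{
        return c->c0 + speedo * (c->c1 + speedo * c->c2);
}

int main(void)
{
        /* made-up coefficients, purely for illustration */
        const struct cvb_coef coef = { .c0 = 800000, .c1 = 100, .c2 = 1 };

        printf("%duv\n", cvb_uv(&coef, 100));
        return 0;
}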
-struct gk20a_volt_priv {
+struct gk20a_volt {
struct nvkm_volt base;
struct regulator *vdd;
};
@@ -101,43 +101,45 @@ gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
}
static int
-gk20a_volt_vid_get(struct nvkm_volt *volt)
+gk20a_volt_vid_get(struct nvkm_volt *base)
{
- struct gk20a_volt_priv *priv = (void *)volt;
+ struct gk20a_volt *volt = gk20a_volt(base);
int i, uv;
- uv = regulator_get_voltage(priv->vdd);
+ uv = regulator_get_voltage(volt->vdd);
- for (i = 0; i < volt->vid_nr; i++)
- if (volt->vid[i].uv >= uv)
+ for (i = 0; i < volt->base.vid_nr; i++)
+ if (volt->base.vid[i].uv >= uv)
return i;
return -EINVAL;
}
static int
-gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid)
+gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
- struct gk20a_volt_priv *priv = (void *)volt;
+ struct gk20a_volt *volt = gk20a_volt(base);
+ struct nvkm_subdev *subdev = &volt->base.subdev;
- nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
- return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+ nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
+ return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
static int
-gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
- struct gk20a_volt_priv *priv = (void *)volt;
- int prev_uv = regulator_get_voltage(priv->vdd);
- int target_uv = volt->vid[id].uv;
+ struct gk20a_volt *volt = gk20a_volt(base);
+ struct nvkm_subdev *subdev = &volt->base.subdev;
+ int prev_uv = regulator_get_voltage(volt->vdd);
+ int target_uv = volt->base.vid[id].uv;
int ret;
- nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
- prev_uv, target_uv, condition);
+ nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
+ prev_uv, target_uv, condition);
if (!condition ||
(condition < 0 && target_uv < prev_uv) ||
(condition > 0 && target_uv > prev_uv)) {
- ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+ ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
} else {
ret = 0;
}
@@ -145,53 +147,42 @@ gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
return ret;
}
-static int
-gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+static const struct nvkm_volt_func
+gk20a_volt = {
+ .vid_get = gk20a_volt_vid_get,
+ .vid_set = gk20a_volt_vid_set,
+ .set_id = gk20a_volt_set_id,
+};
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
- struct gk20a_volt_priv *priv;
- struct nvkm_volt *volt;
- struct nouveau_platform_device *plat;
- int i, ret, uv;
-
- ret = nvkm_volt_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- volt = &priv->base;
-
- plat = nv_device_to_platform(nv_device(parent));
-
- uv = regulator_get_voltage(plat->gpu->vdd);
- nv_info(priv, "The default voltage is %duV\n", uv);
-
- priv->vdd = plat->gpu->vdd;
- priv->base.vid_get = gk20a_volt_vid_get;
- priv->base.vid_set = gk20a_volt_vid_set;
- priv->base.set_id = gk20a_volt_set_id;
-
- volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
- nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
- for (i = 0; i < volt->vid_nr; i++) {
- volt->vid[i].vid = i;
- volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
- plat->gpu_speedo);
- nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
- volt->vid[i].uv);
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gk20a_volt *volt;
+ int i, uv;
+
+ if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+ *pvolt = &volt->base;
+
+ uv = regulator_get_voltage(tdev->vdd);
+ nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
+
+ volt->vdd = tdev->vdd;
+
+ volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+ nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
+ volt->base.vid_nr);
+ for (i = 0; i < volt->base.vid_nr; i++) {
+ volt->base.vid[i].vid = i;
+ volt->base.vid[i].uv =
+ gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+ tdev->gpu_speedo);
+ nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
+ volt->base.vid[i].vid, volt->base.vid[i].uv);
}
return 0;
}
-
-struct nvkm_oclass
-gk20a_volt_oclass = {
- .handle = NV_SUBDEV(VOLT, 0xea),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = gk20a_volt_ctor,
- .dtor = _nvkm_volt_dtor,
- .init = _nvkm_volt_init,
- .fini = _nvkm_volt_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index b778deb32d93..d2bac1d77819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -34,13 +34,13 @@ static const u8 tags[] = {
int
nvkm_voltgpio_get(struct nvkm_volt *volt)
{
- struct nvkm_gpio *gpio = nvkm_gpio(volt);
+ struct nvkm_gpio *gpio = volt->subdev.device->gpio;
u8 vid = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tags); i++) {
if (volt->vid_mask & (1 << i)) {
- int ret = gpio->get(gpio, 0, tags[i], 0xff);
+ int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
if (ret < 0)
return ret;
vid |= ret << i;
@@ -53,12 +53,12 @@ nvkm_voltgpio_get(struct nvkm_volt *volt)
int
nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
{
- struct nvkm_gpio *gpio = nvkm_gpio(volt);
+ struct nvkm_gpio *gpio = volt->subdev.device->gpio;
int i;
for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
if (volt->vid_mask & (1 << i)) {
- int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+ int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
if (ret < 0)
return ret;
}
@@ -70,7 +70,8 @@ nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
int
nvkm_voltgpio_init(struct nvkm_volt *volt)
{
- struct nvkm_gpio *gpio = nvkm_gpio(volt);
+ struct nvkm_subdev *subdev = &volt->subdev;
+ struct nvkm_gpio *gpio = subdev->device->gpio;
struct dcb_gpio_func func;
int i;
@@ -82,11 +83,11 @@ nvkm_voltgpio_init(struct nvkm_volt *volt)
*/
for (i = 0; i < ARRAY_SIZE(tags); i++) {
if (volt->vid_mask & (1 << i)) {
- int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+ int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
if (ret) {
if (ret != -ENOENT)
return ret;
- nv_debug(volt, "VID bit %d has no GPIO\n", i);
+ nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
volt->vid_mask &= ~(1 << i);
}
}
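
The GPIO-backed voltage control assembles the VID one bit at a time: each set bit i of volt->vid_mask names a GPIO tag, and get/set shift that GPIO's level into or out of bit i of the VID. The bit-assembly on its own (gpio_get and the 'levels' word are a hypothetical fixture standing in for nvkm_gpio_get):

#include <stdio.h>

/* hypothetical fixture: bit i of 'levels' is the sampled state
 * of VID GPIO i (nvkm_gpio_get in the driver) */
static int gpio_get(unsigned int levels, int i)
{
        return (levels >> i) & 1;
}

static int voltgpio_get(unsigned int vid_mask, unsigned int levels)
{
        unsigned char v = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (vid_mask & (1 << i))
                        v |= gpio_get(levels, i) << i;
        return v;
}

int main(void)
{
        /* bits 0..2 wired; levels 0b101 -> vid 5 */
        printf("vid=%d\n", voltgpio_get(0x7, 0x5));
        return 0;
}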
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 0ac5a3f8c9a8..23409387abb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,35 +21,24 @@
*
* Authors: Ben Skeggs
*/
-#include <subdev/volt.h>
+#include "priv.h"
-struct nv40_volt_priv {
- struct nvkm_volt base;
+static const struct nvkm_volt_func
+nv40_volt = {
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
};
-static int
-nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
- struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
+int
+nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
- struct nv40_volt_priv *priv;
+ struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+ *pvolt = volt;
if (ret)
return ret;
- return 0;
+ return nvkm_voltgpio_init(volt);
}
-
-struct nvkm_oclass
-nv40_volt_oclass = {
- .handle = NV_SUBDEV(VOLT, 0x40),
- .ofuncs = &(struct nvkm_ofuncs) {
- .ctor = nv40_volt_ctor,
- .dtor = _nvkm_volt_dtor,
- .init = _nvkm_volt_init,
- .fini = _nvkm_volt_fini,
- },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
new file mode 100644
index 000000000000..394f37c723af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_VOLT_PRIV_H__
+#define __NVKM_VOLT_PRIV_H__
+#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
+#include <subdev/volt.h>
+
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
+ int index, struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
+ int index, struct nvkm_volt **);
+
+struct nvkm_volt_func {
+ int (*vid_get)(struct nvkm_volt *);
+ int (*vid_set)(struct nvkm_volt *, u8 vid);
+ int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+};
+
+int nvkm_voltgpio_init(struct nvkm_volt *);
+int nvkm_voltgpio_get(struct nvkm_volt *);
+int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c928cdc9..9a4ba4f03567 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
}
-static void omap_crtc_atomic_begin(struct drm_crtc *crtc)
+static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
}
-static void omap_crtc_atomic_flush(struct drm_crtc *crtc)
+static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
- msecs_to_jiffies(1))) {
+ msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
/* PVR needs alignment to 8 pixels.. right now that is the most
* restrictive stride requirement..
*/
- return ALIGN(pitch, 8 * bytespp);
+ return roundup(pitch, 8 * bytespp);
}
/* map crtc to vblank mask */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
}
/* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = drm_format_num_planes(fb->pixel_format);
mutex_lock(&omap_fb->lock);
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
if (omap_fb->pin_count > 0) {
mutex_unlock(&omap_fb->lock);
- return 0;
+ return;
}
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_put_paddr(plane->bo);
- if (ret)
- goto fail;
+ omap_gem_put_paddr(plane->bo);
plane->paddr = 0;
}
mutex_unlock(&omap_fb->lock);
-
- return 0;
-
-fail:
- mutex_unlock(&omap_fb->lock);
- return ret;
}
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..b8e4cdec28c3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@ static struct fb_ops omap_fb_ops = {
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
*/
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
- mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
@@ -179,10 +179,10 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -190,7 +190,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, MODULE_NAME);
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto fail_unlock;
- }
-
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -236,8 +229,9 @@ fail_unlock:
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+
+ drm_fb_helper_release_fbi(helper);
+
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@ void omap_fbdev_free(struct drm_device *dev)
struct omap_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
struct omap_fbdev *fbdev;
- struct fb_info *fbi;
DBG();
- fbi = helper->fbdev;
-
- /* only cleanup framebuffer if it is present */
- if (fbi) {
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
/* Release physical address, when DMA is no longer being performed.. this
* could potentially unpin and unmap buffers from TILER
*/
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
+ int ret;
mutex_lock(&obj->dev->struct_mutex);
if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
if (ret) {
dev_err(obj->dev->dev,
"could not unpin pages: %d\n", ret);
- goto fail;
}
ret = tiler_release(omap_obj->block);
if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
omap_obj->block = NULL;
}
}
-fail:
+
mutex_unlock(&obj->dev->struct_mutex);
- return ret;
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
- goto fail;
-
- spin_lock(&priv->list_lock);
- list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ return NULL;
obj = &omap_obj->base;
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
*/
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL);
- if (omap_obj->vaddr)
- flags |= OMAP_BO_DMA;
+ if (!omap_obj->vaddr) {
+ kfree(omap_obj);
+
+ return NULL;
+ }
+ flags |= OMAP_BO_DMA;
}
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
omap_obj->flags = flags;
if (flags & OMAP_BO_TILED) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
dispc_ovl_enable(omap_plane->id, false);
}
+static int omap_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+
+ if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
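
The new atomic_check rejects any plane placement that starts off-screen or spills past the CRTC's adjusted mode, which is what lets atomic_update trust its coordinates. The same bounds test as a pure function:

#include <stdio.h>

/* mirrors omap_plane_atomic_check: the plane must start on-screen
 * and fit entirely inside the CRTC's adjusted mode */
static int plane_fits(int x, int y, unsigned int w, unsigned int h,
                      unsigned int hdisplay, unsigned int vdisplay)
{
        if (x < 0 || y < 0)
                return 0;
        if (x + w > hdisplay || y + h > vdisplay)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", plane_fits(0, 0, 1280, 720, 1920, 1080));    /* 1 */
        printf("%d\n", plane_fits(1800, 0, 640, 480, 1920, 1080));  /* 0 */
        return 0;
}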
+
static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.prepare_fb = omap_plane_prepare_fb,
.cleanup_fb = omap_plane_cleanup_fb,
+ .atomic_check = omap_plane_atomic_check,
.atomic_update = omap_plane_atomic_update,
.atomic_disable = omap_plane_atomic_disable,
};
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6d64c7bb908b..7d4704b1292b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,13 +18,21 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
-config DRM_PANEL_LD9040
- tristate "LD9040 RGB/SPI panel"
+config DRM_PANEL_SAMSUNG_LD9040
+ tristate "Samsung LD9040 RGB/SPI panel"
depends on OF && SPI
select VIDEOMODE_HELPERS
-config DRM_PANEL_S6E8AA0
- tristate "S6E8AA0 DSI video mode panel"
+config DRM_PANEL_LG_LG4573
+ tristate "LG4573 RGB/SPI panel"
+ depends on OF && SPI
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the LG4573 RGB panel.
+ To compile this driver as a module, choose M here.
+
+config DRM_PANEL_SAMSUNG_S6E8AA0
+ tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4b2a0430804b..d0f016dd7ddb 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
-obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
-obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
new file mode 100644
index 000000000000..a7b4939cee6d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
+ *
+ * from:
+ * drivers/gpu/drm/panel/panel-ld9040.c
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+struct lg4573 {
+ struct drm_panel panel;
+ struct spi_device *spi;
+ struct videomode vm;
+};
+
+static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
+{
+ return container_of(panel, struct lg4573, panel);
+}
+
+static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
+{
+ struct spi_transfer xfer = {
+ .len = 2,
+ };
+ u16 temp = cpu_to_be16(data);
+ struct spi_message msg;
+
+ dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
+ xfer.tx_buf = &temp;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(ctx->spi, &msg);
+}
+
+static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
+ unsigned int count)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < count; i++) {
+ ret = lg4573_spi_write_u16(ctx, buffer[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
+{
+ return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs));
+}
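
Every transfer to this panel is one big-endian 16-bit word whose high byte selects the address space: 0x70 for a DCS command byte, and -- inferred from the settings tables below -- 0x72 for a parameter byte. Framing both word types (MIPI_DCS_SET_DISPLAY_ON really is 0x29):

#include <stdint.h>
#include <stdio.h>

/* 0x70nn = DCS command nn, 0x72nn = parameter byte nn */
static uint16_t lg4573_cmd(uint8_t dcs)
{
        return 0x70 << 8 | dcs;
}

static uint16_t lg4573_param(uint8_t val)
{
        return 0x72 << 8 | val;
}

int main(void)
{
        /* MIPI_DCS_SET_DISPLAY_ON is 0x29 */
        printf("%#06x %#06x\n", lg4573_cmd(0x29), lg4573_param(0x08));
        return 0;
}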
+
+static int lg4573_display_on(struct lg4573 *ctx)
+{
+ int ret;
+
+ ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+ if (ret)
+ return ret;
+
+ msleep(5);
+
+ return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int lg4573_display_off(struct lg4573 *ctx)
+{
+ int ret;
+
+ ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+ if (ret)
+ return ret;
+
+ msleep(120);
+
+ return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+}
+
+static int lg4573_display_mode_settings(struct lg4573 *ctx)
+{
+ static const u16 display_mode_settings[] = {
+ 0x703A, 0x7270, 0x70B1, 0x7208,
+ 0x723B, 0x720F, 0x70B2, 0x7200,
+ 0x72C8, 0x70B3, 0x7200, 0x70B4,
+ 0x7200, 0x70B5, 0x7242, 0x7210,
+ 0x7210, 0x7200, 0x7220, 0x70B6,
+ 0x720B, 0x720F, 0x723C, 0x7213,
+ 0x7213, 0x72E8, 0x70B7, 0x7246,
+ 0x7206, 0x720C, 0x7200, 0x7200,
+ };
+
+ dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
+ return lg4573_spi_write_u16_array(ctx, display_mode_settings,
+ ARRAY_SIZE(display_mode_settings));
+}
+
+static int lg4573_power_settings(struct lg4573 *ctx)
+{
+ static const u16 power_settings[] = {
+ 0x70C0, 0x7201, 0x7211, 0x70C3,
+ 0x7207, 0x7203, 0x7204, 0x7204,
+ 0x7204, 0x70C4, 0x7212, 0x7224,
+ 0x7218, 0x7218, 0x7202, 0x7249,
+ 0x70C5, 0x726F, 0x70C6, 0x7241,
+ 0x7263,
+ };
+
+ dev_dbg(ctx->panel.dev, "transfer power settings\n");
+ return lg4573_spi_write_u16_array(ctx, power_settings,
+ ARRAY_SIZE(power_settings));
+}
+
+static int lg4573_gamma_settings(struct lg4573 *ctx)
+{
+ static const u16 gamma_settings[] = {
+ 0x70D0, 0x7203, 0x7207, 0x7273,
+ 0x7235, 0x7200, 0x7201, 0x7220,
+ 0x7200, 0x7203, 0x70D1, 0x7203,
+ 0x7207, 0x7273, 0x7235, 0x7200,
+ 0x7201, 0x7220, 0x7200, 0x7203,
+ 0x70D2, 0x7203, 0x7207, 0x7273,
+ 0x7235, 0x7200, 0x7201, 0x7220,
+ 0x7200, 0x7203, 0x70D3, 0x7203,
+ 0x7207, 0x7273, 0x7235, 0x7200,
+ 0x7201, 0x7220, 0x7200, 0x7203,
+ 0x70D4, 0x7203, 0x7207, 0x7273,
+ 0x7235, 0x7200, 0x7201, 0x7220,
+ 0x7200, 0x7203, 0x70D5, 0x7203,
+ 0x7207, 0x7273, 0x7235, 0x7200,
+ 0x7201, 0x7220, 0x7200, 0x7203,
+ };
+
+ dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
+ return lg4573_spi_write_u16_array(ctx, gamma_settings,
+ ARRAY_SIZE(gamma_settings));
+}
+
+static int lg4573_init(struct lg4573 *ctx)
+{
+ int ret;
+
+ dev_dbg(ctx->panel.dev, "initializing LCD\n");
+
+ ret = lg4573_display_mode_settings(ctx);
+ if (ret)
+ return ret;
+
+ ret = lg4573_power_settings(ctx);
+ if (ret)
+ return ret;
+
+ return lg4573_gamma_settings(ctx);
+}
+
+static int lg4573_power_on(struct lg4573 *ctx)
+{
+ return lg4573_display_on(ctx);
+}
+
+static int lg4573_disable(struct drm_panel *panel)
+{
+ struct lg4573 *ctx = panel_to_lg4573(panel);
+
+ return lg4573_display_off(ctx);
+}
+
+static int lg4573_enable(struct drm_panel *panel)
+{
+ struct lg4573 *ctx = panel_to_lg4573(panel);
+
+ lg4573_init(ctx);
+
+ return lg4573_power_on(ctx);
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 27000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 59,
+ .htotal = 480 + 10 + 59 + 10,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 15,
+ .vtotal = 800 + 15 + 15 + 15,
+ .vrefresh = 60,
+};
+
+static int lg4573_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.width_mm = 61;
+ panel->connector->display_info.height_mm = 103;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs lg4573_drm_funcs = {
+ .disable = lg4573_disable,
+ .enable = lg4573_enable,
+ .get_modes = lg4573_get_modes,
+};
+
+static int lg4573_probe(struct spi_device *spi)
+{
+ struct lg4573 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->spi = spi;
+
+ spi_set_drvdata(spi, ctx);
+ spi->bits_per_word = 8;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &spi->dev;
+ ctx->panel.funcs = &lg4573_drm_funcs;
+
+ return drm_panel_add(&ctx->panel);
+}
+
+static int lg4573_remove(struct spi_device *spi)
+{
+ struct lg4573 *ctx = spi_get_drvdata(spi);
+
+ lg4573_display_off(ctx);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id lg4573_of_match[] = {
+ { .compatible = "lg,lg4573" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lg4573_of_match);
+
+static struct spi_driver lg4573_driver = {
+ .probe = lg4573_probe,
+ .remove = lg4573_remove,
+ .driver = {
+ .name = "lg4573",
+ .owner = THIS_MODULE,
+ .of_match_table = lg4573_of_match,
+ },
+};
+module_spi_driver(lg4573_driver);
+
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_DESCRIPTION("lg4573 LCD Driver");
+MODULE_LICENSE("GPL v2");
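Note on the new driver above: every SPI transfer to the LG4573 is one 16-bit big-endian word whose high byte selects the address space — 0x70 for a DCS command byte (see lg4573_spi_write_dcs()) and 0x72 for a command parameter, which is why the settings tables are full of 0x70xx/0x72xx words. A minimal userspace sketch of that framing, assuming only the byte layout visible in the hunk (the macro names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

#define LG4573_CMD  0x70 /* high byte: DCS command follows */
#define LG4573_DATA 0x72 /* high byte: command parameter follows */
#define MIPI_DCS_EXIT_SLEEP_MODE 0x11

/* Pack one 16-bit panel word the way lg4573_spi_write_u16() puts it
 * on the wire: big-endian, prefix byte first. */
static void lg4573_pack(uint8_t out[2], uint8_t prefix, uint8_t payload)
{
	out[0] = prefix;
	out[1] = payload;
}

int main(void)
{
	uint8_t buf[2];

	lg4573_pack(buf, LG4573_CMD, MIPI_DCS_EXIT_SLEEP_MODE);
	printf("wire bytes: %02x %02x\n", buf[0], buf[1]); /* prints: 70 11 */
	return 0;
}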
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9c27bded4c09..b202377135e7 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -377,7 +377,7 @@ static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
.driver = {
- .name = "ld9040",
+ .name = "panel-samsung-ld9040",
.owner = THIS_MODULE,
.of_match_table = ld9040_of_match,
},
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 30051108eec4..a188a3959f1a 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1051,7 +1051,7 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
.probe = s6e8aa0_probe,
.remove = s6e8aa0_remove,
.driver = {
- .name = "panel_s6e8aa0",
+ .name = "panel-samsung-s6e8aa0",
.of_match_table = s6e8aa0_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f94201b6e882..f97b73ec4713 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -713,7 +713,12 @@ static const struct display_timing hannstar_hsd070pww1_timing = {
.hactive = { 1280, 1280, 1280 },
.hfront_porch = { 1, 1, 10 },
.hback_porch = { 1, 1, 10 },
- .hsync_len = { 52, 158, 661 },
+ /*
+ * According to the data sheet, the minimum horizontal blanking interval
+ * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
+ * minimum working horizontal blanking interval to be 60 clocks.
+ */
+ .hsync_len = { 58, 158, 661 },
.vactive = { 800, 800, 800 },
.vfront_porch = { 1, 1, 10 },
.vback_porch = { 1, 1, 10 },
@@ -729,6 +734,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.width = 151,
.height = 94,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -943,6 +949,60 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
+ .clock = 10870,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 4,
+ .vtotal = 272 + 2 + 4 + 2,
+ .vrefresh = 74,
+};
+
+static const struct panel_desc nec_nl4827hc19_05b = {
+ .modes = &nec_nl4827hc19_05b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static const struct display_timing okaya_rs800480t_7x0gp_timing = {
+ .pixelclock = { 30000000, 30000000, 40000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 40, 40, 40 },
+ .hback_porch = { 40, 40, 40 },
+ .hsync_len = { 1, 48, 48 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 13, 13, 13 },
+ .vback_porch = { 29, 29, 29 },
+ .vsync_len = { 3, 3, 3 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc okaya_rs800480t_7x0gp = {
+ .timings = &okaya_rs800480t_7x0gp_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 87,
+ },
+ .delay = {
+ .prepare = 41,
+ .enable = 50,
+ .unprepare = 41,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
.clock = 25000,
.hdisplay = 480,
@@ -1113,6 +1173,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "nec,nl4827hc19-05b",
+ .data = &nec_nl4827hc19_05b,
+ }, {
+ .compatible = "okaya,rs800480t-7x0gp",
+ .data = &okaya_rs800480t_7x0gp,
+ }, {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
@@ -1169,6 +1235,34 @@ struct panel_desc_dsi {
unsigned int lanes;
};
+static const struct drm_display_mode auo_b080uan01_mode = {
+ .clock = 154500,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 62,
+ .hsync_end = 1200 + 62 + 4,
+ .htotal = 1200 + 62 + 4 + 62,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 9,
+ .vsync_end = 1920 + 9 + 2,
+ .vtotal = 1920 + 9 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b080uan01 = {
+ .desc = {
+ .modes = &auo_b080uan01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 272,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
.clock = 71000,
.hdisplay = 800,
@@ -1256,6 +1350,9 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
static const struct of_device_id dsi_of_match[] = {
{
+ .compatible = "auo,b080uan01",
+ .data = &auo_b080uan01
+ }, {
.compatible = "lg,ld070wx3-sl01",
.data = &lg_ld070wx3_sl01
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a8dbb3ef4e3c..7c6225c84ba6 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
+ /* remember the last custom size for mode validation */
+ qdev->monitors_config_width = mode->hdisplay;
+ qdev->monitors_config_height = mode->vdisplay;
return 1;
}
+static struct mode_size {
+ int w;
+ int h;
+} common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+};
+
static int qxl_add_common_modes(struct drm_connector *connector,
unsigned pwidth,
unsigned pheight)
@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
int i;
- struct mode_size {
- int w;
- int h;
- } common_modes[] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
- };
-
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
static int qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+ int i;
+
/* TODO: is this called for user defined modes? (xrandr --add-mode)
* TODO: check that the mode fits in the framebuffer */
- DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
- mode->vdisplay, mode->status);
- return MODE_OK;
+
+ if (qdev->monitors_config_width == mode->hdisplay &&
+ qdev->monitors_config_height == mode->vdisplay)
+ return MODE_OK;
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
+ return MODE_OK;
+ }
+ return MODE_BAD;
}
static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
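Note on the qxl_display.c change above: qxl_conn_mode_valid() now accepts exactly two classes of mode — the last client-requested custom size remembered in monitors_config_{width,height}, and the static common_modes table that was hoisted to file scope so it can be shared with qxl_add_common_modes(). A standalone model of that predicate, with the table truncated for brevity (values match the hunk):

#include <stdbool.h>
#include <stdio.h>

static const struct { int w, h; } common_modes[] = {
	{ 640, 480}, { 800, 600}, {1024, 768}, {1280, 1024},
	{1920, 1080}, {1920, 1200}, /* ...full list in the patch... */
};

static bool qxl_mode_ok(int cfg_w, int cfg_h, int w, int h)
{
	unsigned int i;

	if (cfg_w == w && cfg_h == h) /* last custom size from the client */
		return true;
	for (i = 0; i < sizeof(common_modes) / sizeof(common_modes[0]); i++)
		if (common_modes[i].w == w && common_modes[i].h == h)
			return true;
	return false;
}

int main(void)
{
	printf("%d %d\n",
	       qxl_mode_ok(1366, 768, 1366, 768),  /* 1: matches custom size */
	       qxl_mode_ok(1366, 768, 1600, 900)); /* 0: neither list matches */
	return 0;
}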
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index d8549690801d..01a86948eb8c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -325,6 +325,8 @@ struct qxl_device {
struct work_struct fb_work;
struct drm_property *hotplug_mode_update_property;
+ int monitors_config_width;
+ int monitors_config_height;
};
/* forward declaration for QXL_INFO_IO */
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e8c2d6..41c422fee31a 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@ static void qxl_fb_fillrect(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
rect->height);
}
@@ -207,7 +207,7 @@ static void qxl_fb_copyarea(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
area->height);
}
@@ -217,7 +217,7 @@ static void qxl_fb_imageblit(struct fb_info *info,
{
struct qxl_fbdev *qfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
image->height);
}
@@ -345,7 +345,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qbo = NULL;
- struct device *device = &qdev->pdev->dev;
int ret;
int size;
int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
shadow);
size = mode_cmd.pitches[0] * mode_cmd.height;
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
@@ -388,7 +387,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
/* setup helper with fb data */
qfbdev->helper.fb = fb;
- qfbdev->helper.fbdev = info;
+
qfbdev->shadow = shadow;
strcpy(info->fix.id, "qxldrmfb");
@@ -410,11 +409,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = qdev->vram_size;
@@ -423,13 +417,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
if (qbo) {
ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@ static int qxl_fb_find_or_create_single(
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
- struct fb_info *info;
struct qxl_framebuffer *qfb = &qfbdev->qfb;
- if (qfbdev->helper.fbdev) {
- info = qfbdev->helper.fbdev;
+ drm_fb_helper_unregister_fbi(&qfbdev->helper);
+ drm_fb_helper_release_fbi(&qfbdev->helper);
- unregister_framebuffer(info);
- framebuffer_release(info);
- }
if (qfb->obj) {
qxlfb_destroy_pinned_object(qfb->obj);
qfb->obj = NULL;
@@ -557,7 +543,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
- fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33de48f4..b28370e014c6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
return;
dev_err(qdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
- mutex_lock(&qdev->ddev->struct_mutex);
dev_err(qdev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference(&bo->gem_base);
- mutex_unlock(&qdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index f81e0d7d0232..9cd49c584263 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return -E2BIG;
tx_buf[0] = msg->address & 0xff;
- tx_buf[1] = msg->address >> 8;
- tx_buf[2] = msg->request << 4;
+ tx_buf[1] = (msg->address >> 8) & 0xff;
+ tx_buf[2] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
tx_buf[3] = msg->size ? (msg->size - 1) : 0;
switch (msg->request & ~DP_AUX_I2C_MOT) {
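Note on the atombios_dp.c fix above: DP AUX addresses are 20 bits wide, but the old code dropped bits 19:16 and let the unmasked high address byte value remain in tx_buf[1]. The corrected packing spreads the address across tx_buf[0..2] with the request nibble in the top half of tx_buf[2]. A userspace model of the byte math (the msg field names are taken from the hunk; the harness is illustrative):

#include <stdint.h>
#include <stdio.h>

static void aux_pack(uint8_t tx[4], uint32_t addr, uint8_t req, uint8_t size)
{
	tx[0] = addr & 0xff;                       /* address bits 7:0 */
	tx[1] = (addr >> 8) & 0xff;                /* address bits 15:8 */
	tx[2] = (req << 4) | ((addr >> 16) & 0xf); /* request | bits 19:16 */
	tx[3] = size ? size - 1 : 0;
}

int main(void)
{
	uint8_t tx[4];

	aux_pack(tx, 0x12345, 0x9, 16); /* e.g. a native AUX read request */
	printf("%02x %02x %02x %02x\n", tx[0], tx[1], tx[2], tx[3]);
	/* prints: 45 23 91 0f */
	return 0;
}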
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
encoder_mode = atombios_get_encoder_mode(encoder);
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- (ENCODER_MODE_IS_DP(encoder_mode) &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_mode_set(encoder, adjusted_mode);
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8730562323a8..4a09947be244 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
tmp |= DPM_ENABLED;
break;
default:
- DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5caa8c6d..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..752072771388 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -76,16 +76,35 @@ static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
{
- int i;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ struct r600_audio_pin *pin = NULL;
+ int i, pin_count;
dce6_afmt_get_connected_pins(rdev);
for (i = 0; i < rdev->audio.num_pins; i++) {
- if (rdev->audio.pin[i].connected)
- return &rdev->audio.pin[i];
+ if (rdev->audio.pin[i].connected) {
+ pin = &rdev->audio.pin[i];
+ pin_count = 0;
+
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (radeon_encoder_is_digital(encoder)) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (dig->pin == pin)
+ pin_count++;
+ }
+ }
+
+ if (pin_count == 0)
+ return pin;
+ }
}
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
+ if (!pin)
+ DRM_ERROR("No connected audio pins found!\n");
+ return pin;
}
void dce6_afmt_select_pin(struct drm_encoder *encoder)
@@ -93,30 +112,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->offset;
-
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
- AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_connector *connector, struct drm_display_mode *mode)
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 tmp = 0, offset;
+ u32 tmp = 0;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +145,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@@ -155,24 +170,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
+ u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset, tmp;
+ u32 tmp;
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
/* program the speaker allocation */
- tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = RREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
@@ -180,13 +195,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32_ENDPOINT(dig->pin->offset,
+ AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- struct cea_sad *sads, int sad_count)
+ struct cea_sad *sads, int sad_count)
{
- u32 offset;
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +221,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig || !dig->afmt || !dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->pin)
return;
- offset = dig->afmt->pin->offset;
-
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@@ -237,7 +250,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
@@ -253,7 +266,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
@@ -272,7 +285,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;
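Note on the dce6_afmt.c change above: dce6_audio_get_pin() used to return the first connected pin, so two digital encoders could end up sharing one audio endpoint. After the hunk it prefers a connected pin with no encoder currently bound to it, falling back to the last connected pin (and an error only when none are connected at all). A standalone model of that allocation policy, with a plain user count standing in for the encoder-list walk:

#include <stdio.h>

struct pin { int connected; int users; };

/* Return the first connected pin with no users; if every connected pin
 * is busy, fall back to the last connected one, as the patch does. */
static struct pin *get_pin(struct pin *pins, int n)
{
	struct pin *fallback = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!pins[i].connected)
			continue;
		fallback = &pins[i];
		if (pins[i].users == 0)
			return &pins[i];
	}
	return fallback; /* NULL means no connected pins at all */
}

int main(void)
{
	struct pin pins[] = { {1, 1}, {1, 0}, {0, 0} };

	printf("picked pin %ld\n", (long)(get_pin(pins, 3) - pins)); /* 1 */
	return 0;
}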
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
case 44: /* hdmi */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (rdev->family == CHIP_ARUBA) {
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ if (ring->ring_size)
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ if (!r)
+ r = vce_v1_0_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ }
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
+ if (rdev->family == CHIP_ARUBA)
+ radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
break;
case 1:
- if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
break;
case 4:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
break;
case 5:
- if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
break;
case 10:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
break;
case 12:
- if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
case 21: /* hdmi */
switch (src_data) {
case 4:
- if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
- }
+ if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
struct drm_buf *buf;
u32 *buffer;
const u8 __user *data;
- int size, pass_size;
+ unsigned int size, pass_size;
u64 src_offset, dst_offset;
if (!radeon_check_offset(dev_priv, tex->offset)) {
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..2c02e99b5f95 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ int pin_count = 0;
+
+ if (!pin)
+ return;
+
+ if (rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ if (radeon_encoder_is_digital(encoder)) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ dig = radeon_encoder->enc_priv;
+ if (dig->pin == pin)
+ pin_count++;
+ }
+ }
+
+ if ((pin_count > 1) && (enable_mask == 0))
+ return;
+ }
+
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct cea_sad *sads;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
}
BUG_ON(!sads);
- radeon_encoder = to_radeon_encoder(encoder);
-
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
u8 *sadb = NULL;
int sad_count;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
- sad_count = drm_edid_to_speaker_allocation(
- radeon_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+ &sadb);
if (sad_count < 0) {
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
}
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct radeon_encoder *radeon_encoder;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = 0;
-
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
+ if (!connector)
return;
- }
-
- radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
}
void radeon_audio_detect(struct drm_connector *connector,
+ struct drm_encoder *encoder,
enum drm_connector_status status)
{
- struct radeon_device *rdev;
- struct radeon_encoder *radeon_encoder;
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
- if (!connector || !connector->encoder)
+ if (!radeon_audio_chipset_supported(rdev))
return;
- rdev = connector->encoder->dev->dev_private;
-
- if (!radeon_audio_chipset_supported(rdev))
+ if (!radeon_encoder_is_digital(encoder))
return;
- radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
-
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
radeon_encoder->audio = rdev->audio.hdmi_funcs;
}
- dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ if (!dig->pin)
+ dig->pin = radeon_audio_get_pin(encoder);
+ radeon_audio_enable(rdev, dig->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
+ }
} else {
- radeon_audio_enable(rdev, dig->afmt->pin, 0);
- dig->afmt->pin = NULL;
+ radeon_audio_enable(rdev, dig->pin, 0);
+ dig->pin = NULL;
}
}
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
}
static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector = NULL;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
- list_for_each_entry(connector,
- &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- radeon_connector = to_radeon_connector(connector);
- break;
- }
- }
-
- if (!radeon_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return -ENOENT;
- }
+ if (!connector)
+ return -EINVAL;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -548,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
- if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
- } else {
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+ if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+ if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ }
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
@@ -563,8 +539,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
- if (dig && dig->afmt &&
- radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+ if (dig && dig->afmt && radeon_encoder->audio &&
+ radeon_encoder->audio->set_avi_packet)
radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
buffer, sizeof(buffer));
@@ -722,30 +698,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
- radeon_audio_set_mute(encoder, true);
+ if (!connector)
+ return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- radeon_audio_set_dto(encoder, mode->clock);
- radeon_audio_set_vbi_packet(encoder);
- radeon_hdmi_set_color_depth(encoder);
- radeon_audio_update_acr(encoder, mode->clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_set_mute(encoder, true);
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
- return;
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ radeon_audio_set_dto(encoder, mode->clock);
+ radeon_audio_set_vbi_packet(encoder);
+ radeon_hdmi_set_color_depth(encoder);
+ radeon_audio_update_acr(encoder, mode->clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
- radeon_audio_set_mute(encoder, false);
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+
+ radeon_audio_set_mute(encoder, false);
+ } else {
+ radeon_hdmi_set_color_depth(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +746,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- radeon_audio_write_speaker_allocation(encoder);
- radeon_audio_write_sad_regs(encoder);
- radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
- radeon_audio_set_audio_packet(encoder);
- radeon_audio_select_pin(encoder);
-
- if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ if (!connector)
return;
+
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+ if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+ radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+ else
+ radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+ if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+ return;
+ }
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
int radeon_audio_init(struct radeon_device *rdev);
void radeon_audio_detect(struct drm_connector *connector,
- enum drm_connector_status status);
+ struct drm_encoder *encoder,
+ enum drm_connector_status status);
u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
u32 offset, u32 reg);
void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..a9b01bcf7d0a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+ if (hss > lvds->native_mode.hdisplay)
+ hss = (10 - 1) * 8;
+
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
@@ -3382,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30ae)
return;
+ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..5a2cafb4f1bc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't try to start link training before we
+ * have the dpcd */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
@@ -1379,8 +1384,16 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && radeon_connector->use_digital) {
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+
+ encoder = connector_funcs->best_encoder(connector);
+ if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1730,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
- radeon_audio_detect(connector, ret);
+ if ((radeon_audio != 0) && encoder) {
+ radeon_connector_get_edid(connector);
+ radeon_audio_detect(connector, encoder, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(radeon_crtc->cursor_addr));
+ }
+
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
- (yorigin * 256)));
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+ radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+ yorigin * 256);
}
radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
return ret;
}
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- uint64_t gpu_addr;
- int ret;
-
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-
- return 0;
-
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
+ struct radeon_bo *robj;
int ret;
if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return -ENOENT;
}
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ radeon_bo_unreserve(robj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_crtc->cursor_hot_y = hot_y;
}
- ret = radeon_set_cursor(crtc, obj);
-
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
- ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
@@ -341,8 +327,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- if (radeon_crtc->cursor_bo != obj)
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
void radeon_cursor_reset(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int ret;
if (radeon_crtc->cursor_bo) {
radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
radeon_crtc->cursor_y);
- ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
- if (ret)
- DRM_ERROR("radeon_set_cursor returned %d, not showing "
- "cursor\n", ret);
- else
- radeon_show_cursor(crtc);
+ radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
}
/**
+ * radeon_gart_size_auto - determine a sensible default GART size according to ASIC family
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+ /* default to a larger gart size on newer asics */
+ if (family >= CHIP_TAHITI)
+ return 2048;
+ else if (family >= CHIP_RV770)
+ return 1024;
+ else
+ return 512;
+}
+
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
if (radeon_gart_size == -1) {
- /* default to a larger gart size on newer asics */
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- if (rdev->family >= CHIP_RV770)
- radeon_gart_size = 1024;
- else
- radeon_gart_size = 512;
+ radeon_gart_size = radeon_gart_size_auto(rdev->family);
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
- /* unpin the front buffers */
+ /* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ }
+
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_crtc *crtc;
int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
radeon_restore_bios_scratch_regs(rdev);
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ /* Only 27 bit offset for legacy cursor */
+ r = radeon_bo_pin_restricted(robj,
+ RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ?
+ 0 : 1 << 27,
+ &radeon_crtc->cursor_addr);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ radeon_bo_unreserve(robj);
+ }
+ }
+ }
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index fcbd60bb0349..3b0c229d7dcd 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
AUX_SW_WR_BYTES(bytes));
/* write the data header into the registers */
- /* request, addres, msg size */
- byte = (msg->request << 4);
+ /* request, address, msg size */
+ byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
WREG32(AUX_SW_DATA + aux_offset[instance],
AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be5cda..5e09c061847f 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -246,9 +246,10 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
{
DRM_DEBUG_KMS("\n");
+ return 0;
}
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
- drm_reinit_primary_mode_group(dev);
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
drm_connector_register(connector);
return connector;
@@ -303,14 +303,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_unregister(connector);
/* need to nuke the connector */
- mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
/* dpms off */
radeon_fb_remove_connector(rdev, connector);
drm_connector_cleanup(connector);
- mutex_unlock(&dev->mode_config.mutex);
- drm_reinit_primary_mode_group(dev);
-
+ drm_modeset_unlock_all(dev);
kfree(connector);
DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793ea8418..7214858ffcea 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@ static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = radeon_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
int ret;
unsigned long tmp;
@@ -250,25 +249,25 @@ static int radeonfb_create(struct drm_fb_helper *helper,
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = framebuffer_alloc(0, device);
- if (info == NULL) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unref;
}
info->par = rfbdev;
+ info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out_unref;
+ goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
- rfbdev->helper.fbdev = info;
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
@@ -288,11 +287,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
@@ -300,13 +294,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_unref;
- }
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unref;
+ goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
@@ -318,6 +306,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {
@@ -338,17 +328,10 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
- struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
- if (rfbdev->helper.fbdev) {
- info = rfbdev->helper.fbdev;
-
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ drm_fb_helper_release_fbi(&rfbdev->helper);
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa95a47e..c4777c8d0312 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
}
/**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
page_base += RADEON_GPU_PAGE_SIZE;
}
}
- mb();
- radeon_gart_tlb_flush(rdev);
+ if (rdev->gart.ptr) {
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..3dcc5733ff69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
if (robj->gem_base.import_attach)
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
}
@@ -428,7 +429,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -440,10 +440,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, true);
+
+ r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ if (r == 0)
+ r = -EBUSY;
+ else
+ r = 0;
+
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -471,6 +477,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
+ cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* we can race here at startup; some boards seem to trigger
+ * hotplug irqs when they shouldn't. */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index e476c331f3fa..9a4d69e59401 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -845,7 +845,8 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
break;
- case KGD_ENGINE_SDMA:
+ case KGD_ENGINE_SDMA1:
+ case KGD_ENGINE_SDMA2:
hdr = (const union radeon_firmware_header *)
rdev->sdma_fw->data;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
- struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -343,7 +342,6 @@ struct radeon_crtc {
int max_cursor_width;
int max_cursor_height;
uint32_t legacy_display_base_addr;
- uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
@@ -440,6 +438,7 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
+ struct r600_audio_pin *pin;
int active_mst_links;
};
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d4855c..d3024883b844 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
- radeon_mn_unregister(bo);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
@@ -420,7 +419,6 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
}
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
- mutex_lock(&rdev->ddev->struct_mutex);
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
@@ -428,8 +426,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference(&bo->gem_base);
- mutex_unlock(&rdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a8dd8c..05751f3f8444 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -253,7 +253,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
- mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
@@ -268,7 +267,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
/* needs a GPU reset dont reset here */
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
- mutex_unlock(&rdev->ddev->struct_mutex);
return;
}
}
@@ -304,7 +302,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
- mutex_unlock(&rdev->ddev->struct_mutex);
}
static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -1062,7 +1059,6 @@ force:
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
}
- mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
@@ -1113,7 +1109,6 @@ force:
done:
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
- mutex_unlock(&rdev->ddev->struct_mutex);
}
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533a49b8..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
if (bo_va->it.start || bo_va->it.last) {
- spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->vm_status)) {
- /* add a clone of the bo_va to clear the old address */
- struct radeon_bo_va *tmp;
- spin_unlock(&vm->status_lock);
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
- mutex_unlock(&vm->mutex);
- r = -ENOMEM;
- goto error_unreserve;
- }
- tmp->it.start = bo_va->it.start;
- tmp->it.last = bo_va->it.last;
- tmp->vm = vm;
- tmp->bo = radeon_bo_ref(bo_va->bo);
- spin_lock(&vm->status_lock);
- list_add(&tmp->vm_status, &vm->freed);
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ r = -ENOMEM;
+ goto error_unreserve;
}
- spin_unlock(&vm->status_lock);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
bo_va->it.start = 0;
bo_va->it.last = 0;
+ list_del_init(&bo_va->vm_status);
+ list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
if (soffset || eoffset) {
+ spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
- interval_tree_insert(&bo_va->it, &vm->va);
- spin_lock(&vm->status_lock);
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ interval_tree_insert(&bo_va->it, &vm->va);
}
bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
+ if (list_empty(&bo_va->vm_status) &&
+ (bo_va->it.start || bo_va->it.last))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
spin_unlock(&bo_va->vm->status_lock);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+
break;
case 1: /* D1 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+
break;
case 1: /* D2 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_vblank(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+
break;
case 1: /* D3 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_vblank(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+
break;
case 1: /* D4 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_vblank(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+
break;
case 1: /* D5 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- }
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_vblank(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+
break;
case 1: /* D6 vline */
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+
break;
case 1:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+
break;
case 2:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+
break;
case 3:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+
break;
case 4:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+
break;
case 5:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+
break;
case 6:
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 1\n");
+
break;
case 7:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 2\n");
+
break;
case 8:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 3\n");
+
break;
case 9:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 4\n");
+
break;
case 10:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 5\n");
+
break;
case 11:
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
- }
+ if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+ queue_dp = true;
+ DRM_DEBUG("IH: HPD_RX 6\n");
+
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1dbdf3230dae..787cd8fd897f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ 0, 0, 0, 0 },
};
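For context, each row in si_dpm_quirk_list keys a clock cap to a PCI vendor/device/subvendor/subdevice tuple; the new entry caps one Pitcairn board at 850 MHz/900 MHz, if radeon's usual 10 kHz clock units apply. A runnable sketch of how such a zero-terminated table is typically scanned — the find_quirk() helper is hypothetical, though the field names mirror si_dpm.c:

/* Stand-alone sketch of a zero-terminated quirk-table scan. */
#include <stdint.h>
#include <stdio.h>

struct si_dpm_quirk {
	uint32_t chip_vendor, chip_device;
	uint32_t subsys_vendor, subsys_device;
	uint32_t max_sclk, max_mclk; /* assumed 10 kHz units */
};

static const struct si_dpm_quirk quirks[] = {
	{ 0x1002, 0x6810, 0x174b, 0xe271, 85000, 90000 },
	{ 0, 0, 0, 0 }, /* zero chip_vendor terminates the table */
};

static const struct si_dpm_quirk *find_quirk(uint32_t v, uint32_t d,
					     uint32_t sv, uint32_t sd)
{
	const struct si_dpm_quirk *p = quirks;

	while (p->chip_vendor) {
		if (p->chip_vendor == v && p->chip_device == d &&
		    p->subsys_vendor == sv && p->subsys_device == sd)
			return p;
		++p;
	}
	return NULL;
}

int main(void)
{
	const struct si_dpm_quirk *q = find_quirk(0x1002, 0x6810,
						  0x174b, 0xe271);

	if (q)
		printf("cap sclk=%u mclk=%u (10 kHz units)\n",
		       (unsigned)q->max_sclk, (unsigned)q->max_mclk);
	return 0;
}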
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 65d6ba6621ac..48cb19949ca3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -496,7 +496,8 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
}
}
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 01b558fe3695..9a0c2911272a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
.pm = &rockchip_drm_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 77d52893d40f..002645bb5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *fb_helper = &private->fbdev_helper;
- drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f6fd94..f261512bb4a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
static struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
.fb_mmap = rockchip_fbdev_mmap,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -77,10 +77,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
private->fbdev_bo = &rk_obj->base;
- fbi = framebuffer_alloc(0, dev->dev);
- if (!fbi) {
- dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
- ret = -ENOMEM;
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ dev_err(dev->dev, "Failed to create framebuffer info.\n");
+ ret = PTR_ERR(fbi);
goto err_rockchip_gem_free_object;
}
@@ -89,21 +89,13 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(helper->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto err_framebuffer_release;
+ goto err_release_fbi;
}
- helper->fbdev = fbi;
-
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &rockchip_drm_fbdev_ops;
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret) {
- dev_err(dev->dev, "Failed to allocate color map.\n");
- goto err_drm_framebuffer_unref;
- }
-
fb = helper->fb;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
return 0;
-err_drm_framebuffer_unref:
- drm_framebuffer_unreference(helper->fb);
-err_framebuffer_release:
- framebuffer_release(fbi);
+err_release_fbi:
+ drm_fb_helper_release_fbi(helper);
err_rockchip_gem_free_object:
rockchip_gem_free_object(&rk_obj->base);
return ret;
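The conversion above replaces hand-rolled framebuffer_alloc()/fb_alloc_cmap()/framebuffer_release() with the drm_fb_helper_alloc_fbi()/drm_fb_helper_release_fbi() pair, so the error unwind collapses to a single label. A stand-alone sketch of the resulting unwind shape, with stubbed stand-ins for the helper calls:

/* Stand-alone sketch of the error-unwind shape after the conversion:
 * one resource, one label, unwound in reverse acquisition order.
 * All names below are stand-ins for the drm_fb_helper calls. */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_fbi(void)		{ return malloc(16); }
static void release_fbi(void *p)	{ free(p); }
static int create_framebuffer(void)	{ return -1; /* simulate failure */ }
static void free_gem_object(void)	{ puts("gem object freed"); }

static int fbdev_create(void)
{
	void *fbi;
	int ret;

	fbi = alloc_fbi();
	if (!fbi) {
		ret = -12; /* -ENOMEM */
		goto err_gem;
	}

	ret = create_framebuffer();
	if (ret)
		goto err_release_fbi;	/* mirrors the new err_release_fbi */

	return 0;

err_release_fbi:
	release_fbi(fbi);
err_gem:
	free_gem_object();
	return ret;
}

int main(void) { return fbdev_create() ? 1 : 0; }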
@@ -190,21 +180,8 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
helper = &private->fbdev_helper;
- if (helper->fbdev) {
- struct fb_info *info;
- int ret;
-
- info = helper->fbdev;
- ret = unregister_framebuffer(info);
- if (ret < 0)
- DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
- ret);
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_release_fbi(helper);
if (helper->fb)
drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eb2282cc4a56..a6d9104f7f15 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
&rk_obj->dma_attrs);
}
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+
{
+ int ret;
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
- unsigned long vm_size;
-
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vm_size = vma->vm_end - vma->vm_start;
- if (vm_size > obj->size)
- return -EINVAL;
+ /*
+ * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+ * the VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
- return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
obj->size, &rk_obj->dma_attrs);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
}
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_gem_object *obj;
- struct drm_vma_offset_node *node;
+ struct drm_device *drm = obj->dev;
int ret;
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
+ mutex_lock(&drm->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&drm->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ return rockchip_drm_gem_object_mmap(obj, vma);
+}
- node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (!node) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR("failed to find vma node.\n");
- return -EINVAL;
- } else if (!drm_vma_node_is_allowed(node, filp)) {
- mutex_unlock(&dev->struct_mutex);
- return -EACCES;
- }
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
- obj = container_of(node, struct drm_gem_object, vma_node);
- ret = rockchip_gem_mmap_buf(obj, vma);
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
- mutex_unlock(&dev->struct_mutex);
+ obj = vma->vm_private_data;
- return ret;
+ return rockchip_drm_gem_object_mmap(obj, vma);
}
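The refactor above makes both mmap entry points converge on one object-level helper: drm_gem_mmap()/drm_gem_mmap_obj() now do the VMA lookup and access checks that the driver used to open-code, and the driver part only clears VM_PFNMAP and hands off to dma_mmap_attrs(). A stand-alone sketch of that shape, with all drm/dma calls stubbed:

/* Stand-alone sketch: two public mmap entry points converge on one
 * object-level helper. All names are stand-ins for the drm/dma calls. */
#include <stdio.h>

struct gem_object { int size; };

static int generic_mmap_setup(struct gem_object **out)
{
	static struct gem_object obj = { 4096 };
	*out = &obj;		/* drm_gem_mmap(): lookup + access checks */
	return 0;
}

static int object_mmap(struct gem_object *obj)
{
	/* clear VM_PFNMAP here, then hand off to dma_mmap_attrs() */
	printf("mapping %d bytes via dma_mmap_attrs()\n", obj->size);
	return 0;
}

static int gem_mmap(void)			/* file-ops path */
{
	struct gem_object *obj;
	int ret = generic_mmap_setup(&obj);

	return ret ? ret : object_mmap(obj);
}

static int gem_mmap_buf(struct gem_object *obj)	/* prime/import path */
{
	/* drm_gem_mmap_obj() under struct_mutex, then the same helper */
	return object_mmap(obj);
}

int main(void)
{
	struct gem_object o = { 8192 };
	return gem_mmap() || gem_mmap_buf(&o);
}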
struct rockchip_gem_object *
@@ -199,13 +200,10 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret;
- mutex_lock(&dev->struct_mutex);
-
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -216,10 +214,9 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
out:
- drm_gem_object_unreference(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index dc65161d7cad..5d8ae5e49c44 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -50,6 +50,8 @@
#define VOP_WIN_SET(x, win, name, v) \
REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_SCL_SET(x, win, name, v) \
+ REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
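The new VOP_SCL_SET follows the same pattern as VOP_WIN_SET: each struct vop_reg names a register offset, mask, and shift, and REG_SET performs a masked read-modify-write into that field. A runnable sketch of that write, assuming a vop_reg-like layout (the RELAXED/NORMAL distinction — relaxed vs. ordered MMIO — is dropped here):

/* Stand-alone sketch of the masked-field write these VOP_*_SET macros
 * expand to; the struct layout is an assumption modeled on VOP_REG. */
#include <stdint.h>
#include <stdio.h>

struct vop_reg {
	uint32_t offset;	/* register offset */
	uint32_t mask;		/* field width, right-aligned */
	uint32_t shift;		/* field position */
};

static uint32_t regs[64];	/* stands in for the MMIO window */

static void reg_set(const struct vop_reg *r, uint32_t v)
{
	uint32_t old = regs[r->offset / 4];

	/* clear the field, then OR in the new value */
	regs[r->offset / 4] = (old & ~(r->mask << r->shift)) |
			      ((v & r->mask) << r->shift);
}

int main(void)
{
	/* e.g. scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16) */
	struct vop_reg scale_yrgb_y = { 0x10, 0xffff, 16 };

	reg_set(&scale_yrgb_y, 0x1234);
	printf("reg = 0x%08x\n", (unsigned)regs[4]); /* 0x12340000 */
	return 0;
}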
@@ -164,12 +166,43 @@ struct vop_ctrl {
struct vop_reg vpost_st_end;
};
+struct vop_scl_regs {
+ struct vop_reg cbcr_vsd_mode;
+ struct vop_reg cbcr_vsu_mode;
+ struct vop_reg cbcr_hsd_mode;
+ struct vop_reg cbcr_ver_scl_mode;
+ struct vop_reg cbcr_hor_scl_mode;
+ struct vop_reg yrgb_vsd_mode;
+ struct vop_reg yrgb_vsu_mode;
+ struct vop_reg yrgb_hsd_mode;
+ struct vop_reg yrgb_ver_scl_mode;
+ struct vop_reg yrgb_hor_scl_mode;
+ struct vop_reg line_load_mode;
+ struct vop_reg cbcr_axi_gather_num;
+ struct vop_reg yrgb_axi_gather_num;
+ struct vop_reg vsd_cbcr_gt2;
+ struct vop_reg vsd_cbcr_gt4;
+ struct vop_reg vsd_yrgb_gt2;
+ struct vop_reg vsd_yrgb_gt4;
+ struct vop_reg bic_coe_sel;
+ struct vop_reg cbcr_axi_gather_en;
+ struct vop_reg yrgb_axi_gather_en;
+
+ struct vop_reg lb_mode;
+ struct vop_reg scale_yrgb_x;
+ struct vop_reg scale_yrgb_y;
+ struct vop_reg scale_cbcr_x;
+ struct vop_reg scale_cbcr_y;
+};
+
struct vop_win_phy {
+ const struct vop_scl_regs *scl;
const uint32_t *data_formats;
uint32_t nformats;
struct vop_reg enable;
struct vop_reg format;
+ struct vop_reg rb_swap;
struct vop_reg act_info;
struct vop_reg dsp_info;
struct vop_reg dsp_st;
@@ -199,8 +232,12 @@ struct vop_data {
static const uint32_t formats_01[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -209,15 +246,49 @@ static const uint32_t formats_01[] = {
static const uint32_t formats_234[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+static const struct vop_scl_regs win_full_scl = {
+ .cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
+ .cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
+ .cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
+ .cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
+ .cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
+ .yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
+ .yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
+ .yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
+ .yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
+ .yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
+ .line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
+ .cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
+ .yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
+ .vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
+ .vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
+ .vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
+ .vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
+ .bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
+ .cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
+ .yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
+ .lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
+ .scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
static const struct vop_win_phy win01_data = {
+ .scl = &win_full_scl,
.data_formats = formats_01,
.nformats = ARRAY_SIZE(formats_01),
.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
.act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +305,7 @@ static const struct vop_win_phy win23_data = {
.nformats = ARRAY_SIZE(formats_234),
.enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
.dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +314,6 @@ static const struct vop_win_phy win23_data = {
.dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
-static const struct vop_win_phy cursor_data = {
- .data_formats = formats_234,
- .nformats = ARRAY_SIZE(formats_234),
- .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
- .format = VOP_REG(HWC_CTRL0, 0x7, 1),
- .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
-};
-
static const struct vop_ctrl ctrl_data = {
.standby = VOP_REG(SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -277,19 +340,25 @@ static const struct vop_reg_data vop_init_reg_table[] = {
{DSP_CTRL0, 0x00000000},
{WIN0_CTRL0, 0x00000080},
{WIN1_CTRL0, 0x00000080},
+ /* TODO: Win2/3 support the multiple-area function, but we haven't found
+ * a suitable way to use it yet, so let's just use them as ordinary windows
+ * with only area 0 enabled.
+ */
+ {WIN2_CTRL0, 0x00000010},
+ {WIN3_CTRL0, 0x00000010},
};
/*
* Note: rk3288 has a dedicated 'cursor' window, however, that window requires
* special support to get alpha blending working. For now, just use overlay
- * window 1 for the drm cursor.
+ * window 3 for the drm cursor.
+ *
*/
static const struct vop_win_data rk3288_vop_win_data[] = {
{ .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
};
static const struct vop_data rk3288_vop = {
@@ -352,15 +421,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
}
}
+static bool has_rb_swapped(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
+ default:
+ return false;
+ }
+}
+
static enum vop_data_format vop_convert_format(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return VOP_FMT_ARGB8888;
case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
return VOP_FMT_RGB888;
case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
return VOP_FMT_RGB565;
case DRM_FORMAT_NV12:
return VOP_FMT_YUV420SP;
@@ -374,16 +460,149 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
+static bool is_yuv_support(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool is_alpha_support(uint32_t format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
return true;
default:
return false;
}
}
+static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
+ uint32_t dst, bool is_horizontal,
+ int vsu_mode, int *vskiplines)
+{
+ uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+
+ if (is_horizontal) {
+ if (mode == SCALE_UP)
+ val = GET_SCL_FT_BIC(src, dst);
+ else if (mode == SCALE_DOWN)
+ val = GET_SCL_FT_BILI_DN(src, dst);
+ } else {
+ if (mode == SCALE_UP) {
+ if (vsu_mode == SCALE_UP_BIL)
+ val = GET_SCL_FT_BILI_UP(src, dst);
+ else
+ val = GET_SCL_FT_BIC(src, dst);
+ } else if (mode == SCALE_DOWN) {
+ if (vskiplines) {
+ *vskiplines = scl_get_vskiplines(src, dst);
+ val = scl_get_bili_dn_vskip(src, dst,
+ *vskiplines);
+ } else {
+ val = GET_SCL_FT_BILI_DN(src, dst);
+ }
+ }
+ }
+
+ return val;
+}
+
+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w,
+ uint32_t dst_h, uint32_t pixel_format)
+{
+ uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
+ uint16_t cbcr_hor_scl_mode = SCALE_NONE;
+ uint16_t cbcr_ver_scl_mode = SCALE_NONE;
+ int hsub = drm_format_horz_chroma_subsampling(pixel_format);
+ int vsub = drm_format_vert_chroma_subsampling(pixel_format);
+ bool is_yuv = is_yuv_support(pixel_format);
+ uint16_t cbcr_src_w = src_w / hsub;
+ uint16_t cbcr_src_h = src_h / vsub;
+ uint16_t vsu_mode;
+ uint16_t lb_mode;
+ uint32_t val;
+ int vskiplines;
+
+ if (dst_w > 3840) {
+ DRM_ERROR("Maximum destination width (3840) exceeded\n");
+ return;
+ }
+
+ yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+ yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+
+ if (is_yuv) {
+ cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+ cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+ if (cbcr_hor_scl_mode == SCALE_DOWN)
+ lb_mode = scl_vop_cal_lb_mode(dst_w, true);
+ else
+ lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
+ } else {
+ if (yrgb_hor_scl_mode == SCALE_DOWN)
+ lb_mode = scl_vop_cal_lb_mode(dst_w, false);
+ else
+ lb_mode = scl_vop_cal_lb_mode(src_w, false);
+ }
+
+ VOP_SCL_SET(vop, win, lb_mode, lb_mode);
+ if (lb_mode == LB_RGB_3840X2) {
+ if (yrgb_ver_scl_mode != SCALE_NONE) {
+ DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+ return;
+ }
+ if (cbcr_ver_scl_mode != SCALE_NONE) {
+ DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+ return;
+ }
+ vsu_mode = SCALE_UP_BIL;
+ } else if (lb_mode == LB_RGB_2560X4) {
+ vsu_mode = SCALE_UP_BIL;
+ } else {
+ vsu_mode = SCALE_UP_BIC;
+ }
+
+ val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
+ true, 0, NULL);
+ VOP_SCL_SET(vop, win, scale_yrgb_x, val);
+ val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
+ false, vsu_mode, &vskiplines);
+ VOP_SCL_SET(vop, win, scale_yrgb_y, val);
+
+ VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4);
+ VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2);
+
+ VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
+ VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
+ VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
+ VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
+ VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode);
+ if (is_yuv) {
+ val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
+ dst_w, true, 0, NULL);
+ VOP_SCL_SET(vop, win, scale_cbcr_x, val);
+ val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
+ dst_h, false, vsu_mode, &vskiplines);
+ VOP_SCL_SET(vop, win, scale_cbcr_y, val);
+
+ VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4);
+ VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2);
+ VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
+ VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
+ VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
+ VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
+ VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode);
+ }
+}
+
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
unsigned long flags;
@@ -458,6 +677,7 @@ static void vop_enable(struct drm_crtc *crtc)
goto err_disable_aclk;
}
+ memcpy(vop->regs, vop->regsbak, vop->len);
/*
* At here, vop clock & iommu is enable, R/W vop regs would be safe.
*/
@@ -578,16 +798,22 @@ static int vop_update_plane_event(struct drm_plane *plane,
struct vop *vop = to_vop(crtc);
struct drm_gem_object *obj;
struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *uv_obj;
+ struct rockchip_gem_object *rk_uv_obj;
unsigned long offset;
unsigned int actual_w;
unsigned int actual_h;
unsigned int dsp_stx;
unsigned int dsp_sty;
unsigned int y_vir_stride;
+ unsigned int uv_vir_stride = 0;
dma_addr_t yrgb_mst;
+ dma_addr_t uv_mst = 0;
enum vop_data_format format;
uint32_t val;
bool is_alpha;
+ bool rb_swap;
+ bool is_yuv;
bool visible;
int ret;
struct drm_rect dest = {
@@ -608,11 +834,15 @@ static int vop_update_plane_event(struct drm_plane *plane,
.y2 = crtc->mode.vdisplay,
};
bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+ int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+ DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+ DRM_PLANE_HELPER_NO_SCALING;
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ min_scale,
+ max_scale,
can_position, false, &visible);
if (ret)
return ret;
@@ -621,6 +851,9 @@ static int vop_update_plane_event(struct drm_plane *plane,
return 0;
is_alpha = is_alpha_support(fb->pixel_format);
+ rb_swap = has_rb_swapped(fb->pixel_format);
+ is_yuv = is_yuv_support(fb->pixel_format);
+
format = vop_convert_format(fb->pixel_format);
if (format < 0)
return format;
@@ -633,19 +866,46 @@ static int vop_update_plane_event(struct drm_plane *plane,
rk_obj = to_rockchip_obj(obj);
+ if (is_yuv) {
+ /*
+ * src.x1 can be odd after clipping, but the YUV plane start
+ * point must be aligned to 2 pixels.
+ */
+ val = (src.x1 >> 16) % 2;
+ src.x1 += val << 16;
+ src.x2 += val << 16;
+ }
+
actual_w = (src.x2 - src.x1) >> 16;
actual_h = (src.y2 - src.y1) >> 16;
- crtc_x = max(0, crtc_x);
- crtc_y = max(0, crtc_y);
- dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
- dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+ dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
+ dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
- offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+ offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
offset += (src.y1 >> 16) * fb->pitches[0];
- yrgb_mst = rk_obj->dma_addr + offset;
- y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+ yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+ y_vir_stride = fb->pitches[0] >> 2;
+
+ if (is_yuv) {
+ int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+
+ uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+ if (!uv_obj) {
+ DRM_ERROR("fail to get uv object from framebuffer\n");
+ return -EINVAL;
+ }
+ rk_uv_obj = to_rockchip_obj(uv_obj);
+ uv_vir_stride = fb->pitches[1] >> 2;
+
+ offset = (src.x1 >> 16) * bpp / hsub;
+ offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
+
+ uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
+ }
/*
* If this plane update changes the plane's framebuffer, (or more
@@ -682,13 +942,27 @@ static int vop_update_plane_event(struct drm_plane *plane,
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+ if (is_yuv) {
+ VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
+ VOP_WIN_SET(vop, win, uv_mst, uv_mst);
+ }
+
+ if (win->phy->scl)
+ scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
+ dest.x2 - dest.x1, dest.y2 - dest.y1,
+ fb->pixel_format);
+
val = (actual_h - 1) << 16;
val |= (actual_w - 1) & 0xffff;
VOP_WIN_SET(vop, win, act_info, val);
+
+ val = (dest.y2 - dest.y1 - 1) << 16;
+ val |= (dest.x2 - dest.x1 - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_info, val);
val = (dsp_sty - 1) << 16;
val |= (dsp_stx - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_st, val);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
if (is_alpha) {
VOP_WIN_SET(vop, win, dst_alpha_ctl,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 63e9b3a084c5..a2d4ddb896fa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -198,4 +198,92 @@ enum factor_mode {
ALPHA_SRC_GLOBAL,
};
+enum scale_mode {
+ SCALE_NONE = 0x0,
+ SCALE_UP = 0x1,
+ SCALE_DOWN = 0x2
+};
+
+enum lb_mode {
+ LB_YUV_3840X5 = 0x0,
+ LB_YUV_2560X8 = 0x1,
+ LB_RGB_3840X2 = 0x2,
+ LB_RGB_2560X4 = 0x3,
+ LB_RGB_1920X5 = 0x4,
+ LB_RGB_1280X8 = 0x5
+};
+
+enum scale_up_mode {
+ SCALE_UP_BIL = 0x0,
+ SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+ SCALE_DOWN_BIL = 0x0,
+ SCALE_DOWN_AVG = 0x1
+};
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+#define SCL_FT_DEFAULT_FIXPOINT_SHIFT 12
+#define SCL_MAX_VSKIPLINES 4
+#define MIN_SCL_FT_AFTER_VSKIP 1
+
+static inline uint16_t scl_cal_scale(int src, int dst, int shift)
+{
+ return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
+}
+
+#define GET_SCL_FT_BILI_DN(src, dst) scl_cal_scale(src, dst, 12)
+#define GET_SCL_FT_BILI_UP(src, dst) scl_cal_scale(src, dst, 16)
+#define GET_SCL_FT_BIC(src, dst) scl_cal_scale(src, dst, 16)
+
+static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
+ int vskiplines)
+{
+ int act_height;
+
+ act_height = (src_h + vskiplines - 1) / vskiplines;
+
+ return GET_SCL_FT_BILI_DN(act_height, dst_h);
+}
+
+static inline enum scale_mode scl_get_scl_mode(int src, int dst)
+{
+ if (src < dst)
+ return SCALE_UP;
+ else if (src > dst)
+ return SCALE_DOWN;
+
+ return SCALE_NONE;
+}
+
+static inline int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
+{
+ uint32_t vskiplines;
+
+ for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
+ if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
+ break;
+
+ return vskiplines;
+}
+
+static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
+{
+ int lb_mode;
+
+ if (width > 2560)
+ lb_mode = LB_RGB_3840X2;
+ else if (width > 1920)
+ lb_mode = LB_RGB_2560X4;
+ else if (!is_yuv)
+ lb_mode = LB_RGB_1920X5;
+ else if (width > 1280)
+ lb_mode = LB_YUV_3840X5;
+ else
+ lb_mode = LB_YUV_2560X8;
+
+ return lb_mode;
+}
+
#endif /* _ROCKCHIP_DRM_VOP_H */
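To make the fixed-point helpers above concrete, the stand-alone program below re-derives the 4.12 bilinear-down factor for a 1920-to-720 vertical downscale, first without and then with the vskiplines optimization (skipping every other line leaves 960 lines to scale):

/* Stand-alone re-derivation of the fixed-point helpers above, showing
 * the numbers they produce for a 1920 -> 720 downscale. */
#include <stdint.h>
#include <stdio.h>

#define SCL_MAX_VSKIPLINES 4
#define MIN_SCL_FT_AFTER_VSKIP 1

static uint16_t scl_cal_scale(int src, int dst, int shift)
{
	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
}

static int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
{
	uint32_t vskiplines;

	for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
		if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
			break;
	return vskiplines;
}

int main(void)
{
	int src = 1920, dst = 720;
	int vskip = scl_get_vskiplines(src, dst);	/* 2 */
	int act_h = (src + vskip - 1) / vskip;		/* 960 */

	/* 4.12 fixed-point bilinear-down factor, no line skipping */
	printf("plain factor: %u\n", (unsigned)scl_cal_scale(src, dst, 12));
	/* with vskiplines=2, only the remaining 960 lines are scaled */
	printf("vskip=%d factor: %u\n", vskip,
	       (unsigned)scl_cal_scale(act_h, dst, 12));	/* 5460 */
	return 0;
}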
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 859ccb658601..e9272b0a8592 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -248,7 +248,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
lcdc_write(sdev, LDDDSR, value);
/* Setup planes. */
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+ drm_for_each_legacy_plane(plane, dev) {
if (plane->crtc == crtc)
shmob_drm_plane_setup(plane);
}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..e27490b492a5 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
sticompositor-y := \
- sti_layer.o \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
sti_cursor.o \
sti_compositor.o \
- sti_drm_crtc.o \
- sti_drm_plane.o
+ sti_crtc.o \
+ sti_plane.o
stihdmi-y := sti_hdmi.o \
sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
sticompositor.o \
sti_hqvdp.o \
stidvo.o \
- sti_drm_drv.o
+ sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..c652627b1bca 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
#include <drm/drmP.h>
#include "sti_compositor.h"
-#include "sti_drm_crtc.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
+#include "sti_crtc.h"
+#include "sti_cursor.h"
+#include "sti_drv.h"
#include "sti_gdp.h"
+#include "sti_plane.h"
+#include "sti_vid.h"
#include "sti_vtg.h"
/*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
- {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+ {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
@@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
},
};
-static int sti_compositor_init_subdev(struct sti_compositor *compo,
- struct sti_compositor_subdev_descriptor *desc,
- unsigned int array_size)
+static int sti_compositor_bind(struct device *dev,
+ struct device *master,
+ void *data)
{
- unsigned int i, mixer_id = 0, layer_id = 0;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
+ struct sti_private *dev_priv = drm_dev->dev_private;
+ struct drm_plane *cursor = NULL;
+ struct drm_plane *primary = NULL;
+ struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
+ unsigned int array_size = compo->data.nb_subdev;
+
+ dev_priv->compo = compo;
+ /* Register mixer subdev and video subdev first */
for (i = 0; i < array_size; i++) {
switch (desc[i].type) {
+ case STI_VID_SUBDEV:
+ compo->vid[vid_id++] =
+ sti_vid_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
- case STI_VID_SUBDEV:
case STI_CURSOR_SUBDEV:
- compo->layer[layer_id++] =
- sti_layer_create(compo->dev, desc[i].id,
- compo->regs + desc[i].offset);
+ /* Nothing to do, wait for the second round */
break;
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
}
-
}
- compo->nb_mixers = mixer_id;
- compo->nb_layers = layer_id;
-
- return 0;
-}
-
-static int sti_compositor_bind(struct device *dev, struct device *master,
- void *data)
-{
- struct sti_compositor *compo = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- unsigned int i, crtc = 0, plane = 0;
- struct sti_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_plane *cursor = NULL;
- struct drm_plane *primary = NULL;
- dev_priv->compo = compo;
-
- for (i = 0; i < compo->nb_layers; i++) {
- if (compo->layer[i]) {
- enum sti_layer_desc desc = compo->layer[i]->desc;
- enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
- enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+ /* Register the other subdevs, create crtc and planes */
+ for (i = 0; i < array_size; i++) {
+ enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
- if (crtc < compo->nb_mixers)
- plane_type = DRM_PLANE_TYPE_PRIMARY;
+ if (crtc_id < mixer_id)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
- switch (type) {
- case STI_CUR:
- cursor = sti_drm_plane_init(drm_dev,
- compo->layer[i],
- 1, DRM_PLANE_TYPE_CURSOR);
- break;
- case STI_GDP:
- case STI_VID:
- primary = sti_drm_plane_init(drm_dev,
- compo->layer[i],
- (1 << compo->nb_mixers) - 1,
- plane_type);
- plane++;
+ switch (desc[i].type) {
+ case STI_MIXER_MAIN_SUBDEV:
+ case STI_MIXER_AUX_SUBDEV:
+ case STI_VID_SUBDEV:
+ /* Nothing to do, already done at the first round */
+ break;
+ case STI_CURSOR_SUBDEV:
+ cursor = sti_cursor_create(drm_dev, compo->dev,
+ desc[i].id,
+ compo->regs + desc[i].offset,
+ 1);
+ if (!cursor) {
+ DRM_ERROR("Can't create CURSOR plane\n");
break;
- case STI_BCK:
- case STI_VDP:
+ }
+ break;
+ case STI_GPD_SUBDEV:
+ primary = sti_gdp_create(drm_dev, compo->dev,
+ desc[i].id,
+ compo->regs + desc[i].offset,
+ (1 << mixer_id) - 1,
+ plane_type);
+ if (!primary) {
+ DRM_ERROR("Can't create GDP plane\n");
break;
}
+ break;
+ default:
+ DRM_ERROR("Unknown subdev compoment type\n");
+ return 1;
+ }
- /* The first planes are reserved for primary planes*/
- if (crtc < compo->nb_mixers && primary) {
- sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
- primary, cursor);
- crtc++;
- cursor = NULL;
- primary = NULL;
- }
+ /* The first planes are reserved for primary planes*/
+ if (crtc_id < mixer_id && primary) {
+ sti_crtc_init(drm_dev, compo->mixer[crtc_id],
+ primary, cursor);
+ crtc_id++;
+ cursor = NULL;
+ primary = NULL;
}
}
- drm_vblank_init(drm_dev, crtc);
+ drm_vblank_init(drm_dev, crtc_id);
/* Allow usage of vblank without having to call drm_irq_install */
drm_dev->irq_enabled = 1;
- DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
- crtc, plane);
- DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
-
return 0;
}
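The bind rewrite above splits subdevice setup into two passes: mixers and vids are registered first, then GDP/cursor planes, because the plane loop needs the final mixer count to decide which planes become primaries and to anchor one CRTC per mixer. A runnable toy model of that control flow — the descriptor contents are invented:

/* Stand-alone toy model of the two-pass bind above. */
#include <stdio.h>

enum subdev_type { MIXER_SUBDEV, VID_SUBDEV, GDP_SUBDEV, CURSOR_SUBDEV };

struct subdev_desc { enum subdev_type type; int id; };

int main(void)
{
	static const struct subdev_desc desc[] = {
		{ GDP_SUBDEV, 0 }, { GDP_SUBDEV, 1 }, { VID_SUBDEV, 0 },
		{ MIXER_SUBDEV, 0 }, { MIXER_SUBDEV, 1 }, { CURSOR_SUBDEV, 0 },
	};
	const int n = sizeof(desc) / sizeof(desc[0]);
	int i, mixer_id = 0, crtc_id = 0;

	/* Pass 1: register mixers and vids; remember the mixer count */
	for (i = 0; i < n; i++) {
		if (desc[i].type == MIXER_SUBDEV)
			mixer_id++;
		else if (desc[i].type == VID_SUBDEV)
			printf("vid %d registered\n", desc[i].id);
	}

	/* Pass 2: planes; the first mixer_id primaries each anchor a CRTC */
	for (i = 0; i < n; i++) {
		if (desc[i].type != GDP_SUBDEV && desc[i].type != CURSOR_SUBDEV)
			continue;
		printf("plane %d: %s\n", desc[i].id,
		       crtc_id < mixer_id ? "primary" : "overlay/cursor");
		if (crtc_id < mixer_id)
			crtc_id++;	/* CRTC created around this primary */
	}
	printf("%d CRTCs for %d mixers\n", crtc_id, mixer_id);
	return 0;
}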
@@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *vtg_np;
struct sti_compositor *compo;
struct resource *res;
- int err;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
if (!compo) {
@@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
return -ENOMEM;
}
compo->dev = dev;
- compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+ compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
/* populate data structure depending on compatibility */
BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
- /* Initialize compositor subdevices */
- err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
- compo->data.nb_subdev);
- if (err)
- return err;
-
platform_set_drvdata(pdev, compo);
return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..1a4a73dab11e 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
#include <linux/clk.h>
#include <linux/kernel.h>
-#include "sti_layer.h"
#include "sti_mixer.h"
+#include "sti_plane.h"
#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
-#define STI_MAX_LAYER 8
#define STI_MAX_MIXER 2
+#define STI_MAX_VID 1
enum sti_compositor_subdev_type {
STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
* @rst_main: reset control of the main path
* @rst_aux: reset control of the aux path
* @mixer: array of mixers
+ * @vid: array of vids
* @vtg_main: vtg for main data path
 * @vtg_aux: vtg for auxiliary data path
- * @layer: array of layers
- * @nb_mixers: number of mixers for this compositor
- * @nb_layers: number of layers (GDP,VID,...) for this compositor
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
struct reset_control *rst_main;
struct reset_control *rst_aux;
struct sti_mixer *mixer[STI_MAX_MIXER];
+ struct sti_vid *vid[STI_MAX_VID];
struct sti_vtg *vtg_main;
struct sti_vtg *vtg_aux;
- struct sti_layer *layer[STI_MAX_LAYER];
- int nb_mixers;
- int nb_layers;
struct notifier_block vtg_vblank_nb;
};
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 6b641c5a2ec7..018ffc970e96 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -15,22 +15,20 @@
#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
+#include "sti_vid.h"
#include "sti_vtg.h"
-static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- mixer->enabled = true;
+ DRM_DEBUG_DRIVER("\n");
+
+ mixer->status = STI_MIXER_READY;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
- sti_mixer_clear_all_layers(mixer);
+ drm_crtc_vblank_on(crtc);
}
-static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+static void sti_crtc_disabling(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct device *dev = mixer->dev;
- struct sti_compositor *compo = dev_get_drvdata(dev);
- struct sti_layer *layer;
-
- if ((!mixer || !compo)) {
- DRM_ERROR("Can not find mixer or compositor)\n");
- return;
- }
- /* get GDP which is reserved to the CRTC FB */
- layer = to_sti_layer(crtc->primary);
- if (layer)
- sti_layer_commit(layer);
- else
- DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
-
- /* Enable layer on mixer */
- if (sti_mixer_set_layer_status(mixer, layer, true))
- DRM_ERROR("Can not enable layer at mixer\n");
+ DRM_DEBUG_DRIVER("\n");
- drm_crtc_vblank_on(crtc);
+ mixer->status = STI_MIXER_DISABLING;
}
-static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
/* accept the provided drm_display_mode, do not fix it up */
return true;
}
static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
+sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
@@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
res = sti_mixer_active_video_area(mixer, &crtc->mode);
if (res) {
- DRM_ERROR("Can not set active video area\n");
+ DRM_ERROR("Can't set active video area\n");
return -EINVAL;
}
return res;
}
-static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+static void sti_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- if (!mixer->enabled)
- return;
-
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
/* Disable Background */
@@ -154,17 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
- mixer->enabled = false;
+ mixer->status = STI_MIXER_DISABLED;
}
static void
-sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- sti_drm_crtc_prepare(crtc);
- sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+ sti_crtc_enable(crtc);
+ sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
+static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -178,46 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc)
}
}
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *drm_dev = crtc->dev;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+ struct drm_plane *p;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* perform plane actions */
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ switch (plane->status) {
+ case STI_PLANE_UPDATED:
+ /* update planes tag as updated */
+ DRM_DEBUG_DRIVER("update plane %s\n",
+ sti_plane_to_str(plane));
+
+ if (sti_mixer_set_plane_depth(mixer, plane)) {
+ DRM_ERROR("Cannot set plane %s depth\n",
+ sti_plane_to_str(plane));
+ break;
+ }
+
+ if (sti_mixer_set_plane_status(mixer, plane, true)) {
+ DRM_ERROR("Cannot enable plane %s at mixer\n",
+ sti_plane_to_str(plane));
+ break;
+ }
+
+ /* if plane is HQVDP_0 then commit the vid[0] */
+ if (plane->desc == STI_HQVDP_0)
+ sti_vid_commit(compo->vid[0], p->state);
+
+ plane->status = STI_PLANE_READY;
+
+ break;
+ case STI_PLANE_DISABLING:
+ /* disabling sequence for planes tag as disabling */
+ DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
+ sti_plane_to_str(plane));
+
+ if (sti_mixer_set_plane_status(mixer, plane, false)) {
+ DRM_ERROR("Cannot disable plane %s at mixer\n",
+ sti_plane_to_str(plane));
+ continue;
+ }
+
+ if (plane->desc == STI_CURSOR)
+ /* tag plane status for disabled */
+ plane->status = STI_PLANE_DISABLED;
+ else
+ /* tag plane status for flushing */
+ plane->status = STI_PLANE_FLUSHING;
+
+ /* if plane is HQVDP_0 then disable the vid[0] */
+ if (plane->desc == STI_HQVDP_0)
+ sti_vid_disable(compo->vid[0]);
+
+ break;
+ default:
+ /* Other status case are not handled */
+ break;
+ }
+ }
}
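The new atomic_flush drives a small per-plane status machine: UPDATED planes get their depth set and are enabled on the mixer, becoming READY; DISABLING planes become FLUSHING (or DISABLED immediately for the cursor) so that the vblank callback further down can shut the mixer off once every plane has drained. A stand-alone sketch of just the transitions; the enum values mirror the STI_PLANE_* names, everything else is the demo's own:

/* Stand-alone sketch of the plane status machine driven above. */
#include <stdio.h>

enum plane_status { DISABLED, UPDATED, READY, DISABLING, FLUSHING };

static enum plane_status flush_one(enum plane_status s, int is_cursor)
{
	switch (s) {
	case UPDATED:
		/* depth + mixer enable succeeded -> ready to scan out */
		return READY;
	case DISABLING:
		/* the cursor drops straight to DISABLED; other planes wait
		 * out the next vblank in FLUSHING before the mixer may be
		 * shut off */
		return is_cursor ? DISABLED : FLUSHING;
	default:
		return s;	/* other states untouched by the flush */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       flush_one(UPDATED, 0),	/* READY */
	       flush_one(DISABLING, 1),	/* DISABLED */
	       flush_one(DISABLING, 0));/* FLUSHING */
	return 0;
}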
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
- .dpms = sti_drm_crtc_dpms,
- .prepare = sti_drm_crtc_prepare,
- .commit = sti_drm_crtc_commit,
- .mode_fixup = sti_drm_crtc_mode_fixup,
+ .enable = sti_crtc_enable,
+ .disable = sti_crtc_disabling,
+ .mode_fixup = sti_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
- .mode_set_nofb = sti_drm_crtc_mode_set_nofb,
+ .mode_set_nofb = sti_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
- .disable = sti_drm_crtc_disable,
- .atomic_begin = sti_drm_atomic_begin,
- .atomic_flush = sti_drm_atomic_flush,
+ .atomic_begin = sti_crtc_atomic_begin,
+ .atomic_flush = sti_crtc_atomic_flush,
};
-static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+static void sti_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
drm_crtc_cleanup(crtc);
}
-static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val)
+static int sti_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
{
DRM_DEBUG_KMS("\n");
return 0;
}
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct drm_device *drm_dev;
struct sti_compositor *compo =
container_of(nb, struct sti_compositor, vtg_vblank_nb);
int *crtc = data;
unsigned long flags;
- struct sti_drm_private *priv;
+ struct sti_private *priv;
drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
priv = drm_dev->dev_private;
@@ -233,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
spin_lock_irqsave(&drm_dev->event_lock, flags);
if (compo->mixer[*crtc]->pending_event) {
drm_send_vblank_event(drm_dev, -1,
- compo->mixer[*crtc]->pending_event);
+ compo->mixer[*crtc]->pending_event);
drm_vblank_put(drm_dev, *crtc);
compo->mixer[*crtc]->pending_event = NULL;
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
+ struct drm_plane *p;
+
+ /* Disable mixer only if all overlay planes (GDP and VDP)
+ * are disabled */
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
+ if (plane->status != STI_PLANE_DISABLED)
+ return 0;
+ }
+ sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
+ }
+
return 0;
}
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
- struct sti_drm_private *dev_priv = dev->dev_private;
+ struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ DRM_DEBUG_DRIVER("\n");
+
if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
vtg_vblank_nb, crtc)) {
@@ -257,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
return 0;
}
-EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+EXPORT_SYMBOL(sti_crtc_enable_vblank);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
{
- struct sti_drm_private *priv = dev->dev_private;
+ struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
@@ -273,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
/* free the resources of the pending requests */
if (compo->mixer[crtc]->pending_event) {
- drm_vblank_put(dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
}
-EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+EXPORT_SYMBOL(sti_crtc_disable_vblank);
static struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .destroy = sti_drm_crtc_destroy,
- .set_property = sti_drm_crtc_set_property,
+ .destroy = sti_crtc_destroy,
+ .set_property = sti_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+bool sti_crtc_is_main(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -298,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
-EXPORT_SYMBOL(sti_drm_crtc_is_main);
+EXPORT_SYMBOL(sti_crtc_is_main);
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
- struct drm_plane *primary, struct drm_plane *cursor)
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor)
{
struct drm_crtc *crtc = &mixer->drm_crtc;
int res;
res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
- &sti_crtc_funcs);
+ &sti_crtc_funcs);
if (res) {
- DRM_ERROR("Can not initialze CRTC\n");
+ DRM_ERROR("Can't initialze CRTC\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 000000000000..51963e6ddbe7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CRTC_H_
+#define _STI_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor);
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..dd1032195051 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
#include "sti_cursor.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
/* Registers */
@@ -42,15 +48,19 @@ struct dma_pixmap {
/**
* STI Cursor structure
*
- * @layer: layer structure
- * @width: cursor width
- * @height: cursor height
- * @clut: color look up table
- * @clut_paddr: color look up table physical address
- * @pixmap: pixmap dma buffer (clut8-format cursor)
+ * @sti_plane: sti_plane structure
+ * @dev: driver device
+ * @regs: cursor registers
+ * @width: cursor width
+ * @height: cursor height
+ * @clut: color look up table
+ * @clut_paddr: color look up table physical address
+ * @pixmap: pixmap dma buffer (clut8-format cursor)
*/
struct sti_cursor {
- struct sti_layer layer;
+ struct sti_plane plane;
+ struct device *dev;
+ void __iomem *regs;
unsigned int width;
unsigned int height;
unsigned short *clut;
@@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
DRM_FORMAT_ARGB8888,
};
-#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
-
-static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
-{
- return cursor_supported_formats;
-}
-
-static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(cursor_supported_formats);
-}
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
-static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- u32 *src = layer->vaddr;
u8 *dst = cursor->pixmap.base;
unsigned int i, j;
u32 a, r, g, b;
@@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
}
}
-static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_cursor_init(struct sti_cursor *cursor)
{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- struct drm_display_mode *mode = layer->mode;
+ unsigned short *base = cursor->clut;
+ unsigned int a, r, g, b;
+
+ /* Assign CLUT values, ARGB444 format */
+ for (a = 0; a < 4; a++)
+ for (r = 0; r < 4; r++)
+ for (g = 0; g < 4; g++)
+ for (b = 0; b < 4; b++)
+ *base++ = (a * 5) << 12 |
+ (r * 5) << 8 |
+ (g * 5) << 4 |
+ (b * 5);
+}
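The relocated sti_cursor_init() builds the 256-entry ARGB4444 CLUT by widening each 2-bit component c to 4 bits as c*5, i.e. 0, 5, 10, 15 — an even spread across the 4-bit range. A runnable check of that generation, using the same loop as above:

/* Stand-alone check of the CLUT generation above: each 2-bit component
 * c is widened to 4 bits as c*5, i.e. 0, 5, 10, 15. */
#include <stdio.h>

int main(void)
{
	unsigned short clut[256];
	unsigned int a, r, g, b, i = 0;

	for (a = 0; a < 4; a++)
		for (r = 0; r < 4; r++)
			for (g = 0; g < 4; g++)
				for (b = 0; b < 4; b++)
					clut[i++] = (a * 5) << 12 |
						    (r * 5) << 8 |
						    (g * 5) << 4 |
						    (b * 5);

	/* first: 0x0000, last: 0xffff */
	printf("first: 0x%04x  last: 0x%04x\n", clut[0], clut[255]);
	return 0;
}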
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ int src_w = state->src_w >> 16;
+ int src_h = state->src_h >> 16;
+ bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+ struct drm_gem_cma_object *cma_obj;
u32 y, x;
u32 val;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
- dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+ dev_dbg(cursor->dev, "%s %s\n", __func__,
+ sti_plane_to_str(plane));
- if (layer->src_w < STI_CURS_MIN_SIZE ||
- layer->src_h < STI_CURS_MIN_SIZE ||
- layer->src_w > STI_CURS_MAX_SIZE ||
- layer->src_h > STI_CURS_MAX_SIZE) {
+ if (src_w < STI_CURS_MIN_SIZE ||
+ src_h < STI_CURS_MIN_SIZE ||
+ src_w > STI_CURS_MAX_SIZE ||
+ src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
- layer->src_w, layer->src_h);
- return -EINVAL;
+ src_w, src_h);
+ return;
}
/* If the cursor size has changed, re-allocate the pixmap */
if (!cursor->pixmap.base ||
- (cursor->width != layer->src_w) ||
- (cursor->height != layer->src_h)) {
- cursor->width = layer->src_w;
- cursor->height = layer->src_h;
+ (cursor->width != src_w) ||
+ (cursor->height != src_h)) {
+ cursor->width = src_w;
+ cursor->height = src_h;
if (cursor->pixmap.base)
- dma_free_writecombine(layer->dev,
+ dma_free_writecombine(cursor->dev,
cursor->pixmap.size,
cursor->pixmap.base,
cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
- cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+ cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
cursor->pixmap.size,
&cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
- return -ENOMEM;
+ return;
}
}
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
/* Convert ARGB8888 to CLUT8 */
- sti_cursor_argb8888_to_clut8(layer);
+ sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
x = sti_vtg_get_pixel_number(*mode, 0);
val = y << 16 | x;
- writel(val, layer->regs + CUR_AWS);
+ writel(val, cursor->regs + CUR_AWS);
y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
val = y << 16 | x;
- writel(val, layer->regs + CUR_AWE);
+ writel(val, cursor->regs + CUR_AWE);
if (first_prepare) {
/* Set and fetch CLUT */
- writel(cursor->clut_paddr, layer->regs + CUR_CML);
- writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+ writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
}
- return 0;
-}
-
-static int sti_cursor_commit_layer(struct sti_layer *layer)
-{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- struct drm_display_mode *mode = layer->mode;
- u32 ydo, xdo;
-
- dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
/* Set memory location, size, and position */
- writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
- writel(cursor->width, layer->regs + CUR_PMP);
- writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+ writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
+ writel(cursor->width, cursor->regs + CUR_PMP);
+ writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
- ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
- xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
- writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+ y = sti_vtg_get_line_number(*mode, dst_y);
+ x = sti_vtg_get_pixel_number(*mode, dst_y);
+ writel((y << 16) | x, cursor->regs + CUR_VPO);
- return 0;
+ plane->status = STI_PLANE_UPDATED;
}
-static int sti_cursor_disable_layer(struct sti_layer *layer)
+static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
{
- return 0;
-}
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
-static void sti_cursor_init(struct sti_layer *layer)
-{
- struct sti_cursor *cursor = to_sti_cursor(layer);
- unsigned short *base = cursor->clut;
- unsigned int a, r, g, b;
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
- /* Assign CLUT values, ARGB444 format */
- for (a = 0; a < 4; a++)
- for (r = 0; r < 4; r++)
- for (g = 0; g < 4; g++)
- for (b = 0; b < 4; b++)
- *base++ = (a * 5) << 12 |
- (r * 5) << 8 |
- (g * 5) << 4 |
- (b * 5);
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
+ plane->status = STI_PLANE_DISABLING;
}
-static const struct sti_layer_funcs cursor_ops = {
- .get_formats = sti_cursor_get_formats,
- .get_nb_formats = sti_cursor_get_nb_formats,
- .init = sti_cursor_init,
- .prepare = sti_cursor_prepare_layer,
- .commit = sti_cursor_commit_layer,
- .disable = sti_cursor_disable_layer,
+static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+ .atomic_update = sti_cursor_atomic_update,
+ .atomic_disable = sti_cursor_atomic_disable,
};
-struct sti_layer *sti_cursor_create(struct device *dev)
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs)
{
struct sti_cursor *cursor;
+ size_t size;
+ int res;
cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
if (!cursor) {
@@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
}
/* Allocate clut buffer */
- cursor->clut = dma_alloc_writecombine(dev,
- 0x100 * sizeof(unsigned short),
- &cursor->clut_paddr,
- GFP_KERNEL | GFP_DMA);
+ size = 0x100 * sizeof(unsigned short);
+ cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
- devm_kfree(dev, cursor);
- return NULL;
+ goto err_clut;
+ }
+
+ cursor->dev = dev;
+ cursor->regs = baseaddr;
+ cursor->plane.desc = desc;
+ cursor->plane.status = STI_PLANE_DISABLED;
+
+ sti_cursor_init(cursor);
+
+ res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
+ possible_crtcs,
+ &sti_plane_helpers_funcs,
+ cursor_supported_formats,
+ ARRAY_SIZE(cursor_supported_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ goto err_plane;
}
- cursor->layer.ops = &cursor_ops;
+ drm_plane_helper_add(&cursor->plane.drm_plane,
+ &sti_cursor_helpers_funcs);
+
+ sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+
+ return &cursor->plane.drm_plane;
- return (struct sti_layer *)cursor;
+err_plane:
+ dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+err_clut:
+ devm_kfree(dev, cursor);
+ return NULL;
}
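The body of sti_cursor_argb8888_to_clut8() is elided from the hunk above. A minimal stand-alone sketch of the conversion it performs, assuming the usual reduction to the two most significant bits of each channel (which is what the 256-entry ARGB4444 palette built in sti_cursor_init() implies):

#include <stdint.h>
#include <stdio.h>

static uint8_t argb8888_to_clut8(uint32_t px)
{
        /* keep the two MSBs of each 8-bit channel: 2+2+2+2 = 8-bit index */
        return ((px >> 30) & 3) << 6 | ((px >> 22) & 3) << 4 |
               ((px >> 14) & 3) << 2 | ((px >> 6) & 3);
}

static uint16_t clut_argb4444(uint8_t idx)
{
        uint16_t a = (idx >> 6) & 3, r = (idx >> 4) & 3;
        uint16_t g = (idx >> 2) & 3, b = idx & 3;

        /* x * 5 spreads the 2-bit values 0..3 over 0, 5, 10, 15 */
        return (a * 5) << 12 | (r * 5) << 8 | (g * 5) << 4 | (b * 5);
}

int main(void)
{
        uint8_t idx = argb8888_to_clut8(0xFFFF0000); /* opaque red */

        printf("index %u -> palette 0x%04X\n", (unsigned)idx,
               (unsigned)clut_argb4444(idx));
        return 0;
}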
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..2ee5c10e8b33 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
#ifndef _STI_CURSOR_H_
#define _STI_CURSOR_H_
-struct sti_layer *sti_cursor_create(struct device *dev);
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs);
#endif
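The to_sti_cursor()/to_sti_plane() macros above rely on container_of(), which recovers the enclosing structure from a pointer to one of its embedded members by subtracting the member offset. A stand-alone sketch with hypothetical types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct plane { int id; };
struct cursor { struct plane plane; int width; };

int main(void)
{
        struct cursor c = { .plane = { .id = 7 }, .width = 128 };
        struct plane *p = &c.plane;          /* what DRM hands back */
        struct cursor *back = container_of(p, struct cursor, plane);

        printf("recovered width: %d\n", back->width); /* 128 */
        return 0;
}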
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b14f017..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_CRTC_H_
-#define _STI_DRM_CRTC_H_
-
-#include <drm/drmP.h>
-
-struct sti_mixer;
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
- struct drm_plane *primary, struct drm_plane *cursor);
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
- unsigned long event, void *data);
-bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed43dda3..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
-#include "sti_vtg.h"
-
-enum sti_layer_desc sti_layer_default_zorder[] = {
- STI_GDP_0,
- STI_VID_0,
- STI_GDP_1,
- STI_VID_1,
- STI_GDP_2,
- STI_GDP_3,
-};
-
-/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
-
-static int
-sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct sti_layer *layer = to_sti_layer(plane);
- struct sti_mixer *mixer = to_sti_mixer(crtc);
- int res;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer));
- DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
-
- res = sti_mixer_set_layer_depth(mixer, layer);
- if (res) {
- DRM_ERROR("Can not set layer depth\n");
- return res;
- }
-
- /* src_x are in 16.16 format. */
- res = sti_layer_prepare(layer, crtc, fb,
- &crtc->mode, mixer->id,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16,
- src_w >> 16, src_h >> 16);
- if (res) {
- DRM_ERROR("Layer prepare failed\n");
- return res;
- }
-
- res = sti_layer_commit(layer);
- if (res) {
- DRM_ERROR("Layer commit failed\n");
- return res;
- }
-
- res = sti_mixer_set_layer_status(mixer, layer, true);
- if (res) {
- DRM_ERROR("Can not enable layer at mixer\n");
- return res;
- }
-
- return 0;
-}
-
-static int sti_drm_disable_plane(struct drm_plane *plane)
-{
- struct sti_layer *layer;
- struct sti_mixer *mixer;
- int lay_res, mix_res;
-
- if (!plane->crtc) {
- DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
- return 0;
- }
- layer = to_sti_layer(plane);
- mixer = to_sti_mixer(plane->crtc);
-
- DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- plane->crtc->base.id, sti_mixer_to_str(mixer),
- plane->base.id, sti_layer_to_str(layer));
-
- /* Disable layer at mixer level */
- mix_res = sti_mixer_set_layer_status(mixer, layer, false);
- if (mix_res)
- DRM_ERROR("Can not disable layer at mixer\n");
-
- /* Wait a while to be sure that a Vsync event is received */
- msleep(WAIT_NEXT_VSYNC_MS);
-
- /* Then disable layer itself */
- lay_res = sti_layer_disable(layer);
- if (lay_res)
- DRM_ERROR("Layer disable failed\n");
-
- if (lay_res || mix_res)
- return -EINVAL;
-
- return 0;
-}
-
-static void sti_drm_plane_destroy(struct drm_plane *plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
-}
-
-static int sti_drm_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct sti_drm_private *private = dev->dev_private;
- struct sti_layer *layer = to_sti_layer(plane);
-
- DRM_DEBUG_DRIVER("\n");
-
- if (property == private->plane_zorder_property) {
- layer->zorder = val;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static struct drm_plane_funcs sti_drm_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_drm_plane_destroy,
- .set_property = sti_drm_plane_set_property,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *new_state)
-{
- return 0;
-}
-
-static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *old_fb)
-{
-}
-
-static int sti_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return 0;
-}
-
-static void sti_drm_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
-{
- struct drm_plane_state *state = plane->state;
-
- sti_drm_update_plane(plane, state->crtc, state->fb,
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x, state->src_y,
- state->src_w, state->src_h);
-}
-
-static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
-{
- sti_drm_disable_plane(plane);
-}
-
-static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
- .prepare_fb = sti_drm_plane_prepare_fb,
- .cleanup_fb = sti_drm_plane_cleanup_fb,
- .atomic_check = sti_drm_plane_atomic_check,
- .atomic_update = sti_drm_plane_atomic_update,
- .atomic_disable = sti_drm_plane_atomic_disable,
-};
-
-static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
- uint64_t default_val)
-{
- struct drm_device *dev = plane->dev;
- struct sti_drm_private *private = dev->dev_private;
- struct drm_property *prop;
- struct sti_layer *layer = to_sti_layer(plane);
-
- prop = private->plane_zorder_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 0,
- GAM_MIXER_NB_DEPTH_LEVEL - 1);
- if (!prop)
- return;
-
- private->plane_zorder_property = prop;
- }
-
- drm_object_attach_property(&plane->base, prop, default_val);
- layer->zorder = default_val;
-}
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
- struct sti_layer *layer,
- unsigned int possible_crtcs,
- enum drm_plane_type type)
-{
- int err, i;
- uint64_t default_zorder = 0;
-
- err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
- &sti_drm_plane_funcs,
- sti_layer_get_formats(layer),
- sti_layer_get_nb_formats(layer), type);
- if (err) {
- DRM_ERROR("Failed to initialize plane\n");
- return NULL;
- }
-
- drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
-
- for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
- if (sti_layer_default_zorder[i] == layer->desc)
- break;
-
- default_zorder = i + 1;
-
- if (type == DRM_PLANE_TYPE_OVERLAY)
- sti_drm_plane_attach_zorder_property(&layer->plane,
- default_zorder);
-
- DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
- layer->plane.base.id,
- sti_layer_to_str(layer), default_zorder);
-
- return &layer->plane;
-}
-EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f191839f2a7..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_PLANE_H_
-#define _STI_DRM_PLANE_H_
-
-#include <drm/drmP.h>
-
-struct sti_layer;
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
- struct sti_layer *layer,
- unsigned int possible_crtcs,
- enum drm_plane_type type);
-#endif
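The deleted sti_drm_plane.c derived each plane's default depth from its position in the fixed background-to-foreground list (GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3). A stand-alone sketch of that lookup, with hypothetical enum names:

#include <stdio.h>

enum desc { GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3 };

static const enum desc default_zorder[] = {
        GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3,
};

static int zorder_for(enum desc d)
{
        unsigned int i;

        for (i = 0; i < sizeof(default_zorder) / sizeof(default_zorder[0]); i++)
                if (default_zorder[i] == d)
                        break;
        return i + 1;   /* depth 1..6, background first */
}

int main(void)
{
        printf("GDP_1 default z-order: %d\n", zorder_for(GDP_1)); /* 3 */
        return 0;
}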
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 59d558b400b3..6f4af6a8ba1b 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -18,8 +18,8 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,15 +30,15 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
-static void sti_drm_atomic_schedule(struct sti_drm_private *private,
- struct drm_atomic_state *state)
+static void sti_atomic_schedule(struct sti_private *private,
+ struct drm_atomic_state *state)
{
private->commit.state = state;
schedule_work(&private->commit.work);
}
-static void sti_drm_atomic_complete(struct sti_drm_private *private,
- struct drm_atomic_state *state)
+static void sti_atomic_complete(struct sti_private *private,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = private->drm_dev;
@@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
drm_atomic_state_free(state);
}
-static void sti_drm_atomic_work(struct work_struct *work)
+static void sti_atomic_work(struct work_struct *work)
{
- struct sti_drm_private *private = container_of(work,
- struct sti_drm_private, commit.work);
+ struct sti_private *private = container_of(work,
+ struct sti_private, commit.work);
- sti_drm_atomic_complete(private, private->commit.state);
+ sti_atomic_complete(private, private->commit.state);
}
-static int sti_drm_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool async)
+static int sti_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state, bool async)
{
- struct sti_drm_private *private = drm->dev_private;
+ struct sti_private *private = drm->dev_private;
int err;
err = drm_atomic_helper_prepare_planes(drm, state);
@@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(drm, state);
if (async)
- sti_drm_atomic_schedule(private, state);
+ sti_atomic_schedule(private, state);
else
- sti_drm_atomic_complete(private, state);
+ sti_atomic_complete(private, state);
mutex_unlock(&private->commit.lock);
return 0;
}
-static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+static struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = sti_drm_atomic_commit,
+ .atomic_commit = sti_atomic_commit,
};
-static void sti_drm_mode_config_init(struct drm_device *dev)
+static void sti_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_width = STI_MAX_FB_WIDTH;
dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
- dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+ dev->mode_config.funcs = &sti_mode_config_funcs;
}
-static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+static int sti_load(struct drm_device *dev, unsigned long flags)
{
- struct sti_drm_private *private;
+ struct sti_private *private;
int ret;
- private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private) {
DRM_ERROR("Failed to allocate private\n");
return -ENOMEM;
@@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
private->drm_dev = dev;
mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, sti_drm_atomic_work);
+ INIT_WORK(&private->commit.work, sti_atomic_work);
drm_mode_config_init(dev);
drm_kms_helper_poll_init(dev);
- sti_drm_mode_config_init(dev);
+ sti_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
if (ret) {
@@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
#ifdef CONFIG_DRM_STI_FBDEV
drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
#endif
return 0;
}
-static const struct file_operations sti_drm_driver_fops = {
+static const struct file_operations sti_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = drm_gem_cma_mmap,
@@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = {
.release = drm_release,
};
-static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
{
/* we want to be able to write in the mmapped buffer */
flags |= O_RDWR;
return drm_gem_prime_export(dev, obj, flags);
}
-static struct drm_driver sti_drm_driver = {
+static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME,
- .load = sti_drm_load,
+ .load = sti_load,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
- .fops = &sti_drm_driver_fops,
+ .fops = &sti_driver_fops,
.get_vblank_counter = drm_vblank_count,
- .enable_vblank = sti_drm_crtc_enable_vblank,
- .disable_vblank = sti_drm_crtc_disable_vblank,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = sti_drm_gem_prime_export,
+ .gem_prime_export = sti_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int sti_drm_bind(struct device *dev)
+static int sti_bind(struct device *dev)
{
- return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+ return drm_platform_init(&sti_driver, to_platform_device(dev));
}
-static void sti_drm_unbind(struct device *dev)
+static void sti_unbind(struct device *dev)
{
drm_put_dev(dev_get_drvdata(dev));
}
-static const struct component_master_ops sti_drm_ops = {
- .bind = sti_drm_bind,
- .unbind = sti_drm_unbind,
+static const struct component_master_ops sti_ops = {
+ .bind = sti_bind,
+ .unbind = sti_unbind,
};
-static int sti_drm_master_probe(struct platform_device *pdev)
+static int sti_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->parent->of_node;
+ struct device_node *node = dev->of_node;
struct device_node *child_np;
struct component_match *match = NULL;
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ of_platform_populate(node, NULL, NULL, dev);
+
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
@@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, child_np);
}
- return component_master_add_with_match(dev, &sti_drm_ops, match);
-}
-
-static int sti_drm_master_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &sti_drm_ops);
- return 0;
+ return component_master_add_with_match(dev, &sti_ops, match);
}
-static struct platform_driver sti_drm_master_driver = {
- .probe = sti_drm_master_probe,
- .remove = sti_drm_master_remove,
- .driver = {
- .name = DRIVER_NAME "__master",
- },
-};
-
-static int sti_drm_platform_probe(struct platform_device *pdev)
+static int sti_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct platform_device *master;
-
- of_platform_populate(node, NULL, NULL, dev);
-
- platform_driver_register(&sti_drm_master_driver);
- master = platform_device_register_resndata(dev,
- DRIVER_NAME "__master", -1,
- NULL, 0, NULL, 0);
- if (IS_ERR(master))
- return PTR_ERR(master);
-
- platform_set_drvdata(pdev, master);
- return 0;
-}
-
-static int sti_drm_platform_remove(struct platform_device *pdev)
-{
- struct platform_device *master = platform_get_drvdata(pdev);
-
+ component_master_del(&pdev->dev, &sti_ops);
of_platform_depopulate(&pdev->dev);
- platform_device_unregister(master);
- platform_driver_unregister(&sti_drm_master_driver);
+
return 0;
}
-static const struct of_device_id sti_drm_dt_ids[] = {
+static const struct of_device_id sti_dt_ids[] = {
{ .compatible = "st,sti-display-subsystem", },
{ /* end node */ },
};
-MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+MODULE_DEVICE_TABLE(of, sti_dt_ids);
-static struct platform_driver sti_drm_platform_driver = {
- .probe = sti_drm_platform_probe,
- .remove = sti_drm_platform_remove,
+static struct platform_driver sti_platform_driver = {
+ .probe = sti_platform_probe,
+ .remove = sti_platform_remove,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = sti_drm_dt_ids,
+ .of_match_table = sti_dt_ids,
},
};
-module_platform_driver(sti_drm_platform_driver);
+module_platform_driver(sti_platform_driver);
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
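sti_atomic_commit() above either applies the swapped state inline or hands it to a worker, both paths serialized by commit.lock. A user-space sketch of the same pattern, with a pthread standing in for the kernel workqueue (assumed simplification, not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct priv {
        pthread_mutex_t lock;
        void *state;
};

static void commit_complete(struct priv *p, void *state)
{
        printf("applying atomic state %p\n", state);
}

static void *commit_work(void *arg)
{
        struct priv *p = arg;

        commit_complete(p, p->state);
        return NULL;
}

static int atomic_commit(struct priv *p, void *state, bool async)
{
        pthread_mutex_lock(&p->lock);
        if (async) {
                pthread_t worker;

                p->state = state;       /* kernel queues commit.work here */
                pthread_create(&worker, NULL, commit_work, p);
                pthread_detach(worker);
        } else {
                commit_complete(p, state);
        }
        pthread_mutex_unlock(&p->lock);
        return 0;
}

int main(void)
{
        struct priv p = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int dummy;

        atomic_commit(&p, &dummy, false);  /* blocking path */
        atomic_commit(&p, &dummy, true);   /* deferred path */
        pthread_exit(NULL);                /* let the worker finish */
}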
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index c413aa3ff402..9372f69e1859 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -4,8 +4,8 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#ifndef _STI_DRM_DRV_H_
-#define _STI_DRM_DRV_H_
+#ifndef _STI_DRV_H_
+#define _STI_DRV_H_
#include <drm/drmP.h>
@@ -20,7 +20,7 @@ struct sti_tvout;
* @plane_zorder_property: z-order property for CRTC planes
* @drm_dev: drm device
*/
-struct sti_drm_private {
+struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..9365670427ad 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
#include "sti_compositor.h"
#include "sti_gdp.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH BIT(6)
@@ -26,7 +29,7 @@
#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
-#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
+#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP_CLUT8 0x0B
@@ -53,8 +56,8 @@
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX 0x7FF
-#define GDP_NODE_NB_BANK 2
-#define GDP_NODE_PER_FIELD 2
+#define GDP_NODE_NB_BANK 2
+#define GDP_NODE_PER_FIELD 2
struct sti_gdp_node {
u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@ struct sti_gdp_node_list {
/**
* STI GDP structure
*
- * @layer: layer structure
+ * @sti_plane: sti_plane structure
+ * @dev: driver device
+ * @regs: gdp registers
* @clk_pix: pixel clock for the current gdp
* @clk_main_parent: gdp parent clock if main path used
* @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
- * @node_list: array of node list
+ * @node_list: array of node list
*/
struct sti_gdp {
- struct sti_layer layer;
+ struct sti_plane plane;
+ struct device *dev;
+ void __iomem *regs;
struct clk *clk_pix;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@ struct sti_gdp {
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};
-#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_C8,
};
-static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
-{
- return gdp_supported_formats;
-}
-
-static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(gdp_supported_formats);
-}
-
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
@@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
/**
* sti_gdp_get_free_nodes
- * @layer: gdp layer
+ * @gdp: gdp pointer
*
* Look for a GDP node list that is not currently read by the HW.
*
* RETURNS:
* Pointer to the free GDP node list
*/
-static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
- struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
- hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
/* in hazardous cases, restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
- sti_layer_to_str(layer), hw_nvn);
+ sti_plane_to_str(&gdp->plane), hw_nvn);
end:
return &gdp->node_list[0];
@@ -207,7 +203,7 @@ end:
/**
* sti_gdp_get_current_nodes
- * @layer: GDP layer
+ * @gdp: gdp pointer
*
* Look for GDP nodes that are currently read by the HW.
*
@@ -215,13 +211,12 @@ end:
* Pointer to the current GDP node list
*/
static
-struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
- struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
- hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
end:
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
- hw_nvn, sti_layer_to_str(layer));
+ hw_nvn, sti_plane_to_str(&gdp->plane));
return NULL;
}
/**
- * sti_gdp_prepare_layer
- * @lay: gdp layer
- * @first_prepare: true if it is the first time this function is called
- *
- * Update the free GDP node list according to the layer properties.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
- struct sti_gdp_node_list *list;
- struct sti_gdp_node *top_field, *btm_field;
- struct drm_display_mode *mode = layer->mode;
- struct device *dev = layer->dev;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct sti_compositor *compo = dev_get_drvdata(dev);
- int format;
- unsigned int depth, bpp;
- int rate = mode->clock * 1000;
- int res;
- u32 ydo, xdo, yds, xds;
-
- list = sti_gdp_get_free_nodes(layer);
- top_field = list->top_field;
- btm_field = list->btm_field;
-
- dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
- sti_layer_to_str(layer), top_field, btm_field);
-
- /* Build the top field from layer params */
- top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
- top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
- format = sti_gdp_fourcc2format(layer->format);
- if (format == -1) {
- DRM_ERROR("Format not supported by GDP %.4s\n",
- (char *)&layer->format);
- return 1;
- }
- top_field->gam_gdp_ctl |= format;
- top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
- top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
-
- /* pixel memory location */
- drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
- top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
- top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
- top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
-
- /* input parameters */
- top_field->gam_gdp_pmp = layer->pitches[0];
- top_field->gam_gdp_size =
- clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
- clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
-
- /* output parameters */
- ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
- yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
- xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
- xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
- top_field->gam_gdp_vpo = (ydo << 16) | xdo;
- top_field->gam_gdp_vps = (yds << 16) | xds;
-
- /* Same content and chained together */
- memcpy(btm_field, top_field, sizeof(*btm_field));
- top_field->gam_gdp_nvn = list->btm_field_paddr;
- btm_field->gam_gdp_nvn = list->top_field_paddr;
-
- /* Interlaced mode */
- if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
- btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
- layer->pitches[0];
-
- if (first_prepare) {
- /* Register gdp callback */
- if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux,
- &gdp->vtg_field_nb, layer->mixer_id)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return 1;
- }
-
- /* Set and enable gdp clock */
- if (gdp->clk_pix) {
- struct clk *clkp;
- /* According to the mixer used, the gdp pixel clock
- * should have a different parent clock. */
- if (layer->mixer_id == STI_MIXER_MAIN)
- clkp = gdp->clk_main_parent;
- else
- clkp = gdp->clk_aux_parent;
-
- if (clkp)
- clk_set_parent(gdp->clk_pix, clkp);
-
- res = clk_set_rate(gdp->clk_pix, rate);
- if (res < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
- rate);
- return 1;
- }
-
- if (clk_prepare_enable(gdp->clk_pix)) {
- DRM_ERROR("Failed to prepare/enable gdp\n");
- return 1;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * sti_gdp_commit_layer
- * @lay: gdp layer
- *
- * Update the NVN field of the 'right' field of the current GDP node (being
- * used by the HW) with the address of the updated ('free') top field GDP node.
- * - In interlaced mode the 'right' field is the bottom field as we update
- * frames starting from their top field
- * - In progressive mode, we update both bottom and top fields which are
- * equal nodes.
- * At the next VSYNC, the updated node list will be used by the HW.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_commit_layer(struct sti_layer *layer)
-{
- struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
- struct sti_gdp_node *updated_top_node = updated_list->top_field;
- struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- u32 dma_updated_top = updated_list->top_field_paddr;
- u32 dma_updated_btm = updated_list->btm_field_paddr;
- struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
-
- dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
- sti_layer_to_str(layer),
- updated_top_node, updated_btm_node);
- dev_dbg(layer->dev, "Current NVN:0x%X\n",
- readl(layer->regs + GAM_GDP_NVN_OFFSET));
- dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)layer->paddr,
- readl(layer->regs + GAM_GDP_PML_OFFSET));
-
- if (curr_list == NULL) {
- /* First update or invalid node should directly write in the
- * hw register */
- DRM_DEBUG_DRIVER("%s first update (or invalid node)",
- sti_layer_to_str(layer));
-
- writel(gdp->is_curr_top == true ?
- dma_updated_btm : dma_updated_top,
- layer->regs + GAM_GDP_NVN_OFFSET);
- return 0;
- }
-
- if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
- if (gdp->is_curr_top == true) {
- /* Do not update in the middle of the frame, but
- * postpone the update after the bottom field has
- * been displayed */
- curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
- } else {
- /* Direct update to avoid one frame delay */
- writel(dma_updated_top,
- layer->regs + GAM_GDP_NVN_OFFSET);
- }
- } else {
- /* Direct update for progressive to avoid one frame delay */
- writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
- }
-
- return 0;
-}
-
-/**
- * sti_gdp_disable_layer
- * @lay: gdp layer
+ * sti_gdp_disable
+ * @gdp: gdp pointer
*
* Disable a GDP.
- *
- * RETURNS:
- * 0 on success.
*/
-static int sti_gdp_disable_layer(struct sti_layer *layer)
+static void sti_gdp_disable(struct sti_gdp *gdp)
{
+ struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+ struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct sti_compositor *compo = dev_get_drvdata(layer->dev);
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
- if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+ if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
clk_disable_unprepare(gdp->clk_pix);
- return 0;
+ gdp->plane.status = STI_PLANE_DISABLED;
}
/**
@@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
+ if (gdp->plane.status == STI_PLANE_FLUSHING) {
+ /* disabling needs to be synchronized with the vsync event */
+ DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+ sti_plane_to_str(&gdp->plane));
+
+ sti_gdp_disable(gdp);
+ }
+
switch (event) {
case VTG_TOP_FIELD_EVENT:
gdp->is_curr_top = true;
@@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
return 0;
}
-static void sti_gdp_init(struct sti_layer *layer)
+static void sti_gdp_init(struct sti_gdp *gdp)
{
- struct sti_gdp *gdp = to_sti_gdp(layer);
- struct device_node *np = layer->dev->of_node;
+ struct device_node *np = gdp->dev->of_node;
dma_addr_t dma_addr;
void *base;
unsigned int i, size;
@@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_writecombine(layer->dev,
- size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_writecombine(gdp->dev,
+ size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
/* GDP of STiH407 chip have its own pixel clock */
char *clk_name;
- switch (layer->desc) {
+ switch (gdp->plane.desc) {
case STI_GDP_0:
clk_name = "pix_gdp1";
break;
@@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
return;
}
- gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+ gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
- gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+ gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
- gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+ gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
-static const struct sti_layer_funcs gdp_ops = {
- .get_formats = sti_gdp_get_formats,
- .get_nb_formats = sti_gdp_get_nb_formats,
- .init = sti_gdp_init,
- .prepare = sti_gdp_prepare_layer,
- .commit = sti_gdp_commit_layer,
- .disable = sti_gdp_disable_layer,
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+ struct drm_framebuffer *fb = state->fb;
+ bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+ struct sti_mixer *mixer;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_gdp_node_list *list;
+ struct sti_gdp_node_list *curr_list;
+ struct sti_gdp_node *top_field, *btm_field;
+ u32 dma_updated_top;
+ u32 dma_updated_btm;
+ int format;
+ unsigned int depth, bpp;
+ u32 ydo, xdo, yds, xds;
+ int res;
+
+ /* Handle the case where crtc is NULL (plane disabled) */
+ if (!crtc)
+ return;
+
+ mixer = to_sti_mixer(crtc);
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src coordinates are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ list = sti_gdp_get_free_nodes(gdp);
+ top_field = list->top_field;
+ btm_field = list->btm_field;
+
+ dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+ sti_plane_to_str(plane), top_field, btm_field);
+
+ /* build the top field */
+ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+ top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+ format = sti_gdp_fourcc2format(fb->pixel_format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&fb->pixel_format);
+ return;
+ }
+ top_field->gam_gdp_ctl |= format;
+ top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+ top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&fb->pixel_format,
+ (unsigned long)cma_obj->paddr);
+
+ /* pixel memory location */
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml += src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+ /* input parameters */
+ top_field->gam_gdp_pmp = fb->pitches[0];
+ top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+ clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
+
+ /* output parameters */
+ ydo = sti_vtg_get_line_number(*mode, dst_y);
+ yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+ top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+ top_field->gam_gdp_vps = (yds << 16) | xds;
+
+ /* Same content and chained together */
+ memcpy(btm_field, top_field, sizeof(*btm_field));
+ top_field->gam_gdp_nvn = list->btm_field_paddr;
+ btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+ /* Interlaced mode */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+ fb->pitches[0];
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ &gdp->vtg_field_nb, mixer->id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ struct clk *clkp;
+ int rate = mode->clock * 1000;
+
+ /* Depending on the mixer used, the gdp pixel clock
+ * needs a different parent clock. */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return;
+ }
+ }
+ }
+
+ /* Update the NVN field of the 'right' field of the current GDP node
+ * (being used by the HW) with the address of the updated ('free') top
+ * field GDP node.
+ * - In interlaced mode the 'right' field is the bottom field as we
+ * update frames starting from their top field
+ * - In progressive mode, we update both bottom and top fields which
+ * are equal nodes.
+ * At the next VSYNC, the updated node list will be used by the HW.
+ */
+ curr_list = sti_gdp_get_current_nodes(gdp);
+ dma_updated_top = list->top_field_paddr;
+ dma_updated_btm = list->btm_field_paddr;
+
+ dev_dbg(gdp->dev, "Current NVN:0x%X\n",
+ readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+ dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
+ (unsigned long)cma_obj->paddr,
+ readl(gdp->regs + GAM_GDP_PML_OFFSET));
+
+ if (!curr_list) {
+ /* On first update, or with an invalid node, write directly
+ * to the HW register */
+ DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+ sti_plane_to_str(plane));
+
+ writel(gdp->is_curr_top ?
+ dma_updated_btm : dma_updated_top,
+ gdp->regs + GAM_GDP_NVN_OFFSET);
+ goto end;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (gdp->is_curr_top) {
+ /* Do not update in the middle of the frame; postpone
+ * the update until after the bottom field has been
+ * displayed */
+ curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+ } else {
+ /* Direct update to avoid one frame delay */
+ writel(dma_updated_top,
+ gdp->regs + GAM_GDP_NVN_OFFSET);
+ }
+ } else {
+ /* Direct update for progressive to avoid one frame delay */
+ writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
+ }
+
+end:
+ plane->status = STI_PLANE_UPDATED;
+}
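The gam_gdp_pml value programmed above is plain base-plus-crop arithmetic: the framebuffer start, plus src_x scaled by bytes per pixel, plus src_y scaled by the line stride. Worked with hypothetical numbers:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical 1920-wide XRGB8888 buffer at 0x40000000 */
        uint32_t paddr = 0x40000000;
        uint32_t pitch = 1920 * 4;              /* bytes per line */
        uint32_t bpp = 32, src_x = 100, src_y = 50;
        uint32_t pml = paddr + src_x * (bpp >> 3) + src_y * pitch;

        printf("first scanned byte: 0x%08" PRIX32 "\n", pml);
        return 0;
}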
+
+static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
+ plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+ .atomic_update = sti_gdp_atomic_update,
+ .atomic_disable = sti_gdp_atomic_disable,
};
-struct sti_layer *sti_gdp_create(struct device *dev, int id)
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
{
struct sti_gdp *gdp;
+ int res;
gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
if (!gdp) {
@@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
return NULL;
}
- gdp->layer.ops = &gdp_ops;
+ gdp->dev = dev;
+ gdp->regs = baseaddr;
+ gdp->plane.desc = desc;
+ gdp->plane.status = STI_PLANE_DISABLED;
+
gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
- return (struct sti_layer *)gdp;
+ sti_gdp_init(gdp);
+
+ res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
+ possible_crtcs,
+ &sti_plane_helpers_funcs,
+ gdp_supported_formats,
+ ARRAY_SIZE(gdp_supported_formats),
+ type);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ goto err;
+ }
+
+ drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
+
+ sti_plane_init_property(&gdp->plane, type);
+
+ return &gdp->plane.drm_plane;
+
+err:
+ devm_kfree(dev, gdp);
+ return NULL;
}
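The long comment in sti_gdp_atomic_update() describes double-banked node lists: the hardware scans one bank while the driver fills the other, and writing the NVN ("next viewport node") pointer flips them at vsync. A stand-alone sketch of that ping-pong, ignoring top/bottom fields:

#include <stdio.h>

#define NB_BANK 2

struct node { int frame; struct node *nvn; };

int main(void)
{
        struct node bank[NB_BANK] = { { 0, &bank[1] }, { 0, &bank[0] } };
        struct node *hw = &bank[0];     /* bank currently read by the HW */
        int frame;

        for (frame = 1; frame <= 4; frame++) {
                struct node *free_bank = hw->nvn;  /* bank not in use */

                free_bank->frame = frame;          /* fill the free bank */
                hw = free_bank;                    /* flip at vsync */
                printf("vsync: HW now scans frame %d\n", hw->frame);
        }
        return 0;
}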
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..73947a4a8004 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
#include <linux/types.h>
-struct sti_layer *sti_gdp_create(struct device *dev, int id);
-
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc,
+ void __iomem *baseaddr,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type);
#endif
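Several hunks note that DRM plane source coordinates are 16.16 fixed point, so the driver keeps only the integer part with ">> 16". A stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t src_w = (640u << 16) | 0x8000;   /* 640.5 in 16.16 */

        printf("integer part: %u\n", (unsigned)(src_w >> 16));      /* 640 */
        printf("fraction: %u/65536\n", (unsigned)(src_w & 0xffff)); /* 32768 */
        return 0;
}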
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d54487c..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
return count;
fail:
- DRM_ERROR("Can not read HDMI EDID\n");
+ DRM_ERROR("Can't read HDMI EDID\n");
return 0;
}
@@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
- struct device_node *ddc;
int err;
- ddc = of_parse_phandle(dev->of_node, "ddc", 0);
- if (ddc) {
- hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
- if (!hdmi->ddc_adapt) {
- err = -EPROBE_DEFER;
- of_node_put(ddc);
- return err;
- }
-
- of_node_put(ddc);
- }
-
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
@@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
struct resource *res;
+ struct device_node *ddc;
int ret;
DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
if (!hdmi)
return -ENOMEM;
+ ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
+ if (ddc) {
+ hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+ if (!hdmi->ddc_adapt) {
+ of_node_put(ddc);
+ return -EPROBE_DEFER;
+ }
+
+ of_node_put(ddc);
+ }
+
hdmi->dev = pdev->dev;
/* Get resources */
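The plane->status writes scattered through these hunks form a small state machine: atomic_update sets UPDATED, atomic_disable requests DISABLING, and the vsync notifiers complete a FLUSHING plane to DISABLED; the DISABLING-to-FLUSHING step presumably lives in the CRTC code outside this section. A toy walk-through:

#include <stdio.h>

enum status { DISABLED, UPDATED, FLUSHING, DISABLING };

static const char *const name[] = {
        "DISABLED", "UPDATED", "FLUSHING", "DISABLING",
};

/* mirrors sti_gdp_field_cb()/sti_hqvdp_vtg_cb(): only a FLUSHING
 * plane is actually shut down when the vsync event arrives */
static enum status on_vsync(enum status s)
{
        return s == FLUSHING ? DISABLED : s;
}

int main(void)
{
        enum status s = UPDATED;                  /* after atomic_update */

        s = DISABLING;                            /* after atomic_disable */
        printf("vsync: %s\n", name[on_vsync(s)]); /* still DISABLING */
        s = FLUSHING;                             /* assumed CRTC flush step */
        printf("vsync: %s\n", name[on_vsync(s)]); /* DISABLED */
        return 0;
}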
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..7c8f9b8bfae1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
#include <linux/reset.h>
#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
-#include "sti_drm_plane.h"
-#include "sti_hqvdp.h"
+#include "sti_compositor.h"
#include "sti_hqvdp_lut.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vtg.h"
/* Firmware name */
@@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
* @dev: driver device
* @drm_dev: the drm device
* @regs: registers
- * @layer: layer structure for hqvdp it self
- * @vid_plane: VID plug used as link with compositor IP
+ * @plane: plane structure for the hqvdp itself
* @clk: IP clock
* @clk_pix_main: pix main clock
* @reset: reset control
@@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
+ * @xp70_initialized: true if xp70 is already initialized
*/
struct sti_hqvdp {
struct device *dev;
struct drm_device *drm_dev;
void __iomem *regs;
- struct sti_layer layer;
- struct drm_plane *vid_plane;
+ struct sti_plane plane;
struct clk *clk;
struct clk *clk_pix_main;
struct reset_control *reset;
@@ -351,24 +351,15 @@ struct sti_hqvdp {
void *hqvdp_cmd;
dma_addr_t hqvdp_cmd_paddr;
struct sti_vtg *vtg;
+ bool xp70_initialized;
};
-#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
static const uint32_t hqvdp_supported_formats[] = {
DRM_FORMAT_NV12,
};
-static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
-{
- return hqvdp_supported_formats;
-}
-
-static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
-{
- return ARRAY_SIZE(hqvdp_supported_formats);
-}
-
/**
* sti_hqvdp_get_free_cmd
* @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
/**
* sti_hqvdp_check_hw_scaling
- * @layer: hqvdp layer
+ * @hqvdp: hqvdp pointer
+ * @mode: display mode with timing constraints
+ * @src_w: source width
+ * @src_h: source height
+ * @dst_w: destination width
+ * @dst_h: destination height
*
* Check if the HW is able to perform the scaling request
* The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
* RETURNS:
* True if the HW can scale.
*/
-static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
+ struct drm_display_mode *mode,
+ int src_w, int src_h,
+ int dst_w, int dst_h)
{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
unsigned long lfw;
unsigned int inv_zy;
- lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
- lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+ lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+ lfw /= max(src_w, dst_w) * mode->clock / 1000;
- inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+ inv_zy = DIV_ROUND_UP(src_h, dst_h);
return (inv_zy <= lfw) ? true : false;
}
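The bound checked above, CEIL(1/Zy) <= FLOOR(LFW), can be evaluated by hand. A stand-alone sketch of the same integer arithmetic, with hypothetical clock and mode numbers:

#include <stdbool.h>
#include <stdio.h>

static bool can_scale(long htotal, long ip_hz, long pix_khz,
                      long src_w, long src_h, long dst_w, long dst_h)
{
        long max_w = src_w > dst_w ? src_w : dst_w;
        long lfw = htotal * (ip_hz / 1000000);     /* lines of firmware */
        long inv_zy = (src_h + dst_h - 1) / dst_h; /* CEIL(1/Zy) */

        lfw /= max_w * pix_khz / 1000;             /* FLOOR via integer div */
        return inv_zy <= lfw;
}

int main(void)
{
        /* hypothetical: 400 MHz IP clock, 1080p (148.5 MHz) timings */
        printf("1080 -> 540 downscale: %s\n",
               can_scale(2200, 400000000, 148500,
                         1920, 1080, 1920, 540) ? "ok" : "beyond HW");
        return 0;
}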
/**
- * sti_hqvdp_prepare_layer
- * @layer: hqvdp layer
- * @first_prepare: true if it is the first time this function is called
+ * sti_hqvdp_disable
+ * @hqvdp: hqvdp pointer
*
- * Prepares a command for the firmware
- *
- * RETURNS:
- * 0 on success.
+ * Disables the HQVDP plane
*/
-static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
- struct sti_hqvdp_cmd *cmd;
- int scale_h, scale_v;
- int cmd_offset;
-
- dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
- /* prepare and commit VID plane */
- hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
- layer->crtc, layer->fb,
- layer->dst_x, layer->dst_y,
- layer->dst_w, layer->dst_h,
- layer->src_x, layer->src_y,
- layer->src_w, layer->src_h);
-
- cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
- if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
- return -EBUSY;
- }
- cmd = hqvdp->hqvdp_cmd + cmd_offset;
-
- if (!sti_hqvdp_check_hw_scaling(layer)) {
- DRM_ERROR("Scaling beyond HW capabilities\n");
- return -EINVAL;
- }
-
- /* Static parameters, defaulting to progressive mode */
- cmd->top.config = TOP_CONFIG_PROGRESSIVE;
- cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
- cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
- cmd->csdi.config = CSDI_CONFIG_PROG;
-
- /* VC1RE, FMD bypassed : keep everything set to 0
- * IQI/P2I bypassed */
- cmd->iqi.config = IQI_CONFIG_DFLT;
- cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
- cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
- cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
-
- /* Buffer planes address */
- cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
- cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
-
- /* Pitches */
- cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
- layer->pitches[0];
- cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
- layer->pitches[1];
-
- /* Input / output size
- * Align to upper even value */
- layer->dst_w = ALIGN(layer->dst_w, 2);
- layer->dst_h = ALIGN(layer->dst_h, 2);
-
- if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
- (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
- (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
- (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
- DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
- layer->src_w, layer->src_h,
- layer->dst_w, layer->dst_h);
- return -EINVAL;
- }
- cmd->top.input_viewport_size = cmd->top.input_frame_size =
- layer->src_h << 16 | layer->src_w;
- cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
- cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
-
- /* Handle interlaced */
- if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
- /* Top field to display */
- cmd->top.config = TOP_CONFIG_INTER_TOP;
-
- /* Update pitches and vert size */
- cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
- layer->src_w;
- cmd->top.luma_processed_pitch *= 2;
- cmd->top.luma_src_pitch *= 2;
- cmd->top.chroma_processed_pitch *= 2;
- cmd->top.chroma_src_pitch *= 2;
-
- /* Enable directional deinterlacing processing */
- cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
- cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
- cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
- }
-
- /* Update hvsrc lut coef */
- scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
- sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
-
- scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
- sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
-
- if (first_prepare) {
- /* Prevent VTG shutdown */
- if (clk_prepare_enable(hqvdp->clk_pix_main)) {
- DRM_ERROR("Failed to prepare/enable pix main clk\n");
- return -ENXIO;
- }
-
- /* Register VTG Vsync callback to handle bottom fields */
- if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
- sti_vtg_register_client(hqvdp->vtg,
- &hqvdp->vtg_nb, layer->mixer_id)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return -ENXIO;
- }
- }
-
- return 0;
-}
-
-static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
- int cmd_offset;
-
- dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
- cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
- if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
- return -EBUSY;
- }
-
- writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
- hqvdp->regs + HQVDP_MBX_NEXT_CMD);
-
- hqvdp->curr_field_count++;
-
- /* Interlaced : get ready to display the bottom field at next Vsync */
- if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
- hqvdp->btm_field_pending = true;
-
- dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
- __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
-
- return 0;
-}
-
-static int sti_hqvdp_disable_layer(struct sti_layer *layer)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int i;
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
/* Unregister VTG Vsync callback */
- if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
- sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+ if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* Set next cmd to NULL */
@@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
/* VTG can stop now */
clk_disable_unprepare(hqvdp->clk_pix_main);
- if (i == POLL_MAX_ATTEMPT) {
+ if (i == POLL_MAX_ATTEMPT)
DRM_ERROR("XP70 could not revert to idle\n");
- return -ENXIO;
- }
-
- /* disable VID plane */
- hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
- return 0;
+ hqvdp->plane.status = STI_PLANE_DISABLED;
}
/**
@@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
+ if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
+ /* disabling must be synchronized with the vsync event */
+ DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+ sti_plane_to_str(&hqvdp->plane));
+
+ sti_hqvdp_disable(hqvdp);
+ }
+
if (hqvdp->btm_field_pending) {
/* Create the btm field command from the current one */
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
-static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
- struct drm_plane *plane;
-
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct sti_layer *layer = to_sti_layer(plane);
-
- if (layer->desc == id)
- return plane;
- }
-
- return NULL;
-}
-
-static void sti_hqvd_init(struct sti_layer *layer)
-{
- struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int size;
- /* find the plane macthing with vid 0 */
- hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
- if (!hqvdp->vid_plane) {
- DRM_ERROR("Cannot find Main video layer\n");
- return;
- }
-
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
memset(hqvdp->hqvdp_cmd, 0, size);
}
-static const struct sti_layer_funcs hqvdp_ops = {
- .get_formats = sti_hqvdp_get_formats,
- .get_nb_formats = sti_hqvdp_get_nb_formats,
- .init = sti_hqvd_init,
- .prepare = sti_hqvdp_prepare_layer,
- .commit = sti_hqvdp_commit_layer,
- .disable = sti_hqvdp_disable_layer,
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src coordinates are expressed in 16.16 fixed-point format */
+ int src_x = state->src_x >> 16;
+ int src_y = state->src_y >> 16;
+ int src_w = state->src_w >> 16;
+ int src_h = state->src_h >> 16;
+ bool first_prepare = (plane->status == STI_PLANE_DISABLED);
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return;
+ }
+ cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+ if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+ src_w, src_h,
+ dst_w, dst_h)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return;
+ }
+
+ /* Static parameters, defaulting to progressive mode */
+ cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+ cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+ cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+ cmd->csdi.config = CSDI_CONFIG_PROG;
+
+ /* VC1RE and FMD bypassed: keep everything set to 0
+ * IQI/P2I bypassed */
+ cmd->iqi.config = IQI_CONFIG_DFLT;
+ cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+ cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+ cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&fb->pixel_format,
+ (unsigned long)cma_obj->paddr);
+
+ /* Buffer planes address */
+ cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
+ cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+
+ /* Pitches */
+ cmd->top.luma_processed_pitch = fb->pitches[0];
+ cmd->top.luma_src_pitch = fb->pitches[0];
+ cmd->top.chroma_processed_pitch = fb->pitches[1];
+ cmd->top.chroma_src_pitch = fb->pitches[1];
+
+ /* Input / output size
+ * Align to upper even value */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
+
+ if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+ (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+ (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+ (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ src_w, src_h,
+ dst_w, dst_h);
+ return;
+ }
+
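+ /* sizes and origins are packed as (vertical << 16) | horizontal */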
+ cmd->top.input_viewport_size = src_h << 16 | src_w;
+ cmd->top.input_frame_size = src_h << 16 | src_w;
+ cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
+ cmd->top.input_viewport_ori = src_y << 16 | src_x;
+
+ /* Handle interlaced */
+ if (fb->flags & DRM_MODE_FB_INTERLACED) {
+ /* Top field to display */
+ cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+ /* Update pitches and vert size */
+ cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
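+ /* each field skips every other line in memory, hence the doubled pitches */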
+ cmd->top.luma_processed_pitch *= 2;
+ cmd->top.luma_src_pitch *= 2;
+ cmd->top.chroma_processed_pitch *= 2;
+ cmd->top.chroma_src_pitch *= 2;
+
+ /* Enable directional deinterlacing processing */
+ cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+ cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+ cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+ }
+
+ /* Update hvsrc lut coef */
+ scale_h = SCALE_FACTOR * dst_w / src_w;
+ sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+ scale_v = SCALE_FACTOR * dst_h / src_h;
+ sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+ if (first_prepare) {
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if (sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb,
+ mixer->id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return;
+ }
+ }
+
+ writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+
+ /* Interlaced: get ready to display the bottom field at the next Vsync */
+ if (fb->flags & DRM_MODE_FB_INTERLACED)
+ hqvdp->btm_field_pending = true;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+ plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+ if (!drm_plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+ drm_plane->base.id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+
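+ /* only flag the plane here; the hardware disable completes on a later vsync event */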
+ plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+ .atomic_update = sti_hqvdp_atomic_update,
+ .atomic_disable = sti_hqvdp_atomic_disable,
};
-struct sti_layer *sti_hqvdp_create(struct device *dev)
+static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
+ struct device *dev, int desc)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+ int res;
+
+ hqvdp->plane.desc = desc;
+ hqvdp->plane.status = STI_PLANE_DISABLED;
- hqvdp->layer.ops = &hqvdp_ops;
+ sti_hqvdp_init(hqvdp);
- return &hqvdp->layer;
+ res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
+ &sti_plane_helpers_funcs,
+ hqvdp_supported_formats,
+ ARRAY_SIZE(hqvdp_supported_formats),
+ DRM_PLANE_TYPE_OVERLAY);
+ if (res) {
+ DRM_ERROR("Failed to initialize universal plane\n");
+ return NULL;
+ }
+
+ drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
+
+ sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+
+ return &hqvdp->plane.drm_plane;
}
-EXPORT_SYMBOL(sti_hqvdp_create);
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
@@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
} *header;
DRM_DEBUG_DRIVER("\n");
+
+ if (hqvdp->xp70_initialized) {
+ DRM_INFO("HQVDP XP70 already initialized\n");
+ return;
+ }
+
/* Check firmware parts */
if (!firmware) {
DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
/* Launch Vsync */
writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
- DRM_INFO("HQVDP XP70 started\n");
+ DRM_INFO("HQVDP XP70 initialized\n");
+
+ hqvdp->xp70_initialized = true;
+
out:
release_firmware(firmware);
}
@@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct sti_layer *layer;
+ struct drm_plane *plane;
int err;
DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
return err;
}
- layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
- if (!layer) {
+ /* Create HQVDP plane once xp70 is initialized */
+ plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
+ if (!plane)
DRM_ERROR("Can't create HQVDP plane\n");
- return -ENOMEM;
- }
-
- sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HQVDP_H_
-#define _STI_HQVDP_H_
-
-struct sti_layer *sti_hqvdp_create(struct device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_cursor.h"
-#include "sti_gdp.h"
-#include "sti_hqvdp.h"
-#include "sti_layer.h"
-#include "sti_vid.h"
-
-const char *sti_layer_to_str(struct sti_layer *layer)
-{
- switch (layer->desc) {
- case STI_GDP_0:
- return "GDP0";
- case STI_GDP_1:
- return "GDP1";
- case STI_GDP_2:
- return "GDP2";
- case STI_GDP_3:
- return "GDP3";
- case STI_VID_0:
- return "VID0";
- case STI_VID_1:
- return "VID1";
- case STI_CURSOR:
- return "CURSOR";
- case STI_HQVDP_0:
- return "HQVDP0";
- default:
- return "<UNKNOWN LAYER>";
- }
-}
-EXPORT_SYMBOL(sti_layer_to_str);
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
- void __iomem *baseaddr)
-{
-
- struct sti_layer *layer = NULL;
-
- switch (desc & STI_LAYER_TYPE_MASK) {
- case STI_GDP:
- layer = sti_gdp_create(dev, desc);
- break;
- case STI_VID:
- layer = sti_vid_create(dev);
- break;
- case STI_CUR:
- layer = sti_cursor_create(dev);
- break;
- case STI_VDP:
- layer = sti_hqvdp_create(dev);
- break;
- }
-
- if (!layer) {
- DRM_ERROR("Failed to create layer\n");
- return NULL;
- }
-
- layer->desc = desc;
- layer->dev = dev;
- layer->regs = baseaddr;
-
- layer->ops->init(layer);
-
- DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
-
- return layer;
-}
-EXPORT_SYMBOL(sti_layer_create);
-
-int sti_layer_prepare(struct sti_layer *layer,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode, int mixer_id,
- int dest_x, int dest_y, int dest_w, int dest_h,
- int src_x, int src_y, int src_w, int src_h)
-{
- int ret;
- unsigned int i;
- struct drm_gem_cma_object *cma_obj;
-
- if (!layer || !fb || !mode) {
- DRM_ERROR("Null fb, layer or mode\n");
- return 1;
- }
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return 1;
- }
-
- layer->crtc = crtc;
- layer->fb = fb;
- layer->mode = mode;
- layer->mixer_id = mixer_id;
- layer->dst_x = dest_x;
- layer->dst_y = dest_y;
- layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
- layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
- layer->src_x = src_x;
- layer->src_y = src_y;
- layer->src_w = src_w;
- layer->src_h = src_h;
- layer->format = fb->pixel_format;
- layer->vaddr = cma_obj->vaddr;
- layer->paddr = cma_obj->paddr;
- for (i = 0; i < 4; i++) {
- layer->pitches[i] = fb->pitches[i];
- layer->offsets[i] = fb->offsets[i];
- }
-
- DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
- sti_layer_to_str(layer),
- layer->mixer_id);
- DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
- sti_layer_to_str(layer),
- layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
- layer->src_w, layer->src_h, layer->src_x,
- layer->src_y);
-
- DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
- (char *)&layer->format, (unsigned long)layer->paddr);
-
- if (!layer->ops->prepare)
- goto err_no_prepare;
-
- ret = layer->ops->prepare(layer, !layer->enabled);
- if (!ret)
- layer->enabled = true;
-
- return ret;
-
-err_no_prepare:
- DRM_ERROR("Cannot prepare\n");
- return 1;
-}
-
-int sti_layer_commit(struct sti_layer *layer)
-{
- if (!layer)
- return 1;
-
- if (!layer->ops->commit)
- goto err_no_commit;
-
- return layer->ops->commit(layer);
-
-err_no_commit:
- DRM_ERROR("Cannot commit\n");
- return 1;
-}
-
-int sti_layer_disable(struct sti_layer *layer)
-{
- int ret;
-
- DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
- if (!layer)
- return 1;
-
- if (!layer->enabled)
- return 0;
-
- if (!layer->ops->disable)
- goto err_no_disable;
-
- ret = layer->ops->disable(layer);
- if (!ret)
- layer->enabled = false;
- else
- DRM_ERROR("Disable failed\n");
-
- return ret;
-
-err_no_disable:
- DRM_ERROR("Cannot disable\n");
- return 1;
-}
-
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
-{
- if (!layer)
- return NULL;
-
- if (!layer->ops->get_formats)
- return NULL;
-
- return layer->ops->get_formats(layer);
-}
-
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
-{
- if (!layer)
- return 0;
-
- if (!layer->ops->get_nb_formats)
- return 0;
-
- return layer->ops->get_nb_formats(layer);
-}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- * Fabien Dessenne <fabien.dessenne@st.com>
- * for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_LAYER_H_
-#define _STI_LAYER_H_
-
-#include <drm/drmP.h>
-
-#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
-
-#define STI_LAYER_TYPE_SHIFT 8
-#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
-
-struct sti_layer;
-
-enum sti_layer_type {
- STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
- STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
- STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
- STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
- STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
-};
-
-enum sti_layer_id_of_type {
- STI_ID_0 = 0,
- STI_ID_1 = 1,
- STI_ID_2 = 2,
- STI_ID_3 = 3
-};
-
-enum sti_layer_desc {
- STI_GDP_0 = STI_GDP | STI_ID_0,
- STI_GDP_1 = STI_GDP | STI_ID_1,
- STI_GDP_2 = STI_GDP | STI_ID_2,
- STI_GDP_3 = STI_GDP | STI_ID_3,
- STI_VID_0 = STI_VID | STI_ID_0,
- STI_VID_1 = STI_VID | STI_ID_1,
- STI_HQVDP_0 = STI_VDP | STI_ID_0,
- STI_CURSOR = STI_CUR,
- STI_BACK = STI_BCK
-};
-
-/**
- * STI layer functions structure
- *
- * @get_formats: get layer supported formats
- * @get_nb_formats: get number of format supported
- * @init: initialize the layer
- * @prepare: prepare layer before rendering
- * @commit: set layer for rendering
- * @disable: disable layer
- */
-struct sti_layer_funcs {
- const uint32_t* (*get_formats)(struct sti_layer *layer);
- unsigned int (*get_nb_formats)(struct sti_layer *layer);
- void (*init)(struct sti_layer *layer);
- int (*prepare)(struct sti_layer *layer, bool first_prepare);
- int (*commit)(struct sti_layer *layer);
- int (*disable)(struct sti_layer *layer);
-};
-
-/**
- * STI layer structure
- *
- * @plane: drm plane it is bound to (if any)
- * @fb: drm fb it is bound to
- * @crtc: crtc it is bound to
- * @mode: display mode
- * @desc: layer type & id
- * @device: driver device
- * @regs: layer registers
- * @ops: layer functions
- * @zorder: layer z-order
- * @mixer_id: id of the mixer used to display the layer
- * @enabled: to know if the layer is active or not
- * @src_x src_y: coordinates of the input (fb) area
- * @src_w src_h: size of the input (fb) area
- * @dst_x dst_y: coordinates of the output (crtc) area
- * @dst_w dst_h: size of the output (crtc) area
- * @format: format
- * @pitches: pitch of 'planes' (eg: Y, U, V)
- * @offsets: offset of 'planes'
- * @vaddr: virtual address of the input buffer
- * @paddr: physical address of the input buffer
- */
-struct sti_layer {
- struct drm_plane plane;
- struct drm_framebuffer *fb;
- struct drm_crtc *crtc;
- struct drm_display_mode *mode;
- enum sti_layer_desc desc;
- struct device *dev;
- void __iomem *regs;
- const struct sti_layer_funcs *ops;
- int zorder;
- int mixer_id;
- bool enabled;
- int src_x, src_y;
- int src_w, src_h;
- int dst_x, dst_y;
- int dst_w, dst_h;
- uint32_t format;
- unsigned int pitches[4];
- unsigned int offsets[4];
- void *vaddr;
- dma_addr_t paddr;
-};
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
- void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- int mixer_id,
- int dest_x, int dest_y,
- int dest_w, int dest_h,
- int src_x, int src_y,
- int src_w, int src_h);
-int sti_layer_commit(struct sti_layer *layer);
-int sti_layer_disable(struct sti_layer *layer);
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
-const char *sti_layer_to_str(struct sti_layer *layer);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84deab6..0182e9365004 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
return "<UNKNOWN MIXER>";
}
}
+EXPORT_SYMBOL(sti_mixer_to_str);
static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
{
@@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
}
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
- int layer_id = 0, depth = layer->zorder;
+ int plane_id, depth = plane->zorder;
+ unsigned int i;
u32 mask, val;
- if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+ if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
return 1;
- switch (layer->desc) {
+ switch (plane->desc) {
case STI_GDP_0:
- layer_id = GAM_DEPTH_GDP0_ID;
+ plane_id = GAM_DEPTH_GDP0_ID;
break;
case STI_GDP_1:
- layer_id = GAM_DEPTH_GDP1_ID;
+ plane_id = GAM_DEPTH_GDP1_ID;
break;
case STI_GDP_2:
- layer_id = GAM_DEPTH_GDP2_ID;
+ plane_id = GAM_DEPTH_GDP2_ID;
break;
case STI_GDP_3:
- layer_id = GAM_DEPTH_GDP3_ID;
+ plane_id = GAM_DEPTH_GDP3_ID;
break;
- case STI_VID_0:
case STI_HQVDP_0:
- layer_id = GAM_DEPTH_VID0_ID;
- break;
- case STI_VID_1:
- layer_id = GAM_DEPTH_VID1_ID;
+ plane_id = GAM_DEPTH_VID0_ID;
break;
case STI_CURSOR:
/* no need to set depth for cursor */
return 0;
default:
- DRM_ERROR("Unknown layer %d\n", layer->desc);
+ DRM_ERROR("Unknown plane %d\n", plane->desc);
return 1;
}
- mask = GAM_DEPTH_MASK_ID << (3 * depth);
- layer_id = layer_id << (3 * depth);
+
+ /* Check whether a depth was already assigned to this plane */
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+ for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+ mask = GAM_DEPTH_MASK_ID << (3 * i);
+ if ((val & mask) == plane_id << (3 * i))
+ break;
+ }
+
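+ /* clear the slot found above as well as the slot for the new depth */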
+ mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
+ plane_id = plane_id << (3 * (depth - 1));
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
- sti_layer_to_str(layer), depth);
+ sti_plane_to_str(plane), depth);
dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
- layer_id, mask);
+ plane_id, mask);
- val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
val &= ~mask;
- val |= layer_id;
+ val |= plane_id;
sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
return 0;
}
-static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
{
- switch (layer->desc) {
+ switch (plane->desc) {
case STI_BACK:
return GAM_CTL_BACK_MASK;
case STI_GDP_0:
@@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
return GAM_CTL_GDP2_MASK;
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
- case STI_VID_0:
case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
- case STI_VID_1:
- return GAM_CTL_VID1_MASK;
case STI_CURSOR:
return GAM_CTL_CURSOR_MASK;
default:
@@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
}
}
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
- struct sti_layer *layer, bool status)
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+ struct sti_plane *plane, bool status)
{
u32 mask, val;
DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
- sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+ sti_mixer_to_str(mixer), sti_plane_to_str(plane));
- mask = sti_mixer_get_layer_mask(layer);
+ mask = sti_mixer_get_plane_mask(plane);
if (!mask) {
- DRM_ERROR("Can not find layer mask\n");
+ DRM_ERROR("Can't find layer mask\n");
return -EINVAL;
}
@@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
-{
- u32 val;
-
- DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
- val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
- sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
-}
-
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b97282182908..efb1a9a5ba86 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
#include <drm/drmP.h>
-#include "sti_layer.h"
+#include "sti_plane.h"
#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
+enum sti_mixer_status {
+ STI_MIXER_READY,
+ STI_MIXER_DISABLING,
+ STI_MIXER_DISABLED,
+};
+
/**
* STI Mixer subdevice structure
*
@@ -23,33 +29,32 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
- * @enabled: to know if the mixer is active or not
+ * @status: to know the status of the mixer
*/
struct sti_mixer {
struct device *dev;
void __iomem *regs;
int id;
- struct drm_crtc drm_crtc;
+ struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
- bool enabled;
+ enum sti_mixer_status status;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
struct sti_mixer *sti_mixer_create(struct device *dev, int id,
- void __iomem *baseaddr);
+ void __iomem *baseaddr);
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
- struct sti_layer *layer, bool status);
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+ struct sti_plane *plane, bool status);
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
- struct drm_display_mode *mode);
+ struct drm_display_mode *mode);
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
/* depth in Cross-bar control = z order */
-#define GAM_MIXER_NB_DEPTH_LEVEL 7
+#define GAM_MIXER_NB_DEPTH_LEVEL 6
#define STI_MIXER_MAIN 0
#define STI_MIXER_AUX 1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 000000000000..d5c5e91f2956
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drv.h"
+#include "sti_plane.h"
+
+/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
+enum sti_plane_desc sti_plane_default_zorder[] = {
+ STI_GDP_0,
+ STI_GDP_1,
+ STI_HQVDP_0,
+ STI_GDP_2,
+ STI_GDP_3,
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane)
+{
+ switch (plane->desc) {
+ case STI_GDP_0:
+ return "GDP0";
+ case STI_GDP_1:
+ return "GDP1";
+ case STI_GDP_2:
+ return "GDP2";
+ case STI_GDP_3:
+ return "GDP3";
+ case STI_HQVDP_0:
+ return "HQVDP0";
+ case STI_CURSOR:
+ return "CURSOR";
+ default:
+ return "<UNKNOWN PLANE>";
+ }
+}
+EXPORT_SYMBOL(sti_plane_to_str);
+
+static void sti_plane_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_plane_set_property(struct drm_plane *drm_plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct sti_private *private = dev->dev_private;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (property == private->plane_zorder_property) {
+ plane->zorder = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct sti_private *private = dev->dev_private;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct drm_property *prop;
+
+ prop = private->plane_zorder_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 1,
+ GAM_MIXER_NB_DEPTH_LEVEL);
+ if (!prop)
+ return;
+
+ private->plane_zorder_property = prop;
+ }
+
+ drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
+}
+
+void sti_plane_init_property(struct sti_plane *plane,
+ enum drm_plane_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
+ if (sti_plane_default_zorder[i] == plane->desc)
+ break;
+
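+ /* the default zorder is the 1-based position in sti_plane_default_zorder */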
+ plane->zorder = i + 1;
+
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ sti_plane_attach_zorder_property(&plane->drm_plane);
+
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
+ plane->drm_plane.base.id,
+ sti_plane_to_str(plane), plane->zorder);
+}
+EXPORT_SYMBOL(sti_plane_init_property);
+
+struct drm_plane_funcs sti_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_plane_destroy,
+ .set_property = sti_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+EXPORT_SYMBOL(sti_plane_helpers_funcs);
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 000000000000..86f1e6fc81b9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_PLANE_H_
+#define _STI_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+extern struct drm_plane_funcs sti_plane_helpers_funcs;
+
+#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
+
+#define STI_PLANE_TYPE_SHIFT 8
+#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
+
+enum sti_plane_type {
+ STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
+ STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
+ STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
+ STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
+};
+
+enum sti_plane_id_of_type {
+ STI_ID_0 = 0,
+ STI_ID_1 = 1,
+ STI_ID_2 = 2,
+ STI_ID_3 = 3
+};
+
+enum sti_plane_desc {
+ STI_GDP_0 = STI_GDP | STI_ID_0,
+ STI_GDP_1 = STI_GDP | STI_ID_1,
+ STI_GDP_2 = STI_GDP | STI_ID_2,
+ STI_GDP_3 = STI_GDP | STI_ID_3,
+ STI_HQVDP_0 = STI_VDP | STI_ID_0,
+ STI_CURSOR = STI_CUR,
+ STI_BACK = STI_BCK
+};
+
+enum sti_plane_status {
+ STI_PLANE_READY,
+ STI_PLANE_UPDATED,
+ STI_PLANE_DISABLING,
+ STI_PLANE_FLUSHING,
+ STI_PLANE_DISABLED,
+};
+
+/**
+ * STI plane structure
+ *
+ * @plane: drm plane it is bound to (if any)
+ * @desc: plane type & id
+ * @status: to know the status of the plane
+ * @zorder: plane z-order
+ */
+struct sti_plane {
+ struct drm_plane drm_plane;
+ enum sti_plane_desc desc;
+ enum sti_plane_status status;
+ int zorder;
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_init_property(struct sti_plane *plane,
+ enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc53116508e..c1aac8e66fb5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
@@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+ tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i;
- int ret;
tvout->drm_dev = drm_dev;
@@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
sti_tvout_create_encoders(drm_dev, tvout);
- ret = component_bind_all(dev, drm_dev);
- if (ret)
- sti_tvout_destroy_encoders(tvout);
-
- return ret;
+ return 0;
}
static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
- /* do nothing */
+ struct sti_tvout *tvout = dev_get_drvdata(dev);
+
+ sti_tvout_destroy_encoders(tvout);
}
static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
.unbind = sti_tvout_unbind,
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int sti_tvout_master_bind(struct device *dev)
-{
- return 0;
-}
-
-static void sti_tvout_master_unbind(struct device *dev)
-{
- /* do nothing */
-}
-
-static const struct component_master_ops sti_tvout_master_ops = {
- .bind = sti_tvout_master_bind,
- .unbind = sti_tvout_master_unbind,
-};
-
static int sti_tvout_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
struct resource *res;
- struct device_node *child_np;
- struct component_match *match = NULL;
DRM_INFO("%s\n", __func__);
@@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tvout);
- of_platform_populate(node, NULL, NULL, dev);
-
- child_np = of_get_next_available_child(node, NULL);
-
- while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
- child_np = of_get_next_available_child(node, child_np);
- }
-
- component_master_add_with_match(dev, &sti_tvout_master_ops, match);
-
return component_add(dev, &sti_tvout_ops);
}
static int sti_tvout_remove(struct platform_device *pdev)
{
- component_master_del(&pdev->dev, &sti_tvout_master_ops);
component_del(&pdev->dev, &sti_tvout_ops);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..a8254cc362a1 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
#include <drm/drmP.h>
-#include "sti_layer.h"
+#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
@@ -43,35 +43,37 @@
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
-static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+void sti_vid_commit(struct sti_vid *vid,
+ struct drm_plane_state *state)
{
- u32 val;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_display_mode *mode = &crtc->mode;
+ int dst_x = state->crtc_x;
+ int dst_y = state->crtc_y;
+ int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ u32 val, ydo, xdo, yds, xds;
+
+ /* Input / output size
+ * Align to upper even value */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
/* Unmask */
val = readl(vid->regs + VID_CTL);
val &= ~VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
- return 0;
-}
-
-static int sti_vid_commit_layer(struct sti_layer *vid)
-{
- struct drm_display_mode *mode = vid->mode;
- u32 ydo, xdo, yds, xds;
-
- ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
- yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
- xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
- xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+ ydo = sti_vtg_get_line_number(*mode, dst_y);
+ yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
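+ /* output viewport: top-left corner in VID_VPO, bottom-right in VID_VPS */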
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
-
- return 0;
}
-static int sti_vid_disable_layer(struct sti_layer *vid)
+void sti_vid_disable(struct sti_vid *vid)
{
u32 val;
@@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
val = readl(vid->regs + VID_CTL);
val |= VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
-
- return 0;
}
-static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
-{
- return NULL;
-}
-
-static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
-{
- return 0;
-}
-
-static void sti_vid_init(struct sti_layer *vid)
+static void sti_vid_init(struct sti_vid *vid)
{
/* Enable PSI, Mask layer */
writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
-static const struct sti_layer_funcs vid_ops = {
- .get_formats = sti_vid_get_formats,
- .get_nb_formats = sti_vid_get_nb_formats,
- .init = sti_vid_init,
- .prepare = sti_vid_prepare_layer,
- .commit = sti_vid_commit_layer,
- .disable = sti_vid_disable_layer,
-};
-
-struct sti_layer *sti_vid_create(struct device *dev)
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+ void __iomem *baseaddr)
{
- struct sti_layer *vid;
+ struct sti_vid *vid;
vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
if (!vid) {
@@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
return NULL;
}
- vid->ops = &vid_ops;
+ vid->dev = dev;
+ vid->regs = baseaddr;
+ vid->id = id;
+
+ sti_vid_init(vid);
return vid;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..5dea4791f1d6 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
#ifndef _STI_VID_H_
#define _STI_VID_H_
-struct sti_layer *sti_vid_create(struct device *dev);
+/**
+ * STI VID structure
+ *
+ * @dev: driver device
+ * @regs: vid registers
+ * @id: id of the vid
+ */
+struct sti_vid {
+ struct device *dev;
+ void __iomem *regs;
+ int id;
+};
+
+void sti_vid_commit(struct sti_vid *vid,
+ struct drm_plane_state *state);
+void sti_vid_disable(struct sti_vid *vid);
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+ void __iomem *baseaddr);
#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a287e4fec865..ddefb85dc4f7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@ to_tegra_plane_state(struct drm_plane_state *state)
return NULL;
}
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+ stats->frames = 0;
+ stats->vblank = 0;
+ stats->underflow = 0;
+ stats->overflow = 0;
+}
+
/*
* Reads the active copy of a register. This takes the dc->lock spinlock to
* prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
/* position the cursor */
value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
}
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
/*
- * We'll treat the cursor as an overlay plane with index 6 here so
- * that the update and activation request bits in DC_CMD_STATE_CONTROL
- * match up.
+ * This index is kind of fake. The cursor isn't a regular plane, but
+ * its update and activation request bits in DC_CMD_STATE_CONTROL do
+ * use the same programming. Setting this fake index here allows the
+ * code in tegra_add_plane_state() to do the right thing without the
+ * need to special-case the cursor plane.
*/
plane->index = 6;
@@ -1015,6 +1024,8 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
crtc->state = &state->base;
crtc->state->crtc = crtc;
}
+
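+ /* mark vblanks as off to match the freshly reset CRTC state */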
+ drm_crtc_vblank_reset(crtc);
}
static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
};
-static void tegra_dc_stop(struct tegra_dc *dc)
-{
- u32 value;
-
- /* stop the display controller */
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value &= ~DISP_CTRL_MODE_MASK;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
- tegra_dc_commit(dc);
-}
-
-static bool tegra_dc_idle(struct tegra_dc *dc)
-{
- u32 value;
-
- value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
-
- return (value & DISP_CTRL_MODE_MASK) == 0;
-}
-
-static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
-{
- timeout = jiffies + msecs_to_jiffies(timeout);
-
- while (time_before(jiffies, timeout)) {
- if (tegra_dc_idle(dc))
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
- return -ETIMEDOUT;
-}
-
-static void tegra_crtc_disable(struct drm_crtc *crtc)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- u32 value;
-
- if (!tegra_dc_idle(dc)) {
- tegra_dc_stop(dc);
-
- /*
- * Ignore the return value, there isn't anything useful to do
- * in case this fails.
- */
- tegra_dc_wait_idle(dc, 100);
- }
-
- /*
- * This should really be part of the RGB encoder driver, but clearing
- * these bits has the side-effect of stopping the display controller.
- * When that happens no VBLANK interrupts will be raised. At the same
- * time the encoder is disabled before the display controller, so the
- * above code is always going to timeout waiting for the controller
- * to go idle.
- *
- * Given the close coupling between the RGB encoder and the display
- * controller doing it here is still kind of okay. None of the other
- * encoder drivers require these bits to be cleared.
- *
- * XXX: Perhaps given that the display controller is switched off at
- * this point anyway maybe clearing these bits isn't even useful for
- * the RGB encoder?
- */
- if (dc->rgb) {
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- }
-
- drm_crtc_vblank_off(crtc);
-}
-
-static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
- return true;
-}
-
static int tegra_dc_set_timings(struct tegra_dc *dc,
struct drm_display_mode *mode)
{
@@ -1229,7 +1156,85 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+ u32 value;
+
+ /* stop the display controller */
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+ u32 value;
+
+ value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+ return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ if (tegra_dc_idle(dc))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+ return -ETIMEDOUT;
+}
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ if (!tegra_dc_idle(dc)) {
+ tegra_dc_stop(dc);
+
+ /*
+ * Ignore the return value, there isn't anything useful to do
+ * in case this fails.
+ */
+ tegra_dc_wait_idle(dc, 100);
+ }
+
+ /*
+ * This should really be part of the RGB encoder driver, but clearing
+ * these bits has the side-effect of stopping the display controller.
+ * When that happens no VBLANK interrupts will be raised. At the same
+ * time the encoder is disabled before the display controller, so the
+ * above code is always going to timeout waiting for the controller
+ * to go idle.
+ *
+ * Given the close coupling between the RGB encoder and the display
+ * controller doing it here is still kind of okay. None of the other
+ * encoder drivers require these bits to be cleared.
+ *
+ * XXX: Perhaps given that the display controller is switched off at
+ * this point anyway maybe clearing these bits isn't even useful for
+ * the RGB encoder?
+ */
+ if (dc->rgb) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ }
+
+ tegra_dc_stats_reset(&dc->stats);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void tegra_crtc_enable(struct drm_crtc *crtc)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
tegra_dc_commit(dc);
-}
-
-static void tegra_crtc_prepare(struct drm_crtc *crtc)
-{
- drm_crtc_vblank_off(crtc);
-}
-static void tegra_crtc_commit(struct drm_crtc *crtc)
-{
drm_crtc_vblank_on(crtc);
}
@@ -1277,7 +1274,8 @@ static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1291,7 +1289,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
}
}
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1302,10 +1301,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
- .mode_fixup = tegra_crtc_mode_fixup,
- .mode_set_nofb = tegra_crtc_mode_set_nofb,
- .prepare = tegra_crtc_prepare,
- .commit = tegra_crtc_commit,
+ .enable = tegra_crtc_enable,
.atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
@@ -1323,6 +1319,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): frame end\n", __func__);
*/
+ dc->stats.frames++;
}
if (status & VBLANK_INT) {
@@ -1331,12 +1328,21 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
*/
drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
+ dc->stats.vblank++;
}
if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
/*
dev_dbg(dc->dev, "%s(): underflow\n", __func__);
*/
+ dc->stats.underflow++;
+ }
+
+ if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+ */
+ dc->stats.overflow++;
}
return IRQ_HANDLED;
@@ -1346,6 +1352,14 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+
+ drm_modeset_lock_crtc(&dc->base, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
@@ -1566,11 +1580,59 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
+unlock:
+ drm_modeset_unlock_crtc(&dc->base);
+ return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+ u32 value;
+
+ drm_modeset_lock_crtc(&dc->base, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+ tegra_dc_commit(dc);
+
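+ /* wait two vblanks so the CRC covers at least one complete frame */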
+ drm_crtc_wait_one_vblank(&dc->base);
+ drm_crtc_wait_one_vblank(&dc->base);
+
+ value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+ seq_printf(s, "%08x\n", value);
+
+ tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+ drm_modeset_unlock_crtc(&dc->base);
+ return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+ seq_printf(s, "frames: %lu\n", dc->stats.frames);
+ seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+ seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+ seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
return 0;
}
static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_dc_show_regs, 0, NULL },
+ { "crc", tegra_dc_show_crc, 0, NULL },
+ { "stats", tegra_dc_show_stats, 0, NULL },
};
static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1716,7 +1778,8 @@ static int tegra_dc_init(struct host1x_client *client)
tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
}
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1732,15 +1795,19 @@ static int tegra_dc_init(struct host1x_client *client)
WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
if (dc->soc->supports_border_color)
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+ tegra_dc_stats_reset(&dc->stats);
+
return 0;
cleanup:
@@ -1826,8 +1893,20 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.has_powergate = true,
};
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+ .supports_border_color = false,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .pitch_align = 64,
+ .has_powergate = true,
+};
+
static const struct of_device_id tegra_dc_of_match[] = {
{
+ .compatible = "nvidia,tegra210-dc",
+ .data = &tegra210_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra124-dc",
.data = &tegra124_dc_soc_info,
}, {
@@ -1957,6 +2036,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+ if (!dc->syncpt)
+ dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
INIT_LIST_HEAD(&dc->client.list);
dc->client.ops = &dc_client_ops;
dc->client.dev = &pdev->dev;
@@ -1974,10 +2057,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
- if (!dc->syncpt)
- dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
-
platform_set_drvdata(pdev, dc);
return 0;
@@ -2016,7 +2095,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
- .owner = THIS_MODULE,
.of_match_table = tegra_dc_of_match,
},
.probe = tegra_dc_probe,
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792daabbb5..4a268635749b 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
#define DC_CMD_REG_ACT_CONTROL 0x043
#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CONTROL_ALWAYS (1 << 3)
+#define DC_COM_CRC_CONTROL_FULL_FRAME (0 << 2)
+#define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define DC_COM_CRC_CONTROL_WAIT (1 << 1)
+#define DC_COM_CRC_CONTROL_ENABLE (1 << 0)
#define DC_COM_CRC_CHECKSUM 0x301
#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
-#define H_PULSE_0_ENABLE (1 << 8)
-#define H_PULSE_1_ENABLE (1 << 10)
-#define H_PULSE_2_ENABLE (1 << 12)
+#define H_PULSE0_ENABLE (1 << 8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
#define DC_DISP_DISP_WIN_OPTIONS 0x402
#define HDMI_ENABLE (1 << 30)
#define DSI_ENABLE (1 << 29)
+#define SOR1_TIMING_CYA (1 << 27)
+#define SOR1_ENABLE (1 << 26)
#define SOR_ENABLE (1 << 25)
#define CURSOR_ENABLE (1 << 16)
@@ -242,9 +249,20 @@
#define BASE_COLOR_SIZE565 (6 << 0)
#define BASE_COLOR_SIZE332 (7 << 0)
#define BASE_COLOR_SIZE888 (8 << 0)
+#define DITHER_CONTROL_MASK (3 << 8)
#define DITHER_CONTROL_DISABLE (0 << 8)
#define DITHER_CONTROL_ORDERED (2 << 8)
#define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK (0xf << 0)
+#define BASE_COLOR_SIZE_666 (0 << 0)
+#define BASE_COLOR_SIZE_111 (1 << 0)
+#define BASE_COLOR_SIZE_222 (2 << 0)
+#define BASE_COLOR_SIZE_333 (3 << 0)
+#define BASE_COLOR_SIZE_444 (4 << 0)
+#define BASE_COLOR_SIZE_555 (5 << 0)
+#define BASE_COLOR_SIZE_565 (6 << 0)
+#define BASE_COLOR_SIZE_332 (7 << 0)
+#define BASE_COLOR_SIZE_888 (8 << 0)
#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
#define SC1_H_QUALIFIER_NONE (1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b26972f487..224a7dc8e4ed 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
}
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
- if (IS_ERR(dpaux->rst))
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
return PTR_ERR(dpaux->rst);
+ }
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dpaux->clk))
+ if (IS_ERR(dpaux->clk)) {
+ dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+ PTR_ERR(dpaux->clk));
return PTR_ERR(dpaux->clk);
+ }
err = clk_prepare_enable(dpaux->clk);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+ err);
return err;
+ }
reset_control_deassert(dpaux->rst);
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(dpaux->clk_parent))
+ if (IS_ERR(dpaux->clk_parent)) {
+ dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+ PTR_ERR(dpaux->clk_parent));
return PTR_ERR(dpaux->clk_parent);
+ }
err = clk_prepare_enable(dpaux->clk_parent);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+ err);
return err;
+ }
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
@@ -323,8 +338,11 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(dpaux->vdd))
+ if (IS_ERR(dpaux->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+ PTR_ERR(dpaux->vdd));
return PTR_ERR(dpaux->vdd);
+ }
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return err;
}
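+ /* keep the interrupt disabled until an output is attached */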
+ disable_irq(dpaux->irq);
+
dpaux->aux.transfer = tegra_dpaux_transfer;
dpaux->aux.dev = &pdev->dev;
@@ -341,6 +361,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ /*
+ * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+ * so power them up and configure them in I2C mode.
+ *
+ * The DPAUX code paths reconfigure the pads in AUX mode, but there
+ * is no way to perform the I2C mode configuration in the HDMI
+ * path.
+ */
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+ u32 value;
+
+ /* make sure pads are powered down when not in use */
+ value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
drm_dp_aux_unregister(&dpaux->aux);
@@ -376,6 +420,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra210-dpaux", },
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
};
@@ -425,8 +470,10 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
enum drm_connector_status status;
status = tegra_dpaux_detect(dpaux);
- if (status == connector_status_connected)
+ if (status == connector_status_connected) {
+ enable_irq(dpaux->irq);
return 0;
+ }
usleep_range(1000, 2000);
}
@@ -439,6 +486,8 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
unsigned long timeout;
int err;
+ disable_irq(dpaux->irq);
+
err = regulator_disable(dpaux->vdd);
if (err < 0)
return err;
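Taken together, the dpaux.c changes keep the DPAUX interrupt parked whenever no sink is attached: probe() disables the IRQ right after requesting it, attach() enables it once a connected status is detected, and detach() disables it again before powering down. A condensed sketch of the lifecycle, with the surrounding driver code elided:

	/* probe: request the interrupt but leave it disabled */
	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
			       dev_name(dpaux->dev), dpaux);
	disable_irq(dpaux->irq);

	/* attach: sink detected, start taking hot-plug interrupts */
	enable_irq(dpaux->irq);

	/* detach: stop taking interrupts before disabling the regulator */
	disable_irq(dpaux->irq);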
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245ca787..20783d9f4728 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
#define DPAUX_DP_AUX_CONFIG 0x45
#define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
#define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c6803c..6d88cf1fcd1c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
goto fbdev;
- drm_mode_config_reset(drm);
-
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
+ drm->vblank_disable_allowed = true;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto device;
+ drm_mode_config_reset(drm);
+
err = tegra_drm_fb_init(drm);
if (err < 0)
goto vblank;
@@ -1037,9 +1038,8 @@ static int host1x_drm_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops host1x_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+ host1x_drm_resume);
static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
+ { .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra210-dc", },
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra210-sor", },
+ { .compatible = "nvidia,tegra210-sor1", },
{ /* sentinel */ }
};
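The SIMPLE_DEV_PM_OPS() conversion is behavior-neutral: per its definition in include/linux/pm.h, the macro expands to essentially the open-coded table it replaces, namely:

	const struct dev_pm_ops host1x_drm_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
	};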
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fcc986d..ec49275ffb24 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
+#include <linux/of_gpio.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@ int tegra_drm_exit(struct tegra_drm *tegra);
struct tegra_dc_soc_info;
struct tegra_output;
+struct tegra_dc_stats {
+ unsigned long frames;
+ unsigned long vblank;
+ unsigned long underflow;
+ unsigned long overflow;
+};
+
struct tegra_dc {
struct host1x_client client;
struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@ struct tegra_dc {
struct tegra_output *rgb;
+ struct tegra_dc_stats stats;
struct list_head list;
struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@ struct tegra_output {
const struct edid *edid;
unsigned int hpd_irq;
int hpd_gpio;
+ enum of_gpio_flags hpd_gpio_flags;
struct drm_encoder encoder;
struct drm_connector connector;
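The new tegra_dc_stats counters are intended to be bumped from the display controller's interrupt handler; the actual wiring lives in dc.c, outside this excerpt. A hypothetical sketch, with the *_INT status-bit names used purely as placeholders:

	if (status & FRAME_END_INT)
		dc->stats.frames++;
	if (status & VBLANK_INT)
		dc->stats.vblank++;
	if (status & UNDERFLOW_INT)
		dc->stats.underflow++;
	if (status & OVERFLOW_INT)
		dc->stats.overflow++;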
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index ed970f622903..f0a138ef68ce 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dsi *dsi = node->info_ent->data;
+ struct drm_crtc *crtc = dsi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-32s %#05x %08x\n", #name, name, \
@@ -208,7 +218,9 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
/* horizontal sync width */
hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
- hsw -= 10;
/* horizontal back porch */
hbp = (mode->htotal - mode->hsync_end) * mul / div;
- hbp -= 14;
+
+ if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+ hbp += hsw;
/* horizontal front porch */
hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+ /* subtract packet overhead */
+ hsw -= 10;
+ hbp -= 14;
hfp -= 8;
tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
@@ -726,10 +743,6 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
tegra_dsi_soft_reset(dsi->slave);
}
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
static void tegra_dsi_connector_reset(struct drm_connector *connector)
{
struct tegra_dsi_state *state;
@@ -756,7 +769,7 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
- .dpms = tegra_dsi_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = tegra_dsi_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -782,22 +795,48 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ u32 value;
+ int err;
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ if (output->panel)
+ drm_panel_disable(output->panel);
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dsi_video_disable(dsi);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ err = tegra_dsi_wait_idle(dsi, 100);
+ if (err < 0)
+ dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+ tegra_dsi_soft_reset(dsi);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ tegra_dsi_disable(dsi);
+
+ return;
}
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_dsi *dsi = to_dsi(output);
@@ -835,45 +874,6 @@ static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
return;
}
-static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_dsi *dsi = to_dsi(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- tegra_dsi_video_disable(dsi);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~DSI_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_dsi_wait_idle(dsi, 100);
- if (err < 0)
- dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
-
- tegra_dsi_soft_reset(dsi);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- tegra_dsi_disable(dsi);
-
- return;
-}
-
static int
tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -956,11 +956,8 @@ tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
- .dpms = tegra_dsi_encoder_dpms,
- .prepare = tegra_dsi_encoder_prepare,
- .commit = tegra_dsi_encoder_commit,
- .mode_set = tegra_dsi_encoder_mode_set,
.disable = tegra_dsi_encoder_disable,
+ .enable = tegra_dsi_encoder_enable,
.atomic_check = tegra_dsi_encoder_atomic_check,
};
@@ -992,6 +989,10 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
DSI_PAD_OUT_CLK(0x0);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
return tegra_mipi_calibrate(dsi->mipi);
}
@@ -1621,6 +1622,9 @@ static int tegra_dsi_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra_dsi_of_match[] = {
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
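The reordering in tegra_dsi_configure() matters because, when MIPI_DSI_MODE_VIDEO_SYNC_PULSE is clear, the sync width must be folded into the back porch before the fixed packet overheads are subtracted. A worked example with hsw = 16 and hbp = 32 (both already scaled by mul/div):

	hbp += hsw;	/* hbp = 48: sync period rides in the back porch */
	hsw -= 10;	/* hsw = 6:  then subtract the packet overhead */
	hbp -= 14;	/* hbp = 34 */
	hfp -= 8;

With the previous ordering the overhead had already been subtracted from hsw at this point, so the folded-in value would have come up 10 pixels short.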
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006a5150..219263615399 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
#define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_PREEMP_PD_CLK(x) (((x) & 0x3) << 12)
+#define DSI_PAD_PREEMP_PU_CLK(x) (((x) & 0x3) << 8)
+#define DSI_PAD_PREEMP_PD(x) (((x) & 0x3) << 4)
+#define DSI_PAD_PREEMP_PU(x) (((x) & 0x3) << 0)
#define DSI_PAD_CONTROL_4 0x52
#define DSI_GANGED_MODE_CONTROL 0x53
#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34d5d5b..07c844b746b4 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@ unreference:
#ifdef CONFIG_DRM_TEGRA_FBDEV
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
if (IS_ERR(bo))
return PTR_ERR(bo);
- info = framebuffer_alloc(0, drm->dev);
- if (!info) {
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_unreference_unlocked(&bo->gem);
- return -ENOMEM;
+ return PTR_ERR(info);
}
fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &tegra_fb_ops;
- err = fb_alloc_cmap(&info->cmap, 256, 0);
- if (err < 0) {
- dev_err(drm->dev, "failed to allocate color map: %d\n", err);
- goto destroy;
- }
-
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
@@ -282,7 +276,7 @@ destroy:
drm_framebuffer_unregister_private(fb);
tegra_fb_destroy(fb);
release:
- framebuffer_release(info);
+ drm_fb_helper_release_fbi(helper);
return err;
}
@@ -347,20 +341,9 @@ fini:
static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
- struct fb_info *info = fbdev->base.fbdev;
-
- if (info) {
- int err;
- err = unregister_framebuffer(info);
- if (err < 0)
- DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&fbdev->base);
+ drm_fb_helper_release_fbi(&fbdev->base);
if (fbdev->fb) {
drm_framebuffer_unregister_private(&fbdev->fb->base);
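The explicit fb_alloc_cmap()/fb_dealloc_cmap() calls can be dropped because drm_fb_helper_alloc_fbi() and drm_fb_helper_release_fbi() manage the color map themselves. A rough sketch of what the allocation helper does internally, going by its v4.3 implementation:

	info = framebuffer_alloc(0, dev->dev);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret)
		goto err_release;

	fb_helper->fbdev = info;
	return info;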
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 06ab1783bba1..52b32cbd9de6 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,13 +772,8 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
return drm_detect_hdmi_monitor(edid);
}
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
- int mode)
-{
-}
-
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
- .dpms = tegra_hdmi_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -818,22 +813,27 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ u32 value;
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dc_commit(dc);
+ }
}
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -872,13 +872,13 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
- tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
DC_DISP_DISP_COLOR_CONTROL);
/* video_preamble uses h_pulse2 */
pulse_start = 1 + h_sync_width + h_back_porch - 10;
- tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
PULSE_LAST_END_A;
@@ -1035,24 +1035,6 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
/* TODO: add HDCP support */
}
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- u32 value;
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~HDMI_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-}
-
static int
tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -1075,11 +1057,8 @@ tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
- .dpms = tegra_hdmi_encoder_dpms,
- .prepare = tegra_hdmi_encoder_prepare,
- .commit = tegra_hdmi_encoder_commit,
- .mode_set = tegra_hdmi_encoder_mode_set,
.disable = tegra_hdmi_encoder_disable,
+ .enable = tegra_hdmi_encoder_enable,
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
@@ -1087,11 +1066,16 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_hdmi *hdmi = node->info_ent->data;
- int err;
+ struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
- err = clk_prepare_enable(hdmi->clk);
- if (err)
- return err;
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-56s %#05x %08x\n", #name, name, \
@@ -1258,9 +1242,9 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
- clk_disable_unprepare(hdmi->clk);
-
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static struct drm_info_list debugfs_files[] = {
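The show_regs() conversion applies the same guard pattern used in dsi.c above and in sor.c below: instead of force-enabling clocks, hold the modeset locks so the CRTC state cannot change underneath, and bail out unless the output is currently driven by an active CRTC. The skeleton common to all three register dumpers:

	drm_modeset_lock_all(drm);

	if (!crtc || !crtc->state->active) {
		err = -EBUSY;
		goto unlock;
	}

	/* ... DUMP_REG() body ... */

unlock:
	drm_modeset_unlock_all(drm);
	return err;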
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db47975d48..46664b622270 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/of_gpio.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
#include "drm.h"
@@ -59,10 +57,17 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status = connector_status_unknown;
if (gpio_is_valid(output->hpd_gpio)) {
- if (gpio_get_value(output->hpd_gpio) == 0)
- status = connector_status_disconnected;
- else
- status = connector_status_connected;
+ if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+ if (gpio_get_value(output->hpd_gpio) != 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (gpio_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ }
} else {
if (!output->panel)
status = connector_status_disconnected;
@@ -97,7 +102,6 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
struct device_node *ddc, *panel;
- enum of_gpio_flags flags;
int err, size;
if (!output->of_node)
@@ -128,7 +132,7 @@ int tegra_output_probe(struct tegra_output *output)
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
- &flags);
+ &output->hpd_gpio_flags);
if (gpio_is_valid(output->hpd_gpio)) {
unsigned long flags;
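The two polarity branches in tegra_output_connector_detect() are equivalent to XOR-ing the raw GPIO level with the active-low flag; a more compact sketch of the same logic (not part of the patch):

	bool active_low = output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
	bool level = gpio_get_value(output->hpd_gpio) != 0;

	if (level ^ active_low)
		status = connector_status_connected;
	else
		status = connector_status_disconnected;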
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 7cd833f5b5b5..bc9735b4ad60 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
- bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -88,13 +87,8 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
tegra_dc_writel(dc, table[i].value, table[i].offset);
}
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
- int mode)
-{
-}
-
static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
- .dpms = tegra_rgb_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -125,21 +119,22 @@ static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_rgb *rgb = to_rgb(output);
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+ if (output->panel)
+ drm_panel_disable(output->panel);
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_commit(rgb->dc);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
}
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
@@ -174,21 +169,6 @@ static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
drm_panel_enable(output->panel);
}
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_rgb *rgb = to_rgb(output);
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
- tegra_dc_commit(rgb->dc);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-}
-
static int
tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -231,11 +211,8 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
- .dpms = tegra_rgb_encoder_dpms,
- .prepare = tegra_rgb_encoder_prepare,
- .commit = tegra_rgb_encoder_commit,
- .mode_set = tegra_rgb_encoder_mode_set,
.disable = tegra_rgb_encoder_disable,
+ .enable = tegra_rgb_encoder_enable,
.atomic_check = tegra_rgb_encoder_atomic_check,
};
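rgb.c completes the same conversion already seen in dsi.c and hdmi.c: the empty .dpms/.prepare/.commit stubs are dropped in favor of drm_atomic_helper_connector_dpms, and .mode_set is merged into .enable, which now obtains the mode from the atomic CRTC state instead of taking it as an argument:

	/* inside each new ->enable() callback, as in the hunks above */
	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;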
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7591d8901f9a..da1715ebdd71 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
#include "drm.h"
#include "sor.h"
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+ unsigned long frequency;
+
+ u8 vcocap;
+ u8 ichpmp;
+ u8 loadadj;
+ u8 termadj;
+ u8 tx_pu;
+ u8 bg_vref;
+
+ u8 drive_current[4];
+ u8 preemphasis[4];
+};
+
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x10,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x40,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0xa,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ },
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x40,
+ .bg_vref = 0x8,
+ .drive_current = { 0x29, 0x29, 0x29, 0x29 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0x8,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .ichpmp = 0x6,
+ .loadadj = 0x3,
+ .termadj = 0x9,
+ .tx_pu = 0x66,
+ .bg_vref = 0xf,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .ichpmp = 0xa,
+ .loadadj = 0x3,
+ .termadj = 0xb,
+ .tx_pu = 0x66,
+ .bg_vref = 0xe,
+ .drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+ .preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+ },
+};
+#endif
+
+struct tegra_sor_soc {
+ bool supports_edp;
+ bool supports_lvds;
+ bool supports_hdmi;
+ bool supports_dp;
+
+ const struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+ const char *name;
+ int (*probe)(struct tegra_sor *sor);
+ int (*remove)(struct tegra_sor *sor);
+};
+
struct tegra_sor {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
+ const struct tegra_sor_soc *soc;
void __iomem *regs;
struct reset_control *rst;
@@ -38,12 +175,19 @@ struct tegra_sor {
struct tegra_dpaux *dpaux;
- struct mutex lock;
- bool enabled;
-
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
struct dentry *debugfs;
+
+ const struct tegra_sor_ops *ops;
+
+ /* for HDMI 2.0 */
+ struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+
+ struct regulator *avdd_io_supply;
+ struct regulator *vdd_pll_supply;
+ struct regulator *hdmi_supply;
};
struct tegra_sor_config {
@@ -94,40 +238,40 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
- value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
- SOR_LANE_POST_CURSOR_LANE2(0x00) |
- SOR_LANE_POST_CURSOR_LANE1(0x00) |
- SOR_LANE_POST_CURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+ value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+ SOR_LANE_POSTCURSOR_LANE2(0x00) |
+ SOR_LANE_POSTCURSOR_LANE1(0x00) |
+ SOR_LANE_POSTCURSOR_LANE0(0x00);
+ tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
/* disable LVDS mode */
tegra_sor_writel(sor, 0, SOR_LVDS);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
usleep_range(10, 100);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
if (err < 0)
@@ -148,11 +292,11 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
if (err < 0)
return err;
- value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value |= SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
for (i = 0, value = 0; i < link->num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
+ }
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
- tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
- tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
- tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
}
static void tegra_sor_update(struct tegra_sor *sor)
{
- tegra_sor_writel(sor, 0, SOR_STATE_0);
- tegra_sor_writel(sor, 1, SOR_STATE_0);
- tegra_sor_writel(sor, 0, SOR_STATE_0);
+ tegra_sor_writel(sor, 0, SOR_STATE0);
+ tegra_sor_writel(sor, 1, SOR_STATE0);
+ tegra_sor_writel(sor, 0, SOR_STATE0);
}
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
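tegra_sor_dp_term_calibrate() above is a successive-approximation search over the 4-bit TMDS_TERMADJ field: starting from the most significant bit, each trial bit is kept only if the TERM_COMPOUT comparator has not tripped after the settling delay. The generic shape of the algorithm, with write_field()/settle()/comparator_high() as hypothetical placeholders:

	unsigned int mask, code = 0;

	for (mask = 0x8; mask; mask >>= 1) {
		write_field(code | mask);	/* try with this bit set */
		settle();			/* usleep_range(100, 200) above */
		if (!comparator_high())
			code |= mask;		/* keep the bit */
	}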
@@ -235,16 +420,16 @@ static int tegra_sor_attach(struct tegra_sor *sor)
unsigned long value, timeout;
/* wake up in normal mode */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
value |= SOR_SUPER_STATE_MODE_NORMAL;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* attach */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value |= SOR_SUPER_STATE_ATTACHED;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
}
static int tegra_sor_calc_config(struct tegra_sor *sor,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct tegra_sor_config *config,
struct drm_dp_link *link)
{
@@ -481,9 +666,9 @@ static int tegra_sor_detach(struct tegra_sor *sor)
unsigned long value, timeout;
/* switch to safe mode */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_MODE_NORMAL;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@ static int tegra_sor_detach(struct tegra_sor *sor)
return -ETIMEDOUT;
/* go to sleep */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
/* detach */
- value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
value &= ~SOR_SUPER_STATE_ATTACHED;
- tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
tegra_sor_super_update(sor);
timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
/* stop lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value |= SOR_PLL_0_POWER_OFF;
- value |= SOR_PLL_0_VCOPD;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD;
- value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
return 0;
}
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
{
u32 value;
@@ -615,8 +787,8 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_CRC_A);
- if (value & SOR_CRC_A_VALID)
+ value = tegra_sor_readl(sor, SOR_CRCA);
+ if (value & SOR_CRCA_VALID)
return 0;
usleep_range(100, 200);
@@ -625,24 +797,25 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
return -ETIMEDOUT;
}
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
- size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
{
- struct tegra_sor *sor = file->private_data;
- ssize_t num, err;
- char buf[10];
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
u32 value;
- mutex_lock(&sor->lock);
+ drm_modeset_lock_all(drm);
- if (!sor->enabled) {
- err = -EAGAIN;
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
goto unlock;
}
- value = tegra_sor_readl(sor, SOR_STATE_1);
+ value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- tegra_sor_writel(sor, value, SOR_STATE_1);
+ tegra_sor_writel(sor, value, SOR_STATE1);
value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@ static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
if (err < 0)
goto unlock;
- tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
- value = tegra_sor_readl(sor, SOR_CRC_B);
+ tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+ value = tegra_sor_readl(sor, SOR_CRCB);
- num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
- err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+ seq_printf(s, "%08x\n", value);
unlock:
- mutex_unlock(&sor->lock);
+ drm_modeset_unlock_all(drm);
return err;
}
-static const struct file_operations tegra_sor_crc_fops = {
- .owner = THIS_MODULE,
- .open = tegra_sor_crc_open,
- .read = tegra_sor_crc_read,
- .release = tegra_sor_crc_release,
-};
-
static int tegra_sor_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
#define DUMP_REG(name) \
seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
tegra_sor_readl(sor, name))
DUMP_REG(SOR_CTXSW);
- DUMP_REG(SOR_SUPER_STATE_0);
- DUMP_REG(SOR_SUPER_STATE_1);
- DUMP_REG(SOR_STATE_0);
- DUMP_REG(SOR_STATE_1);
- DUMP_REG(SOR_HEAD_STATE_0(0));
- DUMP_REG(SOR_HEAD_STATE_0(1));
- DUMP_REG(SOR_HEAD_STATE_1(0));
- DUMP_REG(SOR_HEAD_STATE_1(1));
- DUMP_REG(SOR_HEAD_STATE_2(0));
- DUMP_REG(SOR_HEAD_STATE_2(1));
- DUMP_REG(SOR_HEAD_STATE_3(0));
- DUMP_REG(SOR_HEAD_STATE_3(1));
- DUMP_REG(SOR_HEAD_STATE_4(0));
- DUMP_REG(SOR_HEAD_STATE_4(1));
- DUMP_REG(SOR_HEAD_STATE_5(0));
- DUMP_REG(SOR_HEAD_STATE_5(1));
+ DUMP_REG(SOR_SUPER_STATE0);
+ DUMP_REG(SOR_SUPER_STATE1);
+ DUMP_REG(SOR_STATE0);
+ DUMP_REG(SOR_STATE1);
+ DUMP_REG(SOR_HEAD_STATE0(0));
+ DUMP_REG(SOR_HEAD_STATE0(1));
+ DUMP_REG(SOR_HEAD_STATE1(0));
+ DUMP_REG(SOR_HEAD_STATE1(1));
+ DUMP_REG(SOR_HEAD_STATE2(0));
+ DUMP_REG(SOR_HEAD_STATE2(1));
+ DUMP_REG(SOR_HEAD_STATE3(0));
+ DUMP_REG(SOR_HEAD_STATE3(1));
+ DUMP_REG(SOR_HEAD_STATE4(0));
+ DUMP_REG(SOR_HEAD_STATE4(1));
+ DUMP_REG(SOR_HEAD_STATE5(0));
+ DUMP_REG(SOR_HEAD_STATE5(1));
DUMP_REG(SOR_CRC_CNTRL);
DUMP_REG(SOR_DP_DEBUG_MVID);
DUMP_REG(SOR_CLK_CNTRL);
DUMP_REG(SOR_CAP);
DUMP_REG(SOR_PWR);
DUMP_REG(SOR_TEST);
- DUMP_REG(SOR_PLL_0);
- DUMP_REG(SOR_PLL_1);
- DUMP_REG(SOR_PLL_2);
- DUMP_REG(SOR_PLL_3);
+ DUMP_REG(SOR_PLL0);
+ DUMP_REG(SOR_PLL1);
+ DUMP_REG(SOR_PLL2);
+ DUMP_REG(SOR_PLL3);
DUMP_REG(SOR_CSTM);
DUMP_REG(SOR_LVDS);
- DUMP_REG(SOR_CRC_A);
- DUMP_REG(SOR_CRC_B);
+ DUMP_REG(SOR_CRCA);
+ DUMP_REG(SOR_CRCB);
DUMP_REG(SOR_BLANK);
DUMP_REG(SOR_SEQ_CTL);
DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data)
DUMP_REG(SOR_SEQ_INST(15));
DUMP_REG(SOR_PWM_DIV);
DUMP_REG(SOR_PWM_CTL);
- DUMP_REG(SOR_VCRC_A_0);
- DUMP_REG(SOR_VCRC_A_1);
- DUMP_REG(SOR_VCRC_B_0);
- DUMP_REG(SOR_VCRC_B_1);
- DUMP_REG(SOR_CCRC_A_0);
- DUMP_REG(SOR_CCRC_A_1);
- DUMP_REG(SOR_CCRC_B_0);
- DUMP_REG(SOR_CCRC_B_1);
- DUMP_REG(SOR_EDATA_A_0);
- DUMP_REG(SOR_EDATA_A_1);
- DUMP_REG(SOR_EDATA_B_0);
- DUMP_REG(SOR_EDATA_B_1);
- DUMP_REG(SOR_COUNT_A_0);
- DUMP_REG(SOR_COUNT_A_1);
- DUMP_REG(SOR_COUNT_B_0);
- DUMP_REG(SOR_COUNT_B_1);
- DUMP_REG(SOR_DEBUG_A_0);
- DUMP_REG(SOR_DEBUG_A_1);
- DUMP_REG(SOR_DEBUG_B_0);
- DUMP_REG(SOR_DEBUG_B_1);
+ DUMP_REG(SOR_VCRC_A0);
+ DUMP_REG(SOR_VCRC_A1);
+ DUMP_REG(SOR_VCRC_B0);
+ DUMP_REG(SOR_VCRC_B1);
+ DUMP_REG(SOR_CCRC_A0);
+ DUMP_REG(SOR_CCRC_A1);
+ DUMP_REG(SOR_CCRC_B0);
+ DUMP_REG(SOR_CCRC_B1);
+ DUMP_REG(SOR_EDATA_A0);
+ DUMP_REG(SOR_EDATA_A1);
+ DUMP_REG(SOR_EDATA_B0);
+ DUMP_REG(SOR_EDATA_B1);
+ DUMP_REG(SOR_COUNT_A0);
+ DUMP_REG(SOR_COUNT_A1);
+ DUMP_REG(SOR_COUNT_B0);
+ DUMP_REG(SOR_COUNT_B1);
+ DUMP_REG(SOR_DEBUG_A0);
+ DUMP_REG(SOR_DEBUG_A1);
+ DUMP_REG(SOR_DEBUG_B0);
+ DUMP_REG(SOR_DEBUG_B1);
DUMP_REG(SOR_TRIG);
DUMP_REG(SOR_MSCHECK);
DUMP_REG(SOR_XBAR_CTRL);
DUMP_REG(SOR_XBAR_POL);
- DUMP_REG(SOR_DP_LINKCTL_0);
- DUMP_REG(SOR_DP_LINKCTL_1);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
- DUMP_REG(SOR_LANE_PREEMPHASIS_0);
- DUMP_REG(SOR_LANE_PREEMPHASIS_1);
- DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
- DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
- DUMP_REG(SOR_LANE_POST_CURSOR_0);
- DUMP_REG(SOR_LANE_POST_CURSOR_1);
- DUMP_REG(SOR_DP_CONFIG_0);
- DUMP_REG(SOR_DP_CONFIG_1);
- DUMP_REG(SOR_DP_MN_0);
- DUMP_REG(SOR_DP_MN_1);
- DUMP_REG(SOR_DP_PADCTL_0);
- DUMP_REG(SOR_DP_PADCTL_1);
- DUMP_REG(SOR_DP_DEBUG_0);
- DUMP_REG(SOR_DP_DEBUG_1);
- DUMP_REG(SOR_DP_SPARE_0);
- DUMP_REG(SOR_DP_SPARE_1);
+ DUMP_REG(SOR_DP_LINKCTL0);
+ DUMP_REG(SOR_DP_LINKCTL1);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+ DUMP_REG(SOR_LANE_PREEMPHASIS0);
+ DUMP_REG(SOR_LANE_PREEMPHASIS1);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+ DUMP_REG(SOR_LANE_POSTCURSOR0);
+ DUMP_REG(SOR_LANE_POSTCURSOR1);
+ DUMP_REG(SOR_DP_CONFIG0);
+ DUMP_REG(SOR_DP_CONFIG1);
+ DUMP_REG(SOR_DP_MN0);
+ DUMP_REG(SOR_DP_MN1);
+ DUMP_REG(SOR_DP_PADCTL0);
+ DUMP_REG(SOR_DP_PADCTL1);
+ DUMP_REG(SOR_DP_DEBUG0);
+ DUMP_REG(SOR_DP_DEBUG1);
+ DUMP_REG(SOR_DP_SPARE0);
+ DUMP_REG(SOR_DP_SPARE1);
DUMP_REG(SOR_DP_AUDIO_CTRL);
DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
DUMP_REG(SOR_DP_TPG);
DUMP_REG(SOR_DP_TPG_CONFIG);
- DUMP_REG(SOR_DP_LQ_CSTM_0);
- DUMP_REG(SOR_DP_LQ_CSTM_1);
- DUMP_REG(SOR_DP_LQ_CSTM_2);
+ DUMP_REG(SOR_DP_LQ_CSTM0);
+ DUMP_REG(SOR_DP_LQ_CSTM1);
+ DUMP_REG(SOR_DP_LQ_CSTM2);
#undef DUMP_REG
- return 0;
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
}
static const struct drm_info_list debugfs_files[] = {
+ { "crc", tegra_sor_show_crc, 0, NULL },
{ "regs", tegra_sor_show_regs, 0, NULL },
};
static int tegra_sor_debugfs_init(struct tegra_sor *sor,
struct drm_minor *minor)
{
- struct dentry *entry;
+ const char *name = sor->soc->supports_dp ? "sor1" : "sor";
unsigned int i;
- int err = 0;
+ int err;
- sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+ sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
if (!sor->debugfs)
return -ENOMEM;
@@ -835,14 +1012,9 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor,
if (err < 0)
goto free;
- entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
- &tegra_sor_crc_fops);
- if (!entry) {
- err = -ENOMEM;
- goto free;
- }
+ sor->minor = minor;
- return err;
+ return 0;
free:
kfree(sor->debugfs_files);
@@ -860,14 +1032,10 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
sor->minor = NULL;
kfree(sor->debugfs_files);
- sor->debugfs = NULL;
-
- debugfs_remove_recursive(sor->debugfs);
sor->debugfs_files = NULL;
-}
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
-{
+ debugfs_remove_recursive(sor->debugfs);
+ sor->debugfs = NULL;
}
static enum drm_connector_status
@@ -879,11 +1047,11 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
if (sor->dpaux)
return tegra_dpaux_detect(sor->dpaux);
- return connector_status_unknown;
+ return tegra_output_connector_detect(connector, force);
}
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
- .dpms = tegra_sor_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -926,22 +1094,102 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_sor_edp_disable(struct drm_encoder *encoder)
{
-}
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
-static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
-{
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ if (sor->dpaux) {
+ err = tegra_dpaux_disable(sor->dpaux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to disable DP: %d\n", err);
+ }
+
+ err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ reset_control_assert(sor->rst);
+ clk_disable_unprepare(sor->clk);
}
-static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
+#if 0
+static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
+ unsigned int *value)
{
+ unsigned int hfp, hsw, hbp, a = 0, b;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
+
+ b = hfp - 1;
+
+ pr_info("a: %u, b: %u\n", a, b);
+ pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
+
+ if (a + hsw + hbp <= 11) {
+ a = 1 + 11 - hsw - hbp;
+ pr_info("a: %u\n", a);
+ }
+
+ if (a > b)
+ return -EINVAL;
+
+ if (hsw < 1)
+ return -EINVAL;
+
+ if (mode->hdisplay < 16)
+ return -EINVAL;
+
+ if (value) {
+ if (b > a && a % 2)
+ *value = a + 1;
+ else
+ *value = a;
+ }
+
+ return 0;
}
+#endif
-static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void tegra_sor_edp_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -952,14 +1200,9 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
int err = 0;
u32 value;
- mutex_lock(&sor->lock);
-
- if (sor->enabled)
- goto unlock;
-
err = clk_prepare_enable(sor->clk);
if (err < 0)
- goto unlock;
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
reset_control_deassert(sor->rst);
@@ -978,7 +1221,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- goto unlock;
+ return;
}
}
@@ -999,40 +1242,40 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL_3);
- value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
- tegra_sor_writel(sor, value, SOR_PLL_3);
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, SOR_PLL3);
- value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
- SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
+ SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL_2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_LVDS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
- value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
- tegra_sor_writel(sor, value, SOR_PLL_1);
+ value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, SOR_PLL1);
while (true) {
- value = tegra_sor_readl(sor, SOR_PLL_2);
- if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
break;
usleep_range(250, 1000);
}
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
/*
* power up
@@ -1045,51 +1288,49 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* step 1 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
- SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
+ SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
/* step 2 */
err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
- goto unlock;
- }
usleep_range(5, 100);
/* step 3 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(20, 100);
/* step 4 */
- value = tegra_sor_readl(sor, SOR_PLL_0);
- value &= ~SOR_PLL_0_POWER_OFF;
- value &= ~SOR_PLL_0_VCOPD;
- tegra_sor_writel(sor, value, SOR_PLL_0);
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_VCOPD;
+ value &= ~SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
usleep_range(200, 1000);
/* step 5 */
- value = tegra_sor_readl(sor, SOR_PLL_2);
- value &= ~SOR_PLL_2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL_2);
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
/* switch to DP clock */
err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1097,7 +1338,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
/* power DP lanes */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
if (link.num_lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1114,12 +1355,12 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
else
value |= SOR_DP_PADCTL_PD_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1141,14 +1382,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* set linkctl */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
for (i = 0, value = 0; i < 4; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1159,7 +1400,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_DP_TPG);
- value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
@@ -1176,7 +1417,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
- tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1189,33 +1430,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
/* enable pad calibration logic */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
if (sor->dpaux) {
u8 rate, lanes;
err = drm_dp_link_probe(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- goto unlock;
- }
err = drm_dp_link_power_up(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power up eDP link: %d\n",
err);
- goto unlock;
- }
err = drm_dp_link_configure(aux, &link);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to configure eDP link: %d\n",
err);
- goto unlock;
- }
rate = drm_dp_link_rate_to_bw_code(link.rate);
lanes = link.num_lanes;
@@ -1225,14 +1460,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
/* disable training pattern generator */
@@ -1249,17 +1484,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
if (err < 0) {
dev_err(sor->dev, "DP fast link training failed: %d\n",
err);
- goto unlock;
}
dev_dbg(sor->dev, "fast link training succeeded\n");
}
err = tegra_sor_power_up(sor, 250);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- goto unlock;
- }
/*
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1295,7 +1527,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
break;
}
- tegra_sor_writel(sor, value, SOR_STATE_1);
+ tegra_sor_writel(sor, value, SOR_STATE1);
/*
* TODO: The video timing programming below doesn't seem to match the
@@ -1303,25 +1535,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
*/
value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
vse = mode->vsync_end - mode->vsync_start - 1;
hse = mode->hsync_end - mode->hsync_start - 1;
value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
vbe = vse + (mode->vsync_start - mode->vdisplay);
hbe = hse + (mode->hsync_start - mode->hdisplay);
value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
vbs = vbe + mode->vdisplay;
hbs = hbe + mode->hdisplay;
value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1330,10 +1564,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
- goto unlock;
- }
tegra_sor_update(sor);
@@ -1344,147 +1576,610 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
tegra_dc_commit(dc);
err = tegra_sor_attach(sor);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
- goto unlock;
- }
err = tegra_sor_wakeup(sor);
- if (err < 0) {
+ if (err < 0)
dev_err(sor->dev, "failed to enable DC: %d\n", err);
- goto unlock;
- }
if (output->panel)
drm_panel_enable(output->panel);
-
- sor->enabled = true;
-
-unlock:
- mutex_unlock(&sor->lock);
}
-static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
+static int
+tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
- u32 value;
int err;
- mutex_lock(&sor->lock);
+ err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
+ pclk, 0);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
- if (!sor->enabled)
- goto unlock;
+ return 0;
+}
- if (output->panel)
- drm_panel_disable(output->panel);
+static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
+ .disable = tegra_sor_edp_disable,
+ .enable = tegra_sor_edp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
- err = tegra_sor_detach(sor);
- if (err < 0) {
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
- goto unlock;
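+/*
+ * Pack up to four payload bytes into a register word, least significant
+ * byte first; e.g. subpacking { 0x11, 0x22, 0x33, 0x44 } yields
+ * 0x44332211.
+ */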
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+ u32 value = 0;
+ size_t i;
+
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
+
+ return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+ const void *data, size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ size_t i, j;
+ u32 value;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+ break;
+
+ default:
+ dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
}
- tegra_sor_writel(sor, 0, SOR_STATE_1);
- tegra_sor_update(sor);
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_sor_writel(sor, value, offset);
+ offset++;
/*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
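+ /*
+ * For example, a 17-byte AVI infoframe (3 header bytes plus 14
+ * remaining bytes, starting with the checksum) needs two subpack
+ * pairs: bytes 3-9 fill the first low/high register pair and
+ * bytes 10-16 the second.
+ */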
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
- tegra_dc_commit(dc);
- }
+ value = tegra_sor_hdmi_subpack(&ptr[i], num);
+ tegra_sor_writel(sor, value, offset++);
- err = tegra_sor_power_down(sor);
- if (err < 0) {
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
- goto unlock;
+ num = min_t(size_t, rem - num, 3);
+
+ value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+ tegra_sor_writel(sor, value, offset++);
}
+}
- if (sor->dpaux) {
- err = tegra_dpaux_disable(sor->dpaux);
- if (err < 0) {
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- goto unlock;
- }
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+ const struct drm_display_mode *mode)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
+ u32 value;
+ int err;
+
+ /* disable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_SINGLE;
+ value &= ~INFOFRAME_CTRL_OTHER;
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+ return err;
}
- err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
- dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
- goto unlock;
+ dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+ return err;
}
- if (output->panel)
- drm_panel_unprepare(output->panel);
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
- clk_disable_unprepare(sor->clk);
- reset_control_assert(sor->rst);
+ /* enable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
- sor->enabled = false;
+ return 0;
+}
-unlock:
- mutex_unlock(&sor->lock);
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
-static int
-tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
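+/*
+ * Look up the production settings for a given pixel clock. This assumes
+ * the settings table is sorted by ascending frequency: the first entry
+ * whose frequency is greater than or equal to the requested one wins,
+ * and NULL means no entry covers the requested frequency.
+ */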
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+ unsigned int i;
+
+ for (i = 0; i < sor->num_settings; i++)
+ if (frequency <= sor->settings[i].frequency)
+ return &sor->settings[i];
+
+ return NULL;
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
- unsigned long pclk = crtc_state->mode.clock * 1000;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
+ u32 value;
int err;
- err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
- pclk, 0);
- if (err < 0) {
- dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
- return err;
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* disable display to SOR clock */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR1_TIMING_CYA;
+ value &= ~SOR1_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+
+ reset_control_assert(sor->rst);
+ usleep_range(1000, 2000);
+ clk_disable_unprepare(sor->clk);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
+ struct tegra_sor_hdmi_settings *settings;
+ struct tegra_sor *sor = to_sor(output);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ u32 value;
+ int err;
+
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
+
+ usleep_range(1000, 2000);
+
+ reset_control_deassert(sor->rst);
+
+ err = clk_set_parent(sor->clk, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+ err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, SOR_PLL3);
+
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_VCOPD;
+ value &= ~SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, SOR_PLL0);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_PLL2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, SOR_PLL2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
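+ /* wait for the lane sequencer to go idle before triggering it */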
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+ break;
+
+ usleep_range(250, 1000);
}
- return 0;
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
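+ /* the TRIGGER bit is expected to self-clear once the sequence is done */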
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (mode->clock < 340000)
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+ else
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+ SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+ tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+ /* program the reference clock */
+ value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+ tegra_sor_writel(sor, value, SOR_REFCLK);
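+ /*
+ * Example (assuming a 24 MHz safe clock): div = 24 * 4 = 96, i.e. an
+ * integer divider of 96 >> 2 = 24 and a fractional part of 96 & 0x3 = 0
+ * in the quarter-MHz units encoded by SOR_REFCLK_DIV_INT/_FRAC.
+ */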
+
+ /* XXX don't hardcode */
+ value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
+ SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
+ SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
+ SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
+ SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
+ SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
+ SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
+ SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
+ SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
+ SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
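+
+ /*
+ * The crossbar value above maps link 1 one-to-one and swaps entries 0
+ * and 2 on link 0 (XSEL(2, 0) and XSEL(0, 2)); each 3-bit XSEL field
+ * routes one of the five lanes.
+ */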
+
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+
+ value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+ /* XXX is this the proper check? */
+ if (mode->clock < 75000)
+ value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+ tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+
+ max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+ value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+ SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+ tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+ /* H_PULSE2 setup */
+ pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
+ (mode->htotal - mode->hsync_end) - 10;
+
+ value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+ PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ value |= H_PULSE2_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ /* infoframe setup */
+ err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+
+ /* XXX HDMI audio support not implemented yet */
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+ /* use single TMDS protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* power up pad calibration */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* production settings */
+ settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+ if (!settings) {
+ dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+ mode->clock * 1000);
+ return;
+ }
+
+ value = tegra_sor_readl(sor, SOR_PLL0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+ value |= SOR_PLL0_VCOCAP(settings->vcocap);
+ tegra_sor_writel(sor, value, SOR_PLL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ value = tegra_sor_readl(sor, SOR_PLL1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+ value |= SOR_PLL1_LOADADJ(settings->loadadj);
+ tegra_sor_writel(sor, value, SOR_PLL1);
+
+ value = tegra_sor_readl(sor, SOR_PLL3);
+ value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+ value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
+ tegra_sor_writel(sor, value, SOR_PLL3);
+
+ value = settings->drive_current[0] << 24 |
+ settings->drive_current[1] << 16 |
+ settings->drive_current[2] << 8 |
+ settings->drive_current[3] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+ value = settings->preemphasis[0] << 24 |
+ settings->preemphasis[1] << 16 |
+ settings->preemphasis[2] << 8 |
+ settings->preemphasis[3] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* power down pad calibration */
+ value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+ /* miscellaneous display controller settings */
+ value = VSYNC_H_POSITION(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+ value &= ~DITHER_CONTROL_MASK;
+ value &= ~BASE_COLOR_SIZE_MASK;
+
+ switch (info->bpc) {
+ case 6:
+ value |= BASE_COLOR_SIZE_666;
+ break;
+
+ case 8:
+ value |= BASE_COLOR_SIZE_888;
+ break;
+
+ default:
+ WARN(1, "%u bits-per-color not supported\n", info->bpc);
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* configure mode */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (info->bpc) {
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+ value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+ value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+ value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
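+
+ /*
+ * Worked example (assuming a CEA 1920x1080@60 mode: htotal 2200,
+ * hsync 2008-2052, vtotal 1125, vsync 1084-1089): hse = 43,
+ * hbe = 191, hbs = 2111 and vse = 4, vbe = 40, vbs = 1120.
+ */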
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ /* enable display to SOR clock and generate HDMI preamble */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
}
-static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
- .dpms = tegra_sor_encoder_dpms,
- .prepare = tegra_sor_encoder_prepare,
- .commit = tegra_sor_encoder_commit,
- .mode_set = tegra_sor_encoder_mode_set,
- .disable = tegra_sor_encoder_disable,
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+ .disable = tegra_sor_hdmi_disable,
+ .enable = tegra_sor_hdmi_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
+ const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
+ int connector = DRM_MODE_CONNECTOR_Unknown;
+ int encoder = DRM_MODE_ENCODER_NONE;
int err;
- if (!sor->dpaux)
- return -ENODEV;
+ if (!sor->dpaux) {
+ if (sor->soc->supports_hdmi) {
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_hdmi_helpers;
+ } else if (sor->soc->supports_lvds) {
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ }
+ } else {
+ if (sor->soc->supports_edp) {
+ connector = DRM_MODE_CONNECTOR_eDP;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_edp_helpers;
+ } else if (sor->soc->supports_dp) {
+ connector = DRM_MODE_CONNECTOR_DisplayPort;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ }
+ }
sor->output.dev = sor->dev;
drm_connector_init(drm, &sor->output.connector,
&tegra_sor_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
+ connector);
drm_connector_helper_add(&sor->output.connector,
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&sor->output.encoder,
- &tegra_sor_encoder_helper_funcs);
+ encoder);
+ drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_mode_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
@@ -1577,18 +2272,130 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
+static const struct tegra_sor_ops tegra_sor_edp_ops = {
+ .name = "eDP",
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+ .supports_edp = true,
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+ .supports_edp = true,
+ .supports_lvds = false,
+ .supports_hdmi = false,
+ .supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+ .supports_edp = false,
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+
+ .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+ .settings = tegra210_sor_hdmi_defaults,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+ { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
static int tegra_sor_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device_node *np;
struct tegra_sor *sor;
struct resource *regs;
int err;
+ match = of_match_device(tegra_sor_of_match, &pdev->dev);
+
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
if (!sor)
return -ENOMEM;
sor->output.dev = sor->dev = &pdev->dev;
+ sor->soc = match->data;
+
+ sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+ sor->soc->num_settings *
+ sizeof(*sor->settings),
+ GFP_KERNEL);
+ if (!sor->settings)
+ return -ENOMEM;
+
+ sor->num_settings = sor->soc->num_settings;
np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
if (np) {
@@ -1599,51 +2406,106 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ if (!sor->dpaux) {
+ if (sor->soc->supports_hdmi) {
+ sor->ops = &tegra_sor_hdmi_ops;
+ } else if (sor->soc->supports_lvds) {
+ dev_err(&pdev->dev, "LVDS not supported yet\n");
+ return -ENODEV;
+ } else {
+ dev_err(&pdev->dev, "unknown (non-DP) support\n");
+ return -ENODEV;
+ }
+ } else {
+ if (sor->soc->supports_edp) {
+ sor->ops = &tegra_sor_edp_ops;
+ } else if (sor->soc->supports_dp) {
+ dev_err(&pdev->dev, "DisplayPort not supported yet\n");
+ return -ENODEV;
+ } else {
+ dev_err(&pdev->dev, "unknown (DP) support\n");
+ return -ENODEV;
+ }
+ }
+
err = tegra_output_probe(&sor->output);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to probe output: %d\n", err);
return err;
+ }
+
+ if (sor->ops && sor->ops->probe) {
+ err = sor->ops->probe(sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to probe %s: %d\n",
+ sor->ops->name, err);
+ goto output;
+ }
+ }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sor->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(sor->regs))
- return PTR_ERR(sor->regs);
+ if (IS_ERR(sor->regs)) {
+ err = PTR_ERR(sor->regs);
+ goto remove;
+ }
sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst))
- return PTR_ERR(sor->rst);
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
+ goto remove;
+ }
sor->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sor->clk))
- return PTR_ERR(sor->clk);
+ if (IS_ERR(sor->clk)) {
+ err = PTR_ERR(sor->clk);
+ dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+ goto remove;
+ }
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(sor->clk_parent))
- return PTR_ERR(sor->clk_parent);
+ if (IS_ERR(sor->clk_parent)) {
+ err = PTR_ERR(sor->clk_parent);
+ dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+ goto remove;
+ }
sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
- if (IS_ERR(sor->clk_safe))
- return PTR_ERR(sor->clk_safe);
+ if (IS_ERR(sor->clk_safe)) {
+ err = PTR_ERR(sor->clk_safe);
+ dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+ goto remove;
+ }
sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
- if (IS_ERR(sor->clk_dp))
- return PTR_ERR(sor->clk_dp);
+ if (IS_ERR(sor->clk_dp)) {
+ err = PTR_ERR(sor->clk_dp);
+ dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+ goto remove;
+ }
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
- mutex_init(&sor->lock);
-
err = host1x_client_register(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto remove;
}
platform_set_drvdata(pdev, sor);
return 0;
+
+remove:
+ if (sor->ops && sor->ops->remove)
+ sor->ops->remove(sor);
+output:
+ tegra_output_remove(&sor->output);
+ return err;
}
static int tegra_sor_remove(struct platform_device *pdev)
@@ -1658,17 +2520,17 @@ static int tegra_sor_remove(struct platform_device *pdev)
return err;
}
+ if (sor->ops && sor->ops->remove) {
+ err = sor->ops->remove(sor);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
+ }
+
tegra_output_remove(&sor->output);
return 0;
}
-static const struct of_device_id tegra_sor_of_match[] = {
- { .compatible = "nvidia,tegra124-sor", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
-
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853fedb5..2d31d027e3f6 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
#define SOR_CTXSW 0x00
-#define SOR_SUPER_STATE_0 0x01
+#define SOR_SUPER_STATE0 0x01
-#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE1 0x02
#define SOR_SUPER_STATE_ATTACHED (1 << 3)
#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
@@ -21,9 +21,9 @@
#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
-#define SOR_STATE_0 0x03
+#define SOR_STATE0 0x03
-#define SOR_STATE_1 0x04
+#define SOR_STATE1 0x04
#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
@@ -33,19 +33,27 @@
#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
-#define SOR_HEAD_STATE_0(x) (0x05 + (x))
-#define SOR_HEAD_STATE_1(x) (0x07 + (x))
-#define SOR_HEAD_STATE_2(x) (0x09 + (x))
-#define SOR_HEAD_STATE_3(x) (0x0b + (x))
-#define SOR_HEAD_STATE_4(x) (0x0d + (x))
-#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
#define SOR_CRC_CNTRL 0x11
#define SOR_CRC_CNTRL_ENABLE (1 << 0)
#define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
-#define SOR_PLL_0 0x17
-#define SOR_PLL_0_ICHPMP_MASK (0xf << 24)
-#define SOR_PLL_0_ICHPMP(x) (((x) & 0xf) << 24)
-#define SOR_PLL_0_VCOCAP_MASK (0xf << 8)
-#define SOR_PLL_0_VCOCAP(x) (((x) & 0xf) << 8)
-#define SOR_PLL_0_VCOCAP_RST SOR_PLL_0_VCOCAP(3)
-#define SOR_PLL_0_PLLREG_MASK (0x3 << 6)
-#define SOR_PLL_0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
-#define SOR_PLL_0_PLLREG_LEVEL_V25 SOR_PLL_0_PLLREG_LEVEL(0)
-#define SOR_PLL_0_PLLREG_LEVEL_V15 SOR_PLL_0_PLLREG_LEVEL(1)
-#define SOR_PLL_0_PLLREG_LEVEL_V35 SOR_PLL_0_PLLREG_LEVEL(2)
-#define SOR_PLL_0_PLLREG_LEVEL_V45 SOR_PLL_0_PLLREG_LEVEL(3)
-#define SOR_PLL_0_PULLDOWN (1 << 5)
-#define SOR_PLL_0_RESISTOR_EXT (1 << 4)
-#define SOR_PLL_0_VCOPD (1 << 2)
-#define SOR_PLL_0_POWER_OFF (1 << 0)
-
-#define SOR_PLL_1 0x18
+#define SOR_PLL0 0x17
+#define SOR_PLL0_ICHPMP_MASK (0xf << 24)
+#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL0_VCOCAP_MASK (0xf << 8)
+#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3)
+#define SOR_PLL0_PLLREG_MASK (0x3 << 6)
+#define SOR_PLL0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
+#define SOR_PLL0_PLLREG_LEVEL_V25 SOR_PLL0_PLLREG_LEVEL(0)
+#define SOR_PLL0_PLLREG_LEVEL_V15 SOR_PLL0_PLLREG_LEVEL(1)
+#define SOR_PLL0_PLLREG_LEVEL_V35 SOR_PLL0_PLLREG_LEVEL(2)
+#define SOR_PLL0_PLLREG_LEVEL_V45 SOR_PLL0_PLLREG_LEVEL(3)
+#define SOR_PLL0_PULLDOWN (1 << 5)
+#define SOR_PLL0_RESISTOR_EXT (1 << 4)
+#define SOR_PLL0_VCOPD (1 << 2)
+#define SOR_PLL0_PWR (1 << 0)
+
+#define SOR_PLL1 0x18
/* XXX: read-only bit? */
-#define SOR_PLL_1_TERM_COMPOUT (1 << 15)
-#define SOR_PLL_1_TMDS_TERM (1 << 8)
-
-#define SOR_PLL_2 0x19
-#define SOR_PLL_2_LVDS_ENABLE (1 << 25)
-#define SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
-#define SOR_PLL_2_PORT_POWERDOWN (1 << 23)
-#define SOR_PLL_2_BANDGAP_POWERDOWN (1 << 22)
-#define SOR_PLL_2_POWERDOWN_OVERRIDE (1 << 18)
-#define SOR_PLL_2_SEQ_PLLCAPPD (1 << 17)
-
-#define SOR_PLL_3 0x1a
-#define SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
-#define SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+#define SOR_PLL1_LOADADJ_MASK (0xf << 20)
+#define SOR_PLL1_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL1_TERM_COMPOUT (1 << 15)
+#define SOR_PLL1_TMDS_TERMADJ_MASK (0xf << 9)
+#define SOR_PLL1_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL1_TMDS_TERM (1 << 8)
+
+#define SOR_PLL2 0x19
+#define SOR_PLL2_LVDS_ENABLE (1 << 25)
+#define SOR_PLL2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
+#define SOR_PLL2_PORT_POWERDOWN (1 << 23)
+#define SOR_PLL2_BANDGAP_POWERDOWN (1 << 22)
+#define SOR_PLL2_POWERDOWN_OVERRIDE (1 << 18)
+#define SOR_PLL2_SEQ_PLLCAPPD (1 << 17)
+#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16)
+
+#define SOR_PLL3 0x1a
+#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24)
+#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24)
+#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13)
+#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13)
#define SOR_CSTM 0x1b
+#define SOR_CSTM_ROTCLK_MASK (0xf << 24)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
#define SOR_CSTM_LVDS (1 << 16)
#define SOR_CSTM_LINK_ACT_B (1 << 15)
#define SOR_CSTM_LINK_ACT_A (1 << 14)
#define SOR_CSTM_UPPER (1 << 11)
#define SOR_LVDS 0x1c
-#define SOR_CRC_A 0x1d
-#define SOR_CRC_A_VALID (1 << 0)
-#define SOR_CRC_A_RESET (1 << 0)
-#define SOR_CRC_B 0x1e
+#define SOR_CRCA 0x1d
+#define SOR_CRCA_VALID (1 << 0)
+#define SOR_CRCA_RESET (1 << 0)
+#define SOR_CRCB 0x1e
#define SOR_BLANK 0x1f
#define SOR_SEQ_CTL 0x20
+#define SOR_SEQ_CTL_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_CTL_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_CTL_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
#define SOR_LANE_SEQ_CTL 0x21
#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
+#define SOR_LANE_SEQ_CTL_STATE_BUSY (1 << 28)
#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
+#define SOR_LANE_SEQ_CTL_DELAY(x) (((x) & 0xf) << 12)
#define SOR_SEQ_INST(x) (0x22 + (x))
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_WAIT_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
#define SOR_PWM_DIV 0x32
#define SOR_PWM_DIV_MASK 0xffffff
@@ -140,32 +187,36 @@
#define SOR_PWM_CTL_CLK_SEL (1 << 30)
#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
-#define SOR_VCRC_A_0 0x34
-#define SOR_VCRC_A_1 0x35
-#define SOR_VCRC_B_0 0x36
-#define SOR_VCRC_B_1 0x37
-#define SOR_CCRC_A_0 0x38
-#define SOR_CCRC_A_1 0x39
-#define SOR_CCRC_B_0 0x3a
-#define SOR_CCRC_B_1 0x3b
-#define SOR_EDATA_A_0 0x3c
-#define SOR_EDATA_A_1 0x3d
-#define SOR_EDATA_B_0 0x3e
-#define SOR_EDATA_B_1 0x3f
-#define SOR_COUNT_A_0 0x40
-#define SOR_COUNT_A_1 0x41
-#define SOR_COUNT_B_0 0x42
-#define SOR_COUNT_B_1 0x43
-#define SOR_DEBUG_A_0 0x44
-#define SOR_DEBUG_A_1 0x45
-#define SOR_DEBUG_B_0 0x46
-#define SOR_DEBUG_B_1 0x47
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
#define SOR_TRIG 0x48
#define SOR_MSCHECK 0x49
#define SOR_XBAR_CTRL 0x4a
+#define SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 2)
+#define SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define SOR_XBAR_CTRL_BYPASS (1 << 0)
#define SOR_XBAR_POL 0x4b
-#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL0 0x4c
#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
@@ -173,34 +224,34 @@
#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
#define SOR_DP_LINKCTL_ENABLE (1 << 0)
-#define SOR_DP_LINKCTL_1 0x4d
+#define SOR_DP_LINKCTL1 0x4d
-#define SOR_LANE_DRIVE_CURRENT_0 0x4e
-#define SOR_LANE_DRIVE_CURRENT_1 0x4f
-#define SOR_LANE4_DRIVE_CURRENT_0 0x50
-#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_LANE_PREEMPHASIS_0 0x52
-#define SOR_LANE_PREEMPHASIS_1 0x53
-#define SOR_LANE4_PREEMPHASIS_0 0x54
-#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_LANE_POST_CURSOR_0 0x56
-#define SOR_LANE_POST_CURSOR_1 0x57
-#define SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
-#define SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
-#define SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
-#define SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
-#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG0 0x58
#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
@@ -211,11 +262,11 @@
#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
-#define SOR_DP_CONFIG_1 0x59
-#define SOR_DP_MN_0 0x5a
-#define SOR_DP_MN_1 0x5b
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
-#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL0 0x5c
#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
@@ -229,17 +280,18 @@
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
-#define SOR_DP_PADCTL_1 0x5d
+#define SOR_DP_PADCTL1 0x5d
-#define SOR_DP_DEBUG_0 0x5e
-#define SOR_DP_DEBUG_1 0x5f
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
-#define SOR_DP_SPARE_0 0x60
-#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
-#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
-#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
+#define SOR_DP_SPARE0 0x60
+#define SOR_DP_SPARE_DISP_VIDEO_PREAMBLE (1 << 3)
+#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
+#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
+#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
-#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_SPARE1 0x61
#define SOR_DP_AUDIO_CTRL 0x62
#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
#define SOR_DP_TPG 0x6d
#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
@@ -275,8 +327,44 @@
#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
#define SOR_DP_TPG_CONFIG 0x6e
-#define SOR_DP_LQ_CSTM_0 0x6f
-#define SOR_DP_LQ_CSTM_1 0x70
-#define SOR_DP_LQ_CSTM_2 0x71
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define INFOFRAME_CTRL_CHECKSUM_ENABLE (1 << 9)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define INFOFRAME_STATUS_DONE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_CTRL 0xc0
+#define SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_REFCLK 0xe6
+#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define SOR_INPUT_CONTROL 0xe8
+#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 7a0315855e90..0af8bed7ce1e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -375,25 +375,17 @@ static int panel_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "found backlight\n");
}
- panel_mod->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+ panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(panel_mod->enable_gpio)) {
ret = PTR_ERR(panel_mod->enable_gpio);
- if (ret != -ENOENT) {
- dev_err(&pdev->dev, "failed to request enable GPIO\n");
- goto fail_backlight;
- }
-
- /* Optional GPIO is not here, continue silently. */
- panel_mod->enable_gpio = NULL;
- } else {
- ret = gpiod_direction_output(panel_mod->enable_gpio, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to setup GPIO\n");
- goto fail_backlight;
- }
- dev_info(&pdev->dev, "found enable GPIO\n");
+ dev_err(&pdev->dev, "failed to request enable GPIO\n");
+ goto fail_backlight;
}
+ if (panel_mod->enable_gpio)
+ dev_info(&pdev->dev, "found enable GPIO\n");
+
mod = &panel_mod->base;
pdev->dev.platform_data = mod;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3077f1554099..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
} else {
pool->npages_free += count;
list_splice(&ttm_dma->pages_list, &pool->free_list);
- npages = count;
- if (pool->npages_free > _manager->options.max_size) {
+ /*
+ * Wait to have at least NUM_PAGES_TO_ALLOC pages
+ * to free in order to minimize calls to set_memory_wb().
+ */
+ if (pool->npages_free >= (_manager->options.max_size +
+ NUM_PAGES_TO_ALLOC))
npages = pool->npages_free - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080abc86d1..4e19d0f9cc30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
0);
- if (unlikely(IS_ERR(swap_storage))) {
+ if (IS_ERR(swap_storage)) {
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
@@ -354,7 +354,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
if (unlikely(from_page == NULL))
continue;
to_page = shmem_read_mapping_page(swap_space, i);
- if (unlikely(IS_ERR(to_page))) {
+ if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16cecd3ba..62c7b1dafaa4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@ static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
{
struct udl_fbdev *ufbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
rect->height);
@@ -298,7 +298,7 @@ static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *regi
{
struct udl_fbdev *ufbdev = info->par;
- sys_copyarea(info, region);
+ drm_fb_helper_sys_copyarea(info, region);
udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
region->height);
@@ -308,7 +308,7 @@ static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct udl_fbdev *ufbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
image->height);
@@ -476,7 +476,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
container_of(helper, struct udl_fbdev, helper);
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
- struct device *device = dev->dev;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct udl_gem_object *obj;
@@ -506,21 +505,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
goto out_gfree;
}
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_gfree;
}
info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
- goto out_gfree;
+ goto out_destroy_fbi;
fb = &ufbdev->ufb.base;
ufbdev->helper.fb = fb;
- ufbdev->helper.fbdev = info;
strcpy(info->fix.id, "udldrmfb");
@@ -533,18 +531,13 @@ static int udlfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_gfree;
- }
-
-
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
ufbdev->ufb.obj->vmapping);
return ret;
+out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_gfree:
drm_gem_object_unreference(&ufbdev->ufb.obj->base);
out:
@@ -558,14 +551,8 @@ static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
static void udl_fbdev_destroy(struct drm_device *dev,
struct udl_fbdev *ufbdev)
{
- struct fb_info *info;
- if (ufbdev->helper.fbdev) {
- info = ufbdev->helper.fbdev;
- unregister_framebuffer(info);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&ufbdev->helper);
+ drm_fb_helper_release_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@ void udl_fbdev_unplug(struct drm_device *dev)
return;
ufbdev = udl->fbdev;
- if (ufbdev->helper.fbdev) {
- struct fb_info *info;
- info = ufbdev->helper.fbdev;
- unlink_framebuffer(info);
- }
+ drm_fb_helper_unlink_fbi(&ufbdev->helper);
}
struct drm_framebuffer *
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 6394547cf67a..860062ef8814 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -125,7 +125,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-static struct vm_operations_struct vgem_gem_vm_ops = {
+static const struct vm_operations_struct vgem_gem_vm_ops = {
.fault = vgem_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index ba33cf679180..d0cbd5ecd7f0 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -260,7 +260,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
/*
* Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
* pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contingous.
+ * quite large for some blits, and pages don't need to be contiguous.
*/
static int
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9e770c..6a81e084593b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_fillrect(info, rect);
+ drm_fb_helper_sys_fillrect(info, rect);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
rect->width, rect->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_copyarea(info, area);
+ drm_fb_helper_sys_copyarea(info, area);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
area->width, area->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
- sys_imageblit(info, image);
+ drm_fb_helper_sys_imageblit(info, image);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
image->width, image->height);
schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct virtio_gpu_object *obj;
- struct device *device = vgdev->dev;
uint32_t resid, format, size;
int ret;
@@ -317,18 +316,12 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
if (ret)
goto err_obj_attach;
- info = framebuffer_alloc(0, device);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto err_fb_alloc;
}
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto err_fb_alloc_cmap;
- }
-
info->par = helper;
ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
fb = &vfbdev->vgfb.base;
vfbdev->helper.fb = fb;
- vfbdev->helper.fbdev = info;
strcpy(info->fix.id, "virtiodrmfb");
info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
return 0;
err_fb_init:
- fb_dealloc_cmap(&info->cmap);
-err_fb_alloc_cmap:
- framebuffer_release(info);
+ drm_fb_helper_release_fbi(helper);
err_fb_alloc:
virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
@@ -371,15 +361,11 @@ err_obj_vmap:
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
struct virtio_gpu_fbdev *vgfbdev)
{
- struct fb_info *info;
struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
- if (vgfbdev->helper.fbdev) {
- info = vgfbdev->helper.fbdev;
+ drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+ drm_fb_helper_release_fbi(&vgfbdev->helper);
- unregister_framebuffer(info);
- framebuffer_release(info);
- }
if (vgfb->obj)
vgfb->obj = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab951f507..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
- vmwgfx_cmdbuf_res.o \
+ vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 000000000000..8cce7f15b6eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 000000000000..9ce2466a5d00
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ * Definitions for SVGA3D hardware capabilities. Capabilities
+ * are used to query for optional rendering features during
+ * driver initialization. The capability data is stored as a very
+ * basic key/value dictionary within the "FIFO register" memory
+ * area at the beginning of BAR2.
+ *
+ * Note that these definitions are only for 3D capabilities.
+ * The SVGA device also has "device capabilities" and "FIFO
+ * capabilities", which are non-3D-specific and are stored as
+ * bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \
+ SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ * Record types that can be found in the caps block.
+ * Related record types are grouped together numerically so that
+ * SVGA3dCaps_FindRecord() can be applied on a range of record
+ * types.
+ */
+
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ * Header field leading each caps block record. Contains the offset (in
+ * register words, NOT bytes) to the next caps block record (the record
+ * list is terminated by a zero word) and the record type as defined
+ * above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ * Caps block record; "data" is a placeholder for the actual data structure
+ * contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
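
The record layout above implies a simple walk: each record starts with a length word (counting in register words, header included) and a type word, and the list ends at a zero word. A minimal C sketch under those rules -- the helper name is hypothetical, and the caps block is assumed to have been copied out of FIFO memory into an ordinary uint32_t buffer:

#include <stdint.h>
#include <stddef.h>

/* Return a pointer to the data words of the first record with the
 * wanted type, or NULL if no such record exists. */
static const uint32_t *find_caps_record(const uint32_t *caps, size_t nwords,
                                        uint32_t wanted_type)
{
    size_t pos = 0;

    while (pos + 1 < nwords && caps[pos] != 0) {  /* zero word ends the list */
        uint32_t length = caps[pos];              /* words to the next record */
        uint32_t type   = caps[pos + 1];          /* SVGA3dCapsRecordType */

        if (type == wanted_type)
            return &caps[pos + 2];  /* data follows the two header words */
        pos += length;
    }
    return NULL;
}

For DEVCAPS records the returned data would then be read as SVGA3dCapPair entries (key, value).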
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 000000000000..2dfd57c5f463
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ * SVGA 3d hardware cmd definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+typedef enum {
+ SVGA_3D_CMD_LEGACY_BASE = 1000,
+ SVGA_3D_CMD_BASE = 1040,
+
+ SVGA_3D_CMD_SURFACE_DEFINE = 1040,
+ SVGA_3D_CMD_SURFACE_DESTROY = 1041,
+ SVGA_3D_CMD_SURFACE_COPY = 1042,
+ SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043,
+ SVGA_3D_CMD_SURFACE_DMA = 1044,
+ SVGA_3D_CMD_CONTEXT_DEFINE = 1045,
+ SVGA_3D_CMD_CONTEXT_DESTROY = 1046,
+ SVGA_3D_CMD_SETTRANSFORM = 1047,
+ SVGA_3D_CMD_SETZRANGE = 1048,
+ SVGA_3D_CMD_SETRENDERSTATE = 1049,
+ SVGA_3D_CMD_SETRENDERTARGET = 1050,
+ SVGA_3D_CMD_SETTEXTURESTATE = 1051,
+ SVGA_3D_CMD_SETMATERIAL = 1052,
+ SVGA_3D_CMD_SETLIGHTDATA = 1053,
+ SVGA_3D_CMD_SETLIGHTENABLED = 1054,
+ SVGA_3D_CMD_SETVIEWPORT = 1055,
+ SVGA_3D_CMD_SETCLIPPLANE = 1056,
+ SVGA_3D_CMD_CLEAR = 1057,
+ SVGA_3D_CMD_PRESENT = 1058,
+ SVGA_3D_CMD_SHADER_DEFINE = 1059,
+ SVGA_3D_CMD_SHADER_DESTROY = 1060,
+ SVGA_3D_CMD_SET_SHADER = 1061,
+ SVGA_3D_CMD_SET_SHADER_CONST = 1062,
+ SVGA_3D_CMD_DRAW_PRIMITIVES = 1063,
+ SVGA_3D_CMD_SETSCISSORRECT = 1064,
+ SVGA_3D_CMD_BEGIN_QUERY = 1065,
+ SVGA_3D_CMD_END_QUERY = 1066,
+ SVGA_3D_CMD_WAIT_FOR_QUERY = 1067,
+ SVGA_3D_CMD_PRESENT_READBACK = 1068,
+ SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
+ SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
+ SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
+ SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
+ SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
+ SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
+ SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
+ SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
+ SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
+ SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
+ SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+ SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
+ SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
+ SVGA_3D_CMD_SCREEN_DMA = 1082,
+ SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
+ SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+
+ SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
+ SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
+ SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
+ SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
+ SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
+ SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
+
+ SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
+ SVGA_3D_CMD_READBACK_OTABLE = 1092,
+
+ SVGA_3D_CMD_DEFINE_GB_MOB = 1093,
+ SVGA_3D_CMD_DESTROY_GB_MOB = 1094,
+ SVGA_3D_CMD_DEAD3 = 1095,
+ SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096,
+
+ SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097,
+ SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098,
+ SVGA_3D_CMD_BIND_GB_SURFACE = 1099,
+ SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100,
+ SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101,
+ SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102,
+ SVGA_3D_CMD_READBACK_GB_IMAGE = 1103,
+ SVGA_3D_CMD_READBACK_GB_SURFACE = 1104,
+ SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105,
+ SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106,
+
+ SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107,
+ SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108,
+ SVGA_3D_CMD_BIND_GB_CONTEXT = 1109,
+ SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110,
+ SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111,
+
+ SVGA_3D_CMD_DEFINE_GB_SHADER = 1112,
+ SVGA_3D_CMD_DESTROY_GB_SHADER = 1113,
+ SVGA_3D_CMD_BIND_GB_SHADER = 1114,
+
+ SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115,
+
+ SVGA_3D_CMD_BEGIN_GB_QUERY = 1116,
+ SVGA_3D_CMD_END_GB_QUERY = 1117,
+ SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118,
+
+ SVGA_3D_CMD_NOP = 1119,
+
+ SVGA_3D_CMD_ENABLE_GART = 1120,
+ SVGA_3D_CMD_DISABLE_GART = 1121,
+ SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122,
+ SVGA_3D_CMD_UNMAP_GART_RANGE = 1123,
+
+ SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124,
+ SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125,
+ SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126,
+ SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127,
+
+ SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128,
+ SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129,
+
+ SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130,
+
+ SVGA_3D_CMD_GB_SCREEN_DMA = 1131,
+ SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132,
+ SVGA_3D_CMD_GB_MOB_FENCE = 1133,
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134,
+ SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135,
+ SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136,
+ SVGA_3D_CMD_NOP_ERROR = 1137,
+
+ SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138,
+ SVGA_3D_CMD_SET_VERTEX_DECLS = 1139,
+ SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140,
+ SVGA_3D_CMD_DRAW = 1141,
+ SVGA_3D_CMD_DRAW_INDEXED = 1142,
+
+ /*
+ * DX10 Commands
+ */
+ SVGA_3D_CMD_DX_MIN = 1143,
+ SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143,
+ SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144,
+ SVGA_3D_CMD_DX_BIND_CONTEXT = 1145,
+ SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146,
+ SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147,
+ SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148,
+ SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149,
+ SVGA_3D_CMD_DX_SET_SHADER = 1150,
+ SVGA_3D_CMD_DX_SET_SAMPLERS = 1151,
+ SVGA_3D_CMD_DX_DRAW = 1152,
+ SVGA_3D_CMD_DX_DRAW_INDEXED = 1153,
+ SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154,
+ SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155,
+ SVGA_3D_CMD_DX_DRAW_AUTO = 1156,
+ SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157,
+ SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158,
+ SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159,
+ SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160,
+ SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161,
+ SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162,
+ SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163,
+ SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164,
+ SVGA_3D_CMD_DX_DEFINE_QUERY = 1165,
+ SVGA_3D_CMD_DX_DESTROY_QUERY = 1166,
+ SVGA_3D_CMD_DX_BIND_QUERY = 1167,
+ SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168,
+ SVGA_3D_CMD_DX_BEGIN_QUERY = 1169,
+ SVGA_3D_CMD_DX_END_QUERY = 1170,
+ SVGA_3D_CMD_DX_READBACK_QUERY = 1171,
+ SVGA_3D_CMD_DX_SET_PREDICATION = 1172,
+ SVGA_3D_CMD_DX_SET_SOTARGETS = 1173,
+ SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174,
+ SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175,
+ SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176,
+ SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
+ SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
+ SVGA_3D_CMD_DX_PRED_COPY = 1179,
+ SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+ SVGA_3D_CMD_DX_GENMIPS = 1181,
+ SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
+ SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
+ SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184,
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185,
+ SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186,
+ SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187,
+ SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189,
+ SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190,
+ SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191,
+ SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192,
+ SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193,
+ SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195,
+ SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196,
+ SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197,
+ SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198,
+ SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199,
+ SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200,
+ SVGA_3D_CMD_DX_DEFINE_SHADER = 1201,
+ SVGA_3D_CMD_DX_DESTROY_SHADER = 1202,
+ SVGA_3D_CMD_DX_BIND_SHADER = 1203,
+ SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204,
+ SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205,
+ SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206,
+ SVGA_3D_CMD_DX_SET_COTABLE = 1207,
+ SVGA_3D_CMD_DX_READBACK_COTABLE = 1208,
+ SVGA_3D_CMD_DX_BUFFER_COPY = 1209,
+ SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210,
+ SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211,
+ SVGA_3D_CMD_DX_MOVE_QUERY = 1212,
+ SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213,
+ SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
+ SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
+ SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
+ SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+ SVGA_3D_CMD_DX_HINT = 1218,
+ SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
+ SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
+ SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
+ SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
+
+ /*
+ * Reserve some IDs to be used for the DX11 shader types.
+ */
+ SVGA_3D_CMD_DX_RESERVED1 = 1223,
+ SVGA_3D_CMD_DX_RESERVED2 = 1224,
+ SVGA_3D_CMD_DX_RESERVED3 = 1225,
+
+ SVGA_3D_CMD_DX_MAX = 1226,
+ SVGA_3D_CMD_MAX = 1226,
+ SVGA_3D_CMD_FUTURE_MAX = 3000
+} SVGAFifo3dCmdId;
+
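A trivial sketch of the range rules spelled out above; the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>

/* 1000..1039 are reserved legacy SVGA3D ids; 1040..1999 is the range
 * reserved for the current SVGA3D protocol (only ids below
 * SVGA_3D_CMD_MAX are defined today). */
static bool id_in_svga3d_range(uint32_t id)
{
    return id >= 1040 && id <= 1999;
}
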
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The command id and data size header that begins every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 id;
+ uint32 size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
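
In FIFO memory every 3D command body is preceded by this two-word header. A hedged sketch of emitting one command, assuming 'fifo' points at reserved FIFO space, that size counts only the body, and that body_bytes is word-aligned; the helper itself is hypothetical:

#include <stdint.h>
#include <string.h>

static uint32_t *emit_3d_cmd(uint32_t *fifo, uint32_t id,
                             const void *body, uint32_t body_bytes)
{
    fifo[0] = id;                        /* SVGA3dCmdHeader.id */
    fifo[1] = body_bytes;                /* SVGA3dCmdHeader.size */
    memcpy(&fifo[2], body, body_bytes);  /* the body follows the header */
    return fifo + 2 + body_bytes / 4;    /* next free word */
}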
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+ * SVGA3dSurfaceFace structures must have the same value in the
+ * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
+ * structure must have numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+ * SVGA3dSurfaceFace structures must have the same value in the
+ * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
+ * structure must have numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dClearFlag clearFlag;
+ uint32 color;
+ float depth;
+ uint32 stencil;
+ /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dLightType type;
+ SVGA3dBool inWorldSpace;
+ float diffuse[4];
+ float specular[4];
+ float ambient[4];
+ float position[4];
+ float direction[4];
+ float range;
+ float falloff;
+ float attenuation0;
+ float attenuation1;
+ float attenuation2;
+ float theta;
+ float phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dRenderStateName state;
+ union {
+ uint32 uintValue;
+ float floatValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRenderTargetType type;
+ SVGA3dSurfaceImageId target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * If the discard flag is present in a surface DMA operation, the host may
+ * discard the contents of the current mipmap level and face of the target
+ * surface before applying the surface DMA contents.
+ */
+ uint32 discard : 1;
+
+ /*
+ * If the unsynchronized flag is present, the host may perform this upload
+ * without syncing to pending reads on this surface.
+ */
+ uint32 unsynchronized : 1;
+
+ /*
+ * Guests *MUST* set the reserved bits to 0 before submitting the command
+ * suffix as future flags may occupy these bits.
+ */
+ uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAGuestImage guest;
+ SVGA3dSurfaceImageId host;
+ SVGA3dTransferType transfer;
+ /*
+ * Followed by variable number of SVGA3dCopyBox structures. For consistency
+ * in all clipping logic and coordinate translation, we define the
+ * "source" in each copyBox as the guest image and the
+ * "destination" as the host image, regardless of transfer
+ * direction.
+ *
+ * For efficiency, the SVGA3D device is free to copy more data than
+ * specified. For example, it may round copy boxes outwards such
+ * that they lie on particular alignment boundaries.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ * This is a command suffix that will appear after a SurfaceDMA command in
+ * the FIFO. It contains some extra information that hosts may use to
+ * optimize performance or protect the guest. This suffix exists to preserve
+ * backwards compatibility while also allowing for new functionality to be
+ * implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 suffixSize;
+
+ /*
+ * The maximum offset is used to determine the maximum offset from the
+ * guestPtr base address that will be accessed or written to during this
+ * surfaceDMA. If the suffix is supported, the host will respect this
+ * boundary while performing surface DMAs.
+ *
+ * Defaults to MAX_UINT32
+ */
+ uint32 maximumOffset;
+
+ /*
+ * A set of flags that describes optimizations that the host may perform
+ * while performing this surface DMA operation. The guest should never rely
+ * on behaviour that is different when these flags are set for correctness.
+ *
+ * Defaults to 0
+ */
+ SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
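
Per the notes above, a SURFACE_DMA command is followed in the FIFO by its copy boxes and then by this suffix. A sketch of filling the suffix with its documented defaults -- the local struct is a simplified mirror of the packed device struct, and the append helper is hypothetical:

#include <stdint.h>
#include <string.h>

struct dma_suffix {               /* mirror of SVGA3dCmdSurfaceDMASuffix */
    uint32_t suffixSize;
    uint32_t maximumOffset;
    uint32_t flags;               /* SVGA3dSurfaceDMAFlags as one word */
};

static void append_dma_suffix(uint8_t *after_copy_boxes)
{
    struct dma_suffix sfx;

    sfx.suffixSize    = sizeof(sfx);
    sfx.maximumOffset = 0xFFFFFFFFu;  /* documented default: MAX_UINT32 */
    sfx.flags         = 0;            /* reserved bits must stay zero */
    memcpy(after_copy_boxes, &sfx, sizeof(sfx));
}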
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ * This command is the SVGA3D device's generic drawing entry point.
+ * It can draw multiple ranges of primitives, optionally using an
+ * index buffer, from an arbitrary collection of vertex buffers.
+ *
+ * Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ * during this draw call. The declarations specify which surface
+ * the vertex data lives in, what that vertex data is used for,
+ * and how to interpret it.
+ *
+ * Each SVGA3dPrimitiveRange defines a collection of primitives
+ * to render using the same vertex arrays. An index buffer is
+ * optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * A range hint is an optional specification for the range of indices
+ * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+ * that the entire array will be used.
+ *
+ * These are only hints. The SVGA3D device may use them for
+ * performance optimization if possible, but it's also allowed to
+ * ignore these values.
+ */
+ uint32 first;
+ uint32 last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Define the origin and shape of a vertex or index array. Both
+ * 'offset' and 'stride' are in bytes. The provided surface will be
+ * reinterpreted as a flat array of bytes in the same format used
+ * by surface DMA operations. To avoid unnecessary conversions, the
+ * surface should be created with the SVGA3D_BUFFER format.
+ *
+ * Index 0 in the array starts 'offset' bytes into the surface.
+ * Index 1 begins at byte 'offset + stride', etc. Array indices may
+ * not be negative.
+ */
+ uint32 surfaceId;
+ uint32 offset;
+ uint32 stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ SVGA3dDeclType type;
+ SVGA3dDeclMethod method;
+ SVGA3dDeclUsage usage;
+ uint32 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+ SVGA3dVertexArrayIdentity identity;
+ SVGA3dArray array;
+ SVGA3dArrayRangeHint rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+ /*
+ * Define a group of primitives to render, from sequential indices.
+ *
+ * The values of 'primType' and 'primitiveCount' imply the
+ * total number of vertices that will be rendered.
+ */
+ SVGA3dPrimitiveType primType;
+ uint32 primitiveCount;
+
+ /*
+ * Optional index buffer. If indexArray.surfaceId is
+ * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+ * without an index buffer is identical to rendering with an index
+ * buffer containing the sequence [0, 1, 2, 3, ...].
+ *
+ * If an index buffer is in use, indexWidth specifies the width in
+ * bytes of each index value. It must be less than or equal to
+ * indexArray.stride.
+ *
+ * (Currently, the SVGA3D device requires index buffers to be tightly
+ * packed. In other words, indexWidth == indexArray.stride.)
+ */
+ SVGA3dArray indexArray;
+ uint32 indexWidth;
+
+ /*
+ * Optional index bias. This number is added to all indices from
+ * indexArray before they are used as vertex array indices. This
+ * can be used in multiple ways:
+ *
+ * - When not using an indexArray, this bias can be used to
+ * specify where in the vertex arrays to begin rendering.
+ *
+ * - A positive number here is equivalent to increasing the
+ * offset in each vertex array.
+ *
+ * - A negative number can be used to render using a small
+ * vertex array and an index buffer that contains large
+ * values. This may be used by some applications that
+ * crop a vertex buffer without modifying their index
+ * buffer.
+ *
+ * Note that rendering with a negative bias value may be slower and
+ * use more memory than rendering with a positive or zero bias.
+ */
+ int32 indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 numVertexDecls;
+ uint32 numRanges;
+
+ /*
+ * There are two variable size arrays after the
+ * SVGA3dCmdDrawPrimitives structure. In order,
+ * they are:
+ *
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+ * SVGA3D_MAX_VERTEX_ARRAYS;
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+ * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+ * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+ * the frequency divisor for the corresponding vertex decl).
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
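
Given the layout notes above, the body size of a DRAW_PRIMITIVES command can be computed directly. A sketch, assuming every field packs to one 32-bit word (the vmware_pack wrappers enforce tight packing) and omitting the optional divisor array:

#include <stdint.h>

static uint32_t draw_primitives_body_size(uint32_t num_decls,
                                          uint32_t num_ranges)
{
    const uint32_t fixed_words = 3;  /* cid, numVertexDecls, numRanges */
    const uint32_t decl_words  = 9;  /* identity(4) + array(3) + rangeHint(2) */
    const uint32_t range_words = 7;  /* primType .. indexBias */

    return 4 * (fixed_words +
                num_decls * decl_words +
                num_ranges * range_words);
}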
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 primitiveCount; /* How many primitives to render */
+ uint32 startVertexLocation; /* Which vertex do we start rendering at. */
+
+ uint8 primitiveType; /* SVGA3dPrimitiveType */
+ uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint8 primitiveType; /* SVGA3dPrimitiveType */
+
+ uint32 indexBufferSid; /* Valid index buffer sid. */
+ uint32 indexBufferOffset; /* Byte offset into the index buffer, almost */
+ /* always 0 for DX9 guests, non-zero for OpenGL */
+ /* guests. We can't represent non-multiple of */
+ /* stride offsets in D3D9Renderer... */
+ uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
+
+ int32 baseVertexLocation; /* Bias applied to the index when selecting a */
+ /* vertex from the streams, may be negative */
+
+ uint32 primitiveCount; /* How many primitives to render */
+ uint32 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ uint16 streamOffset;
+ uint8 stream;
+ uint8 type; /* SVGA3dDeclType */
+ uint8 method; /* SVGA3dDeclMethod */
+ uint8 usage; /* SVGA3dDeclUsage */
+ uint8 usageIndex;
+ uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 numElements;
+
+ /*
+ * Followed by numElements SVGA3dVertexElement structures.
+ *
+ * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+ * are cleared and will not be used by following draws.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+
+ uint32 numStreams;
+ /*
+ * Followed by numStreams SVGA3dVertexStream structures.
+ *
+ * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+ * are cleared and will not be used by following draws.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stage;
+ SVGA3dTextureStateName name;
+ union {
+ uint32 value;
+ float floatValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dTransformType type;
+ float matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float min;
+ float max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dZRange zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float diffuse[4];
+ float ambient[4];
+ float specular[4];
+ float emissive[4];
+ float shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ SVGA3dLightData data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ uint32 enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 index;
+ float plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+ /* Followed by variable number of DWORDs for shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 reg; /* register number */
+ SVGA3dShaderType type;
+ SVGA3dShaderConstType ctype;
+ uint32 values[4];
+
+ /*
+ * Followed by a variable number of additional values.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dShaderType type;
+ uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA3D_CMD_WAIT_FOR_QUERY --
+ *
+ * Will read the SVGA3dQueryResult structure pointed to by guestResult,
+ * and if the state member is set to anything other than
+ * SVGA3D_QUERYSTATE_PENDING, this command will be a no-op.
+ *
+ * Otherwise, in addition to the query explicitly waited for, all
+ * queries with the same type and issued with the same cid, for which
+ * an SVGA_3D_CMD_END_QUERY command has previously been sent, will
+ * be finished after execution of this command.
+ *
+ * A query is identified by the gmrId and offset of the guestResult
+ * member. If the device can't find an SVGA_3D_CMD_END_QUERY that was
+ * sent previously with an identical gmrId and offset, it will
+ * effectively end all queries with an identical type issued with the
+ * same cid, and the SVGA3dQueryResult structure pointed to by
+ * guestResult will not be written to. This property can be used to
+ * implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid; /* Same parameters passed to END_QUERY */
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 totalSize; /* Set by guest before query is ended. */
+ SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
+ union { /* Set by host on exit from PENDING state */
+ uint32 result32;
+ uint32 queryCookie; /* May be used to identify which QueryGetData this
+ result corresponds to. */
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
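
After an END_QUERY the guest typically polls the state field that the device writes through guestResult. A sketch using a local mirror of SVGA3dQueryResult and a placeholder for the PENDING constant (the real SVGA3dQueryState values live in svga3d_types.h); a real driver would sleep or wait on an interrupt rather than spin:

#include <stdint.h>

#define QUERYSTATE_PENDING_PLACEHOLDER 2u  /* stand-in; see svga3d_types.h */

struct query_result {          /* local mirror of SVGA3dQueryResult */
    uint32_t totalSize;
    uint32_t state;
    uint32_t result32;
};

static uint32_t poll_query(volatile struct query_result *result)
{
    while (result->state == QUERYSTATE_PENDING_PLACEHOLDER)
        ;                      /* busy-wait for illustration only */
    return result->result32;
}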
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ * This is a blit from an SVGA3D surface to a Screen Object.
+ * This blit must be directed at a specific screen.
+ *
+ * The blit copies from a rectangular region of an SVGA3D surface
+ * image to a rectangular region of a screen.
+ *
+ * This command takes an optional variable-length list of clipping
+ * rectangles after the body of the command. If no rectangles are
+ * specified, there is no clipping region. The entire destRect is
+ * drawn to. If one or more rectangles are included, they describe
+ * a clipping region. The clip rectangle coordinates are measured
+ * relative to the top-left corner of destRect.
+ *
+ * The srcImage must be from mip=0 face=0.
+ *
+ * This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId srcImage;
+ SVGASignedRect srcRect;
+ uint32 destScreenId; /* Screen Object ID */
+ SVGASignedRect destRect;
+ /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
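
The clip rectangles appended to this command are measured relative to destRect's top-left corner, so a screen-space rectangle must be rebased before it is appended. A sketch, with a local mirror of SVGASignedRect (assumed to be four int32 fields, defined elsewhere):

#include <stdint.h>

struct srect { int32_t left, top, right, bottom; };

static struct srect rebase_clip(struct srect screen, struct srect dest)
{
    struct srect r;

    r.left   = screen.left   - dest.left;
    r.top    = screen.top    - dest.top;
    r.right  = screen.right  - dest.left;
    r.bottom = screen.bottom - dest.top;
    return r;                 /* now relative to destRect's top-left */
}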
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGA3dTextureFilter filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer may
+ *   differ, but the guest must ensure that refBuffer holds exactly the
+ *   contents that were in place when the host last received a screen DMA
+ *   command.
+ *
+ * - changeMap is generated by lib/blit, and it contains at least the
+ *   changes since the last screen DMA that was received.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+ uint32 screenId;
+ SVGAGuestImage refBuffer;
+ SVGAGuestImage destBuffer;
+ SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Set Unity Surface Cookie
+ *
+ * Associates the supplied cookie with the surface id for use with
+ * Unity. This cookie is a hint from guest to host; there is no way
+ * for the guest to read back the cookie, and the host is free to drop
+ * the cookie association at will. The default value for the cookie
+ * on all surfaces is 0.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdSetUnitySurfaceCookie {
+ uint32 sid;
+ uint64 cookie;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
+
+/*
+ * Open a context-specific surface in a non-context-specific manner.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdOpenContextSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
+
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ SVGA3dLogicOp logicOp;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint32 color;
+ uint32 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint16 mode;
+ uint16 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId dst;
+ uint32 color;
+ SVGA3dLogicOp logicOp;
+ /* Followed by variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dst;
+ uint32 alphaVal;
+ uint32 flags;
+ SVGA3dBox srcBox;
+ SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+ /*
+ * All LogicOps surfaces are one-level
+ * surfaces so mipmap & face should always
+ * be zero.
+ */
+ SVGA3dSurfaceImageId tmp;
+ SVGA3dSurfaceImageId dst;
+ SVGA3dSurfaceImageId gammaSurf;
+ SVGA3dSurfaceImageId alphaSurf;
+ uint32 gamma;
+ uint32 color;
+ uint32 color2;
+ int32 alphaOffsetX;
+ int32 alphaOffsetY;
+ /* Followed by variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed objects definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobFormat ptDepth;
+ uint32 sizeInBytes;
+ PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceFormat format;
+ SVGA3dSurfaceFlags surfaceFlags;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ SVGAMobId mobid;
+ uint32 arraySize;
+ uint32 mobPitch;
+ uint32 pad[5];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+ uint32 offsetInBytes;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId image;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ SVGAScreenTargetFlags flags;
+ uint32 dpi;
+ uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+ (sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint16 streamOffset;
+ uint8 stream;
+ uint8 type;
+ uint8 methodUsage;
+ uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ uint16 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dRect viewport;
+ SVGA3dRect scissorRect;
+ SVGA3dZRange zRange;
+
+ SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+ SVGAGBVertexElement decl1[4];
+
+ uint32 renderStates[SVGA3D_RS_MAX];
+ SVGAGBVertexElement decl2[18];
+ uint32 pad0[2];
+
+ struct {
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+ } material;
+
+ float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+ float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+ SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+ SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+ /*
+ * Shaders currently bound
+ */
+ uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+ SVGAGBVertexElement decl3[10];
+ uint32 pad1[3];
+
+ uint32 occQueryActive;
+ uint32 occQueryValue;
+
+ /*
+ * Int/Bool Shader constants
+ */
+ SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+ SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+ uint16 pShaderBValues;
+ uint16 vShaderBValues;
+
+
+ SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+ SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+ uint32 numVertexDecls;
+ uint32 numVertexStreams;
+ uint32 numVertexDivisors;
+ uint32 pad2[30];
+
+ /*
+ * Texture Stages
+ *
+ * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+ * textureStages array.
+ * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+ */
+ uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+ uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+ uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+ /*
+ * Float Shader constants.
+ */
+ SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+ SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of a
+ * given object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
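
A hedged sketch of filling this command, assuming 4 KiB pages (so a PPN is a physical address shifted right by 12) and a flat table with no pre-populated entries; the struct is a local mirror, and the ptDepth value is a stand-in for a real SVGAMobFormat from svga3d_types.h:

#include <stdint.h>

struct set_otable_base {      /* mirror of SVGA3dCmdSetOTableBase */
    uint32_t type;            /* SVGAOTableType */
    uint32_t baseAddress;     /* PPN */
    uint32_t sizeInBytes;
    uint32_t validSizeInBytes;
    uint32_t ptDepth;         /* SVGAMobFormat */
};

static void fill_set_otable(struct set_otable_base *cmd, uint32_t table_type,
                            uint64_t phys, uint32_t size)
{
    cmd->type             = table_type;
    cmd->baseAddress      = (uint32_t)(phys >> 12); /* assumed PAGE_SHIFT */
    cmd->sizeInBytes      = size;
    cmd->validSizeInBytes = 0;  /* no entries valid yet */
    cmd->ptDepth          = 0;  /* stand-in SVGAMobFormat value */
}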
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+ uint32 sid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+ uint32 sid;
+ SVGAMobId mobid;
+ uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob. Optionally issue a
+ * readback/update on the surface while it is still bound to the old
+ * mobid if the mobid is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+ uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
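
A sketch of a rebind that preserves surface contents across the mob switch, using both flags defined above; the struct is a local mirror and the fill helper is hypothetical:

#include <stdint.h>

#define COND_BIND_READBACK (1u << 0)  /* ..._FLAG_READBACK */
#define COND_BIND_UPDATE   (1u << 1)  /* ..._FLAG_UPDATE */

struct cond_bind {            /* mirror of SVGA3dCmdCondBindGBSurface */
    uint32_t sid;
    uint32_t testMobid;
    uint32_t mobid;
    uint32_t flags;
};

static void fill_cond_bind(struct cond_bind *cmd, uint32_t sid,
                           uint32_t old_mob, uint32_t new_mob)
{
    cmd->sid       = sid;
    cmd->testMobid = old_mob;  /* rebind only if this mob is still bound */
    cmd->mobid     = new_mob;
    /* Read back through the old mob, then update from the new one. */
    cmd->flags     = COND_BIND_READBACK | COND_BIND_UPDATE;
}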
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface. After
+ * issuing this command, the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface; otherwise rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
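
The requirement above pairs every partial readback with an update of the same region before the surface is referenced again. A sketch of that discipline; the emit helpers are hypothetical stubs standing in for real FIFO submission:

#include <stdint.h>
#include <stdio.h>

struct box { uint32_t x, y, z, w, h, d; };  /* mirror of SVGA3dBox */

static void emit_readback_partial(uint32_t sid, struct box b)
{
    printf("READBACK_GB_IMAGE_PARTIAL sid=%u\n", sid); (void)b;
}

static void emit_update_image(uint32_t sid, struct box b)
{
    printf("UPDATE_GB_IMAGE sid=%u\n", sid); (void)b;
}

static void readback_then_update(uint32_t sid, struct box region)
{
    emit_readback_partial(sid, region);
    /* ...the CPU consumes the region's contents here... */
    emit_update_image(sid, region);  /* required before any other command
                                      * references this surface */
}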
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface. After
+ * issuing this command, the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface; otherwise rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+ uint32 shid;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+ uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ uint32 regStart;
+ SVGA3dShaderType shaderType;
+ SVGA3dShaderConstType constType;
+
+ /*
+ * Followed by a variable number of shader constants.
+ *
+ * Note that FLOAT and INT constants are 4-dwords in length, while
+ * BOOL constants are 1-dword in length.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ * The semantics of this command are identical to the
+ * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ * to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobId mobid;
+ uint32 mustBeZero;
+ uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAMobId mobid;
+ uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 gartOffset;
+ uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ SVGAScreenTargetFlags flags;
+
+ /*
+ * The physical DPI that the guest expects this screen displayed at.
+ *
+ * Guests which are not DPI-aware should set this to zero.
+ */
+ uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+ uint32 screenId;
+ uint32 dead;
+ SVGAMobId destMobID;
+ uint32 destPitch;
+ SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 value;
+ uint32 mobId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 000000000000..c18b663f360f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ * SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ * register. It is set by the host and read by the guest. This lets
+ * us make new guest drivers which are backwards-compatible with old
+ * SVGA hardware revisions. It does not let us support old guest
+ * drivers. Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
+
+typedef enum {
+ SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
+ SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
+ SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
+ SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
+ SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+ SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
+ SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
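+
+/*
+ * For illustration only (not part of the device protocol): a sketch of
+ * decoding a value read from SVGA_FIFO_3D_HWVERSION with the macros
+ * above. The fifo read itself is driver-specific; read_3d_hwversion()
+ * is a hypothetical helper.
+ *
+ *    uint32 hwversion = read_3d_hwversion();
+ *    uint32 major = SVGA3D_MAJOR_HWVERSION(hwversion);
+ *    uint32 minor = SVGA3D_MINOR_HWVERSION(hwversion);
+ *
+ *    E.g. SVGA3D_HWVERSION_WS8_B1 == (2 << 16) | 1 == 0x20001,
+ *    which decodes to major == 2, minor == 1.
+ */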
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+ SVGA3D_DEVCAP_INVALID = ((uint32)-1),
+ SVGA3D_DEVCAP_3D = 0,
+ SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+
+ /*
+ * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ * work in both FFP and Shader modes, and they support texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ */
+ SVGA3D_DEVCAP_MAX_TEXTURES = 2,
+ SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+ SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+ SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+ SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+ SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+ SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+ SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+ SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
+ SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
+ SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
+ SVGA3D_DEVCAP_QUERY_TYPES = 15,
+ SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+ SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+ SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+ SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+ SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+ SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+ SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+ SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+ SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+ SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+ SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+ SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+ SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+ SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+ SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+ SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+ SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+ SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+ SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+
+ /*
+ * There is a hole in our devcap definitions for
+ * historical reasons.
+ *
+ * Define a constant just for completeness.
+ */
+ SVGA3D_DEVCAP_MISSING62 = 62,
+
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+ /*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+ SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+ SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+ SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+ SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+ SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+ SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+ SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
+ SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
+ SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
+ SVGA3D_DEVCAP_SUPERSAMPLE = 73,
+ SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
+ SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
+ SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
+
+ /*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+ SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
+
+ /*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+ SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
+
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
+
+ SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
+ SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
+
+ /*
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_DEAD1 = 84,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+ * ORed together, one for every type of video decoding supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+ * ORed together, one for every type of video processing supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+ SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
+ SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
+ SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
+ SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
+
+ SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+ /*
+ * Does the host support the SVGA logic ops commands?
+ */
+ SVGA3D_DEVCAP_LOGICOPS = 92,
+
+ /*
+ * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+ */
+ SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
+
+ /*
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_DEAD2 = 94,
+
+ /*
+ * Does the device support the DX commands?
+ */
+ SVGA3D_DEVCAP_DX = 95,
+
+ /*
+ * What is the maximum size of a texture array?
+ *
+ * (Even if this cap is zero, cubemaps are still allowed.)
+ */
+ SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
+
+ /*
+ * What is the maximum number of vertex buffers that can
+ * be used in the DXContext inputAssembly?
+ */
+ SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
+
+ /*
+ * What is the maximum number of constant buffers
+ * that can be expected to work correctly with a
+ * DX context?
+ */
+ SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
+
+ /*
+ * Does the device support provoking vertex control?
+ * If zero, the first vertex will always be the provoking vertex.
+ */
+ SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
+
+ SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
+ SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
+ SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
+ SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
+ SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
+ SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
+ SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
+ SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
+ SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
+ SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
+ SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
+ SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
+ SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
+ SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
+ SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
+ SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
+ SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
+ SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
+ SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
+ SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+ SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
+ SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
+ SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
+ SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
+ SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
+ SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
+ SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
+ SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
+ SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
+ SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
+ SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
+ SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
+ SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
+ SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
+ SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
+ SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
+ SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
+ SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
+ SVGA3D_DEVCAP_DXFMT_UYVY = 141,
+ SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
+ SVGA3D_DEVCAP_DXFMT_NV12 = 143,
+ SVGA3D_DEVCAP_DXFMT_AYUV = 144,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
+ SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
+ SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
+ SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
+ SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
+ SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
+ SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
+ SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
+ SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
+ SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
+ SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
+ SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
+ SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
+ SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
+ SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
+ SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
+ SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
+ SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
+ SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+ SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
+ SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
+ SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
+ SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
+ SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
+ SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
+ SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
+ SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
+ SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
+ SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
+ SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
+ SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
+ SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
+ SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
+ SVGA3D_DEVCAP_DXFMT_P8 = 196,
+ SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
+ SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
+ SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
+ SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
+ SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
+ SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
+ SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
+ SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
+ SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
+ SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
+ SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
+ SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
+ SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
+ SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
+ SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
+ SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
+ SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
+ SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
+ SVGA3D_DEVCAP_DXFMT_YV12 = 220,
+ SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
+ SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
+ SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
+ SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
+ SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
+ SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
+ SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
+ SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
+ SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
+ SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
+ SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
+ SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
+ SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
+ SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
+ SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
+ SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
+ SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
+ SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
+ SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
+ SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+
+ SVGA3D_DEVCAP_MAX /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support having volume?
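+ * DX_VERTEX_BUFFER: Can the format be used as a DX vertex buffer?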
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3)
+#define SVGA3D_DXFMT_BLENDABLE (1 << 4)
+#define SVGA3D_DXFMT_MIPS (1 << 5)
+#define SVGA3D_DXFMT_ARRAY (1 << 6)
+#define SVGA3D_DXFMT_VOLUME (1 << 7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
+#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
+#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
+#define SVGADX_DXFMT_MAX (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+ SVGADX_DXFMT_MULTISAMPLE_2 | \
+ SVGADX_DXFMT_MULTISAMPLE_4 | \
+ SVGADX_DXFMT_MULTISAMPLE_8 )
+
+typedef union {
+ Bool b;
+ uint32 u;
+ int32 i;
+ float f;
+} SVGA3dDevCapResult;
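+
+/*
+ * An illustrative sketch (not part of this header's API): testing DXFMT
+ * bits once a devcap value has been retrieved. How the value is queried
+ * from the device is driver-specific; query_devcap() is a hypothetical
+ * helper returning the raw 32-bit result.
+ *
+ *    SVGA3dDevCapResult result;
+ *
+ *    result.u = query_devcap(SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM);
+ *    if ((result.u & SVGA3D_DXFMT_SUPPORTED) &&
+ *        (result.u & SVGA3D_DXFMT_COLOR_RENDERTARGET)) {
+ *            // Format can be defined and used as a color render target.
+ *    }
+ */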
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 000000000000..8c5ae608cfb4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ * SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN 0
+#define SVGA3D_INPUT_PER_VERTEX_DATA 0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX 2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN 1
+#define SVGA3D_RESOURCE_BUFFER 1
+#define SVGA3D_RESOURCE_TEXTURE1D 2
+#define SVGA3D_RESOURCE_TEXTURE2D 3
+#define SVGA3D_RESOURCE_TEXTURE3D 4
+#define SVGA3D_RESOURCE_TEXTURECUBE 5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX 6
+#define SVGA3D_RESOURCE_TYPE_MAX 7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE (1 << 7)
+typedef uint32 SVGA3dFilter;
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN 1
+#define SVGA3D_CULL_NONE 1
+#define SVGA3D_CULL_FRONT 2
+#define SVGA3D_CULL_BACK 3
+#define SVGA3D_CULL_MAX 4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID 0
+#define SVGA3D_COMPARISON_MIN 1
+#define SVGA3D_COMPARISON_NEVER 1
+#define SVGA3D_COMPARISON_LESS 2
+#define SVGA3D_COMPARISON_EQUAL 3
+#define SVGA3D_COMPARISON_LESS_EQUAL 4
+#define SVGA3D_COMPARISON_GREATER 5
+#define SVGA3D_COMPARISON_NOT_EQUAL 6
+#define SVGA3D_COMPARISON_GREATER_EQUAL 7
+#define SVGA3D_COMPARISON_ALWAYS 8
+#define SVGA3D_COMPARISON_MAX 9
+typedef uint8 SVGA3dComparisonFunc;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+/* Id limits */
+static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
+static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+
+typedef uint32 SVGA3dSurfaceId;
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+ struct {
+ float r;
+ float g;
+ float b;
+ float a;
+ };
+
+ float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context that is being paged
+ * back onto the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */
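+
+/*
+ * A minimal sketch of the two bind cases described above (the command
+ * submission machinery is assumed and not shown):
+ *
+ *    SVGA3dCmdDXBindContext bind;
+ *
+ *    bind.cid = cid;
+ *    bind.mobid = mobid;
+ *    bind.validContents = 0;  // new context: mob contents are undefined
+ *
+ *    ...or, when paging a previously used context back onto the device:
+ *
+ *    bind.validContents = 1;  // mob already holds saved context state
+ */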
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dReplyFormatData {
+ uint32 formatSupport;
+ uint32 msaa2xQualityLevels:5;
+ uint32 msaa4xQualityLevels:5;
+ uint32 msaa8xQualityLevels:5;
+ uint32 msaa16xQualityLevels:5;
+ uint32 msaa32xQualityLevels:5;
+ uint32 pad:7;
+}
+#include "vmware_pack_end.h"
+SVGA3dReplyFormatData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+ uint32 slot;
+ SVGA3dShaderType type;
+ SVGA3dSurfaceId sid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+ uint32 startView;
+ SVGA3dShaderType type;
+
+ /*
+ * Followed by a variable number of SVGA3dShaderResourceViewId's.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
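+
+/*
+ * Commands like the one above are variable-sized: the fixed body is
+ * followed directly by its trailing array. A sketch of sizing the body
+ * for numViews view ids (command header handling is driver-specific and
+ * assumed here):
+ *
+ *    uint32 body_size = sizeof(SVGA3dCmdDXSetShaderResources) +
+ *                       numViews * sizeof(SVGA3dShaderResourceViewId);
+ */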
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+ SVGA3dShaderId shaderId;
+ SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+ uint32 startSampler;
+ SVGA3dShaderType type;
+
+ /*
+ * Followed by a variable number of SVGA3dSamplerId's.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+ uint32 vertexCount;
+ uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+ uint32 indexCount;
+ uint32 startIndexLocation;
+ int32 baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+ uint32 vertexCountPerInstance;
+ uint32 instanceCount;
+ uint32 startVertexLocation;
+ uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+ uint32 indexCountPerInstance;
+ uint32 instanceCount;
+ uint32 startIndexLocation;
+ int32 baseVertexLocation;
+ uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+ uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+ SVGA3dSurfaceId sid;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+ uint32 startBuffer;
+ /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+ SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+ /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+ SVGA3dBlendStateId blendId;
+ float blendFactor[4];
+ uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+ uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
+ * to track query state transitions, but are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN 0
+#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */
+#define SVGADX_QDSTATE_MAX 4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dQueryTypeUint8 type;
+ uint16 pad0;
+ SVGADXQueryDeviceState state;
+ SVGA3dDXQueryFlags flags;
+ SVGAMobId mobid;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+ SVGA3dQueryId queryId;
+ SVGA3dQueryType type;
+ SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+ SVGA3dQueryId queryId;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+ SVGA3dQueryId queryId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+ SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+ SVGA3dQueryId queryId;
+ SVGAMobId mobid;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+ uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+ SVGA3dQueryId queryId;
+ uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSOState {
+ uint32 offset; /* Starting offset */
+ uint32 intOffset; /* Internal offset */
+ uint32 vertexCount; /* vertices written */
+ uint32 sizeInBytes; /* max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+ SVGA3dSurfaceId sid;
+ uint32 offset;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
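+
+/*
+ * Illustrative use of SVGA3D_DX_SO_OFFSET_APPEND with the target
+ * description above (soBufferSid and soBufferSize are assumed values):
+ *
+ *    SVGA3dSoTarget target;
+ *
+ *    target.sid = soBufferSid;
+ *    target.offset = SVGA3D_DX_SO_OFFSET_APPEND;  // keep appending
+ *    target.sizeInBytes = soBufferSize;
+ */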
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+ uint32 pad0;
+ /* Followed by a variable number of SVGA3dSoTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport
+{
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+ uint32 pad0;
+ /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+ uint32 pad0;
+ /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+ SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+ uint16 flags;
+ uint16 stencil;
+ SVGA3dDepthStencilViewId depthStencilViewId;
+ float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+ SVGA3dSurfaceId dstSid;
+ SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+ SVGA3dSurfaceId dest;
+ SVGA3dSurfaceId src;
+ uint32 destX;
+ uint32 srcX;
+ uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+typedef uint32 SVGA3dDXStretchBltMode;
+#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
+#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXStretchBlt {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceId dstSid;
+ uint32 destSubResource;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dDXStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Defines a resource/DX surface. Resources share the surfaceId namespace.
+ *
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+ SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
+
+/*
+ * Readback a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+ SVGA3dSurfaceId sid;
+ uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface
+ * for the requested box.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcOffset;
+ uint32 srcPitch;
+ uint32 srcSlicePitch;
+ SVGA3dSurfaceId destSid;
+ uint32 destSubResource;
+ SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface
+ * for the requested box. Supported if SVGA3D_DEVCAP_DX is set.
+ * The context is implied from the command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcOffset;
+ uint32 srcPitch;
+ uint32 srcSlicePitch;
+ SVGA3dSurfaceId destSid;
+ uint32 destSubResource;
+ SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+ SVGA3dSurfaceId srcSid;
+ SVGA3dSurfaceId destSid;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ uint32 pad0;
+ uint32 pad1;
+ } buffer;
+ struct {
+ uint32 mostDetailedMip;
+ uint32 firstArraySlice;
+ uint32 mipLevels;
+ uint32 arraySize;
+ } tex;
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ uint32 flags;
+ uint32 pad0;
+ } bufferex;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dShaderResourceViewDesc desc;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+ SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ } buffer;
+ struct {
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ } tex; /* 1d, 2d, cube */
+ struct {
+ uint32 mipSlice;
+ uint32 firstW;
+ uint32 wSize;
+ } tex3D;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dRenderTargetViewDesc desc;
+ uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+ SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+/*
+ */
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+ uint32 inputSlot;
+ uint32 alignedByteOffset;
+ SVGA3dSurfaceFormat format;
+ SVGA3dInputClassification inputSlotClass;
+ uint32 instanceDataStepRate;
+ uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ /*
+ * XXX: How many of these can there be?
+ */
+ uint32 elid;
+ uint32 numDescs;
+ SVGA3dInputElementDesc desc[32];
+ uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+ /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+ SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+ uint8 blendEnable;
+ uint8 srcBlend;
+ uint8 destBlend;
+ uint8 blendOp;
+ uint8 srcBlendAlpha;
+ uint8 destBlendAlpha;
+ uint8 blendOpAlpha;
+ uint8 renderTargetWriteMask;
+ uint8 logicOpEnable;
+ uint8 logicOp;
+ uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 alphaToCoverageEnable;
+ uint8 independentBlendEnable;
+ uint16 pad0;
+ SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+ uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+/*
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+ SVGA3dBlendStateId blendId;
+ uint8 alphaToCoverageEnable;
+ uint8 independentBlendEnable;
+ uint16 pad0;
+ SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+ SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 depthEnable;
+ SVGA3dDepthWriteMask depthWriteMask;
+ SVGA3dComparisonFunc depthFunc;
+ uint8 stencilEnable;
+ uint8 frontEnable;
+ uint8 backEnable;
+ uint8 stencilReadMask;
+ uint8 stencilWriteMask;
+
+ uint8 frontStencilFailOp;
+ uint8 frontStencilDepthFailOp;
+ uint8 frontStencilPassOp;
+ SVGA3dComparisonFunc frontStencilFunc;
+
+ uint8 backStencilFailOp;
+ uint8 backStencilDepthFailOp;
+ uint8 backStencilPassOp;
+ SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+/*
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+
+ uint8 depthEnable;
+ SVGA3dDepthWriteMask depthWriteMask;
+ SVGA3dComparisonFunc depthFunc;
+ uint8 stencilEnable;
+ uint8 frontEnable;
+ uint8 backEnable;
+ uint8 stencilReadMask;
+ uint8 stencilWriteMask;
+
+ uint8 frontStencilFailOp;
+ uint8 frontStencilDepthFailOp;
+ uint8 frontStencilPassOp;
+ SVGA3dComparisonFunc frontStencilFunc;
+
+ uint8 backStencilFailOp;
+ uint8 backStencilDepthFailOp;
+ uint8 backStencilPassOp;
+ SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+ SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint8 fillMode;
+ SVGA3dCullMode cullMode;
+ uint8 frontCounterClockwise;
+ uint8 provokingVertexLast;
+ int32 depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ uint8 depthClipEnable;
+ uint8 scissorEnable;
+ uint8 multisampleEnable;
+ uint8 antialiasedLineEnable;
+ float lineWidth;
+ uint8 lineStippleEnable;
+ uint8 lineStippleFactor;
+ uint16 lineStipplePattern;
+ uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+/*
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+
+ uint8 fillMode;
+ SVGA3dCullMode cullMode;
+ uint8 frontCounterClockwise;
+ uint8 provokingVertexLast;
+ int32 depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ uint8 depthClipEnable;
+ uint8 scissorEnable;
+ uint8 multisampleEnable;
+ uint8 antialiasedLineEnable;
+ float lineWidth;
+ uint8 lineStippleEnable;
+ uint8 lineStippleFactor;
+ uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+ SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dFilter filter;
+ uint8 addressU;
+ uint8 addressV;
+ uint8 addressW;
+ uint8 pad0;
+ float mipLODBias;
+ uint8 maxAnisotropy;
+ SVGA3dComparisonFunc comparisonFunc;
+ uint16 pad1;
+ SVGA3dRGBAFloat borderColor;
+ float minLOD;
+ float maxLOD;
+ uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+/*
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+ SVGA3dSamplerId samplerId;
+ SVGA3dFilter filter;
+ uint8 addressU;
+ uint8 addressV;
+ uint8 addressW;
+ uint8 pad0;
+ float mipLODBias;
+ uint8 maxAnisotropy;
+ SVGA3dComparisonFunc comparisonFunc;
+ uint16 pad1;
+ SVGA3dRGBAFloat borderColor;
+ float minLOD;
+ float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+ SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+/*
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSignatureEntry {
+ uint8 systemValue;
+ uint8 reg; /* register is a reserved word */
+ uint16 mask;
+ uint8 registerComponentType;
+ uint8 minPrecision;
+ uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignatureEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+ SVGA3dShaderId shaderId;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+ uint32 offsetInBytes;
+ SVGAMobId mobid;
+ uint32 numInputSignatureEntries;
+ uint32 numOutputSignatureEntries;
+
+ uint32 numPatchConstantSignatureEntries;
+
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+ SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+ uint32 cid;
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+/*
+ * The maximum number of streamout decl's in each streamout entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+ uint32 outputSlot;
+ uint32 registerIndex;
+ uint8 registerMask;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXStreamOutputEntry {
+ uint32 numOutputStreamEntries;
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+ uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+ SVGA3dStreamOutputId soid;
+ uint32 numOutputStreamEntries;
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+ SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+ SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 value;
+ uint32 mobId;
+ uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdDXSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+ uint32 cid;
+ uint32 mobid;
+ SVGACOTableType type;
+ uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+ uint32 cid;
+ SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+ uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+ uint32 bufferId;
+ uint32 stride;
+ uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+ uint32 sid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+ uint32 layoutId;
+ SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+ uint32 indexBufferSid;
+ uint32 pad;
+ uint32 indexBufferOffset;
+ uint32 indexBufferFormat;
+ uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+ SVGADXInputAssemblyMobFormat inputAssembly;
+
+ struct {
+ uint32 blendStateId;
+ uint32 blendFactor[4];
+ uint32 sampleMask;
+ uint32 depthStencilStateId;
+ uint32 stencilRef;
+ uint32 rasterizerStateId;
+ uint32 depthStencilViewId;
+ uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+ uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+ } renderState;
+
+ struct {
+ uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 soid;
+ } streamOut;
+ uint32 pad0[11];
+
+ uint8 numViewports;
+ uint8 numScissorRects;
+ uint16 pad1[1];
+
+ uint32 pad2[3];
+
+ SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+ uint32 pad3[32];
+
+ SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+ uint32 pad4[64];
+
+ struct {
+ uint32 queryID;
+ uint32 value;
+ } predication;
+ uint32 pad5[2];
+
+ struct {
+ uint32 shaderId;
+ SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+ uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+ uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+ } shaderState[SVGA3D_NUM_SHADERTYPE];
+ uint32 pad6[26];
+
+ SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+ SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+ uint32 pad7[381];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+ uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 000000000000..a1c36877ad55
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ * SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_RENDER_TARGETS 8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS 8
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS 5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS 32
+#define SVGA3D_NUM_LIGHTS 8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
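+/* That is, 8 * 1024 * 1024 / 4 == 2097152 dwords of shader text. */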
+
+#define SVGA3D_MAX_CLIP_PLANES 6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS 32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 000000000000..b44ce648f592
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ * SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..58704f0a4607
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ * 1. Red, bump W, luminance and depth are stored in the first channel.
+ * 2. Green, bump V and stencil are stored in the second channel.
+ * 3. Blue and bump U are stored in the third channel.
+ * 4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte.
+ * 3. In each case, the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
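
The composite descriptors at the end of the enum are plain bitwise unions of the per-channel bits, so testing whether a block carries a given channel is a simple mask test. A minimal standalone sketch with the enum reduced to a few representative bits (not part of the patch):

    #include <stdio.h>

    enum block_desc {
            BD_BLUE  = 1 << 0,
            BD_GREEN = 1 << 1,
            BD_RED   = 1 << 2,
            BD_ALPHA = 1 << 3,
            BD_RGB   = BD_RED | BD_GREEN | BD_BLUE,
            BD_RGBA  = BD_RGB | BD_ALPHA,
    };

    int main(void)
    {
            /* Does the block carry an alpha channel? */
            printf("RGBA has alpha: %d\n", (BD_RGBA & BD_ALPHA) != 0); /* 1 */
            printf("RGB  has alpha: %d\n", (BD_RGB  & BD_ALPHA) != 0); /* 0 */
            return 0;
    }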
+
+/*
+ * struct svga3d_surface_desc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and offsets (if applicable).
+ */
+struct svga3d_channel_def {
+ union {
+ u8 blue;
+ u8 u;
+ u8 uv_video;
+ u8 u_video;
+ };
+ union {
+ u8 green;
+ u8 v;
+ u8 stencil;
+ u8 v_video;
+ };
+ union {
+ u8 red;
+ u8 w;
+ u8 luminance;
+ u8 y;
+ u8 depth;
+ u8 data;
+ };
+ union {
+ u8 alpha;
+ u8 q;
+ u8 exp;
+ };
+};
+
+struct svga3d_surface_desc {
+ SVGA3dSurfaceFormat format;
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ u32 total_bit_depth;
+ struct svga3d_channel_def bit_depth;
+ struct svga3d_channel_def bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0,
+ 0, {{0}, {0}, {0}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {6}, {5}, {0}},
+ {{0}, {5}, {11}, {0}}},
+
+ {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 15, {{5}, {5}, {5}, {0}},
+ {{0}, {5}, {10}, {0}}},
+
+ {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {5}, {1}},
+ {{0}, {5}, {10}, {15}}},
+
+ {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{4}, {4}, {4}, {4}},
+ {{0}, {4}, {8}, {12}}},
+
+ {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {1}, {15}, {0}},
+ {{0}, {15}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {4}, {4}},
+ {{0}, {0}, {0}, {4}}},
+
+ {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {0}, {8}}},
+
+ {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {0}, {8}}},
+
+ {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {6}, {0}},
+ {{11}, {6}, {0}, {0}}},
+
+ {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3,
+ 24, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{24}, {16}, {8}, {0}}},
+
+ {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{16}, {8}, {0}, {0}}},
+
+ {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4,
+ 32, {{16}, {16}, {0}, {0}},
+ {{16}, {0}, {0}, {0}}},
+
+ {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {0}, {16}, {0}}},
+
+ {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}}},
+
+ {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {0}, {8}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2,
+ 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12,
+ 96, {{32}, {32}, {32}, {0}},
+ {{64}, {32}, {0}, {0}}},
+
+ {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {8}, {0}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {11}, {11}, {0}},
+ {{0}, {10}, {21}, {0}}},
+
+ {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
+
+ {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {0}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4,
+ 32, {{9}, {9}, {9}, {5}},
+ {{18}, {9}, {0}, {27}}},
+
+ {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
+
+ {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {8}, {24}, {0}},
+ {{0}, {24}, {0}, {0}}},
+
+ {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+ {2, 2, 1}, 6, 2,
+ 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16,
+ 128, {{32}, {32}, {32}, {32}},
+ {{64}, {32}, {0}, {96}}},
+
+ {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8,
+ 64, {{16}, {16}, {16}, {16}},
+ {{32}, {16}, {0}, {48}}},
+
+ {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8,
+ 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {0}, {0}}},
+
+ {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{10}, {10}, {10}, {2}},
+ {{0}, {10}, {20}, {30}}},
+
+ {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{24}, {16}, {8}, {0}}},
+
+ {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
+
+ {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {16}, {16}, {0}},
+ {{0}, {0}, {16}, {0}}},
+
+ {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4,
+ 32, {{16}, {16}, {0}, {0}},
+ {{16}, {0}, {0}, {0}}},
+
+ {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4,
+ 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2,
+ 16, {{8}, {8}, {0}, {0}},
+ {{8}, {0}, {0}, {0}}},
+
+ {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2,
+ 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1,
+ 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {6}, {5}, {0}},
+ {{0}, {5}, {11}, {0}}},
+
+ {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2,
+ 16, {{5}, {5}, {5}, {1}},
+ {{0}, {5}, {10}, {15}}},
+
+ {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4,
+ 32, {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4,
+ 24, {{8}, {8}, {8}, {0}},
+ {{0}, {8}, {16}, {24}}},
+
+ {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8,
+ 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16,
+ 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+ uint64_t tmp = (uint64_t) a*b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
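
clamped_umul32() widens both operands to 64 bits before multiplying and saturates at the largest 32-bit value instead of wrapping, which keeps the size computations that follow overflow-safe. A standalone sketch of the behaviour (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t clamped_umul32(uint32_t a, uint32_t b)
    {
            /* Widen first, then clamp to the largest representable u32. */
            uint64_t tmp = (uint64_t)a * b;

            return tmp > UINT32_MAX ? UINT32_MAX : (uint32_t)tmp;
    }

    int main(void)
    {
            /* 2^16 * 2^16 wraps to 0 in 32-bit arithmetic; here it saturates. */
            printf("%u\n", clamped_umul32(0x10000u, 0x10000u)); /* 4294967295 */
            return 0;
    }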
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
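
Each mip level halves every dimension of the previous one, clamped so no dimension drops below 1. A minimal standalone sketch of the same arithmetic (not part of the patch; a plain struct stands in for surf_size_struct):

    #include <stdio.h>
    #include <stdint.h>

    struct size3 { uint32_t width, height, depth; };

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    static struct size3 mip_size(struct size3 base, uint32_t mip)
    {
            struct size3 s;

            /* Each mip halves every dimension, clamped to 1. */
            s.width  = max_u32(base.width  >> mip, 1);
            s.height = max_u32(base.height >> mip, 1);
            s.depth  = max_u32(base.depth  >> mip, 1);
            return s;
    }

    int main(void)
    {
            struct size3 base = { 256, 256, 1 };
            uint32_t mip;

            for (mip = 0; mip < 9; mip++) {
                    struct size3 s = mip_size(base, mip);
                    printf("mip %u: %ux%ux%u\n", mip, s.width, s.height, s.depth);
            }
            return 0;
    }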
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
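
Because the pitch is computed in whole blocks, a width that is not a multiple of the block width rounds up before being multiplied by pitch_bytes_per_block. A standalone sketch for a 4x4-block compressed format with 8 bytes per block, matching the DXT1/BC1 rows of the descriptor table above (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    int main(void)
    {
            uint32_t width_px = 70; /* not a multiple of the 4-pixel block width */
            uint32_t blocks_wide = DIV_ROUND_UP(width_px, 4); /* 18 blocks */
            uint32_t pitch = blocks_wide * 8;                 /* 144 bytes */

            printf("pitch = %u bytes\n", pitch);
            return 0;
    }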
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
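
For planar surfaces the pitch argument is ignored and the size is the saturating product of block counts and bytes per block. Evaluating that branch for the NV12 descriptor above (2x2 blocks, 6 bytes per block) reproduces the familiar width x height x 1.5 figure; a standalone sketch (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    int main(void)
    {
            uint32_t w = 640, h = 480;
            uint32_t blocks_w = DIV_ROUND_UP(w, 2); /* 320 */
            uint32_t blocks_h = DIV_ROUND_UP(h, 2); /* 240 */
            uint32_t bytes = blocks_w * blocks_h * 6;

            printf("%u bytes\n", bytes); /* 460800 = 640 * 480 * 1.5 */
            return 0;
    }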
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ u32 num_layers)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ return total_size * num_layers;
+}
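
The serialized size sums one tightly packed mip chain (pitch 0 at every level) and multiplies by the layer count. A standalone sketch for a hypothetical 1x1-block, 4-bytes-per-pixel format (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    int main(void)
    {
            uint32_t w = 256, h = 256, bytes_per_pixel = 4;
            uint32_t mips = 9, layers = 6; /* e.g. a full cubemap mip chain */
            uint32_t total = 0;
            uint32_t mip;

            for (mip = 0; mip < mips; mip++) {
                    uint32_t mw = max_u32(w >> mip, 1);
                    uint32_t mh = max_u32(h >> mip, 1);

                    total += mw * mh * bytes_per_pixel; /* tightly packed level */
            }
            printf("%u bytes\n", total * layers); /* 349524 * 6 = 2097144 */
            return 0;
    }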
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: The x coordinate of the pixel.
+ * @y: The y coordinate of the pixel.
+ * @z: The z coordinate (slice) of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
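
The offset arithmetic first divides pixel coordinates down to block coordinates, so the same expression serves 1x1-block linear formats and 4x4-block compressed ones. A standalone sketch of the computation (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    /* Byte offset of pixel (x, y, z) given the block geometry; mirrors
     * the expression in svga3dsurface_get_pixel_offset() above. */
    static uint32_t pixel_offset(uint32_t width, uint32_t height,
                                 uint32_t x, uint32_t y, uint32_t z,
                                 uint32_t bw, uint32_t bh, uint32_t bd,
                                 uint32_t bytes_per_block)
    {
            uint32_t rowstride = DIV_ROUND_UP(width, bw) * bytes_per_block;
            uint32_t imgstride = DIV_ROUND_UP(height, bh) * rowstride;

            return z / bd * imgstride + y / bh * rowstride +
                   x / bw * bytes_per_block;
    }

    int main(void)
    {
            /* A8R8G8B8: 1x1 blocks, 4 bytes each; pixel (3, 2) of 16x16. */
            printf("%u\n", pixel_offset(16, 16, 3, 2, 0, 1, 1, 1, 4)); /* 140 */
            return 0;
    }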
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
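
svga3dsurface_get_image_offset() assumes a face-major layout: each face stores its complete mip chain contiguously, so the requested image sits after `face` whole chains plus the levels that precede `mip` within its own chain. A standalone sketch with made-up per-level sizes (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical per-level byte sizes of a 3-level mip chain. */
            uint32_t level_bytes[] = { 4096, 1024, 256 };
            uint32_t chain = 4096 + 1024 + 256; /* 5376 bytes per face */
            uint32_t face = 2, mip = 1;

            /* Skip two whole faces, then level 0 of the requested face. */
            uint32_t offset = chain * face + level_bytes[mip - 1];

            printf("%u\n", offset); /* 10752 + 4096 = 14848 */
            return 0;
    }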
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ * a ScreenTarget?
+ * (with just the GBObjects cap-bit
+ * set)
+ * @format: format to be queried
+ *
+ * Returns:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ return (format == SVGA3D_X8R8G8B8 ||
+ format == SVGA3D_A8R8G8B8 ||
+ format == SVGA3D_R5G6B5 ||
+ format == SVGA3D_X1R5G5B5 ||
+ format == SVGA3D_A1R5G5B5 ||
+ format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ * a ScreenTarget?
+ * (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Returns:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ return (format == SVGA3D_R8G8B8A8_UNORM ||
+ format == SVGA3D_B8G8R8A8_UNORM ||
+ format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ * ScreenTarget?
+ * (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Returns:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+ if (svga3dsurface_is_gb_screen_target_format(format)) {
+ return true;
+ }
+ return svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ * SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID ((uint32)-1)
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+ uint32 srcx;
+ uint32 srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+ uint32 srcx;
+ uint32 srcy;
+ uint32 srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_INVALID = 0,
+
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_FORMAT_MIN = 1,
+
+ SVGA3D_A8R8G8B8 = 2,
+
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
+
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
+
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
+
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
+
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
+
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+
+ SVGA3D_A2R10G10B10 = 26,
+
+ /* signed formats */
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
+
+ /* mixed formats */
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
+
+ SVGA3D_ALPHA8 = 32,
+
+ /* Single- and dual-component floating point formats */
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
+
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
+
+ SVGA3D_V16U16 = 39,
+
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
+
+ /* Packed Video formats */
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ /* Planar video formats */
+ SVGA3D_NV12 = 44,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 45,
+
+ SVGA3D_R32G32B32A32_TYPELESS = 46,
+ SVGA3D_R32G32B32A32_UINT = 47,
+ SVGA3D_R32G32B32A32_SINT = 48,
+ SVGA3D_R32G32B32_TYPELESS = 49,
+ SVGA3D_R32G32B32_FLOAT = 50,
+ SVGA3D_R32G32B32_UINT = 51,
+ SVGA3D_R32G32B32_SINT = 52,
+ SVGA3D_R16G16B16A16_TYPELESS = 53,
+ SVGA3D_R16G16B16A16_UINT = 54,
+ SVGA3D_R16G16B16A16_SNORM = 55,
+ SVGA3D_R16G16B16A16_SINT = 56,
+ SVGA3D_R32G32_TYPELESS = 57,
+ SVGA3D_R32G32_UINT = 58,
+ SVGA3D_R32G32_SINT = 59,
+ SVGA3D_R32G8X24_TYPELESS = 60,
+ SVGA3D_D32_FLOAT_S8X24_UINT = 61,
+ SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
+ SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R10G10B10A2_TYPELESS = 64,
+ SVGA3D_R10G10B10A2_UINT = 65,
+ SVGA3D_R11G11B10_FLOAT = 66,
+ SVGA3D_R8G8B8A8_TYPELESS = 67,
+ SVGA3D_R8G8B8A8_UNORM = 68,
+ SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
+ SVGA3D_R8G8B8A8_UINT = 70,
+ SVGA3D_R8G8B8A8_SINT = 71,
+ SVGA3D_R16G16_TYPELESS = 72,
+ SVGA3D_R16G16_UINT = 73,
+ SVGA3D_R16G16_SINT = 74,
+ SVGA3D_R32_TYPELESS = 75,
+ SVGA3D_D32_FLOAT = 76,
+ SVGA3D_R32_UINT = 77,
+ SVGA3D_R32_SINT = 78,
+ SVGA3D_R24G8_TYPELESS = 79,
+ SVGA3D_D24_UNORM_S8_UINT = 80,
+ SVGA3D_R24_UNORM_X8_TYPELESS = 81,
+ SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R8G8_TYPELESS = 83,
+ SVGA3D_R8G8_UNORM = 84,
+ SVGA3D_R8G8_UINT = 85,
+ SVGA3D_R8G8_SINT = 86,
+ SVGA3D_R16_TYPELESS = 87,
+ SVGA3D_R16_UNORM = 88,
+ SVGA3D_R16_UINT = 89,
+ SVGA3D_R16_SNORM = 90,
+ SVGA3D_R16_SINT = 91,
+ SVGA3D_R8_TYPELESS = 92,
+ SVGA3D_R8_UNORM = 93,
+ SVGA3D_R8_UINT = 94,
+ SVGA3D_R8_SNORM = 95,
+ SVGA3D_R8_SINT = 96,
+ SVGA3D_P8 = 97,
+ SVGA3D_R9G9B9E5_SHAREDEXP = 98,
+ SVGA3D_R8G8_B8G8_UNORM = 99,
+ SVGA3D_G8R8_G8B8_UNORM = 100,
+ SVGA3D_BC1_TYPELESS = 101,
+ SVGA3D_BC1_UNORM_SRGB = 102,
+ SVGA3D_BC2_TYPELESS = 103,
+ SVGA3D_BC2_UNORM_SRGB = 104,
+ SVGA3D_BC3_TYPELESS = 105,
+ SVGA3D_BC3_UNORM_SRGB = 106,
+ SVGA3D_BC4_TYPELESS = 107,
+ SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */
+ SVGA3D_BC4_SNORM = 109,
+ SVGA3D_BC5_TYPELESS = 110,
+ SVGA3D_ATI2 = 111, /* DX9-specific BC5_UNORM */
+ SVGA3D_BC5_SNORM = 112,
+ SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
+ SVGA3D_B8G8R8A8_TYPELESS = 114,
+ SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
+ SVGA3D_B8G8R8X8_TYPELESS = 116,
+ SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
+
+ /* Advanced depth formats. */
+ SVGA3D_Z_DF16 = 118,
+ SVGA3D_Z_DF24 = 119,
+ SVGA3D_Z_D24S8_INT = 120,
+
+ /* Planar video formats. */
+ SVGA3D_YV12 = 121,
+
+ SVGA3D_R32G32B32A32_FLOAT = 122,
+ SVGA3D_R16G16B16A16_FLOAT = 123,
+ SVGA3D_R16G16B16A16_UNORM = 124,
+ SVGA3D_R32G32_FLOAT = 125,
+ SVGA3D_R10G10B10A2_UNORM = 126,
+ SVGA3D_R8G8B8A8_SNORM = 127,
+ SVGA3D_R16G16_FLOAT = 128,
+ SVGA3D_R16G16_UNORM = 129,
+ SVGA3D_R16G16_SNORM = 130,
+ SVGA3D_R32_FLOAT = 131,
+ SVGA3D_R8G8_SNORM = 132,
+ SVGA3D_R16_FLOAT = 133,
+ SVGA3D_D16_UNORM = 134,
+ SVGA3D_A8_UNORM = 135,
+ SVGA3D_BC1_UNORM = 136,
+ SVGA3D_BC2_UNORM = 137,
+ SVGA3D_BC3_UNORM = 138,
+ SVGA3D_B5G6R5_UNORM = 139,
+ SVGA3D_B5G5R5A1_UNORM = 140,
+ SVGA3D_B8G8R8A8_UNORM = 141,
+ SVGA3D_B8G8R8X8_UNORM = 142,
+ SVGA3D_BC4_UNORM = 143,
+ SVGA3D_BC5_UNORM = 144,
+
+ SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+ SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+
+ /*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+ SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
+ SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
+ SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
+ SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
+ SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
+ SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
+ SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
+ SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+
+ /*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+ SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+
+ SVGA3D_SURFACE_INACTIVE = (1 << 13),
+ SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
+ SVGA3D_SURFACE_VOLUME = (1 << 15),
+
+ /*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+ SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+
+ /*
+ * Align images in the guest-backing mob to 16 bytes.
+ */
+ SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+
+ SVGA3D_SURFACE_1D = (1 << 18),
+ SVGA3D_SURFACE_ARRAY = (1 << 19),
+
+ /*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
+ SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+
+ /*
+ * A note on staging flags:
+ *
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+ SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
+ SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+
+ /*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+
+ /*
+ * Marker for the last defined bit.
+ */
+ SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
+} SVGA3dSurfaceFlags;
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_ALIGN16 | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_DECODE_RENDERTARGET | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_ARRAY | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_DECODE_RENDERTARGET | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK \
+ ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
+
+#define SVGA3D_SURFACE_STAGING_MASK \
+ ( SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD \
+ )
+
+#define SVGA3D_SURFACE_BIND_MASK \
+ ( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
+ SVGA3D_SURFACE_BIND_RENDER_TARGET | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
+ )
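
The *_DISALLOWED_MASK and *_MASK definitions above are intended for validation, where acceptance reduces to a single AND against the relevant mask. A minimal sketch of such a check (not part of the patch; the helper and the example mask are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for one of the *_DISALLOWED_MASK values. */
    #define DISALLOWED_MASK ((1u << 12) | (1u << 16))

    static bool surface_flags_allowed(uint32_t flags, uint32_t disallowed)
    {
            /* Valid only if no disallowed bit is set. */
            return (flags & disallowed) == 0;
    }

    int main(void)
    {
            printf("%d\n", surface_flags_allowed(1u << 1, DISALLOWED_MASK));  /* 1 */
            printf("%d\n", surface_flags_allowed(1u << 16, DISALLOWED_MASK)); /* 0 */
            return 0;
    }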
+
+typedef enum {
+ SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
+ SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
+ SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
+ SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is of the same depth once the alpha channel is ignored. E.g., if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip). This flag
+ * should not be set on alpha formats.
+ */
+ SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format,
+ * implying that the driver can create a Context in this mode (for some
+ * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+ SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+ SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an sRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+ SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+ SVGA3DFORMAT_OP_DMAP = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+ SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an sRGB target
+ * (meaning that the pixel pipe will de-linearize data on output to the format)
+ */
+ SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+ SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+ SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes nor mipmapping.
+ */
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE \
+ (SVGA3DFORMAT_OP_TEXTURE | \
+ SVGA3DFORMAT_OP_VOLUMETEXTURE | \
+ SVGA3DFORMAT_OP_CUBETEXTURE | \
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \
+ SVGA3DFORMAT_OP_ZSTENCIL | \
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+ SVGA3DFORMAT_OP_DISPLAYMODE | \
+ SVGA3DFORMAT_OP_3DACCELERATION | \
+ SVGA3DFORMAT_OP_PIXELSIZE | \
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN | \
+ SVGA3DFORMAT_OP_SRGBREAD | \
+ SVGA3DFORMAT_OP_BUMPMAP | \
+ SVGA3DFORMAT_OP_DMAP | \
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \
+ SVGA3DFORMAT_OP_SRGBWRITE | \
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP | \
+ SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE \
+ (SVGA3DFORMAT_OP_NOFILTER | \
+ SVGA3DFORMAT_OP_NOALPHABLEND | \
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */
+typedef union {
+ uint32 value;
+ struct {
+ uint32 texture : 1;
+ uint32 volumeTexture : 1;
+ uint32 cubeTexture : 1;
+ uint32 offscreenRenderTarget : 1;
+ uint32 sameFormatRenderTarget : 1;
+ uint32 unknown1 : 1;
+ uint32 zStencil : 1;
+ uint32 zStencilArbitraryDepth : 1;
+ uint32 sameFormatUpToAlpha : 1;
+ uint32 unknown2 : 1;
+ uint32 displayMode : 1;
+ uint32 acceleration3d : 1;
+ uint32 pixelSize : 1;
+ uint32 convertToARGB : 1;
+ uint32 offscreenPlain : 1;
+ uint32 sRGBRead : 1;
+ uint32 bumpMap : 1;
+ uint32 dmap : 1;
+ uint32 noFilter : 1;
+ uint32 memberOfGroupARGB : 1;
+ uint32 sRGBWrite : 1;
+ uint32 noAlphaBlend : 1;
+ uint32 autoGenMipMap : 1;
+ uint32 vertexTexture : 1;
+ uint32 noTexCoordWrapNorMip : 1;
+ };
+} SVGA3dSurfaceFormatCaps;
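
The union exposes the same 32-bit word two ways: as the raw SVGA3DFORMAT_OP_* mask through value, and as named capability bits through the bitfields, which is why the comment requires the entries to line up with the flag positions. Note that C leaves bitfield ordering to the ABI, so the correspondence holds on the ABIs the device interface targets rather than universally. A reduced standalone sketch (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    union format_caps {
            uint32_t value;
            struct {
                    uint32_t texture : 1;        /* SVGA3DFORMAT_OP_TEXTURE */
                    uint32_t volume_texture : 1; /* SVGA3DFORMAT_OP_VOLUMETEXTURE */
                    /* ...remaining bits elided for the sketch... */
            } bits;
    };

    int main(void)
    {
            union format_caps c;

            c.value = 0x3; /* TEXTURE | VOLUMETEXTURE */
            /* On a typical little-endian ABI the two low bits read back as
             * set; portable code should not rely on bitfield layout. */
            printf("texture=%u volume=%u\n",
                   (unsigned)c.bits.texture, (unsigned)c.bits.volume_texture);
            return 0;
    }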
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_RS_INVALID = 0,
+ SVGA3D_RS_MIN = 1,
+ SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
+ SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
+ SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
+ SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
+ SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
+ SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
+ SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
+ SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
+ SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
+ SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
+ SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
+ SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
+ SVGA3D_RS_STENCILREF = 13, /* uint32 */
+ SVGA3D_RS_STENCILMASK = 14, /* uint32 */
+ SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
+ SVGA3D_RS_FOGSTART = 16, /* float */
+ SVGA3D_RS_FOGEND = 17, /* float */
+ SVGA3D_RS_FOGDENSITY = 18, /* float */
+ SVGA3D_RS_POINTSIZE = 19, /* float */
+ SVGA3D_RS_POINTSIZEMIN = 20, /* float */
+ SVGA3D_RS_POINTSIZEMAX = 21, /* float */
+ SVGA3D_RS_POINTSCALE_A = 22, /* float */
+ SVGA3D_RS_POINTSCALE_B = 23, /* float */
+ SVGA3D_RS_POINTSCALE_C = 24, /* float */
+ SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
+ SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
+ SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
+ SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
+ SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
+ SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
+ SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
+ SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
+ SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
+ SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
+ SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
+ SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
+ SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
+ SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
+ SVGA3D_RS_ZBIAS = 45, /* float */
+ SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
+ SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
+ SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
+ SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
+ SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
+ SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
+ SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
+ SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
+ SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
+ SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
+ SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
+ SVGA3D_RS_DEPTHBIAS = 64, /* float */
+
+
+ /*
+ * Output Gamma Level
+ *
+ * Output gamma affects the gamma curve of colors that are output from the
+ * rendering pipeline. A value of 1.0 specifies a linear color space. If the
+ * value is <= 0.0, gamma correction is ignored and linear color space is
+ * used.
+ */
+
+ SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
+ SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
+ SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
+ SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
+ SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
+ SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
+ SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
+ SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
+ SVGA3D_RS_TWEENFACTOR = 88, /* float */
+ SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
+ SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
+ SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
+ SVGA3D_RS_LINEWIDTH = 98, /* float */
+ SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+ SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
+ SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+ SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
+ SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+ SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
+ SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
+ SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
+ SVGA3D_VERTEXMATERIAL_MAX = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+ SVGA3D_FILLMODE_INVALID = 0,
+ SVGA3D_FILLMODE_MIN = 1,
+ SVGA3D_FILLMODE_POINT = 1,
+ SVGA3D_FILLMODE_LINE = 2,
+ SVGA3D_FILLMODE_FILL = 3,
+ SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint16 mode; /* SVGA3dFillModeType */
+ uint16 face; /* SVGA3dFace */
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
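
These packed unions exist so a driver can hand the device two 16-bit fields as a single 32-bit register value. A minimal sketch of building an SVGA3dFillMode value (the helper name is illustrative only, and SVGA3dFace is defined further down in this header):

	/* Sketch: pack fill mode and face selection into one 32-bit value. */
	static inline uint32 pack_fill_mode(SVGA3dFillModeType mode, SVGA3dFace face)
	{
		SVGA3dFillMode fm;

		fm.mode = (uint16)mode;   /* e.g. SVGA3D_FILLMODE_LINE for wireframe */
		fm.face = (uint16)face;   /* e.g. SVGA3D_FACE_FRONT_BACK */
		return fm.uintValue;      /* the value the device consumes */
	}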
+
+typedef enum {
+ SVGA3D_SHADEMODE_INVALID = 0,
+ SVGA3D_SHADEMODE_FLAT = 1,
+ SVGA3D_SHADEMODE_SMOOTH = 2,
+ SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
+ SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint16 repeat;
+ uint16 pattern;
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+ SVGA3D_BLENDOP_INVALID = 0,
+ SVGA3D_BLENDOP_MIN = 1,
+ SVGA3D_BLENDOP_ZERO = 1,
+ SVGA3D_BLENDOP_ONE = 2,
+ SVGA3D_BLENDOP_SRCCOLOR = 3,
+ SVGA3D_BLENDOP_INVSRCCOLOR = 4,
+ SVGA3D_BLENDOP_SRCALPHA = 5,
+ SVGA3D_BLENDOP_INVSRCALPHA = 6,
+ SVGA3D_BLENDOP_DESTALPHA = 7,
+ SVGA3D_BLENDOP_INVDESTALPHA = 8,
+ SVGA3D_BLENDOP_DESTCOLOR = 9,
+ SVGA3D_BLENDOP_INVDESTCOLOR = 10,
+ SVGA3D_BLENDOP_SRCALPHASAT = 11,
+ SVGA3D_BLENDOP_BLENDFACTOR = 12,
+ SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
+ SVGA3D_BLENDOP_SRC1COLOR = 14,
+ SVGA3D_BLENDOP_INVSRC1COLOR = 15,
+ SVGA3D_BLENDOP_SRC1ALPHA = 16,
+ SVGA3D_BLENDOP_INVSRC1ALPHA = 17,
+ SVGA3D_BLENDOP_BLENDFACTORALPHA = 18,
+ SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+ SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+ SVGA3D_BLENDEQ_INVALID = 0,
+ SVGA3D_BLENDEQ_MIN = 1,
+ SVGA3D_BLENDEQ_ADD = 1,
+ SVGA3D_BLENDEQ_SUBTRACT = 2,
+ SVGA3D_BLENDEQ_REVSUBTRACT = 3,
+ SVGA3D_BLENDEQ_MINIMUM = 4,
+ SVGA3D_BLENDEQ_MAXIMUM = 5,
+ SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+ SVGA3D_DX11_LOGICOP_MIN = 0,
+ SVGA3D_DX11_LOGICOP_CLEAR = 0,
+ SVGA3D_DX11_LOGICOP_SET = 1,
+ SVGA3D_DX11_LOGICOP_COPY = 2,
+ SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+ SVGA3D_DX11_LOGICOP_NOOP = 4,
+ SVGA3D_DX11_LOGICOP_INVERT = 5,
+ SVGA3D_DX11_LOGICOP_AND = 6,
+ SVGA3D_DX11_LOGICOP_NAND = 7,
+ SVGA3D_DX11_LOGICOP_OR = 8,
+ SVGA3D_DX11_LOGICOP_NOR = 9,
+ SVGA3D_DX11_LOGICOP_XOR = 10,
+ SVGA3D_DX11_LOGICOP_EQUIV = 11,
+ SVGA3D_DX11_LOGICOP_AND_REVERSE = 12,
+ SVGA3D_DX11_LOGICOP_AND_INVERTED = 13,
+ SVGA3D_DX11_LOGICOP_OR_REVERSE = 14,
+ SVGA3D_DX11_LOGICOP_OR_INVERTED = 15,
+ SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+ SVGA3D_FRONTWINDING_INVALID = 0,
+ SVGA3D_FRONTWINDING_CW = 1,
+ SVGA3D_FRONTWINDING_CCW = 2,
+ SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+ SVGA3D_FACE_INVALID = 0,
+ SVGA3D_FACE_NONE = 1,
+ SVGA3D_FACE_MIN = 1,
+ SVGA3D_FACE_FRONT = 2,
+ SVGA3D_FACE_BACK = 3,
+ SVGA3D_FACE_FRONT_BACK = 4,
+ SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+ SVGA3D_CMP_INVALID = 0,
+ SVGA3D_CMP_NEVER = 1,
+ SVGA3D_CMP_LESS = 2,
+ SVGA3D_CMP_EQUAL = 3,
+ SVGA3D_CMP_LESSEQUAL = 4,
+ SVGA3D_CMP_GREATER = 5,
+ SVGA3D_CMP_NOTEQUAL = 6,
+ SVGA3D_CMP_GREATEREQUAL = 7,
+ SVGA3D_CMP_ALWAYS = 8,
+ SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+ SVGA3D_FOGFUNC_INVALID = 0,
+ SVGA3D_FOGFUNC_EXP = 1,
+ SVGA3D_FOGFUNC_EXP2 = 2,
+ SVGA3D_FOGFUNC_LINEAR = 3,
+ SVGA3D_FOGFUNC_PER_VERTEX = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+ SVGA3D_FOGTYPE_INVALID = 0,
+ SVGA3D_FOGTYPE_VERTEX = 1,
+ SVGA3D_FOGTYPE_PIXEL = 2,
+ SVGA3D_FOGTYPE_MAX = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+ SVGA3D_FOGBASE_INVALID = 0,
+ SVGA3D_FOGBASE_DEPTHBASED = 1,
+ SVGA3D_FOGBASE_RANGEBASED = 2,
+ SVGA3D_FOGBASE_MAX = 3
+} SVGA3dFogBase;
+
+typedef enum {
+ SVGA3D_STENCILOP_INVALID = 0,
+ SVGA3D_STENCILOP_MIN = 1,
+ SVGA3D_STENCILOP_KEEP = 1,
+ SVGA3D_STENCILOP_ZERO = 2,
+ SVGA3D_STENCILOP_REPLACE = 3,
+ SVGA3D_STENCILOP_INCRSAT = 4,
+ SVGA3D_STENCILOP_DECRSAT = 5,
+ SVGA3D_STENCILOP_INVERT = 6,
+ SVGA3D_STENCILOP_INCR = 7,
+ SVGA3D_STENCILOP_DECR = 8,
+ SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+ SVGA3D_CLIPPLANE_0 = (1 << 0),
+ SVGA3D_CLIPPLANE_1 = (1 << 1),
+ SVGA3D_CLIPPLANE_2 = (1 << 2),
+ SVGA3D_CLIPPLANE_3 = (1 << 3),
+ SVGA3D_CLIPPLANE_4 = (1 << 4),
+ SVGA3D_CLIPPLANE_5 = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+ SVGA3D_CLEAR_COLOR = 0x1,
+ SVGA3D_CLEAR_DEPTH = 0x2,
+ SVGA3D_CLEAR_STENCIL = 0x4,
+
+ /*
+	 * Hint only; must be used together with SVGA3D_CLEAR_COLOR. If
+	 * the SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+ * bit will be ignored.
+ */
+ SVGA3D_CLEAR_COLORFILL = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+ SVGA3D_RT_DEPTH = 0,
+ SVGA3D_RT_MIN = 0,
+ SVGA3D_RT_STENCIL = 1,
+ SVGA3D_RT_COLOR0 = 2,
+ SVGA3D_RT_COLOR1 = 3,
+ SVGA3D_RT_COLOR2 = 4,
+ SVGA3D_RT_COLOR3 = 5,
+ SVGA3D_RT_COLOR4 = 6,
+ SVGA3D_RT_COLOR5 = 7,
+ SVGA3D_RT_COLOR6 = 8,
+ SVGA3D_RT_COLOR7 = 9,
+ SVGA3D_RT_MAX,
+ SVGA3D_RT_INVALID = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+ struct {
+ uint32 red : 1;
+ uint32 green : 1;
+ uint32 blue : 1;
+ uint32 alpha : 1;
+ };
+ uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+ SVGA3D_VBLEND_DISABLE = 0,
+ SVGA3D_VBLEND_1WEIGHT = 1,
+ SVGA3D_VBLEND_2WEIGHT = 2,
+ SVGA3D_VBLEND_3WEIGHT = 3,
+ SVGA3D_VBLEND_MAX = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+ SVGA3D_WRAPCOORD_0 = 1 << 0,
+ SVGA3D_WRAPCOORD_1 = 1 << 1,
+ SVGA3D_WRAPCOORD_2 = 1 << 2,
+ SVGA3D_WRAPCOORD_3 = 1 << 3,
+ SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_TS_INVALID = 0,
+ SVGA3D_TS_MIN = 1,
+ SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
+ SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
+ SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
+ SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
+ SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
+ SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
+ SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
+ SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
+ SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
+ SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
+ SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
+ SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
+ SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
+ SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
+ SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
+ SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
+ SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
+
+
+ /*
+ * Sampler Gamma Level
+ *
+	 * Sampler gamma affects the color of samples taken from the sampler. A
+	 * value of 1.0 will produce linear samples. If the value is <= 0.0, the
+ * gamma value is ignored and a linear space is used.
+ */
+
+ SVGA3D_TS_GAMMA = 25, /* float */
+ SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
+ SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
+ SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
+ SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */
+ SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */
+ SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */
+ SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */
+ SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+ SVGA3D_TC_INVALID = 0,
+ SVGA3D_TC_DISABLE = 1,
+ SVGA3D_TC_SELECTARG1 = 2,
+ SVGA3D_TC_SELECTARG2 = 3,
+ SVGA3D_TC_MODULATE = 4,
+ SVGA3D_TC_ADD = 5,
+ SVGA3D_TC_ADDSIGNED = 6,
+ SVGA3D_TC_SUBTRACT = 7,
+ SVGA3D_TC_BLENDTEXTUREALPHA = 8,
+ SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
+ SVGA3D_TC_BLENDCURRENTALPHA = 10,
+ SVGA3D_TC_BLENDFACTORALPHA = 11,
+ SVGA3D_TC_MODULATE2X = 12,
+ SVGA3D_TC_MODULATE4X = 13,
+ SVGA3D_TC_DSDT = 14,
+ SVGA3D_TC_DOTPRODUCT3 = 15,
+ SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
+ SVGA3D_TC_ADDSIGNED2X = 17,
+ SVGA3D_TC_ADDSMOOTH = 18,
+ SVGA3D_TC_PREMODULATE = 19,
+ SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
+ SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
+ SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+ SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+ SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
+ SVGA3D_TC_MULTIPLYADD = 25,
+ SVGA3D_TC_LERP = 26,
+ SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
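To illustrate the macro above: combiner op N maps to capability bit N-1, while the INVALID op (0) maps to no bit at all. A small sketch of the obvious use, where 'caps' is assumed to come from a device capability query:

	/* Sketch: test whether a texture combiner op is advertised. */
	static inline Bool combiner_supported(uint32 caps, SVGA3dTextureCombiner op)
	{
		return (caps & SVGA3D_TC_CAP_BIT(op)) != 0;
	}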
+
+typedef enum {
+ SVGA3D_TEX_ADDRESS_INVALID = 0,
+ SVGA3D_TEX_ADDRESS_MIN = 1,
+ SVGA3D_TEX_ADDRESS_WRAP = 1,
+ SVGA3D_TEX_ADDRESS_MIRROR = 2,
+ SVGA3D_TEX_ADDRESS_CLAMP = 3,
+ SVGA3D_TEX_ADDRESS_BORDER = 4,
+ SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+ SVGA3D_TEX_ADDRESS_EDGE = 6,
+ SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+ SVGA3D_TEX_FILTER_NONE = 0,
+ SVGA3D_TEX_FILTER_MIN = 0,
+ SVGA3D_TEX_FILTER_NEAREST = 1,
+ SVGA3D_TEX_FILTER_LINEAR = 2,
+ SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+ SVGA3D_TEX_TRANSFORM_OFF = 0,
+ SVGA3D_TEX_TRANSFORM_S = (1 << 0),
+ SVGA3D_TEX_TRANSFORM_T = (1 << 1),
+ SVGA3D_TEX_TRANSFORM_R = (1 << 2),
+ SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
+ SVGA3D_TEX_PROJECTED = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+ SVGA3D_TEXCOORD_GEN_OFF = 0,
+ SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
+ SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
+ SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+ SVGA3D_TEXCOORD_GEN_SPHERE = 4,
+ SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+ SVGA3D_TA_INVALID = 0,
+ SVGA3D_TA_TFACTOR = 1,
+ SVGA3D_TA_PREVIOUS = 2,
+ SVGA3D_TA_DIFFUSE = 3,
+ SVGA3D_TA_TEXTURE = 4,
+ SVGA3D_TA_SPECULAR = 5,
+ SVGA3D_TA_CONSTANT = 6,
+ SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+ SVGA3D_TM_NONE = 0,
+ SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
+ SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
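
Since the modifiers occupy the bits at and above SVGA3D_TM_MASK_LEN while the argument constants fit below it, an argument and a modifier are combined with a bitwise OR; a sketch:

	/* Sketch: "one minus texture color" as a combiner argument, e.g. as
	 * the value written for SVGA3D_TS_COLORARG1. */
	uint32 colorArg1 = SVGA3D_TA_TEXTURE | SVGA3D_TM_ONE_MINUS;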
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+ SVGA3D_DECLUSAGE_POSITION = 0,
+ SVGA3D_DECLUSAGE_BLENDWEIGHT,
+ SVGA3D_DECLUSAGE_BLENDINDICES,
+ SVGA3D_DECLUSAGE_NORMAL,
+ SVGA3D_DECLUSAGE_PSIZE,
+ SVGA3D_DECLUSAGE_TEXCOORD,
+ SVGA3D_DECLUSAGE_TANGENT,
+ SVGA3D_DECLUSAGE_BINORMAL,
+ SVGA3D_DECLUSAGE_TESSFACTOR,
+ SVGA3D_DECLUSAGE_POSITIONT,
+ SVGA3D_DECLUSAGE_COLOR,
+ SVGA3D_DECLUSAGE_FOG,
+ SVGA3D_DECLUSAGE_DEPTH,
+ SVGA3D_DECLUSAGE_SAMPLE,
+ SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+ SVGA3D_DECLMETHOD_DEFAULT = 0,
+ SVGA3D_DECLMETHOD_PARTIALU,
+ SVGA3D_DECLMETHOD_PARTIALV,
+ SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
+ SVGA3D_DECLMETHOD_UV,
+ SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+ /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+ SVGA3D_DECLTYPE_FLOAT1 = 0,
+ SVGA3D_DECLTYPE_FLOAT2 = 1,
+ SVGA3D_DECLTYPE_FLOAT3 = 2,
+ SVGA3D_DECLTYPE_FLOAT4 = 3,
+ SVGA3D_DECLTYPE_D3DCOLOR = 4,
+ SVGA3D_DECLTYPE_UBYTE4 = 5,
+ SVGA3D_DECLTYPE_SHORT2 = 6,
+ SVGA3D_DECLTYPE_SHORT4 = 7,
+ SVGA3D_DECLTYPE_UBYTE4N = 8,
+ SVGA3D_DECLTYPE_SHORT2N = 9,
+ SVGA3D_DECLTYPE_SHORT4N = 10,
+ SVGA3D_DECLTYPE_USHORT2N = 11,
+ SVGA3D_DECLTYPE_USHORT4N = 12,
+ SVGA3D_DECLTYPE_UDEC3 = 13,
+ SVGA3D_DECLTYPE_DEC3N = 14,
+ SVGA3D_DECLTYPE_FLOAT16_2 = 15,
+ SVGA3D_DECLTYPE_FLOAT16_4 = 16,
+ SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+ struct {
+ /*
+ * For index data, this number represents the number of instances to draw.
+ * For instance data, this number represents the number of
+ * instances/vertex in this stream
+ */
+ uint32 count : 30;
+
+ /*
+ * This is 1 if this is supposed to be the data that is repeated for
+ * every instance.
+ */
+ uint32 indexedData : 1;
+
+ /*
+ * This is 1 if this is supposed to be the per-instance data.
+ */
+ uint32 instanceData : 1;
+ };
+
+ uint32 value;
+} SVGA3dVertexDivisor;
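
A sketch of how the divisor might be filled in for instanced drawing, following the field descriptions above; numInstances is assumed to come from the draw call:

	/* Sketch: stream 0 holds mesh data repeated for every instance,
	 * stream 1 holds one set of per-instance data. */
	SVGA3dVertexDivisor div0, div1;

	div0.value = 0;
	div0.indexedData = 1;        /* repeated for every instance */
	div0.count = numInstances;   /* number of instances to draw */

	div1.value = 0;
	div1.instanceData = 1;       /* per-instance data */
	div1.count = 1;              /* one entry consumed per instance */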
+
+typedef enum {
+ /*
+ * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
+ *
+ * List MIN second so debuggers will think INVALID is
+ * the correct name.
+ */
+ SVGA3D_PRIMITIVE_INVALID = 0,
+ SVGA3D_PRIMITIVE_MIN = 0,
+ SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
+ SVGA3D_PRIMITIVE_POINTLIST = 2,
+ SVGA3D_PRIMITIVE_LINELIST = 3,
+ SVGA3D_PRIMITIVE_LINESTRIP = 4,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
+ SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
+ SVGA3D_PRIMITIVE_LINELIST_ADJ = 7,
+ SVGA3D_PRIMITIVE_PREDX_MAX = 7,
+ SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
+ SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
+ SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+ SVGA3D_COORDINATE_INVALID = 0,
+ SVGA3D_COORDINATE_LEFTHANDED = 1,
+ SVGA3D_COORDINATE_RIGHTHANDED = 2,
+ SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+ SVGA3D_TRANSFORM_INVALID = 0,
+ SVGA3D_TRANSFORM_WORLD = 1,
+ SVGA3D_TRANSFORM_MIN = 1,
+ SVGA3D_TRANSFORM_VIEW = 2,
+ SVGA3D_TRANSFORM_PROJECTION = 3,
+ SVGA3D_TRANSFORM_TEXTURE0 = 4,
+ SVGA3D_TRANSFORM_TEXTURE1 = 5,
+ SVGA3D_TRANSFORM_TEXTURE2 = 6,
+ SVGA3D_TRANSFORM_TEXTURE3 = 7,
+ SVGA3D_TRANSFORM_TEXTURE4 = 8,
+ SVGA3D_TRANSFORM_TEXTURE5 = 9,
+ SVGA3D_TRANSFORM_TEXTURE6 = 10,
+ SVGA3D_TRANSFORM_TEXTURE7 = 11,
+ SVGA3D_TRANSFORM_WORLD1 = 12,
+ SVGA3D_TRANSFORM_WORLD2 = 13,
+ SVGA3D_TRANSFORM_WORLD3 = 14,
+ SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+ SVGA3D_LIGHTTYPE_INVALID = 0,
+ SVGA3D_LIGHTTYPE_MIN = 1,
+ SVGA3D_LIGHTTYPE_POINT = 1,
+ SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
+ SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
+ SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
+ SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+ SVGA3D_CUBEFACE_POSX = 0,
+ SVGA3D_CUBEFACE_NEGX = 1,
+ SVGA3D_CUBEFACE_POSY = 2,
+ SVGA3D_CUBEFACE_NEGY = 3,
+ SVGA3D_CUBEFACE_POSZ = 4,
+ SVGA3D_CUBEFACE_NEGZ = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+ SVGA3D_SHADERTYPE_INVALID = 0,
+ SVGA3D_SHADERTYPE_MIN = 1,
+ SVGA3D_SHADERTYPE_VS = 1,
+ SVGA3D_SHADERTYPE_PS = 2,
+ SVGA3D_SHADERTYPE_PREDX_MAX = 3,
+ SVGA3D_SHADERTYPE_GS = 3,
+ SVGA3D_SHADERTYPE_DX10_MAX = 4,
+ SVGA3D_SHADERTYPE_HS = 4,
+ SVGA3D_SHADERTYPE_DS = 5,
+ SVGA3D_SHADERTYPE_CS = 6,
+ SVGA3D_SHADERTYPE_MAX = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+ (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+ (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+ (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
+typedef enum {
+ SVGA3D_CONST_TYPE_MIN = 0,
+ SVGA3D_CONST_TYPE_FLOAT = 0,
+ SVGA3D_CONST_TYPE_INT = 1,
+ SVGA3D_CONST_TYPE_BOOL = 2,
+ SVGA3D_CONST_TYPE_MAX = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX 256
+#define SVGA3D_CONSTINTREG_MAX 16
+#define SVGA3D_CONSTBOOLREG_MAX 16
+
+typedef enum {
+ SVGA3D_STRETCH_BLT_POINT = 0,
+ SVGA3D_STRETCH_BLT_LINEAR = 1,
+ SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+ SVGA3D_QUERYTYPE_INVALID = ((uint8)-1),
+ SVGA3D_QUERYTYPE_MIN = 0,
+ SVGA3D_QUERYTYPE_OCCLUSION = 0,
+ SVGA3D_QUERYTYPE_TIMESTAMP = 1,
+ SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2,
+ SVGA3D_QUERYTYPE_PIPELINESTATS = 3,
+ SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4,
+ SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
+ SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
+ SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
+ SVGA3D_QUERYTYPE_EVENT = 8,
+ SVGA3D_QUERYTYPE_DX10_MAX = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
+ SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
+ SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
+ SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
+ SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
+ SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 realFrequency;
+ uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 inputAssemblyVertices;
+ uint64 inputAssemblyPrimitives;
+ uint64 vertexShaderInvocations;
+ uint64 geometryShaderInvocations;
+ uint64 geometryShaderPrimitives;
+ uint64 clipperInvocations;
+ uint64 clipperPrimitives;
+ uint64 pixelShaderInvocations;
+ uint64 hullShaderInvocations;
+ uint64 domainShaderInvocations;
+ uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 numPrimitivesWritten;
+ uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+ SVGADXOcclusionQueryResult occ;
+ SVGADXEventQueryResult event;
+ SVGADXTimestampQueryResult ts;
+ SVGADXTimestampDisjointQueryResult tsDisjoint;
+ SVGADXPipelineStatisticsQueryResult pipelineStats;
+ SVGADXOcclusionPredicateQueryResult occPred;
+ SVGADXStreamOutStatisticsQueryResult soStats;
+ SVGADXStreamOutPredicateQueryResult soPred;
+ SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
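
As a sketch of the "working with queries generically" case, the union lets one helper cover both occlusion query flavors without separate buffer types; the result pointer is assumed to reference completed readback memory:

	/* Sketch: pull a sample count out of a finished occlusion query. */
	static uint64 occlusion_samples(SVGA3dQueryType type,
					const SVGADXQueryResultUnion *result)
	{
		if (type == SVGA3D_QUERYTYPE_OCCLUSION64)
			return result->occ64.samplesRendered;
		return result->occ.samplesRendered;  /* SVGA3D_QUERYTYPE_OCCLUSION */
	}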
+
+
+typedef enum {
+ SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
+ SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
+ SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */
+ SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+ SVGA3D_WRITE_HOST_VRAM = 1,
+ SVGA3D_READ_HOST_VRAM = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+ SVGA3D_LOGICOP_INVALID = 0,
+ SVGA3D_LOGICOP_MIN = 1,
+ SVGA3D_LOGICOP_COPY = 1,
+ SVGA3D_LOGICOP_NOT = 2,
+ SVGA3D_LOGICOP_AND = 3,
+ SVGA3D_LOGICOP_OR = 4,
+ SVGA3D_LOGICOP_XOR = 5,
+ SVGA3D_LOGICOP_NXOR = 6,
+ SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
+ SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
+ SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
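
The ROP3 block reserves 256 consecutive enum values, so a raw 8-bit ROP3 code maps onto the enum with a plain offset; a sketch:

	/* Sketch: map a raw ROP3 code (e.g. 0xCC, the GDI SRCCOPY rop) onto
	 * the reserved SVGA3dLogicOp range. */
	static inline SVGA3dLogicOp rop3_to_logicop(uint8 rop3)
	{
		return (SVGA3dLogicOp)(SVGA3D_LOGICOP_ROP3MIN + rop3);
	}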
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
+ };
+ uint32 uintValue;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+ SVGA_OTABLE_MOB = 0,
+ SVGA_OTABLE_MIN = 0,
+ SVGA_OTABLE_SURFACE = 1,
+ SVGA_OTABLE_CONTEXT = 2,
+ SVGA_OTABLE_SHADER = 3,
+ SVGA_OTABLE_SCREENTARGET = 4,
+
+ SVGA_OTABLE_DX9_MAX = 5,
+
+ SVGA_OTABLE_DXCONTEXT = 5,
+ SVGA_OTABLE_MAX = 6
+} SVGAOTableType;
+
+/*
+ * Deprecated.
+ */
+#define SVGA_OTABLE_COUNT 4
+
+typedef enum {
+ SVGA_COTABLE_MIN = 0,
+ SVGA_COTABLE_RTVIEW = 0,
+ SVGA_COTABLE_DSVIEW = 1,
+ SVGA_COTABLE_SRVIEW = 2,
+ SVGA_COTABLE_ELEMENTLAYOUT = 3,
+ SVGA_COTABLE_BLENDSTATE = 4,
+ SVGA_COTABLE_DEPTHSTENCIL = 5,
+ SVGA_COTABLE_RASTERIZERSTATE = 6,
+ SVGA_COTABLE_SAMPLER = 7,
+ SVGA_COTABLE_STREAMOUTPUT = 8,
+ SVGA_COTABLE_DXQUERY = 9,
+ SVGA_COTABLE_DXSHADER = 10,
+ SVGA_COTABLE_DX10_MAX = 11,
+ SVGA_COTABLE_UAVIEW = 11,
+ SVGA_COTABLE_MAX
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+ SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+ SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+ SVGA3D_MOBFMT_MIN = 0,
+ SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+ SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+ SVGA3D_MOBFMT_RANGE = 3,
+ SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+ SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+ SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+ SVGA3D_MOBFMT_PREDX_MAX = 7,
+ SVGA3D_MOBFMT_EMPTY = 7,
+ SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 8e8d9682e018..884b1d1fb85f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index f38416fcb046..faf6d9b2b891 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
switch (format) {
case VMWARE_FOURCC_YV12:
*height = (*height + 1) & ~1;
- *size = (*width + 3) & ~3;
+ *size = (*width) * (*height);
if (pitches) {
- pitches[0] = *size;
+ pitches[0] = *width;
}
- *size *= *height;
-
if (offsets) {
offsets[1] = *size;
}
- tmp = ((*width >> 1) + 3) & ~3;
+ tmp = *width >> 1;
if (pitches) {
pitches[1] = pitches[2] = tmp;
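As a worked example of the revised YV12 computation in this hunk (the rest of the function, including offsets[2] and the final size, falls outside the hunk): a 320x240 frame gets *size = 320 * 240 = 76800 bytes for the luma plane, pitches[0] = 320, offsets[1] = 76800, and a chroma pitch of 320 >> 1 = 160 for planes 1 and 2.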
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index e4259c2c1acc..6e0ccb70a700 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,5 @@
/**********************************************************
- * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -31,20 +31,38 @@
#ifndef _SVGA_REG_H_
#define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
/*
- * PCI device IDs.
+ * SVGA_REG_ENABLE bit definitions.
*/
-#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+typedef enum {
+ SVGA_REG_ENABLE_DISABLE = 0,
+ SVGA_REG_ENABLE_ENABLE = (1 << 0),
+ SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
/*
- * SVGA_REG_ENABLE bit definitions.
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
*/
-#define SVGA_REG_ENABLE_DISABLE 0
-#define SVGA_REG_ENABLE_ENABLE 1
-#define SVGA_REG_ENABLE_HIDE 2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
- SVGA_REG_ENABLE_HIDE)
+#define SVGA_MAX_WIDTH 2560
+#define SVGA_MAX_HEIGHT 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL 32
+#define SVGA_MAX_DEPTH 24
+#define SVGA_MAX_DISPLAYS 10
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
@@ -57,14 +75,9 @@
#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
/*
- * The maximum framebuffer size that can traced for e.g. guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- * See bug 335072.
+ * The maximum framebuffer size that can be traced for guests, unless
+ * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case
+ * the full framebuffer can be traced independent of this limit.
*/
#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
@@ -106,6 +119,8 @@
#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
/*
* Registers
@@ -131,6 +146,7 @@ enum {
SVGA_REG_FB_SIZE = 16,
/* ID 0 implementation only had the above registers, then the palette */
+ SVGA_REG_ID_0_TOP = 17,
SVGA_REG_CAPABILITIES = 17,
SVGA_REG_MEM_START = 18, /* (Deprecated) */
@@ -171,7 +187,7 @@ enum {
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+	SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -182,7 +198,6 @@ enum {
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
-
SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
/* Base of scratch registers */
/* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
@@ -190,7 +205,6 @@ enum {
the use of the current SVGA driver. */
};
-
/*
* Guest memory regions (GMRs):
*
@@ -288,17 +302,205 @@ enum {
#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestMemDescriptor {
uint32 ppn;
uint32 numPages;
-} SVGAGuestMemDescriptor;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestPtr {
uint32 gmrId;
uint32 offset;
-} SVGAGuestPtr;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The
+ * fifo must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted when the guest writes the 64 byte
+ * aligned physical address into SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32
+ * bits of the physical address. SVGA_REG_COMMAND_LOW contains the
+ * lower 32 bits of the physical address; since the command buffer
+ * headers are required to be 64 byte aligned, the lower 6 bits are
+ * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution. The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT queued command buffers per context;
+ * if that limit is reached, the device will synchronously write the
+ * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command
+ * buffer header and not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported. If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size, which is
+ * enough to fit a 64x64 color-cursor definition. If the command is
+ * too large, the device is allowed to process the command or return an
+ * error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register-like accesses with the flexibility of
+ * commands. It uses a different command set, defined by
+ * SVGADeviceContextCmdId. The commands in each command buffer are
+ * not allowed to straddle physical pages.
+ *
+ * The offset field, which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
+ * start of command processing into the buffer. If an error is
+ * encountered, the errorOffset will still be relative to the specific
+ * PA, not biased by the offset. When the command buffer is finished,
+ * the guest should not read the offset field as there is no guarantee
+ * what it will be set to.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+ SVGA_CB_CONTEXT_DEVICE = 0x3f,
+ SVGA_CB_CONTEXT_0 = 0x0,
+ SVGA_CB_CONTEXT_MAX = 0x1,
+} SVGACBContext;
+
+
+typedef enum {
+ /*
+ * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+ * field before submitting the command buffer header, the host will
+ * change the value when it is done with the command buffer.
+ */
+ SVGA_CB_STATUS_NONE = 0,
+
+ /*
+ * Written by the host when a command buffer completes successfully.
+ * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+ * the SVGA_CB_FLAG_NO_IRQ flag is set.
+ */
+ SVGA_CB_STATUS_COMPLETED = 1,
+
+ /*
+ * Written by the host synchronously with the command buffer
+ * submission to indicate the command buffer was not submitted. No
+ * IRQ is raised.
+ */
+ SVGA_CB_STATUS_QUEUE_FULL = 2,
+
+ /*
+ * Written by the host when an error was detected parsing a command
+ * in the command buffer, errorOffset is written to contain the
+ * offset to the first byte of the failing command. The device
+ * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+ * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been
+ * processed.
+ */
+ SVGA_CB_STATUS_COMMAND_ERROR = 3,
+
+ /*
+ * Written by the host if there is an error parsing the command
+ * buffer header. The device raises the IRQ with both
+ * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device
+	 * did not process any of the command buffer.
+ */
+ SVGA_CB_STATUS_CB_HEADER_ERROR = 4,
+ /*
+ * Written by the host if the guest requested the host to preempt
+ * the command buffer. The device will not raise any IRQs and the
+ * command buffer was not processed.
+ */
+ SVGA_CB_STATUS_PREEMPTED = 5,
+
+ /*
+ * Written by the host synchronously with the command buffer
+	 * submission to indicate that the command buffer was not submitted
+ * due to an error. No IRQ is raised.
+ */
+ SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+} SVGACBStatus;
+
+typedef enum {
+ SVGA_CB_FLAG_NONE = 0,
+ SVGA_CB_FLAG_NO_IRQ = 1 << 0,
+ SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+ SVGA_CB_FLAG_MOB = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ volatile SVGACBStatus status;
+ volatile uint32 errorOffset;
+ uint64 id;
+ SVGACBFlags flags;
+ uint32 length;
+ union {
+ PA pa;
+ struct {
+ SVGAMobId mobid;
+ uint32 mobOffset;
+ } mob;
+ } ptr;
+ uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+ uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+ uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
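
Tying the description above to the structures, a minimal submission sketch; svga_write_reg() is a hypothetical register accessor, and header_pa/cmd_pa are assumed to be suitable physical addresses, with the header one 64 byte aligned:

	/* Sketch: submit a command buffer on context 0. */
	static void submit_cb(SVGACBHeader *header, PA header_pa,
			      PA cmd_pa, uint32 cmd_bytes)
	{
		memset(header, 0, sizeof(*header)); /* status = NONE, mustBeZero[] = 0 */
		header->ptr.pa = cmd_pa;            /* PA of the command payload */
		header->length = cmd_bytes;

		/* Upper 32 bits first; the COMMAND_LOW write submits. The low
		 * 6 bits of the 64 byte aligned header PA carry the context. */
		svga_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(header_pa >> 32));
		svga_write_reg(SVGA_REG_COMMAND_LOW,
			       (uint32)header_pa | SVGA_CB_CONTEXT_0);
	}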
+
+typedef enum {
+ SVGA_DC_CMD_NOP = 0,
+ SVGA_DC_CMD_START_STOP_CONTEXT = 1,
+ SVGA_DC_CMD_PREEMPT = 2,
+ SVGA_DC_CMD_MAX = 3,
+ SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
+} SVGADeviceContextCmdId;
+
+typedef struct {
+ uint32 enable;
+ SVGACBContext context;
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers
+ * on the specified context that can be preempted are preempted. After
+ * execution of this command, all command buffers that were preempted
+ * will already have SVGA_CB_STATUS_PREEMPTED written into the status
+ * field. The device might still be processing a command buffer,
+ * assuming execution of it started before the preemption request was
+ * received. Setting the ignoreIDZero flag to TRUE will cause the
+ * device to not preempt command buffers with the id field in the
+ * command buffer header set to zero.
+ */
+
+typedef struct {
+ SVGACBContext context;
+ uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
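
A sketch of what the preempt request might look like on the wire, assuming the device-context payload is the 32-bit command id followed by its argument struct (that layout is an assumption; it is not spelled out in this header):

	/* Sketch: preempt everything queued on context 0, sparing command
	 * buffers whose header id is zero. This payload would itself be
	 * submitted in a buffer on SVGA_CB_CONTEXT_DEVICE. */
	struct {
		uint32 cmdId;          /* SVGADeviceContextCmdId */
		SVGADCCmdPreempt body;
	} preempt = {
		.cmdId = SVGA_DC_CMD_PREEMPT,
		.body = {
			.context = SVGA_CB_CONTEXT_0,
			.ignoreIDZero = 1,   /* TRUE: skip id == 0 buffers */
		},
	};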
/*
* SVGAGMRImageFormat --
@@ -320,13 +522,12 @@ struct SVGAGuestPtr {
*
*/
-typedef
-struct SVGAGMRImageFormat {
+typedef struct SVGAGMRImageFormat {
union {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
@@ -334,6 +535,7 @@ struct SVGAGMRImageFormat {
} SVGAGMRImageFormat;
typedef
+#include "vmware_pack_begin.h"
struct SVGAGuestImage {
SVGAGuestPtr ptr;
@@ -353,7 +555,9 @@ struct SVGAGuestImage {
* assuming each row of blocks is tightly packed.
*/
uint32 pitch;
-} SVGAGuestImage;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
/*
* SVGAColorBGRX --
@@ -363,14 +567,13 @@ struct SVGAGuestImage {
* GMRFB state.
*/
-typedef
-struct SVGAColorBGRX {
+typedef struct SVGAColorBGRX {
union {
struct {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
- uint32 x : 8; /* Unused */
+ uint32 x : 8; /* Unused */
};
uint32 value;
@@ -392,26 +595,49 @@ struct SVGAColorBGRX {
*/
typedef
-struct SVGASignedRect {
+#include "vmware_pack_begin.h"
+struct {
int32 left;
int32 top;
int32 right;
int32 bottom;
-} SVGASignedRect;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
typedef
-struct SVGASignedPoint {
+#include "vmware_pack_begin.h"
+struct {
int32 x;
int32 y;
-} SVGASignedPoint;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
/*
- * Capabilities
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ * of the new features that each capability provides.
*
- * Note the holes in the bitfield. Missing bits have been deprecated,
- * and must not be reused. Those capabilities will never be reported
- * by new versions of the SVGA device.
+ * SVGA_CAP_IRQMASK --
+ * Provides device interrupts. Adds device register SVGA_REG_IRQMASK
+ * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ * set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ * Provides synchronous mapping of guest memory regions (GMR).
+ * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ * Allows framebuffer trace-based updates even when FIFO is enabled.
+ * Adds device register SVGA_REG_TRACES.
*
* SVGA_CAP_GMR2 --
* Provides asynchronous commands to define and remap guest memory
@@ -421,21 +647,39 @@ struct SVGASignedPoint {
* SVGA_CAP_SCREEN_OBJECT_2 --
* Allow screen object support, and require backing stores from the
* guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ * Enable register based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ * This cap was incorrectly used by old drivers and should not be
+ * reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ *    Enable support for the prepend command buffer submission
+ *    registers, SVGA_REG_CMD_PREPEND_LOW and
+ * SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ * Enable guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_CMD_BUFFERS_3 --
+ * Enable support for command buffers in a mob.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
-#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
-#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
+#define SVGA_CAP_MULTIMON 0x00010000
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
@@ -444,6 +688,33 @@ struct SVGASignedPoint {
#define SVGA_CAP_DEAD1 0x02000000
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
#define SVGA_CAP_GBOBJECTS 0x08000000
+#define SVGA_CAP_DX 0x10000000
+
+#define SVGA_CAP_CMD_RESERVED 0x80000000
+
+
+/*
+ * The guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized.  The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX).  On success,
+ * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
+ * the requested capability.  If the command is not supported, then EBX
+ * will be left unchanged and EAX will be set to -1.  Because it is
+ * possible that -1 is the value of the requested cap, the correct way
+ * to check if the command was successful is to check if EBX was changed
+ * to BDOOR_MAGIC, making sure to initialize the register to something
+ * else first.
+ */
+
+typedef enum {
+ SVGABackdoorCapDeviceCaps = 0,
+ SVGABackdoorCapFifoCaps = 1,
+ SVGABackdoorCap3dHWVersion = 2,
+ SVGABackdoorCapMax = 3,
+} SVGABackdoorCapType;
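
A sketch of the EBX-check pattern described above; backdoor_call() is a hypothetical helper that loads ECX, performs the backdoor I/O, and returns the resulting EAX/EBX, and the BDOOR_* constants come from the separate VMware backdoor interface rather than this header:

	/* Sketch: read one SVGA capability through the backdoor. */
	static Bool svga_backdoor_cap(SVGABackdoorCapType type, uint32 *cap)
	{
		uint32 eax, ebx = 0;   /* anything other than BDOOR_MAGIC */

		backdoor_call(BDOOR_CMD_GET_SVGA_CAPABILITIES |
			      ((uint32)type << 16), &eax, &ebx);
		if (ebx != BDOOR_MAGIC)
			return FALSE;  /* command not supported */

		*cap = eax;            /* may legitimately be -1 */
		return TRUE;
	}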
+
/*
* FIFO register indices.
@@ -883,7 +1154,8 @@ enum {
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
- SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
+ SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+ /* (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
@@ -896,7 +1168,9 @@ enum {
* video frame to be displayed.
*/
-typedef struct SVGAOverlayUnit {
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
uint32 enabled;
uint32 flags;
uint32 dataOffset;
@@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit {
uint32 pitches[3];
uint32 dataGMRId;
uint32 dstScreenId;
-} SVGAOverlayUnit;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+ uint16 displayId;
+ uint16 isPrimary;
+ uint32 width;
+ uint32 height;
+ uint32 positionX;
+ uint32 positionY;
+} SVGADisplayTopology;
/*
@@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit {
* value of zero means no cloning should happen.
*/
-#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_MUST_BE_SET (1 << 0)
#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
@@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit {
#define SVGA_SCREEN_BLANKING (1 << 4)
typedef
-struct SVGAScreenObject {
+#include "vmware_pack_begin.h"
+struct {
uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
@@ -995,8 +1290,17 @@ struct SVGAScreenObject {
* with SVGA_FIFO_CAP_SCREEN_OBJECT.
*/
SVGAGuestImage backingStore;
+
+ /*
+ * The cloneCount field is treated as a hint from the guest that
+ * the user wants this display to be cloned, cloneCount times.
+ *
+ * A value of zero means no cloning should happen.
+ */
uint32 cloneCount;
-} SVGAScreenObject;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
/*
@@ -1009,7 +1313,7 @@ struct SVGAScreenObject {
* Note the holes in the command ID numbers: These commands have been
* deprecated, and the old IDs must not be reused.
*
- * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
* protocol.
*
* Each command's parameters are described by the comments and
@@ -1020,6 +1324,7 @@ typedef enum {
SVGA_CMD_INVALID_CMD = 0,
SVGA_CMD_UPDATE = 1,
SVGA_CMD_RECT_COPY = 3,
+ SVGA_CMD_RECT_ROP_COPY = 14,
SVGA_CMD_DEFINE_CURSOR = 19,
SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
SVGA_CMD_UPDATE_VERBOSE = 25,
@@ -1035,9 +1340,14 @@ typedef enum {
SVGA_CMD_ANNOTATION_COPY = 40,
SVGA_CMD_DEFINE_GMR2 = 41,
SVGA_CMD_REMAP_GMR2 = 42,
+ SVGA_CMD_DEAD = 43,
+ SVGA_CMD_DEAD_2 = 44,
+ SVGA_CMD_NOP = 45,
+ SVGA_CMD_NOP_ERROR = 46,
SVGA_CMD_MAX
} SVGAFifoCmdId;
+#define SVGA_CMD_MAX_DATASIZE (256 * 1024)
#define SVGA_CMD_MAX_ARGS 64
@@ -1070,12 +1380,15 @@ typedef enum {
*/
typedef
-struct SVGAFifoCmdUpdate {
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
-} SVGAFifoCmdUpdate;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
/*
@@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate {
*/
typedef
-struct SVGAFifoCmdRectCopy {
+#include "vmware_pack_begin.h"
+struct {
+ uint32 srcX;
+ uint32 srcY;
+ uint32 destX;
+ uint32 destY;
+ uint32 width;
+ uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ * Perform a rectangular DMA transfer from one area of the GFB to
+ * another, and copy the result to any screens which intersect it.
+ * The value of ROP may only be SVGA_ROP_COPY, and this command is
+ * only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ * SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
uint32 srcX;
uint32 srcY;
uint32 destX;
uint32 destY;
uint32 width;
uint32 height;
-} SVGAFifoCmdRectCopy;
+ uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
/*
@@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy {
*/
typedef
-struct SVGAFifoCmdDefineCursor {
+#include "vmware_pack_begin.h"
+struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
@@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor {
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
*/
-} SVGAFifoCmdDefineCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
/*
@@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor {
*/
typedef
-struct SVGAFifoCmdDefineAlphaCursor {
+#include "vmware_pack_begin.h"
+struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
/* Followed by scanline data */
-} SVGAFifoCmdDefineAlphaCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
/*
@@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor {
*/
typedef
-struct SVGAFifoCmdUpdateVerbose {
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 reason;
-} SVGAFifoCmdUpdateVerbose;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
/*
@@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose {
#define SVGA_ROP_COPY 0x03
typedef
-struct SVGAFifoCmdFrontRopFill {
+#include "vmware_pack_begin.h"
+struct {
uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 rop; /* Must be SVGA_ROP_COPY */
-} SVGAFifoCmdFrontRopFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
/*
@@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 fence;
-} SVGAFifoCmdFence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
/*
@@ -1233,11 +1591,14 @@ struct {
*/
typedef
-struct SVGAFifoCmdEscape {
+#include "vmware_pack_begin.h"
+struct {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
-} SVGAFifoCmdEscape;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
/*
@@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAScreenObject screen; /* Variable-length according to version */
-} SVGAFifoCmdDefineScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
/*
@@ -1283,9 +1647,12 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 screenId;
-} SVGAFifoCmdDestroyScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
/*
@@ -1336,11 +1703,14 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAGuestPtr ptr;
uint32 bytesPerLine;
SVGAGMRImageFormat format;
-} SVGAFifoCmdDefineGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
/*
@@ -1348,19 +1718,10 @@ struct {
*
* This is a guest-to-host blit. It performs a DMA operation to
* copy a rectangular region of pixels from the current GMRFB to
- * one or more Screen Objects.
+ * a ScreenObject.
*
* The destination coordinate may be specified relative to a
- * screen's origin (if a screen ID is specified) or relative to the
- * virtual coordinate system's origin (if the screen ID is
- * SVGA_ID_INVALID). The actual destination may span zero or more
- * screens, in the case of a virtual destination rect or a rect
- * which extends off the edge of the specified screen.
- *
- * This command writes to the screen's "base layer": the underlying
- * framebuffer which exists below any cursor or video overlays. No
- * action is necessary to explicitly hide or update any overlays
- * which exist on top of the updated region.
+ * screen's origin. The provided screen ID must be valid.
*
* The SVGA device is guaranteed to finish reading from the GMRFB
* by the time any subsequent FENCE commands are reached.
@@ -1373,46 +1734,27 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
SVGASignedRect destRect;
uint32 destScreenId;
-} SVGAFifoCmdBlitGMRFBToScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
/*
* SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
*
* This is a host-to-guest blit. It performs a DMA operation to
- * copy a rectangular region of pixels from a single Screen Object
+ * copy a rectangular region of pixels from a single ScreenObject
* back to the current GMRFB.
*
- * Usage note: This command should be used rarely. It will
- * typically be inefficient, but it is necessary for some types of
- * synchronization between 3D (GPU) and 2D (CPU) rendering into
- * overlapping areas of a screen.
- *
* The source coordinate is specified relative to a screen's
- * origin. The provided screen ID must be valid. If any parameters
+ * origin. The provided screen ID must be valid. If any parameters
* are invalid, the resulting pixel values are undefined.
*
- * This command reads the screen's "base layer". Overlays like
- * video and cursor are not included, but any data which was sent
- * using a blit-to-screen primitive will be available, no matter
- * whether the data's original source was the GMRFB or the 3D
- * acceleration hardware.
- *
- * Note that our guest-to-host blits and host-to-guest blits aren't
- * symmetric in their current implementation. While the parameters
- * are identical, host-to-guest blits are a lot less featureful.
- * They do not support clipping: If the source parameters don't
- * fully fit within a screen, the blit fails. They must originate
- * from exactly one screen. Virtual coordinates are not directly
- * supported.
- *
- * Host-to-guest blits do support the same set of GMRFB formats
- * offered by guest-to-host blits.
- *
* The SVGA device is guaranteed to finish writing to the GMRFB by
* the time any subsequent FENCE commands are reached.
*
@@ -1421,77 +1763,57 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint destOrigin;
SVGASignedRect srcRect;
uint32 srcScreenId;
-} SVGAFifoCmdBlitScreenToGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
/*
* SVGA_CMD_ANNOTATION_FILL --
*
- * This is a blit annotation. This command stores a small piece of
- * device state which is consumed by the next blit-to-screen
- * command. The state is only cleared by commands which are
- * specifically documented as consuming an annotation. Other
- * commands (such as ESCAPEs for debugging) may intervene between
- * the annotation and its associated blit.
- *
- * This annotation is a promise about the contents of the next
- * blit: The video driver is guaranteeing that all pixels in that
- * blit will have the same value, specified here as a color in
- * SVGAColorBGRX format.
- *
- * The SVGA device can still render the blit correctly even if it
- * ignores this annotation, but the annotation may allow it to
- * perform the blit more efficiently, for example by ignoring the
- * source data and performing a fill in hardware.
- *
- * This annotation is most important for performance when the
- * user's display is being remoted over a network connection.
+ * The annotation commands have been deprecated and should not be used
+ * by new drivers. They used to provide performance hints to the SVGA
+ * device about the content of screen updates, but newer SVGA devices
+ * ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGAColorBGRX color;
-} SVGAFifoCmdAnnotationFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
/*
* SVGA_CMD_ANNOTATION_COPY --
*
- * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
- * information about annotations.
- *
- * This annotation is a promise about the contents of the next
- * blit: The video driver is guaranteeing that all pixels in that
- * blit will have the same value as those which already exist at an
- * identically-sized region on the same or a different screen.
- *
- * Note that the source pixels for the COPY in this annotation are
- * sampled before applying the anqnotation's associated blit. They
- * are allowed to overlap with the blit's destination pixels.
- *
- * The copy source rectangle is specified the same way as the blit
- * destination: it can be a rectangle which spans zero or more
- * screens, specified relative to either a screen or to the virtual
- * coordinate system's origin. If the source rectangle includes
- * pixels which are not from exactly one screen, the results are
- * undefined.
+ * The annotation commands have been deprecated and should not be used
+ * by new drivers. They used to provide performance hints to the SVGA
+ * device about the content of screen updates, but newer SVGA devices
+ * ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
+#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
uint32 srcScreenId;
-} SVGAFifoCmdAnnotationCopy;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
/*
@@ -1504,10 +1826,13 @@ struct {
*/
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
/*
@@ -1546,6 +1871,7 @@ typedef enum {
} SVGARemapGMR2Flags;
typedef
+#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
SVGARemapGMR2Flags flags;
@@ -1559,6 +1885,52 @@ struct {
* (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
* SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
*/
-} SVGAFifoCmdRemapGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
+
+/*
+ * To simplify autoDetect display configuration, support a minimum of
+ * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
+ * numDisplays = 2
+ *   maxWidth = numDisplays * 1920 = 3840
+ * maxHeight = rotated width of single monitor = 1920
+ * vramSize = maxWidth * maxHeight * 4 = 29491200
+ */
+#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE (256 * 1024)
+#define SVGA_FIFO_SIZE_3D (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT FALSE
+#else
+#define SVGA_VRAM_SIZE (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 000000000000..2e8ba4df8de9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8 uint8;
+typedef s8 int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 000000000000..120eab830eaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8 uint8;
+typedef s8 int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 000000000000..7e7b0ce34aa2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 000000000000..e2e440ed3d44
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7dd15c5..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga3d_reg.h --
- *
- * SVGA 3D hardware definitions
- */
-
-#ifndef _SVGA3D_REG_H_
-#define _SVGA3D_REG_H_
-
-#include "svga_reg.h"
-
-typedef uint32 PPN;
-typedef __le64 PPN64;
-
-/*
- * 3D Hardware Version
- *
- * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
- * register. It is set by the host and read by the guest. This lets
- * us make new guest drivers which are backwards-compatible with old
- * SVGA hardware revisions. It does not let us support old guest
- * drivers. Good enough for now.
- *
- */
-
-#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
-#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
-#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
-
-typedef enum {
- SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
- SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
- SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
- SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
- SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
- SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
- SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
- SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
-} SVGA3dHardwareVersion;
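
As a quick illustration (a sketch, not part of this header), the packing
macros above place the major version in the high 16 bits and the minor in
the low byte, so a guest can compare packed values numerically;
enable_ws8_features() is a hypothetical helper:

   uint32 hwversion = SVGA3D_MAKE_HWVERSION(2, 1); /* packs to 0x00020001 */
   if (SVGA3D_MAJOR_HWVERSION(hwversion) >= 2)     /* major = high 16 bits */
           enable_ws8_features();                  /* hypothetical helper */
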
-
-/*
- * Generic Types
- */
-
-typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
-#define SVGA3D_NUM_CLIPPLANES 6
-#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
-#define SVGA3D_MAX_CONTEXT_IDS 256
-#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
-
-#define SVGA3D_NUM_TEXTURE_UNITS 32
-#define SVGA3D_NUM_LIGHTS 8
-
-/*
- * Surface formats.
- *
- * If you modify this list, be sure to keep GLUtil.c in sync. It
- * includes the internal format definition of each surface in
- * GLUtil_ConvertSurfaceFormat, and it contains a table of
- * human-readable names in GLUtil_GetFormatName.
- */
-
-typedef enum SVGA3dSurfaceFormat {
- SVGA3D_FORMAT_MIN = 0,
- SVGA3D_FORMAT_INVALID = 0,
-
- SVGA3D_X8R8G8B8 = 1,
- SVGA3D_A8R8G8B8 = 2,
-
- SVGA3D_R5G6B5 = 3,
- SVGA3D_X1R5G5B5 = 4,
- SVGA3D_A1R5G5B5 = 5,
- SVGA3D_A4R4G4B4 = 6,
-
- SVGA3D_Z_D32 = 7,
- SVGA3D_Z_D16 = 8,
- SVGA3D_Z_D24S8 = 9,
- SVGA3D_Z_D15S1 = 10,
-
- SVGA3D_LUMINANCE8 = 11,
- SVGA3D_LUMINANCE4_ALPHA4 = 12,
- SVGA3D_LUMINANCE16 = 13,
- SVGA3D_LUMINANCE8_ALPHA8 = 14,
-
- SVGA3D_DXT1 = 15,
- SVGA3D_DXT2 = 16,
- SVGA3D_DXT3 = 17,
- SVGA3D_DXT4 = 18,
- SVGA3D_DXT5 = 19,
-
- SVGA3D_BUMPU8V8 = 20,
- SVGA3D_BUMPL6V5U5 = 21,
- SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
-
- SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
- SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
-
- SVGA3D_A2R10G10B10 = 26,
-
- /* signed formats */
- SVGA3D_V8U8 = 27,
- SVGA3D_Q8W8V8U8 = 28,
- SVGA3D_CxV8U8 = 29,
-
- /* mixed formats */
- SVGA3D_X8L8V8U8 = 30,
- SVGA3D_A2W10V10U10 = 31,
-
- SVGA3D_ALPHA8 = 32,
-
- /* Single- and dual-component floating point formats */
- SVGA3D_R_S10E5 = 33,
- SVGA3D_R_S23E8 = 34,
- SVGA3D_RG_S10E5 = 35,
- SVGA3D_RG_S23E8 = 36,
-
- SVGA3D_BUFFER = 37,
-
- SVGA3D_Z_D24X8 = 38,
-
- SVGA3D_V16U16 = 39,
-
- SVGA3D_G16R16 = 40,
- SVGA3D_A16B16G16R16 = 41,
-
- /* Packed Video formats */
- SVGA3D_UYVY = 42,
- SVGA3D_YUY2 = 43,
-
- /* Planar video formats */
- SVGA3D_NV12 = 44,
-
- /* Video format with alpha */
- SVGA3D_AYUV = 45,
-
- SVGA3D_R32G32B32A32_TYPELESS = 46,
- SVGA3D_R32G32B32A32_FLOAT = 25,
- SVGA3D_R32G32B32A32_UINT = 47,
- SVGA3D_R32G32B32A32_SINT = 48,
- SVGA3D_R32G32B32_TYPELESS = 49,
- SVGA3D_R32G32B32_FLOAT = 50,
- SVGA3D_R32G32B32_UINT = 51,
- SVGA3D_R32G32B32_SINT = 52,
- SVGA3D_R16G16B16A16_TYPELESS = 53,
- SVGA3D_R16G16B16A16_FLOAT = 24,
- SVGA3D_R16G16B16A16_UNORM = 41,
- SVGA3D_R16G16B16A16_UINT = 54,
- SVGA3D_R16G16B16A16_SNORM = 55,
- SVGA3D_R16G16B16A16_SINT = 56,
- SVGA3D_R32G32_TYPELESS = 57,
- SVGA3D_R32G32_FLOAT = 36,
- SVGA3D_R32G32_UINT = 58,
- SVGA3D_R32G32_SINT = 59,
- SVGA3D_R32G8X24_TYPELESS = 60,
- SVGA3D_D32_FLOAT_S8X24_UINT = 61,
- SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
- SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
- SVGA3D_R10G10B10A2_TYPELESS = 64,
- SVGA3D_R10G10B10A2_UNORM = 26,
- SVGA3D_R10G10B10A2_UINT = 65,
- SVGA3D_R11G11B10_FLOAT = 66,
- SVGA3D_R8G8B8A8_TYPELESS = 67,
- SVGA3D_R8G8B8A8_UNORM = 68,
- SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
- SVGA3D_R8G8B8A8_UINT = 70,
- SVGA3D_R8G8B8A8_SNORM = 28,
- SVGA3D_R8G8B8A8_SINT = 71,
- SVGA3D_R16G16_TYPELESS = 72,
- SVGA3D_R16G16_FLOAT = 35,
- SVGA3D_R16G16_UNORM = 40,
- SVGA3D_R16G16_UINT = 73,
- SVGA3D_R16G16_SNORM = 39,
- SVGA3D_R16G16_SINT = 74,
- SVGA3D_R32_TYPELESS = 75,
- SVGA3D_D32_FLOAT = 76,
- SVGA3D_R32_FLOAT = 34,
- SVGA3D_R32_UINT = 77,
- SVGA3D_R32_SINT = 78,
- SVGA3D_R24G8_TYPELESS = 79,
- SVGA3D_D24_UNORM_S8_UINT = 80,
- SVGA3D_R24_UNORM_X8_TYPELESS = 81,
- SVGA3D_X24_TYPELESS_G8_UINT = 82,
- SVGA3D_R8G8_TYPELESS = 83,
- SVGA3D_R8G8_UNORM = 84,
- SVGA3D_R8G8_UINT = 85,
- SVGA3D_R8G8_SNORM = 27,
- SVGA3D_R8G8_SINT = 86,
- SVGA3D_R16_TYPELESS = 87,
- SVGA3D_R16_FLOAT = 33,
- SVGA3D_D16_UNORM = 8,
- SVGA3D_R16_UNORM = 88,
- SVGA3D_R16_UINT = 89,
- SVGA3D_R16_SNORM = 90,
- SVGA3D_R16_SINT = 91,
- SVGA3D_R8_TYPELESS = 92,
- SVGA3D_R8_UNORM = 93,
- SVGA3D_R8_UINT = 94,
- SVGA3D_R8_SNORM = 95,
- SVGA3D_R8_SINT = 96,
- SVGA3D_A8_UNORM = 32,
- SVGA3D_R1_UNORM = 97,
- SVGA3D_R9G9B9E5_SHAREDEXP = 98,
- SVGA3D_R8G8_B8G8_UNORM = 99,
- SVGA3D_G8R8_G8B8_UNORM = 100,
- SVGA3D_BC1_TYPELESS = 101,
- SVGA3D_BC1_UNORM = 15,
- SVGA3D_BC1_UNORM_SRGB = 102,
- SVGA3D_BC2_TYPELESS = 103,
- SVGA3D_BC2_UNORM = 17,
- SVGA3D_BC2_UNORM_SRGB = 104,
- SVGA3D_BC3_TYPELESS = 105,
- SVGA3D_BC3_UNORM = 19,
- SVGA3D_BC3_UNORM_SRGB = 106,
- SVGA3D_BC4_TYPELESS = 107,
- SVGA3D_BC4_UNORM = 108,
- SVGA3D_BC4_SNORM = 109,
- SVGA3D_BC5_TYPELESS = 110,
- SVGA3D_BC5_UNORM = 111,
- SVGA3D_BC5_SNORM = 112,
- SVGA3D_B5G6R5_UNORM = 3,
- SVGA3D_B5G5R5A1_UNORM = 5,
- SVGA3D_B8G8R8A8_UNORM = 2,
- SVGA3D_B8G8R8X8_UNORM = 1,
- SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
- SVGA3D_B8G8R8A8_TYPELESS = 114,
- SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
- SVGA3D_B8G8R8X8_TYPELESS = 116,
- SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
-
- /* Advanced D3D9 depth formats. */
- SVGA3D_Z_DF16 = 118,
- SVGA3D_Z_DF24 = 119,
- SVGA3D_Z_D24S8_INT = 120,
-
- /* Planar video formats. */
- SVGA3D_YV12 = 121,
-
- SVGA3D_FORMAT_MAX = 122,
-} SVGA3dSurfaceFormat;
-
-typedef uint32 SVGA3dColor; /* a, r, g, b */
-
-/*
- * These match the D3DFORMAT_OP definitions used by Direct3D. We need
- * them so that we can query the host for what the supported surface
- * operations are (when we're using the D3D backend, in particular),
- * and so we can send those operations to the guest.
- */
-typedef enum {
- SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
- SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
- SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
- SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
- SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
- SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
- SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
-
-/*
- * This format can be used as a render target if the current display mode
- * has the same depth once the alpha channel is ignored. For example, if
- * the device can render to A8R8G8B8 when the display mode is X8R8G8B8,
- * then the format op list entry for A8R8G8B8 should have this cap.
- */
- SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
-
-/*
- * This format contains DirectDraw support (including Flip). This flag
- * should not be set on alpha formats.
- */
- SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
-
-/*
- * The rasterizer can provide some level of Direct3D support for this format,
- * which implies that the driver can create a context in this mode (for some
- * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
- * flag must also be set.
- */
- SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
-
-/*
- * This is set for a private format when the driver has put the bpp in
- * the structure.
- */
- SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
-
-/*
- * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
- */
- SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
-
-/*
- * Indicates that this format can be used to create offscreen plain surfaces.
- */
- SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
-
-/*
- * Indicates that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked-up data)
- */
- SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
-
-/*
- * Indicates that this format can be used in the bumpmap instructions
- */
- SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
-
-/*
- * Indicates that this format can be sampled by the displacement map sampler
- */
- SVGA3DFORMAT_OP_DMAP = 0x00020000,
-
-/*
- * Indicates that this format cannot be used with texture filtering
- */
- SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
-
-/*
- * Indicates that format conversions are supported to this RGB format if
- * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
- */
- SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
-
-/*
- * Indicates that this format can be written as an SRGB target (meaning that
- * the pixel pipe will de-linearize data on output to the format)
- */
- SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
-
-/*
- * Indicates that this format cannot be used with alpha blending
- */
- SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
-
-/*
- * Indicates that the device can auto-generate sublevels for resources
- * of this format
- */
- SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
-
-/*
- * Indicates that this format can be used by vertex texture sampler
- */
- SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
-
-/*
- * Indicates that this format supports neither texture coordinate wrap
- * modes nor mipmapping
- */
- SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
-} SVGA3dFormatOp;
-
-/*
- * This structure is a conversion of SVGA3DFORMAT_OP_*.
- * Entries must be located at the same bit positions.
- */
-typedef union {
- uint32 value;
- struct {
- uint32 texture : 1;
- uint32 volumeTexture : 1;
- uint32 cubeTexture : 1;
- uint32 offscreenRenderTarget : 1;
- uint32 sameFormatRenderTarget : 1;
- uint32 unknown1 : 1;
- uint32 zStencil : 1;
- uint32 zStencilArbitraryDepth : 1;
- uint32 sameFormatUpToAlpha : 1;
- uint32 unknown2 : 1;
- uint32 displayMode : 1;
- uint32 acceleration3d : 1;
- uint32 pixelSize : 1;
- uint32 convertToARGB : 1;
- uint32 offscreenPlain : 1;
- uint32 sRGBRead : 1;
- uint32 bumpMap : 1;
- uint32 dmap : 1;
- uint32 noFilter : 1;
- uint32 memberOfGroupARGB : 1;
- uint32 sRGBWrite : 1;
- uint32 noAlphaBlend : 1;
- uint32 autoGenMipMap : 1;
- uint32 vertexTexture : 1;
- uint32 noTexCoordWrapNorMip : 1;
- };
-} SVGA3dSurfaceFormatCaps;
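
For illustration only: since the bitfields mirror the SVGA3DFORMAT_OP_*
bit positions (assuming the usual little-endian, LSB-first bitfield layout
this header relies on), a raw cap mask can be decoded through the union:

   SVGA3dSurfaceFormatCaps caps;
   caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE;
   if (caps.texture && caps.cubeTexture) {
           /* format is usable as both a plain and a cube texture */
   }
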
-
-/*
- * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
- * must fit in a uint32.
- */
-
-typedef enum {
- SVGA3D_RS_INVALID = 0,
- SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
- SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
- SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
- SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
- SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
- SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
- SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
- SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
- SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
- SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
- SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
- SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
- SVGA3D_RS_STENCILREF = 13, /* uint32 */
- SVGA3D_RS_STENCILMASK = 14, /* uint32 */
- SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
- SVGA3D_RS_FOGSTART = 16, /* float */
- SVGA3D_RS_FOGEND = 17, /* float */
- SVGA3D_RS_FOGDENSITY = 18, /* float */
- SVGA3D_RS_POINTSIZE = 19, /* float */
- SVGA3D_RS_POINTSIZEMIN = 20, /* float */
- SVGA3D_RS_POINTSIZEMAX = 21, /* float */
- SVGA3D_RS_POINTSCALE_A = 22, /* float */
- SVGA3D_RS_POINTSCALE_B = 23, /* float */
- SVGA3D_RS_POINTSCALE_C = 24, /* float */
- SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
- SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
- SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
- SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
- SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
- SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
- SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
- SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
- SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
- SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
- SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
- SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
- SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
- SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
- SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
- SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
- SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
- SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
- SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
- SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
- SVGA3D_RS_ZBIAS = 45, /* float */
- SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
- SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
- SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
- SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
- SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
- SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
- SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
- SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
- SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
- SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
- SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
- SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
- SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
- SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
- SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
- SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
- SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
- SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
- SVGA3D_RS_DEPTHBIAS = 64, /* float */
-
-
- /*
- * Output Gamma Level
- *
- * Output gamma affects the gamma curve of colors that are output from the
- * rendering pipeline. A value of 1.0 specifies a linear color space. If the
- * value is <= 0.0, gamma correction is ignored and linear color space is
- * used.
- */
-
- SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
- SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
- SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
- SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
- SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
- SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
- SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
- SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
- SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
- SVGA3D_RS_TWEENFACTOR = 88, /* float */
- SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
- SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
- SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
- SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
- SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
- SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
- SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
- SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
- SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
- SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
- SVGA3D_RS_LINEWIDTH = 99, /* float */
- SVGA3D_RS_MAX
-} SVGA3dRenderStateName;
-
-typedef enum {
- SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
- SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
- SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
- SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
- SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
- SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
- SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
-} SVGA3dVertexMaterial;
-
-typedef enum {
- SVGA3D_FILLMODE_INVALID = 0,
- SVGA3D_FILLMODE_POINT = 1,
- SVGA3D_FILLMODE_LINE = 2,
- SVGA3D_FILLMODE_FILL = 3,
- SVGA3D_FILLMODE_MAX
-} SVGA3dFillModeType;
-
-
-typedef
-union {
- struct {
- uint16 mode; /* SVGA3dFillModeType */
- uint16 face; /* SVGA3dFace */
- };
- uint32 uintValue;
-} SVGA3dFillMode;
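
A minimal sketch (not from this header) of packing the two halves before
handing the value to SVGA3D_RS_FILLMODE; svga_set_render_state() is a
hypothetical helper:

   SVGA3dFillMode fm;
   fm.mode = SVGA3D_FILLMODE_LINE;   /* low 16 bits */
   fm.face = SVGA3D_FACE_FRONT_BACK; /* high 16 bits */
   svga_set_render_state(SVGA3D_RS_FILLMODE, fm.uintValue); /* hypothetical */
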
-
-typedef enum {
- SVGA3D_SHADEMODE_INVALID = 0,
- SVGA3D_SHADEMODE_FLAT = 1,
- SVGA3D_SHADEMODE_SMOOTH = 2,
- SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
- SVGA3D_SHADEMODE_MAX
-} SVGA3dShadeMode;
-
-typedef
-union {
- struct {
- uint16 repeat;
- uint16 pattern;
- };
- uint32 uintValue;
-} SVGA3dLinePattern;
-
-typedef enum {
- SVGA3D_BLENDOP_INVALID = 0,
- SVGA3D_BLENDOP_ZERO = 1,
- SVGA3D_BLENDOP_ONE = 2,
- SVGA3D_BLENDOP_SRCCOLOR = 3,
- SVGA3D_BLENDOP_INVSRCCOLOR = 4,
- SVGA3D_BLENDOP_SRCALPHA = 5,
- SVGA3D_BLENDOP_INVSRCALPHA = 6,
- SVGA3D_BLENDOP_DESTALPHA = 7,
- SVGA3D_BLENDOP_INVDESTALPHA = 8,
- SVGA3D_BLENDOP_DESTCOLOR = 9,
- SVGA3D_BLENDOP_INVDESTCOLOR = 10,
- SVGA3D_BLENDOP_SRCALPHASAT = 11,
- SVGA3D_BLENDOP_BLENDFACTOR = 12,
- SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
- SVGA3D_BLENDOP_MAX
-} SVGA3dBlendOp;
-
-typedef enum {
- SVGA3D_BLENDEQ_INVALID = 0,
- SVGA3D_BLENDEQ_ADD = 1,
- SVGA3D_BLENDEQ_SUBTRACT = 2,
- SVGA3D_BLENDEQ_REVSUBTRACT = 3,
- SVGA3D_BLENDEQ_MINIMUM = 4,
- SVGA3D_BLENDEQ_MAXIMUM = 5,
- SVGA3D_BLENDEQ_MAX
-} SVGA3dBlendEquation;
-
-typedef enum {
- SVGA3D_FRONTWINDING_INVALID = 0,
- SVGA3D_FRONTWINDING_CW = 1,
- SVGA3D_FRONTWINDING_CCW = 2,
- SVGA3D_FRONTWINDING_MAX
-} SVGA3dFrontWinding;
-
-typedef enum {
- SVGA3D_FACE_INVALID = 0,
- SVGA3D_FACE_NONE = 1,
- SVGA3D_FACE_FRONT = 2,
- SVGA3D_FACE_BACK = 3,
- SVGA3D_FACE_FRONT_BACK = 4,
- SVGA3D_FACE_MAX
-} SVGA3dFace;
-
-/*
- * The order and the values should not be changed
- */
-
-typedef enum {
- SVGA3D_CMP_INVALID = 0,
- SVGA3D_CMP_NEVER = 1,
- SVGA3D_CMP_LESS = 2,
- SVGA3D_CMP_EQUAL = 3,
- SVGA3D_CMP_LESSEQUAL = 4,
- SVGA3D_CMP_GREATER = 5,
- SVGA3D_CMP_NOTEQUAL = 6,
- SVGA3D_CMP_GREATEREQUAL = 7,
- SVGA3D_CMP_ALWAYS = 8,
- SVGA3D_CMP_MAX
-} SVGA3dCmpFunc;
-
-/*
- * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
- * the fog factor to be specified in the alpha component of the specular
- * (a.k.a. secondary) vertex color.
- */
-typedef enum {
- SVGA3D_FOGFUNC_INVALID = 0,
- SVGA3D_FOGFUNC_EXP = 1,
- SVGA3D_FOGFUNC_EXP2 = 2,
- SVGA3D_FOGFUNC_LINEAR = 3,
- SVGA3D_FOGFUNC_PER_VERTEX = 4
-} SVGA3dFogFunction;
-
-/*
- * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
- * or per-pixel basis.
- */
-typedef enum {
- SVGA3D_FOGTYPE_INVALID = 0,
- SVGA3D_FOGTYPE_VERTEX = 1,
- SVGA3D_FOGTYPE_PIXEL = 2,
- SVGA3D_FOGTYPE_MAX = 3
-} SVGA3dFogType;
-
-/*
- * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
- * computed using the eye Z value of each pixel (or vertex), whereas range-
- * based fog is computed using the actual distance (range) to the eye.
- */
-typedef enum {
- SVGA3D_FOGBASE_INVALID = 0,
- SVGA3D_FOGBASE_DEPTHBASED = 1,
- SVGA3D_FOGBASE_RANGEBASED = 2,
- SVGA3D_FOGBASE_MAX = 3
-} SVGA3dFogBase;
-
-typedef enum {
- SVGA3D_STENCILOP_INVALID = 0,
- SVGA3D_STENCILOP_KEEP = 1,
- SVGA3D_STENCILOP_ZERO = 2,
- SVGA3D_STENCILOP_REPLACE = 3,
- SVGA3D_STENCILOP_INCRSAT = 4,
- SVGA3D_STENCILOP_DECRSAT = 5,
- SVGA3D_STENCILOP_INVERT = 6,
- SVGA3D_STENCILOP_INCR = 7,
- SVGA3D_STENCILOP_DECR = 8,
- SVGA3D_STENCILOP_MAX
-} SVGA3dStencilOp;
-
-typedef enum {
- SVGA3D_CLIPPLANE_0 = (1 << 0),
- SVGA3D_CLIPPLANE_1 = (1 << 1),
- SVGA3D_CLIPPLANE_2 = (1 << 2),
- SVGA3D_CLIPPLANE_3 = (1 << 3),
- SVGA3D_CLIPPLANE_4 = (1 << 4),
- SVGA3D_CLIPPLANE_5 = (1 << 5),
-} SVGA3dClipPlanes;
-
-typedef enum {
- SVGA3D_CLEAR_COLOR = 0x1,
- SVGA3D_CLEAR_DEPTH = 0x2,
- SVGA3D_CLEAR_STENCIL = 0x4
-} SVGA3dClearFlag;
-
-typedef enum {
- SVGA3D_RT_DEPTH = 0,
- SVGA3D_RT_STENCIL = 1,
- SVGA3D_RT_COLOR0 = 2,
- SVGA3D_RT_COLOR1 = 3,
- SVGA3D_RT_COLOR2 = 4,
- SVGA3D_RT_COLOR3 = 5,
- SVGA3D_RT_COLOR4 = 6,
- SVGA3D_RT_COLOR5 = 7,
- SVGA3D_RT_COLOR6 = 8,
- SVGA3D_RT_COLOR7 = 9,
- SVGA3D_RT_MAX,
- SVGA3D_RT_INVALID = ((uint32)-1),
-} SVGA3dRenderTargetType;
-
-#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
-
-typedef
-union {
- struct {
- uint32 red : 1;
- uint32 green : 1;
- uint32 blue : 1;
- uint32 alpha : 1;
- };
- uint32 uintValue;
-} SVGA3dColorMask;
-
-typedef enum {
- SVGA3D_VBLEND_DISABLE = 0,
- SVGA3D_VBLEND_1WEIGHT = 1,
- SVGA3D_VBLEND_2WEIGHT = 2,
- SVGA3D_VBLEND_3WEIGHT = 3,
-} SVGA3dVertexBlendFlags;
-
-typedef enum {
- SVGA3D_WRAPCOORD_0 = 1 << 0,
- SVGA3D_WRAPCOORD_1 = 1 << 1,
- SVGA3D_WRAPCOORD_2 = 1 << 2,
- SVGA3D_WRAPCOORD_3 = 1 << 3,
- SVGA3D_WRAPCOORD_ALL = 0xF,
-} SVGA3dWrapFlags;
-
-/*
- * SVGA_3D_CMD_TEXTURESTATE Types. All value types
- * must fit in a uint32.
- */
-
-typedef enum {
- SVGA3D_TS_INVALID = 0,
- SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
- SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
- SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
- SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
- SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
- SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
- SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
- SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
- SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
- SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
- SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
- SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
- SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
- SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
- SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
- SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
- SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
- SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
- SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
- SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
- SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
- SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
-
-
- /*
- * Sampler Gamma Level
- *
- * Sampler gamma affects the color of samples taken from the sampler. A
- * value of 1.0 will produce linear samples. If the value is <= 0.0 the
- * gamma value is ignored and a linear space is used.
- */
-
- SVGA3D_TS_GAMMA = 25, /* float */
- SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
- SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
- SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
- SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
- SVGA3D_TS_MAX
-} SVGA3dTextureStateName;
-
-typedef enum {
- SVGA3D_TC_INVALID = 0,
- SVGA3D_TC_DISABLE = 1,
- SVGA3D_TC_SELECTARG1 = 2,
- SVGA3D_TC_SELECTARG2 = 3,
- SVGA3D_TC_MODULATE = 4,
- SVGA3D_TC_ADD = 5,
- SVGA3D_TC_ADDSIGNED = 6,
- SVGA3D_TC_SUBTRACT = 7,
- SVGA3D_TC_BLENDTEXTUREALPHA = 8,
- SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
- SVGA3D_TC_BLENDCURRENTALPHA = 10,
- SVGA3D_TC_BLENDFACTORALPHA = 11,
- SVGA3D_TC_MODULATE2X = 12,
- SVGA3D_TC_MODULATE4X = 13,
- SVGA3D_TC_DSDT = 14,
- SVGA3D_TC_DOTPRODUCT3 = 15,
- SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
- SVGA3D_TC_ADDSIGNED2X = 17,
- SVGA3D_TC_ADDSMOOTH = 18,
- SVGA3D_TC_PREMODULATE = 19,
- SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
- SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
- SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
- SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
- SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
- SVGA3D_TC_MULTIPLYADD = 25,
- SVGA3D_TC_LERP = 26,
- SVGA3D_TC_MAX
-} SVGA3dTextureCombiner;
-
-#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
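
For illustration: the macro maps combiner op N to bit N-1 of a capability
mask (op 0, SVGA3D_TC_INVALID, contributes no bit), so a guest might test
a host-reported mask like this, where tc_caps is an assumed cap value:

   if (tc_caps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE))
           /* SVGA3D_TC_MODULATE (4) is advertised via bit 3 */ ;
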
-
-typedef enum {
- SVGA3D_TEX_ADDRESS_INVALID = 0,
- SVGA3D_TEX_ADDRESS_WRAP = 1,
- SVGA3D_TEX_ADDRESS_MIRROR = 2,
- SVGA3D_TEX_ADDRESS_CLAMP = 3,
- SVGA3D_TEX_ADDRESS_BORDER = 4,
- SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
- SVGA3D_TEX_ADDRESS_EDGE = 6,
- SVGA3D_TEX_ADDRESS_MAX
-} SVGA3dTextureAddress;
-
-/*
- * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
- * disabled, and the rasterizer should use the magnification filter instead.
- */
-typedef enum {
- SVGA3D_TEX_FILTER_NONE = 0,
- SVGA3D_TEX_FILTER_NEAREST = 1,
- SVGA3D_TEX_FILTER_LINEAR = 2,
- SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
- SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
- SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
- SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
- SVGA3D_TEX_FILTER_MAX
-} SVGA3dTextureFilter;
-
-typedef enum {
- SVGA3D_TEX_TRANSFORM_OFF = 0,
- SVGA3D_TEX_TRANSFORM_S = (1 << 0),
- SVGA3D_TEX_TRANSFORM_T = (1 << 1),
- SVGA3D_TEX_TRANSFORM_R = (1 << 2),
- SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
- SVGA3D_TEX_PROJECTED = (1 << 15),
-} SVGA3dTexTransformFlags;
-
-typedef enum {
- SVGA3D_TEXCOORD_GEN_OFF = 0,
- SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
- SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
- SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
- SVGA3D_TEXCOORD_GEN_SPHERE = 4,
- SVGA3D_TEXCOORD_GEN_MAX
-} SVGA3dTextureCoordGen;
-
-/*
- * Texture argument constants for texture combiner
- */
-typedef enum {
- SVGA3D_TA_INVALID = 0,
- SVGA3D_TA_CONSTANT = 1,
- SVGA3D_TA_PREVIOUS = 2,
- SVGA3D_TA_DIFFUSE = 3,
- SVGA3D_TA_TEXTURE = 4,
- SVGA3D_TA_SPECULAR = 5,
- SVGA3D_TA_MAX
-} SVGA3dTextureArgData;
-
-#define SVGA3D_TM_MASK_LEN 4
-
-/* Modifiers for texture argument constants defined above. */
-typedef enum {
- SVGA3D_TM_NONE = 0,
- SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
- SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
-} SVGA3dTextureArgModifier;
-
-#define SVGA3D_INVALID_ID ((uint32)-1)
-#define SVGA3D_MAX_CLIP_PLANES 6
-
-/*
- * This is the limit to the number of fixed-function texture
- * transforms and texture coordinates we can support. It does *not*
- * correspond to the number of texture image units (samplers) we
- * support!
- */
-#define SVGA3D_MAX_TEXTURE_COORDS 8
-
-/*
- * Vertex declarations
- *
- * Notes:
- *
- * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
- * draw with any POSITIONT vertex arrays, the programmable vertex
- * pipeline will be implicitly disabled. Drawing will take place as if
- * no vertex shader was bound.
- */
-
-typedef enum {
- SVGA3D_DECLUSAGE_POSITION = 0,
- SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
- SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
- SVGA3D_DECLUSAGE_NORMAL, /* 3 */
- SVGA3D_DECLUSAGE_PSIZE, /* 4 */
- SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
- SVGA3D_DECLUSAGE_TANGENT, /* 6 */
- SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
- SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
- SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
- SVGA3D_DECLUSAGE_COLOR, /* 10 */
- SVGA3D_DECLUSAGE_FOG, /* 11 */
- SVGA3D_DECLUSAGE_DEPTH, /* 12 */
- SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
- SVGA3D_DECLUSAGE_MAX
-} SVGA3dDeclUsage;
-
-typedef enum {
- SVGA3D_DECLMETHOD_DEFAULT = 0,
- SVGA3D_DECLMETHOD_PARTIALU,
- SVGA3D_DECLMETHOD_PARTIALV,
- SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
- SVGA3D_DECLMETHOD_UV,
- SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
- SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
-} SVGA3dDeclMethod;
-
-typedef enum {
- SVGA3D_DECLTYPE_FLOAT1 = 0,
- SVGA3D_DECLTYPE_FLOAT2 = 1,
- SVGA3D_DECLTYPE_FLOAT3 = 2,
- SVGA3D_DECLTYPE_FLOAT4 = 3,
- SVGA3D_DECLTYPE_D3DCOLOR = 4,
- SVGA3D_DECLTYPE_UBYTE4 = 5,
- SVGA3D_DECLTYPE_SHORT2 = 6,
- SVGA3D_DECLTYPE_SHORT4 = 7,
- SVGA3D_DECLTYPE_UBYTE4N = 8,
- SVGA3D_DECLTYPE_SHORT2N = 9,
- SVGA3D_DECLTYPE_SHORT4N = 10,
- SVGA3D_DECLTYPE_USHORT2N = 11,
- SVGA3D_DECLTYPE_USHORT4N = 12,
- SVGA3D_DECLTYPE_UDEC3 = 13,
- SVGA3D_DECLTYPE_DEC3N = 14,
- SVGA3D_DECLTYPE_FLOAT16_2 = 15,
- SVGA3D_DECLTYPE_FLOAT16_4 = 16,
- SVGA3D_DECLTYPE_MAX,
-} SVGA3dDeclType;
-
-/*
- * This structure is used for the divisor for geometry instancing;
- * it's a direct translation of the Direct3D equivalent.
- */
-typedef union {
- struct {
- /*
- * For index data, this number represents the number of instances to draw.
- * For instance data, this number represents the number of
- * instances per vertex in this stream.
- */
- uint32 count : 30;
-
- /*
- * This is 1 if this is supposed to be the data that is repeated for
- * every instance.
- */
- uint32 indexedData : 1;
-
- /*
- * This is 1 if this is supposed to be the per-instance data.
- */
- uint32 instanceData : 1;
- };
-
- uint32 value;
-} SVGA3dVertexDivisor;
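
A sketch (not part of the header) of the instancing setup this union
encodes, mirroring D3D's D3DSTREAMSOURCE_INSTANCEDATA | 1, i.e. advance
this stream once per instance:

   SVGA3dVertexDivisor div;
   div.value        = 0;  /* clear all bits first */
   div.count        = 1;
   div.instanceData = 1;  /* this stream holds per-instance data */
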
-
-typedef enum {
- SVGA3D_PRIMITIVE_INVALID = 0,
- SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
- SVGA3D_PRIMITIVE_POINTLIST = 2,
- SVGA3D_PRIMITIVE_LINELIST = 3,
- SVGA3D_PRIMITIVE_LINESTRIP = 4,
- SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
- SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
- SVGA3D_PRIMITIVE_MAX
-} SVGA3dPrimitiveType;
-
-typedef enum {
- SVGA3D_COORDINATE_INVALID = 0,
- SVGA3D_COORDINATE_LEFTHANDED = 1,
- SVGA3D_COORDINATE_RIGHTHANDED = 2,
- SVGA3D_COORDINATE_MAX
-} SVGA3dCoordinateType;
-
-typedef enum {
- SVGA3D_TRANSFORM_INVALID = 0,
- SVGA3D_TRANSFORM_WORLD = 1,
- SVGA3D_TRANSFORM_VIEW = 2,
- SVGA3D_TRANSFORM_PROJECTION = 3,
- SVGA3D_TRANSFORM_TEXTURE0 = 4,
- SVGA3D_TRANSFORM_TEXTURE1 = 5,
- SVGA3D_TRANSFORM_TEXTURE2 = 6,
- SVGA3D_TRANSFORM_TEXTURE3 = 7,
- SVGA3D_TRANSFORM_TEXTURE4 = 8,
- SVGA3D_TRANSFORM_TEXTURE5 = 9,
- SVGA3D_TRANSFORM_TEXTURE6 = 10,
- SVGA3D_TRANSFORM_TEXTURE7 = 11,
- SVGA3D_TRANSFORM_WORLD1 = 12,
- SVGA3D_TRANSFORM_WORLD2 = 13,
- SVGA3D_TRANSFORM_WORLD3 = 14,
- SVGA3D_TRANSFORM_MAX
-} SVGA3dTransformType;
-
-typedef enum {
- SVGA3D_LIGHTTYPE_INVALID = 0,
- SVGA3D_LIGHTTYPE_POINT = 1,
- SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
- SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
- SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
- SVGA3D_LIGHTTYPE_MAX
-} SVGA3dLightType;
-
-typedef enum {
- SVGA3D_CUBEFACE_POSX = 0,
- SVGA3D_CUBEFACE_NEGX = 1,
- SVGA3D_CUBEFACE_POSY = 2,
- SVGA3D_CUBEFACE_NEGY = 3,
- SVGA3D_CUBEFACE_POSZ = 4,
- SVGA3D_CUBEFACE_NEGZ = 5,
-} SVGA3dCubeFace;
-
-typedef enum {
- SVGA3D_SHADERTYPE_INVALID = 0,
- SVGA3D_SHADERTYPE_MIN = 1,
- SVGA3D_SHADERTYPE_VS = 1,
- SVGA3D_SHADERTYPE_PS = 2,
- SVGA3D_SHADERTYPE_MAX = 3,
- SVGA3D_SHADERTYPE_GS = 3,
-} SVGA3dShaderType;
-
-#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
-
-typedef enum {
- SVGA3D_CONST_TYPE_FLOAT = 0,
- SVGA3D_CONST_TYPE_INT = 1,
- SVGA3D_CONST_TYPE_BOOL = 2,
- SVGA3D_CONST_TYPE_MAX
-} SVGA3dShaderConstType;
-
-#define SVGA3D_MAX_SURFACE_FACES 6
-
-typedef enum {
- SVGA3D_STRETCH_BLT_POINT = 0,
- SVGA3D_STRETCH_BLT_LINEAR = 1,
- SVGA3D_STRETCH_BLT_MAX
-} SVGA3dStretchBltMode;
-
-typedef enum {
- SVGA3D_QUERYTYPE_OCCLUSION = 0,
- SVGA3D_QUERYTYPE_MAX
-} SVGA3dQueryType;
-
-typedef enum {
- SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
- SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
- SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
- SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
-} SVGA3dQueryState;
-
-typedef enum {
- SVGA3D_WRITE_HOST_VRAM = 1,
- SVGA3D_READ_HOST_VRAM = 2,
-} SVGA3dTransferType;
-
-/*
- * The maximum number of vertex arrays we're guaranteed to support in
- * SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_VERTEX_ARRAYS 32
-
-/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
- * Identifiers for commands in the command FIFO.
- *
- * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
- * the SVGA3D protocol and remain reserved; they should not be used in the
- * future.
- *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
- * current SVGA3D protocol.
- *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
- * and up.
- */
-
-#define SVGA_3D_CMD_LEGACY_BASE 1000
-#define SVGA_3D_CMD_BASE 1040
-
-#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
-#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
-#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
-#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
-#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
-#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
-#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
-#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
-#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
-#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
-#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
-#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
-#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
-#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
-#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
-#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
-#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
-#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
-#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
-#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
-#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
-#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
-#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
-#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
-#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
-#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
-#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
-#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_SCREEN_DMA 1082
-#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
-#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
-
-#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
-#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
-#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
-#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
-#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
-#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
-#define SVGA_3D_CMD_READBACK_OTABLE 1092
-
-#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
-#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
-#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
-#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
-
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
-#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
-#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
-#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
-#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
-#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
-#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
-#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
-#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
-
-#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
-#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
-#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
-#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
-#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
-
-#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
-#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
-#define SVGA_3D_CMD_BIND_GB_SHADER 1114
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
-
-#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
-#define SVGA_3D_CMD_END_GB_QUERY 1117
-#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
-
-#define SVGA_3D_CMD_NOP 1119
-
-#define SVGA_3D_CMD_ENABLE_GART 1120
-#define SVGA_3D_CMD_DISABLE_GART 1121
-#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
-#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
-
-#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
-#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
-#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
-#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
-
-#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
-
-#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
-#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
-#define SVGA_3D_CMD_GB_MOB_FENCE 1133
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
-#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
-#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
-#define SVGA_3D_CMD_NOP_ERROR 1137
-
-#define SVGA_3D_CMD_RESERVED1 1138
-#define SVGA_3D_CMD_RESERVED2 1139
-#define SVGA_3D_CMD_RESERVED3 1140
-#define SVGA_3D_CMD_RESERVED4 1141
-#define SVGA_3D_CMD_RESERVED5 1142
-
-#define SVGA_3D_CMD_MAX 1142
-#define SVGA_3D_CMD_FUTURE_MAX 3000
-
-/*
- * Common substructures used in multiple FIFO commands:
- */
-
-typedef struct {
- union {
- struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
- };
- uint32 uintValue;
- };
-} SVGA3dFogMode;
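
Illustratively (assuming the anonymous-struct access shown above), a
linear, per-pixel, depth-based fog state would be composed as:

   SVGA3dFogMode fog;
   fog.function = SVGA3D_FOGFUNC_LINEAR;
   fog.type     = SVGA3D_FOGTYPE_PIXEL;
   fog.base     = SVGA3D_FOGBASE_DEPTHBASED;
   /* fog.uintValue is the value carried by SVGA3D_RS_FOGMODE */
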
-
-/*
- * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
- * is a surface ID as well as face/mipmap indices.
- */
-
-typedef
-struct SVGA3dSurfaceImageId {
- uint32 sid;
- uint32 face;
- uint32 mipmap;
-} SVGA3dSurfaceImageId;
-
-typedef
-struct SVGA3dGuestImage {
- SVGAGuestPtr ptr;
-
- /*
- * A note on interpretation of pitch: This value of pitch is the
- * number of bytes between vertically adjacent image
- * blocks. Normally this is the number of bytes between the first
- * pixel of two adjacent scanlines. With compressed textures,
- * however, this may represent the number of bytes between
- * compression blocks rather than between rows of pixels.
- *
- * XXX: Compressed textures currently must be tightly packed in guest memory.
- *
- * If the image is 1-dimensional, pitch is ignored.
- *
- * If 'pitch' is zero, the SVGA3D device calculates a pitch value
- * assuming each row of blocks is tightly packed.
- */
- uint32 pitch;
-} SVGA3dGuestImage;
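
As an illustrative aside, for an uncompressed 2D image the pitch gives the
usual row-major addressing; here image is an SVGA3dGuestImage and
bytesPerPixel is an assumed per-format constant:

   uint32 byteOffset = y * image.pitch + x * bytesPerPixel;
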
-
-
-/*
- * FIFO command format definitions:
- */
-
-/*
- * The data size header following cmdNum for every 3d command
- */
-typedef
-struct {
- uint32 id;
- uint32 size;
-} SVGA3dCmdHeader;
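
Sketch only (the convention is an assumption drawn from how the vmwgfx
driver fills these in): 'size' covers the body that follows the header,
not the header itself. Emitting a CONTEXT_DEFINE command would then look
roughly like:

   SVGA3dCmdHeader header;
   header.id   = SVGA_3D_CMD_CONTEXT_DEFINE;
   header.size = sizeof(SVGA3dCmdDefineContext); /* body bytes after header */
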
-
-/*
- * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
- * optional mipmaps and cube faces.
- */
-
-typedef
-struct {
- uint32 width;
- uint32 height;
- uint32 depth;
-} SVGA3dSize;
-
-typedef enum {
- SVGA3D_SURFACE_CUBEMAP = (1 << 0),
- SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
- SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
- SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
- SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
- SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
- SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
- SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
- SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
-} SVGA3dSurfaceFlags;
-
-typedef
-struct {
- uint32 numMipLevels;
-} SVGA3dSurfaceFace;
-
-typedef
-struct {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
- * SVGA3dSurfaceFace structures must have the same value in the
- * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
- * structure must have numMipLevels set to 0.
- */
- SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
- /*
- * Followed by an SVGA3dSize structure for each mip level in each face.
- *
- * A note on surface sizes: Sizes are always specified in pixels,
- * even if the true surface size is not a multiple of the minimum
- * block size of the surface's format. For example, a 3x3x1 DXT1
- * compressed texture would actually be stored as a 4x4x1 image in
- * memory.
- */
-} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
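
For illustration, the variable-length tail makes the body size depend on
the total mip count across all faces; a sketch, assuming cmd points at a
filled-in fixed part:

   SVGA3dCmdDefineSurface *cmd;  /* fixed part, already filled in */
   uint32 numSizes = 0, i;
   for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
           numSizes += cmd->face[i].numMipLevels;
   uint32 bodySize = sizeof(*cmd) + numSizes * sizeof(SVGA3dSize);
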
-
-typedef
-struct {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- /*
- * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
- * SVGA3dSurfaceFace structures must have the same value in the
- * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
- * structure must have numMipLevels set to 0.
- */
- SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- /*
- * Followed by an SVGA3dSize structure for each mip level in each face.
- *
- * A note on surface sizes: Sizes are always specified in pixels,
- * even if the true surface size is not a multiple of the minimum
- * block size of the surface's format. For example, a 3x3x1 DXT1
- * compressed texture would actually be stored as a 4x4x1 image in
- * memory.
- */
-} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
- uint32 sid;
-} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
-
-typedef
-struct {
- uint32 cid;
-} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
-
-typedef
-struct {
- uint32 cid;
-} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dClearFlag clearFlag;
- uint32 color;
- float depth;
- uint32 stencil;
- /* Followed by variable number of SVGA3dRect structures */
-} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
-
-typedef
-struct SVGA3dCopyRect {
- uint32 x;
- uint32 y;
- uint32 w;
- uint32 h;
- uint32 srcx;
- uint32 srcy;
-} SVGA3dCopyRect;
-
-typedef
-struct SVGA3dCopyBox {
- uint32 x;
- uint32 y;
- uint32 z;
- uint32 w;
- uint32 h;
- uint32 d;
- uint32 srcx;
- uint32 srcy;
- uint32 srcz;
-} SVGA3dCopyBox;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 w;
- uint32 h;
-} SVGA3dRect;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 z;
- uint32 w;
- uint32 h;
- uint32 d;
-} SVGA3dBox;
-
-typedef
-struct {
- uint32 x;
- uint32 y;
- uint32 z;
-} SVGA3dPoint;
-
-typedef
-struct {
- SVGA3dLightType type;
- SVGA3dBool inWorldSpace;
- float diffuse[4];
- float specular[4];
- float ambient[4];
- float position[4];
- float direction[4];
- float range;
- float falloff;
- float attenuation0;
- float attenuation1;
- float attenuation2;
- float theta;
- float phi;
-} SVGA3dLightData;
-
-typedef
-struct {
- uint32 sid;
- /* Followed by variable number of SVGA3dCopyRect structures */
-} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
-
-typedef
-struct {
- SVGA3dRenderStateName state;
- union {
- uint32 uintValue;
- float floatValue;
- };
-} SVGA3dRenderState;
-
-typedef
-struct {
- uint32 cid;
- /* Followed by variable number of SVGA3dRenderState structures */
-} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRenderTargetType type;
- SVGA3dSurfaceImageId target;
-} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
-
-typedef
-struct {
- SVGA3dSurfaceImageId src;
- SVGA3dSurfaceImageId dest;
- /* Followed by variable number of SVGA3dCopyBox structures */
-} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
-
-typedef
-struct {
- SVGA3dSurfaceImageId src;
- SVGA3dSurfaceImageId dest;
- SVGA3dBox boxSrc;
- SVGA3dBox boxDest;
- SVGA3dStretchBltMode mode;
-} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
-
-typedef
-struct {
- /*
- * If the discard flag is present in a surface DMA operation, the host may
- * discard the contents of the current mipmap level and face of the target
- * surface before applying the surface DMA contents.
- */
- uint32 discard : 1;
-
- /*
- * If the unsynchronized flag is present, the host may perform this upload
- * without syncing to pending reads on this surface.
- */
- uint32 unsynchronized : 1;
-
- /*
- * Guests *MUST* set the reserved bits to 0 before submitting the command
- * suffix, as future flags may occupy these bits.
- */
- uint32 reserved : 30;
-} SVGA3dSurfaceDMAFlags;
-
-typedef
-struct {
- SVGA3dGuestImage guest;
- SVGA3dSurfaceImageId host;
- SVGA3dTransferType transfer;
- /*
- * Followed by variable number of SVGA3dCopyBox structures. For consistency
- * in all clipping logic and coordinate translation, we define the
- * "source" in each copyBox as the guest image and the
- * "destination" as the host image, regardless of transfer
- * direction.
- *
- * For efficiency, the SVGA3D device is free to copy more data than
- * specified. For example, it may round copy boxes outwards such
- * that they lie on particular alignment boundaries.
- */
-} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
-
-/*
- * SVGA3dCmdSurfaceDMASuffix --
- *
- * This is a command suffix that will appear after a SurfaceDMA command in
- * the FIFO. It contains some extra information that hosts may use to
- * optimize performance or protect the guest. This suffix exists to preserve
- * backwards compatibility while also allowing for new functionality to be
- * implemented.
- */
-
-typedef
-struct {
- uint32 suffixSize;
-
- /*
- * The maximum offset, relative to the guestPtr base address, that will
- * be accessed or written to during this
- * surfaceDMA. If the suffix is supported, the host will respect this
- * boundary while performing surface DMAs.
- *
- * Defaults to MAX_UINT32
- */
- uint32 maximumOffset;
-
- /*
- * A set of flags that describes optimizations that the host may perform
- * while performing this surface DMA operation. The guest should never rely
- * on behaviour that is different when these flags are set for correctness.
- *
- * Defaults to 0
- */
- SVGA3dSurfaceDMAFlags flags;
-} SVGA3dCmdSurfaceDMASuffix;
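
A sketch (not from the header) of a suffix that requests purely default
behaviour:

   SVGA3dCmdSurfaceDMASuffix suffix;
   suffix.suffixSize           = sizeof(suffix);
   suffix.maximumOffset        = MAX_UINT32;  /* default: no extra bound */
   suffix.flags.discard        = 0;
   suffix.flags.unsynchronized = 0;
   suffix.flags.reserved       = 0;           /* reserved bits MUST be zero */
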
-
-/*
- * SVGA_3D_CMD_DRAW_PRIMITIVES --
- *
- * This command is the SVGA3D device's generic drawing entry point.
- * It can draw multiple ranges of primitives, optionally using an
- * index buffer, using an arbitrary collection of vertex buffers.
- *
- * Each SVGA3dVertexDecl defines a distinct vertex array to bind
- * during this draw call. The declarations specify which surface
- * the vertex data lives in, what that vertex data is used for,
- * and how to interpret it.
- *
- * Each SVGA3dPrimitiveRange defines a collection of primitives
- * to render using the same vertex arrays. An index buffer is
- * optional.
- */
-
-typedef
-struct {
- /*
- * A range hint is an optional specification for the range of indices
- * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
- * that the entire array will be used.
- *
- * These are only hints. The SVGA3D device may use them for
- * performance optimization if possible, but it's also allowed to
- * ignore these values.
- */
- uint32 first;
- uint32 last;
-} SVGA3dArrayRangeHint;
-
-typedef
-struct {
- /*
- * Define the origin and shape of a vertex or index array. Both
- * 'offset' and 'stride' are in bytes. The provided surface will be
- * reinterpreted as a flat array of bytes in the same format used
- * by surface DMA operations. To avoid unnecessary conversions, the
- * surface should be created with the SVGA3D_BUFFER format.
- *
- * Index 0 in the array starts 'offset' bytes into the surface.
- * Index 1 begins at byte 'offset + stride', etc. Array indices may
- * not be negative.
- */
- uint32 surfaceId;
- uint32 offset;
- uint32 stride;
-} SVGA3dArray;
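
Illustratively, element i of such an array therefore begins at the
following byte offset within the surface (array being an SVGA3dArray):

   uint32 elemOffset = array.offset + i * array.stride;
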
-
-typedef
-struct {
- /*
- * Describe a vertex array's data type, and define how it is to be
- * used by the fixed function pipeline or the vertex shader. It
- * isn't useful to have two VertexDecls with the same
- * VertexArrayIdentity in one draw call.
- */
- SVGA3dDeclType type;
- SVGA3dDeclMethod method;
- SVGA3dDeclUsage usage;
- uint32 usageIndex;
-} SVGA3dVertexArrayIdentity;
-
-typedef
-struct {
- SVGA3dVertexArrayIdentity identity;
- SVGA3dArray array;
- SVGA3dArrayRangeHint rangeHint;
-} SVGA3dVertexDecl;
-
-typedef
-struct {
- /*
- * Define a group of primitives to render, from sequential indices.
- *
- * The values of 'primType' and 'primitiveCount' together imply the
- * total number of vertices that will be rendered.
- */
- SVGA3dPrimitiveType primType;
- uint32 primitiveCount;
-
- /*
- * Optional index buffer. If indexArray.surfaceId is
- * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
- * without an index buffer is identical to rendering with an index
- * buffer containing the sequence [0, 1, 2, 3, ...].
- *
- * If an index buffer is in use, indexWidth specifies the width in
- * bytes of each index value. It must be less than or equal to
- * indexArray.stride.
- *
- * (Currently, the SVGA3D device requires index buffers to be tightly
- * packed. In other words, indexWidth == indexArray.stride)
- */
- SVGA3dArray indexArray;
- uint32 indexWidth;
-
- /*
- * Optional index bias. This number is added to all indices from
- * indexArray before they are used as vertex array indices. This
- * can be used in multiple ways:
- *
- * - When not using an indexArray, this bias can be used to
- * specify where in the vertex arrays to begin rendering.
- *
- * - A positive number here is equivalent to increasing the
- * offset in each vertex array.
- *
- * - A negative number can be used to render using a small
- * vertex array and an index buffer that contains large
- * values. This may be used by some applications that
- * crop a vertex buffer without modifying their index
- * buffer.
- *
- * Note that rendering with a negative bias value may be slower and
- * use more memory than rendering with a positive or zero bias.
- */
- int32 indexBias;
-} SVGA3dPrimitiveRange;
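
A minimal sketch of the indexBias semantics described above; the helper
fetch_vertex_index is hypothetical and assumes 'raw_index' has already been
read from indexArray (or generated sequentially when no index buffer is bound):

static inline int32 fetch_vertex_index(const SVGA3dPrimitiveRange *range,
				       int32 raw_index)
{
	/* The bias is added before the value indexes the vertex arrays. */
	return raw_index + range->indexBias;
}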
-
-typedef
-struct {
- uint32 cid;
- uint32 numVertexDecls;
- uint32 numRanges;
-
- /*
- * The SVGA3dCmdDrawPrimitives structure is followed by up to
- * three variable size arrays. In order, they are:
- *
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
- * SVGA3D_MAX_VERTEX_ARRAYS;
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
- * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
- * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- * the frequency divisor for the corresponding vertex decl).
- */
-} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
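
Given that layout, the size of a DRAW_PRIMITIVES command body (ignoring the
optional divisor array) can be computed as sketched below; the helper name
draw_cmd_body_size is hypothetical:

static inline uint32 draw_cmd_body_size(uint32 numVertexDecls,
					uint32 numRanges)
{
	return sizeof(SVGA3dCmdDrawPrimitives) +
	       numVertexDecls * sizeof(SVGA3dVertexDecl) +
	       numRanges * sizeof(SVGA3dPrimitiveRange);
}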
-
-typedef
-struct {
- uint32 stage;
- SVGA3dTextureStateName name;
- union {
- uint32 value;
- float floatValue;
- };
-} SVGA3dTextureState;
-
-typedef
-struct {
- uint32 cid;
- /* Followed by variable number of SVGA3dTextureState structures */
-} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dTransformType type;
- float matrix[16];
-} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
-
-typedef
-struct {
- float min;
- float max;
-} SVGA3dZRange;
-
-typedef
-struct {
- uint32 cid;
- SVGA3dZRange zRange;
-} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
-
-typedef
-struct {
- float diffuse[4];
- float ambient[4];
- float specular[4];
- float emissive[4];
- float shininess;
-} SVGA3dMaterial;
-
-typedef
-struct {
- uint32 cid;
- SVGA3dFace face;
- SVGA3dMaterial material;
-} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- SVGA3dLightData data;
-} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- uint32 enabled;
-} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRect rect;
-} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dRect rect;
-} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
-
-typedef
-struct {
- uint32 cid;
- uint32 index;
- float plane[4];
-} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
-
-typedef
-struct {
- uint32 cid;
- uint32 shid;
- SVGA3dShaderType type;
- /* Followed by variable number of DWORDs for shader bytecode */
-} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
-
-typedef
-struct {
- uint32 cid;
- uint32 shid;
- SVGA3dShaderType type;
-} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
-
-typedef
-struct {
- uint32 cid;
- uint32 reg; /* register number */
- SVGA3dShaderType type;
- SVGA3dShaderConstType ctype;
- uint32 values[4];
-} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dShaderType type;
- uint32 shid;
-} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
-} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
-} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
-
-typedef
-struct {
- uint32 cid; /* Same parameters passed to END_QUERY */
- SVGA3dQueryType type;
- SVGAGuestPtr guestResult;
-} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
-
-typedef
-struct {
- uint32 totalSize; /* Set by guest before query is ended. */
- SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
- union { /* Set by host on exit from PENDING state */
- uint32 result32;
- };
-} SVGA3dQueryResult;
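
A hedged sketch of the guest-side query flow implied by the structures above,
assuming the SVGA3D_QUERYSTATE_PENDING value defined elsewhere in this header;
'qr' points to the guest memory referenced by guestResult:

static void example_query_setup(SVGA3dQueryResult *qr)
{
	/* Set by the guest before the query is ended. */
	qr->totalSize = sizeof(*qr);
	qr->state = SVGA3D_QUERYSTATE_PENDING;
	/*
	 * Submit SVGA3dCmdEndQuery, then SVGA3dCmdWaitForQuery; the host
	 * writes 'state' and 'result32' when it leaves the PENDING state.
	 */
}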
-
-/*
- * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
- *
- * This is a blit from an SVGA3D surface to a Screen Object. Just
- * like GMR-to-screen blits, this blit may be directed at a
- * specific screen or to the virtual coordinate space.
- *
- * The blit copies from a rectangular region of an SVGA3D surface
- * image to a rectangular region of a screen or screens.
- *
- * This command takes an optional variable-length list of clipping
- * rectangles after the body of the command. If no rectangles are
- * specified, there is no clipping region. The entire destRect is
- * drawn to. If one or more rectangles are included, they describe
- * a clipping region. The clip rectangle coordinates are measured
- * relative to the top-left corner of destRect.
- *
- * This clipping region serves multiple purposes:
- *
- * - It can be used to perform an irregularly shaped blit more
- * efficiently than by issuing many separate blit commands.
- *
- * - It is equivalent to allowing blits with non-integer
- * source coordinates. You could blit just one half-pixel
- * of a source, for example, by specifying a larger
- * destination rectangle than you need, then removing
- * part of it using a clip rectangle.
- *
- * Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
- *
- * Limitations:
- *
- * - Currently, no backend supports blits from a mipmap or face
- * other than the first one.
- */
-
-typedef
-struct {
- SVGA3dSurfaceImageId srcImage;
- SVGASignedRect srcRect;
- uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
- SVGASignedRect destRect; /* Supports scaling if src/dest differ in size */
- /* Clipping: zero or more SVGASignedRects follow */
-} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
-
-typedef
-struct {
- uint32 sid;
- SVGA3dTextureFilter filter;
-} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
-/*
- * Guest-backed surface definitions.
- */
-
-typedef uint32 SVGAMobId;
-
-typedef enum SVGAMobFormat {
- SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
- SVGA3D_MOBFMT_PTDEPTH_0 = 0,
- SVGA3D_MOBFMT_PTDEPTH_1 = 1,
- SVGA3D_MOBFMT_PTDEPTH_2 = 2,
- SVGA3D_MOBFMT_RANGE = 3,
- SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
- SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
- SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
- SVGA3D_MOBFMT_MAX,
-} SVGAMobFormat;
-
-/*
- * Sizes of opaque types.
- */
-
-#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
-#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
-#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
-#define SVGA3D_CONTEXT_DATA_SIZE 16384
-
-/*
- * SVGA3dCmdSetOTableBase --
- *
- * This command allows the guest to specify the base PPN of the
- * specified object table.
- */
-
-typedef enum {
- SVGA_OTABLE_MOB = 0,
- SVGA_OTABLE_MIN = 0,
- SVGA_OTABLE_SURFACE = 1,
- SVGA_OTABLE_CONTEXT = 2,
- SVGA_OTABLE_SHADER = 3,
- SVGA_OTABLE_SCREEN_TARGET = 4,
- SVGA_OTABLE_DX9_MAX = 5,
- SVGA_OTABLE_MAX = 8
-} SVGAOTableType;
-
-typedef
-struct {
- SVGAOTableType type;
- PPN baseAddress;
- uint32 sizeInBytes;
- uint32 validSizeInBytes;
- SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
-
-typedef
-struct {
- SVGAOTableType type;
- PPN64 baseAddress;
- uint32 sizeInBytes;
- uint32 validSizeInBytes;
- SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
-
-typedef
-struct {
- SVGAOTableType type;
-} __packed
-SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
-
-/*
- * Define a memory object (Mob) in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
-
-
-/*
- * Destroys an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBMob {
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
-/*
- * Redefine an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
-
-/*
- * Define a memory object (Mob) in the OTable with a PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob64 {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN64 base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
-
-/*
- * Redefine an object in the OTable with PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob64 {
- SVGAMobId mobid;
- SVGAMobFormat ptDepth;
- PPN64 base;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
-
-/*
- * Notification that the page tables have been modified.
- */
-
-typedef
-struct SVGA3dCmdUpdateGBMobMapping {
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
-
-/*
- * Define a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDefineGBSurface {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- uint32 numMipLevels;
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- SVGA3dSize size;
-} __packed
-SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
-
-/*
- * Destroy a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
-
-/*
- * Bind a guest-backed surface to an object.
- */
-
-typedef
-struct SVGA3dCmdBindGBSurface {
- uint32 sid;
- SVGAMobId mobid;
-} __packed
-SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
-
-/*
- * Conditionally bind a mob to a guest-backed surface if testMobid
- * matches the currently bound mob. Optionally issue a readback on
- * the surface while it is still bound to the old mobid if the mobid
- * is changed by this command.
- */
-
-#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
-
-typedef
-struct {
- uint32 sid;
- SVGAMobId testMobid;
- SVGAMobId mobid;
- uint32 flags;
-} __packed
-SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
-
-/*
- * Update an image in a guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBImage {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
-
-/*
- * Update an entire guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
-
-/*
- * Readback an image in a guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImage {
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
-
-/*
- * Readback an entire guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
-
-/*
- * Readback a sub rect of an image in a guest-backed surface. After
- * issuing this command the driver is required to issue an update call
- * of the same region before issuing any other commands that reference
- * this surface; otherwise, rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImagePartial {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
- uint32 invertBox;
-} __packed
-SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
-
-/*
- * Invalidate an image in a guest-backed surface.
- * (Notify the device that the contents can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImage {
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
-
-/*
- * Invalidate an entire guest-backed surface.
- * (Notify the device that the contents of all images can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBSurface {
- uint32 sid;
-} __packed
-SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
-
-/*
- * Invalidate a sub rect of an image in a guest-backed surface. After
- * issuing this command the driver is required to issue an update call
- * of the same region before issuing any other commands that reference
- * this surface; otherwise, rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImagePartial {
- SVGA3dSurfaceImageId image;
- SVGA3dBox box;
- uint32 invertBox;
-} __packed
-SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
-
-/*
- * Define a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDefineGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
-
-/*
- * Destroy a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
-
-/*
- * Bind a guest-backed context.
- *
- * validContents should be set to 0 for new contexts,
- * and 1 if this is an old context which is getting paged
- * back onto the device.
- *
- * For new contexts, it is recommended that the driver
- * issue commands to initialize all interesting state
- * prior to rendering.
- */
-
-typedef
-struct SVGA3dCmdBindGBContext {
- uint32 cid;
- SVGAMobId mobid;
- uint32 validContents;
-} __packed
-SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
-
-/*
- * Readback a guest-backed context.
- * (Request that the device flush the contents back into guest memory.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
-
-/*
- * Invalidate a guest-backed context.
- */
-typedef
-struct SVGA3dCmdInvalidateGBContext {
- uint32 cid;
-} __packed
-SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
-
-/*
- * Define a guest-backed shader.
- */
-
-typedef
-struct SVGA3dCmdDefineGBShader {
- uint32 shid;
- SVGA3dShaderType type;
- uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
-
-/*
- * Bind a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdBindGBShader {
- uint32 shid;
- SVGAMobId mobid;
- uint32 offsetInBytes;
-} __packed
-SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
-
-/*
- * Destroy a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdDestroyGBShader {
- uint32 shid;
-} __packed
-SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
-
-typedef
-struct {
- uint32 cid;
- uint32 regStart;
- SVGA3dShaderType shaderType;
- SVGA3dShaderConstType constType;
-
- /*
- * Followed by a variable number of shader constants.
- *
- * Note that FLOAT and INT constants are 4 dwords in length, while
- * BOOL constants are 1 dword in length.
- */
-} __packed
-SVGA3dCmdSetGBShaderConstInline;
-/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
-} __packed
-SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAMobId mobid;
- uint32 offset;
-} __packed
-SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
-
-
-/*
- * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
- *
- * The semantics of this command are identical to those of
- * SVGA_3D_CMD_WAIT_FOR_QUERY, except that the results are written
- * to a Mob instead of a GMR.
- */
-
-typedef
-struct {
- uint32 cid;
- SVGA3dQueryType type;
- SVGAMobId mobid;
- uint32 offset;
-} __packed
-SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
-
-typedef
-struct {
- SVGAMobId mobid;
- uint32 fbOffset;
- uint32 initalized;
-} __packed
-SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
-
-typedef
-struct {
- SVGAMobId mobid;
- uint32 gartOffset;
-} __packed
-SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
-
-
-typedef
-struct {
- uint32 gartOffset;
- uint32 numPages;
-} __packed
-SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
-
-
-/*
- * Screen Targets
- */
-#define SVGA_STFLAG_PRIMARY (1 << 0)
-
-typedef
-struct {
- uint32 stid;
- uint32 width;
- uint32 height;
- int32 xRoot;
- int32 yRoot;
- uint32 flags;
-} __packed
-SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
-} __packed
-SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
- SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
-
-typedef
-struct {
- uint32 stid;
- SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
-
-/*
- * Capability query index.
- *
- * Notes:
- *
- * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- * fixed-function texture units available. Each of these units
- * works in both FFP and Shader modes, and they support texture
- * transforms and texture coordinates. The host may have additional
- * texture image units that are only usable with shaders.
- *
- * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
- * return TRUE. Even on physical hardware that does not support
- * these formats natively, the SVGA3D device will provide an emulation
- * which should be invisible to the guest OS.
- *
- * In general, the SVGA3D device should support any operation on
- * any surface format, it just may perform some of these
- * operations in software depending on the capabilities of the
- * available physical hardware.
- *
- * XXX: In the future, we will add capabilities that describe in
- * detail what formats are supported in hardware for what kinds
- * of operations.
- */
-
-typedef enum {
- SVGA3D_DEVCAP_3D = 0,
- SVGA3D_DEVCAP_MAX_LIGHTS = 1,
- SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
- SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
- SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
- SVGA3D_DEVCAP_VERTEX_SHADER = 5,
- SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
- SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
- SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
- SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
- SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
- SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
- SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
- SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
- SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
- SVGA3D_DEVCAP_QUERY_TYPES = 15,
- SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
- SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
- SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
- SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
- SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
- SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
- SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
- SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
- SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
- SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
- SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
- SVGA3D_DEVCAP_TEXTURE_OPS = 31,
- SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
- SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
- SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
- SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
- SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
- SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
- SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
- SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
- SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
- SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
- SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
- SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
- SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
- SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
- SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
- SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
- SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
- SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
-
- /*
- * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
- * render targets. This does not include the depth or stencil targets.
- */
- SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
-
- SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
- SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
- SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
- SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
- SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
- SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
- SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
- SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
- SVGA3D_DEVCAP_SUPERSAMPLE = 73,
- SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
- SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
- SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
-
- /*
- * This is the maximum number of SVGA context IDs that the guest
- * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
- */
- SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
-
- /*
- * This is the maximum number of SVGA surface IDs that the guest
- * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
- */
- SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
-
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
-
- SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
- SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_VGPU10 = 84,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
- * ORed together, one for every type of video decoding supported.
- */
- SVGA3D_DEVCAP_VIDEO_DECODE = 85,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
- * ORed together, one for every type of video processing supported.
- */
- SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
-
- SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
- SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
- SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
- SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
-
- SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
-
- /*
- * Does the host support the SVGA logic ops commands?
- */
- SVGA3D_DEVCAP_LOGICOPS = 92,
-
- /*
- * What support does the host have for screen targets?
- *
- * See the SVGA3D_SCREENTARGET_CAP bits below.
- */
- SVGA3D_DEVCAP_SCREENTARGETS = 93,
-
- SVGA3D_DEVCAP_MAX /* This must be the last index. */
-} SVGA3dDevCapIndex;
-
-typedef union {
- Bool b;
- uint32 u;
- int32 i;
- float f;
-} SVGA3dDevCapResult;
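
A sketch of interpreting a devcap result according to the per-index type
annotations above ('boolean' vs 'float'); report_line_caps is a hypothetical
helper:

static void report_line_caps(SVGA3dDevCapIndex idx, SVGA3dDevCapResult r)
{
	switch (idx) {
	case SVGA3D_DEVCAP_LINE_AA:
	case SVGA3D_DEVCAP_LINE_STIPPLE:
		/* Boolean caps: use r.b */
		break;
	case SVGA3D_DEVCAP_MAX_LINE_WIDTH:
	case SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH:
		/* Float caps: use r.f */
		break;
	default:
		/* Most caps are plain 32-bit values: use r.u */
		break;
	}
}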
-
-typedef enum {
- SVGA3DCAPS_RECORD_UNKNOWN = 0,
- SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
- SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
- SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
-} SVGA3dCapsRecordType;
-
-typedef
-struct SVGA3dCapsRecordHeader {
- uint32 length;
- SVGA3dCapsRecordType type;
-}
-SVGA3dCapsRecordHeader;
-
-typedef
-struct SVGA3dCapsRecord {
- SVGA3dCapsRecordHeader header;
- uint32 data[1];
-}
-SVGA3dCapsRecord;
-
-
-typedef uint32 SVGA3dCapPair[2];
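
A minimal sketch of walking a caps record list, assuming (as the layout above
suggests) that 'length' counts 32-bit words for the whole record including its
header, and that a zero length terminates the list:

static void walk_caps_records(const uint32 *caps, uint32 num_words)
{
	uint32 i = 0;

	while (i < num_words) {
		const SVGA3dCapsRecord *record =
			(const SVGA3dCapsRecord *)&caps[i];

		if (record->header.length == 0 ||
		    i + record->header.length > num_words)
			break;

		if (record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN &&
		    record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) {
			/* record->data holds SVGA3dCapPair entries. */
		}

		i += record->header.length;
	}
}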
-
-#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef3385096145..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at most four active channels in a block:
- * 1. Red, bump W, luminance and depth are stored in the first channel.
- * 2. Green, bump V and stencil are stored in the second channel.
- * 3. Blue and bump U are stored in the third channel.
- * 4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- * 1. For compressed formats, only the data channel is used and its size
- * is equal to that of a singular block in the compression scheme.
- * 2. For buffer formats, only the data channel is used and its size is
- * exactly one byte in length.
- * 3. In each case the bit depth represents the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
- SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
- SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
- data */
- SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
- data */
- SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
- U and V */
- SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
- data */
- SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
- data */
- SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
- channel */
- SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
- data */
- SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
- data */
- SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
- data */
- SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
- data */
- SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
- SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
- channel */
- SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
- data */
- SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
- data */
- SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
- data depending on the
- compression method used */
- SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
- floating point
- representation in
- all channels */
- SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
- data. */
- SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
- SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
- SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
- SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
- SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
- e.g., NV12. */
- SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
- Y, U, V, e.g., YV12. */
-
- SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V,
- SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_LUMINANCE,
- SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_W,
- SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V |
- SVGA3DBLOCKDESC_W |
- SVGA3DBLOCKDESC_Q,
- SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_IEEE_FP,
- SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
- SVGA3DBLOCKDESC_STENCIL,
- SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
- SVGA3DBLOCKDESC_Y,
- SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
- SVGA3DBLOCKDESC_Y |
- SVGA3DBLOCKDESC_U_VIDEO |
- SVGA3DBLOCKDESC_V_VIDEO,
- SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_EXP,
- SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_2PLANAR_YUV,
- SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- * 1. Block description.
- * 2. Dimensions of a block in the surface.
- * 3. Size of block in bytes.
- * 4. Bit depth of the pixel data.
- * 5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type) \
- struct { \
- union { \
- type blue; \
- type u; \
- type uv_video; \
- type u_video; \
- }; \
- union { \
- type green; \
- type v; \
- type stencil; \
- type v_video; \
- }; \
- union { \
- type red; \
- type w; \
- type luminance; \
- type y; \
- type depth; \
- type data; \
- }; \
- union { \
- type alpha; \
- type q; \
- type exp; \
- }; \
- }
-
-struct svga3d_surface_desc {
- enum svga3d_block_desc block_desc;
- surf_size_struct block_size;
- u32 bytes_per_block;
- u32 pitch_bytes_per_block;
-
- struct {
- u32 total;
- SVGA3D_CHANNEL_DEF(uint8);
- } bit_depth;
-
- struct {
- SVGA3D_CHANNEL_DEF(uint8);
- } bit_offset;
-};
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
- {SVGA3DBLOCKDESC_NONE,
- {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
- {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
- {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
- {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
- {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
- {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
-
- {SVGA3DBLOCKDESC_LUMINANCE,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
-
- {SVGA3DBLOCKDESC_LA,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
- {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
-
- {SVGA3DBLOCKDESC_LUMINANCE,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
-
- {SVGA3DBLOCKDESC_LA,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
- {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
- {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
- {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
-
- {SVGA3DBLOCKDESC_RGBA_FP,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
-
- {SVGA3DBLOCKDESC_RGBA_FP,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
-
- {SVGA3DBLOCKDESC_UVL,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
-
- {SVGA3DBLOCKDESC_UVWA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
-
- {SVGA3DBLOCKDESC_ALPHA,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
-
- {SVGA3DBLOCKDESC_BUFFER,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
- {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
-
- {SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
- {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
-
- {SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
- {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
-
- {SVGA3DBLOCKDESC_NV12,
- {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
-
- {SVGA3DBLOCKDESC_AYUV,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
- {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGB_FP,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
-
- {SVGA3DBLOCKDESC_UVW,
- {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
- {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
-
- {SVGA3DBLOCKDESC_UVWQ,
- {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
- {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
- {SVGA3DBLOCKDESC_R_FP,
- {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
- {SVGA3DBLOCKDESC_GREEN,
- {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
- {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
-
- {SVGA3DBLOCKDESC_RGB_FP,
- {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
- {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
-
- {SVGA3DBLOCKDESC_RGBA_SRGB,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG_FP,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
- {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
-
- {SVGA3DBLOCKDESC_GREEN,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
-
- {SVGA3DBLOCKDESC_UV,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
-
- {SVGA3DBLOCKDESC_RED,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
-
- {SVGA3DBLOCKDESC_U,
- {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
-
- {SVGA3DBLOCKDESC_RED,
- {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
-
- {SVGA3DBLOCKDESC_RGBE,
- {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
- {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
-
- {SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
- {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
-
- {SVGA3DBLOCKDESC_COMPRESSED,
- {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
- {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
-
- {SVGA3DBLOCKDESC_RGBA,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGBA_SRGB,
- {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_RGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
-
- {SVGA3DBLOCKDESC_RGB_SRGB,
- {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
- {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
-
- {SVGA3DBLOCKDESC_DEPTH,
- {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
- {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
-
- {SVGA3DBLOCKDESC_DS,
- {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
- {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
-};
-
-static inline u32 clamped_umul32(u32 a, u32 b)
-{
- u64 tmp = (u64) a*b;
- return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
-}
-
-static inline const struct svga3d_surface_desc *
-svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
-{
- if (format < ARRAY_SIZE(svga3d_surface_descs))
- return &svga3d_surface_descs[format];
-
- return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- * Given a base level size and the mip level, compute the size of
- * the mip level.
- *
- * Results:
- * See above.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static inline surf_size_struct
-svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
-{
- surf_size_struct size;
-
- size.width = max_t(u32, base_level.width >> mip_level, 1);
- size.height = max_t(u32, base_level.height >> mip_level, 1);
- size.depth = max_t(u32, base_level.depth >> mip_level, 1);
- return size;
-}
-
-static inline void
-svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
- const surf_size_struct *pixel_size,
- surf_size_struct *block_size)
-{
- block_size->width = DIV_ROUND_UP(pixel_size->width,
- desc->block_size.width);
- block_size->height = DIV_ROUND_UP(pixel_size->height,
- desc->block_size.height);
- block_size->depth = DIV_ROUND_UP(pixel_size->depth,
- desc->block_size.depth);
-}
-
-static inline bool
-svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
-{
- return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
-}
-
-static inline u32
-svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
- const surf_size_struct *size)
-{
- u32 pitch;
- surf_size_struct blocks;
-
- svga3dsurface_get_size_in_blocks(desc, size, &blocks);
-
- pitch = blocks.width * desc->pitch_bytes_per_block;
-
- return pitch;
-}
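
Worked example (a sketch based on the format table above): for SVGA3D_NV12,
block_size is {2, 2, 1} and pitch_bytes_per_block is 2, so a 640-pixel-wide
surface spans DIV_ROUND_UP(640, 2) = 320 blocks per row and has a pitch of
320 * 2 = 640 bytes.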
-
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- * Return the number of bytes of buffer space required to store
- * one image of a surface, optionally using the specified pitch.
- *
- * If pitch is zero, it is assumed that rows are tightly packed.
- *
- * This function is overflow-safe. If the result would have
- * overflowed, we return U32_MAX instead.
- *
- * Results:
- * Byte count.
- *
- * Side effects:
- * None.
- *
- *-----------------------------------------------------------------------------
- */
-
-static inline u32
-svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
- const surf_size_struct *size,
- u32 pitch)
-{
- surf_size_struct image_blocks;
- u32 slice_size, total_size;
-
- svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
-
- if (svga3dsurface_is_planar_surface(desc)) {
- total_size = clamped_umul32(image_blocks.width,
- image_blocks.height);
- total_size = clamped_umul32(total_size, image_blocks.depth);
- total_size = clamped_umul32(total_size, desc->bytes_per_block);
- return total_size;
- }
-
- if (pitch == 0)
- pitch = svga3dsurface_calculate_pitch(desc, size);
-
- slice_size = clamped_umul32(image_blocks.height, pitch);
- total_size = clamped_umul32(slice_size, image_blocks.depth);
-
- return total_size;
-}
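
Worked example (a sketch): for a 640x480 SVGA3D_NV12 image, the planar path
above yields image_blocks = {320, 240, 1} with bytes_per_block = 6, i.e.
320 * 240 * 6 = 460800 bytes -- exactly the 640*480 Y plane plus the 640*240
interleaved UV plane.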
-
-static inline u32
-svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
- surf_size_struct base_level_size,
- u32 num_mip_levels,
- bool cubemap)
-{
- const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u64 total_size = 0;
- u32 mip;
-
- for (mip = 0; mip < num_mip_levels; mip++) {
- surf_size_struct size =
- svga3dsurface_get_mip_size(base_level_size, mip);
- total_size += svga3dsurface_get_image_buffer_size(desc,
- &size, 0);
- }
-
- if (cubemap)
- total_size *= SVGA3D_MAX_SURFACE_FACES;
-
- return (u32) min_t(u64, total_size, (u64) U32_MAX);
-}
-
-
-/**
- * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
- * in an image (or volume).
- *
- * @format: The surface format.
- * @width: The image width in pixels.
- * @height: The image height in pixels.
- * @x: X coordinate of the pixel.
- * @y: Y coordinate of the pixel.
- * @z: Z coordinate (slice) of the pixel.
- */
-static inline u32
-svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
- u32 width, u32 height,
- u32 x, u32 y, u32 z)
-{
- const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- const u32 bw = desc->block_size.width, bh = desc->block_size.height;
- const u32 bd = desc->block_size.depth;
- const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
- const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
- const u32 offset = (z / bd * imgstride +
- y / bh * rowstride +
- x / bw * desc->bytes_per_block);
- return offset;
-}
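
Worked example (a sketch using values from the format table above): for
SVGA3D_A8R8G8B8 (1x1x1 blocks, 4 bytes per block) in a 640x480 image,
rowstride = DIV_ROUND_UP(640, 1) * 4 = 2560 bytes, so pixel (10, 2, 0) lives
at 2 * 2560 + 10 * 4 = 5160 bytes into the image. For SVGA3D_DXT1 (4x4x1
blocks, 8 bytes per block) the same pixel falls in block (2, 0), giving an
offset of 2 * 8 = 16 bytes.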
-
-
-static inline u32
-svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
- surf_size_struct baseLevelSize,
- u32 numMipLevels,
- u32 face,
- u32 mip)
-
-{
- u32 offset;
- u32 mipChainBytes;
- u32 mipChainBytesToLevel;
- u32 i;
- const struct svga3d_surface_desc *desc;
- surf_size_struct mipSize;
- u32 bytes;
-
- desc = svga3dsurface_get_desc(format);
-
- mipChainBytes = 0;
- mipChainBytesToLevel = 0;
- for (i = 0; i < numMipLevels; i++) {
- mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
- bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
- mipChainBytes += bytes;
- if (i < mip)
- mipChainBytesToLevel += bytes;
- }
-
- offset = mipChainBytes * face + mipChainBytesToLevel;
-
- return offset;
-}
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836dedcfc2..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Silly typedefs for the svga headers. Currently the headers are shared
- * between all components that talk to svga, and as such they are in a
- * completely different style and use weird defines.
- *
- * This file lets all the ugly be prefixed with svga*.
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that vmware guest-backed
+ * contexts can be swapped out to their backing mobs by the device
+ * at any time and swapped back in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (which can happen at any time)
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context, we can't swap it in again just to kill bindings,
+ * because of backing mob reservation lockdep violations. So, as part of
+ * context swapout, we also kill all bindings of a context, so that they are
+ * already killed if a resource to which a binding points
+ * needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts,
+ * Therefore we can't easily protect this data by a per context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
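+
+/*
+ * Illustrative sketch (editorial aside, not part of the driver): the
+ * typical lifecycle of the tracked bindings, assuming a caller that holds
+ * binding_mutex and a populated struct vmw_ctx_bindinfo_view biv:
+ *
+ *	vmw_binding_add(cbs, &biv.bi, shader_slot, slot);
+ *	...
+ *	vmw_binding_state_scrub(cbs);	// context swapout: unbind on device
+ *	...
+ *	vmw_binding_rebind_all(cbs);	// context swapin: re-emit bindings
+ *	...
+ *	vmw_binding_state_kill(cbs);	// context destruction: stop tracking
+ */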
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT 0
+#define VMW_BINDING_PS_BIT 1
+#define VMW_BINDING_SO_BIT 2
+#define VMW_BINDING_VB_BIT 3
+#define VMW_BINDING_NUM_BITS 4
+
+#define VMW_BINDING_PS_SR_BIT 0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+ struct vmw_private *dev_priv;
+ struct list_head list;
+ struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+ struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+ struct vmw_ctx_bindinfo_view ds_view;
+ struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+ struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+ struct vmw_ctx_bindinfo_ib index_buffer;
+ struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+ unsigned long dirty;
+ DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+ u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+ u32 bind_cmd_count;
+ u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+ size_t size;
+ const size_t *offsets;
+ vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+ [vmw_ctx_binding_shader] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_shader),
+ .offsets = vmw_binding_shader_offsets,
+ .scrub_func = vmw_binding_scrub_shader},
+ [vmw_ctx_binding_rt] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_rt_offsets,
+ .scrub_func = vmw_binding_scrub_render_target},
+ [vmw_ctx_binding_tex] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_tex),
+ .offsets = vmw_binding_tex_offsets,
+ .scrub_func = vmw_binding_scrub_texture},
+ [vmw_ctx_binding_cb] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_cb),
+ .offsets = vmw_binding_cb_offsets,
+ .scrub_func = vmw_binding_scrub_cb},
+ [vmw_ctx_binding_dx_shader] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_shader),
+ .offsets = vmw_binding_shader_offsets,
+ .scrub_func = vmw_binding_scrub_dx_shader},
+ [vmw_ctx_binding_dx_rt] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_rt_offsets,
+ .scrub_func = vmw_binding_scrub_dx_rt},
+ [vmw_ctx_binding_sr] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_sr_offsets,
+ .scrub_func = vmw_binding_scrub_sr},
+ [vmw_ctx_binding_ds] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_dx_ds_offsets,
+ .scrub_func = vmw_binding_scrub_dx_rt},
+ [vmw_ctx_binding_so] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so),
+ .offsets = vmw_binding_so_offsets,
+ .scrub_func = vmw_binding_scrub_so},
+ [vmw_ctx_binding_vb] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_vb),
+ .offsets = vmw_binding_vb_offsets,
+ .scrub_func = vmw_binding_scrub_vb},
+ [vmw_ctx_binding_ib] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_ib),
+ .offsets = vmw_binding_ib_offsets,
+ .scrub_func = vmw_binding_scrub_ib},
+};
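+
+/*
+ * Editorial worked example of how the tables above are consumed by
+ * vmw_binding_loc() below; the values are illustrative only:
+ *
+ *	// Constant buffer binding, shader slot 1, binding slot 3:
+ *	const struct vmw_binding_info *b =
+ *		&vmw_binding_infos[vmw_ctx_binding_cb];
+ *	size_t offset = b->offsets[1] + b->size * 3;
+ *	// offset now addresses cbs->per_shader[1].const_buffers[3]
+ */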
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+ if (list_empty(&cbs->list))
+ return NULL;
+
+ return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+ ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+ enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+ const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+ size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+ return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+ list_del(&bi->ctx_list);
+ if (!list_empty(&bi->res_list))
+ list_del(&bi->res_list);
+ bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding, if applicable. Otherwise 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 shader_slot, u32 slot)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+ const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+ if (loc->ctx != NULL)
+ vmw_binding_drop(loc);
+
+ memcpy(loc, bi, b->size);
+ loc->scrubbed = false;
+ list_add(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Pointer to the staged context binding state built during execbuf.
+ * @bi: Information about the binding to transfer, pointing into @from.
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_binding_state *from,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ size_t offset = (unsigned long)bi - (unsigned long)from;
+ struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+ ((unsigned long) cbs + offset);
+
+ if (loc->ctx != NULL) {
+ WARN_ON(bi->scrubbed);
+
+ vmw_binding_drop(loc);
+ }
+
+ if (bi->res != NULL) {
+ memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &loc->res->binding_head);
+ }
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_state_scrub(cbs);
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ (void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ vmw_binding_res_list_scrub(head);
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_bindinfo *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->scrubbed) {
+ (void) vmw_binding_infos[entry->bt].scrub_func
+ (entry, false);
+ entry->scrubbed = true;
+ }
+ }
+
+ list_for_each_entry(entry, head, res_list) {
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(entry->ctx);
+
+ (void) vmw_binding_emit_dirty(cbs);
+ }
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent context binding state to commit to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+ vmw_binding_transfer(to, from, entry);
+ vmw_binding_drop(entry);
+ }
+}
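+
+/*
+ * Editorial sketch of the intended execbuf-style usage, with error
+ * handling elided:
+ *
+ *	staged = vmw_binding_state_alloc(dev_priv);
+ *	vmw_binding_add(staged, ...);	// collect bindings during validation
+ *	// ...submit the command stream to hardware...
+ *	vmw_binding_state_commit(persistent, staged);
+ *	vmw_binding_state_free(staged);
+ */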
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->scrubbed))
+ continue;
+
+		if (entry->res == NULL ||
+		    entry->res->id == SVGA3D_INVALID_ID)
+ continue;
+
+ ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->scrubbed = false;
+ }
+
+ return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_shader *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_ctx_bindinfo_view *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = binding->slot;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_ctx_bindinfo_tex *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct {
+ SVGA3dCmdSetTextureState c;
+ SVGA3dTextureState s1;
+ } body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for texture "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.c.cid = bi->ctx->id;
+ cmd->body.s1.stage = binding->texture_stage;
+ cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_shader *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_cb *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSingleConstantBuffer body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.slot = binding->slot;
+ cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+ if (rebind) {
+ cmd->body.offsetInBytes = binding->offset;
+ cmd->body.sizeInBytes = binding->size;
+ cmd->body.sid = bi->res->id;
+ } else {
+ cmd->body.offsetInBytes = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.sid = SVGA3D_INVALID_ID;
+ }
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ unsigned long i;
+
+ cbs->bind_cmd_count = 0;
+ cbs->bind_first_slot = 0;
+
+ for (i = 0; i < max_num; ++i, ++biv) {
+ if (!biv->bi.ctx)
+ break;
+
+ cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+ ((biv->bi.scrubbed) ?
+ SVGA3D_INVALID_ID : biv->bi.res->id);
+ }
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ unsigned long *dirty,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ unsigned long i, next_bit;
+
+ cbs->bind_cmd_count = 0;
+ i = find_first_bit(dirty, max_num);
+ next_bit = i;
+ cbs->bind_first_slot = i;
+
+ biv += i;
+ for (; i < max_num; ++i, ++biv) {
+ cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+ ((!biv->bi.ctx || biv->bi.scrubbed) ?
+ SVGA3D_INVALID_ID : biv->bi.res->id);
+
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
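+
+/*
+ * Editorial worked example: with dirty bits {1, 3} set and slots 1-3 bound,
+ * the loop above emits one contiguous range that also covers the clean
+ * slot in between:
+ *
+ *	cbs->bind_first_slot = 1;
+ *	cbs->bind_cmd_buffer = { id(slot 1), id(slot 2), id(slot 3) };
+ *	cbs->bind_cmd_count  = 3;
+ */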
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot for which to emit bindings.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+ int shader_slot)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->per_shader[shader_slot].shader_res[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_view_ids(cbs, loc,
+ cbs->per_shader[shader_slot].dirty_sr,
+ SVGA3D_DX_MAX_SRVIEWS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX shader"
+ " resource binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+ cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+ cmd->body.startView = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX render-target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+ if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+ cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+ else
+ cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_so *biso =
+ container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+ unsigned long i;
+ SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ cbs->bind_first_slot = 0;
+
+ for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+ ++cbs->bind_cmd_count) {
+ if (!biso->bi.ctx)
+ break;
+
+ if (!biso->bi.scrubbed) {
+ so_buffer->sid = biso->bi.res->id;
+ so_buffer->offset = biso->offset;
+ so_buffer->sizeInBytes = biso->size;
+ } else {
+ so_buffer->sid = SVGA3D_INVALID_ID;
+ so_buffer->offset = 0;
+ so_buffer->sizeInBytes = 0;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ } *cmd;
+ size_t cmd_size, so_target_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+ cmd_size = sizeof(*cmd) + so_target_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX SO target"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+ cmd->header.size = sizeof(cmd->body) + so_target_size;
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+ u32 i;
+	int ret = 0;
+
+ for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+ if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+ continue;
+
+ ret = vmw_emit_set_sr(cbs, i);
+ if (ret)
+ break;
+
+ __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+ }
+
+	return ret;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi,
+ unsigned long *dirty,
+ u32 max_num)
+{
+ const struct vmw_ctx_bindinfo_vb *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ unsigned long i, next_bit;
+ SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+ cbs->bind_cmd_count = 0;
+ i = find_first_bit(dirty, max_num);
+ next_bit = i;
+ cbs->bind_first_slot = i;
+
+ biv += i;
+ for (; i < max_num; ++i, ++biv, ++vbs) {
+ if (!biv->bi.ctx || biv->bi.scrubbed) {
+ vbs->sid = SVGA3D_INVALID_ID;
+ vbs->stride = 0;
+ vbs->offset = 0;
+ } else {
+ vbs->sid = biv->bi.res->id;
+ vbs->stride = biv->stride;
+ vbs->offset = biv->offset;
+ }
+ cbs->bind_cmd_count++;
+ if (next_bit == i) {
+ next_bit = find_next_bit(dirty, max_num, i + 1);
+ if (next_bit >= max_num)
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc =
+ &cbs->vertex_buffers[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ } *cmd;
+ size_t cmd_size, set_vb_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+ SVGA3D_DX_MAX_VERTEXBUFFERS);
+ if (cbs->bind_cmd_count == 0)
+ return 0;
+
+ set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+ cmd_size = sizeof(*cmd) + set_vb_size;
+ cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+ " binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+ cmd->header.size = sizeof(cmd->body) + set_vb_size;
+ cmd->body.startBuffer = cbs->bind_first_slot;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ bitmap_clear(cbs->dirty_vb,
+ cbs->bind_first_slot, cbs->bind_cmd_count);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+ int ret = 0;
+ unsigned long hit = 0;
+
+ while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+ < VMW_BINDING_NUM_BITS) {
+
+ switch (hit) {
+ case VMW_BINDING_RT_BIT:
+ ret = vmw_emit_set_rt(cbs);
+ break;
+ case VMW_BINDING_PS_BIT:
+ ret = vmw_binding_emit_dirty_ps(cbs);
+ break;
+ case VMW_BINDING_SO_BIT:
+ ret = vmw_emit_set_so(cbs);
+ break;
+ case VMW_BINDING_VB_BIT:
+ ret = vmw_emit_set_vb(cbs);
+ break;
+ default:
+ BUG();
+ }
+ if (ret)
+ return ret;
+
+ __clear_bit(hit, &cbs->dirty);
+ hit++;
+ }
+
+ return 0;
+}
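+
+/*
+ * Editorial sketch of the deferred path: the per-slot scrub functions
+ * below only mark state dirty; vmw_binding_emit_dirty() later converts
+ * the dirty bits into batched device commands:
+ *
+ *	vmw_binding_scrub_vb(bi, false);  // sets a dirty_vb bit + VB_BIT
+ *	...
+ *	vmw_binding_emit_dirty(cbs);      // one batched SetVertexBuffers
+ */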
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_view *biv =
+ container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+ __set_bit(VMW_BINDING_PS_SR_BIT,
+ &cbs->per_shader[biv->shader_slot].dirty);
+ __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_vb *bivb =
+ container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ struct vmw_ctx_binding_state *cbs =
+ vmw_context_binding_state(bi->ctx);
+
+ __set_bit(bivb->slot, cbs->dirty_vb);
+ __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_ib *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetIndexBuffer body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+ "binding.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+ cmd->header.size = sizeof(cmd->body);
+ if (rebind) {
+ cmd->body.sid = bi->res->id;
+ cmd->body.format = binding->format;
+ cmd->body.offset = binding->offset;
+ } else {
+ cmd->body.sid = SVGA3D_INVALID_ID;
+ cmd->body.format = 0;
+ cmd->body.offset = 0;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_ctx_binding_state *cbs;
+ int ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+ false, false);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cbs = vzalloc(sizeof(*cbs));
+ if (!cbs) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cbs->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&cbs->list);
+
+ return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_private *dev_priv = cbs->dev_priv;
+
+ vfree(cbs);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+ return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_bindinfo *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run time; it exists only to hold various
+ * build asserts that verify assumptions the code optimizations rely on.
+ */
+static void vmw_binding_build_asserts(void)
+{
+ BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+ BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+ BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+ /*
+ * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+ * view id arrays.
+ */
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+ /*
+ * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+ * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+ */
+ BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+ VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+ BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+ VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource-to-context binding types
+ */
+enum vmw_ctx_binding_type {
+ vmw_ctx_binding_shader,
+ vmw_ctx_binding_rt,
+ vmw_ctx_binding_tex,
+ vmw_ctx_binding_cb,
+ vmw_ctx_binding_dx_shader,
+ vmw_ctx_binding_dx_rt,
+ vmw_ctx_binding_sr,
+ vmw_ctx_binding_ds,
+ vmw_ctx_binding_so,
+ vmw_ctx_binding_vb,
+ vmw_ctx_binding_ib,
+ vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+ struct list_head ctx_list;
+ struct list_head res_list;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
+ enum vmw_ctx_binding_type bt;
+ bool scrubbed;
+};
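+
+/*
+ * Editorial note: the binding types below all embed this struct as @bi,
+ * and the binding manager recovers the derived type with container_of(),
+ * e.g.:
+ *
+ *	struct vmw_ctx_bindinfo_view *biv =
+ *		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+ */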
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+ struct vmw_ctx_bindinfo bi;
+ uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+ struct vmw_ctx_bindinfo bi;
+ SVGA3dShaderType shader_slot;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 size;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 stride;
+ uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+ struct vmw_ctx_bindinfo bi;
+ uint32 offset;
+ uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffer: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+ struct vmw_ctx_bindinfo_shader shader;
+ struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+ struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+ DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+ unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *ci,
+ u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+ struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9db9d2..3329f623c8bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
+static struct ttm_place mob_ne_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
.busy_placement = &mob_placement_flags
};
+struct ttm_placement vmw_mob_ne_placement = {
+ .num_placement = 1,
+ .num_busy_placement = 1,
+ .placement = &mob_ne_placement_flags,
+ .busy_placement = &mob_ne_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/**
* vmw_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
vmw_resource_move_notify(bo, mem);
+ vmw_query_move_notify(bo, mem);
}
/**
* vmw_swap_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to be swapped out.
+ * @bo: The TTM buffer object about to be swapped out.
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that a page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+ (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
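+
+/*
+ * Editorial note on the sizing above, assuming sizeof(SVGACBHeader) <= 64:
+ *
+ *	VMW_CMDBUF_INLINE_SIZE = 1024 - ALIGN(sizeof(SVGACBHeader), 64)
+ *			       = 1024 - 64 = 960 bytes of inline space
+ *
+ * A struct vmw_cmdbuf_dheader then occupies exactly 1024 bytes, so four
+ * of them tile a 4096-byte page and a DMA pool allocation never straddles
+ * a page boundary.
+ */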
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware
+ */
+struct vmw_cmdbuf_context {
+ struct list_head submitted;
+ struct list_head hw_submitted;
+ struct list_head preempted;
+ unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data are protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @header: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+ struct mutex cur_mutex;
+ struct mutex space_mutex;
+ struct work_struct work;
+ struct vmw_private *dev_priv;
+ struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+ struct list_head error;
+ struct drm_mm mm;
+ struct ttm_buffer_object *cmd_space;
+ struct ttm_bo_kmap_obj map_obj;
+ u8 *map;
+ struct vmw_cmdbuf_header *cur;
+ size_t cur_pos;
+ size_t default_size;
+ unsigned max_hw_submitted;
+ spinlock_t lock;
+ struct dma_pool *headers;
+ struct dma_pool *dheaders;
+ struct tasklet_struct tasklet;
+ wait_queue_head_t alloc_queue;
+ wait_queue_head_t idle_queue;
+ bool irq_on;
+ bool using_mob;
+ bool has_pool;
+ dma_addr_t handle;
+ size_t size;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+ struct vmw_cmdbuf_man *man;
+ SVGACBHeader *cb_header;
+ SVGACBContext cb_context;
+ struct list_head list;
+ struct drm_mm_node node;
+ dma_addr_t handle;
+ u8 *cmd;
+ size_t size;
+ size_t reserved;
+ bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+ SVGACBHeader cb_header;
+ u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+ size_t page_size;
+ struct drm_mm_node *node;
+ bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+ for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+ ++(_i), ++(_ctx))
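+
+/*
+ * Editorial sketch of typical usage of the iteration macro above:
+ *
+ *	struct vmw_cmdbuf_context *ctx;
+ *	u32 i;
+ *
+ *	for_each_cmdbuf_ctx(man, i, ctx)
+ *		vmw_cmdbuf_ctx_init(ctx);
+ */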
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+ if (interruptible) {
+ if (mutex_lock_interruptible(&man->cur_mutex))
+ return -ERESTARTSYS;
+ } else {
+ mutex_lock(&man->cur_mutex);
+ }
+
+ return 0;
+}
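+
+/*
+ * Editorial sketch of the lock/unlock pairing, assuming a caller that may
+ * be interrupted by signals:
+ *
+ *	ret = vmw_cmdbuf_cur_lock(man, true);
+ *	if (ret)
+ *		return ret;	// -ERESTARTSYS: restart the syscall
+ *	// ...manipulate man->cur and man->cur_pos...
+ *	vmw_cmdbuf_cur_unlock(man);
+ */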
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+ mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_dheader *dheader;
+
+ if (WARN_ON_ONCE(!header->inline_space))
+ return;
+
+ dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+ cb_header);
+ dma_pool_free(header->man->dheaders, dheader, header->handle);
+ kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ BUG_ON(!spin_is_locked(&man->lock));
+
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+
+ drm_mm_remove_node(&header->node);
+ wake_up_all(&man->alloc_queue);
+ if (header->cb_header)
+ dma_pool_free(man->headers, header->cb_header,
+ header->handle);
+ kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+
+ /* Avoid locking if inline_space */
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+ return;
+ }
+ spin_lock_bh(&man->lock);
+ __vmw_cmdbuf_header_free(header);
+ spin_unlock_bh(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+ struct vmw_cmdbuf_man *man = header->man;
+ u32 val;
+
+ if (sizeof(header->handle) > 4)
+ val = (header->handle >> 32);
+ else
+ val = 0;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+ val = (header->handle & 0xFFFFFFFFULL);
+ val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+ vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+ return header->cb_header->status;
+}
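+
+/*
+ * Worked example (illustrative values only): submitting a header whose
+ * DMA address is 0x123456000 on SVGA_CB_CONTEXT_0 writes
+ *
+ *	SVGA_REG_COMMAND_HIGH <- 0x00000001	(handle >> 32)
+ *	SVGA_REG_COMMAND_LOW  <- 0x23456000	(low 32 bits | context 0)
+ *
+ * The 64-byte alignment of headers in the DMA pools keeps the low bits
+ * of the handle free for the context id.
+ */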
+
+/**
+ * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+ INIT_LIST_HEAD(&ctx->hw_submitted);
+ INIT_LIST_HEAD(&ctx->submitted);
+ INIT_LIST_HEAD(&ctx->preempted);
+ ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx)
+{
+ while (ctx->num_hw_submitted < man->max_hw_submitted &&
+ !list_empty(&ctx->submitted)) {
+ struct vmw_cmdbuf_header *entry;
+ SVGACBStatus status;
+
+ entry = list_first_entry(&ctx->submitted,
+ struct vmw_cmdbuf_header,
+ list);
+
+ status = vmw_cmdbuf_header_submit(entry);
+
+ /* This should never happen */
+ if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ break;
+ }
+
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &ctx->hw_submitted);
+ ctx->num_hw_submitted++;
+ }
+}
+
+/**
+ * vmw_cmdbuf_ctx_process: Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Incremented if the context still has queued command buffers.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers: typically freeing them, but taking appropriate action on
+ * preemption or error. Wakes up waiters as appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_context *ctx,
+ int *notempty)
+{
+ struct vmw_cmdbuf_header *entry, *next;
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+
+ list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+ SVGACBStatus status = entry->cb_header->status;
+
+ if (status == SVGA_CB_STATUS_NONE)
+ break;
+
+ list_del(&entry->list);
+ wake_up_all(&man->idle_queue);
+ ctx->num_hw_submitted--;
+ switch (status) {
+ case SVGA_CB_STATUS_COMPLETED:
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ case SVGA_CB_STATUS_COMMAND_ERROR:
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ list_add_tail(&entry->list, &man->error);
+ schedule_work(&man->work);
+ break;
+ case SVGA_CB_STATUS_PREEMPTED:
+ list_add(&entry->list, &ctx->preempted);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined command buffer status.\n");
+ __vmw_cmdbuf_header_free(entry);
+ break;
+ }
+ }
+
+ vmw_cmdbuf_ctx_submit(man, ctx);
+ if (!list_empty(&ctx->submitted))
+ (*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate that it should be rerun due to
+ * possibly missed IRQs if IRQs have just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+ int notempty = 0;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+ if (man->irq_on && !notempty) {
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = false;
+ } else if (!man->irq_on && notempty) {
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ man->irq_on = true;
+
+ /* Rerun in case we just missed an irq. */
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ SVGACBContext cb_context)
+{
+ if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+ header->cb_header->dxContext = 0;
+ header->cb_context = cb_context;
+ list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+ struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+ spin_lock(&man->lock);
+ if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+ (void) vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+ struct vmw_cmdbuf_man *man =
+ container_of(work, struct vmw_cmdbuf_man, work);
+ struct vmw_cmdbuf_header *entry, *next;
+ bool restart = false;
+
+ spin_lock_bh(&man->lock);
+ list_for_each_entry_safe(entry, next, &man->error, list) {
+ restart = true;
+ DRM_ERROR("Command buffer error.\n");
+
+ list_del(&entry->list);
+ __vmw_cmdbuf_header_free(entry);
+ wake_up_all(&man->idle_queue);
+ }
+ spin_unlock_bh(&man->lock);
+
+ if (restart && vmw_cmdbuf_startstop(man, true))
+ DRM_ERROR("Failed restarting command buffer context 0.\n");
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Also check the preempted queue for pending command buffers.
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+ bool check_preempted)
+{
+ struct vmw_cmdbuf_context *ctx;
+ bool idle = false;
+ int i;
+
+ spin_lock_bh(&man->lock);
+ vmw_cmdbuf_man_process(man);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (!list_empty(&ctx->submitted) ||
+ !list_empty(&ctx->hw_submitted) ||
+ (check_preempted && !list_empty(&ctx->preempted)))
+ goto out_unlock;
+ }
+
+ idle = list_empty(&man->error);
+
+out_unlock:
+ spin_unlock_bh(&man->lock);
+
+ return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+ struct vmw_cmdbuf_header *cur = man->cur;
+
+ WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+ if (!cur)
+ return;
+
+ spin_lock_bh(&man->lock);
+ if (man->cur_pos == 0) {
+ __vmw_cmdbuf_header_free(cur);
+ goto out_unlock;
+ }
+
+ man->cur->cb_header->length = man->cur_pos;
+ vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+ spin_unlock_bh(&man->lock);
+ man->cur = NULL;
+ man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptibly while waiting.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+ bool interruptible)
+{
+ int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+ if (ret)
+ return ret;
+
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptible while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = vmw_cmdbuf_cur_flush(man, interruptible);
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+ if (interruptible) {
+ ret = wait_event_interruptible_timeout
+ (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+ timeout);
+ } else {
+ ret = wait_event_timeout
+ (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+ timeout);
+ }
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ if (ret == 0) {
+ if (!vmw_cmdbuf_man_idle(man, true))
+ ret = -EBUSY;
+ else
+ ret = 0;
+ }
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
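+
+/*
+ * Illustrative caller sketch (not part of the driver), using the
+ * ten-second timeout this file uses elsewhere:
+ *
+ *	if (vmw_cmdbuf_idle(man, false, 10*HZ))
+ *		DRM_ERROR("Command buffer manager failed to idle.\n");
+ */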
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Holds the requested size on entry, and the
+ * allocated mm node on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true on success,
+ * in which case @info->done is set and @info->node holds the allocated node.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_alloc_info *info)
+{
+ int ret;
+
+ if (info->done)
+ return true;
+
+ memset(info->node, 0, sizeof(*info->node));
+ spin_lock_bh(&man->lock);
+ ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+ 0, 0,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ spin_unlock_bh(&man->lock);
+ info->done = !ret;
+
+ return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space is currently available, it turns on IRQ handling and sleeps waiting for it to
+ * become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+ struct drm_mm_node *node,
+ size_t size,
+ bool interruptible)
+{
+ struct vmw_cmdbuf_alloc_info info;
+
+ info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ info.node = node;
+ info.done = false;
+
+ /*
+ * To prevent starvation of large requests, allow only one allocating
+ * call at a time to wait for space.
+ */
+ if (interruptible) {
+ if (mutex_lock_interruptible(&man->space_mutex))
+ return -ERESTARTSYS;
+ } else {
+ mutex_lock(&man->space_mutex);
+ }
+
+ /* Try to allocate space without waiting. */
+ if (vmw_cmdbuf_try_alloc(man, &info))
+ goto out_unlock;
+
+ vmw_generic_waiter_add(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+ if (interruptible) {
+ int ret;
+
+ ret = wait_event_interruptible
+ (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+ if (ret) {
+ vmw_generic_waiter_remove
+ (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+ mutex_unlock(&man->space_mutex);
+ return ret;
+ }
+ } else {
+ wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+ }
+ vmw_generic_waiter_remove(man->dev_priv,
+ SVGA_IRQFLAG_COMMAND_BUFFER,
+ &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+ mutex_unlock(&man->space_mutex);
+
+ return 0;
+}
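+
+/*
+ * Design note with an illustrative sketch (names are made up): the
+ * single space_mutex admits one sleeping allocator at a time, so a
+ * large request cannot be starved by a stream of small ones that would
+ * otherwise win every wakeup race:
+ *
+ *	mutex_lock(&single_waiter_mutex);
+ *	wait_event(queue, try_alloc(&info));
+ *	mutex_unlock(&single_waiter_mutex);
+ */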
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ size_t size,
+ bool interruptible)
+{
+ SVGACBHeader *cb_hdr;
+ size_t offset;
+ int ret;
+
+ if (!man->has_pool)
+ return -ENOMEM;
+
+ ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+
+ if (ret)
+ return ret;
+
+ header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+ &header->handle);
+ if (!header->cb_header) {
+ ret = -ENOMEM;
+ goto out_no_cb_header;
+ }
+
+ header->size = header->node.size << PAGE_SHIFT;
+ cb_hdr = header->cb_header;
+ offset = header->node.start << PAGE_SHIFT;
+ header->cmd = man->map + offset;
+ memset(cb_hdr, 0, sizeof(*cb_hdr));
+ if (man->using_mob) {
+ cb_hdr->flags = SVGA_CB_FLAG_MOB;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobOffset = offset;
+ } else {
+ cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+ }
+
+ return 0;
+
+out_no_cb_header:
+ spin_lock_bh(&man->lock);
+ drm_mm_remove_node(&header->node);
+ spin_unlock_bh(&man->lock);
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+ struct vmw_cmdbuf_header *header,
+ int size)
+{
+ struct vmw_cmdbuf_dheader *dheader;
+ SVGACBHeader *cb_hdr;
+
+ if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+ return -ENOMEM;
+
+ dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
+ if (!dheader)
+ return -ENOMEM;
+
+ header->inline_space = true;
+ header->size = VMW_CMDBUF_INLINE_SIZE;
+ cb_hdr = &dheader->cb_header;
+ header->cb_header = cb_hdr;
+ header->cmd = dheader->cmd;
+ memset(dheader, 0, sizeof(*dheader));
+ cb_hdr->status = SVGA_CB_STATUS_NONE;
+ cb_hdr->flags = SVGA_CB_FLAG_NONE;
+ cb_hdr->ptr.pa = (u64)header->handle +
+ (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @p_header: Points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+ size_t size, bool interruptible,
+ struct vmw_cmdbuf_header **p_header)
+{
+ struct vmw_cmdbuf_header *header;
+ int ret = 0;
+
+ *p_header = NULL;
+
+ header = kzalloc(sizeof(*header), GFP_KERNEL);
+ if (!header)
+ return ERR_PTR(-ENOMEM);
+
+ if (size <= VMW_CMDBUF_INLINE_SIZE)
+ ret = vmw_cmdbuf_space_inline(man, header, size);
+ else
+ ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+ if (ret) {
+ kfree(header);
+ return ERR_PTR(ret);
+ }
+
+ header->man = man;
+ INIT_LIST_HEAD(&header->list);
+ header->cb_header->status = SVGA_CB_STATUS_NONE;
+ *p_header = header;
+
+ return header->cmd;
+}
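+
+/*
+ * Illustrative usage sketch (payload and size are placeholders);
+ * vmw_cmdbuf_send_device_command() below follows the same pattern:
+ *
+ *	struct vmw_cmdbuf_header *header;
+ *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
+ *
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	memcpy(cmd, commands, size);
+ *	header->cb_header->length = size;
+ *	... submit, or vmw_cmdbuf_header_free(header) on error ...
+ */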
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+ size_t size,
+ int ctx_id,
+ bool interruptible)
+{
+ struct vmw_cmdbuf_header *cur;
+ void *ret;
+
+ if (vmw_cmdbuf_cur_lock(man, interruptible))
+ return ERR_PTR(-ERESTARTSYS);
+
+ cur = man->cur;
+ if (cur && (size + man->cur_pos > cur->size ||
+ ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+ ctx_id != cur->cb_header->dxContext)))
+ __vmw_cmdbuf_cur_flush(man);
+
+ if (!man->cur) {
+ ret = vmw_cmdbuf_alloc(man,
+ max_t(size_t, size, man->default_size),
+ interruptible, &man->cur);
+ if (IS_ERR(ret)) {
+ vmw_cmdbuf_cur_unlock(man);
+ return ret;
+ }
+
+ cur = man->cur;
+ }
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ cur->cb_header->dxContext = ctx_id;
+ }
+
+ cur->reserved = size;
+
+ return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+ size_t size, bool flush)
+{
+ struct vmw_cmdbuf_header *cur = man->cur;
+
+ WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+ WARN_ON(size > cur->reserved);
+ man->cur_pos += size;
+ if (!size)
+ cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+ if (flush)
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+ int ctx_id, bool interruptible,
+ struct vmw_cmdbuf_header *header)
+{
+ if (!header)
+ return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+ if (size > header->size)
+ return ERR_PTR(-EINVAL);
+
+ if (ctx_id != SVGA3D_INVALID_ID) {
+ header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+ header->cb_header->dxContext = ctx_id;
+ }
+
+ header->reserved = size;
+ return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+ struct vmw_cmdbuf_header *header, bool flush)
+{
+ if (!header) {
+ vmw_cmdbuf_commit_cur(man, size, flush);
+ return;
+ }
+
+ (void) vmw_cmdbuf_cur_lock(man, false);
+ __vmw_cmdbuf_cur_flush(man);
+ WARN_ON(size > header->reserved);
+ man->cur = header;
+ man->cur_pos = size;
+ if (!size)
+ header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+ if (flush)
+ __vmw_cmdbuf_cur_flush(man);
+ vmw_cmdbuf_cur_unlock(man);
+}
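+
+/*
+ * Illustrative sketch (size and payload are placeholders) of appending
+ * commands to the current kernel command buffer with the reserve/commit
+ * pair above:
+ *
+ *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
+ *				       true, NULL);
+ *
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	memcpy(cmd, commands, size);
+ *	vmw_cmdbuf_commit(man, size, NULL, false);
+ */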
+
+/**
+ * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
+ *
+ * @man: The command buffer manager.
+ */
+void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
+{
+ if (!man)
+ return;
+
+ tasklet_schedule(&man->tasklet);
+}
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+ const void *command,
+ size_t size)
+{
+ struct vmw_cmdbuf_header *header;
+ int status;
+ void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ memcpy(cmd, command, size);
+ header->cb_header->length = size;
+ header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+ spin_lock_bh(&man->lock);
+ status = vmw_cmdbuf_header_submit(header);
+ spin_unlock_bh(&man->lock);
+ vmw_cmdbuf_header_free(header);
+
+ if (status != SVGA_CB_STATUS_COMPLETED) {
+ DRM_ERROR("Device context command failed with status %d\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+ bool enable)
+{
+ struct {
+ uint32 id;
+ SVGADCCmdStartStop body;
+ } __packed cmd;
+
+ cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+ cmd.body.enable = (enable) ? 1 : 0;
+ cmd.body.context = SVGA_CB_CONTEXT_0;
+
+ return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Set the size and allocate the main command buffer space pool,
+ * as well as the default size of the command buffer for
+ * small kernel submissions. If successful, this enables large command
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+ size_t size, size_t default_size)
+{
+ struct vmw_private *dev_priv = man->dev_priv;
+ bool dummy;
+ int ret;
+
+ if (man->has_pool)
+ return -EINVAL;
+
+ /* First, try to allocate a huge chunk of DMA memory */
+ size = PAGE_ALIGN(size);
+ man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ &man->handle, GFP_KERNEL);
+ if (man->map) {
+ man->using_mob = false;
+ } else {
+ /*
+ * DMA memory failed. If we can have command buffers in a
+ * MOB, try to use that instead. Note that this will
+ * actually call into the already enabled manager, when
+ * binding the MOB.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ return -ENOMEM;
+
+ ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+ &vmw_mob_ne_placement, 0, false, NULL,
+ &man->cmd_space);
+ if (ret)
+ return ret;
+
+ man->using_mob = true;
+ ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+ &man->map_obj);
+ if (ret)
+ goto out_no_map;
+
+ man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ }
+
+ man->size = size;
+ drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+ man->has_pool = true;
+ man->default_size = default_size;
+ DRM_INFO("Using command buffers with %s pool.\n",
+ (man->using_mob) ? "MOB" : "DMA");
+
+ return 0;
+
+out_no_map:
+ if (man->using_mob)
+ ttm_bo_unref(&man->cmd_space);
+
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success, or an error
+ * pointer on failure. The command buffer manager will be enabled for
+ * submissions of size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_man *man;
+ struct vmw_cmdbuf_context *ctx;
+ int i;
+ int ret;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+ return ERR_PTR(-ENOSYS);
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->headers = dma_pool_create("vmwgfx cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(SVGACBHeader),
+ 64, PAGE_SIZE);
+ if (!man->headers) {
+ ret = -ENOMEM;
+ goto out_no_pool;
+ }
+
+ man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+ &dev_priv->dev->pdev->dev,
+ sizeof(struct vmw_cmdbuf_dheader),
+ 64, PAGE_SIZE);
+ if (!man->dheaders) {
+ ret = -ENOMEM;
+ goto out_no_dpool;
+ }
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ vmw_cmdbuf_ctx_init(ctx);
+
+ INIT_LIST_HEAD(&man->error);
+ spin_lock_init(&man->lock);
+ mutex_init(&man->cur_mutex);
+ mutex_init(&man->space_mutex);
+ tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+ (unsigned long) man);
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
+ init_waitqueue_head(&man->alloc_queue);
+ init_waitqueue_head(&man->idle_queue);
+ man->dev_priv = dev_priv;
+ man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+ INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+ vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+ &dev_priv->error_waiters);
+ ret = vmw_cmdbuf_startstop(man, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer context 0.\n");
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
+
+ return man;
+
+out_no_dpool:
+ dma_pool_destroy(man->headers);
+out_no_pool:
+ kfree(man);
+
+ return ERR_PTR(ret);
+}
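+
+/*
+ * Illustrative bring-up sequence (pool_size is a placeholder; error
+ * handling abbreviated):
+ *
+ *	man = vmw_cmdbuf_man_create(dev_priv);
+ *	if (IS_ERR(man))
+ *		return PTR_ERR(man);
+ *
+ *	... once MOB memory management is up ...
+ *	ret = vmw_cmdbuf_set_pool_size(man, pool_size,
+ *				       VMW_CMDBUF_INLINE_SIZE);
+ */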
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+ if (!man->has_pool)
+ return;
+
+ man->has_pool = false;
+ man->default_size = VMW_CMDBUF_INLINE_SIZE;
+ (void) vmw_cmdbuf_idle(man, false, 10*HZ);
+ if (man->using_mob) {
+ (void) ttm_bo_kunmap(&man->map_obj);
+ ttm_bo_unref(&man->cmd_space);
+ } else {
+ dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ man->size, man->map, man->handle);
+ }
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+ WARN_ON_ONCE(man->has_pool);
+ (void) vmw_cmdbuf_idle(man, false, 10*HZ);
+ if (vmw_cmdbuf_startstop(man, false))
+ DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+ vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+ &man->dev_priv->error_waiters);
+ tasklet_kill(&man->tasklet);
+ (void) cancel_work_sync(&man->work);
+ dma_pool_destroy(man->dheaders);
+ dma_pool_destroy(man->headers);
+ mutex_destroy(&man->cur_mutex);
+ mutex_destroy(&man->space_mutex);
+ kfree(man);
+}
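+
+/*
+ * Illustrative teardown order (mirrors bring-up): remove the pool while
+ * MOB memory management is still alive, and destroy the manager last:
+ *
+ *	vmw_cmdbuf_remove_pool(man);
+ *	... tear down MOB memory management ...
+ *	vmw_cmdbuf_man_destroy(man);
+ */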
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f8dad0..13db8a2851ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
-enum vmw_cmdbuf_res_state {
- VMW_CMDBUF_RES_COMMITED,
- VMW_CMDBUF_RES_ADD,
- VMW_CMDBUF_RES_DEL
-};
-
/**
* struct vmw_cmdbuf_res - Command buffer managed resource entry.
*
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
list_for_each_entry_safe(entry, next, list, head) {
list_del(&entry->head);
+ if (entry->res->func->commit_notify)
+ entry->res->func->commit_notify(entry->res,
+ entry->state);
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
- entry->state = VMW_CMDBUF_RES_COMMITED;
+ entry->state = VMW_CMDBUF_RES_COMMITTED;
list_add_tail(&entry->head, &entry->man->list);
break;
case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
&entry->hash);
list_del(&entry->head);
list_add_tail(&entry->head, &entry->man->list);
- entry->state = VMW_CMDBUF_RES_COMMITED;
+ entry->state = VMW_CMDBUF_RES_COMMITTED;
break;
default:
BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The returned pointer is
+ * not ref-counted.
*
* This function looks up the struct vmw_cmdbuf_res entry from the manager
* hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
- struct list_head *list)
+ struct list_head *list,
+ struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(man, entry);
+ *res_p = NULL;
break;
- case VMW_CMDBUF_RES_COMMITED:
+ case VMW_CMDBUF_RES_COMMITTED:
(void) drm_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
+ *res_p = entry->res;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac92874404d..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
- struct vmw_ctx_binding_state cbs;
+ struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+ spinlock_t cotable_lock;
+ struct vmw_dma_buffer *dx_query_mob;
};
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
.unbind = vmw_gb_context_unbind
};
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
- [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
- [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
- [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+ .res_type = vmw_res_dx_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "dx contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_context_create,
+ .destroy = vmw_dx_context_destroy,
+ .bind = vmw_dx_context_bind,
+ .unbind = vmw_dx_context_unbind
+};
/**
* Context management:
*/
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+ struct vmw_resource *res;
+ int i;
+
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[i];
+ uctx->cotables[i] = NULL;
+ spin_unlock(&uctx->cotable_lock);
+
+ if (res)
+ vmw_resource_unreference(&res);
+ }
+}
+
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
- if (res->func->destroy == vmw_gb_context_destroy) {
+ if (res->func->destroy == vmw_gb_context_destroy ||
+ res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill(&uctx->cbs);
- (void) vmw_gb_context_destroy(res);
+ vmw_binding_state_kill(uctx->cbs);
+ (void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+ vmw_context_cotables_unref(uctx);
return;
}
@@ -135,43 +162,67 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
return;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ bool dx,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res))
{
- int ret;
+ int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
+ res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ SVGA3D_CONTEXT_DATA_SIZE);
ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_context_func);
- res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ res_free,
+ dx ? &vmw_dx_context_func :
+ &vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
- if (unlikely(IS_ERR(uctx->man))) {
+ if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
}
}
- memset(&uctx->cbs, 0, sizeof(uctx->cbs));
- INIT_LIST_HEAD(&uctx->cbs.list);
+ uctx->cbs = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(uctx->cbs)) {
+ ret = PTR_ERR(uctx->cbs);
+ goto out_err;
+ }
+
+ spin_lock_init(&uctx->cotable_lock);
+
+ if (dx) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+ &uctx->res, i);
+ if (unlikely(uctx->cotables[i] == NULL)) {
+ ret = -ENOMEM;
+ goto out_cotables;
+ }
+ }
+ }
+
+
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+out_cotables:
+ vmw_context_cotables_unref(uctx);
out_err:
if (res_free)
res_free(res);
@@ -182,7 +233,8 @@ out_err:
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res),
+ bool dx)
{
int ret;
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
if (dev_priv->has_mob)
- return vmw_gb_context_init(dev_priv, res, res_free);
+ return vmw_gb_context_init(dev_priv, dx, res, res_free);
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
@@ -232,19 +284,10 @@ out_early:
return ret;
}
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
-
- return (ret == 0) ? res : NULL;
-}
+/*
+ * GB context.
+ */
static int vmw_gb_context_create(struct vmw_resource *res)
{
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
"binding.\n");
return -ENOMEM;
}
-
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_scrub(&uctx->cbs);
+ vmw_binding_state_scrub(uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_fifo_resource_inc(dev_priv);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+ return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ int i;
+
+ vmw_binding_state_scrub(uctx->cbs);
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ struct vmw_resource *res;
+
+ /* Avoid racing with ongoing cotable destruction. */
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[vmw_cotable_scrub_order[i]];
+ if (res)
+ res = vmw_resource_reference_unless_doomed(res);
+ spin_unlock(&uctx->cotable_lock);
+ if (!res)
+ continue;
+
+ WARN_ON(vmw_cotable_scrub(res, readback));
+ vmw_resource_unreference(&res);
+ }
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_context_scrub_cotables(res, readback);
+
+ if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+ readback) {
+ WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+ if (vmw_query_readback_all(uctx->dx_query_mob))
+ DRM_ERROR("Failed to read back query states\n");
+ }
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
+ if (ctx->cbs)
+ vmw_binding_state_free(ctx->cbs);
+
+ (void) vmw_context_bind_dx_query(res, NULL);
+
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+ struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ if (!dev_priv->has_dx && dx) {
+ DRM_ERROR("DX contexts not supported by device.\n");
+ return -EINVAL;
+ }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
* From here on, the destructor takes over resource freeing.
*/
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_unlock;
@@ -535,387 +810,128 @@ out_err:
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
-
-}
-
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SET_SHADER;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- cmd->body.target.face = 0;
- cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
}
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
- bool rebind)
-{
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- struct {
- SVGA3dCmdSetTextureState c;
- SVGA3dTextureState s1;
- } body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
- return -ENOMEM;
- }
-
-
- cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.c.cid = bi->ctx->id;
- cmd->body.s1.stage = bi->i1.texture_stage;
- cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- list_del(&cb->ctx_list);
- if (!list_empty(&cb->res_list))
- list_del(&cb->res_list);
- cb->bi.ctx = NULL;
+ return vmw_context_define(dev, data, file_priv, false);
}
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) bi->i1.rt_type);
- return -EINVAL;
- }
- loc = &cbs->render_targets[bi->i1.rt_type];
- break;
- case vmw_ctx_binding_tex:
- if (unlikely((unsigned)bi->i1.texture_stage >=
- SVGA3D_NUM_TEXTURE_UNITS)) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) bi->i1.texture_stage);
- return -EINVAL;
- }
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- if (unlikely((unsigned)bi->i1.shader_type >=
- SVGA3D_SHADERTYPE_MAX)) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) bi->i1.shader_type);
- return -EINVAL;
- }
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
+ union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+ struct drm_vmw_context_arg *rep = &arg->rep;
+
+ switch (arg->req) {
+ case drm_vmw_context_legacy:
+ return vmw_context_define(dev, rep, file_priv, false);
+ case drm_vmw_context_dx:
+ return vmw_context_define(dev, rep, file_priv, true);
default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- loc->bi = *bi;
- loc->bi.scrubbed = false;
- list_add_tail(&loc->ctx_list, &cbs->list);
- INIT_LIST_HEAD(&loc->res_list);
-
- return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
-{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- loc = &cbs->render_targets[bi->i1.rt_type];
break;
- case vmw_ctx_binding_tex:
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
- default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- if (bi->res != NULL) {
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- list_add_tail(&loc->res_list, &bi->res->binding_head);
}
+ return -EINVAL;
}
/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
- if (!cb->bi.scrubbed) {
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
- cb->bi.scrubbed = true;
- }
- vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ * vmw_context_binding_list - Return a list of context bindings
*
- * @cbs: Pointer to the context binding state tracker.
+ * @ctx: The context resource
*
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry, *next;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
- list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
- vmw_context_binding_kill(entry);
+ return vmw_binding_state_list(uctx->cbs);
}
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->man;
}
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type)
{
- struct vmw_ctx_binding *entry, *next;
+ if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ return ERR_PTR(-EINVAL);
- list_for_each_entry_safe(entry, next, head, res_list)
- vmw_context_binding_kill(entry);
+ return vmw_resource_reference
+ (container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type]);
}
/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
+ * vmw_context_binding_state - Return a pointer to a context binding state
+ * structure
*
- * @head: list head of resource binding list
+ * @ctx: The context resource
*
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
-void vmw_context_binding_res_list_scrub(struct list_head *head)
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, head, res_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_bind_dx_query - Set the query MOB for the context
+ *
+ * If @mob is NULL, this function removes the association between the MOB
+ * and the context. Assumes the binding_mutex is held.
*
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
*
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter. 0 otherwise.
*/
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
- struct vmw_ctx_binding_state *from)
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob)
{
struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding *entry, *next;
-
- list_for_each_entry_safe(entry, next, &from->list, ctx_list)
- vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
+ container_of(ctx_res, struct vmw_user_context, res);
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
- struct vmw_ctx_binding *entry;
- struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding_state *cbs = &uctx->cbs;
- int ret;
+ if (mob == NULL) {
+ if (uctx->dx_query_mob) {
+ uctx->dx_query_mob->dx_query_ctx = NULL;
+ vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ uctx->dx_query_mob = NULL;
+ }
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (likely(!entry->bi.scrubbed))
- continue;
+ return 0;
+ }
- if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
- SVGA3D_INVALID_ID))
- continue;
+ /* Can only have one MOB per context for queries */
+ if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+ return -EINVAL;
- ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
- if (unlikely(ret != 0))
- return ret;
+ mob->dx_query_ctx = ctx_res;
- entry->bi.scrubbed = false;
- }
+ if (!uctx->dx_query_mob)
+ uctx->dx_query_mob = vmw_dmabuf_reference(mob);
return 0;
}
/**
- * vmw_context_binding_list - Return a list of context bindings
- *
- * @ctx: The context resource
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
*
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * @ctx_res: The context resource
*/
-struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
- return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
-}
+ struct vmw_user_context *uctx =
+ container_of(ctx_res, struct vmw_user_context, res);
-struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
-{
- return container_of(ctx, struct vmw_user_context, res)->man;
+ return uctx->dx_query_mob;
}
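
The accessor block that now ends vmwgfx_context.c (vmw_context_binding_list(), vmw_context_res_man(), vmw_context_binding_state() and vmw_context_get_dx_query_mob()) relies on a single idiom: container_of() downcasts the embedded struct vmw_resource to its enclosing struct vmw_user_context. A minimal sketch of that idiom follows; the struct layout is illustrative only, not the driver's actual definition:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/list.h>

    /* Illustrative layout; the real struct vmw_user_context lives in
     * vmwgfx_context.c and has many more members. */
    struct example_resource {
    	struct list_head binding_head;
    };

    struct example_user_context {
    	struct example_resource res;	/* embedded base object */
    	void *man;			/* stand-in for the cmdbuf res manager */
    };

    /* Recover the enclosing context from a pointer to its embedded
     * resource, exactly as the accessors above do. */
    static inline struct example_user_context *
    example_ctx(struct example_resource *res)
    {
    	return container_of(res, struct example_user_context, res);
    }

Because the returned pointers are not reference-counted, these accessors are only valid while the caller already holds a reference to the context, or the binding_mutex, as the comments above note.
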
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 000000000000..ce659a125f2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, which is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Highest entry id seen in the command stream.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ size_t size_read_back;
+ int seen_entries;
+ u32 type;
+ bool scrubbed;
+ struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
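+ * @unbind_func: Optional callback to scrub the cotable's resources on unbind.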
+ */
+struct vmw_cotable_info {
+ u32 min_initial_entries;
+ u32 size;
+ void (*unbind_func)(struct vmw_private *, struct list_head *,
+ bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+ {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+ {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+ {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+ {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+ {1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXQueryEntry), NULL},
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context when we remove
+ * bindings before scrubbing a cotable.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+ SVGA_COTABLE_RTVIEW,
+ SVGA_COTABLE_DSVIEW,
+ SVGA_COTABLE_SRVIEW,
+ SVGA_COTABLE_DXSHADER,
+ SVGA_COTABLE_ELEMENTLAYOUT,
+ SVGA_COTABLE_BLENDSTATE,
+ SVGA_COTABLE_DEPTHSTENCIL,
+ SVGA_COTABLE_RASTERIZERSTATE,
+ SVGA_COTABLE_SAMPLER,
+ SVGA_COTABLE_STREAMOUTPUT,
+ SVGA_COTABLE_DXQUERY,
+};
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+ .res_type = vmw_res_cotable,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "context guest backed object tables",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_cotable_create,
+ .destroy = vmw_cotable_destroy,
+ .bind = vmw_cotable_bind,
+ .unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+ res->id = -1;
+ return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = &res->backup->base;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCOTable body;
+ } *cmd;
+
+ WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+ WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = vcotbl->ctx->id;
+ cmd->body.type = vcotbl->type;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+ vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vcotbl->scrubbed = false;
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ /*
+ * The create() callback may have changed @res->backup without
+ * the caller noticing, and with val_buf->bo still pointing to
+ * the old backup buffer. Although hackish, and not used currently,
+ * take the opportunity to correct the value here so that it's not
+ * misused in the future.
+ */
+ val_buf->bo = &res->backup->base;
+
+ return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the backup
+ * buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions:
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ * be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ * resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ size_t submit_size;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackCOTable body;
+ } *cmd0;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCOTable body;
+ } *cmd1;
+
+ if (vcotbl->scrubbed)
+ return 0;
+
+ if (co_info[vcotbl->type].unbind_func)
+ co_info[vcotbl->type].unbind_func(dev_priv,
+ &vcotbl->resource_list,
+ readback);
+ submit_size = sizeof(*cmd1);
+ if (readback)
+ submit_size += sizeof(*cmd0);
+
+ cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+ if (!cmd1) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ vcotbl->size_read_back = 0;
+ if (readback) {
+ cmd0 = (void *) cmd1;
+ cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+ cmd0->header.size = sizeof(cmd0->body);
+ cmd0->body.cid = vcotbl->ctx->id;
+ cmd0->body.type = vcotbl->type;
+ cmd1 = (void *) &cmd0[1];
+ vcotbl->size_read_back = res->backup_size;
+ }
+ cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = vcotbl->ctx->id;
+ cmd1->body.type = vcotbl->type;
+ cmd1->body.mobid = SVGA3D_INVALID_ID;
+ cmd1->body.validSizeInBytes = 0;
+ vmw_fifo_commit_flush(dev_priv, submit_size);
+ vcotbl->scrubbed = true;
+
+ /* Trigger a create() on next validate. */
+ res->id = -1;
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+
+ if (list_empty(&res->mob_head))
+ return 0;
+
+ WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (!vcotbl->scrubbed)
+ vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+ mutex_unlock(&dev_priv->binding_mutex);
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(bo, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackCOTable body;
+ } *cmd;
+ struct vmw_fence_obj *fence;
+
+ if (!vcotbl->scrubbed) {
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+ SVGA3D_INVALID_ID);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for cotable "
+ "readback.\n");
+ return -ENOMEM;
+ }
+ cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = vcotbl->ctx->id;
+ cmd->body.type = vcotbl->type;
+ vcotbl->size_read_back = res->backup_size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(&res->backup->base, fence);
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ struct vmw_dma_buffer *buf, *old_buf = res->backup;
+ struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+ size_t old_size = res->backup_size;
+ size_t old_size_read_back = vcotbl->size_read_back;
+ size_t cur_size_read_back;
+ struct ttm_bo_kmap_obj old_map, new_map;
+ int ret;
+ size_t i;
+
+ ret = vmw_cotable_readback(res);
+ if (ret)
+ return ret;
+
+ cur_size_read_back = vcotbl->size_read_back;
+ vcotbl->size_read_back = old_size_read_back;
+
+ /*
+	 * While the device is processing, allocate and reserve a buffer object
+ * for the new COTable. Initially pin the buffer object to make sure
+ * we can use tryreserve without failure.
+ */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+ true, vmw_dmabuf_bo_free);
+ if (ret) {
+ DRM_ERROR("Failed initializing new cotable MOB.\n");
+ return ret;
+ }
+
+ bo = &buf->base;
+ WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+ ret = ttm_bo_wait(old_bo, false, false, false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed waiting for cotable unbind.\n");
+ goto out_wait;
+ }
+
+ /*
+ * Do a page by page copy of COTables. This eliminates slow vmap()s.
+ * This should really be a TTM utility.
+ */
+ for (i = 0; i < old_bo->num_pages; ++i) {
+ bool dummy;
+
+ ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed mapping old COTable on resize.\n");
+ goto out_wait;
+ }
+ ret = ttm_bo_kmap(bo, i, 1, &new_map);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed mapping new COTable on resize.\n");
+ goto out_map_new;
+ }
+ memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+ ttm_kmap_obj_virtual(&old_map, &dummy),
+ PAGE_SIZE);
+ ttm_bo_kunmap(&new_map);
+ ttm_bo_kunmap(&old_map);
+ }
+
+ /* Unpin new buffer, and switch backup buffers. */
+ ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed validating new COTable backup buffer.\n");
+ goto out_wait;
+ }
+
+ res->backup = buf;
+ res->backup_size = new_size;
+ vcotbl->size_read_back = cur_size_read_back;
+
+ /*
+ * Now tell the device to switch. If this fails, then we need to
+ * revert the full resize.
+ */
+ ret = vmw_cotable_unscrub(res);
+ if (ret) {
+ DRM_ERROR("Failed switching COTable backup buffer.\n");
+ res->backup = old_buf;
+ res->backup_size = old_size;
+ vcotbl->size_read_back = old_size_read_back;
+ goto out_wait;
+ }
+
+ /* Let go of the old mob. */
+ list_del(&res->mob_head);
+ list_add_tail(&res->mob_head, &buf->res_list);
+ vmw_dmabuf_unreference(&old_buf);
+ res->id = vcotbl->type;
+
+ return 0;
+
+out_map_new:
+ ttm_bo_kunmap(&old_map);
+out_wait:
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&buf);
+
+ return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence, is instead used for
+ * two things:
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ * buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+ size_t new_size = res->backup_size;
+ size_t needed_size;
+ int ret;
+
+ /* Check whether we need to resize the cotable */
+ needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+ while (needed_size > new_size)
+ new_size *= 2;
+
+ if (likely(new_size <= res->backup_size)) {
+ if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+ ret = vmw_cotable_unscrub(res);
+ if (ret)
+ return ret;
+ }
+ res->id = vcotbl->type;
+ return 0;
+ }
+
+ return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+ (void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ kfree(res);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource.
+ * The cotable resource will not take a refcount on the context.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+ struct vmw_resource *ctx,
+ u32 type)
+{
+ struct vmw_cotable *vcotbl;
+ int ret;
+ u32 num_entries;
+
+ if (unlikely(cotable_acc_size == 0))
+ cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ cotable_acc_size, false, true);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+
+ vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+ if (unlikely(vcotbl == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_alloc;
+ }
+
+ ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+ vmw_cotable_free, &vmw_cotable_func);
+ if (unlikely(ret != 0))
+ goto out_no_init;
+
+ INIT_LIST_HEAD(&vcotbl->resource_list);
+ vcotbl->res.id = type;
+ vcotbl->res.backup_size = PAGE_SIZE;
+ num_entries = PAGE_SIZE / co_info[type].size;
+ if (num_entries < co_info[type].min_initial_entries) {
+ vcotbl->res.backup_size = co_info[type].min_initial_entries *
+ co_info[type].size;
+ vcotbl->res.backup_size =
+ (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ vcotbl->scrubbed = true;
+ vcotbl->seen_entries = -1;
+ vcotbl->type = type;
+ vcotbl->ctx = ctx;
+
+ vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+ return &vcotbl->res;
+
+out_no_init:
+ kfree(vcotbl);
+out_no_alloc:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+ struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+ if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+ DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+ (unsigned) vcotbl->type, id);
+ return -EINVAL;
+ }
+
+ if (vcotbl->seen_entries < id) {
+ /* Trigger a call to create() on next validate */
+ res->id = -1;
+ vcotbl->seen_entries = id;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cotable_add_resource - Add a resource to the cotable's active list
+ *
+ * @res: pointer to the struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+ struct vmw_cotable *vcotbl =
+ container_of(res, struct vmw_cotable, res);
+
+ list_add_tail(head, &vcotbl->resource_list);
+}
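
Two pieces of arithmetic drive the sizing policy in this new file: vmw_cotable_alloc() starts each table at one page but rounds up to whole pages when min_initial_entries do not fit, and vmw_cotable_create() grows the table by doubling until the highest entry id seen so far fits. A standalone sketch of both computations (hypothetical helper names, page size fixed at 4096 for illustration):

    #include <stddef.h>

    #define EXAMPLE_PAGE_SIZE 4096UL

    /* Mirrors the initial-size computation in vmw_cotable_alloc(). */
    static size_t cotable_initial_size(size_t entry_size, unsigned int min_entries)
    {
    	size_t size = EXAMPLE_PAGE_SIZE;

    	if (EXAMPLE_PAGE_SIZE / entry_size < min_entries)
    		size = (min_entries * entry_size + EXAMPLE_PAGE_SIZE - 1) &
    			~(EXAMPLE_PAGE_SIZE - 1);
    	return size;
    }

    /* Mirrors the doubling loop in vmw_cotable_create(): grow until the
     * entry with the highest id seen so far fits. */
    static size_t cotable_grown_size(size_t cur_size, int seen_entries,
    				 size_t entry_size)
    {
    	size_t needed = (size_t)(seen_entries + 1) * entry_size;

    	while (needed > cur_size)
    		cur_size *= 2;
    	return cur_size;
    }

For example, with 64-byte entries and seen_entries == 100, needed is 6464 bytes, so a one-page table doubles once to 8192 bytes.
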
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375763dc..299925a1f6c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
/**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
+ * @placement: The placement to pin the buffer in.
* @interruptible: Use interruptible wait.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
goto err;
ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
@@ -75,12 +72,10 @@ err:
}
/**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
@@ -90,55 +85,34 @@ err:
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement *placement;
int ret;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- if (pin)
- placement = &vmw_vram_gmr_ne_placement;
- else
- placement = &vmw_vram_gmr_placement;
-
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto err_unreserve;
-
+ goto out_unreserve;
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+out_unreserve:
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
-err_unreserve:
ttm_bo_unreserve(bo);
err:
ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ err:
}
/**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
- struct ttm_placement *placement;
-
- if (pin)
- placement = &vmw_vram_ne_placement;
- else
- placement = &vmw_vram_placement;
-
- return vmw_dmabuf_to_placement(dev_priv, buf,
- placement,
- interruptible);
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+ interruptible);
}
/**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer in vram if true.
+ * @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
- if (pin)
- place = vmw_vram_ne_placement.placement[0];
- else
- place = vmw_vram_placement.placement[0];
+ place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
-
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
- /* Is this buffer already in vram but not at the start of it? */
+ /*
+ * Is this buffer already in vram but not at the start of it?
+ * In that case, evict it first because TTM isn't good at handling
+ * that situation.
+ */
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
ret = ttm_bo_validate(bo, &placement, interruptible, false);
- /* For some reason we didn't up at the start of vram */
+ /* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
return ret;
}
-
/**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer. Does not move the buffer.
*
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
- /*
- * We could in theory early out if the buffer is
- * unpinned but we need to lock and reserve the buffer
- * anyways so we don't gain much by that.
- */
- return vmw_dmabuf_to_placement(dev_priv, buf,
- &vmw_evictable_placement,
- interruptible);
-}
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(buf, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
struct ttm_place pl;
struct ttm_placement placement;
+ struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
lockdep_assert_held(&bo->resv->lock.base);
+ if (pin) {
+ if (vbo->pin_count++ > 0)
+ return;
+ } else {
+ WARN_ON(vbo->pin_count <= 0);
+ if (--vbo->pin_count > 0)
+ return;
+ }
+
pl.fpfn = 0;
pl.lpfn = 0;
pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
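
The replacement of vmw_bo_pin() by vmw_bo_pin_reserved() above makes pinning a counted operation: only the 0 to 1 and 1 to 0 transitions of pin_count change the buffer's placement, so nested pin/unpin pairs from different callers stay balanced. A minimal model of that counting discipline, detached from TTM (illustrative type, not driver code):

    #include <linux/bug.h>		/* WARN_ON() */
    #include <linux/types.h>

    struct pin_count_example {
    	u32 count;
    };

    /* Returns true when the caller must actually apply or drop the pinned
     * placement, i.e. only on the 0->1 and 1->0 transitions, matching the
     * early returns in vmw_bo_pin_reserved(). */
    static bool pin_transition(struct pin_count_example *p, bool pin)
    {
    	if (pin)
    		return p->count++ == 0;

    	WARN_ON(p->count == 0);
    	return --p->count == 0;
    }

As in the driver, the counter must be externally serialised; there, the buffer reservation that lockdep_assert_held() checks provides that serialisation.
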
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5cf617c..e13b20bd9908 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
#define DRM_IOCTL_VMW_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
+ struct drm_vmw_context_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+ vmw_extended_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Command Buffers 2.\n");
if (capabilities & SVGA_CAP_GBOBJECTS)
DRM_INFO(" Guest Backed Resources.\n");
+ if (capabilities & SVGA_CAP_DX)
+ DRM_INFO(" DX Features.\n");
}
/**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct ttm_buffer_object *bo;
+ struct vmw_dma_buffer *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
/*
- * Create the bo as pinned, so that a tryreserve will
+ * Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- ret = ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, NULL,
- &bo);
+ vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+ if (!vbo)
+ return -ENOMEM;
+ ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
BUG_ON(ret != 0);
+ vmw_bo_pin_reserved(vbo, true);
- ret = ttm_bo_kmap(bo, 0, 1, &map);
+ ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
- vmw_bo_pin(bo, false);
- ttm_bo_unreserve(bo);
+ vmw_bo_pin_reserved(vbo, false);
+ ttm_bo_unreserve(&vbo->base);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- ttm_bo_unref(&bo);
+ vmw_dmabuf_unreference(&vbo);
} else
- dev_priv->dummy_query_bo = bo;
+ dev_priv->dummy_query_bo = vbo;
return ret;
}
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because the function reverts vmw_release_device_early() and is intended
+ * to be used by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+ int ret;
+
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ return ret;
+ }
+ }
+
+ if (dev_priv->cman) {
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+ 256*4096, 2*4096);
+ if (ret) {
+ struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+ dev_priv->cman = NULL;
+ vmw_cmdbuf_man_destroy(man);
+ }
+ }
+
+ return 0;
+}
+
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
- if (dev_priv->has_mob) {
- ret = vmw_otables_setup(dev_priv);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to initialize "
- "guest Memory OBjects.\n");
- goto out_no_mob;
- }
+ dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+ if (IS_ERR(dev_priv->cman)) {
+ dev_priv->cman = NULL;
+ dev_priv->has_dx = false;
}
+
+ ret = vmw_request_device_late(dev_priv);
+ if (ret)
+ goto out_no_mob;
+
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return 0;
out_no_query_bo:
- if (dev_priv->has_mob)
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
+ if (dev_priv->has_mob) {
+ (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
+ }
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
- ttm_bo_unref(&dev_priv->dummy_query_bo);
- if (dev_priv->has_mob)
- vmw_otables_takedown(dev_priv);
- vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
+ vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
- bool unhide_svga)
-{
- int ret = 0;
-
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(dev_priv->num_3d_resources++ == 0)) {
- ret = vmw_request_device(dev_priv);
- if (unlikely(ret != 0))
- --dev_priv->num_3d_resources;
- } else if (unhide_svga) {
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) &
- ~SVGA_REG_ENABLE_HIDE);
+ if (dev_priv->has_mob) {
+ ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ vmw_otables_takedown(dev_priv);
}
-
- mutex_unlock(&dev_priv->release_mutex);
- return ret;
}
/**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
*/
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
- bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
{
- int32_t n3d;
-
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(--dev_priv->num_3d_resources == 0))
- vmw_release_device(dev_priv);
- else if (hide_svga)
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) |
- SVGA_REG_ENABLE_HIDE);
-
- n3d = (int32_t) dev_priv->num_3d_resources;
- mutex_unlock(&dev_priv->release_mutex);
+ vmw_fence_fifo_down(dev_priv->fman);
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
- BUG_ON(n3d < 0);
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
+ spin_lock_init(&dev_priv->svga_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- } else
+ dev_priv->stdu_max_width =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+ dev_priv->stdu_max_height =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+ dev_priv->texture_max_width = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+ dev_priv->texture_max_height = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ } else {
+ dev_priv->texture_max_width = 8192;
+ dev_priv->texture_max_height = 8192;
dev_priv->prim_bb_mem = dev_priv->vram_size;
+ }
+
+ vmw_print_capabilities(dev_priv->capabilities);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
- /*
- * Limit back buffer size to VRAM size. Remove this once
- * screen targets are implemented.
- */
- if (dev_priv->prim_bb_mem > dev_priv->vram_size)
- dev_priv->prim_bb_mem = dev_priv->vram_size;
-
- vmw_print_capabilities(dev_priv->capabilities);
-
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->active_master = &dev_priv->fbdev_master;
- ret = ttm_bo_device_init(&dev_priv->bdev,
- dev_priv->bo_global_ref.ref.object,
- &vmw_bo_driver,
- dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
- false);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed initializing TTM buffer object driver.\n");
- goto out_err1;
- }
-
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver,
+ dev->anon_inode->i_mapping,
+ VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_no_bdev;
+ }
+ /*
+ * Enable VRAM, but initially don't use it until SVGA is enabled and
+ * unhidden.
+ */
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
- vmw_kms_save_vga(dev_priv);
+ if (dev_priv->has_mob) {
+ spin_lock(&dev_priv->cap_lock);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ spin_unlock(&dev_priv->cap_lock);
+ }
+
- /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ goto out_no_fifo;
+
+ DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
if (dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
+ vmw_fifo_resource_inc(dev_priv);
+ vmw_svga_enable(dev_priv);
vmw_fb_init(dev_priv);
}
@@ -838,13 +891,14 @@ out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- vmw_kms_restore_vga(dev_priv);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
- (void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
kfree(dev_priv);
return ret;
}
@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
drm_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
+ vmw_fb_off(dev_priv);
vmw_fb_close(dev_priv);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+ vmw_svga_disable(dev_priv);
}
+
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ vmw_release_device_early(dev_priv);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ (void) ttm_bo_device_release(&dev_priv->bdev);
+ vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
@@ -906,7 +965,8 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
- (void)ttm_bo_device_release(&dev_priv->bdev);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,10 +1052,15 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
}
/*
- * Check if we were previously master, but now dropped.
+ * Check if we were previously master, but now dropped. In that
+ * case, allow at least render node functionality.
*/
if (vmw_fp->locked_master) {
mutex_unlock(&dev->master_mutex);
+
+ if (flags & DRM_RENDER_ALLOW)
+ return NULL;
+
DRM_ERROR("Dropped master trying to access ioctl that "
"requires authentication.\n");
return ERR_PTR(-EACCES);
@@ -1044,17 +1109,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd != cmd)) {
- DRM_ERROR("Invalid command format, ioctl %d\n",
- nr - DRM_COMMAND_BASE);
- return -EINVAL;
+ if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+ ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+ goto out_io_encoding;
+
+ return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+ _IOC_SIZE(cmd));
}
+
+ if (unlikely(ioctl->cmd != cmd))
+ goto out_io_encoding;
+
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
vmaster = vmw_master_check(dev, file_priv, flags);
- if (unlikely(IS_ERR(vmaster))) {
+ if (IS_ERR(vmaster)) {
ret = PTR_ERR(vmaster);
if (ret != -ERESTARTSYS)
@@ -1068,6 +1143,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
ttm_read_unlock(&vmaster->lock);
return ret;
+
+out_io_encoding:
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+
+ return -EINVAL;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1167,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
static void vmw_lastclose(struct drm_device *dev)
{
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- set.x = 0;
- set.y = 0;
- set.fb = NULL;
- set.mode = NULL;
- set.connectors = NULL;
- set.num_connectors = 0;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- set.crtc = crtc;
- ret = drm_mode_set_config_internal(&set);
- WARN_ON(ret != 0);
- }
-
}
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
- INIT_LIST_HEAD(&vmaster->fb_surf);
- mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1199,6 @@ static void vmw_master_destroy(struct drm_device *dev,
kfree(vmaster);
}
-
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
@@ -1148,27 +1209,13 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- return ret;
- vmw_kms_save_vga(dev_priv);
- vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- }
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
if (unlikely(ret != 0))
- goto out_no_active_lock;
+ return ret;
ttm_lock_set_kill(&active->lock, true, SIGTERM);
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to clean VRAM on "
- "master drop.\n");
- }
-
dev_priv->active_master = NULL;
}
@@ -1182,14 +1229,6 @@ static int vmw_master_set(struct drm_device *dev,
dev_priv->active_master = vmaster;
return 0;
-
-out_no_active_lock:
- if (!dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
- return ret;
}
static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1253,9 @@ static void vmw_master_drop(struct drm_device *dev,
}
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
- vmw_execbuf_release_pinned_bo(dev_priv);
- if (!dev_priv->enable_fb) {
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0))
- DRM_ERROR("Unable to clean VRAM on master drop.\n");
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
+ if (!dev_priv->enable_fb)
+ vmw_svga_disable(dev_priv);
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1265,76 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fb_on(dev_priv);
}
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+ spin_lock(&dev_priv->svga_lock);
+ if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+ }
+ spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+ ttm_read_lock(&dev_priv->reservation_sem, false);
+ __vmw_svga_enable(dev_priv);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ }
+ spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+ ttm_write_lock(&dev_priv->reservation_sem, false);
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ spin_unlock(&dev_priv->svga_lock);
+ if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ DRM_ERROR("Failed evicting VRAM buffers.\n");
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ } else
+ spin_unlock(&dev_priv->svga_lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
+}
static void vmw_remove(struct pci_dev *pdev)
{
@@ -1250,23 +1352,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
+ if (dev_priv->enable_fb)
+ vmw_fb_off(dev_priv);
ttm_suspend_lock(&dev_priv->reservation_sem);
- /**
+ /*
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
-
+ vmw_fence_fifo_down(dev_priv->fman);
break;
case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
case PM_POST_RESTORE:
+ vmw_fence_fifo_up(dev_priv->fman);
ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
break;
case PM_RESTORE_PREPARE:
break;
@@ -1276,20 +1381,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
return 0;
}
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- if (dev_priv->num_3d_resources != 0) {
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ if (dev_priv->refuse_hibernation)
return -EBUSY;
- }
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -1321,56 +1419,62 @@ static int vmw_pm_resume(struct device *kdev)
return vmw_pci_resume(pdev);
}
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- /**
- * Release 3d reference held by fbdev and potentially
- * stop fifo.
- */
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv, true);
-
- if (dev_priv->num_3d_resources != 0) {
-
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ vmw_fifo_resource_dec(dev_priv);
+ if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+ DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, true);
+ vmw_fifo_resource_inc(dev_priv);
+ WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspended = false;
return -EBUSY;
}
+ if (dev_priv->enable_fb)
+ __vmw_svga_disable(dev_priv);
+
+ vmw_release_device_late(dev_priv);
+
return 0;
}
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- /**
- * Reclaim 3d reference held by fbdev and potentially
- * start fifo.
- */
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
+
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ return ret;
+
+ if (dev_priv->enable_fb)
+ __vmw_svga_enable(dev_priv);
dev_priv->suspended = false;
+
+ return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
- .prepare = vmw_pm_prepare,
- .complete = vmw_pm_complete,
+ .freeze = vmw_pm_freeze,
+ .thaw = vmw_pm_restore,
+ .restore = vmw_pm_restore,
.suspend = vmw_pm_suspend,
.resume = vmw_pm_resume,
};
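Dropping .prepare/.complete in favour of .freeze/.thaw/.restore ties the driver to the hibernation-specific phases of dev_pm_ops: freeze runs before the hibernation image is created, thaw after the image has been written, and restore after a resumed kernel has loaded the image. A minimal sketch of the same wiring in a hypothetical driver (all names assumed):

	#include <linux/pm.h>

	static int foo_freeze(struct device *dev)
	{
		/* quiesce the hardware before the image is created */
		return 0;
	}

	static int foo_restore(struct device *dev)
	{
		/* reinitialize the hardware after thaw or restore */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.freeze  = foo_freeze,
		.thaw    = foo_restore,
		.restore = foo_restore,
	};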
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d26a6daa9719..6d02de6dc36c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
/*
* Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
VMWGFX_NUM_GB_SHADER +\
VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head res_list;
+ s32 pin_count;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
};
/**
@@ -113,6 +118,7 @@ struct vmw_resource {
bool backup_dirty; /* Protected by backup buffer reserved */
struct vmw_dma_buffer *backup;
unsigned long backup_offset;
+ unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@ enum vmw_res_type {
vmw_res_surface,
vmw_res_stream,
vmw_res_shader,
+ vmw_res_dx_context,
+ vmw_res_cotable,
+ vmw_res_view,
vmw_res_max
};
@@ -137,7 +146,8 @@ enum vmw_res_type {
* Resources that are managed using command streams.
*/
enum vmw_cmdbuf_res_type {
- vmw_cmdbuf_res_compat_shader
+ vmw_cmdbuf_res_shader,
+ vmw_cmdbuf_res_view
};
struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
struct drm_vmw_size *sizes;
uint32_t num_sizes;
bool scanout;
+ uint32_t array_size;
/* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
+ struct list_head view_list;
};
struct vmw_marker_queue {
@@ -176,14 +188,15 @@ struct vmw_marker_queue {
struct vmw_fifo_state {
unsigned long reserved_size;
- __le32 *dynamic_buffer;
- __le32 *static_buffer;
+ u32 *dynamic_buffer;
+ u32 *static_buffer;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_marker_queue marker_queue;
+ bool dx;
};
struct vmw_relocation {
@@ -227,7 +240,7 @@ enum vmw_dma_map_mode {
* device-specific information.
*
* @sgt: Pointer to a struct sg_table with binding information
- * @num_regions: Number of regions with device-address contigous pages
+ * @num_regions: Number of regions with device-address contiguous pages
*/
struct vmw_sg_table {
enum vmw_dma_map_mode mode;
@@ -264,70 +277,15 @@ struct vmw_piter {
};
/*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
*/
-enum vmw_ctx_binding_type {
- vmw_ctx_binding_shader,
- vmw_ctx_binding_rt,
- vmw_ctx_binding_tex,
- vmw_ctx_binding_max
+enum vmw_display_unit_type {
+ vmw_du_invalid = 0,
+ vmw_du_legacy,
+ vmw_du_screen_object,
+ vmw_du_screen_target
};
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
- struct vmw_resource *ctx;
- struct vmw_resource *res;
- enum vmw_ctx_binding_type bt;
- bool scrubbed;
- union {
- SVGA3dShaderType shader_type;
- SVGA3dRenderTargetType rt_type;
- uint32 texture_stage;
- } i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- * - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
- struct list_head ctx_list;
- struct list_head res_list;
- struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
- struct list_head list;
- struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
- struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
- struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
struct vmw_sw_context{
struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
- struct ttm_buffer_object *cur_query_bo;
+ struct list_head ctx_resource_list; /* For contexts and cotables */
+ struct vmw_dma_buffer *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
- struct vmw_ctx_binding_state staged_bindings;
+ struct vmw_ctx_binding_state *staged_bindings;
+ bool staged_bindings_inuse;
struct list_head staged_cmd_res;
+ struct vmw_resource_val_node *dx_ctx_node;
+ struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_cmdbuf_res_manager *man;
};
struct vmw_legacy_display;
@@ -358,8 +322,6 @@ struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
- struct mutex fb_surf_mutex;
- struct list_head fb_surf;
};
struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
uint32_t pos_y;
};
+
+/**
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
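+ * @enabled: Whether the table is currently enabled.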
+ */
+struct vmw_otable {
+ unsigned long size;
+ struct vmw_mob *page_table;
+ bool enabled;
+};
+
+struct vmw_otable_batch {
+ unsigned num_otables;
+ struct vmw_otable *otables;
+ struct vmw_resource *context;
+ struct ttm_buffer_object *otable_bo;
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@ struct vmw_private {
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
+ uint32_t texture_max_width;
+ uint32_t texture_max_height;
+ uint32_t stdu_max_width;
+ uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- __le32 __iomem *mmio_virt;
+ u32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@ struct vmw_private {
bool has_mob;
spinlock_t hw_lock;
spinlock_t cap_lock;
+ bool has_dx;
/*
* VGA registers.
@@ -420,6 +407,7 @@ struct vmw_private {
*/
void *fb_info;
+ enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@ struct vmw_private {
spinlock_t waiter_lock;
int fence_queue_waiters; /* Protected by waiter_lock */
int goal_queue_waiters; /* Protected by waiter_lock */
+ int cmdbuf_waiters; /* Protected by irq_lock */
+ int error_waiters; /* Protected by irq_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -484,6 +474,7 @@ struct vmw_private {
bool stealth;
bool enable_fb;
+ spinlock_t svga_lock;
/**
* Master management.
@@ -493,9 +484,10 @@ struct vmw_private {
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
bool suspended;
+ bool refuse_hibernation;
struct mutex release_mutex;
- uint32_t num_3d_resources;
+ atomic_t num_fifo_resources;
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct ttm_buffer_object *dummy_query_bo;
- struct ttm_buffer_object *pinned_bo;
+ struct vmw_dma_buffer *dummy_query_bo;
+ struct vmw_dma_buffer *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@ struct vmw_private {
/*
* Guest Backed stuff
*/
- struct ttm_buffer_object *otable_bo;
- struct vmw_otable *otables;
+ struct vmw_otable_batch otable_batch;
+
+ struct vmw_cmdbuf_man *cman;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
/**
* GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+ bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
+ bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
- bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
- bool pin, bool interruptible);
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+ bool interruptible);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
* Command submission - vmwgfx_execbuf.c
*/
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+ struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+ uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob);
+
/**
* IRQs and waiting - vmwgfx_irq.c
@@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
- uint32_t seqno, bool interruptible,
- unsigned long timeout);
+ uint32_t seqno, bool interruptible,
+ unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
+ uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
+ uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_marker_queue *queue, uint32_t us);
@@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t sid, int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
/**
* Overlay control - vmwgfx_overlay.c
*/
@@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
- struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
/*
* Surface management - vmwgfx_surface.c
*/
@@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+ uint32_t user_accounting_size,
+ uint32_t svga3d_flags,
+ SVGA3dSurfaceFormat format,
+ bool for_scanout,
+ uint32_t num_mip_levels,
+ uint32_t multisample_count,
+ uint32_t array_size,
+ struct drm_vmw_size size,
+ struct vmw_surface **srf_out);
/*
* Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type,
- struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+
extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type);
/*
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
- struct list_head *list);
+ struct list_head *list,
+ struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+ struct vmw_resource *ctx,
+ u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+ struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+ size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+ unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+ int ctx_id, bool interruptible,
+ struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+ struct vmw_cmdbuf_header *header,
+ bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+ size_t size, bool interruptible,
+ struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+ bool interruptible);
/**
@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+ atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+ atomic_dec(&dev_priv->num_fifo_resources);
+}
#endif
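The new inline helpers replace the mutex-protected num_3d_resources counting with a plain atomic_t, so callers bracket fifo-dependent work without taking release_mutex. A sketch of the intended use (mirrors vmw_pm_freeze earlier in this patch; surrounding context assumed):

	vmw_fifo_resource_inc(dev_priv);	/* atomic, no lock needed */
	/* ... work that relies on an active fifo ... */
	vmw_fifo_resource_dec(dev_priv);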
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
#define VMW_RES_HT_ORDER 12
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference, so resource reservation does not need to allocate a backup
+ * buffer for the resource.
*/
struct vmw_resource_val_node {
struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
struct vmw_dma_buffer *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
- bool first_usage;
- bool no_buffer_needed;
+ u32 first_usage : 1;
+ u32 switching_backup : 1;
+ u32 no_buffer_needed : 1;
};
/**
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
(_gb_disable), (_gb_enable)}
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+ struct vmw_dma_buffer *vbo,
+ bool validate_as_mob,
+ uint32_t *p_val_node);
+
+
/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
- * @list_head: list of resources to unreserve.
+ * @sw_context: Pointer to the software context.
* @backoff: Whether command submission failed.
*/
-static void vmw_resource_list_unreserve(struct list_head *list,
- bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+ bool backoff)
{
struct vmw_resource_val_node *val;
+ struct list_head *list = &sw_context->resource_list;
+
+ if (sw_context->dx_query_mob && !backoff)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *new_backup =
- backoff ? NULL : val->new_backup;
+ bool switch_backup =
+ (backoff) ? false : val->switching_backup;
/*
* Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ vmw_binding_state_commit
+ (vmw_context_binding_state(val->res),
+ val->staged_bindings);
}
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
- vmw_resource_unreserve(res, new_backup,
- val->new_backup_offset);
+ vmw_resource_unreserve(res, switch_backup, val->new_backup,
+ val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *node)
+{
+ int ret;
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (!sw_context->staged_bindings) {
+ sw_context->staged_bindings =
+ vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(sw_context->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(sw_context->staged_bindings);
+ sw_context->staged_bindings = NULL;
+ goto out_err;
+ }
+ }
+
+ if (sw_context->staged_bindings_inuse) {
+ node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged_bindings)) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = PTR_ERR(node->staged_bindings);
+ node->staged_bindings = NULL;
+ goto out_err;
+ }
+ } else {
+ node->staged_bindings = sw_context->staged_bindings;
+ sw_context->staged_bindings_inuse = true;
+ }
+
+ return 0;
+out_err:
+ return ret;
+}
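vmw_cmd_ctx_first_setup() avoids one allocation in the common case: the software context keeps a single preallocated vmw_ctx_binding_state that the first context in a submission borrows, and only additional contexts allocate their own. Schematically (a condensed sketch of the branch above, not new driver code):

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}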
/**
* vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
+ struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
kfree(node);
return ret;
}
- list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
-
if (unlikely(p_node != NULL))
*p_node = node;
- return 0;
+ if (!dev_priv->has_mob) {
+ list_add_tail(&node->head, &sw_context->resource_list);
+ return 0;
+ }
+
+ switch (vmw_res_type(res)) {
+ case vmw_res_context:
+ case vmw_res_dx_context:
+ list_add(&node->head, &sw_context->ctx_resource_list);
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+ break;
+ case vmw_res_cotable:
+ list_add_tail(&node->head, &sw_context->ctx_resource_list);
+ break;
+ default:
+ list_add_tail(&node->head, &sw_context->resource_list);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *view)
+{
+ int ret;
+
+ /*
+ * First add the resource the view is pointing to, otherwise
+ * it may be swapped out when the view is validated.
+ */
+ ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ if (ret)
+ return ret;
+
+ return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's
+ * pointing to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success or a negative error
+ * code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type, u32 id)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view = vmw_view_lookup(sw_context->man, view_type, id);
+ if (IS_ERR(view))
+ return PTR_ERR(view);
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ vmw_resource_unreference(&view);
+
+ return ret;
}
/**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
- struct vmw_ctx_binding *entry;
+ struct vmw_ctx_bindinfo *entry;
int ret = 0;
struct vmw_resource *res;
+ u32 i;
+
+ /* Add all cotables to the validation list. */
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ res = vmw_context_cotable(ctx, i);
+ if (IS_ERR(res))
+ continue;
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+
+ /* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
- res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ /* entry->res is not refcounted */
+ res = vmw_resource_reference_unless_doomed(entry->res);
if (unlikely(res == NULL))
continue;
- ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ if (vmw_res_type(entry->res) == vmw_res_view)
+ ret = vmw_view_res_val_add(sw_context, entry->res);
+ else
+ ret = vmw_resource_val_add(sw_context, entry->res,
+ NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
+ if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ struct vmw_dma_buffer *dx_query_mob;
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+ if (dx_query_mob)
+ ret = vmw_bo_to_validate_list(sw_context,
+ dx_query_mob,
+ true, NULL);
+ }
+
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct ttm_buffer_object *bo,
+ struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct drm_hash_item *hash;
int ret;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) bo;
+ vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(bo);
+ val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
- int ret;
+ int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct vmw_dma_buffer *vbo = res->backup;
ret = vmw_bo_to_validate_list
- (sw_context, bo,
+ (sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
}
}
- return 0;
+
+ if (sw_context->dx_query_mob) {
+ struct vmw_dma_buffer *expected_dx_query_mob;
+
+ expected_dx_query_mob =
+ vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+ if (expected_dx_query_mob &&
+ expected_dx_query_mob != sw_context->dx_query_mob) {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
}
/**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
+
+ /* Check if the resource switched backup buffer */
+ if (backup && res->backup && (backup != res->backup)) {
+ struct vmw_dma_buffer *vbo = res->backup;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, vbo,
+ vmw_resource_needs_backup(res), NULL);
+ if (ret) {
+ ttm_bo_unreserve(&vbo->base);
+ return ret;
+ }
+ }
}
return 0;
}
-
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
- * @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (res_type == vmw_res_context && dev_priv->has_mob &&
- node->first_usage) {
-
- /*
- * Put contexts first on the list to be able to exit
- * list traversal for contexts early.
- */
- list_del(&node->head);
- list_add(&node->head, &sw_context->resource_list);
-
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- return ret;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
if (p_val)
*p_val = node;
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->res = res;
rcache->handle = *id_loc;
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
}
/**
+ * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+ struct vmw_private *dev_priv = ctx_res->dev_priv;
+ struct vmw_dma_buffer *dx_query_mob;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindAllQuery body;
+ } *cmd;
+
+
+ dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+ if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+ if (cmd == NULL) {
+ DRM_ERROR("Failed to rebind queries.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = ctx_res->id;
+ cmd->body.mobid = dx_query_mob->base.mem.start;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+ return 0;
+}
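vmw_rebind_all_dx_query() follows the driver's standard reserve/fill/commit sequence for emitting an SVGA command. Distilled (command layout as used above; error handling abbreviated):

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (!cmd)
		return -ENOMEM;		/* no fifo space */
	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	/* ... fill cmd->body ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));	/* publish to the host */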
+
+/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
if (unlikely(!val->staged_bindings))
break;
- ret = vmw_context_rebind_all(val->res);
+ ret = vmw_binding_rebind_all
+ (vmw_context_binding_state(val->res));
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
+
+ ret = vmw_rebind_all_dx_query(val->res);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type,
+ enum vmw_ctx_binding_type binding_type,
+ uint32 shader_slot,
+ uint32 view_ids[], u32 num_views,
+ u32 first_slot)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_cmdbuf_res_manager *man;
+ u32 i;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ man = sw_context->man;
+ for (i = 0; i < num_views; ++i) {
+ struct vmw_ctx_bindinfo_view binding;
+ struct vmw_resource *view = NULL;
+
+ if (view_ids[i] != SVGA3D_INVALID_ID) {
+ view = vmw_view_lookup(man, view_type, view_ids[i]);
+ if (IS_ERR(view)) {
+ DRM_ERROR("View not found.\n");
+ return PTR_ERR(view);
+ }
+
+ ret = vmw_view_res_val_add(sw_context, view);
+ if (ret) {
+ DRM_ERROR("Could not add view to "
+ "validation list.\n");
+ vmw_resource_unreference(&view);
+ return ret;
+ }
+ }
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = view;
+ binding.bi.bt = binding_type;
+ binding.shader_slot = shader_slot;
+ binding.slot = first_slot + i;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ shader_slot, binding.slot);
+ if (view)
+ vmw_resource_unreference(&view);
}
return 0;
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
+ if (cmd->body.type >= SVGA3D_RT_MAX) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_rt;
- bi.i1.rt_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ struct vmw_ctx_bindinfo_view binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_rt;
+ binding.slot = cmd->body.type;
+ vmw_binding_add(ctx_node->staged_bindings,
+ &binding.bi, 0, binding.slot);
}
return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBufferCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXPredCopyRegion body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dstSid, NULL);
+}
+
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct ttm_buffer_object *new_query_bo,
+ struct vmw_dma_buffer *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->num_pages > 4)) {
+ if (unlikely(new_query_bo->base.num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
- vmw_bo_pin(dev_priv->pinned_bo, false);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
/*
* We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* dummy queries in context destroy paths.
*/
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ if (!dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+ true);
+ dev_priv->dummy_query_bo_pinned = true;
+ }
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ vmw_dmabuf_reference(sw_context->cur_query_bo);
}
}
}
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
reloc->mob_loc = id;
reloc->location = NULL;
- ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
ret = -EINVAL;
goto out_no_reloc;
}
- bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -980,6 +1326,98 @@ out_no_reloc:
return ret;
}
+
+
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_define_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineQuery q;
+ } *cmd;
+
+ int ret;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *cotable_res;
+
+
+ if (ctx_node == NULL) {
+ DRM_ERROR("DX Context not set for query.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+ if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ return -EINVAL;
+
+ cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ vmw_resource_unreference(&cotable_res);
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB. In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dx_bind_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindQuery q;
+ } *cmd;
+
+ struct vmw_dma_buffer *vmw_bo;
+ int ret;
+
+
+ cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+ /*
+ * Look up the buffer pointed to by q.mobid, put it on the relocation
+ * list so its kernel mode MOB ID can be filled in later.
+ */
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ &vmw_bo);
+
+ if (ret != 0)
+ return ret;
+
+ sw_context->dx_query_mob = vmw_bo;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ return ret;
+}
+
+
+
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
+ if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) cur_state->stage);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return ret;
if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_tex;
- bi.i1.texture_stage = cur_state->stage;
- vmw_context_binding_add(ctx_node->staged_bindings,
- &bi);
+ struct vmw_ctx_bindinfo_tex binding;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_tex;
+ binding.texture_stage = cur_state->stage;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.texture_stage);
}
}
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource_val_node *val_node,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ struct vmw_dma_buffer *dma_buf;
+ int ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (ret)
+ return ret;
+
+ val_node->switching_backup = true;
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
+
+
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* @backup_offset: Offset of backup into MOB.
*
* This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- int ret;
- struct vmw_dma_buffer *dma_buf;
struct vmw_resource_val_node *val_node;
+ int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
- if (unlikely(ret != 0))
+ if (ret)
return ret;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_dmabuf_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
-
- return 0;
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ buf_id, backup_offset);
}
/**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
- struct vmw_ctx_bindinfo bi;
+ struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_compat_shader_lookup
- (vmw_context_res_man(ctx_node->res),
- cmd->body.shid,
- cmd->body.type);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return ret;
}
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+ return 0;
}
/**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
cmd->body.offsetInBytes);
}
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSingleConstantBuffer body;
+ } *cmd;
+ struct vmw_resource_val_node *res_node = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_cb binding;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.bt = vmw_ctx_binding_cb;
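+ /* Shader types start at SVGA3D_SHADERTYPE_MIN; make the slot zero-based. */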
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+ binding.offset = cmd->body.offsetInBytes;
+ binding.size = cmd->body.sizeInBytes;
+ binding.slot = cmd->body.slot;
+
+ if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+ binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+ (unsigned) cmd->body.type,
+ (unsigned) binding.slot);
+ return -EINVAL;
+ }
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, binding.slot);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShaderResources body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dShaderResourceViewId);
+
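+ /* Do the range check in 64 bits so start + count cannot wrap. */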
+ if ((u64) cmd->body.startView + (u64) num_sr_view >
+ (u64) SVGA3D_DX_MAX_SRVIEWS ||
+ cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Invalid shader binding.\n");
+ return -EINVAL;
+ }
+
+ return vmw_view_bindings_add(sw_context, vmw_view_sr,
+ vmw_ctx_binding_sr,
+ cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+ (void *) &cmd[1], num_sr_view,
+ cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetShader body;
+ } *cmd;
+ struct vmw_resource *res = NULL;
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_shader binding;
+ int ret = 0;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) cmd->body.type);
+ return -EINVAL;
+ }
+
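+ /* SVGA3D_INVALID_ID unbinds the stage; anything else must resolve. */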
+ if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+ res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader for binding.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, NULL);
+ if (ret)
+ goto out_unref;
+ }
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_dx_shader;
+ binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ binding.shader_slot, 0);
+out_unref:
+ if (res)
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validate an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_vb binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetVertexBuffers body;
+ SVGA3dVertexBuffer buf[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
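+ /* Number of SVGA3dVertexBuffer elements trailing the fixed body. */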
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dVertexBuffer);
+ if ((u64)num + (u64)cmd->body.startBuffer >
+ (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+ DRM_ERROR("Invalid number of vertex buffers.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->buf[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.bt = vmw_ctx_binding_vb;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.offset = cmd->buf[i].offset;
+ binding.stride = cmd->buf[i].stride;
+ binding.slot = i + cmd->body.startBuffer;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_ib binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetIndexBuffer body;
+ } *cmd;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_ib;
+ binding.offset = cmd->body.offset;
+ binding.format = cmd->body.format;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetRenderTargets body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+ u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dRenderTargetViewId);
+
+ if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+ DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+ vmw_ctx_binding_ds, 0,
+ &cmd->body.depthStencilViewId, 1, 0);
+ if (ret)
+ return ret;
+
+ return vmw_view_bindings_add(sw_context, vmw_view_rt,
+ vmw_ctx_binding_dx_rt, 0,
+ (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearRenderTargetView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearDepthStencilView body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource_val_node *srf_node;
+ struct vmw_resource *res;
+ enum vmw_view_type view_type;
+ int ret;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ uint32 sid;
+ } *cmd;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ view_type = vmw_view_cmd_to_type(header->id);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, &srf_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+
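+ /* Stage the new view: committed on success, reverted if the batch fails. */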
+ return vmw_view_add(sw_context->man,
+ ctx_node->res,
+ srf_node->res,
+ view_type,
+ cmd->defined_id,
+ header,
+ header->size + sizeof(*header),
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_bindinfo_so binding;
+ struct vmw_resource_val_node *res_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetSOTargets body;
+ SVGA3dSoTarget targets[];
+ } *cmd;
+ int i, ret, num;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ cmd = container_of(header, typeof(*cmd), header);
+ num = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dSoTarget);
+
+ if (num > SVGA3D_DX_MAX_SOTARGETS) {
+ DRM_ERROR("Invalid DX SO binding.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->targets[i].sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ binding.bi.ctx = ctx_node->res;
+ binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.bt = vmw_ctx_binding_so;
+ binding.offset = cmd->targets[i].offset;
+ binding.size = cmd->targets[i].sizeInBytes;
+ binding.slot = i;
+
+ vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ 0, binding.slot);
+ }
+
+ return 0;
+}
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ /*
+ * This is based on the fact that all affected define commands have
+ * the same initial command body layout.
+ */
+ struct {
+ SVGA3dCmdHeader header;
+ uint32 defined_id;
+ } *cmd;
+ enum vmw_so_type so_type;
+ int ret;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ so_type = vmw_so_cmd_to_type(header->id);
+ res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ union {
+ SVGA3dCmdDXReadbackSubResource r_body;
+ SVGA3dCmdDXInvalidateSubResource i_body;
+ SVGA3dCmdDXUpdateSubResource u_body;
+ SVGA3dSurfaceId sid;
+ };
+ } *cmd;
+
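+ /* All three body layouts must keep sid at the same offset. */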
+ BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+ BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+ offsetof(typeof(*cmd), sid));
+
+ cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->sid, NULL);
+}
+
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+ if (unlikely(ctx_node == NULL)) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - Validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ union vmw_view_destroy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+ struct vmw_resource *view;
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_remove(sw_context->man,
+ cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res,
+ &view);
+ if (ret || !view)
+ return ret;
+
+ /*
+ * Add view to the validate list iff it was not created using this
+ * command batch.
+ */
+ return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ vmw_resource_unreference(&res);
+ if (ret)
+ return ret;
+
+ return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ cmd->body.shaderId, cmd->body.type,
+ &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+ &sw_context->staged_cmd_res);
+ if (ret)
+ DRM_ERROR("Could not find shader to remove.\n");
+
+ return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (cmd->body.cid != SVGA3D_INVALID_ID) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter,
+ &cmd->body.cid, &ctx_node);
+ if (ret)
+ return ret;
+ } else {
+ ctx_node = sw_context->dx_ctx_node;
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+ }
+
+ res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ cmd->body.shid, 0);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Could not find shader to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &res_node);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ goto out_unref;
+ }
+
+ ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+out_unref:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
uint32_t cmd_id;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
false, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
- true, false, true)
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+ false, false, true),
+
+ /*
+ * DX commands
+ */
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+ &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+ &vmw_cmd_dx_set_shader_res, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+ &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+ &vmw_cmd_dx_set_index_buffer, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+ &vmw_cmd_dx_set_rendertargets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+ &vmw_cmd_ok, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+ &vmw_cmd_dx_check_subresource, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+ &vmw_cmd_dx_view_remove, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+ &vmw_cmd_dx_define_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+ &vmw_cmd_dx_destroy_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+ &vmw_cmd_dx_bind_shader, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+ &vmw_cmd_dx_so_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+ &vmw_cmd_dx_set_so_targets, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+ &vmw_cmd_buffer_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+ &vmw_cmd_pred_copy_check, true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
const struct vmw_cmd_entry *entry;
bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
- cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ cmd_id = ((uint32_t *)buf)[0];
/* Handle any non-3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
- cmd_id = le32_to_cpu(header->id);
- *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ *size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
*
* @list: The resource list.
*/
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+ struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
- if (unlikely(val->staged_bindings))
- kfree(val->staged_bindings);
+
+ if (val->staged_bindings) {
+ if (val->staged_bindings != sw_context->staged_bindings)
+ vmw_binding_state_free(val->staged_bindings);
+ else
+ sw_context->staged_bindings_inuse = false;
+ val->staged_bindings = NULL;
+ }
+
kfree(val);
}
}
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob)
{
+ struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+ base);
int ret;
-
- /*
- * Don't validate pinned buffers.
- */
-
- if (bo == dev_priv->pinned_bo ||
- (bo == dev_priv->dummy_query_bo &&
- dev_priv->dummy_query_bo_pinned))
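+ /* Pinned buffers stay put; skip validation for them. */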
+ if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+ return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+ false);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
return ret;
}
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ void *cmd;
+
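+ /* DX batches reserve fifo space tagged with their context id. */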
+ if (sw_context->dx_ctx_node)
+ cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ sw_context->dx_ctx_node->res->id);
+ else
+ cmd = vmw_fifo_reserve(dev_priv, command_size);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ return -ENOMEM;
+ }
+
+ vmw_apply_relocations(sw_context);
+ memcpy(cmd, kernel_commands, command_size);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_fifo_commit(dev_priv, command_size);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_header *header,
+ u32 command_size,
+ struct vmw_sw_context *sw_context)
+{
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ SVGA3D_INVALID_ID);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+ id, false, header);
+
+ vmw_apply_relocations(sw_context);
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+ return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to an in-kernel copy of the batch, or NULL.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands as passed in, which may be NULL. In that case *@header
+ * is set to NULL.
+ * If an error is encountered, the function returns an error pointer; when
+ * interrupted by a signal while sleeping, it returns -ERESTARTSYS cast to
+ * an error pointer.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ u32 command_size,
+ struct vmw_cmdbuf_header **header)
+{
+ size_t cmdbuf_size;
+ int ret;
+
+ *header = NULL;
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
+ if (command_size > SVGA_CB_MAX_SIZE) {
+ DRM_ERROR("Command buffer is too large.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* If possible, add a little space for fencing. */
+ cmdbuf_size = command_size + 512;
+ cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+ true, header);
+ if (IS_ERR(kernel_commands))
+ return kernel_commands;
+
+ ret = copy_from_user(kernel_commands, user_commands,
+ command_size);
+ if (ret) {
+ DRM_ERROR("Failed copying commands.\n");
+ vmw_cmdbuf_header_free(*header);
+ *header = NULL;
+ return ERR_PTR(-EFAULT);
+ }
+
+ return kernel_commands;
+}
+
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t handle)
+{
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource *res;
+ int ret;
+
+ if (handle == SVGA3D_INVALID_ID)
+ return 0;
+ ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+ handle, user_context_converter,
+ &res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or user DX context 0x%08x.\n",
+ (unsigned) handle);
+ return ret;
+ }
+
+ ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ sw_context->dx_ctx_node = ctx_node;
+ sw_context->man = vmw_context_res_man(res);
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
+ uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
+ struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
- void *cmd;
int ret;
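+ /* Throttle up front, before any locks or command buffers are held. */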
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+ throttle_us);
+
+ if (ret)
+ return ret;
+ }
+
+ kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+ kernel_commands, command_size,
+ &header);
+ if (IS_ERR(kernel_commands))
+ return PTR_ERR(kernel_commands);
+
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
+ if (ret) {
+ ret = -ERESTARTSYS;
+ goto out_free_header;
+ }
+ sw_context->kernel = false;
if (kernel_commands == NULL) {
- sw_context->kernel = false;
-
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
- } else
+ } else if (!header)
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
+ sw_context->dx_ctx_node = NULL;
+ sw_context->dx_query_mob = NULL;
+ sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (sw_context->staged_bindings)
+ vmw_binding_state_reset(sw_context->staged_bindings);
+
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
INIT_LIST_HEAD(&resource_list);
+ ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+ if (unlikely(ret != 0)) {
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+ goto out_err_nores;
+ }
+
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
+ /*
+ * Merge the resource lists before checking the return status
+ * from vmw_cmd_check_all so that all the open hashtabs will
+ * be handled properly even if vmw_cmd_check_all fails.
+ */
+ list_splice_init(&sw_context->ctx_resource_list,
+ &sw_context->resource_list);
+
if (unlikely(ret != 0))
goto out_err_nores;
@@ -2492,7 +3997,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (unlikely(ret != 0))
- goto out_err;
- }
-
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
@@ -2522,20 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock_binding;
}
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_unlock_binding;
+ if (!header) {
+ ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+ command_size, sw_context);
+ } else {
+ ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+ sw_context);
+ header = NULL;
}
-
- vmw_apply_relocations(sw_context);
- memcpy(cmd, kernel_commands, command_size);
-
- vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
- vmw_resource_relocations_free(&sw_context->res_relocations);
-
- vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+ if (ret)
+ goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2550,8 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resources_unreserve(sw_context, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
return 0;
@@ -2589,7 +4082,7 @@ out_unlock_binding:
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
- vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resources_unreserve(sw_context, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(&resource_list);
+ vmw_resource_list_unreference(sw_context, &resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
+out_free_header:
+ if (header)
+ vmw_cmdbuf_header_free(header);
return ret;
}
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
}
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&validate_list);
- pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
- query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
dev_priv->query_cid_valid = false;
}
- vmw_bo_pin(dev_priv->pinned_bo, false);
- vmw_bo_pin(dev_priv->dummy_query_bo, false);
- dev_priv->dummy_query_bo_pinned = false;
-
+ vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+ if (dev_priv->dummy_query_bo_pinned) {
+ vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+ }
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ DRM_INFO("Dummy query bo pin count: %d\n",
+ dev_priv->dummy_query_bo->pin_count);
out_unlock:
return;
@@ -2722,7 +4223,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- ttm_bo_unref(&dev_priv->pinned_bo);
+ vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
/**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+ struct drm_file *file_priv, size_t size)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_execbuf_arg arg;
int ret;
+ static const size_t copy_offset[] = {
+ offsetof(struct drm_vmw_execbuf_arg, context_handle),
+ sizeof(struct drm_vmw_execbuf_arg)};
+
+ if (unlikely(size < copy_offset[0])) {
+ DRM_ERROR("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+ return -EFAULT;
/*
- * This will allow us to extend the ioctl argument while
+ * Extend the ioctl argument while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
- * arg->version.
+ * arg.version.
*/
- if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+ if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+ arg.version == 0)) {
DRM_ERROR("Incorrect execbuf version.\n");
- DRM_ERROR("You're running outdated experimental "
- "vmwgfx user-space drivers.");
return -EINVAL;
}
+ if (arg.version > 1 &&
+ copy_from_user(&arg.context_handle,
+ (void __user *) (data + copy_offset[0]),
+ copy_offset[arg.version - 1] -
+ copy_offset[0]) != 0)
+ return -EFAULT;
+
+ switch (arg.version) {
+ case 1:
+ arg.context_handle = (uint32_t) -1;
+ break;
+ case 2:
+ if (arg.pad64 != 0) {
+ DRM_ERROR("Unused IOCTL data not set to zero.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg->commands,
- NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep,
+ (void __user *)(unsigned long)arg.commands,
+ NULL, arg.command_size, arg.throttle_us,
+ arg.context_handle,
+ (void __user *)(unsigned long)arg.fence_rep,
NULL);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f391fad..3b1faf7862a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
/**************************************************************************
*
* Copyright © 2007 David Airlie
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
#include <drm/ttm/ttm_placement.h>
@@ -40,21 +41,22 @@ struct vmw_fb_par {
void *vmalloc;
+ struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
+ void *bo_ptr;
+ unsigned bo_size;
+ struct drm_framebuffer *set_fb;
+ struct drm_display_mode *set_mode;
+ u32 fb_x;
+ u32 fb_y;
+ bool bo_iowrite;
u32 pseudo_palette[17];
- unsigned depth;
- unsigned bpp;
-
unsigned max_width;
unsigned max_height;
- void *bo_ptr;
- unsigned bo_size;
- bool bo_iowrite;
-
struct {
spinlock_t lock;
bool active;
@@ -63,6 +65,10 @@ struct vmw_fb_par {
unsigned x2;
unsigned y2;
} dirty;
+
+ struct drm_crtc *crtc;
+ struct drm_connector *con;
+ struct delayed_work local_work;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +83,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
- switch (par->depth) {
+ switch (par->set_fb->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +91,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
((blue & 0xff00) >> 8);
break;
default:
- DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+ DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
+ par->set_fb->bits_per_pixel);
return 1;
}
@@ -134,12 +141,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
- (var->xoffset != 0 || var->yoffset != 0)) {
- DRM_ERROR("Can not handle panning without display topology\n");
- return -EINVAL;
- }
-
if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +157,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return 0;
}
-static int vmw_fb_set_par(struct fb_info *info)
-{
- struct vmw_fb_par *par = info->par;
- struct vmw_private *vmw_priv = par->vmw_priv;
- int ret;
-
- info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
-
- ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
- info->fix.line_length,
- par->bpp, par->depth);
- if (ret)
- return ret;
-
- if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
- /* TODO check if pitch and offset changes */
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- /* This is really helpful since if this fails the user
- * can probably not see anything on the screen.
- */
- WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
-
- return 0;
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- return 0;
-}
-
static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;
@@ -205,65 +166,89 @@ static int vmw_fb_blank(int blank, struct fb_info *info)
* Dirty code
*/
-static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+static void vmw_fb_dirty_flush(struct work_struct *work)
{
+ struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+ local_work.work);
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
- int stride = (info->fix.line_length / 4);
- int *src = (int *)info->screen_base;
- __le32 __iomem *vram_mem = par->bo_ptr;
- unsigned long flags;
- unsigned x, y, w, h;
- int i, k;
- struct {
- uint32_t header;
- SVGAFifoCmdUpdate body;
- } *cmd;
+ unsigned long irq_flags;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ u32 cpp, max_x, max_y;
+ struct drm_clip_rect clip;
+ struct drm_framebuffer *cur_fb;
+ u8 *src_ptr, *dst_ptr;
if (vmw_priv->suspended)
return;
- spin_lock_irqsave(&par->dirty.lock, flags);
- if (!par->dirty.active) {
- spin_unlock_irqrestore(&par->dirty.lock, flags);
- return;
- }
- x = par->dirty.x1;
- y = par->dirty.y1;
- w = min(par->dirty.x2, info->var.xres) - x;
- h = min(par->dirty.y2, info->var.yres) - y;
- par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y2 = 0;
- spin_unlock_irqrestore(&par->dirty.lock, flags);
+ mutex_lock(&par->bo_mutex);
+ cur_fb = par->set_fb;
+ if (!cur_fb)
+ goto out_unlock;
- for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
- for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
- iowrite32(src[k], vram_mem + k);
+ spin_lock_irqsave(&par->dirty.lock, irq_flags);
+ if (!par->dirty.active) {
+ spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+ goto out_unlock;
}
-#if 0
- DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
-#endif
+ /*
+ * Handle panning when copying from vmalloc to framebuffer.
+ * Clip dirty area to framebuffer.
+ */
+ cpp = (cur_fb->bits_per_pixel + 7) / 8;
+ max_x = par->fb_x + cur_fb->width;
+ max_y = par->fb_y + cur_fb->height;
+
+ dst_x1 = par->dirty.x1 - par->fb_x;
+ dst_y1 = par->dirty.y1 - par->fb_y;
+ dst_x1 = max_t(s32, dst_x1, 0);
+ dst_y1 = max_t(s32, dst_y1, 0);
+
+ dst_x2 = par->dirty.x2 - par->fb_x;
+ dst_y2 = par->dirty.y2 - par->fb_y;
+ dst_x2 = min_t(s32, dst_x2, max_x);
+ dst_y2 = min_t(s32, dst_y2, max_y);
+ w = dst_x2 - dst_x1;
+ h = dst_y2 - dst_y1;
+ w = max_t(s32, 0, w);
+ h = max_t(s32, 0, h);
- cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return;
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+
+ if (w && h) {
+ dst_ptr = (u8 *)par->bo_ptr +
+ (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+ src_ptr = (u8 *)par->vmalloc +
+ ((dst_y1 + par->fb_y) * info->fix.line_length +
+ (dst_x1 + par->fb_x) * cpp);
+
+ while (h-- > 0) {
+ memcpy(dst_ptr, src_ptr, w*cpp);
+ dst_ptr += par->set_fb->pitches[0];
+ src_ptr += info->fix.line_length;
+ }
+
+ clip.x1 = dst_x1;
+ clip.x2 = dst_x2;
+ clip.y1 = dst_y1;
+ clip.y2 = dst_y2;
+
+ WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+ &clip, 1));
+ vmw_fifo_flush(vmw_priv, false);
}
-
- cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd->body.x = cpu_to_le32(x);
- cmd->body.y = cpu_to_le32(y);
- cmd->body.width = cpu_to_le32(w);
- cmd->body.height = cpu_to_le32(h);
- vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+out_unlock:
+ mutex_unlock(&par->bo_mutex);
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
unsigned x1, unsigned y1,
unsigned width, unsigned height)
{
- struct fb_info *info = par->vmw_priv->fb_info;
unsigned long flags;
unsigned x2 = x1 + width;
unsigned y2 = y1 + height;
@@ -277,7 +262,8 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
/* if we are active start the dirty work
* we share the work with the defio system */
if (par->dirty.active)
- schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+ schedule_delayed_work(&par->local_work,
+ VMW_DIRTY_DELAY);
} else {
if (x1 < par->dirty.x1)
par->dirty.x1 = x1;
@@ -291,6 +277,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+
+ if ((var->xoffset + var->xres) > var->xres_virtual ||
+ (var->yoffset + var->yres) > var->yres_virtual) {
+ DRM_ERROR("Requested panning can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&par->bo_mutex);
+ par->fb_x = var->xoffset;
+ par->fb_y = var->yoffset;
+ if (par->set_fb)
+ vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+ par->set_fb->height);
+ mutex_unlock(&par->bo_mutex);
+
+ return 0;
+}
+
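
For reference, userspace reaches this handler through the standard FBIOPAN_DISPLAY ioctl; a minimal sketch (error and descriptor handling elided):

	#include <sys/ioctl.h>
	#include <linux/fb.h>

	/* Pan the visible area to (0, yoff); on vmwgfx this marks the whole
	 * framebuffer dirty so the newly visible region gets flushed. */
	static int pan_to(int fb_fd, unsigned int yoff)
	{
		struct fb_var_screeninfo var;

		if (ioctl(fb_fd, FBIOGET_VSCREENINFO, &var))
			return -1;
		var.xoffset = 0;
		var.yoffset = yoff;
		return ioctl(fb_fd, FBIOPAN_DISPLAY, &var);
	}
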
static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -319,12 +327,17 @@ static void vmw_deferred_io(struct fb_info *info,
par->dirty.x2 = info->var.xres;
par->dirty.y2 = y2;
spin_unlock_irqrestore(&par->dirty.lock, flags);
- }
- vmw_fb_dirty_flush(par);
+ /*
+ * Since we've already waited on this work once, try to
+	 * execute it as soon as possible.
+ */
+ cancel_delayed_work(&par->local_work);
+ schedule_delayed_work(&par->local_work, 0);
+ }
};
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
.delay = VMW_DIRTY_DELAY,
.deferred_io = vmw_deferred_io,
};
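
The descriptor is hooked up at init time (see vmw_fb_init below); the generic fbdev deferred-I/O pattern is:

	/* Standard fbdev deferred-I/O hookup, as done in vmw_fb_init. */
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);
	/* ... and symmetrically on teardown: */
	fb_deferred_io_cleanup(info);
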
@@ -358,33 +371,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
* Bring up code
*/
-static struct fb_ops vmw_fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = vmw_fb_check_var,
- .fb_set_par = vmw_fb_set_par,
- .fb_setcolreg = vmw_fb_setcolreg,
- .fb_fillrect = vmw_fb_fillrect,
- .fb_copyarea = vmw_fb_copyarea,
- .fb_imageblit = vmw_fb_imageblit,
- .fb_pan_display = vmw_fb_pan_display,
- .fb_blank = vmw_fb_blank,
-};
-
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
- struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
- struct ttm_placement ne_placement;
int ret;
- ne_placement.num_placement = 1;
- ne_placement.placement = &ne_place;
- ne_placement.num_busy_placement = 1;
- ne_placement.busy_placement = &ne_place;
-
- ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -394,31 +386,261 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
}
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
- &ne_placement,
+ &vmw_sys_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
-
- ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ ttm_write_unlock(&vmw_priv->reservation_sem);
return 0;
err_unlock:
- ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+ int *depth)
+{
+ switch (var->bits_per_pixel) {
+ case 32:
+ *depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+ bool detach_bo,
+ bool unref_bo)
+{
+ struct drm_framebuffer *cur_fb = par->set_fb;
+ int ret;
+
+ /* Detach the KMS framebuffer from crtcs */
+ if (par->set_mode) {
+ struct drm_mode_set set;
+
+ set.crtc = par->crtc;
+ set.x = 0;
+ set.y = 0;
+ set.mode = NULL;
+ set.fb = NULL;
+ set.num_connectors = 1;
+ set.connectors = &par->con;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret) {
+ DRM_ERROR("Could not unset a mode.\n");
+ return ret;
+ }
+ drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+ par->set_mode = NULL;
+ }
+
+ if (cur_fb) {
+ drm_framebuffer_unreference(cur_fb);
+ par->set_fb = NULL;
+ }
+
+ if (par->vmw_bo && detach_bo) {
+ if (par->bo_ptr) {
+ ttm_bo_kunmap(&par->map);
+ par->bo_ptr = NULL;
+ }
+ if (unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
+ else
+ vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
+ }
+
+ return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+ struct drm_mode_fb_cmd mode_cmd;
+ struct vmw_fb_par *par = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_framebuffer *cur_fb;
+ struct vmw_framebuffer *vfb;
+ int ret = 0;
+ size_t new_bo_size;
+
+ ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+ if (ret)
+ return ret;
+
+ mode_cmd.width = var->xres;
+ mode_cmd.height = var->yres;
+ mode_cmd.bpp = var->bits_per_pixel;
+ mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+
+ cur_fb = par->set_fb;
+ if (cur_fb && cur_fb->width == mode_cmd.width &&
+ cur_fb->height == mode_cmd.height &&
+ cur_fb->bits_per_pixel == mode_cmd.bpp &&
+ cur_fb->depth == mode_cmd.depth &&
+ cur_fb->pitches[0] == mode_cmd.pitch)
+ return 0;
+
+ /* Need new buffer object ? */
+ new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+ ret = vmw_fb_kms_detach(par,
+ par->bo_size < new_bo_size ||
+ par->bo_size > 2*new_bo_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (!par->vmw_bo) {
+ ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+ &par->vmw_bo);
+ if (ret) {
+ DRM_ERROR("Failed creating a buffer object for "
+ "fbdev.\n");
+ return ret;
+ }
+ par->bo_size = new_bo_size;
+ }
+
+ vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+ true, &mode_cmd);
+ if (IS_ERR(vfb))
+ return PTR_ERR(vfb);
+
+ par->set_fb = &vfb->base;
+
+ if (!par->bo_ptr) {
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ return ret;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
+ return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ struct drm_mode_set set;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+ DRM_MODE_TYPE_DRIVER,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ struct drm_display_mode *old_mode;
+ struct drm_display_mode *mode;
+ int ret;
+
+ old_mode = par->set_mode;
+ mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+ if (!mode) {
+ DRM_ERROR("Could not create new fb mode.\n");
+ return -ENOMEM;
+ }
+
+ mode->hdisplay = var->xres;
+ mode->vdisplay = var->yres;
+ vmw_guess_mode_timing(mode);
+
+ if (old_mode && drm_mode_equal(old_mode, mode)) {
+ drm_mode_destroy(vmw_priv->dev, mode);
+ mode = old_mode;
+ old_mode = NULL;
+ } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+ mode->hdisplay *
+ (var->bits_per_pixel + 7) / 8,
+ mode->vdisplay)) {
+ drm_mode_destroy(vmw_priv->dev, mode);
+ return -EINVAL;
+ }
+
+ mutex_lock(&par->bo_mutex);
+ drm_modeset_lock_all(vmw_priv->dev);
+ ret = vmw_fb_kms_framebuffer(info);
+ if (ret)
+ goto out_unlock;
+
+ par->fb_x = var->xoffset;
+ par->fb_y = var->yoffset;
+
+ set.crtc = par->crtc;
+ set.x = 0;
+ set.y = 0;
+ set.mode = mode;
+ set.fb = par->set_fb;
+ set.num_connectors = 1;
+ set.connectors = &par->con;
+
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ goto out_unlock;
+
+ vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+ par->set_fb->width, par->set_fb->height);
+
+	/* If there already was stuff dirty we won't
+	 * schedule new work, so let's do it now */
+
+ schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+ if (old_mode)
+ drm_mode_destroy(vmw_priv->dev, old_mode);
+ par->set_mode = mode;
+
+ drm_modeset_unlock_all(vmw_priv->dev);
+ mutex_unlock(&par->bo_mutex);
+
+ return ret;
+}
+
+
+static struct fb_ops vmw_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vmw_fb_check_var,
+ .fb_set_par = vmw_fb_set_par,
+ .fb_setcolreg = vmw_fb_setcolreg,
+ .fb_fillrect = vmw_fb_fillrect,
+ .fb_copyarea = vmw_fb_copyarea,
+ .fb_imageblit = vmw_fb_imageblit,
+ .fb_pan_display = vmw_fb_pan_display,
+ .fb_blank = vmw_fb_blank,
+};
+
int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
- unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
@@ -428,9 +650,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- initial_width = min(vmw_priv->initial_width, fb_width);
- initial_height = min(vmw_priv->initial_height, fb_height);
-
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +663,35 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
*/
vmw_priv->fb_info = info;
par = info->par;
+ memset(par, 0, sizeof(*par));
+ INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
par->vmw_priv = vmw_priv;
- par->depth = fb_depth;
- par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
+ drm_modeset_lock_all(vmw_priv->dev);
+ ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+ par->max_height, &par->con,
+ &par->crtc, &init_mode);
+ if (ret) {
+ drm_modeset_unlock_all(vmw_priv->dev);
+ goto err_kms;
+ }
+
+ info->var.xres = init_mode->hdisplay;
+ info->var.yres = init_mode->vdisplay;
+ drm_modeset_unlock_all(vmw_priv->dev);
+
/*
* Create buffers and alloc memory
*/
- par->vmalloc = vmalloc(fb_size);
+ par->vmalloc = vzalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}
- ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
- if (unlikely(ret != 0))
- goto err_free;
-
- ret = ttm_bo_kmap(&par->vmw_bo->base,
- 0,
- par->vmw_bo->base.num_pages,
- &par->map);
- if (unlikely(ret != 0))
- goto err_unref;
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- par->bo_size = fb_size;
-
/*
* Fixed and var
*/
@@ -490,7 +709,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_len = fb_size;
info->pseudo_palette = par->pseudo_palette;
- info->screen_base = par->vmalloc;
+ info->screen_base = (char __iomem *)par->vmalloc;
info->screen_size = fb_size;
info->flags = FBINFO_DEFAULT;
@@ -508,18 +727,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
- info->var.bits_per_pixel = par->bpp;
+ info->var.bits_per_pixel = fb_bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
- info->var.xres = initial_width;
- info->var.yres = initial_height;
-
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -535,6 +750,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
+ mutex_init(&par->bo_mutex);
info->fbdefio = &vmw_defio;
fb_deferred_io_init(info);
@@ -542,16 +758,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
if (unlikely(ret != 0))
goto err_defio;
+ vmw_fb_set_par(info);
+
return 0;
err_defio:
fb_deferred_io_cleanup(info);
err_aper:
- ttm_bo_kunmap(&par->map);
-err_unref:
- ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
vfree(par->vmalloc);
+err_kms:
framebuffer_release(info);
vmw_priv->fb_info = NULL;
@@ -562,22 +778,19 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
- struct ttm_buffer_object *bo;
if (!vmw_priv->fb_info)
return 0;
info = vmw_priv->fb_info;
par = info->par;
- bo = &par->vmw_bo->base;
- par->vmw_bo = NULL;
/* ??? order */
fb_deferred_io_cleanup(info);
+ cancel_delayed_work_sync(&par->local_work);
unregister_framebuffer(info);
- ttm_bo_kunmap(&par->map);
- ttm_bo_unref(&bo);
+ (void) vmw_fb_kms_detach(par, true, true);
vfree(par->vmalloc);
framebuffer_release(info);
@@ -602,11 +815,11 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
spin_unlock_irqrestore(&par->dirty.lock, flags);
flush_delayed_work(&info->deferred_work);
+ flush_delayed_work(&par->local_work);
- par->bo_ptr = NULL;
- ttm_bo_kunmap(&par->map);
-
- vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+ mutex_lock(&par->bo_mutex);
+ (void) vmw_fb_kms_detach(par, true, false);
+ mutex_unlock(&par->bo_mutex);
return 0;
}
@@ -616,8 +829,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
- bool dummy;
- int ret;
if (!vmw_priv->fb_info)
return -EINVAL;
@@ -625,38 +836,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
- /* we are already active */
- if (par->bo_ptr != NULL)
- return 0;
-
- /* Make sure that all overlays are stoped when we take over */
- vmw_overlay_stop_all(vmw_priv);
-
- ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
- if (unlikely(ret != 0)) {
- DRM_ERROR("could not move buffer to start of VRAM\n");
- goto err_no_buffer;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base,
- 0,
- par->vmw_bo->base.num_pages,
- &par->map);
- BUG_ON(ret != 0);
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
-
+ vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-err_no_buffer:
- vmw_fb_set_par(info);
-
- vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
-
- /* If there already was stuff dirty we wont
- * schedule a new work, so lets do it now */
- schedule_delayed_work(&info->deferred_work, 0);
-
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..567ddede51d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
struct vmw_fence_obj *fence;
if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
if (fence_is_signaled_locked(&fence->base))
return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add39208..8be6c29f5eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
+struct vmw_temp_set_context {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTempSetContext body;
+};
+
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
- /* Non-Screen Object path does not support surfaces */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support surfaces */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
- uint32_t dummy;
+ fifo->dx = false;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
- /*
- * Allow mapping the first page read-only to user-space.
- */
-
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+ SVGA_REG_ENABLE_HIDE);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
- return vmw_fifo_send_fence(dev_priv, &dummy);
+
+ return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
static DEFINE_SPINLOCK(ping_lock);
unsigned long irq_flags;
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
* Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
*/
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+ uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
- return fifo_mem + (next_cmd >> 2);
+ return (void __force *) (fifo_mem +
+ (next_cmd >> 2));
} else {
need_bounce = true;
}
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
+
return NULL;
}
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+ int ctx_id)
+{
+ void *ret;
+
+ if (dev_priv->cman)
+ ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+ ctx_id, false, NULL);
+ else if (ctx_id == SVGA3D_INVALID_ID)
+ ret = vmw_local_fifo_reserve(dev_priv, bytes);
+ else {
+ WARN_ON("Command buffer has not been allocated.\n");
+ ret = NULL;
+ }
+ if (IS_ERR_OR_NULL(ret)) {
+ DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+ (unsigned) bytes);
+ dump_stack();
+ return NULL;
+ }
+
+ return ret;
+}
+
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- __le32 __iomem *fifo_mem,
+ u32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
}
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ if (fifo_state->dx)
+ bytes += sizeof(struct vmw_temp_set_context);
+
+ fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ if (dev_priv->cman)
+ vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+ else
+ vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+ might_sleep();
+
+ if (dev_priv->cman)
+ return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+ else
+ return 0;
+}
+
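
With the command-buffer manager in place, callers keep using the same reserve/commit pattern and may flush afterwards; a hedged sketch of a caller (the command layout mirrors SVGA_CMD_UPDATE as used elsewhere in this patch):

	/* Illustrative caller: reserve space, fill the command, commit,
	 * then kick processing without sleeping. */
	static int submit_update(struct vmw_private *dev_priv,
				 u32 x, u32 y, u32 w, u32 h)
	{
		struct {
			u32 header;
			SVGAFifoCmdUpdate body;
		} *cmd;

		cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
		if (unlikely(cmd == NULL))
			return -ENOMEM;

		cmd->header = SVGA_CMD_UPDATE;
		cmd->body.x = x;
		cmd->body.y = y;
		cmd->body.width = w;
		cmd->body.height = h;

		vmw_fifo_commit(dev_priv, sizeof(*cmd));
		return vmw_fifo_flush(dev_priv, false);
	}
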
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
- void *fm;
+ u32 *fm;
int ret = 0;
- uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+ uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
return 0;
}
- *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
- cmd_fence = (struct svga_fifo_cmd_fence *)
- ((unsigned long)fm + sizeof(__le32));
-
- iowrite32(*seqno, &cmd_fence->fence);
- vmw_fifo_commit(dev_priv, bytes);
+ *fm++ = SVGA_CMD_FENCE;
+ cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+ cmd_fence->fence = *seqno;
+ vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d803199f..66ffa1d4759c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23123c..0a970afed93b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
struct svga_3d_compat_cap {
SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_MOB_SIZE:
param->value = dev_priv->max_mob_size;
break;
+ case DRM_VMW_PARAM_SCREEN_TARGET:
+ param->value =
+ (dev_priv->active_display_unit == vmw_du_screen_target);
+ break;
+ case DRM_VMW_PARAM_DX:
+ param->value = dev_priv->has_dx;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t size;
- __le32 __iomem *fifo_mem;
+ u32 __iomem *fifo_mem;
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;
@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
int ret;
num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
if (unlikely(num_clips == 0))
return 0;
@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
int ret;
num_clips = arg->num_clips;
- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
if (unlikely(num_clips == 0))
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827ee499..9498a5e33c12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
+ if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR))
+ vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
return IRQ_HANDLED;
}
@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
* Block command submission while waiting for idle.
*/
- if (fifo_idle)
+ if (fifo_idle) {
down_read(&fifo_state->rwsem);
+ if (dev_priv->cman) {
+ ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+ 10*HZ);
+ if (ret)
+ goto out_err;
+ }
+ }
+
signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
+out_err:
if (fifo_idle)
up_read(&fifo_state->rwsem);
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ if ((*waiter_count)++ == 0) {
+ outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= flag;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ if (--(*waiter_count) == 0) {
+ dev_priv->irq_mask &= ~flag;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
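
These helpers are meant to be called in balanced pairs around a wait; a minimal sketch (the waiter-count field name is illustrative):

	/* Sketch only: the IRQ stays enabled while at least one waiter
	 * is registered; add/remove calls must balance. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
			       &dev_priv->cmdbuf_waiters);
	/* ... sleep on a wait queue until the command-buffer IRQ fires ... */
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				  &dev_priv->cmdbuf_waiters);
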
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8cbbddb..61fb7f3de311 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-static void vmw_clip_cliprects(struct drm_clip_rect *rects,
- int num_rects,
- struct vmw_clip_rect clip,
- SVGASignedRect *out_rects,
- int *out_num)
-{
- int i, k;
-
- for (i = 0, k = 0; i < num_rects; i++) {
- int x1 = max_t(int, clip.x1, rects[i].x1);
- int y1 = max_t(int, clip.y1, rects[i].y1);
- int x2 = min_t(int, clip.x2, rects[i].x2);
- int y2 = min_t(int, clip.y2, rects[i].y2);
-
- if (x1 >= x2)
- continue;
- if (y1 >= y2)
- continue;
-
- out_rects[k].left = x1;
- out_rects[k].top = y1;
- out_rects[k].right = x2;
- out_rects[k].bottom = y2;
- k++;
- }
-
- *out_num = k;
-}
-
-void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+void vmw_du_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
memcpy(&cmd[1], image, image_size);
- cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
- cmd->cursor.id = cpu_to_le32(0);
- cmd->cursor.width = cpu_to_le32(width);
- cmd->cursor.height = cpu_to_le32(height);
- cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
- cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
vmw_fifo_commit(dev_priv, cmd_size);
@@ -161,7 +123,7 @@ err_unreserve:
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
srf->snooper.age++;
- /* we can't call this function from this function since execbuf has
- * reserved fifo space.
- *
- * if (srf->snooper.crtc)
- * vmw_ldu_crtc_cursor_update_image(dev_priv,
- * srf->snooper.image, 64, 64,
- * du->hotspot_x, du->hotspot_y);
- */
-
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
* Surface framebuffer code
*/
-#define vmw_framebuffer_to_vfbs(x) \
- container_of(x, struct vmw_framebuffer_surface, base.base)
-
-struct vmw_framebuffer_surface {
- struct vmw_framebuffer base;
- struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
- struct list_head head;
- struct drm_master *master;
-};
-
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
- struct vmw_master *vmaster = vmw_master(vfbs->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_del(&vfbs->head);
- mutex_unlock(&vmaster->fb_surf_mutex);
-
- drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- ttm_base_object_unref(&vfbs->base.user_obj);
+ if (vfbs->base.user_obj)
+ ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
- head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kzalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Temporary fifo memory alloc failed.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- /* setup blits pointer */
- blits = (SVGASignedRect *)&cmd[1];
-
- /* initial clip region */
- left = clips->x1;
- right = clips->x2;
- top = clips->y1;
- bottom = clips->y2;
-
- /* skip the first clip rect */
- for (i = 1, clips_ptr = clips + inc;
- i < num_clips; i++, clips_ptr += inc) {
- left = min_t(int, left, (int)clips_ptr->x1);
- right = max_t(int, right, (int)clips_ptr->x2);
- top = min_t(int, top, (int)clips_ptr->y1);
- bottom = max_t(int, bottom, (int)clips_ptr->y2);
- }
-
- /* only need to do this once */
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- tmp[i].x1 = clips_ptr->x1 - left;
- tmp[i].x2 = clips_ptr->x2 - left;
- tmp[i].y1 = clips_ptr->y1 - top;
- tmp[i].y2 = clips_ptr->y2 - top;
- }
-
- /* do per unit writing, reuse fifo for each */
- for (i = 0; i < num_units; i++) {
- struct vmw_display_unit *unit = units[i];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left - unit->crtc.x;
- clip.y1 = top - unit->crtc.y;
- clip.x2 = right - unit->crtc.x;
- clip.y2 = bottom - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
-
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
-
- return ret;
-}
-
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, inc = 1;
- if (unlikely(vfbs->master != file_priv->master))
- return -EINVAL;
-
- /* Require ScreenObject support for 3D */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support 3D */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -EINVAL;
drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
inc = 2; /* skip source rects */
}
- ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
- flags, color,
- clips, num_clips, inc, NULL);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
return 0;
}
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips)
+{
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_object:
+ return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, vclips, num_clips);
+ case vmw_du_screen_target:
+ return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, false, true);
+ default:
+ WARN_ONCE(true,
+ "Readback called with invalid display system.\n");
+}
+
+ return -ENOSYS;
+}
+
+
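
A hedged sketch of a kernel-internal caller (the single full-framebuffer rect is illustrative; per the kernel-doc above, @file_priv may be NULL when no fence reporting is wanted):

	/* Sketch: read back one full-framebuffer rect into the dma-buf
	 * backed framebuffer; fence reporting elided (NULL/NULL). */
	struct drm_vmw_rect rect = {
		.x = 0, .y = 0,
		.w = vfb->base.width, .h = vfb->base.height,
	};

	ret = vmw_kms_readback(dev_priv, NULL, vfb, NULL, &rect, 1);
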
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
- *mode_cmd)
+ *mode_cmd,
+ bool is_dmabuf_proxy)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- /* 3D is only supported on HWv8 hosts which supports screen objects */
- if (!dev_priv->sou_priv)
+ /* 3D is only supported on HWv8 and newer hosts */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
/*
@@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
case 15:
format = SVGA3D_A1R5G5B5;
break;
- case 8:
- format = SVGA3D_LUMINANCE8;
- break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
- if (unlikely(format != surface->format)) {
+ /*
+ * For DX, surface format validation is done when surface->scanout
+ * is set.
+ */
+ if (!dev_priv->has_dx && format != surface->format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
@@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- if (!vmw_surface_reference(surface)) {
- DRM_ERROR("failed to reference surface %p\n", surface);
- ret = -EINVAL;
- goto out_err2;
- }
-
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
- vfbs->surface = surface;
+ vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handle;
- vfbs->master = drm_master_get(file_priv->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_add_tail(&vfbs->head, &vmaster->fb_surf);
- mutex_unlock(&vmaster->fb_surf_mutex);
+ vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
-out_err3:
- vmw_surface_unreference(&surface);
out_err2:
+ vmw_surface_unreference(&surface);
kfree(vfbs);
out_err1:
return ret;
@@ -752,14 +573,6 @@ out_err1:
* Dmabuf framebuffer code
*/
-#define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
-struct vmw_framebuffer_dmabuf {
- struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
-};
-
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
- ttm_base_object_unref(&vfbd->base.user_obj);
+ if (vfbd->base.user_obj)
+ ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
-{
- size_t fifo_size;
- int i;
-
- struct {
- uint32_t header;
- SVGAFifoCmdUpdate body;
- } *cmd;
-
- fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- for (i = 0; i < num_clips; i++, clips += increment) {
- cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd[i].body.x = cpu_to_le32(clips->x1);
- cmd[i].body.y = cpu_to_le32(clips->y1);
- cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
- cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
- }
-
- vmw_fifo_commit(dev_priv, fifo_size);
- return 0;
-}
-
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer)
-{
- int depth = framebuffer->base.depth;
- size_t fifo_size;
- int ret;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
-
- /* Emulate RGBA support, contrary to svga_reg.h this is not
- * supported by hosts. This is only a problem if we are reading
- * this value later and expecting what we uploaded back.
- */
- if (depth == 32)
- depth = 24;
-
- fifo_size = sizeof(*cmd);
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
- cmd->body.format.colorDepth = depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitches[0];
- cmd->body.ptr.gmrId = framebuffer->user_handle;
- cmd->body.ptr.offset = 0;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- kfree(cmd);
-
- return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- int i, k, num_units, ret;
- struct drm_crtc *crtc;
- size_t fifo_size;
-
- struct {
- uint32_t header;
- SVGAFifoCmdBlitGMRFBToScreen body;
- } *blits;
-
- ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
- if (unlikely(ret != 0))
- return ret; /* define_gmrfb prints warnings */
-
- fifo_size = sizeof(*blits) * num_clips;
- blits = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(blits == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- int hit_num = 0;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += increment) {
- int clip_x1 = clips_ptr->x1 - unit->crtc.x;
- int clip_y1 = clips_ptr->y1 - unit->crtc.y;
- int clip_x2 = clips_ptr->x2 - unit->crtc.x;
- int clip_y2 = clips_ptr->y2 - unit->crtc.y;
- int move_x, move_y;
-
- /* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- /* clip size to crtc size */
- clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
- clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
- /* translate both src and dest to bring clip into screen */
- move_x = min_t(int, clip_x1, 0);
- move_y = min_t(int, clip_y1, 0);
-
- /* actual translate done here */
- blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
- blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
- blits[hit_num].body.destRect.left = clip_x1 - move_x;
- blits[hit_num].body.destRect.top = clip_y1 - move_y;
- blits[hit_num].body.destRect.right = clip_x2;
- blits[hit_num].body.destRect.bottom = clip_y2;
- hit_num++;
- }
-
- /* no clips hit the crtc */
- if (hit_num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- fifo_size = sizeof(*blits) * hit_num;
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(blits);
-
- return ret;
-}
-
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
increment = 2;
}
- if (dev_priv->ldu_priv) {
- ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment);
- } else {
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment, NULL);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+ clips, NULL, num_clips, increment,
+ true, true);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+ clips, num_clips, increment,
+ true,
+ NULL);
+ break;
+ case vmw_du_legacy:
+ ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ONCE(true, "Dirty called with invalid display system.\n");
+ break;
}
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
/**
 * Pin the framebuffer's backing buffer; placement depends on the display unit.
*/
-static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
int ret;
- /* This code should not be used with screen objects */
- BUG_ON(dev_priv->sou_priv);
-
- vmw_overlay_pause_all(dev_priv);
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
-
- vmw_overlay_resume_all(dev_priv);
+ if (!buf)
+ return 0;
- WARN_ON(ret != 0);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_legacy:
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ vmw_overlay_resume_all(dev_priv);
+ break;
+ case vmw_du_screen_object:
+ case vmw_du_screen_target:
+ if (vfb->dmabuf)
+ return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+ false);
+
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+ &vmw_mob_placement, false);
+ default:
+ return -EINVAL;
+ }
- return 0;
+ return ret;
}
-static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
- if (!vfbd->buffer) {
- WARN_ON(!vfbd->buffer);
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+ if (WARN_ON(!buf))
return 0;
+
+ return vmw_dmabuf_unpin(dev_priv, buf, false);
+}
+
+/**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer. This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd *mode_cmd,
+ struct vmw_dma_buffer *dmabuf_mob,
+ struct vmw_surface **srf_out)
+{
+ uint32_t format;
+ struct drm_vmw_size content_base_size;
+ struct vmw_resource *res;
+ int ret;
+
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ case 15:
+ format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ format = SVGA3D_P8;
+ break;
+
+ default:
+ DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+ return -EINVAL;
}
- return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+ content_base_size.width = mode_cmd->width;
+ content_base_size.height = mode_cmd->height;
+ content_base_size.depth = 1;
+
+ ret = vmw_surface_gb_priv_define(dev,
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ srf_out);
+ if (ret) {
+ DRM_ERROR("Failed to allocate proxy content buffer\n");
+ return ret;
+ }
+
+ res = &(*srf_out)->res;
+
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void) vmw_resource_reserve(res, false, true);
+ vmw_dmabuf_unreference(&res->backup);
+ res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ res->backup_offset = 0;
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+
+ return 0;
}
+
+
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
@@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
/* Limited framebuffer color depth support for screen objects */
- if (dev_priv->sou_priv) {
+ if (dev_priv->active_display_unit == vmw_du_screen_object) {
switch (mode_cmd->depth) {
case 32:
case 24:
@@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- if (!vmw_dmabuf_reference(dmabuf)) {
- DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- ret = -EINVAL;
- goto out_err2;
- }
-
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- if (!dev_priv->sou_priv) {
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
- }
vfbd->base.dmabuf = true;
- vfbd->buffer = dmabuf;
+ vfbd->buffer = vmw_dmabuf_reference(dmabuf);
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
-out_err3:
- vmw_dmabuf_unreference(&dmabuf);
out_err2:
+ vmw_dmabuf_unreference(&dmabuf);
kfree(vfbd);
out_err1:
return ret;
}
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma buffer based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct vmw_framebuffer *vfb = NULL;
+ bool is_dmabuf_proxy = false;
+ int ret;
+
+ /*
+ * We cannot use the SurfaceDMA command in a non-accelerated VM,
+ * therefore, wrap the DMA buf in a surface so we can use the
+ * SurfaceCopy command.
+ */
+ if (dmabuf && only_2d &&
+ dev_priv->active_display_unit == vmw_du_screen_target) {
+ ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+ dmabuf, &surface);
+ if (ret)
+ return ERR_PTR(ret);
+
+ is_dmabuf_proxy = true;
+ }
+
+ /* Create the new framebuffer depending on what we have */
+ if (surface) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd,
+ is_dmabuf_proxy);
+
+ /*
+ * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * needed
+ */
+ if (is_dmabuf_proxy)
+ vmw_surface_unreference(&surface);
+ } else if (dmabuf) {
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+ mode_cmd);
+ } else {
+ BUG();
+ }
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ vfb->pin = vmw_framebuffer_pin;
+ vfb->unpin = vmw_framebuffer_unpin;
+
+ return vfb;
+}
+
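+/*
+ * Illustrative sketch, not part of this change: a typical caller wraps a
+ * dma buffer in a framebuffer and lets the helper pick the proxy path when
+ * only 2D presents will occur. The names bo and mode_cmd below stand in
+ * for the caller's buffer object and framebuffer metadata:
+ *
+ *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, NULL, true, &mode_cmd);
+ *	if (IS_ERR(vfb))
+ *		return PTR_ERR(vfb);
+ */
+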
/*
* Generic Kernel modesetting functions
*/
@@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd.pitch,
mode_cmd.height)) {
- DRM_ERROR("VRAM size is too small for requested mode.\n");
+ DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret)
goto err_out;
- /* Create the new framebuffer depending one what we got back */
- if (bo)
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- &mode_cmd);
- else if (surface)
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
- surface, &vfb, &mode_cmd);
- else
- BUG();
+ vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+ !(dev_priv->capabilities & SVGA_CAP_3D),
+ &mode_cmd);
+ if (IS_ERR(vfb)) {
+ ret = PTR_ERR(vfb);
+ goto err_out;
+ }
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid,
+ int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+}
+
+
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(surface == NULL);
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- left = clips->x;
- right = clips->x + clips->w;
- top = clips->y;
- bottom = clips->y + clips->h;
-
- for (i = 1; i < num_clips; i++) {
- left = min_t(int, left, (int)clips[i].x);
- right = max_t(int, right, (int)clips[i].x + clips[i].w);
- top = min_t(int, top, (int)clips[i].y);
- bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
- }
-
- /* only need to do this once */
- memset(cmd, 0, fifo_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
- blits = (SVGASignedRect *)&cmd[1];
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- for (i = 0; i < num_clips; i++) {
- tmp[i].x1 = clips[i].x - left;
- tmp[i].x2 = clips[i].x + clips[i].w - left;
- tmp[i].y1 = clips[i].y - top;
- tmp[i].y2 = clips[i].y + clips[i].h - top;
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left + destX - unit->crtc.x;
- clip.y1 = top + destY - unit->crtc.y;
- clip.x2 = right + destX - unit->crtc.x;
- clip.y2 = bottom + destY - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = sid;
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(cmd);
-out_free_tmp:
- kfree(tmp);
-
- return ret;
-}
-
-int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips)
-{
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
- struct vmw_dma_buffer *dmabuf = vfbd->buffer;
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, ret, num_units, blits_pos;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
- struct {
- uint32_t header;
- SVGAFifoCmdBlitScreenToGMRFB body;
- } *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(dmabuf == NULL);
- BUG_ON(!clips || !num_clips);
-
- /* take a safe guess at fifo size */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
- cmd->body.format.colorDepth = vfb->base.depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitches[0];
- cmd->body.ptr.gmrId = vfb->user_handle;
- cmd->body.ptr.offset = 0;
-
- blits = (void *)&cmd[1];
- blits_pos = 0;
- for (i = 0; i < num_units; i++) {
- struct drm_vmw_rect *c = clips;
- for (k = 0; k < num_clips; k++, c++) {
- /* transform clip coords to crtc origin based coords */
- int clip_x1 = c->x - units[i]->crtc.x;
- int clip_x2 = c->x - units[i]->crtc.x + c->w;
- int clip_y1 = c->y - units[i]->crtc.y;
- int clip_y2 = c->y - units[i]->crtc.y + c->h;
- int dest_x = c->x;
- int dest_y = c->y;
-
- /* compensate for clipping, we negate
- * a negative number and add that.
- */
- if (clip_x1 < 0)
- dest_x += -clip_x1;
- if (clip_y1 < 0)
- dest_y += -clip_y1;
-
- /* clip */
- clip_x1 = max(clip_x1, 0);
- clip_y1 = max(clip_y1, 0);
- clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
- clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
- /* and cull any rects that misses the crtc */
- if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
- clip_y1 >= units[i]->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
- blits[blits_pos].body.srcScreenId = units[i]->unit;
- blits[blits_pos].body.destOrigin.x = dest_x;
- blits[blits_pos].body.destOrigin.y = dest_y;
+ int ret;
- blits[blits_pos].body.srcRect.left = clip_x1;
- blits[blits_pos].body.srcRect.top = clip_y1;
- blits[blits_pos].body.srcRect.right = clip_x2;
- blits[blits_pos].body.srcRect.bottom = clip_y2;
- blits_pos++;
- }
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+ sid, destX, destY, clips,
+ num_clips);
+ break;
+ default:
+ WARN_ONCE(true,
+ "Present called with invalid display system.\n");
+ ret = -ENOSYS;
+ break;
}
- /* reset size here and use calculated exact size from loops */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep, NULL);
+ if (ret)
+ return ret;
- kfree(cmd);
+ vmw_fifo_flush(dev_priv, false);
- return ret;
+ return 0;
}
int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- /* assumed largest fb size */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = dev_priv->texture_max_width;
+ dev->mode_config.max_height = dev_priv->texture_max_height;
- ret = vmw_kms_init_screen_object_display(dev_priv);
- if (ret) /* Fallback */
- (void)vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_stdu_init_display(dev_priv);
+ if (ret) {
+ ret = vmw_kms_sou_init_display(dev_priv);
+ if (ret) /* Fallback */
+ ret = vmw_kms_ldu_init_display(dev_priv);
+ }
- return 0;
+ return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
+ int ret;
+
/*
* Docs say we should take the lock before calling this function,
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- if (dev_priv->sou_priv)
- vmw_kms_close_screen_object_display(dev_priv);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_close_display(dev_priv);
+ else if (dev_priv->active_display_unit == vmw_du_screen_target)
+ ret = vmw_kms_stdu_close_display(dev_priv);
else
- vmw_kms_close_legacy_display_system(dev_priv);
- return 0;
+ ret = vmw_kms_ldu_close_display(dev_priv);
+
+ return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ SVGA_FIFO_PITCHLOCK);
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
@@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+ return ((u64) pitch * (u64) height) < (u64)
+ ((dev_priv->active_display_unit == vmw_du_screen_target) ?
+ dev_priv->prim_bb_mem : dev_priv->vram_size);
}
@@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
- struct drm_file *file_priv ;
- struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
- int ret;
-
- if (event == NULL)
- return -EINVAL;
-
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- file_priv = event->base.file_priv;
- if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
- return -EINVAL;
-
- crtc->primary->fb = fb;
-
- /* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
-
- if (vfb->dmabuf)
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
- else
- ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
- true);
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-
-out_no_fence:
- crtc->primary->fb = old_fb;
- return ret;
-}
-
-
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
@@ -1808,8 +1354,9 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
}
}
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
+ return 0;
}
void vmw_du_connector_save(struct drm_connector *connector)
@@ -1919,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
@@ -1954,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
* If using screen objects, then assume 32-bpp because that's what the
* SVGA device is assuming
*/
- if (dev_priv->sou_priv)
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
assumed_bpp = 4;
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(max_width, dev_priv->stdu_max_width);
+ max_height = min(max_height, dev_priv->stdu_max_height);
+ }
+
/* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv,
- mode->hdisplay * assumed_bpp,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
+ if (vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_bpp,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+ mode = NULL;
+ }
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
+ if (du->pref_mode) {
+ list_del_init(&du->pref_mode->head);
+ drm_mode_destroy(dev, du->pref_mode);
}
+ /* mode might be null here, this is intended */
+ du->pref_mode = mode;
+
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
@@ -2003,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- /* Move the prefered mode first, help apps pick the right mode. */
- if (du->pref_mode)
- list_move(&du->pref_mode->head, &connector->probed_modes);
-
drm_mode_connector_list_update(connector, true);
+ /* Move the preferred mode first, help apps pick the right mode. */
+ drm_mode_sort(&connector->modes);
return 1;
}
@@ -2031,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
unsigned rects_size;
int ret;
int i;
+ u64 total_pixels = 0;
struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_vmw_rect bounding_box = {0};
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2062,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_free;
}
+
+ /*
+ * bounding_box.w and bunding_box.h are used as
+ * lower-right coordinates
+ */
+ if (rects[i].x + rects[i].w > bounding_box.w)
+ bounding_box.w = rects[i].x + rects[i].w;
+
+ if (rects[i].y + rects[i].h > bounding_box.h)
+ bounding_box.h = rects[i].y + rects[i].h;
+
+ total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+ }
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ /*
+ * For Screen Targets, the limits for a topology are:
+ * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+ * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+ */
+ u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
+ u64 pixel_mem = total_pixels * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2070,3 +1654,419 @@ out_free:
kfree(rects);
return ret;
}
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ u32 num_units = 0;
+ u32 i, k;
+
+ dirty->dev_priv = dev_priv;
+
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->primary->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ s32 crtc_x = unit->crtc.x;
+ s32 crtc_y = unit->crtc.y;
+ s32 crtc_width = unit->crtc.mode.hdisplay;
+ s32 crtc_height = unit->crtc.mode.vdisplay;
+ const struct drm_clip_rect *clips_ptr = clips;
+ const struct drm_vmw_rect *vclips_ptr = vclips;
+
+ dirty->unit = unit;
+ if (dirty->fifo_reserve_size > 0) {
+ dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->fifo_reserve_size);
+ if (!dirty->cmd) {
+ DRM_ERROR("Couldn't reserve fifo space "
+ "for dirty blits.\n");
+ return -ENOMEM;
+ }
+ memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+ }
+ dirty->num_hits = 0;
+ for (i = 0; i < num_clips; i++, clips_ptr += increment,
+ vclips_ptr += increment) {
+ s32 clip_left;
+ s32 clip_top;
+
+ /*
+ * Select clip array type. Note that integer type
+ * in @clips is unsigned short, whereas in @vclips
+ * it's 32-bit.
+ */
+ if (clips) {
+ dirty->fb_x = (s32) clips_ptr->x1;
+ dirty->fb_y = (s32) clips_ptr->y1;
+ dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+ crtc_x;
+ dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+ crtc_y;
+ } else {
+ dirty->fb_x = vclips_ptr->x;
+ dirty->fb_y = vclips_ptr->y;
+ dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+ dest_x - crtc_x;
+ dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+ dest_y - crtc_y;
+ }
+
+ dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+ dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+ /* Skip this clip if it's outside the crtc region */
+ if (dirty->unit_x1 >= crtc_width ||
+ dirty->unit_y1 >= crtc_height ||
+ dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+ continue;
+
+ /* Clip right and bottom to crtc limits */
+ dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+ crtc_width);
+ dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+ crtc_height);
+
+ /* Clip left and top to crtc limits */
+ clip_left = min_t(s32, dirty->unit_x1, 0);
+ clip_top = min_t(s32, dirty->unit_y1, 0);
+ dirty->unit_x1 -= clip_left;
+ dirty->unit_y1 -= clip_top;
+ dirty->fb_x -= clip_left;
+ dirty->fb_y -= clip_top;
+
+ dirty->clip(dirty);
+ }
+
+ dirty->fifo_commit(dirty);
+ }
+
+ return 0;
+}
+
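+/*
+ * Illustrative sketch, not part of this change: a display unit
+ * implementation is expected to embed struct vmw_kms_dirty in a private
+ * struct, set the callbacks and reservation size, and hand it to the
+ * helper. All my_-prefixed names below are hypothetical:
+ *
+ *	static void my_clip(struct vmw_kms_dirty *dirty)
+ *	{
+ *		// encode one blit for dirty->unit_x1/y1/x2/y2 into dirty->cmd
+ *		dirty->num_hits++;
+ *	}
+ *
+ *	static void my_commit(struct vmw_kms_dirty *dirty)
+ *	{
+ *		vmw_fifo_commit(dirty->dev_priv,
+ *				dirty->num_hits * sizeof(struct my_blit_cmd));
+ *	}
+ *
+ *	my_dirty.base.clip = my_clip;
+ *	my_dirty.base.fifo_commit = my_commit;
+ *	my_dirty.base.fifo_reserve_size = num_clips * sizeof(struct my_blit_cmd);
+ *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
+ *				   num_clips, 1, &my_dirty.base);
+ */
+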
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ttm_bo_reserve(bo, false, false, interruptible, NULL);
+ ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+ validate_as_mob);
+ if (ret)
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+ if (buf)
+ ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep)
+{
+ struct vmw_fence_obj *fence;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+ file_priv ? &handle : NULL);
+ if (buf)
+ vmw_fence_single_bo(&buf->base, fence);
+ if (file_priv)
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ ret, user_fence_rep, fence,
+ handle);
+ if (out_fence)
+ *out_fence = fence;
+ else
+ vmw_fence_obj_unreference(&fence);
+
+ vmw_kms_helper_buffer_revert(buf);
+}
+
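+/*
+ * Taken together, the buffer helpers above bracket command submission; a
+ * minimal sketch, assuming the commands themselves are built by the
+ * caller:
+ *
+ *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+ *	if (ret)
+ *		return ret;
+ *	// ... reserve fifo space, write blit commands, commit ...
+ *	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
+ *
+ * If building the commands fails before anything is submitted,
+ * vmw_kms_helper_buffer_revert(buf) undoes the prepare step instead.
+ */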
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+ vmw_kms_helper_buffer_revert(res->backup);
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Also reserves and validates the backup buffer if the resource is
+ * guest-backed. Returns 0 on success, negative error code on failure,
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+ else
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+
+ ret = vmw_resource_reserve(res, interruptible, false);
+ if (ret)
+ goto out_unlock;
+
+ if (res->backup) {
+ ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+ interruptible,
+ res->dev_priv->has_mob);
+ if (ret)
+ goto out_unreserve;
+ }
+ ret = vmw_resource_validate(res);
+ if (ret)
+ goto out_revert;
+ return 0;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+ vmw_resource_unreserve(res, false, NULL, 0);
+out_unlock:
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence)
+{
+ if (res->backup || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ out_fence, NULL);
+
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
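+/*
+ * A minimal sketch, not part of this change, of the resource bracket used
+ * by the surface dirty helpers; build_and_submit_commands() is a
+ * hypothetical stand-in for the caller's command construction:
+ *
+ *	ret = vmw_kms_helper_resource_prepare(srf, true);
+ *	if (ret)
+ *		return ret;
+ *	ret = build_and_submit_commands(srf);
+ *	if (ret) {
+ *		vmw_kms_helper_resource_revert(srf);
+ *		return ret;
+ *	}
+ *	vmw_kms_helper_resource_finish(srf, out_fence);
+ */
+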
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB need to be reserved and validated on call.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+ SVGA3dBox *box;
+ size_t copy_size = 0;
+ int i;
+
+ if (!clips)
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd) {
+ DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+ "update.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+ box = &cmd->body.box;
+
+ cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.image.sid = res->id;
+ cmd->body.image.face = 0;
+ cmd->body.image.mipmap = 0;
+
+ if (clips->x1 > size->width || clips->x2 > size->width ||
+ clips->y1 > size->height || clips->y2 > size->height) {
+ DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+ return -EINVAL;
+ }
+
+ box->x = clips->x1;
+ box->y = clips->y1;
+ box->z = 0;
+ box->w = clips->x2 - clips->x1;
+ box->h = clips->y2 - clips->y1;
+ box->d = 1;
+
+ copy_size += sizeof(*cmd);
+ }
+
+ vmw_fifo_commit(dev_priv, copy_size);
+
+ return 0;
+}
+
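+/*
+ * Illustrative use, not part of this change: after dirtying a dma-buf
+ * backed framebuffer that is wrapped by a proxy surface, the caller is
+ * expected to update the proxy from its MOB before blitting from it.
+ * vfbs, srf, clips and inc stand in for the caller's framebuffer-surface,
+ * reserved surface resource, clip array and clip increment:
+ *
+ *	if (vfbs->is_dmabuf_proxy) {
+ *		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+ *		if (ret)
+ *			return ret;
+ *	}
+ */
+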
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode)
+{
+ struct drm_connector *con;
+ struct vmw_display_unit *du;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ head) {
+ if (i == unit)
+ break;
+
+ ++i;
+ }
+
+ if (i != unit) {
+ DRM_ERROR("Could not find initial display unit.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&con->modes))
+ (void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+ if (list_empty(&con->modes)) {
+ DRM_ERROR("Could not find initial display mode.\n");
+ return -EINVAL;
+ }
+
+ du = vmw_connector_to_du(con);
+ *p_con = con;
+ *p_crtc = &du->crtc;
+
+ list_for_each_entry(mode, &con->modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ break;
+ }
+
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ *p_mode = mode;
+ else {
+ WARN_ONCE(true, "Could not find initial preferred mode.\n");
+ *p_mode = list_first_entry(&con->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c36bd57..782df7ca9794 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be derived from (embedded in a larger one) if
+ * more info needs to be passed between helper caller and helper callbacks.
+ */
+struct vmw_kms_dirty {
+ void (*fifo_commit)(struct vmw_kms_dirty *);
+ void (*clip)(struct vmw_kms_dirty *);
+ size_t fifo_reserve_size;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *unit;
+ void *cmd;
+ u32 num_hits;
+ s32 fb_x;
+ s32 fb_y;
+ s32 unit_x1;
+ s32 unit_y1;
+ s32 unit_x2;
+ s32 unit_y2;
+};
+
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
/**
* Base class for framebuffers
@@ -53,9 +102,27 @@ struct vmw_framebuffer {
uint32_t user_handle;
};
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
+ struct list_head head;
+ bool is_dmabuf_proxy; /* true if this is a proxy surface for a DMA buf */
+};
+
+
+struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+};
-#define vmw_crtc_to_du(x) \
- container_of(x, struct vmw_display_unit, crtc)
/*
* Basic cursor manipulation
@@ -120,11 +187,7 @@ struct vmw_display_unit {
/*
* Shared display unit functions - vmwgfx_kms.c
*/
-void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -133,7 +196,7 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
@@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty);
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment);
/*
* Screen Objects display functions - vmwgfx_scrn.c
*/
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f748ab4..bb63e4d795fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
list_del_init(&ldu->active);
- vmw_display_unit_cleanup(&ldu->base);
+ vmw_du_cleanup(&ldu->base);
kfree(ldu);
}
@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- vmw_fb_off(dev_priv);
+ vmw_svga_enable(dev_priv);
crtc->primary->fb = fb;
encoder->crtc = crtc;
@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
return 0;
}
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
else
vmw_ldu_init(dev_priv, 0);
+ dev_priv->active_display_unit = vmw_du_legacy;
+
+ DRM_INFO("Legacy Display Unit initialized\n");
+
return 0;
err_vblank_cleanup:
@@ -432,7 +436,7 @@ err_free:
return ret;
}
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
+
+
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
+{
+ size_t fifo_size;
+ int i;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ fifo_size = sizeof(*cmd) * num_clips;
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ for (i = 0; i < num_clips; i++, clips += increment) {
+ cmd[i].header = SVGA_CMD_UPDATE;
+ cmd[i].body.x = clips->x1;
+ cmd[i].body.y = clips->y1;
+ cmd[i].body.width = clips->x2 - clips->x1;
+ cmd[i].body.height = clips->y2 - clips->y1;
+ }
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8cd3cd..23db16008e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
* If we set up the screen target otable, screen objects stop working.
*/
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@ struct vmw_mob {
* @size: Size of the table (page-aligned).
* @page_table: Pointer to a struct vmw_mob holding the page table.
*/
-struct vmw_otable {
- unsigned long size;
- struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+ {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+ NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+ {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+ {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+ NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+ {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
*/
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
SVGAOTableType type,
+ struct ttm_buffer_object *otable_bo,
unsigned long offset,
struct vmw_otable *otable)
{
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
BUG_ON(otable->page_table != NULL);
- vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+ vsgt = vmw_bo_sg_table(otable_bo);
vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
WARN_ON(!vmw_piter_next(&iter));
@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = type;
- cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = otable->size;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
- } else {
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ return;
}
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
if (bo) {
int ret;
@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
otable->page_table = NULL;
}
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv: Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+ struct vmw_otable_batch *batch)
{
unsigned long offset;
unsigned long bo_size;
- struct vmw_otable *otables;
+ struct vmw_otable *otables = batch->otables;
SVGAOTableType i;
int ret;
- otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
- GFP_KERNEL);
- if (unlikely(otables == NULL)) {
- DRM_ERROR("Failed to allocate space for otable "
- "metadata.\n");
- return -ENOMEM;
- }
-
- otables[SVGA_OTABLE_MOB].size =
- VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
- otables[SVGA_OTABLE_SURFACE].size =
- VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
- otables[SVGA_OTABLE_CONTEXT].size =
- VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
- otables[SVGA_OTABLE_SHADER].size =
- VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
- otables[SVGA_OTABLE_SCREEN_TARGET].size =
- VMWGFX_NUM_GB_SCREEN_TARGET *
- SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
bo_size = 0;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (!otables[i].enabled)
+ continue;
+
otables[i].size =
(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
bo_size += otables[i].size;
@@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
ttm_bo_type_device,
&vmw_sys_ne_placement,
0, false, NULL,
- &dev_priv->otable_bo);
+ &batch->otable_bo);
if (unlikely(ret != 0))
goto out_no_bo;
- ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
if (unlikely(ret != 0))
goto out_unreserve;
- ret = vmw_bo_map_dma(dev_priv->otable_bo);
+ ret = vmw_bo_map_dma(batch->otable_bo);
if (unlikely(ret != 0))
goto out_unreserve;
- ttm_bo_unreserve(dev_priv->otable_bo);
+ ttm_bo_unreserve(batch->otable_bo);
offset = 0;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
- ret = vmw_setup_otable_base(dev_priv, i, offset,
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (!batch->otables[i].enabled)
+ continue;
+
+ ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+ offset,
&otables[i]);
if (unlikely(ret != 0))
goto out_no_setup;
offset += otables[i].size;
}
- dev_priv->otables = otables;
return 0;
out_unreserve:
- ttm_bo_unreserve(dev_priv->otable_bo);
+ ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
- vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+ for (i = 0; i < batch->num_otables; ++i) {
+ if (batch->otables[i].enabled)
+ vmw_takedown_otable_base(dev_priv, i,
+ &batch->otables[i]);
+ }
- ttm_bo_unref(&dev_priv->otable_bo);
+ ttm_bo_unref(&batch->otable_bo);
out_no_bo:
- kfree(otables);
return ret;
}
-
/*
- * vmw_otables_takedown - Take down guest backed memory object tables
+ * vmw_otables_setup - Set up guest backed memory object tables
*
* @dev_priv: Pointer to a device private structure
*
- * Take down the Guest Memory Object tables.
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
*/
-void vmw_otables_takedown(struct vmw_private *dev_priv)
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+ struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+ int ret;
+
+ if (dev_priv->has_dx) {
+ *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+ if (*otables == NULL)
+ return -ENOMEM;
+
+ memcpy(*otables, dx_tables, sizeof(dx_tables));
+ dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+ } else {
+ *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+ if (*otables == NULL)
+ return -ENOMEM;
+
+ memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+ dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+ }
+
+ ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+ if (unlikely(ret != 0))
+ goto out_setup;
+
+ return 0;
+
+out_setup:
+ kfree(*otables);
+ return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+ struct vmw_otable_batch *batch)
{
SVGAOTableType i;
- struct ttm_buffer_object *bo = dev_priv->otable_bo;
+ struct ttm_buffer_object *bo = batch->otable_bo;
int ret;
- for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
- vmw_takedown_otable_base(dev_priv, i,
- &dev_priv->otables[i]);
+ for (i = 0; i < batch->num_otables; ++i)
+ if (batch->otables[i].enabled)
+ vmw_takedown_otable_base(dev_priv, i,
+ &batch->otables[i]);
ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
@@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&dev_priv->otable_bo);
- kfree(dev_priv->otables);
- dev_priv->otables = NULL;
+ ttm_bo_unref(&batch->otable_bo);
}
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+ vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+ kfree(dev_priv->otable_batch.otables);
+}
/*
* vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
goto out_unreserve;
ttm_bo_unreserve(mob->pt_bo);
-
+
return 0;
out_unreserve:
@@ -429,15 +471,15 @@ out_unreserve:
* *@addr according to the page table entry size.
*/
#if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
- *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *((u64 *) *addr) = val >> PAGE_SHIFT;
*addr += 2;
}
#else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
- *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+ *(*addr)++ = val >> PAGE_SHIFT;
}
#endif
@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
unsigned long pt_page;
- __le32 *addr, *save_addr;
+ u32 *addr, *save_addr;
unsigned long i;
struct page *page;
@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
}
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
/*
@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob_id;
cmd->body.ptDepth = mob->pt_level;
- cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
return 0;
out_no_cmd_space:
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
if (pt_set_up)
ttm_bo_unref(&mob->pt_bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f68e9d0..76069f093ccf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
#include <drm/ttm/ttm_placement.h>
-#include "svga_overlay.h"
-#include "svga_escape.h"
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
- bool have_so = dev_priv->sou_priv ? true : false;
+ bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
int i, num_items;
SVGAGuestPtr ptr;
@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
if (!pin)
return vmw_dmabuf_unpin(dev_priv, buf, inter);
- if (!dev_priv->sou_priv)
- return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
- return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+ return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/**
@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
- return (dev_priv->overlay_priv != NULL &&
+ return (dev_priv->overlay_priv != NULL &&
((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..dce798053a96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
#define VMWGFX_IRQSTATUS_PORT 0x8
struct svga_guest_mem_descriptor {
- __le32 ppn;
- __le32 num_pages;
+ u32 ppn;
+ u32 num_pages;
};
struct svga_fifo_cmd_fence {
- __le32 fence;
+ u32 fence;
};
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
-#include "svga_types.h"
-
-#include "svga3d_reg.h"
+#include "device_include/svga3d_reg.h"
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#define VMW_RES_EVICT_ERR_COUNT 10
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+ write_lock(&dev_priv->resource_lock);
res->avail = false;
list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
}
if (likely(res->hw_destroy != NULL)) {
- res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
+ res->hw_destroy(res);
}
id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
kfree(res);
write_lock(&dev_priv->resource_lock);
-
if (id != -1)
idr_remove(idr, id);
+ write_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
struct vmw_resource *res = *p_res;
- struct vmw_private *dev_priv = res->dev_priv;
*p_res = NULL;
- write_lock(&dev_priv->resource_lock);
kref_put(&res->kref, vmw_resource_release);
- write_unlock(&dev_priv->resource_lock);
}
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
write_unlock(&dev_priv->resource_lock);
}
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
- struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+ struct idr *idr, int id)
{
struct vmw_resource *res;
read_lock(&dev_priv->resource_lock);
res = idr_find(idr, id);
- if (res && res->avail)
- kref_get(&res->kref);
- else
+ if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
res = NULL;
+
read_unlock(&dev_priv->resource_lock);
if (unlikely(res == NULL))
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
false, true);
+ ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
" creation.\n");
- goto out_unlock;
- }
+ goto out_ret;
+ }
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (unlikely(stream == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_ret;
}
res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out_ret;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
return ret;
}
@@ -1152,14 +1150,16 @@ out_bind_failed:
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
- * switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ * switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
+ bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset)
{
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!list_empty(&res->lru_head))
return;
- if (new_backup && new_backup != res->backup) {
-
+ if (switch_backup && new_backup != res->backup) {
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
- res->backup = vmw_dmabuf_reference(new_backup);
- lockdep_assert_held(&new_backup->base.resv->lock.base);
- list_add_tail(&res->mob_head, &new_backup->res_list);
+ if (new_backup) {
+ res->backup = vmw_dmabuf_reference(new_backup);
+ lockdep_assert_held(&new_backup->base.resv->lock.base);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ } else {
+ res->backup = NULL;
+ }
}
- if (new_backup)
+ if (switch_backup)
res->backup_offset = new_backup_offset;
- if (!res->func->may_evict || res->id == -1)
+ if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
write_lock(&dev_priv->resource_lock);
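A minimal sketch of the call patterns the reworked vmw_resource_unreserve()
signature supports (illustrative only; the local variable names are
hypothetical, not from this patch):

	/* Keep the current backup buffer untouched. */
	vmw_resource_unreserve(res, false, NULL, 0);
	/* Switch to a new backup buffer at the given offset. */
	vmw_resource_unreserve(res, true, new_buf, new_offset);
	/* Drop the backup buffer entirely. */
	vmw_resource_unreserve(res, true, NULL, 0);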
@@ -1259,7 +1262,8 @@ out_no_reserve:
* the buffer may not be bound to the resource at this point.
*
*/
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+ bool no_backup)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
- ret = vmw_resource_buf_alloc(res, true);
- if (unlikely(ret != 0))
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a backup buffer "
+ "of size %lu. bytes\n",
+ (unsigned long) res->backup_size);
return ret;
+ }
}
return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
struct ttm_validate_buffer val_buf;
unsigned err_count = 0;
- if (likely(!res->func->may_evict))
+ if (!res->func->create)
return 0;
val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
/**
* vmw_resource_move_notify - TTM move_notify_callback
*
- * @bo: The TTM buffer object about to move.
- * @mem: The truct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
}
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist. This function
+ * assumes that binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_private *dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackAllQuery body;
+ } *cmd;
+
+
+ /* No query bound, so do nothing */
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+ return 0;
+
+ dx_query_ctx = dx_query_mob->dx_query_ctx;
+ dev_priv = dx_query_ctx->dev_priv;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for "
+ "query MOB read back.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = dx_query_ctx->id;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /* Triggers a rebind the next time the affected context is bound */
+ dx_query_mob->dx_query_ctx = NULL;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_dma_buffer *dx_query_mob;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct vmw_private *dev_priv;
+
+
+ dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mutex_lock(&dev_priv->binding_mutex);
+
+ dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+ if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return;
+ }
+
+ /* If BO is being moved from MOB to system memory */
+ if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+ struct vmw_fence_obj *fence;
+
+ (void) vmw_query_readback_all(dx_query_mob);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /* Create a fence and attach the BO to it */
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_fence_single_bo(bo, fence);
+
+ if (fence != NULL)
+ vmw_fence_obj_unreference(&fence);
+
+ (void) ttm_bo_wait(bo, false, false, false);
+ } else
+ mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
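A condensed sketch of the swap-out sequence implemented above, assuming TTM
invokes move_notify before performing the actual move (summary only, not
code from this patch):

	/* A bound DX query MOB is moving from VMW_PL_MOB to TTM_PL_SYSTEM. */
	vmw_query_readback_all(dx_query_mob);	/* emit READBACK_ALL_QUERY */
	vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);		/* attach the fence to the BO */
	ttm_bo_wait(bo, false, false, false);	/* idle before the move */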
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ ret = vmw_resource_reserve(res, interruptible, false);
+ if (ret)
+ goto out_no_reserve;
+
+ if (res->pin_count == 0) {
+ struct vmw_dma_buffer *vbo = NULL;
+
+ if (res->backup) {
+ vbo = res->backup;
+
+ ttm_bo_reserve(&vbo->base, interruptible, false, false,
+ NULL);
+ if (!vbo->pin_count) {
+ ret = ttm_bo_validate
+ (&vbo->base,
+ res->func->backup_placement,
+ interruptible, false);
+ if (ret) {
+ ttm_bo_unreserve(&vbo->base);
+ goto out_no_validate;
+ }
+ }
+
+ /* Do we really need to pin the MOB as well? */
+ vmw_bo_pin_reserved(vbo, true);
+ }
+ ret = vmw_resource_validate(res);
+ if (vbo)
+ ttm_bo_unreserve(&vbo->base);
+ if (ret)
+ goto out_no_validate;
+ }
+ res->pin_count++;
+
+out_no_validate:
+ vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ ttm_write_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ ttm_read_lock(&dev_priv->reservation_sem, false);
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ ret = vmw_resource_reserve(res, false, true);
+ WARN_ON(ret);
+
+ WARN_ON(res->pin_count == 0);
+ if (--res->pin_count == 0 && res->backup) {
+ struct vmw_dma_buffer *vbo = res->backup;
+
+ ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+ vmw_bo_pin_reserved(vbo, false);
+ ttm_bo_unreserve(&vbo->base);
+ }
+
+ vmw_resource_unreserve(res, false, NULL, 0UL);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
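A hypothetical usage sketch of the pin API (the helper below is illustrative
and not part of this patch):

	static int example_pin_for_display(struct vmw_resource *res)
	{
		int ret = vmw_resource_pin(res, true);

		if (ret)
			return ret;

		/* res cannot be evicted and res->id stays stable here. */

		vmw_resource_unpin(res);
		return 0;
	}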
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+ return res->func->res_type;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed2854c..5994ef6265e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
#include "vmwgfx_drv.h"
+enum vmw_cmdbuf_res_state {
+ VMW_CMDBUF_RES_COMMITTED,
+ VMW_CMDBUF_RES_ADD,
+ VMW_CMDBUF_RES_DEL
+};
+
/**
* struct vmw_user_resource_conv - Identify a derived user-exported resource
* type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
* @bind: Bind a hardware resource to persistent buffer storage.
* @unbind: Unbind a hardware resource from persistent
* buffer storage.
+ * @commit_notify: If the resource is a command buffer managed resource,
+ * callback to notify that a define or remove command
+ * has been committed to the device.
*/
-
struct vmw_res_func {
enum vmw_res_type res_type;
bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
int (*unbind) (struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
+ void (*commit_notify)(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
};
int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d04d9a..b96d1ab610c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
#define vmw_connector_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.connector)
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: Difference between source clip rects and framebuffer coordinates.
+ * @dst_y: Difference between source clip rects and framebuffer coordinates.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+ struct vmw_kms_dirty base;
+ s32 left, right, top, bottom;
+ s32 dst_x, dst_y;
+ u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+ uint32 header;
+ SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_dmabuf_blit {
+ uint32 header;
+ SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+
+/*
+ * Other structs.
+ */
+
struct vmw_screen_object_display {
unsigned num_implicit;
struct vmw_framebuffer *implicit_fb;
+ SVGAFifoCmdDefineGMRFB cur;
+ struct vmw_dma_buffer *pinned_gmrfb;
};
/**
@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
- vmw_display_unit_cleanup(&sou->base);
+ vmw_du_cleanup(&sou->base);
kfree(sou);
}
@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
}
static void vmw_sou_del_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou)
+ struct vmw_screen_object_unit *sou)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
}
static void vmw_sou_add_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou,
- struct vmw_framebuffer *vfb)
+ struct vmw_screen_object_unit *sou,
+ struct vmw_framebuffer *vfb)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
- struct ttm_buffer_object *bo;
-
- if (unlikely(sou->buffer == NULL))
- return;
-
- bo = &sou->buffer->base;
- ttm_bo_unref(&bo);
- sou->buffer = NULL;
+ vmw_dmabuf_unreference(&sou->buffer);
sou->buffer_size = 0;
}
@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
dev_priv = vmw_priv(crtc->dev);
if (set->num_connectors > 1) {
- DRM_ERROR("to many connectors\n");
+ DRM_ERROR("Too many connectors\n");
return -EINVAL;
}
if (set->num_connectors == 1 &&
set->connectors[0] != &sou->base.connector) {
- DRM_ERROR("connector doesn't match %p %p\n",
+ DRM_ERROR("Connector doesn't match %p %p\n",
set->connectors[0], &sou->base.connector);
return -EINVAL;
}
@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- vmw_fb_off(dev_priv);
+ vmw_svga_enable(dev_priv);
if (mode->hdisplay != crtc->mode.hdisplay ||
mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return 0;
}
+/**
+ * Returns whether this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ if (!sou->base.is_implicit)
+ return true;
+
+ if (dev_priv->sou_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ BUG_ON(!sou->base.is_implicit);
+
+ dev_priv->sou_priv->implicit_fb =
+ vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+}
+
+static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_clip_rect clips;
+ int ret;
+
+ /* require ScreenObject support for page flipping */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+ return -EINVAL;
+
+ crtc->primary->fb = fb;
+
+ /* do a full screen dirty update */
+ clips.x1 = clips.y1 = 0;
+ clips.x2 = fb->width;
+ clips.y2 = fb->height;
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
+ &clips, 1, 1,
+ true, &fence);
+ else
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
+ &clips, NULL, NULL,
+ 0, 0, 1, 1, &fence);
+
+
+ if (ret != 0)
+ goto out_no_fence;
+ if (!fence) {
+ ret = -EINVAL;
+ goto out_no_fence;
+ }
+
+ if (event) {
+ struct drm_file *file_priv = event->base.file_priv;
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+ }
+
+ /*
+ * No need to hold on to this now. The only cleanup
+ * we need to do if we fail is unref the fence.
+ */
+ vmw_fence_obj_unreference(&fence);
+
+ if (vmw_crtc_to_du(crtc)->is_implicit)
+ vmw_sou_update_implicit_fb(dev_priv, crtc);
+
+ return ret;
+
+out_no_fence:
+ crtc->primary->fb = old_fb;
+ return ret;
+}
+
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
- .page_flip = vmw_du_page_flip,
+ .page_flip = vmw_sou_crtc_page_flip,
};
/*
@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
vmw_sou_destroy(vmw_connector_to_sou(connector));
}
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
- drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
return 0;
}
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
- DRM_INFO("Screen objects system initialized\n");
+ dev_priv->active_display_unit = vmw_du_screen_object;
+
+ DRM_INFO("Screen Objects Display Unit initialized\n");
return 0;
@@ -529,7 +671,7 @@ err_no_mem:
return ret;
}
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
return 0;
}
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ base)->buffer;
+ int depth = framebuffer->base.depth;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+
+ /* Emulate RGBA support; contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (!cmd) {
+ DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ cmd->body.format.colorDepth = depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+ /* Buffer is reserved in vram or GMR */
+ vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
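The define/blit split lets a single SVGA_CMD_DEFINE_GMRFB be followed by any
number of blits. A rough sketch of the FIFO stream the dirty and readback
helpers below produce (GMRFB_TO_SCREEN for dirty updates, SCREEN_TO_GMRFB
for readback):

	SVGA_CMD_DEFINE_GMRFB		/* once per framebuffer */
	SVGA_CMD_BLIT_GMRFB_TO_SCREEN	/* once per dirty clip rect */
	SVGA_CMD_BLIT_GMRFB_TO_SCREEN
	...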
/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * to match the destination bounding box encoded.
*/
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+ struct vmw_kms_sou_surface_dirty *sdirty =
+ container_of(dirty, typeof(*sdirty), base);
+ struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+ s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+ s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+ size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+ SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+ int i;
+
+ cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ cmd->header.size = sizeof(cmd->body) + region_size;
+
+ /*
+ * Use the destination bounding box to specify destination and
+ * source bounding regions.
+ */
+ cmd->body.destRect.left = sdirty->left;
+ cmd->body.destRect.right = sdirty->right;
+ cmd->body.destRect.top = sdirty->top;
+ cmd->body.destRect.bottom = sdirty->bottom;
+
+ cmd->body.srcRect.left = sdirty->left + trans_x;
+ cmd->body.srcRect.right = sdirty->right + trans_x;
+ cmd->body.srcRect.top = sdirty->top + trans_y;
+ cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+ cmd->body.srcImage.sid = sdirty->sid;
+ cmd->body.destScreenId = dirty->unit->unit;
+
+ /* Blits are relative to the destination rect. Translate. */
+ for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+ blit->left -= sdirty->left;
+ blit->right -= sdirty->left;
+ blit->top -= sdirty->top;
+ blit->bottom -= sdirty->top;
+ }
- if (!sou->base.is_implicit)
- return true;
+ vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
- if (dev_priv->sou_priv->num_implicit != 1)
- return false;
+ sdirty->left = sdirty->top = S32_MAX;
+ sdirty->right = sdirty->bottom = S32_MIN;
+}
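A worked example of the translation above (the numbers are illustrative):
with dirty->unit->crtc.x == 100 and sdirty->dst_x == 20, trans_x == 80, so a
destination rect with left == 20 is sourced from left == 20 + 80 == 100;
the source region thus follows the screen object's position on the virtual
desktop.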
- return true;
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_surface_dirty *sdirty =
+ container_of(dirty, typeof(*sdirty), base);
+ struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+ SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+ /* Destination rect. */
+ blit += dirty->num_hits;
+ blit->left = dirty->unit_x1;
+ blit->top = dirty->unit_y1;
+ blit->right = dirty->unit_x2;
+ blit->bottom = dirty->unit_y2;
+
+ /* Destination bounding box */
+ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+ sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+ sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+ sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+ dirty->num_hits++;
}
/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL, in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
*/
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_kms_sou_surface_dirty sdirty;
+ int ret;
- BUG_ON(!sou->base.is_implicit);
+ if (!srf)
+ srf = &vfbs->surface->res;
- dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+ ret = vmw_kms_helper_resource_prepare(srf, true);
+ if (ret)
+ return ret;
+
+ sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+ sdirty.base.clip = vmw_sou_surface_clip;
+ sdirty.base.dev_priv = dev_priv;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+ sizeof(SVGASignedRect) * num_clips;
+
+ sdirty.sid = srf->id;
+ sdirty.left = sdirty.top = S32_MAX;
+ sdirty.right = sdirty.bottom = S32_MIN;
+ sdirty.dst_x = dest_x;
+ sdirty.dst_y = dest_y;
+
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+ vmw_kms_helper_resource_finish(srf, out_fence);
+
+ return ret;
+}
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dirty blit clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of dirty blit clips.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ vmw_fifo_commit(dirty->dev_priv,
+ sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a dirty blit cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+ blit += dirty->num_hits;
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = dirty->unit->unit;
+ blit->body.srcOrigin.x = dirty->fb_x;
+ blit->body.srcOrigin.y = dirty->fb_y;
+ blit->body.destRect.left = dirty->unit_x1;
+ blit->body.destRect.top = dirty->unit_y1;
+ blit->body.destRect.right = dirty->unit_x2;
+ blit->body.destRect.bottom = dirty->unit_y2;
+ dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL, in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ base)->buffer;
+ struct vmw_kms_dirty dirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+ false);
+ if (ret)
+ return ret;
+
+ ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+ if (unlikely(ret != 0))
+ goto out_revert;
+
+ dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+ dirty.clip = vmw_sou_dmabuf_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ num_clips;
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+ 0, 0, num_clips, increment, &dirty);
+ vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+ return ret;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ vmw_fifo_commit(dirty->dev_priv,
+ sizeof(struct vmw_kms_sou_readback_blit) *
+ dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+ blit += dirty->num_hits;
+ blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+ blit->body.srcScreenId = dirty->unit->unit;
+ blit->body.destOrigin.x = dirty->fb_x;
+ blit->body.destOrigin.y = dirty->fb_y;
+ blit->body.srcRect.left = dirty->unit_x1;
+ blit->body.srcRect.top = dirty->unit_y1;
+ blit->body.srcRect.right = dirty->unit_x2;
+ blit->body.srcRect.bottom = dirty->unit_y2;
+ dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_kms_dirty dirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+ if (ret)
+ return ret;
+
+ ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+ if (unlikely(ret != 0))
+ goto out_revert;
+
+ dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+ dirty.clip = vmw_sou_readback_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+ num_clips;
+ ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+ 0, 0, num_clips, 1, &dirty);
+ vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+ user_fence_rep);
+
+ return ret;
+
+out_revert:
+ vmw_kms_helper_buffer_revert(buf);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a43aa6..bba1ee395478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
uint32_t size;
+ uint8_t num_input_sig;
+ uint8_t num_output_sig;
};
struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
+struct vmw_dx_shader {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ struct vmw_resource *cotable;
+ u32 id;
+ bool committed;
+ struct list_head cotable_head;
+};
+
static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+static uint64_t vmw_user_shader_size;
+
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
.unbind = vmw_gb_shader_unbind
};
+static const struct vmw_res_func vmw_dx_shader_func = {
+ .res_type = vmw_res_shader,
+ .needs_backup = true,
+ .may_evict = false,
+ .type_name = "dx shaders",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_shader_create,
+ /*
+ * The destroy callback is only called with a committed resource on
+ * context destroy, in which case we destroy the cotable anyway,
+ * so there's no need to destroy DX shaders separately.
+ */
+ .destroy = NULL,
+ .bind = vmw_dx_shader_bind,
+ .unbind = vmw_dx_shader_unbind,
+ .commit_notify = vmw_dx_shader_commit_notify,
+};
+
/**
* Shader management:
*/
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
return container_of(res, struct vmw_shader, res);
}
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_dx_shader, res);
+}
+
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
- (void) vmw_gb_shader_destroy(res);
+ if (likely(res->func->destroy))
+ (void) res->func->destroy(res);
+ else
+ res->id = -1;
}
+
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
uint32_t size,
uint64_t offset,
SVGA3dShaderType type,
+ uint8_t num_input_sig,
+ uint8_t num_output_sig,
struct vmw_dma_buffer *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
int ret;
- ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_shader_func);
-
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_gb_shader_func);
if (unlikely(ret != 0)) {
if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
}
shader->size = size;
shader->type = type;
+ shader->num_input_sig = num_input_sig;
+ shader->num_output_sig = num_output_sig;
vmw_resource_activate(res, vmw_hw_shader_destroy);
return 0;
}
+/*
+ * GB shader code:
+ */
+
static int vmw_gb_shader_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_scrub(&res->binding_head);
+ vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+}
+
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (state == VMW_CMDBUF_RES_ADD) {
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_cotable_add_resource(shader->cotable,
+ &shader->cotable_head);
+ shader->committed = true;
+ res->id = shader->id;
+ mutex_unlock(&dev_priv->binding_mutex);
+ } else {
+ mutex_lock(&dev_priv->binding_mutex);
+ list_del_init(&shader->cotable_head);
+ shader->committed = false;
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd;
+
+ if (!list_empty(&shader->cotable_head) || !shader->committed)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+ shader->ctx->id);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "scrubbing.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = shader->ctx->id;
+ cmd->body.shid = shader->id;
+ cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.offsetInBytes = res->backup_offset;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ int ret = 0;
+
+ WARN_ON_ONCE(!shader->committed);
+
+ if (!list_empty(&res->mob_head)) {
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_shader_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+
+ res->id = shader->id;
+ return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_shader_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindShader body;
+ } *cmd;
+
+ if (list_empty(&shader->cotable_head))
+ return 0;
+
+ WARN_ON_ONCE(!shader->committed);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "scrubbing.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = shader->ctx->id;
+ cmd->body.shid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ res->id = -1;
+ list_del_init(&shader->cotable_head);
return 0;
}
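The scrub/unscrub pair is symmetric: both emit SVGA_3D_CMD_DX_BIND_SHADER,
differing only in the MOB id they bind (condensed sketch of the two bodies
above):

	/* unscrub: reattach the backup MOB */
	cmd->body.mobid = res->backup->base.mem.start;
	/* scrub: detach without reserving the MOB buffer */
	cmd->body.mobid = SVGA3D_INVALID_ID;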
/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_shader_scrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ if (ret)
+ return ret;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_dx_shader *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, cotable_head) {
+ WARN_ON(vmw_dx_shader_scrub(&entry->res));
+ if (!readback)
+ entry->committed = false;
+ }
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+ vmw_resource_unreference(&shader->cotable);
+ kfree(shader);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_dx_shader *shader;
+ struct vmw_resource *res;
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ int ret;
+
+ if (!vmw_shader_dx_size)
+ vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+ if (!vmw_shader_id_ok(user_key, shader_type))
+ return -EINVAL;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+ false, true);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ return ret;
+ }
+
+ shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+ if (!shader) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+ return -ENOMEM;
+ }
+
+ res = &shader->res;
+ shader->ctx = ctx;
+ shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+ shader->id = user_key;
+ shader->committed = false;
+ INIT_LIST_HEAD(&shader->cotable_head);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_dx_shader_res_free, &vmw_dx_shader_func);
+ if (ret)
+ goto out_resource_init;
+
+ /*
+ * The user_key name-space is not per shader type for DX shaders,
+ * so when hashing, use a single zero shader type.
+ */
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, 0),
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = shader->id;
+ vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+
+
+/**
* User-space shader management:
*/
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
+ uint8_t num_input_sig,
+ uint8_t num_output_sig,
struct ttm_object_file *tfile,
u32 *handle)
{
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
- offset, shader_type, buffer,
+ offset, shader_type, num_input_sig,
+ num_output_sig, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
@@ -407,11 +791,11 @@ out:
}
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type)
{
struct vmw_shader *shader;
struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
- offset, shader_type, buffer,
+ offset, shader_type, 0, 0, buffer,
vmw_shader_free);
out_err:
@@ -457,19 +841,20 @@ out_err:
}
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+ enum drm_vmw_shader_type shader_type_drm,
+ u32 buffer_handle, size_t size, size_t offset,
+ uint8_t num_input_sig, uint8_t num_output_sig,
+ uint32_t *shader_handle)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_shader_create_arg *arg =
- (struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
- if (arg->buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+ if (buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
&buffer);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
}
if ((u64)buffer->base.num_pages * PAGE_SIZE <
- (u64)arg->size + (u64)arg->offset) {
+ (u64)size + (u64)offset) {
DRM_ERROR("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
}
- switch (arg->shader_type) {
+ switch (shader_type_drm) {
case drm_vmw_shader_type_vs:
shader_type = SVGA3D_SHADERTYPE_VS;
break;
case drm_vmw_shader_type_ps:
shader_type = SVGA3D_SHADERTYPE_PS;
break;
- case drm_vmw_shader_type_gs:
- shader_type = SVGA3D_SHADERTYPE_GS;
- break;
default:
DRM_ERROR("Illegal shader type.\n");
ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_bad_arg;
- ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
- shader_type, tfile, &arg->shader_handle);
+ ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+ shader_type, num_input_sig,
+ num_output_sig, tfile, shader_handle);
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
}
/**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
*
* Returns true if valid false if not.
*/
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
}
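An illustrative key computation under the bounds enforced by
vmw_shader_id_ok() (the values are made up): a user_key of 0x12345 (within
20 bits) and a shader_type of 2 (within 4 bits) pack as

	u32 key = 0x12345 | (2 << 20);	/* == 0x212345 */

so the user key occupies bits 0-19 and the shader type bits 20-23.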
/**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
- u32 user_key, SVGA3dShaderType shader_type,
- struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
{
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ struct vmw_resource *dummy;
+
+ if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key,
- shader_type),
- list);
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type),
+ list, &dummy);
}
/**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res;
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto no_reserve;
- ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key, shader_type),
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
no_reserve:
@@ -639,7 +1023,7 @@ out:
}
/**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
@@ -650,14 +1034,26 @@ out:
* found. An error pointer otherwise.
*/
struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
- u32 user_key,
- SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type)
{
- if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ if (!vmw_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
- return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
- vmw_compat_shader_key(user_key,
- shader_type));
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+ vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_shader_create_arg *arg =
+ (struct drm_vmw_shader_create_arg *)data;
+
+ return vmw_shader_define(dev, file_priv, arg->shader_type,
+ arg->buffer_handle,
+ arg->size, arg->offset,
+ 0, 0,
+ &arg->shader_handle);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 000000000000..5a73eebd0f35
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be destroyed,
+ * otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views pointing
+ * to it, and all context bindings of those views. Similarly we must restore
+ * the view bindings, views and surfaces pointed to by the views when a
+ * context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
+ * @rcu: RCU head used to free this structure after a grace period.
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to-view list.
+ * @view_type: View type.
+ * @view_id: User-space per-context view id. Currently also used as the
+ * per-context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+ struct rcu_head rcu;
+ struct vmw_resource res;
+ struct vmw_resource *ctx; /* Immutable */
+ struct vmw_resource *srf; /* Immutable */
+ struct vmw_resource *cotable; /* Immutable */
+ struct list_head srf_head; /* Protected by binding_mutex */
+ struct list_head cotable_head; /* Protected by binding_mutex */
+ unsigned view_type; /* Immutable */
+ unsigned view_id; /* Immutable */
+ u32 cmd_size; /* Immutable */
+ bool committed; /* Protected by binding_mutex */
+ u32 cmd[1]; /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+ .res_type = vmw_res_view,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "DX view",
+ .backup_placement = NULL,
+ .create = vmw_view_create,
+ .commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+ uint32 view_id;
+ uint32 sid;
+};
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_view *view = vmw_view(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (state == VMW_CMDBUF_RES_ADD) {
+ struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+ list_add_tail(&view->srf_head, &srf->view_list);
+ vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+ view->committed = true;
+ res->id = view->view_id;
+
+ } else {
+ list_del_init(&view->cotable_head);
+ list_del_init(&view->srf_head);
+ view->committed = false;
+ res->id = -1;
+ }
+ mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+ struct vmw_view *view = vmw_view(res);
+ struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct vmw_view_define body;
+ } *cmd;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ if (!view->committed) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return 0;
+ }
+
+ cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+ view->ctx->id);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+ memcpy(cmd, &view->cmd, view->cmd_size);
+ WARN_ON(cmd->body.view_id != view->view_id);
+ /* Sid may have changed due to surface eviction. */
+ WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+ cmd->body.sid = view->srf->id;
+ vmw_fifo_commit(res->dev_priv, view->cmd_size);
+ res->id = view->view_id;
+ list_add_tail(&view->srf_head, &srf->view_list);
+ vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_view *view = vmw_view(res);
+ struct {
+ SVGA3dCmdHeader header;
+ union vmw_view_destroy body;
+ } *cmd;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ vmw_binding_res_list_scrub(&res->binding_head);
+
+ if (!view->committed || res->id == -1)
+ return 0;
+
+ cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+ if (!cmd) {
+ DRM_ERROR("Failed reserving FIFO space for view "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.view_id = view->view_id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ res->id = -1;
+ list_del_init(&view->cotable_head);
+ list_del_init(&view->srf_head);
+
+ return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ WARN_ON(vmw_view_destroy(res));
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a hash key suitable for the command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+ return user_key | (view_type << 20);
+}
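+
+/*
+ * A minimal sketch of the resulting key layout, assuming user_key fits in
+ * bits 0-19 (which vmw_view_id_ok() below guarantees, since
+ * SVGA_COTABLE_MAX_IDS < 1 << 20, as asserted in vmw_so_build_asserts()):
+ *
+ *	vmw_view_key(5, vmw_view_rt) == 5 | (1 << 20) == 0x100005
+ *
+ * Keeping the type in the high bits means views of different types can
+ * never collide in the command buffer resource manager hash table.
+ */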
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) are
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+ return (user_key < SVGA_COTABLE_MAX_IDS &&
+ view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+ struct vmw_view *view = vmw_view(res);
+ size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ vmw_resource_unreference(&view->cotable);
+ vmw_resource_unreference(&view->srf);
+ kfree_rcu(view, rcu);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the command buffer resource manager identifying the
+ * view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ struct vmw_resource *srf,
+ enum vmw_view_type view_type,
+ u32 user_key,
+ const void *cmd,
+ size_t cmd_size,
+ struct list_head *list)
+{
+ static const size_t vmw_view_define_sizes[] = {
+ [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+ [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+ [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+ };
+
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ struct vmw_resource *res;
+ struct vmw_view *view;
+ size_t size;
+ int ret;
+
+ if (cmd_size != vmw_view_define_sizes[view_type] +
+ sizeof(SVGA3dCmdHeader)) {
+ DRM_ERROR("Illegal view create command size.\n");
+ return -EINVAL;
+ }
+
+ if (!vmw_view_id_ok(user_key, view_type)) {
+ DRM_ERROR("Illegal view add view id.\n");
+ return -EINVAL;
+ }
+
+ size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for view"
+ " creation.\n");
+ return ret;
+ }
+
+ view = kmalloc(size, GFP_KERNEL);
+ if (!view) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+ return -ENOMEM;
+ }
+
+ res = &view->res;
+ view->ctx = ctx;
+ view->srf = vmw_resource_reference(srf);
+ view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+ view->view_type = view_type;
+ view->view_id = user_key;
+ view->cmd_size = cmd_size;
+ view->committed = false;
+ INIT_LIST_HEAD(&view->srf_head);
+ INIT_LIST_HEAD(&view->cotable_head);
+ memcpy(&view->cmd, cmd, cmd_size);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_view_res_free, &vmw_view_func);
+ if (ret)
+ goto out_resource_init;
+
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type),
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = view->view_id;
+ vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
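+
+/*
+ * A hedged usage sketch (the local names below are hypothetical): a
+ * command stream validator that has already looked up the context @ctx,
+ * the surface @srf and the raw view define command @cmd might stage a
+ * shader-resource view like this, assuming a context-to-manager accessor
+ * such as vmw_context_res_man():
+ *
+ *	ret = vmw_view_add(vmw_context_res_man(ctx), ctx, srf, vmw_view_sr,
+ *			   user_key, cmd, cmd_size, &staged_list);
+ *
+ * The view becomes visible to vmw_view_lookup() only once the staged
+ * list is committed by the command buffer resource manager.
+ */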
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the shader namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, enum vmw_view_type view_type,
+ struct list_head *list,
+ struct vmw_resource **res_p)
+{
+ if (!vmw_view_id_ok(user_key, view_type)) {
+ DRM_ERROR("Illegal view remove view id.\n");
+ return -EINVAL;
+ }
+
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type),
+ list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_view *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, cotable_head)
+ WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list)
+{
+ struct vmw_view *entry, *next;
+
+ WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+ list_for_each_entry_safe(entry, next, list, srf_head)
+ WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself is holding a reference, so as long as
+ * the view resource is alive, the surface resource will be too.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+ return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_view_type view_type,
+ u32 user_key)
+{
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+ vmw_view_key(user_key, view_type));
+}
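+
+/*
+ * Lookup sketch for a hypothetical bind path: the returned pointer is
+ * refcounted, so it must be unreferenced when the caller is done:
+ *
+ *	struct vmw_resource *view;
+ *
+ *	view = vmw_view_lookup(man, vmw_view_sr, user_key);
+ *	if (IS_ERR(view))
+ *		return PTR_ERR(view);
+ *	...
+ *	vmw_resource_unreference(&view);
+ */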
+
+const u32 vmw_view_destroy_cmds[] = {
+ [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+ [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+ [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+ [vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+ [vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+ [vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+ [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+ [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+ [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+ [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+ [vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+ [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to dump various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+ /* Assert that our vmw_view_cmd_to_type() function is correct. */
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+ BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+ SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+ /* Assert that our "one body fits all" assumption is valid */
+ BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+ /* Assert that the view key space can hold all view ids. */
+ BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+ /*
+ * Assert that the offset of sid in all view define commands
+ * is what we assume it to be.
+ */
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 000000000000..268738387b5e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+ vmw_view_sr,
+ vmw_view_rt,
+ vmw_view_ds,
+ vmw_view_max,
+};
+
+enum vmw_so_type {
+ vmw_so_el,
+ vmw_so_bs,
+ vmw_so_ds,
+ vmw_so_rs,
+ vmw_so_ss,
+ vmw_so_so,
+ vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case,
+ * the size of this union will not equal the size of a u32 and the
+ * assumption is invalid; we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+ struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+ struct SVGA3dCmdDXDestroyShaderResourceView srv;
+ struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+ u32 view_id;
+};
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+ u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+ if (tmp > (u32)vmw_view_max)
+ return vmw_view_max;
+
+ return (enum vmw_view_type) tmp;
+}
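+
+/*
+ * Worked example, assuming the command id layout verified in
+ * vmw_so_build_asserts() (define/destroy ids alternate, starting at
+ * SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, the "base" below):
+ *
+ *	id  == SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW    (base + 3)
+ *	tmp == (base + 3 - base) / 2 == 1 == vmw_view_rt
+ */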
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a strategy similar to that of vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+ switch (id) {
+ case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+ case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+ return vmw_so_el;
+ case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+ return vmw_so_bs;
+ case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+ return vmw_so_ds;
+ case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+ return vmw_so_rs;
+ case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+ case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+ return vmw_so_ss;
+ case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+ case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+ return vmw_so_so;
+ default:
+ break;
+ }
+ return vmw_so_max;
+}
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ struct vmw_resource *srf,
+ enum vmw_view_type view_type,
+ u32 user_key,
+ const void *cmd,
+ size_t cmd_size,
+ struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, enum vmw_view_type view_type,
+ struct list_head *list,
+ struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_view_type view_type,
+ u32 user_key);
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 000000000000..c22e2df1b336
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
+/******************************************************************************
+ *
+ * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+ SAME_AS_DISPLAY = 0,
+ SEPARATE_SURFACE,
+ SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @pitch: Pitch of the DMA buffer in bytes.
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+ struct vmw_kms_dirty base;
+ SVGA3dTransferType transfer;
+ s32 left, right, top, bottom;
+ u32 pitch;
+ union {
+ struct vmw_dma_buffer *buf;
+ u32 sid;
+ };
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed. The dimension of this will always
+ * match the display mode. If the display mode matches
+ * content_vfbs dimensions, then this is a pointer into the
+ * corresponding field in content_vfbs. If not, then this
+ * is a separate buffer to which content_vfbs will blit to.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type: content_fb type
+ * @defined: true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+ struct vmw_display_unit base;
+
+ struct vmw_surface *display_srf;
+ struct drm_framebuffer *content_fb;
+
+ enum stdu_content_type content_fb_type;
+
+ bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to avoid
+ * pinning the same resource twice.
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed. As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+ if (stdu->display_srf) {
+ struct vmw_resource *res = &stdu->display_srf->res;
+
+ vmw_resource_unpin(res);
+
+ if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+ vmw_resource_unreference(&res);
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+ }
+
+ stdu->display_srf = NULL;
+ }
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a Screen Target that we can use later. This function is called
+ * whenever the framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBScreenTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.width = stdu->display_srf->base_size.width;
+ cmd->body.height = stdu->display_srf->base_size.height;
+ cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+ cmd->body.dpi = 0;
+ cmd->body.xRoot = stdu->base.crtc.x;
+ cmd->body.yRoot = stdu->base.crtc.y;
+
+ if (!stdu->base.is_implicit) {
+ cmd->body.xRoot = stdu->base.gui_x;
+ cmd->body.yRoot = stdu->base.gui_y;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ stdu->defined = true;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target. Set to NULL to blank screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping.
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu,
+ struct vmw_resource *res)
+{
+ SVGA3dSurfaceImageId image;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBScreenTarget body;
+ } *cmd;
+
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined\n");
+ return -EINVAL;
+ }
+
+ /* Set up image using information in vfb */
+ memset(&image, 0, sizeof(image));
+ image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space binding a screen target\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+ cmd->body.image = image;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
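+
+/*
+ * Blanking sketch: passing a NULL resource encodes SVGA3D_INVALID_ID,
+ * which detaches any bound surface from the screen target, e.g.:
+ *
+ *	ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ *	if (ret == 0)
+ *		ret = vmw_stdu_update_st(dev_priv, stdu);
+ *
+ * This is essentially the sequence vmw_stdu_crtc_set_config() uses below
+ * to turn a CRTC off.
+ */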
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+ s32 left, s32 right, s32 top, s32 bottom)
+{
+ struct vmw_stdu_update *update = cmd;
+
+ update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+ update->header.size = sizeof(update->body);
+
+ update->body.stid = unit;
+ update->body.rect.x = left;
+ update->body.rect.y = top;
+ update->body.rect.w = right - left;
+ update->body.rect.h = bottom - top;
+}
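+
+/*
+ * For example, a bounding box of left 8, top 16, right 40, bottom 48
+ * becomes the update rect x = 8, y = 16, w = 32, h = 32 on the unit.
+ */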
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely, typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ struct vmw_stdu_update *cmd;
+ struct drm_crtc *crtc = &stdu->base.crtc;
+
+ if (!stdu->defined) {
+ DRM_ERROR("No screen target defined");
+ return -EINVAL;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ return -ENOMEM;
+ }
+
+ vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+ 0, crtc->mode.vdisplay);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+ struct vmw_screen_target_display_unit *stdu)
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBScreenTarget body;
+ } *cmd;
+
+
+ /* Nothing to do if not successfully defined */
+ if (unlikely(!stdu->defined))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+ cmd->header.size = sizeof(cmd->body);
+
+ cmd->body.stid = stdu->base.unit;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /* Force sync */
+ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to sync with HW");
+
+ stdu->defined = false;
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set: mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer *vfb;
+ struct vmw_framebuffer_surface *new_vfbs;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *new_fb;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ crtc = set->crtc;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ stdu = vmw_crtc_to_stdu(crtc);
+ mode = set->mode;
+ new_fb = set->fb;
+ dev_priv = vmw_priv(crtc->dev);
+
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("Too many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &stdu->base.connector) {
+ DRM_ERROR("Connectors don't match %p %p\n",
+ set->connectors[0], &stdu->base.connector);
+ return -EINVAL;
+ }
+
+
+ /* Since they always map one to one these are safe */
+ connector = &stdu->base.connector;
+ encoder = &stdu->base.encoder;
+
+
+ /*
+ * After this point the CRTC will be considered off unless a new fb
+ * is bound
+ */
+ if (stdu->defined) {
+ /* Unbind current surface by binding an invalid one */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /* Update Screen Target, display will now be blank */
+ if (crtc->primary->fb) {
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ crtc->primary->fb = NULL;
+ crtc->enabled = false;
+ encoder->crtc = NULL;
+ connector->encoder = NULL;
+
+ vmw_stdu_unpin_display(stdu);
+ stdu->content_fb = NULL;
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+
+ ret = vmw_stdu_destroy_st(dev_priv, stdu);
+ /* The hardware is hung, give up */
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+
+ /* Any of these conditions means the caller wants CRTC off */
+ if (set->num_connectors == 0 || !mode || !new_fb)
+ return 0;
+
+
+ if (set->x + mode->hdisplay > new_fb->width ||
+ set->y + mode->vdisplay > new_fb->height) {
+ DRM_ERROR("Set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ stdu->content_fb = new_fb;
+ vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+ if (vfb->dmabuf)
+ stdu->content_fb_type = SEPARATE_DMA;
+
+ /*
+ * If the requested mode is different than the width and height
+ * of the FB or if the content buffer is a DMA buf, then allocate
+ * a display FB that matches the dimension of the mode
+ */
+ if (mode->hdisplay != new_fb->width ||
+ mode->vdisplay != new_fb->height ||
+ stdu->content_fb_type != SAME_AS_DISPLAY) {
+ struct vmw_surface content_srf;
+ struct drm_vmw_size display_base_size = {0};
+ struct vmw_surface *display_srf;
+
+
+ display_base_size.width = mode->hdisplay;
+ display_base_size.height = mode->vdisplay;
+ display_base_size.depth = 1;
+
+ /*
+ * If content buffer is a DMA buf, then we have to construct
+ * surface info
+ */
+ if (stdu->content_fb_type == SEPARATE_DMA) {
+
+ switch (new_fb->bits_per_pixel) {
+ case 32:
+ content_srf.format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ content_srf.format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ content_srf.format = SVGA3D_P8;
+ break;
+
+ default:
+ DRM_ERROR("Invalid format\n");
+ ret = -EINVAL;
+ goto err_unref_content;
+ }
+
+ content_srf.flags = 0;
+ content_srf.mip_levels[0] = 1;
+ content_srf.multisample_count = 0;
+ } else {
+
+ stdu->content_fb_type = SEPARATE_SURFACE;
+
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ content_srf = *new_vfbs->surface;
+ }
+
+
+ ret = vmw_surface_gb_priv_define(crtc->dev,
+ 0, /* because kernel visible only */
+ content_srf.flags,
+ content_srf.format,
+ true, /* a scanout buffer */
+ content_srf.mip_levels[0],
+ content_srf.multisample_count,
+ 0,
+ display_base_size,
+ &display_srf);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Cannot allocate a display FB.\n");
+ goto err_unref_content;
+ }
+
+ stdu->display_srf = display_srf;
+ } else {
+ new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ stdu->display_srf = new_vfbs->surface;
+ }
+
+
+ ret = vmw_stdu_pin_display(stdu);
+ if (unlikely(ret != 0)) {
+ stdu->display_srf = NULL;
+ goto err_unref_content;
+ }
+
+ vmw_svga_enable(dev_priv);
+
+ /*
+ * Steps to displaying a surface, assume surface is already
+ * bound:
+ * 1. define a screen target
+ * 2. bind a fb to the screen target
+ * 3. update that screen target (this is done later by
+ * vmw_kms_stdu_do_surface_dirty_or_present)
+ */
+ ret = vmw_stdu_define_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_destroy_st;
+
+
+ connector->encoder = encoder;
+ encoder->crtc = crtc;
+
+ crtc->mode = *mode;
+ crtc->primary->fb = new_fb;
+ crtc->enabled = true;
+
+ return ret;
+
+err_unpin_destroy_st:
+ vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_unref_content:
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should've been allocated
+ * using k[mz]alloc, and should've been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_target_display_unit *stdu;
+ int ret;
+
+ if (crtc == NULL)
+ return -EINVAL;
+
+ dev_priv = vmw_priv(crtc->dev);
+ stdu = vmw_crtc_to_stdu(crtc);
+ crtc->primary->fb = new_fb;
+ stdu->content_fb = new_fb;
+
+ if (stdu->display_srf) {
+ /*
+ * If the display surface is the same as the content surface
+ * then remove the reference
+ */
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ if (stdu->defined) {
+ /* Unbind the current surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (unlikely(ret != 0))
+ goto err_out;
+ }
+ vmw_stdu_unpin_display(stdu);
+ stdu->display_srf = NULL;
+ }
+ }
+
+
+ if (!new_fb) {
+ /* Blanks the display */
+ (void) vmw_stdu_update_st(dev_priv, stdu);
+
+ return 0;
+ }
+
+
+ if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+ stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+ ret = vmw_stdu_pin_display(stdu);
+ if (ret) {
+ stdu->display_srf = NULL;
+ goto err_out;
+ }
+
+ /* Bind display surface */
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (unlikely(ret != 0))
+ goto err_unpin_display_and_content;
+ }
+
+ /* Update display surface: after this point everything is bound */
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (event) {
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_file *file_priv = event->base.file_priv;
+
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+ vmw_fence_obj_unreference(&fence);
+ }
+
+ return ret;
+
+err_unpin_display_and_content:
+ vmw_stdu_unpin_display(stdu);
+err_out:
+ crtc->primary->fb = NULL;
+ stdu->content_fb = NULL;
+ return ret;
+}
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ dirty->num_hits++;
+
+ if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+ return;
+
+ /* Destination bounding box */
+ ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+ ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+ ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+ ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *ddirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+ struct vmw_stdu_dma *cmd = dirty->cmd;
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+ SVGA3dCmdSurfaceDMASuffix *suffix =
+ (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+ size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd->header.size = sizeof(cmd->body) + blit_size;
+ vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+ cmd->body.guest.pitch = ddirty->pitch;
+ cmd->body.host.sid = stdu->display_srf->res.id;
+ cmd->body.host.face = 0;
+ cmd->body.host.mipmap = 0;
+ cmd->body.transfer = ddirty->transfer;
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+ if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+ blit_size += sizeof(struct vmw_stdu_update);
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+ ddirty->left, ddirty->right,
+ ddirty->top, ddirty->bottom);
+ }
+
+ vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+ ddirty->left = ddirty->top = S32_MAX;
+ ddirty->right = ddirty->bottom = S32_MIN;
+}
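+
+/*
+ * Reserved FIFO layout assumed by the clip and commit callbacks above,
+ * matching the fifo_reserve_size computed in vmw_kms_stdu_dma():
+ *
+ *	struct vmw_stdu_dma		header + DMA body
+ *	SVGA3dCopyBox[num_clips]	one box per clip rect
+ *	SVGA3dCmdSurfaceDMASuffix	suffix
+ *	struct vmw_stdu_update		only when writing to host VRAM
+ */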
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible)
+{
+ struct vmw_dma_buffer *buf =
+ container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_stdu_dirty ddirty;
+ int ret;
+
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+ false);
+ if (ret)
+ return ret;
+
+ ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM;
+ ddirty.left = ddirty.top = S32_MAX;
+ ddirty.right = ddirty.bottom = S32_MIN;
+ ddirty.pitch = vfb->base.pitches[0];
+ ddirty.buf = buf;
+ ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+ ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+ num_clips * sizeof(SVGA3dCopyBox) +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ if (to_surface)
+ ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+ ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+ 0, 0, num_clips, increment, &ddirty.base);
+ vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+ user_fence_rep);
+
+ return ret;
+}
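+
+/*
+ * Hedged caller sketch for a dirty path (the locals are hypothetical):
+ * DMA all dirty rects from the dma-buf to the screen targets and let the
+ * helper fence the buffer afterwards:
+ *
+ *	ret = vmw_kms_stdu_dma(dev_priv, file_priv, vfb, NULL, clips, NULL,
+ *			       num_clips, 1, true, true);
+ */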
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *sdirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+
+ if (sdirty->sid != stdu->display_srf->res.id) {
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ blit += dirty->num_hits;
+ blit->srcx = dirty->fb_x;
+ blit->srcy = dirty->fb_y;
+ blit->x = dirty->unit_x1;
+ blit->y = dirty->unit_y1;
+ blit->d = 1;
+ blit->w = dirty->unit_x2 - dirty->unit_x1;
+ blit->h = dirty->unit_y2 - dirty->unit_y1;
+ }
+
+ dirty->num_hits++;
+
+ /* Destination bounding box */
+ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+ sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+ sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+ sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+ struct vmw_stdu_dirty *sdirty =
+ container_of(dirty, struct vmw_stdu_dirty, base);
+ struct vmw_screen_target_display_unit *stdu =
+ container_of(dirty->unit, typeof(*stdu), base);
+ struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+ struct vmw_stdu_update *update;
+ size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+ size_t commit_size;
+
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
+ if (sdirty->sid != stdu->display_srf->res.id) {
+ struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd->header.size = sizeof(cmd->body) + blit_size;
+ cmd->body.src.sid = sdirty->sid;
+ cmd->body.dest.sid = stdu->display_srf->res.id;
+ update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+ commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+ } else {
+ update = dirty->cmd;
+ commit_size = sizeof(*update);
+ }
+
+ vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+ sdirty->right, sdirty->top, sdirty->bottom);
+
+ vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+ sdirty->left = sdirty->top = S32_MAX;
+ sdirty->right = sdirty->bottom = S32_MIN;
+}
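+
+/*
+ * Note the two commit shapes handled above: a blit from a separate
+ * surface emits [copy header][body][copy boxes][update], while a dirty
+ * on the display surface itself degenerates to a single update command,
+ * since the pixels are already in place.
+ */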
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @inc: Increment to use when looping over @clips or @vclips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(framebuffer, typeof(*vfbs), base);
+ struct vmw_stdu_dirty sdirty;
+ int ret;
+
+ if (!srf)
+ srf = &vfbs->surface->res;
+
+ ret = vmw_kms_helper_resource_prepare(srf, true);
+ if (ret)
+ return ret;
+
+ if (vfbs->is_dmabuf_proxy) {
+ ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+ if (ret)
+ goto out_finish;
+ }
+
+ sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+ sdirty.base.clip = vmw_kms_stdu_surface_clip;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+ sizeof(SVGA3dCopyBox) * num_clips +
+ sizeof(struct vmw_stdu_update);
+ sdirty.sid = srf->id;
+ sdirty.left = sdirty.top = S32_MAX;
+ sdirty.right = sdirty.bottom = S32_MIN;
+
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+out_finish:
+ vmw_kms_helper_resource_finish(srf, out_fence);
+
+ return ret;
+}
+
+
+/*
+ * Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_du_crtc_gamma_set,
+ .destroy = vmw_stdu_crtc_destroy,
+ .set_config = vmw_stdu_crtc_set_config,
+ .page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used the get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+ .destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op. Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
+ .destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC. Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+
+ stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+ if (!stdu)
+ return -ENOMEM;
+
+ stdu->base.unit = unit;
+ crtc = &stdu->base.crtc;
+ encoder = &stdu->base.encoder;
+ connector = &stdu->base.connector;
+
+ stdu->base.pref_active = (unit == 0);
+ stdu->base.pref_width = dev_priv->initial_width;
+ stdu->base.pref_height = dev_priv->initial_height;
+ stdu->base.is_implicit = true;
+
+ drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, false);
+
+ drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ (void) drm_connector_register(connector);
+
+ drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ * @stdu: Screen Target Display Unit to be destroyed
+ *
+ * Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+ vmw_stdu_unpin_display(stdu);
+
+ vmw_du_cleanup(&stdu->base);
+ kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device. It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
+ * Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int i, ret;
+
+
+ /* Do nothing if Screen Target support is turned off */
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ return -ENOSYS;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+ return -ENOSYS;
+
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (unlikely(ret != 0))
+ goto err_vblank_cleanup;
+
+ dev_priv->active_display_unit = vmw_du_screen_target;
+
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+ ret = vmw_stdu_init(dev_priv, i);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize STDU %d", i);
+ goto err_vblank_cleanup;
+ }
+ }
+
+ DRM_INFO("Screen Target Display device initialized\n");
+
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+ return ret;
+}
+
+
+
+/**
+ * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * Frees up any resources allocated by vmw_kms_stdu_init_display
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ drm_vblank_cleanup(dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3e59da..3361769842f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
#include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
+
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
- * @master: master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
/**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
BUG_ON(res_free == NULL);
if (!dev_priv->has_mob)
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
if (!dev_priv->has_mob)
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
res_free(res);
return ret;
}
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
* surface validate.
*/
+ INIT_LIST_HEAD(&srf->view_list);
vmw_resource_activate(res, vmw_hw_surface_destroy);
return ret;
}
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
DRM_ERROR("Invalid surface format for surface creation.\n");
+ DRM_ERROR("Format requested is: %d\n", req->format);
return -EINVAL;
}
@@ -906,6 +911,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
"surface reference.\n");
return -EACCES;
}
+ if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ DRM_ERROR("Locked master refused legacy "
+ "surface reference.\n");
+ return -EACCES;
+ }
+
handle = u_handle;
}
@@ -1018,17 +1029,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
- uint32_t cmd_len, submit_len;
+ uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface body;
} *cmd;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v2 body;
+ } *cmd2;
if (likely(res->id != -1))
return 0;
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1055,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd_len = sizeof(cmd->body);
- submit_len = sizeof(*cmd);
+ if (srf->array_size > 0) {
+ /* has_dx checked at creation time. */
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+ cmd_len = sizeof(cmd2->body);
+ submit_len = sizeof(*cmd2);
+ } else {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+ cmd_len = sizeof(cmd->body);
+ submit_len = sizeof(*cmd);
+ }
+
cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ cmd2 = (typeof(cmd2))cmd;
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
@@ -1050,17 +1075,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- cmd->body.numMipLevels = srf->mip_levels[0];
- cmd->body.multisampleCount = srf->multisample_count;
- cmd->body.autogenFilter = srf->autogen_filter;
- cmd->body.size.width = srf->base_size.width;
- cmd->body.size.height = srf->base_size.height;
- cmd->body.size.depth = srf->base_size.depth;
+ if (srf->array_size > 0) {
+ cmd2->header.id = cmd_id;
+ cmd2->header.size = cmd_len;
+ cmd2->body.sid = srf->res.id;
+ cmd2->body.surfaceFlags = srf->flags;
+ cmd2->body.format = cpu_to_le32(srf->format);
+ cmd2->body.numMipLevels = srf->mip_levels[0];
+ cmd2->body.multisampleCount = srf->multisample_count;
+ cmd2->body.autogenFilter = srf->autogen_filter;
+ cmd2->body.size.width = srf->base_size.width;
+ cmd2->body.size.height = srf->base_size.height;
+ cmd2->body.size.depth = srf->base_size.depth;
+ cmd2->body.arraySize = srf->array_size;
+ } else {
+ cmd->header.id = cmd_id;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.numMipLevels = srf->mip_levels[0];
+ cmd->body.multisampleCount = srf->multisample_count;
+ cmd->body.autogenFilter = srf->autogen_filter;
+ cmd->body.size.width = srf->base_size.width;
+ cmd->body.size.height = srf->base_size.height;
+ cmd->body.size.depth = srf->base_size.depth;
+ }
+
vmw_fifo_commit(dev_priv, submit_len);
return 0;
@@ -1068,7 +1109,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
return ret;
}
@@ -1188,6 +1229,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1239,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_scrub(&res->binding_head);
+ vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+ vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -1213,11 +1256,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
+
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
@@ -1241,77 +1285,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- const struct svga3d_surface_desc *desc;
uint32_t backup_handle;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
size = vmw_user_surface_size + 128;
- desc = svga3dsurface_get_desc(req->format);
- if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format for surface creation.\n");
- return -EINVAL;
- }
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ /* Define a surface based on the parameters. */
+ ret = vmw_surface_gb_priv_define(dev,
+ size,
+ req->svga3d_flags,
+ req->format,
+ req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+ req->mip_levels,
+ req->multisample_count,
+ req->array_size,
+ req->base_size,
+ &srf);
if (unlikely(ret != 0))
return ret;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->svga3d_flags;
- srf->format = req->format;
- srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
- srf->mip_levels[0] = req->mip_levels;
- srf->num_sizes = 1;
- srf->sizes = NULL;
- srf->offsets = NULL;
- user_srf->size = size;
- srf->base_size = req->base_size;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = req->multisample_count;
- res->backup_size = svga3dsurface_get_serialized_size
- (srf->format, srf->base_size, srf->mip_levels[0],
- srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
+ user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
+
+ res = &user_srf->srf.res;
+
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup);
- } else if (req->drm_surface_flags &
- drm_vmw_surface_flag_create_buffer)
+ if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
req->drm_surface_flags &
@@ -1324,7 +1342,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- tmp = vmw_resource_reference(&srf->res);
+ tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
@@ -1337,7 +1355,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->handle = user_srf->prime.base.hash.key;
+ rep->handle = user_srf->prime.base.hash.key;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
@@ -1352,10 +1370,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res);
- ttm_read_unlock(&dev_priv->reservation_sem);
- return 0;
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
@@ -1415,6 +1429,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->creq.drm_surface_flags = 0;
rep->creq.multisample_count = srf->multisample_count;
rep->creq.autogen_filter = srf->autogen_filter;
+ rep->creq.array_size = srf->array_size;
rep->creq.buffer_handle = backup_handle;
rep->creq.base_size = srf->base_size;
rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1444,137 @@ out_bad_resource:
return ret;
}
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev: Pointer to a struct drm_device
+ * @user_accounting_size: Used to track user-space memory usage, set
+ * to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used as a scanout buffer
+ * @num_mip_levels: number of MIP levels
+ * @multisample_count: multisample count for the surface
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the requested surface
+ * @srf_out: allocated surface. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+ uint32_t user_accounting_size,
+ uint32_t svga3d_flags,
+ SVGA3dSurfaceFormat format,
+ bool for_scanout,
+ uint32_t num_mip_levels,
+ uint32_t multisample_count,
+ uint32_t array_size,
+ struct drm_vmw_size size,
+ struct vmw_surface **srf_out)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ int ret;
+ u32 num_layers;
+
+ *srf_out = NULL;
+
+ if (for_scanout) {
+ if (!svga3dsurface_is_screen_target_format(format)) {
+ DRM_ERROR("Invalid Screen Target surface format.");
+ return -EINVAL;
+ }
+ } else {
+ const struct svga3d_surface_desc *desc;
+
+ desc = svga3dsurface_get_desc(format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* array_size must be zero on non-DX hosts. */
+ if (array_size > 0 && !dev_priv->has_dx) {
+ DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ user_accounting_size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ *srf_out = &user_srf->srf;
+ user_srf->size = user_accounting_size;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ srf = &user_srf->srf;
+ srf->flags = svga3d_flags;
+ srf->format = format;
+ srf->scanout = for_scanout;
+ srf->mip_levels[0] = num_mip_levels;
+ srf->num_sizes = 1;
+ srf->sizes = NULL;
+ srf->offsets = NULL;
+ srf->base_size = size;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->array_size = array_size;
+ srf->multisample_count = multisample_count;
+
+ if (array_size)
+ num_layers = array_size;
+ else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers = SVGA3D_MAX_SURFACE_FACES;
+ else
+ num_layers = 1;
+
+ srf->res.backup_size =
+ svga3dsurface_get_serialized_size(srf->format,
+ srf->base_size,
+ srf->mip_levels[0],
+ num_layers);
+
+ if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+ srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ for_scanout)
+ srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
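+
+/*
+ * Usage sketch for vmw_surface_gb_priv_define() (illustrative only; the
+ * format, size and flag values below are assumptions, not taken from a
+ * real caller):
+ *
+ * struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
+ * struct vmw_surface *surf;
+ * int ret;
+ *
+ * ret = vmw_surface_gb_priv_define(dev, 0, 0, SVGA3D_X8R8G8B8,
+ *                                  true, 1, 0, 0, size, &surf);
+ * if (ret != 0)
+ *     return ret;
+ *
+ * On success, surf points to a surface that is visible only to vmwgfx
+ * until some other function gives it a user mode handle.
+ */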
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..e771091d2cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6ca337..52a6fd224127 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
#include "dev.h"
#define MIPI_CAL_CTRL 0x00
+#define MIPI_CAL_CTRL_NOISE_FILTER(x) (((x) & 0xf) << 26)
+#define MIPI_CAL_CTRL_PRESCALE(x) (((x) & 0x3) << 24)
+#define MIPI_CAL_CTRL_CLKEN_OVR (1 << 4)
#define MIPI_CAL_CTRL_START (1 << 0)
#define MIPI_CAL_AUTOCAL_CTRL 0x01
@@ -44,15 +47,18 @@
#define MIPI_CAL_CONFIG_CSIC 0x07
#define MIPI_CAL_CONFIG_CSID 0x08
#define MIPI_CAL_CONFIG_CSIE 0x09
+#define MIPI_CAL_CONFIG_CSIF 0x0a
#define MIPI_CAL_CONFIG_DSIA 0x0e
#define MIPI_CAL_CONFIG_DSIB 0x0f
#define MIPI_CAL_CONFIG_DSIC 0x10
#define MIPI_CAL_CONFIG_DSID 0x11
-#define MIPI_CAL_CONFIG_DSIAB_CLK 0x19
-#define MIPI_CAL_CONFIG_DSICD_CLK 0x1a
+#define MIPI_CAL_CONFIG_DSIA_CLK 0x19
+#define MIPI_CAL_CONFIG_DSIB_CLK 0x1a
#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
+#define MIPI_CAL_CONFIG_DSIC_CLK 0x1c
#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
+#define MIPI_CAL_CONFIG_DSID_CLK 0x1d
#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
/* for data and clock lanes */
@@ -73,8 +79,11 @@
#define MIPI_CAL_BIAS_PAD_CFG1 0x17
#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
#define MIPI_CAL_BIAS_PAD_CFG2 0x18
+#define MIPI_CAL_BIAS_PAD_VCLAMP(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_VAUXP(x) (((x) & 0x7) << 4)
#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
struct tegra_mipi_pad {
@@ -86,13 +95,35 @@ struct tegra_mipi_soc {
bool has_clk_lane;
const struct tegra_mipi_pad *pads;
unsigned int num_pads;
+
+ bool clock_enable_override;
+ bool needs_vclamp_ref;
+
+ /* bias pad configuration settings */
+ u8 pad_drive_down_ref;
+ u8 pad_drive_up_ref;
+
+ u8 pad_vclamp_level;
+ u8 pad_vauxp_level;
+
+ /* calibration settings for data lanes */
+ u8 hspdos;
+ u8 hspuos;
+ u8 termos;
+
+ /* calibration settings for clock lanes */
+ u8 hsclkpdos;
+ u8 hsclkpuos;
};
struct tegra_mipi {
const struct tegra_mipi_soc *soc;
+ struct device *dev;
void __iomem *regs;
struct mutex lock;
struct clk *clk;
+
+ unsigned long usage_count;
};
struct tegra_mipi_device {
@@ -114,6 +145,67 @@ static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
writel(value, mipi->regs + (offset << 2));
}
+static int tegra_mipi_power_up(struct tegra_mipi *mipi)
+{
+ u32 value;
+ int err;
+
+ err = clk_enable(mipi->clk);
+ if (err < 0)
+ return err;
+
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+
+ if (mipi->soc->needs_vclamp_ref)
+ value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ clk_disable(mipi->clk);
+
+ return 0;
+}
+
+static int tegra_mipi_power_down(struct tegra_mipi *mipi)
+{
+ u32 value;
+ int err;
+
+ err = clk_enable(mipi->clk);
+ if (err < 0)
+ return err;
+
+ /*
+ * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
+ * supplies the DSI pads. This must be kept enabled until none of the
+ * DSI lanes are used anymore.
+ */
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value |= MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ /*
+ * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
+ * control a regulator that supplies current to the pre-driver logic.
+ * Powering down this regulator causes DSI to fail, so it must remain
+ * powered on until none of the DSI lanes are used anymore.
+ */
+ value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+
+ if (mipi->soc->needs_vclamp_ref)
+ value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+ value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
+ tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ return 0;
+}
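+
+/*
+ * Pad power is reference counted below: the first tegra_mipi_request()
+ * powers the bricks up and the last tegra_mipi_free() powers them down.
+ * A minimal consumer sketch (illustrative only; pdev and err are assumed
+ * to exist in the caller):
+ *
+ * struct tegra_mipi_device *mipi;
+ *
+ * mipi = tegra_mipi_request(&pdev->dev);
+ * if (IS_ERR(mipi))
+ *     return PTR_ERR(mipi);
+ *
+ * err = tegra_mipi_calibrate(mipi);
+ * ...
+ * tegra_mipi_free(mipi);
+ */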
+
struct tegra_mipi_device *tegra_mipi_request(struct device *device)
{
struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev->pads = args.args[0];
dev->device = device;
+ mutex_lock(&dev->mipi->lock);
+
+ if (dev->mipi->usage_count++ == 0) {
+ err = tegra_mipi_power_up(dev->mipi);
+ if (err < 0) {
+ dev_err(dev->mipi->dev,
+ "failed to power up MIPI bricks: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+ }
+
+ mutex_unlock(&dev->mipi->lock);
+
return dev;
put:
@@ -164,6 +270,25 @@ EXPORT_SYMBOL(tegra_mipi_request);
void tegra_mipi_free(struct tegra_mipi_device *device)
{
+ int err;
+
+ mutex_lock(&device->mipi->lock);
+
+ if (--device->mipi->usage_count == 0) {
+ err = tegra_mipi_power_down(device->mipi);
+ if (err < 0) {
+ /*
+ * Not much that can be done here, so an error message
+ * will have to do.
+ */
+ dev_err(device->mipi->dev,
+ "failed to power down MIPI bricks: %d\n",
+ err);
+ }
+ }
+
+ mutex_unlock(&device->mipi->lock);
+
platform_device_put(device->pdev);
kfree(device);
}
@@ -199,16 +324,15 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
mutex_lock(&device->mipi->lock);
- value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
- value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
- value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
- tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
-
- tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
- MIPI_CAL_BIAS_PAD_CFG1);
+ value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
+ MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
- value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
+ value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
+ value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
+ value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
if (device->pads & BIT(i)) {
data = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSPDOS(0) |
- MIPI_CAL_CONFIG_HSPUOS(4) |
- MIPI_CAL_CONFIG_TERMOS(5);
+ MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
+ MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
+ MIPI_CAL_CONFIG_TERMOS(soc->termos);
clk = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
- MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+ MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
+ MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
}
tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
- if (soc->has_clk_lane)
+ if (soc->has_clk_lane && soc->pads[i].clk != 0)
tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
}
value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+ value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
+ value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
+ value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
+ value |= MIPI_CAL_CTRL_PRESCALE(0x2);
+
+ if (!soc->clock_enable_override)
+ value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
+ else
+ value |= MIPI_CAL_CTRL_CLKEN_OVR;
+
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
+
+ /* clear any pending status bits */
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
value |= MIPI_CAL_CTRL_START;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
@@ -259,6 +400,17 @@ static const struct tegra_mipi_soc tegra114_mipi_soc = {
.has_clk_lane = false,
.pads = tegra114_mipi_pads,
.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = true,
+ .pad_drive_down_ref = 0x2,
+ .pad_drive_up_ref = 0x0,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x4,
+ .termos = 0x5,
+ .hsclkpdos = 0x0,
+ .hsclkpuos = 0x4,
};
static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@ static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
- { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
- { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
- { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
};
static const struct tegra_mipi_soc tegra124_mipi_soc = {
.has_clk_lane = true,
.pads = tegra124_mipi_pads,
.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = true,
+ .pad_drive_down_ref = 0x2,
+ .pad_drive_up_ref = 0x0,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x0,
+ .termos = 0x0,
+ .hsclkpdos = 0x1,
+ .hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_soc tegra132_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra124_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+ .clock_enable_override = false,
+ .needs_vclamp_ref = false,
+ .pad_drive_down_ref = 0x0,
+ .pad_drive_up_ref = 0x3,
+ .pad_vclamp_level = 0x0,
+ .pad_vauxp_level = 0x0,
+ .hspdos = 0x0,
+ .hspuos = 0x0,
+ .termos = 0x0,
+ .hsclkpdos = 0x3,
+ .hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
+ { .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
+};
+
+static const struct tegra_mipi_soc tegra210_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra210_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra210_mipi_pads),
+ .clock_enable_override = true,
+ .needs_vclamp_ref = false,
+ .pad_drive_down_ref = 0x0,
+ .pad_drive_up_ref = 0x3,
+ .pad_vclamp_level = 0x1,
+ .pad_vauxp_level = 0x1,
+ .hspdos = 0x0,
+ .hspuos = 0x2,
+ .termos = 0x0,
+ .hsclkpdos = 0x0,
+ .hsclkpuos = 0x2,
};
-static struct of_device_id tegra_mipi_of_match[] = {
+static const struct of_device_id tegra_mipi_of_match[] = {
{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+ { .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
+ { .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
{ },
};
@@ -299,6 +511,7 @@ static int tegra_mipi_probe(struct platform_device *pdev)
return -ENOMEM;
mipi->soc = match->data;
+ mipi->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6d2f39d36e44..243f99a80253 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -915,8 +915,8 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
- struct irq_chip *chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
@@ -928,8 +928,8 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
const int int_reg[] = { 4, 5, 8, 9};
- struct irq_chip *chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
return ret;
}
+ for (i = 0; i < IPU_NUM_IRQS; i += 32)
+ ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+
for (i = 0; i < IPU_NUM_IRQS; i += 32) {
gc = irq_get_domain_generic_chip(ipu->domain, i);
gc->reg_base = ipu->cm_reg;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5dbd06..21060668fd25 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
* Licensed under GPLv2
*
* vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
-
- Switcher interface - methods require for ATPX and DCM
- - switchto - this throws the output MUX switch
- - discrete_set_power - sets the power state for the discrete card
-
- GPU driver interface
- - set_gpu_state - this should do the equiv of s/r for the card
- - this should *not* set the discrete power state
- - switch_check - check if the device is in a position to switch now
+ *
+ * Switcher interface - methods required for ATPX and DCM
+ * - switchto - this throws the output MUX switch
+ * - discrete_set_power - sets the power state for the discrete card
+ *
+ * GPU driver interface
+ * - set_gpu_state - this should do the equiv of s/r for the card
+ * - this should *not* set the discrete power state
+ * - switch_check - check if the device is in a position to switch now
*/
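+
+/*
+ * A minimal handler sketch (illustrative only; the callback name follows
+ * the list above, everything else here is assumed):
+ *
+ * static int foo_switchto(enum vga_switcheroo_client_id id)
+ * {
+ *     return 0; (this is where the output MUX would be thrown)
+ * }
+ *
+ * static struct vga_switcheroo_handler foo_handler = {
+ *     .switchto = foo_switchto,
+ * };
+ *
+ * A driver would then pass the handler to
+ * vga_switcheroo_register_handler(&foo_handler) once at probe time.
+ */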
+#define pr_fmt(fmt) "vga_switcheroo: " fmt
+
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -111,7 +113,7 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
vgasr_priv.handler = handler;
if (vga_switcheroo_ready()) {
- printk(KERN_INFO "vga_switcheroo: enabled\n");
+ pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@ void vga_switcheroo_unregister_handler(void)
mutex_lock(&vgasr_mutex);
vgasr_priv.handler = NULL;
if (vgasr_priv.active) {
- pr_info("vga_switcheroo: disabled\n");
+ pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
@@ -155,7 +157,7 @@ static int register_client(struct pci_dev *pdev,
vgasr_priv.registered_clients++;
if (vga_switcheroo_ready()) {
- printk(KERN_INFO "vga_switcheroo: enabled\n");
+ pr_info("enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
bool driver_power_control)
{
return register_client(pdev, ops, -1,
- pdev == vga_default_device(), driver_power_control);
+ pdev == vga_default_device(),
+ driver_power_control);
}
EXPORT_SYMBOL(vga_switcheroo_register_client);
@@ -183,6 +186,7 @@ static struct vga_switcheroo_client *
find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->pdev == pdev)
return client;
@@ -193,6 +197,7 @@ static struct vga_switcheroo_client *
find_client_from_id(struct list_head *head, int client_id)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->id == client_id)
return client;
@@ -203,6 +208,7 @@ static struct vga_switcheroo_client *
find_active_client(struct list_head *head)
{
struct vga_switcheroo_client *client;
+
list_for_each_entry(client, head, list)
if (client->active && client_is_vga(client))
return client;
@@ -235,7 +241,7 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
kfree(client);
}
if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
- printk(KERN_INFO "vga_switcheroo: disabled\n");
+ pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
@@ -260,10 +266,12 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
{
struct vga_switcheroo_client *client;
int i = 0;
+
mutex_lock(&vgasr_mutex);
list_for_each_entry(client, &vgasr_priv.clients, list) {
seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
- client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
+ "IGD",
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->fb_info) {
struct fb_event event;
+
console_lock();
event.info = new_client->fb_info;
fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@ static bool check_can_switch(void)
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (!client->ops->can_switch(client->pdev)) {
- printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ pr_err("client %x refused switch\n", client->id);
return false;
}
}
@@ -484,20 +493,20 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (can_switch) {
ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+ pr_err("switching failed stage 1 %d\n", ret);
ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+ pr_err("switching failed stage 2 %d\n", ret);
} else {
- printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+ pr_info("setting delayed switch to client %d\n", client->id);
vgasr_priv.delayed_switch_active = true;
vgasr_priv.delayed_client_id = client_id;
ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+ pr_err("delayed switching stage 1 failed %d\n", ret);
}
out:
@@ -516,32 +525,32 @@ static const struct file_operations vga_switcheroo_debugfs_fops = {
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
{
- if (priv->switch_file) {
- debugfs_remove(priv->switch_file);
- priv->switch_file = NULL;
- }
- if (priv->debugfs_root) {
- debugfs_remove(priv->debugfs_root);
- priv->debugfs_root = NULL;
- }
+ debugfs_remove(priv->switch_file);
+ priv->switch_file = NULL;
+
+ debugfs_remove(priv->debugfs_root);
+ priv->debugfs_root = NULL;
}
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
{
+ static const char mp[] = "/sys/kernel/debug";
+
/* already initialised */
if (priv->debugfs_root)
return 0;
priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
if (!priv->debugfs_root) {
- printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+ pr_err("Cannot create %s/vgaswitcheroo\n", mp);
goto fail;
}
priv->switch_file = debugfs_create_file("switch", 0644,
- priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+ priv->debugfs_root, NULL,
+ &vga_switcheroo_debugfs_fops);
if (!priv->switch_file) {
- printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+ pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
goto fail;
}
return 0;
@@ -560,7 +569,8 @@ int vga_switcheroo_process_delayed_switch(void)
if (!vgasr_priv.delayed_switch_active)
goto err;
- printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+ pr_info("processing delayed switch to %d\n",
+ vgasr_priv.delayed_client_id);
client = find_client_from_id(&vgasr_priv.clients,
vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@ int vga_switcheroo_process_delayed_switch(void)
ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+ pr_err("delayed switching failed stage 2 %d\n", ret);
vgasr_priv.delayed_switch_active = false;
err = 0;
@@ -579,7 +589,8 @@ err:
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void vga_switcheroo_power_switch(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
{
struct vga_switcheroo_client *client;
@@ -598,7 +609,8 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switchero
/* force a PCI device to a certain state - mainly to turn off audio clients */
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
+ enum vga_switcheroo_state dynamic)
{
struct vga_switcheroo_client *client;
@@ -644,7 +656,8 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
/* this version is for the case where the power switch is separate
to the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+ struct dev_pm_domain *domain)
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
/* we need to check if we have to switch back on the video
device so the audio device can come back */
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+ if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
+ client_is_vga(client)) {
found = client;
ret = pm_runtime_get_sync(&client->pdev->dev);
if (ret) {
@@ -695,12 +709,15 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
return ret;
}
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int
+vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+ struct dev_pm_domain *domain)
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
domain->ops = *dev->bus->pm;
- domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+ domain->ops.runtime_resume =
+ vga_switcheroo_runtime_resume_hdmi_audio;
dev->pm_domain = domain;
return 0;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf863656e..a0b433456107 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) "vgaarb: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -134,7 +136,6 @@ struct pci_dev *vga_default_device(void)
{
return vga_default;
}
-
EXPORT_SYMBOL_GPL(vga_default_device);
void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@ enable_them:
pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
- if (!vgadev->bridge_has_one_vga) {
+ if (!vgadev->bridge_has_one_vga)
vga_irq_set_state(vgadev, true);
- }
+
vgadev->owns |= wants;
lock_them:
vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@ bail:
}
EXPORT_SYMBOL(vga_put);
-/* Rules for using a bridge to control a VGA descendant decoding:
- if a bridge has only one VGA descendant then it can be used
- to control the VGA routing for that device.
- It should always use the bridge closest to the device to control it.
- If a bridge has a direct VGA descendant, but also have a sub-bridge
- VGA descendant then we cannot use that bridge to control the direct VGA descendant.
- So for every device we register, we need to iterate all its parent bridges
- so we can invalidate any devices using them properly.
-*/
+/*
+ * Rules for using a bridge to control a VGA descendant decoding: if a bridge
+ * has only one VGA descendant then it can be used to control the VGA routing
+ * for that device. It should always use the bridge closest to the device to
+ * control it. If a bridge has a direct VGA descendant, but also has a sub-
+ * bridge VGA descendant then we cannot use that bridge to control the direct
+ * VGA descendant. So for every device we register, we need to iterate all
+ * its parent bridges so we can invalidate any devices using them properly.
+ */
static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
{
struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
/* see if the share a bridge with this device */
if (new_bridge == bridge) {
- /* if their direct parent bridge is the same
- as any bridge of this device then it can't be used
- for that device */
+ /*
+ * If their direct parent bridge is the same
+ * as any bridge of this device then it can't
+ * be used for that device.
+ */
same_bridge_vgadev->bridge_has_one_vga = false;
}
- /* now iterate the previous devices bridge hierarchy */
- /* if the new devices parent bridge is in the other devices
- hierarchy then we can't use it to control this device */
+ /*
+ * Now iterate the previous device's bridge hierarchy.
+ * If the new device's parent bridge is in the other
+ * device's hierarchy then we can't use it to control
+ * this device.
+ */
while (bus) {
bridge = bus->self;
- if (bridge) {
- if (bridge == vgadev->pdev->bus->self)
- vgadev->bridge_has_one_vga = false;
- }
+
+ if (bridge && bridge == vgadev->pdev->bus->self)
+ vgadev->bridge_has_one_vga = false;
+
bus = bus->parent;
}
}
@@ -527,10 +533,10 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Allocate structure */
vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
- pr_err("vgaarb: failed to allocate pci device\n");
- /* What to do on allocation failure ? For now, let's
- * just do nothing, I'm not sure there is anything saner
- * to be done
+ pr_err("failed to allocate pci device\n");
+ /*
+ * What to do on allocation failure? For now, let's just do
+ * nothing; I'm not sure there is anything saner to be done.
*/
return false;
}
@@ -566,8 +572,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
bridge = bus->self;
if (bridge) {
u16 l;
- pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
- &l);
+
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
if (!(l & PCI_BRIDGE_CTL_VGA)) {
vgadev->owns = 0;
break;
@@ -581,8 +587,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
*/
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: setting as boot device: PCI:%s\n",
- pci_name(pdev));
+ pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
vga_set_default_device(pdev);
}
@@ -591,7 +596,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
- pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+ pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
pci_name(pdev),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+ pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
pci_name(vgadev->pdev),
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
new_decodes & VGA_RSRC_LEGACY_MASK)
vga_decode_count++;
- pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+ pr_debug("decoding count now is: %d\n", vga_decode_count);
}
-static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes,
+ bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
@@ -712,7 +719,8 @@ EXPORT_SYMBOL(vga_set_legacy_decoding);
/* call with NULL to unregister */
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
- unsigned int (*set_vga_decode)(void *cookie, bool decode))
+ unsigned int (*set_vga_decode)(void *cookie,
+ bool decode))
{
int ret = -ENODEV;
struct vga_device *vgadev;
@@ -832,7 +840,7 @@ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
return 1;
}
-static ssize_t vga_arb_read(struct file *file, char __user * buf,
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@ done:
* TODO: To avoid parsing inside kernel and to improve the speed we may
* consider use ioctl here
*/
-static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
ret_val = -EPROTO;
goto done;
}
- pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+ pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- pr_debug("vgaarb: pdev %p\n", pdev);
+ pr_debug("pdev %p\n", pdev);
if (!pdev) {
- pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+ pr_err("invalid PCI address %x:%x:%x\n",
domain, bus, devfn);
ret_val = -ENODEV;
goto done;
@@ -1089,10 +1097,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
vgadev = vgadev_find(pdev);
- pr_debug("vgaarb: vgadev %p\n", vgadev);
+ pr_debug("vgadev %p\n", vgadev);
if (vgadev == NULL) {
- pr_err("vgaarb: this pci device is not a vga device\n");
- pci_dev_put(pdev);
+ if (pdev) {
+ pr_err("this pci device is not a vga device\n");
+ pci_dev_put(pdev);
+ }
+
ret_val = -ENODEV;
goto done;
}
@@ -1109,7 +1120,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
}
if (i == MAX_USER_CARDS) {
- pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+ pr_err("maximum user cards (%d) number reached!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
@@ -1125,7 +1136,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
- pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+ pr_debug("client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
{
struct vga_arb_private *priv = file->private_data;
@@ -1246,7 +1257,8 @@ static void vga_arbiter_notify_clients(void)
else
new_state = true;
if (vgadev->set_vga_decode) {
- new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+ new_decodes = vgadev->set_vga_decode(vgadev->cookie,
+ new_state);
vga_update_device_decodes(vgadev, new_decodes);
}
}
@@ -1300,7 +1312,7 @@ static int __init vga_arb_device_init(void)
rc = misc_register(&vga_arb_device);
if (rc < 0)
- pr_err("vgaarb: error %d registering device\n", rc);
+ pr_err("error %d registering device\n", rc);
bus_register_notifier(&pci_bus_type, &pci_notifier);
@@ -1312,21 +1324,29 @@ static int __init vga_arb_device_init(void)
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
- pr_info("vgaarb: loaded\n");
+ pr_info("loaded\n");
list_for_each_entry(vgadev, &vga_list, list) {
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- /* Override I/O based detection done by vga_arbiter_add_pci_device()
- * as it may take the wrong device (e.g. on Apple system under EFI).
+ /*
+ * Override vga_arbiter_add_pci_device()'s I/O based detection
+ * as it may take the wrong device (e.g. on Apple system under
+ * EFI).
*
- * Select the device owning the boot framebuffer if there is one.
+ * Select the device owning the boot framebuffer if there is
+ * one.
*/
- resource_size_t start, end;
+ resource_size_t start, end, limit;
+ unsigned long flags;
int i;
+ limit = screen_info.lfb_base + screen_info.lfb_size;
+
/* Does firmware framebuffer belong to us? */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+ flags = pci_resource_flags(vgadev->pdev, i);
+
+ if ((flags & IORESOURCE_MEM) == 0)
continue;
start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@ static int __init vga_arb_device_init(void)
if (!start || !end)
continue;
- if (screen_info.lfb_base < start ||
- (screen_info.lfb_base + screen_info.lfb_size) >= end)
+ if (screen_info.lfb_base < start || limit >= end)
continue;
+
if (!vga_default_device())
- pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pr_info("setting as boot device: PCI:%s\n",
pci_name(vgadev->pdev));
else if (vgadev->pdev != vga_default_device())
- pr_info("vgaarb: overriding boot device: PCI:%s\n",
+ pr_info("overriding boot device: PCI:%s\n",
pci_name(vgadev->pdev));
vga_set_default_device(vgadev->pdev);
}
#endif
if (vgadev->bridge_has_one_vga)
- pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+ pr_info("bridge control possible %s\n",
+ pci_name(vgadev->pdev));
else
- pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+ pr_info("no bridge control possible %s\n",
+ pci_name(vgadev->pdev));
}
return rc;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cc4c6649d195..6ab51ae3c39d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -251,6 +251,12 @@ config HID_EZKEY
---help---
Support for Ezkey BTC 8193 keyboard.
+config HID_GEMBIRD
+ tristate "Gembird Joypad"
+ depends on HID
+ ---help---
+ Support for Gembird JPD-DualForce 2.
+
config HID_HOLTEK
tristate "Holtek HID devices"
depends on USB_HID
@@ -480,6 +486,7 @@ config HID_MULTITOUCH
- Atmel panels
- Cando dual touch panels
- Chunghwa panels
+ - CJTouch panels
- CVTouch panels
- Cypress TrueTouch panels
- Elan Microelectronics touch panels
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 2f8a41dc3cc8..e6441bc7dae4 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
+obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index b613d5a79684..bc3cec199fee 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -20,6 +20,7 @@
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/usb.h>
#include "hid-ids.h"
@@ -57,10 +58,34 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ /* Change usage maximum and logical maximum from 0x7fff to
+ * 0x2fff, so they don't exceed HID_MAX_USAGES */
+ switch (hdev->product) {
+ case USB_DEVICE_ID_CHICONY_ACER_SWITCH12:
+ if (*rsize >= 128 && rdesc[64] == 0xff && rdesc[65] == 0x7f
+ && rdesc[69] == 0xff && rdesc[70] == 0x7f) {
+ hid_info(hdev, "Fixing up report descriptor\n");
+ rdesc[65] = rdesc[70] = 0x2f;
+ }
+ break;
+ }
+
+ }
+ return rdesc;
+}
+
+
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
@@ -68,6 +93,7 @@ MODULE_DEVICE_TABLE(hid, ch_devices);
static struct hid_driver ch_driver = {
.name = "chicony",
.id_table = ch_devices,
+ .report_fixup = ch_switch12_report_fixup,
.input_mapping = ch_input_mapping,
};
module_hid_driver(ch_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..70a11ac38119 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -427,6 +427,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
__u32 data;
unsigned n;
+ __u32 count;
data = item_udata(item);
@@ -490,6 +491,24 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
if (item->size <= 2)
data = (parser->global.usage_page << 16) + data;
+ count = data - parser->local.usage_minimum;
+ if (count + parser->local.usage_index >= HID_MAX_USAGES) {
+ /*
+ * We do not warn if the name is not set, since we are
+ * actually pre-scanning the device.
+ */
+ if (dev_name(&parser->device->dev))
+ hid_warn(parser->device,
+ "ignoring exceeding usage max\n");
+ data = HID_MAX_USAGES - parser->local.usage_index +
+ parser->local.usage_minimum - 1;
+ if (data <= 0) {
+ hid_err(parser->device,
+ "no more usage index available\n");
+ return -1;
+ }
+ }
+
for (n = parser->local.usage_minimum; n <= data; n++)
if (hid_add_usage(parser, n)) {
dbg_hid("hid_add_usage failed\n");
@@ -705,8 +724,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
hid->group = HID_GROUP_SENSOR_HUB;
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
- (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP ||
+ (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1782,6 +1802,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1804,6 +1827,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1820,6 +1844,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
@@ -1902,8 +1927,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
@@ -2267,6 +2293,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -2463,6 +2491,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
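
The clamp added to hid_parser_local() above keeps a hostile or buggy Usage
Maximum from pushing usage_index past HID_MAX_USAGES. A standalone sketch of
the same arithmetic, using hypothetical numbers (only the 12288 constant
matches the kernel's HID_MAX_USAGES):

#include <stdio.h>

#define HID_MAX_USAGES 12288

int main(void)
{
	unsigned usage_index = 12000;	/* usages collected so far (hypothetical) */
	unsigned usage_minimum = 0;
	unsigned data = 0x7fff;		/* oversized Usage Maximum from the descriptor */

	unsigned count = data - usage_minimum;
	if (count + usage_index >= HID_MAX_USAGES)
		data = HID_MAX_USAGES - usage_index + usage_minimum - 1;

	/* 287: the range usage_minimum..data now holds exactly the
	 * 288 usages still free before the table is full. */
	printf("clamped usage maximum: %u\n", data);
	return 0;
}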
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..7afc3fcc122c 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -156,6 +156,7 @@ struct cp2112_device {
wait_queue_head_t wait;
u8 read_data[61];
u8 read_length;
+ u8 hwversion;
int xfer_status;
atomic_t read_avail;
atomic_t xfer_avail;
@@ -356,6 +357,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
struct cp2112_force_read_report report;
int ret;
+ if (size > sizeof(dev->read_data))
+ size = sizeof(dev->read_data);
report.report = CP2112_DATA_READ_FORCE_SEND;
report.length = cpu_to_be16(size);
@@ -444,6 +447,24 @@ static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data,
return data_length + 3;
}
+static int cp2112_i2c_write_read_req(void *buf, u8 slave_address,
+ u8 *addr, int addr_length,
+ int read_length)
+{
+ struct cp2112_write_read_req_report *report = buf;
+
+ if (read_length < 1 || read_length > 512 ||
+ addr_length > sizeof(report->target_address))
+ return -EINVAL;
+
+ report->report = CP2112_DATA_WRITE_READ_REQUEST;
+ report->slave_address = slave_address << 1;
+ report->length = cpu_to_be16(read_length);
+ report->target_address_length = addr_length;
+ memcpy(report->target_address, addr, addr_length);
+ return addr_length + 5;
+}
+
static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num)
{
@@ -451,26 +472,46 @@ static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
struct hid_device *hdev = dev->hdev;
u8 buf[64];
ssize_t count;
+ ssize_t read_length = 0;
+ u8 *read_buf = NULL;
unsigned int retries;
int ret;
hid_dbg(hdev, "I2C %d messages\n", num);
- if (num != 1) {
+ if (num == 1) {
+ if (msgs->flags & I2C_M_RD) {
+ hid_dbg(hdev, "I2C read %#04x len %d\n",
+ msgs->addr, msgs->len);
+ read_length = msgs->len;
+ read_buf = msgs->buf;
+ count = cp2112_read_req(buf, msgs->addr, msgs->len);
+ } else {
+ hid_dbg(hdev, "I2C write %#04x len %d\n",
+ msgs->addr, msgs->len);
+ count = cp2112_i2c_write_req(buf, msgs->addr,
+ msgs->buf, msgs->len);
+ }
+ if (count < 0)
+ return count;
+ } else if (dev->hwversion > 1 && /* no repeated start in rev 1 */
+ num == 2 &&
+ msgs[0].addr == msgs[1].addr &&
+ !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
+ hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n",
+ msgs[0].addr, msgs[0].len, msgs[1].len);
+ read_length = msgs[1].len;
+ read_buf = msgs[1].buf;
+ count = cp2112_i2c_write_read_req(buf, msgs[0].addr,
+ msgs[0].buf, msgs[0].len, msgs[1].len);
+ if (count < 0)
+ return count;
+ } else {
hid_err(hdev,
"Multi-message I2C transactions not supported\n");
return -EOPNOTSUPP;
}
- if (msgs->flags & I2C_M_RD)
- count = cp2112_read_req(buf, msgs->addr, msgs->len);
- else
- count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf,
- msgs->len);
-
- if (count < 0)
- return count;
-
ret = hid_hw_power(hdev, PM_HINT_FULLON);
if (ret < 0) {
hid_err(hdev, "power management error: %d\n", ret);
@@ -506,21 +547,34 @@ static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
goto power_normal;
}
- if (!(msgs->flags & I2C_M_RD))
- goto finish;
-
- ret = cp2112_read(dev, msgs->buf, msgs->len);
- if (ret < 0)
- goto power_normal;
- if (ret != msgs->len) {
- hid_warn(hdev, "short read: %d < %d\n", ret, msgs->len);
- ret = -EIO;
- goto power_normal;
+ for (count = 0; count < read_length;) {
+ ret = cp2112_read(dev, read_buf + count, read_length - count);
+ if (ret < 0)
+ goto power_normal;
+ if (ret == 0) {
+ hid_err(hdev, "read returned 0\n");
+ ret = -EIO;
+ goto power_normal;
+ }
+ count += ret;
+ if (count > read_length) {
+ /*
+ * The hardware returned too much data.
+ * This is mostly harmless because cp2112_read()
+ * has a limit check, so it didn't overrun our
+ * buffer. Nevertheless, we return an error
+ * because something is seriously wrong and
+ * it shouldn't go unnoticed.
+ */
+ hid_err(hdev, "long read: %d > %zd\n",
+ ret, read_length - count + ret);
+ ret = -EIO;
+ goto power_normal;
+ }
}
-finish:
/* return the number of transferred messages */
- ret = 1;
+ ret = num;
power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
@@ -535,7 +589,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
struct hid_device *hdev = dev->hdev;
u8 buf[64];
- __be16 word;
+ __le16 word;
ssize_t count;
size_t read_length = 0;
unsigned int retries;
@@ -552,7 +606,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
if (I2C_SMBUS_READ == read_write)
count = cp2112_read_req(buf, addr, read_length);
else
- count = cp2112_write_req(buf, addr, data->byte, NULL,
+ count = cp2112_write_req(buf, addr, command, NULL,
0);
break;
case I2C_SMBUS_BYTE_DATA:
@@ -567,7 +621,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
break;
case I2C_SMBUS_WORD_DATA:
read_length = 2;
- word = cpu_to_be16(data->word);
+ word = cpu_to_le16(data->word);
if (I2C_SMBUS_READ == read_write)
count = cp2112_write_read_req(buf, addr, read_length,
@@ -580,7 +634,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
size = I2C_SMBUS_WORD_DATA;
read_write = I2C_SMBUS_READ;
read_length = 2;
- word = cpu_to_be16(data->word);
+ word = cpu_to_le16(data->word);
count = cp2112_write_read_req(buf, addr, read_length, command,
(u8 *)&word, 2);
@@ -673,7 +727,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
data->byte = buf[0];
break;
case I2C_SMBUS_WORD_DATA:
- data->word = be16_to_cpup((__be16 *)buf);
+ data->word = le16_to_cpup((__le16 *)buf);
break;
case I2C_SMBUS_BLOCK_DATA:
if (read_length > I2C_SMBUS_BLOCK_MAX) {
@@ -1028,6 +1082,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->adap.dev.parent = &hdev->dev;
snprintf(dev->adap.name, sizeof(dev->adap.name),
"CP2112 SMBus Bridge on hiddev%d", hdev->minor);
+ dev->hwversion = buf[2];
init_waitqueue_head(&dev->wait);
hid_device_io_start(hdev);
diff --git a/drivers/hid/hid-gembird.c b/drivers/hid/hid-gembird.c
new file mode 100644
index 000000000000..e55e519f311e
--- /dev/null
+++ b/drivers/hid/hid-gembird.c
@@ -0,0 +1,116 @@
+/*
+ * HID driver for Gembird Joypad, "PC Game Controller"
+ *
+ * Copyright (c) 2015 Red Hat, Inc
+ * Copyright (c) 2015 Benjamin Tissoires
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define GEMBIRD_START_FAULTY_RDESC 8
+
+static const __u8 gembird_jpd_faulty_rdesc[] = {
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x05, /* Report Count (5) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x35, 0x00, /* Physical Minimum (0) */
+ 0x46, 0xff, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x09, 0x35, /* Usage (Rz) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+};
+
+/*
+ * we fix the report descriptor by:
+ * - marking the first Z axis as constant (so it is ignored by HID)
+ * - assigning the original second Z to Rx
+ * - assigning the original Rz to Ry
+ */
+static const __u8 gembird_jpd_fixed_rdesc[] = {
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x35, 0x00, /* Physical Minimum (0) */
+ 0x46, 0xff, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x81, 0x01, /* Input (Cnst,Arr,Abs) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x09, 0x33, /* Usage (Rx) */
+ 0x09, 0x34, /* Usage (Ry) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+};
+
+static __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ __u8 *new_rdesc;
+ /* delta_size is > 0 */
+ size_t delta_size = sizeof(gembird_jpd_fixed_rdesc) -
+ sizeof(gembird_jpd_faulty_rdesc);
+ size_t new_size = *rsize + delta_size;
+
+ if (*rsize >= 31 && !memcmp(&rdesc[GEMBIRD_START_FAULTY_RDESC],
+ gembird_jpd_faulty_rdesc,
+ sizeof(gembird_jpd_faulty_rdesc))) {
+ new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
+ if (new_rdesc == NULL)
+ return rdesc;
+
+ dev_info(&hdev->dev,
+ "fixing Gembird JPD-DualForce 2 report descriptor.\n");
+
+ /* start by copying the end of the rdesc */
+ memcpy(new_rdesc + delta_size, rdesc, *rsize);
+
+ /* add the correct beginning */
+ memcpy(new_rdesc, rdesc, GEMBIRD_START_FAULTY_RDESC);
+
+ /* replace the faulty part with the fixed one */
+ memcpy(new_rdesc + GEMBIRD_START_FAULTY_RDESC,
+ gembird_jpd_fixed_rdesc,
+ sizeof(gembird_jpd_fixed_rdesc));
+
+ *rsize = new_size;
+ rdesc = new_rdesc;
+ }
+
+ return rdesc;
+}
+
+static const struct hid_device_id gembird_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD,
+ USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, gembird_devices);
+
+static struct hid_driver gembird_driver = {
+ .name = "gembird",
+ .id_table = gembird_devices,
+ .report_fixup = gembird_report_fixup,
+};
+module_hid_driver(gembird_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("HID Gembird joypad driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..f769208276ae 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
@@ -230,12 +233,17 @@
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
+#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
#define USB_VENDOR_ID_CIDC 0x1677
+#define USB_VENDOR_ID_CJTOUCH 0x24b8
+#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
+#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
+
#define USB_VENDOR_ID_CMEDIA 0x0d8c
#define USB_DEVICE_ID_CM109 0x000e
@@ -355,6 +363,9 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
+#define USB_VENDOR_ID_GEMBIRD 0x11ff
+#define USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2 0x3331
+
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
@@ -497,6 +508,9 @@
#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
+#define USB_VENDOR_ID_ITE 0x048d
+#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
+
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
#define USB_DEVICE_ID_JABRA_SPEAK_510 0x0420
@@ -599,6 +613,7 @@
#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
+#define USB_DEVICE_ID_LOGITECH_G29_WHEEL 0xc24f
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
@@ -663,8 +678,9 @@
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc
-#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -922,7 +938,8 @@
#define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
#define USB_VENDOR_ID_TPV 0x25aa
-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..53aeaf6252c7 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
static void hidinput_cleanup_battery(struct hid_device *dev)
{
+ const struct power_supply_desc *psy_desc;
+
if (!dev->battery)
return;
+ psy_desc = dev->battery->desc;
power_supply_unregister(dev->battery);
- kfree(dev->battery->desc->name);
- kfree(dev->battery->desc);
+ kfree(psy_desc->name);
+ kfree(psy_desc);
dev->battery = NULL;
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
@@ -1163,8 +1166,11 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
input_event(input, usage->type, usage->code, value);
- if ((field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->type == EV_KEY))
+ if ((field->flags & HID_MAIN_ITEM_RELATIVE) &&
+ usage->type == EV_KEY && value) {
+ input_sync(input);
input_event(input, usage->type, usage->code, 0);
+ }
}
void hidinput_report_event(struct hid_device *hid, struct hid_report *report)
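
power_supply_unregister() releases the supply, so the old code's
dev->battery->desc dereferenced freed memory; the fix caches the desc
pointer before unregistering. The bug class, reduced to a standalone
sketch with illustrative names:

#include <stdlib.h>
#include <string.h>

struct desc { char *name; };
struct battery { struct desc *desc; };

static void unregister_battery(struct battery *b)
{
	free(b);		/* stand-in for power_supply_unregister() */
}

int main(void)
{
	struct battery *b = malloc(sizeof(*b));
	b->desc = malloc(sizeof(*b->desc));
	b->desc->name = strdup("hid-battery");

	/* correct order, as in the fix: cache the pointer first */
	struct desc *d = b->desc;
	unregister_battery(b);	/* b is gone; b->desc would now be a
				 * use-after-free */
	free(d->name);
	free(d);
	return 0;
}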
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 4f59bffd0205..e4bc6cb6d7fa 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -37,6 +37,7 @@ struct lenovo_drvdata_tpkbd {
};
struct lenovo_drvdata_cptkbd {
+ u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
bool fn_lock;
int sensitivity;
};
@@ -146,10 +147,10 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
switch (usage->hid & HID_USAGE) {
case 0x0000:
- hid_map_usage(hi, usage, bit, max, EV_REL, 0x06);
+ hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
return 1;
case 0x0001:
- hid_map_usage(hi, usage, bit, max, EV_REL, 0x08);
+ hid_map_usage(hi, usage, bit, max, EV_REL, REL_WHEEL);
return 1;
default:
return -1;
@@ -207,9 +208,12 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
- ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity);
if (ret)
hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity);
+ if (ret)
+ hid_err(hdev, "Sensitivity setting failed: %d\n", ret);
}
static ssize_t attr_fn_lock_show_cptkbd(struct device *dev,
@@ -313,6 +317,53 @@ static int lenovo_raw_event(struct hid_device *hdev,
return 0;
}
+static int lenovo_event_cptkbd(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage, __s32 value)
+{
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+
+ /* "wheel" scroll events */
+ if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+ usage->code == REL_HWHEEL)) {
+ /* Scroll events disable middle-click event */
+ cptkbd_data->middlebutton_state = 2;
+ return 0;
+ }
+
+ /* Middle click events */
+ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+ if (value == 1) {
+ cptkbd_data->middlebutton_state = 1;
+ } else if (value == 0) {
+ if (cptkbd_data->middlebutton_state == 1) {
+ /* No scrolling in between, send middle-click */
+ input_event(field->hidinput->input,
+ EV_KEY, BTN_MIDDLE, 1);
+ input_sync(field->hidinput->input);
+ input_event(field->hidinput->input,
+ EV_KEY, BTN_MIDDLE, 0);
+ input_sync(field->hidinput->input);
+ }
+ cptkbd_data->middlebutton_state = 0;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ return lenovo_event_cptkbd(hdev, field, usage, value);
+ default:
+ return 0;
+ }
+}
+
static int lenovo_features_set_tpkbd(struct hid_device *hdev)
{
struct hid_report *report;
@@ -705,6 +756,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
/* Set keyboard settings to known state */
+ cptkbd_data->middlebutton_state = 0;
cptkbd_data->fn_lock = true;
cptkbd_data->sensitivity = 0x05;
lenovo_features_set_cptkbd(hdev);
@@ -832,6 +884,7 @@ static struct hid_driver lenovo_driver = {
.probe = lenovo_probe,
.remove = lenovo_remove,
.raw_event = lenovo_raw_event,
+ .event = lenovo_event,
.report_fixup = lenovo_report_fixup,
};
module_hid_driver(lenovo_driver);
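
The middle-button handling above is a small three-state machine: a press
arms a pending click, any wheel event while armed turns the press into a
scroll gesture, and the click is emitted only if the release arrives with
no scroll in between. A standalone sketch of the same transitions
(printf stands in for the injected input events):

#include <stdio.h>

enum { MB_UP = 0, MB_DOWN_UNDECIDED = 1, MB_SCROLLING = 2 };

static int state = MB_UP;

static void on_wheel_event(void)
{
	state = MB_SCROLLING;	/* scrolling cancels the pending click */
}

static void on_button(int pressed)
{
	if (pressed) {
		state = MB_DOWN_UNDECIDED;
	} else {
		if (state == MB_DOWN_UNDECIDED)
			printf("emit middle click\n");
		state = MB_UP;
	}
}

int main(void)
{
	on_button(1); on_button(0);			/* plain click: emits */
	on_button(1); on_wheel_event(); on_button(0);	/* scroll: no click */
	return 0;
}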
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 429340d809b5..5332fb7d072a 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -776,6 +776,8 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2),
.driver_data = LG_FF },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL),
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 32a596f554af..9aa3515090a7 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -276,9 +276,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..426b2f1a3450 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/*
* some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
* for the stylus.
+ * The check for mt_report_id ensures we don't process
+ * HID_DG_CONTACTCOUNT from the pen report, as it is outside the
+ * physical collection but still within the same report ID.
*/
if (field->physical == HID_DG_STYLUS)
return 0;
+ else if ((field->physical == 0) &&
+ (field->report->id != td->mt_report_id) &&
+ (td->mt_report_id != -1))
+ return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
field->application == HID_DG_TOUCHPAD)
@@ -1138,6 +1145,14 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ /* CJTouch panels */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
+ USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020) },
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
+ USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/hid/hid-picolcd_backlight.c b/drivers/hid/hid-picolcd_backlight.c
index a32c5f86b0b3..808807ad388f 100644
--- a/drivers/hid/hid-picolcd_backlight.c
+++ b/drivers/hid/hid-picolcd_backlight.c
@@ -94,8 +94,7 @@ void picolcd_exit_backlight(struct picolcd_data *data)
struct backlight_device *bdev = data->backlight;
data->backlight = NULL;
- if (bdev)
- backlight_device_unregister(bdev);
+ backlight_device_unregister(bdev);
}
int picolcd_resume_backlight(struct picolcd_data *data)
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 045f8ebf16b5..96286510f42e 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -145,7 +145,6 @@ void picolcd_exit_cir(struct picolcd_data *data)
struct rc_dev *rdev = data->rc_dev;
data->rc_dev = NULL;
- if (rdev)
- rc_unregister_device(rdev);
+ rc_unregister_device(rdev);
}
diff --git a/drivers/hid/hid-picolcd_lcd.c b/drivers/hid/hid-picolcd_lcd.c
index 89821c2da6d7..22dcbe13da89 100644
--- a/drivers/hid/hid-picolcd_lcd.c
+++ b/drivers/hid/hid-picolcd_lcd.c
@@ -92,8 +92,7 @@ void picolcd_exit_lcd(struct picolcd_data *data)
struct lcd_device *ldev = data->lcd;
data->lcd = NULL;
- if (ldev)
- lcd_device_unregister(ldev);
+ lcd_device_unregister(ldev);
}
int picolcd_resume_lcd(struct picolcd_data *data)
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 4cf80bb276dc..2c148129beb2 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -33,10 +33,21 @@
#define RMI_READ_DATA_PENDING 1
#define RMI_STARTED 2
+#define RMI_SLEEP_NORMAL 0x0
+#define RMI_SLEEP_DEEP_SLEEP 0x1
+
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
+/*
+ * Retrieve the ctrl registers.
+ * The ctrl register has a size of 20, but a fw bug split it into 16 + 4,
+ * and there is no way to know if the first 20 bytes are here or not.
+ * We use only the first 12 bytes, so get only them.
+ */
+#define RMI_F11_CTRL_REG_COUNT 12
+
enum rmi_mode_type {
RMI_MODE_OFF = 0,
RMI_MODE_ATTN_REPORTS = 1,
@@ -113,6 +124,8 @@ struct rmi_data {
unsigned int max_y;
unsigned int x_size_mm;
unsigned int y_size_mm;
+ bool read_f11_ctrl_regs;
+ u8 f11_ctrl_regs[RMI_F11_CTRL_REG_COUNT];
unsigned int gpio_led_count;
unsigned int button_count;
@@ -126,6 +139,10 @@ struct rmi_data {
unsigned long device_flags;
unsigned long firmware_id;
+
+ u8 f01_ctrl0;
+ u8 interrupt_enable_mask;
+ bool restore_interrupt_mask;
};
#define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -346,13 +363,34 @@ static void rmi_f11_process_touch(struct rmi_data *hdata, int slot,
}
}
+static int rmi_reset_attn_mode(struct hid_device *hdev)
+{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+ int ret;
+
+ ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+ if (ret)
+ return ret;
+
+ if (data->restore_interrupt_mask) {
+ ret = rmi_write(hdev, data->f01.control_base_addr + 1,
+ &data->interrupt_enable_mask);
+ if (ret) {
+ hid_err(hdev, "can not write F01 control register\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void rmi_reset_work(struct work_struct *work)
{
struct rmi_data *hdata = container_of(work, struct rmi_data,
reset_work);
/* switch the device to RMI if we receive a generic mouse report */
- rmi_set_mode(hdata->hdev, RMI_MODE_ATTN_REPORTS);
+ rmi_reset_attn_mode(hdata->hdev);
}
static inline int rmi_schedule_reset(struct hid_device *hdev)
@@ -532,14 +570,77 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
}
#ifdef CONFIG_PM
+static int rmi_set_sleep_mode(struct hid_device *hdev, int sleep_mode)
+{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+ int ret;
+ u8 f01_ctrl0;
+
+ f01_ctrl0 = (data->f01_ctrl0 & ~0x3) | sleep_mode;
+
+ ret = rmi_write(hdev, data->f01.control_base_addr,
+ &f01_ctrl0);
+ if (ret) {
+ hid_err(hdev, "can not write sleep mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+ int ret;
+ u8 buf[RMI_F11_CTRL_REG_COUNT];
+
+ ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
+ RMI_F11_CTRL_REG_COUNT);
+ if (ret)
+ hid_warn(hdev, "can not read F11 control registers\n");
+ else
+ memcpy(data->f11_ctrl_regs, buf, RMI_F11_CTRL_REG_COUNT);
+
+ if (!device_may_wakeup(hdev->dev.parent))
+ return rmi_set_sleep_mode(hdev, RMI_SLEEP_DEEP_SLEEP);
+
+ return 0;
+}
+
static int rmi_post_reset(struct hid_device *hdev)
{
- return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+ struct rmi_data *data = hid_get_drvdata(hdev);
+ int ret;
+
+ ret = rmi_reset_attn_mode(hdev);
+ if (ret) {
+ hid_err(hdev, "can not set rmi mode\n");
+ return ret;
+ }
+
+ if (data->read_f11_ctrl_regs) {
+ ret = rmi_write_block(hdev, data->f11.control_base_addr,
+ data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
+ if (ret)
+ hid_warn(hdev,
+ "can not write F11 control registers after reset\n");
+ }
+
+ if (!device_may_wakeup(hdev->dev.parent)) {
+ ret = rmi_set_sleep_mode(hdev, RMI_SLEEP_NORMAL);
+ if (ret) {
+ hid_err(hdev, "can not write sleep mode\n");
+ return ret;
+ }
+ }
+
+ return ret;
}
static int rmi_post_resume(struct hid_device *hdev)
{
- return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+ return rmi_reset_attn_mode(hdev);
}
#endif /* CONFIG_PM */
@@ -595,6 +696,7 @@ static void rmi_register_function(struct rmi_data *data,
f->interrupt_count = pdt_entry->interrupt_source_count;
f->irq_mask = rmi_gen_mask(f->interrupt_base,
f->interrupt_count);
+ data->interrupt_enable_mask |= f->irq_mask;
}
}
@@ -732,6 +834,35 @@ static int rmi_populate_f01(struct hid_device *hdev)
data->firmware_id += info[2] * 65536;
}
+ ret = rmi_read_block(hdev, data->f01.control_base_addr, info,
+ 2);
+
+ if (ret) {
+ hid_err(hdev, "can not read f01 ctrl registers\n");
+ return ret;
+ }
+
+ data->f01_ctrl0 = info[0];
+
+ if (!info[1]) {
+ /*
+ * Due to a firmware bug in some touchpads, the F01 interrupt
+ * enable control register will be cleared on reset.
+ * This will stop the touchpad from reporting data, so
+ * if F01 CTRL1 is 0 then we need to explicitly enable
+ * interrupts for the functions we want data for.
+ */
+ data->restore_interrupt_mask = true;
+
+ ret = rmi_write(hdev, data->f01.control_base_addr + 1,
+ &data->interrupt_enable_mask);
+ if (ret) {
+ hid_err(hdev, "can not write to control reg 1: %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -904,24 +1035,23 @@ static int rmi_populate_f11(struct hid_device *hdev)
if (has_data40)
data->f11.report_size += data->max_fingers * 2;
- /*
- * retrieve the ctrl registers
- * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
- * and there is no way to know if the first 20 bytes are here or not.
- * We use only the first 12 bytes, so get only them.
- */
- ret = rmi_read_block(hdev, data->f11.control_base_addr, buf, 12);
+ ret = rmi_read_block(hdev, data->f11.control_base_addr,
+ data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
if (ret) {
hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
return ret;
}
- data->max_x = buf[6] | (buf[7] << 8);
- data->max_y = buf[8] | (buf[9] << 8);
+ /* data->f11_ctrl_regs now contains valid register data */
+ data->read_f11_ctrl_regs = true;
+
+ data->max_x = data->f11_ctrl_regs[6] | (data->f11_ctrl_regs[7] << 8);
+ data->max_y = data->f11_ctrl_regs[8] | (data->f11_ctrl_regs[9] << 8);
if (has_dribble) {
- buf[0] = buf[0] & ~BIT(6);
- ret = rmi_write(hdev, data->f11.control_base_addr, buf);
+ data->f11_ctrl_regs[0] = data->f11_ctrl_regs[0] & ~BIT(6);
+ ret = rmi_write(hdev, data->f11.control_base_addr,
+ data->f11_ctrl_regs);
if (ret) {
hid_err(hdev, "can not write to control reg 0: %d.\n",
ret);
@@ -930,9 +1060,9 @@ static int rmi_populate_f11(struct hid_device *hdev)
}
if (has_palm_detect) {
- buf[11] = buf[11] & ~BIT(0);
+ data->f11_ctrl_regs[11] = data->f11_ctrl_regs[11] & ~BIT(0);
ret = rmi_write(hdev, data->f11.control_base_addr + 11,
- &buf[11]);
+ &data->f11_ctrl_regs[11]);
if (ret) {
hid_err(hdev, "can not write to control reg 11: %d.\n",
ret);
@@ -1273,6 +1403,7 @@ static struct hid_driver rmi_driver = {
.input_mapping = rmi_input_mapping,
.input_configured = rmi_input_configured,
#ifdef CONFIG_PM
+ .suspend = rmi_suspend,
.resume = rmi_post_resume,
.reset_resume = rmi_post_reset,
#endif
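
rmi_set_sleep_mode() above is a read-modify-write of a two-bit field: the
RMI4 F01 CTRL0 register keeps the sleep mode in its low two bits, and the
update must preserve the remaining configuration bits. The bit arithmetic
as a standalone sketch (the cached register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define RMI_SLEEP_NORMAL	0x0
#define RMI_SLEEP_DEEP_SLEEP	0x1

int main(void)
{
	uint8_t f01_ctrl0 = 0x84;	/* hypothetical cached CTRL0 value */
	uint8_t v = (f01_ctrl0 & ~0x3) | RMI_SLEEP_DEEP_SLEEP;

	printf("0x%02x -> 0x%02x\n", f01_ctrl0, v);	/* 0x84 -> 0x85:
							   only bits 1:0 change */
	return 0;
}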
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 090a1ba0abb6..a76eb2a0a987 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -774,6 +774,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
+ USB_DEVICE_ID_ITE_LENOVO_YOGA),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ed2f008f8403..661f94f8ab8b 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -296,7 +296,14 @@ static __u8 navigation_rdesc[] = {
0x09, 0x01, /* Usage (Pointer), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
- 0x95, 0x20, /* Report Count (32), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x95, 0x1E, /* Report Count (30), */
0x81, 0x02, /* Input (Variable), */
0x75, 0x08, /* Report Size (8), */
0x95, 0x30, /* Report Count (48), */
@@ -1270,6 +1277,17 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
* has to be BYTE_SWAPPED before passing up to joystick interface
*/
if ((sc->quirks & SIXAXIS_CONTROLLER) && rd[0] == 0x01 && size == 49) {
+ /*
+ * When connected via Bluetooth the Sixaxis occasionally sends
+ * a report with the second byte 0xff and the rest zeroed.
+ *
+ * This report does not reflect the actual state of the
+ * controller and must be ignored to avoid generating false input
+ * events.
+ */
+ if (rd[1] == 0xff)
+ return -EINVAL;
+
swap(rd[41], rd[42]);
swap(rd[43], rd[44]);
swap(rd[45], rd[46]);
@@ -1836,7 +1854,7 @@ static void dualshock4_state_worker(struct work_struct *work)
} else {
memset(buf, 0, DS4_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0xB0;
+ buf[1] = 0x80;
buf[3] = 0x0F;
offset = 6;
}
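
The navigation descriptor rewrite above carves the original 32-count vendor
field into 1 + 1 + 30 so the Pointer usage lands on the byte where the
controller actually reports it, without changing the total report length.
The bookkeeping, as a one-line consistency check:

#include <assert.h>

int main(void)
{
	int original  = 0x20;			/* one 32-count vendor field */
	int rewritten = 0x01 + 0x01 + 0x1e;	/* vendor + pointer + vendor */
	assert(original == rewritten);		/* 32 == 32: size unchanged */
	return 0;
}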
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
+ p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
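
The one-character uclogic fix above is a classic C pitfall: params[] is an
array of 32-bit values, so sizeof() is four times the element count, and
the old bound check admitted out-of-range indexes. A standalone
demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int32_t params[5] = { 0 };

	printf("sizeof = %zu, ARRAY_SIZE = %zu\n",
	       sizeof(params), ARRAY_SIZE(params));	/* 20 vs 5 */

	int idx = 7;	/* would wrongly pass "idx < sizeof(params)" */
	printf("idx < sizeof: %d, idx < ARRAY_SIZE: %d\n",
	       idx < (int)sizeof(params), idx < (int)ARRAY_SIZE(params));
	return 0;
}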
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f77469d4edfb..2871f3c81a4c 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -149,6 +149,8 @@ struct i2c_hid {
int irq;
struct i2c_hid_platform_data pdata;
+
+ bool irq_wake_enabled;
};
static int __i2c_hid_command(struct i2c_client *client,
@@ -1091,14 +1093,21 @@ static int i2c_hid_suspend(struct device *dev)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid = ihid->hid;
int ret = 0;
-
- disable_irq(ihid->irq);
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(ihid->irq);
+ int wake_status;
if (hid->driver && hid->driver->suspend)
ret = hid->driver->suspend(hid, PMSG_SUSPEND);
+ disable_irq(ihid->irq);
+ if (device_may_wakeup(&client->dev)) {
+ wake_status = enable_irq_wake(ihid->irq);
+ if (!wake_status)
+ ihid->irq_wake_enabled = true;
+ else
+ hid_warn(hid, "Failed to enable irq wake: %d\n",
+ wake_status);
+ }
+
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
@@ -1111,14 +1120,21 @@ static int i2c_hid_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid = ihid->hid;
+ int wake_status;
enable_irq(ihid->irq);
ret = i2c_hid_hwreset(client);
if (ret)
return ret;
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(ihid->irq);
+ if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
+ wake_status = disable_irq_wake(ihid->irq);
+ if (!wake_status)
+ ihid->irq_wake_enabled = false;
+ else
+ hid_warn(hid, "Failed to disable irq wake: %d\n",
+ wake_status);
+ }
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
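
enable_irq_wake()/disable_irq_wake() maintain a per-IRQ wake depth, so an
unmatched disable (for example after a failed enable) unbalances it; the
irq_wake_enabled flag added above records whether there is anything to
undo. A standalone sketch with stand-in functions modelling that counter:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel's enable/disable_irq_wake(), which keep a
 * per-IRQ wake depth that must stay balanced */
static int wake_depth;
static int enable_irq_wake(int irq)  { (void)irq; wake_depth++; return 0; }
static int disable_irq_wake(int irq) { (void)irq; wake_depth--; return 0; }

static bool irq_wake_enabled;

static void suspend_path(int irq)
{
	if (enable_irq_wake(irq) == 0)
		irq_wake_enabled = true;	/* remember the successful enable */
}

static void resume_path(int irq)
{
	/* only undo what was actually done, as in the fix above */
	if (irq_wake_enabled) {
		disable_irq_wake(irq);
		irq_wake_enabled = false;
	}
}

int main(void)
{
	suspend_path(5);
	resume_path(5);
	printf("wake depth after cycle: %d\n", wake_depth);	/* 0: balanced */
	return 0;
}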
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index bfbe1bedda7f..36712e9f56c2 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -164,7 +164,7 @@ static void hid_io_error(struct hid_device *hid)
if (time_after(jiffies, usbhid->stop_retry)) {
/* Retries failed, so do a port reset unless we lack bandwidth */
- if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+ if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
&& !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
schedule_work(&usbhid->reset_work);
@@ -710,7 +710,8 @@ int usbhid_open(struct hid_device *hid)
* Wait 50 msec for the queue to empty before allowing events
* to go through hid.
*/
- msleep(50);
+ if (res == 0 && !(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+ msleep(50);
clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
}
done:
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..1dff8f0015ba 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,8 +87,12 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
@@ -114,7 +118,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index a533787a6d85..4681a65a4579 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -113,7 +113,7 @@ struct wacom {
struct mutex lock;
struct work_struct work;
struct wacom_led {
- u8 select[2]; /* status led selector (0..3) */
+ u8 select[5]; /* status led selector (0..3) */
u8 llv; /* status led brightness no button (1..127) */
u8 hlv; /* status led brightness button pressed (1..127) */
u8 img_lum; /* OLED matrix display brightness */
@@ -123,6 +123,8 @@ struct wacom {
struct power_supply *ac;
struct power_supply_desc battery_desc;
struct power_supply_desc ac_desc;
+ struct kobject *remote_dir;
+ struct attribute_group remote_group[5];
};
static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
@@ -147,4 +149,7 @@ int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
+int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial,
+ int index);
+void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..9a4912c1828d 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -23,9 +23,13 @@
#define WAC_CMD_ICON_XFER 0x23
#define WAC_CMD_ICON_BT_XFER 0x26
#define WAC_CMD_RETRIES 10
+#define WAC_CMD_DELETE_PAIRING 0x20
+#define WAC_CMD_UNPAIR_ALL 0xFF
+#define WAC_REMOTE_SERIAL_MAX_STRLEN 9
#define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
#define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
+#define DEV_ATTR_RO_PERM (S_IRUSR | S_IRGRP)
static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf,
size_t size, unsigned int retries)
@@ -335,7 +339,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -453,12 +457,11 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
* interface number.
*/
if (features->type == WIRELESS) {
- if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
+ features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
+ else
features->device_type = WACOM_DEVICETYPE_NONE;
- } else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) {
- features->device_type |= WACOM_DEVICETYPE_TOUCH;
- features->pktlen = WACOM_PKGLEN_BBTOUCH3;
- }
+ return;
}
wacom_parse_hid(hdev, features);
@@ -1120,6 +1123,189 @@ static ssize_t wacom_store_speed(struct device *dev,
static DEVICE_ATTR(speed, DEV_ATTR_RW_PERM,
wacom_show_speed, wacom_store_speed);
+
+static ssize_t wacom_show_remote_mode(struct kobject *kobj,
+ struct kobj_attribute *kattr,
+ char *buf, int index)
+{
+ struct device *dev = container_of(kobj->parent, struct device, kobj);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ u8 mode;
+
+ mode = wacom->led.select[index];
+ if (mode >= 0 && mode < 3)
+ return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d\n", -1);
+}
+
+#define DEVICE_EKR_ATTR_GROUP(SET_ID) \
+static ssize_t wacom_show_remote##SET_ID##_mode(struct kobject *kobj, \
+ struct kobj_attribute *kattr, char *buf) \
+{ \
+ return wacom_show_remote_mode(kobj, kattr, buf, SET_ID); \
+} \
+static struct kobj_attribute remote##SET_ID##_mode_attr = { \
+ .attr = {.name = "remote_mode", \
+ .mode = DEV_ATTR_RO_PERM}, \
+ .show = wacom_show_remote##SET_ID##_mode, \
+}; \
+static struct attribute *remote##SET_ID##_serial_attrs[] = { \
+ &remote##SET_ID##_mode_attr.attr, \
+ NULL \
+}; \
+static struct attribute_group remote##SET_ID##_serial_group = { \
+ .name = NULL, \
+ .attrs = remote##SET_ID##_serial_attrs, \
+}
+
+DEVICE_EKR_ATTR_GROUP(0);
+DEVICE_EKR_ATTR_GROUP(1);
+DEVICE_EKR_ATTR_GROUP(2);
+DEVICE_EKR_ATTR_GROUP(3);
+DEVICE_EKR_ATTR_GROUP(4);
+
+int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial, int index)
+{
+ int error = 0;
+ char *buf;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->serial[index] = serial;
+
+ buf = kzalloc(WAC_REMOTE_SERIAL_MAX_STRLEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ snprintf(buf, WAC_REMOTE_SERIAL_MAX_STRLEN, "%u", serial);
+ wacom->remote_group[index].name = buf;
+
+ error = sysfs_create_group(wacom->remote_dir,
+ &wacom->remote_group[index]);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create sysfs group err: %d\n", error);
+ kobject_put(wacom->remote_dir);
+ return error;
+ }
+
+ return 0;
+}
+
+void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ int i;
+
+ if (!serial)
+ return;
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (wacom_wac->serial[i] == serial) {
+ wacom_wac->serial[i] = 0;
+ wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
+ if (wacom->remote_group[i].name) {
+ sysfs_remove_group(wacom->remote_dir,
+ &wacom->remote_group[i]);
+ kfree(wacom->remote_group[i].name);
+ wacom->remote_group[i].name = NULL;
+ }
+ }
+ }
+}
+
+static int wacom_cmd_unpair_remote(struct wacom *wacom, unsigned char selector)
+{
+ const size_t buf_size = 2;
+ unsigned char *buf;
+ int retval;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = WAC_CMD_DELETE_PAIRING;
+ buf[1] = selector;
+
+ retval = wacom_set_report(wacom->hdev, HID_OUTPUT_REPORT, buf,
+ buf_size, WAC_CMD_RETRIES);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t wacom_store_unpair_remote(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned char selector = 0;
+ struct device *dev = container_of(kobj->parent, struct device, kobj);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ int err;
+
+ if (!strncmp(buf, "*\n", 2)) {
+ selector = WAC_CMD_UNPAIR_ALL;
+ } else {
+ hid_info(wacom->hdev, "remote: unrecognized unpair code: %s\n",
+ buf);
+ return -1;
+ }
+
+ mutex_lock(&wacom->lock);
+
+ err = wacom_cmd_unpair_remote(wacom, selector);
+ mutex_unlock(&wacom->lock);
+
+ return err < 0 ? err : count;
+}
+
+static struct kobj_attribute unpair_remote_attr = {
+ .attr = {.name = "unpair_remote", .mode = 0200},
+ .store = wacom_store_unpair_remote,
+};
+
+static const struct attribute *remote_unpair_attrs[] = {
+ &unpair_remote_attr.attr,
+ NULL
+};
+
+static int wacom_initialize_remote(struct wacom *wacom)
+{
+ int error = 0;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int i;
+
+ if (wacom->wacom_wac.features.type != REMOTE)
+ return 0;
+
+ wacom->remote_group[0] = remote0_serial_group;
+ wacom->remote_group[1] = remote1_serial_group;
+ wacom->remote_group[2] = remote2_serial_group;
+ wacom->remote_group[3] = remote3_serial_group;
+ wacom->remote_group[4] = remote4_serial_group;
+
+ wacom->remote_dir = kobject_create_and_add("wacom_remote",
+ &wacom->hdev->dev.kobj);
+ if (!wacom->remote_dir)
+ return -ENOMEM;
+
+ error = sysfs_create_files(wacom->remote_dir, remote_unpair_attrs);
+
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create sysfs group err: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
+ wacom_wac->serial[i] = 0;
+ }
+
+ return 0;
+}
+
static struct input_dev *wacom_allocate_input(struct wacom *wacom)
{
struct input_dev *input_dev;
@@ -1130,7 +1316,7 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
if (!input_dev)
return NULL;
- input_dev->name = wacom_wac->pen_name;
+ input_dev->name = wacom_wac->features.name;
input_dev->phys = hdev->phys;
input_dev->dev.parent = &hdev->dev;
input_dev->open = wacom_open;
@@ -1145,43 +1331,6 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
return input_dev;
}
-static void wacom_free_inputs(struct wacom *wacom)
-{
- struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
-
- if (wacom_wac->pen_input)
- input_free_device(wacom_wac->pen_input);
- if (wacom_wac->touch_input)
- input_free_device(wacom_wac->touch_input);
- if (wacom_wac->pad_input)
- input_free_device(wacom_wac->pad_input);
- wacom_wac->pen_input = NULL;
- wacom_wac->touch_input = NULL;
- wacom_wac->pad_input = NULL;
-}
-
-static int wacom_allocate_inputs(struct wacom *wacom)
-{
- struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
- struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
-
- pen_input_dev = wacom_allocate_input(wacom);
- touch_input_dev = wacom_allocate_input(wacom);
- pad_input_dev = wacom_allocate_input(wacom);
- if (!pen_input_dev || !touch_input_dev || !pad_input_dev) {
- wacom_free_inputs(wacom);
- return -ENOMEM;
- }
-
- wacom_wac->pen_input = pen_input_dev;
- wacom_wac->touch_input = touch_input_dev;
- wacom_wac->touch_input->name = wacom_wac->touch_name;
- wacom_wac->pad_input = pad_input_dev;
- wacom_wac->pad_input->name = wacom_wac->pad_name;
-
- return 0;
-}
-
static void wacom_clean_inputs(struct wacom *wacom)
{
if (wacom->wacom_wac.pen_input) {
@@ -1202,12 +1351,33 @@ static void wacom_clean_inputs(struct wacom *wacom)
else
input_free_device(wacom->wacom_wac.pad_input);
}
+ if (wacom->remote_dir)
+ kobject_put(wacom->remote_dir);
wacom->wacom_wac.pen_input = NULL;
wacom->wacom_wac.touch_input = NULL;
wacom->wacom_wac.pad_input = NULL;
wacom_destroy_leds(wacom);
}
+static int wacom_allocate_inputs(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+
+ wacom_wac->pen_input = wacom_allocate_input(wacom);
+ wacom_wac->touch_input = wacom_allocate_input(wacom);
+ wacom_wac->pad_input = wacom_allocate_input(wacom);
+ if (!wacom_wac->pen_input || !wacom_wac->touch_input || !wacom_wac->pad_input) {
+ wacom_clean_inputs(wacom);
+ return -ENOMEM;
+ }
+
+ wacom_wac->pen_input->name = wacom_wac->pen_name;
+ wacom_wac->touch_input->name = wacom_wac->touch_name;
+ wacom_wac->pad_input->name = wacom_wac->pad_name;
+
+ return 0;
+}
+
static int wacom_register_inputs(struct wacom *wacom)
{
struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
@@ -1262,26 +1432,67 @@ static int wacom_register_inputs(struct wacom *wacom)
error = wacom_initialize_leds(wacom);
if (error)
goto fail_leds;
+
+ error = wacom_initialize_remote(wacom);
+ if (error)
+ goto fail_remote;
}
return 0;
+fail_remote:
+ wacom_destroy_leds(wacom);
fail_leds:
input_unregister_device(pad_input_dev);
pad_input_dev = NULL;
wacom_wac->pad_registered = false;
fail_register_pad_input:
- input_unregister_device(touch_input_dev);
+ if (touch_input_dev)
+ input_unregister_device(touch_input_dev);
wacom_wac->touch_input = NULL;
wacom_wac->touch_registered = false;
fail_register_touch_input:
- input_unregister_device(pen_input_dev);
+ if (pen_input_dev)
+ input_unregister_device(pen_input_dev);
wacom_wac->pen_input = NULL;
wacom_wac->pen_registered = false;
fail_register_pen_input:
return error;
}
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the default from hardcoded logical dimension
+ * and resolution before the driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+ if (features->x_resolution) {
+ features->x_phy = (features->x_max * 100) /
+ features->x_resolution;
+ features->y_phy = (features->y_max * 100) /
+ features->y_resolution;
+ }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+ /* set unit to "100th of a mm" for devices not reported by HID */
+ if (!features->unit) {
+ features->unit = 0x11;
+ features->unitExpo = -3;
+ }
+
+ features->x_resolution = wacom_calc_hid_res(features->x_max,
+ features->x_phy,
+ features->unit,
+ features->unitExpo);
+ features->y_resolution = wacom_calc_hid_res(features->y_max,
+ features->y_phy,
+ features->unit,
+ features->unitExpo);
+}
+
static void wacom_wireless_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1339,6 +1550,8 @@ static void wacom_wireless_work(struct work_struct *work)
if (wacom_wac1->features.type != INTUOSHT &&
wacom_wac1->features.type != BAMBOO_PT)
wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+ wacom_set_default_phy(&wacom_wac1->features);
+ wacom_calculate_res(&wacom_wac1->features);
snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1357,7 +1570,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac2->features =
*((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_set_default_phy(&wacom_wac2->features);
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ wacom_calculate_res(&wacom_wac2->features);
snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
"%s (WL) Finger",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1405,39 +1620,6 @@ void wacom_battery_work(struct work_struct *work)
}
}
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
- if (features->x_resolution) {
- features->x_phy = (features->x_max * 100) /
- features->x_resolution;
- features->y_phy = (features->y_max * 100) /
- features->y_resolution;
- }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
- /* set unit to "100th of a mm" for devices not reported by HID */
- if (!features->unit) {
- features->unit = 0x11;
- features->unitExpo = -3;
- }
-
- features->x_resolution = wacom_calc_hid_res(features->x_max,
- features->x_phy,
- features->unit,
- features->unitExpo);
- features->y_resolution = wacom_calc_hid_res(features->y_max,
- features->y_phy,
- features->unit,
- features->unitExpo);
-}
-
static size_t wacom_compute_pktlen(struct hid_device *hdev)
{
struct hid_report_enum *report_enum;
@@ -1550,11 +1732,9 @@ static int wacom_probe(struct hid_device *hdev,
mutex_init(&wacom->lock);
INIT_WORK(&wacom->work, wacom_wireless_work);
- if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
- error = wacom_allocate_inputs(wacom);
- if (error)
- goto fail_allocate_inputs;
- }
+ error = wacom_allocate_inputs(wacom);
+ if (error)
+ goto fail_allocate_inputs;
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
@@ -1600,18 +1780,16 @@ static int wacom_probe(struct hid_device *hdev,
if (error)
goto fail_shared_data;
- if (!(features->quirks & WACOM_QUIRK_MONITOR) &&
+ if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
error = wacom_initialize_battery(wacom);
if (error)
goto fail_battery;
}
- if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
- error = wacom_register_inputs(wacom);
- if (error)
- goto fail_register_inputs;
- }
+ error = wacom_register_inputs(wacom);
+ if (error)
+ goto fail_register_inputs;
if (hdev->bus == BUS_BLUETOOTH) {
error = device_create_file(&hdev->dev, &dev_attr_speed);
@@ -1634,7 +1812,7 @@ static int wacom_probe(struct hid_device *hdev,
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(hdev, features);
- if (features->quirks & WACOM_QUIRK_MONITOR)
+ if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
error = hid_hw_open(hdev);
if (wacom_wac->features.type == INTUOSHT &&
@@ -1708,7 +1886,6 @@ static struct hid_driver wacom_driver = {
.id_table = wacom_ids,
.probe = wacom_probe,
.remove = wacom_remove,
- .event = wacom_wac_event,
.report = wacom_wac_report,
#ifdef CONFIG_PM
.resume = wacom_resume,
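
DEVICE_EKR_ATTR_GROUP() in the wacom_sys.c hunk above uses token pasting to
stamp out one show function plus its kobj_attribute/attribute_group
plumbing per remote slot, because sysfs show callbacks carry no index
argument of their own. The pattern reduced to a standalone sketch with
illustrative names:

#include <stdio.h>

#define DEFINE_REMOTE_MODE(SET_ID)				\
static int show_remote##SET_ID##_mode(void)			\
{								\
	return SET_ID;	/* stands in for the sysfs show body */	\
}

DEFINE_REMOTE_MODE(0)
DEFINE_REMOTE_MODE(1)

int main(void)
{
	/* the preprocessor generated show_remote0_mode and show_remote1_mode */
	printf("%d %d\n", show_remote0_mode(), show_remote1_mode());
	return 0;
}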
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0215ab62bb93 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -125,61 +125,47 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
prox = data[1] & 0x40;
- if (prox) {
- wacom->id[0] = ERASER_DEVICE_ID;
- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
- if (features->pressure_max > 255)
- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
- pressure += (features->pressure_max + 1) / 2;
-
- /*
- * if going from out of proximity into proximity select between the eraser
- * and the pen based on the state of the stylus2 button, choose eraser if
- * pressed else choose pen. if not a proximity change from out to in, send
- * an out of proximity for previous tool then a in for new tool.
- */
- if (!wacom->tool[0]) {
- /* Eraser bit set for DTF */
- if (data[1] & 0x10)
- wacom->tool[1] = BTN_TOOL_RUBBER;
- else
- /* Going into proximity select tool */
- wacom->tool[1] = (data[4] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- } else {
- /* was entered with stylus2 pressed */
- if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
- /* report out proximity for previous tool */
- input_report_key(input, wacom->tool[1], 0);
- input_sync(input);
- wacom->tool[1] = BTN_TOOL_PEN;
- return 0;
- }
+ if (!wacom->id[0]) {
+ if ((data[0] & 0x10) || (data[4] & 0x20)) {
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ wacom->id[0] = ERASER_DEVICE_ID;
}
- if (wacom->tool[1] != BTN_TOOL_RUBBER) {
- /* Unknown tool selected default to pen tool */
- wacom->tool[1] = BTN_TOOL_PEN;
+ else {
+ wacom->tool[0] = BTN_TOOL_PEN;
wacom->id[0] = STYLUS_DEVICE_ID;
}
- input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */
- input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- input_report_abs(input, ABS_PRESSURE, pressure);
+ }
- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
- /* Only allow the stylus2 button to be reported for the pen tool. */
- input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
- } else {
- /* report proximity-out of a (valid) tool */
- if (wacom->tool[1] != BTN_TOOL_RUBBER) {
- /* Unknown tool selected default to pen tool */
- wacom->tool[1] = BTN_TOOL_PEN;
- }
- input_report_key(input, wacom->tool[1], prox);
+ /* If the eraser is in prox, STYLUS2 is always set. If we
+ * mis-detected the type and notice that STYLUS2 isn't set
+ * then force the eraser out of prox and let the pen in.
+ */
+ if (wacom->tool[0] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
+ input_report_key(input, BTN_TOOL_RUBBER, 0);
+ input_report_abs(input, ABS_MISC, 0);
+ input_sync(input);
+ wacom->tool[0] = BTN_TOOL_PEN;
+ wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom->tool[0] = prox; /* Save proximity state */
+ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+ if (features->pressure_max > 255)
+ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+ pressure += (features->pressure_max + 1) / 2;
+
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
+
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+ /* Only allow the stylus2 button to be reported for the pen tool. */
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+
+ if (!prox)
+ wacom->id[0] = 0;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
return 1;
}
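The rewritten wacom_pl_irq() guesses the tool once on proximity-in and corrects itself mid-stream: these packets set bit 0x20 of data[4] whenever the eraser is genuinely in prox, so seeing the bit clear while BTN_TOOL_RUBBER is active means the guess was wrong. Two evdev frames are needed because pen and eraser are the same physical tool and userspace must see one leave before the other arrives; roughly (illustrative, not captured output):

/*
 * frame 1: BTN_TOOL_RUBBER 0, ABS_MISC 0, SYN_REPORT     (rubber leaves)
 * frame 2: BTN_TOOL_PEN 1, ABS_MISC STYLUS_DEVICE_ID,
 *          ABS_X/ABS_Y/ABS_PRESSURE ..., SYN_REPORT      (pen enters)
 */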
@@ -645,6 +631,130 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 0;
}
+static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+{
+ unsigned char *data = wacom_wac->data;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+ struct wacom_features *features = &wacom_wac->features;
+ int bat_charging, bat_percent, touch_ring_mode;
+ __u32 serial;
+ int i;
+
+ if (data[0] != WACOM_REPORT_REMOTE) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d", __func__, data[0]);
+ return 0;
+ }
+
+ serial = data[3] + (data[4] << 8) + (data[5] << 16);
+ wacom_wac->id[0] = PAD_DEVICE_ID;
+
+ input_report_key(input, BTN_0, (data[9] & 0x01));
+ input_report_key(input, BTN_1, (data[9] & 0x02));
+ input_report_key(input, BTN_2, (data[9] & 0x04));
+ input_report_key(input, BTN_3, (data[9] & 0x08));
+ input_report_key(input, BTN_4, (data[9] & 0x10));
+ input_report_key(input, BTN_5, (data[9] & 0x20));
+ input_report_key(input, BTN_6, (data[9] & 0x40));
+ input_report_key(input, BTN_7, (data[9] & 0x80));
+
+ input_report_key(input, BTN_8, (data[10] & 0x01));
+ input_report_key(input, BTN_9, (data[10] & 0x02));
+ input_report_key(input, BTN_A, (data[10] & 0x04));
+ input_report_key(input, BTN_B, (data[10] & 0x08));
+ input_report_key(input, BTN_C, (data[10] & 0x10));
+ input_report_key(input, BTN_X, (data[10] & 0x20));
+ input_report_key(input, BTN_Y, (data[10] & 0x40));
+ input_report_key(input, BTN_Z, (data[10] & 0x80));
+
+ input_report_key(input, BTN_BASE, (data[11] & 0x01));
+ input_report_key(input, BTN_BASE2, (data[11] & 0x02));
+
+ if (data[12] & 0x80)
+ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+ else
+ input_report_abs(input, ABS_WHEEL, 0);
+
+ bat_percent = data[7] & 0x7f;
+ bat_charging = !!(data[7] & 0x80);
+
+ if (data[9] | data[10] | (data[11] & 0x03) | data[12])
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ else
+ input_report_abs(input, ABS_MISC, 0);
+
+ input_event(input, EV_MSC, MSC_SERIAL, serial);
+
+ /* Which mode select (LED light) is currently on? */
+ touch_ring_mode = (data[11] & 0xC0) >> 6;
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (wacom_wac->serial[i] == serial)
+ wacom->led.select[i] = touch_ring_mode;
+ }
+
+ if (!wacom->battery &&
+ !(features->quirks & WACOM_QUIRK_BATTERY)) {
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ INIT_WORK(&wacom->work, wacom_battery_work);
+ wacom_schedule_work(wacom_wac);
+ }
+
+ wacom_notify_battery(wacom_wac, bat_percent, bat_charging, 1,
+ bat_charging);
+
+ return 1;
+}
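wacom_remote_irq() unpacks three bytes of button state: data[9] maps to BTN_0..BTN_7, data[10] to BTN_8/BTN_9 plus BTN_A..BTN_Z (six consecutive codes), and data[11] bits 0-1 to BTN_BASE/BTN_BASE2, with bits 6-7 of data[11] carrying the active touch-ring LED mode. A standalone illustration of the per-bit decode (hypothetical report byte):

#include <stdio.h>

int main(void)
{
	unsigned char data9 = 0x05;	/* say BTN_0 and BTN_2 are down */
	int i;

	for (i = 0; i < 8; i++)
		printf("BTN_%d = %d\n", i, !!(data9 & (1 << i)));
	return 0;
}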
+
+static int wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
+{
+ struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+ unsigned char *data = wacom_wac->data;
+ int i;
+
+ if (data[0] != WACOM_REPORT_DEVICE_LIST)
+ return 0;
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ int j = i * 6;
+ int serial = (data[j+6] << 16) + (data[j+5] << 8) + data[j+4];
+ bool connected = data[j+2];
+
+ if (connected) {
+ int k;
+
+ if (wacom_wac->serial[i] == serial)
+ continue;
+
+ if (wacom_wac->serial[i]) {
+ wacom_remote_destroy_attr_group(wacom,
+ wacom_wac->serial[i]);
+ }
+
+ /* A remote can pair more than once with an EKR,
+ * check to make sure this serial isn't already paired.
+ */
+ for (k = 0; k < WACOM_MAX_REMOTES; k++) {
+ if (wacom_wac->serial[k] == serial)
+ break;
+ }
+
+ if (k < WACOM_MAX_REMOTES) {
+ wacom_wac->serial[i] = serial;
+ continue;
+ }
+ wacom_remote_create_attr_group(wacom, serial, i);
+
+ } else if (wacom_wac->serial[i]) {
+ wacom_remote_destroy_attr_group(wacom,
+ wacom_wac->serial[i]);
+ }
+ }
+
+ return 0;
+}
+
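Because a remote can pair with the same EKR receiver more than once, one serial may legitimately appear in several of the five connection slots; the k-loop above ensures only the first occurrence owns a sysfs attribute group while later slots merely record the serial. The test reduces to a membership scan (sketch with plain arrays, not driver code):

static int serial_already_paired(const int *slots, int n, int serial)
{
	int k;

	for (k = 0; k < n; k++)
		if (slots[k] == serial)
			return 1;	/* another slot owns this remote */
	return 0;
}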
static void wacom_intuos_general(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -1437,6 +1547,12 @@ static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void wacom_wac_pen_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ return;
+}
+
static void wacom_wac_pen_report(struct hid_device *hdev,
struct hid_report *report)
{
@@ -1491,6 +1607,13 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS,
ABS_MT_POSITION_Y, 4);
break;
+ case HID_DG_WIDTH:
+ case HID_DG_HEIGHT:
+ features->last_slot_field = usage->hid;
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MAJOR, 0);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MINOR, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ break;
case HID_DG_CONTACTID:
features->last_slot_field = usage->hid;
break;
@@ -1504,6 +1627,10 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
features->last_slot_field = usage->hid;
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
+ case HID_DG_CONTACTCOUNT:
+ wacom_wac->hid_data.cc_index = field->index;
+ wacom_wac->hid_data.cc_value_index = usage->usage_index;
+ break;
}
}
@@ -1515,6 +1642,10 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
bool prox = hid_data->tipswitch &&
!wacom_wac->shared->stylus_in_proximity;
+ wacom_wac->hid_data.num_received++;
+ if (wacom_wac->hid_data.num_received > wacom_wac->hid_data.num_expected)
+ return;
+
if (mt) {
int slot;
@@ -1531,6 +1662,13 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
hid_data->x);
input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y,
hid_data->y);
+
+ if (test_bit(ABS_MT_TOUCH_MAJOR, input->absbit)) {
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, max(hid_data->width, hid_data->height));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, min(hid_data->width, hid_data->height));
+ if (hid_data->width != hid_data->height)
+ input_report_abs(input, ABS_MT_ORIENTATION, hid_data->width <= hid_data->height ? 0 : 1);
+ }
}
}
@@ -1547,6 +1685,12 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
case HID_GD_Y:
wacom_wac->hid_data.y = value;
break;
+ case HID_DG_WIDTH:
+ wacom_wac->hid_data.width = value;
+ break;
+ case HID_DG_HEIGHT:
+ wacom_wac->hid_data.height = value;
+ break;
case HID_DG_CONTACTID:
wacom_wac->hid_data.id = value;
break;
@@ -1564,6 +1708,24 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
return 0;
}
+static void wacom_wac_finger_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+
+ if (hid_data->cc_index >= 0) {
+ struct hid_field *field = report->field[hid_data->cc_index];
+ int value = field->value[hid_data->cc_value_index];
+ if (value)
+ hid_data->num_expected = value;
+ } else {
+ hid_data->num_expected = wacom_wac->features.touch_max;
+ }
+}
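The new pre-report hook decides how many contact packets make up one logical touch frame: when the report carries HID_DG_CONTACTCOUNT (cc_index was recorded at usage-mapping time), its live value wins; otherwise the device's static touch_max is used. That count is what lets wacom_wac_finger_report() below hold back input_sync() until the frame is complete. The bookkeeping reduces to (sketch, hypothetical types):

struct frame_state {
	int expected;	/* contacts promised for this frame */
	int received;	/* contacts seen so far */
};

static int frame_complete(const struct frame_state *f)
{
	return f->received >= f->expected;
}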
+
static void wacom_wac_finger_report(struct hid_device *hdev,
struct hid_report *report)
{
@@ -1572,10 +1734,18 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ /* If more packets of data are expected, give us a chance to
+ * process them rather than immediately syncing a partial
+ * update.
+ */
+ if (wacom_wac->hid_data.num_received < wacom_wac->hid_data.num_expected)
+ return;
+
if (touch_max > 1)
input_mt_sync_frame(input);
input_sync(input);
+ wacom_wac->hid_data.num_received = 0;
/* keep touch state for pen event */
wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -1615,6 +1785,25 @@ int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
+{
+ int r;
+
+ for (r = 0; r < report->maxfield; r++) {
+ struct hid_field *field;
+ unsigned count, n;
+
+ field = report->field[r];
+ count = field->report_count;
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < count; n++)
+ wacom_wac_event(hdev, field, &field->usage[n], field->value[n]);
+ }
+}
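wacom_report_events() replaces the dropped .event callback (see the hid_driver change above): instead of the HID core calling wacom_wac_event() per usage, the driver walks the report itself from .report context, skipping array/constant fields that lack the VARIABLE flag, so the pre_report hooks are guaranteed to run first. The traversal is the usual HID field/usage double loop; schematically (handle() is a placeholder):

for (r = 0; r < report->maxfield; r++)
	for (n = 0; n < report->field[r]->report_count; n++)
		handle(report->field[r]->usage[n].hid,	/* what it is */
		       report->field[r]->value[n]);	/* its value  */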
+
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
{
struct wacom *wacom = hid_get_drvdata(hdev);
@@ -1625,6 +1814,14 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
return;
if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_pre_report(hdev, report);
+
+ if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_pre_report(hdev, report);
+
+ wacom_report_events(hdev, report);
+
+ if (WACOM_PEN_FIELD(field))
return wacom_wac_pen_report(hdev, report);
if (WACOM_FINGER_FIELD(field))
@@ -1699,7 +1896,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
int y = (data[3] << 4) | (data[4] & 0x0f);
int width, height;
- if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+ if (features->type >= INTUOSPS && features->type <= INTUOSHT) {
width = data[5] * 100;
height = data[6] * 100;
} else {
@@ -2118,6 +2315,13 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_wireless_irq(wacom_wac, len);
break;
+ case REMOTE:
+ if (wacom_wac->data[0] == WACOM_REPORT_DEVICE_LIST)
+ sync = wacom_remote_status_irq(wacom_wac, len);
+ else
+ sync = wacom_remote_irq(wacom_wac, len);
+ break;
+
default:
sync = false;
break;
@@ -2213,6 +2417,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
features->x_max = 4096;
features->y_max = 4096;
}
+ else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ }
}
/*
@@ -2220,10 +2427,13 @@ void wacom_setup_device_quirks(struct wacom *wacom)
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
- * so rewrite this one to be of type BTN_TOOL_FINGER.
+ * so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
if (features->type == BAMBOO_PAD)
- features->device_type |= WACOM_DEVICETYPE_TOUCH;
+ features->device_type = WACOM_DEVICETYPE_TOUCH;
+
+ if (features->type == REMOTE)
+ features->device_type = WACOM_DEVICETYPE_PAD;
if (wacom->hdev->bus == BUS_BLUETOOTH)
features->quirks |= WACOM_QUIRK_BATTERY;
@@ -2239,13 +2449,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
if (features->type == WIRELESS) {
-
- /* monitor never has input and pen/touch have delayed create */
- features->quirks |= WACOM_QUIRK_NO_INPUT;
-
- /* must be monitor interface if no device_type set */
- if (features->device_type == WACOM_DEVICETYPE_NONE) {
- features->quirks |= WACOM_QUIRK_MONITOR;
+ if (features->device_type == WACOM_DEVICETYPE_WL_MONITOR) {
features->quirks |= WACOM_QUIRK_BATTERY;
}
}
@@ -2510,11 +2714,23 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
+ int button_count)
+{
+ int i;
+
+ for (i = 0; i < button_count && i < 10; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ for (i = 10; i < button_count && i < 16; i++)
+ __set_bit(BTN_A + (i-10), input_dev->keybit);
+ for (i = 16; i < button_count && i < 18; i++)
+ __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+}
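wacom_setup_numbered_buttons() folds roughly a dozen per-model __set_bit() blocks into one mapping, which is exactly enough for the 18 keys of the 21UX2/22HD and the Express Key Remote. Worked out (derived from the loops above):

/*
 * i =  0..9  -> BTN_0 .. BTN_9
 * i = 10..15 -> BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z
 * i = 16..17 -> BTN_BASE, BTN_BASE2
 *
 * e.g. button_count = 9 sets BTN_0..BTN_8 and nothing else.
 */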
+
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
- int i;
if (!(features->device_type & WACOM_DEVICETYPE_PAD))
return -ENODEV;
@@ -2531,10 +2747,14 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
/* kept for making udev and libwacom accepting the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
+ wacom_setup_numbered_buttons(input_dev, features->numbered_buttons);
+
switch (features->type) {
+
+ case CINTIQ_HYBRID:
+ case DTK:
+ case DTUS:
case GRAPHIRE_BT:
- __set_bit(BTN_0, input_dev->keybit);
- __set_bit(BTN_1, input_dev->keybit);
break;
case WACOM_MO:
@@ -2552,16 +2772,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
break;
case WACOM_24HD:
- __set_bit(BTN_A, input_dev->keybit);
- __set_bit(BTN_B, input_dev->keybit);
- __set_bit(BTN_C, input_dev->keybit);
- __set_bit(BTN_X, input_dev->keybit);
- __set_bit(BTN_Y, input_dev->keybit);
- __set_bit(BTN_Z, input_dev->keybit);
-
- for (i = 0; i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
__set_bit(KEY_PROG1, input_dev->keybit);
__set_bit(KEY_PROG2, input_dev->keybit);
__set_bit(KEY_PROG3, input_dev->keybit);
@@ -2583,12 +2793,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
break;
- case DTK:
- for (i = 0; i < 6; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
- break;
-
case WACOM_22HD:
__set_bit(KEY_PROG1, input_dev->keybit);
__set_bit(KEY_PROG2, input_dev->keybit);
@@ -2596,52 +2800,22 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
/* fall through */
case WACOM_21UX2:
- __set_bit(BTN_A, input_dev->keybit);
- __set_bit(BTN_B, input_dev->keybit);
- __set_bit(BTN_C, input_dev->keybit);
- __set_bit(BTN_X, input_dev->keybit);
- __set_bit(BTN_Y, input_dev->keybit);
- __set_bit(BTN_Z, input_dev->keybit);
- __set_bit(BTN_BASE, input_dev->keybit);
- __set_bit(BTN_BASE2, input_dev->keybit);
- /* fall through */
-
case WACOM_BEE:
- __set_bit(BTN_8, input_dev->keybit);
- __set_bit(BTN_9, input_dev->keybit);
- /* fall through */
-
case CINTIQ:
- for (i = 0; i < 8; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
break;
case WACOM_13HD:
- for (i = 0; i < 9; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
case INTUOS3:
case INTUOS3L:
- __set_bit(BTN_4, input_dev->keybit);
- __set_bit(BTN_5, input_dev->keybit);
- __set_bit(BTN_6, input_dev->keybit);
- __set_bit(BTN_7, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
/* fall through */
case INTUOS3S:
- __set_bit(BTN_0, input_dev->keybit);
- __set_bit(BTN_1, input_dev->keybit);
- __set_bit(BTN_2, input_dev->keybit);
- __set_bit(BTN_3, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
break;
@@ -2649,15 +2823,8 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOS5L:
case INTUOSPM:
case INTUOSPL:
- __set_bit(BTN_7, input_dev->keybit);
- __set_bit(BTN_8, input_dev->keybit);
- /* fall through */
-
case INTUOS5S:
case INTUOSPS:
- for (i = 0; i < 7; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
@@ -2672,28 +2839,10 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOS4:
case INTUOS4L:
- __set_bit(BTN_7, input_dev->keybit);
- __set_bit(BTN_8, input_dev->keybit);
- /* fall through */
-
case INTUOS4S:
- for (i = 0; i < 7; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
- case CINTIQ_HYBRID:
- for (i = 0; i < 9; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
- break;
-
- case DTUS:
- for (i = 0; i < 4; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- break;
-
case INTUOSHT:
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
@@ -2705,6 +2854,11 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
break;
+ case REMOTE:
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -2720,7 +2874,7 @@ static const struct wacom_features wacom_features_0x10 =
GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x81 =
{ "Wacom Graphire BT", 16704, 12064, 511, 32,
- GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES, 2 };
static const struct wacom_features wacom_features_0x11 =
{ "Wacom Graphire2 4x5", 10206, 7422, 511, 63,
GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
@@ -2846,77 +3000,77 @@ static const struct wacom_features wacom_features_0x45 =
INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xB0 =
{ "Wacom Intuos3 4x5", 25400, 20320, 1023, 63,
- INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 };
static const struct wacom_features wacom_features_0xB1 =
{ "Wacom Intuos3 6x8", 40640, 30480, 1023, 63,
- INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xB2 =
{ "Wacom Intuos3 9x12", 60960, 45720, 1023, 63,
- INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xB3 =
{ "Wacom Intuos3 12x12", 60960, 60960, 1023, 63,
- INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xB4 =
{ "Wacom Intuos3 12x19", 97536, 60960, 1023, 63,
- INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xB5 =
{ "Wacom Intuos3 6x11", 54204, 31750, 1023, 63,
- INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xB7 =
{ "Wacom Intuos3 4x6", 31496, 19685, 1023, 63,
- INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 };
static const struct wacom_features wacom_features_0xB8 =
{ "Wacom Intuos4 4x6", 31496, 19685, 2047, 63,
- INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 };
static const struct wacom_features wacom_features_0xB9 =
{ "Wacom Intuos4 6x9", 44704, 27940, 2047, 63,
- INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0xBA =
{ "Wacom Intuos4 8x13", 65024, 40640, 2047, 63,
- INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", 97536, 60960, 2047, 63,
- INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
- INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0xBD =
{ "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
- INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0x26 =
{ "Wacom Intuos5 touch S", 31496, 19685, 2047, 63,
- INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+ INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16 };
static const struct wacom_features wacom_features_0x27 =
{ "Wacom Intuos5 touch M", 44704, 27940, 2047, 63,
- INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+ INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 };
static const struct wacom_features wacom_features_0x28 =
{ "Wacom Intuos5 touch L", 65024, 40640, 2047, 63,
- INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+ INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 };
static const struct wacom_features wacom_features_0x29 =
{ "Wacom Intuos5 S", 31496, 19685, 2047, 63,
- INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 };
static const struct wacom_features wacom_features_0x2A =
{ "Wacom Intuos5 M", 44704, 27940, 2047, 63,
- INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
static const struct wacom_features wacom_features_0x314 =
{ "Wacom Intuos Pro S", 31496, 19685, 2047, 63,
- INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x315 =
{ "Wacom Intuos Pro M", 44704, 27940, 2047, 63,
- INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x317 =
{ "Wacom Intuos Pro L", 65024, 40640, 2047, 63,
- INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
- WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
{ "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
- WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
@@ -2925,11 +3079,11 @@ static const struct wacom_features wacom_features_0xF6 =
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
{ "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
- WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
- WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
@@ -2937,20 +3091,20 @@ static const struct wacom_features wacom_features_0x32C =
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32B, .touch_max = 10 };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", 87200, 65600, 1023, 63,
- CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
static const struct wacom_features wacom_features_0xC5 =
{ "Wacom Cintiq 20WSX", 86680, 54180, 1023, 63,
- WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
- WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
{ "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
- WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
{ "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
- WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
@@ -2969,22 +3123,22 @@ static const struct wacom_features wacom_features_0xF0 =
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
{ "Wacom DTU1031", 21896, 13760, 511, 0,
- DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
{ "Wacom DTU1031X", 22472, 12728, 511, 0,
- DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
{ "Wacom DTU1141", 23472, 13203, 1023, 0,
- DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
static const struct wacom_features wacom_features_0x57 =
{ "Wacom DTK2241", 95640, 54060, 2047, 63,
- DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
{ "Wacom DTH2242", 95640, 54060, 2047, 63,
- DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
@@ -2993,15 +3147,15 @@ static const struct wacom_features wacom_features_0x5D = /* Touch */
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
{ "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
- WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
{ "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
- WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
{ "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
- WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
@@ -3148,7 +3302,7 @@ static const struct wacom_features wacom_features_0x6004 =
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
{ "Wacom ISDv5 307", 59152, 33448, 2047, 63,
- CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
@@ -3157,7 +3311,7 @@ static const struct wacom_features wacom_features_0x309 =
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
{ "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
- CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
@@ -3174,6 +3328,10 @@ static const struct wacom_features wacom_features_0x323 =
{ "Wacom Intuos P M", 21600, 13500, 1023, 31,
INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x331 =
+ { "Wacom Express Key Remote", 0, 0, 0, 0,
+ REMOTE, 0, 0, 18, .check_for_hid_type = true,
+ .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3329,6 +3487,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x32B) },
{ USB_DEVICE_WACOM(0x32C) },
{ USB_DEVICE_WACOM(0x32F) },
+ { USB_DEVICE_WACOM(0x331) },
{ USB_DEVICE_WACOM(0x333) },
{ USB_DEVICE_WACOM(0x335) },
{ USB_DEVICE_WACOM(0x336) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 2978c303909d..1e270d401e18 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -16,6 +16,8 @@
#define WACOM_PKGLEN_MAX 192
#define WACOM_NAME_MAX 64
+#define WACOM_MAX_REMOTES 5
+#define WACOM_STATUS_UNKNOWN 255
/* packet length for individual models */
#define WACOM_PKGLEN_BBFUN 9
@@ -65,11 +67,11 @@
#define WACOM_REPORT_USB 192
#define WACOM_REPORT_BPAD_PEN 3
#define WACOM_REPORT_BPAD_TOUCH 16
+#define WACOM_REPORT_DEVICE_LIST 16
+#define WACOM_REPORT_REMOTE 17
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
-#define WACOM_QUIRK_NO_INPUT 0x0002
-#define WACOM_QUIRK_MONITOR 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -77,6 +79,7 @@
#define WACOM_DEVICETYPE_PEN 0x0001
#define WACOM_DEVICETYPE_TOUCH 0x0002
#define WACOM_DEVICETYPE_PAD 0x0004
+#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
@@ -130,6 +133,7 @@ enum {
WACOM_24HDT,
WACOM_27QHDT,
BAMBOO_PAD,
+ REMOTE,
TABLETPC, /* add new TPC below */
TABLETPCE,
TABLETPC2FG,
@@ -149,6 +153,7 @@ struct wacom_features {
int type;
int x_resolution;
int y_resolution;
+ int numbered_buttons;
int x_min;
int y_min;
int device_type;
@@ -193,6 +198,10 @@ struct hid_data {
int width;
int height;
int id;
+ int cc_index;
+ int cc_value_index;
+ int num_expected;
+ int num_received;
};
struct wacom_wac {
@@ -204,7 +213,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u32 serial[5];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index d04643f9548b..95638df73d1c 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1110,7 +1110,7 @@ static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return 0;
}
-static struct vm_operations_struct cs_char_vm_ops = {
+static const struct vm_operations_struct cs_char_vm_ops = {
.fault = cs_char_vma_fault,
};
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 603ce97e9027..c4dcab048cb8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -601,6 +601,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u64 aligned_data = 0;
int ret;
bool signal = false;
+ int num_vecs = ((bufferlen != 0) ? 3 : 1);
/* Setup the descriptor */
@@ -618,7 +619,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
+ &signal);
/*
* Signalling the host is conditional on many factors:
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 4506a6623618..2f9aead4ecfc 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -347,6 +347,7 @@ enum {
IDE = 0,
SCSI,
NIC,
+ ND_NIC,
MAX_PERF_CHN,
};
@@ -391,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
struct vmbus_channel *primary = channel->primary_channel;
int next_node;
struct cpumask available_mask;
+ struct cpumask *alloced_mask;
for (i = IDE; i < MAX_PERF_CHN; i++) {
if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -408,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
* channel, bind it to cpu 0.
*/
channel->numa_node = 0;
- cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
channel->target_cpu = 0;
channel->target_vp = hv_context.vp_index[0];
return;
@@ -433,21 +434,38 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->numa_node = next_node;
primary = channel;
}
+ alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
- if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+ if (cpumask_weight(alloced_mask) ==
cpumask_weight(cpumask_of_node(primary->numa_node))) {
/*
* We have cycled through all the CPUs in the node;
* reset the alloced map.
*/
- cpumask_clear(&primary->alloced_cpus_in_node);
+ cpumask_clear(alloced_mask);
}
- cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+ cpumask_xor(&available_mask, alloced_mask,
cpumask_of_node(primary->numa_node));
- cur_cpu = cpumask_next(-1, &available_mask);
- cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+ cur_cpu = -1;
+ while (true) {
+ cur_cpu = cpumask_next(cur_cpu, &available_mask);
+ if (cur_cpu >= nr_cpu_ids) {
+ cur_cpu = -1;
+ cpumask_copy(&available_mask,
+ cpumask_of_node(primary->numa_node));
+ continue;
+ }
+
+ if (!cpumask_test_cpu(cur_cpu,
+ &primary->alloced_cpus_in_node)) {
+ cpumask_set_cpu(cur_cpu,
+ &primary->alloced_cpus_in_node);
+ cpumask_set_cpu(cur_cpu, alloced_mask);
+ break;
+ }
+ }
channel->target_cpu = cur_cpu;
channel->target_vp = hv_context.vp_index[cur_cpu];
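The rewritten selection layers two masks: the channel-private alloced_cpus_in_node and the new per-node hv_numa_map shared by all channels. When the per-channel view of the node is exhausted, available_mask is refilled from the whole node and the walk continues until a CPU outside the channel's own mask turns up. A plain-C model of the idea (sketch, with 32-bit masks standing in for struct cpumask):

#include <stdint.h>

static int pick_cpu(uint32_t node_cpus, uint32_t *node_used,
		    uint32_t *chan_used)
{
	uint32_t avail = node_cpus & ~*node_used;
	int cpu;

	if (!avail)
		avail = node_cpus;	/* wrapped: rescan the whole node */

	for (cpu = 0; cpu < 32; cpu++) {
		uint32_t bit = 1u << cpu;

		if ((avail & bit) && !(*chan_used & bit)) {
			*chan_used |= bit;
			*node_used |= bit;
			return cpu;
		}
	}
	return -1;	/* the real loop keeps widening until it succeeds */
}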
@@ -469,6 +487,10 @@ void vmbus_initiate_unload(void)
{
struct vmbus_channel_message_header hdr;
+ /* Pre-Win2012R2 hosts don't support reconnect */
+ if (vmbus_proto_version < VERSION_WIN8_1)
+ return;
+
init_completion(&vmbus_connection.unload_event);
memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
hdr.msgtype = CHANNELMSG_UNLOAD;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index d3943bceecc3..6341be8739ae 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -93,11 +93,14 @@ static int query_hypervisor_info(void)
*/
static u64 do_hypercall(u64 control, void *input, void *output)
{
-#ifdef CONFIG_X86_64
- u64 hv_status = 0;
u64 input_address = (input) ? virt_to_phys(input) : 0;
u64 output_address = (output) ? virt_to_phys(output) : 0;
void *hypercall_page = hv_context.hypercall_page;
+#ifdef CONFIG_X86_64
+ u64 hv_status = 0;
+
+ if (!hypercall_page)
+ return (u64)ULLONG_MAX;
__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
__asm__ __volatile__("call *%3" : "=a" (hv_status) :
@@ -112,13 +115,13 @@ static u64 do_hypercall(u64 control, void *input, void *output)
u32 control_lo = control & 0xFFFFFFFF;
u32 hv_status_hi = 1;
u32 hv_status_lo = 1;
- u64 input_address = (input) ? virt_to_phys(input) : 0;
u32 input_address_hi = input_address >> 32;
u32 input_address_lo = input_address & 0xFFFFFFFF;
- u64 output_address = (output) ? virt_to_phys(output) : 0;
u32 output_address_hi = output_address >> 32;
u32 output_address_lo = output_address & 0xFFFFFFFF;
- void *hypercall_page = hv_context.hypercall_page;
+
+ if (!hypercall_page)
+ return (u64)ULLONG_MAX;
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
"=a"(hv_status_lo) : "d" (control_hi),
@@ -130,6 +133,56 @@ static u64 do_hypercall(u64 control, void *input, void *output)
#endif /* !x86_64 */
}
+#ifdef CONFIG_X86_64
+static cycle_t read_hv_clock_tsc(struct clocksource *arg)
+{
+ cycle_t current_tick;
+ struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
+
+ if (tsc_pg->tsc_sequence != -1) {
+ /*
+ * Use the tsc page to compute the value.
+ */
+
+ while (1) {
+ cycle_t tmp;
+ u32 sequence = tsc_pg->tsc_sequence;
+ u64 cur_tsc;
+ u64 scale = tsc_pg->tsc_scale;
+ s64 offset = tsc_pg->tsc_offset;
+
+ rdtscll(cur_tsc);
+ /* current_tick = ((cur_tsc *scale) >> 64) + offset */
+ asm("mulq %3"
+ : "=d" (current_tick), "=a" (tmp)
+ : "a" (cur_tsc), "r" (scale));
+
+ current_tick += offset;
+ if (tsc_pg->tsc_sequence == sequence)
+ return current_tick;
+
+ if (tsc_pg->tsc_sequence != -1)
+ continue;
+ /*
+ * Fallback using MSR method.
+ */
+ break;
+ }
+ }
+ rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+ return current_tick;
+}
+
+static struct clocksource hyperv_cs_tsc = {
+ .name = "hyperv_clocksource_tsc_page",
+ .rating = 425,
+ .read = read_hv_clock_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
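read_hv_clock_tsc() is a lock-free sequence read: sample tsc_sequence, compute ((cur_tsc * tsc_scale) >> 64) + tsc_offset (the inline mulq keeps %rdx, the high 64 bits of the 128-bit product), and accept the value only if the sequence did not move meanwhile; a sequence of -1 marks the page invalid and falls back to the reference-count MSR. The scaling step in portable form (sketch using GCC's __int128, not kernel code):

#include <stdint.h>

static uint64_t scale_tsc(uint64_t cur_tsc, uint64_t scale, int64_t offset)
{
	unsigned __int128 prod = (unsigned __int128)cur_tsc * scale;

	return (uint64_t)(prod >> 64) + offset;	/* high half plus offset */
}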
+
+
/*
* hv_init - Main initialization routine.
*
@@ -139,7 +192,9 @@ int hv_init(void)
{
int max_leaf;
union hv_x64_msr_hypercall_contents hypercall_msr;
+ union hv_x64_msr_hypercall_contents tsc_msr;
void *virtaddr = NULL;
+ void *va_tsc = NULL;
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
@@ -183,6 +238,22 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
+#ifdef CONFIG_X86_64
+ if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+ va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+ if (!va_tsc)
+ goto cleanup;
+ hv_context.tsc_page = va_tsc;
+
+ rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+
+ tsc_msr.enable = 1;
+ tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
+
+ wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+ clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+ }
+#endif
return 0;
cleanup:
@@ -216,6 +287,21 @@ void hv_cleanup(void)
vfree(hv_context.hypercall_page);
hv_context.hypercall_page = NULL;
}
+
+#ifdef CONFIG_X86_64
+ /*
+ * Clean up the TSC-page-based clocksource.
+ */
+ if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+
+ hypercall_msr.as_uint64 = 0;
+ wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+ vfree(hv_context.tsc_page);
+ hv_context.tsc_page = NULL;
+ }
+#endif
}
/*
@@ -271,7 +357,7 @@ static int hv_ce_set_next_event(unsigned long delta,
{
cycle_t current_tick;
- WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+ WARN_ON(!clockevent_state_oneshot(evt));
rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
current_tick += delta;
@@ -279,31 +365,24 @@ static int hv_ce_set_next_event(unsigned long delta,
return 0;
}
-static void hv_ce_setmode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int hv_ce_shutdown(struct clock_event_device *evt)
+{
+ wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
+ wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+
+ return 0;
+}
+
+static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
union hv_timer_config timer_cfg;
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* unsupported */
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- timer_cfg.enable = 1;
- timer_cfg.auto_enable = 1;
- timer_cfg.sintx = VMBUS_MESSAGE_SINT;
- wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
- wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ timer_cfg.enable = 1;
+ timer_cfg.auto_enable = 1;
+ timer_cfg.sintx = VMBUS_MESSAGE_SINT;
+ wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+
+ return 0;
}
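This is the tree-wide clockevents conversion: the multiplexed set_mode callback becomes per-state hooks returning int, unsupported states simply get no hook, and direct callers such as hv_synic_cleanup() further down now invoke hv_ce_shutdown() themselves. The old switch maps as follows (derived from the removed code):

/*
 * CLOCK_EVT_MODE_ONESHOT           -> .set_state_oneshot
 * CLOCK_EVT_MODE_SHUTDOWN / UNUSED -> .set_state_shutdown
 * CLOCK_EVT_MODE_PERIODIC / RESUME -> no callback (unsupported)
 */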
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
@@ -318,7 +397,8 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
* references to the hv_vmbus module making it impossible to unload.
*/
- dev->set_mode = hv_ce_setmode;
+ dev->set_state_shutdown = hv_ce_shutdown;
+ dev->set_state_oneshot = hv_ce_set_oneshot;
dev->set_next_event = hv_ce_set_next_event;
}
@@ -329,6 +409,13 @@ int hv_synic_alloc(void)
size_t ced_size = sizeof(struct clock_event_device);
int cpu;
+ hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+ GFP_ATOMIC);
+ if (hv_context.hv_numa_map == NULL) {
+ pr_err("Unable to allocate NUMA map\n");
+ goto err;
+ }
+
for_each_online_cpu(cpu) {
hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
if (hv_context.event_dpc[cpu] == NULL) {
@@ -342,6 +429,7 @@ int hv_synic_alloc(void)
pr_err("Unable to allocate clock event device\n");
goto err;
}
+
hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
hv_context.synic_message_page[cpu] =
@@ -390,6 +478,7 @@ void hv_synic_free(void)
{
int cpu;
+ kfree(hv_context.hv_numa_map);
for_each_online_cpu(cpu)
hv_synic_free_cpu(cpu);
}
@@ -503,8 +592,7 @@ void hv_synic_cleanup(void *arg)
/* Turn off clockevent device */
if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
- hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
- hv_context.clk_evt[cpu]);
+ hv_ce_shutdown(hv_context.clk_evt[cpu]);
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -530,6 +618,4 @@ void hv_synic_cleanup(void *arg)
rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
sctrl.enable = 0;
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
-
- hv_synic_free_cpu(cpu);
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 8a725cd69ad7..b853b4b083bd 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -62,11 +62,13 @@
enum {
DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+ DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+ DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
- DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
+ DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
@@ -1296,13 +1298,25 @@ static void version_resp(struct hv_dynmem_device *dm,
if (dm->next_version == 0)
goto version_error;
- dm->next_version = 0;
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
- version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
- version_req.is_last_attempt = 1;
+ version_req.version.version = dm->next_version;
+
+ /*
+ * Set the next version to try in case the current version fails.
+ * The Win7 protocol ought to be the last one tried.
+ */
+ switch (version_req.version.version) {
+ case DYNMEM_PROTOCOL_VERSION_WIN8:
+ dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ version_req.is_last_attempt = 0;
+ break;
+ default:
+ dm->next_version = 0;
+ version_req.is_last_attempt = 1;
+ }
ret = vmbus_sendpacket(dm->dev->channel, &version_req,
sizeof(struct dm_version_request),
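With three protocol versions the negotiation becomes a retry chain: balloon_probe() opens with WIN10, and each NACK re-sends dm->next_version while the switch lines up the following fallback, so the order is WIN10 -> WIN8 -> WIN7 with is_last_attempt raised only on the final step. Tabulated (derived from the two hunks in this file):

/*
 * attempt  version sent  next_version  is_last_attempt
 *    1     WIN10         WIN8          0   (from balloon_probe)
 *    2     WIN8          WIN7          0
 *    3     WIN7          0             1
 */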
@@ -1442,7 +1456,7 @@ static int balloon_probe(struct hv_device *dev,
dm_device.dev = dev;
dm_device.state = DM_INITIALIZING;
- dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
INIT_LIST_HEAD(&dm_device.ha_region_list);
@@ -1474,7 +1488,7 @@ static int balloon_probe(struct hv_device *dev,
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
- version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
ret = vmbus_sendpacket(dev->channel, &version_req,
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index b50dd330cf31..db4b887b889d 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -116,7 +116,7 @@ static int fcopy_handle_handshake(u32 version)
static void fcopy_send_data(struct work_struct *dummy)
{
- struct hv_start_fcopy smsg_out;
+ struct hv_start_fcopy *smsg_out = NULL;
int operation = fcopy_transaction.fcopy_msg->operation;
struct hv_start_fcopy *smsg_in;
void *out_src;
@@ -136,21 +136,24 @@ static void fcopy_send_data(struct work_struct *dummy)
switch (operation) {
case START_FILE_COPY:
out_len = sizeof(struct hv_start_fcopy);
- memset(&smsg_out, 0, out_len);
- smsg_out.hdr.operation = operation;
+ smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
+ if (!smsg_out)
+ return;
+
+ smsg_out->hdr.operation = operation;
smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
- (__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
+ (__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
UTF16_LITTLE_ENDIAN,
- (__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
+ (__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
- smsg_out.copy_flags = smsg_in->copy_flags;
- smsg_out.file_size = smsg_in->file_size;
- out_src = &smsg_out;
+ smsg_out->copy_flags = smsg_in->copy_flags;
+ smsg_out->file_size = smsg_in->file_size;
+ out_src = smsg_out;
break;
default:
@@ -168,6 +171,8 @@ static void fcopy_send_data(struct work_struct *dummy)
fcopy_transaction.state = HVUTIL_READY;
}
}
+ kfree(smsg_out);
+
return;
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d85798d5992c..74c38a9f34a6 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,6 +353,9 @@ kvp_send_key(struct work_struct *dummy)
return;
message = kzalloc(sizeof(*message), GFP_KERNEL);
+ if (!message)
+ return;
+
message->kvp_hdr.operation = operation;
message->kvp_hdr.pool = pool;
in_msg = kvp_transaction.kvp_msg;
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index ea7ba5ef16a9..6a9d80a5332d 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -186,7 +186,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
return -EINVAL;
} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
- if (!msg)
+ if (!cn_msg)
return -ENOMEM;
cn_msg->id.idx = hvt->cn_id.idx;
cn_msg->id.val = hvt->cn_id.val;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index cddc0c9f6bf9..3d70e36c918e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -141,7 +141,7 @@ struct hv_port_info {
struct {
u32 target_sint;
u32 target_vp;
- u16 base_flag_bumber;
+ u16 base_flag_number;
u16 flag_count;
u32 rsvdz;
} event_port_info;
@@ -517,6 +517,7 @@ struct hv_context {
u64 guestid;
void *hypercall_page;
+ void *tsc_page;
bool synic_initialized;
@@ -551,10 +552,23 @@ struct hv_context {
* Support PV clockevent device.
*/
struct clock_event_device *clk_evt[NR_CPUS];
+ /*
+ * To manage allocations in a NUMA node.
+ * Array indexed by numa node ID.
+ */
+ struct cpumask *hv_numa_map;
};
extern struct hv_context hv_context;
+struct ms_hyperv_tsc_page {
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+ u64 reserved2[509];
+};
+
struct hv_ring_buffer_debug_info {
u32 current_interrupt_mask;
u32 current_read_index;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 6361d124f67d..70a1a9a22f87 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,10 +103,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
* there is room for the producer to send the pending packet.
*/
-static bool hv_need_to_signal_on_read(u32 old_rd,
- struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+ struct hv_ring_buffer_info *rbi)
{
- u32 prev_write_sz;
u32 cur_write_sz;
u32 r_size;
u32 write_loc = rbi->ring_buffer->write_index;
@@ -123,10 +122,6 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
read_loc - write_loc;
- prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
- old_rd - write_loc;
-
-
if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
return true;
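The fix changes what the "previous write size" means: rather than recomputing free space from a read index already advanced past the data (old_rd), the caller hands in bytes_avail_towrite captured under the ring lock before the copy, so the host is signalled exactly on the edge where free space crosses pending_sz. The predicate, isolated (sketch):

#include <stdint.h>

/* Signal only when free space crosses the pending threshold. */
static int need_signal(uint32_t prev_free, uint32_t cur_free,
		       uint32_t pending)
{
	return pending && prev_free < pending && cur_free >= pending;
}

For example, with pending = 4096, going from 2048 to 8192 bytes free signals the host; going from 5000 to 8192 does not, since the host was already able to make progress.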
@@ -517,7 +512,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
- u32 old_read;
if (buflen <= 0)
return -EINVAL;
@@ -528,8 +522,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
- old_read = bytes_avail_toread;
-
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -560,7 +552,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
- *signal = hv_need_to_signal_on_read(old_read, inring_info);
+ *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
return 0;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index cf204005ee78..f19b6f7a467a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -39,6 +39,8 @@
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
+#include <linux/screen_info.h>
+#include <linux/kdebug.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -48,12 +50,18 @@ static struct completion probe_event;
static int irq;
-static int hyperv_panic_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static void hyperv_report_panic(struct pt_regs *regs)
{
- struct pt_regs *regs;
+ static bool panic_reported;
- regs = current_pt_regs();
+ /*
+ * We prefer to report panic on 'die' chain as we have proper
+ * registers to report, but if we miss it (e.g. on BUG()) we need
+ * to report it on 'panic'.
+ */
+ if (panic_reported)
+ return;
+ panic_reported = true;
wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
@@ -65,18 +73,37 @@ static int hyperv_panic_event(struct notifier_block *nb,
* Let Hyper-V know there is crash data available
*/
wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+}
+
+static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
+ void *args)
+{
+ struct pt_regs *regs;
+
+ regs = current_pt_regs();
+
+ hyperv_report_panic(regs);
return NOTIFY_DONE;
}
+static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
+ void *args)
+{
+ struct die_args *die = (struct die_args *)args;
+ struct pt_regs *regs = die->regs;
+
+ hyperv_report_panic(regs);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyperv_die_block = {
+ .notifier_call = hyperv_die_event,
+};
static struct notifier_block hyperv_panic_block = {
.notifier_call = hyperv_panic_event,
};
-struct resource hyperv_mmio = {
- .name = "hyperv mmio",
- .flags = IORESOURCE_MEM,
-};
-EXPORT_SYMBOL_GPL(hyperv_mmio);
+struct resource *hyperv_mmio;
static int vmbus_exists(void)
{
@@ -414,6 +441,43 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
+static ssize_t channel_vp_mapping_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
+ unsigned long flags;
+ int buf_size = PAGE_SIZE, n_written, tot_written;
+ struct list_head *cur;
+
+ if (!channel)
+ return -ENODEV;
+
+ tot_written = snprintf(buf, buf_size, "%u:%u\n",
+ channel->offermsg.child_relid, channel->target_cpu);
+
+ spin_lock_irqsave(&channel->lock, flags);
+
+ list_for_each(cur, &channel->sc_list) {
+ if (tot_written >= buf_size - 1)
+ break;
+
+ cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
+ n_written = scnprintf(buf + tot_written,
+ buf_size - tot_written,
+ "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
+ tot_written += n_written;
+ }
+
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ return tot_written;
+}
+static DEVICE_ATTR_RO(channel_vp_mapping);
+
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
&dev_attr_id.attr,
@@ -438,6 +502,7 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_in_write_index.attr,
&dev_attr_in_read_bytes_avail.attr,
&dev_attr_in_write_bytes_avail.attr,
+ &dev_attr_channel_vp_mapping.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus);
@@ -763,38 +828,6 @@ static void vmbus_isr(void)
}
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int hyperv_cpu_disable(void)
-{
- return -ENOSYS;
-}
-
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
- static void *previous_cpu_disable;
-
- /*
- * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
- * ...) is not supported at this moment as channel interrupts are
- * distributed across all of them.
- */
-
- if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7))
- return;
-
- if (vmbus_loaded) {
- previous_cpu_disable = smp_ops.cpu_disable;
- smp_ops.cpu_disable = hyperv_cpu_disable;
- pr_notice("CPU offlining is not supported by hypervisor\n");
- } else if (previous_cpu_disable)
- smp_ops.cpu_disable = previous_cpu_disable;
-}
-#else
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
-}
-#endif
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
@@ -836,12 +869,14 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_alloc;
- hv_cpu_hotplug_quirk(true);
+ if (vmbus_proto_version > VERSION_WIN7)
+ cpu_hotplug_disable();
/*
* Only register if the crash MSRs are available
*/
- if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ register_die_notifier(&hyperv_die_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
}
@@ -863,8 +898,8 @@ err_cleanup:
}
/**
- * __vmbus_child_driver_register - Register a vmbus's driver
- * @drv: Pointer to driver structure you want to register
+ * __vmbus_child_driver_register() - Register a vmbus's driver
+ * @hv_driver: Pointer to driver structure you want to register
* @owner: owner module of the drv
* @mod_name: module name string
*
@@ -896,7 +931,8 @@ EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
* vmbus_driver_unregister() - Unregister a vmbus's driver
- * @drv: Pointer to driver structure you want to un-register
+ * @hv_driver: Pointer to driver structure you want to
+ * un-register
*
* Un-register the given driver that was previous registered with a call to
* vmbus_driver_register()
@@ -982,30 +1018,184 @@ void vmbus_device_unregister(struct hv_device *device_obj)
/*
- * VMBUS is an acpi enumerated device. Get the the information we
+ * VMBUS is an acpi enumerated device. Get the information we
* need from DSDT.
*/
-
+#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
+ resource_size_t start = 0;
+ resource_size_t end = 0;
+ struct resource *new_res;
+ struct resource **old_res = &hyperv_mmio;
+ struct resource **prev_res = NULL;
+
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
irq = res->data.irq.interrupts[0];
+ return AE_OK;
+
+ /*
+ * "Address" descriptors are for bus windows. Ignore
+ * "memory" descriptors, which are for registers on
+ * devices.
+ */
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ start = res->data.address32.address.minimum;
+ end = res->data.address32.address.maximum;
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
- hyperv_mmio.start = res->data.address64.address.minimum;
- hyperv_mmio.end = res->data.address64.address.maximum;
+ start = res->data.address64.address.minimum;
+ end = res->data.address64.address.maximum;
break;
+
+ default:
+ /* Unused resource type */
+ return AE_OK;
+
}
+ /*
+ * Ignore ranges that are below 1MB, as they're not
+ * necessary or useful here.
+ */
+ if (end < 0x100000)
+ return AE_OK;
+
+ new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
+ if (!new_res)
+ return AE_NO_MEMORY;
+
+ /* If this range overlaps the virtual TPM, truncate it. */
+ if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
+ end = VTPM_BASE_ADDRESS;
+
+ new_res->name = "hyperv mmio";
+ new_res->flags = IORESOURCE_MEM;
+ new_res->start = start;
+ new_res->end = end;
+
+ do {
+ if (!*old_res) {
+ *old_res = new_res;
+ break;
+ }
+
+ if ((*old_res)->end < new_res->start) {
+ new_res->sibling = *old_res;
+ if (prev_res)
+ (*prev_res)->sibling = new_res;
+ *old_res = new_res;
+ break;
+ }
+
+ prev_res = old_res;
+ old_res = &(*old_res)->sibling;
+
+ } while (1);
return AE_OK;
}
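The do/while insertion above keeps the hyperv_mmio chain sorted by descending address via the ->sibling links: a new range is linked in ahead of the first existing range that ends below its start. A sketch with two hypothetical windows:

	/*
	 * After inserting [0x100000-0x1fffff] and then [0xf8000000-0xfbffffff]:
	 *
	 *   hyperv_mmio -> [0xf8000000-0xfbffffff] -> [0x100000-0x1fffff] -> NULL
	 */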
+static int vmbus_acpi_remove(struct acpi_device *device)
+{
+ struct resource *cur_res;
+ struct resource *next_res;
+
+ if (hyperv_mmio) {
+ for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
+ next_res = cur_res->sibling;
+ kfree(cur_res);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
+ * @new: If successful, supplies a pointer to the
+ * allocated MMIO space.
+ * @device_obj: Identifies the caller
+ * @min: Minimum guest physical address of the
+ * allocation
+ * @max: Maximum guest physical address
+ * @size: Size of the range to be allocated
+ * @align: Alignment of the range to be allocated
+ * @fb_overlap_ok: Whether this allocation can be allowed
+ * to overlap the video frame buffer.
+ *
+ * This function walks the resources granted to VMBus by the
+ * _CRS object in the ACPI namespace underneath the parent
+ * "bridge" whether that's a root PCI bus in the Generation 1
+ * case or a Module Device in the Generation 2 case. It then
+ * attempts to allocate from the global MMIO pool in a way that
+ * matches the constraints supplied in these parameters and by
+ * that _CRS.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ resource_size_t min, resource_size_t max,
+ resource_size_t size, resource_size_t align,
+ bool fb_overlap_ok)
+{
+ struct resource *iter;
+ resource_size_t range_min, range_max, start, local_min, local_max;
+ const char *dev_n = dev_name(&device_obj->device);
+ u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
+ int i;
+
+ for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ if ((iter->start >= max) || (iter->end <= min))
+ continue;
+
+ range_min = iter->start;
+ range_max = iter->end;
+
+ /*
+ * If this range overlaps the frame buffer, split it into
+ * two tries.
+ */
+ for (i = 0; i < 2; i++) {
+ local_min = range_min;
+ local_max = range_max;
+ if (fb_overlap_ok || (range_min >= fb_end) ||
+ (range_max <= screen_info.lfb_base)) {
+ i++;
+ } else {
+ if ((range_min <= screen_info.lfb_base) &&
+ (range_max >= screen_info.lfb_base)) {
+ /*
+ * The frame buffer is in this window,
+ * so trim this into the part that
+ * precedes the frame buffer.
+ */
+ local_max = screen_info.lfb_base - 1;
+ range_min = fb_end;
+ } else {
+ range_min = fb_end;
+ continue;
+ }
+ }
+
+ start = (local_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= local_max; start += align) {
+ *new = request_mem_region_exclusive(start, size,
+ dev_n);
+ if (*new)
+ return 0;
+ }
+ }
+ }
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
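The start computation inside vmbus_allocate_mmio() rounds local_min up to the next align boundary with the usual mask trick, which assumes align is a power of two. A self-contained sketch of that rounding (the helper name here is illustrative, not part of the driver):

	/* Round x up to a power-of-two alignment, e.g. (0x1234, 0x1000) -> 0x2000. */
	static inline resource_size_t round_up_pow2(resource_size_t x,
						    resource_size_t align)
	{
		return (x + align - 1) & ~(align - 1);
	}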
+
static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
int ret_val = -ENODEV;
+ struct acpi_device *ancestor;
hv_acpi_dev = device;
@@ -1015,35 +1205,27 @@ static int vmbus_acpi_add(struct acpi_device *device)
if (ACPI_FAILURE(result))
goto acpi_walk_err;
/*
- * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
- * has the mmio ranges. Get that.
+ * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+ * firmware) is the VMOD that has the mmio ranges. Get that.
*/
- if (device->parent) {
- result = acpi_walk_resources(device->parent->handle,
- METHOD_NAME__CRS,
- vmbus_walk_resources, NULL);
+ for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
+ result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
if (ACPI_FAILURE(result))
- goto acpi_walk_err;
- if (hyperv_mmio.start && hyperv_mmio.end)
- request_resource(&iomem_resource, &hyperv_mmio);
+ continue;
+ if (hyperv_mmio)
+ break;
}
ret_val = 0;
acpi_walk_err:
complete(&probe_event);
+ if (ret_val)
+ vmbus_acpi_remove(device);
return ret_val;
}
-static int vmbus_acpi_remove(struct acpi_device *device)
-{
- int ret = 0;
-
- if (hyperv_mmio.start && hyperv_mmio.end)
- ret = release_resource(&hyperv_mmio);
- return ret;
-}
-
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
{"VMBus", 0},
@@ -1060,6 +1242,29 @@ static struct acpi_driver vmbus_acpi_driver = {
},
};
+static void hv_kexec_handler(void)
+{
+ int cpu;
+
+ hv_synic_clockevents_cleanup();
+ vmbus_initiate_unload();
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+ hv_cleanup();
+}
+
+static void hv_crash_handler(struct pt_regs *regs)
+{
+ vmbus_initiate_unload();
+ /*
+ * In crash handler we can't schedule synic cleanup for all CPUs,
+ * doing the cleanup for current CPU only. This should be sufficient
+ * for kdump.
+ */
+ hv_synic_cleanup(NULL);
+ hv_cleanup();
+}
+
static int __init hv_acpi_init(void)
{
int ret, t;
@@ -1092,6 +1297,9 @@ static int __init hv_acpi_init(void)
if (ret)
goto cleanup;
+ hv_setup_kexec_handler(hv_kexec_handler);
+ hv_setup_crash_handler(hv_crash_handler);
+
return 0;
cleanup:
@@ -1104,13 +1312,16 @@ static void __exit vmbus_exit(void)
{
int cpu;
+ hv_remove_kexec_handler();
+ hv_remove_crash_handler();
vmbus_connection.conn_state = DISCONNECTED;
hv_synic_clockevents_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
tasklet_kill(&msg_dpc);
vmbus_free_channels();
- if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ unregister_die_notifier(&hyperv_die_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
@@ -1120,8 +1331,10 @@ static void __exit vmbus_exit(void)
tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
}
+ hv_synic_free();
acpi_bus_unregister_driver(&vmbus_acpi_driver);
- hv_cpu_hotplug_quirk(false);
+ if (vmbus_proto_version > VERSION_WIN7)
+ cpu_hotplug_enable();
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7c65b7334738..500b262b89bb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -609,8 +609,8 @@ config SENSORS_IT87
depends on !PPC
select HWMON_VID
help
- If you say yes here you get support for ITE IT8705F, IT8712F,
- IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
+ If you say yes here you get support for ITE IT8705F, IT8712F, IT8716F,
+ IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F, IT8758E,
IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E,
IT8603E, IT8620E, and IT8623E sensor chips, and the SiS950 clone.
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ {
+ /*
+ * CPU fan speed going up and down on Dell Studio XPS 8100
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8100",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+ },
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table) ||
+ dmi_check_system(i8k_blacklist_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 2e5c6f46e442..cb28e4b4fb10 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -51,6 +51,7 @@
#define SIO_F71808A_ID 0x1001 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71868_ID 0x1106 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -58,7 +59,9 @@
#define SIO_F71889E_ID 0x0909 /* Chipset ID */
#define SIO_F71889A_ID 0x1005 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
+#define SIO_F81768D_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
+#define SIO_F81866_ID 0x1010 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
@@ -69,6 +72,10 @@
#define F71882FG_REG_IN(nr) (0x20 + (nr))
#define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */
+#define F81866_REG_IN_STATUS 0x16 /* F81866 only */
+#define F81866_REG_IN_BEEP 0x17 /* F81866 only */
+#define F81866_REG_IN1_HIGH 0x3a /* F81866 only */
+
#define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr)))
#define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr)))
#define F71882FG_REG_FAN_FULL_SPEED(nr) (0xA4 + (16 * (nr)))
@@ -101,7 +108,7 @@
#define F71882FG_REG_START 0x01
-#define F71882FG_MAX_INS 9
+#define F71882FG_MAX_INS 11
#define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */
@@ -109,14 +116,16 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
- f71889fg, f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71868a, f71869, f71869a,
+ f71882fg, f71889fg, f71889ed, f71889a, f8000, f81768d, f81865f,
+ f81866a};
static const char *const f71882fg_names[] = {
"f71808e",
"f71808a",
"f71858fg",
"f71862fg",
+ "f71868a",
"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
"f71869a",
"f71882fg",
@@ -124,22 +133,27 @@ static const char *const f71882fg_names[] = {
"f71889ed",
"f71889a",
"f8000",
+ "f81768d",
"f81865f",
+ "f81866a",
};
static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
- [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 },
- [f71808a] = { 1, 1, 1, 1, 0, 0, 0, 1, 1 },
- [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
- [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71869a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f71889a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
- [f8000] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
- [f81865f] = { 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0 },
+ [f71808a] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0 },
+ [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71868a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71869a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f71889a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ [f8000] = { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ [f81768d] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f81865f] = { 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
+ [f81866a] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
};
static const char f71882fg_has_in1_alarm[] = {
@@ -147,6 +161,7 @@ static const char f71882fg_has_in1_alarm[] = {
[f71808a] = 0,
[f71858fg] = 0,
[f71862fg] = 0,
+ [f71868a] = 0,
[f71869] = 0,
[f71869a] = 0,
[f71882fg] = 1,
@@ -154,7 +169,9 @@ static const char f71882fg_has_in1_alarm[] = {
[f71889ed] = 1,
[f71889a] = 1,
[f8000] = 0,
+ [f81768d] = 1,
[f81865f] = 1,
+ [f81866a] = 1,
};
static const char f71882fg_fan_has_beep[] = {
@@ -162,6 +179,7 @@ static const char f71882fg_fan_has_beep[] = {
[f71808a] = 0,
[f71858fg] = 0,
[f71862fg] = 1,
+ [f71868a] = 1,
[f71869] = 1,
[f71869a] = 1,
[f71882fg] = 1,
@@ -169,7 +187,9 @@ static const char f71882fg_fan_has_beep[] = {
[f71889ed] = 1,
[f71889a] = 1,
[f8000] = 0,
+ [f81768d] = 1,
[f81865f] = 1,
+ [f81866a] = 1,
};
static const char f71882fg_nr_fans[] = {
@@ -177,6 +197,7 @@ static const char f71882fg_nr_fans[] = {
[f71808a] = 2, /* +1 fan which is monitor + simple pwm only */
[f71858fg] = 3,
[f71862fg] = 3,
+ [f71868a] = 3,
[f71869] = 3,
[f71869a] = 3,
[f71882fg] = 4,
@@ -184,7 +205,9 @@ static const char f71882fg_nr_fans[] = {
[f71889ed] = 3,
[f71889a] = 3,
[f8000] = 3, /* +1 fan which is monitor only */
+ [f81768d] = 3,
[f81865f] = 2,
+ [f81866a] = 3,
};
static const char f71882fg_temp_has_beep[] = {
@@ -192,6 +215,7 @@ static const char f71882fg_temp_has_beep[] = {
[f71808a] = 1,
[f71858fg] = 0,
[f71862fg] = 1,
+ [f71868a] = 1,
[f71869] = 1,
[f71869a] = 1,
[f71882fg] = 1,
@@ -199,7 +223,9 @@ static const char f71882fg_temp_has_beep[] = {
[f71889ed] = 1,
[f71889a] = 1,
[f8000] = 0,
+ [f81768d] = 1,
[f81865f] = 1,
+ [f81866a] = 1,
};
static const char f71882fg_nr_temps[] = {
@@ -207,6 +233,7 @@ static const char f71882fg_nr_temps[] = {
[f71808a] = 2,
[f71858fg] = 3,
[f71862fg] = 3,
+ [f71868a] = 3,
[f71869] = 3,
[f71869a] = 3,
[f71882fg] = 3,
@@ -214,7 +241,9 @@ static const char f71882fg_nr_temps[] = {
[f71889ed] = 3,
[f71889a] = 3,
[f8000] = 3,
+ [f81768d] = 3,
[f81865f] = 2,
+ [f81866a] = 3,
};
static struct platform_device *f71882fg_pdev;
@@ -490,6 +519,23 @@ static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
store_temp_beep, 0, 7),
} };
+static struct sensor_device_attribute_2 f81866_temp_beep_attr[3][2] = { {
+ SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 0),
+ SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 4),
+}, {
+ SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 1),
+ SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 5),
+}, {
+ SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 2),
+ SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 6),
+} };
+
/*
* Temp attr for the f8000
* Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max)
@@ -531,6 +577,8 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+ SENSOR_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 0, 9),
+ SENSOR_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 0, 10),
};
/* For models with in1 alarm capability */
@@ -1170,10 +1218,21 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
if (f71882fg_has_in1_alarm[data->type]) {
- data->in1_max =
- f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
- data->in_beep =
- f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+ if (data->type == f81866a) {
+ data->in1_max =
+ f71882fg_read8(data,
+ F81866_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F81866_REG_IN_BEEP);
+ } else {
+ data->in1_max =
+ f71882fg_read8(data,
+ F71882FG_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F71882FG_REG_IN_BEEP);
+ }
}
/* Get High & boundary temps*/
@@ -1297,9 +1356,16 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->fan[3] = f71882fg_read16(data,
F71882FG_REG_FAN(3));
- if (f71882fg_has_in1_alarm[data->type])
- data->in_status = f71882fg_read8(data,
+ if (f71882fg_has_in1_alarm[data->type]) {
+ if (data->type == f81866a)
+ data->in_status = f71882fg_read8(data,
+ F81866_REG_IN_STATUS);
+ else
+ data->in_status = f71882fg_read8(data,
F71882FG_REG_IN_STATUS);
+ }
+
for (nr = 0; nr < F71882FG_MAX_INS; nr++)
if (f71882fg_has_in[data->type][nr])
data->in[nr] = f71882fg_read8(data,
@@ -1440,7 +1506,10 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
data->in1_max = val;
mutex_unlock(&data->update_lock);
@@ -1471,13 +1540,20 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
return err;
mutex_lock(&data->update_lock);
- data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+ if (data->type == f81866a)
+ data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
+ else
+ data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+
if (val)
data->in_beep |= 1 << nr;
else
data->in_beep &= ~(1 << nr);
- f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
mutex_unlock(&data->update_lock);
return count;
@@ -2270,6 +2346,7 @@ static int f71882fg_probe(struct platform_device *pdev)
int nr_fans = f71882fg_nr_fans[sio_data->type];
int nr_temps = f71882fg_nr_temps[sio_data->type];
int err, i;
+ int size;
u8 start_reg, reg;
data = devm_kzalloc(&pdev->dev, sizeof(struct f71882fg_data),
@@ -2280,7 +2357,8 @@ static int f71882fg_probe(struct platform_device *pdev)
data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
data->type = sio_data->type;
data->temp_start =
- (data->type == f71858fg || data->type == f8000) ? 0 : 1;
+ (data->type == f71858fg || data->type == f8000 ||
+ data->type == f81866a) ? 0 : 1;
mutex_init(&data->update_lock);
platform_set_drvdata(pdev, data);
@@ -2322,6 +2400,11 @@ static int f71882fg_probe(struct platform_device *pdev)
f8000_temp_attr,
ARRAY_SIZE(f8000_temp_attr));
break;
+ case f81866a:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
default:
err = f71882fg_create_sysfs_files(pdev,
&fxxxx_temp_attr[0][0],
@@ -2331,10 +2414,18 @@ static int f71882fg_probe(struct platform_device *pdev)
goto exit_unregister_sysfs;
if (f71882fg_temp_has_beep[data->type]) {
- err = f71882fg_create_sysfs_files(pdev,
- &fxxxx_temp_beep_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_beep_attr[0])
- * nr_temps);
+ if (data->type == f81866a) {
+ size = ARRAY_SIZE(f81866_temp_beep_attr[0]);
+ err = f71882fg_create_sysfs_files(pdev,
+ &f81866_temp_beep_attr[0][0],
+ size * nr_temps);
+ } else {
+ size = ARRAY_SIZE(fxxxx_temp_beep_attr[0]);
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ size * nr_temps);
+ }
if (err)
goto exit_unregister_sysfs;
}
@@ -2451,15 +2542,27 @@ static int f71882fg_remove(struct platform_device *pdev)
f8000_temp_attr,
ARRAY_SIZE(f8000_temp_attr));
break;
+ case f81866a:
+ f71882fg_remove_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
default:
f71882fg_remove_sysfs_files(pdev,
&fxxxx_temp_attr[0][0],
ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
}
if (f71882fg_temp_has_beep[data->type]) {
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_temp_beep_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
+ if (data->type == f81866a)
+ f71882fg_remove_sysfs_files(pdev,
+ &f81866_temp_beep_attr[0][0],
+ ARRAY_SIZE(f81866_temp_beep_attr[0])
+ * nr_temps);
+ else
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
}
for (i = 0; i < F71882FG_MAX_INS; i++) {
@@ -2551,6 +2654,9 @@ static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
case SIO_F71862_ID:
sio_data->type = f71862fg;
break;
+ case SIO_F71868_ID:
+ sio_data->type = f71868a;
+ break;
case SIO_F71869_ID:
sio_data->type = f71869;
break;
@@ -2572,9 +2678,15 @@ static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
case SIO_F8000_ID:
sio_data->type = f8000;
break;
+ case SIO_F81768D_ID:
+ sio_data->type = f81768d;
+ break;
case SIO_F81865_ID:
sio_data->type = f81865f;
break;
+ case SIO_F81866_ID:
+ sio_data->type = f81866a;
+ break;
default:
pr_info("Unsupported Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 3057dfc7e3bc..e80ee23b62d3 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -46,6 +46,7 @@ struct fam15h_power_data {
unsigned int tdp_to_watts;
unsigned int base_tdp;
unsigned int processor_pwr_watts;
+ unsigned int cpu_pwr_sample_ratio;
};
static ssize_t show_power(struct device *dev,
@@ -59,8 +60,19 @@ static ssize_t show_power(struct device *dev,
pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
REG_TDP_RUNNING_AVERAGE, &val);
- running_avg_capture = (val >> 4) & 0x3fffff;
- running_avg_capture = sign_extend32(running_avg_capture, 21);
+
+ /*
+ * On Carrizo and later platforms, TdpRunAvgAccCap bit field
+ * is extended to 4:31 from 4:25.
+ */
+ if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) {
+ running_avg_capture = val >> 4;
+ running_avg_capture = sign_extend32(running_avg_capture, 27);
+ } else {
+ running_avg_capture = (val >> 4) & 0x3fffff;
+ running_avg_capture = sign_extend32(running_avg_capture, 21);
+ }
+
running_avg_range = (val & 0xf) + 1;
pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
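For reference, sign_extend32(value, index) from <linux/bitops.h> propagates the bit at 'index' as the sign bit, so the two branches above decode a 22-bit and a 28-bit two's-complement field respectively. Illustrative values:

	sign_extend32(0x200000, 21);  /* bit 21 set: 0xffe00000, i.e. -2097152   */
	sign_extend32(0x8000000, 27); /* bit 27 set: 0xf8000000, i.e. -134217728 */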
@@ -117,7 +129,7 @@ static const struct attribute_group fam15h_power_group = {
};
__ATTRIBUTE_GROUPS(fam15h_power);
-static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
+static bool should_load_on_this_node(struct pci_dev *f4)
{
u32 val;
@@ -177,7 +189,7 @@ static int fam15h_power_resume(struct pci_dev *pdev)
static void fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
- u32 val;
+ u32 val, eax, ebx, ecx, edx;
u64 tmp;
pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val);
@@ -198,6 +210,19 @@ static void fam15h_power_init_data(struct pci_dev *f4,
/* convert to microWatt */
data->processor_pwr_watts = (tmp * 15625) >> 10;
+
+ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+ /* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */
+ if (!(edx & BIT(12)))
+ return;
+
+ /*
+ * determine the ratio of the compute unit power accumulator
+ * sample period to the PTSC counter period by executing CPUID
+ * Fn8000_0007:ECX
+ */
+ data->cpu_pwr_sample_ratio = ecx;
}
static int fam15h_power_probe(struct pci_dev *pdev,
@@ -214,7 +239,7 @@ static int fam15h_power_probe(struct pci_dev *pdev,
*/
tweak_runavg_range(pdev);
- if (!fam15h_power_is_internal_node0(pdev))
+ if (!should_load_on_this_node(pdev))
return -ENODEV;
data = devm_kzalloc(dev, sizeof(struct fam15h_power_data), GFP_KERNEL);
@@ -233,6 +258,7 @@ static int fam15h_power_probe(struct pci_dev *pdev,
static const struct pci_device_id fam15h_power_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{}
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..b96a2a9e4df7 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
{ .compatible = "gmt,g763" },
{ },
};
+MODULE_DEVICE_TABLE(of, g762_dt_match);
/*
* Grab clock (a required property), enable it, get (fixed) clock frequency
@@ -1112,7 +1113,6 @@ static int g762_remove(struct i2c_client *client)
static struct i2c_driver g762_driver = {
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(g762_dt_match),
},
.probe = g762_probe,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d0ee556e8ce0..1896e26df634 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -21,6 +21,7 @@
* IT8721F Super I/O chip w/LPC interface
* IT8726F Super I/O chip w/LPC interface
* IT8728F Super I/O chip w/LPC interface
+ * IT8732F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
* IT8771E Super I/O chip w/LPC interface
* IT8772E Super I/O chip w/LPC interface
@@ -69,8 +70,9 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
- it8772, it8781, it8782, it8783, it8786, it8790, it8603, it8620 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
+ it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
+ it8620 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -148,6 +150,7 @@ static inline void superio_exit(void)
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
#define IT8728F_DEVID 0x8728
+#define IT8732F_DEVID 0x8732
#define IT8771E_DEVID 0x8771
#define IT8772E_DEVID 0x8772
#define IT8781F_DEVID 0x8781
@@ -265,6 +268,7 @@ struct it87_devices {
#define FEAT_VID (1 << 9) /* Set if chip supports VID */
#define FEAT_IN7_INTERNAL (1 << 10) /* Set if in7 is internal */
#define FEAT_SIX_FANS (1 << 11) /* Supports six fans */
+#define FEAT_10_9MV_ADC (1 << 12)
static const struct it87_devices it87_devices[] = {
[it87] = {
@@ -315,6 +319,15 @@ static const struct it87_devices it87_devices[] = {
| FEAT_IN7_INTERNAL,
.peci_mask = 0x07,
},
+ [it8732] = {
+ .name = "it8732",
+ .suffix = "F",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
+ | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+ .peci_mask = 0x07,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
[it8771] = {
.name = "it8771",
.suffix = "E",
@@ -391,6 +404,7 @@ static const struct it87_devices it87_devices[] = {
#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
#define has_12mv_adc(data) ((data)->features & FEAT_12MV_ADC)
+#define has_10_9mv_adc(data) ((data)->features & FEAT_10_9MV_ADC)
#define has_newer_autopwm(data) ((data)->features & FEAT_NEWER_AUTOPWM)
#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
@@ -475,7 +489,14 @@ struct it87_data {
static int adc_lsb(const struct it87_data *data, int nr)
{
- int lsb = has_12mv_adc(data) ? 12 : 16;
+ int lsb;
+
+ if (has_12mv_adc(data))
+ lsb = 120;
+ else if (has_10_9mv_adc(data))
+ lsb = 109;
+ else
+ lsb = 160;
if (data->in_scaled & (1 << nr))
lsb <<= 1;
return lsb;
@@ -483,13 +504,13 @@ static int adc_lsb(const struct it87_data *data, int nr)
static u8 in_to_reg(const struct it87_data *data, int nr, long val)
{
- val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
+ val = DIV_ROUND_CLOSEST(val * 10, adc_lsb(data, nr));
return clamp_val(val, 0, 255);
}
static int in_from_reg(const struct it87_data *data, int nr, int val)
{
- return val * adc_lsb(data, nr);
+ return DIV_ROUND_CLOSEST(val * adc_lsb(data, nr), 10);
}
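With this change adc_lsb() returns the LSB weight in tenths of a millivolt (160, 120, or 109 for the new 10.9 mV ADC), so both conversions scale by ten. A worked example for an IT8732F:

	/* raw register value 100, lsb == 109 (10.9 mV):
	 * DIV_ROUND_CLOSEST(100 * 109, 10) == 1090 mV
	 */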
static inline u8 FAN_TO_REG(long rpm, int div)
@@ -1515,9 +1536,14 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
};
struct it87_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(attr)->index;
+ const char *label;
- return sprintf(buf, "%s\n", has_12mv_adc(data) ? labels_it8721[nr]
- : labels[nr]);
+ if (has_12mv_adc(data) || has_10_9mv_adc(data))
+ label = labels_it8721[nr];
+ else
+ label = labels[nr];
+
+ return sprintf(buf, "%s\n", label);
}
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1853,6 +1879,9 @@ static int __init it87_find(unsigned short *address,
case IT8728F_DEVID:
sio_data->type = it8728;
break;
+ case IT8732F_DEVID:
+ sio_data->type = it8732;
+ break;
case IT8771E_DEVID:
sio_data->type = it8771;
break;
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 97204dce162d..9296e9daf774 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -37,6 +37,7 @@
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#define DRVNAME "lm70"
@@ -130,11 +131,41 @@ ATTRIBUTE_GROUPS(lm70);
/*----------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static const struct of_device_id lm70_of_ids[] = {
+ {
+ .compatible = "ti,lm70",
+ .data = (void *) LM70_CHIP_LM70,
+ },
+ {
+ .compatible = "ti,tmp121",
+ .data = (void *) LM70_CHIP_TMP121,
+ },
+ {
+ .compatible = "ti,lm71",
+ .data = (void *) LM70_CHIP_LM71,
+ },
+ {
+ .compatible = "ti,lm74",
+ .data = (void *) LM70_CHIP_LM74,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lm70_of_ids);
+#endif
+
static int lm70_probe(struct spi_device *spi)
{
- int chip = spi_get_device_id(spi)->driver_data;
+ const struct of_device_id *match;
struct device *hwmon_dev;
struct lm70 *p_lm70;
+ int chip;
+
+ match = of_match_device(lm70_of_ids, &spi->dev);
+ if (match)
+ chip = (int)(uintptr_t)match->data;
+ else
+ chip = spi_get_device_id(spi)->driver_data;
/* signaling is SPI_MODE_0 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
@@ -169,6 +200,7 @@ static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lm70_of_ids),
},
.id_table = lm70_ids,
.probe = lm70_probe,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index fe41d5ae7cb2..e4e57bbafb10 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -104,7 +104,7 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
/* sysfs attributes for hwmon */
-static int lm75_read_temp(void *dev, long *temp)
+static int lm75_read_temp(void *dev, int *temp)
{
struct lm75_data *data = lm75_update_device(dev);
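This tracks the thermal core's move from long to int temperatures in this release; the ntc_thermistor hunk further down is the same adaptation. The callback shape both drivers now implement, as assumed here:

	/* of-thermal callback, temperatures in millidegrees Celsius */
	int (*get_temp)(void *data, int *temp);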
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..3ce33d244cc0 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -49,10 +49,13 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
#define REG_VOLTAGE_LOW 0x0f
#define REG_FANCOUNT_LOW 0x13
#define REG_START 0x21
-#define REG_MODE 0x22
+#define REG_MODE 0x22 /* 7.2.32 Mode Selection Register */
#define REG_PECI_ENABLE 0x23
#define REG_FAN_ENABLE 0x24
#define REG_VMON_ENABLE 0x25
+#define REG_PWM(x) (0x60 + (x))
+#define REG_SMARTFAN_EN(x) (0x64 + (x) / 2)
+#define SMARTFAN_EN_SHIFT(x) ((x) % 2 * 4)
#define REG_VENDOR_ID 0xfd
#define REG_CHIP_ID 0xfe
#define REG_VERSION_ID 0xff
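To make the SMARTFAN_EN packing concrete: two channels share each enable register, four bits apart. The mapping implied by the macros above:

	/* REG_SMARTFAN_EN(x) = 0x64 + x/2, SMARTFAN_EN_SHIFT(x) = (x % 2) * 4:
	 *   pwm index 0 -> register 0x64, shift 0
	 *   pwm index 1 -> register 0x64, shift 4
	 *   pwm index 2 -> register 0x65, shift 0
	 */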
@@ -66,6 +69,129 @@ struct nct7802_data {
struct mutex access_lock; /* for multi-byte read and write operations */
};
+static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ unsigned int mode;
+ int ret;
+
+ ret = regmap_read(data->regmap, REG_MODE, &mode);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%u\n", (mode >> (2 * sattr->index) & 3) + 2);
+}
+
+static ssize_t store_temp_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ unsigned int type;
+ int err;
+
+ err = kstrtouint(buf, 0, &type);
+ if (err < 0)
+ return err;
+ if (sattr->index == 2 && type != 4) /* RD3 */
+ return -EINVAL;
+ if (type < 3 || type > 4)
+ return -EINVAL;
+ err = regmap_update_bits(data->regmap, REG_MODE,
+ 3 << 2 * sattr->index, (type - 2) << 2 * sattr->index);
+ return err ? : count;
+}
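The sysfs encoding at work here: each sensor owns a 2-bit field in REG_MODE, and the hwmon ABI values map onto it with an offset of two, which is why show_temp_type() adds 2 and store_temp_type() subtracts it:

	/* sysfs "3" (thermal diode) <-> field value 1
	 * sysfs "4" (thermistor)    <-> field value 2
	 * RD3 (index 2) accepts only "4", enforced above.
	 */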
+
+static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int ret;
+
+ if (sattr->index > 1)
+ return sprintf(buf, "1\n");
+
+ ret = regmap_read(data->regmap, 0x5E, &regval);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%u\n", !(regval & (1 << sattr->index)));
+}
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ if (!attr->index)
+ return sprintf(buf, "255\n");
+
+ ret = regmap_read(data->regmap, attr->index, &val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int err;
+ u8 val;
+
+ err = kstrtou8(buf, 0, &val);
+ if (err < 0)
+ return err;
+
+ err = regmap_write(data->regmap, attr->index, val);
+ return err ? : count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ unsigned int reg, enabled;
+ int ret;
+
+ ret = regmap_read(data->regmap, REG_SMARTFAN_EN(sattr->index), &reg);
+ if (ret < 0)
+ return ret;
+ enabled = reg >> SMARTFAN_EN_SHIFT(sattr->index) & 1;
+ return sprintf(buf, "%u\n", enabled + 1);
+}
+
+static ssize_t store_pwm_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val < 1 || val > 2)
+ return -EINVAL;
+ ret = regmap_update_bits(data->regmap, REG_SMARTFAN_EN(sattr->index),
+ 1 << SMARTFAN_EN_SHIFT(sattr->index),
+ (val - 1) << SMARTFAN_EN_SHIFT(sattr->index));
+ return ret ? : count;
+}
+
static int nct7802_read_temp(struct nct7802_data *data,
u8 reg_temp, u8 reg_temp_low, int *temp)
{
@@ -195,7 +321,7 @@ abort:
}
static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
- unsigned int voltage)
+ unsigned long voltage)
{
int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
int err;
@@ -377,6 +503,8 @@ store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
return err ? : count;
}
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR,
+ show_temp_type, store_temp_type, 0);
static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0x01,
REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp,
@@ -386,6 +514,8 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp,
static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp,
store_temp, 0x3a, 0);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR,
+ show_temp_type, store_temp_type, 1);
static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0x02,
REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp,
@@ -395,6 +525,8 @@ static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp,
static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp,
store_temp, 0x3b, 0);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR,
+ show_temp_type, store_temp_type, 2);
static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0x03,
REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp,
@@ -475,6 +607,7 @@ static SENSOR_DEVICE_ATTR_2(temp6_beep, S_IRUGO | S_IWUSR, show_beep,
store_beep, 0x5c, 5);
static struct attribute *nct7802_temp_attrs[] = {
+ &sensor_dev_attr_temp1_type.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -485,7 +618,8 @@ static struct attribute *nct7802_temp_attrs[] = {
&sensor_dev_attr_temp1_fault.dev_attr.attr,
&sensor_dev_attr_temp1_beep.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr, /* 9 */
+ &sensor_dev_attr_temp2_type.dev_attr.attr, /* 10 */
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
@@ -495,7 +629,8 @@ static struct attribute *nct7802_temp_attrs[] = {
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_beep.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr, /* 18 */
+ &sensor_dev_attr_temp3_type.dev_attr.attr, /* 20 */
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_crit.dev_attr.attr,
@@ -505,7 +640,7 @@ static struct attribute *nct7802_temp_attrs[] = {
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_beep.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 30 */
&sensor_dev_attr_temp4_min.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp4_crit.dev_attr.attr,
@@ -514,7 +649,7 @@ static struct attribute *nct7802_temp_attrs[] = {
&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_beep.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr, /* 35 */
+ &sensor_dev_attr_temp5_input.dev_attr.attr, /* 38 */
&sensor_dev_attr_temp5_min.dev_attr.attr,
&sensor_dev_attr_temp5_max.dev_attr.attr,
&sensor_dev_attr_temp5_crit.dev_attr.attr,
@@ -523,7 +658,7 @@ static struct attribute *nct7802_temp_attrs[] = {
&sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp5_beep.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr, /* 43 */
+ &sensor_dev_attr_temp6_input.dev_attr.attr, /* 46 */
&sensor_dev_attr_temp6_beep.dev_attr.attr,
NULL
@@ -541,25 +676,27 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
if (err < 0)
return 0;
- if (index < 9 &&
+ if (index < 10 &&
(reg & 0x03) != 0x01 && (reg & 0x03) != 0x02) /* RD1 */
return 0;
- if (index >= 9 && index < 18 &&
+
+ if (index >= 10 && index < 20 &&
(reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08) /* RD2 */
return 0;
- if (index >= 18 && index < 27 && (reg & 0x30) != 0x20) /* RD3 */
+ if (index >= 20 && index < 30 && (reg & 0x30) != 0x20) /* RD3 */
return 0;
- if (index >= 27 && index < 35) /* local */
+
+ if (index >= 30 && index < 38) /* local */
return attr->mode;
err = regmap_read(data->regmap, REG_PECI_ENABLE, &reg);
if (err < 0)
return 0;
- if (index >= 35 && index < 43 && !(reg & 0x01)) /* PECI 0 */
+ if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */
return 0;
- if (index >= 0x43 && (!(reg & 0x02))) /* PECI 1 */
+ if (index >= 46 && (!(reg & 0x02))) /* PECI 1 */
return 0;
return attr->mode;
@@ -687,6 +824,27 @@ static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_alarm, NULL, 0x1a, 2);
static SENSOR_DEVICE_ATTR_2(fan3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
0x5b, 2);
+/* 7.2.89 Fan Control Output Type */
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm2_mode, S_IRUGO, show_pwm_mode, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2);
+
+/* 7.2.91... Fan Control Output Value */
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+ REG_PWM(0));
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+ REG_PWM(1));
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+ REG_PWM(2));
+
+/* 7.2.95... Temperature to Fan mapping Relationships Register */
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+ store_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+ store_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+ store_pwm_enable, 2);
+
static struct attribute *nct7802_fan_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
@@ -725,10 +883,142 @@ static struct attribute_group nct7802_fan_group = {
.is_visible = nct7802_fan_is_visible,
};
+static struct attribute *nct7802_pwm_attrs[] = {
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group nct7802_pwm_group = {
+ .attrs = nct7802_pwm_attrs,
+};
+
+/* 7.2.115... 0x80-0x83, 0x84 Temperature (X-axis) transition */
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x80, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x81, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x82, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x83, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point5_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x84, 0);
+
+/* 7.2.120... 0x85-0x88 PWM (Y-axis) transition */
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x85);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x86);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x87);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x88);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+/* 7.2.124 Table 2 X-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x90, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x91, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x92, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x93, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point5_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x94, 0);
+
+/* 7.2.129 Table 2 Y-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x95);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x96);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x97);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point4_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0x98);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+/* 7.2.133 Table 3 X-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0xA0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0xA1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0xA2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0xA3, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point5_temp, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0xA4, 0);
+
+/* 7.2.138 Table 3 Y-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0xA5);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0xA6);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0xA7);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point4_pwm, S_IRUGO | S_IWUSR,
+ show_pwm, store_pwm, 0xA8);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+static struct attribute *nct7802_auto_point_attrs[] = {
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point5_pwm.dev_attr.attr,
+
+ NULL
+};
+
+static struct attribute_group nct7802_auto_point_group = {
+ .attrs = nct7802_auto_point_attrs,
+};
+
static const struct attribute_group *nct7802_groups[] = {
&nct7802_temp_group,
&nct7802_in_group,
&nct7802_fan_group,
+ &nct7802_pwm_group,
+ &nct7802_auto_point_group,
NULL
};
@@ -776,7 +1066,8 @@ static int nct7802_detect(struct i2c_client *client,
static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- return reg != REG_BANK && reg <= 0x20;
+ return (reg != REG_BANK && reg <= 0x20) ||
+ (reg >= REG_PWM(0) && reg <= REG_PWM(2));
}
static const struct regmap_config nct7802_regmap_config = {
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
return sprintf(buf, "%d\n", val);
}
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
- if (val > 1 || (val && !data->fan_mode[index]))
+ if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
return -EINVAL;
ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
- val ? data->fan_mode[index] : 0);
+ val == 2 ? data->fan_mode[index] : 0);
return ret ? ret : count;
}
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
- struct device_attribute *devattr, char *buf)
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
if (val < 0)
return val;
- return sprintf(buf, "%d\n", val ? 1 : 0);
+ return sprintf(buf, "%d\n", val ? 2 : 1);
}
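The rename from fanX_mode to pwmX_enable, and the shift from 0/1 to 1/2, line up with the standard hwmon sysfs ABI:

	/* pwmX_enable per Documentation/hwmon/sysfs-interface:
	 *   1 = manual fan control (PWM register driven directly)
	 *   2 = automatic (SmartFan) control
	 * 0 ("no fan speed control") is deliberately rejected here.
	 */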
/* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 0);
-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 1);
-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 2);
-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
- show_mode, store_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable, 3);
static struct attribute *nct7904_fanctl_attrs[] = {
- &sensor_dev_attr_fan1_pwm.dev_attr.attr,
- &sensor_dev_attr_fan1_mode.dev_attr.attr,
- &sensor_dev_attr_fan2_pwm.dev_attr.attr,
- &sensor_dev_attr_fan2_mode.dev_attr.attr,
- &sensor_dev_attr_fan3_pwm.dev_attr.attr,
- &sensor_dev_attr_fan3_mode.dev_attr.attr,
- &sensor_dev_attr_fan4_pwm.dev_attr.attr,
- &sensor_dev_attr_fan4_mode.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm4_enable.dev_attr.attr,
NULL
};
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
{"nct7904", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
static struct i2c_driver nct7904_driver = {
.class = I2C_CLASS_HWMON,
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index dc0b76c5e302..feed30646d91 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -477,7 +477,7 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
return -EINVAL;
}
-static int ntc_read_temp(void *dev, long *temp)
+static int ntc_read_temp(void *dev, int *temp)
{
struct ntc_data *data = dev_get_drvdata(dev);
int ohm;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 9f7dbd189c97..df6ebb2b8f0f 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,7 +20,8 @@ config SENSORS_PMBUS
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, UDT020, and TPS40400.
+ MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
+ TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -30,8 +31,8 @@ config SENSORS_ADM1275
default n
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1275, and ADM1276 Hot-Swap Controller and Digital
- Power Monitors.
+ Devices ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 Hot-Swap
+ Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
@@ -51,7 +52,8 @@ config SENSORS_LTC2978
default n
help
If you say yes here you get hardware monitoring support for Linear
- Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676.
+ Technology LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, LTC3880,
+ LTC3883, LTC3886, LTC3887, LTCM2987, LTM4675, and LTM4676.
This driver can also be built as a module. If so, the module will
be called ltc2978.
@@ -73,6 +75,16 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX20751
+ tristate "Maxim MAX20751"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX20751.
+
+ This driver can also be built as a module. If so, the module will
+ be called max20751.
+
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 1454293e985c..bce046d37f02 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 60aad9570f01..188af4c89f40 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -21,46 +21,120 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include "pmbus.h"
-enum chips { adm1075, adm1275, adm1276 };
+enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
+
+#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
+#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
+#define ADM1293_MFR_STATUS_VAUX_OV_WARN BIT(6)
#define ADM1275_PEAK_IOUT 0xd0
#define ADM1275_PEAK_VIN 0xd1
#define ADM1275_PEAK_VOUT 0xd2
#define ADM1275_PMON_CONFIG 0xd4
-#define ADM1275_VIN_VOUT_SELECT (1 << 6)
-#define ADM1275_VRANGE (1 << 5)
-#define ADM1075_IRANGE_50 (1 << 4)
-#define ADM1075_IRANGE_25 (1 << 3)
-#define ADM1075_IRANGE_MASK ((1 << 3) | (1 << 4))
+#define ADM1275_VIN_VOUT_SELECT BIT(6)
+#define ADM1275_VRANGE BIT(5)
+#define ADM1075_IRANGE_50 BIT(4)
+#define ADM1075_IRANGE_25 BIT(3)
+#define ADM1075_IRANGE_MASK (BIT(3) | BIT(4))
+
+#define ADM1293_IRANGE_25 0
+#define ADM1293_IRANGE_50 BIT(6)
+#define ADM1293_IRANGE_100 BIT(7)
+#define ADM1293_IRANGE_200 (BIT(6) | BIT(7))
+#define ADM1293_IRANGE_MASK (BIT(6) | BIT(7))
+
+#define ADM1293_VIN_SEL_012 BIT(2)
+#define ADM1293_VIN_SEL_074 BIT(3)
+#define ADM1293_VIN_SEL_210 (BIT(2) | BIT(3))
+#define ADM1293_VIN_SEL_MASK (BIT(2) | BIT(3))
+
+#define ADM1293_VAUX_EN BIT(1)
#define ADM1275_IOUT_WARN2_LIMIT 0xd7
#define ADM1275_DEVICE_CONFIG 0xd8
-#define ADM1275_IOUT_WARN2_SELECT (1 << 4)
+#define ADM1275_IOUT_WARN2_SELECT BIT(4)
#define ADM1276_PEAK_PIN 0xda
-
-#define ADM1275_MFR_STATUS_IOUT_WARN2 (1 << 0)
-
#define ADM1075_READ_VAUX 0xdd
#define ADM1075_VAUX_OV_WARN_LIMIT 0xde
#define ADM1075_VAUX_UV_WARN_LIMIT 0xdf
+#define ADM1293_IOUT_MIN 0xe3
+#define ADM1293_PIN_MIN 0xe4
#define ADM1075_VAUX_STATUS 0xf6
-#define ADM1075_VAUX_OV_WARN (1<<7)
-#define ADM1075_VAUX_UV_WARN (1<<6)
+#define ADM1075_VAUX_OV_WARN BIT(7)
+#define ADM1075_VAUX_UV_WARN BIT(6)
struct adm1275_data {
int id;
bool have_oc_fault;
+ bool have_uc_fault;
+ bool have_vout;
+ bool have_vaux_status;
+ bool have_mfr_vaux_status;
+ bool have_iout_min;
+ bool have_pin_min;
+ bool have_pin_max;
struct pmbus_driver_info info;
};
#define to_adm1275_data(x) container_of(x, struct adm1275_data, info)
+struct coefficients {
+ s16 m;
+ s16 b;
+ s16 R;
+};
+
+static const struct coefficients adm1075_coefficients[] = {
+ [0] = { 27169, 0, -1 }, /* voltage */
+ [1] = { 806, 20475, -1 }, /* current, irange25 */
+ [2] = { 404, 20475, -1 }, /* current, irange50 */
+ [3] = { 8549, 0, -1 }, /* power, irange25 */
+ [4] = { 4279, 0, -1 }, /* power, irange50 */
+};
+
+static const struct coefficients adm1275_coefficients[] = {
+ [0] = { 19199, 0, -2 }, /* voltage, vrange set */
+ [1] = { 6720, 0, -1 }, /* voltage, vrange not set */
+ [2] = { 807, 20475, -1 }, /* current */
+};
+
+static const struct coefficients adm1276_coefficients[] = {
+ [0] = { 19199, 0, -2 }, /* voltage, vrange set */
+ [1] = { 6720, 0, -1 }, /* voltage, vrange not set */
+ [2] = { 807, 20475, -1 }, /* current */
+ [3] = { 6043, 0, -2 }, /* power, vrange set */
+ [4] = { 2115, 0, -1 }, /* power, vrange not set */
+};
+
+static const struct coefficients adm1293_coefficients[] = {
+ [0] = { 3333, -1, 0 }, /* voltage, vrange 1.2V */
+ [1] = { 5552, -5, -1 }, /* voltage, vrange 7.4V */
+ [2] = { 19604, -50, -2 }, /* voltage, vrange 21V */
+ [3] = { 8000, -100, -2 }, /* current, irange25 */
+ [4] = { 4000, -100, -2 }, /* current, irange50 */
+ [5] = { 20000, -1000, -3 }, /* current, irange100 */
+ [6] = { 10000, -1000, -3 }, /* current, irange200 */
+ [7] = { 10417, 0, -1 }, /* power, 1.2V, irange25 */
+ [8] = { 5208, 0, -1 }, /* power, 1.2V, irange50 */
+ [9] = { 26042, 0, -2 }, /* power, 1.2V, irange100 */
+ [10] = { 13021, 0, -2 }, /* power, 1.2V, irange200 */
+ [11] = { 17351, 0, -2 }, /* power, 7.4V, irange25 */
+ [12] = { 8676, 0, -2 }, /* power, 7.4V, irange50 */
+ [13] = { 4338, 0, -2 }, /* power, 7.4V, irange100 */
+ [14] = { 21689, 0, -3 }, /* power, 7.4V, irange200 */
+ [15] = { 6126, 0, -2 }, /* power, 21V, irange25 */
+ [16] = { 30631, 0, -3 }, /* power, 21V, irange50 */
+ [17] = { 15316, 0, -3 }, /* power, 21V, irange100 */
+ [18] = { 7658, 0, -3 }, /* power, 21V, irange200 */
+};
+
static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -72,42 +146,37 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_IOUT_UC_FAULT_LIMIT:
- if (data->have_oc_fault) {
- ret = -ENXIO;
- break;
- }
+ if (!data->have_uc_fault)
+ return -ENXIO;
ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_IOUT_OC_FAULT_LIMIT:
- if (!data->have_oc_fault) {
- ret = -ENXIO;
- break;
- }
+ if (!data->have_oc_fault)
+ return -ENXIO;
ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_VOUT_OV_WARN_LIMIT:
- if (data->id != adm1075) {
- ret = -ENODATA;
- break;
- }
+ if (data->have_vout)
+ return -ENODATA;
ret = pmbus_read_word_data(client, 0,
ADM1075_VAUX_OV_WARN_LIMIT);
break;
case PMBUS_VOUT_UV_WARN_LIMIT:
- if (data->id != adm1075) {
- ret = -ENODATA;
- break;
- }
+ if (data->have_vout)
+ return -ENODATA;
ret = pmbus_read_word_data(client, 0,
ADM1075_VAUX_UV_WARN_LIMIT);
break;
case PMBUS_READ_VOUT:
- if (data->id != adm1075) {
- ret = -ENODATA;
- break;
- }
+ if (data->have_vout)
+ return -ENODATA;
ret = pmbus_read_word_data(client, 0, ADM1075_READ_VAUX);
break;
+ case PMBUS_VIRT_READ_IOUT_MIN:
+ if (!data->have_iout_min)
+ return -ENXIO;
+ ret = pmbus_read_word_data(client, 0, ADM1293_IOUT_MIN);
+ break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
break;
@@ -117,11 +186,14 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
break;
+ case PMBUS_VIRT_READ_PIN_MIN:
+ if (!data->have_pin_min)
+ return -ENXIO;
+ ret = pmbus_read_word_data(client, 0, ADM1293_PIN_MIN);
+ break;
case PMBUS_VIRT_READ_PIN_MAX:
- if (data->id == adm1275) {
- ret = -ENXIO;
- break;
- }
+ if (!data->have_pin_max)
+ return -ENXIO;
ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
@@ -129,8 +201,8 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_RESET_VIN_HISTORY:
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
- if (data->id == adm1275)
- ret = -ENXIO;
+ if (!data->have_pin_max)
+ return -ENXIO;
break;
default:
ret = -ENODATA;
@@ -142,6 +214,8 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct adm1275_data *data = to_adm1275_data(info);
int ret;
if (page)
@@ -155,6 +229,9 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
+ if (!ret && data->have_iout_min)
+ ret = pmbus_write_word_data(client, 0,
+ ADM1293_IOUT_MIN, 0);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
@@ -164,6 +241,9 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1276_PEAK_PIN, 0);
+ if (!ret && data->have_pin_min)
+ ret = pmbus_write_word_data(client, 0,
+ ADM1293_PIN_MIN, 0);
break;
default:
ret = -ENODATA;
@@ -186,29 +266,40 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_IOUT);
if (ret < 0)
break;
+ if (!data->have_oc_fault && !data->have_uc_fault)
+ break;
mfr_status = pmbus_read_byte_data(client, page,
PMBUS_STATUS_MFR_SPECIFIC);
- if (mfr_status < 0) {
- ret = mfr_status;
- break;
- }
+ if (mfr_status < 0)
+ return mfr_status;
if (mfr_status & ADM1275_MFR_STATUS_IOUT_WARN2) {
ret |= data->have_oc_fault ?
PB_IOUT_OC_FAULT : PB_IOUT_UC_FAULT;
}
break;
case PMBUS_STATUS_VOUT:
- if (data->id != adm1075) {
- ret = -ENODATA;
- break;
- }
+ if (data->have_vout)
+ return -ENODATA;
ret = 0;
- mfr_status = pmbus_read_byte_data(client, 0,
- ADM1075_VAUX_STATUS);
- if (mfr_status & ADM1075_VAUX_OV_WARN)
- ret |= PB_VOLTAGE_OV_WARNING;
- if (mfr_status & ADM1075_VAUX_UV_WARN)
- ret |= PB_VOLTAGE_UV_WARNING;
+ if (data->have_vaux_status) {
+ mfr_status = pmbus_read_byte_data(client, 0,
+ ADM1075_VAUX_STATUS);
+ if (mfr_status < 0)
+ return mfr_status;
+ if (mfr_status & ADM1075_VAUX_OV_WARN)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfr_status & ADM1075_VAUX_UV_WARN)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ } else if (data->have_mfr_vaux_status) {
+ mfr_status = pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfr_status < 0)
+ return mfr_status;
+ if (mfr_status & ADM1293_MFR_STATUS_VAUX_OV_WARN)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfr_status & ADM1293_MFR_STATUS_VAUX_UV_WARN)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ }
break;
default:
ret = -ENODATA;
@@ -221,6 +312,8 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
+ { "adm1293", adm1293 },
+ { "adm1294", adm1294 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
@@ -234,6 +327,8 @@ static int adm1275_probe(struct i2c_client *client,
struct pmbus_driver_info *info;
struct adm1275_data *data;
const struct i2c_device_id *mid;
+ const struct coefficients *coefficients;
+ int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -290,61 +385,38 @@ static int adm1275_probe(struct i2c_client *client,
info->format[PSC_VOLTAGE_IN] = direct;
info->format[PSC_VOLTAGE_OUT] = direct;
info->format[PSC_CURRENT_OUT] = direct;
- info->m[PSC_CURRENT_OUT] = 807;
- info->b[PSC_CURRENT_OUT] = 20475;
- info->R[PSC_CURRENT_OUT] = -1;
+ info->format[PSC_POWER] = direct;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
info->read_word_data = adm1275_read_word_data;
info->read_byte_data = adm1275_read_byte_data;
info->write_word_data = adm1275_write_word_data;
- if (data->id == adm1075) {
- info->m[PSC_VOLTAGE_IN] = 27169;
- info->b[PSC_VOLTAGE_IN] = 0;
- info->R[PSC_VOLTAGE_IN] = -1;
- info->m[PSC_VOLTAGE_OUT] = 27169;
- info->b[PSC_VOLTAGE_OUT] = 0;
- info->R[PSC_VOLTAGE_OUT] = -1;
- } else if (config & ADM1275_VRANGE) {
- info->m[PSC_VOLTAGE_IN] = 19199;
- info->b[PSC_VOLTAGE_IN] = 0;
- info->R[PSC_VOLTAGE_IN] = -2;
- info->m[PSC_VOLTAGE_OUT] = 19199;
- info->b[PSC_VOLTAGE_OUT] = 0;
- info->R[PSC_VOLTAGE_OUT] = -2;
- } else {
- info->m[PSC_VOLTAGE_IN] = 6720;
- info->b[PSC_VOLTAGE_IN] = 0;
- info->R[PSC_VOLTAGE_IN] = -1;
- info->m[PSC_VOLTAGE_OUT] = 6720;
- info->b[PSC_VOLTAGE_OUT] = 0;
- info->R[PSC_VOLTAGE_OUT] = -1;
- }
-
- if (device_config & ADM1275_IOUT_WARN2_SELECT)
- data->have_oc_fault = true;
-
switch (data->id) {
case adm1075:
- info->format[PSC_POWER] = direct;
- info->b[PSC_POWER] = 0;
- info->R[PSC_POWER] = -1;
+ if (device_config & ADM1275_IOUT_WARN2_SELECT)
+ data->have_oc_fault = true;
+ else
+ data->have_uc_fault = true;
+ data->have_pin_max = true;
+ data->have_vaux_status = true;
+
+ coefficients = adm1075_coefficients;
+ vindex = 0;
switch (config & ADM1075_IRANGE_MASK) {
case ADM1075_IRANGE_25:
- info->m[PSC_POWER] = 8549;
- info->m[PSC_CURRENT_OUT] = 806;
+ cindex = 1;
+ pindex = 3;
break;
case ADM1075_IRANGE_50:
- info->m[PSC_POWER] = 4279;
- info->m[PSC_CURRENT_OUT] = 404;
+ cindex = 2;
+ pindex = 4;
break;
default:
dev_err(&client->dev, "Invalid input current range");
- info->m[PSC_POWER] = 0;
- info->m[PSC_CURRENT_OUT] = 0;
break;
}
+
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
| PMBUS_HAVE_STATUS_INPUT;
if (config & ADM1275_VIN_VOUT_SELECT)
@@ -352,6 +424,16 @@ static int adm1275_probe(struct i2c_client *client,
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1275:
+ if (device_config & ADM1275_IOUT_WARN2_SELECT)
+ data->have_oc_fault = true;
+ else
+ data->have_uc_fault = true;
+ data->have_vout = true;
+
+ coefficients = adm1275_coefficients;
+ vindex = (config & ADM1275_VRANGE) ? 0 : 1;
+ cindex = 2;
+
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
@@ -360,22 +442,100 @@ static int adm1275_probe(struct i2c_client *client,
PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
break;
case adm1276:
- info->format[PSC_POWER] = direct;
+ if (device_config & ADM1275_IOUT_WARN2_SELECT)
+ data->have_oc_fault = true;
+ else
+ data->have_uc_fault = true;
+ data->have_vout = true;
+ data->have_pin_max = true;
+
+ coefficients = adm1276_coefficients;
+ vindex = (config & ADM1275_VRANGE) ? 0 : 1;
+ cindex = 2;
+ pindex = (config & ADM1275_VRANGE) ? 3 : 4;
+
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
| PMBUS_HAVE_STATUS_INPUT;
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
- if (config & ADM1275_VRANGE) {
- info->m[PSC_POWER] = 6043;
- info->b[PSC_POWER] = 0;
- info->R[PSC_POWER] = -2;
- } else {
- info->m[PSC_POWER] = 2115;
- info->b[PSC_POWER] = 0;
- info->R[PSC_POWER] = -1;
+ break;
+ case adm1293:
+ case adm1294:
+ data->have_iout_min = true;
+ data->have_pin_min = true;
+ data->have_pin_max = true;
+ data->have_mfr_vaux_status = true;
+
+ coefficients = adm1293_coefficients;
+
+ voindex = 0;
+ switch (config & ADM1293_VIN_SEL_MASK) {
+ case ADM1293_VIN_SEL_012: /* 1.2V */
+ vindex = 0;
+ break;
+ case ADM1293_VIN_SEL_074: /* 7.4V */
+ vindex = 1;
+ break;
+ case ADM1293_VIN_SEL_210: /* 21V */
+ vindex = 2;
+ break;
+ default: /* disabled */
+ break;
}
+
+ switch (config & ADM1293_IRANGE_MASK) {
+ case ADM1293_IRANGE_25:
+ cindex = 3;
+ break;
+ case ADM1293_IRANGE_50:
+ cindex = 4;
+ break;
+ case ADM1293_IRANGE_100:
+ cindex = 5;
+ break;
+ case ADM1293_IRANGE_200:
+ cindex = 6;
+ break;
+ }
+
+ if (vindex >= 0)
+ pindex = 7 + vindex * 4 + (cindex - 3);
+
+ if (config & ADM1293_VAUX_EN)
+ info->func[0] |=
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+ info->func[0] |= PMBUS_HAVE_PIN |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
+
break;
+ default:
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
+ }
+
+ if (voindex < 0)
+ voindex = vindex;
+ if (vindex >= 0) {
+ info->m[PSC_VOLTAGE_IN] = coefficients[vindex].m;
+ info->b[PSC_VOLTAGE_IN] = coefficients[vindex].b;
+ info->R[PSC_VOLTAGE_IN] = coefficients[vindex].R;
+ }
+ if (voindex >= 0) {
+ info->m[PSC_VOLTAGE_OUT] = coefficients[voindex].m;
+ info->b[PSC_VOLTAGE_OUT] = coefficients[voindex].b;
+ info->R[PSC_VOLTAGE_OUT] = coefficients[voindex].R;
+ }
+ if (cindex >= 0) {
+ info->m[PSC_CURRENT_OUT] = coefficients[cindex].m;
+ info->b[PSC_CURRENT_OUT] = coefficients[cindex].b;
+ info->R[PSC_CURRENT_OUT] = coefficients[cindex].R;
+ }
+ if (pindex >= 0) {
+ info->m[PSC_POWER] = coefficients[pindex].m;
+ info->b[PSC_POWER] = coefficients[pindex].b;
+ info->R[PSC_POWER] = coefficients[pindex].R;
}
return pmbus_do_probe(client, id, info);
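
The probe rework above replaces the per-chip m/b/R assignments with lookups
into the coefficient tables, which feed the PMBus "direct" data format: a raw
register word Y encodes X as Y = (m * X + b) * 10^R, so X = (Y * 10^-R - b) / m.
The ADM1293 power row is selected by pindex = 7 + vindex * 4 + (cindex - 3);
for example the 7.4V range (vindex = 1) with irange50 (cindex = 4) yields
pindex = 12, i.e. { 8676, 0, -2 }. A standalone sketch of the decode, using a
made-up raw reading (build with -lm):

#include <math.h>
#include <stdio.h>

struct coefficients {
	short m, b, R;
};

/* Decode a PMBus "direct" reading: Y = (m * X + b) * 10^R */
static double direct_to_real(int y, struct coefficients c)
{
	return (y * pow(10, -c.R) - c.b) / c.m;
}

int main(void)
{
	/* ADM1275 voltage row with VRANGE set: { 19199, 0, -2 } */
	struct coefficients volt = { 19199, 0, -2 };

	/* hypothetical raw READ_VIN word */
	printf("VIN = %.3f V\n", direct_to_real(2304, volt));	/* ~12.001 V */
	return 0;
}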
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index a26b1d1d9514..a3d912cd3b8d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -42,15 +43,15 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
#define LM25066_READ_AVG_IIN 0xde
#define LM25066_READ_AVG_PIN 0xdf
-#define LM25066_DEV_SETUP_CL (1 << 4) /* Current limit */
+#define LM25066_DEV_SETUP_CL BIT(4) /* Current limit */
/* LM25056 only */
#define LM25056_VAUX_OV_WARN_LIMIT 0xe3
#define LM25056_VAUX_UV_WARN_LIMIT 0xe4
-#define LM25056_MFR_STS_VAUX_OV_WARN (1 << 1)
-#define LM25056_MFR_STS_VAUX_UV_WARN (1 << 0)
+#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
+#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
/* LM25063 only */
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 0835050ec245..58b789c28b48 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,9 +1,9 @@
/*
- * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
- * LTC3883, and LTM4676
+ * Hardware monitoring driver for LTC2978 and compatible chips.
*
* Copyright (c) 2011 Ericsson AB.
- * Copyright (c) 2013, 2014 Guenter Roeck
+ * Copyright (c) 2013, 2014, 2015 Guenter Roeck
+ * Copyright (c) 2015 Linear Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -25,49 +27,71 @@
#include <linux/regulator/driver.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 };
+enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
+ ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
#define LTC2978_MFR_VIN_PEAK 0xde
#define LTC2978_MFR_TEMPERATURE_PEAK 0xdf
-#define LTC2978_MFR_SPECIAL_ID 0xe7
+#define LTC2978_MFR_SPECIAL_ID 0xe7 /* Undocumented on LTC3882 */
+#define LTC2978_MFR_COMMON 0xef
-/* LTC2974, LCT2977, and LTC2978 */
+/* LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, and LTM2987 */
#define LTC2978_MFR_VOUT_MIN 0xfb
#define LTC2978_MFR_VIN_MIN 0xfc
#define LTC2978_MFR_TEMPERATURE_MIN 0xfd
-/* LTC2974 only */
+/* LTC2974, LTC2975 */
#define LTC2974_MFR_IOUT_PEAK 0xd7
#define LTC2974_MFR_IOUT_MIN 0xd8
-/* LTC3880, LTC3883, and LTM4676 */
+/* LTC3880, LTC3882, LTC3883, LTC3887, LTM4675, and LTM4676 */
#define LTC3880_MFR_IOUT_PEAK 0xd7
#define LTC3880_MFR_CLEAR_PEAKS 0xe3
#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
-/* LTC3883 only */
+/* LTC3883 and LTC3886 only */
#define LTC3883_MFR_IIN_PEAK 0xe1
-#define LTC2974_ID_REV1 0x0212
-#define LTC2974_ID_REV2 0x0213
+/* LTC2975 only */
+#define LTC2975_MFR_IIN_PEAK 0xc4
+#define LTC2975_MFR_IIN_MIN 0xc5
+#define LTC2975_MFR_PIN_PEAK 0xc6
+#define LTC2975_MFR_PIN_MIN 0xc7
+
+#define LTC2978_ID_MASK 0xfff0
+
+#define LTC2974_ID 0x0210
+#define LTC2975_ID 0x0220
#define LTC2977_ID 0x0130
-#define LTC2978_ID_REV1 0x0121
-#define LTC2978_ID_REV2 0x0122
-#define LTC2978A_ID 0x0124
-#define LTC3880_ID 0x4000
-#define LTC3880_ID_MASK 0xff00
+#define LTC2978_ID_REV1 0x0110 /* Early revision */
+#define LTC2978_ID_REV2 0x0120
+#define LTC2980_ID_A 0x8030 /* A/B for two die IDs */
+#define LTC2980_ID_B 0x8040
+#define LTC3880_ID 0x4020
+#define LTC3882_ID 0x4200
+#define LTC3882_ID_D1 0x4240 /* Dash 1 */
#define LTC3883_ID 0x4300
-#define LTC3883_ID_MASK 0xff00
-#define LTM4676_ID 0x4480 /* datasheet claims 0x440X */
-#define LTM4676_ID_MASK 0xfff0
+#define LTC3886_ID 0x4600
+#define LTC3887_ID 0x4700
+#define LTM2987_ID_A 0x8010 /* A/B for two die IDs */
+#define LTM2987_ID_B 0x8020
+#define LTM4675_ID 0x47a0
+#define LTM4676_ID_REV1 0x4400
+#define LTM4676_ID_REV2 0x4480
+#define LTM4676A_ID 0x47e0
#define LTC2974_NUM_PAGES 4
#define LTC2978_NUM_PAGES 8
#define LTC3880_NUM_PAGES 2
#define LTC3883_NUM_PAGES 1
+#define LTC_POLL_TIMEOUT 100 /* in milliseconds */
+
+#define LTC_NOT_BUSY BIT(5)
+#define LTC_NOT_PENDING BIT(4)
+
/*
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
* happens pretty much each time chip data is updated. Raw peak data therefore
@@ -82,13 +106,91 @@ struct ltc2978_data {
u16 temp_min[LTC2974_NUM_PAGES], temp_max[LTC2974_NUM_PAGES];
u16 vout_min[LTC2978_NUM_PAGES], vout_max[LTC2978_NUM_PAGES];
u16 iout_min[LTC2974_NUM_PAGES], iout_max[LTC2974_NUM_PAGES];
- u16 iin_max;
+ u16 iin_min, iin_max;
+ u16 pin_min, pin_max;
u16 temp2_max;
struct pmbus_driver_info info;
+ u32 features;
};
-
#define to_ltc2978_data(x) container_of(x, struct ltc2978_data, info)
+#define FEAT_CLEAR_PEAKS BIT(0)
+#define FEAT_NEEDS_POLLING BIT(1)
+
+#define has_clear_peaks(d) ((d)->features & FEAT_CLEAR_PEAKS)
+#define needs_polling(d) ((d)->features & FEAT_NEEDS_POLLING)
+
+static int ltc_wait_ready(struct i2c_client *client)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(LTC_POLL_TIMEOUT);
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int status;
+ u8 mask;
+
+ if (!needs_polling(data))
+ return 0;
+
+ /*
+ * LTC3883 does not support LTC_NOT_PENDING, even though
+ * the datasheet claims that it does.
+ */
+ mask = LTC_NOT_BUSY;
+ if (data->id != ltc3883)
+ mask |= LTC_NOT_PENDING;
+
+ do {
+ status = pmbus_read_byte_data(client, 0, LTC2978_MFR_COMMON);
+ if (status == -EBADMSG || status == -ENXIO) {
+ /* PEC error or NACK: chip may be busy, try again */
+ usleep_range(50, 100);
+ continue;
+ }
+ if (status < 0)
+ return status;
+
+ if ((status & mask) == mask)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int ltc_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ ret = ltc_wait_ready(client);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_read_word_data(client, page, reg);
+}
+
+static int ltc_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ ret = ltc_wait_ready(client);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static int ltc_write_byte(struct i2c_client *client, int page, u8 byte)
+{
+ int ret;
+
+ ret = ltc_wait_ready(client);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_write_byte(client, page, byte);
+}
+
static inline int lin11_to_val(int data)
{
s16 e = ((s16)data) >> 11;
@@ -102,6 +204,34 @@ static inline int lin11_to_val(int data)
return (e < 0 ? m >> -e : m << e);
}
+static int ltc_get_max(struct ltc2978_data *data, struct i2c_client *client,
+ int page, int reg, u16 *pmax)
+{
+ int ret;
+
+ ret = ltc_read_word_data(client, page, reg);
+ if (ret >= 0) {
+ if (lin11_to_val(ret) > lin11_to_val(*pmax))
+ *pmax = ret;
+ ret = *pmax;
+ }
+ return ret;
+}
+
+static int ltc_get_min(struct ltc2978_data *data, struct i2c_client *client,
+ int page, int reg, u16 *pmin)
+{
+ int ret;
+
+ ret = ltc_read_word_data(client, page, reg);
+ if (ret >= 0) {
+ if (lin11_to_val(ret) < lin11_to_val(*pmin))
+ *pmin = ret;
+ ret = *pmin;
+ }
+ return ret;
+}
+
static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
int reg)
{
@@ -111,15 +241,11 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_VIRT_READ_VIN_MAX:
- ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret) > lin11_to_val(data->vin_max))
- data->vin_max = ret;
- ret = data->vin_max;
- }
+ ret = ltc_get_max(data, client, page, LTC2978_MFR_VIN_PEAK,
+ &data->vin_max);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
+ ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
if (ret >= 0) {
/*
* VOUT is 16 bit unsigned with fixed exponent,
@@ -131,14 +257,9 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
}
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, page,
- LTC2978_MFR_TEMPERATURE_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->temp_max[page]))
- data->temp_max[page] = ret;
- ret = data->temp_max[page];
- }
+ ret = ltc_get_max(data, client, page,
+ LTC2978_MFR_TEMPERATURE_PEAK,
+ &data->temp_max[page]);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
@@ -146,6 +267,9 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
ret = 0;
break;
default:
+ ret = ltc_wait_ready(client);
+ if (ret < 0)
+ return ret;
ret = -ENODATA;
break;
}
@@ -160,15 +284,11 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_VIN_MIN:
- ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_MIN);
- if (ret >= 0) {
- if (lin11_to_val(ret) < lin11_to_val(data->vin_min))
- data->vin_min = ret;
- ret = data->vin_min;
- }
+ ret = ltc_get_min(data, client, page, LTC2978_MFR_VIN_MIN,
+ &data->vin_min);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
+ ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
if (ret >= 0) {
/*
* VOUT_MIN is known to not be supported on some lots
@@ -184,14 +304,9 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
}
break;
case PMBUS_VIRT_READ_TEMP_MIN:
- ret = pmbus_read_word_data(client, page,
- LTC2978_MFR_TEMPERATURE_MIN);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- < lin11_to_val(data->temp_min[page]))
- data->temp_min[page] = ret;
- ret = data->temp_min[page];
- }
+ ret = ltc_get_min(data, client, page,
+ LTC2978_MFR_TEMPERATURE_MIN,
+ &data->temp_min[page]);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
case PMBUS_VIRT_RESET_IOUT_HISTORY:
@@ -214,22 +329,12 @@ static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->iout_max[page]))
- data->iout_max[page] = ret;
- ret = data->iout_max[page];
- }
+ ret = ltc_get_max(data, client, page, LTC2974_MFR_IOUT_PEAK,
+ &data->iout_max[page]);
break;
case PMBUS_VIRT_READ_IOUT_MIN:
- ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_MIN);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- < lin11_to_val(data->iout_min[page]))
- data->iout_min[page] = ret;
- ret = data->iout_min[page];
- }
+ ret = ltc_get_min(data, client, page, LTC2974_MFR_IOUT_MIN,
+ &data->iout_min[page]);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = 0;
@@ -241,6 +346,40 @@ static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct ltc2978_data *data = to_ltc2978_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_IIN_MAX:
+ ret = ltc_get_max(data, client, page, LTC2975_MFR_IIN_PEAK,
+ &data->iin_max);
+ break;
+ case PMBUS_VIRT_READ_IIN_MIN:
+ ret = ltc_get_min(data, client, page, LTC2975_MFR_IIN_MIN,
+ &data->iin_min);
+ break;
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = ltc_get_max(data, client, page, LTC2975_MFR_PIN_PEAK,
+ &data->pin_max);
+ break;
+ case PMBUS_VIRT_READ_PIN_MIN:
+ ret = ltc_get_min(data, client, page, LTC2975_MFR_PIN_MIN,
+ &data->pin_min);
+ break;
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = ltc2978_read_word_data(client, page, reg);
+ break;
+ }
+ return ret;
+}
+
static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -249,22 +388,13 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, page, LTC3880_MFR_IOUT_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->iout_max[page]))
- data->iout_max[page] = ret;
- ret = data->iout_max[page];
- }
+ ret = ltc_get_max(data, client, page, LTC3880_MFR_IOUT_PEAK,
+ &data->iout_max[page]);
break;
case PMBUS_VIRT_READ_TEMP2_MAX:
- ret = pmbus_read_word_data(client, page,
- LTC3880_MFR_TEMPERATURE2_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
- data->temp2_max = ret;
- ret = data->temp2_max;
- }
+ ret = ltc_get_max(data, client, page,
+ LTC3880_MFR_TEMPERATURE2_PEAK,
+ &data->temp2_max);
break;
case PMBUS_VIRT_READ_VIN_MIN:
case PMBUS_VIRT_READ_VOUT_MIN:
@@ -290,13 +420,8 @@ static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_IIN_MAX:
- ret = pmbus_read_word_data(client, page, LTC3883_MFR_IIN_PEAK);
- if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->iin_max))
- data->iin_max = ret;
- ret = data->iin_max;
- }
+ ret = ltc_get_max(data, client, page, LTC3883_MFR_IIN_PEAK,
+ &data->iin_max);
break;
case PMBUS_VIRT_RESET_IIN_HISTORY:
ret = 0;
@@ -308,15 +433,15 @@ static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ltc2978_clear_peaks(struct i2c_client *client, int page,
- enum chips id)
+static int ltc2978_clear_peaks(struct ltc2978_data *data,
+ struct i2c_client *client, int page)
{
int ret;
- if (id == ltc3880 || id == ltc3883)
- ret = pmbus_write_byte(client, 0, LTC3880_MFR_CLEAR_PEAKS);
+ if (has_clear_peaks(data))
+ ret = ltc_write_byte(client, 0, LTC3880_MFR_CLEAR_PEAKS);
else
- ret = pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+ ret = ltc_write_byte(client, page, PMBUS_CLEAR_FAULTS);
return ret;
}
@@ -331,33 +456,42 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_VIRT_RESET_IIN_HISTORY:
data->iin_max = 0x7c00;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ data->iin_min = 0x7bff;
+ ret = ltc2978_clear_peaks(data, client, 0);
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ data->pin_max = 0x7c00;
+ data->pin_min = 0x7bff;
+ ret = ltc2978_clear_peaks(data, client, 0);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
data->iout_max[page] = 0x7c00;
data->iout_min[page] = 0xfbff;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ ret = ltc2978_clear_peaks(data, client, page);
break;
case PMBUS_VIRT_RESET_TEMP2_HISTORY:
data->temp2_max = 0x7c00;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ ret = ltc2978_clear_peaks(data, client, page);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
data->vout_min[page] = 0xffff;
data->vout_max[page] = 0;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ ret = ltc2978_clear_peaks(data, client, page);
break;
case PMBUS_VIRT_RESET_VIN_HISTORY:
data->vin_min = 0x7bff;
data->vin_max = 0x7c00;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ ret = ltc2978_clear_peaks(data, client, page);
break;
case PMBUS_VIRT_RESET_TEMP_HISTORY:
data->temp_min[page] = 0x7bff;
data->temp_max[page] = 0x7c00;
- ret = ltc2978_clear_peaks(client, page, data->id);
+ ret = ltc2978_clear_peaks(data, client, page);
break;
default:
+ ret = ltc_wait_ready(client);
+ if (ret < 0)
+ return ret;
ret = -ENODATA;
break;
}
@@ -366,10 +500,17 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
static const struct i2c_device_id ltc2978_id[] = {
{"ltc2974", ltc2974},
+ {"ltc2975", ltc2975},
{"ltc2977", ltc2977},
{"ltc2978", ltc2978},
+ {"ltc2980", ltc2980},
{"ltc3880", ltc3880},
+ {"ltc3882", ltc3882},
{"ltc3883", ltc3883},
+ {"ltc3886", ltc3886},
+ {"ltc3887", ltc3887},
+ {"ltm2987", ltm2987},
+ {"ltm4675", ltm4675},
{"ltm4676", ltm4676},
{}
};
@@ -388,10 +529,74 @@ static const struct regulator_desc ltc2978_reg_desc[] = {
};
#endif /* CONFIG_SENSORS_LTC2978_REGULATOR */
+static int ltc2978_get_id(struct i2c_client *client)
+{
+ int chip_id;
+
+ chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
+ if (chip_id < 0) {
+ const struct i2c_device_id *id;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0)
+ return ret;
+ if (ret < 3 || strncmp(buf, "LTC", 3))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0)
+ return ret;
+ for (id = &ltc2978_id[0]; strlen(id->name); id++) {
+ if (!strncasecmp(id->name, buf, strlen(id->name)))
+ return (int)id->driver_data;
+ }
+ return -ENODEV;
+ }
+
+ chip_id &= LTC2978_ID_MASK;
+
+ if (chip_id == LTC2974_ID)
+ return ltc2974;
+ else if (chip_id == LTC2975_ID)
+ return ltc2975;
+ else if (chip_id == LTC2977_ID)
+ return ltc2977;
+ else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2)
+ return ltc2978;
+ else if (chip_id == LTC2980_ID_A || chip_id == LTC2980_ID_B)
+ return ltc2980;
+ else if (chip_id == LTC3880_ID)
+ return ltc3880;
+ else if (chip_id == LTC3882_ID || chip_id == LTC3882_ID_D1)
+ return ltc3882;
+ else if (chip_id == LTC3883_ID)
+ return ltc3883;
+ else if (chip_id == LTC3886_ID)
+ return ltc3886;
+ else if (chip_id == LTC3887_ID)
+ return ltc3887;
+ else if (chip_id == LTM2987_ID_A || chip_id == LTM2987_ID_B)
+ return ltm2987;
+ else if (chip_id == LTM4675_ID)
+ return ltm4675;
+ else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
+ chip_id == LTM4676A_ID)
+ return ltm4676;
+
+ dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
+ return -ENODEV;
+}
+
static int ltc2978_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int chip_id, i;
+ int i, chip_id;
struct ltc2978_data *data;
struct pmbus_driver_info *info;
@@ -404,27 +609,11 @@ static int ltc2978_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
- chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
+ chip_id = ltc2978_get_id(client);
if (chip_id < 0)
return chip_id;
- if (chip_id == LTC2974_ID_REV1 || chip_id == LTC2974_ID_REV2) {
- data->id = ltc2974;
- } else if (chip_id == LTC2977_ID) {
- data->id = ltc2977;
- } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
- chip_id == LTC2978A_ID) {
- data->id = ltc2978;
- } else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
- data->id = ltc3880;
- } else if ((chip_id & LTC3883_ID_MASK) == LTC3883_ID) {
- data->id = ltc3883;
- } else if ((chip_id & LTM4676_ID_MASK) == LTM4676_ID) {
- data->id = ltm4676;
- } else {
- dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
- return -ENODEV;
- }
+ data->id = chip_id;
if (data->id != id->driver_data)
dev_warn(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
@@ -433,6 +622,9 @@ static int ltc2978_probe(struct i2c_client *client,
info = &data->info;
info->write_word_data = ltc2978_write_word_data;
+ info->write_byte = ltc_write_byte;
+ info->read_word_data = ltc_read_word_data;
+ info->read_byte_data = ltc_read_byte_data;
data->vin_min = 0x7bff;
data->vin_max = 0x7c00;
@@ -461,8 +653,23 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
}
break;
+ case ltc2975:
+ info->read_word_data = ltc2975_read_word_data;
+ info->pages = LTC2974_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP2;
+ for (i = 0; i < info->pages; i++) {
+ info->func[i] |= PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ }
+ break;
case ltc2977:
case ltc2978:
+ case ltc2980:
+ case ltm2987:
info->read_word_data = ltc2978_read_word_data;
info->pages = LTC2978_NUM_PAGES;
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
@@ -474,7 +681,10 @@ static int ltc2978_probe(struct i2c_client *client,
}
break;
case ltc3880:
+ case ltc3887:
+ case ltm4675:
case ltm4676:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
info->read_word_data = ltc3880_read_word_data;
info->pages = LTC3880_NUM_PAGES;
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -488,7 +698,23 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltc3882:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc3880_read_word_data;
+ info->pages = LTC3880_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VIN
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ break;
case ltc3883:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
info->read_word_data = ltc3883_read_word_data;
info->pages = LTC3883_NUM_PAGES;
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -498,6 +724,21 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
| PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltc3886:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc3883_read_word_data;
+ info->pages = LTC3880_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ break;
default:
return -ENODEV;
}
@@ -517,10 +758,17 @@ static int ltc2978_probe(struct i2c_client *client,
#ifdef CONFIG_OF
static const struct of_device_id ltc2978_of_match[] = {
{ .compatible = "lltc,ltc2974" },
+ { .compatible = "lltc,ltc2975" },
{ .compatible = "lltc,ltc2977" },
{ .compatible = "lltc,ltc2978" },
+ { .compatible = "lltc,ltc2980" },
{ .compatible = "lltc,ltc3880" },
+ { .compatible = "lltc,ltc3882" },
{ .compatible = "lltc,ltc3883" },
+ { .compatible = "lltc,ltc3886" },
+ { .compatible = "lltc,ltc3887" },
+ { .compatible = "lltc,ltm2987" },
+ { .compatible = "lltc,ltm4675" },
{ .compatible = "lltc,ltm4676" },
{ }
};
@@ -540,5 +788,5 @@ static struct i2c_driver ltc2978_driver = {
module_i2c_driver(ltc2978_driver);
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, LTC3883, and LTM4676");
+MODULE_DESCRIPTION("PMBus driver for LTC2978 and comppatible chips");
MODULE_LICENSE("GPL");
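
The ltc_get_min()/ltc_get_max() helpers above compare readings with
lin11_to_val(), which decodes the PMBus LINEAR11 format: an 11-bit
two's-complement mantissa in bits 10:0 and a 5-bit two's-complement exponent
in bits 15:11, giving value = mantissa * 2^exponent. A standalone sketch of
the same decode:

#include <stdint.h>
#include <stdio.h>

/* Decode PMBus LINEAR11: value = mantissa * 2^exponent */
static int lin11_to_val(int data)
{
	int16_t e = ((int16_t)data) >> 11;		/* sign-extend 5-bit exponent */
	int32_t m = ((int32_t)(data << 21)) >> 21;	/* sign-extend 11-bit mantissa */

	return e < 0 ? m >> -e : m << e;
}

int main(void)
{
	/* 0x7bff: e = 15, m = 1023 -> the largest positive LINEAR11 value,
	 * which is why the driver seeds the *_min history fields with it. */
	printf("%d\n", lin11_to_val(0x7bff));	/* 33521664 */
	return 0;
}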
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
new file mode 100644
index 000000000000..ab74aeae8cf2
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -0,0 +1,64 @@
+/*
+ * Hardware monitoring driver for Maxim MAX20751
+ *
+ * Copyright (c) 2015 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max20751_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .vrm_version = vr12,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT,
+};
+
+static int max20751_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max20751_info);
+}
+
+static const struct i2c_device_id max20751_id[] = {
+ {"max20751", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max20751_id);
+
+static struct i2c_driver max20751_driver = {
+ .driver = {
+ .name = "max20751",
+ },
+ .probe = max20751_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max20751_id,
+};
+
+module_i2c_driver(max20751_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20751");
+MODULE_LICENSE("GPL");
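
The MAX20751 is not auto-detected, so the client has to be instantiated
explicitly. A hedged board-code sketch; the bus number 0 and the address 0x72
are placeholders, not values taken from a datasheet:

#include <linux/i2c.h>
#include <linux/init.h>

/* Hypothetical board setup: register a MAX20751 on bus 0 at 0x72 */
static struct i2c_board_info max20751_board_info __initdata = {
	I2C_BOARD_INFO("max20751", 0x72),
};

static int __init board_add_max20751(void)
{
	return i2c_register_board_info(0, &max20751_board_info, 1);
}

On a running system the same binding can be created at run time through the
adapter's sysfs new_device file instead of board code.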
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 7e930c3ce1ab..74a1f6f68fb3 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -38,10 +39,10 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34446_MFR_IOUT_AVG 0xe2
#define MAX34446_MFR_TEMPERATURE_AVG 0xe3
-#define MAX34440_STATUS_OC_WARN (1 << 0)
-#define MAX34440_STATUS_OC_FAULT (1 << 1)
-#define MAX34440_STATUS_OT_FAULT (1 << 5)
-#define MAX34440_STATUS_OT_WARN (1 << 6)
+#define MAX34440_STATUS_OC_WARN BIT(0)
+#define MAX34440_STATUS_OC_FAULT BIT(1)
+#define MAX34440_STATUS_OT_FAULT BIT(5)
+#define MAX34440_STATUS_OT_WARN BIT(6)
struct max34440_data {
int id;
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index f04454a42fdd..dd4883a19045 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,15 +31,15 @@
#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
#define MAX8688_MFG_STATUS 0xd8
-#define MAX8688_STATUS_OC_FAULT (1 << 4)
-#define MAX8688_STATUS_OV_FAULT (1 << 5)
-#define MAX8688_STATUS_OV_WARNING (1 << 8)
-#define MAX8688_STATUS_UV_FAULT (1 << 9)
-#define MAX8688_STATUS_UV_WARNING (1 << 10)
-#define MAX8688_STATUS_UC_FAULT (1 << 11)
-#define MAX8688_STATUS_OC_WARNING (1 << 12)
-#define MAX8688_STATUS_OT_FAULT (1 << 13)
-#define MAX8688_STATUS_OT_WARNING (1 << 14)
+#define MAX8688_STATUS_OC_FAULT BIT(4)
+#define MAX8688_STATUS_OV_FAULT BIT(5)
+#define MAX8688_STATUS_OV_WARNING BIT(8)
+#define MAX8688_STATUS_UV_FAULT BIT(9)
+#define MAX8688_STATUS_UV_WARNING BIT(10)
+#define MAX8688_STATUS_UC_FAULT BIT(11)
+#define MAX8688_STATUS_OC_WARNING BIT(12)
+#define MAX8688_STATUS_OT_FAULT BIT(13)
+#define MAX8688_STATUS_OT_WARNING BIT(14)
static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
{
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 554d0249dcde..0a74991a60f0 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -129,6 +129,7 @@ static int pmbus_identify(struct i2c_client *client,
break;
case 1:
info->format[PSC_VOLTAGE_OUT] = vid;
+ info->vrm_version = vr11;
break;
case 2:
info->format[PSC_VOLTAGE_OUT] = direct;
@@ -193,6 +194,10 @@ static const struct i2c_device_id pmbus_id[] = {
{"pdt012", 1},
{"pmbus", 0},
{"tps40400", 1},
+ {"tps544b20", 1},
+ {"tps544b25", 1},
+ {"tps544c20", 1},
+ {"tps544c25", 1},
{"udt020", 1},
{}
};
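
The pmbus_identify() hunk above defaults VID-format parts to VR11: per the
PMBus specification, bits 7:5 of VOUT_MODE select the data format (0 =
linear, 1 = VID, 2 = direct). A condensed sketch of that dispatch, assuming
the surrounding pmbus.h declarations:

/* Sketch: derive the VOUT data format from VOUT_MODE bits 7:5 */
static int identify_vout_format(struct i2c_client *client,
				struct pmbus_driver_info *info)
{
	int vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);

	if (vout_mode < 0)
		return vout_mode;

	switch (vout_mode >> 5) {
	case 0:				/* linear mode */
		info->format[PSC_VOLTAGE_OUT] = linear;
		break;
	case 1:				/* VID mode; assume VR11 by default */
		info->format[PSC_VOLTAGE_OUT] = vid;
		info->vrm_version = vr11;
		break;
	case 2:				/* direct mode */
		info->format[PSC_VOLTAGE_OUT] = direct;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}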
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 89a23ff836e7..bfcb13bae34b 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -19,114 +19,116 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/regulator/driver.h>
-
#ifndef PMBUS_H
#define PMBUS_H
+#include <linux/bitops.h>
+#include <linux/regulator/driver.h>
+
/*
* Registers
*/
-#define PMBUS_PAGE 0x00
-#define PMBUS_OPERATION 0x01
-#define PMBUS_ON_OFF_CONFIG 0x02
-#define PMBUS_CLEAR_FAULTS 0x03
-#define PMBUS_PHASE 0x04
-
-#define PMBUS_CAPABILITY 0x19
-#define PMBUS_QUERY 0x1A
-
-#define PMBUS_VOUT_MODE 0x20
-#define PMBUS_VOUT_COMMAND 0x21
-#define PMBUS_VOUT_TRIM 0x22
-#define PMBUS_VOUT_CAL_OFFSET 0x23
-#define PMBUS_VOUT_MAX 0x24
-#define PMBUS_VOUT_MARGIN_HIGH 0x25
-#define PMBUS_VOUT_MARGIN_LOW 0x26
-#define PMBUS_VOUT_TRANSITION_RATE 0x27
-#define PMBUS_VOUT_DROOP 0x28
-#define PMBUS_VOUT_SCALE_LOOP 0x29
-#define PMBUS_VOUT_SCALE_MONITOR 0x2A
-
-#define PMBUS_COEFFICIENTS 0x30
-#define PMBUS_POUT_MAX 0x31
-
-#define PMBUS_FAN_CONFIG_12 0x3A
-#define PMBUS_FAN_COMMAND_1 0x3B
-#define PMBUS_FAN_COMMAND_2 0x3C
-#define PMBUS_FAN_CONFIG_34 0x3D
-#define PMBUS_FAN_COMMAND_3 0x3E
-#define PMBUS_FAN_COMMAND_4 0x3F
-
-#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40
-#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41
-#define PMBUS_VOUT_OV_WARN_LIMIT 0x42
-#define PMBUS_VOUT_UV_WARN_LIMIT 0x43
-#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44
-#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45
-#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46
-#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47
-#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48
-#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49
-#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A
-#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B
-#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C
-
-#define PMBUS_OT_FAULT_LIMIT 0x4F
-#define PMBUS_OT_FAULT_RESPONSE 0x50
-#define PMBUS_OT_WARN_LIMIT 0x51
-#define PMBUS_UT_WARN_LIMIT 0x52
-#define PMBUS_UT_FAULT_LIMIT 0x53
-#define PMBUS_UT_FAULT_RESPONSE 0x54
-#define PMBUS_VIN_OV_FAULT_LIMIT 0x55
-#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56
-#define PMBUS_VIN_OV_WARN_LIMIT 0x57
-#define PMBUS_VIN_UV_WARN_LIMIT 0x58
-#define PMBUS_VIN_UV_FAULT_LIMIT 0x59
-
-#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B
-#define PMBUS_IIN_OC_WARN_LIMIT 0x5D
-
-#define PMBUS_POUT_OP_FAULT_LIMIT 0x68
-#define PMBUS_POUT_OP_WARN_LIMIT 0x6A
-#define PMBUS_PIN_OP_WARN_LIMIT 0x6B
-
-#define PMBUS_STATUS_BYTE 0x78
-#define PMBUS_STATUS_WORD 0x79
-#define PMBUS_STATUS_VOUT 0x7A
-#define PMBUS_STATUS_IOUT 0x7B
-#define PMBUS_STATUS_INPUT 0x7C
-#define PMBUS_STATUS_TEMPERATURE 0x7D
-#define PMBUS_STATUS_CML 0x7E
-#define PMBUS_STATUS_OTHER 0x7F
-#define PMBUS_STATUS_MFR_SPECIFIC 0x80
-#define PMBUS_STATUS_FAN_12 0x81
-#define PMBUS_STATUS_FAN_34 0x82
-
-#define PMBUS_READ_VIN 0x88
-#define PMBUS_READ_IIN 0x89
-#define PMBUS_READ_VCAP 0x8A
-#define PMBUS_READ_VOUT 0x8B
-#define PMBUS_READ_IOUT 0x8C
-#define PMBUS_READ_TEMPERATURE_1 0x8D
-#define PMBUS_READ_TEMPERATURE_2 0x8E
-#define PMBUS_READ_TEMPERATURE_3 0x8F
-#define PMBUS_READ_FAN_SPEED_1 0x90
-#define PMBUS_READ_FAN_SPEED_2 0x91
-#define PMBUS_READ_FAN_SPEED_3 0x92
-#define PMBUS_READ_FAN_SPEED_4 0x93
-#define PMBUS_READ_DUTY_CYCLE 0x94
-#define PMBUS_READ_FREQUENCY 0x95
-#define PMBUS_READ_POUT 0x96
-#define PMBUS_READ_PIN 0x97
-
-#define PMBUS_REVISION 0x98
-#define PMBUS_MFR_ID 0x99
-#define PMBUS_MFR_MODEL 0x9A
-#define PMBUS_MFR_REVISION 0x9B
-#define PMBUS_MFR_LOCATION 0x9C
-#define PMBUS_MFR_DATE 0x9D
-#define PMBUS_MFR_SERIAL 0x9E
+enum pmbus_regs {
+ PMBUS_PAGE = 0x00,
+ PMBUS_OPERATION = 0x01,
+ PMBUS_ON_OFF_CONFIG = 0x02,
+ PMBUS_CLEAR_FAULTS = 0x03,
+ PMBUS_PHASE = 0x04,
+
+ PMBUS_CAPABILITY = 0x19,
+ PMBUS_QUERY = 0x1A,
+
+ PMBUS_VOUT_MODE = 0x20,
+ PMBUS_VOUT_COMMAND = 0x21,
+ PMBUS_VOUT_TRIM = 0x22,
+ PMBUS_VOUT_CAL_OFFSET = 0x23,
+ PMBUS_VOUT_MAX = 0x24,
+ PMBUS_VOUT_MARGIN_HIGH = 0x25,
+ PMBUS_VOUT_MARGIN_LOW = 0x26,
+ PMBUS_VOUT_TRANSITION_RATE = 0x27,
+ PMBUS_VOUT_DROOP = 0x28,
+ PMBUS_VOUT_SCALE_LOOP = 0x29,
+ PMBUS_VOUT_SCALE_MONITOR = 0x2A,
+
+ PMBUS_COEFFICIENTS = 0x30,
+ PMBUS_POUT_MAX = 0x31,
+
+ PMBUS_FAN_CONFIG_12 = 0x3A,
+ PMBUS_FAN_COMMAND_1 = 0x3B,
+ PMBUS_FAN_COMMAND_2 = 0x3C,
+ PMBUS_FAN_CONFIG_34 = 0x3D,
+ PMBUS_FAN_COMMAND_3 = 0x3E,
+ PMBUS_FAN_COMMAND_4 = 0x3F,
+
+ PMBUS_VOUT_OV_FAULT_LIMIT = 0x40,
+ PMBUS_VOUT_OV_FAULT_RESPONSE = 0x41,
+ PMBUS_VOUT_OV_WARN_LIMIT = 0x42,
+ PMBUS_VOUT_UV_WARN_LIMIT = 0x43,
+ PMBUS_VOUT_UV_FAULT_LIMIT = 0x44,
+ PMBUS_VOUT_UV_FAULT_RESPONSE = 0x45,
+ PMBUS_IOUT_OC_FAULT_LIMIT = 0x46,
+ PMBUS_IOUT_OC_FAULT_RESPONSE = 0x47,
+ PMBUS_IOUT_OC_LV_FAULT_LIMIT = 0x48,
+ PMBUS_IOUT_OC_LV_FAULT_RESPONSE = 0x49,
+ PMBUS_IOUT_OC_WARN_LIMIT = 0x4A,
+ PMBUS_IOUT_UC_FAULT_LIMIT = 0x4B,
+ PMBUS_IOUT_UC_FAULT_RESPONSE = 0x4C,
+
+ PMBUS_OT_FAULT_LIMIT = 0x4F,
+ PMBUS_OT_FAULT_RESPONSE = 0x50,
+ PMBUS_OT_WARN_LIMIT = 0x51,
+ PMBUS_UT_WARN_LIMIT = 0x52,
+ PMBUS_UT_FAULT_LIMIT = 0x53,
+ PMBUS_UT_FAULT_RESPONSE = 0x54,
+ PMBUS_VIN_OV_FAULT_LIMIT = 0x55,
+ PMBUS_VIN_OV_FAULT_RESPONSE = 0x56,
+ PMBUS_VIN_OV_WARN_LIMIT = 0x57,
+ PMBUS_VIN_UV_WARN_LIMIT = 0x58,
+ PMBUS_VIN_UV_FAULT_LIMIT = 0x59,
+
+ PMBUS_IIN_OC_FAULT_LIMIT = 0x5B,
+ PMBUS_IIN_OC_WARN_LIMIT = 0x5D,
+
+ PMBUS_POUT_OP_FAULT_LIMIT = 0x68,
+ PMBUS_POUT_OP_WARN_LIMIT = 0x6A,
+ PMBUS_PIN_OP_WARN_LIMIT = 0x6B,
+
+ PMBUS_STATUS_BYTE = 0x78,
+ PMBUS_STATUS_WORD = 0x79,
+ PMBUS_STATUS_VOUT = 0x7A,
+ PMBUS_STATUS_IOUT = 0x7B,
+ PMBUS_STATUS_INPUT = 0x7C,
+ PMBUS_STATUS_TEMPERATURE = 0x7D,
+ PMBUS_STATUS_CML = 0x7E,
+ PMBUS_STATUS_OTHER = 0x7F,
+ PMBUS_STATUS_MFR_SPECIFIC = 0x80,
+ PMBUS_STATUS_FAN_12 = 0x81,
+ PMBUS_STATUS_FAN_34 = 0x82,
+
+ PMBUS_READ_VIN = 0x88,
+ PMBUS_READ_IIN = 0x89,
+ PMBUS_READ_VCAP = 0x8A,
+ PMBUS_READ_VOUT = 0x8B,
+ PMBUS_READ_IOUT = 0x8C,
+ PMBUS_READ_TEMPERATURE_1 = 0x8D,
+ PMBUS_READ_TEMPERATURE_2 = 0x8E,
+ PMBUS_READ_TEMPERATURE_3 = 0x8F,
+ PMBUS_READ_FAN_SPEED_1 = 0x90,
+ PMBUS_READ_FAN_SPEED_2 = 0x91,
+ PMBUS_READ_FAN_SPEED_3 = 0x92,
+ PMBUS_READ_FAN_SPEED_4 = 0x93,
+ PMBUS_READ_DUTY_CYCLE = 0x94,
+ PMBUS_READ_FREQUENCY = 0x95,
+ PMBUS_READ_POUT = 0x96,
+ PMBUS_READ_PIN = 0x97,
+
+ PMBUS_REVISION = 0x98,
+ PMBUS_MFR_ID = 0x99,
+ PMBUS_MFR_MODEL = 0x9A,
+ PMBUS_MFR_REVISION = 0x9B,
+ PMBUS_MFR_LOCATION = 0x9C,
+ PMBUS_MFR_DATE = 0x9D,
+ PMBUS_MFR_SERIAL = 0x9E,
/*
* Virtual registers.
@@ -148,55 +150,58 @@
* the calling PMBus core code will abort if the chip driver returns an error
* code when reading or writing virtual registers.
*/
-#define PMBUS_VIRT_BASE 0x100
-#define PMBUS_VIRT_READ_TEMP_AVG (PMBUS_VIRT_BASE + 0)
-#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 1)
-#define PMBUS_VIRT_READ_TEMP_MAX (PMBUS_VIRT_BASE + 2)
-#define PMBUS_VIRT_RESET_TEMP_HISTORY (PMBUS_VIRT_BASE + 3)
-#define PMBUS_VIRT_READ_VIN_AVG (PMBUS_VIRT_BASE + 4)
-#define PMBUS_VIRT_READ_VIN_MIN (PMBUS_VIRT_BASE + 5)
-#define PMBUS_VIRT_READ_VIN_MAX (PMBUS_VIRT_BASE + 6)
-#define PMBUS_VIRT_RESET_VIN_HISTORY (PMBUS_VIRT_BASE + 7)
-#define PMBUS_VIRT_READ_IIN_AVG (PMBUS_VIRT_BASE + 8)
-#define PMBUS_VIRT_READ_IIN_MIN (PMBUS_VIRT_BASE + 9)
-#define PMBUS_VIRT_READ_IIN_MAX (PMBUS_VIRT_BASE + 10)
-#define PMBUS_VIRT_RESET_IIN_HISTORY (PMBUS_VIRT_BASE + 11)
-#define PMBUS_VIRT_READ_PIN_AVG (PMBUS_VIRT_BASE + 12)
-#define PMBUS_VIRT_READ_PIN_MAX (PMBUS_VIRT_BASE + 13)
-#define PMBUS_VIRT_RESET_PIN_HISTORY (PMBUS_VIRT_BASE + 14)
-#define PMBUS_VIRT_READ_POUT_AVG (PMBUS_VIRT_BASE + 15)
-#define PMBUS_VIRT_READ_POUT_MAX (PMBUS_VIRT_BASE + 16)
-#define PMBUS_VIRT_RESET_POUT_HISTORY (PMBUS_VIRT_BASE + 17)
-#define PMBUS_VIRT_READ_VOUT_AVG (PMBUS_VIRT_BASE + 18)
-#define PMBUS_VIRT_READ_VOUT_MIN (PMBUS_VIRT_BASE + 19)
-#define PMBUS_VIRT_READ_VOUT_MAX (PMBUS_VIRT_BASE + 20)
-#define PMBUS_VIRT_RESET_VOUT_HISTORY (PMBUS_VIRT_BASE + 21)
-#define PMBUS_VIRT_READ_IOUT_AVG (PMBUS_VIRT_BASE + 22)
-#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 23)
-#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 24)
-#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 25)
-#define PMBUS_VIRT_READ_TEMP2_AVG (PMBUS_VIRT_BASE + 26)
-#define PMBUS_VIRT_READ_TEMP2_MIN (PMBUS_VIRT_BASE + 27)
-#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 28)
-#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29)
-
-#define PMBUS_VIRT_READ_VMON (PMBUS_VIRT_BASE + 30)
-#define PMBUS_VIRT_VMON_UV_WARN_LIMIT (PMBUS_VIRT_BASE + 31)
-#define PMBUS_VIRT_VMON_OV_WARN_LIMIT (PMBUS_VIRT_BASE + 32)
-#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT (PMBUS_VIRT_BASE + 33)
-#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT (PMBUS_VIRT_BASE + 34)
-#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35)
+ PMBUS_VIRT_BASE = 0x100,
+ PMBUS_VIRT_READ_TEMP_AVG,
+ PMBUS_VIRT_READ_TEMP_MIN,
+ PMBUS_VIRT_READ_TEMP_MAX,
+ PMBUS_VIRT_RESET_TEMP_HISTORY,
+ PMBUS_VIRT_READ_VIN_AVG,
+ PMBUS_VIRT_READ_VIN_MIN,
+ PMBUS_VIRT_READ_VIN_MAX,
+ PMBUS_VIRT_RESET_VIN_HISTORY,
+ PMBUS_VIRT_READ_IIN_AVG,
+ PMBUS_VIRT_READ_IIN_MIN,
+ PMBUS_VIRT_READ_IIN_MAX,
+ PMBUS_VIRT_RESET_IIN_HISTORY,
+ PMBUS_VIRT_READ_PIN_AVG,
+ PMBUS_VIRT_READ_PIN_MIN,
+ PMBUS_VIRT_READ_PIN_MAX,
+ PMBUS_VIRT_RESET_PIN_HISTORY,
+ PMBUS_VIRT_READ_POUT_AVG,
+ PMBUS_VIRT_READ_POUT_MIN,
+ PMBUS_VIRT_READ_POUT_MAX,
+ PMBUS_VIRT_RESET_POUT_HISTORY,
+ PMBUS_VIRT_READ_VOUT_AVG,
+ PMBUS_VIRT_READ_VOUT_MIN,
+ PMBUS_VIRT_READ_VOUT_MAX,
+ PMBUS_VIRT_RESET_VOUT_HISTORY,
+ PMBUS_VIRT_READ_IOUT_AVG,
+ PMBUS_VIRT_READ_IOUT_MIN,
+ PMBUS_VIRT_READ_IOUT_MAX,
+ PMBUS_VIRT_RESET_IOUT_HISTORY,
+ PMBUS_VIRT_READ_TEMP2_AVG,
+ PMBUS_VIRT_READ_TEMP2_MIN,
+ PMBUS_VIRT_READ_TEMP2_MAX,
+ PMBUS_VIRT_RESET_TEMP2_HISTORY,
+
+ PMBUS_VIRT_READ_VMON,
+ PMBUS_VIRT_VMON_UV_WARN_LIMIT,
+ PMBUS_VIRT_VMON_OV_WARN_LIMIT,
+ PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
+ PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
+ PMBUS_VIRT_STATUS_VMON,
+};
/*
* OPERATION
*/
-#define PB_OPERATION_CONTROL_ON (1<<7)
+#define PB_OPERATION_CONTROL_ON BIT(7)
/*
* CAPABILITY
*/
-#define PB_CAPABILITY_SMBALERT (1<<4)
-#define PB_CAPABILITY_ERROR_CHECK (1<<7)
+#define PB_CAPABILITY_SMBALERT BIT(4)
+#define PB_CAPABILITY_ERROR_CHECK BIT(7)
/*
* VOUT_MODE
@@ -211,94 +216,94 @@
/*
* Fan configuration
*/
-#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1))
-#define PB_FAN_2_RPM (1 << 2)
-#define PB_FAN_2_INSTALLED (1 << 3)
-#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5))
-#define PB_FAN_1_RPM (1 << 6)
-#define PB_FAN_1_INSTALLED (1 << 7)
+#define PB_FAN_2_PULSE_MASK (BIT(0) | BIT(1))
+#define PB_FAN_2_RPM BIT(2)
+#define PB_FAN_2_INSTALLED BIT(3)
+#define PB_FAN_1_PULSE_MASK (BIT(4) | BIT(5))
+#define PB_FAN_1_RPM BIT(6)
+#define PB_FAN_1_INSTALLED BIT(7)
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
-#define PB_STATUS_NONE_ABOVE (1<<0)
-#define PB_STATUS_CML (1<<1)
-#define PB_STATUS_TEMPERATURE (1<<2)
-#define PB_STATUS_VIN_UV (1<<3)
-#define PB_STATUS_IOUT_OC (1<<4)
-#define PB_STATUS_VOUT_OV (1<<5)
-#define PB_STATUS_OFF (1<<6)
-#define PB_STATUS_BUSY (1<<7)
+#define PB_STATUS_NONE_ABOVE BIT(0)
+#define PB_STATUS_CML BIT(1)
+#define PB_STATUS_TEMPERATURE BIT(2)
+#define PB_STATUS_VIN_UV BIT(3)
+#define PB_STATUS_IOUT_OC BIT(4)
+#define PB_STATUS_VOUT_OV BIT(5)
+#define PB_STATUS_OFF BIT(6)
+#define PB_STATUS_BUSY BIT(7)
/*
* STATUS_WORD (upper)
*/
-#define PB_STATUS_UNKNOWN (1<<8)
-#define PB_STATUS_OTHER (1<<9)
-#define PB_STATUS_FANS (1<<10)
-#define PB_STATUS_POWER_GOOD_N (1<<11)
-#define PB_STATUS_WORD_MFR (1<<12)
-#define PB_STATUS_INPUT (1<<13)
-#define PB_STATUS_IOUT_POUT (1<<14)
-#define PB_STATUS_VOUT (1<<15)
+#define PB_STATUS_UNKNOWN BIT(8)
+#define PB_STATUS_OTHER BIT(9)
+#define PB_STATUS_FANS BIT(10)
+#define PB_STATUS_POWER_GOOD_N BIT(11)
+#define PB_STATUS_WORD_MFR BIT(12)
+#define PB_STATUS_INPUT BIT(13)
+#define PB_STATUS_IOUT_POUT BIT(14)
+#define PB_STATUS_VOUT BIT(15)
/*
* STATUS_IOUT
*/
-#define PB_POUT_OP_WARNING (1<<0)
-#define PB_POUT_OP_FAULT (1<<1)
-#define PB_POWER_LIMITING (1<<2)
-#define PB_CURRENT_SHARE_FAULT (1<<3)
-#define PB_IOUT_UC_FAULT (1<<4)
-#define PB_IOUT_OC_WARNING (1<<5)
-#define PB_IOUT_OC_LV_FAULT (1<<6)
-#define PB_IOUT_OC_FAULT (1<<7)
+#define PB_POUT_OP_WARNING BIT(0)
+#define PB_POUT_OP_FAULT BIT(1)
+#define PB_POWER_LIMITING BIT(2)
+#define PB_CURRENT_SHARE_FAULT BIT(3)
+#define PB_IOUT_UC_FAULT BIT(4)
+#define PB_IOUT_OC_WARNING BIT(5)
+#define PB_IOUT_OC_LV_FAULT BIT(6)
+#define PB_IOUT_OC_FAULT BIT(7)
/*
* STATUS_VOUT, STATUS_INPUT
*/
-#define PB_VOLTAGE_UV_FAULT (1<<4)
-#define PB_VOLTAGE_UV_WARNING (1<<5)
-#define PB_VOLTAGE_OV_WARNING (1<<6)
-#define PB_VOLTAGE_OV_FAULT (1<<7)
+#define PB_VOLTAGE_UV_FAULT BIT(4)
+#define PB_VOLTAGE_UV_WARNING BIT(5)
+#define PB_VOLTAGE_OV_WARNING BIT(6)
+#define PB_VOLTAGE_OV_FAULT BIT(7)
/*
* STATUS_INPUT
*/
-#define PB_PIN_OP_WARNING (1<<0)
-#define PB_IIN_OC_WARNING (1<<1)
-#define PB_IIN_OC_FAULT (1<<2)
+#define PB_PIN_OP_WARNING BIT(0)
+#define PB_IIN_OC_WARNING BIT(1)
+#define PB_IIN_OC_FAULT BIT(2)
/*
* STATUS_TEMPERATURE
*/
-#define PB_TEMP_UT_FAULT (1<<4)
-#define PB_TEMP_UT_WARNING (1<<5)
-#define PB_TEMP_OT_WARNING (1<<6)
-#define PB_TEMP_OT_FAULT (1<<7)
+#define PB_TEMP_UT_FAULT BIT(4)
+#define PB_TEMP_UT_WARNING BIT(5)
+#define PB_TEMP_OT_WARNING BIT(6)
+#define PB_TEMP_OT_FAULT BIT(7)
/*
* STATUS_FAN
*/
-#define PB_FAN_AIRFLOW_WARNING (1<<0)
-#define PB_FAN_AIRFLOW_FAULT (1<<1)
-#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2)
-#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3)
-#define PB_FAN_FAN2_WARNING (1<<4)
-#define PB_FAN_FAN1_WARNING (1<<5)
-#define PB_FAN_FAN2_FAULT (1<<6)
-#define PB_FAN_FAN1_FAULT (1<<7)
+#define PB_FAN_AIRFLOW_WARNING BIT(0)
+#define PB_FAN_AIRFLOW_FAULT BIT(1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE BIT(2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE BIT(3)
+#define PB_FAN_FAN2_WARNING BIT(4)
+#define PB_FAN_FAN1_WARNING BIT(5)
+#define PB_FAN_FAN2_FAULT BIT(6)
+#define PB_FAN_FAN1_FAULT BIT(7)
/*
* CML_FAULT_STATUS
*/
-#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0)
-#define PB_CML_FAULT_OTHER_COMM (1<<1)
-#define PB_CML_FAULT_PROCESSOR (1<<3)
-#define PB_CML_FAULT_MEMORY (1<<4)
-#define PB_CML_FAULT_PACKET_ERROR (1<<5)
-#define PB_CML_FAULT_INVALID_DATA (1<<6)
-#define PB_CML_FAULT_INVALID_COMMAND (1<<7)
+#define PB_CML_FAULT_OTHER_MEM_LOGIC BIT(0)
+#define PB_CML_FAULT_OTHER_COMM BIT(1)
+#define PB_CML_FAULT_PROCESSOR BIT(3)
+#define PB_CML_FAULT_MEMORY BIT(4)
+#define PB_CML_FAULT_PACKET_ERROR BIT(5)
+#define PB_CML_FAULT_INVALID_DATA BIT(6)
+#define PB_CML_FAULT_INVALID_COMMAND BIT(7)
enum pmbus_sensor_classes {
PSC_VOLTAGE_IN = 0,
@@ -314,32 +319,34 @@ enum pmbus_sensor_classes {
#define PMBUS_PAGES 32 /* Per PMBus specification */
/* Functionality bit mask */
-#define PMBUS_HAVE_VIN (1 << 0)
-#define PMBUS_HAVE_VCAP (1 << 1)
-#define PMBUS_HAVE_VOUT (1 << 2)
-#define PMBUS_HAVE_IIN (1 << 3)
-#define PMBUS_HAVE_IOUT (1 << 4)
-#define PMBUS_HAVE_PIN (1 << 5)
-#define PMBUS_HAVE_POUT (1 << 6)
-#define PMBUS_HAVE_FAN12 (1 << 7)
-#define PMBUS_HAVE_FAN34 (1 << 8)
-#define PMBUS_HAVE_TEMP (1 << 9)
-#define PMBUS_HAVE_TEMP2 (1 << 10)
-#define PMBUS_HAVE_TEMP3 (1 << 11)
-#define PMBUS_HAVE_STATUS_VOUT (1 << 12)
-#define PMBUS_HAVE_STATUS_IOUT (1 << 13)
-#define PMBUS_HAVE_STATUS_INPUT (1 << 14)
-#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
-#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
-#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
-#define PMBUS_HAVE_VMON (1 << 18)
-#define PMBUS_HAVE_STATUS_VMON (1 << 19)
+#define PMBUS_HAVE_VIN BIT(0)
+#define PMBUS_HAVE_VCAP BIT(1)
+#define PMBUS_HAVE_VOUT BIT(2)
+#define PMBUS_HAVE_IIN BIT(3)
+#define PMBUS_HAVE_IOUT BIT(4)
+#define PMBUS_HAVE_PIN BIT(5)
+#define PMBUS_HAVE_POUT BIT(6)
+#define PMBUS_HAVE_FAN12 BIT(7)
+#define PMBUS_HAVE_FAN34 BIT(8)
+#define PMBUS_HAVE_TEMP BIT(9)
+#define PMBUS_HAVE_TEMP2 BIT(10)
+#define PMBUS_HAVE_TEMP3 BIT(11)
+#define PMBUS_HAVE_STATUS_VOUT BIT(12)
+#define PMBUS_HAVE_STATUS_IOUT BIT(13)
+#define PMBUS_HAVE_STATUS_INPUT BIT(14)
+#define PMBUS_HAVE_STATUS_TEMP BIT(15)
+#define PMBUS_HAVE_STATUS_FAN12 BIT(16)
+#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
+#define PMBUS_HAVE_VMON BIT(18)
+#define PMBUS_HAVE_STATUS_VMON BIT(19)
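These BIT() conversions are purely mechanical. For reference, a minimal sketch of the macro they rely on, matching its definition in include/linux/bitops.h:

    /* BIT(nr) yields an unsigned long, so BIT(31) cannot shift into the
     * sign bit of a plain int the way (1 << 31) can. */
    #define BIT(nr)	(1UL << (nr))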
enum pmbus_data_format { linear = 0, direct, vid };
+enum vrm_version { vr11 = 0, vr12 };
struct pmbus_driver_info {
int pages; /* Total number of pages */
enum pmbus_data_format format[PSC_NUM_CLASSES];
+ enum vrm_version vrm_version;
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
@@ -380,7 +387,7 @@ struct pmbus_driver_info {
/* Regulator ops */
-extern struct regulator_ops pmbus_regulator_ops;
+extern const struct regulator_ops pmbus_regulator_ops;
/* Macro for filling in array of struct regulator_desc */
#define PMBUS_REGULATOR(_name, _id) \
@@ -390,6 +397,7 @@ extern struct regulator_ops pmbus_regulator_ops;
.of_match = of_match_ptr(_name # _id), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &pmbus_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
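Drivers consume this macro when declaring their regulator descriptors. A minimal usage sketch (array name and supply name hypothetical), hooked up through the reg_desc/num_regulators fields of struct pmbus_driver_info:

    /* Hypothetical example: expose two rails as "vout0" and "vout1". */
    static const struct regulator_desc example_regulators[] = {
            PMBUS_REGULATOR("vout", 0),
            PMBUS_REGULATOR("vout", 1),
    };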
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f2e47c7dd808..ba59eaef2e07 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -515,16 +515,24 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/*
* Convert VID sensor values to milli- or micro-units
* depending on sensor type.
- * We currently only support VR11.
*/
static long pmbus_reg2data_vid(struct pmbus_data *data,
struct pmbus_sensor *sensor)
{
long val = sensor->data;
+ long rv = 0;
- if (val < 0x02 || val > 0xb2)
- return 0;
- return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+ switch (data->info->vrm_version) {
+ case vr11:
+ if (val >= 0x02 && val <= 0xb2)
+ rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+ break;
+ case vr12:
+ if (val >= 0x01)
+ rv = 250 + (val - 1) * 5;
+ break;
+ }
+ return rv;
}
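A quick endpoint check of the two VID tables, with results in millivolts: for VR11, val = 0x02 yields DIV_ROUND_CLOSEST(160000 - 0, 100) = 1600 mV, and each further step subtracts 625/100 = 6.25 mV; for VR12, val = 0x01 yields 250 mV, with 5 mV per step thereafter. Codes outside a table's valid range still read back as 0.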
static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
@@ -1329,6 +1337,10 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
.update = true,
.attr = "average",
}, {
+ .reg = PMBUS_VIRT_READ_PIN_MIN,
+ .update = true,
+ .attr = "input_lowest",
+ }, {
.reg = PMBUS_VIRT_READ_PIN_MAX,
.update = true,
.attr = "input_highest",
@@ -1359,6 +1371,10 @@ static const struct pmbus_limit_attr pout_limit_attrs[] = {
.update = true,
.attr = "average",
}, {
+ .reg = PMBUS_VIRT_READ_POUT_MIN,
+ .update = true,
+ .attr = "input_lowest",
+ }, {
.reg = PMBUS_VIRT_READ_POUT_MAX,
.update = true,
.attr = "input_highest",
@@ -1735,6 +1751,11 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
}
}
+ /* Enable PEC if the controller supports it */
+ ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+ client->flags |= I2C_CLIENT_PEC;
+
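PB_CAPABILITY_ERROR_CHECK is bit 7 of the PMBus CAPABILITY register (command 0x19), which advertises Packet Error Checking support. Once I2C_CLIENT_PEC is set, the SMBus core transparently appends and verifies a CRC-8 PEC byte on each transaction, so no further changes are needed in the driver's transfer paths.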
pmbus_clear_faults(client);
if (info->identify) {
@@ -1796,7 +1817,7 @@ static int pmbus_regulator_disable(struct regulator_dev *rdev)
return _pmbus_regulator_on_off(rdev, 0);
}
-struct regulator_ops pmbus_regulator_ops = {
+const struct regulator_ops pmbus_regulator_ops = {
.enable = pmbus_regulator_enable,
.disable = pmbus_regulator_disable,
.is_enabled = pmbus_regulator_is_enabled,
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 819644121259..771802d7e20d 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -44,16 +45,16 @@ struct zl6100_data {
#define ZL6100_MFR_CONFIG 0xd0
#define ZL6100_DEVICE_ID 0xe4
-#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+#define ZL6100_MFR_XTEMP_ENABLE BIT(7)
#define MFR_VMON_OV_FAULT_LIMIT 0xf5
#define MFR_VMON_UV_FAULT_LIMIT 0xf6
#define MFR_READ_VMON 0xf7
-#define VMON_UV_WARNING (1 << 5)
-#define VMON_OV_WARNING (1 << 4)
-#define VMON_UV_FAULT (1 << 1)
-#define VMON_OV_FAULT (1 << 0)
+#define VMON_UV_WARNING BIT(5)
+#define VMON_OV_WARNING BIT(4)
+#define VMON_UV_FAULT BIT(1)
+#define VMON_OV_FAULT BIT(0)
#define ZL6100_WAIT_TIME 1000 /* uS */
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 497a7f822a12..a2fdbb7d20ed 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -33,6 +33,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/bitrev.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -173,19 +174,6 @@ struct sht15_data {
};
/**
- * sht15_reverse() - reverse a byte
- * @byte: byte to reverse.
- */
-static u8 sht15_reverse(u8 byte)
-{
- u8 i, c;
-
- for (c = 0, i = 0; i < 8; i++)
- c |= (!!(byte & (1 << i))) << (7 - i);
- return c;
-}
-
-/**
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
@@ -196,7 +184,7 @@ static u8 sht15_crc8(struct sht15_data *data,
const u8 *value,
int len)
{
- u8 crc = sht15_reverse(data->val_status & 0x0F);
+ u8 crc = bitrev8(data->val_status & 0x0F);
while (len--) {
crc = sht15_crc8_table[*value ^ crc];
@@ -477,7 +465,7 @@ static int sht15_update_status(struct sht15_data *data)
if (data->checksumming) {
sht15_ack(data);
- dev_checksum = sht15_reverse(sht15_read_byte(data));
+ dev_checksum = bitrev8(sht15_read_byte(data));
checksum_vals[0] = SHT15_READ_STATUS;
checksum_vals[1] = status;
data->checksum_ok = (sht15_crc8(data, checksum_vals, 2)
@@ -864,7 +852,7 @@ static void sht15_bh_read_data(struct work_struct *work_s)
*/
if (sht15_ack(data))
goto wakeup;
- dev_checksum = sht15_reverse(sht15_read_byte(data));
+ dev_checksum = bitrev8(sht15_read_byte(data));
checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
checksum_vals[1] = (u8) (val >> 8);
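bitrev8() from <linux/bitrev.h> is a drop-in replacement for the removed open-coded loop; in the generic implementation it is a 256-entry table lookup rather than a per-bit shift, e.g. bitrev8(0x0f) == 0xf0 and bitrev8(0x80) == 0x01, exactly the values the removed sht15_reverse() produced.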
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 9da2735f1424..65482624ea2c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -98,7 +98,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
return tmp102;
}
-static int tmp102_read_temp(void *dev, long *temp)
+static int tmp102_read_temp(void *dev, int *temp)
{
struct tmp102 *tmp102 = tmp102_update_device(dev);
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 098ffbec0a44..b4481eb29304 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -183,7 +183,9 @@
* @seq_13_event: event causing the transition from 1 to 3.
* @seq_curr_state: current value of the sequencer register.
* @ctxid_idx: index for the context ID registers.
- * @ctxid_val: value for the context ID to trigger on.
+ * @ctxid_pid: value for the context ID to trigger on.
+ * @ctxid_pid: value for the context ID to trigger on.
+ * @ctxid_vpid: Virtual PID seen by users if PID namespaces are enabled,
+ * otherwise the same value as ctxid_pid.
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
@@ -235,7 +237,8 @@ struct etm_drvdata {
u32 seq_13_event;
u32 seq_curr_state;
u8 ctxid_idx;
- u32 ctxid_val[ETM_MAX_CTXID_CMP];
+ u32 ctxid_pid[ETM_MAX_CTXID_CMP];
+ u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
u32 ctxid_mask;
u32 sync_freq;
u32 timestamp_event;
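coresight_vpid_to_pid() itself is not shown in this diff. A plausible sketch of such a helper, assuming it resolves the caller-visible (namespace-local) PID to the init-namespace PID and degenerates to the identity when PID namespaces are disabled:

    static inline pid_t coresight_vpid_to_pid(pid_t vpid)
    {
            struct task_struct *task;
            pid_t pid = 0;

            rcu_read_lock();
            /* Look the task up by the PID seen in the current namespace. */
            task = find_task_by_vpid(vpid);
            if (task)
                    pid = task_pid_nr(task); /* PID in the init namespace */
            rcu_read_unlock();

            return pid;
    }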
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 018a00fda611..bf2476ed9356 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -237,8 +237,11 @@ static void etm_set_default(struct etm_drvdata *drvdata)
drvdata->seq_curr_state = 0x0;
drvdata->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
- drvdata->ctxid_val[i] = 0x0;
+ for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
+ drvdata->ctxid_pid[i] = 0x0;
+ drvdata->ctxid_vpid[i] = 0x0;
+ }
+
drvdata->ctxid_mask = 0x0;
}
@@ -289,7 +292,7 @@ static void etm_enable_hw(void *info)
for (i = 0; i < drvdata->nr_ext_out; i++)
etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
- etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+ etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
/* No external input selected */
@@ -1386,38 +1389,41 @@ static ssize_t ctxid_idx_store(struct device *dev,
}
static DEVICE_ATTR_RW(ctxid_idx);
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
- val = drvdata->ctxid_val[drvdata->ctxid_idx];
+ val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
- unsigned long val;
+ unsigned long vpid, pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = kstrtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &vpid);
if (ret)
return ret;
+ pid = coresight_vpid_to_pid(vpid);
+
spin_lock(&drvdata->spinlock);
- drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+ drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
+ drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
static ssize_t ctxid_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1609,7 +1615,7 @@ static struct attribute *coresight_etm_attrs[] = {
&dev_attr_seq_13_event.attr,
&dev_attr_seq_curr_state.attr,
&dev_attr_ctxid_idx.attr,
- &dev_attr_ctxid_val.attr,
+ &dev_attr_ctxid_pid.attr,
&dev_attr_ctxid_mask.attr,
&dev_attr_sync_freq.attr,
&dev_attr_timestamp_event.attr,
@@ -1912,6 +1918,11 @@ static struct amba_id etm_ids[] = {
.mask = 0x0003ffff,
.data = "PTM 1.1",
},
+ { /* PTM 1.1 Qualcomm */
+ .id = 0x0003006f,
+ .mask = 0x0003ffff,
+ .data = "PTM 1.1",
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1312e993c501..254a81a4e6f4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -155,7 +155,7 @@ static void etm4_enable_hw(void *info)
drvdata->base + TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(drvdata->ctxid_val[i],
+ writeq_relaxed(drvdata->ctxid_pid[i],
drvdata->base + TRCCIDCVRn(i));
writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
@@ -506,8 +506,11 @@ static ssize_t reset_store(struct device *dev,
}
drvdata->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++)
- drvdata->ctxid_val[i] = 0x0;
+ for (i = 0; i < drvdata->numcidc; i++) {
+ drvdata->ctxid_pid[i] = 0x0;
+ drvdata->ctxid_vpid[i] = 0x0;
+ }
+
drvdata->ctxid_mask0 = 0x0;
drvdata->ctxid_mask1 = 0x0;
@@ -1815,7 +1818,7 @@ static ssize_t ctxid_idx_store(struct device *dev,
}
static DEVICE_ATTR_RW(ctxid_idx);
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -1825,17 +1828,17 @@ static ssize_t ctxid_val_show(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = drvdata->ctxid_idx;
- val = (unsigned long)drvdata->ctxid_val[idx];
+ val = (unsigned long)drvdata->ctxid_vpid[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u8 idx;
- unsigned long val;
+ unsigned long vpid, pid;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
/*
@@ -1845,16 +1848,19 @@ static ssize_t ctxid_val_store(struct device *dev,
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (kstrtoul(buf, 16, &val))
+ if (kstrtoul(buf, 16, &vpid))
return -EINVAL;
+ pid = coresight_vpid_to_pid(vpid);
+
spin_lock(&drvdata->spinlock);
idx = drvdata->ctxid_idx;
- drvdata->ctxid_val[idx] = (u64)val;
+ drvdata->ctxid_pid[idx] = (u64)pid;
+ drvdata->ctxid_vpid[idx] = (u64)vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
static ssize_t ctxid_masks_show(struct device *dev,
struct device_attribute *attr,
@@ -1949,7 +1955,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
*/
for (j = 0; j < 8; j++) {
if (maskbyte & 1)
- drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
+ drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
maskbyte >>= 1;
}
/* Select the next ctxid comparator mask value */
@@ -2193,7 +2199,7 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_res_idx.attr,
&dev_attr_res_ctrl.attr,
&dev_attr_ctxid_idx.attr,
- &dev_attr_ctxid_val.attr,
+ &dev_attr_ctxid_pid.attr,
&dev_attr_ctxid_masks.attr,
&dev_attr_vmid_idx.attr,
&dev_attr_vmid_val.attr,
@@ -2513,8 +2519,11 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
- for (i = 0; i < drvdata->numcidc; i++)
- drvdata->ctxid_val[i] = 0x0;
+ for (i = 0; i < drvdata->numcidc; i++) {
+ drvdata->ctxid_pid[i] = 0x0;
+ drvdata->ctxid_vpid[i] = 0x0;
+ }
+
drvdata->ctxid_mask0 = 0x0;
drvdata->ctxid_mask1 = 0x0;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index e08e983dd2d9..c34100205ca9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -265,7 +265,9 @@
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
* @ctxid_size: Size of the context ID field to consider.
- * @ctxid_val: Value of the context ID comparator.
+ * @ctxid_pid: Value of the context ID comparator.
+ * @ctxid_vpid: Virtual PID seen by users if PID namespaces are enabled,
+ * otherwise the same value as ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
@@ -352,7 +354,8 @@ struct etmv4_drvdata {
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
u8 ctxid_size;
- u64 ctxid_val[ETMv4_MAX_CTXID_CMP];
+ u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
+ u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 7974b7c3da6b..963ac197c253 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -184,17 +183,7 @@ static struct platform_driver replicator_driver = {
},
};
-static int __init replicator_init(void)
-{
- return platform_driver_register(&replicator_driver);
-}
-module_init(replicator_init);
-
-static void __exit replicator_exit(void)
-{
- platform_driver_unregister(&replicator_driver);
-}
-module_exit(replicator_exit);
+builtin_platform_driver(replicator_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 35ac23768ce9..08b86178e8fb 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -526,6 +526,13 @@ config I2C_EG20T
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+config I2C_EMEV2
+ tristate "EMMA Mobile series I2C adapter"
+ depends on HAVE_CLK
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on the Renesas Electronics EM/EV family of processors.
+
config I2C_EXYNOS5
tristate "Exynos5 high-speed I2C driver"
depends on ARCH_EXYNOS && OF
@@ -612,6 +619,16 @@ config I2C_KEMPLD
This driver can also be built as a module. If so, the module
will be called i2c-kempld.
+config I2C_LPC2K
+ tristate "I2C bus support for NXP LPC2K/LPC178x/18xx/43xx"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ help
+ This driver supports the I2C interface found on several NXP
+ devices including LPC2xxx, LPC178x/7x and LPC18xx/43xx.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-lpc2k.
+
config I2C_MESON
tristate "Amlogic Meson I2C controller"
depends on ARCH_MESON
@@ -633,6 +650,7 @@ config I2C_MPC
config I2C_MT65XX
tristate "MediaTek I2C adapter"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
help
This selects the MediaTek(R) Integrated Inter Circuit bus driver
for MT65xx and MT81xx.
@@ -1122,7 +1140,7 @@ config I2C_SIBYTE
config I2C_CROS_EC_TUNNEL
tristate "ChromeOS EC tunnel I2C bus"
- depends on CROS_EC_PROTO
+ depends on MFD_CROS_EC
help
If you say yes here you get an I2C bus that will tunnel i2c commands
through to the other side of the ChromeOS EC to the i2c bus
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e5f537c80da0..6df3b303bd09 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -48,6 +48,7 @@ i2c-designware-pci-objs := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
+obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
+obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
obj-$(CONFIG_I2C_MESON) += i2c-meson.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MT65XX) += i2c-mt65xx.o
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iface);
- dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
+ dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
"regs_base@%p\n", iface->regs_base);
return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
module_exit(i2c_bfin_twi_exit);
MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
-MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 2ee78e099d30..84deed6571bd 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -113,6 +114,8 @@
#define CDNS_I2C_TIMEOUT_MAX 0xFF
+#define CDNS_I2C_BROKEN_HOLD_BIT BIT(0)
+
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
@@ -135,6 +138,7 @@
* @bus_hold_flag: Flag used in repeated start for clearing HOLD bit
* @clk: Pointer to struct clk
* @clk_rate_change_nb: Notifier block for clock rate changes
+ * @quirks: flag for broken hold bit usage in r1p10
*/
struct cdns_i2c {
void __iomem *membase;
@@ -154,6 +158,11 @@ struct cdns_i2c {
unsigned int bus_hold_flag;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
+ u32 quirks;
+};
+
+struct cdns_platform_data {
+ u32 quirks;
};
#define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \
@@ -172,6 +181,12 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
cdns_i2c_writereg(reg & ~CDNS_I2C_CR_HOLD, CDNS_I2C_CR_OFFSET);
}
+static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround)
+{
+ return (hold_wrkaround &&
+ (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1));
+}
+
/**
* cdns_i2c_isr - Interrupt handler for the I2C device
* @irq: irq number for the I2C device
@@ -186,6 +201,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
{
unsigned int isr_status, avail_bytes, updatetx;
unsigned int bytes_to_send;
+ bool hold_quirk;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@@ -208,6 +224,8 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
if (id->recv_count > id->curr_recv_count)
updatetx = 1;
+ hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
((isr_status & CDNS_I2C_IXR_COMP) ||
@@ -229,8 +247,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
id->recv_count--;
id->curr_recv_count--;
- if (updatetx &&
- (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+ if (cdns_is_holdquirk(id, hold_quirk))
break;
}
@@ -241,8 +258,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
* maintain transfer size non-zero while performing a large
* receive operation.
*/
- if (updatetx &&
- (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+ if (cdns_is_holdquirk(id, hold_quirk)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -264,6 +280,22 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
+ } else if (id->recv_count && !hold_quirk &&
+ !id->curr_recv_count) {
+
+ /* Set the slave address in the address register */
+ cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+
+ if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
+ cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+ } else {
+ cdns_i2c_writereg(id->recv_count,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ id->curr_recv_count = id->recv_count;
+ }
}
/* Clear hold (if not repeated start) and signal completion */
@@ -535,11 +567,13 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int ret, count;
u32 reg;
struct cdns_i2c *id = adap->algo_data;
+ bool hold_quirk;
/* Check if the bus is free */
if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA)
return -EAGAIN;
+ hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
/*
* Set the flag to one when multiple messages are to be
* processed with a repeated start.
@@ -552,7 +586,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* followed by any other message, an error is returned
* indicating that this sequence is not supported.
*/
- for (count = 0; count < num - 1; count++) {
+ for (count = 0; (count < num - 1 && hold_quirk); count++) {
if (msgs[count].flags & I2C_M_RD) {
dev_warn(adap->dev.parent,
"Can't do repeated start after a receive message\n");
@@ -815,6 +849,17 @@ static int __maybe_unused cdns_i2c_resume(struct device *_dev)
static SIMPLE_DEV_PM_OPS(cdns_i2c_dev_pm_ops, cdns_i2c_suspend,
cdns_i2c_resume);
+static const struct cdns_platform_data r1p10_i2c_def = {
+ .quirks = CDNS_I2C_BROKEN_HOLD_BIT,
+};
+
+static const struct of_device_id cdns_i2c_of_match[] = {
+ { .compatible = "cdns,i2c-r1p10", .data = &r1p10_i2c_def },
+ { .compatible = "cdns,i2c-r1p14",},
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
+
/**
* cdns_i2c_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -830,6 +875,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
struct resource *r_mem;
struct cdns_i2c *id;
int ret;
+ const struct of_device_id *match;
id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
if (!id)
@@ -837,6 +883,12 @@ static int cdns_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, id);
+ match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ const struct cdns_platform_data *data = match->data;
+ id->quirks = data->quirks;
+ }
+
r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
if (IS_ERR(id->membase))
@@ -844,6 +896,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->irq = platform_get_irq(pdev, 0);
+ id->adap.owner = THIS_MODULE;
id->adap.dev.of_node = pdev->dev.of_node;
id->adap.algo = &cdns_i2c_algo;
id->adap.timeout = CDNS_I2C_TIMEOUT;
@@ -935,12 +988,6 @@ static int cdns_i2c_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id cdns_i2c_of_match[] = {
- { .compatible = "cdns,i2c-r1p10", },
- { /* end of table */ }
-};
-MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
-
static struct platform_driver cdns_i2c_drv = {
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6f19a33773fe..7441cdc1b34a 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -777,8 +777,7 @@ irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
enabled = dw_readl(dev, DW_IC_ENABLE);
stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
- dev_dbg(dev->dev, "%s: %s enabled= 0x%x stat=0x%x\n", __func__,
- dev->adapter.name, enabled, stat);
+ dev_dbg(dev->dev, "%s: enabled=%#x stat=%#x\n", __func__, enabled, stat);
if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
return IRQ_NONE;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 6643d2dc0b25..df23e8c30e6f 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -260,8 +260,8 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci");
- r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
- adap->name, dev);
+ r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr,
+ IRQF_SHARED | IRQF_COND_SUSPEND, adap->name, dev);
if (r) {
dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
return r;
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
new file mode 100644
index 000000000000..192ef6b50c79
--- /dev/null
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -0,0 +1,332 @@
+/*
+ * I2C driver for the Renesas EMEV2 SoC
+ *
+ * Copyright (C) 2015 Wolfram Sang <wsa@sang-engineering.com>
+ * Copyright 2013 Codethink Ltd.
+ * Copyright 2010-2015 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+/* I2C Registers */
+#define I2C_OFS_IICACT0 0x00 /* start */
+#define I2C_OFS_IIC0 0x04 /* shift */
+#define I2C_OFS_IICC0 0x08 /* control */
+#define I2C_OFS_SVA0 0x0c /* slave address */
+#define I2C_OFS_IICCL0 0x10 /* clock select */
+#define I2C_OFS_IICX0 0x14 /* extension */
+#define I2C_OFS_IICS0 0x18 /* status */
+#define I2C_OFS_IICSE0 0x1c /* status for emulation */
+#define I2C_OFS_IICF0 0x20 /* IIC flag */
+
+/* I2C IICACT0 Masks */
+#define I2C_BIT_IICE0 0x0001
+
+/* I2C IICC0 Masks */
+#define I2C_BIT_LREL0 0x0040
+#define I2C_BIT_WREL0 0x0020
+#define I2C_BIT_SPIE0 0x0010
+#define I2C_BIT_WTIM0 0x0008
+#define I2C_BIT_ACKE0 0x0004
+#define I2C_BIT_STT0 0x0002
+#define I2C_BIT_SPT0 0x0001
+
+/* I2C IICCL0 Masks */
+#define I2C_BIT_SMC0 0x0008
+#define I2C_BIT_DFC0 0x0004
+
+/* I2C IICSE0 Masks */
+#define I2C_BIT_MSTS0 0x0080
+#define I2C_BIT_ALD0 0x0040
+#define I2C_BIT_EXC0 0x0020
+#define I2C_BIT_COI0 0x0010
+#define I2C_BIT_TRC0 0x0008
+#define I2C_BIT_ACKD0 0x0004
+#define I2C_BIT_STD0 0x0002
+#define I2C_BIT_SPD0 0x0001
+
+/* I2C IICF0 Masks */
+#define I2C_BIT_STCF 0x0080
+#define I2C_BIT_IICBSY 0x0040
+#define I2C_BIT_STCEN 0x0002
+#define I2C_BIT_IICRSV 0x0001
+
+struct em_i2c_device {
+ void __iomem *base;
+ struct i2c_adapter adap;
+ struct completion msg_done;
+ struct clk *sclk;
+};
+
+static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
+{
+ writeb((readb(priv->base + reg) & ~clear) | set, priv->base + reg);
+}
+
+static int em_i2c_wait_for_event(struct em_i2c_device *priv)
+{
+ unsigned long time_left;
+ int status;
+
+ reinit_completion(&priv->msg_done);
+
+ time_left = wait_for_completion_timeout(&priv->msg_done, priv->adap.timeout);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ status = readb(priv->base + I2C_OFS_IICSE0);
+ return status & I2C_BIT_ALD0 ? -EAGAIN : status;
+}
+
+static void em_i2c_stop(struct em_i2c_device *priv)
+{
+ /* Send Stop condition */
+ em_clear_set_bit(priv, 0, I2C_BIT_SPT0 | I2C_BIT_SPIE0, I2C_OFS_IICC0);
+
+ /* Wait for stop condition */
+ em_i2c_wait_for_event(priv);
+}
+
+static void em_i2c_reset(struct i2c_adapter *adap)
+{
+ struct em_i2c_device *priv = i2c_get_adapdata(adap);
+ int retr;
+
+ /* If I2C active */
+ if (readb(priv->base + I2C_OFS_IICACT0) & I2C_BIT_IICE0) {
+ /* Disable I2C operation */
+ writeb(0, priv->base + I2C_OFS_IICACT0);
+
+ retr = 1000;
+ while (readb(priv->base + I2C_OFS_IICACT0) == 1 && retr)
+ retr--;
+ WARN_ON(retr == 0);
+ }
+
+ /* Transfer mode set */
+ writeb(I2C_BIT_DFC0, priv->base + I2C_OFS_IICCL0);
+
+ /* Can issue a start without detecting a stop; reservation disabled. */
+ writeb(I2C_BIT_STCEN | I2C_BIT_IICRSV, priv->base + I2C_OFS_IICF0);
+
+ /* I2C enable, 9 bit interrupt mode */
+ writeb(I2C_BIT_WTIM0, priv->base + I2C_OFS_IICC0);
+
+ /* Enable I2C operation */
+ writeb(I2C_BIT_IICE0, priv->base + I2C_OFS_IICACT0);
+
+ retr = 1000;
+ while (readb(priv->base + I2C_OFS_IICACT0) == 0 && retr)
+ retr--;
+ WARN_ON(retr == 0);
+}
+
+static int __em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int stop)
+{
+ struct em_i2c_device *priv = i2c_get_adapdata(adap);
+ int count, status, read = !!(msg->flags & I2C_M_RD);
+
+ /* Send start condition */
+ em_clear_set_bit(priv, 0, I2C_BIT_ACKE0 | I2C_BIT_WTIM0, I2C_OFS_IICC0);
+ em_clear_set_bit(priv, 0, I2C_BIT_STT0, I2C_OFS_IICC0);
+
+ /* Send slave address and R/W type */
+ writeb((msg->addr << 1) | read, priv->base + I2C_OFS_IIC0);
+
+ /* Wait for transaction */
+ status = em_i2c_wait_for_event(priv);
+ if (status < 0)
+ goto out_reset;
+
+ /* Received NACK (result of setting slave address and R/W) */
+ if (!(status & I2C_BIT_ACKD0)) {
+ em_i2c_stop(priv);
+ goto out;
+ }
+
+ /* Extra setup for read transactions */
+ if (read) {
+ /* 8 bit interrupt mode */
+ em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_ACKE0, I2C_OFS_IICC0);
+ em_clear_set_bit(priv, I2C_BIT_WTIM0, I2C_BIT_WREL0, I2C_OFS_IICC0);
+
+ /* Wait for transaction */
+ status = em_i2c_wait_for_event(priv);
+ if (status < 0)
+ goto out_reset;
+ }
+
+ /* Send / receive data */
+ for (count = 0; count < msg->len; count++) {
+ if (read) { /* Read transaction */
+ msg->buf[count] = readb(priv->base + I2C_OFS_IIC0);
+ em_clear_set_bit(priv, 0, I2C_BIT_WREL0, I2C_OFS_IICC0);
+
+ } else { /* Write transaction */
+ /* Received NACK */
+ if (!(status & I2C_BIT_ACKD0)) {
+ em_i2c_stop(priv);
+ goto out;
+ }
+
+ /* Write data */
+ writeb(msg->buf[count], priv->base + I2C_OFS_IIC0);
+ }
+
+ /* Wait for R/W transaction */
+ status = em_i2c_wait_for_event(priv);
+ if (status < 0)
+ goto out_reset;
+ }
+
+ if (stop)
+ em_i2c_stop(priv);
+
+ return count;
+
+out_reset:
+ em_i2c_reset(adap);
+out:
+ return status < 0 ? status : -ENXIO;
+}
+
+static int em_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct em_i2c_device *priv = i2c_get_adapdata(adap);
+ int ret, i;
+
+ if (readb(priv->base + I2C_OFS_IICF0) & I2C_BIT_IICBSY)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ ret = __em_i2c_xfer(adap, &msgs[i], (i == (num - 1)));
+ if (ret < 0)
+ return ret;
+ }
+
+ /* I2C transfer completed */
+ return num;
+}
+
+static irqreturn_t em_i2c_irq_handler(int this_irq, void *dev_id)
+{
+ struct em_i2c_device *priv = dev_id;
+
+ complete(&priv->msg_done);
+ return IRQ_HANDLED;
+}
+
+static u32 em_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm em_i2c_algo = {
+ .master_xfer = em_i2c_xfer,
+ .functionality = em_i2c_func,
+};
+
+static int em_i2c_probe(struct platform_device *pdev)
+{
+ struct em_i2c_device *priv;
+ struct resource *r;
+ int irq, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+
+ priv->sclk = devm_clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(priv->sclk))
+ return PTR_ERR(priv->sclk);
+
+ clk_prepare_enable(priv->sclk);
+
+ priv->adap.timeout = msecs_to_jiffies(100);
+ priv->adap.retries = 5;
+ priv->adap.dev.parent = &pdev->dev;
+ priv->adap.algo = &em_i2c_algo;
+ priv->adap.owner = THIS_MODULE;
+ priv->adap.dev.of_node = pdev->dev.of_node;
+
+ init_completion(&priv->msg_done);
+
+ platform_set_drvdata(pdev, priv);
+ i2c_set_adapdata(&priv->adap, priv);
+
+ em_i2c_reset(&priv->adap);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+ "em_i2c", priv);
+ if (ret)
+ goto err_clk;
+
+ ret = i2c_add_adapter(&priv->adap);
+
+ if (ret)
+ goto err_clk;
+
+ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(priv->sclk);
+ return ret;
+}
+
+static int em_i2c_remove(struct platform_device *dev)
+{
+ struct em_i2c_device *priv = platform_get_drvdata(dev);
+
+ i2c_del_adapter(&priv->adap);
+ clk_disable_unprepare(priv->sclk);
+
+ return 0;
+}
+
+static const struct of_device_id em_i2c_ids[] = {
+ { .compatible = "renesas,iic-emev2", },
+ { }
+};
+
+static struct platform_driver em_i2c_driver = {
+ .probe = em_i2c_probe,
+ .remove = em_i2c_remove,
+ .driver = {
+ .name = "em-i2c",
+ .of_match_table = em_i2c_ids,
+ }
+};
+module_platform_driver(em_i2c_driver);
+
+MODULE_DESCRIPTION("EMEV2 I2C bus driver");
+MODULE_AUTHOR("Ian Molton and Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, em_i2c_ids);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5ecbb3fdc27e..eaef9bc9d88c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -88,12 +88,13 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/itco_wdt.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
#include <linux/gpio.h>
#include <linux/i2c-mux-gpio.h>
-#include <linux/platform_device.h>
#endif
/* I801 SMBus address offsets */
@@ -113,6 +114,16 @@
#define SMBPCICTL 0x004
#define SMBPCISTS 0x006
#define SMBHSTCFG 0x040
+#define TCOBASE 0x050
+#define TCOCTL 0x054
+
+#define ACPIBASE 0x040
+#define ACPIBASE_SMI_OFF 0x030
+#define ACPICTRL 0x044
+#define ACPICTRL_EN 0x080
+
+#define SBREG_BAR 0x10
+#define SBREG_SMBCTRL 0xc6000c
/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS 0x08
@@ -125,6 +136,9 @@
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+/* TCO configuration bits for TCOCTL */
+#define TCOCTL_EN 0x0100
+
/* Auxiliary control register bits, ICH4+ only */
#define SMBAUXCTL_CRC 1
#define SMBAUXCTL_E32B 2
@@ -221,6 +235,7 @@ struct i801_priv {
const struct i801_mux_config *mux_drvdata;
struct platform_device *mux_pdev;
#endif
+ struct platform_device *tco_pdev;
};
#define FEATURE_SMBUS_PEC (1 << 0)
@@ -230,6 +245,7 @@ struct i801_priv {
#define FEATURE_IRQ (1 << 4)
/* Not really a feature, but it's convenient to handle it as such */
#define FEATURE_IDF (1 << 15)
+#define FEATURE_TCO (1 << 16)
static const char *i801_feature_names[] = {
"SMBus PEC",
@@ -1132,6 +1148,95 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
+static const struct itco_wdt_platform_data tco_platform_data = {
+ .name = "Intel PCH",
+ .version = 4,
+};
+
+static DEFINE_SPINLOCK(p2sb_spinlock);
+
+static void i801_add_tco(struct i801_priv *priv)
+{
+ struct pci_dev *pci_dev = priv->pci_dev;
+ struct resource tco_res[3], *res;
+ struct platform_device *pdev;
+ unsigned int devfn;
+ u32 tco_base, tco_ctl;
+ u32 base_addr, ctrl_val;
+ u64 base64_addr;
+
+ if (!(priv->features & FEATURE_TCO))
+ return;
+
+ pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
+ pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
+ if (!(tco_ctl & TCOCTL_EN))
+ return;
+
+ memset(tco_res, 0, sizeof(tco_res));
+
+ res = &tco_res[ICH_RES_IO_TCO];
+ res->start = tco_base & ~1;
+ res->end = res->start + 32 - 1;
+ res->flags = IORESOURCE_IO;
+
+ /*
+ * Power Management registers.
+ */
+ devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
+ pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
+
+ res = &tco_res[ICH_RES_IO_SMI];
+ res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
+ res->end = res->start + 3;
+ res->flags = IORESOURCE_IO;
+
+ /*
+ * Enable the ACPI I/O space.
+ */
+ pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
+ ctrl_val |= ACPICTRL_EN;
+ pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
+
+ /*
+ * We must access the NO_REBOOT bit over the Primary to Sideband
+ * bridge (P2SB). The BIOS prevents the P2SB device from being
+ * enumerated by the PCI subsystem, so we need to unhide/hide it
+ * to lookup the P2SB BAR.
+ */
+ spin_lock(&p2sb_spinlock);
+
+ devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
+
+ /* Unhide the P2SB device */
+ pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
+
+ pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
+ base64_addr = base_addr & 0xfffffff0;
+
+ pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
+ base64_addr |= (u64)base_addr << 32;
+
+ /* Hide the P2SB device */
+ pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x1);
+ spin_unlock(&p2sb_spinlock);
+
+ res = &tco_res[ICH_RES_MEM_OFF];
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ res->end = res->start + 3;
+ res->flags = IORESOURCE_MEM;
+
+ pdev = platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+ tco_res, 3, &tco_platform_data,
+ sizeof(tco_platform_data));
+ if (IS_ERR(pdev)) {
+ dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+ return;
+ }
+
+ priv->tco_pdev = pdev;
+}
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1149,6 +1254,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->pci_dev = dev;
switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+ priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
+ priv->features |= FEATURE_SMBUS_PEC;
+ priv->features |= FEATURE_BLOCK_BUFFER;
+ priv->features |= FEATURE_TCO;
+ break;
+
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
@@ -1265,6 +1379,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info(&dev->dev, "SMBus using %s\n",
priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
+ i801_add_tco(priv);
+
/* set up the sysfs linkage to our parent device */
priv->adapter.dev.parent = &dev->dev;
@@ -1296,6 +1412,8 @@ static void i801_remove(struct pci_dev *dev)
i2c_del_adapter(&priv->adapter);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+ platform_device_unregister(priv->tco_pdev);
+
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
* some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 19b2d689a5ef..f325663c27c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
if (IS_ERR(i2c->clk))
return PTR_ERR(i2c->clk);
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clk_freq)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &clk_freq);
+ if (ret) {
dev_err(&pdev->dev, "clock-frequency not specified in DT");
- return clk_freq;
+ goto err;
}
i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto err;
- }
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
new file mode 100644
index 000000000000..8560a13bf1b3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2011 NXP Semiconductors
+ *
+ * Code portions referenced from the i2c-pxa and i2c-pnx drivers
+ *
+ * Make SMBus byte and word transactions work on LPC178x/7x
+ * Copyright (c) 2012
+ * Alexander Potashev, Emcraft Systems, aspotashev@emcraft.com
+ * Anton Protopopov, Emcraft Systems, antonp@emcraft.com
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+
+/* LPC24xx register offsets and bits */
+#define LPC24XX_I2CONSET 0x00
+#define LPC24XX_I2STAT 0x04
+#define LPC24XX_I2DAT 0x08
+#define LPC24XX_I2ADDR 0x0c
+#define LPC24XX_I2SCLH 0x10
+#define LPC24XX_I2SCLL 0x14
+#define LPC24XX_I2CONCLR 0x18
+
+#define LPC24XX_AA BIT(2)
+#define LPC24XX_SI BIT(3)
+#define LPC24XX_STO BIT(4)
+#define LPC24XX_STA BIT(5)
+#define LPC24XX_I2EN BIT(6)
+
+#define LPC24XX_STO_AA (LPC24XX_STO | LPC24XX_AA)
+#define LPC24XX_CLEAR_ALL (LPC24XX_AA | LPC24XX_SI | LPC24XX_STO | \
+ LPC24XX_STA | LPC24XX_I2EN)
+
+/* I2C SCL clock has different duty cycle depending on mode */
+#define I2C_STD_MODE_DUTY 46
+#define I2C_FAST_MODE_DUTY 36
+#define I2C_FAST_MODE_PLUS_DUTY 38
+
+/*
+ * Of the 26 possible I2C status codes, only those applicable to
+ * master mode are listed here and used in this driver.
+ */
+enum {
+ M_BUS_ERROR = 0x00,
+ M_START = 0x08,
+ M_REPSTART = 0x10,
+ MX_ADDR_W_ACK = 0x18,
+ MX_ADDR_W_NACK = 0x20,
+ MX_DATA_W_ACK = 0x28,
+ MX_DATA_W_NACK = 0x30,
+ M_DATA_ARB_LOST = 0x38,
+ MR_ADDR_R_ACK = 0x40,
+ MR_ADDR_R_NACK = 0x48,
+ MR_DATA_R_ACK = 0x50,
+ MR_DATA_R_NACK = 0x58,
+ M_I2C_IDLE = 0xf8,
+};
+
+struct lpc2k_i2c {
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ wait_queue_head_t wait;
+ struct i2c_adapter adap;
+ struct i2c_msg *msg;
+ int msg_idx;
+ int msg_status;
+ int is_last;
+};
+
+static void i2c_lpc2k_reset(struct lpc2k_i2c *i2c)
+{
+ /* Force-clear all status bits */
+ writel(LPC24XX_CLEAR_ALL, i2c->base + LPC24XX_I2CONCLR);
+ writel(0, i2c->base + LPC24XX_I2ADDR);
+ writel(LPC24XX_I2EN, i2c->base + LPC24XX_I2CONSET);
+}
+
+static int i2c_lpc2k_clear_arb(struct lpc2k_i2c *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /*
+ * If the transfer needs to abort for some reason, we'll try to
+ * force a stop condition to clear any pending bus conditions
+ */
+ writel(LPC24XX_STO, i2c->base + LPC24XX_I2CONSET);
+
+ /* Wait for status change */
+ while (readl(i2c->base + LPC24XX_I2STAT) != M_I2C_IDLE) {
+ if (time_after(jiffies, timeout)) {
+ /* Bus was not idle, try to reset adapter */
+ i2c_lpc2k_reset(i2c);
+ return -EBUSY;
+ }
+
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void i2c_lpc2k_pump_msg(struct lpc2k_i2c *i2c)
+{
+ unsigned char data;
+ u32 status;
+
+ /*
+ * I2C in the LPC2xxx series is basically a state machine.
+ * Just run through the steps based on the current status.
+ */
+ status = readl(i2c->base + LPC24XX_I2STAT);
+
+ switch (status) {
+ case M_START:
+ case M_REPSTART:
+ /* Start bit was just sent out, send out addr and dir */
+ data = i2c->msg->addr << 1;
+ if (i2c->msg->flags & I2C_M_RD)
+ data |= 1;
+
+ writel(data, i2c->base + LPC24XX_I2DAT);
+ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+ break;
+
+ case MX_ADDR_W_ACK:
+ case MX_DATA_W_ACK:
+ /*
+ * Address or data was sent out with an ACK. If there is more
+ * data to send, send it now
+ */
+ if (i2c->msg_idx < i2c->msg->len) {
+ writel(i2c->msg->buf[i2c->msg_idx],
+ i2c->base + LPC24XX_I2DAT);
+ } else if (i2c->is_last) {
+ /* Last message, send stop */
+ writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+ writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+ i2c->msg_status = 0;
+ disable_irq_nosync(i2c->irq);
+ } else {
+ i2c->msg_status = 0;
+ disable_irq_nosync(i2c->irq);
+ }
+
+ i2c->msg_idx++;
+ break;
+
+ case MR_ADDR_R_ACK:
+ /* Receive first byte from slave */
+ if (i2c->msg->len == 1) {
+ /* Last byte, return NACK */
+ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR);
+ } else {
+ /* Not last byte, return ACK */
+ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET);
+ }
+
+ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+ break;
+
+ case MR_DATA_R_NACK:
+ /*
+ * The controller reports a NACK status on reads, so we need to
+ * accept the NACK as an ACK here. This should be OK, as a real
+ * NACK would have been caught on the address write.
+ */
+ case MR_DATA_R_ACK:
+ /* Data was received */
+ if (i2c->msg_idx < i2c->msg->len) {
+ i2c->msg->buf[i2c->msg_idx] =
+ readl(i2c->base + LPC24XX_I2DAT);
+ }
+
+ /* If transfer is done, send STOP */
+ if (i2c->msg_idx >= i2c->msg->len - 1 && i2c->is_last) {
+ writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+ writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+ i2c->msg_status = 0;
+ }
+
+ /* Message is done */
+ if (i2c->msg_idx >= i2c->msg->len - 1) {
+ i2c->msg_status = 0;
+ disable_irq_nosync(i2c->irq);
+ }
+
+ /*
+ * On the second-to-last data byte, send a NACK to tell the
+ * slave that the next byte will be the last one transferred.
+ */
+ if (i2c->msg_idx >= i2c->msg->len - 2) {
+ /* One byte left to receive - NACK */
+ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONCLR);
+ } else {
+ /* More than one byte left to receive - ACK */
+ writel(LPC24XX_AA, i2c->base + LPC24XX_I2CONSET);
+ }
+
+ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
+ i2c->msg_idx++;
+ break;
+
+ case MX_ADDR_W_NACK:
+ case MX_DATA_W_NACK:
+ case MR_ADDR_R_NACK:
+ /* NACK processing is done */
+ writel(LPC24XX_STO_AA, i2c->base + LPC24XX_I2CONSET);
+ i2c->msg_status = -ENXIO;
+ disable_irq_nosync(i2c->irq);
+ break;
+
+ case M_DATA_ARB_LOST:
+ /* Arbitration lost */
+ i2c->msg_status = -EAGAIN;
+
+ /* Release the I2C bus */
+ writel(LPC24XX_STA | LPC24XX_STO, i2c->base + LPC24XX_I2CONCLR);
+ disable_irq_nosync(i2c->irq);
+ break;
+
+ default:
+ /* Unexpected statuses */
+ i2c->msg_status = -EIO;
+ disable_irq_nosync(i2c->irq);
+ break;
+ }
+
+ /* Exit on failure or all bytes transferred */
+ if (i2c->msg_status != -EBUSY)
+ wake_up(&i2c->wait);
+
+ /*
+ * If msg_status is zero, then lpc2k_process_msg() is
+ * responsible for clearing the SI flag.
+ */
+ if (i2c->msg_status != 0)
+ writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+}
+
+static int lpc2k_process_msg(struct lpc2k_i2c *i2c, int msgidx)
+{
+ /* A new transfer is kicked off by initiating a start condition */
+ if (!msgidx) {
+ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET);
+ } else {
+ /*
+ * A multi-message I2C transfer continues where the
+ * previous I2C transfer left off and uses the
+ * current condition of the I2C adapter.
+ */
+ if (unlikely(i2c->msg->flags & I2C_M_NOSTART)) {
+ WARN_ON(i2c->msg->len == 0);
+
+ if (!(i2c->msg->flags & I2C_M_RD)) {
+ /* Start transmit of data */
+ writel(i2c->msg->buf[0],
+ i2c->base + LPC24XX_I2DAT);
+ i2c->msg_idx++;
+ }
+ } else {
+ /* Start or repeated start */
+ writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONSET);
+ }
+
+ writel(LPC24XX_SI, i2c->base + LPC24XX_I2CONCLR);
+ }
+
+ enable_irq(i2c->irq);
+
+ /* Wait for transfer completion */
+ if (wait_event_timeout(i2c->wait, i2c->msg_status != -EBUSY,
+ msecs_to_jiffies(1000)) == 0) {
+ disable_irq_nosync(i2c->irq);
+
+ return -ETIMEDOUT;
+ }
+
+ return i2c->msg_status;
+}
+
+static int i2c_lpc2k_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int msg_num)
+{
+ struct lpc2k_i2c *i2c = i2c_get_adapdata(adap);
+ int ret, i;
+ u32 stat;
+
+ /* Check for bus idle condition */
+ stat = readl(i2c->base + LPC24XX_I2STAT);
+ if (stat != M_I2C_IDLE) {
+ /* Something is holding the bus, try to clear it */
+ return i2c_lpc2k_clear_arb(i2c);
+ }
+
+ /* Process a single message at a time */
+ for (i = 0; i < msg_num; i++) {
+ /* Save message pointer and current message data index */
+ i2c->msg = &msgs[i];
+ i2c->msg_idx = 0;
+ i2c->msg_status = -EBUSY;
+ i2c->is_last = (i == (msg_num - 1));
+
+ ret = lpc2k_process_msg(i2c, i);
+ if (ret)
+ return ret;
+ }
+
+ return msg_num;
+}
+
+static irqreturn_t i2c_lpc2k_handler(int irq, void *dev_id)
+{
+ struct lpc2k_i2c *i2c = dev_id;
+
+ if (readl(i2c->base + LPC24XX_I2CONSET) & LPC24XX_SI) {
+ i2c_lpc2k_pump_msg(i2c);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static u32 i2c_lpc2k_functionality(struct i2c_adapter *adap)
+{
+ /* Only emulated SMBus for now */
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm i2c_lpc2k_algorithm = {
+ .master_xfer = i2c_lpc2k_xfer,
+ .functionality = i2c_lpc2k_functionality,
+};
+
+static int i2c_lpc2k_probe(struct platform_device *pdev)
+{
+ struct lpc2k_i2c *i2c;
+ struct resource *res;
+ u32 bus_clk_rate;
+ u32 scl_high;
+ u32 clkrate;
+ int ret;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
+
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0) {
+ dev_err(&pdev->dev, "can't get interrupt resource\n");
+ return i2c->irq;
+ }
+
+ init_waitqueue_head(&i2c->wait);
+
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ return PTR_ERR(i2c->clk);
+ }
+
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock.\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2c->irq, i2c_lpc2k_handler, 0,
+ dev_name(&pdev->dev), i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't request interrupt.\n");
+ goto fail_clk;
+ }
+
+ disable_irq_nosync(i2c->irq);
+
+ /* Place the controller in a known state */
+ i2c_lpc2k_reset(i2c);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &bus_clk_rate);
+ if (ret)
+ bus_clk_rate = 100000; /* 100 kHz default clock rate */
+
+ clkrate = clk_get_rate(i2c->clk);
+ if (clkrate == 0) {
+ dev_err(&pdev->dev, "can't get I2C base clock\n");
+ ret = -EINVAL;
+ goto fail_clk;
+ }
+
+ /* Setup I2C dividers to generate clock with proper duty cycle */
+ clkrate = clkrate / bus_clk_rate;
+ if (bus_clk_rate <= 100000)
+ scl_high = (clkrate * I2C_STD_MODE_DUTY) / 100;
+ else if (bus_clk_rate <= 400000)
+ scl_high = (clkrate * I2C_FAST_MODE_DUTY) / 100;
+ else
+ scl_high = (clkrate * I2C_FAST_MODE_PLUS_DUTY) / 100;
+
+ writel(scl_high, i2c->base + LPC24XX_I2SCLH);
+ writel(clkrate - scl_high, i2c->base + LPC24XX_I2SCLL);
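Worked example for the divider math, assuming a 60 MHz peripheral clock at the default 100 kHz bus rate: clkrate = 60000000 / 100000 = 600, scl_high = 600 * 46 / 100 = 276, and I2SCLL receives 600 - 276 = 324, so SCL is high for 46% of each cycle as standard mode requires.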
+
+ platform_set_drvdata(pdev, i2c);
+
+ i2c_set_adapdata(&i2c->adap, i2c);
+ i2c->adap.owner = THIS_MODULE;
+ strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+ i2c->adap.algo = &i2c_lpc2k_algorithm;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add adapter!\n");
+ goto fail_clk;
+ }
+
+ dev_info(&pdev->dev, "LPC2K I2C adapter\n");
+
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+}
+
+static int i2c_lpc2k_remove(struct platform_device *dev)
+{
+ struct lpc2k_i2c *i2c = platform_get_drvdata(dev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_disable_unprepare(i2c->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int i2c_lpc2k_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc2k_i2c *i2c = platform_get_drvdata(pdev);
+
+ clk_disable(i2c->clk);
+
+ return 0;
+}
+
+static int i2c_lpc2k_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc2k_i2c *i2c = platform_get_drvdata(pdev);
+
+ clk_enable(i2c->clk);
+ i2c_lpc2k_reset(i2c);
+
+ return 0;
+}
+
+static const struct dev_pm_ops i2c_lpc2k_dev_pm_ops = {
+ .suspend_noirq = i2c_lpc2k_suspend,
+ .resume_noirq = i2c_lpc2k_resume,
+};
+
+#define I2C_LPC2K_DEV_PM_OPS (&i2c_lpc2k_dev_pm_ops)
+#else
+#define I2C_LPC2K_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id lpc2k_i2c_match[] = {
+ { .compatible = "nxp,lpc1788-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lpc2k_i2c_match);
+
+static struct platform_driver i2c_lpc2k_driver = {
+ .probe = i2c_lpc2k_probe,
+ .remove = i2c_lpc2k_remove,
+ .driver = {
+ .name = "lpc2k-i2c",
+ .pm = I2C_LPC2K_DEV_PM_OPS,
+ .of_match_table = lpc2k_i2c_match,
+ },
+};
+module_platform_driver(i2c_lpc2k_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_DESCRIPTION("I2C driver for LPC2xxx devices");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lpc2k-i2c");
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 9920eef74672..c02e6c018c39 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -59,6 +59,7 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_HARD_RST 0x0002
#define I2C_DEFAULT_SPEED 100000 /* hz */
#define MAX_FS_MODE_SPEED 400000
@@ -81,6 +82,7 @@ enum DMA_REGS_OFFSET {
OFFSET_INT_FLAG = 0x0,
OFFSET_INT_EN = 0x04,
OFFSET_EN = 0x08,
+ OFFSET_RST = 0x0c,
OFFSET_CON = 0x18,
OFFSET_TX_MEM_ADDR = 0x1c,
OFFSET_RX_MEM_ADDR = 0x20,
@@ -262,6 +264,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN;
writew(control_reg, i2c->base + OFFSET_CONTROL);
writew(I2C_DELAY_LEN, i2c->base + OFFSET_DELAY_LEN);
+
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
}
/*
@@ -551,15 +557,22 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
{
struct mtk_i2c *i2c = dev_id;
u16 restart_flag = 0;
+ u16 intr_stat;
if (i2c->dev_comp->auto_restart)
restart_flag = I2C_RS_TRANSFER;
- i2c->irq_stat = readw(i2c->base + OFFSET_INTR_STAT);
- writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR
- | I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_STAT);
+ intr_stat = readw(i2c->base + OFFSET_INTR_STAT);
+ writew(intr_stat, i2c->base + OFFSET_INTR_STAT);
- complete(&i2c->msg_complete);
+ /*
+ * When an ACK error occurs, the I2C controller generates two
+ * interrupts: first the ACK error interrupt, then the transfer
+ * complete interrupt. i2c->irq_stat needs to accumulate both
+ * interrupt values.
+ */
+ i2c->irq_stat |= intr_stat;
+ if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
+ complete(&i2c->msg_complete);
return IRQ_HANDLED;
}
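A minimal sketch of the accumulate-then-test pattern this hunk introduces; the struct and helper here are illustrative, not the driver's real layout. OR-ing each interrupt's status into irq_stat keeps an earlier ACK error visible when the completion interrupt arrives later:

struct sketch_state {
	u16 irq_stat;			/* accumulated across IRQs */
	struct completion msg_complete;	/* <linux/completion.h> */
};

static void sketch_irq(struct sketch_state *s, u16 intr_stat)
{
	s->irq_stat |= intr_stat;	/* earlier ACKERR bit survives */
	if (s->irq_stat & I2C_TRANSAC_COMP)
		complete(&s->msg_complete);	/* error still recorded */
}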
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..08d26ba61ed3 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -270,35 +270,35 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
};
-static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
+static inline void omap_i2c_write_reg(struct omap_i2c_dev *omap,
int reg, u16 val)
{
- writew_relaxed(val, i2c_dev->base +
- (i2c_dev->regs[reg] << i2c_dev->reg_shift));
+ writew_relaxed(val, omap->base +
+ (omap->regs[reg] << omap->reg_shift));
}
-static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
+static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *omap, int reg)
{
- return readw_relaxed(i2c_dev->base +
- (i2c_dev->regs[reg] << i2c_dev->reg_shift));
+ return readw_relaxed(omap->base +
+ (omap->regs[reg] << omap->reg_shift));
}
-static void __omap_i2c_init(struct omap_i2c_dev *dev)
+static void __omap_i2c_init(struct omap_i2c_dev *omap)
{
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
- omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
+ omap_i2c_write_reg(omap, OMAP_I2C_PSC_REG, omap->pscstate);
/* SCL low and high time values */
- omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
- if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ omap_i2c_write_reg(omap, OMAP_I2C_SCLL_REG, omap->scllstate);
+ omap_i2c_write_reg(omap, OMAP_I2C_SCLH_REG, omap->sclhstate);
+ if (omap->rev >= OMAP_I2C_REV_ON_3430_3530)
+ omap_i2c_write_reg(omap, OMAP_I2C_WE_REG, omap->westate);
/* Take the I2C module out of reset: */
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
/*
* NOTE: right after setting CON_EN, STAT_BB could be 0 while the
@@ -310,32 +310,32 @@ static void __omap_i2c_init(struct omap_i2c_dev *dev)
* Don't write to this register if the IE state is 0 as it can
* cause deadlock.
*/
- if (dev->iestate)
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+ if (omap->iestate)
+ omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, omap->iestate);
}
-static int omap_i2c_reset(struct omap_i2c_dev *dev)
+static int omap_i2c_reset(struct omap_i2c_dev *omap)
{
unsigned long timeout;
u16 sysc;
- if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
- sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
+ if (omap->rev >= OMAP_I2C_OMAP1_REV_2) {
+ sysc = omap_i2c_read_reg(omap, OMAP_I2C_SYSC_REG);
/* Disable I2C controller before soft reset */
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
- omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG,
+ omap_i2c_read_reg(omap, OMAP_I2C_CON_REG) &
~(OMAP_I2C_CON_EN));
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
+ omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
/* For some reason we need to set the EN bit before the
* reset done bit gets set. */
timeout = jiffies + OMAP_I2C_TIMEOUT;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) &
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ while (!(omap_i2c_read_reg(omap, OMAP_I2C_SYSS_REG) &
SYSS_RESETDONE_MASK)) {
if (time_after(jiffies, timeout)) {
- dev_warn(dev->dev, "timeout waiting "
+ dev_warn(omap->dev, "timeout waiting "
"for controller reset\n");
return -ETIMEDOUT;
}
@@ -343,18 +343,18 @@ static int omap_i2c_reset(struct omap_i2c_dev *dev)
}
/* SYSC register is cleared by the reset; rewrite it */
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+ omap_i2c_write_reg(omap, OMAP_I2C_SYSC_REG, sysc);
- if (dev->rev > OMAP_I2C_REV_ON_3430_3530) {
+ if (omap->rev > OMAP_I2C_REV_ON_3430_3530) {
/* Schedule I2C-bus monitoring on the next transfer */
- dev->bb_valid = 0;
+ omap->bb_valid = 0;
}
}
return 0;
}
-static int omap_i2c_init(struct omap_i2c_dev *dev)
+static int omap_i2c_init(struct omap_i2c_dev *omap)
{
u16 psc = 0, scll = 0, sclh = 0;
u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
@@ -362,23 +362,23 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long internal_clk = 0;
struct clk *fclk;
- if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
+ if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
/*
 		 * Enabling all wakeup sources to stop I2C freezing on
 		 * WFI instruction.
 		 * REVISIT: Some wakeup sources might not be needed.
*/
- dev->westate = OMAP_I2C_WE_ALL;
+ omap->westate = OMAP_I2C_WE_ALL;
}
- if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+ if (omap->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
* always returns 12MHz for the functional clock, we can
* do this bit unconditionally.
*/
- fclk = clk_get(dev->dev, "fck");
+ fclk = clk_get(omap->dev, "fck");
fclk_rate = clk_get_rate(fclk);
clk_put(fclk);
@@ -395,7 +395,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+ if (!(omap->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -403,14 +403,14 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 ||
- dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+ if (omap->speed > 400 ||
+ omap->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
- else if (dev->speed > 100)
+ else if (omap->speed > 100)
internal_clk = 9600;
else
internal_clk = 4000;
- fclk = clk_get(dev->dev, "fck");
+ fclk = clk_get(omap->dev, "fck");
fclk_rate = clk_get_rate(fclk) / 1000;
clk_put(fclk);
@@ -419,7 +419,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = psc - 1;
/* If configured for High Speed */
- if (dev->speed > 400) {
+ if (omap->speed > 400) {
unsigned long scl;
/* For first phase of HS mode */
@@ -428,20 +428,20 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
fssclh = (scl / 3) - 5;
/* For second phase of HS mode */
- scl = fclk_rate / dev->speed;
+ scl = fclk_rate / omap->speed;
hsscll = scl - (scl / 3) - 7;
hssclh = (scl / 3) - 5;
- } else if (dev->speed > 100) {
+ } else if (omap->speed > 100) {
unsigned long scl;
/* Fast mode */
- scl = internal_clk / dev->speed;
+ scl = internal_clk / omap->speed;
fsscll = scl - (scl / 3) - 7;
fssclh = (scl / 3) - 5;
} else {
/* Standard mode */
- fsscll = internal_clk / (dev->speed * 2) - 7;
- fssclh = internal_clk / (dev->speed * 2) - 5;
+ fsscll = internal_clk / (omap->speed * 2) - 7;
+ fssclh = internal_clk / (omap->speed * 2) - 5;
}
scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
@@ -450,25 +450,25 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
fclk_rate /= (psc + 1) * 1000;
if (psc > 2)
psc = 2;
- scll = fclk_rate / (dev->speed * 2) - 7 + psc;
- sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
+ scll = fclk_rate / (omap->speed * 2) - 7 + psc;
+ sclh = fclk_rate / (omap->speed * 2) - 7 + psc;
}
- dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
+ omap->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
- OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
+ OMAP_I2C_IE_AL) | ((omap->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
- dev->pscstate = psc;
- dev->scllstate = scll;
- dev->sclhstate = sclh;
+ omap->pscstate = psc;
+ omap->scllstate = scll;
+ omap->sclhstate = sclh;
- if (dev->rev <= OMAP_I2C_REV_ON_3430_3530) {
+ if (omap->rev <= OMAP_I2C_REV_ON_3430_3530) {
/* Not implemented */
- dev->bb_valid = 1;
+ omap->bb_valid = 1;
}
- __omap_i2c_init(dev);
+ __omap_i2c_init(omap);
return 0;
}
@@ -476,14 +476,14 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/*
* Waiting on Bus Busy
*/
-static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
+static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
{
unsigned long timeout;
timeout = jiffies + OMAP_I2C_TIMEOUT;
- while (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
+ while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
if (time_after(jiffies, timeout))
- return i2c_recover_bus(&dev->adapter);
+ return i2c_recover_bus(&omap->adapter);
msleep(1);
}
@@ -518,19 +518,19 @@ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
* 3. Any transfer started in the middle of another master's transfer
  * leads to unpredictable results and data corruption
*/
-static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *dev)
+static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *omap)
{
unsigned long bus_free_timeout = 0;
unsigned long timeout;
int bus_free = 0;
u16 stat, systest;
- if (dev->bb_valid)
+ if (omap->bb_valid)
return 0;
timeout = jiffies + OMAP_I2C_TIMEOUT;
while (1) {
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
/*
* We will see BB or BF event in a case IP had detected any
* activity on the I2C bus. Now IP correctly tracks the bus
@@ -543,7 +543,7 @@ static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *dev)
 	 * Otherwise, we must look at the signals on the bus to make
* the right decision.
*/
- systest = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG);
if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
(systest & OMAP_I2C_SYSTEST_SDA_I_FUNC)) {
if (!bus_free) {
@@ -564,22 +564,22 @@ static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *dev)
}
if (time_after(jiffies, timeout)) {
- dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ dev_warn(omap->dev, "timeout waiting for bus ready\n");
return -ETIMEDOUT;
}
msleep(1);
}
- dev->bb_valid = 1;
+ omap->bb_valid = 1;
return 0;
}
-static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
+static void omap_i2c_resize_fifo(struct omap_i2c_dev *omap, u8 size, bool is_rx)
{
u16 buf;
- if (dev->flags & OMAP_I2C_FLAG_NO_FIFO)
+ if (omap->flags & OMAP_I2C_FLAG_NO_FIFO)
return;
/*
@@ -589,29 +589,29 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
* then we might use draining feature to transfer the remaining bytes.
*/
- dev->threshold = clamp(size, (u8) 1, dev->fifo_size);
+ omap->threshold = clamp(size, (u8) 1, omap->fifo_size);
- buf = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
+ buf = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
if (is_rx) {
/* Clear RX Threshold */
buf &= ~(0x3f << 8);
- buf |= ((dev->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR;
+ buf |= ((omap->threshold - 1) << 8) | OMAP_I2C_BUF_RXFIF_CLR;
} else {
/* Clear TX Threshold */
buf &= ~0x3f;
- buf |= (dev->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR;
+ buf |= (omap->threshold - 1) | OMAP_I2C_BUF_TXFIF_CLR;
}
- omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
+ omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, buf);
- if (dev->rev < OMAP_I2C_REV_ON_3630)
- dev->b_hw = 1; /* Enable hardware fixes */
+ if (omap->rev < OMAP_I2C_REV_ON_3630)
+ omap->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- if (dev->set_mpu_wkup_lat != NULL)
- dev->latency = (1000000 * dev->threshold) /
- (1000 * dev->speed / 8);
+ if (omap->set_mpu_wkup_lat != NULL)
+ omap->latency = (1000000 * omap->threshold) /
+ (1000 * omap->speed / 8);
}
/*
@@ -620,42 +620,42 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
struct i2c_msg *msg, int stop)
{
- struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
unsigned long timeout;
u16 w;
- dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
if (msg->len == 0)
return -EINVAL;
- dev->receiver = !!(msg->flags & I2C_M_RD);
- omap_i2c_resize_fifo(dev, msg->len, dev->receiver);
+ omap->receiver = !!(msg->flags & I2C_M_RD);
+ omap_i2c_resize_fifo(omap, msg->len, omap->receiver);
- omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);
+ omap_i2c_write_reg(omap, OMAP_I2C_SA_REG, msg->addr);
/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
- dev->buf = msg->buf;
- dev->buf_len = msg->len;
+ omap->buf = msg->buf;
+ omap->buf_len = msg->len;
- /* make sure writes to dev->buf_len are ordered */
+ /* make sure writes to omap->buf_len are ordered */
barrier();
- omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
+ omap_i2c_write_reg(omap, OMAP_I2C_CNT_REG, omap->buf_len);
/* Clear the FIFO Buffers */
- w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
+ w = omap_i2c_read_reg(omap, OMAP_I2C_BUF_REG);
w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
- omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);
- reinit_completion(&dev->cmd_complete);
- dev->cmd_err = 0;
+ reinit_completion(&omap->cmd_complete);
+ omap->cmd_err = 0;
w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
/* High speed configuration */
- if (dev->speed > 400)
+ if (omap->speed > 400)
w |= OMAP_I2C_CON_OPMODE_HS;
if (msg->flags & I2C_M_STOP)
@@ -665,27 +665,27 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
if (!(msg->flags & I2C_M_RD))
w |= OMAP_I2C_CON_TRX;
- if (!dev->b_hw && stop)
+ if (!omap->b_hw && stop)
w |= OMAP_I2C_CON_STP;
/*
 	 * NOTE: the STAT_BB bit could become 1 here if another master
 	 * occupies the bus. The IP successfully completes the transfer
 	 * once the bus is free again (BB reset to 0).
*/
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
/*
* Don't write stt and stp together on some hardware.
*/
- if (dev->b_hw && stop) {
+ if (omap->b_hw && stop) {
unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
- u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+ u16 con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
while (con & OMAP_I2C_CON_STT) {
- con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+ con = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
/* Let the user know if i2c is in a bad state */
if (time_after(jiffies, delay)) {
- dev_err(dev->dev, "controller timed out "
+ dev_err(omap->dev, "controller timed out "
"waiting for start condition to finish\n");
return -ETIMEDOUT;
}
@@ -694,42 +694,42 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
w |= OMAP_I2C_CON_STP;
w &= ~OMAP_I2C_CON_STT;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
}
/*
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
- timeout = wait_for_completion_timeout(&dev->cmd_complete,
+ timeout = wait_for_completion_timeout(&omap->cmd_complete,
OMAP_I2C_TIMEOUT);
if (timeout == 0) {
- dev_err(dev->dev, "controller timed out\n");
- omap_i2c_reset(dev);
- __omap_i2c_init(dev);
+ dev_err(omap->dev, "controller timed out\n");
+ omap_i2c_reset(omap);
+ __omap_i2c_init(omap);
return -ETIMEDOUT;
}
- if (likely(!dev->cmd_err))
+ if (likely(!omap->cmd_err))
return 0;
/* We have an error */
- if (dev->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
- omap_i2c_reset(dev);
- __omap_i2c_init(dev);
+ if (omap->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
+ omap_i2c_reset(omap);
+ __omap_i2c_init(omap);
return -EIO;
}
- if (dev->cmd_err & OMAP_I2C_STAT_AL)
+ if (omap->cmd_err & OMAP_I2C_STAT_AL)
return -EAGAIN;
- if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
+ if (omap->cmd_err & OMAP_I2C_STAT_NACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
return 0;
- w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+ w = omap_i2c_read_reg(omap, OMAP_I2C_CON_REG);
w |= OMAP_I2C_CON_STP;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, w);
return -EREMOTEIO;
}
return -EIO;
@@ -743,24 +743,24 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
static int
omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
- struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
int i;
int r;
- r = pm_runtime_get_sync(dev->dev);
+ r = pm_runtime_get_sync(omap->dev);
if (r < 0)
goto out;
- r = omap_i2c_wait_for_bb_valid(dev);
+ r = omap_i2c_wait_for_bb_valid(omap);
if (r < 0)
goto out;
- r = omap_i2c_wait_for_bb(dev);
+ r = omap_i2c_wait_for_bb(omap);
if (r < 0)
goto out;
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+ if (omap->set_mpu_wkup_lat != NULL)
+ omap->set_mpu_wkup_lat(omap->dev, omap->latency);
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -771,14 +771,14 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (r == 0)
r = num;
- omap_i2c_wait_for_bb(dev);
+ omap_i2c_wait_for_bb(omap);
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
+ if (omap->set_mpu_wkup_lat != NULL)
+ omap->set_mpu_wkup_lat(omap->dev, -1);
out:
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(omap->dev);
+ pm_runtime_put_autosuspend(omap->dev);
return r;
}
@@ -790,19 +790,19 @@ omap_i2c_func(struct i2c_adapter *adap)
}
static inline void
-omap_i2c_complete_cmd(struct omap_i2c_dev *dev, u16 err)
+omap_i2c_complete_cmd(struct omap_i2c_dev *omap, u16 err)
{
- dev->cmd_err |= err;
- complete(&dev->cmd_complete);
+ omap->cmd_err |= err;
+ complete(&omap->cmd_complete);
}
static inline void
-omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
+omap_i2c_ack_stat(struct omap_i2c_dev *omap, u16 stat)
{
- omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
+ omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, stat);
}
-static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
+static inline void i2c_omap_errata_i207(struct omap_i2c_dev *omap, u16 stat)
{
/*
 	 * I2C Errata (Errata Nos. OMAP2: 1.67, OMAP3: 1.8)
@@ -813,17 +813,17 @@ static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
*/
if (stat & OMAP_I2C_STAT_RDR) {
/* Step 1: If RDR is set, clear it */
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
/* Step 2: */
- if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ if (!(omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG)
& OMAP_I2C_STAT_BB)) {
/* Step 3: */
- if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ if (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG)
& OMAP_I2C_STAT_RDR) {
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
- dev_dbg(dev->dev, "RDR when bus is busy.\n");
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
+ dev_dbg(omap->dev, "RDR when bus is busy.\n");
}
}
@@ -836,50 +836,50 @@ static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
static irqreturn_t
omap_i2c_omap1_isr(int this_irq, void *dev_id)
{
- struct omap_i2c_dev *dev = dev_id;
+ struct omap_i2c_dev *omap = dev_id;
u16 iv, w;
- if (pm_runtime_suspended(dev->dev))
+ if (pm_runtime_suspended(omap->dev))
return IRQ_NONE;
- iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG);
+ iv = omap_i2c_read_reg(omap, OMAP_I2C_IV_REG);
switch (iv) {
case 0x00: /* None */
break;
case 0x01: /* Arbitration lost */
- dev_err(dev->dev, "Arbitration lost\n");
- omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_AL);
+ dev_err(omap->dev, "Arbitration lost\n");
+ omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_AL);
break;
case 0x02: /* No acknowledgement */
- omap_i2c_complete_cmd(dev, OMAP_I2C_STAT_NACK);
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP);
+ omap_i2c_complete_cmd(omap, OMAP_I2C_STAT_NACK);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, OMAP_I2C_CON_STP);
break;
case 0x03: /* Register access ready */
- omap_i2c_complete_cmd(dev, 0);
+ omap_i2c_complete_cmd(omap, 0);
break;
case 0x04: /* Receive data ready */
- if (dev->buf_len) {
- w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
- *dev->buf++ = w;
- dev->buf_len--;
- if (dev->buf_len) {
- *dev->buf++ = w >> 8;
- dev->buf_len--;
+ if (omap->buf_len) {
+ w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG);
+ *omap->buf++ = w;
+ omap->buf_len--;
+ if (omap->buf_len) {
+ *omap->buf++ = w >> 8;
+ omap->buf_len--;
}
} else
- dev_err(dev->dev, "RRDY IRQ while no data requested\n");
+ dev_err(omap->dev, "RRDY IRQ while no data requested\n");
break;
case 0x05: /* Transmit data ready */
- if (dev->buf_len) {
- w = *dev->buf++;
- dev->buf_len--;
- if (dev->buf_len) {
- w |= *dev->buf++ << 8;
- dev->buf_len--;
+ if (omap->buf_len) {
+ w = *omap->buf++;
+ omap->buf_len--;
+ if (omap->buf_len) {
+ w |= *omap->buf++ << 8;
+ omap->buf_len--;
}
- omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w);
} else
- dev_err(dev->dev, "XRDY IRQ while no data to send\n");
+ dev_err(omap->dev, "XRDY IRQ while no data to send\n");
break;
default:
return IRQ_NONE;
@@ -896,28 +896,28 @@ omap_i2c_omap1_isr(int this_irq, void *dev_id)
* data to DATA_REG. Otherwise some data bytes can be lost while transferring
* them from the memory to the I2C interface.
*/
-static int errata_omap3_i462(struct omap_i2c_dev *dev)
+static int errata_omap3_i462(struct omap_i2c_dev *omap)
{
unsigned long timeout = 10000;
u16 stat;
do {
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
if (stat & OMAP_I2C_STAT_XUDF)
break;
if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
- omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_XRDY |
+ omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_XRDY |
OMAP_I2C_STAT_XDR));
if (stat & OMAP_I2C_STAT_NACK) {
- dev->cmd_err |= OMAP_I2C_STAT_NACK;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+ omap->cmd_err |= OMAP_I2C_STAT_NACK;
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
}
if (stat & OMAP_I2C_STAT_AL) {
- dev_err(dev->dev, "Arbitration lost\n");
- dev->cmd_err |= OMAP_I2C_STAT_AL;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
+ dev_err(omap->dev, "Arbitration lost\n");
+ omap->cmd_err |= OMAP_I2C_STAT_AL;
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL);
}
return -EIO;
@@ -927,61 +927,61 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
} while (--timeout);
if (!timeout) {
- dev_err(dev->dev, "timeout waiting on XUDF bit\n");
+ dev_err(omap->dev, "timeout waiting on XUDF bit\n");
return 0;
}
return 0;
}
-static void omap_i2c_receive_data(struct omap_i2c_dev *dev, u8 num_bytes,
+static void omap_i2c_receive_data(struct omap_i2c_dev *omap, u8 num_bytes,
bool is_rdr)
{
u16 w;
while (num_bytes--) {
- w = omap_i2c_read_reg(dev, OMAP_I2C_DATA_REG);
- *dev->buf++ = w;
- dev->buf_len--;
+ w = omap_i2c_read_reg(omap, OMAP_I2C_DATA_REG);
+ *omap->buf++ = w;
+ omap->buf_len--;
/*
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
- *dev->buf++ = w >> 8;
- dev->buf_len--;
+ if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
+ *omap->buf++ = w >> 8;
+ omap->buf_len--;
}
}
}
-static int omap_i2c_transmit_data(struct omap_i2c_dev *dev, u8 num_bytes,
+static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
bool is_xdr)
{
u16 w;
while (num_bytes--) {
- w = *dev->buf++;
- dev->buf_len--;
+ w = *omap->buf++;
+ omap->buf_len--;
/*
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
- w |= *dev->buf++ << 8;
- dev->buf_len--;
+ if (omap->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) {
+ w |= *omap->buf++ << 8;
+ omap->buf_len--;
}
- if (dev->errata & I2C_OMAP_ERRATA_I462) {
+ if (omap->errata & I2C_OMAP_ERRATA_I462) {
int ret;
- ret = errata_omap3_i462(dev);
+ ret = errata_omap3_i462(omap);
if (ret < 0)
return ret;
}
- omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
+ omap_i2c_write_reg(omap, OMAP_I2C_DATA_REG, w);
}
return 0;
@@ -990,19 +990,19 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *dev, u8 num_bytes,
static irqreturn_t
omap_i2c_isr(int irq, void *dev_id)
{
- struct omap_i2c_dev *dev = dev_id;
+ struct omap_i2c_dev *omap = dev_id;
irqreturn_t ret = IRQ_HANDLED;
u16 mask;
u16 stat;
- spin_lock(&dev->lock);
- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ spin_lock(&omap->lock);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
- spin_unlock(&dev->lock);
+ spin_unlock(&omap->lock);
return ret;
}
@@ -1010,20 +1010,20 @@ omap_i2c_isr(int irq, void *dev_id)
static irqreturn_t
omap_i2c_isr_thread(int this_irq, void *dev_id)
{
- struct omap_i2c_dev *dev = dev_id;
+ struct omap_i2c_dev *omap = dev_id;
unsigned long flags;
u16 bits;
u16 stat;
int err = 0, count = 0;
- spin_lock_irqsave(&dev->lock, flags);
+ spin_lock_irqsave(&omap->lock, flags);
do {
- bits = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ bits = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
stat &= bits;
/* If we're in receiver mode, ignore XDR/XRDY */
- if (dev->receiver)
+ if (omap->receiver)
stat &= ~(OMAP_I2C_STAT_XDR | OMAP_I2C_STAT_XRDY);
else
stat &= ~(OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_RRDY);
@@ -1033,32 +1033,32 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
goto out;
}
- dev_dbg(dev->dev, "IRQ (ISR = 0x%04x)\n", stat);
+ dev_dbg(omap->dev, "IRQ (ISR = 0x%04x)\n", stat);
if (count++ == 100) {
- dev_warn(dev->dev, "Too much work in one IRQ\n");
+ dev_warn(omap->dev, "Too much work in one IRQ\n");
break;
}
if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
}
if (stat & OMAP_I2C_STAT_AL) {
- dev_err(dev->dev, "Arbitration lost\n");
+ dev_err(omap->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_AL);
}
/*
* ProDB0017052: Clear ARDY bit twice
*/
if (stat & OMAP_I2C_STAT_ARDY)
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ARDY);
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
- omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
+ omap_i2c_ack_stat(omap, (OMAP_I2C_STAT_RRDY |
OMAP_I2C_STAT_RDR |
OMAP_I2C_STAT_XRDY |
OMAP_I2C_STAT_XDR |
@@ -1069,28 +1069,28 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
if (stat & OMAP_I2C_STAT_RDR) {
u8 num_bytes = 1;
- if (dev->fifo_size)
- num_bytes = dev->buf_len;
+ if (omap->fifo_size)
+ num_bytes = omap->buf_len;
- if (dev->errata & I2C_OMAP_ERRATA_I207) {
- i2c_omap_errata_i207(dev, stat);
- num_bytes = (omap_i2c_read_reg(dev,
+ if (omap->errata & I2C_OMAP_ERRATA_I207) {
+ i2c_omap_errata_i207(omap, stat);
+ num_bytes = (omap_i2c_read_reg(omap,
OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
}
- omap_i2c_receive_data(dev, num_bytes, true);
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ omap_i2c_receive_data(omap, num_bytes, true);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RDR);
continue;
}
if (stat & OMAP_I2C_STAT_RRDY) {
u8 num_bytes = 1;
- if (dev->threshold)
- num_bytes = dev->threshold;
+ if (omap->threshold)
+ num_bytes = omap->threshold;
- omap_i2c_receive_data(dev, num_bytes, false);
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RRDY);
+ omap_i2c_receive_data(omap, num_bytes, false);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_RRDY);
continue;
}
@@ -1098,14 +1098,14 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
u8 num_bytes = 1;
int ret;
- if (dev->fifo_size)
- num_bytes = dev->buf_len;
+ if (omap->fifo_size)
+ num_bytes = omap->buf_len;
- ret = omap_i2c_transmit_data(dev, num_bytes, true);
+ ret = omap_i2c_transmit_data(omap, num_bytes, true);
if (ret < 0)
break;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XDR);
continue;
}
@@ -1113,36 +1113,36 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
u8 num_bytes = 1;
int ret;
- if (dev->threshold)
- num_bytes = dev->threshold;
+ if (omap->threshold)
+ num_bytes = omap->threshold;
- ret = omap_i2c_transmit_data(dev, num_bytes, false);
+ ret = omap_i2c_transmit_data(omap, num_bytes, false);
if (ret < 0)
break;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XRDY);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XRDY);
continue;
}
if (stat & OMAP_I2C_STAT_ROVR) {
- dev_err(dev->dev, "Receive overrun\n");
+ dev_err(omap->dev, "Receive overrun\n");
err |= OMAP_I2C_STAT_ROVR;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ROVR);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_ROVR);
break;
}
if (stat & OMAP_I2C_STAT_XUDF) {
- dev_err(dev->dev, "Transmit underflow\n");
+ dev_err(omap->dev, "Transmit underflow\n");
err |= OMAP_I2C_STAT_XUDF;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XUDF);
+ omap_i2c_ack_stat(omap, OMAP_I2C_STAT_XUDF);
break;
}
} while (stat);
- omap_i2c_complete_cmd(dev, err);
+ omap_i2c_complete_cmd(omap, err);
out:
- spin_unlock_irqrestore(&dev->lock, flags);
+ spin_unlock_irqrestore(&omap->lock, flags);
return IRQ_HANDLED;
}
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* enable test mode */
reg |= OMAP_I2C_SYSTEST_ST_EN;
+ /* select SDA/SCL IO mode */
+ reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
+ /* set SCL to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SCL_O;
+ /* set SDA to high-impedance state (reset value is 0) */
+ reg |= OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
u32 reg;
reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ /* restore reset values */
reg &= ~OMAP_I2C_SYSTEST_ST_EN;
+ reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
+ reg &= ~OMAP_I2C_SYSTEST_SCL_O;
+ reg &= ~OMAP_I2C_SYSTEST_SDA_O;
omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
}
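These prepare/unprepare callbacks are the driver half of the core bus-recovery machinery referenced by the next hunk. A hedged sketch of how such callbacks are typically wired up; the get/set helpers are hypothetical, only the struct fields and i2c_generic_scl_recovery() come from include/linux/i2c.h:

static struct i2c_bus_recovery_info sketch_recovery_info = {
	.recover_bus		= i2c_generic_scl_recovery,
	.get_scl		= sketch_get_scl,	/* hypothetical */
	.set_scl		= sketch_set_scl,	/* hypothetical */
	.prepare_recovery	= omap_i2c_prepare_recovery,
	.unprepare_recovery	= omap_i2c_unprepare_recovery,
};

	/* in probe, before i2c_add_adapter(): */
	adap->bus_recovery_info = &sketch_recovery_info;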
@@ -1273,7 +1284,7 @@ static struct i2c_bus_recovery_info omap_i2c_bus_recovery_info = {
static int
omap_i2c_probe(struct platform_device *pdev)
{
- struct omap_i2c_dev *dev;
+ struct omap_i2c_dev *omap;
struct i2c_adapter *adap;
struct resource *mem;
const struct omap_i2c_bus_platform_data *pdata =
@@ -1291,46 +1302,46 @@ omap_i2c_probe(struct platform_device *pdev)
return irq;
}
- dev = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL);
- if (!dev)
+ omap = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL);
+ if (!omap)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
+ omap->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(omap->base))
+ return PTR_ERR(omap->base);
match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
u32 freq = 100000; /* default to 100000 Hz */
pdata = match->data;
- dev->flags = pdata->flags;
+ omap->flags = pdata->flags;
of_property_read_u32(node, "clock-frequency", &freq);
/* convert DT freq value in Hz into kHz for speed */
- dev->speed = freq / 1000;
+ omap->speed = freq / 1000;
} else if (pdata != NULL) {
- dev->speed = pdata->clkrate;
- dev->flags = pdata->flags;
- dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
+ omap->speed = pdata->clkrate;
+ omap->flags = pdata->flags;
+ omap->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
}
- dev->dev = &pdev->dev;
- dev->irq = irq;
+ omap->dev = &pdev->dev;
+ omap->irq = irq;
- spin_lock_init(&dev->lock);
+ spin_lock_init(&omap->lock);
- platform_set_drvdata(pdev, dev);
- init_completion(&dev->cmd_complete);
+ platform_set_drvdata(pdev, omap);
+ init_completion(&omap->cmd_complete);
- dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+ omap->reg_shift = (omap->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- pm_runtime_enable(dev->dev);
- pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
- pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_enable(omap->dev);
+ pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(omap->dev);
- r = pm_runtime_get_sync(dev->dev);
+ r = pm_runtime_get_sync(omap->dev);
if (r < 0)
goto err_free_mem;
@@ -1340,42 +1351,42 @@ omap_i2c_probe(struct platform_device *pdev)
* Also since the omap_i2c_read_reg uses reg_map_ip_* a
* readw_relaxed is done.
*/
- rev = readw_relaxed(dev->base + 0x04);
+ rev = readw_relaxed(omap->base + 0x04);
- dev->scheme = OMAP_I2C_SCHEME(rev);
- switch (dev->scheme) {
+ omap->scheme = OMAP_I2C_SCHEME(rev);
+ switch (omap->scheme) {
case OMAP_I2C_SCHEME_0:
- dev->regs = (u8 *)reg_map_ip_v1;
- dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
- minor = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
- major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
+ omap->regs = (u8 *)reg_map_ip_v1;
+ omap->rev = omap_i2c_read_reg(omap, OMAP_I2C_REV_REG);
+ minor = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev);
+ major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev);
break;
case OMAP_I2C_SCHEME_1:
/* FALLTHROUGH */
default:
- dev->regs = (u8 *)reg_map_ip_v2;
+ omap->regs = (u8 *)reg_map_ip_v2;
rev = (rev << 16) |
- omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
+ omap_i2c_read_reg(omap, OMAP_I2C_IP_V2_REVNB_LO);
minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
- dev->rev = rev;
+ omap->rev = rev;
}
- dev->errata = 0;
+ omap->errata = 0;
- if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
- dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
- dev->errata |= I2C_OMAP_ERRATA_I207;
+ if (omap->rev >= OMAP_I2C_REV_ON_2430 &&
+ omap->rev < OMAP_I2C_REV_ON_4430_PLUS)
+ omap->errata |= I2C_OMAP_ERRATA_I207;
- if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
- dev->errata |= I2C_OMAP_ERRATA_I462;
+ if (omap->rev <= OMAP_I2C_REV_ON_3430_3530)
+ omap->errata |= I2C_OMAP_ERRATA_I462;
- if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+ if (!(omap->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
- s = (omap_i2c_read_reg(dev, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3;
- dev->fifo_size = 0x8 << s;
+ s = (omap_i2c_read_reg(omap, OMAP_I2C_BUFSTAT_REG) >> 14) & 0x3;
+ omap->fifo_size = 0x8 << s;
/*
* Set up notification threshold as half the total available
@@ -1383,36 +1394,36 @@ omap_i2c_probe(struct platform_device *pdev)
* call back latencies.
*/
- dev->fifo_size = (dev->fifo_size / 2);
+ omap->fifo_size = (omap->fifo_size / 2);
- if (dev->rev < OMAP_I2C_REV_ON_3630)
- dev->b_hw = 1; /* Enable hardware fixes */
+ if (omap->rev < OMAP_I2C_REV_ON_3630)
+ omap->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- if (dev->set_mpu_wkup_lat != NULL)
- dev->latency = (1000000 * dev->fifo_size) /
- (1000 * dev->speed / 8);
+ if (omap->set_mpu_wkup_lat != NULL)
+ omap->latency = (1000000 * omap->fifo_size) /
+ (1000 * omap->speed / 8);
}
/* reset ASAP, clearing any IRQs */
- omap_i2c_init(dev);
+ omap_i2c_init(omap);
- if (dev->rev < OMAP_I2C_OMAP1_REV_2)
- r = devm_request_irq(&pdev->dev, dev->irq, omap_i2c_omap1_isr,
- IRQF_NO_SUSPEND, pdev->name, dev);
+ if (omap->rev < OMAP_I2C_OMAP1_REV_2)
+ r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
+ IRQF_NO_SUSPEND, pdev->name, omap);
else
- r = devm_request_threaded_irq(&pdev->dev, dev->irq,
+ r = devm_request_threaded_irq(&pdev->dev, omap->irq,
omap_i2c_isr, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
- pdev->name, dev);
+ pdev->name, omap);
if (r) {
- dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
+ dev_err(omap->dev, "failure requesting irq %i\n", omap->irq);
goto err_unuse_clocks;
}
- adap = &dev->adapter;
- i2c_set_adapdata(adap, dev);
+ adap = &omap->adapter;
+ i2c_set_adapdata(adap, omap);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
@@ -1425,21 +1436,21 @@ omap_i2c_probe(struct platform_device *pdev)
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
if (r) {
- dev_err(dev->dev, "failure adding adapter\n");
+ dev_err(omap->dev, "failure adding adapter\n");
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
- major, minor, dev->speed);
+ dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
+ major, minor, omap->speed);
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(omap->dev);
+ pm_runtime_put_autosuspend(omap->dev);
return 0;
err_unuse_clocks:
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- pm_runtime_put(dev->dev);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ pm_runtime_put(omap->dev);
pm_runtime_disable(&pdev->dev);
err_free_mem:
@@ -1448,16 +1459,16 @@ err_free_mem:
static int omap_i2c_remove(struct platform_device *pdev)
{
- struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct omap_i2c_dev *omap = platform_get_drvdata(pdev);
int ret;
- i2c_del_adapter(&dev->adapter);
+ i2c_del_adapter(&omap->adapter);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
return ret;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- pm_runtime_put(&pdev->dev);
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1465,24 +1476,23 @@ static int omap_i2c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int omap_i2c_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+ struct omap_i2c_dev *omap = dev_get_drvdata(dev);
- _dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
+ omap->iestate = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
- if (_dev->scheme == OMAP_I2C_SCHEME_0)
- omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
+ if (omap->scheme == OMAP_I2C_SCHEME_0)
+ omap_i2c_write_reg(omap, OMAP_I2C_IE_REG, 0);
else
- omap_i2c_write_reg(_dev, OMAP_I2C_IP_V2_IRQENABLE_CLR,
+ omap_i2c_write_reg(omap, OMAP_I2C_IP_V2_IRQENABLE_CLR,
OMAP_I2C_IP_V2_INTERRUPTS_MASK);
- if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
- omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+ if (omap->rev < OMAP_I2C_OMAP1_REV_2) {
+ omap_i2c_read_reg(omap, OMAP_I2C_IV_REG); /* Read clears */
} else {
- omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
+ omap_i2c_write_reg(omap, OMAP_I2C_STAT_REG, omap->iestate);
/* Flush posted write */
- omap_i2c_read_reg(_dev, OMAP_I2C_STAT_REG);
+ omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
}
pinctrl_pm_select_sleep_state(dev);
@@ -1492,15 +1502,14 @@ static int omap_i2c_runtime_suspend(struct device *dev)
static int omap_i2c_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+ struct omap_i2c_dev *omap = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
- if (!_dev->regs)
+ if (!omap->regs)
return 0;
- __omap_i2c_init(_dev);
+ __omap_i2c_init(omap);
return 0;
}
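The runtime-PM hunks above also drop the to_platform_device() detour. That is safe because platform_set_drvdata() stores the pointer on the embedded struct device, so the two lookups below return the same thing:

	/* equivalent in a dev_pm_ops callback once drvdata is set: */
	struct omap_i2c_dev *a = platform_get_drvdata(to_platform_device(dev));
	struct omap_i2c_dev *b = dev_get_drvdata(dev);	/* same pointer */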
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 9b94c3db80ab..a8e54df4aed6 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -20,6 +20,8 @@
GNU General Public License for more details.
* ------------------------------------------------------------------------ */
+#define pr_fmt(fmt) "i2c-parport: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -176,26 +178,24 @@ static void i2c_parport_attach(struct parport *port)
break;
}
if (i == MAX_DEVICE) {
- pr_debug("i2c-parport: Not using parport%d.\n", port->number);
+ pr_debug("Not using parport%d.\n", port->number);
return;
}
adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
- if (adapter == NULL) {
- printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
+ if (!adapter)
return;
- }
memset(&i2c_parport_cb, 0, sizeof(i2c_parport_cb));
i2c_parport_cb.flags = PARPORT_FLAG_EXCL;
i2c_parport_cb.irq_func = i2c_parport_irq;
i2c_parport_cb.private = adapter;
- pr_debug("i2c-parport: attaching to %s\n", port->name);
+ pr_debug("attaching to %s\n", port->name);
parport_disable_irq(port);
adapter->pdev = parport_register_dev_model(port, "i2c-parport",
&i2c_parport_cb, i);
if (!adapter->pdev) {
- printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
+ pr_err("Unable to register with parport\n");
goto err_free;
}
@@ -215,7 +215,8 @@ static void i2c_parport_attach(struct parport *port)
adapter->adapter.dev.parent = port->physport->dev;
if (parport_claim_or_block(adapter->pdev) < 0) {
- printk(KERN_ERR "i2c-parport: Could not claim parallel port\n");
+ dev_err(&adapter->pdev->dev,
+ "Could not claim parallel port\n");
goto err_unregister;
}
@@ -230,7 +231,7 @@ static void i2c_parport_attach(struct parport *port)
}
if (i2c_bit_add_bus(&adapter->adapter) < 0) {
- printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
+ dev_err(&adapter->pdev->dev, "Unable to register with I2C\n");
goto err_unregister;
}
@@ -242,8 +243,8 @@ static void i2c_parport_attach(struct parport *port)
if (adapter->ara)
parport_enable_irq(port);
else
- printk(KERN_WARNING "i2c-parport: Failed to register "
- "ARA client\n");
+ dev_warn(&adapter->pdev->dev,
+ "Failed to register ARA client\n");
}
/* Add the new adapter to the list */
@@ -298,12 +299,12 @@ static struct parport_driver i2c_parport_driver = {
static int __init i2c_parport_init(void)
{
if (type < 0) {
- printk(KERN_WARNING "i2c-parport: adapter type unspecified\n");
+ pr_warn("adapter type unspecified\n");
return -ENODEV;
}
if (type >= ARRAY_SIZE(adapter_parm)) {
- printk(KERN_WARNING "i2c-parport: invalid type (%d)\n", type);
+ pr_warn("invalid type (%d)\n", type);
return -ENODEV;
}
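The pr_fmt() define added at the top of the file is what lets all these messages drop the hand-written prefix: the pr_*() macros in include/linux/printk.h expand their format string through pr_fmt(). A minimal sketch:

#define pr_fmt(fmt) "i2c-parport: " fmt	/* must precede the includes */
#include <linux/printk.h>

	pr_warn("adapter type unspecified\n");
	/* emits: i2c-parport: adapter type unspecified */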
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index 4e1294536805..84a6616b072f 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -89,6 +89,13 @@ static const struct adapter_parm adapter_parm[] = {
.getsda = { 0x80, PORT_STAT, 1 },
.init = { 0x04, PORT_DATA, 1 },
},
+ /* type 8: VCT-jig */
+ {
+ .setsda = { 0x04, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x40, PORT_STAT, 0 },
+ .getscl = { 0x80, PORT_STAT, 1 },
+ },
};
static int type = -1;
@@ -103,4 +110,5 @@ MODULE_PARM_DESC(type,
" 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
" 6 = Barco LPT->DVI (K5800236) adapter\n"
" 7 = One For All JP1 parallel port adapter\n"
+ " 8 = VCT-jig\n"
);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index d9c0d6a17ad6..645e4b79d968 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -132,6 +132,7 @@ struct pxa_i2c {
unsigned int msg_idx;
unsigned int msg_ptr;
unsigned int slave_addr;
+ unsigned int req_slave_addr;
struct i2c_adapter adap;
struct clk *clk;
@@ -253,15 +254,20 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
{
unsigned int i;
- printk(KERN_ERR "i2c: error: %s\n", why);
- printk(KERN_ERR "i2c: msg_num: %d msg_idx: %d msg_ptr: %d\n",
+ struct device *dev = &i2c->adap.dev;
+
+ dev_err(dev, "slave_0x%x error: %s\n",
+ i2c->req_slave_addr >> 1, why);
+ dev_err(dev, "msg_num: %d msg_idx: %d msg_ptr: %d\n",
i2c->msg_num, i2c->msg_idx, i2c->msg_ptr);
- printk(KERN_ERR "i2c: ICR: %08x ISR: %08x\n",
- readl(_ICR(i2c)), readl(_ISR(i2c)));
- printk(KERN_DEBUG "i2c: log: ");
+ dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n",
+ readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)),
+ readl(_ISR(i2c)));
+ dev_dbg(dev, "log: ");
for (i = 0; i < i2c->irqlogidx; i++)
- printk("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]);
- printk("\n");
+ pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]);
+
+ pr_debug("\n");
}
#else /* ifdef DEBUG */
@@ -459,7 +465,7 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
writel(I2C_ISR_INIT, _ISR(i2c));
writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
- if (i2c->reg_isar)
+ if (i2c->reg_isar && IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
writel(i2c->slave_addr, _ISAR(i2c));
/* set control register values */
@@ -638,6 +644,7 @@ static inline void i2c_pxa_start_message(struct pxa_i2c *i2c)
* Step 1: target slave address into IDBR
*/
writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c));
+ i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg);
/*
* Step 2: initiate the write.
@@ -745,8 +752,10 @@ static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
ret = i2c->msg_idx;
out:
- if (timeout == 0)
+ if (timeout == 0) {
i2c_pxa_scream_blue_murder(i2c, "timeout");
+ ret = I2C_RETRY;
+ }
return ret;
}
@@ -949,6 +958,7 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr)
* Write the next address.
*/
writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c));
+ i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg);
/*
* And trigger a repeated start, and send the byte.
@@ -1114,7 +1124,9 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
i2c->use_pio = 1;
if (of_get_property(np, "mrvl,i2c-fast-mode", NULL))
i2c->fast_mode = 1;
- *i2c_types = (u32)(of_id->data);
+
+ *i2c_types = (enum pxa_i2c_types)(of_id->data);
+
return 0;
}
@@ -1146,10 +1158,19 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct resource *res = NULL;
int ret, irq;
- i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
- if (!i2c) {
- ret = -ENOMEM;
- goto emalloc;
+ i2c = devm_kzalloc(&dev->dev, sizeof(struct pxa_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ i2c->reg_base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(i2c->reg_base))
+ return PTR_ERR(i2c->reg_base);
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "no irq resource: %d\n", irq);
+ return irq;
}
/* Default adapter num to device id; i2c_pxa_probe_dt can override. */
@@ -1159,19 +1180,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
if (ret > 0)
ret = i2c_pxa_probe_pdata(dev, i2c, &i2c_type);
if (ret < 0)
- goto eclk;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(dev, 0);
- if (res == NULL || irq < 0) {
- ret = -ENODEV;
- goto eclk;
- }
-
- if (!request_mem_region(res->start, resource_size(res), res->name)) {
- ret = -ENOMEM;
- goto eclk;
- }
+ return ret;
i2c->adap.owner = THIS_MODULE;
i2c->adap.retries = 5;
@@ -1181,16 +1190,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
- i2c->clk = clk_get(&dev->dev, NULL);
+ i2c->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- ret = PTR_ERR(i2c->clk);
- goto eclk;
- }
-
- i2c->reg_base = ioremap(res->start, resource_size(res));
- if (!i2c->reg_base) {
- ret = -EIO;
- goto eremap;
+ dev_err(&dev->dev, "failed to get the clk: %ld\n", PTR_ERR(i2c->clk));
+ return PTR_ERR(i2c->clk);
}
i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
@@ -1232,10 +1235,13 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.algo = &i2c_pxa_pio_algorithm;
} else {
i2c->adap.algo = &i2c_pxa_algorithm;
- ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
- dev_name(&dev->dev), i2c);
- if (ret)
+ ret = devm_request_irq(&dev->dev, irq, i2c_pxa_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ dev_name(&dev->dev), i2c);
+ if (ret) {
+ dev_err(&dev->dev, "failed to request irq: %d\n", ret);
goto ereqirq;
+ }
}
i2c_pxa_reset(i2c);
@@ -1248,33 +1254,22 @@ static int i2c_pxa_probe(struct platform_device *dev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
- printk(KERN_INFO "I2C: Failed to add bus\n");
- goto eadapt;
+ dev_err(&dev->dev, "failed to add bus: %d\n", ret);
+ goto ereqirq;
}
platform_set_drvdata(dev, i2c);
#ifdef CONFIG_I2C_PXA_SLAVE
- printk(KERN_INFO "I2C: %s: PXA I2C adapter, slave address %d\n",
- dev_name(&i2c->adap.dev), i2c->slave_addr);
+	dev_info(&i2c->adap.dev, "PXA I2C adapter, slave address %d\n",
+ i2c->slave_addr);
#else
- printk(KERN_INFO "I2C: %s: PXA I2C adapter\n",
- dev_name(&i2c->adap.dev));
+	dev_info(&i2c->adap.dev, "PXA I2C adapter\n");
#endif
return 0;
-eadapt:
- if (!i2c->use_pio)
- free_irq(irq, i2c);
ereqirq:
clk_disable_unprepare(i2c->clk);
- iounmap(i2c->reg_base);
-eremap:
- clk_put(i2c->clk);
-eclk:
- kfree(i2c);
-emalloc:
- release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -1283,15 +1278,8 @@ static int i2c_pxa_remove(struct platform_device *dev)
struct pxa_i2c *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
- if (!i2c->use_pio)
- free_irq(i2c->irq, i2c);
clk_disable_unprepare(i2c->clk);
- clk_put(i2c->clk);
-
- iounmap(i2c->reg_base);
- release_mem_region(i2c->iobase, i2c->iosize);
- kfree(i2c);
return 0;
}
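The i2c-pxa changes are a standard devm conversion: each devm_* resource is released automatically on probe failure or unbind, which is why the eadapt/eremap/eclk/emalloc unwind labels could be deleted. A condensed sketch of the resulting probe shape, with illustrative names:

static int sketch_probe(struct platform_device *pdev)
{
	struct sketch_i2c *i2c;	/* hypothetical: reg_base, clk members */
	struct resource *res;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;			/* nothing to unwind */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c->reg_base))
		return PTR_ERR(i2c->reg_base);	/* kzalloc auto-freed */

	i2c->clk = devm_clk_get(&pdev->dev, NULL);
	return PTR_ERR_OR_ZERO(i2c->clk);	/* map auto-unmapped */
}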
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 78a366814696..b7e1a3655421 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -100,6 +100,12 @@
#define I2C_HEADER_CONTINUE_XFER (1<<15)
#define I2C_HEADER_MASTER_ADDR_SHIFT 12
#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+#define I2C_CONFIG_LOAD 0x08C
+#define I2C_MSTR_CONFIG_LOAD (1 << 0)
+#define I2C_SLV_CONFIG_LOAD (1 << 1)
+#define I2C_TIMEOUT_CONFIG_LOAD (1 << 2)
+
/*
  * msg_end_type: The bus control which needs to be sent at the end of a transfer.
* @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -121,6 +127,8 @@ enum msg_end_type {
  * @has_single_clk_source: The i2c controller has a single clock source.
  *			   Tegra30 and earlier SoCs have two clock sources,
  *			   i.e. div-clk and fast-clk.
+ * @has_config_load_reg: Has the config load register to load the new
+ * configuration.
* @clk_divisor_hs_mode: Clock divisor in HS mode.
* @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is
* applicable if there is no fast clock source i.e. single clock
@@ -131,8 +139,10 @@ struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
bool has_per_pkt_xfer_complete_irq;
bool has_single_clk_source;
+ bool has_config_load_reg;
int clk_divisor_hs_mode;
int clk_divisor_std_fast_mode;
+ u16 clk_divisor_fast_plus_mode;
};
/**
@@ -172,6 +182,7 @@ struct tegra_i2c_dev {
size_t msg_buf_remaining;
int msg_read;
u32 bus_clk_rate;
+ u16 clk_divisor_non_hs_mode;
bool is_suspended;
};
@@ -410,6 +421,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
u32 val;
int err = 0;
u32 clk_divisor;
+ unsigned long timeout = jiffies + HZ;
err = tegra_i2c_clock_enable(i2c_dev);
if (err < 0) {
@@ -431,7 +443,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
/* Make sure clock divisor programmed correctly */
clk_divisor = i2c_dev->hw->clk_divisor_hs_mode;
- clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode <<
+ clk_divisor |= i2c_dev->clk_divisor_non_hs_mode <<
I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
@@ -451,6 +463,18 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (tegra_i2c_flush_fifos(i2c_dev))
err = -ETIMEDOUT;
+ if (i2c_dev->hw->has_config_load_reg) {
+ i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
+ while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev,
+ "timeout waiting for config load\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ }
+
tegra_i2c_clock_disable(i2c_dev);
if (i2c_dev->irq_disabled) {
@@ -681,6 +705,8 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_single_clk_source = false,
.clk_divisor_hs_mode = 3,
.clk_divisor_std_fast_mode = 0,
+ .clk_divisor_fast_plus_mode = 0,
+ .has_config_load_reg = false,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -689,6 +715,8 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_single_clk_source = false,
.clk_divisor_hs_mode = 3,
.clk_divisor_std_fast_mode = 0,
+ .clk_divisor_fast_plus_mode = 0,
+ .has_config_load_reg = false,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -697,10 +725,23 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_fast_mode = 0x19,
+ .clk_divisor_fast_plus_mode = 0x10,
+ .has_config_load_reg = false,
+};
+
+static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
+ .clk_divisor_fast_plus_mode = 0x10,
+ .has_config_load_reg = true,
};
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
@@ -793,7 +834,14 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
}
- clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1);
+ i2c_dev->clk_divisor_non_hs_mode =
+ i2c_dev->hw->clk_divisor_std_fast_mode;
+ if (i2c_dev->hw->clk_divisor_fast_plus_mode &&
+ (i2c_dev->bus_clk_rate == 1000000))
+ i2c_dev->clk_divisor_non_hs_mode =
+ i2c_dev->hw->clk_divisor_fast_plus_mode;
+
+ clk_multiplier *= (i2c_dev->clk_divisor_non_hs_mode + 1);
ret = clk_set_rate(i2c_dev->div_clk,
i2c_dev->bus_clk_rate * clk_multiplier);
if (ret) {
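Worked example for the divisor selection just above, using the values from the hw tables: at a 1 MHz (Fast-mode Plus) bus rate the non-HS divisor becomes 0x10 instead of 0x19, and the (divisor + 1) factor is folded into the rate requested from the clock framework. In sketch form, with clk_multiplier as computed earlier in probe:

	u16 div = hw->clk_divisor_std_fast_mode;	/* 0x19 on Tegra114+ */

	if (hw->clk_divisor_fast_plus_mode && bus_clk_rate == 1000000)
		div = hw->clk_divisor_fast_plus_mode;	/* 0x10 */

	clk_multiplier *= div + 1;	/* (div + 1): 17 at 1 MHz, 26 otherwise */
	ret = clk_set_rate(div_clk, bus_clk_rate * clk_multiplier);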
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 47e88adf2011..543456a0a338 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -391,11 +391,11 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
VPRBRD_USB_TIMEOUT_MS);
- if (ret != 1) {
- dev_err(&pdev->dev,
- "failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
- return -EIO;
- }
+ if (ret != 1) {
+ dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
+ i2c_bus_freq);
+ return -EIO;
+ }
} else {
dev_err(&pdev->dev,
"invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dcca7076231e..4233f5695352 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -198,10 +198,10 @@ static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr,
int rc;
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, readlen, DMA_FROM_DEVICE);
- rc = dma_mapping_error(ctx->dev, paddr);
- if (rc) {
+ if (dma_mapping_error(ctx->dev, paddr)) {
dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
ctx->dma_buffer);
+ rc = -ENOMEM;
goto err;
}
@@ -241,10 +241,10 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);
- rc = dma_mapping_error(ctx->dev, paddr);
- if (rc) {
+ if (dma_mapping_error(ctx->dev, paddr)) {
dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
ctx->dma_buffer);
+ rc = -ENOMEM;
goto err;
}
@@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
rc = i2c_add_adapter(adapter);
if (rc) {
 		dev_err(&pdev->dev, "Adapter registration failed\n");
+ mbox_free_channel(ctx->mbox_chan);
return rc;
}
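Both hunks above fix the same subtle bug: dma_mapping_error() returns a nonzero indicator, not an errno, so storing it in rc would have propagated a meaningless value to callers. The usual idiom, sketched with generic names:

	dma_addr_t paddr;

	paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr)) {
		dev_err(dev, "failed to map DMA buffer\n");
		return -ENOMEM;		/* choose a real errno ourselves */
	}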
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4dda23f22a67..e23a7b068c60 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -283,7 +283,7 @@ static void xiic_reinit(struct xiic_i2c *i2c)
/* Enable interrupts */
xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
- xiic_irq_clr_en(i2c, XIIC_INTR_AAS_MASK | XIIC_INTR_ARB_LOST_MASK);
+ xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
}
static void xiic_deinit(struct xiic_i2c *i2c)
@@ -358,8 +358,9 @@ static void xiic_wakeup(struct xiic_i2c *i2c, int code)
wake_up(&i2c->wait);
}
-static void xiic_process(struct xiic_i2c *i2c)
+static irqreturn_t xiic_process(int irq, void *dev_id)
{
+ struct xiic_i2c *i2c = dev_id;
u32 pend, isr, ier;
u32 clr = 0;
@@ -368,6 +369,7 @@ static void xiic_process(struct xiic_i2c *i2c)
* To find which interrupts are pending; AND interrupts pending with
* interrupts masked.
*/
+ spin_lock(&i2c->lock);
isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
pend = isr & ier;
@@ -378,11 +380,6 @@ static void xiic_process(struct xiic_i2c *i2c)
__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
i2c->tx_msg, i2c->nmsgs);
- /* Do not processes a devices interrupts if the device has no
- * interrupts pending
- */
- if (!pend)
- return;
/* Service requesting interrupt */
if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
@@ -402,13 +399,15 @@ static void xiic_process(struct xiic_i2c *i2c)
*/
xiic_reinit(i2c);
+ if (i2c->rx_msg)
+ xiic_wakeup(i2c, STATE_ERROR);
if (i2c->tx_msg)
xiic_wakeup(i2c, STATE_ERROR);
-
- } else if (pend & XIIC_INTR_RX_FULL_MASK) {
+ }
+ if (pend & XIIC_INTR_RX_FULL_MASK) {
/* Receive register/FIFO is full */
- clr = XIIC_INTR_RX_FULL_MASK;
+ clr |= XIIC_INTR_RX_FULL_MASK;
if (!i2c->rx_msg) {
dev_dbg(i2c->adap.dev.parent,
"%s unexpected RX IRQ\n", __func__);
@@ -441,9 +440,10 @@ static void xiic_process(struct xiic_i2c *i2c)
__xiic_start_xfer(i2c);
}
}
- } else if (pend & XIIC_INTR_BNB_MASK) {
+ }
+ if (pend & XIIC_INTR_BNB_MASK) {
/* IIC bus has transitioned to not busy */
- clr = XIIC_INTR_BNB_MASK;
+ clr |= XIIC_INTR_BNB_MASK;
/* The bus is not busy, disable BusNotBusy interrupt */
xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
@@ -456,12 +456,12 @@ static void xiic_process(struct xiic_i2c *i2c)
xiic_wakeup(i2c, STATE_DONE);
else
xiic_wakeup(i2c, STATE_ERROR);
-
- } else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
+ }
+ if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
/* Transmit register/FIFO is empty or ½ empty */
- clr = pend &
- (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
+ clr |= (pend &
+ (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));
if (!i2c->tx_msg) {
dev_dbg(i2c->adap.dev.parent,
@@ -492,16 +492,13 @@ static void xiic_process(struct xiic_i2c *i2c)
* make sure to disable tx half
*/
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
- } else {
- /* got IRQ which is not acked */
- dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
- __func__);
- clr = pend;
}
out:
dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
+ spin_unlock(&i2c->lock);
+ return IRQ_HANDLED;
}
static int xiic_bus_busy(struct xiic_i2c *i2c)
@@ -525,7 +522,7 @@ static int xiic_busy(struct xiic_i2c *i2c)
*/
err = xiic_bus_busy(i2c);
while (err && tries--) {
- mdelay(1);
+ msleep(1);
err = xiic_bus_busy(i2c);
}
@@ -602,19 +599,21 @@ static void xiic_start_send(struct xiic_i2c *i2c)
static irqreturn_t xiic_isr(int irq, void *dev_id)
{
struct xiic_i2c *i2c = dev_id;
-
- spin_lock(&i2c->lock);
- /* disable interrupts globally */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
+ u32 pend, isr, ier;
+ irqreturn_t ret = IRQ_NONE;
+ /* Do not process a device's interrupts if the device has no
+ * interrupts pending
+ */
dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
- xiic_process(i2c);
-
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
- spin_unlock(&i2c->lock);
+ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+ ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+ pend = isr & ier;
+ if (pend)
+ ret = IRQ_WAKE_THREAD;
- return IRQ_HANDLED;
+ return ret;
}
static void __xiic_start_xfer(struct xiic_i2c *i2c)
@@ -663,16 +662,8 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
static void xiic_start_xfer(struct xiic_i2c *i2c)
{
- unsigned long flags;
-
- spin_lock_irqsave(&i2c->lock, flags);
- xiic_reinit(i2c);
- /* disable interrupts globally */
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
- spin_unlock_irqrestore(&i2c->lock, flags);
__xiic_start_xfer(i2c);
- xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
}
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -755,7 +746,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- ret = devm_request_irq(&pdev->dev, irq, xiic_isr, 0, pdev->name, i2c);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
+ xiic_process, IRQF_ONESHOT,
+ pdev->name, i2c);
+
if (ret < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
return ret;
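
Taken together, the xiic hunks above split the old lock-and-poll handler into the stock threaded-IRQ shape: a hard handler that may not sleep and only decides whether work is pending, and a thread that does the servicing under the driver lock while IRQF_ONESHOT keeps the line masked. A reduced sketch of that shape; the foo_* names and register offsets are purely illustrative:

struct foo_dev {
	void __iomem *base;
	spinlock_t lock;
};

#define FOO_ISR 0x20	/* interrupt status (illustrative offset) */
#define FOO_IER 0x28	/* interrupt enable (illustrative offset) */

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *p = dev_id;

	/* pending = status AND enabled; nothing pending means not ours */
	if (!(readl(p->base + FOO_ISR) & readl(p->base + FOO_IER)))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;	/* defer the sleepable work */
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	struct foo_dev *p = dev_id;

	spin_lock(&p->lock);
	/* ... service every pending source, ack it, wake waiters ... */
	spin_unlock(&p->lock);

	return IRQ_HANDLED;
}

/* probe: IRQF_ONESHOT keeps the line masked until foo_thread returns */
ret = devm_request_threaded_irq(&pdev->dev, irq, foo_hardirq, foo_thread,
				IRQF_ONESHOT, pdev->name, p);
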
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 069a41f116dd..5f89f1e3c2f2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -27,6 +27,7 @@
I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com>
*/
+#include <dt-bindings/i2c/i2c.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -47,6 +48,7 @@
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_wakeirq.h>
#include <linux/acpi.h>
#include <linux/jump_label.h>
#include <asm/uaccess.h>
@@ -57,6 +59,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/i2c.h>
+#define I2C_ADDR_OFFSET_TEN_BIT 0xa000
+#define I2C_ADDR_OFFSET_SLAVE 0x1000
+
/* core_lock protects i2c_adapter_idr, and guarantees
that device detection, deletion of detected devices, and attach_adapter
calls are serialized */
@@ -567,6 +572,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
+ bri->set_scl(adap, val);
+ ndelay(RECOVERY_NDELAY);
+
/*
* By this time SCL is high, as we need to give 9 falling-rising edges
*/
@@ -597,7 +605,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
- adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -639,11 +646,13 @@ static int i2c_device_probe(struct device *dev)
if (!client->irq) {
int irq = -ENOENT;
- if (dev->of_node)
- irq = of_irq_get(dev->of_node, 0);
- else if (ACPI_COMPANION(dev))
+ if (dev->of_node) {
+ irq = of_irq_get_byname(dev->of_node, "irq");
+ if (irq == -EINVAL || irq == -ENODATA)
+ irq = of_irq_get(dev->of_node, 0);
+ } else if (ACPI_COMPANION(dev)) {
irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
-
+ }
if (irq == -EPROBE_DEFER)
return irq;
if (irq < 0)
@@ -656,23 +665,49 @@ static int i2c_device_probe(struct device *dev)
if (!driver->probe || !driver->id_table)
return -ENODEV;
- if (!device_can_wakeup(&client->dev))
- device_init_wakeup(&client->dev,
- client->flags & I2C_CLIENT_WAKE);
+ if (client->flags & I2C_CLIENT_WAKE) {
+ int wakeirq = -ENOENT;
+
+ if (dev->of_node) {
+ wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (wakeirq == -EPROBE_DEFER)
+ return wakeirq;
+ }
+
+ device_init_wakeup(&client->dev, true);
+
+ if (wakeirq > 0 && wakeirq != client->irq)
+ status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
+ else if (client->irq > 0)
+ status = dev_pm_set_wake_irq(dev, wakeirq);
+ else
+ status = 0;
+
+ if (status)
+ dev_warn(&client->dev, "failed to set up wakeup irq\n");
+ }
+
dev_dbg(dev, "probe\n");
status = of_clk_set_defaults(dev->of_node, false);
if (status < 0)
- return status;
+ goto err_clear_wakeup_irq;
status = dev_pm_domain_attach(&client->dev, true);
if (status != -EPROBE_DEFER) {
status = driver->probe(client, i2c_match_id(driver->id_table,
client));
if (status)
- dev_pm_domain_detach(&client->dev, true);
+ goto err_detach_pm_domain;
}
+ return 0;
+
+err_detach_pm_domain:
+ dev_pm_domain_detach(&client->dev, true);
+err_clear_wakeup_irq:
+ dev_pm_clear_wake_irq(&client->dev);
+ device_init_wakeup(&client->dev, false);
return status;
}
@@ -692,6 +727,10 @@ static int i2c_device_remove(struct device *dev)
}
dev_pm_domain_detach(&client->dev, true);
+
+ dev_pm_clear_wake_irq(&client->dev);
+ device_init_wakeup(&client->dev, false);
+
return status;
}
@@ -776,17 +815,32 @@ struct i2c_client *i2c_verify_client(struct device *dev)
EXPORT_SYMBOL(i2c_verify_client);
+/* Return a unique address which takes the flags of the client into account */
+static unsigned short i2c_encode_flags_to_addr(struct i2c_client *client)
+{
+ unsigned short addr = client->addr;
+
+ /* For some client flags, add an arbitrary offset to avoid collisions */
+ if (client->flags & I2C_CLIENT_TEN)
+ addr |= I2C_ADDR_OFFSET_TEN_BIT;
+
+ if (client->flags & I2C_CLIENT_SLAVE)
+ addr |= I2C_ADDR_OFFSET_SLAVE;
+
+ return addr;
+}
+
/* This is a permissive address validity check, I2C address map constraints
* are purposely not enforced, except for the general call address. */
-static int i2c_check_client_addr_validity(const struct i2c_client *client)
+static int i2c_check_addr_validity(unsigned addr, unsigned short flags)
{
- if (client->flags & I2C_CLIENT_TEN) {
+ if (flags & I2C_CLIENT_TEN) {
/* 10-bit address, all values are valid */
- if (client->addr > 0x3ff)
+ if (addr > 0x3ff)
return -EINVAL;
} else {
/* 7-bit address, reject the general call address */
- if (client->addr == 0x00 || client->addr > 0x7f)
+ if (addr == 0x00 || addr > 0x7f)
return -EINVAL;
}
return 0;
@@ -796,7 +850,7 @@ static int i2c_check_client_addr_validity(const struct i2c_client *client)
* device uses a reserved address, then it shouldn't be probed. 7-bit
* addressing is assumed, 10-bit address devices are rare and should be
* explicitly enumerated. */
-static int i2c_check_addr_validity(unsigned short addr)
+static int i2c_check_7bit_addr_validity_strict(unsigned short addr)
{
/*
* Reserved addresses per I2C specification:
@@ -818,7 +872,7 @@ static int __i2c_check_addr_busy(struct device *dev, void *addrp)
struct i2c_client *client = i2c_verify_client(dev);
int addr = *(int *)addrp;
- if (client && client->addr == addr)
+ if (client && i2c_encode_flags_to_addr(client) == addr)
return -EBUSY;
return 0;
}
@@ -921,10 +975,8 @@ static void i2c_dev_set_name(struct i2c_adapter *adap,
return;
}
- /* For 10-bit clients, add an arbitrary offset to avoid collisions */
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr | ((client->flags & I2C_CLIENT_TEN)
- ? 0xa000 : 0));
+ i2c_encode_flags_to_addr(client));
}
/**
@@ -966,8 +1018,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
strlcpy(client->name, info->type, sizeof(client->name));
- /* Check for address validity */
- status = i2c_check_client_addr_validity(client);
+ status = i2c_check_addr_validity(client->addr, client->flags);
if (status) {
dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n",
client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr);
@@ -975,7 +1026,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
}
/* Check for address business */
- status = i2c_check_addr_busy(adap, client->addr);
+ status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
if (status)
goto out_err;
@@ -1012,6 +1063,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ if (client->dev.of_node)
+ of_node_clear_flag(client->dev.of_node, OF_POPULATED);
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1138,6 +1191,16 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
+ if ((info.addr & I2C_ADDR_OFFSET_TEN_BIT) == I2C_ADDR_OFFSET_TEN_BIT) {
+ info.addr &= ~I2C_ADDR_OFFSET_TEN_BIT;
+ info.flags |= I2C_CLIENT_TEN;
+ }
+
+ if (info.addr & I2C_ADDR_OFFSET_SLAVE) {
+ info.addr &= ~I2C_ADDR_OFFSET_SLAVE;
+ info.flags |= I2C_CLIENT_SLAVE;
+ }
+
client = i2c_new_device(adap, &info);
if (!client)
return -EINVAL;
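
The encode/decode pair above is plain bit arithmetic: a client's bus-unique identity is its 7- or 10-bit address plus an out-of-band flag offset, 0xa000 for ten-bit clients and 0x1000 for slave-mode clients. A worked example of the round trip (values illustrative):

/* a 10-bit client at 0x250 encodes as 0x250 | 0xa000 == 0xa250, so it
 * can never collide with a 7-bit client at e.g. 0x50 (0x0050) */
unsigned short addr = 0x250 | I2C_ADDR_OFFSET_TEN_BIT;

/* decoding, as the sysfs new_device path above does */
if ((addr & I2C_ADDR_OFFSET_TEN_BIT) == I2C_ADDR_OFFSET_TEN_BIT) {
	addr &= ~I2C_ADDR_OFFSET_TEN_BIT;	/* back to 0x250 */
	/* and set I2C_CLIENT_TEN in the flags */
}
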
@@ -1189,7 +1252,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
i2c_adapter_depth(adap));
list_for_each_entry_safe(client, next, &adap->userspace_clients,
detected) {
- if (client->addr == addr) {
+ if (i2c_encode_flags_to_addr(client) == addr) {
dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
"delete_device", client->name, client->addr);
@@ -1269,7 +1332,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
struct i2c_client *result;
struct i2c_board_info info = {};
struct dev_archdata dev_ad = {};
- const __be32 *addr;
+ const __be32 *addr_be;
+ u32 addr;
int len;
dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
@@ -1280,20 +1344,31 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
return ERR_PTR(-EINVAL);
}
- addr = of_get_property(node, "reg", &len);
- if (!addr || (len < sizeof(*addr))) {
+ addr_be = of_get_property(node, "reg", &len);
+ if (!addr_be || (len < sizeof(*addr_be))) {
dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
node->full_name);
return ERR_PTR(-EINVAL);
}
- info.addr = be32_to_cpup(addr);
- if (info.addr > (1 << 10) - 1) {
+ addr = be32_to_cpup(addr_be);
+ if (addr & I2C_TEN_BIT_ADDRESS) {
+ addr &= ~I2C_TEN_BIT_ADDRESS;
+ info.flags |= I2C_CLIENT_TEN;
+ }
+
+ if (addr & I2C_OWN_SLAVE_ADDRESS) {
+ addr &= ~I2C_OWN_SLAVE_ADDRESS;
+ info.flags |= I2C_CLIENT_SLAVE;
+ }
+
+ if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
info.addr, node->full_name);
return ERR_PTR(-EINVAL);
}
+ info.addr = addr;
info.of_node = of_node_get(node);
info.archdata = &dev_ad;
@@ -1320,8 +1395,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node)
+ for_each_available_child_of_node(adap->dev.of_node, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
of_i2c_register_device(adap, node);
+ }
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -1333,13 +1411,17 @@ static int of_dev_node_match(struct device *dev, void *data)
struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_client *client;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_client(dev);
+ client = i2c_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
@@ -1347,15 +1429,37 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
{
struct device *dev;
+ struct i2c_adapter *adapter;
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
+ dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
if (!dev)
return NULL;
- return i2c_verify_adapter(dev);
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
}
EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ struct i2c_adapter *adapter;
+
+ adapter = of_find_i2c_adapter_by_node(node);
+ if (!adapter)
+ return NULL;
+
+ if (!try_module_get(adapter->owner)) {
+ put_device(&adapter->dev);
+ adapter = NULL;
+ }
+
+ return adapter;
+}
+EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
#else
static void of_i2c_register_devices(struct i2c_adapter *adap) { }
#endif /* CONFIG_OF */
@@ -1853,6 +1957,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (adap == NULL)
return NOTIFY_OK; /* not for us */
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&adap->dev);
+ return NOTIFY_OK;
+ }
+
client = of_i2c_register_device(adap, rd->dn);
put_device(&adap->dev);
@@ -1863,6 +1972,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
}
break;
case OF_RECONFIG_CHANGE_REMOVE:
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
client = of_find_i2c_device_by_node(rd->dn);
if (client == NULL)
@@ -2238,14 +2351,14 @@ static int i2c_detect_address(struct i2c_client *temp_client,
int err;
/* Make sure the address is valid */
- err = i2c_check_addr_validity(addr);
+ err = i2c_check_7bit_addr_validity_strict(addr);
if (err) {
dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n",
addr);
return err;
}
- /* Skip if already in use */
+ /* Skip if already in use (7 bit, no need to encode flags) */
if (i2c_check_addr_busy(adapter, addr))
return 0;
@@ -2355,13 +2468,13 @@ i2c_new_probed_device(struct i2c_adapter *adap,
for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
/* Check address validity */
- if (i2c_check_addr_validity(addr_list[i]) < 0) {
+ if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) {
dev_warn(&adap->dev, "Invalid 7-bit address "
"0x%02x\n", addr_list[i]);
continue;
}
- /* Check address availability */
+ /* Check address availability (7 bit, no need to encode flags) */
if (i2c_check_addr_busy(adap, addr_list[i])) {
dev_dbg(&adap->dev, "Address 0x%02x already in "
"use, not probing\n", addr_list[i]);
@@ -2389,9 +2502,15 @@ struct i2c_adapter *i2c_get_adapter(int nr)
mutex_lock(&core_lock);
adapter = idr_find(&i2c_adapter_idr, nr);
- if (adapter && !try_module_get(adapter->owner))
+ if (!adapter)
+ goto exit;
+
+ if (try_module_get(adapter->owner))
+ get_device(&adapter->dev);
+ else
adapter = NULL;
+ exit:
mutex_unlock(&core_lock);
return adapter;
}
@@ -2399,8 +2518,11 @@ EXPORT_SYMBOL(i2c_get_adapter);
void i2c_put_adapter(struct i2c_adapter *adap)
{
- if (adap)
- module_put(adap->owner);
+ if (!adap)
+ return;
+
+ put_device(&adap->dev);
+ module_put(adap->owner);
}
EXPORT_SYMBOL(i2c_put_adapter);
@@ -2918,6 +3040,63 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+/**
+ * i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
+ * @client: Handle to slave device
+ * @command: Byte interpreted by slave
+ * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes
+ * @values: Byte array into which data will be read; big enough to hold
+ * the data returned by the slave. SMBus allows at most
+ * I2C_SMBUS_BLOCK_MAX bytes.
+ *
+ * This executes the SMBus "block read" protocol if supported by the adapter.
+ * If block read is not supported, it emulates it using either word or byte
+ * read protocols depending on availability.
+ *
+ * The addresses of the I2C slave device that are accessed with this function
+ * must be mapped to a linear region, so that a block read will have the same
+ * effect as a byte read. Before using this function you must double-check
+ * that the I2C slave really supports exchanging a block transfer with a
+ * series of byte transfers.
+ */
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values)
+{
+ u8 i = 0;
+ int status;
+
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return i2c_smbus_read_i2c_block_data(client, command, length, values);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ return -EOPNOTSUPP;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ while ((i + 2) <= length) {
+ status = i2c_smbus_read_word_data(client, command + i);
+ if (status < 0)
+ return status;
+ values[i] = status & 0xff;
+ values[i + 1] = status >> 8;
+ i += 2;
+ }
+ }
+
+ while (i < length) {
+ status = i2c_smbus_read_byte_data(client, command + i);
+ if (status < 0)
+ return status;
+ values[i] = status;
+ i++;
+ }
+
+ return i;
+}
+EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
@@ -2928,9 +3107,13 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
return -EINVAL;
}
+ if (!(client->flags & I2C_CLIENT_SLAVE))
+ dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
+ __func__);
+
if (!(client->flags & I2C_CLIENT_TEN)) {
/* Enforce stricter address checking */
- ret = i2c_check_addr_validity(client->addr);
+ ret = i2c_check_7bit_addr_validity_strict(client->addr);
if (ret) {
dev_err(&client->dev, "%s: invalid address\n", __func__);
return ret;
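
For the i2c_smbus_read_i2c_block_data_or_emulated() helper added above, a usage sketch (register layout and length are illustrative): the caller only needs byte-granular, linearly addressable registers, and on success gets back the number of bytes actually read.

u8 vals[6];
int ret;

/* reads registers 0x01..0x06 via block, word or byte transfers,
 * whichever the adapter supports */
ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x01,
						sizeof(vals), vals);
if (ret < 0)
	return ret;	/* bus error, or -EOPNOTSUPP if not even
			 * byte reads are available */
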
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..b2039f94c9d8 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count > attr->size)
- return -EFBIG;
-
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -163,7 +157,6 @@ MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id);
static struct i2c_driver i2c_slave_eeprom_driver = {
.driver = {
.name = "i2c-slave-eeprom",
- .owner = THIS_MODULE,
},
.probe = i2c_slave_eeprom_probe,
.remove = i2c_slave_eeprom_remove,
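
The dropped -EFBIG checks were redundant rather than wrong: before a bin attribute's read or write handler is called, the sysfs layer already clamps the request against the attribute size, roughly as sketched below (see fs/sysfs/file.c):

/* approximate clamp performed by the sysfs bin-attribute layer */
if (size) {
	if (off >= size)
		return 0;		/* past the end: EOF, not an error */
	if (off + count > size)
		count = size - off;	/* never hands the driver an overrun */
}
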
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index fdd0769c84a3..f06b0e24673b 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -61,4 +61,15 @@ config I2C_MUX_PINCTRL
This driver can also be built as a module. If so, the module will be
called pinctrl-i2cmux.
+config I2C_MUX_REG
+ tristate "Register-based I2C multiplexer"
+ help
+ If you say yes to this option, support will be included for a
+ register-based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ by a single register.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-reg.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 465778b5d5dc..e89799b76a92 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
+obj-$(CONFIG_I2C_MUX_REG) += i2c-mux-reg.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 5cf1b60b69e2..402e3a6c671a 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -196,7 +196,8 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
dev_err(dev, "Cannot parse i2c-parent\n");
return -EINVAL;
}
- arb->parent = of_find_i2c_adapter_by_node(parent_np);
+ arb->parent = of_get_i2c_adapter_by_node(parent_np);
+ of_node_put(parent_np);
if (!arb->parent) {
dev_err(dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
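
The conversion above is the intended consumer pattern for the new of_get_i2c_adapter_by_node(): unlike the _find_ variant it also pins the adapter's module, and the reworked i2c_put_adapter() later drops both the device and the module reference. Sketched out (phandle name as in this driver):

struct device_node *np;
struct i2c_adapter *adap;

np = of_parse_phandle(dev->of_node, "i2c-parent", 0);
if (!np)
	return -ENODEV;

adap = of_get_i2c_adapter_by_node(np);	/* device ref + module ref */
of_node_put(np);			/* node ref no longer needed */
if (!adap)
	return -EPROBE_DEFER;		/* parent bus not there yet */

/* ... issue transfers on adap ... */

i2c_put_adapter(adap);			/* drops both references */
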
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 70db99264339..b8e11c16d98c 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -76,6 +76,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
+ of_node_put(adapter_np);
if (!adapter)
return -EPROBE_DEFER;
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 0c8d4d2cbdaf..d0ba424adebc 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -386,7 +386,6 @@ static int pca9541_remove(struct i2c_client *client)
static struct i2c_driver pca9541_driver = {
.driver = {
.name = "pca9541",
- .owner = THIS_MODULE,
},
.probe = pca9541_probe,
.remove = pca9541_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index ea4aa9dfcea9..acfcef3d4068 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -300,7 +300,6 @@ static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
.pm = &pca954x_pm,
- .owner = THIS_MODULE,
},
.probe = pca954x_probe,
.remove = pca954x_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index b48378c4b40d..b5a982ba8898 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -111,6 +111,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
+ of_node_put(adapter_np);
if (!adapter) {
dev_err(mux->dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
new file mode 100644
index 000000000000..5fbd5bd0878f
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -0,0 +1,290 @@
+/*
+ * I2C multiplexer using a single register
+ *
+ * Copyright 2015 Freescale Semiconductor
+ * York Sun <yorksun@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_data/i2c-mux-reg.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct regmux {
+ struct i2c_adapter *parent;
+ struct i2c_adapter **adap; /* child busses */
+ struct i2c_mux_reg_platform_data data;
+};
+
+static int i2c_mux_reg_set(const struct regmux *mux, unsigned int chan_id)
+{
+ if (!mux->data.reg)
+ return -EINVAL;
+
+ /*
+ * Write to the register, followed by a read to ensure the write is
+ * completed on a "posted" bus, for example PCI or write buffers.
+ * The endianness of reading doesn't matter and the return data
+ * is not used.
+ */
+ switch (mux->data.reg_size) {
+ case 4:
+ if (mux->data.little_endian)
+ iowrite32(chan_id, mux->data.reg);
+ else
+ iowrite32be(chan_id, mux->data.reg);
+ if (!mux->data.write_only)
+ ioread32(mux->data.reg);
+ break;
+ case 2:
+ if (mux->data.little_endian)
+ iowrite16(chan_id, mux->data.reg);
+ else
+ iowrite16be(chan_id, mux->data.reg);
+ if (!mux->data.write_only)
+ ioread16(mux->data.reg);
+ break;
+ case 1:
+ iowrite8(chan_id, mux->data.reg);
+ if (!mux->data.write_only)
+ ioread8(mux->data.reg);
+ break;
+ }
+
+ return 0;
+}
+
+static int i2c_mux_reg_select(struct i2c_adapter *adap, void *data,
+ unsigned int chan)
+{
+ struct regmux *mux = data;
+
+ return i2c_mux_reg_set(mux, chan);
+}
+
+static int i2c_mux_reg_deselect(struct i2c_adapter *adap, void *data,
+ unsigned int chan)
+{
+ struct regmux *mux = data;
+
+ if (mux->data.idle_in_use)
+ return i2c_mux_reg_set(mux, mux->data.idle);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_reg_probe_dt(struct regmux *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *adapter_np, *child;
+ struct i2c_adapter *adapter;
+ struct resource res;
+ unsigned *values;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ of_node_put(adapter_np);
+ if (!adapter)
+ return -EPROBE_DEFER;
+
+ mux->parent = adapter;
+ mux->data.parent = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ mux->data.n_values = of_get_child_count(np);
+ if (of_find_property(np, "little-endian", NULL)) {
+ mux->data.little_endian = true;
+ } else if (of_find_property(np, "big-endian", NULL)) {
+ mux->data.little_endian = false;
+ } else {
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : \
+ defined(__LITTLE_ENDIAN)
+ mux->data.little_endian = true;
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : \
+ defined(__BIG_ENDIAN)
+ mux->data.little_endian = false;
+#else
+#error Endianness not defined?
+#endif
+ }
+ if (of_find_property(np, "write-only", NULL))
+ mux->data.write_only = true;
+ else
+ mux->data.write_only = false;
+
+ values = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.values) * mux->data.n_values,
+ GFP_KERNEL);
+ if (!values) {
+ dev_err(&pdev->dev, "Cannot allocate values array\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ of_property_read_u32(child, "reg", values + i);
+ i++;
+ }
+ mux->data.values = values;
+
+ if (!of_property_read_u32(np, "idle-state", &mux->data.idle))
+ mux->data.idle_in_use = true;
+
+ /* map address from the "reg" property if it exists */
+ if (of_address_to_resource(np, 0, &res) == 0) {
+ mux->data.reg_size = resource_size(&res);
+ mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(mux->data.reg))
+ return PTR_ERR(mux->data.reg);
+ }
+
+ return 0;
+}
+#else
+static int i2c_mux_reg_probe_dt(struct regmux *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int i2c_mux_reg_probe(struct platform_device *pdev)
+{
+ struct regmux *mux;
+ struct i2c_adapter *parent;
+ struct resource *res;
+ int (*deselect)(struct i2c_adapter *, void *, u32);
+ unsigned int class;
+ int i, ret, nr;
+
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mux);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ memcpy(&mux->data, dev_get_platdata(&pdev->dev),
+ sizeof(mux->data));
+
+ parent = i2c_get_adapter(mux->data.parent);
+ if (!parent)
+ return -EPROBE_DEFER;
+
+ mux->parent = parent;
+ } else {
+ ret = i2c_mux_reg_probe_dt(mux, pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error parsing device tree\n");
+ return ret;
+ }
+ }
+
+ if (!mux->data.reg) {
+ dev_info(&pdev->dev,
+ "Register not set, using platform resource\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mux->data.reg_size = resource_size(res);
+ mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mux->data.reg))
+ return PTR_ERR(mux->data.reg);
+ }
+
+ if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
+ mux->data.reg_size != 1) {
+ dev_err(&pdev->dev, "Invalid register size\n");
+ return -EINVAL;
+ }
+
+ mux->adap = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->adap) * mux->data.n_values,
+ GFP_KERNEL);
+ if (!mux->adap) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure\n");
+ return -ENOMEM;
+ }
+
+ if (mux->data.idle_in_use)
+ deselect = i2c_mux_reg_deselect;
+ else
+ deselect = NULL;
+
+ for (i = 0; i < mux->data.n_values; i++) {
+ nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
+ class = mux->data.classes ? mux->data.classes[i] : 0;
+
+ mux->adap[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, mux,
+ nr, mux->data.values[i],
+ class, i2c_mux_reg_select,
+ deselect);
+ if (!mux->adap[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto add_adapter_failed;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
+ mux->data.n_values, mux->parent->name);
+
+ return 0;
+
+add_adapter_failed:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->adap[i - 1]);
+
+ return ret;
+}
+
+static int i2c_mux_reg_remove(struct platform_device *pdev)
+{
+ struct regmux *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->data.n_values; i++)
+ i2c_del_mux_adapter(mux->adap[i]);
+
+ i2c_put_adapter(mux->parent);
+
+ return 0;
+}
+
+static const struct of_device_id i2c_mux_reg_of_match[] = {
+ { .compatible = "i2c-mux-reg", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match);
+
+static struct platform_driver i2c_mux_reg_driver = {
+ .probe = i2c_mux_reg_probe,
+ .remove = i2c_mux_reg_remove,
+ .driver = {
+ .name = "i2c-mux-reg",
+ },
+};
+
+module_platform_driver(i2c_mux_reg_driver);
+
+MODULE_DESCRIPTION("Register-based I2C multiplexer driver");
+MODULE_AUTHOR("York Sun <yorksun@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c-mux-reg");
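
Besides the device-tree path, the new driver accepts platform data, which probe copies wholesale into mux->data. A board-file sketch under the assumption of a write-only one-byte select register supplied as a platform MEM resource (all values illustrative; fields are from <linux/platform_data/i2c-mux-reg.h>):

static const unsigned int demo_mux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_reg_platform_data demo_mux_pdata = {
	.parent		= 0,	/* i2c_adapter_id() of the parent bus */
	.values		= demo_mux_values,	/* written to select channel i */
	.n_values	= ARRAY_SIZE(demo_mux_values),
	.reg_size	= 1,	/* register width: 1, 2 or 4 bytes */
	.write_only	= true,	/* skip the posted-write read-back */
	.idle		= 0,
	.idle_in_use	= true,	/* deselect by writing .idle */
	/* .reg left NULL: probe maps the platform MEM resource instead */
};
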
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 2a36a95d95cf..3a3738fe016b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -591,6 +591,67 @@ static struct cpuidle_state bdw_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state skl_cstates[] = {
+ {
+ .name = "C1-SKL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-SKL",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C3-SKL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 70,
+ .target_residency = 100,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-SKL",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 75,
+ .target_residency = 200,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7s-SKL",
+ .desc = "MWAIT 0x33",
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 124,
+ .target_residency = 800,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C8-SKL",
+ .desc = "MWAIT 0x40",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 174,
+ .target_residency = 800,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C10-SKL",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 890,
+ .target_residency = 5000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
@@ -810,6 +871,12 @@ static const struct idle_cpu idle_cpu_bdw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_skl = {
+ .state_table = skl_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
+
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -844,6 +911,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x47, idle_cpu_bdw),
ICPU(0x4f, idle_cpu_bdw),
ICPU(0x56, idle_cpu_bdw),
+ ICPU(0x4e, idle_cpu_skl),
+ ICPU(0x5e, idle_cpu_skl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -965,7 +1034,8 @@ static int __init intel_idle_cpuidle_driver_init(void)
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
int num_substates, mwait_hint, mwait_cstate;
- if (cpuidle_state_table[cstate].enter == NULL)
+ if ((cpuidle_state_table[cstate].enter == NULL) &&
+ (cpuidle_state_table[cstate].enter_freeze == NULL))
break;
if (cstate + 1 > max_cstate) {
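
The last hunk above widens the table terminator test: a state that provides only enter_freeze (usable just for suspend-to-idle) must still be registered, so only an entry with neither callback ends the table. As a predicate:

/* sketch: an entry terminates the c-state table only if it can neither
 * be entered normally nor be used for suspend-to-idle */
static bool cstate_table_end(const struct cpuidle_state *s)
{
	return !s->enter && !s->enter_freeze;
}
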
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 00e7bcbdbe24..a59047d7657e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -86,18 +86,6 @@ config KXSD9
To compile this driver as a module, choose M here: the module
will be called kxsd9.
-config MMA8452
- tristate "Freescale MMA8452Q Accelerometer Driver"
- depends on I2C
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to build support for the Freescale MMA8452Q 3-axis
- accelerometer.
-
- To compile this driver as a module, choose M here: the module
- will be called mma8452.
-
config KXCJK1013
tristate "Kionix 3-Axis Accelerometer Driver"
depends on I2C
@@ -111,6 +99,18 @@ config KXCJK1013
To compile this driver as a module, choose M here: the module will
be called kxcjk-1013.
+config MMA8452
+ tristate "Freescale MMA8452Q Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MMA8452Q 3-axis
+ accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mma8452.
+
config MMA9551_CORE
tristate
@@ -140,6 +140,8 @@ config MMA9553
config STK8312
tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to get support for the Sensortek STK8312 3-axis
accelerometer.
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 75c6d2103e07..f04b88406995 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -846,7 +846,6 @@ MODULE_DEVICE_TABLE(i2c, bma180_ids);
static struct i2c_driver bma180_driver = {
.driver = {
.name = "bma180",
- .owner = THIS_MODULE,
.pm = BMA180_PM_OPS,
},
.probe = bma180_probe,
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 4e70f51c2370..0104cdef8709 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -151,6 +151,7 @@ struct bmc150_scale_info {
};
struct bmc150_accel_chip_info {
+ const char *name;
u8 chip_id;
const struct iio_chan_spec *channels;
int num_channels;
@@ -241,7 +242,6 @@ static const struct {
{500000, BMC150_ACCEL_SLEEP_500_MS},
{1000000, BMC150_ACCEL_SLEEP_1_SEC} };
-
static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
enum bmc150_power_modes mode,
int dur_us)
@@ -259,8 +259,9 @@ static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
dur_val =
bmc150_accel_sleep_value_table[i].reg_value;
}
- } else
+ } else {
dur_val = 0;
+ }
if (dur_val < 0)
return -EINVAL;
@@ -288,7 +289,7 @@ static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
if (bmc150_accel_samp_freq_table[i].val == val &&
- bmc150_accel_samp_freq_table[i].val2 == val2) {
+ bmc150_accel_samp_freq_table[i].val2 == val2) {
ret = i2c_smbus_write_byte_data(
data->client,
BMC150_ACCEL_REG_PMU_BW,
@@ -345,65 +346,6 @@ static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
return 0;
}
-static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_CHIP_ID);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "Error: Reading chip id\n");
- return ret;
- }
-
- dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
- if (ret != data->chip_info->chip_id) {
- dev_err(&data->client->dev, "Invalid chip %x\n", ret);
- return -ENODEV;
- }
-
- ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
- if (ret < 0)
- return ret;
-
- /* Set Bandwidth */
- ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
- if (ret < 0)
- return ret;
-
- /* Set Default Range */
- ret = i2c_smbus_write_byte_data(data->client,
- BMC150_ACCEL_REG_PMU_RANGE,
- BMC150_ACCEL_DEF_RANGE_4G);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "Error writing reg_pmu_range\n");
- return ret;
- }
-
- data->range = BMC150_ACCEL_DEF_RANGE_4G;
-
- /* Set default slope duration and thresholds */
- data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
- data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
- ret = bmc150_accel_update_slope(data);
- if (ret < 0)
- return ret;
-
- /* Set default as latched interrupts */
- ret = i2c_smbus_write_byte_data(data->client,
- BMC150_ACCEL_REG_INT_RST_LATCH,
- BMC150_ACCEL_INT_MODE_LATCH_INT |
- BMC150_ACCEL_INT_MODE_LATCH_RESET);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "Error writing reg_int_rst_latch\n");
- return ret;
- }
-
- return 0;
-}
-
static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
int *val2)
{
@@ -437,12 +379,13 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
int ret;
- if (on)
+ if (on) {
ret = pm_runtime_get_sync(&data->client->dev);
- else {
+ } else {
pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
}
+
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: bmc150_accel_set_power_state for %d\n", on);
@@ -514,13 +457,13 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
}
/*
- * We will expect the enable and disable to do operation in
- * in reverse order. This will happen here anyway as our
- * resume operation uses sync mode runtime pm calls, the
- * suspend operation will be delayed by autosuspend delay
- * So the disable operation will still happen in reverse of
- * enable operation. When runtime pm is disabled the mode
- * is always on so sequence doesn't matter
+ * We will expect the enable and disable to do operation in reverse
+ * order. This will happen here anyway, as our resume operation uses
+ * sync mode runtime pm calls. The suspend operation will be delayed
+ * by autosuspend delay.
+ * So the disable operation will still happen in reverse order of
+ * enable operation. When runtime pm is disabled the mode is always on,
+ * so sequence doesn't matter.
*/
ret = bmc150_accel_set_power_state(data, state);
if (ret < 0)
@@ -574,7 +517,6 @@ out_fix_power_state:
return ret;
}
-
static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
int ret, i;
@@ -674,8 +616,9 @@ static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_TEMP) {
*val = BMC150_ACCEL_TEMP_CENTER_VAL;
return IIO_VAL_INT;
- } else
+ } else {
return -EINVAL;
+ }
case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
@@ -776,7 +719,7 @@ static int bmc150_accel_write_event(struct iio_dev *indio_dev,
switch (info) {
case IIO_EV_INFO_VALUE:
- data->slope_thres = val & 0xFF;
+ data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
break;
case IIO_EV_INFO_PERIOD:
data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
@@ -793,7 +736,6 @@ static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
enum iio_event_type type,
enum iio_event_direction dir)
{
-
struct bmc150_accel_data *data = iio_priv(indio_dev);
return data->ev_enable_state;
@@ -827,7 +769,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
}
static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
- struct iio_trigger *trig)
+ struct iio_trigger *trig)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
int i;
@@ -963,6 +905,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
int64_t tstamp;
uint64_t sample_period;
+
ret = i2c_smbus_read_byte_data(data->client,
BMC150_ACCEL_REG_FIFO_STATUS);
if (ret < 0) {
@@ -1120,6 +1063,7 @@ enum {
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
[bmc150] = {
+ .name = "BMC150A",
.chip_id = 0xFA,
.channels = bmc150_accel_channels,
.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1129,6 +1073,7 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
{76590, BMC150_ACCEL_DEF_RANGE_16G} },
},
[bmi055] = {
+ .name = "BMI055A",
.chip_id = 0xFA,
.channels = bmc150_accel_channels,
.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1138,6 +1083,7 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
{76590, BMC150_ACCEL_DEF_RANGE_16G} },
},
[bma255] = {
+ .name = "BMA0255",
.chip_id = 0xFA,
.channels = bmc150_accel_channels,
.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1147,6 +1093,7 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
{76590, BMC150_ACCEL_DEF_RANGE_16G} },
},
[bma250e] = {
+ .name = "BMA250E",
.chip_id = 0xF9,
.channels = bma250e_accel_channels,
.num_channels = ARRAY_SIZE(bma250e_accel_channels),
@@ -1156,6 +1103,7 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
{306457, BMC150_ACCEL_DEF_RANGE_16G} },
},
[bma222e] = {
+ .name = "BMA222E",
.chip_id = 0xF8,
.channels = bma222e_accel_channels,
.num_channels = ARRAY_SIZE(bma222e_accel_channels),
@@ -1165,6 +1113,7 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
{1225831, BMC150_ACCEL_DEF_RANGE_16G} },
},
[bma280] = {
+ .name = "BMA0280",
.chip_id = 0xFB,
.channels = bma280_accel_channels,
.num_channels = ARRAY_SIZE(bma280_accel_channels),
@@ -1255,7 +1204,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
}
static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
- bool state)
+ bool state)
{
struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
struct bmc150_accel_data *data = t->data;
@@ -1314,26 +1263,32 @@ static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
dir = IIO_EV_DIR_RISING;
if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
- iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_ROC,
- dir),
- data->timestamp);
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+
if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
- iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_ROC,
- dir),
- data->timestamp);
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+
if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
- iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_ROC,
- dir),
- data->timestamp);
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+
return ret;
}
@@ -1365,7 +1320,9 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
BMC150_ACCEL_INT_MODE_LATCH_INT |
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret)
- dev_err(&data->client->dev, "Error writing reg_int_rst_latch\n");
+ dev_err(&data->client->dev,
+ "Error writing reg_int_rst_latch\n");
+
ret = IRQ_HANDLED;
} else {
ret = IRQ_NONE;
@@ -1403,22 +1360,8 @@ static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
return IRQ_NONE;
}
-static const char *bmc150_accel_match_acpi_device(struct device *dev, int *data)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
-
- if (!id)
- return NULL;
-
- *data = (int) id->driver_data;
-
- return dev_name(dev);
-}
-
static int bmc150_accel_gpio_probe(struct i2c_client *client,
- struct bmc150_accel_data *data)
+ struct bmc150_accel_data *data)
{
struct device *dev;
struct gpio_desc *gpio;
@@ -1464,7 +1407,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
{
int i;
- for (i = from; i >= 0; i++) {
+ for (i = from; i >= 0; i--) {
if (data->triggers[i].indio_trig) {
iio_trigger_unregister(data->triggers[i].indio_trig);
data->triggers[i].indio_trig = NULL;
@@ -1611,6 +1554,70 @@ static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
.postdisable = bmc150_accel_buffer_postdisable,
};
+static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
+{
+ int ret, i;
+
+ ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_CHIP_ID);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error: Reading chip id\n");
+ return ret;
+ }
+
+ dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
+ for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
+ if (bmc150_accel_chip_info_tbl[i].chip_id == ret) {
+ data->chip_info = &bmc150_accel_chip_info_tbl[i];
+ break;
+ }
+ }
+
+ if (!data->chip_info) {
+ dev_err(&data->client->dev, "Unsupported chip %x\n", ret);
+ return -ENODEV;
+ }
+
+ ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set Bandwidth */
+ ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set Default Range */
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_PMU_RANGE,
+ BMC150_ACCEL_DEF_RANGE_4G);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_pmu_range\n");
+ return ret;
+ }
+
+ data->range = BMC150_ACCEL_DEF_RANGE_4G;
+
+ /* Set default slope duration and thresholds */
+ data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
+ data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
+ ret = bmc150_accel_update_slope(data);
+ if (ret < 0)
+ return ret;
+
+ /* Set default as latched interrupts */
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_RST_LATCH,
+ BMC150_ACCEL_INT_MODE_LATCH_INT |
+ BMC150_ACCEL_INT_MODE_LATCH_RESET);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing reg_int_rst_latch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int bmc150_accel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1618,7 +1625,6 @@ static int bmc150_accel_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
const char *name = NULL;
- int chip_id = 0;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -1628,15 +1634,8 @@ static int bmc150_accel_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
- if (id) {
+ if (id)
name = id->name;
- chip_id = id->driver_data;
- }
-
- if (ACPI_HANDLE(&client->dev))
- name = bmc150_accel_match_acpi_device(&client->dev, &chip_id);
-
- data->chip_info = &bmc150_accel_chip_info_tbl[chip_id];
ret = bmc150_accel_chip_init(data);
if (ret < 0)
@@ -1647,7 +1646,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
- indio_dev->name = name;
+ indio_dev->name = name ? name : data->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmc150_accel_info;
@@ -1663,7 +1662,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
if (client->irq < 0)
client->irq = bmc150_accel_gpio_probe(client, data);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
ret = devm_request_threaded_irq(
&client->dev, client->irq,
bmc150_accel_irq_handler,
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 0d9bd35ff258..3292bc0c1d0e 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -658,10 +658,8 @@ static int kxcjk1013_set_scale(struct kxcjk1013_data *data, int val)
int ret, i;
enum kxcjk1013_mode store_mode;
-
for (i = 0; i < ARRAY_SIZE(KXCJK1013_scale_table); ++i) {
if (KXCJK1013_scale_table[i].scale == val) {
-
ret = kxcjk1013_get_mode(data, &store_mode);
if (ret < 0)
return ret;
@@ -820,7 +818,6 @@ static int kxcjk1013_read_event_config(struct iio_dev *indio_dev,
enum iio_event_type type,
enum iio_event_direction dir)
{
-
struct kxcjk1013_data *data = iio_priv(indio_dev);
return data->ev_enable_state;
@@ -1243,7 +1240,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
if (client->irq < 0)
client->irq = kxcjk1013_gpio_probe(client, data);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
kxcjk1013_data_rdy_trig_poll,
kxcjk1013_event_handler,
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..b921d84c1be6 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -16,7 +16,6 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
@@ -24,54 +23,51 @@
#include <linux/iio/events.h>
#include <linux/delay.h>
-#define MMA8452_STATUS 0x00
-#define MMA8452_OUT_X 0x01 /* MSB first, 12-bit */
-#define MMA8452_OUT_Y 0x03
-#define MMA8452_OUT_Z 0x05
-#define MMA8452_INT_SRC 0x0c
-#define MMA8452_WHO_AM_I 0x0d
-#define MMA8452_DATA_CFG 0x0e
-#define MMA8452_HP_FILTER_CUTOFF 0x0f
-#define MMA8452_HP_FILTER_CUTOFF_SEL_MASK (BIT(0) | BIT(1))
-#define MMA8452_TRANSIENT_CFG 0x1d
-#define MMA8452_TRANSIENT_CFG_ELE BIT(4)
-#define MMA8452_TRANSIENT_CFG_CHAN(chan) BIT(chan + 1)
-#define MMA8452_TRANSIENT_CFG_HPF_BYP BIT(0)
-#define MMA8452_TRANSIENT_SRC 0x1e
-#define MMA8452_TRANSIENT_SRC_XTRANSE BIT(1)
-#define MMA8452_TRANSIENT_SRC_YTRANSE BIT(3)
-#define MMA8452_TRANSIENT_SRC_ZTRANSE BIT(5)
-#define MMA8452_TRANSIENT_THS 0x1f
-#define MMA8452_TRANSIENT_THS_MASK 0x7f
-#define MMA8452_TRANSIENT_COUNT 0x20
-#define MMA8452_OFF_X 0x2f
-#define MMA8452_OFF_Y 0x30
-#define MMA8452_OFF_Z 0x31
-#define MMA8452_CTRL_REG1 0x2a
-#define MMA8452_CTRL_REG2 0x2b
-#define MMA8452_CTRL_REG2_RST BIT(6)
-#define MMA8452_CTRL_REG4 0x2d
-#define MMA8452_CTRL_REG5 0x2e
-
-#define MMA8452_MAX_REG 0x31
-
-#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
-
-#define MMA8452_CTRL_DR_MASK (BIT(5) | BIT(4) | BIT(3))
-#define MMA8452_CTRL_DR_SHIFT 3
-#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
-#define MMA8452_CTRL_ACTIVE BIT(0)
-
-#define MMA8452_DATA_CFG_FS_MASK (BIT(1) | BIT(0))
-#define MMA8452_DATA_CFG_FS_2G 0
-#define MMA8452_DATA_CFG_FS_4G 1
-#define MMA8452_DATA_CFG_FS_8G 2
-#define MMA8452_DATA_CFG_HPF_MASK BIT(4)
-
-#define MMA8452_INT_DRDY BIT(0)
-#define MMA8452_INT_TRANS BIT(5)
-
-#define MMA8452_DEVICE_ID 0x2a
+#define MMA8452_STATUS 0x00
+#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
+#define MMA8452_OUT_X 0x01 /* MSB first, 12-bit */
+#define MMA8452_OUT_Y 0x03
+#define MMA8452_OUT_Z 0x05
+#define MMA8452_INT_SRC 0x0c
+#define MMA8452_WHO_AM_I 0x0d
+#define MMA8452_DATA_CFG 0x0e
+#define MMA8452_DATA_CFG_FS_MASK GENMASK(1, 0)
+#define MMA8452_DATA_CFG_FS_2G 0
+#define MMA8452_DATA_CFG_FS_4G 1
+#define MMA8452_DATA_CFG_FS_8G 2
+#define MMA8452_DATA_CFG_HPF_MASK BIT(4)
+#define MMA8452_HP_FILTER_CUTOFF 0x0f
+#define MMA8452_HP_FILTER_CUTOFF_SEL_MASK GENMASK(1, 0)
+#define MMA8452_TRANSIENT_CFG 0x1d
+#define MMA8452_TRANSIENT_CFG_HPF_BYP BIT(0)
+#define MMA8452_TRANSIENT_CFG_CHAN(chan) BIT(chan + 1)
+#define MMA8452_TRANSIENT_CFG_ELE BIT(4)
+#define MMA8452_TRANSIENT_SRC 0x1e
+#define MMA8452_TRANSIENT_SRC_XTRANSE BIT(1)
+#define MMA8452_TRANSIENT_SRC_YTRANSE BIT(3)
+#define MMA8452_TRANSIENT_SRC_ZTRANSE BIT(5)
+#define MMA8452_TRANSIENT_THS 0x1f
+#define MMA8452_TRANSIENT_THS_MASK GENMASK(6, 0)
+#define MMA8452_TRANSIENT_COUNT 0x20
+#define MMA8452_CTRL_REG1 0x2a
+#define MMA8452_CTRL_ACTIVE BIT(0)
+#define MMA8452_CTRL_DR_MASK GENMASK(5, 3)
+#define MMA8452_CTRL_DR_SHIFT 3
+#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
+#define MMA8452_CTRL_REG2 0x2b
+#define MMA8452_CTRL_REG2_RST BIT(6)
+#define MMA8452_CTRL_REG4 0x2d
+#define MMA8452_CTRL_REG5 0x2e
+#define MMA8452_OFF_X 0x2f
+#define MMA8452_OFF_Y 0x30
+#define MMA8452_OFF_Z 0x31
+
+#define MMA8452_MAX_REG 0x31
+
+#define MMA8452_INT_DRDY BIT(0)
+#define MMA8452_INT_TRANS BIT(5)
+
+#define MMA8452_DEVICE_ID 0x2a
struct mma8452_data {
struct i2c_client *client;
@@ -91,30 +87,34 @@ static int mma8452_drdy(struct mma8452_data *data)
return ret;
if ((ret & MMA8452_STATUS_DRDY) == MMA8452_STATUS_DRDY)
return 0;
+
msleep(20);
}
dev_err(&data->client->dev, "data not ready\n");
+
return -EIO;
}
static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
{
int ret = mma8452_drdy(data);
+
if (ret < 0)
return ret;
- return i2c_smbus_read_i2c_block_data(data->client,
- MMA8452_OUT_X, 3 * sizeof(__be16), (u8 *) buf);
+
+ return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
+ 3 * sizeof(__be16), (u8 *)buf);
}
-static ssize_t mma8452_show_int_plus_micros(char *buf,
- const int (*vals)[2], int n)
+static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2],
+ int n)
{
size_t len = 0;
while (n-- > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%06d ", vals[n][0], vals[n][1]);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
+ vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -123,7 +123,7 @@ static ssize_t mma8452_show_int_plus_micros(char *buf,
}
static int mma8452_get_int_plus_micros_index(const int (*vals)[2], int n,
- int val, int val2)
+ int val, int val2)
{
while (n-- > 0)
if (val == vals[n][0] && val2 == vals[n][1])
@@ -147,7 +147,7 @@ static const int mma8452_samp_freq[8][2] = {
* Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
* The userspace interface uses m/s^2 and we declare micro units
* So scale factor is given by:
- * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ * g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
*/
static const int mma8452_scales[3][2] = {
{0, 9577}, {0, 19154}, {0, 38307}
@@ -178,17 +178,19 @@ static const int mma8452_hp_filter_cutoff[8][4][2] = {
};
static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return mma8452_show_int_plus_micros(buf, mma8452_samp_freq,
- ARRAY_SIZE(mma8452_samp_freq));
+ ARRAY_SIZE(mma8452_samp_freq));
}
static ssize_t mma8452_show_scale_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return mma8452_show_int_plus_micros(buf, mma8452_scales,
- ARRAY_SIZE(mma8452_scales));
+ ARRAY_SIZE(mma8452_scales));
}
static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
@@ -205,22 +207,23 @@ static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
- mma8452_show_scale_avail, NULL, 0);
+ mma8452_show_scale_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
- S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
+ S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
static int mma8452_get_samp_freq_index(struct mma8452_data *data,
- int val, int val2)
+ int val, int val2)
{
return mma8452_get_int_plus_micros_index(mma8452_samp_freq,
- ARRAY_SIZE(mma8452_samp_freq), val, val2);
+ ARRAY_SIZE(mma8452_samp_freq),
+ val, val2);
}
-static int mma8452_get_scale_index(struct mma8452_data *data,
- int val, int val2)
+static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2)
{
return mma8452_get_int_plus_micros_index(mma8452_scales,
- ARRAY_SIZE(mma8452_scales), val, val2);
+ ARRAY_SIZE(mma8452_scales),
+ val, val2);
}
static int mma8452_get_hp_filter_index(struct mma8452_data *data,
@@ -229,7 +232,7 @@ static int mma8452_get_hp_filter_index(struct mma8452_data *data,
int i = mma8452_get_odr_index(data);
return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i],
- ARRAY_SIZE(mma8452_scales[0]), val, val2);
+ ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), val, val2);
}
static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz)
@@ -266,25 +269,31 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
return ret;
- *val = sign_extend32(
- be16_to_cpu(buffer[chan->scan_index]) >> 4, 11);
+
+ *val = sign_extend32(be16_to_cpu(buffer[chan->scan_index]) >> 4,
+ 11);
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
i = data->data_cfg & MMA8452_DATA_CFG_FS_MASK;
*val = mma8452_scales[i][0];
*val2 = mma8452_scales[i][1];
+
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_odr_index(data);
*val = mma8452_samp_freq[i][0];
*val2 = mma8452_samp_freq[i][1];
+
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
- ret = i2c_smbus_read_byte_data(data->client, MMA8452_OFF_X +
- chan->scan_index);
+ ret = i2c_smbus_read_byte_data(data->client,
+ MMA8452_OFF_X + chan->scan_index);
if (ret < 0)
return ret;
+
*val = sign_extend32(ret, 7);
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (data->data_cfg & MMA8452_DATA_CFG_HPF_MASK) {
@@ -295,21 +304,23 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
*val = 0;
*val2 = 0;
}
+
return IIO_VAL_INT_PLUS_MICRO;
}
+
return -EINVAL;
}
static int mma8452_standby(struct mma8452_data *data)
{
return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
- data->ctrl_reg1 & ~MMA8452_CTRL_ACTIVE);
+ data->ctrl_reg1 & ~MMA8452_CTRL_ACTIVE);
}
static int mma8452_active(struct mma8452_data *data)
{
return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
+ data->ctrl_reg1);
}
static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
@@ -334,6 +345,7 @@ static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
ret = 0;
fail:
mutex_unlock(&data->lock);
+
return ret;
}
@@ -344,12 +356,13 @@ static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
i = mma8452_get_hp_filter_index(data, val, val2);
if (i < 0)
- return -EINVAL;
+ return i;
reg = i2c_smbus_read_byte_data(data->client,
MMA8452_HP_FILTER_CUTOFF);
if (reg < 0)
return reg;
+
reg &= ~MMA8452_HP_FILTER_CUTOFF_SEL_MASK;
reg |= i;
@@ -370,25 +383,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
if (i < 0)
- return -EINVAL;
+ return i;
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
+
return mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
+ data->ctrl_reg1);
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
if (i < 0)
- return -EINVAL;
+ return i;
+
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
+
return mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
+ data->data_cfg);
case IIO_CHAN_INFO_CALIBBIAS:
if (val < -128 || val > 127)
return -EINVAL;
- return mma8452_change_config(data, MMA8452_OFF_X +
- chan->scan_index, val);
+
+ return mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -399,8 +417,9 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
}
+
return mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
+ data->data_cfg);
default:
return -EINVAL;
@@ -425,6 +444,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
return ret;
*val = ret & MMA8452_TRANSIENT_THS_MASK;
+
return IIO_VAL_INT;
case IIO_EV_INFO_PERIOD:
@@ -437,6 +457,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
mma8452_get_odr_index(data)];
*val = us / USEC_PER_SEC;
*val2 = us % USEC_PER_SEC;
+
return IIO_VAL_INT_PLUS_MICRO;
case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
@@ -453,6 +474,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
}
+
return IIO_VAL_INT_PLUS_MICRO;
default:
@@ -472,19 +494,22 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
switch (info) {
case IIO_EV_INFO_VALUE:
- return mma8452_change_config(data, MMA8452_TRANSIENT_THS,
- val & MMA8452_TRANSIENT_THS_MASK);
+ if (val < 0 || val > MMA8452_TRANSIENT_THS_MASK)
+ return -EINVAL;
+
+ return mma8452_change_config(data, MMA8452_TRANSIENT_THS, val);
case IIO_EV_INFO_PERIOD:
steps = (val * USEC_PER_SEC + val2) /
mma8452_transient_time_step_us[
mma8452_get_odr_index(data)];
- if (steps > 0xff)
+ if (steps < 0 || steps > 0xff)
return -EINVAL;
return mma8452_change_config(data, MMA8452_TRANSIENT_COUNT,
steps);
+
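	/*
	 * Editorial worked example for the conversion above (the per-ODR
	 * step values live in mma8452_transient_time_step_us, elided here;
	 * 20000 us is only an assumed figure):
	 *   requested period 1.5 s -> val = 1, val2 = 500000
	 *   steps = (1 * 1000000 + 500000) / 20000 = 75, within 0..255
	 */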
case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
reg = i2c_smbus_read_byte_data(data->client,
MMA8452_TRANSIENT_CFG);
@@ -499,6 +524,7 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
}
+
return mma8452_change_config(data, MMA8452_TRANSIENT_CFG, reg);
default:
@@ -557,21 +583,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
}
@@ -608,15 +634,16 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
u8 buffer[16]; /* 3 16-bit channels + padding + ts */
int ret;
- ret = mma8452_read(data, (__be16 *) buffer);
+ ret = mma8452_read(data, (__be16 *)buffer);
if (ret < 0)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns());
done:
iio_trigger_notify_done(indio_dev->trig);
+
return IRQ_HANDLED;
}
@@ -644,7 +671,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
static const struct iio_event_spec mma8452_transient_event[] = {
{
- .type = IIO_EV_TYPE_THRESH,
+ .type = IIO_EV_TYPE_MAG,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_ENABLE),
.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
@@ -674,10 +701,10 @@ static struct attribute_group mma8452_event_attribute_group = {
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
.scan_index = idx, \
.scan_type = { \
.sign = 's', \
@@ -780,6 +807,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
return ret;
indio_dev->trig = trig;
+
return 0;
}
@@ -849,7 +877,7 @@ static int mma8452_probe(struct i2c_client *client,
data->data_cfg = MMA8452_DATA_CFG_FS_2G;
ret = i2c_smbus_write_byte_data(client, MMA8452_DATA_CFG,
- data->data_cfg);
+ data->data_cfg);
if (ret < 0)
return ret;
@@ -891,14 +919,14 @@ static int mma8452_probe(struct i2c_client *client,
}
data->ctrl_reg1 = MMA8452_CTRL_ACTIVE |
- (MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
+ (MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG1,
data->ctrl_reg1);
if (ret < 0)
goto trigger_cleanup;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
- mma8452_trigger_handler, NULL);
+ mma8452_trigger_handler, NULL);
if (ret < 0)
goto trigger_cleanup;
@@ -968,6 +996,7 @@ static const struct of_device_id mma8452_dt_ids[] = {
{ .compatible = "fsl,mma8452" },
{ }
};
+MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
static struct i2c_driver mma8452_driver = {
.driver = {
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 2fd2a995686b..c34c5ce8123b 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(mma9551_read_status_byte);
* Returns: 0 on success, negative value on failure.
*/
int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
- u16 reg, u16 *val)
+ u16 reg, u16 *val)
{
int ret;
__be16 v;
@@ -328,12 +328,12 @@ EXPORT_SYMBOL(mma9551_read_config_word);
* Returns: 0 on success, negative value on failure.
*/
int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
- u16 reg, u16 val)
+ u16 reg, u16 val)
{
__be16 v = cpu_to_be16(val);
return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG, reg,
- (u8 *) &v, 2, NULL, 0);
+ (u8 *)&v, 2, NULL, 0);
}
EXPORT_SYMBOL(mma9551_write_config_word);
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(mma9551_read_status_word);
* @client: I2C client
* @app_id: Application ID
* @reg: Application register
- * @len: Length of array to read in bytes
+ * @len: Length of array to read (in words)
* @buf: Array of words to read
*
* Read multiple configuration registers (word-sized registers).
@@ -385,23 +385,22 @@ EXPORT_SYMBOL(mma9551_read_status_word);
* Returns: 0 on success, negative value on failure.
*/
int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
- u16 reg, u8 len, u16 *buf)
+ u16 reg, u8 len, u16 *buf)
{
int ret, i;
- int len_words = len / sizeof(u16);
__be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
- if (len_words > ARRAY_SIZE(be_buf)) {
+ if (len > ARRAY_SIZE(be_buf)) {
dev_err(&client->dev, "Invalid buffer size %d\n", len);
return -EINVAL;
}
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
- reg, NULL, 0, (u8 *) be_buf, len);
+ reg, NULL, 0, (u8 *)be_buf, len * sizeof(u16));
if (ret < 0)
return ret;
- for (i = 0; i < len_words; i++)
+ for (i = 0; i < len; i++)
buf[i] = be16_to_cpu(be_buf[i]);
return 0;
@@ -413,7 +412,7 @@ EXPORT_SYMBOL(mma9551_read_config_words);
* @client: I2C client
* @app_id: Application ID
* @reg: Application register
- * @len: Length of array to read in bytes
+ * @len: Length of array to read (in words)
* @buf: Array of words to read
*
* Read multiple status registers (word-sized registers).
@@ -428,20 +427,19 @@ int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
u16 reg, u8 len, u16 *buf)
{
int ret, i;
- int len_words = len / sizeof(u16);
__be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
- if (len_words > ARRAY_SIZE(be_buf)) {
+ if (len > ARRAY_SIZE(be_buf)) {
dev_err(&client->dev, "Invalid buffer size %d\n", len);
return -EINVAL;
}
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
- reg, NULL, 0, (u8 *) be_buf, len);
+ reg, NULL, 0, (u8 *)be_buf, len * sizeof(u16));
if (ret < 0)
return ret;
- for (i = 0; i < len_words; i++)
+ for (i = 0; i < len; i++)
buf[i] = be16_to_cpu(be_buf[i]);
return 0;
@@ -453,7 +451,7 @@ EXPORT_SYMBOL(mma9551_read_status_words);
* @client: I2C client
* @app_id: Application ID
* @reg: Application register
- * @len: Length of array to write in bytes
+ * @len: Length of array to write (in words)
* @buf: Array of words to write
*
* Write multiple configuration registers (word-sized registers).
@@ -468,19 +466,18 @@ int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
u16 reg, u8 len, u16 *buf)
{
int i;
- int len_words = len / sizeof(u16);
__be16 be_buf[(MMA9551_MAX_MAILBOX_DATA_REGS - 1) / 2];
- if (len_words > ARRAY_SIZE(be_buf)) {
+ if (len > ARRAY_SIZE(be_buf)) {
dev_err(&client->dev, "Invalid buffer size %d\n", len);
return -EINVAL;
}
- for (i = 0; i < len_words; i++)
+ for (i = 0; i < len; i++)
be_buf[i] = cpu_to_be16(buf[i]);
return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG,
- reg, (u8 *) be_buf, len, NULL, 0);
+ reg, (u8 *)be_buf, len * sizeof(u16), NULL, 0);
}
EXPORT_SYMBOL(mma9551_write_config_words);
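With len now counted in 16-bit words rather than bytes, a caller sizes the transfer directly from its buffer (a sketch; the register offset 0x00 and the word count are invented for illustration):

	u16 words[4];
	int err;

	err = mma9551_read_config_words(client, MMA9551_APPID_PEDOMETER,
					0x00 /* hypothetical reg */,
					ARRAY_SIZE(words), words);
	if (err < 0)
		return err;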
diff --git a/drivers/iio/accel/mma9551_core.h b/drivers/iio/accel/mma9551_core.h
index 79939e40805a..5e88e6454dfd 100644
--- a/drivers/iio/accel/mma9551_core.h
+++ b/drivers/iio/accel/mma9551_core.h
@@ -53,13 +53,13 @@ int mma9551_write_config_byte(struct i2c_client *client, u8 app_id,
int mma9551_read_status_byte(struct i2c_client *client, u8 app_id,
u16 reg, u8 *val);
int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
- u16 reg, u16 *val);
+ u16 reg, u16 *val);
int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
- u16 reg, u16 val);
+ u16 reg, u16 val);
int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
u16 reg, u16 *val);
int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
- u16 reg, u8 len, u16 *buf);
+ u16 reg, u8 len, u16 *buf);
int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
u16 reg, u8 len, u16 *buf);
int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 8bfc61824fb2..771858cb67a1 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -182,6 +182,10 @@ struct mma9553_conf_regs {
struct mma9553_data {
struct i2c_client *client;
+ /*
+ * 1. Serialize access to HW (requested by mma9551_core API).
+ * 2. Serialize sequences that power on/off the device and access HW.
+ */
struct mutex mutex;
struct mma9553_conf_regs conf;
struct mma9553_event events[MMA9553_EVENTS_INFO_SIZE];
@@ -322,7 +326,8 @@ static int mma9553_read_activity_stepcnt(struct mma9553_data *data,
int ret;
ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER,
- MMA9553_REG_STATUS, sizeof(u32), buf);
+ MMA9553_REG_STATUS, ARRAY_SIZE(buf),
+ buf);
if (ret < 0) {
dev_err(&data->client->dev,
"error reading status and stepcnt\n");
@@ -342,10 +347,10 @@ static int mma9553_conf_gpio(struct mma9553_data *data)
struct mma9553_event *ev_step_detect;
bool activity_enabled;
- activity_enabled =
- mma9553_is_any_event_enabled(data, true, IIO_ACTIVITY);
- ev_step_detect =
- mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+ activity_enabled = mma9553_is_any_event_enabled(data, true,
+ IIO_ACTIVITY);
+ ev_step_detect = mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD,
+ IIO_EV_DIR_NONE);
/*
* If both step detector and activity are enabled, use the MRGFL bit.
@@ -371,9 +376,8 @@ static int mma9553_conf_gpio(struct mma9553_data *data)
return ret;
}
- ret = mma9551_gpio_config(data->client,
- MMA9553_DEFAULT_GPIO_PIN,
- appid, bitnum, MMA9553_DEFAULT_GPIO_POLARITY);
+ ret = mma9551_gpio_config(data->client, MMA9553_DEFAULT_GPIO_PIN, appid,
+ bitnum, MMA9553_DEFAULT_GPIO_POLARITY);
if (ret < 0)
return ret;
data->gpio_bitnum = bitnum;
@@ -394,17 +398,16 @@ static int mma9553_init(struct mma9553_data *data)
* a device identification command to differentiate the MMA9553L
* from the MMA9550L.
*/
- ret =
- mma9551_read_config_words(data->client, MMA9551_APPID_PEDOMETER,
- MMA9553_REG_CONF_SLEEPMIN,
- sizeof(data->conf), (u16 *) &data->conf);
+ ret = mma9551_read_config_words(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_SLEEPMIN,
+ sizeof(data->conf) / sizeof(u16),
+ (u16 *)&data->conf);
if (ret < 0) {
dev_err(&data->client->dev,
"failed to read configuration registers\n");
return ret;
}
-
/* Reset GPIO */
data->gpio_bitnum = MMA9553_MAX_BITNUM;
ret = mma9553_conf_gpio(data);
@@ -419,18 +422,18 @@ static int mma9553_init(struct mma9553_data *data)
data->conf.sleepmin = MMA9553_DEFAULT_SLEEPMIN;
data->conf.sleepmax = MMA9553_DEFAULT_SLEEPMAX;
data->conf.sleepthd = MMA9553_DEFAULT_SLEEPTHD;
- data->conf.config =
- mma9553_set_bits(data->conf.config, 1, MMA9553_MASK_CONF_CONFIG);
+ data->conf.config = mma9553_set_bits(data->conf.config, 1,
+ MMA9553_MASK_CONF_CONFIG);
/*
* Clear the activity debounce counter when the activity level changes,
* so that the confidence level applies for any activity level.
*/
data->conf.config = mma9553_set_bits(data->conf.config, 1,
MMA9553_MASK_CONF_ACT_DBCNTM);
- ret =
- mma9551_write_config_words(data->client, MMA9551_APPID_PEDOMETER,
- MMA9553_REG_CONF_SLEEPMIN,
- sizeof(data->conf), (u16 *) &data->conf);
+ ret = mma9551_write_config_words(data->client, MMA9551_APPID_PEDOMETER,
+ MMA9553_REG_CONF_SLEEPMIN,
+ sizeof(data->conf) / sizeof(u16),
+ (u16 *)&data->conf);
if (ret < 0) {
dev_err(&data->client->dev,
"failed to write configuration registers\n");
@@ -567,7 +570,7 @@ static int mma9553_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBHEIGHT:
tmp = mma9553_get_bits(data->conf.height_weight,
- MMA9553_MASK_CONF_HEIGHT);
+ MMA9553_MASK_CONF_HEIGHT);
*val = tmp / 100; /* cm to m */
*val2 = (tmp % 100) * 10000;
return IIO_VAL_INT_PLUS_MICRO;
@@ -719,7 +722,6 @@ static int mma9553_read_event_config(struct iio_dev *indio_dev,
enum iio_event_type type,
enum iio_event_direction dir)
{
-
struct mma9553_data *data = iio_priv(indio_dev);
struct mma9553_event *event;
@@ -1026,22 +1028,22 @@ static irqreturn_t mma9553_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
- ev_prev_activity =
- mma9553_get_event(data, IIO_ACTIVITY,
- mma9553_activity_to_mod(data->activity),
- IIO_EV_DIR_FALLING);
- ev_activity =
- mma9553_get_event(data, IIO_ACTIVITY,
- mma9553_activity_to_mod(activity),
- IIO_EV_DIR_RISING);
- ev_step_detect =
- mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+ ev_prev_activity = mma9553_get_event(data, IIO_ACTIVITY,
+ mma9553_activity_to_mod(
+ data->activity),
+ IIO_EV_DIR_FALLING);
+ ev_activity = mma9553_get_event(data, IIO_ACTIVITY,
+ mma9553_activity_to_mod(activity),
+ IIO_EV_DIR_RISING);
+ ev_step_detect = mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD,
+ IIO_EV_DIR_NONE);
if (ev_step_detect->enabled && (stepcnt != data->stepcnt)) {
data->stepcnt = stepcnt;
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
- IIO_EV_DIR_NONE, IIO_EV_TYPE_CHANGE, 0, 0, 0),
+ IIO_EV_DIR_NONE,
+ IIO_EV_TYPE_CHANGE, 0, 0, 0),
data->timestamp);
}
@@ -1051,17 +1053,19 @@ static irqreturn_t mma9553_event_handler(int irq, void *private)
if (ev_prev_activity && ev_prev_activity->enabled)
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_ACTIVITY, 0,
- ev_prev_activity->info->mod,
- IIO_EV_DIR_FALLING,
- IIO_EV_TYPE_THRESH, 0, 0, 0),
+ ev_prev_activity->info->mod,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_TYPE_THRESH, 0, 0,
+ 0),
data->timestamp);
if (ev_activity && ev_activity->enabled)
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_ACTIVITY, 0,
- ev_activity->info->mod,
- IIO_EV_DIR_RISING,
- IIO_EV_TYPE_THRESH, 0, 0, 0),
+ ev_activity->info->mod,
+ IIO_EV_DIR_RISING,
+ IIO_EV_TYPE_THRESH, 0, 0,
+ 0),
data->timestamp);
}
mutex_unlock(&data->mutex);
@@ -1145,7 +1149,7 @@ static int mma9553_probe(struct i2c_client *client,
if (client->irq < 0)
client->irq = mma9553_gpio_probe(client);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
mma9553_irq_handler,
mma9553_event_handler,
@@ -1156,7 +1160,6 @@ static int mma9553_probe(struct i2c_client *client,
client->irq);
goto out_poweroff;
}
-
}
ret = iio_device_register(indio_dev);
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index aa1001931d0c..468f21fa2950 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -26,6 +26,7 @@
#define LSM303DLH_ACCEL_DEV_NAME "lsm303dlh_accel"
#define LSM303DLM_ACCEL_DEV_NAME "lsm303dlm_accel"
#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
+#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 4002e6410444..ff30f8806880 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -226,12 +226,14 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
.wai = ST_ACCEL_1_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3DH_ACCEL_DEV_NAME,
[1] = LSM303DLHC_ACCEL_DEV_NAME,
[2] = LSM330D_ACCEL_DEV_NAME,
[3] = LSM330DL_ACCEL_DEV_NAME,
[4] = LSM330DLC_ACCEL_DEV_NAME,
+ [5] = LSM303AGR_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
@@ -297,6 +299,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
{
.wai = ST_ACCEL_2_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DLH_ACCEL_DEV_NAME,
[1] = LSM303DL_ACCEL_DEV_NAME,
@@ -359,6 +362,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
{
.wai = ST_ACCEL_3_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM330_ACCEL_DEV_NAME,
},
@@ -437,6 +441,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
{
.wai = ST_ACCEL_4_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3LV02DL_ACCEL_DEV_NAME,
},
@@ -494,6 +499,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
{
.wai = ST_ACCEL_5_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS331DL_ACCEL_DEV_NAME,
},
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index d4ad72ca4a3d..8b9cc84fd44f 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -68,6 +68,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lsm330-accel",
.data = LSM330_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lsm303agr-accel",
+ .data = LSM303AGR_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -116,13 +120,13 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303DL_ACCEL_DEV_NAME },
{ LSM303DLM_ACCEL_DEV_NAME },
{ LSM330_ACCEL_DEV_NAME },
+ { LSM303AGR_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
static struct i2c_driver st_accel_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-accel-i2c",
.of_match_table = of_match_ptr(st_accel_of_match),
},
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 12ec29389e4b..54b61a3961c3 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -57,6 +57,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM303DL_ACCEL_DEV_NAME },
{ LSM303DLM_ACCEL_DEV_NAME },
{ LSM330_ACCEL_DEV_NAME },
+ { LSM303AGR_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index d211d9f3975b..c764af284c94 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -11,17 +11,25 @@
*/
#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#define STK8312_REG_XOUT 0x00
#define STK8312_REG_YOUT 0x01
#define STK8312_REG_ZOUT 0x02
+#define STK8312_REG_INTSU 0x06
#define STK8312_REG_MODE 0x07
+#define STK8312_REG_SR 0x08
#define STK8312_REG_STH 0x13
#define STK8312_REG_RESET 0x20
#define STK8312_REG_AFECTRL 0x24
@@ -29,14 +37,21 @@
#define STK8312_REG_OTPDATA 0x3E
#define STK8312_REG_OTPCTRL 0x3F
-#define STK8312_MODE_ACTIVE 1
-#define STK8312_MODE_STANDBY 0
-#define STK8312_MODE_MASK 0x01
-#define STK8312_RNG_MASK 0xC0
+#define STK8312_MODE_ACTIVE BIT(0)
+#define STK8312_MODE_STANDBY 0x00
+#define STK8312_MODE_INT_AH_PP 0xC0 /* active-high, push-pull */
+#define STK8312_DREADY_BIT BIT(4)
+#define STK8312_RNG_6G 1
#define STK8312_RNG_SHIFT 6
-#define STK8312_READ_RETRIES 16
+#define STK8312_RNG_MASK GENMASK(7, 6)
+#define STK8312_SR_MASK GENMASK(2, 0)
+#define STK8312_SR_400HZ_IDX 0
+#define STK8312_ALL_CHANNEL_MASK GENMASK(2, 0)
+#define STK8312_ALL_CHANNEL_SIZE 3
#define STK8312_DRIVER_NAME "stk8312"
+#define STK8312_GPIO "stk8312_gpio"
+#define STK8312_IRQ_NAME "stk8312_event"
/*
* The accelerometer has two measurement ranges:
@@ -53,32 +68,56 @@ static const int stk8312_scale_table[][2] = {
{0, 461600}, {1, 231100}
};
-#define STK8312_ACCEL_CHANNEL(reg, axis) { \
- .type = IIO_ACCEL, \
- .address = reg, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+static const struct {
+ int val;
+ int val2;
+} stk8312_samp_freq_table[] = {
+ {400, 0}, {200, 0}, {100, 0}, {50, 0}, {25, 0},
+ {12, 500000}, {6, 250000}, {3, 125000}
+};
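One detail worth spelling out: the index into this table is also the raw value programmed into STK8312_REG_SR by stk8312_set_sample_rate() below, each step halving the 400 Hz base rate:

/*
 *   idx n -> 400 / 2^n Hz: idx 0 = 400 Hz, idx 3 = 50 Hz,
 *   idx 7 = 400 / 128 = 3.125 Hz
 */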
+
+#define STK8312_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ .endianness = IIO_CPU, \
+ }, \
}
static const struct iio_chan_spec stk8312_channels[] = {
- STK8312_ACCEL_CHANNEL(STK8312_REG_XOUT, X),
- STK8312_ACCEL_CHANNEL(STK8312_REG_YOUT, Y),
- STK8312_ACCEL_CHANNEL(STK8312_REG_ZOUT, Z),
+ STK8312_ACCEL_CHANNEL(0, STK8312_REG_XOUT, X),
+ STK8312_ACCEL_CHANNEL(1, STK8312_REG_YOUT, Y),
+ STK8312_ACCEL_CHANNEL(2, STK8312_REG_ZOUT, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
struct stk8312_data {
struct i2c_client *client;
struct mutex lock;
- int range;
+ u8 range;
+ u8 sample_rate_idx;
u8 mode;
+ struct iio_trigger *dready_trig;
+ bool dready_trigger_on;
+ s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */
};
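A sketch of how those 16 bytes are laid out when all three axes are enabled (inferred from the comment above; iio_push_to_buffers_with_timestamp() stores the timestamp 8-byte aligned at the end of the scan, hence the padding):

/*
 *   bytes 0..2    X, Y, Z samples (one s8 each)
 *   bytes 3..7    padding
 *   bytes 8..15   64-bit timestamp
 */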
static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("3.125 6.25 12.5 25 50 100 200 400");
+
static struct attribute *stk8312_attributes[] = {
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -105,22 +144,25 @@ static int stk8312_otp_init(struct stk8312_data *data)
if (ret < 0)
goto exit_err;
count--;
- } while (!(ret & 0x80) && count > 0);
+ } while (!(ret & BIT(7)) && count > 0);
- if (count == 0)
+ if (count == 0) {
+ ret = -ETIMEDOUT;
goto exit_err;
+ }
ret = i2c_smbus_read_byte_data(client, STK8312_REG_OTPDATA);
+ if (ret == 0)
+ ret = -EINVAL;
if (ret < 0)
goto exit_err;
- ret = i2c_smbus_write_byte_data(data->client,
- STK8312_REG_AFECTRL, ret);
+ ret = i2c_smbus_write_byte_data(data->client, STK8312_REG_AFECTRL, ret);
if (ret < 0)
goto exit_err;
msleep(150);
- return ret;
+ return 0;
exit_err:
dev_err(&client->dev, "failed to initialize sensor\n");
@@ -130,31 +172,19 @@ exit_err:
static int stk8312_set_mode(struct stk8312_data *data, u8 mode)
{
int ret;
- u8 masked_reg;
struct i2c_client *client = data->client;
- if (mode > 1)
- return -EINVAL;
- else if (mode == data->mode)
+ if (mode == data->mode)
return 0;
- ret = i2c_smbus_read_byte_data(client, STK8312_REG_MODE);
- if (ret < 0) {
- dev_err(&client->dev, "failed to change sensor mode\n");
- return ret;
- }
- masked_reg = ret & (~STK8312_MODE_MASK);
- masked_reg |= mode;
-
- ret = i2c_smbus_write_byte_data(client,
- STK8312_REG_MODE, masked_reg);
+ ret = i2c_smbus_write_byte_data(client, STK8312_REG_MODE, mode);
if (ret < 0) {
dev_err(&client->dev, "failed to change sensor mode\n");
return ret;
}
data->mode = mode;
- if (mode == STK8312_MODE_ACTIVE) {
+ if (mode & STK8312_MODE_ACTIVE) {
/* Need to run OTP sequence before entering active mode */
usleep_range(1000, 5000);
ret = stk8312_otp_init(data);
@@ -163,6 +193,92 @@ static int stk8312_set_mode(struct stk8312_data *data, u8 mode)
return ret;
}
+static int stk8312_set_interrupts(struct stk8312_data *data, u8 int_mask)
+{
+ int ret;
+ u8 mode;
+ struct i2c_client *client = data->client;
+
+ mode = data->mode;
+ /* We need to go in standby mode to modify registers */
+ ret = stk8312_set_mode(data, STK8312_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, STK8312_REG_INTSU, int_mask);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to set interrupts\n");
+ stk8312_set_mode(data, mode);
+ return ret;
+ }
+
+ return stk8312_set_mode(data, mode);
+}
+
+static int stk8312_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct stk8312_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (state)
+ ret = stk8312_set_interrupts(data, STK8312_DREADY_BIT);
+ else
+ ret = stk8312_set_interrupts(data, 0x00);
+
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to set trigger state\n");
+ return ret;
+ }
+
+ data->dready_trigger_on = state;
+
+ return 0;
+}
+
+static const struct iio_trigger_ops stk8312_trigger_ops = {
+ .set_trigger_state = stk8312_data_rdy_trigger_set_state,
+ .owner = THIS_MODULE,
+};
+
+static int stk8312_set_sample_rate(struct stk8312_data *data, u8 rate)
+{
+ int ret;
+ u8 masked_reg;
+ u8 mode;
+ struct i2c_client *client = data->client;
+
+ if (rate == data->sample_rate_idx)
+ return 0;
+
+ mode = data->mode;
+ /* We need to go in standby mode to modify registers */
+ ret = stk8312_set_mode(data, STK8312_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, STK8312_REG_SR);
+ if (ret < 0)
+ goto err_activate;
+
+ masked_reg = (ret & (~STK8312_SR_MASK)) | rate;
+
+ ret = i2c_smbus_write_byte_data(client, STK8312_REG_SR, masked_reg);
+ if (ret < 0)
+ goto err_activate;
+
+ data->sample_rate_idx = rate;
+
+ return stk8312_set_mode(data, mode);
+
+err_activate:
+ dev_err(&client->dev, "failed to set sampling rate\n");
+ stk8312_set_mode(data, mode);
+
+ return ret;
+}
+
static int stk8312_set_range(struct stk8312_data *data, u8 range)
{
int ret;
@@ -182,21 +298,25 @@ static int stk8312_set_range(struct stk8312_data *data, u8 range)
return ret;
ret = i2c_smbus_read_byte_data(client, STK8312_REG_STH);
- if (ret < 0) {
- dev_err(&client->dev, "failed to change sensor range\n");
- return ret;
- }
+ if (ret < 0)
+ goto err_activate;
masked_reg = ret & (~STK8312_RNG_MASK);
masked_reg |= range << STK8312_RNG_SHIFT;
ret = i2c_smbus_write_byte_data(client, STK8312_REG_STH, masked_reg);
if (ret < 0)
- dev_err(&client->dev, "failed to change sensor range\n");
- else
- data->range = range;
+ goto err_activate;
+
+ data->range = range;
return stk8312_set_mode(data, mode);
+
+err_activate:
+ dev_err(&client->dev, "failed to change sensor range\n");
+ stk8312_set_mode(data, mode);
+
+ return ret;
}
static int stk8312_read_accel(struct stk8312_data *data, u8 address)
@@ -208,12 +328,10 @@ static int stk8312_read_accel(struct stk8312_data *data, u8 address)
return -EINVAL;
ret = i2c_smbus_read_byte_data(client, address);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&client->dev, "register read failed\n");
- return ret;
- }
- return sign_extend32(ret, 7);
+ return ret;
}
static int stk8312_read_raw(struct iio_dev *indio_dev,
@@ -221,20 +339,40 @@ static int stk8312_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct stk8312_data *data = iio_priv(indio_dev);
-
- if (chan->type != IIO_ACCEL)
- return -EINVAL;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
- *val = stk8312_read_accel(data, chan->address);
+ ret = stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = stk8312_read_accel(data, chan->address);
+ if (ret < 0) {
+ stk8312_set_mode(data,
+ data->mode & (~STK8312_MODE_ACTIVE));
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ *val = sign_extend32(ret, 7);
+ ret = stk8312_set_mode(data,
+ data->mode & (~STK8312_MODE_ACTIVE));
mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = stk8312_scale_table[data->range - 1][0];
*val2 = stk8312_scale_table[data->range - 1][1];
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = stk8312_samp_freq_table[data->sample_rate_idx].val;
+ *val2 = stk8312_samp_freq_table[data->sample_rate_idx].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
@@ -265,6 +403,20 @@ static int stk8312_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(stk8312_samp_freq_table); i++)
+ if (val == stk8312_samp_freq_table[i].val &&
+ val2 == stk8312_samp_freq_table[i].val2) {
+ index = i;
+ break;
+ }
+ if (index < 0)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = stk8312_set_sample_rate(data, index);
+ mutex_unlock(&data->lock);
+
+ return ret;
}
return -EINVAL;
@@ -277,6 +429,105 @@ static const struct iio_info stk8312_info = {
.attrs = &stk8312_attribute_group,
};
+static irqreturn_t stk8312_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct stk8312_data *data = iio_priv(indio_dev);
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->lock);
+ /*
+ * Do a bulk read if all channels are requested,
+ * from 0x00 (XOUT) to 0x02 (ZOUT)
+ */
+ if (*(indio_dev->active_scan_mask) == STK8312_ALL_CHANNEL_MASK) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ STK8312_REG_XOUT,
+ STK8312_ALL_CHANNEL_SIZE,
+ data->buffer);
+ if (ret < STK8312_ALL_CHANNEL_SIZE) {
+ dev_err(&data->client->dev, "register read failed\n");
+ mutex_unlock(&data->lock);
+ goto err;
+ }
+ } else {
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = stk8312_read_accel(data, bit);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto err;
+ }
+ data->buffer[i++] = ret;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stk8312_data_rdy_trig_poll(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct stk8312_data *data = iio_priv(indio_dev);
+
+ if (data->dready_trigger_on)
+ iio_trigger_poll(data->dready_trig);
+
+ return IRQ_HANDLED;
+}
+
+static int stk8312_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct stk8312_data *data = iio_priv(indio_dev);
+
+ return stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
+}
+
+static int stk8312_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct stk8312_data *data = iio_priv(indio_dev);
+
+ return stk8312_set_mode(data, data->mode & (~STK8312_MODE_ACTIVE));
+}
+
+static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = {
+ .preenable = stk8312_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = stk8312_buffer_postdisable,
+};
+
+static int stk8312_gpio_probe(struct i2c_client *client)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, STK8312_GPIO, 0, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_to_irq(gpio);
+ dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+ return ret;
+}
+
static int stk8312_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -308,30 +559,91 @@ static int stk8312_probe(struct i2c_client *client,
dev_err(&client->dev, "failed to reset sensor\n");
return ret;
}
- ret = stk8312_set_range(data, 1);
+ data->sample_rate_idx = STK8312_SR_400HZ_IDX;
+ ret = stk8312_set_range(data, STK8312_RNG_6G);
if (ret < 0)
return ret;
- ret = stk8312_set_mode(data, STK8312_MODE_ACTIVE);
+ ret = stk8312_set_mode(data,
+ STK8312_MODE_INT_AH_PP | STK8312_MODE_ACTIVE);
if (ret < 0)
return ret;
+ if (client->irq < 0)
+ client->irq = stk8312_gpio_probe(client);
+
+ if (client->irq >= 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ stk8312_data_rdy_trig_poll,
+ NULL,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ STK8312_IRQ_NAME,
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "request irq %d failed\n",
+ client->irq);
+ goto err_power_off;
+ }
+
+ data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->dready_trig) {
+ ret = -ENOMEM;
+ goto err_power_off;
+ }
+
+ data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->ops = &stk8312_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+ ret = iio_trigger_register(data->dready_trig);
+ if (ret) {
+ dev_err(&client->dev, "iio trigger register failed\n");
+ goto err_power_off;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev,
+ iio_pollfunc_store_time,
+ stk8312_trigger_handler,
+ &stk8312_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ goto err_trigger_unregister;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "device_register failed\n");
- stk8312_set_mode(data, STK8312_MODE_STANDBY);
+ goto err_buffer_cleanup;
}
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+err_power_off:
+ stk8312_set_mode(data, STK8312_MODE_STANDBY);
return ret;
}
static int stk8312_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct stk8312_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
- return stk8312_set_mode(iio_priv(indio_dev), STK8312_MODE_STANDBY);
+ return stk8312_set_mode(data, STK8312_MODE_STANDBY);
}
#ifdef CONFIG_PM_SLEEP
@@ -341,7 +653,7 @@ static int stk8312_suspend(struct device *dev)
data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
- return stk8312_set_mode(data, STK8312_MODE_STANDBY);
+ return stk8312_set_mode(data, data->mode & (~STK8312_MODE_ACTIVE));
}
static int stk8312_resume(struct device *dev)
@@ -350,7 +662,7 @@ static int stk8312_resume(struct device *dev)
data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
- return stk8312_set_mode(data, STK8312_MODE_ACTIVE);
+ return stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
}
static SIMPLE_DEV_PM_OPS(stk8312_pm_ops, stk8312_suspend, stk8312_resume);
@@ -364,6 +676,7 @@ static const struct i2c_device_id stk8312_i2c_id[] = {
{"STK8312", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, stk8312_i2c_id);
static const struct acpi_device_id stk8312_acpi_id[] = {
{"STK8312", 0},
@@ -374,7 +687,7 @@ MODULE_DEVICE_TABLE(acpi, stk8312_acpi_id);
static struct i2c_driver stk8312_driver = {
.driver = {
- .name = "stk8312",
+ .name = STK8312_DRIVER_NAME,
.pm = STK8312_PM_OPS,
.acpi_match_table = ACPI_PTR(stk8312_acpi_id),
},
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 30950c6b36de..80f77d8704b5 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -11,26 +11,42 @@
*/
#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#define STK8BA50_REG_XOUT 0x02
#define STK8BA50_REG_YOUT 0x04
#define STK8BA50_REG_ZOUT 0x06
#define STK8BA50_REG_RANGE 0x0F
+#define STK8BA50_REG_BWSEL 0x10
#define STK8BA50_REG_POWMODE 0x11
#define STK8BA50_REG_SWRST 0x14
+#define STK8BA50_REG_INTEN2 0x17
+#define STK8BA50_REG_INTMAP2 0x1A
#define STK8BA50_MODE_NORMAL 0
#define STK8BA50_MODE_SUSPEND 1
#define STK8BA50_MODE_POWERBIT BIT(7)
#define STK8BA50_DATA_SHIFT 6
#define STK8BA50_RESET_CMD 0xB6
+#define STK8BA50_SR_1792HZ_IDX 7
+#define STK8BA50_DREADY_INT_MASK 0x10
+#define STK8BA50_DREADY_INT_MAP 0x81
+#define STK8BA50_ALL_CHANNEL_MASK 7
+#define STK8BA50_ALL_CHANNEL_SIZE 6
#define STK8BA50_DRIVER_NAME "stk8ba50"
+#define STK8BA50_GPIO "stk8ba50_gpio"
+#define STK8BA50_IRQ_NAME "stk8ba50_event"
#define STK8BA50_SCALE_AVAIL "0.0384 0.0767 0.1534 0.3069"
@@ -50,35 +66,76 @@
*
* Locally, the range is stored as a table index.
*/
-static const int stk8ba50_scale_table[][2] = {
+static const struct {
+ u8 reg_val;
+ u32 scale_val;
+} stk8ba50_scale_table[] = {
{3, 38400}, {5, 76700}, {8, 153400}, {12, 306900}
};
+/* Sample rates are stored as { <register value>, <Hz value> } */
+static const struct {
+ u8 reg_val;
+ u16 samp_freq;
+} stk8ba50_samp_freq_table[] = {
+ {0x08, 14}, {0x09, 25}, {0x0A, 56}, {0x0B, 112},
+ {0x0C, 224}, {0x0D, 448}, {0x0E, 896}, {0x0F, 1792}
+};
+
+/* Used to map scan mask bits to their corresponding channel register. */
+static const int stk8ba50_channel_table[] = {
+ STK8BA50_REG_XOUT,
+ STK8BA50_REG_YOUT,
+ STK8BA50_REG_ZOUT
+};
+
struct stk8ba50_data {
struct i2c_client *client;
struct mutex lock;
int range;
+ u8 sample_rate_idx;
+ struct iio_trigger *dready_trig;
+ bool dready_trigger_on;
+ /*
+ * 3 x 16-bit channels (10-bit data, 6-bit padding) +
+	 * 1 x 16-bit padding +
+	 * 4 x 16-bit = one 64-bit timestamp
+ */
+ s16 buffer[8];
};
-#define STK8BA50_ACCEL_CHANNEL(reg, axis) { \
- .type = IIO_ACCEL, \
- .address = reg, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+#define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .shift = STK8BA50_DATA_SHIFT, \
+ .endianness = IIO_CPU, \
+ }, \
}
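To make the scan_type above concrete: samples arrive left-justified in a 16-bit word, and the raw read path recovers them with the same shift (the raw value here is invented for illustration):

/*
 *   raw readout 0xff40:
 *   0xff40 >> STK8BA50_DATA_SHIFT = 0x3fd  (10 data bits)
 *   sign_extend32(0x3fd, 9)       = -3
 */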
static const struct iio_chan_spec stk8ba50_channels[] = {
- STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_XOUT, X),
- STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_YOUT, Y),
- STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_ZOUT, Z),
+ STK8BA50_ACCEL_CHANNEL(0, STK8BA50_REG_XOUT, X),
+ STK8BA50_ACCEL_CHANNEL(1, STK8BA50_REG_YOUT, Y),
+ STK8BA50_ACCEL_CHANNEL(2, STK8BA50_REG_ZOUT, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static IIO_CONST_ATTR(in_accel_scale_available, STK8BA50_SCALE_AVAIL);
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("14 25 56 112 224 448 896 1792");
+
static struct attribute *stk8ba50_attributes[] = {
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -97,7 +154,61 @@ static int stk8ba50_read_accel(struct stk8ba50_data *data, u8 reg)
return ret;
}
- return sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+ return ret;
+}
+
+static int stk8ba50_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct stk8ba50_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (state)
+ ret = i2c_smbus_write_byte_data(data->client,
+ STK8BA50_REG_INTEN2, STK8BA50_DREADY_INT_MASK);
+ else
+ ret = i2c_smbus_write_byte_data(data->client,
+ STK8BA50_REG_INTEN2, 0x00);
+
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to set trigger state\n");
+ else
+ data->dready_trigger_on = state;
+
+ return ret;
+}
+
+static const struct iio_trigger_ops stk8ba50_trigger_ops = {
+ .set_trigger_state = stk8ba50_data_rdy_trigger_set_state,
+ .owner = THIS_MODULE,
+};
+
+static int stk8ba50_set_power(struct stk8ba50_data *data, bool mode)
+{
+ int ret;
+ u8 masked_reg;
+ struct i2c_client *client = data->client;
+
+ ret = i2c_smbus_read_byte_data(client, STK8BA50_REG_POWMODE);
+ if (ret < 0)
+ goto exit_err;
+
+ if (mode)
+ masked_reg = ret | STK8BA50_MODE_POWERBIT;
+ else
+ masked_reg = ret & (~STK8BA50_MODE_POWERBIT);
+
+ ret = i2c_smbus_write_byte_data(client, STK8BA50_REG_POWMODE,
+ masked_reg);
+ if (ret < 0)
+ goto exit_err;
+
+ return ret;
+
+exit_err:
+ dev_err(&client->dev, "failed to change sensor mode\n");
+ return ret;
}
static int stk8ba50_read_raw(struct iio_dev *indio_dev,
@@ -105,17 +216,37 @@ static int stk8ba50_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct stk8ba50_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
- *val = stk8ba50_read_accel(data, chan->address);
+ ret = stk8ba50_set_power(data, STK8BA50_MODE_NORMAL);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return -EINVAL;
+ }
+ ret = stk8ba50_read_accel(data, chan->address);
+ if (ret < 0) {
+ stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+ mutex_unlock(&data->lock);
+ return -EINVAL;
+ }
+ *val = sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+ stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = stk8ba50_scale_table[data->range][1];
+ *val2 = stk8ba50_scale_table[data->range].scale_val;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = stk8ba50_samp_freq_table[data->sample_rate_idx].samp_freq;
+ *val2 = 0;
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -136,7 +267,7 @@ static int stk8ba50_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(stk8ba50_scale_table); i++)
- if (val2 == stk8ba50_scale_table[i][1]) {
+ if (val2 == stk8ba50_scale_table[i].scale_val) {
index = i;
break;
}
@@ -145,7 +276,7 @@ static int stk8ba50_write_raw(struct iio_dev *indio_dev,
ret = i2c_smbus_write_byte_data(data->client,
STK8BA50_REG_RANGE,
- stk8ba50_scale_table[index][0]);
+ stk8ba50_scale_table[index].reg_val);
if (ret < 0)
dev_err(&data->client->dev,
"failed to set measurement range\n");
@@ -153,6 +284,25 @@ static int stk8ba50_write_raw(struct iio_dev *indio_dev,
data->range = index;
return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(stk8ba50_samp_freq_table); i++)
+ if (val == stk8ba50_samp_freq_table[i].samp_freq) {
+ index = i;
+ break;
+ }
+ if (index < 0)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ STK8BA50_REG_BWSEL,
+ stk8ba50_samp_freq_table[index].reg_val);
+ if (ret < 0)
+ dev_err(&data->client->dev,
+ "failed to set sampling rate\n");
+ else
+ data->sample_rate_idx = index;
+
+ return ret;
}
return -EINVAL;
@@ -165,30 +315,100 @@ static const struct iio_info stk8ba50_info = {
.attrs = &stk8ba50_attribute_group,
};
-static int stk8ba50_set_power(struct stk8ba50_data *data, bool mode)
+static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct stk8ba50_data *data = iio_priv(indio_dev);
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->lock);
+ /*
+ * Do a bulk read if all channels are requested,
+ * from 0x02 (XOUT1) to 0x07 (ZOUT2)
+ */
+ if (*(indio_dev->active_scan_mask) == STK8BA50_ALL_CHANNEL_MASK) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ STK8BA50_REG_XOUT,
+ STK8BA50_ALL_CHANNEL_SIZE,
+ (u8 *)data->buffer);
+ if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
+ dev_err(&data->client->dev, "register read failed\n");
+ goto err;
+ }
+ } else {
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = stk8ba50_read_accel(data,
+ stk8ba50_channel_table[bit]);
+ if (ret < 0)
+ goto err;
+
+ data->buffer[i++] = ret;
+ }
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stk8ba50_data_rdy_trig_poll(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct stk8ba50_data *data = iio_priv(indio_dev);
+
+ if (data->dready_trigger_on)
+ iio_trigger_poll(data->dready_trig);
+
+ return IRQ_HANDLED;
+}
+
+static int stk8ba50_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct stk8ba50_data *data = iio_priv(indio_dev);
+
+ return stk8ba50_set_power(data, STK8BA50_MODE_NORMAL);
+}
+
+static int stk8ba50_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct stk8ba50_data *data = iio_priv(indio_dev);
+
+ return stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+}
+
+static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = {
+ .preenable = stk8ba50_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = stk8ba50_buffer_postdisable,
+};
+
+static int stk8ba50_gpio_probe(struct i2c_client *client)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
int ret;
- u8 masked_reg;
- struct i2c_client *client = data->client;
- ret = i2c_smbus_read_byte_data(client, STK8BA50_REG_POWMODE);
- if (ret < 0)
- goto exit_err;
+ if (!client)
+ return -EINVAL;
- if (mode)
- masked_reg = ret | STK8BA50_MODE_POWERBIT;
- else
- masked_reg = ret & (~STK8BA50_MODE_POWERBIT);
+ dev = &client->dev;
- ret = i2c_smbus_write_byte_data(client, STK8BA50_REG_POWMODE,
- masked_reg);
- if (ret < 0)
- goto exit_err;
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, STK8BA50_GPIO, 0, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "acpi gpio get index failed\n");
+ return PTR_ERR(gpio);
+ }
- return ret;
+ ret = gpiod_to_irq(gpio);
+ dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
-exit_err:
- dev_err(&client->dev, "failed to change sensor mode\n");
return ret;
}
@@ -222,28 +442,104 @@ static int stk8ba50_probe(struct i2c_client *client,
STK8BA50_REG_SWRST, STK8BA50_RESET_CMD);
if (ret < 0) {
dev_err(&client->dev, "failed to reset sensor\n");
- return ret;
+ goto err_power_off;
}
/* The default range is +/-2g */
data->range = 0;
+ /* The default sampling rate is 1792 Hz (maximum) */
+ data->sample_rate_idx = STK8BA50_SR_1792HZ_IDX;
+
+ /* Set up interrupts */
+ ret = i2c_smbus_write_byte_data(client,
+ STK8BA50_REG_INTEN2, STK8BA50_DREADY_INT_MASK);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to set up interrupts\n");
+ goto err_power_off;
+ }
+ ret = i2c_smbus_write_byte_data(client,
+ STK8BA50_REG_INTMAP2, STK8BA50_DREADY_INT_MAP);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to set up interrupts\n");
+ goto err_power_off;
+ }
+
+ if (client->irq < 0)
+ client->irq = stk8ba50_gpio_probe(client);
+
+ if (client->irq >= 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ stk8ba50_data_rdy_trig_poll,
+ NULL,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ STK8BA50_IRQ_NAME,
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "request irq %d failed\n",
+ client->irq);
+ goto err_power_off;
+ }
+
+ data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->dready_trig) {
+ ret = -ENOMEM;
+ goto err_power_off;
+ }
+
+ data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->ops = &stk8ba50_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+ ret = iio_trigger_register(data->dready_trig);
+ if (ret) {
+ dev_err(&client->dev, "iio trigger register failed\n");
+ goto err_power_off;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev,
+ iio_pollfunc_store_time,
+ stk8ba50_trigger_handler,
+ &stk8ba50_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ goto err_trigger_unregister;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "device_register failed\n");
- stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+ goto err_buffer_cleanup;
}
return ret;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+err_power_off:
+ stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+ return ret;
}
static int stk8ba50_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct stk8ba50_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
- return stk8ba50_set_power(iio_priv(indio_dev), STK8BA50_MODE_SUSPEND);
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+
+ return stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
}
#ifdef CONFIG_PM_SLEEP
@@ -276,6 +572,7 @@ static const struct i2c_device_id stk8ba50_i2c_id[] = {
{"stk8ba50", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, stk8ba50_i2c_id);
static const struct acpi_device_id stk8ba50_acpi_id[] = {
{"STK8BA50", 0},
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7c5565891cb8..50c103d75af9 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -20,6 +20,9 @@ config AD7266
Say yes here to build support for Analog Devices AD7265 and AD7266
ADCs.
+ To compile this driver as a module, choose M here: the module will be
+ called ad7266.
+
config AD7291
tristate "Analog Devices AD7291 ADC driver"
depends on I2C
@@ -52,8 +55,6 @@ config AD7476
AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
AD7495, AD7910, AD7920 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
-
To compile this driver as a module, choose M here: the
module will be called ad7476.
@@ -63,8 +64,7 @@ config AD7791
select AD_SIGMA_DELTA
help
Say yes here to build support for Analog Devices AD7787, AD7788, AD7789,
- AD7790 and AD7791 SPI analog to digital converters (ADC). If unsure, say
- N (but it is safe to say "Y").
+ AD7790 and AD7791 SPI analog to digital converters (ADC).
To compile this driver as a module, choose M here: the module will be
called ad7791.
@@ -76,7 +76,6 @@ config AD7793
help
Say yes here to build support for Analog Devices AD7785, AD7792, AD7793,
AD7794 and AD7795 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
module will be called AD7793.
@@ -89,7 +88,6 @@ config AD7887
help
Say yes here to build support for Analog Devices
AD7887 SPI analog to digital converter (ADC).
- If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
module will be called ad7887.
@@ -117,6 +115,9 @@ config AD799X
i2c analog to digital converters (ADC). Provides direct access
via sysfs.
+ To compile this driver as a module, choose M here: the module will be
+ called ad799x.
+
config AT91_ADC
tristate "Atmel AT91 ADC"
depends on ARCH_AT91
@@ -127,6 +128,9 @@ config AT91_ADC
help
Say yes here to build support for Atmel AT91 ADC.
+ To compile this driver as a module, choose M here: the module will be
+ called at91_adc.
+
config AXP288_ADC
tristate "X-Powers AXP288 ADC driver"
depends on MFD_AXP20X
@@ -135,6 +139,9 @@ config AXP288_ADC
device. Depending on platform configuration, this general purpose ADC can
be used for sampling sensors such as thermal resistors.
+ To compile this driver as a module, choose M here: the module will be
+ called axp288_adc.
+
config BERLIN2_ADC
tristate "Marvell Berlin2 ADC driver"
depends on ARCH_BERLIN
@@ -151,10 +158,12 @@ config DA9150_GPADC
This driver can also be built as a module. If chosen, the module name
will be da9150-gpadc.
+ To compile this driver as a module, choose M here: the module will be
+	  called da9150-gpadc.
+
config CC10001_ADC
tristate "Cosmic Circuits 10001 ADC driver"
- depends on HAVE_CLK || REGULATOR
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAVE_CLK && REGULATOR
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -171,12 +180,18 @@ config EXYNOS_ADC
of SoCs for drivers such as the touchscreen and hwmon to use to share
this resource.
+ To compile this driver as a module, choose M here: the module will be
+ called exynos_adc.
+
config LP8788_ADC
tristate "LP8788 ADC driver"
depends on MFD_LP8788
help
Say yes here to build support for TI LP8788 ADC.
+ To compile this driver as a module, choose M here: the module will be
+ called lp8788_adc.
+
config MAX1027
tristate "Maxim max1027 ADC driver"
depends on SPI
@@ -186,6 +201,9 @@ config MAX1027
Say yes here to build support for Maxim SPI ADC models
max1027, max1029 and max1031.
+ To compile this driver as a module, choose M here: the module will be
+ called max1027.
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
@@ -202,13 +220,16 @@ config MAX1363
max11646, max11647) Provides direct access via sysfs and buffered
data via the iio dev interface.
+ To compile this driver as a module, choose M here: the module will be
+ called max1363.
+
config MCP320X
tristate "Microchip Technology MCP3x01/02/04/08"
depends on SPI
help
Say yes here to build support for Microchip Technology's
- MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204 or
- MCP3208 analog to digital converter.
+ MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204,
+ MCP3208 or MCP3301 analog to digital converter.
This driver can also be built as a module. If so, the module will be
called mcp320x.
@@ -310,15 +331,18 @@ config TI_AM335X_ADC
Say yes here to build support for Texas Instruments ADC
driver which is also a MFD client.
+ To compile this driver as a module, choose M here: the module will be
+ called ti_am335x_adc.
+
config TWL4030_MADC
tristate "TWL4030 MADC (Monitoring A/D Converter)"
depends on TWL4030_CORE
help
- This driver provides support for Triton TWL4030-MADC. The
- driver supports both RT and SW conversion methods.
+ This driver provides support for Triton TWL4030-MADC. The
+ driver supports both RT and SW conversion methods.
- This driver can also be built as a module. If so, the module will be
- called twl4030-madc.
+ This driver can also be built as a module. If so, the module will be
+ called twl4030-madc.
config TWL6030_GPADC
tristate "TWL6030 GPADC (General Purpose A/D Converter) Support"
@@ -351,6 +375,9 @@ config VIPERBOARD_ADC
Say yes here to access the ADC part of the Nano River
Technologies Viperboard.
+ To compile this driver as a module, choose M here: the module will be
+ called viperboard_adc.
+
config XILINX_XADC
tristate "Xilinx XADC driver"
depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 8a0eb4a04fb5..7b40925dd4ff 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -182,7 +182,7 @@ struct at91_adc_caps {
u8 ts_pen_detect_sensitivity;
/* startup time calculate function */
- u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+ u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
u8 num_channels;
struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
u8 num_channels;
void __iomem *reg_base;
struct at91_adc_reg_desc *registers;
- u8 startup_time;
+ u32 startup_time;
u8 sample_hold_time;
bool sleep_mode;
struct iio_trigger **trig;
@@ -779,7 +779,7 @@ ret:
return ret;
}
-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
{
/*
* Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
}
-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
{
/*
* For sama5d3x and at91sam9x5, the formula changes to:
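Widening startup_time from u8 to u32 lets boards specify startup times above 255 us. As a worked example of the 9260 formula above, here is a standalone sketch using illustrative numbers (10 us startup, 1 MHz ADC clock) and a local mirror of the kernel's power-of-two round_up() helper:

#include <stdio.h>

/* local mirror of the kernel's round_up() for power-of-two multiples */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned int startup_time = 10;		/* us, illustrative */
	unsigned int adc_clk_khz = 1000;	/* 1 MHz, illustrative */

	/* STARTUP field: Startup Time = (ticks + 1) * 8 / ADC Clock */
	unsigned int ticks =
		round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;

	/* prints 2: (2 + 1) * 8 = 24 ADCK cycles >= 10 us at 1 MHz */
	printf("STARTUP = %u\n", ticks);
	return 0;
}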
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index aecc9ad995ad..4946d9bf1764 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -26,7 +26,7 @@
#define BERLIN2_SM_CTRL 0x14
#define BERLIN2_SM_CTRL_SM_SOC_INT BIT(1)
#define BERLIN2_SM_CTRL_SOC_SM_INT BIT(2)
-#define BERLIN2_SM_CTRL_ADC_SEL(x) (BIT(x) << 5) /* 0-15 */
+#define BERLIN2_SM_CTRL_ADC_SEL(x) ((x) << 5) /* 0-15 */
#define BERLIN2_SM_CTRL_ADC_SEL_MASK (0xf << 5)
#define BERLIN2_SM_CTRL_ADC_POWER BIT(9)
#define BERLIN2_SM_CTRL_ADC_CLKSEL_DIV2 (0x0 << 10)
@@ -53,14 +53,14 @@
#define BERLIN2_SM_ADC_MASK 0x3ff
#define BERLIN2_SM_ADC_STATUS 0x1c
#define BERLIN2_SM_ADC_STATUS_DATA_RDY(x) BIT(x) /* 0-15 */
-#define BERLIN2_SM_ADC_STATUS_DATA_RDY_MASK 0xf
+#define BERLIN2_SM_ADC_STATUS_DATA_RDY_MASK GENMASK(15, 0)
#define BERLIN2_SM_ADC_STATUS_INT_EN(x) (BIT(x) << 16) /* 0-15 */
-#define BERLIN2_SM_ADC_STATUS_INT_EN_MASK (0xf << 16)
+#define BERLIN2_SM_ADC_STATUS_INT_EN_MASK GENMASK(31, 16)
#define BERLIN2_SM_TSEN_STATUS 0x24
#define BERLIN2_SM_TSEN_STATUS_DATA_RDY BIT(0)
#define BERLIN2_SM_TSEN_STATUS_INT_EN BIT(1)
#define BERLIN2_SM_TSEN_DATA 0x28
-#define BERLIN2_SM_TSEN_MASK 0xfff
+#define BERLIN2_SM_TSEN_MASK GENMASK(9, 0)
#define BERLIN2_SM_TSEN_CTRL 0x74
#define BERLIN2_SM_TSEN_CTRL_START BIT(8)
#define BERLIN2_SM_TSEN_CTRL_SETTLING_4 (0x0 << 21) /* 4 us */
@@ -86,7 +86,7 @@ struct berlin2_adc_priv {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
}
-static struct iio_chan_spec berlin2_adc_channels[] = {
+static const struct iio_chan_spec berlin2_adc_channels[] = {
BERLIN2_ADC_CHANNEL(0, IIO_VOLTAGE), /* external input */
BERLIN2_ADC_CHANNEL(1, IIO_VOLTAGE), /* external input */
BERLIN2_ADC_CHANNEL(2, IIO_VOLTAGE), /* external input */
@@ -103,7 +103,6 @@ static struct iio_chan_spec berlin2_adc_channels[] = {
BERLIN2_ADC_CHANNEL(7, IIO_VOLTAGE), /* reserved */
IIO_CHAN_SOFT_TIMESTAMP(8), /* timestamp */
};
-#define BERLIN2_N_CHANNELS ARRAY_SIZE(berlin2_adc_channels)
static int berlin2_adc_read(struct iio_dev *indio_dev, int channel)
{
@@ -221,7 +220,7 @@ static int berlin2_adc_read_raw(struct iio_dev *indio_dev,
return temp;
if (temp > 2047)
- temp = -(4096 - temp);
+ temp -= 4096;
/* Convert to milli Celsius */
*val = ((temp * 100000) / 264 - 270000);
@@ -286,8 +285,7 @@ static int berlin2_adc_probe(struct platform_device *pdev)
int irq, tsen_irq;
int ret;
- indio_dev = devm_iio_device_alloc(&pdev->dev,
- sizeof(struct berlin2_adc_priv));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
if (!indio_dev)
return -ENOMEM;
@@ -301,11 +299,11 @@ static int berlin2_adc_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "adc");
if (irq < 0)
- return -ENODEV;
+ return irq;
tsen_irq = platform_get_irq_byname(pdev, "tsen");
if (tsen_irq < 0)
- return -ENODEV;
+ return tsen_irq;
ret = devm_request_irq(&pdev->dev, irq, berlin2_adc_irq, 0,
pdev->dev.driver->name, indio_dev);
@@ -325,8 +323,8 @@ static int berlin2_adc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &berlin2_adc_info;
- indio_dev->num_channels = BERLIN2_N_CHANNELS;
indio_dev->channels = berlin2_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(berlin2_adc_channels);
/* Power up the ADC */
regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
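Two details of the berlin2-adc hunks above are worth unpacking. GENMASK(h, l) expands to the mask with bits h..l set, so GENMASK(15, 0) is 0xffff; and the temperature fold temp -= 4096 is the usual two's-complement conversion of a 12-bit sample, identical to the removed -(4096 - temp) form. A standalone sketch with an illustrative raw value:

#include <stdio.h>

int main(void)
{
	int temp = 4000;	/* 12-bit two's complement raw sample, illustrative */

	if (temp > 2047)
		temp -= 4096;	/* 4000 -> -96, same as -(4096 - 4000) */

	/* driver's scaling to milli degrees Celsius */
	int mc = (temp * 100000) / 264 - 270000;

	printf("raw -> %d, %d milli-C\n", temp, mc);
	return 0;
}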
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 115f6e99a7fa..8254f529b2a9 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -62,6 +62,7 @@ struct cc10001_adc_device {
struct regulator *reg;
u16 *buf;
+ bool shared;
struct mutex lock;
unsigned int start_delay_ns;
unsigned int eoc_delay_ns;
@@ -153,7 +154,8 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
mutex_lock(&adc_dev->lock);
- cc10001_adc_power_up(adc_dev);
+ if (!adc_dev->shared)
+ cc10001_adc_power_up(adc_dev);
/* Calculate delay step for eoc and sampled data */
delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -177,7 +179,8 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
}
done:
- cc10001_adc_power_down(adc_dev);
+ if (!adc_dev->shared)
+ cc10001_adc_power_down(adc_dev);
mutex_unlock(&adc_dev->lock);
@@ -196,7 +199,8 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
unsigned int delay_ns;
u16 val;
- cc10001_adc_power_up(adc_dev);
+ if (!adc_dev->shared)
+ cc10001_adc_power_up(adc_dev);
/* Calculate delay step for eoc and sampled data */
delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -205,7 +209,8 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
- cc10001_adc_power_down(adc_dev);
+ if (!adc_dev->shared)
+ cc10001_adc_power_down(adc_dev);
return val;
}
@@ -322,8 +327,10 @@ static int cc10001_adc_probe(struct platform_device *pdev)
adc_dev = iio_priv(indio_dev);
channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
- if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
+ if (!of_property_read_u32(node, "adc-reserved-channels", &ret)) {
+ adc_dev->shared = true;
channel_map &= ~ret;
+ }
adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(adc_dev->reg))
@@ -368,6 +375,14 @@ static int cc10001_adc_probe(struct platform_device *pdev)
adc_dev->eoc_delay_ns = NSEC_PER_SEC / adc_clk_rate;
adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
+ /*
+ * There is only one register to power-up/power-down the AUX ADC.
+ * If the ADC is shared among multiple CPUs, always power it up here.
+ * If the ADC is used only by the MIPS, power-up/power-down at runtime.
+ */
+ if (adc_dev->shared)
+ cc10001_adc_power_up(adc_dev);
+
/* Setup the ADC channels available on the device */
ret = cc10001_adc_channel_init(indio_dev, channel_map);
if (ret < 0)
@@ -402,6 +417,7 @@ static int cc10001_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
+ cc10001_adc_power_down(adc_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
clk_disable_unprepare(adc_dev->adc_clk);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..b19e4f9d16e0 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -25,6 +25,7 @@
* http://ww1.microchip.com/downloads/en/DeviceDoc/21290D.pdf mcp3201
* http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
* http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21700E.pdf mcp3301
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -47,6 +48,7 @@ enum {
mcp3202,
mcp3204,
mcp3208,
+ mcp3301,
};
struct mcp320x_chip_info {
@@ -76,6 +78,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
switch (device_index) {
case mcp3001:
case mcp3201:
+ case mcp3301:
return 0;
case mcp3002:
case mcp3202:
@@ -102,7 +105,7 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
channel, differential);
- if (device_index != mcp3001 && device_index != mcp3201) {
+ if (device_index != mcp3001 && device_index != mcp3201 && device_index != mcp3301) {
ret = spi_sync(adc->spi, &adc->msg);
if (ret < 0)
return ret;
@@ -125,6 +128,8 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
case mcp3204:
case mcp3208:
return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ case mcp3301:
+ return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
default:
return -EINVAL;
}
@@ -274,6 +279,11 @@ static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
.num_channels = ARRAY_SIZE(mcp3208_channels),
.resolution = 12
},
+ [mcp3301] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 13
+ },
};
static int mcp320x_probe(struct spi_device *spi)
@@ -299,6 +309,8 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
+ adc->chip_info = chip_info;
+
adc->transfer[0].tx_buf = &adc->tx_buf;
adc->transfer[0].len = sizeof(adc->tx_buf);
adc->transfer[1].rx_buf = adc->rx_buf;
@@ -367,6 +379,9 @@ static const struct of_device_id mcp320x_dt_ids[] = {
.compatible = "mcp3208",
.data = &mcp320x_chip_infos[mcp3208],
}, {
+ .compatible = "mcp3301",
+ .data = &mcp320x_chip_infos[mcp3301],
+ }, {
}
};
MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
@@ -381,6 +396,7 @@ static const struct spi_device_id mcp320x_id[] = {
{ "mcp3202", mcp3202 },
{ "mcp3204", mcp3204 },
{ "mcp3208", mcp3208 },
+ { "mcp3301", mcp3301 },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp320x_id);
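The MCP3301 decode above relies on sign_extend32(value, 12): the part returns a 13-bit signed conversion, so bit 12 is the sign bit. A userspace re-implementation (matching the kernel helper's shift trick) with illustrative receive bytes:

#include <stdio.h>
#include <stdint.h>

/* same behaviour as the kernel's sign_extend32(); index is the sign bit */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t rx_buf[2] = { 0x1f, 0xff };	/* illustrative: all 13 bits set */
	int32_t v = sign_extend32(((rx_buf[0] & 0x1f) << 8) | rx_buf[1], 12);

	printf("%d\n", v);	/* prints -1 */
	return 0;
}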
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index b96c636470ef..3555122008b4 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -404,7 +404,6 @@ MODULE_DEVICE_TABLE(of, mcp3422_of_match);
static struct i2c_driver mcp3422_driver = {
.driver = {
.name = "mcp3422",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mcp3422_of_match),
},
.probe = mcp3422_probe,
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 8d4e019ea4ca..9c311c1e1ac7 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
};
module_platform_driver(rockchip_saradc_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Rockchip SARADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index b3a82b4d1a75..2c8374f86252 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -140,7 +140,6 @@ MODULE_DEVICE_TABLE(of, adc081c_of_match);
static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(adc081c_of_match),
},
.probe = adc081c_probe,
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 06f4792240f0..ebe415f10640 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl4030_madc_threaded_irq_handler,
- IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "twl4030_madc", madc);
if (ret) {
dev_err(&pdev->dev, "could not request irq\n");
goto err_i2c;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..6bf4c20eb231 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -68,6 +68,9 @@
#define VF610_ADC_CLK_DIV8 0x60
#define VF610_ADC_CLK_MASK 0x60
#define VF610_ADC_ADLSMP_LONG 0x10
+#define VF610_ADC_ADSTS_SHORT 0x100
+#define VF610_ADC_ADSTS_NORMAL 0x200
+#define VF610_ADC_ADSTS_LONG 0x300
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
@@ -98,6 +101,8 @@
#define VF610_ADC_CALF 0x2
#define VF610_ADC_TIMEOUT msecs_to_jiffies(100)
+#define DEFAULT_SAMPLE_TIME 1000
+
enum clk_sel {
VF610_ADCIOC_BUSCLK_SET,
VF610_ADCIOC_ALTCLK_SET,
@@ -124,6 +129,17 @@ enum conversion_mode_sel {
VF610_ADC_CONV_LOW_POWER,
};
+enum lst_adder_sel {
+ VF610_ADCK_CYCLES_3,
+ VF610_ADCK_CYCLES_5,
+ VF610_ADCK_CYCLES_7,
+ VF610_ADCK_CYCLES_9,
+ VF610_ADCK_CYCLES_13,
+ VF610_ADCK_CYCLES_17,
+ VF610_ADCK_CYCLES_21,
+ VF610_ADCK_CYCLES_25,
+};
+
struct vf610_adc_feature {
enum clk_sel clk_sel;
enum vol_ref vol_ref;
@@ -132,6 +148,8 @@ struct vf610_adc_feature {
int clk_div;
int sample_rate;
int res_mode;
+ u32 lst_adder_index;
+ u32 default_sample_time;
bool calibration;
bool ovwren;
@@ -155,11 +173,13 @@ struct vf610_adc {
};
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+static const u32 vf610_lst_adder[] = { 3, 5, 7, 9, 13, 17, 21, 25 };
static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
{
struct vf610_adc_feature *adc_feature = &info->adc_feature;
unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+ u32 adck_period, lst_addr_min;
int divisor, i;
adck_rate = info->max_adck_rate[adc_feature->conv_mode];
@@ -174,6 +194,19 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
}
/*
+ * Determine the long sample time adder value to be used based
+ * on the default minimum sample time provided.
+ */
+ adck_period = NSEC_PER_SEC / adck_rate;
+ lst_addr_min = adc_feature->default_sample_time / adck_period;
+ for (i = 0; i < ARRAY_SIZE(vf610_lst_adder); i++) {
+ if (vf610_lst_adder[i] > lst_addr_min) {
+ adc_feature->lst_adder_index = i;
+ break;
+ }
+ }
+
+ /*
* Calculate ADC sample frequencies
* Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
* which is the same as bus clock.
@@ -182,12 +215,13 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
* SFCAdder: fixed to 6 ADCK cycles
* AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
* BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+ * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
*/
adck_rate = ipg_rate / info->adc_feature.clk_div;
for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
info->sample_freq_avail[i] =
- adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+ adck_rate / (6 + vf610_hw_avgs[i] *
+ (25 + vf610_lst_adder[adc_feature->lst_adder_index]));
}
static inline void vf610_adc_cfg_init(struct vf610_adc *info)
@@ -347,8 +381,40 @@ static void vf610_adc_sample_set(struct vf610_adc *info)
break;
}
- /* Use the short sample mode */
- cfg_data &= ~(VF610_ADC_ADLSMP_LONG | VF610_ADC_ADSTS_MASK);
+ /*
+ * Set ADLSMP and ADSTS based on the Long Sample Time Adder value
+ * determined.
+ */
+ switch (adc_feature->lst_adder_index) {
+ case VF610_ADCK_CYCLES_3:
+ break;
+ case VF610_ADCK_CYCLES_5:
+ cfg_data |= VF610_ADC_ADSTS_SHORT;
+ break;
+ case VF610_ADCK_CYCLES_7:
+ cfg_data |= VF610_ADC_ADSTS_NORMAL;
+ break;
+ case VF610_ADCK_CYCLES_9:
+ cfg_data |= VF610_ADC_ADSTS_LONG;
+ break;
+ case VF610_ADCK_CYCLES_13:
+ cfg_data |= VF610_ADC_ADLSMP_LONG;
+ break;
+ case VF610_ADCK_CYCLES_17:
+ cfg_data |= VF610_ADC_ADLSMP_LONG;
+ cfg_data |= VF610_ADC_ADSTS_SHORT;
+ break;
+ case VF610_ADCK_CYCLES_21:
+ cfg_data |= VF610_ADC_ADLSMP_LONG;
+ cfg_data |= VF610_ADC_ADSTS_NORMAL;
+ break;
+ case VF610_ADCK_CYCLES_25:
+ cfg_data |= VF610_ADC_ADLSMP_LONG;
+ cfg_data |= VF610_ADC_ADSTS_LONG;
+ break;
+ default:
+ dev_err(info->dev, "error in sample time select\n");
+ }
/* update hardware average selection */
cfg_data &= ~VF610_ADC_AVGS_MASK;
@@ -635,7 +701,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
struct vf610_adc *info = iio_priv(indio_dev);
if ((readval == NULL) ||
- (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
return -EINVAL;
*readval = readl(info->regs + reg);
@@ -713,6 +779,11 @@ static int vf610_adc_probe(struct platform_device *pdev)
of_property_read_u32_array(pdev->dev.of_node, "fsl,adck-max-frequency",
info->max_adck_rate, 3);
+ ret = of_property_read_u32(pdev->dev.of_node, "min-sample-time",
+ &info->adc_feature.default_sample_time);
+ if (ret)
+ info->adc_feature.default_sample_time = DEFAULT_SAMPLE_TIME;
+
platform_set_drvdata(pdev, indio_dev);
init_completion(&info->completion);
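With the long-sample-time support above, the advertised sample rates follow adck_rate / (SFCAdder + AverageNum * (BCT + LSTAdder)). A standalone sketch with an illustrative 4 MHz ADCK clock and the shortest adder (3 cycles):

#include <stdio.h>

int main(void)
{
	unsigned long adck_rate = 4000000;	/* illustrative ADCK rate */
	const unsigned int hw_avgs[] = { 1, 4, 8, 16, 32 };
	unsigned int lst_adder = 3;		/* shortest long-sample adder */
	unsigned int i;

	/* SFCAdder (6) + AverageNum * (BCT (25) + LSTAdder) ADCK cycles */
	for (i = 0; i < sizeof(hw_avgs) / sizeof(hw_avgs[0]); i++)
		printf("avg %2u -> %lu Hz\n", hw_avgs[i],
		       adck_rate / (6 + hw_avgs[i] * (25 + lst_adder)));
	return 0;
}

With avg = 1 this gives 117647 Hz, dropping to 4434 Hz at avg = 32.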
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 610fc98f88ef..595511022795 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
+ if (!atomic_read(&st->user_requested_state))
+ return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
poll_value = hid_sensor_read_poll_value(st);
} else {
- if (!atomic_dec_and_test(&st->data_ready))
+ int val;
+
+ val = atomic_dec_if_positive(&st->data_ready);
+ if (val < 0)
return 0;
+
sensor_hub_device_close(st->hsdev);
state_val = hid_sensor_get_usage_index(st->hsdev,
st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
{
+
#ifdef CONFIG_PM
int ret;
+ atomic_set(&st->user_requested_state, state);
if (state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
return 0;
#else
+ atomic_set(&st->user_requested_state, state);
return _hid_sensor_power_state(st, state);
#endif
}
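The switch from atomic_dec_and_test() to atomic_dec_if_positive() above closes a refcount underflow: a second power-off request used to drive data_ready negative and unbalance sensor_hub_device_close(). A userspace analogue of the semantics (not the kernel implementation), using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* decrement only if the result stays >= 0; return the would-be new value */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
		;
	return old - 1;
}

int main(void)
{
	atomic_int data_ready;

	atomic_init(&data_ready, 1);
	printf("%d\n", dec_if_positive(&data_ready));	/* 0: do the close */
	printf("%d\n", dec_if_positive(&data_ready));	/* -1: already off, skip */
	return 0;
}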
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 9a40097e7cf8..d338bb595db3 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -700,7 +700,6 @@ static struct spi_driver ssp_driver = {
.remove = ssp_remove,
.driver = {
.pm = &ssp_pm_ops,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ssp_of_match),
.name = "sensorhub"
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8086cbcff87d..2e7fdb502645 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -126,6 +126,9 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
int err, i = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ if (sdata->sensor_settings->fs.addr == 0)
+ return 0;
+
err = st_sensors_match_fs(sdata->sensor_settings, fs, &i);
if (err < 0)
goto st_accel_set_fullscale_error;
@@ -479,46 +482,43 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
{
- u8 wai;
int i, n, err;
+ u8 wai;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- ST_SENSORS_DEFAULT_WAI_ADDRESS, &wai);
- if (err < 0) {
- dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
- goto read_wai_error;
- }
-
for (i = 0; i < num_sensors_list; i++) {
- if (sensor_settings[i].wai == wai)
+ for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) {
+ if (strcmp(indio_dev->name,
+ sensor_settings[i].sensors_supported[n]) == 0) {
+ break;
+ }
+ }
+ if (n < ST_SENSORS_MAX_4WAI)
break;
}
- if (i == num_sensors_list)
- goto device_not_supported;
+ if (i == num_sensors_list) {
+ dev_err(&indio_dev->dev, "device name %s not recognized.\n",
+ indio_dev->name);
+ return -ENODEV;
+ }
- for (n = 0; n < ARRAY_SIZE(sensor_settings[i].sensors_supported); n++) {
- if (strcmp(indio_dev->name,
- &sensor_settings[i].sensors_supported[n][0]) == 0)
- break;
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sensor_settings[i].wai_addr, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
+ return err;
}
- if (n == ARRAY_SIZE(sensor_settings[i].sensors_supported)) {
- dev_err(&indio_dev->dev, "device name \"%s\" and WhoAmI (0x%02x) mismatch",
- indio_dev->name, wai);
- goto sensor_name_mismatch;
+
+ if (sensor_settings[i].wai != wai) {
+ dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
+ indio_dev->name, wai);
+ return -EINVAL;
}
sdata->sensor_settings =
(struct st_sensor_settings *)&sensor_settings[i];
return i;
-
-device_not_supported:
- dev_err(&indio_dev->dev, "device not supported: WhoAmI (0x%x).\n", wai);
-sensor_name_mismatch:
- err = -ENODEV;
-read_wai_error:
- return err;
}
EXPORT_SYMBOL(st_sensors_check_device_support);
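The rework above inverts the old flow: instead of reading a fixed Who-Am-I address and then matching names, it matches the device name first and only then reads that sensor's own wai_addr for verification, which is what makes per-sensor WAI addresses possible (see the st_gyro wai_addr hunks below). A condensed, purely illustrative model of the two-step lookup with hypothetical table entries:

#include <stdio.h>
#include <string.h>

#define MAX_NAMES 2

struct settings {
	unsigned char wai;
	const char *names[MAX_NAMES];
};

/* hypothetical, trimmed-down settings table */
static const struct settings table[] = {
	{ 0xd3, { "l3g4200d", "lsm330dl_gyro" } },
	{ 0xd4, { "l3gd20", "lsm330d_gyro" } },
};

static int find_settings(const char *name, unsigned char wai_read)
{
	unsigned int i, n;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		for (n = 0; n < MAX_NAMES; n++)
			if (!strcmp(name, table[i].names[n]))
				/* name matched; now verify WhoAmI */
				return table[i].wai == wai_read ? (int)i : -1;
	return -1;	/* name not recognized */
}

int main(void)
{
	printf("%d\n", find_settings("l3gd20", 0xd4));	/* 1 */
	printf("%d\n", find_settings("l3gd20", 0xd3));	/* -1: WAI mismatch */
	return 0;
}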
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index f03b92fd3803..c067e6821496 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -630,7 +630,6 @@ MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
static struct i2c_driver ad5064_i2c_driver = {
.driver = {
.name = "ad5064",
- .owner = THIS_MODULE,
},
.probe = ad5064_i2c_probe,
.remove = ad5064_i2c_remove,
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 9de4c4d38280..130de9b3e0bf 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -593,7 +593,6 @@ MODULE_DEVICE_TABLE(i2c, ad5380_i2c_ids);
static struct i2c_driver ad5380_i2c_driver = {
.driver = {
.name = "ad5380",
- .owner = THIS_MODULE,
},
.probe = ad5380_i2c_probe,
.remove = ad5380_i2c_remove,
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 46bb62a5c1d4..07e17d72a3f3 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -569,7 +569,6 @@ MODULE_DEVICE_TABLE(i2c, ad5446_i2c_ids);
static struct i2c_driver ad5446_i2c_driver = {
.driver = {
.name = "ad5446",
- .owner = THIS_MODULE,
},
.probe = ad5446_i2c_probe,
.remove = ad5446_i2c_remove,
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 61bb9d4239ea..e98428df0d44 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -22,7 +22,7 @@
#include "ad5624r.h"
static int ad5624r_spi_write(struct spi_device *spi,
- u8 cmd, u8 addr, u16 val, u8 len)
+ u8 cmd, u8 addr, u16 val, u8 shift)
{
u32 data;
u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
* 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
- data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
msg[0] = data >> 16;
msg[1] = data >> 8;
msg[2] = data;
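The rename from len to shift makes the caller pass the left-justification directly: a 12-bit AD5624R code sits 4 don't-care bits above the LSB. A worked packing example with illustrative field values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cmd = 3, addr = 7;	/* illustrative command/address */
	uint16_t val = 0xabc;		/* 12-bit code for an AD5624R */
	uint8_t shift = 4;		/* 16 - 12 don't-care bits */
	uint32_t data = (0u << 22) | ((uint32_t)cmd << 19) |
			((uint32_t)addr << 16) | ((uint32_t)val << shift);
	uint8_t msg[3] = { data >> 16, data >> 8, data };

	printf("%02x %02x %02x\n", msg[0], msg[1], msg[2]);	/* 1f ab c0 */
	return 0;
}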
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 6e914495b346..28b8748ea824 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -392,7 +392,6 @@ static struct i2c_driver max5821_driver = {
.driver = {
.name = "max5821",
.pm = MAX5821_PM_OPS,
- .owner = THIS_MODULE,
},
.probe = max5821_probe,
.remove = max5821_remove,
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 10a0dfc3b01f..9890c81c027d 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -72,7 +72,6 @@ static int adf4350_sync_config(struct adf4350_state *st)
for (i = ADF4350_REG5; i >= ADF4350_REG0; i--) {
if ((st->regs_hw[i] != st->regs[i]) ||
((i == ADF4350_REG0) && doublebuf)) {
-
switch (i) {
case ADF4350_REG1:
case ADF4350_REG4:
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index b3d0e94f72eb..8d2439345673 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -53,7 +53,8 @@ config ADXRS450
config BMG160
tristate "BOSCH BMG160 Gyro Sensor"
depends on I2C
- select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
driver. This driver also supports BMI055 gyroscope.
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 591bd555e1f3..26de876b223d 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -473,6 +473,7 @@ enum adis16136_id {
ID_ADIS16133,
ID_ADIS16135,
ID_ADIS16136,
+ ID_ADIS16137,
};
static const struct adis16136_chip_info adis16136_chip_info[] = {
@@ -488,6 +489,10 @@ static const struct adis16136_chip_info adis16136_chip_info[] = {
.precision = IIO_DEGREE_TO_RAD(450),
.fullscale = 24623,
},
+ [ID_ADIS16137] = {
+ .precision = IIO_DEGREE_TO_RAD(1000),
+ .fullscale = 24609,
+ },
};
static int adis16136_probe(struct spi_device *spi)
@@ -557,6 +562,7 @@ static const struct spi_device_id adis16136_ids[] = {
{ "adis16133", ID_ADIS16133 },
{ "adis16135", ID_ADIS16135 },
{ "adis16136", ID_ADIS16136 },
+ { "adis16137", ID_ADIS16137 },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16136_ids);
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 75fe0edd3d0f..00c6ad9bf35f 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -101,19 +101,24 @@
#define ADIS16260_SCAN_TEMP 3
#define ADIS16260_SCAN_ANGL 4
-/* Power down the device */
-static int adis16260_stop_device(struct iio_dev *indio_dev)
-{
- struct adis *adis = iio_priv(indio_dev);
- int ret;
- u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+struct adis16260_chip_info {
+ unsigned int gyro_max_val;
+ unsigned int gyro_max_scale;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
- ret = adis_write_reg_16(adis, ADIS16260_SLP_CNT, val);
- if (ret)
- dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
+struct adis16260 {
+ const struct adis16260_chip_info *info;
- return ret;
-}
+ struct adis adis;
+};
+
+enum adis16260_type {
+ ADIS16251,
+ ADIS16260,
+ ADIS16266,
+};
static const struct iio_chan_spec adis16260_channels[] = {
ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
@@ -131,6 +136,55 @@ static const struct iio_chan_spec adis16260_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(5),
};
+static const struct iio_chan_spec adis16266_channels[] = {
+ ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 14),
+ ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct adis16260_chip_info adis16260_chip_info_table[] = {
+ [ADIS16251] = {
+ .gyro_max_scale = 80,
+ .gyro_max_val = IIO_RAD_TO_DEGREE(4368),
+ .channels = adis16260_channels,
+ .num_channels = ARRAY_SIZE(adis16260_channels),
+ },
+ [ADIS16260] = {
+ .gyro_max_scale = 320,
+ .gyro_max_val = IIO_RAD_TO_DEGREE(4368),
+ .channels = adis16260_channels,
+ .num_channels = ARRAY_SIZE(adis16260_channels),
+ },
+ [ADIS16266] = {
+ .gyro_max_scale = 14000,
+ .gyro_max_val = IIO_RAD_TO_DEGREE(3357),
+ .channels = adis16266_channels,
+ .num_channels = ARRAY_SIZE(adis16266_channels),
+ },
+};
+
+/* Power down the device */
+static int adis16260_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16260 *adis16260 = iio_priv(indio_dev);
+ int ret;
+ u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+
+ ret = adis_write_reg_16(&adis16260->adis, ADIS16260_SLP_CNT, val);
+ if (ret)
+ dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
static const u8 adis16260_addresses[][2] = {
[ADIS16260_SCAN_GYRO] = { ADIS16260_GYRO_OFF, ADIS16260_GYRO_SCALE },
};
@@ -140,7 +194,9 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
int *val, int *val2,
long mask)
{
- struct adis *adis = iio_priv(indio_dev);
+ struct adis16260 *adis16260 = iio_priv(indio_dev);
+ const struct adis16260_chip_info *info = adis16260->info;
+ struct adis *adis = &adis16260->adis;
int ret;
u8 addr;
s16 val16;
@@ -152,15 +208,9 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
- *val = 0;
- if (spi_get_device_id(adis->spi)->driver_data) {
- /* 0.01832 degree / sec */
- *val2 = IIO_DEGREE_TO_RAD(18320);
- } else {
- /* 0.07326 degree / sec */
- *val2 = IIO_DEGREE_TO_RAD(73260);
- }
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = info->gyro_max_scale;
+ *val2 = info->gyro_max_val;
+ return IIO_VAL_FRACTIONAL;
case IIO_INCLI:
*val = 0;
*val2 = IIO_DEGREE_TO_RAD(36630);
@@ -224,7 +274,8 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
int val2,
long mask)
{
- struct adis *adis = iio_priv(indio_dev);
+ struct adis16260 *adis16260 = iio_priv(indio_dev);
+ struct adis *adis = &adis16260->adis;
int ret;
u8 addr;
u8 t;
@@ -305,35 +356,42 @@ static const struct adis_data adis16260_data = {
static int adis16260_probe(struct spi_device *spi)
{
+ const struct spi_device_id *id;
+ struct adis16260 *adis16260;
struct iio_dev *indio_dev;
- struct adis *adis;
int ret;
+ id = spi_get_device_id(spi);
+ if (!id)
+ return -ENODEV;
+
/* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16260));
if (!indio_dev)
return -ENOMEM;
- adis = iio_priv(indio_dev);
+ adis16260 = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
- indio_dev->name = spi_get_device_id(spi)->name;
+ adis16260->info = &adis16260_chip_info_table[id->driver_data];
+
+ indio_dev->name = id->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16260_info;
- indio_dev->channels = adis16260_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16260_channels);
+ indio_dev->channels = adis16260->info->channels;
+ indio_dev->num_channels = adis16260->info->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(adis, indio_dev, spi, &adis16260_data);
+ ret = adis_init(&adis16260->adis, indio_dev, spi, &adis16260_data);
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(adis, indio_dev, NULL);
+ ret = adis_setup_buffer_and_trigger(&adis16260->adis, indio_dev, NULL);
if (ret)
return ret;
/* Get the device into a sane initial state */
- ret = adis_initial_startup(adis);
+ ret = adis_initial_startup(&adis16260->adis);
if (ret)
goto error_cleanup_buffer_trigger;
ret = iio_device_register(indio_dev);
@@ -343,18 +401,18 @@ static int adis16260_probe(struct spi_device *spi)
return 0;
error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(adis, indio_dev);
+ adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
return ret;
}
static int adis16260_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *adis = iio_priv(indio_dev);
+ struct adis16260 *adis16260 = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
adis16260_stop_device(indio_dev);
- adis_cleanup_buffer_and_trigger(adis, indio_dev);
+ adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
return 0;
}
@@ -364,11 +422,12 @@ static int adis16260_remove(struct spi_device *spi)
* support for the on chip filtering.
*/
static const struct spi_device_id adis16260_id[] = {
- {"adis16260", 0},
- {"adis16265", 0},
- {"adis16250", 0},
- {"adis16255", 0},
- {"adis16251", 1},
+ {"adis16260", ADIS16260},
+ {"adis16265", ADIS16260},
+ {"adis16266", ADIS16266},
+ {"adis16250", ADIS16260},
+ {"adis16255", ADIS16260},
+ {"adis16251", ADIS16251},
{}
};
MODULE_DEVICE_TABLE(spi, adis16260_id);
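The chip_info table plus IIO_VAL_FRACTIONAL replaces the old hard-coded micro-radian branches: userspace now receives gyro_max_scale / gyro_max_val at full precision. A quick check that the adis16251 entry reproduces the removed 0.01832 degree/sec comment, with the rounding macro as defined in include/linux/iio/iio.h (reproduced so the sketch is self-contained):

#include <stdio.h>

#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 157079ULL) / 314159ULL)

int main(void)
{
	unsigned long long gyro_max_val = IIO_RAD_TO_DEGREE(4368);
	unsigned int gyro_max_scale = 80;	/* adis16251 */

	/* IIO_VAL_FRACTIONAL: scale = val / val2 */
	printf("val2 = %llu -> %.8f rad/s (~0.01832 deg/s)\n",
	       gyro_max_val, (double)gyro_max_scale / gyro_max_val);
	return 0;
}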
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index f0fd94055d88..c102a6325bb0 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -379,7 +379,6 @@ MODULE_DEVICE_TABLE(i2c, itg3200_id);
static struct i2c_driver itg3200_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "itg3200",
.pm = &itg3200_pm_ops,
},
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index ffe96642b6d0..4b993a5bc9a1 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -131,6 +131,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
.wai = ST_GYRO_1_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3G4200D_GYRO_DEV_NAME,
[1] = LSM330DL_GYRO_DEV_NAME,
@@ -190,6 +191,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
{
.wai = ST_GYRO_2_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
[1] = LSM330D_GYRO_DEV_NAME,
@@ -252,6 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
{
.wai = ST_GYRO_3_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
},
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 64480b16c689..6848451f817a 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -99,7 +99,6 @@ MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
static struct i2c_driver st_gyro_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-gyro-i2c",
.of_match_table = of_match_ptr(st_gyro_of_match),
},
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 7d79a1ac5f5f..1165b1c4f9d6 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/timekeeping.h>
#include <linux/iio/iio.h>
@@ -46,7 +47,8 @@
* Note that when reading the sensor actually 84 edges are detected, but
* since the last edge is not significant, we only store 83:
*/
-#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
+#define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \
+ DHT11_EDGES_PREAMBLE + 1)
/* Data transmission timing (nano seconds) */
#define DHT11_START_TRANSMISSION 18 /* ms */
@@ -62,6 +64,7 @@ struct dht11 {
int irq;
struct completion completion;
+ /* The iio sysfs interface doesn't prevent concurrent reads: */
struct mutex lock;
s64 timestamp;
@@ -87,32 +90,20 @@ static unsigned char dht11_decode_byte(int *timing, int threshold)
return ret;
}
-static int dht11_decode(struct dht11 *dht11, int offset)
+static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
{
- int i, t, timing[DHT11_BITS_PER_READ], threshold,
- timeres = DHT11_SENSOR_RESPONSE;
+ int i, t, timing[DHT11_BITS_PER_READ], threshold;
unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
- /* Calculate timestamp resolution */
- for (i = 1; i < dht11->num_edges; ++i) {
- t = dht11->edges[i].ts - dht11->edges[i-1].ts;
- if (t > 0 && t < timeres)
- timeres = t;
- }
- if (2*timeres > DHT11_DATA_BIT_HIGH) {
- pr_err("dht11: timeresolution %d too bad for decoding\n",
- timeres);
- return -EIO;
- }
threshold = DHT11_DATA_BIT_HIGH / timeres;
- if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold)
+ if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
pr_err("dht11: WARNING: decoding ambiguous\n");
/* scale down with timeres and check validity */
for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
- t = dht11->edges[offset + 2*i + 2].ts -
- dht11->edges[offset + 2*i + 1].ts;
- if (!dht11->edges[offset + 2*i + 1].value)
+ t = dht11->edges[offset + 2 * i + 2].ts -
+ dht11->edges[offset + 2 * i + 1].ts;
+ if (!dht11->edges[offset + 2 * i + 1].value)
return -EIO; /* lost synchronisation */
timing[i] = t / timeres;
}
@@ -126,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
return -EIO;
- dht11->timestamp = iio_get_time_ns();
+ dht11->timestamp = ktime_get_real_ns();
if (hum_int < 20) { /* DHT22 */
dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
((temp_int & 0x80) ? -100 : 100);
@@ -154,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
- dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+ dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);
@@ -166,14 +157,26 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
}
static int dht11_read_raw(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
+ const struct iio_chan_spec *chan,
int *val, int *val2, long m)
{
struct dht11 *dht11 = iio_priv(iio_dev);
- int ret;
+ int ret, timeres;
mutex_lock(&dht11->lock);
- if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+ timeres = ktime_get_resolution_ns();
+ if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
+ dev_err(dht11->dev, "timeresolution %dns too low\n",
+ timeres);
+ /* In theory a better clock could become available
+ * at some point ... and there is no error code
+ * that really fits better.
+ */
+ ret = -EAGAIN;
+ goto err;
+ }
+
reinit_completion(&dht11->completion);
dht11->num_edges = 0;
@@ -192,13 +195,13 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
goto err;
ret = wait_for_completion_killable_timeout(&dht11->completion,
- HZ);
+ HZ);
free_irq(dht11->irq, iio_dev);
if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
dev_err(&iio_dev->dev,
- "Only %d signal edges detected\n",
+ "Only %d signal edges detected\n",
dht11->num_edges);
ret = -ETIMEDOUT;
}
@@ -206,9 +209,10 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
goto err;
ret = dht11_decode(dht11,
- dht11->num_edges == DHT11_EDGES_PER_READ ?
+ dht11->num_edges == DHT11_EDGES_PER_READ ?
DHT11_EDGES_PREAMBLE :
- DHT11_EDGES_PREAMBLE - 2);
+ DHT11_EDGES_PREAMBLE - 2,
+ timeres);
if (ret)
goto err;
}
@@ -261,9 +265,10 @@ static int dht11_probe(struct platform_device *pdev)
dht11 = iio_priv(iio);
dht11->dev = dev;
- dht11->gpio = ret = of_get_gpio(node, 0);
+ ret = of_get_gpio(node, 0);
if (ret < 0)
return ret;
+ dht11->gpio = ret;
ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name);
if (ret)
return ret;
@@ -274,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev)
return -EINVAL;
}
- dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
platform_set_drvdata(pdev, iio);
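Since the decode now scales edge times by the clock resolution reported by ktime_get_resolution_ns(), the bit threshold becomes DHT11_DATA_BIT_HIGH / timeres, and decoding is flagged ambiguous when a low pulse could scale past it. A standalone sketch, with the nominal bit timings (roughly 27 us low / 70 us high pulses, per the defines earlier in dht11.c) and an illustrative 1 us resolution:

#include <stdio.h>

#define DHT11_DATA_BIT_LOW	27000	/* ns, from dht11.c */
#define DHT11_DATA_BIT_HIGH	70000	/* ns, from dht11.c */

int main(void)
{
	int timeres = 1000;	/* 1 us clock resolution, illustrative */
	int threshold = DHT11_DATA_BIT_HIGH / timeres;	/* 70 */

	if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
		printf("decoding ambiguous\n");
	else
		printf("0-bit ~%d, 1-bit ~%d, threshold %d\n",
		       DHT11_DATA_BIT_LOW / timeres,
		       DHT11_DATA_BIT_HIGH / timeres, threshold);
	return 0;
}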
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index bdd586e6d955..91972ccd8aaf 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -177,7 +177,6 @@ MODULE_DEVICE_TABLE(i2c, si7005_id);
static struct i2c_driver si7005_driver = {
.driver = {
.name = "si7005",
- .owner = THIS_MODULE,
},
.probe = si7005_probe,
.id_table = si7005_id,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 2fd68f2219a7..abc4c50de9e8 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -139,7 +139,9 @@ enum adis16400_chip_variant {
ADIS16360,
ADIS16362,
ADIS16364,
+ ADIS16367,
ADIS16400,
+ ADIS16445,
ADIS16448,
};
@@ -622,6 +624,17 @@ static const struct iio_chan_spec adis16400_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
+static const struct iio_chan_spec adis16445_channels[] = {
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 16),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 16),
+ ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
static const struct iio_chan_spec adis16448_channels[] = {
ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
@@ -696,7 +709,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
[ADIS16300] = {
.channels = adis16300_channels,
.num_channels = ARRAY_SIZE(adis16300_channels),
- .flags = ADIS16400_HAS_SLOW_MODE,
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
.accel_scale_micro = 5884,
.temp_scale_nano = 140000000, /* 0.14 C */
@@ -763,6 +777,18 @@ static struct adis16400_chip_info adis16400_chips[] = {
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
},
+ [ADIS16367] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(200000), /* 0.2 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
[ADIS16400] = {
.channels = adis16400_channels,
.num_channels = ARRAY_SIZE(adis16400_channels),
@@ -774,13 +800,26 @@ static struct adis16400_chip_info adis16400_chips[] = {
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
},
+ [ADIS16445] = {
+ .channels = adis16445_channels,
+ .num_channels = ARRAY_SIZE(adis16445_channels),
+ .flags = ADIS16400_HAS_PROD_ID |
+ ADIS16400_HAS_SERIAL_NUMBER |
+ ADIS16400_BURST_DIAG_STAT,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(250), /* 1/4000 g */
+ .temp_scale_nano = 73860000, /* 0.07386 C */
+ .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+ .set_freq = adis16334_set_freq,
+ .get_freq = adis16334_get_freq,
+ },
[ADIS16448] = {
.channels = adis16448_channels,
.num_channels = ARRAY_SIZE(adis16448_channels),
.flags = ADIS16400_HAS_PROD_ID |
ADIS16400_HAS_SERIAL_NUMBER |
ADIS16400_BURST_DIAG_STAT,
- .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */
.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
.temp_scale_nano = 73860000, /* 0.07386 C */
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
@@ -926,6 +965,7 @@ static int adis16400_remove(struct spi_device *spi)
static const struct spi_device_id adis16400_id[] = {
{"adis16300", ADIS16300},
+ {"adis16305", ADIS16300},
{"adis16334", ADIS16334},
{"adis16350", ADIS16350},
{"adis16354", ADIS16350},
@@ -934,8 +974,10 @@ static const struct spi_device_id adis16400_id[] = {
{"adis16362", ADIS16362},
{"adis16364", ADIS16364},
{"adis16365", ADIS16360},
+ {"adis16367", ADIS16367},
{"adis16400", ADIS16400},
{"adis16405", ADIS16400},
+ {"adis16445", ADIS16445},
{"adis16448", ADIS16448},
{}
};
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 989605dd6f78..b94bfd3f595b 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -110,6 +110,10 @@
struct adis16480_chip_info {
unsigned int num_channels;
const struct iio_chan_spec *channels;
+ unsigned int gyro_max_val;
+ unsigned int gyro_max_scale;
+ unsigned int accel_max_val;
+ unsigned int accel_max_scale;
};
struct adis16480 {
@@ -497,19 +501,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
static int adis16480_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val, int *val2, long info)
{
+ struct adis16480 *st = iio_priv(indio_dev);
+
switch (info) {
case IIO_CHAN_INFO_RAW:
return adis_single_conversion(indio_dev, chan, 0, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
- *val = 0;
- *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->chip_info->gyro_max_scale;
+ *val2 = st->chip_info->gyro_max_val;
+ return IIO_VAL_FRACTIONAL;
case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->chip_info->accel_max_scale;
+ *val2 = st->chip_info->accel_max_val;
+ return IIO_VAL_FRACTIONAL;
case IIO_MAGN:
*val = 0;
*val2 = 100; /* 0.0001 gauss */
@@ -674,18 +680,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16375] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
+ /*
+ * storing the value in rad/degree and the scale in degree
+ * gives us the result in rad and better precision than
+ * storing the scale directly in rad.
+ */
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
+ .gyro_max_scale = 300,
+ .accel_max_val = IIO_M_S_2_TO_G(21973),
+ .accel_max_scale = 18,
},
[ADIS16480] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
+ .accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(20000),
+ .accel_max_scale = 5,
},
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(22500),
+ .accel_max_scale = 18,
},
};
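As with adis16260 above, the fractional scale recovers precision that the IIO_VAL_INT_PLUS_MICRO path truncated: the old adis16480 gyro branch reported a flat 349 urad/s, while 450 / IIO_RAD_TO_DEGREE(22500) evaluates to ~349.066 urad/s, i.e. exactly 0.02 degree/sec. A small check (macro reproduced from include/linux/iio/iio.h):

#include <stdio.h>

#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 157079ULL) / 314159ULL)

int main(void)
{
	double new_scale = 450.0 / IIO_RAD_TO_DEGREE(22500);

	/* old path: IIO_DEGREE_TO_RAD(20000) = 349 micro rad/s, flat */
	printf("new: %.9f rad/s\n", new_scale);	/* 0.000349066 */
	return 0;
}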
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 17d4bb15be4d..f0e06093b5e8 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
return -EINVAL;
}
+static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result, i;
@@ -673,6 +690,10 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
/* constant IIO attribute */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
+static IIO_CONST_ATTR(in_anglvel_scale_available,
+ "0.000133090 0.000266181 0.000532362 0.001064724");
+static IIO_CONST_ATTR(in_accel_scale_available,
+ "0.000598 0.001196 0.002392 0.004785");
static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
inv_mpu6050_fifo_rate_store);
static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
@@ -685,6 +706,8 @@ static struct attribute *inv_attributes[] = {
&iio_dev_attr_in_accel_matrix.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
NULL,
};
@@ -696,6 +719,7 @@ static const struct iio_info mpu_info = {
.driver_module = THIS_MODULE,
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
+ .write_raw_get_fmt = &inv_write_raw_get_fmt,
.attrs = &inv_attribute_group,
.validate_trigger = inv_mpu6050_validate_trigger,
};
@@ -903,7 +927,6 @@ static struct i2c_driver inv_mpu_driver = {
.remove = inv_mpu_remove,
.id_table = inv_mpu_id,
.driver = {
- .owner = THIS_MODULE,
.name = "inv-mpu6050",
.pm = INV_MPU6050_PMOPS,
.acpi_match_table = ACPI_PTR(inv_acpi_match),
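The write_raw_get_fmt hook above exists because the smallest gyro step in the new in_anglvel_scale_available list, 0.000133090 rad/s, carries significant digits beyond the six decimals that IIO_VAL_INT_PLUS_MICRO can parse or print; angular-velocity scales therefore switch to IIO_VAL_INT_PLUS_NANO. A toy illustration of the difference:

#include <stdio.h>

int main(void)
{
	long nano = 133090;	/* smallest gyro step, nano rad/s */

	printf("micro: 0.%06ld\n", nano / 1000);	/* 0.000133 - digits lost */
	printf("nano : 0.%09ld\n", nano);		/* 0.000133090 */
	return 0;
}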
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 462a010628cd..82cdf5090fa7 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1363,7 +1363,7 @@ static int kmx61_probe(struct i2c_client *client,
if (client->irq < 0)
client->irq = kmx61_gpio_probe(client, data);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
kmx61_data_rdy_trig_poll,
kmx61_event_handler,
@@ -1445,10 +1445,10 @@ err_iio_unregister_mag:
err_iio_unregister_acc:
iio_device_unregister(data->acc_indio_dev);
err_buffer_cleanup_mag:
- if (client->irq >= 0)
+ if (client->irq > 0)
iio_triggered_buffer_cleanup(data->mag_indio_dev);
err_buffer_cleanup_acc:
- if (client->irq >= 0)
+ if (client->irq > 0)
iio_triggered_buffer_cleanup(data->acc_indio_dev);
err_trigger_unregister_motion:
iio_trigger_unregister(data->motion_trig);
@@ -1472,7 +1472,7 @@ static int kmx61_remove(struct i2c_client *client)
iio_device_unregister(data->acc_indio_dev);
iio_device_unregister(data->mag_indio_dev);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
iio_triggered_buffer_cleanup(data->acc_indio_dev);
iio_triggered_buffer_cleanup(data->mag_indio_dev);
iio_trigger_unregister(data->acc_dready_trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 6eee1b044c60..d7e908acb480 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -71,8 +71,9 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
if (avail >= to_wait) {
/* force a flush for non-blocking reads */
- if (!to_wait && !avail && to_flush)
- iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+ if (!to_wait && avail < to_flush)
+ iio_buffer_flush_hwfifo(indio_dev, buf,
+ to_flush - avail);
return true;
}
@@ -90,9 +91,16 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
/**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ * @filp: File structure pointer for the char device
+ * @buf: Destination buffer for iio buffer read
+ * @n: First n bytes to read
+ * @f_ps: Long offset provided by the user as a seek position
*
* This function relies on all buffer implementations having an
* iio_buffer as their first element.
+ *
+ * Return: number of bytes read on success, or a negative error
+ * code on failure
**/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps)
@@ -100,8 +108,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
size_t datum_size;
- size_t to_wait = 0;
- size_t to_read;
+ size_t to_wait;
int ret;
if (!indio_dev->info)
@@ -119,14 +126,14 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
if (!datum_size)
return 0;
- to_read = min_t(size_t, n / datum_size, rb->watermark);
-
- if (!(filp->f_flags & O_NONBLOCK))
- to_wait = to_read;
+ if (filp->f_flags & O_NONBLOCK)
+ to_wait = 0;
+ else
+ to_wait = min_t(size_t, n / datum_size, rb->watermark);
do {
ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait, to_read));
+ iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
if (ret)
return ret;
@@ -143,6 +150,12 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
/**
* iio_buffer_poll() - poll the buffer to find out if it has data
+ * @filp: File structure pointer for device access
+ * @wait: Poll table structure pointer for which the driver adds
+ * a wait queue
+ *
+ * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ * or 0 for other cases
*/
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
@@ -151,7 +164,7 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_buffer *rb = indio_dev->buffer;
if (!indio_dev->info)
- return -ENODEV;
+ return 0;
poll_wait(filp, &rb->pollq, wait);
if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
@@ -1136,7 +1149,7 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
- * struct iio_demux_table() - table describing demux memcpy ops
+ * struct iio_demux_table - table describing demux memcpy ops
* @from: index to copy from
* @to: index to copy to
* @length: how many bytes to copy
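The read-path rework above changes two behaviours: a non-blocking reader no longer needs the buffer to be empty to trigger a hardware-fifo flush (any shortfall against the request is flushed), and a blocking reader now waits for min(requested, watermark) samples. A simplified userspace model of the reworked readiness check, purely for illustration:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* simplified model of the reworked iio_buffer_ready() */
static bool buffer_ready(size_t avail, size_t to_wait, size_t to_flush,
			 size_t *flushed)
{
	*flushed = 0;
	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (to_wait == 0 && avail < to_flush)
			*flushed = to_flush - avail;	/* pull from hwfifo */
		return true;
	}
	return false;
}

int main(void)
{
	size_t flushed;
	/* O_NONBLOCK read of 16 samples with 4 buffered: ready at once,
	 * and 12 samples are flushed up from the hardware fifo */
	bool ready = buffer_ready(4, 0, 16, &flushed);

	printf("ready=%d flushed=%zu\n", ready, flushed);
	return 0;
}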
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 3524b0de8721..b3fcc2c449d8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,14 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
+ [IIO_MOD_X_AND_Y] = "x&y",
+ [IIO_MOD_X_AND_Z] = "x&z",
+ [IIO_MOD_Y_AND_Z] = "y&z",
+ [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
+ [IIO_MOD_X_OR_Y] = "x|y",
+ [IIO_MOD_X_OR_Z] = "x|z",
+ [IIO_MOD_Y_OR_Z] = "y|z",
+ [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
[IIO_MOD_LIGHT_BOTH] = "both",
@@ -398,10 +406,16 @@ EXPORT_SYMBOL_GPL(iio_enum_write);
/**
* iio_format_value() - Formats a IIO value into its string representation
- * @buf: The buffer to which the formated value gets written
- * @type: One of the IIO_VAL_... constants. This decides how the val and val2
- * parameters are formatted.
- * @vals: pointer to the values, exact meaning depends on the type parameter.
+ * @buf: The buffer to which the formatted value gets written
+ * @type: One of the IIO_VAL_... constants. This decides how the val
+ * and val2 parameters are formatted.
+ * @size: Number of IIO value entries contained in vals
+ * @vals: Pointer to the values, exact meaning depends on the
+ * type parameter.
+ *
+ * Return: 0 by default, a negative number on failure or the
+ * total number of characters written for a type that belongs
+ * to the IIO_VAL_... constant.
*/
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
@@ -1088,6 +1102,11 @@ EXPORT_SYMBOL_GPL(devm_iio_device_free);
/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
+ * @inode: Inode structure for identifying the device in the file system
+ * @filp: File structure for iio device used to keep and later access
+ * private data
+ *
+ * Return: 0 on success or -EBUSY if the device is already opened
**/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
@@ -1106,7 +1125,11 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
/**
* iio_chrdev_release() - chrdev file close buffer access and ioctls
- **/
+ * @inode: Inode structure pointer for the char device
+ * @filp: File structure pointer for the char device
+ *
+ * Return: 0 for successful release
+ */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
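
The Return: conventions documented above are easiest to see from the driver side: read_raw() hands val/val2 plus an IIO_VAL_... code to the core, and iio_format_value() renders them into the sysfs buffer. A kernel-style sketch under that assumption (driver name hypothetical, assumes <linux/iio/iio.h>):

/* Hypothetical read_raw() fragment reporting a scale of 1.500000. */
static int my_adc_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val, int *val2, long mask)
{
	if (mask != IIO_CHAN_INFO_SCALE)
		return -EINVAL;

	*val = 1;			/* integer part */
	*val2 = 500000;			/* micro part */
	return IIO_VAL_INT_PLUS_MICRO;	/* core prints "1.500000" */
}
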
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 894d8137c4cf..cae332b1d7ea 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -32,6 +32,7 @@
* @dev_attr_list: list of event interface sysfs attribute
* @flags: file operations related flags including busy flag.
* @group: event interface sysfs attribute group
+ * @read_lock: lock to protect kfifo read operations
*/
struct iio_event_interface {
wait_queue_head_t wait;
@@ -75,6 +76,11 @@ EXPORT_SYMBOL(iio_push_event);
/**
* iio_event_poll() - poll the event queue to find out if it has data
+ * @filep: File structure pointer to identify the device
+ * @wait: Poll table pointer to add the wait queue on
+ *
+ * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ * or 0 for other cases
*/
static unsigned int iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
@@ -84,7 +90,7 @@ static unsigned int iio_event_poll(struct file *filep,
unsigned int events = 0;
if (!indio_dev->info)
- return -ENODEV;
+ return events;
poll_wait(filep, &ev_int->wait, wait);
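
Both poll fixes in this series apply the same rule: a poll() handler returns a POLL* event mask, so a missing device is reported as 0 (no events) rather than -ENODEV, which user space could misread as a bogus mask. A sketch of the convention (type and fields hypothetical; assumes <linux/poll.h> and <linux/wait.h>):

struct my_dev {				/* hypothetical device state */
	bool alive;
	bool data_ready;
	wait_queue_head_t waitq;
};

static unsigned int my_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct my_dev *dev = filp->private_data;

	if (!dev->alive)
		return 0;		/* "no events", never -errno */

	poll_wait(filp, &dev->waitq, wait);
	return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
}
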
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index d31098e0c43f..570606c2adbd 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -40,7 +40,14 @@ static DEFINE_MUTEX(iio_trigger_list_lock);
/**
* iio_trigger_read_name() - retrieve useful identifying name
- **/
+ * @dev: device associated with the iio_trigger
+ * @attr: pointer to the device_attribute structure that is
+ * being processed
+ * @buf: buffer to print the name into
+ *
+ * Return: a negative number on failure or the number of written
+ * characters on success.
+ */
static ssize_t iio_trigger_read_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -288,10 +295,17 @@ EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
* iio_trigger_read_current() - trigger consumer sysfs query current trigger
+ * @dev: device associated with an industrial I/O device
+ * @attr: pointer to the device_attribute structure that
+ * is being processed
+ * @buf: buffer where the current trigger name will be printed into
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
- **/
+ *
+ * Return: a negative number on failure, the number of characters written
+ * on success or 0 if no trigger is available
+ */
static ssize_t iio_trigger_read_current(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -305,11 +319,18 @@ static ssize_t iio_trigger_read_current(struct device *dev,
/**
* iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ * @dev: device associated with an industrial I/O device
+ * @attr: device attribute that is being processed
+ * @buf: string buffer that holds the name of the trigger
+ * @len: length of the trigger name held by buf
*
* For trigger consumers the current_trigger interface allows the trigger
* used for this device to be specified at run time based on the trigger's
* name.
- **/
+ *
+ * Return: negative error code on failure or length of the buffer
+ * on success
+ */
static ssize_t iio_trigger_write_current(struct device *dev,
struct device_attribute *attr,
const char *buf,
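
The Return: lines added to these sysfs accessors restate the usual contract: show() returns the number of bytes placed in the PAGE_SIZE buffer (or a negative errno) and store() returns the number of bytes consumed, typically len. A small sketch under those assumptions (names hypothetical):

struct my_trig {			/* hypothetical: just a name */
	const char *name;
};

static ssize_t my_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct my_trig *t = dev_get_drvdata(dev);

	/* bytes written into the PAGE_SIZE buffer, or -errno */
	return snprintf(buf, PAGE_SIZE, "%s\n", t->name);
}
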
diff --git a/drivers/iio/industrialio-triggered-buffer.c b/drivers/iio/industrialio-triggered-buffer.c
index 15a5341b5e7b..4b2858ba1fd6 100644
--- a/drivers/iio/industrialio-triggered-buffer.c
+++ b/drivers/iio/industrialio-triggered-buffer.c
@@ -24,8 +24,8 @@ static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
/**
* iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
* @indio_dev: IIO device structure
- * @pollfunc_bh: Function which will be used as pollfunc bottom half
- * @pollfunc_th: Function which will be used as pollfunc top half
+ * @h: Function which will be used as pollfunc top half
+ * @thread: Function which will be used as pollfunc bottom half
* @setup_ops: Buffer setup functions to use for this device.
* If NULL the default setup functions for triggered
* buffers will be used.
@@ -42,8 +42,8 @@ static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
* iio_triggered_buffer_cleanup().
*/
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
- irqreturn_t (*pollfunc_bh)(int irq, void *p),
- irqreturn_t (*pollfunc_th)(int irq, void *p),
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops)
{
struct iio_buffer *buffer;
@@ -57,8 +57,8 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
iio_device_attach_buffer(indio_dev, buffer);
- indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
- pollfunc_th,
+ indio_dev->pollfunc = iio_alloc_pollfunc(h,
+ thread,
IRQF_ONESHOT,
indio_dev,
"%s_consumer%d",
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e6198b7c9cbf..7ed859a700c4 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -86,7 +86,7 @@ config CM3323
depends on I2C
tristate "Capella CM3323 color light sensor"
help
- Say Y here if you want to build a driver for Capela CM3323
+ Say Y here if you want to build a driver for Capella CM3323
color sensor.
To compile this driver as a module, choose M here: the module will
@@ -168,6 +168,17 @@ config JSA1212
To compile this driver as a module, choose M here:
the module will be called jsa1212.
+config RPR0521
+ tristate "ROHM RPR0521 ALS and proximity sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for ROHM's RPR0521
+ ambient light and proximity sensor device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called rpr0521.
+
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
depends on MFD_LM3533
@@ -188,6 +199,7 @@ config SENSORS_LM3533
config LTR501
tristate "LTR-501ALS-01 light sensor"
depends on I2C
+ select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -198,9 +210,31 @@ config LTR501
This driver can also be built as a module. If so, the module
will be called ltr501.
+config OPT3001
+ tristate "Texas Instruments OPT3001 Light Sensor"
+ depends on I2C
+ help
+ If you say Y or M here, you get support for Texas Instruments
+ OPT3001 Ambient Light Sensor.
+
+ If built as a dynamically linked module, it will be called
+ opt3001.
+
+config PA12203001
+ tristate "TXC PA12203001 light and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TXC PA12203001
+ ambient light and proximity sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called pa12203001.
+
config STK3310
tristate "STK3310 ALS and proximity sensor"
depends on I2C
+ select REGMAP_I2C
help
Say yes here to get support for the Sensortek STK3310 ambient light
and proximity sensor. The STK3311 model is also supported by this
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index e2d50fd59c66..91c74c014b6f 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_OPT3001) += opt3001.o
+obj-$(CONFIG_PA12203001) += pa12203001.o
+obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_TCS3414) += tcs3414.o
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 1dafa0756bfa..60537ec0c923 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -65,20 +65,20 @@ static const struct iio_chan_spec acpi_als_channels[] = {
* to acpi_als_channels[], the evt_buffer below will grow
* automatically.
*/
-#define EVT_NR_SOURCES ARRAY_SIZE(acpi_als_channels)
-#define EVT_BUFFER_SIZE \
- (sizeof(s64) + (EVT_NR_SOURCES * sizeof(s32)))
+#define ACPI_ALS_EVT_NR_SOURCES ARRAY_SIZE(acpi_als_channels)
+#define ACPI_ALS_EVT_BUFFER_SIZE \
+ (sizeof(s64) + (ACPI_ALS_EVT_NR_SOURCES * sizeof(s32)))
struct acpi_als {
struct acpi_device *device;
struct mutex lock;
- s32 evt_buffer[EVT_BUFFER_SIZE];
+ s32 evt_buffer[ACPI_ALS_EVT_BUFFER_SIZE];
};
/*
* All types of properties the ACPI0008 block can report. The ALI, ALC, ALT
- * and ALP can all be handled by als_read_value() below, while the ALR is
+ * and ALP can all be handled by acpi_als_read_value() below, while the ALR is
* special.
*
* The _ALR property returns tables that can be used to fine-tune the values
@@ -93,7 +93,7 @@ struct acpi_als {
#define ACPI_ALS_POLLING "_ALP"
#define ACPI_ALS_TABLES "_ALR"
-static int als_read_value(struct acpi_als *als, char *prop, s32 *val)
+static int acpi_als_read_value(struct acpi_als *als, char *prop, s32 *val)
{
unsigned long long temp_val;
acpi_status status;
@@ -122,11 +122,11 @@ static void acpi_als_notify(struct acpi_device *device, u32 event)
mutex_lock(&als->lock);
- memset(buffer, 0, EVT_BUFFER_SIZE);
+ memset(buffer, 0, ACPI_ALS_EVT_BUFFER_SIZE);
switch (event) {
case ACPI_ALS_NOTIFY_ILLUMINANCE:
- ret = als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
+ ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
if (ret < 0)
goto out;
*buffer++ = val;
@@ -159,7 +159,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
if (chan->type != IIO_LIGHT)
return -EINVAL;
- ret = als_read_value(als, ACPI_ALS_ILLUMINANCE, &temp_val);
+ ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &temp_val);
if (ret < 0)
return ret;
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 9ddde0ca9c34..e1b9fa5a7e91 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -515,7 +515,6 @@ MODULE_DEVICE_TABLE(i2c, apds9300_id);
static struct i2c_driver apds9300_driver = {
.driver = {
.name = APDS9300_DRV_NAME,
- .owner = THIS_MODULE,
.pm = APDS9300_PM_OPS,
},
.probe = apds9300_probe,
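
This .owner removal, and the identical ones in the hunks that follow, rely on the i2c core: i2c_add_driver() (and thus module_i2c_driver()) is a macro that passes THIS_MODULE to i2c_register_driver(), which fills in driver.owner itself. A registration after the cleanup minimally looks like (driver names hypothetical):

static struct i2c_driver my_driver = {
	.driver = {
		.name = "my-sensor",	/* no .owner: the core sets it */
	},
	.probe = my_probe,		/* hypothetical callbacks */
	.remove = my_remove,
	.id_table = my_id,
};
module_i2c_driver(my_driver);
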
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 564c2b3c1a83..8b4164343f20 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -319,7 +319,6 @@ MODULE_DEVICE_TABLE(i2c, bh1750_id);
static struct i2c_driver bh1750_driver = {
.driver = {
.name = "bh1750",
- .owner = THIS_MODULE,
.pm = BH1750_PM_OPS,
},
.probe = bh1750_probe,
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 5d12ae54d088..d6fd0dace74f 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -353,12 +353,12 @@ static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm32181" },
{ }
};
+MODULE_DEVICE_TABLE(of, cm32181_of_match);
static struct i2c_driver cm32181_driver = {
.driver = {
.name = "cm32181",
.of_match_table = of_match_ptr(cm32181_of_match),
- .owner = THIS_MODULE,
},
.id_table = cm32181_id,
.probe = cm32181_probe,
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 39c8d99cc48e..fe89b6823217 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -417,11 +417,11 @@ static const struct of_device_id cm3232_of_match[] = {
{.compatible = "capella,cm3232"},
{}
};
+MODULE_DEVICE_TABLE(of, cm3232_of_match);
static struct i2c_driver cm3232_driver = {
.driver = {
.name = "cm3232",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(cm3232_of_match),
#ifdef CONFIG_PM_SLEEP
.pm = &cm3232_pm_ops,
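
The MODULE_DEVICE_TABLE(of, ...) lines added here and in the other cm*/gp2ap020a00f hunks export the OF match table into the module's alias list, so udev/modprobe can autoload the driver when a matching devicetree node is probed. The recurring pattern, with a hypothetical compatible string:

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-sensor" },
	{ }				/* sentinel */
};
MODULE_DEVICE_TABLE(of, my_of_match);	/* emits MODALIAS entries */
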
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 869033e48a1f..d823c112d54b 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -29,7 +29,7 @@
#define CM3323_CONF_SD_BIT BIT(0) /* sensor disable */
#define CM3323_CONF_AF_BIT BIT(1) /* auto/manual force mode */
-#define CM3323_CONF_IT_MASK (BIT(4) | BIT(5) | BIT(6))
+#define CM3323_CONF_IT_MASK GENMASK(6, 4)
#define CM3323_CONF_IT_SHIFT 4
#define CM3323_INT_TIME_AVAILABLE "0.04 0.08 0.16 0.32 0.64 1.28"
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
if (val == cm3323_int_time[i].val &&
val2 == cm3323_int_time[i].val2) {
- reg_conf = data->reg_conf;
+ reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
reg_conf |= i << CM3323_CONF_IT_SHIFT;
ret = i2c_smbus_write_word_data(data->client,
@@ -133,9 +133,11 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
return ret;
data->reg_conf = reg_conf;
+
return 0;
}
}
+
return -EINVAL;
}
@@ -148,6 +150,7 @@ static int cm3323_get_it_bits(struct cm3323_data *data)
if (bits >= ARRAY_SIZE(cm3323_int_time))
return -EINVAL;
+
return bits;
}
@@ -155,7 +158,7 @@ static int cm3323_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
- int i, ret;
+ int ret;
struct cm3323_data *data = iio_priv(indio_dev);
switch (mask) {
@@ -172,14 +175,14 @@ static int cm3323_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
mutex_lock(&data->mutex);
- i = cm3323_get_it_bits(data);
- if (i < 0) {
+ ret = cm3323_get_it_bits(data);
+ if (ret < 0) {
mutex_unlock(&data->mutex);
- return -EINVAL;
+ return ret;
}
- *val = cm3323_int_time[i].val;
- *val2 = cm3323_int_time[i].val2;
+ *val = cm3323_int_time[ret].val;
+ *val2 = cm3323_int_time[ret].val2;
mutex_unlock(&data->mutex);
return IIO_VAL_INT_PLUS_MICRO;
@@ -243,11 +246,13 @@ static int cm3323_probe(struct i2c_client *client,
dev_err(&client->dev, "cm3323 chip init failed\n");
return ret;
}
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "failed to register iio dev\n");
goto err_init;
}
+
return 0;
err_init:
cm3323_disable(indio_dev);
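
The functional fix in the cm3323 hunk is the read-modify-write: the old code OR-ed the new integration-time bits into reg_conf without clearing the previous ones, so any change that needed bits cleared (e.g. index 3 -> 1) left stale bits set. The corrected pattern in isolation (macro names hypothetical, assumes <linux/bits.h> for GENMASK):

#define MY_IT_MASK	GENMASK(6, 4)
#define MY_IT_SHIFT	4

static u16 my_update_it(u16 reg_conf, unsigned int idx)
{
	reg_conf &= ~MY_IT_MASK;	/* drop the stale field first */
	reg_conf |= idx << MY_IT_SHIFT;	/* then insert the new value */
	return reg_conf;
}
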
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 39fc67e82138..c8d7b5ea7e78 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -731,12 +731,12 @@ static const struct of_device_id cm36651_of_match[] = {
{ .compatible = "capella,cm36651" },
{ }
};
+MODULE_DEVICE_TABLE(of, cm36651_of_match);
static struct i2c_driver cm36651_driver = {
.driver = {
.name = "cm36651",
.of_match_table = cm36651_of_match,
- .owner = THIS_MODULE,
},
.probe = cm36651_probe,
.remove = cm36651_remove,
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 32b6449833fa..6d41086f7c64 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1634,13 +1634,13 @@ static const struct of_device_id gp2ap020a00f_of_match[] = {
{ .compatible = "sharp,gp2ap020a00f" },
{ }
};
+MODULE_DEVICE_TABLE(of, gp2ap020a00f_of_match);
#endif
static struct i2c_driver gp2ap020a00f_driver = {
.driver = {
.name = GP2A_I2C_NAME,
.of_match_table = of_match_ptr(gp2ap020a00f_of_match),
- .owner = THIS_MODULE,
},
.probe = gp2ap020a00f_probe,
.remove = gp2ap020a00f_remove,
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 0d248476f4c9..45ca056f019e 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -284,8 +284,7 @@ static int hid_prox_probe(struct platform_device *pdev)
goto error_free_dev_mem;
}
- indio_dev->num_channels =
- ARRAY_SIZE(prox_channels);
+ indio_dev->num_channels = ARRAY_SIZE(prox_channels);
indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &prox_info;
indio_dev->name = name;
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index c82f4a6f8464..e2945a20e5f6 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -197,9 +197,21 @@ done:
return IRQ_HANDLED;
}
+static IIO_CONST_ATTR(scale_available, "0.005722 0.152590");
+
+static struct attribute *isl29125_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group isl29125_attribute_group = {
+ .attrs = isl29125_attributes,
+};
+
static const struct iio_info isl29125_info = {
.read_raw = isl29125_read_raw,
.write_raw = isl29125_write_raw,
+ .attrs = &isl29125_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -334,7 +346,6 @@ static struct i2c_driver isl29125_driver = {
.driver = {
.name = ISL29125_DRV_NAME,
.pm = &isl29125_pm_ops,
- .owner = THIS_MODULE,
},
.probe = isl29125_probe,
.remove = isl29125_remove,
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 3a3af89beaf9..c4e8c6b6c3c3 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -457,7 +457,6 @@ static struct i2c_driver jsa1212_driver = {
.driver = {
.name = JSA1212_DRIVER_NAME,
.pm = JSA1212_PM_OPS,
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
},
.probe = jsa1212_probe,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 1ef7d3773ab9..809a961b9a7f 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data)
if (ret < 0)
return ret;
- data->als_contr = ret | data->chip_info->als_mode_active;
+ data->als_contr = status | data->chip_info->als_mode_active;
ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status);
if (ret < 0)
@@ -1551,7 +1551,6 @@ static struct i2c_driver ltr501_driver = {
.name = LTR501_DRV_NAME,
.pm = &ltr501_pm_ops,
.acpi_match_table = ACPI_PTR(ltr_acpi_match),
- .owner = THIS_MODULE,
},
.probe = ltr501_probe,
.remove = ltr501_remove,
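
The one-line ltr501 change above fixes a classic regmap pitfall: regmap_read() returns only 0 or a negative errno and stores the register contents through its output pointer, so OR-ing ret into als_contr silently discarded the register value. The intended pattern, sketched generically (function name hypothetical):

static int my_read_contr(struct regmap *map, unsigned int reg,
			 unsigned int active_mask, unsigned int *contr)
{
	unsigned int status;
	int ret;

	ret = regmap_read(map, reg, &status);	/* value lands in status */
	if (ret < 0)
		return ret;			/* ret is only 0 or -errno */

	*contr = status | active_mask;		/* use the value, not ret */
	return 0;
}
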
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
new file mode 100644
index 000000000000..923aa6aef0ed
--- /dev/null
+++ b/drivers/iio/light/opt3001.c
@@ -0,0 +1,804 @@
+/*
+ * opt3001.c - Texas Instruments OPT3001 Light Sensor
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Based on previous work from: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 of the License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define OPT3001_RESULT 0x00
+#define OPT3001_CONFIGURATION 0x01
+#define OPT3001_LOW_LIMIT 0x02
+#define OPT3001_HIGH_LIMIT 0x03
+#define OPT3001_MANUFACTURER_ID 0x7e
+#define OPT3001_DEVICE_ID 0x7f
+
+#define OPT3001_CONFIGURATION_RN_MASK (0xf << 12)
+#define OPT3001_CONFIGURATION_RN_AUTO (0xc << 12)
+
+#define OPT3001_CONFIGURATION_CT BIT(11)
+
+#define OPT3001_CONFIGURATION_M_MASK (3 << 9)
+#define OPT3001_CONFIGURATION_M_SHUTDOWN (0 << 9)
+#define OPT3001_CONFIGURATION_M_SINGLE (1 << 9)
+#define OPT3001_CONFIGURATION_M_CONTINUOUS (2 << 9) /* also 3 << 9 */
+
+#define OPT3001_CONFIGURATION_OVF BIT(8)
+#define OPT3001_CONFIGURATION_CRF BIT(7)
+#define OPT3001_CONFIGURATION_FH BIT(6)
+#define OPT3001_CONFIGURATION_FL BIT(5)
+#define OPT3001_CONFIGURATION_L BIT(4)
+#define OPT3001_CONFIGURATION_POL BIT(3)
+#define OPT3001_CONFIGURATION_ME BIT(2)
+
+#define OPT3001_CONFIGURATION_FC_MASK (3 << 0)
+
+/* The end-of-conversion enable is located in the low-limit register */
+#define OPT3001_LOW_LIMIT_EOC_ENABLE 0xc000
+
+#define OPT3001_REG_EXPONENT(n) ((n) >> 12)
+#define OPT3001_REG_MANTISSA(n) ((n) & 0xfff)
+
+/*
+ * Time to wait for conversion result to be ready. The device datasheet
+ * worst-case max value is 880ms. Add some slack to be on the safe side.
+ */
+#define OPT3001_RESULT_READY_TIMEOUT msecs_to_jiffies(1000)
+
+struct opt3001 {
+ struct i2c_client *client;
+ struct device *dev;
+
+ struct mutex lock;
+ u16 ok_to_ignore_lock:1;
+ u16 result_ready:1;
+ wait_queue_head_t result_ready_queue;
+ u16 result;
+
+ u32 int_time;
+ u32 mode;
+
+ u16 high_thresh_mantissa;
+ u16 low_thresh_mantissa;
+
+ u8 high_thresh_exp;
+ u8 low_thresh_exp;
+};
+
+struct opt3001_scale {
+ int val;
+ int val2;
+};
+
+static const struct opt3001_scale opt3001_scales[] = {
+ {
+ .val = 40,
+ .val2 = 950000,
+ },
+ {
+ .val = 81,
+ .val2 = 900000,
+ },
+ {
+ .val = 163,
+ .val2 = 800000,
+ },
+ {
+ .val = 327,
+ .val2 = 600000,
+ },
+ {
+ .val = 655,
+ .val2 = 200000,
+ },
+ {
+ .val = 1310,
+ .val2 = 400000,
+ },
+ {
+ .val = 2620,
+ .val2 = 800000,
+ },
+ {
+ .val = 5241,
+ .val2 = 600000,
+ },
+ {
+ .val = 10483,
+ .val2 = 200000,
+ },
+ {
+ .val = 20966,
+ .val2 = 400000,
+ },
+	{
+		.val = 41932,
+		.val2 = 800000,
+	},
+ {
+ .val = 83865,
+ .val2 = 600000,
+ },
+};
+
+static int opt3001_find_scale(const struct opt3001 *opt, int val,
+ int val2, u8 *exponent)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt3001_scales); i++) {
+ const struct opt3001_scale *scale = &opt3001_scales[i];
+
+ /*
+ * Combine the integer and micro parts for comparison
+ * purposes. Use milli lux precision to avoid 32-bit integer
+ * overflows.
+ */
+ if ((val * 1000 + val2 / 1000) <=
+ (scale->val * 1000 + scale->val2 / 1000)) {
+ *exponent = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void opt3001_to_iio_ret(struct opt3001 *opt, u8 exponent,
+ u16 mantissa, int *val, int *val2)
+{
+ int lux;
+
+ lux = 10 * (mantissa << exponent);
+ *val = lux / 1000;
+ *val2 = (lux - (*val * 1000)) * 1000;
+}
+
+static void opt3001_set_mode(struct opt3001 *opt, u16 *reg, u16 mode)
+{
+ *reg &= ~OPT3001_CONFIGURATION_M_MASK;
+ *reg |= mode;
+ opt->mode = mode;
+}
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.1 0.8");
+
+static struct attribute *opt3001_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group opt3001_attribute_group = {
+ .attrs = opt3001_attributes,
+};
+
+static const struct iio_event_spec opt3001_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec opt3001_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .event_spec = opt3001_event_spec,
+ .num_event_specs = ARRAY_SIZE(opt3001_event_spec),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
+{
+ int ret;
+ u16 mantissa;
+ u16 reg;
+ u8 exponent;
+ u16 value;
+
+ /*
+ * Enable the end-of-conversion interrupt mechanism. Note that doing
+	 * so will overwrite the low-limit value; however, we will restore
+	 * this value later on.
+ */
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
+ OPT3001_LOW_LIMIT_EOC_ENABLE);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
+
+ /* Reset data-ready indicator flag (will be set in the IRQ routine) */
+ opt->result_ready = false;
+
+ /* Allow IRQ to access the device despite lock being set */
+ opt->ok_to_ignore_lock = true;
+
+ /* Configure for single-conversion mode and start a new conversion */
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+ reg = ret;
+ opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SINGLE);
+
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+ reg);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+ /* Wait for the IRQ to indicate the conversion is complete */
+ ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready,
+ OPT3001_RESULT_READY_TIMEOUT);
+
+err:
+ /* Disallow IRQ to access the device while lock is active */
+ opt->ok_to_ignore_lock = false;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ /*
+ * Disable the end-of-conversion interrupt mechanism by restoring the
+ * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note
+ * that selectively clearing those enable bits would affect the actual
+ * limit value due to bit-overlap and therefore can't be done.
+ */
+ value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
+ value);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
+
+ exponent = OPT3001_REG_EXPONENT(opt->result);
+ mantissa = OPT3001_REG_MANTISSA(opt->result);
+
+ opt3001_to_iio_ret(opt, exponent, mantissa, val, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int opt3001_get_int_time(struct opt3001 *opt, int *val, int *val2)
+{
+ *val = 0;
+ *val2 = opt->int_time;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int opt3001_set_int_time(struct opt3001 *opt, int time)
+{
+ int ret;
+ u16 reg;
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ return ret;
+ }
+
+ reg = ret;
+
+ switch (time) {
+ case 100000:
+ reg &= ~OPT3001_CONFIGURATION_CT;
+ opt->int_time = 100000;
+ break;
+ case 800000:
+ reg |= OPT3001_CONFIGURATION_CT;
+ opt->int_time = 800000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+ reg);
+}
+
+static int opt3001_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+
+ if (opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+ return -EBUSY;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ mutex_lock(&opt->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = opt3001_get_lux(opt, val, val2);
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = opt3001_get_int_time(opt, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&opt->lock);
+
+ return ret;
+}
+
+static int opt3001_write_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+
+ if (opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+ return -EBUSY;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_INT_TIME)
+ return -EINVAL;
+
+ if (val != 0)
+ return -EINVAL;
+
+ mutex_lock(&opt->lock);
+ ret = opt3001_set_int_time(opt, val2);
+ mutex_unlock(&opt->lock);
+
+ return ret;
+}
+
+static int opt3001_read_event_value(struct iio_dev *iio,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct opt3001 *opt = iio_priv(iio);
+ int ret = IIO_VAL_INT_PLUS_MICRO;
+
+ mutex_lock(&opt->lock);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ opt3001_to_iio_ret(opt, opt->high_thresh_exp,
+ opt->high_thresh_mantissa, val, val2);
+ break;
+ case IIO_EV_DIR_FALLING:
+ opt3001_to_iio_ret(opt, opt->low_thresh_exp,
+ opt->low_thresh_mantissa, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&opt->lock);
+
+ return ret;
+}
+
+static int opt3001_write_event_value(struct iio_dev *iio,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+
+ u16 mantissa;
+ u16 value;
+ u16 reg;
+
+ u8 exponent;
+
+ if (val < 0)
+ return -EINVAL;
+
+ mutex_lock(&opt->lock);
+
+ ret = opt3001_find_scale(opt, val, val2, &exponent);
+ if (ret < 0) {
+ dev_err(opt->dev, "can't find scale for %d.%06u\n", val, val2);
+ goto err;
+ }
+
+ mantissa = (((val * 1000) + (val2 / 1000)) / 10) >> exponent;
+ value = (exponent << 12) | mantissa;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ reg = OPT3001_HIGH_LIMIT;
+ opt->high_thresh_mantissa = mantissa;
+ opt->high_thresh_exp = exponent;
+ break;
+ case IIO_EV_DIR_FALLING:
+ reg = OPT3001_LOW_LIMIT;
+ opt->low_thresh_mantissa = mantissa;
+ opt->low_thresh_exp = exponent;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = i2c_smbus_write_word_swapped(opt->client, reg, value);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n", reg);
+ goto err;
+ }
+
+err:
+ mutex_unlock(&opt->lock);
+
+ return ret;
+}
+
+static int opt3001_read_event_config(struct iio_dev *iio,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct opt3001 *opt = iio_priv(iio);
+
+ return opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS;
+}
+
+static int opt3001_write_event_config(struct iio_dev *iio,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+ u16 mode;
+ u16 reg;
+
+ if (state && opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+ return 0;
+
+ if (!state && opt->mode == OPT3001_CONFIGURATION_M_SHUTDOWN)
+ return 0;
+
+ mutex_lock(&opt->lock);
+
+ mode = state ? OPT3001_CONFIGURATION_M_CONTINUOUS
+ : OPT3001_CONFIGURATION_M_SHUTDOWN;
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+ reg = ret;
+ opt3001_set_mode(opt, &reg, mode);
+
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+ reg);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+err:
+ mutex_unlock(&opt->lock);
+
+ return ret;
+}
+
+static const struct iio_info opt3001_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &opt3001_attribute_group,
+ .read_raw = opt3001_read_raw,
+ .write_raw = opt3001_write_raw,
+ .read_event_value = opt3001_read_event_value,
+ .write_event_value = opt3001_write_event_value,
+ .read_event_config = opt3001_read_event_config,
+ .write_event_config = opt3001_write_event_config,
+};
+
+static int opt3001_read_id(struct opt3001 *opt)
+{
+ char manufacturer[2];
+ u16 device_id;
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_MANUFACTURER_ID);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_MANUFACTURER_ID);
+ return ret;
+ }
+
+ manufacturer[0] = ret >> 8;
+ manufacturer[1] = ret & 0xff;
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_DEVICE_ID);
+ return ret;
+ }
+
+ device_id = ret;
+
+ dev_info(opt->dev, "Found %c%c OPT%04x\n", manufacturer[0],
+ manufacturer[1], device_id);
+
+ return 0;
+}
+
+static int opt3001_configure(struct opt3001 *opt)
+{
+ int ret;
+ u16 reg;
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ return ret;
+ }
+
+ reg = ret;
+
+ /* Enable automatic full-scale setting mode */
+ reg &= ~OPT3001_CONFIGURATION_RN_MASK;
+ reg |= OPT3001_CONFIGURATION_RN_AUTO;
+
+ /* Reflect status of the device's integration time setting */
+ if (reg & OPT3001_CONFIGURATION_CT)
+ opt->int_time = 800000;
+ else
+ opt->int_time = 100000;
+
+ /* Ensure device is in shutdown initially */
+ opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
+
+ /* Configure for latched window-style comparison operation */
+ reg |= OPT3001_CONFIGURATION_L;
+ reg &= ~OPT3001_CONFIGURATION_POL;
+ reg &= ~OPT3001_CONFIGURATION_ME;
+ reg &= ~OPT3001_CONFIGURATION_FC_MASK;
+
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+ reg);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_CONFIGURATION);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_LOW_LIMIT);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
+
+ opt->low_thresh_mantissa = OPT3001_REG_MANTISSA(ret);
+ opt->low_thresh_exp = OPT3001_REG_EXPONENT(ret);
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_HIGH_LIMIT);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_HIGH_LIMIT);
+ return ret;
+ }
+
+ opt->high_thresh_mantissa = OPT3001_REG_MANTISSA(ret);
+ opt->high_thresh_exp = OPT3001_REG_EXPONENT(ret);
+
+ return 0;
+}
+
+static irqreturn_t opt3001_irq(int irq, void *_iio)
+{
+ struct iio_dev *iio = _iio;
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+
+ if (!opt->ok_to_ignore_lock)
+ mutex_lock(&opt->lock);
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto out;
+ }
+
+ if ((ret & OPT3001_CONFIGURATION_M_MASK) ==
+ OPT3001_CONFIGURATION_M_CONTINUOUS) {
+ if (ret & OPT3001_CONFIGURATION_FH)
+ iio_push_event(iio,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ if (ret & OPT3001_CONFIGURATION_FL)
+ iio_push_event(iio,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ } else if (ret & OPT3001_CONFIGURATION_CRF) {
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_RESULT);
+ goto out;
+ }
+ opt->result = ret;
+ opt->result_ready = true;
+ wake_up(&opt->result_ready_queue);
+ }
+
+out:
+ if (!opt->ok_to_ignore_lock)
+ mutex_unlock(&opt->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int opt3001_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+
+ struct iio_dev *iio;
+ struct opt3001 *opt;
+ int irq = client->irq;
+ int ret;
+
+ iio = devm_iio_device_alloc(dev, sizeof(*opt));
+ if (!iio)
+ return -ENOMEM;
+
+ opt = iio_priv(iio);
+ opt->client = client;
+ opt->dev = dev;
+
+ mutex_init(&opt->lock);
+ init_waitqueue_head(&opt->result_ready_queue);
+ i2c_set_clientdata(client, iio);
+
+ ret = opt3001_read_id(opt);
+ if (ret)
+ return ret;
+
+ ret = opt3001_configure(opt);
+ if (ret)
+ return ret;
+
+ iio->name = client->name;
+ iio->channels = opt3001_channels;
+ iio->num_channels = ARRAY_SIZE(opt3001_channels);
+ iio->dev.parent = dev;
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->info = &opt3001_info;
+
+ ret = devm_iio_device_register(dev, iio);
+ if (ret) {
+ dev_err(dev, "failed to register IIO device\n");
+ return ret;
+ }
+
+ ret = request_threaded_irq(irq, NULL, opt3001_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "opt3001", iio);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d\n", irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int opt3001_remove(struct i2c_client *client)
+{
+ struct iio_dev *iio = i2c_get_clientdata(client);
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
+ u16 reg;
+
+ free_irq(client->irq, iio);
+
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ return ret;
+ }
+
+ reg = ret;
+ opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
+
+ ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+ reg);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_CONFIGURATION);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id opt3001_id[] = {
+ { "opt3001", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(i2c, opt3001_id);
+
+static const struct of_device_id opt3001_of_match[] = {
+ { .compatible = "ti,opt3001" },
+ { }
+};
+
+static struct i2c_driver opt3001_driver = {
+ .probe = opt3001_probe,
+ .remove = opt3001_remove,
+ .id_table = opt3001_id,
+
+ .driver = {
+ .name = "opt3001",
+ .of_match_table = of_match_ptr(opt3001_of_match),
+ .owner = THIS_MODULE,
+ },
+};
+
+module_i2c_driver(opt3001_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments OPT3001 Light Sensor Driver");
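
For readers of the new opt3001 driver, the result-register math is: a 4-bit exponent and a 12-bit mantissa encode lux = 0.01 * mantissa * 2^exponent, which opt3001_to_iio_ret() carries in millilux to stay in integer arithmetic. A worked example under that reading (register value chosen for illustration):

/*
 * Example: opt->result = 0x3208
 *   exponent = 0x3208 >> 12    = 3
 *   mantissa = 0x3208 & 0xfff  = 0x208 = 520
 *   millilux = 10 * (520 << 3) = 41600
 *   -> *val = 41, *val2 = 600000, i.e. 41.600000 lx
 */
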
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
new file mode 100644
index 000000000000..45f7bde02bbf
--- /dev/null
+++ b/drivers/iio/light/pa12203001.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Driver for TXC PA12203001 Proximity and Ambient Light Sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ * To do: Interrupt support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define PA12203001_DRIVER_NAME "pa12203001"
+
+#define PA12203001_REG_CFG0 0x00
+#define PA12203001_REG_CFG1 0x01
+#define PA12203001_REG_CFG2 0x02
+#define PA12203001_REG_CFG3 0x03
+
+#define PA12203001_REG_ADL 0x0b
+#define PA12203001_REG_PDH 0x0e
+
+#define PA12203001_REG_POFS 0x10
+#define PA12203001_REG_PSET 0x11
+
+#define PA12203001_ALS_EN_MASK BIT(0)
+#define PA12203001_PX_EN_MASK BIT(1)
+#define PA12203001_PX_NORMAL_MODE_MASK GENMASK(7, 6)
+#define PA12203001_AFSR_MASK GENMASK(5, 4)
+#define PA12203001_AFSR_SHIFT 4
+
+#define PA12203001_PSCAN 0x03
+
+/* ALS range 31000 lux; ALS and PS disabled */
+#define PA12203001_REG_CFG0_DEFAULT 0x30
+
+/* led current: 100 mA */
+#define PA12203001_REG_CFG1_DEFAULT 0x20
+
+/* ps mode: normal, interrupts not active */
+#define PA12203001_REG_CFG2_DEFAULT 0xcc
+
+#define PA12203001_REG_CFG3_DEFAULT 0x00
+
+#define PA12203001_SLEEP_DELAY_MS 3000
+
+#define PA12203001_CHIP_ENABLE 0xff
+#define PA12203001_CHIP_DISABLE 0x00
+
+/* available scales: corresponding to [500, 4000, 7000, 31000] lux */
+static const int pa12203001_scales[] = { 7629, 61036, 106813, 473029};
+
+struct pa12203001_data {
+ struct i2c_client *client;
+
+ /* protect device states */
+ struct mutex lock;
+
+ bool als_enabled;
+ bool px_enabled;
+ bool als_needs_enable;
+ bool px_needs_enable;
+
+ struct regmap *map;
+};
+
+static const struct {
+ u8 reg;
+ u8 val;
+} regvals[] = {
+ {PA12203001_REG_CFG0, PA12203001_REG_CFG0_DEFAULT},
+ {PA12203001_REG_CFG1, PA12203001_REG_CFG1_DEFAULT},
+ {PA12203001_REG_CFG2, PA12203001_REG_CFG2_DEFAULT},
+ {PA12203001_REG_CFG3, PA12203001_REG_CFG3_DEFAULT},
+ {PA12203001_REG_PSET, PA12203001_PSCAN},
+};
+
+static IIO_CONST_ATTR(in_illuminance_scale_available,
+ "0.007629 0.061036 0.106813 0.473029");
+
+static struct attribute *pa12203001_attrs[] = {
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group pa12203001_attr_group = {
+ .attrs = pa12203001_attrs,
+};
+
+static const struct iio_chan_spec pa12203001_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+static const struct regmap_range pa12203001_volatile_regs_ranges[] = {
+ regmap_reg_range(PA12203001_REG_ADL, PA12203001_REG_ADL + 1),
+ regmap_reg_range(PA12203001_REG_PDH, PA12203001_REG_PDH),
+};
+
+static const struct regmap_access_table pa12203001_volatile_regs = {
+ .yes_ranges = pa12203001_volatile_regs_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pa12203001_volatile_regs_ranges),
+};
+
+static const struct regmap_config pa12203001_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PA12203001_REG_PSET,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &pa12203001_volatile_regs,
+};
+
+static inline int pa12203001_als_enable(struct pa12203001_data *data, u8 enable)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
+ PA12203001_ALS_EN_MASK, enable);
+ if (ret < 0)
+ return ret;
+
+ data->als_enabled = !!enable;
+
+ return 0;
+}
+
+static inline int pa12203001_px_enable(struct pa12203001_data *data, u8 enable)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
+ PA12203001_PX_EN_MASK, enable);
+ if (ret < 0)
+ return ret;
+
+ data->px_enabled = !!enable;
+
+ return 0;
+}
+
+static int pa12203001_set_power_state(struct pa12203001_data *data, bool on,
+ u8 mask)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (on && (mask & PA12203001_ALS_EN_MASK)) {
+ mutex_lock(&data->lock);
+ if (data->px_enabled) {
+ ret = pa12203001_als_enable(data,
+ PA12203001_ALS_EN_MASK);
+ if (ret < 0)
+ goto err;
+ } else {
+ data->als_needs_enable = true;
+ }
+ mutex_unlock(&data->lock);
+ }
+
+ if (on && (mask & PA12203001_PX_EN_MASK)) {
+ mutex_lock(&data->lock);
+ if (data->als_enabled) {
+ ret = pa12203001_px_enable(data, PA12203001_PX_EN_MASK);
+ if (ret < 0)
+ goto err;
+ } else {
+ data->px_needs_enable = true;
+ }
+ mutex_unlock(&data->lock);
+ }
+
+ if (on) {
+ ret = pm_runtime_get_sync(&data->client->dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(&data->client->dev);
+
+ } else {
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
+ }
+
+ return ret;
+
+err:
+ mutex_unlock(&data->lock);
+ return ret;
+
+#endif
+ return 0;
+}
+
+static int pa12203001_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct pa12203001_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 dev_mask;
+ unsigned int reg_byte;
+ __le16 reg_word;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ dev_mask = PA12203001_ALS_EN_MASK;
+ ret = pa12203001_set_power_state(data, true, dev_mask);
+ if (ret < 0)
+ return ret;
+ /*
+ * ALS ADC value is stored in registers
+ * PA12203001_REG_ADL and in PA12203001_REG_ADL + 1.
+ */
+ ret = regmap_bulk_read(data->map, PA12203001_REG_ADL,
+ &reg_word, 2);
+ if (ret < 0)
+ goto reg_err;
+
+ *val = le16_to_cpu(reg_word);
+ ret = pa12203001_set_power_state(data, false, dev_mask);
+ if (ret < 0)
+ return ret;
+ break;
+ case IIO_PROXIMITY:
+ dev_mask = PA12203001_PX_EN_MASK;
+ ret = pa12203001_set_power_state(data, true, dev_mask);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(data->map, PA12203001_REG_PDH,
+ &reg_byte);
+ if (ret < 0)
+ goto reg_err;
+
+ *val = reg_byte;
+ ret = pa12203001_set_power_state(data, false, dev_mask);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte);
+ if (ret < 0)
+ return ret;
+ *val = 0;
+		reg_byte &= PA12203001_AFSR_MASK;
+		*val2 = pa12203001_scales[reg_byte >> PA12203001_AFSR_SHIFT];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+reg_err:
+ pa12203001_set_power_state(data, false, dev_mask);
+ return ret;
+}
+
+static int pa12203001_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct pa12203001_data *data = iio_priv(indio_dev);
+ int i, ret, new_val;
+ unsigned int reg_byte;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte);
+ if (val != 0 || ret < 0)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(pa12203001_scales); i++) {
+ if (val2 == pa12203001_scales[i]) {
+ new_val = i << PA12203001_AFSR_SHIFT;
+ return regmap_update_bits(data->map,
+ PA12203001_REG_CFG0,
+ PA12203001_AFSR_MASK,
+ new_val);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info pa12203001_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = pa12203001_read_raw,
+ .write_raw = pa12203001_write_raw,
+ .attrs = &pa12203001_attr_group,
+};
+
+static int pa12203001_init(struct iio_dev *indio_dev)
+{
+ struct pa12203001_data *data = iio_priv(indio_dev);
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(regvals); i++) {
+ ret = regmap_write(data->map, regvals[i].reg, regvals[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pa12203001_power_chip(struct iio_dev *indio_dev, u8 state)
+{
+ struct pa12203001_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = pa12203001_als_enable(data, state);
+ if (ret < 0)
+ goto out;
+
+ ret = pa12203001_px_enable(data, state);
+
+out:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int pa12203001_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pa12203001_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev,
+ sizeof(struct pa12203001_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ data->map = devm_regmap_init_i2c(client, &pa12203001_regmap_config);
+ if (IS_ERR(data->map))
+ return PTR_ERR(data->map);
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &pa12203001_info;
+ indio_dev->name = PA12203001_DRIVER_NAME;
+ indio_dev->channels = pa12203001_channels;
+ indio_dev->num_channels = ARRAY_SIZE(pa12203001_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = pa12203001_init(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pa12203001_power_chip(indio_dev, PA12203001_CHIP_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0) {
+ pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+ return ret;
+ }
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ PA12203001_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return iio_device_register(indio_dev);
+}
+
+static int pa12203001_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+}
+
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
+static int pa12203001_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ return pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int pa12203001_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ return pa12203001_power_chip(indio_dev, PA12203001_CHIP_ENABLE);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pa12203001_runtime_resume(struct device *dev)
+{
+ struct pa12203001_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ mutex_lock(&data->lock);
+ if (data->als_needs_enable) {
+ pa12203001_als_enable(data, PA12203001_ALS_EN_MASK);
+ data->als_needs_enable = false;
+ }
+ if (data->px_needs_enable) {
+ pa12203001_px_enable(data, PA12203001_PX_EN_MASK);
+ data->px_needs_enable = false;
+ }
+ mutex_unlock(&data->lock);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pa12203001_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pa12203001_suspend, pa12203001_resume)
+ SET_RUNTIME_PM_OPS(pa12203001_suspend, pa12203001_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id pa12203001_acpi_match[] = {
+ { "TXCPA122", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, pa12203001_acpi_match);
+
+static const struct i2c_device_id pa12203001_id[] = {
+ {"txcpa122", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pa12203001_id);
+
+static struct i2c_driver pa12203001_driver = {
+ .driver = {
+ .name = PA12203001_DRIVER_NAME,
+ .pm = &pa12203001_pm_ops,
+ .acpi_match_table = ACPI_PTR(pa12203001_acpi_match),
+ },
+ .probe = pa12203001_probe,
+ .remove = pa12203001_remove,
+ .id_table = pa12203001_id,
+
+};
+module_i2c_driver(pa12203001_driver);
+
+MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>");
+MODULE_DESCRIPTION("Driver for TXC PA12203001 Proximity and Light Sensor");
+MODULE_LICENSE("GPL v2");
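
The pa12203001 scale table follows directly from the part's four ALS full-scale ranges divided by the 16-bit ADC span; the values (micro-lux per count, to micro-lux precision) match the in_illuminance_scale_available string in the driver. A quick consistency check (computation mine, values from the driver):

/*
 * range (lux):           500       4000      7000      31000
 * range / 65535 (lux):   0.007629  0.061036  0.106813  0.473029
 * -> pa12203001_scales[] = { 7629, 61036, 106813, 473029 }
 */
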
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
new file mode 100644
index 000000000000..4b75bb0998b3
--- /dev/null
+++ b/drivers/iio/light/rpr0521.c
@@ -0,0 +1,615 @@
+/*
+ * RPR-0521 ROHM Ambient Light and Proximity Sensor
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for RPR-0521RS (7-bit I2C slave address 0x38).
+ *
+ * TODO: illuminance channel, PM support, buffer
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define RPR0521_REG_SYSTEM_CTRL 0x40
+#define RPR0521_REG_MODE_CTRL 0x41
+#define RPR0521_REG_ALS_CTRL 0x42
+#define RPR0521_REG_PXS_CTRL 0x43
+#define RPR0521_REG_PXS_DATA 0x44 /* 16-bit, little endian */
+#define RPR0521_REG_ALS_DATA0 0x46 /* 16-bit, little endian */
+#define RPR0521_REG_ALS_DATA1 0x48 /* 16-bit, little endian */
+#define RPR0521_REG_ID 0x92
+
+#define RPR0521_MODE_ALS_MASK BIT(7)
+#define RPR0521_MODE_PXS_MASK BIT(6)
+#define RPR0521_MODE_MEAS_TIME_MASK GENMASK(3, 0)
+#define RPR0521_ALS_DATA0_GAIN_MASK GENMASK(5, 4)
+#define RPR0521_ALS_DATA0_GAIN_SHIFT 4
+#define RPR0521_ALS_DATA1_GAIN_MASK GENMASK(3, 2)
+#define RPR0521_ALS_DATA1_GAIN_SHIFT 2
+#define RPR0521_PXS_GAIN_MASK GENMASK(5, 4)
+#define RPR0521_PXS_GAIN_SHIFT 4
+
+#define RPR0521_MODE_ALS_ENABLE BIT(7)
+#define RPR0521_MODE_ALS_DISABLE 0x00
+#define RPR0521_MODE_PXS_ENABLE BIT(6)
+#define RPR0521_MODE_PXS_DISABLE 0x00
+
+#define RPR0521_MANUFACT_ID 0xE0
+#define RPR0521_DEFAULT_MEAS_TIME 0x06 /* ALS - 100ms, PXS - 100ms */
+
+#define RPR0521_DRV_NAME "RPR0521"
+#define RPR0521_REGMAP_NAME "rpr0521_regmap"
+
+#define RPR0521_SLEEP_DELAY_MS 2000
+
+#define RPR0521_ALS_SCALE_AVAIL "0.007812 0.015625 0.5 1"
+#define RPR0521_PXS_SCALE_AVAIL "0.125 0.5 1"
+
+struct rpr0521_gain {
+ int scale;
+ int uscale;
+};
+
+static const struct rpr0521_gain rpr0521_als_gain[4] = {
+ {1, 0}, /* x1 */
+ {0, 500000}, /* x2 */
+ {0, 15625}, /* x64 */
+ {0, 7812}, /* x128 */
+};
+
+static const struct rpr0521_gain rpr0521_pxs_gain[3] = {
+ {1, 0}, /* x1 */
+ {0, 500000}, /* x2 */
+ {0, 125000}, /* x4 */
+};
+
+enum rpr0521_channel {
+ RPR0521_CHAN_ALS_DATA0,
+ RPR0521_CHAN_ALS_DATA1,
+ RPR0521_CHAN_PXS,
+};
+
+struct rpr0521_reg_desc {
+ u8 address;
+ u8 device_mask;
+};
+
+static const struct rpr0521_reg_desc rpr0521_data_reg[] = {
+ [RPR0521_CHAN_ALS_DATA0] = {
+ .address = RPR0521_REG_ALS_DATA0,
+ .device_mask = RPR0521_MODE_ALS_MASK,
+ },
+ [RPR0521_CHAN_ALS_DATA1] = {
+ .address = RPR0521_REG_ALS_DATA1,
+ .device_mask = RPR0521_MODE_ALS_MASK,
+ },
+ [RPR0521_CHAN_PXS] = {
+ .address = RPR0521_REG_PXS_DATA,
+ .device_mask = RPR0521_MODE_PXS_MASK,
+ },
+};
+
+static const struct rpr0521_gain_info {
+ u8 reg;
+ u8 mask;
+ u8 shift;
+ const struct rpr0521_gain *gain;
+ int size;
+} rpr0521_gain[] = {
+ [RPR0521_CHAN_ALS_DATA0] = {
+ .reg = RPR0521_REG_ALS_CTRL,
+ .mask = RPR0521_ALS_DATA0_GAIN_MASK,
+ .shift = RPR0521_ALS_DATA0_GAIN_SHIFT,
+ .gain = rpr0521_als_gain,
+ .size = ARRAY_SIZE(rpr0521_als_gain),
+ },
+ [RPR0521_CHAN_ALS_DATA1] = {
+ .reg = RPR0521_REG_ALS_CTRL,
+ .mask = RPR0521_ALS_DATA1_GAIN_MASK,
+ .shift = RPR0521_ALS_DATA1_GAIN_SHIFT,
+ .gain = rpr0521_als_gain,
+ .size = ARRAY_SIZE(rpr0521_als_gain),
+ },
+ [RPR0521_CHAN_PXS] = {
+ .reg = RPR0521_REG_PXS_CTRL,
+ .mask = RPR0521_PXS_GAIN_MASK,
+ .shift = RPR0521_PXS_GAIN_SHIFT,
+ .gain = rpr0521_pxs_gain,
+ .size = ARRAY_SIZE(rpr0521_pxs_gain),
+ },
+};
+
+struct rpr0521_data {
+ struct i2c_client *client;
+
+ /* protect device params updates (e.g state, gain) */
+ struct mutex lock;
+
+ /* device active status */
+ bool als_dev_en;
+ bool pxs_dev_en;
+
+ /* optimize runtime pm ops - enable device only if needed */
+ bool als_ps_need_en;
+ bool pxs_ps_need_en;
+
+ struct regmap *regmap;
+};
+
+static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
+static IIO_CONST_ATTR(in_proximity_scale_available, RPR0521_PXS_SCALE_AVAIL);
+
+static struct attribute *rpr0521_attributes[] = {
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rpr0521_attribute_group = {
+ .attrs = rpr0521_attributes,
+};
+
+static const struct iio_chan_spec rpr0521_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .address = RPR0521_CHAN_ALS_DATA0,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .address = RPR0521_CHAN_ALS_DATA1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .address = RPR0521_CHAN_PXS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static int rpr0521_als_enable(struct rpr0521_data *data, u8 status)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+ RPR0521_MODE_ALS_MASK,
+ status);
+ if (ret < 0)
+ return ret;
+
+ data->als_dev_en = true;
+
+ return 0;
+}
+
+static int rpr0521_pxs_enable(struct rpr0521_data *data, u8 status)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+ RPR0521_MODE_PXS_MASK,
+ status);
+ if (ret < 0)
+ return ret;
+
+ data->pxs_dev_en = true;
+
+ return 0;
+}
+
+/**
+ * rpr0521_set_power_state() - handle runtime PM state and sensor enable status
+ *
+ * @data: rpr0521 device private data
+ * @on: state to be set for devices in @device_mask
+ * @device_mask: bitmask specifying for which device we need to update @on state
+ *
+ * We rely on rpr0521_runtime_resume to enable our @device_mask devices, but
+ * if (for example) PXS was already enabled (pxs_dev_en = true) by a previous
+ * call to rpr0521_runtime_resume and we now want to enable ALS, we MUST set
+ * the ALS enable bit of RPR0521_REG_MODE_CTRL here, because
+ * rpr0521_runtime_resume will not be called a second time.
+ */
+static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
+ u8 device_mask)
+{
+#ifdef CONFIG_PM
+ int ret;
+ u8 update_mask = 0;
+
+ if (device_mask & RPR0521_MODE_ALS_MASK) {
+ if (on && !data->als_ps_need_en && data->pxs_dev_en)
+ update_mask |= RPR0521_MODE_ALS_MASK;
+ else
+ data->als_ps_need_en = on;
+ }
+
+ if (device_mask & RPR0521_MODE_PXS_MASK) {
+ if (on && !data->pxs_ps_need_en && data->als_dev_en)
+ update_mask |= RPR0521_MODE_PXS_MASK;
+ else
+ data->pxs_ps_need_en = on;
+ }
+
+ if (update_mask) {
+ ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+ update_mask, update_mask);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (on) {
+ ret = pm_runtime_get_sync(&data->client->dev);
+ } else {
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
+ }
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed: rpr0521_set_power_state for %d, ret %d\n",
+ on, ret);
+ if (on)
+ pm_runtime_put_noidle(&data->client->dev);
+
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+static int rpr0521_get_gain(struct rpr0521_data *data, int chan,
+ int *val, int *val2)
+{
+ int ret, reg, idx;
+
+ ret = regmap_read(data->regmap, rpr0521_gain[chan].reg, &reg);
+ if (ret < 0)
+ return ret;
+
+ idx = (rpr0521_gain[chan].mask & reg) >> rpr0521_gain[chan].shift;
+ *val = rpr0521_gain[chan].gain[idx].scale;
+ *val2 = rpr0521_gain[chan].gain[idx].uscale;
+
+ return 0;
+}
+
+static int rpr0521_set_gain(struct rpr0521_data *data, int chan,
+ int val, int val2)
+{
+ int i, idx = -EINVAL;
+
+ /* get gain index */
+ for (i = 0; i < rpr0521_gain[chan].size; i++)
+ if (val == rpr0521_gain[chan].gain[i].scale &&
+ val2 == rpr0521_gain[chan].gain[i].uscale) {
+ idx = i;
+ break;
+ }
+
+ if (idx < 0)
+ return idx;
+
+ return regmap_update_bits(data->regmap, rpr0521_gain[chan].reg,
+ rpr0521_gain[chan].mask,
+ idx << rpr0521_gain[chan].shift);
+}
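
rpr0521_gain[] itself is defined earlier in the file, outside this hunk. Judging only from the accesses above, each entry plausibly looks like the sketch below; the struct and field names are inferred, not quoted from the driver:

	struct rpr0521_gain_info {
		u8 reg;				/* gain control register */
		u8 mask;			/* bits holding the gain index */
		u8 shift;			/* position of those bits */
		const struct rpr0521_gain *gain;	/* {scale, uscale} pairs */
		int size;			/* number of entries in gain[] */
	};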
+
+static int rpr0521_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 device_mask;
+ __le16 raw_data;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_INTENSITY && chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ device_mask = rpr0521_data_reg[chan->address].device_mask;
+
+ mutex_lock(&data->lock);
+ ret = rpr0521_set_power_state(data, true, device_mask);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap,
+ rpr0521_data_reg[chan->address].address,
+ &raw_data, 2);
+ if (ret < 0) {
+ rpr0521_set_power_state(data, false, device_mask);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ ret = rpr0521_set_power_state(data, false, device_mask);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ *val = le16_to_cpu(raw_data);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&data->lock);
+ ret = rpr0521_get_gain(data, chan->address, val, val2);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rpr0521_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&data->lock);
+ ret = rpr0521_set_gain(data, chan->address, val, val2);
+ mutex_unlock(&data->lock);
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info rpr0521_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = rpr0521_read_raw,
+ .write_raw = rpr0521_write_raw,
+ .attrs = &rpr0521_attribute_group,
+};
+
+static int rpr0521_init(struct rpr0521_data *data)
+{
+ int ret;
+ int id;
+
+ ret = regmap_read(data->regmap, RPR0521_REG_ID, &id);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Failed to read REG_ID register\n");
+ return ret;
+ }
+
+ if (id != RPR0521_MANUFACT_ID) {
+ dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
+ id, RPR0521_MANUFACT_ID);
+ return -ENODEV;
+ }
+
+ /* set default measurement time - 100 ms for both ALS and PS */
+ ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+ RPR0521_MODE_MEAS_TIME_MASK,
+ RPR0521_DEFAULT_MEAS_TIME);
+ if (ret) {
+ dev_err(&data->client->dev, "regmap_update_bits returned %d\n", ret);
+ return ret;
+ }
+
+ ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
+ if (ret < 0)
+ return ret;
+ ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rpr0521_poweroff(struct rpr0521_data *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+ RPR0521_MODE_ALS_MASK |
+ RPR0521_MODE_PXS_MASK,
+ RPR0521_MODE_ALS_DISABLE |
+ RPR0521_MODE_PXS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ data->als_dev_en = false;
+ data->pxs_dev_en = false;
+
+ return 0;
+}
+
+static bool rpr0521_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RPR0521_REG_MODE_CTRL:
+ case RPR0521_REG_ALS_CTRL:
+ case RPR0521_REG_PXS_CTRL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config rpr0521_regmap_config = {
+ .name = RPR0521_REGMAP_NAME,
+
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = RPR0521_REG_ID,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = rpr0521_is_volatile_reg,
+};
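
With this configuration only the three control registers are cacheable (rpr0521_is_volatile_reg() returns false for them), so REGCACHE_RBTREE serves repeated reads of those from memory while everything else, notably the data registers, always goes out on the bus. A hedged illustration:

	/* second read likely served from the regcache */
	regmap_read(data->regmap, RPR0521_REG_ALS_CTRL, &val);
	/* volatile: always a real I2C transaction */
	regmap_read(data->regmap, RPR0521_REG_ID, &val);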
+
+static int rpr0521_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rpr0521_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &rpr0521_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap_init failed!\n");
+ return PTR_ERR(regmap);
+ }
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &rpr0521_info;
+ indio_dev->name = RPR0521_DRV_NAME;
+ indio_dev->channels = rpr0521_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rpr0521_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = rpr0521_init(data);
+ if (ret < 0) {
+ dev_err(&client->dev, "rpr0521 chip init failed\n");
+ return ret;
+ }
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto err_iio_unregister;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return 0;
+
+err_iio_unregister:
+ iio_device_unregister(indio_dev);
+ return ret;
+}
+
+static int rpr0521_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_device_unregister(indio_dev);
+ rpr0521_poweroff(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rpr0521_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* disable channels and set {als,pxs}_dev_en to false */
+ mutex_lock(&data->lock);
+ ret = rpr0521_poweroff(data);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int rpr0521_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (data->als_ps_need_en) {
+ ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
+ if (ret < 0)
+ return ret;
+ data->als_ps_need_en = false;
+ }
+
+ if (data->pxs_ps_need_en) {
+ ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
+ if (ret < 0)
+ return ret;
+ data->pxs_ps_need_en = false;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rpr0521_pm_ops = {
+ SET_RUNTIME_PM_OPS(rpr0521_runtime_suspend,
+ rpr0521_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id rpr0521_acpi_match[] = {
+ {"RPR0521", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rpr0521_acpi_match);
+
+static const struct i2c_device_id rpr0521_id[] = {
+ {"rpr0521", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, rpr0521_id);
+
+static struct i2c_driver rpr0521_driver = {
+ .driver = {
+ .name = RPR0521_DRV_NAME,
+ .pm = &rpr0521_pm_ops,
+ .acpi_match_table = ACPI_PTR(rpr0521_acpi_match),
+ },
+ .probe = rpr0521_probe,
+ .remove = rpr0521_remove,
+ .id_table = rpr0521_id,
+};
+
+module_i2c_driver(rpr0521_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("RPR0521 ROHM Ambient Light and Proximity Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index fee4297d7c8f..993eb201148e 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -43,7 +43,6 @@
#define STK3311_CHIP_ID_VAL 0x1D
#define STK3310_PSINT_EN 0x01
#define STK3310_PS_MAX_VAL 0xFFFF
-#define STK3310_THRESH_MAX 0xFFFF
#define STK3310_DRIVER_NAME "stk3310"
#define STK3310_REGMAP_NAME "stk3310_regmap"
@@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint =
REG_FIELD(STK3310_REG_FLAG, 4, 4);
static const struct reg_field stk3310_reg_field_flag_nf =
REG_FIELD(STK3310_REG_FLAG, 0, 0);
-/*
- * Maximum PS values with regard to scale. Used to export the 'inverse'
- * PS value (high values for far objects, low values for near objects).
- */
+
+/* Estimated maximum proximity values with regard to the measurement scale. */
static const int stk3310_ps_max[4] = {
- STK3310_PS_MAX_VAL / 64,
- STK3310_PS_MAX_VAL / 16,
- STK3310_PS_MAX_VAL / 4,
- STK3310_PS_MAX_VAL,
+ STK3310_PS_MAX_VAL / 640,
+ STK3310_PS_MAX_VAL / 160,
+ STK3310_PS_MAX_VAL / 40,
+ STK3310_PS_MAX_VAL / 10
};
static const int stk3310_scale_table[][2] = {
@@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = {
/* Proximity event */
{
.type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
+ .dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
},
/* Out-of-proximity event */
{
.type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
+ .dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
},
@@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
int *val, int *val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
- unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
if (info != IIO_EV_INFO_VALUE)
return -EINVAL;
- /*
- * Only proximity interrupts are implemented at the moment.
- * Since we're inverting proximity values, the sensor's 'high'
- * threshold will become our 'low' threshold, associated with
- * 'near' events. Similarly, the sensor's 'low' threshold will
- * be our 'high' threshold, associated with 'far' events.
- */
+ /* Only proximity interrupts are implemented at the moment. */
if (dir == IIO_EV_DIR_RISING)
- reg = STK3310_REG_THDL_PS;
- else if (dir == IIO_EV_DIR_FALLING)
reg = STK3310_REG_THDH_PS;
+ else if (dir == IIO_EV_DIR_FALLING)
+ reg = STK3310_REG_THDL_PS;
else
return -EINVAL;
@@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
dev_err(&data->client->dev, "register read failed\n");
return ret;
}
- regmap_field_read(data->reg_ps_gain, &index);
- *val = swab16(stk3310_ps_max[index] - buf);
+ *val = be16_to_cpu(buf);
return IIO_VAL_INT;
}
@@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
int val, int val2)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
return -EINVAL;
if (dir == IIO_EV_DIR_RISING)
- reg = STK3310_REG_THDL_PS;
- else if (dir == IIO_EV_DIR_FALLING)
reg = STK3310_REG_THDH_PS;
+ else if (dir == IIO_EV_DIR_FALLING)
+ reg = STK3310_REG_THDL_PS;
else
return -EINVAL;
- buf = swab16(stk3310_ps_max[index] - val);
+ buf = cpu_to_be16(val);
ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
if (ret < 0)
dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
u8 reg;
- u16 buf;
+ __be16 buf;
int ret;
unsigned int index;
struct stk3310_data *data = iio_priv(indio_dev);
@@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
}
- *val = swab16(buf);
- if (chan->type == IIO_PROXIMITY) {
- /*
- * Invert the proximity data so we return low values
- * for close objects and high values for far ones.
- */
- regmap_field_read(data->reg_ps_gain, &index);
- *val = stk3310_ps_max[index] - *val;
- }
+ *val = be16_to_cpu(buf);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
@@ -488,16 +469,12 @@ static int stk3310_gpio_probe(struct i2c_client *client)
dev = &client->dev;
/* gpio interrupt pin */
- gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0);
+ gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0, GPIOD_IN);
if (IS_ERR(gpio)) {
dev_err(dev, "acpi gpio get index failed\n");
return PTR_ERR(gpio);
}
- ret = gpiod_direction_input(gpio);
- if (ret)
- return ret;
-
ret = gpiod_to_irq(gpio);
dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
@@ -581,8 +558,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
- (dir ? IIO_EV_DIR_RISING :
- IIO_EV_DIR_FALLING));
+ (dir ? IIO_EV_DIR_FALLING :
+ IIO_EV_DIR_RISING));
iio_push_event(indio_dev, event, data->timestamp);
/* Reset the interrupt flag */
@@ -627,13 +604,7 @@ static int stk3310_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev, "device_register failed\n");
- stk3310_set_state(data, STK3310_STATE_STANDBY);
- }
-
- if (client->irq <= 0)
+ if (client->irq < 0)
client->irq = stk3310_gpio_probe(client);
if (client->irq >= 0) {
@@ -648,6 +619,12 @@ static int stk3310_probe(struct i2c_client *client,
client->irq);
}
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ stk3310_set_state(data, STK3310_STATE_STANDBY);
+ }
+
return ret;
}
@@ -695,6 +672,7 @@ static const struct i2c_device_id stk3310_i2c_id[] = {
{"STK3311", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
static const struct acpi_device_id stk3310_acpi_id[] = {
{"STK3310", 0},
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 71c2bde275aa..f90f8c5919fe 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
if (val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
- if (val == tcs3414_times[i] * 1000) {
+ if (val2 == tcs3414_times[i] * 1000) {
data->timing &= ~TCS3414_INTEG_MASK;
data->timing |= i;
return i2c_smbus_write_byte_data(
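
The fix above matters because of IIO's fixed-point convention: an integration time such as "0.012" s reaches write_raw as val = 0, val2 = 12000 (microseconds). Since the preceding check already rejects val != 0, the old comparison against val could never match a nonzero entry; assuming a hypothetical 12 ms table entry:

	/* val = 0, val2 = 12000; tcs3414_times[i] == 12 -> 12 * 1000 matches */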
@@ -392,7 +392,6 @@ static struct i2c_driver tcs3414_driver = {
.driver = {
.name = TCS3414_DRV_NAME,
.pm = &tcs3414_pm_ops,
- .owner = THIS_MODULE,
},
.probe = tcs3414_probe,
.remove = tcs3414_remove,
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 752569985d1d..1b530bf04c89 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -366,7 +366,6 @@ static struct i2c_driver tcs3472_driver = {
.driver = {
.name = TCS3472_DRV_NAME,
.pm = &tcs3472_pm_ops,
- .owner = THIS_MODULE,
},
.probe = tcs3472_probe,
.remove = tcs3472_remove,
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index 63c26e2d5d97..26979183d27c 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -247,7 +247,6 @@ static struct i2c_driver tsl4531_driver = {
.driver = {
.name = TSL4531_DRV_NAME,
.pm = TSL4531_PM_OPS,
- .owner = THIS_MODULE,
},
.probe = tsl4531_probe,
.remove = tsl4531_remove,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index d948c4778ba6..c9d85bbc9230 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -185,7 +185,6 @@ static int vcnl4000_probe(struct i2c_client *client,
static struct i2c_driver vcnl4000_driver = {
.driver = {
.name = VCNL4000_DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = vcnl4000_probe,
.id_table = vcnl4000_id,
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
config BMC150_MAGN
tristate "Bosch BMC150 Magnetometer Driver"
depends on I2C
+ select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..d8e614ca069f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -85,6 +85,7 @@
#define BMC150_MAGN_REG_HIGH_THRESH 0x50
#define BMC150_MAGN_REG_REP_XY 0x51
#define BMC150_MAGN_REG_REP_Z 0x52
+#define BMC150_MAGN_REG_REP_DATAMASK GENMASK(7, 0)
#define BMC150_MAGN_REG_TRIM_START 0x5D
#define BMC150_MAGN_REG_TRIM_END 0x71
@@ -559,7 +560,7 @@ static int bmc150_magn_write_raw(struct iio_dev *indio_dev,
}
ret = regmap_update_bits(data->regmap,
BMC150_MAGN_REG_REP_XY,
- 0xFF,
+ BMC150_MAGN_REG_REP_DATAMASK,
BMC150_MAGN_REPXY_TO_REGVAL
(val));
mutex_unlock(&data->mutex);
@@ -575,7 +576,7 @@ static int bmc150_magn_write_raw(struct iio_dev *indio_dev,
}
ret = regmap_update_bits(data->regmap,
BMC150_MAGN_REG_REP_Z,
- 0xFF,
+ BMC150_MAGN_REG_REP_DATAMASK,
BMC150_MAGN_REPZ_TO_REGVAL
(val));
mutex_unlock(&data->mutex);
@@ -588,17 +589,6 @@ static int bmc150_magn_write_raw(struct iio_dev *indio_dev,
}
}
-static int bmc150_magn_validate_trigger(struct iio_dev *indio_dev,
- struct iio_trigger *trig)
-{
- struct bmc150_magn_data *data = iio_priv(indio_dev);
-
- if (data->dready_trig != trig)
- return -EINVAL;
-
- return 0;
-}
-
static ssize_t bmc150_magn_show_samp_freq_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -659,11 +649,12 @@ static const struct iio_info bmc150_magn_info = {
.attrs = &bmc150_magn_attrs_group,
.read_raw = bmc150_magn_read_raw,
.write_raw = bmc150_magn_write_raw,
- .validate_trigger = bmc150_magn_validate_trigger,
.driver_module = THIS_MODULE,
};
-static const unsigned long bmc150_magn_scan_masks[] = {0x07, 0};
+static const unsigned long bmc150_magn_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0};
static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
{
@@ -674,7 +665,6 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
ret = bmc150_magn_read_xyz(data, data->buffer);
- mutex_unlock(&data->mutex);
if (ret < 0)
goto err;
@@ -682,7 +672,8 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
pf->timestamp);
err:
- iio_trigger_notify_done(data->dready_trig);
+ mutex_unlock(&data->mutex);
+ iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
@@ -706,11 +697,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
goto err_poweroff;
}
if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
- dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret);
+ dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
ret = -ENODEV;
goto err_poweroff;
}
- dev_dbg(&data->client->dev, "Chip id %x\n", ret);
+ dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
ret = bmc150_magn_set_odr(data, preset.odr);
@@ -793,29 +784,23 @@ static int bmc150_magn_data_rdy_trigger_set_state(struct iio_trigger *trig,
if (state == data->dready_trigger_on)
goto err_unlock;
- ret = bmc150_magn_set_power_state(data, state);
- if (ret < 0)
- goto err_unlock;
-
ret = regmap_update_bits(data->regmap, BMC150_MAGN_REG_INT_DRDY,
BMC150_MAGN_MASK_DRDY_EN,
state << BMC150_MAGN_SHIFT_DRDY_EN);
if (ret < 0)
- goto err_poweroff;
+ goto err_unlock;
data->dready_trigger_on = state;
if (state) {
ret = bmc150_magn_reset_intr(data);
if (ret < 0)
- goto err_poweroff;
+ goto err_unlock;
}
mutex_unlock(&data->mutex);
return 0;
-err_poweroff:
- bmc150_magn_set_power_state(data, false);
err_unlock:
mutex_unlock(&data->mutex);
return ret;
@@ -827,6 +812,27 @@ static const struct iio_trigger_ops bmc150_magn_trigger_ops = {
.owner = THIS_MODULE,
};
+static int bmc150_magn_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bmc150_magn_data *data = iio_priv(indio_dev);
+
+ return bmc150_magn_set_power_state(data, true);
+}
+
+static int bmc150_magn_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct bmc150_magn_data *data = iio_priv(indio_dev);
+
+ return bmc150_magn_set_power_state(data, false);
+}
+
+static const struct iio_buffer_setup_ops bmc150_magn_buffer_setup_ops = {
+ .preenable = bmc150_magn_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = bmc150_magn_buffer_postdisable,
+};
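
Power handling moves from the trigger to the buffer here: bmc150_magn_data_rdy_trigger_set_state() above no longer toggles the power state, and instead the device is powered up in preenable and back down in postdisable, bracketing the whole buffered capture regardless of which trigger is in use.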
+
static int bmc150_magn_gpio_probe(struct i2c_client *client)
{
struct device *dev;
@@ -839,16 +845,12 @@ static int bmc150_magn_gpio_probe(struct i2c_client *client)
dev = &client->dev;
/* data ready GPIO interrupt pin */
- gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0);
+ gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0, GPIOD_IN);
if (IS_ERR(gpio)) {
dev_err(dev, "ACPI GPIO get index failed\n");
return PTR_ERR(gpio);
}
- ret = gpiod_direction_input(gpio);
- if (ret)
- return ret;
-
ret = gpiod_to_irq(gpio);
dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
@@ -932,16 +934,6 @@ static int bmc150_magn_probe(struct i2c_client *client,
goto err_poweroff;
}
- ret = iio_triggered_buffer_setup(indio_dev,
- &iio_pollfunc_store_time,
- bmc150_magn_trigger_handler,
- NULL);
- if (ret < 0) {
- dev_err(&client->dev,
- "iio triggered buffer setup failed\n");
- goto err_trigger_unregister;
- }
-
ret = request_threaded_irq(client->irq,
iio_trigger_generic_data_rdy_poll,
NULL,
@@ -951,14 +943,24 @@ static int bmc150_magn_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(&client->dev, "request irq %d failed\n",
client->irq);
- goto err_buffer_cleanup;
+ goto err_trigger_unregister;
}
}
+ ret = iio_triggered_buffer_setup(indio_dev,
+ iio_pollfunc_store_time,
+ bmc150_magn_trigger_handler,
+ &bmc150_magn_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "iio triggered buffer setup failed\n");
+ goto err_free_irq;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto err_free_irq;
+ goto err_buffer_cleanup;
}
ret = pm_runtime_set_active(&client->dev);
@@ -976,12 +978,11 @@ static int bmc150_magn_probe(struct i2c_client *client,
err_iio_unregister:
iio_device_unregister(indio_dev);
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
err_free_irq:
if (client->irq > 0)
free_irq(client->irq, data->dready_trig);
-err_buffer_cleanup:
- if (data->dready_trig)
- iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -1000,14 +1001,13 @@ static int bmc150_magn_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
if (client->irq > 0)
free_irq(data->client->irq, data->dready_trig);
- if (data->dready_trig) {
- iio_triggered_buffer_cleanup(indio_dev);
+ if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
- }
mutex_lock(&data->mutex);
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
@@ -1034,6 +1034,9 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
return 0;
}
+/*
+ * Should be called with data->mutex held.
+ */
static int bmc150_magn_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -1082,12 +1085,14 @@ static const struct dev_pm_ops bmc150_magn_pm_ops = {
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
{"BMC150B", 0},
+ {"BMC156B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
static const struct i2c_device_id bmc150_magn_id[] = {
{"bmc150_magn", 0},
+ {"bmc156_magn", 0},
{},
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_id);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 7a2ea71c659a..176e14a61558 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -84,10 +84,10 @@
#define MMC35240_OTP_START_ADDR 0x1B
enum mmc35240_resolution {
- MMC35240_16_BITS_SLOW = 0, /* 100 Hz */
- MMC35240_16_BITS_FAST, /* 200 Hz */
- MMC35240_14_BITS, /* 333 Hz */
- MMC35240_12_BITS, /* 666 Hz */
+ MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */
+ MMC35240_16_BITS_FAST, /* 4.08 ms */
+ MMC35240_14_BITS, /* 2.16 ms */
+ MMC35240_12_BITS, /* 1.20 ms */
};
enum mmc35240_axis {
@@ -100,22 +100,22 @@ static const struct {
int sens[3]; /* sensitivity per X, Y, Z axis */
int nfo; /* null field output */
} mmc35240_props_table[] = {
- /* 16 bits, 100Hz ODR */
+ /* 16 bits, 125Hz ODR */
{
{1024, 1024, 1024},
32768,
},
- /* 16 bits, 200Hz ODR */
+ /* 16 bits, 250Hz ODR */
{
{1024, 1024, 770},
32768,
},
- /* 14 bits, 333Hz ODR */
+ /* 14 bits, 450Hz ODR */
{
{256, 256, 193},
8192,
},
- /* 12 bits, 666Hz ODR */
+ /* 12 bits, 800Hz ODR */
{
{64, 64, 48},
2048,
@@ -133,9 +133,15 @@ struct mmc35240_data {
int axis_scale[3];
};
-static const int mmc35240_samp_freq[] = {100, 200, 333, 666};
+static const struct {
+ int val;
+ int val2;
+} mmc35240_samp_freq[] = { {1, 500000},
+ {13, 0},
+ {25, 0},
+ {50, 0} };
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666");
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50");
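
Each {val, val2} pair above is IIO's integer-plus-micro representation of the frequency, so the table and the sysfs string agree; for example, assuming the default INT_PLUS_MICRO write format, a user writing "1.5" to sampling_frequency arrives as:

	/* "1.5" -> val = 1, val2 = 500000 -> index 0 in mmc35240_samp_freq[] */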
#define MMC35240_CHANNEL(_axis) { \
.type = IIO_MAGN, \
@@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data,
int i;
for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++)
- if (mmc35240_samp_freq[i] == val)
+ if (mmc35240_samp_freq[i].val == val &&
+ mmc35240_samp_freq[i].val2 == val2)
return i;
return -EINVAL;
}
@@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
coil_bit = MMC35240_CTRL0_RESET_BIT;
return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
- MMC35240_CTRL0_REFILL_BIT,
- coil_bit);
+ coil_bit, coil_bit);
+
}
static int mmc35240_init(struct mmc35240_data *data)
@@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
/*
* make sure we restore sensor characteristics, by doing
- * a RESET/SET sequence
+ * a SET/RESET sequence; the axis polarity is naturally
+ * aligned after RESET
*/
- ret = mmc35240_hw_set(data, false);
+ ret = mmc35240_hw_set(data, true);
if (ret < 0)
return ret;
usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
- ret = mmc35240_hw_set(data, true);
+ ret = mmc35240_hw_set(data, false);
if (ret < 0)
return ret;
@@ -308,31 +316,31 @@ static int mmc35240_read_measurement(struct mmc35240_data *data, __le16 buf[3])
static int mmc35240_raw_to_mgauss(struct mmc35240_data *data, int index,
__le16 buf[], int *val)
{
- int raw_x, raw_y, raw_z;
- int sens_x, sens_y, sens_z;
+ int raw[3];
+ int sens[3];
int nfo;
- raw_x = le16_to_cpu(buf[AXIS_X]);
- raw_y = le16_to_cpu(buf[AXIS_Y]);
- raw_z = le16_to_cpu(buf[AXIS_Z]);
+ raw[AXIS_X] = le16_to_cpu(buf[AXIS_X]);
+ raw[AXIS_Y] = le16_to_cpu(buf[AXIS_Y]);
+ raw[AXIS_Z] = le16_to_cpu(buf[AXIS_Z]);
- sens_x = mmc35240_props_table[data->res].sens[AXIS_X];
- sens_y = mmc35240_props_table[data->res].sens[AXIS_Y];
- sens_z = mmc35240_props_table[data->res].sens[AXIS_Z];
+ sens[AXIS_X] = mmc35240_props_table[data->res].sens[AXIS_X];
+ sens[AXIS_Y] = mmc35240_props_table[data->res].sens[AXIS_Y];
+ sens[AXIS_Z] = mmc35240_props_table[data->res].sens[AXIS_Z];
nfo = mmc35240_props_table[data->res].nfo;
switch (index) {
case AXIS_X:
- *val = (raw_x - nfo) * 1000 / sens_x;
+ *val = (raw[AXIS_X] - nfo) * 1000 / sens[AXIS_X];
break;
case AXIS_Y:
- *val = (raw_y - nfo) * 1000 / sens_y -
- (raw_z - nfo) * 1000 / sens_z;
+ *val = (raw[AXIS_Y] - nfo) * 1000 / sens[AXIS_Y] -
+ (raw[AXIS_Z] - nfo) * 1000 / sens[AXIS_Z];
break;
case AXIS_Z:
- *val = (raw_y - nfo) * 1000 / sens_y +
- (raw_z - nfo) * 1000 / sens_z;
+ *val = (raw[AXIS_Y] - nfo) * 1000 / sens[AXIS_Y] +
+ (raw[AXIS_Z] - nfo) * 1000 / sens[AXIS_Z];
break;
default:
return -EINVAL;
@@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev,
if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq))
return -EINVAL;
- *val = mmc35240_samp_freq[i];
- *val2 = 0;
- return IIO_VAL_INT;
+ *val = mmc35240_samp_freq[i].val;
+ *val2 = mmc35240_samp_freq[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
}
data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
data->client = client;
data->regmap = regmap;
data->res = MMC35240_16_BITS_SLOW;
@@ -550,6 +559,12 @@ static const struct dev_pm_ops mmc35240_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmc35240_suspend, mmc35240_resume)
};
+static const struct of_device_id mmc35240_of_match[] = {
+ { .compatible = "memsic,mmc35240", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmc35240_of_match);
+
static const struct acpi_device_id mmc35240_acpi_match[] = {
{"MMC35240", 0},
{ },
@@ -565,6 +580,7 @@ MODULE_DEVICE_TABLE(i2c, mmc35240_id);
static struct i2c_driver mmc35240_driver = {
.driver = {
.name = MMC35240_DRV_NAME,
+ .of_match_table = mmc35240_of_match,
.pm = &mmc35240_pm_ops,
.acpi_match_table = ACPI_PTR(mmc35240_acpi_match),
},
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 287691ca56c1..06a4d9c35581 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -18,6 +18,7 @@
#define LSM303DLHC_MAGN_DEV_NAME "lsm303dlhc_magn"
#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
+#define LSM303AGR_MAGN_DEV_NAME "lsm303agr_magn"
int st_magn_common_probe(struct iio_dev *indio_dev);
void st_magn_common_remove(struct iio_dev *indio_dev);
@@ -25,6 +26,8 @@ void st_magn_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
int st_magn_allocate_ring(struct iio_dev *indio_dev);
void st_magn_deallocate_ring(struct iio_dev *indio_dev);
+int st_magn_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_MAGN_TRIGGER_SET_STATE (&st_magn_trig_set_state)
#else /* CONFIG_IIO_BUFFER */
static inline int st_magn_probe_trigger(struct iio_dev *indio_dev, int irq)
{
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index bf427dc0d226..ecd3bd0a9769 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -23,6 +23,13 @@
#include <linux/iio/common/st_sensors.h>
#include "st_magn.h"
+int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
{
return st_sensors_set_enable(indio_dev, true);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index b4bcfb790f49..f8dc4b85d70c 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -43,6 +43,7 @@
#define ST_MAGN_FS_AVL_8000MG 8000
#define ST_MAGN_FS_AVL_8100MG 8100
#define ST_MAGN_FS_AVL_12000MG 12000
+#define ST_MAGN_FS_AVL_15000MG 15000
#define ST_MAGN_FS_AVL_16000MG 16000
/* CUSTOM VALUES FOR SENSOR 0 */
@@ -157,6 +158,29 @@
#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
+/* CUSTOM VALUES FOR SENSOR 3 */
+#define ST_MAGN_3_WAI_ADDR 0x4f
+#define ST_MAGN_3_WAI_EXP 0x40
+#define ST_MAGN_3_ODR_ADDR 0x60
+#define ST_MAGN_3_ODR_MASK 0x0c
+#define ST_MAGN_3_ODR_AVL_10HZ_VAL 0x00
+#define ST_MAGN_3_ODR_AVL_20HZ_VAL 0x01
+#define ST_MAGN_3_ODR_AVL_50HZ_VAL 0x02
+#define ST_MAGN_3_ODR_AVL_100HZ_VAL 0x03
+#define ST_MAGN_3_PW_ADDR 0x60
+#define ST_MAGN_3_PW_MASK 0x03
+#define ST_MAGN_3_PW_ON 0x00
+#define ST_MAGN_3_PW_OFF 0x03
+#define ST_MAGN_3_BDU_ADDR 0x62
+#define ST_MAGN_3_BDU_MASK 0x10
+#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
+#define ST_MAGN_3_DRDY_INT_MASK 0x01
+#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
+#define ST_MAGN_3_MULTIREAD_BIT false
+#define ST_MAGN_3_OUT_X_L_ADDR 0x68
+#define ST_MAGN_3_OUT_Y_L_ADDR 0x6a
+#define ST_MAGN_3_OUT_Z_L_ADDR 0x6c
+
static const struct iio_chan_spec st_magn_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -189,9 +213,26 @@ static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
+static const struct iio_chan_spec st_magn_3_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+ ST_MAGN_3_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
static const struct st_sensor_settings st_magn_sensors_settings[] = {
{
.wai = 0, /* This sensor has no valid WhoAmI; report 0 */
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM303DLH_MAGN_DEV_NAME,
},
@@ -268,6 +309,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
{
.wai = ST_MAGN_1_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LSM303DLHC_MAGN_DEV_NAME,
[1] = LSM303DLM_MAGN_DEV_NAME,
@@ -346,6 +388,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
{
.wai = ST_MAGN_2_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS3MDL_MAGN_DEV_NAME,
},
@@ -399,6 +442,48 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ .wai = ST_MAGN_3_WAI_EXP,
+ .wai_addr = ST_MAGN_3_WAI_ADDR,
+ .sensors_supported = {
+ [0] = LSM303AGR_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_3_ODR_ADDR,
+ .mask = ST_MAGN_3_ODR_MASK,
+ .odr_avl = {
+ { 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
+ { 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
+ { 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
+ { 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_3_PW_ADDR,
+ .mask = ST_MAGN_3_PW_MASK,
+ .value_on = ST_MAGN_3_PW_ON,
+ .value_off = ST_MAGN_3_PW_OFF,
+ },
+ .fs = {
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_15000MG,
+ .gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_MAGN_3_BDU_ADDR,
+ .mask = ST_MAGN_3_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_MAGN_3_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
+ },
+ .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_magn_read_raw(struct iio_dev *indio_dev,
@@ -477,6 +562,16 @@ static const struct iio_info magn_info = {
.write_raw = &st_magn_write_raw,
};
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_magn_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+};
+#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
+#else
+#define ST_MAGN_TRIGGER_OPS NULL
+#endif
+
int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
@@ -513,7 +608,8 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
return err;
if (irq > 0) {
- err = st_sensors_allocate_trigger(indio_dev, NULL);
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_MAGN_TRIGGER_OPS);
if (err < 0)
goto st_magn_probe_trigger_error;
}
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 5311d8aea8cc..8aa37af306ed 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -36,6 +36,10 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,lis3mdl-magn",
.data = LIS3MDL_MAGN_DEV_NAME,
},
+ {
+ .compatible = "st,lsm303agr-magn",
+ .data = LSM303AGR_MAGN_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -79,13 +83,13 @@ static const struct i2c_device_id st_magn_id_table[] = {
{ LSM303DLHC_MAGN_DEV_NAME },
{ LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
+ { LSM303AGR_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
static struct i2c_driver st_magn_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-magn-i2c",
.of_match_table = of_match_ptr(st_magn_of_match),
},
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 7adacf160146..0abca2c6afa6 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -51,6 +51,7 @@ static const struct spi_device_id st_magn_id_table[] = {
{ LSM303DLHC_MAGN_DEV_NAME },
{ LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
+ { LSM303AGR_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_magn_id_table);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index fa6295041947..4745179ff64b 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -53,10 +53,10 @@ config MPL3115
will be called mpl3115.
config MS5611
- tristate "Measurement Specialities MS5611 pressure sensor driver"
+ tristate "Measurement Specialties MS5611 pressure sensor driver"
help
- Say Y here to build support for the Measurement Specialities
- MS5611 pressure and temperature sensor.
+ Say Y here to build support for the Measurement Specialties
+ MS5611, MS5607 pressure and temperature sensors.
To compile this driver as a module, choose M here: the module will
be called ms5611_core.
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 099c6cdea43f..23b93c797dba 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -27,6 +27,18 @@
#define MS5611_PROM_WORDS_NB 8
+enum {
+ MS5611,
+ MS5607,
+};
+
+struct ms5611_chip_info {
+ u16 prom[MS5611_PROM_WORDS_NB];
+
+ int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
+ s32 *temp, s32 *pressure);
+};
+
struct ms5611_state {
void *client;
struct mutex lock;
@@ -36,9 +48,9 @@ struct ms5611_state {
int (*read_adc_temp_and_pressure)(struct device *dev,
s32 *temp, s32 *pressure);
- u16 prom[MS5611_PROM_WORDS_NB];
+ struct ms5611_chip_info *chip_info;
};
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev);
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type);
#endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index e42c8531d9b3..2f3d9b4aca4e 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -9,6 +9,7 @@
*
* Data sheet:
* http://www.meas-spec.com/downloads/MS5611-01BA03.pdf
+ * http://www.meas-spec.com/downloads/MS5607-02BA03.pdf
*
*/
@@ -50,7 +51,8 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(&indio_dev->dev, i, &st->prom[i]);
+ ret = st->read_prom_word(&indio_dev->dev,
+ i, &st->chip_info->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -58,7 +60,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
}
}
- if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
+ if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
dev_err(&indio_dev->dev, "PROM integrity check failed\n");
return -ENODEV;
}
@@ -70,22 +72,30 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
s32 *temp, s32 *pressure)
{
int ret;
- s32 t, p;
- s64 off, sens, dt;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->read_adc_temp_and_pressure(&indio_dev->dev, &t, &p);
+ ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read temperature and pressure\n");
return ret;
}
- dt = t - (st->prom[5] << 8);
- off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
- sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
+ return st->chip_info->temp_and_pressure_compensate(st->chip_info,
+ temp, pressure);
+}
+
+static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+ s32 *temp, s32 *pressure)
+{
+ s32 t = *temp, p = *pressure;
+ s64 off, sens, dt;
- t = 2000 + ((st->prom[6] * dt) >> 23);
+ dt = t - (chip_info->prom[5] << 8);
+ off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
+ sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+
+ t = 2000 + ((chip_info->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2;
@@ -111,6 +121,42 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
return 0;
}
+static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+ s32 *temp, s32 *pressure)
+{
+ s32 t = *temp, p = *pressure;
+ s64 off, sens, dt;
+
+ dt = t - (chip_info->prom[5] << 8);
+ off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
+ sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+
+ t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ if (t < 2000) {
+ s64 off2, sens2, t2;
+
+ t2 = (dt * dt) >> 31;
+ off2 = (61 * (t - 2000) * (t - 2000)) >> 4;
+ sens2 = off2 << 1;
+
+ if (t < -1500) {
+ s64 tmp = (t + 1500) * (t + 1500);
+
+ off2 += 15 * tmp;
+ sens2 += (8 * tmp);
+ }
+
+ t -= t2;
+ off -= off2;
+ sens -= sens2;
+ }
+
+ *temp = t;
+ *pressure = (((p * sens) >> 21) - off) >> 15;
+
+ return 0;
+}
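
The MS5607 math differs from the MS5611 version above only in fixed shift factors and second-order constants, which is why compensation is a per-chip callback hanging off ms5611_chip_info rather than shared code. In datasheet terms (C1..C6 = prom[1..6]):

	/* MS5607: OFF  = C2 * 2^17 + (C4 * dT) / 2^6
	 *         SENS = C1 * 2^16 + (C3 * dT) / 2^7
	 * MS5611: OFF  = C2 * 2^16 + (C4 * dT) / 2^7
	 *         SENS = C1 * 2^15 + (C3 * dT) / 2^8
	 */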
+
static int ms5611_reset(struct iio_dev *indio_dev)
{
int ret;
@@ -160,16 +206,23 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static struct ms5611_chip_info chip_info_tbl[] = {
+ [MS5611] = {
+ .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
+ },
+ [MS5607] = {
+ .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
+ }
+};
+
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
},
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
}
};
@@ -189,12 +242,13 @@ static int ms5611_init(struct iio_dev *indio_dev)
return ms5611_read_prom(indio_dev);
}
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev)
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
{
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
+ st->chip_info = &chip_info_tbl[type];
indio_dev->dev.parent = dev;
indio_dev->name = dev->driver->name;
indio_dev->info = &ms5611_info;
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 748fd9acaad8..245797d1ecf0 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -104,11 +104,12 @@ static int ms5611_i2c_probe(struct i2c_client *client,
st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure;
st->client = client;
- return ms5611_probe(indio_dev, &client->dev);
+ return ms5611_probe(indio_dev, &client->dev, id->driver_data);
}
static const struct i2c_device_id ms5611_id[] = {
- { "ms5611", 0 },
+ { "ms5611", MS5611 },
+ { "ms5607", MS5607 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ms5611_id);
@@ -116,7 +117,6 @@ MODULE_DEVICE_TABLE(i2c, ms5611_id);
static struct i2c_driver ms5611_driver = {
.driver = {
.name = "ms5611",
- .owner = THIS_MODULE,
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 976726fd4e6c..08ee6e88c79f 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -103,11 +103,13 @@ static int ms5611_spi_probe(struct spi_device *spi)
st->read_adc_temp_and_pressure = ms5611_spi_read_adc_temp_and_pressure;
st->client = spi;
- return ms5611_probe(indio_dev, &spi->dev);
+ return ms5611_probe(indio_dev, &spi->dev,
+ spi_get_device_id(spi)->driver_data);
}
static const struct spi_device_id ms5611_id[] = {
- { "ms5611", 0 },
+ { "ms5611", MS5611 },
+ { "ms5607", MS5607 },
{ }
};
MODULE_DEVICE_TABLE(spi, ms5611_id);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index e881fa6291e9..eb41d2b92c24 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -178,6 +178,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
static const struct st_sensor_settings st_press_sensors_settings[] = {
{
.wai = ST_PRESS_LPS331AP_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
},
@@ -225,6 +226,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
{
.wai = ST_PRESS_LPS001WP_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS001WP_PRESS_DEV_NAME,
},
@@ -260,6 +262,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
},
{
.wai = ST_PRESS_LPS25H_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS25H_PRESS_DEV_NAME,
},
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 137788bba4a3..8fcf9766eaec 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -79,7 +79,6 @@ MODULE_DEVICE_TABLE(i2c, st_press_id_table);
static struct i2c_driver st_press_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "st-press-i2c",
.of_match_table = of_match_ptr(st_press_of_match),
},
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 2042e375f835..3d756bd8c703 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -80,6 +80,7 @@
#define SX9500_COMPSTAT_MASK GENMASK(3, 0)
#define SX9500_NUM_CHANNELS 4
+#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0)
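
With SX9500_NUM_CHANNELS equal to 4, GENMASK(3, 0) is 0x0f, exactly the four channel bits; the GENMASK(SX9500_NUM_CHANNELS, 0) expression this replaces further down evaluated to 0x1f and also set a fifth, nonexistent channel bit.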
struct sx9500_data {
struct mutex mutex;
@@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data,
if (ret < 0)
return ret;
- *val = 32767 - (s16)be16_to_cpu(regval);
+ *val = be16_to_cpu(regval);
return IIO_VAL_INT;
}
@@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data,
else
ret = sx9500_wait_for_sample(data);
- if (ret < 0)
- return ret;
-
mutex_lock(&data->mutex);
- ret = sx9500_read_prox_data(data, chan, val);
if (ret < 0)
- goto out;
+ goto out_dec_data_rdy;
- ret = sx9500_dec_chan_users(data, chan->channel);
+ ret = sx9500_read_prox_data(data, chan, val);
if (ret < 0)
- goto out;
+ goto out_dec_data_rdy;
ret = sx9500_dec_data_rdy_users(data);
if (ret < 0)
+ goto out_dec_chan;
+
+ ret = sx9500_dec_chan_users(data, chan->channel);
+ if (ret < 0)
goto out;
ret = IIO_VAL_INT;
goto out;
+out_dec_data_rdy:
+ sx9500_dec_data_rdy_users(data);
out_dec_chan:
sx9500_dec_chan_users(data, chan->channel);
out:
@@ -679,7 +682,7 @@ out:
static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret = 0, i;
mutex_lock(&data->mutex);
@@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret = 0, i;
iio_triggered_buffer_predisable(indio_dev);
@@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
unsigned int val;
ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- GENMASK(SX9500_NUM_CHANNELS, 0),
- GENMASK(SX9500_NUM_CHANNELS, 0));
+ SX9500_CHAN_MASK, SX9500_CHAN_MASK);
if (ret < 0)
return ret;
@@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
out:
regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
- GENMASK(SX9500_NUM_CHANNELS, 0), 0);
+ SX9500_CHAN_MASK, 0);
return ret;
}
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..5d033a5af615 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -65,6 +65,13 @@
#define MLX90614_AUTOSLEEP_DELAY 5000 /* default autosleep delay */
+/* Magic constants */
+#define MLX90614_CONST_OFFSET_DEC -13657 /* integer part of the Kelvin offset */
+#define MLX90614_CONST_OFFSET_REM 500000 /* micro part of the offset (273.15*50) */
+#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
+#define MLX90614_CONST_RAW_EMISSIVITY_MAX 65535 /* max value for emissivity */
+#define MLX90614_CONST_EMISSIVITY_RESOLUTION 15259 /* 1/65535 ~ 0.000015259 */
+
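Assuming userspace applies the usual IIO recipe, processed = (raw + offset) * scale, the constants above turn the 0.02 K/LSB raw reading into milli-degrees Celsius; a worked example with a hypothetical raw value:

	/* raw = 14658: (14658 - 13657.5) * 20 = 20010 m degrees C, ~20.01 C */
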
struct mlx90614_data {
struct i2c_client *client;
struct mutex lock; /* for EEPROM access only */
@@ -204,11 +211,11 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 13657;
- *val2 = 500000;
+ *val = MLX90614_CONST_OFFSET_DEC;
+ *val2 = MLX90614_CONST_OFFSET_REM;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
- *val = 20;
+ *val = MLX90614_CONST_SCALE;
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
mlx90614_power_get(data, false);
@@ -221,12 +228,12 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- if (ret == 65535) {
+ if (ret == MLX90614_CONST_RAW_EMISSIVITY_MAX) {
*val = 1;
*val2 = 0;
} else {
*val = 0;
- *val2 = ret * 15259; /* 1/65535 ~ 0.000015259 */
+ *val2 = ret * MLX90614_CONST_EMISSIVITY_RESOLUTION;
}
return IIO_VAL_INT_PLUS_NANO;
default:
@@ -245,7 +252,8 @@ static int mlx90614_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
if (val < 0 || val2 < 0 || val > 1 || (val == 1 && val2 != 0))
return -EINVAL;
- val = val * 65535 + val2 / 15259; /* 1/65535 ~ 0.000015259 */
+ val = val * MLX90614_CONST_RAW_EMISSIVITY_MAX +
+ val2 / MLX90614_CONST_EMISSIVITY_RESOLUTION;
mlx90614_power_get(data, false);
mutex_lock(&data->lock);
@@ -551,7 +559,6 @@ static const struct dev_pm_ops mlx90614_pm_ops = {
static struct i2c_driver mlx90614_driver = {
.driver = {
.name = "mlx90614",
- .owner = THIS_MODULE,
.pm = &mlx90614_pm_ops,
},
.probe = mlx90614_probe,
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index fcc49f89b946..e78c1069a6a9 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -36,9 +36,9 @@
#define TMP006_CONFIG_DRDY_EN BIT(8)
#define TMP006_CONFIG_DRDY BIT(7)
-#define TMP006_CONFIG_MOD_MASK 0x7000
+#define TMP006_CONFIG_MOD_MASK GENMASK(14, 12)
-#define TMP006_CONFIG_CR_MASK 0x0e00
+#define TMP006_CONFIG_CR_MASK GENMASK(11, 9)
#define TMP006_CONFIG_CR_SHIFT 9
#define TMP006_MANUFACTURER_MAGIC 0x5449
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
struct tmp006_data *data = iio_priv(indio_dev);
int i;
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
if ((val == tmp006_freqs[i][0]) &&
(val2 == tmp006_freqs[i][1])) {
@@ -277,7 +280,6 @@ static struct i2c_driver tmp006_driver = {
.driver = {
.name = "tmp006",
.pm = &tmp006_pm_ops,
- .owner = THIS_MODULE,
},
.probe = tmp006_probe,
.remove = tmp006_remove,
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b899531498eb..da4c6979fbb8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,10 +55,8 @@ config INFINIBAND_ADDR_TRANS
default y
source "drivers/infiniband/hw/mthca/Kconfig"
-source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
-source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index acf736764445..d43a8994ac5c 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
- device.o fmr_pool.o cache.o netlink.o
+ device.o fmr_pool.o cache.o netlink.o \
+ roce_gid_mgmt.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index c7dcfe4ca5f1..0429040304fd 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
struct ib_ah *ah;
struct ib_mad_send_wr_private *mad_send_wr;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(device))
port_priv = ib_get_agent_port(device, 0);
else
port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 871da832d016..8f66c67ff0df 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -37,6 +37,8 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
#include <rdma/ib_cache.h>
@@ -47,76 +49,621 @@ struct ib_pkey_cache {
u16 table[0];
};
-struct ib_gid_cache {
- int table_len;
- union ib_gid table[0];
-};
-
struct ib_update_work {
struct work_struct work;
struct ib_device *device;
u8 port_num;
};
-int ib_get_cached_gid(struct ib_device *device,
- u8 port_num,
- int index,
- union ib_gid *gid)
+union ib_gid zgid;
+EXPORT_SYMBOL(zgid);
+
+static const struct ib_gid_attr zattr;
+
+enum gid_attr_find_mask {
+ GID_ATTR_FIND_MASK_GID = 1UL << 0,
+ GID_ATTR_FIND_MASK_NETDEV = 1UL << 1,
+ GID_ATTR_FIND_MASK_DEFAULT = 1UL << 2,
+};
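
The mask bits compose: find_gid() below skips an entry unless every requested criterion matches, so a lookup such as the one ib_cache_gid_add() issues further down,

	find_gid(table, gid, attr, false,
		 GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_NETDEV);

only returns a slot whose GID and netdev both match.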
+
+enum gid_table_entry_props {
+ GID_TABLE_ENTRY_INVALID = 1UL << 0,
+ GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
+};
+
+enum gid_table_write_action {
+ GID_TABLE_WRITE_ACTION_ADD,
+ GID_TABLE_WRITE_ACTION_DEL,
+ /* MODIFY only updates the GID table. Currently only used by
+ * ib_cache_update.
+ */
+ GID_TABLE_WRITE_ACTION_MODIFY
+};
+
+struct ib_gid_table_entry {
+ /* This lock protects an entry from being
+ * read and written simultaneously.
+ */
+ rwlock_t lock;
+ unsigned long props;
+ union ib_gid gid;
+ struct ib_gid_attr attr;
+ void *context;
+};
+
+struct ib_gid_table {
+ int sz;
+ /* In RoCE, adding a GID to the table requires:
+ * (a) Find out whether this GID already exists.
+ * (b) Find a free slot.
+ * (c) Write the new GID
+ *
+ * Delete requires a different set of operations:
+ * (a) Find the GID
+ * (b) Delete it.
+ *
+ * Add/delete should be carried out atomically.
+ * This is done by serializing all writers on this
+ * mutex. We don't need this lock for IB, as the MAD
+ * layer replaces all entries. All data_vec entries
+ * are protected by this lock.
+ */
+ struct mutex lock;
+ struct ib_gid_table_entry *data_vec;
+};
+
+static int write_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ enum gid_table_write_action action,
+ bool default_gid)
{
- struct ib_gid_cache *cache;
+ int ret = 0;
+ struct net_device *old_net_dev;
unsigned long flags;
+
+ /* In the rdma_cap_roce_gid_table case, this function should be
+ * protected by a sleepable lock.
+ */
+ write_lock_irqsave(&table->data_vec[ix].lock, flags);
+
+ if (rdma_cap_roce_gid_table(ib_dev, port)) {
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
+ write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
+ /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
+ * RoCE providers and thus only updates the cache.
+ */
+ if (action == GID_TABLE_WRITE_ACTION_ADD)
+ ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
+ &table->data_vec[ix].context);
+ else if (action == GID_TABLE_WRITE_ACTION_DEL)
+ ret = ib_dev->del_gid(ib_dev, port, ix,
+ &table->data_vec[ix].context);
+ write_lock_irqsave(&table->data_vec[ix].lock, flags);
+ }
+
+ old_net_dev = table->data_vec[ix].attr.ndev;
+ if (old_net_dev && old_net_dev != attr->ndev)
+ dev_put(old_net_dev);
+ /* if modify_gid failed, just delete the old gid */
+ if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
+ gid = &zgid;
+ attr = &zattr;
+ table->data_vec[ix].context = NULL;
+ }
+ if (default_gid)
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+ memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
+ memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+ if (table->data_vec[ix].attr.ndev &&
+ table->data_vec[ix].attr.ndev != old_net_dev)
+ dev_hold(table->data_vec[ix].attr.ndev);
+
+ table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
+
+ write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
+
+ if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
+ struct ib_event event;
+
+ event.device = ib_dev;
+ event.element.port_num = port;
+ event.event = IB_EVENT_GID_CHANGE;
+
+ ib_dispatch_event(&event);
+ }
+ return ret;
+}
+
+static int add_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ bool default_gid)
+{
+ return write_gid(ib_dev, port, table, ix, gid, attr,
+ GID_TABLE_WRITE_ACTION_ADD, default_gid);
+}
+
+static int modify_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ bool default_gid)
+{
+ return write_gid(ib_dev, port, table, ix, gid, attr,
+ GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
+}
+
+static int del_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix,
+ bool default_gid)
+{
+ return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
+ GID_TABLE_WRITE_ACTION_DEL, default_gid);
+}
+
+static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
+ const struct ib_gid_attr *val, bool default_gid,
+ unsigned long mask)
+{
+ int i;
+
+ for (i = 0; i < table->sz; i++) {
+ unsigned long flags;
+ struct ib_gid_attr *attr = &table->data_vec[i].attr;
+
+ read_lock_irqsave(&table->data_vec[i].lock, flags);
+
+ if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
+ goto next;
+
+ if (mask & GID_ATTR_FIND_MASK_GID &&
+ memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
+ goto next;
+
+ if (mask & GID_ATTR_FIND_MASK_NETDEV &&
+ attr->ndev != val->ndev)
+ goto next;
+
+ if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
+ !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
+ default_gid)
+ goto next;
+
+ read_unlock_irqrestore(&table->data_vec[i].lock, flags);
+ return i;
+next:
+ read_unlock_irqrestore(&table->data_vec[i].lock, flags);
+ }
+
+ return -1;
+}
+
+static void make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ addrconf_ifid_eui48(&gid->raw[8], dev);
+}
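
For illustration, a minimal userspace-style sketch (not part of this patch) of the byte layout make_default_gid() produces, assuming addrconf_ifid_eui48() performs the usual modified-EUI-64 expansion of the port netdev's MAC address:

#include <string.h>

/* Build fe80::/64 + modified EUI-64 from a 48-bit MAC, modelling what
 * make_default_gid() leaves in gid->raw[]. */
static void example_default_gid(const unsigned char mac[6],
				unsigned char gid[16])
{
	memset(gid, 0, 16);
	gid[0] = 0xfe;			/* link-local prefix fe80::/64 */
	gid[1] = 0x80;
	memcpy(&gid[8], mac, 3);	/* upper 24 bits of the MAC */
	gid[8] ^= 0x02;			/* flip the universal/local bit */
	gid[11] = 0xff;			/* ff:fe filler in the middle */
	gid[12] = 0xfe;
	memcpy(&gid[13], mac + 3, 3);	/* lower 24 bits of the MAC */
}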
+
+int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr)
+{
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
+ int ix;
int ret = 0;
+ struct net_device *idev;
- if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ table = ports_table[port - rdma_start_port(ib_dev)];
+
+ if (!memcmp(gid, &zgid, sizeof(*gid)))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ if (ib_dev->get_netdev) {
+ idev = ib_dev->get_netdev(ib_dev, port);
+ if (idev && attr->ndev != idev) {
+ union ib_gid default_gid;
- cache = device->cache.gid_cache[port_num - rdma_start_port(device)];
+ /* Adding default GIDs is not permitted */
+ make_default_gid(idev, &default_gid);
+ if (!memcmp(gid, &default_gid, sizeof(*gid))) {
+ dev_put(idev);
+ return -EPERM;
+ }
+ }
+ if (idev)
+ dev_put(idev);
+ }
- if (index < 0 || index >= cache->table_len)
- ret = -EINVAL;
- else
- *gid = cache->table[index];
+ mutex_lock(&table->lock);
- read_unlock_irqrestore(&device->cache.lock, flags);
+ ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_NETDEV);
+ if (ix >= 0)
+ goto out_unlock;
+ ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_DEFAULT);
+ if (ix < 0) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ add_gid(ib_dev, port, table, ix, gid, attr, false);
+
+out_unlock:
+ mutex_unlock(&table->lock);
return ret;
}
-EXPORT_SYMBOL(ib_get_cached_gid);
-int ib_find_cached_gid(struct ib_device *device,
- const union ib_gid *gid,
- u8 *port_num,
- u16 *index)
+int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct ib_gid_cache *cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
+ int ix;
+
+ table = ports_table[port - rdma_start_port(ib_dev)];
+
+ mutex_lock(&table->lock);
+
+ ix = find_gid(table, gid, attr, false,
+ GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_NETDEV |
+ GID_ATTR_FIND_MASK_DEFAULT);
+ if (ix < 0)
+ goto out_unlock;
+
+ del_gid(ib_dev, port, table, ix, false);
+
+out_unlock:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *ndev)
+{
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
+ int ix;
+
+ table = ports_table[port - rdma_start_port(ib_dev)];
+
+ mutex_lock(&table->lock);
+
+ for (ix = 0; ix < table->sz; ix++)
+ if (table->data_vec[ix].attr.ndev == ndev)
+ del_gid(ib_dev, port, table, ix, false);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
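
As a hedged sketch of the intended caller (the RoCE GID management code this series wires up via roce_gid_mgmt_init(), not shown in this hunk), a netdevice-unregister notifier could purge a departing netdev's GIDs as follows; example_netdev_unregistered is a hypothetical name:

static void example_netdev_unregistered(struct ib_device *ib_dev,
					struct net_device *ndev)
{
	u8 port;

	/* Drop every GID referencing the now-gone netdev on each port
	 * that exposes a RoCE GID table. */
	for (port = rdma_start_port(ib_dev);
	     port <= rdma_end_port(ib_dev); port++)
		if (rdma_cap_roce_gid_table(ib_dev, port))
			ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
}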
+
+static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
+ union ib_gid *gid, struct ib_gid_attr *attr)
+{
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
unsigned long flags;
- int p, i;
- int ret = -ENOENT;
- *port_num = -1;
- if (index)
- *index = -1;
+ table = ports_table[port - rdma_start_port(ib_dev)];
- read_lock_irqsave(&device->cache.lock, flags);
+ if (index < 0 || index >= table->sz)
+ return -EINVAL;
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
- cache = device->cache.gid_cache[p];
- for (i = 0; i < cache->table_len; ++i) {
- if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
- *port_num = p + rdma_start_port(device);
- if (index)
- *index = i;
- ret = 0;
- goto found;
- }
+ read_lock_irqsave(&table->data_vec[index].lock, flags);
+ if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
+ read_unlock_irqrestore(&table->data_vec[index].lock, flags);
+ return -EAGAIN;
+ }
+
+ memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
+ if (attr) {
+ memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
+ if (attr->ndev)
+ dev_hold(attr->ndev);
+ }
+
+ read_unlock_irqrestore(&table->data_vec[index].lock, flags);
+ return 0;
+}
+
+static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *val,
+ unsigned long mask,
+ u8 *port, u16 *index)
+{
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
+ u8 p;
+ int local_index;
+
+ for (p = 0; p < ib_dev->phys_port_cnt; p++) {
+ table = ports_table[p];
+ local_index = find_gid(table, gid, val, false, mask);
+ if (local_index >= 0) {
+ if (index)
+ *index = local_index;
+ if (port)
+ *port = p + rdma_start_port(ib_dev);
+ return 0;
}
}
-found:
- read_unlock_irqrestore(&device->cache.lock, flags);
- return ret;
+ return -ENOENT;
+}
+
+static int ib_cache_gid_find(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ struct net_device *ndev, u8 *port,
+ u16 *index)
+{
+ unsigned long mask = GID_ATTR_FIND_MASK_GID;
+ struct ib_gid_attr gid_attr_val = {.ndev = ndev};
+
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
+
+ return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
+ mask, port, index);
+}
+
+int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ u8 port, struct net_device *ndev,
+ u16 *index)
+{
+ int local_index;
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table *table;
+ unsigned long mask = GID_ATTR_FIND_MASK_GID;
+ struct ib_gid_attr val = {.ndev = ndev};
+
+ if (port < rdma_start_port(ib_dev) ||
+ port > rdma_end_port(ib_dev))
+ return -ENOENT;
+
+ table = ports_table[port - rdma_start_port(ib_dev)];
+
+ if (ndev)
+ mask |= GID_ATTR_FIND_MASK_NETDEV;
+
+ local_index = find_gid(table, gid, &val, false, mask);
+ if (local_index >= 0) {
+ if (index)
+ *index = local_index;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static struct ib_gid_table *alloc_gid_table(int sz)
+{
+ unsigned int i;
+ struct ib_gid_table *table =
+ kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
+ if (!table->data_vec)
+ goto err_free_table;
+
+ mutex_init(&table->lock);
+
+ table->sz = sz;
+
+ for (i = 0; i < sz; i++)
+ rwlock_init(&table->data_vec[i].lock);
+
+ return table;
+
+err_free_table:
+ kfree(table);
+ return NULL;
+}
+
+static void release_gid_table(struct ib_gid_table *table)
+{
+ if (table) {
+ kfree(table->data_vec);
+ kfree(table);
+ }
+}
+
+static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table)
+{
+ int i;
+
+ if (!table)
+ return;
+
+ for (i = 0; i < table->sz; ++i) {
+ if (memcmp(&table->data_vec[i].gid, &zgid,
+ sizeof(table->data_vec[i].gid)))
+ del_gid(ib_dev, port, table, i,
+ table->data_vec[i].props &
+ GID_TABLE_ENTRY_DEFAULT);
+ }
+}
+
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+ struct net_device *ndev,
+ enum ib_cache_gid_default_mode mode)
+{
+ struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ union ib_gid gid;
+ struct ib_gid_attr gid_attr;
+ struct ib_gid_table *table;
+ int ix;
+ union ib_gid current_gid;
+ struct ib_gid_attr current_gid_attr = {};
+
+ table = ports_table[port - rdma_start_port(ib_dev)];
+
+ make_default_gid(ndev, &gid);
+ memset(&gid_attr, 0, sizeof(gid_attr));
+ gid_attr.ndev = ndev;
+
+ ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
+
+ /* Couldn't find the default GID location */
+ WARN_ON(ix < 0);
+
+ mutex_lock(&table->lock);
+ if (!__ib_cache_gid_get(ib_dev, port, ix,
+ &current_gid, &current_gid_attr) &&
+ mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
+ !memcmp(&gid, &current_gid, sizeof(gid)) &&
+ !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
+ goto unlock;
+
+ if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
+ memcmp(&current_gid_attr, &zattr,
+ sizeof(current_gid_attr))) &&
+ del_gid(ib_dev, port, table, ix, true)) {
+ pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
+ ix, gid.raw);
+ goto unlock;
+ }
+
+ if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
+ if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
+ pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
+ gid.raw);
+
+unlock:
+ if (current_gid_attr.ndev)
+ dev_put(current_gid_attr.ndev);
+ mutex_unlock(&table->lock);
+}
+
+static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table)
+{
+ if (rdma_protocol_roce(ib_dev, port)) {
+ struct ib_gid_table_entry *entry = &table->data_vec[0];
+
+ entry->props |= GID_TABLE_ENTRY_DEFAULT;
+ }
+
+ return 0;
+}
+
+static int _gid_table_setup_one(struct ib_device *ib_dev)
+{
+ u8 port;
+ struct ib_gid_table **table;
+ int err = 0;
+
+ table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
+
+ if (!table) {
+ pr_warn("failed to allocate ib gid cache for %s\n",
+ ib_dev->name);
+ return -ENOMEM;
+ }
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ u8 rdma_port = port + rdma_start_port(ib_dev);
+
+ table[port] =
+ alloc_gid_table(
+ ib_dev->port_immutable[rdma_port].gid_tbl_len);
+ if (!table[port]) {
+ err = -ENOMEM;
+ goto rollback_table_setup;
+ }
+
+ err = gid_table_reserve_default(ib_dev,
+ port + rdma_start_port(ib_dev),
+ table[port]);
+ if (err)
+ goto rollback_table_setup;
+ }
+
+ ib_dev->cache.gid_cache = table;
+ return 0;
+
+rollback_table_setup:
+ for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
+ table[port]);
+ release_gid_table(table[port]);
+ }
+
+ kfree(table);
+ return err;
+}
+
+static void gid_table_release_one(struct ib_device *ib_dev)
+{
+ struct ib_gid_table **table = ib_dev->cache.gid_cache;
+ u8 port;
+
+ if (!table)
+ return;
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++)
+ release_gid_table(table[port]);
+
+ kfree(table);
+ ib_dev->cache.gid_cache = NULL;
+}
+
+static void gid_table_cleanup_one(struct ib_device *ib_dev)
+{
+ struct ib_gid_table **table = ib_dev->cache.gid_cache;
+ u8 port;
+
+ if (!table)
+ return;
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++)
+ cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
+ table[port]);
+}
+
+static int gid_table_setup_one(struct ib_device *ib_dev)
+{
+ int err;
+
+ err = _gid_table_setup_one(ib_dev);
+
+ if (err)
+ return err;
+
+ err = roce_rescan_device(ib_dev);
+
+ if (err) {
+ gid_table_cleanup_one(ib_dev);
+ gid_table_release_one(ib_dev);
+ }
+
+ return err;
+}
+
+int ib_get_cached_gid(struct ib_device *device,
+ u8 port_num,
+ int index,
+ union ib_gid *gid)
+{
+ if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
+ return -EINVAL;
+
+ return __ib_cache_gid_get(device, port_num, index, gid, NULL);
+}
+EXPORT_SYMBOL(ib_get_cached_gid);
+
+int ib_find_cached_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 *port_num,
+ u16 *index)
+{
+ return ib_cache_gid_find(device, gid, NULL, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
@@ -243,9 +790,21 @@ static void ib_cache_update(struct ib_device *device,
{
struct ib_port_attr *tprops = NULL;
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
- struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
+ struct ib_gid_cache {
+ int table_len;
+ union ib_gid table[0];
+ } *gid_cache = NULL;
int i;
int ret;
+ struct ib_gid_table *table;
+ struct ib_gid_table **ports_table = device->cache.gid_cache;
+ bool use_roce_gid_table =
+ rdma_cap_roce_gid_table(device, port);
+
+ if (port < rdma_start_port(device) || port > rdma_end_port(device))
+ return;
+
+ table = ports_table[port - rdma_start_port(device)];
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
@@ -265,12 +824,14 @@ static void ib_cache_update(struct ib_device *device,
pkey_cache->table_len = tprops->pkey_tbl_len;
- gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
- sizeof *gid_cache->table, GFP_KERNEL);
- if (!gid_cache)
- goto err;
+ if (!use_roce_gid_table) {
+ gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
+ sizeof(*gid_cache->table), GFP_KERNEL);
+ if (!gid_cache)
+ goto err;
- gid_cache->table_len = tprops->gid_tbl_len;
+ gid_cache->table_len = tprops->gid_tbl_len;
+ }
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
@@ -281,29 +842,36 @@ static void ib_cache_update(struct ib_device *device,
}
}
- for (i = 0; i < gid_cache->table_len; ++i) {
- ret = ib_query_gid(device, port, i, gid_cache->table + i);
- if (ret) {
- printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
- goto err;
+ if (!use_roce_gid_table) {
+ for (i = 0; i < gid_cache->table_len; ++i) {
+ ret = ib_query_gid(device, port, i,
+ gid_cache->table + i);
+ if (ret) {
+ printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
+ goto err;
+ }
}
}
write_lock_irq(&device->cache.lock);
old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
- old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)];
device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
- device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;
+ if (!use_roce_gid_table) {
+ for (i = 0; i < gid_cache->table_len; i++) {
+ modify_gid(device, port, table, i, gid_cache->table + i,
+ &zattr, false);
+ }
+ }
device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
write_unlock_irq(&device->cache.lock);
+ kfree(gid_cache);
kfree(old_pkey_cache);
- kfree(old_gid_cache);
kfree(tprops);
return;
@@ -344,85 +912,88 @@ static void ib_cache_event(struct ib_event_handler *handler,
}
}
-static void ib_cache_setup_one(struct ib_device *device)
+int ib_cache_setup_one(struct ib_device *device)
{
int p;
+ int err;
rwlock_init(&device->cache.lock);
device->cache.pkey_cache =
- kmalloc(sizeof *device->cache.pkey_cache *
- (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
- device->cache.gid_cache =
- kmalloc(sizeof *device->cache.gid_cache *
+ kzalloc(sizeof *device->cache.pkey_cache *
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-
device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
(rdma_end_port(device) -
rdma_start_port(device) + 1),
GFP_KERNEL);
-
- if (!device->cache.pkey_cache || !device->cache.gid_cache ||
+ if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
printk(KERN_WARNING "Couldn't allocate cache "
"for %s\n", device->name);
- goto err;
+ return -ENOMEM;
}
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
- device->cache.pkey_cache[p] = NULL;
- device->cache.gid_cache [p] = NULL;
+ err = gid_table_setup_one(device);
+ if (err)
+ /* Allocated memory will be cleaned up in the release function */
+ return err;
+
+ for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
ib_cache_update(device, p + rdma_start_port(device));
- }
INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
device, ib_cache_event);
- if (ib_register_event_handler(&device->cache.event_handler))
- goto err_cache;
-
- return;
+ err = ib_register_event_handler(&device->cache.event_handler);
+ if (err)
+ goto err;
-err_cache:
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
- kfree(device->cache.pkey_cache[p]);
- kfree(device->cache.gid_cache[p]);
- }
+ return 0;
err:
- kfree(device->cache.pkey_cache);
- kfree(device->cache.gid_cache);
- kfree(device->cache.lmc_cache);
+ gid_table_cleanup_one(device);
+ return err;
}
-static void ib_cache_cleanup_one(struct ib_device *device)
+void ib_cache_release_one(struct ib_device *device)
{
int p;
- ib_unregister_event_handler(&device->cache.event_handler);
- flush_workqueue(ib_wq);
-
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
- kfree(device->cache.pkey_cache[p]);
- kfree(device->cache.gid_cache[p]);
- }
-
+ /*
+ * The release function frees all the cache elements.
+ * It should be called as part of freeing all the device's
+ * resources, once the cache can no longer be accessed.
+ */
+ if (device->cache.pkey_cache)
+ for (p = 0;
+ p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+ kfree(device->cache.pkey_cache[p]);
+
+ gid_table_release_one(device);
kfree(device->cache.pkey_cache);
- kfree(device->cache.gid_cache);
kfree(device->cache.lmc_cache);
}
-static struct ib_client cache_client = {
- .name = "cache",
- .add = ib_cache_setup_one,
- .remove = ib_cache_cleanup_one
-};
+void ib_cache_cleanup_one(struct ib_device *device)
+{
+ /* The cleanup function unregisters the event handler,
+ * waits for all in-progress workqueue elements and cleans
+ * up the GID cache. It should be called after the device
+ * has been removed from the device list and all clients
+ * have been removed, so the cache still exists but is
+ * non-functional and must not be updated anymore.
+ */
+ ib_unregister_event_handler(&device->cache.event_handler);
+ flush_workqueue(ib_wq);
+ gid_table_cleanup_one(device);
+}
-int __init ib_cache_setup(void)
+void __init ib_cache_setup(void)
{
- return ib_register_client(&cache_client);
+ roce_gid_mgmt_init();
}
void __exit ib_cache_cleanup(void)
{
- ib_unregister_client(&cache_client);
+ roce_gid_mgmt_cleanup();
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dbddddd6fb5d..ea4db9c1d44f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
-static void cm_remove_one(struct ib_device *device);
+static void cm_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cm_client = {
.name = "cm",
@@ -169,6 +169,7 @@ struct cm_device {
struct ib_device *ib_device;
struct device *device;
u8 ack_delay;
+ int going_down;
struct cm_port *port[0];
};
@@ -212,13 +213,15 @@ struct cm_id_private {
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
atomic_t refcount;
+ /* Number of clients sharing this ib_cm_id. Only valid for listeners.
+ * Protected by the cm.lock spinlock. */
+ int listen_sharecount;
struct ib_mad_send_buf *msg;
struct cm_timewait_info *timewait_info;
/* todo: use alternate port on send failure */
struct cm_av av;
struct cm_av alt_av;
- struct ib_cm_compare_data *compare_data;
void *private_data;
__be64 tid;
@@ -439,40 +442,6 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
return cm_id_priv;
}
-static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
-{
- int i;
-
- for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
- dst[i] = src[i] & mask[i];
-}
-
-static int cm_compare_data(struct ib_cm_compare_data *src_data,
- struct ib_cm_compare_data *dst_data)
-{
- u32 src[IB_CM_COMPARE_SIZE];
- u32 dst[IB_CM_COMPARE_SIZE];
-
- if (!src_data || !dst_data)
- return 0;
-
- cm_mask_copy(src, src_data->data, dst_data->mask);
- cm_mask_copy(dst, dst_data->data, src_data->mask);
- return memcmp(src, dst, sizeof(src));
-}
-
-static int cm_compare_private_data(u32 *private_data,
- struct ib_cm_compare_data *dst_data)
-{
- u32 src[IB_CM_COMPARE_SIZE];
-
- if (!dst_data)
- return 0;
-
- cm_mask_copy(src, private_data, dst_data->mask);
- return memcmp(src, dst_data->data, sizeof(src));
-}
-
/*
* Trivial helpers to strip endian annotation and compare; the
* endianness doesn't actually matter since we just need a stable
@@ -505,18 +474,14 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
__be64 service_mask = cm_id_priv->id.service_mask;
- int data_cmp;
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
- data_cmp = cm_compare_data(cm_id_priv->compare_data,
- cur_cm_id_priv->compare_data);
if ((cur_cm_id_priv->id.service_mask & service_id) ==
(service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
- !data_cmp)
+ (cm_id_priv->id.device == cur_cm_id_priv->id.device))
return cur_cm_id_priv;
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
@@ -527,8 +492,6 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
link = &(*link)->rb_left;
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
link = &(*link)->rb_right;
- else if (data_cmp < 0)
- link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
@@ -538,20 +501,16 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
- __be64 service_id,
- u32 *private_data)
+ __be64 service_id)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
- int data_cmp;
while (node) {
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
- data_cmp = cm_compare_private_data(private_data,
- cm_id_priv->compare_data);
if ((cm_id_priv->id.service_mask & service_id) ==
cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device) && !data_cmp)
+ (cm_id_priv->id.device == device))
return cm_id_priv;
if (device < cm_id_priv->id.device)
@@ -562,8 +521,6 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
node = node->rb_left;
else if (be64_gt(service_id, cm_id_priv->id.service_id))
node = node->rb_right;
- else if (data_cmp < 0)
- node = node->rb_left;
else
node = node->rb_right;
}
@@ -805,6 +762,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
unsigned long flags;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+ if (!cm_dev)
+ return;
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +780,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
- queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
- msecs_to_jiffies(wait_time));
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down)
+ queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ msecs_to_jiffies(wait_time));
+ spin_unlock_irq(&cm.lock);
+
cm_id_priv->timewait_info = NULL;
}
@@ -847,9 +815,15 @@ retest:
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id->state) {
case IB_CM_LISTEN:
- cm_id->state = IB_CM_IDLE;
spin_unlock_irq(&cm_id_priv->lock);
+
spin_lock_irq(&cm.lock);
+ if (--cm_id_priv->listen_sharecount > 0) {
+ /* The id is still shared. */
+ cm_deref_id(cm_id_priv);
+ spin_unlock_irq(&cm.lock);
+ return;
+ }
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
spin_unlock_irq(&cm.lock);
break;
@@ -918,7 +892,6 @@ retest:
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
- kfree(cm_id_priv->compare_data);
kfree(cm_id_priv->private_data);
kfree(cm_id_priv);
}
@@ -929,11 +902,23 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
- struct ib_cm_compare_data *compare_data)
+/**
+ * __ib_cm_listen - Initiates listening on the specified service ID for
+ * connection and service ID resolution requests.
+ * @cm_id: Connection identifier associated with the listen request.
+ * @service_id: Service identifier matched against incoming connection
+ * and service ID resolution requests. The service ID should be specified
+ * in network byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
+ * assign a service ID to the caller.
+ * @service_mask: Mask applied to service ID used to listen across a
+ * range of service IDs. If set to 0, the service ID is matched
+ * exactly. This parameter is ignored if %service_id is set to
+ * IB_CM_ASSIGN_SERVICE_ID.
+ */
+static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
+ __be64 service_mask)
{
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
- unsigned long flags;
int ret = 0;
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
@@ -946,20 +931,9 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
if (cm_id->state != IB_CM_IDLE)
return -EINVAL;
- if (compare_data) {
- cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
- GFP_KERNEL);
- if (!cm_id_priv->compare_data)
- return -ENOMEM;
- cm_mask_copy(cm_id_priv->compare_data->data,
- compare_data->data, compare_data->mask);
- memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
- sizeof(compare_data->mask));
- }
-
cm_id->state = IB_CM_LISTEN;
+ ++cm_id_priv->listen_sharecount;
- spin_lock_irqsave(&cm.lock, flags);
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
cm_id->service_mask = ~cpu_to_be64(0);
@@ -968,18 +942,95 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
cm_id->service_mask = service_mask;
}
cur_cm_id_priv = cm_insert_listen(cm_id_priv);
- spin_unlock_irqrestore(&cm.lock, flags);
if (cur_cm_id_priv) {
cm_id->state = IB_CM_IDLE;
- kfree(cm_id_priv->compare_data);
- cm_id_priv->compare_data = NULL;
+ --cm_id_priv->listen_sharecount;
ret = -EBUSY;
}
return ret;
}
+
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm.lock, flags);
+ ret = __ib_cm_listen(cm_id, service_id, service_mask);
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ return ret;
+}
EXPORT_SYMBOL(ib_cm_listen);
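
A brief usage sketch under stated assumptions (the caller already owns cm_id and sid): since a zero service_mask is normalized to a full mask above, this listen matches the incoming service ID exactly.

/* Hypothetical caller; error handling left to the caller. */
static int example_exact_listen(struct ib_cm_id *cm_id, __be64 sid)
{
	return ib_cm_listen(cm_id, sid, 0);	/* mask 0 => exact match */
}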
+/**
+ * Create a new listening ib_cm_id and listen on the given service ID.
+ *
+ * If there's an existing ID listening on that same device and service ID,
+ * return it.
+ *
+ * @device: Device associated with the cm_id. All related communication will
+ * be associated with the specified device.
+ * @cm_handler: Callback invoked to notify the user of CM events.
+ * @service_id: Service identifier matched against incoming connection
+ * and service ID resolution requests. The service ID should be specified
+ * in network byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
+ * assign a service ID to the caller.
+ *
+ * Callers should call ib_destroy_cm_id when done with the listener ID.
+ */
+struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ __be64 service_id)
+{
+ struct cm_id_private *cm_id_priv;
+ struct ib_cm_id *cm_id;
+ unsigned long flags;
+ int err = 0;
+
+ /* Create an ID in advance, since the creation may sleep */
+ cm_id = ib_create_cm_id(device, cm_handler, NULL);
+ if (IS_ERR(cm_id))
+ return cm_id;
+
+ spin_lock_irqsave(&cm.lock, flags);
+
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID)
+ goto new_id;
+
+ /* Find an existing ID */
+ cm_id_priv = cm_find_listen(device, service_id);
+ if (cm_id_priv) {
+ if (cm_id->cm_handler != cm_handler || cm_id->context) {
+ /* Sharing an ib_cm_id with different handlers is not
+ * supported */
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return ERR_PTR(-EINVAL);
+ }
+ atomic_inc(&cm_id_priv->refcount);
+ ++cm_id_priv->listen_sharecount;
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ ib_destroy_cm_id(cm_id);
+ cm_id = &cm_id_priv->id;
+ return cm_id;
+ }
+
+new_id:
+ /* Use newly created ID */
+ err = __ib_cm_listen(cm_id, service_id, 0);
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ if (err) {
+ ib_destroy_cm_id(cm_id);
+ return ERR_PTR(err);
+ }
+ return cm_id;
+}
+EXPORT_SYMBOL(ib_cm_insert_listen);
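
A sketch of the shared-listener pattern this enables, with my_handler standing in for a caller-supplied ib_cm_handler: repeated calls with the same device and service ID return the same underlying ID with listen_sharecount raised, and every user balances its reference with ib_destroy_cm_id().

static int example_shared_listen(struct ib_device *device, __be64 sid,
				 ib_cm_handler my_handler)
{
	struct ib_cm_id *id;

	id = ib_cm_insert_listen(device, my_handler, sid);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* ... connection requests are now delivered to my_handler ... */

	ib_destroy_cm_id(id);	/* drops the share count; last user frees */
	return 0;
}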
+
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
enum cm_msg_sequence msg_seq)
{
@@ -1256,6 +1307,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
+ primary_path->service_id = req_msg->service_id;
if (req_msg->alt_local_lid) {
memset(alt_path, 0, sizeof *alt_path);
@@ -1277,9 +1329,28 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
alt_path->packet_life_time =
cm_req_get_alt_local_ack_timeout(req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
+ alt_path->service_id = req_msg->service_id;
}
}
+static u16 cm_get_bth_pkey(struct cm_work *work)
+{
+ struct ib_device *ib_dev = work->port->cm_dev->ib_device;
+ u8 port_num = work->port->port_num;
+ u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
+ u16 pkey;
+ int ret;
+
+ ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
+ if (ret) {
+ dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
+ port_num, pkey_index, ret);
+ return 0;
+ }
+
+ return pkey;
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1290,6 +1361,7 @@ static void cm_format_req_event(struct cm_work *work,
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.req_rcvd;
param->listen_id = listen_id;
+ param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
if (req_msg->alt_local_lid)
@@ -1472,8 +1544,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
/* Find matching listen request. */
listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
- req_msg->service_id,
- req_msg->private_data);
+ req_msg->service_id);
if (!listen_cm_id_priv) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
@@ -2980,6 +3051,8 @@ static void cm_format_sidr_req_event(struct cm_work *work,
param = &work->cm_event.param.sidr_req_rcvd;
param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
param->listen_id = listen_id;
+ param->service_id = sidr_req_msg->service_id;
+ param->bth_pkey = cm_get_bth_pkey(work);
param->port = work->port->port_num;
work->cm_event.private_data = &sidr_req_msg->private_data;
}
@@ -3019,8 +3092,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
cur_cm_id_priv = cm_find_listen(cm_id->device,
- sidr_req_msg->service_id,
- sidr_req_msg->private_data);
+ sidr_req_msg->service_id);
if (!cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
@@ -3305,6 +3377,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
struct cm_work *work;
unsigned long flags;
int ret = 0;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+ if (!cm_dev)
+ return -ENODEV;
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work)
@@ -3343,7 +3420,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down) {
+ queue_delayed_work(cm.wq, &work->work, 0);
+ } else {
+ kfree(work);
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&cm.lock);
+
out:
return ret;
}
@@ -3394,6 +3481,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
enum ib_cm_event_type event;
u16 attr_id;
int paths = 0;
+ int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
@@ -3452,7 +3540,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = port;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!port->cm_dev->going_down)
+ queue_delayed_work(cm.wq, &work->work, 0);
+ else
+ going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+ if (going_down) {
+ kfree(work);
+ ib_free_recv_mad(mad_recv_wc);
+ }
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3871,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
-
+ cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
@@ -3846,9 +3946,9 @@ free:
kfree(cm_dev);
}
-static void cm_remove_one(struct ib_device *ib_device)
+static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
- struct cm_device *cm_dev;
+ struct cm_device *cm_dev = client_data;
struct cm_port *port;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
@@ -3856,7 +3956,6 @@ static void cm_remove_one(struct ib_device *ib_device)
unsigned long flags;
int i;
- cm_dev = ib_get_client_data(ib_device, &cm_client);
if (!cm_dev)
return;
@@ -3864,14 +3963,23 @@ static void cm_remove_one(struct ib_device *ib_device)
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
+ spin_lock_irq(&cm.lock);
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
- ib_unregister_mad_agent(port->mad_agent);
+ /*
+ * Flush the workqueue here, after going_down has been set; this
+ * guarantees that no new work will be queued by the receive
+ * handler, after which it is safe to unregister the MAD agent.
+ */
flush_workqueue(cm.wq);
+ ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 143ded2bbe7c..b1ab13f3e182 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,6 +46,8 @@
#include <net/tcp.h>
#include <net/ipv6.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
@@ -94,7 +96,7 @@ const char *rdma_event_msg(enum rdma_cm_event_type event)
EXPORT_SYMBOL(rdma_event_msg);
static void cma_add_one(struct ib_device *device);
-static void cma_remove_one(struct ib_device *device);
+static void cma_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cma_client = {
.name = "cma",
@@ -113,6 +115,22 @@ static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);
+static struct idr *cma_idr(enum rdma_port_space ps)
+{
+ switch (ps) {
+ case RDMA_PS_TCP:
+ return &tcp_ps;
+ case RDMA_PS_UDP:
+ return &udp_ps;
+ case RDMA_PS_IPOIB:
+ return &ipoib_ps;
+ case RDMA_PS_IB:
+ return &ib_ps;
+ default:
+ return NULL;
+ }
+}
+
struct cma_device {
struct list_head list;
struct ib_device *device;
@@ -122,11 +140,33 @@ struct cma_device {
};
struct rdma_bind_list {
- struct idr *ps;
+ enum rdma_port_space ps;
struct hlist_head owners;
unsigned short port;
};
+static int cma_ps_alloc(enum rdma_port_space ps,
+ struct rdma_bind_list *bind_list, int snum)
+{
+ struct idr *idr = cma_idr(ps);
+
+ return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
+}
+
+static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
+{
+ struct idr *idr = cma_idr(ps);
+
+ return idr_find(idr, snum);
+}
+
+static void cma_ps_remove(enum rdma_port_space ps, int snum)
+{
+ struct idr *idr = cma_idr(ps);
+
+ idr_remove(idr, snum);
+}
+
enum {
CMA_OPTION_AFONLY,
};
@@ -225,6 +265,15 @@ struct cma_hdr {
#define CMA_VERSION 0x00
+struct cma_req_info {
+ struct ib_device *device;
+ int port;
+ union ib_gid local_gid;
+ __be64 service_id;
+ u16 pkey;
+ bool has_gid:1;
+};
+
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
unsigned long flags;
@@ -262,7 +311,7 @@ static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
return old;
}
-static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
+static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
return hdr->ip_version >> 4;
}
@@ -870,107 +919,397 @@ static inline int cma_any_port(struct sockaddr *addr)
return !cma_port(addr);
}
-static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+static void cma_save_ib_info(struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ struct rdma_cm_id *listen_id,
struct ib_sa_path_rec *path)
{
struct sockaddr_ib *listen_ib, *ib;
listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
- ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
- ib->sib_family = listen_ib->sib_family;
- if (path) {
- ib->sib_pkey = path->pkey;
- ib->sib_flowinfo = path->flow_label;
- memcpy(&ib->sib_addr, &path->sgid, 16);
- } else {
- ib->sib_pkey = listen_ib->sib_pkey;
- ib->sib_flowinfo = listen_ib->sib_flowinfo;
- ib->sib_addr = listen_ib->sib_addr;
- }
- ib->sib_sid = listen_ib->sib_sid;
- ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
- ib->sib_scope_id = listen_ib->sib_scope_id;
-
- if (path) {
- ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
- ib->sib_family = listen_ib->sib_family;
- ib->sib_pkey = path->pkey;
- ib->sib_flowinfo = path->flow_label;
- memcpy(&ib->sib_addr, &path->dgid, 16);
+ if (src_addr) {
+ ib = (struct sockaddr_ib *)src_addr;
+ ib->sib_family = AF_IB;
+ if (path) {
+ ib->sib_pkey = path->pkey;
+ ib->sib_flowinfo = path->flow_label;
+ memcpy(&ib->sib_addr, &path->sgid, 16);
+ ib->sib_sid = path->service_id;
+ ib->sib_scope_id = 0;
+ } else {
+ ib->sib_pkey = listen_ib->sib_pkey;
+ ib->sib_flowinfo = listen_ib->sib_flowinfo;
+ ib->sib_addr = listen_ib->sib_addr;
+ ib->sib_sid = listen_ib->sib_sid;
+ ib->sib_scope_id = listen_ib->sib_scope_id;
+ }
+ ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ }
+ if (dst_addr) {
+ ib = (struct sockaddr_ib *)dst_addr;
+ ib->sib_family = AF_IB;
+ if (path) {
+ ib->sib_pkey = path->pkey;
+ ib->sib_flowinfo = path->flow_label;
+ memcpy(&ib->sib_addr, &path->dgid, 16);
+ }
}
}
-static __be16 ss_get_port(const struct sockaddr_storage *ss)
-{
- if (ss->ss_family == AF_INET)
- return ((struct sockaddr_in *)ss)->sin_port;
- else if (ss->ss_family == AF_INET6)
- return ((struct sockaddr_in6 *)ss)->sin6_port;
- BUG();
-}
-
-static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
- struct cma_hdr *hdr)
+static void cma_save_ip4_info(struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ struct cma_hdr *hdr,
+ __be16 local_port)
{
struct sockaddr_in *ip4;
- ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
+ if (src_addr) {
+ ip4 = (struct sockaddr_in *)src_addr;
+ ip4->sin_family = AF_INET;
+ ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
+ ip4->sin_port = local_port;
+ }
- ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ if (dst_addr) {
+ ip4 = (struct sockaddr_in *)dst_addr;
+ ip4->sin_family = AF_INET;
+ ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
+ ip4->sin_port = hdr->port;
+ }
}
-static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
- struct cma_hdr *hdr)
+static void cma_save_ip6_info(struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ struct cma_hdr *hdr,
+ __be16 local_port)
{
struct sockaddr_in6 *ip6;
- ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
+ if (src_addr) {
+ ip6 = (struct sockaddr_in6 *)src_addr;
+ ip6->sin6_family = AF_INET6;
+ ip6->sin6_addr = hdr->dst_addr.ip6;
+ ip6->sin6_port = local_port;
+ }
- ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ if (dst_addr) {
+ ip6 = (struct sockaddr_in6 *)dst_addr;
+ ip6->sin6_family = AF_INET6;
+ ip6->sin6_addr = hdr->src_addr.ip6;
+ ip6->sin6_port = hdr->port;
+ }
}
-static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+static u16 cma_port_from_service_id(__be64 service_id)
{
- struct cma_hdr *hdr;
+ return (u16)be64_to_cpu(service_id);
+}
- if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
- if (ib_event->event == IB_CM_REQ_RECEIVED)
- cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
- else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
- cma_save_ib_info(id, listen_id, NULL);
- return 0;
- }
+static int cma_save_ip_info(struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ struct ib_cm_event *ib_event,
+ __be64 service_id)
+{
+ struct cma_hdr *hdr;
+ __be16 port;
hdr = ib_event->private_data;
if (hdr->cma_version != CMA_VERSION)
return -EINVAL;
+ port = htons(cma_port_from_service_id(service_id));
+
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(id, listen_id, hdr);
+ cma_save_ip4_info(src_addr, dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(id, listen_id, hdr);
+ cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ break;
+ default:
+ return -EAFNOSUPPORT;
+ }
+
+ return 0;
+}
+
+static int cma_save_net_info(struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event,
+ sa_family_t sa_family, __be64 service_id)
+{
+ if (sa_family == AF_IB) {
+ if (ib_event->event == IB_CM_REQ_RECEIVED)
+ cma_save_ib_info(src_addr, dst_addr, listen_id,
+ ib_event->param.req_rcvd.primary_path);
+ else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+ cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
+ return 0;
+ }
+
+ return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
+}
+
+static int cma_save_req_info(const struct ib_cm_event *ib_event,
+ struct cma_req_info *req)
+{
+ const struct ib_cm_req_event_param *req_param =
+ &ib_event->param.req_rcvd;
+ const struct ib_cm_sidr_req_event_param *sidr_param =
+ &ib_event->param.sidr_req_rcvd;
+
+ switch (ib_event->event) {
+ case IB_CM_REQ_RECEIVED:
+ req->device = req_param->listen_id->device;
+ req->port = req_param->port;
+ memcpy(&req->local_gid, &req_param->primary_path->sgid,
+ sizeof(req->local_gid));
+ req->has_gid = true;
+ req->service_id = req_param->primary_path->service_id;
+ req->pkey = req_param->bth_pkey;
+ break;
+ case IB_CM_SIDR_REQ_RECEIVED:
+ req->device = sidr_param->listen_id->device;
+ req->port = sidr_param->port;
+ req->has_gid = false;
+ req->service_id = sidr_param->service_id;
+ req->pkey = sidr_param->bth_pkey;
break;
default:
return -EINVAL;
}
+
return 0;
}
+static bool validate_ipv4_net_dev(struct net_device *net_dev,
+ const struct sockaddr_in *dst_addr,
+ const struct sockaddr_in *src_addr)
+{
+ __be32 daddr = dst_addr->sin_addr.s_addr,
+ saddr = src_addr->sin_addr.s_addr;
+ struct fib_result res;
+ struct flowi4 fl4;
+ int err;
+ bool ret;
+
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
+ ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
+ ipv4_is_loopback(saddr))
+ return false;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_iif = net_dev->ifindex;
+ fl4.daddr = daddr;
+ fl4.saddr = saddr;
+
+ rcu_read_lock();
+ err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
+ if (err) {
+ /* don't leak the RCU read lock taken above */
+ rcu_read_unlock();
+ return false;
+ }
+
+ ret = FIB_RES_DEV(res) == net_dev;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static bool validate_ipv6_net_dev(struct net_device *net_dev,
+ const struct sockaddr_in6 *dst_addr,
+ const struct sockaddr_in6 *src_addr)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
+ IPV6_ADDR_LINKLOCAL;
+ struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
+ &src_addr->sin6_addr, net_dev->ifindex,
+ strict);
+ bool ret;
+
+ if (!rt)
+ return false;
+
+ ret = rt->rt6i_idev->dev == net_dev;
+ ip6_rt_put(rt);
+
+ return ret;
+#else
+ return false;
+#endif
+}
+
+static bool validate_net_dev(struct net_device *net_dev,
+ const struct sockaddr *daddr,
+ const struct sockaddr *saddr)
+{
+ const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
+ const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
+ const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
+ const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
+
+ switch (daddr->sa_family) {
+ case AF_INET:
+ return saddr->sa_family == AF_INET &&
+ validate_ipv4_net_dev(net_dev, daddr4, saddr4);
+
+ case AF_INET6:
+ return saddr->sa_family == AF_INET6 &&
+ validate_ipv6_net_dev(net_dev, daddr6, saddr6);
+
+ default:
+ return false;
+ }
+}
+
+static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+ const struct cma_req_info *req)
+{
+ struct sockaddr_storage listen_addr_storage, src_addr_storage;
+ struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
+ *src_addr = (struct sockaddr *)&src_addr_storage;
+ struct net_device *net_dev;
+ const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
+ int err;
+
+ err = cma_save_ip_info(listen_addr, src_addr, ib_event,
+ req->service_id);
+ if (err)
+ return ERR_PTR(err);
+
+ net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
+ gid, listen_addr);
+ if (!net_dev)
+ return ERR_PTR(-ENODEV);
+
+ if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
+ dev_put(net_dev);
+ return ERR_PTR(-EHOSTUNREACH);
+ }
+
+ return net_dev;
+}
+
+static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
+{
+ return (be64_to_cpu(service_id) >> 16) & 0xffff;
+}
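
These two helpers undo the packing performed by rdma_get_service_id() (touched further down in this diff): in the host-order service ID the port space sits in bits 16..31 and the port in bits 0..15. A minimal sketch of the forward direction, for illustration only:

static __be64 example_service_id(enum rdma_port_space ps, u16 port)
{
	/* compose a service ID so the two extractors above round-trip */
	return cpu_to_be64(((u64)ps << 16) + port);
}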
+
+static bool cma_match_private_data(struct rdma_id_private *id_priv,
+ const struct cma_hdr *hdr)
+{
+ struct sockaddr *addr = cma_src_addr(id_priv);
+ __be32 ip4_addr;
+ struct in6_addr ip6_addr;
+
+ if (cma_any_addr(addr) && !id_priv->afonly)
+ return true;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+ if (cma_get_ip_ver(hdr) != 4)
+ return false;
+ if (!cma_any_addr(addr) &&
+ hdr->dst_addr.ip4.addr != ip4_addr)
+ return false;
+ break;
+ case AF_INET6:
+ ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
+ if (cma_get_ip_ver(hdr) != 6)
+ return false;
+ if (!cma_any_addr(addr) &&
+ memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
+ return false;
+ break;
+ case AF_IB:
+ return true;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
+ const struct net_device *net_dev)
+{
+ const struct rdma_addr *addr = &id_priv->id.route.addr;
+
+ if (!net_dev)
+ /* This request is an AF_IB request */
+ return addr->src_addr.ss_family == AF_IB;
+
+ return !addr->dev_addr.bound_dev_if ||
+ (net_eq(dev_net(net_dev), &init_net) &&
+ addr->dev_addr.bound_dev_if == net_dev->ifindex);
+}
+
+static struct rdma_id_private *cma_find_listener(
+ const struct rdma_bind_list *bind_list,
+ const struct ib_cm_id *cm_id,
+ const struct ib_cm_event *ib_event,
+ const struct cma_req_info *req,
+ const struct net_device *net_dev)
+{
+ struct rdma_id_private *id_priv, *id_priv_dev;
+
+ if (!bind_list)
+ return ERR_PTR(-EINVAL);
+
+ hlist_for_each_entry(id_priv, &bind_list->owners, node) {
+ if (cma_match_private_data(id_priv, ib_event->private_data)) {
+ if (id_priv->id.device == cm_id->device &&
+ cma_match_net_dev(id_priv, net_dev))
+ return id_priv;
+ list_for_each_entry(id_priv_dev,
+ &id_priv->listen_list,
+ listen_list) {
+ if (id_priv_dev->id.device == cm_id->device &&
+ cma_match_net_dev(id_priv_dev, net_dev))
+ return id_priv_dev;
+ }
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
+ struct ib_cm_event *ib_event,
+ struct net_device **net_dev)
+{
+ struct cma_req_info req;
+ struct rdma_bind_list *bind_list;
+ struct rdma_id_private *id_priv;
+ int err;
+
+ err = cma_save_req_info(ib_event, &req);
+ if (err)
+ return ERR_PTR(err);
+
+ *net_dev = cma_get_net_dev(ib_event, &req);
+ if (IS_ERR(*net_dev)) {
+ if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
+ /* Assuming the protocol is AF_IB */
+ *net_dev = NULL;
+ } else {
+ return ERR_CAST(*net_dev);
+ }
+ }
+
+ bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
+ cma_port_from_service_id(req.service_id));
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+ if (IS_ERR(id_priv)) {
+ dev_put(*net_dev);
+ *net_dev = NULL;
+ }
+
+ return id_priv;
+}
+
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
@@ -1038,7 +1377,7 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_lock(&lock);
hlist_del(&id_priv->node);
if (hlist_empty(&bind_list->owners)) {
- idr_remove(bind_list->ps, bind_list->port);
+ cma_ps_remove(bind_list->ps, bind_list->port);
kfree(bind_list);
}
mutex_unlock(&lock);
@@ -1216,11 +1555,15 @@ out:
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+ struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
struct rdma_route *rt;
+ const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
+ const __be64 service_id =
+ ib_event->param.req_rcvd.primary_path->service_id;
int ret;
id = rdma_create_id(listen_id->event_handler, listen_id->context,
@@ -1229,7 +1572,9 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
return NULL;
id_priv = container_of(id, struct rdma_id_private, id);
- if (cma_save_net_info(id, listen_id, ib_event))
+ if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
+ (struct sockaddr *)&id->route.addr.dst_addr,
+ listen_id, ib_event, ss_family, service_id))
goto err;
rt = &id->route;
@@ -1243,14 +1588,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (rt->num_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
- if (cma_any_addr(cma_src_addr(id_priv))) {
- rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
- rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
- } else {
- ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+ if (net_dev) {
+ ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
if (ret)
goto err;
+ } else {
+ /* An AF_IB connection */
+ WARN_ON_ONCE(ss_family != AF_IB);
+
+ cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
+ &rt->addr.dev_addr);
}
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1263,10 +1610,12 @@ err:
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+ struct ib_cm_event *ib_event,
+ struct net_device *net_dev)
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
+ const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
int ret;
id = rdma_create_id(listen_id->event_handler, listen_id->context,
@@ -1275,13 +1624,24 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
return NULL;
id_priv = container_of(id, struct rdma_id_private, id);
- if (cma_save_net_info(id, listen_id, ib_event))
+ if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
+ (struct sockaddr *)&id->route.addr.dst_addr,
+ listen_id, ib_event, ss_family,
+ ib_event->param.sidr_req_rcvd.service_id))
goto err;
- if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
- ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
+ if (net_dev) {
+ ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
if (ret)
goto err;
+ } else {
+ /* An AF_IB connection */
+ WARN_ON_ONCE(ss_family != AF_IB);
+
+ if (!cma_any_addr(cma_src_addr(id_priv)))
+ cma_translate_ib((struct sockaddr_ib *)
+ cma_src_addr(id_priv),
+ &id->route.addr.dev_addr);
}
id_priv->state = RDMA_CM_CONNECT;
@@ -1319,25 +1679,33 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
+ struct net_device *net_dev;
int offset, ret;
- listen_id = cm_id->context;
- if (!cma_check_req_qp_type(&listen_id->id, ib_event))
- return -EINVAL;
+ listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ if (IS_ERR(listen_id))
+ return PTR_ERR(listen_id);
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
- return -ECONNABORTED;
+ if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
+ ret = -EINVAL;
+ goto net_dev_put;
+ }
+
+ if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+ ret = -ECONNABORTED;
+ goto net_dev_put;
+ }
memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
- conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+ conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
- conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+ conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
}
@@ -1375,6 +1743,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
+ if (net_dev)
+ dev_put(net_dev);
return 0;
err3:
@@ -1388,6 +1758,11 @@ err1:
mutex_unlock(&listen_id->handler_mutex);
if (conn_id)
rdma_destroy_id(&conn_id->id);
+
+net_dev_put:
+ if (net_dev)
+ dev_put(net_dev);
+
return ret;
}
@@ -1400,42 +1775,6 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
-static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
- struct ib_cm_compare_data *compare)
-{
- struct cma_hdr *cma_data, *cma_mask;
- __be32 ip4_addr;
- struct in6_addr ip6_addr;
-
- memset(compare, 0, sizeof *compare);
- cma_data = (void *) compare->data;
- cma_mask = (void *) compare->mask;
-
- switch (addr->sa_family) {
- case AF_INET:
- ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
- cma_set_ip_ver(cma_data, 4);
- cma_set_ip_ver(cma_mask, 0xF);
- if (!cma_any_addr(addr)) {
- cma_data->dst_addr.ip4.addr = ip4_addr;
- cma_mask->dst_addr.ip4.addr = htonl(~0);
- }
- break;
- case AF_INET6:
- ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
- cma_set_ip_ver(cma_data, 6);
- cma_set_ip_ver(cma_mask, 0xF);
- if (!cma_any_addr(addr)) {
- cma_data->dst_addr.ip6 = ip6_addr;
- memset(&cma_mask->dst_addr.ip6, 0xFF,
- sizeof cma_mask->dst_addr.ip6);
- }
- break;
- default:
- break;
- }
-}
-
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
@@ -1589,33 +1928,18 @@ out:
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
- struct ib_cm_compare_data compare_data;
struct sockaddr *addr;
struct ib_cm_id *id;
__be64 svc_id;
- int ret;
- id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+ addr = cma_src_addr(id_priv);
+ svc_id = rdma_get_service_id(&id_priv->id, addr);
+ id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
if (IS_ERR(id))
return PTR_ERR(id);
-
id_priv->cm_id.ib = id;
- addr = cma_src_addr(id_priv);
- svc_id = rdma_get_service_id(&id_priv->id, addr);
- if (cma_any_addr(addr) && !id_priv->afonly)
- ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
- else {
- cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
- ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
- }
-
- if (ret) {
- ib_destroy_cm_id(id_priv->cm_id.ib);
- id_priv->cm_id.ib = NULL;
- }
-
- return ret;
+ return 0;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
@@ -2203,8 +2527,11 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
if (dst_addr->sa_family == AF_INET6) {
- ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
- ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
+ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
+ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *) src_addr)->sib_pkey =
((struct sockaddr_ib *) dst_addr)->sib_pkey;
@@ -2325,8 +2652,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
hlist_add_head(&id_priv->node, &bind_list->owners);
}
-static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
- unsigned short snum)
+static int cma_alloc_port(enum rdma_port_space ps,
+ struct rdma_id_private *id_priv, unsigned short snum)
{
struct rdma_bind_list *bind_list;
int ret;
@@ -2335,7 +2662,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
if (!bind_list)
return -ENOMEM;
- ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ ret = cma_ps_alloc(ps, bind_list, snum);
if (ret < 0)
goto err;
@@ -2348,7 +2675,8 @@ err:
return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
-static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
+static int cma_alloc_any_port(enum rdma_port_space ps,
+ struct rdma_id_private *id_priv)
{
static unsigned int last_used_port;
int low, high, remaining;
@@ -2359,7 +2687,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
rover = prandom_u32() % remaining + low;
retry:
if (last_used_port != rover &&
- !idr_find(ps, (unsigned short) rover)) {
+ !cma_ps_find(ps, (unsigned short)rover)) {
int ret = cma_alloc_port(ps, id_priv, rover);
/*
* Remember previously used port number in order to avoid
@@ -2414,7 +2742,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
return 0;
}
-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+static int cma_use_port(enum rdma_port_space ps,
+ struct rdma_id_private *id_priv)
{
struct rdma_bind_list *bind_list;
unsigned short snum;
@@ -2424,7 +2753,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
- bind_list = idr_find(ps, snum);
+ bind_list = cma_ps_find(ps, snum);
if (!bind_list) {
ret = cma_alloc_port(ps, id_priv, snum);
} else {
@@ -2447,25 +2776,24 @@ static int cma_bind_listen(struct rdma_id_private *id_priv)
return ret;
}
-static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
+static enum rdma_port_space cma_select_inet_ps(
+ struct rdma_id_private *id_priv)
{
switch (id_priv->id.ps) {
case RDMA_PS_TCP:
- return &tcp_ps;
case RDMA_PS_UDP:
- return &udp_ps;
case RDMA_PS_IPOIB:
- return &ipoib_ps;
case RDMA_PS_IB:
- return &ib_ps;
+ return id_priv->id.ps;
default:
- return NULL;
+ return 0;
}
}
-static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
+static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
- struct idr *ps = NULL;
+ enum rdma_port_space ps = 0;
struct sockaddr_ib *sib;
u64 sid_ps, mask, sid;
@@ -2475,15 +2803,15 @@ static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
sid_ps = RDMA_IB_IP_PS_IB;
- ps = &ib_ps;
+ ps = RDMA_PS_IB;
} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
(sid == (RDMA_IB_IP_PS_TCP & mask))) {
sid_ps = RDMA_IB_IP_PS_TCP;
- ps = &tcp_ps;
+ ps = RDMA_PS_TCP;
} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
(sid == (RDMA_IB_IP_PS_UDP & mask))) {
sid_ps = RDMA_IB_IP_PS_UDP;
- ps = &udp_ps;
+ ps = RDMA_PS_UDP;
}
if (ps) {
@@ -2496,7 +2824,7 @@ static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
static int cma_get_port(struct rdma_id_private *id_priv)
{
- struct idr *ps;
+ enum rdma_port_space ps;
int ret;
if (cma_family(id_priv) != AF_IB)
@@ -3551,11 +3879,10 @@ static void cma_process_remove(struct cma_device *cma_dev)
wait_for_completion(&cma_dev->comp);
}
-static void cma_remove_one(struct ib_device *device)
+static void cma_remove_one(struct ib_device *device, void *client_data)
{
- struct cma_device *cma_dev;
+ struct cma_device *cma_dev = client_data;
- cma_dev = ib_get_client_data(device, &cma_client);
if (!cma_dev)
return;
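The hunk above converts cma_remove_one() to the new two-argument client remove() callback: the core now hands the client data back directly, so clients no longer call ib_get_client_data() on teardown. A minimal sketch of a client written against this interface could look as follows; example_client, struct example_dev and the function names are illustrative only, not part of this patch:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct example_dev {
	struct ib_device *device;
};

static void example_add_one(struct ib_device *device);
static void example_remove_one(struct ib_device *device, void *client_data);

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add_one,
	.remove = example_remove_one,
};

static void example_add_one(struct ib_device *device)
{
	struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);

	if (!edev)
		return;
	edev->device = device;
	ib_set_client_data(device, &example_client, edev);
}

/* New-style remove(): the core passes the client data in directly,
 * so there is no ib_get_client_data() call here any more. */
static void example_remove_one(struct ib_device *device, void *client_data)
{
	struct example_dev *edev = client_data;

	if (!edev)
		return;
	kfree(edev);
}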
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 87d1936f5c1c..70bb36ebb03b 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -43,12 +43,58 @@ int ib_device_register_sysfs(struct ib_device *device,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
-int ib_sysfs_setup(void);
-void ib_sysfs_cleanup(void);
-
-int ib_cache_setup(void);
+void ib_cache_setup(void);
void ib_cache_cleanup(void);
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
struct ib_qp_attr *qp_attr, int *qp_attr_mask);
+
+typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
+ struct net_device *idev, void *cookie);
+
+typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
+ struct net_device *idev, void *cookie);
+
+void ib_enum_roce_netdev(struct ib_device *ib_dev,
+ roce_netdev_filter filter,
+ void *filter_cookie,
+ roce_netdev_callback cb,
+ void *cookie);
+void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
+ void *filter_cookie,
+ roce_netdev_callback cb,
+ void *cookie);
+
+int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ u8 port, struct net_device *ndev,
+ u16 *index);
+
+enum ib_cache_gid_default_mode {
+ IB_CACHE_GID_DEFAULT_MODE_SET,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE
+};
+
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+ struct net_device *ndev,
+ enum ib_cache_gid_default_mode mode);
+
+int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr);
+
+int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr);
+
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *ndev);
+
+int roce_gid_mgmt_init(void);
+void roce_gid_mgmt_cleanup(void);
+
+int roce_rescan_device(struct ib_device *ib_dev);
+
+int ib_cache_setup_one(struct ib_device *device);
+void ib_cache_cleanup_one(struct ib_device *device);
+void ib_cache_release_one(struct ib_device *device);
+
#endif /* _CORE_PRIV_H */
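The roce_netdev_filter/roce_netdev_callback pair declared above separates port matching from the action taken on a match. A hedged sketch of how a core-internal caller might use ib_enum_roce_netdev() (hence the core_priv.h include; has_ndev_filter, count_port_cb and count_roce_netdev_ports are made-up names):

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

/* Filter: only ports currently backed by a netdev. */
static int has_ndev_filter(struct ib_device *device, u8 port,
			   struct net_device *ndev, void *cookie)
{
	return ndev != NULL;
}

/* Callback: runs only for ports the filter accepted; both routines
 * receive the caller's cookies untouched. */
static void count_port_cb(struct ib_device *device, u8 port,
			  struct net_device *ndev, void *cookie)
{
	(*(unsigned int *)cookie)++;
}

static unsigned int count_roce_netdev_ports(struct ib_device *ib_dev)
{
	unsigned int n = 0;

	ib_enum_roce_netdev(ib_dev, has_ndev_filter, NULL,
			    count_port_cb, &n);
	return n;
}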
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9567756ca4f9..17639117afc6 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,10 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
#include "core_priv.h"
@@ -50,22 +53,34 @@ struct ib_client_data {
struct list_head list;
struct ib_client *client;
void * data;
+ /* The device or client is going down. Do not call client or device
+ * callbacks other than remove(). */
+ bool going_down;
};
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
+/* The device_list and client_list contain devices and clients after their
+ * registration has completed, and the devices and clients are removed
+ * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
- * device_mutex protects access to both device_list and client_list.
- * There's no real point to using multiple locks or something fancier
- * like an rwsem: we always access both lists, and we're always
- * modifying one list or the other list. In any case this is not a
- * hot path so there's no point in trying to optimize.
+ * device_mutex and lists_rwsem protect access to both device_list and
+ * client_list. device_mutex protects writer access by device and client
+ * registration / de-registration. lists_rwsem protects reader access to
+ * these lists. Iterators of these lists must lock it for read, while updates
+ * to the lists must be done with a write lock. A special case is when the
+ * device_mutex is locked. In this case locking the lists for read access is
+ * not necessary as the device_mutex implies it.
+ *
+ * lists_rwsem also protects access to the client data list.
*/
static DEFINE_MUTEX(device_mutex);
+static DECLARE_RWSEM(lists_rwsem);
+
static int ib_device_check_mandatory(struct ib_device *device)
{
@@ -152,6 +167,36 @@ static int alloc_name(char *name)
return 0;
}
+static void ib_device_release(struct device *device)
+{
+ struct ib_device *dev = container_of(device, struct ib_device, dev);
+
+ ib_cache_release_one(dev);
+ kfree(dev->port_immutable);
+ kfree(dev);
+}
+
+static int ib_device_uevent(struct device *device,
+ struct kobj_uevent_env *env)
+{
+ struct ib_device *dev = container_of(device, struct ib_device, dev);
+
+ if (add_uevent_var(env, "NAME=%s", dev->name))
+ return -ENOMEM;
+
+ /*
+ * It would be nice to pass the node GUID with the event...
+ */
+
+ return 0;
+}
+
+static struct class ib_class = {
+ .name = "infiniband",
+ .dev_release = ib_device_release,
+ .dev_uevent = ib_device_uevent,
+};
+
/**
* ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
@@ -164,9 +209,27 @@ static int alloc_name(char *name)
*/
struct ib_device *ib_alloc_device(size_t size)
{
- BUG_ON(size < sizeof (struct ib_device));
+ struct ib_device *device;
+
+ if (WARN_ON(size < sizeof(struct ib_device)))
+ return NULL;
+
+ device = kzalloc(size, GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.class = &ib_class;
+ device_initialize(&device->dev);
+
+ dev_set_drvdata(&device->dev, device);
+
+ INIT_LIST_HEAD(&device->event_handler_list);
+ spin_lock_init(&device->event_handler_lock);
+ spin_lock_init(&device->client_data_lock);
+ INIT_LIST_HEAD(&device->client_data_list);
+ INIT_LIST_HEAD(&device->port_list);
- return kzalloc(size, GFP_KERNEL);
+ return device;
}
EXPORT_SYMBOL(ib_alloc_device);
@@ -178,13 +241,8 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
- if (device->reg_state == IB_DEV_UNINITIALIZED) {
- kfree(device);
- return;
- }
-
- BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
-
+ WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
+ device->reg_state != IB_DEV_UNINITIALIZED);
kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -203,10 +261,13 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context->client = client;
context->data = NULL;
+ context->going_down = false;
+ down_write(&lists_rwsem);
spin_lock_irqsave(&device->client_data_lock, flags);
list_add(&context->list, &device->client_data_list);
spin_unlock_irqrestore(&device->client_data_lock, flags);
+ up_write(&lists_rwsem);
return 0;
}
@@ -219,7 +280,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
static int read_port_immutable(struct ib_device *device)
{
- int ret = -ENOMEM;
+ int ret;
u8 start_port = rdma_start_port(device);
u8 end_port = rdma_end_port(device);
u8 port;
@@ -235,26 +296,18 @@ static int read_port_immutable(struct ib_device *device)
* (end_port + 1),
GFP_KERNEL);
if (!device->port_immutable)
- goto err;
+ return -ENOMEM;
for (port = start_port; port <= end_port; ++port) {
ret = device->get_port_immutable(device, port,
&device->port_immutable[port]);
if (ret)
- goto err;
+ return ret;
- if (verify_immutable(device, port)) {
- ret = -EINVAL;
- goto err;
- }
+ if (verify_immutable(device, port))
+ return -EINVAL;
}
-
- ret = 0;
- goto out;
-err:
- kfree(device->port_immutable);
-out:
- return ret;
+ return 0;
}
/**
@@ -271,6 +324,7 @@ int ib_register_device(struct ib_device *device,
u8, struct kobject *))
{
int ret;
+ struct ib_client *client;
mutex_lock(&device_mutex);
@@ -285,11 +339,6 @@ int ib_register_device(struct ib_device *device,
goto out;
}
- INIT_LIST_HEAD(&device->event_handler_list);
- INIT_LIST_HEAD(&device->client_data_list);
- spin_lock_init(&device->event_handler_lock);
- spin_lock_init(&device->client_data_lock);
-
ret = read_port_immutable(device);
if (ret) {
printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
@@ -297,27 +346,30 @@ int ib_register_device(struct ib_device *device,
goto out;
}
+ ret = ib_cache_setup_one(device);
+ if (ret) {
+ printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+ goto out;
+ }
+
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
- kfree(device->port_immutable);
+ ib_cache_cleanup_one(device);
goto out;
}
- list_add_tail(&device->core_list, &device_list);
-
device->reg_state = IB_DEV_REGISTERED;
- {
- struct ib_client *client;
-
- list_for_each_entry(client, &client_list, list)
- if (client->add && !add_client_context(device, client))
- client->add(device);
- }
+ list_for_each_entry(client, &client_list, list)
+ if (client->add && !add_client_context(device, client))
+ client->add(device);
- out:
+ down_write(&lists_rwsem);
+ list_add_tail(&device->core_list, &device_list);
+ up_write(&lists_rwsem);
+out:
mutex_unlock(&device_mutex);
return ret;
}
@@ -331,26 +383,37 @@ EXPORT_SYMBOL(ib_register_device);
*/
void ib_unregister_device(struct ib_device *device)
{
- struct ib_client *client;
struct ib_client_data *context, *tmp;
unsigned long flags;
mutex_lock(&device_mutex);
- list_for_each_entry_reverse(client, &client_list, list)
- if (client->remove)
- client->remove(device);
-
+ down_write(&lists_rwsem);
list_del(&device->core_list);
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ context->going_down = true;
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+ downgrade_write(&lists_rwsem);
+
+ list_for_each_entry_safe(context, tmp, &device->client_data_list,
+ list) {
+ if (context->client->remove)
+ context->client->remove(device, context->data);
+ }
+ up_read(&lists_rwsem);
mutex_unlock(&device_mutex);
ib_device_unregister_sysfs(device);
+ ib_cache_cleanup_one(device);
+ down_write(&lists_rwsem);
spin_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
kfree(context);
spin_unlock_irqrestore(&device->client_data_lock, flags);
+ up_write(&lists_rwsem);
device->reg_state = IB_DEV_UNREGISTERED;
}
@@ -375,11 +438,14 @@ int ib_register_client(struct ib_client *client)
mutex_lock(&device_mutex);
- list_add_tail(&client->list, &client_list);
list_for_each_entry(device, &device_list, core_list)
if (client->add && !add_client_context(device, client))
client->add(device);
+ down_write(&lists_rwsem);
+ list_add_tail(&client->list, &client_list);
+ up_write(&lists_rwsem);
+
mutex_unlock(&device_mutex);
return 0;
@@ -402,19 +468,41 @@ void ib_unregister_client(struct ib_client *client)
mutex_lock(&device_mutex);
+ down_write(&lists_rwsem);
+ list_del(&client->list);
+ up_write(&lists_rwsem);
+
list_for_each_entry(device, &device_list, core_list) {
- if (client->remove)
- client->remove(device);
+ struct ib_client_data *found_context = NULL;
+ down_write(&lists_rwsem);
spin_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
if (context->client == client) {
- list_del(&context->list);
- kfree(context);
+ context->going_down = true;
+ found_context = context;
+ break;
}
spin_unlock_irqrestore(&device->client_data_lock, flags);
+ up_write(&lists_rwsem);
+
+ if (client->remove)
+ client->remove(device, found_context ?
+ found_context->data : NULL);
+
+ if (!found_context) {
+ pr_warn("No client context found for %s/%s\n",
+ device->name, client->name);
+ continue;
+ }
+
+ down_write(&lists_rwsem);
+ spin_lock_irqsave(&device->client_data_lock, flags);
+ list_del(&found_context->list);
+ kfree(found_context);
+ spin_unlock_irqrestore(&device->client_data_lock, flags);
+ up_write(&lists_rwsem);
}
- list_del(&client->list);
mutex_unlock(&device_mutex);
}
@@ -590,11 +678,80 @@ EXPORT_SYMBOL(ib_query_port);
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid)
{
+ if (rdma_cap_roce_gid_table(device, port_num))
+ return ib_get_cached_gid(device, port_num, index, gid);
+
return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
/**
+ * ib_enum_roce_netdev - enumerate all RoCE ports
+ * @ib_dev : IB device we want to query
+ * @filter: Should we call the callback?
+ * @filter_cookie: Cookie passed to filter
+ * @cb: Callback to call for each found RoCE port
+ * @cookie: Cookie passed back to the callback
+ *
+ * Enumerates all of the physical RoCE ports of ib_dev
+ * that are backed by a netdevice and calls cb() on each
+ * port for which filter() returns a non-zero value.
+ */
+void ib_enum_roce_netdev(struct ib_device *ib_dev,
+ roce_netdev_filter filter,
+ void *filter_cookie,
+ roce_netdev_callback cb,
+ void *cookie)
+{
+ u8 port;
+
+ for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
+ port++)
+ if (rdma_protocol_roce(ib_dev, port)) {
+ struct net_device *idev = NULL;
+
+ if (ib_dev->get_netdev)
+ idev = ib_dev->get_netdev(ib_dev, port);
+
+ if (idev &&
+ idev->reg_state >= NETREG_UNREGISTERED) {
+ dev_put(idev);
+ idev = NULL;
+ }
+
+ if (filter(ib_dev, port, idev, filter_cookie))
+ cb(ib_dev, port, idev, cookie);
+
+ if (idev)
+ dev_put(idev);
+ }
+}
+
+/**
+ * ib_enum_all_roce_netdevs - enumerate all RoCE devices
+ * @filter: Should we call the callback?
+ * @filter_cookie: Cookie passed to filter
+ * @cb: Callback to call for each found RoCE port
+ * @cookie: Cookie passed back to the callback
+ *
+ * Enumerates the physical ports of all RoCE devices that are
+ * backed by netdevices and calls cb() on each port for which
+ * filter() returns a non-zero value.
+ */
+void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
+ void *filter_cookie,
+ roce_netdev_callback cb,
+ void *cookie)
+{
+ struct ib_device *dev;
+
+ down_read(&lists_rwsem);
+ list_for_each_entry(dev, &device_list, core_list)
+ ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
+ up_read(&lists_rwsem);
+}
+
+/**
* ib_query_pkey - Get P_Key table entry
* @device:Device to query
* @port_num:Port number to query
@@ -673,6 +830,14 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
int ret, port, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
+ if (rdma_cap_roce_gid_table(device, port)) {
+ if (!ib_cache_gid_find_by_port(device, gid, port,
+ NULL, index)) {
+ *port_num = port;
+ return 0;
+ }
+ }
+
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
ret = ib_query_gid(device, port, i, &tmp_gid);
if (ret)
@@ -729,6 +894,51 @@ int ib_find_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_find_pkey);
+/**
+ * ib_get_net_dev_by_params() - Return the appropriate net_dev
+ * for a received CM request
+ * @dev: An RDMA device on which the request has been received.
+ * @port: Port number on the RDMA device.
+ * @pkey: The Pkey the request came on.
+ * @gid: A GID that the net_dev uses to communicate.
+ * @addr: Contains the IP address that the request specified as its
+ * destination.
+ */
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
+ u8 port,
+ u16 pkey,
+ const union ib_gid *gid,
+ const struct sockaddr *addr)
+{
+ struct net_device *net_dev = NULL;
+ struct ib_client_data *context;
+
+ if (!rdma_protocol_ib(dev, port))
+ return NULL;
+
+ down_read(&lists_rwsem);
+
+ list_for_each_entry(context, &dev->client_data_list, list) {
+ struct ib_client *client = context->client;
+
+ if (context->going_down)
+ continue;
+
+ if (client->get_net_dev_by_params) {
+ net_dev = client->get_net_dev_by_params(dev, port, pkey,
+ gid, addr,
+ context->data);
+ if (net_dev)
+ break;
+ }
+ }
+
+ up_read(&lists_rwsem);
+
+ return net_dev;
+}
+EXPORT_SYMBOL(ib_get_net_dev_by_params);
+
static int __init ib_core_init(void)
{
int ret;
@@ -737,7 +947,7 @@ static int __init ib_core_init(void)
if (!ib_wq)
return -ENOMEM;
- ret = ib_sysfs_setup();
+ ret = class_register(&ib_class);
if (ret) {
printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
goto err;
@@ -749,19 +959,12 @@ static int __init ib_core_init(void)
goto err_sysfs;
}
- ret = ib_cache_setup();
- if (ret) {
- printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
- goto err_nl;
- }
+ ib_cache_setup();
return 0;
-err_nl:
- ibnl_cleanup();
-
err_sysfs:
- ib_sysfs_cleanup();
+ class_unregister(&ib_class);
err:
destroy_workqueue(ib_wq);
@@ -772,7 +975,7 @@ static void __exit ib_core_cleanup(void)
{
ib_cache_cleanup();
ibnl_cleanup();
- ib_sysfs_cleanup();
+ class_unregister(&ib_class);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
}
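ib_get_net_dev_by_params() asks each registered client, via its optional get_net_dev_by_params hook, to map a received CM request to the net_dev it arrived on; the first non-NULL answer wins. Judging by the dev_put(net_dev) added to cma_req_handler() earlier in this patch, the returned net_dev appears to carry a reference that the caller must drop. A small illustrative sketch (request_arrived_on_netdev is a made-up helper, not part of this series):

#include <linux/netdevice.h>
#include <linux/socket.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: probe whether any client can resolve the ingress
 * net_dev for a CM request.  The dev_put() mirrors cma_req_handler()'s
 * handling of the reference-counted return value. */
static bool request_arrived_on_netdev(struct ib_device *dev, u8 port,
				      u16 pkey, const union ib_gid *gid,
				      const struct sockaddr *addr)
{
	struct net_device *ndev =
		ib_get_net_dev_by_params(dev, port, pkey, gid, addr);

	if (!ndev)
		return false;
	dev_put(ndev);
	return true;
}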
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66c1a..22a3abee2a54 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto pid_query_error;
}
- if (iwpm_registered_client(nl_client))
+ if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+ iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
- iwpm_set_registered(nl_client, 1);
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
err_str = "Unable to send a nlmsg";
goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto add_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto add_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
if (!skb) {
err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
err_str = "Invalid port mapper client";
goto query_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto query_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
ret = -ENOMEM;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
err_str = "Invalid port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
err_str = "Unregistered port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
if (!skb) {
ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
if (iwpm_valid_client(nl_client))
- iwpm_set_registered(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
const char *msg_type = "Mapping Info response";
- int iwpm_pid;
u8 nl_client;
char *iwpm_name;
u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
__func__, nl_client);
return ret;
}
- iwpm_set_registered(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
if (!iwpm_mapinfo_available())
return 0;
- iwpm_pid = cb->nlh->nlmsg_pid;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
- __func__, iwpm_pid);
- ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+ __func__, iwpm_user_pid);
+ ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
EXPORT_SYMBOL(iwpm_mapping_info_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf9c7..5fb089e91353 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
mutex_unlock(&iwpm_admin_lock);
if (!ret) {
iwpm_set_valid(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
pr_debug("%s: Mapinfo and reminfo tables are created\n",
__func__);
}
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
}
/* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
{
return iwpm_admin.reg_list[nl_client];
}
/* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
{
iwpm_admin.reg_list[nl_client] = reg;
}
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+ return (iwpm_get_registration(nl_client) & reg);
+}
+
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
struct sockaddr_storage *b_sockaddr)
{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff095be..b7b9e194ce81 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
#define IWPM_PID_UNDEFINED -1
#define IWPM_PID_UNAVAILABLE -2
+#define IWPM_REG_UNDEF 0x01
+#define IWPM_REG_VALID 0x02
+#define IWPM_REG_INCOMPL 0x04
+
struct iwpm_nlmsg_request {
struct list_head inprocess_list;
__u32 nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
atomic_t refcount;
atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS];
- int reg_list[RDMA_NL_NUM_CLIENTS];
+ u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
/**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
void iwpm_set_valid(u8 nl_client, int valid);
/**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ * matches the given one
* @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
*
* Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
*/
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration - Get the registration type of the client
* @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
*/
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
/**
* iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
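The registration state is now a u32 bitmask (IWPM_REG_UNDEF, IWPM_REG_VALID, IWPM_REG_INCOMPL) rather than a 0/1 flag, and iwpm_check_registration() reduces to a bitwise AND against iwpm_get_registration(). That lets a caller accept several states in a single test, as in this hedged sketch (client_usable and its policy are illustrative only):

#include <linux/types.h>

#include "iwpm_util.h"

/* Hedged sketch: since the states are bit flags and
 * iwpm_check_registration() is (iwpm_get_registration() & reg),
 * several acceptable states can be tested in one call. */
static bool client_usable(u8 nl_client)
{
	/* Illustrative policy: fully registered, or re-registering
	 * after a port mapper restart, both count as usable. */
	return iwpm_check_registration(nl_client,
				       IWPM_REG_VALID | IWPM_REG_INCOMPL);
}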
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4b1466c1bf6..4b5c72311deb 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -338,13 +338,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error1;
}
- mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(mad_agent_priv->agent.mr)) {
- ret = ERR_PTR(-ENOMEM);
- goto error2;
- }
-
if (mad_reg_req) {
reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
if (!reg_req) {
@@ -429,8 +422,6 @@ error4:
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
kfree(reg_req);
error3:
- ib_dereg_mr(mad_agent_priv->agent.mr);
-error2:
kfree(mad_agent_priv);
error1:
return ret;
@@ -590,7 +581,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
wait_for_completion(&mad_agent_priv->comp);
kfree(mad_agent_priv->reg_req);
- ib_dereg_mr(mad_agent_priv->agent.mr);
kfree(mad_agent_priv);
}
@@ -769,7 +759,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
mad_agent_priv->qp_info->port_priv->port_num);
- if (device->node_type == RDMA_NODE_IB_SWITCH &&
+ if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
port_num = send_wr->wr.ud.port_num;
else
@@ -787,14 +777,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE &&
- opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+ opa_smi_handle_dr_smp_send(opa_smp,
+ rdma_cap_ib_switch(device),
port_num) == IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid directed route\n");
goto out;
}
opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
- if (opa_drslid != OPA_LID_PERMISSIVE &&
+ if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
opa_drslid & 0xffff0000) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +801,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
} else {
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
- smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+ smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "Invalid directed route\n");
@@ -1037,7 +1028,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
mad_send_wr->mad_agent_priv = mad_agent_priv;
mad_send_wr->sg_list[0].length = hdr_len;
- mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
+ mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
/* OPA MADs don't have to be the full 2048 bytes */
if (opa && base_version == OPA_MGMT_BASE_VERSION &&
@@ -1046,7 +1037,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
else
mad_send_wr->sg_list[1].length = mad_size - hdr_len;
- mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
+ mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
@@ -2030,7 +2021,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
struct ib_smp *smp = (struct ib_smp *)recv->mad;
if (smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
@@ -2042,13 +2033,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2106,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
struct opa_smp *smp = (struct opa_smp *)recv->mad;
if (opa_smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
@@ -2127,7 +2118,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (opa_smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
@@ -2135,7 +2126,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2226,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
goto out;
}
- if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
else
port_num = port_priv->port_num;
@@ -2884,7 +2875,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
/* Initialize common scatter list fields */
- sg_list.lkey = (*qp_info->port_priv->mr).lkey;
+ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
/* Initialize common receive WR fields */
recv_wr.next = NULL;
@@ -3200,13 +3191,6 @@ static int ib_mad_port_open(struct ib_device *device,
goto error4;
}
- port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(port_priv->mr)) {
- dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
- ret = PTR_ERR(port_priv->mr);
- goto error5;
- }
-
if (has_smi) {
ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
if (ret)
@@ -3247,8 +3231,6 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dereg_mr(port_priv->mr);
-error5:
ib_dealloc_pd(port_priv->pd);
error4:
ib_destroy_cq(port_priv->cq);
@@ -3283,7 +3265,6 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dereg_mr(port_priv->mr);
ib_dealloc_pd(port_priv->pd);
ib_destroy_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
@@ -3297,17 +3278,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
static void ib_mad_init_device(struct ib_device *device)
{
- int start, end, i;
+ int start, i;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ start = rdma_start_port(device);
- for (i = start; i <= end; i++) {
+ for (i = start; i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
@@ -3340,19 +3315,11 @@ error:
}
}
-static void ib_mad_remove_device(struct ib_device *device)
+static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
- int start, end, i;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ int i;
- for (i = start; i <= end; i++) {
+ for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 5be89f98928f..4a4f7aad0978 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -199,7 +199,6 @@ struct ib_mad_port_private {
int port_num;
struct ib_cq *cq;
struct ib_pd *pd;
- struct ib_mr *mr;
spinlock_t reg_lock;
struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
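With the per-port and per-agent DMA MRs gone, the MAD code fills its scatter/gather entries with the protection domain's local_dma_lkey instead of an lkey obtained from ib_get_dma_mr(). A minimal sketch of the new pattern (fill_sge is a made-up helper):

#include <rdma/ib_verbs.h>

/* Hedged sketch: build a scatter/gather entry using the PD-wide DMA
 * lkey, as ib_create_send_mad() and the receive path now do, instead
 * of an lkey from a per-port ib_get_dma_mr() registration. */
static void fill_sge(struct ib_sge *sge, struct ib_pd *pd,
		     u64 dma_addr, u32 len)
{
	sge->addr   = dma_addr;
	sge->length = len;
	sge->lkey   = pd->local_dma_lkey;
}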
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1244f02a5c6d..d38d8b2b2979 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -43,7 +43,7 @@
#include "sa.h"
static void mcast_add_one(struct ib_device *device);
-static void mcast_remove_one(struct ib_device *device);
+static void mcast_remove_one(struct ib_device *device, void *client_data);
static struct ib_client mcast_client = {
.name = "ib_multicast",
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
if (!dev)
return;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- dev->start_port = dev->end_port = 0;
- else {
- dev->start_port = 1;
- dev->end_port = device->phys_port_cnt;
- }
+ dev->start_port = rdma_start_port(device);
+ dev->end_port = rdma_end_port(device);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (!rdma_cap_ib_mcast(device, dev->start_port + i))
@@ -844,13 +840,12 @@ static void mcast_add_one(struct ib_device *device)
ib_register_event_handler(&dev->event_handler);
}
-static void mcast_remove_one(struct ib_device *device)
+static void mcast_remove_one(struct ib_device *device, void *client_data)
{
- struct mcast_device *dev;
+ struct mcast_device *dev = client_data;
struct mcast_port *port;
int i;
- dev = ib_get_client_data(device, &mcast_client);
if (!dev)
return;
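Both mad.c and multicast.c above replace the open-coded node_type test (switches use the single port 0, HCAs use ports 1..phys_port_cnt) with rdma_start_port()/rdma_end_port(). The resulting canonical per-port loop, sketched here with a made-up for_each_mad_port() helper:

#include <rdma/ib_verbs.h>

/* Hedged sketch: iterate every port that supports IB MADs.  For a
 * switch the helpers yield the single port 0; for an HCA they yield
 * 1..phys_port_cnt, so the old node_type special-casing disappears. */
static void for_each_mad_port(struct ib_device *device)
{
	u8 i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;
		/* per-port work would go here */
	}
}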
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 23dd5a5c7597..d47df9356779 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -49,6 +49,14 @@ static DEFINE_MUTEX(ibnl_mutex);
static struct sock *nls;
static LIST_HEAD(client_list);
+int ibnl_chk_listeners(unsigned int group)
+{
+ if (netlink_has_listeners(nls, group) == 0)
+ return -1;
+ return 0;
+}
+EXPORT_SYMBOL(ibnl_chk_listeners);
+
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[])
{
@@ -151,6 +159,23 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
!client->cb_table[op].dump)
return -EINVAL;
+ /*
+ * For a response or a local service set_timeout request,
+ * there is no need to use netlink_dump_start.
+ */
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ (index == RDMA_NL_LS &&
+ op == RDMA_NL_LS_OP_SET_TIMEOUT)) {
+ struct netlink_callback cb = {
+ .skb = skb,
+ .nlh = nlh,
+ .dump = client->cb_table[op].dump,
+ .module = client->cb_table[op].module,
+ };
+
+ return cb.dump(skb, &cb);
+ }
+
{
struct netlink_dump_control c = {
.dump = client->cb_table[op].dump,
@@ -165,9 +190,39 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
}
+static void ibnl_rcv_reply_skb(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ int msglen;
+
+ /*
+ * Process responses until there are no more messages or the first
+ * request. Generally speaking, it is not recommended to mix responses
+ * with requests.
+ */
+ while (skb->len >= nlmsg_total_size(0)) {
+ nlh = nlmsg_hdr(skb);
+
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
+ return;
+
+ /* Handle response only */
+ if (nlh->nlmsg_flags & NLM_F_REQUEST)
+ return;
+
+ ibnl_rcv_msg(skb, nlh);
+
+ msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msglen > skb->len)
+ msglen = skb->len;
+ skb_pull(skb, msglen);
+ }
+}
+
static void ibnl_rcv(struct sk_buff *skb)
{
mutex_lock(&ibnl_mutex);
+ ibnl_rcv_reply_skb(skb);
netlink_rcv_skb(skb, &ibnl_rcv_msg);
mutex_unlock(&ibnl_mutex);
}
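ibnl_rcv_msg() now bypasses netlink_dump_start() for responses and for the local service set_timeout request, and ibnl_rcv_reply_skb() feeds pure responses through that same synchronous path. The dispatch rule reduces to roughly this predicate (needs_sync_handling is a made-up name for illustration; the flag and op tests are taken from the hunk above):

#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

/* Hedged sketch: anything that is not a request (i.e. a response),
 * plus the RDMA_NL_LS set_timeout request, is handled synchronously
 * rather than via netlink_dump_start(). */
static bool needs_sync_handling(const struct nlmsghdr *nlh,
				int index, unsigned int op)
{
	return !(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	       (index == RDMA_NL_LS && op == RDMA_NL_LS_OP_SET_TIMEOUT);
}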
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 62d91bfa4cb7..3bfab3505a29 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -39,12 +39,12 @@
#include "smi.h"
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
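The SMI entry points now take a bool is_switch in place of the raw u8 node_type; every call site in this patch derives it once with rdma_cap_ib_switch(). A converted caller, sketched (handle_send is a made-up wrapper):

#include <rdma/ib_verbs.h>

#include "opa_smi.h"

/* Hedged sketch of a converted call site: the boolean is derived once
 * from rdma_cap_ib_switch() instead of passing node_type down. */
static enum smi_action handle_send(struct opa_smp *smp,
				   struct ib_device *device, int port_num)
{
	return opa_smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device),
					  port_num);
}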
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
new file mode 100644
index 000000000000..6b24cba1e474
--- /dev/null
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -0,0 +1,728 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "core_priv.h"
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+/* For in6_dev_get/in6_dev_put */
+#include <net/addrconf.h>
+#include <net/bonding.h>
+
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+enum gid_op_type {
+ GID_DEL = 0,
+ GID_ADD
+};
+
+struct update_gid_event_work {
+ struct work_struct work;
+ union ib_gid gid;
+ struct ib_gid_attr gid_attr;
+ enum gid_op_type gid_op;
+};
+
+#define ROCE_NETDEV_CALLBACK_SZ 3
+struct netdev_event_work_cmd {
+ roce_netdev_callback cb;
+ roce_netdev_filter filter;
+ struct net_device *ndev;
+ struct net_device *filter_ndev;
+};
+
+struct netdev_event_work {
+ struct work_struct work;
+ struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
+};
+
+static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
+ u8 port, union ib_gid *gid,
+ struct ib_gid_attr *gid_attr)
+{
+ switch (gid_op) {
+ case GID_ADD:
+ ib_cache_gid_add(ib_dev, port, gid, gid_attr);
+ break;
+ case GID_DEL:
+ ib_cache_gid_del(ib_dev, port, gid, gid_attr);
+ break;
+ }
+}
+
+enum bonding_slave_state {
+ BONDING_SLAVE_STATE_ACTIVE = 1UL << 0,
+ BONDING_SLAVE_STATE_INACTIVE = 1UL << 1,
+ /* No primary slave or the device isn't a slave in bonding */
+ BONDING_SLAVE_STATE_NA = 1UL << 2,
+};
+
+static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
+ struct net_device *upper)
+{
+ if (upper && netif_is_bond_master(upper)) {
+ struct net_device *pdev =
+ bond_option_active_slave_get_rcu(netdev_priv(upper));
+
+ if (pdev)
+ return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
+ BONDING_SLAVE_STATE_INACTIVE;
+ }
+
+ return BONDING_SLAVE_STATE_NA;
+}
+
+static bool is_upper_dev_rcu(struct net_device *dev, struct net_device *upper)
+{
+ struct net_device *_upper = NULL;
+ struct list_head *iter;
+
+ netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
+ if (_upper == upper)
+ break;
+
+ return _upper == upper;
+}
+
+#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
+ BONDING_SLAVE_STATE_NA)
+static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = (struct net_device *)cookie;
+ struct net_device *real_dev;
+ int res;
+
+ if (!rdma_ndev)
+ return 0;
+
+ rcu_read_lock();
+ real_dev = rdma_vlan_dev_real_dev(event_ndev);
+ if (!real_dev)
+ real_dev = event_ndev;
+
+ res = ((is_upper_dev_rcu(rdma_ndev, event_ndev) &&
+ (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
+ REQUIRED_BOND_STATES)) ||
+ real_dev == rdma_ndev);
+
+ rcu_read_unlock();
+ return res;
+}
+
+static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *master_dev;
+ int res;
+
+ if (!rdma_ndev)
+ return 0;
+
+ rcu_read_lock();
+ master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
+ res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
+ BONDING_SLAVE_STATE_INACTIVE;
+ rcu_read_unlock();
+
+ return res;
+}
+
+static int pass_all_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ return 1;
+}
+
+static int upper_device_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = (struct net_device *)cookie;
+ int res;
+
+ if (!rdma_ndev)
+ return 0;
+
+ if (rdma_ndev == event_ndev)
+ return 1;
+
+ rcu_read_lock();
+ res = is_upper_dev_rcu(rdma_ndev, event_ndev);
+ rcu_read_unlock();
+
+ return res;
+}
+
+static void update_gid_ip(enum gid_op_type gid_op,
+ struct ib_device *ib_dev,
+ u8 port, struct net_device *ndev,
+ struct sockaddr *addr)
+{
+ union ib_gid gid;
+ struct ib_gid_attr gid_attr;
+
+ rdma_ip2gid(addr, &gid);
+ memset(&gid_attr, 0, sizeof(gid_attr));
+ gid_attr.ndev = ndev;
+
+ update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
+}
+
+static void enum_netdev_default_gids(struct ib_device *ib_dev,
+ u8 port, struct net_device *event_ndev,
+ struct net_device *rdma_ndev)
+{
+ rcu_read_lock();
+ if (!rdma_ndev ||
+ ((rdma_ndev != event_ndev &&
+ !is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
+ is_eth_active_slave_of_bonding_rcu(rdma_ndev,
+ netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
+ BONDING_SLAVE_STATE_INACTIVE)) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
+ IB_CACHE_GID_DEFAULT_MODE_SET);
+}
+
+static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
+ u8 port,
+ struct net_device *event_ndev,
+ struct net_device *rdma_ndev)
+{
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
+
+ if (!rdma_ndev)
+ return;
+
+ if (!real_dev)
+ real_dev = event_ndev;
+
+ rcu_read_lock();
+
+ if (is_upper_dev_rcu(rdma_ndev, event_ndev) &&
+ is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
+ BONDING_SLAVE_STATE_INACTIVE) {
+ rcu_read_unlock();
+
+ ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
+ } else {
+ rcu_read_unlock();
+ }
+}
+
+static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
+ u8 port, struct net_device *ndev)
+{
+ struct in_device *in_dev;
+
+ if (ndev->reg_state >= NETREG_UNREGISTERING)
+ return;
+
+ in_dev = in_dev_get(ndev);
+ if (!in_dev)
+ return;
+
+ for_ifa(in_dev) {
+ struct sockaddr_in ip;
+
+ ip.sin_family = AF_INET;
+ ip.sin_addr.s_addr = ifa->ifa_address;
+ update_gid_ip(GID_ADD, ib_dev, port, ndev,
+ (struct sockaddr *)&ip);
+ }
+ endfor_ifa(in_dev);
+
+ in_dev_put(in_dev);
+}
+
+static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
+ u8 port, struct net_device *ndev)
+{
+ struct inet6_ifaddr *ifp;
+ struct inet6_dev *in6_dev;
+ struct sin6_list {
+ struct list_head list;
+ struct sockaddr_in6 sin6;
+ };
+ struct sin6_list *sin6_iter;
+ struct sin6_list *sin6_temp;
+ struct ib_gid_attr gid_attr = {.ndev = ndev};
+ LIST_HEAD(sin6_list);
+
+ if (ndev->reg_state >= NETREG_UNREGISTERING)
+ return;
+
+ in6_dev = in6_dev_get(ndev);
+ if (!in6_dev)
+ return;
+
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+
+ if (!entry) {
+ pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+ continue;
+ }
+
+ entry->sin6.sin6_family = AF_INET6;
+ entry->sin6.sin6_addr = ifp->addr;
+ list_add_tail(&entry->list, &sin6_list);
+ }
+ read_unlock_bh(&in6_dev->lock);
+
+ in6_dev_put(in6_dev);
+
+ list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
+ union ib_gid gid;
+
+ rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
+ update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
+ list_del(&sin6_iter->list);
+ kfree(sin6_iter);
+ }
+}
+
+static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *ndev)
+{
+ enum_netdev_ipv4_ips(ib_dev, port, ndev);
+ if (IS_ENABLED(CONFIG_IPV6))
+ enum_netdev_ipv6_ips(ib_dev, port, ndev);
+}
+
+static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = (struct net_device *)cookie;
+
+ enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
+ _add_netdev_ips(ib_dev, port, event_ndev);
+}
+
+static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = (struct net_device *)cookie;
+
+ ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
+}
+
+static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
+ u8 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct net *net;
+ struct net_device *ndev;
+
+ /* Lock the rtnl to make sure the netdevs do not move under
+ * our feet
+ */
+ rtnl_lock();
+ for_each_net(net)
+ for_each_netdev(net, ndev)
+ if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
+ add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
+ rtnl_unlock();
+}
+
+/* This function will rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices. */
+int roce_rescan_device(struct ib_device *ib_dev)
+{
+ ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
+ enum_all_gids_of_dev_cb, NULL);
+
+ return 0;
+}
+
+static void callback_for_addr_gid_device_scan(struct ib_device *device,
+ u8 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct update_gid_event_work *parsed = cookie;
+
+ return update_gid(parsed->gid_op, device,
+ port, &parsed->gid,
+ &parsed->gid_attr);
+}
+
+static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+ void *cookie,
+ void (*handle_netdev)(struct ib_device *ib_dev,
+ u8 port,
+ struct net_device *ndev))
+{
+ struct net_device *ndev = (struct net_device *)cookie;
+ struct upper_list {
+ struct list_head list;
+ struct net_device *upper;
+ };
+ struct net_device *upper;
+ struct list_head *iter;
+ struct upper_list *upper_iter;
+ struct upper_list *upper_temp;
+ LIST_HEAD(upper_list);
+
+ rcu_read_lock();
+ netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
+ struct upper_list *entry = kmalloc(sizeof(*entry),
+ GFP_ATOMIC);
+
+ if (!entry) {
+ pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+ continue;
+ }
+
+ list_add_tail(&entry->list, &upper_list);
+ dev_hold(upper);
+ entry->upper = upper;
+ }
+ rcu_read_unlock();
+
+ handle_netdev(ib_dev, port, ndev);
+ list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
+ list) {
+ handle_netdev(ib_dev, port, upper_iter->upper);
+ dev_put(upper_iter->upper);
+ list_del(&upper_iter->list);
+ kfree(upper_iter);
+ }
+}
+
+static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+ struct net_device *event_ndev)
+{
+ ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
+}
+
+static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
+}
+
+static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
+}
+
+static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev,
+ void *cookie)
+{
+ struct net_device *master_ndev;
+
+ rcu_read_lock();
+ master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
+ if (master_ndev)
+ dev_hold(master_ndev);
+ rcu_read_unlock();
+
+ if (master_ndev) {
+ bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
+ rdma_ndev);
+ dev_put(master_ndev);
+ }
+}
+
+static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+{
+ struct net_device *event_ndev = (struct net_device *)cookie;
+
+ bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
+}
+
+/* The following functions operate on all IB devices. netdevice_event and
+ * addr_event execute ib_enum_all_roce_netdevs through a work item.
+ * ib_enum_all_roce_netdevs iterates through all IB devices.
+ */
+
+static void netdevice_event_work_handler(struct work_struct *_work)
+{
+ struct netdev_event_work *work =
+ container_of(_work, struct netdev_event_work, work);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
+ ib_enum_all_roce_netdevs(work->cmds[i].filter,
+ work->cmds[i].filter_ndev,
+ work->cmds[i].cb,
+ work->cmds[i].ndev);
+ dev_put(work->cmds[i].ndev);
+ dev_put(work->cmds[i].filter_ndev);
+ }
+
+ kfree(work);
+}
+
+static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
+ struct net_device *ndev)
+{
+ unsigned int i;
+ struct netdev_event_work *ndev_work =
+ kmalloc(sizeof(*ndev_work), GFP_KERNEL);
+
+ if (!ndev_work) {
+ pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+ return NOTIFY_DONE;
+ }
+
+ memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
+ for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
+ if (!ndev_work->cmds[i].ndev)
+ ndev_work->cmds[i].ndev = ndev;
+ if (!ndev_work->cmds[i].filter_ndev)
+ ndev_work->cmds[i].filter_ndev = ndev;
+ dev_hold(ndev_work->cmds[i].ndev);
+ dev_hold(ndev_work->cmds[i].filter_ndev);
+ }
+ INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
+
+ queue_work(ib_wq, &ndev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static const struct netdev_event_work_cmd add_cmd = {
+ .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
+static const struct netdev_event_work_cmd add_cmd_upper_ips = {
+ .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+
+static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
+ struct netdev_event_work_cmd *cmds)
+{
+ static const struct netdev_event_work_cmd upper_ips_del_cmd = {
+ .cb = del_netdev_upper_ips, .filter = upper_device_filter};
+ static const struct netdev_event_work_cmd bonding_default_del_cmd = {
+ .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};
+
+ if (changeupper_info->linking == false) {
+ cmds[0] = upper_ips_del_cmd;
+ cmds[0].ndev = changeupper_info->upper_dev;
+ cmds[1] = add_cmd;
+ } else {
+ cmds[0] = bonding_default_del_cmd;
+ cmds[0].ndev = changeupper_info->upper_dev;
+ cmds[1] = add_cmd_upper_ips;
+ cmds[1].ndev = changeupper_info->upper_dev;
+ cmds[1].filter_ndev = changeupper_info->upper_dev;
+ }
+}
+
+static int netdevice_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ static const struct netdev_event_work_cmd del_cmd = {
+ .cb = del_netdev_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
+ .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
+ static const struct netdev_event_work_cmd default_del_cmd = {
+ .cb = del_netdev_default_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
+ .cb = del_netdev_upper_ips, .filter = upper_device_filter};
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };
+
+ if (ndev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_UP:
+ cmds[0] = bonding_default_del_cmd_join;
+ cmds[1] = add_cmd;
+ break;
+
+ case NETDEV_UNREGISTER:
+ if (ndev->reg_state < NETREG_UNREGISTERED)
+ cmds[0] = del_cmd;
+ else
+ return NOTIFY_DONE;
+ break;
+
+ case NETDEV_CHANGEADDR:
+ cmds[0] = default_del_cmd;
+ cmds[1] = add_cmd;
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ netdevice_event_changeupper(
+ container_of(ptr, struct netdev_notifier_changeupper_info, info),
+ cmds);
+ break;
+
+ case NETDEV_BONDING_FAILOVER:
+ cmds[0] = bonding_event_ips_del_cmd;
+ cmds[1] = bonding_default_del_cmd_join;
+ cmds[2] = add_cmd_upper_ips;
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return netdevice_queue_work(cmds, ndev);
+}
+
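+/* Runs in process context: apply a single GID add/del across all IB ports
+ * attached to the netdev recorded in the work item.
+ */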
+static void update_gid_event_work_handler(struct work_struct *_work)
+{
+ struct update_gid_event_work *work =
+ container_of(_work, struct update_gid_event_work, work);
+
+ ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
+ callback_for_addr_gid_device_scan, work);
+
+ dev_put(work->gid_attr.ndev);
+ kfree(work);
+}
+
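+/* Translate an IPv4/IPv6 address notification into a GID add/del and hand
+ * it off to a work item. The allocation uses GFP_ATOMIC because the
+ * address notifiers may run in atomic context.
+ */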
+static int addr_event(struct notifier_block *this, unsigned long event,
+ struct sockaddr *sa, struct net_device *ndev)
+{
+ struct update_gid_event_work *work;
+ enum gid_op_type gid_op;
+
+ if (ndev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ gid_op = GID_ADD;
+ break;
+
+ case NETDEV_DOWN:
+ gid_op = GID_DEL;
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+ return NOTIFY_DONE;
+ }
+
+ INIT_WORK(&work->work, update_gid_event_work_handler);
+
+ rdma_ip2gid(sa, &work->gid);
+ work->gid_op = gid_op;
+
+ memset(&work->gid_attr, 0, sizeof(work->gid_attr));
+ dev_hold(ndev);
+ work->gid_attr.ndev = ndev;
+
+ queue_work(ib_wq, &work->work);
+
+ return NOTIFY_DONE;
+}
+
+static int inetaddr_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct sockaddr_in in;
+ struct net_device *ndev;
+ struct in_ifaddr *ifa = ptr;
+
+ in.sin_family = AF_INET;
+ in.sin_addr.s_addr = ifa->ifa_address;
+ ndev = ifa->ifa_dev->dev;
+
+ return addr_event(this, event, (struct sockaddr *)&in, ndev);
+}
+
+static int inet6addr_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct sockaddr_in6 in6;
+ struct net_device *ndev;
+ struct inet6_ifaddr *ifa6 = ptr;
+
+ in6.sin6_family = AF_INET6;
+ in6.sin6_addr = ifa6->addr;
+ ndev = ifa6->idev->dev;
+
+ return addr_event(this, event, (struct sockaddr *)&in6, ndev);
+}
+
+static struct notifier_block nb_netdevice = {
+ .notifier_call = netdevice_event
+};
+
+static struct notifier_block nb_inetaddr = {
+ .notifier_call = inetaddr_event
+};
+
+static struct notifier_block nb_inet6addr = {
+ .notifier_call = inet6addr_event
+};
+
+int __init roce_gid_mgmt_init(void)
+{
+ register_inetaddr_notifier(&nb_inetaddr);
+ if (IS_ENABLED(CONFIG_IPV6))
+ register_inet6addr_notifier(&nb_inet6addr);
+ /* We rely on the netdevice notifier to enumerate all
+ * existing devices in the system. Register this notifier
+ * last to make sure we will not miss any IP add/del
+ * callbacks.
+ */
+ register_netdevice_notifier(&nb_netdevice);
+
+ return 0;
+}
+
+void __exit roce_gid_mgmt_cleanup(void)
+{
+ if (IS_ENABLED(CONFIG_IPV6))
+ unregister_inet6addr_notifier(&nb_inet6addr);
+ unregister_inetaddr_notifier(&nb_inetaddr);
+ unregister_netdevice_notifier(&nb_netdevice);
+ /* Ensure all gid deletion tasks complete before we go down,
+ * to avoid any reference to freed memory. By the time
+ * ib-core is removed, all physical devices have been removed,
+ * so no issue with remaining hardware contexts.
+ */
+}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0fae85062a65..8c014b33d8e0 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -45,12 +45,21 @@
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
+#include <rdma/rdma_netlink.h>
+#include <net/netlink.h>
+#include <uapi/rdma/ib_user_sa.h>
+#include <rdma/ib_marshall.h>
#include "sa.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");
+#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
+#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
+#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
+static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
+
struct ib_sa_sm_ah {
struct ib_ah *ah;
struct kref ref;
@@ -80,8 +89,16 @@ struct ib_sa_query {
struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
int id;
+ u32 flags;
+ struct list_head list; /* Local svc request list */
+ u32 seq; /* Local svc request sequence number */
+ unsigned long timeout; /* Local svc timeout */
+ u8 path_use; /* How will the pathrecord be used */
};
+#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
+#define IB_SA_CANCEL 0x00000002
+
struct ib_sa_service_query {
void (*callback)(int, struct ib_sa_service_rec *, void *);
void *context;
@@ -106,8 +123,28 @@ struct ib_sa_mcmember_query {
struct ib_sa_query sa_query;
};
+static LIST_HEAD(ib_nl_request_list);
+static DEFINE_SPINLOCK(ib_nl_request_lock);
+static atomic_t ib_nl_sa_request_seq;
+static struct workqueue_struct *ib_nl_wq;
+static struct delayed_work ib_nl_timed_work;
+static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
+ [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
+ .len = sizeof(struct ib_path_rec_data)},
+ [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
+ [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
+ [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+ .len = sizeof(struct rdma_nla_ls_gid)},
+ [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
+ .len = sizeof(struct rdma_nla_ls_gid)},
+ [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
+ [LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
+ [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
+};
+
+
static void ib_sa_add_one(struct ib_device *device);
-static void ib_sa_remove_one(struct ib_device *device);
+static void ib_sa_remove_one(struct ib_device *device, void *client_data);
static struct ib_client sa_client = {
.name = "sa",
@@ -381,6 +418,427 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
+{
+ query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
+}
+
+static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
+{
+ return (query->flags & IB_SA_CANCEL);
+}
+
+static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
+ struct ib_sa_query *query)
+{
+ struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
+ struct ib_sa_mad *mad = query->mad_buf->mad;
+ ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
+ u16 val16;
+ u64 val64;
+ struct rdma_ls_resolve_header *header;
+
+ query->mad_buf->context[1] = NULL;
+
+ /* Construct the family header first */
+ header = (struct rdma_ls_resolve_header *)
+ skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ memcpy(header->device_name, query->port->agent->device->name,
+ LS_DEVICE_NAME_MAX);
+ header->port_num = query->port->port_num;
+
+ if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
+ sa_rec->reversible != 0)
+ query->path_use = LS_RESOLVE_PATH_USE_GMP;
+ else
+ query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
+ header->path_use = query->path_use;
+
+ /* Now build the attributes */
+ if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
+ val64 = be64_to_cpu(sa_rec->service_id);
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
+ sizeof(val64), &val64);
+ }
+ if (comp_mask & IB_SA_PATH_REC_DGID)
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
+ sizeof(sa_rec->dgid), &sa_rec->dgid);
+ if (comp_mask & IB_SA_PATH_REC_SGID)
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
+ sizeof(sa_rec->sgid), &sa_rec->sgid);
+ if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
+ sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
+
+ if (comp_mask & IB_SA_PATH_REC_PKEY) {
+ val16 = be16_to_cpu(sa_rec->pkey);
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
+ sizeof(val16), &val16);
+ }
+ if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
+ val16 = be16_to_cpu(sa_rec->qos_class);
+ nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
+ sizeof(val16), &val16);
+ }
+}
+
+static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
+{
+ int len = 0;
+
+ if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
+ len += nla_total_size(sizeof(u64));
+ if (comp_mask & IB_SA_PATH_REC_DGID)
+ len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
+ if (comp_mask & IB_SA_PATH_REC_SGID)
+ len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
+ if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
+ len += nla_total_size(sizeof(u8));
+ if (comp_mask & IB_SA_PATH_REC_PKEY)
+ len += nla_total_size(sizeof(u16));
+ if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
+ len += nla_total_size(sizeof(u16));
+
+ /*
+ * Make sure that at least some of the required comp_mask bits are
+ * set.
+ */
+ if (WARN_ON(len == 0))
+ return len;
+
+ /* Add the family header */
+ len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
+
+ return len;
+}
+
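+/* Build a pathrecord resolution request and multicast it to the local
+ * service netlink group. Returns the message length on success, 0 if the
+ * multicast failed, or a negative errno if the message could not be built.
+ */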
+static int ib_nl_send_msg(struct ib_sa_query *query)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ void *data;
+ int ret = 0;
+ struct ib_sa_mad *mad;
+ int len;
+
+ mad = query->mad_buf->mad;
+ len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
+ if (len <= 0)
+ return -EMSGSIZE;
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Put nlmsg header only for now */
+ data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
+ RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
+ if (!data) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ /* Add attributes */
+ ib_nl_set_path_rec_attrs(skb, query);
+
+ /* Repair the nlmsg header length */
+ nlmsg_end(skb, nlh);
+
+ ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+ if (!ret)
+ ret = len;
+ else
+ ret = 0;
+
+ return ret;
+}
+
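+/* Send the netlink request and, under ib_nl_request_lock, add it to the
+ * pending list. The first entry on the list arms the shared timeout work.
+ */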
+static int ib_nl_make_request(struct ib_sa_query *query)
+{
+ unsigned long flags;
+ unsigned long delay;
+ int ret;
+
+ INIT_LIST_HEAD(&query->list);
+ query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ ret = ib_nl_send_msg(query);
+ if (ret <= 0) {
+ ret = -EIO;
+ goto request_out;
+ } else {
+ ret = 0;
+ }
+
+ delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
+ query->timeout = delay + jiffies;
+ list_add_tail(&query->list, &ib_nl_request_list);
+ /* Start the timeout if this is the only request */
+ if (ib_nl_request_list.next == &query->list)
+ queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
+
+request_out:
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
+ return ret;
+}
+
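+/* Mark a pending netlink request as cancelled and kick the timeout worker
+ * to complete it; returns 1 if the query was found on the request list.
+ */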
+static int ib_nl_cancel_request(struct ib_sa_query *query)
+{
+ unsigned long flags;
+ struct ib_sa_query *wait_query;
+ int found = 0;
+
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ list_for_each_entry(wait_query, &ib_nl_request_list, list) {
+ /* Let the timeout take care of the callback */
+ if (query == wait_query) {
+ query->flags |= IB_SA_CANCEL;
+ query->timeout = jiffies;
+ list_move(&query->list, &ib_nl_request_list);
+ found = 1;
+ mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
+ return found;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc);
+
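+/* Convert a successful netlink resolution into a MAD-style response: copy
+ * the first path record matching the use mask into the query's MAD and
+ * complete it via the normal send handler path.
+ */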
+static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
+ const struct nlmsghdr *nlh)
+{
+ struct ib_mad_send_wc mad_send_wc;
+ struct ib_sa_mad *mad = NULL;
+ const struct nlattr *head, *curr;
+ struct ib_path_rec_data *rec;
+ int len, rem;
+ u32 mask = 0;
+ int status = -EIO;
+
+ if (query->callback) {
+ head = (const struct nlattr *) nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+ switch (query->path_use) {
+ case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+ mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+ break;
+
+ case LS_RESOLVE_PATH_USE_ALL:
+ case LS_RESOLVE_PATH_USE_GMP:
+ default:
+ mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+ IB_PATH_BIDIRECTIONAL;
+ break;
+ }
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
+ rec = nla_data(curr);
+ /*
+ * Get the first one. In the future, we may
+ * need to get up to 6 pathrecords.
+ */
+ if ((rec->flags & mask) == mask) {
+ mad = query->mad_buf->mad;
+ mad->mad_hdr.method |=
+ IB_MGMT_METHOD_RESP;
+ memcpy(mad->data, rec->path_rec,
+ sizeof(rec->path_rec));
+ status = 0;
+ break;
+ }
+ }
+ }
+ query->callback(query, status, mad);
+ }
+
+ mad_send_wc.send_buf = query->mad_buf;
+ mad_send_wc.status = IB_WC_SUCCESS;
+ send_handler(query->mad_buf->mad_agent, &mad_send_wc);
+}
+
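+/* Timeout worker: expire requests at the head of the list, falling back
+ * to a regular MAD send unless the query was cancelled in the meantime.
+ */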
+static void ib_nl_request_timeout(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ib_sa_query *query;
+ unsigned long delay;
+ struct ib_mad_send_wc mad_send_wc;
+ int ret;
+
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ while (!list_empty(&ib_nl_request_list)) {
+ query = list_entry(ib_nl_request_list.next,
+ struct ib_sa_query, list);
+
+ if (time_after(query->timeout, jiffies)) {
+ delay = query->timeout - jiffies;
+ if ((long)delay <= 0)
+ delay = 1;
+ queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
+ break;
+ }
+
+ list_del(&query->list);
+ ib_sa_disable_local_svc(query);
+ /* Hold the lock to protect against query cancellation */
+ if (ib_sa_query_cancelled(query))
+ ret = -1;
+ else
+ ret = ib_post_send_mad(query->mad_buf, NULL);
+ if (ret) {
+ mad_send_wc.send_buf = query->mad_buf;
+ mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ send_handler(query->port->agent, &mad_send_wc);
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+}
+
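+/* RDMA_NL_LS_OP_SET_TIMEOUT handler: clamp the requested timeout between
+ * IB_SA_LOCAL_SVC_TIMEOUT_MIN and _MAX and shift the deadlines of all
+ * pending requests by the resulting delta.
+ */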
+static int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
+ int timeout, delta, abs_delta;
+ const struct nlattr *attr;
+ unsigned long flags;
+ struct ib_sa_query *query;
+ long delay = 0;
+ struct nlattr *tb[LS_NLA_TYPE_MAX];
+ int ret;
+
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy);
+ attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
+ if (ret || !attr)
+ goto settimeout_out;
+
+ timeout = *(int *) nla_data(attr);
+ if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
+ timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
+ if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
+ timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+
+ delta = timeout - sa_local_svc_timeout_ms;
+ if (delta < 0)
+ abs_delta = -delta;
+ else
+ abs_delta = delta;
+
+ if (delta != 0) {
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ sa_local_svc_timeout_ms = timeout;
+ list_for_each_entry(query, &ib_nl_request_list, list) {
+ if (delta < 0 && abs_delta > query->timeout)
+ query->timeout = 0;
+ else
+ query->timeout += delta;
+
+ /* Get the new delay from the first entry */
+ if (!delay) {
+ delay = query->timeout - jiffies;
+ if (delay <= 0)
+ delay = 1;
+ }
+ }
+ if (delay)
+ mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
+ (unsigned long)delay);
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ }
+
+settimeout_out:
+ return skb->len;
+}
+
+static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
+{
+ struct nlattr *tb[LS_NLA_TYPE_MAX];
+ int ret;
+
+ if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+ return 0;
+
+ ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy);
+ if (ret)
+ return 0;
+
+ return 1;
+}
+
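+/* RDMA_NL_LS_OP_RESOLVE response handler: match the response to a pending
+ * query by sequence number; on error or malformed attributes the query
+ * falls back to a MAD send to the SA.
+ */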
+static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
+ unsigned long flags;
+ struct ib_sa_query *query;
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_send_wc mad_send_wc;
+ int found = 0;
+ int ret;
+
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ list_for_each_entry(query, &ib_nl_request_list, list) {
+ /*
+ * If the query is cancelled, let the timeout routine
+ * take care of it.
+ */
+ if (nlh->nlmsg_seq == query->seq) {
+ found = !ib_sa_query_cancelled(query);
+ if (found)
+ list_del(&query->list);
+ break;
+ }
+ }
+
+ if (!found) {
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ goto resp_out;
+ }
+
+ send_buf = query->mad_buf;
+
+ if (!ib_nl_is_good_resolve_resp(nlh)) {
+ /* If the result is a failure, send out the packet via IB */
+ ib_sa_disable_local_svc(query);
+ ret = ib_post_send_mad(query->mad_buf, NULL);
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ if (ret) {
+ mad_send_wc.send_buf = send_buf;
+ mad_send_wc.status = IB_WC_GENERAL_ERR;
+ send_handler(query->port->agent, &mad_send_wc);
+ }
+ } else {
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+ ib_nl_process_good_resolve_rsp(query, nlh);
+ }
+
+resp_out:
+ return skb->len;
+}
+
+static struct ibnl_client_cbs ib_sa_cb_table[] = {
+ [RDMA_NL_LS_OP_RESOLVE] = {
+ .dump = ib_nl_handle_resolve_resp,
+ .module = THIS_MODULE },
+ [RDMA_NL_LS_OP_SET_TIMEOUT] = {
+ .dump = ib_nl_handle_set_timeout,
+ .module = THIS_MODULE },
+};
+
static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -502,7 +960,13 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
mad_buf = query->mad_buf;
spin_unlock_irqrestore(&idr_lock, flags);
- ib_cancel_mad(agent, mad_buf);
+ /*
+ * If the query is still on the netlink request list, schedule
+ * it to be cancelled by the timeout routine. Otherwise, it has been
+ * sent to the MAD layer and has to be cancelled from there.
+ */
+ if (!ib_nl_cancel_request(query))
+ ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
@@ -639,6 +1103,14 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
query->mad_buf->context[0] = query;
query->id = id;
+ if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
+ if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (!ib_nl_make_request(query))
+ return id;
+ }
+ ib_sa_disable_local_svc(query);
+ }
+
ret = ib_post_send_mad(query->mad_buf, NULL);
if (ret) {
spin_lock_irqsave(&idr_lock, flags);
@@ -740,7 +1212,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
- query = kmalloc(sizeof *query, gfp_mask);
+ query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -767,6 +1239,9 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
*sa_query = &query->sa_query;
+ query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
+ query->sa_query.mad_buf->context[1] = rec;
+
ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
if (ret < 0)
goto err2;
@@ -862,7 +1337,7 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
method != IB_SA_METHOD_DELETE)
return -EINVAL;
- query = kmalloc(sizeof *query, gfp_mask);
+ query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -954,7 +1429,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
- query = kmalloc(sizeof *query, gfp_mask);
+ query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -1051,7 +1526,7 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
- query = kmalloc(sizeof *query, gfp_mask);
+ query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -1156,12 +1631,8 @@ static void ib_sa_add_one(struct ib_device *device)
int s, e, i;
int count = 0;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- s = e = 0;
- else {
- s = 1;
- e = device->phys_port_cnt;
- }
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
@@ -1225,9 +1696,9 @@ free:
return;
}
-static void ib_sa_remove_one(struct ib_device *device)
+static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_device *sa_dev = client_data;
int i;
if (!sa_dev)
@@ -1255,6 +1726,8 @@ static int __init ib_sa_init(void)
get_random_bytes(&tid, sizeof tid);
+ atomic_set(&ib_nl_sa_request_seq, 0);
+
ret = ib_register_client(&sa_client);
if (ret) {
printk(KERN_ERR "Couldn't register ib_sa client\n");
@@ -1267,7 +1740,25 @@ static int __init ib_sa_init(void)
goto err2;
}
+ ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+ if (!ib_nl_wq) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
+ ib_sa_cb_table)) {
+ pr_err("Failed to add netlink callback\n");
+ ret = -EINVAL;
+ goto err4;
+ }
+ INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
+
return 0;
+err4:
+ destroy_workqueue(ib_nl_wq);
+err3:
+ mcast_cleanup();
err2:
ib_unregister_client(&sa_client);
err1:
@@ -1276,6 +1767,10 @@ err1:
static void __exit ib_sa_cleanup(void)
{
+ ibnl_remove_client(RDMA_NL_LS);
+ cancel_delayed_work(&ib_nl_timed_work);
+ flush_workqueue(ib_nl_wq);
+ destroy_workqueue(ib_nl_wq);
mcast_cleanup();
ib_unregister_client(&sa_client);
idr_destroy(&query_idr);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 368a561d1a5d..f19b23817c2b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-9:2 */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == hop_cnt) {
/* return_path set when received */
(*hop_ptr)++;
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
(*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
if (*hop_ptr == 1) {
(*hop_ptr)--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_slid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-9:2 -- intermediate hop */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return_path[*hop_ptr] = port_num;
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
return IB_SMI_HANDLE;
}
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ?
- IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac49b4..33c91c8a16e9 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
};
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ed6b6c85c334..34cdd74b0a17 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -457,29 +457,6 @@ static struct kobj_type port_type = {
.default_attrs = port_default_attrs
};
-static void ib_device_release(struct device *device)
-{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- kfree(dev->port_immutable);
- kfree(dev);
-}
-
-static int ib_device_uevent(struct device *device,
- struct kobj_uevent_env *env)
-{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- if (add_uevent_var(env, "NAME=%s", dev->name))
- return -ENOMEM;
-
- /*
- * It would be nice to pass the node GUID with the event...
- */
-
- return 0;
-}
-
static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
struct port_attribute *, char *buf),
@@ -702,12 +679,6 @@ static struct device_attribute *ib_class_attributes[] = {
&dev_attr_node_desc
};
-static struct class ib_class = {
- .name = "infiniband",
- .dev_release = ib_device_release,
- .dev_uevent = ib_device_uevent,
-};
-
/* Show a given an attribute in the statistics group */
static ssize_t show_protocol_stat(const struct device *device,
struct device_attribute *attr, char *buf,
@@ -846,14 +817,12 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- class_dev->class = &ib_class;
- class_dev->parent = device->dma_device;
- dev_set_name(class_dev, "%s", device->name);
- dev_set_drvdata(class_dev, device);
-
- INIT_LIST_HEAD(&device->port_list);
+ device->dev.parent = device->dma_device;
+ ret = dev_set_name(class_dev, "%s", device->name);
+ if (ret)
+ return ret;
- ret = device_register(class_dev);
+ ret = device_add(class_dev);
if (ret)
goto err;
@@ -870,7 +839,7 @@ int ib_device_register_sysfs(struct ib_device *device,
goto err_put;
}
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
@@ -916,13 +885,3 @@ void ib_device_unregister_sysfs(struct ib_device *device)
device_unregister(&device->dev);
}
-
-int ib_sysfs_setup(void)
-{
- return class_register(&ib_class);
-}
-
-void ib_sysfs_cleanup(void)
-{
- class_unregister(&ib_class);
-}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 62c24b1452b8..6b4e8a008bc0 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -109,7 +109,7 @@ enum {
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static void ib_ucm_add_one(struct ib_device *device);
-static void ib_ucm_remove_one(struct ib_device *device);
+static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
static struct ib_client ucm_client = {
.name = "ucm",
@@ -658,8 +658,7 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
if (result)
goto out;
- result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask,
- NULL);
+ result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
out:
ib_ucm_ctx_put(ctx);
return result;
@@ -1193,6 +1192,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1202,7 @@ static void ib_ucm_release_dev(struct device *dev)
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map);
else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev);
}
@@ -1226,7 +1226,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static int find_overflow_devnum(void)
{
int ret;
@@ -1310,9 +1309,9 @@ err:
return;
}
-static void ib_ucm_remove_one(struct ib_device *device)
+static void ib_ucm_remove_one(struct ib_device *device, void *client_data)
{
- struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
+ struct ib_ucm_device *ucm_dev = client_data;
if (!ucm_dev)
return;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ad45469f7582..a53fc9b01c69 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -74,6 +74,7 @@ struct ucma_file {
struct list_head ctx_list;
struct list_head event_list;
wait_queue_head_t poll_wait;
+ struct workqueue_struct *close_wq;
};
struct ucma_context {
@@ -89,6 +90,13 @@ struct ucma_context {
struct list_head list;
struct list_head mc_list;
+ /* mark that the device is in the process of destroying the internal
+ * HW resources, protected by the global mut
+ */
+ int closing;
+ /* sync between removal event and id destroy, protected by file mut */
+ int destroying;
+ struct work_struct close_work;
};
struct ucma_multicast {
@@ -107,6 +115,7 @@ struct ucma_event {
struct list_head list;
struct rdma_cm_id *cm_id;
struct rdma_ucm_event_resp resp;
+ struct work_struct close_work;
};
static DEFINE_MUTEX(mut);
@@ -132,8 +141,12 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
mutex_lock(&mut);
ctx = _ucma_find_context(id, file);
- if (!IS_ERR(ctx))
- atomic_inc(&ctx->ref);
+ if (!IS_ERR(ctx)) {
+ if (ctx->closing)
+ ctx = ERR_PTR(-EIO);
+ else
+ atomic_inc(&ctx->ref);
+ }
mutex_unlock(&mut);
return ctx;
}
@@ -144,6 +157,28 @@ static void ucma_put_ctx(struct ucma_context *ctx)
complete(&ctx->comp);
}
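+/* Deferred teardown of a cm_id that only lives on an event list (a
+ * connect request that was never accepted): destroy the id and free the
+ * event.
+ */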
+static void ucma_close_event_id(struct work_struct *work)
+{
+ struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
+
+ rdma_destroy_id(uevent_close->cm_id);
+ kfree(uevent_close);
+}
+
+static void ucma_close_id(struct work_struct *work)
+{
+ struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
+
+ /* Once all inflight tasks are finished, we close all underlying
+ * resources. The context is still alive until it is explicitly
+ * destroyed by its creator.
+ */
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
+ /* No new events will be generated after destroying the id. */
+ rdma_destroy_id(ctx->cm_id);
+}
+
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
struct ucma_context *ctx;
@@ -152,6 +187,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
if (!ctx)
return NULL;
+ INIT_WORK(&ctx->close_work, ucma_close_id);
atomic_set(&ctx->ref, 1);
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
@@ -242,6 +278,44 @@ static void ucma_set_event_context(struct ucma_context *ctx,
}
}
+/* Called with file->mut locked for the relevant context. */
+static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
+{
+ struct ucma_context *ctx = cm_id->context;
+ struct ucma_event *con_req_eve;
+ int event_found = 0;
+
+ if (ctx->destroying)
+ return;
+
+ /* Only if the context still points to this cm_id does it own it and
+ * can it be queued to be closed. Otherwise the cm_id is an inflight
+ * one that sits on the context's event list, pending to be detached
+ * and reattached to its new context as part of ucma_get_event; that
+ * case is handled separately below.
+ */
+ if (ctx->cm_id == cm_id) {
+ mutex_lock(&mut);
+ ctx->closing = 1;
+ mutex_unlock(&mut);
+ queue_work(ctx->file->close_wq, &ctx->close_work);
+ return;
+ }
+
+ list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
+ if (con_req_eve->cm_id == cm_id &&
+ con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+ list_del(&con_req_eve->list);
+ INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
+ queue_work(ctx->file->close_wq, &con_req_eve->close_work);
+ event_found = 1;
+ break;
+ }
+ }
+ if (!event_found)
+ printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+}
+
static int ucma_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
@@ -276,14 +350,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
* We ignore events for new connections until userspace has set
* their context. This can only happen if an error occurs on a
* new connection before the user accepts it. This is okay,
- * since the accept will just fail later.
+ * since the accept will just fail later. However, we do need
+ * to release the underlying HW resources in case of a device
+ * removal event.
*/
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
+ ucma_removal_event_handler(cm_id);
+
kfree(uevent);
goto out;
}
list_add_tail(&uevent->list, &ctx->file->event_list);
wake_up_interruptible(&ctx->file->poll_wait);
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
+ ucma_removal_event_handler(cm_id);
out:
mutex_unlock(&ctx->file->mut);
return ret;
@@ -442,9 +523,15 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
}
/*
- * We cannot hold file->mut when calling rdma_destroy_id() or we can
- * deadlock. We also acquire file->mut in ucma_event_handler(), and
- * rdma_destroy_id() will wait until all callbacks have completed.
+ * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
+ * this point, no new events will be reported from the hardware. However, we
+ * still need to clean up the UCMA context for this ID. Specifically, there
+ * might be events that have not yet been consumed by the user space software.
+ * These might include pending connect requests which we have not completed
+ * processing. We cannot call rdma_destroy_id while holding the lock of the
+ * context (file->mut), as it might cause a deadlock. We therefore extract all
+ * relevant events from the context pending events list while holding the
+ * mutex. After that we release them as needed.
*/
static int ucma_free_ctx(struct ucma_context *ctx)
{
@@ -452,8 +539,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
- /* No new events will be generated after destroying the id. */
- rdma_destroy_id(ctx->cm_id);
ucma_cleanup_multicast(ctx);
@@ -501,10 +586,24 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ucma_put_ctx(ctx);
- wait_for_completion(&ctx->comp);
- resp.events_reported = ucma_free_ctx(ctx);
+ mutex_lock(&ctx->file->mut);
+ ctx->destroying = 1;
+ mutex_unlock(&ctx->file->mut);
+ flush_workqueue(ctx->file->close_wq);
+ /* At this point it's guaranteed that there is no inflight
+ * closing task. */
+ mutex_lock(&mut);
+ if (!ctx->closing) {
+ mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
+ rdma_destroy_id(ctx->cm_id);
+ } else {
+ mutex_unlock(&mut);
+ }
+
+ resp.events_reported = ucma_free_ctx(ctx);
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1321,10 +1420,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-ENOENT);
else if (mc->ctx->file != file)
mc = ERR_PTR(-EINVAL);
- else {
+ else if (!atomic_inc_not_zero(&mc->ctx->ref))
+ mc = ERR_PTR(-ENXIO);
+ else
idr_remove(&multicast_idr, mc->id);
- atomic_inc(&mc->ctx->ref);
- }
mutex_unlock(&mut);
if (IS_ERR(mc)) {
@@ -1354,10 +1453,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
/* Acquire mutex's based on pointer comparison to prevent deadlock. */
if (file1 < file2) {
mutex_lock(&file1->mut);
- mutex_lock(&file2->mut);
+ mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&file2->mut);
- mutex_lock(&file1->mut);
+ mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
}
}
@@ -1529,6 +1628,7 @@ static int ucma_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&file->ctx_list);
init_waitqueue_head(&file->poll_wait);
mutex_init(&file->mut);
+ file->close_wq = create_singlethread_workqueue("ucma_close_id");
filp->private_data = file;
file->filp = filp;
@@ -1543,16 +1643,34 @@ static int ucma_close(struct inode *inode, struct file *filp)
mutex_lock(&file->mut);
list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
+ ctx->destroying = 1;
mutex_unlock(&file->mut);
mutex_lock(&mut);
idr_remove(&ctx_idr, ctx->id);
mutex_unlock(&mut);
+ flush_workqueue(file->close_wq);
+ /* At this point, once the ctx has been marked as destroying and the
+ * workqueue has been flushed, we are safe from any inflight handlers
+ * that might queue another closing task.
+ */
+ mutex_lock(&mut);
+ if (!ctx->closing) {
+ mutex_unlock(&mut);
+ /* rdma_destroy_id ensures that no event handlers are
+ * inflight for that id before releasing it.
+ */
+ rdma_destroy_id(ctx->cm_id);
+ } else {
+ mutex_unlock(&mut);
+ }
+
ucma_free_ctx(ctx);
mutex_lock(&file->mut);
}
mutex_unlock(&file->mut);
+ destroy_workqueue(file->close_wq);
kfree(file);
return 0;
}
@@ -1616,6 +1734,7 @@ static void __exit ucma_cleanup(void)
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
+ idr_destroy(&multicast_idr);
}
module_init(ucma_init);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 35567fffaa4e..57f281f8d686 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -133,7 +133,7 @@ static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
-static void ib_umad_remove_one(struct ib_device *device);
+static void ib_umad_remove_one(struct ib_device *device, void *client_data);
static void ib_umad_release_dev(struct kobject *kobj)
{
@@ -1322,9 +1322,9 @@ free:
kobject_put(&umad_dev->kobj);
}
-static void ib_umad_remove_one(struct ib_device *device)
+static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
- struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
+ struct ib_umad_device *umad_dev = client_data;
int i;
if (!umad_dev)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ba365b6d1e8d..3863d33c243d 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -85,15 +85,20 @@
*/
struct ib_uverbs_device {
- struct kref ref;
+ atomic_t refcount;
int num_comp_vectors;
struct completion comp;
struct device *dev;
- struct ib_device *ib_dev;
+ struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
+ struct kobject kobj;
+ struct srcu_struct disassociate_srcu;
+ struct mutex lists_mutex; /* protect lists */
+ struct list_head uverbs_file_list;
+ struct list_head uverbs_events_file_list;
};
struct ib_uverbs_event_file {
@@ -105,6 +110,7 @@ struct ib_uverbs_event_file {
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
+ struct list_head list;
};
struct ib_uverbs_file {
@@ -114,6 +120,8 @@ struct ib_uverbs_file {
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
struct ib_uverbs_event_file *async_file;
+ struct list_head list;
+ int is_closed;
};
struct ib_uverbs_event {
@@ -177,7 +185,9 @@ extern struct idr ib_uverbs_rule_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+ struct ib_device *ib_dev,
int is_async);
+void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file);
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
@@ -212,6 +222,7 @@ struct ib_uverbs_flow_spec {
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
+ struct ib_device *ib_dev, \
const char __user *buf, int in_len, \
int out_len)
@@ -253,6 +264,7 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
#define IB_UVERBS_DECLARE_EX_CMD(name) \
int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
+ struct ib_device *ib_dev, \
struct ib_udata *ucore, \
struct ib_udata *uhw)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bbb02ffe87df..be4cb9f04be3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -282,13 +282,13 @@ static void put_xrcd_read(struct ib_uobject *uobj)
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_get_context cmd;
struct ib_uverbs_get_context_resp resp;
struct ib_udata udata;
- struct ib_device *ibdev = file->device->ib_dev;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_device_attr dev_attr;
#endif
@@ -313,13 +313,13 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- ucontext = ibdev->alloc_ucontext(ibdev, &udata);
+ ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
if (IS_ERR(ucontext)) {
ret = PTR_ERR(ucontext);
goto err;
}
- ucontext->device = ibdev;
+ ucontext->device = ib_dev;
INIT_LIST_HEAD(&ucontext->pd_list);
INIT_LIST_HEAD(&ucontext->mr_list);
INIT_LIST_HEAD(&ucontext->mw_list);
@@ -340,7 +340,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext->odp_mrs_count = 0;
INIT_LIST_HEAD(&ucontext->no_private_counters);
- ret = ib_query_device(ibdev, &dev_attr);
+ ret = ib_query_device(ib_dev, &dev_attr);
if (ret)
goto err_free;
if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
@@ -355,7 +355,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_free;
resp.async_fd = ret;
- filp = ib_uverbs_alloc_event_file(file, 1);
+ filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
if (IS_ERR(filp)) {
ret = PTR_ERR(filp);
goto err_fd;
@@ -367,16 +367,6 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_file;
}
- file->async_file = filp->private_data;
-
- INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
- ib_uverbs_event_handler);
- ret = ib_register_event_handler(&file->event_handler);
- if (ret)
- goto err_file;
-
- kref_get(&file->async_file->ref);
- kref_get(&file->ref);
file->ucontext = ucontext;
fd_install(resp.async_fd, filp);
@@ -386,6 +376,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
return in_len;
err_file:
+ ib_uverbs_free_async_event_file(file);
fput(filp);
err_fd:
@@ -393,7 +384,7 @@ err_fd:
err_free:
put_pid(ucontext->tgid);
- ibdev->dealloc_ucontext(ucontext);
+ ib_dev->dealloc_ucontext(ucontext);
err:
mutex_unlock(&file->mutex);
@@ -401,11 +392,12 @@ err:
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
resp->fw_ver = attr->fw_ver;
- resp->node_guid = file->device->ib_dev->node_guid;
+ resp->node_guid = ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
resp->max_mr_size = attr->max_mr_size;
resp->page_size_cap = attr->page_size_cap;
@@ -443,10 +435,11 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
- resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+ resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -461,12 +454,12 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- ret = ib_query_device(file->device->ib_dev, &attr);
+ ret = ib_query_device(ib_dev, &attr);
if (ret)
return ret;
memset(&resp, 0, sizeof resp);
- copy_query_dev_fields(file, &resp, &attr);
+ copy_query_dev_fields(file, ib_dev, &resp, &attr);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -476,6 +469,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -490,7 +484,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
+ ret = ib_query_port(ib_dev, cmd.port_num, &attr);
if (ret)
return ret;
@@ -515,7 +509,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.active_width = attr.active_width;
resp.active_speed = attr.active_speed;
resp.phys_state = attr.phys_state;
- resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
+ resp.link_layer = rdma_port_get_link_layer(ib_dev,
cmd.port_num);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -526,6 +520,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -553,15 +548,15 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
down_write(&uobj->mutex);
- pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
- file->ucontext, &udata);
+ pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
}
- pd->device = file->device->ib_dev;
+ pd->device = ib_dev;
pd->uobject = uobj;
+ pd->local_mr = NULL;
atomic_set(&pd->usecnt, 0);
uobj->object = pd;
@@ -600,11 +595,13 @@ err:
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
struct ib_uverbs_dealloc_pd cmd;
struct ib_uobject *uobj;
+ struct ib_pd *pd;
int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -613,15 +610,20 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
if (!uobj)
return -EINVAL;
+ pd = uobj->object;
- ret = ib_dealloc_pd(uobj->object);
- if (!ret)
- uobj->live = 0;
-
- put_uobj_write(uobj);
+ if (atomic_read(&pd->usecnt)) {
+ ret = -EBUSY;
+ goto err_put;
+ }
+ ret = pd->device->dealloc_pd(uobj->object);
+ WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
if (ret)
- return ret;
+ goto err_put;
+
+ uobj->live = 0;
+ put_uobj_write(uobj);
idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
@@ -632,6 +634,10 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
put_uobj(uobj);
return in_len;
+
+err_put:
+ put_uobj_write(uobj);
+ return ret;
}
struct xrcd_table_entry {
@@ -720,6 +726,7 @@ static void xrcd_table_delete(struct ib_uverbs_device *dev,
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -778,15 +785,14 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
down_write(&obj->uobject.mutex);
if (!xrcd) {
- xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
- file->ucontext, &udata);
+ xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;
}
xrcd->inode = inode;
- xrcd->device = file->device->ib_dev;
+ xrcd->device = ib_dev;
atomic_set(&xrcd->usecnt, 0);
mutex_init(&xrcd->tgt_qp_mutex);
INIT_LIST_HEAD(&xrcd->tgt_qp_list);
@@ -857,6 +863,7 @@ err_tree_mutex_unlock:
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -934,6 +941,7 @@ void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1043,6 +1051,7 @@ err_free:
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1136,6 +1145,7 @@ put_uobjs:
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1174,8 +1184,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len)
+ struct ib_device *ib_dev,
+ const char __user *buf, int in_len,
+ int out_len)
{
struct ib_uverbs_alloc_mw cmd;
struct ib_uverbs_alloc_mw_resp resp;
@@ -1256,8 +1267,9 @@ err_free:
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
- const char __user *buf, int in_len,
- int out_len)
+ struct ib_device *ib_dev,
+ const char __user *buf, int in_len,
+ int out_len)
{
struct ib_uverbs_dealloc_mw cmd;
struct ib_mw *mw;
@@ -1294,6 +1306,7 @@ ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1313,7 +1326,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
return ret;
resp.fd = ret;
- filp = ib_uverbs_alloc_event_file(file, 0);
+ filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
if (IS_ERR(filp)) {
put_unused_fd(resp.fd);
return PTR_ERR(filp);
@@ -1331,6 +1344,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw,
struct ib_uverbs_ex_create_cq *cmd,
@@ -1379,14 +1393,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
attr.flags = cmd->flags;
- cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
+ cq = ib_dev->create_cq(ib_dev, &attr,
file->ucontext, uhw);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;
}
- cq->device = file->device->ib_dev;
+ cq->device = ib_dev;
cq->uobject = &obj->uobject;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
@@ -1447,6 +1461,7 @@ static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1475,7 +1490,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
cmd_ex.comp_vector = cmd.comp_vector;
cmd_ex.comp_channel = cmd.comp_channel;
- obj = create_cq(file, &ucore, &uhw, &cmd_ex,
+ obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
offsetof(typeof(cmd_ex), comp_channel) +
sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
NULL);
@@ -1498,6 +1513,7 @@ static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -1523,7 +1539,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
sizeof(resp.response_length)))
return -ENOSPC;
- obj = create_cq(file, ucore, uhw, &cmd,
+ obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
@@ -1534,6 +1550,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1597,6 +1614,7 @@ static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1648,6 +1666,7 @@ out_put:
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1670,6 +1689,7 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1722,6 +1742,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -1917,6 +1938,7 @@ err_put:
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_open_qp cmd;
@@ -2011,6 +2033,7 @@ err_put:
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2125,6 +2148,7 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2221,6 +2245,7 @@ out:
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2279,6 +2304,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2346,6 +2372,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
next->send_flags = user_wr->send_flags;
if (is_ud) {
+ if (next->opcode != IB_WR_SEND &&
+ next->opcode != IB_WR_SEND_WITH_IMM) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
file->ucontext);
if (!next->wr.ud.ah) {
@@ -2385,9 +2417,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
user_wr->wr.atomic.compare_add;
next->wr.atomic.swap = user_wr->wr.atomic.swap;
next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
+ case IB_WR_SEND:
break;
default:
- break;
+ ret = -EINVAL;
+ goto out_put;
}
}
@@ -2523,6 +2557,7 @@ err:
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2572,6 +2607,7 @@ out:
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2621,6 +2657,7 @@ out:
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2713,6 +2750,7 @@ err:
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_destroy_ah cmd;
@@ -2749,6 +2787,7 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2796,6 +2835,7 @@ out_put:
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -2876,6 +2916,7 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3036,6 +3077,7 @@ err_free_attr:
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
@@ -3078,6 +3120,7 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
}
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_uverbs_create_xsrq *cmd,
struct ib_udata *udata)
{
@@ -3211,6 +3254,7 @@ err:
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3238,7 +3282,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- ret = __uverbs_create_xsrq(file, &xcmd, &udata);
+ ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
return ret;
@@ -3246,6 +3290,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len, int out_len)
{
struct ib_uverbs_create_xsrq cmd;
@@ -3263,7 +3308,7 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- ret = __uverbs_create_xsrq(file, &cmd, &udata);
+ ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)
return ret;
@@ -3271,6 +3316,7 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3301,6 +3347,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf,
int in_len, int out_len)
{
@@ -3341,6 +3388,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len)
{
@@ -3398,16 +3446,15 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
}
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_query_device_resp resp;
struct ib_uverbs_ex_query_device cmd;
struct ib_device_attr attr;
- struct ib_device *device;
int err;
- device = file->device->ib_dev;
if (ucore->inlen < sizeof(cmd))
return -EINVAL;
@@ -3428,11 +3475,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
memset(&attr, 0, sizeof(attr));
- err = device->query_device(device, &attr, uhw);
+ err = ib_dev->query_device(ib_dev, &attr, uhw);
if (err)
return err;
- copy_query_dev_fields(file, &resp.base, &attr);
+ copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
resp.comp_mask = 0;
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index f6eef2da7097..c29a660c72fe 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -79,6 +79,7 @@ static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
const char __user *buf, int in_len,
int out_len) = {
[IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
@@ -119,6 +120,7 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
};
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
@@ -128,16 +130,21 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
};
static void ib_uverbs_add_one(struct ib_device *device);
-static void ib_uverbs_remove_one(struct ib_device *device);
+static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
-static void ib_uverbs_release_dev(struct kref *ref)
+static void ib_uverbs_release_dev(struct kobject *kobj)
{
struct ib_uverbs_device *dev =
- container_of(ref, struct ib_uverbs_device, ref);
+ container_of(kobj, struct ib_uverbs_device, kobj);
- complete(&dev->comp);
+ cleanup_srcu_struct(&dev->disassociate_srcu);
+ kfree(dev);
}
+static struct kobj_type ib_uverbs_dev_ktype = {
+ .release = ib_uverbs_release_dev,
+};
+
static void ib_uverbs_release_event_file(struct kref *ref)
{
struct ib_uverbs_event_file *file =
@@ -201,9 +208,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
{
struct ib_uobject *uobj, *tmp;
- if (!context)
- return 0;
-
context->closing = 1;
list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
@@ -303,13 +307,27 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
return context->device->dealloc_ucontext(context);
}
+static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
+{
+ complete(&dev->comp);
+}
+
static void ib_uverbs_release_file(struct kref *ref)
{
struct ib_uverbs_file *file =
container_of(ref, struct ib_uverbs_file, ref);
+ struct ib_device *ib_dev;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (ib_dev && !ib_dev->disassociate_ucontext)
+ module_put(ib_dev->owner);
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
- module_put(file->device->ib_dev->owner);
- kref_put(&file->device->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&file->device->refcount))
+ ib_uverbs_comp_dev(file->device);
kfree(file);
}
@@ -331,9 +349,19 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
return -EAGAIN;
if (wait_event_interruptible(file->poll_wait,
- !list_empty(&file->event_list)))
+ (!list_empty(&file->event_list) ||
+ /* The barriers built into wait_event_interruptible()
+ * and wake_up() guarantee this will see the NULL being
+ * set without using RCU
+ */
+ !file->uverbs_file->device->ib_dev)))
return -ERESTARTSYS;
+ /* If the device was disassociated and no event exists, set an error */
+ if (list_empty(&file->event_list) &&
+ !file->uverbs_file->device->ib_dev)
+ return -EIO;
+
spin_lock_irq(&file->lock);
}
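The comment above leans on the implicit memory barriers in the wait/wake primitives rather than on RCU. A minimal, self-contained sketch of that pairing, using a hypothetical demo structure instead of the real uverbs types:

/* Hypothetical demo of the publish/observe pattern relied on above; only
 * the wait_event_interruptible()/wake_up() pairing mirrors the uverbs code. */
#include <linux/wait.h>

struct demo {
	wait_queue_head_t poll_wait;
	void *dev;			/* cleared on disassociate */
};

static void demo_init(struct demo *d, void *dev)
{
	init_waitqueue_head(&d->poll_wait);
	d->dev = dev;
}

/* Writer side: clear the pointer, then wake waiters. wake_up() implies
 * a full barrier, ordering the store before the wakeup. */
static void demo_disassociate(struct demo *d)
{
	d->dev = NULL;
	wake_up_interruptible(&d->poll_wait);
}

/* Reader side: the condition is re-evaluated after the barrier built
 * into the wait loop, so a woken reader sees dev == NULL without RCU. */
static int demo_wait(struct demo *d)
{
	return wait_event_interruptible(d->poll_wait, !d->dev);
}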
@@ -396,8 +424,11 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_event_file *file = filp->private_data;
struct ib_uverbs_event *entry, *tmp;
+ int closed_already = 0;
+ mutex_lock(&file->uverbs_file->device->lists_mutex);
spin_lock_irq(&file->lock);
+ closed_already = file->is_closed;
file->is_closed = 1;
list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
if (entry->counter)
@@ -405,11 +436,15 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
kfree(entry);
}
spin_unlock_irq(&file->lock);
-
- if (file->is_async) {
- ib_unregister_event_handler(&file->uverbs_file->event_handler);
- kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
+ if (!closed_already) {
+ list_del(&file->list);
+ if (file->is_async)
+ ib_unregister_event_handler(&file->uverbs_file->
+ event_handler);
}
+ mutex_unlock(&file->uverbs_file->device->lists_mutex);
+
+ kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
kref_put(&file->ref, ib_uverbs_release_event_file);
return 0;
@@ -541,13 +576,21 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
NULL, NULL);
}
+void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
+{
+ kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+ file->async_file = NULL;
+}
+
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+ struct ib_device *ib_dev,
int is_async)
{
struct ib_uverbs_event_file *ev_file;
struct file *filp;
+ int ret;
- ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
+ ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
if (!ev_file)
return ERR_PTR(-ENOMEM);
@@ -556,16 +599,47 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
INIT_LIST_HEAD(&ev_file->event_list);
init_waitqueue_head(&ev_file->poll_wait);
ev_file->uverbs_file = uverbs_file;
+ kref_get(&ev_file->uverbs_file->ref);
ev_file->async_queue = NULL;
- ev_file->is_async = is_async;
ev_file->is_closed = 0;
filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
ev_file, O_RDONLY);
if (IS_ERR(filp))
- kfree(ev_file);
+ goto err_put_refs;
+
+ mutex_lock(&uverbs_file->device->lists_mutex);
+ list_add_tail(&ev_file->list,
+ &uverbs_file->device->uverbs_events_file_list);
+ mutex_unlock(&uverbs_file->device->lists_mutex);
+
+ if (is_async) {
+ WARN_ON(uverbs_file->async_file);
+ uverbs_file->async_file = ev_file;
+ kref_get(&uverbs_file->async_file->ref);
+ INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
+ ib_dev,
+ ib_uverbs_event_handler);
+ ret = ib_register_event_handler(&uverbs_file->event_handler);
+ if (ret)
+ goto err_put_file;
+
+ /* At this point the async file is fully set up */
+ ev_file->is_async = 1;
+ }
return filp;
+
+err_put_file:
+ fput(filp);
+ kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
+ uverbs_file->async_file = NULL;
+ return ERR_PTR(ret);
+
+err_put_refs:
+ kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
+ kref_put(&ev_file->ref, ib_uverbs_release_event_file);
+ return filp;
}
/*
@@ -601,8 +675,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
+ struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
__u32 flags;
+ int srcu_key;
+ ssize_t ret;
if (count < sizeof hdr)
return -EINVAL;
@@ -610,6 +687,14 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof hdr))
return -EFAULT;
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (!ib_dev) {
+ ret = -EIO;
+ goto out;
+ }
+
flags = (hdr.command &
IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
@@ -617,26 +702,36 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
__u32 command;
if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK))
- return -EINVAL;
+ IB_USER_VERBS_CMD_COMMAND_MASK)) {
+ ret = -EINVAL;
+ goto out;
+ }
command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[command])
- return -EINVAL;
+ !uverbs_cmd_table[command]) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!file->ucontext &&
- command != IB_USER_VERBS_CMD_GET_CONTEXT)
- return -EINVAL;
+ command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command)))
- return -ENOSYS;
+ if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
+ ret = -ENOSYS;
+ goto out;
+ }
- if (hdr.in_words * 4 != count)
- return -EINVAL;
+ if (hdr.in_words * 4 != count) {
+ ret = -EINVAL;
+ goto out;
+ }
- return uverbs_cmd_table[command](file,
+ ret = uverbs_cmd_table[command](file, ib_dev,
buf + sizeof(hdr),
hdr.in_words * 4,
hdr.out_words * 4);
@@ -647,51 +742,72 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_uverbs_ex_cmd_hdr ex_hdr;
struct ib_udata ucore;
struct ib_udata uhw;
- int err;
size_t written_count = count;
if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK))
- return -EINVAL;
+ IB_USER_VERBS_CMD_COMMAND_MASK)) {
+ ret = -EINVAL;
+ goto out;
+ }
command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
- !uverbs_ex_cmd_table[command])
- return -ENOSYS;
+ !uverbs_ex_cmd_table[command]) {
+ ret = -ENOSYS;
+ goto out;
+ }
- if (!file->ucontext)
- return -EINVAL;
+ if (!file->ucontext) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command)))
- return -ENOSYS;
+ if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
+ ret = -ENOSYS;
+ goto out;
+ }
- if (count < (sizeof(hdr) + sizeof(ex_hdr)))
- return -EINVAL;
+ if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
- return -EFAULT;
+ if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
+ ret = -EFAULT;
+ goto out;
+ }
count -= sizeof(hdr) + sizeof(ex_hdr);
buf += sizeof(hdr) + sizeof(ex_hdr);
- if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
- return -EINVAL;
+ if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (ex_hdr.cmd_hdr_reserved)
- return -EINVAL;
+ if (ex_hdr.cmd_hdr_reserved) {
+ ret = -EINVAL;
+ goto out;
+ }
if (ex_hdr.response) {
- if (!hdr.out_words && !ex_hdr.provider_out_words)
- return -EINVAL;
+ if (!hdr.out_words && !ex_hdr.provider_out_words) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!access_ok(VERIFY_WRITE,
(void __user *) (unsigned long) ex_hdr.response,
- (hdr.out_words + ex_hdr.provider_out_words) * 8))
- return -EFAULT;
+ (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
- if (hdr.out_words || ex_hdr.provider_out_words)
- return -EINVAL;
+ if (hdr.out_words || ex_hdr.provider_out_words) {
+ ret = -EINVAL;
+ goto out;
+ }
}
INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
@@ -703,27 +819,43 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
ex_hdr.provider_in_words * 8,
ex_hdr.provider_out_words * 8);
- err = uverbs_ex_cmd_table[command](file,
+ ret = uverbs_ex_cmd_table[command](file,
+ ib_dev,
&ucore,
&uhw);
-
- if (err)
- return err;
-
- return written_count;
+ if (!ret)
+ ret = written_count;
+ } else {
+ ret = -ENOSYS;
}
- return -ENOSYS;
+out:
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return ret;
}
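Taken together, the hunks above follow one SRCU pattern: every entry point dereferences ib_dev under srcu_read_lock() and fails with -EIO once it reads NULL. A condensed sketch of that lifecycle, with a hypothetical structure standing in for ib_uverbs_device (in the real patch, cleanup_srcu_struct() runs later, from the kobject release):

/* Hypothetical condensed form of the SRCU lifecycle used in this patch. */
#include <linux/srcu.h>

struct demo_dev {
	struct srcu_struct disassociate_srcu;
	void __rcu *ib_dev;
};

static int demo_init(struct demo_dev *d, void *ib_dev)
{
	int ret = init_srcu_struct(&d->disassociate_srcu);

	if (ret)
		return ret;
	rcu_assign_pointer(d->ib_dev, ib_dev);
	return 0;
}

/* Command path: take the SRCU read lock, bail out with -EIO once the
 * device has been disassociated. */
static int demo_cmd(struct demo_dev *d)
{
	int srcu_key = srcu_read_lock(&d->disassociate_srcu);
	void *ib_dev = srcu_dereference(d->ib_dev, &d->disassociate_srcu);
	int ret = ib_dev ? 0 : -EIO;

	srcu_read_unlock(&d->disassociate_srcu, srcu_key);
	return ret;
}

/* Removal path: publish NULL, then wait for all readers to drain. */
static void demo_remove(struct demo_dev *d)
{
	rcu_assign_pointer(d->ib_dev, NULL);
	synchronize_srcu(&d->disassociate_srcu);
	cleanup_srcu_struct(&d->disassociate_srcu);
}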
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
+ struct ib_device *ib_dev;
+ int ret = 0;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (!ib_dev) {
+ ret = -EIO;
+ goto out;
+ }
if (!file->ucontext)
- return -ENODEV;
+ ret = -ENODEV;
else
- return file->device->ib_dev->mmap(file->ucontext, vma);
+ ret = ib_dev->mmap(file->ucontext, vma);
+out:
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+ return ret;
}
/*
@@ -740,23 +872,43 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
struct ib_uverbs_device *dev;
struct ib_uverbs_file *file;
+ struct ib_device *ib_dev;
int ret;
+ int module_dependent;
+ int srcu_key;
dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
- if (dev)
- kref_get(&dev->ref);
- else
+ if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;
- if (!try_module_get(dev->ib_dev->owner)) {
- ret = -ENODEV;
+ srcu_key = srcu_read_lock(&dev->disassociate_srcu);
+ mutex_lock(&dev->lists_mutex);
+ ib_dev = srcu_dereference(dev->ib_dev,
+ &dev->disassociate_srcu);
+ if (!ib_dev) {
+ ret = -EIO;
goto err;
}
- file = kmalloc(sizeof *file, GFP_KERNEL);
+ /* If the IB device supports disassociating the ucontext, there is no
+ * hard dependency between the uverbs device and its low-level device.
+ */
+ module_dependent = !(ib_dev->disassociate_ucontext);
+
+ if (module_dependent) {
+ if (!try_module_get(ib_dev->owner)) {
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
if (!file) {
ret = -ENOMEM;
- goto err_module;
+ if (module_dependent)
+ goto err_module;
+
+ goto err;
}
file->device = dev;
@@ -766,27 +918,47 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_init(&file->mutex);
filp->private_data = file;
+ kobject_get(&dev->kobj);
+ list_add_tail(&file->list, &dev->uverbs_file_list);
+ mutex_unlock(&dev->lists_mutex);
+ srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return nonseekable_open(inode, filp);
err_module:
- module_put(dev->ib_dev->owner);
+ module_put(ib_dev->owner);
err:
- kref_put(&dev->ref, ib_uverbs_release_dev);
+ mutex_unlock(&dev->lists_mutex);
+ srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
+ if (atomic_dec_and_test(&dev->refcount))
+ ib_uverbs_comp_dev(dev);
+
return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
-
- ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ struct ib_uverbs_device *dev = file->device;
+ struct ib_ucontext *ucontext = NULL;
+
+ mutex_lock(&file->device->lists_mutex);
+ ucontext = file->ucontext;
+ file->ucontext = NULL;
+ if (!file->is_closed) {
+ list_del(&file->list);
+ file->is_closed = 1;
+ }
+ mutex_unlock(&file->device->lists_mutex);
+ if (ucontext)
+ ib_uverbs_cleanup_ucontext(file, ucontext);
if (file->async_file)
kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
+ kobject_put(&dev->kobj);
return 0;
}
@@ -817,12 +989,21 @@ static struct ib_client uverbs_client = {
static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
char *buf)
{
+ int ret = -ENODEV;
+ int srcu_key;
struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ struct ib_device *ib_dev;
if (!dev)
return -ENODEV;
- return sprintf(buf, "%s\n", dev->ib_dev->name);
+ srcu_key = srcu_read_lock(&dev->disassociate_srcu);
+ ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
+ if (ib_dev)
+ ret = sprintf(buf, "%s\n", ib_dev->name);
+ srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
+
+ return ret;
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -830,11 +1011,19 @@ static ssize_t show_dev_abi_version(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ int ret = -ENODEV;
+ int srcu_key;
+ struct ib_device *ib_dev;
if (!dev)
return -ENODEV;
+ srcu_key = srcu_read_lock(&dev->disassociate_srcu);
+ ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
+ if (ib_dev)
+ ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
+ srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
- return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
+ return ret;
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
@@ -874,6 +1063,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
int devnum;
dev_t base;
struct ib_uverbs_device *uverbs_dev;
+ int ret;
if (!device->alloc_ucontext)
return;
@@ -882,10 +1072,20 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (!uverbs_dev)
return;
- kref_init(&uverbs_dev->ref);
+ ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
+ if (ret) {
+ kfree(uverbs_dev);
+ return;
+ }
+
+ atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
+ kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+ mutex_init(&uverbs_dev->lists_mutex);
+ INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
+ INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -906,12 +1106,13 @@ static void ib_uverbs_add_one(struct ib_device *device)
}
spin_unlock(&map_lock);
- uverbs_dev->ib_dev = device;
+ rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
@@ -942,15 +1143,79 @@ err_cdev:
clear_bit(devnum, overflow_map);
err:
- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ kobject_put(&uverbs_dev->kobj);
return;
}
-static void ib_uverbs_remove_one(struct ib_device *device)
+static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
+ struct ib_device *ib_dev)
{
- struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client);
+ struct ib_uverbs_file *file;
+ struct ib_uverbs_event_file *event_file;
+ struct ib_event event;
+
+ /* Wait for any pending/running commands to terminate */
+ synchronize_srcu(&uverbs_dev->disassociate_srcu);
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.element.port_num = 0;
+ event.device = ib_dev;
+
+ mutex_lock(&uverbs_dev->lists_mutex);
+ while (!list_empty(&uverbs_dev->uverbs_file_list)) {
+ struct ib_ucontext *ucontext;
+
+ file = list_first_entry(&uverbs_dev->uverbs_file_list,
+ struct ib_uverbs_file, list);
+ file->is_closed = 1;
+ ucontext = file->ucontext;
+ list_del(&file->list);
+ file->ucontext = NULL;
+ kref_get(&file->ref);
+ mutex_unlock(&uverbs_dev->lists_mutex);
+ /* We must release the mutex before going ahead and calling
+ * disassociate_ucontext. disassociate_ucontext might end up
+ * indirectly calling uverbs_close, for example due to freeing
+ * the resources (e.g. mmput).
+ */
+ ib_uverbs_event_handler(&file->event_handler, &event);
+ if (ucontext) {
+ ib_dev->disassociate_ucontext(ucontext);
+ ib_uverbs_cleanup_ucontext(file, ucontext);
+ }
+
+ mutex_lock(&uverbs_dev->lists_mutex);
+ kref_put(&file->ref, ib_uverbs_release_file);
+ }
+
+ while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
+ event_file = list_first_entry(&uverbs_dev->
+ uverbs_events_file_list,
+ struct ib_uverbs_event_file,
+ list);
+ spin_lock_irq(&event_file->lock);
+ event_file->is_closed = 1;
+ spin_unlock_irq(&event_file->lock);
+
+ list_del(&event_file->list);
+ if (event_file->is_async) {
+ ib_unregister_event_handler(&event_file->uverbs_file->
+ event_handler);
+ event_file->uverbs_file->event_handler.device = NULL;
+ }
+
+ wake_up_interruptible(&event_file->poll_wait);
+ kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
+ }
+ mutex_unlock(&uverbs_dev->lists_mutex);
+}
+
+static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
+{
+ struct ib_uverbs_device *uverbs_dev = client_data;
+ int wait_clients = 1;
if (!uverbs_dev)
return;
@@ -964,9 +1229,28 @@ static void ib_uverbs_remove_one(struct ib_device *device)
else
clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
- wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ if (device->disassociate_ucontext) {
+ /* We disassociate HW resources and immediately return.
+ * Userspace will see an EIO errno for all future access.
+ * Upon returning, ib_device may be freed internally and is no
+ * longer valid.
+ * uverbs_device is still available until all clients close
+ * their files; then the uverbs device ref count will be zero
+ * and its resources will be freed.
+ * Note: at this point no more files can be opened since the
+ * cdev was deleted; however, active clients can still issue
+ * commands and close their open files.
+ */
+ rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
+ ib_uverbs_free_hw_resources(uverbs_dev, device);
+ wait_clients = 0;
+ }
+
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
+ if (wait_clients)
+ wait_for_completion(&uverbs_dev->comp);
+ kobject_put(&uverbs_dev->kobj);
}
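From userspace, the disassociation above shows up as EIO on any further command issued through an already-open uverbs fd. A rough illustration, assuming the usual /dev/infiniband/uverbs0 node and ignoring the command ABI (the zeroed buffer is just a placeholder, not a valid command):

/* Userspace view of disassociation: commands on an already-open fd
 * fail with EIO once the device has been hot-removed. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64] = { 0 };	/* placeholder payload */
	int fd = open("/dev/infiniband/uverbs0", O_RDWR);

	if (fd < 0)
		return 1;
	/* ... device is hot-unplugged here ... */
	if (write(fd, buf, sizeof(buf)) < 0 && errno == EIO)
		fprintf(stderr, "device was disassociated\n");
	close(fd);
	return 0;
}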
static char *uverbs_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index bac3fb406a74..e1f2c9887f3f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -213,28 +213,79 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */
+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey, which can be used as the lkey value for local
+ * memory operations.
+ */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
struct ib_pd *pd;
+ struct ib_device_attr devattr;
+ int rc;
+
+ rc = ib_query_device(device, &devattr);
+ if (rc)
+ return ERR_PTR(rc);
pd = device->alloc_pd(device, NULL, NULL);
+ if (IS_ERR(pd))
+ return pd;
+
+ pd->device = device;
+ pd->uobject = NULL;
+ pd->local_mr = NULL;
+ atomic_set(&pd->usecnt, 0);
+
+ if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+ pd->local_dma_lkey = device->local_dma_lkey;
+ else {
+ struct ib_mr *mr;
+
+ mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(mr)) {
+ ib_dealloc_pd(pd);
+ return (struct ib_pd *)mr;
+ }
- if (!IS_ERR(pd)) {
- pd->device = device;
- pd->uobject = NULL;
- atomic_set(&pd->usecnt, 0);
+ pd->local_mr = mr;
+ pd->local_dma_lkey = pd->local_mr->lkey;
}
-
return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
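A sketch of what a kernel consumer gains from this change: every PD now carries a usable local_dma_lkey, so a simple SGE no longer needs a separately registered DMA MR. The function below is illustrative only; "dev" is an assumed, already-discovered device:

/* Illustrative consumer: allocate a PD and fill an SGE with its
 * local_dma_lkey, which is valid for every PD after this patch. */
#include <rdma/ib_verbs.h>

static struct ib_pd *demo_make_pd(struct ib_device *dev, u64 dma_addr,
				  u32 len, struct ib_sge *sge)
{
	struct ib_pd *pd = ib_alloc_pd(dev);

	if (IS_ERR(pd))
		return pd;

	sge->addr = dma_addr;
	sge->length = len;
	sge->lkey = pd->local_dma_lkey;
	return pd;
}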
-int ib_dealloc_pd(struct ib_pd *pd)
+/**
+ * ib_dealloc_pd - Deallocates a protection domain.
+ * @pd: The protection domain to deallocate.
+ *
+ * It is an error to call this function while any resources in the pd still
+ * exist. The caller is responsible for synchronously destroying them and
+ * guaranteeing that no new allocations will happen.
+ */
+void ib_dealloc_pd(struct ib_pd *pd)
{
- if (atomic_read(&pd->usecnt))
- return -EBUSY;
+ int ret;
+
+ if (pd->local_mr) {
+ ret = ib_dereg_mr(pd->local_mr);
+ WARN_ON(ret);
+ pd->local_mr = NULL;
+ }
+
+ /* uverbs manipulates usecnt with proper locking, while the kabi
+ * requires the caller to guarantee we can't race here. */
+ WARN_ON(atomic_read(&pd->usecnt));
- return pd->device->dealloc_pd(pd);
+ /* Making dealloc_pd a void return is a WIP; no driver should return
+ * an error here. */
+ ret = pd->device->dealloc_pd(pd);
+ WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
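The void return also tightens the teardown contract: callers must destroy every resource in the PD first, since a non-zero usecnt is now a WARN_ON rather than a recoverable -EBUSY. A minimal sketch, with an assumed QP as the last holder of the PD:

/* Illustrative teardown ordering under the new contract. */
static void demo_teardown(struct ib_qp *qp, struct ib_pd *pd)
{
	WARN_ON(ib_destroy_qp(qp));	/* drop the last usecnt reference */
	ib_dealloc_pd(pd);		/* no error to check any more */
}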
@@ -1144,73 +1195,6 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
}
EXPORT_SYMBOL(ib_get_dma_mr);
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- struct ib_mr *mr;
- int err;
-
- err = ib_check_mr_access(mr_access_flags);
- if (err)
- return ERR_PTR(err);
-
- if (!pd->device->reg_phys_mr)
- return ERR_PTR(-ENOSYS);
-
- mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
- mr_access_flags, iova_start);
-
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- atomic_set(&mr->usecnt, 0);
- }
-
- return mr;
-}
-EXPORT_SYMBOL(ib_reg_phys_mr);
-
-int ib_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- struct ib_pd *old_pd;
- int ret;
-
- ret = ib_check_mr_access(mr_access_flags);
- if (ret)
- return ret;
-
- if (!mr->device->rereg_phys_mr)
- return -ENOSYS;
-
- if (atomic_read(&mr->usecnt))
- return -EBUSY;
-
- old_pd = mr->pd;
-
- ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
- phys_buf_array, num_phys_buf,
- mr_access_flags, iova_start);
-
- if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
- atomic_dec(&old_pd->usecnt);
- atomic_inc(&pd->usecnt);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(ib_rereg_phys_mr);
-
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
return mr->device->query_mr ?
@@ -1235,54 +1219,28 @@ int ib_dereg_mr(struct ib_mr *mr)
}
EXPORT_SYMBOL(ib_dereg_mr);
-struct ib_mr *ib_create_mr(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr)
-{
- struct ib_mr *mr;
-
- if (!pd->device->create_mr)
- return ERR_PTR(-ENOSYS);
-
- mr = pd->device->create_mr(pd, mr_init_attr);
-
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- atomic_set(&mr->usecnt, 0);
- }
-
- return mr;
-}
-EXPORT_SYMBOL(ib_create_mr);
-
-int ib_destroy_mr(struct ib_mr *mr)
-{
- struct ib_pd *pd;
- int ret;
-
- if (atomic_read(&mr->usecnt))
- return -EBUSY;
-
- pd = mr->pd;
- ret = mr->device->destroy_mr(mr);
- if (!ret)
- atomic_dec(&pd->usecnt);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_destroy_mr);
-
-struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+/**
+ * ib_alloc_mr() - Allocates a memory region
+ * @pd: protection domain associated with the region
+ * @mr_type: memory region type
+ * @max_num_sg: maximum sg entries available for registration.
+ *
+ * Notes:
+ * Memory registration page/sg lists must not exceed max_num_sg.
+ * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
+ * max_num_sg * used_page_size.
+ *
+ */
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct ib_mr *mr;
- if (!pd->device->alloc_fast_reg_mr)
+ if (!pd->device->alloc_mr)
return ERR_PTR(-ENOSYS);
- mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);
-
+ mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
@@ -1293,7 +1251,7 @@ struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
return mr;
}
-EXPORT_SYMBOL(ib_alloc_fast_reg_mr);
+EXPORT_SYMBOL(ib_alloc_mr);
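For converted callers, the old ib_alloc_fast_reg_mr(pd, depth) becomes an ib_alloc_mr() call that spells out the MR type and the sg limit. A one-line sketch ("pd" assumed valid; 32 is an arbitrary example depth):

/* Illustrative replacement for ib_alloc_fast_reg_mr(pd, 32). */
static struct ib_mr *demo_alloc_fr_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
}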
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
int max_page_list_len)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index e900b03531a9..1bdb9996d371 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,8 +1,6 @@
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
-obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..93308c45f298 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
/*
* T3 only supports 32 bits of size.
*/
+ if (sizeof(phys_addr_t) > 4) {
+ pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
+ return ERR_PTR(-ENOTSUPP);
+ }
bl.size = 0xffffffff;
bl.addr = 0;
kva = 0;
@@ -796,7 +800,9 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
return 0;
}
-static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -805,6 +811,10 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
u32 stag = 0;
int ret = 0;
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > T3_MAX_FASTREG_DEPTH)
+ return ERR_PTR(-EINVAL);
+
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -812,10 +822,10 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
goto err;
mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, pbl_depth);
+ ret = iwch_alloc_pbl(mhp, max_num_sg);
if (ret)
goto err1;
- mhp->attr.pbl_size = pbl_depth;
+ mhp->attr.pbl_size = max_num_sg;
ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
@@ -1439,7 +1449,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.alloc_mw = iwch_alloc_mw;
dev->ibdev.bind_mw = iwch_bind_mw;
dev->ibdev.dealloc_mw = iwch_dealloc_mw;
- dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
+ dev->ibdev.alloc_mr = iwch_alloc_mr;
dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
dev->ibdev.attach_mcast = iwch_multicast_attach;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 3ad8dc798f52..debc39d2cbc2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -50,6 +50,7 @@
#include <rdma/ib_addr.h>
#include "iw_cxgb4.h"
+#include "clip_tbl.h"
static char *states[] = {
"idle",
@@ -115,11 +116,11 @@ module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
"in seconds (default=60)");
-static int mpa_rev = 1;
+static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
"1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
- " compliant (default=1)");
+ " compliant (default=2)");
static int markers_enabled;
module_param(markers_enabled, int, 0644);
@@ -298,6 +299,16 @@ void _c4iw_free_ep(struct kref *kref)
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
+
+ cxgb4_clip_release(
+ ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+ }
remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
@@ -442,6 +453,12 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
kfree_skb(skb);
connect_reply_upcall(ep, -EHOSTUNREACH);
state_set(&ep->com, DEAD);
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ }
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
dst_release(ep->dst);
@@ -640,6 +657,7 @@ static int send_connect(struct c4iw_ep *ep)
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
&ep->com.mapped_remote_addr;
int win;
+ int ret;
wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
roundup(sizev4, 16) :
@@ -693,6 +711,11 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
}
+
+ if (ep->com.remote_addr.ss_family == AF_INET6)
+ cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&la6->sin6_addr.s6_addr, 1);
+
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -790,7 +813,11 @@ static int send_connect(struct c4iw_ep *ep)
}
set_bit(ACT_OPEN_REQ, &ep->com.history);
- return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ if (ret && ep->com.remote_addr.ss_family == AF_INET6)
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&la6->sin6_addr.s6_addr, 1);
+ return ret;
}
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
@@ -2091,6 +2118,15 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_EXIST:
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
+ cxgb4_clip_release(
+ ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)
+ &sin6->sin6_addr.s6_addr, 1);
+ }
remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
atid);
cxgb4_free_atid(t, atid);
@@ -2118,6 +2154,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
connect_reply_upcall(ep, status2errno(status));
state_set(&ep->com, DEAD);
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ }
if (status && act_open_has_tid(status))
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
@@ -2302,6 +2344,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
struct dst_entry *dst;
__u8 local_ip[16], peer_ip[16];
__be16 local_port, peer_port;
+ struct sockaddr_in6 *sin6;
int err;
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
@@ -2400,9 +2443,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &child_ep->com.mapped_local_addr;
-
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
@@ -2436,6 +2477,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
accept_cr(child_ep, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+ if (iptype == 6) {
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ }
goto out;
reject:
reject_cr(dev, hwtid, skb);
@@ -2672,6 +2718,15 @@ out:
if (release)
release_ep_resources(ep);
else if (ep->retry_with_mpa_v1) {
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)
+ &ep->com.mapped_local_addr;
+ cxgb4_clip_release(
+ ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+ }
remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
@@ -2976,7 +3031,7 @@ static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
- if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
+ if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
memcpy(la6->sin6_addr.s6_addr, &addr, 16);
memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
return 0;
@@ -3186,6 +3241,9 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
err, ep->stid,
sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
+ else
+ cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
return err;
}
@@ -3334,6 +3392,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
} else {
+ struct sockaddr_in6 *sin6;
c4iw_init_wr_wait(&ep->com.wr_wait);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
@@ -3342,6 +3401,9 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
0, 0, __func__);
+ sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
@@ -3461,6 +3523,12 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
mutex_unlock(&dev->rdev.stats.lock);
connect_reply_upcall(ep, status2errno(req->retval));
state_set(&ep->com, DEAD);
+ if (ep->com.remote_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ }
remove_handle(dev, &dev->atid_idr, atid);
cxgb4_free_atid(dev->rdev.lldi.tids, atid);
dst_release(ep->dst);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
- ret = -EINVAL;
+ wc->status = IB_WC_FATAL_ERR;
}
}
out:
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cc77844fada3..c7bb38c931a5 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -970,7 +970,9 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
struct ib_device *device,
int page_list_len);
-struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index cff815b91707..026b91ebd5e2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -853,7 +853,9 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
return 0;
}
-struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -862,6 +864,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
u32 stag = 0;
int ret = 0;
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > t4_max_fr_depth(use_dsgl))
+ return ERR_PTR(-EINVAL);
+
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -871,10 +877,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
}
mhp->rhp = rhp;
- ret = alloc_pbl(mhp, pbl_depth);
+ ret = alloc_pbl(mhp, max_num_sg);
if (ret)
goto err1;
- mhp->attr.pbl_size = pbl_depth;
+ mhp->attr.pbl_size = max_num_sg;
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 6eee3d385541..7746113552e7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -556,7 +556,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.alloc_mw = c4iw_alloc_mw;
dev->ibdev.bind_mw = c4iw_bind_mw;
dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
- dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
+ dev->ibdev.alloc_mr = c4iw_alloc_mr;
dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
dev->ibdev.attach_mcast = c4iw_multicast_attach;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 12b5bc23832b..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index f50a546224ad..1688a17de4fe 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -89,7 +89,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+ ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
if (ah_attr->static_rate) {
ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
enum rdma_link_layer ll;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ if (ll == IB_LINK_LAYER_ETHERNET)
+ ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+ else
+ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+
ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
if (ah->av.ib.stat_rate)
ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d012b6d..5fd49f9435f9 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -638,7 +638,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
- mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
+ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
if (*npolled >= num_entries)
goto out;
}
@@ -871,7 +871,7 @@ repoll:
if (is_eth) {
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
if (be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) {
+ MLX4_CQE_CVLAN_PRESENT_MASK) {
wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
MLX4_CQE_VID_MASK;
} else {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 85a50df2f203..1cd75ff02251 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -580,7 +580,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
list.length = sizeof (struct mlx4_rcv_tunnel_mad);
- list.lkey = tun_ctx->mr->lkey;
+ list.lkey = tun_ctx->pd->local_dma_lkey;
wr.wr.ud.ah = ah;
wr.wr.ud.port_num = port;
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
- switch (rdma_port_get_link_layer(ibdev, port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- if (!mlx4_is_slave(dev->dev))
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- case IB_LINK_LAYER_ETHERNET:
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- default:
- return -EINVAL;
+ /* iboe_process_mad(), which uses the HCA flow counters to implement IB
+ * PMA queries, should be called only by VFs and for that specific purpose.
+ */
+ if (link == IB_LINK_LAYER_INFINIBAND) {
+ if (mlx4_is_slave(dev->dev) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
}
+
+ if (link == IB_LINK_LAYER_ETHERNET)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return -EINVAL;
}
static void send_handler(struct ib_mad_agent *agent,
@@ -1123,7 +1133,7 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
sg_list.addr = tun_qp->ring[index].map;
sg_list.length = size;
- sg_list.lkey = ctx->mr->lkey;
+ sg_list.lkey = ctx->pd->local_dma_lkey;
recv_wr.next = NULL;
recv_wr.sg_list = &sg_list;
@@ -1234,7 +1244,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
list.length = sizeof (struct mlx4_mad_snd_buf);
- list.lkey = sqp_ctx->mr->lkey;
+ list.lkey = sqp_ctx->pd->local_dma_lkey;
wr.wr.ud.ah = ah;
wr.wr.ud.port_num = port;
@@ -1817,19 +1827,12 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_cq;
}
- ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(ctx->mr)) {
- ret = PTR_ERR(ctx->mr);
- pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
- goto err_pd;
- }
-
if (ctx->has_smi) {
ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
if (ret) {
pr_err("Couldn't create %s QP0 (%d)\n",
create_tun ? "tunnel for" : "", ret);
- goto err_mr;
+ goto err_pd;
}
}
@@ -1866,10 +1869,6 @@ err_qp0:
ib_destroy_qp(ctx->qp[0].qp);
ctx->qp[0].qp = NULL;
-err_mr:
- ib_dereg_mr(ctx->mr);
- ctx->mr = NULL;
-
err_pd:
ib_dealloc_pd(ctx->pd);
ctx->pd = NULL;
@@ -1906,8 +1905,6 @@ static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
ib_destroy_qp(ctx->qp[1].qp);
ctx->qp[1].qp = NULL;
mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
- ib_dereg_mr(ctx->mr);
- ctx->mr = NULL;
ib_dealloc_pd(ctx->pd);
ctx->pd = NULL;
ib_destroy_cq(ctx->cq);
@@ -2040,8 +2037,6 @@ static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
ib_destroy_qp(sqp_ctx->qp[1].qp);
sqp_ctx->qp[1].qp = NULL;
mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
- ib_dereg_mr(sqp_ctx->mr);
- sqp_ctx->mr = NULL;
ib_dealloc_pd(sqp_ctx->pd);
sqp_ctx->pd = NULL;
ib_destroy_cq(sqp_ctx->cq);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..efecdf0216d8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -45,6 +45,9 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include <net/bonding.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
@@ -74,13 +77,6 @@ static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
-struct update_gid_work {
- struct work_struct work;
- union ib_gid gids[128];
- struct mlx4_ib_dev *dev;
- int port;
-};
-
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static struct workqueue_struct *wq;
@@ -93,8 +89,6 @@ static void init_query_mad(struct ib_smp *mad)
mad->method = IB_MGMT_METHOD_GET;
}
-static union ib_gid zgid;
-
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int eth_num_ports = 0;
@@ -131,6 +125,237 @@ static int num_ib_ports(struct mlx4_dev *dev)
return ib_ports;
}
+static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
+{
+ struct mlx4_ib_dev *ibdev = to_mdev(device);
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
+
+ if (dev) {
+ if (mlx4_is_bonded(ibdev->dev)) {
+ struct net_device *upper = NULL;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (upper) {
+ struct net_device *active;
+
+ active = bond_option_active_slave_get_rcu(netdev_priv(upper));
+ if (active)
+ dev = active;
+ }
+ }
+ }
+ if (dev)
+ dev_hold(dev);
+
+ rcu_read_unlock();
+ return dev;
+}
+
+static int mlx4_ib_update_gids(struct gid_entry *gids,
+ struct mlx4_ib_dev *ibdev,
+ u8 port_num)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ struct mlx4_dev *dev = ibdev->dev;
+ int i;
+ union ib_gid *gid_tbl;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return -ENOMEM;
+
+ gid_tbl = mailbox->buf;
+
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
+ memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
+
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | port_num,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (mlx4_is_bonded(dev))
+ err += mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | 2,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+static int mlx4_ib_add_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context)
+{
+ struct mlx4_ib_dev *ibdev = to_mdev(device);
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ struct mlx4_port_gid_table *port_gid_table;
+ int free = -1, found = -1;
+ int ret = 0;
+ int hw_update = 0;
+ int i;
+ struct gid_entry *gids = NULL;
+
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > MLX4_MAX_PORTS)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ port_gid_table = &iboe->gids[port_num - 1];
+ spin_lock_bh(&iboe->lock);
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
+ if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid))) {
+ found = i;
+ break;
+ }
+ if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
+ free = i; /* HW has space */
+ }
+
+ if (found < 0) {
+ if (free < 0) {
+ ret = -ENOSPC;
+ } else {
+ port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
+ if (!port_gid_table->gids[free].ctx) {
+ ret = -ENOMEM;
+ } else {
+ *context = port_gid_table->gids[free].ctx;
+ memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
+ port_gid_table->gids[free].ctx->real_index = free;
+ port_gid_table->gids[free].ctx->refcount = 1;
+ hw_update = 1;
+ }
+ }
+ } else {
+ struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
+ *context = ctx;
+ ctx->refcount++;
+ }
+ if (!ret && hw_update) {
+ gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ if (!gids) {
+ ret = -ENOMEM;
+ } else {
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
+ memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ }
+ }
+ spin_unlock_bh(&iboe->lock);
+
+ if (!ret && hw_update) {
+ ret = mlx4_ib_update_gids(gids, ibdev, port_num);
+ kfree(gids);
+ }
+
+ return ret;
+}
+
+static int mlx4_ib_del_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ void **context)
+{
+ struct gid_cache_context *ctx = *context;
+ struct mlx4_ib_dev *ibdev = to_mdev(device);
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ struct mlx4_port_gid_table *port_gid_table;
+ int ret = 0;
+ int hw_update = 0;
+ struct gid_entry *gids = NULL;
+
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > MLX4_MAX_PORTS)
+ return -EINVAL;
+
+ port_gid_table = &iboe->gids[port_num - 1];
+ spin_lock_bh(&iboe->lock);
+ if (ctx) {
+ ctx->refcount--;
+ if (!ctx->refcount) {
+ unsigned int real_index = ctx->real_index;
+
+ memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
+ kfree(port_gid_table->gids[real_index].ctx);
+ port_gid_table->gids[real_index].ctx = NULL;
+ hw_update = 1;
+ }
+ }
+ if (!ret && hw_update) {
+ int i;
+
+ gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ if (!gids) {
+ ret = -ENOMEM;
+ } else {
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
+ memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ }
+ }
+ spin_unlock_bh(&iboe->lock);
+
+ if (!ret && hw_update) {
+ ret = mlx4_ib_update_gids(gids, ibdev, port_num);
+ kfree(gids);
+ }
+ return ret;
+}
+
+int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
+ u8 port_num, int index)
+{
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ struct gid_cache_context *ctx = NULL;
+ union ib_gid gid;
+ struct mlx4_port_gid_table *port_gid_table;
+ int real_index = -EINVAL;
+ int i;
+ int ret;
+ unsigned long flags;
+
+ if (port_num > MLX4_MAX_PORTS)
+ return -EINVAL;
+
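+	/* When the ports are bonded they are exposed as a single logical port
+	 * and the GID cache is kept on port 1.
+	 */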
+ if (mlx4_is_bonded(ibdev->dev))
+ port_num = 1;
+
+ if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
+ return index;
+
+ ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid);
+ if (ret)
+ return ret;
+
+ if (!memcmp(&gid, &zgid, sizeof(gid)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&iboe->lock, flags);
+ port_gid_table = &iboe->gids[port_num - 1];
+
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
+ if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid))) {
+ ctx = port_gid_table->gids[i].ctx;
+ break;
+ }
+ if (ctx)
+ real_index = ctx->real_index;
+ spin_unlock_irqrestore(&iboe->lock, flags);
+ return real_index;
+}
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -229,6 +454,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
+ props->max_sge_rd = props->max_sge;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;
@@ -253,14 +479,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
- if (err)
- goto out;
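+	/* Slave functions have no access to the internal clock parameters;
+	 * only query (and later report) them on the master.
+	 */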
+ if (!mlx4_is_slave(dev->dev))
+ err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
- resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset);
- resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ if (!err && !mlx4_is_slave(dev->dev)) {
+ resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ }
}
if (uhw->outlen) {
@@ -413,12 +640,13 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
- if (is_bonded)
- rtnl_lock(); /* required to get upper dev */
spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
- if (ndev && is_bonded)
- ndev = netdev_master_upper_dev_get(ndev);
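+	/* Dereferencing the bond master only needs RCU; this avoids holding
+	 * rtnl across the whole port query.
+	 */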
+ if (ndev && is_bonded) {
+ rcu_read_lock(); /* required to get upper dev */
+ ndev = netdev_master_upper_dev_get_rcu(ndev);
+ rcu_read_unlock();
+ }
if (!ndev)
goto out_unlock;
@@ -430,8 +658,6 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = state_to_phys_state(props->state);
out_unlock:
spin_unlock_bh(&iboe->lock);
- if (is_bonded)
- rtnl_unlock();
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@@ -514,23 +740,27 @@ out:
return err;
}
-static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid)
-{
- struct mlx4_ib_dev *dev = to_mdev(ibdev);
-
- *gid = dev->iboe.gid_table[port - 1][index];
-
- return 0;
-}
-
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
- if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+ int ret;
+
+ if (rdma_protocol_ib(ibdev, port))
return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
- else
- return iboe_query_gid(ibdev, port, index, gid);
+
+ if (!rdma_protocol_roce(ibdev, port))
+ return -ENODEV;
+
+ if (!rdma_cap_roce_gid_table(ibdev, port))
+ return -ENODEV;
+
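+	/* -EAGAIN from the cache means the entry is temporarily invalid;
+	 * report the zero GID instead of failing the query.
+	 */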
+ ret = ib_get_cached_gid(ibdev, port, index, gid);
+ if (ret == -EAGAIN) {
+ memcpy(gid, &zgid, sizeof(*gid));
+ return 0;
+ }
+
+ return ret;
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -691,7 +921,7 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
resp.cqe_size = dev->dev->caps.cqe_size;
}
- context = kmalloc(sizeof *context, GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return ERR_PTR(-ENOMEM);
@@ -728,21 +958,143 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
+static void mlx4_ib_vma_open(struct vm_area_struct *area)
+{
+ /* vma_open is called when a new VMA is created on top of our VMA.
+ * This is done through either mremap flow or split_vma (usually due
+ * to mlock, madvise, munmap, etc.). We do not support a clone of the
+ * vma, as this VMA is strongly hardware related. Therefore we set the
+ * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
+	 * calling us again and trying to do incorrect actions. We assume that
+	 * the original VMA is exactly a single page, so no "splitting"
+	 * operations can occur on it.
+ */
+ area->vm_ops = NULL;
+}
+
+static void mlx4_ib_vma_close(struct vm_area_struct *area)
+{
+ struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
+
+	/* It is guaranteed that all VMAs opened on a FD are closed before the
+	 * file itself is closed, so no synchronization with the regular
+	 * closing flow (e.g. mlx4_ib_dealloc_ucontext) is needed. We do,
+	 * however, need to synchronize with mlx4_ib_disassociate_ucontext,
+	 * which accesses the VMA.
+	 * The close operation is usually called under mm->mmap_sem, except
+	 * when the process is exiting; that case is handled explicitly in
+	 * mlx4_ib_disassociate_ucontext.
+	 */
+ mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
+ area->vm_private_data;
+
+	/* Clear the vma pointer in the driver's private data to protect
+	 * against a race with mlx4_ib_disassociate_ucontext().
+	 */
+ mlx4_ib_vma_priv_data->vma = NULL;
+}
+
+static const struct vm_operations_struct mlx4_ib_vm_ops = {
+ .open = mlx4_ib_vma_open,
+ .close = mlx4_ib_vma_close
+};
+
+static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ int i;
+ int ret = 0;
+ struct vm_area_struct *vma;
+ struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+			/* Make sure the task is dead before returning; this
+			 * prevents a rare race between module unload and a
+			 * concurrent call to mlx4_ib_vma_close().
+			 */
+ put_task_struct(owning_process);
+ msleep(1);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+				/* the task was found dead; release its task struct */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
+
+ /* need to protect from a race on closing the vma as part of
+ * mlx4_ib_vma_close().
+ */
+ down_read(&owning_mm->mmap_sem);
+ for (i = 0; i < HW_BAR_COUNT; i++) {
+ vma = context->hw_bar_info[i].vma;
+ if (!vma)
+ continue;
+
+ ret = zap_vma_ptes(context->hw_bar_info[i].vma,
+ context->hw_bar_info[i].vma->vm_start,
+ PAGE_SIZE);
+ if (ret) {
+ pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
+ BUG_ON(1);
+ }
+
+		/* the context is being destroyed; vm_ops must not be called again */
+ context->hw_bar_info[i].vma->vm_ops = NULL;
+ }
+
+ up_read(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+}
+
+static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
+ struct mlx4_ib_vma_private_data *vma_private_data)
+{
+ vma_private_data->vma = vma;
+ vma->vm_private_data = vma_private_data;
+ vma->vm_ops = &mlx4_ib_vm_ops;
+}
+
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
+ struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
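+	/* The page offset selects the BAR: 0 = UAR doorbell page,
+	 * 1 = BlueFlame page, 3 = HCA core clock.
+	 */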
if (vma->vm_pgoff == 0) {
+		/* Prevent the same BAR from being mmapped twice on one context */
+ if (mucontext->hw_bar_info[HW_BAR_DB].vma)
+ return -EINVAL;
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start,
to_mucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
+
+ mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+
} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
+		/* Prevent the same BAR from being mmapped twice on one context */
+ if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ return -EINVAL;
+
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start,
@@ -750,9 +1102,18 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
dev->dev->caps.num_uars,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
+
+ mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
+
} else if (vma->vm_pgoff == 3) {
struct mlx4_clock_params params;
- int ret = mlx4_get_internal_clock_params(dev->dev, &params);
+ int ret;
+
+		/* Prevent the same BAR from being mmapped twice on one context */
+ if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
+ return -EINVAL;
+
+ ret = mlx4_get_internal_clock_params(dev->dev, &params);
if (ret)
return ret;
@@ -765,6 +1126,9 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
>> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
+
+ mlx4_ib_set_vma_data(vma,
+ &mucontext->hw_bar_info[HW_BAR_CLOCK]);
} else {
return -EINVAL;
}
@@ -1546,272 +1910,6 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
- struct net_device *dev)
-{
- memcpy(eui, dev->dev_addr, 3);
- memcpy(eui + 5, dev->dev_addr + 3, 3);
- if (vlan_id < 0x1000) {
- eui[3] = vlan_id >> 8;
- eui[4] = vlan_id & 0xff;
- } else {
- eui[3] = 0xff;
- eui[4] = 0xfe;
- }
- eui[0] ^= 2;
-}
-
-static void update_gids_task(struct work_struct *work)
-{
- struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
- struct mlx4_cmd_mailbox *mailbox;
- union ib_gid *gids;
- int err;
- struct mlx4_dev *dev = gw->dev->dev;
- int is_bonded = mlx4_is_bonded(dev);
-
- if (!gw->dev->ib_active)
- return;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
- return;
- }
-
- gids = mailbox->buf;
- memcpy(gids, gw->gids, sizeof gw->gids);
-
- err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
- if (err)
- pr_warn("set port command failed\n");
- else
- if ((gw->port == 1) || !is_bonded)
- mlx4_ib_dispatch_event(gw->dev,
- is_bonded ? 1 : gw->port,
- IB_EVENT_GID_CHANGE);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- kfree(gw);
-}
-
-static void reset_gids_task(struct work_struct *work)
-{
- struct update_gid_work *gw =
- container_of(work, struct update_gid_work, work);
- struct mlx4_cmd_mailbox *mailbox;
- union ib_gid *gids;
- int err;
- struct mlx4_dev *dev = gw->dev->dev;
-
- if (!gw->dev->ib_active)
- return;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- pr_warn("reset gid table failed\n");
- goto free;
- }
-
- gids = mailbox->buf;
- memcpy(gids, gw->gids, sizeof(gw->gids));
-
- if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
- IB_LINK_LAYER_ETHERNET) {
- err = mlx4_cmd(dev, mailbox->dma,
- MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
- if (err)
- pr_warn("set port %d command failed\n", gw->port);
- }
-
- mlx4_free_cmd_mailbox(dev, mailbox);
-free:
- kfree(gw);
-}
-
-static int update_gid_table(struct mlx4_ib_dev *dev, int port,
- union ib_gid *gid, int clear,
- int default_gid)
-{
- struct update_gid_work *work;
- int i;
- int need_update = 0;
- int free = -1;
- int found = -1;
- int max_gids;
-
- if (default_gid) {
- free = 0;
- } else {
- max_gids = dev->dev->caps.gid_table_len[port];
- for (i = 1; i < max_gids; ++i) {
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
- sizeof(*gid)))
- found = i;
-
- if (clear) {
- if (found >= 0) {
- need_update = 1;
- dev->iboe.gid_table[port - 1][found] =
- zgid;
- break;
- }
- } else {
- if (found >= 0)
- break;
-
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i],
- &zgid, sizeof(*gid)))
- free = i;
- }
- }
- }
-
- if (found == -1 && !clear && free >= 0) {
- dev->iboe.gid_table[port - 1][free] = *gid;
- need_update = 1;
- }
-
- if (!need_update)
- return 0;
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
-
- memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
- INIT_WORK(&work->work, update_gids_task);
- work->port = port;
- work->dev = dev;
- queue_work(wq, &work->work);
-
- return 0;
-}
-
-static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
- gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
-
-static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
-{
- struct update_gid_work *work;
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
-
- memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
- memset(work->gids, 0, sizeof(work->gids));
- INIT_WORK(&work->work, reset_gids_task);
- work->dev = dev;
- work->port = port;
- queue_work(wq, &work->work);
- return 0;
-}
-
-static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
- struct mlx4_ib_dev *ibdev, union ib_gid *gid)
-{
- struct mlx4_ib_iboe *iboe;
- int port = 0;
- struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
- rdma_vlan_dev_real_dev(event_netdev) :
- event_netdev;
- union ib_gid default_gid;
-
- mlx4_make_default_gid(real_dev, &default_gid);
-
- if (!memcmp(gid, &default_gid, sizeof(*gid)))
- return 0;
-
- if (event != NETDEV_DOWN && event != NETDEV_UP)
- return 0;
-
- if ((real_dev != event_netdev) &&
- (event == NETDEV_DOWN) &&
- rdma_link_local_addr((struct in6_addr *)gid))
- return 0;
-
- iboe = &ibdev->iboe;
- spin_lock_bh(&iboe->lock);
-
- for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
- if ((netif_is_bond_master(real_dev) &&
- (real_dev == iboe->masters[port - 1])) ||
- (!netif_is_bond_master(real_dev) &&
- (real_dev == iboe->netdevs[port - 1])))
- update_gid_table(ibdev, port, gid,
- event == NETDEV_DOWN, 0);
-
- spin_unlock_bh(&iboe->lock);
- return 0;
-
-}
-
-static u8 mlx4_ib_get_dev_port(struct net_device *dev,
- struct mlx4_ib_dev *ibdev)
-{
- u8 port = 0;
- struct mlx4_ib_iboe *iboe;
- struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
- rdma_vlan_dev_real_dev(dev) : dev;
-
- iboe = &ibdev->iboe;
-
- for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
- if ((netif_is_bond_master(real_dev) &&
- (real_dev == iboe->masters[port - 1])) ||
- (!netif_is_bond_master(real_dev) &&
- (real_dev == iboe->netdevs[port - 1])))
- break;
-
- if ((port == 0) || (port > ibdev->dev->caps.num_ports))
- return 0;
- else
- return port;
-}
-
-static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct mlx4_ib_dev *ibdev;
- struct in_ifaddr *ifa = ptr;
- union ib_gid gid;
- struct net_device *event_netdev = ifa->ifa_dev->dev;
-
- ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
- ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
-
- mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
- return NOTIFY_DONE;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct mlx4_ib_dev *ibdev;
- struct inet6_ifaddr *ifa = ptr;
- union ib_gid *gid = (union ib_gid *)&ifa->addr;
- struct net_device *event_netdev = ifa->idev->dev;
-
- ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
-
- mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
- return NOTIFY_DONE;
-}
-#endif
-
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
@@ -1870,94 +1968,6 @@ unlock:
mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
-static void mlx4_ib_get_dev_addr(struct net_device *dev,
- struct mlx4_ib_dev *ibdev, u8 port)
-{
- struct in_device *in_dev;
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *in6_dev;
- union ib_gid *pgid;
- struct inet6_ifaddr *ifp;
- union ib_gid default_gid;
-#endif
- union ib_gid gid;
-
-
- if ((port == 0) || (port > ibdev->dev->caps.num_ports))
- return;
-
- /* IPv4 gids */
- in_dev = in_dev_get(dev);
- if (in_dev) {
- for_ifa(in_dev) {
- /*ifa->ifa_address;*/
- ipv6_addr_set_v4mapped(ifa->ifa_address,
- (struct in6_addr *)&gid);
- update_gid_table(ibdev, port, &gid, 0, 0);
- }
- endfor_ifa(in_dev);
- in_dev_put(in_dev);
- }
-#if IS_ENABLED(CONFIG_IPV6)
- mlx4_make_default_gid(dev, &default_gid);
- /* IPv6 gids */
- in6_dev = in6_dev_get(dev);
- if (in6_dev) {
- read_lock_bh(&in6_dev->lock);
- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- pgid = (union ib_gid *)&ifp->addr;
- if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
- continue;
- update_gid_table(ibdev, port, pgid, 0, 0);
- }
- read_unlock_bh(&in6_dev->lock);
- in6_dev_put(in6_dev);
- }
-#endif
-}
-
-static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
- struct net_device *dev, u8 port)
-{
- union ib_gid gid;
- mlx4_make_default_gid(dev, &gid);
- update_gid_table(ibdev, port, &gid, 0, 1);
-}
-
-static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
-{
- struct net_device *dev;
- struct mlx4_ib_iboe *iboe = &ibdev->iboe;
- int i;
- int err = 0;
-
- for (i = 1; i <= ibdev->num_ports; ++i) {
- if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
- IB_LINK_LAYER_ETHERNET) {
- err = reset_gid_table(ibdev, i);
- if (err)
- goto out;
- }
- }
-
- read_lock(&dev_base_lock);
- spin_lock_bh(&iboe->lock);
-
- for_each_netdev(&init_net, dev) {
- u8 port = mlx4_ib_get_dev_port(dev, ibdev);
- /* port will be non-zero only for ETH ports */
- if (port) {
- mlx4_ib_set_default_gid(ibdev, dev, port);
- mlx4_ib_get_dev_addr(dev, ibdev, port);
- }
- }
-
- spin_unlock_bh(&iboe->lock);
- read_unlock(&dev_base_lock);
-out:
- return err;
-}
-
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
unsigned long event)
@@ -1967,81 +1977,22 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
int update_qps_port = -1;
int port;
+ ASSERT_RTNL();
+
iboe = &ibdev->iboe;
spin_lock_bh(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
- enum ib_port_state port_state = IB_PORT_NOP;
- struct net_device *old_master = iboe->masters[port - 1];
- struct net_device *curr_netdev;
- struct net_device *curr_master;
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
- if (iboe->netdevs[port - 1])
- mlx4_ib_set_default_gid(ibdev,
- iboe->netdevs[port - 1], port);
- curr_netdev = iboe->netdevs[port - 1];
-
- if (iboe->netdevs[port - 1] &&
- netif_is_bond_slave(iboe->netdevs[port - 1])) {
- iboe->masters[port - 1] = netdev_master_upper_dev_get(
- iboe->netdevs[port - 1]);
- } else {
- iboe->masters[port - 1] = NULL;
- }
- curr_master = iboe->masters[port - 1];
if (dev == iboe->netdevs[port - 1] &&
(event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
event == NETDEV_UP || event == NETDEV_CHANGE))
update_qps_port = port;
- if (curr_netdev) {
- port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- if (curr_master) {
- /* if using bonding/team and a slave port is down, we
- * don't want the bond IP based gids in the table since
- * flows that select port by gid may get the down port.
- */
- if (port_state == IB_PORT_DOWN &&
- !mlx4_is_bonded(ibdev->dev)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev,
- curr_netdev,
- port);
- } else {
- /* gids from the upper dev (bond/team)
- * should appear in port's gid table
- */
- mlx4_ib_get_dev_addr(curr_master,
- ibdev, port);
- }
- }
- /* if bonding is used it is possible that we add it to
- * masters only after IP address is assigned to the
- * net bonding interface.
- */
- if (curr_master && (old_master != curr_master)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev,
- curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_master, ibdev, port);
- }
-
- if (!curr_master && (old_master != curr_master)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev,
- curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
- }
- } else {
- reset_gid_table(ibdev, port);
- }
}
-
spin_unlock_bh(&iboe->lock);
if (update_qps_port > 0)
@@ -2224,6 +2175,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1 : ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
+ ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
+ ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
+ ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
if (dev->caps.userspace_caps)
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2292,13 +2246,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
- ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
+ ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
+ ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
if (!mlx4_is_slave(ibdev->dev)) {
ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
@@ -2434,26 +2389,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_notif;
}
}
- if (!iboe->nb_inet.notifier_call) {
- iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
- err = register_inetaddr_notifier(&iboe->nb_inet);
- if (err) {
- iboe->nb_inet.notifier_call = NULL;
- goto err_notif;
- }
- }
-#if IS_ENABLED(CONFIG_IPV6)
- if (!iboe->nb_inet6.notifier_call) {
- iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
- err = register_inet6addr_notifier(&iboe->nb_inet6);
- if (err) {
- iboe->nb_inet6.notifier_call = NULL;
- goto err_notif;
- }
- }
-#endif
- if (mlx4_ib_init_gid_table(ibdev))
- goto err_notif;
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2484,18 +2419,6 @@ err_notif:
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->iboe.nb_inet.notifier_call) {
- if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
- pr_warn("failure unregistering notifier\n");
- ibdev->iboe.nb_inet.notifier_call = NULL;
- }
-#if IS_ENABLED(CONFIG_IPV6)
- if (ibdev->iboe.nb_inet6.notifier_call) {
- if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
- pr_warn("failure unregistering notifier\n");
- ibdev->iboe.nb_inet6.notifier_call = NULL;
- }
-#endif
flush_workqueue(wq);
mlx4_ib_close_sriov(ibdev);
@@ -2621,19 +2544,6 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
kfree(ibdev->ib_uc_qpns_bitmap);
}
- if (ibdev->iboe.nb_inet.notifier_call) {
- if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
- pr_warn("failure unregistering notifier\n");
- ibdev->iboe.nb_inet.notifier_call = NULL;
- }
-#if IS_ENABLED(CONFIG_IPV6)
- if (ibdev->iboe.nb_inet6.notifier_call) {
- if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
- pr_warn("failure unregistering notifier\n");
- ibdev->iboe.nb_inet6.notifier_call = NULL;
- }
-#endif
-
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p].index != -1 &&
@@ -2669,31 +2579,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
- goto out;
+ return;
}
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
- for (i = 0; i < dev->caps.num_ports; i++) {
- if (dm[i])
- kfree(dm[i]);
- }
+ while (--i >= 0)
+ kfree(dm[i]);
goto out;
}
- }
- /* initialize or tear down tunnel QPs for the slave */
- for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
dm[i]->port = first_port + i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
- spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
- if (!ibdev->sriov.is_going_down)
+ }
+ /* initialize or tear down tunnel QPs for the slave */
+ spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+ if (!ibdev->sriov.is_going_down) {
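+		/* Queue the prepared work items only while SR-IOV teardown has
+		 * not started; otherwise they are freed unqueued below.
+		 */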
+ for (i = 0; i < ports; i++)
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ for (i = 0; i < ports; i++)
+ kfree(dm[i]);
}
out:
kfree(dm);
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index ed327e6c8fdc..2d5bccd71fc6 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -51,6 +51,10 @@
pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
(group)->name, group->demux->port, ## arg)
+#define mcg_debug_group(group, format, arg...) \
+ pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
+ (group)->name, (group)->demux->port, ## arg)
+
#define mcg_error_group(group, format, arg...) \
pr_err(" %16s: " format, (group)->name, ## arg)
@@ -206,15 +210,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
struct mlx4_ib_dev *dev = ctx->dev;
struct ib_ah_attr ah_attr;
+ unsigned long flags;
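+	/* sm_lock may also be taken from IRQ context, so interrupts must be
+	 * disabled while it is held (hence the _irqsave variants below).
+	 */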
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
if (!dev->sm_ah[ctx->port - 1]) {
/* port is not yet Active, sm_ah not ready */
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
return -EAGAIN;
}
mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
&ah_attr, NULL, mad);
@@ -961,8 +966,8 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
mutex_lock(&group->lock);
if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
mutex_unlock(&group->lock);
- mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
- port, slave, MAX_PEND_REQS_PER_FUNC);
+ mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
+ port, slave, MAX_PEND_REQS_PER_FUNC);
release_group(group, 0);
kfree(req);
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 334387f63358..1e7b23bb2eb0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -70,11 +70,24 @@ extern int mlx4_ib_sm_guid_assign;
#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS 256
+
+enum hw_bar_type {
+ HW_BAR_BF,
+ HW_BAR_DB,
+ HW_BAR_CLOCK,
+ HW_BAR_COUNT
+};
+
+struct mlx4_ib_vma_private_data {
+ struct vm_area_struct *vma;
+};
+
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
+ struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
};
struct mlx4_ib_pd {
@@ -415,7 +428,6 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_device *ib_dev;
struct ib_cq *cq;
struct ib_pd *pd;
- struct ib_mr *mr;
struct work_struct work;
struct workqueue_struct *wq;
struct mlx4_ib_demux_pv_qp qp[2];
@@ -457,15 +469,26 @@ struct mlx4_ib_sriov {
struct idr pv_id_table;
};
+struct gid_cache_context {
+ int real_index;
+ int refcount;
+};
+
+struct gid_entry {
+ union ib_gid gid;
+ struct gid_cache_context *ctx;
+};
+
+struct mlx4_port_gid_table {
+ struct gid_entry gids[MLX4_MAX_PORT_GIDS];
+};
+
struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
- struct net_device *masters[MLX4_MAX_PORTS];
atomic64_t mac[MLX4_MAX_PORTS];
struct notifier_block nb;
- struct notifier_block nb_inet;
- struct notifier_block nb_inet6;
- union ib_gid gid_table[MLX4_MAX_PORTS][128];
+ struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
};
struct pkey_mgt {
@@ -680,8 +703,9 @@ struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
struct ib_mw_bind *mw_bind);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
- int max_page_list_len);
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
@@ -838,5 +862,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
u64 start, u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
+int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
+ u8 port_num, int index);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e0d271782d0a..2542fd3c1a49 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -350,19 +350,24 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
return 0;
}
-struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
- int max_page_list_len)
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_mr *mr;
int err;
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > MLX4_MAX_FAST_REG_PAGES)
+ return ERR_PTR(-EINVAL);
+
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
- max_page_list_len, 0, &mr->mmr);
+ max_num_sg, 0, &mr->mmr);
if (err)
goto err_free;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c5a3a5f0de41..4ad9be3ad61c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,14 +1292,18 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
path->static_rate = 0;
if (ah->ah_flags & IB_AH_GRH) {
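+		/* Translate the logical GID index from the AH into the slot
+		 * actually programmed in the hardware table.
+		 */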
- if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
+ int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
+ port,
+ ah->grh.sgid_index);
+
+ if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
+ real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
return -1;
}
path->grh_mylmc |= 1 << 7;
- path->mgid_index = ah->grh.sgid_index;
+ path->mgid_index = real_sgid_index;
path->hop_limit = ah->grh.hop_limit;
path->tclass_flowlabel =
cpu_to_be32((ah->grh.traffic_class << 20) |
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 6797108ce873..69fb5ba94d0f 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
struct mlx4_port *p;
int i;
int ret;
+ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
+ IB_LINK_LAYER_ETHERNET;
p = kzalloc(sizeof *p, GFP_KERNEL);
if (!p)
@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->pkey_group.name = "pkey_idx";
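+	/* pkey_idx is read-only on Ethernet (RoCE) ports: no store callback */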
p->pkey_group.attrs =
- alloc_group_attrs(show_port_pkey, store_port_pkey,
+ alloc_group_attrs(show_port_pkey,
+ is_eth ? NULL : store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
if (!p->pkey_group.attrs) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5c9eeea62805..2d0dbbf38ceb 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -33,6 +33,7 @@
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "user.h"
@@ -227,7 +228,14 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
- wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
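+	/* Only GSI (QP1) completions carry a meaningful P_Key; translate it
+	 * back to a table index through the cache.
+	 */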
+ if (unlikely(is_qp1(qp->ibqp.qp_type))) {
+ u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+
+ ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
+ &wc->pkey_index);
+ } else {
+ wc->pkey_index = 0;
+ }
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 01fc97db45d6..b84d13a487cc 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 085c24b4b603..41d6911e244e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -212,6 +212,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int err = -ENOMEM;
int max_rq_sg;
int max_sq_sg;
+ u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
@@ -264,7 +265,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->hw_ver = mdev->pdev->revision;
props->max_mr_size = ~0ull;
- props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
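+	/* Advertise every page size at or above the device minimum: the
+	 * capability is a bitmask of supported sizes, not a single size.
+	 */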
+ props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
@@ -273,6 +274,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_sge_rd = props->max_sge;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -1121,7 +1123,6 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
mlx5_ib_destroy_qp(dev->umrc.qp);
ib_destroy_cq(dev->umrc.cq);
- ib_dereg_mr(dev->umrc.mr);
ib_dealloc_pd(dev->umrc.pd);
}
@@ -1136,7 +1137,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- struct ib_mr *mr;
struct ib_cq_init_attr cq_attr = {};
int ret;
@@ -1154,13 +1154,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_0;
}
- mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(mr)) {
- mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
- ret = PTR_ERR(mr);
- goto error_1;
- }
-
cq_attr.cqe = 128;
cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
&cq_attr);
@@ -1218,7 +1211,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
dev->umrc.qp = qp;
dev->umrc.cq = cq;
- dev->umrc.mr = mr;
dev->umrc.pd = pd;
sema_init(&dev->umrc.sem, MAX_UMR_WR);
@@ -1240,9 +1232,6 @@ error_3:
ib_destroy_cq(cq);
error_2:
- ib_dereg_mr(mr);
-
-error_1:
ib_dealloc_pd(pd);
error_0:
@@ -1256,10 +1245,18 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
+ u32 rsvd_lkey;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
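+	/* Use the device's reserved lkey for local DMA; this replaces the
+	 * private DMA MR that is removed elsewhere in this patch.
+	 */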
+ ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
+ if (ret) {
+ pr_err("Failed to query special context %d\n", ret);
+ return ret;
+ }
+ dev->ib_dev.local_dma_lkey = rsvd_lkey;
+
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
if (IS_ERR(devr->p0)) {
ret = PTR_ERR(devr->p0);
@@ -1421,7 +1418,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
@@ -1490,12 +1486,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
- dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr;
dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
dev->ib_dev.process_mad = mlx5_ib_process_mad;
- dev->ib_dev.create_mr = mlx5_ib_create_mr;
- dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
+ dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7cae09836481..bb8cda79e881 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,6 @@ struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- struct ib_mr *mr;
/* control access to UMR QP
*/
struct semaphore sem;
@@ -573,11 +572,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
-int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
-struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr);
-struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
- int max_page_list_len);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
@@ -683,6 +680,11 @@ static inline u8 convert_access(int acc)
MLX5_PERM_LOCAL_READ;
}
+static inline int is_qp1(enum ib_qp_type qp_type)
+{
+ return qp_type == IB_QPT_GSI;
+}
+
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bc9a0de897cb..54a15b5d336d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -441,9 +441,6 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
spin_unlock_irq(&ent->lock);
queue_work(cache->wq, &ent->work);
-
- if (mr)
- break;
}
if (!mr)
@@ -690,12 +687,11 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_mr *mr = dev->umrc.mr;
struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
- sg->lkey = mr->lkey;
+ sg->lkey = dev->umrc.pd->local_dma_lkey;
wr->next = NULL;
wr->send_flags = 0;
@@ -926,7 +922,7 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
sg.addr = dma;
sg.length = ALIGN(npages * sizeof(u64),
MLX5_UMR_MTT_ALIGNMENT);
- sg.lkey = dev->umrc.mr->lkey;
+ sg.lkey = dev->umrc.pd->local_dma_lkey;
wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
MLX5_IB_SEND_UMR_UPDATE_MTT;
@@ -1118,19 +1114,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
error:
- /*
- * Destroy the umem *before* destroying the MR, to ensure we
- * will not have any in-flight notifiers when destroying the
- * MR.
- *
- * As the MR is completely invalid to begin with, and this
- * error path is only taken if we can't push the mr entry into
- * the pagefault tree, this is safe.
- */
-
ib_umem_release(umem);
- /* Kill the MR, and return an error code. */
- clean_mr(mr);
return ERR_PTR(err);
}
@@ -1173,6 +1157,19 @@ static int clean_mr(struct mlx5_ib_mr *mr)
int umred = mr->umred;
int err;
+ if (mr->sig) {
+ if (mlx5_core_destroy_psv(dev->mdev,
+ mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev,
+ mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+ kfree(mr->sig);
+ mr->sig = NULL;
+ }
+
if (!umred) {
err = destroy_mkey(dev, mr);
if (err) {
@@ -1234,14 +1231,15 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
- struct ib_mr_init_attr *mr_init_attr)
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
int access_mode, err;
- int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);
+ int ndescs = roundup(max_num_sg, 4);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -1257,9 +1255,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- access_mode = MLX5_ACCESS_MODE_MTT;
- if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
+ if (mr_type == IB_MR_TYPE_MEM_REG) {
+ access_mode = MLX5_ACCESS_MODE_MTT;
+ in->seg.log2_page_size = PAGE_SHIFT;
+ } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2];
in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
@@ -1285,6 +1285,10 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
mr->sig->sig_err_exists = false;
/* Next UMR, Arm SIGERR */
++mr->sig->sigerr_count;
+ } else {
+ mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
+ err = -EINVAL;
+ goto err_free_in;
}
in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
@@ -1320,80 +1324,6 @@ err_free:
return ERR_PTR(err);
}
-int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
-{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
- int err;
-
- if (mr->sig) {
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_memory.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
- mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
- mr->sig->psv_wire.psv_idx);
- kfree(mr->sig);
- }
-
- err = destroy_mkey(dev, mr);
- if (err) {
- mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmr.key, err);
- return err;
- }
-
- kfree(mr);
-
- return err;
-}
-
-struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
- int max_page_list_len)
-{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_ib_mr *mr;
- int err;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
-
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- goto err_free;
- }
-
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- /*
- * TBD not needed - issue 197292 */
- in->seg.log2_page_size = PAGE_SHIFT;
-
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
- NULL, NULL);
- kfree(in);
- if (err)
- goto err_free;
-
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
- mr->umem = NULL;
-
- return &mr->ibmr;
-
-err_free:
- kfree(mr);
- return ERR_PTR(err);
-}
-
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
int page_list_len)
{
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 203c8a45e095..c745c6c5e10d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -76,11 +76,6 @@ static int is_qp0(enum ib_qp_type qp_type)
return qp_type == IB_QPT_SMI;
}
-static int is_qp1(enum ib_qp_type qp_type)
-{
- return qp_type == IB_QPT_GSI;
-}
-
static int is_sqp(enum ib_qp_type qp_type)
{
return is_qp0(qp_type) || is_qp1(qp_type);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6b2418b74c99..7c3f2fb44ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 93ae51dcf2ff..dc2d48c59e62 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -97,6 +97,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
props->max_sge = mdev->limits.max_sg;
+ props->max_sge_rd = props->max_sge;
props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
props->max_cqe = mdev->limits.max_cqes;
props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9047af429906..8a3ad170d790 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
int rc = arpindex;
struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+ __be32 dst_ipaddr = htonl(dst_ip);
- rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
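+	/* Look the neighbour up through the route's dst entry so the core
+	 * picks the correct next hop (gateway vs. on-link).
+	 */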
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
if (neigh) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 02120d340d50..4713dd7ed764 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
(((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
(((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
- (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
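+		/* The high word holds only the first two MAC bytes; byte 0
+		 * belongs at bits 15:8, not 23:16.
+		 */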
+ (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
} else {
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index fbc43e5f717b..44cb513f9a87 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -375,9 +375,11 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
}
/*
- * nes_alloc_fast_reg_mr
+ * nes_alloc_mr
*/
-static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct ib_mr *nes_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -393,11 +395,18 @@ static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list
u32 stag;
int ret;
struct ib_mr *ibmr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ return ERR_PTR(-E2BIG);
+
/*
* Note: Set to always use a fixed length single page entry PBL. This is to allow
* for the fast_reg_mr operation to always know the size of the PBL.
*/
- if (max_page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ if (max_num_sg > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
return ERR_PTR(-E2BIG);
get_random_bytes(&next_stag_index, sizeof(next_stag_index));
@@ -424,7 +433,7 @@ static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list
nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
stag, stag_index);
- ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
+ ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_num_sg);
if (ret == 0) {
nesmr->ibmr.rkey = stag;
@@ -3929,7 +3938,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
nesibdev->ibdev.bind_mw = nes_bind_mw;
- nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
+ nesibdev->ibdev.alloc_mr = nes_alloc_mr;
nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..b4091ab48db0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_H__
#define __OCRDMA_H__
@@ -231,7 +246,6 @@ struct ocrdma_dev {
u16 base_eqid;
u16 max_eq;
- union ib_gid *sgid_tbl;
/* provided synchronization to sgid table for
* updating gid entries triggered by notifier.
*/
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 4bafa15708d0..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <net/neighbour.h>
#include <net/netevent.h>
@@ -215,8 +230,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT:
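
The hunk above trades a hard BUG_ON() for WARN_ON_ONCE() plus an error return, so a malformed MAD size logs one backtrace and fails the request instead of panicking the machine. A minimal sketch of that idiom follows; demo_check_sizes and its parameters are illustrative, not part of the patch:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

/* WARN_ON_ONCE() prints a backtrace the first time only and evaluates
 * to its condition, so it can gate a recoverable error return where
 * BUG_ON() would have killed the kernel. */
static int demo_check_sizes(size_t in_sz, size_t out_sz, size_t want)
{
        if (WARN_ON_ONCE(in_sz != want || out_sz != want))
                return -EINVAL;
        return 0;
}
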
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_AH_H__
#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/sched.h>
#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_HW_H__
#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 8a1398b253a2..87aa55df7c82 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/module.h>
#include <linux/idr.h>
@@ -46,14 +61,12 @@
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
static LIST_HEAD(ocrdma_dev_list);
static DEFINE_SPINLOCK(ocrdma_devlist_lock);
static DEFINE_IDR(ocrdma_dev_id);
-static union ib_gid ocrdma_zero_sgid;
-
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
u8 mac_addr[6];
@@ -68,135 +81,6 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[6] = mac_addr[4];
guid[7] = mac_addr[5];
}
-
-static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
-{
- int i;
- unsigned long flags;
-
- memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
-
-
- spin_lock_irqsave(&dev->sgid_lock, flags);
- for (i = 0; i < OCRDMA_MAX_SGID; i++) {
- if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
- sizeof(union ib_gid))) {
- /* found free entry */
- memcpy(&dev->sgid_tbl[i], new_sgid,
- sizeof(union ib_gid));
- spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return true;
- } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
- sizeof(union ib_gid))) {
- /* entry already present, no addition is required. */
- spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return false;
- }
- }
- spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return false;
-}
-
-static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
-{
- int found = false;
- int i;
- unsigned long flags;
-
-
- spin_lock_irqsave(&dev->sgid_lock, flags);
- /* first is default sgid, which cannot be deleted. */
- for (i = 1; i < OCRDMA_MAX_SGID; i++) {
- if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
- /* found matching entry */
- memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return found;
-}
-
-static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
- union ib_gid *gid)
-{
- struct ib_event gid_event;
- struct ocrdma_dev *dev;
- bool found = false;
- bool updated = false;
- bool is_vlan = false;
-
- is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
- if (is_vlan)
- netdev = rdma_vlan_dev_real_dev(netdev);
-
- rcu_read_lock();
- list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
- if (dev->nic_info.netdev == netdev) {
- found = true;
- break;
- }
- }
- rcu_read_unlock();
-
- if (!found)
- return NOTIFY_DONE;
-
- mutex_lock(&dev->dev_lock);
- switch (event) {
- case NETDEV_UP:
- updated = ocrdma_add_sgid(dev, gid);
- break;
- case NETDEV_DOWN:
- updated = ocrdma_del_sgid(dev, gid);
- break;
- default:
- break;
- }
- if (updated) {
- /* GID table updated, notify the consumers about it */
- gid_event.device = &dev->ibdev;
- gid_event.element.port_num = 1;
- gid_event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&gid_event);
- }
- mutex_unlock(&dev->dev_lock);
- return NOTIFY_OK;
-}
-
-static int ocrdma_inetaddr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = ptr;
- union ib_gid gid;
- struct net_device *netdev = ifa->ifa_dev->dev;
-
- ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
- return ocrdma_addr_event(event, netdev, &gid);
-}
-
-static struct notifier_block ocrdma_inetaddr_notifier = {
- .notifier_call = ocrdma_inetaddr_event
-};
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-static int ocrdma_inet6addr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- union ib_gid *gid = (union ib_gid *)&ifa->addr;
- struct net_device *netdev = ifa->idev->dev;
- return ocrdma_addr_event(event, netdev, gid);
-}
-
-static struct notifier_block ocrdma_inet6addr_notifier = {
- .notifier_call = ocrdma_inet6addr_event
-};
-
-#endif /* IPV6 and VLAN */
-
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u8 port_num)
{
@@ -265,6 +149,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.query_port = ocrdma_query_port;
dev->ibdev.modify_port = ocrdma_modify_port;
dev->ibdev.query_gid = ocrdma_query_gid;
+ dev->ibdev.get_netdev = ocrdma_get_netdev;
+ dev->ibdev.add_gid = ocrdma_add_gid;
+ dev->ibdev.del_gid = ocrdma_del_gid;
dev->ibdev.get_link_layer = ocrdma_link_layer;
dev->ibdev.alloc_pd = ocrdma_alloc_pd;
dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;
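
With get_netdev/add_gid/del_gid registered here, the IB core's GID cache takes over the bookkeeping the deleted notifier code below did by hand — including dispatching IB_EVENT_GID_CHANGE to consumers. As a reference point, this is roughly what the removed ocrdma_addr_event() had to do itself on every table update; demo_notify_gid_change is an illustrative sketch, not driver code:

#include <rdma/ib_verbs.h>

/* Manual consumer notification, now handled centrally by the core
 * GID cache once the add_gid/del_gid hooks are wired up. */
static void demo_notify_gid_change(struct ib_device *ibdev, u8 port)
{
        struct ib_event ev = {
                .device = ibdev,
                .element.port_num = port,
                .event = IB_EVENT_GID_CHANGE,
        };

        ib_dispatch_event(&ev);
}
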
@@ -294,7 +181,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.dereg_mr = ocrdma_dereg_mr;
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
- dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
+ dev->ibdev.alloc_mr = ocrdma_alloc_mr;
dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
@@ -327,12 +214,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
{
mutex_init(&dev->dev_lock);
- dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
- OCRDMA_MAX_SGID, GFP_KERNEL);
- if (!dev->sgid_tbl)
- goto alloc_err;
- spin_lock_init(&dev->sgid_lock);
-
dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
OCRDMA_MAX_CQ, GFP_KERNEL);
if (!dev->cq_tbl)
@@ -364,7 +245,6 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
- kfree(dev->sgid_tbl);
}
/* OCRDMA sysfs interface */
@@ -410,68 +290,6 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
}
-static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
-{
- /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
- union ib_gid *sgid = &dev->sgid_tbl[0];
-
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- ocrdma_get_guid(dev, &sgid->raw[8]);
-}
-
-static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
- struct net_device *net)
-{
- struct in_device *in_dev;
- union ib_gid gid;
- in_dev = in_dev_get(net);
- if (in_dev) {
- for_ifa(in_dev) {
- ipv6_addr_set_v4mapped(ifa->ifa_address,
- (struct in6_addr *)&gid);
- ocrdma_add_sgid(dev, &gid);
- }
- endfor_ifa(in_dev);
- in_dev_put(in_dev);
- }
-}
-
-static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
- struct net_device *net)
-{
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *in6_dev;
- union ib_gid *pgid;
- struct inet6_ifaddr *ifp;
- in6_dev = in6_dev_get(net);
- if (in6_dev) {
- read_lock_bh(&in6_dev->lock);
- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- pgid = (union ib_gid *)&ifp->addr;
- ocrdma_add_sgid(dev, pgid);
- }
- read_unlock_bh(&in6_dev->lock);
- in6_dev_put(in6_dev);
- }
-#endif
-}
-
-static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
-{
- struct net_device *net_dev;
-
- for_each_netdev(&init_net, net_dev) {
- struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ?
- rdma_vlan_dev_real_dev(net_dev) : net_dev;
-
- if (real_dev == dev->nic_info.netdev) {
- ocrdma_add_default_sgid(dev);
- ocrdma_init_ipv4_gids(dev, net_dev);
- ocrdma_init_ipv6_gids(dev, net_dev);
- }
- }
-}
-
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
@@ -500,7 +318,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
goto alloc_err;
ocrdma_init_service_level(dev);
- ocrdma_init_gid_table(dev);
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
@@ -647,34 +464,12 @@ static struct ocrdma_driver ocrdma_drv = {
.be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
};
-static void ocrdma_unregister_inet6addr_notifier(void)
-{
-#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
-#endif
-}
-
-static void ocrdma_unregister_inetaddr_notifier(void)
-{
- unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier);
-}
-
static int __init ocrdma_init_module(void)
{
int status;
ocrdma_init_debugfs();
- status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
- if (status)
- return status;
-
-#if IS_ENABLED(CONFIG_IPV6)
- status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
- if (status)
- goto err_notifier6;
-#endif
-
status = be_roce_register_driver(&ocrdma_drv);
if (status)
goto err_be_reg;
@@ -682,20 +477,15 @@ static int __init ocrdma_init_module(void)
return 0;
err_be_reg:
-#if IS_ENABLED(CONFIG_IPV6)
- ocrdma_unregister_inet6addr_notifier();
-err_notifier6:
-#endif
- ocrdma_unregister_inetaddr_notifier();
+
return status;
}
static void __exit ocrdma_exit_module(void)
{
be_roce_unregister_driver(&ocrdma_drv);
- ocrdma_unregister_inet6addr_notifier();
- ocrdma_unregister_inetaddr_notifier();
ocrdma_rem_debugfs();
+ idr_destroy(&ocrdma_dev_id);
}
module_init(ocrdma_init_module);
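
The exit path above gains idr_destroy() on the device-ID allocator; without it the idr's cached internal nodes leak on module unload. A self-contained sketch of that lifecycle, with demo_* names standing in for the driver's own:

#include <linux/idr.h>
#include <linux/module.h>

static DEFINE_IDR(demo_dev_id);

/* idr_alloc() hands out a small unique id for the object */
static int demo_track(void *dev)
{
        return idr_alloc(&demo_dev_id, dev, 0, 0, GFP_KERNEL);
}

static void demo_untrack(int id)
{
        idr_remove(&demo_dev_id, id);
}

static void __exit demo_exit(void)
{
        /* all ids must already be removed; this releases the idr's
         * internal layer cache, which otherwise leaks on unload */
        idr_destroy(&demo_dev_id);
}
module_exit(demo_exit);
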
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..6a38268bbe9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_SLI_H__
#define __OCRDMA_SLI_H__
@@ -125,6 +140,8 @@ enum {
OCRDMA_DB_RQ_SHIFT = 24
};
+#define OCRDMA_ROUDP_FLAGS_SHIFT 0x03
+
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */
/* qid #2 msbits at 12-11 */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2014 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2014 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_STATS_H__
#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..1f3affb6a477 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
@@ -31,6 +46,7 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -49,6 +65,7 @@ int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *sgid)
{
+ int ret;
struct ocrdma_dev *dev;
dev = get_ocrdma_dev(ibdev);
@@ -56,8 +73,28 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
- memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
+ ret = ib_get_cached_gid(ibdev, port, index, sgid);
+ if (ret == -EAGAIN) {
+ memcpy(sgid, &zgid, sizeof(*sgid));
+ return 0;
+ }
+
+ return ret;
+}
+
+int ocrdma_add_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context)
+{
+ return 0;
+}
+
+int ocrdma_del_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ void **context)
+{
return 0;
}
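
ocrdma_query_gid now reads the core GID cache instead of the dropped sgid_tbl, and -EAGAIN from the cache means "slot not populated", which the hunk maps to the all-zero GID rather than an error. A sketch of that consumer pattern, assuming the ib_get_cached_gid() signature of this kernel (device, port, index, gid); demo_query_gid is illustrative:

#include <linux/string.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_verbs.h>

static int demo_query_gid(struct ib_device *ibdev, u8 port,
                          int index, union ib_gid *sgid)
{
        int ret = ib_get_cached_gid(ibdev, port, index, sgid);

        /* an unpopulated cache slot reports -EAGAIN: present the
         * zero GID to consumers instead of failing the query */
        if (ret == -EAGAIN) {
                memcpy(sgid, &zgid, sizeof(*sgid));
                return 0;
        }
        return ret;
}
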
@@ -110,6 +147,24 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
return 0;
}
+struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
+{
+ struct ocrdma_dev *dev;
+ struct net_device *ndev = NULL;
+
+ rcu_read_lock();
+
+ dev = get_ocrdma_dev(ibdev);
+ if (dev)
+ ndev = dev->nic_info.netdev;
+ if (ndev)
+ dev_hold(ndev);
+
+ rcu_read_unlock();
+
+ return ndev;
+}
+
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
u8 *ib_speed, u8 *ib_width)
{
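
The get_netdev callback runs its lookup under RCU and must hand back a referenced netdev (or NULL); the dev_hold() inside the read-side section is what keeps the pointer valid after rcu_read_unlock(). The contract in isolation, with struct demo_dev standing in for the driver's private device:

#include <linux/netdevice.h>

struct demo_dev {
        struct net_device *netdev;      /* stands in for nic_info.netdev */
};

/* look up under RCU, pin the netdev, let the caller dev_put() it */
static struct net_device *demo_get_netdev(struct demo_dev *dev)
{
        struct net_device *ndev = NULL;

        rcu_read_lock();
        if (dev)
                ndev = dev->netdev;
        if (ndev)
                dev_hold(ndev); /* taken before leaving the RCU section */
        rcu_read_unlock();

        return ndev;
}
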
@@ -179,7 +234,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
+ IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -2983,21 +3039,26 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
return 0;
}
-struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
int status;
struct ocrdma_mr *mr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- if (max_page_list_len > dev->attr.max_pages_per_frmr)
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > dev->attr.max_pages_per_frmr)
return ERR_PTR(-EINVAL);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
+ status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
if (status)
goto pbl_err;
mr->hwmr.fr_mr = 1;
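
alloc_fast_reg_mr(pd, max_page_list_len) becomes the type-driven alloc_mr(pd, mr_type, max_num_sg); a driver that only supports fast registration rejects every type except IB_MR_TYPE_MEM_REG, as both conversions in this patch do. From the consumer side the call now looks like this (demo_get_frmr is an illustrative wrapper):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* ib_alloc_mr() dispatches to the driver's alloc_mr hook; expect an
 * ERR_PTR such as -EINVAL for mr_types the device cannot provide.
 * Release the MR later with ib_dereg_mr(). */
static struct ib_mr *demo_get_frmr(struct ib_pd *pd, u32 max_num_sg)
{
        return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);
}
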
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..308c16857a5d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for *
- * RoCE (RDMA over Converged Ethernet) adapters. *
- * Copyright (C) 2008-2012 Emulex. All rights reserved. *
- * EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
- * *
- * This program is free software; you can redistribute it and/or *
- * modify it under the terms of version 2 of the GNU General *
- * Public License as published by the Free Software Foundation. *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID. See the GNU General Public License for *
- * more details, a copy of which can be found in the file COPYING *
- * included with this package. *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Contact Information:
* linux-drivers@emulex.com
@@ -23,7 +38,7 @@
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
- *******************************************************************/
+ */
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
@@ -48,6 +63,17 @@ ocrdma_query_protocol(struct ib_device *device, u8 port_num);
void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_gid(struct ib_device *, u8 port,
int index, union ib_gid *gid);
+struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
+int ocrdma_add_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context);
+int ocrdma_del_gid(struct ib_device *device,
+ u8 port_num,
+ unsigned int index,
+ void **context);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
@@ -96,7 +122,9 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
-struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
*ibdev,
int page_list_len);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 725881890c4a..e449e394963f 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -908,7 +908,7 @@ static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return 0;
}
-static struct vm_operations_struct qib_file_vm_ops = {
+static const struct vm_operations_struct qib_file_vm_ops = {
.fault = qib_file_vma_fault,
};
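
Constifying the vm_operations_struct moves the ops table into .rodata, since the hooks are fixed at build time and the mm core already takes const vm_ops. The pattern on its own, with demo_* names as placeholders and the fault signature of this kernel:

#include <linux/mm.h>

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;         /* placeholder fault handler */
}

/* const lets the compiler reject any runtime write to the table */
static const struct vm_operations_struct demo_vm_ops = {
        .fault = demo_fault,
};
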
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index ad843c786e72..5afaa218508d 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* unrestricted LKEY.
*/
rkt->gen++;
+ /*
+ * bits are capped in qib_verbs.c to ensure enough bits
+ * for the generation number
+ */
mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
<< 8);
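
The new comment points at the bit budget behind this expression: a 32-bit LKEY packs the table index into the top ib_qib_lkey_table_size bits, a generation tag into the next (24 - ib_qib_lkey_table_size) bits starting at bit 8, and leaves the low byte alone. A worked instance of the packing, illustrative only (demo_make_lkey mirrors the expression above; with table_size = 16 the index lands in bits 31..16 and the generation tag gets 8 bits, and the MAX_LKEY_TABLE_BITS cap added in qib_verbs.c keeps the generation field from shrinking away as the table grows):

#include <linux/types.h>

static u32 demo_make_lkey(u32 r, u32 gen, u32 table_size)
{
        return (r << (32 - table_size)) |
               ((((1U << (24 - table_size)) - 1) & gen) << 8);
}
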
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 05e3242d8442..9625e7c438e5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 941d4d50d8e7..57e99dc0d80c 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -36,148 +36,17 @@
#include <rdma/ib_pma.h>
-#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+#define IB_SMP_UNSUP_VERSION \
+cpu_to_be16(IB_MGMT_MAD_STATUS_BAD_VERSION)
-struct ib_node_info {
- u8 base_version;
- u8 class_version;
- u8 node_type;
- u8 num_ports;
- __be64 sys_guid;
- __be64 node_guid;
- __be64 port_guid;
- __be16 partition_cap;
- __be16 device_id;
- __be32 revision;
- u8 local_port_num;
- u8 vendor_id[3];
-} __packed;
-
-struct ib_mad_notice_attr {
- u8 generic_type;
- u8 prod_type_msb;
- __be16 prod_type_lsb;
- __be16 trap_num;
- __be16 issuer_lid;
- __be16 toggle_count;
-
- union {
- struct {
- u8 details[54];
- } raw_data;
-
- struct {
- __be16 reserved;
- __be16 lid; /* where violation happened */
- u8 port_num; /* where violation happened */
- } __packed ntc_129_131;
-
- struct {
- __be16 reserved;
- __be16 lid; /* LID where change occurred */
- u8 reserved2;
- u8 local_changes; /* low bit - local changes */
- __be32 new_cap_mask; /* new capability mask */
- u8 reserved3;
- u8 change_flags; /* low 3 bits only */
- } __packed ntc_144;
-
- struct {
- __be16 reserved;
- __be16 lid; /* lid where sys guid changed */
- __be16 reserved2;
- __be64 new_sys_guid;
- } __packed ntc_145;
-
- struct {
- __be16 reserved;
- __be16 lid;
- __be16 dr_slid;
- u8 method;
- u8 reserved2;
- __be16 attr_id;
- __be32 attr_mod;
- __be64 mkey;
- u8 reserved3;
- u8 dr_trunc_hop;
- u8 dr_rtn_path[30];
- } __packed ntc_256;
-
- struct {
- __be16 reserved;
- __be16 lid1;
- __be16 lid2;
- __be32 key;
- __be32 sl_qp1; /* SL: high 4 bits */
- __be32 qp2; /* high 8 bits reserved */
- union ib_gid gid1;
- union ib_gid gid2;
- } __packed ntc_257_258;
-
- } details;
-};
-
-/*
- * Generic trap/notice types
- */
-#define IB_NOTICE_TYPE_FATAL 0x80
-#define IB_NOTICE_TYPE_URGENT 0x81
-#define IB_NOTICE_TYPE_SECURITY 0x82
-#define IB_NOTICE_TYPE_SM 0x83
-#define IB_NOTICE_TYPE_INFO 0x84
+#define IB_SMP_UNSUP_METHOD \
+cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD)
-/*
- * Generic trap/notice producers
- */
-#define IB_NOTICE_PROD_CA cpu_to_be16(1)
-#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
-#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
-#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+#define IB_SMP_UNSUP_METH_ATTR \
+cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
-/*
- * Generic trap/notice numbers
- */
-#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
-#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
-#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
-#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
-#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
-#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
-#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
-#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
-
-/*
- * Repress trap/notice flags
- */
-#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
-#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
-#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
-#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
-#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
-#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
-#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
-#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
-
-/*
- * Generic trap/notice other local changes flags (trap 144).
- */
-#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
-#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
-#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
-
-/*
- * Generic trap/notice M_Key volation flags in dr_trunc_hop (trap 256).
- */
-#define IB_NOTICE_TRAP_DR_NOTICE 0x80
-#define IB_NOTICE_TRAP_DR_TRUNC 0x40
-
-struct ib_vl_weight_elem {
- u8 vl; /* Only low 4 bits, upper 4 bits reserved */
- u8 weight;
-};
+#define IB_SMP_INVALID_FIELD \
+cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
#define IB_VLARB_LOWPRI_0_31 1
#define IB_VLARB_LOWPRI_32_63 2
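
The four qib status macros keep their values and only change spelling: the generic IB_MGMT_MAD_STATUS_* codes carry the same IBTA-defined numbers the old hex literals spelled out, so cpu_to_be16(IB_MGMT_MAD_STATUS_BAD_VERSION) still yields cpu_to_be16(0x0004). A compile-time check can pin that equivalence down, assuming the IB_MGMT_MAD_STATUS_* definitions from <rdma/ib_mad.h> (values taken from the removed lines above):

#include <linux/bug.h>
#include <rdma/ib_mad.h>

/* if a renamed constant ever drifts from the old literal, the build
 * breaks here rather than on the wire */
static void demo_check_mad_status(void)
{
        BUILD_BUG_ON(IB_MGMT_MAD_STATUS_BAD_VERSION != 0x0004);
        BUILD_BUG_ON(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD != 0x0008);
        BUILD_BUG_ON(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB != 0x000C);
        BUILD_BUG_ON(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE != 0x001C);
}
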
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
index 146cf29a2e1d..34927b700b0e 100644
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -75,7 +75,7 @@ static void qib_vma_close(struct vm_area_struct *vma)
kref_put(&ip->ref, qib_release_mmap_info);
}
-static struct vm_operations_struct qib_vm_ops = {
+static const struct vm_operations_struct qib_vm_ops = {
.open = qib_vma_open,
.close = qib_vma_close,
};
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index c4473db46699..19220dcb9a3b 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -327,11 +327,16 @@ out:
*
* Return the memory region on success, otherwise return an errno.
*/
-struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
{
struct qib_mr *mr;
- mr = alloc_mr(max_page_list_len, pd);
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = alloc_mr(max_num_sg, pd);
if (IS_ERR(mr))
return (struct ib_mr *)mr;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f42bd0f47577..22e356ca8058 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -32,6 +32,7 @@
*/
#include <linux/spinlock.h>
+#include <rdma/ib_smi.h>
#include "qib.h"
#include "qib_mad.h"
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a05d1a372208..3dcc4985b60f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -40,6 +40,7 @@
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
+#include <linux/vmalloc.h>
#include "qib.h"
#include "qib_common.h"
@@ -1574,6 +1575,7 @@ static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
props->max_qp = ib_qib_max_qps;
props->max_qp_wr = ib_qib_max_qp_wrs;
props->max_sge = ib_qib_max_sges;
+ props->max_sge_rd = ib_qib_max_sges;
props->max_cq = ib_qib_max_cqs;
props->max_ah = ib_qib_max_ahs;
props->max_cqe = ib_qib_max_cqes;
@@ -2109,10 +2111,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
* the LKEY). The remaining bits act as a generation number or tag.
*/
spin_lock_init(&dev->lk_table.lock);
+ /* ensure the generation number gets enough bits; see qib_keys.c */
+ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion __rcu **)
- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
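The vmalloc() switch matters at the new upper bound: 1 << 23 table entries of 8-byte pointers come to 64 MiB on a 64-bit kernel, far beyond what __get_free_pages() can be expected to satisfy. A rough sketch of the split the clamp preserves, assuming a 32-bit LKEY whose low bits index the table (the exact field layout lives in keys.c):

	/* Illustrative only: with table_bits clamped to 23, at least
	 * 9 bits remain for the generation tag, which satisfies the
	 * "at least 4 bits" requirement noted above. */
	u32 table_bits = ib_qib_lkey_table_size;	/* <= 23 after clamp */
	u32 gen_bits   = 32 - table_bits;		/* >= 9 */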
@@ -2235,7 +2243,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
ibdev->reg_phys_mr = qib_reg_phys_mr;
ibdev->reg_user_mr = qib_reg_user_mr;
ibdev->dereg_mr = qib_dereg_mr;
- ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
+ ibdev->alloc_mr = qib_alloc_mr;
ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
ibdev->alloc_fmr = qib_alloc_fmr;
@@ -2286,7 +2294,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
err_lk:
kfree(dev->qp_table);
err_qpt:
@@ -2340,8 +2348,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- free_pages((unsigned long) dev->lk_table.table,
- get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
kfree(dev->qp_table);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 1635572752ce..a08df70e8503 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -647,6 +647,8 @@ struct qib_qpn_table {
struct qpn_map map[QPNMAP_ENTRIES];
};
+#define MAX_LKEY_TABLE_BITS 23
+
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
@@ -1032,7 +1034,9 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int qib_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_entries);
struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
struct ib_device *ibdev, int page_list_len);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bd94b0a6e9e5..ca2873698d75 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
struct net_device *dev;
struct ipoib_neigh *neigh;
struct ipoib_path *path;
- struct ipoib_cm_tx_buf *tx_ring;
+ struct ipoib_tx_buf *tx_ring;
unsigned tx_head;
unsigned tx_tail;
unsigned long flags;
@@ -342,7 +342,6 @@ struct ipoib_dev_priv {
u16 pkey;
u16 pkey_index;
struct ib_pd *pd;
- struct ib_mr *mr;
struct ib_cq *recv_cq;
struct ib_cq *send_cq;
struct ib_qp *qp;
@@ -504,6 +503,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ int i, off;
+ struct sk_buff *skb = tx_req->skb;
+ skb_frag_t *frags = skb_shinfo(skb)->frags;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ u64 *mapping = tx_req->mapping;
+
+ if (skb_headlen(skb)) {
+ priv->tx_sge[0].addr = mapping[0];
+ priv->tx_sge[0].length = skb_headlen(skb);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < nr_frags; ++i) {
+ priv->tx_sge[i + off].addr = mapping[i + off];
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+ }
+ priv->tx_wr.num_sge = nr_frags + off;
+}
+
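A hedged sketch of the calling sequence this helper enables; qp, wr_id and the error value are placeholders, and the real send paths appear later in this patch:

	/* Map the skb (head + frags), build the gather list, post. */
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req)))
		return -EIO;			/* placeholder error */
	ipoib_build_sge(priv, tx_req);
	priv->tx_wr.wr_id = wr_id;		/* placeholder wr_id */
	rc = ib_post_send(qp, &priv->tx_wr, &bad_wr);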
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index cf32a778e7d0..c78dc1638030 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -332,7 +332,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
int i;
for (i = 0; i < priv->cm.num_frags; ++i)
- sge[i].lkey = priv->mr->lkey;
+ sge[i].lkey = priv->pd->local_dma_lkey;
sge[0].length = IPOIB_CM_HEAD_SIZE;
for (i = 1; i < priv->cm.num_frags; ++i)
@@ -694,14 +694,12 @@ repost:
static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
unsigned int wr_id,
- u64 addr, int len)
+ struct ipoib_tx_buf *tx_req)
{
struct ib_send_wr *bad_wr;
- priv->tx_sge[0].addr = addr;
- priv->tx_sge[0].length = len;
+ ipoib_build_sge(priv, tx_req);
- priv->tx_wr.num_sge = 1;
priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_cm_tx_buf *tx_req;
- u64 addr;
+ struct ipoib_tx_buf *tx_req;
int rc;
if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
*/
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
- tx_req->mapping = addr;
-
skb_orphan(skb);
skb_dst_drop(skb);
- rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
- addr, skb->len);
+ rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &tx->tx_ring[wr_id];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
@@ -854,7 +848,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
}
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
- 0, NULL);
+ 0);
if (ret) {
printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
+ if (dev->features & NETIF_F_SG)
+ attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
- DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
}
-
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 63b92cbb29ad..d266667ca9b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -263,8 +263,7 @@ repost:
"for buf %d\n", wr_id);
}
-static int ipoib_dma_map_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
return -EIO;
}
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
int off;
if (skb_headlen(skb)) {
- ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+ DMA_TO_DEVICE);
off = 1;
} else
off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
- DMA_TO_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[i + off],
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
}
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
void *head, int hlen)
{
struct ib_send_wr *bad_wr;
- int i, off;
struct sk_buff *skb = tx_req->skb;
- skb_frag_t *frags = skb_shinfo(skb)->frags;
- int nr_frags = skb_shinfo(skb)->nr_frags;
- u64 *mapping = tx_req->mapping;
- if (skb_headlen(skb)) {
- priv->tx_sge[0].addr = mapping[0];
- priv->tx_sge[0].length = skb_headlen(skb);
- off = 1;
- } else
- off = 0;
+ ipoib_build_sge(priv, tx_req);
- for (i = 0; i < nr_frags; ++i) {
- priv->tx_sge[i + off].addr = mapping[i + off];
- priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
- }
- priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id;
priv->tx_wr.wr.ud.remote_qpn = qpn;
priv->tx_wr.wr.ud.ah = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
--priv->tx_outstanding;
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
--priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level)
+ enum ipoib_flush_level level,
+ int nesting)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read(&priv->vlan_rwsem);
+ down_read_nested(&priv->vlan_rwsem, nesting);
/*
* Flush any child interfaces too -- they might be up even if
* the parent is down.
*/
list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level);
+ __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
up_read(&priv->vlan_rwsem);
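The new nesting argument is purely a lockdep annotation: each recursion depth becomes its own lock subclass, so acquiring a child's vlan_rwsem while the parent's is held no longer registers as recursive locking on the same class. A minimal sketch of the pattern, with illustrative names:

	/* Depth 0 uses subclass 0, its child subclass 1, and so on. */
	down_read_nested(&parent->vlan_rwsem, 0);
	down_read_nested(&child->vlan_rwsem, 1);
	up_read(&child->vlan_rwsem);
	up_read(&parent->vlan_rwsem);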
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index da149c278cb8..36536ce5a3e2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -48,6 +48,9 @@
#include <linux/jhash.h>
#include <net/arp.h>
+#include <net/addrconf.h>
+#include <linux/inetdevice.h>
+#include <rdma/ib_cache.h>
#define DRV_VERSION "1.0.0"
@@ -89,13 +92,18 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
-static void ipoib_remove_one(struct ib_device *device);
+static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u8 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data);
static struct ib_client ipoib_client = {
.name = "ipoib",
.add = ipoib_add_one,
- .remove = ipoib_remove_one
+ .remove = ipoib_remove_one,
+ .get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
int ipoib_open(struct net_device *dev)
@@ -190,7 +198,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
- features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
return features;
}
@@ -222,6 +230,225 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+/* Called with an RCU read lock taken */
+static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
+ struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct in_device *in_dev;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
+ __be32 ret_addr;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ in_dev = in_dev_get(dev);
+ if (!in_dev)
+ return false;
+
+ ret_addr = inet_confirm_addr(net, in_dev, 0,
+ addr_in->sin_addr.s_addr,
+ RT_SCOPE_HOST);
+ in_dev_put(in_dev);
+ if (ret_addr)
+ return true;
+
+ break;
+ case AF_INET6:
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
+ return true;
+
+ break;
+ }
+ return false;
+}
+
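A hypothetical caller, checking whether the documentation address 192.0.2.1 is configured on some netdev dev; the only requirement is that the RCU read lock is held across the call:

	struct sockaddr_in sin = {
		.sin_family      = AF_INET,
		.sin_addr.s_addr = htonl(0xc0000201),	/* 192.0.2.1 */
	};
	bool match;

	rcu_read_lock();
	match = ipoib_is_dev_match_addr_rcu((struct sockaddr *)&sin, dev);
	rcu_read_unlock();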
+/**
+ * ipoib_get_master_net_dev - Find the master net_device on top of the given net_device.
+ * @dev: base IPoIB net_device
+ *
+ * Returns the master net_device with a reference held, or the same net_device
+ * if no master exists.
+ */
+static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
+{
+ struct net_device *master;
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master)
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (master)
+ return master;
+
+ dev_hold(dev);
+ return dev;
+}
+
+/**
+ * ipoib_get_net_dev_match_addr - Find a net_device matching the given
+ * address, which is an upper device of the given net_device.
+ * @addr: IP address to look for.
+ * @dev: base IPoIB net_device
+ *
+ * If found, returns the net_device with a reference held. Otherwise return
+ * NULL.
+ */
+static struct net_device *ipoib_get_net_dev_match_addr(
+ const struct sockaddr *addr, struct net_device *dev)
+{
+ struct net_device *upper,
+ *result = NULL;
+ struct list_head *iter;
+
+ rcu_read_lock();
+ if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
+ dev_hold(dev);
+ result = dev;
+ goto out;
+ }
+
+ netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
+ if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
+ dev_hold(upper);
+ result = upper;
+ break;
+ }
+ }
+out:
+ rcu_read_unlock();
+ return result;
+}
+
+/* Returns the number of IPoIB netdevs on top of a given IPoIB device matching a
+ * pkey_index and address, if one exists.
+ *
+ * @found_net_dev: contains a matching net_device if the return value >= 1,
+ * with a reference held. */
+static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
+ const union ib_gid *gid,
+ u16 pkey_index,
+ const struct sockaddr *addr,
+ int nesting,
+ struct net_device **found_net_dev)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *net_dev = NULL;
+ int matches = 0;
+
+ if (priv->pkey_index == pkey_index &&
+ (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
+ if (!addr) {
+ net_dev = ipoib_get_master_net_dev(priv->dev);
+ } else {
+ /* Verify the net_device matches the IP address, as
+ * IPoIB child devices currently share a GID. */
+ net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
+ }
+ if (net_dev) {
+ if (!*found_net_dev)
+ *found_net_dev = net_dev;
+ else
+ dev_put(net_dev);
+ ++matches;
+ }
+ }
+
+ /* Check child interfaces */
+ down_read_nested(&priv->vlan_rwsem, nesting);
+ list_for_each_entry(child_priv, &priv->child_intfs, list) {
+ matches += ipoib_match_gid_pkey_addr(child_priv, gid,
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
+ if (matches > 1)
+ break;
+ }
+ up_read(&priv->vlan_rwsem);
+
+ return matches;
+}
+
+/* Returns the number of matching net_devs found (between 0 and 2). Also
+ * returns the matching net_device in the @net_dev parameter, holding a
+ * reference to the net_device, if the number of matches >= 1. */
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
+ u16 pkey_index,
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ struct net_device **net_dev)
+{
+ struct ipoib_dev_priv *priv;
+ int matches = 0;
+
+ *net_dev = NULL;
+
+ list_for_each_entry(priv, dev_list, list) {
+ if (priv->port != port)
+ continue;
+
+ matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
+ addr, 0, net_dev);
+ if (matches > 1)
+ break;
+ }
+
+ return matches;
+}
+
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u8 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data)
+{
+ struct net_device *net_dev;
+ struct list_head *dev_list = client_data;
+ u16 pkey_index;
+ int matches;
+ int ret;
+
+ if (!rdma_protocol_ib(dev, port))
+ return NULL;
+
+ ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
+ if (ret)
+ return NULL;
+
+ if (!dev_list)
+ return NULL;
+
+ /* See if we can find a unique device matching the L2 parameters */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, NULL, &net_dev);
+
+ switch (matches) {
+ case 0:
+ return NULL;
+ case 1:
+ return net_dev;
+ }
+
+ dev_put(net_dev);
+
+ /* Couldn't find a unique device with L2 parameters only. Use L3
+ * address to uniquely match the net device */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, addr, &net_dev);
+ switch (matches) {
+ case 0:
+ return NULL;
+ default:
+ dev_warn_ratelimited(&dev->dev,
+ "duplicate IP address detected\n");
+ /* Fall through */
+ case 1:
+ return net_dev;
+ }
+}
+
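As a worked example: if two child interfaces share a GID and P_Key, as the comment above notes they currently do, the first L2-only pass reports two matches; the retry keyed on the L3 address then narrows this to one, and only a genuinely duplicated IP address still produces multiple matches and trips the rate-limited warning.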
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -232,6 +459,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
netdev_update_features(dev);
+ dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -1577,7 +1805,8 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device);
priv->dev->dev_id = port - 1;
- if (!ib_query_port(hca, port, &attr))
+ result = ib_query_port(hca, port, &attr);
+ if (!result)
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
else {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1827,8 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
}
- if (ipoib_set_dev_features(priv, hca))
+ result = ipoib_set_dev_features(priv, hca);
+ if (result)
goto device_init_failed;
/*
@@ -1684,7 +1914,7 @@ static void ipoib_add_one(struct ib_device *device)
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
- int s, e, p;
+ int p;
int count = 0;
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1923,7 @@ static void ipoib_add_one(struct ib_device *device)
INIT_LIST_HEAD(dev_list);
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
- } else {
- s = 1;
- e = device->phys_port_cnt;
- }
-
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
@@ -1720,12 +1942,11 @@ static void ipoib_add_one(struct ib_device *device)
ib_set_client_data(device, &ipoib_client, dev_list);
}
-static void ipoib_remove_one(struct ib_device *device)
+static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
struct ipoib_dev_priv *priv, *tmp;
- struct list_head *dev_list;
+ struct list_head *dev_list = client_data;
- dev_list = ib_get_client_data(device, &ipoib_client);
if (!dev_list)
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 0d23e0568deb..09a1748f9d13 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -393,8 +393,13 @@ static int ipoib_mcast_join_complete(int status,
goto out_locked;
}
} else {
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
+ bool silent_fail =
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ status == -EINVAL;
+
+ if (mcast->logcount < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN ||
+ silent_fail) {
ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
mcast->mcmember.mgid.raw, status);
@@ -403,6 +408,9 @@ static int ipoib_mcast_join_complete(int status,
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
mcast->mcmember.mgid.raw, status);
}
+
+ if (!silent_fail)
+ mcast->logcount++;
}
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
@@ -448,8 +456,7 @@ out_locked:
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
- int create)
+static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast;
@@ -471,7 +478,14 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
IB_SA_MCMEMBER_REC_PKEY |
IB_SA_MCMEMBER_REC_JOIN_STATE;
- if (create) {
+ if (mcast != priv->broadcast) {
+ /*
+ * RFC 4391:
+ * The MGID MUST use the same P_Key, Q_Key, SL, MTU,
+ * and HopLimit as those used in the broadcast-GID. The rest
+ * of attributes SHOULD follow the values used in the
+ * broadcast-GID as well.
+ */
comp_mask |=
IB_SA_MCMEMBER_REC_QKEY |
IB_SA_MCMEMBER_REC_MTU_SELECTOR |
@@ -492,6 +506,22 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.sl = priv->broadcast->mcmember.sl;
rec.flow_label = priv->broadcast->mcmember.flow_label;
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
+
+ /*
+ * Historically Linux IPoIB has never properly supported SEND
+ * ONLY join. It emulated it by not providing all the required
+ * attributes, which is enough to prevent group creation and to
+ * detect whether there are full members or not. A major problem
+ * with supporting SEND ONLY joins is detecting when the group is
+ * auto-destroyed, as IPoIB will cache the MLID.
+ */
+#if 1
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+#else
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ rec.join_state = 4;
+#endif
}
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
@@ -517,7 +547,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
struct ib_port_attr port_attr;
unsigned long delay_until = 0;
struct ipoib_mcast *mcast = NULL;
- int create = 1;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
@@ -566,7 +595,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
mcast = priv->broadcast;
- create = 0;
if (mcast->backoff > 1 &&
time_before(jiffies, mcast->delay_until)) {
delay_until = mcast->delay_until;
@@ -590,12 +618,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
/* Found the next unjoined group */
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- create = 0;
- else
- create = 1;
spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast, create);
+ ipoib_mcast_join(dev, mcast);
spin_lock_irq(&priv->lock);
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
@@ -618,7 +642,7 @@ out:
}
spin_unlock_irq(&priv->lock);
if (mcast)
- ipoib_mcast_join(dev, mcast, create);
+ ipoib_mcast_join(dev, mcast);
}
int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..78845b6e8b81 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -152,12 +152,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
return -ENODEV;
}
- priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(priv->mr)) {
- printk(KERN_WARNING "%s: ib_get_dma_mr failed\n", ca->name);
- goto out_free_pd;
- }
-
/*
* the various IPoIB tasks assume they will never race against
* themselves, so always use a single thread workqueue
@@ -165,7 +159,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->wq = create_singlethread_workqueue("ipoib_wq");
if (!priv->wq) {
printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
- goto out_free_mr;
+ goto out_free_pd;
}
size = ipoib_recvq_size + 1;
@@ -177,7 +171,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
- goto out_free_wq;
+ if (ret != -ENOSYS)
+ goto out_free_wq;
cq_attr.cqe = size;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
@@ -224,13 +219,13 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff;
for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
- priv->tx_sge[i].lkey = priv->mr->lkey;
+ priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
priv->tx_wr.opcode = IB_WR_SEND;
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
- priv->rx_sge[0].lkey = priv->mr->lkey;
+ priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
priv->rx_wr.num_sge = 1;
@@ -253,9 +248,6 @@ out_free_wq:
destroy_workqueue(priv->wq);
priv->wq = NULL;
-out_free_mr:
- ib_dereg_mr(priv->mr);
-
out_free_pd:
ib_dealloc_pd(priv->pd);
@@ -288,12 +280,7 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
priv->wq = NULL;
}
- if (ib_dereg_mr(priv->mr))
- ipoib_warn(priv, "ib_dereg_mr failed\n");
-
- if (ib_dealloc_pd(priv->pd))
- ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
+ ib_dealloc_pd(priv->pd);
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 6a594aac2290..1ace5d83a4d7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -74,34 +74,37 @@
#include "iscsi_iser.h"
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
+
static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
-
-static unsigned int iscsi_max_lun = 512;
-module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static struct workqueue_struct *release_wq;
+struct iser_global ig;
int iser_debug_level = 0;
-bool iser_pi_enable = false;
-int iser_pi_guard = 1;
+module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
-MODULE_VERSION(DRV_VER);
+static unsigned int iscsi_max_lun = 512;
+module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512)");
-module_param_named(debug_level, iser_debug_level, int, 0644);
-MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
+module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)");
-module_param_named(pi_enable, iser_pi_enable, bool, 0644);
+bool iser_pi_enable = false;
+module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
-module_param_named(pi_guard, iser_pi_guard, int, 0644);
+int iser_pi_guard;
+module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
-static struct workqueue_struct *release_wq;
-struct iser_global ig;
-
/*
* iscsi_iser_recv() - Process a successful recv completion
* @conn: iscsi connection
@@ -201,10 +204,12 @@ iser_initialize_task_headers(struct iscsi_task *task,
goto out;
}
+ tx_desc->wr_idx = 0;
+ tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
- tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
iser_task->iser_conn = iser_conn;
out:
@@ -360,16 +365,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = &iser_task->desc;
- struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
/* DEVICE_REMOVAL event might have already released the device */
if (!device)
return;
- ib_dma_unmap_single(device->ib_device,
- tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (likely(tx_desc->mapped)) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->mapped = false;
+ }
/* mgmt tasks do not need special cleanup */
if (!task->sc)
@@ -622,6 +630,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ep) {
iser_conn = ep->dd_data;
max_cmds = iser_conn->max_cmds;
+ shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
+ shost->max_sectors = iser_conn->scsi_max_sectors;
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -640,6 +650,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
SHOST_DIX_GUARD_CRC);
}
+ /*
+ * Limit the sg_tablesize and max_sectors based on the device
+ * max fastreg page list length.
+ */
+ shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
+ ib_conn->device->dev_attr.max_fast_reg_page_list_len);
+ shost->max_sectors = min_t(unsigned int,
+ 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
+
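As a worked example, assuming 4 KiB pages: an sg_tablesize of 128 yields (128 * 4096) >> 9 = 1024 sectors, exactly the 512 KiB cap, while a device whose fastreg page list is limited to 64 entries is clamped to 512 sectors (256 KiB).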
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -742,15 +761,9 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
- stats->custom_length = 4;
- strcpy(stats->custom[0].desc, "qp_tx_queue_full");
- stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */
- strcpy(stats->custom[1].desc, "fmr_map_not_avail");
- stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */;
- strcpy(stats->custom[2].desc, "eh_abort_cnt");
- stats->custom[2].value = conn->eh_abort_cnt;
- strcpy(stats->custom[3].desc, "fmr_unalign_cnt");
- stats->custom[3].value = conn->fmr_unalign_cnt;
+ stats->custom_length = 1;
+ strcpy(stats->custom[0].desc, "fmr_unalign_cnt");
+ stats->custom[0].value = conn->fmr_unalign_cnt;
}
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
@@ -839,10 +852,9 @@ failure:
static int
iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
- struct iser_conn *iser_conn;
+ struct iser_conn *iser_conn = ep->dd_data;
int rc;
- iser_conn = ep->dd_data;
rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */
@@ -854,7 +866,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
mutex_unlock(&iser_conn->state_mutex);
}
- iser_info("ib conn %p rc = %d\n", iser_conn, rc);
+ iser_info("iser conn %p rc = %d\n", iser_conn, rc);
if (rc > 0)
return 1; /* success, this is the equivalent of POLLOUT */
@@ -876,11 +888,9 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static void
iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
- struct iser_conn *iser_conn;
+ struct iser_conn *iser_conn = ep->dd_data;
- iser_conn = ep->dd_data;
- iser_info("ep %p iser conn %p state %d\n",
- ep, iser_conn, iser_conn->state);
+ iser_info("ep %p iser conn %p\n", ep, iser_conn);
mutex_lock(&iser_conn->state_mutex);
iser_conn_terminate(iser_conn);
@@ -900,6 +910,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(iser_conn);
}
+
iscsi_destroy_endpoint(ep);
}
@@ -962,8 +973,8 @@ static struct scsi_host_template iscsi_iser_sht = {
.name = "iSCSI Initiator over iSER",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
- .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
- .max_sectors = 1024,
+ .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
+ .max_sectors = ISER_DEF_MAX_SECTORS,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
@@ -1074,7 +1085,7 @@ static void __exit iser_exit(void)
if (!connlist_empty) {
iser_err("Error cleanup stage completed but we still have iser "
- "connections, destroying them anyway.\n");
+ "connections, destroying them anyway\n");
list_for_each_entry_safe(iser_conn, n, &ig.connlist,
conn_list) {
iser_conn_release(iser_conn);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 262ba1f8ee50..86f6583485ef 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -98,8 +98,13 @@
#define SHIFT_4K 12
#define SIZE_4K (1ULL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
- /* support up to 512KB in one RDMA */
-#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+
+/* Default support is 512KB I/O size */
+#define ISER_DEF_MAX_SECTORS 1024
+#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
+/* Maximum support is 8MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K)
+
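These defaults are self-consistent: 1024 sectors * 512 B = 512 KiB, and (1024 * 512) >> SHIFT_4K gives 128 scatter-gather entries of 4 KiB each; the 8 MB ceiling likewise works out to (16384 * 512) >> 12 = 2048 entries.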
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
#define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
@@ -239,6 +244,7 @@ struct iser_data_buf {
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
+struct iser_reg_resources;
/**
* struct iser_mem_reg - iSER memory registration info
@@ -259,6 +265,14 @@ enum iser_desc_type {
ISCSI_TX_DATAOUT
};
+/* Maximum number of work requests per task:
+ * Data memory region local invalidate + fast registration
+ * Protection memory region local invalidate + fast registration
+ * Signature memory region local invalidate + fast registration
+ * PDU send
+ */
+#define ISER_MAX_WRS 7
+
/**
* struct iser_tx_desc - iSER TX descriptor (for send wr_id)
*
@@ -270,6 +284,12 @@ enum iser_desc_type {
* sg[1] optionally points to either of immediate data
* unsolicited data-out or control
* @num_sge: number sges used on this TX task
+ * @mapped: Is the task header mapped
+ * @wr_idx: Current WR index
+ * @wrs: Array of WRs per task
+ * @data_reg: Data buffer registration details
+ * @prot_reg: Protection buffer registration details
+ * @sig_attrs: Signature attributes
*/
struct iser_tx_desc {
struct iser_hdr iser_header;
@@ -278,6 +298,12 @@ struct iser_tx_desc {
u64 dma_addr;
struct ib_sge tx_sg[2];
int num_sge;
+ bool mapped;
+ u8 wr_idx;
+ struct ib_send_wr wrs[ISER_MAX_WRS];
+ struct iser_mem_reg data_reg;
+ struct iser_mem_reg prot_reg;
+ struct ib_sig_attrs sig_attrs;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
@@ -324,6 +350,33 @@ struct iser_comp {
};
/**
+ * struct iser_reg_ops - Memory registration operations
+ * per-device registration schemes
+ *
+ * @alloc_reg_res: Allocate registration resources
+ * @free_reg_res: Free registration resources
+ * @reg_mem: Register memory buffers
+ * @unreg_mem: Un-register memory buffers
+ * @reg_desc_get: Get a registration descriptor from pool
+ * @reg_desc_put: Put a registration descriptor back to pool
+ */
+struct iser_reg_ops {
+ int (*alloc_reg_res)(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size);
+ void (*free_reg_res)(struct ib_conn *ib_conn);
+ int (*reg_mem)(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *reg);
+ void (*unreg_mem)(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+ struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
+ void (*reg_desc_put)(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
+};
+
+/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
@@ -336,11 +389,7 @@ struct iser_comp {
* @comps_used: Number of completion contexts used, Min between online
* cpus and device max completion vectors
* @comps: Dynamically allocated array of completion handlers
- * Memory registration pool Function pointers (FMR or Fastreg):
- * @iser_alloc_rdma_reg_res: Allocation of memory regions pool
- * @iser_free_rdma_reg_res: Free of memory regions pool
- * @iser_reg_rdma_mem: Memory registration routine
- * @iser_unreg_rdma_mem: Memory deregistration routine
+ * @reg_ops: Registration ops
*/
struct iser_device {
struct ib_device *ib_device;
@@ -352,54 +401,73 @@ struct iser_device {
int refcount;
int comps_used;
struct iser_comp *comps;
- int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max);
- void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
- int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
+ struct iser_reg_ops *reg_ops;
};
#define ISER_CHECK_GUARD 0xc0
#define ISER_CHECK_REFTAG 0x0f
#define ISER_CHECK_APPTAG 0x30
-enum iser_reg_indicator {
- ISER_DATA_KEY_VALID = 1 << 0,
- ISER_PROT_KEY_VALID = 1 << 1,
- ISER_SIG_KEY_VALID = 1 << 2,
- ISER_FASTREG_PROTECTED = 1 << 3,
+/**
+ * struct iser_reg_resources - Fast registration resources
+ *
+ * @mr: memory region
+ * @fmr_pool: pool of fmrs
+ * @frpl: fast reg page list used by frwrs
+ * @page_vec: fast reg page list used by fmr pool
+ * @mr_valid: is mr valid indicator
+ */
+struct iser_reg_resources {
+ union {
+ struct ib_mr *mr;
+ struct ib_fmr_pool *fmr_pool;
+ };
+ union {
+ struct ib_fast_reg_page_list *frpl;
+ struct iser_page_vec *page_vec;
+ };
+ u8 mr_valid:1;
};
/**
* struct iser_pi_context - Protection information context
*
- * @prot_mr: protection memory region
- * @prot_frpl: protection fastreg page list
- * @sig_mr: signature feature enabled memory region
+ * @rsc: protection buffer registration resources
+ * @sig_mr: signature-enabled memory region
+ * @sig_mr_valid: is sig_mr valid indicator
+ * @sig_protected: is region protected indicator
*/
struct iser_pi_context {
- struct ib_mr *prot_mr;
- struct ib_fast_reg_page_list *prot_frpl;
+ struct iser_reg_resources rsc;
struct ib_mr *sig_mr;
+ u8 sig_mr_valid:1;
+ u8 sig_protected:1;
};
/**
- * struct fast_reg_descriptor - Fast registration descriptor
+ * struct iser_fr_desc - Fast registration descriptor
*
* @list: entry in connection fastreg pool
- * @data_mr: data memory region
- * @data_frpl: data fastreg page list
+ * @rsc: data buffer registration resources
* @pi_ctx: protection information context
- * @reg_indicators: fast registration indicators
*/
-struct fast_reg_descriptor {
+struct iser_fr_desc {
struct list_head list;
- struct ib_mr *data_mr;
- struct ib_fast_reg_page_list *data_frpl;
+ struct iser_reg_resources rsc;
struct iser_pi_context *pi_ctx;
- u8 reg_indicators;
+};
+
+/**
+ * struct iser_fr_pool - connection fast registration pool
+ *
+ * @list: list of fastreg descriptors
+ * @lock: protects fmr/fastreg pool
+ * @size: size of the pool
+ */
+struct iser_fr_pool {
+ struct list_head list;
+ spinlock_t lock;
+ int size;
};
/**
@@ -415,15 +483,7 @@ struct fast_reg_descriptor {
* @pi_support: Indicate device T10-PI support
* @beacon: beacon send wr to signal all flush errors were drained
* @flush_comp: completes when all connection completions consumed
- * @lock: protects fmr/fastreg pool
- * @union.fmr:
- * @pool: FMR pool for fast registrations
- * @page_vec: page vector to hold mapped commands pages
- * used for registration
- * @union.fastreg:
- * @pool: Fast registration descriptors pool for fast
- * registrations
- * @pool_size: Size of pool
+ * @fr_pool: connection fast registration pool
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -436,17 +496,7 @@ struct ib_conn {
bool pi_support;
struct ib_send_wr beacon;
struct completion flush_comp;
- spinlock_t lock;
- union {
- struct {
- struct ib_fmr_pool *pool;
- struct iser_page_vec *page_vec;
- } fmr;
- struct {
- struct list_head pool;
- int pool_size;
- } fastreg;
- };
+ struct iser_fr_pool fr_pool;
};
/**
@@ -477,6 +527,8 @@ struct ib_conn {
* @rx_desc_head: head of rx_descs cyclic buffer
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
+ * @scsi_sg_tablesize: scsi host sg_tablesize
+ * @scsi_max_sectors: scsi host max sectors
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -501,6 +553,8 @@ struct iser_conn {
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
+ unsigned short scsi_sg_tablesize;
+ unsigned int scsi_max_sectors;
};
/**
@@ -556,6 +610,9 @@ extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
+extern unsigned int iser_max_sectors;
+
+int iser_assign_reg_ops(struct iser_device *device);
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -597,10 +654,10 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
+void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
@@ -630,15 +687,40 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
-int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct fast_reg_descriptor *
-iser_reg_desc_get(struct ib_conn *ib_conn);
+struct iser_fr_desc *
+iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
-iser_reg_desc_put(struct ib_conn *ib_conn,
- struct fast_reg_descriptor *desc);
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc);
+
+static inline struct ib_send_wr *
+iser_tx_next_wr(struct iser_tx_desc *tx_desc)
+{
+ struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx];
+ struct ib_send_wr *last_wr;
+
+ if (tx_desc->wr_idx) {
+ last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1];
+ last_wr->next = cur_wr;
+ }
+ tx_desc->wr_idx++;
+
+ return cur_wr;
+}
+
#endif
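A hedged sketch of how a task chains its work requests through this helper; the WR contents are elided and the names are illustrative:

	/* The first call returns wrs[0]; the second returns wrs[1] and
	 * links it behind wrs[0] via ->next. At most ISER_MAX_WRS calls
	 * are expected per task. */
	struct ib_send_wr *reg_wr  = iser_tx_next_wr(tx_desc);
	struct ib_send_wr *send_wr = iser_tx_next_wr(tx_desc);
	/* fill reg_wr as the registration WR, send_wr as the PDU send */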
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3e2118e8ed87..d511879d8cdf 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,7 +49,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -73,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
return err;
}
- err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
+ err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -103,7 +102,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -128,7 +126,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
+ err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -170,13 +168,7 @@ static void iser_create_send_desc(struct iser_conn *iser_conn,
memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
tx_desc->iser_header.flags = ISER_VER;
-
tx_desc->num_sge = 1;
-
- if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
- tx_desc->tx_sg[0].lkey = device->mr->lkey;
- iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
- }
}
static void iser_free_login_buf(struct iser_conn *iser_conn)
@@ -266,7 +258,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+ if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
+ iser_conn->scsi_sg_tablesize))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
@@ -291,7 +284,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = device->mr->lkey;
+ rx_sg->lkey = device->pd->local_dma_lkey;
}
iser_conn->rx_desc_head = 0;
@@ -307,7 +300,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->iser_free_rdma_reg_res(ib_conn);
+ device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -320,8 +313,8 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->iser_free_rdma_reg_res)
- device->iser_free_rdma_reg_res(ib_conn);
+ if (device->reg_ops->free_reg_res)
+ device->reg_ops->free_reg_res(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -454,7 +447,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
- int err = 0;
+ int err;
struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
@@ -475,7 +468,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
/* build the tx desc */
- iser_initialize_task_headers(task, tx_desc);
+ err = iser_initialize_task_headers(task, tx_desc);
+ if (err)
+ goto send_data_out_error;
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
@@ -502,7 +497,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
send_data_out_error:
kmem_cache_free(ig.desc_cache, tx_desc);
- iser_err("conn %p failed err %d\n",conn, err);
+ iser_err("conn %p failed err %d\n", conn, err);
return err;
}
@@ -543,7 +538,7 @@ int iser_send_control(struct iscsi_conn *conn,
tx_dsg->addr = iser_conn->login_req_dma;
tx_dsg->length = task->data_count;
- tx_dsg->lkey = device->mr->lkey;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
mdesc->num_sge = 2;
}
@@ -666,7 +661,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
int is_rdma_data_aligned = 1;
int is_rdma_prot_aligned = 1;
int prot_count = scsi_prot_sg_count(iser_task->sc);
@@ -703,7 +697,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_IN]) {
- device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
@@ -715,7 +709,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_OUT]) {
- device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f0cdc961eb11..2493cc748db8 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,6 +38,55 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
+static
+int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *mem_reg);
+static
+int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *mem_reg);
+
+static struct iser_reg_ops fastreg_ops = {
+ .alloc_reg_res = iser_alloc_fastreg_pool,
+ .free_reg_res = iser_free_fastreg_pool,
+ .reg_mem = iser_fast_reg_mr,
+ .unreg_mem = iser_unreg_mem_fastreg,
+ .reg_desc_get = iser_reg_desc_get_fr,
+ .reg_desc_put = iser_reg_desc_put_fr,
+};
+
+static struct iser_reg_ops fmr_ops = {
+ .alloc_reg_res = iser_alloc_fmr_pool,
+ .free_reg_res = iser_free_fmr_pool,
+ .reg_mem = iser_fast_reg_fmr,
+ .unreg_mem = iser_unreg_mem_fmr,
+ .reg_desc_get = iser_reg_desc_get_fmr,
+ .reg_desc_put = iser_reg_desc_put_fmr,
+};
+
+int iser_assign_reg_ops(struct iser_device *device)
+{
+ struct ib_device_attr *dev_attr = &device->dev_attr;
+
+ /* Assign function handles - based on FMR support */
+ if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
+ device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
+ iser_info("FMR supported, using FMR for registration\n");
+ device->reg_ops = &fmr_ops;
+ } else
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ iser_info("FastReg supported, using FastReg for registration\n");
+ device->reg_ops = &fastreg_ops;
+ } else {
+ iser_err("IB device supports neither FMRs nor FastRegs, can't register memory\n");
+ return -1;
+ }
+
+ return 0;
+}
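Once assigned, callers dispatch through the table without branching on the scheme; a minimal sketch with error handling elided:

	/* The same call sites serve both fmr_ops and fastreg_ops. */
	err = device->reg_ops->alloc_reg_res(ib_conn, cmds_max, size);
	if (!err)
		err = device->reg_ops->reg_mem(iser_task, mem, rsc, reg);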
static void
iser_free_bounce_sg(struct iser_data_buf *data)
@@ -146,30 +195,47 @@ iser_copy_to_bounce(struct iser_data_buf *data)
iser_copy_bounce(data, true);
}
-struct fast_reg_descriptor *
-iser_reg_desc_get(struct ib_conn *ib_conn)
+struct iser_fr_desc *
+iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
- struct fast_reg_descriptor *desc;
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
unsigned long flags;
- spin_lock_irqsave(&ib_conn->lock, flags);
- desc = list_first_entry(&ib_conn->fastreg.pool,
- struct fast_reg_descriptor, list);
+ spin_lock_irqsave(&fr_pool->lock, flags);
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
list_del(&desc->list);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
+ spin_unlock_irqrestore(&fr_pool->lock, flags);
return desc;
}
void
-iser_reg_desc_put(struct ib_conn *ib_conn,
- struct fast_reg_descriptor *desc)
+iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
- spin_lock_irqsave(&ib_conn->lock, flags);
- list_add(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
+ spin_lock_irqsave(&fr_pool->lock, flags);
+ list_add(&desc->list, &fr_pool->list);
+ spin_unlock_irqrestore(&fr_pool->lock, flags);
+}
+
+struct iser_fr_desc *
+iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
+{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+
+ return list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+}
+
+void
+iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
+{
}
/**
@@ -297,7 +363,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* consecutive SG elements are actually fragments of the same physical page.
*/
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
- struct ib_device *ibdev)
+ struct ib_device *ibdev,
+ unsigned sg_tablesize)
{
struct scatterlist *sg, *sgl, *next_sg = NULL;
u64 start_addr, end_addr;
@@ -309,6 +376,14 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
sgl = data->sg;
start_addr = ib_sg_dma_address(ibdev, sgl);
+ if (unlikely(sgl[0].offset &&
+ data->data_len >= sg_tablesize * PAGE_SIZE)) {
+ iser_dbg("can't register length %lx with offset %x, "
+ "falling back to bounce buffer\n", data->data_len,
+ sgl[0].offset);
+ return 0;
+ }
+
for_each_sg(sgl, sg, data->dma_nents, i) {
if (start_check && !IS_4K_ALIGNED(start_addr))
break;
@@ -330,8 +405,11 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
break;
}
ret_len = (next_sg) ? i : i+1;
- iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
- ret_len, data->dma_nents, data);
+
+ if (unlikely(ret_len != data->dma_nents))
+ iser_warn("rdma alignment violation (%d/%d aligned)\n",
+ ret_len, data->dma_nents);
+
return ret_len;
}
@@ -393,7 +471,7 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
{
struct scatterlist *sg = mem->sg;
- reg->sge.lkey = device->mr->lkey;
+ reg->sge.lkey = device->pd->local_dma_lkey;
reg->rkey = device->mr->rkey;
reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
@@ -407,15 +485,12 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir,
- int aligned_len)
+ enum iser_data_dir cmd_dir)
{
struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
iscsi_conn->fmr_unalign_cnt++;
- iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
- aligned_len, mem->size);
if (iser_debug_level > 0)
iser_data_buf_dump(mem, device->ib_device);
@@ -439,13 +514,15 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
* returns: 0 on success, errno code on failure
*/
static
-int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
+int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct iser_page_vec *page_vec,
- struct iser_mem_reg *mem_reg)
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
+ struct iser_page_vec *page_vec = rsc->page_vec;
+ struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
struct ib_pool_fmr *fmr;
int ret, plen;
@@ -461,7 +538,7 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
return -EINVAL;
}
- fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+ fmr = ib_fmr_pool_map_phys(fmr_pool,
page_vec->pages,
page_vec->length,
page_vec->pages[0]);
@@ -471,11 +548,15 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
return ret;
}
- mem_reg->sge.lkey = fmr->fmr->lkey;
- mem_reg->rkey = fmr->fmr->rkey;
- mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
- mem_reg->sge.length = page_vec->data_size;
- mem_reg->mem_h = fmr;
+ reg->sge.lkey = fmr->fmr->lkey;
+ reg->rkey = fmr->fmr->rkey;
+ reg->sge.addr = page_vec->pages[0] + page_vec->offset;
+ reg->sge.length = page_vec->data_size;
+ reg->mem_h = fmr;
+
+ iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ " length=0x%x\n", reg->sge.lkey, reg->rkey,
+ reg->sge.addr, reg->sge.length);
return 0;
}
@@ -505,71 +586,17 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
+ struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
if (!reg->mem_h)
return;
- iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
- reg->mem_h);
+ device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
+ reg->mem_h);
reg->mem_h = NULL;
}
-/**
- * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
- * using FMR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct ib_device *ibdev = device->ib_device;
- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_mem_reg *mem_reg;
- int aligned_len;
- int err;
- int i;
-
- mem_reg = &iser_task->rdma_reg[cmd_dir];
-
- aligned_len = iser_data_buf_aligned_len(mem, ibdev);
- if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, mem,
- cmd_dir, aligned_len);
- if (err) {
- iser_err("failed to allocate bounce buffer\n");
- return err;
- }
- }
-
- /* if there a single dma entry, FMR is not needed */
- if (mem->dma_nents == 1) {
- return iser_reg_dma(device, mem, mem_reg);
- } else { /* use FMR for multiple dma entries */
- err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
- mem_reg);
- if (err && err != -EAGAIN) {
- iser_data_buf_dump(mem, ibdev);
- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
- mem->dma_nents,
- ntoh24(iser_task->desc.iscsi_header.dlength));
- iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- ib_conn->fmr.page_vec->data_size,
- ib_conn->fmr.page_vec->length,
- ib_conn->fmr.page_vec->offset);
- for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
- iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
- }
- if (err)
- return err;
- }
- return 0;
-}
-
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
@@ -637,10 +664,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
u32 rkey;
- memset(inv_wr, 0, sizeof(*inv_wr));
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_id = ISER_FASTREG_LI_WRID;
inv_wr->ex.invalidate_rkey = mr->rkey;
+ inv_wr->send_flags = 0;
+ inv_wr->num_sge = 0;
rkey = ib_inc_rkey(mr->rkey);
ib_update_fast_reg_key(mr, rkey);
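
iser_inv_rkey() no longer memsets and posts a standalone WR: the caller pulls
a slot from the task's preallocated WR chain, and the whole chain is posted
once in iser_post_send() (see the iser_verbs.c hunk below, which posts
&tx_desc->wrs[0]). iser_tx_next_wr() itself is not part of this diff; a
plausible sketch, with the wrs[]/wr_idx member names assumed:

    /* sketch: hand out the next send WR from the tx descriptor's
     * array and link it behind the previous one */
    static inline struct ib_send_wr *
    iser_tx_next_wr(struct iser_tx_desc *tx_desc)
    {
        struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx];

        if (tx_desc->wr_idx)
            tx_desc->wrs[tx_desc->wr_idx - 1].next = cur_wr;
        tx_desc->wr_idx++;

        return cur_wr;
    }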
@@ -648,61 +676,51 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct fast_reg_descriptor *desc,
+ struct iser_pi_context *pi_ctx,
struct iser_mem_reg *data_reg,
struct iser_mem_reg *prot_reg,
struct iser_mem_reg *sig_reg)
{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_pi_context *pi_ctx = desc->pi_ctx;
- struct ib_send_wr sig_wr, inv_wr;
- struct ib_send_wr *bad_wr, *wr = NULL;
- struct ib_sig_attrs sig_attrs;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
+ struct ib_send_wr *wr;
int ret;
- memset(&sig_attrs, 0, sizeof(sig_attrs));
- ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
if (ret)
goto err;
- iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
+ iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
- if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
- iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
- wr = &inv_wr;
+ if (!pi_ctx->sig_mr_valid) {
+ wr = iser_tx_next_wr(tx_desc);
+ iser_inv_rkey(wr, pi_ctx->sig_mr);
}
- memset(&sig_wr, 0, sizeof(sig_wr));
- sig_wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = &data_reg->sge;
- sig_wr.num_sge = 1;
- sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
- sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+ wr = iser_tx_next_wr(tx_desc);
+ wr->opcode = IB_WR_REG_SIG_MR;
+ wr->wr_id = ISER_FASTREG_LI_WRID;
+ wr->sg_list = &data_reg->sge;
+ wr->num_sge = 1;
+ wr->send_flags = 0;
+ wr->wr.sig_handover.sig_attrs = sig_attrs;
+ wr->wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (scsi_prot_sg_count(iser_task->sc))
- sig_wr.wr.sig_handover.prot = &prot_reg->sge;
- sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
-
- if (!wr)
- wr = &sig_wr;
+ wr->wr.sig_handover.prot = &prot_reg->sge;
else
- wr->next = &sig_wr;
-
- ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
- if (ret) {
- iser_err("reg_sig_mr failed, ret:%d\n", ret);
- goto err;
- }
- desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
+ wr->wr.sig_handover.prot = NULL;
+ wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ pi_ctx->sig_mr_valid = 0;
sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
sig_reg->rkey = pi_ctx->sig_mr->rkey;
sig_reg->sge.addr = 0;
sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
- iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+ iser_dbg("sig reg: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
sig_reg->sge.length);
err:
@@ -711,29 +729,16 @@ err:
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct fast_reg_descriptor *desc,
- enum iser_reg_indicator ind,
+ struct iser_reg_resources *rsc,
struct iser_mem_reg *reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- struct ib_mr *mr;
- struct ib_fast_reg_page_list *frpl;
- struct ib_send_wr fastreg_wr, inv_wr;
- struct ib_send_wr *bad_wr, *wr = NULL;
- int ret, offset, size, plen;
-
- /* if there a single dma entry, dma mr suffices */
- if (mem->dma_nents == 1)
- return iser_reg_dma(device, mem, reg);
-
- if (ind == ISER_DATA_KEY_VALID) {
- mr = desc->data_mr;
- frpl = desc->data_frpl;
- } else {
- mr = desc->pi_ctx->prot_mr;
- frpl = desc->pi_ctx->prot_frpl;
- }
+ struct ib_mr *mr = rsc->mr;
+ struct ib_fast_reg_page_list *frpl = rsc->frpl;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct ib_send_wr *wr;
+ int offset, size, plen;
plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
&offset, &size);
@@ -742,118 +747,151 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return -EINVAL;
}
- if (!(desc->reg_indicators & ind)) {
- iser_inv_rkey(&inv_wr, mr);
- wr = &inv_wr;
+ if (!rsc->mr_valid) {
+ wr = iser_tx_next_wr(tx_desc);
+ iser_inv_rkey(wr, mr);
}
- /* Prepare FASTREG WR */
- memset(&fastreg_wr, 0, sizeof(fastreg_wr));
- fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
- fastreg_wr.opcode = IB_WR_FAST_REG_MR;
- fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
- fastreg_wr.wr.fast_reg.page_list = frpl;
- fastreg_wr.wr.fast_reg.page_list_len = plen;
- fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
- fastreg_wr.wr.fast_reg.length = size;
- fastreg_wr.wr.fast_reg.rkey = mr->rkey;
- fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- if (!wr)
- wr = &fastreg_wr;
- else
- wr->next = &fastreg_wr;
-
- ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
- if (ret) {
- iser_err("fast registration failed, ret:%d\n", ret);
- return ret;
- }
- desc->reg_indicators &= ~ind;
+ wr = iser_tx_next_wr(tx_desc);
+ wr->opcode = IB_WR_FAST_REG_MR;
+ wr->wr_id = ISER_FASTREG_LI_WRID;
+ wr->send_flags = 0;
+ wr->wr.fast_reg.iova_start = frpl->page_list[0] + offset;
+ wr->wr.fast_reg.page_list = frpl;
+ wr->wr.fast_reg.page_list_len = plen;
+ wr->wr.fast_reg.page_shift = SHIFT_4K;
+ wr->wr.fast_reg.length = size;
+ wr->wr.fast_reg.rkey = mr->rkey;
+ wr->wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+ rsc->mr_valid = 0;
reg->sge.lkey = mr->lkey;
reg->rkey = mr->rkey;
reg->sge.addr = frpl->page_list[0] + offset;
reg->sge.length = size;
- return ret;
+ iser_dbg("fast reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ " length=0x%x\n", reg->sge.lkey, reg->rkey,
+ reg->sge.addr, reg->sge.length);
+
+ return 0;
}
-/**
- * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
- * using Fast Registration WR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+static int
+iser_handle_unaligned_buf(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ enum iser_data_dir dir)
{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct ib_device *ibdev = device->ib_device;
- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
- struct fast_reg_descriptor *desc = NULL;
+ struct iser_conn *iser_conn = task->iser_conn;
+ struct iser_device *device = iser_conn->ib_conn.device;
int err, aligned_len;
- aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ aligned_len = iser_data_buf_aligned_len(mem, device->ib_device,
+ iser_conn->scsi_sg_tablesize);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, mem,
- cmd_dir, aligned_len);
- if (err) {
- iser_err("failed to allocate bounce buffer\n");
+ err = fall_to_bounce_buf(task, mem, dir);
+ if (err)
return err;
- }
}
+ return 0;
+}
+
+static int
+iser_reg_prot_sg(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ struct iser_fr_desc *desc,
+ struct iser_mem_reg *reg)
+{
+ struct iser_device *device = task->iser_conn->ib_conn.device;
+
+ if (mem->dma_nents == 1)
+ return iser_reg_dma(device, mem, reg);
+
+ return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
+}
+
+static int
+iser_reg_data_sg(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ struct iser_fr_desc *desc,
+ struct iser_mem_reg *reg)
+{
+ struct iser_device *device = task->iser_conn->ib_conn.device;
+
+ if (mem->dma_nents == 1)
+ return iser_reg_dma(device, mem, reg);
+
+ return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+}
+
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
+ enum iser_data_dir dir)
+{
+ struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct iser_data_buf *mem = &task->data[dir];
+ struct iser_mem_reg *reg = &task->rdma_reg[dir];
+ struct iser_mem_reg *data_reg;
+ struct iser_fr_desc *desc = NULL;
+ int err;
+
+ err = iser_handle_unaligned_buf(task, mem, dir);
+ if (unlikely(err))
+ return err;
+
if (mem->dma_nents != 1 ||
- scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- desc = iser_reg_desc_get(ib_conn);
- mem_reg->mem_h = desc;
+ scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+ desc = device->reg_ops->reg_desc_get(ib_conn);
+ reg->mem_h = desc;
}
- err = iser_fast_reg_mr(iser_task, mem, desc,
- ISER_DATA_KEY_VALID, mem_reg);
- if (err)
+ if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
+ data_reg = reg;
+ else
+ data_reg = &task->desc.data_reg;
+
+ err = iser_reg_data_sg(task, mem, desc, data_reg);
+ if (unlikely(err))
goto err_reg;
- if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- struct iser_mem_reg prot_reg;
-
- memset(&prot_reg, 0, sizeof(prot_reg));
- if (scsi_prot_sg_count(iser_task->sc)) {
- mem = &iser_task->prot[cmd_dir];
- aligned_len = iser_data_buf_aligned_len(mem, ibdev);
- if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, mem,
- cmd_dir, aligned_len);
- if (err) {
- iser_err("failed to allocate bounce buffer\n");
- return err;
- }
- }
+ if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+ struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
- err = iser_fast_reg_mr(iser_task, mem, desc,
- ISER_PROT_KEY_VALID, &prot_reg);
- if (err)
+ if (scsi_prot_sg_count(task->sc)) {
+ mem = &task->prot[dir];
+ err = iser_handle_unaligned_buf(task, mem, dir);
+ if (unlikely(err))
goto err_reg;
- }
- err = iser_reg_sig_mr(iser_task, desc, mem_reg,
- &prot_reg, mem_reg);
- if (err) {
- iser_err("Failed to register signature mr\n");
- return err;
+ err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+ if (unlikely(err))
+ goto err_reg;
}
- desc->reg_indicators |= ISER_FASTREG_PROTECTED;
+
+ err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
+ prot_reg, reg);
+ if (unlikely(err))
+ goto err_reg;
+
+ desc->pi_ctx->sig_protected = 1;
}
return 0;
+
err_reg:
if (desc)
- iser_reg_desc_put(ib_conn, desc);
+ device->reg_ops->reg_desc_put(ib_conn, desc);
return err;
}
+
+void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
+ enum iser_data_dir dir)
+{
+ struct iser_device *device = task->iser_conn->ib_conn.device;
+
+ device->reg_ops->unreg_mem(task, dir);
+}
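
With the ops table in place, registration callers (e.g. the task setup and
cleanup paths in iscsi_iser.c, not shown in this section) presumably shrink
to a scheme-agnostic pair; a sketch:

    /* sketch: direction-specific registration, same call for FMR
     * and fast-registration devices */
    err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
    if (err)
        return err;
    /* ... I/O ... */
    iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);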
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 5c9f565ea0e8..ae70cc1463ac 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -87,25 +87,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
return ret;
}
- /* Assign function handles - based on FMR support */
- if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
- device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
- device->iser_free_rdma_reg_res = iser_free_fmr_pool;
- device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
- device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
- } else
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
- device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
- device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
- device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
+ ret = iser_assign_reg_ops(device);
+ if (ret)
+ return ret;
device->comps_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
@@ -201,7 +185,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
(void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
- (void)ib_dealloc_pd(device->pd);
+ ib_dealloc_pd(device->pd);
kfree(device->comps);
device->comps = NULL;
@@ -211,28 +195,40 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
/**
- * iser_create_fmr_pool - Creates FMR pool and page_vector
+ * iser_alloc_fmr_pool - Creates FMR pool and page_vector
*
* returns 0 on success, or errno code on failure
*/
-int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
+int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size)
{
struct iser_device *device = ib_conn->device;
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_page_vec *page_vec;
+ struct iser_fr_desc *desc;
+ struct ib_fmr_pool *fmr_pool;
struct ib_fmr_pool_param params;
- int ret = -ENOMEM;
+ int ret;
- ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
- (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
- GFP_KERNEL);
- if (!ib_conn->fmr.page_vec)
- return ret;
+ INIT_LIST_HEAD(&fr_pool->list);
+ spin_lock_init(&fr_pool->lock);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
- ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
+ page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
+ GFP_KERNEL);
+ if (!page_vec) {
+ ret = -ENOMEM;
+ goto err_frpl;
+ }
+
+ page_vec->pages = (u64 *)(page_vec + 1);
params.page_shift = SHIFT_4K;
- /* when the first/last SG element are not start/end *
- * page aligned, the map whould be of N+1 pages */
- params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ params.max_pages_per_fmr = size;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
params.pool_size = cmds_max * 2;
@@ -243,23 +239,25 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
- if (!IS_ERR(ib_conn->fmr.pool))
- return 0;
-
- /* no FMR => no need for page_vec */
- kfree(ib_conn->fmr.page_vec);
- ib_conn->fmr.page_vec = NULL;
-
- ret = PTR_ERR(ib_conn->fmr.pool);
- ib_conn->fmr.pool = NULL;
- if (ret != -ENOSYS) {
+ fmr_pool = ib_create_fmr_pool(device->pd, &params);
+ if (IS_ERR(fmr_pool)) {
+ ret = PTR_ERR(fmr_pool);
iser_err("FMR allocation failed, err %d\n", ret);
- return ret;
- } else {
- iser_warn("FMRs are not supported, using unaligned mode\n");
- return 0;
+ goto err_fmr;
}
+
+ desc->rsc.page_vec = page_vec;
+ desc->rsc.fmr_pool = fmr_pool;
+ list_add(&desc->list, &fr_pool->list);
+
+ return 0;
+
+err_fmr:
+ kfree(page_vec);
+err_frpl:
+ kfree(desc);
+
+ return ret;
}
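
The page_vec allocation above uses the common one-kmalloc trick: header and
payload live in a single allocation, and page_vec + 1 points at the first
byte after the struct. Layout sketch:

    /*
     *   kmalloc(sizeof(*page_vec) + sizeof(u64) * size)
     *
     *   +-----------------------+-----------------------+
     *   | struct iser_page_vec  | u64 pages[size]       |
     *   +-----------------------+-----------------------+
     *   ^ page_vec              ^ (u64 *)(page_vec + 1)
     */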
/**
@@ -267,26 +265,68 @@ int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
+
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ list_del(&desc->list);
+
iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, ib_conn->fmr.pool);
+ ib_conn, desc->rsc.fmr_pool);
+
+ ib_destroy_fmr_pool(desc->rsc.fmr_pool);
+ kfree(desc->rsc.page_vec);
+ kfree(desc);
+}
+
+static int
+iser_alloc_reg_res(struct ib_device *ib_device,
+ struct ib_pd *pd,
+ struct iser_reg_resources *res,
+ unsigned int size)
+{
+ int ret;
+
+ res->frpl = ib_alloc_fast_reg_page_list(ib_device, size);
+ if (IS_ERR(res->frpl)) {
+ ret = PTR_ERR(res->frpl);
+ iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
+ ret);
+ return PTR_ERR(res->frpl);
+ }
+
+ res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
+ if (IS_ERR(res->mr)) {
+ ret = PTR_ERR(res->mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto fast_reg_mr_failure;
+ }
+ res->mr_valid = 1;
- if (ib_conn->fmr.pool != NULL)
- ib_destroy_fmr_pool(ib_conn->fmr.pool);
+ return 0;
+
+fast_reg_mr_failure:
+ ib_free_fast_reg_page_list(res->frpl);
- ib_conn->fmr.pool = NULL;
+ return ret;
+}
- kfree(ib_conn->fmr.page_vec);
- ib_conn->fmr.page_vec = NULL;
+static void
+iser_free_reg_res(struct iser_reg_resources *rsc)
+{
+ ib_dereg_mr(rsc->mr);
+ ib_free_fast_reg_page_list(rsc->frpl);
}
static int
-iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *desc)
+iser_alloc_pi_ctx(struct ib_device *ib_device,
+ struct ib_pd *pd,
+ struct iser_fr_desc *desc,
+ unsigned int size)
{
struct iser_pi_context *pi_ctx = NULL;
- struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2,
- .flags = IB_MR_SIGNATURE_EN};
- int ret = 0;
+ int ret;
desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
if (!desc->pi_ctx)
@@ -294,36 +334,25 @@ iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
pi_ctx = desc->pi_ctx;
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- ret = PTR_ERR(pi_ctx->prot_frpl);
- goto prot_frpl_failure;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(pi_ctx->prot_mr)) {
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto prot_mr_failure;
+ ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
+ if (ret) {
+ iser_err("failed to allocate reg_resources\n");
+ goto alloc_reg_res_err;
}
- desc->reg_indicators |= ISER_PROT_KEY_VALID;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
if (IS_ERR(pi_ctx->sig_mr)) {
ret = PTR_ERR(pi_ctx->sig_mr);
goto sig_mr_failure;
}
- desc->reg_indicators |= ISER_SIG_KEY_VALID;
- desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+ pi_ctx->sig_mr_valid = 1;
+ desc->pi_ctx->sig_protected = 0;
return 0;
sig_mr_failure:
- ib_dereg_mr(desc->pi_ctx->prot_mr);
-prot_mr_failure:
- ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
-prot_frpl_failure:
+ iser_free_reg_res(&pi_ctx->rsc);
+alloc_reg_res_err:
kfree(desc->pi_ctx);
return ret;
@@ -332,82 +361,71 @@ prot_frpl_failure:
static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
- ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
- ib_dereg_mr(pi_ctx->prot_mr);
- ib_destroy_mr(pi_ctx->sig_mr);
+ iser_free_reg_res(&pi_ctx->rsc);
+ ib_dereg_mr(pi_ctx->sig_mr);
kfree(pi_ctx);
}
-static int
-iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
- bool pi_enable, struct fast_reg_descriptor *desc)
+static struct iser_fr_desc *
+iser_create_fastreg_desc(struct ib_device *ib_device,
+ struct ib_pd *pd,
+ bool pi_enable,
+ unsigned int size)
{
+ struct iser_fr_desc *desc;
int ret;
- desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(desc->data_frpl)) {
- ret = PTR_ERR(desc->data_frpl);
- iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
- ret);
- return PTR_ERR(desc->data_frpl);
- }
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
- desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(desc->data_mr)) {
- ret = PTR_ERR(desc->data_mr);
- iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
- goto fast_reg_mr_failure;
- }
- desc->reg_indicators |= ISER_DATA_KEY_VALID;
+ ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
+ if (ret)
+ goto reg_res_alloc_failure;
if (pi_enable) {
- ret = iser_alloc_pi_ctx(ib_device, pd, desc);
+ ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
if (ret)
goto pi_ctx_alloc_failure;
}
- return 0;
+ return desc;
+
pi_ctx_alloc_failure:
- ib_dereg_mr(desc->data_mr);
-fast_reg_mr_failure:
- ib_free_fast_reg_page_list(desc->data_frpl);
+ iser_free_reg_res(&desc->rsc);
+reg_res_alloc_failure:
+ kfree(desc);
- return ret;
+ return ERR_PTR(ret);
}
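
Returning the descriptor instead of an int moves errors into the pointer
itself via the kernel's ERR_PTR convention; the pool constructor below
decodes them the standard way. Caller-side sketch:

    desc = iser_create_fastreg_desc(ib_device, pd, pi_enable, size);
    if (IS_ERR(desc))
        return PTR_ERR(desc);   /* pointer encodes -ENOMEM, -EINVAL, ... */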
/**
- * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
+ * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
* returns 0 on success, or errno code on failure
*/
-int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size)
{
struct iser_device *device = ib_conn->device;
- struct fast_reg_descriptor *desc;
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
int i, ret;
- INIT_LIST_HEAD(&ib_conn->fastreg.pool);
- ib_conn->fastreg.pool_size = 0;
+ INIT_LIST_HEAD(&fr_pool->list);
+ spin_lock_init(&fr_pool->lock);
+ fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc) {
- iser_err("Failed to allocate a new fast_reg descriptor\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = iser_create_fastreg_desc(device->ib_device, device->pd,
- ib_conn->pi_support, desc);
- if (ret) {
- iser_err("Failed to create fastreg descriptor err=%d\n",
- ret);
- kfree(desc);
+ desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+ ib_conn->pi_support, size);
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
goto err;
}
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- ib_conn->fastreg.pool_size++;
+ list_add_tail(&desc->list, &fr_pool->list);
+ fr_pool->size++;
}
return 0;
@@ -422,27 +440,27 @@ err:
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
- struct fast_reg_descriptor *desc, *tmp;
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc, *tmp;
int i = 0;
- if (list_empty(&ib_conn->fastreg.pool))
+ if (list_empty(&fr_pool->list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
+ list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
list_del(&desc->list);
- ib_free_fast_reg_page_list(desc->data_frpl);
- ib_dereg_mr(desc->data_mr);
+ iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
iser_free_pi_ctx(desc->pi_ctx);
kfree(desc);
++i;
}
- if (i < ib_conn->fastreg.pool_size)
+ if (i < fr_pool->size)
iser_warn("pool still has %d regions registered\n",
- ib_conn->fastreg.pool_size - i);
+ fr_pool->size - i);
}
/**
@@ -738,6 +756,31 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
iser_conn->state = ISER_CONN_TERMINATING;
}
+static void
+iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
+{
+ struct iser_device *device = iser_conn->ib_conn.device;
+ unsigned short sg_tablesize, sup_sg_tablesize;
+
+ sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+ sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
+ device->dev_attr.max_fast_reg_page_list_len);
+
+ if (sg_tablesize > sup_sg_tablesize) {
+ sg_tablesize = sup_sg_tablesize;
+ iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
+ } else {
+ iser_conn->scsi_max_sectors = max_sectors;
+ }
+
+ iser_conn->scsi_sg_tablesize = sg_tablesize;
+
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, iser_conn->scsi_sg_tablesize,
+ iser_conn->scsi_max_sectors);
+}
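
A quick worked example of the sizing above, assuming 4 KiB pages
(SIZE_4K == 4096) and max_sectors == 1024 (the value of iser_max_sectors is
not visible in this section):

    sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128

If the HCA reports max_fast_reg_page_list_len below 128 (or below
ISCSI_ISER_MAX_SG_TABLESIZE), sg_tablesize is clamped to that limit and
scsi_max_sectors is scaled back to sg_tablesize * 4096 / 512, i.e. 8 sectors
per page-list entry.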
+
/**
* Called with state mutex held
**/
@@ -776,6 +819,8 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
+ iser_calc_scsi_params(iser_conn, iser_max_sectors);
+
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
@@ -938,7 +983,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
init_completion(&iser_conn->ib_completion);
init_completion(&iser_conn->up_completion);
INIT_LIST_HEAD(&iser_conn->conn_list);
- spin_lock_init(&iser_conn->ib_conn.lock);
mutex_init(&iser_conn->state_mutex);
}
@@ -1017,7 +1061,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
sge.addr = iser_conn->login_resp_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = ib_conn->device->mr->lkey;
+ sge.lkey = ib_conn->device->pd->local_dma_lkey;
rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
rx_wr.sg_list = &sge;
@@ -1072,23 +1116,24 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
{
- int ib_ret;
- struct ib_send_wr send_wr, *send_wr_failed;
+ struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
+ int ib_ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
DMA_TO_DEVICE);
- send_wr.next = NULL;
- send_wr.wr_id = (uintptr_t)tx_desc;
- send_wr.sg_list = tx_desc->tx_sg;
- send_wr.num_sge = tx_desc->num_sge;
- send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;
+ wr->next = NULL;
+ wr->wr_id = (uintptr_t)tx_desc;
+ wr->sg_list = tx_desc->tx_sg;
+ wr->num_sge = tx_desc->num_sge;
+ wr->opcode = IB_WR_SEND;
+ wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
- ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
+ ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0], &bad_wr);
if (ib_ret)
- iser_err("ib_post_send failed, ret:%d\n", ib_ret);
+ iser_err("ib_post_send failed, ret:%d opcode:%d\n",
+ ib_ret, bad_wr->opcode);
return ib_ret;
}
@@ -1240,13 +1285,13 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
- struct fast_reg_descriptor *desc = reg->mem_h;
+ struct iser_fr_desc *desc = reg->mem_h;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
int ret;
- if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
- desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+ if (desc && desc->pi_ctx->sig_protected) {
+ desc->pi_ctx->sig_protected = 0;
ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..403bd29443b8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -235,7 +235,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = device->mr->lkey;
+ rx_sg->lkey = device->pd->local_dma_lkey;
}
isert_conn->rx_desc_head = 0;
@@ -385,22 +385,12 @@ isert_create_device_ib_res(struct isert_device *device)
goto out_cq;
}
- device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(device->mr)) {
- ret = PTR_ERR(device->mr);
- isert_err("failed to create dma mr, device %p, ret=%d\n",
- device, ret);
- goto out_mr;
- }
-
/* Check signature cap */
device->pi_capable = dev_attr->device_cap_flags &
IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
return 0;
-out_mr:
- ib_dealloc_pd(device->pd);
out_cq:
isert_free_comps(device);
return ret;
@@ -411,7 +401,6 @@ isert_free_device_ib_res(struct isert_device *device)
{
isert_info("device %p\n", device);
- ib_dereg_mr(device->mr);
ib_dealloc_pd(device->pd);
isert_free_comps(device);
}
@@ -491,7 +480,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
if (fr_desc->pi_ctx) {
ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
- ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+ ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
kfree(fr_desc->pi_ctx);
}
kfree(fr_desc);
@@ -508,7 +497,6 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
struct ib_device *device,
struct ib_pd *pd)
{
- struct ib_mr_init_attr mr_init_attr;
struct pi_context *pi_ctx;
int ret;
@@ -527,7 +515,8 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
goto err_pi_ctx;
}
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
+ ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(pi_ctx->prot_mr)) {
isert_err("Failed to allocate prot frmr err=%ld\n",
PTR_ERR(pi_ctx->prot_mr));
@@ -536,10 +525,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
}
desc->ind |= ISERT_PROT_KEY_VALID;
- memset(&mr_init_attr, 0, sizeof(mr_init_attr));
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
if (IS_ERR(pi_ctx->sig_mr)) {
isert_err("Failed to allocate signature enabled mr err=%ld\n",
PTR_ERR(pi_ctx->sig_mr));
@@ -577,7 +563,8 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
return PTR_ERR(fr_desc->data_frpl);
}
- fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
+ ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_mr)) {
isert_err("Failed to allocate data frmr err=%ld\n",
PTR_ERR(fr_desc->data_mr));
@@ -775,6 +762,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = isert_rdma_post_recvl(isert_conn);
if (ret)
goto out_conn_dev;
+ /*
+ * Obtain the second reference now before isert_rdma_accept() to
+ * ensure that any initiator generated REJECT CM event that occurs
+ * asynchronously won't drop the last reference until the error path
+ * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
+ * isert_free_conn() -> isert_put_conn() -> kref_put().
+ */
+ if (!kref_get_unless_zero(&isert_conn->kref)) {
+ isert_warn("conn %p connect_release is running\n", isert_conn);
+ goto out_conn_dev;
+ }
ret = isert_rdma_accept(isert_conn);
if (ret)
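
The comment block above relies on the conditional-get semantics of
kref_get_unless_zero(): the increment succeeds only while the refcount has
not yet dropped to zero, which makes it safe against a release running
concurrently. A generic usage sketch (lookup() and table_lock are
hypothetical names):

    spin_lock(&table_lock);
    obj = lookup(table, key);
    if (obj && !kref_get_unless_zero(&obj->kref))
        obj = NULL;     /* lost the race with the final kref_put() */
    spin_unlock(&table_lock);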
@@ -836,11 +834,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
isert_info("conn %p\n", isert_conn);
- if (!kref_get_unless_zero(&isert_conn->kref)) {
- isert_warn("conn %p connect_release is running\n", isert_conn);
- return;
- }
-
mutex_lock(&isert_conn->mutex);
if (isert_conn->state != ISER_CONN_FULL_FEATURE)
isert_conn->state = ISER_CONN_UP;
@@ -1086,8 +1079,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->num_sge = 1;
tx_desc->isert_cmd = isert_cmd;
- if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
- tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -1110,7 +1103,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
- tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
@@ -1143,7 +1136,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = isert_conn->device->mr->lkey;
+ sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
@@ -1193,7 +1186,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
tx_dsg->addr = isert_conn->login_rsp_dma;
tx_dsg->length = length;
- tx_dsg->lkey = isert_conn->device->mr->lkey;
+ tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
tx_desc->num_sge = 2;
}
if (!login->login_failed) {
@@ -2210,7 +2203,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
- tx_dsg->lkey = device->mr->lkey;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
}
@@ -2338,7 +2331,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
- tx_dsg->lkey = device->mr->lkey;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2379,7 +2372,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
- tx_dsg->lkey = device->mr->lkey;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2420,7 +2413,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = device->mr->lkey;
+ ib_sge->lkey = device->pd->local_dma_lkey;
isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
ib_sge->addr, ib_sge->length, ib_sge->lkey);
@@ -2594,7 +2587,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
u32 page_off;
if (mem->dma_nents == 1) {
- sge->lkey = device->mr->lkey;
+ sge->lkey = device->pd->local_dma_lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
@@ -3102,7 +3095,7 @@ out:
static int
isert_setup_np(struct iscsi_np *np,
- struct __kernel_sockaddr_storage *ksockaddr)
+ struct sockaddr_storage *ksockaddr)
{
struct isert_np *isert_np;
struct rdma_cm_id *isert_lid;
@@ -3124,7 +3117,7 @@ isert_setup_np(struct iscsi_np *np,
* in iscsi_target_configfs.c code..
*/
memcpy(&np->np_sockaddr, ksockaddr,
- sizeof(struct __kernel_sockaddr_storage));
+ sizeof(struct sockaddr_storage));
isert_lid = isert_setup_id(isert_np);
if (IS_ERR(isert_lid)) {
@@ -3206,32 +3199,11 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
{
struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_route *cm_route = &cm_id->route;
- struct sockaddr_in *sock_in;
- struct sockaddr_in6 *sock_in6;
conn->login_family = np->np_sockaddr.ss_family;
- if (np->np_sockaddr.ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
- snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
- &sock_in6->sin6_addr.in6_u);
- conn->login_port = ntohs(sock_in6->sin6_port);
-
- sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
- snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
- &sock_in6->sin6_addr.in6_u);
- conn->local_port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
- sprintf(conn->login_ip, "%pI4",
- &sock_in->sin_addr.s_addr);
- conn->login_port = ntohs(sock_in->sin_port);
-
- sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
- sprintf(conn->local_ip, "%pI4",
- &sock_in->sin_addr.s_addr);
- conn->local_port = ntohs(sock_in->sin_port);
- }
+ conn->login_sockaddr = cm_route->addr.dst_addr;
+ conn->local_sockaddr = cm_route->addr.src_addr;
}
static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 9ec23a786c02..6a04ba3c0f72 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -209,7 +209,6 @@ struct isert_device {
int refcount;
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_mr *mr;
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 267dc4f75502..b481490ad257 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -55,8 +55,8 @@
#define DRV_NAME "ib_srp"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 1, 2013"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "July 26, 2015"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
@@ -68,8 +68,8 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
-static bool prefer_fr;
-static bool register_always;
+static bool prefer_fr = true;
+static bool register_always = true;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
-static void srp_remove_one(struct ib_device *device);
+static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
int tmo, res;
- if (strncmp(val, "off", 3) != 0) {
- res = kstrtoint(val, 0, &tmo);
- if (res)
- goto out;
- } else {
- tmo = -1;
- }
+ res = srp_parse_tmo(&tmo, val);
+ if (res)
+ goto out;
+
if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo);
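
The open-coded "off" handling is factored into srp_parse_tmo(), which this
section does not show; presumably it preserves the removed logic. A plausible
sketch:

    /* sketch: "off" maps to -1 (disabled), everything else is an
     * integer timeout */
    static int srp_parse_tmo(int *tmo, const char *val)
    {
        if (!strncmp(val, "off", 3))
            *tmo = -1;
        else
            return kstrtoint(val, 0, tmo);

        return 0;
    }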
@@ -381,7 +378,8 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
INIT_LIST_HEAD(&pool->free_list);
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
- mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
+ mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
+ max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
goto destroy_pool;
@@ -548,7 +546,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (ret)
goto err_qp;
- if (dev->use_fast_reg && dev->has_fr) {
+ if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
ret = PTR_ERR(fr_pool);
@@ -556,10 +554,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- if (ch->fr_pool)
- srp_destroy_fr_pool(ch->fr_pool);
- ch->fr_pool = fr_pool;
- } else if (!dev->use_fast_reg && dev->has_fmr) {
+ } else if (dev->use_fmr) {
fmr_pool = srp_alloc_fmr_pool(target);
if (IS_ERR(fmr_pool)) {
ret = PTR_ERR(fmr_pool);
@@ -567,9 +562,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FMR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
- ch->fmr_pool = fmr_pool;
}
if (ch->qp)
@@ -583,6 +575,16 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
ch->recv_cq = recv_cq;
ch->send_cq = send_cq;
+ if (dev->use_fast_reg) {
+ if (ch->fr_pool)
+ srp_destroy_fr_pool(ch->fr_pool);
+ ch->fr_pool = fr_pool;
+ } else if (dev->use_fmr) {
+ if (ch->fmr_pool)
+ ib_destroy_fmr_pool(ch->fmr_pool);
+ ch->fmr_pool = fmr_pool;
+ }
+
kfree(init_attr);
return 0;
@@ -625,7 +627,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
- } else {
+ } else if (dev->use_fmr) {
if (ch->fmr_pool)
ib_destroy_fmr_pool(ch->fmr_pool);
}
@@ -1087,7 +1089,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
- } else {
+ } else if (dev->use_fmr) {
struct ib_pool_fmr **pfmr;
for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
@@ -1262,6 +1264,8 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
{
struct srp_direct_buf *desc = state->desc;
+ WARN_ON_ONCE(!dma_len);
+
desc->va = cpu_to_be64(dma_addr);
desc->key = cpu_to_be32(rkey);
desc->len = cpu_to_be32(dma_len);
@@ -1274,18 +1278,24 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
static int srp_map_finish_fmr(struct srp_map_state *state,
struct srp_rdma_ch *ch)
{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
+ if (state->fmr.next >= state->fmr.end)
+ return -ENOMEM;
+
fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
state->npages, io_addr);
if (IS_ERR(fmr))
return PTR_ERR(fmr);
- *state->next_fmr++ = fmr;
+ *state->fmr.next++ = fmr;
state->nmdesc++;
- srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
+ srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
+ state->dma_len, fmr->fmr->rkey);
return 0;
}
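
The second change above stops pretending FMR mappings start on a page
boundary: the low bits of base_dma_addr are carried into the descriptor.
Worked example, assuming 4 KiB pages so ~mr_page_mask == 0xfff:

    /*
     *   base_dma_addr                  = 0x12345678
     *   base_dma_addr & ~mr_page_mask  = 0x678
     *
     * with io_addr == 0, the descriptor's va becomes 0x678 instead
     * of 0, so the target starts at the correct byte inside the
     * first mapped page.
     */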
@@ -1300,6 +1310,9 @@ static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_fr_desc *desc;
u32 rkey;
+ if (state->fr.next >= state->fr.end)
+ return -ENOMEM;
+
desc = srp_fr_pool_get(ch->fr_pool);
if (!desc)
return -ENOMEM;
@@ -1323,7 +1336,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
IB_ACCESS_REMOTE_WRITE);
wr.wr.fast_reg.rkey = desc->mr->lkey;
- *state->next_fr++ = desc;
+ *state->fr.next++ = desc;
state->nmdesc++;
srp_map_desc(state, state->base_dma_addr, state->dma_len,
@@ -1336,17 +1349,19 @@ static int srp_finish_mapping(struct srp_map_state *state,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
int ret = 0;
+ WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
+
if (state->npages == 0)
return 0;
- if (state->npages == 1 && !register_always)
+ if (state->npages == 1 && target->global_mr)
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->rkey);
+ target->global_mr->rkey);
else
- ret = target->srp_host->srp_dev->use_fast_reg ?
- srp_map_finish_fr(state, ch) :
+ ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
srp_map_finish_fmr(state, ch);
if (ret == 0) {
@@ -1357,66 +1372,19 @@ static int srp_finish_mapping(struct srp_map_state *state,
return ret;
}
-static void srp_map_update_start(struct srp_map_state *state,
- struct scatterlist *sg, int sg_index,
- dma_addr_t dma_addr)
-{
- state->unmapped_sg = sg;
- state->unmapped_index = sg_index;
- state->unmapped_addr = dma_addr;
-}
-
static int srp_map_sg_entry(struct srp_map_state *state,
struct srp_rdma_ch *ch,
- struct scatterlist *sg, int sg_index,
- bool use_mr)
+ struct scatterlist *sg, int sg_index)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_device *ibdev = dev->dev;
dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
- unsigned int len;
+ unsigned int len = 0;
int ret;
- if (!dma_len)
- return 0;
-
- if (!use_mr) {
- /*
- * Once we're in direct map mode for a request, we don't
- * go back to FMR or FR mode, so no need to update anything
- * other than the descriptor.
- */
- srp_map_desc(state, dma_addr, dma_len, target->rkey);
- return 0;
- }
-
- /*
- * Since not all RDMA HW drivers support non-zero page offsets for
- * FMR, if we start at an offset into a page, don't merge into the
- * current FMR mapping. Finish it out, and use the kernel's MR for
- * this sg entry.
- */
- if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
- dma_len > dev->mr_max_size) {
- ret = srp_finish_mapping(state, ch);
- if (ret)
- return ret;
-
- srp_map_desc(state, dma_addr, dma_len, target->rkey);
- srp_map_update_start(state, NULL, 0, 0);
- return 0;
- }
-
- /*
- * If this is the first sg that will be mapped via FMR or via FR, save
- * our position. We need to know the first unmapped entry, its index,
- * and the first unmapped address within that entry to be able to
- * restart mapping after an error.
- */
- if (!state->unmapped_sg)
- srp_map_update_start(state, sg, sg_index, dma_addr);
+ WARN_ON_ONCE(!dma_len);
while (dma_len) {
unsigned offset = dma_addr & ~dev->mr_page_mask;
@@ -1424,8 +1392,6 @@ static int srp_map_sg_entry(struct srp_map_state *state,
ret = srp_finish_mapping(state, ch);
if (ret)
return ret;
-
- srp_map_update_start(state, sg, sg_index, dma_addr);
}
len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
@@ -1444,11 +1410,8 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 * boundaries.
*/
ret = 0;
- if (len != dev->mr_page_size) {
+ if (len != dev->mr_page_size)
ret = srp_finish_mapping(state, ch);
- if (!ret)
- srp_map_update_start(state, NULL, 0, 0);
- }
return ret;
}
@@ -1458,50 +1421,80 @@ static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_device *ibdev = dev->dev;
struct scatterlist *sg;
- int i;
- bool use_mr;
+ int i, ret;
state->desc = req->indirect_desc;
state->pages = req->map_page;
if (dev->use_fast_reg) {
- state->next_fr = req->fr_list;
- use_mr = !!ch->fr_pool;
- } else {
- state->next_fmr = req->fmr_list;
- use_mr = !!ch->fmr_pool;
+ state->fr.next = req->fr_list;
+ state->fr.end = req->fr_list + target->cmd_sg_cnt;
+ } else if (dev->use_fmr) {
+ state->fmr.next = req->fmr_list;
+ state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
}
- for_each_sg(scat, sg, count, i) {
- if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
- /*
- * Memory registration failed, so backtrack to the
- * first unmapped entry and continue on without using
- * memory registration.
- */
- dma_addr_t dma_addr;
- unsigned int dma_len;
-
-backtrack:
- sg = state->unmapped_sg;
- i = state->unmapped_index;
-
- dma_addr = ib_sg_dma_address(ibdev, sg);
- dma_len = ib_sg_dma_len(ibdev, sg);
- dma_len -= (state->unmapped_addr - dma_addr);
- dma_addr = state->unmapped_addr;
- use_mr = false;
- srp_map_desc(state, dma_addr, dma_len, target->rkey);
+ if (dev->use_fast_reg || dev->use_fmr) {
+ for_each_sg(scat, sg, count, i) {
+ ret = srp_map_sg_entry(state, ch, sg, i);
+ if (ret)
+ goto out;
+ }
+ ret = srp_finish_mapping(state, ch);
+ if (ret)
+ goto out;
+ } else {
+ for_each_sg(scat, sg, count, i) {
+ srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
+ ib_sg_dma_len(dev->dev, sg),
+ target->global_mr->rkey);
}
}
- if (use_mr && srp_finish_mapping(state, ch))
- goto backtrack;
-
req->nmdesc = state->nmdesc;
+ ret = 0;
- return 0;
+out:
+ return ret;
+}
+
+/*
+ * Register the indirect data buffer descriptor with the HCA.
+ *
+ * Note: since the indirect data buffer descriptor has been allocated with
+ * kmalloc() it is guaranteed that this buffer is a physically contiguous
+ * memory buffer.
+ */
+static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
+ void **next_mr, void **end_mr, u32 idb_len,
+ __be32 *idb_rkey)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct srp_map_state state;
+ struct srp_direct_buf idb_desc;
+ u64 idb_pages[1];
+ int ret;
+
+ memset(&state, 0, sizeof(state));
+ memset(&idb_desc, 0, sizeof(idb_desc));
+ state.gen.next = next_mr;
+ state.gen.end = end_mr;
+ state.desc = &idb_desc;
+ state.pages = idb_pages;
+ state.pages[0] = (req->indirect_dma_addr &
+ dev->mr_page_mask);
+ state.npages = 1;
+ state.base_dma_addr = req->indirect_dma_addr;
+ state.dma_len = idb_len;
+ ret = srp_finish_mapping(&state, ch);
+ if (ret < 0)
+ goto out;
+
+ *idb_rkey = idb_desc.key;
+
+out:
+ return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
@@ -1510,12 +1503,13 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_target_port *target = ch->target;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
- int len, nents, count;
+ int len, nents, count, ret;
struct srp_device *dev;
struct ib_device *ibdev;
struct srp_map_state state;
struct srp_indirect_buf *indirect_hdr;
- u32 table_len;
+ u32 idb_len, table_len;
+ __be32 idb_rkey;
u8 fmt;
if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
@@ -1542,7 +1536,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && !register_always) {
+ if (count == 1 && target->global_mr) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1552,7 +1546,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(target->rkey);
+ buf->key = cpu_to_be32(target->global_mr->rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1597,6 +1591,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
count = min(state.ndesc, target->cmd_sg_cnt);
table_len = state.ndesc * sizeof (struct srp_direct_buf);
+ idb_len = sizeof(struct srp_indirect_buf) + table_len;
fmt = SRP_DATA_DESC_INDIRECT;
len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
@@ -1605,8 +1600,18 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
+ if (!target->global_mr) {
+ ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
+ idb_len, &idb_rkey);
+ if (ret < 0)
+ return ret;
+ req->nmdesc++;
+ } else {
+ idb_rkey = target->global_mr->rkey;
+ }
+
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
- indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
+ indirect_hdr->table_desc.key = idb_rkey;
indirect_hdr->table_desc.len = cpu_to_be32(table_len);
indirect_hdr->len = cpu_to_be32(state.total_len);
@@ -2174,7 +2179,7 @@ static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
}
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
- struct srp_login_rsp *lrsp,
+ const struct srp_login_rsp *lrsp,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
@@ -2760,6 +2765,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
return c;
}
+/*
+ * Return values:
+ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
+ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
+ * removal has been scheduled.
+ * 0 and target->state != SRP_TARGET_REMOVED upon success.
+ */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
struct srp_rport_identifiers ids;
@@ -3149,8 +3161,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
- target->lkey = host->srp_dev->mr->lkey;
- target->rkey = host->srp_dev->mr->rkey;
+ target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_mr = host->srp_dev->global_mr;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3265,7 +3277,7 @@ static ssize_t srp_create_target(struct device *dev,
srp_free_ch_ib(target, ch);
srp_free_req_data(target, ch);
target->ch_count = ch - target->ch;
- break;
+ goto connected;
}
}
@@ -3275,6 +3287,7 @@ static ssize_t srp_create_target(struct device *dev,
node_idx++;
}
+connected:
target->scsi_host->nr_hw_queues = target->ch_count;
ret = srp_add_target(host, target);
@@ -3297,6 +3310,8 @@ out:
mutex_unlock(&host->add_target_mutex);
scsi_host_put(target->scsi_host);
+ if (ret < 0)
+ scsi_host_put(target->scsi_host);
return ret;
@@ -3379,7 +3394,7 @@ static void srp_add_one(struct ib_device *device)
struct srp_device *srp_dev;
struct ib_device_attr *dev_attr;
struct srp_host *host;
- int mr_page_shift, s, e, p;
+ int mr_page_shift, p;
u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3404,6 +3419,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev->use_fast_reg = (srp_dev->has_fr &&
(!srp_dev->has_fmr || prefer_fr));
+ srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -3436,22 +3452,18 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
- srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- if (IS_ERR(srp_dev->mr))
- goto err_pd;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
+ if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
+ srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ if (IS_ERR(srp_dev->global_mr))
+ goto err_pd;
} else {
- s = 1;
- e = device->phys_port_cnt;
+ srp_dev->global_mr = NULL;
}
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
if (host)
list_add_tail(&host->list, &srp_dev->dev_list);
@@ -3471,13 +3483,13 @@ free_attr:
kfree(dev_attr);
}
-static void srp_remove_one(struct ib_device *device)
+static void srp_remove_one(struct ib_device *device, void *client_data)
{
struct srp_device *srp_dev;
struct srp_host *host, *tmp_host;
struct srp_target_port *target;
- srp_dev = ib_get_client_data(device, &srp_client);
+ srp_dev = client_data;
if (!srp_dev)
return;
@@ -3506,7 +3518,8 @@ static void srp_remove_one(struct ib_device *device)
kfree(host);
}
- ib_dereg_mr(srp_dev->mr);
+ if (srp_dev->global_mr)
+ ib_dereg_mr(srp_dev->global_mr);
ib_dealloc_pd(srp_dev->pd);
kfree(srp_dev);
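
The srp_add_one() hunk above only creates the all-access global DMA MR when registration cannot or need not be used; with register_always set and FMR/FR support present, the apparent intent is that no rkey granting remote access to all of host memory is handed out. A plain-C restatement of that decision, for illustration only:

#include <stdbool.h>
#include <stdio.h>

static bool need_global_mr(bool register_always, bool has_fmr, bool has_fr)
{
	/* mirrors: !register_always || (!has_fmr && !has_fr) */
	return !register_always || (!has_fmr && !has_fr);
}

int main(void)
{
	printf("%d\n", need_global_mr(true, false, false)); /* 1: no HW support, fall back */
	printf("%d\n", need_global_mr(true, true, false));  /* 0: FMR available, skip it */
	return 0;
}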
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 17ee3f80ba55..3608f2e4819c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -95,13 +95,14 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *mr;
+ struct ib_mr *global_mr;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
int max_pages_per_mr;
bool has_fmr;
bool has_fr;
+ bool use_fmr;
bool use_fast_reg;
};
@@ -182,10 +183,10 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
+ struct ib_mr *global_mr;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
- u32 rkey;
enum srp_target_state state;
unsigned int max_iu_len;
unsigned int cmd_sg_cnt;
@@ -276,14 +277,21 @@ struct srp_fr_pool {
* @npages: Number of page addresses in the pages[] array.
* @nmdesc: Number of FMR or FR memory descriptors used for mapping.
* @ndesc: Number of SRP buffer descriptors that have been filled in.
- * @unmapped_sg: First element of the sg-list that is mapped via FMR or FR.
- * @unmapped_index: Index of the first element mapped via FMR or FR.
- * @unmapped_addr: DMA address of the first element mapped via FMR or FR.
*/
struct srp_map_state {
union {
- struct ib_pool_fmr **next_fmr;
- struct srp_fr_desc **next_fr;
+ struct {
+ struct ib_pool_fmr **next;
+ struct ib_pool_fmr **end;
+ } fmr;
+ struct {
+ struct srp_fr_desc **next;
+ struct srp_fr_desc **end;
+ } fr;
+ struct {
+ void **next;
+ void **end;
+ } gen;
};
struct srp_direct_buf *desc;
u64 *pages;
@@ -293,9 +301,6 @@ struct srp_map_state {
unsigned int npages;
unsigned int nmdesc;
unsigned int ndesc;
- struct scatterlist *unmapped_sg;
- int unmapped_index;
- dma_addr_t unmapped_addr;
};
#endif /* IB_SRP_H */
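
The srp_map_state rework above replaces the bare next_fmr/next_fr pointers with paired { next, end } cursors, and the added anonymous gen variant lets type-agnostic code advance the cursor without caring whether FMR or FR descriptors sit behind it. A minimal userspace sketch of the bounded-cursor idea, with hypothetical names (desc_cursor, claim_desc):

#include <stdio.h>

struct desc_cursor {                      /* mirrors the { next, end } pairs */
	void **next;
	void **end;
};

static int claim_desc(struct desc_cursor *c, void *desc)
{
	if (c->next == c->end)            /* pool exhausted: fail instead of */
		return -1;                /* writing past the array */
	*c->next++ = desc;
	return 0;
}

int main(void)
{
	void *pool[2];
	struct desc_cursor c = { pool, pool + 2 };
	int dummy;

	while (claim_desc(&c, &dummy) == 0)
		;
	printf("claimed %d descriptors\n", (int)(c.next - pool));
	return 0;
}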
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 82897ca17f32..f6fe0414139b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
int i;
ioui = (struct ib_dm_iou_info *)mad->data;
- ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->change_id = cpu_to_be16(1);
ioui->max_controllers = 16;
/* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
iocp->subsys_device_id = 0x0;
- iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
- iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
- iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
- iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2 || lo > hi || hi > 1) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
break;
default:
rsp_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
}
}
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
break;
case IB_MGMT_METHOD_SET:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
default:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
break;
}
@@ -783,7 +783,7 @@ static int srpt_post_recv(struct srpt_device *sdev,
list.addr = ioctx->ioctx.dma;
list.length = srp_max_req_size;
- list.lkey = sdev->mr->lkey;
+ list.lkey = sdev->pd->local_dma_lkey;
wr.next = NULL;
wr.sg_list = &list;
@@ -818,7 +818,7 @@ static int srpt_post_send(struct srpt_rdma_ch *ch,
list.addr = ioctx->ioctx.dma;
list.length = len;
- list.lkey = sdev->mr->lkey;
+ list.lkey = sdev->pd->local_dma_lkey;
wr.next = NULL;
wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
@@ -1206,7 +1206,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
while (rsize > 0 && tsize > 0) {
sge->addr = dma_addr;
- sge->lkey = ch->sport->sdev->mr->lkey;
+ sge->lkey = ch->sport->sdev->pd->local_dma_lkey;
if (rsize >= dma_len) {
sge->length =
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
- __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->status = status;
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
- srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
- + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->req_lim_delta =
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
switch (len) {
case 8:
if ((*((__be64 *)lun) &
- __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
goto out_err;
break;
case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
}
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because its"
" length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
}
if (!sport->enabled) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because the target port"
" has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
ret = -ENOMEM;
pr_err("rejected SRP_LOGIN_REQ because it"
" has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch = kzalloc(sizeof *ch, GFP_KERNEL);
if (!ch) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
ret = -ENOMEM;
goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = srpt_ch_qp_rtr(ch, ch->qp);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling"
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (!nacl) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", ch->sess_name);
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
}
ch->sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(ch->sess)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_debug("Failed to create session\n");
goto deregister_session;
}
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
reject:
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof *rej);
@@ -3212,10 +3211,6 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->pd))
goto free_dev;
- sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(sdev->mr))
- goto err_pd;
-
sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
srq_attr.event_handler = srpt_srq_event;
@@ -3227,7 +3222,7 @@ static void srpt_add_one(struct ib_device *device)
sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(sdev->srq))
- goto err_mr;
+ goto err_pd;
pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
__func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
@@ -3251,7 +3246,7 @@ static void srpt_add_one(struct ib_device *device)
* in the system as service_id; therefore, the target_id will change
* if this HCA is gone bad and replaced by different HCA
*/
- if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+ if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
goto err_cm;
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
@@ -3312,8 +3307,6 @@ err_cm:
ib_destroy_cm_id(sdev->cm_id);
err_srq:
ib_destroy_srq(sdev->srq);
-err_mr:
- ib_dereg_mr(sdev->mr);
err_pd:
ib_dealloc_pd(sdev->pd);
free_dev:
@@ -3327,12 +3320,11 @@ err:
/**
* srpt_remove_one() - InfiniBand device removal callback function.
*/
-static void srpt_remove_one(struct ib_device *device)
+static void srpt_remove_one(struct ib_device *device, void *client_data)
{
- struct srpt_device *sdev;
+ struct srpt_device *sdev = client_data;
int i;
- sdev = ib_get_client_data(device, &srpt_client);
if (!sdev) {
pr_info("%s(%s): nothing to do.\n", __func__, device->name);
return;
@@ -3359,7 +3351,6 @@ static void srpt_remove_one(struct ib_device *device)
srpt_release_sdev(sdev);
ib_destroy_srq(sdev->srq);
- ib_dereg_mr(sdev->mr);
ib_dealloc_pd(sdev->pd);
srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 21f8df67522a..5faad8acd789 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -393,7 +393,6 @@ struct srpt_port {
struct srpt_device {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_mr *mr;
struct ib_srq *srq;
struct ib_cm_id *cm_id;
struct ib_device_attr dev_attr;
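
A recurring cleanup in the ib_srpt.c hunks above replaces __constant_cpu_to_be16() and friends with plain cpu_to_be16(): the generic byte-order macros already use __builtin_constant_p() to fold constant arguments at compile time, so the __constant_ variants add nothing. A minimal userspace analogue, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>

/* plain macro: the compiler folds it when x is a constant */
#define my_cpu_to_be16(x) \
	((uint16_t)((((uint16_t)(x)) >> 8) | (uint16_t)(((uint16_t)(x)) << 8)))

static const uint16_t status = my_cpu_to_be16(0x002a); /* folded at build time */

int main(void)
{
	printf("0x%04x\n", status); /* 0x2a00: bytes swapped for big-endian wire order */
	return 0;
}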
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9d35499faca4..08d496411f75 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- int retval;
- retval = mutex_lock_interruptible(&evdev->mutex);
- if (retval)
- return retval;
+ mutex_lock(&evdev->mutex);
- if (!evdev->exist || client->revoked)
- retval = -ENODEV;
- else
- retval = input_flush_device(&evdev->handle, file);
+ if (evdev->exist && !client->revoked)
+ input_flush_device(&evdev->handle, file);
mutex_unlock(&evdev->mutex);
- return retval;
+ return 0;
}
static void evdev_free(struct device *dev)
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f4a30fccbb6..c64208267198 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -343,9 +343,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
__set_bit(EV_FF, dev->evbit);
/* Copy "true" bits into ff device bitmap */
- for (i = 0; i <= FF_MAX; i++)
- if (test_bit(i, dev->ffbit))
- __set_bit(i, ff->ffbit);
+ for_each_set_bit(i, dev->ffbit, FF_CNT)
+ __set_bit(i, ff->ffbit);
/* we can emulate RUMBLE with periodic effects */
if (test_bit(FF_PERIODIC, ff->ffbit))
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index e853a2134680..4a2a9e370be7 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)
for(i = 0; i < 50; i++) {
local_irq_save(flags);
- rdtscl(t1);
+ t1 = rdtsc();
for (t = 0; t < 50; t++) gameport_read(gameport);
- rdtscl(t2);
+ t2 = rdtsc();
local_irq_restore(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
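
The gameport hunk above swaps rdtscl(), which yielded only the low 32 bits of the time stamp counter, for rdtsc(), which returns the full 64-bit value. A userspace sketch of the same measurement loop using __rdtsc() from x86intrin.h (x86 only, GCC/Clang):

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
	uint64_t t1, t2;

	t1 = __rdtsc();
	for (volatile int i = 0; i < 1000; i++)
		;                         /* stand-in for gameport_read() */
	t2 = __rdtsc();

	printf("delta: %llu cycles\n", (unsigned long long)(t2 - t1));
	return 0;
}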
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
{
}
+static int input_leds_get_count(struct input_dev *dev)
+{
+ unsigned int led_code;
+ int count = 0;
+
+ for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+ if (input_led_info[led_code].name)
+ count++;
+
+ return count;
+}
+
static int input_leds_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
int led_no;
int error;
- num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+ num_leds = input_leds_get_count(dev);
if (!num_leds)
return -ENXIO;
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
led->handle = &leds->handle;
led->code = led_code;
- if (WARN_ON(!input_led_info[led_code].name))
+ if (!input_led_info[led_code].name)
continue;
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 78d24990a816..5391abd28b27 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -674,13 +674,19 @@ EXPORT_SYMBOL(input_close_device);
*/
static void input_dev_release_keys(struct input_dev *dev)
{
+ bool need_sync = false;
int code;
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
- for_each_set_bit(code, dev->key, KEY_CNT)
+ for_each_set_bit(code, dev->key, KEY_CNT) {
input_pass_event(dev, EV_KEY, code, 0);
+ need_sync = true;
+ }
+
+ if (need_sync)
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+
memset(dev->key, 0, sizeof(dev->key));
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
}
}
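
The ff-core, input core and joydev hunks convert open-coded test_bit() loops to for_each_set_bit(), which skips straight from one set bit to the next. A single-word userspace analogue (the kernel macro walks arbitrary-length bitmaps and takes a size argument):

#include <stdio.h>

#define for_each_set_bit(bit, word) \
	for (unsigned long _w = (word); \
	     _w && ((bit) = (unsigned int)__builtin_ctzl(_w), 1); \
	     _w &= _w - 1)                /* clear the lowest set bit */

int main(void)
{
	unsigned long keys = 0x1005;      /* bits 0, 2 and 12 set */
	unsigned int bit;

	for_each_set_bit(bit, keys)
		printf("bit %u is set\n", bit);
	return 0;
}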
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 1d247bcf2ae2..6cb5a3e5f9a1 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -859,12 +859,11 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
joydev->handle.handler = handler;
joydev->handle.private = joydev;
- for (i = 0; i < ABS_CNT; i++)
- if (test_bit(i, dev->absbit)) {
- joydev->absmap[i] = joydev->nabs;
- joydev->abspam[joydev->nabs] = i;
- joydev->nabs++;
- }
+ for_each_set_bit(i, dev->absbit, ABS_CNT) {
+ joydev->absmap[i] = joydev->nabs;
+ joydev->abspam[joydev->nabs] = i;
+ joydev->nabs++;
+ }
for (i = BTN_JOYSTICK - BTN_MISC; i < KEY_MAX - BTN_MISC + 1; i++)
if (test_bit(i + BTN_MISC, dev->keybit)) {
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 4284080e481d..6f8b084e13d0 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,7 +143,7 @@ struct analog_port {
#include <linux/i8253.h>
-#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
+#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
return count;
}
#elif defined(__x86_64__)
-#define GET_TIME(x) rdtscl(x)
+#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
if (n_buttons[i] < 1)
continue;
- if (n_buttons[i] > 6) {
+ if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
err = -EINVAL;
goto err_unreg_devs;
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index 30af2e8c670c..4a8258bf13fd 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -47,6 +47,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/bitrev.h>
#include <linux/input.h>
#include <linux/serio.h>
@@ -72,16 +73,6 @@ struct zhenhua {
char phys[32];
};
-
-/* bits in all incoming bytes needs to be "reversed" */
-static int zhenhua_bitreverse(int x)
-{
- x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
- x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
- x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
- return x;
-}
-
/*
* zhenhua_process_packet() decodes packets the driver receives from the
* RC transmitter. It updates the data accordingly.
@@ -120,7 +111,7 @@ static irqreturn_t zhenhua_interrupt(struct serio *serio, unsigned char data, un
return IRQ_HANDLED; /* wrong MSB -- ignore this byte */
if (zhenhua->idx < ZHENHUA_MAX_LENGTH)
- zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data);
+ zhenhua->data[zhenhua->idx++] = bitrev8(data);
if (zhenhua->idx == ZHENHUA_MAX_LENGTH) {
zhenhua_process_packet(zhenhua);
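
The zhenhua hunk above drops the driver-local bit reversal in favor of bitrev8() from linux/bitrev.h. The removed helper's three swap stages, restated as a runnable userspace program:

#include <stdint.h>
#include <stdio.h>

static uint8_t bitrev8(uint8_t x)
{
	x = (uint8_t)(((x & 0xaa) >> 1) | ((x & 0x55) << 1)); /* swap adjacent bits */
	x = (uint8_t)(((x & 0xcc) >> 2) | ((x & 0x33) << 2)); /* swap bit pairs */
	x = (uint8_t)(((x & 0xf0) >> 4) | ((x & 0x0f) << 4)); /* swap nibbles */
	return x;
}

int main(void)
{
	printf("0x%02x -> 0x%02x\n", 0x01, bitrev8(0x01)); /* 0x01 -> 0x80 */
	return 0;
}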
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4cd94fd6cbad..2e80107ff630 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -187,7 +187,7 @@ config KEYBOARD_EP93XX
config KEYBOARD_GPIO
tristate "GPIO Buttons"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
This driver implements support for buttons connected
to GPIO pins of various CPUs (and some other chips).
@@ -253,7 +253,7 @@ config KEYBOARD_TCA8418
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Enable support for GPIO driven matrix keypad.
@@ -401,6 +401,17 @@ config KEYBOARD_MPR121
To compile this driver as a module, choose M here: the
module will be called mpr121_touchkey.
+config KEYBOARD_SNVS_PWRKEY
+ tristate "IMX SNVS Power Key Driver"
+ depends on SOC_IMX6SX
+ depends on OF
+ help
+ This is the snvs powerkey driver for the Freescale i.MX application
+ processors that are newer than i.MX6 SX.
+
+ To compile this driver as a module, choose M here; the
+ module will be called snvs_pwrkey.
+
config KEYBOARD_IMX
tristate "IMX keypad support"
depends on ARCH_MXC
@@ -678,7 +689,7 @@ config KEYBOARD_W90P910
config KEYBOARD_CROS_EC
tristate "ChromeOS EC keyboard"
select INPUT_MATRIXKMAP
- depends on CROS_EC_PROTO
+ depends on MFD_CROS_EC
help
Say Y here to enable the matrix keyboard used by ChromeOS devices
and implemented on the ChromeOS EC. You must enable one bus option
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df28d5553c05..1d416ddf84e4 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SAMSUNG) += samsung-keypad.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
+obj-$(CONFIG_KEYBOARD_SNVS_PWRKEY) += snvs_pwrkey.o
obj-$(CONFIG_KEYBOARD_SPEAR) += spear-keyboard.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 6ed83cf8b74e..4d446d5085aa 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1097,7 +1097,6 @@ MODULE_DEVICE_TABLE(i2c, adp5589_id);
static struct i2c_driver adp5589_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.pm = &adp5589_dev_pm_ops,
},
.probe = adp5589_probe,
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index f07461a64d85..378db10001df 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/i2c.h>
@@ -47,6 +48,20 @@
#define CAP11XX_REG_CONFIG2 0x44
#define CAP11XX_REG_CONFIG2_ALT_POL BIT(6)
#define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
+#define CAP11XX_REG_LED_POLARITY 0x73
+#define CAP11XX_REG_LED_OUTPUT_CONTROL 0x74
+
+#define CAP11XX_REG_LED_DUTY_CYCLE_1 0x90
+#define CAP11XX_REG_LED_DUTY_CYCLE_2 0x91
+#define CAP11XX_REG_LED_DUTY_CYCLE_3 0x92
+#define CAP11XX_REG_LED_DUTY_CYCLE_4 0x93
+
+#define CAP11XX_REG_LED_DUTY_MIN_MASK (0x0f)
+#define CAP11XX_REG_LED_DUTY_MIN_MASK_SHIFT (0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK (0xf0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT (4)
+#define CAP11XX_REG_LED_DUTY_MAX_VALUE (15)
+
#define CAP11XX_REG_SENSOR_CALIB (0xb1 + (X))
#define CAP11XX_REG_SENSOR_CALIB_LSB1 0xb9
#define CAP11XX_REG_SENSOR_CALIB_LSB2 0xba
@@ -56,10 +71,23 @@
#define CAP11XX_MANUFACTURER_ID 0x5d
+#ifdef CONFIG_LEDS_CLASS
+struct cap11xx_led {
+ struct cap11xx_priv *priv;
+ struct led_classdev cdev;
+ struct work_struct work;
+ u32 reg;
+ enum led_brightness new_brightness;
+};
+#endif
+
struct cap11xx_priv {
struct regmap *regmap;
struct input_dev *idev;
+ struct cap11xx_led *leds;
+ int num_leds;
+
/* config */
u32 keycodes[];
};
@@ -67,6 +95,7 @@ struct cap11xx_priv {
struct cap11xx_hw_model {
u8 product_id;
unsigned int num_channels;
+ unsigned int num_leds;
};
enum {
@@ -76,9 +105,9 @@ enum {
};
static const struct cap11xx_hw_model cap11xx_devices[] = {
- [CAP1106] = { .product_id = 0x55, .num_channels = 6 },
- [CAP1126] = { .product_id = 0x53, .num_channels = 6 },
- [CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+ [CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 },
+ [CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 },
+ [CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 },
};
static const struct reg_default cap11xx_reg_defaults[] = {
@@ -111,6 +140,7 @@ static const struct reg_default cap11xx_reg_defaults[] = {
{ CAP11XX_REG_STANDBY_SENSITIVITY, 0x02 },
{ CAP11XX_REG_STANDBY_THRESH, 0x40 },
{ CAP11XX_REG_CONFIG2, 0x40 },
+ { CAP11XX_REG_LED_POLARITY, 0x00 },
{ CAP11XX_REG_SENSOR_CALIB_LSB1, 0x00 },
{ CAP11XX_REG_SENSOR_CALIB_LSB2, 0x00 },
};
@@ -177,6 +207,12 @@ out:
static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
{
+ /*
+ * DLSEEP mode would turn off all LEDs; skip entering sleep while
+ * LEDs are registered.
+ */
+ if (IS_ENABLED(CONFIG_LEDS_CLASS) && priv->num_leds)
+ return 0;
+
return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
CAP11XX_REG_MAIN_CONTROL_DLSEEP,
sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
@@ -196,6 +232,104 @@ static void cap11xx_input_close(struct input_dev *idev)
cap11xx_set_sleep(priv, true);
}
+#ifdef CONFIG_LEDS_CLASS
+static void cap11xx_led_work(struct work_struct *work)
+{
+ struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+ struct cap11xx_priv *priv = led->priv;
+ int value = led->new_brightness;
+
+ /*
+ * All LEDs share the same duty cycle as this is a HW limitation.
+ * Brightness levels per LED are either 0 (OFF) or 1 (ON).
+ */
+ regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
+ BIT(led->reg), value ? BIT(led->reg) : 0);
+}
+
+static void cap11xx_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
+
+ if (led->new_brightness == value)
+ return;
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int cap11xx_init_leds(struct device *dev,
+ struct cap11xx_priv *priv, int num_leds)
+{
+ struct device_node *node = dev->of_node, *child;
+ struct cap11xx_led *led;
+ int cnt = of_get_child_count(node);
+ int error;
+
+ if (!num_leds || !cnt)
+ return 0;
+
+ if (cnt > num_leds)
+ return -EINVAL;
+
+ led = devm_kcalloc(dev, cnt, sizeof(struct cap11xx_led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ priv->leds = led;
+
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_LED_OUTPUT_CONTROL, 0xff, 0);
+ if (error)
+ return error;
+
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_LED_DUTY_CYCLE_4,
+ CAP11XX_REG_LED_DUTY_MAX_MASK,
+ CAP11XX_REG_LED_DUTY_MAX_VALUE <<
+ CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT);
+ if (error)
+ return error;
+
+ for_each_child_of_node(node, child) {
+ u32 reg;
+
+ led->cdev.name =
+ of_get_property(child, "label", NULL) ? : child->name;
+ led->cdev.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+ led->cdev.flags = 0;
+ led->cdev.brightness_set = cap11xx_led_set;
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness = LED_OFF;
+
+ error = of_property_read_u32(child, "reg", &reg);
+ if (error != 0 || reg >= num_leds)
+ return -EINVAL;
+
+ led->reg = reg;
+ led->priv = priv;
+
+ INIT_WORK(&led->work, cap11xx_led_work);
+
+ error = devm_led_classdev_register(dev, &led->cdev);
+ if (error)
+ return error;
+
+ priv->num_leds++;
+ led++;
+ }
+
+ return 0;
+}
+#else
+static int cap11xx_init_leds(struct device *dev,
+ struct cap11xx_priv *priv, int num_leds)
+{
+ return 0;
+}
+#endif
+
static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
@@ -316,6 +450,10 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
priv->idev->open = cap11xx_input_open;
priv->idev->close = cap11xx_input_close;
+ error = cap11xx_init_leds(dev, priv, cap->num_leds);
+ if (error)
+ return error;
+
input_set_drvdata(priv->idev, priv);
/*
@@ -361,7 +499,6 @@ MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids);
static struct i2c_driver cap11xx_i2c_driver = {
.driver = {
.name = "cap11xx",
- .owner = THIS_MODULE,
.of_match_table = cap11xx_dt_ids,
},
.id_table = cap11xx_i2c_ids,
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ddf4045de084..9d517ca7eb5a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -239,6 +239,11 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
}
}
+ if (i == ddata->pdata->nbuttons) {
+ error = -EINVAL;
+ goto out;
+ }
+
mutex_lock(&ddata->disable_lock);
for (i = 0; i < ddata->pdata->nbuttons; i++) {
@@ -655,7 +660,9 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (of_property_read_u32(pp, "linux,input-type", &button->type))
button->type = EV_KEY;
- button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+ button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
+ /* legacy name */
+ of_property_read_bool(pp, "gpio-key,wakeup");
button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..870cfa6e2c44 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -152,7 +152,10 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
&button->type))
button->type = EV_KEY;
- button->wakeup = fwnode_property_present(child, "gpio-key,wakeup");
+ button->wakeup =
+ fwnode_property_read_bool(child, "wakeup-source") ||
+ /* legacy name */
+ fwnode_property_read_bool(child, "gpio-key,wakeup");
if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
@@ -246,7 +249,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
- unsigned flags = 0;
+ unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index d2ea863d6a45..2165f3dd328b 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -5,8 +5,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * <<Power management needs to be implemented>>.
*/
#include <linux/clk.h>
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 0ad422b8a260..c717e8f3c964 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -223,7 +223,6 @@ MODULE_DEVICE_TABLE(i2c, lm8333_id);
static struct i2c_driver lm8333_driver = {
.driver = {
.name = "lm8333",
- .owner = THIS_MODULE,
},
.probe = lm8333_probe,
.remove = lm8333_remove,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b370a59cb759..7f12b6579f82 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -425,8 +425,10 @@ matrix_keypad_parse_dt(struct device *dev)
if (of_get_property(np, "linux,no-autorepeat", NULL))
pdata->no_autorepeat = true;
- if (of_get_property(np, "linux,wakeup", NULL))
- pdata->wakeup = true;
+
+ pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+ of_property_read_bool(np, "linux,wakeup"); /* legacy */
+
if (of_get_property(np, "gpio-activelow", NULL))
pdata->active_low = true;
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 375b05ca8e2a..31090d71a685 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -265,7 +265,6 @@ MODULE_DEVICE_TABLE(i2c, mcs_touchkey_id);
static struct i2c_driver mcs_touchkey_driver = {
.driver = {
.name = "mcs_touchkey",
- .owner = THIS_MODULE,
.pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 3aa2ec45bcab..0fd612dd76ed 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -305,7 +305,6 @@ MODULE_DEVICE_TABLE(i2c, mpr121_id);
static struct i2c_driver mpr_touchkey_driver = {
.driver = {
.name = "mpr121",
- .owner = THIS_MODULE,
.pm = &mpr121_touchkey_pm_ops,
},
.id_table = mpr121_id,
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 32580afecc26..5c68e3f096bc 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -507,6 +507,7 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
*/
static int pmic8xxx_kp_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
unsigned int rows, cols;
bool repeat;
bool wakeup;
@@ -524,10 +525,11 @@ static int pmic8xxx_kp_probe(struct platform_device *pdev)
return -EINVAL;
}
- repeat = !of_property_read_bool(pdev->dev.of_node,
- "linux,input-no-autorepeat");
- wakeup = of_property_read_bool(pdev->dev.of_node,
- "linux,keypad-wakeup");
+ repeat = !of_property_read_bool(np, "linux,input-no-autorepeat");
+
+ wakeup = of_property_read_bool(np, "wakeup-source") ||
+ /* legacy name */
+ of_property_read_bool(np, "linux,keypad-wakeup");
kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
if (!kp)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 52cd6e88acd7..5a5778729e37 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -277,7 +277,6 @@ MODULE_DEVICE_TABLE(i2c, qt1070_id);
static struct i2c_driver qt1070_driver = {
.driver = {
.name = "qt1070",
- .owner = THIS_MODULE,
.pm = &qt1070_pm_ops,
},
.id_table = qt1070_id,
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 819b22897c13..43b86482dda0 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -497,7 +497,6 @@ MODULE_DEVICE_TABLE(i2c, qt2160_idtable);
static struct i2c_driver qt2160_driver = {
.driver = {
.name = "qt2160",
- .owner = THIS_MODULE,
},
.id_table = qt2160_idtable,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 43e48dac7687..4e319eb9e19d 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -299,8 +299,10 @@ samsung_keypad_parse_dt(struct device *dev)
if (of_get_property(np, "linux,input-no-autorepeat", NULL))
pdata->no_autorepeat = true;
- if (of_get_property(np, "linux,input-wakeup", NULL))
- pdata->wakeup = true;
+ pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+ /* legacy name */
+ of_property_read_bool(np, "linux,input-wakeup");
+
return pdata;
}
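
The same device-tree pattern appears in the gpio_keys, gpio_keys_polled, matrix_keypad, pmic8xxx-keypad, samsung-keypad and (below) tc3589x hunks: prefer the generic "wakeup-source" property, but keep honoring each binding's legacy name. A sketch with a hypothetical has_prop() helper standing in for of_property_read_bool():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool has_prop(const char *prop)    /* hypothetical DT lookup */
{
	return strcmp(prop, "linux,input-wakeup") == 0;
}

int main(void)
{
	bool wakeup = has_prop("wakeup-source") ||
		      has_prop("linux,input-wakeup"); /* legacy name */

	printf("wakeup: %d\n", wakeup);   /* 1: old device trees keep working */
	return 0;
}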
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
new file mode 100644
index 000000000000..78fd24ca3813
--- /dev/null
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for the IMX SNVS ON/OFF Power Key
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SNVS_LPSR_REG 0x4C /* LP Status Register */
+#define SNVS_LPCR_REG 0x38 /* LP Control Register */
+#define SNVS_HPSR_REG 0x14
+#define SNVS_HPSR_BTN BIT(6)
+#define SNVS_LPSR_SPO BIT(18)
+#define SNVS_LPCR_DEP_EN BIT(5)
+
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
+
+struct pwrkey_drv_data {
+ struct regmap *snvs;
+ int irq;
+ int keycode;
+ int keystate; /* 1:pressed */
+ int wakeup;
+ struct timer_list check_timer;
+ struct input_dev *input;
+};
+
+static void imx_imx_snvs_check_for_events(unsigned long data)
+{
+ struct pwrkey_drv_data *pdata = (struct pwrkey_drv_data *) data;
+ struct input_dev *input = pdata->input;
+ u32 state;
+
+ regmap_read(pdata->snvs, SNVS_HPSR_REG, &state);
+ state = state & SNVS_HPSR_BTN ? 1 : 0;
+
+ /* only report new event if status changed */
+ if (state ^ pdata->keystate) {
+ pdata->keystate = state;
+ input_event(input, EV_KEY, pdata->keycode, state);
+ input_sync(input);
+ pm_relax(pdata->input->dev.parent);
+ }
+
+ /* repeat check if pressed long */
+ if (state) {
+ mod_timer(&pdata->check_timer,
+ jiffies + msecs_to_jiffies(REPEAT_INTERVAL));
+ }
+}
+
+static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+ u32 lp_status;
+
+ pm_wakeup_event(pdata->input->dev.parent, 0);
+
+ regmap_read(pdata->snvs, SNVS_LPSR_REG, &lp_status);
+ if (lp_status & SNVS_LPSR_SPO)
+ mod_timer(&pdata->check_timer, jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
+
+ /* clear SPO status */
+ regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
+
+ return IRQ_HANDLED;
+}
+
+static void imx_snvs_pwrkey_act(void *pdata)
+{
+ struct pwrkey_drv_data *pd = pdata;
+
+ del_timer_sync(&pd->check_timer);
+}
+
+static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
+{
+ struct pwrkey_drv_data *pdata = NULL;
+ struct input_dev *input = NULL;
+ struct device_node *np;
+ int error;
+
+ /* Get SNVS register Page */
+ np = pdev->dev.of_node;
+ if (!np)
+ return -ENODEV;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->snvs = syscon_regmap_lookup_by_phandle(np, "regmap");
+ if (IS_ERR(pdata->snvs)) {
+ dev_err(&pdev->dev, "Can't get snvs syscon\n");
+ return PTR_ERR(pdata->snvs);
+ }
+
+ if (of_property_read_u32(np, "linux,keycode", &pdata->keycode)) {
+ pdata->keycode = KEY_POWER;
+ dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
+ }
+
+ pdata->wakeup = of_property_read_bool(np, "wakeup-source");
+
+ pdata->irq = platform_get_irq(pdev, 0);
+ if (pdata->irq < 0) {
+ dev_err(&pdev->dev, "no irq defined in platform data\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_DEP_EN, SNVS_LPCR_DEP_EN);
+
+ /* clear any stale SPO interrupt before the driver is ready */
+ regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
+
+ setup_timer(&pdata->check_timer,
+ imx_imx_snvs_check_for_events, (unsigned long) pdata);
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed to allocate the input device\n");
+ return -ENOMEM;
+ }
+
+ input->name = pdev->name;
+ input->phys = "snvs-pwrkey/input0";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_KEY, pdata->keycode);
+
+ /* register a devm action to cancel the release-check timer on removal */
+ error = devm_add_action(&pdev->dev, imx_snvs_pwrkey_act, pdata);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register remove action\n");
+ return error;
+ }
+
+ error = devm_request_irq(&pdev->dev, pdata->irq,
+ imx_snvs_pwrkey_interrupt,
+ 0, pdev->name, pdev);
+
+ if (error) {
+ dev_err(&pdev->dev, "interrupt not available.\n");
+ return error;
+ }
+
+ error = input_register_device(input);
+ if (error < 0) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ pdata->input = input;
+ platform_set_drvdata(pdev, pdata);
+
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+ return 0;
+}
+
+static int imx_snvs_pwrkey_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static int imx_snvs_pwrkey_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static const struct of_device_id imx_snvs_pwrkey_ids[] = {
+ { .compatible = "fsl,sec-v4.0-pwrkey" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_snvs_pwrkey_ids);
+
+static SIMPLE_DEV_PM_OPS(imx_snvs_pwrkey_pm_ops, imx_snvs_pwrkey_suspend,
+ imx_snvs_pwrkey_resume);
+
+static struct platform_driver imx_snvs_pwrkey_driver = {
+ .driver = {
+ .name = "snvs_pwrkey",
+ .pm = &imx_snvs_pwrkey_pm_ops,
+ .of_match_table = imx_snvs_pwrkey_ids,
+ },
+ .probe = imx_snvs_pwrkey_probe,
+};
+module_platform_driver(imx_snvs_pwrkey_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("i.MX snvs power key Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 31c606a4dd31..e92dfd8889c2 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/mfd/tc3589x.h>
+#include <linux/device.h>
/* Maximum supported keypad matrix row/columns size */
#define TC3589x_MAX_KPROW 8
@@ -352,7 +353,10 @@ tc3589x_keypad_of_probe(struct device *dev)
}
plat->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
- plat->enable_wakeup = of_property_read_bool(np, "linux,wakeup");
+
+ plat->enable_wakeup = of_property_read_bool(np, "wakeup-source") ||
+ /* legacy name */
+ of_property_read_bool(np, "linux,wakeup");
/* The custom delay format is ms/16 */
of_property_read_u32(np, "debounce-delay-ms", &debounce_ms);
@@ -386,12 +390,15 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- keypad = kzalloc(sizeof(struct tc_keypad), GFP_KERNEL);
- input = input_allocate_device();
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ keypad = devm_kzalloc(&pdev->dev, sizeof(struct tc_keypad),
+ GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
}
keypad->board = plat;
@@ -410,7 +417,7 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
NULL, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_free_mem;
+ return error;
}
keypad->keymap = input->keycode;
@@ -421,20 +428,23 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
input_set_drvdata(input, keypad);
- error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
- plat->irqtype | IRQF_ONESHOT,
- "tc3589x-keypad", keypad);
- if (error < 0) {
+ tc3589x_keypad_disable(keypad);
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, tc3589x_keypad_irq,
+ plat->irqtype | IRQF_ONESHOT,
+ "tc3589x-keypad", keypad);
+ if (error) {
dev_err(&pdev->dev,
"Could not allocate irq %d,error %d\n",
irq, error);
- goto err_free_mem;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev, "Could not register input device\n");
- goto err_free_irq;
+ return error;
}
/* let platform decide if keypad is a wakeup source or not */
@@ -444,30 +454,6 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
return 0;
-
-err_free_irq:
- free_irq(irq, keypad);
-err_free_mem:
- input_free_device(input);
- kfree(keypad);
- return error;
-}
-
-static int tc3589x_keypad_remove(struct platform_device *pdev)
-{
- struct tc_keypad *keypad = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (!keypad->keypad_stopped)
- tc3589x_keypad_disable(keypad);
-
- free_irq(irq, keypad);
-
- input_unregister_device(keypad->input);
-
- kfree(keypad);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -518,7 +504,6 @@ static struct platform_driver tc3589x_keypad_driver = {
.pm = &tc3589x_keypad_dev_pm_ops,
},
.probe = tc3589x_keypad_probe,
- .remove = tc3589x_keypad_remove,
};
module_platform_driver(tc3589x_keypad_driver);
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 4e491c1762cf..9002298698fc 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -404,7 +404,6 @@ MODULE_ALIAS("i2c:tca8418");
static struct i2c_driver tca8418_keypad_driver = {
.driver = {
.name = TCA8418_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tca8418_dt_ids),
},
.probe = tca8418_keypad_probe,
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d4f0a817e858..906dd1b25e41 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -167,28 +167,16 @@ config INPUT_M68K_BEEP
depends on M68K
config INPUT_MAX77693_HAPTIC
- tristate "MAXIM MAX77693 haptic controller support"
- depends on MFD_MAX77693 && PWM
+ tristate "MAXIM MAX77693/MAX77843 haptic controller support"
+ depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
select INPUT_FF_MEMLESS
help
This option enables support for the haptic controller on
- MAXIM MAX77693 chip.
+ MAXIM MAX77693 and MAX77843 chips.
To compile this driver as module, choose M here: the
module will be called max77693-haptic.
-config INPUT_MAX77843_HAPTIC
- tristate "MAXIM MAX77843 haptic controller support"
- depends on MFD_MAX77843 && REGULATOR
- select INPUT_FF_MEMLESS
- help
- This option enables support for the haptic controller on
- MAXIM MAX77843 chip. The driver supports ff-memless interface
- from input framework.
-
- To compile this driver as module, choose M here: the
- module will be called max77843-haptic.
-
config INPUT_MAX8925_ONKEY
tristate "MAX8925 ONKEY support"
depends on MFD_MAX8925
@@ -259,7 +247,7 @@ config INPUT_APANEL
config INPUT_GP2A
tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
hooked to an I2C bus.
@@ -269,7 +257,7 @@ config INPUT_GP2A
config INPUT_GPIO_BEEPER
tristate "Generic GPIO Beeper support"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a beeper connected to a GPIO pin.
@@ -278,7 +266,7 @@ config INPUT_GPIO_BEEPER
config INPUT_GPIO_TILT_POLLED
tristate "Polled GPIO tilt switch"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select INPUT_POLLDEV
help
This driver implements support for tilt switches connected
@@ -569,7 +557,7 @@ config INPUT_PWM_BEEPER
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here to add support for rotary encoders connected to GPIO lines.
Check file:Documentation/input/rotary-encoder.txt for more
@@ -776,7 +764,8 @@ config INPUT_SOC_BUTTON_ARRAY
config INPUT_DRV260X_HAPTICS
tristate "TI DRV260X haptics support"
- depends on INPUT && I2C && GPIOLIB
+ depends on INPUT && I2C
+ depends on GPIOLIB || COMPILE_TEST
select INPUT_FF_MEMLESS
select REGMAP_I2C
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 53df07dcc23c..0357a088c6a9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
-obj-$(CONFIG_INPUT_MAX77843_HAPTIC) += max77843-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 1f7e15ca5fbe..4f5ef5bb535b 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -118,6 +118,7 @@ static const struct of_device_id ab8500_ponkey_match[] = {
{ .compatible = "stericsson,ab8500-ponkey", },
{}
};
+MODULE_DEVICE_TABLE(of, ab8500_ponkey_match);
#endif
static struct platform_driver ab8500_ponkey_driver = {
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index bdb5d03b296e..a8b0a2eec344 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -158,7 +158,6 @@ MODULE_DEVICE_TABLE(of, adxl34x_of_id);
static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
- .owner = THIS_MODULE,
.pm = &adxl34x_i2c_pm,
.of_match_table = of_match_ptr(adxl34x_of_id),
},
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4dbbed74c9e4..4bf678541496 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -170,8 +170,8 @@ static int arizona_haptics_probe(struct platform_device *pdev)
INIT_WORK(&haptics->work, arizona_haptics_work);
- haptics->input_dev = input_allocate_device();
- if (haptics->input_dev == NULL) {
+ haptics->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!haptics->input_dev) {
dev_err(arizona->dev, "Failed to allocate input device\n");
return -ENOMEM;
}
@@ -188,41 +188,23 @@ static int arizona_haptics_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
ret);
- goto err_ialloc;
+ return ret;
}
ret = input_register_device(haptics->input_dev);
if (ret < 0) {
dev_err(arizona->dev, "couldn't register input device: %d\n",
ret);
- goto err_iff;
+ return ret;
}
platform_set_drvdata(pdev, haptics);
return 0;
-
-err_iff:
- if (haptics->input_dev)
- input_ff_destroy(haptics->input_dev);
-err_ialloc:
- input_free_device(haptics->input_dev);
-
- return ret;
-}
-
-static int arizona_haptics_remove(struct platform_device *pdev)
-{
- struct arizona_haptics *haptics = platform_get_drvdata(pdev);
-
- input_unregister_device(haptics->input_dev);
-
- return 0;
}
static struct platform_driver arizona_haptics_driver = {
.probe = arizona_haptics_probe,
- .remove = arizona_haptics_remove,
.driver = {
.name = "arizona-haptics",
},
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
MODULE_DESCRIPTION("axp20x Power Button");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b36831c828d3..1d0e61d7c131 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -333,10 +333,9 @@ static void bma150_report_xyz(struct bma150_data *bma150)
y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);
- /* sign extension */
- x = (s16) (x << 6) >> 6;
- y = (s16) (y << 6) >> 6;
- z = (s16) (z << 6) >> 6;
+ x = sign_extend32(x, 9);
+ y = sign_extend32(y, 9);
+ z = sign_extend32(z, 9);
input_report_abs(bma150->input, ABS_X, x);
input_report_abs(bma150->input, ABS_Y, y);
@@ -654,7 +653,6 @@ MODULE_DEVICE_TABLE(i2c, bma150_id);
static struct i2c_driver bma150_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = BMA150_DRIVER,
.pm = &bma150_pm,
},
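
The bma150 hunk above replaces hand-rolled shift-based sign extension with sign_extend32(), whose second argument names the sign bit (9 for the 10-bit samples here). A runnable restatement of the helper, relying on arithmetic right shift of signed values as the kernel does:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;                    /* move the sign bit to bit 31 */
	return (int32_t)(value << shift) >> shift; /* and shift back down */
}

int main(void)
{
	uint32_t raw = 0x3ff;                    /* 10-bit two's complement -1 */
	printf("%d\n", sign_extend32(raw, 9));   /* prints -1 */
	return 0;
}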
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 4fdef98ceb56..c7021916b64b 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -118,7 +118,6 @@ static struct i2c_driver cma3000_i2c_driver = {
.id_table = cma3000_i2c_id,
.driver = {
.name = "cma3000_i2c_accl",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &cma3000_i2c_pm_ops,
#endif
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index e5d60ecd29a4..2adfd86c869a 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -204,7 +204,7 @@ struct drv260x_data {
int overdrive_voltage;
};
-static struct reg_default drv260x_reg_defs[] = {
+static const struct reg_default drv260x_reg_defs[] = {
{ DRV260X_STATUS, 0xe0 },
{ DRV260X_MODE, 0x40 },
{ DRV260X_RT_PB_IN, 0x00 },
@@ -313,14 +313,14 @@ static void drv260x_close(struct input_dev *input)
gpiod_set_value(haptics->enable_gpio, 0);
}
-static const struct reg_default drv260x_lra_cal_regs[] = {
+static const struct reg_sequence drv260x_lra_cal_regs[] = {
{ DRV260X_MODE, DRV260X_AUTO_CAL },
{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
};
-static const struct reg_default drv260x_lra_init_regs[] = {
+static const struct reg_sequence drv260x_lra_init_regs[] = {
{ DRV260X_MODE, DRV260X_RT_PLAYBACK },
{ DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
@@ -337,7 +337,7 @@ static const struct reg_default drv260x_lra_init_regs[] = {
{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
};
-static const struct reg_default drv260x_erm_cal_regs[] = {
+static const struct reg_sequence drv260x_erm_cal_regs[] = {
{ DRV260X_MODE, DRV260X_AUTO_CAL },
{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
@@ -720,7 +720,6 @@ static struct i2c_driver drv260x_driver = {
.probe = drv260x_probe,
.driver = {
.name = "drv260x-haptics",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(drv260x_of_match),
.pm = &drv260x_pm_ops,
},
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index 0afaa33de07d..ef9bc12b3be3 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -74,7 +74,7 @@ static const u8 drv2665_sine_wave_form[] = {
0x9b, 0x9f, 0xa5, 0xad, 0xb8, 0xc4, 0xd2, 0xe0, 0xf0, 0x00,
};
-static struct reg_default drv2665_reg_defs[] = {
+static const struct reg_default drv2665_reg_defs[] = {
{ DRV2665_STATUS, 0x02 },
{ DRV2665_CTRL_1, 0x28 },
{ DRV2665_CTRL_2, 0x40 },
@@ -132,7 +132,7 @@ static void drv2665_close(struct input_dev *input)
"Failed to enter standby mode: %d\n", error);
}
-static const struct reg_default drv2665_init_regs[] = {
+static const struct reg_sequence drv2665_init_regs[] = {
{ DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
{ DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
};
@@ -309,7 +309,6 @@ static struct i2c_driver drv2665_driver = {
.probe = drv2665_probe,
.driver = {
.name = "drv2665-haptics",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(drv2665_of_match),
.pm = &drv2665_pm_ops,
},
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index fc0fddf0896a..d5ba7481328c 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -116,7 +116,7 @@ struct drv2667_data {
u32 frequency;
};
-static struct reg_default drv2667_reg_defs[] = {
+static const struct reg_default drv2667_reg_defs[] = {
{ DRV2667_STATUS, 0x02 },
{ DRV2667_CTRL_1, 0x28 },
{ DRV2667_CTRL_2, 0x40 },
@@ -262,14 +262,14 @@ static void drv2667_close(struct input_dev *input)
"Failed to enter standby mode: %d\n", error);
}
-static const struct reg_default drv2667_init_regs[] = {
+static const struct reg_sequence drv2667_init_regs[] = {
{ DRV2667_CTRL_2, 0 },
{ DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
{ DRV2667_WV_SEQ_0, 1 },
{ DRV2667_WV_SEQ_1, 0 }
};
-static const struct reg_default drv2667_page1_init[] = {
+static const struct reg_sequence drv2667_page1_init[] = {
{ DRV2667_RAM_HDR_SZ, 0x05 },
{ DRV2667_RAM_START_HI, 0x80 },
{ DRV2667_RAM_START_LO, 0x06 },
@@ -484,7 +484,6 @@ static struct i2c_driver drv2667_driver = {
.probe = drv2667_probe,
.driver = {
.name = "drv2667-haptics",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(drv2667_of_match),
.pm = &drv2667_pm_ops,
},
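
The reg_default to reg_sequence conversions in the drv260x, drv2665 and drv2667 hunks reflect a regmap API split: reg_default describes a register's cache default, while reg_sequence describes one step of an ordered write sequence as consumed by regmap_multi_reg_write() and register patches; later kernels also give reg_sequence an optional per-write delay. Local mirrors of the two layouts, for illustration only:

#include <stdio.h>

struct reg_default {                      /* cache default, order irrelevant */
	unsigned int reg;
	unsigned int def;
};

struct reg_sequence {                     /* sequenced write, order matters */
	unsigned int reg;
	unsigned int def;
};

static const struct reg_sequence init_seq[] = {
	{ 0x02, 0x00 },                   /* hypothetical register writes */
	{ 0x01, 0x28 },
};

int main(void)
{
	printf("%zu writes\n", sizeof(init_seq) / sizeof(init_seq[0]));
	return 0;
}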
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index 0ac176d66a6f..3bfdfcc20485 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -267,11 +267,11 @@ static const struct i2c_device_id gp2a_i2c_id[] = {
{ GP2A_I2C_NAME, 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
static struct i2c_driver gp2a_i2c_driver = {
.driver = {
.name = GP2A_I2C_NAME,
- .owner = THIS_MODULE,
.pm = &gp2a_pm,
},
.probe = gp2a_probe,
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index 6e29349da537..e058d711256a 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -658,7 +658,6 @@ MODULE_DEVICE_TABLE(i2c, kxtj9_id);
static struct i2c_driver kxtj9_driver = {
.driver = {
.name = NAME,
- .owner = THIS_MODULE,
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 39e930c10ebb..6d96bff32a0e 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -1,8 +1,9 @@
/*
- * MAXIM MAX77693 Haptic device driver
+ * MAXIM MAX77693/MAX77843 Haptic device driver
*
- * Copyright (C) 2014 Samsung Electronics
+ * Copyright (C) 2014,2015 Samsung Electronics
* Jaewon Kim <jaewon02.kim@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
*
@@ -24,7 +25,9 @@
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
+#include <linux/mfd/max77843-private.h>
#define MAX_MAGNITUDE_SHIFT 16
@@ -46,6 +49,8 @@ enum max77693_haptic_pwm_divisor {
};
struct max77693_haptic {
+ enum max77693_types dev_type;
+
struct regmap *regmap_pmic;
struct regmap *regmap_haptic;
struct device *dev;
@@ -59,7 +64,6 @@ struct max77693_haptic {
unsigned int pwm_duty;
enum max77693_haptic_motor_type type;
enum max77693_haptic_pulse_mode mode;
- enum max77693_haptic_pwm_divisor pwm_divisor;
struct work_struct work;
};
@@ -78,19 +82,52 @@ static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
return 0;
}
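+/* MAX77843 only: control the haptic bias enable bit; a no-op on MAX77693. */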
+static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
+{
+ int error;
+
+ if (haptic->dev_type != TYPE_MAX77843)
+ return 0;
+
+ error = regmap_update_bits(haptic->regmap_haptic,
+ MAX77843_SYS_REG_MAINCTRL1,
+ MAX77843_MAINCTRL1_BIASEN_MASK,
+ on << MAINCTRL1_BIASEN_SHIFT);
+ if (error) {
+ dev_err(haptic->dev, "failed to %s bias: %d\n",
+ on ? "enable" : "disable", error);
+ return error;
+ }
+
+ return 0;
+}
+
static int max77693_haptic_configure(struct max77693_haptic *haptic,
bool enable)
{
- unsigned int value;
+ unsigned int value, config_reg;
int error;
- value = ((haptic->type << MAX77693_CONFIG2_MODE) |
- (enable << MAX77693_CONFIG2_MEN) |
- (haptic->mode << MAX77693_CONFIG2_HTYP) |
- (haptic->pwm_divisor));
+ switch (haptic->dev_type) {
+ case TYPE_MAX77693:
+ value = ((haptic->type << MAX77693_CONFIG2_MODE) |
+ (enable << MAX77693_CONFIG2_MEN) |
+ (haptic->mode << MAX77693_CONFIG2_HTYP) |
+ MAX77693_HAPTIC_PWM_DIVISOR_128);
+ config_reg = MAX77693_HAPTIC_REG_CONFIG2;
+ break;
+ case TYPE_MAX77843:
+ value = (haptic->type << MCONFIG_MODE_SHIFT) |
+ (enable << MCONFIG_MEN_SHIFT) |
+ MAX77693_HAPTIC_PWM_DIVISOR_128;
+ config_reg = MAX77843_HAP_REG_MCONFIG;
+ break;
+ default:
+ return -EINVAL;
+ }
error = regmap_write(haptic->regmap_haptic,
- MAX77693_HAPTIC_REG_CONFIG2, value);
+ config_reg, value);
if (error) {
dev_err(haptic->dev,
"failed to update haptic config: %d\n", error);
@@ -104,6 +141,9 @@ static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
{
int error;
+ if (haptic->dev_type != TYPE_MAX77693)
+ return 0;
+
error = regmap_update_bits(haptic->regmap_pmic,
MAX77693_PMIC_REG_LSCNFG,
MAX77693_PMIC_LOW_SYS_MASK,
@@ -219,6 +259,10 @@ static int max77693_haptic_open(struct input_dev *dev)
struct max77693_haptic *haptic = input_get_drvdata(dev);
int error;
+ error = max77843_haptic_bias(haptic, true);
+ if (error)
+ return error;
+
error = regulator_enable(haptic->motor_reg);
if (error) {
dev_err(haptic->dev,
@@ -241,6 +285,8 @@ static void max77693_haptic_close(struct input_dev *dev)
if (error)
dev_err(haptic->dev,
"failed to disable regulator: %d\n", error);
+
+ max77843_haptic_bias(haptic, false);
}
static int max77693_haptic_probe(struct platform_device *pdev)
@@ -254,13 +300,26 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return -ENOMEM;
haptic->regmap_pmic = max77693->regmap;
- haptic->regmap_haptic = max77693->regmap_haptic;
haptic->dev = &pdev->dev;
haptic->type = MAX77693_HAPTIC_LRA;
haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE;
- haptic->pwm_divisor = MAX77693_HAPTIC_PWM_DIVISOR_128;
haptic->suspend_state = false;
+ /* Variant-specific init */
+ haptic->dev_type = platform_get_device_id(pdev)->driver_data;
+ switch (haptic->dev_type) {
+ case TYPE_MAX77693:
+ haptic->regmap_haptic = max77693->regmap_haptic;
+ break;
+ case TYPE_MAX77843:
+ haptic->regmap_haptic = max77693->regmap;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported device type: %u\n",
+ haptic->dev_type);
+ return -EINVAL;
+ }
+
INIT_WORK(&haptic->work, max77693_haptic_play_work);
	/* Get pwm and regulator for haptic device */
@@ -338,16 +397,25 @@ static int __maybe_unused max77693_haptic_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
max77693_haptic_suspend, max77693_haptic_resume);
+static const struct platform_device_id max77693_haptic_id[] = {
+ { "max77693-haptic", TYPE_MAX77693 },
+ { "max77843-haptic", TYPE_MAX77843 },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
+
static struct platform_driver max77693_haptic_driver = {
.driver = {
.name = "max77693-haptic",
.pm = &max77693_haptic_pm_ops,
},
.probe = max77693_haptic_probe,
+ .id_table = max77693_haptic_id,
};
module_platform_driver(max77693_haptic_driver);
MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_DESCRIPTION("MAXIM MAX77693 Haptic driver");
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
MODULE_ALIAS("platform:max77693-haptic");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/max77843-haptic.c b/drivers/input/misc/max77843-haptic.c
deleted file mode 100644
index dccbb465a055..000000000000
--- a/drivers/input/misc/max77843-haptic.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * MAXIM MAX77693 Haptic device driver
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/mfd/max77843-private.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-
-#define MAX_MAGNITUDE_SHIFT 16
-
-enum max77843_haptic_motor_type {
- MAX77843_HAPTIC_ERM = 0,
- MAX77843_HAPTIC_LRA,
-};
-
-enum max77843_haptic_pwm_divisor {
- MAX77843_HAPTIC_PWM_DIVISOR_32 = 0,
- MAX77843_HAPTIC_PWM_DIVISOR_64,
- MAX77843_HAPTIC_PWM_DIVISOR_128,
- MAX77843_HAPTIC_PWM_DIVISOR_256,
-};
-
-struct max77843_haptic {
- struct regmap *regmap_haptic;
- struct device *dev;
- struct input_dev *input_dev;
- struct pwm_device *pwm_dev;
- struct regulator *motor_reg;
- struct work_struct work;
- struct mutex mutex;
-
- unsigned int magnitude;
- unsigned int pwm_duty;
-
- bool active;
- bool suspended;
-
- enum max77843_haptic_motor_type type;
- enum max77843_haptic_pwm_divisor pwm_divisor;
-};
-
-static int max77843_haptic_set_duty_cycle(struct max77843_haptic *haptic)
-{
- int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
- int error;
-
- error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
- if (error) {
- dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
- return error;
- }
-
- return 0;
-}
-
-static int max77843_haptic_bias(struct max77843_haptic *haptic, bool on)
-{
- int error;
-
- error = regmap_update_bits(haptic->regmap_haptic,
- MAX77843_SYS_REG_MAINCTRL1,
- MAX77843_MAINCTRL1_BIASEN_MASK,
- on << MAINCTRL1_BIASEN_SHIFT);
- if (error) {
- dev_err(haptic->dev, "failed to %s bias: %d\n",
- on ? "enable" : "disable", error);
- return error;
- }
-
- return 0;
-}
-
-static int max77843_haptic_config(struct max77843_haptic *haptic, bool enable)
-{
- unsigned int value;
- int error;
-
- value = (haptic->type << MCONFIG_MODE_SHIFT) |
- (enable << MCONFIG_MEN_SHIFT) |
- (haptic->pwm_divisor << MCONFIG_PDIV_SHIFT);
-
- error = regmap_write(haptic->regmap_haptic,
- MAX77843_HAP_REG_MCONFIG, value);
- if (error) {
- dev_err(haptic->dev,
- "failed to update haptic config: %d\n", error);
- return error;
- }
-
- return 0;
-}
-
-static int max77843_haptic_enable(struct max77843_haptic *haptic)
-{
- int error;
-
- if (haptic->active)
- return 0;
-
- error = pwm_enable(haptic->pwm_dev);
- if (error) {
- dev_err(haptic->dev,
- "failed to enable pwm device: %d\n", error);
- return error;
- }
-
- error = max77843_haptic_config(haptic, true);
- if (error)
- goto err_config;
-
- haptic->active = true;
-
- return 0;
-
-err_config:
- pwm_disable(haptic->pwm_dev);
-
- return error;
-}
-
-static int max77843_haptic_disable(struct max77843_haptic *haptic)
-{
- int error;
-
- if (!haptic->active)
- return 0;
-
- error = max77843_haptic_config(haptic, false);
- if (error)
- return error;
-
- pwm_disable(haptic->pwm_dev);
-
- haptic->active = false;
-
- return 0;
-}
-
-static void max77843_haptic_play_work(struct work_struct *work)
-{
- struct max77843_haptic *haptic =
- container_of(work, struct max77843_haptic, work);
- int error;
-
- mutex_lock(&haptic->mutex);
-
- if (haptic->suspended)
- goto out_unlock;
-
- if (haptic->magnitude) {
- error = max77843_haptic_set_duty_cycle(haptic);
- if (error) {
- dev_err(haptic->dev,
- "failed to set duty cycle: %d\n", error);
- goto out_unlock;
- }
-
- error = max77843_haptic_enable(haptic);
- if (error)
- dev_err(haptic->dev,
- "cannot enable haptic: %d\n", error);
- } else {
- error = max77843_haptic_disable(haptic);
- if (error)
- dev_err(haptic->dev,
- "cannot disable haptic: %d\n", error);
- }
-
-out_unlock:
- mutex_unlock(&haptic->mutex);
-}
-
-static int max77843_haptic_play_effect(struct input_dev *dev, void *data,
- struct ff_effect *effect)
-{
- struct max77843_haptic *haptic = input_get_drvdata(dev);
- u64 period_mag_multi;
-
- haptic->magnitude = effect->u.rumble.strong_magnitude;
- if (!haptic->magnitude)
- haptic->magnitude = effect->u.rumble.weak_magnitude;
-
- period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
- haptic->pwm_duty = (unsigned int)(period_mag_multi >>
- MAX_MAGNITUDE_SHIFT);
-
- schedule_work(&haptic->work);
-
- return 0;
-}
-
-static int max77843_haptic_open(struct input_dev *dev)
-{
- struct max77843_haptic *haptic = input_get_drvdata(dev);
- int error;
-
- error = max77843_haptic_bias(haptic, true);
- if (error)
- return error;
-
- error = regulator_enable(haptic->motor_reg);
- if (error) {
- dev_err(haptic->dev,
- "failed to enable regulator: %d\n", error);
- return error;
- }
-
- return 0;
-}
-
-static void max77843_haptic_close(struct input_dev *dev)
-{
- struct max77843_haptic *haptic = input_get_drvdata(dev);
- int error;
-
- cancel_work_sync(&haptic->work);
- max77843_haptic_disable(haptic);
-
- error = regulator_disable(haptic->motor_reg);
- if (error)
- dev_err(haptic->dev,
- "failed to disable regulator: %d\n", error);
-
- max77843_haptic_bias(haptic, false);
-}
-
-static int max77843_haptic_probe(struct platform_device *pdev)
-{
- struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
- struct max77843_haptic *haptic;
- int error;
-
- haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
- if (!haptic)
- return -ENOMEM;
-
- haptic->regmap_haptic = max77843->regmap;
- haptic->dev = &pdev->dev;
- haptic->type = MAX77843_HAPTIC_LRA;
- haptic->pwm_divisor = MAX77843_HAPTIC_PWM_DIVISOR_128;
-
- INIT_WORK(&haptic->work, max77843_haptic_play_work);
- mutex_init(&haptic->mutex);
-
- haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
- if (IS_ERR(haptic->pwm_dev)) {
- dev_err(&pdev->dev, "failed to get pwm device\n");
- return PTR_ERR(haptic->pwm_dev);
- }
-
- haptic->motor_reg = devm_regulator_get_exclusive(&pdev->dev, "haptic");
- if (IS_ERR(haptic->motor_reg)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(haptic->motor_reg);
- }
-
- haptic->input_dev = devm_input_allocate_device(&pdev->dev);
- if (!haptic->input_dev) {
- dev_err(&pdev->dev, "failed to allocate input device\n");
- return -ENOMEM;
- }
-
- haptic->input_dev->name = "max77843-haptic";
- haptic->input_dev->id.version = 1;
- haptic->input_dev->dev.parent = &pdev->dev;
- haptic->input_dev->open = max77843_haptic_open;
- haptic->input_dev->close = max77843_haptic_close;
- input_set_drvdata(haptic->input_dev, haptic);
- input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
-
- error = input_ff_create_memless(haptic->input_dev, NULL,
- max77843_haptic_play_effect);
- if (error) {
- dev_err(&pdev->dev, "failed to create force-feedback\n");
- return error;
- }
-
- error = input_register_device(haptic->input_dev);
- if (error) {
- dev_err(&pdev->dev, "failed to register input device\n");
- return error;
- }
-
- platform_set_drvdata(pdev, haptic);
-
- return 0;
-}
-
-static int __maybe_unused max77843_haptic_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct max77843_haptic *haptic = platform_get_drvdata(pdev);
- int error;
-
- error = mutex_lock_interruptible(&haptic->mutex);
- if (error)
- return error;
-
- max77843_haptic_disable(haptic);
-
- haptic->suspended = true;
-
- mutex_unlock(&haptic->mutex);
-
- return 0;
-}
-
-static int __maybe_unused max77843_haptic_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct max77843_haptic *haptic = platform_get_drvdata(pdev);
- unsigned int magnitude;
-
- mutex_lock(&haptic->mutex);
-
- haptic->suspended = false;
-
- magnitude = ACCESS_ONCE(haptic->magnitude);
- if (magnitude)
- max77843_haptic_enable(haptic);
-
- mutex_unlock(&haptic->mutex);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(max77843_haptic_pm_ops,
- max77843_haptic_suspend, max77843_haptic_resume);
-
-static struct platform_driver max77843_haptic_driver = {
- .driver = {
- .name = "max77843-haptic",
- .pm = &max77843_haptic_pm_ops,
- },
- .probe = max77843_haptic_probe,
-};
-module_platform_driver(max77843_haptic_driver);
-
-MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_DESCRIPTION("MAXIM MAX77843 Haptic driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index d0f687281339..a806ba3818f7 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -394,7 +394,7 @@ static const struct platform_device_id max8997_haptic_id[] = {
{ "max8997-haptic", 0 },
{ },
};
-MODULE_DEVICE_TABLE(i2c, max8997_haptic_id);
+MODULE_DEVICE_TABLE(platform, max8997_haptic_id);
static struct platform_driver max8997_haptic_driver = {
.driver = {
@@ -407,7 +407,6 @@ static struct platform_driver max8997_haptic_driver = {
};
module_platform_driver(max8997_haptic_driver);
-MODULE_ALIAS("platform:max8997-haptic");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_DESCRIPTION("max8997_haptic driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5e5051351c3a..f088db31cfc7 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -466,7 +466,6 @@ MODULE_DEVICE_TABLE(of, mpu3050_of_match);
static struct i2c_driver mpu3050_i2c_driver = {
.driver = {
.name = "mpu3050",
- .owner = THIS_MODULE,
.pm = &mpu3050_pm,
.of_match_table = mpu3050_of_match,
},
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 97f711a7bd20..4abdf1efb3e0 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -208,7 +208,6 @@ MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);
static struct i2c_driver pcf8574_kp_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &pcf8574_kp_pm_ops,
#endif
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index c4ca20e63221..3f02e0e03d12 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -20,17 +20,72 @@
#include <linux/regmap.h>
#include <linux/log2.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#define PON_CNTL_1 0x1C
#define PON_CNTL_PULL_UP BIT(7)
#define PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define PON_CNTL_1_PULL_UP_EN 0xe0
+#define PON_CNTL_1_USB_PWR_EN 0x10
+#define PON_CNTL_1_WD_EN_RESET 0x08
+
+#define PM8058_SLEEP_CTRL 0x02b
+#define PM8921_SLEEP_CTRL 0x10a
+
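+/* SMPL: Sudden Momentary Power Loss detection; enabled when a reset (rather than a shutdown) is desired */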
+#define SLEEP_CTRL_SMPL_EN_RESET 0x04
+
+/* Regulator master enable addresses */
+#define REG_PM8058_VREG_EN_MSM 0x018
+#define REG_PM8058_VREG_EN_GRP_5_4 0x1c8
+
+/* Regulator control registers for shutdown/reset */
+#define PM8058_S0_CTRL 0x004
+#define PM8058_S1_CTRL 0x005
+#define PM8058_S3_CTRL 0x111
+#define PM8058_L21_CTRL 0x120
+#define PM8058_L22_CTRL 0x121
+
+#define PM8058_REGULATOR_ENABLE_MASK 0x80
+#define PM8058_REGULATOR_ENABLE 0x80
+#define PM8058_REGULATOR_DISABLE 0x00
+#define PM8058_REGULATOR_PULL_DOWN_MASK 0x40
+#define PM8058_REGULATOR_PULL_DOWN_EN 0x40
+
+/* Buck CTRL register */
+#define PM8058_SMPS_LEGACY_VREF_SEL 0x20
+#define PM8058_SMPS_LEGACY_VPROG_MASK 0x1f
+#define PM8058_SMPS_ADVANCED_BAND_MASK 0xC0
+#define PM8058_SMPS_ADVANCED_BAND_SHIFT 6
+#define PM8058_SMPS_ADVANCED_VPROG_MASK 0x3f
+
+/* Buck TEST2 registers for shutdown/reset */
+#define PM8058_S0_TEST2 0x084
+#define PM8058_S1_TEST2 0x085
+#define PM8058_S3_TEST2 0x11a
+
+#define PM8058_REGULATOR_BANK_WRITE 0x80
+#define PM8058_REGULATOR_BANK_MASK 0x70
+#define PM8058_REGULATOR_BANK_SHIFT 4
+#define PM8058_REGULATOR_BANK_SEL(n) ((n) << PM8058_REGULATOR_BANK_SHIFT)
+
+/* Buck TEST2 register bank 1 */
+#define PM8058_SMPS_LEGACY_VLOW_SEL 0x01
+
+/* Buck TEST2 register bank 7 */
+#define PM8058_SMPS_ADVANCED_MODE_MASK 0x02
+#define PM8058_SMPS_ADVANCED_MODE 0x02
+#define PM8058_SMPS_LEGACY_MODE 0x00
/**
* struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
* @key_press_irq: key press irq number
+ * @regmap: device regmap
+ * @shutdown_fn: shutdown configuration function
*/
struct pmic8xxx_pwrkey {
int key_press_irq;
+ struct regmap *regmap;
+ int (*shutdown_fn)(struct pmic8xxx_pwrkey *, bool);
};
static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
@@ -76,6 +131,212 @@ static int __maybe_unused pmic8xxx_pwrkey_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
+static void pmic8xxx_pwrkey_shutdown(struct platform_device *pdev)
+{
+ struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
+ int error;
+ u8 mask, val;
+ bool reset = system_state == SYSTEM_RESTART;
+
+ if (pwrkey->shutdown_fn) {
+ error = pwrkey->shutdown_fn(pwrkey, reset);
+ if (error)
+ return;
+ }
+
+ /*
+ * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+ * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+ * USB charging is enabled.
+ */
+ mask = PON_CNTL_1_PULL_UP_EN | PON_CNTL_1_USB_PWR_EN;
+ mask |= PON_CNTL_1_WD_EN_RESET;
+ val = mask;
+ if (!reset)
+ val &= ~PON_CNTL_1_WD_EN_RESET;
+
+ regmap_update_bits(pwrkey->regmap, PON_CNTL_1, mask, val);
+}
+
+/*
+ * Set an SMPS regulator to be disabled in its CTRL register, but enabled
+ * in the master enable register. Also set its pull down enable bit.
+ * Take care to make sure that the output voltage doesn't change if switching
+ * from advanced mode to legacy mode.
+ */
+static int pm8058_disable_smps_locally_set_pull_down(struct regmap *regmap,
+ u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
+ u8 master_enable_bit)
+{
+ int error;
+ u8 vref_sel, vlow_sel, band, vprog, bank;
+ unsigned int reg;
+
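+	/* Select TEST2 register bank 7, which holds the advanced/legacy mode bit. */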
+ bank = PM8058_REGULATOR_BANK_SEL(7);
+ error = regmap_write(regmap, test2_addr, bank);
+ if (error)
+ return error;
+
+ error = regmap_read(regmap, test2_addr, &reg);
+ if (error)
+ return error;
+
+ reg &= PM8058_SMPS_ADVANCED_MODE_MASK;
+ /* Check if in advanced mode. */
+ if (reg == PM8058_SMPS_ADVANCED_MODE) {
+ /* Determine current output voltage. */
+ error = regmap_read(regmap, ctrl_addr, &reg);
+ if (error)
+ return error;
+
+ band = reg & PM8058_SMPS_ADVANCED_BAND_MASK;
+ band >>= PM8058_SMPS_ADVANCED_BAND_SHIFT;
+ switch (band) {
+ case 3:
+ vref_sel = 0;
+ vlow_sel = 0;
+ break;
+ case 2:
+ vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+ vlow_sel = 0;
+ break;
+ case 1:
+ vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+ vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
+ break;
+ default:
+ pr_err("%s: regulator already disabled\n", __func__);
+ return -EPERM;
+ }
+ vprog = reg & PM8058_SMPS_ADVANCED_VPROG_MASK;
+ /* Round up if fine step is in use. */
+ vprog = (vprog + 1) >> 1;
+ if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
+ vprog = PM8058_SMPS_LEGACY_VPROG_MASK;
+
+ /* Set VLOW_SEL bit. */
+ bank = PM8058_REGULATOR_BANK_SEL(1);
+ error = regmap_write(regmap, test2_addr, bank);
+ if (error)
+ return error;
+
+ error = regmap_update_bits(regmap, test2_addr,
+ PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
+ | PM8058_SMPS_LEGACY_VLOW_SEL,
+ PM8058_REGULATOR_BANK_WRITE |
+ PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
+ if (error)
+ return error;
+
+ /* Switch to legacy mode */
+ bank = PM8058_REGULATOR_BANK_SEL(7);
+ error = regmap_write(regmap, test2_addr, bank);
+ if (error)
+ return error;
+
+ error = regmap_update_bits(regmap, test2_addr,
+ PM8058_REGULATOR_BANK_WRITE |
+ PM8058_REGULATOR_BANK_MASK |
+ PM8058_SMPS_ADVANCED_MODE_MASK,
+ PM8058_REGULATOR_BANK_WRITE |
+ PM8058_REGULATOR_BANK_SEL(7) |
+ PM8058_SMPS_LEGACY_MODE);
+ if (error)
+ return error;
+
+ /* Enable locally, enable pull down, keep voltage the same. */
+ error = regmap_update_bits(regmap, ctrl_addr,
+ PM8058_REGULATOR_ENABLE_MASK |
+ PM8058_REGULATOR_PULL_DOWN_MASK |
+ PM8058_SMPS_LEGACY_VREF_SEL |
+ PM8058_SMPS_LEGACY_VPROG_MASK,
+ PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
+ | vref_sel | vprog);
+ if (error)
+ return error;
+ }
+
+ /* Enable in master control register. */
+ error = regmap_update_bits(regmap, master_enable_addr,
+ master_enable_bit, master_enable_bit);
+ if (error)
+ return error;
+
+ /* Disable locally and enable pull down. */
+ return regmap_update_bits(regmap, ctrl_addr,
+ PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+ PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_disable_ldo_locally_set_pull_down(struct regmap *regmap,
+ u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
+{
+ int error;
+
+ /* Enable LDO in master control register. */
+ error = regmap_update_bits(regmap, master_enable_addr,
+ master_enable_bit, master_enable_bit);
+ if (error)
+ return error;
+
+ /* Disable LDO in CTRL register and set pull down */
+ return regmap_update_bits(regmap, ctrl_addr,
+ PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+ PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+ int error;
+ struct regmap *regmap = pwrkey->regmap;
+ u8 mask, val;
+
+ /* When shutting down, enable active pulldowns on important rails. */
+ if (!reset) {
+		/* Disable SMPSs 0, 1 and 3 locally and set pulldown enable bits. */
+ pm8058_disable_smps_locally_set_pull_down(regmap,
+ PM8058_S0_CTRL, PM8058_S0_TEST2,
+ REG_PM8058_VREG_EN_MSM, BIT(7));
+ pm8058_disable_smps_locally_set_pull_down(regmap,
+ PM8058_S1_CTRL, PM8058_S1_TEST2,
+ REG_PM8058_VREG_EN_MSM, BIT(6));
+ pm8058_disable_smps_locally_set_pull_down(regmap,
+ PM8058_S3_CTRL, PM8058_S3_TEST2,
+ REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
+ /* Disable LDO 21 locally and set pulldown enable bit. */
+ pm8058_disable_ldo_locally_set_pull_down(regmap,
+ PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
+ BIT(1));
+ }
+
+ /*
+ * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
+ * pull-down state intact. This ensures a safe shutdown.
+ */
+ error = regmap_update_bits(regmap, PM8058_L22_CTRL, 0xbf, 0x93);
+ if (error)
+ return error;
+
+ /* Enable SMPL if resetting is desired */
+ mask = SLEEP_CTRL_SMPL_EN_RESET;
+ val = 0;
+ if (reset)
+ val = mask;
+ return regmap_update_bits(regmap, PM8058_SLEEP_CTRL, mask, val);
+}
+
+static int pm8921_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+ struct regmap *regmap = pwrkey->regmap;
+ u8 mask = SLEEP_CTRL_SMPL_EN_RESET;
+ u8 val = 0;
+
+ /* Enable SMPL if resetting is desired */
+ if (reset)
+ val = mask;
+ return regmap_update_bits(regmap, PM8921_SLEEP_CTRL, mask, val);
+}
+
static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
@@ -109,6 +370,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (!pwrkey)
return -ENOMEM;
+ pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
+ pwrkey->regmap = regmap;
pwrkey->key_press_irq = key_press_irq;
pwr = devm_input_allocate_device(&pdev->dev);
@@ -182,8 +445,8 @@ static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
}
static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
- { .compatible = "qcom,pm8058-pwrkey" },
- { .compatible = "qcom,pm8921-pwrkey" },
+ { .compatible = "qcom,pm8058-pwrkey", .data = &pm8058_pwrkey_shutdown },
+ { .compatible = "qcom,pm8921-pwrkey", .data = &pm8921_pwrkey_shutdown },
{ }
};
MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
@@ -191,6 +454,7 @@ MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
static struct platform_driver pmic8xxx_pwrkey_driver = {
.probe = pmic8xxx_pwrkey_probe,
.remove = pmic8xxx_pwrkey_remove,
+ .shutdown = pmic8xxx_pwrkey_shutdown,
.driver = {
.name = "pm8xxx-pwrkey",
.pm = &pm8xxx_pwr_key_pm_ops,
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index e82edf810d1f..f2261ab54701 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -173,6 +173,7 @@ static const struct of_device_id pwm_beeper_match[] = {
{ .compatible = "pwm-beeper", },
{ },
};
+MODULE_DEVICE_TABLE(of, pwm_beeper_match);
#endif
static struct platform_driver pwm_beeper_driver = {
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e956e81cd4e6..62c5814c796b 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -7,6 +7,7 @@
#include <linux/input-polldev.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <asm/mach-rc32434/gpio.h>
#include <asm/mach-rc32434/rb.h>
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 6bf3f1082f71..a804705eb04a 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -249,6 +249,7 @@ static const struct of_device_id regulator_haptic_dt_match[] = {
{ .compatible = "regulator-haptic" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, regulator_haptic_dt_match);
static struct platform_driver regulator_haptic_driver = {
.probe = regulator_haptic_probe,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 54116e544c96..6f997aa49183 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -253,6 +253,7 @@ static const struct of_device_id bbc_beep_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, bbc_beep_match);
static struct platform_driver bbc_beep_driver = {
.driver = {
@@ -332,6 +333,7 @@ static const struct of_device_id grover_beep_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, grover_beep_match);
static struct platform_driver grover_beep_driver = {
.driver = {
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
if (pdata && pdata->coexist)
return true;
- if (of_find_node_by_name(node, "codec")) {
+ node = of_find_node_by_name(node, "codec");
+ if (node) {
of_node_put(node);
return true;
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 421e29e4cd81..345df9b03aed 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -320,10 +320,8 @@ static int uinput_validate_absbits(struct input_dev *dev)
* Check if absmin/absmax/absfuzz/absflat are sane.
*/
- for (cnt = 0; cnt < ABS_CNT; cnt++) {
+ for_each_set_bit(cnt, dev->absbit, ABS_CNT) {
int min, max;
- if (!test_bit(cnt, dev->absbit))
- continue;
min = input_abs_get_min(dev, cnt);
max = input_abs_get_max(dev, cnt);
@@ -416,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- for (i = 0; i < ABS_CNT; i++) {
+ for_each_set_bit(i, dev->absbit, ABS_CNT) {
input_abs_set_max(dev, i, user_dev->absmax[i]);
input_abs_set_min(dev, i, user_dev->absmin[i]);
input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 95599e478e19..23d0549539d4 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
struct xenbus_transaction xbt;
ret = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_mfn(info->page), 0);
+ virt_to_gfn(info->page), 0);
if (ret < 0)
return ret;
info->gref = ret;
@@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
goto error_irqh;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
- virt_to_mfn(info->page));
+ virt_to_gfn(info->page));
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index d7820d1152d2..17f97e5e11e7 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -341,7 +341,7 @@ config MOUSE_VSXXXAA
config MOUSE_GPIO
tristate "GPIO mouse"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select INPUT_POLLDEV
help
This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 793300bfbddd..ee6a6e9563d4 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_MOUSE_SYNAPTICS_I2C) += synaptics_i2c.o
obj-$(CONFIG_MOUSE_SYNAPTICS_USB) += synaptics_usb.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
-cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o
+cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
psmouse-objs := psmouse-base.o synaptics.o focaltech.o
psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
+#include <linux/dmi.h>
#include "psmouse.h"
#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
+#define ALPS_DELL 0x100 /* device is a Dell laptop */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
return;
}
- /* Non interleaved V2 dualpoint has separate stick button bits */
+ /* Dell non interleaved V2 dualpoint has separate stick button bits */
if (priv->proto_version == ALPS_PROTO_V2 &&
- priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+ priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
left |= packet[0] & 1;
right |= packet[0] & 2;
middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->byte0 = protocol->byte0;
priv->mask0 = protocol->mask0;
priv->flags = protocol->flags;
+ if (dmi_name_in_vendors("Dell"))
+ priv->flags |= ALPS_DELL;
priv->x_max = 2000;
priv->y_max = 1400;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
* Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
*
* Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015 John Horan (knasher@gmail.com)
*
* The USB initialization and package decoding was made by
* Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+ /* MacbookPro12,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
/* Terminating entry */
{}
};
@@ -180,21 +189,47 @@ struct bt_data {
enum tp_type {
TYPE1, /* plain trackpad */
TYPE2, /* button integrated in trackpad */
- TYPE3 /* additional header fields since June 2013 */
+ TYPE3, /* additional header fields since June 2013 */
+ TYPE4 /* additional header field for pressure data */
};
/* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1 (13 * sizeof(__le16))
-#define FINGER_TYPE2 (15 * sizeof(__le16))
-#define FINGER_TYPE3 (19 * sizeof(__le16))
+#define HEADER_TYPE1 (13 * sizeof(__le16))
+#define HEADER_TYPE2 (15 * sizeof(__le16))
+#define HEADER_TYPE3 (19 * sizeof(__le16))
+#define HEADER_TYPE4 (23 * sizeof(__le16))
/* trackpad button data offsets */
+#define BUTTON_TYPE1 0
#define BUTTON_TYPE2 15
#define BUTTON_TYPE3 23
+#define BUTTON_TYPE4 31
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
+/* trackpad finger data block size */
+#define FSIZE_TYPE1 (14 * sizeof(__le16))
+#define FSIZE_TYPE2 (14 * sizeof(__le16))
+#define FSIZE_TYPE3 (14 * sizeof(__le16))
+#define FSIZE_TYPE4 (15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1 (0 * sizeof(__le16))
+#define DELTA_TYPE2 (0 * sizeof(__le16))
+#define DELTA_TYPE3 (0 * sizeof(__le16))
+#define DELTA_TYPE4 (1 * sizeof(__le16))
+
+/* usb control message mode switch data */
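+/* values, in order: um_size, um_req_val, um_req_idx, um_switch_idx, um_switch_on, um_switch_off */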
+#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
+
/* trackpad finger structure, le16-aligned */
struct tp_finger {
__le16 origin; /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
__le16 orientation; /* 16384 when point, else 15 bit angle */
__le16 touch_major; /* touch area, major axis */
__le16 touch_minor; /* touch area, minor axis */
- __le16 unused[3]; /* zeros */
+ __le16 unused[2]; /* zeros */
+ __le16 pressure; /* pressure on forcetouch touchpad */
__le16 multi; /* one finger: varies, more fingers: constant */
} __attribute__((packed,aligned(2)));
/* trackpad finger data size, empirically at least ten fingers */
#define MAX_FINGERS 16
-#define SIZEOF_FINGER sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER)
#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
int bt_datalen; /* data length of the button interface */
int tp_ep; /* the endpoint of the trackpad interface */
enum tp_type tp_type; /* type of trackpad interface */
- int tp_offset; /* offset to trackpad finger data */
+ int tp_header; /* bytes in header block */
int tp_datalen; /* data length of the trackpad interface */
+ int tp_button; /* offset to button data */
+ int tp_fsize; /* bytes in single finger block */
+ int tp_delta; /* offset from header to finger struct */
+ int um_size; /* usb control message length */
+ int um_req_val; /* usb control message value */
+ int um_req_idx; /* usb control message index */
+ int um_switch_idx; /* usb control message mode switch index */
+ int um_switch_on; /* usb control message mode switch on */
+ int um_switch_off; /* usb control message mode switch off */
struct bcm5974_param p; /* finger pressure limits */
struct bcm5974_param w; /* finger width limits */
struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
int slots[MAX_FINGERS]; /* slot assignments */
};
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+ const struct bcm5974_config *c = &dev->cfg;
+ u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
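+	/* each finger block is tp_fsize bytes; the finger struct starts tp_delta bytes in */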
+
+ return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
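+/* expands to the tp_type, tp_header, tp_datalen, tp_button, tp_fsize, tp_delta and um_* fields of struct bcm5974_config */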
+#define DATAFORMAT(type) \
+ type, \
+ HEADER_##type, \
+ HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
+ BUTTON_##type, \
+ FSIZE_##type, \
+ DELTA_##type, \
+ USBMSG_##type
+
/* logical signal quality */
#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
#define SN_WIDTH 25 /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
0,
0x84, sizeof(struct bt_data),
- 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE1),
{ SN_PRESSURE, 0, 256 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
0,
0x84, sizeof(struct bt_data),
- 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE1),
{ SN_PRESSURE, 0, 256 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
- 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ 0x81, DATAFORMAT(TYPE2),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
HAS_INTEGRATED_BUTTON,
0, sizeof(struct bt_data),
- 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+ 0x83, DATAFORMAT(TYPE3),
{ SN_PRESSURE, 0, 300 },
{ SN_WIDTH, 0, 2048 },
{ SN_COORD, -4620, 5140 },
{ SN_COORD, -150, 6600 },
{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0, sizeof(struct bt_data),
+ 0x83, DATAFORMAT(TYPE4),
+ { SN_PRESSURE, 0, 300 },
+ { SN_WIDTH, 0, 2048 },
+ { SN_COORD, -4828, 5345 },
+ { SN_COORD, -203, 6803 },
+ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+ },
{}
};
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
struct input_dev *input = dev->input;
int raw_n, i, n = 0;
- if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
+ if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
return -EIO;
- /* finger data, le16-aligned */
- f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
- raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
+ raw_n = (size - c->tp_header) / c->tp_fsize;
for (i = 0; i < raw_n; i++) {
- if (raw2int(f[i].touch_major) == 0)
+ f = get_tp_finger(dev, i);
+ if (raw2int(f->touch_major) == 0)
continue;
- dev->pos[n].x = raw2int(f[i].abs_x);
- dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y);
- dev->index[n++] = &f[i];
+ dev->pos[n].x = raw2int(f->abs_x);
+ dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
+ dev->index[n++] = f;
}
input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
input_mt_sync_frame(input);
- report_synaptics_data(input, c, f, raw_n);
+ report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
- /* type 2 reports button events via ibt only */
- if (c->tp_type == TYPE2) {
- int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+ /* later types report button events via integrated button only */
+ if (c->caps & HAS_INTEGRATED_BUTTON) {
+ int ibt = raw2int(dev->tp_data[c->tp_button]);
input_report_key(input, BTN_LEFT, ibt);
}
- if (c->tp_type == TYPE3)
- input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
-
input_sync(input);
return 0;
}
-/* Wellspring initialization constants */
-#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
-#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
-#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
-#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
-#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
-#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
-
static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
{
+ const struct bcm5974_config *c = &dev->cfg;
int retval = 0, size;
char *data;
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
if (dev->cfg.tp_type == TYPE3)
return 0;
- data = kmalloc(8, GFP_KERNEL);
+ data = kmalloc(c->um_size, GFP_KERNEL);
if (!data) {
dev_err(&dev->intf->dev, "out of memory\n");
retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
- BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+ c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
- if (size != 8) {
+ if (size != c->um_size) {
dev_err(&dev->intf->dev, "could not read from device\n");
retval = -EIO;
goto out;
}
/* apply the mode switch */
- data[0] = on ?
- BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
- BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
+ data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
/* write configuration */
size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
- BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+ c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
- if (size != 8) {
+ if (size != c->um_size) {
dev_err(&dev->intf->dev, "could not write to device\n");
retval = -EIO;
goto out;
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index efe148474e7f..eb76b61418f3 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
* Daniel Kurtz <djkurtz@chromium.org>
* Benson Leung <bleung@chromium.org>
*
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
* Copyright (C) 2011-2012 Google, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -21,10 +21,12 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
+#include <linux/of.h>
#include "cyapa.h"
@@ -39,11 +41,33 @@ const char product_id[] = "CYTRA";
static int cyapa_reinitialize(struct cyapa *cyapa);
-static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa)
{
+ if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_BL)
+ return true;
+
if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_BL)
return true;
+ return false;
+}
+
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa)
+{
+ if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_APP)
+ return true;
+
+ if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+ return true;
+
+ return false;
+}
+
+static bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+{
+ if (cyapa_is_pip_bl_mode(cyapa))
+ return true;
+
if (cyapa->gen == CYAPA_GEN3 &&
cyapa->state >= CYAPA_STATE_BL_BUSY &&
cyapa->state <= CYAPA_STATE_BL_ACTIVE)
@@ -54,7 +78,7 @@ static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
static inline bool cyapa_is_operational_mode(struct cyapa *cyapa)
{
- if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+ if (cyapa_is_pip_app_mode(cyapa))
return true;
if (cyapa->gen == CYAPA_GEN3 && cyapa->state == CYAPA_STATE_OP)
@@ -188,6 +212,15 @@ static int cyapa_get_state(struct cyapa *cyapa)
if (!error)
goto out_detected;
}
+ if (cyapa->gen == CYAPA_GEN_UNKNOWN ||
+ cyapa->gen == CYAPA_GEN6 ||
+ cyapa->gen == CYAPA_GEN5) {
+ error = cyapa_pip_state_parse(cyapa,
+ status, BL_STATUS_SIZE);
+ if (!error)
+ goto out_detected;
+ }
+	/* For detecting old Gen5 trackpads. */
if ((cyapa->gen == CYAPA_GEN_UNKNOWN ||
cyapa->gen == CYAPA_GEN5) &&
!smbus && even_addr) {
@@ -284,6 +317,9 @@ static int cyapa_check_is_operational(struct cyapa *cyapa)
return error;
switch (cyapa->gen) {
+ case CYAPA_GEN6:
+ cyapa->ops = &cyapa_gen6_ops;
+ break;
case CYAPA_GEN5:
cyapa->ops = &cyapa_gen5_ops;
break;
@@ -306,7 +342,7 @@ static int cyapa_check_is_operational(struct cyapa *cyapa)
/*
* Returns 0 on device detected, negative errno on no device detected.
- * And when the device is detected and opertaional, it will be reset to
+ * And when the device is detected and operational, it will be reset to
* full power active mode automatically.
*/
static int cyapa_detect(struct cyapa *cyapa)
@@ -333,6 +369,7 @@ static int cyapa_open(struct input_dev *input)
{
struct cyapa *cyapa = input_get_drvdata(input);
struct i2c_client *client = cyapa->client;
+ struct device *dev = &client->dev;
int error;
error = mutex_lock_interruptible(&cyapa->state_sync_lock);
@@ -346,10 +383,9 @@ static int cyapa_open(struct input_dev *input)
* when in operational mode.
*/
error = cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0);
+ PWR_MODE_FULL_ACTIVE, 0, false);
if (error) {
- dev_warn(&client->dev,
- "set active power failed: %d\n", error);
+ dev_warn(dev, "set active power failed: %d\n", error);
goto out;
}
} else {
@@ -361,10 +397,14 @@ static int cyapa_open(struct input_dev *input)
}
enable_irq(client->irq);
- if (!pm_runtime_enabled(&client->dev)) {
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ if (!pm_runtime_enabled(dev)) {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
+
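+	/* Resume the device, then let it autosuspend after the idle timeout. */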
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
out:
mutex_unlock(&cyapa->state_sync_lock);
return error;
@@ -374,16 +414,17 @@ static void cyapa_close(struct input_dev *input)
{
struct cyapa *cyapa = input_get_drvdata(input);
struct i2c_client *client = cyapa->client;
+ struct device *dev = &cyapa->client->dev;
mutex_lock(&cyapa->state_sync_lock);
disable_irq(client->irq);
- if (pm_runtime_enabled(&client->dev))
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
mutex_unlock(&cyapa->state_sync_lock);
}
@@ -443,6 +484,7 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
if (cyapa->gen >= CYAPA_GEN5) {
input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
}
input_abs_set_res(input, ABS_MT_POSITION_X,
@@ -492,7 +534,7 @@ static void cyapa_enable_irq_for_cmd(struct cyapa *cyapa)
*/
if (!input || cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0);
+ PWR_MODE_FULL_ACTIVE, 0, false);
/* Gen3 always using polling mode for command. */
if (cyapa->gen >= CYAPA_GEN5)
enable_irq(cyapa->client->irq);
@@ -507,7 +549,8 @@ static void cyapa_disable_irq_for_cmd(struct cyapa *cyapa)
if (cyapa->gen >= CYAPA_GEN5)
disable_irq(cyapa->client->irq);
if (!input || cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, false);
}
}
@@ -563,6 +606,8 @@ static int cyapa_initialize(struct cyapa *cyapa)
error = cyapa_gen3_ops.initialize(cyapa);
if (!error)
error = cyapa_gen5_ops.initialize(cyapa);
+ if (!error)
+ error = cyapa_gen6_ops.initialize(cyapa);
if (error)
return error;
@@ -572,7 +617,7 @@ static int cyapa_initialize(struct cyapa *cyapa)
/* Power down the device until we need it. */
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
return 0;
}
@@ -588,7 +633,8 @@ static int cyapa_reinitialize(struct cyapa *cyapa)
/* Avoid command failures when TP was in OFF state. */
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0, false);
error = cyapa_detect(cyapa);
if (error)
@@ -607,7 +653,8 @@ out:
if (!input || !input->users) {
/* Reset to power OFF state to save power when no user open. */
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, false);
} else if (!error && cyapa->operational) {
/*
* Make sure only enable runtime PM when device is
@@ -615,6 +662,10 @@ out:
*/
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
}
return error;
@@ -624,27 +675,44 @@ static irqreturn_t cyapa_irq(int irq, void *dev_id)
{
struct cyapa *cyapa = dev_id;
struct device *dev = &cyapa->client->dev;
+ int error;
- pm_runtime_get_sync(dev);
if (device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
- /* Interrupt event maybe cuased by host command to trackpad device. */
+ /* Interrupt event can be caused by host command to trackpad device. */
if (cyapa->ops->irq_cmd_handler(cyapa)) {
/*
		 * Interrupt event may be from trackpad device input reporting.
*/
if (!cyapa->input) {
/*
- * Still in probling or in firware image
- * udpating or reading.
+ * Still in probing or in firmware image
+ * updating or reading.
*/
cyapa->ops->sort_empty_output_data(cyapa,
NULL, NULL, NULL);
goto out;
}
- if (!cyapa->operational || cyapa->ops->irq_handler(cyapa)) {
+ if (cyapa->operational) {
+ error = cyapa->ops->irq_handler(cyapa);
+
+ /*
+			 * Apply runtime power management to touch report events,
+			 * except those caused by command responses.
+			 * Note:
+			 * It introduces about 20~40 ms of additional delay
+			 * before the first valid touch report is received;
+			 * this time is spent executing the device runtime
+			 * resume process.
+ */
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
+ }
+
+ if (!cyapa->operational || error) {
if (!mutex_trylock(&cyapa->state_sync_lock)) {
cyapa->ops->sort_empty_output_data(cyapa,
NULL, NULL, NULL);
@@ -656,8 +724,6 @@ static irqreturn_t cyapa_irq(int irq, void *dev_id)
}
out:
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_sync_autosuspend(dev);
return IRQ_HANDLED;
}
@@ -1051,12 +1117,12 @@ static ssize_t cyapa_update_fw_store(struct device *dev,
dev_dbg(dev, "firmware update successfully done.\n");
/*
- * Redetect trackpad device states because firmware update process
+ * Re-detect trackpad device states because firmware update process
* will reset trackpad device into bootloader mode.
*/
ret = cyapa_reinitialize(cyapa);
if (ret) {
- dev_err(dev, "failed to redetect after updated: %d\n", ret);
+ dev_err(dev, "failed to re-detect after updated: %d\n", ret);
error = error ? error : ret;
}
@@ -1120,9 +1186,11 @@ static char *cyapa_state_to_string(struct cyapa *cyapa)
case CYAPA_STATE_BL_ACTIVE:
return "bootloader active";
case CYAPA_STATE_GEN5_BL:
+ case CYAPA_STATE_GEN6_BL:
return "bootloader";
case CYAPA_STATE_OP:
case CYAPA_STATE_GEN5_APP:
+ case CYAPA_STATE_GEN6_APP:
return "operational"; /* Normal valid state. */
default:
return "invalid mode";
@@ -1175,6 +1243,13 @@ static void cyapa_remove_sysfs_group(void *data)
sysfs_remove_group(&cyapa->client->dev.kobj, &cyapa_sysfs_group);
}
+static void cyapa_disable_regulator(void *data)
+{
+ struct cyapa *cyapa = data;
+
+ regulator_disable(cyapa->vcc);
+}
+
static int cyapa_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -1208,6 +1283,27 @@ static int cyapa_probe(struct i2c_client *client,
sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
client->addr);
+ cyapa->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(cyapa->vcc)) {
+ error = PTR_ERR(cyapa->vcc);
+ dev_err(dev, "failed to get vcc regulator: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(cyapa->vcc);
+ if (error) {
+ dev_err(dev, "failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(dev, cyapa_disable_regulator, cyapa);
+ if (error) {
+ cyapa_disable_regulator(cyapa);
+ dev_err(dev, "failed to add disable regulator action: %d\n",
+ error);
+ return error;
+ }
+
error = cyapa_initialize(cyapa);
if (error) {
dev_err(dev, "failed to detect and initialize tp device.\n");
@@ -1296,12 +1392,19 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
: PWR_MODE_OFF;
error = cyapa->ops->set_power_mode(cyapa, power_mode,
- cyapa->suspend_sleep_time);
+ cyapa->suspend_sleep_time, true);
if (error)
dev_err(dev, "suspend set power mode failed: %d\n",
error);
}
+ /*
+ * Disable the proximity interrupt while the system is idle; only a
+ * true touch should wake the system.
+ */
+ if (cyapa->dev_pwr_mode != PWR_MODE_OFF)
+ cyapa->ops->set_proximity(cyapa, false);
+
if (device_may_wakeup(dev))
cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
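enable_irq_wake() can fail, so the driver records whether arming succeeded and only disarms on resume when it did. A sketch of that pairing (the context struct is hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct wake_ctx {
        int irq;
        bool irq_wake;
};

static void wake_ctx_suspend(struct device *dev, struct wake_ctx *ctx)
{
        if (device_may_wakeup(dev))
                ctx->irq_wake = (enable_irq_wake(ctx->irq) == 0);
}

static void wake_ctx_resume(struct device *dev, struct wake_ctx *ctx)
{
        /* Disarm only if the suspend path actually armed the IRQ. */
        if (device_may_wakeup(dev) && ctx->irq_wake) {
                disable_irq_wake(ctx->irq);
                ctx->irq_wake = false;
        }
}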
@@ -1322,7 +1425,10 @@ static int __maybe_unused cyapa_resume(struct device *dev)
cyapa->irq_wake = false;
}
- /* Update device states and runtime PM states. */
+ /*
+ * Update device states and runtime PM states.
+ * Re-enable the proximity interrupt after entering operational mode.
+ */
error = cyapa_reinitialize(cyapa);
if (error)
dev_warn(dev, "failed to reinitialize TP device: %d\n", error);
@@ -1340,7 +1446,8 @@ static int __maybe_unused cyapa_runtime_suspend(struct device *dev)
error = cyapa->ops->set_power_mode(cyapa,
cyapa->runtime_suspend_power_mode,
- cyapa->runtime_suspend_sleep_time);
+ cyapa->runtime_suspend_sleep_time,
+ false);
if (error)
dev_warn(dev, "runtime suspend failed: %d\n", error);
@@ -1352,7 +1459,8 @@ static int __maybe_unused cyapa_runtime_resume(struct device *dev)
struct cyapa *cyapa = dev_get_drvdata(dev);
int error;
- error = cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+ error = cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0, false);
if (error)
dev_warn(dev, "runtime resume failed: %d\n", error);
@@ -1374,17 +1482,26 @@ MODULE_DEVICE_TABLE(i2c, cyapa_id_table);
static const struct acpi_device_id cyapa_acpi_id[] = {
{ "CYAP0000", 0 }, /* Gen3 trackpad with 0x67 I2C address. */
{ "CYAP0001", 0 }, /* Gen5 trackpad with 0x24 I2C address. */
+ { "CYAP0002", 0 }, /* Gen6 trackpad with 0x24 I2C address. */
{ }
};
MODULE_DEVICE_TABLE(acpi, cyapa_acpi_id);
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id cyapa_of_match[] = {
+ { .compatible = "cypress,cyapa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyapa_of_match);
+#endif
+
static struct i2c_driver cyapa_driver = {
.driver = {
.name = "cyapa",
- .owner = THIS_MODULE,
.pm = &cyapa_pm_ops,
.acpi_match_table = ACPI_PTR(cyapa_acpi_id),
+ .of_match_table = of_match_ptr(cyapa_of_match),
},
.probe = cyapa_probe,
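With both acpi_match_table and of_match_table populated, the same driver can bind through ACPI enumeration or a device-tree node. of_match_ptr() keeps the table reference buildable when CONFIG_OF is off; its definition in <linux/of.h> reduces to:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)      (_ptr)
#else
#define of_match_ptr(_ptr)      NULL
#endif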
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index adc9ed5dcb0e..b812bba8cdd7 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -3,7 +3,7 @@
*
* Author: Dudley Du <dudl@cypress.com>
*
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
@@ -19,13 +19,14 @@
#define CYAPA_GEN_UNKNOWN 0x00 /* unknown protocol. */
#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
#define CYAPA_GEN5 0x05 /* support TrueTouch GEN5 trackpad device. */
+#define CYAPA_GEN6 0x06 /* support TrueTouch GEN6 trackpad device. */
#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
/*
* Macros for SMBus communication
*/
-#define SMBUS_READ 0x01
+#define SMBUS_READ 0x01
#define SMBUS_WRITE 0x00
#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
@@ -159,12 +160,89 @@
#define AUTOSUSPEND_DELAY 2000 /* unit : ms */
-#define UNINIT_SLEEP_TIME 0xFFFF
-#define UNINIT_PWR_MODE 0xFF
-
#define BTN_ONLY_MODE_NAME "buttononly"
#define OFF_MODE_NAME "off"
+/* Common macros for PIP interface. */
+#define PIP_HID_DESCRIPTOR_ADDR 0x0001
+#define PIP_REPORT_DESCRIPTOR_ADDR 0x0002
+#define PIP_INPUT_REPORT_ADDR 0x0003
+#define PIP_OUTPUT_REPORT_ADDR 0x0004
+#define PIP_CMD_DATA_ADDR 0x0006
+
+#define PIP_RETRIEVE_DATA_STRUCTURE 0x24
+#define PIP_CMD_CALIBRATE 0x28
+#define PIP_BL_CMD_VERIFY_APP_INTEGRITY 0x31
+#define PIP_BL_CMD_GET_BL_INFO 0x38
+#define PIP_BL_CMD_PROGRAM_VERIFY_ROW 0x39
+#define PIP_BL_CMD_LAUNCH_APP 0x3b
+#define PIP_BL_CMD_INITIATE_BL 0x48
+#define PIP_INVALID_CMD 0xff
+
+#define PIP_HID_DESCRIPTOR_SIZE 32
+#define PIP_HID_APP_REPORT_ID 0xf7
+#define PIP_HID_BL_REPORT_ID 0xff
+
+#define PIP_BL_CMD_REPORT_ID 0x40
+#define PIP_BL_RESP_REPORT_ID 0x30
+#define PIP_APP_CMD_REPORT_ID 0x2f
+#define PIP_APP_RESP_REPORT_ID 0x1f
+
+#define PIP_READ_SYS_INFO_CMD_LENGTH 7
+#define PIP_BL_READ_APP_INFO_CMD_LENGTH 13
+#define PIP_MIN_BL_CMD_LENGTH 13
+#define PIP_MIN_BL_RESP_LENGTH 11
+#define PIP_MIN_APP_CMD_LENGTH 7
+#define PIP_MIN_APP_RESP_LENGTH 5
+#define PIP_UNSUPPORTED_CMD_RESP_LENGTH 6
+#define PIP_READ_SYS_INFO_RESP_LENGTH 71
+#define PIP_BL_APP_INFO_RESP_LENGTH 30
+#define PIP_BL_GET_INFO_RESP_LENGTH 19
+
+#define PIP_BL_PLATFORM_VER_SHIFT 4
+#define PIP_BL_PLATFORM_VER_MASK 0x0f
+
+#define PIP_PRODUCT_FAMILY_MASK 0xf000
+#define PIP_PRODUCT_FAMILY_TRACKPAD 0x1000
+
+#define PIP_DEEP_SLEEP_STATE_ON 0x00
+#define PIP_DEEP_SLEEP_STATE_OFF 0x01
+#define PIP_DEEP_SLEEP_STATE_MASK 0x03
+#define PIP_APP_DEEP_SLEEP_REPORT_ID 0xf0
+#define PIP_DEEP_SLEEP_RESP_LENGTH 5
+#define PIP_DEEP_SLEEP_OPCODE 0x08
+#define PIP_DEEP_SLEEP_OPCODE_MASK 0x0f
+
+#define PIP_RESP_LENGTH_OFFSET 0
+#define PIP_RESP_LENGTH_SIZE 2
+#define PIP_RESP_REPORT_ID_OFFSET 2
+#define PIP_RESP_RSVD_OFFSET 3
+#define PIP_RESP_RSVD_KEY 0x00
+#define PIP_RESP_BL_SOP_OFFSET 4
+#define PIP_SOP_KEY 0x01 /* Start of Packet */
+#define PIP_EOP_KEY 0x17 /* End of Packet */
+#define PIP_RESP_APP_CMD_OFFSET 4
+#define GET_PIP_CMD_CODE(reg) ((reg) & 0x7f)
+#define PIP_RESP_STATUS_OFFSET 5
+
+#define VALID_CMD_RESP_HEADER(resp, cmd) \
+ (((resp)[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID) && \
+ ((resp)[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) && \
+ (GET_PIP_CMD_CODE((resp)[PIP_RESP_APP_CMD_OFFSET]) == (cmd)))
+
+#define PIP_CMD_COMPLETE_SUCCESS(resp_data) \
+ ((resp_data)[PIP_RESP_STATUS_OFFSET] == 0x00)
+
+/* Variables to record the latest PIP trackpad power states. */
+#define UNINIT_SLEEP_TIME 0xffff
+#define UNINIT_PWR_MODE 0xff
+#define PIP_DEV_SET_PWR_STATE(cyapa, s) ((cyapa)->dev_pwr_mode = (s))
+#define PIP_DEV_GET_PWR_STATE(cyapa) ((cyapa)->dev_pwr_mode)
+#define PIP_DEV_SET_SLEEP_TIME(cyapa, t) ((cyapa)->dev_sleep_time = (t))
+#define PIP_DEV_GET_SLEEP_TIME(cyapa) ((cyapa)->dev_sleep_time)
+#define PIP_DEV_UNINIT_SLEEP_TIME(cyapa) \
+ (((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+
/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
#define CYAPA_MAX_MT_SLOTS 15
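The response-layout macros above combine into a two-step check: validate the header, then the status byte. A hypothetical helper (not in the patch) showing the intended use:

#include <linux/types.h>

/* Sketch: does the buffer hold a complete, successful response to the
 * given PIP application command code? */
static bool pip_app_resp_ok(const u8 *resp, int len, u8 cmd)
{
        if (!resp || len <= PIP_RESP_STATUS_OFFSET)
                return false;

        return VALID_CMD_RESP_HEADER(resp, cmd) &&
               PIP_CMD_COMPLETE_SUCCESS(resp);
}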
@@ -195,10 +273,12 @@ struct cyapa_dev_ops {
int (*sort_empty_output_data)(struct cyapa *,
u8 *, int *, cb_sort);
- int (*set_power_mode)(struct cyapa *, u8, u16);
+ int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+
+ int (*set_proximity)(struct cyapa *, bool);
};
-struct cyapa_gen5_cmd_states {
+struct cyapa_pip_cmd_states {
struct mutex cmd_lock;
struct completion cmd_ready;
atomic_t cmd_issued;
@@ -214,7 +294,7 @@ struct cyapa_gen5_cmd_states {
};
union cyapa_cmd_states {
- struct cyapa_gen5_cmd_states gen5;
+ struct cyapa_pip_cmd_states pip;
};
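set_power_mode() grows a bool that tells each per-generation backend whether the request comes from system suspend or runtime PM, and set_proximity() joins the ops table. A hypothetical call-site wrapper illustrating the dispatch, assuming the ops pointer held in struct cyapa (not from the patch):

#include <linux/errno.h>

static int cyapa_set_power(struct cyapa *cyapa, u8 mode, u16 sleep_time,
                           bool is_suspend)
{
        if (!cyapa->ops->set_power_mode)
                return -EOPNOTSUPP;

        /* Each generation (gen3/gen5/gen6) supplies its own backend. */
        return cyapa->ops->set_power_mode(cyapa, mode, sleep_time,
                                          is_suspend);
}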
enum cyapa_state {
@@ -225,6 +305,14 @@ enum cyapa_state {
CYAPA_STATE_OP,
CYAPA_STATE_GEN5_BL,
CYAPA_STATE_GEN5_APP,
+ CYAPA_STATE_GEN6_BL,
+ CYAPA_STATE_GEN6_APP,
+};
+
+struct gen6_interval_setting {
+ u16 active_interval;
+ u16 lp1_interval;
+ u16 lp2_interval;
};
/* The main device structure */
@@ -233,6 +321,7 @@ struct cyapa {
u8 status[BL_STATUS_SIZE];
bool operational; /* true: ready for data reporting; false: not. */
+ struct regulator *vcc;
struct i2c_client *client;
struct input_dev *input;
char phys[32]; /* Device physical location */
@@ -246,9 +335,11 @@ struct cyapa {
u16 runtime_suspend_sleep_time;
u8 dev_pwr_mode;
u16 dev_sleep_time;
+ struct gen6_interval_setting gen6_interval_setting;
/* Read from query data region. */
char product_id[16];
+ u8 platform_ver; /* Platform version. */
u8 fw_maj_ver; /* Firmware major version. */
u8 fw_min_ver; /* Firmware minor version. */
u8 btn_capability;
@@ -259,7 +350,7 @@ struct cyapa {
int physical_size_y;
/* Used in ttsp and truetouch based trackpad devices. */
- u8 x_origin; /* X Axis Origin: 0 = left side; 1 = rigth side. */
+ u8 x_origin; /* X Axis Origin: 0 = left side; 1 = right side. */
u8 y_origin; /* Y Axis Origin: 0 = top; 1 = bottom. */
int electrodes_x; /* Number of electrodes on the X Axis*/
int electrodes_y; /* Number of electrodes on the Y Axis*/
@@ -282,9 +373,9 @@ struct cyapa {
ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
- u8 *values);
+ u8 *values);
ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
- u8 *values);
+ u8 *values);
ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values);
@@ -293,9 +384,51 @@ int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout);
u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time);
u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode);
-
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size);
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size);
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+ u8 *buf, int *len, cb_sort func);
+int cyapa_i2c_pip_cmd_irq_sync(struct cyapa *cyapa,
+ u8 *cmd, int cmd_len,
+ u8 *resp_data, int *resp_len,
+ unsigned long timeout,
+ cb_sort func,
+ bool irq_mode);
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len);
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa, u8 *buf, int len);
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state);
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_bl_exit(struct cyapa *cyapa);
+int cyapa_pip_bl_enter(struct cyapa *cyapa);
+
+
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa);
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa);
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa);
+
+int cyapa_pip_resume_scanning(struct cyapa *cyapa);
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa);
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_do_fw_update(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_activate(struct cyapa *cyapa);
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa);
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable);
+
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa);
+int cyapa_pip_irq_handler(struct cyapa *cyapa);
+
+
+extern u8 pip_read_sys_info[];
+extern u8 pip_bl_read_app_info[];
extern const char product_id[];
extern const struct cyapa_dev_ops cyapa_gen3_ops;
extern const struct cyapa_dev_ops cyapa_gen5_ops;
+extern const struct cyapa_dev_ops cyapa_gen6_ops;
#endif
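The newly exported cyapa_pip_* entry points let Gen5 and Gen6 share one bootloader and firmware path. A hypothetical outline of how they might be sequenced during an update (the ordering is assumed; it is not shown in this header):

#include <linux/firmware.h>

static int example_pip_fw_update(struct cyapa *cyapa,
                                 const struct firmware *fw)
{
        int error;

        error = cyapa_pip_check_fw(cyapa, fw);  /* validate image first */
        if (error)
                return error;

        error = cyapa_pip_bl_enter(cyapa);      /* drop into bootloader */
        if (error)
                return error;

        error = cyapa_pip_bl_initiate(cyapa, fw);
        if (!error)
                error = cyapa_pip_do_fw_update(cyapa, fw);

        /* Try to relaunch the application either way. */
        return error ? error : cyapa_pip_bl_exit(cyapa);
}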
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 3faf01c1b191..1a9d12ae7538 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -6,7 +6,7 @@
* Daniel Kurtz <djkurtz@chromium.org>
* Benson Leung <bleung@chromium.org>
*
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
* Copyright (C) 2011-2012 Google, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -950,7 +950,7 @@ static u16 cyapa_get_wait_time_for_pwr_cmd(u8 pwr_mode)
* Device power mode can only be set when device is in operational mode.
*/
static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
- u16 always_unused)
+ u16 always_unused, bool is_suspend_unused)
{
int ret;
u8 power;
@@ -999,6 +999,11 @@ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
return ret;
}
+static int cyapa_gen3_set_proximity(struct cyapa *cyapa, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
static int cyapa_gen3_get_query_data(struct cyapa *cyapa)
{
u8 query_data[QUERY_DATA_SIZE];
@@ -1107,7 +1112,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
* may cause problems, so we set the power mode first here.
*/
error = cyapa_gen3_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0);
+ PWR_MODE_FULL_ACTIVE, 0, false);
if (error)
dev_err(dev, "%s: set full power mode failed: %d\n",
__func__, error);
@@ -1156,7 +1161,7 @@ static bool cyapa_gen3_irq_cmd_handler(struct cyapa *cyapa)
* so, stop cyapa_gen3_irq_handler from continuing to process it, to
* avoid unwanted error detecting and processing.
*
- * And also, avoid the periodicly accerted interrupts to be processed
+ * Also, prevent the periodically asserted interrupts from being processed
* as touch inputs when gen3 failed to launch into application mode,
* which will cause gen3 to stay in bootloader mode.
*/
@@ -1243,4 +1248,6 @@ const struct cyapa_dev_ops cyapa_gen3_ops = {
.irq_cmd_handler = cyapa_gen3_irq_cmd_handler,
.sort_empty_output_data = cyapa_gen3_empty_output_data,
.set_power_mode = cyapa_gen3_set_power_mode,
+
+ .set_proximity = cyapa_gen3_set_proximity,
};
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index afc39e799da2..118ba977181e 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -3,7 +3,7 @@
*
* Author: Dudley Du <dudl@cypress.com>
*
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
@@ -19,15 +19,11 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/crc-itu-t.h>
+#include <linux/pm_runtime.h>
#include "cyapa.h"
-/* Macro of Gen5 */
-#define RECORD_EVENT_NONE 0
-#define RECORD_EVENT_TOUCHDOWN 1
-#define RECORD_EVENT_DISPLACE 2
-#define RECORD_EVENT_LIFTOFF 3
-
+/* Macro of TSG firmware image */
#define CYAPA_TSG_FLASH_MAP_BLOCK_SIZE 0x80
#define CYAPA_TSG_IMG_FW_HDR_SIZE 13
#define CYAPA_TSG_FW_ROW_SIZE (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE)
@@ -44,43 +40,55 @@
#define CYAPA_TSG_MAX_CMD_SIZE 256
-#define GEN5_BL_CMD_VERIFY_APP_INTEGRITY 0x31
-#define GEN5_BL_CMD_GET_BL_INFO 0x38
-#define GEN5_BL_CMD_PROGRAM_VERIFY_ROW 0x39
-#define GEN5_BL_CMD_LAUNCH_APP 0x3b
-#define GEN5_BL_CMD_INITIATE_BL 0x48
-
-#define GEN5_HID_DESCRIPTOR_ADDR 0x0001
-#define GEN5_REPORT_DESCRIPTOR_ADDR 0x0002
-#define GEN5_INPUT_REPORT_ADDR 0x0003
-#define GEN5_OUTPUT_REPORT_ADDR 0x0004
-#define GEN5_CMD_DATA_ADDR 0x0006
-
-#define GEN5_TOUCH_REPORT_HEAD_SIZE 7
-#define GEN5_TOUCH_REPORT_MAX_SIZE 127
-#define GEN5_BTN_REPORT_HEAD_SIZE 6
-#define GEN5_BTN_REPORT_MAX_SIZE 14
-#define GEN5_WAKEUP_EVENT_SIZE 4
-#define GEN5_RAW_DATA_HEAD_SIZE 24
-
-#define GEN5_BL_CMD_REPORT_ID 0x40
-#define GEN5_BL_RESP_REPORT_ID 0x30
-#define GEN5_APP_CMD_REPORT_ID 0x2f
-#define GEN5_APP_RESP_REPORT_ID 0x1f
-
-#define GEN5_APP_DEEP_SLEEP_REPORT_ID 0xf0
-#define GEN5_DEEP_SLEEP_RESP_LENGTH 5
+/* Macro of PIP interface */
+#define PIP_BL_INITIATE_RESP_LEN 11
+#define PIP_BL_FAIL_EXIT_RESP_LEN 11
+#define PIP_BL_FAIL_EXIT_STATUS_CODE 0x0c
+#define PIP_BL_VERIFY_INTEGRITY_RESP_LEN 12
+#define PIP_BL_INTEGRITY_CHEKC_PASS 0x00
+#define PIP_BL_BLOCK_WRITE_RESP_LEN 11
+
+#define PIP_TOUCH_REPORT_ID 0x01
+#define PIP_BTN_REPORT_ID 0x03
+#define PIP_WAKEUP_EVENT_REPORT_ID 0x04
+#define PIP_PUSH_BTN_REPORT_ID 0x06
+#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05 /* Special for old Gen5 TP. */
+#define PIP_PROXIMITY_REPORT_ID 0x07
+
+#define PIP_PROXIMITY_REPORT_SIZE 6
+#define PIP_PROXIMITY_DISTANCE_OFFSET 0x05
+#define PIP_PROXIMITY_DISTANCE_MASK 0x01
+
+#define PIP_TOUCH_REPORT_HEAD_SIZE 7
+#define PIP_TOUCH_REPORT_MAX_SIZE 127
+#define PIP_BTN_REPORT_HEAD_SIZE 6
+#define PIP_BTN_REPORT_MAX_SIZE 14
+#define PIP_WAKEUP_EVENT_SIZE 4
+
+#define PIP_NUMBER_OF_TOUCH_OFFSET 5
+#define PIP_NUMBER_OF_TOUCH_MASK 0x1f
+#define PIP_BUTTONS_OFFSET 5
+#define PIP_BUTTONS_MASK 0x0f
+#define PIP_GET_EVENT_ID(reg) (((reg) >> 5) & 0x03)
+#define PIP_GET_TOUCH_ID(reg) ((reg) & 0x1f)
+#define PIP_TOUCH_TYPE_FINGER 0x00
+#define PIP_TOUCH_TYPE_PROXIMITY 0x01
+#define PIP_TOUCH_TYPE_HOVER 0x02
+#define PIP_GET_TOUCH_TYPE(reg) ((reg) & 0x07)
-#define GEN5_CMD_GET_PARAMETER 0x05
-#define GEN5_CMD_SET_PARAMETER 0x06
-#define GEN5_PARAMETER_ACT_INTERVL_ID 0x4d
-#define GEN5_PARAMETER_ACT_INTERVL_SIZE 1
-#define GEN5_PARAMETER_ACT_LFT_INTERVL_ID 0x4f
-#define GEN5_PARAMETER_ACT_LFT_INTERVL_SIZE 2
-#define GEN5_PARAMETER_LP_INTRVL_ID 0x4c
-#define GEN5_PARAMETER_LP_INTRVL_SIZE 2
+#define RECORD_EVENT_NONE 0
+#define RECORD_EVENT_TOUCHDOWN 1
+#define RECORD_EVENT_DISPLACE 2
+#define RECORD_EVENT_LIFTOFF 3
-#define GEN5_PARAMETER_DISABLE_PIP_REPORT 0x08
+#define PIP_SENSING_MODE_MUTUAL_CAP_FINE 0x00
+#define PIP_SENSING_MODE_SELF_CAP 0x02
+
+#define PIP_SET_PROXIMITY 0x49
+
+/* Macro of Gen5 */
+#define GEN5_BL_MAX_OUTPUT_LENGTH 0x0100
+#define GEN5_APP_MAX_OUTPUT_LENGTH 0x00fe
#define GEN5_POWER_STATE_ACTIVE 0x01
#define GEN5_POWER_STATE_LOOK_FOR_TOUCH 0x02
@@ -89,46 +97,19 @@
#define GEN5_POWER_STATE_BTN_ONLY 0x05
#define GEN5_POWER_STATE_OFF 0x06
-#define GEN5_DEEP_SLEEP_STATE_MASK 0x03
-#define GEN5_DEEP_SLEEP_STATE_ON 0x00
-#define GEN5_DEEP_SLEEP_STATE_OFF 0x01
-
-#define GEN5_DEEP_SLEEP_OPCODE 0x08
-#define GEN5_DEEP_SLEEP_OPCODE_MASK 0x0f
-
#define GEN5_POWER_READY_MAX_INTRVL_TIME 50 /* Unit: ms */
#define GEN5_POWER_IDLE_MAX_INTRVL_TIME 250 /* Unit: ms */
-#define GEN5_CMD_REPORT_ID_OFFSET 4
-
-#define GEN5_RESP_REPORT_ID_OFFSET 2
-#define GEN5_RESP_RSVD_OFFSET 3
-#define GEN5_RESP_RSVD_KEY 0x00
-#define GEN5_RESP_BL_SOP_OFFSET 4
-#define GEN5_SOP_KEY 0x01 /* Start of Packet */
-#define GEN5_EOP_KEY 0x17 /* End of Packet */
-#define GEN5_RESP_APP_CMD_OFFSET 4
-#define GET_GEN5_CMD_CODE(reg) ((reg) & 0x7f)
-
-#define VALID_CMD_RESP_HEADER(resp, cmd) \
- (((resp)[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID) && \
- ((resp)[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) && \
- (GET_GEN5_CMD_CODE((resp)[GEN5_RESP_APP_CMD_OFFSET]) == (cmd)))
-
-#define GEN5_MIN_BL_CMD_LENGTH 13
-#define GEN5_MIN_BL_RESP_LENGTH 11
-#define GEN5_MIN_APP_CMD_LENGTH 7
-#define GEN5_MIN_APP_RESP_LENGTH 5
-#define GEN5_UNSUPPORTED_CMD_RESP_LENGTH 6
-
-#define GEN5_RESP_LENGTH_OFFSET 0x00
-#define GEN5_RESP_LENGTH_SIZE 2
-
-#define GEN5_HID_DESCRIPTOR_SIZE 32
-#define GEN5_BL_HID_REPORT_ID 0xff
-#define GEN5_APP_HID_REPORT_ID 0xf7
-#define GEN5_BL_MAX_OUTPUT_LENGTH 0x0100
-#define GEN5_APP_MAX_OUTPUT_LENGTH 0x00fe
+#define GEN5_CMD_GET_PARAMETER 0x05
+#define GEN5_CMD_SET_PARAMETER 0x06
+#define GEN5_PARAMETER_ACT_INTERVL_ID 0x4d
+#define GEN5_PARAMETER_ACT_INTERVL_SIZE 1
+#define GEN5_PARAMETER_ACT_LFT_INTERVL_ID 0x4f
+#define GEN5_PARAMETER_ACT_LFT_INTERVL_SIZE 2
+#define GEN5_PARAMETER_LP_INTRVL_ID 0x4c
+#define GEN5_PARAMETER_LP_INTRVL_SIZE 2
+
+#define GEN5_PARAMETER_DISABLE_PIP_REPORT 0x08
#define GEN5_BL_REPORT_DESCRIPTOR_SIZE 0x1d
#define GEN5_BL_REPORT_DESCRIPTOR_ID 0xfe
@@ -136,26 +117,6 @@
#define GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE 0xfa
#define GEN5_APP_REPORT_DESCRIPTOR_ID 0xf6
-#define GEN5_TOUCH_REPORT_ID 0x01
-#define GEN5_BTN_REPORT_ID 0x03
-#define GEN5_WAKEUP_EVENT_REPORT_ID 0x04
-#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05
-#define GEN5_PUSH_BTN_REPORT_ID 0x06
-
-#define GEN5_CMD_COMPLETE_SUCCESS(status) ((status) == 0x00)
-
-#define GEN5_BL_INITIATE_RESP_LEN 11
-#define GEN5_BL_FAIL_EXIT_RESP_LEN 11
-#define GEN5_BL_FAIL_EXIT_STATUS_CODE 0x0c
-#define GEN5_BL_VERIFY_INTEGRITY_RESP_LEN 12
-#define GEN5_BL_INTEGRITY_CHEKC_PASS 0x00
-#define GEN5_BL_BLOCK_WRITE_RESP_LEN 11
-#define GEN5_BL_READ_APP_INFO_RESP_LEN 31
-#define GEN5_CMD_CALIBRATE 0x28
-#define CYAPA_SENSING_MODE_MUTUAL_CAP_FINE 0x00
-#define CYAPA_SENSING_MODE_SELF_CAP 0x02
-
-#define GEN5_CMD_RETRIEVE_DATA_STRUCTURE 0x24
#define GEN5_RETRIEVE_MUTUAL_PWC_DATA 0x00
#define GEN5_RETRIEVE_SELF_CAP_PWC_DATA 0x01
@@ -170,28 +131,19 @@
#define GEN5_PANEL_SCAN_SELF_BASELINE 0x04
#define GEN5_PANEL_SCAN_SELF_DIFFCOUNT 0x05
-/* The offset only valid for reterive PWC and panel scan commands */
+/* The offset is only valid for the retrieve PWC and panel scan commands */
#define GEN5_RESP_DATA_STRUCTURE_OFFSET 10
#define GEN5_PWC_DATA_ELEMENT_SIZE_MASK 0x07
-#define GEN5_NUMBER_OF_TOUCH_OFFSET 5
-#define GEN5_NUMBER_OF_TOUCH_MASK 0x1f
-#define GEN5_BUTTONS_OFFSET 5
-#define GEN5_BUTTONS_MASK 0x0f
-#define GEN5_GET_EVENT_ID(reg) (((reg) >> 5) & 0x03)
-#define GEN5_GET_TOUCH_ID(reg) ((reg) & 0x1f)
-
-#define GEN5_PRODUCT_FAMILY_MASK 0xf000
-#define GEN5_PRODUCT_FAMILY_TRACKPAD 0x1000
-#define TSG_INVALID_CMD 0xff
-
-struct cyapa_gen5_touch_record {
+struct cyapa_pip_touch_record {
/*
* Bit 7 - 3: reserved
* Bit 2 - 0: touch type;
* 0 : standard finger;
- * 1 - 15 : reserved.
+ * 1 : proximity (supported starting with Gen5 TP).
+ * 2 : finger hover (defined, but not used yet).
+ * 3 - 15 : reserved.
*/
u8 touch_type;
@@ -221,7 +173,14 @@ struct cyapa_gen5_touch_record {
/* Bit 15 - 8 of Y-axis coordinate of the touch in pixel. */
u8 y_hi;
- /* Touch intensity in counts, pressure value. */
+ /*
+ * The meaning of this value depends on the touch_type.
+ * For the standard finger type:
+ * Touch intensity in counts, i.e. the pressure value.
+ * For the proximity type (supported starting with Gen5 TP):
+ * The distance, in surface units, between the contact and
+ * the surface.
+ */
u8 z;
/*
@@ -260,9 +219,9 @@ struct cyapa_gen5_touch_record {
u8 orientation;
} __packed;
-struct cyapa_gen5_report_data {
- u8 report_head[GEN5_TOUCH_REPORT_HEAD_SIZE];
- struct cyapa_gen5_touch_record touch_records[10];
+struct cyapa_pip_report_data {
+ u8 report_head[PIP_TOUCH_REPORT_HEAD_SIZE];
+ struct cyapa_pip_touch_record touch_records[10];
} __packed;
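With proximity added as a touch type, the same record layout carries either pressure or distance in z. Two hypothetical accessors (not from the patch) built on the PIP_* macros above:

#include <linux/types.h>

static int pip_report_contacts(const struct cyapa_pip_report_data *report)
{
        /* Low 5 bits of byte 5 of the report head: contact count. */
        return report->report_head[PIP_NUMBER_OF_TOUCH_OFFSET] &
               PIP_NUMBER_OF_TOUCH_MASK;
}

static bool pip_record_is_proximity(const struct cyapa_pip_touch_record *rec)
{
        /* For proximity records, z carries distance, not pressure. */
        return PIP_GET_TOUCH_TYPE(rec->touch_type) ==
               PIP_TOUCH_TYPE_PROXIMITY;
}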
struct cyapa_tsg_bin_image_head {
@@ -272,6 +231,12 @@ struct cyapa_tsg_bin_image_head {
u8 fw_major_version;
u8 fw_minor_version;
u8 fw_revision_control_number[8];
+ u8 silicon_id_hi;
+ u8 silicon_id_lo;
+ u8 chip_revision;
+ u8 family_id;
+ u8 bl_ver_maj;
+ u8 bl_ver_min;
} __packed;
struct cyapa_tsg_bin_image_data_record {
@@ -288,36 +253,36 @@ struct cyapa_tsg_bin_image {
struct cyapa_tsg_bin_image_data_record records[0];
} __packed;
-struct gen5_bl_packet_start {
+struct pip_bl_packet_start {
u8 sop; /* Start of packet, must be 01h */
u8 cmd_code;
__le16 data_length; /* Size of data parameter start from data[0] */
} __packed;
-struct gen5_bl_packet_end {
+struct pip_bl_packet_end {
__le16 crc;
u8 eop; /* End of packet, must be 17h */
} __packed;
-struct gen5_bl_cmd_head {
+struct pip_bl_cmd_head {
__le16 addr; /* Output report register address, must be 0004h */
/* Size of packet not including output report register address */
__le16 length;
u8 report_id; /* Bootloader output report id, must be 40h */
u8 rsvd; /* Reserved, must be 0 */
- struct gen5_bl_packet_start packet_start;
+ struct pip_bl_packet_start packet_start;
u8 data[0]; /* Command data variable based on commands */
} __packed;
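Every bootloader command is framed the same way: SOP and command code up front, then the payload, then a CRC-CCITT computed over everything from packet_start through the payload, closed by the EOP key. A sketch of sealing such a packet (the helper is hypothetical; the framing mirrors the code below):

#include <linux/crc-itu-t.h>
#include <asm/unaligned.h>

static void pip_bl_seal_packet(struct pip_bl_cmd_head *head, u16 data_len)
{
        struct pip_bl_packet_end *end =
                (struct pip_bl_packet_end *)(head->data + data_len);
        u16 crc;

        /* CRC covers packet_start, cmd_code, data_length and payload. */
        crc = crc_itu_t(0xffff, (u8 *)&head->packet_start,
                        sizeof(struct pip_bl_packet_start) + data_len);
        put_unaligned_le16(crc, &end->crc);
        end->eop = PIP_EOP_KEY;
}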
/* Initiate bootload command data structure. */
-struct gen5_bl_initiate_cmd_data {
+struct pip_bl_initiate_cmd_data {
/* Key must be "A5h 01h 02h 03h FFh FEh FDh 5Ah" */
u8 key[CYAPA_TSG_BL_KEY_SIZE];
u8 metadata_raw_parameter[CYAPA_TSG_FLASH_MAP_METADATA_SIZE];
__le16 metadata_crc;
} __packed;
-struct gen5_bl_metadata_row_params {
+struct tsg_bl_metadata_row_params {
__le16 size;
__le16 maximum_size;
__le32 app_start;
@@ -332,13 +297,13 @@ struct gen5_bl_metadata_row_params {
} __packed;
/* Bootload program and verify row command data structure */
-struct gen5_bl_flash_row_head {
+struct tsg_bl_flash_row_head {
u8 flash_array_id;
__le16 flash_row_id;
u8 flash_data[0];
} __packed;
-struct gen5_app_cmd_head {
+struct pip_app_cmd_head {
__le16 addr; /* Output report register address, must be 0004h */
/* Size of packet not including output report register address */
__le16 length;
@@ -369,30 +334,26 @@ struct gen5_retrieve_panel_scan_data {
u8 data_id;
} __packed;
-/* Variables to record latest gen5 trackpad power states. */
-#define GEN5_DEV_SET_PWR_STATE(cyapa, s) ((cyapa)->dev_pwr_mode = (s))
-#define GEN5_DEV_GET_PWR_STATE(cyapa) ((cyapa)->dev_pwr_mode)
-#define GEN5_DEV_SET_SLEEP_TIME(cyapa, t) ((cyapa)->dev_sleep_time = (t))
-#define GEN5_DEV_GET_SLEEP_TIME(cyapa) ((cyapa)->dev_sleep_time)
-#define GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) \
- (((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
-
+u8 pip_read_sys_info[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02 };
+u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
+ 0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
+ };
-static u8 cyapa_gen5_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
+static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
0xff, 0xfe, 0xfd, 0x5a };
-static int cyapa_gen5_initialize(struct cyapa *cyapa)
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
- init_completion(&gen5_pip->cmd_ready);
- atomic_set(&gen5_pip->cmd_issued, 0);
- mutex_init(&gen5_pip->cmd_lock);
+ init_completion(&pip->cmd_ready);
+ atomic_set(&pip->cmd_issued, 0);
+ mutex_init(&pip->cmd_lock);
- gen5_pip->resp_sort_func = NULL;
- gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
- gen5_pip->resp_data = NULL;
- gen5_pip->resp_len = NULL;
+ pip->resp_sort_func = NULL;
+ pip->in_progress_cmd = PIP_INVALID_CMD;
+ pip->resp_data = NULL;
+ pip->resp_len = NULL;
cyapa->dev_pwr_mode = UNINIT_PWR_MODE;
cyapa->dev_sleep_time = UNINIT_SLEEP_TIME;
@@ -401,7 +362,7 @@ static int cyapa_gen5_initialize(struct cyapa *cyapa)
}
/* Return negative errno, or else the number of bytes read. */
-static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
{
int ret;
@@ -415,14 +376,13 @@ static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
if (ret != size)
return (ret < 0) ? ret : -EIO;
-
return size;
}
/**
* Return a negative errno code else zero on success.
*/
-static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
{
int ret;
@@ -441,10 +401,10 @@ static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
* This function is aimed at dumping all unread data in the Gen5 trackpad
* before sending any command; otherwise, the interrupt line will be blocked.
*/
-static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
u8 *buf, int *len, cb_sort func)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int length;
int report_count;
int empty_count;
@@ -476,13 +436,13 @@ static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
if (empty_count > 5)
return 0;
- error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf,
- GEN5_RESP_LENGTH_SIZE);
+ error = cyapa_i2c_pip_read(cyapa, pip->empty_buf,
+ PIP_RESP_LENGTH_SIZE);
if (error < 0)
return error;
- length = get_unaligned_le16(gen5_pip->empty_buf);
- if (length == GEN5_RESP_LENGTH_SIZE) {
+ length = get_unaligned_le16(pip->empty_buf);
+ if (length == PIP_RESP_LENGTH_SIZE) {
empty_count++;
continue;
} else if (length > CYAPA_REG_MAP_SIZE) {
@@ -490,11 +450,11 @@ static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
return -EINVAL;
} else if (length == 0) {
/* Application or bootloader launch data polled out. */
- length = GEN5_RESP_LENGTH_SIZE;
+ length = PIP_RESP_LENGTH_SIZE;
if (buf && buf_len && func &&
- func(cyapa, gen5_pip->empty_buf, length)) {
+ func(cyapa, pip->empty_buf, length)) {
length = min(buf_len, length);
- memcpy(buf, gen5_pip->empty_buf, length);
+ memcpy(buf, pip->empty_buf, length);
*len = length;
/* Response found, success. */
return 0;
@@ -502,19 +462,19 @@ static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
continue;
}
- error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+ error = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
if (error < 0)
return error;
report_count--;
empty_count = 0;
- length = get_unaligned_le16(gen5_pip->empty_buf);
- if (length <= GEN5_RESP_LENGTH_SIZE) {
+ length = get_unaligned_le16(pip->empty_buf);
+ if (length <= PIP_RESP_LENGTH_SIZE) {
empty_count++;
} else if (buf && buf_len && func &&
- func(cyapa, gen5_pip->empty_buf, length)) {
+ func(cyapa, pip->empty_buf, length)) {
length = min(buf_len, length);
- memcpy(buf, gen5_pip->empty_buf, length);
+ memcpy(buf, pip->empty_buf, length);
*len = length;
/* Response found, success. */
return 0;
@@ -531,24 +491,24 @@ static int cyapa_do_i2c_pip_cmd_irq_sync(
u8 *cmd, size_t cmd_len,
unsigned long timeout)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int error;
/* Wait for interrupt to set ready completion */
- init_completion(&gen5_pip->cmd_ready);
+ init_completion(&pip->cmd_ready);
- atomic_inc(&gen5_pip->cmd_issued);
+ atomic_inc(&pip->cmd_issued);
error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
if (error) {
- atomic_dec(&gen5_pip->cmd_issued);
+ atomic_dec(&pip->cmd_issued);
return (error < 0) ? error : -EIO;
}
/* Wait for interrupt to indicate command is completed. */
- timeout = wait_for_completion_timeout(&gen5_pip->cmd_ready,
+ timeout = wait_for_completion_timeout(&pip->cmd_ready,
msecs_to_jiffies(timeout));
if (timeout == 0) {
- atomic_dec(&gen5_pip->cmd_issued);
+ atomic_dec(&pip->cmd_issued);
return -ETIMEDOUT;
}
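The irq-sync path is a standard completion handshake: the submitter bumps cmd_issued, writes the command, then sleeps until the interrupt handler fires cmd_ready or the timeout elapses. The generic shape of that wait (names hypothetical):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct cmd_sync {
        struct completion ready;        /* completed by the IRQ handler */
        atomic_t issued;                /* commands awaiting a response */
};

static int cmd_sync_wait(struct cmd_sync *sync, unsigned long timeout_ms)
{
        if (!wait_for_completion_timeout(&sync->ready,
                                         msecs_to_jiffies(timeout_ms))) {
                atomic_dec(&sync->issued);      /* nobody will answer now */
                return -ETIMEDOUT;
        }
        return 0;
}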
@@ -562,15 +522,15 @@ static int cyapa_do_i2c_pip_cmd_polling(
unsigned long timeout,
cb_sort func)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int tries;
int length;
int error;
- atomic_inc(&gen5_pip->cmd_issued);
+ atomic_inc(&pip->cmd_issued);
error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
if (error) {
- atomic_dec(&gen5_pip->cmd_issued);
+ atomic_dec(&pip->cmd_issued);
return error < 0 ? error : -EIO;
}
@@ -591,11 +551,11 @@ static int cyapa_do_i2c_pip_cmd_polling(
error = error ? error : -ETIMEDOUT;
}
- atomic_dec(&gen5_pip->cmd_issued);
+ atomic_dec(&pip->cmd_issued);
return error;
}
-static int cyapa_i2c_pip_cmd_irq_sync(
+int cyapa_i2c_pip_cmd_irq_sync(
struct cyapa *cyapa,
u8 *cmd, int cmd_len,
u8 *resp_data, int *resp_len,
@@ -603,34 +563,34 @@ static int cyapa_i2c_pip_cmd_irq_sync(
cb_sort func,
bool irq_mode)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int error;
if (!cmd || !cmd_len)
return -EINVAL;
/* Commands must be serialized. */
- error = mutex_lock_interruptible(&gen5_pip->cmd_lock);
+ error = mutex_lock_interruptible(&pip->cmd_lock);
if (error)
return error;
- gen5_pip->resp_sort_func = func;
- gen5_pip->resp_data = resp_data;
- gen5_pip->resp_len = resp_len;
+ pip->resp_sort_func = func;
+ pip->resp_data = resp_data;
+ pip->resp_len = resp_len;
- if (cmd_len >= GEN5_MIN_APP_CMD_LENGTH &&
- cmd[4] == GEN5_APP_CMD_REPORT_ID) {
+ if (cmd_len >= PIP_MIN_APP_CMD_LENGTH &&
+ cmd[4] == PIP_APP_CMD_REPORT_ID) {
/* Application command */
- gen5_pip->in_progress_cmd = cmd[6] & 0x7f;
- } else if (cmd_len >= GEN5_MIN_BL_CMD_LENGTH &&
- cmd[4] == GEN5_BL_CMD_REPORT_ID) {
+ pip->in_progress_cmd = cmd[6] & 0x7f;
+ } else if (cmd_len >= PIP_MIN_BL_CMD_LENGTH &&
+ cmd[4] == PIP_BL_CMD_REPORT_ID) {
/* Bootloader command */
- gen5_pip->in_progress_cmd = cmd[7];
+ pip->in_progress_cmd = cmd[7];
}
/* Send command data, wait and read output response data's length. */
if (irq_mode) {
- gen5_pip->is_irq_mode = true;
+ pip->is_irq_mode = true;
error = cyapa_do_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
timeout);
if (error == -ETIMEDOUT && resp_data &&
@@ -646,54 +606,54 @@ static int cyapa_i2c_pip_cmd_irq_sync(
error = error ? error : -ETIMEDOUT;
}
} else {
- gen5_pip->is_irq_mode = false;
+ pip->is_irq_mode = false;
error = cyapa_do_i2c_pip_cmd_polling(cyapa, cmd, cmd_len,
resp_data, resp_len, timeout, func);
}
- gen5_pip->resp_sort_func = NULL;
- gen5_pip->resp_data = NULL;
- gen5_pip->resp_len = NULL;
- gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+ pip->resp_sort_func = NULL;
+ pip->resp_data = NULL;
+ pip->resp_len = NULL;
+ pip->in_progress_cmd = PIP_INVALID_CMD;
- mutex_unlock(&gen5_pip->cmd_lock);
+ mutex_unlock(&pip->cmd_lock);
return error;
}
-static bool cyapa_gen5_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
u8 *data, int len)
{
- if (!data || len < GEN5_MIN_BL_RESP_LENGTH)
+ if (!data || len < PIP_MIN_BL_RESP_LENGTH)
return false;
/* Bootloader input report id 30h */
- if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_RESP_REPORT_ID &&
- data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
- data[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY)
+ if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_BL_RESP_REPORT_ID &&
+ data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+ data[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY)
return true;
return false;
}
-static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
u8 *data, int len)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int resp_len;
- if (!data || len < GEN5_MIN_APP_RESP_LENGTH)
+ if (!data || len < PIP_MIN_APP_RESP_LENGTH)
return false;
- if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID &&
- data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) {
- resp_len = get_unaligned_le16(&data[GEN5_RESP_LENGTH_OFFSET]);
- if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) == 0x00 &&
- resp_len == GEN5_UNSUPPORTED_CMD_RESP_LENGTH &&
- data[5] == gen5_pip->in_progress_cmd) {
+ if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID &&
+ data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) {
+ resp_len = get_unaligned_le16(&data[PIP_RESP_LENGTH_OFFSET]);
+ if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) == 0x00 &&
+ resp_len == PIP_UNSUPPORTED_CMD_RESP_LENGTH &&
+ data[5] == pip->in_progress_cmd) {
/* Unsupported command code */
return false;
- } else if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) ==
- gen5_pip->in_progress_cmd) {
+ } else if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) ==
+ pip->in_progress_cmd) {
/* Correct command response received */
return true;
}
@@ -702,10 +662,10 @@ static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
return false;
}
-static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_application_launch_data(struct cyapa *cyapa,
u8 *buf, int len)
{
- if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+ if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
return false;
/*
@@ -718,25 +678,25 @@ static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
return false;
}
-static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
+static bool cyapa_sort_gen5_hid_descriptor_data(struct cyapa *cyapa,
u8 *buf, int len)
{
int resp_len;
int max_output_len;
/* Check hid descriptor. */
- if (len != GEN5_HID_DESCRIPTOR_SIZE)
+ if (len != PIP_HID_DESCRIPTOR_SIZE)
return false;
- resp_len = get_unaligned_le16(&buf[GEN5_RESP_LENGTH_OFFSET]);
+ resp_len = get_unaligned_le16(&buf[PIP_RESP_LENGTH_OFFSET]);
max_output_len = get_unaligned_le16(&buf[16]);
- if (resp_len == GEN5_HID_DESCRIPTOR_SIZE) {
- if (buf[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_HID_REPORT_ID &&
+ if (resp_len == PIP_HID_DESCRIPTOR_SIZE) {
+ if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID &&
max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
/* BL mode HID Descriptor */
return true;
- } else if ((buf[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_APP_HID_REPORT_ID) &&
+ } else if ((buf[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_HID_APP_REPORT_ID) &&
max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
/* APP mode HID Descriptor */
return true;
@@ -746,21 +706,21 @@ static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
return false;
}
-static bool cyapa_gen5_sort_deep_sleep_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_deep_sleep_data(struct cyapa *cyapa,
u8 *buf, int len)
{
- if (len == GEN5_DEEP_SLEEP_RESP_LENGTH &&
- buf[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_APP_DEEP_SLEEP_REPORT_ID &&
- (buf[4] & GEN5_DEEP_SLEEP_OPCODE_MASK) ==
- GEN5_DEEP_SLEEP_OPCODE)
+ if (len == PIP_DEEP_SLEEP_RESP_LENGTH &&
+ buf[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_APP_DEEP_SLEEP_REPORT_ID &&
+ (buf[4] & PIP_DEEP_SLEEP_OPCODE_MASK) ==
+ PIP_DEEP_SLEEP_OPCODE)
return true;
return false;
}
static int gen5_idle_state_parse(struct cyapa *cyapa)
{
- u8 resp_data[GEN5_HID_DESCRIPTOR_SIZE];
+ u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
int max_output_len;
int length;
u8 cmd[2];
@@ -778,9 +738,9 @@ static int gen5_idle_state_parse(struct cyapa *cyapa)
if (ret != 3)
return ret < 0 ? ret : -EIO;
- length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
- if (length == GEN5_RESP_LENGTH_SIZE) {
- /* Normal state of Gen5 with no data to respose */
+ length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
+ if (length == PIP_RESP_LENGTH_SIZE) {
+ /* Normal state of Gen5 with no pending response data */
cyapa->gen = CYAPA_GEN5;
cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -788,30 +748,30 @@ static int gen5_idle_state_parse(struct cyapa *cyapa)
/* Read the descriptor from the trackpad device */
cmd[0] = 0x01;
cmd[1] = 0x00;
- length = GEN5_HID_DESCRIPTOR_SIZE;
+ length = PIP_HID_DESCRIPTOR_SIZE;
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
- cmd, GEN5_RESP_LENGTH_SIZE,
+ cmd, PIP_RESP_LENGTH_SIZE,
resp_data, &length,
300,
- cyapa_gen5_sort_hid_descriptor_data,
+ cyapa_sort_gen5_hid_descriptor_data,
false);
if (error)
return error;
length = get_unaligned_le16(
- &resp_data[GEN5_RESP_LENGTH_OFFSET]);
+ &resp_data[PIP_RESP_LENGTH_OFFSET]);
max_output_len = get_unaligned_le16(&resp_data[16]);
- if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
- length == GEN5_RESP_LENGTH_SIZE) &&
- (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_HID_REPORT_ID) &&
+ if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+ length == PIP_RESP_LENGTH_SIZE) &&
+ (resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_HID_BL_REPORT_ID) &&
max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
/* BL mode HID Descriptor read */
cyapa->state = CYAPA_STATE_GEN5_BL;
- } else if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
- length == GEN5_RESP_LENGTH_SIZE) &&
- (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_APP_HID_REPORT_ID) &&
+ } else if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+ length == PIP_RESP_LENGTH_SIZE) &&
+ (resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_HID_APP_REPORT_ID) &&
max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
/* APP mode HID Descriptor read */
cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -839,14 +799,14 @@ static int gen5_hid_description_header_parse(struct cyapa *cyapa, u8 *reg_data)
* or report any touch or button data.
*/
ret = cyapa_i2c_pip_read(cyapa, resp_data,
- GEN5_HID_DESCRIPTOR_SIZE);
- if (ret != GEN5_HID_DESCRIPTOR_SIZE)
+ PIP_HID_DESCRIPTOR_SIZE);
+ if (ret != PIP_HID_DESCRIPTOR_SIZE)
return ret < 0 ? ret : -EIO;
- length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+ length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
max_output_len = get_unaligned_le16(&resp_data[16]);
- if (length == GEN5_RESP_LENGTH_SIZE) {
- if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_HID_REPORT_ID) {
+ if (length == PIP_RESP_LENGTH_SIZE) {
+ if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_HID_BL_REPORT_ID) {
/*
* BL mode HID Descriptor has been previously
* read out.
@@ -861,15 +821,15 @@ static int gen5_hid_description_header_parse(struct cyapa *cyapa, u8 *reg_data)
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_APP;
}
- } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
- resp_data[2] == GEN5_BL_HID_REPORT_ID &&
+ } else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+ resp_data[2] == PIP_HID_BL_REPORT_ID &&
max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
/* BL mode HID Descriptor read. */
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_BL;
- } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
- (resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_APP_HID_REPORT_ID) &&
+ } else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+ (resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_HID_APP_REPORT_ID) &&
max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
/* APP mode HID Descriptor read. */
cyapa->gen = CYAPA_GEN5;
@@ -886,22 +846,22 @@ static int gen5_report_data_header_parse(struct cyapa *cyapa, u8 *reg_data)
{
int length;
- length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
- switch (reg_data[GEN5_RESP_REPORT_ID_OFFSET]) {
- case GEN5_TOUCH_REPORT_ID:
- if (length < GEN5_TOUCH_REPORT_HEAD_SIZE ||
- length > GEN5_TOUCH_REPORT_MAX_SIZE)
+ length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+ switch (reg_data[PIP_RESP_REPORT_ID_OFFSET]) {
+ case PIP_TOUCH_REPORT_ID:
+ if (length < PIP_TOUCH_REPORT_HEAD_SIZE ||
+ length > PIP_TOUCH_REPORT_MAX_SIZE)
return -EINVAL;
break;
- case GEN5_BTN_REPORT_ID:
+ case PIP_BTN_REPORT_ID:
case GEN5_OLD_PUSH_BTN_REPORT_ID:
- case GEN5_PUSH_BTN_REPORT_ID:
- if (length < GEN5_BTN_REPORT_HEAD_SIZE ||
- length > GEN5_BTN_REPORT_MAX_SIZE)
+ case PIP_PUSH_BTN_REPORT_ID:
+ if (length < PIP_BTN_REPORT_HEAD_SIZE ||
+ length > PIP_BTN_REPORT_MAX_SIZE)
return -EINVAL;
break;
- case GEN5_WAKEUP_EVENT_REPORT_ID:
- if (length != GEN5_WAKEUP_EVENT_SIZE)
+ case PIP_WAKEUP_EVENT_REPORT_ID:
+ if (length != PIP_WAKEUP_EVENT_SIZE)
return -EINVAL;
break;
default:
@@ -915,7 +875,7 @@ static int gen5_report_data_header_parse(struct cyapa *cyapa, u8 *reg_data)
static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int length;
int ret;
@@ -924,15 +884,15 @@ static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
* otherwise Gen5 trackpad cannot respond to the next command
* or report any touch or button data.
*/
- length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
- ret = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+ length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+ ret = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
if (ret != length)
return ret < 0 ? ret : -EIO;
- if (length == GEN5_RESP_LENGTH_SIZE) {
+ if (length == PIP_RESP_LENGTH_SIZE) {
/* Previous command has read all the data out. */
- if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_RESP_REPORT_ID) {
+ if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_BL_RESP_REPORT_ID) {
/* Gen5 BL command response data detected */
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_BL;
@@ -941,21 +901,21 @@ static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_APP;
}
- } else if ((gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_RESP_REPORT_ID) &&
- (gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
- GEN5_RESP_RSVD_KEY) &&
- (gen5_pip->empty_buf[GEN5_RESP_BL_SOP_OFFSET] ==
- GEN5_SOP_KEY) &&
- (gen5_pip->empty_buf[length - 1] ==
- GEN5_EOP_KEY)) {
+ } else if ((pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_BL_RESP_REPORT_ID) &&
+ (pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+ PIP_RESP_RSVD_KEY) &&
+ (pip->empty_buf[PIP_RESP_BL_SOP_OFFSET] ==
+ PIP_SOP_KEY) &&
+ (pip->empty_buf[length - 1] ==
+ PIP_EOP_KEY)) {
/* Gen5 BL command response data detected */
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_BL;
- } else if (gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_APP_RESP_REPORT_ID &&
- gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
- GEN5_RESP_RSVD_KEY) {
+ } else if (pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_APP_RESP_REPORT_ID &&
+ pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+ PIP_RESP_RSVD_KEY) {
/* Gen5 APP command response data detected */
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -977,12 +937,12 @@ static int cyapa_gen5_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
cyapa->state = CYAPA_STATE_NO_DEVICE;
/* Parse based on Gen5 characteristic registers and bits */
- length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
- if (length == 0 || length == GEN5_RESP_LENGTH_SIZE) {
+ length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+ if (length == 0 || length == PIP_RESP_LENGTH_SIZE) {
gen5_idle_state_parse(cyapa);
- } else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
- (reg_data[2] == GEN5_BL_HID_REPORT_ID ||
- reg_data[2] == GEN5_APP_HID_REPORT_ID)) {
+ } else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+ (reg_data[2] == PIP_HID_BL_REPORT_ID ||
+ reg_data[2] == PIP_HID_APP_REPORT_ID)) {
gen5_hid_description_header_parse(cyapa, reg_data);
} else if ((length == GEN5_APP_REPORT_DESCRIPTOR_SIZE ||
length == GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE) &&
@@ -992,17 +952,17 @@ static int cyapa_gen5_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
cyapa->state = CYAPA_STATE_GEN5_APP;
} else if (length == GEN5_BL_REPORT_DESCRIPTOR_SIZE &&
reg_data[2] == GEN5_BL_REPORT_DESCRIPTOR_ID) {
- /* 0x1D 0x00 0xFE is Gen5 BL report descriptior header. */
+ /* 0x1D 0x00 0xFE is Gen5 BL report descriptor header. */
cyapa->gen = CYAPA_GEN5;
cyapa->state = CYAPA_STATE_GEN5_BL;
- } else if (reg_data[2] == GEN5_TOUCH_REPORT_ID ||
- reg_data[2] == GEN5_BTN_REPORT_ID ||
+ } else if (reg_data[2] == PIP_TOUCH_REPORT_ID ||
+ reg_data[2] == PIP_BTN_REPORT_ID ||
reg_data[2] == GEN5_OLD_PUSH_BTN_REPORT_ID ||
- reg_data[2] == GEN5_PUSH_BTN_REPORT_ID ||
- reg_data[2] == GEN5_WAKEUP_EVENT_REPORT_ID) {
+ reg_data[2] == PIP_PUSH_BTN_REPORT_ID ||
+ reg_data[2] == PIP_WAKEUP_EVENT_REPORT_ID) {
gen5_report_data_header_parse(cyapa, reg_data);
- } else if (reg_data[2] == GEN5_BL_RESP_REPORT_ID ||
- reg_data[2] == GEN5_APP_RESP_REPORT_ID) {
+ } else if (reg_data[2] == PIP_BL_RESP_REPORT_ID ||
+ reg_data[2] == PIP_APP_RESP_REPORT_ID) {
gen5_cmd_resp_header_parse(cyapa, reg_data);
}
@@ -1023,14 +983,25 @@ static int cyapa_gen5_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
return -EAGAIN;
}
-static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
- const struct firmware *fw)
+static struct cyapa_tsg_bin_image_data_record *
+cyapa_get_image_record_data_num(const struct firmware *fw,
+ int *record_num)
+{
+ int head_size;
+
+ head_size = fw->data[0] + 1;
+ *record_num = (fw->size - head_size) /
+ sizeof(struct cyapa_tsg_bin_image_data_record);
+ return (struct cyapa_tsg_bin_image_data_record *)&fw->data[head_size];
+}
+
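The helper reads the head size from the image's first byte (the head_size field presumably counts the bytes that follow it, hence the +1) and derives the record count from the remaining length. An illustrative use (the function is hypothetical):

#include <linux/crc-itu-t.h>
#include <linux/firmware.h>

static u16 example_records_crc(const struct firmware *fw)
{
        struct cyapa_tsg_bin_image_data_record *records;
        u16 crc = 0xffff;
        int i, count;

        records = cyapa_get_image_record_data_num(fw, &count);

        /* records[count - 1] is always the APP_INTEGRITY row. */
        for (i = 0; i < count; i++)
                crc = crc_itu_t(crc, records[i].record_data,
                                CYAPA_TSG_FW_ROW_SIZE);
        return crc;
}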
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw)
{
- struct cyapa_tsg_bin_image *image;
- struct gen5_bl_cmd_head *bl_cmd_head;
- struct gen5_bl_packet_start *bl_packet_start;
- struct gen5_bl_initiate_cmd_data *cmd_data;
- struct gen5_bl_packet_end *bl_packet_end;
+ struct cyapa_tsg_bin_image_data_record *image_records;
+ struct pip_bl_cmd_head *bl_cmd_head;
+ struct pip_bl_packet_start *bl_packet_start;
+ struct pip_bl_initiate_cmd_data *cmd_data;
+ struct pip_bl_packet_end *bl_packet_end;
u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
int cmd_len;
u16 cmd_data_len;
@@ -1046,30 +1017,28 @@ static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
- bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+ bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
cmd_data_len = CYAPA_TSG_BL_KEY_SIZE + CYAPA_TSG_FLASH_MAP_BLOCK_SIZE;
- cmd_len = sizeof(struct gen5_bl_cmd_head) + cmd_data_len +
- sizeof(struct gen5_bl_packet_end);
+ cmd_len = sizeof(struct pip_bl_cmd_head) + cmd_data_len +
+ sizeof(struct pip_bl_packet_end);
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
- bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+ bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
bl_packet_start = &bl_cmd_head->packet_start;
- bl_packet_start->sop = GEN5_SOP_KEY;
- bl_packet_start->cmd_code = GEN5_BL_CMD_INITIATE_BL;
+ bl_packet_start->sop = PIP_SOP_KEY;
+ bl_packet_start->cmd_code = PIP_BL_CMD_INITIATE_BL;
/* 8 key bytes and 128 bytes block size */
put_unaligned_le16(cmd_data_len, &bl_packet_start->data_length);
- cmd_data = (struct gen5_bl_initiate_cmd_data *)bl_cmd_head->data;
- memcpy(cmd_data->key, cyapa_gen5_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+ cmd_data = (struct pip_bl_initiate_cmd_data *)bl_cmd_head->data;
+ memcpy(cmd_data->key, cyapa_pip_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+
+ image_records = cyapa_get_image_record_data_num(fw, &records_num);
- /* Copy 60 bytes Meta Data Row Parameters */
- image = (struct cyapa_tsg_bin_image *)fw->data;
- records_num = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
- sizeof(struct cyapa_tsg_bin_image_data_record);
/* APP_INTEGRITY row is always the last row block */
- data = image->records[records_num - 1].record_data;
+ data = image_records[records_num - 1].record_data;
memcpy(cmd_data->metadata_raw_parameter, data,
CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
@@ -1077,47 +1046,47 @@ static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
put_unaligned_le16(meta_data_crc, &cmd_data->metadata_crc);
- bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+ bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
cmd_data_len);
cmd_crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
- sizeof(struct gen5_bl_packet_start) + cmd_data_len);
+ sizeof(struct pip_bl_packet_start) + cmd_data_len);
put_unaligned_le16(cmd_crc, &bl_packet_end->crc);
- bl_packet_end->eop = GEN5_EOP_KEY;
+ bl_packet_end->eop = PIP_EOP_KEY;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, cmd_len,
resp_data, &resp_len, 12000,
- cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
- if (error || resp_len != GEN5_BL_INITIATE_RESP_LEN ||
- resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ cyapa_sort_tsg_pip_bl_resp_data, true);
+ if (error || resp_len != PIP_BL_INITIATE_RESP_LEN ||
+ resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error ? error : -EAGAIN;
return 0;
}
-static bool cyapa_gen5_sort_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
+static bool cyapa_sort_pip_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
{
- if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+ if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
return false;
if (buf[0] == 0 && buf[1] == 0)
return true;
/* Exit bootloader failed for some reason. */
- if (len == GEN5_BL_FAIL_EXIT_RESP_LEN &&
- buf[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_RESP_REPORT_ID &&
- buf[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
- buf[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY &&
- buf[10] == GEN5_EOP_KEY)
+ if (len == PIP_BL_FAIL_EXIT_RESP_LEN &&
+ buf[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_BL_RESP_REPORT_ID &&
+ buf[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+ buf[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY &&
+ buf[10] == PIP_EOP_KEY)
return true;
return false;
}
-static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
+int cyapa_pip_bl_exit(struct cyapa *cyapa)
{
u8 bl_gen5_bl_exit[] = { 0x04, 0x00,
@@ -1132,13 +1101,13 @@ static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
bl_gen5_bl_exit, sizeof(bl_gen5_bl_exit),
resp_data, &resp_len,
- 5000, cyapa_gen5_sort_bl_exit_data, false);
+ 5000, cyapa_sort_pip_bl_exit_data, false);
if (error)
return error;
- if (resp_len == GEN5_BL_FAIL_EXIT_RESP_LEN ||
- resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
- GEN5_BL_RESP_REPORT_ID)
+ if (resp_len == PIP_BL_FAIL_EXIT_RESP_LEN ||
+ resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+ PIP_BL_RESP_REPORT_ID)
return -EAGAIN;
if (resp_data[0] == 0x00 && resp_data[1] == 0x00)
@@ -1147,7 +1116,7 @@ static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
return -ENODEV;
}
-static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
+int cyapa_pip_bl_enter(struct cyapa *cyapa)
{
u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x01 };
u8 resp_data[2];
@@ -1157,15 +1126,12 @@ static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
error = cyapa_poll_state(cyapa, 500);
if (error < 0)
return error;
- if (cyapa->gen != CYAPA_GEN5)
- return -EINVAL;
- /* Already in Gen5 BL. Skipping exit. */
- if (cyapa->state == CYAPA_STATE_GEN5_BL)
+ /* Already in bootloader mode; skip the mode switch. */
+ if (cyapa_is_pip_bl_mode(cyapa))
return 0;
-
- if (cyapa->state != CYAPA_STATE_GEN5_APP)
- return -EAGAIN;
+ else if (!cyapa_is_pip_app_mode(cyapa))
+ return -EINVAL;
/* Try to dump all buffered report data before any send command. */
cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -1179,39 +1145,79 @@ static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 5000, cyapa_gen5_sort_application_launch_data,
+ 5000, cyapa_sort_pip_application_launch_data,
true);
if (error || resp_data[0] != 0x00 || resp_data[1] != 0x00)
return error < 0 ? error : -EAGAIN;
cyapa->operational = false;
- cyapa->state = CYAPA_STATE_GEN5_BL;
+ if (cyapa->gen == CYAPA_GEN5)
+ cyapa->state = CYAPA_STATE_GEN5_BL;
+ else if (cyapa->gen == CYAPA_GEN6)
+ cyapa->state = CYAPA_STATE_GEN6_BL;
return 0;
}
-static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+static int cyapa_pip_fw_head_check(struct cyapa *cyapa,
+ struct cyapa_tsg_bin_image_head *image_head)
+{
+ if (image_head->head_size != 0x0C && image_head->head_size != 0x12)
+ return -EINVAL;
+
+ switch (cyapa->gen) {
+ case CYAPA_GEN6:
+ if (image_head->family_id != 0x9B ||
+ image_head->silicon_id_hi != 0x0B)
+ return -EINVAL;
+ break;
+ case CYAPA_GEN5:
+ /* Gen5 without proximity support. */
+ if (cyapa->platform_ver < 2) {
+ if (image_head->head_size == 0x0C)
+ break;
+ return -EINVAL;
+ }
+
+ if (image_head->family_id != 0x91 ||
+ image_head->silicon_id_hi != 0x02)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw)
{
struct device *dev = &cyapa->client->dev;
- const struct cyapa_tsg_bin_image *image = (const void *)fw->data;
+ struct cyapa_tsg_bin_image_data_record *image_records;
const struct cyapa_tsg_bin_image_data_record *app_integrity;
- const struct gen5_bl_metadata_row_params *metadata;
- size_t flash_records_count;
+ const struct tsg_bl_metadata_row_params *metadata;
+ int flash_records_count;
u32 fw_app_start, fw_upgrade_start;
u16 fw_app_len, fw_upgrade_len;
u16 app_crc;
u16 app_integrity_crc;
- int record_index;
int i;
- flash_records_count = (fw->size -
- sizeof(struct cyapa_tsg_bin_image_head)) /
- sizeof(struct cyapa_tsg_bin_image_data_record);
+ /* Verify that the firmware image is not misused across Gen5 and Gen6. */
+ if (cyapa_pip_fw_head_check(cyapa,
+ (struct cyapa_tsg_bin_image_head *)fw->data)) {
+ dev_err(dev, "%s: firmware image not match TP device.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ image_records =
+ cyapa_get_image_record_data_num(fw, &flash_records_count);
/*
* APP_INTEGRITY row is always the last row block,
* and the row id must be 0x01ff.
*/
- app_integrity = &image->records[flash_records_count - 1];
+ app_integrity = &image_records[flash_records_count - 1];
if (app_integrity->flash_array_id != 0x00 ||
get_unaligned_be16(&app_integrity->row_number) != 0x01ff) {
@@ -1242,14 +1248,11 @@ static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
return -EINVAL;
}
- /*
- * Verify application image CRC
- */
- record_index = fw_app_start / CYAPA_TSG_FW_ROW_SIZE -
- CYAPA_TSG_IMG_START_ROW_NUM;
+ /* Verify application image CRC. */
app_crc = 0xffffU;
for (i = 0; i < fw_app_len / CYAPA_TSG_FW_ROW_SIZE; i++) {
- const u8 *data = image->records[record_index + i].record_data;
+ const u8 *data = image_records[i].record_data;
+
app_crc = crc_itu_t(app_crc, data, CYAPA_TSG_FW_ROW_SIZE);
}
@@ -1261,13 +1264,13 @@ static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
return 0;
}
-static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
+static int cyapa_pip_write_fw_block(struct cyapa *cyapa,
struct cyapa_tsg_bin_image_data_record *flash_record)
{
- struct gen5_bl_cmd_head *bl_cmd_head;
- struct gen5_bl_packet_start *bl_packet_start;
- struct gen5_bl_flash_row_head *flash_row_head;
- struct gen5_bl_packet_end *bl_packet_end;
+ struct pip_bl_cmd_head *bl_cmd_head;
+ struct pip_bl_packet_start *bl_packet_start;
+ struct tsg_bl_flash_row_head *flash_row_head;
+ struct pip_bl_packet_end *bl_packet_end;
u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
u16 cmd_len;
u8 flash_array_id;
@@ -1286,71 +1289,68 @@ static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
record_data = flash_record->record_data;
memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
- bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+ bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
bl_packet_start = &bl_cmd_head->packet_start;
- cmd_len = sizeof(struct gen5_bl_cmd_head) +
- sizeof(struct gen5_bl_flash_row_head) +
+ cmd_len = sizeof(struct pip_bl_cmd_head) +
+ sizeof(struct tsg_bl_flash_row_head) +
CYAPA_TSG_FLASH_MAP_BLOCK_SIZE +
- sizeof(struct gen5_bl_packet_end);
+ sizeof(struct pip_bl_packet_end);
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
/* Don't include 2 bytes register address */
put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
- bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
- bl_packet_start->sop = GEN5_SOP_KEY;
- bl_packet_start->cmd_code = GEN5_BL_CMD_PROGRAM_VERIFY_ROW;
+ bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
+ bl_packet_start->sop = PIP_SOP_KEY;
+ bl_packet_start->cmd_code = PIP_BL_CMD_PROGRAM_VERIFY_ROW;
/* 1 (Flash Array ID) + 2 (Flash Row ID) + 128 (flash data) */
- data_len = sizeof(struct gen5_bl_flash_row_head) + record_len;
+ data_len = sizeof(struct tsg_bl_flash_row_head) + record_len;
put_unaligned_le16(data_len, &bl_packet_start->data_length);
- flash_row_head = (struct gen5_bl_flash_row_head *)bl_cmd_head->data;
+ flash_row_head = (struct tsg_bl_flash_row_head *)bl_cmd_head->data;
flash_row_head->flash_array_id = flash_array_id;
put_unaligned_le16(flash_row_id, &flash_row_head->flash_row_id);
memcpy(flash_row_head->flash_data, record_data, record_len);
- bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+ bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
data_len);
crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
- sizeof(struct gen5_bl_packet_start) + data_len);
+ sizeof(struct pip_bl_packet_start) + data_len);
put_unaligned_le16(crc, &bl_packet_end->crc);
- bl_packet_end->eop = GEN5_EOP_KEY;
+ bl_packet_end->eop = PIP_EOP_KEY;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
- if (error || resp_len != GEN5_BL_BLOCK_WRITE_RESP_LEN ||
- resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ 500, cyapa_sort_tsg_pip_bl_resp_data, true);
+ if (error || resp_len != PIP_BL_BLOCK_WRITE_RESP_LEN ||
+ resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error < 0 ? error : -EAGAIN;
return 0;
}
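For orientation, the packet assembled above has a fixed framing that follows the pip_bl_cmd_head / tsg_bl_flash_row_head / pip_bl_packet_end structures. The concrete key values used below (register 0x0004, bootloader report id 0x40, SOP 0x01, EOP 0x17) are inferred from the raw bootloader byte arrays elsewhere in this patch and should be read as assumptions, not header-verified constants. A standalone sketch of the framing, reusing crc16_ccitt() from the sketch above:

#include <stdint.h>
#include <string.h>

/* Frames one PROGRAM_VERIFY_ROW packet; returns the total length (144). */
static size_t frame_bl_row(uint8_t *out, uint8_t cmd_code, uint8_t array_id,
			   uint16_t row_id, const uint8_t row[ROW_SIZE])
{
	uint16_t data_len = 3 + ROW_SIZE;   /* array id + row id + row data */
	size_t n = 0;
	uint16_t crc;

	out[n++] = 0x04; out[n++] = 0x00;   /* output register address, LE */
	n += 2;                             /* report length, patched below */
	out[n++] = 0x40;                    /* bootloader command report id */
	out[n++] = 0x00;                    /* reserved */
	out[n++] = 0x01;                    /* SOP key */
	out[n++] = cmd_code;                /* e.g. PROGRAM_VERIFY_ROW */
	out[n++] = data_len & 0xff;
	out[n++] = data_len >> 8;
	out[n++] = array_id;
	out[n++] = row_id & 0xff;
	out[n++] = row_id >> 8;
	memcpy(out + n, row, ROW_SIZE);
	n += ROW_SIZE;

	/* CRC-16/CCITT over SOP..end of row data, as in the driver. */
	crc = crc16_ccitt(0xffff, out + 6, n - 6);
	out[n++] = crc & 0xff;
	out[n++] = crc >> 8;
	out[n++] = 0x17;                    /* EOP key */

	/* Report length excludes the 2-byte register address. */
	out[2] = (uint8_t)((n - 2) & 0xff);
	out[3] = (uint8_t)((n - 2) >> 8);
	return n;
}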
-static int cyapa_gen5_do_fw_update(struct cyapa *cyapa,
+int cyapa_pip_do_fw_update(struct cyapa *cyapa,
const struct firmware *fw)
{
struct device *dev = &cyapa->client->dev;
- struct cyapa_tsg_bin_image_data_record *flash_record;
- struct cyapa_tsg_bin_image *image =
- (struct cyapa_tsg_bin_image *)fw->data;
+ struct cyapa_tsg_bin_image_data_record *image_records;
int flash_records_count;
int i;
int error;
cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
- flash_records_count =
- (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
- sizeof(struct cyapa_tsg_bin_image_data_record);
+ image_records =
+ cyapa_get_image_record_data_num(fw, &flash_records_count);
+
/*
* The last flash row 0x01ff has been written through bl_initiate
* command, so DO NOT write flash 0x01ff to trackpad device.
*/
for (i = 0; i < (flash_records_count - 1); i++) {
- flash_record = &image->records[i];
- error = cyapa_gen5_write_fw_block(cyapa, flash_record);
+ error = cyapa_pip_write_fw_block(cyapa, &image_records[i]);
if (error) {
dev_err(dev, "%s: Gen5 FW update aborted: %d\n",
__func__, error);
@@ -1372,9 +1372,9 @@ static int cyapa_gen5_change_power_state(struct cyapa *cyapa, u8 power_state)
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x08) ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error < 0 ? error : -EINVAL;
return 0;
@@ -1383,7 +1383,7 @@ static int cyapa_gen5_change_power_state(struct cyapa *cyapa, u8 power_state)
static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
u8 parameter_id, u16 interval_time)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
struct gen5_app_set_parameter_data *parameter_data;
u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
int cmd_len;
@@ -1393,10 +1393,10 @@ static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
int error;
memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
parameter_data = (struct gen5_app_set_parameter_data *)
app_cmd_head->parameter_data;
- cmd_len = sizeof(struct gen5_app_cmd_head) +
+ cmd_len = sizeof(struct pip_app_cmd_head) +
sizeof(struct gen5_app_set_parameter_data);
switch (parameter_id) {
@@ -1413,14 +1413,14 @@ static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
return -EINVAL;
}
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
/*
* Don't include unused parameter value bytes and
* 2 bytes register address.
*/
put_unaligned_le16(cmd_len - (4 - parameter_size) - 2,
&app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
parameter_data->parameter_id = parameter_id;
parameter_data->parameter_size = parameter_size;
@@ -1428,7 +1428,7 @@ static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
if (error || resp_data[5] != parameter_id ||
resp_data[6] != parameter_size ||
!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER))
@@ -1440,7 +1440,7 @@ static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
u8 parameter_id, u16 *interval_time)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
struct gen5_app_get_parameter_data *parameter_data;
u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
int cmd_len;
@@ -1451,10 +1451,10 @@ static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
int error;
memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
parameter_data = (struct gen5_app_get_parameter_data *)
app_cmd_head->parameter_data;
- cmd_len = sizeof(struct gen5_app_cmd_head) +
+ cmd_len = sizeof(struct pip_app_cmd_head) +
sizeof(struct gen5_app_get_parameter_data);
*interval_time = 0;
@@ -1472,17 +1472,17 @@ static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
return -EINVAL;
}
- put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
/* Don't include 2 bytes register address */
put_unaligned_le16(cmd_len - 2, &app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
app_cmd_head->cmd_code = GEN5_CMD_GET_PARAMETER;
parameter_data->parameter_id = parameter_id;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
if (error || resp_data[5] != parameter_id || resp_data[6] == 0 ||
!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_GET_PARAMETER))
return error < 0 ? error : -EINVAL;
@@ -1497,18 +1497,18 @@ static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
u8 cmd[10];
u8 resp_data[7];
int resp_len;
int error;
memset(cmd, 0, sizeof(cmd));
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
- put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
app_cmd_head->parameter_data[0] = GEN5_PARAMETER_DISABLE_PIP_REPORT;
app_cmd_head->parameter_data[1] = 0x01;
@@ -1516,7 +1516,7 @@ static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
if (error || resp_data[5] != GEN5_PARAMETER_DISABLE_PIP_REPORT ||
!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER) ||
resp_data[6] != 0x01)
@@ -1525,26 +1525,48 @@ static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
return 0;
}
-static int cyapa_gen5_deep_sleep(struct cyapa *cyapa, u8 state)
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, PIP_SET_PROXIMITY,
+ (u8)!!enable
+ };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_SET_PROXIMITY) ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data)) {
+ error = (error == -ETIMEDOUT) ? -EOPNOTSUPP : error;
+ return error < 0 ? error : -EINVAL;
+ }
+
+ return 0;
+}
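The hard-coded command arrays in this file, like the one above, all follow the pip_app_cmd_head layout this patch defines in cyapa_gen6.c. An annotated decomposition of the SET_PROXIMITY bytes; the 0x2f application-command report id is inferred from the byte arrays in this diff, and the opcode value shown is a hypothetical placeholder for PIP_SET_PROXIMITY:

#include <stdint.h>

static const uint8_t set_proximity_cmd[] = {
	0x04, 0x00,	/* register address 0x0004, little-endian */
	0x06, 0x00,	/* report length: the 6 bytes after the address */
	0x2f,		/* report id: application command (inferred) */
	0x00,		/* reserved, must be 0 */
	0x05,		/* PIP_SET_PROXIMITY opcode (hypothetical value) */
	0x01,		/* parameter: 1 = enable, 0 = disable */
};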
+
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
{
u8 cmd[] = { 0x05, 0x00, 0x00, 0x08};
u8 resp_data[5];
int resp_len;
int error;
- cmd[2] = state & GEN5_DEEP_SLEEP_STATE_MASK;
+ cmd[2] = state & PIP_DEEP_SLEEP_STATE_MASK;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_deep_sleep_data, false);
- if (error || ((resp_data[3] & GEN5_DEEP_SLEEP_STATE_MASK) != state))
+ 500, cyapa_sort_pip_deep_sleep_data, false);
+ if (error || ((resp_data[3] & PIP_DEEP_SLEEP_STATE_MASK) != state))
return -EINVAL;
return 0;
}
static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
- u8 power_mode, u16 sleep_time)
+ u8 power_mode, u16 sleep_time, bool is_suspend)
{
struct device *dev = &cyapa->client->dev;
u8 power_state;
@@ -1553,43 +1575,40 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
if (cyapa->state != CYAPA_STATE_GEN5_APP)
return 0;
- /* Dump all the report data before do power mode commmands. */
- cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-
- if (GEN5_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
/*
* Assume the TP is in deep sleep mode when the driver is loaded, to
* avoid command I/O issues on driver unload/reload caused by the TP
* having been put into deep sleep mode when last unloaded.
*/
- GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
}
- if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) &&
- GEN5_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+ if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+ PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
if (cyapa_gen5_get_interval_time(cyapa,
GEN5_PARAMETER_LP_INTRVL_ID,
&cyapa->dev_sleep_time) != 0)
- GEN5_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+ PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
- if (GEN5_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
if (power_mode == PWR_MODE_OFF ||
power_mode == PWR_MODE_FULL_ACTIVE ||
power_mode == PWR_MODE_BTN_ONLY ||
- GEN5_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+ PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
/* Has in correct power mode state, early return. */
return 0;
}
}
if (power_mode == PWR_MODE_OFF) {
- error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_OFF);
+ error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
if (error) {
dev_err(dev, "enter deep sleep fail: %d\n", error);
return error;
}
- GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
return 0;
}
@@ -1598,8 +1617,8 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
* state directly; it must first be woken up from sleep, then
* the next power state change can be done.
*/
- if (GEN5_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
- error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_ON);
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+ error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
if (error) {
dev_err(dev, "deep sleep wake fail: %d\n", error);
return error;
@@ -1614,7 +1633,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
return error;
}
- GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
} else if (power_mode == PWR_MODE_BTN_ONLY) {
error = cyapa_gen5_change_power_state(cyapa,
GEN5_POWER_STATE_BTN_ONLY);
@@ -1623,19 +1642,19 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
return error;
}
- GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
} else {
/*
* Continue to change the power mode even if setting the interval
* time failed; it won't affect the power mode change itself,
* except that the sleep interval time may not be correct.
*/
- if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) ||
- sleep_time != GEN5_DEV_GET_SLEEP_TIME(cyapa))
+ if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) ||
+ sleep_time != PIP_DEV_GET_SLEEP_TIME(cyapa))
if (cyapa_gen5_set_interval_time(cyapa,
GEN5_PARAMETER_LP_INTRVL_ID,
sleep_time) == 0)
- GEN5_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+ PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
if (sleep_time <= GEN5_POWER_READY_MAX_INTRVL_TIME)
power_state = GEN5_POWER_STATE_READY;
@@ -1658,17 +1677,17 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
* is suspending, which may leave the interrupt line unable to be
* asserted again.
*/
- cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
- cyapa_gen5_disable_pip_report(cyapa);
+ if (is_suspend)
+ cyapa_gen5_disable_pip_report(cyapa);
- GEN5_DEV_SET_PWR_STATE(cyapa,
+ PIP_DEV_SET_PWR_STATE(cyapa,
cyapa_sleep_time_to_pwr_cmd(sleep_time));
}
return 0;
}
-static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
+int cyapa_pip_resume_scanning(struct cyapa *cyapa)
{
u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x04 };
u8 resp_data[6];
@@ -1682,7 +1701,7 @@ static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ 500, cyapa_sort_tsg_pip_app_resp_data, true);
if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x04))
return -EINVAL;
@@ -1692,7 +1711,7 @@ static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
return 0;
}
-static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa)
{
u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x03 };
u8 resp_data[6];
@@ -1706,7 +1725,7 @@ static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ 500, cyapa_sort_tsg_pip_app_resp_data, true);
if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x03))
return -EINVAL;
@@ -1716,10 +1735,10 @@ static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
return 0;
}
-static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
+static int cyapa_pip_calibrate_pwcs(struct cyapa *cyapa,
u8 calibrate_sensing_mode_type)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
u8 cmd[8];
u8 resp_data[6];
int resp_len;
@@ -1729,25 +1748,25 @@ static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
memset(cmd, 0, sizeof(cmd));
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
- app_cmd_head->cmd_code = GEN5_CMD_CALIBRATE;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
+ app_cmd_head->cmd_code = PIP_CMD_CALIBRATE;
app_cmd_head->parameter_data[0] = calibrate_sensing_mode_type;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 5000, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
- if (error || !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_CALIBRATE) ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ 5000, cyapa_sort_tsg_pip_app_resp_data, true);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_CMD_CALIBRATE) ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error < 0 ? error : -EAGAIN;
return 0;
}
-static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1755,25 +1774,25 @@ static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
int error, calibrate_error;
/* 1. Suspend Scanning*/
- error = cyapa_gen5_suspend_scanning(cyapa);
+ error = cyapa_pip_suspend_scanning(cyapa);
if (error)
return error;
/* 2. Do mutual capacitance fine calibrate. */
- calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
- CYAPA_SENSING_MODE_MUTUAL_CAP_FINE);
+ calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+ PIP_SENSING_MODE_MUTUAL_CAP_FINE);
if (calibrate_error)
goto resume_scanning;
/* 3. Do self capacitance calibrate. */
- calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
- CYAPA_SENSING_MODE_SELF_CAP);
+ calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+ PIP_SENSING_MODE_SELF_CAP);
if (calibrate_error)
goto resume_scanning;
resume_scanning:
/* 4. Resume Scanning*/
- error = cyapa_gen5_resume_scanning(cyapa);
+ error = cyapa_pip_resume_scanning(cyapa);
if (error || calibrate_error)
return error ? error : calibrate_error;
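The calibrate path above is a bracket pattern: suspend scanning, run the calibration steps, always resume scanning, and report the first error encountered. A generic restatement of that control flow (the callback signatures are placeholders, not driver API):

static int with_scanning_suspended(int (*suspend)(void *ctx),
				   int (*resume)(void *ctx),
				   int (*work)(void *ctx), void *ctx)
{
	int error, work_error;

	error = suspend(ctx);
	if (error)
		return error;		/* nothing to undo yet */

	work_error = work(ctx);		/* e.g. the two calibrate steps */

	error = resume(ctx);		/* always attempt to resume */
	return error ? error : work_error;
}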
@@ -1856,7 +1875,7 @@ static void cyapa_gen5_guess_electrodes(struct cyapa *cyapa,
* If the input value of @data_size is not 0, that means read the mutual or
* self local PWC data. The @idac_max, @idac_min and @idac_ave are used to
* return the max, min and average value of the mutual or self local PWC data.
- * Note, in order to raed mutual local PWC data, must read invoke this function
+ * Note, in order to read mutual local PWC data, one must invoke this function
* to read the mutual global idac data firstly to set the correct Rx number
* value; otherwise, the mutual idac and PWC data read back may not be correct.
*/
@@ -1864,7 +1883,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
u8 cmd_code, u8 idac_data_type, int *data_size,
int *idac_max, int *idac_min, int *idac_ave)
{
- struct gen5_app_cmd_head *cmd_head;
+ struct pip_app_cmd_head *cmd_head;
u8 cmd[12];
u8 resp_data[256];
int resp_len;
@@ -1879,7 +1898,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
int i;
int error;
- if (cmd_code != GEN5_CMD_RETRIEVE_DATA_STRUCTURE ||
+ if (cmd_code != PIP_RETRIEVE_DATA_STRUCTURE ||
(idac_data_type != GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
idac_data_type != GEN5_RETRIEVE_SELF_CAP_PWC_DATA) ||
!data_size || !idac_max || !idac_min || !idac_ave)
@@ -1935,10 +1954,10 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
}
memset(cmd, 0, sizeof(cmd));
- cmd_head = (struct gen5_app_cmd_head *)cmd;
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &cmd_head->addr);
+ cmd_head = (struct pip_app_cmd_head *)cmd;
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd_head->addr);
put_unaligned_le16(sizeof(cmd) - 2, &cmd_head->length);
- cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
cmd_head->cmd_code = cmd_code;
do {
read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) /
@@ -1953,11 +1972,11 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data,
+ 500, cyapa_sort_tsg_pip_app_resp_data,
true);
if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
resp_data[6] != idac_data_type)
return (error < 0) ? error : -EAGAIN;
read_len = get_unaligned_le16(&resp_data[7]);
@@ -1997,7 +2016,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
tmp_count < cyapa->aligned_electrodes_rx &&
read_global_idac) {
/*
- * The value gap betwen global and local mutual
+ * The value gap between global and local mutual
* idac data must be bigger than 50%.
* Normally, the global value is bigger than 50,
* and local values are less than 10.
@@ -2061,7 +2080,7 @@ static int cyapa_gen5_read_mutual_idac_data(struct cyapa *cyapa,
data_size = 0;
error = cyapa_gen5_read_idac_data(cyapa,
- GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ PIP_RETRIEVE_DATA_STRUCTURE,
GEN5_RETRIEVE_MUTUAL_PWC_DATA,
&data_size,
gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave);
@@ -2069,7 +2088,7 @@ static int cyapa_gen5_read_mutual_idac_data(struct cyapa *cyapa,
return error;
error = cyapa_gen5_read_idac_data(cyapa,
- GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ PIP_RETRIEVE_DATA_STRUCTURE,
GEN5_RETRIEVE_MUTUAL_PWC_DATA,
&data_size,
lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave);
@@ -2088,7 +2107,7 @@ static int cyapa_gen5_read_self_idac_data(struct cyapa *cyapa,
data_size = 0;
error = cyapa_gen5_read_idac_data(cyapa,
- GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ PIP_RETRIEVE_DATA_STRUCTURE,
GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
&data_size,
lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2098,7 +2117,7 @@ static int cyapa_gen5_read_self_idac_data(struct cyapa *cyapa,
*gidac_self_tx = *lidac_self_min;
error = cyapa_gen5_read_idac_data(cyapa,
- GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+ PIP_RETRIEVE_DATA_STRUCTURE,
GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
&data_size,
lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2107,27 +2126,27 @@ static int cyapa_gen5_read_self_idac_data(struct cyapa *cyapa,
static ssize_t cyapa_gen5_execute_panel_scan(struct cyapa *cyapa)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
u8 cmd[7];
u8 resp_data[6];
int resp_len;
int error;
memset(cmd, 0, sizeof(cmd));
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
app_cmd_head->cmd_code = GEN5_CMD_EXECUTE_PANEL_SCAN;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ 500, cyapa_sort_tsg_pip_app_resp_data, true);
if (error || resp_len != sizeof(resp_data) ||
!VALID_CMD_RESP_HEADER(resp_data,
GEN5_CMD_EXECUTE_PANEL_SCAN) ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error ? error : -EAGAIN;
return 0;
@@ -2138,7 +2157,7 @@ static int cyapa_gen5_read_panel_scan_raw_data(struct cyapa *cyapa,
int *raw_data_max, int *raw_data_min, int *raw_data_ave,
u8 *buffer)
{
- struct gen5_app_cmd_head *app_cmd_head;
+ struct pip_app_cmd_head *app_cmd_head;
struct gen5_retrieve_panel_scan_data *panel_sacn_data;
u8 cmd[12];
u8 resp_data[256]; /* Max bytes can transfer one time. */
@@ -2166,10 +2185,10 @@ static int cyapa_gen5_read_panel_scan_raw_data(struct cyapa *cyapa,
/* Assume max element size is 4 currently. */
read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) / 4;
read_len = read_elements * 4;
- app_cmd_head = (struct gen5_app_cmd_head *)cmd;
- put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+ app_cmd_head = (struct pip_app_cmd_head *)cmd;
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
- app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+ app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
app_cmd_head->cmd_code = cmd_code;
panel_sacn_data = (struct gen5_retrieve_panel_scan_data *)
app_cmd_head->parameter_data;
@@ -2183,10 +2202,10 @@ static int cyapa_gen5_read_panel_scan_raw_data(struct cyapa *cyapa,
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
cmd, sizeof(cmd),
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+ 500, cyapa_sort_tsg_pip_app_resp_data, true);
if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
resp_data[6] != raw_data_type)
return error ? error : -EAGAIN;
@@ -2245,11 +2264,11 @@ static ssize_t cyapa_gen5_show_baseline(struct device *dev,
int error, resume_error;
int size;
- if (cyapa->state != CYAPA_STATE_GEN5_APP)
+ if (!cyapa_is_pip_app_mode(cyapa))
return -EBUSY;
/* 1. Suspend Scanning*/
- error = cyapa_gen5_suspend_scanning(cyapa);
+ error = cyapa_pip_suspend_scanning(cyapa);
if (error)
return error;
@@ -2270,7 +2289,7 @@ static ssize_t cyapa_gen5_show_baseline(struct device *dev,
if (error)
goto resume_scanning;
- /* 4. Execuate panel scan. It must be executed before read data. */
+ /* 4. Execute panel scan. It must be executed before read data. */
error = cyapa_gen5_execute_panel_scan(cyapa);
if (error)
goto resume_scanning;
@@ -2343,7 +2362,7 @@ static ssize_t cyapa_gen5_show_baseline(struct device *dev,
resume_scanning:
/* 11. Resume Scanning*/
- resume_error = cyapa_gen5_resume_scanning(cyapa);
+ resume_error = cyapa_pip_resume_scanning(cyapa);
if (resume_error || error)
return resume_error ? resume_error : error;
@@ -2364,7 +2383,7 @@ resume_scanning:
return size;
}
-static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa,
u8 *buf, int len)
{
/* Check the report id and command code */
@@ -2376,20 +2395,17 @@ static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
{
- u8 bl_query_data_cmd[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
- 0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
- };
- u8 resp_data[GEN5_BL_READ_APP_INFO_RESP_LEN];
+ u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
int resp_len;
int error;
- resp_len = GEN5_BL_READ_APP_INFO_RESP_LEN;
+ resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
- bl_query_data_cmd, sizeof(bl_query_data_cmd),
+ pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
resp_data, &resp_len,
- 500, cyapa_gen5_sort_tsg_pip_bl_resp_data, false);
- if (error || resp_len != GEN5_BL_READ_APP_INFO_RESP_LEN ||
- !GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+ 500, cyapa_sort_tsg_pip_bl_resp_data, false);
+ if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
return error ? error : -EIO;
memcpy(&cyapa->product_id[0], &resp_data[8], 5);
@@ -2402,34 +2418,42 @@ static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
cyapa->fw_maj_ver = resp_data[22];
cyapa->fw_min_ver = resp_data[23];
+ cyapa->platform_ver = (resp_data[26] >> PIP_BL_PLATFORM_VER_SHIFT) &
+ PIP_BL_PLATFORM_VER_MASK;
+
return 0;
}
static int cyapa_gen5_get_query_data(struct cyapa *cyapa)
{
- u8 get_system_information[] = {
- 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02
- };
- u8 resp_data[71];
+ u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
int resp_len;
u16 product_family;
int error;
resp_len = sizeof(resp_data);
error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
- get_system_information, sizeof(get_system_information),
+ pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
resp_data, &resp_len,
- 2000, cyapa_gen5_sort_system_info_data, false);
+ 2000, cyapa_pip_sort_system_info_data, false);
if (error || resp_len < sizeof(resp_data))
return error ? error : -EIO;
product_family = get_unaligned_le16(&resp_data[7]);
- if ((product_family & GEN5_PRODUCT_FAMILY_MASK) !=
- GEN5_PRODUCT_FAMILY_TRACKPAD)
+ if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+ PIP_PRODUCT_FAMILY_TRACKPAD)
return -EINVAL;
- cyapa->fw_maj_ver = resp_data[15];
- cyapa->fw_min_ver = resp_data[16];
+ cyapa->platform_ver = (resp_data[49] >> PIP_BL_PLATFORM_VER_SHIFT) &
+ PIP_BL_PLATFORM_VER_MASK;
+ if (cyapa->gen == CYAPA_GEN5 && cyapa->platform_ver < 2) {
+ /* Gen5 firmware that does not support proximity. */
+ cyapa->fw_maj_ver = resp_data[15];
+ cyapa->fw_min_ver = resp_data[16];
+ } else {
+ cyapa->fw_maj_ver = resp_data[9];
+ cyapa->fw_min_ver = resp_data[10];
+ }
cyapa->electrodes_x = resp_data[52];
cyapa->electrodes_y = resp_data[53];
@@ -2472,9 +2496,9 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
switch (cyapa->state) {
case CYAPA_STATE_GEN5_BL:
- error = cyapa_gen5_bl_exit(cyapa);
+ error = cyapa_pip_bl_exit(cyapa);
if (error) {
- /* Rry to update trackpad product information. */
+ /* Try to update trackpad product information. */
cyapa_gen5_bl_query_data(cyapa);
goto out;
}
@@ -2486,14 +2510,23 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
* If the trackpad device is in deep sleep mode,
* the app command will fail.
* So always try to reset the trackpad device to fully active when
- * the device state is requeried.
+ * the device state is required.
*/
error = cyapa_gen5_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0);
+ PWR_MODE_FULL_ACTIVE, 0, false);
if (error)
dev_warn(dev, "%s: failed to set power active mode.\n",
__func__);
+ /* By default, the trackpad proximity function is enabled. */
+ if (cyapa->platform_ver >= 2) {
+ error = cyapa_pip_set_proximity(cyapa, true);
+ if (error)
+ dev_warn(dev,
+ "%s: failed to enable proximity.\n",
+ __func__);
+ }
+
/* Get trackpad product information. */
error = cyapa_gen5_get_query_data(cyapa);
if (error)
@@ -2518,14 +2551,14 @@ out:
* Return false: do not continue processing.
* Return true: continue processing.
*/
-static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa)
{
- struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
int length;
- if (atomic_read(&gen5_pip->cmd_issued)) {
+ if (atomic_read(&pip->cmd_issued)) {
/* Polling command response data. */
- if (gen5_pip->is_irq_mode == false)
+ if (pip->is_irq_mode == false)
return false;
/*
@@ -2533,59 +2566,64 @@ static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
* this output data may be caused by the user touching the
* trackpad while the host is waiting for the command response.
*/
- cyapa_i2c_pip_read(cyapa, gen5_pip->irq_cmd_buf,
- GEN5_RESP_LENGTH_SIZE);
- length = get_unaligned_le16(gen5_pip->irq_cmd_buf);
- length = (length <= GEN5_RESP_LENGTH_SIZE) ?
- GEN5_RESP_LENGTH_SIZE : length;
- if (length > GEN5_RESP_LENGTH_SIZE)
+ cyapa_i2c_pip_read(cyapa, pip->irq_cmd_buf,
+ PIP_RESP_LENGTH_SIZE);
+ length = get_unaligned_le16(pip->irq_cmd_buf);
+ length = (length <= PIP_RESP_LENGTH_SIZE) ?
+ PIP_RESP_LENGTH_SIZE : length;
+ if (length > PIP_RESP_LENGTH_SIZE)
cyapa_i2c_pip_read(cyapa,
- gen5_pip->irq_cmd_buf, length);
-
- if (!(gen5_pip->resp_sort_func &&
- gen5_pip->resp_sort_func(cyapa,
- gen5_pip->irq_cmd_buf, length))) {
+ pip->irq_cmd_buf, length);
+ if (!(pip->resp_sort_func &&
+ pip->resp_sort_func(cyapa,
+ pip->irq_cmd_buf, length))) {
/*
- * Work around the Gen5 V1 firmware
- * that does not assert interrupt signalling
- * that command response is ready if user
- * keeps touching the trackpad while command
- * is sent to the device.
+ * Work around a Gen5 V1 firmware issue:
+ * no interrupt is asserted from the trackpad device
+ * to the host for the command response ready event.
+ * When there is a finger touch on the trackpad, the
+ * firmware output queue is never empty (it always
+ * holds touch report data), so the interrupt signal
+ * is not asserted again until the output queue has
+ * been emptied. This happens in the scenario where
+ * the user keeps fingers on the trackpad during
+ * system booting/rebooting.
*/
length = 0;
- if (gen5_pip->resp_len)
- length = *gen5_pip->resp_len;
+ if (pip->resp_len)
+ length = *pip->resp_len;
cyapa_empty_pip_output_data(cyapa,
- gen5_pip->resp_data,
+ pip->resp_data,
&length,
- gen5_pip->resp_sort_func);
- if (gen5_pip->resp_len && length != 0) {
- *gen5_pip->resp_len = length;
- atomic_dec(&gen5_pip->cmd_issued);
- complete(&gen5_pip->cmd_ready);
+ pip->resp_sort_func);
+ if (pip->resp_len && length != 0) {
+ *pip->resp_len = length;
+ atomic_dec(&pip->cmd_issued);
+ complete(&pip->cmd_ready);
}
return false;
}
- if (gen5_pip->resp_data && gen5_pip->resp_len) {
- *gen5_pip->resp_len = (*gen5_pip->resp_len < length) ?
- *gen5_pip->resp_len : length;
- memcpy(gen5_pip->resp_data, gen5_pip->irq_cmd_buf,
- *gen5_pip->resp_len);
+ if (pip->resp_data && pip->resp_len) {
+ *pip->resp_len = (*pip->resp_len < length) ?
+ *pip->resp_len : length;
+ memcpy(pip->resp_data, pip->irq_cmd_buf,
+ *pip->resp_len);
}
- atomic_dec(&gen5_pip->cmd_issued);
- complete(&gen5_pip->cmd_ready);
+ atomic_dec(&pip->cmd_issued);
+ complete(&pip->cmd_ready);
return false;
}
return true;
}
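The handler above, like cyapa_pip_irq_handler() below, relies on PIP's length-prefixed packets: read the 2-byte little-endian length first, then re-read the whole packet from the start at its real length. A transport-agnostic sketch of that two-phase read (read_packet is a hypothetical callback standing in for cyapa_i2c_pip_read()):

#include <stdint.h>
#include <stddef.h>

typedef int (*read_fn)(void *ctx, uint8_t *buf, size_t len);

static int pip_read_response(read_fn read_packet, void *ctx,
			     uint8_t *buf, size_t buf_size)
{
	size_t len;

	/* Phase 1: the first two bytes carry the full packet length. */
	if (read_packet(ctx, buf, 2) != 2)
		return -1;

	len = buf[0] | ((size_t)buf[1] << 8);
	if (len <= 2)
		return 0;	/* idle: nothing to report */
	if (len > buf_size)
		return -1;

	/* Phase 2: re-read the packet from the start at its real length. */
	if (read_packet(ctx, buf, len) != (int)len)
		return -1;

	return (int)len;
}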
-static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
- const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_buttons(struct cyapa *cyapa,
+ const struct cyapa_pip_report_data *report_data)
{
struct input_dev *input = cyapa->input;
- u8 buttons = report_data->report_head[GEN5_BUTTONS_OFFSET];
+ u8 buttons = report_data->report_head[PIP_BUTTONS_OFFSET];
buttons = (buttons << CAPABILITY_BTN_SHIFT) & CAPABILITY_BTN_MASK;
@@ -2605,12 +2643,23 @@ static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
input_sync(input);
}
-static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
- const struct cyapa_gen5_touch_record *touch)
+static void cyapa_pip_report_proximity(struct cyapa *cyapa,
+ const struct cyapa_pip_report_data *report_data)
{
struct input_dev *input = cyapa->input;
- u8 event_id = GEN5_GET_EVENT_ID(touch->touch_tip_event_id);
- int slot = GEN5_GET_TOUCH_ID(touch->touch_tip_event_id);
+ u8 distance = report_data->report_head[PIP_PROXIMITY_DISTANCE_OFFSET] &
+ PIP_PROXIMITY_DISTANCE_MASK;
+
+ input_report_abs(input, ABS_DISTANCE, distance);
+ input_sync(input);
+}
+
+static void cyapa_pip_report_slot_data(struct cyapa *cyapa,
+ const struct cyapa_pip_touch_record *touch)
+{
+ struct input_dev *input = cyapa->input;
+ u8 event_id = PIP_GET_EVENT_ID(touch->touch_tip_event_id);
+ int slot = PIP_GET_TOUCH_ID(touch->touch_tip_event_id);
int x, y;
if (event_id == RECORD_EVENT_LIFTOFF)
@@ -2621,11 +2670,12 @@ static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
x = (touch->x_hi << 8) | touch->x_lo;
if (cyapa->x_origin)
x = cyapa->max_abs_x - x;
- input_report_abs(input, ABS_MT_POSITION_X, x);
y = (touch->y_hi << 8) | touch->y_lo;
if (cyapa->y_origin)
y = cyapa->max_abs_y - y;
+ input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_DISTANCE, 0);
input_report_abs(input, ABS_MT_PRESSURE,
touch->z);
input_report_abs(input, ABS_MT_TOUCH_MAJOR,
@@ -2642,50 +2692,49 @@ static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
touch->orientation);
}
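Each touch record above packs the event id and the slot (touch) id into the single touch_tip_event_id byte and splits coordinates into hi/lo bytes. A standalone decode sketch; the exact bit split behind PIP_GET_EVENT_ID()/PIP_GET_TOUCH_ID() is not visible in this diff, so the 2-bit/5-bit split below is an assumption based on the macro names:

#include <stdint.h>

struct pip_touch {
	uint8_t touch_tip_event_id;	/* event id + touch (slot) id */
	uint8_t x_lo, x_hi;
	uint8_t y_lo, y_hi;
	uint8_t z;			/* pressure */
};

/* Returns the slot id; fills in the event id and mirrored coordinates. */
static int decode_touch(const struct pip_touch *t,
			int x_origin, int y_origin, int max_x, int max_y,
			int *event, int *x, int *y)
{
	*event = (t->touch_tip_event_id >> 5) & 0x03;	/* assumed split */

	*x = (t->x_hi << 8) | t->x_lo;
	*y = (t->y_hi << 8) | t->y_lo;

	/* Mirror the coordinate when the panel origin is inverted. */
	if (x_origin)
		*x = max_x - *x;
	if (y_origin)
		*y = max_y - *y;

	return t->touch_tip_event_id & 0x1f;		/* assumed mask */
}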
-static void cyapa_gen5_report_touches(struct cyapa *cyapa,
- const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_touches(struct cyapa *cyapa,
+ const struct cyapa_pip_report_data *report_data)
{
struct input_dev *input = cyapa->input;
unsigned int touch_num;
int i;
- touch_num = report_data->report_head[GEN5_NUMBER_OF_TOUCH_OFFSET] &
- GEN5_NUMBER_OF_TOUCH_MASK;
+ touch_num = report_data->report_head[PIP_NUMBER_OF_TOUCH_OFFSET] &
+ PIP_NUMBER_OF_TOUCH_MASK;
for (i = 0; i < touch_num; i++)
- cyapa_gen5_report_slot_data(cyapa,
+ cyapa_pip_report_slot_data(cyapa,
&report_data->touch_records[i]);
input_mt_sync_frame(input);
input_sync(input);
}
-static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
+int cyapa_pip_irq_handler(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
- struct cyapa_gen5_report_data report_data;
- int ret;
- u8 report_id;
+ struct cyapa_pip_report_data report_data;
unsigned int report_len;
+ u8 report_id;
+ int ret;
- if (cyapa->gen != CYAPA_GEN5 ||
- cyapa->state != CYAPA_STATE_GEN5_APP) {
+ if (!cyapa_is_pip_app_mode(cyapa)) {
dev_err(dev, "invalid device state, gen=%d, state=0x%02x\n",
cyapa->gen, cyapa->state);
return -EINVAL;
}
ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data,
- GEN5_RESP_LENGTH_SIZE);
- if (ret != GEN5_RESP_LENGTH_SIZE) {
+ PIP_RESP_LENGTH_SIZE);
+ if (ret != PIP_RESP_LENGTH_SIZE) {
dev_err(dev, "failed to read length bytes, (%d)\n", ret);
return -EINVAL;
}
report_len = get_unaligned_le16(
- &report_data.report_head[GEN5_RESP_LENGTH_OFFSET]);
- if (report_len < GEN5_RESP_LENGTH_SIZE) {
- /* Invliad length or internal reset happened. */
+ &report_data.report_head[PIP_RESP_LENGTH_OFFSET]);
+ if (report_len < PIP_RESP_LENGTH_SIZE) {
+ /* Invalid length or internal reset happened. */
dev_err(dev, "invalid report_len=%d. bytes: %02x %02x\n",
report_len, report_data.report_head[0],
report_data.report_head[1]);
@@ -2693,7 +2742,7 @@ static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
}
/* Idle, no data for report. */
- if (report_len == GEN5_RESP_LENGTH_SIZE)
+ if (report_len == PIP_RESP_LENGTH_SIZE)
return 0;
ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data, report_len);
@@ -2703,70 +2752,92 @@ static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
return -EINVAL;
}
- report_id = report_data.report_head[GEN5_RESP_REPORT_ID_OFFSET];
- if (report_id == GEN5_WAKEUP_EVENT_REPORT_ID &&
- report_len == GEN5_WAKEUP_EVENT_SIZE) {
+ report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+ if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
+ report_len == PIP_WAKEUP_EVENT_SIZE) {
/*
* Device wake event from deep sleep mode for touch.
* This interrupt event is used to wake system up.
+ *
+ * Note:
+ * This introduces about 20~40 ms of additional delay
+ * before the first valid touch report data is received.
+ * The time is spent executing the device runtime
+ * resume process.
*/
+ pm_runtime_get_sync(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
return 0;
- } else if (report_id != GEN5_TOUCH_REPORT_ID &&
- report_id != GEN5_BTN_REPORT_ID &&
+ } else if (report_id != PIP_TOUCH_REPORT_ID &&
+ report_id != PIP_BTN_REPORT_ID &&
report_id != GEN5_OLD_PUSH_BTN_REPORT_ID &&
- report_id != GEN5_PUSH_BTN_REPORT_ID) {
+ report_id != PIP_PUSH_BTN_REPORT_ID &&
+ report_id != PIP_PROXIMITY_REPORT_ID) {
/* Running in BL mode or unknown response data read. */
dev_err(dev, "invalid report_id=0x%02x\n", report_id);
return -EINVAL;
}
- if (report_id == GEN5_TOUCH_REPORT_ID &&
- (report_len < GEN5_TOUCH_REPORT_HEAD_SIZE ||
- report_len > GEN5_TOUCH_REPORT_MAX_SIZE)) {
+ if (report_id == PIP_TOUCH_REPORT_ID &&
+ (report_len < PIP_TOUCH_REPORT_HEAD_SIZE ||
+ report_len > PIP_TOUCH_REPORT_MAX_SIZE)) {
/* Invalid report data length for finger packet. */
dev_err(dev, "invalid touch packet length=%d\n", report_len);
return 0;
}
- if ((report_id == GEN5_BTN_REPORT_ID ||
+ if ((report_id == PIP_BTN_REPORT_ID ||
report_id == GEN5_OLD_PUSH_BTN_REPORT_ID ||
- report_id == GEN5_PUSH_BTN_REPORT_ID) &&
- (report_len < GEN5_BTN_REPORT_HEAD_SIZE ||
- report_len > GEN5_BTN_REPORT_MAX_SIZE)) {
+ report_id == PIP_PUSH_BTN_REPORT_ID) &&
+ (report_len < PIP_BTN_REPORT_HEAD_SIZE ||
+ report_len > PIP_BTN_REPORT_MAX_SIZE)) {
/* Invalid report data length of button packet. */
dev_err(dev, "invalid button packet length=%d\n", report_len);
return 0;
}
- if (report_id == GEN5_TOUCH_REPORT_ID)
- cyapa_gen5_report_touches(cyapa, &report_data);
+ if (report_id == PIP_PROXIMITY_REPORT_ID &&
+ report_len != PIP_PROXIMITY_REPORT_SIZE) {
+ /* Invalid report data length of proximity packet. */
+ dev_err(dev, "invalid proximity data, length=%d\n", report_len);
+ return 0;
+ }
+
+ if (report_id == PIP_TOUCH_REPORT_ID)
+ cyapa_pip_report_touches(cyapa, &report_data);
+ else if (report_id == PIP_PROXIMITY_REPORT_ID)
+ cyapa_pip_report_proximity(cyapa, &report_data);
else
- cyapa_gen5_report_buttons(cyapa, &report_data);
+ cyapa_pip_report_buttons(cyapa, &report_data);
return 0;
}
-static int cyapa_gen5_bl_activate(struct cyapa *cyapa) { return 0; }
-static int cyapa_gen5_bl_deactivate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_activate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa) { return 0; }
+
const struct cyapa_dev_ops cyapa_gen5_ops = {
- .check_fw = cyapa_gen5_check_fw,
- .bl_enter = cyapa_gen5_bl_enter,
- .bl_initiate = cyapa_gen5_bl_initiate,
- .update_fw = cyapa_gen5_do_fw_update,
- .bl_activate = cyapa_gen5_bl_activate,
- .bl_deactivate = cyapa_gen5_bl_deactivate,
+ .check_fw = cyapa_pip_check_fw,
+ .bl_enter = cyapa_pip_bl_enter,
+ .bl_initiate = cyapa_pip_bl_initiate,
+ .update_fw = cyapa_pip_do_fw_update,
+ .bl_activate = cyapa_pip_bl_activate,
+ .bl_deactivate = cyapa_pip_bl_deactivate,
.show_baseline = cyapa_gen5_show_baseline,
- .calibrate_store = cyapa_gen5_do_calibrate,
+ .calibrate_store = cyapa_pip_do_calibrate,
- .initialize = cyapa_gen5_initialize,
+ .initialize = cyapa_pip_cmd_state_initialize,
.state_parse = cyapa_gen5_state_parse,
.operational_check = cyapa_gen5_do_operational_check,
- .irq_handler = cyapa_gen5_irq_handler,
- .irq_cmd_handler = cyapa_gen5_irq_cmd_handler,
+ .irq_handler = cyapa_pip_irq_handler,
+ .irq_cmd_handler = cyapa_pip_irq_cmd_handler,
.sort_empty_output_data = cyapa_empty_pip_output_data,
.set_power_mode = cyapa_gen5_set_power_mode,
+
+ .set_proximity = cyapa_pip_set_proximity,
};
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
new file mode 100644
index 000000000000..5f191071d44a
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -0,0 +1,749 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2015 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/crc-itu-t.h>
+#include "cyapa.h"
+
+
+#define GEN6_ENABLE_CMD_IRQ 0x41
+#define GEN6_DISABLE_CMD_IRQ 0x42
+#define GEN6_ENABLE_DEV_IRQ 0x43
+#define GEN6_DISABLE_DEV_IRQ 0x44
+
+#define GEN6_POWER_MODE_ACTIVE 0x01
+#define GEN6_POWER_MODE_LP_MODE1 0x02
+#define GEN6_POWER_MODE_LP_MODE2 0x03
+#define GEN6_POWER_MODE_BTN_ONLY 0x04
+
+#define GEN6_SET_POWER_MODE_INTERVAL 0x47
+#define GEN6_GET_POWER_MODE_INTERVAL 0x48
+
+#define GEN6_MAX_RX_NUM 14
+#define GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC 0x00
+#define GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM 0x12
+
+
+struct pip_app_cmd_head {
+ __le16 addr;
+ __le16 length;
+ u8 report_id;
+ u8 resv; /* Reserved, must be 0 */
+ u8 cmd_code; /* bit7: resv, set to 0; bit6~0: command code.*/
+} __packed;
+
+struct pip_app_resp_head {
+ __le16 length;
+ u8 report_id;
+ u8 resv; /* Reserved, must be 0 */
+ u8 cmd_code; /* bit7: TGL; bit6~0: command code.*/
+ /*
+ * The value of data_status can be the first byte of data or
+ * the command status or the unsupported command code depending on the
+ * requested command code.
+ */
+ u8 data_status;
+} __packed;
+
+struct pip_fixed_info {
+ u8 silicon_id_high;
+ u8 silicon_id_low;
+ u8 family_id;
+};
+
+static u8 pip_get_bl_info[] = {
+ 0x04, 0x00, 0x0B, 0x00, 0x40, 0x00, 0x01, 0x38,
+ 0x00, 0x00, 0x70, 0x9E, 0x17
+};
+
+static bool cyapa_sort_pip_hid_descriptor_data(struct cyapa *cyapa,
+ u8 *buf, int len)
+{
+ if (len != PIP_HID_DESCRIPTOR_SIZE)
+ return false;
+
+ if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID ||
+ buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+ return true;
+
+ return false;
+}
+
+static int cyapa_get_pip_fixed_info(struct cyapa *cyapa,
+ struct pip_fixed_info *pip_info, bool is_bootloader)
+{
+ u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+ int resp_len;
+ u16 product_family;
+ int error;
+
+ if (is_bootloader) {
+ /* Read Bootloader Information to determine Gen5 or Gen6. */
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ pip_get_bl_info, sizeof(pip_get_bl_info),
+ resp_data, &resp_len,
+ 2000, cyapa_sort_tsg_pip_bl_resp_data,
+ false);
+ if (error || resp_len < PIP_BL_GET_INFO_RESP_LENGTH)
+ return error ? error : -EIO;
+
+ pip_info->family_id = resp_data[8];
+ pip_info->silicon_id_low = resp_data[10];
+ pip_info->silicon_id_high = resp_data[11];
+
+ return 0;
+ }
+
+ /* Get App System Information to determine Gen5 or Gen6. */
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+ resp_data, &resp_len,
+ 2000, cyapa_pip_sort_system_info_data, false);
+ if (error || resp_len < PIP_READ_SYS_INFO_RESP_LENGTH)
+ return error ? error : -EIO;
+
+ product_family = get_unaligned_le16(&resp_data[7]);
+ if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+ PIP_PRODUCT_FAMILY_TRACKPAD)
+ return -EINVAL;
+
+ pip_info->family_id = resp_data[19];
+ pip_info->silicon_id_low = resp_data[21];
+ pip_info->silicon_id_high = resp_data[22];
+
+ return 0;
+}
+
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
+ u8 cmd[] = { 0x01, 0x00};
+ struct pip_fixed_info pip_info;
+ u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
+ int resp_len;
+ bool is_bootloader;
+ int error;
+
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+ /* Try to wake the device from deep sleep state, if it is sleeping. */
+ cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+
+ /* Empty the buffer queue to get fresh data with later commands. */
+ cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+ /*
+ * Read the descriptor info from the trackpad device to determine
+ * whether it is running in APP mode or Bootloader mode.
+ */
+ resp_len = PIP_HID_DESCRIPTOR_SIZE;
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 300,
+ cyapa_sort_pip_hid_descriptor_data,
+ false);
+ if (error)
+ return error;
+
+ if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+ is_bootloader = true;
+ else if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID)
+ is_bootloader = false;
+ else
+ return -EAGAIN;
+
+ /* Get PIP fixed information to determine Gen5 or Gen6. */
+ memset(&pip_info, 0, sizeof(struct pip_fixed_info));
+ error = cyapa_get_pip_fixed_info(cyapa, &pip_info, is_bootloader);
+ if (error)
+ return error;
+
+ if (pip_info.family_id == 0x9B && pip_info.silicon_id_high == 0x0B) {
+ cyapa->gen = CYAPA_GEN6;
+ cyapa->state = is_bootloader ? CYAPA_STATE_GEN6_BL
+ : CYAPA_STATE_GEN6_APP;
+ } else if (pip_info.family_id == 0x91 &&
+ pip_info.silicon_id_high == 0x02) {
+ cyapa->gen = CYAPA_GEN5;
+ cyapa->state = is_bootloader ? CYAPA_STATE_GEN5_BL
+ : CYAPA_STATE_GEN5_APP;
+ }
+
+ return 0;
+}
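The generation probe above boils down to a small decision table over the PIP fixed info. Restated standalone, with the ids exactly as this patch uses them (Gen6: family 0x9B / silicon-id-high 0x0B; Gen5: family 0x91 / silicon-id-high 0x02):

#include <stdint.h>

enum tp_state { TP_UNKNOWN, TP_GEN5_BL, TP_GEN5_APP, TP_GEN6_BL, TP_GEN6_APP };

static enum tp_state classify_pip_device(uint8_t family_id,
					 uint8_t silicon_id_high,
					 int is_bootloader)
{
	if (family_id == 0x9B && silicon_id_high == 0x0B)
		return is_bootloader ? TP_GEN6_BL : TP_GEN6_APP;
	if (family_id == 0x91 && silicon_id_high == 0x02)
		return is_bootloader ? TP_GEN5_BL : TP_GEN5_APP;
	return TP_UNKNOWN;	/* gen/state left untouched, as above */
}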
+
+static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
+{
+ u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+ int resp_len;
+ u16 product_family;
+ u8 rotat_align;
+ int error;
+
+ /* Get App System Information to determine Gen5 or Gen6. */
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+ resp_data, &resp_len,
+ 2000, cyapa_pip_sort_system_info_data, false);
+ if (error || resp_len < sizeof(resp_data))
+ return error ? error : -EIO;
+
+ product_family = get_unaligned_le16(&resp_data[7]);
+ if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+ PIP_PRODUCT_FAMILY_TRACKPAD)
+ return -EINVAL;
+
+ cyapa->platform_ver = (resp_data[67] >> PIP_BL_PLATFORM_VER_SHIFT) &
+ PIP_BL_PLATFORM_VER_MASK;
+ cyapa->fw_maj_ver = resp_data[9];
+ cyapa->fw_min_ver = resp_data[10];
+
+ cyapa->electrodes_x = resp_data[33];
+ cyapa->electrodes_y = resp_data[34];
+
+ cyapa->physical_size_x = get_unaligned_le16(&resp_data[35]) / 100;
+ cyapa->physical_size_y = get_unaligned_le16(&resp_data[37]) / 100;
+
+ cyapa->max_abs_x = get_unaligned_le16(&resp_data[39]);
+ cyapa->max_abs_y = get_unaligned_le16(&resp_data[41]);
+
+ cyapa->max_z = get_unaligned_le16(&resp_data[43]);
+
+ cyapa->x_origin = resp_data[45] & 0x01;
+ cyapa->y_origin = resp_data[46] & 0x01;
+
+ cyapa->btn_capability = (resp_data[70] << 3) & CAPABILITY_BTN_MASK;
+
+ memcpy(&cyapa->product_id[0], &resp_data[51], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &resp_data[56], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &resp_data[62], 2);
+ cyapa->product_id[15] = '\0';
+
+ rotat_align = resp_data[68];
+ if (rotat_align)
+ cyapa->electrodes_rx = cyapa->electrodes_y;
+ else
+ cyapa->electrodes_rx = cyapa->electrodes_x;
+ cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
+
+ if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
+ !cyapa->physical_size_x || !cyapa->physical_size_y ||
+ !cyapa->max_abs_x || !cyapa->max_abs_y || !cyapa->max_z)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen6_bl_read_app_info(struct cyapa *cyapa)
+{
+ u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_bl_resp_data, false);
+ if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
+ return error ? error : -EIO;
+
+ cyapa->fw_maj_ver = resp_data[8];
+ cyapa->fw_min_ver = resp_data[9];
+
+ cyapa->platform_ver = (resp_data[12] >> PIP_BL_PLATFORM_VER_SHIFT) &
+ PIP_BL_PLATFORM_VER_MASK;
+
+ memcpy(&cyapa->product_id[0], &resp_data[13], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &resp_data[18], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &resp_data[24], 2);
+ cyapa->product_id[15] = '\0';
+
+ return 0;
+}
+
+static int cyapa_gen6_config_dev_irq(struct cyapa *cyapa, u8 cmd_code)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, cmd_code };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+ !PIP_CMD_COMPLETE_SUCCESS(resp_data))
+ return error < 0 ? error : -EINVAL;
+
+ return 0;
+}
+
+static int cyapa_gen6_set_proximity(struct cyapa *cyapa, bool enable)
+{
+ int error;
+
+ cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+ error = cyapa_pip_set_proximity(cyapa, enable);
+ cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+
+ return error;
+}
+
+static int cyapa_gen6_change_power_state(struct cyapa *cyapa, u8 power_mode)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, 0x46, power_mode };
+ u8 resp_data[6];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
+ if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x46))
+ return error < 0 ? error : -EINVAL;
+
+ /* The power state applied by the device does not match the one set. */
+ if (resp_data[5] != power_mode)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int cyapa_gen6_set_interval_setting(struct cyapa *cyapa,
+ struct gen6_interval_setting *interval_setting)
+{
+ struct gen6_set_interval_cmd {
+ __le16 addr;
+ __le16 length;
+ u8 report_id;
+ u8 rsvd; /* Reserved, must be 0 */
+ u8 cmd_code;
+ __le16 active_interval;
+ __le16 lp1_interval;
+ __le16 lp2_interval;
+ } __packed set_interval_cmd;
+ u8 resp_data[11];
+ int resp_len;
+ int error;
+
+ memset(&set_interval_cmd, 0, sizeof(set_interval_cmd));
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &set_interval_cmd.addr);
+ put_unaligned_le16(sizeof(set_interval_cmd) - 2,
+ &set_interval_cmd.length);
+ set_interval_cmd.report_id = PIP_APP_CMD_REPORT_ID;
+ set_interval_cmd.cmd_code = GEN6_SET_POWER_MODE_INTERVAL;
+ put_unaligned_le16(interval_setting->active_interval,
+ &set_interval_cmd.active_interval);
+ put_unaligned_le16(interval_setting->lp1_interval,
+ &set_interval_cmd.lp1_interval);
+ put_unaligned_le16(interval_setting->lp2_interval,
+ &set_interval_cmd.lp2_interval);
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ (u8 *)&set_interval_cmd, sizeof(set_interval_cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
+ if (error ||
+ !VALID_CMD_RESP_HEADER(resp_data, GEN6_SET_POWER_MODE_INTERVAL))
+ return error < 0 ? error : -EINVAL;
+
+ /* Get the real set intervals from response. */
+ interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+ interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+ interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+ return 0;
+}
+
+static int cyapa_gen6_get_interval_setting(struct cyapa *cyapa,
+ struct gen6_interval_setting *interval_setting)
+{
+ u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00,
+ GEN6_GET_POWER_MODE_INTERVAL };
+ u8 resp_data[11];
+ int resp_len;
+ int error;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data, false);
+ if (error ||
+ !VALID_CMD_RESP_HEADER(resp_data, GEN6_GET_POWER_MODE_INTERVAL))
+ return error < 0 ? error : -EINVAL;
+
+ interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+ interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+ interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+ return 0;
+}
+
+static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
+{
+ u8 ping[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x00 };
+
+ if (state == PIP_DEEP_SLEEP_STATE_ON)
+ /*
+ * Send a ping command to tell the device to prepare for wake up
+ * when it is in deep sleep mode. At this point, the device
+ * responds with nothing but an I2C NAK.
+ */
+ cyapa_i2c_pip_write(cyapa, ping, sizeof(ping));
+
+ return cyapa_pip_deep_sleep(cyapa, state);
+}
+
+static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
+ u8 power_mode, u16 sleep_time, bool is_suspend)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct gen6_interval_setting *interval_setting =
+ &cyapa->gen6_interval_setting;
+ u8 lp_mode;
+ int error;
+
+ if (cyapa->state != CYAPA_STATE_GEN6_APP)
+ return 0;
+
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+ /*
+ * Assume the TP is in deep sleep mode when the driver is loaded, to
+ * avoid command I/O issues on driver unload/reload caused by the TP
+ * having been put into deep sleep mode when last unloaded.
+ */
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ }
+
+ if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+ PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+ PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+ if (power_mode == PWR_MODE_OFF ||
+ power_mode == PWR_MODE_FULL_ACTIVE ||
+ power_mode == PWR_MODE_BTN_ONLY ||
+ PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+ /* Has in correct power mode state, early return. */
+ return 0;
+ }
+ }
+
+ if (power_mode == PWR_MODE_OFF) {
+ cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+ error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
+ if (error) {
+ dev_err(dev, "enter deep sleep fail: %d\n", error);
+ return error;
+ }
+
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+ return 0;
+ }
+
+ /*
+ * When the trackpad is in power off mode it cannot switch to
+ * another power state directly; it must first be woken from
+ * deep sleep before the next power state change can proceed.
+ */
+ if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+ error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+ if (error) {
+ dev_err(dev, "deep sleep wake fail: %d\n", error);
+ return error;
+ }
+ }
+
+ /*
+ * Disable the device's command response interrupts so they do
+ * not disturb the system suspend or hibernation process.
+ */
+ cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+ if (power_mode == PWR_MODE_FULL_ACTIVE) {
+ error = cyapa_gen6_change_power_state(cyapa,
+ GEN6_POWER_MODE_ACTIVE);
+ if (error) {
+ dev_err(dev, "change to active fail: %d\n", error);
+ goto out;
+ }
+
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+
+ /* Sync the interval setting from device. */
+ cyapa_gen6_get_interval_setting(cyapa, interval_setting);
+
+ } else if (power_mode == PWR_MODE_BTN_ONLY) {
+ error = cyapa_gen6_change_power_state(cyapa,
+ GEN6_POWER_MODE_BTN_ONLY);
+ if (error) {
+ dev_err(dev, "fail to button only mode: %d\n", error);
+ goto out;
+ }
+
+ PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+ } else {
+ /*
+ * Gen6 devices internally support two low power scan interval
+ * presets, which makes power mode switches quick, e.g. for
+ * runtime suspend and system suspend.
+ */
+ if (interval_setting->lp1_interval == sleep_time) {
+ lp_mode = GEN6_POWER_MODE_LP_MODE1;
+ } else if (interval_setting->lp2_interval == sleep_time) {
+ lp_mode = GEN6_POWER_MODE_LP_MODE2;
+ } else {
+ if (interval_setting->lp1_interval == 0) {
+ interval_setting->lp1_interval = sleep_time;
+ lp_mode = GEN6_POWER_MODE_LP_MODE1;
+ } else {
+ interval_setting->lp2_interval = sleep_time;
+ lp_mode = GEN6_POWER_MODE_LP_MODE2;
+ }
+ cyapa_gen6_set_interval_setting(cyapa,
+ interval_setting);
+ }
+
+ error = cyapa_gen6_change_power_state(cyapa, lp_mode);
+ if (error) {
+ dev_err(dev, "set power state to 0x%02x failed: %d\n",
+ lp_mode, error);
+ goto out;
+ }
+
+ PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+ PIP_DEV_SET_PWR_STATE(cyapa,
+ cyapa_sleep_time_to_pwr_cmd(sleep_time));
+ }
+
+out:
+ cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+ return error;
+}
+
+static int cyapa_gen6_initialize(struct cyapa *cyapa)
+{
+ return 0;
+}
+
+static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
+ u16 read_offset, u16 read_len, u8 data_id,
+ u8 *data, int *data_buf_lens)
+{
+ struct retrieve_data_struct_cmd {
+ struct pip_app_cmd_head head;
+ __le16 read_offset;
+ __le16 read_length;
+ u8 data_id;
+ } __packed cmd;
+ u8 resp_data[GEN6_MAX_RX_NUM + 10];
+ int resp_len;
+ int error;
+
+ memset(&cmd, 0, sizeof(cmd));
+ put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
+ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
+ cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
+ cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
+ put_unaligned_le16(read_offset, &cmd.read_offset);
+ put_unaligned_le16(read_len, &cmd.read_length);
+ cmd.data_id = data_id;
+
+ resp_len = sizeof(resp_data);
+ error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+ (u8 *)&cmd, sizeof(cmd),
+ resp_data, &resp_len,
+ 500, cyapa_sort_tsg_pip_app_resp_data,
+ true);
+ if (error || !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
+ resp_data[6] != data_id ||
+ !VALID_CMD_RESP_HEADER(resp_data, PIP_RETRIEVE_DATA_STRUCTURE))
+ return (error < 0) ? error : -EAGAIN;
+
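+ /*
+ * Response layout (as checked above): data_id echoed at byte 6,
+ * actual data length at bytes 7-8 (LE), data starting at byte 10.
+ */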
+ read_len = get_unaligned_le16(&resp_data[7]);
+ if (*data_buf_lens < read_len) {
+ *data_buf_lens = read_len;
+ return -ENOBUFS;
+ }
+
+ memcpy(data, &resp_data[10], read_len);
+ *data_buf_lens = read_len;
+ return 0;
+}
+
+static ssize_t cyapa_gen6_show_baseline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+ u8 data[GEN6_MAX_RX_NUM];
+ int data_len;
+ int size = 0;
+ int i;
+ int error;
+ int resume_error;
+
+ if (!cyapa_is_pip_app_mode(cyapa))
+ return -EBUSY;
+
+ /* 1. Suspend scanning. */
+ error = cyapa_pip_suspend_scanning(cyapa);
+ if (error)
+ return error;
+
+ /* 2. IDAC and RX Attenuator Calibration Data (Center Frequency). */
+ data_len = sizeof(data);
+ error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+ GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC,
+ data, &data_len);
+ if (error)
+ goto resume_scanning;
+
+ size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
+ data[0], /* RX Attenuator Mutual */
+ data[1], /* IDAC Mutual */
+ data[2], /* RX Attenuator Self RX */
+ data[3], /* IDAC Self RX */
+ data[4], /* RX Attenuator Self TX */
+ data[5] /* IDAC Self TX */
+ );
+
+ /* 3. Read Attenuator Trim. */
+ data_len = sizeof(data);
+ error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+ GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM,
+ data, &data_len);
+ if (error)
+ goto resume_scanning;
+
+ /* Append the attenuator trim values to the output. */
+ for (i = 0; i < data_len; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "%d ", data[i]);
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+resume_scanning:
+ /* 4. Resume scanning. */
+ resume_error = cyapa_pip_resume_scanning(cyapa);
+ if (resume_error || error) {
+ memset(buf, 0, PAGE_SIZE);
+ return resume_error ? resume_error : error;
+ }
+
+ return size;
+}
+
+static int cyapa_gen6_operational_check(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ int error;
+
+ if (cyapa->gen != CYAPA_GEN6)
+ return -ENODEV;
+
+ switch (cyapa->state) {
+ case CYAPA_STATE_GEN6_BL:
+ error = cyapa_pip_bl_exit(cyapa);
+ if (error) {
+ /* Try to update trackpad product information. */
+ cyapa_gen6_bl_read_app_info(cyapa);
+ goto out;
+ }
+
+ cyapa->state = CYAPA_STATE_GEN6_APP;
+
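+ /* fall through */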
+ case CYAPA_STATE_GEN6_APP:
+ /*
+ * Application commands fail while the trackpad device is in
+ * deep sleep mode, so always try to bring the device back to
+ * the full active state before querying it.
+ */
+ error = cyapa_gen6_set_power_mode(cyapa,
+ PWR_MODE_FULL_ACTIVE, 0, false);
+ if (error)
+ dev_warn(dev, "%s: failed to set power active mode.\n",
+ __func__);
+
+ /* By default, the trackpad proximity function is enabled. */
+ error = cyapa_pip_set_proximity(cyapa, true);
+ if (error)
+ dev_warn(dev, "%s: failed to enable proximity.\n",
+ __func__);
+
+ /* Get trackpad product information. */
+ error = cyapa_gen6_read_sys_info(cyapa);
+ if (error)
+ goto out;
+ /* Only support product ID starting with CYTRA */
+ if (memcmp(cyapa->product_id, product_id,
+ strlen(product_id)) != 0) {
+ dev_err(dev, "%s: unknown product ID (%s)\n",
+ __func__, cyapa->product_id);
+ error = -EINVAL;
+ }
+ break;
+ default:
+ error = -EINVAL;
+ }
+
+out:
+ return error;
+}
+
+const struct cyapa_dev_ops cyapa_gen6_ops = {
+ .check_fw = cyapa_pip_check_fw,
+ .bl_enter = cyapa_pip_bl_enter,
+ .bl_initiate = cyapa_pip_bl_initiate,
+ .update_fw = cyapa_pip_do_fw_update,
+ .bl_activate = cyapa_pip_bl_activate,
+ .bl_deactivate = cyapa_pip_bl_deactivate,
+
+ .show_baseline = cyapa_gen6_show_baseline,
+ .calibrate_store = cyapa_pip_do_calibrate,
+
+ .initialize = cyapa_gen6_initialize,
+
+ .state_parse = cyapa_pip_state_parse,
+ .operational_check = cyapa_gen6_operational_check,
+
+ .irq_handler = cyapa_pip_irq_handler,
+ .irq_cmd_handler = cyapa_pip_irq_cmd_handler,
+ .sort_empty_output_data = cyapa_empty_pip_output_data,
+ .set_power_mode = cyapa_gen6_set_power_mode,
+
+ .set_proximity = cyapa_gen6_set_proximity,
+};
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 62641f2adaf7..fa945304b9a5 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.9
+ * Version: 1.6.0
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +40,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.5.9"
+#define ELAN_DRIVER_VERSION "1.6.0"
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
#define ETP_FINGER_WIDTH 15
@@ -84,7 +84,7 @@ struct elan_tp_data {
int pressure_adjustment;
u8 mode;
u8 ic_type;
- u16 fw_vaildpage_count;
+ u16 fw_validpage_count;
u16 fw_signature_address;
bool irq_wake;
@@ -94,25 +94,28 @@ struct elan_tp_data {
bool baseline_ready;
};
-static int elan_get_fwinfo(u8 ic_type, u16 *vaildpage_count,
+static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
u16 *signature_address)
{
- switch(ic_type) {
+ switch (iap_version) {
+ case 0x08:
+ *validpage_count = 512;
+ break;
case 0x09:
- *vaildpage_count = 768;
+ *validpage_count = 768;
break;
case 0x0D:
- *vaildpage_count = 896;
+ *validpage_count = 896;
break;
default:
/* unknown ic type clear value */
- *vaildpage_count = 0;
+ *validpage_count = 0;
*signature_address = 0;
return -ENXIO;
}
*signature_address =
- (*vaildpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+ (*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
return 0;
}
@@ -261,11 +264,11 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = elan_get_fwinfo(data->ic_type, &data->fw_vaildpage_count,
+ error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
&data->fw_signature_address);
if (error) {
dev_err(&data->client->dev,
- "unknown ic type %d\n", data->ic_type);
+ "unknown iap version %d\n", data->iap_version);
return error;
}
@@ -353,7 +356,7 @@ static int __elan_update_firmware(struct elan_tp_data *data,
iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
- for (i = boot_page_count; i < data->fw_vaildpage_count; i++) {
+ for (i = boot_page_count; i < data->fw_validpage_count; i++) {
u16 checksum = 0;
const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
@@ -771,7 +774,7 @@ static const struct attribute_group *elan_sysfs_groups[] = {
*/
static void elan_report_contact(struct elan_tp_data *data,
int contact_num, bool contact_valid,
- bool hover_event, u8 *finger_data)
+ u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
@@ -815,9 +818,7 @@ static void elan_report_contact(struct elan_tp_data *data,
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
- input_report_abs(input, ABS_MT_DISTANCE, hover_event);
- input_report_abs(input, ABS_MT_PRESSURE,
- hover_event ? 0 : scaled_pressure);
+ input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +840,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, hover_event,
- finger_data);
+ elan_report_contact(data, i, contact_valid, finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
}
input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_report_abs(input, ABS_DISTANCE, hover_event != 0);
input_mt_report_pointer_emulation(input, true);
input_sync(input);
}
@@ -922,6 +923,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_Y, data->y_res);
input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
/* And MT parameters */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +936,6 @@ static int elan_setup_input_device(struct elan_tp_data *data)
ETP_FINGER_WIDTH * max_width, 0, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
ETP_FINGER_WIDTH * min_width, 0, 0);
- input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
data->input = input;
@@ -1167,6 +1168,9 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN1000", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
@@ -1183,10 +1187,10 @@ MODULE_DEVICE_TABLE(of, elan_of_match);
static struct i2c_driver elan_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &elan_pm_ops,
.acpi_match_table = ACPI_PTR(elan_acpi_id),
.of_match_table = of_match_ptr(elan_of_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = elan_probe,
.id_table = elan_id,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
unsigned char packet_type = packet[3] & 0x03;
+ unsigned int ic_version;
bool sanity_check;
if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
return PACKET_TRACKPOINT;
+ /* Bits 16-19 of fw_version encode the version of the IC body. */
+ ic_version = (etd->fw_version & 0x0f0000) >> 16;
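+ /* e.g. fw_version 0x470f01 (Fujitsu T725) yields ic_version 7 */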
+
/*
* Sanity check based on the constant bits of a packet.
* The constant bits change depending on the value of
- * the hardware flag 'crc_enabled' but are the same for
- * every packet, regardless of the type.
+ * the hardware flag 'crc_enabled' and the version of
+ * the IC body, but are the same for every packet,
+ * regardless of the type.
*/
if (etd->crc_enabled)
sanity_check = ((packet[3] & 0x08) == 0x00);
+ else if (ic_version == 7 && etd->samples[1] == 0x2A)
+ sanity_check = ((packet[3] & 0x1c) == 0x10);
else
sanity_check = ((packet[0] & 0x0c) == 0x04 &&
(packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1167,7 +1175,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
- unsigned int x_res = 0, y_res = 0;
+ unsigned int x_res = 31, y_res = 31;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -1232,8 +1240,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_X, x_res);
- input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1252,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
- input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
- input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1259,6 +1263,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
}
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
+ if (etd->hw_version > 1) {
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
+ }
+
etd->y_max = y_max;
etd->width = width;
@@ -1648,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
etd->capabilities[0], etd->capabilities[1],
etd->capabilities[2]);
+ if (etd->hw_version != 1) {
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+ psmouse_err(psmouse, "failed to query sample data\n");
+ goto init_fail;
+ }
+ psmouse_info(psmouse,
+ "Elan sample query result %02x, %02x, %02x\n",
+ etd->samples[0], etd->samples[1], etd->samples[2]);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities[3];
+ unsigned char samples[3];
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ec3477036150..ad18dab0ac47 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1540,6 +1540,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto err_clear_drvdata;
+ /* give PT device some time to settle down before probing */
+ if (serio->id.type == SERIO_PS_PSTHRU)
+ usleep_range(10000, 15000);
+
if (psmouse_probe(psmouse) < 0) {
error = -ENODEV;
goto err_close_serio;
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index cc7e0d4a8f93..11c32ac8234b 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -432,7 +432,7 @@ static int fsp_onpad_hscr(struct psmouse *psmouse, bool enable)
static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- int reg, val;
+ unsigned int reg, val;
char *rest;
ssize_t retval;
@@ -440,7 +440,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
if (rest == buf || *rest != ' ' || reg > 0xff)
return -EINVAL;
- retval = kstrtoint(rest + 1, 16, &val);
+ retval = kstrtouint(rest + 1, 16, &val);
if (retval)
return retval;
@@ -476,9 +476,10 @@ static ssize_t fsp_attr_set_getreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct fsp_data *pad = psmouse->private;
- int reg, val, err;
+ unsigned int reg, val;
+ int err;
- err = kstrtoint(buf, 16, &reg);
+ err = kstrtouint(buf, 16, &reg);
if (err)
return err;
@@ -511,9 +512,10 @@ static ssize_t fsp_attr_show_pagereg(struct psmouse *psmouse,
static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- int val, err;
+ unsigned int val;
+ int err;
- err = kstrtoint(buf, 16, &val);
+ err = kstrtouint(buf, 16, &val);
if (err)
return err;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 35c8d0ceabee..994ae7886156 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,14 +519,18 @@ static int synaptics_set_mode(struct psmouse *psmouse)
struct synaptics_data *priv = psmouse->private;
priv->mode = 0;
- if (priv->absolute_mode)
+
+ if (priv->absolute_mode) {
priv->mode |= SYN_BIT_ABSOLUTE_MODE;
- if (priv->disable_gesture)
+ if (SYN_CAP_EXTENDED(priv->capabilities))
+ priv->mode |= SYN_BIT_W_MODE;
+ }
+
+ if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
+
if (psmouse->rate >= 80)
priv->mode |= SYN_BIT_HIGH_RATE;
- if (SYN_CAP_EXTENDED(priv->capabilities))
- priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
@@ -1199,7 +1203,7 @@ static void set_input_params(struct psmouse *psmouse,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
- input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
@@ -1484,12 +1488,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
psmouse_info(psmouse,
- "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+ "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
SYN_ID_MODEL(priv->identity),
SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
priv->model_id,
priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
- priv->board_id, priv->firmware_id);
+ priv->ext_cap_10, priv->board_id, priv->firmware_id);
set_input_params(psmouse, priv);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ffceedcaf3c8..aa7c5da60800 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -655,7 +655,6 @@ MODULE_DEVICE_TABLE(i2c, synaptics_i2c_id_table);
static struct i2c_driver synaptics_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &synaptics_i2c_pm,
},
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8b748d99b934..c6606cacb6a7 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -175,9 +175,9 @@ static int amba_kmi_remove(struct amba_device *dev)
return 0;
}
-static int amba_kmi_resume(struct amba_device *dev)
+static int __maybe_unused amba_kmi_resume(struct device *dev)
{
- struct amba_kmi_port *kmi = amba_get_drvdata(dev);
+ struct amba_kmi_port *kmi = dev_get_drvdata(dev);
/* kick the serio layer to rescan this port */
serio_reconnect(kmi->io);
@@ -185,6 +185,8 @@ static int amba_kmi_resume(struct amba_device *dev)
return 0;
}
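+/* Resume-only PM ops: no suspend hook is needed, resume just rescans the port. */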
+static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume);
+
static struct amba_id amba_kmi_idtable[] = {
{
.id = 0x00041050,
@@ -199,11 +201,11 @@ static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
.owner = THIS_MODULE,
+ .pm = &amba_kmi_dev_pm_ops,
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
.remove = amba_kmi_remove,
- .resume = amba_kmi_resume,
};
module_amba_driver(ambakmi_driver);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cb5ece77fd7d..db91de539ee3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -88,6 +88,10 @@ MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings");
static bool i8042_debug;
module_param_named(debug, i8042_debug, bool, 0600);
MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
+
+static bool i8042_unmask_kbd_data;
+module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+MODULE_PARM_DESC(unmask_kbd_data, "Unconditionally enable logging of normally sanitize-filtered kbd data traffic in the debug log (may reveal sensitive data; requires i8042.debug=1)");
#endif
static bool i8042_bypass_aux_irq_test;
@@ -116,6 +120,7 @@ struct i8042_port {
struct serio *serio;
int irq;
bool exists;
+ bool driver_bound;
signed char mux;
};
@@ -133,6 +138,7 @@ static bool i8042_kbd_irq_registered;
static bool i8042_aux_irq_registered;
static unsigned char i8042_suppress_kbd_ack;
static struct platform_device *i8042_platform_device;
+static struct notifier_block i8042_kbd_bind_notifier_block;
static irqreturn_t i8042_interrupt(int irq, void *dev_id);
static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
@@ -528,10 +534,10 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
port = &i8042_ports[port_no];
serio = port->exists ? port->serio : NULL;
- dbg("%02x <- i8042 (interrupt, %d, %d%s%s)\n",
- data, port_no, irq,
- dfl & SERIO_PARITY ? ", bad parity" : "",
- dfl & SERIO_TIMEOUT ? ", timeout" : "");
+ filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
+ port_no, irq,
+ dfl & SERIO_PARITY ? ", bad parity" : "",
+ dfl & SERIO_TIMEOUT ? ", timeout" : "");
filtered = i8042_filter(data, str, serio);
@@ -871,7 +877,7 @@ static int __init i8042_check_aux(void)
static int i8042_controller_check(void)
{
if (i8042_flush()) {
- pr_err("No controller found\n");
+ pr_info("No controller found\n");
return -ENODEV;
}
@@ -1438,6 +1444,29 @@ static int __init i8042_setup_kbd(void)
return error;
}
+static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct serio *serio = to_serio_port(dev);
+ struct i8042_port *port = serio->port_data;
+
+ if (serio != i8042_ports[I8042_KBD_PORT_NO].serio)
+ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER:
+ port->driver_bound = true;
+ break;
+
+ case BUS_NOTIFY_UNBIND_DRIVER:
+ port->driver_bound = false;
+ break;
+ }
+
+ return 0;
+}
+
static int __init i8042_probe(struct platform_device *dev)
{
int error;
@@ -1507,6 +1536,10 @@ static struct platform_driver i8042_driver = {
.shutdown = i8042_shutdown,
};
+static struct notifier_block i8042_kbd_bind_notifier_block = {
+ .notifier_call = i8042_kbd_bind_notifier,
+};
+
static int __init i8042_init(void)
{
struct platform_device *pdev;
@@ -1528,6 +1561,7 @@ static int __init i8042_init(void)
goto err_platform_exit;
}
+ bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = i8042_panic_blink;
return 0;
@@ -1543,6 +1577,7 @@ static void __exit i8042_exit(void)
platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
+ bus_unregister_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = NULL;
}
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index fc080beffedc..1db0a40c9bab 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -73,6 +73,17 @@ static unsigned long i8042_start_time;
printk(KERN_DEBUG KBUILD_MODNAME ": [%d] " format, \
(int) (jiffies - i8042_start_time), ##arg); \
} while (0)
+
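+/*
+ * Like dbg(), but prints the data byte as "**" when @filter is set,
+ * unless the i8042.unmask_kbd_data parameter is enabled.
+ */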
+#define filter_dbg(filter, data, format, args...) \
+ do { \
+ if (!i8042_debug) \
+ break; \
+ \
+ if (!filter || i8042_unmask_kbd_data) \
+ dbg("%02x " format, data, ##args); \
+ else \
+ dbg("** " format, ##args); \
+ } while (0)
#else
#define dbg_init() do { } while (0)
#define dbg(format, arg...) \
@@ -80,6 +91,8 @@ static unsigned long i8042_start_time;
if (0) \
printk(KERN_DEBUG pr_fmt(format), ##arg); \
} while (0)
+
+#define filter_dbg(filter, data, format, args...) do { } while (0)
#endif
#endif /* _I8042_H */
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a05a5179da32..8f828975ab10 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -49,8 +49,6 @@ static DEFINE_MUTEX(serio_mutex);
static LIST_HEAD(serio_list);
-static struct bus_type serio_bus;
-
static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
@@ -1017,7 +1015,7 @@ irqreturn_t serio_interrupt(struct serio *serio,
}
EXPORT_SYMBOL(serio_interrupt);
-static struct bus_type serio_bus = {
+struct bus_type serio_bus = {
.name = "serio",
.drv_groups = serio_driver_groups,
.match = serio_bus_match,
@@ -1029,6 +1027,7 @@ static struct bus_type serio_bus = {
.pm = &serio_pm_ops,
#endif
};
+EXPORT_SYMBOL(serio_bus);
static int __init serio_init(void)
{
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a854c6e5f09e..600dcceff542 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,9 +11,9 @@ menuconfig INPUT_TOUCHSCREEN
if INPUT_TOUCHSCREEN
-config OF_TOUCHSCREEN
+config TOUCHSCREEN_PROPERTIES
def_tristate INPUT
- depends on INPUT && OF
+ depends on INPUT
config TOUCHSCREEN_88PM860X
tristate "Marvell 88PM860x touchscreen"
@@ -118,7 +118,7 @@ config TOUCHSCREEN_ATMEL_MXT
config TOUCHSCREEN_AUO_PIXCIR
tristate "AUO in-cell touchscreen using Pixcir ICs"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a AUO display with in-cell touchscreen
using Pixcir ICs.
@@ -142,7 +142,7 @@ config TOUCHSCREEN_BU21013
config TOUCHSCREEN_CHIPONE_ICN8318
tristate "chipone icn8318 touchscreen controller"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
depends on I2C
depends on OF
help
@@ -156,7 +156,7 @@ config TOUCHSCREEN_CHIPONE_ICN8318
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a cy8ctmg110 capacitive touchscreen on
an AAVA device.
@@ -479,6 +479,18 @@ config TOUCHSCREEN_MTOUCH
To compile this driver as a module, choose M here: the
module will be called mtouch.
+config TOUCHSCREEN_IMX6UL_TSC
+ tristate "Freescale i.MX6UL touchscreen controller"
+ depends on (OF && GPIOLIB) || COMPILE_TEST
+ help
+ Say Y here if you have a Freescale i.MX6UL and want to
+ use the internal touchscreen controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx6ul_tsc.
+
config TOUCHSCREEN_INEXIO
tristate "iNexio serial touchscreens"
select SERIO
@@ -915,10 +927,11 @@ config TOUCHSCREEN_TSC_SERIO
module will be called tsc40.
config TOUCHSCREEN_TSC2005
- tristate "TSC2005 based touchscreens"
- depends on SPI_MASTER
- help
- Say Y here if you have a TSC2005 based touchscreen.
+ tristate "TSC2005 based touchscreens"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ help
+ Say Y here if you have a TSC2005 based touchscreen.
If unsure, say N.
@@ -1029,7 +1042,7 @@ config TOUCHSCREEN_TPS6507X
config TOUCHSCREEN_ZFORCE
tristate "Neonode zForce infrared touchscreens"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a touchscreen using the zforce
infrared technology from Neonode.
@@ -1039,4 +1052,16 @@ config TOUCHSCREEN_ZFORCE
To compile this driver as a module, choose M here: the
module will be called zforce_ts.
+config TOUCHSCREEN_COLIBRI_VF50
+ tristate "Toradex Colibri on board touchscreen driver"
+ depends on GPIOLIB && IIO && VF610_ADC
+ help
+ Say Y here if you have a Colibri VF50 and plan to use
+ the on-board 4-wire touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called colibri_vf50_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fa3d33bac7fc..1b79cc09744a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,7 +6,7 @@
wm97xx-ts-y := wm97xx-core.o
-obj-$(CONFIG_OF_TOUCHSCREEN) += of_touchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PROPERTIES) += of_touchscreen.o
obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
@@ -38,6 +38,7 @@ obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
+obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
@@ -85,3 +86,4 @@ obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
obj-$(CONFIG_TOUCHSCREEN_SX8654) += sx8654.o
obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
+obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index dcf390771549..d66962c5b1c2 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -94,7 +94,6 @@ MODULE_DEVICE_TABLE(i2c, ad7879_id);
static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
- .owner = THIS_MODULE,
.pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e4eb8a6c658f..0f5f968592bd 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1234,7 +1234,8 @@ static const struct ads7846_platform_data *ads7846_probe_dt(struct device *dev)
of_property_read_u32(node, "ti,pendown-gpio-debounce",
&pdata->gpio_pendown_debounce);
- pdata->wakeup = of_property_read_bool(node, "linux,wakeup");
+ pdata->wakeup = of_property_read_bool(node, "wakeup-source") ||
+ of_property_read_bool(node, "linux,wakeup");
pdata->gpio_pendown = of_get_named_gpio(dev->of_node, "pendown-gpio", 0);
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index f0b954d46a25..71b5a634cf6d 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -166,7 +166,6 @@ MODULE_DEVICE_TABLE(of, ar1021_i2c_of_match);
static struct i2c_driver ar1021_i2c_driver = {
.driver = {
.name = "ar1021_i2c",
- .owner = THIS_MODULE,
.pm = &ar1021_i2c_pm,
.of_match_table = ar1021_i2c_of_match,
},
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index dfc7309e3d38..c5622058c22b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -22,34 +22,20 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-/* Version */
-#define MXT_VER_20 20
-#define MXT_VER_21 21
-#define MXT_VER_22 22
-
/* Firmware files */
#define MXT_FW_NAME "maxtouch.fw"
#define MXT_CFG_NAME "maxtouch.cfg"
#define MXT_CFG_MAGIC "OBP_RAW V1"
/* Registers */
-#define MXT_INFO 0x00
-#define MXT_FAMILY_ID 0x00
-#define MXT_VARIANT_ID 0x01
-#define MXT_VERSION 0x02
-#define MXT_BUILD 0x03
-#define MXT_MATRIX_X_SIZE 0x04
-#define MXT_MATRIX_Y_SIZE 0x05
-#define MXT_OBJECT_NUM 0x06
#define MXT_OBJECT_START 0x07
-
#define MXT_OBJECT_SIZE 6
#define MXT_INFO_CHECKSUM_SIZE 3
#define MXT_MAX_BLOCK_WRITE 256
@@ -103,21 +89,16 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-#define MXT_POWER_IDLEACQINT 0
-#define MXT_POWER_ACTVACQINT 1
-#define MXT_POWER_ACTV2IDLETO 2
-
-/* MXT_GEN_ACQUIRE_T8 field */
-#define MXT_ACQUIRE_CHRGTIME 0
-#define MXT_ACQUIRE_TCHDRIFT 2
-#define MXT_ACQUIRE_DRIFTST 3
-#define MXT_ACQUIRE_TCHAUTOCAL 4
-#define MXT_ACQUIRE_SYNC 5
-#define MXT_ACQUIRE_ATCHCALST 6
-#define MXT_ACQUIRE_ATCHCALSTHR 7
+struct t7_config {
+ u8 idle;
+ u8 active;
+} __packed;
+
+#define MXT_POWER_CFG_RUN 0
+#define MXT_POWER_CFG_DEEPSLEEP 1
/* MXT_TOUCH_MULTI_T9 field */
-#define MXT_TOUCH_CTRL 0
+#define MXT_T9_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -139,51 +120,10 @@ struct t9_range {
/* MXT_TOUCH_MULTI_T9 orient */
#define MXT_T9_ORIENT_SWITCH (1 << 0)
-/* MXT_PROCI_GRIPFACE_T20 field */
-#define MXT_GRIPFACE_CTRL 0
-#define MXT_GRIPFACE_XLOGRIP 1
-#define MXT_GRIPFACE_XHIGRIP 2
-#define MXT_GRIPFACE_YLOGRIP 3
-#define MXT_GRIPFACE_YHIGRIP 4
-#define MXT_GRIPFACE_MAXTCHS 5
-#define MXT_GRIPFACE_SZTHR1 7
-#define MXT_GRIPFACE_SZTHR2 8
-#define MXT_GRIPFACE_SHPTHR1 9
-#define MXT_GRIPFACE_SHPTHR2 10
-#define MXT_GRIPFACE_SUPEXTTO 11
-
-/* MXT_PROCI_NOISE field */
-#define MXT_NOISE_CTRL 0
-#define MXT_NOISE_OUTFLEN 1
-#define MXT_NOISE_GCAFUL_LSB 3
-#define MXT_NOISE_GCAFUL_MSB 4
-#define MXT_NOISE_GCAFLL_LSB 5
-#define MXT_NOISE_GCAFLL_MSB 6
-#define MXT_NOISE_ACTVGCAFVALID 7
-#define MXT_NOISE_NOISETHR 8
-#define MXT_NOISE_FREQHOPSCALE 10
-#define MXT_NOISE_FREQ0 11
-#define MXT_NOISE_FREQ1 12
-#define MXT_NOISE_FREQ2 13
-#define MXT_NOISE_FREQ3 14
-#define MXT_NOISE_FREQ4 15
-#define MXT_NOISE_IDLEGCAFVALID 16
-
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
#define MXT_COMMS_CMD 1
-/* MXT_SPT_CTECONFIG_T28 field */
-#define MXT_CTE_CTRL 0
-#define MXT_CTE_CMD 1
-#define MXT_CTE_MODE 2
-#define MXT_CTE_IDLEGCAFDEPTH 3
-#define MXT_CTE_ACTVGCAFDEPTH 4
-#define MXT_CTE_VOLTAGE 5
-
-#define MXT_VOLTAGE_DEFAULT 2700000
-#define MXT_VOLTAGE_STEP 10000
-
/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
#define MXT_RESET_VALUE 0x01
@@ -291,6 +231,7 @@ struct mxt_data {
u8 last_message_count;
u8 num_touchids;
u8 multitouch;
+ struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@@ -997,16 +938,15 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
count = data->msg_buf[0];
- if (count == 0) {
- /*
- * This condition is caused by the CHG line being configured
- * in Mode 0. It results in unnecessary I2C operations but it
- * is benign.
- */
- dev_dbg(dev, "Interrupt triggered but zero messages\n");
+ /*
+ * This condition may be caused by the CHG line being configured in
+ * Mode 0. It results in unnecessary I2C operations but it is benign.
+ */
+ if (count == 0)
return IRQ_NONE;
- } else if (count > data->max_reportid) {
- dev_err(dev, "T44 count %d exceeded max report id\n", count);
+
+ if (count > data->max_reportid) {
+ dev_warn(dev, "T44 count %d exceeded max report id\n", count);
count = data->max_reportid;
}
@@ -1157,7 +1097,9 @@ static int mxt_soft_reset(struct mxt_data *data)
struct device *dev = &data->client->dev;
int ret = 0;
- dev_info(dev, "Resetting chip\n");
+ dev_info(dev, "Resetting device\n");
+
+ disable_irq(data->irq);
reinit_completion(&data->reset_completion);
@@ -1165,6 +1107,11 @@ static int mxt_soft_reset(struct mxt_data *data)
if (ret)
return ret;
+ /* Ignore CHG line for 100ms after reset */
+ msleep(100);
+
+ enable_irq(data->irq);
+
ret = mxt_wait_for_completion(data, &data->reset_completion,
MXT_RESET_TIMEOUT);
if (ret)
@@ -1361,6 +1308,8 @@ static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start,
return 0;
}
+static int mxt_init_t7_power_cfg(struct mxt_data *data);
+
/*
* mxt_update_cfg - download configuration to chip
*
@@ -1508,6 +1457,9 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
dev_info(dev, "Config successfully updated\n");
+ /* T7 config may have changed */
+ mxt_init_t7_power_cfg(data);
+
release_mem:
kfree(config_mem);
return ret;
@@ -1533,7 +1485,7 @@ static int mxt_get_info(struct mxt_data *data)
int error;
/* Read 7-byte info block starting at address 0 */
- error = __mxt_read_reg(client, MXT_INFO, sizeof(*info), info);
+ error = __mxt_read_reg(client, 0, sizeof(*info), info);
if (error)
return error;
@@ -1905,6 +1857,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
if (pdata->t19_num_keys) {
mxt_set_up_as_touchpad(input_dev, data);
mt_flags |= INPUT_MT_POINTER;
+ } else {
+ mt_flags |= INPUT_MT_DIRECT;
}
/* For multi touch */
@@ -2051,6 +2005,60 @@ err_free_object_table:
return error;
}
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+ struct t7_config *new_config;
+ struct t7_config deepsleep = { .active = 0, .idle = 0 };
+
+ if (sleep == MXT_POWER_CFG_DEEPSLEEP)
+ new_config = &deepsleep;
+ else
+ new_config = &data->t7_cfg;
+
+ error = __mxt_write_reg(data->client, data->T7_address,
+ sizeof(data->t7_cfg), new_config);
+ if (error)
+ return error;
+
+ dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
+ new_config->active, new_config->idle);
+
+ return 0;
+}
+
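+/*
+ * Read back the T7 power configuration, retrying once after a soft
+ * reset and falling back to conservative defaults if the values are
+ * still zero.
+ */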
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+ bool retry = false;
+
+recheck:
+ error = __mxt_read_reg(data->client, data->T7_address,
+ sizeof(data->t7_cfg), &data->t7_cfg);
+ if (error)
+ return error;
+
+ if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
+ if (!retry) {
+ dev_dbg(dev, "T7 cfg zero, resetting\n");
+ mxt_soft_reset(data);
+ retry = true;
+ goto recheck;
+ } else {
+ dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+ data->t7_cfg.active = 20;
+ data->t7_cfg.idle = 100;
+ return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+ }
+ }
+
+ dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
+ data->t7_cfg.active, data->t7_cfg.idle);
+ return 0;
+}
+
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -2058,6 +2066,12 @@ static int mxt_configure_objects(struct mxt_data *data,
struct mxt_info *info = &data->info;
int error;
+ error = mxt_init_t7_power_cfg(data);
+ if (error) {
+ dev_err(dev, "Failed to initialize power cfg\n");
+ return error;
+ }
+
if (cfg) {
error = mxt_update_cfg(data, cfg);
if (error)
@@ -2346,14 +2360,41 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- /* Touch enable */
- mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0x83);
+ switch (data->pdata->suspend_mode) {
+ case MXT_SUSPEND_T9_CTRL:
+ mxt_soft_reset(data);
+
+ /* Touch enable */
+ /* 0x83 = SCANEN | RPTEN | ENABLE */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0x83);
+ break;
+
+ case MXT_SUSPEND_DEEP_SLEEP:
+ default:
+ mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+ /* Recalibrate since chip has been in deep sleep */
+ mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+ break;
+ }
+
}
static void mxt_stop(struct mxt_data *data)
{
- /* Touch disable */
- mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0);
+ switch (data->pdata->suspend_mode) {
+ case MXT_SUSPEND_T9_CTRL:
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0);
+ break;
+
+ case MXT_SUSPEND_DEEP_SLEEP:
+ default:
+ mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+ break;
+ }
}
static int mxt_input_open(struct input_dev *dev)
@@ -2376,19 +2417,18 @@ static void mxt_input_close(struct input_dev *dev)
static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
{
struct mxt_platform_data *pdata;
+ struct device_node *np = client->dev.of_node;
u32 *keymap;
- u32 keycode;
- int proplen, i, ret;
+ int proplen, ret;
- if (!client->dev.of_node)
+ if (!np)
return ERR_PTR(-ENOENT);
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (of_find_property(client->dev.of_node, "linux,gpio-keymap",
- &proplen)) {
+ if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
pdata->t19_num_keys = proplen / sizeof(u32);
keymap = devm_kzalloc(&client->dev,
@@ -2397,18 +2437,17 @@ static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
if (!keymap)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < pdata->t19_num_keys; i++) {
- ret = of_property_read_u32_index(client->dev.of_node,
- "linux,gpio-keymap", i, &keycode);
- if (ret)
- keycode = KEY_RESERVED;
-
- keymap[i] = keycode;
- }
+ ret = of_property_read_u32_array(np, "linux,gpio-keymap",
+ keymap, pdata->t19_num_keys);
+ if (ret)
+ dev_warn(&client->dev,
+ "Couldn't read linux,gpio-keymap: %d\n", ret);
pdata->t19_keymap = keymap;
}
+ pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
+
return pdata;
}
#else
@@ -2609,6 +2648,9 @@ static int __maybe_unused mxt_suspend(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
+ if (!input_dev)
+ return 0;
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
@@ -2625,7 +2667,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
- mxt_soft_reset(data);
+ if (!input_dev)
+ return 0;
mutex_lock(&input_dev->mutex);
@@ -2666,7 +2709,6 @@ MODULE_DEVICE_TABLE(i2c, mxt_id);
static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxt_of_match),
.acpi_match_table = ACPI_PTR(mxt_acpi_id),
.pm = &mxt_pm_ops,
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 40e02dd5b2f9..38c06f754acd 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -686,7 +686,6 @@ MODULE_DEVICE_TABLE(of, auo_pixcir_ts_dt_idtable);
static struct i2c_driver auo_pixcir_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "auo_pixcir_ts",
.pm = &auo_pixcir_pm_ops,
.of_match_table = of_match_ptr(auo_pixcir_ts_dt_idtable),
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index b9b5ddad6658..931417eb4f5a 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -716,7 +716,6 @@ MODULE_DEVICE_TABLE(i2c, bu21013_id);
static struct i2c_driver bu21013_driver = {
.driver = {
.name = DRIVER_TP,
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &bu21013_dev_pm_ops,
#endif
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 32e9db0e04bf..22a6fead8cfb 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -300,7 +300,6 @@ MODULE_DEVICE_TABLE(i2c, icn8318_i2c_id);
static struct i2c_driver icn8318_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "chipone_icn8318",
.pm = &icn8318_pm_ops,
.of_match_table = icn8318_of_match,
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
new file mode 100644
index 000000000000..5d4903a402cc
--- /dev/null
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -0,0 +1,386 @@
+/*
+ * Toradex Colibri VF50 Touchscreen driver
+ *
+ * Copyright 2015 Toradex AG
+ *
+ * Originally authored by Stefan Agner for 3.0 kernel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "colibri-vf50-ts"
+#define DRV_VERSION "1.0"
+
+#define VF_ADC_MAX ((1 << 12) - 1)
+
+#define COLI_TOUCH_MIN_DELAY_US 1000
+#define COLI_TOUCH_MAX_DELAY_US 2000
+#define COLI_PULLUP_MIN_DELAY_US 10000
+#define COLI_PULLUP_MAX_DELAY_US 11000
+#define COLI_TOUCH_NO_OF_AVGS 5
+#define COLI_TOUCH_REQ_ADC_CHAN 4
+
+struct vf50_touch_device {
+ struct platform_device *pdev;
+ struct input_dev *ts_input;
+ struct iio_channel *channels;
+ struct gpio_desc *gpio_xp;
+ struct gpio_desc *gpio_xm;
+ struct gpio_desc *gpio_yp;
+ struct gpio_desc *gpio_ym;
+ int pen_irq;
+ int min_pressure;
+ bool stop_touchscreen;
+};
+
+/*
+ * Enable the given plates and measure touch parameters using the ADC
+ */
+static int adc_ts_measure(struct iio_channel *channel,
+ struct gpio_desc *plate_p, struct gpio_desc *plate_m)
+{
+ int i, value = 0, val = 0;
+ int error;
+
+ gpiod_set_value(plate_p, 1);
+ gpiod_set_value(plate_m, 1);
+
+ usleep_range(COLI_TOUCH_MIN_DELAY_US, COLI_TOUCH_MAX_DELAY_US);
+
+ for (i = 0; i < COLI_TOUCH_NO_OF_AVGS; i++) {
+ error = iio_read_channel_raw(channel, &val);
+ if (error < 0) {
+ value = error;
+ goto error_iio_read;
+ }
+
+ value += val;
+ }
+
+ value /= COLI_TOUCH_NO_OF_AVGS;
+
+error_iio_read:
+ gpiod_set_value(plate_p, 0);
+ gpiod_set_value(plate_m, 0);
+
+ return value;
+}
+
+/*
+ * Enable touch detection using falling edge detection on XM
+ */
+static void vf50_ts_enable_touch_detection(struct vf50_touch_device *vf50_ts)
+{
+ /* Enable plate YM (must be a strong GND; active high) */
+ gpiod_set_value(vf50_ts->gpio_ym, 1);
+
+ /*
+ * Switch the platform mux to the idle state in order to
+ * enable the pull-up on the GPIO.
+ */
+ pinctrl_pm_select_idle_state(&vf50_ts->pdev->dev);
+
+ /* Wait for the pull-up to be stable on high */
+ usleep_range(COLI_PULLUP_MIN_DELAY_US, COLI_PULLUP_MAX_DELAY_US);
+}
+
+/*
+ * ADC touch screen sampling bottom half irq handler
+ */
+static irqreturn_t vf50_ts_irq_bh(int irq, void *private)
+{
+ struct vf50_touch_device *vf50_ts = private;
+ struct device *dev = &vf50_ts->pdev->dev;
+ int val_x, val_y, val_z1, val_z2, val_p = 0;
+ bool discard_val_on_start = true;
+
+ /* Disable the touch detection plates */
+ gpiod_set_value(vf50_ts->gpio_ym, 0);
+
+ /* Switch the platform mux back to the default state to mux the pins as ADC inputs */
+ pinctrl_pm_select_default_state(dev);
+
+ while (!vf50_ts->stop_touchscreen) {
+ /* X-Direction */
+ val_x = adc_ts_measure(&vf50_ts->channels[0],
+ vf50_ts->gpio_xp, vf50_ts->gpio_xm);
+ if (val_x < 0)
+ break;
+
+ /* Y-Direction */
+ val_y = adc_ts_measure(&vf50_ts->channels[1],
+ vf50_ts->gpio_yp, vf50_ts->gpio_ym);
+ if (val_y < 0)
+ break;
+
+ /*
+ * Touch pressure
+ * Measure on XP/YM
+ */
+ val_z1 = adc_ts_measure(&vf50_ts->channels[2],
+ vf50_ts->gpio_yp, vf50_ts->gpio_xm);
+ if (val_z1 < 0)
+ break;
+ val_z2 = adc_ts_measure(&vf50_ts->channels[3],
+ vf50_ts->gpio_yp, vf50_ts->gpio_xm);
+ if (val_z2 < 0)
+ break;
+
+ /* Validate signal (avoid calculation using noise) */
+ if (val_z1 > 64 && val_x > 64) {
+ /*
+ * Calculate resistance between the plates
+ * lower resistance means higher pressure
+ */
+ int r_x = (1000 * val_x) / VF_ADC_MAX;
+
+ val_p = (r_x * val_z2) / val_z1 - r_x;
+
+ } else {
+ val_p = 2000;
+ }
+
+ val_p = 2000 - val_p;
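+ /*
+ * Worked example (illustrative values): with VF_ADC_MAX = 4095,
+ * val_x = 2048 gives r_x = (1000 * 2048) / 4095 = 500; val_z1 =
+ * 200 and val_z2 = 300 then give val_p = (500 * 300) / 200 - 500
+ * = 250, reported as 2000 - 250 = 1750.
+ */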
+ dev_dbg(dev,
+ "Measured values: x: %d, y: %d, z1: %d, z2: %d, p: %d\n",
+ val_x, val_y, val_z1, val_z2, val_p);
+
+ /*
+ * If the touch pressure is too low, stop measuring and
+ * re-enable touch detection.
+ */
+ if (val_p < vf50_ts->min_pressure || val_p > 2000)
+ break;
+
+ /*
+ * The pressure may not be sufficient for the first X and the
+ * second Y measurement while already being adequate for the
+ * third and fourth measurement. To handle this, always
+ * discard the first set of measurements.
+ */
+ if (discard_val_on_start) {
+ discard_val_on_start = false;
+ } else {
+ /*
+ * Report touch position and sleep for
+ * the next measurement.
+ */
+ input_report_abs(vf50_ts->ts_input,
+ ABS_X, VF_ADC_MAX - val_x);
+ input_report_abs(vf50_ts->ts_input,
+ ABS_Y, VF_ADC_MAX - val_y);
+ input_report_abs(vf50_ts->ts_input,
+ ABS_PRESSURE, val_p);
+ input_report_key(vf50_ts->ts_input, BTN_TOUCH, 1);
+ input_sync(vf50_ts->ts_input);
+ }
+
+ usleep_range(COLI_PULLUP_MIN_DELAY_US,
+ COLI_PULLUP_MAX_DELAY_US);
+ }
+
+ /* Report no more touch, re-enable touch detection */
+ input_report_abs(vf50_ts->ts_input, ABS_PRESSURE, 0);
+ input_report_key(vf50_ts->ts_input, BTN_TOUCH, 0);
+ input_sync(vf50_ts->ts_input);
+
+ vf50_ts_enable_touch_detection(vf50_ts);
+
+ return IRQ_HANDLED;
+}
+
+static int vf50_ts_open(struct input_dev *dev_input)
+{
+ struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
+ struct device *dev = &touchdev->pdev->dev;
+
+ dev_dbg(dev, "Input device %s opened, starting touch detection\n",
+ dev_input->name);
+
+ touchdev->stop_touchscreen = false;
+
+ /* Mux for touch detection and wait for the pull-up to settle before IRQs fire */
+ vf50_ts_enable_touch_detection(touchdev);
+
+ return 0;
+}
+
+static void vf50_ts_close(struct input_dev *dev_input)
+{
+ struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
+ struct device *dev = &touchdev->pdev->dev;
+
+ touchdev->stop_touchscreen = true;
+
+ /* Make sure IRQ is not running past close */
+ mb();
+ synchronize_irq(touchdev->pen_irq);
+
+ gpiod_set_value(touchdev->gpio_ym, 0);
+ pinctrl_pm_select_default_state(dev);
+
+ dev_dbg(dev, "Input device %s closed, disable touch detection\n",
+ dev_input->name);
+}
+
+static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
+ const char *con_id, enum gpiod_flags flags)
+{
+ int error;
+
+ *gpio_d = devm_gpiod_get(dev, con_id, flags);
+ if (IS_ERR(*gpio_d)) {
+ error = PTR_ERR(*gpio_d);
+ dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void vf50_ts_channel_release(void *data)
+{
+ struct iio_channel *channels = data;
+
+ iio_channel_release_all(channels);
+}
+
+static int vf50_ts_probe(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ struct iio_channel *channels;
+ struct device *dev = &pdev->dev;
+ struct vf50_touch_device *touchdev;
+ int num_adc_channels;
+ int error;
+
+ channels = iio_channel_get_all(dev);
+ if (IS_ERR(channels))
+ return PTR_ERR(channels);
+
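+ /* Arrange for the IIO channels to be released automatically on unbind. */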
+ error = devm_add_action(dev, vf50_ts_channel_release, channels);
+ if (error) {
+ iio_channel_release_all(channels);
+ dev_err(dev, "Failed to register iio channel release action");
+ return error;
+ }
+
+ num_adc_channels = 0;
+ while (channels[num_adc_channels].indio_dev)
+ num_adc_channels++;
+
+ if (num_adc_channels != COLI_TOUCH_REQ_ADC_CHAN) {
+ dev_err(dev, "Inadequate ADC channels specified\n");
+ return -EINVAL;
+ }
+
+ touchdev = devm_kzalloc(dev, sizeof(*touchdev), GFP_KERNEL);
+ if (!touchdev)
+ return -ENOMEM;
+
+ touchdev->pdev = pdev;
+ touchdev->channels = channels;
+
+ error = of_property_read_u32(dev->of_node, "vf50-ts-min-pressure",
+ &touchdev->min_pressure);
+ if (error)
+ return error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Failed to allocate TS input device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, touchdev);
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input->open = vf50_ts_open;
+ input->close = vf50_ts_close;
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(input, ABS_X, 0, VF_ADC_MAX, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, VF_ADC_MAX, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, VF_ADC_MAX, 0, 0);
+
+ touchdev->ts_input = input;
+ input_set_drvdata(input, touchdev);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Failed to register input device\n");
+ return error;
+ }
+
+ error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xp, "xp", GPIOD_OUT_LOW);
+ if (error)
+ return error;
+
+ error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xm,
+ "xm", GPIOD_OUT_LOW);
+ if (error)
+ return error;
+
+ error = vf50_ts_get_gpiod(dev, &touchdev->gpio_yp, "yp", GPIOD_OUT_LOW);
+ if (error)
+ return error;
+
+ error = vf50_ts_get_gpiod(dev, &touchdev->gpio_ym, "ym", GPIOD_OUT_LOW);
+ if (error)
+ return error;
+
+ touchdev->pen_irq = platform_get_irq(pdev, 0);
+ if (touchdev->pen_irq < 0)
+ return touchdev->pen_irq;
+
+ error = devm_request_threaded_irq(dev, touchdev->pen_irq,
+ NULL, vf50_ts_irq_bh, IRQF_ONESHOT,
+ "vf50 touch", touchdev);
+ if (error) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n",
+ touchdev->pen_irq, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id vf50_touch_of_match[] = {
+ { .compatible = "toradex,vf50-touchscreen", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vf50_touch_of_match);
+
+static struct platform_driver vf50_touch_driver = {
+ .driver = {
+ .name = "toradex,vf50_touchctrl",
+ .of_match_table = vf50_touch_of_match,
+ },
+ .probe = vf50_ts_probe,
+};
+module_platform_driver(vf50_touch_driver);
+
+MODULE_AUTHOR("Sanchayan Maity");
+MODULE_DESCRIPTION("Colibri VF50 Touchscreen driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index f2119ee0e21b..cc1d1350074e 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -347,7 +347,6 @@ MODULE_DEVICE_TABLE(i2c, cy8ctmg110_idtable);
static struct i2c_driver cy8ctmg110_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = CY8CTMG110_DRIVER_NAME,
.pm = &cy8ctmg110_pm,
},
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 8e2012c79058..a9f95c7d3c00 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -74,7 +74,6 @@ MODULE_DEVICE_TABLE(i2c, cyttsp4_i2c_id);
static struct i2c_driver cyttsp4_i2c_driver = {
.driver = {
.name = CYTTSP4_I2C_NAME,
- .owner = THIS_MODULE,
.pm = &cyttsp4_pm_ops,
},
.probe = cyttsp4_i2c_probe,
@@ -87,4 +86,3 @@ module_i2c_driver(cyttsp4_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("i2c:cyttsp4");
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 63104a86a9bd..eee51b3f2e3f 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -74,7 +74,6 @@ MODULE_DEVICE_TABLE(i2c, cyttsp_i2c_id);
static struct i2c_driver cyttsp_i2c_driver = {
.driver = {
.name = CY_I2C_NAME,
- .owner = THIS_MODULE,
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
@@ -87,4 +86,3 @@ module_i2c_driver(cyttsp_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("i2c:cyttsp");
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 394b1de9a2a3..48de1e8b3c93 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1041,7 +1041,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
0, tsdata->num_y * 64 - 1, 0, 0);
if (!pdata)
- touchscreen_parse_of_params(input, true);
+ touchscreen_parse_properties(input, true);
error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
if (error) {
@@ -1134,7 +1134,6 @@ MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "edt_ft5x06",
.of_match_table = of_match_ptr(edt_ft5x06_of_match),
.pm = &edt_ft5x06_ts_pm_ops,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 4c56299284ef..1afc08b08155 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -264,11 +264,11 @@ static const struct of_device_id egalax_ts_dt_ids[] = {
{ .compatible = "eeti,egalax_ts" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, egalax_ts_dt_ids);
static struct i2c_driver egalax_ts_driver = {
.driver = {
.name = "egalax_ts",
- .owner = THIS_MODULE,
.pm = &egalax_ts_pm_ops,
.of_match_table = egalax_ts_dt_ids,
},
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0efd766a545b..17cc20ef4923 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -38,6 +38,8 @@
#include <linux/input/mt.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
/* Device, Driver information */
@@ -100,7 +102,10 @@
#define ELAN_FW_PAGESIZE 132
/* calibration timeout definition */
-#define ELAN_CALI_TIMEOUT_MSEC 10000
+#define ELAN_CALI_TIMEOUT_MSEC 12000
+
+#define ELAN_POWERON_DELAY_USEC 500
+#define ELAN_RESET_DELAY_MSEC 20
enum elants_state {
ELAN_STATE_NORMAL,
@@ -118,6 +123,10 @@ struct elants_data {
struct i2c_client *client;
struct input_dev *input;
+ struct regulator *vcc33;
+ struct regulator *vccio;
+ struct gpio_desc *reset_gpio;
+
u16 fw_version;
u8 test_version;
u8 solution_version;
@@ -141,6 +150,7 @@ struct elants_data {
u8 buf[MAX_PACKET_SIZE];
bool wake_irq_enabled;
+ bool keep_power_in_suspend;
};
static int elants_i2c_send(struct i2c_client *client,
@@ -605,6 +615,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+ const u8 close_idle[] = {0x54, 0x2c, 0x01, 0x01};
u8 buf[HEADER_SIZE];
u16 send_id;
int page, n_fw_pages;
@@ -617,8 +628,13 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
} else {
/* Start IAP Procedure */
dev_dbg(&client->dev, "Normal IAP procedure\n");
+ /* Close idle mode */
+ error = elants_i2c_send(client, close_idle, sizeof(close_idle));
+ if (error)
+ dev_err(&client->dev, "Failed to close idle mode: %d\n", error);
+ msleep(60);
elants_i2c_sw_reset(client);
-
+ msleep(20);
error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
}
@@ -1052,6 +1068,67 @@ static void elants_i2c_remove_sysfs_group(void *_data)
sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
}
+static int elants_i2c_power_on(struct elants_data *ts)
+{
+ int error;
+
+ /*
+ * If we do not have a reset GPIO, assume the platform firmware
+ * controls the regulators and powers them on for us.
+ */
+ if (IS_ERR_OR_NULL(ts->reset_gpio))
+ return 0;
+
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+
+ error = regulator_enable(ts->vcc33);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to enable vcc33 regulator: %d\n",
+ error);
+ goto release_reset_gpio;
+ }
+
+ error = regulator_enable(ts->vccio);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to enable vccio regulator: %d\n",
+ error);
+ regulator_disable(ts->vcc33);
+ goto release_reset_gpio;
+ }
+
+ /*
+ * We need to wait a bit after powering on the controller before
+ * we are allowed to release the reset GPIO.
+ */
+ udelay(ELAN_POWERON_DELAY_USEC);
+
+release_reset_gpio:
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ if (error)
+ return error;
+
+ msleep(ELAN_RESET_DELAY_MSEC);
+
+ return 0;
+}
+
+static void elants_i2c_power_off(void *_data)
+{
+ struct elants_data *ts = _data;
+
+ if (!IS_ERR_OR_NULL(ts->reset_gpio)) {
+ /*
+ * Activate reset gpio to prevent leakage through the
+ * pin once we shut off power to the controller.
+ */
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+ regulator_disable(ts->vccio);
+ regulator_disable(ts->vcc33);
+ }
+}
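
The power-on/power-off pair above is tied into managed-device teardown later in probe() via devm_add_action(), so power is cut again on probe failure or unbind. A minimal sketch of that pattern, with a hypothetical helper name (the probe code below open-codes the same steps):

#include <linux/device.h>

static int elants_example_power_setup(struct device *dev,
				      struct elants_data *ts)
{
	int error;

	error = elants_i2c_power_on(ts);
	if (error)
		return error;

	/* devm runs elants_i2c_power_off(ts) automatically on detach */
	error = devm_add_action(dev, elants_i2c_power_off, ts);
	if (error) {
		/* devm could not register the cleanup, undo by hand */
		elants_i2c_power_off(ts);
		return error;
	}

	return 0;
}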
+
static int elants_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1066,13 +1143,6 @@ static int elants_i2c_probe(struct i2c_client *client,
return -ENXIO;
}
- /* Make sure there is something at this address */
- if (i2c_smbus_xfer(client->adapter, client->addr, 0,
- I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
- dev_err(&client->dev, "nothing at this address\n");
- return -ENXIO;
- }
-
ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
if (!ts)
return -ENOMEM;
@@ -1083,6 +1153,62 @@ static int elants_i2c_probe(struct i2c_client *client,
ts->client = client;
i2c_set_clientdata(client, ts);
+ ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+ if (IS_ERR(ts->vcc33)) {
+ error = PTR_ERR(ts->vcc33);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vcc33' regulator: %d\n",
+ error);
+ return error;
+ }
+
+ ts->vccio = devm_regulator_get(&client->dev, "vccio");
+ if (IS_ERR(ts->vccio)) {
+ error = PTR_ERR(ts->vccio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vccio' regulator: %d\n",
+ error);
+ return error;
+ }
+
+ ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+
+ if (error == -EPROBE_DEFER)
+ return error;
+
+ if (error != -ENOENT && error != -ENOSYS) {
+ dev_err(&client->dev,
+ "failed to get reset gpio: %d\n",
+ error);
+ return error;
+ }
+
+ ts->keep_power_in_suspend = true;
+ }
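
A note on the error handling just above: when firmware describes no reset line, devm_gpiod_get() fails with -ENOENT (or -ENOSYS when GPIOLIB is disabled), and the driver folds that case into keep_power_in_suspend. Where a driver does not need to distinguish the absent case, the _optional variant expresses the same intent by returning NULL instead of an error; a hedged sketch (not what this driver uses, for the reason above):

#include <linux/gpio/consumer.h>

/* Illustrative only: NULL means no reset line described by firmware. */
static struct gpio_desc *example_get_reset_gpio(struct device *dev)
{
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}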
+
+ error = elants_i2c_power_on(ts);
+ if (error)
+ return error;
+
+ error = devm_add_action(&client->dev, elants_i2c_power_off, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to install power off action: %d\n", error);
+ elants_i2c_power_off(ts);
+ return error;
+ }
+
+ /* Make sure there is something at this address */
+ if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+ dev_err(&client->dev, "nothing at this address\n");
+ return -ENXIO;
+ }
+
error = elants_i2c_initialize(ts);
if (error) {
dev_err(&client->dev, "failed to initialize: %d\n", error);
@@ -1190,17 +1316,23 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
disable_irq(client->irq);
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
- error = elants_i2c_send(client, set_sleep_cmd,
- sizeof(set_sleep_cmd));
- if (!error)
- break;
+ if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_sleep_cmd,
+ sizeof(set_sleep_cmd));
+ if (!error)
+ break;
- dev_err(&client->dev, "suspend command failed: %d\n", error);
- }
+ dev_err(&client->dev,
+ "suspend command failed: %d\n", error);
+ }
- if (device_may_wakeup(dev))
- ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled =
+ (enable_irq_wake(client->irq) == 0);
+ } else {
+ elants_i2c_power_off(ts);
+ }
return 0;
}
@@ -1216,13 +1348,19 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
if (device_may_wakeup(dev) && ts->wake_irq_enabled)
disable_irq_wake(client->irq);
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
- error = elants_i2c_send(client, set_active_cmd,
- sizeof(set_active_cmd));
- if (!error)
- break;
+ if (ts->keep_power_in_suspend) {
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_active_cmd,
+ sizeof(set_active_cmd));
+ if (!error)
+ break;
- dev_err(&client->dev, "resume command failed: %d\n", error);
+ dev_err(&client->dev,
+ "resume command failed: %d\n", error);
+ }
+ } else {
+ elants_i2c_power_on(ts);
+ elants_i2c_initialize(ts);
}
ts->state = ELAN_STATE_NORMAL;
@@ -1261,10 +1399,10 @@ static struct i2c_driver elants_i2c_driver = {
.id_table = elants_i2c_id,
.driver = {
.name = DEVICE_NAME,
- .owner = THIS_MODULE,
.pm = &elants_i2c_pm_ops,
.acpi_match_table = ACPI_PTR(elants_acpi_id),
.of_match_table = of_match_ptr(elants_of_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_i2c_driver(elants_i2c_driver);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..4d113c9e4b77 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
int abs_y_max;
unsigned int max_touch_num;
unsigned int int_trigger_type;
+ bool rotated_screen;
};
#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_LEVEL_HIGH,
};
+/*
+ * These tablets have their coordinate origin at the bottom right
+ * of the tablet, as if the screen were rotated 180 degrees.
+ */
+static const struct dmi_system_id rotated_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "WinBook TW100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+ }
+ },
+ {
+ .ident = "WinBook TW700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+ },
+ },
+#endif
+ {}
+};
+
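dmi_check_system() returns the number of table entries matching the running machine, so its result can be used directly as a boolean quirk flag, which is what goodix_read_config() does further down. Schematically (helper name illustrative):

#include <linux/dmi.h>

static bool example_screen_is_rotated(void)
{
	/* > 0 iff the machine matches a rotated_screen[] entry */
	return dmi_check_system(rotated_screen) > 0;
}
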
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
int input_y = get_unaligned_le16(&coor_data[3]);
int input_w = get_unaligned_le16(&coor_data[5]);
+ if (ts->rotated_screen) {
+ input_x = ts->abs_x_max - input_x;
+ input_y = ts->abs_y_max - input_y;
+ }
+
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
ts->abs_y_max = GOODIX_MAX_HEIGHT;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
}
+
+ ts->rotated_screen = dmi_check_system(rotated_screen);
+ if (ts->rotated_screen)
+ dev_dbg(&ts->client->dev,
+ "Applying '180 degrees rotated screen' quirk\n");
}
/**
@@ -384,6 +420,7 @@ static const struct i2c_device_id goodix_ts_id[] = {
{ "GDIX1001:00", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id goodix_acpi_match[] = {
@@ -412,7 +449,6 @@ static struct i2c_driver goodix_ts_driver = {
.id_table = goodix_ts_id,
.driver = {
.name = "Goodix-TS",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(goodix_acpi_match),
.of_match_table = of_match_ptr(goodix_of_match),
},
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index da6dc819c846..ddf694b9fffc 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -216,7 +216,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
/* get panel info */
error = ili210x_read_reg(client, REG_PANEL_INFO, &panel, sizeof(panel));
if (error) {
- dev_err(dev, "Failed to get panel informations, err: %d\n",
+ dev_err(dev, "Failed to get panel information, err: %d\n",
error);
return error;
}
@@ -276,7 +276,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
error = input_register_device(priv->input);
if (error) {
- dev_err(dev, "Cannot regiser input device, err: %d\n", error);
+ dev_err(dev, "Cannot register input device, err: %d\n", error);
goto err_remove_sysfs;
}
@@ -343,7 +343,6 @@ MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
static struct i2c_driver ili210x_ts_driver = {
.driver = {
.name = "ili210x_i2c",
- .owner = THIS_MODULE,
.pm = &ili210x_i2c_pm,
},
.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
new file mode 100644
index 000000000000..ff0b75813daa
--- /dev/null
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -0,0 +1,523 @@
+/*
+ * Freescale i.MX6UL touchscreen controller driver
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+/* ADC configuration registers field define */
+#define ADC_AIEN (0x1 << 7)
+#define ADC_CONV_DISABLE 0x1F
+#define ADC_CAL (0x1 << 7)
+#define ADC_CALF 0x2
+#define ADC_12BIT_MODE (0x2 << 2)
+#define ADC_IPG_CLK 0x00
+#define ADC_CLK_DIV_8 (0x03 << 5)
+#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
+#define ADC_HARDWARE_TRIGGER (0x1 << 13)
+#define SELECT_CHANNEL_4 0x04
+#define SELECT_CHANNEL_1 0x01
+#define DISABLE_CONVERSION_INT (0x0 << 7)
+
+/* ADC registers */
+#define REG_ADC_HC0 0x00
+#define REG_ADC_HC1 0x04
+#define REG_ADC_HC2 0x08
+#define REG_ADC_HC3 0x0C
+#define REG_ADC_HC4 0x10
+#define REG_ADC_HS 0x14
+#define REG_ADC_R0 0x18
+#define REG_ADC_CFG 0x2C
+#define REG_ADC_GC 0x30
+#define REG_ADC_GS 0x34
+
+#define ADC_TIMEOUT msecs_to_jiffies(100)
+
+/* TSC registers */
+#define REG_TSC_BASIC_SETING 0x00
+#define REG_TSC_PRE_CHARGE_TIME 0x10
+#define REG_TSC_FLOW_CONTROL 0x20
+#define REG_TSC_MEASURE_VALUE 0x30
+#define REG_TSC_INT_EN 0x40
+#define REG_TSC_INT_SIG_EN 0x50
+#define REG_TSC_INT_STATUS 0x60
+#define REG_TSC_DEBUG_MODE 0x70
+#define REG_TSC_DEBUG_MODE2 0x80
+
+/* TSC configuration registers field define */
+#define DETECT_4_WIRE_MODE (0x0 << 4)
+#define AUTO_MEASURE 0x1
+#define MEASURE_SIGNAL 0x1
+#define DETECT_SIGNAL (0x1 << 4)
+#define VALID_SIGNAL (0x1 << 8)
+#define MEASURE_INT_EN 0x1
+#define MEASURE_SIG_EN 0x1
+#define VALID_SIG_EN (0x1 << 8)
+#define DE_GLITCH_2 (0x2 << 29)
+#define START_SENSE (0x1 << 12)
+#define TSC_DISABLE (0x1 << 16)
+#define DETECT_MODE 0x2
+
+struct imx6ul_tsc {
+ struct device *dev;
+ struct input_dev *input;
+ void __iomem *tsc_regs;
+ void __iomem *adc_regs;
+ struct clk *tsc_clk;
+ struct clk *adc_clk;
+ struct gpio_desc *xnur_gpio;
+
+ int measure_delay_time;
+ int pre_charge_time;
+
+ struct completion completion;
+};
+
+/*
+ * The TSC module needs the ADC to get the measured value, so
+ * before configuring the TSC we must initialize the ADC module.
+ */
+static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+{
+ int adc_hc = 0;
+ int adc_gc;
+ int adc_gs;
+ int adc_cfg;
+ int timeout;
+
+ reinit_completion(&tsc->completion);
+
+ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+ adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
+ adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+ adc_cfg &= ~ADC_HARDWARE_TRIGGER;
+ writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+ /* enable calibration interrupt */
+ adc_hc |= ADC_AIEN;
+ adc_hc |= ADC_CONV_DISABLE;
+ writel(adc_hc, tsc->adc_regs + REG_ADC_HC0);
+
+ /* start ADC calibration */
+ adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
+ adc_gc |= ADC_CAL;
+ writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
+
+ timeout = wait_for_completion_timeout
+ (&tsc->completion, ADC_TIMEOUT);
+ if (timeout == 0)
+ dev_err(tsc->dev, "Timeout for adc calibration\n");
+
+ adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
+ if (adc_gs & ADC_CALF)
+ dev_err(tsc->dev, "ADC calibration failed\n");
+
+ /* The TSC needs the ADC to work in hardware-trigger mode */
+ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+ adc_cfg |= ADC_HARDWARE_TRIGGER;
+ writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+}
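
The calibration wait above pairs with adc_irq_fn() later in this file, which calls complete() once the conversion-complete flag is set. The generic shape of that handshake, as a hedged sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct foo_dev {
	struct completion done;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	complete(&foo->done);			/* wake the waiter */
	return IRQ_HANDLED;
}

static int foo_start_and_wait(struct foo_dev *foo)
{
	reinit_completion(&foo->done);		/* arm before starting */
	/* ... kick off the hardware operation here ... */
	if (!wait_for_completion_timeout(&foo->done,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}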
+
+/*
+ * This is a TSC workaround. Currently the TSC misconnects two
+ * ADC channels, so this function remaps the channel configuration
+ * for hardware triggering.
+ */
+static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
+{
+ int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
+
+ adc_hc0 = DISABLE_CONVERSION_INT;
+ writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
+
+ adc_hc1 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_4;
+ writel(adc_hc1, tsc->adc_regs + REG_ADC_HC1);
+
+ adc_hc2 = DISABLE_CONVERSION_INT;
+ writel(adc_hc2, tsc->adc_regs + REG_ADC_HC2);
+
+ adc_hc3 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_1;
+ writel(adc_hc3, tsc->adc_regs + REG_ADC_HC3);
+
+ adc_hc4 = DISABLE_CONVERSION_INT;
+ writel(adc_hc4, tsc->adc_regs + REG_ADC_HC4);
+}
+
+/*
+ * TSC setup: configure the pre-charge time and the measure delay time.
+ * Different touchscreens may need different pre-charge and measure
+ * delay times.
+ */
+static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
+{
+ int basic_setting = 0;
+ int start;
+
+ basic_setting |= tsc->measure_delay_time << 8;
+ basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
+ writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETING);
+
+ writel(DE_GLITCH_2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+
+ writel(tsc->pre_charge_time, tsc->tsc_regs + REG_TSC_PRE_CHARGE_TIME);
+ writel(MEASURE_INT_EN, tsc->tsc_regs + REG_TSC_INT_EN);
+ writel(MEASURE_SIG_EN | VALID_SIG_EN,
+ tsc->tsc_regs + REG_TSC_INT_SIG_EN);
+
+ /* start sense detection */
+ start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+ start |= START_SENSE;
+ start &= ~TSC_DISABLE;
+ writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+}
+
+static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+{
+ imx6ul_adc_init(tsc);
+ imx6ul_tsc_channel_config(tsc);
+ imx6ul_tsc_set(tsc);
+}
+
+static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
+{
+ int tsc_flow;
+ int adc_cfg;
+
+ /* TSC controller enters idle state */
+ tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+ tsc_flow |= TSC_DISABLE;
+ writel(tsc_flow, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+
+ /* ADC controller enters stop mode */
+ adc_cfg = readl(tsc->adc_regs + REG_ADC_HC0);
+ adc_cfg |= ADC_CONV_DISABLE;
+ writel(adc_cfg, tsc->adc_regs + REG_ADC_HC0);
+}
+
+/* Delay some time (max 2 ms), waiting for the pre-charge to finish. */
+static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(2);
+ int state_machine;
+ int debug_mode2;
+
+ do {
+ if (time_after(jiffies, timeout))
+ return false;
+
+ usleep_range(200, 400);
+ debug_mode2 = readl(tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+ state_machine = (debug_mode2 >> 20) & 0x7;
+ } while (state_machine != DETECT_MODE);
+
+ usleep_range(200, 400);
+ return true;
+}
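
tsc_wait_detect_mode() is the usual poll-with-deadline idiom: bail out once jiffies passes the deadline, sleep briefly, then re-sample. The same shape as a generic hedged sketch (cond() is hypothetical):

#include <linux/delay.h>
#include <linux/jiffies.h>

static bool example_poll_until(bool (*cond)(void *), void *arg,
			       unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (time_after(jiffies, deadline))
			return false;
		usleep_range(200, 400);
	} while (!cond(arg));

	return true;
}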
+
+static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
+{
+ struct imx6ul_tsc *tsc = dev_id;
+ int status;
+ int value;
+ int x, y;
+ int start;
+
+ status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
+
+ /* write 1 to clear the measure-signal bit */
+ writel(MEASURE_SIGNAL | DETECT_SIGNAL,
+ tsc->tsc_regs + REG_TSC_INT_STATUS);
+
+ /* It's a HW self-clearing bit. Set this bit to start sense detection */
+ start = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+ start |= START_SENSE;
+ writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
+
+ if (status & MEASURE_SIGNAL) {
+ value = readl(tsc->tsc_regs + REG_TSC_MEASURE_VALUE);
+ x = (value >> 16) & 0x0fff;
+ y = value & 0x0fff;
+
+ /*
+ * In detect mode we can sample the xnur GPIO value;
+ * otherwise assume the contact is still active.
+ */
+ if (!tsc_wait_detect_mode(tsc) ||
+ gpiod_get_value_cansleep(tsc->xnur_gpio)) {
+ input_report_key(tsc->input, BTN_TOUCH, 1);
+ input_report_abs(tsc->input, ABS_X, x);
+ input_report_abs(tsc->input, ABS_Y, y);
+ } else {
+ input_report_key(tsc->input, BTN_TOUCH, 0);
+ }
+
+ input_sync(tsc->input);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adc_irq_fn(int irq, void *dev_id)
+{
+ struct imx6ul_tsc *tsc = dev_id;
+ int coco;
+ int value;
+
+ coco = readl(tsc->adc_regs + REG_ADC_HS);
+ if (coco & 0x01) {
+ value = readl(tsc->adc_regs + REG_ADC_R0);
+ complete(&tsc->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imx6ul_tsc_open(struct input_dev *input_dev)
+{
+ struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+ int err;
+
+ err = clk_prepare_enable(tsc->adc_clk);
+ if (err) {
+ dev_err(tsc->dev,
+ "Could not prepare or enable the adc clock: %d\n",
+ err);
+ return err;
+ }
+
+ err = clk_prepare_enable(tsc->tsc_clk);
+ if (err) {
+ dev_err(tsc->dev,
+ "Could not prepare or enable the tsc clock: %d\n",
+ err);
+ clk_disable_unprepare(tsc->adc_clk);
+ return err;
+ }
+
+ imx6ul_tsc_init(tsc);
+
+ return 0;
+}
+
+static void imx6ul_tsc_close(struct input_dev *input_dev)
+{
+ struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+
+ imx6ul_tsc_disable(tsc);
+
+ clk_disable_unprepare(tsc->tsc_clk);
+ clk_disable_unprepare(tsc->adc_clk);
+}
+
+static int imx6ul_tsc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct imx6ul_tsc *tsc;
+ struct input_dev *input_dev;
+ struct resource *tsc_mem;
+ struct resource *adc_mem;
+ int err;
+ int tsc_irq;
+ int adc_irq;
+
+ tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+ if (!tsc)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ input_dev->name = "iMX6UL TouchScreen Controller";
+ input_dev->id.bustype = BUS_HOST;
+
+ input_dev->open = imx6ul_tsc_open;
+ input_dev->close = imx6ul_tsc_close;
+
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(input_dev, ABS_X, 0, 0xFFF, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 0xFFF, 0, 0);
+
+ input_set_drvdata(input_dev, tsc);
+
+ tsc->dev = &pdev->dev;
+ tsc->input = input_dev;
+ init_completion(&tsc->completion);
+
+ tsc->xnur_gpio = devm_gpiod_get(&pdev->dev, "xnur", GPIOD_IN);
+ if (IS_ERR(tsc->xnur_gpio)) {
+ err = PTR_ERR(tsc->xnur_gpio);
+ dev_err(&pdev->dev,
+ "failed to request GPIO tsc_X- (xnur): %d\n", err);
+ return err;
+ }
+
+ tsc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tsc->tsc_regs = devm_ioremap_resource(&pdev->dev, tsc_mem);
+ if (IS_ERR(tsc->tsc_regs)) {
+ err = PTR_ERR(tsc->tsc_regs);
+ dev_err(&pdev->dev, "failed to remap tsc memory: %d\n", err);
+ return err;
+ }
+
+ adc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ tsc->adc_regs = devm_ioremap_resource(&pdev->dev, adc_mem);
+ if (IS_ERR(tsc->adc_regs)) {
+ err = PTR_ERR(tsc->adc_regs);
+ dev_err(&pdev->dev, "failed to remap adc memory: %d\n", err);
+ return err;
+ }
+
+ tsc->tsc_clk = devm_clk_get(&pdev->dev, "tsc");
+ if (IS_ERR(tsc->tsc_clk)) {
+ err = PTR_ERR(tsc->tsc_clk);
+ dev_err(&pdev->dev, "failed getting tsc clock: %d\n", err);
+ return err;
+ }
+
+ tsc->adc_clk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(tsc->adc_clk)) {
+ err = PTR_ERR(tsc->adc_clk);
+ dev_err(&pdev->dev, "failed getting adc clock: %d\n", err);
+ return err;
+ }
+
+ tsc_irq = platform_get_irq(pdev, 0);
+ if (tsc_irq < 0) {
+ dev_err(&pdev->dev, "no tsc irq resource?\n");
+ return tsc_irq;
+ }
+
+ adc_irq = platform_get_irq(pdev, 1);
+ if (adc_irq <= 0) {
+ dev_err(&pdev->dev, "no adc irq resource?\n");
+ return adc_irq;
+ }
+
+ err = devm_request_threaded_irq(tsc->dev, tsc_irq,
+ NULL, tsc_irq_fn, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tsc);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed requesting tsc irq %d: %d\n",
+ tsc_irq, err);
+ return err;
+ }
+
+ err = devm_request_irq(tsc->dev, adc_irq, adc_irq_fn, 0,
+ dev_name(&pdev->dev), tsc);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed requesting adc irq %d: %d\n",
+ adc_irq, err);
+ return err;
+ }
+
+ err = of_property_read_u32(np, "measure-delay-time",
+ &tsc->measure_delay_time);
+ if (err)
+ tsc->measure_delay_time = 0xffff;
+
+ err = of_property_read_u32(np, "pre-charge-time",
+ &tsc->pre_charge_time);
+ if (err)
+ tsc->pre_charge_time = 0xfff;
+
+ err = input_register_device(tsc->input);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register input device: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tsc);
+ return 0;
+}
+
+static int __maybe_unused imx6ul_tsc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = tsc->input;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users) {
+ imx6ul_tsc_disable(tsc);
+
+ clk_disable_unprepare(tsc->tsc_clk);
+ clk_disable_unprepare(tsc->adc_clk);
+ }
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx6ul_tsc *tsc = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = tsc->input;
+ int retval = 0;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users) {
+ retval = clk_prepare_enable(tsc->adc_clk);
+ if (retval)
+ goto out;
+
+ retval = clk_prepare_enable(tsc->tsc_clk);
+ if (retval) {
+ clk_disable_unprepare(tsc->adc_clk);
+ goto out;
+ }
+
+ imx6ul_tsc_init(tsc);
+ }
+
+out:
+ mutex_unlock(&input_dev->mutex);
+ return retval;
+}
+
+static SIMPLE_DEV_PM_OPS(imx6ul_tsc_pm_ops,
+ imx6ul_tsc_suspend, imx6ul_tsc_resume);
+
+static const struct of_device_id imx6ul_tsc_match[] = {
+ { .compatible = "fsl,imx6ul-tsc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx6ul_tsc_match);
+
+static struct platform_driver imx6ul_tsc_driver = {
+ .driver = {
+ .name = "imx6ul-tsc",
+ .of_match_table = imx6ul_tsc_match,
+ .pm = &imx6ul_tsc_pm_ops,
+ },
+ .probe = imx6ul_tsc_probe,
+};
+module_platform_driver(imx6ul_tsc_driver);
+
+MODULE_AUTHOR("Haibo Chen <haibo.chen@freescale.com>");
+MODULE_DESCRIPTION("Freescale i.MX6UL Touchscreen controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index a68ec142ee9a..82079cde849c 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -229,7 +229,6 @@ MODULE_DEVICE_TABLE(i2c, max11801_ts_id);
static struct i2c_driver max11801_ts_driver = {
.driver = {
.name = "max11801_ts",
- .owner = THIS_MODULE,
},
.id_table = max11801_ts_id,
.probe = max11801_ts_probe,
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 67c0d31613d8..7cce87650fc8 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -572,12 +572,12 @@ static const struct of_device_id mms114_dt_match[] = {
{ .compatible = "melfas,mms114" },
{ }
};
+MODULE_DEVICE_TABLE(of, mms114_dt_match);
#endif
static struct i2c_driver mms114_driver = {
.driver = {
.name = "mms114",
- .owner = THIS_MODULE,
.pm = &mms114_pm_ops,
.of_match_table = of_match_ptr(mms114_dt_match),
},
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 806cd0ad160f..bb6f2fe14667 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -9,12 +9,12 @@
*
*/
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-static bool touchscreen_get_prop_u32(struct device_node *np,
+static bool touchscreen_get_prop_u32(struct device *dev,
const char *property,
unsigned int default_value,
unsigned int *value)
@@ -22,7 +22,7 @@ static bool touchscreen_get_prop_u32(struct device_node *np,
u32 val;
int error;
- error = of_property_read_u32(np, property, &val);
+ error = device_property_read_u32(dev, property, &val);
if (error) {
*value = default_value;
return false;
@@ -39,13 +39,9 @@ static void touchscreen_set_params(struct input_dev *dev,
struct input_absinfo *absinfo;
if (!test_bit(axis, dev->absbit)) {
- /*
- * Emit a warning only if the axis is not a multitouch
- * axis, which might not be set by the driver.
- */
- if (!input_is_mt_axis(axis))
- dev_warn(&dev->dev,
- "DT specifies parameters but the axis is not set up\n");
+ dev_warn(&dev->dev,
+ "DT specifies parameters but the axis %lu is not set up\n",
+ axis);
return;
}
@@ -55,52 +51,58 @@ static void touchscreen_set_params(struct input_dev *dev,
}
/**
- * touchscreen_parse_of_params - parse common touchscreen DT properties
- * @dev: device that should be parsed
+ * touchscreen_parse_properties - parse common touchscreen DT properties
+ * @input: input device that should be parsed
+ * @multitouch: specifies whether parsed properties should be applied to
+ * single-touch or multi-touch axes
*
* This function parses common DT properties for touchscreens and setups the
- * input device accordingly. The function keeps previously setuped default
+ * input device accordingly. The function keeps previously set up default
* values if no value is specified via DT.
*/
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
{
- struct device_node *np = dev->dev.parent->of_node;
+ struct device *dev = input->dev.parent;
unsigned int axis;
unsigned int maximum, fuzz;
bool data_present;
- input_alloc_absinfo(dev);
- if (!dev->absinfo)
+ input_alloc_absinfo(input);
+ if (!input->absinfo)
return;
axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
- data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
- input_abs_get_max(dev, axis),
+ data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+ input_abs_get_max(input,
+ axis) + 1,
&maximum) |
- touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
- input_abs_get_fuzz(dev, axis),
+ touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+ input_abs_get_fuzz(input, axis),
&fuzz);
if (data_present)
- touchscreen_set_params(dev, axis, maximum, fuzz);
+ touchscreen_set_params(input, axis, maximum - 1, fuzz);
axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
- data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
- input_abs_get_max(dev, axis),
+ data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+ input_abs_get_max(input,
+ axis) + 1,
&maximum) |
- touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
- input_abs_get_fuzz(dev, axis),
+ touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+ input_abs_get_fuzz(input, axis),
&fuzz);
if (data_present)
- touchscreen_set_params(dev, axis, maximum, fuzz);
+ touchscreen_set_params(input, axis, maximum - 1, fuzz);
axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
- data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
- input_abs_get_max(dev, axis),
+ data_present = touchscreen_get_prop_u32(dev,
+ "touchscreen-max-pressure",
+ input_abs_get_max(input, axis),
&maximum) |
- touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
- input_abs_get_fuzz(dev, axis),
+ touchscreen_get_prop_u32(dev,
+ "touchscreen-fuzz-pressure",
+ input_abs_get_fuzz(input, axis),
&fuzz);
if (data_present)
- touchscreen_set_params(dev, axis, maximum, fuzz);
+ touchscreen_set_params(input, axis, maximum, fuzz);
}
-EXPORT_SYMBOL(touchscreen_parse_of_params);
+EXPORT_SYMBOL(touchscreen_parse_properties);
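
Callers must declare the axes they support before invoking the helper, since parsed values are only applied to axes that are already set up (touchscreen_set_params() warns and bails otherwise). Note also that touchscreen-size-* is a pixel count, hence the +1/-1 conversion to and from a maximum coordinate above. A minimal hedged usage sketch, mirroring what pixcir_i2c_ts does below:

#include <linux/input.h>
#include <linux/input/touchscreen.h>

static void example_setup_mt_axes(struct input_dev *input)
{
	/* declare the multitouch axes first ... */
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);

	/* ... then apply touchscreen-size-x/y and fuzz properties */
	touchscreen_parse_properties(input, true);
}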
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 8f3e243a62bf..91621725bfb5 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -24,20 +24,23 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
-#include <linux/input/pixcir_ts.h>
+#include <linux/input/touchscreen.h>
#include <linux/gpio.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
+#include <linux/platform_data/pixcir_i2c_ts.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
struct pixcir_i2c_ts_data {
struct i2c_client *client;
struct input_dev *input;
- const struct pixcir_ts_platform_data *pdata;
- bool running;
+ struct gpio_desc *gpio_attb;
+ struct gpio_desc *gpio_reset;
+ const struct pixcir_i2c_chip_data *chip;
int max_fingers; /* Max fingers supported in this instance */
+ bool running;
};
struct pixcir_touch {
@@ -60,7 +63,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
u8 touch;
int ret, i;
int readsize;
- const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip;
+ const struct pixcir_i2c_chip_data *chip = tsdata->chip;
memset(report, 0, sizeof(struct pixcir_report_data));
@@ -113,13 +116,13 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
struct pixcir_touch *touch;
int n, i, slot;
struct device *dev = &ts->client->dev;
- const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip;
+ const struct pixcir_i2c_chip_data *chip = ts->chip;
n = report->num_touches;
if (n > PIXCIR_MAX_SLOTS)
n = PIXCIR_MAX_SLOTS;
- if (!chip->has_hw_ids) {
+ if (!ts->chip->has_hw_ids) {
for (i = 0; i < n; i++) {
touch = &report->touches[i];
pos[i].x = touch->x;
@@ -161,7 +164,6 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
{
struct pixcir_i2c_ts_data *tsdata = dev_id;
- const struct pixcir_ts_platform_data *pdata = tsdata->pdata;
struct pixcir_report_data report;
while (tsdata->running) {
@@ -171,7 +173,7 @@ static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
/* report it */
pixcir_ts_report(tsdata, &report);
- if (gpio_get_value(pdata->gpio_attb)) {
+ if (gpiod_get_value_cansleep(tsdata->gpio_attb)) {
if (report.num_touches) {
/*
* Last report with no finger up?
@@ -189,6 +191,17 @@ static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void pixcir_reset(struct pixcir_i2c_ts_data *tsdata)
+{
+ if (!IS_ERR_OR_NULL(tsdata->gpio_reset)) {
+ gpiod_set_value_cansleep(tsdata->gpio_reset, 1);
+ ndelay(100); /* datasheet section 1.2.3 says 80ns min. */
+ gpiod_set_value_cansleep(tsdata->gpio_reset, 0);
+ /* wait for the controller to become ready; 100 ms is a guess */
+ msleep(100);
+ }
+}
+
static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
enum pixcir_power_mode mode)
{
@@ -411,85 +424,59 @@ static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
#ifdef CONFIG_OF
static const struct of_device_id pixcir_of_match[];
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+ struct pixcir_i2c_ts_data *tsdata)
{
- struct pixcir_ts_platform_data *pdata;
- struct device_node *np = dev->of_node;
const struct of_device_id *match;
match = of_match_device(of_match_ptr(pixcir_of_match), dev);
if (!match)
- return ERR_PTR(-EINVAL);
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data;
-
- pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0);
- /* gpio_attb validity is checked in probe */
-
- if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) {
- dev_err(dev, "Failed to get touchscreen-size-x property\n");
- return ERR_PTR(-EINVAL);
- }
- pdata->x_max -= 1;
-
- if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) {
- dev_err(dev, "Failed to get touchscreen-size-y property\n");
- return ERR_PTR(-EINVAL);
- }
- pdata->y_max -= 1;
+ return -EINVAL;
- dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__,
- pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb);
+ tsdata->chip = (const struct pixcir_i2c_chip_data *)match->data;
+ if (!tsdata->chip)
+ return -EINVAL;
- return pdata;
+ return 0;
}
#else
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+ struct pixcir_i2c_ts_data *tsdata)
{
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
#endif
static int pixcir_i2c_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
const struct pixcir_ts_platform_data *pdata =
dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
int error;
- if (np && !pdata) {
- pdata = pixcir_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
+ tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata)
+ return -ENOMEM;
- if (!pdata) {
+ if (pdata) {
+ tsdata->chip = &pdata->chip;
+ } else if (dev->of_node) {
+ error = pixcir_parse_dt(dev, tsdata);
+ if (error)
+ return error;
+ } else {
dev_err(&client->dev, "platform data not defined\n");
return -EINVAL;
}
- if (!gpio_is_valid(pdata->gpio_attb)) {
- dev_err(dev, "Invalid gpio_attb in pdata\n");
+ if (!tsdata->chip->max_fingers) {
+ dev_err(dev, "Invalid max_fingers in chip data\n");
return -EINVAL;
}
- if (!pdata->chip.max_fingers) {
- dev_err(dev, "Invalid max_fingers in pdata\n");
- return -EINVAL;
- }
-
- tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
- if (!tsdata)
- return -ENOMEM;
-
input = devm_input_allocate_device(dev);
if (!input) {
dev_err(dev, "Failed to allocate input device\n");
@@ -498,7 +485,6 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
tsdata->client = client;
tsdata->input = input;
- tsdata->pdata = pdata;
input->name = client->name;
input->id.bustype = BUS_I2C;
@@ -506,15 +492,21 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input->close = pixcir_input_close;
input->dev.parent = &client->dev;
- __set_bit(EV_KEY, input->evbit);
- __set_bit(EV_ABS, input->evbit);
- __set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+ if (pdata) {
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+ } else {
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ touchscreen_parse_properties(input, true);
+ if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+ !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+ dev_err(dev, "Touchscreen size is not specified\n");
+ return -EINVAL;
+ }
+ }
- tsdata->max_fingers = tsdata->pdata->chip.max_fingers;
+ tsdata->max_fingers = tsdata->chip->max_fingers;
if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
tsdata->max_fingers = PIXCIR_MAX_SLOTS;
dev_info(dev, "Limiting maximum fingers to %d\n",
@@ -530,10 +522,18 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
- error = devm_gpio_request_one(dev, pdata->gpio_attb,
- GPIOF_DIR_IN, "pixcir_i2c_attb");
- if (error) {
- dev_err(dev, "Failed to request ATTB gpio\n");
+ tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
+ if (IS_ERR(tsdata->gpio_attb)) {
+ error = PTR_ERR(tsdata->gpio_attb);
+ dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+ return error;
+ }
+
+ tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tsdata->gpio_reset)) {
+ error = PTR_ERR(tsdata->gpio_reset);
+ dev_err(dev, "Failed to request RESET gpio: %d\n", error);
return error;
}
@@ -545,6 +545,8 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
return error;
}
+ pixcir_reset(tsdata);
+
/* Always be in IDLE mode to save power, device supports auto wake */
error = pixcir_set_power_mode(tsdata, PIXCIR_POWER_IDLE);
if (error) {
@@ -602,7 +604,6 @@ MODULE_DEVICE_TABLE(of, pixcir_of_match);
static struct i2c_driver pixcir_i2c_ts_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "pixcir_ts",
.pm = &pixcir_dev_pm_ops,
.of_match_table = of_match_ptr(pixcir_of_match),
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 697e26e52d54..e943678ce54c 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -296,7 +296,6 @@ static struct i2c_driver st1232_ts_driver = {
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(st1232_ts_dt_ids),
.pm = &st1232_ts_pm_ops,
},
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index c0116994067d..485794376ee5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -191,7 +191,7 @@ static void sun4i_ts_close(struct input_dev *dev)
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
}
-static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
+static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp)
{
/* No temp_data until the first irq */
if (ts->temp_data == -1)
@@ -202,7 +202,7 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, long *temp)
return 0;
}
-static int sun4i_get_tz_temp(void *data, long *temp)
+static int sun4i_get_tz_temp(void *data, int *temp)
{
return sun4i_get_temp(data, temp);
}
@@ -215,14 +215,14 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sun4i_ts_data *ts = dev_get_drvdata(dev);
- long temp;
+ int temp;
int error;
error = sun4i_get_temp(ts, &temp);
if (error)
return error;
- return sprintf(buf, "%ld\n", temp);
+ return sprintf(buf, "%d\n", temp);
}
static ssize_t show_temp_label(struct device *dev,
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8be7b9b79f20..3f117637e832 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -581,6 +581,7 @@ static int sur40_probe(struct usb_interface *interface,
sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
if (IS_ERR(sur40->alloc_ctx)) {
dev_err(sur40->dev, "Can't allocate buffer context");
+ error = PTR_ERR(sur40->alloc_ctx);
goto err_unreg_v4l2;
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index d8c025b0f88c..0f65d02eeb26 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -30,10 +30,11 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/tsc2005.h>
#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/*
* The touchscreen interface operates as follows:
@@ -61,16 +62,24 @@
#define TSC2005_CMD_12BIT 0x04
/* control byte 0 */
-#define TSC2005_REG_READ 0x0001
-#define TSC2005_REG_PND0 0x0002
-#define TSC2005_REG_X 0x0000
-#define TSC2005_REG_Y 0x0008
-#define TSC2005_REG_Z1 0x0010
-#define TSC2005_REG_Z2 0x0018
-#define TSC2005_REG_TEMP_HIGH 0x0050
-#define TSC2005_REG_CFR0 0x0060
-#define TSC2005_REG_CFR1 0x0068
-#define TSC2005_REG_CFR2 0x0070
+#define TSC2005_REG_READ 0x01 /* R/W access */
+#define TSC2005_REG_PND0 0x02 /* Power Not Down Control */
+#define TSC2005_REG_X (0x0 << 3)
+#define TSC2005_REG_Y (0x1 << 3)
+#define TSC2005_REG_Z1 (0x2 << 3)
+#define TSC2005_REG_Z2 (0x3 << 3)
+#define TSC2005_REG_AUX (0x4 << 3)
+#define TSC2005_REG_TEMP1 (0x5 << 3)
+#define TSC2005_REG_TEMP2 (0x6 << 3)
+#define TSC2005_REG_STATUS (0x7 << 3)
+#define TSC2005_REG_AUX_HIGH (0x8 << 3)
+#define TSC2005_REG_AUX_LOW (0x9 << 3)
+#define TSC2005_REG_TEMP_HIGH (0xA << 3)
+#define TSC2005_REG_TEMP_LOW (0xB << 3)
+#define TSC2005_REG_CFR0 (0xC << 3)
+#define TSC2005_REG_CFR1 (0xD << 3)
+#define TSC2005_REG_CFR2 (0xE << 3)
+#define TSC2005_REG_CONV_FUNC (0xF << 3)
/* configuration register 0 */
#define TSC2005_CFR0_PRECHARGE_276US 0x0040
@@ -112,20 +121,37 @@
#define TSC2005_SPI_MAX_SPEED_HZ 10000000
#define TSC2005_PENUP_TIME_MS 40
-struct tsc2005_spi_rd {
- struct spi_transfer spi_xfer;
- u32 spi_tx;
- u32 spi_rx;
+static const struct regmap_range tsc2005_writable_ranges[] = {
+ regmap_reg_range(TSC2005_REG_AUX_HIGH, TSC2005_REG_CFR2),
};
+static const struct regmap_access_table tsc2005_writable_table = {
+ .yes_ranges = tsc2005_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tsc2005_writable_ranges),
+};
+
+static struct regmap_config tsc2005_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_stride = 0x08,
+ .max_register = 0x78,
+ .read_flag_mask = TSC2005_REG_READ,
+ .write_flag_mask = TSC2005_REG_PND0,
+ .wr_table = &tsc2005_writable_table,
+ .use_single_rw = true,
+};
+
+struct tsc2005_data {
+ u16 x;
+ u16 y;
+ u16 z1;
+ u16 z2;
+} __packed;
+#define TSC2005_DATA_REGS 4
+
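The register index occupies bits [6:3] of control byte 0, so consecutive registers sit 8 apart (hence .reg_stride = 0x08) and the last register, TSC2005_REG_CONV_FUNC, is 0xF << 3 = 0x78 (hence .max_register). With the flag masks supplying TSC2005_REG_READ and TSC2005_REG_PND0, register access reduces to plain regmap calls; a hedged sketch (helper name illustrative, assuming ts->regmap was created from tsc2005_regmap_config as in probe() below):

#include <linux/regmap.h>

static int example_read_cfr0(struct tsc2005 *ts, unsigned int *val)
{
	/* regmap ORs in TSC2005_REG_READ via read_flag_mask */
	return regmap_read(ts->regmap, TSC2005_REG_CFR0, val);
}
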
struct tsc2005 {
struct spi_device *spi;
-
- struct spi_message spi_read_msg;
- struct tsc2005_spi_rd spi_x;
- struct tsc2005_spi_rd spi_y;
- struct tsc2005_spi_rd spi_z1;
- struct tsc2005_spi_rd spi_z2;
+ struct regmap *regmap;
struct input_dev *idev;
char phys[32];
@@ -154,7 +180,7 @@ struct tsc2005 {
struct regulator *vio;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
void (*set_reset)(bool enable);
};
@@ -182,62 +208,6 @@ static int tsc2005_cmd(struct tsc2005 *ts, u8 cmd)
return 0;
}
-static int tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
-{
- u32 tx = ((reg | TSC2005_REG_PND0) << 16) | value;
- struct spi_transfer xfer = {
- .tx_buf = &tx,
- .len = 4,
- .bits_per_word = 24,
- };
- struct spi_message msg;
- int error;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- error = spi_sync(ts->spi, &msg);
- if (error) {
- dev_err(&ts->spi->dev,
- "%s: failed, register: %x, value: %x, error: %d\n",
- __func__, reg, value, error);
- return error;
- }
-
- return 0;
-}
-
-static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
-{
- memset(rd, 0, sizeof(*rd));
-
- rd->spi_tx = (reg | TSC2005_REG_READ) << 16;
- rd->spi_xfer.tx_buf = &rd->spi_tx;
- rd->spi_xfer.rx_buf = &rd->spi_rx;
- rd->spi_xfer.len = 4;
- rd->spi_xfer.bits_per_word = 24;
- rd->spi_xfer.cs_change = !last;
-}
-
-static int tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
-{
- struct tsc2005_spi_rd spi_rd;
- struct spi_message msg;
- int error;
-
- tsc2005_setup_read(&spi_rd, reg, true);
-
- spi_message_init(&msg);
- spi_message_add_tail(&spi_rd.spi_xfer, &msg);
-
- error = spi_sync(ts->spi, &msg);
- if (error)
- return error;
-
- *value = spi_rd.spi_rx;
- return 0;
-}
-
static void tsc2005_update_pen_state(struct tsc2005 *ts,
int x, int y, int pressure)
{
@@ -266,26 +236,23 @@ static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
struct tsc2005 *ts = _ts;
unsigned long flags;
unsigned int pressure;
- u32 x, y;
- u32 z1, z2;
+ struct tsc2005_data tsdata;
int error;
/* read the coordinates */
- error = spi_sync(ts->spi, &ts->spi_read_msg);
+ error = regmap_bulk_read(ts->regmap, TSC2005_REG_X, &tsdata,
+ TSC2005_DATA_REGS);
if (unlikely(error))
goto out;
- x = ts->spi_x.spi_rx;
- y = ts->spi_y.spi_rx;
- z1 = ts->spi_z1.spi_rx;
- z2 = ts->spi_z2.spi_rx;
-
/* validate position */
- if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
+ if (unlikely(tsdata.x > MAX_12BIT || tsdata.y > MAX_12BIT))
goto out;
/* Skip reading if the pressure components are out of range */
- if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
+ if (unlikely(tsdata.z1 == 0 || tsdata.z2 > MAX_12BIT))
+ goto out;
+ if (unlikely(tsdata.z1 >= tsdata.z2))
goto out;
/*
@@ -293,8 +260,8 @@ static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
* the value before pen-up - that implies SPI fed us stale data
*/
if (!ts->pen_down &&
- ts->in_x == x && ts->in_y == y &&
- ts->in_z1 == z1 && ts->in_z2 == z2) {
+ ts->in_x == tsdata.x && ts->in_y == tsdata.y &&
+ ts->in_z1 == tsdata.z1 && ts->in_z2 == tsdata.z2) {
goto out;
}
@@ -302,20 +269,20 @@ static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
* At this point we are happy we have a valid and useful reading.
* Remember it for later comparisons. We may now begin downsampling.
*/
- ts->in_x = x;
- ts->in_y = y;
- ts->in_z1 = z1;
- ts->in_z2 = z2;
+ ts->in_x = tsdata.x;
+ ts->in_y = tsdata.y;
+ ts->in_z1 = tsdata.z1;
+ ts->in_z2 = tsdata.z2;
/* Compute touch pressure resistance using equation #1 */
- pressure = x * (z2 - z1) / z1;
+ pressure = tsdata.x * (tsdata.z2 - tsdata.z1) / tsdata.z1;
pressure = pressure * ts->x_plate_ohm / 4096;
if (unlikely(pressure > MAX_12BIT))
goto out;
spin_lock_irqsave(&ts->lock, flags);
- tsc2005_update_pen_state(ts, x, y, pressure);
+ tsc2005_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
mod_timer(&ts->penup_timer,
jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
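
For reference, the "equation #1" cited in the comment is the standard 4-wire resistive touch pressure formula; with 12-bit conversions (4096 counts full scale) it reads

	R_{touch} = R_{X\,plate} \cdot \frac{x}{4096} \cdot \left(\frac{z_2}{z_1} - 1\right)

The code evaluates x * (z2 - z1) / z1 first and scales by x_plate_ohm / 4096 afterwards, the same quantity arranged to stay within integer arithmetic.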
@@ -338,9 +305,9 @@ static void tsc2005_penup_timer(unsigned long data)
static void tsc2005_start_scan(struct tsc2005 *ts)
{
- tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
- tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
- tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
+ regmap_write(ts->regmap, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
+ regmap_write(ts->regmap, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
+ regmap_write(ts->regmap, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
}
@@ -351,8 +318,8 @@ static void tsc2005_stop_scan(struct tsc2005 *ts)
static void tsc2005_set_reset(struct tsc2005 *ts, bool enable)
{
- if (ts->reset_gpio >= 0)
- gpio_set_value(ts->reset_gpio, enable);
+ if (ts->reset_gpio)
+ gpiod_set_value_cansleep(ts->reset_gpio, enable);
else if (ts->set_reset)
ts->set_reset(enable);
}
@@ -388,11 +355,10 @@ static ssize_t tsc2005_selftest_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct spi_device *spi = to_spi_device(dev);
- struct tsc2005 *ts = spi_get_drvdata(spi);
- u16 temp_high;
- u16 temp_high_orig;
- u16 temp_high_test;
+ struct tsc2005 *ts = dev_get_drvdata(dev);
+ unsigned int temp_high;
+ unsigned int temp_high_orig;
+ unsigned int temp_high_test;
bool success = true;
int error;
@@ -403,7 +369,7 @@ static ssize_t tsc2005_selftest_show(struct device *dev,
*/
__tsc2005_disable(ts);
- error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
+ error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
if (error) {
dev_warn(dev, "selftest failed: read error %d\n", error);
success = false;
@@ -412,14 +378,14 @@ static ssize_t tsc2005_selftest_show(struct device *dev,
temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
- error = tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
+ error = regmap_write(ts->regmap, TSC2005_REG_TEMP_HIGH, temp_high_test);
if (error) {
dev_warn(dev, "selftest failed: write error %d\n", error);
success = false;
goto out;
}
- error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+ error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
if (error) {
dev_warn(dev, "selftest failed: read error %d after write\n",
error);
@@ -442,7 +408,7 @@ static ssize_t tsc2005_selftest_show(struct device *dev,
goto out;
/* test that the reset really happened */
- error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+ error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
if (error) {
dev_warn(dev, "selftest failed: read error %d after reset\n",
error);
@@ -474,8 +440,7 @@ static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct spi_device *spi = to_spi_device(dev);
- struct tsc2005 *ts = spi_get_drvdata(spi);
+ struct tsc2005 *ts = dev_get_drvdata(dev);
umode_t mode = attr->mode;
if (attr == &dev_attr_selftest.attr) {
@@ -495,7 +460,7 @@ static void tsc2005_esd_work(struct work_struct *work)
{
struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work.work);
int error;
- u16 r;
+ unsigned int r;
if (!mutex_trylock(&ts->mutex)) {
/*
@@ -511,7 +476,7 @@ static void tsc2005_esd_work(struct work_struct *work)
goto out;
/* We should be able to read register without disabling interrupts. */
- error = tsc2005_read(ts, TSC2005_REG_CFR0, &r);
+ error = regmap_read(ts->regmap, TSC2005_REG_CFR0, &r);
if (!error &&
!((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK)) {
goto out;
@@ -575,20 +540,6 @@ static void tsc2005_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
-{
- tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
- tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
- tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, false);
- tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, true);
-
- spi_message_init(&ts->spi_read_msg);
- spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
- spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
- spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
- spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
-}
-
static int tsc2005_probe(struct spi_device *spi)
{
const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -653,37 +604,30 @@ static int tsc2005_probe(struct spi_device *spi)
ts->spi = spi;
ts->idev = input_dev;
+ ts->regmap = devm_regmap_init_spi(spi, &tsc2005_regmap_config);
+ if (IS_ERR(ts->regmap))
+ return PTR_ERR(ts->regmap);
+
ts->x_plate_ohm = x_plate_ohm;
ts->esd_timeout = esd_timeout;
- if (np) {
- ts->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
- if (ts->reset_gpio == -EPROBE_DEFER)
- return ts->reset_gpio;
- if (ts->reset_gpio < 0) {
- dev_err(&spi->dev, "error acquiring reset gpio: %d\n",
- ts->reset_gpio);
- return ts->reset_gpio;
- }
+ ts->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ dev_err(&spi->dev, "error acquiring reset gpio: %d\n", error);
+ return error;
+ }
- error = devm_gpio_request_one(&spi->dev, ts->reset_gpio, 0,
- "reset-gpios");
- if (error) {
- dev_err(&spi->dev, "error requesting reset gpio: %d\n",
- error);
- return error;
- }
+ ts->vio = devm_regulator_get_optional(&spi->dev, "vio");
+ if (IS_ERR(ts->vio)) {
+ error = PTR_ERR(ts->vio);
+ dev_err(&spi->dev, "vio regulator missing (%d)", error);
+ return error;
+ }
- ts->vio = devm_regulator_get(&spi->dev, "vio");
- if (IS_ERR(ts->vio)) {
- error = PTR_ERR(ts->vio);
- dev_err(&spi->dev, "vio regulator missing (%d)", error);
- return error;
- }
- } else {
- ts->reset_gpio = -1;
+ if (!ts->reset_gpio && pdata)
ts->set_reset = pdata->set_reset;
- }
mutex_init(&ts->mutex);
@@ -692,8 +636,6 @@ static int tsc2005_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&ts->esd_work, tsc2005_esd_work);
- tsc2005_setup_spi_xfer(ts);
-
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(&spi->dev));
@@ -709,7 +651,7 @@ static int tsc2005_probe(struct spi_device *spi)
input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
if (np)
- touchscreen_parse_of_params(input_dev, false);
+ touchscreen_parse_properties(input_dev, false);
input_dev->open = tsc2005_open;
input_dev->close = tsc2005_close;
@@ -735,7 +677,7 @@ static int tsc2005_probe(struct spi_device *spi)
return error;
}
- spi_set_drvdata(spi, ts);
+ dev_set_drvdata(&spi->dev, ts);
error = sysfs_create_group(&spi->dev.kobj, &tsc2005_attr_group);
if (error) {
dev_err(&spi->dev,
@@ -763,7 +705,7 @@ disable_regulator:
static int tsc2005_remove(struct spi_device *spi)
{
- struct tsc2005 *ts = spi_get_drvdata(spi);
+ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
@@ -775,8 +717,7 @@ static int tsc2005_remove(struct spi_device *spi)
static int __maybe_unused tsc2005_suspend(struct device *dev)
{
- struct spi_device *spi = to_spi_device(dev);
- struct tsc2005 *ts = spi_get_drvdata(spi);
+ struct tsc2005 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->mutex);
@@ -792,8 +733,7 @@ static int __maybe_unused tsc2005_suspend(struct device *dev)
static int __maybe_unused tsc2005_resume(struct device *dev)
{
- struct spi_device *spi = to_spi_device(dev);
- struct tsc2005 *ts = spi_get_drvdata(spi);
+ struct tsc2005 *ts = dev_get_drvdata(dev);
mutex_lock(&ts->mutex);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index ccc8aa615709..5d0cd51c6f41 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -482,7 +482,6 @@ MODULE_DEVICE_TABLE(of, tsc2007_of_match);
static struct i2c_driver tsc2007_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tsc2007",
.of_match_table = of_match_ptr(tsc2007_of_match),
},
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
goto err_out;
}
+ /* TSC-25 data sheet specifies a delay after the RESET command */
+ msleep(150);
+
/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 32f8ac003936..8d7a2852caef 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -271,7 +271,6 @@ MODULE_DEVICE_TABLE(i2c, wacom_i2c_id);
static struct i2c_driver wacom_i2c_driver = {
.driver = {
.name = "wacom_i2c",
- .owner = THIS_MODULE,
.pm = &wacom_i2c_pm,
},
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index fb92ae1c5fae..515c20a6e10f 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -23,7 +23,7 @@
#include <asm/unaligned.h>
#define WDT87XX_NAME "wdt87xx_i2c"
-#define WDT87XX_DRV_VER "0.9.6"
+#define WDT87XX_DRV_VER "0.9.7"
#define WDT87XX_FW_NAME "wdt87xx_fw.bin"
#define WDT87XX_CFG_NAME "wdt87xx_cfg.bin"
@@ -85,6 +85,11 @@
#define CTL_PARAM_OFFSET_PHY_H 24
#define CTL_PARAM_OFFSET_FACTOR 32
+/* The definition of the device descriptor */
+#define WDT_GD_DEVICE 1
+#define DEV_DESC_OFFSET_VID 8
+#define DEV_DESC_OFFSET_PID 10
+
/* Communication commands */
#define PACKET_SIZE 56
#define VND_REQ_READ 0x06
@@ -152,6 +157,7 @@
/* Controller requires minimum 300us between commands */
#define WDT_COMMAND_DELAY_MS 2
#define WDT_FLASH_WRITE_DELAY_MS 4
+#define WDT_FW_RESET_TIME 2500
struct wdt87xx_sys_param {
u16 fw_id;
@@ -165,6 +171,8 @@ struct wdt87xx_sys_param {
u16 scaling_factor;
u32 max_x;
u32 max_y;
+ u16 vendor_id;
+ u16 product_id;
};
struct wdt87xx_data {
@@ -208,6 +216,32 @@ static int wdt87xx_i2c_xfer(struct i2c_client *client,
return 0;
}
+static int wdt87xx_get_desc(struct i2c_client *client, u8 desc_idx,
+ u8 *buf, size_t len)
+{
+ u8 tx_buf[] = { 0x22, 0x00, 0x10, 0x0E, 0x23, 0x00 };
+ int error;
+
+ tx_buf[2] |= desc_idx & 0xF;
+
+ error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+ buf, len);
+ if (error) {
+ dev_err(&client->dev, "get desc failed: %d\n", error);
+ return error;
+ }
+
+ if (buf[0] != len) {
+ dev_err(&client->dev, "unexpected response to get desc: %d\n",
+ buf[0]);
+ return -EINVAL;
+ }
+
+ mdelay(WDT_COMMAND_DELAY_MS);
+
+ return 0;
+}
+
static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
u8 *buf, size_t len)
{
@@ -373,7 +407,7 @@ static int wdt87xx_sw_reset(struct i2c_client *client)
}
/* Wait for the device to be ready */
- msleep(200);
+ msleep(WDT_FW_RESET_TIME);
return 0;
}
@@ -403,6 +437,15 @@ static int wdt87xx_get_sysparam(struct i2c_client *client,
u8 buf[PKT_READ_SIZE];
int error;
+ error = wdt87xx_get_desc(client, WDT_GD_DEVICE, buf, 18);
+ if (error) {
+ dev_err(&client->dev, "failed to get device desc\n");
+ return error;
+ }
+
+ param->vendor_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_VID);
+ param->product_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_PID);
+
error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
if (error) {
dev_err(&client->dev, "failed to get parameters\n");
@@ -994,6 +1037,8 @@ static int wdt87xx_ts_create_input_device(struct wdt87xx_data *wdt)
input->name = "WDT87xx Touchscreen";
input->id.bustype = BUS_I2C;
+ input->id.vendor = wdt->param.vendor_id;
+ input->id.product = wdt->param.product_id;
input->phys = wdt->phys;
input_set_abs_params(input, ABS_MT_POSITION_X, 0,
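
The wdt87xx hunk reads an 18-byte device descriptor and extracts the vendor and product IDs at fixed little-endian offsets. A minimal sketch of that parsing step, with a hypothetical example_parse_desc() helper:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	#define DEV_DESC_OFFSET_VID	8
	#define DEV_DESC_OFFSET_PID	10

	static void example_parse_desc(const u8 *buf, u16 *vid, u16 *pid)
	{
		/*
		 * The descriptor is a packed little-endian blob; its fields
		 * are not guaranteed to be naturally aligned, so use the
		 * unaligned accessors rather than casting to u16 *.
		 */
		*vid = get_unaligned_le16(buf + DEV_DESC_OFFSET_VID);
		*pid = get_unaligned_le16(buf + DEV_DESC_OFFSET_PID);
	}
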
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b1ae77995968..1534e9b0788c 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -732,8 +732,7 @@ static int wm97xx_remove(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int wm97xx_suspend(struct device *dev, pm_message_t state)
+static int __maybe_unused wm97xx_suspend(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
u16 reg;
@@ -765,7 +764,7 @@ static int wm97xx_suspend(struct device *dev, pm_message_t state)
return 0;
}
-static int wm97xx_resume(struct device *dev)
+static int __maybe_unused wm97xx_resume(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
@@ -799,10 +798,7 @@ static int wm97xx_resume(struct device *dev)
return 0;
}
-#else
-#define wm97xx_suspend NULL
-#define wm97xx_resume NULL
-#endif
+static SIMPLE_DEV_PM_OPS(wm97xx_pm_ops, wm97xx_suspend, wm97xx_resume);
/*
* Machine specific operations
@@ -836,8 +832,7 @@ static struct device_driver wm97xx_driver = {
.owner = THIS_MODULE,
.probe = wm97xx_probe,
.remove = wm97xx_remove,
- .suspend = wm97xx_suspend,
- .resume = wm97xx_resume,
+ .pm = &wm97xx_pm_ops,
};
static int __init wm97xx_init(void)
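
The wm97xx hunk trades the #ifdef CONFIG_PM block and its NULL fallbacks for __maybe_unused callbacks plus a dev_pm_ops table. A minimal sketch of the resulting shape, with hypothetical example_* names:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int __maybe_unused example_suspend(struct device *dev)
	{
		return 0;
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		return 0;
	}

	/*
	 * SIMPLE_DEV_PM_OPS() defines an empty table when CONFIG_PM_SLEEP
	 * is off, and __maybe_unused keeps the callbacks compiling without
	 * unused-function warnings, so the #ifdef block can go.
	 */
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
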
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index f58a196521a9..781d0f83050a 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -24,14 +24,13 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/input/mt.h>
#include <linux/platform_data/zforce_ts.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#define WAIT_TIMEOUT msecs_to_jiffies(1000)
@@ -120,6 +119,9 @@ struct zforce_ts {
struct regulator *reg_vdd;
+ struct gpio_desc *gpio_int;
+ struct gpio_desc *gpio_rst;
+
bool suspending;
bool suspended;
bool boot_complete;
@@ -161,6 +163,16 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
return 0;
}
+static void zforce_reset_assert(struct zforce_ts *ts)
+{
+ gpiod_set_value_cansleep(ts->gpio_rst, 1);
+}
+
+static void zforce_reset_deassert(struct zforce_ts *ts)
+{
+ gpiod_set_value_cansleep(ts->gpio_rst, 0);
+}
+
static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
{
struct i2c_client *client = ts->client;
@@ -479,7 +491,6 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = ts->pdata;
int ret;
u8 payload_buffer[FRAME_MAXSIZE];
u8 *payload;
@@ -499,7 +510,16 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
if (!ts->suspending && device_may_wakeup(&client->dev))
pm_stay_awake(&client->dev);
- while (!gpio_get_value(pdata->gpio_int)) {
+ /*
+ * Run at least once and exit the loop if
+ * - the optional interrupt GPIO isn't specified
+ * (then only one packet is read per ISR invocation)
+ * or
+ * - the GPIO isn't active any more
+ * (packets are read until the level GPIO indicates
+ * that no IRQ is pending any more)
+ */
+ do {
ret = zforce_read_packet(ts, payload_buffer);
if (ret < 0) {
dev_err(&client->dev,
@@ -566,7 +586,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
payload[RESPONSE_ID]);
break;
}
- }
+ } while (gpiod_get_value_cansleep(ts->gpio_int));
if (!ts->suspending && device_may_wakeup(&client->dev))
pm_relax(&client->dev);
@@ -690,7 +710,7 @@ static void zforce_reset(void *data)
{
struct zforce_ts *ts = data;
- gpio_set_value(ts->pdata->gpio_rst, 0);
+ zforce_reset_assert(ts);
udelay(10);
@@ -712,18 +732,6 @@ static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- pdata->gpio_int = of_get_gpio(np, 0);
- if (!gpio_is_valid(pdata->gpio_int)) {
- dev_err(dev, "failed to get interrupt gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata->gpio_rst = of_get_gpio(np, 1);
- if (!gpio_is_valid(pdata->gpio_rst)) {
- dev_err(dev, "failed to get reset gpio\n");
- return ERR_PTR(-EINVAL);
- }
-
if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
dev_err(dev, "failed to get x-size property\n");
return ERR_PTR(-EINVAL);
@@ -755,20 +763,49 @@ static int zforce_probe(struct i2c_client *client,
if (!ts)
return -ENOMEM;
- ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
- "zforce_ts_int");
- if (ret) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_int, ret);
+ ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->gpio_rst)) {
+ ret = PTR_ERR(ts->gpio_rst);
+ dev_err(&client->dev,
+ "failed to request reset GPIO: %d\n", ret);
return ret;
}
- ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
- GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
- if (ret) {
- dev_err(&client->dev, "request of gpio %d failed, %d\n",
- pdata->gpio_rst, ret);
- return ret;
+ if (ts->gpio_rst) {
+ ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
+ GPIOD_IN);
+ if (IS_ERR(ts->gpio_int)) {
+ ret = PTR_ERR(ts->gpio_int);
+ dev_err(&client->dev,
+ "failed to request interrupt GPIO: %d\n", ret);
+ return ret;
+ }
+ } else {
+ /*
+ * Deprecated GPIO handling for compatibility
+ * with legacy binding.
+ */
+
+ /* INT GPIO */
+ ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
+ GPIOD_IN);
+ if (IS_ERR(ts->gpio_int)) {
+ ret = PTR_ERR(ts->gpio_int);
+ dev_err(&client->dev,
+ "failed to request interrupt GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* RST GPIO */
+ ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->gpio_rst)) {
+ ret = PTR_ERR(ts->gpio_rst);
+ dev_err(&client->dev,
+ "failed to request reset GPIO: %d\n", ret);
+ return ret;
+ }
}
ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
@@ -863,7 +900,7 @@ static int zforce_probe(struct i2c_client *client,
i2c_set_clientdata(client, ts);
/* let the controller boot */
- gpio_set_value(pdata->gpio_rst, 1);
+ zforce_reset_deassert(ts);
ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
@@ -917,7 +954,6 @@ MODULE_DEVICE_TABLE(of, zforce_dt_idtable);
static struct i2c_driver zforce_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "zforce-ts",
.pm = &zforce_pm_ops,
.of_match_table = of_match_ptr(zforce_dt_idtable),
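
The zforce read loop above relies on the gpiod consumer API treating a NULL (absent optional) descriptor as a no-op that reads as 0. A minimal sketch of the same do/while shape, with a hypothetical example_drain_packets() helper:

	#include <linux/gpio/consumer.h>

	static int example_drain_packets(struct gpio_desc *gpio_int)
	{
		int packets = 0;

		do {
			/* read and dispatch one packet here */
			packets++;
		} while (gpiod_get_value_cansleep(gpio_int));

		/*
		 * With gpio_int == NULL (optional GPIO absent) the consumer
		 * call returns 0, so exactly one packet is handled per ISR
		 * run; with a level-triggered GPIO, packets are drained
		 * until the line deasserts.
		 */
		return packets;
	}
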
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f1fb1d3ccc56..4664c2a96c67 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,7 +23,8 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on ARM || ARM64 || COMPILE_TEST
+ # SWIOTLB guarantees a dma_to_phys() implementation
+ depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -222,7 +223,7 @@ config TEGRA_IOMMU_SMMU
select IOMMU_API
help
This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
- SoCs (Tegra30 up to Tegra132).
+ SoCs (Tegra30 up to Tegra210).
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..f82060e778a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
-static struct protection_domain *pt_domain;
-
static const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Default for device is pt_domain */
+ bool passthrough; /* Device is identity mapped */
struct {
bool enabled;
int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
static int protection_domain_init(struct protection_domain *domain);
/****************************************************************************
@@ -1838,8 +1835,8 @@ static void free_gcr3_table(struct protection_domain *domain)
free_gcr3_tbl_level2(domain->gcr3_tbl);
else if (domain->glx == 1)
free_gcr3_tbl_level1(domain->gcr3_tbl);
- else if (domain->glx != 0)
- BUG();
+ else
+ BUG_ON(domain->glx != 0);
free_page((unsigned long)domain->gcr3_tbl);
}
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
dev_data = get_dev_data(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
- if (!dev_data->iommu_v2 || !dev_data->passthrough)
+ if (!dev_data->passthrough)
return -EINVAL;
- if (pdev_iommuv2_enable(pdev) != 0)
- return -EINVAL;
+ if (dev_data->iommu_v2) {
+ if (pdev_iommuv2_enable(pdev) != 0)
+ return -EINVAL;
- dev_data->ats.enabled = true;
- dev_data->ats.qdep = pci_ats_queue_depth(pdev);
- dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ }
} else if (amd_iommu_iotlb_sup &&
pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
do_detach(head);
spin_unlock_irqrestore(&domain->lock, flags);
-
- /*
- * If we run in passthrough mode the device must be assigned to the
- * passthrough domain if it is detached from any other domain.
- * Make sure we can deassign from the pt_domain itself.
- */
- if (dev_data->passthrough &&
- (dev_data->domain == NULL && domain != pt_domain))
- __attach_device(dev_data, pt_domain);
}
/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- if (domain->flags & PD_IOMMUV2_MASK)
+ if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
pdev_iommuv2_disable(to_pci_dev(dev));
else if (dev_data->ats.enabled)
pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
BUG_ON(!dev_data);
- if (dev_data->iommu_v2)
+ if (iommu_pass_through || dev_data->iommu_v2)
iommu_request_dm_for_dev(dev);
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- dev->archdata.dma_ops = &nommu_dma_ops;
- } else {
+ else
dev->archdata.dma_ops = &amd_iommu_dma_ops;
- }
out:
iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
+ swiotlb = iommu_pass_through ? 1 : 0;
iommu_detected = 1;
- swiotlb = 0;
+
+ /*
+ * In case we don't initialize SWIOTLB (actually the common case
+ * when the AMD IOMMU is enabled), make sure global dma_ops are
+ * set as a fallback for devices not handled by this driver (for
+ * example non-PCI devices).
+ */
+ if (!swiotlb)
+ dma_ops = &nommu_dma_ops;
amd_iommu_stats_init();
@@ -2947,21 +2944,6 @@ out_err:
return NULL;
}
-static int alloc_passthrough_domain(void)
-{
- if (pt_domain != NULL)
- return 0;
-
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode = PAGE_MODE_NONE;
-
- return 0;
-}
-
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
*
*****************************************************************************/
-int __init amd_iommu_init_passthrough(void)
-{
- struct iommu_dev_data *dev_data;
- struct pci_dev *dev = NULL;
- int ret;
-
- ret = alloc_passthrough_domain();
- if (ret)
- return ret;
-
- for_each_pci_dev(dev) {
- if (!check_device(&dev->dev))
- continue;
-
- dev_data = get_dev_data(&dev->dev);
- dev_data->passthrough = true;
-
- attach_device(&dev->dev, pt_domain);
- }
-
- amd_iommu_stats_init();
-
- pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
- return 0;
-}
-
/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
struct amd_iommu *iommu;
int qdep;
- BUG_ON(!dev_data->ats.enabled);
+ /*
+ * There might be non-IOMMUv2 capable devices in an IOMMUv2
+ * domain.
+ */
+ if (!dev_data->ats.enabled)
+ continue;
qdep = dev_data->ats.qdep;
iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -3987,11 +3947,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (ret < 0)
return ret;
- ret = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto out_free_parent;
-
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
if (get_irq_table(devid, true))
index = info->ioapic_pin;
@@ -4002,7 +3957,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
- kfree(data);
goto out_free_parent;
}
@@ -4014,17 +3968,18 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
- if (i > 0) {
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- goto out_free_data;
- }
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_data;
+
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
+
return 0;
out_free_data:
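
The irq_remapping_alloc() hunks above stop seeding the first chip_data before the loop and allocate one per interrupt inside it, so the out_free_data path owns every element uniformly. A minimal stand-alone sketch of that allocate-in-loop/unwind-on-error pattern; example_alloc_all() and its sizes are invented:

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int example_alloc_all(void **slots, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* One allocation per iteration; no special first element. */
			slots[i] = kzalloc(16, GFP_KERNEL);
			if (!slots[i])
				goto out_free;
		}

		return 0;

	out_free:
		/* Unwind exactly what this loop allocated: slots [0, i). */
		while (--i >= 0)
			kfree(slots[i]);
		return -ENOMEM;
	}
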
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..5ef347a13cb5 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -154,7 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
u32 amd_iommu_max_pasid __read_mostly = ~0;
bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
return true;
}
-static int amd_iommu_init_dma(void)
-{
- if (iommu_pass_through)
- return amd_iommu_init_passthrough();
- else
- return amd_iommu_init_dma_ops();
-}
-
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
break;
case IOMMU_INTERRUPTS_EN:
- ret = amd_iommu_init_dma();
+ ret = amd_iommu_init_dma_ops();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
break;
case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..1131664b918b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
static void free_device_state(struct device_state *dev_state)
{
+ struct iommu_group *group;
+
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
*/
- iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+ group = iommu_group_get(&dev_state->pdev->dev);
+ if (WARN_ON(!group))
+ return;
+
+ iommu_detach_group(dev_state->domain, group);
+
+ iommu_group_put(group);
/* Everything is down now, free the IOMMUv2 domain */
iommu_domain_free(dev_state->domain);
@@ -348,8 +356,8 @@ static void free_pasid_states(struct device_state *dev_state)
free_pasid_states_level2(dev_state->states);
else if (dev_state->pasid_levels == 1)
free_pasid_states_level1(dev_state->states);
- else if (dev_state->pasid_levels != 0)
- BUG();
+ else
+ BUG_ON(dev_state->pasid_levels != 0);
free_page((unsigned long)dev_state->states);
}
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
struct device_state *dev_state;
+ struct iommu_group *group;
unsigned long flags;
int ret, tmp;
u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (ret)
goto out_free_domain;
- ret = iommu_attach_device(dev_state->domain, &pdev->dev);
- if (ret != 0)
+ group = iommu_group_get(&pdev->dev);
+ if (!group)
goto out_free_domain;
+ ret = iommu_attach_group(dev_state->domain, group);
+ if (ret != 0)
+ goto out_drop_group;
+
+ iommu_group_put(group);
+
spin_lock_irqsave(&state_lock, flags);
if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
return 0;
+out_drop_group:
+ iommu_group_put(group);
+
out_free_domain:
iommu_domain_free(dev_state->domain);
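
The amd_iommu_v2 change moves from per-device attach to group attach; iommu_group_get() takes a reference that has to be dropped on every exit path. A minimal sketch of that refcounting, with a hypothetical example_attach():

	#include <linux/errno.h>
	#include <linux/iommu.h>

	static int example_attach(struct iommu_domain *domain, struct device *dev)
	{
		struct iommu_group *group;
		int ret;

		group = iommu_group_get(dev);	/* takes a reference */
		if (!group)
			return -ENODEV;

		ret = iommu_attach_group(domain, group);

		iommu_group_put(group);		/* drop it on success and failure */
		return ret;
	}
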
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..dafaf59dc3b8 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -118,6 +118,7 @@
#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
#define ARM_SMMU_IRQ_CTRLACK 0x54
@@ -173,14 +174,14 @@
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
/* Common MSI config fields */
-#define MSI_CFG0_SH_SHIFT 60
-#define MSI_CFG0_SH_NSH (0UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_OSH (2UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_ISH (3UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_MEMATTR_SHIFT 56
-#define MSI_CFG0_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG0_MEMATTR_SHIFT)
#define MSI_CFG0_ADDR_SHIFT 2
#define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
+#define MSI_CFG2_SH_SHIFT 4
+#define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_MEMATTR_SHIFT 0
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)
#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
@@ -199,9 +200,10 @@
* Stream table.
*
* Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
*/
-#define STRTAB_L1_SZ_SHIFT 16
+#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8
#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +271,10 @@
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_SHIFT 24
+#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_SHIFT 26
+#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +544,9 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
u32 features;
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+ u32 options;
+
struct arm_smmu_cmdq cmdq;
struct arm_smmu_evtq evtq;
struct arm_smmu_priq priq;
@@ -602,11 +607,35 @@ struct arm_smmu_domain {
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+ { 0, NULL},
+};
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
@@ -1036,7 +1065,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1094,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
- strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+ strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -1301,33 +1331,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
- dsb(ishst);
- } else {
- dma_addr_t dma_addr;
- struct device *dev = smmu->dev;
-
- dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, dma_addr))
- dev_err(dev, "failed to flush pgtable at %p\n", addr);
- else
- dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
- }
-}
-
static struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
- .flush_pgtable = arm_smmu_flush_pgtable,
};
/* IOMMU API */
@@ -1502,6 +1509,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
+ .iommu_dev = smmu->dev,
};
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2020,21 +2028,31 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
void *strtab;
u64 reg;
- u32 size;
+ u32 size, l1size;
int ret;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- /* Calculate the L1 size, capped to the SIDSIZE */
- size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
- size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- if (size + STRTAB_SPLIT < smmu->sid_bits)
+ /*
+ * If we can resolve everything with a single L2 table, then we
+ * just need a single L1 descriptor. Otherwise, calculate the L1
+ * size, capped to the SIDSIZE.
+ */
+ if (smmu->sid_bits < STRTAB_SPLIT) {
+ size = 0;
+ } else {
+ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+ size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+ }
+ cfg->num_l1_ents = 1 << size;
+
+ size += STRTAB_SPLIT;
+ if (size < smmu->sid_bits)
dev_warn(smmu->dev,
"2-level strtab only covers %u/%u bits of SID\n",
- size + STRTAB_SPLIT, smmu->sid_bits);
+ size, smmu->sid_bits);
- cfg->num_l1_ents = 1 << size;
- size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
- strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+ l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+ strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
@@ -2055,8 +2073,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
ret = arm_smmu_init_l1_strtab(smmu);
if (ret)
dma_free_coherent(smmu->dev,
- cfg->num_l1_ents *
- (STRTAB_L1_DESC_DWORDS << 3),
+ l1size,
strtab,
cfg->strtab_dma);
return ret;
@@ -2168,6 +2185,7 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
int ret, irq;
+ u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
/* Disable IRQs first */
ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2222,13 +2240,13 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
if (IS_ERR_VALUE(ret))
dev_warn(smmu->dev,
"failed to enable priq irq\n");
+ else
+ irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
}
}
/* Enable interrupt generation on the SMMU */
- ret = arm_smmu_write_reg_sync(smmu,
- IRQ_CTRL_EVTQ_IRQEN |
- IRQ_CTRL_GERROR_IRQEN,
+ ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
if (ret)
dev_warn(smmu->dev, "failed to enable irqs\n");
@@ -2510,12 +2528,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
case IDR5_OAS_44_BIT:
smmu->oas = 44;
break;
+ default:
+ dev_info(smmu->dev,
+ "unknown output address size. Truncating to 48-bit\n");
+ /* Fallthrough */
case IDR5_OAS_48_BIT:
smmu->oas = 48;
- break;
- default:
- dev_err(smmu->dev, "unknown output address size!\n");
- return -ENXIO;
}
/* Set the DMA mask for our table walker */
@@ -2573,6 +2591,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
+ parse_driver_options(smmu);
+
/* Probe the h/w */
ret = arm_smmu_device_probe(smmu);
if (ret)
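
The 2-level stream-table sizing above caps the L1 at STRTAB_L1_SZ_SHIFT and now collapses to a single descriptor when one L2 table already covers all SID bits; keeping l1size around also lets the error path free exactly what was allocated. A small stand-alone calculation of the allocated L1 size under those rules (plain userspace C, hypothetical l1_size_bytes()):

	#include <stdio.h>

	#define STRTAB_L1_SZ_SHIFT	20	/* 1 MiB max -> 128k L1 entries */
	#define STRTAB_SPLIT		8	/* SID bits resolved per L2 table */
	#define STRTAB_L1_DESC_DWORDS	1	/* 8-byte L1 descriptors */

	static unsigned int l1_size_bytes(unsigned int sid_bits)
	{
		unsigned int size;

		if (sid_bits < STRTAB_SPLIT) {
			size = 0;	/* one L2 table covers everything */
		} else {
			size = STRTAB_L1_SZ_SHIFT - 3;	/* log2 of entry count cap */
			if (size > sid_bits - STRTAB_SPLIT)
				size = sid_bits - STRTAB_SPLIT;
		}
		return (1u << size) * (STRTAB_L1_DESC_DWORDS << 3);
	}

	int main(void)
	{
		/* e.g. 16 SID bits -> 256 L1 descriptors -> 2048 bytes */
		printf("%u\n", l1_size_bytes(16));
		return 0;
	}
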
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4cd0c29cb585..48a39dfa9777 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -37,6 +37,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -607,34 +608,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
}
}
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb(ishst);
- } else {
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of
- * recursion here as the SMMU table walker will not be wired
- * through another SMMU.
- */
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
- }
-}
-
static struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
- .flush_pgtable = arm_smmu_flush_pgtable,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -898,6 +875,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
+ .iommu_dev = smmu->dev,
};
smmu_domain->smmu = smmu;
@@ -1532,6 +1510,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
+ bool cttw_dt, cttw_reg;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\taddress translation ops\n");
}
- if (id & ID0_CTTW) {
+ /*
+ * In order for DMA API calls to work properly, we must defer to what
+ * the DT says about coherency, regardless of what the hardware claims.
+ * Fortunately, this also opens up a workaround for systems where the
+ * ID register value has ended up configured incorrectly.
+ */
+ cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+ cttw_reg = !!(id & ID0_CTTW);
+ if (cttw_dt)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
- dev_notice(smmu->dev, "\tcoherent table walk\n");
- }
+ if (cttw_dt || cttw_reg)
+ dev_notice(smmu->dev, "\t%scoherent table walk\n",
+ cttw_dt ? "" : "non-");
+ if (cttw_dt != cttw_reg)
+ dev_notice(smmu->dev,
+ "\t(IDR0.CTTW overridden by dma-coherent property)\n");
if (id & ID0_SMS) {
u32 smr, sid, mask;
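
The coherency probe above lets the device tree override IDR0.CTTW so the SMMU driver and the DMA API agree on coherency. A minimal sketch of that reconciliation; example_coherent_walk() and the CTTW bit position are assumptions of the sketch:

	#include <linux/of.h>
	#include <linux/of_address.h>
	#include <linux/printk.h>

	#define EXAMPLE_ID0_CTTW	(1 << 14)	/* assumed CTTW bit */

	static bool example_coherent_walk(struct device_node *np, u32 id0)
	{
		bool cttw_dt = of_dma_is_coherent(np);
		bool cttw_reg = !!(id0 & EXAMPLE_ID0_CTTW);

		if (cttw_dt || cttw_reg)
			pr_notice("\t%scoherent table walk\n",
				  cttw_dt ? "" : "non-");
		if (cttw_dt != cttw_reg)
			pr_notice("\t(IDR0.CTTW overridden by dma-coherent property)\n");

		/* The DT wins: the DMA API layer is configured from it. */
		return cttw_dt;
	}
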
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c9db04d4ef39..8757f8dfc4e5 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1068,7 +1068,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (intel_iommu_enabled)
iommu->iommu_dev = iommu_device_create(NULL, iommu,
intel_iommu_groups,
- iommu->name);
+ "%s", iommu->name);
return 0;
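
The one-line dmar change is a format-string fix: iommu->name was previously passed as the format argument itself, so any '%' in it would be parsed as a conversion specifier. A minimal sketch of the safe shape, with a hypothetical example_log():

	#include <linux/printk.h>

	static void example_log(const char *name)
	{
		/*
		 * pr_info(name) would let a '%' inside name be interpreted
		 * as a conversion specifier; pinning the format to "%s"
		 * prints the string verbatim, which is what the hunk above
		 * does for iommu_device_create().
		 */
		pr_info("%s\n", name);
	}
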
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index abeedc9a78c2..2570f2a25dc4 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -41,7 +41,6 @@ struct pamu_isr_data {
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
*/
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
@@ -599,7 +598,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
* this table to translate device transaction to appropriate corenet
* transaction.
*/
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
{
struct ome *ome;
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
}
/* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
{
int i, len;
struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
/*
* Create a coherence subdomain for a given memory block.
*/
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
{
struct device_node *np;
const __be32 *iprop;
@@ -988,7 +987,7 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
{(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
{(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
{(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
int irq;
phys_addr_t ppaact_phys;
phys_addr_t spaact_phys;
+ struct ome *omt;
phys_addr_t omt_phys;
size_t mem_size = 0;
unsigned int order = 0;
@@ -1200,7 +1200,7 @@ error:
return ret;
}
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
.driver = {
.name = "fsl-of-pamu",
},
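
The fsl_pamu hunks drop the __init/__initdata/__initconst annotations because a probe callback reachable from a persistent platform_driver may run after init memory has been freed. A minimal sketch of the corrected shape, with a hypothetical example driver:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)	/* no __init */
	{
		return 0;
	}

	/* Not __initdata: the driver core keeps referencing this after boot. */
	static struct platform_driver example_driver = {
		.driver = {
			.name = "example-pamu",
		},
		.probe = example_probe,
	};
	module_platform_driver(example_driver);
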
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..2d7349a3ee14 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -364,7 +364,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-/* domain represents a virtual machine, more than one devices
+/*
+ * A domain represents a virtual machine; more than one device
+ * across IOMMUs may be owned by one domain, e.g. a KVM guest.
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
@@ -372,11 +373,21 @@ static int hw_pass_through = 1;
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
+#define for_each_domain_iommu(idx, domain) \
+ for (idx = 0; idx < g_num_of_iommus; idx++) \
+ if (domain->iommu_refcnt[idx])
+
struct dmar_domain {
- int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
- /* bitmap of iommus this domain uses*/
+
+ unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
+ /* Refcount of devices per iommu */
+
+
+ u16 iommu_did[DMAR_UNITS_SUPPORTED];
+ /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -395,7 +406,6 @@ struct dmar_domain {
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
struct iommu_domain domain; /* generic domain data structure for
@@ -408,6 +418,10 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
+ struct {
+ u8 enabled:1;
+ u8 qdep;
+ } ats; /* ATS state */
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
@@ -461,10 +475,11 @@ static long list_size;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct device *dev);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+ struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+ struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
@@ -564,6 +579,36 @@ __setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
+static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
+{
+ struct dmar_domain **domains;
+ int idx = did >> 8;
+
+ domains = iommu->domains[idx];
+ if (!domains)
+ return NULL;
+
+ return domains[did & 0xff];
+}
+
+static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
+ struct dmar_domain *domain)
+{
+ struct dmar_domain **domains;
+ int idx = did >> 8;
+
+ if (!iommu->domains[idx]) {
+ size_t size = 256 * sizeof(struct dmar_domain *);
+ iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+ }
+
+ domains = iommu->domains[idx];
+ if (WARN_ON(!domains))
+ return;
+ else
+ domains[did & 0xff] = domain;
+}
+
static inline void *alloc_pgtable_page(int node)
{
struct page *page;
@@ -605,6 +650,11 @@ static inline int domain_type_is_vm(struct dmar_domain *domain)
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+ return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -659,7 +709,9 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
/* si_domain and vm domain should not get here. */
BUG_ON(domain_type_is_vm_or_si(domain));
- iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+ for_each_domain_iommu(iommu_id, domain)
+ break;
+
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
return NULL;
@@ -675,7 +727,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
domain->iommu_coherency = 1;
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+ for_each_domain_iommu(i, domain) {
found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
@@ -755,6 +807,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
+ entry = &root->lo;
if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
@@ -762,7 +815,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
}
devfn *= 2;
}
- entry = &root->lo;
if (*entry & 1)
context = phys_to_virt(*entry & VTD_PAGE_MASK);
else {
@@ -1162,9 +1214,9 @@ next:
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
-struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
+static struct page *domain_unmap(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
struct page *freelist = NULL;
@@ -1188,7 +1240,7 @@ struct page *domain_unmap(struct dmar_domain *domain,
return freelist;
}
-void dma_free_pagelist(struct page *freelist)
+static void dma_free_pagelist(struct page *freelist)
{
struct page *pg;
@@ -1356,24 +1408,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
bool found = false;
- unsigned long flags;
struct device_domain_info *info;
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!ecap_dev_iotlb_support(iommu->ecap))
return NULL;
if (!iommu->qi)
return NULL;
- spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
found = true;
break;
}
- spin_unlock_irqrestore(&device_domain_lock, flags);
if (!found || !info->dev || !dev_is_pci(info->dev))
return NULL;
@@ -1391,19 +1442,26 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
+ struct pci_dev *pdev;
+
if (!info || !dev_is_pci(info->dev))
return;
- pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
+ pdev = to_pci_dev(info->dev);
+ if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+ return;
+
+ info->ats.enabled = 1;
+ info->ats.qdep = pci_ats_queue_depth(pdev);
}
static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
- if (!info->dev || !dev_is_pci(info->dev) ||
- !pci_ats_enabled(to_pci_dev(info->dev)))
+ if (!info->ats.enabled)
return;
pci_disable_ats(to_pci_dev(info->dev));
+ info->ats.enabled = 0;
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1415,26 +1473,24 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (!pci_ats_enabled(pdev))
+ if (!info->ats.enabled)
continue;
sid = info->bus << 8 | info->devfn;
- qdep = pci_ats_queue_depth(pdev);
+ qdep = info->ats.qdep;
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages,
+ int ih, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+ u16 did = domain->iommu_did[iommu->seq_id];
BUG_ON(pages == 0);
@@ -1458,7 +1514,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
* flush. However, device IOTLB doesn't need to be flushed in this case.
*/
if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+ iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
+ addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1513,65 +1570,80 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
static int iommu_init_domains(struct intel_iommu *iommu)
{
- unsigned long ndomains;
- unsigned long nlongs;
+ u32 ndomains, nlongs;
+ size_t size;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%ld>\n",
+ pr_debug("%s: Number of Domains supported <%d>\n",
iommu->name, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
- /* TBD: there might be 64K domains,
- * consider other allocation for future chip
- */
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
pr_err("%s: Allocating domain id array failed\n",
iommu->name);
return -ENOMEM;
}
- iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
- GFP_KERNEL);
- if (!iommu->domains) {
+
+ size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+ iommu->domains = kzalloc(size, GFP_KERNEL);
+
+ if (iommu->domains) {
+ size = 256 * sizeof(struct dmar_domain *);
+ iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+ }
+
+ if (!iommu->domains || !iommu->domains[0]) {
pr_err("%s: Allocating domain array failed\n",
iommu->name);
kfree(iommu->domain_ids);
+ kfree(iommu->domains);
iommu->domain_ids = NULL;
+ iommu->domains = NULL;
return -ENOMEM;
}
+
+
/*
- * if Caching mode is set, then invalid translations are tagged
- * with domainid 0. Hence we need to pre-allocate it.
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
*/
- if (cap_caching_mode(iommu->cap))
- set_bit(0, iommu->domain_ids);
+ set_bit(0, iommu->domain_ids);
+
return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct dmar_domain *domain;
- int i;
+ struct device_domain_info *info, *tmp;
+ unsigned long flags;
- if ((iommu->domains) && (iommu->domain_ids)) {
- for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
- /*
- * Domain id 0 is reserved for invalid translation
- * if hardware supports caching mode.
- */
- if (cap_caching_mode(iommu->cap) && i == 0)
- continue;
+ if (!iommu->domains || !iommu->domain_ids)
+ return;
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
- if (domain_detach_iommu(domain, iommu) == 0 &&
- !domain_type_is_vm(domain))
- domain_exit(domain);
- }
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+ struct dmar_domain *domain;
+
+ if (info->iommu != iommu)
+ continue;
+
+ if (!info->dev || !info->domain)
+ continue;
+
+ domain = info->domain;
+
+ dmar_remove_one_dev_info(domain, info->dev);
+
+ if (!domain_type_is_vm_or_si(domain))
+ domain_exit(domain);
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
@@ -1580,6 +1652,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
+ int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+ int i;
+
+ for (i = 0; i < elems; i++)
+ kfree(iommu->domains[i]);
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
@@ -1594,8 +1671,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
static struct dmar_domain *alloc_domain(int flags)
{
- /* domain id for virtual machine, it won't be set in context */
- static atomic_t vm_domid = ATOMIC_INIT(0);
struct dmar_domain *domain;
domain = alloc_domain_mem();
@@ -1605,111 +1680,64 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
- spin_lock_init(&domain->iommu_lock);
INIT_LIST_HEAD(&domain->devices);
- if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- domain->id = atomic_inc_return(&vm_domid);
return domain;
}
-static int __iommu_attach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- int num;
- unsigned long ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num < ndomains) {
- set_bit(num, iommu->domain_ids);
- iommu->domains[num] = domain;
- } else {
- num = -ENOSPC;
- }
-
- return num;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
+/* Must be called with device_domain_lock and iommu->lock held */
+static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- num = __iommu_attach_domain(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- if (num < 0)
- pr_err("%s: No free domain ids\n", iommu->name);
-
- return num;
-}
-
-static int iommu_attach_vm_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- int num;
unsigned long ndomains;
+ int num;
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains)
- if (iommu->domains[num] == domain)
- return num;
-
- return __iommu_attach_domain(domain, iommu);
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- unsigned long flags;
- int num, ndomains;
+ assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&iommu->lock);
- spin_lock_irqsave(&iommu->lock, flags);
- if (domain_type_is_vm_or_si(domain)) {
+ domain->iommu_refcnt[iommu->seq_id] += 1;
+ domain->iommu_count += 1;
+ if (domain->iommu_refcnt[iommu->seq_id] == 1) {
ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- clear_bit(num, iommu->domain_ids);
- iommu->domains[num] = NULL;
- break;
- }
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ domain->iommu_refcnt[iommu->seq_id] -= 1;
+ domain->iommu_count -= 1;
+ return -ENOSPC;
}
- } else {
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-static void domain_attach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- unsigned long flags;
+ set_bit(num, iommu->domain_ids);
+ set_iommu_domain(iommu, num, domain);
+
+ domain->iommu_did[iommu->seq_id] = num;
+ domain->nid = iommu->node;
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
- domain->iommu_count++;
- if (domain->iommu_count == 1)
- domain->nid = iommu->node;
domain_update_iommu_cap(domain);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+ return 0;
}
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- unsigned long flags;
- int count = INT_MAX;
+ int num, count = INT_MAX;
+
+ assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&iommu->lock);
+
+ domain->iommu_refcnt[iommu->seq_id] -= 1;
+ count = --domain->iommu_count;
+ if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+ num = domain->iommu_did[iommu->seq_id];
+ clear_bit(num, iommu->domain_ids);
+ set_iommu_domain(iommu, num, NULL);
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
- count = --domain->iommu_count;
domain_update_iommu_cap(domain);
+ domain->iommu_did[iommu->seq_id] = 0;
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
return count;
}
@@ -1776,9 +1804,9 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+ int guest_width)
{
- struct intel_iommu *iommu;
int adjust_width, agaw;
unsigned long sagaw;
@@ -1787,7 +1815,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
domain_reserve_special_ranges(domain);
/* calculate AGAW */
- iommu = domain_get_iommu(domain);
if (guest_width > cap_mgaw(iommu->cap))
guest_width = cap_mgaw(iommu->cap);
domain->gaw = guest_width;
@@ -1831,7 +1858,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
struct page *freelist = NULL;
- int i;
/* Domain 0 is reserved, so don't process it */
if (!domain)
@@ -1841,20 +1867,16 @@ static void domain_exit(struct dmar_domain *domain)
if (!intel_iommu_strict)
flush_unmaps_timeout(0);
- /* remove associated devices */
+ /* Remove associated devices and clear attached or cached domains */
+ rcu_read_lock();
domain_remove_dev_info(domain);
+ rcu_read_unlock();
/* destroy iovas */
put_iova_domain(&domain->iovad);
freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
- /* clear attached or cached domains */
- rcu_read_lock();
- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
- iommu_detach_domain(domain, g_iommus[i]);
- rcu_read_unlock();
-
dma_free_pagelist(freelist);
free_domain_mem(domain);
@@ -1862,79 +1884,68 @@ static void domain_exit(struct dmar_domain *domain)
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
- u8 bus, u8 devfn, int translation)
+ u8 bus, u8 devfn)
{
+ u16 did = domain->iommu_did[iommu->seq_id];
+ int translation = CONTEXT_TT_MULTI_LEVEL;
+ struct device_domain_info *info = NULL;
struct context_entry *context;
unsigned long flags;
struct dma_pte *pgd;
- int id;
- int agaw;
- struct device_domain_info *info = NULL;
+ int ret, agaw;
+
+ WARN_ON(did == 0);
+
+ if (hw_pass_through && domain_type_is_si(domain))
+ translation = CONTEXT_TT_PASS_THROUGH;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
- BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
- translation != CONTEXT_TT_MULTI_LEVEL);
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&iommu->lock);
+
+ ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!context)
- return -ENOMEM;
- spin_lock_irqsave(&iommu->lock, flags);
- if (context_present(context)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return 0;
- }
+ goto out_unlock;
- context_clear_entry(context);
+ ret = 0;
+ if (context_present(context))
+ goto out_unlock;
- id = domain->id;
pgd = domain->pgd;
- if (domain_type_is_vm_or_si(domain)) {
- if (domain_type_is_vm(domain)) {
- id = iommu_attach_vm_domain(domain, iommu);
- if (id < 0) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- pr_err("%s: No free domain ids\n", iommu->name);
- return -EFAULT;
- }
- }
+ context_clear_entry(context);
+ context_set_domain_id(context, did);
- /* Skip top levels of page tables for
- * iommu which has less agaw than default.
- * Unnecessary for PT mode.
- */
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
- }
- }
+ /*
+ * Skip top levels of page tables for iommu which has less agaw
+ * than default. Unnecessary for PT mode.
+ */
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
- }
-
- context_set_domain_id(context, id);
- if (translation != CONTEXT_TT_PASS_THROUGH) {
info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
translation = info ? CONTEXT_TT_DEV_IOTLB :
CONTEXT_TT_MULTI_LEVEL;
- }
- /*
- * In pass through mode, AW must be programmed to indicate the largest
- * AGAW value supported by hardware. And ASR is ignored by hardware.
- */
- if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
- context_set_address_width(context, iommu->msagaw);
- else {
+
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, iommu->agaw);
+ } else {
+ /*
+ * In pass through mode, AW must be programmed to
+ * indicate the largest AGAW value supported by
+ * hardware. And ASR is ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
}
context_set_translation_type(context, translation);
@@ -1953,14 +1964,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
iommu_enable_dev_iotlb(info);
- spin_unlock_irqrestore(&iommu->lock, flags);
- domain_attach_iommu(domain, iommu);
+ ret = 0;
+
+out_unlock:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
-	return 0;
+	return ret;
}
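The rework above takes device_domain_lock before iommu->lock and funnels every exit through a single out_unlock label, so both locks are always released in reverse order and the error code travels in ret. A minimal userspace sketch of the same single-exit locking shape, using pthreads (illustrative names, not kernel API):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

/* Two fallible steps under both locks; unlock once, in reverse order. */
static int update_shared(int *slot, int value)
{
	int ret;

	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);

	ret = -ENOMEM;
	if (!slot)
		goto out_unlock;	/* first failure point */

	ret = 0;
	if (*slot == value)
		goto out_unlock;	/* nothing to do, still success */

	*slot = value;

out_unlock:
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return ret;
}

int main(void)
{
	int slot = 0;
	return update_shared(&slot, 1) ? 1 : 0;
}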
@@ -1968,7 +1982,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct domain_context_mapping_data {
struct dmar_domain *domain;
struct intel_iommu *iommu;
- int translation;
};
static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -1977,13 +1990,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
struct domain_context_mapping_data *data = opaque;
return domain_context_mapping_one(data->domain, data->iommu,
- PCI_BUS_NUM(alias), alias & 0xff,
- data->translation);
+ PCI_BUS_NUM(alias), alias & 0xff);
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
- int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -1994,12 +2005,10 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
return -ENODEV;
if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, bus, devfn,
- translation);
+ return domain_context_mapping_one(domain, iommu, bus, devfn);
data.domain = domain;
data.iommu = iommu;
- data.translation = translation;
return pci_for_each_dma_alias(to_pci_dev(dev),
&domain_context_mapping_cb, &data);
@@ -2094,7 +2103,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
- pteval = page_to_phys(sg_page(sg)) | prot;
+ pteval = (sg_phys(sg) & PAGE_MASK) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
@@ -2185,7 +2194,7 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu)
return;
@@ -2211,21 +2220,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link) {
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- iommu_disable_dev_iotlb(info);
- iommu_detach_dev(info->iommu, info->bus, info->devfn);
-
- if (domain_type_is_vm(domain)) {
- iommu_detach_dependent_devices(info->iommu, info->dev);
- domain_detach_iommu(domain, info->iommu);
- }
-
- free_devinfo_mem(info);
- spin_lock_irqsave(&device_domain_lock, flags);
- }
+ list_for_each_entry_safe(info, tmp, &domain->devices, link)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -2257,14 +2253,15 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
return NULL;
}
-static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
- int bus, int devfn,
- struct device *dev,
- struct dmar_domain *domain)
+static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
+ int bus, int devfn,
+ struct device *dev,
+ struct dmar_domain *domain)
{
struct dmar_domain *found = NULL;
struct device_domain_info *info;
unsigned long flags;
+ int ret;
info = alloc_devinfo_mem();
if (!info)
@@ -2272,6 +2269,8 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
info->bus = bus;
info->devfn = devfn;
+ info->ats.enabled = 0;
+ info->ats.qdep = 0;
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
@@ -2279,12 +2278,16 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
- else {
+
+ if (!found) {
struct device_domain_info *info2;
info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
- if (info2)
- found = info2->domain;
+ if (info2) {
+ found = info2->domain;
+ info2->dev = dev;
+ }
}
+
if (found) {
spin_unlock_irqrestore(&device_domain_lock, flags);
free_devinfo_mem(info);
@@ -2292,12 +2295,27 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
return found;
}
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ if (ret) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return NULL;
+ }
+
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (dev && domain_context_mapping(domain, dev)) {
+ pr_err("Domain context map for %s failed\n", dev_name(dev));
+ dmar_remove_one_dev_info(domain, dev);
+ return NULL;
+ }
+
return domain;
}
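dmar_insert_one_dev_info allocates its info node before taking the lock, searches for an existing entry under the lock, frees the fresh node if one already exists, and tears the whole insertion down again if the later context mapping fails. A sketch of that find-or-insert-with-rollback shape (hypothetical list type, not the kernel's):

#include <pthread.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *find_locked(int key)
{
	struct node *n;
	for (n = head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

/* Insert key, or return the existing node; NULL only on allocation failure. */
static struct node *find_or_insert(int key)
{
	struct node *n = malloc(sizeof(*n));	/* allocate before locking */
	struct node *found;

	if (!n)
		return NULL;
	n->key = key;

	pthread_mutex_lock(&list_lock);
	found = find_locked(key);
	if (found) {
		pthread_mutex_unlock(&list_lock);
		free(n);		/* lost the race: drop our copy */
		return found;
	}
	n->next = head;
	head = n;
	pthread_mutex_unlock(&list_lock);
	return n;
}

int main(void)
{
	return find_or_insert(7) == find_or_insert(7) ? 0 : 1;
}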
@@ -2310,10 +2328,10 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
+ struct device_domain_info *info = NULL;
struct dmar_domain *domain, *tmp;
struct intel_iommu *iommu;
- struct device_domain_info *info;
- u16 dma_alias;
+ u16 req_id, dma_alias;
unsigned long flags;
u8 bus, devfn;
@@ -2325,6 +2343,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
if (!iommu)
return NULL;
+ req_id = ((u16)bus << 8) | devfn;
+
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2349,21 +2369,15 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
- domain->id = iommu_attach_domain(domain, iommu);
- if (domain->id < 0) {
- free_domain_mem(domain);
- return NULL;
- }
- domain_attach_iommu(domain, iommu);
- if (domain_init(domain, gaw)) {
+ if (domain_init(domain, iommu, gaw)) {
domain_exit(domain);
return NULL;
}
/* register PCI DMA alias device */
- if (dev_is_pci(dev)) {
- tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
+ if (req_id != dma_alias && dev_is_pci(dev)) {
+ tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+ dma_alias & 0xff, NULL, domain);
if (!tmp || tmp != domain) {
domain_exit(domain);
@@ -2375,7 +2389,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
found_domain:
- tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+ tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (!tmp || tmp != domain) {
domain_exit(domain);
@@ -2403,8 +2417,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
return -ENOMEM;
}
- pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
- start, end, domain->id);
+ pr_debug("Mapping reserved region %llx-%llx\n", start, end);
/*
* RMRR range might have overlap with physical memory range,
* clear it first
@@ -2465,11 +2478,6 @@ static int iommu_prepare_identity_map(struct device *dev,
if (ret)
goto error;
- /* context entry init */
- ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- goto error;
-
return 0;
error:
@@ -2515,37 +2523,18 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
int nid, ret = 0;
- bool first = true;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;
- for_each_active_iommu(iommu, drhd) {
- ret = iommu_attach_domain(si_domain, iommu);
- if (ret < 0) {
- domain_exit(si_domain);
- return -EFAULT;
- } else if (first) {
- si_domain->id = ret;
- first = false;
- } else if (si_domain->id != ret) {
- domain_exit(si_domain);
- return -EFAULT;
- }
- domain_attach_iommu(si_domain, iommu);
- }
-
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
- pr_debug("Identity mapping domain is domain %d\n",
- si_domain->id);
+ pr_debug("Identity mapping domain allocated\n");
if (hw)
return 0;
@@ -2579,28 +2568,20 @@ static int identity_mapping(struct device *dev)
return 0;
}
-static int domain_add_dev_info(struct dmar_domain *domain,
- struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
struct intel_iommu *iommu;
u8 bus, devfn;
- int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
- ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+ ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (ndomain != domain)
return -EBUSY;
- ret = domain_context_mapping(domain, dev, translation);
- if (ret) {
- domain_remove_one_dev_info(domain, dev);
- return ret;
- }
-
return 0;
}
@@ -2740,9 +2721,7 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
if (!iommu_should_identity_map(dev, 1))
return 0;
- ret = domain_add_dev_info(si_domain, dev,
- hw ? CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, dev);
if (!ret)
pr_info("%s identity mapping for device %s\n",
hw ? "Hardware" : "Software", dev_name(dev));
@@ -2828,15 +2807,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
}
static int copy_context_table(struct intel_iommu *iommu,
- struct root_entry *old_re,
+ struct root_entry __iomem *old_re,
struct context_entry **tbl,
int bus, bool ext)
{
- struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+ struct context_entry __iomem *old_ce = NULL;
+ struct context_entry *new_ce = NULL, ce;
+ struct root_entry re;
phys_addr_t old_ce_phys;
tbl_idx = ext ? bus * 2 : bus;
+ memcpy_fromio(&re, old_re, sizeof(re));
for (devfn = 0; devfn < 256; devfn++) {
/* First calculate the correct index */
@@ -2856,9 +2838,9 @@ static int copy_context_table(struct intel_iommu *iommu,
ret = 0;
if (devfn < 0x80)
- old_ce_phys = root_entry_lctp(old_re);
+ old_ce_phys = root_entry_lctp(&re);
else
- old_ce_phys = root_entry_uctp(old_re);
+ old_ce_phys = root_entry_uctp(&re);
if (!old_ce_phys) {
if (ext && devfn == 0) {
@@ -2883,7 +2865,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
/* Now copy the context entry */
- ce = old_ce[idx];
+ memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
if (!__context_present(&ce))
continue;
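The __iomem annotations and memcpy_fromio() calls above stop the code from dereferencing remapped firmware tables directly: each entry is copied into an ordinary local struct first and parsed from there. A plain-C model of the same copy-then-parse idea, with a made-up entry layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Made-up 16-byte table entry; real root/context entries differ. */
struct entry { uint64_t lo; uint64_t hi; };

/* Copy one entry out of a raw table image before looking at its fields. */
static struct entry read_entry(const void *table, unsigned int idx)
{
	struct entry e;
	memcpy(&e, (const uint8_t *)table + idx * sizeof(e), sizeof(e));
	return e;
}

int main(void)
{
	uint8_t image[64] = { 0 };
	image[0] = 0x01;	/* mark entry 0 present (little-endian host) */
	struct entry e = read_entry(image, 0);
	printf("present=%d\n", (int)(e.lo & 1));
	return 0;
}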
@@ -2927,8 +2909,8 @@ out:
static int copy_translation_tables(struct intel_iommu *iommu)
{
+ struct root_entry __iomem *old_rt;
struct context_entry **ctxt_tbls;
- struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
unsigned long flags;
@@ -3258,7 +3240,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain;
- int ret;
domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) {
@@ -3267,16 +3248,6 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
return NULL;
}
- /* make sure context mapping is ok */
- if (unlikely(!domain_context_mapped(dev))) {
- ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
- if (ret) {
- pr_err("Domain context map for %s failed\n",
- dev_name(dev));
- return NULL;
- }
- }
-
return domain;
}
@@ -3312,7 +3283,7 @@ static int iommu_no_mapping(struct device *dev)
* 32 bit DMA is removed from si_domain and falls back
* to non-identity mapping.
*/
- domain_remove_one_dev_info(si_domain, dev);
+ dmar_remove_one_dev_info(si_domain, dev);
pr_info("32bit %s uses non-identity mapping\n",
dev_name(dev));
return 0;
@@ -3324,10 +3295,7 @@ static int iommu_no_mapping(struct device *dev)
*/
if (iommu_should_identity_map(dev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, dev,
- hw_pass_through ?
- CONTEXT_TT_PASS_THROUGH :
- CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, dev);
if (!ret) {
pr_info("64bit %s uses identity mapping\n",
dev_name(dev));
@@ -3388,7 +3356,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+ iommu_flush_iotlb_psi(iommu, domain,
+ mm_to_dma_pfn(iova->pfn_lo),
+ size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3439,7 +3409,7 @@ static void flush_unmaps(void)
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id,
+ iommu_flush_iotlb_psi(iommu, domain,
iova->pfn_lo, iova_size(iova),
!deferred_flush[i].freelist[j], 0);
else {
@@ -3523,7 +3493,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
freelist = domain_unmap(domain, start_pfn, last_pfn);
if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ iommu_flush_iotlb_psi(iommu, domain, start_pfn,
last_pfn - start_pfn + 1, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -3620,7 +3590,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+ sg->dma_address = sg_phys(sg);
sg->dma_length = sg->length;
}
return nelems;
@@ -3681,7 +3651,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+ iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -4158,13 +4128,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
- if (si_domain) {
- ret = iommu_attach_domain(si_domain, iommu);
- if (ret < 0 || si_domain->id != ret)
- goto disable_iommu;
- domain_attach_iommu(si_domain, iommu);
- }
-
iommu_disable_protect_mem_regions(iommu);
return 0;
@@ -4326,11 +4289,9 @@ static int device_notifier(struct notifier_block *nb,
if (!domain)
return 0;
- down_read(&dmar_global_lock);
- domain_remove_one_dev_info(domain, dev);
+ dmar_remove_one_dev_info(domain, dev);
if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
domain_exit(domain);
- up_read(&dmar_global_lock);
return 0;
}
@@ -4387,7 +4348,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
- iommu_flush_iotlb_psi(iommu, si_domain->id,
+ iommu_flush_iotlb_psi(iommu, si_domain,
iova->pfn_lo, iova_size(iova),
!freelist, 0);
rcu_read_unlock();
@@ -4446,11 +4407,32 @@ static ssize_t intel_iommu_show_ecap(struct device *dev,
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
+static ssize_t intel_iommu_show_ndoms(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_iommu *iommu = dev_get_drvdata(dev);
+ return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+}
+static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+
+static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_iommu *iommu = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
+ cap_ndoms(iommu->cap)));
+}
+static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+
static struct attribute *intel_iommu_attrs[] = {
&dev_attr_version.attr,
&dev_attr_address.attr,
&dev_attr_cap.attr,
&dev_attr_ecap.attr,
+ &dev_attr_domains_supported.attr,
+ &dev_attr_domains_used.attr,
NULL,
};
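The new domains_used attribute reports bitmap_weight() over the IOMMU's domain-id bitmap, i.e. the number of set bits. A standalone equivalent over an array of unsigned longs, assuming the GCC/Clang __builtin_popcountl builtin:

#include <stdio.h>

/* Count set bits across an array of unsigned longs, like bitmap_weight(). */
static unsigned int weight(const unsigned long *bits, unsigned int nlongs)
{
	unsigned int i, w = 0;
	for (i = 0; i < nlongs; i++)
		w += (unsigned int)__builtin_popcountl(bits[i]);
	return w;
}

int main(void)
{
	unsigned long ids[2] = { 0x5UL, 0x1UL };  /* bits 0, 2 in word 0; bit 0 in word 1 */
	printf("%u domain ids in use\n", weight(ids, 2));
	return 0;
}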
@@ -4530,7 +4512,7 @@ int __init intel_iommu_init(void)
for_each_active_iommu(iommu, drhd)
iommu->iommu_dev = iommu_device_create(NULL, iommu,
intel_iommu_groups,
- iommu->name);
+ "%s", iommu->name);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb);
@@ -4550,11 +4532,11 @@ out_free_dmar:
return ret;
}
-static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct intel_iommu *iommu = opaque;
- iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
@@ -4564,63 +4546,50 @@ static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
- struct device *dev)
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
if (!iommu || !dev || !dev_is_pci(dev))
return;
- pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
+ pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
- struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
- bool found = false;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
+ assert_spin_locked(&device_domain_lock);
+
+ if (WARN_ON(!info))
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link) {
- if (info->iommu == iommu && info->bus == bus &&
- info->devfn == devfn) {
- unlink_domain_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu = info->iommu;
- iommu_disable_dev_iotlb(info);
- iommu_detach_dev(iommu, info->bus, info->devfn);
- iommu_detach_dependent_devices(iommu, dev);
- free_devinfo_mem(info);
+ if (info->dev) {
+ iommu_disable_dev_iotlb(info);
+ domain_context_clear(iommu, info->dev);
+ }
- spin_lock_irqsave(&device_domain_lock, flags);
+ unlink_domain_info(info);
- if (found)
- break;
- else
- continue;
- }
+ spin_lock_irqsave(&iommu->lock, flags);
+ domain_detach_iommu(info->domain, iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
- /* if there is no other devices under the same iommu
- * owned by this domain, clear this iommu in iommu_bmp
- * update iommu count and coherency
- */
- if (info->iommu == iommu)
- found = true;
- }
+ free_devinfo_mem(info);
+}
- spin_unlock_irqrestore(&device_domain_lock, flags);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
- if (found == 0) {
- domain_detach_iommu(domain, iommu);
- if (!domain_type_is_vm_or_si(domain))
- iommu_detach_domain(domain, iommu);
- }
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ __dmar_remove_one_dev_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4701,10 +4670,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(dev);
if (old_domain) {
- if (domain_type_is_vm_or_si(dmar_domain))
- domain_remove_one_dev_info(old_domain, dev);
- else
- domain_remove_dev_info(old_domain);
+ rcu_read_lock();
+ dmar_remove_one_dev_info(old_domain, dev);
+ rcu_read_unlock();
if (!domain_type_is_vm_or_si(old_domain) &&
list_empty(&old_domain->devices))
@@ -4744,13 +4712,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+ return domain_add_dev_info(dmar_domain, dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- domain_remove_one_dev_info(to_dmar_domain(domain), dev);
+ dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -4799,12 +4767,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
unsigned int npages;
- int iommu_id, num, ndomains, level = 0;
+ int iommu_id, level = 0;
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
- if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
- BUG();
+ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4816,19 +4783,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
npages = last_pfn - start_pfn + 1;
- for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
- iommu = g_iommus[iommu_id];
-
- /*
- * find bit position of dmar_domain
- */
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == dmar_domain)
- iommu_flush_iotlb_psi(iommu, num, start_pfn,
- npages, !freelist, 0);
- }
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+ iommu_flush_iotlb_psi(iommu, dmar_domain,
+ start_pfn, npages, !freelist, 0);
}
dma_free_pagelist(freelist);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f15692a410c7..9ec4e0d94ffd 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -384,7 +384,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
- struct irte *old_ir_table;
+ struct irte __iomem *old_ir_table;
phys_addr_t irt_phys;
unsigned int i;
size_t size;
@@ -413,7 +413,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
return -ENOMEM;
/* Copy data over */
- memcpy(iommu->ir_table->base, old_ir_table, size);
+ memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
__iommu_flush_cache(iommu, iommu->ir_table->base, size);
@@ -426,6 +426,8 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
bitmap_set(iommu->ir_table->bitmap, i, 1);
}
+ iounmap(old_ir_table);
+
return 0;
}
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e460216bd16..73c07482f487 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <asm/barrier.h>
+
#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS 48
@@ -200,20 +202,97 @@ typedef u64 arm_lpae_iopte;
static bool selftest_running = false;
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+ return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+ dma_addr_t dma;
+ void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+ if (!pages)
+ return NULL;
+
+ if (!selftest_running) {
+ dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto out_free;
+ /*
+ * We depend on the IOMMU being able to work with any physical
+ * address directly, so if the DMA layer suggests it can't by
+ * giving us back some translation, that bodes very badly...
+ */
+ if (dma != __arm_lpae_dma_addr(dev, pages))
+ goto out_unmap;
+ }
+
+ return pages;
+
+out_unmap:
+ dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+ free_pages_exact(pages, size);
+ return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+
+ if (!selftest_running)
+ dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+ size, DMA_TO_DEVICE);
+ free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+
+ *ptep = pte;
+
+ if (!selftest_running)
+ dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+ sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep);
+
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
arm_lpae_iopte prot, int lvl,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte = prot;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
- /* We require an unmap first */
if (iopte_leaf(*ptep, lvl)) {
+ /* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
+ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_lpae_iopte *tblp;
+ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+ return -EINVAL;
}
- if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NS;
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
- *ptep = pte;
- data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ __arm_lpae_set_pte(ptep, pte, cfg);
return 0;
}
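__arm_lpae_alloc_pages above maps the new table for the device and then insists that the DMA address it got back equals the table's physical address, because the hardware walker consumes raw physical pointers. A sketch of that check with stand-in map/translate helpers (the identity mapping here is an assumption, mirroring what the walker requires):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins: pretend the DMA mapping is the identity. */
static uintptr_t fake_virt_to_phys(void *p) { return (uintptr_t)p; }
static uintptr_t fake_dma_map(void *p)      { return (uintptr_t)p; }

static void *alloc_table(size_t size)
{
	void *pages = calloc(1, size);
	if (!pages)
		return NULL;

	/* If the "DMA" address is translated at all, we cannot use it. */
	if (fake_dma_map(pages) != fake_virt_to_phys(pages)) {
		fprintf(stderr, "cannot accommodate DMA translation\n");
		free(pages);
		return NULL;
	}
	return pages;
}

int main(void)
{
	void *t = alloc_table(4096);
	printf(t ? "table ok\n" : "table failed\n");
	free(t);
	return 0;
}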
@@ -234,14 +312,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
int lvl, arm_lpae_iopte *ptep)
{
arm_lpae_iopte *cptep, pte;
- void *cookie = data->iop.cookie;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
/* Find our entry at the current level */
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ if (size == block_size && (size & cfg->pgsize_bitmap))
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = *ptep;
if (!pte) {
- cptep = alloc_pages_exact(1UL << data->pg_shift,
- GFP_ATOMIC | __GFP_ZERO);
+ cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+ GFP_ATOMIC, cfg);
if (!cptep)
return -ENOMEM;
- data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
- cookie);
pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
- if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
- *ptep = pte;
- data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, pte, cfg);
} else {
cptep = iopte_deref(pte, data);
}
@@ -309,7 +384,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
/* If no access, then nothing to do */
@@ -317,7 +392,14 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return 0;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ wmb();
+
+ return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
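The wmb() added to arm_lpae_map orders the PTE stores against anything that could kick off a table walk for the new iova. In portable C11 the analogous publish idiom is a release store paired with an acquire load; a minimal sketch:

#include <stdatomic.h>
#include <stdio.h>

static int table[4];
static _Atomic(int *) published;

/* Writer: fill the table, then publish with release ordering. */
static void publish(void)
{
	table[0] = 42;		/* plain stores: the "PTE updates" */
	atomic_store_explicit(&published, table, memory_order_release);
}

/* Reader: an acquire load guarantees it sees the filled-in table. */
static void consume(void)
{
	int *t = atomic_load_explicit(&published, memory_order_acquire);
	if (t)
		printf("entry 0 = %d\n", t[0]);
}

int main(void)
{
	publish();
	consume();
	return 0;
}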
@@ -347,7 +429,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
}
- free_pages_exact(start, table_size);
+ __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
unsigned long blk_start, blk_end;
phys_addr_t blk_paddr;
arm_lpae_iopte table = 0;
- void *cookie = data->iop.cookie;
- const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
blk_start = iova & ~(blk_size - 1);
blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
}
}
- *ptep = table;
- tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, table, cfg);
iova &= ~(blk_size - 1);
- tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
return size;
}
@@ -418,13 +498,12 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == blk_size) {
- *ptep = 0;
- tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
tlb->tlb_add_flush(iova, size, false, cookie);
- tlb->tlb_sync(data->iop.cookie);
+ tlb->tlb_sync(cookie);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
@@ -640,11 +719,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
- data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
- cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
/* TTBRs */
cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
- cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
/* VTTBR */
cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
-}
-
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
- .flush_pgtable = dummy_flush_pgtable,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 6436fe24bc2f..6f2e319d4f04 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -24,11 +24,6 @@
#include "io-pgtable.h"
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
-
static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
{
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 10e32f69c668..ac9e2341a633 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -17,8 +17,9 @@ enum io_pgtable_fmt {
*
* @tlb_flush_all: Synchronously invalidate the entire TLB context.
* @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync: Ensure any queue TLB invalidation has taken effect.
- * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
+ * any corresponding page table updates are visible to the
+ * IOMMU.
*
* Note that these can all be called in atomic context and must therefore
* not block.
@@ -28,7 +29,6 @@ struct iommu_gather_ops {
void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
void *cookie);
void (*tlb_sync)(void *cookie);
- void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
};
/**
@@ -41,6 +41,8 @@ struct iommu_gather_ops {
* @ias: Input address (iova) size, in bits.
* @oas: Output address (paddr) size, in bits.
* @tlb: TLB management callbacks for this set of tables.
+ * @iommu_dev: The device representing the DMA configuration for the
+ * page table walker.
*/
struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
@@ -49,6 +51,7 @@ struct io_pgtable_cfg {
unsigned int ias;
unsigned int oas;
const struct iommu_gather_ops *tlb;
+ struct device *iommu_dev;
/* Low-level data specific to the table format */
union {
@@ -140,4 +143,9 @@ struct io_pgtable_init_fns {
void (*free)(struct io_pgtable *iop);
};
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
#endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090931cc..049df495c274 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+ phys_addr_t phys = sg_phys(s);
/*
* We are mapping on IOMMU page boundaries, so offset within
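The sg_phys() conversions in this series are behavior-preserving: sg_phys(s) is defined as page_to_phys(sg_page(s)) + s->offset, so the open-coded sum collapses into one helper. A toy model of that identity (toy types, not the real scatterlist):

#include <stdint.h>
#include <stdio.h>

/* Toy scatterlist element: a page frame number plus an in-page offset. */
struct toy_sg { uint64_t pfn; unsigned int offset; };

#define TOY_PAGE_SHIFT 12

static uint64_t toy_page_to_phys(uint64_t pfn) { return pfn << TOY_PAGE_SHIFT; }

/* sg_phys()-style helper: page base plus offset. */
static uint64_t toy_sg_phys(const struct toy_sg *s)
{
	return toy_page_to_phys(s->pfn) + s->offset;
}

int main(void)
{
	struct toy_sg s = { .pfn = 0x1234, .offset = 0x80 };
	printf("phys = 0x%llx\n", (unsigned long long)toy_sg_phys(&s));
	return 0;
}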
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1a67c531a07e..8cf605fa9946 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -283,24 +283,10 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
/* The hardware doesn't support selective TLB flush. */
}
-static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
- unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
- struct ipmmu_vmsa_domain *domain = cookie;
-
- /*
- * TODO: Add support for coherent walk through CCI with DVM and remove
- * cache handling.
- */
- dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
- DMA_TO_DEVICE);
-}
-
static struct iommu_gather_ops ipmmu_gather_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_add_flush = ipmmu_tlb_add_flush,
.tlb_sync = ipmmu_tlb_flush_all,
- .flush_pgtable = ipmmu_flush_pgtable,
};
/* -----------------------------------------------------------------------------
@@ -327,6 +313,11 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.ias = 32;
domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops;
+ /*
+ * TODO: Add support for coherent walk through CCI with DVM and remove
+ * cache handling. For now, delegate it to the io-pgtable code.
+ */
+ domain->cfg.iommu_dev = domain->mmu->dev;
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
domain);
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 2d9993062ded..913455a5fd40 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -84,7 +84,7 @@ void set_irq_remapping_broken(void)
bool irq_remapping_cap(enum irq_remap_cap cap)
{
if (!remap_ops || disable_irq_post)
- return 0;
+ return false;
return (remap_ops->capability & (1 << cap));
}
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 15a2063812fa..e321fa517a45 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -106,8 +106,8 @@ static int __flush_iotlb(struct iommu_domain *domain)
#endif
list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
- if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
- BUG();
+
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
BUG_ON(!iommu_drvdata);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 43429ab62228..60ba238090d9 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -141,10 +141,12 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
struct iommu_ops *ops = NULL;
int idx = 0;
- if (dev_is_pci(dev)) {
- dev_err(dev, "IOMMU is currently not supported for PCI\n");
+ /*
+ * We can't do much for PCI devices without knowing how
+ * device IDs are wired up from the PCI bus to the IOMMU.
+ */
+ if (dev_is_pci(dev))
return NULL;
- }
/*
* We don't currently walk up the tree looking for a parent IOMMU.
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f3d20a2039d2..9bc20e2119a3 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
#include <linux/platform_data/iommu-omap.h>
@@ -29,6 +30,59 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
return !obj->domain;
}
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+ ssize_t bytes)
+{
+ if (!obj || !buf)
+ return -EINVAL;
+
+ pm_runtime_get_sync(obj->dev);
+
+ bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+
+ pm_runtime_put_sync(obj->dev);
+
+ return bytes;
+}
+
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -55,34 +109,72 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
return bytes;
}
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
- struct omap_iommu *obj = file->private_data;
- char *p, *buf;
- ssize_t bytes, rest;
+ int i;
+ struct iotlb_lock saved;
+ struct cr_regs tmp;
+ struct cr_regs *p = crs;
+
+ pm_runtime_get_sync(obj->dev);
+ iotlb_lock_get(obj, &saved);
+
+ for_each_iotlb_cr(obj, num, i, tmp) {
+ if (!iotlb_cr_valid(&tmp))
+ continue;
+ *p++ = tmp;
+ }
+
+ iotlb_lock_set(obj, &saved);
+ pm_runtime_put_sync(obj->dev);
+
+ return p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ struct seq_file *s)
+{
+ seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
+ return 0;
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+ int i, num;
+ struct cr_regs *cr;
+
+ num = obj->nr_tlb_entries;
+
+ cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return 0;
+
+ num = __dump_tlb_entries(obj, cr, num);
+ for (i = 0; i < num; i++)
+ iotlb_dump_cr(obj, cr + i, s);
+ kfree(cr);
+
+ return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+ struct omap_iommu *obj = s->private;
if (is_omap_iommu_detached(obj))
return -EPERM;
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
mutex_lock(&iommu_debug_lock);
- p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
- p += sprintf(p, "-----------------------------------------\n");
- rest = count - (p - buf);
- p += omap_dump_tlb_entries(obj, p, rest);
-
- bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+ seq_puts(s, "-----------------------------------------\n");
+ omap_dump_tlb_entries(obj, s);
mutex_unlock(&iommu_debug_lock);
- kfree(buf);
- return bytes;
+ return 0;
}
static void dump_ioptable(struct seq_file *s)
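The tlb debugfs file above moves from a hand-managed kmalloc'd buffer to the seq_file interface, which hides buffer sizing behind seq_printf(). A userspace sketch of the same shift, with a trivial fixed-size "seq" built on snprintf (illustrative names only):

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for a seq_file: a fixed buffer plus a cursor. */
struct seq { char buf[256]; size_t len; };

static void seq_add(struct seq *s, const char *fmt, unsigned int v)
{
	int n = snprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, v);
	if (n > 0 && (size_t)n < sizeof(s->buf) - s->len)
		s->len += (size_t)n;	/* silently stop when full */
}

int main(void)
{
	struct seq s = { .len = 0 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		seq_add(&s, "entry %08x\n", i * 0x1000);
	fwrite(s.buf, 1, s.len, stdout);
	return 0;
}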
@@ -154,10 +246,10 @@ static int debug_read_pagetable(struct seq_file *s, void *data)
.open = simple_open, \
.read = debug_read_##name, \
.llseek = generic_file_llseek, \
- };
+ }
DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index a22c33d6a486..36d0033c2ccb 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -12,7 +12,6 @@
*/
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -38,11 +37,6 @@
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
-#define for_each_iotlb_cr(obj, n, __i, cr) \
- for (__i = 0; \
- (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
- __i++)
-
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
@@ -72,11 +66,6 @@ struct omap_iommu_domain {
#define MMU_LOCK_VICT(x) \
((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
-struct iotlb_lock {
- short base;
- short vict;
-};
-
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
@@ -213,14 +202,6 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
- if (!cr)
- return -EINVAL;
-
- return cr->cam & MMU_CAM_V;
-}
-
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@ static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
return status;
}
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
u32 val;
@@ -268,10 +249,9 @@ static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
l->base = MMU_LOCK_BASE(val);
l->vict = MMU_LOCK_VICT(val);
-
}
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
u32 val;
@@ -297,7 +277,7 @@ static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
}
/* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
struct cr_regs cr;
struct iotlb_lock l;
@@ -468,129 +448,6 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name) \
- do { \
- ssize_t bytes; \
- const char *str = "%20s: %08x\n"; \
- const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
- iommu_read_reg(obj, MMU_##name)); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- goto out; \
- } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- char *p = buf;
-
- pr_reg(REVISION);
- pr_reg(IRQSTATUS);
- pr_reg(IRQENABLE);
- pr_reg(WALKING_ST);
- pr_reg(CNTL);
- pr_reg(FAULT_AD);
- pr_reg(TTB);
- pr_reg(LOCK);
- pr_reg(LD_TLB);
- pr_reg(CAM);
- pr_reg(RAM);
- pr_reg(GFLUSH);
- pr_reg(FLUSH_ENTRY);
- pr_reg(READ_CAM);
- pr_reg(READ_RAM);
- pr_reg(EMU_FAULT_AD);
-out:
- return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
- if (!obj || !buf)
- return -EINVAL;
-
- pm_runtime_get_sync(obj->dev);
-
- bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-
- pm_runtime_put_sync(obj->dev);
-
- return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
- int i;
- struct iotlb_lock saved;
- struct cr_regs tmp;
- struct cr_regs *p = crs;
-
- pm_runtime_get_sync(obj->dev);
- iotlb_lock_get(obj, &saved);
-
- for_each_iotlb_cr(obj, num, i, tmp) {
- if (!iotlb_cr_valid(&tmp))
- continue;
- *p++ = tmp;
- }
-
- iotlb_lock_set(obj, &saved);
- pm_runtime_put_sync(obj->dev);
-
- return p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf)
-{
- char *p = buf;
-
- /* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
-
- return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj: target iommu
- * @buf: output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
- int i, num;
- struct cr_regs *cr;
- char *p = buf;
-
- num = bytes / sizeof(*cr);
- num = min(obj->nr_tlb_entries, num);
-
- cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
- if (!cr)
- return 0;
-
- num = __dump_tlb_entries(obj, cr, num);
- for (i = 0; i < num; i++)
- p += iotlb_dump_cr(obj, cr + i, p);
- kfree(cr);
-
- return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
/*
* H/W pagetable operations
*/
@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!iopgd_is_table(*iopgd)) {
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
- obj->name, errs, da, iopgd, *iopgd);
+ obj->name, errs, da, iopgd, *iopgd);
return IRQ_NONE;
}
iopte = iopte_offset(iopgd, da);
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
- obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
+ obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
return IRQ_NONE;
}
@@ -963,9 +820,8 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
struct device *dev;
struct omap_iommu *obj;
- dev = driver_find_device(&omap_iommu_driver.driver, NULL,
- (void *)name,
- device_match_by_alias);
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+ device_match_by_alias);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -1089,7 +945,6 @@ static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,dra7-iommu" },
{},
};
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
@@ -1121,7 +976,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- size_t size)
+ size_t size)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@ out:
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
- struct device *dev)
+ struct device *dev)
{
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
@@ -1237,16 +1092,12 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
return NULL;
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
- if (!omap_domain) {
- pr_err("kzalloc failed\n");
+ if (!omap_domain)
goto out;
- }
omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
- if (!omap_domain->pgtable) {
- pr_err("kzalloc failed\n");
+ if (!omap_domain->pgtable)
goto fail_nomem;
- }
/*
* should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t da)
+ dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
- (unsigned long long)da);
+ (unsigned long long)da);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
- (unsigned long long)da);
+ (unsigned long long)da);
}
return ret;
@@ -1405,20 +1256,5 @@ static int __init omap_iommu_init(void)
return platform_driver_register(&omap_iommu_driver);
}
-/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
- kmem_cache_destroy(iopte_cachep);
-
- platform_driver_unregister(&omap_iommu_driver);
-
- omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index d736630df3c8..a656df2f9e03 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -13,16 +13,18 @@
#ifndef _OMAP_IOMMU_H
#define _OMAP_IOMMU_H
+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr) \
+ for (__i = 0; \
+ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
+ __i++)
+
struct iotlb_entry {
u32 da;
u32 pa;
u32 pgsz, prsvd, valid;
- union {
- u16 ap;
- struct {
- u32 endian, elsz, mixed;
- };
- };
+ u32 endian, elsz, mixed;
};
struct omap_iommu {
@@ -49,20 +51,13 @@ struct omap_iommu {
};
struct cr_regs {
- union {
- struct {
- u16 cam_l;
- u16 cam_h;
- };
- u32 cam;
- };
- union {
- struct {
- u16 ram_l;
- u16 ram_h;
- };
- u32 ram;
- };
+ u32 cam;
+ u32 ram;
+};
+
+struct iotlb_lock {
+ short base;
+ short vict;
};
/**
@@ -103,11 +98,11 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
* MMU Register bit definitions
*/
/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
-#define MMU_IRQ_TLBMISS (1 << 0)
+#define MMU_IRQ_MULTIHITFAULT BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT BIT(3)
+#define MMU_IRQ_EMUMISS BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT BIT(1)
+#define MMU_IRQ_TLBMISS BIT(0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
@@ -119,16 +114,16 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB (1 << 3)
-#define MMU_CNTL_TWL_EN (1 << 2)
-#define MMU_CNTL_MMU_EN (1 << 1)
+#define MMU_CNTL_EML_TLB BIT(3)
+#define MMU_CNTL_TWL_EN BIT(2)
+#define MMU_CNTL_MMU_EN BIT(1)
/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P (1 << 3)
-#define MMU_CAM_V (1 << 2)
+#define MMU_CAM_P BIT(3)
+#define MMU_CAM_V BIT(2)
#define MMU_CAM_PGSZ_MASK 3
#define MMU_CAM_PGSZ_1M (0 << 0)
#define MMU_CAM_PGSZ_64K (1 << 0)
@@ -141,9 +136,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
#define MMU_RAM_ENDIAN_SHIFT 9
-#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ELSZ_SHIFT 7
#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_MIXED_SHIFT 6
-#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK BIT(MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
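The register definitions above swap open-coded (1 << n) for the kernel's BIT(n), which expands to an unsigned long shift and so avoids signed-shift pitfalls at bit 31. A standalone equivalent (BIT here copies the kernel definition; the register names are made up):

#include <stdio.h>

#define BIT(n) (1UL << (n))	/* as in include/linux/bitops.h */

#define MY_IRQ_TLBMISS     BIT(0)
#define MY_IRQ_TRANSLATION BIT(1)

int main(void)
{
	unsigned long status = MY_IRQ_TLBMISS | MY_IRQ_TRANSLATION;
	printf("fault? %s\n", (status & MY_IRQ_TRANSLATION) ? "yes" : "no");
	return 0;
}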
@@ -190,12 +185,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
+
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
@@ -222,4 +217,12 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
__raw_writel(val, obj->regbase + offs);
}
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+ if (!cr)
+ return -EINVAL;
+
+ return cr->cam & MMU_CAM_V;
+}
+
#endif /* _OMAP_IOMMU_H */
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f891683e3f05..01a315227bf0 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -10,25 +10,30 @@
* published by the Free Software Foundation.
*/
+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
/*
* "L2 table" address mask and size definitions.
*/
#define IOPGD_SHIFT 20
-#define IOPGD_SIZE (1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE BIT(IOPGD_SHIFT)
#define IOPGD_MASK (~(IOPGD_SIZE - 1))
/*
* "section" address mask and size definitions.
*/
#define IOSECTION_SHIFT 20
-#define IOSECTION_SIZE (1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE BIT(IOSECTION_SHIFT)
#define IOSECTION_MASK (~(IOSECTION_SIZE - 1))
/*
* "supersection" address mask and size definitions.
*/
#define IOSUPER_SHIFT 24
-#define IOSUPER_SIZE (1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE BIT(IOSUPER_SHIFT)
#define IOSUPER_MASK (~(IOSUPER_SIZE - 1))
#define PTRS_PER_IOPGD (1UL << (32 - IOPGD_SHIFT))
@@ -38,14 +43,14 @@
* "small page" address mask and size definitions.
*/
#define IOPTE_SHIFT 12
-#define IOPTE_SIZE (1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE BIT(IOPTE_SHIFT)
#define IOPTE_MASK (~(IOPTE_SIZE - 1))
/*
* "large page" address mask and size definitions.
*/
#define IOLARGE_SHIFT 16
-#define IOLARGE_SIZE (1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE BIT(IOLARGE_SHIFT)
#define IOLARGE_MASK (~(IOLARGE_SIZE - 1))
#define PTRS_PER_IOPTE (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/*
* some descriptor attributes.
*/
-#define IOPGD_TABLE (1 << 0)
-#define IOPGD_SECTION (2 << 0)
-#define IOPGD_SUPER (1 << 18 | 2 << 0)
+#define IOPGD_TABLE (1)
+#define IOPGD_SECTION (2)
+#define IOPGD_SUPER (BIT(18) | IOPGD_SECTION)
#define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE)
#define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
#define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
-#define IOPTE_SMALL (2 << 0)
-#define IOPTE_LARGE (1 << 0)
+#define IOPTE_SMALL (2)
+#define IOPTE_LARGE (1)
#define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL)
#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/* to find an entry in the second-level page table. */
#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
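
For reference, the BIT() macro pulled in through <linux/bitops.h> is simply

#define BIT(nr)			(1UL << (nr))

so the conversions above keep the unsigned-long width of the old (1UL << ...) size definitions, and the single-bit register masks are unchanged in value.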
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c1f2e521dc52..9305964250ac 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -27,6 +27,7 @@ struct tegra_smmu {
const struct tegra_smmu_soc *soc;
unsigned long pfn_mask;
+ unsigned long tlb_mask;
unsigned long *asids;
struct mutex lock;
@@ -40,8 +41,10 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
- struct page *count;
+ u32 *count;
+ struct page **pts;
struct page *pd;
+ dma_addr_t pd_dma;
unsigned id;
u32 attr;
};
@@ -68,7 +71,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
#define SMMU_PTB_DATA 0x020
-#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
@@ -134,29 +138,49 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
SMMU_PTE_NONSECURE)
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+ return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+ return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+ addr >>= 12;
+ return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+ return pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+ smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
unsigned long offset)
{
- phys_addr_t phys = page ? page_to_phys(page) : 0;
u32 value;
- if (page) {
- offset &= ~(smmu->mc->soc->atom_size - 1);
+ offset &= ~(smmu->mc->soc->atom_size - 1);
- if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+ if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
- value = 0;
+ value = 0;
#endif
- smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
- }
-
- value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
- } else {
- value = SMMU_PTC_FLUSH_TYPE_ALL;
+ smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
}
+ value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
@@ -236,8 +260,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
- unsigned int i;
- uint32_t *pd;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -248,32 +270,26 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!as->pd) {
kfree(as);
return NULL;
}
- as->count = alloc_page(GFP_KERNEL);
+ as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
__free_page(as->pd);
kfree(as);
return NULL;
}
- /* clear PDEs */
- pd = page_address(as->pd);
- SetPageReserved(as->pd);
-
- for (i = 0; i < SMMU_NUM_PDE; i++)
- pd[i] = 0;
-
- /* clear PDE usage counters */
- pd = page_address(as->count);
- SetPageReserved(as->count);
-
- for (i = 0; i < SMMU_NUM_PDE; i++)
- pd[i] = 0;
+ as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+ if (!as->pts) {
+ kfree(as->count);
+ __free_page(as->pd);
+ kfree(as);
+ return NULL;
+ }
/* setup aperture */
as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */
- ClearPageReserved(as->pd);
kfree(as);
}
@@ -376,16 +391,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
}
+ as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, as->pd_dma))
+ return -ENOMEM;
+
+ /* We can't handle 64-bit DMA addresses */
+ if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
err = tegra_smmu_alloc_asid(smmu, &as->id);
if (err < 0)
- return err;
+ goto err_unmap;
- smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
- smmu_flush_ptc(smmu, as->pd, 0);
+ smmu_flush_ptc(smmu, as->pd_dma, 0);
smmu_flush_tlb_asid(smmu, as->id);
smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
- value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+ value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
smmu_writel(smmu, value, SMMU_PTB_DATA);
smmu_flush(smmu);
@@ -393,6 +418,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->use_count++;
return 0;
+
+err_unmap:
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
return;
tegra_smmu_free_asid(smmu, as->id);
+
+ dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
as->smmu = NULL;
}
@@ -465,96 +497,155 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de
}
}
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+ u32 value)
+{
+ unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
+ u32 *pd = page_address(as->pd);
+ unsigned long offset = pd_index * sizeof(*pd);
+
+ /* Set the page directory entry first */
+ pd[pd_index] = value;
+
+ /* Then flush the page directory entry from caches */
+ dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+ sizeof(*pd), DMA_TO_DEVICE);
+
+ /* And flush the iommu */
+ smmu_flush_ptc(smmu, as->pd_dma, offset);
+ smmu_flush_tlb_section(smmu, as->id, iova);
+ smmu_flush(smmu);
+}
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+ u32 *pt = page_address(pt_page);
+
+ return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+ dma_addr_t *dmap)
+{
+ unsigned int pd_index = iova_pd_index(iova);
+ struct page *pt_page;
+ u32 *pd;
+
+ pt_page = as->pts[pd_index];
+ if (!pt_page)
+ return NULL;
+
+ pd = page_address(as->pd);
+ *dmap = smmu_pde_to_dma(pd[pd_index]);
+
+ return tegra_smmu_pte_offset(pt_page, iova);
+}
+
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- struct page **pagep)
+ dma_addr_t *dmap)
{
- u32 *pd = page_address(as->pd), *pt, *count;
- u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
- u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+ unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *page;
- unsigned int i;
- if (pd[pde] == 0) {
- page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ if (!as->pts[pde]) {
+ struct page *page;
+ dma_addr_t dma;
+
+ page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
if (!page)
return NULL;
- pt = page_address(page);
- SetPageReserved(page);
+ dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, dma)) {
+ __free_page(page);
+ return NULL;
+ }
- for (i = 0; i < SMMU_NUM_PTE; i++)
- pt[i] = 0;
+ if (!smmu_dma_addr_valid(smmu, dma)) {
+ dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ __free_page(page);
+ return NULL;
+ }
- smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+ as->pts[pde] = page;
- pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+ tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+ SMMU_PDE_NEXT));
- smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
- smmu_flush_ptc(smmu, as->pd, pde << 2);
- smmu_flush_tlb_section(smmu, as->id, iova);
- smmu_flush(smmu);
+ *dmap = dma;
} else {
- page = pfn_to_page(pd[pde] & smmu->pfn_mask);
- pt = page_address(page);
+ u32 *pd = page_address(as->pd);
+
+ *dmap = smmu_pde_to_dma(pd[pde]);
}
- *pagep = page;
+ return tegra_smmu_pte_offset(as->pts[pde], iova);
+}
- /* Keep track of entries in this page table. */
- count = page_address(as->count);
- if (pt[pte] == 0)
- count[pde]++;
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+ unsigned int pd_index = iova_pd_index(iova);
- return &pt[pte];
+ as->count[pd_index]++;
}
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
- u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
- u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
- u32 *count = page_address(as->count);
- u32 *pd = page_address(as->pd), *pt;
- struct page *page;
-
- page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
- pt = page_address(page);
+ unsigned int pde = iova_pd_index(iova);
+ struct page *page = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
* memory page to the system.
*/
- if (pt[pte] != 0) {
- if (--count[pde] == 0) {
- ClearPageReserved(page);
- __free_page(page);
- pd[pde] = 0;
- }
+ if (--as->count[pde] == 0) {
+ struct tegra_smmu *smmu = as->smmu;
+ u32 *pd = page_address(as->pd);
+ dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+
+ tegra_smmu_set_pde(as, iova, 0);
- pt[pte] = 0;
+ dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+ __free_page(page);
+ as->pts[pde] = NULL;
}
}
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+ u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+ struct tegra_smmu *smmu = as->smmu;
+ unsigned long offset = offset_in_page(pte);
+
+ *pte = val;
+
+ dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+ 4, DMA_TO_DEVICE);
+ smmu_flush_ptc(smmu, pte_dma, offset);
+ smmu_flush_tlb_group(smmu, as->id, iova);
+ smmu_flush(smmu);
+}
+
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct tegra_smmu *smmu = as->smmu;
- unsigned long offset;
- struct page *page;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
+ pte = as_get_pte(as, iova, &pte_dma);
if (!pte)
return -ENOMEM;
- *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
- offset = offset_in_page(pte);
+ /* If we aren't overwriting a pre-existing entry, increment use */
+ if (*pte == 0)
+ tegra_smmu_pte_get_use(as, iova);
- smmu->soc->ops->flush_dcache(page, offset, 4);
- smmu_flush_ptc(smmu, page, offset);
- smmu_flush_tlb_group(smmu, as->id, iova);
- smmu_flush(smmu);
+ tegra_smmu_set_pte(as, iova, pte, pte_dma,
+ __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
return 0;
}
@@ -563,22 +654,15 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct tegra_smmu *smmu = as->smmu;
- unsigned long offset;
- struct page *page;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
- if (!pte)
+ pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+ if (!pte || !*pte)
return 0;
- offset = offset_in_page(pte);
- as_put_pte(as, iova);
-
- smmu->soc->ops->flush_dcache(page, offset, 4);
- smmu_flush_ptc(smmu, page, offset);
- smmu_flush_tlb_group(smmu, as->id, iova);
- smmu_flush(smmu);
+ tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+ tegra_smmu_pte_put_use(as, iova);
return size;
}
@@ -587,11 +671,14 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct page *page;
unsigned long pfn;
+ dma_addr_t pte_dma;
u32 *pte;
- pte = as_get_pte(as, iova, &page);
+ pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+ if (!pte || !*pte)
+ return 0;
+
pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
@@ -816,6 +903,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
+ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+ smmu->tlb_mask);
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
@@ -825,14 +915,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu_writel(smmu, value, SMMU_PTC_CONFIG);
value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
if (soc->supports_round_robin_arbitration)
value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
smmu_writel(smmu, value, SMMU_TLB_CONFIG);
- smmu_flush_ptc(smmu, NULL, 0);
+ smmu_flush_ptc_all(smmu);
smmu_flush_tlb(smmu);
smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
smmu_flush(smmu);
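
The iova_pd_index()/iova_pt_index() helpers added above split a 32-bit IOVA into a 10-bit page-directory index, a 10-bit page-table index and a 12-bit page offset. A standalone sketch of the arithmetic, assuming SMMU_PDE_SHIFT is 22 and SMMU_PTE_SHIFT is 12 (consistent with the 0x3ff masks this patch removes):

#include <stdio.h>

#define SMMU_PDE_SHIFT	22	/* assumed from the 10-bit masks above */
#define SMMU_PTE_SHIFT	12

int main(void)
{
	unsigned long iova = 0x12345678;

	printf("pd=%#lx pt=%#lx off=%#lx\n",
	       (iova >> SMMU_PDE_SHIFT) & 0x3ff,	/* 0x48  */
	       (iova >> SMMU_PTE_SHIFT) & 0x3ff,	/* 0x345 */
	       iova & 0xfff);				/* 0x678 */
	return 0;
}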
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 120d81543e53..27b52c8729cd 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -61,6 +61,10 @@ config ATMEL_AIC5_IRQ
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
+config I8259
+ bool
+ select IRQ_DOMAIN
+
config BCM7038_L1_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -177,3 +181,9 @@ config RENESAS_H8300H_INTC
config RENESAS_H8S_INTC
bool
select IRQ_DOMAIN
+
+config IMX_GPCV2
+ bool
+ select IRQ_DOMAIN
+ help
+ Enables the wakeup IRQs for IMX platforms with the GPCv2 block
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b8d4e9691890..bb3048f00e64 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
@@ -22,11 +23,12 @@ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
+obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
@@ -52,3 +54,4 @@ obj-$(CONFIG_RENESAS_H8300H_INTC) += irq-renesas-h8300h.o
obj-$(CONFIG_RENESAS_H8S_INTC) += irq-renesas-h8s.o
obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
+obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 5c82e3bdafdf..e9c6f2a5b52d 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -15,13 +15,12 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
#define COMBINER_ENABLE_SET 0x0
#define COMBINER_ENABLE_CLEAR 0x4
#define COMBINER_INT_STATUS 0xC
@@ -66,10 +65,12 @@ static void combiner_unmask_irq(struct irq_data *data)
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
-static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static void combiner_handle_cascade_irq(unsigned int __irq,
+ struct irq_desc *desc)
{
- struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq, combiner_irq;
unsigned long status;
@@ -122,9 +123,8 @@ static struct irq_chip combiner_chip = {
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
unsigned int irq)
{
- if (irq_set_handler_data(irq, combiner_data) != 0)
- BUG();
- irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+ irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
+ combiner_data);
}
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
@@ -185,14 +185,14 @@ static void __init combiner_init(void __iomem *combiner_base,
combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
if (!combiner_data) {
- pr_warning("%s: could not allocate combiner data\n", __func__);
+ pr_warn("%s: could not allocate combiner data\n", __func__);
return;
}
combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
- pr_warning("%s: irq domain init failed\n", __func__);
+ pr_warn("%s: irq domain init failed\n", __func__);
return;
}
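
The combiner hunk above is the first of several in this series converting the two-step handler installation to irq_set_chained_handler_and_data(). The combined call installs the handler and its data atomically, so the cascade handler can never run before its handler data is in place:

/* Old, two-step installation (small window with no handler data): */
irq_set_handler_data(parent_irq, combiner_data);
irq_set_chained_handler(parent_irq, combiner_handle_cascade_irq);

/* New, atomic equivalent used throughout this series: */
irq_set_chained_handler_and_data(parent_irq, combiner_handle_cascade_irq,
				 combiner_data);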
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 0d3b0fe2f175..39b72da0c143 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
@@ -33,8 +34,6 @@
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
@@ -451,7 +450,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long irqmap, irqn, irqsrc, cpuid;
unsigned int cascade_irq;
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index dae3604b32a9..8a0c7f288198 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -19,6 +19,7 @@
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -31,7 +32,6 @@
#include <asm/mach/irq.h>
#include "irq-atmel-aic-common.h"
-#include "irqchip.h"
/* Number of irq lines managed by AIC */
#define NR_AIC_IRQS 32
@@ -225,7 +225,7 @@ static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
aic_common_rtt_irq_fixup(root);
}
-static const struct of_device_id __initdata aic_irq_fixups[] = {
+static const struct of_device_id aic_irq_fixups[] __initconst = {
{ .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
{ .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
{ .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 459bf4429d36..9da9942ac83c 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -19,6 +19,7 @@
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -31,7 +32,6 @@
#include <asm/mach/irq.h>
#include "irq-atmel-aic-common.h"
-#include "irqchip.h"
/* Number of irq lines managed by AIC */
#define NR_AIC5_IRQS 128
@@ -290,7 +290,7 @@ static void __init sama5d3_aic_irq_fixup(struct device_node *root)
aic_common_rtc_irq_fixup(root);
}
-static const struct of_device_id __initdata aic5_irq_fixups[] = {
+static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index e68c3b60a681..ed4ca9deca70 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -48,13 +48,12 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
#define HWIRQ_BANK(i) (i >> 5)
@@ -76,10 +75,10 @@
#define NR_BANKS 3
#define IRQS_PER_BANK 32
-static int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
-static int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
-static int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
-static int bank_irqs[] __initconst = { 8, 32, 32 };
+static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
+static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
+static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
+static const int bank_irqs[] __initconst = { 8, 32, 32 };
static const int shortcuts[] = {
7, 9, 10, 18, 19, /* Bank 1 */
@@ -97,6 +96,7 @@ struct armctrl_ic {
static struct armctrl_ic intc __read_mostly;
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc);
static void armctrl_mask_irq(struct irq_data *d)
{
@@ -140,7 +140,8 @@ static const struct irq_domain_ops armctrl_ops = {
};
static int __init armctrl_of_init(struct device_node *node,
- struct device_node *parent)
+ struct device_node *parent,
+ bool is_2836)
{
void __iomem *base;
int irq, b, i;
@@ -169,54 +170,90 @@ static int __init armctrl_of_init(struct device_node *node,
}
}
- set_handle_irq(bcm2835_handle_irq);
+ if (is_2836) {
+ int parent_irq = irq_of_parse_and_map(node, 0);
+
+ if (!parent_irq) {
+ panic("%s: unable to get parent interrupt.\n",
+ node->full_name);
+ }
+ irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
+ } else {
+ set_handle_irq(bcm2835_handle_irq);
+ }
+
return 0;
}
+static int __init bcm2835_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, false);
+}
+
+static int __init bcm2836_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, true);
+}
+
+
/*
* Handle each interrupt across the entire interrupt controller. This reads the
* status register before handling each interrupt, which is necessary given that
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
-static void armctrl_handle_bank(int bank, struct pt_regs *regs)
+static u32 armctrl_translate_bank(int bank)
{
- u32 stat, irq;
+ u32 stat = readl_relaxed(intc.pending[bank]);
- while ((stat = readl_relaxed(intc.pending[bank]))) {
- irq = MAKE_HWIRQ(bank, ffs(stat) - 1);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
- }
+ return MAKE_HWIRQ(bank, ffs(stat) - 1);
+}
+
+static u32 armctrl_translate_shortcut(int bank, u32 stat)
+{
+ return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
}
-static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
- u32 stat)
+static u32 get_next_armctrl_hwirq(void)
{
- u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
+ u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
+
+ if (stat == 0)
+ return ~0;
+ else if (stat & BANK0_HWIRQ_MASK)
+ return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
+ else if (stat & SHORTCUT1_MASK)
+ return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
+ else if (stat & SHORTCUT2_MASK)
+ return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
+ else if (stat & BANK1_HWIRQ)
+ return armctrl_translate_bank(1);
+ else if (stat & BANK2_HWIRQ)
+ return armctrl_translate_bank(2);
+ else
+ BUG();
}
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
- u32 stat, irq;
-
- while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) {
- if (stat & BANK0_HWIRQ_MASK) {
- irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
- } else if (stat & SHORTCUT1_MASK) {
- armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK);
- } else if (stat & SHORTCUT2_MASK) {
- armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK);
- } else if (stat & BANK1_HWIRQ) {
- armctrl_handle_bank(1, regs);
- } else if (stat & BANK2_HWIRQ) {
- armctrl_handle_bank(2, regs);
- } else {
- BUG();
- }
- }
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+}
+
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ generic_handle_irq(irq_linear_revmap(intc.domain, hwirq));
}
-IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+ bcm2835_armctrl_of_init);
+IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
+ bcm2836_armctrl_of_init);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
new file mode 100644
index 000000000000..f68708281fcf
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -0,0 +1,275 @@
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <asm/exception.h>
+
+/*
+ * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
+ * next 2 bits identify the CPU that the GPU FIQ goes to.
+ */
+#define LOCAL_GPU_ROUTING 0x00c
+/* Setting bits 0-3 enables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_SET 0x010
+/* Setting bits 0-3 disables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_CLR 0x014
+/*
+ * The low 4 bits of this are the CPU's timer IRQ enables, and the
+ * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
+ * bits).
+ */
+#define LOCAL_TIMER_INT_CONTROL0 0x040
+/*
+ * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
+ * the next 4 bits are the CPU's per-mailbox FIQ enables (which
+ * override the IRQ bits).
+ */
+#define LOCAL_MAILBOX_INT_CONTROL0 0x050
+/*
+ * The CPU's interrupt status register. Bits are defined by the
+ * LOCAL_IRQ_* bits below.
+ */
+#define LOCAL_IRQ_PENDING0 0x060
+/* Same status bits as above, but for FIQ. */
+#define LOCAL_FIQ_PENDING0 0x070
+/*
+ * Mailbox0 write-to-set bits. There are 16 mailboxes, 4 per CPU, and
+ * these bits are organized by mailbox number and then CPU number. We
+ * use mailbox 0 for IPIs. The mailbox's interrupt is raised while
+ * any bit is set.
+ */
+#define LOCAL_MAILBOX0_SET0 0x080
+/* Mailbox0 write-to-clear bits. */
+#define LOCAL_MAILBOX0_CLR0 0x0c0
+
+#define LOCAL_IRQ_CNTPSIRQ 0
+#define LOCAL_IRQ_CNTPNSIRQ 1
+#define LOCAL_IRQ_CNTHPIRQ 2
+#define LOCAL_IRQ_CNTVIRQ 3
+#define LOCAL_IRQ_MAILBOX0 4
+#define LOCAL_IRQ_MAILBOX1 5
+#define LOCAL_IRQ_MAILBOX2 6
+#define LOCAL_IRQ_MAILBOX3 7
+#define LOCAL_IRQ_GPU_FAST 8
+#define LOCAL_IRQ_PMU_FAST 9
+#define LAST_IRQ LOCAL_IRQ_PMU_FAST
+
+struct bcm2836_arm_irqchip_intc {
+ struct irq_domain *domain;
+ void __iomem *base;
+};
+
+static struct bcm2836_arm_irqchip_intc intc __read_mostly;
+
+static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) & ~BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) | BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static struct irq_chip bcm2836_arm_irqchip_timer = {
+ .name = "bcm2836-timer",
+ .irq_mask = bcm2836_arm_irqchip_mask_timer_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR);
+}
+
+static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_pmu = {
+ .name = "bcm2836-pmu",
+ .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
+{
+}
+
+static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_gpu = {
+ .name = "bcm2836-gpu",
+ .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
+};
+
+static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
+{
+ int irq = irq_create_mapping(intc.domain, hwirq);
+
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+}
+
+static void
+__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ u32 stat;
+
+ stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
+ if (stat & 0x10) {
+#ifdef CONFIG_SMP
+ void __iomem *mailbox0 = (intc.base +
+ LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+ u32 mbox_val = readl(mailbox0);
+ u32 ipi = ffs(mbox_val) - 1;
+
+ writel(1 << ipi, mailbox0);
+ handle_IPI(ipi, regs);
+#endif
+ } else {
+ u32 hwirq = ffs(stat) - 1;
+
+ handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
+ unsigned int ipi)
+{
+ int cpu;
+ void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
+
+ /*
+ * Ensure that stores to normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ for_each_cpu(cpu, mask) {
+ writel(1 << ipi, mailbox0_base + 16 * cpu);
+ }
+}
+
+/* Unmasks the IPI on the CPU when it's online. */
+static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
+ unsigned int mailbox = 0;
+
+ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
+ else if (action == CPU_DYING)
+ bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
+ .notifier_call = bcm2836_arm_irqchip_cpu_notify,
+ .priority = 100,
+};
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+ .xlate = irq_domain_xlate_onecell
+};
+
+static void
+bcm2836_arm_irqchip_smp_init(void)
+{
+#ifdef CONFIG_SMP
+ /* Unmask IPIs to the boot CPU. */
+ bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
+ CPU_STARTING,
+ (void *)smp_processor_id());
+ register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+
+ set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+#endif
+}
+
+static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ intc.base = of_iomap(node, 0);
+ if (!intc.base) {
+ panic("%s: unable to map local interrupt registers\n",
+ node->full_name);
+ }
+
+ intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+ &bcm2836_arm_irqchip_intc_ops,
+ NULL);
+ if (!intc.domain)
+ panic("%s: unable to create IRQ domain\n", node->full_name);
+
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
+ &bcm2836_arm_irqchip_gpu);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
+ &bcm2836_arm_irqchip_pmu);
+
+ bcm2836_arm_irqchip_smp_init();
+
+ set_handle_irq(bcm2836_arm_irqchip_handle_irq);
+ return 0;
+}
+
+IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc",
+ bcm2836_arm_irqchip_l1_intc_of_init);
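
In the pending-register decode above, the 0x10 test is BIT(LOCAL_IRQ_MAILBOX0), i.e. "mailbox 0 raised an IPI". The register layout described in the header comments implies the following per-CPU mailbox addressing (a sketch, not part of the driver):

/*
 * 16 mailboxes, 4 per CPU, 4 bytes each: mailbox M of CPU N sits at
 * 16 * N + 4 * M within each of the set/clear banks.
 */
static inline void __iomem *mailbox_set_reg(int cpu, int mbox)
{
	return intc.base + LOCAL_MAILBOX0_SET0 + 16 * cpu + 4 * mbox;
}

static inline void __iomem *mailbox_clr_reg(int cpu, int mbox)
{
	return intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu + 4 * mbox;
}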
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index d3b8c8be15f6..409bdc6366c2 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -29,10 +29,9 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
#define MAX_WORDS 8
@@ -257,8 +256,8 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
- irq_set_handler_data(parent_irq, intc);
- irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle);
+ irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
+ intc);
return 0;
}
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 3ba5cc780fcb..d3f976913a6f 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -26,10 +26,9 @@
#include <linux/irqdomain.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
/* Register offset in the L2 interrupt controller */
#define IRQEN 0x00
#define IRQSTAT 0x04
@@ -38,6 +37,11 @@
#define MAX_MAPPINGS (MAX_WORDS * 2)
#define IRQS_PER_WORD 32
+struct bcm7120_l1_intc_data {
+ struct bcm7120_l2_intc_data *b;
+ u32 irq_map_mask[MAX_WORDS];
+};
+
struct bcm7120_l2_intc_data {
unsigned int n_words;
void __iomem *map_base[MAX_MAPPINGS];
@@ -47,14 +51,15 @@ struct bcm7120_l2_intc_data {
struct irq_domain *domain;
bool can_wake;
u32 irq_fwd_mask[MAX_WORDS];
- u32 irq_map_mask[MAX_WORDS];
+ struct bcm7120_l1_intc_data *l1_data;
int num_parent_irqs;
const __be32 *map_mask_prop;
};
static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
{
- struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc);
+ struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
+ struct bcm7120_l2_intc_data *b = data->b;
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
@@ -69,7 +74,8 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
irq_gc_lock(gc);
pending = irq_reg_readl(gc, b->stat_offset[idx]) &
- gc->mask_cache;
+ gc->mask_cache &
+ data->irq_map_mask[idx];
irq_gc_unlock(gc);
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
@@ -81,11 +87,10 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void bcm7120_l2_intc_suspend(struct irq_data *d)
+static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct bcm7120_l2_intc_data *b = gc->private;
+ struct irq_chip_type *ct = gc->chip_types;
irq_gc_lock(gc);
if (b->can_wake)
@@ -94,10 +99,9 @@ static void bcm7120_l2_intc_suspend(struct irq_data *d)
irq_gc_unlock(gc);
}
-static void bcm7120_l2_intc_resume(struct irq_data *d)
+static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct irq_chip_type *ct = gc->chip_types;
/* Restore the saved mask */
irq_gc_lock(gc);
@@ -107,8 +111,9 @@ static void bcm7120_l2_intc_resume(struct irq_data *d)
static int bcm7120_l2_intc_init_one(struct device_node *dn,
struct bcm7120_l2_intc_data *data,
- int irq)
+ int irq, u32 *valid_mask)
{
+ struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq];
int parent_irq;
unsigned int idx;
@@ -120,20 +125,28 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
/* For multiple parent IRQs with multiple words, this looks like:
* <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+ *
+ * We need to associate a given parent interrupt with its corresponding
+ * map_mask in order to mask the status register with it, since the
+ * same handler is called for multiple parent interrupts.
+ *
+ * This is typically something needed on BCM7xxx (STB chips).
*/
for (idx = 0; idx < data->n_words; idx++) {
if (data->map_mask_prop) {
- data->irq_map_mask[idx] |=
+ l1_data->irq_map_mask[idx] |=
be32_to_cpup(data->map_mask_prop +
irq * data->n_words + idx);
} else {
- data->irq_map_mask[idx] = 0xffffffff;
+ l1_data->irq_map_mask[idx] = 0xffffffff;
}
+ valid_mask[idx] |= l1_data->irq_map_mask[idx];
}
- irq_set_handler_data(parent_irq, data);
- irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
+ l1_data->b = data;
+ irq_set_chained_handler_and_data(parent_irq,
+ bcm7120_l2_intc_irq_handle, l1_data);
return 0;
}
@@ -214,6 +227,7 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
struct irq_chip_type *ct;
int ret = 0;
unsigned int idx, irq, flags;
+ u32 valid_mask[MAX_WORDS] = { };
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -226,9 +240,16 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
goto out_unmap;
}
+ data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data),
+ GFP_KERNEL);
+ if (!data->l1_data) {
+ ret = -ENOMEM;
+ goto out_free_l1_data;
+ }
+
ret = iomap_regs_fn(dn, data);
if (ret < 0)
- goto out_unmap;
+ goto out_free_l1_data;
for (idx = 0; idx < data->n_words; idx++) {
__raw_writel(data->irq_fwd_mask[idx],
@@ -237,16 +258,16 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
}
for (irq = 0; irq < data->num_parent_irqs; irq++) {
- ret = bcm7120_l2_intc_init_one(dn, data, irq);
+ ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
if (ret)
- goto out_unmap;
+ goto out_free_l1_data;
}
data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
- goto out_unmap;
+ goto out_free_l1_data;
}
/* MIPS chips strapped for BE will automagically configure the
@@ -270,7 +291,7 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
irq = idx * IRQS_PER_WORD;
gc = irq_get_domain_generic_chip(data->domain, irq);
- gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
+ gc->unused = 0xffffffff & ~valid_mask[idx];
gc->private = data;
ct = gc->chip_types;
@@ -280,8 +301,15 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_ack = irq_gc_noop;
- ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
- ct->chip.irq_resume = bcm7120_l2_intc_resume;
+ gc->suspend = bcm7120_l2_intc_suspend;
+ gc->resume = bcm7120_l2_intc_resume;
+
+ /*
+ * Initialize the mask cache, in case we need it for
+ * saving/restoring the forward mask even without any child
+ * interrupts installed
+ */
+ gc->mask_cache = irq_reg_readl(gc, ct->regs.mask);
if (data->can_wake) {
/* This IRQ chip can wake the system, set all
@@ -300,6 +328,8 @@ int __init bcm7120_l2_intc_probe(struct device_node *dn,
out_free_domain:
irq_domain_remove(data->domain);
+out_free_l1_data:
+ kfree(data->l1_data);
out_unmap:
for (idx = 0; idx < MAX_MAPPINGS; idx++) {
if (data->map_base[idx])
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index d6bcc6be0777..aedda06191eb 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -32,8 +32,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
/* Register offsets in the L2 interrupt controller */
#define CPU_STATUS 0x00
#define CPU_SET 0x04
@@ -51,11 +49,13 @@ struct brcmstb_l2_intc_data {
u32 saved_mask; /* for suspend/resume */
};
-static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
+ struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
u32 status;
chained_irq_enter(chip, desc);
@@ -172,8 +172,8 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
}
/* Set the IRQ chaining logic */
- irq_set_handler_data(data->parent_irq, data);
- irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle);
+ irq_set_chained_handler_and_data(data->parent_irq,
+ brcmstb_l2_intc_irq_handle, data);
gc = irq_get_domain_generic_chip(data->domain, 0);
gc->reg_base = data->base;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 33127f131d78..2dd929eed9e0 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -19,8 +20,6 @@
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define CLPS711X_INTSR1 (0x0240)
#define CLPS711X_INTMR1 (0x0280)
#define CLPS711X_BLEOI (0x0600)
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..a7f5626930f5 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -11,13 +11,12 @@
*/
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
-#include "irqchip.h"
-
#define IRQ_FREE -1
#define IRQ_RESERVED -2
#define IRQ_SKIP -3
@@ -68,7 +67,9 @@ static struct irq_chip crossbar_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_wake = irq_chip_set_wake_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3cbc658afe27..dad85e74c37c 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -20,8 +21,6 @@
#include <asm/exception.h>
-#include "irqchip.h"
-
#define UC_IRQ_CONTROL 0x04
#define IC_FLAG_CLEAR_LO 0x00
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 53bb7326a60a..efd95d9955e7 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -13,36 +13,36 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
#define APB_INT_ENABLE_L 0x00
#define APB_INT_ENABLE_H 0x04
#define APB_INT_MASK_L 0x08
#define APB_INT_MASK_H 0x0c
#define APB_INT_FINALSTATUS_L 0x30
#define APB_INT_FINALSTATUS_H 0x34
+#define APB_INT_BASE_OFFSET 0x04
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct irq_chip_generic *gc = irq_get_handler_data(irq);
- struct irq_domain *d = gc->private;
- u32 stat;
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int n;
chained_irq_enter(chip, desc);
- for (n = 0; n < gc->num_ct; n++) {
- stat = readl_relaxed(gc->reg_base +
- APB_INT_FINALSTATUS_L + 4 * n);
+ for (n = 0; n < d->revmap_size; n += 32) {
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+ u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
while (stat) {
u32 hwirq = ffs(stat) - 1;
- generic_handle_irq(irq_find_mapping(d,
- gc->irq_base + hwirq + 32 * n));
+ u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
+
+ generic_handle_irq(virq);
stat &= ~(1 << hwirq);
}
}
@@ -73,7 +73,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
struct irq_domain *domain;
struct irq_chip_generic *gc;
void __iomem *iobase;
- int ret, nrirqs, irq;
+ int ret, nrirqs, irq, i;
u32 reg;
/* Map the parent interrupt for the chained handler */
@@ -128,35 +128,25 @@ static int __init dw_apb_ictl_init(struct device_node *np,
goto err_unmap;
}
- ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
- np->name, handle_level_irq, clr, 0,
- IRQ_GC_MASK_CACHE_PER_TYPE |
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
+ handle_level_irq, clr, 0,
IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
goto err_unmap;
}
- gc = irq_get_domain_generic_chip(domain, 0);
- gc->private = domain;
- gc->reg_base = iobase;
-
- gc->chip_types[0].regs.mask = APB_INT_MASK_L;
- gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
-
- if (nrirqs > 32) {
- gc->chip_types[1].regs.mask = APB_INT_MASK_H;
- gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
- gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
- gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
- gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
+ for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+ gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
+ gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+ gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
}
- irq_set_handler_data(irq, gc);
- irq_set_chained_handler(irq, dw_apb_ictl_handler);
+ irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);
return 0;
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index fdf706555d72..db04fc1f56b2 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -45,13 +45,11 @@
struct v2m_data {
spinlock_t msi_cnt_lock;
- struct msi_controller mchip;
struct resource res; /* GICv2m resource */
void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */
u32 nr_spis; /* The number of SPIs for MSIs */
unsigned long *bm; /* MSI vector bitmap */
- struct irq_domain *domain;
};
static void gicv2m_mask_msi_irq(struct irq_data *d)
@@ -213,11 +211,25 @@ static bool is_msi_spi_valid(u32 base, u32 num)
return true;
}
+static struct irq_chip gicv2m_pmsi_irq_chip = {
+ .name = "pMSI",
+};
+
+static struct msi_domain_ops gicv2m_pmsi_ops = {
+};
+
+static struct msi_domain_info gicv2m_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &gicv2m_pmsi_ops,
+ .chip = &gicv2m_pmsi_irq_chip,
+};
+
static int __init gicv2m_init_one(struct device_node *node,
struct irq_domain *parent)
{
int ret;
struct v2m_data *v2m;
+ struct irq_domain *inner_domain, *pci_domain, *plat_domain;
v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
if (!v2m) {
@@ -261,32 +273,28 @@ static int __init gicv2m_init_one(struct device_node *node,
goto err_iounmap;
}
- v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
- if (!v2m->domain) {
+ inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m);
+ if (!inner_domain) {
pr_err("Failed to create GICv2m domain\n");
ret = -ENOMEM;
goto err_free_bm;
}
- v2m->domain->parent = parent;
- v2m->mchip.of_node = node;
- v2m->mchip.domain = pci_msi_create_irq_domain(node,
- &gicv2m_msi_domain_info,
- v2m->domain);
- if (!v2m->mchip.domain) {
- pr_err("Failed to create MSI domain\n");
+ inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ inner_domain->parent = parent;
+ pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
+ inner_domain);
+ plat_domain = platform_msi_create_irq_domain(node,
+ &gicv2m_pmsi_domain_info,
+ inner_domain);
+ if (!pci_domain || !plat_domain) {
+ pr_err("Failed to create MSI domains\n");
ret = -ENOMEM;
goto err_free_domains;
}
spin_lock_init(&v2m->msi_cnt_lock);
- ret = of_pci_msi_chip_add(&v2m->mchip);
- if (ret) {
- pr_err("Failed to add msi_chip.\n");
- goto err_free_domains;
- }
-
pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
(unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
@@ -294,10 +302,12 @@ static int __init gicv2m_init_one(struct device_node *node,
return 0;
err_free_domains:
- if (v2m->mchip.domain)
- irq_domain_remove(v2m->mchip.domain);
- if (v2m->domain)
- irq_domain_remove(v2m->domain);
+ if (plat_domain)
+ irq_domain_remove(plat_domain);
+ if (pci_domain)
+ irq_domain_remove(pci_domain);
+ if (inner_domain)
+ irq_domain_remove(inner_domain);
err_free_bm:
kfree(v2m->bm);
err_iounmap:
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
new file mode 100644
index 000000000000..cf351c637464
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-MSI",
+ .irq_unmask = its_unmask_msi_irq,
+ .irq_mask = its_mask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+struct its_pci_alias {
+ struct pci_dev *pdev;
+ u32 dev_id;
+ u32 count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+ int msi, msix;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+
+ return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct its_pci_alias *dev_alias = data;
+
+ dev_alias->dev_id = alias;
+ if (pdev != dev_alias->pdev)
+ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+ return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev;
+ struct its_pci_alias dev_alias;
+ struct msi_domain_info *msi_info;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ pdev = to_pci_dev(dev);
+ dev_alias.pdev = pdev;
+ dev_alias.count = nvec;
+
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_alias.dev_id;
+
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, dev_alias.count, info);
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+ .msi_prepare = its_pci_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &its_pci_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pci_msi_init(void)
+{
+ struct device_node *np;
+ struct irq_domain *parent;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n",
+ np->full_name);
+ continue;
+ }
+
+ if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info,
+ parent)) {
+ pr_err("%s: unable to create PCI domain\n",
+ np->full_name);
+ continue;
+ }
+
+ pr_info("PCI/MSI: %s domain created\n", np->full_name);
+ }
+
+ return 0;
+}
+early_initcall(its_pci_msi_init);
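
A note on the alias walk in its_pci_msi_prepare(): pci_for_each_dma_alias() invokes its_get_pci_alias() for the device itself and then for every alias on the way up to the host bridge, so dev_alias.dev_id ends up as the topmost RID the ITS actually sees, while dev_alias.count grows by the device's own vector count once per foreign alias. Roughly (RIDs hypothetical):

/*
 * Device 01:00.0 behind a PCIe-to-PCI bridge (illustrative only):
 *
 *   its_get_pci_alias(pdev = 01:00.0, alias = 0x0100)
 *       -> dev_id = 0x0100, count unchanged (pdev == dev_alias->pdev)
 *   its_get_pci_alias(pdev = bridge,  alias = 0x00f0)
 *       -> dev_id = 0x00f0, count += its_pci_msi_vec_count(01:00.0)
 *
 * The ITT is then sized for dev_alias.count events under DeviceID
 * 0x00f0, the RID presented to the ITS.
 */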
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
new file mode 100644
index 000000000000..a86550562779
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+static struct irq_chip its_pmsi_irq_chip = {
+ .name = "ITS-pMSI",
+};
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ /* Suck the DeviceID out of the msi-parent property */
+ ret = of_property_read_u32_index(dev->of_node, "msi-parent",
+ 1, &dev_id);
+ if (ret)
+ return ret;
+
+ /* ITS-specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_id;
+
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pmsi_ops = {
+ .msi_prepare = its_pmsi_prepare,
+};
+
+static struct msi_domain_info its_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &its_pmsi_ops,
+ .chip = &its_pmsi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pmsi_init(void)
+{
+ struct device_node *np;
+ struct irq_domain *parent;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n",
+ np->full_name);
+ continue;
+ }
+
+ if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info,
+ parent)) {
+ pr_err("%s: unable to create platform domain\n",
+ np->full_name);
+ continue;
+ }
+
+ pr_info("Platform MSI: %s domain created\n", np->full_name);
+ }
+
+ return 0;
+}
+early_initcall(its_pmsi_init);
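
The DeviceID is taken from the second cell of the consumer's msi-parent property, which is what of_property_read_u32_index(..., 1, ...) above expects. A hypothetical device-tree fragment that would satisfy it (all names, addresses and the 0x4600 ID are invented):

	its: interrupt-controller@2f020000 {
		compatible = "arm,gic-v3-its";
		msi-controller;
		reg = <0x0 0x2f020000 0x0 0x20000>;
	};

	demo-dma@7ff00000 {
		compatible = "vendor,demo-dma";
		/* cell 0: phandle, cell 1: the ITS DeviceID */
		msi-parent = <&its 0x4600>;
	};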
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1b7e155869f6..26b55c53755f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -30,14 +30,13 @@
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -54,14 +53,12 @@ struct its_collection {
/*
* The ITS structure - contains most of the infrastructure, with the
- * msi_controller, the command queue, the collections, and the list of
- * devices writing to it.
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
*/
struct its_node {
raw_spinlock_t lock;
struct list_head entry;
- struct msi_controller msi_chip;
- struct irq_domain *domain;
void __iomem *base;
unsigned long phys_base;
struct its_cmd_block *cmd_base;
@@ -75,6 +72,13 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+struct event_lpi_map {
+ unsigned long *lpi_map;
+ u16 *col_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+};
+
/*
* The ITS view of a device - belongs to an ITS, a collection, owns an
* interrupt translation table, and a list of interrupts.
@@ -82,11 +86,8 @@ struct its_node {
struct its_device {
struct list_head entry;
struct its_node *its;
- struct its_collection *collection;
+ struct event_lpi_map event_map;
void *itt;
- unsigned long *lpi_map;
- irq_hw_number_t lpi_base;
- int nr_lpis;
u32 nr_ites;
u32 device_id;
};
@@ -99,6 +100,14 @@ static struct rdists *gic_rdists;
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+ u32 event)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->collections + its_dev->event_map.col_map[event];
+}
+
/*
* ITS command descriptors - parameters to be encoded in a command
* block.
@@ -134,7 +143,7 @@ struct its_cmd_desc {
struct {
struct its_device *dev;
struct its_collection *col;
- u32 id;
+ u32 event_id;
} its_movi_cmd;
struct {
@@ -241,7 +250,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
its_fixup_cmd(cmd);
- return desc->its_mapd_cmd.dev->collection;
+ return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +269,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_mapvi_cmd.dev,
+ desc->its_mapvi_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_MAPVI);
its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
- its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+ its_encode_collection(cmd, col->col_id);
its_fixup_cmd(cmd);
- return desc->its_mapvi_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_movi_cmd.dev,
+ desc->its_movi_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_MOVI);
its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
- its_encode_event_id(cmd, desc->its_movi_cmd.id);
+ its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
its_fixup_cmd(cmd);
- return desc->its_movi_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_discard_cmd.dev,
+ desc->its_discard_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_DISCARD);
its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
its_fixup_cmd(cmd);
- return desc->its_discard_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
its_encode_cmd(cmd, GITS_CMD_INV);
its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
its_fixup_cmd(cmd);
- return desc->its_inv_cmd.dev->collection;
+ return col;
}
static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +526,7 @@ static void its_send_movi(struct its_device *dev,
desc.its_movi_cmd.dev = dev;
desc.its_movi_cmd.col = col;
- desc.its_movi_cmd.id = id;
+ desc.its_movi_cmd.event_id = id;
its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
@@ -528,7 +557,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
static inline u32 its_get_event_id(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- return d->hwirq - its_dev->lpi_base;
+ return d->hwirq - its_dev->event_map.lpi_base;
}
static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +612,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
- its_dev->collection = target_col;
+ its_dev->event_map.col_map[id] = cpu;
return IRQ_SET_MASK_OK_DONE;
}
@@ -611,26 +640,6 @@ static struct irq_chip its_irq_chip = {
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
-static void its_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void its_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip its_msi_irq_chip = {
- .name = "ITS-MSI",
- .irq_unmask = its_unmask_msi_irq,
- .irq_mask = its_mask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
-};
-
/*
* How we allocate LPIs:
*
@@ -713,8 +722,10 @@ out:
return bitmap;
}
-static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(struct event_lpi_map *map)
{
+ int base = map->lpi_base;
+ int nr_ids = map->nr_lpis;
int lpi;
spin_lock(&lpi_lock);
@@ -731,7 +742,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
spin_unlock(&lpi_lock);
- kfree(bitmap);
+ kfree(map->lpi_map);
+ kfree(map->col_map);
}
/*
@@ -796,7 +808,7 @@ static void its_free_tables(struct its_node *its)
}
}
-static int its_alloc_tables(struct its_node *its)
+static int its_alloc_tables(const char *node_name, struct its_node *its)
{
int err;
int i;
@@ -839,7 +851,7 @@ static int its_alloc_tables(struct its_node *its)
if (order >= MAX_ORDER) {
order = MAX_ORDER - 1;
pr_warn("%s: Device Table too large, reduce its page order to %u\n",
- its->msi_chip.of_node->full_name, order);
+ node_name, order);
}
}
@@ -909,7 +921,7 @@ retry_baser:
if (val != tmp) {
pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
- its->msi_chip.of_node->full_name, i,
+ node_name, i,
(unsigned long) val, (unsigned long) tmp);
err = -ENXIO;
goto out_free;
@@ -1099,11 +1111,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
+ u16 *col_map = NULL;
void *itt;
int lpi_base;
int nr_lpis;
int nr_ites;
- int cpu;
int sz;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1129,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ if (lpi_map)
+ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
- if (!dev || !itt || !lpi_map) {
+ if (!dev || !itt || !lpi_map || !col_map) {
kfree(dev);
kfree(itt);
kfree(lpi_map);
+ kfree(col_map);
return NULL;
}
dev->its = its;
dev->itt = itt;
dev->nr_ites = nr_ites;
- dev->lpi_map = lpi_map;
- dev->lpi_base = lpi_base;
- dev->nr_lpis = nr_lpis;
+ dev->event_map.lpi_map = lpi_map;
+ dev->event_map.col_map = col_map;
+ dev->event_map.lpi_base = lpi_base;
+ dev->event_map.nr_lpis = nr_lpis;
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -1138,10 +1154,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
list_add(&dev->entry, &its->its_device_list);
raw_spin_unlock_irqrestore(&its->lock, flags);
- /* Bind the device to the first possible CPU */
- cpu = cpumask_first(cpu_online_mask);
- dev->collection = &its->collections[cpu];
-
/* Map device to its ITT */
its_send_mapd(dev, 1);
@@ -1163,39 +1175,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
int idx;
- idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
- if (idx == dev->nr_lpis)
+ idx = find_first_zero_bit(dev->event_map.lpi_map,
+ dev->event_map.nr_lpis);
+ if (idx == dev->event_map.nr_lpis)
return -ENOSPC;
- *hwirq = dev->lpi_base + idx;
- set_bit(idx, dev->lpi_map);
-
- return 0;
-}
-
-struct its_pci_alias {
- struct pci_dev *pdev;
- u32 dev_id;
- u32 count;
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev)
-{
- int msi, msix;
-
- msi = max(pci_msi_vec_count(pdev), 0);
- msix = max(pci_msix_vec_count(pdev), 0);
-
- return max(msi, msix);
-}
-
-static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- struct its_pci_alias *dev_alias = data;
-
- dev_alias->dev_id = alias;
- if (pdev != dev_alias->pdev)
- dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+ *hwirq = dev->event_map.lpi_base + idx;
+ set_bit(idx, dev->event_map.lpi_map);
return 0;
}
@@ -1203,55 +1189,47 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
- struct pci_dev *pdev;
struct its_node *its;
struct its_device *its_dev;
- struct its_pci_alias dev_alias;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
- if (!dev_is_pci(dev))
- return -EINVAL;
-
- pdev = to_pci_dev(dev);
- dev_alias.pdev = pdev;
- dev_alias.count = nvec;
+ /*
+ * We ignore "dev" entierely, and rely on the dev_id that has
+ * been passed via the scratchpad. This limits this domain's
+ * usefulness to upper layers that definitely know that they
+ * are built on top of the ITS.
+ */
+ dev_id = info->scratchpad[0].ul;
- pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
- its = domain->parent->host_data;
+ msi_info = msi_get_domain_info(domain);
+ its = msi_info->data;
- its_dev = its_find_device(its, dev_alias.dev_id);
+ its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
* We already have seen this ID, probably through
* another alias (PCI bridge of some sort). No need to
* create the device.
*/
- dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+ pr_debug("Reusing ITT for devID %x\n", dev_id);
goto out;
}
- its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
+ its_dev = its_create_device(its, dev_id, nvec);
if (!its_dev)
return -ENOMEM;
- dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
- dev_alias.count, ilog2(dev_alias.count));
+ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
info->scratchpad[0].ptr = its_dev;
- info->scratchpad[1].ptr = dev;
return 0;
}
-static struct msi_domain_ops its_pci_msi_ops = {
+static struct msi_domain_ops its_msi_domain_ops = {
.msi_prepare = its_msi_prepare,
};
-static struct msi_domain_info its_pci_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .ops = &its_pci_msi_ops,
- .chip = &its_msi_irq_chip,
-};
-
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
unsigned int virq,
irq_hw_number_t hwirq)
@@ -1287,8 +1265,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq, &its_irq_chip, its_dev);
- dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
- (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+ pr_debug("ID:%d pID:%d vID:%d\n",
+ (int)(hwirq - its_dev->event_map.lpi_base),
+ (int) hwirq, virq + i);
}
return 0;
@@ -1300,6 +1279,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ /* Bind the LPI to the first possible CPU */
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
}
@@ -1327,17 +1309,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
u32 event = its_get_event_id(data);
/* Mark interrupt index as unused */
- clear_bit(event, its_dev->lpi_map);
+ clear_bit(event, its_dev->event_map.lpi_map);
/* Nuke the entry in the domain */
irq_domain_reset_irq_data(data);
}
/* If all interrupts have been freed, start mopping the floor */
- if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
- its_lpi_free(its_dev->lpi_map,
- its_dev->lpi_base,
- its_dev->nr_lpis);
+ if (bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)) {
+ its_lpi_free(&its_dev->event_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -1387,6 +1368,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
struct resource res;
struct its_node *its;
void __iomem *its_base;
+ struct irq_domain *inner_domain;
u32 val;
u64 baser, tmp;
int err;
@@ -1430,7 +1412,6 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res.start;
- its->msi_chip.of_node = node;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1440,7 +1421,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
}
its->cmd_write = its->cmd_base;
- err = its_alloc_tables(its);
+ err = its_alloc_tables(node->full_name, its);
if (err)
goto out_free_cmd;
@@ -1476,26 +1457,27 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
- if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
- if (!its->domain) {
+ if (of_property_read_bool(node, "msi-controller")) {
+ struct msi_domain_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
err = -ENOMEM;
goto out_free_tables;
}
- its->domain->parent = parent;
-
- its->msi_chip.domain = pci_msi_create_irq_domain(node,
- &its_pci_msi_domain_info,
- its->domain);
- if (!its->msi_chip.domain) {
+ inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
+ if (!inner_domain) {
err = -ENOMEM;
- goto out_free_domains;
+ kfree(info);
+ goto out_free_tables;
}
- err = of_pci_msi_chip_add(&its->msi_chip);
- if (err)
- goto out_free_domains;
+ inner_domain->parent = parent;
+ inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ info->ops = &its_msi_domain_ops;
+ info->data = its;
+ inner_domain->host_data = info;
}
spin_lock(&its_lock);
@@ -1504,11 +1486,6 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
return 0;
-out_free_domains:
- if (its->msi_chip.domain)
- irq_domain_remove(its->msi_chip.domain);
- if (its->domain)
- irq_domain_remove(its->domain);
out_free_tables:
its_free_tables(its);
out_free_cmd:
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c52f7ba205b4..7deed6ef54c2 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -25,14 +25,15 @@
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
+#include <asm/virt.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
struct redist_region {
void __iomem *redist_base;
@@ -50,6 +51,7 @@ struct gic_chip_data {
};
static struct gic_chip_data gic_data __read_mostly;
+static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
@@ -68,6 +70,11 @@ static inline int gic_irq_in_rdist(struct irq_data *d)
return gic_irq(d) < 32;
}
+static inline bool forwarded_irq(struct irq_data *d)
+{
+ return d->handler_data != NULL;
+}
+
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
@@ -231,6 +238,21 @@ static void gic_mask_irq(struct irq_data *d)
gic_poke_irq(d, GICD_ICENABLER);
}
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+ * When masking a forwarded interrupt, make sure it is
+ * deactivated as well.
+ *
+ * This ensures that an interrupt that is getting
+ * disabled/masked will not get "stuck", because there is
+ * no one to deactivate it (the guest is being terminated).
+ */
+ if (forwarded_irq(d))
+ gic_poke_irq(d, GICD_ICACTIVER);
+}
+
static void gic_unmask_irq(struct irq_data *d)
{
gic_poke_irq(d, GICD_ISENABLER);
@@ -296,6 +318,17 @@ static void gic_eoi_irq(struct irq_data *d)
gic_write_eoir(gic_irq(d));
}
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ /*
+ * No need to deactivate an LPI, or an interrupt that
+ * is getting forwarded to a vcpu.
+ */
+ if (gic_irq(d) >= 8192 || forwarded_irq(d))
+ return;
+ gic_write_dir(gic_irq(d));
+}
+
static int gic_set_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = gic_irq(d);
@@ -322,6 +355,12 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return gic_configure_irq(irq, type, base, rwp_wait);
}
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ d->handler_data = vcpu;
+ return 0;
+}
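
A hedged sketch of the consumer side: once a hypervisor stores its cookie via irq_set_vcpu_affinity(), forwarded_irq() above returns true, gic_eoimode1_eoi_irq() skips the DIR write, and gic_eoimode1_mask_irq() deactivates on mask. The helper name is invented; in-tree the natural caller is KVM's timer wiring:

	#include <linux/interrupt.h>

	/* Hypothetical: "vcpu" is whatever cookie the hypervisor uses;
	 * the GICv3 chip just stores it in irq_data::handler_data. */
	static int demo_forward_to_guest(unsigned int host_irq, void *vcpu)
	{
		return irq_set_vcpu_affinity(host_irq, vcpu);
	}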
+
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
u64 aff;
@@ -343,15 +382,26 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
+
+ if (static_key_true(&supports_deactivate))
+ gic_write_eoir(irqnr);
+
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
- gic_write_eoir(irqnr);
+ if (static_key_true(&supports_deactivate)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
+ gic_write_eoir(irqnr);
+ }
}
continue;
}
if (irqnr < 16) {
gic_write_eoir(irqnr);
+ if (static_key_true(&supports_deactivate))
+ gic_write_dir(irqnr);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#else
@@ -451,8 +501,13 @@ static void gic_cpu_sys_reg_init(void)
/* Set priority mask register */
gic_write_pmr(DEFAULT_PMR_VALUE);
- /* EOI deactivates interrupt too (mode 0) */
- gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+ if (static_key_true(&supports_deactivate)) {
+ /* EOI drops priority only (mode 1) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+ } else {
+ /* EOI deactivates interrupt too (mode 0) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+ }
/* ... and let's hit the road... */
gic_write_grpen1(1);
@@ -661,11 +716,29 @@ static struct irq_chip gic_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
+static struct irq_chip gic_eoimode1_chip = {
+ .name = "GICv3",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
+ struct irq_chip *chip = &gic_chip;
+
+ if (static_key_true(&supports_deactivate))
+ chip = &gic_eoimode1_chip;
+
/* SGIs are private to the core kernel */
if (hw < 16)
return -EPERM;
@@ -679,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
/* PPIs */
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
}
/* SPIs */
if (hw >= 32 && hw < gic_data.irq_nr) {
- irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
@@ -693,7 +766,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
if (hw >= 8192 && hw < GIC_ID_NR) {
if (!gic_dist_supports_lpis())
return -EPERM;
- irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID);
}
@@ -820,6 +893,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
+ if (!is_hyp_mode_available())
+ static_key_slow_dec(&supports_deactivate);
+
+ if (static_key_true(&supports_deactivate))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
+
gic_data.dist_base = dist_base;
gic_data.redist_regions = rdist_regs;
gic_data.nr_redist_regions = nr_redist_regions;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8d7e1c8b6d56..e6b7ed537952 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>
@@ -46,9 +47,9 @@
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
+#include <asm/virt.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
union gic_base {
void __iomem *common_base;
@@ -82,6 +83,8 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
+
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
@@ -137,6 +140,36 @@ static inline unsigned int gic_irq(struct irq_data *d)
return d->hwirq;
}
+static inline bool cascading_gic_irq(struct irq_data *d)
+{
+ void *data = irq_data_get_irq_handler_data(d);
+
+ /*
+ * If handler_data is pointing to one of the secondary GICs, then
+ * this is a cascading interrupt, and it cannot possibly be
+ * forwarded.
+ */
+ if (data >= (void *)(gic_data + 1) &&
+ data < (void *)(gic_data + MAX_GIC_NR))
+ return true;
+
+ return false;
+}
+
+static inline bool forwarded_irq(struct irq_data *d)
+{
+ /*
+ * A forwarded interrupt:
+ * - is on the primary GIC
+ * - has its handler_data set to a value
+ * - that isn't a secondary GIC
+ */
+ if (d->handler_data && !cascading_gic_irq(d))
+ return true;
+
+ return false;
+}
+
/*
* Routines to acknowledge, disable and enable interrupts
*/
@@ -157,6 +190,21 @@ static void gic_mask_irq(struct irq_data *d)
gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+ * When masking a forwarded interrupt, make sure it is
+ * deactivated as well.
+ *
+ * This ensures that an interrupt that is getting
+ * disabled/masked will not get "stuck", because there is
+ * no one to deactivate it (the guest is being terminated).
+ */
+ if (forwarded_irq(d))
+ gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
+}
+
static void gic_unmask_irq(struct irq_data *d)
{
gic_poke_irq(d, GIC_DIST_ENABLE_SET);
@@ -167,6 +215,15 @@ static void gic_eoi_irq(struct irq_data *d)
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ /* Do not deactivate an IRQ forwarded to a vcpu. */
+ if (forwarded_irq(d))
+ return;
+
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+}
+
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
@@ -233,6 +290,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return gic_configure_irq(gicirq, type, base, NULL);
}
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ /* Only interrupts on the primary GIC can be forwarded to a vcpu. */
+ if (cascading_gic_irq(d))
+ return -EINVAL;
+
+ d->handler_data = vcpu;
+ return 0;
+}
+
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
@@ -272,11 +339,15 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (likely(irqnr > 15 && irqnr < 1021)) {
+ if (static_key_true(&supports_deactivate))
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
if (irqnr < 16) {
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ if (static_key_true(&supports_deactivate))
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#endif
@@ -288,8 +359,8 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
- struct gic_chip_data *chip_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq, gic_irq;
unsigned long status;
@@ -324,16 +395,34 @@ static struct irq_chip gic_chip = {
#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+ .name = "GICv2",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
if (gic_nr >= MAX_GIC_NR)
BUG();
- if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
- BUG();
- irq_set_chained_handler(irq, gic_handle_cascade_irq);
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+ &gic_data[gic_nr]);
}
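
This is the pattern applied throughout the series: irq_set_chained_handler_and_data() installs the flow handler and its data in one go, closing the window in which the handler could run and observe stale or NULL handler data. A generic sketch with invented "demo" names, using the 4.3-era flow-handler signature that still carries the irq argument:

	#include <linux/irq.h>

	static void demo_cascade_handler(unsigned int irq,
					 struct irq_desc *desc)
	{
		void *priv = irq_desc_get_handler_data(desc);

		/* demux and generic_handle_irq() the children here */
		(void)priv;
	}

	static void demo_wire_cascade(unsigned int parent_irq, void *priv)
	{
		/* One call instead of irq_set_handler_data() followed
		 * by irq_set_chained_handler(), so the handler never
		 * runs without its data. */
		irq_set_chained_handler_and_data(parent_irq,
						 demo_cascade_handler, priv);
	}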
static u8 gic_get_cpumask(struct gic_chip_data *gic)
@@ -355,10 +444,14 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic)
return mask;
}
-static void gic_cpu_if_up(void)
+static void gic_cpu_if_up(struct gic_chip_data *gic)
{
- void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
u32 bypass = 0;
+ u32 mode = 0;
+
+ if (static_key_true(&supports_deactivate))
+ mode = GIC_CPU_CTRL_EOImodeNS;
/*
* Preserve bypass disable bits to be written back later
@@ -366,7 +459,7 @@ static void gic_cpu_if_up(void)
bypass = readl(cpu_base + GIC_CPU_CTRL);
bypass &= GICC_DIS_BYPASS_MASK;
- writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
+ writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
@@ -401,34 +494,47 @@ static void gic_cpu_init(struct gic_chip_data *gic)
int i;
/*
- * Get what the GIC says our CPU mask is.
+ * Setting up the CPU map is only relevant for the primary GIC
+ * because any nested/secondary GICs do not directly interface
+ * with the CPU(s).
*/
- BUG_ON(cpu >= NR_GIC_CPU_IF);
- cpu_mask = gic_get_cpumask(gic);
- gic_cpu_map[cpu] = cpu_mask;
+ if (gic == &gic_data[0]) {
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_GIC_CPU_IF);
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
- /*
- * Clear our mask from the other map entries in case they're
- * still undefined.
- */
- for (i = 0; i < NR_GIC_CPU_IF; i++)
- if (i != cpu)
- gic_cpu_map[i] &= ~cpu_mask;
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+ }
gic_cpu_config(dist_base, NULL);
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
- gic_cpu_if_up();
+ gic_cpu_if_up(gic);
}
-void gic_cpu_if_down(void)
+int gic_cpu_if_down(unsigned int gic_nr)
{
- void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ void __iomem *cpu_base;
u32 val = 0;
+ if (gic_nr >= MAX_GIC_NR)
+ return -EINVAL;
+
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
val = readl(cpu_base + GIC_CPU_CTRL);
val &= ~GICC_ENABLE;
writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+ return 0;
}
#ifdef CONFIG_CPU_PM
@@ -564,7 +670,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
- gic_cpu_if_up();
+ gic_cpu_if_up(&gic_data[gic_nr]);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -789,13 +895,20 @@ void __init gic_init_physaddr(struct device_node *node)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
+ struct irq_chip *chip = &gic_chip;
+
+ if (static_key_true(&supports_deactivate)) {
+ if (d->host_data == (void *)&gic_data[0])
+ chip = &gic_eoimode1_chip;
+ }
+
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
- irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
@@ -880,12 +993,7 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.xlate = gic_irq_domain_xlate,
};
-void gic_set_irqchip_flags(unsigned long flags)
-{
- gic_chip.flags |= flags;
-}
-
-void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
{
@@ -930,13 +1038,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
}
/*
- * Initialize the CPU interface map to all CPUs.
- * It will be refined as each CPU probes its ID.
- */
- for (i = 0; i < NR_GIC_CPU_IF; i++)
- gic_cpu_map[i] = 0xff;
-
- /*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
@@ -981,11 +1082,20 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
return;
if (gic_nr == 0) {
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ * This is only necessary for the primary GIC.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
+ if (static_key_true(&supports_deactivate))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
}
gic_dist_init(gic);
@@ -993,6 +1103,19 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_pm_init(gic);
}
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ void __iomem *dist_base, void __iomem *cpu_base,
+ u32 percpu_offset, struct device_node *node)
+{
+ /*
+ * Non-DT/ACPI systems won't run a hypervisor, so let's not
+ * bother with these...
+ */
+ static_key_slow_dec(&supports_deactivate);
+ __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
+ percpu_offset, node);
+}
+
#ifdef CONFIG_OF
static int gic_cnt __initdata;
@@ -1001,6 +1124,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
+ struct resource cpu_res;
u32 percpu_offset;
int irq;
@@ -1013,10 +1137,20 @@ gic_of_init(struct device_node *node, struct device_node *parent)
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
+ of_address_to_resource(node, 1, &cpu_res);
+
+ /*
+ * Disable split EOI/Deactivate if either HYP is not available
+ * or the CPU interface is too small.
+ */
+ if (gic_cnt == 0 && (!is_hyp_mode_available() ||
+ resource_size(&cpu_res) < SZ_8K))
+ static_key_slow_dec(&supports_deactivate);
+
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
percpu_offset = 0;
- gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+ __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
if (!gic_cnt)
gic_init_physaddr(node);
@@ -1055,7 +1189,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
processor = (struct acpi_madt_generic_interrupt *)header;
- if (BAD_MADT_ENTRY(processor, end))
+ if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
/*
@@ -1132,11 +1266,19 @@ gic_v2_acpi_init(struct acpi_table_header *table)
}
/*
+ * Disable split EOI/Deactivate if HYP is not available. ACPI
+ * guarantees that we'll always have a GICv2, so the CPU
+ * interface will always be the right size.
+ */
+ if (!is_hyp_mode_available())
+ static_key_slow_dec(&supports_deactivate);
+
+ /*
* Initialize zero GIC instance (no multi-GIC support). Also, set GIC
* as default IRQ domain to allow for GSI registration and GSI to IRQ
* number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
*/
- gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
+ __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
irq_set_default_host(gic_data[0].domain);
acpi_irq_model = ACPI_IRQ_MODEL_GIC;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 0cae45d10695..a0128c7c98dd 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -41,6 +41,7 @@
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/irq.h>
@@ -48,7 +49,6 @@
#include <asm/smp_plat.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
#define HIP04_MAX_IRQS 510
@@ -202,7 +202,9 @@ static struct irq_chip hip04_irq_chip = {
#ifdef CONFIG_SMP
.irq_set_affinity = hip04_irq_set_affinity,
#endif
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
};
static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
new file mode 100644
index 000000000000..4836102ba312
--- /dev/null
+++ b/drivers/irqchip/irq-i8259.c
@@ -0,0 +1,384 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ * plus some generic x86-specific things, if generic specifics make
+ * any sense at all.
+ * This file should become arch/i386/kernel/irq.c when the old irq.c
+ * moves to arch-independent land.
+ */
+
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .irq_mask = disable_8259A_irq,
+ .irq_disable = disable_8259A_irq,
+ .irq_unmask = enable_8259A_irq,
+ .irq_mask_ack = mask_and_ack_8259A,
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+/*
+ * This contains the irq mask for both 8259A irq controllers.
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
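
The single 16-bit mask shadows both PICs: bits 0-7 mirror the master IMR, bits 8-15 the slave IMR. A standalone arithmetic check, with no port I/O and illustrative values only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cached_irq_mask = 0xffff;	/* all masked */

		cached_irq_mask &= ~(1u << 10);	/* unmask ISA IRQ 10 */

		/* IRQ 10 lives on the slave: master byte unchanged
		 * (0xff), slave byte has bit 2 cleared (0xfb). */
		printf("master IMR = 0x%02x\n", cached_irq_mask & 0xff);
		printf("slave  IMR = 0x%02x\n",
		       (cached_irq_mask >> 8) & 0xff);
		return 0;
	}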
+
+static void disable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask |= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = ~(1 << irq);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask &= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+int i8259A_irq_pending(unsigned int irq)
+{
+ unsigned int mask;
+ unsigned long flags;
+ int ret;
+
+ irq -= I8259A_IRQ_BASE;
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ if (irq < 8)
+ ret = inb(PIC_MASTER_CMD) & mask;
+ else
+ ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ return ret;
+}
+
+void make_8259A_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ enable_irq(irq);
+}
+
+/*
+ * This function expects to be called rarely. Switching between
+ * 8259A registers is slow.
+ * The caller must hold the irq controller spinlock before
+ * calling this.
+ */
+static inline int i8259A_irq_real(unsigned int irq)
+{
+ int value;
+ int irqmask = 1 << irq;
+
+ if (irq < 8) {
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
+ value = inb(PIC_MASTER_CMD) & irqmask;
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
+ return value;
+ }
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
+ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
+ return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast, it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI, and the order of EOI
+ * to the two 8259s is important!
+ */
+static void mask_and_ack_8259A(struct irq_data *d)
+{
+ unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ irqmask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ /*
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
+ *
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
+ */
+ if (cached_irq_mask & irqmask)
+ goto spurious_8259A_irq;
+ cached_irq_mask |= irqmask;
+
+handle_real_irq:
+ if (irq & 8) {
+ inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+ } else {
+ inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
+ }
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+ return;
+
+spurious_8259A_irq:
+ /*
+ * This is the slow path - it should happen rarely.
+ */
+ if (i8259A_irq_real(irq))
+ /*
+ * oops, the IRQ _is_ in service according to the
+ * 8259A - not spurious, go handle it.
+ */
+ goto handle_real_irq;
+
+ {
+ static int spurious_irq_mask;
+ /*
+ * At this point we can be sure the IRQ is spurious,
+ * let's ACK and report it. [once per IRQ]
+ */
+ if (!(spurious_irq_mask & irqmask)) {
+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+ atomic_inc(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+ * simpler for us.
+ */
+ goto handle_real_irq;
+ }
+}
+
+static void i8259A_resume(void)
+{
+ if (i8259A_auto_eoi >= 0)
+ init_8259A(i8259A_auto_eoi);
+}
+
+static void i8259A_shutdown(void)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ if (i8259A_auto_eoi >= 0) {
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+ }
+}
+
+static struct syscore_ops i8259_syscore_ops = {
+ .resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
+};
+
+static int __init i8259A_init_sysfs(void)
+{
+ register_syscore_ops(&i8259_syscore_ops);
+ return 0;
+}
+
+device_initcall(i8259A_init_sysfs);
+
+static void init_8259A(int auto_eoi)
+{
+ unsigned long flags;
+
+ i8259A_auto_eoi = auto_eoi;
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+
+ /*
+ * outb_p - this has to work on a wide range of PC hardware.
+ */
+ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
+ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+ if (auto_eoi) /* master does Auto EOI */
+ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ else /* master expects normal EOI */
+ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+ outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
+ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
+ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+ if (auto_eoi)
+ /*
+ * In AEOI mode we just have to mask the interrupt
+ * when acking.
+ */
+ i8259A_chip.irq_mask_ack = disable_8259A_irq;
+ else
+ i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+
+ udelay(100); /* wait for 8259A to initialize */
+
+ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
+
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+/*
+ * IRQ2 is the cascade interrupt to the second interrupt controller
+ */
+static struct irqaction irq2 = {
+ .handler = no_action,
+ .name = "cascade",
+ .flags = IRQF_NO_THREAD,
+};
+
+static struct resource pic1_io_resource = {
+ .name = "pic1",
+ .start = PIC_MASTER_CMD,
+ .end = PIC_MASTER_IMR,
+ .flags = IORESOURCE_BUSY
+};
+
+static struct resource pic2_io_resource = {
+ .name = "pic2",
+ .start = PIC_SLAVE_CMD,
+ .end = PIC_SLAVE_IMR,
+ .flags = IORESOURCE_BUSY
+};
+
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static struct irq_domain_ops i8259A_ops = {
+ .map = i8259A_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * On systems with i8259-style interrupt controllers we assume, for
+ * driver compatibility reasons, that interrupts 0 - 15 are the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
+{
+ struct irq_domain *domain;
+
+ insert_resource(&ioport_resource, &pic1_io_resource);
+ insert_resource(&ioport_resource, &pic2_io_resource);
+
+ init_8259A(0);
+
+ domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
+ if (!domain)
+ panic("Failed to add i8259 IRQ domain");
+
+ setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+ __init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ int hwirq = i8259_irq();
+ unsigned int irq;
+
+ if (hwirq < 0)
+ return;
+
+ irq = irq_linear_revmap(domain, hwirq);
+ generic_handle_irq(irq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ unsigned int parent_irq;
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map i8259 parent IRQ\n");
+ return -ENODEV;
+ }
+
+ domain = __init_i8259_irqs(node);
+ irq_set_handler_data(parent_irq, domain);
+ irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
+ return 0;
+}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 8071c2eb0248..841604b81004 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -218,8 +218,9 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct pdc_intc_priv *priv;
unsigned int i, irq_no;
@@ -451,13 +452,13 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Setup chained handlers for the peripheral IRQs */
for (i = 0; i < priv->nr_perips; ++i) {
irq = priv->perip_irqs[i];
- irq_set_handler_data(irq, priv);
- irq_set_chained_handler(irq, pdc_intc_perip_isr);
+ irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr,
+ priv);
}
/* Setup chained handler for the syswake IRQ */
- irq_set_handler_data(priv->syswake_irq, priv);
- irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+ irq_set_chained_handler_and_data(priv->syswake_irq,
+ pdc_intc_syswake_isr, priv);
dev_info(&pdev->dev,
"PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
new file mode 100644
index 000000000000..e48d3305456f
--- /dev/null
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/syscore_ops.h>
+
+#define IMR_NUM 4
+#define GPC_MAX_IRQS (IMR_NUM * 32)
+
+#define GPC_IMR1_CORE0 0x30
+#define GPC_IMR1_CORE1 0x40
+
+struct gpcv2_irqchip_data {
+ struct raw_spinlock rlock;
+ void __iomem *gpc_base;
+ u32 wakeup_sources[IMR_NUM];
+ u32 saved_irq_mask[IMR_NUM];
+ u32 cpu2wakeup;
+};
+
+static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+
+/*
+ * Interface for the low level wakeup code.
+ */
+u32 imx_gpcv2_get_wakeup_source(u32 **sources)
+{
+ if (!imx_gpcv2_instance)
+ return 0;
+
+ if (sources)
+ *sources = imx_gpcv2_instance->wakeup_sources;
+
+ return IMR_NUM;
+}
+
+static int gpcv2_wakeup_source_save(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ void __iomem *reg;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return 0;
+
+ for (i = 0; i < IMR_NUM; i++) {
+ reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+ cd->saved_irq_mask[i] = readl_relaxed(reg);
+ writel_relaxed(cd->wakeup_sources[i], reg);
+ }
+
+ return 0;
+}
+
+static void gpcv2_wakeup_source_restore(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ void __iomem *reg;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return;
+
+ for (i = 0; i < IMR_NUM; i++) {
+ reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+ writel_relaxed(cd->saved_irq_mask[i], reg);
+ }
+}
+
+static struct syscore_ops imx_gpcv2_syscore_ops = {
+ .suspend = gpcv2_wakeup_source_save,
+ .resume = gpcv2_wakeup_source_restore,
+};
+
+static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ unsigned int idx = d->hwirq / 32;
+ unsigned long flags;
+ void __iomem *reg;
+ u32 mask, val;
+
+ raw_spin_lock_irqsave(&cd->rlock, flags);
+ reg = cd->gpc_base + cd->cpu2wakeup + idx * 4;
+ mask = 1 << d->hwirq % 32;
+ val = cd->wakeup_sources[idx];
+
+ cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
+ raw_spin_unlock_irqrestore(&cd->rlock, flags);
+
+ /*
+ * Do *not* call into the parent, as the GIC doesn't have any
+ * wake-up facility...
+ */
+
+ return 0;
+}
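
From a driver's point of view this is all reached through the generic wake API: enable_irq_wake() filters down the domain hierarchy to imx_gpcv2_irq_set_wake(), and the accumulated wakeup_sources[] are programmed into the IMRs by gpcv2_wakeup_source_save() at suspend. A minimal, hedged sketch (names invented):

	#include <linux/interrupt.h>

	static int demo_enable_wakeup(unsigned int irq)	/* hypothetical */
	{
		/* Resolves to imx_gpcv2_irq_set_wake() through the
		 * GPCv2 irq_chip; note it deliberately does not
		 * propagate to the GIC parent, which has no wake-up
		 * facility. */
		return enable_irq_wake(irq);
	}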
+
+static void imx_gpcv2_irq_unmask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ val = readl_relaxed(reg);
+ val &= ~(1 << d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_unmask_parent(d);
+}
+
+static void imx_gpcv2_irq_mask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ val = readl_relaxed(reg);
+ val |= 1 << (d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_mask_parent(d);
+}
+
+static struct irq_chip gpcv2_irqchip_data_chip = {
+ .name = "GPCv2",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = imx_gpcv2_irq_mask,
+ .irq_unmask = imx_gpcv2_irq_unmask,
+ .irq_set_wake = imx_gpcv2_irq_set_wake,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpcv2_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ /* Shouldn't happen, really... */
+ if (domain->of_node != controller)
+ return -EINVAL;
+
+ /* Not GIC compliant */
+ if (intsize != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (intspec[0] != 0)
+ return -EINVAL;
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
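
A hypothetical consumer fragment matching the three-cell, GIC-style binding validated above: cell 0 must be 0 (SPI), cell 1 is the interrupt number, cell 2 the trigger type. All node names, addresses and values are invented:

	gpc: interrupt-controller@303a0000 {
		compatible = "fsl,imx7d-gpc";
		interrupt-controller;
		#interrupt-cells = <3>;
		interrupt-parent = <&intc>;
	};

	demo-device@30370000 {
		compatible = "vendor,demo";
		interrupt-parent = <&gpc>;
		/* SPI 32, level-high: the GPR interrupt noted in the
		 * init code below */
		interrupts = <0 32 4>;
	};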
+
+static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
+ int i;
+
+ /* Not GIC compliant */
+ if (args->args_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (args->args[0] != 0)
+ return -EINVAL;
+
+ /* Can't deal with this */
+ hwirq = args->args[1];
+ if (hwirq >= GPC_MAX_IRQS)
+ return -EINVAL;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+ &gpcv2_irqchip_data_chip, domain->host_data);
+ }
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+ .xlate = imx_gpcv2_domain_xlate,
+ .alloc = imx_gpcv2_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct gpcv2_irqchip_data *cd;
+ int i;
+
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to get parent domain\n", node->full_name);
+ return -ENXIO;
+ }
+
+ cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
+ if (!cd) {
+ pr_err("kzalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ cd->gpc_base = of_iomap(node, 0);
+ if (!cd->gpc_base) {
+ pr_err("fsl-gpcv2: unable to map gpc registers\n");
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ node, &gpcv2_irqchip_data_domain_ops, cd);
+ if (!domain) {
+ iounmap(cd->gpc_base);
+ kfree(cd);
+ return -ENOMEM;
+ }
+ irq_set_default_host(domain);
+
+ /* Initially mask all interrupts */
+ for (i = 0; i < IMR_NUM; i++) {
+ writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
+ writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
+ cd->wakeup_sources[i] = ~0;
+ }
+
+ /* Use CORE0 as the default CPU to be woken up by the GPC */
+ cd->cpu2wakeup = GPC_IMR1_CORE0;
+
+ /*
+ * Due to a hardware design flaw, we need to make sure the GPR
+ * interrupt (#32) is unmasked during RUN mode to avoid entering
+ * DSM by mistake.
+ */
+ writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
+
+ imx_gpcv2_instance = cd;
+ register_syscore_ops(&imx_gpcv2_syscore_ops);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 005de3f932ae..fc5953dea509 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/ingenic.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -28,8 +29,6 @@
#include <asm/io.h>
#include <asm/mach-jz4740/irq.h>
-#include "irqchip.h"
-
struct ingenic_intc_data {
void __iomem *base;
unsigned num_chips;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 81e3cf5b9a1f..c1517267b5db 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -20,13 +20,12 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include "irqchip.h"
-
/* The source ID bits occupy positions 4 to 31 (28 bits total) */
#define BIT_OFS 4
@@ -84,8 +83,9 @@ static void keystone_irq_ack(struct irq_data *d)
/* nothing to do here */
}
-static void keystone_irq_handler(unsigned irq, struct irq_desc *desc)
+static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
unsigned long pending;
int src, virq;
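
The extra __irq argument only exists to keep old prototypes compiling while handlers migrate; new code is expected to pull everything out of the descriptor. A minimal sketch of the idiom, with foo_device and its fields hypothetical:

static void foo_chained_handler(unsigned int __irq, struct irq_desc *desc)
{
        unsigned int irq = irq_desc_get_irq(desc);      /* not the stale argument */
        struct foo_device *foo = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chained_irq_enter(chip, desc);
        /* hypothetical hwirq derivation for illustration only */
        generic_handle_irq(irq_find_mapping(foo->domain, irq - foo->first_irq));
        chained_irq_exit(chip, desc);
}
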
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 2cb474ad8809..5f4c52928d16 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -404,7 +404,6 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
#ifdef CONFIG_METAG_SUSPEND_MEM
struct meta_intc_priv *priv = &meta_intc_priv;
#endif
- unsigned int irq = data->irq;
irq_hw_number_t hw = data->hwirq;
unsigned int bit = 1 << meta_intc_offset(hw);
void __iomem *level_addr = meta_intc_level_addr(hw);
@@ -413,11 +412,11 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
/* update the chip/handler */
if (flow_type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
- handle_level_irq, NULL);
+ irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
+ handle_level_irq, NULL);
else
- __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
- handle_edge_irq, NULL);
+ irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
+ handle_edge_irq, NULL);
/* and clear/set the bit in HWLEVELEXT */
__global_lock2(flags);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index c16c186d97d3..3d23ce3edb5c 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -286,8 +286,7 @@ static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv,
int irq = tbisig_map(signum);
/* Register the multiplexed IRQ handler */
- irq_set_handler_data(irq, priv);
- irq_set_chained_handler(irq, metag_internal_irq_demux);
+ irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
}
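
irq_set_chained_handler_and_data() installs the handler and its data under the descriptor lock, closing the window in the old two-call sequence where the chained handler could run against mismatched data. The conversion applied throughout this series, as a sketch:

static void wire_demux(unsigned int parent_irq,
                       void (*demux)(unsigned int, struct irq_desc *),
                       void *data)
{
        /*
         * Previously, installed in two steps:
         *      irq_set_handler_data(parent_irq, data);
         *      irq_set_chained_handler(parent_irq, demux);
         */
        irq_set_chained_handler_and_data(parent_irq, demux, data);
}
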
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index a43c41988009..8c504f562e9d 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
@@ -38,8 +39,6 @@
#include <asm/mipsmtregs.h>
#include <asm/setup.h>
-#include "irqchip.h"
-
static inline void unmask_mips_irq(struct irq_data *d)
{
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 4400edd1a6c7..1764bcf8ee6b 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
@@ -22,8 +23,6 @@
#include <dt-bindings/interrupt-controller/mips-gic.h>
-#include "irqchip.h"
-
unsigned int gic_present;
struct gic_pcpu_mask {
@@ -42,20 +41,46 @@ static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static void __gic_irq_dispatch(void);
-static inline unsigned int gic_read(unsigned int reg)
+static inline u32 gic_read32(unsigned int reg)
{
return __raw_readl(gic_base + reg);
}
-static inline void gic_write(unsigned int reg, unsigned int val)
+static inline u64 gic_read64(unsigned int reg)
+{
+ return __raw_readq(gic_base + reg);
+}
+
+static inline unsigned long gic_read(unsigned int reg)
+{
+ if (!mips_cm_is64)
+ return gic_read32(reg);
+ else
+ return gic_read64(reg);
+}
+
+static inline void gic_write32(unsigned int reg, u32 val)
{
- __raw_writel(val, gic_base + reg);
+ return __raw_writel(val, gic_base + reg);
+}
+
+static inline void gic_write64(unsigned int reg, u64 val)
+{
+ return __raw_writeq(val, gic_base + reg);
+}
+
+static inline void gic_write(unsigned int reg, unsigned long val)
+{
+ if (!mips_cm_is64)
+ return gic_write32(reg, (u32)val);
+ else
+ return gic_write64(reg, (u64)val);
}
-static inline void gic_update_bits(unsigned int reg, unsigned int mask,
- unsigned int val)
+static inline void gic_update_bits(unsigned int reg, unsigned long mask,
+ unsigned long val)
{
- unsigned int regval;
+ unsigned long regval;
regval = gic_read(reg);
regval &= ~mask;
@@ -66,40 +91,40 @@ static inline void gic_update_bits(unsigned int reg, unsigned int mask,
static inline void gic_reset_mask(unsigned int intr)
{
gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
- 1 << GIC_INTR_BIT(intr));
+ 1ul << GIC_INTR_BIT(intr));
}
static inline void gic_set_mask(unsigned int intr)
{
gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
- 1 << GIC_INTR_BIT(intr));
+ 1ul << GIC_INTR_BIT(intr));
}
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
- GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
- pol << GIC_INTR_BIT(intr));
+ GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+ (unsigned long)pol << GIC_INTR_BIT(intr));
}
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
- GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
- trig << GIC_INTR_BIT(intr));
+ GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+ (unsigned long)trig << GIC_INTR_BIT(intr));
}
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
- 1 << GIC_INTR_BIT(intr),
- dual << GIC_INTR_BIT(intr));
+ 1ul << GIC_INTR_BIT(intr),
+ (unsigned long)dual << GIC_INTR_BIT(intr));
}
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
- gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
- GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
+ gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
+ GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
@@ -114,10 +139,13 @@ cycle_t gic_read_count(void)
{
unsigned int hi, hi2, lo;
+ if (mips_cm_is64)
+ return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+
do {
- hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
- lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
- hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+ hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+ lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
+ hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
} while (hi2 != hi);
return (((cycle_t) hi) << 32) + lo;
@@ -136,10 +164,14 @@ unsigned int gic_get_count_width(void)
void gic_write_compare(cycle_t cnt)
{
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
- (int)(cnt >> 32));
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
- (int)(cnt & 0xffffffff));
+ if (mips_cm_is64) {
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
+ } else {
+ gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+ }
}
void gic_write_cpu_compare(cycle_t cnt, int cpu)
@@ -149,10 +181,15 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
local_irq_save(flags);
gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
- (int)(cnt >> 32));
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
- (int)(cnt & 0xffffffff));
+
+ if (mips_cm_is64) {
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
+ } else {
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+ }
local_irq_restore(flags);
}
@@ -161,8 +198,11 @@ cycle_t gic_read_compare(void)
{
unsigned int hi, lo;
- hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
- lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+ if (mips_cm_is64)
+ return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+
+ hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+ lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
return (((cycle_t) hi) << 32) + lo;
}
@@ -197,7 +237,7 @@ static bool gic_local_irq_is_routable(int intr)
if (cpu_has_veic)
return true;
- vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+ vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
switch (intr) {
case GIC_LOCAL_INT_TIMER:
return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
@@ -257,23 +297,13 @@ int gic_get_c0_fdc_int(void)
return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
}
- /*
- * Some cores claim the FDC is routable but it doesn't actually seem to
- * be connected.
- */
- switch (current_cpu_type()) {
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- return -1;
- }
-
return irq_create_mapping(gic_irq_domain,
GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
static void gic_handle_shared_int(bool chained)
{
- unsigned int i, intr, virq;
+ unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
unsigned long *pcpu_mask;
unsigned long pending_reg, intrmask_reg;
DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -288,8 +318,8 @@ static void gic_handle_shared_int(bool chained)
for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
pending[i] = gic_read(pending_reg);
intrmask[i] = gic_read(intrmask_reg);
- pending_reg += 0x4;
- intrmask_reg += 0x4;
+ pending_reg += gic_reg_step;
+ intrmask_reg += gic_reg_step;
}
bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -368,15 +398,12 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
break;
}
- if (is_edge) {
- __irq_set_chip_handler_name_locked(d->irq,
- &gic_edge_irq_controller,
- handle_edge_irq, NULL);
- } else {
- __irq_set_chip_handler_name_locked(d->irq,
- &gic_level_irq_controller,
- handle_level_irq, NULL);
- }
+ if (is_edge)
+ irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
+ handle_edge_irq, NULL);
+ else
+ irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+ handle_level_irq, NULL);
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@@ -406,7 +433,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
- cpumask_copy(d->affinity, cpumask);
+ cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY;
@@ -439,8 +466,8 @@ static void gic_handle_local_int(bool chained)
unsigned long pending, masked;
unsigned int intr, virq;
- pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
- masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
+ pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+ masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
@@ -463,14 +490,14 @@ static void gic_mask_local_irq(struct irq_data *d)
{
int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
+ gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
static void gic_unmask_local_irq(struct irq_data *d)
{
int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
+ gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
static struct irq_chip gic_local_irq_controller = {
@@ -488,7 +515,7 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
}
@@ -502,7 +529,7 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
}
@@ -548,7 +575,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -622,7 +649,7 @@ static void __init gic_basic_init(void)
for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
if (!gic_local_irq_is_routable(j))
continue;
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
}
}
}
@@ -667,27 +694,32 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
switch (intr) {
case GIC_LOCAL_INT_WD:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
break;
case GIC_LOCAL_INT_COMPARE:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
+ val);
break;
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+ val);
break;
case GIC_LOCAL_INT_PERFCTR:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+ val);
break;
case GIC_LOCAL_INT_SWINT0:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
+ val);
break;
case GIC_LOCAL_INT_SWINT1:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
+ val);
break;
case GIC_LOCAL_INT_FDC:
- gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
+ gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
break;
default:
pr_err("Invalid local IRQ %d\n", intr);
@@ -792,7 +824,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
*/
if (IS_ENABLED(CONFIG_MIPS_CMP) &&
gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
- timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+ timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
GIC_VPE_TIMER_MAP)) &
GIC_MAP_MSK;
irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
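
The switch from "1 <<" to "1ul <<" in the mask helpers is not cosmetic: once mips_cm_is64 selects 64-bit registers, GIC_INTR_BIT() can return values up to 63, and shifting the 32-bit int literal 1 that far is undefined behaviour. A self-contained userspace illustration (assuming an LP64 target where unsigned long is 64 bits):

#include <stdio.h>

int main(void)
{
        unsigned int bit = 40;  /* a line in the upper half of a 64-bit register */

        /* "1 << bit" would shift a 32-bit int by 40: undefined behaviour. */
        unsigned long mask = 1ul << bit;

        printf("bit %u -> mask %#lx\n", bit, mask);     /* 0x10000000000 */
        return 0;
}
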
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index c0da57bdb89d..781ed6e71dbb 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -24,8 +25,6 @@
#include <asm/exception.h>
#include <asm/hardirq.h>
-#include "irqchip.h"
-
#define MAX_ICU_NR 16
#define PJ1_INT_SEL 0x10c
@@ -130,8 +129,9 @@ struct irq_chip icu_irq_chip = {
.irq_unmask = icu_unmask_irq,
};
-static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct irq_domain *domain;
struct icu_chip_data *data;
int i;
diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c
index 00b3cc908f76..a24b06a1718b 100644
--- a/drivers/irqchip/irq-moxart.c
+++ b/drivers/irqchip/irq-moxart.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -19,8 +20,6 @@
#include <asm/exception.h>
-#include "irqchip.h"
-
#define IRQ_SOURCE_REG 0
#define IRQ_MASK_REG 0x04
#define IRQ_CLEAR_REG 0x08
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 15c13039bba2..c8753da4c156 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -13,6 +13,7 @@
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -21,8 +22,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "irqchip.h"
-
struct mtk_sysirq_chip_data {
spinlock_t lock;
void __iomem *intpol_base;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 04bf97b289cf..1faf812f3dc8 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -27,8 +28,6 @@
#include <linux/stmp_device.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define HW_ICOLL_VECTOR 0x0000
#define HW_ICOLL_LEVELACK 0x0010
#define HW_ICOLL_CTRL 0x0020
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 5fac9100f6cb..a878b8d03868 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -21,13 +21,12 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/v7m.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
#define NVIC_IPR 0x300
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index a569c6dbd1d1..8587d0f8d8c0 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -17,13 +17,12 @@
#include <linux/io.h>
#include <asm/exception.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE 0x480fe000
#define OMAP34XX_IC_BASE 0x48200000
@@ -331,37 +330,12 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
- u32 irqnr = 0;
- int handled_irq = 0;
- int i;
-
- do {
- for (i = 0; i < omap_nr_pending; i++) {
- irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
- if (irqnr)
- goto out;
- }
-
-out:
- if (!irqnr)
- break;
+ u32 irqnr;
- irqnr = intc_readl(INTC_SIR);
- irqnr &= ACTIVEIRQ_MASK;
-
- if (irqnr) {
- handle_domain_irq(domain, irqnr, regs);
- handled_irq = 1;
- }
- } while (irqnr);
-
- /*
- * If an irq is masked or deasserted while active, we will
- * keep ending up here with no irq handled. So remove it from
- * the INTC with an ack.
- */
- if (!handled_irq)
- omap_ack_irq(NULL);
+ irqnr = intc_readl(INTC_SIR);
+ irqnr &= ACTIVEIRQ_MASK;
+ WARN_ONCE(!irqnr, "Spurious IRQ?\n");
+ handle_domain_irq(domain, irqnr, regs);
}
void __init omap3_init_irq(void)
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index e93d079fe069..6a9a3e79218b 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -9,12 +9,11 @@
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include "irqchip.h"
-
/* OR1K PIC implementation */
struct or1k_pic_dev {
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index ad0c0f6f1d65..5ea999a724b5 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -10,14 +10,13 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/*
* Orion SoC main interrupt controller
*/
@@ -109,7 +108,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_domain *d = irq_get_handler_data(irq);
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
@@ -198,8 +197,8 @@ static int __init orion_bridge_irq_init(struct device_node *np,
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, orion_bridge_irq_handler);
+ irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
+ domain);
return 0;
}
diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c
index 1870e6bd3dd9..6fd30d5ee14d 100644
--- a/drivers/irqchip/irq-renesas-h8300h.c
+++ b/drivers/irqchip/irq-renesas-h8300h.c
@@ -11,8 +11,6 @@
#include <linux/of_irq.h>
#include <asm/io.h>
-#include "irqchip.h"
-
static const char ipr_bit[] = {
7, 6, 5, 5,
4, 4, 4, 4, 3, 3, 3, 3,
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index 64425f4de7d9..8098ead1eb22 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -5,10 +5,10 @@
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
-#include "irqchip.h"
static void *intc_baseaddr;
#define IPRA ((unsigned long)intc_baseaddr)
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 778bd076aeea..2aa3add711a6 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -53,7 +53,6 @@
struct irqc_irq {
int hw_irq;
int requested_irq;
- int domain_irq;
struct irqc_priv *p;
};
@@ -70,8 +69,8 @@ struct irqc_priv {
static void irqc_dbg(struct irqc_irq *i, char *str)
{
- dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
- str, i->requested_irq, i->hw_irq, i->domain_irq);
+ dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
+ str, i->requested_irq, i->hw_irq);
}
static void irqc_irq_enable(struct irq_data *d)
@@ -145,7 +144,7 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
if (ioread32(p->iomem + DETECT_STATUS) & bit) {
iowrite32(bit, p->iomem + DETECT_STATUS);
irqc_dbg(i, "demux2");
- generic_handle_irq(i->domain_irq);
+ generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
return IRQ_HANDLED;
}
return IRQ_NONE;
@@ -156,13 +155,9 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
{
struct irqc_priv *p = h->host_data;
- p->irq[hw].domain_irq = virq;
- p->irq[hw].hw_irq = hw;
-
irqc_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID); /* kill me now */
return 0;
}
@@ -215,6 +210,7 @@ static int irqc_probe(struct platform_device *pdev)
break;
p->irq[k].p = p;
+ p->irq[k].hw_irq = k;
p->irq[k].requested_irq = irq->start;
}
@@ -243,8 +239,8 @@ static int irqc_probe(struct platform_device *pdev)
irq_chip->irq_set_wake = irqc_irq_set_wake;
irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
- p->number_of_irqs, 0,
+ p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ p->number_of_irqs,
&irqc_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
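
Dropping domain_irq works because the irq domain already records the hwirq-to-virq mapping made at map time; irq_find_mapping() retrieves it on demand, so the cached copy was redundant. A demux sketch of the same idiom, with foo_priv and foo_pending_line() hypothetical:

static irqreturn_t foo_demux(int irq, void *dev_id)
{
        struct foo_priv *p = dev_id;
        irq_hw_number_t hw = foo_pending_line(p);       /* hypothetical status read */

        /* Ask the domain for the Linux irq instead of caching it. */
        generic_handle_irq(irq_find_mapping(p->irq_domain, hw));
        return IRQ_HANDLED;
}
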
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index e96717f45ea1..506d9f20ca51 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -25,6 +25,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -40,8 +41,6 @@
#include <plat/regs-irqtype.h>
#include <plat/pm.h>
-#include "irqchip.h"
-
#define S3C_IRQTYPE_NONE 0
#define S3C_IRQTYPE_EINT 1
#define S3C_IRQTYPE_EDGE 2
@@ -299,16 +298,14 @@ static struct irq_chip s3c_irq_eint0t4 = {
.irq_set_type = s3c_irqext0_type,
};
-static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
struct s3c_irq_intc *intc = irq_data->intc;
struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
- unsigned long src;
- unsigned long msk;
- unsigned int n;
- unsigned int offset;
+ unsigned int n, offset, irq;
+ unsigned long src, msk;
/* we're using individual domains for the non-dt case
* and one big domain for the dt case where the subintc
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 46df2875dc1c..61bb28d7b19b 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -70,7 +70,6 @@ static int sa1100_normal_irqdomain_map(struct irq_domain *d,
{
irq_set_chip_and_handler(irq, &sa1100_normal_chip,
handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index a469355df352..10cb21b9ba3d 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -11,40 +11,44 @@
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/syscore_ops.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-#define SIRFSOC_INT_RISC_MASK0 0x0018
-#define SIRFSOC_INT_RISC_MASK1 0x001C
-#define SIRFSOC_INT_RISC_LEVEL0 0x0020
-#define SIRFSOC_INT_RISC_LEVEL1 0x0024
+#define SIRFSOC_INT_RISC_MASK0 0x0018
+#define SIRFSOC_INT_RISC_MASK1 0x001C
+#define SIRFSOC_INT_RISC_LEVEL0 0x0020
+#define SIRFSOC_INT_RISC_LEVEL1 0x0024
#define SIRFSOC_INIT_IRQ_ID 0x0038
+#define SIRFSOC_INT_BASE_OFFSET 0x0004
#define SIRFSOC_NUM_IRQS 64
+#define SIRFSOC_NUM_BANKS (SIRFSOC_NUM_IRQS / 32)
static struct irq_domain *sirfsoc_irqdomain;
-static __init void
-sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
+static __init void sirfsoc_alloc_gc(void __iomem *base)
{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
- int ret;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
unsigned int set = IRQ_LEVEL;
-
- ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
- handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
-
- gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
- gc->reg_base = base;
- ct = gc->chip_types;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
- ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ int i;
+
+ irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc",
+ handle_level_irq, clr, set,
+ IRQ_GC_INIT_MASK_CACHE);
+
+ for (i = 0; i < SIRFSOC_NUM_BANKS; i++) {
+ gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32);
+ gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET;
+ ct = gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+ }
}
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -64,10 +68,8 @@ static int __init sirfsoc_irq_init(struct device_node *np,
panic("unable to map intc cpu registers\n");
sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
- &irq_generic_chip_ops, base);
-
- sirfsoc_alloc_gc(base, 0, 32);
- sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
+ &irq_generic_chip_ops, base);
+ sirfsoc_alloc_gc(base);
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
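
irq_alloc_domain_generic_chips() creates one generic chip per irqs-per-chip slice of the domain (here 32), so with 64 interrupts the follow-up loop has exactly SIRFSOC_NUM_BANKS chips to point at their per-bank register bases. The shape of the idiom, as a sketch in which nbanks, bank_stride and mask_reg_offset stand in for the driver's constants:

        for (i = 0; i < nbanks; i++) {
                struct irq_chip_generic *gc =
                        irq_get_domain_generic_chip(domain, i * 32);

                gc->reg_base = base + i * bank_stride;
                gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
                gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
                gc->chip_types[0].regs.mask = mask_reg_offset;
        }
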
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 83d6aa6464ee..4ad3e7c69aa7 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -23,8 +24,6 @@
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define SUN4I_IRQ_VECTOR_REG 0x00
#define SUN4I_IRQ_PROTECTION_REG 0x08
#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 6b2b582433bd..772a82cacbf7 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -17,8 +17,8 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
@@ -61,7 +61,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int virq = irq_find_mapping(domain, 0);
chained_irq_enter(chip, desc);
@@ -182,8 +182,7 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+ irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
return 0;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index accc20036a3c..331829661366 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -22,13 +22,13 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/bitops.h>
-#include "irqchip.h"
#define AB_IRQCTL_INT_ENABLE 0x00
#define AB_IRQCTL_INT_STATUS 0x04
@@ -97,9 +97,10 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
return IRQ_SET_MASK_OK;
}
-static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
generic_handle_irq(irq_find_mapping(domain, irq));
}
@@ -173,8 +174,8 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
for (i = 0; i < nrirqs; i++) {
unsigned int irq = irq_of_parse_and_map(ictl, i);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, tb10x_irq_cascade);
+ irq_set_chained_handler_and_data(irq, tb10x_irq_cascade,
+ domain);
}
ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index f67bbd80433e..2fd89eb88f3a 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -31,8 +32,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "irqchip.h"
-
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 888111b76ea0..16123f688768 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@@ -14,8 +15,6 @@
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define IRQ_STATUS 0x00
#define IRQ_RAW_STATUS 0x04
#define IRQ_ENABLE_SET 0x08
@@ -66,9 +65,10 @@ static void fpga_irq_unmask(struct irq_data *d)
writel(mask, f->base + IRQ_ENABLE_SET);
}
-static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc)
{
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
u32 status = readl(f->base + IRQ_STATUS);
if (status == 0) {
@@ -156,8 +156,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
f->valid = valid;
if (parent_irq != -1) {
- irq_set_handler_data(parent_irq, f);
- irq_set_chained_handler(parent_irq, fpga_irq_handle);
+ irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle,
+ f);
}
/* This will also allocate irq descriptors */
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
index f5c01cbcc73a..2c2255886401 100644
--- a/drivers/irqchip/irq-vf610-mscm-ir.c
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -26,6 +26,7 @@
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -34,8 +35,6 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "irqchip.h"
-
#define MSCM_CPxNUM 0x4
#define MSCM_IRSPRC(n) (0x80 + 2 * (n))
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index d4ce331ea4a0..03846dff4212 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
@@ -37,8 +38,6 @@
#include <asm/exception.h>
#include <asm/irq.h>
-#include "irqchip.h"
-
#define VIC_IRQ_STATUS 0x00
#define VIC_FIQ_STATUS 0x04
#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
@@ -297,8 +296,8 @@ static void __init vic_register(void __iomem *base, unsigned int parent_irq,
vic_id++;
if (parent_irq) {
- irq_set_handler_data(parent_irq, v);
- irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
+ irq_set_chained_handler_and_data(parent_irq,
+ vic_handle_irq_cascaded, v);
}
v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 0b297009b856..8371d9978d31 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -39,8 +40,6 @@
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define VT8500_ICPC_IRQ 0x20
#define VT8500_ICPC_FIQ 0x24
#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */
@@ -127,15 +126,15 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
return -EINVAL;
case IRQF_TRIGGER_HIGH:
dctr |= VT8500_TRIGGER_HIGH;
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
break;
case IRQF_TRIGGER_FALLING:
dctr |= VT8500_TRIGGER_FALLING;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQF_TRIGGER_RISING:
dctr |= VT8500_TRIGGER_RISING;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
}
writeb(dctr, base + VT8500_ICDC + d->hwirq);
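
irq_set_handler_locked() takes the irq_data a chip callback already holds, where the old __irq_set_handler_locked() needed the Linux irq number and an extra descriptor lookup. Conversions like the one above all reduce to this shape (sketch):

static int foo_set_type(struct irq_data *d, unsigned int flow_type)
{
        if (flow_type & IRQ_TYPE_LEVEL_MASK)
                irq_set_handler_locked(d, handle_level_irq);
        else
                irq_set_handler_locked(d, handle_edge_irq);
        return 0;
}
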
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index e1c2f9632893..bb3ac5fe5846 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -11,12 +11,11 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <asm/mxregs.h>
-#include "irqchip.h"
-
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 7d71126d1ce5..472ae1770964 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -15,10 +15,9 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
-#include "irqchip.h"
-
unsigned int cached_irq_mask;
/*
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index e4ef74ed454a..4c48fa88a03d 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -18,8 +19,6 @@
#include <asm/mach/irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define IO_STATUS 0x000
#define IO_RAW_STATUS 0x004
#define IO_ENABLE 0x008
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
deleted file mode 100644
index 0f67ae32464f..000000000000
--- a/drivers/irqchip/irqchip.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright (C) 2012 Thomas Petazzoni
- *
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/irqchip.h>
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index a45121546caf..4cbd9c5dc1e6 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -2,7 +2,7 @@
* SPEAr platform shared irq layer source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Copyright (C) 2012 ST Microelectronics
* Shiraz Hashim <shiraz.linux.kernel@gmail.com>
@@ -18,14 +18,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
-#include "irqchip.h"
-
/*
* struct spear_shirq: shared irq structure
*
@@ -183,9 +182,9 @@ static struct spear_shirq *spear320_shirq_blocks[] = {
&spear320_shirq_intrcomm_ras,
};
-static void shirq_handler(unsigned irq, struct irq_desc *desc)
+static void shirq_handler(unsigned __irq, struct irq_desc *desc)
{
- struct spear_shirq *shirq = irq_get_handler_data(irq);
+ struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
u32 pend;
pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
cs->hw.ser->tty = tty;
atomic_set(&cs->hw.ser->refcnt, 1);
init_completion(&cs->hw.ser->dead_cmp);
-
tty->disc_data = cs;
+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
/* OK.. Initialization of the datastructures and the HW is done.. Now
* startup system and notify the LL that we are ready to run
*/
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
}
/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t count)
-{
- return -EAGAIN;
-}
-
-/*
* Ioctl on the tty.
* Called in process context only.
* May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
.open = gigaset_tty_open,
.close = gigaset_tty_close,
.hangup = gigaset_tty_hangup,
- .read = gigaset_tty_read,
- .write = gigaset_tty_write,
.ioctl = gigaset_tty_ioctl,
.receive_buf = gigaset_tty_receive,
.write_wakeup = gigaset_tty_wakeup,
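
Deleting the read/write stubs changes the error seen by userspace: the tty core checks the ldisc ops pointers and fails the syscall itself when one is NULL, so read() and write() on the line discipline now return -EIO instead of the stubs' -EAGAIN. Roughly (a paraphrase of tty_io.c, not a quote):

        /* In tty_read(), approximately: */
        if (ld->ops->read)
                ret = ld->ops->read(tty, file, buf, count);
        else
                ret = -EIO;
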
diff --git a/drivers/isdn/icn/icn.h b/drivers/isdn/icn/icn.h
index b713466997a0..f8f2e76d34bf 100644
--- a/drivers/isdn/icn/icn.h
+++ b/drivers/isdn/icn/icn.h
@@ -38,7 +38,7 @@ typedef struct icn_cdef {
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/major.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/slab.h>
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index 06022952a437..bbef98e7a16e 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -13,6 +13,7 @@
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/export.h>
+#include <linux/bitrev.h>
#include "core.h"
#include "dsp.h"
@@ -137,27 +138,14 @@ static unsigned char linear2ulaw(short sample)
return ulawbyte;
}
-static int reverse_bits(int i)
-{
- int z, j;
- z = 0;
-
- for (j = 0; j < 8; j++) {
- if ((i & (1 << j)) != 0)
- z |= 1 << (7 - j);
- }
- return z;
-}
-
-
void dsp_audio_generate_law_tables(void)
{
int i;
for (i = 0; i < 256; i++)
- dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+ dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++)
- dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+ dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++) {
dsp_audio_alaw_to_ulaw[i] =
@@ -176,13 +164,13 @@ dsp_audio_generate_s2law_table(void)
/* generating ulaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
- reverse_bits(linear2ulaw(i));
+ bitrev8(linear2ulaw(i));
}
} else {
/* generating alaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
- reverse_bits(linear2alaw(i));
+ bitrev8(linear2alaw(i));
}
}
}
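
bitrev8() from <linux/bitrev.h> is a table-driven byte reversal, equivalent to the deleted open-coded loop but without the per-call bit fiddling. A self-contained userspace check of the equivalence, reusing the removed helper:

#include <stdio.h>
#include <stdint.h>

/* The deleted helper, kept verbatim for comparison. */
static int reverse_bits(int i)
{
        int z = 0, j;

        for (j = 0; j < 8; j++)
                if (i & (1 << j))
                        z |= 1 << (7 - j);
        return z;
}

int main(void)
{
        uint8_t x = 0xb5;

        /* bitrev8(0xb5) in the kernel yields the same 0xad. */
        printf("%#x -> %#x\n", x, reverse_bits(x));     /* prints 0xb5 -> 0xad */
        return 0;
}
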
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 52c43821f746..8e3aa002767b 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -506,7 +506,7 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
__func__, conf->id);
if (list_empty(&conf->mlist)) {
- printk(KERN_ERR "%s: conference whithout members\n",
+ printk(KERN_ERR "%s: conference without members\n",
__func__);
return;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9ad35f72ab4c..70f4255ff291 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -43,7 +43,7 @@ config LEDS_AAT1290
tristate "LED support for the AAT1290"
depends on LEDS_CLASS_FLASH
depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
depends on OF
depends on PINCTRL
help
@@ -419,7 +419,7 @@ config LEDS_INTEL_SS4200
config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
depends on LEDS_CLASS
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
This option enables support for LEDs driven by a Linear Technology
LT3593 controller. This controller uses a special one-wire pulse
@@ -455,12 +455,16 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_KIRKWOOD
+ depends on MACH_KIRKWOOD || MACH_ARMADA_370
default y
help
- This option enable support for the dual-GPIO LED found on the
- Network Space v2 board (and parents). This include Internet Space v2,
- Network Space (Max) v2 and d2 Network v2 boards.
+ This option enables support for the dual-GPIO LEDs found on the
+ following LaCie/Seagate boards:
+
+ Network Space v2 (and parents: Max, Mini)
+ Internet Space v2
+ d2 Network v2
+ n090401 (Seagate NAS 4-Bay)
config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
@@ -543,7 +547,8 @@ config LEDS_MENF21BMC
config LEDS_KTD2692
tristate "LED support for KTD2692 flash LED controller"
- depends on LEDS_CLASS_FLASH && GPIOLIB && OF
+ depends on LEDS_CLASS_FLASH && OF
+ depends on GPIOLIB || COMPILE_TEST
help
This option enables support for KTD2692 LED flash connected
through ExpressWire interface.
@@ -560,6 +565,17 @@ config LEDS_BLINKM
This option enables support for the BlinkM RGB LED connected
through I2C. Say Y to enable support for the BlinkM LED.
+config LEDS_POWERNV
+ tristate "LED support for PowerNV Platform"
+ depends on LEDS_CLASS
+ depends on PPC_POWERNV
+ depends on OF
+ help
+ This option enables support for the system LEDs present on
+ PowerNV platforms. Say 'y' to enable this support in the kernel.
+ To compile this driver as a module, choose 'm' here: the module
+ will be called leds-powernv.
+
config LEDS_SYSCON
bool "LED support for LEDs on system controllers"
depends on LEDS_CLASS=y
@@ -578,14 +594,6 @@ config LEDS_VERSATILE
This option enables support for the LEDs on the ARM Versatile
and RealView boards. Say Y to enable these.
-config LEDS_PM8941_WLED
- tristate "LED support for the Qualcomm PM8941 WLED block"
- depends on LEDS_CLASS
- select REGMAP
- help
- This option enables support for the 'White' LED block
- on Qualcomm PM8941 PMICs.
-
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8d6a24a2f513..b503f92dc2c4 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -63,8 +63,8 @@ obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
-obj-$(CONFIG_LEDS_PM8941_WLED) += leds-pm8941-wled.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
+obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index beabfbc6f7cd..ca51d58bed24 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -228,12 +228,15 @@ static int led_classdev_next_name(const char *init_name, char *name,
{
unsigned int i = 0;
int ret = 0;
+ struct device *dev;
strlcpy(name, init_name, len);
- while (class_find_device(leds_class, NULL, name, match_name) &&
- (ret < len))
+ while ((ret < len) &&
+ (dev = class_find_device(leds_class, NULL, name, match_name))) {
+ put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
+ }
if (ret >= len)
return -ENOMEM;
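
class_find_device() returns its match with a reference held, so the old loop leaked one device reference per name collision. Since the loop only cares whether a device with that name exists, the reference can be dropped on the spot. The general rule, as a sketch:

        struct device *dev = class_find_device(cls, NULL, name, match_name);

        if (dev) {
                /* ... inspect dev if needed ... */
                put_device(dev);        /* balance class_find_device()'s get */
        }
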
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 2b4dc738dcd6..257a813c73f3 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -156,63 +156,35 @@ static int fsg_led_probe(struct platform_device *pdev)
latch_value = 0xffff;
*latch_address = latch_value;
- ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_wlan_led);
if (ret < 0)
- goto failwlan;
+ return ret;
- ret = led_classdev_register(&pdev->dev, &fsg_wan_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_wan_led);
if (ret < 0)
- goto failwan;
+ return ret;
- ret = led_classdev_register(&pdev->dev, &fsg_sata_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_sata_led);
if (ret < 0)
- goto failsata;
+ return ret;
- ret = led_classdev_register(&pdev->dev, &fsg_usb_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_usb_led);
if (ret < 0)
- goto failusb;
+ return ret;
- ret = led_classdev_register(&pdev->dev, &fsg_sync_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_sync_led);
if (ret < 0)
- goto failsync;
+ return ret;
- ret = led_classdev_register(&pdev->dev, &fsg_ring_led);
+ ret = devm_led_classdev_register(&pdev->dev, &fsg_ring_led);
if (ret < 0)
- goto failring;
-
- return ret;
-
- failring:
- led_classdev_unregister(&fsg_sync_led);
- failsync:
- led_classdev_unregister(&fsg_usb_led);
- failusb:
- led_classdev_unregister(&fsg_sata_led);
- failsata:
- led_classdev_unregister(&fsg_wan_led);
- failwan:
- led_classdev_unregister(&fsg_wlan_led);
- failwlan:
+ return ret;
return ret;
}
-static int fsg_led_remove(struct platform_device *pdev)
-{
- led_classdev_unregister(&fsg_wlan_led);
- led_classdev_unregister(&fsg_wan_led);
- led_classdev_unregister(&fsg_sata_led);
- led_classdev_unregister(&fsg_usb_led);
- led_classdev_unregister(&fsg_sync_led);
- led_classdev_unregister(&fsg_ring_led);
-
- return 0;
-}
-
-
static struct platform_driver fsg_led_driver = {
.probe = fsg_led_probe,
- .remove = fsg_led_remove,
.driver = {
.name = "fsg-led",
},
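
devm_led_classdev_register() ties each classdev to the platform device's lifetime, which is why both the goto unwind ladder and the .remove callback can go: the driver core unregisters the LEDs in reverse order when the device is unbound. After conversion every probe collapses to this shape (sketch with hypothetical LEDs):

static int foo_led_probe(struct platform_device *pdev)
{
        int ret;

        ret = devm_led_classdev_register(&pdev->dev, &foo_led_a);
        if (ret < 0)
                return ret;     /* earlier registrations are undone automatically */

        return devm_led_classdev_register(&pdev->dev, &foo_led_b);
}
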
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 91325de3cd33..b38430cb10ad 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -492,7 +492,6 @@ static struct i2c_driver lm3530_i2c_driver = {
.id_table = lm3530_id,
.driver = {
.name = LM3530_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index f5112cb2d991..48872997d6b4 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -555,7 +555,6 @@ MODULE_DEVICE_TABLE(i2c, lm355x_id);
static struct i2c_driver lm355x_i2c_driver = {
.driver = {
.name = LM355x_NAME,
- .owner = THIS_MODULE,
.pm = NULL,
},
.probe = lm355x_probe,
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index d3dec0132769..02ebe342f5af 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(i2c, lm3642_id);
static struct i2c_driver lm3642_i2c_driver = {
.driver = {
.name = LM3642_NAME,
- .owner = THIS_MODULE,
.pm = NULL,
},
.probe = lm3642_probe,
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 8ca197af2864..63a92542c8cb 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -514,20 +514,19 @@ static int lp5521_probe(struct i2c_client *client,
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata;
+ struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *np = client->dev.of_node;
- if (!dev_get_platdata(&client->dev)) {
+ if (!pdata) {
if (np) {
- ret = lp55xx_of_populate_pdata(&client->dev, np);
- if (ret < 0)
- return ret;
+ pdata = lp55xx_of_populate_pdata(&client->dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
} else {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
}
- pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 584dbbcec659..1d0187f42941 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -880,20 +880,19 @@ static int lp5523_probe(struct i2c_client *client,
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata;
+ struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *np = client->dev.of_node;
- if (!dev_get_platdata(&client->dev)) {
+ if (!pdata) {
if (np) {
- ret = lp55xx_of_populate_pdata(&client->dev, np);
- if (ret < 0)
- return ret;
+ pdata = lp55xx_of_populate_pdata(&client->dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
} else {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
}
- pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index ca85724ab138..0360c59dbdc9 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -515,20 +515,19 @@ static int lp5562_probe(struct i2c_client *client,
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata;
+ struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *np = client->dev.of_node;
- if (!dev_get_platdata(&client->dev)) {
+ if (!pdata) {
if (np) {
- ret = lp55xx_of_populate_pdata(&client->dev, np);
- if (ret < 0)
- return ret;
+ pdata = lp55xx_of_populate_pdata(&client->dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
} else {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
}
- pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 96d51e9879c9..59b76833f0d3 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -543,7 +543,8 @@ void lp55xx_unregister_sysfs(struct lp55xx_chip *chip)
}
EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs);
-int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
+struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
+ struct device_node *np)
{
struct device_node *child;
struct lp55xx_platform_data *pdata;
@@ -553,17 +554,17 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
num_channels = of_get_child_count(np);
if (num_channels == 0) {
dev_err(dev, "no LED channels\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
cfg = devm_kzalloc(dev, sizeof(*cfg) * num_channels, GFP_KERNEL);
if (!cfg)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
pdata->led_config = &cfg[0];
pdata->num_channels = num_channels;
@@ -588,9 +589,7 @@ int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
/* LP8501 specific */
of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel);
- dev->platform_data = pdata;
-
- return 0;
+ return pdata;
}
EXPORT_SYMBOL_GPL(lp55xx_of_populate_pdata);
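
Returning the pdata pointer directly means errors have to travel inside the pointer, which is what ERR_PTR()/IS_ERR()/PTR_ERR() are for: a small negative errno encoded at the top of the address space. The producer/consumer shape, as a generic sketch with hypothetical names:

static struct foo *foo_parse(struct device *dev)
{
        struct foo *f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL);

        if (!f)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        return f;
}

        /* Caller side, as in the lp55xx probes: */
        f = foo_parse(dev);
        if (IS_ERR(f))
                return PTR_ERR(f);
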
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index cceab483edd0..c7f1e6155001 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -202,7 +202,7 @@ extern int lp55xx_register_sysfs(struct lp55xx_chip *chip);
extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip);
/* common device tree population function */
-extern int lp55xx_of_populate_pdata(struct device *dev,
- struct device_node *np);
+extern struct lp55xx_platform_data
+*lp55xx_of_populate_pdata(struct device *dev, struct device_node *np);
#endif /* _LEDS_LP55XX_COMMON_H */
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index d3098e395fff..3f54f6f2b821 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -308,20 +308,19 @@ static int lp8501_probe(struct i2c_client *client,
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
- struct lp55xx_platform_data *pdata;
+ struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *np = client->dev.of_node;
- if (!dev_get_platdata(&client->dev)) {
+ if (!pdata) {
if (np) {
- ret = lp55xx_of_populate_pdata(&client->dev, np);
- if (ret < 0)
- return ret;
+ pdata = lp55xx_of_populate_pdata(&client->dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
} else {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
}
- pdata = dev_get_platdata(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 8c2b7fbe2392..79f084354e67 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -302,7 +302,7 @@ out:
return ret;
}
-static struct reg_default lp8860_reg_defs[] = {
+static const struct reg_default lp8860_reg_defs[] = {
{ LP8860_DISP_CL1_BRT_MSB, 0x00},
{ LP8860_DISP_CL1_BRT_LSB, 0x00},
{ LP8860_DISP_CL1_CURR_MSB, 0x00},
@@ -332,7 +332,7 @@ static const struct regmap_config lp8860_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static struct reg_default lp8860_eeprom_defs[] = {
+static const struct reg_default lp8860_eeprom_defs[] = {
{ LP8860_EEPROM_REG_0, 0x00 },
{ LP8860_EEPROM_REG_1, 0x00 },
{ LP8860_EEPROM_REG_2, 0x00 },
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index b8b0eec7b540..df348a06d8c7 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -13,6 +13,7 @@
#include <linux/led-class-flash.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 1fd6adbb43b7..b33514d9f427 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -31,50 +31,38 @@
#include <linux/platform_data/leds-kirkwood-ns2.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include "leds.h"
/*
- * The Network Space v2 dual-GPIO LED is wired to a CPLD and can blink in
- * relation with the SATA activity. This capability is exposed through the
- * "sata" sysfs attribute.
- *
- * The following array detail the different LED registers and the combination
- * of their possible values:
- *
- * cmd_led | slow_led | /SATA active | LED state
- * | | |
- * 1 | 0 | x | off
- * - | 1 | x | on
- * 0 | 0 | 1 | on
- * 0 | 0 | 0 | blink (rate 300ms)
+ * The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
+ * modes are available: off, on and SATA activity blinking. The LED modes are
+ * controlled through two GPIOs (command and slow): each combination of values
+ * for the command/slow GPIOs corresponds to a LED mode.
*/
-enum ns2_led_modes {
- NS_V2_LED_OFF,
- NS_V2_LED_ON,
- NS_V2_LED_SATA,
-};
-
-struct ns2_led_mode_value {
- enum ns2_led_modes mode;
- int cmd_level;
- int slow_level;
-};
-
-static struct ns2_led_mode_value ns2_led_modval[] = {
- { NS_V2_LED_OFF , 1, 0 },
- { NS_V2_LED_ON , 0, 1 },
- { NS_V2_LED_ON , 1, 1 },
- { NS_V2_LED_SATA, 0, 0 },
-};
-
struct ns2_led_data {
struct led_classdev cdev;
unsigned cmd;
unsigned slow;
+ bool can_sleep;
+ int mode_index;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
+ struct work_struct work;
+ int num_modes;
+ struct ns2_led_modval *modval;
};
+static void ns2_led_work(struct work_struct *work)
+{
+ struct ns2_led_data *led_dat =
+ container_of(work, struct ns2_led_data, work);
+ int i = led_dat->mode_index;
+
+ gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
+ gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
+}
+
static int ns2_led_get_mode(struct ns2_led_data *led_dat,
enum ns2_led_modes *mode)
{
@@ -83,22 +71,18 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
int cmd_level;
int slow_level;
- read_lock_irq(&led_dat->rw_lock);
+ cmd_level = gpio_get_value_cansleep(led_dat->cmd);
+ slow_level = gpio_get_value_cansleep(led_dat->slow);
- cmd_level = gpio_get_value(led_dat->cmd);
- slow_level = gpio_get_value(led_dat->slow);
-
- for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
- if (cmd_level == ns2_led_modval[i].cmd_level &&
- slow_level == ns2_led_modval[i].slow_level) {
- *mode = ns2_led_modval[i].mode;
+ for (i = 0; i < led_dat->num_modes; i++) {
+ if (cmd_level == led_dat->modval[i].cmd_level &&
+ slow_level == led_dat->modval[i].slow_level) {
+ *mode = led_dat->modval[i].mode;
ret = 0;
break;
}
}
- read_unlock_irq(&led_dat->rw_lock);
-
return ret;
}
@@ -106,19 +90,32 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
enum ns2_led_modes mode)
{
int i;
+ bool found = false;
unsigned long flags;
+ for (i = 0; i < led_dat->num_modes; i++)
+ if (mode == led_dat->modval[i].mode) {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ return;
+
write_lock_irqsave(&led_dat->rw_lock, flags);
- for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
- if (mode == ns2_led_modval[i].mode) {
- gpio_set_value(led_dat->cmd,
- ns2_led_modval[i].cmd_level);
- gpio_set_value(led_dat->slow,
- ns2_led_modval[i].slow_level);
- }
+ if (!led_dat->can_sleep) {
+ gpio_set_value(led_dat->cmd,
+ led_dat->modval[i].cmd_level);
+ gpio_set_value(led_dat->slow,
+ led_dat->modval[i].slow_level);
+ goto exit_unlock;
}
+ led_dat->mode_index = i;
+ schedule_work(&led_dat->work);
+
+exit_unlock:
write_unlock_irqrestore(&led_dat->rw_lock, flags);
}
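The rework above matters because ns2_led_set_mode() runs under a spinlock with interrupts disabled, while GPIOs behind an expander may sleep; the driver therefore records the requested mode and defers the actual writes to a work item. A hedged sketch of that deferral pattern (struct example_led and both functions are illustrative):

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_led {
        struct work_struct work;
        unsigned gpio;
        int level;                      /* level requested by the last caller */
};

static void example_led_work(struct work_struct *work)
{
        struct example_led *led = container_of(work, struct example_led, work);

        /* Process context: the sleeping GPIO accessor is allowed here. */
        gpio_set_value_cansleep(led->gpio, led->level);
}

static void example_led_set(struct example_led *led, int level)
{
        led->level = level;             /* cheap; safe under a spinlock */
        schedule_work(&led->work);      /* punt the sleeping write */
}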
@@ -148,7 +145,6 @@ static ssize_t ns2_led_sata_store(struct device *dev,
container_of(led_cdev, struct ns2_led_data, cdev);
int ret;
unsigned long enable;
- enum ns2_led_modes mode;
ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
@@ -157,19 +153,19 @@ static ssize_t ns2_led_sata_store(struct device *dev,
enable = !!enable;
if (led_dat->sata == enable)
- return count;
+ goto exit;
- ret = ns2_led_get_mode(led_dat, &mode);
- if (ret < 0)
- return ret;
+ led_dat->sata = enable;
+
+ if (!led_get_brightness(led_cdev))
+ goto exit;
- if (enable && mode == NS_V2_LED_ON)
+ if (enable)
ns2_led_set_mode(led_dat, NS_V2_LED_SATA);
- if (!enable && mode == NS_V2_LED_SATA)
+ else
ns2_led_set_mode(led_dat, NS_V2_LED_ON);
- led_dat->sata = enable;
-
+exit:
return count;
}
@@ -199,7 +195,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
enum ns2_led_modes mode;
ret = devm_gpio_request_one(&pdev->dev, template->cmd,
- gpio_get_value(template->cmd) ?
+ gpio_get_value_cansleep(template->cmd) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret) {
@@ -209,7 +205,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
}
ret = devm_gpio_request_one(&pdev->dev, template->slow,
- gpio_get_value(template->slow) ?
+ gpio_get_value_cansleep(template->slow) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret) {
@@ -228,6 +224,10 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
led_dat->cdev.groups = ns2_led_groups;
led_dat->cmd = template->cmd;
led_dat->slow = template->slow;
+ led_dat->can_sleep = gpio_cansleep(led_dat->cmd) |
+ gpio_cansleep(led_dat->slow);
+ led_dat->modval = template->modval;
+ led_dat->num_modes = template->num_modes;
ret = ns2_led_get_mode(led_dat, &mode);
if (ret < 0)
@@ -238,6 +238,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
led_dat->cdev.brightness =
(mode == NS_V2_LED_OFF) ? LED_OFF : LED_FULL;
+ INIT_WORK(&led_dat->work, ns2_led_work);
+
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0)
return ret;
@@ -248,6 +250,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
static void delete_ns2_led(struct ns2_led_data *led_dat)
{
led_classdev_unregister(&led_dat->cdev);
+ cancel_work_sync(&led_dat->work);
}
#ifdef CONFIG_OF_GPIO
@@ -259,9 +262,8 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
{
struct device_node *np = dev->of_node;
struct device_node *child;
- struct ns2_led *leds;
+ struct ns2_led *led, *leds;
int num_leds = 0;
- int i = 0;
num_leds = of_get_child_count(np);
if (!num_leds)
@@ -272,26 +274,57 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
if (!leds)
return -ENOMEM;
+ led = leds;
for_each_child_of_node(np, child) {
const char *string;
- int ret;
+ int ret, i, num_modes;
+ struct ns2_led_modval *modval;
ret = of_get_named_gpio(child, "cmd-gpio", 0);
if (ret < 0)
return ret;
- leds[i].cmd = ret;
+ led->cmd = ret;
ret = of_get_named_gpio(child, "slow-gpio", 0);
if (ret < 0)
return ret;
- leds[i].slow = ret;
+ led->slow = ret;
ret = of_property_read_string(child, "label", &string);
- leds[i].name = (ret == 0) ? string : child->name;
+ led->name = (ret == 0) ? string : child->name;
ret = of_property_read_string(child, "linux,default-trigger",
&string);
if (ret == 0)
- leds[i].default_trigger = string;
+ led->default_trigger = string;
+
+ ret = of_property_count_u32_elems(child, "modes-map");
+ if (ret < 0 || ret % 3) {
+ dev_err(dev,
+ "Missing or malformed modes-map property\n");
+ return -EINVAL;
+ }
+
+ num_modes = ret / 3;
+ modval = devm_kzalloc(dev,
+ num_modes * sizeof(struct ns2_led_modval),
+ GFP_KERNEL);
+ if (!modval)
+ return -ENOMEM;
+
+ for (i = 0; i < num_modes; i++) {
+ of_property_read_u32_index(child,
+ "modes-map", 3 * i,
+ (u32 *) &modval[i].mode);
+ of_property_read_u32_index(child,
+ "modes-map", 3 * i + 1,
+ (u32 *) &modval[i].cmd_level);
+ of_property_read_u32_index(child,
+ "modes-map", 3 * i + 2,
+ (u32 *) &modval[i].slow_level);
+ }
+
+ led->num_modes = num_modes;
+ led->modval = modval;
- i++;
+ led++;
}
pdata->leds = leds;
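The parsing added above treats "modes-map" as a flat array of u32 triplets, <mode cmd_level slow_level>, one triplet per LED mode. A minimal sketch of reading one such packed triplet, assuming a hypothetical property named "example-map":

#include <linux/of.h>

static int example_read_triplet(const struct device_node *np, int n,
                                u32 *mode, u32 *cmd, u32 *slow)
{
        int ret;

        /* Elements are laid out flat: indices 3n, 3n+1 and 3n+2. */
        ret = of_property_read_u32_index(np, "example-map", 3 * n, mode);
        if (ret)
                return ret;
        ret = of_property_read_u32_index(np, "example-map", 3 * n + 1, cmd);
        if (ret)
                return ret;
        return of_property_read_u32_index(np, "example-map", 3 * n + 2, slow);
}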
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index c3a08b60535b..b775e1efecd3 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -379,7 +379,6 @@ static int pca955x_remove(struct i2c_client *client)
static struct i2c_driver pca955x_driver = {
.driver = {
.name = "leds-pca955x",
- .owner = THIS_MODULE,
},
.probe = pca955x_probe,
.remove = pca955x_remove,
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index bee3e1ab27fd..41f269fe0920 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -332,6 +332,7 @@ static const struct of_device_id of_pca963x_match[] = {
{ .compatible = "nxp,pca9635", },
{},
};
+MODULE_DEVICE_TABLE(of, of_pca963x_match);
#else
static struct pca963x_platform_data *
pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
@@ -458,7 +459,6 @@ static int pca963x_remove(struct i2c_client *client)
static struct i2c_driver pca963x_driver = {
.driver = {
.name = "leds-pca963x",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pca963x_match),
},
.probe = pca963x_probe,
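Several drivers in this section (pca963x above, tca6507 and the therm_windtunnel tables below) gain MODULE_DEVICE_TABLE() entries. The macro exports the match table into the module's metadata so userspace can autoload the module from the modalias uevent when a matching device appears. A minimal sketch with an illustrative compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-device", },     /* illustrative */
        { },                                            /* sentinel entry */
};
MODULE_DEVICE_TABLE(of, example_of_match);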
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
new file mode 100644
index 000000000000..2c5c5b12ab64
--- /dev/null
+++ b/drivers/leds/leds-powernv.c
@@ -0,0 +1,345 @@
+/*
+ * PowerNV LED Driver
+ *
+ * Copyright IBM Corp. 2015
+ *
+ * Author: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
+ * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/opal.h>
+
+/* Map LED type to description. */
+struct led_type_map {
+ const int type;
+ const char *desc;
+};
+static const struct led_type_map led_type_map[] = {
+ {OPAL_SLOT_LED_TYPE_ID, "identify"},
+ {OPAL_SLOT_LED_TYPE_FAULT, "fault"},
+ {OPAL_SLOT_LED_TYPE_ATTN, "attention"},
+ {-1, NULL},
+};
+
+struct powernv_led_common {
+ /*
+ * By default the unload path resets all the LEDs, but on the PowerNV
+ * platform we want to retain the LED state across reboots: the LEDs
+ * are controlled by firmware, and the service processor can modify
+ * them independently of the OS. Hence avoid resetting the LEDs in
+ * the unload path.
+ */
+ bool led_disabled;
+
+ /* Max supported LED type */
+ __be64 max_led_type;
+
+ /* global lock */
+ struct mutex lock;
+};
+
+/* PowerNV LED data */
+struct powernv_led_data {
+ struct led_classdev cdev;
+ char *loc_code; /* LED location code */
+ int led_type; /* OPAL_SLOT_LED_TYPE_* */
+
+ struct powernv_led_common *common;
+};
+
+
+/* Returns OPAL_SLOT_LED_TYPE_* for a given LED type string */
+static int powernv_get_led_type(const char *led_type_desc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(led_type_map); i++)
+ if (!strcmp(led_type_map[i].desc, led_type_desc))
+ return led_type_map[i].type;
+
+ return -1;
+}
+
+/*
+ * This commits the state change of the requested LED through an OPAL call.
+ * This function is called from workqueue task context whenever it gets
+ * scheduled, and can sleep in the opal_async_wait_response() call.
+ */
+static void powernv_led_set(struct powernv_led_data *powernv_led,
+ enum led_brightness value)
+{
+ int rc, token;
+ u64 led_mask, led_value = 0;
+ __be64 max_type;
+ struct opal_msg msg;
+ struct device *dev = powernv_led->cdev.dev;
+ struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+ /* Prepare for the OPAL call */
+ max_type = powernv_led_common->max_led_type;
+ led_mask = OPAL_SLOT_LED_STATE_ON << powernv_led->led_type;
+ if (value)
+ led_value = led_mask;
+
+ /* OPAL async call */
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ dev_err(dev, "%s: Couldn't get OPAL async token\n",
+ __func__);
+ return;
+ }
+
+ rc = opal_leds_set_ind(token, powernv_led->loc_code,
+ led_mask, led_value, &max_type);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ dev_err(dev, "%s: OPAL set LED call failed for %s [rc=%d]\n",
+ __func__, powernv_led->loc_code, rc);
+ goto out_token;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ dev_err(dev,
+ "%s: Failed to wait for the async response [rc=%d]\n",
+ __func__, rc);
+ goto out_token;
+ }
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc != OPAL_SUCCESS)
+ dev_err(dev, "%s: OPAL async call returned failure [rc=%d]\n",
+ __func__, rc);
+
+out_token:
+ opal_async_release_token(token);
+}
+
+/*
+ * This function fetches the LED state for a given LED type for
+ * mentioned LED classdev structure.
+ */
+static enum led_brightness powernv_led_get(struct powernv_led_data *powernv_led)
+{
+ int rc;
+ __be64 mask, value, max_type;
+ u64 led_mask, led_value;
+ struct device *dev = powernv_led->cdev.dev;
+ struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+ /* Fetch all LED status */
+ mask = cpu_to_be64(0);
+ value = cpu_to_be64(0);
+ max_type = powernv_led_common->max_led_type;
+
+ rc = opal_leds_get_ind(powernv_led->loc_code,
+ &mask, &value, &max_type);
+ if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
+ dev_err(dev, "%s: OPAL get led call failed [rc=%d]\n",
+ __func__, rc);
+ return LED_OFF;
+ }
+
+ led_mask = be64_to_cpu(mask);
+ led_value = be64_to_cpu(value);
+
+ /* Check whether the LED status is available */
+ if (!((led_mask >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)) {
+ dev_err(dev, "%s: LED status not available for %s\n",
+ __func__, powernv_led->cdev.name);
+ return LED_OFF;
+ }
+
+ /* LED status value */
+ if ((led_value >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)
+ return LED_FULL;
+
+ return LED_OFF;
+}
+
+/*
+ * LED classdev 'brightness_set' function. This updates the LED state
+ * through an OPAL call, serialized by the common mutex.
+ */
+static void powernv_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct powernv_led_data *powernv_led =
+ container_of(led_cdev, struct powernv_led_data, cdev);
+ struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+ /* Do not modify LED in unload path */
+ if (powernv_led_common->led_disabled)
+ return;
+
+ mutex_lock(&powernv_led_common->lock);
+ powernv_led_set(powernv_led, value);
+ mutex_unlock(&powernv_led_common->lock);
+}
+
+/* LED classdev 'brightness_get' function */
+static enum led_brightness powernv_brightness_get(struct led_classdev *led_cdev)
+{
+ struct powernv_led_data *powernv_led =
+ container_of(led_cdev, struct powernv_led_data, cdev);
+
+ return powernv_led_get(powernv_led);
+}
+
+/*
+ * This function registers classdev structure for any given type of LED on
+ * a given child LED device node.
+ */
+static int powernv_led_create(struct device *dev,
+ struct powernv_led_data *powernv_led,
+ const char *led_type_desc)
+{
+ int rc;
+
+ /* Make sure LED type is supported */
+ powernv_led->led_type = powernv_get_led_type(led_type_desc);
+ if (powernv_led->led_type == -1) {
+ dev_warn(dev, "%s: No support for LED type: %s\n",
+ __func__, led_type_desc);
+ return -EINVAL;
+ }
+
+ /* Create the name for classdev */
+ powernv_led->cdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+ powernv_led->loc_code,
+ led_type_desc);
+ if (!powernv_led->cdev.name) {
+ dev_err(dev,
+ "%s: Memory allocation failed for classdev name\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ powernv_led->cdev.brightness_set = powernv_brightness_set;
+ powernv_led->cdev.brightness_get = powernv_brightness_get;
+ powernv_led->cdev.brightness = LED_OFF;
+ powernv_led->cdev.max_brightness = LED_FULL;
+
+ /* Register the classdev */
+ rc = devm_led_classdev_register(dev, &powernv_led->cdev);
+ if (rc) {
+ dev_err(dev, "%s: Classdev registration failed for %s\n",
+ __func__, powernv_led->cdev.name);
+ }
+
+ return rc;
+}
+
+/* Walk the LED device tree node and register a classdev per LED type */
+static int powernv_led_classdev(struct platform_device *pdev,
+ struct device_node *led_node,
+ struct powernv_led_common *powernv_led_common)
+{
+ const char *cur = NULL;
+ int rc = -1;
+ struct property *p;
+ struct device_node *np;
+ struct powernv_led_data *powernv_led;
+ struct device *dev = &pdev->dev;
+
+ for_each_child_of_node(led_node, np) {
+ p = of_find_property(np, "led-types", NULL);
+ if (!p)
+ continue;
+
+ while ((cur = of_prop_next_string(p, cur)) != NULL) {
+ powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
+ GFP_KERNEL);
+ if (!powernv_led)
+ return -ENOMEM;
+
+ powernv_led->common = powernv_led_common;
+ powernv_led->loc_code = (char *)np->name;
+
+ rc = powernv_led_create(dev, powernv_led, cur);
+ if (rc)
+ return rc;
+ } /* while end */
+ }
+
+ return rc;
+}
+
+/* Platform driver probe */
+static int powernv_led_probe(struct platform_device *pdev)
+{
+ struct device_node *led_node;
+ struct powernv_led_common *powernv_led_common;
+ struct device *dev = &pdev->dev;
+
+ led_node = of_find_node_by_path("/ibm,opal/leds");
+ if (!led_node) {
+ dev_err(dev, "%s: LED parent device node not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
+ GFP_KERNEL);
+ if (!powernv_led_common)
+ return -ENOMEM;
+
+ mutex_init(&powernv_led_common->lock);
+ powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
+
+ platform_set_drvdata(pdev, powernv_led_common);
+
+ return powernv_led_classdev(pdev, led_node, powernv_led_common);
+}
+
+/* Platform driver remove */
+static int powernv_led_remove(struct platform_device *pdev)
+{
+ struct powernv_led_common *powernv_led_common;
+
+ /* Disable LED operation */
+ powernv_led_common = platform_get_drvdata(pdev);
+ powernv_led_common->led_disabled = true;
+
+ /* Destroy lock */
+ mutex_destroy(&powernv_led_common->lock);
+
+ dev_info(&pdev->dev, "PowerNV LED module unregistered\n");
+ return 0;
+}
+
+/* Platform driver property match */
+static const struct of_device_id powernv_led_match[] = {
+ {
+ .compatible = "ibm,opal-v3-led",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, powernv_led_match);
+
+static struct platform_driver powernv_led_driver = {
+ .probe = powernv_led_probe,
+ .remove = powernv_led_remove,
+ .driver = {
+ .name = "powernv-led-driver",
+ .of_match_table = powernv_led_match,
+ },
+};
+
+module_platform_driver(powernv_led_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV LED driver");
+MODULE_AUTHOR("Vasant Hegde <hegdevasant@linux.vnet.ibm.com>");
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index d1660b039812..b88900d721e4 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -83,9 +83,9 @@ static int syscon_led_probe(struct platform_device *pdev)
return -ENODEV;
}
map = syscon_node_to_regmap(parent->of_node);
- if (!map) {
+ if (IS_ERR(map)) {
dev_err(dev, "no regmap for syscon LED parent\n");
- return -ENODEV;
+ return PTR_ERR(map);
}
sled = devm_kzalloc(dev, sizeof(*sled), GFP_KERNEL);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 20fa8e77f186..edbecc4ca2da 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -735,6 +735,7 @@ static const struct of_device_id of_tca6507_leds_match[] = {
{ .compatible = "ti,tca6507", },
{},
};
+MODULE_DEVICE_TABLE(of, of_tca6507_leds_match);
#else
static struct tca6507_platform_data *
@@ -830,7 +831,6 @@ static int tca6507_remove(struct i2c_client *client)
static struct i2c_driver tca6507_driver = {
.driver = {
.name = "leds-tca6507",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_tca6507_leds_match),
},
.probe = tca6507_probe,
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index de16c29d7895..b806eca83d27 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -231,10 +231,6 @@ tlc591xx_probe(struct i2c_client *client,
if (!count || count > tlc591xx->max_leds)
return -EINVAL;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 49794b47b51c..5bda6a9b56bb 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -72,7 +72,7 @@ config LEDS_TRIGGER_CPU
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
depends on LEDS_TRIGGERS
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
This allows LEDs to be controlled by gpio events. It's good
when using gpios as switches and triggering the needed LEDs
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 109dcaa15934..68dcbcb4fc5b 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -408,6 +408,7 @@ static const struct i2c_device_id therm_windtunnel_id[] = {
{ "therm_adm1030", adm1030 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
static int
do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
@@ -459,6 +460,7 @@ static const struct of_device_id therm_of_match[] = {{
.compatible = "adm1030"
}, {}
};
+MODULE_DEVICE_TABLE(of, therm_of_match);
static struct platform_driver therm_of_driver = {
.driver = {
diff --git a/drivers/macintosh/windfarm.h b/drivers/macintosh/windfarm.h
index 028cdac2d33d..901c42f71b5a 100644
--- a/drivers/macintosh/windfarm.h
+++ b/drivers/macintosh/windfarm.h
@@ -53,11 +53,9 @@ struct wf_control {
* the kref and wf_unregister_control will decrement it, thus the
* object creating/disposing a given control shouldn't assume it
* still exists after wf_unregister_control has been called.
- * wf_find_control will inc the refcount for you
*/
extern int wf_register_control(struct wf_control *ct);
extern void wf_unregister_control(struct wf_control *ct);
-extern struct wf_control * wf_find_control(const char *name);
extern int wf_get_control(struct wf_control *ct);
extern void wf_put_control(struct wf_control *ct);
@@ -117,7 +115,6 @@ struct wf_sensor {
/* Same lifetime rules as controls */
extern int wf_register_sensor(struct wf_sensor *sr);
extern void wf_unregister_sensor(struct wf_sensor *sr);
-extern struct wf_sensor * wf_find_sensor(const char *name);
extern int wf_get_sensor(struct wf_sensor *sr);
extern void wf_put_sensor(struct wf_sensor *sr);
@@ -144,7 +141,6 @@ extern int wf_unregister_client(struct notifier_block *nb);
/* Overtemp conditions. Those are refcounted */
extern void wf_set_overtemp(void);
extern void wf_clear_overtemp(void);
-extern int wf_is_overtemp(void);
#define WF_EVENT_NEW_CONTROL 0 /* param is wf_control * */
#define WF_EVENT_NEW_SENSOR 1 /* param is wf_sensor * */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 3ee198b65843..465d770ab0bb 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -72,7 +72,7 @@ static inline void wf_notify(int event, void *param)
blocking_notifier_call_chain(&wf_client_list, event, param);
}
-int wf_critical_overtemp(void)
+static int wf_critical_overtemp(void)
{
static char * critical_overtemp_path = "/sbin/critical_overtemp";
char *argv[] = { critical_overtemp_path, NULL };
@@ -84,7 +84,6 @@ int wf_critical_overtemp(void)
return call_usermodehelper(critical_overtemp_path,
argv, envp, UMH_WAIT_EXEC);
}
-EXPORT_SYMBOL_GPL(wf_critical_overtemp);
static int wf_thread_func(void *data)
{
@@ -255,24 +254,6 @@ void wf_unregister_control(struct wf_control *ct)
}
EXPORT_SYMBOL_GPL(wf_unregister_control);
-struct wf_control * wf_find_control(const char *name)
-{
- struct wf_control *ct;
-
- mutex_lock(&wf_lock);
- list_for_each_entry(ct, &wf_controls, link) {
- if (!strcmp(ct->name, name)) {
- if (wf_get_control(ct))
- ct = NULL;
- mutex_unlock(&wf_lock);
- return ct;
- }
- }
- mutex_unlock(&wf_lock);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_control);
-
int wf_get_control(struct wf_control *ct)
{
if (!try_module_get(ct->ops->owner))
@@ -368,24 +349,6 @@ void wf_unregister_sensor(struct wf_sensor *sr)
}
EXPORT_SYMBOL_GPL(wf_unregister_sensor);
-struct wf_sensor * wf_find_sensor(const char *name)
-{
- struct wf_sensor *sr;
-
- mutex_lock(&wf_lock);
- list_for_each_entry(sr, &wf_sensors, link) {
- if (!strcmp(sr->name, name)) {
- if (wf_get_sensor(sr))
- sr = NULL;
- mutex_unlock(&wf_lock);
- return sr;
- }
- }
- mutex_unlock(&wf_lock);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_sensor);
-
int wf_get_sensor(struct wf_sensor *sr)
{
if (!try_module_get(sr->ops->owner))
@@ -435,7 +398,7 @@ int wf_unregister_client(struct notifier_block *nb)
{
mutex_lock(&wf_lock);
blocking_notifier_chain_unregister(&wf_client_list, nb);
- wf_client_count++;
+ wf_client_count--;
if (wf_client_count == 0)
wf_stop_thread();
mutex_unlock(&wf_lock);
@@ -474,12 +437,6 @@ void wf_clear_overtemp(void)
}
EXPORT_SYMBOL_GPL(wf_clear_overtemp);
-int wf_is_overtemp(void)
-{
- return (wf_overtemp != 0);
-}
-EXPORT_SYMBOL_GPL(wf_is_overtemp);
-
static int __init windfarm_core_init(void)
{
DBG("wf: core loaded\n");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e269f084497d..bbec5009cdc2 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -46,6 +46,7 @@ config OMAP_MBOX_KFIFO_SIZE
config PCC
bool "Platform Communication Channel Driver"
depends on ACPI
+ default n
help
ACPI 5.0+ spec defines a generic mode of communication
between the OS and a platform such as the BMC. This medium
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index d9e99f981aa9..99befa76e37c 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -96,7 +96,7 @@ static int mhu_startup(struct mbox_chan *chan)
IRQF_SHARED, "mhu_link", chan);
if (ret) {
dev_err(chan->mbox->dev,
- "Unable to aquire IRQ %d\n", mlink->irq);
+ "Unable to acquire IRQ %d\n", mlink->irq);
return ret;
}
@@ -148,7 +148,7 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
mhu->mbox.ops = &mhu_ops;
mhu->mbox.txdone_irq = false;
mhu->mbox.txdone_poll = true;
- mhu->mbox.txpoll_period = 10;
+ mhu->mbox.txpoll_period = 1;
amba_set_drvdata(adev, mhu);
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 0b47dd42f3bd..cfb4b4496dd9 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -204,7 +204,6 @@ MODULE_DEVICE_TABLE(of, bcm2835_mbox_of_match);
static struct platform_driver bcm2835_mbox_driver = {
.driver = {
.name = "bcm2835-mbox",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_mbox_of_match,
},
.probe = bcm2835_mbox_probe,
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c7fdb57fd166..6a4811f85705 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -26,8 +26,6 @@
static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);
-static void poll_txdone(unsigned long data);
-
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
@@ -88,7 +86,9 @@ exit:
spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL))
- poll_txdone((unsigned long)chan->mbox);
+ /* kick start the timer immediately to avoid delays */
+ hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
+ HRTIMER_MODE_REL);
}
static void tx_tick(struct mbox_chan *chan, int r)
@@ -112,9 +112,10 @@ static void tx_tick(struct mbox_chan *chan, int r)
complete(&chan->tx_complete);
}
-static void poll_txdone(unsigned long data)
+static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
- struct mbox_controller *mbox = (struct mbox_controller *)data;
+ struct mbox_controller *mbox =
+ container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
@@ -130,9 +131,11 @@ static void poll_txdone(unsigned long data)
}
}
- if (resched)
- mod_timer(&mbox->poll, jiffies +
- msecs_to_jiffies(mbox->txpoll_period));
+ if (resched) {
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
}
/**
@@ -451,9 +454,9 @@ int mbox_controller_register(struct mbox_controller *mbox)
txdone = TXDONE_BY_ACK;
if (txdone == TXDONE_BY_POLL) {
- mbox->poll.function = &poll_txdone;
- mbox->poll.data = (unsigned long)mbox;
- init_timer(&mbox->poll);
+ hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ mbox->poll_hrt.function = txdone_hrtimer;
}
for (i = 0; i < mbox->num_chans; i++) {
@@ -495,7 +498,7 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
mbox_free_channel(&mbox->chans[i]);
if (mbox->txdone_poll)
- del_timer_sync(&mbox->poll);
+ hrtimer_cancel(&mbox->poll_hrt);
mutex_unlock(&con_mutex);
}
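The mailbox core above moves TX-done polling from a jiffies-based timer to an hrtimer, allowing sub-jiffy polling (note arm_mhu's txpoll_period dropping from 10 to 1 ms above) and an immediate kick from the send path. A minimal sketch of the restart pattern, with example_pending() standing in for the controller's "work left?" predicate:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_poll;

static bool example_pending(void);      /* assumed predicate, not a real API */

static enum hrtimer_restart example_poll_fn(struct hrtimer *t)
{
        if (example_pending()) {
                /* Re-arm relative to now and keep polling. */
                hrtimer_forward_now(t, ms_to_ktime(1));
                return HRTIMER_RESTART;
        }
        return HRTIMER_NORESTART;       /* done; let the timer stop */
}

static void example_poll_init(void)
{
        hrtimer_init(&example_poll, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        example_poll.function = example_poll_fn;
        /* Kick it immediately, as the send path now does above. */
        hrtimer_start(&example_poll, ktime_set(0, 0), HRTIMER_MODE_REL);
}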
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 26d121d1d501..68885a82e704 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -352,4 +352,10 @@ static int __init pcc_init(void)
return 0;
}
-device_initcall(pcc_init);
+
+/*
+ * Make PCC init postcore so that users of this mailbox
+ * such as the ACPI Processor driver have it available
+ * at their init.
+ */
+postcore_initcall(pcc_init);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..3e01e6fb3424 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
the ciphers you're going to use in the cryptoapi configuration.
For further information on dm-crypt and userspace tools see:
- <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+ <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
To compile this code as a module, choose M here: the module will
be called dm-crypt.
@@ -393,7 +393,7 @@ config DM_MULTIPATH
# of SCSI_DH if the latter isn't defined but if
# it is, DM_MULTIPATH must depend on it. We get a build
# error if SCSI_DH=m and DM_MULTIPATH=y
- depends on SCSI_DH || !SCSI_DH
+ depends on !SCSI_DH || SCSI
---help---
Allow volume managers to support multipath hardware.
@@ -478,7 +478,7 @@ config DM_LOG_WRITES
This device-mapper target takes two devices, one device to use
normally, one to log all write operations done to the first device.
This is for use by file system developers wishing to verify that
- their fs is writing a consitent file system at all times by allowing
+ their fs is writing a consistent file system at all times by allowing
them to replay the log in a variety of ways and to check the
contents.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 04f7bc28ef83..6b420a55c745 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -243,19 +243,6 @@ struct keybuf {
DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
-struct bio_split_pool {
- struct bio_set *bio_split;
- mempool_t *bio_split_hook;
-};
-
-struct bio_split_hook {
- struct closure cl;
- struct bio_split_pool *p;
- struct bio *bio;
- bio_end_io_t *bi_end_io;
- void *bi_private;
-};
-
struct bcache_device {
struct closure cl;
@@ -288,8 +275,6 @@ struct bcache_device {
int (*cache_miss)(struct btree *, struct search *,
struct bio *, unsigned);
int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
-
- struct bio_split_pool bio_split_hook;
};
struct io {
@@ -454,8 +439,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
-
- struct bio_split_pool bio_split_hook;
};
struct gc_stat {
@@ -873,7 +856,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 00cde40db572..83392f856dfd 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -278,7 +278,7 @@ err:
goto out;
}
-static void btree_node_read_endio(struct bio *bio, int error)
+static void btree_node_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
@@ -305,7 +305,7 @@ static void bch_btree_node_read(struct btree *b)
bch_submit_bbio(bio, b->c, &b->key, 0);
closure_sync(&cl);
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (bio->bi_error)
set_btree_node_io_error(b);
bch_bbio_free(bio, b->c);
@@ -371,15 +371,15 @@ static void btree_node_write_done(struct closure *cl)
__btree_node_write_done(cl);
}
-static void btree_node_write_endio(struct bio *bio, int error)
+static void btree_node_write_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct btree *b = container_of(cl, struct btree, io);
- if (error)
+ if (bio->bi_error)
set_btree_node_io_error(b);
- bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+ bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
closure_put(cl);
}
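These bcache hunks track a block-layer interface change visible throughout this diff: bio completion callbacks lose their int error argument, the status is carried in bio->bi_error instead, and bio_endio() likewise drops its error parameter. A minimal sketch of the new shape (example names only):

#include <linux/bio.h>
#include <linux/printk.h>

static void example_endio(struct bio *bio)
{
        if (bio->bi_error)
                pr_err("example: I/O failed: %d\n", bio->bi_error);
        /* ...complete whatever was waiting on this bio... */
}

static void example_fail(struct bio *bio, int error)
{
        bio->bi_error = error;  /* the error now travels inside the bio */
        bio_endio(bio);         /* no error argument anymore */
}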
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index a08e3eeac3c5..782cc2c8a185 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -38,7 +38,7 @@
* they are running owned by the thread that is running them. Otherwise, suppose
* you submit some bios and wish to have a function run when they all complete:
*
- * foo_endio(struct bio *bio, int error)
+ * foo_endio(struct bio *bio)
* {
* closure_put(cl);
* }
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_sub(_cl, CLOSURE_RUNNING + 1); \
- return; \
} while (0)
/**
@@ -349,7 +348,6 @@ do { \
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_queue(_cl); \
- return; \
} while (0)
/**
@@ -365,7 +363,6 @@ do { \
do { \
set_closure_fn(_cl, _destructor, NULL); \
closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
- return; \
} while (0)
/**
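The closure.h hunks above drop the hidden "return;" from continue_at(), continue_at_nobarrier() and closure_return_with_destructor(); that is why the journal.c and request.c hunks below add explicit returns after each call. A sketch of the calling convention this imposes (need_more_work() is an assumed helper):

#include <linux/workqueue.h>
#include "closure.h"                    /* bcache-local closure machinery */

static bool need_more_work(struct closure *cl); /* assumed helper */

static void example_step(struct closure *cl)
{
        if (need_more_work(cl)) {
                continue_at(cl, example_step, system_wq);
                return;         /* now required: the macro no longer returns */
        }
        closure_return(cl);     /* fine as the last statement of the function */
}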
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index cb64e64a4789..86a0bb87124e 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,104 +11,6 @@
#include <linux/blkdev.h>
-static unsigned bch_bio_max_sectors(struct bio *bio)
-{
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- struct bio_vec bv;
- struct bvec_iter iter;
- unsigned ret = 0, seg = 0;
-
- if (bio->bi_rw & REQ_DISCARD)
- return min(bio_sectors(bio), q->limits.max_discard_sectors);
-
- bio_for_each_segment(bv, bio, iter) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_iter.bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q)))
- break;
-
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
- break;
-
- seg++;
- ret += bv.bv_len >> 9;
- }
-
- ret = min(ret, queue_max_sectors(q));
-
- WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
-
- return ret;
-}
-
-static void bch_bio_submit_split_done(struct closure *cl)
-{
- struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
- s->bio->bi_end_io = s->bi_end_io;
- s->bio->bi_private = s->bi_private;
- bio_endio(s->bio, 0);
-
- closure_debug_destroy(&s->cl);
- mempool_free(s, s->p->bio_split_hook);
-}
-
-static void bch_bio_submit_split_endio(struct bio *bio, int error)
-{
- struct closure *cl = bio->bi_private;
- struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
- if (error)
- clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
-
- bio_put(bio);
- closure_put(cl);
-}
-
-void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
-{
- struct bio_split_hook *s;
- struct bio *n;
-
- if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
- goto submit;
-
- if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
- goto submit;
-
- s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
- closure_init(&s->cl, NULL);
-
- s->bio = bio;
- s->p = p;
- s->bi_end_io = bio->bi_end_io;
- s->bi_private = bio->bi_private;
- bio_get(bio);
-
- do {
- n = bio_next_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
-
- n->bi_end_io = bch_bio_submit_split_endio;
- n->bi_private = &s->cl;
-
- closure_get(&s->cl);
- generic_make_request(n);
- } while (n != bio);
-
- continue_at(&s->cl, bch_bio_submit_split_done, NULL);
-submit:
- generic_make_request(bio);
-}
-
/* Bios with headers */
void bch_bbio_free(struct bio *bio, struct cache_set *c)
@@ -138,7 +40,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
- closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+ closure_bio_submit(bio, bio->bi_private);
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ce64fc851251..29eba7219b01 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -24,7 +24,7 @@
* bit.
*/
-static void journal_read_endio(struct bio *bio, int error)
+static void journal_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
@@ -61,7 +61,7 @@ reread: left = ca->sb.bucket_size - offset;
bio->bi_private = &cl;
bch_bio_map(bio, data);
- closure_bio_submit(bio, &cl, ca);
+ closure_bio_submit(bio, &cl);
closure_sync(&cl);
/* This function could be simpler now since we no longer write
@@ -401,7 +401,7 @@ retry:
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
-static void journal_discard_endio(struct bio *bio, int error)
+static void journal_discard_endio(struct bio *bio)
{
struct journal_device *ja =
container_of(bio, struct journal_device, discard_bio);
@@ -547,11 +547,11 @@ void bch_journal_next(struct journal *j)
pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
-static void journal_write_endio(struct bio *bio, int error)
+static void journal_write_endio(struct bio *bio)
{
struct journal_write *w = bio->bi_private;
- cache_set_err_on(error, w->c, "journal io error");
+ cache_set_err_on(bio->bi_error, w->c, "journal io error");
closure_put(&w->c->journal.io);
}
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
if (!w->need_write) {
closure_return_with_destructor(cl, journal_write_unlock);
+ return;
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
btree_flush_write(c);
continue_at(cl, journal_write, system_wq);
+ return;
}
c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
@@ -646,7 +648,7 @@ static void journal_write_unlocked(struct closure *cl)
spin_unlock(&c->journal.lock);
while ((bio = bio_list_pop(&list)))
- closure_bio_submit(bio, cl, c->cache[0]);
+ closure_bio_submit(bio, cl);
continue_at(cl, journal_write_done, NULL);
}
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index cd7490311e51..b929fc944e9c 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -60,20 +60,20 @@ static void write_moving_finish(struct closure *cl)
closure_return_with_destructor(cl, moving_io_destructor);
}
-static void read_moving_endio(struct bio *bio, int error)
+static void read_moving_endio(struct bio *bio)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
- if (error)
- io->op.error = error;
+ if (bio->bi_error)
+ io->op.error = bio->bi_error;
else if (!KEY_DIRTY(&b->key) &&
ptr_stale(io->op.c, &b->key, 0)) {
io->op.error = -EINTR;
}
- bch_bbio_endio(io->op.c, bio, error, "reading data to move");
+ bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
}
static void moving_init(struct moving_io *io)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4afb2d26b148..8e9877b04637 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
if (journal_ref)
atomic_dec_bug(journal_ref);
- if (!op->insert_data_done)
+ if (!op->insert_data_done) {
continue_at(cl, bch_data_insert_start, op->wq);
+ return;
+ }
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -171,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
bch_data_insert_keys(cl);
}
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
- if (error) {
+ if (bio->bi_error) {
/* TODO: We could try to recover from this. */
if (op->writeback)
- op->error = error;
+ op->error = bio->bi_error;
else if (!op->replace)
set_closure_fn(cl, bch_data_insert_error, op->wq);
else
set_closure_fn(cl, NULL, NULL);
}
- bch_bbio_endio(op->c, bio, error, "writing data to cache");
+ bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
3 + (op->csum ? 1 : 0),
- op->c))
+ op->c)) {
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
+ }
k = op->insert_keys.top;
bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
op->insert_data_done = true;
continue_at(cl, bch_data_insert_keys, op->wq);
+ return;
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
BUG_ON(op->writeback);
@@ -472,7 +477,7 @@ struct search {
struct data_insert_op iop;
};
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct closure *cl = bio->bi_private;
@@ -485,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
* from the backing device.
*/
- if (error)
- s->iop.error = error;
+ if (bio->bi_error)
+ s->iop.error = bio->bi_error;
else if (!KEY_DIRTY(&b->key) &&
ptr_stale(s->iop.c, &b->key, 0)) {
atomic_long_inc(&s->iop.c->cache_read_races);
s->iop.error = -EINTR;
}
- bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+ bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}
/*
@@ -576,21 +581,23 @@ static void cache_lookup(struct closure *cl)
ret = bch_btree_map_keys(&s->op, s->iop.c,
&KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
continue_at(cl, cache_lookup, bcache_wq);
+ return;
+ }
closure_return(cl);
}
/* Common code for the make_request functions */
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
- if (error) {
+ if (bio->bi_error) {
struct search *s = container_of(cl, struct search, cl);
- s->iop.error = error;
+ s->iop.error = bio->bi_error;
/* Only cache read errors are recoverable */
s->recoverable = false;
}
@@ -606,7 +613,8 @@ static void bio_complete(struct search *s)
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
- bio_endio(s->orig_bio, s->iop.error);
+ s->orig_bio->bi_error = s->iop.error;
+ bio_endio(s->orig_bio);
s->orig_bio = NULL;
}
}
@@ -711,7 +719,7 @@ static void cached_dev_read_error(struct closure *cl)
/* XXX: invalidate cache */
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
}
continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -834,7 +842,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
- closure_bio_submit(cache_bio, &s->cl, s->d);
+ closure_bio_submit(cache_bio, &s->cl);
return ret;
out_put:
@@ -842,7 +850,7 @@ out_put:
out_submit:
miss->bi_end_io = request_endio;
miss->bi_private = &s->cl;
- closure_bio_submit(miss, &s->cl, s->d);
+ closure_bio_submit(miss, &s->cl);
return ret;
}
@@ -907,7 +915,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
if (!(bio->bi_rw & REQ_DISCARD) ||
blk_queue_discard(bdev_get_queue(dc->bdev)))
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
} else if (s->iop.writeback) {
bch_writeback_add(dc);
s->iop.bio = bio;
@@ -922,12 +930,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
flush->bi_end_io = request_endio;
flush->bi_private = cl;
- closure_bio_submit(flush, cl, s->d);
+ closure_bio_submit(flush, cl);
}
} else {
s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
}
closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -943,7 +951,7 @@ static void cached_dev_nodata(struct closure *cl)
bch_journal_meta(s->iop.c, cl);
/* If it's a flush, we send the flush to the backing device too */
- closure_bio_submit(bio, cl, s->d);
+ closure_bio_submit(bio, cl);
continue_at(cl, cached_dev_bio_complete, NULL);
}
@@ -985,9 +993,9 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
} else {
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
- bio_endio(bio, 0);
+ bio_endio(bio);
else
- bch_generic_make_request(bio, &d->bio_split_hook);
+ generic_make_request(bio);
}
}
@@ -1085,6 +1093,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
+ return;
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94980bfca434..679a093a3bf6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -59,29 +59,6 @@ struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
-static void bio_split_pool_free(struct bio_split_pool *p)
-{
- if (p->bio_split_hook)
- mempool_destroy(p->bio_split_hook);
-
- if (p->bio_split)
- bioset_free(p->bio_split);
-}
-
-static int bio_split_pool_init(struct bio_split_pool *p)
-{
- p->bio_split = bioset_create(4, 0);
- if (!p->bio_split)
- return -ENOMEM;
-
- p->bio_split_hook = mempool_create_kmalloc_pool(4,
- sizeof(struct bio_split_hook));
- if (!p->bio_split_hook)
- return -ENOMEM;
-
- return 0;
-}
-
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
@@ -221,7 +198,7 @@ err:
return err;
}
-static void write_bdev_super_endio(struct bio *bio, int error)
+static void write_bdev_super_endio(struct bio *bio)
{
struct cached_dev *dc = bio->bi_private;
/* XXX: error checking */
@@ -290,11 +267,11 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
-static void write_super_endio(struct bio *bio, int error)
+static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, error, "writing superblock");
+ bch_count_io_errors(ca, bio->bi_error, "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -339,12 +316,12 @@ void bcache_write_super(struct cache_set *c)
/* UUID io */
-static void uuid_endio(struct bio *bio, int error)
+static void uuid_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
- cache_set_err_on(error, c, "accessing uuids");
+ cache_set_err_on(bio->bi_error, c, "accessing uuids");
bch_bbio_free(bio, c);
closure_put(cl);
}
@@ -512,11 +489,11 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
* disk.
*/
-static void prio_endio(struct bio *bio, int error)
+static void prio_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- cache_set_err_on(error, ca->set, "accessing priorities");
+ cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
bch_bbio_free(bio, ca->set);
closure_put(&ca->prio);
}
@@ -537,7 +514,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
bio->bi_private = ca;
bch_bio_map(bio, ca->disk_buckets);
- closure_bio_submit(bio, &ca->prio, ca);
+ closure_bio_submit(bio, &ca->prio);
closure_sync(cl);
}
@@ -757,7 +734,6 @@ static void bcache_device_free(struct bcache_device *d)
put_disk(d->disk);
}
- bio_split_pool_free(&d->bio_split_hook);
if (d->bio_split)
bioset_free(d->bio_split);
kvfree(d->full_dirty_stripes);
@@ -804,7 +780,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1))) {
ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
@@ -830,7 +805,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
q->limits.max_segments = BIO_MAX_PAGES;
- q->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(q, UINT_MAX);
q->limits.discard_granularity = 512;
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
@@ -1793,8 +1768,6 @@ void bch_cache_release(struct kobject *kobj)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
}
- bio_split_pool_free(&ca->bio_split_hook);
-
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
kfree(ca->prio_buckets);
vfree(ca->buckets);
@@ -1839,8 +1812,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
ca->sb.nbuckets)) ||
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
- !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
- bio_split_pool_init(&ca->bio_split_hook))
+ !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1d04c4859c70..cf2cbc211d83 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
#include <linux/errno.h>
+#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
@@ -570,10 +571,10 @@ static inline sector_t bdev_sectors(struct block_device *bdev)
return bdev->bd_inode->i_size >> 9;
}
-#define closure_bio_submit(bio, cl, dev) \
+#define closure_bio_submit(bio, cl) \
do { \
closure_get(cl); \
- bch_generic_make_request(bio, &(dev)->bio_split_hook); \
+ generic_make_request(bio); \
} while (0)
uint64_t bch_crc64_update(uint64_t, const void *, size_t);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f1986bcd1bf0..b23f88d9f18c 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -166,12 +166,12 @@ static void write_dirty_finish(struct closure *cl)
closure_return_with_destructor(cl, dirty_io_destructor);
}
-static void dirty_endio(struct bio *bio, int error)
+static void dirty_endio(struct bio *bio)
{
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
- if (error)
+ if (bio->bi_error)
SET_KEY_DIRTY(&w->key, false);
closure_put(&io->cl);
@@ -188,27 +188,27 @@ static void write_dirty(struct closure *cl)
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
- closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty_finish, system_wq);
}
-static void read_dirty_endio(struct bio *bio, int error)
+static void read_dirty_endio(struct bio *bio)
{
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- error, "reading dirty data from cache");
+ bio->bi_error, "reading dirty data from cache");
- dirty_endio(bio, error);
+ dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
- closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty, system_wq);
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
- bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+ bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (bitmap->storage.sb_page == NULL)
return -ENOMEM;
bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
sb->state = cpu_to_le32(bitmap->flags);
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+ bitmap->mddev->bitmap_info.nodes = 0;
kunmap_atomic(sb);
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
+ loff_t offset = bitmap->mddev->bitmap_info.offset;
if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
/* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
- bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
- bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ bitmap->cluster_slot, offset);
}
if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
bitmap, bytes, sb_page);
} else {
err = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
+ offset,
sb_page,
0, sizeof(bitmap_super_t));
}
@@ -611,8 +613,16 @@ re_read:
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+ /* XXX: This is a hack to ensure that we don't use clustering
+ * in case:
+ * - dm-raid is in use and
+ * - the nodes field written in bitmap_sb is erroneous.
+ */
+ if (!bitmap->mddev->sync_super) {
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
kunmap_atomic(sb);
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
- if (nodes && (bitmap->cluster_slot < 0)) {
+ if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- rv = bitmap_read_sb(bitmap);
- if (rv)
- goto err;
-
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
goto err;
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index cd6d1d21e057..03af174485d3 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -236,8 +236,10 @@ void dm_cell_error(struct dm_bio_prison *prison,
bio_list_init(&bios);
dm_cell_release(prison, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- bio_endio(bio, error);
+ while ((bio = bio_list_pop(&bios))) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ }
}
EXPORT_SYMBOL_GPL(dm_cell_error);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 86dbbc737402..83cc52eaf56d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -545,7 +545,8 @@ static void dmio_complete(unsigned long error, void *context)
{
struct dm_buffer *b = context;
- b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
+ b->bio.bi_error = error ? -EIO : 0;
+ b->bio.bi_end_io(&b->bio);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
@@ -575,13 +576,16 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
b->bio.bi_end_io = end_io;
r = dm_io(&io_req, 1, &region, NULL);
- if (r)
- end_io(&b->bio, r);
+ if (r) {
+ b->bio.bi_error = r;
+ end_io(&b->bio);
+ }
}
-static void inline_endio(struct bio *bio, int error)
+static void inline_endio(struct bio *bio)
{
bio_end_io_t *end_fn = bio->bi_private;
+ int error = bio->bi_error;
/*
* Reset the bio to free any attached resources
@@ -589,7 +593,8 @@ static void inline_endio(struct bio *bio, int error)
*/
bio_reset(bio);
- end_fn(bio, error);
+ bio->bi_error = error;
+ end_fn(bio);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
@@ -661,13 +666,14 @@ static void submit_io(struct dm_buffer *b, int rw, sector_t block,
* Set the error, clear the B_WRITING bit and wake anyone who was waiting on
* it.
*/
-static void write_endio(struct bio *bio, int error)
+static void write_endio(struct bio *bio)
{
struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
- b->write_error = error;
- if (unlikely(error)) {
+ b->write_error = bio->bi_error;
+ if (unlikely(bio->bi_error)) {
struct dm_bufio_client *c = b->c;
+ int error = bio->bi_error;
(void)cmpxchg(&c->async_write_error, 0, error);
}
@@ -1026,11 +1032,11 @@ found_buffer:
* The endio routine for reading: set the error, clear the bit and wake up
* anyone waiting on the buffer.
*/
-static void read_endio(struct bio *bio, int error)
+static void read_endio(struct bio *bio)
{
struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
- b->read_error = error;
+ b->read_error = bio->bi_error;
BUG_ON(!test_bit(B_READING, &b->state));
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..1ffbeb1b3ea6 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -772,7 +772,7 @@ struct smq_policy {
struct dm_cache_policy policy;
/* protects everything */
- struct mutex lock;
+ spinlock_t lock;
dm_cblock_t cache_size;
sector_t cache_block_size;
@@ -807,13 +807,7 @@ struct smq_policy {
/*
* Keeps track of time, incremented by the core. We use this to
* avoid attributing multiple hits within the same tick.
- *
- * Access to tick_protected should be done with the spin lock held.
- * It's copied to tick at the start of the map function (within the
- * mutex).
*/
- spinlock_t tick_lock;
- unsigned tick_protected;
unsigned tick;
/*
@@ -1296,46 +1290,20 @@ static void smq_destroy(struct dm_cache_policy *p)
kfree(mq);
}
-static void copy_tick(struct smq_policy *mq)
-{
- unsigned long flags, tick;
-
- spin_lock_irqsave(&mq->tick_lock, flags);
- tick = mq->tick_protected;
- if (tick != mq->tick) {
- update_sentinels(mq);
- end_hotspot_period(mq);
- end_cache_period(mq);
- mq->tick = tick;
- }
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static bool maybe_lock(struct smq_policy *mq, bool can_block)
-{
- if (can_block) {
- mutex_lock(&mq->lock);
- return true;
- } else
- return mutex_trylock(&mq->lock);
-}
-
static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool fast_promote,
struct bio *bio, struct policy_locker *locker,
struct policy_result *result)
{
int r;
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
result->op = POLICY_MISS;
- if (!maybe_lock(mq, can_block))
- return -EWOULDBLOCK;
-
- copy_tick(mq);
+ spin_lock_irqsave(&mq->lock, flags);
r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
return r;
}
@@ -1343,20 +1311,18 @@ static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
int r;
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
struct entry *e;
- if (!mutex_trylock(&mq->lock))
- return -EWOULDBLOCK;
-
+ spin_lock_irqsave(&mq->lock, flags);
e = h_lookup(&mq->table, oblock);
if (e) {
*cblock = infer_cblock(mq, e);
r = 0;
} else
r = -ENOENT;
-
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
return r;
}
@@ -1375,20 +1341,22 @@ static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, boo
static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
__smq_set_clear_dirty(mq, oblock, true);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
struct smq_policy *mq = to_smq_policy(p);
+ unsigned long flags;
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
__smq_set_clear_dirty(mq, oblock, false);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static int smq_load_mapping(struct dm_cache_policy *p,
@@ -1433,14 +1401,14 @@ static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
struct smq_policy *mq = to_smq_policy(p);
int r = 0;
- mutex_lock(&mq->lock);
-
+ /*
+ * We don't need to lock here since this method is only called once
+ * the IO has stopped.
+ */
r = smq_save_hints(mq, &mq->clean, fn, context);
if (!r)
r = smq_save_hints(mq, &mq->dirty, fn, context);
- mutex_unlock(&mq->lock);
-
return r;
}
@@ -1458,10 +1426,11 @@ static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
struct smq_policy *mq = to_smq_policy(p);
+ unsigned long flags;
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
__remove_mapping(mq, oblock);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
@@ -1480,11 +1449,12 @@ static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
int r;
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
r = __remove_cblock(mq, cblock);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
return r;
}
@@ -1537,11 +1507,12 @@ static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
dm_cblock_t *cblock, bool critical_only)
{
int r;
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
r = __smq_writeback_work(mq, oblock, cblock, critical_only);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
return r;
}
@@ -1562,21 +1533,23 @@ static void __force_mapping(struct smq_policy *mq,
static void smq_force_mapping(struct dm_cache_policy *p,
dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
__force_mapping(mq, current_oblock, new_oblock);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
dm_cblock_t r;
+ unsigned long flags;
struct smq_policy *mq = to_smq_policy(p);
- mutex_lock(&mq->lock);
+ spin_lock_irqsave(&mq->lock, flags);
r = to_cblock(mq->cache_alloc.nr_allocated);
- mutex_unlock(&mq->lock);
+ spin_unlock_irqrestore(&mq->lock, flags);
return r;
}
@@ -1586,15 +1559,12 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
struct smq_policy *mq = to_smq_policy(p);
unsigned long flags;
- spin_lock_irqsave(&mq->tick_lock, flags);
- mq->tick_protected++;
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-
- if (can_block) {
- mutex_lock(&mq->lock);
- copy_tick(mq);
- mutex_unlock(&mq->lock);
- }
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->tick++;
+ update_sentinels(mq);
+ end_hotspot_period(mq);
+ end_cache_period(mq);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
/* Init the policy plugin interface function pointers. */
@@ -1686,7 +1656,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
if (from_cblock(cache_size)) {
mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
- if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+ if (!mq->cache_hit_bits) {
DMERR("couldn't allocate cache hit bitset");
goto bad_cache_hit_bits;
}
@@ -1694,10 +1664,8 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
} else
mq->cache_hit_bits = NULL;
- mq->tick_protected = 0;
mq->tick = 0;
- mutex_init(&mq->lock);
- spin_lock_init(&mq->tick_lock);
+ spin_lock_init(&mq->lock);
q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
mq->hotspot.nr_top_levels = 8;
@@ -1789,3 +1757,5 @@ module_exit(smq_exit);
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
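
/*
 * A sketch of the locking shape the smq hunks above converge on: the old
 * policy mutex plus the tick_lock/tick_protected pair collapse into one
 * irq-safe spinlock, so every policy entry point can be called from
 * atomic context and the tick work runs directly under the lock.
 * do_policy_work() is a stand-in for the per-method body:
 */
static int smq_locked_call(struct smq_policy *mq)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);	/* safe in atomic context */
	r = do_policy_work(mq);			/* must not sleep here */
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}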
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..dd90d1236f4a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg)
wake_up(&cache->migration_wait);
mempool_free(mg, cache->migration_pool);
- wake_worker(cache);
}
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -919,14 +918,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
wake_worker(cache);
}
-static void writethrough_endio(struct bio *bio, int err)
+static void writethrough_endio(struct bio *bio)
{
struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
dm_unhook_bio(&pb->hook_info, bio);
- if (err) {
- bio_endio(bio, err);
+ if (bio->bi_error) {
+ bio_endio(bio);
return;
}
@@ -1064,14 +1063,6 @@ static void dec_io_migrations(struct cache *cache)
atomic_dec(&cache->nr_io_migrations);
}
-static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
- bool holder, struct bio_list *bios)
-{
- (holder ? dm_cell_release : dm_cell_release_no_holder)
- (cache->prison, cell, bios);
- free_prison_cell(cache, cell);
-}
-
static bool discard_or_flush(struct bio *bio)
{
return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
@@ -1079,14 +1070,13 @@ static bool discard_or_flush(struct bio *bio)
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
{
- if (discard_or_flush(cell->holder))
+ if (discard_or_flush(cell->holder)) {
/*
- * We have to handle these bios
- * individually.
+ * We have to handle these bios individually.
*/
- __cell_release(cache, cell, true, &cache->deferred_bios);
-
- else
+ dm_cell_release(cache->prison, cell, &cache->deferred_bios);
+ free_prison_cell(cache, cell);
+ } else
list_add_tail(&cell->user_list, &cache->deferred_cells);
}
@@ -1113,7 +1103,7 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, boo
static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
{
dm_cell_error(cache->prison, cell, err);
- dm_bio_prison_free_cell(cache->prison, cell);
+ free_prison_cell(cache, cell);
}
static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1123,8 +1113,11 @@ static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
static void free_io_migration(struct dm_cache_migration *mg)
{
- dec_io_migrations(mg->cache);
+ struct cache *cache = mg->cache;
+
+ dec_io_migrations(cache);
free_migration(mg);
+ wake_worker(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -1231,7 +1224,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
* The block was promoted via an overwrite, so it's dirty.
*/
set_dirty(cache, mg->new_oblock, mg->cblock);
- bio_endio(mg->new_ocell->holder, 0);
+ bio_endio(mg->new_ocell->holder);
cell_defer(cache, mg->new_ocell, false);
}
free_io_migration(mg);
@@ -1284,7 +1277,7 @@ static void issue_copy(struct dm_cache_migration *mg)
}
}
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
{
struct dm_cache_migration *mg = bio->bi_private;
struct cache *cache = mg->cache;
@@ -1294,7 +1287,7 @@ static void overwrite_endio(struct bio *bio, int err)
dm_unhook_bio(&pb->hook_info, bio);
- if (err)
+ if (bio->bi_error)
mg->err = true;
mg->requeue_holder = false;
@@ -1351,16 +1344,18 @@ static void issue_discard(struct dm_cache_migration *mg)
{
dm_dblock_t b, e;
struct bio *bio = mg->new_ocell->holder;
+ struct cache *cache = mg->cache;
- calc_discard_block_range(mg->cache, bio, &b, &e);
+ calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
- set_discard(mg->cache, b);
+ set_discard(cache, b);
b = to_dblock(from_dblock(b) + 1);
}
- bio_endio(bio, 0);
- cell_defer(mg->cache, mg->new_ocell, false);
+ bio_endio(bio);
+ cell_defer(cache, mg->new_ocell, false);
free_migration(mg);
+ wake_worker(cache);
}
static void issue_copy_or_discard(struct dm_cache_migration *mg)
@@ -1631,7 +1626,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
calc_discard_block_range(cache, bio, &b, &e);
if (b == e) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -1729,6 +1724,8 @@ static void remap_cell_to_origin_clear_discard(struct cache *cache,
remap_to_origin(cache, bio);
issue(cache, bio);
}
+
+ free_prison_cell(cache, cell);
}
static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -1763,6 +1760,8 @@ static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_
remap_to_cache(cache, bio, cblock);
issue(cache, bio);
}
+
+ free_prison_cell(cache, cell);
}
/*----------------------------------------------------------------*/
@@ -1947,6 +1946,7 @@ static int commit_if_needed(struct cache *cache)
static void process_deferred_bios(struct cache *cache)
{
+ bool prealloc_used = false;
unsigned long flags;
struct bio_list bios;
struct bio *bio;
@@ -1966,6 +1966,7 @@ static void process_deferred_bios(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
bio_list_merge(&cache->deferred_bios, &bios);
@@ -1983,11 +1984,13 @@ static void process_deferred_bios(struct cache *cache)
process_bio(cache, &structs, bio);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
static void process_deferred_cells(struct cache *cache)
{
+ bool prealloc_used = false;
unsigned long flags;
struct dm_bio_prison_cell *cell, *tmp;
struct list_head cells;
@@ -2007,6 +2010,7 @@ static void process_deferred_cells(struct cache *cache)
* this bio might require one, we pause until there are some
* prepared mappings to process.
*/
+ prealloc_used = true;
if (prealloc_data_structs(cache, &structs)) {
spin_lock_irqsave(&cache->lock, flags);
list_splice(&cells, &cache->deferred_cells);
@@ -2017,7 +2021,8 @@ static void process_deferred_cells(struct cache *cache)
process_cell(cache, &structs, cell);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2067,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
static void writeback_some_dirty_blocks(struct cache *cache)
{
- int r = 0;
+ bool prealloc_used = false;
dm_oblock_t oblock;
dm_cblock_t cblock;
struct prealloc structs;
@@ -2072,15 +2077,12 @@ static void writeback_some_dirty_blocks(struct cache *cache)
memset(&structs, 0, sizeof(structs));
while (spare_migration_bandwidth(cache)) {
- if (prealloc_data_structs(cache, &structs))
- break;
-
- r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
- if (r)
- break;
+ if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
+ break; /* no work to do */
- r = get_cell(cache, oblock, &structs, &old_ocell);
- if (r) {
+ prealloc_used = true;
+ if (prealloc_data_structs(cache, &structs) ||
+ get_cell(cache, oblock, &structs, &old_ocell)) {
policy_set_dirty(cache->policy, oblock);
break;
}
@@ -2088,7 +2090,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
writeback(cache, &structs, oblock, cblock, old_ocell);
}
- prealloc_free_structs(cache, &structs);
+ if (prealloc_used)
+ prealloc_free_structs(cache, &structs);
}
/*----------------------------------------------------------------
@@ -2213,8 +2216,10 @@ static void requeue_deferred_bios(struct cache *cache)
bio_list_merge(&bios, &cache->deferred_bios);
bio_list_init(&cache->deferred_bios);
- while ((bio = bio_list_pop(&bios)))
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ while ((bio = bio_list_pop(&bios))) {
+ bio->bi_error = DM_ENDIO_REQUEUE;
+ bio_endio(bio);
+ }
}
static int more_work(struct cache *cache)
@@ -3119,7 +3124,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
* This is a duplicate writethrough io that is no
* longer needed because the block has been demoted.
*/
- bio_endio(bio, 0);
+ bio_endio(bio);
// FIXME: remap everything as a miss
cell_defer(cache, cell, false);
r = DM_MAPIO_SUBMITTED;
@@ -3496,7 +3501,7 @@ static void cache_resume(struct dm_target *ti)
* <#demotions> <#promotions> <#dirty>
* <#features> <features>*
* <#core args> <core args>
- * <policy name> <#policy args> <policy args>* <cache metadata mode>
+ * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3587,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
+ if (dm_cache_metadata_needs_check(cache->cmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3769,26 +3779,6 @@ static int cache_iterate_devices(struct dm_target *ti,
return r;
}
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct cache *cache = ti->private;
- struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = cache->origin_dev->bdev;
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
/*
@@ -3820,7 +3810,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
@@ -3832,7 +3822,6 @@ static struct target_type cache_target = {
.status = cache_status,
.message = cache_message,
.iterate_devices = cache_iterate_devices,
- .merge = cache_bvec_merge,
.io_hints = cache_io_hints,
};
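
/*
 * Shape of the 'prealloc_used' guard added to the three worker loops
 * above: prealloc_free_structs() only runs once prealloc_data_structs()
 * has actually been attempted, so idle wakeups skip the teardown path
 * entirely.  have_work() and process_one() are stand-ins:
 */
static void process_loop(struct cache *cache)
{
	bool prealloc_used = false;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	while (have_work(cache)) {
		prealloc_used = true;
		if (prealloc_data_structs(cache, &structs))
			break;		/* allocation failed: retry later */
		process_one(cache, &structs);
	}

	if (prealloc_used)
		prealloc_free_structs(cache, &structs);
}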
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0f48fed44a17..d60c88df5234 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1076,7 +1076,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (io->ctx.req)
crypt_free_req(cc, io->ctx.req, base_bio);
- bio_endio(base_bio, error);
+ base_bio->bi_error = error;
+ bio_endio(base_bio);
}
/*
@@ -1096,14 +1097,12 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
* The work is done per CPU global for all dm-crypt instances.
* They should not depend on each other and do not block.
*/
-static void crypt_endio(struct bio *clone, int error)
+static void crypt_endio(struct bio *clone)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned rw = bio_data_dir(clone);
-
- if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
- error = -EIO;
+ int error;
/*
* free the processed pages
@@ -1111,6 +1110,7 @@ static void crypt_endio(struct bio *clone, int error)
if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
+ error = clone->bi_error;
bio_put(clone);
if (rw == READ && !error) {
@@ -1811,11 +1811,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->iv_offset = tmpll;
- if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
+ ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
+ ret = -EINVAL;
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -2035,21 +2037,6 @@ error:
return -EINVAL;
}
-static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct crypt_config *cc = ti->private;
- struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = cc->dev->bdev;
- bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int crypt_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2070,7 +2057,6 @@ static struct target_type crypt_target = {
.preresume = crypt_preresume,
.resume = crypt_resume,
.message = crypt_message,
- .merge = crypt_merge,
.iterate_devices = crypt_iterate_devices,
};
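
/*
 * dm-crypt is the first of several targets below (delay, flakey, linear,
 * log-writes, raid1, stripe) converted to propagate the errno from
 * dm_get_device() instead of collapsing every constructor failure into
 * -EINVAL.  A condensed sketch of the resulting ctr shape;
 * 'struct example_c' is illustrative:
 */
struct example_c {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_c *ec;
	int ret;

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ec->dev);
	if (ret) {		/* may be -ENODEV, -EACCES, ... */
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->private = ec;
	return 0;

bad:
	kfree(ec);
	return ret;		/* previously a hard-coded -EINVAL */
}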
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 57b6a1901c91..b34f6e27293d 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -129,6 +129,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct delay_c *dc;
unsigned long long tmpll;
char dummy;
+ int ret;
if (argc != 3 && argc != 6) {
ti->error = "requires exactly 3 or 6 arguments";
@@ -143,6 +144,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dc->reads = dc->writes = 0;
+ ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -154,12 +156,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &dc->dev_read)) {
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &dc->dev_read);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
+ ret = -EINVAL;
dc->dev_write = NULL;
if (argc == 3)
goto out;
@@ -175,13 +179,15 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_dev_read;
}
- if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
- &dc->dev_write)) {
+ ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+ &dc->dev_write);
+ if (ret) {
ti->error = "Write device lookup failed";
goto bad_dev_read;
}
out:
+ ret = -EINVAL;
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
DMERR("Couldn't start kdelayd");
@@ -208,7 +214,7 @@ bad_dev_read:
dm_put_device(ti, dc->dev_read);
bad:
kfree(dc);
- return -EINVAL;
+ return ret;
}
static void delay_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ad913cd4aded..0119ebfb3d49 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1673,20 +1673,6 @@ static int era_iterate_devices(struct dm_target *ti,
return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
-static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct era *era = ti->private;
- struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = era->origin_dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct era *era = ti->private;
@@ -1717,7 +1703,6 @@ static struct target_type era_target = {
.status = era_status,
.message = era_message,
.iterate_devices = era_iterate_devices,
- .merge = era_merge,
.io_hints = era_io_hints
};
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b257e46876d3..645e8b4f808e 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -183,6 +183,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
devname = dm_shift_arg(&as);
+ r = -EINVAL;
if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
@@ -211,7 +212,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+ r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
+ if (r) {
ti->error = "Device lookup failed";
goto bad;
}
@@ -224,7 +226,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(fc);
- return -EINVAL;
+ return r;
}
static void flakey_dtr(struct dm_target *ti)
@@ -296,7 +298,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
* Drop writes?
*/
if (test_bit(DROP_WRITES, &fc->flags)) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -387,21 +389,6 @@ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long ar
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
-static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct flakey_c *fc = ti->private;
- struct request_queue *q = bdev_get_queue(fc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = fc->dev->bdev;
- bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
@@ -419,7 +406,6 @@ static struct target_type flakey_target = {
.end_io = flakey_end_io,
.status = flakey_status,
.ioctl = flakey_ioctl,
- .merge = flakey_merge,
.iterate_devices = flakey_iterate_devices,
};
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 74adcd2c967e..6f8e83b2a6f8 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -134,12 +134,13 @@ static void dec_count(struct io *io, unsigned int region, int error)
complete_io(io);
}
-static void endio(struct bio *bio, int error)
+static void endio(struct bio *bio)
{
struct io *io;
unsigned region;
+ int error;
- if (error && bio_data_dir(bio) == READ)
+ if (bio->bi_error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
/*
@@ -147,6 +148,7 @@ static void endio(struct bio *bio, int error)
*/
retrieve_io_and_region_from_bio(bio, &io, &region);
+ error = bio->bi_error;
bio_put(bio);
dec_count(io, region, error);
@@ -314,7 +316,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
num_bvecs = 1;
else
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ num_bvecs = min_t(int, BIO_MAX_PAGES,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 720ceeb7fa9b..80a439543259 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1919,9 +1919,7 @@ int __init dm_interface_init(void)
void dm_interface_exit(void)
{
- if (misc_deregister(&_dm_misc) < 0)
- DMERR("misc_deregister failed for control device");
-
+ misc_deregister(&_dm_misc);
dm_hash_exit();
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 53e848c10939..436f5c9b6aea 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -30,6 +30,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct linear_c *lc;
unsigned long long tmp;
char dummy;
+ int ret;
if (argc != 2) {
ti->error = "Invalid argument count";
@@ -42,13 +43,15 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
+ ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
ti->error = "dm-linear: Invalid device sector";
goto bad;
}
lc->start = tmp;
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
+ if (ret) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
}
@@ -61,7 +64,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(lc);
- return -EINVAL;
+ return ret;
}
static void linear_dtr(struct dm_target *ti)
@@ -130,21 +133,6 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
-static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct linear_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = lc->dev->bdev;
- bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -162,7 +150,6 @@ static struct target_type linear_target = {
.map = linear_map,
.status = linear_status,
.ioctl = linear_ioctl,
- .merge = linear_merge,
.iterate_devices = linear_iterate_devices,
};
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index ad1b049ae2ab..b2912dbac8bc 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -146,16 +146,16 @@ static void put_io_block(struct log_writes_c *lc)
}
}
-static void log_end_io(struct bio *bio, int err)
+static void log_end_io(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
struct bio_vec *bvec;
int i;
- if (err) {
+ if (bio->bi_error) {
unsigned long flags;
- DMERR("Error writing log block, error=%d", err);
+ DMERR("Error writing log block, error=%d", bio->bi_error);
spin_lock_irqsave(&lc->blocks_lock, flags);
lc->logging_enabled = false;
spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -205,7 +205,6 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -270,7 +269,6 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -292,7 +290,6 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- set_bit(BIO_UPTODATE, &bio->bi_flags);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
@@ -420,6 +417,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct log_writes_c *lc;
struct dm_arg_set as;
const char *devname, *logdevname;
+ int ret;
as.argc = argc;
as.argv = argv;
@@ -443,18 +441,22 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
atomic_set(&lc->pending_blocks, 0);
devname = dm_shift_arg(&as);
- if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+ ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
+ if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
logdevname = dm_shift_arg(&as);
- if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+ ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
+ &lc->logdev);
+ if (ret) {
ti->error = "Log device lookup failed";
dm_put_device(ti, lc->dev);
goto bad;
}
+ ret = -EINVAL;
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
if (!lc->log_kthread) {
ti->error = "Couldn't alloc kthread";
@@ -479,7 +481,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad:
kfree(lc);
- return -EINVAL;
+ return ret;
}
static int log_mark(struct log_writes_c *lc, char *data)
@@ -606,7 +608,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
WARN_ON(flush_bio || fua_bio);
if (lc->device_supports_discard)
goto map_bio;
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -728,21 +730,6 @@ static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
-static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct log_writes_c *lc = ti->private;
- struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = lc->dev->bdev;
- bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int log_writes_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn,
void *data)
@@ -796,7 +783,6 @@ static struct target_type log_writes_target = {
.end_io = normal_end_io,
.status = log_writes_status,
.ioctl = log_writes_ioctl,
- .merge = log_writes_merge,
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index eff7bdd7731d..5a67671a3973 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -159,12 +159,9 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
- struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
- if (m->hw_handler_name)
- scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
@@ -580,6 +577,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
q = bdev_get_queue(p->path.dev->bdev);
if (m->retain_attached_hw_handler) {
+retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
/*
@@ -599,20 +597,14 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
}
if (m->hw_handler_name) {
- /*
- * Increments scsi_dh reference, even when using an
- * already-attached handler.
- */
r = scsi_dh_attach(q, m->hw_handler_name);
if (r == -EBUSY) {
- /*
- * Already attached to different hw_handler:
- * try to reattach with correct one.
- */
- scsi_dh_detach(q);
- r = scsi_dh_attach(q, m->hw_handler_name);
- }
+ char b[BDEVNAME_SIZE];
+ printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
+ bdevname(p->path.dev->bdev, b));
+ goto retain;
+ }
if (r < 0) {
ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
@@ -624,7 +616,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
if (r < 0) {
ti->error = "unable to set hardware "
"handler parameters";
- scsi_dh_detach(q);
dm_put_device(ti, p->path.dev);
goto bad;
}
@@ -734,12 +725,6 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
return 0;
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
- if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
- "scsi_dh_%s", m->hw_handler_name)) {
- ti->error = "unknown hardware handler type";
- ret = -EINVAL;
- goto fail;
- }
if (hw_argc > 1) {
char *p;
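
/*
 * With scsi_dh_detach() removed from the 4.3 SCSI layer, dm-mpath can no
 * longer swap an already-attached hardware handler for the one named in
 * the table.  The hunks above instead jump back to 'retain' on -EBUSY
 * and adopt the attached handler.  The control flow in miniature;
 * query_attached() and try_attach() are stand-ins for the scsi_dh calls:
 */
static int pick_handler(struct request_queue *q, const char *wanted)
{
	int r;
retain:
	if (query_attached(q))	/* cf. scsi_dh_attached_handler_name() */
		return 0;	/* keep whatever is already attached */

	r = try_attach(q, wanted);	/* cf. scsi_dh_attach() */
	if (r == -EBUSY)
		goto retain;	/* raced with an attach: retain it */
	return r;
}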
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2daa67793511..97e165183e79 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1717,24 +1717,6 @@ static void raid_resume(struct dm_target *ti)
mddev_resume(&rs->md);
}
-static int raid_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct raid_set *rs = ti->private;
- struct md_personality *pers = rs->md.pers;
-
- if (pers && pers->mergeable_bvec)
- return min(max_size, pers->mergeable_bvec(&rs->md, bvm, biovec));
-
- /*
- * In case we can't request the personality because
- * the raid set is not running yet
- *
- * -> return safe minimum
- */
- return rs->md.chunk_sectors;
-}
-
static struct target_type raid_target = {
.name = "raid",
.version = {1, 7, 0},
@@ -1749,7 +1731,6 @@ static struct target_type raid_target = {
.presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
.resume = raid_resume,
- .merge = raid_merge,
};
static int __init dm_raid_init(void)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d83696bf403b..f2a363a89629 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -490,9 +490,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
* If device is suspended, complete the bio.
*/
if (dm_noflush_suspending(ms->ti))
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ bio->bi_error = DM_ENDIO_REQUEUE;
else
- bio_endio(bio, -EIO);
+ bio->bi_error = -EIO;
+
+ bio_endio(bio);
return;
}
@@ -515,7 +517,7 @@ static void read_callback(unsigned long error, void *context)
bio_set_m(bio, NULL);
if (likely(!error)) {
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -531,7 +533,7 @@ static void read_callback(unsigned long error, void *context)
DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
m->dev->name);
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
}
/* Asynchronous read. */
@@ -580,7 +582,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
if (likely(m))
read_async_bio(m, bio);
else
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
}
}
@@ -598,7 +600,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
static void write_callback(unsigned long error, void *context)
{
- unsigned i, ret = 0;
+ unsigned i;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
@@ -614,7 +616,7 @@ static void write_callback(unsigned long error, void *context)
* regions with the same code.
*/
if (likely(!error)) {
- bio_endio(bio, ret);
+ bio_endio(bio);
return;
}
@@ -623,7 +625,8 @@ static void write_callback(unsigned long error, void *context)
* degrade the array.
*/
if (bio->bi_rw & REQ_DISCARD) {
- bio_endio(bio, -EOPNOTSUPP);
+ bio->bi_error = -EOPNOTSUPP;
+ bio_endio(bio);
return;
}
@@ -828,13 +831,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
* be wrong if the failed leg returned after reboot and
* got replicated back to the good legs.)
*/
-
if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
else if (errors_handled(ms) && !keep_log(ms))
hold_bio(ms, bio);
else
- bio_endio(bio, 0);
+ bio_endio(bio);
}
}
@@ -943,16 +945,18 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
{
unsigned long long offset;
char dummy;
+ int ret;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset";
return -EINVAL;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &ms->mirror[mirror].dev)) {
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &ms->mirror[mirror].dev);
+ if (ret) {
ti->error = "Device lookup failure";
- return -ENXIO;
+ return ret;
}
ms->mirror[mirror].ms = ms;
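
/*
 * Several failure paths above switch from bio_endio(bio, -EIO) to
 * bio_io_error(bio); under the new completion convention that helper is
 * effectively the two-step form:
 */
static inline void fail_bio(struct bio *bio)	/* what bio_io_error() does */
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}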
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 808b8419bc48..bf71583296f7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -533,7 +533,7 @@ static int read_exceptions(struct pstore *ps,
chunk = area_location(ps, ps->current_area);
area = dm_bufio_read(client, chunk, &bp);
- if (unlikely(IS_ERR(area))) {
+ if (IS_ERR(area)) {
r = PTR_ERR(area);
goto ret_destroy_bufio;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7c82d3ccce87..c0bcd6516dfe 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -63,6 +63,13 @@ struct dm_snapshot {
*/
int valid;
+ /*
+ * The snapshot overflowed because of a write to the snapshot device.
+ * We don't have to invalidate the snapshot in this case, but we need
+ * to prevent further writes.
+ */
+ int snapshot_overflowed;
+
/* Origin writes don't trigger exceptions until this is set */
int active;
@@ -1152,6 +1159,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->ti = ti;
s->valid = 1;
+ s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
s->exception_start_sequence = 0;
@@ -1301,6 +1309,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
snap_dest->valid = snap_src->valid;
+ snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
/*
* Set source invalid to ensure it receives no further I/O.
@@ -1490,7 +1499,7 @@ out:
error_bios(snapshot_bios);
} else {
if (full_bio)
- bio_endio(full_bio, 0);
+ bio_endio(full_bio);
flush_bios(snapshot_bios);
}
@@ -1580,11 +1589,11 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
-static void full_bio_end_io(struct bio *bio, int error)
+static void full_bio_end_io(struct bio *bio)
{
void *callback_data = bio->bi_private;
- dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+ dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
}
static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1691,7 +1700,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* to copy an exception */
down_write(&s->lock);
- if (!s->valid) {
+ if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
r = -EIO;
goto out_unlock;
}
@@ -1715,7 +1724,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = alloc_pending_exception(s);
down_write(&s->lock);
- if (!s->valid) {
+ if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
r = -EIO;
goto out_unlock;
@@ -1730,7 +1739,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
- __invalidate_snapshot(s, -ENOMEM);
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
r = -EIO;
goto out_unlock;
}
@@ -1990,6 +2000,8 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Invalid");
else if (snap->merge_failed)
DMEMIT("Merge failed");
+ else if (snap->snapshot_overflowed)
+ DMEMIT("Overflow");
else {
if (snap->store->type->usage) {
sector_t total_sectors, sectors_allocated,
@@ -2330,20 +2342,6 @@ static void origin_status(struct dm_target *ti, status_type_t type,
}
}
-static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct dm_origin *o = ti->private;
- struct request_queue *q = bdev_get_queue(o->dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = o->dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -2362,13 +2360,12 @@ static struct target_type origin_target = {
.resume = origin_resume,
.postsuspend = origin_postsuspend,
.status = origin_status,
- .merge = origin_merge,
.iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
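
/*
 * The snapshot hunks above separate "overflowed" from "invalid": once an
 * exception cannot be allocated the store stops accepting writes, but
 * reads of chunks already copied remain serviceable and status reports
 * "Overflow" instead of "Invalid".  The map-path check in miniature:
 */
static int snapshot_write_allowed(struct dm_snapshot *s, struct bio *bio)
{
	if (!s->valid)
		return -EIO;	/* snapshot is gone for good */
	if (s->snapshot_overflowed && bio_rw(bio) == WRITE)
		return -EIO;	/* no room for further exceptions */
	return 0;
}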
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8a8b48fa901a..8289804ccd99 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -457,12 +457,24 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
list_for_each_entry(s, &stats->list, list_entry) {
if (!program || !strcmp(program, s->program_id)) {
len = s->end - s->start;
- DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+ DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
(unsigned long long)s->start,
(unsigned long long)len,
(unsigned long long)s->step,
s->program_id,
s->aux_data);
+ if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ DMEMIT(" precise_timestamps");
+ if (s->n_histogram_entries) {
+ unsigned i;
+ DMEMIT(" histogram:");
+ for (i = 0; i < s->n_histogram_entries; i++) {
+ if (i)
+ DMEMIT(",");
+ DMEMIT("%llu", s->histogram_boundaries[i]);
+ }
+ }
+ DMEMIT("\n");
}
}
mutex_unlock(&stats->mutex);
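
/*
 * With the dm-stats hunk above, a @stats_list reply line grows two
 * optional trailers after the aux_data field.  An illustrative (made-up)
 * line for a region created with precise timestamps and three histogram
 * boundaries:
 *
 *	0: 0+409600 512 dmstats - precise_timestamps histogram:100,200,500
 */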
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a672a1502c14..797ddb900b06 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -75,13 +75,15 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
{
unsigned long long start;
char dummy;
+ int ret;
if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
return -EINVAL;
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
- &sc->stripe[stripe].dev))
- return -ENXIO;
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &sc->stripe[stripe].dev);
+ if (ret)
+ return ret;
sc->stripe[stripe].physical_start = start;
@@ -273,7 +275,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
- bio_endio(bio, 0);
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
}
@@ -412,26 +414,6 @@ static void stripe_io_hints(struct dm_target *ti,
blk_limits_io_opt(limits, chunk_size * sc->stripes);
}
-static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct stripe_c *sc = ti->private;
- sector_t bvm_sector = bvm->bi_sector;
- uint32_t stripe;
- struct request_queue *q;
-
- stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
-
- q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
- bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static struct target_type stripe_target = {
.name = "striped",
.version = {1, 5, 1},
@@ -443,7 +425,6 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
- .merge = stripe_merge,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 16ba55ad7089..e76ed003769e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -440,14 +440,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
- /*
- * Check if merge fn is supported.
- * If not we'll force DM to use PAGE_SIZE or
- * smaller I/O, just to be safe.
- */
- if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
- blk_limits_max_hw_sectors(limits,
- (unsigned int) (PAGE_SIZE >> 9));
return 0;
}
@@ -1388,14 +1380,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
-static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
-}
-
static bool dm_table_all_devices_attribute(struct dm_table *t,
iterate_devices_callout_fn func)
{
@@ -1516,11 +1500,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
- if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
- queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
-
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
return r;
disk_super = dm_block_data(copy);
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
return dm_tm_unlock(pmd->tm, copy);
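
/*
 * The metadata-snapshot fix above matters because the data-mapping and
 * device-details roots point at multi-level btrees: dm_sm_dec_block()
 * only dropped the reference on the root block, leaking every interior
 * and leaf block, whereas dm_btree_del() walks the tree and releases all
 * of them before the held root itself is dropped.
 */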
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..6578b7bc1fbb 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
@@ -268,7 +269,7 @@ struct pool {
process_mapping_fn process_prepared_mapping;
process_mapping_fn process_prepared_discard;
- struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
+ struct dm_bio_prison_cell **cell_sort_array;
};
static enum pool_mode get_pool_mode(struct pool *pool);
@@ -331,9 +332,6 @@ struct thin_c {
*
* Description:
* Asynchronously issue a discard request for the sectors in question.
- * NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
- * that is being kept local to DM thinp until the block changes to allow
- * late bio splitting land upstream.
*/
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
@@ -341,91 +339,36 @@ static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sect
{
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
- unsigned int max_discard_sectors, granularity;
- int alignment;
struct bio *bio;
- int ret = 0;
- struct blk_plug plug;
- if (!q)
+ if (!q || !nr_sects)
return -ENXIO;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
- /* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
- alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
- /*
- * Ensure that max_discard_sectors is of the proper
- * granularity, so that requests stay aligned after a split.
- */
- max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- max_discard_sectors -= max_discard_sectors % granularity;
- if (unlikely(!max_discard_sectors)) {
- /* Avoid infinite loop below. Being cautious never hurts. */
- return -EOPNOTSUPP;
- }
-
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
type |= REQ_SECURE;
}
- blk_start_plug(&plug);
- while (nr_sects) {
- unsigned int req_sects;
- sector_t end_sect, tmp;
-
- /*
- * Required bio_put occurs in bio_endio thanks to bio_chain below
- */
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
- req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
- /*
- * If splitting a request, and the next starting sector would be
- * misaligned, stop the discard at the previous aligned sector.
- */
- end_sect = sector + req_sects;
- tmp = end_sect;
- if (req_sects < nr_sects &&
- sector_div(tmp, granularity) != alignment) {
- end_sect = end_sect - alignment;
- sector_div(end_sect, granularity);
- end_sect = end_sect * granularity + alignment;
- req_sects = end_sect - sector;
- }
-
- bio_chain(bio, parent_bio);
-
- bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ /*
+ * Required bio_put occurs in bio_endio thanks to bio_chain below
+ */
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
- bio->bi_iter.bi_size = req_sects << 9;
- nr_sects -= req_sects;
- sector = end_sect;
+ bio_chain(bio, parent_bio);
- submit_bio(type, bio);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_size = nr_sects << 9;
- /*
- * We can loop for a long time in here, if someone does
- * full device discards (like mkfs). Be nice and allow
- * us to schedule out to avoid softlocking if preempt
- * is disabled.
- */
- cond_resched();
- }
- blk_finish_plug(&plug);
+ submit_bio(type, bio);
- return ret;
+ return 0;
}
static bool block_size_is_power_of_two(struct pool *pool)
@@ -614,8 +557,10 @@ static void error_bio_list(struct bio_list *bios, int error)
{
struct bio *bio;
- while ((bio = bio_list_pop(bios)))
- bio_endio(bio, error);
+ while ((bio = bio_list_pop(bios))) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ }
}
static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -665,16 +610,21 @@ static void requeue_io(struct thin_c *tc)
requeue_deferred_cells(tc);
}
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
{
struct thin_c *tc;
rcu_read_lock();
list_for_each_entry_rcu(tc, &pool->active_thins, list)
- error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+ error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
rcu_read_unlock();
}
+static void error_retry_list(struct pool *pool)
+{
+ return error_retry_list_with_code(pool, -EIO);
+}
+
/*
* This section of code contains the logic for processing a thin device's IO.
* Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -864,14 +814,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
complete_mapping_preparation(m);
}
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
{
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct dm_thin_new_mapping *m = h->overwrite_mapping;
bio->bi_end_io = m->saved_bi_end_io;
- m->err = err;
+ m->err = bio->bi_error;
complete_mapping_preparation(m);
}
@@ -996,7 +946,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
if (bio) {
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
- bio_endio(bio, 0);
+ bio_endio(bio);
} else {
inc_all_io_entry(tc->pool, m->cell->holder);
remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1026,7 +976,7 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
- bio_endio(m->bio, 0);
+ bio_endio(m->bio);
free_discard_mapping(m);
}
@@ -1040,7 +990,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
bio_io_error(m->bio);
} else
- bio_endio(m->bio, 0);
+ bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, tc->pool->mapping_pool);
@@ -1111,7 +1061,8 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
* Even if r is set, there could be sub discards in flight that we
* need to wait for.
*/
- bio_endio(m->bio, r);
+ m->bio->bi_error = r;
+ bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
}
@@ -1487,9 +1438,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
int error = should_error_unserviceable_bio(pool);
- if (error)
- bio_endio(bio, error);
- else
+ if (error) {
+ bio->bi_error = error;
+ bio_endio(bio);
+ } else
retry_on_resume(bio);
}
@@ -1533,9 +1485,8 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
}
/*
- * FIXME: DM local hack to defer parent bios's end_io until we
- * _know_ all chained sub range discard bios have completed.
- * Will go away once late bio splitting lands upstream!
+ * __bio_inc_remaining() is used to defer the parent bio's end_io until
+ * we _know_ all chained sub range discard bios have completed.
*/
static inline void __bio_inc_remaining(struct bio *bio)
{
@@ -1625,7 +1576,7 @@ static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_priso
* will prevent completion until the sub range discards have
* completed.
*/
- bio_endio(bio, 0);
+ bio_endio(bio);
}
static void process_discard_bio(struct thin_c *tc, struct bio *bio)
@@ -1639,7 +1590,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio)
/*
* The discard covers less than a block.
*/
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -1784,7 +1735,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
cell_defer_no_holder(tc, cell);
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
}
@@ -1849,7 +1800,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
} else {
zero_fill_bio(bio);
- bio_endio(bio, 0);
+ bio_endio(bio);
}
} else
provision_block(tc, bio, block, cell);
@@ -1920,7 +1871,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
}
zero_fill_bio(bio);
- bio_endio(bio, 0);
+ bio_endio(bio);
break;
default:
@@ -1945,7 +1896,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell
static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
- bio_endio(bio, 0);
+ bio_endio(bio);
}
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2281,18 +2232,23 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
- * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
*/
static void do_no_space_timeout(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool,
no_space_timeout);
- if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
- set_pool_mode(pool, PM_READ_ONLY);
+ if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+ pool->pf.error_if_no_space = true;
+ notify_of_pool_mode_change_to_oods(pool);
+ error_retry_list_with_code(pool, -ENOSPC);
+ }
}
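
In short, the timeout no longer demotes the pool to read-only; it keeps the pool in out-of-data-space mode but flips it to erroring. A comment-style sketch of the resulting flow:

	/*
	 * PM_OUT_OF_DATA_SPACE, queue_if_no_space
	 *     |  no_space_timeout fires, pool still not resized
	 *     v
	 * PM_OUT_OF_DATA_SPACE, error_if_no_space == true
	 *     - bios parked on each thin's retry_on_resume list now
	 *       fail with -ENOSPC rather than -EIO
	 *     - previously the pool dropped to PM_READ_ONLY here
	 */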
/*----------------------------------------------------------------*/
@@ -2370,6 +2326,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
dm_device_name(pool->pool_md), new_mode);
}
+static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+{
+ if (!pool->pf.error_if_no_space)
+ notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+ else
+ notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+}
+
static bool passdown_enabled(struct pool_c *pt)
{
return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2418,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* frequently seeing this mode.
*/
if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "out-of-data-space");
+ notify_of_pool_mode_change_to_oods(pool);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell_read_only;
@@ -2581,7 +2545,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
thin_hook_bio(tc, bio);
if (tc->requeue_mode) {
- bio_endio(bio, DM_ENDIO_REQUEUE);
+ bio->bi_error = DM_ENDIO_REQUEUE;
+ bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2777,6 +2742,7 @@ static void __pool_destroy(struct pool *pool)
{
__pool_table_remove(pool);
+ vfree(pool->cell_sort_array);
if (dm_pool_metadata_close(pool->pmd) < 0)
DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
@@ -2889,6 +2855,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_mapping_pool;
}
+ pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+ if (!pool->cell_sort_array) {
+ *error = "Error allocating cell sort array";
+ err_p = ERR_PTR(-ENOMEM);
+ goto bad_sort_array;
+ }
+
pool->ref_count = 1;
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
@@ -2897,6 +2870,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
+bad_sort_array:
+ mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
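
The allocation slots into pool_create()'s goto-based unwind ladder: a failure at any step jumps to a label that frees everything allocated so far, in reverse order. A self-contained userspace sketch of the idiom (names are illustrative, not the driver's):

	#include <stdlib.h>

	struct pool_like {
		void *mapping_pool;
		void **cell_sort_array;
	};

	static struct pool_like *pool_like_create(size_t nr_cells)
	{
		struct pool_like *p = calloc(1, sizeof(*p));

		if (!p)
			return NULL;

		p->mapping_pool = malloc(4096);
		if (!p->mapping_pool)
			goto bad_mapping_pool;

		p->cell_sort_array = calloc(nr_cells, sizeof(*p->cell_sort_array));
		if (!p->cell_sort_array)
			goto bad_sort_array;

		return p;

	bad_sort_array:			/* unwind in reverse allocation order */
		free(p->mapping_pool);
	bad_mapping_pool:
		free(p);
		return NULL;
	}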
@@ -3714,6 +3689,7 @@ static void emit_flags(struct pool_features *pf, char *result,
* Status line is:
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
+ * <pool mode> <discard config> <no space config> <needs_check>
*/
static void pool_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3791,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("queue_if_no_space ");
+ if (dm_pool_metadata_needs_check(pool->pmd))
+ DMEMIT("needs_check ");
+ else
+ DMEMIT("- ");
+
break;
case STATUSTYPE_TABLE:
@@ -3840,20 +3821,6 @@ static int pool_iterate_devices(struct dm_target *ti,
return fn(ti, pt->data_dev, 0, ti->len, data);
}
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct pool_c *pt = ti->private;
- struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = pt->data_dev->bdev;
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
@@ -3918,7 +3885,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -3930,7 +3897,6 @@ static struct target_type pool_target = {
.resume = pool_resume,
.message = pool_message,
.status = pool_status,
- .merge = pool_merge,
.iterate_devices = pool_iterate_devices,
.io_hints = pool_io_hints,
};
@@ -4257,21 +4223,6 @@ err:
DMEMIT("Error");
}
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct thin_c *tc = ti->private;
- struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = tc->pool_dev->bdev;
- bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int thin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -4305,7 +4256,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -4315,7 +4266,6 @@ static struct target_type thin_target = {
.presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
- .merge = thin_merge,
.iterate_devices = thin_iterate_devices,
.io_hints = thin_io_hints,
};
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index bb9c6a00e4b0..edc624bccf9a 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -26,8 +26,6 @@
#define DM_VERITY_ENV_LENGTH 42
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
-#define DM_VERITY_IO_VEC_INLINE 16
-#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63
@@ -76,8 +74,6 @@ struct dm_verity {
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
- mempool_t *vec_mempool; /* mempool of bio vector */
-
struct workqueue_struct *verify_wq;
/* starting blocks for each tree level. 0 is the lowest level. */
@@ -271,7 +267,7 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
verity_hash_at_level(v, block, level, &hash_block, &offset);
data = dm_bufio_read(v->bufio, hash_block, &buf);
- if (unlikely(IS_ERR(data)))
+ if (IS_ERR(data))
return PTR_ERR(data);
aux = dm_bufio_get_aux_data(buf);
@@ -458,8 +454,9 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
+ bio->bi_error = error;
- bio_endio(bio, error);
+ bio_endio(bio);
}
static void verity_work(struct work_struct *w)
@@ -469,12 +466,12 @@ static void verity_work(struct work_struct *w)
verity_finish_io(io, verity_verify_io(io));
}
-static void verity_end_io(struct bio *bio, int error)
+static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (error) {
- verity_finish_io(io, error);
+ if (bio->bi_error) {
+ verity_finish_io(io, bio->bi_error);
return;
}
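
verity_end_io() thus fails fast on a device error and otherwise defers hash verification off the completion path. A generic sketch of the pattern (my_io, finish_io() and the workqueue field are hypothetical):

	static void my_end_io(struct bio *bio)
	{
		struct my_io *io = bio->bi_private;

		if (bio->bi_error) {		/* device error: fail fast */
			finish_io(io, bio->bi_error);
			return;
		}

		/* verify in process context, off the IRQ completion path */
		INIT_WORK(&io->work, my_verify_work);
		queue_work(io->wq, &io->work);
	}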
@@ -648,21 +645,6 @@ static int verity_ioctl(struct dm_target *ti, unsigned cmd,
cmd, arg);
}
-static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
- struct bio_vec *biovec, int max_size)
-{
- struct dm_verity *v = ti->private;
- struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
-
- if (!q->merge_bvec_fn)
- return max_size;
-
- bvm->bi_bdev = v->data_dev->bdev;
- bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
-
- return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
static int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -691,9 +673,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
- if (v->vec_mempool)
- mempool_destroy(v->vec_mempool);
-
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -962,14 +941,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
- v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
- BIO_MAX_PAGES * sizeof(struct bio_vec));
- if (!v->vec_mempool) {
- ti->error = "Cannot allocate vector mempool";
- r = -ENOMEM;
- goto bad;
- }
-
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!v->verify_wq) {
@@ -995,7 +966,6 @@ static struct target_type verity_target = {
.map = verity_map,
.status = verity_status,
.ioctl = verity_ioctl,
- .merge = verity_merge,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
};
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b9a64bbce304..766bc93006e6 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
break;
}
- bio_endio(bio, 0);
+ bio_endio(bio);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..6264781dc69a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -124,9 +124,8 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
-#define DMF_MERGE_IS_OPTIONAL 6
-#define DMF_DEFERRED_REMOVE 7
-#define DMF_SUSPENDED_INTERNALLY 8
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
/*
* A dummy definition to make RCU happy.
@@ -944,7 +943,8 @@ static void dec_pending(struct dm_io *io, int error)
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio, io_error);
- bio_endio(bio, io_error);
+ bio->bi_error = io_error;
+ bio_endio(bio);
}
}
}
@@ -957,17 +957,15 @@ static void disable_write_same(struct mapped_device *md)
limits->max_write_same_sectors = 0;
}
-static void clone_endio(struct bio *bio, int error)
+static void clone_endio(struct bio *bio)
{
+ int error = bio->bi_error;
int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (!bio_flagged(bio, BIO_UPTODATE) && !error)
- error = -EIO;
-
if (endio) {
r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
@@ -996,7 +994,7 @@ static void clone_endio(struct bio *bio, int error)
/*
* Partial completion handling for request-based dm
*/
-static void end_clone_bio(struct bio *clone, int error)
+static void end_clone_bio(struct bio *clone)
{
struct dm_rq_clone_bio_info *info =
container_of(clone, struct dm_rq_clone_bio_info, clone);
@@ -1013,13 +1011,13 @@ static void end_clone_bio(struct bio *clone, int error)
* the remainder.
*/
return;
- else if (error) {
+ else if (bio->bi_error) {
/*
* Don't notice the error to the upper layer yet.
* The error handling decision is made by the target driver,
* when the request is completed.
*/
- tio->error = error;
+ tio->error = bio->bi_error;
return;
}
@@ -1067,13 +1065,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- int nr_requests_pending;
-
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- nr_requests_pending = md_in_flight(md);
- if (!nr_requests_pending)
+ if (!md_in_flight(md))
wake_up(&md->wait);
/*
@@ -1085,8 +1080,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
if (run_queue) {
if (md->queue->mq_ops)
blk_mq_run_hw_queues(md->queue, true);
- else if (!nr_requests_pending ||
- (nr_requests_pending >= md->queue->nr_congestion_on))
+ else
blk_run_queue_async(md->queue);
}
@@ -1470,7 +1464,7 @@ static void __map_bio(struct dm_target_io *tio)
md = tio->io->md;
dec_pending(tio->io, r);
free_tio(md, tio);
- } else if (r) {
+ } else if (r != DM_MAPIO_SUBMITTED) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
@@ -1726,67 +1720,6 @@ static void __split_and_process_bio(struct mapped_device *md,
* CRUD END
*---------------------------------------------------------------*/
-static int dm_merge_bvec(struct request_queue *q,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table_fast(md);
- struct dm_target *ti;
- sector_t max_sectors, max_size = 0;
-
- if (unlikely(!map))
- goto out;
-
- ti = dm_table_find_target(map, bvm->bi_sector);
- if (!dm_target_is_valid(ti))
- goto out;
-
- /*
- * Find maximum amount of I/O that won't need splitting
- */
- max_sectors = min(max_io_len(bvm->bi_sector, ti),
- (sector_t) queue_max_sectors(q));
- max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
- /*
- * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
- * to the targets' merge function since it holds sectors not bytes).
- * Just doing this as an interim fix for stable@ because the more
- * comprehensive cleanup of switching to sector_t will impact every
- * DM target that implements a ->merge hook.
- */
- if (max_size > INT_MAX)
- max_size = INT_MAX;
-
- /*
- * merge_bvec_fn() returns number of bytes
- * it can accept at this offset
- * max is precomputed maximal io size
- */
- if (max_size && ti->type->merge)
- max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
- /*
- * If the target doesn't support merge method and some of the devices
- * provided their merge_bvec method (we know this by looking for the
- * max_hw_sectors that dm_set_device_limits may set), then we can't
- * allow bios with multiple vector entries. So always set max_size
- * to 0, and the code below allows just one page.
- */
- else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
- max_size = 0;
-
-out:
- dm_put_live_table_fast(md);
- /*
- * Always allow an entire first page
- */
- if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
- max_size = biovec->bv_len;
-
- return max_size;
-}
-
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
@@ -1800,6 +1733,8 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
map = dm_get_live_table(md, &srcu_idx);
+ blk_queue_split(q, &bio, q->bio_split);
+
generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
/* if we're suspended, we have to queue this io for later */
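
This is the other half of dropping ->merge: instead of sizing bios up front via merge_bvec_fn, a make_request function now hands oversized bios to blk_queue_split(), which chains off correctly sized fragments (md_make_request gains the same call below). A sketch of a simple remapping driver using the 4.3 API (my_remap() is hypothetical):

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		/* let the core split bios that exceed this queue's limits */
		blk_queue_split(q, &bio, q->bio_split);

		my_remap(bio);			/* adjust bi_bdev / bi_sector */
		generic_make_request(bio);
	}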
@@ -2281,8 +2216,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
static void cleanup_mapped_device(struct mapped_device *md)
{
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->wq)
destroy_workqueue(md->wq);
if (md->kworker_task)
@@ -2294,6 +2227,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -2507,59 +2442,6 @@ static void __set_size(struct mapped_device *md, sector_t size)
}
/*
- * Return 1 if the queue has a compulsory merge_bvec_fn function.
- *
- * If this function returns 0, then the device is either a non-dm
- * device without a merge_bvec_fn, or it is a dm device that is
- * able to split any bios it receives that are too big.
- */
-int dm_queue_merge_is_compulsory(struct request_queue *q)
-{
- struct mapped_device *dev_md;
-
- if (!q->merge_bvec_fn)
- return 0;
-
- if (q->make_request_fn == dm_make_request) {
- dev_md = q->queuedata;
- if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
- return 0;
- }
-
- return 1;
-}
-
-static int dm_device_merge_is_compulsory(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
-{
- struct block_device *bdev = dev->bdev;
- struct request_queue *q = bdev_get_queue(bdev);
-
- return dm_queue_merge_is_compulsory(q);
-}
-
-/*
- * Return 1 if it is acceptable to ignore merge_bvec_fn based
- * on the properties of the underlying devices.
- */
-static int dm_table_merge_is_optional(struct dm_table *table)
-{
- unsigned i = 0;
- struct dm_target *ti;
-
- while (i < dm_table_get_num_targets(table)) {
- ti = dm_table_get_target(table, i++);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
- return 0;
- }
-
- return 1;
-}
-
-/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2568,7 +2450,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
- int merge_is_optional;
size = dm_table_get_size(t);
@@ -2594,17 +2475,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
- merge_is_optional = dm_table_merge_is_optional(t);
-
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, t);
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_table_set_restrictions(t, q, limits);
- if (merge_is_optional)
- set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
- else
- clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
if (old_map)
dm_sync_table(md);
@@ -2885,7 +2760,6 @@ int dm_setup_md_queue(struct mapped_device *md)
case DM_TYPE_BIO_BASED:
dm_init_old_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- blk_queue_merge_bvec(md->queue, dm_merge_bvec);
break;
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4e984993d40a..7edcf97dfa5a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -78,8 +78,6 @@ bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
-int dm_queue_merge_is_compulsory(struct request_queue *q);
-
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 1277eb26b58a..4a8e15058e8b 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -70,7 +70,7 @@
#include <linux/seq_file.h>
-static void faulty_fail(struct bio *bio, int error)
+static void faulty_fail(struct bio *bio)
{
struct bio *b = bio->bi_private;
@@ -181,7 +181,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
return;
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index fa7d577f3d12..b7fe7e9fc777 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -52,48 +52,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
return conf->disks + lo;
}
-/**
- * linear_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can take at this offset
- */
-static int linear_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct dev_info *dev0;
- unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int maxbytes = biovec->bv_len;
- struct request_queue *subq;
-
- dev0 = which_dev(mddev, sector);
- maxsectors = dev0->end_sector - sector;
- subq = bdev_get_queue(dev0->rdev->bdev);
- if (subq->merge_bvec_fn) {
- bvm->bi_bdev = dev0->rdev->bdev;
- bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
- maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
- biovec));
- }
-
- if (maxsectors < bio_sectors)
- maxsectors = 0;
- else
- maxsectors -= bio_sectors;
-
- if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
- return maxbytes;
-
- if (maxsectors > (maxbytes >> 9))
- return maxbytes;
- else
- return maxsectors << 9;
-}
-
static int linear_congested(struct mddev *mddev, int bits)
{
struct linear_conf *conf;
@@ -297,7 +255,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely((split->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
- bio_endio(split, 0);
+ bio_endio(split);
} else
generic_make_request(split);
} while (split != bio);
@@ -338,7 +296,6 @@ static struct md_personality linear_personality =
.size = linear_size,
.quiesce = linear_quiesce,
.congested = linear_congested,
- .mergeable_bvec = linear_mergeable_bvec,
};
static int __init linear_init (void)
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..11e3bc9d2a4b 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,8 @@ struct resync_info {
/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
+#define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3
struct md_cluster_info {
@@ -51,7 +53,6 @@ struct md_cluster_info {
dlm_lockspace_t *lockspace;
int slot_number;
struct completion completion;
- struct dlm_lock_resource *sb_lock;
struct mutex sb_mutex;
struct dlm_lock_resource *bitmap_lockres;
struct list_head suspend_list;
@@ -74,6 +75,7 @@ enum msg_type {
NEWDISK,
REMOVE,
RE_ADD,
+ BITMAP_NEEDS_SYNC,
};
struct cluster_msg {
@@ -98,7 +100,6 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
int ret = 0;
- init_completion(&res->completion);
ret = dlm_lock(res->ls, mode, &res->lksb,
res->flags, res->name, strlen(res->name),
0, sync_ast, res, res->bast);
@@ -123,6 +124,7 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
if (!res)
return NULL;
+ init_completion(&res->completion);
res->ls = cinfo->lockspace;
res->mddev = mddev;
namelen = strlen(name);
@@ -164,11 +166,24 @@ out_err:
static void lockres_free(struct dlm_lock_resource *res)
{
+ int ret;
+
if (!res)
return;
- init_completion(&res->completion);
- dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ /* cancel a lock request or a conversion request that is blocked */
+ res->flags |= DLM_LKF_CANCEL;
+retry:
+ ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ if (unlikely(ret != 0)) {
+ pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);
+
+ /* if a lock conversion is cancelled, the lock is put back on the
+ * grant queue; we need to ensure it is unlocked */
+ if (ret == -DLM_ECANCEL)
+ goto retry;
+ }
+ res->flags &= ~DLM_LKF_CANCEL;
wait_for_completion(&res->completion);
kfree(res->name);
@@ -176,18 +191,6 @@ static void lockres_free(struct dlm_lock_resource *res)
kfree(res);
}
-static char *pretty_uuid(char *dest, char *src)
-{
- int i, len = 0;
-
- for (i = 0; i < 16; i++) {
- if (i == 4 || i == 6 || i == 8 || i == 10)
- len += sprintf(dest + len, "-");
- len += sprintf(dest + len, "%02x", (__u8)src[i]);
- }
- return dest;
-}
-
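
The open-coded formatter is replaced by the kernel's %pU printk extension, which renders a 16-byte buffer in standard UUID form, as the hunks below adopt:

	char str[64];

	/* prints 16 raw bytes as xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx */
	sprintf(str, "%pU", mddev->uuid);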
static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
sector_t lo, sector_t hi)
{
@@ -275,18 +278,16 @@ clear_bit:
static void recover_prep(void *arg)
{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
-static void recover_slot(void *arg, struct dlm_slot *slot)
+static void __recover_slot(struct mddev *mddev, int slot)
{
- struct mddev *mddev = arg;
struct md_cluster_info *cinfo = mddev->cluster_info;
- pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
- mddev->bitmap_info.cluster_name,
- slot->nodeid, slot->slot,
- cinfo->slot_number);
- set_bit(slot->slot - 1, &cinfo->recovery_map);
+ set_bit(slot, &cinfo->recovery_map);
if (!cinfo->recovery_thread) {
cinfo->recovery_thread = md_register_thread(recover_bitmaps,
mddev, "recover");
@@ -298,6 +299,20 @@ static void recover_slot(void *arg, struct dlm_slot *slot)
md_wakeup_thread(cinfo->recovery_thread);
}
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+ mddev->bitmap_info.cluster_name,
+ slot->nodeid, slot->slot,
+ cinfo->slot_number);
+ /* subtract one since DLM slot numbers start at one while
+ * cluster-md slot numbers start at zero */
+ __recover_slot(mddev, slot->slot - 1);
+}
+
static void recover_done(void *arg, struct dlm_slot *slots,
int num_slots, int our_slot,
uint32_t generation)
@@ -306,9 +321,17 @@ static void recover_done(void *arg, struct dlm_slot *slots,
struct md_cluster_info *cinfo = mddev->cluster_info;
cinfo->slot_number = our_slot;
- complete(&cinfo->completion);
+ /* the completion only needs to be completed when a node joins the
+ * cluster; it doesn't need to run during another node's failure */
+ if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
+ complete(&cinfo->completion);
+ clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
+ }
+ clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
+/* these ops are called when a node joins the cluster, and perform lock
+ * recovery if a node failure occurs */
static const struct dlm_lockspace_ops md_ls_ops = {
.recover_prep = recover_prep,
.recover_slot = recover_slot,
@@ -383,7 +406,7 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
int len;
len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
- pretty_uuid(disk_uuid + len, cmsg->uuid);
+ sprintf(disk_uuid + len, "%pU", cmsg->uuid);
snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
init_completion(&cinfo->newdisk_completion);
@@ -452,6 +475,11 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
__func__, __LINE__, msg->slot);
process_readd_disk(mddev, msg);
break;
+ case BITMAP_NEEDS_SYNC:
+ pr_info("%s: %d Received BITMAP_NEEDS_SYNC from %d\n",
+ __func__, __LINE__, msg->slot);
+ __recover_slot(mddev, msg->slot);
+ break;
default:
pr_warn("%s:%d Received unknown message from %d\n",
__func__, __LINE__, msg->slot);
@@ -467,6 +495,7 @@ static void recv_daemon(struct md_thread *thread)
struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
struct cluster_msg msg;
+ int ret;
/*get CR on Message*/
if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
@@ -479,13 +508,21 @@ static void recv_daemon(struct md_thread *thread)
process_recvd_msg(thread->mddev, &msg);
/*release CR on ack_lockres*/
- dlm_unlock_sync(ack_lockres);
- /*up-convert to EX on message_lockres*/
- dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+ ret = dlm_unlock_sync(ack_lockres);
+ if (unlikely(ret != 0))
+ pr_info("unlock ack failed return %d\n", ret);
+ /*up-convert to PR on message_lockres*/
+ ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
+ if (unlikely(ret != 0))
+ pr_info("lock PR on msg failed return %d\n", ret);
/*get CR on ack_lockres again*/
- dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ if (unlikely(ret != 0))
+ pr_info("lock CR on ack failed return %d\n", ret);
/*release CR on message_lockres*/
- dlm_unlock_sync(message_lockres);
+ ret = dlm_unlock_sync(message_lockres);
+ if (unlikely(ret != 0))
+ pr_info("unlock msg failed return %d\n", ret);
}
/* lock_comm()
@@ -514,7 +551,7 @@ static void unlock_comm(struct md_cluster_info *cinfo)
* The function:
* 1. Grabs the message lockresource in EX mode
* 2. Copies the message to the message LVB
- * 3. Downconverts message lockresource to CR
+ * 3. Downconverts message lockresource to CW
* 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
* and the other nodes read the message. The thread will wait here until all other
* nodes have released ack lock resource.
@@ -535,12 +572,12 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
sizeof(struct cluster_msg));
- /*down-convert EX to CR on Message*/
- error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+ /*down-convert EX to CW on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
if (error) {
- pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+ pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
error);
- goto failed_message;
+ goto failed_ack;
}
/*up-convert CR to EX on Ack*/
@@ -560,7 +597,13 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
}
failed_ack:
- dlm_unlock_sync(cinfo->message_lockres);
+ error = dlm_unlock_sync(cinfo->message_lockres);
+ if (unlikely(error != 0)) {
+ pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
+ error);
+ /* in case the message can't be released for some reason */
+ goto failed_ack;
+ }
failed_message:
return error;
}
@@ -582,6 +625,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
struct dlm_lock_resource *bm_lockres;
struct suspend_info *s;
char str[64];
+ sector_t lo, hi;
for (i = 0; i < total_slots; i++) {
@@ -612,9 +656,24 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
lockres_free(bm_lockres);
continue;
}
- if (ret)
+ if (ret) {
+ lockres_free(bm_lockres);
goto out;
- /* TODO: Read the disk bitmap sb and check if it needs recovery */
+ }
+
+ /* Read the disk bitmap sb and check if it needs recovery */
+ ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+ if (ret) {
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
+ lockres_free(bm_lockres);
+ continue;
+ }
+ if ((hi > 0) && (lo < mddev->recovery_cp)) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ mddev->recovery_cp = lo;
+ md_check_recovery(mddev);
+ }
+
dlm_unlock_sync(bm_lockres);
lockres_free(bm_lockres);
}
@@ -628,20 +687,20 @@ static int join(struct mddev *mddev, int nodes)
int ret, ops_rv;
char str[64];
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
+ INIT_LIST_HEAD(&cinfo->suspend_list);
+ spin_lock_init(&cinfo->suspend_lock);
init_completion(&cinfo->completion);
+ set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
mutex_init(&cinfo->sb_mutex);
mddev->cluster_info = cinfo;
memset(str, 0, 64);
- pretty_uuid(str, mddev->uuid);
+ sprintf(str, "%pU", mddev->uuid);
ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
DLM_LSFL_FS, LVB_SIZE,
&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
@@ -654,12 +713,6 @@ static int join(struct mddev *mddev, int nodes)
ret = -ERANGE;
goto err;
}
- cinfo->sb_lock = lockres_init(mddev, "cmd-super",
- NULL, 0);
- if (!cinfo->sb_lock) {
- ret = -ENOMEM;
- goto err;
- }
/* Initiate the communication resources */
ret = -ENOMEM;
cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
@@ -700,9 +753,6 @@ static int join(struct mddev *mddev, int nodes)
goto err;
}
- INIT_LIST_HEAD(&cinfo->suspend_list);
- spin_lock_init(&cinfo->suspend_lock);
-
ret = gather_all_resync_info(mddev, nodes);
if (ret)
goto err;
@@ -714,12 +764,10 @@ err:
lockres_free(cinfo->ack_lockres);
lockres_free(cinfo->no_new_dev_lockres);
lockres_free(cinfo->bitmap_lockres);
- lockres_free(cinfo->sb_lock);
if (cinfo->lockspace)
dlm_release_lockspace(cinfo->lockspace, 2);
mddev->cluster_info = NULL;
kfree(cinfo);
- module_put(THIS_MODULE);
return ret;
}
@@ -735,7 +783,6 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->token_lockres);
lockres_free(cinfo->ack_lockres);
lockres_free(cinfo->no_new_dev_lockres);
- lockres_free(cinfo->sb_lock);
lockres_free(cinfo->bitmap_lockres);
dlm_release_lockspace(cinfo->lockspace, 2);
return 0;
@@ -812,16 +859,30 @@ static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
static void resync_finish(struct mddev *mddev)
{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int slot = cinfo->slot_number - 1;
+
pr_info("%s:%d\n", __func__, __LINE__);
resync_send(mddev, RESYNCING, 0, 0);
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
+ cmsg.slot = cpu_to_le32(slot);
+ sendmsg(cinfo, &cmsg);
+ }
}
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+ sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
struct suspend_info *s;
+ if ((direction == READ) &&
+ test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+ return 1;
+
spin_lock_irq(&cinfo->suspend_lock);
if (list_empty(&cinfo->suspend_list))
goto out;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
int (*metadata_update_cancel)(struct mddev *mddev);
- int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
int (*add_new_disk_finish)(struct mddev *mddev);
int (*new_disk_ack)(struct mddev *mddev, bool ack);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..4f5ecbe94ccb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -257,13 +257,17 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
unsigned int sectors;
int cpu;
+ blk_queue_split(q, &bio, q->bio_split);
+
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
- bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+ if (bio_sectors(bio) != 0)
+ bio->bi_error = -EROFS;
+ bio_endio(bio);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
@@ -350,34 +354,11 @@ static int md_congested(void *data, int bits)
return mddev_congested(mddev, bits);
}
-static int md_mergeable_bvec(struct request_queue *q,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct mddev *mddev = q->queuedata;
- int ret;
- rcu_read_lock();
- if (mddev->suspended) {
- /* Must always allow one vec */
- if (bvm->bi_size == 0)
- ret = biovec->bv_len;
- else
- ret = 0;
- } else {
- struct md_personality *pers = mddev->pers;
- if (pers && pers->mergeable_bvec)
- ret = pers->mergeable_bvec(mddev, bvm, biovec);
- else
- ret = biovec->bv_len;
- }
- rcu_read_unlock();
- return ret;
-}
/*
* Generic flush handling for md
*/
-static void md_end_flush(struct bio *bio, int err)
+static void md_end_flush(struct bio *bio)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
@@ -433,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
- bio_endio(bio, 0);
+ bio_endio(bio);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
@@ -502,6 +483,8 @@ static void mddev_put(struct mddev *mddev)
bioset_free(bs);
}
+static void md_safemode_timeout(unsigned long data);
+
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
@@ -509,7 +492,8 @@ void mddev_init(struct mddev *mddev)
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
- init_timer(&mddev->safemode_timer);
+ setup_timer(&mddev->safemode_timer, md_safemode_timeout,
+ (unsigned long) mddev);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
@@ -728,15 +712,13 @@ void md_rdev_clear(struct md_rdev *rdev)
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
-static void super_written(struct bio *bio, int error)
+static void super_written(struct bio *bio)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
- if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
- printk("md: super_written gets error=%d, uptodate=%d\n",
- error, test_bit(BIO_UPTODATE, &bio->bi_flags));
- WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
+ if (bio->bi_error) {
+ printk("md: super_written gets error=%d\n", bio->bi_error);
md_error(mddev, rdev);
}
@@ -791,7 +773,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
- ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ ret = !bio->bi_error;
bio_put(bio);
return ret;
}
@@ -3276,8 +3258,6 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
return 0;
}
-static void md_safemode_timeout(unsigned long data);
-
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
@@ -4210,6 +4190,8 @@ action_show(struct mddev *mddev, char *page)
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
type = "recover";
+ else if (mddev->reshape_position != MaxSector)
+ type = "reshape";
}
return sprintf(page, "%s\n", type);
}
@@ -5186,7 +5168,6 @@ int md_run(struct mddev *mddev)
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
- blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5202,8 +5183,6 @@ int md_run(struct mddev *mddev)
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
- mddev->safemode_timer.function = md_safemode_timeout;
- mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
@@ -5216,6 +5195,11 @@ int md_run(struct mddev *mddev)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
+ if (mddev->degraded && !mddev->ro)
+ /* This ensures that recovering status is reported immediately
+ * via sysfs - until a lack of spares is confirmed.
+ */
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
@@ -5315,7 +5299,6 @@ static void md_clean(struct mddev *mddev)
mddev->degraded = 0;
mddev->safemode = 0;
mddev->private = NULL;
- mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
@@ -5382,6 +5365,8 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
mddev_detach(mddev);
+ /* Ensure ->event_work is done */
+ flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->ready = 0;
mddev->pers = NULL;
@@ -5512,7 +5497,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
@@ -5757,22 +5741,22 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
char *ptr;
int err;
- file = kmalloc(sizeof(*file), GFP_NOIO);
+ file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
return -ENOMEM;
err = 0;
spin_lock(&mddev->lock);
- /* bitmap disabled, zero the first byte and copy out */
- if (!mddev->bitmap_info.file)
- file->pathname[0] = '\0';
- else if ((ptr = file_path(mddev->bitmap_info.file,
- file->pathname, sizeof(file->pathname))),
- IS_ERR(ptr))
- err = PTR_ERR(ptr);
- else
- memmove(file->pathname, ptr,
- sizeof(file->pathname)-(ptr-file->pathname));
+ /* bitmap enabled */
+ if (mddev->bitmap_info.file) {
+ ptr = file_path(mddev->bitmap_info.file, file->pathname,
+ sizeof(file->pathname));
+ if (IS_ERR(ptr))
+ err = PTR_ERR(ptr);
+ else
+ memmove(file->pathname, ptr,
+ sizeof(file->pathname)-(ptr-file->pathname));
+ }
spin_unlock(&mddev->lock);
if (err == 0 &&
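
Switching kmalloc() to kzalloc() in the hunk above is a small information-leak fix: when no bitmap file is set, the pathname buffer is copied to userspace untouched, so it must not carry stale heap contents. The general rule, sketched (fill_in() and the struct are illustrative; the md code uses GFP_NOIO here):

	/* zero structs that reach userspace; unused bytes and padding
	 * must never leak old kernel memory */
	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;
	fill_in(m);			/* may leave some fields untouched */
	if (copy_to_user(arg, m, sizeof(*m)))
		err = -EFAULT;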
@@ -7091,7 +7075,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "\n");
}
-static void status_resync(struct seq_file *seq, struct mddev *mddev)
+static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
@@ -7099,18 +7083,32 @@ static void status_resync(struct seq_file *seq, struct mddev *mddev)
int scale;
unsigned int per_milli;
- if (mddev->curr_resync <= 3)
- resync = 0;
- else
- resync = mddev->curr_resync
- - atomic_read(&mddev->recovery_active);
-
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
+ resync = mddev->curr_resync;
+ if (resync <= 3) {
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ /* Still cleaning up */
+ resync = max_sectors;
+ } else
+ resync -= atomic_read(&mddev->recovery_active);
+
+ if (resync == 0) {
+ if (mddev->recovery_cp < MaxSector) {
+ seq_printf(seq, "\tresync=PENDING");
+ return 1;
+ }
+ return 0;
+ }
+ if (resync < 3) {
+ seq_printf(seq, "\tresync=DELAYED");
+ return 1;
+ }
+
WARN_ON(max_sectors == 0);
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
@@ -7175,6 +7173,7 @@ static void status_resync(struct seq_file *seq, struct mddev *mddev)
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
+ return 1;
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
@@ -7320,13 +7319,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
- if (mddev->curr_resync > 2) {
- status_resync(seq, mddev);
+ if (status_resync(seq, mddev))
seq_printf(seq, "\n ");
- } else if (mddev->curr_resync >= 1)
- seq_printf(seq, "\tresync=DELAYED\n ");
- else if (mddev->recovery_cp < MaxSector)
- seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
@@ -7409,15 +7403,19 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
-int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+int register_md_cluster_operations(struct md_cluster_operations *ops,
+ struct module *module)
{
- if (md_cluster_ops != NULL)
- return -EALREADY;
+ int ret = 0;
spin_lock(&pers_lock);
- md_cluster_ops = ops;
- md_cluster_mod = module;
+ if (md_cluster_ops != NULL)
+ ret = -EALREADY;
+ else {
+ md_cluster_ops = ops;
+ md_cluster_mod = module;
+ }
spin_unlock(&pers_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);
@@ -7437,7 +7435,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
err = request_module("md-cluster");
if (err) {
pr_err("md-cluster module not found.\n");
- return err;
+ return -ENOENT;
}
spin_lock(&pers_lock);
@@ -7815,7 +7813,8 @@ void md_do_sync(struct md_thread *thread)
> (max_sectors >> 4)) ||
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
- >= mddev->resync_max - mddev->curr_resync_completed
+ >= mddev->resync_max - mddev->curr_resync_completed ||
+ mddev->curr_resync_completed > mddev->resync_max
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
@@ -7860,6 +7859,9 @@ void md_do_sync(struct md_thread *thread)
break;
j += sectors;
+ if (j > max_sectors)
+ /* when skipping, extra large numbers can be returned. */
+ j = max_sectors;
if (j > 2)
mddev->curr_resync = j;
if (mddev_is_clustered(mddev))
@@ -7928,12 +7930,15 @@ void md_do_sync(struct md_thread *thread)
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ mddev->curr_resync > 2) {
+ mddev->curr_resync_completed = mddev->curr_resync;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ }
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped);
- if (mddev_is_clustered(mddev))
- md_cluster_ops->resync_finish(mddev);
-
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -7967,6 +7972,9 @@ void md_do_sync(struct md_thread *thread)
}
}
skip:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_finish(mddev);
+
set_bit(MD_CHANGE_DEVS, &mddev->flags);
spin_lock(&mddev->lock);
@@ -7977,11 +7985,11 @@ void md_do_sync(struct md_thread *thread)
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
+ set_bit(MD_RECOVERY_DONE, &mddev->recovery);
mddev->curr_resync = 0;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
- set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
}
@@ -8150,6 +8158,7 @@ void md_check_recovery(struct mddev *mddev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
@@ -8596,6 +8605,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7da6e9c3cb53..ab339571e57f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,10 +134,6 @@ enum flag_bits {
Bitmap_sync, /* ..actually, not quite In_sync. Need a
* bitmap-based recovery to get fully in sync
*/
- Unmerged, /* device is being added to array and should
- * be considerred for bvec_merge_fn but not
- * yet for actual IO
- */
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
Blocked, /* An error occurred but has not yet
@@ -374,10 +370,6 @@ struct mddev {
int degraded; /* whether md should consider
* adding a spare
*/
- int merge_check_needed; /* at least one
- * member device
- * has a
- * merge_bvec_fn */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
@@ -532,10 +524,6 @@ struct md_personality
/* congested implements bdi.congested_fn().
* Will not be called while array is 'suspended' */
int (*congested)(struct mddev *mddev, int bits);
- /* mergeable_bvec is use to implement ->merge_bvec_fn */
- int (*mergeable_bvec)(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec);
};
struct md_sysfs_entry {
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ac3ede2bd00e..d222522c52e0 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -77,18 +77,18 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
struct bio *bio = mp_bh->master_bio;
struct mpconf *conf = mp_bh->mddev->private;
- bio_endio(bio, err);
+ bio->bi_error = err;
+ bio_endio(bio);
mempool_free(mp_bh, conf->pool);
}
-static void multipath_end_request(struct bio *bio, int error)
+static void multipath_end_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh *mp_bh = bio->bi_private;
struct mpconf *conf = mp_bh->mddev->private;
struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
- if (uptodate)
+ if (!bio->bi_error)
multipath_end_bh_io(mp_bh, 0);
else if (!(bio->bi_rw & REQ_RAHEAD)) {
/*
@@ -101,7 +101,7 @@ static void multipath_end_request(struct bio *bio, int error)
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
- multipath_end_bh_io(mp_bh, error);
+ multipath_end_bh_io(mp_bh, bio->bi_error);
rdev_dec_pending(rdev, conf->mddev);
}
@@ -123,7 +123,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
- bio_endio(bio, -EIO);
+ bio_io_error(bio);
mempool_free(mp_bh, conf->pool);
return;
}
@@ -257,18 +257,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_segments to one, lying
- * within a single page.
- * (Note: it is very unlikely that a device with
- * merge_bvec_fn will be involved in multipath.)
- */
- if (q->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
-
spin_lock_irq(&conf->device_lock);
mddev->degraded--;
rdev->raid_disk = path;
@@ -432,15 +420,6 @@ static int multipath_run (struct mddev *mddev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, not that we ever expect a device with
- * a merge_bvec_fn to be involved in multipath */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
-
if (!test_bit(Faulty, &rdev->flags))
working_disks++;
}
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 4d6c9b689eaa..88dbe7b97c2c 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -454,7 +454,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
int r;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -490,7 +490,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -523,7 +523,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
int r;
p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
if (unlikely(!p))
return -EWOULDBLOCK;
@@ -559,7 +559,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm));
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index e04cfd2d60ef..421a36c593e3 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s < 0 && nr_center < -s) {
/* not enough in central node */
- shift(left, center, nr_center);
- s = nr_center - target;
+ shift(left, center, -nr_center);
+ s += nr_center;
shift(left, right, s);
nr_right += s;
} else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s > 0 && nr_center < s) {
/* not enough in central node */
shift(center, right, nr_center);
- s = target - nr_center;
+ s -= nr_center;
shift(left, right, s);
nr_left -= s;
} else
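
A worked example for the first redistribute3() hunk, with illustrative numbers: take nr_left = 4, nr_center = 1 and target = 13, so s = nr_left - target = -9 and nr_center < -s.

	/*
	 * fixed code:
	 *   shift(left, center, -1);	pull the lone center entry left
	 *   s += nr_center;		s = -8
	 *   shift(left, right, -8);	pull 8 more from right; left = 13
	 *
	 * old code:
	 *   shift(left, center, +1);	wrong direction (pushed left -> center)
	 *   s = nr_center - target;	s = -12, over-shifting from right
	 */

The second hunk is the mirror image of the same accounting fix for the s > 0 case.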
@@ -409,29 +409,11 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
return 0;
}
-static int get_nr_entries(struct dm_transaction_manager *tm,
- dm_block_t b, uint32_t *result)
-{
- int r;
- struct dm_block *block;
- struct btree_node *n;
-
- r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
- if (r)
- return r;
-
- n = dm_block_data(block);
- *result = le32_to_cpu(n->header.nr_entries);
-
- return dm_tm_unlock(tm, block);
-}
-
static int rebalance_children(struct shadow_spine *s,
struct dm_btree_info *info,
struct dm_btree_value_type *vt, uint64_t key)
{
int i, r, has_left_sibling, has_right_sibling;
- uint32_t child_entries;
struct btree_node *n;
n = dm_block_data(shadow_current(s));
@@ -458,10 +440,6 @@ static int rebalance_children(struct shadow_spine *s,
if (i < 0)
return -ENODATA;
- r = get_nr_entries(info->tm, value64(n, i), &child_entries);
- if (r)
- return r;
-
has_left_sibling = i > 0;
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
@@ -544,14 +522,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +529,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
@@ -654,11 +626,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
uint64_t k;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
- r = remove_raw(&spine, info, &le64_type,
+ r = remove_raw(&spine, info, &le64_vt,
root, keys[level], (unsigned *) &index);
if (r < 0)
goto out;
@@ -689,6 +663,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
value_ptr(n, index));
delete_at(n, index);
+ keys[last_level] = k + 1ull;
} else
r = -ENODATA;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
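
With real inc/dec ops, shadowing an internal node of a multi-level btree now takes and drops references on the child blocks it points at, instead of silently skipping reference counting as the old all-NULL le64_type did. Usage, as the dm-btree-remove.c and dm-btree.c hunks adopt it:

	struct dm_btree_value_type le64_vt;

	init_le64_type(info->tm, &le64_vt);	/* context is the tm */
	r = remove_raw(&spine, info, &le64_vt, root, keys[level],
		       (unsigned *)&index);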
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 200ac12a1d40..b6cec258cc21 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
int r;
struct del_stack *s;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kmalloc(sizeof(*s), GFP_NOIO);
if (!s)
return -ENOMEM;
s->info = info;
@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup);
*
* Where A* is a shadow of A.
*/
-static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
- unsigned parent_index, uint64_t key)
+static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
+ uint64_t key)
{
int r;
size_t size;
@@ -625,7 +625,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
if (top)
r = btree_split_beneath(s, key);
else
- r = btree_split_sibling(s, root, i, key);
+ r = btree_split_sibling(s, i, key);
if (r < 0)
return r;
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;
- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);
for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index efb654eb5399..63e619b2f44e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- bool discard_supported = false;
+ unsigned short blksize = 512;
if (!conf)
return -ENOMEM;
@@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
sector_div(sectors, mddev->chunk_sectors);
rdev1->sectors = sectors * mddev->chunk_sectors;
+ blksize = max(blksize, queue_logical_block_size(
+ rdev1->bdev->bd_disk->queue));
+
rdev_for_each(rdev2, mddev) {
pr_debug("md/raid0:%s: comparing %s(%llu)"
" with %s(%llu)\n",
@@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
+ /*
+ * now that we have the hard sector sizes, we can make sure the
+ * chunk size is a multiple of that sector size
+ */
+ if ((mddev->chunk_sectors << 9) % blksize) {
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
+ err = -EINVAL;
+ goto abort;
+ }
+
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
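
The check above rejects assembly when the chunk size is not a whole multiple of the largest logical block size among the members; doing it here keeps the test valid on call paths where mddev->queue is unavailable (e.g. raid0 driven through dm-raid), which the deleted queue-based version further down relied on. A worked instance of the arithmetic, with hypothetical values:

/* chunk_sectors = 128 -> 128 << 9 = 65536-byte chunks: 65536 % 4096 == 0,
 * so a member with 4096-byte logical blocks is fine. chunk_sectors = 130
 * -> 66560 bytes: 66560 % 4096 == 1024, so assembly fails with -EINVAL
 * instead of issuing I/O that tears a logical block across chunks. */
unsigned short blksize = 4096;			/* hypothetical member */
unsigned int chunk_bytes = 130 << 9;		/* 66560 */
int ok = (chunk_bytes % blksize) == 0;		/* 0 -> goto abort */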
@@ -188,19 +203,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
dev[j] = rdev1;
- if (mddev->queue)
- disk_stack_limits(mddev->gendisk, rdev1->bdev,
- rdev1->data_offset << 9);
-
- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
- conf->has_merge_bvec = 1;
-
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
-
- if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
- discard_supported = true;
}
if (cnt != mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -261,28 +266,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}
- /*
- * now since we have the hard sector sizes, we can make sure
- * chunk size is a multiple of that sector size
- */
- if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
- mdname(mddev),
- mddev->chunk_sectors << 9);
- goto abort;
- }
-
- if (mddev->queue) {
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- if (!discard_supported)
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- }
-
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -351,58 +334,6 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_div(sector, zone->nb_dev)];
}
-/**
- * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- * @mddev: the md device
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can accept at this offset
- */
-static int raid0_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r0conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- sector_t sector_offset = sector;
- int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
- struct strip_zone *zone;
- struct md_rdev *rdev;
- struct request_queue *subq;
-
- if (is_power_of_2(chunk_sectors))
- max = (chunk_sectors - ((sector & (chunk_sectors-1))
- + bio_sectors)) << 9;
- else
- max = (chunk_sectors - (sector_div(sector, chunk_sectors)
- + bio_sectors)) << 9;
- if (max < 0)
- max = 0; /* bio_add cannot handle a negative return */
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- if (max < biovec->bv_len)
- /* too small already, no need to check further */
- return max;
- if (!conf->has_merge_bvec)
- return max;
-
- /* May need to check subordinate device */
- sector = sector_offset;
- zone = find_zone(mddev->private, &sector_offset);
- rdev = map_sector(mddev, zone, sector, &sector_offset);
- subq = bdev_get_queue(rdev->bdev);
- if (subq->merge_bvec_fn) {
- bvm->bi_bdev = rdev->bdev;
- bvm->bi_sector = sector_offset + zone->dev_start +
- rdev->data_offset;
- return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
- } else
- return max;
-}
-
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
sector_t array_sectors = 0;
@@ -433,12 +364,6 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- if (mddev->queue) {
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
- }
-
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
ret = create_strip_zones(mddev, &conf);
@@ -447,6 +372,29 @@ static int raid0_run(struct mddev *mddev)
mddev->private = conf;
}
conf = mddev->private;
+ if (mddev->queue) {
+ struct md_rdev *rdev;
+ bool discard_supported = false;
+
+ rdev_for_each(rdev, mddev) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ discard_supported = true;
+ }
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
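
Queue limits are now applied in raid0_run(), once create_strip_zones() has succeeded and only when mddev->queue exists, stacking each member's limits and deciding the discard flag in the same pass. For a hypothetical 4-disk array with 128-sector chunks, the resulting hints are:

/* io_min: one full chunk; io_opt: one full stripe across the disks */
unsigned int io_min = 128 << 9;			/* 65536 bytes  */
unsigned int io_opt = io_min * 4;		/* 262144 bytes */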
@@ -543,7 +491,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely((split->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
- bio_endio(split, 0);
+ bio_endio(split);
} else
generic_make_request(split);
} while (split != bio);
@@ -727,7 +675,6 @@ static struct md_personality raid0_personality=
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
.congested = raid0_congested,
- .mergeable_bvec = raid0_mergeable_bvec,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 05539d9c97f0..7127a623f5da 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -12,8 +12,6 @@ struct r0conf {
struct md_rdev **devlist; /* lists of rdevs, pointed to
* by strip_zone->dev */
int nr_strip_zones;
- int has_merge_bvec; /* at least one member has
- * a merge_bvec_fn */
};
#endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..4517f06c41ba 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -255,9 +255,10 @@ static void call_bio_endio(struct r1bio *r1_bio)
done = 1;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
+
if (done) {
- bio_endio(bio, 0);
+ bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -312,9 +313,9 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
return mirror;
}
-static void raid1_end_read_request(struct bio *bio, int error)
+static void raid1_end_read_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r1bio *r1_bio = bio->bi_private;
int mirror;
struct r1conf *conf = r1_bio->mddev->private;
@@ -336,7 +337,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+ test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
uptodate = 1;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
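
Many hunks in the remainder of this diff track the 4.3 block-layer change in which bi_end_io callbacks lose their int error argument and BIO_UPTODATE goes away: completion status now lives in bio->bi_error, and bio_endio() takes only the bio. A minimal sketch of the recurring conversion; my_end_io and the bi_private wiring are placeholders, not md code:

/* pre-4.3: status passed as an argument, mirrored in BIO_UPTODATE */
static void my_end_io_old(struct bio *bio, int error)
{
	struct bio *master = bio->bi_private;	/* assumed chaining */

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		clear_bit(BIO_UPTODATE, &master->bi_flags);
	bio_put(bio);
	bio_endio(master, 0);
}

/* 4.3+: status travels inside the bio */
static void my_end_io(struct bio *bio)
{
	struct bio *master = bio->bi_private;

	if (bio->bi_error)
		master->bi_error = bio->bi_error;
	bio_put(bio);
	bio_endio(master);
}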
@@ -397,9 +398,8 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
}
}
-static void raid1_end_write_request(struct bio *bio, int error)
+static void raid1_end_write_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r1bio *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
struct r1conf *conf = r1_bio->mddev->private;
@@ -410,7 +410,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
/*
* 'one mirror IO has finished' event handler:
*/
- if (!uptodate) {
+ if (bio->bi_error) {
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
this_sector + sectors)))
choose_first = 1;
else
@@ -557,7 +557,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
|| rdev == NULL
- || test_bit(Unmerged, &rdev->flags)
|| test_bit(Faulty, &rdev->flags))
continue;
if (!test_bit(In_sync, &rdev->flags) &&
@@ -708,38 +707,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
return best_disk;
}
-static int raid1_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r1conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max = biovec->bv_len;
-
- if (mddev->merge_check_needed) {
- int disk;
- rcu_read_lock();
- for (disk = 0; disk < conf->raid_disks * 2; disk++) {
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[disk].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = sector +
- rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- }
- rcu_read_unlock();
- }
- return max;
-
-}
-
static int raid1_congested(struct mddev *mddev, int bits)
{
struct r1conf *conf = mddev->private;
@@ -793,7 +760,7 @@ static void flush_pending_writes(struct r1conf *conf)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1068,7 +1035,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1111,7 +1078,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1124,7 +1092,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_end_sector(bio) <= mddev->suspend_lo ||
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev,
+ !md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
@@ -1157,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
* non-zero, then it is the number of not-completed requests.
*/
bio->bi_phys_segments = 0;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
if (rw == READ) {
/*
@@ -1268,8 +1236,7 @@ read_again:
break;
}
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)
- || test_bit(Unmerged, &rdev->flags)) {
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
set_bit(R1BIO_Degraded, &r1_bio->state);
continue;
@@ -1475,6 +1442,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
+ unsigned long flags;
/*
* If it is not operational, then we have already marked it as dead
@@ -1494,19 +1462,19 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
return;
}
set_bit(Blocked, &rdev->flags);
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
- spin_unlock_irqrestore(&conf->device_lock, flags);
} else
set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
/*
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid1:%s: Disk failure on %s, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
@@ -1567,7 +1535,10 @@ static int raid1_spare_active(struct mddev *mddev)
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
+ * device_lock used to avoid races with raid1_end_read_request
+ * which expects 'In_sync' flags and ->degraded to be consistent.
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1569,6 @@ static int raid1_spare_active(struct mddev *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded -= count;
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -1614,7 +1584,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct raid1_info *p;
int first = 0;
int last = conf->raid_disks - 1;
- struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -1622,11 +1591,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (q->merge_bvec_fn) {
- set_bit(Unmerged, &rdev->flags);
- mddev->merge_check_needed = 1;
- }
-
for (mirror = first; mirror <= last; mirror++) {
p = conf->mirrors+mirror;
if (!p->rdev) {
@@ -1658,19 +1622,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
- if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
- /* Some requests might not have seen this new
- * merge_bvec_fn. We must wait for them to complete
- * before merging the device fully.
- * First we make sure any code which has tested
- * our function has submitted the request, then
- * we wait for all outstanding requests to complete.
- */
- synchronize_sched();
- freeze_array(conf, 0);
- unfreeze_array(conf);
- clear_bit(Unmerged, &rdev->flags);
- }
md_integrity_add_rdev(rdev, mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1734,7 +1685,7 @@ abort:
return err;
}
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
{
struct r1bio *r1_bio = bio->bi_private;
@@ -1745,16 +1696,16 @@ static void end_sync_read(struct bio *bio, int error)
* or re-read if the read failed.
* We don't do much here, just schedule handling by raid1d
*/
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (!bio->bi_error)
set_bit(R1BIO_Uptodate, &r1_bio->state);
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r1bio *r1_bio = bio->bi_private;
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
@@ -1941,7 +1892,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
idx ++;
}
set_bit(R1BIO_Uptodate, &r1_bio->state);
- set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = 0;
return 1;
}
@@ -1965,15 +1916,14 @@ static void process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
- int uptodate;
+ int error;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse, but preserve BIO_UPTODATE */
- uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
+ /* fixup the bio for reuse, but preserve errno */
+ error = b->bi_error;
bio_reset(b);
- if (!uptodate)
- clear_bit(BIO_UPTODATE, &b->bi_flags);
+ b->bi_error = error;
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1996,7 +1946,7 @@ static void process_checks(struct r1bio *r1_bio)
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
- test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+ !r1_bio->bios[primary]->bi_error) {
r1_bio->bios[primary]->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
break;
@@ -2006,14 +1956,14 @@ static void process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
- int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
+ int error = sbio->bi_error;
if (sbio->bi_end_io != end_sync_read)
continue;
- /* Now we can 'fixup' the BIO_UPTODATE flag */
- set_bit(BIO_UPTODATE, &sbio->bi_flags);
+ /* Now we can 'fixup' the error value */
+ sbio->bi_error = 0;
- if (uptodate) {
+ if (!error) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2028,7 +1978,7 @@ static void process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && uptodate)) {
+ && !error)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2269,11 +2219,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
continue;
- if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (!bio->bi_error &&
test_bit(R1BIO_MadeGood, &r1_bio->state)) {
rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
}
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (bio->bi_error &&
test_bit(R1BIO_WriteError, &r1_bio->state)) {
if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
md_error(conf->mddev, rdev);
@@ -2286,6 +2236,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
+ bool fail = false;
for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2298,6 +2249,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* narrow down and record precise write
* errors.
*/
+ fail = true;
if (!narrow_write_error(r1_bio, m)) {
md_error(conf->mddev,
conf->mirrors[m].rdev);
@@ -2309,7 +2261,13 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
}
if (test_bit(R1BIO_WriteError, &r1_bio->state))
close_write(r1_bio);
- raid_end_bio_io(r1_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else
+ raid_end_bio_io(r1_bio);
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2415,6 +2373,23 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r1_bio = list_first_entry(&tmp,
+ struct r1bio, retry_list);
+ list_del(&r1_bio->retry_list);
+ raid_end_bio_io(r1_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {
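
raid1d drains the deferred completions only after MD_CHANGE_PENDING clears, i.e. after the superblock recording the device failure has reached stable storage, and re-tests the flag under device_lock to close the race with a fresh failure. The list_add()/list_del_init() pair above transplants the whole list onto the on-stack tmp head; a condensed equivalent using list_splice_init(), after which the drain walks tmp:

LIST_HEAD(tmp);

spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags))
	list_splice_init(&conf->bio_end_io_list, &tmp);
spin_unlock_irqrestore(&conf->device_lock, flags);

while (!list_empty(&tmp)) {
	struct r1bio *r1_bio = list_first_entry(&tmp, struct r1bio,
						retry_list);

	list_del(&r1_bio->retry_list);
	raid_end_bio_io(r1_bio);	/* safe: the failure is on disk */
}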
@@ -2712,7 +2687,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* remove last page from this bio */
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -2807,8 +2782,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
disk->rdev = rdev;
q = bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn)
- mddev->merge_check_needed = 1;
disk->head_position = 0;
disk->seq_start = MaxSector;
@@ -2816,6 +2789,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
@@ -3110,6 +3084,7 @@ static int raid1_reshape(struct mddev *mddev)
unfreeze_array(conf);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -3173,7 +3148,6 @@ static struct md_personality raid1_personality =
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
.congested = raid1_congested,
- .mergeable_bvec = raid1_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14ebb288c1ef..c52d7139c5d7 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -61,6 +61,11 @@ struct r1conf {
* block, or anything else.
*/
struct list_head retry_list;
+ /* A separate list of r1bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;
/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..0fc33eb88855 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -101,7 +101,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
-static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -307,9 +307,9 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
} else
done = 1;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
if (done) {
- bio_endio(bio, 0);
+ bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -358,9 +358,9 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
return r10_bio->devs[slot].devnum;
}
-static void raid10_end_read_request(struct bio *bio, int error)
+static void raid10_end_read_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_error;
struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
struct md_rdev *rdev;
@@ -438,9 +438,8 @@ static void one_write_done(struct r10bio *r10_bio)
}
}
-static void raid10_end_write_request(struct bio *bio, int error)
+static void raid10_end_write_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
int dev;
int dec_rdev = 1;
@@ -460,7 +459,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (!uptodate) {
+ if (bio->bi_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -672,93 +671,6 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
return (vchunk << geo->chunk_shift) + offset;
}
-/**
- * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
- * @mddev: the md device
- * @bvm: properties of new bio
- * @biovec: the request that could be merged to it.
- *
- * Return amount of bytes we can accept at this offset
- * This requires checking for end-of-chunk if near_copies != raid_disks,
- * and for subordinate merge_bvec_fns if merge_check_needed.
- */
-static int raid10_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- struct r10conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max;
- unsigned int chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
- struct geom *geo = &conf->geo;
-
- chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
- if (conf->reshape_progress != MaxSector &&
- ((sector >= conf->reshape_progress) !=
- conf->mddev->reshape_backwards))
- geo = &conf->prev;
-
- if (geo->near_copies < geo->raid_disks) {
- max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << 9;
- if (max < 0)
- /* bio_add cannot handle a negative return */
- max = 0;
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- } else
- max = biovec->bv_len;
-
- if (mddev->merge_check_needed) {
- struct {
- struct r10bio r10_bio;
- struct r10dev devs[conf->copies];
- } on_stack;
- struct r10bio *r10_bio = &on_stack.r10_bio;
- int s;
- if (conf->reshape_progress != MaxSector) {
- /* Cannot give any guidance during reshape */
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- return 0;
- }
- r10_bio->sector = sector;
- raid10_find_phys(conf, r10_bio);
- rcu_read_lock();
- for (s = 0; s < conf->copies; s++) {
- int disk = r10_bio->devs[s].devnum;
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[disk].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio->devs[s].addr
- + rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- rdev = rcu_dereference(conf->mirrors[disk].replacement);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio->devs[s].addr
- + rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- }
- rcu_read_unlock();
- }
- return max;
-}
-
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
@@ -821,12 +733,10 @@ retry:
disk = r10_bio->devs[slot].devnum;
rdev = rcu_dereference(conf->mirrors[disk].replacement);
if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags) ||
r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (rdev == NULL ||
- test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags))
+ test_bit(Faulty, &rdev->flags))
continue;
if (!test_bit(In_sync, &rdev->flags) &&
r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
@@ -957,7 +867,7 @@ static void flush_pending_writes(struct r10conf *conf)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1133,7 +1043,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (unlikely((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
- bio_endio(bio, 0);
+ bio_endio(bio);
else
generic_make_request(bio);
bio = next;
@@ -1217,7 +1127,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
* non-zero, then it is the number of not-completed requests.
*/
bio->bi_phys_segments = 0;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ bio_clear_flag(bio, BIO_SEG_VALID);
if (rw == READ) {
/*
@@ -1326,11 +1236,9 @@ retry_write:
blocked_rdev = rrdev;
break;
}
- if (rdev && (test_bit(Faulty, &rdev->flags)
- || test_bit(Unmerged, &rdev->flags)))
+ if (rdev && (test_bit(Faulty, &rdev->flags)))
rdev = NULL;
- if (rrdev && (test_bit(Faulty, &rrdev->flags)
- || test_bit(Unmerged, &rrdev->flags)))
+ if (rrdev && (test_bit(Faulty, &rrdev->flags)))
rrdev = NULL;
r10_bio->devs[i].bio = NULL;
@@ -1681,6 +1589,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irqrestore(&conf->device_lock, flags);
printk(KERN_ALERT
"md/raid10:%s: Disk failure on %s, disabling device.\n"
@@ -1777,7 +1686,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int mirror;
int first = 0;
int last = conf->geo.raid_disks - 1;
- struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1790,11 +1698,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (q->merge_bvec_fn) {
- set_bit(Unmerged, &rdev->flags);
- mddev->merge_check_needed = 1;
- }
-
if (rdev->saved_raid_disk >= first &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
@@ -1833,19 +1736,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
- if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
- /* Some requests might not have seen this new
- * merge_bvec_fn. We must wait for them to complete
- * before merging the device fully.
- * First we make sure any code which has tested
- * our function has submitted the request, then
- * we wait for all outstanding requests to complete.
- */
- synchronize_sched();
- freeze_array(conf, 0);
- unfreeze_array(conf);
- clear_bit(Unmerged, &rdev->flags);
- }
md_integrity_add_rdev(rdev, mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1916,7 +1806,7 @@ abort:
return err;
}
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
{
struct r10bio *r10_bio = bio->bi_private;
struct r10conf *conf = r10_bio->mddev->private;
@@ -1928,7 +1818,7 @@ static void end_sync_read(struct bio *bio, int error)
} else
d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (!bio->bi_error)
set_bit(R10BIO_Uptodate, &r10_bio->state);
else
/* The write handler will notice the lack of
@@ -1977,9 +1867,8 @@ static void end_sync_request(struct r10bio *r10_bio)
}
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
@@ -1996,7 +1885,7 @@ static void end_sync_write(struct bio *bio, int error)
else
rdev = conf->mirrors[d].rdev;
- if (!uptodate) {
+ if (bio->bi_error) {
if (repl)
md_error(mddev, rdev);
else {
@@ -2044,7 +1933,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* find the first device with a block */
for (i=0; i<conf->copies; i++)
- if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
+ if (!r10_bio->devs[i].bio->bi_error)
break;
if (i == conf->copies)
@@ -2064,7 +1953,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
if (i == first)
continue;
- if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
+ if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them.
* All vec entries are PAGE_SIZE;
@@ -2394,7 +2283,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- !test_bit(Unmerged, &rdev->flags) &&
test_bit(In_sync, &rdev->flags) &&
is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
&first_bad, &bad_sectors) == 0) {
@@ -2448,7 +2336,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (!rdev ||
- test_bit(Unmerged, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -2706,8 +2593,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev = conf->mirrors[dev].rdev;
if (r10_bio->devs[m].bio == NULL)
continue;
- if (test_bit(BIO_UPTODATE,
- &r10_bio->devs[m].bio->bi_flags)) {
+ if (!r10_bio->devs[m].bio->bi_error) {
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
@@ -2722,8 +2608,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev = conf->mirrors[dev].replacement;
if (r10_bio->devs[m].repl_bio == NULL)
continue;
- if (test_bit(BIO_UPTODATE,
- &r10_bio->devs[m].repl_bio->bi_flags)) {
+
+ if (!r10_bio->devs[m].repl_bio->bi_error) {
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
@@ -2738,6 +2624,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
}
put_buf(r10_bio);
} else {
+ bool fail = false;
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
struct bio *bio = r10_bio->devs[m].bio;
@@ -2748,8 +2635,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->devs[m].addr,
r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
- } else if (bio != NULL &&
- !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ } else if (bio != NULL && bio->bi_error) {
+ fail = true;
if (!narrow_write_error(r10_bio, m)) {
md_error(conf->mddev, rdev);
set_bit(R10BIO_Degraded,
@@ -2770,7 +2657,13 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
close_write(r10_bio);
- raid_end_bio_io(r10_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else
+ raid_end_bio_io(r10_bio);
}
}
@@ -2785,6 +2678,23 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r10_bio = list_first_entry(&tmp,
+ struct r10bio, retry_list);
+ list_del(&r10_bio->retry_list);
+ raid_end_bio_io(r10_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {
@@ -3263,7 +3173,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[i].bio;
bio_reset(bio);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
continue;
@@ -3300,7 +3210,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
bio_reset(bio);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = -EIO;
sector = r10_bio->devs[i].addr;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -3357,7 +3267,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+ bio_clear_flag(bio2, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -3377,7 +3287,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors);
- set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_error = 0;
generic_make_request(bio);
}
}
@@ -3556,8 +3466,10 @@ static struct r10conf *setup_conf(struct mddev *mddev)
/* far_copies must be 1 */
conf->prev.stride = conf->dev_sectors;
}
+ conf->reshape_safe = conf->reshape_progress;
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
@@ -3642,8 +3554,6 @@ static int run(struct mddev *mddev)
disk->rdev = rdev;
}
q = bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn)
- mddev->merge_check_needed = 1;
diff = (rdev->new_data_offset - rdev->data_offset);
if (!mddev->reshape_backwards)
diff = -diff;
@@ -3760,7 +3670,6 @@ static int run(struct mddev *mddev)
}
conf->offset_diff = min_offset_diff;
- conf->reshape_safe = conf->reshape_progress;
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4012,7 @@ static int raid10_start_reshape(struct mddev *mddev)
conf->reshape_progress = size;
} else
conf->reshape_progress = 0;
+ conf->reshape_safe = conf->reshape_progress;
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4080,7 @@ abort:
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
return ret;
@@ -4213,7 +4124,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
* at a time, possibly less if that exceeds RESYNC_PAGES,
* or we hit a bad block or something.
* This might mean we pause for normal IO in the middle of
- * a chunk, but that is not a problem was mddev->reshape_position
+ * a chunk, but that is not a problem as mddev->reshape_position
* can record any location.
*
* If we will want to write to a location that isn't
@@ -4237,7 +4148,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
*
* In all this the minimum difference in data offsets
* (conf->offset_diff - always positive) allows a bit of slack,
- * so next can be after 'safe', but not by more than offset_disk
+ * so next can be after 'safe', but not by more than offset_diff
*
* We need to prepare all the bios here before we start any IO
* to ensure the size we choose is acceptable to all devices.
@@ -4380,7 +4291,7 @@ read_more:
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
- __set_bit(BIO_UPTODATE, &read_bio->bi_flags);
+ read_bio->bi_error = 0;
read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
@@ -4437,7 +4348,7 @@ read_more:
/* Remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
- __clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+ bio_clear_flag(bio2, BIO_SEG_VALID);
}
goto bio_full;
}
@@ -4524,6 +4435,7 @@ static void end_reshape(struct r10conf *conf)
md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
/* read-ahead size must cover two whole stripes, which is
@@ -4601,9 +4513,8 @@ static int handle_reshape_read_error(struct mddev *mddev,
return 0;
}
-static void end_reshape_write(struct bio *bio, int error)
+static void end_reshape_write(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
@@ -4620,7 +4531,7 @@ static void end_reshape_write(struct bio *bio, int error)
rdev = conf->mirrors[d].rdev;
}
- if (!uptodate) {
+ if (bio->bi_error) {
/* FIXME should record badblock */
md_error(mddev, rdev);
}
@@ -4697,7 +4608,6 @@ static struct md_personality raid10_personality =
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
.congested = raid10_congested,
- .mergeable_bvec = raid10_mergeable_bvec,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 5ee6473ddc2c..6fc2c75759bf 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -53,6 +53,12 @@ struct r10conf {
sector_t offset_diff;
struct list_head retry_list;
+ /* A separate list of r10bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;
+
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
int pending_count;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..15ef2c641b2b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -223,18 +223,14 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
return slot;
}
-static void return_io(struct bio *return_bi)
+static void return_io(struct bio_list *return_bi)
{
- struct bio *bi = return_bi;
- while (bi) {
-
- return_bi = bi->bi_next;
- bi->bi_next = NULL;
+ struct bio *bi;
+ while ((bi = bio_list_pop(return_bi)) != NULL) {
bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
- bio_endio(bi, 0);
- bi = return_bi;
+ bio_endio(bi);
}
}
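
return_io() switches from an open-coded bi_next chain to struct bio_list, which carries head and tail pointers and so preserves completion order without the manual unlinking being removed here. Minimal usage of the bio_list API as employed in this file (some_bio is a placeholder):

struct bio_list pending = BIO_EMPTY_LIST;
struct bio *bi;

bio_list_add(&pending, some_bio);		/* append at the tail */
while ((bi = bio_list_pop(&pending)) != NULL)	/* detach from the head */
	bio_endio(bi);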
@@ -887,9 +883,9 @@ static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
}
static void
-raid5_end_read_request(struct bio *bi, int error);
+raid5_end_read_request(struct bio *bi);
static void
-raid5_end_write_request(struct bio *bi, int error);
+raid5_end_write_request(struct bio *bi);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
@@ -1177,7 +1173,7 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
static void ops_complete_biofill(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- struct bio *return_bi = NULL;
+ struct bio_list return_bi = BIO_EMPTY_LIST;
int i;
pr_debug("%s: stripe %llu\n", __func__,
@@ -1201,17 +1197,15 @@ static void ops_complete_biofill(void *stripe_head_ref)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
- if (!raid5_dec_bi_active_stripes(rbi)) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
+ if (!raid5_dec_bi_active_stripes(rbi))
+ bio_list_add(&return_bi, rbi);
rbi = rbi2;
}
}
}
clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
- return_io(return_bi);
+ return_io(&return_bi);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -2162,6 +2156,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!sc)
return -ENOMEM;
+ /* Need to ensure auto-resizing doesn't interfere */
+ mutex_lock(&conf->cache_size_mutex);
+
for (i = conf->max_nr_stripes; i; i--) {
nsh = alloc_stripe(sc, GFP_KERNEL);
if (!nsh)
@@ -2178,6 +2175,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
kmem_cache_free(sc, nsh);
}
kmem_cache_destroy(sc);
+ mutex_unlock(&conf->cache_size_mutex);
return -ENOMEM;
}
/* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2222,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
+ mutex_unlock(&conf->cache_size_mutex);
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
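
resize_stripes() now holds cache_size_mutex while stripe heads are pulled out of the old kmem cache and rebuilt, so the automatic cache shrinker cannot free or miscount stripes mid-migration; the lock is dropped before step 4 hands the new stripes back to service. A sketch of the shrinker side this serializes against; the mutex and drop_one_stripe() appear in the hunks above, while min_nr_stripes and the mutex_trylock() pattern are assumptions:

/* assumed shrinker callback, serialized against resize_stripes() */
if (mutex_trylock(&conf->cache_size_mutex)) {
	while (conf->max_nr_stripes > conf->min_nr_stripes &&
	       drop_one_stripe(conf))
		;	/* shed stripes one at a time */
	mutex_unlock(&conf->cache_size_mutex);
}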
@@ -2251,7 +2250,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
- int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -2277,12 +2276,11 @@ static void shrink_stripes(struct r5conf *conf)
conf->slab_cache = NULL;
}
-static void raid5_end_read_request(struct bio * bi, int error)
+static void raid5_end_read_request(struct bio * bi)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
sector_t s;
@@ -2291,9 +2289,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
if (bi == &sh->dev[i].req)
break;
- pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+ pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
+ bi->bi_error);
if (i == disks) {
BUG();
return;
@@ -2312,7 +2310,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
s = sh->sector + rdev->new_data_offset;
else
s = sh->sector + rdev->data_offset;
- if (uptodate) {
+ if (!bi->bi_error) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
/* Note that this cannot happen on a
@@ -2400,13 +2398,12 @@ static void raid5_end_read_request(struct bio * bi, int error)
release_stripe(sh);
}
-static void raid5_end_write_request(struct bio *bi, int error)
+static void raid5_end_write_request(struct bio *bi)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
struct md_rdev *uninitialized_var(rdev);
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
sector_t first_bad;
int bad_sectors;
int replacement = 0;
@@ -2429,23 +2426,23 @@ static void raid5_end_write_request(struct bio *bi, int error)
break;
}
}
- pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+ pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
- uptodate);
+ bi->bi_error);
if (i == disks) {
BUG();
return;
}
if (replacement) {
- if (!uptodate)
+ if (bi->bi_error)
md_error(conf->mddev, rdev);
else if (is_badblock(rdev, sh->sector,
STRIPE_SECTORS,
&first_bad, &bad_sectors))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
- if (!uptodate) {
+ if (bi->bi_error) {
set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2466,7 +2463,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
rdev_dec_pending(rdev, conf->mddev);
- if (sh->batch_head && !uptodate && !replacement)
+ if (sh->batch_head && bi->bi_error && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -2514,6 +2511,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid:%s: Disk failure on %s, disabling device.\n"
"md/raid:%s: Operation continuing on %d devices.\n",
@@ -3066,7 +3064,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks,
- struct bio **return_bi)
+ struct bio_list *return_bi)
{
int i;
BUG_ON(sh->batch_head);
@@ -3107,11 +3105,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+ bi->bi_error = -EIO;
if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
- bi->bi_next = *return_bi;
- *return_bi = bi;
+ bio_list_add(return_bi, bi);
}
bi = nextbi;
}
@@ -3131,11 +3129,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+ bi->bi_error = -EIO;
if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
- bi->bi_next = *return_bi;
- *return_bi = bi;
+ bio_list_add(return_bi, bi);
}
bi = bi2;
}
@@ -3156,11 +3154,10 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (!raid5_dec_bi_active_stripes(bi)) {
- bi->bi_next = *return_bi;
- *return_bi = bi;
- }
+
+ bi->bi_error = -EIO;
+ if (!raid5_dec_bi_active_stripes(bi))
+ bio_list_add(return_bi, bi);
bi = nextbi;
}
}
@@ -3439,7 +3436,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
* never LOCKED, so we don't need to test 'failed' directly.
*/
static void handle_stripe_clean_event(struct r5conf *conf,
- struct stripe_head *sh, int disks, struct bio **return_bi)
+ struct stripe_head *sh, int disks, struct bio_list *return_bi)
{
int i;
struct r5dev *dev;
@@ -3473,8 +3470,7 @@ returnbi:
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
md_write_end(conf->mddev);
- wbi->bi_next = *return_bi;
- *return_bi = wbi;
+ bio_list_add(return_bi, wbi);
}
wbi = wbi2;
}
@@ -4061,8 +4057,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
&first_bad, &bad_sectors))
set_bit(R5_ReadRepl, &dev->flags);
else {
- if (rdev)
+ if (rdev && !test_bit(Faulty, &rdev->flags))
set_bit(R5_NeedReplace, &dev->flags);
+ else
+ clear_bit(R5_NeedReplace, &dev->flags);
rdev = rcu_dereference(conf->disks[i].rdev);
clear_bit(R5_ReadRepl, &dev->flags);
}
@@ -4605,7 +4603,15 @@ finish:
md_wakeup_thread(conf->mddev->thread);
}
- return_io(s.return_bi);
+ if (!bio_list_empty(&s.return_bi)) {
+ if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+ spin_lock_irq(&conf->device_lock);
+ bio_list_merge(&conf->return_bi, &s.return_bi);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else
+ return_io(&s.return_bi);
+ }
clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
@@ -4662,43 +4668,14 @@ static int raid5_congested(struct mddev *mddev, int bits)
return 0;
}
-/* We want read requests to align with chunks where possible,
- * but write requests don't need to.
- */
-static int raid5_mergeable_bvec(struct mddev *mddev,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
-{
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
- unsigned int bio_sectors = bvm->bi_size >> 9;
-
- /*
- * always allow writes to be mergeable, read as well if array
- * is degraded as we'll go through stripe cache anyway.
- */
- if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
- return biovec->bv_len;
-
- if (mddev->new_chunk_sectors < mddev->chunk_sectors)
- chunk_sectors = mddev->new_chunk_sectors;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
- if (max < 0) max = 0;
- if (max <= biovec->bv_len && bio_sectors == 0)
- return biovec->bv_len;
- else
- return max;
-}
-
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
+ struct r5conf *conf = mddev->private;
sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
- unsigned int chunk_sectors = mddev->chunk_sectors;
+ unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- if (mddev->new_chunk_sectors < mddev->chunk_sectors)
- chunk_sectors = mddev->new_chunk_sectors;
+ chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
}
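
in_chunk_boundary() now takes the smaller of the current and previous chunk sizes, so during a reshape a read only counts as aligned if it fits inside one chunk under both geometries. The mask arithmetic requires chunk_sectors to be a power of two; a worked instance with made-up numbers:

/* chunk_sectors = 64: a 16-sector read at sector 50 gives
 * (50 & 63) + 16 = 66 > 64, so it straddles a boundary and must take
 * the stripe-cache path instead of the aligned-read fast path. */
unsigned int chunk_sectors = 64, nr_sectors = 16;
sector_t sector = 50;
int aligned = chunk_sectors >= ((sector & (chunk_sectors - 1)) + nr_sectors);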
@@ -4749,13 +4726,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
* first).
* If the read failed..
*/
-static void raid5_align_endio(struct bio *bi, int error)
+static void raid5_align_endio(struct bio *bi)
{
struct bio* raid_bi = bi->bi_private;
struct mddev *mddev;
struct r5conf *conf;
- int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
struct md_rdev *rdev;
+ int error = bi->bi_error;
bio_put(bi);
@@ -4766,10 +4743,10 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
- if (!error && uptodate) {
+ if (!error) {
trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
raid_bi, 0);
- bio_endio(raid_bi, 0);
+ bio_endio(raid_bi);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_quiescent);
return;
@@ -4780,26 +4757,7 @@ static void raid5_align_endio(struct bio *bi, int error)
add_bio_to_retry(raid_bi, conf);
}
-static int bio_fits_rdev(struct bio *bi)
-{
- struct request_queue *q = bdev_get_queue(bi->bi_bdev);
-
- if (bio_sectors(bi) > queue_max_sectors(q))
- return 0;
- blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > queue_max_segments(q))
- return 0;
-
- if (q->merge_bvec_fn)
- /* it's too hard to apply the merge_bvec_fn at this stage,
- * just give up
- */
- return 0;
-
- return 1;
-}
-
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
struct r5conf *conf = mddev->private;
int dd_idx;
@@ -4808,7 +4766,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
sector_t end_sector;
if (!in_chunk_boundary(mddev, raid_bio)) {
- pr_debug("chunk_aligned_read : non aligned\n");
+ pr_debug("%s: non aligned\n", __func__);
return 0;
}
/*
@@ -4850,13 +4808,11 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
- __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
+ bio_clear_flag(align_bi, BIO_SEG_VALID);
- if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
- /* too big in some way, or has a known bad block */
bio_put(align_bi);
rdev_dec_pending(rdev, mddev);
return 0;
@@ -4885,6 +4841,31 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
}
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+ struct bio *split;
+
+ do {
+ sector_t sector = raid_bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
+ unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+ if (sectors < bio_sectors(raid_bio)) {
+ split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, raid_bio);
+ } else
+ split = raid_bio;
+
+ if (!raid5_read_one_chunk(mddev, split)) {
+ if (split != raid_bio)
+ generic_make_request(raid_bio);
+ return split;
+ }
+ } while (split != raid_bio);
+
+ return NULL;
+}
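The new wrapper peels chunk-aligned pieces off the front of the bio: the split length is the distance from the current sector to the next chunk boundary, and the loop stops as soon as raid5_read_one_chunk() refuses a piece. A userspace simulation of just the split arithmetic (names and values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector = 60;		/* request start, in sectors */
	unsigned int remaining = 200;	/* request length, in sectors */
	unsigned int chunk_sects = 64;	/* power-of-two chunk size */

	while (remaining) {
		unsigned int to_boundary =
			chunk_sects - (sector & (chunk_sects - 1));
		unsigned int split = to_boundary < remaining ?
			to_boundary : remaining;

		printf("piece: sector %llu, %u sectors\n",
		       (unsigned long long)sector, split);
		sector += split;
		remaining -= split;
	}
	return 0;
}

Returning NULL tells make_request() further down that every piece was issued as an aligned read and there is nothing left to do.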
+
/* __get_priority_stripe - get the next stripe to process
*
* Full stripe writes are allowed to pass preread active stripes up until
@@ -5133,7 +5114,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
md_write_end(mddev);
- bio_endio(bi, 0);
+ bio_endio(bi);
}
}
@@ -5162,9 +5143,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* data on failed drives.
*/
if (rw == READ && mddev->degraded == 0 &&
- mddev->reshape_position == MaxSector &&
- chunk_aligned_read(mddev,bi))
- return;
+ mddev->reshape_position == MaxSector) {
+ bi = chunk_aligned_read(mddev, bi);
+ if (!bi)
+ return;
+ }
if (unlikely(bi->bi_rw & REQ_DISCARD)) {
make_discard_request(mddev, bi);
@@ -5297,7 +5280,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
release_stripe_plug(mddev, sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ bi->bi_error = -EIO;
break;
}
}
@@ -5311,7 +5294,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
- bio_endio(bi, 0);
+ bio_endio(bi);
}
}
@@ -5340,6 +5323,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sector_t stripe_addr;
int reshape_sectors;
struct list_head stripes;
+ sector_t retn;
if (sector_nr == 0) {
/* If restarting in the middle, skip the initial sectors */
@@ -5347,6 +5331,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
conf->reshape_progress < raid5_size(mddev, 0, 0)) {
sector_nr = raid5_size(mddev, 0, 0)
- conf->reshape_progress;
+ } else if (mddev->reshape_backwards &&
+ conf->reshape_progress == MaxSector) {
+ /* shouldn't happen, but just in case, finish up. */
+ sector_nr = MaxSector;
} else if (!mddev->reshape_backwards &&
conf->reshape_progress > 0)
sector_nr = conf->reshape_progress;
@@ -5355,7 +5343,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
mddev->curr_resync_completed = sector_nr;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
*skipped = 1;
- return sector_nr;
+ retn = sector_nr;
+ goto finish;
}
}
@@ -5363,10 +5352,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* If old and new chunk sizes differ, we need to process the
* largest of these
*/
- if (mddev->new_chunk_sectors > mddev->chunk_sectors)
- reshape_sectors = mddev->new_chunk_sectors;
- else
- reshape_sectors = mddev->chunk_sectors;
+
+ reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
/* We update the metadata at least every 10 seconds, or when
* the data about to be copied would over-write the source of
@@ -5381,11 +5368,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->reshape_backwards) {
- writepos -= min_t(sector_t, reshape_sectors, writepos);
+ BUG_ON(writepos < reshape_sectors);
+ writepos -= reshape_sectors;
readpos += reshape_sectors;
safepos += reshape_sectors;
} else {
writepos += reshape_sectors;
+ /* readpos and safepos are worst-case calculations.
+ * A negative number is overly pessimistic, and causes
+ * obvious problems for unsigned storage. So clip to 0.
+ */
readpos -= min_t(sector_t, reshape_sectors, readpos);
safepos -= min_t(sector_t, reshape_sectors, safepos);
}
@@ -5528,7 +5520,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* then we need to write out the superblock.
*/
sector_nr += reshape_sectors;
- if ((sector_nr - mddev->curr_resync_completed) * 2
+ retn = reshape_sectors;
+finish:
+ if (mddev->curr_resync_completed > mddev->resync_max ||
+ (sector_nr - mddev->curr_resync_completed) * 2
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
@@ -5553,7 +5548,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
ret:
- return reshape_sectors;
+ return retn;
}
static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
@@ -5707,7 +5702,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
if (remaining == 0) {
trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
raid_bio, 0);
- bio_endio(raid_bio, 0);
+ bio_endio(raid_bio);
}
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_quiescent);
@@ -5809,6 +5804,18 @@ static void raid5d(struct md_thread *thread)
md_check_recovery(mddev);
+ if (!bio_list_empty(&conf->return_bi) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ struct bio_list tmp = BIO_EMPTY_LIST;
+ spin_lock_irq(&conf->device_lock);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ bio_list_merge(&tmp, &conf->return_bi);
+ bio_list_init(&conf->return_bi);
+ }
+ spin_unlock_irq(&conf->device_lock);
+ return_io(&tmp);
+ }
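The new block uses a double-checked pattern: MD_CHANGE_PENDING is tested once without the lock so the common case costs nothing, re-tested under device_lock before return_bi is spliced onto a private list, and the bios are completed only after the lock is dropped. A pthread sketch of that shape (the flag, list and completion routine are stand-ins, not the kernel objects):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static bool change_pending;
static int return_list[16];
static int return_count;

static void drain_returns(void)
{
	int tmp[16];
	int i, n = 0;

	if (return_count == 0 || change_pending)	/* unlocked fast path */
		return;
	pthread_mutex_lock(&device_lock);
	if (!change_pending) {				/* re-check under the lock */
		for (i = 0; i < return_count; i++)
			tmp[n++] = return_list[i];	/* splice to a private list */
		return_count = 0;
	}
	pthread_mutex_unlock(&device_lock);
	for (i = 0; i < n; i++)				/* complete outside the lock */
		printf("completing bio %d\n", tmp[i]);
}

int main(void)
{
	return_list[return_count++] = 1;
	drain_returns();
	return 0;
}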
+
blk_start_plug(&plug);
handled = 0;
spin_lock_irq(&conf->device_lock);
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
- if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+ mutex_trylock(&conf->cache_size_mutex)) {
grow_one_stripe(conf, __GFP_NOWARN);
/* Set flag even if allocation failed. This helps
* slow down allocation requests when mem is short
*/
set_bit(R5_DID_ALLOC, &conf->cache_state);
+ mutex_unlock(&conf->cache_size_mutex);
}
async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
return -EINVAL;
conf->min_nr_stripes = size;
+ mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
;
+ mutex_unlock(&conf->cache_size_mutex);
err = md_allow_write(mddev);
if (err)
return err;
+ mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL))
break;
+ mutex_unlock(&conf->cache_size_mutex);
return 0;
}
@@ -6243,8 +6256,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
/* size is defined by the smallest of previous and new size */
raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
- sectors &= ~((sector_t)mddev->chunk_sectors - 1);
- sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
+ sectors &= ~((sector_t)conf->chunk_sectors - 1);
+ sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
return sectors * (raid_disks - conf->max_degraded);
}
@@ -6371,11 +6384,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
- int ret = 0;
- while (ret < sc->nr_to_scan) {
- if (drop_one_stripe(conf) == 0)
- return SHRINK_STOP;
- ret++;
+ unsigned long ret = SHRINK_STOP;
+
+ if (mutex_trylock(&conf->cache_size_mutex)) {
+ ret = 0;
+ while (ret < sc->nr_to_scan &&
+ conf->max_nr_stripes > conf->min_nr_stripes) {
+ if (drop_one_stripe(conf) == 0) {
+ ret = SHRINK_STOP;
+ break;
+ }
+ ret++;
+ }
+ mutex_unlock(&conf->cache_size_mutex);
}
return ret;
}
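mutex_trylock() rather than mutex_lock() is deliberate here: a shrinker runs in memory-reclaim context, and sleeping on cache_size_mutex while raid5_set_cache_size() holds it across allocations could deadlock; bailing out with SHRINK_STOP simply defers the scan. A minimal pthread illustration of the bail-out shape (names are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;

/* returns items freed, or 0 when the lock is contended (think SHRINK_STOP) */
static unsigned long cache_scan(unsigned long nr_to_scan)
{
	unsigned long freed = 0;

	if (pthread_mutex_trylock(&cache_size_mutex) != 0)
		return 0;	/* a resize is in progress: don't block reclaim */
	while (freed < nr_to_scan)
		freed++;	/* stand-in for drop_one_stripe() */
	pthread_mutex_unlock(&cache_size_mutex);
	return freed;
}

int main(void)
{
	printf("freed %lu\n", cache_scan(4));
	return 0;
}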
@@ -6444,6 +6465,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
+ mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
init_waitqueue_head(&conf->wait_for_stripe[i]);
@@ -6453,6 +6475,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
+ bio_list_init(&conf->return_bi);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -6542,6 +6565,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
conf->prev_algo = mddev->layout;
+ } else {
+ conf->prev_chunk_sectors = conf->chunk_sectors;
+ conf->prev_algo = conf->algorithm;
}
conf->min_nr_stripes = NR_STRIPES;
@@ -6661,6 +6687,8 @@ static int run(struct mddev *mddev)
sector_t here_new, here_old;
int old_disks;
int max_degraded = (mddev->level == 6 ? 2 : 1);
+ int chunk_sectors;
+ int new_data_disks;
if (mddev->new_level != mddev->level) {
printk(KERN_ERR "md/raid:%s: unsupported reshape "
@@ -6672,28 +6700,25 @@ static int run(struct mddev *mddev)
/* reshape_position must be on a new-stripe boundary, and one
* further up in new geometry must map after here in old
* geometry.
+ * If the chunk sizes are different, then as we perform reshape
+ * in units of the largest of the two, reshape_position needs to
+ * be a multiple of the largest chunk size times new data disks.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, mddev->new_chunk_sectors *
- (mddev->raid_disks - max_degraded))) {
+ chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
+ new_data_disks = mddev->raid_disks - max_degraded;
+ if (sector_div(here_new, chunk_sectors * new_data_disks)) {
printk(KERN_ERR "md/raid:%s: reshape_position not "
"on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
- reshape_offset = here_new * mddev->new_chunk_sectors;
+ reshape_offset = here_new * chunk_sectors;
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
- sector_div(here_old, mddev->chunk_sectors *
- (old_disks-max_degraded));
+ sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
/* here_old is the first stripe that we might need to read
* from */
if (mddev->delta_disks == 0) {
- if ((here_new * mddev->new_chunk_sectors !=
- here_old * mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid:%s: reshape position is"
- " confused - aborting\n", mdname(mddev));
- return -EINVAL;
- }
/* We cannot be sure it is safe to start an in-place
* reshape. It is only safe if user-space is monitoring
* and taking constant backups.
@@ -6712,10 +6737,10 @@ static int run(struct mddev *mddev)
return -EINVAL;
}
} else if (mddev->reshape_backwards
- ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
- here_old * mddev->chunk_sectors)
- : (here_new * mddev->new_chunk_sectors >=
- here_old * mddev->chunk_sectors + (-min_offset_diff))) {
+ ? (here_new * chunk_sectors + min_offset_diff <=
+ here_old * chunk_sectors)
+ : (here_new * chunk_sectors >=
+ here_old * chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
printk(KERN_ERR "md/raid:%s: reshape_position too early for "
"auto-recovery - aborting.\n",
@@ -6967,7 +6992,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
- mddev->chunk_sectors / 2, mddev->layout);
+ conf->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
@@ -7173,7 +7198,9 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize;
- sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+ struct r5conf *conf = mddev->private;
+
+ sectors &= ~((sector_t)conf->chunk_sectors - 1);
newsize = raid5_size(mddev, sectors, mddev->raid_disks);
if (mddev->external_size &&
mddev->array_sectors > newsize)
@@ -7412,6 +7439,7 @@ static void end_reshape(struct r5conf *conf)
rdev->data_offset = rdev->new_data_offset;
smp_wmb();
conf->reshape_progress = MaxSector;
+ conf->mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
@@ -7757,7 +7785,6 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid5_personality =
{
@@ -7781,7 +7808,6 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid4_personality =
@@ -7806,7 +7832,6 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.congested = raid5_congested,
- .mergeable_bvec = raid5_mergeable_bvec,
};
static int __init raid5_init(void)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..828c2925e68f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -265,7 +265,7 @@ struct stripe_head_state {
int dec_preread_active;
unsigned long ops_request;
- struct bio *return_bi;
+ struct bio_list return_bi;
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
};
@@ -476,13 +476,17 @@ struct r5conf {
int skip_copy; /* Don't copy data from bio to stripe cache */
struct list_head *last_hold; /* detect hold_list promotions */
+ /* bios to have bi_end_io called after metadata is synced */
+ struct bio_list return_bi;
+
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
/* unfortunately we need two cache names as we temporarily have
* two caches.
*/
int active_name;
char cache_name[2][32];
- struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct kmem_cache *slab_cache; /* for allocating stripes */
+ struct mutex cache_size_mutex; /* Protect changes to cache size */
int seq_flush, seq_write;
int quiesce;
diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/media/common/saa7146/saa7146_hlp.c
index be746d1aee9a..3dc6a838ca6f 100644
--- a/drivers/media/common/saa7146/saa7146_hlp.c
+++ b/drivers/media/common/saa7146/saa7146_hlp.c
@@ -307,7 +307,7 @@ static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field
/* simple bubble-sort algorithm with duplicate elimination */
static int sort_and_eliminate(u32* values, int* count)
{
- int low = 0, high = 0, top = 0, temp = 0;
+ int low = 0, high = 0, top = 0;
int cur = 0, next = 0;
/* sanity checks */
@@ -318,11 +318,8 @@ static int sort_and_eliminate(u32* values, int* count)
/* bubble sort the first @count items of the array @values */
for( top = *count; top > 0; top--) {
for( low = 0, high = 1; high < top; low++, high++) {
- if( values[low] > values[high] ) {
- temp = values[low];
- values[low] = values[high];
- values[high] = temp;
- }
+ if( values[low] > values[high] )
+ swap(values[low], values[high]);
}
}
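swap() here is the generic macro from linux/kernel.h; it replaces the open-coded three-assignment exchange and lets the now-unused temp variable go. A standalone C version of the same sort step, with a local macro standing in for the kernel's (it relies on the GNU typeof extension, as the kernel's does):

#include <stdio.h>

#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned int v[] = { 3, 1, 2 };
	int count = 3, low, high, top;

	for (top = count; top > 0; top--)
		for (low = 0, high = 1; high < top; low++, high++)
			if (v[low] > v[high])
				swap(v[low], v[high]);
	for (low = 0; low < count; low++)
		printf("%u ", v[low]);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}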
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 72937756f60c..fb66184dc9b6 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -169,10 +169,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * e
/**
* Safely find needle in haystack.
*
- * @param haystack Buffer to look in.
- * @param hlen Number of bytes in haystack.
- * @param needle Buffer to find.
- * @param nlen Number of bytes in needle.
+ * @haystack: Buffer to look in.
+ * @hlen: Number of bytes in haystack.
+ * @needle: Buffer to find.
+ * @nlen: Number of bytes in needle.
* @return Pointer to where needle was found in haystack, or NULL if not found.
*/
static char *findstr(char * haystack, int hlen, char * needle, int nlen)
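Every documentation hunk in this file follows the same recipe: the free-form '@param name description' lines become kernel-doc '@name: description' lines, and the summary gains a 'function_name - ' prefix so scripts/kernel-doc can associate the comment with its function. The target shape, shown on a purely hypothetical function:

/**
 * frob_widget - Frobnicate a widget (hypothetical example, not a real API).
 *
 * @w:     Widget to frobnicate.
 * @level: How hard to frobnicate.
 *
 * @return 0 on success, nonzero on failure.
 */
static int frob_widget(struct widget *w, int level);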
@@ -197,7 +197,7 @@ static char *findstr(char * haystack, int hlen, char * needle, int nlen)
/**
- * Check CAM status.
+ * dvb_ca_en50221_check_camstatus - Check CAM status.
*/
static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
{
@@ -240,13 +240,13 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
/**
- * Wait for flags to become set on the STATUS register on a CAM interface,
- * checking for errors and timeout.
+ * dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS
+ * register on a CAM interface, checking for errors and timeout.
*
- * @param ca CA instance.
- * @param slot Slot on interface.
- * @param waitfor Flags to wait for.
- * @param timeout_ms Timeout in milliseconds.
+ * @ca: CA instance.
+ * @slot: Slot on interface.
+ * @waitfor: Flags to wait for.
+ * @timeout_ms: Timeout in milliseconds.
*
* @return 0 on success, nonzero on error.
*/
@@ -290,10 +290,10 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
/**
- * Initialise the link layer connection to a CAM.
+ * dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM.
*
- * @param ca CA instance.
- * @param slot Slot id.
+ * @ca: CA instance.
+ * @slot: Slot id.
*
* @return 0 on success, nonzero on failure.
*/
@@ -346,14 +346,14 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
}
/**
- * Read a tuple from attribute memory.
+ * dvb_ca_en50221_read_tuple - Read a tuple from attribute memory.
*
- * @param ca CA instance.
- * @param slot Slot id.
- * @param address Address to read from. Updated.
- * @param tupleType Tuple id byte. Updated.
- * @param tupleLength Tuple length. Updated.
- * @param tuple Dest buffer for tuple (must be 256 bytes). Updated.
+ * @ca: CA instance.
+ * @slot: Slot id.
+ * @address: Address to read from. Updated.
+ * @tupleType: Tuple id byte. Updated.
+ * @tupleLength: Tuple length. Updated.
+ * @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
*
* @return 0 on success, nonzero on error.
*/
@@ -399,11 +399,11 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
/**
- * Parse attribute memory of a CAM module, extracting Config register, and checking
- * it is a DVB CAM module.
+ * dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module,
+ * extracting Config register, and checking it is a DVB CAM module.
*
- * @param ca CA instance.
- * @param slot Slot id.
+ * @ca: CA instance.
+ * @slot: Slot id.
*
* @return 0 on success, <0 on failure.
*/
@@ -546,10 +546,10 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
/**
- * Set CAM's configoption correctly.
+ * dvb_ca_en50221_set_configoption - Set CAM's configoption correctly.
*
- * @param ca CA instance.
- * @param slot Slot containing the CAM.
+ * @ca: CA instance.
+ * @slot: Slot containing the CAM.
*/
static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
{
@@ -574,15 +574,16 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
/**
- * This function talks to an EN50221 CAM control interface. It reads a buffer of
- * data from the CAM. The data can either be stored in a supplied buffer, or
- * automatically be added to the slot's rx_buffer.
+ * dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control
+ * interface. It reads a buffer of data from the CAM. The data can either
+ * be stored in a supplied buffer, or automatically be added to the slot's
+ * rx_buffer.
*
- * @param ca CA instance.
- * @param slot Slot to read from.
- * @param ebuf If non-NULL, the data will be written to this buffer. If NULL,
+ * @ca: CA instance.
+ * @slot: Slot to read from.
+ * @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
* the data will be added into the buffering system as a normal fragment.
- * @param ecount Size of ebuf. Ignored if ebuf is NULL.
+ * @ecount: Size of ebuf. Ignored if ebuf is NULL.
*
* @return Number of bytes read, or < 0 on error
*/
@@ -698,14 +699,14 @@ exit:
/**
- * This function talks to an EN50221 CAM control interface. It writes a buffer of data
- * to a CAM.
+ * dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control
+ * interface. It writes a buffer of data to a CAM.
*
- * @param ca CA instance.
- * @param slot Slot to write to.
- * @param ebuf The data in this buffer is treated as a complete link-level packet to
+ * @ca: CA instance.
+ * @slot: Slot to write to.
+ * @ebuf: The data in this buffer is treated as a complete link-level packet to
* be written.
- * @param count Size of ebuf.
+ * @count: Size of ebuf.
*
* @return Number of bytes written, or < 0 on error.
*/
@@ -790,10 +791,10 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
/**
- * A CAM has been removed => shut it down.
+ * dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down.
*
- * @param ca CA instance.
- * @param slot Slot to shut down.
+ * @ca: CA instance.
+ * @slot: Slot to shut down.
*/
static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
{
@@ -815,11 +816,11 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
/**
- * A CAMCHANGE IRQ has occurred.
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
- * @param change_type One of the DVB_CA_CAMCHANGE_* values.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_CAMCHANGE_* values.
*/
void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int change_type)
{
@@ -844,10 +845,10 @@ EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);
/**
- * A CAMREADY IRQ has occurred.
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
*/
void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
{
@@ -865,8 +866,8 @@ void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
/**
* An FR or DA IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
*/
void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
{
@@ -899,7 +900,7 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
/**
* Wake up the DVB CA thread
*
- * @param ca CA instance.
+ * @ca: CA instance.
*/
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
{
@@ -914,7 +915,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
/**
* Update the delay used by the thread.
*
- * @param ca CA instance.
+ * @ca: CA instance.
*/
static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
{
@@ -1177,10 +1178,10 @@ static int dvb_ca_en50221_thread(void *data)
* Real ioctl implementation.
* NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
*
- * @param inode Inode concerned.
- * @param file File concerned.
- * @param cmd IOCTL command.
- * @param arg Associated argument.
+ * @inode: Inode concerned.
+ * @file: File concerned.
+ * @cmd: IOCTL command.
+ * @arg: Associated argument.
*
* @return 0 on success, <0 on error.
*/
@@ -1258,10 +1259,10 @@ out_unlock:
/**
* Wrapper for ioctl implementation.
*
- * @param inode Inode concerned.
- * @param file File concerned.
- * @param cmd IOCTL command.
- * @param arg Associated argument.
+ * @inode: Inode concerned.
+ * @file: File concerned.
+ * @cmd: IOCTL command.
+ * @arg: Associated argument.
*
* @return 0 on success, <0 on error.
*/
@@ -1275,10 +1276,10 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
/**
* Implementation of write() syscall.
*
- * @param file File structure.
- * @param buf Source buffer.
- * @param count Size of source buffer.
- * @param ppos Position in file (ignored).
+ * @file: File structure.
+ * @buf: Source buffer.
+ * @count: Size of source buffer.
+ * @ppos: Position in file (ignored).
*
* @return Number of bytes written, or <0 on error.
*/
@@ -1416,10 +1417,10 @@ nextslot:
/**
* Implementation of read() syscall.
*
- * @param file File structure.
- * @param buf Destination buffer.
- * @param count Size of destination buffer.
- * @param ppos Position in file (ignored).
+ * @file: File structure.
+ * @buf: Destination buffer.
+ * @count: Size of destination buffer.
+ * @ppos: Position in file (ignored).
*
* @return Number of bytes read, or <0 on error.
*/
@@ -1519,8 +1520,8 @@ exit:
/**
* Implementation of file open syscall.
*
- * @param inode Inode concerned.
- * @param file File concerned.
+ * @inode: Inode concerned.
+ * @file: File concerned.
*
* @return 0 on success, <0 on failure.
*/
@@ -1564,8 +1565,8 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
/**
* Implementation of file close syscall.
*
- * @param inode Inode concerned.
- * @param file File concerned.
+ * @inode: Inode concerned.
+ * @file: File concerned.
*
* @return 0 on success, <0 on failure.
*/
@@ -1592,8 +1593,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
/**
* Implementation of poll() syscall.
*
- * @param file File concerned.
- * @param wait poll wait table.
+ * @file: File concerned.
+ * @wait: poll wait table.
*
* @return Standard poll mask.
*/
@@ -1656,10 +1657,10 @@ static const struct dvb_device dvbdev_ca = {
/**
* Initialise a new DVB CA EN50221 interface device.
*
- * @param dvb_adapter DVB adapter to attach the new CA device to.
- * @param ca The dvb_ca instance.
- * @param flags Flags describing the CA device (DVB_CA_FLAG_*).
- * @param slot_count Number of slots supported.
+ * @dvb_adapter: DVB adapter to attach the new CA device to.
+ * @ca: The dvb_ca instance.
+ * @flags: Flags describing the CA device (DVB_CA_FLAG_*).
+ * @slot_count: Number of slots supported.
*
* @return 0 on success, nonzero on failure
*/
@@ -1743,8 +1744,8 @@ EXPORT_SYMBOL(dvb_ca_en50221_release);
/**
* Release a DVB CA EN50221 interface device.
*
- * @param ca_dev The dvb_device_t instance for the CA device.
- * @param ca The associated dvb_ca instance.
+ * @ca_dev: The dvb_device_t instance for the CA device.
+ * @ca: The associated dvb_ca instance.
*/
void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
{
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index 7df2e141187a..aba3b4fbd704 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -83,27 +83,27 @@ struct dvb_ca_en50221 {
/* Functions for reporting IRQ events */
/**
- * A CAMCHANGE IRQ has occurred.
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
- * @param change_type One of the DVB_CA_CAMCHANGE_* values
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_CAMCHANGE_* values
*/
void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221* pubca, int slot, int change_type);
/**
- * A CAMREADY IRQ has occurred.
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
*/
void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221* pubca, int slot);
/**
- * An FR or a DA IRQ has occurred.
+ * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
*
- * @param ca CA instance.
- * @param slot Slot concerned.
+ * @ca: CA instance.
+ * @slot: Slot concerned.
*/
void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221* ca, int slot);
@@ -113,21 +113,21 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221* ca, int slot);
/* Initialisation/shutdown functions */
/**
- * Initialise a new DVB CA device.
+ * dvb_ca_en50221_init - Initialise a new DVB CA device.
*
- * @param dvb_adapter DVB adapter to attach the new CA device to.
- * @param ca The dvb_ca instance.
- * @param flags Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
- * @param slot_count Number of slots supported.
+ * @dvb_adapter: DVB adapter to attach the new CA device to.
+ * @ca: The dvb_ca instance.
+ * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
+ * @slot_count: Number of slots supported.
*
* @return 0 on success, nonzero on failure
*/
extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, struct dvb_ca_en50221* ca, int flags, int slot_count);
/**
- * Release a DVB CA device.
+ * dvb_ca_en50221_release - Release a DVB CA device.
*
- * @param ca The associated dvb_ca instance.
+ * @ca: The associated dvb_ca instance.
*/
extern void dvb_ca_en50221_release(struct dvb_ca_en50221* ca);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 842b9c8f80c6..c38ef1a72b4a 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -81,7 +81,6 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
#define FESTATE_SEARCHING_SLOW (FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_SLOW)
#define FESTATE_LOSTLOCK (FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW)
-#define FE_ALGO_HW 1
/*
* FESTATE_IDLE. No tuning parameters have been supplied and the loop is idling.
* FESTATE_RETUNE. Parameters have been supplied, but we have not yet performed the first tune.
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 4816947294fe..97661b2f247a 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -48,6 +48,15 @@
*/
#define MAX_DELSYS 8
+/**
+ * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
+ *
+ * @min_delay_ms: minimum delay for tuning, in ms
+ * @step_size: step size between two consecutive frequencies
+ * @max_drift: maximum drift
+ *
+ * NOTE: step_size is in Hz for terrestrial/cable, or in kHz for satellite
+ */
struct dvb_frontend_tune_settings {
int min_delay_ms;
int step_size;
@@ -56,6 +65,20 @@ struct dvb_frontend_tune_settings {
struct dvb_frontend;
+/**
+ * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
+ *
+ * @name: name of the Frontend
+ * @frequency_min: minimal frequency supported
+ * @frequency_max: maximum frequency supported
+ * @frequency_step: frequency step
+ * @bandwidth_min: minimal frontend bandwidth supported
+ * @bandwidth_max: maximum frontend bandwidth supported
+ * @bandwidth_step: frontend bandwidth step
+ *
+ * NOTE: frequency parameters are in Hz for terrestrial/cable, or in kHz
+ * for satellite.
+ */
struct dvb_tuner_info {
char name[128];
@@ -68,6 +91,20 @@ struct dvb_tuner_info {
u32 bandwidth_step;
};
+/**
+ * struct analog_parameters - Parameters to tune into an analog/radio channel
+ *
+ * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step,
+ * for TV, or 62.5 Hz for radio)
+ * @mode: Tuner mode, as defined on enum v4l2_tuner_type
+ * @audmode: Audio mode as defined for the rxsubchans field at videodev2.h,
+ * e. g. V4L2_TUNER_MODE_*
+ * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_*
+ *
+ * Hybrid tuners should be supported by both V4L2 and DVB APIs. This
+ * struct contains the data that are used by the V4L2 side. To avoid
+ * dependencies on V4L2 headers, all enums here are declared as integers.
+ */
struct analog_parameters {
unsigned int frequency;
unsigned int mode;
@@ -75,42 +112,6 @@ struct analog_parameters {
u64 std;
};
-enum dvbfe_modcod {
- DVBFE_MODCOD_DUMMY_PLFRAME = 0,
- DVBFE_MODCOD_QPSK_1_4,
- DVBFE_MODCOD_QPSK_1_3,
- DVBFE_MODCOD_QPSK_2_5,
- DVBFE_MODCOD_QPSK_1_2,
- DVBFE_MODCOD_QPSK_3_5,
- DVBFE_MODCOD_QPSK_2_3,
- DVBFE_MODCOD_QPSK_3_4,
- DVBFE_MODCOD_QPSK_4_5,
- DVBFE_MODCOD_QPSK_5_6,
- DVBFE_MODCOD_QPSK_8_9,
- DVBFE_MODCOD_QPSK_9_10,
- DVBFE_MODCOD_8PSK_3_5,
- DVBFE_MODCOD_8PSK_2_3,
- DVBFE_MODCOD_8PSK_3_4,
- DVBFE_MODCOD_8PSK_5_6,
- DVBFE_MODCOD_8PSK_8_9,
- DVBFE_MODCOD_8PSK_9_10,
- DVBFE_MODCOD_16APSK_2_3,
- DVBFE_MODCOD_16APSK_3_4,
- DVBFE_MODCOD_16APSK_4_5,
- DVBFE_MODCOD_16APSK_5_6,
- DVBFE_MODCOD_16APSK_8_9,
- DVBFE_MODCOD_16APSK_9_10,
- DVBFE_MODCOD_32APSK_3_4,
- DVBFE_MODCOD_32APSK_4_5,
- DVBFE_MODCOD_32APSK_5_6,
- DVBFE_MODCOD_32APSK_8_9,
- DVBFE_MODCOD_32APSK_9_10,
- DVBFE_MODCOD_RESERVED_1,
- DVBFE_MODCOD_BPSK_1_3,
- DVBFE_MODCOD_BPSK_1_4,
- DVBFE_MODCOD_RESERVED_2
-};
-
enum tuner_param {
DVBFE_TUNER_FREQUENCY = (1 << 0),
DVBFE_TUNER_TUNERSTEP = (1 << 1),
@@ -121,30 +122,28 @@ enum tuner_param {
DVBFE_TUNER_DUMMY = (1 << 31)
};
-/*
- * ALGO_HW: (Hardware Algorithm)
- * ----------------------------------------------------------------
- * Devices that support this algorithm do everything in hardware
- * and no software support is needed to handle them.
- * Requesting these devices to LOCK is the only thing required,
- * device is supposed to do everything in the hardware.
- *
- * ALGO_SW: (Software Algorithm)
- * ----------------------------------------------------------------
+/**
+ * enum dvbfe_algo - defines the algorithm used to tune into a channel
+ *
+ * @DVBFE_ALGO_HW: Hardware Algorithm -
+ * Devices that support this algorithm do everything in hardware
+ * and no software support is needed to handle them.
+ * Requesting these devices to LOCK is the only thing required,
+ * device is supposed to do everything in the hardware.
+ *
+ * @DVBFE_ALGO_SW: Software Algorithm -
* These are dumb devices, that require software to do everything
*
- * ALGO_CUSTOM: (Customizable Agorithm)
- * ----------------------------------------------------------------
- * Devices having this algorithm can be customized to have specific
- * algorithms in the frontend driver, rather than simply doing a
- * software zig-zag. In this case the zigzag maybe hardware assisted
- * or it maybe completely done in hardware. In all cases, usage of
- * this algorithm, in conjunction with the search and track
- * callbacks, utilizes the driver specific algorithm.
- *
- * ALGO_RECOVERY: (Recovery Algorithm)
- * ----------------------------------------------------------------
- * These devices have AUTO recovery capabilities from LOCK failure
+ * @DVBFE_ALGO_CUSTOM: Customizable Algorithm -
+ * Devices having this algorithm can be customized to have specific
+ * algorithms in the frontend driver, rather than simply doing a
+ * software zig-zag. In this case the zigzag may be hardware assisted
+ * or it may be completely done in hardware. In all cases, usage of
+ * this algorithm, in conjunction with the search and track
+ * callbacks, utilizes the driver specific algorithm.
+ *
+ * @DVBFE_ALGO_RECOVERY: Recovery Algorithm -
+ * These devices have AUTO recovery capabilities from LOCK failure
*/
enum dvbfe_algo {
DVBFE_ALGO_HW = (1 << 0),
@@ -162,27 +161,27 @@ struct tuner_state {
u32 refclock;
};
-/*
- * search callback possible return status
+/**
+ * enum dvbfe_search - search callback possible return status
*
- * DVBFE_ALGO_SEARCH_SUCCESS
- * The frontend search algorithm completed and returned successfully
+ * @DVBFE_ALGO_SEARCH_SUCCESS:
+ * The frontend search algorithm completed and returned successfully
*
- * DVBFE_ALGO_SEARCH_ASLEEP
- * The frontend search algorithm is sleeping
+ * @DVBFE_ALGO_SEARCH_ASLEEP:
+ * The frontend search algorithm is sleeping
*
- * DVBFE_ALGO_SEARCH_FAILED
- * The frontend search for a signal failed
+ * @DVBFE_ALGO_SEARCH_FAILED:
+ * The frontend search for a signal failed
*
- * DVBFE_ALGO_SEARCH_INVALID
- * The frontend search algorith was probably supplied with invalid
- * parameters and the search is an invalid one
+ * @DVBFE_ALGO_SEARCH_INVALID:
+ * The frontend search algorithm was probably supplied with invalid
+ * parameters and the search is an invalid one
*
- * DVBFE_ALGO_SEARCH_ERROR
- * The frontend search algorithm failed due to some error
+ * @DVBFE_ALGO_SEARCH_ERROR:
+ * The frontend search algorithm failed due to some error
*
- * DVBFE_ALGO_SEARCH_AGAIN
- * The frontend search algorithm was requested to search again
+ * @DVBFE_ALGO_SEARCH_AGAIN:
+ * The frontend search algorithm was requested to search again
*/
enum dvbfe_search {
DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0),
@@ -193,7 +192,56 @@ enum dvbfe_search {
DVBFE_ALGO_SEARCH_ERROR = (1 << 31),
};
-
+/**
+ * struct dvb_tuner_ops - Tuner information and callbacks
+ *
+ * @info: embedded struct dvb_tuner_info with tuner properties
+ * @release: callback function called when frontend is detached.
+ * drivers should free any allocated memory.
+ * @init: callback function used to initialize the tuner device.
+ * @sleep: callback function used to put the tuner to sleep.
+ * @suspend: callback function used to inform that the Kernel will
+ * suspend.
+ * @resume: callback function used to inform that the Kernel is
+ * resuming from suspend.
+ * @set_params: callback function used to inform the tuner to tune
+ * into a digital TV channel. The properties to be used
+ * are stored at @dvb_frontend.dtv_property_cache;. The
+ * tuner demod can change the parameters to reflect the
+ * changes needed for the channel to be tuned, and
+ * update statistics.
+ * @set_analog_params: callback function used to tune into an analog TV
+ * channel on hybrid tuners. It passes @analog_parameters;
+ * to the driver.
+ * @calc_regs: callback function used to pass register data settings
+ * for simple tuners.
+ * @set_config: callback function used to send some tuner-specific
+ * parameters.
+ * @get_frequency: get the actual tuned frequency
+ * @get_bandwidth: get the bandwidth used by the low pass filters
+ * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
+ * should return 0.
+ * @get_status: returns the frontend lock status
+ * @get_rf_strength: returns the RF signal strength. Used mostly to support
+ * analog TV and radio. Digital TV should report, instead,
+ * via DVBv5 API (@dvb_frontend.dtv_property_cache;).
+ * @get_afc: Used only by analog TV core. Reports the frequency
+ * drift due to AFC.
+ * @set_frequency: Set a new frequency. Please notice that using
+ * set_params is preferred.
+ * @set_bandwidth: Set a new bandwidth. Please notice that using
+ * set_params is preferred.
+ * @set_state: callback function used on some legacy drivers that
+ * don't implement set_params in order to set properties.
+ * Shouldn't be used on new drivers.
+ * @get_state: callback function used to get properties by some
+ * legacy drivers that don't implement set_params.
+ * Shouldn't be used on new drivers.
+ *
+ * NOTE: frequencies used on get_frequency and set_frequency are in Hz for
+ * terrestrial/cable or kHz for satellite.
+ *
+ */
struct dvb_tuner_ops {
struct dvb_tuner_info info;
@@ -237,10 +285,37 @@ struct dvb_tuner_ops {
int (*get_state)(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state);
};
+/**
+ * struct analog_demod_info - Information struct for analog TV part of the demod
+ *
+ * @name: Name of the analog TV demodulator
+ */
struct analog_demod_info {
char *name;
};
+/**
+ * struct analog_demod_ops - Demodulation information and callbacks for
+ * analog TV and radio
+ *
+ * @info: pointer to struct analog_demod_info
+ * @set_params: callback function used to inform the demod to set the
+ * demodulator parameters needed to decode an analog or
+ * radio channel. The properties are passed via
+ * struct @analog_params;.
+ * @has_signal: returns 0xffff if it has a signal, or 0 if it doesn't.
+ * @get_afc: Used only by analog TV core. Reports the frequency
+ * drift due to AFC.
+ * @tuner_status: callback function that returns tuner status bits, e. g.
+ * TUNER_STATUS_LOCKED and TUNER_STATUS_STEREO.
+ * @standby: set the tuner to standby mode.
+ * @release: callback function called when frontend is detached.
+ * drivers should free any allocated memory.
+ * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
+ * mux support instead.
+ * @set_config: callback function used to send some tuner-specific
+ * parameters.
+ */
struct analog_demod_ops {
struct analog_demod_info info;
@@ -260,6 +335,87 @@ struct analog_demod_ops {
struct dtv_frontend_properties;
+
+/**
+ * struct dvb_frontend_ops - Demodulation information and callbacks for
+ * digital TV
+ *
+ * @info: embedded struct dvb_tuner_info with tuner properties
+ * @delsys: Delivery systems supported by the frontend
+ * @release: callback function called when frontend is detached.
+ * drivers should free any allocated memory.
+ * @release_sec: callback function requesting that the Satellite Equipment
+ * Control (SEC) driver release and free any memory
+ * allocated by the driver.
+ * @init: callback function used to initialize the tuner device.
+ * @sleep: callback function used to put the tuner to sleep.
+ * @write: callback function used by some demod legacy drivers to
+ * allow other drivers to write data into their registers.
+ * Should not be used on new drivers.
+ * @tune: callback function used by demod drivers that use
+ * @DVBFE_ALGO_HW; to tune into a frequency.
+ * @get_frontend_algo: returns the desired hardware algorithm.
+ * @set_frontend: callback function used to inform the demod to set the
+ * parameters for demodulating a digital TV channel.
+ * The properties to be used are stored at
+ * @dvb_frontend.dtv_property_cache;. The demod can change
+ * the parameters to reflect the changes needed for the
+ * channel to be decoded, and update statistics.
+ * @get_tune_settings: callback function used to get the tuning settings.
+ * @get_frontend: callback function used to report the parameters
+ * actually in use. The properties to be used are stored at
+ * @dvb_frontend.dtv_property_cache; and update
+ * statistics. Please notice that it should not return
+ * an error code if the statistics are not available
+ * because the demod is not locked.
+ * @read_status: returns the locking status of the frontend.
+ * @read_ber: legacy callback function to return the bit error rate.
+ * Newer drivers should provide such info via DVBv5 API,
+ * e. g. @set_frontend;/@get_frontend;, implementing this
+ * callback only if DVBv3 API compatibility is wanted.
+ * @read_signal_strength: legacy callback function to return the signal
+ * strength. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @read_snr: legacy callback function to return the Signal/Noise
+ * rate. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @read_ucblocks: legacy callback function to return the Uncorrected Error
+ * Blocks. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @diseqc_reset_overload: callback function to implement the
+ * FE_DISEQC_RESET_OVERLOAD ioctl (only Satellite)
+ * @diseqc_send_master_cmd: callback function to implement the
+ * FE_DISEQC_SEND_MASTER_CMD ioctl (only Satellite).
+ * @diseqc_recv_slave_reply: callback function to implement the
+ * FE_DISEQC_RECV_SLAVE_REPLY ioctl (only Satellite)
+ * @diseqc_send_burst: callback function to implement the
+ * FE_DISEQC_SEND_BURST ioctl (only Satellite).
+ * @set_tone: callback function to implement the
+ * FE_SET_TONE ioctl (only Satellite).
+ * @set_voltage: callback function to implement the
+ * FE_SET_VOLTAGE ioctl (only Satellite).
+ * @enable_high_lnb_voltage: callback function to implement the
+ * FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite).
+ * @dishnetwork_send_legacy_command: callback function to implement the
+ * FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
+ * mux support instead.
+ * @ts_bus_ctrl: callback function used to take control of the TS bus.
+ * @set_lna: callback function to power on/off/auto the LNA.
+ * @search: callback function used on some custom algo search algos.
+ * @tuner_ops: pointer to struct dvb_tuner_ops
+ * @analog_ops: pointer to struct analog_demod_ops
+ * @set_property: callback function to allow the frontend to validate
+ * incoming properties. Should not be used on new drivers.
+ * @get_property: callback function to allow the frontend to override
+ * outgoing properties. Should not be used on new drivers.
+ */
struct dvb_frontend_ops {
struct dvb_frontend_info info;
@@ -280,6 +436,7 @@ struct dvb_frontend_ops {
unsigned int mode_flags,
unsigned int *delay,
enum fe_status *status);
+
/* get frontend tuning algorithm from the module */
enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
@@ -324,6 +481,7 @@ struct dvb_frontend_ops {
#ifdef __DVB_CORE__
#define MAX_EVENT 8
+/* Used only internally at dvb_frontend.c */
struct dvb_fe_events {
struct dvb_frontend_event events[MAX_EVENT];
int eventw;
@@ -334,13 +492,83 @@ struct dvb_fe_events {
};
#endif
+/**
+ * struct dtv_frontend_properties - contains a list of properties that are
+ * specific to a digital TV standard.
+ *
+ * @frequency: frequency in Hz for terrestrial/cable or in kHz for
+ * Satellite
+ * @modulation: Frontend modulation type
+ * @voltage: SEC voltage (only Satellite)
+ * @sectone: SEC tone mode (only Satellite)
+ * @inversion: Spectral inversion
+ * @fec_inner: Forward error correction inner Code Rate
+ * @transmission_mode: Transmission Mode
+ * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace
+ * wants to autodetect.
+ * @guard_interval: Guard Interval
+ * @hierarchy: Hierarchy
+ * @symbol_rate: Symbol Rate
+ * @code_rate_HP: high priority stream code rate
+ * @code_rate_LP: low priority stream code rate
+ * @pilot: Enable/disable/autodetect pilot tones
+ * @rolloff: Rolloff factor (alpha)
+ * @delivery_system: FE delivery system (e. g. digital TV standard)
+ * @interleaving: interleaving
+ * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard)
+ * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard)
+ * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard)
+ * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard)
+ * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard)
+ * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard)
+ * @layer: ISDB per-layer data (only ISDB standard)
+ * @layer.segment_count: Segment Count;
+ * @layer.fec: per layer code rate;
+ * @layer.modulation: per layer modulation;
+ * @layer.interleaving: per layer interleaving.
+ * @stream_id: If different than zero, enable substream filtering, if
+ * hardware supports (DVB-S2 and DVB-T2).
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel)
+ * signaling data (only ATSC-M/H)
+ * @atscmh_parade_id: Parade identification number (only ATSC-M/H)
+ * @atscmh_nog: Number of MH groups per MH subframe for a designated
+ * parade (only ATSC-M/H)
+ * @atscmh_tnog: Total number of MH groups including all MH groups
+ * belonging to all MH parades in one MH subframe
+ * (only ATSC-M/H)
+ * @atscmh_sgn: Start group number (only ATSC-M/H)
+ * @atscmh_prc: Parade repetition cycle (only ATSC-M/H)
+ * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H)
+ * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H)
+ * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H)
+ * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H)
+ * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC)
+ * Block Mode (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H)
+ * @lna: Power ON/OFF/AUTO the Low-Noise Amplifier (LNA)
+ * @strength: DVBv5 API statistics: Signal Strength
+ * @cnr: DVBv5 API statistics: Signal to Noise ratio of the
+ * (main) carrier
+ * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count
+ * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count
+ * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count
+ * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count
+ * @block_error: DVBv5 API statistics: block error count
+ * @block_count: DVBv5 API statistics: block count
+ *
+ * NOTE: derived statistics like Uncorrected Error Blocks (UCE) are
+ * calculated in userspace.
+ *
+ * Only a subset of the properties are needed for a given delivery system.
+ * For more info, consult media_api.html, which contains the documentation
+ * of the userspace API.
+ */
struct dtv_frontend_properties {
-
- /* Cache State */
- u32 state;
-
u32 frequency;
- enum fe_modulation modulation;
+ enum fe_modulation modulation;
enum fe_sec_voltage voltage;
enum fe_sec_tone_mode sectone;
@@ -407,6 +635,11 @@ struct dtv_frontend_properties {
struct dtv_fe_stats post_bit_count;
struct dtv_fe_stats block_error;
struct dtv_fe_stats block_count;
+
+ /* private: */
+ /* Cache State */
+ u32 state;
+
};
#define DVB_FE_NO_EXIT 0
@@ -414,6 +647,25 @@ struct dtv_frontend_properties {
#define DVB_FE_DEVICE_REMOVED 2
#define DVB_FE_DEVICE_RESUME 3
+/**
+ * struct dvb_frontend - Frontend structure to be used on drivers.
+ *
+ * @ops: embedded struct dvb_frontend_ops
+ * @dvb: pointer to struct dvb_adapter
+ * @demodulator_priv: demod private data
+ * @tuner_priv: tuner private data
+ * @frontend_priv: frontend private data
+ * @sec_priv: SEC private data
+ * @analog_demod_priv: Analog demod private data
+ * @dtv_property_cache: embedded struct dtv_frontend_properties
+ * @callback: callback function used on some drivers to call
+ * either the tuner or the demodulator.
+ * @id: Frontend ID
+ * @exit: Used to inform the DVB core that the frontend
+ * thread should exit (usually meaning that the hardware
+ * got disconnected).
+ */
+
struct dvb_frontend {
struct dvb_frontend_ops ops;
struct dvb_adapter *dvb;
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index aecc867e9404..34dc1df03cab 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -25,33 +25,38 @@
#include <linux/types.h>
/**
- * computes log2 of a value; the result is shifted left by 24 bits
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
*
* to use rational values you can use the following method:
* intlog2(value) = intlog2(value * 2^x) - x * 2^24
*
- * example: intlog2(8) will give 3 << 24 = 3 * 2^24
- * example: intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
- * example: intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
+ * Some use-case examples:
+ * intlog2(8) will give 3 << 24 = 3 * 2^24
+ * intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
+ * intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
+ *
*
- * @param value The value (must be != 0)
- * @return log2(value) * 2^24
+ * return: log2(value) * 2^24
*/
extern unsigned int intlog2(u32 value);
/**
- * computes log10 of a value; the result is shifted left by 24 bits
+ * intlog10 - computes log10 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
*
* to use rational values you can use the following method:
* intlog10(value) = intlog10(value * 10^x) - x * 2^24
*
- * example: intlog10(1000) will give 3 << 24 = 3 * 2^24
+ * A use-case example:
+ * intlog10(1000) will give 3 << 24 = 3 * 2^24
* due to the implementation, intlog10(1000) might not be exactly 3 * 2^24
*
* look at intlog2 for similar examples
*
- * @param value The value (must be != 0)
- * @return log10(value) * 2^24
+ * return: log10(value) * 2^24
*/
extern unsigned int intlog10(u32 value);
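Both helpers return a fixed-point result: the integer part of the logarithm sits in the high bits and the fraction in the low 24. A userspace check of the documented identities; this uses floating point purely to illustrate the scaling and is not the kernel's integer-only algorithm (build with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for the kernel's integer-only intlog2() */
static unsigned int intlog2(uint32_t value)
{
	return (unsigned int)(log2((double)value) * (1 << 24));
}

int main(void)
{
	printf("intlog2(8) = %u, 3 * 2^24 = %u\n", intlog2(8), 3u << 24);
	printf("intlog2(9) = %.3f * 2^24\n",
	       intlog2(9) / (double)(1 << 24));
	/* rational argument: intlog2(1.5) = intlog2(3) - 1 * 2^24 */
	printf("intlog2(1.5) = %.3f * 2^24\n",
	       (intlog2(3) - (1 << 24)) / (double)(1 << 24));
	return 0;
}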
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index a694fb1ea228..b81e026edab3 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -709,7 +709,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
if (!priv->ule_dbit) {
/* dest_addr buffer is only valid if priv->ule_dbit == 0 */
memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
- memset(ethh->h_source, 0, ETH_ALEN);
+ eth_zero_addr(ethh->h_source);
}
else /* zeroize source and dest */
memset( ethh, 0, ETH_ALEN*2 );
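eth_zero_addr() is a readability helper equivalent to memset(addr, 0, ETH_ALEN); a userspace rendering of what the call does:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* octets in one Ethernet address */

static void eth_zero_addr(unsigned char *addr)
{
	memset(addr, 0, ETH_ALEN);	/* what the kernel helper expands to */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	int i;

	eth_zero_addr(mac);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", mac[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}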
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 9e1e11b7c39c..3ebc2d34b4a2 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -45,33 +45,33 @@ struct dvb_ringbuffer {
/*
-** Notes:
-** ------
-** (1) For performance reasons read and write routines don't check buffer sizes
-** and/or number of bytes free/available. This has to be done before these
-** routines are called. For example:
-**
-** *** write <buflen> bytes ***
-** free = dvb_ringbuffer_free(rbuf);
-** if (free >= buflen)
-** count = dvb_ringbuffer_write(rbuf, buffer, buflen);
-** else
-** ...
-**
-** *** read min. 1000, max. <bufsize> bytes ***
-** avail = dvb_ringbuffer_avail(rbuf);
-** if (avail >= 1000)
-** count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
-** else
-** ...
-**
-** (2) If there is exactly one reader and one writer, there is no need
-** to lock read or write operations.
-** Two or more readers must be locked against each other.
-** Flushing the buffer counts as a read operation.
-** Resetting the buffer counts as a read and write operation.
-** Two or more writers must be locked against each other.
-*/
+ * Notes:
+ * ------
+ * (1) For performance reasons read and write routines don't check buffer sizes
+ * and/or number of bytes free/available. This has to be done before these
+ * routines are called. For example:
+ *
+ * *** write @buflen: bytes ***
+ * free = dvb_ringbuffer_free(rbuf);
+ * if (free >= buflen)
+ * count = dvb_ringbuffer_write(rbuf, buffer, buflen);
+ * else
+ * ...
+ *
+ * *** read min. 1000, max. @bufsize: bytes ***
+ * avail = dvb_ringbuffer_avail(rbuf);
+ * if (avail >= 1000)
+ * count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
+ * else
+ * ...
+ *
+ * (2) If there is exactly one reader and one writer, there is no need
+ * to lock read or write operations.
+ * Two or more readers must be locked against each other.
+ * Flushing the buffer counts as a read operation.
+ * Resetting the buffer counts as a read and write operation.
+ * Two or more writers must be locked against each other.
+ */
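The free/avail checks the notes call for are plain modular arithmetic over the read and write indices. One common formulation, sketched here under the assumption that a single slot is sacrificed so a full buffer is never mistaken for an empty one (an assumption for illustration, not read out of this header):

#include <stdio.h>

#define RB_SIZE 8	/* illustrative power-of-two buffer size */

/* one slot stays empty, so pread == pwrite always means "empty" */
static size_t rb_free(size_t pread, size_t pwrite)
{
	return (pread - pwrite - 1 + RB_SIZE) % RB_SIZE;
}

static size_t rb_avail(size_t pread, size_t pwrite)
{
	/* power-of-two size keeps the unsigned wrap-around harmless */
	return (pwrite - pread + RB_SIZE) % RB_SIZE;
}

int main(void)
{
	size_t pread = 6, pwrite = 2;	/* writer has wrapped around */

	printf("free=%zu avail=%zu\n",
	       rb_free(pread, pwrite), rb_avail(pread, pwrite)); /* 3 and 4 */
	return 0;
}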
/* initialize ring buffer, lock and queue */
extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len);
@@ -87,9 +87,9 @@ extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);
/*
-** Reset the read and write pointers to zero and flush the buffer
-** This counts as a read and write operation
-*/
+ * Reset the read and write pointers to zero and flush the buffer
+ * This counts as a read and write operation
+ */
extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
@@ -101,19 +101,19 @@ extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);
/* flush buffer protected by spinlock and wake-up waiting task(s) */
extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
-/* peek at byte <offs> in the buffer */
+/* peek at byte @offs: in the buffer */
#define DVB_RINGBUFFER_PEEK(rbuf,offs) \
(rbuf)->data[((rbuf)->pread+(offs))%(rbuf)->size]
-/* advance read ptr by <num> bytes */
+/* advance read ptr by @num: bytes */
#define DVB_RINGBUFFER_SKIP(rbuf,num) \
(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
/*
-** read <len> bytes from ring buffer into <buf>
-** <usermem> specifies whether <buf> resides in user space
-** returns number of bytes transferred or -EFAULT
-*/
+ * read @len: bytes from ring buffer into @buf:
+ * @usermem: specifies whether @buf: resides in user space
+ * returns number of bytes transferred or -EFAULT
+ */
extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
u8 __user *buf, size_t len);
extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
@@ -127,9 +127,9 @@ extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
{ (rbuf)->data[(rbuf)->pwrite]=(byte); \
(rbuf)->pwrite=((rbuf)->pwrite+1)%(rbuf)->size; }
/*
-** write <len> bytes to ring buffer
-** <usermem> specifies whether <buf> resides in user space
-** returns number of bytes transferred or -EFAULT
+ * write @len: bytes to ring buffer
+ * @usermem: specifies whether @buf: resides in user space
+ * returns number of bytes transferred or -EFAULT
*/
extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
size_t len);
@@ -138,48 +138,63 @@ extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
/**
- * Write a packet into the ringbuffer.
+ * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
*
- * <rbuf> Ringbuffer to write to.
- * <buf> Buffer to write.
- * <len> Length of buffer (currently limited to 65535 bytes max).
+ * @rbuf: Ringbuffer to write to.
+ * @buf: Buffer to write.
+ * @len: Length of buffer (currently limited to 65535 bytes max).
* returns Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
*/
extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
size_t len);
/**
- * Read from a packet in the ringbuffer. Note: unlike dvb_ringbuffer_read(), this
- * does NOT update the read pointer in the ringbuffer. You must use
- * dvb_ringbuffer_pkt_dispose() to mark a packet as no longer required.
- *
- * <rbuf> Ringbuffer concerned.
- * <idx> Packet index as returned by dvb_ringbuffer_pkt_next().
- * <offset> Offset into packet to read from.
- * <buf> Destination buffer for data.
- * <len> Size of destination buffer.
- * <usermem> Set to 1 if <buf> is in userspace.
+ * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
+ * Note: unlike dvb_ringbuffer_read(), this does NOT update the read pointer
+ * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
+ * packet as no longer required.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
* returns Number of bytes read, or -EFAULT.
*/
extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
+ * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer
+ * in the ringbuffer.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
+ * returns Number of bytes read, or -EFAULT.
+ */
extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 *buf, size_t len);
/**
- * Dispose of a packet in the ring buffer.
+ * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer.
*
- * <rbuf> Ring buffer concerned.
- * <idx> Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @rbuf: Ring buffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
*/
extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
/**
- * Get the index of the next packet in a ringbuffer.
+ * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer.
*
- * <rbuf> Ringbuffer concerned.
- * <idx> Previous packet index, or -1 to return the first packet index.
- * <pktlen> On success, will be updated to contain the length of the packet in bytes.
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Previous packet index, or -1 to return the first packet index.
+ * @pktlen: On success, will be updated to contain the length of the packet in bytes.
* returns Packet index (if >=0), or -1 if no packets available.
*/
extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen);
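
/*
 * A minimal sketch of the packet API documented above: repeatedly take
 * the first pending packet, copy it out and release it (rbuf and dest
 * are illustrative):
 *
 * ssize_t idx;
 * size_t pktlen;
 *
 * while ((idx = dvb_ringbuffer_pkt_next(rbuf, -1, &pktlen)) >= 0) {
 *         dvb_ringbuffer_pkt_read(rbuf, idx, 0, dest,
 *                                 min(pktlen, sizeof(dest)));
 *         dvb_ringbuffer_pkt_dispose(rbuf, idx);
 * }
 */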
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 12629b8ecb0c..c61a4f03a66f 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -57,6 +57,25 @@
struct dvb_frontend;
+/**
+ * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API
+ *
+ * @num: Number of the adapter
+ * @list_head: List with the DVB adapters
+ * @device_list: List with the DVB devices
+ * @name: Name of the adapter
+ * @proposed_mac: proposed MAC address for the adapter
+ * @priv: private data
+ * @device: pointer to struct device
+ * @module: pointer to struct module
+ * @mfe_shared: indicates mutually exclusive frontends.
+ * The usage of this flag is currently deprecated
+ * @mfe_dvbdev: Frontend device in use, in the case of MFE
+ * @mfe_lock: Lock to prevent using the other frontends when MFE is
+ * used.
+ * @mdev: pointer to struct media_device, used when the media
+ * controller is used.
+ */
struct dvb_adapter {
int num;
struct list_head list_head;
@@ -78,7 +97,34 @@ struct dvb_adapter {
#endif
};
-
+/**
+ * struct dvb_device - represents a DVB device node
+ *
+ * @list_head: List head with all DVB devices
+ * @fops: pointer to struct file_operations
+ * @adapter: pointer to the adapter that holds this device node
+ * @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
+ * DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ * @minor: devnode minor number. Major number is always DVB_MAJOR.
+ * @id: device ID number, inside the adapter
+ * @readers: Initialized by the caller. Each call to open() in Read Only mode
+ * decreases this counter by one.
+ * @writers: Initialized by the caller. Each call to open() in Read/Write
+ * mode decreases this counter by one.
+ * @users: Initialized by the caller. Each call to open() in any mode
+ * decreases this counter by one.
+ * @wait_queue: wait queue, used to wait for certain events inside one of
+ * the DVB API callers
+ * @kernel_ioctl: callback function used to handle ioctl calls from userspace.
+ * @name: Name to be used for the device at the Media Controller
+ * @entity: pointer to struct media_entity associated with the device node
+ * @pads: pointer to struct media_pad associated with @entity;
+ * @priv: private data
+ *
+ * This structure is used by the DVB core (frontend, CA, net, demux) in
+ * order to create the device nodes. Usually, drivers should not initialize
+ * this struct directly.
+ */
struct dvb_device {
struct list_head list_head;
const struct file_operations *fops;
@@ -109,19 +155,55 @@ struct dvb_device {
void *priv;
};
+/**
+ * dvb_register_adapter - Registers a new DVB adapter
+ *
+ * @adap: pointer to struct dvb_adapter
+ * @name: Adapter's name
+ * @module: initialized with THIS_MODULE by the caller
+ * @device: pointer to struct device that corresponds to the device driver
+ * @adapter_nums: Array with a list of adapter numbers to try. Typically,
+ * initialized with:
+ * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums)
+ */
+int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
+ struct module *module, struct device *device,
+ short *adapter_nums);
-extern int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
- struct module *module, struct device *device,
- short *adapter_nums);
-extern int dvb_unregister_adapter (struct dvb_adapter *adap);
-
-extern int dvb_register_device (struct dvb_adapter *adap,
- struct dvb_device **pdvbdev,
- const struct dvb_device *template,
- void *priv,
- int type);
+/**
+ * dvb_unregister_adapter - Unregisters a DVB adapter
+ *
+ * @adap: pointer to struct dvb_adapter
+ */
+int dvb_unregister_adapter(struct dvb_adapter *adap);
-extern void dvb_unregister_device (struct dvb_device *dvbdev);
+/**
+ * dvb_register_device - Registers a new DVB device
+ *
+ * @adap: pointer to struct dvb_adapter
+ * @pdvbdev: pointer to the place where the new struct dvb_device will be
+ * stored
+ * @template: Template used to create @pdvbdev
+ * @priv: private data
+ * @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
+ * DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ */
+int dvb_register_device(struct dvb_adapter *adap,
+ struct dvb_device **pdvbdev,
+ const struct dvb_device *template,
+ void *priv,
+ int type);
+
+/**
+ * dvb_unregister_device - Unregisters a DVB device
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_unregister_device(struct dvb_device *dvbdev);
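+
+/*
+ * A minimal sketch of the registration flow above, as seen from a
+ * driver's probe path (my_template, my_priv and the device type are
+ * illustrative):
+ *
+ * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums);
+ *
+ * ret = dvb_register_adapter(&adap, "my-adapter", THIS_MODULE,
+ *                            &pdev->dev, adapter_nums);
+ * if (ret < 0)
+ *         return ret;
+ * ret = dvb_register_device(&adap, &dvbdev, &my_template,
+ *                           my_priv, DVB_DEVICE_DEMUX);
+ */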
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
void dvb_create_media_graph(struct dvb_adapter *adap);
@@ -136,17 +218,17 @@ static inline void dvb_create_media_graph(struct dvb_adapter *adap) {}
#define dvb_register_media_controller(a, b) {}
#endif
-extern int dvb_generic_open (struct inode *inode, struct file *file);
-extern int dvb_generic_release (struct inode *inode, struct file *file);
-extern long dvb_generic_ioctl (struct file *file,
+int dvb_generic_open (struct inode *inode, struct file *file);
+int dvb_generic_release (struct inode *inode, struct file *file);
+long dvb_generic_ioctl (struct file *file,
unsigned int cmd, unsigned long arg);
/* we don't mess with video_usercopy() any more,
 we simply define our own dvb_usercopy(), which will hopefully become
generic_usercopy() someday... */
-extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
- int (*func)(struct file *file, unsigned int cmd, void *arg));
+int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file, unsigned int cmd, void *arg));
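+
+/*
+ * A minimal sketch of a dvb_usercopy() caller (my_ioctl and
+ * my_do_ioctl are illustrative); dvb_usercopy() copies the ioctl
+ * argument between user and kernel space around the callback:
+ *
+ * static long my_ioctl(struct file *file, unsigned int cmd,
+ *                      unsigned long arg)
+ * {
+ *         return dvb_usercopy(file, cmd, arg, my_do_ioctl);
+ * }
+ */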
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..292c9479bb75 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -1,5 +1,5 @@
menu "Customise DVB Frontends"
- visible if !MEDIA_SUBDRV_AUTOSELECT
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
comment "Multistandard (satellite) frontends"
depends on DVB_CORE
@@ -240,7 +240,7 @@ config DVB_SI21XX
config DVB_TS2020
tristate "Montage Tehnology TS2020 based tuners"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -264,6 +264,7 @@ config DVB_MB86A16
config DVB_TDA10071
tristate "NXP TDA10071"
depends on DVB_CORE && I2C
+ select REGMAP
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
@@ -450,6 +451,13 @@ config DVB_CXD2820R
help
Say Y when you want to support this frontend.
+config DVB_CXD2841ER
+ tristate "Sony CXD2841ER"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
config DVB_RTL2830
tristate "Realtek RTL2830 DVB-T"
depends on DVB_CORE && I2C && I2C_MUX
@@ -712,6 +720,14 @@ comment "SEC control devices for DVB-S"
source "drivers/media/dvb-frontends/drx39xyj/Kconfig"
+config DVB_LNBH25
+ tristate "LNBH25 SEC controller"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ An SEC control chip.
+ Say Y when you want to support this chip.
+
config DVB_LNBP21
tristate "LNBP21/LNBH24 SEC controllers"
depends on DVB_CORE && I2C
@@ -815,6 +831,20 @@ config DVB_AF9033
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
+config DVB_HORUS3A
+ tristate "Sony Horus3A tuner"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
+config DVB_ASCOT2E
+ tristate "Sony Ascot2E tuner"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index ebab1b83e1fc..37ef17b5b995 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
+obj-$(CONFIG_DVB_LNBH25) += lnbh25.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
obj-$(CONFIG_DVB_ISL6405) += isl6405.o
@@ -105,6 +106,7 @@ obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
+obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o
obj-$(CONFIG_DVB_DRXK) += drxk.o
obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
obj-$(CONFIG_DVB_SI2165) += si2165.o
@@ -118,3 +120,5 @@ obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
+obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
+obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 97ecbe01034c..e1e9bddcf516 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -12,163 +12,69 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include "dvb_frontend.h"
#include "a8293.h"
-struct a8293_priv {
- u8 i2c_addr;
- struct i2c_adapter *i2c;
+struct a8293_dev {
struct i2c_client *client;
u8 reg[2];
};
-static int a8293_i2c(struct a8293_priv *priv, u8 *val, int len, bool rd)
-{
- int ret;
- struct i2c_msg msg[1] = {
- {
- .addr = priv->i2c_addr,
- .len = len,
- .buf = val,
- }
- };
-
- if (rd)
- msg[0].flags = I2C_M_RD;
- else
- msg[0].flags = 0;
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c failed=%d rd=%d\n",
- KBUILD_MODNAME, ret, rd);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-static int a8293_wr(struct a8293_priv *priv, u8 *val, int len)
-{
- return a8293_i2c(priv, val, len, 0);
-}
-
-static int a8293_rd(struct a8293_priv *priv, u8 *val, int len)
-{
- return a8293_i2c(priv, val, len, 1);
-}
-
static int a8293_set_voltage(struct dvb_frontend *fe,
- enum fe_sec_voltage fe_sec_voltage)
+ enum fe_sec_voltage fe_sec_voltage)
{
- struct a8293_priv *priv = fe->sec_priv;
+ struct a8293_dev *dev = fe->sec_priv;
+ struct i2c_client *client = dev->client;
int ret;
+ u8 reg0, reg1;
- dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__,
- fe_sec_voltage);
+ dev_dbg(&client->dev, "fe_sec_voltage=%d\n", fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_OFF:
/* ENB=0 */
- priv->reg[0] = 0x10;
+ reg0 = 0x10;
break;
case SEC_VOLTAGE_13:
/* VSEL0=1, VSEL1=0, VSEL2=0, VSEL3=0, ENB=1*/
- priv->reg[0] = 0x31;
+ reg0 = 0x31;
break;
case SEC_VOLTAGE_18:
/* VSEL0=0, VSEL1=0, VSEL2=0, VSEL3=1, ENB=1*/
- priv->reg[0] = 0x38;
+ reg0 = 0x38;
break;
default:
ret = -EINVAL;
goto err;
}
-
- ret = a8293_wr(priv, &priv->reg[0], 1);
- if (ret)
- goto err;
-
- usleep_range(1500, 50000);
-
- return ret;
-err:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static void a8293_release_sec(struct dvb_frontend *fe)
-{
- a8293_set_voltage(fe, SEC_VOLTAGE_OFF);
-
- kfree(fe->sec_priv);
- fe->sec_priv = NULL;
-}
-
-struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct a8293_config *cfg)
-{
- int ret;
- struct a8293_priv *priv = NULL;
- u8 buf[2];
-
- /* allocate memory for the internal priv */
- priv = kzalloc(sizeof(struct a8293_priv), GFP_KERNEL);
- if (priv == NULL) {
- ret = -ENOMEM;
- goto err;
+ if (reg0 != dev->reg[0]) {
+ ret = i2c_master_send(client, &reg0, 1);
+ if (ret < 0)
+ goto err;
+ dev->reg[0] = reg0;
}
- /* setup the priv */
- priv->i2c = i2c;
- priv->i2c_addr = cfg->i2c_addr;
- fe->sec_priv = priv;
-
- /* check if the SEC is there */
- ret = a8293_rd(priv, buf, 2);
- if (ret)
- goto err;
-
- /* ENB=0 */
- priv->reg[0] = 0x10;
- ret = a8293_wr(priv, &priv->reg[0], 1);
- if (ret)
- goto err;
-
/* TMODE=0, TGATE=1 */
- priv->reg[1] = 0x82;
- ret = a8293_wr(priv, &priv->reg[1], 1);
- if (ret)
- goto err;
-
- fe->ops.release_sec = a8293_release_sec;
-
- /* override frontend ops */
- fe->ops.set_voltage = a8293_set_voltage;
-
- dev_info(&priv->i2c->dev, "%s: Allegro A8293 SEC attached\n",
- KBUILD_MODNAME);
+ reg1 = 0x82;
+ if (reg1 != dev->reg[1]) {
+ ret = i2c_master_send(client, &reg1, 1);
+ if (ret < 0)
+ goto err;
+ dev->reg[1] = reg1;
+ }
- return fe;
+ usleep_range(1500, 50000);
+ return 0;
err:
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
- kfree(priv);
- return NULL;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
-EXPORT_SYMBOL(a8293_attach);
static int a8293_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
- struct a8293_priv *dev;
+ struct a8293_dev *dev;
struct a8293_platform_data *pdata = client->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
int ret;
@@ -181,29 +87,14 @@ static int a8293_probe(struct i2c_client *client,
}
dev->client = client;
- dev->i2c = client->adapter;
- dev->i2c_addr = client->addr;
/* check if the SEC is there */
- ret = a8293_rd(dev, buf, 2);
- if (ret)
- goto err_kfree;
-
- /* ENB=0 */
- dev->reg[0] = 0x10;
- ret = a8293_wr(dev, &dev->reg[0], 1);
- if (ret)
- goto err_kfree;
-
- /* TMODE=0, TGATE=1 */
- dev->reg[1] = 0x82;
- ret = a8293_wr(dev, &dev->reg[1], 1);
- if (ret)
+ ret = i2c_master_recv(client, buf, 2);
+ if (ret < 0)
goto err_kfree;
/* override frontend ops */
fe->ops.set_voltage = a8293_set_voltage;
-
fe->sec_priv = dev;
i2c_set_clientdata(client, dev);
@@ -234,7 +125,6 @@ MODULE_DEVICE_TABLE(i2c, a8293_id_table);
static struct i2c_driver a8293_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "a8293",
.suppress_bind_attrs = true,
},
diff --git a/drivers/media/dvb-frontends/a8293.h b/drivers/media/dvb-frontends/a8293.h
index aff36538f582..7b90a03fcd0a 100644
--- a/drivers/media/dvb-frontends/a8293.h
+++ b/drivers/media/dvb-frontends/a8293.h
@@ -12,17 +12,12 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef A8293_H
#define A8293_H
#include "dvb_frontend.h"
-#include <linux/kconfig.h>
/*
* I2C address
@@ -37,21 +32,4 @@ struct a8293_platform_data {
struct dvb_frontend *dvb_frontend;
};
-
-struct a8293_config {
- u8 i2c_addr;
-};
-
-#if IS_REACHABLE(CONFIG_DVB_A8293)
-extern struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct a8293_config *cfg);
-#else
-static inline struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct a8293_config *cfg)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
-
#endif /* A8293_H */
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 59018afaa95f..bc35206a0821 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1387,7 +1387,6 @@ MODULE_DEVICE_TABLE(i2c, af9033_id_table);
static struct i2c_driver af9033_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "af9033",
},
.probe = af9033_probe,
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
new file mode 100644
index 000000000000..f770f6a2c987
--- /dev/null
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -0,0 +1,548 @@
+/*
+ * ascot2e.c
+ *
+ * Sony Ascot2E DVB-T/T2/C/C2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/types.h>
+#include "ascot2e.h"
+#include "dvb_frontend.h"
+
+#define MAX_WRITE_REGSIZE 10
+
+enum ascot2e_state {
+ STATE_UNKNOWN,
+ STATE_SLEEP,
+ STATE_ACTIVE
+};
+
+struct ascot2e_priv {
+ u32 frequency;
+ u8 i2c_address;
+ struct i2c_adapter *i2c;
+ enum ascot2e_state state;
+ void *set_tuner_data;
+ int (*set_tuner)(void *, int);
+};
+
+enum ascot2e_tv_system_t {
+ ASCOT2E_DTV_DVBT_5,
+ ASCOT2E_DTV_DVBT_6,
+ ASCOT2E_DTV_DVBT_7,
+ ASCOT2E_DTV_DVBT_8,
+ ASCOT2E_DTV_DVBT2_1_7,
+ ASCOT2E_DTV_DVBT2_5,
+ ASCOT2E_DTV_DVBT2_6,
+ ASCOT2E_DTV_DVBT2_7,
+ ASCOT2E_DTV_DVBT2_8,
+ ASCOT2E_DTV_DVBC_6,
+ ASCOT2E_DTV_DVBC_8,
+ ASCOT2E_DTV_DVBC2_6,
+ ASCOT2E_DTV_DVBC2_8,
+ ASCOT2E_DTV_UNKNOWN
+};
+
+struct ascot2e_band_sett {
+ u8 if_out_sel;
+ u8 agc_sel;
+ u8 mix_oll;
+ u8 rf_gain;
+ u8 if_bpf_gc;
+ u8 fif_offset;
+ u8 bw_offset;
+ u8 bw;
+ u8 rf_oldet;
+ u8 if_bpf_f0;
+};
+
+#define ASCOT2E_AUTO 0xff
+#define ASCOT2E_OFFSET(ofs) ((u8)(ofs) & 0x1F)
+#define ASCOT2E_BW_6 0x00
+#define ASCOT2E_BW_7 0x01
+#define ASCOT2E_BW_8 0x02
+#define ASCOT2E_BW_1_7 0x03
+
+static struct ascot2e_band_sett ascot2e_sett[] = {
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_7, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-4), ASCOT2E_OFFSET(-2), ASCOT2E_BW_8, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-10), ASCOT2E_OFFSET(-16), ASCOT2E_BW_1_7, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-8), ASCOT2E_OFFSET(-6), ASCOT2E_BW_6, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_7, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x06,
+ ASCOT2E_OFFSET(-4), ASCOT2E_OFFSET(-2), ASCOT2E_BW_8, 0x0B, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x02, ASCOT2E_AUTO, 0x03,
+ ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-8), ASCOT2E_BW_6, 0x09, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x02, ASCOT2E_AUTO, 0x03,
+ ASCOT2E_OFFSET(-2), ASCOT2E_OFFSET(-1), ASCOT2E_BW_8, 0x09, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x01,
+ ASCOT2E_OFFSET(-6), ASCOT2E_OFFSET(-4), ASCOT2E_BW_6, 0x09, 0x00 },
+ { ASCOT2E_AUTO, ASCOT2E_AUTO, 0x03, ASCOT2E_AUTO, 0x01,
+ ASCOT2E_OFFSET(-2), ASCOT2E_OFFSET(2), ASCOT2E_BW_8, 0x09, 0x00 }
+};
+
+static void ascot2e_i2c_debug(struct ascot2e_priv *priv,
+ u8 reg, u8 write, const u8 *data, u32 len)
+{
+ dev_dbg(&priv->i2c->dev, "ascot2e: I2C %s reg 0x%02x size %d\n",
+ (write == 0 ? "read" : "write"), reg, len);
+ print_hex_dump_bytes("ascot2e: I2C data: ",
+ DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int ascot2e_write_regs(struct ascot2e_priv *priv,
+ u8 reg, const u8 *data, u32 len)
+{
+ int ret;
+ u8 buf[MAX_WRITE_REGSIZE + 1];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = len + 1,
+ .buf = buf,
+ }
+ };
+
+ if (len + 1 >= sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
+ reg, len + 1);
+ return -E2BIG;
+ }
+
+ ascot2e_i2c_debug(priv, reg, 1, data, len);
+ buf[0] = reg;
+ memcpy(&buf[1], data, len);
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ return ret;
+ }
+ return 0;
+}
+
+static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val)
+{
+ return ascot2e_write_regs(priv, reg, &val, 1);
+}
+
+static int ascot2e_read_regs(struct ascot2e_priv *priv,
+ u8 reg, u8 *val, u32 len)
+{
+ int ret;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->i2c_address,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, &msg[0], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: I2C rw failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, priv->i2c_address, reg);
+ return ret;
+ }
+ ret = i2c_transfer(priv->i2c, &msg[1], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, priv->i2c_address, reg);
+ return ret;
+ }
+ ascot2e_i2c_debug(priv, reg, 0, val, len);
+ return 0;
+}
+
+static int ascot2e_read_reg(struct ascot2e_priv *priv, u8 reg, u8 *val)
+{
+ return ascot2e_read_regs(priv, reg, val, 1);
+}
+
+static int ascot2e_set_reg_bits(struct ascot2e_priv *priv,
+ u8 reg, u8 data, u8 mask)
+{
+ int res;
+ u8 rdata;
+
+ if (mask != 0xff) {
+ res = ascot2e_read_reg(priv, reg, &rdata);
+ if (res != 0)
+ return res;
+ data = ((data & mask) | (rdata & (mask ^ 0xFF)));
+ }
+ return ascot2e_write_reg(priv, reg, data);
+}
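+
+/*
+ * ascot2e_set_reg_bits() is a read-modify-write helper: only the bits
+ * set in the mask are taken from the new value, the rest are preserved
+ * from the chip. For example, the tuning code below updates only
+ * bits [4:2] of register 0x05:
+ *
+ * ascot2e_set_reg_bits(priv, 0x05, data[0], 0x1c);
+ */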
+
+static int ascot2e_enter_power_save(struct ascot2e_priv *priv)
+{
+ u8 data[2];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_SLEEP)
+ return 0;
+ data[0] = 0x00;
+ data[1] = 0x04;
+ ascot2e_write_regs(priv, 0x14, data, 2);
+ ascot2e_write_reg(priv, 0x50, 0x01);
+ priv->state = STATE_SLEEP;
+ return 0;
+}
+
+static int ascot2e_leave_power_save(struct ascot2e_priv *priv)
+{
+ u8 data[2] = { 0xFB, 0x0F };
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_ACTIVE)
+ return 0;
+ ascot2e_write_regs(priv, 0x14, data, 2);
+ ascot2e_write_reg(priv, 0x50, 0x00);
+ priv->state = STATE_ACTIVE;
+ return 0;
+}
+
+static int ascot2e_init(struct dvb_frontend *fe)
+{
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ return ascot2e_leave_power_save(priv);
+}
+
+static int ascot2e_release(struct dvb_frontend *fe)
+{
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int ascot2e_sleep(struct dvb_frontend *fe)
+{
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ ascot2e_enter_power_save(priv);
+ return 0;
+}
+
+static enum ascot2e_tv_system_t ascot2e_get_tv_system(struct dvb_frontend *fe)
+{
+ enum ascot2e_tv_system_t system = ASCOT2E_DTV_UNKNOWN;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ if (p->delivery_system == SYS_DVBT) {
+ if (p->bandwidth_hz <= 5000000)
+ system = ASCOT2E_DTV_DVBT_5;
+ else if (p->bandwidth_hz <= 6000000)
+ system = ASCOT2E_DTV_DVBT_6;
+ else if (p->bandwidth_hz <= 7000000)
+ system = ASCOT2E_DTV_DVBT_7;
+ else if (p->bandwidth_hz <= 8000000)
+ system = ASCOT2E_DTV_DVBT_8;
+ else {
+ system = ASCOT2E_DTV_DVBT_8;
+ p->bandwidth_hz = 8000000;
+ }
+ } else if (p->delivery_system == SYS_DVBT2) {
+ if (p->bandwidth_hz <= 5000000)
+ system = ASCOT2E_DTV_DVBT2_5;
+ else if (p->bandwidth_hz <= 6000000)
+ system = ASCOT2E_DTV_DVBT2_6;
+ else if (p->bandwidth_hz <= 7000000)
+ system = ASCOT2E_DTV_DVBT2_7;
+ else if (p->bandwidth_hz <= 8000000)
+ system = ASCOT2E_DTV_DVBT2_8;
+ else {
+ system = ASCOT2E_DTV_DVBT2_8;
+ p->bandwidth_hz = 8000000;
+ }
+ } else if (p->delivery_system == SYS_DVBC_ANNEX_A) {
+ if (p->bandwidth_hz <= 6000000)
+ system = ASCOT2E_DTV_DVBC_6;
+ else if (p->bandwidth_hz <= 8000000)
+ system = ASCOT2E_DTV_DVBC_8;
+ }
+ dev_dbg(&priv->i2c->dev,
+ "%s(): ASCOT2E DTV system %d (delsys %d, bandwidth %d)\n",
+ __func__, (int)system, p->delivery_system, p->bandwidth_hz);
+ return system;
+}
+
+static int ascot2e_set_params(struct dvb_frontend *fe)
+{
+ u8 data[10];
+ u32 frequency;
+ enum ascot2e_tv_system_t tv_system;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s(): tune frequency %dkHz\n",
+ __func__, p->frequency / 1000);
+ tv_system = ascot2e_get_tv_system(fe);
+
+ if (tv_system == ASCOT2E_DTV_UNKNOWN) {
+ dev_dbg(&priv->i2c->dev, "%s(): unknown DTV system\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (priv->set_tuner)
+ priv->set_tuner(priv->set_tuner_data, 1);
+ frequency = roundup(p->frequency / 1000, 25);
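+	/*
+	 * The roundup above reflects the tuner's 25 kHz PLL step (see
+	 * frequency_step in the tuner ops below): e.g. a request for
+	 * 474010 kHz becomes 474025 kHz.
+	 */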
+ if (priv->state == STATE_SLEEP)
+ ascot2e_leave_power_save(priv);
+
+ /* IF_OUT_SEL / AGC_SEL setting */
+ data[0] = 0x00;
+ if (ascot2e_sett[tv_system].agc_sel != ASCOT2E_AUTO) {
+ /* AGC pin setting from parameter table */
+ data[0] |= (u8)(
+ (ascot2e_sett[tv_system].agc_sel & 0x03) << 3);
+ }
+ if (ascot2e_sett[tv_system].if_out_sel != ASCOT2E_AUTO) {
+ /* IFOUT pin setting from parameter table */
+ data[0] |= (u8)(
+ (ascot2e_sett[tv_system].if_out_sel & 0x01) << 2);
+ }
+ /* Set bit[4:2] only */
+ ascot2e_set_reg_bits(priv, 0x05, data[0], 0x1c);
+ /* 0x06 - 0x0F */
+ /* REF_R setting (0x06) */
+ if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+ tv_system == ASCOT2E_DTV_DVBC_8) {
+ /* xtal, xtal*2 */
+ data[0] = (frequency > 500000) ? 16 : 32;
+ } else {
+ /* xtal/8, xtal/4 */
+ data[0] = (frequency > 500000) ? 2 : 4;
+ }
+ /* XOSC_SEL=100uA */
+ data[1] = 0x04;
+ /* KBW setting (0x08), KC0 setting (0x09), KC1 setting (0x0A) */
+ if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+ tv_system == ASCOT2E_DTV_DVBC_8) {
+ data[2] = 18;
+ data[3] = 120;
+ data[4] = 20;
+ } else {
+ data[2] = 48;
+ data[3] = 10;
+ data[4] = 30;
+ }
+ /* ORDER/R2_RANGE/R2_BANK/C2_BANK setting (0x0B) */
+ if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+ tv_system == ASCOT2E_DTV_DVBC_8)
+ data[5] = (frequency > 500000) ? 0x08 : 0x0c;
+ else
+ data[5] = (frequency > 500000) ? 0x30 : 0x38;
+ /* Set MIX_OLL (0x0C) value from parameter table */
+ data[6] = ascot2e_sett[tv_system].mix_oll;
+ /* Set RF_GAIN (0x0D) setting from parameter table */
+ if (ascot2e_sett[tv_system].rf_gain == ASCOT2E_AUTO) {
+ /* RF_GAIN auto control enable */
+ ascot2e_write_reg(priv, 0x4E, 0x01);
+ /* RF_GAIN Default value */
+ data[7] = 0x00;
+ } else {
+ /* RF_GAIN auto control disable */
+ ascot2e_write_reg(priv, 0x4E, 0x00);
+ data[7] = ascot2e_sett[tv_system].rf_gain;
+ }
+ /* Set IF_BPF_GC/FIF_OFFSET (0x0E) value from parameter table */
+ data[8] = (u8)((ascot2e_sett[tv_system].fif_offset << 3) |
+ (ascot2e_sett[tv_system].if_bpf_gc & 0x07));
+ /* Set BW_OFFSET (0x0F) value from parameter table */
+ data[9] = ascot2e_sett[tv_system].bw_offset;
+ ascot2e_write_regs(priv, 0x06, data, 10);
+ /*
+ * 0x45 - 0x47
+ * LNA optimization setting
+ * RF_LNA_DIST1-5, RF_LNA_CM
+ */
+ if (tv_system == ASCOT2E_DTV_DVBC_6 ||
+ tv_system == ASCOT2E_DTV_DVBC_8) {
+ data[0] = 0x0F;
+ data[1] = 0x00;
+ data[2] = 0x01;
+ } else {
+ data[0] = 0x0F;
+ data[1] = 0x00;
+ data[2] = 0x03;
+ }
+ ascot2e_write_regs(priv, 0x45, data, 3);
+	/*
+	 * 0x49 - 0x4A
+	 * Set RF_OLDET_ENX/RF_OLDET_OLL value from parameter table
+	 */
+ data[0] = ascot2e_sett[tv_system].rf_oldet;
+ /* Set IF_BPF_F0 value from parameter table */
+ data[1] = ascot2e_sett[tv_system].if_bpf_f0;
+ ascot2e_write_regs(priv, 0x49, data, 2);
+ /*
+ * Tune now
+ * RFAGC fast mode / RFAGC auto control enable
+ * (set bit[7], bit[5:4] only)
+ * vco_cal = 1, set MIX_OL_CPU_EN
+ */
+ ascot2e_set_reg_bits(priv, 0x0c, 0x90, 0xb0);
+ /* Logic wake up, CPU wake up */
+ data[0] = 0xc4;
+ data[1] = 0x40;
+ ascot2e_write_regs(priv, 0x03, data, 2);
+ /* 0x10 - 0x14 */
+ data[0] = (u8)(frequency & 0xFF); /* 0x10: FRF_L */
+ data[1] = (u8)((frequency >> 8) & 0xFF); /* 0x11: FRF_M */
+ data[2] = (u8)((frequency >> 16) & 0x0F); /* 0x12: FRF_H (bit[3:0]) */
+ /* 0x12: BW (bit[5:4]) */
+ data[2] |= (u8)(ascot2e_sett[tv_system].bw << 4);
+ data[3] = 0xFF; /* 0x13: VCO calibration enable */
+ data[4] = 0xFF; /* 0x14: Analog block enable */
+ /* Tune (Burst write) */
+ ascot2e_write_regs(priv, 0x10, data, 5);
+ msleep(50);
+ /* CPU deep sleep */
+ ascot2e_write_reg(priv, 0x04, 0x00);
+ /* Logic sleep */
+ ascot2e_write_reg(priv, 0x03, 0xC0);
+ /* RFAGC normal mode (set bit[5:4] only) */
+ ascot2e_set_reg_bits(priv, 0x0C, 0x00, 0x30);
+ priv->frequency = frequency;
+ return 0;
+}
+
+static int ascot2e_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct ascot2e_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->frequency * 1000;
+ return 0;
+}
+
+static struct dvb_tuner_ops ascot2e_tuner_ops = {
+ .info = {
+ .name = "Sony ASCOT2E",
+ .frequency_min = 1000000,
+ .frequency_max = 1200000000,
+ .frequency_step = 25000,
+ },
+ .init = ascot2e_init,
+ .release = ascot2e_release,
+ .sleep = ascot2e_sleep,
+ .set_params = ascot2e_set_params,
+ .get_frequency = ascot2e_get_frequency,
+};
+
+struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+ const struct ascot2e_config *config,
+ struct i2c_adapter *i2c)
+{
+ u8 data[4];
+ struct ascot2e_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct ascot2e_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+ priv->i2c_address = (config->i2c_address >> 1);
+ priv->i2c = i2c;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ /* 16 MHz xTal frequency */
+ data[0] = 16;
+ /* VCO current setting */
+ data[1] = 0x06;
+ /* Logic wake up, CPU boot */
+ data[2] = 0xC4;
+ data[3] = 0x40;
+ ascot2e_write_regs(priv, 0x01, data, 4);
+ /* RFVGA optimization setting (RF_DIST0 - RF_DIST2) */
+ data[0] = 0x10;
+ data[1] = 0x3F;
+ data[2] = 0x25;
+ ascot2e_write_regs(priv, 0x22, data, 3);
+ /* PLL mode setting */
+ ascot2e_write_reg(priv, 0x28, 0x1e);
+ /* RSSI setting */
+ ascot2e_write_reg(priv, 0x59, 0x04);
+ /* TODO check CPU HW error state here */
+ msleep(80);
+ /* Xtal oscillator current control setting */
+ ascot2e_write_reg(priv, 0x4c, 0x01);
+ /* XOSC_SEL=100uA */
+ ascot2e_write_reg(priv, 0x07, 0x04);
+ /* CPU deep sleep */
+ ascot2e_write_reg(priv, 0x04, 0x00);
+ /* Logic sleep */
+ ascot2e_write_reg(priv, 0x03, 0xc0);
+ /* Power save setting */
+ data[0] = 0x00;
+ data[1] = 0x04;
+ ascot2e_write_regs(priv, 0x14, data, 2);
+ ascot2e_write_reg(priv, 0x50, 0x01);
+ priv->state = STATE_SLEEP;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &ascot2e_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ dev_info(&priv->i2c->dev,
+ "Sony ASCOT2E attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+ return fe;
+}
+EXPORT_SYMBOL(ascot2e_attach);
+
+MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
new file mode 100644
index 000000000000..6da4ae6d6cc3
--- /dev/null
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -0,0 +1,58 @@
+/*
+ * ascot2e.h
+ *
+ * Sony Ascot2E DVB-T/T2/C/C2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DVB_ASCOT2E_H__
+#define __DVB_ASCOT2E_H__
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+
+/**
+ * struct ascot2e_config - the configuration of Ascot2E tuner driver
+ * @i2c_address: I2C address of the tuner
+ * @xtal_freq_mhz: Oscillator frequency, MHz
+ * @set_tuner_priv: Callback function private context
+ * @set_tuner_callback: Callback function that notifies the parent driver
+ * which tuner is active now
+ */
+struct ascot2e_config {
+ u8 i2c_address;
+ u8 xtal_freq_mhz;
+ void *set_tuner_priv;
+ int (*set_tuner_callback)(void *, int);
+};
+
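+/*
+ * A minimal sketch of filling this config for ascot2e_attach() (the
+ * values are illustrative; note the driver shifts the 8-bit
+ * @i2c_address right by one internally):
+ *
+ * static const struct ascot2e_config cfg = {
+ *         .i2c_address = 0xc2,
+ *         .xtal_freq_mhz = 16,
+ * };
+ *
+ * fe = ascot2e_attach(fe, &cfg, i2c);
+ */
+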
+#if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
+extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+ const struct ascot2e_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+ const struct ascot2e_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 33aa9410b624..28d7dc2fee34 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -820,7 +820,6 @@ MODULE_DEVICE_TABLE(i2c, au8522_id);
static struct i2c_driver au8522_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "au8522",
},
.probe = au8522_probe,
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index b744a3f8d467..f956f13fb3dc 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -922,7 +922,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
module_param(zv_mode, int, 0644);
-MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatability mode (default:on).\n"
+MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatibility mode (default:on).\n"
"\t\ton - modified AU8522 QAM256 initialization.\n"
"\t\tProvides faster lock when using ZeeVee modulator based sources");
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index e18cf9e1185e..0fe7fb11124b 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1011,7 +1011,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
static int cx24123_get_algo(struct dvb_frontend *fe)
{
- return 1; /* FE_ALGO_HW */
+ return DVBFE_ALGO_HW;
}
static void cx24123_release(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
new file mode 100644
index 000000000000..fdffb2f0ded8
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -0,0 +1,2727 @@
+/*
+ * cxd2841er.c
+ *
+ * Sony CXD2841ER digital demodulator driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/math64.h>
+#include <linux/log2.h>
+#include <linux/dynamic_debug.h>
+
+#include "dvb_math.h"
+#include "dvb_frontend.h"
+#include "cxd2841er.h"
+#include "cxd2841er_priv.h"
+
+#define MAX_WRITE_REGSIZE 16
+
+enum cxd2841er_state {
+ STATE_SHUTDOWN = 0,
+ STATE_SLEEP_S,
+ STATE_ACTIVE_S,
+ STATE_SLEEP_TC,
+ STATE_ACTIVE_TC
+};
+
+struct cxd2841er_priv {
+ struct dvb_frontend frontend;
+ struct i2c_adapter *i2c;
+ u8 i2c_addr_slvx;
+ u8 i2c_addr_slvt;
+ const struct cxd2841er_config *config;
+ enum cxd2841er_state state;
+ u8 system;
+};
+
+static const struct cxd2841er_cnr_data s_cn_data[] = {
+ { 0x033e, 0 }, { 0x0339, 100 }, { 0x0333, 200 },
+ { 0x032e, 300 }, { 0x0329, 400 }, { 0x0324, 500 },
+ { 0x031e, 600 }, { 0x0319, 700 }, { 0x0314, 800 },
+ { 0x030f, 900 }, { 0x030a, 1000 }, { 0x02ff, 1100 },
+ { 0x02f4, 1200 }, { 0x02e9, 1300 }, { 0x02de, 1400 },
+ { 0x02d4, 1500 }, { 0x02c9, 1600 }, { 0x02bf, 1700 },
+ { 0x02b5, 1800 }, { 0x02ab, 1900 }, { 0x02a1, 2000 },
+ { 0x029b, 2100 }, { 0x0295, 2200 }, { 0x0290, 2300 },
+ { 0x028a, 2400 }, { 0x0284, 2500 }, { 0x027f, 2600 },
+ { 0x0279, 2700 }, { 0x0274, 2800 }, { 0x026e, 2900 },
+ { 0x0269, 3000 }, { 0x0262, 3100 }, { 0x025c, 3200 },
+ { 0x0255, 3300 }, { 0x024f, 3400 }, { 0x0249, 3500 },
+ { 0x0242, 3600 }, { 0x023c, 3700 }, { 0x0236, 3800 },
+ { 0x0230, 3900 }, { 0x022a, 4000 }, { 0x0223, 4100 },
+ { 0x021c, 4200 }, { 0x0215, 4300 }, { 0x020e, 4400 },
+ { 0x0207, 4500 }, { 0x0201, 4600 }, { 0x01fa, 4700 },
+ { 0x01f4, 4800 }, { 0x01ed, 4900 }, { 0x01e7, 5000 },
+ { 0x01e0, 5100 }, { 0x01d9, 5200 }, { 0x01d2, 5300 },
+ { 0x01cb, 5400 }, { 0x01c4, 5500 }, { 0x01be, 5600 },
+ { 0x01b7, 5700 }, { 0x01b1, 5800 }, { 0x01aa, 5900 },
+ { 0x01a4, 6000 }, { 0x019d, 6100 }, { 0x0196, 6200 },
+ { 0x018f, 6300 }, { 0x0189, 6400 }, { 0x0182, 6500 },
+ { 0x017c, 6600 }, { 0x0175, 6700 }, { 0x016f, 6800 },
+ { 0x0169, 6900 }, { 0x0163, 7000 }, { 0x015c, 7100 },
+ { 0x0156, 7200 }, { 0x0150, 7300 }, { 0x014a, 7400 },
+ { 0x0144, 7500 }, { 0x013e, 7600 }, { 0x0138, 7700 },
+ { 0x0132, 7800 }, { 0x012d, 7900 }, { 0x0127, 8000 },
+ { 0x0121, 8100 }, { 0x011c, 8200 }, { 0x0116, 8300 },
+ { 0x0111, 8400 }, { 0x010b, 8500 }, { 0x0106, 8600 },
+ { 0x0101, 8700 }, { 0x00fc, 8800 }, { 0x00f7, 8900 },
+ { 0x00f2, 9000 }, { 0x00ee, 9100 }, { 0x00ea, 9200 },
+ { 0x00e6, 9300 }, { 0x00e2, 9400 }, { 0x00de, 9500 },
+ { 0x00da, 9600 }, { 0x00d7, 9700 }, { 0x00d3, 9800 },
+ { 0x00d0, 9900 }, { 0x00cc, 10000 }, { 0x00c7, 10100 },
+ { 0x00c3, 10200 }, { 0x00bf, 10300 }, { 0x00ba, 10400 },
+ { 0x00b6, 10500 }, { 0x00b2, 10600 }, { 0x00ae, 10700 },
+ { 0x00aa, 10800 }, { 0x00a7, 10900 }, { 0x00a3, 11000 },
+ { 0x009f, 11100 }, { 0x009c, 11200 }, { 0x0098, 11300 },
+ { 0x0094, 11400 }, { 0x0091, 11500 }, { 0x008e, 11600 },
+ { 0x008a, 11700 }, { 0x0087, 11800 }, { 0x0084, 11900 },
+ { 0x0081, 12000 }, { 0x007e, 12100 }, { 0x007b, 12200 },
+ { 0x0079, 12300 }, { 0x0076, 12400 }, { 0x0073, 12500 },
+ { 0x0071, 12600 }, { 0x006e, 12700 }, { 0x006c, 12800 },
+ { 0x0069, 12900 }, { 0x0067, 13000 }, { 0x0065, 13100 },
+ { 0x0062, 13200 }, { 0x0060, 13300 }, { 0x005e, 13400 },
+ { 0x005c, 13500 }, { 0x005a, 13600 }, { 0x0058, 13700 },
+ { 0x0056, 13800 }, { 0x0054, 13900 }, { 0x0052, 14000 },
+ { 0x0050, 14100 }, { 0x004e, 14200 }, { 0x004c, 14300 },
+ { 0x004b, 14400 }, { 0x0049, 14500 }, { 0x0047, 14600 },
+ { 0x0046, 14700 }, { 0x0044, 14800 }, { 0x0043, 14900 },
+ { 0x0041, 15000 }, { 0x003f, 15100 }, { 0x003e, 15200 },
+ { 0x003c, 15300 }, { 0x003b, 15400 }, { 0x003a, 15500 },
+ { 0x0037, 15700 }, { 0x0036, 15800 }, { 0x0034, 15900 },
+ { 0x0033, 16000 }, { 0x0032, 16100 }, { 0x0031, 16200 },
+ { 0x0030, 16300 }, { 0x002f, 16400 }, { 0x002e, 16500 },
+ { 0x002d, 16600 }, { 0x002c, 16700 }, { 0x002b, 16800 },
+ { 0x002a, 16900 }, { 0x0029, 17000 }, { 0x0028, 17100 },
+ { 0x0027, 17200 }, { 0x0026, 17300 }, { 0x0025, 17400 },
+ { 0x0024, 17500 }, { 0x0023, 17600 }, { 0x0022, 17800 },
+ { 0x0021, 17900 }, { 0x0020, 18000 }, { 0x001f, 18200 },
+ { 0x001e, 18300 }, { 0x001d, 18500 }, { 0x001c, 18700 },
+ { 0x001b, 18900 }, { 0x001a, 19000 }, { 0x0019, 19200 },
+ { 0x0018, 19300 }, { 0x0017, 19500 }, { 0x0016, 19700 },
+ { 0x0015, 19900 }, { 0x0014, 20000 },
+};
+
+static const struct cxd2841er_cnr_data s2_cn_data[] = {
+ { 0x05af, 0 }, { 0x0597, 100 }, { 0x057e, 200 },
+ { 0x0567, 300 }, { 0x0550, 400 }, { 0x0539, 500 },
+ { 0x0522, 600 }, { 0x050c, 700 }, { 0x04f6, 800 },
+ { 0x04e1, 900 }, { 0x04cc, 1000 }, { 0x04b6, 1100 },
+ { 0x04a1, 1200 }, { 0x048c, 1300 }, { 0x0477, 1400 },
+ { 0x0463, 1500 }, { 0x044f, 1600 }, { 0x043c, 1700 },
+ { 0x0428, 1800 }, { 0x0416, 1900 }, { 0x0403, 2000 },
+ { 0x03ef, 2100 }, { 0x03dc, 2200 }, { 0x03c9, 2300 },
+ { 0x03b6, 2400 }, { 0x03a4, 2500 }, { 0x0392, 2600 },
+ { 0x0381, 2700 }, { 0x036f, 2800 }, { 0x035f, 2900 },
+ { 0x034e, 3000 }, { 0x033d, 3100 }, { 0x032d, 3200 },
+ { 0x031d, 3300 }, { 0x030d, 3400 }, { 0x02fd, 3500 },
+ { 0x02ee, 3600 }, { 0x02df, 3700 }, { 0x02d0, 3800 },
+ { 0x02c2, 3900 }, { 0x02b4, 4000 }, { 0x02a6, 4100 },
+ { 0x0299, 4200 }, { 0x028c, 4300 }, { 0x027f, 4400 },
+ { 0x0272, 4500 }, { 0x0265, 4600 }, { 0x0259, 4700 },
+ { 0x024d, 4800 }, { 0x0241, 4900 }, { 0x0236, 5000 },
+ { 0x022b, 5100 }, { 0x0220, 5200 }, { 0x0215, 5300 },
+ { 0x020a, 5400 }, { 0x0200, 5500 }, { 0x01f6, 5600 },
+ { 0x01ec, 5700 }, { 0x01e2, 5800 }, { 0x01d8, 5900 },
+ { 0x01cf, 6000 }, { 0x01c6, 6100 }, { 0x01bc, 6200 },
+ { 0x01b3, 6300 }, { 0x01aa, 6400 }, { 0x01a2, 6500 },
+ { 0x0199, 6600 }, { 0x0191, 6700 }, { 0x0189, 6800 },
+ { 0x0181, 6900 }, { 0x0179, 7000 }, { 0x0171, 7100 },
+ { 0x0169, 7200 }, { 0x0161, 7300 }, { 0x015a, 7400 },
+ { 0x0153, 7500 }, { 0x014b, 7600 }, { 0x0144, 7700 },
+ { 0x013d, 7800 }, { 0x0137, 7900 }, { 0x0130, 8000 },
+ { 0x012a, 8100 }, { 0x0124, 8200 }, { 0x011e, 8300 },
+ { 0x0118, 8400 }, { 0x0112, 8500 }, { 0x010c, 8600 },
+ { 0x0107, 8700 }, { 0x0101, 8800 }, { 0x00fc, 8900 },
+ { 0x00f7, 9000 }, { 0x00f2, 9100 }, { 0x00ec, 9200 },
+ { 0x00e7, 9300 }, { 0x00e2, 9400 }, { 0x00dd, 9500 },
+ { 0x00d8, 9600 }, { 0x00d4, 9700 }, { 0x00cf, 9800 },
+ { 0x00ca, 9900 }, { 0x00c6, 10000 }, { 0x00c2, 10100 },
+ { 0x00be, 10200 }, { 0x00b9, 10300 }, { 0x00b5, 10400 },
+ { 0x00b1, 10500 }, { 0x00ae, 10600 }, { 0x00aa, 10700 },
+ { 0x00a6, 10800 }, { 0x00a3, 10900 }, { 0x009f, 11000 },
+ { 0x009b, 11100 }, { 0x0098, 11200 }, { 0x0095, 11300 },
+ { 0x0091, 11400 }, { 0x008e, 11500 }, { 0x008b, 11600 },
+ { 0x0088, 11700 }, { 0x0085, 11800 }, { 0x0082, 11900 },
+ { 0x007f, 12000 }, { 0x007c, 12100 }, { 0x007a, 12200 },
+ { 0x0077, 12300 }, { 0x0074, 12400 }, { 0x0072, 12500 },
+ { 0x006f, 12600 }, { 0x006d, 12700 }, { 0x006b, 12800 },
+ { 0x0068, 12900 }, { 0x0066, 13000 }, { 0x0064, 13100 },
+ { 0x0061, 13200 }, { 0x005f, 13300 }, { 0x005d, 13400 },
+ { 0x005b, 13500 }, { 0x0059, 13600 }, { 0x0057, 13700 },
+ { 0x0055, 13800 }, { 0x0053, 13900 }, { 0x0051, 14000 },
+ { 0x004f, 14100 }, { 0x004e, 14200 }, { 0x004c, 14300 },
+ { 0x004a, 14400 }, { 0x0049, 14500 }, { 0x0047, 14600 },
+ { 0x0045, 14700 }, { 0x0044, 14800 }, { 0x0042, 14900 },
+ { 0x0041, 15000 }, { 0x003f, 15100 }, { 0x003e, 15200 },
+ { 0x003c, 15300 }, { 0x003b, 15400 }, { 0x003a, 15500 },
+ { 0x0038, 15600 }, { 0x0037, 15700 }, { 0x0036, 15800 },
+ { 0x0034, 15900 }, { 0x0033, 16000 }, { 0x0032, 16100 },
+ { 0x0031, 16200 }, { 0x0030, 16300 }, { 0x002f, 16400 },
+ { 0x002e, 16500 }, { 0x002d, 16600 }, { 0x002c, 16700 },
+ { 0x002b, 16800 }, { 0x002a, 16900 }, { 0x0029, 17000 },
+ { 0x0028, 17100 }, { 0x0027, 17200 }, { 0x0026, 17300 },
+ { 0x0025, 17400 }, { 0x0024, 17500 }, { 0x0023, 17600 },
+ { 0x0022, 17800 }, { 0x0021, 17900 }, { 0x0020, 18000 },
+ { 0x001f, 18200 }, { 0x001e, 18300 }, { 0x001d, 18500 },
+ { 0x001c, 18700 }, { 0x001b, 18900 }, { 0x001a, 19000 },
+ { 0x0019, 19200 }, { 0x0018, 19300 }, { 0x0017, 19500 },
+ { 0x0016, 19700 }, { 0x0015, 19900 }, { 0x0014, 20000 },
+};
+
+#define MAKE_IFFREQ_CONFIG(iffreq) ((u32)(((iffreq)/41.0)*16777216.0 + 0.5))
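+
+/*
+ * MAKE_IFFREQ_CONFIG() maps an IF frequency in MHz to a rounded 24-bit
+ * fixed-point fraction of the demodulator's 41 MHz reference:
+ *
+ * reg = round(iffreq / 41.0 * 2^24)
+ *
+ * e.g. for an illustrative 4.80 MHz IF:
+ * 4.80 / 41.0 * 16777216 + 0.5 -> 1964162 (0x1df882)
+ */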
+
+static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, u8 write,
+ const u8 *data, u32 len)
+{
+ dev_dbg(&priv->i2c->dev,
+ "cxd2841er: I2C %s addr %02x reg 0x%02x size %d\n",
+ (write == 0 ? "read" : "write"), addr, reg, len);
+ print_hex_dump_bytes("cxd2841er: I2C data: ",
+ DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, const u8 *data, u32 len)
+{
+ int ret;
+ u8 buf[MAX_WRITE_REGSIZE + 1];
+ u8 i2c_addr = (addr == I2C_SLVX ?
+ priv->i2c_addr_slvx : priv->i2c_addr_slvt);
+ struct i2c_msg msg[1] = {
+ {
+ .addr = i2c_addr,
+ .flags = 0,
+ .len = len + 1,
+ .buf = buf,
+ }
+ };
+
+ if (len + 1 >= sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
+ reg, len + 1);
+ return -E2BIG;
+ }
+
+ cxd2841er_i2c_debug(priv, i2c_addr, reg, 1, data, len);
+ buf[0] = reg;
+ memcpy(&buf[1], data, len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d addr=%02x reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, i2c_addr, reg, len);
+ return ret;
+ }
+ return 0;
+}
+
+static int cxd2841er_write_reg(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, u8 val)
+{
+ return cxd2841er_write_regs(priv, addr, reg, &val, 1);
+}
+
+static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, u8 *val, u32 len)
+{
+ int ret;
+ u8 i2c_addr = (addr == I2C_SLVX ?
+ priv->i2c_addr_slvx : priv->i2c_addr_slvt);
+ struct i2c_msg msg[2] = {
+ {
+ .addr = i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, &msg[0], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rw failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, i2c_addr, reg);
+ return ret;
+ }
+ ret = i2c_transfer(priv->i2c, &msg[1], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, i2c_addr, reg);
+ return ret;
+ }
+ return 0;
+}
+
+static int cxd2841er_read_reg(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, u8 *val)
+{
+ return cxd2841er_read_regs(priv, addr, reg, val, 1);
+}
+
+static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
+ u8 addr, u8 reg, u8 data, u8 mask)
+{
+ int res;
+ u8 rdata;
+
+ if (mask != 0xff) {
+ res = cxd2841er_read_reg(priv, addr, reg, &rdata);
+ if (res)
+ return res;
+ data = ((data & mask) | (rdata & (mask ^ 0xFF)));
+ }
+ return cxd2841er_write_reg(priv, addr, reg, data);
+}
+
+static int cxd2841er_dvbs2_set_symbol_rate(struct cxd2841er_priv *priv,
+ u32 symbol_rate)
+{
+ u32 reg_value = 0;
+ u8 data[3] = {0, 0, 0};
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ /*
+ * regValue = (symbolRateKSps * 2^14 / 1000) + 0.5
+ * = ((symbolRateKSps * 2^14) + 500) / 1000
+ * = ((symbolRateKSps * 16384) + 500) / 1000
+ */
+ reg_value = DIV_ROUND_CLOSEST(symbol_rate * 16384, 1000);
+ if ((reg_value == 0) || (reg_value > 0xFFFFF)) {
+ dev_err(&priv->i2c->dev,
+ "%s(): reg_value is out of range\n", __func__);
+ return -EINVAL;
+ }
+ data[0] = (u8)((reg_value >> 16) & 0x0F);
+ data[1] = (u8)((reg_value >> 8) & 0xFF);
+ data[2] = (u8)(reg_value & 0xFF);
+ /* Set SLV-T Bank : 0xAE */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x20, data, 3);
+ return 0;
+}
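+
+/*
+ * Worked example for the conversion above (the symbol rate is
+ * illustrative): symbol_rate = 27500 kSps gives
+ * reg_value = (27500 * 16384 + 500) / 1000 = 450560 = 0x6e000,
+ * written to bank 0xAE registers 0x20..0x22 as 0x06 0xe0 0x00.
+ */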
+
+static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
+ u8 system);
+
+static int cxd2841er_sleep_s_to_active_s(struct cxd2841er_priv *priv,
+ u8 system, u32 symbol_rate)
+{
+ int ret;
+ u8 data[4] = { 0, 0, 0, 0 };
+
+ if (priv->state != STATE_SLEEP_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, (int)priv->state);
+ return -EINVAL;
+ }
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_set_ts_clock_mode(priv, SYS_DVBS);
+ /* Set demod mode */
+ if (system == SYS_DVBS) {
+ data[0] = 0x0A;
+ } else if (system == SYS_DVBS2) {
+ data[0] = 0x0B;
+ } else {
+ dev_err(&priv->i2c->dev, "%s(): invalid delsys %d\n",
+ __func__, system);
+ return -EINVAL;
+ }
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, data[0]);
+ /* DVB-S/S2 */
+ data[0] = 0x00;
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable S/S2 auto detection 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2d, data[0]);
+ /* Set SLV-T Bank : 0xAE */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+ /* Enable S/S2 auto detection 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, data[0]);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+ /* Enable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x31, 0x01);
+ /* Enable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+ /* Enable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x3f);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Enable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+ /* Set SLV-T Bank : 0xA3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa3);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xac, 0x00);
+ data[0] = 0x07;
+ data[1] = 0x3B;
+ data[2] = 0x08;
+ data[3] = 0xC5;
+ /* Set SLV-T Bank : 0xAB */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xab);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x98, data, 4);
+ data[0] = 0x05;
+ data[1] = 0x80;
+ data[2] = 0x0A;
+ data[3] = 0x80;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xa8, data, 4);
+ data[0] = 0x0C;
+ data[1] = 0xCC;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xc3, data, 2);
+ /* Set demod parameter */
+ ret = cxd2841er_dvbs2_set_symbol_rate(priv, symbol_rate);
+ if (ret != 0)
+ return ret;
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x10);
+ /* disable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+ priv->state = STATE_ACTIVE_S;
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t_band(struct cxd2841er_priv *priv,
+ u32 bandwidth);
+
+static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
+ u32 bandwidth);
+
+static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
+ u32 bandwidth);
+
+static int cxd2841er_retune_active(struct cxd2841er_priv *priv,
+ struct dtv_frontend_properties *p)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_S &&
+ priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ if (priv->state == STATE_ACTIVE_S)
+ return cxd2841er_dvbs2_set_symbol_rate(
+ priv, p->symbol_rate / 1000);
+ else if (priv->state == STATE_ACTIVE_TC) {
+ switch (priv->system) {
+ case SYS_DVBT:
+ return cxd2841er_sleep_tc_to_active_t_band(
+ priv, p->bandwidth_hz);
+ case SYS_DVBT2:
+ return cxd2841er_sleep_tc_to_active_t2_band(
+ priv, p->bandwidth_hz);
+ case SYS_DVBC_ANNEX_A:
+ return cxd2841er_sleep_tc_to_active_c_band(
+ priv, 8000000);
+ }
+ }
+ dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+}
+
+static int cxd2841er_active_s_to_sleep_s(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ /* enable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x1f);
+ /* enable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* disable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x31, 0x00);
+ /* disable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+ /* disable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+ /* SADC Bias ON */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+ /* disable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+ /* Set SLV-T Bank : 0xAE */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xae);
+ /* disable S/S2 auto detection1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable S/S2 auto detection2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2d, 0x00);
+ priv->state = STATE_SLEEP_S;
+ return 0;
+}
+
+static int cxd2841er_sleep_s_to_shutdown(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_SLEEP_S) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable DSQOUT */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+ /* Disable DSQIN */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9c, 0x00);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Disable oscillator */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x15, 0x01);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+ priv->state = STATE_SHUTDOWN;
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_shutdown(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_SLEEP_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Disable oscillator */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x15, 0x01);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+ priv->state = STATE_SHUTDOWN;
+ return 0;
+}
+
+static int cxd2841er_active_t_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ /* enable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+ /* enable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* disable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ /* Disable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* Disable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Disable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+ priv->state = STATE_SLEEP_TC;
+ return 0;
+}
+
+static int cxd2841er_active_t2_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ /* enable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+ /* enable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+ /* Cancel DVB-T2 setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x83, 0x40);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x86, 0x21);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x9e, 0x09, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9f, 0xfb);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2a);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x38, 0x00, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x11, 0x00, 0x3f);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* disable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ /* Disable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* Disable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Disable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+ priv->state = STATE_SLEEP_TC;
+ return 0;
+}
+
+static int cxd2841er_active_c_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ /* enable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+ /* enable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+ /* Cancel DVB-C setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa3, 0x00, 0x1f);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* disable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ /* Disable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* Disable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Disable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+ priv->state = STATE_SLEEP_TC;
+ return 0;
+}
+
+static int cxd2841er_shutdown_to_sleep_s(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_SHUTDOWN) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Clear all demodulator registers */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x02, 0x00);
+ usleep_range(3000, 5000);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod SW reset */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
+ /* Set X'tal clock to 20.5 MHz */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x0a);
+ /* Clear demod SW reset */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x00);
+ usleep_range(1000, 2000);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* enable DSQOUT */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x1F);
+ /* enable DSQIN */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9C, 0x40);
+ /* TADC Bias On */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* SADC Bias On */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+ priv->state = STATE_SLEEP_S;
+ return 0;
+}
+
+static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_SHUTDOWN) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Clear all demodulator registers */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x02, 0x00);
+ usleep_range(3000, 5000);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod SW reset */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
+ /* Set X'tal clock to 20.5 MHz */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x13, 0x00);
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+ /* Clear demod SW reset */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x00);
+ usleep_range(1000, 2000);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* TADC Bias On */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* SADC Bias On */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x63, 0x16);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x65, 0x27);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x69, 0x06);
+ priv->state = STATE_SLEEP_TC;
+ return 0;
+}
+
+static int cxd2841er_tune_done(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0, 0);
+ /* SW Reset */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xfe, 0x01);
+ /* Enable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x00);
+ return 0;
+}
+
+/* Set TS clock mode */
+static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
+ u8 system)
+{
+ u8 serial_ts, ts_rate_ctrl_off, ts_in_off;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0xc4, &serial_ts);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0xd3, &ts_rate_ctrl_off);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0xde, &ts_in_off);
+ dev_dbg(&priv->i2c->dev, "%s(): ser_ts=0x%02x rate_ctrl_off=0x%02x in_off=0x%02x\n",
+ __func__, serial_ts, ts_rate_ctrl_off, ts_in_off);
+
+ /*
+ * slave Bank Addr Bit default Name
+ * <SLV-T> 00h D9h [7:0] 8'h08 OTSCKPERIOD
+ */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xd9, 0x08);
+ /*
+ * Disable TS IF Clock
+ * slave Bank Addr Bit default Name
+ * <SLV-T> 00h 32h [0] 1'b1 OREG_CK_TSIF_EN
+ */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x32, 0x00, 0x01);
+ /*
+ * slave Bank Addr Bit default Name
+ * <SLV-T> 00h 33h [1:0] 2'b01 OREG_CKSEL_TSIF
+ */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x33, 0x00, 0x03);
+ /*
+ * Enable TS IF Clock
+ * slave Bank Addr Bit default Name
+ * <SLV-T> 00h 32h [0] 1'b1 OREG_CK_TSIF_EN
+ */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x32, 0x01, 0x01);
+
+ if (system == SYS_DVBT) {
+ /* Enable parity period for DVB-T */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x66, 0x01, 0x01);
+ } else if (system == SYS_DVBC_ANNEX_A) {
+ /* Enable parity period for DVB-C */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x66, 0x01, 0x01);
+ }
+}
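+
+/*
+ * Note on the sequence above: the TS interface clock is gated off
+ * (OREG_CK_TSIF_EN = 0) before OREG_CKSEL_TSIF is changed, and re-enabled
+ * only afterwards, presumably so the clock source is never switched while
+ * the clock is running.  This reading is inferred from the register names
+ * in the comments, not from vendor documentation.
+ */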
+
+static u8 cxd2841er_chip_id(struct cxd2841er_priv *priv)
+{
+ u8 chip_id;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0, 0);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0xfd, &chip_id);
+ return chip_id;
+}
+
+static int cxd2841er_read_status_s(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ u8 reg = 0;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ *status = 0;
+ if (priv->state != STATE_ACTIVE_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0xA0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 11h [2] ITSLOCK
+ */
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x11, &reg);
+ if (reg & 0x04) {
+ *status = FE_HAS_SIGNAL
+ | FE_HAS_CARRIER
+ | FE_HAS_VITERBI
+ | FE_HAS_SYNC
+ | FE_HAS_LOCK;
+ }
+ dev_dbg(&priv->i2c->dev, "%s(): result 0x%x\n", __func__, *status);
+ return 0;
+}
+
+static int cxd2841er_read_status_t_t2(struct cxd2841er_priv *priv,
+ u8 *sync, u8 *tslock, u8 *unlock)
+{
+ u8 data = 0;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC)
+ return -EINVAL;
+ if (priv->system == SYS_DVBT) {
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ } else {
+ /* Set SLV-T Bank : 0x20 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ }
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data);
+ if ((data & 0x07) == 0x07) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid hardware state detected\n", __func__);
+ *sync = 0;
+ *tslock = 0;
+ *unlock = 0;
+ } else {
+ *sync = ((data & 0x07) == 0x6 ? 1 : 0);
+ *tslock = ((data & 0x20) ? 1 : 0);
+ *unlock = ((data & 0x10) ? 1 : 0);
+ }
+ return 0;
+}
+
+static int cxd2841er_read_status_c(struct cxd2841er_priv *priv, u8 *tslock)
+{
+ u8 data;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC)
+ return -EINVAL;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x88, &data);
+ if ((data & 0x01) == 0) {
+ *tslock = 0;
+ } else {
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data);
+ *tslock = ((data & 0x20) ? 1 : 0);
+ }
+ return 0;
+}
+
+static int cxd2841er_read_status_tc(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ int ret = 0;
+ u8 sync = 0;
+ u8 tslock = 0;
+ u8 unlock = 0;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ *status = 0;
+ if (priv->state == STATE_ACTIVE_TC) {
+ if (priv->system == SYS_DVBT || priv->system == SYS_DVBT2) {
+ ret = cxd2841er_read_status_t_t2(
+ priv, &sync, &tslock, &unlock);
+ if (ret)
+ goto done;
+ if (unlock)
+ goto done;
+ if (sync)
+ *status = FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC;
+ if (tslock)
+ *status |= FE_HAS_LOCK;
+ } else if (priv->system == SYS_DVBC_ANNEX_A) {
+ ret = cxd2841er_read_status_c(priv, &tslock);
+ if (ret)
+ goto done;
+ if (tslock)
+ *status = FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ }
+ }
+done:
+ dev_dbg(&priv->i2c->dev, "%s(): status 0x%x\n", __func__, *status);
+ return ret;
+}
+
+static int cxd2841er_get_carrier_offset_s_s2(struct cxd2841er_priv *priv,
+ int *offset)
+{
+ u8 data[3];
+ u8 is_hs_mode;
+ s32 cfrl_ctrlval;
+ s32 temp_div, temp_q, temp_r;
+
+ if (priv->state != STATE_ACTIVE_S) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /*
+ * Get High Sampling Rate mode
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 10h [0] ITRL_LOCK
+ */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data[0]);
+ if (data[0] & 0x01) {
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 50h [4] IHSMODE
+ */
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x50, &data[0]);
+ is_hs_mode = (data[0] & 0x10 ? 1 : 0);
+ } else {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): unable to detect sampling rate mode\n",
+ __func__);
+ return -EINVAL;
+ }
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 45h [4:0] ICFRL_CTRLVAL[20:16]
+ * <SLV-T> A0h 46h [7:0] ICFRL_CTRLVAL[15:8]
+ * <SLV-T> A0h 47h [7:0] ICFRL_CTRLVAL[7:0]
+ */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x45, data, 3);
+ cfrl_ctrlval = sign_extend32((((u32)data[0] & 0x1F) << 16) |
+ (((u32)data[1] & 0xFF) << 8) |
+ ((u32)data[2] & 0xFF), 20);
+ temp_div = (is_hs_mode ? 1048576 : 1572864);
+ if (cfrl_ctrlval > 0) {
+ temp_q = div_s64_rem(97375LL * cfrl_ctrlval,
+ temp_div, &temp_r);
+ } else {
+ temp_q = div_s64_rem(-97375LL * cfrl_ctrlval,
+ temp_div, &temp_r);
+ }
+ if (temp_r >= temp_div / 2)
+ temp_q++;
+ if (cfrl_ctrlval > 0)
+ temp_q *= -1;
+ *offset = temp_q;
+ return 0;
+}
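+
+/*
+ * Illustrative arithmetic for the conversion above (the register value is
+ * invented for the example): with cfrl_ctrlval = 10486 in high sample
+ * rate mode, temp_div = 1048576 and 97375 * 10486 / 1048576 = 973.77...,
+ * which rounds to 974; the sign is then flipped, so *offset = -974.  The
+ * caller folds the result into p->frequency, which for DVB-S/S2 is
+ * expressed in kHz.
+ */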
+
+static int cxd2841er_get_carrier_offset_t2(struct cxd2841er_priv *priv,
+ u32 bandwidth, int *offset)
+{
+ u8 data[4];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ if (priv->system != SYS_DVBT2) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x4c, data, sizeof(data));
+ *offset = -1 * sign_extend32(
+ ((u32)(data[0] & 0x0F) << 24) | ((u32)data[1] << 16) |
+ ((u32)data[2] << 8) | (u32)data[3], 27);
+ switch (bandwidth) {
+ case 1712000:
+ *offset /= 582;
+ break;
+ case 5000000:
+ case 6000000:
+ case 7000000:
+ case 8000000:
+ *offset *= (bandwidth / 1000000);
+ *offset /= 940;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
+ __func__, bandwidth);
+ return -EINVAL;
+ }
+ return 0;
+}
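+
+/*
+ * Illustrative example for the scaling above (invented register value):
+ * if the sign-extended 28-bit word reads 11750 on an 8 MHz channel, then
+ * *offset = -11750 * 8 / 940 = -100.  cxd2841er_tune_tc() folds the
+ * result into p->frequency, which for terrestrial systems is in Hz.
+ */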
+
+static int cxd2841er_get_carrier_offset_c(struct cxd2841er_priv *priv,
+ int *offset)
+{
+ u8 data[2];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ if (priv->system != SYS_DVBC_ANNEX_A) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x15, data, sizeof(data));
+ *offset = div_s64(41000LL * sign_extend32((((u32)data[0] & 0x3f) << 8)
+ | (u32)data[1], 13), 16384);
+ return 0;
+}
+
+static int cxd2841er_read_packet_errors_t(
+ struct cxd2841er_priv *priv, u32 *penum)
+{
+ u8 data[3];
+
+ *penum = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xea, data, sizeof(data));
+ if (data[2] & 0x01)
+ *penum = ((u32)data[0] << 8) | (u32)data[1];
+ return 0;
+}
+
+static int cxd2841er_read_packet_errors_t2(
+ struct cxd2841er_priv *priv, u32 *penum)
+{
+ u8 data[3];
+
+ *penum = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x24);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xfd, data, sizeof(data));
+ if (data[0] & 0x01)
+ *penum = ((u32)data[1] << 8) | (u32)data[2];
+ return 0;
+}
+
+static u32 cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv)
+{
+ u8 data[11];
+ u32 bit_error, bit_count;
+ u32 temp_q, temp_r;
+
+ /* Set SLV-T Bank : 0xA0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 35h [0] IFVBER_VALID
+ * <SLV-T> A0h 36h [5:0] IFVBER_BITERR[21:16]
+ * <SLV-T> A0h 37h [7:0] IFVBER_BITERR[15:8]
+ * <SLV-T> A0h 38h [7:0] IFVBER_BITERR[7:0]
+ * <SLV-T> A0h 3Dh [5:0] IFVBER_BITNUM[21:16]
+ * <SLV-T> A0h 3Eh [7:0] IFVBER_BITNUM[15:8]
+ * <SLV-T> A0h 3Fh [7:0] IFVBER_BITNUM[7:0]
+ */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x35, data, 11);
+ if (data[0] & 0x01) {
+ bit_error = ((u32)(data[1] & 0x3F) << 16) |
+ ((u32)(data[2] & 0xFF) << 8) |
+ (u32)(data[3] & 0xFF);
+ bit_count = ((u32)(data[8] & 0x3F) << 16) |
+ ((u32)(data[9] & 0xFF) << 8) |
+ (u32)(data[10] & 0xFF);
+ /*
+ * BER = bitError / bitCount
+ * = (bitError * 10^7) / bitCount
+ * = (bitError * 625 * 125 * 128) / bitCount
+ */
+ if ((bit_count == 0) || (bit_error > bit_count)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid bit_error %d, bit_count %d\n",
+ __func__, bit_error, bit_count);
+ return 0;
+ }
+ temp_q = div_u64_rem(10000000ULL * bit_error,
+ bit_count, &temp_r);
+ if (bit_count != 1 && temp_r >= bit_count / 2)
+ temp_q++;
+ return temp_q;
+ }
+ dev_dbg(&priv->i2c->dev, "%s(): no data available\n", __func__);
+ return 0;
+}
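+
+/*
+ * Example of the scaling above (illustrative counts): bit_error = 100
+ * over bit_count = 1000000 is a raw BER of 1e-4, and the function returns
+ * 10^7 * 100 / 1000000 = 1000, i.e. the BER in units of 1e-7.  The
+ * factorisation in the comment works because 625 * 125 * 128 == 10^7.
+ */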
+
+static u32 cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv)
+{
+ u8 data[5];
+ u32 bit_error, period;
+ u32 temp_q, temp_r;
+ u32 result = 0;
+
+ /* Set SLV-T Bank : 0xB2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xb2);
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> B2h 30h [0] IFLBER_VALID
+ * <SLV-T> B2h 31h [3:0] IFLBER_BITERR[27:24]
+ * <SLV-T> B2h 32h [7:0] IFLBER_BITERR[23:16]
+ * <SLV-T> B2h 33h [7:0] IFLBER_BITERR[15:8]
+ * <SLV-T> B2h 34h [7:0] IFLBER_BITERR[7:0]
+ */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x30, data, 5);
+ if (data[0] & 0x01) {
+ /* Bit error count */
+ bit_error = ((u32)(data[1] & 0x0F) << 24) |
+ ((u32)(data[2] & 0xFF) << 16) |
+ ((u32)(data[3] & 0xFF) << 8) |
+ (u32)(data[4] & 0xFF);
+
+ /* Set SLV-T Bank : 0xA0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x7a, data);
+ /* Measurement period */
+ period = (u32)(1 << (data[0] & 0x0F));
+ if (period == 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): period is 0\n", __func__);
+ return 0;
+ }
+ if (bit_error > (period * 64800)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid bit_err 0x%x period 0x%x\n",
+ __func__, bit_error, period);
+ return 0;
+ }
+ /*
+ * BER = bitError / (period * 64800)
+ * = (bitError * 10^7) / (period * 64800)
+ * = (bitError * 10^5) / (period * 648)
+ * = (bitError * 12500) / (period * 81)
+ * = (bitError * 10) * 1250 / (period * 81)
+ */
+ temp_q = div_u64_rem(12500ULL * bit_error,
+ period * 81, &temp_r);
+ if (temp_r >= period * 40)
+ temp_q++;
+ result = temp_q;
+ } else {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no data available\n", __func__);
+ }
+ return result;
+}
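+
+/*
+ * Example of the reduction above (illustrative counts): with period = 1
+ * and bit_error = 648 the raw BER is 648 / 64800 = 1e-2, and
+ * 12500 * 648 / (1 * 81) = 100000, i.e. 1e-2 in units of 1e-7.  The
+ * reduction is exact because 12500 / 81 == 10^7 / 64800.
+ */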
+
+static int cxd2841er_read_ber_t2(struct cxd2841er_priv *priv, u32 *ber)
+{
+ u8 data[4];
+ u32 div, q, r;
+ u32 bit_err, period_exp, n_ldpc;
+
+ *ber = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n", __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x39, data, sizeof(data));
+ if (!(data[0] & 0x10)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no valid BER data\n", __func__);
+ return 0;
+ }
+ bit_err = ((u32)(data[0] & 0x0f) << 24) |
+ ((u32)data[1] << 16) |
+ ((u32)data[2] << 8) |
+ (u32)data[3];
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
+ period_exp = data[0] & 0x0f;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x22);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x5e, data);
+ n_ldpc = ((data[0] & 0x03) == 0 ? 16200 : 64800);
+ if (bit_err > ((1U << period_exp) * n_ldpc)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid BER value\n", __func__);
+ return -EINVAL;
+ }
+ if (period_exp >= 4) {
+ div = (1U << (period_exp - 4)) * (n_ldpc / 200);
+ q = div_u64_rem(3125ULL * bit_err, div, &r);
+ } else {
+ div = (1U << period_exp) * (n_ldpc / 200);
+ q = div_u64_rem(50000ULL * bit_err, div, &r);
+ }
+ *ber = (r >= div / 2) ? q + 1 : q;
+ return 0;
+}
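+
+/*
+ * Example of the scaling above (illustrative counts): period_exp = 4 and
+ * n_ldpc = 64800 give a window of 16 * 64800 bits, so bit_err = 64800 is
+ * a raw BER of 1/16; div = 1 * 324 and q = 3125 * 64800 / 324 = 625000,
+ * i.e. 1/16 in units of 1e-7.  Both branches evaluate
+ * 10^7 * bit_err / ((1 << period_exp) * n_ldpc); the constant is merely
+ * factored differently so that it stays an integer in each range.
+ */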
+
+static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv, u32 *ber)
+{
+ u8 data[2];
+ u32 div, q, r;
+ u32 bit_err, period;
+
+ *ber = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n", __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x39, data);
+ if (!(data[0] & 0x01)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no valid BER data\n", __func__);
+ return 0;
+ }
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x22, data, sizeof(data));
+ bit_err = ((u32)data[0] << 8) | (u32)data[1];
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
+ period = ((data[0] & 0x07) == 0) ? 256 : (4096 << (data[0] & 0x07));
+ div = period / 128;
+ q = div_u64_rem(78125ULL * bit_err, div, &r);
+ *ber = (r >= div / 2) ? q + 1 : q;
+ return 0;
+}
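+
+/*
+ * Note on the constants above: 78125 * 128 == 10^7, so q equals
+ * 10^7 * bit_err / period.  Assuming the measurement window is `period`
+ * bits (which is what this scaling implies), the result is the BER in
+ * units of 1e-7, matching the other read_ber helpers in this file.
+ */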
+
+static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv, u8 delsys)
+{
+ u8 data[3];
+ u32 res = 0, value;
+ int min_index, max_index, index;
+ static const struct cxd2841er_cnr_data *cn_data;
+
+ /* Set SLV-T Bank : 0xA1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa1);
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A1h 10h [0] ICPM_QUICKRDY
+ * <SLV-T> A1h 11h [4:0] ICPM_QUICKCNDT[12:8]
+ * <SLV-T> A1h 12h [7:0] ICPM_QUICKCNDT[7:0]
+ */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x10, data, 3);
+ if (data[0] & 0x01) {
+ value = ((u32)(data[1] & 0x1F) << 8) | (u32)(data[2] & 0xFF);
+ min_index = 0;
+ if (delsys == SYS_DVBS) {
+ cn_data = s_cn_data;
+ max_index = sizeof(s_cn_data) /
+ sizeof(s_cn_data[0]) - 1;
+ } else {
+ cn_data = s2_cn_data;
+ max_index = sizeof(s2_cn_data) /
+ sizeof(s2_cn_data[0]) - 1;
+ }
+ if (value >= cn_data[min_index].value) {
+ res = cn_data[min_index].cnr_x1000;
+ goto done;
+ }
+ if (value <= cn_data[max_index].value) {
+ res = cn_data[max_index].cnr_x1000;
+ goto done;
+ }
+ while ((max_index - min_index) > 1) {
+ index = (max_index + min_index) / 2;
+ if (value == cn_data[index].value) {
+ res = cn_data[index].cnr_x1000;
+ goto done;
+ } else if (value > cn_data[index].value)
+ max_index = index;
+ else
+ min_index = index;
+ if ((max_index - min_index) <= 1) {
+ if (value == cn_data[max_index].value) {
+ res = cn_data[max_index].cnr_x1000;
+ goto done;
+ } else {
+ res = cn_data[min_index].cnr_x1000;
+ goto done;
+ }
+ }
+ }
+ } else {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no data available\n", __func__);
+ }
+done:
+ return res;
+}
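+
+/*
+ * The lookup above is a binary search over s_cn_data/s2_cn_data (defined
+ * earlier in this file), which are assumed sorted by decreasing .value:
+ * the two boundary checks clamp to the table ends, and the loop narrows
+ * [min_index, max_index] until the nearest entry's .cnr_x1000 (C/N in
+ * 0.001 dB) is returned.
+ */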
+
+static int cxd2841er_read_snr_t(struct cxd2841er_priv *priv, u32 *snr)
+{
+ u32 reg;
+ u8 data[2];
+
+ *snr = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n", __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ reg = ((u32)data[0] << 8) | (u32)data[1];
+ if (reg == 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): reg value out of range\n", __func__);
+ return 0;
+ }
+ if (reg > 4996)
+ reg = 4996;
+ *snr = div_s64(10000LL * ((s64)intlog10(reg) -
+ (s64)intlog10(5350 - reg)), 1 << 24) + 28500;
+ return 0;
+}
+
+static int cxd2841er_read_snr_t2(struct cxd2841er_priv *priv, u32 *snr)
+{
+ u32 reg;
+ u8 data[2];
+
+ *snr = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n", __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ reg = ((u32)data[0] << 8) | (u32)data[1];
+ if (reg == 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): reg value out of range\n", __func__);
+ return 0;
+ }
+ if (reg > 10876)
+ reg = 10876;
+ *snr = div_s64(10000LL * ((s64)intlog10(reg) -
+ (s64)intlog10(12600 - reg)), 1 << 24) + 32000;
+ return 0;
+}
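+
+/*
+ * Both helpers above rely on intlog10() from dvb_math.h, which returns
+ * log10(x) scaled by 2^24.  Scaling the difference of two such values by
+ * 10000 and dividing by 2^24 therefore yields 10 * log10(ratio) in
+ * 0.001 dB units, to which a constant offset (28.5 dB or 32 dB) is added.
+ */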
+
+static u16 cxd2841er_read_agc_gain_t_t2(struct cxd2841er_priv *priv,
+ u8 delsys)
+{
+ u8 data[2];
+
+ cxd2841er_write_reg(
+ priv, I2C_SLVT, 0x00, (delsys == SYS_DVBT ? 0x10 : 0x20));
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x26, data, 2);
+ return ((((u16)data[0] & 0x0F) << 8) | (u16)(data[1] & 0xFF)) << 4;
+}
+
+static u16 cxd2841er_read_agc_gain_s(struct cxd2841er_priv *priv)
+{
+ u8 data[2];
+
+ /* Set SLV-T Bank : 0xA0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ /*
+ * slave Bank Addr Bit Signal name
+ * <SLV-T> A0h 1Fh [4:0] IRFAGC_GAIN[12:8]
+ * <SLV-T> A0h 20h [7:0] IRFAGC_GAIN[7:0]
+ */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x1f, data, 2);
+ return ((((u16)data[0] & 0x1F) << 8) | (u16)(data[1] & 0xFF)) << 3;
+}
+
+static int cxd2841er_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ *ber = 0;
+ switch (p->delivery_system) {
+ case SYS_DVBS:
+ *ber = cxd2841er_mon_read_ber_s(priv);
+ break;
+ case SYS_DVBS2:
+ *ber = cxd2841er_mon_read_ber_s2(priv);
+ break;
+ case SYS_DVBT:
+ return cxd2841er_read_ber_t(priv, ber);
+ case SYS_DVBT2:
+ return cxd2841er_read_ber_t2(priv, ber);
+ default:
+ *ber = 0;
+ break;
+ }
+ return 0;
+}
+
+static int cxd2841er_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ switch (p->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ *strength = 65535 - cxd2841er_read_agc_gain_t_t2(
+ priv, p->delivery_system);
+ break;
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ *strength = 65535 - cxd2841er_read_agc_gain_s(priv);
+ break;
+ default:
+ *strength = 0;
+ break;
+ }
+ return 0;
+}
+
+static int cxd2841er_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ u32 tmp = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ switch (p->delivery_system) {
+ case SYS_DVBT:
+ cxd2841er_read_snr_t(priv, &tmp);
+ break;
+ case SYS_DVBT2:
+ cxd2841er_read_snr_t2(priv, &tmp);
+ break;
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ tmp = cxd2841er_dvbs_read_snr(priv, p->delivery_system);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): unknown delivery system %d\n",
+ __func__, p->delivery_system);
+ break;
+ }
+ *snr = tmp & 0xffff;
+ return 0;
+}
+
+static int cxd2841er_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ switch (p->delivery_system) {
+ case SYS_DVBT:
+ cxd2841er_read_packet_errors_t(priv, ucblocks);
+ break;
+ case SYS_DVBT2:
+ cxd2841er_read_packet_errors_t2(priv, ucblocks);
+ break;
+ default:
+ *ucblocks = 0;
+ break;
+ }
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ return 0;
+}
+
+static int cxd2841er_dvbt2_set_profile(
+ struct cxd2841er_priv *priv, enum cxd2841er_dvbt2_profile_t profile)
+{
+ u8 tune_mode;
+ u8 seq_not2d_time;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ switch (profile) {
+ case DVBT2_PROFILE_BASE:
+ tune_mode = 0x01;
+ seq_not2d_time = 12;
+ break;
+ case DVBT2_PROFILE_LITE:
+ tune_mode = 0x05;
+ seq_not2d_time = 40;
+ break;
+ case DVBT2_PROFILE_ANY:
+ tune_mode = 0x00;
+ seq_not2d_time = 40;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x2E */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2e);
+ /* Set profile and tune mode */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x10, tune_mode, 0x07);
+ /* Set SLV-T Bank : 0x2B */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+ /* Set early unlock detection time */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9d, seq_not2d_time);
+ return 0;
+}
+
+static int cxd2841er_dvbt2_set_plp_config(struct cxd2841er_priv *priv,
+ u8 is_auto, u8 plp_id)
+{
+ if (is_auto) {
+ dev_dbg(&priv->i2c->dev,
+ "%s() using auto PLP selection\n", __func__);
+ } else {
+ dev_dbg(&priv->i2c->dev,
+ "%s() using manual PLP selection, ID %d\n",
+ __func__, plp_id);
+ }
+ /* Set SLV-T Bank : 0x23 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x23);
+ if (!is_auto) {
+ /* Manual PLP selection mode. Set the data PLP Id. */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xaf, plp_id);
+ }
+ /* Auto PLP select (Scanning mode = 0x00). Data PLP select = 0x01. */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xad, (is_auto ? 0x00 : 0x01));
+ return 0;
+}
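+
+/*
+ * Caller contract, as used by cxd2841er_set_frontend_tc() below: automatic
+ * PLP selection is requested with is_auto = 1 (the driver does this when
+ * p->stream_id > 255); otherwise the low eight bits of the stream ID are
+ * programmed as the data PLP to demodulate.
+ */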
+
+static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u32 iffreq;
+ u8 b20_9f[5];
+ u8 b10_a6[14];
+ u8 b10_b6[3];
+ u8 b10_d7;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ switch (bandwidth) {
+ case 8000000:
+ /* bank 0x20, reg 0x9f */
+ b20_9f[0] = 0x11;
+ b20_9f[1] = 0xf0;
+ b20_9f[2] = 0x00;
+ b20_9f[3] = 0x00;
+ b20_9f[4] = 0x00;
+ /* bank 0x10, reg 0xa6 */
+ b10_a6[0] = 0x26;
+ b10_a6[1] = 0xaf;
+ b10_a6[2] = 0x06;
+ b10_a6[3] = 0xcd;
+ b10_a6[4] = 0x13;
+ b10_a6[5] = 0xbb;
+ b10_a6[6] = 0x28;
+ b10_a6[7] = 0xba;
+ b10_a6[8] = 0x23;
+ b10_a6[9] = 0xa9;
+ b10_a6[10] = 0x1f;
+ b10_a6[11] = 0xa8;
+ b10_a6[12] = 0x2c;
+ b10_a6[13] = 0xc8;
+ iffreq = MAKE_IFFREQ_CONFIG(4.80);
+ b10_d7 = 0x00;
+ break;
+ case 7000000:
+ /* bank 0x20, reg 0x9f */
+ b20_9f[0] = 0x14;
+ b20_9f[1] = 0x80;
+ b20_9f[2] = 0x00;
+ b20_9f[3] = 0x00;
+ b20_9f[4] = 0x00;
+ /* bank 0x10, reg 0xa6 */
+ b10_a6[0] = 0x2C;
+ b10_a6[1] = 0xBD;
+ b10_a6[2] = 0x02;
+ b10_a6[3] = 0xCF;
+ b10_a6[4] = 0x04;
+ b10_a6[5] = 0xF8;
+ b10_a6[6] = 0x23;
+ b10_a6[7] = 0xA6;
+ b10_a6[8] = 0x29;
+ b10_a6[9] = 0xB0;
+ b10_a6[10] = 0x26;
+ b10_a6[11] = 0xA9;
+ b10_a6[12] = 0x21;
+ b10_a6[13] = 0xA5;
+ iffreq = MAKE_IFFREQ_CONFIG(4.2);
+ b10_d7 = 0x02;
+ break;
+ case 6000000:
+ /* bank 0x20, reg 0x9f */
+ b20_9f[0] = 0x17;
+ b20_9f[1] = 0xEA;
+ b20_9f[2] = 0xAA;
+ b20_9f[3] = 0xAA;
+ b20_9f[4] = 0xAA;
+ /* bank 0x10, reg 0xa6 */
+ b10_a6[0] = 0x27;
+ b10_a6[1] = 0xA7;
+ b10_a6[2] = 0x28;
+ b10_a6[3] = 0xB3;
+ b10_a6[4] = 0x02;
+ b10_a6[5] = 0xF0;
+ b10_a6[6] = 0x01;
+ b10_a6[7] = 0xE8;
+ b10_a6[8] = 0x00;
+ b10_a6[9] = 0xCF;
+ b10_a6[10] = 0x00;
+ b10_a6[11] = 0xE6;
+ b10_a6[12] = 0x23;
+ b10_a6[13] = 0xA4;
+ iffreq = MAKE_IFFREQ_CONFIG(3.6);
+ b10_d7 = 0x04;
+ break;
+ case 5000000:
+ /* bank 0x20, reg 0x9f */
+ b20_9f[0] = 0x1C;
+ b20_9f[1] = 0xB3;
+ b20_9f[2] = 0x33;
+ b20_9f[3] = 0x33;
+ b20_9f[4] = 0x33;
+ /* bank 0x10, reg 0xa6 */
+ b10_a6[0] = 0x27;
+ b10_a6[1] = 0xA7;
+ b10_a6[2] = 0x28;
+ b10_a6[3] = 0xB3;
+ b10_a6[4] = 0x02;
+ b10_a6[5] = 0xF0;
+ b10_a6[6] = 0x01;
+ b10_a6[7] = 0xE8;
+ b10_a6[8] = 0x00;
+ b10_a6[9] = 0xCF;
+ b10_a6[10] = 0x00;
+ b10_a6[11] = 0xE6;
+ b10_a6[12] = 0x23;
+ b10_a6[13] = 0xA4;
+ iffreq = MAKE_IFFREQ_CONFIG(3.6);
+ b10_d7 = 0x06;
+ break;
+ case 1712000:
+ /* bank 0x20, reg 0x9f */
+ b20_9f[0] = 0x58;
+ b20_9f[1] = 0xE2;
+ b20_9f[2] = 0xAF;
+ b20_9f[3] = 0xE0;
+ b20_9f[4] = 0xBC;
+ /* bank 0x10, reg 0xa6 */
+ b10_a6[0] = 0x25;
+ b10_a6[1] = 0xA0;
+ b10_a6[2] = 0x36;
+ b10_a6[3] = 0x8D;
+ b10_a6[4] = 0x2E;
+ b10_a6[5] = 0x94;
+ b10_a6[6] = 0x28;
+ b10_a6[7] = 0x9B;
+ b10_a6[8] = 0x32;
+ b10_a6[9] = 0x90;
+ b10_a6[10] = 0x2C;
+ b10_a6[11] = 0x9D;
+ b10_a6[12] = 0x29;
+ b10_a6[13] = 0x99;
+ iffreq = MAKE_IFFREQ_CONFIG(3.5);
+ b10_d7 = 0x03;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x20 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x9f, b20_9f, sizeof(b20_9f));
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0x7a,
+ (bandwidth == 1712000 ? 0x03 : 0x00), 0x0f);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* Group delay equaliser setting for ASCOT2E */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xa6, b10_a6, sizeof(b10_a6));
+ /* <IF freq setting> */
+ b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+ b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+ b10_b6[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, b10_d7, 0x07);
+ return 0;
+}
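+
+/*
+ * The b10_b6[] split above stores the 24-bit word produced by
+ * MAKE_IFFREQ_CONFIG() big-endian into three consecutive registers
+ * starting at 0xb6; an iffreq of 0x123456, for instance, is written as
+ * { 0x12, 0x34, 0x56 }.  The DVB-T and DVB-C band helpers below repeat
+ * the same pattern.
+ */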
+
+static int cxd2841er_sleep_tc_to_active_t_band(
+ struct cxd2841er_priv *priv, u32 bandwidth)
+{
+ u8 b13_9c[2] = { 0x01, 0x14 };
+ u8 bw8mhz_b10_9f[] = { 0x11, 0xF0, 0x00, 0x00, 0x00 };
+ u8 bw8mhz_b10_a6[] = { 0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB,
+ 0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8 };
+ u8 bw8mhz_b10_d9[] = { 0x01, 0xE0 };
+ u8 bw8mhz_b17_38[] = { 0x01, 0x02 };
+ u8 bw7mhz_b10_9f[] = { 0x14, 0x80, 0x00, 0x00, 0x00 };
+ u8 bw7mhz_b10_a6[] = { 0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8,
+ 0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5 };
+ u8 bw7mhz_b10_d9[] = { 0x12, 0xF8 };
+ u8 bw7mhz_b17_38[] = { 0x00, 0x03 };
+ u8 bw6mhz_b10_9f[] = { 0x17, 0xEA, 0xAA, 0xAA, 0xAA };
+ u8 bw6mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
+ 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+ u8 bw6mhz_b10_d9[] = { 0x1F, 0xDC };
+ u8 bw6mhz_b17_38[] = { 0x00, 0x03 };
+ u8 bw5mhz_b10_9f[] = { 0x1C, 0xB3, 0x33, 0x33, 0x33 };
+ u8 bw5mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
+ 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+ u8 bw5mhz_b10_d9[] = { 0x26, 0x3C };
+ u8 bw5mhz_b17_38[] = { 0x00, 0x03 };
+ u8 b10_b6[3];
+ u8 d7val;
+ u32 iffreq;
+ u8 *b10_9f;
+ u8 *b10_a6;
+ u8 *b10_d9;
+ u8 *b17_38;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+ /* Echo performance optimization setting */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x9c, b13_9c, sizeof(b13_9c));
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ switch (bandwidth) {
+ case 8000000:
+ b10_9f = bw8mhz_b10_9f;
+ b10_a6 = bw8mhz_b10_a6;
+ b10_d9 = bw8mhz_b10_d9;
+ b17_38 = bw8mhz_b17_38;
+ d7val = 0;
+ iffreq = MAKE_IFFREQ_CONFIG(4.80);
+ break;
+ case 7000000:
+ b10_9f = bw7mhz_b10_9f;
+ b10_a6 = bw7mhz_b10_a6;
+ b10_d9 = bw7mhz_b10_d9;
+ b17_38 = bw7mhz_b17_38;
+ d7val = 2;
+ iffreq = MAKE_IFFREQ_CONFIG(4.20);
+ break;
+ case 6000000:
+ b10_9f = bw6mhz_b10_9f;
+ b10_a6 = bw6mhz_b10_a6;
+ b10_d9 = bw6mhz_b10_d9;
+ b17_38 = bw6mhz_b17_38;
+ d7val = 4;
+ iffreq = MAKE_IFFREQ_CONFIG(3.60);
+ break;
+ case 5000000:
+ b10_9f = bw5mhz_b10_9f;
+ b10_a6 = bw5mhz_b10_a6;
+ b10_d9 = bw5mhz_b10_d9;
+ b17_38 = bw5mhz_b17_38;
+ d7val = 6;
+ iffreq = MAKE_IFFREQ_CONFIG(3.60);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
+ __func__, bandwidth);
+ return -EINVAL;
+ }
+ /* <IF freq setting> */
+ b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+ b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+ b10_b6[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0x9f, b10_9f, sizeof(bw8mhz_b10_9f));
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xa6, b10_a6, sizeof(bw8mhz_b10_a6));
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, d7val, 0x7);
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xd9, b10_d9, sizeof(bw8mhz_b10_d9));
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0x38, b17_38, sizeof(bw8mhz_b17_38));
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u8 bw7_8mhz_b10_a6[] = {
+ 0x2D, 0xC7, 0x04, 0xF4, 0x07, 0xC5, 0x2A, 0xB8,
+ 0x27, 0x9E, 0x27, 0xA4, 0x29, 0xAB };
+ u8 bw6mhz_b10_a6[] = {
+ 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+ 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
+ u8 b10_b6[3];
+ u32 iffreq;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ switch (bandwidth) {
+ case 8000000:
+ case 7000000:
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xa6,
+ bw7_8mhz_b10_a6, sizeof(bw7_8mhz_b10_a6));
+ iffreq = MAKE_IFFREQ_CONFIG(4.9);
+ break;
+ case 6000000:
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xa6,
+ bw6mhz_b10_a6, sizeof(bw6mhz_b10_a6));
+ iffreq = MAKE_IFFREQ_CONFIG(3.7);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): unsupported bandwidth %d\n",
+ __func__, bandwidth);
+ return -EINVAL;
+ }
+ /* <IF freq setting> */
+ b10_b6[0] = (u8)((iffreq >> 16) & 0xff);
+ b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
+ b10_b6[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
+ /* Set SLV-T Bank : 0x11 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ switch (bandwidth) {
+ case 8000000:
+ case 7000000:
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xa3, 0x00, 0x1f);
+ break;
+ case 6000000:
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xa3, 0x14, 0x1f);
+ break;
+ }
+ /* Set SLV-T Bank : 0x40 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ switch (bandwidth) {
+ case 8000000:
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0x26, 0x0b, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0x3e);
+ break;
+ case 7000000:
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0x26, 0x09, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0xd6);
+ break;
+ case 6000000:
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0x26, 0x08, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x27, 0x6e);
+ break;
+ }
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u8 data[2] = { 0x09, 0x54 };
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_set_ts_clock_mode(priv, SYS_DVBT);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Enable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Enable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+ /* xtal freq 20.5MHz */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+ /* Enable ADC 4 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* IFAGC gain settings */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x0c, 0x1f);
+ /* Set SLV-T Bank : 0x11 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ /* BBAGC TARGET level setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* ASCOT setting ON */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* Set SLV-T Bank : 0x18 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18);
+ /* Pre-RS BER monitor setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x36, 0x40, 0x07);
+ /* FEC Auto Recovery setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x31, 0x01, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* TSIF setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+ cxd2841er_sleep_tc_to_active_t_band(priv, bandwidth);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable HiZ Setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+ /* Disable HiZ Setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+ priv->state = STATE_ACTIVE_TC;
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u8 data[2] = { 0x09, 0x54 };
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_set_ts_clock_mode(priv, SYS_DVBT2);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x02);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Enable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Enable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+ /* xtal freq 20.5MHz */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+ /* Enable ADC 4 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* IFAGC gain settings */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x0c, 0x1f);
+ /* Set SLV-T Bank : 0x11 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ /* BBAGC TARGET level setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* ASCOT setting ON */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* Set SLV-T Bank : 0x20 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ /* Acquisition optimization setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x8b, 0x3c);
+ /* Set SLV-T Bank : 0x2b */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x76, 0x20, 0x70);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* TSIF setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+ /* DVB-T2 initial setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x83, 0x10);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x86, 0x34);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x9e, 0x09, 0x0f);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9f, 0xd8);
+ /* Set SLV-T Bank : 0x2a */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2a);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x38, 0x04, 0x0f);
+ /* Set SLV-T Bank : 0x2b */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x11, 0x20, 0x3f);
+
+ cxd2841er_sleep_tc_to_active_t2_band(priv, bandwidth);
+
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable HiZ Setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+ /* Disable HiZ Setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+ priv->state = STATE_ACTIVE_TC;
+ return 0;
+}
+
+static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u8 data[2] = { 0x09, 0x54 };
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_set_ts_clock_mode(priv, SYS_DVBC_ANNEX_A);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod mode */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x04);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Enable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Enable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+ /* xtal freq 20.5MHz */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+ /* Enable ADC 4 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* IFAGC gain settings */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd2, 0x09, 0x1f);
+ /* Set SLV-T Bank : 0x11 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ /* BBAGC TARGET level setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x48);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ /* ASCOT setting ON */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* Set SLV-T Bank : 0x40 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ /* Demod setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc3, 0x00, 0x04);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* TSIF setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+
+ cxd2841er_sleep_tc_to_active_c_band(priv, 8000000);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable HiZ Setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+ /* Disable HiZ Setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+ priv->state = STATE_ACTIVE_TC;
+ return 0;
+}
+
+static int cxd2841er_get_frontend(struct dvb_frontend *fe)
+{
+ enum fe_status status = 0;
+ u16 strength = 0, snr = 0;
+ u32 errors = 0, ber = 0;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_ACTIVE_S)
+ cxd2841er_read_status_s(fe, &status);
+ else if (priv->state == STATE_ACTIVE_TC)
+ cxd2841er_read_status_tc(fe, &status);
+
+ if (status & FE_HAS_LOCK) {
+ cxd2841er_read_signal_strength(fe, &strength);
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->strength.stat[0].uvalue = strength;
+ cxd2841er_read_snr(fe, &snr);
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].svalue = snr;
+ cxd2841er_read_ucblocks(fe, &errors);
+ p->block_error.len = 1;
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].uvalue = errors;
+ cxd2841er_read_ber(fe, &ber);
+ p->post_bit_error.len = 1;
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].uvalue = ber;
+ } else {
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.len = 1;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.len = 1;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+ return 0;
+}
+
+static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
+{
+ int ret = 0, i, timeout, carr_offset;
+ enum fe_status status;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 symbol_rate = p->symbol_rate/1000;
+
+ dev_dbg(&priv->i2c->dev, "%s(): %s frequency=%d symbol_rate=%d\n",
+ __func__,
+ (p->delivery_system == SYS_DVBS ? "DVB-S" : "DVB-S2"),
+ p->frequency, symbol_rate);
+ switch (priv->state) {
+ case STATE_SLEEP_S:
+ ret = cxd2841er_sleep_s_to_active_s(
+ priv, p->delivery_system, symbol_rate);
+ break;
+ case STATE_ACTIVE_S:
+ ret = cxd2841er_retune_active(priv, p);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (ret) {
+ dev_dbg(&priv->i2c->dev, "%s(): tune failed\n", __func__);
+ goto done;
+ }
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ cxd2841er_tune_done(priv);
+ timeout = ((3000000 + (symbol_rate - 1)) / symbol_rate) + 150;
+ for (i = 0; i < timeout / CXD2841ER_DVBS_POLLING_INVL; i++) {
+ usleep_range(CXD2841ER_DVBS_POLLING_INVL*1000,
+ (CXD2841ER_DVBS_POLLING_INVL + 2) * 1000);
+ cxd2841er_read_status_s(fe, &status);
+ if (status & FE_HAS_LOCK)
+ break;
+ }
+ if (status & FE_HAS_LOCK) {
+ if (cxd2841er_get_carrier_offset_s_s2(
+ priv, &carr_offset)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ dev_dbg(&priv->i2c->dev, "%s(): carrier_offset=%d\n",
+ __func__, carr_offset);
+ }
+done:
+ return ret;
+}
+
+static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
+{
+ int ret = 0, timeout;
+ enum fe_status status;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (p->delivery_system == SYS_DVBT) {
+ priv->system = SYS_DVBT;
+ switch (priv->state) {
+ case STATE_SLEEP_TC:
+ ret = cxd2841er_sleep_tc_to_active_t(
+ priv, p->bandwidth_hz);
+ break;
+ case STATE_ACTIVE_TC:
+ ret = cxd2841er_retune_active(priv, p);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ ret = -EINVAL;
+ }
+ } else if (p->delivery_system == SYS_DVBT2) {
+ priv->system = SYS_DVBT2;
+ cxd2841er_dvbt2_set_plp_config(priv,
+ (int)(p->stream_id > 255), p->stream_id);
+ cxd2841er_dvbt2_set_profile(priv, DVBT2_PROFILE_BASE);
+ switch (priv->state) {
+ case STATE_SLEEP_TC:
+ ret = cxd2841er_sleep_tc_to_active_t2(priv,
+ p->bandwidth_hz);
+ break;
+ case STATE_ACTIVE_TC:
+ ret = cxd2841er_retune_active(priv, p);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ ret = -EINVAL;
+ }
+ } else if (p->delivery_system == SYS_DVBC_ANNEX_A ||
+ p->delivery_system == SYS_DVBC_ANNEX_C) {
+ priv->system = SYS_DVBC_ANNEX_A;
+ switch (priv->state) {
+ case STATE_SLEEP_TC:
+ ret = cxd2841er_sleep_tc_to_active_c(
+ priv, p->bandwidth_hz);
+ break;
+ case STATE_ACTIVE_TC:
+ ret = cxd2841er_retune_active(priv, p);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ ret = -EINVAL;
+ }
+ } else {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid delivery system %d\n",
+ __func__, p->delivery_system);
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto done;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ cxd2841er_tune_done(priv);
+ timeout = 2500;
+ while (timeout > 0) {
+ ret = cxd2841er_read_status_tc(fe, &status);
+ if (ret)
+ goto done;
+ if (status & FE_HAS_LOCK)
+ break;
+ msleep(20);
+ timeout -= 20;
+ }
+ if (timeout <= 0)
+ dev_dbg(&priv->i2c->dev,
+ "%s(): LOCK wait timeout\n", __func__);
+done:
+ return ret;
+}
+
+static int cxd2841er_tune_s(struct dvb_frontend *fe,
+ bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay,
+ enum fe_status *status)
+{
+ int ret, carrier_offset;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ dev_dbg(&priv->i2c->dev, "%s() re_tune=%d\n", __func__, re_tune);
+ if (re_tune) {
+ ret = cxd2841er_set_frontend_s(fe);
+ if (ret)
+ return ret;
+ cxd2841er_read_status_s(fe, status);
+ if (*status & FE_HAS_LOCK) {
+ if (cxd2841er_get_carrier_offset_s_s2(
+ priv, &carrier_offset))
+ return -EINVAL;
+ p->frequency += carrier_offset;
+ ret = cxd2841er_set_frontend_s(fe);
+ if (ret)
+ return ret;
+ }
+ }
+ *delay = HZ / 5;
+ return cxd2841er_read_status_s(fe, status);
+}
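+
+/*
+ * The re-tune path above is a two-pass zap: tune once, and if the
+ * demodulator locks, fold the measured carrier offset back into
+ * p->frequency and tune again so the tuner is centred on the actual
+ * carrier.  cxd2841er_tune_tc() below follows the same pattern for the
+ * terrestrial and cable systems.
+ */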
+
+static int cxd2841er_tune_tc(struct dvb_frontend *fe,
+ bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay,
+ enum fe_status *status)
+{
+ int ret, carrier_offset;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ dev_dbg(&priv->i2c->dev, "%s(): re_tune %d\n", __func__, re_tune);
+ if (re_tune) {
+ ret = cxd2841er_set_frontend_tc(fe);
+ if (ret)
+ return ret;
+ cxd2841er_read_status_tc(fe, status);
+ if (*status & FE_HAS_LOCK) {
+ switch (priv->system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ ret = cxd2841er_get_carrier_offset_t2(
+ priv, p->bandwidth_hz,
+ &carrier_offset);
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = cxd2841er_get_carrier_offset_c(
+ priv, &carrier_offset);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+ dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n",
+ __func__, carrier_offset);
+ p->frequency += carrier_offset;
+ ret = cxd2841er_set_frontend_tc(fe);
+ if (ret)
+ return ret;
+ }
+ }
+ *delay = HZ / 5;
+ return cxd2841er_read_status_tc(fe, status);
+}
+
+static int cxd2841er_sleep_s(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_active_s_to_sleep_s(fe->demodulator_priv);
+ cxd2841er_sleep_s_to_shutdown(fe->demodulator_priv);
+ return 0;
+}
+
+static int cxd2841er_sleep_tc(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_ACTIVE_TC) {
+ switch (priv->system) {
+ case SYS_DVBT:
+ cxd2841er_active_t_to_sleep_tc(priv);
+ break;
+ case SYS_DVBT2:
+ cxd2841er_active_t2_to_sleep_tc(priv);
+ break;
+ case SYS_DVBC_ANNEX_A:
+ cxd2841er_active_c_to_sleep_tc(priv);
+ break;
+ default:
+ dev_warn(&priv->i2c->dev,
+ "%s(): unknown delivery system %d\n",
+ __func__, priv->system);
+ }
+ }
+ if (priv->state != STATE_SLEEP_TC) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_sleep_tc_to_shutdown(priv);
+ return 0;
+}
+
+static int cxd2841er_send_burst(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd burst)
+{
+ u8 data;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s(): burst mode %s\n", __func__,
+ (burst == SEC_MINI_A ? "A" : "B"));
+ if (priv->state != STATE_SLEEP_S &&
+ priv->state != STATE_ACTIVE_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ data = (burst == SEC_MINI_A ? 0 : 1);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x34, 0x01);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x35, data);
+ return 0;
+}
+
+static int cxd2841er_set_tone(struct dvb_frontend *fe,
+ enum fe_sec_tone_mode tone)
+{
+ u8 data;
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s(): tone %s\n", __func__,
+ (tone == SEC_TONE_ON ? "On" : "Off"));
+ if (priv->state != STATE_SLEEP_S &&
+ priv->state != STATE_ACTIVE_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ data = (tone == SEC_TONE_ON ? 1 : 0);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x36, data);
+ return 0;
+}
+
+static int cxd2841er_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ int i;
+ u8 data[12];
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ if (priv->state != STATE_SLEEP_S &&
+ priv->state != STATE_ACTIVE_S) {
+ dev_err(&priv->i2c->dev, "%s(): invalid demod state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ dev_dbg(&priv->i2c->dev,
+ "%s(): cmd->len %d\n", __func__, cmd->msg_len);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xbb);
+	/* DiSEqC enable */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x33, 0x01);
+ /* cmd1 length & data */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x3d, cmd->msg_len);
+ memset(data, 0, sizeof(data));
+ for (i = 0; i < cmd->msg_len && i < sizeof(data); i++)
+ data[i] = cmd->msg[i];
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x3e, data, sizeof(data));
+ /* repeat count for cmd1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x37, 1);
+ /* repeat count for cmd2: always 0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x38, 0);
+ /* start transmit */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x32, 0x01);
+	/* poll up to 1 s (50 x 20 ms) for the command to complete */
+ for (i = 0; i < 50; i++) {
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x10, data);
+ if (!data[0]) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): DiSEqC cmd has been sent\n", __func__);
+ return 0;
+ }
+ msleep(20);
+ }
+ dev_dbg(&priv->i2c->dev,
+ "%s(): DiSEqC cmd transmit timeout\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static void cxd2841er_release(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ kfree(priv);
+}
+
+static int cxd2841er_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s(): enable=%d\n", __func__, enable);
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVX, 0x8, (enable ? 0x01 : 0x00), 0x01);
+ return 0;
+}
+
+static enum dvbfe_algo cxd2841er_get_algo(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ return DVBFE_ALGO_HW;
+}
+
+static int cxd2841er_init_s(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_shutdown_to_sleep_s(priv);
+ /* SONY_DEMOD_CONFIG_SAT_IFAGCNEG set to 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xb9, 0x01, 0x01);
+ return 0;
+}
+
+static int cxd2841er_init_tc(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_shutdown_to_sleep_tc(priv);
+ /* SONY_DEMOD_CONFIG_IFAGCNEG = 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcb, 0x40, 0x40);
+ /* SONY_DEMOD_CONFIG_IFAGC_ADC_FS = 0 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xcd, 0x50);
+ /* SONY_DEMOD_CONFIG_PARALLEL_SEL = 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4, 0x00, 0x80);
+ return 0;
+}
+
+static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
+static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops;
+static struct dvb_frontend_ops cxd2841er_dvbc_ops;
+
+static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c,
+ u8 system)
+{
+ u8 chip_id = 0;
+ const char *type;
+ struct cxd2841er_priv *priv = NULL;
+
+ /* allocate memory for the internal state */
+ priv = kzalloc(sizeof(struct cxd2841er_priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ priv->i2c = i2c;
+ priv->config = cfg;
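+	/* cfg->i2c_addr is the 8-bit address; store the 7-bit SLVX/SLVT forms */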
+ priv->i2c_addr_slvx = (cfg->i2c_addr + 4) >> 1;
+ priv->i2c_addr_slvt = (cfg->i2c_addr) >> 1;
+ /* create dvb_frontend */
+ switch (system) {
+ case SYS_DVBS:
+ memcpy(&priv->frontend.ops,
+ &cxd2841er_dvbs_s2_ops,
+ sizeof(struct dvb_frontend_ops));
+ type = "S/S2";
+ break;
+ case SYS_DVBT:
+ memcpy(&priv->frontend.ops,
+ &cxd2841er_dvbt_t2_ops,
+ sizeof(struct dvb_frontend_ops));
+ type = "T/T2";
+ break;
+ case SYS_DVBC_ANNEX_A:
+ memcpy(&priv->frontend.ops,
+ &cxd2841er_dvbc_ops,
+ sizeof(struct dvb_frontend_ops));
+ type = "C/C2";
+ break;
+ default:
+ kfree(priv);
+ return NULL;
+ }
+ priv->frontend.demodulator_priv = priv;
+ dev_info(&priv->i2c->dev,
+ "%s(): attaching CXD2841ER DVB-%s frontend\n",
+ __func__, type);
+ dev_info(&priv->i2c->dev,
+ "%s(): I2C adapter %p SLVX addr %x SLVT addr %x\n",
+ __func__, priv->i2c,
+ priv->i2c_addr_slvx, priv->i2c_addr_slvt);
+ chip_id = cxd2841er_chip_id(priv);
+ if (chip_id != CXD2841ER_CHIP_ID) {
+ dev_err(&priv->i2c->dev, "%s(): invalid chip ID 0x%02x\n",
+ __func__, chip_id);
+ priv->frontend.demodulator_priv = NULL;
+ kfree(priv);
+ return NULL;
+ }
+ dev_info(&priv->i2c->dev, "%s(): chip ID 0x%02x OK.\n",
+ __func__, chip_id);
+ return &priv->frontend;
+}
+
+struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ return cxd2841er_attach(cfg, i2c, SYS_DVBS);
+}
+EXPORT_SYMBOL(cxd2841er_attach_s);
+
+struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ return cxd2841er_attach(cfg, i2c, SYS_DVBT);
+}
+EXPORT_SYMBOL(cxd2841er_attach_t);
+
+struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ return cxd2841er_attach(cfg, i2c, SYS_DVBC_ANNEX_A);
+}
+EXPORT_SYMBOL(cxd2841er_attach_c);
+
+static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Sony CXD2841ER DVB-S/S2 demodulator",
+ .frequency_min = 500000,
+ .frequency_max = 2500000,
+ .frequency_stepsize = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .symbol_rate_tolerance = 500,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK,
+ },
+ .init = cxd2841er_init_s,
+ .sleep = cxd2841er_sleep_s,
+ .release = cxd2841er_release,
+ .set_frontend = cxd2841er_set_frontend_s,
+ .get_frontend = cxd2841er_get_frontend,
+ .read_status = cxd2841er_read_status_s,
+ .i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+ .get_frontend_algo = cxd2841er_get_algo,
+ .set_tone = cxd2841er_set_tone,
+ .diseqc_send_burst = cxd2841er_send_burst,
+ .diseqc_send_master_cmd = cxd2841er_send_diseqc_msg,
+ .tune = cxd2841er_tune_s
+};
+
+static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = {
+ .delsys = { SYS_DVBT, SYS_DVBT2 },
+ .info = {
+ .name = "Sony CXD2841ER DVB-T/T2 demodulator",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION,
+ .frequency_min = 42000000,
+ .frequency_max = 1002000000
+ },
+ .init = cxd2841er_init_tc,
+ .sleep = cxd2841er_sleep_tc,
+ .release = cxd2841er_release,
+ .set_frontend = cxd2841er_set_frontend_tc,
+ .get_frontend = cxd2841er_get_frontend,
+ .read_status = cxd2841er_read_status_tc,
+ .tune = cxd2841er_tune_tc,
+ .i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+ .get_frontend_algo = cxd2841er_get_algo
+};
+
+static struct dvb_frontend_ops cxd2841er_dvbc_ops = {
+ .delsys = { SYS_DVBC_ANNEX_A },
+ .info = {
+ .name = "Sony CXD2841ER DVB-C demodulator",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_INVERSION_AUTO,
+ .frequency_min = 42000000,
+ .frequency_max = 1002000000
+ },
+ .init = cxd2841er_init_tc,
+ .sleep = cxd2841er_sleep_tc,
+ .release = cxd2841er_release,
+ .set_frontend = cxd2841er_set_frontend_tc,
+ .get_frontend = cxd2841er_get_frontend,
+ .read_status = cxd2841er_read_status_tc,
+ .tune = cxd2841er_tune_tc,
+ .i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
+ .get_frontend_algo = cxd2841er_get_algo,
+};
+
+MODULE_DESCRIPTION("Sony CXD2841ER DVB-C/C2/T/T2/S/S2 demodulator driver");
+MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
new file mode 100644
index 000000000000..3472bdd58949
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -0,0 +1,65 @@
+/*
+ * cxd2841er.h
+ *
+ * Sony CXD2841ER digital demodulator driver public definitions
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CXD2841ER_H
+#define CXD2841ER_H
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+struct cxd2841er_config {
+ u8 i2c_addr;
+};
+
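+/*
+ * Illustrative usage from a bridge driver (a sketch, not part of this
+ * driver; the adapter pointer and the 8-bit i2c_addr value are
+ * hypothetical):
+ *
+ *	static struct cxd2841er_config demod_cfg = { .i2c_addr = 0xc8 };
+ *	struct dvb_frontend *fe = cxd2841er_attach_s(&demod_cfg, i2c);
+ *
+ * Note that i2c_addr is the 8-bit form; the driver derives the 7-bit
+ * SLVT/SLVX addresses from it internally.
+ */
+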
+#if IS_REACHABLE(CONFIG_DVB_CXD2841ER)
+extern struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c);
+
+extern struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c);
+
+extern struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *cxd2841er_attach_s(
+ struct cxd2841er_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline struct dvb_frontend *cxd2841er_attach_t(
+ struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline struct dvb_frontend *cxd2841er_attach_c(
+ struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/cxd2841er_priv.h b/drivers/media/dvb-frontends/cxd2841er_priv.h
new file mode 100644
index 000000000000..33e2f495277b
--- /dev/null
+++ b/drivers/media/dvb-frontends/cxd2841er_priv.h
@@ -0,0 +1,43 @@
+/*
+ * cxd2841er_priv.h
+ *
+ * Sony CXD2841ER digital demodulator driver internal definitions
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CXD2841ER_PRIV_H
+#define CXD2841ER_PRIV_H
+
+#define I2C_SLVX 0
+#define I2C_SLVT 1
+
+#define CXD2841ER_CHIP_ID 0xa7
+
+#define CXD2841ER_DVBS_POLLING_INVL 10
+
+struct cxd2841er_cnr_data {
+ u32 value;
+ int cnr_x1000;
+};
+
+enum cxd2841er_dvbt2_profile_t {
+ DVBT2_PROFILE_ANY = 0,
+ DVBT2_PROFILE_BASE = 1,
+ DVBT2_PROFILE_LITE = 2
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 6d8fe8843237..53089e142715 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -34,7 +34,7 @@ struct dvb_pll_priv {
struct i2c_adapter *i2c;
/* the PLL descriptor */
- struct dvb_pll_desc *pll_desc;
+ const struct dvb_pll_desc *pll_desc;
/* cached frequency/bandwidth */
u32 frequency;
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(id, "force pll id to use (DEBUG ONLY)");
/* ----------------------------------------------------------- */
struct dvb_pll_desc {
- char *name;
+ const char *name;
u32 min;
u32 max;
u32 iffreq;
@@ -71,13 +71,13 @@ struct dvb_pll_desc {
u32 stepsize;
u8 config;
u8 cb;
- } entries[12];
+ } entries[];
};
/* ----------------------------------------------------------- */
/* descriptions */
-static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
.name = "Thomson dtt7579",
.min = 177000000,
.max = 858000000,
@@ -99,7 +99,7 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
buf[3] |= 0x10;
}
-static struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
.name = "Thomson dtt759x",
.min = 177000000,
.max = 896000000,
@@ -123,7 +123,7 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
buf[3] ^= 0x10;
}
-static struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
+static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
.name = "Thomson dtt7520x",
.min = 185000000,
.max = 900000000,
@@ -141,7 +141,7 @@ static struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
},
};
-static struct dvb_pll_desc dvb_pll_lg_z201 = {
+static const struct dvb_pll_desc dvb_pll_lg_z201 = {
.name = "LG z201",
.min = 174000000,
.max = 862000000,
@@ -157,7 +157,7 @@ static struct dvb_pll_desc dvb_pll_lg_z201 = {
},
};
-static struct dvb_pll_desc dvb_pll_unknown_1 = {
+static const struct dvb_pll_desc dvb_pll_unknown_1 = {
.name = "unknown 1", /* used by dntv live dvb-t */
.min = 174000000,
.max = 862000000,
@@ -179,7 +179,7 @@ static struct dvb_pll_desc dvb_pll_unknown_1 = {
/* Infineon TUA6010XS
* used in Thomson Cable Tuner
*/
-static struct dvb_pll_desc dvb_pll_tua6010xs = {
+static const struct dvb_pll_desc dvb_pll_tua6010xs = {
.name = "Infineon TUA6010XS",
.min = 44250000,
.max = 858000000,
@@ -193,7 +193,7 @@ static struct dvb_pll_desc dvb_pll_tua6010xs = {
};
/* Panasonic env57h1xd5 (some Philips PLL ?) */
-static struct dvb_pll_desc dvb_pll_env57h1xd5 = {
+static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
.name = "Panasonic ENV57H1XD5",
.min = 44250000,
.max = 858000000,
@@ -217,7 +217,7 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
buf[3] |= 0x08;
}
-static struct dvb_pll_desc dvb_pll_tda665x = {
+static const struct dvb_pll_desc dvb_pll_tda665x = {
.name = "Philips TDA6650/TDA6651",
.min = 44250000,
.max = 858000000,
@@ -251,7 +251,7 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
buf[3] |= 0x08;
}
-static struct dvb_pll_desc dvb_pll_tua6034 = {
+static const struct dvb_pll_desc dvb_pll_tua6034 = {
.name = "Infineon TUA6034",
.min = 44250000,
.max = 858000000,
@@ -275,7 +275,7 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
buf[3] |= 0x04;
}
-static struct dvb_pll_desc dvb_pll_tded4 = {
+static const struct dvb_pll_desc dvb_pll_tded4 = {
.name = "ALPS TDED4",
.min = 47000000,
.max = 863000000,
@@ -293,7 +293,7 @@ static struct dvb_pll_desc dvb_pll_tded4 = {
/* ALPS TDHU2
* used in AverTVHD MCE A180
*/
-static struct dvb_pll_desc dvb_pll_tdhu2 = {
+static const struct dvb_pll_desc dvb_pll_tdhu2 = {
.name = "ALPS TDHU2",
.min = 54000000,
.max = 864000000,
@@ -310,7 +310,7 @@ static struct dvb_pll_desc dvb_pll_tdhu2 = {
/* Samsung TBMV30111IN / TBMV30712IN1
* used in Air2PC ATSC - 2nd generation (nxt2002)
*/
-static struct dvb_pll_desc dvb_pll_samsung_tbmv = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
.name = "Samsung TBMV30111IN / TBMV30712IN1",
.min = 54000000,
.max = 860000000,
@@ -329,7 +329,7 @@ static struct dvb_pll_desc dvb_pll_samsung_tbmv = {
/*
* Philips SD1878 Tuner.
*/
-static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
+static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
.name = "Philips SD1878",
.min = 950000,
.max = 2150000,
@@ -395,7 +395,7 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
return;
}
-static struct dvb_pll_desc dvb_pll_opera1 = {
+static const struct dvb_pll_desc dvb_pll_opera1 = {
.name = "Opera Tuner",
.min = 900000,
.max = 2250000,
@@ -442,7 +442,7 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
}
/* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
-static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
+static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
.name = "Samsung DTOS403IH102A",
.min = 44250000,
.max = 858000000,
@@ -462,7 +462,7 @@ static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
};
/* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
-static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
.name = "Samsung TDTC9251DH0",
.min = 48000000,
.max = 863000000,
@@ -476,7 +476,7 @@ static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
};
/* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
-static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
.name = "Samsung TBDU18132",
.min = 950000,
.max = 2150000, /* guesses */
@@ -497,7 +497,7 @@ static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
};
/* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
-static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
+static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
.name = "Samsung TBMU24112",
.min = 950000,
.max = 2150000, /* guesses */
@@ -518,7 +518,7 @@ static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
* 153 - 430 0 * 0 0 0 0 1 0 0x02
* 430 - 822 0 * 0 0 1 0 0 0 0x08
* 822 - 862 1 * 0 0 1 0 0 0 0x88 */
-static struct dvb_pll_desc dvb_pll_alps_tdee4 = {
+static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
.name = "ALPS TDEE4",
.min = 47000000,
.max = 862000000,
@@ -534,7 +534,7 @@ static struct dvb_pll_desc dvb_pll_alps_tdee4 = {
/* ----------------------------------------------------------- */
-static struct dvb_pll_desc *pll_list[] = {
+static const struct dvb_pll_desc *pll_list[] = {
[DVB_PLL_UNDEFINED] = NULL,
[DVB_PLL_THOMSON_DTT7579] = &dvb_pll_thomson_dtt7579,
[DVB_PLL_THOMSON_DTT759X] = &dvb_pll_thomson_dtt759x,
@@ -564,7 +564,7 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
const u32 frequency)
{
struct dvb_pll_priv *priv = fe->tuner_priv;
- struct dvb_pll_desc *desc = priv->pll_desc;
+ const struct dvb_pll_desc *desc = priv->pll_desc;
u32 div;
int i;
@@ -758,7 +758,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
.buf = b1, .len = 1 };
struct dvb_pll_priv *priv = NULL;
int ret;
- struct dvb_pll_desc *desc;
+ const struct dvb_pll_desc *desc;
if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
(id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
new file mode 100644
index 000000000000..000606af70f7
--- /dev/null
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -0,0 +1,430 @@
+/*
+ * horus3a.c
+ *
+ * Sony Horus3A DVB-S/S2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/types.h>
+#include "horus3a.h"
+#include "dvb_frontend.h"
+
+#define MAX_WRITE_REGSIZE 5
+
+enum horus3a_state {
+ STATE_UNKNOWN,
+ STATE_SLEEP,
+ STATE_ACTIVE
+};
+
+struct horus3a_priv {
+ u32 frequency;
+ u8 i2c_address;
+ struct i2c_adapter *i2c;
+ enum horus3a_state state;
+ void *set_tuner_data;
+ int (*set_tuner)(void *, int);
+};
+
+static void horus3a_i2c_debug(struct horus3a_priv *priv,
+ u8 reg, u8 write, const u8 *data, u32 len)
+{
+ dev_dbg(&priv->i2c->dev, "horus3a: I2C %s reg 0x%02x size %d\n",
+ (write == 0 ? "read" : "write"), reg, len);
+ print_hex_dump_bytes("horus3a: I2C data: ",
+ DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int horus3a_write_regs(struct horus3a_priv *priv,
+ u8 reg, const u8 *data, u32 len)
+{
+ int ret;
+ u8 buf[MAX_WRITE_REGSIZE + 1];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = len + 1,
+ .buf = buf,
+ }
+ };
+
+ if (len + 1 >= sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
+ reg, len + 1);
+ return -E2BIG;
+ }
+
+ horus3a_i2c_debug(priv, reg, 1, data, len);
+ buf[0] = reg;
+ memcpy(&buf[1], data, len);
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ return ret;
+ }
+ return 0;
+}
+
+static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val)
+{
+ return horus3a_write_regs(priv, reg, &val, 1);
+}
+
+static int horus3a_enter_power_save(struct horus3a_priv *priv)
+{
+ u8 data[2];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_SLEEP)
+ return 0;
+ /* IQ Generator disable */
+ horus3a_write_reg(priv, 0x2a, 0x79);
+ /* MDIV_EN = 0 */
+ horus3a_write_reg(priv, 0x29, 0x70);
+ /* VCO disable preparation */
+ horus3a_write_reg(priv, 0x28, 0x3e);
+ /* VCO buffer disable */
+ horus3a_write_reg(priv, 0x2a, 0x19);
+ /* VCO calibration disable */
+ horus3a_write_reg(priv, 0x1c, 0x00);
+ /* Power save setting (xtal is not stopped) */
+ data[0] = 0xC0;
+ /* LNA is Disabled */
+ data[1] = 0xA7;
+ /* 0x11 - 0x12 */
+ horus3a_write_regs(priv, 0x11, data, sizeof(data));
+ priv->state = STATE_SLEEP;
+ return 0;
+}
+
+static int horus3a_leave_power_save(struct horus3a_priv *priv)
+{
+ u8 data[2];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_ACTIVE)
+ return 0;
+ /* Leave power save */
+ data[0] = 0x00;
+ /* LNA is Disabled */
+ data[1] = 0xa7;
+ /* 0x11 - 0x12 */
+ horus3a_write_regs(priv, 0x11, data, sizeof(data));
+ /* VCO buffer enable */
+ horus3a_write_reg(priv, 0x2a, 0x79);
+ /* VCO calibration enable */
+ horus3a_write_reg(priv, 0x1c, 0xc0);
+ /* MDIV_EN = 1 */
+ horus3a_write_reg(priv, 0x29, 0x71);
+ usleep_range(5000, 7000);
+ priv->state = STATE_ACTIVE;
+ return 0;
+}
+
+static int horus3a_init(struct dvb_frontend *fe)
+{
+ struct horus3a_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ return 0;
+}
+
+static int horus3a_release(struct dvb_frontend *fe)
+{
+ struct horus3a_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int horus3a_sleep(struct dvb_frontend *fe)
+{
+ struct horus3a_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ horus3a_enter_power_save(priv);
+ return 0;
+}
+
+static int horus3a_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct horus3a_priv *priv = fe->tuner_priv;
+ u32 frequency = p->frequency;
+ u32 symbol_rate = p->symbol_rate/1000;
+ u8 mixdiv = 0;
+ u8 mdiv = 0;
+ u32 ms = 0;
+ u8 f_ctl = 0;
+ u8 g_ctl = 0;
+ u8 fc_lpf = 0;
+ u8 data[5];
+
+ dev_dbg(&priv->i2c->dev, "%s(): frequency %dkHz symbol_rate %dksps\n",
+ __func__, frequency, symbol_rate);
+ if (priv->set_tuner)
+ priv->set_tuner(priv->set_tuner_data, 0);
+ if (priv->state == STATE_SLEEP)
+ horus3a_leave_power_save(priv);
+
+ /* frequency should be X MHz (X : integer) */
+ frequency = DIV_ROUND_CLOSEST(frequency, 1000) * 1000;
+ if (frequency <= 1155000) {
+ mixdiv = 4;
+ mdiv = 1;
+ } else {
+ mixdiv = 2;
+ mdiv = 0;
+ }
+ /* Assumed that fREF == 1MHz (1000kHz) */
+ ms = DIV_ROUND_CLOSEST((frequency * mixdiv) / 2, 1000);
+ if (ms > 0x7FFF) { /* 15 bit */
+ dev_err(&priv->i2c->dev, "horus3a: invalid frequency %d\n",
+ frequency);
+ return -EINVAL;
+ }
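+	/*
+	 * Worked example (illustrative): 1200 MHz is above 1155 MHz, so
+	 * mixdiv = 2 and ms = DIV_ROUND_CLOSEST((1200000 * 2) / 2, 1000)
+	 * = 1200, well within the 15-bit limit.
+	 */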
+ if (frequency < 975000) {
+ /* F_CTL=11100 G_CTL=001 */
+ f_ctl = 0x1C;
+ g_ctl = 0x01;
+ } else if (frequency < 1050000) {
+ /* F_CTL=11000 G_CTL=010 */
+ f_ctl = 0x18;
+ g_ctl = 0x02;
+ } else if (frequency < 1150000) {
+ /* F_CTL=10100 G_CTL=010 */
+ f_ctl = 0x14;
+ g_ctl = 0x02;
+ } else if (frequency < 1250000) {
+ /* F_CTL=10000 G_CTL=011 */
+ f_ctl = 0x10;
+ g_ctl = 0x03;
+ } else if (frequency < 1350000) {
+ /* F_CTL=01100 G_CTL=100 */
+ f_ctl = 0x0C;
+ g_ctl = 0x04;
+ } else if (frequency < 1450000) {
+ /* F_CTL=01010 G_CTL=100 */
+ f_ctl = 0x0A;
+ g_ctl = 0x04;
+ } else if (frequency < 1600000) {
+ /* F_CTL=00111 G_CTL=101 */
+ f_ctl = 0x07;
+ g_ctl = 0x05;
+ } else if (frequency < 1800000) {
+ /* F_CTL=00100 G_CTL=010 */
+ f_ctl = 0x04;
+ g_ctl = 0x02;
+ } else if (frequency < 2000000) {
+ /* F_CTL=00010 G_CTL=001 */
+ f_ctl = 0x02;
+ g_ctl = 0x01;
+ } else {
+ /* F_CTL=00000 G_CTL=000 */
+ f_ctl = 0x00;
+ g_ctl = 0x00;
+ }
+ /* LPF cutoff frequency setting */
+ if (p->delivery_system == SYS_DVBS) {
+ /*
+ * rolloff = 0.35
+ * SR <= 4.3
+ * fc_lpf = 5
+ * 4.3 < SR <= 10
+ * fc_lpf = SR * (1 + rolloff) / 2 + SR / 2 =
+ * SR * 1.175 = SR * (47/40)
+ * 10 < SR
+ * fc_lpf = SR * (1 + rolloff) / 2 + 5 =
+ * SR * 0.675 + 5 = SR * (27/40) + 5
+ * NOTE: The result should be round up.
+ */
+ if (symbol_rate <= 4300)
+ fc_lpf = 5;
+ else if (symbol_rate <= 10000)
+ fc_lpf = (u8)DIV_ROUND_UP(symbol_rate * 47, 40000);
+ else
+ fc_lpf = (u8)DIV_ROUND_UP(symbol_rate * 27, 40000) + 5;
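+		/*
+		 * Worked example (illustrative): SR = 20000 ksps gives
+		 * fc_lpf = DIV_ROUND_UP(20000 * 27, 40000) + 5 = 19 (MHz).
+		 */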
+ /* 5 <= fc_lpf <= 36 */
+ if (fc_lpf > 36)
+ fc_lpf = 36;
+ } else if (p->delivery_system == SYS_DVBS2) {
+ int rolloff;
+
+ switch (p->rolloff) {
+ case ROLLOFF_35:
+ rolloff = 35;
+ break;
+ case ROLLOFF_25:
+ rolloff = 25;
+ break;
+ case ROLLOFF_20:
+ rolloff = 20;
+ break;
+ case ROLLOFF_AUTO:
+ default:
+ dev_err(&priv->i2c->dev,
+ "horus3a: auto roll-off is not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * SR <= 4.5:
+ * fc_lpf = 5
+ * 4.5 < SR <= 10:
+ * fc_lpf = SR * (1 + rolloff) / 2 + SR / 2
+ * 10 < SR:
+ * fc_lpf = SR * (1 + rolloff) / 2 + 5
+ * NOTE: The result should be round up.
+ */
+ if (symbol_rate <= 4500)
+ fc_lpf = 5;
+ else if (symbol_rate <= 10000)
+ fc_lpf = (u8)DIV_ROUND_UP(
+ symbol_rate * (200 + rolloff), 200000);
+ else
+ fc_lpf = (u8)DIV_ROUND_UP(
+ symbol_rate * (100 + rolloff), 200000) + 5;
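+		/*
+		 * Worked example (illustrative): SR = 30000 ksps at
+		 * rolloff 0.20 gives
+		 * fc_lpf = DIV_ROUND_UP(30000 * 120, 200000) + 5 = 23 (MHz).
+		 */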
+ /* 5 <= fc_lpf <= 36 is valid */
+ if (fc_lpf > 36)
+ fc_lpf = 36;
+ } else {
+ dev_err(&priv->i2c->dev,
+ "horus3a: invalid delivery system %d\n",
+ p->delivery_system);
+ return -EINVAL;
+ }
+ /* 0x00 - 0x04 */
+ data[0] = (u8)((ms >> 7) & 0xFF);
+ data[1] = (u8)((ms << 1) & 0xFF);
+ data[2] = 0x00;
+ data[3] = 0x00;
+ data[4] = (u8)(mdiv << 7);
+ horus3a_write_regs(priv, 0x00, data, sizeof(data));
+ /* Write G_CTL, F_CTL */
+ horus3a_write_reg(priv, 0x09, (u8)((g_ctl << 5) | f_ctl));
+ /* Write LPF cutoff frequency */
+ horus3a_write_reg(priv, 0x37, (u8)(0x80 | (fc_lpf << 1)));
+ /* Start Calibration */
+ horus3a_write_reg(priv, 0x05, 0x80);
+ /* IQ Generator enable */
+ horus3a_write_reg(priv, 0x2a, 0x7b);
+ /* tuner stabilization time */
+ msleep(60);
+ /* Store tuned frequency to the struct */
+ priv->frequency = ms * 2 * 1000 / mixdiv;
+ return 0;
+}
+
+static int horus3a_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct horus3a_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->frequency;
+ return 0;
+}
+
+static struct dvb_tuner_ops horus3a_tuner_ops = {
+ .info = {
+ .name = "Sony Horus3a",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_step = 1000,
+ },
+ .init = horus3a_init,
+ .release = horus3a_release,
+ .sleep = horus3a_sleep,
+ .set_params = horus3a_set_params,
+ .get_frequency = horus3a_get_frequency,
+};
+
+struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+ const struct horus3a_config *config,
+ struct i2c_adapter *i2c)
+{
+ u8 buf[3], val;
+ struct horus3a_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct horus3a_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+ priv->i2c_address = (config->i2c_address >> 1);
+ priv->i2c = i2c;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ /* wait 4ms after power on */
+ usleep_range(4000, 6000);
+ /* IQ Generator disable */
+ horus3a_write_reg(priv, 0x2a, 0x79);
+ /* REF_R = Xtal Frequency */
+ buf[0] = config->xtal_freq_mhz;
+ buf[1] = config->xtal_freq_mhz;
+ buf[2] = 0;
+ /* 0x6 - 0x8 */
+ horus3a_write_regs(priv, 0x6, buf, 3);
+ /* IQ Out = Single Ended */
+ horus3a_write_reg(priv, 0x0a, 0x40);
+ switch (config->xtal_freq_mhz) {
+ case 27:
+ val = 0x1f;
+ break;
+ case 24:
+ val = 0x10;
+ break;
+ case 16:
+ val = 0xc;
+ break;
+ default:
+ val = 0;
+ dev_warn(&priv->i2c->dev,
+ "horus3a: invalid xtal frequency %dMHz\n",
+ config->xtal_freq_mhz);
+ break;
+ }
+ val <<= 2;
+ horus3a_write_reg(priv, 0x0e, val);
+ horus3a_enter_power_save(priv);
+ usleep_range(3000, 5000);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &horus3a_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ dev_info(&priv->i2c->dev,
+ "Sony HORUS3A attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+ return fe;
+}
+EXPORT_SYMBOL(horus3a_attach);
+
+MODULE_DESCRIPTION("Sony HORUS3A sattelite tuner driver");
+MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
new file mode 100644
index 000000000000..b055319d532e
--- /dev/null
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -0,0 +1,58 @@
+/*
+ * horus3a.h
+ *
+ * Sony Horus3A DVB-S/S2 tuner driver
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DVB_HORUS3A_H__
+#define __DVB_HORUS3A_H__
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+
+/**
+ * struct horus3a_config - the configuration of Horus3A tuner driver
+ * @i2c_address: I2C address of the tuner
+ * @xtal_freq_mhz: Oscillator frequency, MHz
+ * @set_tuner_priv: Callback function private context
+ * @set_tuner_callback: Callback function that notifies the parent driver
+ * which tuner is active now
+ */
+struct horus3a_config {
+ u8 i2c_address;
+ u8 xtal_freq_mhz;
+ void *set_tuner_priv;
+ int (*set_tuner_callback)(void *, int);
+};
+
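+/*
+ * Illustrative usage from a bridge driver (a sketch; the address and
+ * crystal values below are hypothetical):
+ *
+ *	static struct horus3a_config tuner_cfg = {
+ *		.i2c_address = 0xc0,
+ *		.xtal_freq_mhz = 24,
+ *	};
+ *	horus3a_attach(fe, &tuner_cfg, i2c);
+ */
+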
+#if IS_REACHABLE(CONFIG_DVB_HORUS3A)
+extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+ const struct horus3a_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+	const struct horus3a_config *config,
+	struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
new file mode 100644
index 000000000000..ef3021e964be
--- /dev/null
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -0,0 +1,189 @@
+/*
+ * lnbh25.c
+ *
+ * Driver for LNB supply and control IC LNBH25
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include "dvb_frontend.h"
+#include "lnbh25.h"
+
+/**
+ * struct lnbh25_priv - LNBH25 driver private data
+ * @i2c: pointer to the I2C adapter structure
+ * @i2c_address: I2C address of LNBH25 SEC chip
+ * @config: Registers configuration:
+ * offset 0: 1st register address, always 0x02 (DATA1)
+ * offset 1: DATA1 register value
+ * offset 2: DATA2 register value
+ */
+struct lnbh25_priv {
+ struct i2c_adapter *i2c;
+ u8 i2c_address;
+ u8 config[3];
+};
+
+#define LNBH25_STATUS_OFL 0x1
+#define LNBH25_STATUS_VMON 0x4
+#define LNBH25_VSEL_13 0x03
+#define LNBH25_VSEL_18 0x0a
+
+static int lnbh25_read_vmon(struct lnbh25_priv *priv)
+{
+ int i, ret;
+ u8 addr = 0x00;
+ u8 status[6];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr
+ }, {
+ .addr = priv->i2c_address,
+ .flags = I2C_M_RD,
+ .len = sizeof(status),
+ .buf = status
+ }
+ };
+
+ for (i = 0; i < 2; i++) {
+ ret = i2c_transfer(priv->i2c, &msg[i], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EIO;
+ if (ret < 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): I2C transfer %d failed (%d)\n",
+ __func__, i, ret);
+ return ret;
+ }
+ }
+ print_hex_dump_bytes("lnbh25_read_vmon: ",
+ DUMP_PREFIX_OFFSET, status, sizeof(status));
+ if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) {
+ dev_err(&priv->i2c->dev,
+ "%s(): voltage in failure state, status reg 0x%x\n",
+ __func__, status[0]);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int lnbh25_set_voltage(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage)
+{
+ int ret;
+ u8 data1_reg;
+ const char *vsel;
+ struct lnbh25_priv *priv = fe->sec_priv;
+ struct i2c_msg msg = {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = sizeof(priv->config),
+ .buf = priv->config
+ };
+
+ switch (voltage) {
+ case SEC_VOLTAGE_OFF:
+ data1_reg = 0x00;
+ vsel = "Off";
+ break;
+ case SEC_VOLTAGE_13:
+ data1_reg = LNBH25_VSEL_13;
+ vsel = "13V";
+ break;
+ case SEC_VOLTAGE_18:
+ data1_reg = LNBH25_VSEL_18;
+ vsel = "18V";
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->config[1] = data1_reg;
+ dev_dbg(&priv->i2c->dev,
+ "%s(): %s, I2C 0x%x write [ %02x %02x %02x ]\n",
+ __func__, vsel, priv->i2c_address,
+ priv->config[0], priv->config[1], priv->config[2]);
+ ret = i2c_transfer(priv->i2c, &msg, 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EIO;
+ if (ret < 0) {
+ dev_err(&priv->i2c->dev, "%s(): I2C transfer error (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ if (voltage != SEC_VOLTAGE_OFF) {
+ msleep(120);
+ ret = lnbh25_read_vmon(priv);
+ } else {
+ msleep(20);
+ ret = 0;
+ }
+ return ret;
+}
+
+static void lnbh25_release(struct dvb_frontend *fe)
+{
+ struct lnbh25_priv *priv = fe->sec_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ lnbh25_set_voltage(fe, SEC_VOLTAGE_OFF);
+ kfree(fe->sec_priv);
+ fe->sec_priv = NULL;
+}
+
+struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
+ struct lnbh25_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ struct lnbh25_priv *priv;
+
+ dev_dbg(&i2c->dev, "%s()\n", __func__);
+ priv = kzalloc(sizeof(struct lnbh25_priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ priv->i2c_address = (cfg->i2c_address >> 1);
+ priv->i2c = i2c;
+ priv->config[0] = 0x02;
+ priv->config[1] = 0x00;
+ priv->config[2] = cfg->data2_config;
+ fe->sec_priv = priv;
+ if (lnbh25_set_voltage(fe, SEC_VOLTAGE_OFF)) {
+ dev_err(&i2c->dev,
+ "%s(): no LNBH25 found at I2C addr 0x%02x\n",
+ __func__, priv->i2c_address);
+ kfree(priv);
+ fe->sec_priv = NULL;
+ return NULL;
+ }
+
+ fe->ops.release_sec = lnbh25_release;
+ fe->ops.set_voltage = lnbh25_set_voltage;
+
+ dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
+ __func__, priv->i2c_address);
+ return fe;
+}
+EXPORT_SYMBOL(lnbh25_attach);
+
+MODULE_DESCRIPTION("ST LNBH25 driver");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
new file mode 100644
index 000000000000..69f30e21f6b3
--- /dev/null
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -0,0 +1,56 @@
+/*
+ * lnbh25.h
+ *
+ * Driver for LNB supply and control IC LNBH25
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef LNBH25_H
+#define LNBH25_H
+
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+
+/* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
+#define LNBH25_TEN 0x01
+/* Low power mode activated (used only with 22 kHz tone output disabled) */
+#define LNBH25_LPM 0x02
+/* DSQIN input pin is set to receive external 22 kHz TTL signal source */
+#define LNBH25_EXTM 0x04
+
+struct lnbh25_config {
+ u8 i2c_address;
+ u8 data2_config;
+};
+
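+/*
+ * Illustrative usage from a bridge driver (a sketch; the address and
+ * DATA2 flags below are hypothetical):
+ *
+ *	static struct lnbh25_config lnb_cfg = {
+ *		.i2c_address = 0x10,
+ *		.data2_config = LNBH25_TEN,
+ *	};
+ *	lnbh25_attach(fe, &lnb_cfg, i2c);
+ */
+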
+#if IS_REACHABLE(CONFIG_DVB_LNBH25)
+struct dvb_frontend *lnbh25_attach(
+ struct dvb_frontend *fe,
+ struct lnbh25_config *cfg,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *lnbh25_attach(
+ struct dvb_frontend *fe,
+ struct lnbh25_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e9b2d2b69b1d..ff31e7a01ca9 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1495,7 +1495,6 @@ MODULE_DEVICE_TABLE(i2c, m88ds3103_id_table);
static struct i2c_driver m88ds3103_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "m88ds3103",
.suppress_bind_attrs = true,
},
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 3d01f4f22aca..b792f305cf15 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -915,7 +915,6 @@ MODULE_DEVICE_TABLE(i2c, rtl2830_id_table);
static struct i2c_driver rtl2830_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "rtl2830",
},
.probe = rtl2830_probe,
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 822ea4b7a7ff..78b87b260d74 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1319,7 +1319,6 @@ MODULE_DEVICE_TABLE(i2c, rtl2832_id_table);
static struct i2c_driver rtl2832_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "rtl2832",
},
.probe = rtl2832_probe,
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 7edb885ae9c8..d5b994f17612 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1538,7 +1538,6 @@ static int rtl2832_sdr_remove(struct platform_device *pdev)
static struct platform_driver rtl2832_sdr_driver = {
.driver = {
.name = "rtl2832_sdr",
- .owner = THIS_MODULE,
},
.probe = rtl2832_sdr_probe,
.remove = rtl2832_sdr_remove,
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index b2d9fe13e1a0..d6a8fa63040b 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -466,7 +466,7 @@ static int s921_tune(struct dvb_frontend *fe,
static int s921_get_algo(struct dvb_frontend *fe)
{
- return 1; /* FE_ALGO_HW */
+ return DVBFE_ALGO_HW;
}
static void s921_release(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 25e238c370e5..81788c5a44d8 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -757,7 +757,6 @@ MODULE_DEVICE_TABLE(i2c, si2168_id_table);
static struct i2c_driver si2168_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "si2168",
},
.probe = si2168_probe,
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 8fd42767e263..43d47dfcc7b8 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -426,7 +426,6 @@ MODULE_DEVICE_TABLE(i2c, sp2_id);
static struct i2c_driver sp2_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "sp2",
},
.probe = sp2_probe,
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index ec3e18e5ff50..44cb73f68af6 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -791,11 +791,13 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
memcpy(buf + 2, data, len);
if (i2cdebug)
- printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, buf[2]);
+ printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+ state->config->demod_address, reg, buf[2]);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
+ __func__, state->config->demod_address, reg, buf[2]);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -829,10 +831,12 @@ static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
- printk(KERN_ERR "%s: i2c read error\n", __func__);
+ printk(KERN_ERR "%s: i2c read error ([%02x] %02x: %02x)\n",
+ __func__, state->config->demod_address, reg, b1[0]);
if (i2cdebug)
- printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, b1[0]);
+ printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+ state->config->demod_address, reg, b1[0]);
return b1[0];
}
@@ -1550,6 +1554,11 @@ static int stv0367ter_init(struct dvb_frontend *fe)
switch (state->config->xtal) {
/*set internal freq to 53.125MHz */
+ case 16000000:
+ stv0367_writereg(state, R367TER_PLLMDIV, 0x2);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0x1b);
+ stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
+ break;
case 25000000:
stv0367_writereg(state, R367TER_PLLMDIV, 0xa);
stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index f6dc6307d35a..119d47596ac8 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,102 +20,15 @@
#include "tda10071_priv.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
static struct dvb_frontend_ops tda10071_ops;
-/* write multiple registers */
-static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
- int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg.demod_i2c_addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple registers */
-static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
- int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg.demod_i2c_addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->cfg.demod_i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- memcpy(val, buf, len);
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* write single register */
-static int tda10071_wr_reg(struct tda10071_priv *priv, u8 reg, u8 val)
-{
- return tda10071_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
-{
- return tda10071_rd_regs(priv, reg, val, 1);
-}
-
+/*
+ * XXX: regmap_update_bits() does not fit our needs as it does not support
+ * partially volatile registers. Also, it performs a register read even when
+ * the mask is as wide as the register value.
+ */
/* write single register with mask */
-static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
+static int tda10071_wr_reg_mask(struct tda10071_dev *dev,
u8 reg, u8 val, u8 mask)
{
int ret;
@@ -123,7 +36,7 @@ static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
/* no need for read if whole reg is written */
if (mask != 0xff) {
- ret = tda10071_rd_regs(priv, reg, &tmp, 1);
+ ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
if (ret)
return ret;
@@ -132,64 +45,45 @@ static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
val |= tmp;
}
- return tda10071_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register with mask */
-static int tda10071_rd_reg_mask(struct tda10071_priv *priv,
- u8 reg, u8 *val, u8 mask)
-{
- int ret, i;
- u8 tmp;
-
- ret = tda10071_rd_regs(priv, reg, &tmp, 1);
- if (ret)
- return ret;
-
- tmp &= mask;
-
- /* find position of the first bit */
- for (i = 0; i < 8; i++) {
- if ((mask >> i) & 0x01)
- break;
- }
- *val = tmp >> i;
-
- return 0;
+ return regmap_bulk_write(dev->regmap, reg, &val, 1);
}
/* execute firmware command */
-static int tda10071_cmd_execute(struct tda10071_priv *priv,
+static int tda10071_cmd_execute(struct tda10071_dev *dev,
struct tda10071_cmd *cmd)
{
+ struct i2c_client *client = dev->client;
int ret, i;
- u8 tmp;
+ unsigned int uitmp;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
+ mutex_lock(&dev->cmd_execute_mutex);
+
/* write cmd and args for firmware */
- ret = tda10071_wr_regs(priv, 0x00, cmd->args, cmd->len);
+ ret = regmap_bulk_write(dev->regmap, 0x00, cmd->args, cmd->len);
if (ret)
- goto error;
+ goto error_mutex_unlock;
/* start cmd execution */
- ret = tda10071_wr_reg(priv, 0x1f, 1);
+ ret = regmap_write(dev->regmap, 0x1f, 1);
if (ret)
- goto error;
+ goto error_mutex_unlock;
/* wait cmd execution terminate */
- for (i = 1000, tmp = 1; i && tmp; i--) {
- ret = tda10071_rd_reg(priv, 0x1f, &tmp);
+ for (i = 1000, uitmp = 1; i && uitmp; i--) {
+ ret = regmap_read(dev->regmap, 0x1f, &uitmp);
if (ret)
- goto error;
+ goto error_mutex_unlock;
usleep_range(200, 5000);
}
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+ mutex_unlock(&dev->cmd_execute_mutex);
+ dev_dbg(&client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -197,26 +91,28 @@ static int tda10071_cmd_execute(struct tda10071_priv *priv,
}
return ret;
+error_mutex_unlock:
+ mutex_unlock(&dev->cmd_execute_mutex);
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode fe_sec_tone_mode)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret;
u8 tone;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
- dev_dbg(&priv->i2c->dev, "%s: tone_mode=%d\n", __func__,
- fe_sec_tone_mode);
+ dev_dbg(&client->dev, "tone_mode=%d\n", fe_sec_tone_mode);
switch (fe_sec_tone_mode) {
case SEC_TONE_ON:
@@ -226,8 +122,7 @@ static int tda10071_set_tone(struct dvb_frontend *fe,
tone = 0;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
- __func__);
+ dev_dbg(&client->dev, "invalid fe_sec_tone_mode\n");
ret = -EINVAL;
goto error;
}
@@ -238,30 +133,31 @@ static int tda10071_set_tone(struct dvb_frontend *fe,
cmd.args[3] = 0x00;
cmd.args[4] = tone;
cmd.len = 5;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage fe_sec_voltage)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret;
u8 voltage;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
- dev_dbg(&priv->i2c->dev, "%s: voltage=%d\n", __func__, fe_sec_voltage);
+ dev_dbg(&client->dev, "voltage=%d\n", fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_13:
@@ -274,8 +170,7 @@ static int tda10071_set_voltage(struct dvb_frontend *fe,
voltage = 0;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_voltage\n",
- __func__);
+ dev_dbg(&client->dev, "invalid fe_sec_voltage\n");
ret = -EINVAL;
goto error;
}
@@ -284,31 +179,31 @@ static int tda10071_set_voltage(struct dvb_frontend *fe,
cmd.args[1] = 0;
cmd.args[2] = voltage;
cmd.len = 3;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *diseqc_cmd)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret, i;
- u8 tmp;
+ unsigned int uitmp;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
- dev_dbg(&priv->i2c->dev, "%s: msg_len=%d\n", __func__,
- diseqc_cmd->msg_len);
+ dev_dbg(&client->dev, "msg_len=%d\n", diseqc_cmd->msg_len);
if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
ret = -EINVAL;
@@ -316,22 +211,22 @@ static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
}
/* wait LNB TX */
- for (i = 500, tmp = 0; i && !tmp; i--) {
- ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+ for (i = 500, uitmp = 0; i && !uitmp; i--) {
+ ret = regmap_read(dev->regmap, 0x47, &uitmp);
if (ret)
goto error;
-
+ uitmp = (uitmp >> 0) & 1;
usleep_range(10000, 20000);
}
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+ dev_dbg(&client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
- ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+ ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
if (ret)
goto error;
@@ -344,41 +239,42 @@ static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
cmd.args[6] = diseqc_cmd->msg_len;
memcpy(&cmd.args[7], diseqc_cmd->msg, diseqc_cmd->msg_len);
cmd.len = 7 + diseqc_cmd->msg_len;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
struct dvb_diseqc_slave_reply *reply)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret, i;
- u8 tmp;
+ unsigned int uitmp;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
- dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* wait LNB RX */
- for (i = 500, tmp = 0; i && !tmp; i--) {
- ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x02);
+ for (i = 500, uitmp = 0; i && !uitmp; i--) {
+ ret = regmap_read(dev->regmap, 0x47, &uitmp);
if (ret)
goto error;
-
+ uitmp = (uitmp >> 1) & 1;
usleep_range(10000, 20000);
}
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+ dev_dbg(&client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -386,11 +282,11 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
}
/* reply len */
- ret = tda10071_rd_reg(priv, 0x46, &tmp);
+ ret = regmap_read(dev->regmap, 0x46, &uitmp);
if (ret)
goto error;
- reply->msg_len = tmp & 0x1f; /* [4:0] */
+ reply->msg_len = uitmp & 0x1f; /* [4:0] */
if (reply->msg_len > sizeof(reply->msg))
reply->msg_len = sizeof(reply->msg); /* truncate API max */
@@ -398,35 +294,37 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
cmd.args[0] = CMD_LNB_UPDATE_REPLY;
cmd.args[1] = 0;
cmd.len = 2;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
- ret = tda10071_rd_regs(priv, cmd.len, reply->msg, reply->msg_len);
+ ret = regmap_bulk_read(dev->regmap, cmd.len, reply->msg,
+ reply->msg_len);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
enum fe_sec_mini_cmd fe_sec_mini_cmd)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret, i;
- u8 tmp, burst;
+ unsigned int uitmp;
+ u8 burst;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
- dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
- fe_sec_mini_cmd);
+ dev_dbg(&client->dev, "fe_sec_mini_cmd=%d\n", fe_sec_mini_cmd);
switch (fe_sec_mini_cmd) {
case SEC_MINI_A:
@@ -436,29 +334,28 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
burst = 1;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
- __func__);
+ dev_dbg(&client->dev, "invalid fe_sec_mini_cmd\n");
ret = -EINVAL;
goto error;
}
/* wait LNB TX */
- for (i = 500, tmp = 0; i && !tmp; i--) {
- ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
+ for (i = 500, uitmp = 0; i && !uitmp; i--) {
+ ret = regmap_read(dev->regmap, 0x47, &uitmp);
if (ret)
goto error;
-
+ uitmp = (uitmp >> 0) & 1;
usleep_range(10000, 20000);
}
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+ dev_dbg(&client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
- ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
+ ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
if (ret)
goto error;
@@ -466,219 +363,217 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
cmd.args[1] = 0;
cmd.args[2] = burst;
cmd.len = 3;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct tda10071_cmd cmd;
int ret;
- u8 tmp;
+ unsigned int uitmp;
+ u8 buf[8];
*status = 0;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = 0;
goto error;
}
- ret = tda10071_rd_reg(priv, 0x39, &tmp);
+ ret = regmap_read(dev->regmap, 0x39, &uitmp);
if (ret)
goto error;
/* 0x39[0] tuner PLL */
- if (tmp & 0x02) /* demod PLL */
+ if (uitmp & 0x02) /* demod PLL */
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
- if (tmp & 0x04) /* viterbi or LDPC*/
+ if (uitmp & 0x04) /* viterbi or LDPC*/
*status |= FE_HAS_VITERBI;
- if (tmp & 0x08) /* RS or BCH */
+ if (uitmp & 0x08) /* RS or BCH */
*status |= FE_HAS_SYNC | FE_HAS_LOCK;
- priv->fe_status = *status;
+ dev->fe_status = *status;
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ cmd.args[0] = CMD_GET_AGCACC;
+ cmd.args[1] = 0;
+ cmd.len = 2;
+ ret = tda10071_cmd_execute(dev, &cmd);
+ if (ret)
+ goto error;
-static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- struct tda10071_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
+ /* input power estimate dBm */
+ ret = regmap_read(dev->regmap, 0x50, &uitmp);
+ if (ret)
+ goto error;
- if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
- *snr = 0;
- ret = 0;
- goto error;
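+ /* reg 0x50 returns the input power estimate as 8-bit two's complement
+ * dBm (181 => -75 dBm, 236 => -20 dBm); DVBv5 svalue is expressed in
+ * 0.001 dB units, hence the -256 offset and the *1000 scaling
+ */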
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].svalue = (int) (uitmp - 256) * 1000;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- ret = tda10071_rd_regs(priv, 0x3a, buf, 2);
- if (ret)
- goto error;
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ /* Es/No */
+ ret = regmap_bulk_read(dev->regmap, 0x3a, buf, 2);
+ if (ret)
+ goto error;
- /* Es/No dBx10 */
- *snr = buf[0] << 8 | buf[1];
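+ /* Es/No register pair is in 0.1 dB steps (dB x 10); *100 yields the
+ * 0.001 dB units of DVBv5 stats
+ */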
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = (buf[0] << 8 | buf[1] << 0) * 100;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* UCB/PER/BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ /* TODO: report total bits/packets */
+ u8 delivery_system, reg, len;
-static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
-{
- struct tda10071_priv *priv = fe->demodulator_priv;
- struct tda10071_cmd cmd;
- int ret;
- u8 tmp;
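+ /* counter layout differs per delivery system: DVB-S reports a 32-bit
+ * bit-error count plus a 16-bit block-error count at 0x4c, DVB-S2 a
+ * 16-bit bit-error count at 0x4d (see the parsing below)
+ */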
+ switch (dev->delivery_system) {
+ case SYS_DVBS:
+ reg = 0x4c;
+ len = 8;
+ delivery_system = 1;
+ break;
+ case SYS_DVBS2:
+ reg = 0x4d;
+ len = 4;
+ delivery_system = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
- if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
- *strength = 0;
- ret = 0;
- goto error;
- }
+ ret = regmap_read(dev->regmap, reg, &uitmp);
+ if (ret)
+ goto error;
- cmd.args[0] = CMD_GET_AGCACC;
- cmd.args[1] = 0;
- cmd.len = 2;
- ret = tda10071_cmd_execute(priv, &cmd);
- if (ret)
- goto error;
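+ /* an unchanged counter value means no new measurement is available yet */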
+ if (dev->meas_count == uitmp) {
+ dev_dbg(&client->dev, "meas not ready=%02x\n", uitmp);
+ ret = 0;
+ goto error;
+ } else {
+ dev->meas_count = uitmp;
+ }
- /* input power estimate dBm */
- ret = tda10071_rd_reg(priv, 0x50, &tmp);
- if (ret)
- goto error;
+ cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
+ cmd.args[1] = 0;
+ cmd.args[2] = delivery_system;
+ cmd.len = 3;
+ ret = tda10071_cmd_execute(dev, &cmd);
+ if (ret)
+ goto error;
- if (tmp < 181)
- tmp = 181; /* -75 dBm */
- else if (tmp > 236)
- tmp = 236; /* -20 dBm */
+ ret = regmap_bulk_read(dev->regmap, cmd.len, buf, len);
+ if (ret)
+ goto error;
- /* scale value to 0x0000-0xffff */
- *strength = (tmp-181) * 0xffff / (236-181);
+ if (dev->delivery_system == SYS_DVBS) {
+ dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3] << 0;
+ dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3] << 0;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ dev->block_error += buf[4] << 8 | buf[5] << 0;
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = dev->block_error;
+ } else {
+ dev->dvbv3_ber = buf[0] << 8 | buf[1] << 0;
+ dev->post_bit_error += buf[0] << 8 | buf[1] << 0;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
+static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
- struct tda10071_cmd cmd;
- int ret, i, len;
- u8 tmp, reg, buf[8];
-
- if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
- *ber = priv->ber = 0;
- ret = 0;
- goto error;
- }
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- switch (priv->delivery_system) {
- case SYS_DVBS:
- reg = 0x4c;
- len = 8;
- i = 1;
- break;
- case SYS_DVBS2:
- reg = 0x4d;
- len = 4;
- i = 0;
- break;
- default:
- *ber = priv->ber = 0;
- return 0;
- }
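+ /* legacy DVBv3 SNR in dB x 10, derived from the cached DVBv5 CNR (0.001 dB units) */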
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
+ return 0;
+}
- ret = tda10071_rd_reg(priv, reg, &tmp);
- if (ret)
- goto error;
+static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ unsigned int uitmp;
- if (priv->meas_count[i] == tmp) {
- dev_dbg(&priv->i2c->dev, "%s: meas not ready=%02x\n", __func__,
- tmp);
- *ber = priv->ber;
- return 0;
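+ /* map the cached 0.001 dB strength back to the raw register scale,
+ * then to the DVBv3 0x0000-0xffff range
+ */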
+ if (c->strength.stat[0].scale == FE_SCALE_DECIBEL) {
+ uitmp = div_s64(c->strength.stat[0].svalue, 1000) + 256;
+ uitmp = clamp(uitmp, 181U, 236U); /* -75 dBm to -20 dBm */
+ /* scale value to 0x0000-0xffff */
+ *strength = (uitmp-181) * 0xffff / (236-181);
} else {
- priv->meas_count[i] = tmp;
+ *strength = 0;
}
+ return 0;
+}
- cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
- cmd.args[1] = 0;
- cmd.args[2] = i;
- cmd.len = 3;
- ret = tda10071_cmd_execute(priv, &cmd);
- if (ret)
- goto error;
-
- ret = tda10071_rd_regs(priv, cmd.len, buf, len);
- if (ret)
- goto error;
-
- if (priv->delivery_system == SYS_DVBS) {
- *ber = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
- priv->ucb += (buf[4] << 8) | buf[5];
- } else {
- *ber = (buf[0] << 8) | buf[1];
- }
- priv->ber = *ber;
+static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct tda10071_dev *dev = fe->demodulator_priv;
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
+ *ber = dev->dvbv3_ber;
+ return 0;
}
static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
- int ret = 0;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ if (c->block_error.stat[0].scale == FE_SCALE_COUNTER)
+ *ucblocks = c->block_error.stat[0].uvalue;
+ else
*ucblocks = 0;
- goto error;
- }
-
- /* UCB is updated when BER is read. Assume BER is read anyway. */
-
- *ucblocks = priv->ucb;
-
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
+ return 0;
}
static int tda10071_set_frontend(struct dvb_frontend *fe)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
enum fe_modulation modulation;
- dev_dbg(&priv->i2c->dev,
- "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
- __func__, c->delivery_system, c->modulation,
- c->frequency, c->symbol_rate, c->inversion, c->pilot,
- c->rolloff);
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+ c->delivery_system, c->modulation, c->frequency, c->symbol_rate,
+ c->inversion, c->pilot, c->rolloff);
- priv->delivery_system = SYS_UNDEFINED;
+ dev->delivery_system = SYS_UNDEFINED;
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
@@ -696,7 +591,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
inversion = 3;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n", __func__);
+ dev_dbg(&client->dev, "invalid inversion\n");
ret = -EINVAL;
goto error;
}
@@ -722,8 +617,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
break;
case ROLLOFF_AUTO:
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
- __func__);
+ dev_dbg(&client->dev, "invalid rolloff\n");
ret = -EINVAL;
goto error;
}
@@ -739,15 +633,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
pilot = 2;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
- __func__);
+ dev_dbg(&client->dev, "invalid pilot\n");
ret = -EINVAL;
goto error;
}
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
- __func__);
+ dev_dbg(&client->dev, "invalid delivery_system\n");
ret = -EINVAL;
goto error;
}
@@ -757,15 +649,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
- dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
- __func__, mode);
+ dev_dbg(&client->dev, "mode found=%02x\n", mode);
break;
}
}
if (mode == 0xff) {
- dev_dbg(&priv->i2c->dev, "%s: invalid parameter combination\n",
- __func__);
+ dev_dbg(&client->dev, "invalid parameter combination\n");
ret = -EINVAL;
goto error;
}
@@ -775,11 +665,11 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
else
div = 4;
- ret = tda10071_wr_reg(priv, 0x81, div);
+ ret = regmap_write(dev->regmap, 0x81, div);
if (ret)
goto error;
- ret = tda10071_wr_reg(priv, 0xe3, div);
+ ret = regmap_write(dev->regmap, 0xe3, div);
if (ret)
goto error;
@@ -799,31 +689,32 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
cmd.args[13] = 0x00;
cmd.args[14] = 0x00;
cmd.len = 15;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
- priv->delivery_system = c->delivery_system;
+ dev->delivery_system = c->delivery_system;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_get_frontend(struct dvb_frontend *fe)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 buf[5], tmp;
- if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
- ret = -EFAULT;
+ if (!dev->warm || !(dev->fe_status & FE_HAS_LOCK)) {
+ ret = 0;
goto error;
}
- ret = tda10071_rd_regs(priv, 0x30, buf, 5);
+ ret = regmap_bulk_read(dev->regmap, 0x30, buf, 5);
if (ret)
goto error;
@@ -856,7 +747,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0);
- ret = tda10071_rd_regs(priv, 0x52, buf, 3);
+ ret = regmap_bulk_read(dev->regmap, 0x52, buf, 3);
if (ret)
goto error;
@@ -864,15 +755,18 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_init(struct dvb_frontend *fe)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct tda10071_cmd cmd;
int ret, i, len, remaining, fw_size;
+ unsigned int uitmp;
const struct firmware *fw;
u8 *fw_file = TDA10071_FIRMWARE;
u8 tmp, buf[4];
@@ -890,7 +784,7 @@ static int tda10071_init(struct dvb_frontend *fe)
};
struct tda10071_reg_val_mask tab2[] = {
{ 0xf1, 0x70, 0xff },
- { 0x88, priv->cfg.pll_multiplier, 0x3f },
+ { 0x88, dev->pll_multiplier, 0x3f },
{ 0x89, 0x00, 0x10 },
{ 0x89, 0x10, 0x10 },
{ 0xc0, 0x01, 0x01 },
@@ -934,11 +828,11 @@ static int tda10071_init(struct dvb_frontend *fe)
{ 0xd5, 0x03, 0x03 },
};
- if (priv->warm) {
+ if (dev->warm) {
/* warm state - wake up device from sleep */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = tda10071_wr_reg_mask(priv, tab[i].reg,
+ ret = tda10071_wr_reg_mask(dev, tab[i].reg,
tab[i].val, tab[i].mask);
if (ret)
goto error;
@@ -948,78 +842,76 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[1] = 0;
cmd.args[2] = 0;
cmd.len = 3;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
} else {
/* cold state - try to download firmware */
/* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+ ret = request_firmware(&fw, fw_file, &client->dev);
if (ret) {
- dev_err(&priv->i2c->dev,
- "%s: did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
- KBUILD_MODNAME, fw_file, ret);
+ dev_err(&client->dev,
+ "did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
+ fw_file, ret);
goto error;
}
/* init */
for (i = 0; i < ARRAY_SIZE(tab2); i++) {
- ret = tda10071_wr_reg_mask(priv, tab2[i].reg,
+ ret = tda10071_wr_reg_mask(dev, tab2[i].reg,
tab2[i].val, tab2[i].mask);
if (ret)
goto error_release_firmware;
}
/* download firmware */
- ret = tda10071_wr_reg(priv, 0xe0, 0x7f);
+ ret = regmap_write(dev->regmap, 0xe0, 0x7f);
if (ret)
goto error_release_firmware;
- ret = tda10071_wr_reg(priv, 0xf7, 0x81);
+ ret = regmap_write(dev->regmap, 0xf7, 0x81);
if (ret)
goto error_release_firmware;
- ret = tda10071_wr_reg(priv, 0xf8, 0x00);
+ ret = regmap_write(dev->regmap, 0xf8, 0x00);
if (ret)
goto error_release_firmware;
- ret = tda10071_wr_reg(priv, 0xf9, 0x00);
+ ret = regmap_write(dev->regmap, 0xf9, 0x00);
if (ret)
goto error_release_firmware;
- dev_info(&priv->i2c->dev,
- "%s: found a '%s' in cold state, will try to load a firmware\n",
- KBUILD_MODNAME, tda10071_ops.info.name);
- dev_info(&priv->i2c->dev,
- "%s: downloading firmware from file '%s'\n",
- KBUILD_MODNAME, fw_file);
+ dev_info(&client->dev,
+ "found a '%s' in cold state, will try to load a firmware\n",
+ tda10071_ops.info.name);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_file);
/* do not download last byte */
fw_size = fw->size - 1;
for (remaining = fw_size; remaining > 0;
- remaining -= (priv->cfg.i2c_wr_max - 1)) {
+ remaining -= (dev->i2c_wr_max - 1)) {
len = remaining;
- if (len > (priv->cfg.i2c_wr_max - 1))
- len = (priv->cfg.i2c_wr_max - 1);
+ if (len > (dev->i2c_wr_max - 1))
+ len = (dev->i2c_wr_max - 1);
- ret = tda10071_wr_regs(priv, 0xfa,
+ ret = regmap_bulk_write(dev->regmap, 0xfa,
(u8 *) &fw->data[fw_size - remaining], len);
if (ret) {
- dev_err(&priv->i2c->dev,
- "%s: firmware download failed=%d\n",
- KBUILD_MODNAME, ret);
+ dev_err(&client->dev,
+ "firmware download failed=%d\n", ret);
goto error_release_firmware;
}
}
release_firmware(fw);
- ret = tda10071_wr_reg(priv, 0xf7, 0x0c);
+ ret = regmap_write(dev->regmap, 0xf7, 0x0c);
if (ret)
goto error;
- ret = tda10071_wr_reg(priv, 0xe0, 0x00);
+ ret = regmap_write(dev->regmap, 0xe0, 0x00);
if (ret)
goto error;
@@ -1027,53 +919,52 @@ static int tda10071_init(struct dvb_frontend *fe)
msleep(250);
/* firmware status */
- ret = tda10071_rd_reg(priv, 0x51, &tmp);
+ ret = regmap_read(dev->regmap, 0x51, &uitmp);
if (ret)
goto error;
- if (tmp) {
- dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
- KBUILD_MODNAME);
+ if (uitmp) {
+ dev_info(&client->dev, "firmware did not run\n");
ret = -EFAULT;
goto error;
} else {
- priv->warm = true;
+ dev->warm = true;
}
cmd.args[0] = CMD_GET_FW_VERSION;
cmd.len = 1;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
- ret = tda10071_rd_regs(priv, cmd.len, buf, 4);
+ ret = regmap_bulk_read(dev->regmap, cmd.len, buf, 4);
if (ret)
goto error;
- dev_info(&priv->i2c->dev, "%s: firmware version %d.%d.%d.%d\n",
- KBUILD_MODNAME, buf[0], buf[1], buf[2], buf[3]);
- dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
- KBUILD_MODNAME, tda10071_ops.info.name);
+ dev_info(&client->dev, "firmware version %d.%d.%d.%d\n",
+ buf[0], buf[1], buf[2], buf[3]);
+ dev_info(&client->dev, "found a '%s' in warm state\n",
+ tda10071_ops.info.name);
- ret = tda10071_rd_regs(priv, 0x81, buf, 2);
+ ret = regmap_bulk_read(dev->regmap, 0x81, buf, 2);
if (ret)
goto error;
cmd.args[0] = CMD_DEMOD_INIT;
- cmd.args[1] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
- cmd.args[2] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
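+ /* the firmware takes the xtal/clock in kHz, high byte first */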
+ cmd.args[1] = ((dev->clk / 1000) >> 8) & 0xff;
+ cmd.args[2] = ((dev->clk / 1000) >> 0) & 0xff;
cmd.args[3] = buf[0];
cmd.args[4] = buf[1];
- cmd.args[5] = priv->cfg.pll_multiplier;
- cmd.args[6] = priv->cfg.spec_inv;
+ cmd.args[5] = dev->pll_multiplier;
+ cmd.args[6] = dev->spec_inv;
cmd.args[7] = 0x00;
cmd.len = 8;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
- if (priv->cfg.tuner_i2c_addr)
- tmp = priv->cfg.tuner_i2c_addr;
+ if (dev->tuner_i2c_addr)
+ tmp = dev->tuner_i2c_addr;
else
tmp = 0x14;
@@ -1093,22 +984,22 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[13] = 0x00;
cmd.args[14] = 0x00;
cmd.len = 15;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
cmd.args[0] = CMD_MPEG_CONFIG;
cmd.args[1] = 0;
- cmd.args[2] = priv->cfg.ts_mode;
+ cmd.args[2] = dev->ts_mode;
cmd.args[3] = 0x00;
cmd.args[4] = 0x04;
cmd.args[5] = 0x00;
cmd.len = 6;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
- ret = tda10071_wr_reg_mask(priv, 0xf0, 0x01, 0x01);
+ ret = regmap_update_bits(dev->regmap, 0xf0, 0x01, 0x01);
if (ret)
goto error;
@@ -1124,7 +1015,7 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[9] = 30;
cmd.args[10] = 30;
cmd.len = 11;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
@@ -1133,22 +1024,33 @@ static int tda10071_init(struct dvb_frontend *fe)
cmd.args[2] = 14;
cmd.args[3] = 14;
cmd.len = 4;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
}
+ /* init stats here in order to signal the app which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return ret;
error_release_firmware:
release_firmware(fw);
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int tda10071_sleep(struct dvb_frontend *fe)
{
- struct tda10071_priv *priv = fe->demodulator_priv;
+ struct tda10071_dev *dev = fe->demodulator_priv;
+ struct i2c_client *client = dev->client;
struct tda10071_cmd cmd;
int ret, i;
struct tda10071_reg_val_mask tab[] = {
@@ -1164,7 +1066,7 @@ static int tda10071_sleep(struct dvb_frontend *fe)
{ 0xce, 0x10, 0x10 },
};
- if (!priv->warm) {
+ if (!dev->warm) {
ret = -EFAULT;
goto error;
}
@@ -1173,12 +1075,12 @@ static int tda10071_sleep(struct dvb_frontend *fe)
cmd.args[1] = 0;
cmd.args[2] = 1;
cmd.len = 3;
- ret = tda10071_cmd_execute(priv, &cmd);
+ ret = tda10071_cmd_execute(dev, &cmd);
if (ret)
goto error;
for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = tda10071_wr_reg_mask(priv, tab[i].reg, tab[i].val,
+ ret = tda10071_wr_reg_mask(dev, tab[i].reg, tab[i].val,
tab[i].mask);
if (ret)
goto error;
@@ -1186,7 +1088,7 @@ static int tda10071_sleep(struct dvb_frontend *fe)
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -1200,71 +1102,6 @@ static int tda10071_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static void tda10071_release(struct dvb_frontend *fe)
-{
- struct tda10071_priv *priv = fe->demodulator_priv;
- kfree(priv);
-}
-
-struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
- struct i2c_adapter *i2c)
-{
- int ret;
- struct tda10071_priv *priv = NULL;
- u8 tmp;
-
- /* allocate memory for the internal priv */
- priv = kzalloc(sizeof(struct tda10071_priv), GFP_KERNEL);
- if (priv == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* make sure demod i2c address is specified */
- if (!config->demod_i2c_addr) {
- dev_dbg(&i2c->dev, "%s: invalid demod i2c address\n", __func__);
- ret = -EINVAL;
- goto error;
- }
-
- /* make sure tuner i2c address is specified */
- if (!config->tuner_i2c_addr) {
- dev_dbg(&i2c->dev, "%s: invalid tuner i2c address\n", __func__);
- ret = -EINVAL;
- goto error;
- }
-
- /* setup the priv */
- priv->i2c = i2c;
- memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
-
- /* chip ID */
- ret = tda10071_rd_reg(priv, 0xff, &tmp);
- if (ret || tmp != 0x0f)
- goto error;
-
- /* chip type */
- ret = tda10071_rd_reg(priv, 0xdd, &tmp);
- if (ret || tmp != 0x00)
- goto error;
-
- /* chip version */
- ret = tda10071_rd_reg(priv, 0xfe, &tmp);
- if (ret || tmp != 0x01)
- goto error;
-
- /* create dvb_frontend */
- memcpy(&priv->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
- priv->fe.demodulator_priv = priv;
-
- return &priv->fe;
-error:
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
- kfree(priv);
- return NULL;
-}
-EXPORT_SYMBOL(tda10071_attach);
-
static struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
@@ -1289,8 +1126,6 @@ static struct dvb_frontend_ops tda10071_ops = {
FE_CAN_2G_MODULATION
},
- .release = tda10071_release,
-
.get_tune_settings = tda10071_get_tune_settings,
.init = tda10071_init,
@@ -1315,7 +1150,7 @@ static struct dvb_frontend_ops tda10071_ops = {
static struct dvb_frontend *tda10071_get_dvb_frontend(struct i2c_client *client)
{
- struct tda10071_priv *dev = i2c_get_clientdata(client);
+ struct tda10071_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
@@ -1325,10 +1160,14 @@ static struct dvb_frontend *tda10071_get_dvb_frontend(struct i2c_client *client)
static int tda10071_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tda10071_priv *dev;
+ struct tda10071_dev *dev;
struct tda10071_platform_data *pdata = client->dev.platform_data;
int ret;
- u8 u8tmp;
+ unsigned int uitmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
@@ -1337,45 +1176,48 @@ static int tda10071_probe(struct i2c_client *client,
}
dev->client = client;
- dev->i2c = client->adapter;
- dev->cfg.demod_i2c_addr = client->addr;
- dev->cfg.i2c_wr_max = pdata->i2c_wr_max;
- dev->cfg.ts_mode = pdata->ts_mode;
- dev->cfg.spec_inv = pdata->spec_inv;
- dev->cfg.xtal = pdata->clk;
- dev->cfg.pll_multiplier = pdata->pll_multiplier;
- dev->cfg.tuner_i2c_addr = pdata->tuner_i2c_addr;
+ mutex_init(&dev->cmd_execute_mutex);
+ dev->clk = pdata->clk;
+ dev->i2c_wr_max = pdata->i2c_wr_max;
+ dev->ts_mode = pdata->ts_mode;
+ dev->spec_inv = pdata->spec_inv;
+ dev->pll_multiplier = pdata->pll_multiplier;
+ dev->tuner_i2c_addr = pdata->tuner_i2c_addr;
+ dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err_kfree;
+ }
/* chip ID */
- ret = tda10071_rd_reg(dev, 0xff, &u8tmp);
+ ret = regmap_read(dev->regmap, 0xff, &uitmp);
if (ret)
goto err_kfree;
- if (u8tmp != 0x0f) {
+ if (uitmp != 0x0f) {
ret = -ENODEV;
goto err_kfree;
}
/* chip type */
- ret = tda10071_rd_reg(dev, 0xdd, &u8tmp);
+ ret = regmap_read(dev->regmap, 0xdd, &uitmp);
if (ret)
goto err_kfree;
- if (u8tmp != 0x00) {
+ if (uitmp != 0x00) {
ret = -ENODEV;
goto err_kfree;
}
/* chip version */
- ret = tda10071_rd_reg(dev, 0xfe, &u8tmp);
+ ret = regmap_read(dev->regmap, 0xfe, &uitmp);
if (ret)
goto err_kfree;
- if (u8tmp != 0x01) {
+ if (uitmp != 0x01) {
ret = -ENODEV;
goto err_kfree;
}
/* create dvb_frontend */
memcpy(&dev->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
- dev->fe.ops.release = NULL;
dev->fe.demodulator_priv = dev;
i2c_set_clientdata(client, dev);
@@ -1409,7 +1251,6 @@ MODULE_DEVICE_TABLE(i2c, tda10071_id_table);
static struct i2c_driver tda10071_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tda10071",
.suppress_bind_attrs = true,
},
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 0ffbfa5b2dfb..8f184026ee11 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -21,12 +21,11 @@
#ifndef TDA10071_H
#define TDA10071_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/*
* I2C address
- * 0x55,
+ * 0x05, 0x55,
*/
/**
@@ -53,64 +52,4 @@ struct tda10071_platform_data {
struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
};
-struct tda10071_config {
- /* Demodulator I2C address.
- * Default: none, must set
- * Values: 0x55,
- */
- u8 demod_i2c_addr;
-
- /* Tuner I2C address.
- * Default: none, must set
- * Values: 0x14, 0x54, ...
- */
- u8 tuner_i2c_addr;
-
- /* Max bytes I2C provider can write at once.
- * Note: Buffer is taken from the stack currently!
- * Default: none, must set
- * Values:
- */
- u16 i2c_wr_max;
-
- /* TS output mode.
- * Default: TDA10071_TS_SERIAL
- * Values:
- */
-#define TDA10071_TS_SERIAL 0
-#define TDA10071_TS_PARALLEL 1
- u8 ts_mode;
-
- /* Input spectrum inversion.
- * Default: 0
- * Values: 0, 1
- */
- bool spec_inv;
-
- /* Xtal frequency Hz
- * Default: none, must set
- * Values:
- */
- u32 xtal;
-
- /* PLL multiplier.
- * Default: none, must set
- * Values:
- */
- u8 pll_multiplier;
-};
-
-
-#if IS_REACHABLE(CONFIG_DVB_TDA10071)
-extern struct dvb_frontend *tda10071_attach(
- const struct tda10071_config *config, struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend *tda10071_attach(
- const struct tda10071_config *config, struct i2c_adapter *i2c)
-{
- dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
-
#endif /* TDA10071_H */
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 54d7c713eec8..b9c3601802ba 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -24,19 +24,27 @@
#include "dvb_frontend.h"
#include "tda10071.h"
#include <linux/firmware.h>
+#include <linux/regmap.h>
-struct tda10071_priv {
- struct i2c_adapter *i2c;
+struct tda10071_dev {
struct dvb_frontend fe;
struct i2c_client *client;
- struct tda10071_config cfg;
+ struct regmap *regmap;
+ struct mutex cmd_execute_mutex;
+ u32 clk;
+ u16 i2c_wr_max;
+ u8 ts_mode;
+ bool spec_inv;
+ u8 pll_multiplier;
+ u8 tuner_i2c_addr;
- u8 meas_count[2];
- u32 ber;
- u32 ucb;
+ u8 meas_count;
+ u32 dvbv3_ber;
enum fe_status fe_status;
enum fe_delivery_system delivery_system;
bool warm; /* FW running */
+ u64 post_bit_error;
+ u64 block_error;
};
static struct tda10071_modcod {
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index f61b143a0052..7979e5d6498b 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -726,7 +726,6 @@ MODULE_DEVICE_TABLE(i2c, ts2020_id_table);
static struct i2c_driver ts2020_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ts2020",
},
.probe = ts2020_probe,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 71ee8f586430..521bbf1b29bc 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -22,7 +22,7 @@ config VIDEO_IR_I2C
#
menu "Encoders, decoders, sensors and other helper chips"
- visible if !MEDIA_SUBDRV_AUTOSELECT
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
comment "Audio decoders, processors and mixers"
@@ -196,7 +196,8 @@ config VIDEO_ADV7183
config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && GPIOLIB
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB || COMPILE_TEST
select HDMI
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -286,6 +287,16 @@ config VIDEO_SAA711X
To compile this driver as a module, choose M here: the
module will be called saa7115.
+config VIDEO_TC358743
+ tristate "Toshiba TC358743 decoder"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ select HDMI
+ ---help---
+ Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tc358743.
+
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f165faea5b3f..07db257abfc1 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
+obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index c70ababce954..5dd39775d6ca 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -465,7 +465,7 @@ static int adp1653_of_init(struct i2c_client *client,
of_node_put(child);
- pd->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+ pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
if (!pd->enable_gpio) {
dev_err(&client->dev, "Error getting GPIO\n");
return -EINVAL;
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index f0d3f5a2da46..05f1dc6c72af 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -401,7 +401,6 @@ MODULE_DEVICE_TABLE(i2c, adv7170_id);
static struct i2c_driver adv7170_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7170",
},
.probe = adv7170_probe,
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 321834ba8f57..f554809a51e7 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -455,7 +455,6 @@ MODULE_DEVICE_TABLE(i2c, adv7175_id);
static struct i2c_driver adv7175_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7175",
},
.probe = adv7175_probe,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index a493c0b0b5fe..f82c8aa164fa 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -1324,11 +1325,20 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#define ADV7180_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id adv7180_of_id[] = {
+ { .compatible = "adi,adv7180", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, adv7180_of_id);
+#endif
+
static struct i2c_driver adv7180_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.pm = ADV7180_PM_OPS,
+ .of_match_table = of_match_ptr(adv7180_of_id),
},
.probe = adv7180_probe,
.remove = adv7180_remove,
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 7c50833e7d17..f89d0afcd964 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -319,13 +319,6 @@ static const struct v4l2_ctrl_ops adv7343_ctrl_ops = {
static const struct v4l2_subdev_core_ops adv7343_core_ops = {
.log_status = adv7343_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
@@ -529,7 +522,6 @@ MODULE_DEVICE_TABLE(of, adv7343_of_match);
static struct i2c_driver adv7343_driver = {
.driver = {
.of_match_table = of_match_ptr(adv7343_of_match),
- .owner = THIS_MODULE,
.name = "adv7343",
},
.probe = adv7343_probe,
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 558f19154eb9..0215f95c2245 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -306,13 +306,6 @@ static const struct v4l2_ctrl_ops adv7393_ctrl_ops = {
static const struct v4l2_subdev_core_ops adv7393_core_ops = {
.log_status = adv7393_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static int adv7393_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 95bcd4026451..e4900df1140b 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
MODULE_AUTHOR("Hans Verkuil");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
#define MASK_ADV7511_EDID_RDY_INT 0x04
#define MASK_ADV7511_MSEN_INT 0x40
@@ -1576,7 +1576,6 @@ MODULE_DEVICE_TABLE(i2c, adv7511_id);
static struct i2c_driver adv7511_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7511",
},
.probe = adv7511_probe,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 808360fd6539..5631ec004eed 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -37,10 +37,12 @@
#include <linux/v4l2-dv-timings.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
+#include <linux/regmap.h>
#include <media/adv7604.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-of.h>
@@ -81,6 +83,7 @@ MODULE_LICENSE("GPL");
enum adv76xx_type {
ADV7604,
ADV7611,
+ ADV7612,
};
struct adv76xx_reg_seq {
@@ -188,6 +191,9 @@ struct adv76xx_state {
/* i2c clients */
struct i2c_client *i2c_clients[ADV76XX_PAGE_MAX];
+ /* Regmaps */
+ struct regmap *regmap[ADV76XX_PAGE_MAX];
+
/* controls */
struct v4l2_ctrl *detect_tx_5v_ctrl;
struct v4l2_ctrl *analog_sampling_phase_ctrl;
@@ -373,66 +379,39 @@ static inline unsigned vtotal(const struct v4l2_bt_timings *t)
/* ----------------------------------------------------------------------- */
-static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
- u8 command, bool check)
-{
- union i2c_smbus_data data;
-
- if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_READ, command,
- I2C_SMBUS_BYTE_DATA, &data))
- return data.byte;
- if (check)
- v4l_err(client, "error reading %02x, %02x\n",
- client->addr, command);
- return -EIO;
-}
-
-static s32 adv_smbus_read_byte_data(struct adv76xx_state *state,
- enum adv76xx_page page, u8 command)
+static int adv76xx_read_check(struct adv76xx_state *state,
+ int client_page, u8 reg)
{
- return adv_smbus_read_byte_data_check(state->i2c_clients[page],
- command, true);
-}
-
-static s32 adv_smbus_write_byte_data(struct adv76xx_state *state,
- enum adv76xx_page page, u8 command,
- u8 value)
-{
- struct i2c_client *client = state->i2c_clients[page];
- union i2c_smbus_data data;
+ struct i2c_client *client = state->i2c_clients[client_page];
int err;
- int i;
+ unsigned int val;
- data.byte = value;
- for (i = 0; i < 3; i++) {
- err = i2c_smbus_xfer(client->adapter, client->addr,
- client->flags,
- I2C_SMBUS_WRITE, command,
- I2C_SMBUS_BYTE_DATA, &data);
- if (!err)
- break;
+ err = regmap_read(state->regmap[client_page], reg, &val);
+
+ if (err) {
+ v4l_err(client, "error reading %02x, %02x\n",
+ client->addr, reg);
+ return err;
}
- if (err < 0)
- v4l_err(client, "error writing %02x, %02x, %02x\n",
- client->addr, command, value);
- return err;
+ return val;
}
-static s32 adv_smbus_write_i2c_block_data(struct adv76xx_state *state,
- enum adv76xx_page page, u8 command,
- unsigned length, const u8 *values)
+/* adv76xx_write_block(): Write raw data, at most I2C_SMBUS_BLOCK_MAX bytes
+ * at a time, to one or more registers.
+ *
+ * Returns zero on success, or a negative errno on error.
+ */
+static int adv76xx_write_block(struct adv76xx_state *state, int client_page,
+ unsigned int init_reg, const void *val,
+ size_t val_len)
{
- struct i2c_client *client = state->i2c_clients[page];
- union i2c_smbus_data data;
+ struct regmap *regmap = state->regmap[client_page];
+
+ if (val_len > I2C_SMBUS_BLOCK_MAX)
+ val_len = I2C_SMBUS_BLOCK_MAX;
- if (length > I2C_SMBUS_BLOCK_MAX)
- length = I2C_SMBUS_BLOCK_MAX;
- data.block[0] = length;
- memcpy(data.block + 1, values, length);
- return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_WRITE, command,
- I2C_SMBUS_I2C_BLOCK_DATA, &data);
+ return regmap_raw_write(regmap, init_reg, val, val_len);
}
/* ----------------------------------------------------------------------- */
@@ -441,14 +420,14 @@ static inline int io_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_IO, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_IO, reg);
}
static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_IO, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_IO], reg, val);
}
static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -460,71 +439,70 @@ static inline int avlink_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_AVLINK, reg);
+ return adv76xx_read_check(state, ADV7604_PAGE_AVLINK, reg);
}
static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_AVLINK, reg, val);
+ return regmap_write(state->regmap[ADV7604_PAGE_AVLINK], reg, val);
}
static inline int cec_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CEC, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_CEC, reg);
}
static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CEC, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_CEC], reg, val);
}
static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_INFOFRAME, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_INFOFRAME, reg);
}
static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_INFOFRAME,
- reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_INFOFRAME], reg, val);
}
static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_AFE, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_AFE, reg);
}
static inline int afe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_AFE, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_AFE], reg, val);
}
static inline int rep_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_REP, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_REP, reg);
}
static inline int rep_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_REP, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_REP], reg, val);
}
static inline int rep_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -536,28 +514,37 @@ static inline int edid_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_EDID, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_EDID, reg);
}
static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_EDID, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_EDID], reg, val);
}
static inline int edid_write_block(struct v4l2_subdev *sd,
- unsigned len, const u8 *val)
+ unsigned int total_len, const u8 *val)
{
struct adv76xx_state *state = to_state(sd);
int err = 0;
- int i;
+ int i = 0;
+ int len = 0;
+
+ v4l2_dbg(2, debug, sd, "%s: write EDID block (%d bytes)\n",
+ __func__, total_len);
- v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
+ while (!err && i < total_len) {
+ len = (total_len - i) > I2C_SMBUS_BLOCK_MAX ?
+ I2C_SMBUS_BLOCK_MAX :
+ (total_len - i);
+
+ err = adv76xx_write_block(state, ADV76XX_PAGE_EDID,
+ i, val + i, len);
+ i += len;
+ }
- for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
- err = adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_EDID,
- i, I2C_SMBUS_BLOCK_MAX, val + i);
return err;
}
@@ -587,7 +574,7 @@ static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_HDMI, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_HDMI, reg);
}
static u16 hdmi_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -599,7 +586,7 @@ static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_HDMI, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_HDMI], reg, val);
}
static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -611,14 +598,14 @@ static inline int test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_TEST, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_TEST], reg, val);
}
static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV76XX_PAGE_CP, reg);
+ return adv76xx_read_check(state, ADV76XX_PAGE_CP, reg);
}
static u16 cp_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
@@ -630,7 +617,7 @@ static inline int cp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV76XX_PAGE_CP, reg, val);
+ return regmap_write(state->regmap[ADV76XX_PAGE_CP], reg, val);
}
static inline int cp_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
@@ -642,14 +629,14 @@ static inline int vdp_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_read_byte_data(state, ADV7604_PAGE_VDP, reg);
+ return adv76xx_read_check(state, ADV7604_PAGE_VDP, reg);
}
static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
- return adv_smbus_write_byte_data(state, ADV7604_PAGE_VDP, reg, val);
+ return regmap_write(state->regmap[ADV7604_PAGE_VDP], reg, val);
}
#define ADV76XX_REG(page, offset) (((page) << 8) | (offset))
@@ -660,13 +647,16 @@ static int adv76xx_read_reg(struct v4l2_subdev *sd, unsigned int reg)
{
struct adv76xx_state *state = to_state(sd);
unsigned int page = reg >> 8;
+ unsigned int val;
+ int err;
if (!(BIT(page) & state->info->page_mask))
return -EINVAL;
reg &= 0xff;
+ err = regmap_read(state->regmap[page], reg, &val);
- return adv_smbus_read_byte_data(state, page, reg);
+ return err ? err : val;
}
#endif
@@ -680,7 +670,7 @@ static int adv76xx_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
reg &= 0xff;
- return adv_smbus_write_byte_data(state, page, reg, val);
+ return regmap_write(state->regmap[page], reg, val);
}
static void adv76xx_write_reg_seq(struct v4l2_subdev *sd,
@@ -766,6 +756,23 @@ static const struct adv76xx_format_info adv7611_formats[] = {
ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_12BIT },
};
+static const struct adv76xx_format_info adv7612_formats[] = {
+ { MEDIA_BUS_FMT_RGB888_1X24, ADV76XX_OP_CH_SEL_RGB, true, false,
+ ADV76XX_OP_MODE_SEL_SDR_444 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_2X8, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_2X8, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422 | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_UYVY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_VYUY8_1X16, ADV76XX_OP_CH_SEL_RBG, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YUYV8_1X16, ADV76XX_OP_CH_SEL_RGB, false, false,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+ { MEDIA_BUS_FMT_YVYU8_1X16, ADV76XX_OP_CH_SEL_RGB, false, true,
+ ADV76XX_OP_MODE_SEL_SDR_422_2X | ADV76XX_OP_FORMAT_SEL_8BIT },
+};
+
static const struct adv76xx_format_info *
adv76xx_format_info(struct adv76xx_state *state, u32 code)
{
@@ -870,6 +877,16 @@ static unsigned int adv7611_read_cable_det(struct v4l2_subdev *sd)
return value & 1;
}
+static unsigned int adv7612_read_cable_det(struct v4l2_subdev *sd)
+{
+ /* Reads CABLE_DET_A_RAW. For input B support, need to
+ * account for bit 7 [MSB] of 0x6a (i.e. CABLE_DET_B_RAW)
+ */
+ u8 value = io_read(sd, 0x6f);
+
+ return value & 1;
+}
+
static int adv76xx_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
struct adv76xx_state *state = to_state(sd);
@@ -976,8 +993,8 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
/* IO-map reg. 0x16 and 0x17 should be written in sequence */
- if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_IO,
- 0x16, 2, pll))
+ if (regmap_raw_write(state->regmap[ADV76XX_PAGE_IO],
+ 0x16, pll, 2))
v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
/* active video - horizontal timing */
@@ -1028,8 +1045,8 @@ static void adv76xx_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 off
offset_buf[3] = offset_c & 0x0ff;
/* Registers must be written in this order with no i2c access in between */
- if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
- 0x77, 4, offset_buf))
+ if (regmap_raw_write(state->regmap[ADV76XX_PAGE_CP],
+ 0x77, offset_buf, 4))
v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
}
@@ -1058,8 +1075,8 @@ static void adv76xx_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a,
gain_buf[3] = ((gain_c & 0x0ff));
/* Registers must be written in this order with no i2c access in between */
- if (adv_smbus_write_i2c_block_data(state, ADV76XX_PAGE_CP,
- 0x73, 4, gain_buf))
+ if (regmap_raw_write(state->regmap[ADV76XX_PAGE_CP],
+ 0x73, gain_buf, 4))
v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
}
@@ -1328,7 +1345,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
}
}
- if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
+ if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
false, timings))
@@ -1760,8 +1777,8 @@ static int adv76xx_s_routing(struct v4l2_subdev *sd,
select_input(sd);
enable_input(sd);
- v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
- (void *)&adv76xx_ev_fmt);
+ v4l2_subdev_notify_event(sd, &adv76xx_ev_fmt);
+
return 0;
}
@@ -1928,8 +1945,7 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
__func__, fmt_change, fmt_change_digital);
- v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
- (void *)&adv76xx_ev_fmt);
+ v4l2_subdev_notify_event(sd, &adv76xx_ev_fmt);
if (handled)
*handled = true;
@@ -2347,6 +2363,20 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
return 0;
}
+static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
@@ -2356,6 +2386,8 @@ static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
static const struct v4l2_subdev_core_ops adv76xx_core_ops = {
.log_status = adv76xx_log_status,
.interrupt_service_routine = adv76xx_isr,
+ .subscribe_event = adv76xx_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = adv76xx_g_register,
.s_register = adv76xx_s_register,
@@ -2510,6 +2542,11 @@ static void adv7611_setup_irqs(struct v4l2_subdev *sd)
io_write(sd, 0x41, 0xd0); /* STDI irq for any change, disable INT2 */
}
+static void adv7612_setup_irqs(struct v4l2_subdev *sd)
+{
+ io_write(sd, 0x41, 0xd0); /* disable INT2 */
+}
+
static void adv76xx_unregister_clients(struct adv76xx_state *state)
{
unsigned int i;
@@ -2597,6 +2634,19 @@ static const struct adv76xx_reg_seq adv7611_recommended_settings_hdmi[] = {
{ ADV76XX_REG_SEQ_TERM, 0 },
};
+static const struct adv76xx_reg_seq adv7612_recommended_settings_hdmi[] = {
+ { ADV76XX_REG(ADV76XX_PAGE_CP, 0x6c), 0x00 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x9b), 0x03 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x6f), 0x08 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x85), 0x1f },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x87), 0x70 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x57), 0xda },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x58), 0x01 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x03), 0x98 },
+ { ADV76XX_REG(ADV76XX_PAGE_HDMI, 0x4c), 0x44 },
+ { ADV76XX_REG_SEQ_TERM, 0 },
+};
+
static const struct adv76xx_chip_info adv76xx_chip_info[] = {
[ADV7604] = {
.type = ADV7604,
@@ -2685,17 +2735,60 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
.field1_vsync_mask = 0x3fff,
.field1_vbackporch_mask = 0x3fff,
},
+ [ADV7612] = {
+ .type = ADV7612,
+ .has_afe = false,
+ .max_port = ADV76XX_PAD_HDMI_PORT_A, /* B not supported */
+ .num_dv_ports = 1, /* normally 2 */
+ .edid_enable_reg = 0x74,
+ .edid_status_reg = 0x76,
+ .lcf_reg = 0xa3,
+ .tdms_lock_mask = 0x43,
+ .cable_det_mask = 0x01,
+ .fmt_change_digital_mask = 0x03,
+ .cp_csc = 0xf4,
+ .formats = adv7612_formats,
+ .nformats = ARRAY_SIZE(adv7612_formats),
+ .set_termination = adv7611_set_termination,
+ .setup_irqs = adv7612_setup_irqs,
+ .read_hdmi_pixelclock = adv7611_read_hdmi_pixelclock,
+ .read_cable_det = adv7612_read_cable_det,
+ .recommended_settings = {
+ [1] = adv7612_recommended_settings_hdmi,
+ },
+ .num_recommended_settings = {
+ [1] = ARRAY_SIZE(adv7612_recommended_settings_hdmi),
+ },
+ .page_mask = BIT(ADV76XX_PAGE_IO) | BIT(ADV76XX_PAGE_CEC) |
+ BIT(ADV76XX_PAGE_INFOFRAME) | BIT(ADV76XX_PAGE_AFE) |
+ BIT(ADV76XX_PAGE_REP) | BIT(ADV76XX_PAGE_EDID) |
+ BIT(ADV76XX_PAGE_HDMI) | BIT(ADV76XX_PAGE_CP),
+ .linewidth_mask = 0x1fff,
+ .field0_height_mask = 0x1fff,
+ .field1_height_mask = 0x1fff,
+ .hfrontporch_mask = 0x1fff,
+ .hsync_mask = 0x1fff,
+ .hbackporch_mask = 0x1fff,
+ .field0_vfrontporch_mask = 0x3fff,
+ .field0_vsync_mask = 0x3fff,
+ .field0_vbackporch_mask = 0x3fff,
+ .field1_vfrontporch_mask = 0x3fff,
+ .field1_vsync_mask = 0x3fff,
+ .field1_vbackporch_mask = 0x3fff,
+ },
};
static const struct i2c_device_id adv76xx_i2c_id[] = {
{ "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
{ "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
+ { "adv7612", (kernel_ulong_t)&adv76xx_chip_info[ADV7612] },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
static const struct of_device_id adv76xx_of_id[] __maybe_unused = {
{ .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
+ { .compatible = "adi,adv7612", .data = &adv76xx_chip_info[ADV7612] },
{ }
};
MODULE_DEVICE_TABLE(of, adv76xx_of_id);
@@ -2706,6 +2799,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
struct device_node *endpoint;
struct device_node *np;
unsigned int flags;
+ u32 v;
np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
@@ -2715,6 +2809,12 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return -EINVAL;
v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+
+ if (!of_property_read_u32(endpoint, "default-input", &v))
+ state->pdata.default_input = v;
+ else
+ state->pdata.default_input = -1;
+
of_node_put(endpoint);
flags = bus_cfg.bus.parallel.flags;
@@ -2753,7 +2853,6 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
/* Hardcode the remaining platform data fields. */
state->pdata.disable_pwrdnb = 0;
state->pdata.disable_cable_det_rst = 0;
- state->pdata.default_input = -1;
state->pdata.blank_data = 1;
state->pdata.alt_data_sat = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
@@ -2762,6 +2861,148 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return 0;
}
+static const struct regmap_config adv76xx_regmap_cnf[] = {
+ {
+ .name = "io",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "avlink",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "cec",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "infoframe",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "esdp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "epp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "afe",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "rep",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "edid",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+
+ {
+ .name = "hdmi",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "test",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "cp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "vdp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+};
+
+static int configure_regmap(struct adv76xx_state *state, int region)
+{
+ int err;
+
+ if (!state->i2c_clients[region])
+ return -ENODEV;
+
+ state->regmap[region] =
+ devm_regmap_init_i2c(state->i2c_clients[region],
+ &adv76xx_regmap_cnf[region]);
+
+ if (IS_ERR(state->regmap[region])) {
+ err = PTR_ERR(state->regmap[region]);
+ v4l_err(state->i2c_clients[region],
+ "Error initializing regmap %d with error %d\n",
+ region, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int configure_regmaps(struct adv76xx_state *state)
+{
+ int i, err;
+
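+ /* the IO page regmap is configured separately during probe; set up the remaining pages */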
+ for (i = ADV7604_PAGE_AVLINK ; i < ADV76XX_PAGE_MAX; i++) {
+ err = configure_regmap(state, i);
+ if (err && (err != -ENODEV))
+ return err;
+ }
+ return 0;
+}
+
static int adv76xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -2771,7 +3012,7 @@ static int adv76xx_probe(struct i2c_client *client,
struct v4l2_ctrl_handler *hdl;
struct v4l2_subdev *sd;
unsigned int i;
- u16 val;
+ unsigned int val, val2;
int err;
/* Check if the adapter supports the needed features */
@@ -2833,28 +3074,59 @@ static int adv76xx_probe(struct i2c_client *client,
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
id->name, i2c_adapter_id(client->adapter),
client->addr);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Configure IO Regmap region */
+ err = configure_regmap(state, ADV76XX_PAGE_IO);
+
+ if (err) {
+ v4l2_err(sd, "Error configuring IO regmap region\n");
+ return -ENODEV;
+ }
/*
* Verify that the chip is present. On ADV7604 the RD_INFO register only
* identifies the revision, while on ADV7611 it identifies the model as
* well. Use the HDMI slave address on ADV7604 and RD_INFO on ADV7611.
*/
- if (state->info->type == ADV7604) {
- val = adv_smbus_read_byte_data_check(client, 0xfb, false);
+ switch (state->info->type) {
+ case ADV7604:
+ err = regmap_read(state->regmap[ADV76XX_PAGE_IO], 0xfb, &val);
+ if (err) {
+ v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+ return -ENODEV;
+ }
if (val != 0x68) {
- v4l2_info(sd, "not an adv7604 on address 0x%x\n",
+ v4l2_err(sd, "not an adv7604 on address 0x%x\n",
client->addr << 1);
return -ENODEV;
}
- } else {
- val = (adv_smbus_read_byte_data_check(client, 0xea, false) << 8)
- | (adv_smbus_read_byte_data_check(client, 0xeb, false) << 0);
- if (val != 0x2051) {
- v4l2_info(sd, "not an adv7611 on address 0x%x\n",
+ break;
+ case ADV7611:
+ case ADV7612:
+ err = regmap_read(state->regmap[ADV76XX_PAGE_IO],
+ 0xea,
+ &val);
+ if (err) {
+ v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+ return -ENODEV;
+ }
+ val2 = val << 8;
+ err = regmap_read(state->regmap[ADV76XX_PAGE_IO],
+ 0xeb,
+ &val);
+ if (err) {
+ v4l2_err(sd, "Error %d reading IO Regmap\n", err);
+ return -ENODEV;
+ }
+ val |= val2;
+ if ((state->info->type == ADV7611 && val != 0x2051) ||
+ (state->info->type == ADV7612 && val != 0x2041)) {
+ v4l2_err(sd, "not an adv761x on address 0x%x\n",
client->addr << 1);
return -ENODEV;
}
+ break;
}
/* control handlers */
@@ -2941,6 +3213,11 @@ static int adv76xx_probe(struct i2c_client *client,
if (err)
goto err_work_queues;
+ /* Configure regmaps */
+ err = configure_regmaps(state);
+ if (err)
+ goto err_entity;
+
err = adv76xx_core_init(sd);
if (err)
goto err_entity;
@@ -2985,7 +3262,6 @@ static int adv76xx_remove(struct i2c_client *client)
static struct i2c_driver adv76xx_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7604",
.of_match_table = of_match_ptr(adv76xx_of_id),
},
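
Side note on the regmap conversion above: configure_regmaps() starts its loop at ADV7604_PAGE_AVLINK because the IO page is set up separately and earlier in probe, before the chip identity is read through it. As a minimal sketch (hypothetical helper, not part of the patch) of how the per-page regmap table is consumed, this is the ID read from probe refactored into one function:

    static int adv76xx_read_model(struct adv76xx_state *state,
                                  unsigned int *model)
    {
            unsigned int hi, lo;
            int err;

            err = regmap_read(state->regmap[ADV76XX_PAGE_IO], 0xea, &hi);
            if (err)
                    return err;
            err = regmap_read(state->regmap[ADV76XX_PAGE_IO], 0xeb, &lo);
            if (err)
                    return err;

            /* 0x2051 identifies the ADV7611, 0x2041 the ADV7612 */
            *model = (hi << 8) | lo;
            return 0;
    }
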
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 4cf79b2422d4..b7269b8f040d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -40,6 +40,7 @@
#include <linux/v4l2-dv-timings.h>
#include <linux/hdmi.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dv-timings.h>
#include <media/adv7842.h>
@@ -1442,7 +1443,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
}
}
- if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
+ if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0,
(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
false, timings))
@@ -1980,8 +1981,7 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
select_input(sd, state->vid_std_select);
enable_input(sd);
- v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
- (void *)&adv7842_ev_fmt);
+ v4l2_subdev_notify_event(sd, &adv7842_ev_fmt);
return 0;
}
@@ -2214,8 +2214,7 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
"%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
__func__, fmt_change_cp, fmt_change_digital,
fmt_change_sdp);
- v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT,
- (void *)&adv7842_ev_fmt);
+ v4l2_subdev_notify_event(sd, &adv7842_ev_fmt);
if (handled)
*handled = true;
}
@@ -3005,6 +3004,20 @@ static long adv7842_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
return -ENOTTY;
}
+static int adv7842_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
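
Both adv76xx and adv7842 now advertise V4L2_SUBDEV_FL_HAS_EVENTS and wire up subscribe_event, so userspace can wait for V4L2_EVENT_SOURCE_CHANGE on the subdev node. A minimal sketch of that side (the device path is an assumption; error handling trimmed):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int wait_for_source_change(const char *path)
    {
            struct v4l2_event_subscription sub;
            struct v4l2_event ev;
            int fd = open(path, O_RDWR);    /* e.g. /dev/v4l-subdev0 */

            if (fd < 0)
                    return -1;
            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_SOURCE_CHANGE;
            if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
                    return -1;
            /* blocking fd: VIDIOC_DQEVENT waits for the next event */
            return ioctl(fd, VIDIOC_DQEVENT, &ev);
    }
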
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops adv7842_ctrl_ops = {
@@ -3015,6 +3028,8 @@ static const struct v4l2_subdev_core_ops adv7842_core_ops = {
.log_status = adv7842_log_status,
.ioctl = adv7842_ioctl,
.interrupt_service_routine = adv7842_isr,
+ .subscribe_event = adv7842_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = adv7842_g_register,
.s_register = adv7842_s_register,
@@ -3210,7 +3225,7 @@ static int adv7842_probe(struct i2c_client *client,
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
state->mode = pdata->mode;
state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
@@ -3348,7 +3363,6 @@ MODULE_DEVICE_TABLE(i2c, adv7842_id);
static struct i2c_driver adv7842_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7842",
},
.probe = adv7842_probe,
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 29846245aa3b..d3b965ec3bbc 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -156,12 +156,12 @@ static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
} else if (std == V4L2_STD_PAL_60) {
vp1 = 7;
ak881x->lines = 480;
- } else if (std && !(std & ~V4L2_STD_PAL)) {
- vp1 = 0xf;
- ak881x->lines = 576;
- } else if (std && !(std & ~V4L2_STD_NTSC)) {
+ } else if (std & V4L2_STD_NTSC) {
vp1 = 0;
ak881x->lines = 480;
+ } else if (std & V4L2_STD_PAL) {
+ vp1 = 0xf;
+ ak881x->lines = 576;
} else {
/* No SECAM or PAL_N/Nc supported */
return -EINVAL;
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 76b334a6a56d..e00e3104d448 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -379,16 +379,6 @@ static const struct v4l2_ctrl_ops bt819_ctrl_ops = {
.s_ctrl = bt819_s_ctrl,
};
-static const struct v4l2_subdev_core_ops bt819_core_ops = {
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
-};
-
static const struct v4l2_subdev_video_ops bt819_video_ops = {
.s_std = bt819_s_std,
.s_routing = bt819_s_routing,
@@ -398,7 +388,6 @@ static const struct v4l2_subdev_video_ops bt819_video_ops = {
};
static const struct v4l2_subdev_ops bt819_ops = {
- .core = &bt819_core_ops,
.video = &bt819_video_ops,
};
@@ -492,7 +481,6 @@ MODULE_DEVICE_TABLE(i2c, bt819_id);
static struct i2c_driver bt819_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "bt819",
},
.probe = bt819_probe,
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 7fc163d0253c..48176591a80d 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -252,7 +252,6 @@ MODULE_DEVICE_TABLE(i2c, bt856_id);
static struct i2c_driver bt856_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "bt856",
},
.probe = bt856_probe,
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index a8bf10fc665d..bbec70c882a3 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -218,7 +218,6 @@ MODULE_DEVICE_TABLE(i2c, bt866_id);
static struct i2c_driver bt866_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "bt866",
},
.probe = bt866_probe,
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index 34b76a9e7515..c7de9790d4f3 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -132,13 +132,6 @@ static const struct v4l2_ctrl_ops cs5345_ctrl_ops = {
static const struct v4l2_subdev_core_ops cs5345_core_ops = {
.log_status = cs5345_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = cs5345_g_register,
.s_register = cs5345_s_register,
@@ -218,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, cs5345_id);
static struct i2c_driver cs5345_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "cs5345",
},
.probe = cs5345_probe,
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 27400c16ef9a..b7e87e38642a 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -228,7 +228,6 @@ MODULE_DEVICE_TABLE(i2c, cs53l32a_id);
static struct i2c_driver cs53l32a_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "cs53l32a",
},
.probe = cs53l32a_probe,
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e15a789ad596..fe6eb78b6914 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -5348,7 +5348,6 @@ MODULE_DEVICE_TABLE(i2c, cx25840_id);
static struct i2c_driver cx25840_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "cx25840",
},
.probe = cx25840_probe,
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 175a76114953..728d2cc8a3e7 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -478,6 +478,7 @@ static const struct i2c_device_id ir_kbd_id[] = {
{ "ir_rx_z8f0811_hdpvr", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
static struct i2c_driver ir_kbd_driver = {
.driver = {
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 25b81bc58c81..77551baab068 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -708,7 +708,6 @@ MODULE_DEVICE_TABLE(i2c, ks0127_id);
static struct i2c_driver ks0127_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ks0127",
},
.probe = ks0127_probe,
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index bf476358704d..77eb07eb667e 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -185,7 +185,6 @@ MODULE_DEVICE_TABLE(i2c, m52790_id);
static struct i2c_driver m52790_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "m52790",
},
.probe = m52790_probe,
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index dcc68ec71732..bdb94000ba5a 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -894,7 +894,6 @@ MODULE_DEVICE_TABLE(i2c, msp_id);
static struct i2c_driver msp_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "msp3400",
.pm = &msp3400_pm_ops,
},
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 57132cdba5ea..a4a5c39b599b 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -583,7 +583,6 @@ MODULE_DEVICE_TABLE(i2c, mt9v011_id);
static struct i2c_driver mt9v011_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "mt9v011",
},
.probe = mt9v011_probe,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 977f4006edbd..a68ce94ee097 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -882,7 +882,7 @@ static const struct regmap_config mt9v032_regmap_config = {
static struct mt9v032_platform_data *
mt9v032_get_pdata(struct i2c_client *client)
{
- struct mt9v032_platform_data *pdata;
+ struct mt9v032_platform_data *pdata = NULL;
struct v4l2_of_endpoint endpoint;
struct device_node *np;
struct property *prop;
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 6edffc7b74e3..49109f4f5bb4 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -909,7 +909,6 @@ static void ov2659_pll_calc_params(struct ov2659 *ov2659)
u8 ctrl1_reg = 0, ctrl2_reg = 0, ctrl3_reg = 0;
struct i2c_client *client = ov2659->client;
unsigned int desired = pdata->link_frequency;
- u32 s_prediv = 1, s_postdiv = 1, s_mult = 1;
u32 prediv, postdiv, mult;
u32 bestdelta = -1;
u32 delta, actual;
@@ -929,9 +928,6 @@ static void ov2659_pll_calc_params(struct ov2659 *ov2659)
if ((delta < bestdelta) || (bestdelta == -1)) {
bestdelta = delta;
- s_mult = mult;
- s_prediv = prediv;
- s_postdiv = postdiv;
ctrl1_reg = ctrl1[i].reg;
ctrl2_reg = mult;
ctrl3_reg = ctrl3[j].reg;
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index faa64baf09e8..b8961df5af33 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -94,7 +94,6 @@ MODULE_DEVICE_TABLE(i2c, ov7640_id);
static struct i2c_driver ov7640_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ov7640",
},
.probe = ov7640_probe,
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 2d1e25f10973..e1b5dc84c14e 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1674,7 +1674,6 @@ MODULE_DEVICE_TABLE(i2c, ov7670_id);
static struct i2c_driver ov7670_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ov7670",
},
.probe = ov7670_probe,
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 2bc473385c91..e691bba1945b 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1436,7 +1436,7 @@ static int ov965x_detect_sensor(struct v4l2_subdev *sd)
int ret;
mutex_lock(&ov965x->lock);
- __ov965x_set_power(ov965x, 1);
+ __ov965x_set_power(ov965x, 1);
usleep_range(25000, 26000);
/* Check sensor revision */
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index 63eb19093381..fa4a5ebda6b2 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -149,7 +149,6 @@ int s5c73m3_register_spi_driver(struct s5c73m3 *state)
spidrv->remove = s5c73m3_spi_remove;
spidrv->probe = s5c73m3_spi_probe;
spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
- spidrv->driver.bus = &spi_bus_type;
spidrv->driver.owner = THIS_MODULE;
spidrv->driver.of_match_table = s5c73m3_spi_ids;
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index bc389d5e42ae..b1b1574dfb95 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -363,6 +363,7 @@ static int s5k6a3_remove(struct i2c_client *client)
static const struct i2c_device_id s5k6a3_ids[] = {
{ }
};
+MODULE_DEVICE_TABLE(i2c, s5k6a3_ids);
#ifdef CONFIG_OF
static const struct of_device_id s5k6a3_of_match[] = {
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 2960b5a8362a..37e65f661d7a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -301,9 +301,7 @@ static void saa6588_i2c_poll(struct saa6588 *s)
first and the last of the 3 bytes block.
*/
- tmp = tmpbuf[2];
- tmpbuf[2] = tmpbuf[0];
- tmpbuf[0] = tmp;
+ swap(tmpbuf[2], tmpbuf[0]);
/* Map 'Invalid block E' to 'Invalid Block' */
if (blocknum == 6)
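
The open-coded three-line byte swap is replaced with the kernel's swap() macro from <linux/kernel.h>, which is roughly:

    #define swap(a, b) \
            do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
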
@@ -520,7 +518,6 @@ MODULE_DEVICE_TABLE(i2c, saa6588_id);
static struct i2c_driver saa6588_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa6588",
},
.probe = saa6588_probe,
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index ba3c4156644d..7202d3a3219a 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -793,7 +793,6 @@ MODULE_DEVICE_TABLE(i2c, saa6752hs_id);
static struct i2c_driver saa6752hs_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa6752hs",
},
.probe = saa6752hs_probe,
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 99689ee57d7e..6f49886806ee 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -357,16 +357,6 @@ static const struct v4l2_ctrl_ops saa7110_ctrl_ops = {
.s_ctrl = saa7110_s_ctrl,
};
-static const struct v4l2_subdev_core_ops saa7110_core_ops = {
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
-};
-
static const struct v4l2_subdev_video_ops saa7110_video_ops = {
.s_std = saa7110_s_std,
.s_routing = saa7110_s_routing,
@@ -376,7 +366,6 @@ static const struct v4l2_subdev_video_ops saa7110_video_ops = {
};
static const struct v4l2_subdev_ops saa7110_ops = {
- .core = &saa7110_core_ops,
.video = &saa7110_video_ops,
};
@@ -472,7 +461,6 @@ MODULE_DEVICE_TABLE(i2c, saa7110_id);
static struct i2c_driver saa7110_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa7110",
},
.probe = saa7110_probe,
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 0eae5f4471e2..91e75222c537 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1929,7 +1929,6 @@ MODULE_DEVICE_TABLE(i2c, saa711x_id);
static struct i2c_driver saa711x_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa7115",
},
.probe = saa711x_probe,
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 264b755bedce..a43d96da1017 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -822,7 +822,6 @@ MODULE_DEVICE_TABLE(i2c, saa7127_id);
static struct i2c_driver saa7127_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa7127",
},
.probe = saa7127_probe,
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 7d517361e419..1baca37f3eb6 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1204,13 +1204,6 @@ static const struct v4l2_subdev_core_ops saa717x_core_ops = {
.g_register = saa717x_g_register,
.s_register = saa717x_s_register,
#endif
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.log_status = saa717x_log_status,
};
@@ -1363,7 +1356,6 @@ MODULE_DEVICE_TABLE(i2c, saa717x_id);
static struct i2c_driver saa717x_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa717x",
},
.probe = saa717x_probe,
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index f56c1c88b27d..eecad2d1edce 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -356,7 +356,6 @@ MODULE_DEVICE_TABLE(i2c, saa7185_id);
static struct i2c_driver saa7185_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "saa7185",
},
.probe = saa7185_probe,
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index de10a76ba6df..2f35d31ca58e 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -104,22 +104,22 @@ struct mt9t112_priv {
static const struct mt9t112_format mt9t112_cfmts[] = {
{
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 1,
.order = 0,
}, {
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 1,
.order = 1,
}, {
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 1,
.order = 2,
}, {
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 1,
.order = 3,
}, {
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 42bec9bf1892..e939c24bfd3c 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -510,13 +510,39 @@ static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
+ const unsigned hact = 720;
+ const unsigned hdelay = 15;
+ unsigned vact;
+ unsigned vdelay;
+ int ret;
if (!(norm & (V4L2_STD_NTSC | V4L2_STD_PAL)))
return -EINVAL;
priv->norm = norm;
+ if (norm & V4L2_STD_525_60) {
+ vact = 240;
+ vdelay = 18;
+ ret = tw9910_mask_set(client, VVBI, 0x10, 0x10);
+ } else {
+ vact = 288;
+ vdelay = 24;
+ ret = tw9910_mask_set(client, VVBI, 0x10, 0x00);
+ }
+ if (!ret)
+ ret = i2c_smbus_write_byte_data(client, CROP_HI,
+ ((vdelay >> 2) & 0xc0) |
+ ((vact >> 4) & 0x30) |
+ ((hdelay >> 6) & 0x0c) |
+ ((hact >> 8) & 0x03));
+ if (!ret)
+ ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
+ vdelay & 0xff);
+ if (!ret)
+ ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
+ vact & 0xff);
- return 0;
+ return ret;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
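
Worked example for the CROP_HI write above (numbers derived from the constants in the patch, not from the datasheet): CROP_HI collects bits 9:8 of vdelay, vact, hdelay and hact into one byte, two bits per field. For NTSC (vdelay = 18, vact = 240, hdelay = 15, hact = 720) only hact exceeds 8 bits, so CROP_HI = (720 >> 8) & 0x03 = 0x02. For PAL, vact = 288 also carries bit 8, giving CROP_HI = ((288 >> 4) & 0x30) | ((720 >> 8) & 0x03) = 0x12.
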
@@ -711,7 +737,7 @@ static int tw9910_get_fmt(struct v4l2_subdev *sd,
mf->width = priv->scale->width;
mf->height = priv->scale->height;
mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
mf->field = V4L2_FIELD_INTERLACED_BT;
return 0;
@@ -732,7 +758,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8)
return -EINVAL;
- mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
ret = tw9910_set_frame(sd, &width, &height);
if (!ret) {
@@ -762,7 +788,7 @@ static int tw9910_set_fmt(struct v4l2_subdev *sd,
}
mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
/*
* select suitable norm
@@ -820,6 +846,7 @@ static int tw9910_video_probe(struct i2c_client *client)
"tw9910 Product ID %0x:%0x\n", id, priv->revision);
priv->norm = V4L2_STD_NTSC;
+ priv->scale = &tw9910_ntsc_scales[0];
done:
tw9910_s_power(&priv->subdev, 0);
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index 1da8004f5a8e..6b1a04ffad32 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(i2c, sony_btf_mpx_id);
static struct i2c_driver sony_btf_mpx_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "sony-btf-mpx",
},
.probe = sony_btf_mpx_probe,
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index b62b6ddc4356..b04c09dd4bfb 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -489,18 +489,14 @@ static int sr030pc30_get_fmt(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *mf;
struct sr030pc30_info *info = to_sr030pc30(sd);
- int ret;
if (!format || format->pad)
return -EINVAL;
mf = &format->format;
- if (!info->curr_win || !info->curr_fmt) {
- ret = sr030pc30_set_params(sd);
- if (ret)
- return ret;
- }
+ if (!info->curr_win || !info->curr_fmt)
+ return -EINVAL;
mf->width = info->curr_win->width;
mf->height = info->curr_win->height;
@@ -636,13 +632,6 @@ static const struct v4l2_ctrl_ops sr030pc30_ctrl_ops = {
static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
.s_power = sr030pc30_s_power,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_pad_ops sr030pc30_pad_ops = {
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
new file mode 100644
index 000000000000..9ef5baaf8646
--- /dev/null
+++ b/drivers/media/i2c/tc358743.c
@@ -0,0 +1,1979 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/hdmi.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-of.h>
+#include <media/tc358743.h>
+
+#include "tc358743_regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-3)");
+
+MODULE_DESCRIPTION("Toshiba TC358743 HDMI to CSI-2 bridge driver");
+MODULE_AUTHOR("Ramakrishnan Muthukrishnan <ram@rkrishnan.org>");
+MODULE_AUTHOR("Mikhail Khelik <mkhelik@cisco.com>");
+MODULE_AUTHOR("Mats Randgaard <matrandg@cisco.com>");
+MODULE_LICENSE("GPL");
+
+#define EDID_NUM_BLOCKS_MAX 8
+#define EDID_BLOCK_SIZE 128
+
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2)
+
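
With the values above, MAX_XFER_SIZE works out to 8 * 128 + 2 = 1026 bytes: up to eight EDID blocks plus the two register-address bytes that i2c_wr() prepends to each transfer.
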
+static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ /* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
+ V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
+};
+
+struct tc358743_state {
+ struct tc358743_platform_data pdata;
+ struct v4l2_of_bus_mipi_csi2 bus;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler hdl;
+ struct i2c_client *i2c_client;
+ /* CONFCTL is modified in ops and tc358743_hdmi_sys_int_handler */
+ struct mutex confctl_mutex;
+
+ /* controls */
+ struct v4l2_ctrl *detect_tx_5v_ctrl;
+ struct v4l2_ctrl *audio_sampling_rate_ctrl;
+ struct v4l2_ctrl *audio_present_ctrl;
+
+ /* work queues */
+ struct workqueue_struct *work_queues;
+ struct delayed_work delayed_work_enable_hotplug;
+
+ /* edid */
+ u8 edid_blocks_written;
+
+ /* used by i2c_wr() */
+ u8 wr_data[MAX_XFER_SIZE];
+
+ struct v4l2_dv_timings timings;
+ u32 mbus_fmt_code;
+
+ struct gpio_desc *reset_gpio;
+};
+
+static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
+ bool cable_connected);
+static int tc358743_s_ctrl_detect_tx_5v(struct v4l2_subdev *sd);
+
+static inline struct tc358743_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct tc358743_state, sd);
+}
+
+/* --------------- I2C --------------- */
+
+static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct i2c_client *client = state->i2c_client;
+ int err;
+ u8 buf[2] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = n,
+ .buf = values,
+ },
+ };
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err != ARRAY_SIZE(msgs)) {
+ v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed\n",
+ __func__, reg, client->addr);
+ }
+}
+
+static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct i2c_client *client = state->i2c_client;
+ u8 *data = state->wr_data;
+ int err, i;
+ struct i2c_msg msg;
+
+ if ((2 + n) > sizeof(state->wr_data)) {
+ v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big, transfer dropped\n",
+ reg, 2 + n);
+ return;
+ }
+
+ msg.addr = client->addr;
+ msg.buf = data;
+ msg.len = 2 + n;
+ msg.flags = 0;
+
+ data[0] = reg >> 8;
+ data[1] = reg & 0xff;
+
+ for (i = 0; i < n; i++)
+ data[2 + i] = values[i];
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err != 1) {
+ v4l2_err(sd, "%s: writing register 0x%x from 0x%x failed\n",
+ __func__, reg, client->addr);
+ return;
+ }
+
+ if (debug < 3)
+ return;
+
+ switch (n) {
+ case 1:
+ v4l2_info(sd, "I2C write 0x%04x = 0x%02x",
+ reg, data[2]);
+ break;
+ case 2:
+ v4l2_info(sd, "I2C write 0x%04x = 0x%02x%02x",
+ reg, data[3], data[2]);
+ break;
+ case 4:
+ v4l2_info(sd, "I2C write 0x%04x = 0x%02x%02x%02x%02x",
+ reg, data[5], data[4], data[3], data[2]);
+ break;
+ default:
+ v4l2_info(sd, "I2C write %d bytes from address 0x%04x\n",
+ n, reg);
+ }
+}
+
+static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+{
+ u8 val;
+
+ i2c_rd(sd, reg, &val, 1);
+
+ return val;
+}
+
+static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
+ i2c_wr(sd, reg, &val, 1);
+}
+
+static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
+ u8 mask, u8 val)
+{
+ i2c_wr8(sd, reg, (i2c_rd8(sd, reg) & mask) | val);
+}
+
+static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
+{
+ u16 val;
+
+ i2c_rd(sd, reg, (u8 *)&val, 2);
+
+ return val;
+}
+
+static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+ i2c_wr(sd, reg, (u8 *)&val, 2);
+}
+
+static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
+{
+ i2c_wr16(sd, reg, (i2c_rd16(sd, reg) & mask) | val);
+}
+
+static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
+{
+ u32 val;
+
+ i2c_rd(sd, reg, (u8 *)&val, 4);
+
+ return val;
+}
+
+static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val)
+{
+ i2c_wr(sd, reg, (u8 *)&val, 4);
+}
+
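
The 16- and 32-bit helpers above pass &val straight through, so the byte order written on the wire follows host endianness. A hedged sketch of an endian-explicit read (hypothetical helper, not in the driver) would assemble the value from bytes instead:

    static u16 i2c_rd16_le(struct v4l2_subdev *sd, u16 reg)
    {
            u8 b[2];

            i2c_rd(sd, reg, b, 2);
            return b[0] | (b[1] << 8);      /* little-endian assembly */
    }
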
+/* --------------- STATUS --------------- */
+
+static inline bool is_hdmi(struct v4l2_subdev *sd)
+{
+ return i2c_rd8(sd, SYS_STATUS) & MASK_S_HDMI;
+}
+
+static inline bool tx_5v_power_present(struct v4l2_subdev *sd)
+{
+ return i2c_rd8(sd, SYS_STATUS) & MASK_S_DDC5V;
+}
+
+static inline bool no_signal(struct v4l2_subdev *sd)
+{
+ return !(i2c_rd8(sd, SYS_STATUS) & MASK_S_TMDS);
+}
+
+static inline bool no_sync(struct v4l2_subdev *sd)
+{
+ return !(i2c_rd8(sd, SYS_STATUS) & MASK_S_SYNC);
+}
+
+static inline bool audio_present(struct v4l2_subdev *sd)
+{
+ return i2c_rd8(sd, AU_STATUS0) & MASK_S_A_SAMPLE;
+}
+
+static int get_audio_sampling_rate(struct v4l2_subdev *sd)
+{
+ static const int code_to_rate[] = {
+ 44100, 0, 48000, 32000, 22050, 384000, 24000, 352800,
+ 88200, 768000, 96000, 705600, 176400, 0, 192000, 0
+ };
+
+ /* Register FS_SET is not cleared when the cable is disconnected */
+ if (no_signal(sd))
+ return 0;
+
+ return code_to_rate[i2c_rd8(sd, FS_SET) & MASK_FS];
+}
+
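
For example, an FS_SET code of 0x0 maps to 44100 Hz and 0x2 to 48000 Hz; the zero entries in code_to_rate[] are reserved codes and are reported as no rate.
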
+static unsigned tc358743_num_csi_lanes_in_use(struct v4l2_subdev *sd)
+{
+ return ((i2c_rd32(sd, CSI_CONTROL) & MASK_NOL) >> 1) + 1;
+}
+
+/* --------------- TIMINGS --------------- */
+
+static inline unsigned fps(const struct v4l2_bt_timings *t)
+{
+ if (!V4L2_DV_BT_FRAME_HEIGHT(t) || !V4L2_DV_BT_FRAME_WIDTH(t))
+ return 0;
+
+ return DIV_ROUND_CLOSEST((unsigned)t->pixelclock,
+ V4L2_DV_BT_FRAME_HEIGHT(t) * V4L2_DV_BT_FRAME_WIDTH(t));
+}
+
+static int tc358743_get_detected_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+ unsigned width, height, frame_width, frame_height, frame_interval, fps;
+
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+ if (no_signal(sd)) {
+ v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
+ return -ENOLINK;
+ }
+ if (no_sync(sd)) {
+ v4l2_dbg(1, debug, sd, "%s: no sync on signal\n", __func__);
+ return -ENOLCK;
+ }
+
+ timings->type = V4L2_DV_BT_656_1120;
+ bt->interlaced = i2c_rd8(sd, VI_STATUS1) & MASK_S_V_INTERLACE ?
+ V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+
+ width = ((i2c_rd8(sd, DE_WIDTH_H_HI) & 0x1f) << 8) +
+ i2c_rd8(sd, DE_WIDTH_H_LO);
+ height = ((i2c_rd8(sd, DE_WIDTH_V_HI) & 0x1f) << 8) +
+ i2c_rd8(sd, DE_WIDTH_V_LO);
+ frame_width = ((i2c_rd8(sd, H_SIZE_HI) & 0x1f) << 8) +
+ i2c_rd8(sd, H_SIZE_LO);
+ frame_height = (((i2c_rd8(sd, V_SIZE_HI) & 0x3f) << 8) +
+ i2c_rd8(sd, V_SIZE_LO)) / 2;
+ /* Frame interval in milliseconds * 10. Requires that SYS_FREQ0 and
+ * SYS_FREQ1 are set precisely. */
+ frame_interval = ((i2c_rd8(sd, FV_CNT_HI) & 0x3) << 8) +
+ i2c_rd8(sd, FV_CNT_LO);
+ fps = (frame_interval > 0) ?
+ DIV_ROUND_CLOSEST(10000, frame_interval) : 0;
+
+ bt->width = width;
+ bt->height = height;
+ bt->vsync = frame_height - height;
+ bt->hsync = frame_width - width;
+ bt->pixelclock = frame_width * frame_height * fps;
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height *= 2;
+ bt->il_vsync = bt->vsync + 1;
+ bt->pixelclock /= 2;
+ }
+
+ return 0;
+}
+
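
Worked example, assuming a stable 720p60 source: DE width/height read back as 1280 and 720, H_SIZE as 1650 and V_SIZE as 1500 half-lines (frame_height = 750). A frame interval of about 16.7 ms gives FV_CNT = 167, so fps = DIV_ROUND_CLOSEST(10000, 167) = 60 and bt->pixelclock = 1650 * 750 * 60 = 74250000, the nominal 74.25 MHz.
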
+/* --------------- HOTPLUG / HDCP / EDID --------------- */
+
+static void tc358743_delayed_work_enable_hotplug(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tc358743_state *state = container_of(dwork,
+ struct tc358743_state, delayed_work_enable_hotplug);
+ struct v4l2_subdev *sd = &state->sd;
+
+ v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+ i2c_wr8_and_or(sd, HPD_CTL, ~MASK_HPD_OUT0, MASK_HPD_OUT0);
+}
+
+static void tc358743_set_hdmi_hdcp(struct v4l2_subdev *sd, bool enable)
+{
+ v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ?
+ "enable" : "disable");
+
+ i2c_wr8_and_or(sd, HDCP_REG1,
+ ~(MASK_AUTH_UNAUTH_SEL | MASK_AUTH_UNAUTH),
+ MASK_AUTH_UNAUTH_SEL_16_FRAMES | MASK_AUTH_UNAUTH_AUTO);
+
+ i2c_wr8_and_or(sd, HDCP_REG2, ~MASK_AUTO_P3_RESET,
+ SET_AUTO_P3_RESET_FRAMES(0x0f));
+
+ /* HDCP is disabled by configuring the receiver as an HDCP repeater.
+ * Repeater mode requires software support to work, so HDCP
+ * authentication will fail.
+ */
+ i2c_wr8_and_or(sd, HDCP_REG3, ~KEY_RD_CMD, enable ? KEY_RD_CMD : 0);
+ i2c_wr8_and_or(sd, HDCP_MODE, ~(MASK_AUTO_CLR | MASK_MODE_RST_TN),
+ enable ? (MASK_AUTO_CLR | MASK_MODE_RST_TN) : 0);
+
+ /* Apple MacBook Pro gen.8 has a bug that makes it freeze every fifth
+ * second when HDCP is disabled, but the MAX_EXCED bit is handled
+ * correctly and HDCP is disabled on the HDMI output.
+ */
+ i2c_wr8_and_or(sd, BSTATUS1, ~MASK_MAX_EXCED,
+ enable ? 0 : MASK_MAX_EXCED);
+ i2c_wr8_and_or(sd, BCAPS, ~(MASK_REPEATER | MASK_READY),
+ enable ? 0 : MASK_REPEATER | MASK_READY);
+}
+
+static void tc358743_disable_edid(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+
+ /* DDC access to EDID is also disabled when hotplug is disabled. See
+ * register DDC_CTL */
+ i2c_wr8_and_or(sd, HPD_CTL, ~MASK_HPD_OUT0, 0x0);
+}
+
+static void tc358743_enable_edid(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ if (state->edid_blocks_written == 0) {
+ v4l2_dbg(2, debug, sd, "%s: no EDID -> no hotplug\n", __func__);
+ return;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+ /* Enable hotplug after 100 ms. DDC access to EDID is also enabled when
+ * hotplug is enabled. See register DDC_CTL */
+ queue_delayed_work(state->work_queues,
+ &state->delayed_work_enable_hotplug, HZ / 10);
+
+ tc358743_enable_interrupts(sd, true);
+ tc358743_s_ctrl_detect_tx_5v(sd);
+}
+
+static void tc358743_erase_bksv(struct v4l2_subdev *sd)
+{
+ int i;
+
+ for (i = 0; i < 5; i++)
+ i2c_wr8(sd, BKSV + i, 0);
+}
+
+/* --------------- AVI infoframe --------------- */
+
+static void print_avi_infoframe(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ union hdmi_infoframe frame;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+
+ if (!is_hdmi(sd)) {
+ v4l2_info(sd, "DVI-D signal - AVI infoframe not supported\n");
+ return;
+ }
+
+ i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
+
+ if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
+ return;
+ }
+
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+}
+
+/* --------------- CTRLS --------------- */
+
+static int tc358743_s_ctrl_detect_tx_5v(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
+ tx_5v_power_present(sd));
+}
+
+static int tc358743_s_ctrl_audio_sampling_rate(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ return v4l2_ctrl_s_ctrl(state->audio_sampling_rate_ctrl,
+ get_audio_sampling_rate(sd));
+}
+
+static int tc358743_s_ctrl_audio_present(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ return v4l2_ctrl_s_ctrl(state->audio_present_ctrl,
+ audio_present(sd));
+}
+
+static int tc358743_update_controls(struct v4l2_subdev *sd)
+{
+ int ret = 0;
+
+ ret |= tc358743_s_ctrl_detect_tx_5v(sd);
+ ret |= tc358743_s_ctrl_audio_sampling_rate(sd);
+ ret |= tc358743_s_ctrl_audio_present(sd);
+
+ return ret;
+}
+
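
Note that OR-ing the three return codes only preserves "zero on success, nonzero on failure"; when two calls fail with different errno values, the combined number is not a meaningful error code.
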
+/* --------------- INIT --------------- */
+
+static void tc358743_reset_phy(struct v4l2_subdev *sd)
+{
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ i2c_wr8_and_or(sd, PHY_RST, ~MASK_RESET_CTRL, 0);
+ i2c_wr8_and_or(sd, PHY_RST, ~MASK_RESET_CTRL, MASK_RESET_CTRL);
+}
+
+static void tc358743_reset(struct v4l2_subdev *sd, uint16_t mask)
+{
+ u16 sysctl = i2c_rd16(sd, SYSCTL);
+
+ i2c_wr16(sd, SYSCTL, sysctl | mask);
+ i2c_wr16(sd, SYSCTL, sysctl & ~mask);
+}
+
+static inline void tc358743_sleep_mode(struct v4l2_subdev *sd, bool enable)
+{
+ i2c_wr16_and_or(sd, SYSCTL, ~MASK_SLEEP,
+ enable ? MASK_SLEEP : 0);
+}
+
+static inline void enable_stream(struct v4l2_subdev *sd, bool enable)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ v4l2_dbg(3, debug, sd, "%s: %sable\n",
+ __func__, enable ? "en" : "dis");
+
+ if (enable) {
+ /* It is critical for CSI receiver to see lane transition
+ * LP11->HS. Set to non-continuous mode to enable clock lane
+ * LP11 state. */
+ i2c_wr32(sd, TXOPTIONCNTRL, 0);
+ /* Set to continuous mode to trigger LP11->HS transition */
+ i2c_wr32(sd, TXOPTIONCNTRL, MASK_CONTCLKMODE);
+ /* Unmute video */
+ i2c_wr8(sd, VI_MUTE, MASK_AUTO_MUTE);
+ } else {
+ /* Mute video so that all data lanes go to the LP-11 state.
+ * No data is output to the CSI Tx block. */
+ i2c_wr8(sd, VI_MUTE, MASK_AUTO_MUTE | MASK_VI_MUTE);
+ }
+
+ mutex_lock(&state->confctl_mutex);
+ i2c_wr16_and_or(sd, CONFCTL, ~(MASK_VBUFEN | MASK_ABUFEN),
+ enable ? (MASK_VBUFEN | MASK_ABUFEN) : 0x0);
+ mutex_unlock(&state->confctl_mutex);
+}
+
+static void tc358743_set_pll(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct tc358743_platform_data *pdata = &state->pdata;
+ u16 pllctl0 = i2c_rd16(sd, PLLCTL0);
+ u16 pllctl1 = i2c_rd16(sd, PLLCTL1);
+ u16 pllctl0_new = SET_PLL_PRD(pdata->pll_prd) |
+ SET_PLL_FBD(pdata->pll_fbd);
+ u32 hsck = (pdata->refclk_hz / pdata->pll_prd) * pdata->pll_fbd;
+
+ v4l2_dbg(2, debug, sd, "%s:\n", __func__);
+
+ /* Only rewrite when needed (new value or disabled), since rewriting
+ * triggers another format change event. */
+ if ((pllctl0 != pllctl0_new) || ((pllctl1 & MASK_PLL_EN) == 0)) {
+ u16 pll_frs;
+
+ if (hsck > 500000000)
+ pll_frs = 0x0;
+ else if (hsck > 250000000)
+ pll_frs = 0x1;
+ else if (hsck > 125000000)
+ pll_frs = 0x2;
+ else
+ pll_frs = 0x3;
+
+ v4l2_dbg(1, debug, sd, "%s: updating PLL clock\n", __func__);
+ tc358743_sleep_mode(sd, true);
+ i2c_wr16(sd, PLLCTL0, pllctl0_new);
+ i2c_wr16_and_or(sd, PLLCTL1,
+ ~(MASK_PLL_FRS | MASK_RESETB | MASK_PLL_EN),
+ (SET_PLL_FRS(pll_frs) | MASK_RESETB |
+ MASK_PLL_EN));
+ udelay(10); /* REF_02, Sheet "Source HDMI" */
+ i2c_wr16_and_or(sd, PLLCTL1, ~MASK_CKEN, MASK_CKEN);
+ tc358743_sleep_mode(sd, false);
+ }
+}
+
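
For example, with hypothetical platform data of refclk_hz = 26 MHz, pll_prd = 4 and pll_fbd = 144, hsck = 26 MHz / 4 * 144 = 936 MHz, which is above 500 MHz, so pll_frs = 0x0.
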
+static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct tc358743_platform_data *pdata = &state->pdata;
+ u32 sys_freq;
+ u32 lockdet_ref;
+ u16 fh_min;
+ u16 fh_max;
+
+ BUG_ON(!(pdata->refclk_hz == 26000000 ||
+ pdata->refclk_hz == 27000000 ||
+ pdata->refclk_hz == 42000000));
+
+ sys_freq = pdata->refclk_hz / 10000;
+ i2c_wr8(sd, SYS_FREQ0, sys_freq & 0x00ff);
+ i2c_wr8(sd, SYS_FREQ1, (sys_freq & 0xff00) >> 8);
+
+ i2c_wr8_and_or(sd, PHY_CTL0, ~MASK_PHY_SYSCLK_IND,
+ (pdata->refclk_hz == 42000000) ?
+ MASK_PHY_SYSCLK_IND : 0x0);
+
+ fh_min = pdata->refclk_hz / 100000;
+ i2c_wr8(sd, FH_MIN0, fh_min & 0x00ff);
+ i2c_wr8(sd, FH_MIN1, (fh_min & 0xff00) >> 8);
+
+ fh_max = (fh_min * 66) / 10;
+ i2c_wr8(sd, FH_MAX0, fh_max & 0x00ff);
+ i2c_wr8(sd, FH_MAX1, (fh_max & 0xff00) >> 8);
+
+ lockdet_ref = pdata->refclk_hz / 100;
+ i2c_wr8(sd, LOCKDET_REF0, lockdet_ref & 0x0000ff);
+ i2c_wr8(sd, LOCKDET_REF1, (lockdet_ref & 0x00ff00) >> 8);
+ i2c_wr8(sd, LOCKDET_REF2, (lockdet_ref & 0x0f0000) >> 16);
+
+ i2c_wr8_and_or(sd, NCO_F0_MOD, ~MASK_NCO_F0_MOD,
+ (pdata->refclk_hz == 27000000) ?
+ MASK_NCO_F0_MOD_27MHZ : 0x0);
+}
+
+static void tc358743_set_csi_color_space(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ switch (state->mbus_fmt_code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ v4l2_dbg(2, debug, sd, "%s: YCbCr 422 16-bit\n", __func__);
+ i2c_wr8_and_or(sd, VOUT_SET2,
+ ~(MASK_SEL422 | MASK_VOUT_422FIL_100) & 0xff,
+ MASK_SEL422 | MASK_VOUT_422FIL_100);
+ i2c_wr8_and_or(sd, VI_REP, ~MASK_VOUT_COLOR_SEL & 0xff,
+ MASK_VOUT_COLOR_601_YCBCR_LIMITED);
+ mutex_lock(&state->confctl_mutex);
+ i2c_wr16_and_or(sd, CONFCTL, ~MASK_YCBCRFMT,
+ MASK_YCBCRFMT_422_8_BIT);
+ mutex_unlock(&state->confctl_mutex);
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ v4l2_dbg(2, debug, sd, "%s: RGB 888 24-bit\n", __func__);
+ i2c_wr8_and_or(sd, VOUT_SET2,
+ ~(MASK_SEL422 | MASK_VOUT_422FIL_100) & 0xff,
+ 0x00);
+ i2c_wr8_and_or(sd, VI_REP, ~MASK_VOUT_COLOR_SEL & 0xff,
+ MASK_VOUT_COLOR_RGB_FULL);
+ mutex_lock(&state->confctl_mutex);
+ i2c_wr16_and_or(sd, CONFCTL, ~MASK_YCBCRFMT, 0);
+ mutex_unlock(&state->confctl_mutex);
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unsupported format code 0x%x\n",
+ __func__, state->mbus_fmt_code);
+ }
+}
+
+static unsigned tc358743_num_csi_lanes_needed(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct v4l2_bt_timings *bt = &state->timings.bt;
+ struct tc358743_platform_data *pdata = &state->pdata;
+ u32 bits_pr_pixel =
+ (state->mbus_fmt_code == MEDIA_BUS_FMT_UYVY8_1X16) ? 16 : 24;
+ u32 bps = bt->width * bt->height * fps(bt) * bits_pr_pixel;
+ u32 bps_pr_lane = (pdata->refclk_hz / pdata->pll_prd) * pdata->pll_fbd;
+
+ return DIV_ROUND_UP(bps, bps_pr_lane);
+}
+
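
Continuing the hypothetical numbers from the PLL example above: bps_pr_lane = 26 MHz / 4 * 144 = 936 Mbps. A 1080p60 stream in UYVY8_1X16 needs 1920 * 1080 * 60 * 16 = 1990656000 bps, so DIV_ROUND_UP() selects 3 lanes.
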
+static void tc358743_set_csi(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct tc358743_platform_data *pdata = &state->pdata;
+ unsigned lanes = tc358743_num_csi_lanes_needed(sd);
+
+ v4l2_dbg(3, debug, sd, "%s:\n", __func__);
+
+ tc358743_reset(sd, MASK_CTXRST);
+
+ if (lanes < 1)
+ i2c_wr32(sd, CLW_CNTRL, MASK_CLW_LANEDISABLE);
+ if (lanes < 1)
+ i2c_wr32(sd, D0W_CNTRL, MASK_D0W_LANEDISABLE);
+ if (lanes < 2)
+ i2c_wr32(sd, D1W_CNTRL, MASK_D1W_LANEDISABLE);
+ if (lanes < 3)
+ i2c_wr32(sd, D2W_CNTRL, MASK_D2W_LANEDISABLE);
+ if (lanes < 4)
+ i2c_wr32(sd, D3W_CNTRL, MASK_D3W_LANEDISABLE);
+
+ i2c_wr32(sd, LINEINITCNT, pdata->lineinitcnt);
+ i2c_wr32(sd, LPTXTIMECNT, pdata->lptxtimecnt);
+ i2c_wr32(sd, TCLK_HEADERCNT, pdata->tclk_headercnt);
+ i2c_wr32(sd, TCLK_TRAILCNT, pdata->tclk_trailcnt);
+ i2c_wr32(sd, THS_HEADERCNT, pdata->ths_headercnt);
+ i2c_wr32(sd, TWAKEUP, pdata->twakeup);
+ i2c_wr32(sd, TCLK_POSTCNT, pdata->tclk_postcnt);
+ i2c_wr32(sd, THS_TRAILCNT, pdata->ths_trailcnt);
+ i2c_wr32(sd, HSTXVREGCNT, pdata->hstxvregcnt);
+
+ i2c_wr32(sd, HSTXVREGEN,
+ ((lanes > 0) ? MASK_CLM_HSTXVREGEN : 0x0) |
+ ((lanes > 0) ? MASK_D0M_HSTXVREGEN : 0x0) |
+ ((lanes > 1) ? MASK_D1M_HSTXVREGEN : 0x0) |
+ ((lanes > 2) ? MASK_D2M_HSTXVREGEN : 0x0) |
+ ((lanes > 3) ? MASK_D3M_HSTXVREGEN : 0x0));
+
+ i2c_wr32(sd, TXOPTIONCNTRL, (state->bus.flags &
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK) ? MASK_CONTCLKMODE : 0);
+ i2c_wr32(sd, STARTCNTRL, MASK_START);
+ i2c_wr32(sd, CSI_START, MASK_STRT);
+
+ i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+ MASK_ADDRESS_CSI_CONTROL |
+ MASK_CSI_MODE |
+ MASK_TXHSMD |
+ ((lanes == 4) ? MASK_NOL_4 :
+ (lanes == 3) ? MASK_NOL_3 :
+ (lanes == 2) ? MASK_NOL_2 : MASK_NOL_1));
+
+ i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+ MASK_ADDRESS_CSI_ERR_INTENA | MASK_TXBRK | MASK_QUNK |
+ MASK_WCER | MASK_INER);
+
+ i2c_wr32(sd, CSI_CONFW, MASK_MODE_CLEAR |
+ MASK_ADDRESS_CSI_ERR_HALT | MASK_TXBRK | MASK_QUNK);
+
+ i2c_wr32(sd, CSI_CONFW, MASK_MODE_SET |
+ MASK_ADDRESS_CSI_INT_ENA | MASK_INTER);
+}
+
+static void tc358743_set_hdmi_phy(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct tc358743_platform_data *pdata = &state->pdata;
+
+ /* Default settings from REF_02, sheet "Source HDMI"
+ * and custom settings as platform data */
+ i2c_wr8_and_or(sd, PHY_EN, ~MASK_ENABLE_PHY, 0x0);
+ i2c_wr8(sd, PHY_CTL1, SET_PHY_AUTO_RST1_US(1600) |
+ SET_FREQ_RANGE_MODE_CYCLES(1));
+ i2c_wr8_and_or(sd, PHY_CTL2, ~MASK_PHY_AUTO_RSTn,
+ (pdata->hdmi_phy_auto_reset_tmds_detected ?
+ MASK_PHY_AUTO_RST2 : 0) |
+ (pdata->hdmi_phy_auto_reset_tmds_in_range ?
+ MASK_PHY_AUTO_RST3 : 0) |
+ (pdata->hdmi_phy_auto_reset_tmds_valid ?
+ MASK_PHY_AUTO_RST4 : 0));
+ i2c_wr8(sd, PHY_BIAS, 0x40);
+ i2c_wr8(sd, PHY_CSQ, SET_CSQ_CNT_LEVEL(0x0a));
+ i2c_wr8(sd, AVM_CTL, 45);
+ i2c_wr8_and_or(sd, HDMI_DET, ~MASK_HDMI_DET_V,
+ pdata->hdmi_detection_delay << 4);
+ i2c_wr8_and_or(sd, HV_RST, ~(MASK_H_PI_RST | MASK_V_PI_RST),
+ (pdata->hdmi_phy_auto_reset_hsync_out_of_range ?
+ MASK_H_PI_RST : 0) |
+ (pdata->hdmi_phy_auto_reset_vsync_out_of_range ?
+ MASK_V_PI_RST : 0));
+ i2c_wr8_and_or(sd, PHY_EN, ~MASK_ENABLE_PHY, MASK_ENABLE_PHY);
+}
+
+static void tc358743_set_hdmi_audio(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ /* Default settings from REF_02, sheet "Source HDMI" */
+ i2c_wr8(sd, FORCE_MUTE, 0x00);
+ i2c_wr8(sd, AUTO_CMD0, MASK_AUTO_MUTE7 | MASK_AUTO_MUTE6 |
+ MASK_AUTO_MUTE5 | MASK_AUTO_MUTE4 |
+ MASK_AUTO_MUTE1 | MASK_AUTO_MUTE0);
+ i2c_wr8(sd, AUTO_CMD1, MASK_AUTO_MUTE9);
+ i2c_wr8(sd, AUTO_CMD2, MASK_AUTO_PLAY3 | MASK_AUTO_PLAY2);
+ i2c_wr8(sd, BUFINIT_START, SET_BUFINIT_START_MS(500));
+ i2c_wr8(sd, FS_MUTE, 0x00);
+ i2c_wr8(sd, FS_IMODE, MASK_NLPCM_SMODE | MASK_FS_SMODE);
+ i2c_wr8(sd, ACR_MODE, MASK_CTS_MODE);
+ i2c_wr8(sd, ACR_MDF0, MASK_ACR_L2MDF_1976_PPM | MASK_ACR_L1MDF_976_PPM);
+ i2c_wr8(sd, ACR_MDF1, MASK_ACR_L3MDF_3906_PPM);
+ i2c_wr8(sd, SDO_MODE1, MASK_SDO_FMT_I2S);
+ i2c_wr8(sd, DIV_MODE, SET_DIV_DLY_MS(100));
+
+ mutex_lock(&state->confctl_mutex);
+ i2c_wr16_and_or(sd, CONFCTL, 0xffff, MASK_AUDCHNUM_2 |
+ MASK_AUDOUTSEL_I2S | MASK_AUTOINDEX);
+ mutex_unlock(&state->confctl_mutex);
+}
+
+static void tc358743_set_hdmi_info_frame_mode(struct v4l2_subdev *sd)
+{
+ /* Default settings from REF_02, sheet "Source HDMI" */
+ i2c_wr8(sd, PK_INT_MODE, MASK_ISRC2_INT_MODE | MASK_ISRC_INT_MODE |
+ MASK_ACP_INT_MODE | MASK_VS_INT_MODE |
+ MASK_SPD_INT_MODE | MASK_MS_INT_MODE |
+ MASK_AUD_INT_MODE | MASK_AVI_INT_MODE);
+ i2c_wr8(sd, NO_PKT_LIMIT, 0x2c);
+ i2c_wr8(sd, NO_PKT_CLR, 0x53);
+ i2c_wr8(sd, ERR_PK_LIMIT, 0x01);
+ i2c_wr8(sd, NO_PKT_LIMIT2, 0x30);
+ i2c_wr8(sd, NO_GDB_LIMIT, 0x10);
+}
+
+static void tc358743_initial_setup(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct tc358743_platform_data *pdata = &state->pdata;
+
+ /* CEC and IR are not supported by this driver */
+ i2c_wr16_and_or(sd, SYSCTL, ~(MASK_CECRST | MASK_IRRST),
+ (MASK_CECRST | MASK_IRRST));
+
+ tc358743_reset(sd, MASK_CTXRST | MASK_HDMIRST);
+ tc358743_sleep_mode(sd, false);
+
+ i2c_wr16(sd, FIFOCTL, pdata->fifo_level);
+
+ tc358743_set_ref_clk(sd);
+
+ i2c_wr8_and_or(sd, DDC_CTL, ~MASK_DDC5V_MODE,
+ pdata->ddc5v_delay & MASK_DDC5V_MODE);
+ i2c_wr8_and_or(sd, EDID_MODE, ~MASK_EDID_MODE, MASK_EDID_MODE_E_DDC);
+
+ tc358743_set_hdmi_phy(sd);
+ tc358743_set_hdmi_hdcp(sd, pdata->enable_hdcp);
+ tc358743_set_hdmi_audio(sd);
+ tc358743_set_hdmi_info_frame_mode(sd);
+
+ /* All CE and IT formats are detected as RGB full range in DVI mode */
+ i2c_wr8_and_or(sd, VI_MODE, ~MASK_RGB_DVI, 0);
+
+ i2c_wr8_and_or(sd, VOUT_SET2, ~MASK_VOUTCOLORMODE,
+ MASK_VOUTCOLORMODE_AUTO);
+ i2c_wr8(sd, VOUT_SET3, MASK_VOUT_EXTCNT);
+}
+
+/* --------------- IRQ --------------- */
+
+static void tc358743_format_change(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct v4l2_dv_timings timings;
+ const struct v4l2_event tc358743_ev_fmt = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ if (tc358743_get_detected_timings(sd, &timings)) {
+ enable_stream(sd, false);
+
+ v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n",
+ __func__);
+ } else {
+ if (!v4l2_match_dv_timings(&state->timings, &timings, 0))
+ enable_stream(sd, false);
+
+ v4l2_print_dv_timings(sd->name,
+ "tc358743_format_change: Format changed. New format: ",
+ &timings, false);
+ }
+
+ if (sd->devnode)
+ v4l2_subdev_notify_event(sd, &tc358743_ev_fmt);
+}
+
+static void tc358743_init_interrupts(struct v4l2_subdev *sd)
+{
+ u16 i;
+
+ /* clear interrupt status registers */
+ for (i = SYS_INT; i <= KEY_INT; i++)
+ i2c_wr8(sd, i, 0xff);
+
+ i2c_wr16(sd, INTSTATUS, 0xffff);
+}
+
+static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
+ bool cable_connected)
+{
+ v4l2_dbg(2, debug, sd, "%s: cable connected = %d\n", __func__,
+ cable_connected);
+
+ if (cable_connected) {
+ i2c_wr8(sd, SYS_INTM, ~(MASK_M_DDC | MASK_M_DVI_DET |
+ MASK_M_HDMI_DET) & 0xff);
+ i2c_wr8(sd, CLK_INTM, ~MASK_M_IN_DE_CHG);
+ i2c_wr8(sd, CBIT_INTM, ~(MASK_M_CBIT_FS | MASK_M_AF_LOCK |
+ MASK_M_AF_UNLOCK) & 0xff);
+ i2c_wr8(sd, AUDIO_INTM, ~MASK_M_BUFINIT_END);
+ i2c_wr8(sd, MISC_INTM, ~MASK_M_SYNC_CHG);
+ } else {
+ i2c_wr8(sd, SYS_INTM, ~MASK_M_DDC & 0xff);
+ i2c_wr8(sd, CLK_INTM, 0xff);
+ i2c_wr8(sd, CBIT_INTM, 0xff);
+ i2c_wr8(sd, AUDIO_INTM, 0xff);
+ i2c_wr8(sd, MISC_INTM, 0xff);
+ }
+}
+
+static void tc358743_hdmi_audio_int_handler(struct v4l2_subdev *sd,
+ bool *handled)
+{
+ u8 audio_int_mask = i2c_rd8(sd, AUDIO_INTM);
+ u8 audio_int = i2c_rd8(sd, AUDIO_INT) & ~audio_int_mask;
+
+ i2c_wr8(sd, AUDIO_INT, audio_int);
+
+ v4l2_dbg(3, debug, sd, "%s: AUDIO_INT = 0x%02x\n", __func__, audio_int);
+
+ tc358743_s_ctrl_audio_sampling_rate(sd);
+ tc358743_s_ctrl_audio_present(sd);
+}
+
+static void tc358743_csi_err_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+ v4l2_err(sd, "%s: CSI_ERR = 0x%x\n", __func__, i2c_rd32(sd, CSI_ERR));
+
+ i2c_wr32(sd, CSI_INT_CLR, MASK_ICRER);
+}
+
+static void tc358743_hdmi_misc_int_handler(struct v4l2_subdev *sd,
+ bool *handled)
+{
+ u8 misc_int_mask = i2c_rd8(sd, MISC_INTM);
+ u8 misc_int = i2c_rd8(sd, MISC_INT) & ~misc_int_mask;
+
+ i2c_wr8(sd, MISC_INT, misc_int);
+
+ v4l2_dbg(3, debug, sd, "%s: MISC_INT = 0x%02x\n", __func__, misc_int);
+
+ if (misc_int & MASK_I_SYNC_CHG) {
+ /* Reset the HDMI PHY to try to trigger proper lock on the
+ * incoming video format. Erase BKSV to prevent old keys from
+ * being used when a new source is connected. */
+ if (no_sync(sd) || no_signal(sd)) {
+ tc358743_reset_phy(sd);
+ tc358743_erase_bksv(sd);
+ }
+
+ tc358743_format_change(sd);
+
+ misc_int &= ~MASK_I_SYNC_CHG;
+ if (handled)
+ *handled = true;
+ }
+
+ if (misc_int) {
+ v4l2_err(sd, "%s: Unhandled MISC_INT interrupts: 0x%02x\n",
+ __func__, misc_int);
+ }
+}
+
+static void tc358743_hdmi_cbit_int_handler(struct v4l2_subdev *sd,
+ bool *handled)
+{
+ u8 cbit_int_mask = i2c_rd8(sd, CBIT_INTM);
+ u8 cbit_int = i2c_rd8(sd, CBIT_INT) & ~cbit_int_mask;
+
+ i2c_wr8(sd, CBIT_INT, cbit_int);
+
+ v4l2_dbg(3, debug, sd, "%s: CBIT_INT = 0x%02x\n", __func__, cbit_int);
+
+ if (cbit_int & MASK_I_CBIT_FS) {
+
+ v4l2_dbg(1, debug, sd, "%s: Audio sample rate changed\n",
+ __func__);
+ tc358743_s_ctrl_audio_sampling_rate(sd);
+
+ cbit_int &= ~MASK_I_CBIT_FS;
+ if (handled)
+ *handled = true;
+ }
+
+ if (cbit_int & (MASK_I_AF_LOCK | MASK_I_AF_UNLOCK)) {
+
+ v4l2_dbg(1, debug, sd, "%s: Audio present changed\n",
+ __func__);
+ tc358743_s_ctrl_audio_present(sd);
+
+ cbit_int &= ~(MASK_I_AF_LOCK | MASK_I_AF_UNLOCK);
+ if (handled)
+ *handled = true;
+ }
+
+ if (cbit_int) {
+ v4l2_err(sd, "%s: Unhandled CBIT_INT interrupts: 0x%02x\n",
+ __func__, cbit_int);
+ }
+}
+
+static void tc358743_hdmi_clk_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+ u8 clk_int_mask = i2c_rd8(sd, CLK_INTM);
+ u8 clk_int = i2c_rd8(sd, CLK_INT) & ~clk_int_mask;
+
+ /* Bit 7 and bit 6 are set even when they are masked */
+ i2c_wr8(sd, CLK_INT, clk_int | 0x80 | MASK_I_OUT_H_CHG);
+
+ v4l2_dbg(3, debug, sd, "%s: CLK_INT = 0x%02x\n", __func__, clk_int);
+
+ if (clk_int & (MASK_I_IN_DE_CHG)) {
+
+ v4l2_dbg(1, debug, sd, "%s: DE size or position has changed\n",
+ __func__);
+
+ /* If the source switches to a new resolution with the same pixel
+ * frequency as the existing one (e.g. 1080p25 -> 720p50), the
+ * I_SYNC_CHG interrupt is not always triggered, while the
+ * I_IN_DE_CHG interrupt seems to work fine. Format change
+ * notifications are only sent when the signal is stable, to
+ * reduce the number of notifications. */
+ if (!no_signal(sd) && !no_sync(sd))
+ tc358743_format_change(sd);
+
+ clk_int &= ~(MASK_I_IN_DE_CHG);
+ if (handled)
+ *handled = true;
+ }
+
+ if (clk_int) {
+ v4l2_err(sd, "%s: Unhandled CLK_INT interrupts: 0x%02x\n",
+ __func__, clk_int);
+ }
+}
+
+static void tc358743_hdmi_sys_int_handler(struct v4l2_subdev *sd, bool *handled)
+{
+ struct tc358743_state *state = to_state(sd);
+ u8 sys_int_mask = i2c_rd8(sd, SYS_INTM);
+ u8 sys_int = i2c_rd8(sd, SYS_INT) & ~sys_int_mask;
+
+ i2c_wr8(sd, SYS_INT, sys_int);
+
+ v4l2_dbg(3, debug, sd, "%s: SYS_INT = 0x%02x\n", __func__, sys_int);
+
+ if (sys_int & MASK_I_DDC) {
+ bool tx_5v = tx_5v_power_present(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: Tx 5V power present: %s\n",
+ __func__, tx_5v ? "yes" : "no");
+
+ if (tx_5v) {
+ tc358743_enable_edid(sd);
+ } else {
+ tc358743_enable_interrupts(sd, false);
+ tc358743_disable_edid(sd);
+ memset(&state->timings, 0, sizeof(state->timings));
+ tc358743_erase_bksv(sd);
+ tc358743_update_controls(sd);
+ }
+
+ sys_int &= ~MASK_I_DDC;
+ if (handled)
+ *handled = true;
+ }
+
+ if (sys_int & MASK_I_DVI) {
+ v4l2_dbg(1, debug, sd, "%s: HDMI->DVI change detected\n",
+ __func__);
+
+ /* Reset the HDMI PHY to try to trigger proper lock on the
+ * incoming video format. Erase BKSV to prevent old keys from
+ * being used when a new source is connected. */
+ if (no_sync(sd) || no_signal(sd)) {
+ tc358743_reset_phy(sd);
+ tc358743_erase_bksv(sd);
+ }
+
+ sys_int &= ~MASK_I_DVI;
+ if (handled)
+ *handled = true;
+ }
+
+ if (sys_int & MASK_I_HDMI) {
+ v4l2_dbg(1, debug, sd, "%s: DVI->HDMI change detected\n",
+ __func__);
+
+ /* Register is reset in DVI mode (REF_01, c. 6.6.41) */
+ i2c_wr8(sd, ANA_CTL, MASK_APPL_PCSX_NORMAL | MASK_ANALOG_ON);
+
+ sys_int &= ~MASK_I_HDMI;
+ if (handled)
+ *handled = true;
+ }
+
+ if (sys_int) {
+ v4l2_err(sd, "%s: Unhandled SYS_INT interrupts: 0x%02x\n",
+ __func__, sys_int);
+ }
+}
+
+/* --------------- CORE OPS --------------- */
+
+static int tc358743_log_status(struct v4l2_subdev *sd)
+{
+ struct tc358743_state *state = to_state(sd);
+ struct v4l2_dv_timings timings;
+ u8 hdmi_sys_status = i2c_rd8(sd, SYS_STATUS);
+ u16 sysctl = i2c_rd16(sd, SYSCTL);
+ u8 vi_status3 = i2c_rd8(sd, VI_STATUS3);
+ const int deep_color_mode[4] = { 8, 10, 12, 16 };
+ static const char * const input_color_space[] = {
+ "RGB", "YCbCr 601", "Adobe RGB", "YCbCr 709", "NA (4)",
+ "xvYCC 601", "NA(6)", "xvYCC 709", "NA(8)", "sYCC601",
+ "NA(10)", "NA(11)", "NA(12)", "Adobe YCC 601"};
+
+ v4l2_info(sd, "-----Chip status-----\n");
+ v4l2_info(sd, "Chip ID: 0x%02x\n",
+ (i2c_rd16(sd, CHIPID) & MASK_CHIPID) >> 8);
+ v4l2_info(sd, "Chip revision: 0x%02x\n",
+ i2c_rd16(sd, CHIPID) & MASK_REVID);
+ v4l2_info(sd, "Reset: IR: %d, CEC: %d, CSI TX: %d, HDMI: %d\n",
+ !!(sysctl & MASK_IRRST),
+ !!(sysctl & MASK_CECRST),
+ !!(sysctl & MASK_CTXRST),
+ !!(sysctl & MASK_HDMIRST));
+ v4l2_info(sd, "Sleep mode: %s\n", sysctl & MASK_SLEEP ? "on" : "off");
+ v4l2_info(sd, "Cable detected (+5V power): %s\n",
+ hdmi_sys_status & MASK_S_DDC5V ? "yes" : "no");
+ v4l2_info(sd, "DDC lines enabled: %s\n",
+ (i2c_rd8(sd, EDID_MODE) & MASK_EDID_MODE_E_DDC) ?
+ "yes" : "no");
+ v4l2_info(sd, "Hotplug enabled: %s\n",
+ (i2c_rd8(sd, HPD_CTL) & MASK_HPD_OUT0) ?
+ "yes" : "no");
+ v4l2_info(sd, "CEC enabled: %s\n",
+ (i2c_rd16(sd, CECEN) & MASK_CECEN) ? "yes" : "no");
+ v4l2_info(sd, "-----Signal status-----\n");
+ v4l2_info(sd, "TMDS signal detected: %s\n",
+ hdmi_sys_status & MASK_S_TMDS ? "yes" : "no");
+ v4l2_info(sd, "Stable sync signal: %s\n",
+ hdmi_sys_status & MASK_S_SYNC ? "yes" : "no");
+ v4l2_info(sd, "PHY PLL locked: %s\n",
+ hdmi_sys_status & MASK_S_PHY_PLL ? "yes" : "no");
+ v4l2_info(sd, "PHY DE detected: %s\n",
+ hdmi_sys_status & MASK_S_PHY_SCDT ? "yes" : "no");
+
+ if (tc358743_get_detected_timings(sd, &timings)) {
+ v4l2_info(sd, "No video detected\n");
+ } else {
+ v4l2_print_dv_timings(sd->name, "Detected format: ", &timings,
+ true);
+ }
+ v4l2_print_dv_timings(sd->name, "Configured format: ", &state->timings,
+ true);
+
+ v4l2_info(sd, "-----CSI-TX status-----\n");
+ v4l2_info(sd, "Lanes needed: %d\n",
+ tc358743_num_csi_lanes_needed(sd));
+ v4l2_info(sd, "Lanes in use: %d\n",
+ tc358743_num_csi_lanes_in_use(sd));
+ v4l2_info(sd, "Waiting for particular sync signal: %s\n",
+ (i2c_rd16(sd, CSI_STATUS) & MASK_S_WSYNC) ?
+ "yes" : "no");
+ v4l2_info(sd, "Transmit mode: %s\n",
+ (i2c_rd16(sd, CSI_STATUS) & MASK_S_TXACT) ?
+ "yes" : "no");
+ v4l2_info(sd, "Receive mode: %s\n",
+ (i2c_rd16(sd, CSI_STATUS) & MASK_S_RXACT) ?
+ "yes" : "no");
+ v4l2_info(sd, "Stopped: %s\n",
+ (i2c_rd16(sd, CSI_STATUS) & MASK_S_HLT) ?
+ "yes" : "no");
+ v4l2_info(sd, "Color space: %s\n",
+ state->mbus_fmt_code == MEDIA_BUS_FMT_UYVY8_1X16 ?
+ "YCbCr 422 16-bit" :
+ state->mbus_fmt_code == MEDIA_BUS_FMT_RGB888_1X24 ?
+ "RGB 888 24-bit" : "Unsupported");
+
+ v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
+ v4l2_info(sd, "HDCP encrypted content: %s\n",
+ hdmi_sys_status & MASK_S_HDCP ? "yes" : "no");
+ v4l2_info(sd, "Input color space: %s %s range\n",
+ input_color_space[(vi_status3 & MASK_S_V_COLOR) >> 1],
+ (vi_status3 & MASK_LIMITED) ? "limited" : "full");
+ if (!is_hdmi(sd))
+ return 0;
+ v4l2_info(sd, "AV Mute: %s\n", hdmi_sys_status & MASK_S_AVMUTE ? "on" :
+ "off");
+ v4l2_info(sd, "Deep color mode: %d-bits per channel\n",
+ deep_color_mode[(i2c_rd8(sd, VI_STATUS1) &
+ MASK_S_DEEPCOLOR) >> 2]);
+ print_avi_infoframe(sd);
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static void tc358743_print_register_map(struct v4l2_subdev *sd)
+{
+ v4l2_info(sd, "0x0000–0x00FF: Global Control Register\n");
+ v4l2_info(sd, "0x0100–0x01FF: CSI2-TX PHY Register\n");
+ v4l2_info(sd, "0x0200–0x03FF: CSI2-TX PPI Register\n");
+ v4l2_info(sd, "0x0400–0x05FF: Reserved\n");
+ v4l2_info(sd, "0x0600–0x06FF: CEC Register\n");
+ v4l2_info(sd, "0x0700–0x84FF: Reserved\n");
+ v4l2_info(sd, "0x8500–0x85FF: HDMIRX System Control Register\n");
+ v4l2_info(sd, "0x8600–0x86FF: HDMIRX Audio Control Register\n");
+ v4l2_info(sd, "0x8700–0x87FF: HDMIRX InfoFrame packet data Register\n");
+ v4l2_info(sd, "0x8800–0x88FF: HDMIRX HDCP Port Register\n");
+ v4l2_info(sd, "0x8900–0x89FF: HDMIRX Video Output Port & 3D Register\n");
+ v4l2_info(sd, "0x8A00–0x8BFF: Reserved\n");
+ v4l2_info(sd, "0x8C00–0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
+ v4l2_info(sd, "0x9000–0x90FF: HDMIRX GBD Extraction Control\n");
+ v4l2_info(sd, "0x9100–0x92FF: HDMIRX GBD RAM read\n");
+ v4l2_info(sd, "0x9300- : Reserved\n");
+}
+
+static int tc358743_get_reg_size(u16 address)
+{
+ /* REF_01 p. 66-72 */
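+ /* Registers up to 0x00ff and from 0x0700 to 0x84ff are 16 bits wide,
+ * the CSI-TX block (0x0100-0x06ff) uses 32-bit registers, and the
+ * HDMI RX registers from 0x8500 onwards are 8 bits wide. */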
+ if (address <= 0x00ff)
+ return 2;
+ else if ((address >= 0x0100) && (address <= 0x06ff))
+ return 4;
+ else if ((address >= 0x0700) && (address <= 0x84ff))
+ return 2;
+ else
+ return 1;
+}
+
+static int tc358743_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ if (reg->reg > 0xffff) {
+ tc358743_print_register_map(sd);
+ return -EINVAL;
+ }
+
+ reg->size = tc358743_get_reg_size(reg->reg);
+
+ i2c_rd(sd, reg->reg, (u8 *)&reg->val, reg->size);
+
+ return 0;
+}
+
+static int tc358743_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ if (reg->reg > 0xffff) {
+ tc358743_print_register_map(sd);
+ return -EINVAL;
+ }
+
+ /* It should not be possible for the user to enable HDCP with a simple
+ * v4l2-dbg command.
+ *
+ * DO NOT REMOVE THIS unless all other issues with HDCP have been
+ * resolved.
+ */
+ if (reg->reg == HDCP_MODE ||
+ reg->reg == HDCP_REG1 ||
+ reg->reg == HDCP_REG2 ||
+ reg->reg == HDCP_REG3 ||
+ reg->reg == BCAPS)
+ return 0;
+
+ i2c_wr(sd, (u16)reg->reg, (u8 *)&reg->val,
+ tc358743_get_reg_size(reg->reg));
+
+ return 0;
+}
+#endif
+
+static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+ u16 intstatus = i2c_rd16(sd, INTSTATUS);
+
+ v4l2_dbg(1, debug, sd, "%s: IntStatus = 0x%04x\n", __func__, intstatus);
+
+ if (intstatus & MASK_HDMI_INT) {
+ u8 hdmi_int0 = i2c_rd8(sd, HDMI_INT0);
+ u8 hdmi_int1 = i2c_rd8(sd, HDMI_INT1);
+
+ if (hdmi_int0 & MASK_I_MISC)
+ tc358743_hdmi_misc_int_handler(sd, handled);
+ if (hdmi_int1 & MASK_I_CBIT)
+ tc358743_hdmi_cbit_int_handler(sd, handled);
+ if (hdmi_int1 & MASK_I_CLK)
+ tc358743_hdmi_clk_int_handler(sd, handled);
+ if (hdmi_int1 & MASK_I_SYS)
+ tc358743_hdmi_sys_int_handler(sd, handled);
+ if (hdmi_int1 & MASK_I_AUD)
+ tc358743_hdmi_audio_int_handler(sd, handled);
+
+ i2c_wr16(sd, INTSTATUS, MASK_HDMI_INT);
+ intstatus &= ~MASK_HDMI_INT;
+ }
+
+ if (intstatus & MASK_CSI_INT) {
+ u32 csi_int = i2c_rd32(sd, CSI_INT);
+
+ if (csi_int & MASK_INTER)
+ tc358743_csi_err_int_handler(sd, handled);
+
+ i2c_wr16(sd, INTSTATUS, MASK_CSI_INT);
+ intstatus &= ~MASK_CSI_INT;
+ }
+
+ intstatus = i2c_rd16(sd, INTSTATUS);
+ if (intstatus) {
+ v4l2_dbg(1, debug, sd,
+ "%s: Unhandled IntStatus interrupts: 0x%02x\n",
+ __func__, intstatus);
+ }
+
+ return 0;
+}
+
+static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
+{
+ struct tc358743_state *state = dev_id;
+ bool handled = false;
+
+ tc358743_isr(&state->sd, 0, &handled);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tc358743_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* --------------- VIDEO OPS --------------- */
+
+static int tc358743_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ *status = 0;
+ *status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
+ *status |= no_sync(sd) ? V4L2_IN_ST_NO_SYNC : 0;
+
+ v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
+
+ return 0;
+}
+
+static int tc358743_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ if (!timings)
+ return -EINVAL;
+
+ if (debug)
+ v4l2_print_dv_timings(sd->name, "tc358743_s_dv_timings: ",
+ timings, false);
+
+ if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+ v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+ return 0;
+ }
+
+ if (!v4l2_valid_dv_timings(timings,
+ &tc358743_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, sd, "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ state->timings = *timings;
+
+ enable_stream(sd, false);
+ tc358743_set_pll(sd);
+ tc358743_set_csi(sd);
+
+ return 0;
+}
+
+static int tc358743_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ *timings = state->timings;
+
+ return 0;
+}
+
+static int tc358743_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ if (timings->pad != 0)
+ return -EINVAL;
+
+ return v4l2_enum_dv_timings_cap(timings,
+ &tc358743_timings_cap, NULL, NULL);
+}
+
+static int tc358743_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ int ret;
+
+ ret = tc358743_get_detected_timings(sd, timings);
+ if (ret)
+ return ret;
+
+ if (debug)
+ v4l2_print_dv_timings(sd->name, "tc358743_query_dv_timings: ",
+ timings, false);
+
+ if (!v4l2_valid_dv_timings(timings,
+ &tc358743_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, sd, "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int tc358743_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (cap->pad != 0)
+ return -EINVAL;
+
+ *cap = tc358743_timings_cap;
+
+ return 0;
+}
+
+static int tc358743_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ cfg->type = V4L2_MBUS_CSI2;
+
+ /* Support for non-continuous CSI-2 clock is missing in the driver */
+ cfg->flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ switch (tc358743_num_csi_lanes_in_use(sd)) {
+ case 1:
+ cfg->flags |= V4L2_MBUS_CSI2_1_LANE;
+ break;
+ case 2:
+ cfg->flags |= V4L2_MBUS_CSI2_2_LANE;
+ break;
+ case 3:
+ cfg->flags |= V4L2_MBUS_CSI2_3_LANE;
+ break;
+ case 4:
+ cfg->flags |= V4L2_MBUS_CSI2_4_LANE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ enable_stream(sd, enable);
+
+ return 0;
+}
+
+/* --------------- PAD OPS --------------- */
+
+static int tc358743_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct tc358743_state *state = to_state(sd);
+ u8 vi_rep = i2c_rd8(sd, VI_REP);
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ format->format.code = state->mbus_fmt_code;
+ format->format.width = state->timings.bt.width;
+ format->format.height = state->timings.bt.height;
+ format->format.field = V4L2_FIELD_NONE;
+
+ switch (vi_rep & MASK_VOUT_COLOR_SEL) {
+ case MASK_VOUT_COLOR_RGB_FULL:
+ case MASK_VOUT_COLOR_RGB_LIMITED:
+ format->format.colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ case MASK_VOUT_COLOR_601_YCBCR_LIMITED:
+ case MASK_VOUT_COLOR_601_YCBCR_FULL:
+ format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ break;
+ case MASK_VOUT_COLOR_709_YCBCR_FULL:
+ case MASK_VOUT_COLOR_709_YCBCR_LIMITED:
+ format->format.colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ default:
+ format->format.colorspace = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int tc358743_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ u32 code = format->format.code; /* is overwritten by get_fmt */
+ int ret = tc358743_get_fmt(sd, cfg, format);
+
+ format->format.code = code;
+
+ if (ret)
+ return ret;
+
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ state->mbus_fmt_code = format->format.code;
+
+ enable_stream(sd, false);
+ tc358743_set_pll(sd);
+ tc358743_set_csi(sd);
+ tc358743_set_csi_color_space(sd);
+
+ return 0;
+}
+
+static int tc358743_g_edid(struct v4l2_subdev *sd,
+ struct v4l2_subdev_edid *edid)
+{
+ struct tc358743_state *state = to_state(sd);
+
+ if (edid->pad != 0)
+ return -EINVAL;
+
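+ /* Per the V4L2 G_EDID convention, a request with both start_block and
+ * blocks set to zero reports how many EDID blocks are stored. */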
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = state->edid_blocks_written;
+ return 0;
+ }
+
+ if (state->edid_blocks_written == 0)
+ return -ENODATA;
+
+ if (edid->start_block >= state->edid_blocks_written ||
+ edid->blocks == 0)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > state->edid_blocks_written)
+ edid->blocks = state->edid_blocks_written - edid->start_block;
+
+ i2c_rd(sd, EDID_RAM + (edid->start_block * EDID_BLOCK_SIZE), edid->edid,
+ edid->blocks * EDID_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int tc358743_s_edid(struct v4l2_subdev *sd,
+ struct v4l2_subdev_edid *edid)
+{
+ struct tc358743_state *state = to_state(sd);
+ u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+
+ v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
+ __func__, edid->pad, edid->start_block, edid->blocks);
+
+ if (edid->pad != 0)
+ return -EINVAL;
+
+ if (edid->start_block != 0)
+ return -EINVAL;
+
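+ /* The V4L2 S_EDID convention is to clamp blocks to the supported
+ * maximum and return -E2BIG so that userspace can retry. */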
+ if (edid->blocks > EDID_NUM_BLOCKS_MAX) {
+ edid->blocks = EDID_NUM_BLOCKS_MAX;
+ return -E2BIG;
+ }
+
+ tc358743_disable_edid(sd);
+
+ i2c_wr8(sd, EDID_LEN1, edid_len & 0xff);
+ i2c_wr8(sd, EDID_LEN2, edid_len >> 8);
+
+ if (edid->blocks == 0) {
+ state->edid_blocks_written = 0;
+ return 0;
+ }
+
+ i2c_wr(sd, EDID_RAM, edid->edid, edid_len);
+
+ state->edid_blocks_written = edid->blocks;
+
+ if (tx_5v_power_present(sd))
+ tc358743_enable_edid(sd);
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static const struct v4l2_subdev_core_ops tc358743_core_ops = {
+ .log_status = tc358743_log_status,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = tc358743_g_register,
+ .s_register = tc358743_s_register,
+#endif
+ .interrupt_service_routine = tc358743_isr,
+ .subscribe_event = tc358743_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops tc358743_video_ops = {
+ .g_input_status = tc358743_g_input_status,
+ .s_dv_timings = tc358743_s_dv_timings,
+ .g_dv_timings = tc358743_g_dv_timings,
+ .query_dv_timings = tc358743_query_dv_timings,
+ .g_mbus_config = tc358743_g_mbus_config,
+ .s_stream = tc358743_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops tc358743_pad_ops = {
+ .set_fmt = tc358743_set_fmt,
+ .get_fmt = tc358743_get_fmt,
+ .get_edid = tc358743_g_edid,
+ .set_edid = tc358743_s_edid,
+ .enum_dv_timings = tc358743_enum_dv_timings,
+ .dv_timings_cap = tc358743_dv_timings_cap,
+};
+
+static const struct v4l2_subdev_ops tc358743_ops = {
+ .core = &tc358743_core_ops,
+ .video = &tc358743_video_ops,
+ .pad = &tc358743_pad_ops,
+};
+
+/* --------------- CUSTOM CTRLS --------------- */
+
+static const struct v4l2_ctrl_config tc358743_ctrl_audio_sampling_rate = {
+ .id = TC358743_CID_AUDIO_SAMPLING_RATE,
+ .name = "Audio sampling rate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 768000,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+static const struct v4l2_ctrl_config tc358743_ctrl_audio_present = {
+ .id = TC358743_CID_AUDIO_PRESENT,
+ .name = "Audio present",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+/* --------------- PROBE / REMOVE --------------- */
+
+#ifdef CONFIG_OF
+static void tc358743_gpio_reset(struct tc358743_state *state)
+{
+ usleep_range(5000, 10000);
+ gpiod_set_value(state->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(state->reset_gpio, 0);
+ msleep(20);
+}
+
+static int tc358743_probe_of(struct tc358743_state *state)
+{
+ struct device *dev = &state->i2c_client->dev;
+ struct v4l2_of_endpoint *endpoint;
+ struct device_node *ep;
+ struct clk *refclk;
+ u32 bps_pr_lane;
+ int ret = -EINVAL;
+
+ refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(refclk)) {
+ if (PTR_ERR(refclk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get refclk: %ld\n",
+ PTR_ERR(refclk));
+ return PTR_ERR(refclk);
+ }
+
+ ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!ep) {
+ dev_err(dev, "missing endpoint node\n");
+ return -EINVAL;
+ }
+
+ endpoint = v4l2_of_alloc_parse_endpoint(ep);
+ if (IS_ERR(endpoint)) {
+ dev_err(dev, "failed to parse endpoint\n");
+ return PTR_ERR(endpoint);
+ }
+
+ if (endpoint->bus_type != V4L2_MBUS_CSI2 ||
+ endpoint->bus.mipi_csi2.num_data_lanes == 0 ||
+ endpoint->nr_of_link_frequencies == 0) {
+ dev_err(dev, "missing CSI-2 properties in endpoint\n");
+ goto free_endpoint;
+ }
+
+ state->bus = endpoint->bus.mipi_csi2;
+
+ clk_prepare_enable(refclk);
+
+ state->pdata.refclk_hz = clk_get_rate(refclk);
+ state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS;
+ state->pdata.enable_hdcp = false;
+ /* A FIFO level of 16 should be enough for 2-lane 720p60 at 594 Mbps. */
+ state->pdata.fifo_level = 16;
+ /*
+ * The PLL input clock is obtained by dividing refclk by pll_prd.
+ * It must be between 6 MHz and 40 MHz, lower frequency is better.
+ */
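+ /*
+ * For example, a 27 MHz refclk gives pll_prd = 27000000 / 6000000 = 4
+ * (integer division) and thus a PLL input clock of 6.75 MHz.
+ */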
+ switch (state->pdata.refclk_hz) {
+ case 26000000:
+ case 27000000:
+ case 42000000:
+ state->pdata.pll_prd = state->pdata.refclk_hz / 6000000;
+ break;
+ default:
+ dev_err(dev, "unsupported refclk rate: %u Hz\n",
+ state->pdata.refclk_hz);
+ goto disable_clk;
+ }
+
+ /*
+ * The CSI bps per lane must be between 62.5 Mbps and 1 Gbps.
+ * The default is 594 Mbps for 4-lane 1080p60 or 2-lane 720p60.
+ */
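+ /* The factor of two comes from the double data rate signalling of the
+ * MIPI D-PHY: a 297 MHz link frequency yields 594 Mbps per lane. */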
+ bps_pr_lane = 2 * endpoint->link_frequencies[0];
+ if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
+ dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
+ goto disable_clk;
+ }
+
+ /* The CSI speed per lane is refclk / pll_prd * pll_fbd */
+ state->pdata.pll_fbd = bps_pr_lane /
+ state->pdata.refclk_hz * state->pdata.pll_prd;
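+ /* For example, 594 Mbps per lane with a 27 MHz refclk and a pll_prd of
+ * 4 gives pll_fbd = 594000000 / 27000000 * 4 = 88, and indeed
+ * 27 MHz / 4 * 88 = 594 Mbps. */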
+
+ /*
+ * FIXME: These timings are from REF_02 for 594 Mbps per lane (297 MHz
+ * link frequency). In principle it should be possible to calculate
+ * them based on link frequency and resolution.
+ */
+ if (bps_pr_lane != 594000000U)
+ dev_warn(dev, "untested bps per lane: %u bps\n", bps_pr_lane);
+ state->pdata.lineinitcnt = 0xe80;
+ state->pdata.lptxtimecnt = 0x003;
+ /* tclk-preparecnt: 3, tclk-zerocnt: 20 */
+ state->pdata.tclk_headercnt = 0x1403;
+ state->pdata.tclk_trailcnt = 0x00;
+ /* ths-preparecnt: 3, ths-zerocnt: 1 */
+ state->pdata.ths_headercnt = 0x0103;
+ state->pdata.twakeup = 0x4882;
+ state->pdata.tclk_postcnt = 0x008;
+ state->pdata.ths_trailcnt = 0x2;
+ state->pdata.hstxvregcnt = 0;
+
+ state->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(state->reset_gpio)) {
+ dev_err(dev, "failed to get reset gpio\n");
+ ret = PTR_ERR(state->reset_gpio);
+ goto disable_clk;
+ }
+
+ if (state->reset_gpio)
+ tc358743_gpio_reset(state);
+
+ ret = 0;
+ goto free_endpoint;
+
+disable_clk:
+ clk_disable_unprepare(refclk);
+free_endpoint:
+ v4l2_of_free_endpoint(endpoint);
+ return ret;
+}
+#else
+static inline int tc358743_probe_of(struct tc358743_state *state)
+{
+ return -ENODEV;
+}
+#endif
+
+static int tc358743_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ static struct v4l2_dv_timings default_timing =
+ V4L2_DV_BT_CEA_640X480P59_94;
+ struct tc358743_state *state;
+ struct tc358743_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+ v4l_dbg(1, debug, client, "chip found @ 0x%x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ state = devm_kzalloc(&client->dev, sizeof(struct tc358743_state),
+ GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->i2c_client = client;
+
+ /* platform data */
+ if (pdata) {
+ state->pdata = *pdata;
+ state->bus.flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+ } else {
+ err = tc358743_probe_of(state);
+ if (err == -ENODEV)
+ v4l_err(client, "No platform data!\n");
+ if (err)
+ return err;
+ }
+
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, client, &tc358743_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* i2c access */
+ if ((i2c_rd16(sd, CHIPID) & MASK_CHIPID) != 0) {
+ v4l2_info(sd, "not a TC358743 on address 0x%x\n",
+ client->addr << 1);
+ return -ENODEV;
+ }
+
+ /* control handlers */
+ v4l2_ctrl_handler_init(&state->hdl, 3);
+
+ /* private controls */
+ state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(&state->hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+
+ /* custom controls */
+ state->audio_sampling_rate_ctrl = v4l2_ctrl_new_custom(&state->hdl,
+ &tc358743_ctrl_audio_sampling_rate, NULL);
+
+ state->audio_present_ctrl = v4l2_ctrl_new_custom(&state->hdl,
+ &tc358743_ctrl_audio_present, NULL);
+
+ sd->ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ err = state->hdl.error;
+ goto err_hdl;
+ }
+
+ if (tc358743_update_controls(sd)) {
+ err = -ENODEV;
+ goto err_hdl;
+ }
+
+ /* work queues */
+ state->work_queues = create_singlethread_workqueue(client->name);
+ if (!state->work_queues) {
+ v4l2_err(sd, "Could not create work queue\n");
+ err = -ENOMEM;
+ goto err_hdl;
+ }
+
+ state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ err = media_entity_init(&sd->entity, 1, &state->pad, 0);
+ if (err < 0)
+ goto err_hdl;
+
+ sd->dev = &client->dev;
+ err = v4l2_async_register_subdev(sd);
+ if (err < 0)
+ goto err_hdl;
+
+ mutex_init(&state->confctl_mutex);
+
+ INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
+ tc358743_delayed_work_enable_hotplug);
+
+ tc358743_initial_setup(sd);
+
+ tc358743_s_dv_timings(sd, &default_timing);
+
+ state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+ tc358743_set_csi_color_space(sd);
+
+ tc358743_init_interrupts(sd);
+
+ if (state->i2c_client->irq) {
+ err = devm_request_threaded_irq(&client->dev,
+ state->i2c_client->irq,
+ NULL, tc358743_irq_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "tc358743", state);
+ if (err)
+ goto err_work_queues;
+ }
+
+ tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
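+ /* Unmask only the top-level HDMI and CSI interrupts; all other INTMASK
+ * bits stay masked. */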
+ i2c_wr16(sd, INTMASK, ~(MASK_HDMI_MSK | MASK_CSI_MSK) & 0xffff);
+
+ err = v4l2_ctrl_handler_setup(sd->ctrl_handler);
+ if (err)
+ goto err_work_queues;
+
+ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+
+ return 0;
+
+err_work_queues:
+ cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ destroy_workqueue(state->work_queues);
+ mutex_destroy(&state->confctl_mutex);
+err_hdl:
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&state->hdl);
+ return err;
+}
+
+static int tc358743_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tc358743_state *state = to_state(sd);
+
+ cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ destroy_workqueue(state->work_queues);
+ v4l2_async_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
+ mutex_destroy(&state->confctl_mutex);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&state->hdl);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc358743_id[] = {
+ {"tc358743", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc358743_id);
+
+static struct i2c_driver tc358743_driver = {
+ .driver = {
+ .name = "tc358743",
+ },
+ .probe = tc358743_probe,
+ .remove = tc358743_remove,
+ .id_table = tc358743_id,
+};
+
+module_i2c_driver(tc358743_driver);
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
new file mode 100644
index 000000000000..81f1db558e7c
--- /dev/null
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -0,0 +1,681 @@
+/*
+ * tc358743 - Toshiba HDMI to CSI-2 bridge - register names and bit masks
+ *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
+ * reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
+ */
+
+/* Bit masks have the prefix 'MASK_' and options after '_'. */
+
+#ifndef __TC358743_REGS_H
+#define __TC358743_REGS_H
+
+#define CHIPID 0x0000
+#define MASK_CHIPID 0xff00
+#define MASK_REVID 0x00ff
+
+#define SYSCTL 0x0002
+#define MASK_IRRST 0x0800
+#define MASK_CECRST 0x0400
+#define MASK_CTXRST 0x0200
+#define MASK_HDMIRST 0x0100
+#define MASK_SLEEP 0x0001
+
+#define CONFCTL 0x0004
+#define MASK_PWRISO 0x8000
+#define MASK_ACLKOPT 0x1000
+#define MASK_AUDCHNUM 0x0c00
+#define MASK_AUDCHNUM_8 0x0000
+#define MASK_AUDCHNUM_6 0x0400
+#define MASK_AUDCHNUM_4 0x0800
+#define MASK_AUDCHNUM_2 0x0c00
+#define MASK_AUDCHSEL 0x0200
+#define MASK_I2SDLYOPT 0x0100
+#define MASK_YCBCRFMT 0x00c0
+#define MASK_YCBCRFMT_444 0x0000
+#define MASK_YCBCRFMT_422_12_BIT 0x0040
+#define MASK_YCBCRFMT_COLORBAR 0x0080
+#define MASK_YCBCRFMT_422_8_BIT 0x00c0
+#define MASK_INFRMEN 0x0020
+#define MASK_AUDOUTSEL 0x0018
+#define MASK_AUDOUTSEL_CSI 0x0000
+#define MASK_AUDOUTSEL_I2S 0x0010
+#define MASK_AUDOUTSEL_TDM 0x0018
+#define MASK_AUTOINDEX 0x0004
+#define MASK_ABUFEN 0x0002
+#define MASK_VBUFEN 0x0001
+
+#define FIFOCTL 0x0006
+
+#define INTSTATUS 0x0014
+#define MASK_AMUTE_INT 0x0400
+#define MASK_HDMI_INT 0x0200
+#define MASK_CSI_INT 0x0100
+#define MASK_SYS_INT 0x0020
+#define MASK_CEC_EINT 0x0010
+#define MASK_CEC_TINT 0x0008
+#define MASK_CEC_RINT 0x0004
+#define MASK_IR_EINT 0x0002
+#define MASK_IR_DINT 0x0001
+
+#define INTMASK 0x0016
+#define MASK_AMUTE_MSK 0x0400
+#define MASK_HDMI_MSK 0x0200
+#define MASK_CSI_MSK 0x0100
+#define MASK_SYS_MSK 0x0020
+#define MASK_CEC_EMSK 0x0010
+#define MASK_CEC_TMSK 0x0008
+#define MASK_CEC_RMSK 0x0004
+#define MASK_IR_EMSK 0x0002
+#define MASK_IR_DMSK 0x0001
+
+#define INTFLAG 0x0018
+#define INTSYSSTATUS 0x001A
+
+#define PLLCTL0 0x0020
+#define MASK_PLL_PRD 0xf000
+#define SET_PLL_PRD(prd) ((((prd) - 1) << 12) &\
+ MASK_PLL_PRD)
+#define MASK_PLL_FBD 0x01ff
+#define SET_PLL_FBD(fbd) (((fbd) - 1) & MASK_PLL_FBD)
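+/* Both fields store the programmed value minus one, e.g. SET_PLL_PRD(4)
+ * encodes a divide-by-4 pre-divider as 0x3000. */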
+
+#define PLLCTL1 0x0022
+#define MASK_PLL_FRS 0x0c00
+#define SET_PLL_FRS(frs) (((frs) << 10) & MASK_PLL_FRS)
+#define MASK_PLL_LBWS 0x0300
+#define MASK_LFBREN 0x0040
+#define MASK_BYPCKEN 0x0020
+#define MASK_CKEN 0x0010
+#define MASK_RESETB 0x0002
+#define MASK_PLL_EN 0x0001
+
+#define CLW_CNTRL 0x0140
+#define MASK_CLW_LANEDISABLE 0x0001
+
+#define D0W_CNTRL 0x0144
+#define MASK_D0W_LANEDISABLE 0x0001
+
+#define D1W_CNTRL 0x0148
+#define MASK_D1W_LANEDISABLE 0x0001
+
+#define D2W_CNTRL 0x014C
+#define MASK_D2W_LANEDISABLE 0x0001
+
+#define D3W_CNTRL 0x0150
+#define MASK_D3W_LANEDISABLE 0x0001
+
+#define STARTCNTRL 0x0204
+#define MASK_START 0x00000001
+
+#define LINEINITCNT 0x0210
+#define LPTXTIMECNT 0x0214
+#define TCLK_HEADERCNT 0x0218
+#define TCLK_TRAILCNT 0x021C
+#define THS_HEADERCNT 0x0220
+#define TWAKEUP 0x0224
+#define TCLK_POSTCNT 0x0228
+#define THS_TRAILCNT 0x022C
+#define HSTXVREGCNT 0x0230
+
+#define HSTXVREGEN 0x0234
+#define MASK_D3M_HSTXVREGEN 0x0010
+#define MASK_D2M_HSTXVREGEN 0x0008
+#define MASK_D1M_HSTXVREGEN 0x0004
+#define MASK_D0M_HSTXVREGEN 0x0002
+#define MASK_CLM_HSTXVREGEN 0x0001
+
+
+#define TXOPTIONCNTRL 0x0238
+#define MASK_CONTCLKMODE 0x00000001
+
+#define CSI_CONTROL 0x040C
+#define MASK_CSI_MODE 0x8000
+#define MASK_HTXTOEN 0x0400
+#define MASK_TXHSMD 0x0080
+#define MASK_HSCKMD 0x0020
+#define MASK_NOL 0x0006
+#define MASK_NOL_1 0x0000
+#define MASK_NOL_2 0x0002
+#define MASK_NOL_3 0x0004
+#define MASK_NOL_4 0x0006
+#define MASK_EOTDIS 0x0001
+
+#define CSI_INT 0x0414
+#define MASK_INTHLT 0x00000008
+#define MASK_INTER 0x00000004
+
+#define CSI_INT_ENA 0x0418
+#define MASK_IENHLT 0x00000008
+#define MASK_IENER 0x00000004
+
+#define CSI_ERR 0x044C
+#define MASK_INER 0x00000200
+#define MASK_WCER 0x00000100
+#define MASK_QUNK 0x00000010
+#define MASK_TXBRK 0x00000002
+
+#define CSI_ERR_INTENA 0x0450
+#define CSI_ERR_HALT 0x0454
+
+#define CSI_CONFW 0x0500
+#define MASK_MODE 0xe0000000
+#define MASK_MODE_SET 0xa0000000
+#define MASK_MODE_CLEAR 0xc0000000
+#define MASK_ADDRESS 0x1f000000
+#define MASK_ADDRESS_CSI_CONTROL 0x03000000
+#define MASK_ADDRESS_CSI_INT_ENA 0x06000000
+#define MASK_ADDRESS_CSI_ERR_INTENA 0x14000000
+#define MASK_ADDRESS_CSI_ERR_HALT 0x15000000
+#define MASK_DATA 0x0000ffff
+
+#define CSI_INT_CLR 0x050C
+#define MASK_ICRER 0x00000004
+
+#define CSI_START 0x0518
+#define MASK_STRT 0x00000001
+
+#define CECEN 0x0600
+#define MASK_CECEN 0x0001
+
+#define HDMI_INT0 0x8500
+#define MASK_I_KEY 0x80
+#define MASK_I_MISC 0x02
+#define MASK_I_PHYERR 0x01
+
+#define HDMI_INT1 0x8501
+#define MASK_I_GBD 0x80
+#define MASK_I_HDCP 0x40
+#define MASK_I_ERR 0x20
+#define MASK_I_AUD 0x10
+#define MASK_I_CBIT 0x08
+#define MASK_I_PACKET 0x04
+#define MASK_I_CLK 0x02
+#define MASK_I_SYS 0x01
+
+#define SYS_INT 0x8502
+#define MASK_I_ACR_CTS 0x80
+#define MASK_I_ACRN 0x40
+#define MASK_I_DVI 0x20
+#define MASK_I_HDMI 0x10
+#define MASK_I_NOPMBDET 0x08
+#define MASK_I_DPMBDET 0x04
+#define MASK_I_TMDS 0x02
+#define MASK_I_DDC 0x01
+
+#define CLK_INT 0x8503
+#define MASK_I_OUT_H_CHG 0x40
+#define MASK_I_IN_DE_CHG 0x20
+#define MASK_I_IN_HV_CHG 0x10
+#define MASK_I_DC_CHG 0x08
+#define MASK_I_PXCLK_CHG 0x04
+#define MASK_I_PHYCLK_CHG 0x02
+#define MASK_I_TMDSCLK_CHG 0x01
+
+#define CBIT_INT 0x8505
+#define MASK_I_AF_LOCK 0x80
+#define MASK_I_AF_UNLOCK 0x40
+#define MASK_I_CBIT_FS 0x02
+
+#define AUDIO_INT 0x8506
+
+#define ERR_INT 0x8507
+#define MASK_I_EESS_ERR 0x80
+
+#define HDCP_INT 0x8508
+#define MASK_I_AVM_SET 0x80
+#define MASK_I_AVM_CLR 0x40
+#define MASK_I_LINKERR 0x20
+#define MASK_I_SHA_END 0x10
+#define MASK_I_R0_END 0x08
+#define MASK_I_KM_END 0x04
+#define MASK_I_AKSV_END 0x02
+#define MASK_I_AN_END 0x01
+
+#define MISC_INT 0x850B
+#define MASK_I_AS_LAYOUT 0x10
+#define MASK_I_NO_SPD 0x08
+#define MASK_I_NO_VS 0x03
+#define MASK_I_SYNC_CHG 0x02
+#define MASK_I_AUDIO_MUTE 0x01
+
+#define KEY_INT 0x850F
+
+#define SYS_INTM 0x8512
+#define MASK_M_ACR_CTS 0x80
+#define MASK_M_ACR_N 0x40
+#define MASK_M_DVI_DET 0x20
+#define MASK_M_HDMI_DET 0x10
+#define MASK_M_NOPMBDET 0x08
+#define MASK_M_BPMBDET 0x04
+#define MASK_M_TMDS 0x02
+#define MASK_M_DDC 0x01
+
+#define CLK_INTM 0x8513
+#define MASK_M_OUT_H_CHG 0x40
+#define MASK_M_IN_DE_CHG 0x20
+#define MASK_M_IN_HV_CHG 0x10
+#define MASK_M_DC_CHG 0x08
+#define MASK_M_PXCLK_CHG 0x04
+#define MASK_M_PHYCLK_CHG 0x02
+#define MASK_M_TMDS_CHG 0x01
+
+#define PACKET_INTM 0x8514
+
+#define CBIT_INTM 0x8515
+#define MASK_M_AF_LOCK 0x80
+#define MASK_M_AF_UNLOCK 0x40
+#define MASK_M_CBIT_FS 0x02
+
+#define AUDIO_INTM 0x8516
+#define MASK_M_BUFINIT_END 0x01
+
+#define ERR_INTM 0x8517
+#define MASK_M_EESS_ERR 0x80
+
+#define HDCP_INTM 0x8518
+#define MASK_M_AVM_SET 0x80
+#define MASK_M_AVM_CLR 0x40
+#define MASK_M_LINKERR 0x20
+#define MASK_M_SHA_END 0x10
+#define MASK_M_R0_END 0x08
+#define MASK_M_KM_END 0x04
+#define MASK_M_AKSV_END 0x02
+#define MASK_M_AN_END 0x01
+
+#define MISC_INTM 0x851B
+#define MASK_M_AS_LAYOUT 0x10
+#define MASK_M_NO_SPD 0x08
+#define MASK_M_NO_VS 0x03
+#define MASK_M_SYNC_CHG 0x02
+#define MASK_M_AUDIO_MUTE 0x01
+
+#define KEY_INTM 0x851F
+
+#define SYS_STATUS 0x8520
+#define MASK_S_SYNC 0x80
+#define MASK_S_AVMUTE 0x40
+#define MASK_S_HDCP 0x20
+#define MASK_S_HDMI 0x10
+#define MASK_S_PHY_SCDT 0x08
+#define MASK_S_PHY_PLL 0x04
+#define MASK_S_TMDS 0x02
+#define MASK_S_DDC5V 0x01
+
+#define CSI_STATUS 0x0410
+#define MASK_S_WSYNC 0x0400
+#define MASK_S_TXACT 0x0200
+#define MASK_S_RXACT 0x0100
+#define MASK_S_HLT 0x0001
+
+#define VI_STATUS1 0x8522
+#define MASK_S_V_GBD 0x08
+#define MASK_S_DEEPCOLOR 0x0c
+#define MASK_S_V_422 0x02
+#define MASK_S_V_INTERLACE 0x01
+
+#define AU_STATUS0 0x8523
+#define MASK_S_A_SAMPLE 0x01
+
+#define VI_STATUS3 0x8528
+#define MASK_S_V_COLOR 0x1e
+#define MASK_LIMITED 0x01
+
+#define PHY_CTL0 0x8531
+#define MASK_PHY_SYSCLK_IND 0x02
+#define MASK_PHY_CTL 0x01
+
+
+#define PHY_CTL1 0x8532 /* Not in REF_01 */
+#define MASK_PHY_AUTO_RST1 0xf0
+#define MASK_PHY_AUTO_RST1_OFF 0x00
+#define SET_PHY_AUTO_RST1_US(us) ((((us) / 200) << 4) & \
+ MASK_PHY_AUTO_RST1)
+#define MASK_FREQ_RANGE_MODE 0x0f
+#define SET_FREQ_RANGE_MODE_CYCLES(cycles) (((cycles) - 1) & \
+ MASK_FREQ_RANGE_MODE)
+
+#define PHY_CTL2 0x8533 /* Not in REF_01 */
+#define MASK_PHY_AUTO_RST4 0x04
+#define MASK_PHY_AUTO_RST3 0x02
+#define MASK_PHY_AUTO_RST2 0x01
+#define MASK_PHY_AUTO_RSTn (MASK_PHY_AUTO_RST4 | \
+ MASK_PHY_AUTO_RST3 | \
+ MASK_PHY_AUTO_RST2)
+
+#define PHY_EN 0x8534
+#define MASK_ENABLE_PHY 0x01
+
+#define PHY_RST 0x8535
+#define MASK_RESET_CTRL 0x01 /* Reset active low */
+
+#define PHY_BIAS 0x8536 /* Not in REF_01 */
+
+#define PHY_CSQ 0x853F /* Not in REF_01 */
+#define MASK_CSQ_CNT 0x0f
+#define SET_CSQ_CNT_LEVEL(n) ((n) & MASK_CSQ_CNT)
+
+#define SYS_FREQ0 0x8540
+#define SYS_FREQ1 0x8541
+
+#define SYS_CLK 0x8542 /* Not in REF_01 */
+#define MASK_CLK_DIFF 0x0C
+#define MASK_CLK_DIV 0x03
+
+#define DDC_CTL 0x8543
+#define MASK_DDC_ACK_POL 0x08
+#define MASK_DDC_ACTION 0x04
+#define MASK_DDC5V_MODE 0x03
+#define MASK_DDC5V_MODE_0MS 0x00
+#define MASK_DDC5V_MODE_50MS 0x01
+#define MASK_DDC5V_MODE_100MS 0x02
+#define MASK_DDC5V_MODE_200MS 0x03
+
+#define HPD_CTL 0x8544
+#define MASK_HPD_CTL0 0x10
+#define MASK_HPD_OUT0 0x01
+
+#define ANA_CTL 0x8545
+#define MASK_APPL_PCSX 0x30
+#define MASK_APPL_PCSX_HIZ 0x00
+#define MASK_APPL_PCSX_L_FIX 0x10
+#define MASK_APPL_PCSX_H_FIX 0x20
+#define MASK_APPL_PCSX_NORMAL 0x30
+#define MASK_ANALOG_ON 0x01
+
+#define AVM_CTL 0x8546
+
+#define INIT_END 0x854A
+#define MASK_INIT_END 0x01
+
+#define HDMI_DET 0x8552 /* Not in REF_01 */
+#define MASK_HDMI_DET_MOD1 0x80
+#define MASK_HDMI_DET_MOD0 0x40
+#define MASK_HDMI_DET_V 0x30
+#define MASK_HDMI_DET_V_SYNC 0x00
+#define MASK_HDMI_DET_V_ASYNC_25MS 0x10
+#define MASK_HDMI_DET_V_ASYNC_50MS 0x20
+#define MASK_HDMI_DET_V_ASYNC_100MS 0x30
+#define MASK_HDMI_DET_NUM 0x0f
+
+#define HDCP_MODE 0x8560
+#define MASK_MODE_RST_TN 0x20
+#define MASK_LINE_REKEY 0x10
+#define MASK_AUTO_CLR 0x04
+
+#define HDCP_REG1 0x8563 /* Not in REF_01 */
+#define MASK_AUTH_UNAUTH_SEL 0x70
+#define MASK_AUTH_UNAUTH_SEL_12_FRAMES 0x70
+#define MASK_AUTH_UNAUTH_SEL_8_FRAMES 0x60
+#define MASK_AUTH_UNAUTH_SEL_4_FRAMES 0x50
+#define MASK_AUTH_UNAUTH_SEL_2_FRAMES 0x40
+#define MASK_AUTH_UNAUTH_SEL_64_FRAMES 0x30
+#define MASK_AUTH_UNAUTH_SEL_32_FRAMES 0x20
+#define MASK_AUTH_UNAUTH_SEL_16_FRAMES 0x10
+#define MASK_AUTH_UNAUTH_SEL_ONCE 0x00
+#define MASK_AUTH_UNAUTH 0x01
+#define MASK_AUTH_UNAUTH_AUTO 0x01
+
+#define HDCP_REG2 0x8564 /* Not in REF_01 */
+#define MASK_AUTO_P3_RESET 0x0F
+#define SET_AUTO_P3_RESET_FRAMES(n) ((n) & MASK_AUTO_P3_RESET)
+#define MASK_AUTO_P3_RESET_OFF 0x00
+
+#define VI_MODE 0x8570
+#define MASK_RGB_DVI 0x08 /* Not in REF_01 */
+
+#define VOUT_SET2 0x8573
+#define MASK_SEL422 0x80
+#define MASK_VOUT_422FIL_100 0x40
+#define MASK_VOUTCOLORMODE 0x03
+#define MASK_VOUTCOLORMODE_THROUGH 0x00
+#define MASK_VOUTCOLORMODE_AUTO 0x01
+#define MASK_VOUTCOLORMODE_MANUAL 0x03
+
+#define VOUT_SET3 0x8574
+#define MASK_VOUT_EXTCNT 0x08
+
+#define VI_REP 0x8576
+#define MASK_VOUT_COLOR_SEL 0xe0
+#define MASK_VOUT_COLOR_RGB_FULL 0x00
+#define MASK_VOUT_COLOR_RGB_LIMITED 0x20
+#define MASK_VOUT_COLOR_601_YCBCR_FULL 0x40
+#define MASK_VOUT_COLOR_601_YCBCR_LIMITED 0x60
+#define MASK_VOUT_COLOR_709_YCBCR_FULL 0x80
+#define MASK_VOUT_COLOR_709_YCBCR_LIMITED 0xa0
+#define MASK_VOUT_COLOR_FULL_TO_LIMITED 0xc0
+#define MASK_VOUT_COLOR_LIMITED_TO_FULL 0xe0
+#define MASK_IN_REP_HEN 0x10
+#define MASK_IN_REP 0x0f
+
+#define VI_MUTE 0x857F
+#define MASK_AUTO_MUTE 0xc0
+#define MASK_VI_MUTE 0x10
+
+#define DE_WIDTH_H_LO 0x8582 /* Not in REF_01 */
+#define DE_WIDTH_H_HI 0x8583 /* Not in REF_01 */
+#define DE_WIDTH_V_LO 0x8588 /* Not in REF_01 */
+#define DE_WIDTH_V_HI 0x8589 /* Not in REF_01 */
+#define H_SIZE_LO 0x858A /* Not in REF_01 */
+#define H_SIZE_HI 0x858B /* Not in REF_01 */
+#define V_SIZE_LO 0x858C /* Not in REF_01 */
+#define V_SIZE_HI 0x858D /* Not in REF_01 */
+#define FV_CNT_LO 0x85A1 /* Not in REF_01 */
+#define FV_CNT_HI 0x85A2 /* Not in REF_01 */
+
+#define FH_MIN0 0x85AA /* Not in REF_01 */
+#define FH_MIN1 0x85AB /* Not in REF_01 */
+#define FH_MAX0 0x85AC /* Not in REF_01 */
+#define FH_MAX1 0x85AD /* Not in REF_01 */
+
+#define HV_RST 0x85AF /* Not in REF_01 */
+#define MASK_H_PI_RST 0x20
+#define MASK_V_PI_RST 0x10
+
+#define EDID_MODE 0x85C7
+#define MASK_EDID_SPEED 0x40
+#define MASK_EDID_MODE 0x03
+#define MASK_EDID_MODE_DISABLE 0x00
+#define MASK_EDID_MODE_DDC2B 0x01
+#define MASK_EDID_MODE_E_DDC 0x02
+
+#define EDID_LEN1 0x85CA
+#define EDID_LEN2 0x85CB
+
+#define HDCP_REG3 0x85D1 /* Not in REF_01 */
+#define KEY_RD_CMD 0x01
+
+#define FORCE_MUTE 0x8600
+#define MASK_FORCE_AMUTE 0x10
+#define MASK_FORCE_DMUTE 0x01
+
+#define CMD_AUD 0x8601
+#define MASK_CMD_BUFINIT 0x04
+#define MASK_CMD_LOCKDET 0x02
+#define MASK_CMD_MUTE 0x01
+
+#define AUTO_CMD0 0x8602
+#define MASK_AUTO_MUTE7 0x80
+#define MASK_AUTO_MUTE6 0x40
+#define MASK_AUTO_MUTE5 0x20
+#define MASK_AUTO_MUTE4 0x10
+#define MASK_AUTO_MUTE3 0x08
+#define MASK_AUTO_MUTE2 0x04
+#define MASK_AUTO_MUTE1 0x02
+#define MASK_AUTO_MUTE0 0x01
+
+#define AUTO_CMD1 0x8603
+#define MASK_AUTO_MUTE10 0x04
+#define MASK_AUTO_MUTE9 0x02
+#define MASK_AUTO_MUTE8 0x01
+
+#define AUTO_CMD2 0x8604
+#define MASK_AUTO_PLAY3 0x08
+#define MASK_AUTO_PLAY2 0x04
+
+#define BUFINIT_START 0x8606
+#define SET_BUFINIT_START_MS(milliseconds) ((milliseconds) / 100)
+
+#define FS_MUTE 0x8607
+#define MASK_FS_ELSE_MUTE 0x80
+#define MASK_FS22_MUTE 0x40
+#define MASK_FS24_MUTE 0x20
+#define MASK_FS88_MUTE 0x10
+#define MASK_FS96_MUTE 0x08
+#define MASK_FS176_MUTE 0x04
+#define MASK_FS192_MUTE 0x02
+#define MASK_FS_NO_MUTE 0x01
+
+#define FS_IMODE 0x8620
+#define MASK_NLPCM_HMODE 0x40
+#define MASK_NLPCM_SMODE 0x20
+#define MASK_NLPCM_IMODE 0x10
+#define MASK_FS_HMODE 0x08
+#define MASK_FS_AMODE 0x04
+#define MASK_FS_SMODE 0x02
+#define MASK_FS_IMODE 0x01
+
+#define FS_SET 0x8621
+#define MASK_FS 0x0f
+
+#define LOCKDET_REF0 0x8630
+#define LOCKDET_REF1 0x8631
+#define LOCKDET_REF2 0x8632
+
+#define ACR_MODE 0x8640
+#define MASK_ACR_LOAD 0x10
+#define MASK_N_MODE 0x04
+#define MASK_CTS_MODE 0x01
+
+#define ACR_MDF0 0x8641
+#define MASK_ACR_L2MDF 0x70
+#define MASK_ACR_L2MDF_0_PPM 0x00
+#define MASK_ACR_L2MDF_61_PPM 0x10
+#define MASK_ACR_L2MDF_122_PPM 0x20
+#define MASK_ACR_L2MDF_244_PPM 0x30
+#define MASK_ACR_L2MDF_488_PPM 0x40
+#define MASK_ACR_L2MDF_976_PPM 0x50
+#define MASK_ACR_L2MDF_1976_PPM 0x60
+#define MASK_ACR_L2MDF_3906_PPM 0x70
+#define MASK_ACR_L1MDF 0x07
+#define MASK_ACR_L1MDF_0_PPM 0x00
+#define MASK_ACR_L1MDF_61_PPM 0x01
+#define MASK_ACR_L1MDF_122_PPM 0x02
+#define MASK_ACR_L1MDF_244_PPM 0x03
+#define MASK_ACR_L1MDF_488_PPM 0x04
+#define MASK_ACR_L1MDF_976_PPM 0x05
+#define MASK_ACR_L1MDF_1976_PPM 0x06
+#define MASK_ACR_L1MDF_3906_PPM 0x07
+
+#define ACR_MDF1 0x8642
+#define MASK_ACR_L3MDF 0x07
+#define MASK_ACR_L3MDF_0_PPM 0x00
+#define MASK_ACR_L3MDF_61_PPM 0x01
+#define MASK_ACR_L3MDF_122_PPM 0x02
+#define MASK_ACR_L3MDF_244_PPM 0x03
+#define MASK_ACR_L3MDF_488_PPM 0x04
+#define MASK_ACR_L3MDF_976_PPM 0x05
+#define MASK_ACR_L3MDF_1976_PPM 0x06
+#define MASK_ACR_L3MDF_3906_PPM 0x07
+
+#define SDO_MODE1 0x8652
+#define MASK_SDO_BIT_LENG 0x70
+#define MASK_SDO_FMT 0x03
+#define MASK_SDO_FMT_RIGHT 0x00
+#define MASK_SDO_FMT_LEFT 0x01
+#define MASK_SDO_FMT_I2S 0x02
+
+#define DIV_MODE 0x8665 /* Not in REF_01 */
+#define MASK_DIV_DLY 0xf0
+#define SET_DIV_DLY_MS(milliseconds) ((((milliseconds) / 100) << 4) & \
+ MASK_DIV_DLY)
+#define MASK_DIV_MODE 0x01
+
+#define NCO_F0_MOD 0x8670
+#define MASK_NCO_F0_MOD 0x03
+#define MASK_NCO_F0_MOD_42MHZ 0x00
+#define MASK_NCO_F0_MOD_27MHZ 0x01
+
+#define PK_INT_MODE 0x8709
+#define MASK_ISRC2_INT_MODE 0x80
+#define MASK_ISRC_INT_MODE 0x40
+#define MASK_ACP_INT_MODE 0x20
+#define MASK_VS_INT_MODE 0x10
+#define MASK_SPD_INT_MODE 0x08
+#define MASK_MS_INT_MODE 0x04
+#define MASK_AUD_INT_MODE 0x02
+#define MASK_AVI_INT_MODE 0x01
+
+#define NO_PKT_LIMIT 0x870B
+#define MASK_NO_ACP_LIMIT 0xf0
+#define SET_NO_ACP_LIMIT_MS(milliseconds) ((((milliseconds) / 80) << 4) & \
+ MASK_NO_ACP_LIMIT)
+#define MASK_NO_AVI_LIMIT 0x0f
+#define SET_NO_AVI_LIMIT_MS(milliseconds) (((milliseconds) / 80) & \
+ MASK_NO_AVI_LIMIT)
+
+#define NO_PKT_CLR 0x870C
+#define MASK_NO_VS_CLR 0x40
+#define MASK_NO_SPD_CLR 0x20
+#define MASK_NO_ACP_CLR 0x10
+#define MASK_NO_AVI_CLR1 0x02
+#define MASK_NO_AVI_CLR0 0x01
+
+#define ERR_PK_LIMIT 0x870D
+#define NO_PKT_LIMIT2 0x870E
+#define PK_AVI_0HEAD 0x8710
+#define PK_AVI_1HEAD 0x8711
+#define PK_AVI_2HEAD 0x8712
+#define PK_AVI_0BYTE 0x8713
+#define PK_AVI_1BYTE 0x8714
+#define PK_AVI_2BYTE 0x8715
+#define PK_AVI_3BYTE 0x8716
+#define PK_AVI_4BYTE 0x8717
+#define PK_AVI_5BYTE 0x8718
+#define PK_AVI_6BYTE 0x8719
+#define PK_AVI_7BYTE 0x871A
+#define PK_AVI_8BYTE 0x871B
+#define PK_AVI_9BYTE 0x871C
+#define PK_AVI_10BYTE 0x871D
+#define PK_AVI_11BYTE 0x871E
+#define PK_AVI_12BYTE 0x871F
+#define PK_AVI_13BYTE 0x8720
+#define PK_AVI_14BYTE 0x8721
+#define PK_AVI_15BYTE 0x8722
+#define PK_AVI_16BYTE 0x8723
+
+#define BKSV 0x8800
+
+#define BCAPS 0x8840
+#define MASK_HDMI_RSVD 0x80
+#define MASK_REPEATER 0x40
+#define MASK_READY 0x20
+#define MASK_FASTI2C 0x10
+#define MASK_1_1_FEA 0x02
+#define MASK_FAST_REAU 0x01
+
+#define BSTATUS1 0x8842
+#define MASK_MAX_EXCED 0x08
+
+#define EDID_RAM 0x8C00
+#define NO_GDB_LIMIT 0x9007
+
+#endif
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index cf93021a6500..d87168adee45 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -331,13 +331,6 @@ static const struct v4l2_ctrl_ops tda7432_ctrl_ops = {
static const struct v4l2_subdev_core_ops tda7432_core_ops = {
.log_status = tda7432_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_ops tda7432_ops = {
@@ -416,7 +409,6 @@ MODULE_DEVICE_TABLE(i2c, tda7432_id);
static struct i2c_driver tda7432_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tda7432",
},
.probe = tda7432_probe,
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index fbdff8b24eec..f31e659588ac 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -199,7 +199,6 @@ MODULE_DEVICE_TABLE(i2c, tda9840_id);
static struct i2c_driver tda9840_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tda9840",
},
.probe = tda9840_probe,
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index bbe1a99fda36..084bd75bb32c 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -162,7 +162,6 @@ MODULE_DEVICE_TABLE(i2c, tea6415c_id);
static struct i2c_driver tea6415c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tea6415c",
},
.probe = tea6415c_probe,
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 30a8d75771af..b7f4e58f3624 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -144,7 +144,6 @@ MODULE_DEVICE_TABLE(i2c, tea6420_id);
static struct i2c_driver tea6420_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tea6420",
},
.probe = tea6420_probe,
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 9f7fdb6b61ca..bda3a6540a60 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -377,7 +377,6 @@ MODULE_DEVICE_TABLE(i2c, ths7303_id);
static struct i2c_driver ths7303_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ths73x3",
},
.probe = ths7303_probe,
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index ef87f7b09ea2..0370dd89f1fc 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -122,13 +122,6 @@ static const struct v4l2_ctrl_ops tlv320aic23b_ctrl_ops = {
static const struct v4l2_subdev_core_ops tlv320aic23b_core_ops = {
.log_status = tlv320aic23b_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops tlv320aic23b_audio_ops = {
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 0c50e5285cf6..2a8114a676fd 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2051,7 +2051,6 @@ MODULE_DEVICE_TABLE(i2c, tvaudio_id);
static struct i2c_driver tvaudio_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tvaudio",
},
.probe = tvaudio_probe,
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 24e47279e30c..a93985a9b070 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -957,16 +957,6 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
-};
-
static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
.s_std = tvp514x_s_std,
.s_routing = tvp514x_s_routing,
@@ -983,7 +973,6 @@ static const struct v4l2_subdev_pad_ops tvp514x_pad_ops = {
};
static const struct v4l2_subdev_ops tvp514x_ops = {
- .core = &tvp514x_core_ops,
.video = &tvp514x_video_ops,
.pad = &tvp514x_pad_ops,
};
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index e4fa0746f75e..522a865c5c60 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1215,7 +1215,6 @@ MODULE_DEVICE_TABLE(i2c, tvp5150_id);
static struct i2c_driver tvp5150_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tvp5150",
},
.probe = tvp5150_probe,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 05077cffd235..f617d8b745ee 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -861,13 +861,6 @@ tvp7002_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cf
/* V4L2 core operation handlers */
static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
.log_status = tvp7002_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = tvp7002_g_register,
.s_register = tvp7002_s_register,
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index 12c7d211a4a4..bef79cf74364 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -266,7 +266,6 @@ MODULE_DEVICE_TABLE(i2c, tw9903_id);
static struct i2c_driver tw9903_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tw9903",
},
.probe = tw9903_probe,
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 2672d89265ff..316a3113ef27 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -234,7 +234,6 @@ MODULE_DEVICE_TABLE(i2c, tw9906_id);
static struct i2c_driver tw9906_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tw9906",
},
.probe = tw9906_probe,
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index d248e6a12b8e..2c0f955abc72 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -241,7 +241,6 @@ MODULE_DEVICE_TABLE(i2c, upd64031a_id);
static struct i2c_driver upd64031a_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "upd64031a",
},
.probe = upd64031a_probe,
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 3a152ce7258a..f2057a434060 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -213,7 +213,6 @@ MODULE_DEVICE_TABLE(i2c, upd64083_id);
static struct i2c_driver upd64083_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "upd64083",
},
.probe = upd64083_probe,
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 819ab6d12989..d6c23bdbcd4a 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -194,7 +194,6 @@ MODULE_DEVICE_TABLE(i2c, vp27smpx_id);
static struct i2c_driver vp27smpx_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "vp27smpx",
},
.probe = vp27smpx_probe,
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 016e766e72ba..4b564f17f618 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -450,13 +450,6 @@ static const struct v4l2_ctrl_ops vpx3220_ctrl_ops = {
static const struct v4l2_subdev_core_ops vpx3220_core_ops = {
.init = vpx3220_init,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_video_ops vpx3220_video_ops = {
@@ -567,7 +560,6 @@ MODULE_DEVICE_TABLE(i2c, vpx3220_id);
static struct i2c_driver vpx3220_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "vpx3220",
},
.probe = vpx3220_probe,
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index 3be73f6a40e9..f086e5e6e844 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -176,13 +176,6 @@ static const struct v4l2_ctrl_ops wm8739_ctrl_ops = {
static const struct v4l2_subdev_core_ops wm8739_core_ops = {
.log_status = wm8739_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops wm8739_audio_ops = {
@@ -272,7 +265,6 @@ MODULE_DEVICE_TABLE(i2c, wm8739_id);
static struct i2c_driver wm8739_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "wm8739",
},
.probe = wm8739_probe,
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index bee7946faa7c..d33d2cd6d034 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -318,7 +318,6 @@ MODULE_DEVICE_TABLE(i2c, wm8775_id);
static struct i2c_driver wm8775_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "wm8775",
},
.probe = wm8775_probe,
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 4d8e01c7b1b2..153a46469814 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -282,9 +282,9 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
if (ret < 0 && ret != -ENOIOCTLCMD) {
dev_dbg(entity->parent->dev,
"link validation failed for \"%s\":%u -> \"%s\":%u, error %d\n",
- entity->name, link->source->index,
- link->sink->entity->name,
- link->sink->index, ret);
+ link->source->entity->name,
+ link->source->index,
+ entity->name, link->sink->index, ret);
goto error;
}
}
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index f318ae9bb57a..48a611bc3e18 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -11,16 +11,16 @@ if MEDIA_PCI_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Media capture support"
source "drivers/media/pci/meye/Kconfig"
+source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
+source "drivers/media/pci/tw68/Kconfig"
+source "drivers/media/pci/zoran/Kconfig"
endif
if MEDIA_ANALOG_TV_SUPPORT
comment "Media capture/analog TV support"
source "drivers/media/pci/ivtv/Kconfig"
-source "drivers/media/pci/zoran/Kconfig"
source "drivers/media/pci/saa7146/Kconfig"
-source "drivers/media/pci/solo6x10/Kconfig"
-source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/dt3155/Kconfig"
endif
@@ -49,6 +49,7 @@ source "drivers/media/pci/mantis/Kconfig"
source "drivers/media/pci/ngene/Kconfig"
source "drivers/media/pci/ddbridge/Kconfig"
source "drivers/media/pci/smipcie/Kconfig"
+source "drivers/media/pci/netup_unidvb/Kconfig"
endif
endif #MEDIA_PCI_SUPPORT
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 23ce53bd47c3..5f8aacb8b9b8 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -12,7 +12,8 @@ obj-y += ttpci/ \
ngene/ \
ddbridge/ \
saa7146/ \
- smipcie/
+ smipcie/ \
+ netup_unidvb/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 00f0880b6d66..57c7f58c3af2 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -160,7 +160,6 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
void
btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips)
{
- struct v4l2_clip swap;
int i,j,n;
if (nclips < 2)
@@ -168,9 +167,7 @@ btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips)
for (i = nclips-2; i >= 0; i--) {
for (n = 0, j = 0; j <= i; j++) {
if (clips[j].c.left > clips[j+1].c.left) {
- swap = clips[j];
- clips[j] = clips[j+1];
- clips[j+1] = swap;
+ swap(clips[j], clips[j + 1]);
n++;
}
}
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 67c8d6b2c335..a75c53da224a 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -194,21 +194,18 @@ static u32 bttv_rc5_decode(unsigned int code)
static void bttv_rc5_timer_end(unsigned long data)
{
struct bttv_ir *ir = (struct bttv_ir *)data;
- struct timeval tv;
+ ktime_t tv;
u32 gap, rc5, scancode;
u8 toggle, command, system;
/* get time */
- do_gettimeofday(&tv);
+ tv = ktime_get();
+ gap = ktime_to_us(ktime_sub(tv, ir->base_time));
/* avoid overflow with gap >1s */
- if (tv.tv_sec - ir->base_time.tv_sec > 1) {
+ if (gap > USEC_PER_SEC) {
gap = 200000;
- } else {
- gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) +
- tv.tv_usec - ir->base_time.tv_usec;
}
-
/* signal we're ready to start a new code */
ir->active = false;
@@ -249,7 +246,7 @@ static void bttv_rc5_timer_end(unsigned long data)
static int bttv_rc5_irq(struct bttv *btv)
{
struct bttv_ir *ir = btv->remote;
- struct timeval tv;
+ ktime_t tv;
u32 gpio;
u32 gap;
unsigned long current_jiffies;
@@ -259,14 +256,12 @@ static int bttv_rc5_irq(struct bttv *btv)
/* get time of bit */
current_jiffies = jiffies;
- do_gettimeofday(&tv);
+ tv = ktime_get();
+ gap = ktime_to_us(ktime_sub(tv, ir->base_time));
/* avoid overflow with gap >1s */
- if (tv.tv_sec - ir->base_time.tv_sec > 1) {
+ if (gap > USEC_PER_SEC) {
gap = 200000;
- } else {
- gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) +
- tv.tv_usec - ir->base_time.tv_usec;
}
dprintk("RC5 IRQ: gap %d us for %s\n",
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index a444cfb35c0b..31bf79d3b0d2 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -140,7 +140,7 @@ struct bttv_ir {
bool rc5_gpio; /* Is RC5 legacy GPIO enabled? */
u32 last_bit; /* last raw bit seen */
u32 code; /* raw code under construction */
- struct timeval base_time; /* time of last seen code */
+ ktime_t base_time; /* time of last seen code */
bool active; /* building raw code */
};
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..1f88ccc174da 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -1,7 +1,9 @@
config VIDEO_COBALT
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+ depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
+ depends on GPIOLIB || COMPILE_TEST
+ depends on SND
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index b994b8efdc99..8fed61ec712e 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -339,15 +339,16 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
}
if (pcie_link_get_lanes(cobalt) != 8) {
- cobalt_err("PCI Express link width is not 8 lanes (%d)\n",
+ cobalt_warn("PCI Express link width is %d lanes.\n",
pcie_link_get_lanes(cobalt));
if (pcie_bus_link_get_lanes(cobalt) < 8)
- cobalt_err("The current slot only supports %d lanes, at least 8 are needed\n",
+ cobalt_warn("The current slot only supports %d lanes, for best performance 8 are needed\n",
pcie_bus_link_get_lanes(cobalt));
- else
+ if (pcie_link_get_lanes(cobalt) != pcie_bus_link_get_lanes(cobalt)) {
cobalt_err("The card is most likely not seated correctly in the PCIe slot\n");
- ret = -EIO;
- goto err_disable;
+ ret = -EIO;
+ goto err_disable;
+ }
}
if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
also know about dropped frames. */
cb->vb.v4l2_buf.sequence = s->sequence++;
vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
- VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index b40c2d141b58..9756fd3e8af5 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -28,6 +28,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
#include <media/adv7604.h>
#include <media/adv7842.h>
@@ -641,13 +642,17 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
struct cobalt_stream *s = video_drvdata(file);
int err;
- if (vb2_is_busy(&s->q))
- return -EBUSY;
-
if (s->input == 1) {
*timings = cea1080p60;
return 0;
}
+
+ if (v4l2_match_dv_timings(timings, &s->timings, 0))
+ return 0;
+
+ if (vb2_is_busy(&s->q))
+ return -EBUSY;
+
err = v4l2_subdev_call(s->sd,
video, s_dv_timings, timings);
if (!err) {
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index af52def700cc..f752f3993687 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -313,13 +313,6 @@ static const struct v4l2_ctrl_ops gpio_ctrl_ops = {
static const struct v4l2_subdev_core_ops subdev_core_ops = {
.log_status = subdev_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops subdev_tuner_ops = {
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
+#ifdef CONFIG_X86_64
+ if (pat_enabled()) {
+ pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+ return -ENODEV;
+ }
+#endif
+
if (itv->osd_info) {
IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
int registered = 0;
int err;
-#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
- return -ENODEV;
- }
-#endif
if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..2ce310b0a022 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,21 +130,20 @@ err:
int mantis_dma_init(struct mantis_pci *mantis)
{
- int err = 0;
+ int err;
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
- if (mantis_alloc_buffers(mantis) < 0) {
+ err = mantis_alloc_buffers(mantis);
+ if (err < 0) {
dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
/* Stop RISC Engine */
mmwrite(0, MANTIS_DMA_CTL);
- goto err;
+ return err;
}
return 0;
-err:
- return err;
}
EXPORT_SYMBOL_GPL(mantis_dma_init);
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
new file mode 100644
index 000000000000..f277b0b10c2d
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -0,0 +1,12 @@
+config DVB_NETUP_UNIDVB
+ tristate "NetUP Universal DVB card support"
+ depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER
+ select VIDEOBUF2_DVB
+ select VIDEOBUF2_VMALLOC
+ select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
+ ---help---
+ Support for the NetUP PCI Express Universal DVB card.
+
diff --git a/drivers/media/pci/netup_unidvb/Makefile b/drivers/media/pci/netup_unidvb/Makefile
new file mode 100644
index 000000000000..ee6ae0501eae
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/Makefile
@@ -0,0 +1,9 @@
+netup-unidvb-objs += netup_unidvb_core.o
+netup-unidvb-objs += netup_unidvb_i2c.o
+netup-unidvb-objs += netup_unidvb_ci.o
+netup-unidvb-objs += netup_unidvb_spi.o
+
+obj-$(CONFIG_DVB_NETUP_UNIDVB) += netup-unidvb.o
+
+ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb.h b/drivers/media/pci/netup_unidvb/netup_unidvb.h
new file mode 100644
index 000000000000..fa951102d7fb
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb.h
@@ -0,0 +1,133 @@
+/*
+ * netup_unidvb.h
+ *
+ * Data type definitions for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dvb.h>
+#include <dvb_ca_en50221.h>
+
+#define NETUP_UNIDVB_NAME "netup_unidvb"
+#define NETUP_UNIDVB_VERSION "0.0.1"
+#define NETUP_VENDOR_ID 0x1b55
+#define NETUP_PCI_DEV_REVISION 0x2
+
+/* IRQ-related registers */
+#define REG_ISR 0x4890
+#define REG_ISR_MASKED 0x4892
+#define REG_IMASK_SET 0x4894
+#define REG_IMASK_CLEAR 0x4896
+/* REG_ISR register bits */
+#define NETUP_UNIDVB_IRQ_SPI (1 << 0)
+#define NETUP_UNIDVB_IRQ_I2C0 (1 << 1)
+#define NETUP_UNIDVB_IRQ_I2C1 (1 << 2)
+#define NETUP_UNIDVB_IRQ_FRA0 (1 << 4)
+#define NETUP_UNIDVB_IRQ_FRA1 (1 << 5)
+#define NETUP_UNIDVB_IRQ_FRB0 (1 << 6)
+#define NETUP_UNIDVB_IRQ_FRB1 (1 << 7)
+#define NETUP_UNIDVB_IRQ_DMA1 (1 << 8)
+#define NETUP_UNIDVB_IRQ_DMA2 (1 << 9)
+#define NETUP_UNIDVB_IRQ_CI (1 << 10)
+#define NETUP_UNIDVB_IRQ_CAM0 (1 << 11)
+#define NETUP_UNIDVB_IRQ_CAM1 (1 << 12)
+
+struct netup_dma {
+ u8 num;
+ spinlock_t lock;
+ struct netup_unidvb_dev *ndev;
+ struct netup_dma_regs *regs;
+ u32 ring_buffer_size;
+ u8 *addr_virt;
+ dma_addr_t addr_phys;
+ u64 addr_last;
+ u32 high_addr;
+ u32 data_offset;
+ u32 data_size;
+ struct list_head free_buffers;
+ struct work_struct work;
+ struct timer_list timeout;
+};
+
+enum netup_i2c_state {
+ STATE_DONE,
+ STATE_WAIT,
+ STATE_WANT_READ,
+ STATE_WANT_WRITE,
+ STATE_ERROR
+};
+
+struct netup_i2c_regs;
+
+struct netup_i2c {
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ struct i2c_adapter adap;
+ struct netup_unidvb_dev *dev;
+ struct netup_i2c_regs *regs;
+ struct i2c_msg *msg;
+ enum netup_i2c_state state;
+ u32 xmit_size;
+};
+
+struct netup_ci_state {
+ struct dvb_ca_en50221 ca;
+ u8 __iomem *membase8_config;
+ u8 __iomem *membase8_io;
+ struct netup_unidvb_dev *dev;
+ int status;
+ int nr;
+};
+
+struct netup_spi;
+
+struct netup_unidvb_dev {
+ struct pci_dev *pci_dev;
+ int pci_bus;
+ int pci_slot;
+ int pci_func;
+ int board_num;
+ int old_fw;
+ u32 __iomem *lmmio0;
+ u8 __iomem *bmmio0;
+ u32 __iomem *lmmio1;
+ u8 __iomem *bmmio1;
+ u8 *dma_virt;
+ dma_addr_t dma_phys;
+ u32 dma_size;
+ struct vb2_dvb_frontends frontends[2];
+ struct netup_i2c i2c[2];
+ struct workqueue_struct *wq;
+ struct netup_dma dma[2];
+ struct netup_ci_state ci[2];
+ struct netup_spi *spi;
+};
+
+int netup_i2c_register(struct netup_unidvb_dev *ndev);
+void netup_i2c_unregister(struct netup_unidvb_dev *ndev);
+irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev);
+irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c);
+irqreturn_t netup_spi_interrupt(struct netup_spi *spi);
+int netup_unidvb_ci_register(struct netup_unidvb_dev *dev,
+ int num, struct pci_dev *pci_dev);
+void netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num);
+int netup_spi_init(struct netup_unidvb_dev *ndev);
+void netup_spi_release(struct netup_unidvb_dev *ndev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
new file mode 100644
index 000000000000..751b51b03593
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
@@ -0,0 +1,248 @@
+/*
+ * netup_unidvb_ci.c
+ *
+ * DVB CAM support for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kmod.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "netup_unidvb.h"
+
+/* CI slot 0 base addresses */
+#define CAM0_CONFIG 0x0
+#define CAM0_IO 0x8000
+#define CAM0_MEM 0x10000
+#define CAM0_SZ 32
+/* CI slot 1 base addresses */
+#define CAM1_CONFIG 0x20000
+#define CAM1_IO 0x28000
+#define CAM1_MEM 0x30000
+#define CAM1_SZ 32
+/* ctrlstat registers */
+#define CAM_CTRLSTAT_READ_SET 0x4980
+#define CAM_CTRLSTAT_CLR 0x4982
+/* register bits */
+#define BIT_CAM_STCHG (1<<0)
+#define BIT_CAM_PRESENT (1<<1)
+#define BIT_CAM_RESET (1<<2)
+#define BIT_CAM_BYPASS (1<<3)
+#define BIT_CAM_READY (1<<4)
+#define BIT_CAM_ERROR (1<<5)
+#define BIT_CAM_OVERCURR (1<<6)
+/* BIT_CAM_BYPASS bit shift for SLOT 1 */
+#define CAM1_SHIFT 8
+
+irqreturn_t netup_ci_interrupt(struct netup_unidvb_dev *ndev)
+{
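+ /* 0x101 clears BIT_CAM_STCHG for both slots: bit 0, and bit 8 via CAM1_SHIFT */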
+ writew(0x101, ndev->bmmio0 + CAM_CTRLSTAT_CLR);
+ return IRQ_HANDLED;
+}
+
+static int netup_unidvb_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221,
+ int slot)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+ u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+
+ dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x\n",
+ __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+ if (slot != 0)
+ return -EINVAL;
+ /* pass data to CAM module */
+ writew(BIT_CAM_BYPASS << shift, dev->bmmio0 + CAM_CTRLSTAT_CLR);
+ dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT=0x%x done\n",
+ __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+ return 0;
+}
+
+static int netup_unidvb_ci_slot_shutdown(struct dvb_ca_en50221 *en50221,
+ int slot)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+
+ dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__);
+ return 0;
+}
+
+static int netup_unidvb_ci_slot_reset(struct dvb_ca_en50221 *en50221,
+ int slot)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+ unsigned long timeout = 0;
+ u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+ u16 ci_stat = 0;
+ int reset_counter = 3;
+
+ dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n",
+ __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+reset:
+ timeout = jiffies + msecs_to_jiffies(5000);
+ /* start reset */
+ writew(BIT_CAM_RESET << shift, dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+ dev_dbg(&dev->pci_dev->dev, "%s(): waiting for reset\n", __func__);
+ /* wait until reset done */
+ while (time_before(jiffies, timeout)) {
+ ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+ if (ci_stat & (BIT_CAM_READY << shift))
+ break;
+ udelay(1000);
+ }
+ if (!(ci_stat & (BIT_CAM_READY << shift)) && reset_counter > 0) {
+ dev_dbg(&dev->pci_dev->dev,
+ "%s(): CAMP reset timeout! Will try again..\n",
+ __func__);
+ reset_counter--;
+ goto reset;
+ }
+ return 0;
+}
+
+static int netup_unidvb_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+ u16 shift = (state->nr == 1) ? CAM1_SHIFT : 0;
+ u16 ci_stat = 0;
+
+ dev_dbg(&dev->pci_dev->dev, "%s(): CAM_CTRLSTAT_READ_SET=0x%x\n",
+ __func__, readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET));
+ ci_stat = readw(dev->bmmio0 + CAM_CTRLSTAT_READ_SET);
+ if (ci_stat & (BIT_CAM_READY << shift)) {
+ state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
+ DVB_CA_EN50221_POLL_CAM_READY;
+ } else if (ci_stat & (BIT_CAM_PRESENT << shift)) {
+ state->status = DVB_CA_EN50221_POLL_CAM_PRESENT;
+ } else {
+ state->status = 0;
+ }
+ return state->status;
+}
+
+static int netup_unidvb_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+ u8 val = state->membase8_config[addr];
+
+ dev_dbg(&dev->pci_dev->dev,
+ "%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
+ return val;
+}
+
+static int netup_unidvb_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+
+ dev_dbg(&dev->pci_dev->dev,
+ "%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
+ state->membase8_config[addr] = data;
+ return 0;
+}
+
+static int netup_unidvb_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+ u8 val = state->membase8_io[addr];
+
+ dev_dbg(&dev->pci_dev->dev,
+ "%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
+ return val;
+}
+
+static int netup_unidvb_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr, u8 data)
+{
+ struct netup_ci_state *state = en50221->data;
+ struct netup_unidvb_dev *dev = state->dev;
+
+ dev_dbg(&dev->pci_dev->dev,
+ "%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
+ state->membase8_io[addr] = data;
+ return 0;
+}
+
+int netup_unidvb_ci_register(struct netup_unidvb_dev *dev,
+ int num, struct pci_dev *pci_dev)
+{
+ int result;
+ struct netup_ci_state *state;
+
+ if (num < 0 || num > 1) {
+ dev_err(&pci_dev->dev, "%s(): invalid CI adapter %d\n",
+ __func__, num);
+ return -EINVAL;
+ }
+ state = &dev->ci[num];
+ state->nr = num;
+ state->membase8_config = dev->bmmio1 +
+ ((num == 0) ? CAM0_CONFIG : CAM1_CONFIG);
+ state->membase8_io = dev->bmmio1 +
+ ((num == 0) ? CAM0_IO : CAM1_IO);
+ state->dev = dev;
+ state->ca.owner = THIS_MODULE;
+ state->ca.read_attribute_mem = netup_unidvb_ci_read_attribute_mem;
+ state->ca.write_attribute_mem = netup_unidvb_ci_write_attribute_mem;
+ state->ca.read_cam_control = netup_unidvb_ci_read_cam_ctl;
+ state->ca.write_cam_control = netup_unidvb_ci_write_cam_ctl;
+ state->ca.slot_reset = netup_unidvb_ci_slot_reset;
+ state->ca.slot_shutdown = netup_unidvb_ci_slot_shutdown;
+ state->ca.slot_ts_enable = netup_unidvb_ci_slot_ts_ctl;
+ state->ca.poll_slot_status = netup_unidvb_poll_ci_slot_status;
+ state->ca.data = state;
+ result = dvb_ca_en50221_init(&dev->frontends[num].adapter,
+ &state->ca, 0, 1);
+ if (result < 0) {
+ dev_err(&pci_dev->dev,
+ "%s(): dvb_ca_en50221_init result %d\n",
+ __func__, result);
+ return result;
+ }
+ writew(NETUP_UNIDVB_IRQ_CI, dev->bmmio0 + REG_IMASK_SET);
+ dev_info(&pci_dev->dev,
+ "%s(): CI adapter %d init done\n", __func__, num);
+ return 0;
+}
+
+void netup_unidvb_ci_unregister(struct netup_unidvb_dev *dev, int num)
+{
+ struct netup_ci_state *state;
+
+ dev_dbg(&dev->pci_dev->dev, "%s()\n", __func__);
+ if (num < 0 || num > 1) {
+ dev_err(&dev->pci_dev->dev, "%s(): invalid CI adapter %d\n",
+ __func__, num);
+ return;
+ }
+ state = &dev->ci[num];
+ dvb_ca_en50221_release(&state->ca);
+}
+
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
new file mode 100644
index 000000000000..6d8bf6277647
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -0,0 +1,1001 @@
+/*
+ * netup_unidvb_core.c
+ *
+ * Main module for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kmod.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "netup_unidvb.h"
+#include "cxd2841er.h"
+#include "horus3a.h"
+#include "ascot2e.h"
+#include "lnbh25.h"
+
+static int spi_enable;
+module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
+MODULE_AUTHOR("info@netup.ru");
+MODULE_VERSION(NETUP_UNIDVB_VERSION);
+MODULE_LICENSE("GPL");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* Avalon-MM PCI-E registers */
+#define AVL_PCIE_IENR 0x50
+#define AVL_PCIE_ISR 0x40
+#define AVL_IRQ_ENABLE 0x80
+#define AVL_IRQ_ASSERTED 0x80
+/* GPIO registers */
+#define GPIO_REG_IO 0x4880
+#define GPIO_REG_IO_TOGGLE 0x4882
+#define GPIO_REG_IO_SET 0x4884
+#define GPIO_REG_IO_CLEAR 0x4886
+/* GPIO bits */
+#define GPIO_FEA_RESET (1 << 0)
+#define GPIO_FEB_RESET (1 << 1)
+#define GPIO_RFA_CTL (1 << 2)
+#define GPIO_RFB_CTL (1 << 3)
+#define GPIO_FEA_TU_RESET (1 << 4)
+#define GPIO_FEB_TU_RESET (1 << 5)
+/* DMA base address */
+#define NETUP_DMA0_ADDR 0x4900
+#define NETUP_DMA1_ADDR 0x4940
+/* 8 DMA blocks * 128 packets * 188 bytes */
+#define NETUP_DMA_BLOCKS_COUNT 8
+#define NETUP_DMA_PACKETS_COUNT 128
+/* DMA status bits */
+#define BIT_DMA_RUN 1
+#define BIT_DMA_ERROR 2
+#define BIT_DMA_IRQ 0x200
+
+/**
+ * struct netup_dma_regs - the map of DMA module registers
+ * @ctrlstat_set: Control register, write to set control bits
+ * @ctrlstat_clear: Control register, write to clear control bits
+ * @start_addr_lo: DMA ring buffer start address, lower part
+ * @start_addr_hi: DMA ring buffer start address, higher part
+ * @size: DMA ring buffer size register
+ * Bits [0-7]: DMA packet size, 188 bytes
+ * Bits [8-15]: packets count in block, 128 packets
+ * Bits [24-31]: blocks count, 8 blocks
+ * @timeout: DMA timeout in units of 8 ns
+ * For example, a value of 375000000 equals 3 seconds
+ * @curr_addr_lo: Current ring buffer head address, lower part
+ * @curr_addr_hi: Current ring buffer head address, higher part
+ * @stat_pkt_received: Statistic register, not tested
+ * @stat_pkt_accepted: Statistic register, not tested
+ * @stat_pkt_overruns: Statistic register, not tested
+ * @stat_pkt_underruns: Statistic register, not tested
+ * @stat_fifo_overruns: Statistic register, not tested
+ */
+struct netup_dma_regs {
+ __le32 ctrlstat_set;
+ __le32 ctrlstat_clear;
+ __le32 start_addr_lo;
+ __le32 start_addr_hi;
+ __le32 size;
+ __le32 timeout;
+ __le32 curr_addr_lo;
+ __le32 curr_addr_hi;
+ __le32 stat_pkt_received;
+ __le32 stat_pkt_accepted;
+ __le32 stat_pkt_overruns;
+ __le32 stat_pkt_underruns;
+ __le32 stat_fifo_overruns;
+} __packed __aligned(1);
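+
+/*
+ * Worked example of the encodings documented above (a sketch for clarity,
+ * not used directly by the driver): the size register is programmed as
+ * (NETUP_DMA_BLOCKS_COUNT << 24) | (NETUP_DMA_PACKETS_COUNT << 8) | 188,
+ * i.e. 0x080080bc, and a 3 second timeout in 8 ns units is
+ * 3 * NSEC_PER_SEC / 8 = 375000000.
+ */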
+
+struct netup_unidvb_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ u32 size;
+};
+
+static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
+static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
+
+static struct cxd2841er_config demod_config = {
+ .i2c_addr = 0xc8
+};
+
+static struct horus3a_config horus3a_conf = {
+ .i2c_address = 0xc0,
+ .xtal_freq_mhz = 16,
+ .set_tuner_callback = netup_unidvb_tuner_ctrl
+};
+
+static struct ascot2e_config ascot2e_conf = {
+ .i2c_address = 0xc2,
+ .set_tuner_callback = netup_unidvb_tuner_ctrl
+};
+
+static struct lnbh25_config lnbh25_conf = {
+ .i2c_address = 0x10,
+ .data2_config = LNBH25_TEN | LNBH25_EXTM
+};
+
+static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
+{
+ u8 reg, mask;
+ struct netup_dma *dma = priv;
+ struct netup_unidvb_dev *ndev;
+
+ if (!priv)
+ return -EINVAL;
+ ndev = dma->ndev;
+ dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
+ __func__, dma->num, is_dvb_tc);
+ reg = readb(ndev->bmmio0 + GPIO_REG_IO);
+ mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
+ if (!is_dvb_tc)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
+ return 0;
+}
+
+static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
+{
+ u16 gpio_reg;
+
+ /* enable PCI-E interrupts */
+ writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
+ /* unreset frontends bits[0:1] */
+ writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
+ msleep(100);
+ gpio_reg =
+ GPIO_FEA_RESET | GPIO_FEB_RESET |
+ GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
+ GPIO_RFA_CTL | GPIO_RFB_CTL;
+ writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
+ __func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
+ (int)readb(ndev->bmmio0 + GPIO_REG_IO));
+
+}
+
+static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
+{
+ u32 irq_mask = (dma->num == 0 ?
+ NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);
+
+ dev_dbg(&dma->ndev->pci_dev->dev,
+ "%s(): DMA%d enable %d\n", __func__, dma->num, enable);
+ if (enable) {
+ writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
+ writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
+ } else {
+ writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
+ writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
+ }
+}
+
+static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
+{
+ u64 addr_curr;
+ u32 size;
+ unsigned long flags;
+ struct device *dev = &dma->ndev->pci_dev->dev;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
+ (u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
+ /* clear IRQ */
+ writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
+ /* sanity check */
+ if (addr_curr < dma->addr_phys ||
+ addr_curr > dma->addr_phys + dma->ring_buffer_size) {
+ if (addr_curr != 0) {
+ dev_err(dev,
+ "%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
+ __func__, addr_curr, (u64)dma->addr_phys,
+ (u64)(dma->addr_phys + dma->ring_buffer_size));
+ }
+ goto irq_handled;
+ }
+ size = (addr_curr >= dma->addr_last) ?
+ (u32)(addr_curr - dma->addr_last) :
+ (u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
+ if (dma->data_size != 0) {
+ dev_warn_ratelimited(dev,
+ "%s(): lost interrupt, data size %d\n",
+ __func__, dma->data_size);
+ dma->data_size += size;
+ }
+ if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
+ dma->data_size = size;
+ dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
+ }
+ dma->addr_last = addr_curr;
+ queue_work(dma->ndev->wq, &dma->work);
+irq_handled:
+ spin_unlock_irqrestore(&dma->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
+{
+ struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
+ struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
+ u32 reg40, reg_isr;
+ irqreturn_t iret = IRQ_NONE;
+
+ /* disable interrupts */
+ writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
+ /* check IRQ source */
+ reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
+ if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
+ /* IRQ is being signaled */
+ reg_isr = readw(ndev->bmmio0 + REG_ISR);
+ if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
+ iret = netup_i2c_interrupt(&ndev->i2c[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
+ iret = netup_i2c_interrupt(&ndev->i2c[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
+ iret = netup_spi_interrupt(ndev->spi);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
+ iret = netup_dma_interrupt(&ndev->dma[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
+ iret = netup_dma_interrupt(&ndev->dma[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
+ iret = netup_ci_interrupt(ndev);
+ } else {
+ dev_err(&pci_dev->dev,
+ "%s(): unknown interrupt 0x%x\n",
+ __func__, reg_isr);
+ }
+ }
+ /* re-enable interrupts */
+ writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
+ return iret;
+}
+
+static int netup_unidvb_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ void *alloc_ctxs[])
+{
+ struct netup_dma *dma = vb2_get_drv_priv(vq);
+
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+
+ *nplanes = 1;
+ if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
+ *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
+ sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
+ __func__, *nbuffers, sizes[0]);
+ return 0;
+}
+
+static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
+{
+ struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct netup_unidvb_buffer *buf = container_of(vb,
+ struct netup_unidvb_buffer, vb);
+
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
+ buf->size = 0;
+ return 0;
+}
+
+static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
+{
+ unsigned long flags;
+ struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
+ struct netup_unidvb_buffer *buf = container_of(vb,
+ struct netup_unidvb_buffer, vb);
+
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
+ spin_lock_irqsave(&dma->lock, flags);
+ list_add_tail(&buf->list, &dma->free_buffers);
+ spin_unlock_irqrestore(&dma->lock, flags);
+ mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
+}
+
+static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct netup_dma *dma = vb2_get_drv_priv(q);
+
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+ netup_unidvb_dma_enable(dma, 1);
+ return 0;
+}
+
+static void netup_unidvb_stop_streaming(struct vb2_queue *q)
+{
+ struct netup_dma *dma = vb2_get_drv_priv(q);
+
+ dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
+ netup_unidvb_dma_enable(dma, 0);
+ netup_unidvb_queue_cleanup(dma);
+}
+
+static struct vb2_ops dvb_qops = {
+ .queue_setup = netup_unidvb_queue_setup,
+ .buf_prepare = netup_unidvb_buf_prepare,
+ .buf_queue = netup_unidvb_buf_queue,
+ .start_streaming = netup_unidvb_start_streaming,
+ .stop_streaming = netup_unidvb_stop_streaming,
+};
+
+static int netup_unidvb_queue_init(struct netup_dma *dma,
+ struct vb2_queue *vb_queue)
+{
+ int res;
+
+ /* Init videobuf2 queue structure */
+ vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ vb_queue->drv_priv = dma;
+ vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
+ vb_queue->ops = &dvb_qops;
+ vb_queue->mem_ops = &vb2_vmalloc_memops;
+ vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ res = vb2_queue_init(vb_queue);
+ if (res != 0) {
+ dev_err(&dma->ndev->pci_dev->dev,
+ "%s(): vb2_queue_init failed (%d)\n", __func__, res);
+ }
+ return res;
+}
+
+static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
+ int num)
+{
+ struct vb2_dvb_frontend *fe0, *fe1, *fe2;
+
+ if (num < 0 || num > 1) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to init DVB bus %d\n", __func__, num);
+ return -ENODEV;
+ }
+ mutex_init(&ndev->frontends[num].lock);
+ INIT_LIST_HEAD(&ndev->frontends[num].felist);
+ if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
+ vb2_dvb_alloc_frontend(
+ &ndev->frontends[num], 2) == NULL ||
+ vb2_dvb_alloc_frontend(
+ &ndev->frontends[num], 3) == NULL) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to to alllocate vb2_dvb_frontend\n",
+ __func__);
+ return -ENOMEM;
+ }
+ fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
+ fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
+ fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
+ if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): frontends has not been allocated\n", __func__);
+ return -EINVAL;
+ }
+ netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
+ netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
+ netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
+ fe0->dvb.name = "netup_fe0";
+ fe1->dvb.name = "netup_fe1";
+ fe2->dvb.name = "netup_fe2";
+ fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
+ &demod_config, &ndev->i2c[num].adap);
+ if (fe0->dvb.frontend == NULL) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-S/S2 frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+ horus3a_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
+ &horus3a_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-S/S2 tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+ if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
+ &lnbh25_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach SEC frontend\n", __func__);
+ goto frontend_detach;
+ }
+ /* DVB-T/T2 frontend */
+ fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
+ &demod_config, &ndev->i2c[num].adap);
+ if (fe1->dvb.frontend == NULL) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-T frontend\n", __func__);
+ goto frontend_detach;
+ }
+ fe1->dvb.frontend->id = 1;
+ ascot2e_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
+ &ascot2e_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-T tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+ /* DVB-C/C2 frontend */
+ fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
+ &demod_config, &ndev->i2c[num].adap);
+ if (fe2->dvb.frontend == NULL) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-C frontend\n", __func__);
+ goto frontend_detach;
+ }
+ fe2->dvb.frontend->id = 2;
+ if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
+ &ascot2e_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach DVB-T/C tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+
+ if (vb2_dvb_register_bus(&ndev->frontends[num],
+ THIS_MODULE, NULL,
+ &ndev->pci_dev->dev, adapter_nr, 1)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to register DVB bus %d\n",
+ __func__, num);
+ goto frontend_detach;
+ }
+ dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
+ return 0;
+frontend_detach:
+ vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
+ return -EINVAL;
+}
+
+static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
+{
+ if (num < 0 || num > 1) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to unregister DVB bus %d\n",
+ __func__, num);
+ return;
+ }
+ vb2_dvb_unregister_bus(&ndev->frontends[num]);
+ dev_info(&ndev->pci_dev->dev,
+ "%s(): DVB bus %d unregistered\n", __func__, num);
+}
+
+static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
+{
+ int res;
+
+ res = netup_unidvb_dvb_init(ndev, 0);
+ if (res)
+ return res;
+ res = netup_unidvb_dvb_init(ndev, 1);
+ if (res) {
+ netup_unidvb_dvb_fini(ndev, 0);
+ return res;
+ }
+ return 0;
+}
+
+static int netup_unidvb_ring_copy(struct netup_dma *dma,
+ struct netup_unidvb_buffer *buf)
+{
+ u32 copy_bytes, ring_bytes;
+ u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
+ u8 *p = vb2_plane_vaddr(&buf->vb, 0);
+ struct netup_unidvb_dev *ndev = dma->ndev;
+
+ if (p == NULL) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): buffer is NULL\n", __func__);
+ return -EINVAL;
+ }
+ p += buf->size;
+ if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
+ ring_bytes = dma->ring_buffer_size - dma->data_offset;
+ copy_bytes = (ring_bytes > buff_bytes) ?
+ buff_bytes : ring_bytes;
+ memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+ p += copy_bytes;
+ buf->size += copy_bytes;
+ buff_bytes -= copy_bytes;
+ dma->data_size -= copy_bytes;
+ dma->data_offset += copy_bytes;
+ if (dma->data_offset == dma->ring_buffer_size)
+ dma->data_offset = 0;
+ }
+ if (buff_bytes > 0) {
+ ring_bytes = dma->data_size;
+ copy_bytes = (ring_bytes > buff_bytes) ?
+ buff_bytes : ring_bytes;
+ memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
+ buf->size += copy_bytes;
+ dma->data_size -= copy_bytes;
+ dma->data_offset += copy_bytes;
+ if (dma->data_offset == dma->ring_buffer_size)
+ dma->data_offset = 0;
+ }
+ return 0;
+}
+
+static void netup_unidvb_dma_worker(struct work_struct *work)
+{
+ struct netup_dma *dma = container_of(work, struct netup_dma, work);
+ struct netup_unidvb_dev *ndev = dma->ndev;
+ struct netup_unidvb_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ if (dma->data_size == 0) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): data_size == 0\n", __func__);
+ goto work_done;
+ }
+ while (dma->data_size > 0) {
+ if (list_empty(&dma->free_buffers)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): no free buffers\n", __func__);
+ goto work_done;
+ }
+ buf = list_first_entry(&dma->free_buffers,
+ struct netup_unidvb_buffer, list);
+ if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): buffer overflow, size %d\n",
+ __func__, buf->size);
+ goto work_done;
+ }
+ if (netup_unidvb_ring_copy(dma, buf))
+ goto work_done;
+ if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
+ list_del(&buf->list);
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): buffer %p done, size %d\n",
+ __func__, buf, buf->size);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ vb2_set_plane_payload(&buf->vb, 0, buf->size);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ }
+ }
+work_done:
+ dma->data_size = 0;
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
+{
+ struct netup_unidvb_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ while (!list_empty(&dma->free_buffers)) {
+ buf = list_first_entry(&dma->free_buffers,
+ struct netup_unidvb_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static void netup_unidvb_dma_timeout(unsigned long data)
+{
+ struct netup_dma *dma = (struct netup_dma *)data;
+ struct netup_unidvb_dev *ndev = dma->ndev;
+
+ dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
+ netup_unidvb_queue_cleanup(dma);
+}
+
+static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
+{
+ struct netup_dma *dma;
+ struct device *dev = &ndev->pci_dev->dev;
+
+ if (num < 0 || num > 1) {
+ dev_err(dev, "%s(): unable to register DMA%d\n",
+ __func__, num);
+ return -ENODEV;
+ }
+ dma = &ndev->dma[num];
+ dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
+ dma->num = num;
+ dma->ndev = ndev;
+ spin_lock_init(&dma->lock);
+ INIT_WORK(&dma->work, netup_unidvb_dma_worker);
+ INIT_LIST_HEAD(&dma->free_buffers);
+ setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
+ (unsigned long)dma);
+ dma->ring_buffer_size = ndev->dma_size / 2;
+ dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
+ dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
+ dma->ring_buffer_size * num);
+ dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
+ __func__, num, dma->addr_virt,
+ (unsigned long long)dma->addr_phys,
+ dma->ring_buffer_size);
+ memset_io(dma->addr_virt, 0, dma->ring_buffer_size);
+ dma->addr_last = dma->addr_phys;
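+ /*
+ * The DMA engine takes a 30-bit address: the low 30 bits are written
+ * to start_addr_lo below, while the top two bits of the physical
+ * address are kept in high_addr and programmed into the register at
+ * bmmio0 + 0x1000.
+ */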
+ dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
+ dma->regs = (struct netup_dma_regs *)(num == 0 ?
+ ndev->bmmio0 + NETUP_DMA0_ADDR :
+ ndev->bmmio0 + NETUP_DMA1_ADDR);
+ writel((NETUP_DMA_BLOCKS_COUNT << 24) |
+ (NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
+ writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
+ writel(0, &dma->regs->start_addr_hi);
+ writel(dma->high_addr, ndev->bmmio0 + 0x1000);
+ writel(375000000, &dma->regs->timeout);
+ msleep(1000);
+ writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
+ return 0;
+}
+
+static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
+{
+ struct netup_dma *dma;
+
+ if (num < 0 || num > 1)
+ return;
+ dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
+ dma = &ndev->dma[num];
+ netup_unidvb_dma_enable(dma, 0);
+ msleep(50);
+ cancel_work_sync(&dma->work);
+ del_timer(&dma->timeout);
+}
+
+static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
+{
+ int res;
+
+ res = netup_unidvb_dma_init(ndev, 0);
+ if (res)
+ return res;
+ res = netup_unidvb_dma_init(ndev, 1);
+ if (res) {
+ netup_unidvb_dma_fini(ndev, 0);
+ return res;
+ }
+ netup_unidvb_dma_enable(&ndev->dma[0], 0);
+ netup_unidvb_dma_enable(&ndev->dma[1], 0);
+ return 0;
+}
+
+static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
+ struct pci_dev *pci_dev)
+{
+ int res;
+
+ writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
+ res = netup_unidvb_ci_register(ndev, 0, pci_dev);
+ if (res)
+ return res;
+ res = netup_unidvb_ci_register(ndev, 1, pci_dev);
+ if (res)
+ netup_unidvb_ci_unregister(ndev, 0);
+ return res;
+}
+
+static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
+{
+ if (!request_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to request MMIO bar 0 at 0x%llx\n",
+ __func__,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
+ return -EBUSY;
+ }
+ if (!request_mem_region(pci_resource_start(pci_dev, 1),
+ pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to request MMIO bar 1 at 0x%llx\n",
+ __func__,
+ (unsigned long long)pci_resource_start(pci_dev, 1));
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int netup_unidvb_request_modules(struct device *dev)
+{
+ static const char * const modules[] = {
+ "lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
+ };
+ const char * const *curr_mod = modules;
+ int err;
+
+ while (*curr_mod != NULL) {
+ err = request_module(*curr_mod);
+ if (err) {
+ dev_warn(dev, "request_module(%s) failed: %d\n",
+ *curr_mod, err);
+ }
+ ++curr_mod;
+ }
+ return 0;
+}
+
+static int netup_unidvb_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ u8 board_revision;
+ u16 board_vendor;
+ struct netup_unidvb_dev *ndev;
+ int old_firmware = 0;
+
+ netup_unidvb_request_modules(&pci_dev->dev);
+
+ /* Check card revision */
+ if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
+ dev_err(&pci_dev->dev,
+ "netup_unidvb: expected card revision %d, got %d\n",
+ NETUP_PCI_DEV_REVISION, pci_dev->revision);
+ dev_err(&pci_dev->dev,
+ "Please upgrade firmware!\n");
+ dev_err(&pci_dev->dev,
+ "Instructions on http://www.netup.tv\n");
+ old_firmware = 1;
+ spi_enable = 1;
+ }
+
+ /* allocate device context */
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto dev_alloc_err;
+ ndev->old_fw = old_firmware;
+ ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
+ if (!ndev->wq) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to create workqueue\n", __func__);
+ goto wq_create_err;
+ }
+ ndev->pci_dev = pci_dev;
+ ndev->pci_bus = pci_dev->bus->number;
+ ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
+ ndev->pci_func = PCI_FUNC(pci_dev->devfn);
+ ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
+ pci_set_drvdata(pci_dev, ndev);
+ /* PCI init */
+ dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
+ __func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);
+
+ if (pci_enable_device(pci_dev)) {
+ dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
+ __func__);
+ goto pci_enable_err;
+ }
+ /* read PCI info */
+ pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
+ pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
+ if (board_vendor != NETUP_VENDOR_ID) {
+ dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
+ __func__, board_vendor);
+ goto pci_detect_err;
+ }
+ dev_info(&pci_dev->dev,
+ "%s(): board vendor 0x%x, revision 0x%x\n",
+ __func__, board_vendor, board_revision);
+ pci_set_master(pci_dev);
+ if (!pci_dma_supported(pci_dev, 0xffffffff)) {
+ dev_err(&pci_dev->dev,
+ "%s(): 32bit PCI DMA is not supported\n", __func__);
+ goto pci_detect_err;
+ }
+ dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+ /* Adjust PCIe completion timeout. */
+ pcie_capability_clear_and_set_word(pci_dev,
+ PCI_EXP_DEVCTL2, 0xf, 0x2);
+
+ if (netup_unidvb_request_mmio(pci_dev)) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to request MMIO regions\n", __func__);
+ goto pci_detect_err;
+ }
+ ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ if (!ndev->lmmio0) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to remap MMIO bar 0\n", __func__);
+ goto pci_bar0_error;
+ }
+ ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
+ pci_resource_len(pci_dev, 1));
+ if (!ndev->lmmio1) {
+ dev_err(&pci_dev->dev,
+ "%s(): unable to remap MMIO bar 1\n", __func__);
+ goto pci_bar1_error;
+ }
+ ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
+ ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
+ dev_info(&pci_dev->dev,
+ "%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
+ __func__,
+ ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
+ ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
+ pci_dev->irq);
+ if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+ "netup_unidvb", pci_dev) < 0) {
+ dev_err(&pci_dev->dev,
+ "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+ goto irq_request_err;
+ }
+ ndev->dma_size = 2 * 188 *
+ NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
+ ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
+ ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
+ if (!ndev->dma_virt) {
+ dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
+ __func__);
+ goto dma_alloc_err;
+ }
+ netup_unidvb_dev_enable(ndev);
+ if (spi_enable && netup_spi_init(ndev)) {
+ dev_warn(&pci_dev->dev,
+ "netup_unidvb: SPI flash setup failed\n");
+ goto spi_setup_err;
+ }
+ if (old_firmware) {
+ dev_err(&pci_dev->dev,
+ "netup_unidvb: card initialization was incomplete\n");
+ return 0;
+ }
+ if (netup_i2c_register(ndev)) {
+ dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
+ goto i2c_setup_err;
+ }
+ /* enable I2C IRQs */
+ writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
+ ndev->bmmio0 + REG_IMASK_SET);
+ usleep_range(5000, 10000);
+ if (netup_unidvb_dvb_setup(ndev)) {
+ dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
+ goto dvb_setup_err;
+ }
+ if (netup_unidvb_ci_setup(ndev, pci_dev)) {
+ dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
+ goto ci_setup_err;
+ }
+ if (netup_unidvb_dma_setup(ndev)) {
+ dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
+ goto dma_setup_err;
+ }
+ dev_info(&pci_dev->dev,
+ "netup_unidvb: device has been initialized\n");
+ return 0;
+dma_setup_err:
+ netup_unidvb_ci_unregister(ndev, 0);
+ netup_unidvb_ci_unregister(ndev, 1);
+ci_setup_err:
+ netup_unidvb_dvb_fini(ndev, 0);
+ netup_unidvb_dvb_fini(ndev, 1);
+dvb_setup_err:
+ netup_i2c_unregister(ndev);
+i2c_setup_err:
+ if (ndev->spi)
+ netup_spi_release(ndev);
+spi_setup_err:
+ dma_free_coherent(&pci_dev->dev, ndev->dma_size,
+ ndev->dma_virt, ndev->dma_phys);
+dma_alloc_err:
+ free_irq(pci_dev->irq, pci_dev);
+irq_request_err:
+ iounmap(ndev->lmmio1);
+pci_bar1_error:
+ iounmap(ndev->lmmio0);
+pci_bar0_error:
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ release_mem_region(pci_resource_start(pci_dev, 1),
+ pci_resource_len(pci_dev, 1));
+pci_detect_err:
+ pci_disable_device(pci_dev);
+pci_enable_err:
+ pci_set_drvdata(pci_dev, NULL);
+ destroy_workqueue(ndev->wq);
+wq_create_err:
+ kfree(ndev);
+dev_alloc_err:
+ dev_err(&pci_dev->dev,
+ "%s(): failed to initizalize device\n", __func__);
+ return -EIO;
+}
+
+static void netup_unidvb_finidev(struct pci_dev *pci_dev)
+{
+ struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
+
+ dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
+ if (!ndev->old_fw) {
+ netup_unidvb_dma_fini(ndev, 0);
+ netup_unidvb_dma_fini(ndev, 1);
+ netup_unidvb_ci_unregister(ndev, 0);
+ netup_unidvb_ci_unregister(ndev, 1);
+ netup_unidvb_dvb_fini(ndev, 0);
+ netup_unidvb_dvb_fini(ndev, 1);
+ netup_i2c_unregister(ndev);
+ }
+ if (ndev->spi)
+ netup_spi_release(ndev);
+ writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
+ dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
+ ndev->dma_virt, ndev->dma_phys);
+ free_irq(pci_dev->irq, pci_dev);
+ iounmap(ndev->lmmio0);
+ iounmap(ndev->lmmio1);
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ release_mem_region(pci_resource_start(pci_dev, 1),
+ pci_resource_len(pci_dev, 1));
+ pci_disable_device(pci_dev);
+ pci_set_drvdata(pci_dev, NULL);
+ destroy_workqueue(ndev->wq);
+ kfree(ndev);
+ dev_info(&pci_dev->dev,
+ "%s(): device has been successfully stopped\n", __func__);
+}
+
+
+static struct pci_device_id netup_unidvb_pci_tbl[] = {
+ { PCI_DEVICE(0x1b55, 0x18f6) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);
+
+static struct pci_driver netup_unidvb_pci_driver = {
+ .name = "netup_unidvb",
+ .id_table = netup_unidvb_pci_tbl,
+ .probe = netup_unidvb_initdev,
+ .remove = netup_unidvb_finidev,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+static int __init netup_unidvb_init(void)
+{
+ return pci_register_driver(&netup_unidvb_pci_driver);
+}
+
+static void __exit netup_unidvb_fini(void)
+{
+ pci_unregister_driver(&netup_unidvb_pci_driver);
+}
+
+module_init(netup_unidvb_init);
+module_exit(netup_unidvb_fini);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
new file mode 100644
index 000000000000..eaaa2d0a5fba
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -0,0 +1,381 @@
+/*
+ * netup_unidvb_i2c.c
+ *
+ * Internal I2C bus driver for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "netup_unidvb.h"
+
+#define NETUP_I2C_BUS0_ADDR 0x4800
+#define NETUP_I2C_BUS1_ADDR 0x4840
+#define NETUP_I2C_TIMEOUT 1000
+
+/* twi_ctrl0_stat reg bits */
+#define TWI_IRQEN_COMPL 0x1
+#define TWI_IRQEN_ANACK 0x2
+#define TWI_IRQEN_DNACK 0x4
+#define TWI_IRQ_COMPL (TWI_IRQEN_COMPL << 8)
+#define TWI_IRQ_ANACK (TWI_IRQEN_ANACK << 8)
+#define TWI_IRQ_DNACK (TWI_IRQEN_DNACK << 8)
+#define TWI_IRQ_TX 0x800
+#define TWI_IRQ_RX 0x1000
+#define TWI_IRQEN (TWI_IRQEN_COMPL | TWI_IRQEN_ANACK | TWI_IRQEN_DNACK)
+/* twi_addr_ctrl1 reg bits*/
+#define TWI_TRANSFER 0x100
+#define TWI_NOSTOP 0x200
+#define TWI_SOFT_RESET 0x2000
+/* twi_clkdiv reg value */
+#define TWI_CLKDIV 156
+/* fifo_stat_ctrl reg bits */
+#define FIFO_IRQEN 0x8000
+#define FIFO_RESET 0x4000
+/* FIFO size */
+#define FIFO_SIZE 16
+
+struct netup_i2c_fifo_regs {
+ union {
+ __u8 data8;
+ __le16 data16;
+ __le32 data32;
+ };
+ __u8 padding[4];
+ __le16 stat_ctrl;
+} __packed __aligned(1);
+
+struct netup_i2c_regs {
+ __le16 clkdiv;
+ __le16 twi_ctrl0_stat;
+ __le16 twi_addr_ctrl1;
+ __le16 length;
+ __u8 padding1[8];
+ struct netup_i2c_fifo_regs tx_fifo;
+ __u8 padding2[6];
+ struct netup_i2c_fifo_regs rx_fifo;
+} __packed __aligned(1);
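+
+/*
+ * With the __packed layout the register offsets from the bus base are:
+ * clkdiv +0x00, twi_ctrl0_stat +0x02, twi_addr_ctrl1 +0x04, length +0x06,
+ * tx_fifo +0x10 (stat_ctrl +0x18), rx_fifo +0x20 (stat_ctrl +0x28).
+ * (Derived from the struct layout above, not from hardware documentation.)
+ */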
+
+irqreturn_t netup_i2c_interrupt(struct netup_i2c *i2c)
+{
+ u16 reg, tmp;
+ unsigned long flags;
+ irqreturn_t iret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ reg = readw(&i2c->regs->twi_ctrl0_stat);
+ writew(reg & ~TWI_IRQEN, &i2c->regs->twi_ctrl0_stat);
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): twi_ctrl0_state 0x%x\n", __func__, reg);
+ if ((reg & TWI_IRQEN_COMPL) != 0 && (reg & TWI_IRQ_COMPL)) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): TWI_IRQEN_COMPL\n", __func__);
+ i2c->state = STATE_DONE;
+ goto irq_ok;
+ }
+ if ((reg & TWI_IRQEN_ANACK) != 0 && (reg & TWI_IRQ_ANACK)) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): TWI_IRQEN_ANACK\n", __func__);
+ i2c->state = STATE_ERROR;
+ goto irq_ok;
+ }
+ if ((reg & TWI_IRQEN_DNACK) != 0 && (reg & TWI_IRQ_DNACK)) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): TWI_IRQEN_DNACK\n", __func__);
+ i2c->state = STATE_ERROR;
+ goto irq_ok;
+ }
+ if ((reg & TWI_IRQ_RX) != 0) {
+ tmp = readw(&i2c->regs->rx_fifo.stat_ctrl);
+ writew(tmp & ~FIFO_IRQEN, &i2c->regs->rx_fifo.stat_ctrl);
+ i2c->state = STATE_WANT_READ;
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): want read\n", __func__);
+ goto irq_ok;
+ }
+ if ((reg & TWI_IRQ_TX) != 0) {
+ tmp = readw(&i2c->regs->tx_fifo.stat_ctrl);
+ writew(tmp & ~FIFO_IRQEN, &i2c->regs->tx_fifo.stat_ctrl);
+ i2c->state = STATE_WANT_WRITE;
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): want write\n", __func__);
+ goto irq_ok;
+ }
+ dev_warn(&i2c->adap.dev, "%s(): not mine interrupt\n", __func__);
+ iret = IRQ_NONE;
+irq_ok:
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ if (iret == IRQ_HANDLED)
+ wake_up(&i2c->wq);
+ return iret;
+}
+
+static void netup_i2c_reset(struct netup_i2c *i2c)
+{
+ dev_dbg(i2c->adap.dev.parent, "%s()\n", __func__);
+ i2c->state = STATE_DONE;
+ writew(TWI_SOFT_RESET, &i2c->regs->twi_addr_ctrl1);
+ writew(TWI_CLKDIV, &i2c->regs->clkdiv);
+ writew(FIFO_RESET, &i2c->regs->tx_fifo.stat_ctrl);
+ writew(FIFO_RESET, &i2c->regs->rx_fifo.stat_ctrl);
+ writew(0x800, &i2c->regs->tx_fifo.stat_ctrl);
+ writew(0x800, &i2c->regs->rx_fifo.stat_ctrl);
+}
+
+static void netup_i2c_fifo_tx(struct netup_i2c *i2c)
+{
+ u8 data;
+ u32 fifo_space = FIFO_SIZE -
+ (readw(&i2c->regs->tx_fifo.stat_ctrl) & 0x3f);
+ u32 msg_length = i2c->msg->len - i2c->xmit_size;
+
+ msg_length = (msg_length < fifo_space ? msg_length : fifo_space);
+ while (msg_length--) {
+ data = i2c->msg->buf[i2c->xmit_size++];
+ writeb(data, &i2c->regs->tx_fifo.data8);
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): write 0x%02x\n", __func__, data);
+ }
+ if (i2c->xmit_size < i2c->msg->len) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): TX IRQ enabled\n", __func__);
+ writew(readw(&i2c->regs->tx_fifo.stat_ctrl) | FIFO_IRQEN,
+ &i2c->regs->tx_fifo.stat_ctrl);
+ }
+}
+
+static void netup_i2c_fifo_rx(struct netup_i2c *i2c)
+{
+ u8 data;
+ u32 fifo_size = readw(&i2c->regs->rx_fifo.stat_ctrl) & 0x3f;
+
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): RX fifo size %d\n", __func__, fifo_size);
+ while (fifo_size--) {
+ data = readb(&i2c->regs->rx_fifo.data8);
+ if ((i2c->msg->flags & I2C_M_RD) != 0 &&
+ i2c->xmit_size < i2c->msg->len) {
+ i2c->msg->buf[i2c->xmit_size++] = data;
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): read 0x%02x\n", __func__, data);
+ }
+ }
+ if (i2c->xmit_size < i2c->msg->len) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): RX IRQ enabled\n", __func__);
+ writew(readw(&i2c->regs->rx_fifo.stat_ctrl) | FIFO_IRQEN,
+ &i2c->regs->rx_fifo.stat_ctrl);
+ }
+}
+
+static void netup_i2c_start_xfer(struct netup_i2c *i2c)
+{
+ u16 rdflag = ((i2c->msg->flags & I2C_M_RD) ? 1 : 0);
+ u16 reg = readw(&i2c->regs->twi_ctrl0_stat);
+
+ writew(TWI_IRQEN | reg, &i2c->regs->twi_ctrl0_stat);
+ writew(i2c->msg->len, &i2c->regs->length);
+ writew(TWI_TRANSFER | (i2c->msg->addr << 1) | rdflag,
+ &i2c->regs->twi_addr_ctrl1);
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): length %d twi_addr_ctrl1 0x%x twi_ctrl0_stat 0x%x\n",
+ __func__, readw(&i2c->regs->length),
+ readw(&i2c->regs->twi_addr_ctrl1),
+ readw(&i2c->regs->twi_ctrl0_stat));
+ i2c->state = STATE_WAIT;
+ i2c->xmit_size = 0;
+ if (!rdflag)
+ netup_i2c_fifo_tx(i2c);
+ else
+ writew(FIFO_IRQEN | readw(&i2c->regs->rx_fifo.stat_ctrl),
+ &i2c->regs->rx_fifo.stat_ctrl);
+}
+
+static int netup_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ unsigned long flags;
+ int i, trans_done, res = num;
+ struct netup_i2c *i2c = i2c_get_adapdata(adap);
+ u16 reg;
+
+ if (num <= 0) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): num == %d\n", __func__, num);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&i2c->lock, flags);
+ if (i2c->state != STATE_DONE) {
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): i2c->state == %d, resetting I2C\n",
+ __func__, i2c->state);
+ netup_i2c_reset(i2c);
+ }
+ dev_dbg(i2c->adap.dev.parent, "%s() num %d\n", __func__, num);
+ for (i = 0; i < num; i++) {
+ i2c->msg = &msgs[i];
+ netup_i2c_start_xfer(i2c);
+ trans_done = 0;
+ while (!trans_done) {
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ if (wait_event_timeout(i2c->wq,
+ i2c->state != STATE_WAIT,
+ msecs_to_jiffies(NETUP_I2C_TIMEOUT))) {
+ spin_lock_irqsave(&i2c->lock, flags);
+ switch (i2c->state) {
+ case STATE_WANT_READ:
+ netup_i2c_fifo_rx(i2c);
+ break;
+ case STATE_WANT_WRITE:
+ netup_i2c_fifo_tx(i2c);
+ break;
+ case STATE_DONE:
+ if ((i2c->msg->flags & I2C_M_RD) != 0 &&
+ i2c->xmit_size != i2c->msg->len)
+ netup_i2c_fifo_rx(i2c);
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): msg %d OK\n",
+ __func__, i);
+ trans_done = 1;
+ break;
+ case STATE_ERROR:
+ res = -EIO;
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): error state\n",
+ __func__);
+ goto done;
+ default:
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): invalid state %d\n",
+ __func__, i2c->state);
+ res = -EINVAL;
+ goto done;
+ }
+ if (!trans_done) {
+ i2c->state = STATE_WAIT;
+ reg = readw(
+ &i2c->regs->twi_ctrl0_stat);
+ writew(TWI_IRQEN | reg,
+ &i2c->regs->twi_ctrl0_stat);
+ }
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ } else {
+ spin_lock_irqsave(&i2c->lock, flags);
+ dev_dbg(i2c->adap.dev.parent,
+ "%s(): wait timeout\n", __func__);
+ res = -ETIMEDOUT;
+ goto done;
+ }
+ spin_lock_irqsave(&i2c->lock, flags);
+ }
+ }
+done:
+ spin_unlock_irqrestore(&i2c->lock, flags);
+ dev_dbg(i2c->adap.dev.parent, "%s(): result %d\n", __func__, res);
+ return res;
+}
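+
+/*
+ * Usage sketch (not part of the driver): clients reach this adapter
+ * through the generic I2C core, e.g. a one-byte register read as a
+ * write-then-read pair ("client_addr" is illustrative):
+ *
+ *	u8 reg = 0x00, val;
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = client_addr, .flags = 0, .len = 1, .buf = &reg },
+ *		{ .addr = client_addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ *	};
+ *
+ *	if (i2c_transfer(&i2c->adap, msgs, 2) != 2)
+ *		return -EIO;
+ *
+ * i2c_transfer() ends up in netup_i2c_xfer() above.
+ */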
+
+static u32 netup_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm netup_i2c_algorithm = {
+ .master_xfer = netup_i2c_xfer,
+ .functionality = netup_i2c_func,
+};
+
+static struct i2c_adapter netup_i2c_adapter = {
+ .owner = THIS_MODULE,
+ .name = NETUP_UNIDVB_NAME,
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .algo = &netup_i2c_algorithm,
+};
+
+static int netup_i2c_init(struct netup_unidvb_dev *ndev, int bus_num)
+{
+ int ret;
+ struct netup_i2c *i2c;
+
+ if (bus_num < 0 || bus_num > 1) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): invalid bus_num %d\n", __func__, bus_num);
+ return -EINVAL;
+ }
+ i2c = &ndev->i2c[bus_num];
+ spin_lock_init(&i2c->lock);
+ init_waitqueue_head(&i2c->wq);
+ i2c->regs = (struct netup_i2c_regs *)(ndev->bmmio0 +
+ (bus_num == 0 ? NETUP_I2C_BUS0_ADDR : NETUP_I2C_BUS1_ADDR));
+ netup_i2c_reset(i2c);
+ i2c->adap = netup_i2c_adapter;
+ i2c->adap.dev.parent = &ndev->pci_dev->dev;
+ i2c_set_adapdata(&i2c->adap, i2c);
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): failed to add I2C adapter\n", __func__);
+ return ret;
+ }
+ dev_info(&ndev->pci_dev->dev,
+ "%s(): registered I2C bus %d at 0x%x\n",
+ __func__,
+ bus_num, (bus_num == 0 ?
+ NETUP_I2C_BUS0_ADDR :
+ NETUP_I2C_BUS1_ADDR));
+ return 0;
+}
+
+static void netup_i2c_remove(struct netup_unidvb_dev *ndev, int bus_num)
+{
+ struct netup_i2c *i2c;
+
+ if (bus_num < 0 || bus_num > 1) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): invalid bus number %d\n", __func__, bus_num);
+ return;
+ }
+ i2c = &ndev->i2c[bus_num];
+ netup_i2c_reset(i2c);
+ /* remove adapter */
+ i2c_del_adapter(&i2c->adap);
+ dev_info(&ndev->pci_dev->dev,
+ "netup_i2c_remove: unregistered I2C bus %d\n", bus_num);
+}
+
+int netup_i2c_register(struct netup_unidvb_dev *ndev)
+{
+ int ret;
+
+ ret = netup_i2c_init(ndev, 0);
+ if (ret)
+ return ret;
+ ret = netup_i2c_init(ndev, 1);
+ if (ret) {
+ netup_i2c_remove(ndev, 0);
+ return ret;
+ }
+ return 0;
+}
+
+void netup_i2c_unregister(struct netup_unidvb_dev *ndev)
+{
+ netup_i2c_remove(ndev, 0);
+ netup_i2c_remove(ndev, 1);
+}
+
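
The transfer loop above drops i2c->lock before sleeping in wait_event_timeout() and retakes it before inspecting i2c->state, so the interrupt handler can take the lock and advance the state machine in between. A minimal userspace analogue of that discipline, using POSIX threads; all names below are illustrative, not driver code, and pthread_cond_timedwait() drops and retakes the mutex atomically, which is what the driver does by hand:

    /* Build: cc -pthread wait_demo.c -o wait_demo */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    enum demo_state { STATE_WAIT, STATE_DONE };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
    static enum demo_state state = STATE_WAIT;

    static void *irq_thread(void *arg)
    {
            struct timespec ts = { 0, 100 * 1000 * 1000 };

            (void)arg;
            nanosleep(&ts, NULL);           /* pretend the hardware takes 100 ms */
            pthread_mutex_lock(&lock);
            state = STATE_DONE;
            pthread_cond_signal(&wq);       /* like wake_up(&i2c->wq) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            struct timespec deadline;
            int ret = 0;

            pthread_create(&t, NULL, irq_thread, NULL);
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;           /* like msecs_to_jiffies(NETUP_I2C_TIMEOUT) */

            pthread_mutex_lock(&lock);
            while (state == STATE_WAIT && ret == 0)
                    /* atomically releases the mutex while sleeping and
                     * reacquires it on wakeup or timeout */
                    ret = pthread_cond_timedwait(&wq, &lock, &deadline);
            printf("state=%s\n", state == STATE_DONE ? "DONE" : "timeout");
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }
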
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
new file mode 100644
index 000000000000..f55b3276f28d
--- /dev/null
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -0,0 +1,252 @@
+/*
+ * netup_unidvb_spi.c
+ *
+ * Internal SPI driver for NetUP Universal Dual DVB-CI
+ *
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "netup_unidvb.h"
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/partitions.h>
+#include <mtd/mtd-abi.h>
+
+#define NETUP_SPI_CTRL_IRQ 0x1000
+#define NETUP_SPI_CTRL_IMASK 0x2000
+#define NETUP_SPI_CTRL_START 0x8000
+#define NETUP_SPI_CTRL_LAST_CS 0x4000
+
+#define NETUP_SPI_TIMEOUT 6000
+
+enum netup_spi_state {
+ SPI_STATE_START,
+ SPI_STATE_DONE,
+};
+
+struct netup_spi_regs {
+ __u8 data[1024];
+ __le16 control_stat;
+ __le16 clock_divider;
+} __packed __aligned(1);
+
+struct netup_spi {
+ struct device *dev;
+ struct spi_master *master;
+ struct netup_spi_regs *regs;
+ u8 __iomem *mmio;
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ enum netup_spi_state state;
+};
+
+static char netup_spi_name[64] = "fpga";
+
+static struct mtd_partition netup_spi_flash_partitions = {
+ .name = netup_spi_name,
+ .size = 0x1000000, /* 16MB */
+ .offset = 0,
+ .mask_flags = MTD_CAP_ROM
+};
+
+static struct flash_platform_data spi_flash_data = {
+ .name = "netup0_m25p128",
+ .parts = &netup_spi_flash_partitions,
+ .nr_parts = 1,
+};
+
+static struct spi_board_info netup_spi_board = {
+ .modalias = "m25p128",
+ .max_speed_hz = 11000000,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .platform_data = &spi_flash_data,
+};
+
+irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
+{
+ u16 reg;
+ unsigned long flags;
+
+ if (!spi) {
+ pr_debug("%s(): SPI not initialized\n",
+ __func__);
+ return IRQ_NONE;
+ }
+ spin_lock_irqsave(&spi->lock, flags);
+ reg = readw(&spi->regs->control_stat);
+ if (!(reg & NETUP_SPI_CTRL_IRQ)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ dev_dbg(&spi->master->dev,
+ "%s(): interrupt not ours\n", __func__);
+ return IRQ_NONE;
+ }
+ writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
+ reg = readw(&spi->regs->control_stat);
+ writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
+ spi->state = SPI_STATE_DONE;
+ wake_up(&spi->waitq);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ dev_dbg(&spi->master->dev,
+ "%s(): SPI interrupt handled\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static int netup_spi_transfer(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct netup_spi *spi = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ int result = 0;
+ u32 tr_size;
+
+ /* reset CS */
+ writew(NETUP_SPI_CTRL_LAST_CS, &spi->regs->control_stat);
+ writew(0, &spi->regs->control_stat);
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ tr_size = t->len;
+ while (tr_size) {
+ u32 frag_offset = t->len - tr_size;
+ u32 frag_size = (tr_size > sizeof(spi->regs->data)) ?
+ sizeof(spi->regs->data) : tr_size;
+ int frag_last = 0;
+
+ if (list_is_last(&t->transfer_list,
+ &msg->transfers) &&
+ frag_offset + frag_size == t->len) {
+ frag_last = 1;
+ }
+ if (t->tx_buf) {
+ memcpy_toio(spi->regs->data,
+ t->tx_buf + frag_offset,
+ frag_size);
+ } else {
+ memset_io(spi->regs->data,
+ 0, frag_size);
+ }
+ spi->state = SPI_STATE_START;
+ writew((frag_size & 0x3ff) |
+ NETUP_SPI_CTRL_IMASK |
+ NETUP_SPI_CTRL_START |
+ (frag_last ? NETUP_SPI_CTRL_LAST_CS : 0),
+ &spi->regs->control_stat);
+ dev_dbg(&spi->master->dev,
+ "%s(): control_stat 0x%04x\n",
+ __func__, readw(&spi->regs->control_stat));
+ wait_event_timeout(spi->waitq,
+ spi->state != SPI_STATE_START,
+ msecs_to_jiffies(NETUP_SPI_TIMEOUT));
+ if (spi->state == SPI_STATE_DONE) {
+ if (t->rx_buf) {
+ memcpy_fromio(t->rx_buf + frag_offset,
+ spi->regs->data, frag_size);
+ }
+ } else {
+ if (spi->state == SPI_STATE_START) {
+ dev_dbg(&spi->master->dev,
+ "%s(): transfer timeout\n",
+ __func__);
+ } else {
+ dev_dbg(&spi->master->dev,
+ "%s(): invalid state %d\n",
+ __func__, spi->state);
+ }
+ result = -EIO;
+ goto done;
+ }
+ tr_size -= frag_size;
+ msg->actual_length += frag_size;
+ }
+ }
+done:
+ msg->status = result;
+ spi_finalize_current_message(master);
+ return result;
+}
+
+static int netup_spi_setup(struct spi_device *spi)
+{
+ return 0;
+}
+
+int netup_spi_init(struct netup_unidvb_dev *ndev)
+{
+ struct spi_master *master;
+ struct netup_spi *nspi;
+
+ master = spi_alloc_master(&ndev->pci_dev->dev,
+ sizeof(struct netup_spi));
+ if (!master) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to alloc SPI master\n", __func__);
+ return -EINVAL;
+ }
+ nspi = spi_master_get_devdata(master);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bus_num = -1;
+ master->num_chipselect = 1;
+ master->transfer_one_message = netup_spi_transfer;
+ master->setup = netup_spi_setup;
+ spin_lock_init(&nspi->lock);
+ init_waitqueue_head(&nspi->waitq);
+ nspi->master = master;
+ nspi->regs = (struct netup_spi_regs *)(ndev->bmmio0 + 0x4000);
+ writew(2, &nspi->regs->clock_divider);
+ writew(NETUP_UNIDVB_IRQ_SPI, ndev->bmmio0 + REG_IMASK_SET);
+ ndev->spi = nspi;
+ if (spi_register_master(master)) {
+ ndev->spi = NULL;
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to register SPI bus\n", __func__);
+ return -EINVAL;
+ }
+ snprintf(netup_spi_name,
+ sizeof(netup_spi_name),
+ "fpga_%02x:%02x.%01x",
+ ndev->pci_bus,
+ ndev->pci_slot,
+ ndev->pci_func);
+ if (!spi_new_device(master, &netup_spi_board)) {
+ ndev->spi = NULL;
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to create SPI device\n", __func__);
+ return -EINVAL;
+ }
+ dev_dbg(&ndev->pci_dev->dev, "%s(): SPI init OK\n", __func__);
+ return 0;
+}
+
+void netup_spi_release(struct netup_unidvb_dev *ndev)
+{
+ u16 reg;
+ unsigned long flags;
+ struct netup_spi *spi = ndev->spi;
+
+ if (!spi) {
+ pr_debug("%s(): SPI not initialized\n",
+ __func__);
+ return;
+ }
+ spin_lock_irqsave(&spi->lock, flags);
+ reg = readw(&spi->regs->control_stat);
+ writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
+ reg = readw(&spi->regs->control_stat);
+ writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ spi_unregister_master(spi->master);
+ ndev->spi = NULL;
+}
+
+
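
netup_spi_transfer() above feeds each spi_transfer through the 1024-byte FIFO window (sizeof(spi->regs->data)) and asserts NETUP_SPI_CTRL_LAST_CS only on the final fragment of the last transfer in the message. A self-contained sketch of just that fragmentation arithmetic, assuming a single transfer and a buffer size matching the register layout:

    #include <stdio.h>

    #define FIFO_SIZE 1024  /* assumed equal to sizeof(spi->regs->data) */

    static void fragment(unsigned int len)
    {
            unsigned int tr_size = len;

            while (tr_size) {
                    unsigned int frag_offset = len - tr_size;
                    unsigned int frag_size = tr_size > FIFO_SIZE ?
                                             FIFO_SIZE : tr_size;
                    int frag_last = (frag_offset + frag_size == len);

                    printf("offset=%4u size=%4u last_cs=%d\n",
                           frag_offset, frag_size, frag_last);
                    tr_size -= frag_size;
            }
    }

    int main(void)
    {
            fragment(2500); /* 1024 + 1024 + 452; LAST_CS only on the tail */
            return 0;
    }
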
diff --git a/drivers/media/pci/smipcie/Kconfig b/drivers/media/pci/smipcie/Kconfig
index 21a1583dbd8f..c11c772830c9 100644
--- a/drivers/media/pci/smipcie/Kconfig
+++ b/drivers/media/pci/smipcie/Kconfig
@@ -7,6 +7,7 @@ config DVB_SMIPCIE
select DVB_TS2020 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ depends on RC_CORE
help
Support for cards with SMI PCIe bridge:
- DVBSky S950 V3
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
index be55481a6e95..013bc3fe4294 100644
--- a/drivers/media/pci/smipcie/Makefile
+++ b/drivers/media/pci/smipcie/Makefile
@@ -1,3 +1,6 @@
+
+smipcie-objs := smipcie-main.o smipcie-ir.o
+
obj-$(CONFIG_DVB_SMIPCIE) += smipcie.o
ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
new file mode 100644
index 000000000000..d018673c71f6
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -0,0 +1,232 @@
+/*
+ * SMI PCIe driver for DVBSky cards.
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "smipcie.h"
+
+static void smi_ir_enableInterrupt(struct smi_rc *ir)
+{
+ struct smi_dev *dev = ir->dev;
+
+ smi_write(MSI_INT_ENA_SET, IR_X_INT);
+}
+
+static void smi_ir_disableInterrupt(struct smi_rc *ir)
+{
+ struct smi_dev *dev = ir->dev;
+
+ smi_write(MSI_INT_ENA_CLR, IR_X_INT);
+}
+
+static void smi_ir_clearInterrupt(struct smi_rc *ir)
+{
+ struct smi_dev *dev = ir->dev;
+
+ smi_write(MSI_INT_STATUS_CLR, IR_X_INT);
+}
+
+static void smi_ir_stop(struct smi_rc *ir)
+{
+ struct smi_dev *dev = ir->dev;
+
+ smi_ir_disableInterrupt(ir);
+ smi_clear(IR_Init_Reg, 0x80);
+}
+
+#define BITS_PER_COMMAND 14
+#define GROUPS_PER_BIT 2
+#define IR_RC5_MIN_BIT 36
+#define IR_RC5_MAX_BIT 52
+static u32 smi_decode_rc5(u8 *pData, u8 size)
+{
+ u8 index, current_bit, bit_count;
+ u8 group_array[BITS_PER_COMMAND * GROUPS_PER_BIT + 4];
+ u8 group_index = 0;
+ u32 command = 0xFFFFFFFF;
+
+ group_array[group_index++] = 1;
+
+ for (index = 0; index < size; index++) {
+
+ current_bit = (pData[index] & 0x80) ? 1 : 0;
+ bit_count = pData[index] & 0x7f;
+
+ if ((current_bit == 1) && (bit_count >= 2*IR_RC5_MAX_BIT + 1)) {
+ goto process_code;
+ } else if ((bit_count >= IR_RC5_MIN_BIT) &&
+ (bit_count <= IR_RC5_MAX_BIT)) {
+ group_array[group_index++] = current_bit;
+ } else if ((bit_count > IR_RC5_MAX_BIT) &&
+ (bit_count <= 2*IR_RC5_MAX_BIT)) {
+ group_array[group_index++] = current_bit;
+ group_array[group_index++] = current_bit;
+ } else {
+ goto invalid_timing;
+ }
+ if (group_index >= BITS_PER_COMMAND*GROUPS_PER_BIT)
+ goto process_code;
+
+ if ((group_index == BITS_PER_COMMAND*GROUPS_PER_BIT - 1)
+ && (group_array[group_index-1] == 0)) {
+ group_array[group_index++] = 1;
+ goto process_code;
+ }
+ }
+
+process_code:
+ if (group_index == (BITS_PER_COMMAND*GROUPS_PER_BIT-1))
+ group_array[group_index++] = 1;
+
+ if (group_index == BITS_PER_COMMAND*GROUPS_PER_BIT) {
+ command = 0;
+ for (index = 0; index < (BITS_PER_COMMAND*GROUPS_PER_BIT);
+ index = index + 2) {
+ if ((group_array[index] == 1) &&
+ (group_array[index+1] == 0)) {
+ command |= (1 << (BITS_PER_COMMAND -
+ (index/2) - 1));
+ } else if ((group_array[index] == 0) &&
+ (group_array[index+1] == 1)) {
+ /* valid "01" pair: this command bit stays 0 */
+ } else {
+ command = 0xFFFFFFFF;
+ goto invalid_timing;
+ }
+ }
+ }
+
+invalid_timing:
+ return command;
+}
+
+static void smi_ir_decode(struct work_struct *work)
+{
+ struct smi_rc *ir = container_of(work, struct smi_rc, work);
+ struct smi_dev *dev = ir->dev;
+ struct rc_dev *rc_dev = ir->rc_dev;
+ u32 dwIRControl, dwIRData, dwIRCode, scancode;
+ u8 index, ucIRCount, readLoop, rc5_command, rc5_system, toggle;
+
+ dwIRControl = smi_read(IR_Init_Reg);
+ if (dwIRControl & rbIRVld) {
+ ucIRCount = (u8) smi_read(IR_Data_Cnt);
+
+ if (ucIRCount < 4)
+ goto end_ir_decode;
+
+ readLoop = ucIRCount/4;
+ if (ucIRCount % 4)
+ readLoop += 1;
+ for (index = 0; index < readLoop; index++) {
+ dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index*4));
+
+ ir->irData[index*4 + 0] = (u8)(dwIRData);
+ ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+ ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+ ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
+ }
+ dwIRCode = smi_decode_rc5(ir->irData, ucIRCount);
+
+ if (dwIRCode != 0xFFFFFFFF) {
+ rc5_command = dwIRCode & 0x3F;
+ rc5_system = (dwIRCode & 0x7C0) >> 6;
+ toggle = (dwIRCode & 0x800) ? 1 : 0;
+ scancode = rc5_system << 8 | rc5_command;
+ rc_keydown(rc_dev, RC_TYPE_RC5, scancode, toggle);
+ }
+ }
+end_ir_decode:
+ smi_set(IR_Init_Reg, 0x04);
+ smi_ir_enableInterrupt(ir);
+}
+
+/* IR functions called by the main driver. */
+int smi_ir_irq(struct smi_rc *ir, u32 int_status)
+{
+ int handled = 0;
+
+ if (int_status & IR_X_INT) {
+ smi_ir_disableInterrupt(ir);
+ smi_ir_clearInterrupt(ir);
+ schedule_work(&ir->work);
+ handled = 1;
+ }
+ return handled;
+}
+
+void smi_ir_start(struct smi_rc *ir)
+{
+ struct smi_dev *dev = ir->dev;
+
+ smi_write(IR_Idle_Cnt_Low, 0x00140070);
+ msleep(20);
+ smi_set(IR_Init_Reg, 0x90);
+
+ smi_ir_enableInterrupt(ir);
+}
+
+int smi_ir_init(struct smi_dev *dev)
+{
+ int ret;
+ struct rc_dev *rc_dev;
+ struct smi_rc *ir = &dev->ir;
+
+ rc_dev = rc_allocate_device();
+ if (!rc_dev)
+ return -ENOMEM;
+
+ /* init input device */
+ snprintf(ir->input_name, sizeof(ir->input_name), "IR (%s)",
+ dev->info->name);
+ snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0",
+ pci_name(dev->pci_dev));
+
+ rc_dev->driver_name = "SMI_PCIe";
+ rc_dev->input_phys = ir->input_phys;
+ rc_dev->input_name = ir->input_name;
+ rc_dev->input_id.bustype = BUS_PCI;
+ rc_dev->input_id.version = 1;
+ rc_dev->input_id.vendor = dev->pci_dev->subsystem_vendor;
+ rc_dev->input_id.product = dev->pci_dev->subsystem_device;
+ rc_dev->dev.parent = &dev->pci_dev->dev;
+
+ rc_dev->driver_type = RC_DRIVER_SCANCODE;
+ rc_dev->map_name = RC_MAP_DVBSKY;
+
+ ir->rc_dev = rc_dev;
+ ir->dev = dev;
+
+ INIT_WORK(&ir->work, smi_ir_decode);
+ smi_ir_disableInterrupt(ir);
+
+ ret = rc_register_device(rc_dev);
+ if (ret)
+ goto ir_err;
+
+ return 0;
+ir_err:
+ rc_free_device(rc_dev);
+ return ret;
+}
+
+void smi_ir_exit(struct smi_dev *dev)
+{
+ struct smi_rc *ir = &dev->ir;
+ struct rc_dev *rc_dev = ir->rc_dev;
+
+ smi_ir_stop(ir);
+ rc_unregister_device(rc_dev);
+ ir->rc_dev = NULL;
+}
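
smi_ir_decode() above splits the 14-bit word returned by smi_decode_rc5() into the RC5 system (5 bits), command (6 bits) and toggle (1 bit) fields before handing a scancode to rc_keydown(). The same bit extraction as a standalone check; the sample code value below is made up:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t code = 0x2C35;                 /* made-up 14-bit example */
            uint8_t rc5_command = code & 0x3F;          /* bits 0..5  */
            uint8_t rc5_system = (code & 0x7C0) >> 6;   /* bits 6..10 */
            uint8_t toggle = (code & 0x800) ? 1 : 0;    /* bit 11     */
            uint32_t scancode = rc5_system << 8 | rc5_command;

            printf("system=0x%02x command=0x%02x toggle=%u scancode=0x%04x\n",
                   rc5_system, rc5_command, toggle, scancode);
            return 0;
    }
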
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie-main.c
index 143fd7899ecd..b039a229b7d2 100644
--- a/drivers/media/pci/smipcie/smipcie.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -468,6 +468,7 @@ static irqreturn_t smi_irq_handler(int irq, void *dev_id)
struct smi_dev *dev = dev_id;
struct smi_port *port0 = &dev->ts_port[0];
struct smi_port *port1 = &dev->ts_port[1];
+ struct smi_rc *ir = &dev->ir;
int handled = 0;
u32 intr_status = smi_read(MSI_INT_STATUS);
@@ -480,6 +481,9 @@ static irqreturn_t smi_irq_handler(int irq, void *dev_id)
if (dev->info->ts_1)
handled += smi_port_irq(port1, intr_status);
+ /* IR interrupt */
+ handled += smi_ir_irq(ir, intr_status);
+
return IRQ_RETVAL(handled);
}
@@ -993,6 +997,10 @@ static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_del_port0_attach;
}
+ ret = smi_ir_init(dev);
+ if (ret < 0)
+ goto err_del_port1_attach;
+
 #ifdef CONFIG_PCI_MSI /* TODO: MSI interrupt handling */
if (pci_msi_enabled())
ret = pci_enable_msi(dev->pci_dev);
@@ -1003,10 +1011,13 @@ static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = request_irq(dev->pci_dev->irq, smi_irq_handler,
IRQF_SHARED, "SMI_PCIE", dev);
if (ret < 0)
- goto err_del_port1_attach;
+ goto err_del_ir;
+ smi_ir_start(&dev->ir);
return 0;
+err_del_ir:
+ smi_ir_exit(dev);
err_del_port1_attach:
if (dev->info->ts_1)
smi_port_detach(&dev->ts_port[1]);
@@ -1039,6 +1050,7 @@ static void smi_remove(struct pci_dev *pdev)
if (dev->info->ts_0)
smi_port_detach(&dev->ts_port[0]);
+ smi_ir_exit(dev);
smi_i2c_exit(dev);
iounmap(dev->lmmio);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 10cdf20f4839..68cdda28fd98 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -234,6 +234,17 @@ struct smi_cfg_info {
int fe_1;
};
+struct smi_rc {
+ struct smi_dev *dev;
+ struct rc_dev *rc_dev;
+ char input_phys[64];
+ char input_name[64];
+ struct work_struct work;
+ u8 irData[256];
+
+ int users;
+};
+
struct smi_port {
struct smi_dev *dev;
int idx;
@@ -284,6 +295,9 @@ struct smi_dev {
/* i2c */
struct i2c_adapter i2c_bus[2];
struct i2c_algo_bit_data i2c_bit[2];
+
+ /* ir */
+ struct smi_rc ir;
};
#define smi_read(reg) readl(dev->lmmio + ((reg)>>2))
@@ -296,4 +310,9 @@ struct smi_dev {
#define smi_set(reg, bit) smi_andor((reg), (bit), (bit))
#define smi_clear(reg, bit) smi_andor((reg), (bit), 0)
+int smi_ir_irq(struct smi_rc *ir, u32 int_status);
+void smi_ir_start(struct smi_rc *ir);
+void smi_ir_exit(struct smi_dev *dev);
+int smi_ir_init(struct smi_dev *dev);
+
#endif /* #ifndef _SMI_PCIE_H_ */
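
smi_ir_decode() in smipcie-ir.c recovers its struct smi_rc from the embedded work_struct via container_of(), which is why struct smi_rc above embeds the work item rather than a pointer. A userspace reduction of the idiom; the simplified container_of below omits the type checking the kernel macro performs:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };    /* stand-in for the kernel type */

    struct smi_rc_demo {
            int users;
            struct work_struct work;        /* embedded, as in struct smi_rc */
    };

    static void decode(struct work_struct *work)
    {
            /* walk back from the member to the enclosing object */
            struct smi_rc_demo *ir = container_of(work, struct smi_rc_demo, work);

            printf("users=%d\n", ir->users);
    }

    int main(void)
    {
            struct smi_rc_demo ir = { .users = 1 };

            decode(&ir.work);
            return 0;
    }
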
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 570d119ea18b..f50d07229236 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -134,23 +134,11 @@ static irqreturn_t solo_isr(int irq, void *data)
static void free_solo_dev(struct solo_dev *solo_dev)
{
- struct pci_dev *pdev;
-
- if (!solo_dev)
- return;
+ struct pci_dev *pdev = solo_dev->pdev;
if (solo_dev->dev.parent)
device_unregister(&solo_dev->dev);
- pdev = solo_dev->pdev;
-
- /* If we never initialized the PCI device, then nothing else
- * below here needs cleanup */
- if (!pdev) {
- kfree(solo_dev);
- return;
- }
-
if (solo_dev->reg_base) {
/* Bring down the sub-devices first */
solo_g723_exit(solo_dev);
@@ -164,9 +152,8 @@ static void free_solo_dev(struct solo_dev *solo_dev)
/* Now cleanup the PCI device */
solo_irq_off(solo_dev, ~0);
+ free_irq(pdev->irq, solo_dev);
pci_iounmap(pdev, solo_dev->reg_base);
- if (pdev->irq)
- free_irq(pdev->irq, solo_dev);
}
pci_release_regions(pdev);
@@ -483,7 +470,6 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
solo_dev->type = id->driver_data;
solo_dev->pdev = pdev;
- spin_lock_init(&solo_dev->reg_io_lock);
ret = v4l2_device_register(&pdev->dev, &solo_dev->v4l2_dev);
if (ret)
goto fail_probe;
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 7ddc76709caa..4a37a1c51c48 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -48,10 +48,8 @@
 /* The solo6x10 DMA engine writes 1 KiB pages, 32 pages in total. Each
 * 1 KiB page is broken down into 20 * 48-byte regions (one for each
 * possible channel), with the rest of the page being dummy data. */
-#define G723_MAX_BUFFER (G723_PERIOD_BYTES * PERIODS_MAX)
+#define PERIODS G723_FDMA_PAGES
#define G723_INTR_ORDER 4 /* 0 - 4 */
-#define PERIODS_MIN (1 << G723_INTR_ORDER)
-#define PERIODS_MAX G723_FDMA_PAGES
struct solo_snd_pcm {
int on;
@@ -130,11 +128,11 @@ static const struct snd_pcm_hardware snd_solo_pcm_hw = {
.rate_max = SAMPLERATE,
.channels_min = 1,
.channels_max = 1,
- .buffer_bytes_max = G723_MAX_BUFFER,
+ .buffer_bytes_max = G723_PERIOD_BYTES * PERIODS,
.period_bytes_min = G723_PERIOD_BYTES,
.period_bytes_max = G723_PERIOD_BYTES,
- .periods_min = PERIODS_MIN,
- .periods_max = PERIODS_MAX,
+ .periods_min = PERIODS,
+ .periods_max = PERIODS,
};
static int snd_solo_pcm_open(struct snd_pcm_substream *ss)
@@ -340,7 +338,8 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL),
- G723_MAX_BUFFER, G723_MAX_BUFFER);
+ G723_PERIOD_BYTES * PERIODS,
+ G723_PERIOD_BYTES * PERIODS);
if (ret < 0)
return ret;
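
With periods_min and periods_max pinned to the same PERIODS value, the ALSA buffer is always exactly G723_PERIOD_BYTES * PERIODS bytes. A back-of-the-envelope check with values inferred from the comment above (32 FDMA pages, 20 * 48-byte regions per page); both constants are assumptions here, the authoritative definitions live in the solo6x10 headers:

    #include <stdio.h>

    #define G723_FDMA_PAGES   32            /* assumed from the "32 pages" comment */
    #define G723_PERIOD_BYTES (20 * 48)     /* assumed: 20 * 48-byte regions per page */
    #define PERIODS           G723_FDMA_PAGES

    int main(void)
    {
            printf("period = %d bytes, buffer = %d bytes\n",
                   G723_PERIOD_BYTES, G723_PERIOD_BYTES * PERIODS);
            return 0;
    }
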
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 1ca54b08b3aa..27423d7f5410 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -199,7 +199,6 @@ struct solo_dev {
int nr_ext;
u32 irq_mask;
u32 motion_mask;
- spinlock_t reg_io_lock;
struct v4l2_device v4l2_dev;
/* tw28xx accounting */
@@ -281,36 +280,13 @@ struct solo_dev {
static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
{
- unsigned long flags;
- u32 ret;
- u16 val;
-
- spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
-
- ret = readl(solo_dev->reg_base + reg);
- rmb();
- pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
- rmb();
-
- spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
-
- return ret;
+ return readl(solo_dev->reg_base + reg);
}
static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
u32 data)
{
- unsigned long flags;
- u16 val;
-
- spin_lock_irqsave(&solo_dev->reg_io_lock, flags);
-
writel(data, solo_dev->reg_base + reg);
- wmb();
- pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
- rmb();
-
- spin_unlock_irqrestore(&solo_dev->reg_io_lock, flags);
}
static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 54c9910256f8..3e469d4e0c87 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1508,7 +1508,7 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) {
pr_err("KNC1-%d: Could not read MAC from KNC1 card\n",
budget_av->budget.dvb_adapter.num);
- memset(mac, 0, 6);
+ eth_zero_addr(mac);
} else {
pr_info("KNC1-%d: MAC addr = %pM\n",
budget_av->budget.dvb_adapter.num, mac);
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/pci/ttpci/ttpci-eeprom.c
index 32d43156c548..079ee098b7e3 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/pci/ttpci/ttpci-eeprom.c
@@ -36,6 +36,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/i2c.h>
+#include <linux/etherdevice.h>
#include "ttpci-eeprom.h"
@@ -145,7 +146,7 @@ int ttpci_eeprom_parse_mac(struct i2c_adapter *adapter, u8 *proposed_mac)
if (ret != 0) { /* Will only be -ENODEV */
dprintk("Couldn't read from EEPROM: not there?\n");
- memset(proposed_mac, 0, 6);
+ eth_zero_addr(proposed_mac);
return ret;
}
@@ -157,14 +158,12 @@ int ttpci_eeprom_parse_mac(struct i2c_adapter *adapter, u8 *proposed_mac)
dprintk( "%.2x:", encodedMAC[i]);
}
dprintk("%.2x\n", encodedMAC[19]);
- memset(proposed_mac, 0, 6);
+ eth_zero_addr(proposed_mac);
return ret;
}
memcpy(proposed_mac, decodedMAC, 6);
- dprintk("adapter has MAC addr = %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
- decodedMAC[0], decodedMAC[1], decodedMAC[2],
- decodedMAC[3], decodedMAC[4], decodedMAC[5]);
+ dprintk("adapter has MAC addr = %pM\n", decodedMAC);
return 0;
}
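
Both hunks above replace an open-coded memset(mac, 0, 6) with eth_zero_addr() and byte-by-byte hex printing with the %pM printk specifier. A userspace equivalent of what those helpers do; plain printf has no %pM, so the loop stands in for it, and eth_zero_addr() is simply a memset over ETH_ALEN bytes:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static void eth_zero_addr_demo(unsigned char *addr)
    {
            memset(addr, 0, ETH_ALEN);      /* what eth_zero_addr() does */
    }

    int main(void)
    {
            unsigned char mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
            int i;

            for (i = 0; i < ETH_ALEN; i++)  /* what %pM renders */
                    printf("%02x%s", mac[i], i < ETH_ALEN - 1 ? ":" : "\n");
            eth_zero_addr_demo(mac);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }
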
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index c135165a8b26..04706cc9b818 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
+#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <media/v4l2-dev.h>
@@ -70,13 +71,13 @@ static atomic_t tw68_instance = ATOMIC_INIT(0);
* added under vendor 0x1797 (Techwell Inc.) as subsystem IDs.
*/
static const struct pci_device_id tw68_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6800)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6801)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6804)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6800)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6801)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6804)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_4)},
{0,}
};
@@ -263,15 +264,15 @@ static int tw68_initdev(struct pci_dev *pci_dev,
}
switch (pci_id->device) {
- case PCI_DEVICE_ID_6800: /* TW6800 */
+ case PCI_DEVICE_ID_TECHWELL_6800: /* TW6800 */
dev->vdecoder = TW6800;
dev->board_virqmask = TW68_VID_INTS;
break;
- case PCI_DEVICE_ID_6801: /* Video decoder for TW6802 */
+ case PCI_DEVICE_ID_TECHWELL_6801: /* Video decoder for TW6802 */
dev->vdecoder = TW6801;
dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
break;
- case PCI_DEVICE_ID_6804: /* Video decoder for TW6804 */
+ case PCI_DEVICE_ID_TECHWELL_6804: /* Video decoder for TW6804 */
dev->vdecoder = TW6804;
dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
break;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 93f2335e004b..ef51e4d48866 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -42,22 +42,6 @@
#define UNSET (-1U)
-/* system vendor and device ID's */
-#define PCI_VENDOR_ID_TECHWELL 0x1797
-#define PCI_DEVICE_ID_6800 0x6800
-#define PCI_DEVICE_ID_6801 0x6801
-#define PCI_DEVICE_ID_AUDIO2 0x6802
-#define PCI_DEVICE_ID_TS3 0x6803
-#define PCI_DEVICE_ID_6804 0x6804
-#define PCI_DEVICE_ID_AUDIO5 0x6805
-#define PCI_DEVICE_ID_TS6 0x6806
-
-/* tw6816 based cards */
-#define PCI_DEVICE_ID_6816_1 0x6810
-#define PCI_DEVICE_ID_6816_2 0x6811
-#define PCI_DEVICE_ID_6816_3 0x6812
-#define PCI_DEVICE_ID_6816_4 0x6813
-
#define TW68_NORMS ( \
V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_PAL_M | V4L2_STD_PAL_Nc | V4L2_STD_PAL_60)
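
The tw68 hunks drop the driver-private PCI_DEVICE_ID_* constants in favour of the shared PCI_DEVICE_ID_TECHWELL_* names from <linux/pci_ids.h>; the ID table itself still works the same way. A reduced model of how such a {vendor, device} table is walked; the matching is really done by the PCI core, and the struct below is a simplification of struct pci_device_id:

    #include <stdio.h>

    struct pci_id { unsigned short vendor, device; };

    #define PCI_VENDOR_ID_TECHWELL       0x1797
    #define PCI_DEVICE_ID_TECHWELL_6800  0x6800
    #define PCI_DEVICE_ID_TECHWELL_6801  0x6801

    static const struct pci_id tw68_tbl[] = {
            { PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6800 },
            { PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6801 },
            { 0, 0 }        /* sentinel, like the {0,} terminator above */
    };

    static int matches(unsigned short vendor, unsigned short device)
    {
            const struct pci_id *id;

            for (id = tw68_tbl; id->vendor; id++)
                    if (id->vendor == vendor && id->device == device)
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", matches(0x1797, 0x6800));        /* 1 */
            printf("%d\n", matches(0x1797, 0x6899));        /* 0 */
            return 0;
    }
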
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 5e040085c2ff..4e7db8939c2b 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -32,6 +32,8 @@
#define _BUZ_H_
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
struct zoran_sync {
unsigned long frame; /* number of buffer that has been free'd */
@@ -216,6 +218,7 @@ struct zoran;
/* zoran_fh contains per-open() settings */
struct zoran_fh {
+ struct v4l2_fh fh;
struct zoran *zr;
enum zoran_map_mode map_mode; /* Flag which bufferset will map by next mmap() */
@@ -268,6 +271,7 @@ struct card_info {
struct zoran {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
struct video_device *video_dev;
struct i2c_adapter i2c_adapter; /* */
@@ -280,8 +284,7 @@ struct zoran {
struct videocodec *codec; /* video codec */
struct videocodec *vfe; /* video front end */
- struct mutex resource_lock; /* prevent evil stuff */
- struct mutex other_lock; /* please merge with above */
+ struct mutex lock; /* file ops serialize lock */
u8 initialized; /* flag if zoran has been correctly initialized */
int user; /* number of current users */
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index cec5b7553f28..1136d92af642 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1049,8 +1049,9 @@ static int zr36057_init (struct zoran *zr)
/*
* Now add the template and register the device unit.
*/
- memcpy(zr->video_dev, &zoran_template, sizeof(zoran_template));
+ *zr->video_dev = zoran_template;
zr->video_dev->v4l2_dev = &zr->v4l2_dev;
+ zr->video_dev->lock = &zr->lock;
strcpy(zr->video_dev->name, ZR_DEVNAME(zr));
/* It's not a mem2mem device, but you can both capture and output from
one and the same device. This should really be split up into two
@@ -1116,6 +1117,7 @@ static void zoran_remove(struct pci_dev *pdev)
pci_disable_device(zr->pci_dev);
video_unregister_device(zr->video_dev);
exit_free:
+ v4l2_ctrl_handler_free(&zr->hdl);
v4l2_device_unregister(&zr->v4l2_dev);
kfree(zr);
}
@@ -1219,9 +1221,11 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
zr->pci_dev = pdev;
zr->id = nr;
snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
+ if (v4l2_ctrl_handler_init(&zr->hdl, 10))
+ goto zr_unreg;
+ zr->v4l2_dev.ctrl_handler = &zr->hdl;
spin_lock_init(&zr->spinlock);
- mutex_init(&zr->resource_lock);
- mutex_init(&zr->other_lock);
+ mutex_init(&zr->lock);
if (pci_enable_device(pdev))
goto zr_unreg;
zr->revision = zr->pci_dev->revision;
@@ -1443,6 +1447,7 @@ zr_free_irq:
zr_unmap:
iounmap(zr->zr36057_mem);
zr_unreg:
+ v4l2_ctrl_handler_free(&zr->hdl);
v4l2_device_unregister(&zr->v4l2_dev);
zr_free_mem:
kfree(zr);
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 40119b3c52c1..4d47ddac97dc 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/ktime.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
@@ -181,20 +182,11 @@ dump_guests (struct zoran *zr)
}
}
-static inline unsigned long
-get_time (void)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
- return (1000000 * tv.tv_sec + tv.tv_usec);
-}
-
void
detect_guest_activity (struct zoran *zr)
{
int timeout, i, j, res, guest[8], guest0[8], change[8][3];
- unsigned long t0, t1;
+ ktime_t t0, t1;
dump_guests(zr);
printk(KERN_INFO "%s: Detecting guests activity, please wait...\n",
@@ -205,15 +197,15 @@ detect_guest_activity (struct zoran *zr)
timeout = 0;
j = 0;
- t0 = get_time();
+ t0 = ktime_get();
while (timeout < 10000) {
udelay(10);
timeout++;
for (i = 1; (i < 8) && (j < 8); i++) {
res = post_office_read(zr, i, 0);
if (res != guest[i]) {
- t1 = get_time();
- change[j][0] = (t1 - t0);
+ t1 = ktime_get();
+ change[j][0] = ktime_to_us(ktime_sub(t1, t0));
t0 = t1;
change[j][1] = i;
change[j][2] = res;
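
detect_guest_activity() now takes its timestamps with ktime_get() and converts deltas with ktime_to_us(), so the measurement is monotonic and immune to wall-clock steps, unlike the removed do_gettimeofday() helper. The userspace counterpart of the same pattern uses CLOCK_MONOTONIC:

    #include <stdio.h>
    #include <time.h>

    static long elapsed_us(const struct timespec *t0, const struct timespec *t1)
    {
            /* same role as ktime_to_us(ktime_sub(t1, t0)) */
            return (t1->tv_sec - t0->tv_sec) * 1000000L +
                   (t1->tv_nsec - t0->tv_nsec) / 1000L;
    }

    int main(void)
    {
            struct timespec t0, t1, nap = { 0, 10 * 1000 * 1000 };

            clock_gettime(CLOCK_MONOTONIC, &t0);    /* like t0 = ktime_get() */
            nanosleep(&nap, NULL);                  /* stand-in for the polling loop */
            clock_gettime(CLOCK_MONOTONIC, &t1);
            printf("elapsed: %ld us\n", elapsed_us(&t0, &t1));
            return 0;
    }
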
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 2b25d31c46f6..80caa70c6360 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -61,6 +61,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include "videocodec.h"
#include <asm/byteorder.h>
@@ -592,10 +593,14 @@ static int v4l_sync(struct zoran_fh *fh, int frame)
return -EPROTO;
}
+ mutex_unlock(&zr->lock);
/* wait on this buffer to get ready */
if (!wait_event_interruptible_timeout(zr->v4l_capq,
- (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ))
+ (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) {
+ mutex_lock(&zr->lock);
return -ETIME;
+ }
+ mutex_lock(&zr->lock);
if (signal_pending(current))
return -ERESTARTSYS;
@@ -783,6 +788,7 @@ static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs)
ZR_DEVNAME(zr), __func__);
return -EINVAL;
}
+ mutex_unlock(&zr->lock);
if (!wait_event_interruptible_timeout(zr->jpg_capq,
(zr->jpg_que_tail != zr->jpg_dma_tail ||
zr->jpg_dma_tail == zr->jpg_dma_head),
@@ -793,6 +799,7 @@ static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs)
udelay(1);
zr->codec->control(zr->codec, CODEC_G_STATUS,
sizeof(isr), &isr);
+ mutex_lock(&zr->lock);
dprintk(1,
KERN_ERR
"%s: %s - timeout: codec isr=0x%02x\n",
@@ -801,6 +808,7 @@ static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs)
return -ETIME;
}
+ mutex_lock(&zr->lock);
if (signal_pending(current))
return -ERESTARTSYS;
@@ -911,7 +919,7 @@ static int zoran_open(struct file *file)
dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
- mutex_lock(&zr->other_lock);
+ mutex_lock(&zr->lock);
if (zr->user >= 2048) {
dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
@@ -930,6 +938,8 @@ static int zoran_open(struct file *file)
res = -ENOMEM;
goto fail_unlock;
}
+ v4l2_fh_init(&fh->fh, video_devdata(file));
+
/* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
* on norm-change! */
fh->overlay_mask =
@@ -946,8 +956,6 @@ static int zoran_open(struct file *file)
if (zr->user++ == 0)
first_open = 1;
- /*mutex_unlock(&zr->resource_lock);*/
-
/* default setup - TODO: look at flags */
if (first_open) { /* First device open */
zr36057_restart(zr);
@@ -961,14 +969,15 @@ static int zoran_open(struct file *file)
file->private_data = fh;
fh->zr = zr;
zoran_open_init_session(fh);
- mutex_unlock(&zr->other_lock);
+ v4l2_fh_add(&fh->fh);
+ mutex_unlock(&zr->lock);
return 0;
fail_fh:
kfree(fh);
fail_unlock:
- mutex_unlock(&zr->other_lock);
+ mutex_unlock(&zr->lock);
dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
ZR_DEVNAME(zr), res, zr->user);
@@ -987,7 +996,7 @@ zoran_close(struct file *file)
/* kernel locks (fs/device.c), so don't do that ourselves
* (prevents deadlocks) */
- mutex_lock(&zr->other_lock);
+ mutex_lock(&zr->lock);
zoran_close_end_session(fh);
@@ -1021,9 +1030,10 @@ zoran_close(struct file *file)
encoder_call(zr, video, s_routing, 2, 0, 0);
}
}
- mutex_unlock(&zr->other_lock);
+ mutex_unlock(&zr->lock);
- file->private_data = NULL;
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
kfree(fh->overlay_mask);
kfree(fh);
@@ -1032,29 +1042,6 @@ zoran_close(struct file *file)
return 0;
}
-
-static ssize_t
-zoran_read (struct file *file,
- char __user *data,
- size_t count,
- loff_t *ppos)
-{
- /* we simply don't support read() (yet)... */
-
- return -EINVAL;
-}
-
-static ssize_t
-zoran_write (struct file *file,
- const char __user *data,
- size_t count,
- loff_t *ppos)
-{
- /* ...and the same goes for write() */
-
- return -EINVAL;
-}
-
static int setup_fbuffer(struct zoran_fh *fh,
void *base,
const struct zoran_format *fmt,
@@ -1523,7 +1510,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- memset(cap, 0, sizeof(*cap));
strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1);
strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
@@ -1583,9 +1569,6 @@ static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- mutex_lock(&zr->resource_lock);
fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm;
fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 /
@@ -1601,7 +1584,6 @@ static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -1614,7 +1596,6 @@ static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
if (fh->map_mode != ZORAN_MAP_MODE_RAW)
return zoran_g_fmt_vid_out(file, fh, fmt);
- mutex_lock(&zr->resource_lock);
fmt->fmt.pix.width = fh->v4l_settings.width;
fmt->fmt.pix.height = fh->v4l_settings.height;
fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline *
@@ -1626,7 +1607,6 @@ static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
fmt->fmt.pix.field = V4L2_FIELD_TOP;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -1636,8 +1616,6 @@ static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh,
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- mutex_lock(&zr->resource_lock);
-
fmt->fmt.win.w.left = fh->overlay_settings.x;
fmt->fmt.win.w.top = fh->overlay_settings.y;
fmt->fmt.win.w.width = fh->overlay_settings.width;
@@ -1647,7 +1625,6 @@ static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh,
else
fmt->fmt.win.field = V4L2_FIELD_TOP;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -1657,8 +1634,6 @@ static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh,
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- mutex_lock(&zr->resource_lock);
-
if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH)
fmt->fmt.win.w.width = BUZ_MAX_WIDTH;
if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH)
@@ -1668,7 +1643,6 @@ static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh,
if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT)
fmt->fmt.win.w.height = BUZ_MIN_HEIGHT;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -1683,7 +1657,6 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
return -EINVAL;
- mutex_lock(&zr->resource_lock);
settings = fh->jpg_settings;
/* we actually need to set 'real' parameters now */
@@ -1718,7 +1691,7 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
/* check */
res = zoran_check_jpg_settings(zr, &settings, 1);
if (res)
- goto tryfmt_unlock_and_return;
+ return res;
/* tell the user what we actually did */
fmt->fmt.pix.width = settings.img_width / settings.HorDcm;
@@ -1734,8 +1707,6 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-tryfmt_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1750,23 +1721,17 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
return zoran_try_fmt_vid_out(file, fh, fmt);
- mutex_lock(&zr->resource_lock);
-
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
break;
- if (i == NUM_FORMATS) {
- mutex_unlock(&zr->resource_lock);
+ if (i == NUM_FORMATS)
return -EINVAL;
- }
bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
v4l_bound_align_image(
&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
&fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
- mutex_unlock(&zr->resource_lock);
-
return 0;
}
@@ -1774,7 +1739,6 @@ static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
int res;
dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n",
@@ -1783,12 +1747,10 @@ static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh,
fmt->fmt.win.w.height,
fmt->fmt.win.clipcount,
fmt->fmt.win.bitmap);
- mutex_lock(&zr->resource_lock);
res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top,
fmt->fmt.win.w.width, fmt->fmt.win.w.height,
(struct v4l2_clip __user *)fmt->fmt.win.clips,
fmt->fmt.win.clipcount, fmt->fmt.win.bitmap);
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1808,13 +1770,11 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
return -EINVAL;
- mutex_lock(&zr->resource_lock);
-
if (fh->buffers.allocated) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
- goto sfmtjpg_unlock_and_return;
+ return res;
}
settings = fh->jpg_settings;
@@ -1851,7 +1811,7 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
/* check */
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
- goto sfmtjpg_unlock_and_return;
+ return res;
/* it's ok, so set them */
fh->jpg_settings = settings;
@@ -1872,9 +1832,6 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.sizeimage = fh->buffers.buffer_size;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
-sfmtjpg_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1898,14 +1855,12 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
return -EINVAL;
}
- mutex_lock(&zr->resource_lock);
-
if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) ||
fh->buffers.active != ZORAN_FREE) {
dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
- goto sfmtv4l_unlock_and_return;
+ return res;
}
if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
@@ -1917,7 +1872,7 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height,
&zoran_formats[i]);
if (res)
- goto sfmtv4l_unlock_and_return;
+ return res;
/* tell the user the results/missing stuff */
fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline;
@@ -1927,9 +1882,6 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
else
fmt->fmt.pix.field = V4L2_FIELD_TOP;
-
-sfmtv4l_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1940,14 +1892,12 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
struct zoran *zr = fh->zr;
memset(fb, 0, sizeof(*fb));
- mutex_lock(&zr->resource_lock);
fb->base = zr->vbuf_base;
fb->fmt.width = zr->vbuf_width;
fb->fmt.height = zr->vbuf_height;
if (zr->overlay_settings.format)
fb->fmt.pixelformat = fh->overlay_settings.format->fourcc;
fb->fmt.bytesperline = zr->vbuf_bytesperline;
- mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -1973,10 +1923,8 @@ static int zoran_s_fbuf(struct file *file, void *__fh,
return -EINVAL;
}
- mutex_lock(&zr->resource_lock);
res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width,
fb->fmt.height, fb->fmt.bytesperline);
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1984,12 +1932,9 @@ static int zoran_s_fbuf(struct file *file, void *__fh,
static int zoran_overlay(struct file *file, void *__fh, unsigned int on)
{
struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
int res;
- mutex_lock(&zr->resource_lock);
res = setup_overlay(fh, on);
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2013,14 +1958,13 @@ static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffe
if (req->count == 0)
return zoran_streamoff(file, fh, req->type);
- mutex_lock(&zr->resource_lock);
if (fh->buffers.allocated) {
dprintk(2,
KERN_ERR
"%s: VIDIOC_REQBUFS - buffers already allocated\n",
ZR_DEVNAME(zr));
res = -EBUSY;
- goto v4l2reqbuf_unlock_and_return;
+ return res;
}
if (fh->map_mode == ZORAN_MAP_MODE_RAW &&
@@ -2037,7 +1981,7 @@ static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffe
if (v4l_fbuffer_alloc(fh)) {
res = -ENOMEM;
- goto v4l2reqbuf_unlock_and_return;
+ return res;
}
} else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC ||
fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) {
@@ -2054,7 +1998,7 @@ static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffe
if (jpg_fbuffer_alloc(fh)) {
res = -ENOMEM;
- goto v4l2reqbuf_unlock_and_return;
+ return res;
}
} else {
dprintk(1,
@@ -2062,23 +2006,17 @@ static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffe
"%s: VIDIOC_REQBUFS - unknown type %d\n",
ZR_DEVNAME(zr), req->type);
res = -EINVAL;
- goto v4l2reqbuf_unlock_and_return;
+ return res;
}
-v4l2reqbuf_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
{
struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
int res;
- mutex_lock(&zr->resource_lock);
res = zoran_v4l2_buffer_status(fh, buf, buf->index);
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2089,8 +2027,6 @@ static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
struct zoran *zr = fh->zr;
int res = 0, codec_mode, buf_type;
- mutex_lock(&zr->resource_lock);
-
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -2098,12 +2034,12 @@ static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
- goto qbuf_unlock_and_return;
+ return res;
}
res = zoran_v4l_queue_frame(fh, buf->index);
if (res)
- goto qbuf_unlock_and_return;
+ return res;
if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED)
zr36057_set_memgrab(zr, 1);
break;
@@ -2123,12 +2059,12 @@ static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
- goto qbuf_unlock_and_return;
+ return res;
}
res = zoran_jpg_queue_frame(fh, buf->index, codec_mode);
if (res != 0)
- goto qbuf_unlock_and_return;
+ return res;
if (zr->codec_mode == BUZ_MODE_IDLE &&
fh->buffers.active == ZORAN_LOCKED)
zr36057_enable_jpg(zr, codec_mode);
@@ -2142,9 +2078,6 @@ static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
res = -EINVAL;
break;
}
-qbuf_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -2154,8 +2087,6 @@ static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
struct zoran *zr = fh->zr;
 int res = 0, buf_type, num = -1; /* initialized to silence a compiler warning */
- mutex_lock(&zr->resource_lock);
-
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -2163,18 +2094,18 @@ static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
- goto dqbuf_unlock_and_return;
+ return res;
}
num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME];
if (file->f_flags & O_NONBLOCK &&
zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) {
res = -EAGAIN;
- goto dqbuf_unlock_and_return;
+ return res;
}
res = v4l_sync(fh, num);
if (res)
- goto dqbuf_unlock_and_return;
+ return res;
zr->v4l_sync_tail++;
res = zoran_v4l2_buffer_status(fh, buf, num);
break;
@@ -2194,7 +2125,7 @@ static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
"%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n",
ZR_DEVNAME(zr), buf->type, fh->map_mode);
res = -EINVAL;
- goto dqbuf_unlock_and_return;
+ return res;
}
num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME];
@@ -2202,12 +2133,12 @@ static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
if (file->f_flags & O_NONBLOCK &&
zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) {
res = -EAGAIN;
- goto dqbuf_unlock_and_return;
+ return res;
}
bs.frame = 0; /* suppress compiler warning */
res = jpg_sync(fh, &bs);
if (res)
- goto dqbuf_unlock_and_return;
+ return res;
res = zoran_v4l2_buffer_status(fh, buf, bs.frame);
break;
}
@@ -2219,9 +2150,6 @@ static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf)
res = -EINVAL;
break;
}
-dqbuf_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -2231,14 +2159,12 @@ static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type
struct zoran *zr = fh->zr;
int res = 0;
- mutex_lock(&zr->resource_lock);
-
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
if (zr->v4l_buffers.active != ZORAN_ACTIVE ||
fh->buffers.active != ZORAN_ACTIVE) {
res = -EBUSY;
- goto strmon_unlock_and_return;
+ return res;
}
zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED;
@@ -2257,7 +2183,7 @@ static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type
if (zr->jpg_buffers.active != ZORAN_ACTIVE ||
fh->buffers.active != ZORAN_ACTIVE) {
res = -EBUSY;
- goto strmon_unlock_and_return;
+ return res;
}
zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED;
@@ -2276,9 +2202,6 @@ static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type
res = -EINVAL;
break;
}
-strmon_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -2289,17 +2212,15 @@ static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type typ
int i, res = 0;
unsigned long flags;
- mutex_lock(&zr->resource_lock);
-
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
if (fh->buffers.active == ZORAN_FREE &&
zr->v4l_buffers.active != ZORAN_FREE) {
res = -EPERM; /* stay off other's settings! */
- goto strmoff_unlock_and_return;
+ return res;
}
if (zr->v4l_buffers.active == ZORAN_FREE)
- goto strmoff_unlock_and_return;
+ return res;
spin_lock_irqsave(&zr->spinlock, flags);
/* unload capture */
@@ -2327,17 +2248,17 @@ static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type typ
if (fh->buffers.active == ZORAN_FREE &&
zr->jpg_buffers.active != ZORAN_FREE) {
res = -EPERM; /* stay off other's settings! */
- goto strmoff_unlock_and_return;
+ return res;
}
if (zr->jpg_buffers.active == ZORAN_FREE)
- goto strmoff_unlock_and_return;
+ return res;
res = jpg_qbuf(fh, -1,
(fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ?
BUZ_MODE_MOTION_COMPRESS :
BUZ_MODE_MOTION_DECOMPRESS);
if (res)
- goto strmoff_unlock_and_return;
+ return res;
break;
default:
dprintk(1, KERN_ERR
@@ -2346,70 +2267,14 @@ static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type typ
res = -EINVAL;
break;
}
-strmoff_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
-
-static int zoran_queryctrl(struct file *file, void *__fh,
- struct v4l2_queryctrl *ctrl)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- /* we only support hue/saturation/contrast/brightness */
- if (ctrl->id < V4L2_CID_BRIGHTNESS ||
- ctrl->id > V4L2_CID_HUE)
- return -EINVAL;
-
- decoder_call(zr, core, queryctrl, ctrl);
-
- return 0;
-}
-
-static int zoran_g_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- /* we only support hue/saturation/contrast/brightness */
- if (ctrl->id < V4L2_CID_BRIGHTNESS ||
- ctrl->id > V4L2_CID_HUE)
- return -EINVAL;
-
- mutex_lock(&zr->resource_lock);
- decoder_call(zr, core, g_ctrl, ctrl);
- mutex_unlock(&zr->resource_lock);
-
- return 0;
-}
-
-static int zoran_s_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
-
- /* we only support hue/saturation/contrast/brightness */
- if (ctrl->id < V4L2_CID_BRIGHTNESS ||
- ctrl->id > V4L2_CID_HUE)
- return -EINVAL;
-
- mutex_lock(&zr->resource_lock);
- decoder_call(zr, core, s_ctrl, ctrl);
- mutex_unlock(&zr->resource_lock);
-
- return 0;
-}
-
static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- mutex_lock(&zr->resource_lock);
*std = zr->norm;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2419,14 +2284,11 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
struct zoran *zr = fh->zr;
int res = 0;
- mutex_lock(&zr->resource_lock);
res = zoran_set_norm(zr, std);
if (res)
- goto sstd_unlock_and_return;
+ return res;
res = wait_grab_pending(zr);
-sstd_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2445,9 +2307,7 @@ static int zoran_enum_input(struct file *file, void *__fh,
inp->std = V4L2_STD_ALL;
/* Get status of video decoder */
- mutex_lock(&zr->resource_lock);
decoder_call(zr, video, g_input_status, &inp->status);
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2456,9 +2316,7 @@ static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- mutex_lock(&zr->resource_lock);
*input = zr->input;
- mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2469,15 +2327,12 @@ static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
struct zoran *zr = fh->zr;
int res;
- mutex_lock(&zr->resource_lock);
res = zoran_set_input(zr, input);
if (res)
- goto sinput_unlock_and_return;
+ return res;
/* Make sure the changes come into effect */
res = wait_grab_pending(zr);
-sinput_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2520,8 +2375,6 @@ static int zoran_cropcap(struct file *file, void *__fh,
memset(cropcap, 0, sizeof(*cropcap));
cropcap->type = type;
- mutex_lock(&zr->resource_lock);
-
if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fh->map_mode == ZORAN_MAP_MODE_RAW)) {
@@ -2529,7 +2382,7 @@ static int zoran_cropcap(struct file *file, void *__fh,
"%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
- goto cropcap_unlock_and_return;
+ return res;
}
cropcap->bounds.top = cropcap->bounds.left = 0;
@@ -2538,8 +2391,6 @@ static int zoran_cropcap(struct file *file, void *__fh,
cropcap->defrect.top = cropcap->defrect.left = 0;
cropcap->defrect.width = BUZ_MIN_WIDTH;
cropcap->defrect.height = BUZ_MIN_HEIGHT;
-cropcap_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2552,8 +2403,6 @@ static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
memset(crop, 0, sizeof(*crop));
crop->type = type;
- mutex_lock(&zr->resource_lock);
-
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
fh->map_mode == ZORAN_MAP_MODE_RAW)) {
@@ -2562,17 +2411,13 @@ static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
- goto gcrop_unlock_and_return;
+ return res;
}
crop->c.top = fh->jpg_settings.img_y;
crop->c.left = fh->jpg_settings.img_x;
crop->c.width = fh->jpg_settings.img_width;
crop->c.height = fh->jpg_settings.img_height;
-
-gcrop_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -2585,14 +2430,12 @@ static int zoran_s_crop(struct file *file, void *__fh, const struct v4l2_crop *c
settings = fh->jpg_settings;
- mutex_lock(&zr->resource_lock);
-
if (fh->buffers.allocated) {
dprintk(1, KERN_ERR
"%s: VIDIOC_S_CROP - cannot change settings while active\n",
ZR_DEVNAME(zr));
res = -EBUSY;
- goto scrop_unlock_and_return;
+ return res;
}
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
@@ -2602,7 +2445,7 @@ static int zoran_s_crop(struct file *file, void *__fh, const struct v4l2_crop *c
"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
res = -EINVAL;
- goto scrop_unlock_and_return;
+ return res;
}
/* move into a form that we understand */
@@ -2614,13 +2457,10 @@ static int zoran_s_crop(struct file *file, void *__fh, const struct v4l2_crop *c
/* check validity */
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
- goto scrop_unlock_and_return;
+ return res;
/* accept */
fh->jpg_settings = settings;
-
-scrop_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2628,11 +2468,8 @@ static int zoran_g_jpegcomp(struct file *file, void *__fh,
struct v4l2_jpegcompression *params)
{
struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
memset(params, 0, sizeof(*params));
- mutex_lock(&zr->resource_lock);
-
params->quality = fh->jpg_settings.jpg_comp.quality;
params->APPn = fh->jpg_settings.jpg_comp.APPn;
memcpy(params->APP_data,
@@ -2646,8 +2483,6 @@ static int zoran_g_jpegcomp(struct file *file, void *__fh,
params->jpeg_markers =
fh->jpg_settings.jpg_comp.jpeg_markers;
- mutex_unlock(&zr->resource_lock);
-
return 0;
}
@@ -2663,26 +2498,21 @@ static int zoran_s_jpegcomp(struct file *file, void *__fh,
settings.jpg_comp = *params;
- mutex_lock(&zr->resource_lock);
-
if (fh->buffers.active != ZORAN_FREE) {
dprintk(1, KERN_WARNING
"%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n",
ZR_DEVNAME(zr));
res = -EBUSY;
- goto sjpegc_unlock_and_return;
+ return res;
}
res = zoran_check_jpg_settings(zr, &settings, 0);
if (res)
- goto sjpegc_unlock_and_return;
+ return res;
if (!fh->buffers.allocated)
fh->buffers.buffer_size =
zoran_v4l2_calc_bufsize(&fh->jpg_settings);
fh->jpg_settings.jpg_comp = settings.jpg_comp;
-sjpegc_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -2692,7 +2522,8 @@ zoran_poll (struct file *file,
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
- int res = 0, frame;
+ int res = v4l2_ctrl_poll(file, wait);
+ int frame;
unsigned long flags;
/* we should check whether buffers are ready to be synced on
@@ -2703,8 +2534,6 @@ zoran_poll (struct file *file,
* if no buffers queued or so, return POLLNVAL
*/
- mutex_lock(&zr->resource_lock);
-
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
poll_wait(file, &zr->v4l_capq, wait);
@@ -2722,7 +2551,7 @@ zoran_poll (struct file *file,
if (fh->buffers.active != ZORAN_FREE &&
/* Buffer ready to DQBUF? */
zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE)
- res = POLLIN | POLLRDNORM;
+ res |= POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&zr->spinlock, flags);
break;
@@ -2743,9 +2572,9 @@ zoran_poll (struct file *file,
if (fh->buffers.active != ZORAN_FREE &&
zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) {
if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC)
- res = POLLIN | POLLRDNORM;
+ res |= POLLIN | POLLRDNORM;
else
- res = POLLOUT | POLLWRNORM;
+ res |= POLLOUT | POLLWRNORM;
}
spin_unlock_irqrestore(&zr->spinlock, flags);
@@ -2756,11 +2585,9 @@ zoran_poll (struct file *file,
KERN_ERR
"%s: %s - internal error, unknown map_mode=%d\n",
ZR_DEVNAME(zr), __func__, fh->map_mode);
- res = POLLNVAL;
+ res |= POLLERR;
}
- mutex_unlock(&zr->resource_lock);
-
return res;
}
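
Note on the poll rework above: res is now seeded with v4l2_ctrl_poll() and the buffer-readiness flags are ORed into it, so a pending control event (POLLPRI) is reported alongside POLLIN/POLLOUT instead of being overwritten. A minimal sketch of the same pattern for a hypothetical capture-only driver (foo_waitq and foo_frame_ready() are placeholders, not zoran code):

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
        /* control-event readiness first; this may set POLLPRI */
        unsigned int res = v4l2_ctrl_poll(file, wait);

        poll_wait(file, &foo_waitq, wait);
        /* OR the flag in rather than assigning, so POLLPRI survives */
        if (foo_frame_ready())
                res |= POLLIN | POLLRDNORM;
        return res;
}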
@@ -2792,9 +2619,6 @@ zoran_vm_close (struct vm_area_struct *vma)
struct zoran *zr = fh->zr;
int i;
- if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
- return;
-
dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
__func__, mode_name(fh->map_mode));
@@ -2807,7 +2631,6 @@ zoran_vm_close (struct vm_area_struct *vma)
/* Any buffers still mapped? */
for (i = 0; i < fh->buffers.num_buffers; i++) {
if (fh->buffers.buffer[i].map) {
- mutex_unlock(&zr->resource_lock);
return;
}
}
@@ -2815,7 +2638,6 @@ zoran_vm_close (struct vm_area_struct *vma)
dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
__func__, mode_name(fh->map_mode));
-
if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
if (fh->buffers.active != ZORAN_FREE) {
unsigned long flags;
@@ -2835,8 +2657,6 @@ zoran_vm_close (struct vm_area_struct *vma)
}
jpg_fbuffer_free(fh);
}
-
- mutex_unlock(&zr->resource_lock);
}
static const struct vm_operations_struct zoran_vm_ops = {
@@ -2872,15 +2692,13 @@ zoran_mmap (struct file *file,
return -EINVAL;
}
- mutex_lock(&zr->resource_lock);
-
if (!fh->buffers.allocated) {
dprintk(1,
KERN_ERR
"%s: %s(%s) - buffers not yet allocated\n",
ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode));
res = -ENOMEM;
- goto mmap_unlock_and_return;
+ return res;
}
first = offset / fh->buffers.buffer_size;
@@ -2896,7 +2714,7 @@ zoran_mmap (struct file *file,
fh->buffers.buffer_size,
fh->buffers.num_buffers);
res = -EINVAL;
- goto mmap_unlock_and_return;
+ return res;
}
/* Check if any buffers are already mapped */
@@ -2907,7 +2725,7 @@ zoran_mmap (struct file *file,
"%s: %s(%s) - buffer %d already mapped\n",
ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i);
res = -EBUSY;
- goto mmap_unlock_and_return;
+ return res;
}
}
@@ -2915,7 +2733,7 @@ zoran_mmap (struct file *file,
map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL);
if (!map) {
res = -ENOMEM;
- goto mmap_unlock_and_return;
+ return res;
}
map->fh = fh;
atomic_set(&map->count, 1);
@@ -2937,7 +2755,7 @@ zoran_mmap (struct file *file,
"%s: %s(V4L) - remap_pfn_range failed\n",
ZR_DEVNAME(zr), __func__);
res = -EAGAIN;
- goto mmap_unlock_and_return;
+ return res;
}
size -= todo;
start += todo;
@@ -2969,7 +2787,7 @@ zoran_mmap (struct file *file,
"%s: %s(V4L) - remap_pfn_range failed\n",
ZR_DEVNAME(zr), __func__);
res = -EAGAIN;
- goto mmap_unlock_and_return;
+ return res;
}
size -= todo;
start += todo;
@@ -2985,10 +2803,6 @@ zoran_mmap (struct file *file,
}
}
-
-mmap_unlock_and_return:
- mutex_unlock(&zr->resource_lock);
-
return res;
}
@@ -3028,33 +2842,15 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap,
.vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out,
.vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay,
- .vidioc_queryctrl = zoran_queryctrl,
- .vidioc_s_ctrl = zoran_s_ctrl,
- .vidioc_g_ctrl = zoran_g_ctrl,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-/* please use zr->resource_lock consistently and kill this wrapper */
-static long zoran_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct zoran_fh *fh = file->private_data;
- struct zoran *zr = fh->zr;
- int ret;
-
- mutex_lock(&zr->other_lock);
- ret = video_ioctl2(file, cmd, arg);
- mutex_unlock(&zr->other_lock);
-
- return ret;
-}
-
static const struct v4l2_file_operations zoran_fops = {
.owner = THIS_MODULE,
.open = zoran_open,
.release = zoran_close,
- .unlocked_ioctl = zoran_ioctl,
- .read = zoran_read,
- .write = zoran_write,
+ .unlocked_ioctl = video_ioctl2,
.mmap = zoran_mmap,
.poll = zoran_poll,
};
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f6bed197130c..dc75694ac12d 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -1,6 +1,6 @@
#
# Platform drivers
-# All drivers here are currently for webcam support
+# Most drivers here are currently for webcam support
menuconfig V4L_PLATFORM_DRIVERS
bool "V4L platform devices"
@@ -86,7 +86,7 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
- depends on HAS_DMA
+ depends on HAS_DMA && OF
depends on OMAP_IOMMU
select ARM_DMA_USE_IOMMU
select VIDEOBUF2_DMA_CONTIG
@@ -231,6 +231,18 @@ config VIDEO_SH_VEU
Support for the Video Engine Unit (VEU) on SuperH and
SH-Mobile SoCs.
+config VIDEO_RENESAS_JPU
+ tristate "Renesas JPEG Processing Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for the Renesas JPEG Processing Unit.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_jpu.
+
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
@@ -280,3 +292,14 @@ config VIDEO_VIM2M
This is a virtual test device for the memory-to-memory driver
framework.
endif #V4L_TEST_DRIVERS
+
+menuconfig DVB_PLATFORM_DRIVERS
+ bool "DVB platform devices"
+ depends on MEDIA_DIGITAL_TV_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific Digital TV drivers.
+
+if DVB_PLATFORM_DRIVERS
+source "drivers/media/platform/sti/c8sectpfe/Kconfig"
+endif #DVB_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 114f9aba1c00..efa0295af87b 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
+obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
obj-$(CONFIG_BLACKFIN) += blackfin/
@@ -44,6 +45,7 @@ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
obj-y += omap/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 1fba339cddc1..c8447fa3fd91 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1186,14 +1186,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
static int vpfe_release(struct file *file)
{
struct vpfe_device *vpfe = video_drvdata(file);
+ bool fh_singular;
int ret;
mutex_lock(&vpfe->lock);
- if (v4l2_fh_is_singular_file(file))
- vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+	/* the release helper will clean up any ongoing streaming */
ret = _vb2_fop_release(file, NULL);
+ /*
+	 * If this was the last open file,
+	 * then de-initialize the hardware module.
+ */
+ if (fh_singular)
+ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+
mutex_unlock(&vpfe->lock);
return ret;
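
The reordering above matters because _vb2_fop_release() tears down the V4L2 file handle that v4l2_fh_is_singular_file() inspects, so the answer must be sampled into a local first. A condensed sketch of the resulting order of operations (error paths elided):

static int vpfe_release_sketch(struct file *file)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        bool last;
        int ret;

        mutex_lock(&vpfe->lock);
        /* sample before the helper frees the file handle */
        last = v4l2_fh_is_singular_file(file);
        ret = _vb2_fop_release(file, NULL);     /* also stops streaming */
        if (last)
                vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
        mutex_unlock(&vpfe->lock);
        return ret;
}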
@@ -1565,7 +1575,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
- ret = vpfe_try_fmt(file, priv, fmt);
+ ret = vpfe_try_fmt(file, priv, &format);
if (ret)
return ret;
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 834e504bf085..9342ac57b230 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,5 +1,5 @@
ccflags-y += -I$(src)
-coda-objs := coda-common.o coda-bit.o coda-h264.o coda-jpeg.o
+coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 109797bb8fbb..fd7819d8922d 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -226,8 +226,12 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
{
struct vb2_buffer *src_buf;
struct coda_buffer_meta *meta;
+ unsigned long flags;
u32 start;
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
+ return;
+
while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
/*
* Only queue a single JPEG into the bitstream buffer, except
@@ -252,6 +256,13 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
continue;
}
+		/* Drop empty buffers */
+ if (!vb2_get_plane_payload(src_buf, 0)) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ continue;
+ }
+
/* Buffer start position */
start = ctx->bitstream_fifo.kfifo.in &
ctx->bitstream_fifo.kfifo.mask;
@@ -271,8 +282,13 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
meta->start = start;
meta->end = ctx->bitstream_fifo.kfifo.in &
ctx->bitstream_fifo.kfifo.mask;
+ spin_lock_irqsave(&ctx->buffer_meta_lock,
+ flags);
list_add_tail(&meta->list,
&ctx->buffer_meta_list);
+ ctx->num_metas++;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock,
+ flags);
trace_coda_bit_queue(ctx, src_buf, meta);
}
@@ -331,7 +347,6 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
{
struct coda_dev *dev = ctx->dev;
int width, height;
- dma_addr_t paddr;
int ysize;
int ret;
int i;
@@ -351,7 +366,10 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
size_t size;
char *name;
- size = ysize + ysize / 2;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ size = round_up(ysize, 4096) + ysize / 2;
+ else
+ size = ysize + ysize / 2;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
dev->devtype->product != CODA_DX6)
size += ysize / 4;
@@ -367,11 +385,23 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
/* Register frame buffers in the parameter buffer */
for (i = 0; i < ctx->num_internal_frames; i++) {
- paddr = ctx->internal_frames[i].paddr;
+ u32 y, cb, cr;
+
/* Start addresses of Y, Cb, Cr planes */
- coda_parabuf_write(ctx, i * 3 + 0, paddr);
- coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize);
- coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize / 4);
+ y = ctx->internal_frames[i].paddr;
+ cb = y + ysize;
+ cr = y + ysize + ysize/4;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
+ cb = round_up(cb, 4096);
+ cr = 0;
+ /* Packed 20-bit MSB of base addresses */
+ /* YYYYYCCC, CCyyyyyc, cccc.... */
+ y = (y & 0xfffff000) | cb >> 20;
+ cb = (cb & 0x000ff000) << 12;
+ }
+ coda_parabuf_write(ctx, i * 3 + 0, y);
+ coda_parabuf_write(ctx, i * 3 + 1, cb);
+ coda_parabuf_write(ctx, i * 3 + 2, cr);
/* mvcol buffer for h.264 */
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
@@ -384,7 +414,7 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
/* mvcol buffer for mpeg4 */
if ((dev->devtype->product != CODA_DX6) &&
(ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
- coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+ coda_parabuf_write(ctx, 97, ctx->internal_frames[0].paddr +
ysize + ysize/4 + ysize/4);
return 0;
@@ -712,6 +742,32 @@ err_clk_per:
return ret;
}
+static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
+{
+ u32 cache_size, cache_config;
+
+ if (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) {
+ /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
+ cache_size = 0x20262024;
+ cache_config = 2 << CODA9_CACHE_PAGEMERGE_OFFSET;
+ } else {
+ /* Luma 0x2 page, 4x4 cache, chroma 0x2 page, 4x3 cache size */
+ cache_size = 0x02440243;
+ cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
+ }
+ coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
+ if (fourcc == V4L2_PIX_FMT_NV12) {
+ cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+ 16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+ 0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+ } else {
+ cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
+ }
+ coda_write(ctx->dev, cache_config, CODA9_CMD_SET_FRAME_CACHE_CONFIG);
+}
+
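
Going by the in-line comments, the magic cache constants pack four (x, y) byte fields: 0x20262024 decomposes into luma page 0x20 (2x0), luma cache 0x26 (2x6), chroma page 0x20 (2x0) and chroma cache 0x24 (2x4). A speculative helper that would rebuild the same values (field layout inferred from the comments above, not from CODA documentation):

/* pack (x, y) nibble pairs: luma page, luma cache, chroma page, chroma cache */
#define CODA9_CACHE_SIZE(lpx, lpy, lcx, lcy, cpx, cpy, ccx, ccy)	\
	((lpx) << 28 | (lpy) << 24 | (lcx) << 20 | (lcy) << 16 |	\
	 (cpx) << 12 | (cpy) << 8 | (ccx) << 4 | (ccy))

/* CODA9_CACHE_SIZE(2, 0, 2, 6, 2, 0, 2, 4) == 0x20262024 (linear map) */
/* CODA9_CACHE_SIZE(0, 2, 4, 4, 0, 2, 4, 3) == 0x02440243 (tiled map)  */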
/*
* Encoder context operations
*/
@@ -789,9 +845,12 @@ static int coda_start_encoding(struct coda_ctx *ctx)
break;
}
- ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+ CODA9_FRAME_TILED2LINEAR);
if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
if (dev->devtype->product == CODA_DX6) {
@@ -913,6 +972,9 @@ static int coda_start_encoding(struct coda_ctx *ctx)
value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
<< CODA_RATECONTROL_BITRATE_OFFSET;
value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
+ value |= (ctx->params.vbv_delay &
+ CODA_RATECONTROL_INITIALDELAY_MASK)
+ << CODA_RATECONTROL_INITIALDELAY_OFFSET;
if (dev->devtype->product == CODA_960)
value |= BIT(31); /* disable autoskip */
} else {
@@ -920,7 +982,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
- coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+ coda_write(dev, ctx->params.vbv_size, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
coda_write(dev, ctx->params.intra_refresh,
CODA_CMD_ENC_SEQ_INTRA_REFRESH);
@@ -996,6 +1058,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
ret = -EFAULT;
goto out;
}
+ ctx->initialized = 1;
if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
if (dev->devtype->product == CODA_960)
@@ -1036,6 +1099,8 @@ static int coda_start_encoding(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.buf_btp_use,
CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
+ coda9_set_frame_cache(ctx, q_data_src->fourcc);
+
/* FIXME */
coda_write(dev, ctx->internal_frames[2].paddr,
CODA9_CMD_SET_FRAME_SUBSAMP_A);
@@ -1326,6 +1391,9 @@ static void coda_seq_end_work(struct work_struct *work)
mutex_lock(&ctx->buffer_mutex);
mutex_lock(&dev->coda_mutex);
+ if (ctx->initialized == 0)
+ goto out;
+
v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx,
__func__);
@@ -1334,11 +1402,22 @@ static void coda_seq_end_work(struct work_struct *work)
"CODA_COMMAND_SEQ_END failed\n");
}
+ /*
+ * FIXME: Sometimes h.264 encoding fails with 8-byte sequences missing
+ * from the output stream after the h.264 decoder has run. Resetting the
+ * hardware after the decoder has finished seems to help.
+ */
+ if (dev->devtype->product == CODA_960)
+ coda_hw_reset(ctx);
+
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
coda_free_framebuffers(ctx);
+ ctx->initialized = 0;
+
+out:
mutex_unlock(&dev->coda_mutex);
mutex_unlock(&ctx->buffer_mutex);
}
@@ -1448,9 +1527,12 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
/* Update coda bitstream read and write pointers from kfifo */
coda_kfifo_sync_to_device_full(ctx);
- ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
+ CODA9_FRAME_TILED2LINEAR);
if (dst_fourcc == V4L2_PIX_FMT_NV12)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
ctx->display_idx = -1;
@@ -1496,6 +1578,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
return -ETIMEDOUT;
}
+ ctx->initialized = 1;
/* Update kfifo out pointer from coda bitstream read pointer */
coda_kfifo_sync_from_device(ctx);
@@ -1578,30 +1661,13 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
coda_write(dev, ctx->iram_info.buf_ovl_use,
CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
- if (dev->devtype->product == CODA_960)
+ if (dev->devtype->product == CODA_960) {
coda_write(dev, ctx->iram_info.buf_btp_use,
CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
- }
- if (dev->devtype->product == CODA_960) {
- int cbb_size, crb_size;
-
- coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
- /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
- coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
-
- if (dst_fourcc == V4L2_PIX_FMT_NV12) {
- cbb_size = 0;
- crb_size = 16;
- } else {
- cbb_size = 8;
- crb_size = 8;
+ coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+ coda9_set_frame_cache(ctx, dst_fourcc);
}
- coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
- 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
- cbb_size << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
- crb_size << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
- CODA9_CMD_SET_FRAME_CACHE_CONFIG);
}
if (src_fourcc == V4L2_PIX_FMT_H264) {
@@ -1654,6 +1720,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
struct coda_buffer_meta *meta;
+ unsigned long flags;
u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1732,6 +1799,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
meta = list_first_entry_or_null(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
@@ -1751,6 +1819,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
kfifo_in(&ctx->bitstream_fifo, buf, pad);
}
}
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
coda_kfifo_sync_to_device_full(ctx);
@@ -1772,6 +1841,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct vb2_buffer *dst_buf;
struct coda_buffer_meta *meta;
unsigned long payload;
+ unsigned long flags;
int width, height;
int decoded_idx;
int display_idx;
@@ -1897,12 +1967,21 @@ static void coda_finish_decode(struct coda_ctx *ctx)
} else {
val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
val -= ctx->sequence_offset;
- mutex_lock(&ctx->bitstream_mutex);
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
if (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
list_del(&meta->list);
- if (val != (meta->sequence & 0xffff)) {
+ ctx->num_metas--;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ /*
+ * Clamp counters to 16 bits for comparison, as the HW
+ * counter rolls over at this point for h.264. This
+ * may be different for other formats, but using 16 bits
+ * should be enough to detect most errors and saves us
+ * from doing different things based on the format.
+ */
+ if ((val & 0xffff) != (meta->sequence & 0xffff)) {
v4l2_err(&dev->v4l2_dev,
"sequence number mismatch (%d(%d) != %d)\n",
val, ctx->sequence_offset,
@@ -1911,13 +1990,13 @@ static void coda_finish_decode(struct coda_ctx *ctx)
ctx->frame_metas[decoded_idx] = *meta;
kfree(meta);
} else {
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
memset(&ctx->frame_metas[decoded_idx], 0,
sizeof(struct coda_buffer_meta));
ctx->frame_metas[decoded_idx].sequence = val;
ctx->sequence_offset++;
}
- mutex_unlock(&ctx->bitstream_mutex);
trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]);
@@ -1960,7 +2039,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
dst_buf->v4l2_buf.timecode = meta->timecode;
dst_buf->v4l2_buf.timestamp = meta->timestamp;
- trace_coda_dec_rot_done(ctx, meta, dst_buf);
+ trace_coda_dec_rot_done(ctx, dst_buf, meta);
switch (q_data_dst->fourcc) {
case V4L2_PIX_FMT_YUV420:
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 58f65486de33..a4654e0c104d 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/gcd.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -61,10 +62,9 @@ int coda_debug;
module_param(coda_debug, int, 0644);
MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
-struct coda_fmt {
- char *name;
- u32 fourcc;
-};
+static int disable_tiling;
+module_param(disable_tiling, int, 0644);
+MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers");
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
@@ -90,17 +90,17 @@ void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
u32 base_cb, base_cr;
switch (q_data->fourcc) {
- case V4L2_PIX_FMT_YVU420:
- /* Switch Cb and Cr for YVU420 format */
- base_cr = base_y + q_data->bytesperline * q_data->height;
- base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
- break;
- case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YUV420:
default:
base_cb = base_y + q_data->bytesperline * q_data->height;
base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
break;
+ case V4L2_PIX_FMT_YVU420:
+ /* Switch Cb and Cr for YVU420 format */
+ base_cr = base_y + q_data->bytesperline * q_data->height;
+ base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
+ break;
case V4L2_PIX_FMT_YUV422P:
base_cb = base_y + q_data->bytesperline * q_data->height;
base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
@@ -111,40 +111,6 @@ void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
coda_write(ctx->dev, base_cr, reg_y + 8);
}
-/*
- * Array of all formats supported by any version of Coda:
- */
-static const struct coda_fmt coda_formats[] = {
- {
- .name = "YUV 4:2:0 Planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .name = "YUV 4:2:0 Planar, YCrCb",
- .fourcc = V4L2_PIX_FMT_YVU420,
- },
- {
- .name = "YUV 4:2:0 Partial interleaved Y/CbCr",
- .fourcc = V4L2_PIX_FMT_NV12,
- },
- {
- .name = "YUV 4:2:2 Planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV422P,
- },
- {
- .name = "H264 Encoded Stream",
- .fourcc = V4L2_PIX_FMT_H264,
- },
- {
- .name = "MPEG4 Encoded Stream",
- .fourcc = V4L2_PIX_FMT_MPEG4,
- },
- {
- .name = "JPEG Encoded Images",
- .fourcc = V4L2_PIX_FMT_JPEG,
- },
-};
-
#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
{ mode, src_fourcc, dst_fourcc, max_w, max_h }
@@ -190,9 +156,9 @@ static const struct coda_video_device coda_bit_encoder = {
.type = CODA_INST_ENCODER,
.ops = &coda_bit_encode_ops,
.src_formats = {
+ V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
- V4L2_PIX_FMT_NV12,
},
.dst_formats = {
V4L2_PIX_FMT_H264,
@@ -205,9 +171,9 @@ static const struct coda_video_device coda_bit_jpeg_encoder = {
.type = CODA_INST_ENCODER,
.ops = &coda_bit_encode_ops,
.src_formats = {
+ V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
- V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV422P,
},
.dst_formats = {
@@ -224,9 +190,9 @@ static const struct coda_video_device coda_bit_decoder = {
V4L2_PIX_FMT_MPEG4,
},
.dst_formats = {
+ V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
- V4L2_PIX_FMT_NV12,
},
};
@@ -238,9 +204,9 @@ static const struct coda_video_device coda_bit_jpeg_decoder = {
V4L2_PIX_FMT_JPEG,
},
.dst_formats = {
+ V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
- V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV422P,
},
};
@@ -261,38 +227,21 @@ static const struct coda_video_device *coda9_video_devices[] = {
&coda_bit_decoder,
};
-static bool coda_format_is_yuv(u32 fourcc)
+/*
+ * Normalize all supported YUV formats to the single value used in the
+ * codec tables.
+ */
+static u32 coda_format_normalize_yuv(u32 fourcc)
{
switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV422P:
- return true;
+ return V4L2_PIX_FMT_YUV420;
default:
- return false;
- }
-}
-
-static const char *coda_format_name(u32 fourcc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coda_formats); i++) {
- if (coda_formats[i].fourcc == fourcc)
- return coda_formats[i].name;
+ return fourcc;
}
-
- return NULL;
-}
-
-/*
- * Normalize all supported YUV 4:2:0 formats to the value used in the codec
- * tables.
- */
-static u32 coda_format_normalize_yuv(u32 fourcc)
-{
- return coda_format_is_yuv(fourcc) ? V4L2_PIX_FMT_YUV420 : fourcc;
}
static const struct coda_codec *coda_find_codec(struct coda_dev *dev,
@@ -396,7 +345,6 @@ static int coda_enum_fmt(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
const struct coda_video_device *cvd = to_coda_video_device(vdev);
const u32 *formats;
- const char *name;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
formats = cvd->src_formats;
@@ -408,11 +356,7 @@ static int coda_enum_fmt(struct file *file, void *priv,
if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
return -EINVAL;
- name = coda_format_name(formats[f->index]);
- strlcpy(f->description, name, sizeof(f->description));
f->pixelformat = formats[f->index];
- if (!coda_format_is_yuv(formats[f->index]))
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
@@ -504,9 +448,9 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
S_ALIGN);
switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_NV12:
/*
* Frame stride must be at least multiple of 8,
* but multiple of 16 for h.264 or JPEG 4:2:x
@@ -645,6 +589,22 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
q_data->rect.width = f->fmt.pix.width;
q_data->rect.height = f->fmt.pix.height;
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
+ if (!disable_tiling)
+ break;
+ }
+ /* else fall through */
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
+ break;
+ default:
+ break;
+ }
+
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
f->type, q_data->width, q_data->height, q_data->fourcc);
@@ -831,6 +791,104 @@ static int coda_decoder_cmd(struct file *file, void *fh,
return 0;
}
+static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_fract *tpf;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ tpf = &a->parm.output.timeperframe;
+ tpf->denominator = ctx->params.framerate & CODA_FRATE_RES_MASK;
+ tpf->numerator = 1 + (ctx->params.framerate >>
+ CODA_FRATE_DIV_OFFSET);
+
+ return 0;
+}
+
+/*
+ * Approximate timeperframe v4l2_fract with values that can be written
+ * into the 16-bit CODA_FRATE_DIV and CODA_FRATE_RES fields.
+ */
+static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe)
+{
+ struct v4l2_fract s = *timeperframe;
+ struct v4l2_fract f0;
+ struct v4l2_fract f1 = { 1, 0 };
+ struct v4l2_fract f2 = { 0, 1 };
+ unsigned int i, div, s_denominator;
+
+ /* Lower bound is 1/65535 */
+ if (s.numerator == 0 || s.denominator / s.numerator > 65535) {
+ timeperframe->numerator = 1;
+ timeperframe->denominator = 65535;
+ return;
+ }
+
+ /* Upper bound is 65536/1, map everything above to infinity */
+ if (s.denominator == 0 || s.numerator / s.denominator > 65536) {
+ timeperframe->numerator = 1;
+ timeperframe->denominator = 0;
+ return;
+ }
+
+ /* Reduce fraction to lowest terms */
+ div = gcd(s.numerator, s.denominator);
+ if (div > 1) {
+ s.numerator /= div;
+ s.denominator /= div;
+ }
+
+ if (s.numerator <= 65536 && s.denominator < 65536) {
+ *timeperframe = s;
+ return;
+ }
+
+ /* Find successive convergents from continued fraction expansion */
+ while (f2.numerator <= 65536 && f2.denominator < 65536) {
+ f0 = f1;
+ f1 = f2;
+
+ /* Stop when f2 exactly equals timeperframe */
+ if (s.numerator == 0)
+ break;
+
+ i = s.denominator / s.numerator;
+
+ f2.numerator = f0.numerator + i * f1.numerator;
+		f2.denominator = f0.denominator + i * f1.denominator;
+
+ s_denominator = s.numerator;
+ s.numerator = s.denominator % s.numerator;
+ s.denominator = s_denominator;
+ }
+
+ *timeperframe = f1;
+}
+
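
coda_approximate_timeperframe() walks the continued-fraction convergents of the requested fraction and keeps the last one whose numerator and denominator still fit the 16-bit register fields. A standalone userspace re-implementation of just the convergent loop, for illustration (the gcd reduction and range pre-checks of the kernel helper are elided):

#include <stdio.h>

struct fract { unsigned int n, d; };

static struct fract approx(struct fract s)
{
        struct fract f0, f1 = { 1, 0 }, f2 = { 0, 1 };
        unsigned int i, tmp;

        /* same bounds as the CODA_FRATE_DIV/RES fields */
        while (f2.n <= 65536 && f2.d < 65536) {
                f0 = f1;
                f1 = f2;
                if (s.n == 0)
                        break;          /* f1 is already exact */
                i = s.d / s.n;
                f2.n = f0.n + i * f1.n;
                f2.d = f0.d + i * f1.d;
                tmp = s.n;
                s.n = s.d % s.n;
                s.d = tmp;
        }
        return f1;
}

int main(void)
{
        /* 65537/131075 is irreducible and too wide for 16-bit fields;
         * its convergents are 0/1, 1/2, 65537/131075, so 1/2 is kept */
        struct fract r = approx((struct fract){ 65537, 131075 });

        printf("%u/%u\n", r.n, r.d);    /* prints 1/2 */
        return 0;
}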
+static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
+{
+ return ((timeperframe->numerator - 1) << CODA_FRATE_DIV_OFFSET) |
+ timeperframe->denominator;
+}
+
+static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_fract *tpf;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ tpf = &a->parm.output.timeperframe;
+ coda_approximate_timeperframe(tpf);
+ ctx->params.framerate = coda_timeperframe_to_frate(tpf);
+
+ return 0;
+}
+
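
With these handlers, the 32-bit framerate word keeps the timeperframe denominator in its low half and (numerator - 1) in its high half, matching the CODA_FRATE_RES/CODA_FRATE_DIV fields added to coda_regs.h further down. A round-trip example:

/* 30000/1001 fps, i.e. timeperframe = 1001/30000 */
u32 frate = ((1001 - 1) << 16) | 30000;	/* 0x03e87530 */

/* decoded again, as coda_g_parm() does */
u32 denominator = frate & 0xffff;	/* 30000 */
u32 numerator = 1 + (frate >> 16);	/* 1001  */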
static int coda_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
@@ -871,31 +929,13 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_try_decoder_cmd = coda_try_decoder_cmd,
.vidioc_decoder_cmd = coda_decoder_cmd,
+ .vidioc_g_parm = coda_g_parm,
+ .vidioc_s_parm = coda_s_parm,
+
.vidioc_subscribe_event = coda_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-void coda_set_gdi_regs(struct coda_ctx *ctx)
-{
- struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
- struct coda_dev *dev = ctx->dev;
- int i;
-
- for (i = 0; i < 16; i++)
- coda_write(dev, tiled_map->xy2ca_map[i],
- CODA9_GDI_XY2_CAS_0 + 4 * i);
- for (i = 0; i < 4; i++)
- coda_write(dev, tiled_map->xy2ba_map[i],
- CODA9_GDI_XY2_BA_0 + 4 * i);
- for (i = 0; i < 16; i++)
- coda_write(dev, tiled_map->xy2ra_map[i],
- CODA9_GDI_XY2_RAS_0 + 4 * i);
- coda_write(dev, tiled_map->xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
- for (i = 0; i < 32; i++)
- coda_write(dev, tiled_map->rbc2axi_map[i],
- CODA9_GDI_RBC2_AXI_0 + 4 * i);
-}
-
/*
* Mem-to-mem operations.
*/
@@ -949,14 +989,14 @@ static void coda_pic_run_work(struct work_struct *work)
static int coda_job_ready(void *m2m_priv)
{
struct coda_ctx *ctx = m2m_priv;
+ int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
/*
* For both 'P' and 'key' frame cases 1 picture
* and 1 frame are needed. In the decoder case,
* the compressed frame can be in the bitstream.
*/
- if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
- ctx->inst_type != CODA_INST_DECODER) {
+ if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"not ready: not enough video buffers.\n");
return 0;
@@ -969,27 +1009,17 @@ static int coda_job_ready(void *m2m_priv)
}
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
- struct list_head *meta;
- bool stream_end;
- int num_metas;
- int src_bufs;
+ bool stream_end = ctx->bit_stream_param &
+ CODA_BIT_STREAM_END_FLAG;
+ int num_metas = ctx->num_metas;
- if (ctx->hold && !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ if (ctx->hold && !src_bufs) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"%d: not ready: on hold for more buffers.\n",
ctx->idx);
return 0;
}
- stream_end = ctx->bit_stream_param &
- CODA_BIT_STREAM_END_FLAG;
-
- num_metas = 0;
- list_for_each(meta, &ctx->buffer_meta_list)
- num_metas++;
-
- src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
-
if (!stream_end && (num_metas + src_bufs) < 2) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"%d: not ready: need 2 buffers available (%d, %d)\n",
@@ -998,8 +1028,8 @@ static int coda_job_ready(void *m2m_priv)
}
- if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
- !stream_end && (coda_get_bitstream_payload(ctx) < 512)) {
+ if (!src_bufs && !stream_end &&
+ (coda_get_bitstream_payload(ctx) < 512)) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"%d: not ready: not enough bitstream data (%d).\n",
ctx->idx, coda_get_bitstream_payload(ctx));
@@ -1015,6 +1045,7 @@ static int coda_job_ready(void *m2m_priv)
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"job ready\n");
+
return 1;
}
@@ -1052,32 +1083,6 @@ static const struct v4l2_m2m_ops coda_m2m_ops = {
.unlock = coda_unlock,
};
-static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
-{
- struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
- int luma_map, chro_map, i;
-
- memset(tiled_map, 0, sizeof(*tiled_map));
-
- luma_map = 64;
- chro_map = 64;
- tiled_map->map_type = tiled_map_type;
- for (i = 0; i < 16; i++)
- tiled_map->xy2ca_map[i] = luma_map << 8 | chro_map;
- for (i = 0; i < 4; i++)
- tiled_map->xy2ba_map[i] = luma_map << 8 | chro_map;
- for (i = 0; i < 16; i++)
- tiled_map->xy2ra_map[i] = luma_map << 8 | chro_map;
-
- if (tiled_map_type == GDI_LINEAR_FRAME_MAP) {
- tiled_map->xy2rbc_config = 0;
- } else {
- dev_err(&ctx->dev->plat_dev->dev, "invalid map type: %d\n",
- tiled_map_type);
- return;
- }
-}
-
static void set_default_params(struct coda_ctx *ctx)
{
unsigned int max_w, max_h, usize, csize;
@@ -1094,8 +1099,8 @@ static void set_default_params(struct coda_ctx *ctx)
ctx->params.framerate = 30;
/* Default formats for output and input queues */
- ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
- ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc;
+ ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->cvd->src_formats[0];
+ ctx->q_data[V4L2_M2M_DST].fourcc = ctx->cvd->dst_formats[0];
ctx->q_data[V4L2_M2M_SRC].width = max_w;
ctx->q_data[V4L2_M2M_SRC].height = max_h;
ctx->q_data[V4L2_M2M_DST].width = max_w;
@@ -1116,8 +1121,11 @@ static void set_default_params(struct coda_ctx *ctx)
ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
- if (ctx->dev->devtype->product == CODA_960)
- coda_set_tiled_map_type(ctx, GDI_LINEAR_FRAME_MAP);
+ /*
+ * Since the RBC2AXI logic only supports a single chroma plane,
+	 * macroblock tiling only works with the NV12 pixel format.
+ */
+ ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
}
/*
@@ -1244,9 +1252,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- if (q_data_src->fourcc == V4L2_PIX_FMT_H264 ||
- (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
- ctx->dev->devtype->product == CODA_7541)) {
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
/* copy the buffers that were queued before streamon */
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx, false);
@@ -1315,7 +1321,6 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
goto err;
}
- ctx->initialized = 1;
return ret;
err:
@@ -1334,6 +1339,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct coda_dev *dev = ctx->dev;
struct vb2_buffer *buf;
+ unsigned long flags;
bool stop;
stop = ctx->streamon_out && ctx->streamon_cap;
@@ -1368,20 +1374,23 @@ static void coda_stop_streaming(struct vb2_queue *q)
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
- mutex_lock(&ctx->bitstream_mutex);
+ spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
while (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
list_del(&meta->list);
kfree(meta);
}
- mutex_unlock(&ctx->bitstream_mutex);
+ ctx->num_metas = 0;
+ spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
- ctx->initialized = 0;
ctx->runcounter = 0;
ctx->aborting = 0;
}
+
+ if (!ctx->streamon_out && !ctx->streamon_cap)
+ ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
}
static const struct vb2_ops coda_qops = {
@@ -1469,6 +1478,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_JPEG_RESTART_INTERVAL:
ctx->params.jpeg_restart_interval = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY:
+ ctx->params.vbv_delay = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff);
+ break;
default:
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"Invalid control, id=%d, val=%d\n",
@@ -1528,6 +1543,14 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
1920 * 1088 / 256, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_DELAY, 0, 0x7fff, 1, 0);
+ /*
+ * The maximum VBV size value is 0x7fffffff bits,
+ * one bit less than 262144 KiB
+ */
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_SIZE, 0, 262144, 1, 0);
}
static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
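
The VBV size control is specified in kilobytes while the hardware register takes bits, hence the factor of 8192 (1024 * 8) in coda_s_ctrl() above and the clamp: the maximum control value of 262144 KiB is exactly 2^31 bits, one more than the 0x7fffffff the register field can hold. Worked out:

/* KiB -> bits, clamped to the 31-bit register range */
u32 vbv_bits = min(262144u * 8192u, 0x7fffffffu);	/* 0x7fffffff */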
@@ -1726,6 +1749,7 @@ static int coda_open(struct file *file)
mutex_init(&ctx->bitstream_mutex);
mutex_init(&ctx->buffer_mutex);
INIT_LIST_HEAD(&ctx->buffer_meta_list);
+ spin_lock_init(&ctx->buffer_meta_lock);
coda_lock(ctx);
list_add(&ctx->list, &dev->instances);
@@ -1769,7 +1793,7 @@ static int coda_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
/* In case the instance was not running, we still need to call SEQ_END */
- if (ctx->initialized && ctx->ops->seq_end_work) {
+ if (ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
@@ -2157,7 +2181,7 @@ static int coda_probe(struct platform_device *pdev)
/* Get IRAM pool from device tree or platform data */
pool = of_gen_pool_get(np, "iram", 0);
if (!pool && pdata)
- pool = gen_pool_get(pdata->iram_dev);
+ pool = gen_pool_get(pdata->iram_dev, NULL);
if (!pool) {
dev_err(&pdev->dev, "iram pool not available\n");
return -ENOMEM;
diff --git a/drivers/media/platform/coda/coda-gdi.c b/drivers/media/platform/coda/coda-gdi.c
new file mode 100644
index 000000000000..aaa7afc6870f
--- /dev/null
+++ b/drivers/media/platform/coda/coda-gdi.c
@@ -0,0 +1,150 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include "coda.h"
+
+#define XY2_INVERT BIT(7)
+#define XY2_ZERO BIT(6)
+#define XY2_TB_XOR BIT(5)
+#define XY2_XYSEL BIT(4)
+#define XY2_Y (1 << 4)
+#define XY2_X (0 << 4)
+
+#define XY2(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+ (((XY2_##luma_sel) | (luma_bit)) << 8 | \
+ (XY2_##chroma_sel) | (chroma_bit))
+
+static const u16 xy2ca_zero_map[16] = {
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+};
+
+static const u16 xy2ca_tiled_map[16] = {
+ XY2(Y, 0, Y, 0),
+ XY2(Y, 1, Y, 1),
+ XY2(Y, 2, Y, 2),
+ XY2(Y, 3, X, 3),
+ XY2(X, 3, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+ XY2(ZERO, 0, ZERO, 0),
+};
+
+/*
+ * RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
+ * start offset (macroblock size is 16x16 for luma, 16x8 for chroma).
+ * Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
+ */
+
+#define RBC_CA (0 << 4)
+#define RBC_BA (1 << 4)
+#define RBC_RA (2 << 4)
+#define RBC_ZERO (3 << 4)
+
+#define RBC(luma_sel, luma_bit, chroma_sel, chroma_bit) \
+ (((RBC_##luma_sel) | (luma_bit)) << 6 | \
+ (RBC_##chroma_sel) | (chroma_bit))
+
+static const u16 rbc2axi_tiled_map[32] = {
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(ZERO, 0, ZERO, 0),
+ RBC(CA, 0, CA, 0),
+ RBC(CA, 1, CA, 1),
+ RBC(CA, 2, CA, 2),
+ RBC(CA, 3, CA, 3),
+ RBC(CA, 4, CA, 8),
+ RBC(CA, 8, CA, 9),
+ RBC(CA, 9, CA, 10),
+ RBC(CA, 10, CA, 11),
+ RBC(CA, 11, CA, 12),
+ RBC(CA, 12, CA, 13),
+ RBC(CA, 13, CA, 14),
+ RBC(CA, 14, CA, 15),
+ RBC(CA, 15, RA, 0),
+ RBC(RA, 0, RA, 1),
+ RBC(RA, 1, RA, 2),
+ RBC(RA, 2, RA, 3),
+ RBC(RA, 3, RA, 4),
+ RBC(RA, 4, RA, 5),
+ RBC(RA, 5, RA, 6),
+ RBC(RA, 6, RA, 7),
+ RBC(RA, 7, RA, 8),
+ RBC(RA, 8, RA, 9),
+ RBC(RA, 9, RA, 10),
+ RBC(RA, 10, RA, 11),
+ RBC(RA, 11, RA, 12),
+ RBC(RA, 12, RA, 13),
+ RBC(RA, 13, RA, 14),
+ RBC(RA, 14, RA, 15),
+ RBC(RA, 15, ZERO, 0),
+};
+
+void coda_set_gdi_regs(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ const u16 *xy2ca_map;
+ u32 xy2rbc_config;
+ int i;
+
+ switch (ctx->tiled_map_type) {
+ case GDI_LINEAR_FRAME_MAP:
+ default:
+ xy2ca_map = xy2ca_zero_map;
+ xy2rbc_config = 0;
+ break;
+ case GDI_TILED_FRAME_MB_RASTER_MAP:
+ xy2ca_map = xy2ca_tiled_map;
+ xy2rbc_config = CODA9_XY2RBC_TILED_MAP |
+ CODA9_XY2RBC_CA_INC_HOR |
+ (16 - 1) << 12 | (8 - 1) << 4;
+ break;
+ }
+
+ for (i = 0; i < 16; i++)
+ coda_write(dev, xy2ca_map[i],
+ CODA9_GDI_XY2_CAS_0 + 4 * i);
+ for (i = 0; i < 4; i++)
+ coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+ CODA9_GDI_XY2_BA_0 + 4 * i);
+ for (i = 0; i < 16; i++)
+ coda_write(dev, XY2(ZERO, 0, ZERO, 0),
+ CODA9_GDI_XY2_RAS_0 + 4 * i);
+ coda_write(dev, xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
+ if (xy2rbc_config) {
+ for (i = 0; i < 32; i++)
+ coda_write(dev, rbc2axi_tiled_map[i],
+ CODA9_GDI_RBC2_AXI_0 + 4 * i);
+ }
+}
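
For reference, the XY2() and RBC() helpers pack a (selector, bit-index) pair for luma into the high field and one for chroma into the low field. Two expansions worked out from the definitions above:

/* XY2(Y, 3, X, 3)   = ((XY2_Y  | 3) << 8) | (XY2_X  | 3)
 *                   = (0x13 << 8) | 0x03 = 0x1303 */
/* RBC(CA, 4, CA, 8) = ((RBC_CA | 4) << 6) | (RBC_CA | 8)
 *                   = (0x04 << 6) | 0x08 = 0x0108 */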
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 8e0af221b2e9..59b2af9c7749 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -128,6 +128,8 @@ struct coda_params {
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
u32 framerate;
u16 bitrate;
+ u16 vbv_delay;
+ u32 vbv_size;
u32 slice_max_bits;
u32 slice_max_mb;
};
@@ -165,15 +167,8 @@ struct coda_iram_info {
phys_addr_t next_paddr;
};
-struct gdi_tiled_map {
- int xy2ca_map[16];
- int xy2ba_map[16];
- int xy2ra_map[16];
- int rbc2axi_map[32];
- int xy2rbc_config;
- int map_type;
#define GDI_LINEAR_FRAME_MAP 0
-};
+#define GDI_TILED_FRAME_MB_RASTER_MAP 1
struct coda_ctx;
@@ -227,12 +222,14 @@ struct coda_ctx {
struct coda_buffer_meta frame_metas[CODA_MAX_FRAMEBUFFERS];
u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
struct list_head buffer_meta_list;
+ spinlock_t buffer_meta_lock;
+ int num_metas;
struct coda_aux_buf workbuf;
int num_internal_frames;
int idx;
int reg_idx;
struct coda_iram_info iram_info;
- struct gdi_tiled_map tiled_map;
+ int tiled_map_type;
u32 bit_stream_param;
u32 frm_dis_flg;
u32 frame_mem_ctrl;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 7d026241171b..3490602fa6e1 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -51,6 +51,7 @@
#define CODA7_STREAM_SEL_64BITS_ENDIAN (1 << 1)
#define CODA_STREAM_ENDIAN_SELECT (1 << 0)
#define CODA_REG_BIT_FRAME_MEM_CTRL 0x110
+#define CODA9_FRAME_TILED2LINEAR (1 << 11)
#define CODA_FRAME_CHROMA_INTERLEAVE (1 << 2)
#define CODA_IMAGE_ENDIAN_SELECT (1 << 0)
#define CODA_REG_BIT_BIT_STREAM_PARAM 0x114
@@ -263,6 +264,10 @@
#define CODADX6_PICHEIGHT_MASK 0x3ff
#define CODA7_PICHEIGHT_MASK 0xffff
#define CODA_CMD_ENC_SEQ_SRC_F_RATE 0x194
+#define CODA_FRATE_RES_OFFSET 0
+#define CODA_FRATE_RES_MASK 0xffff
+#define CODA_FRATE_DIV_OFFSET 16
+#define CODA_FRATE_DIV_MASK 0xffff
#define CODA_CMD_ENC_SEQ_MP4_PARA 0x198
#define CODA_MP4PARAM_VERID_OFFSET 6
#define CODA_MP4PARAM_VERID_MASK 0x01
@@ -448,7 +453,12 @@
#define CODA9_GDI_XY2_RAS_F (CODA9_GDMA_BASE + 0x88c)
#define CODA9_GDI_XY2_RBC_CONFIG (CODA9_GDMA_BASE + 0x890)
+#define CODA9_XY2RBC_SEPARATE_MAP BIT(19)
+#define CODA9_XY2RBC_TOP_BOT_SPLIT BIT(18)
+#define CODA9_XY2RBC_TILED_MAP BIT(17)
+#define CODA9_XY2RBC_CA_INC_HOR BIT(16)
#define CODA9_GDI_RBC2_AXI_0 (CODA9_GDMA_BASE + 0x8a0)
#define CODA9_GDI_RBC2_AXI_1F (CODA9_GDMA_BASE + 0x91c)
+#define CODA9_GDI_TILEDBUF_BASE (CODA9_GDMA_BASE + 0x920)
#endif
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index 781bf7286d53..d9099a0f7c32 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -48,7 +48,7 @@ TRACE_EVENT(coda_bit_done,
TP_printk("minor = %d, ctx = %d", __entry->minor, __entry->ctx)
);
-TRACE_EVENT(coda_enc_pic_run,
+DECLARE_EVENT_CLASS(coda_buf_class,
TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
TP_ARGS(ctx, buf),
@@ -69,28 +69,17 @@ TRACE_EVENT(coda_enc_pic_run,
__entry->minor, __entry->index, __entry->ctx)
);
-TRACE_EVENT(coda_enc_pic_done,
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_run,
TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+ TP_ARGS(ctx, buf)
+);
- TP_ARGS(ctx, buf),
-
- TP_STRUCT__entry(
- __field(int, minor)
- __field(int, index)
- __field(int, ctx)
- ),
-
- TP_fast_assign(
- __entry->minor = ctx->fh.vdev->minor;
- __entry->index = buf->v4l2_buf.index;
- __entry->ctx = ctx->idx;
- ),
-
- TP_printk("minor = %d, index = %d, ctx = %d",
- __entry->minor, __entry->index, __entry->ctx)
+DEFINE_EVENT(coda_buf_class, coda_enc_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
+ TP_ARGS(ctx, buf)
);
-TRACE_EVENT(coda_bit_queue,
+DECLARE_EVENT_CLASS(coda_buf_meta_class,
TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
struct coda_buffer_meta *meta),
@@ -117,7 +106,13 @@ TRACE_EVENT(coda_bit_queue,
__entry->ctx)
);
-TRACE_EVENT(coda_dec_pic_run,
+DEFINE_EVENT(coda_buf_meta_class, coda_bit_queue,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, buf, meta)
+);
+
+DECLARE_EVENT_CLASS(coda_meta_class,
TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
TP_ARGS(ctx, meta),
@@ -140,54 +135,20 @@ TRACE_EVENT(coda_dec_pic_run,
__entry->minor, __entry->start, __entry->end, __entry->ctx)
);
-TRACE_EVENT(coda_dec_pic_done,
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_run,
TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
-
- TP_ARGS(ctx, meta),
-
- TP_STRUCT__entry(
- __field(int, minor)
- __field(int, start)
- __field(int, end)
- __field(int, ctx)
- ),
-
- TP_fast_assign(
- __entry->minor = ctx->fh.vdev->minor;
- __entry->start = meta->start;
- __entry->end = meta->end;
- __entry->ctx = ctx->idx;
- ),
-
- TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
- __entry->minor, __entry->start, __entry->end, __entry->ctx)
+ TP_ARGS(ctx, meta)
);
-TRACE_EVENT(coda_dec_rot_done,
- TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta,
- struct vb2_buffer *buf),
-
- TP_ARGS(ctx, meta, buf),
-
- TP_STRUCT__entry(
- __field(int, minor)
- __field(int, start)
- __field(int, end)
- __field(int, index)
- __field(int, ctx)
- ),
-
- TP_fast_assign(
- __entry->minor = ctx->fh.vdev->minor;
- __entry->start = meta->start;
- __entry->end = meta->end;
- __entry->index = buf->v4l2_buf.index;
- __entry->ctx = ctx->idx;
- ),
+DEFINE_EVENT(coda_meta_class, coda_dec_pic_done,
+ TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, meta)
+);
- TP_printk("minor = %d, start = 0x%x, end = 0x%x, index = %d, ctx = %d",
- __entry->minor, __entry->start, __entry->end, __entry->index,
- __entry->ctx)
+DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ struct coda_buffer_meta *meta),
+ TP_ARGS(ctx, buf, meta)
);
#endif /* __CODA_TRACE_H__ */
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 0ad1b6f84a27..d2bfe7c2a6b4 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -188,7 +188,7 @@ static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
if (IS_ERR(f))
return PTR_ERR(f);
/*
- * Return number of non-contigous planes (plane buffers)
+ * Return number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
if (!f->fmt)
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 5b76e3db6a92..ae8c6b35a357 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -28,6 +28,9 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/videobuf-dma-contig.h>
#define DRV_NAME "fsl_viu"
@@ -40,49 +43,6 @@
/* I2C address of video decoder chip is 0x4A */
#define VIU_VIDEO_DECODER_ADDR 0x25
-/* supported controls */
-static struct v4l2_queryctrl viu_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 0x10,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 127,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = 0,
- }
-};
-
-static int qctl_regs[ARRAY_SIZE(viu_qctrl)];
-
static int info_level;
#define dprintk(level, fmt, arg...) \
@@ -95,7 +55,6 @@ static int info_level;
* Basic structures
*/
struct viu_fmt {
- char name[32];
u32 fourcc; /* v4l2 format id */
u32 pixelformat;
int depth;
@@ -103,12 +62,10 @@ struct viu_fmt {
static struct viu_fmt formats[] = {
{
- .name = "RGB-16 (5/B-6/G-5/R)",
.fourcc = V4L2_PIX_FMT_RGB565,
.pixelformat = V4L2_PIX_FMT_RGB565,
.depth = 16,
}, {
- .name = "RGB-32 (A-R-G-B)",
.fourcc = V4L2_PIX_FMT_RGB32,
.pixelformat = V4L2_PIX_FMT_RGB32,
.depth = 32,
@@ -156,6 +113,7 @@ struct viu_reg {
struct viu_dev {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
struct mutex lock;
spinlock_t slock;
int users;
@@ -195,6 +153,8 @@ struct viu_dev {
};
struct viu_fh {
+ /* must remain the first field of this struct */
+ struct v4l2_fh fh;
struct viu_dev *dev;
/* video capture */
@@ -604,6 +564,7 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strcpy(cap->driver, "viu");
strcpy(cap->card, "viu");
+ strcpy(cap->bus_info, "platform:viu");
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_OVERLAY |
@@ -617,10 +578,9 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
{
int index = f->index;
- if (f->index > NUM_FORMATS)
+ if (f->index >= NUM_FORMATS)
return -EINVAL;
- strlcpy(f->description, formats[index].name, sizeof(f->description));
f->pixelformat = formats[index].fourcc;
return 0;
}
@@ -637,6 +597,7 @@ static int vidioc_g_fmt_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fh->fmt->depth) >> 3;
f->fmt.pix.sizeimage = fh->sizeimage;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -644,7 +605,6 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct viu_fmt *fmt;
- enum v4l2_field field;
unsigned int maxw, maxh;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -654,19 +614,10 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv,
return -EINVAL;
}
- field = f->fmt.pix.field;
-
- if (field == V4L2_FIELD_ANY) {
- field = V4L2_FIELD_INTERLACED;
- } else if (field != V4L2_FIELD_INTERLACED) {
- dprintk(1, "Field type invalid.\n");
- return -EINVAL;
- }
-
maxw = norm_maxw();
maxh = norm_maxh();
- f->fmt.pix.field = field;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
if (f->fmt.pix.height < 32)
f->fmt.pix.height = 32;
if (f->fmt.pix.height > maxh)
@@ -678,6 +629,8 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv,
f->fmt.pix.width &= ~0x03;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -698,7 +651,6 @@ static int vidioc_s_fmt_cap(struct file *file, void *priv,
fh->sizeimage = f->fmt.pix.sizeimage;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- dprintk(1, "set to pixelformat '%4.6s'\n", (char *)&fh->fmt->name);
return 0;
}
@@ -764,8 +716,8 @@ static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh)
{
int bpp;
- dprintk(1, "%s %dx%d %s\n", __func__,
- fh->win.w.width, fh->win.w.height, dev->ovfmt->name);
+ dprintk(1, "%s %dx%d\n", __func__,
+ fh->win.w.width, fh->win.w.height);
reg_val.status_cfg = 0;
@@ -1002,58 +954,13 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct viu_fh *fh = priv;
- if (i > 1)
+ if (i)
return -EINVAL;
decoder_call(fh->dev, video, s_routing, i, 0, 0);
return 0;
}
-/* Controls */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
- if (qc->id && qc->id == viu_qctrl[i].id) {
- memcpy(qc, &(viu_qctrl[i]), sizeof(*qc));
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
- if (ctrl->id == viu_qctrl[i].id) {
- ctrl->value = qctl_regs[i];
- return 0;
- }
- }
- return -EINVAL;
-}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++) {
- if (ctrl->id == viu_qctrl[i].id) {
- if (ctrl->value < viu_qctrl[i].minimum
- || ctrl->value > viu_qctrl[i].maximum)
- return -ERANGE;
- qctl_regs[i] = ctrl->value;
- return 0;
- }
- }
- return -EINVAL;
-}
-
inline void viu_activate_next_buf(struct viu_dev *dev,
struct viu_dmaqueue *viuq)
{
@@ -1265,7 +1172,6 @@ static int viu_open(struct file *file)
struct viu_reg *vr;
int minor = vdev->minor;
u32 status_cfg;
- int i;
dprintk(1, "viu: open (minor=%d)\n", minor);
@@ -1293,6 +1199,7 @@ static int viu_open(struct file *file)
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
fh->dev = dev;
@@ -1303,10 +1210,6 @@ static int viu_open(struct file *file)
dev->crop_current.width = fh->width;
dev->crop_current.height = fh->height;
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(viu_qctrl); i++)
- qctl_regs[i] = viu_qctrl[i].default_value;
-
dprintk(1, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq);
@@ -1332,6 +1235,7 @@ static int viu_open(struct file *file)
fh->type, V4L2_FIELD_INTERLACED,
sizeof(struct viu_buf), fh,
&fh->dev->lock);
+ v4l2_fh_add(&fh->fh);
mutex_unlock(&dev->lock);
return 0;
}
@@ -1364,13 +1268,17 @@ static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
struct viu_fh *fh = file->private_data;
struct videobuf_queue *q = &fh->vb_vidq;
struct viu_dev *dev = fh->dev;
- unsigned int res;
+ unsigned long req_events = poll_requested_events(wait);
+ unsigned int res = v4l2_ctrl_poll(file, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
+ if (!(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+
mutex_lock(&dev->lock);
- res = videobuf_poll_stream(file, q, wait);
+ res |= videobuf_poll_stream(file, q, wait);
mutex_unlock(&dev->lock);
return res;
}
@@ -1385,6 +1293,8 @@ static int viu_release(struct file *file)
viu_stop_dma(dev);
videobuf_stop(&fh->vb_vidq);
videobuf_mmap_free(&fh->vb_vidq);
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
mutex_unlock(&dev->lock);
kfree(fh);
@@ -1463,11 +1373,11 @@ static const struct v4l2_ioctl_ops viu_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device viu_template = {
@@ -1543,6 +1453,16 @@ static int viu_of_probe(struct platform_device *op)
}
ad = i2c_get_adapter(0);
+
+ v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
+ if (viu_dev->hdl.error) {
+ ret = viu_dev->hdl.error;
+ dev_err(&op->dev, "couldn't register control\n");
+ goto err_vdev;
+ }
+ /* This control handler will inherit the control(s) from the
+ sub-device(s). */
+ viu_dev->v4l2_dev.ctrl_handler = &viu_dev->hdl;
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
@@ -1559,7 +1479,7 @@ static int viu_of_probe(struct platform_device *op)
goto err_vdev;
}
- memcpy(vdev, &viu_template, sizeof(viu_template));
+ *vdev = viu_template;
vdev->v4l2_dev = &viu_dev->v4l2_dev;
@@ -1614,6 +1534,7 @@ err_irq:
err_clk:
video_unregister_device(viu_dev->vdev);
err_vdev:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
mutex_unlock(&viu_dev->lock);
i2c_put_adapter(ad);
v4l2_device_unregister(&viu_dev->v4l2_dev);
@@ -1635,6 +1556,7 @@ static int viu_of_remove(struct platform_device *op)
clk_disable_unprepare(dev->clk);
+ v4l2_ctrl_handler_free(&dev->hdl);
video_unregister_device(dev->vdev);
i2c_put_adapter(client->adapter);
v4l2_device_unregister(&dev->v4l2_dev);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index dc2aaab54aef..217d613b0fe7 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,6 +10,7 @@ config VIDEO_OMAP2_VOUT
select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
+ select FRAME_VECTOR
default n
---help---
V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index f09c5f17a42f..70c28d19ea04 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -195,46 +195,34 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
}
/*
- * omap_vout_uservirt_to_phys: This inline function is used to convert user
- * space virtual address to physical address.
+ * omap_vout_get_userptr: Convert user space virtual address to physical
+ * address.
*/
-static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
+static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
+ u32 *physp)
{
- unsigned long physp = 0;
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
+ struct frame_vector *vec;
+ int ret;
/* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET)
- return virt_to_phys((void *) virtp);
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(mm, virtp);
- if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
- /* this will catch, kernel-allocated, mmaped-to-usermode
- addresses */
- physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
- up_read(&current->mm->mmap_sem);
- } else {
- /* otherwise, use get_user_pages() for general userland pages */
- int res, nr_pages = 1;
- struct page *pages;
+ if (virtp >= PAGE_OFFSET) {
+ *physp = virt_to_phys((void *)virtp);
+ return 0;
+ }
- res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
- 0, &pages, NULL);
- up_read(&current->mm->mmap_sem);
+ vec = frame_vector_create(1);
+ if (!vec)
+ return -ENOMEM;
- if (res == nr_pages) {
- physp = __pa(page_address(&pages[0]) +
- (virtp & ~PAGE_MASK));
- } else {
- printk(KERN_WARNING VOUT_NAME
- "get_user_pages failed\n");
- return 0;
- }
+ ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ if (ret != 1) {
+ frame_vector_destroy(vec);
+ return -EINVAL;
}
+ *physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
+ vb->priv = vec;
- return physp;
+ return 0;
}
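For reference, a minimal sketch of the frame_vector lifecycle as this patch
uses it, with the pinning done here and the unpinning deferred to buffer
release (error handling abbreviated):

	struct frame_vector *vec;

	vec = frame_vector_create(1);		/* room for a single frame */
	if (!vec)
		return -ENOMEM;
	if (get_vaddr_frames(virtp, 1, true, false, vec) != 1) {
		frame_vector_destroy(vec);
		return -EINVAL;
	}
	*physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
	/* ... later, on buffer release: */
	put_vaddr_frames(vec);			/* unpin the page */
	frame_vector_destroy(vec);		/* free the vector itself */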
/*
@@ -784,11 +772,15 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
* address of the buffer
*/
if (V4L2_MEMORY_USERPTR == vb->memory) {
+ int ret;
+
if (0 == vb->baddr)
return -EINVAL;
/* Physical address */
- vout->queued_buf_addr[vb->i] = (u8 *)
- omap_vout_uservirt_to_phys(vb->baddr);
+ ret = omap_vout_get_userptr(vb, vb->baddr,
+ (u32 *)&vout->queued_buf_addr[vb->i]);
+ if (ret < 0)
+ return ret;
} else {
unsigned long addr, dma_addr;
unsigned long size;
@@ -834,12 +826,13 @@ static void omap_vout_buffer_queue(struct videobuf_queue *q,
static void omap_vout_buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
- struct omap_vout_device *vout = q->priv_data;
-
vb->state = VIDEOBUF_NEEDS_INIT;
+ if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
+ struct frame_vector *vec = vb->priv;
- if (V4L2_MEMORY_MMAP != vout->memory)
- return;
+ put_vaddr_frames(vec);
+ frame_vector_destroy(vec);
+ }
}
/*
@@ -872,7 +865,7 @@ static void omap_vout_vm_close(struct vm_area_struct *vma)
vout->mmap_count--;
}
-static struct vm_operations_struct omap_vout_vm_ops = {
+static const struct vm_operations_struct omap_vout_vm_ops = {
.open = omap_vout_vm_open,
.close = omap_vout_vm_close,
};
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 18d0a871747f..56e683b19a73 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -101,7 +101,6 @@ static const struct isp_res_mapping isp_res_maps[] = {
0x0000, /* csi2a, len 0x0170 */
0x0170, /* csiphy2, len 0x000c */
},
- .syscon_offset = 0xdc,
.phy_type = ISP_PHY_TYPE_3430,
},
{
@@ -124,7 +123,6 @@ static const struct isp_res_mapping isp_res_maps[] = {
0x0570, /* csiphy1, len 0x000c */
0x05c0, /* csi2c, len 0x0040 (2nd area) */
},
- .syscon_offset = 0x2f0,
.phy_type = ISP_PHY_TYPE_3630,
},
};
@@ -829,14 +827,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
int ret;
if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
/* Powering off entities is assumed to never fail. */
isp_pipeline_pm_power(source, -sink_use);
isp_pipeline_pm_power(sink, -source_use);
return 0;
}
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
(flags & MEDIA_LNK_FL_ENABLED)) {
ret = isp_pipeline_pm_power(source, sink_use);
@@ -1796,47 +1794,6 @@ static void isp_unregister_entities(struct isp_device *isp)
media_device_unregister(&isp->media_dev);
}
-/*
- * isp_register_subdev - Register a sub-device
- * @isp: OMAP3 ISP device
- * @isp_subdev: platform data related to a sub-device
- *
- * Register an I2C sub-device which has not been registered by other
- * means (such as the Device Tree).
- *
- * Return a pointer to the sub-device if it has been successfully
- * registered, or NULL otherwise.
- */
-static struct v4l2_subdev *
-isp_register_subdev(struct isp_device *isp,
- struct isp_platform_subdev *isp_subdev)
-{
- struct i2c_adapter *adapter;
- struct v4l2_subdev *sd;
-
- if (isp_subdev->board_info == NULL)
- return NULL;
-
- adapter = i2c_get_adapter(isp_subdev->i2c_adapter_id);
- if (adapter == NULL) {
- dev_err(isp->dev,
- "%s: Unable to get I2C adapter %d for device %s\n",
- __func__, isp_subdev->i2c_adapter_id,
- isp_subdev->board_info->type);
- return NULL;
- }
-
- sd = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
- isp_subdev->board_info, NULL);
- if (sd == NULL) {
- dev_err(isp->dev, "%s: Unable to register subdev %s\n",
- __func__, isp_subdev->board_info->type);
- return NULL;
- }
-
- return sd;
-}
-
static int isp_link_entity(
struct isp_device *isp, struct media_entity *entity,
enum isp_interface_type interface)
@@ -1910,8 +1867,6 @@ static int isp_link_entity(
static int isp_register_entities(struct isp_device *isp)
{
- struct isp_platform_data *pdata = isp->pdata;
- struct isp_platform_subdev *isp_subdev;
int ret;
isp->media_dev.dev = isp->dev;
@@ -1968,42 +1923,9 @@ static int isp_register_entities(struct isp_device *isp)
if (ret < 0)
goto done;
- /*
- * Device Tree --- the external sub-devices will be registered
- * later. The same goes for the sub-device node registration.
- */
- if (isp->dev->of_node)
- return 0;
-
- /* Register external entities */
- for (isp_subdev = pdata ? pdata->subdevs : NULL;
- isp_subdev && isp_subdev->board_info; isp_subdev++) {
- struct v4l2_subdev *sd;
-
- sd = isp_register_subdev(isp, isp_subdev);
-
- /*
- * No bus information --- this is either a flash or a
- * lens subdev.
- */
- if (!sd || !isp_subdev->bus)
- continue;
-
- sd->host_priv = isp_subdev->bus;
-
- ret = isp_link_entity(isp, &sd->entity,
- isp_subdev->bus->interface);
- if (ret < 0)
- goto done;
- }
-
- ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
-
done:
- if (ret < 0) {
+ if (ret < 0)
isp_unregister_entities(isp);
- v4l2_async_notifier_unregister(&isp->notifier);
- }
return ret;
}
@@ -2404,37 +2326,24 @@ static int isp_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
- &isp->phy_type);
- if (ret)
- return ret;
+ ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
+ &isp->phy_type);
+ if (ret)
+ return ret;
- isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "syscon");
- if (IS_ERR(isp->syscon))
- return PTR_ERR(isp->syscon);
+ isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(isp->syscon))
+ return PTR_ERR(isp->syscon);
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
- &isp->syscon_offset);
- if (ret)
- return ret;
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
+ &isp->syscon_offset);
+ if (ret)
+ return ret;
- ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
- if (ret < 0)
- return ret;
- ret = v4l2_async_notifier_register(&isp->v4l2_dev,
- &isp->notifier);
- if (ret)
- return ret;
- } else {
- isp->pdata = pdev->dev.platform_data;
- isp->syscon = syscon_regmap_lookup_by_pdevname("syscon.0");
- if (IS_ERR(isp->syscon))
- return PTR_ERR(isp->syscon);
- dev_warn(&pdev->dev,
- "Platform data support is deprecated! Please move to DT now!\n");
- }
+ ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
+ if (ret < 0)
+ return ret;
isp->autoidle = autoidle;
@@ -2513,11 +2422,6 @@ static int isp_probe(struct platform_device *pdev)
goto error_isp;
}
- if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) {
- isp->syscon_offset = isp_res_maps[m].syscon_offset;
- isp->phy_type = isp_res_maps[m].phy_type;
- }
-
for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
isp->mmio_base[i] =
isp->mmio_base[0] + isp_res_maps[m].offset[i];
@@ -2557,18 +2461,24 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_iommu;
- isp->notifier.bound = isp_subdev_notifier_bound;
- isp->notifier.complete = isp_subdev_notifier_complete;
-
ret = isp_register_entities(isp);
if (ret < 0)
goto error_modules;
+ isp->notifier.bound = isp_subdev_notifier_bound;
+ isp->notifier.complete = isp_subdev_notifier_complete;
+
+ ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+ if (ret)
+ goto error_register_entities;
+
isp_core_init(isp, 1);
omap3isp_put(isp);
return 0;
+error_register_entities:
+ isp_unregister_entities(isp);
error_modules:
isp_cleanup_modules(isp);
error_iommu:
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index e579943175c4..5acc2e6511a5 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -17,7 +17,6 @@
#ifndef OMAP3_ISP_CORE_H
#define OMAP3_ISP_CORE_H
-#include <media/omap3isp.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <linux/clk-provider.h>
@@ -27,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/wait.h>
+#include "omap3isp.h"
#include "ispstat.h"
#include "ispccdc.h"
#include "ispreg.h"
@@ -101,15 +101,11 @@ struct regmap;
* struct isp_res_mapping - Map ISP io resources to ISP revision.
* @isp_rev: ISP_REVISION_x_x
* @offset: register offsets of various ISP sub-blocks
- * @syscon_offset: offset of the syscon register for 343x / 3630
- * (CONTROL_CSIRXFE / CONTROL_CAMERA_PHY_CTRL, respectively)
- * from the syscon base address
* @phy_type: ISP_PHY_TYPE_{3430,3630}
*/
struct isp_res_mapping {
u32 isp_rev;
u32 offset[OMAP3_ISP_IOMEM_LAST];
- u32 syscon_offset;
u32 phy_type;
};
@@ -184,7 +180,6 @@ struct isp_device {
u32 revision;
/* platform HW resources */
- struct isp_platform_data *pdata;
unsigned int irq_num;
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index e17c88beab92..28b63b28f9f7 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -17,7 +17,7 @@
#ifndef OMAP3_ISP_CSI_PHY_H
#define OMAP3_ISP_CSI_PHY_H
-#include <media/omap3isp.h>
+#include "omap3isp.h"
struct isp_csi2_device;
struct regulator;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index d285af18df7f..41bb8df91f72 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1018,8 +1018,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
pipe->entities = 0;
- if (video->isp->pdata && video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, true);
+ /* TODO: Implement PM QoS */
pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
pipe->max_rate = pipe->l3_ick;
@@ -1100,8 +1099,7 @@ err_set_stream:
err_check_format:
media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
- if (video->isp->pdata && video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, false);
+ /* TODO: Implement PM QoS */
/* The DMA queue must be emptied here, otherwise CCDC interrupts that
* will get triggered the next time the CCDC is powered up will try to
* access buffers that might have been freed but still present in the
@@ -1161,8 +1159,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
video->queue = NULL;
video->error = false;
- if (video->isp->pdata && video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, false);
+ /* TODO: Implement PM QoS */
media_entity_pipeline_stop(&video->video.entity);
done:
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
new file mode 100644
index 000000000000..190e259a6a2d
--- /dev/null
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -0,0 +1,132 @@
+/*
+ * omap3isp.h
+ *
+ * TI OMAP3 ISP - Bus Configuration
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __OMAP3ISP_H__
+#define __OMAP3ISP_H__
+
+enum isp_interface_type {
+ ISP_INTERFACE_PARALLEL,
+ ISP_INTERFACE_CSI2A_PHY2,
+ ISP_INTERFACE_CCP2B_PHY1,
+ ISP_INTERFACE_CCP2B_PHY2,
+ ISP_INTERFACE_CSI2C_PHY1,
+};
+
+/**
+ * struct isp_parallel_cfg - Parallel interface configuration
+ * @data_lane_shift: Data lane shifter
+ * 0 - CAMEXT[13:0] -> CAM[13:0]
+ * 1 - CAMEXT[13:2] -> CAM[11:0]
+ * 2 - CAMEXT[13:4] -> CAM[9:0]
+ * 3 - CAMEXT[13:6] -> CAM[7:0]
+ * @clk_pol: Pixel clock polarity
+ * 0 - Sample on rising edge, 1 - Sample on falling edge
+ * @hs_pol: Horizontal synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @vs_pol: Vertical synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ * 0 - Positive, 1 - Negative
+ * @data_pol: Data polarity
+ * 0 - Normal, 1 - One's complement
+ */
+struct isp_parallel_cfg {
+ unsigned int data_lane_shift:2;
+ unsigned int clk_pol:1;
+ unsigned int hs_pol:1;
+ unsigned int vs_pol:1;
+ unsigned int fld_pol:1;
+ unsigned int data_pol:1;
+};
+
+enum {
+ ISP_CCP2_PHY_DATA_CLOCK = 0,
+ ISP_CCP2_PHY_DATA_STROBE = 1,
+};
+
+enum {
+ ISP_CCP2_MODE_MIPI = 0,
+ ISP_CCP2_MODE_CCP2 = 1,
+};
+
+/**
+ * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
+ * @pos: position of the lane
+ * @pol: polarity of the lane
+ */
+struct isp_csiphy_lane {
+ u8 pos;
+ u8 pol;
+};
+
+#define ISP_CSIPHY1_NUM_DATA_LANES 1
+#define ISP_CSIPHY2_NUM_DATA_LANES 2
+
+/**
+ * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
+ * @data: Configuration of one or two data lanes
+ * @clk: Clock lane configuration
+ */
+struct isp_csiphy_lanes_cfg {
+ struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
+ struct isp_csiphy_lane clk;
+};
+
+/**
+ * struct isp_ccp2_cfg - CCP2 interface configuration
+ * @strobe_clk_pol: Strobe/clock polarity
+ * 0 - Non Inverted, 1 - Inverted
+ * @crc: Enable the cyclic redundancy check
+ * @ccp2_mode: Enable CCP2 compatibility mode
+ * ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
+ * ISP_CCP2_MODE_CCP2 - CCP2 mode
+ * @phy_layer: Physical layer selection
+ * ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
+ * ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
+ * @vpclk_div: Video port output clock control
+ */
+struct isp_ccp2_cfg {
+ unsigned int strobe_clk_pol:1;
+ unsigned int crc:1;
+ unsigned int ccp2_mode:1;
+ unsigned int phy_layer:1;
+ unsigned int vpclk_div:2;
+ struct isp_csiphy_lanes_cfg lanecfg;
+};
+
+/**
+ * struct isp_csi2_cfg - CSI2 interface configuration
+ * @crc: Enable the cyclic redundancy check
+ */
+struct isp_csi2_cfg {
+ unsigned crc:1;
+ struct isp_csiphy_lanes_cfg lanecfg;
+};
+
+struct isp_bus_cfg {
+ enum isp_interface_type interface;
+ union {
+ struct isp_parallel_cfg parallel;
+ struct isp_ccp2_cfg ccp2;
+ struct isp_csi2_cfg csi2;
+ } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
+};
+
+#endif /* __OMAP3ISP_H__ */
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
new file mode 100644
index 000000000000..2973f070d328
--- /dev/null
+++ b/drivers/media/platform/rcar_jpu.c
@@ -0,0 +1,1794 @@
+/*
+ * Author: Mikhail Ulyanov
+ * Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This is based on the drivers/media/platform/s5p-jpeg driver by
+ * Andrzej Pietrasiewicz and Jacek Anaszewski.
+ * Some portions of code inspired by VSP1 driver by Laurent Pinchart.
+ *
+ * TODO in order of priority:
+ * 1) Rotation
+ * 2) Cropping
+ * 3) V4L2_CID_JPEG_ACTIVE_MARKER
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+
+#define DRV_NAME "rcar_jpu"
+
+/*
+ * Align JPEG header end to cache line to make sure we will not have any issues
+ * with cache; additionally, meet the requirement of 33.3.27 R01UH0501EJ0100
+ * Rev.1.00
+ */
+#define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES))
+#define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */
+#define JPU_JPEG_MIN_SIZE 25 /* SOI + SOF + EOI */
+#define JPU_JPEG_QTBL_SIZE 0x40
+#define JPU_JPEG_HDCTBL_SIZE 0x1c
+#define JPU_JPEG_HACTBL_SIZE 0xb2
+#define JPU_JPEG_HEIGHT_OFFSET 0x91
+#define JPU_JPEG_WIDTH_OFFSET 0x93
+#define JPU_JPEG_SUBS_OFFSET 0x97
+#define JPU_JPEG_QTBL_LUM_OFFSET 0x07
+#define JPU_JPEG_QTBL_CHR_OFFSET 0x4c
+#define JPU_JPEG_HDCTBL_LUM_OFFSET 0xa4
+#define JPU_JPEG_HACTBL_LUM_OFFSET 0xc5
+#define JPU_JPEG_HDCTBL_CHR_OFFSET 0x17c
+#define JPU_JPEG_HACTBL_CHR_OFFSET 0x19d
+#define JPU_JPEG_PADDING_OFFSET 0x24f
+#define JPU_JPEG_LUM 0x00
+#define JPU_JPEG_CHR 0x01
+#define JPU_JPEG_DC 0x00
+#define JPU_JPEG_AC 0x10
+
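For orientation: the fixed header layout ends at offset 0x258 (600 bytes), so
with a typical 64-byte L1 cache line (L1_CACHE_BYTES is architecture-defined)
the ALIGN() above works out to:

	JPU_JPEG_HDR_SIZE = ALIGN(0x258, 64) = 0x280	/* 600 rounded up to 640 */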
+#define JPU_JPEG_422 0x21
+#define JPU_JPEG_420 0x22
+
+#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
+#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M
+
+/* JPEG markers */
+#define TEM 0x01
+#define SOF0 0xc0
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+#define DHP 0xde
+#define DHT 0xc4
+#define COM 0xfe
+#define DQT 0xdb
+#define DRI 0xdd
+#define APP0 0xe0
+
+#define JPU_RESET_TIMEOUT 100 /* ms */
+#define JPU_JOB_TIMEOUT 300 /* ms */
+#define JPU_MAX_QUALITY 4
+#define JPU_WIDTH_MIN 16
+#define JPU_HEIGHT_MIN 16
+#define JPU_WIDTH_MAX 4096
+#define JPU_HEIGHT_MAX 4096
+#define JPU_MEMALIGN 8
+
+/* Flags that indicate a format can be used for capture/output */
+#define JPU_FMT_TYPE_OUTPUT 0
+#define JPU_FMT_TYPE_CAPTURE 1
+#define JPU_ENC_CAPTURE (1 << 0)
+#define JPU_ENC_OUTPUT (1 << 1)
+#define JPU_DEC_CAPTURE (1 << 2)
+#define JPU_DEC_OUTPUT (1 << 3)
+
+/*
+ * JPEG registers and bits
+ */
+
+/* JPEG code mode register */
+#define JCMOD 0x00
+#define JCMOD_PCTR (1 << 7)
+#define JCMOD_MSKIP_ENABLE (1 << 5)
+#define JCMOD_DSP_ENC (0 << 3)
+#define JCMOD_DSP_DEC (1 << 3)
+#define JCMOD_REDU (7 << 0)
+#define JCMOD_REDU_422 (1 << 0)
+#define JCMOD_REDU_420 (2 << 0)
+
+/* JPEG code command register */
+#define JCCMD 0x04
+#define JCCMD_SRST (1 << 12)
+#define JCCMD_JEND (1 << 2)
+#define JCCMD_JSRT (1 << 0)
+
+/* JPEG code quantization table number register */
+#define JCQTN 0x0c
+#define JCQTN_SHIFT(t) (((t) - 1) << 1)
+
+/* JPEG code Huffman table number register */
+#define JCHTN 0x10
+#define JCHTN_AC_SHIFT(t) (((t) << 1) - 1)
+#define JCHTN_DC_SHIFT(t) (((t) - 1) << 1)
+
+#define JCVSZU 0x1c /* JPEG code vertical size upper register */
+#define JCVSZD 0x20 /* JPEG code vertical size lower register */
+#define JCHSZU 0x24 /* JPEG code horizontal size upper register */
+#define JCHSZD 0x28 /* JPEG code horizontal size lower register */
+#define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte */
+
+#define JCDTCU 0x2c /* JPEG code data count upper register */
+#define JCDTCM 0x30 /* JPEG code data count middle register */
+#define JCDTCD 0x34 /* JPEG code data count lower register */
+
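Each of the three data-count registers holds one byte of the encoded-stream
length; the interrupt handler later in this file combines them along these
lines (sketch):

	/* total length of the generated JPEG data, in bytes */
	len = jpu_read(jpu, JCDTCU) << 16 |
	      jpu_read(jpu, JCDTCM) << 8 |
	      jpu_read(jpu, JCDTCD);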
+/* JPEG interrupt enable register */
+#define JINTE 0x38
+#define JINTE_ERR (7 << 5) /* INT5 + INT6 + INT7 */
+#define JINTE_TRANSF_COMPL (1 << 10)
+
+/* JPEG interrupt status register */
+#define JINTS 0x3c
+#define JINTS_MASK 0x7c68
+#define JINTS_ERR (1 << 5)
+#define JINTS_PROCESS_COMPL (1 << 6)
+#define JINTS_TRANSF_COMPL (1 << 10)
+
+#define JCDERR 0x40 /* JPEG code decode error register */
+#define JCDERR_MASK 0xf /* JPEG code decode error register mask */
+
+/* JPEG interface encoding */
+#define JIFECNT 0x70
+#define JIFECNT_INFT_422 0
+#define JIFECNT_INFT_420 1
+#define JIFECNT_SWAP_WB (3 << 4) /* to JPU */
+
+#define JIFESYA1 0x74 /* encode source Y address register 1 */
+#define JIFESCA1 0x78 /* encode source C address register 1 */
+#define JIFESYA2 0x7c /* encode source Y address register 2 */
+#define JIFESCA2 0x80 /* encode source C address register 2 */
+#define JIFESMW 0x84 /* encode source memory width register */
+#define JIFESVSZ 0x88 /* encode source vertical size register */
+#define JIFESHSZ 0x8c /* encode source horizontal size register */
+#define JIFEDA1 0x90 /* encode destination address register 1 */
+#define JIFEDA2 0x94 /* encode destination address register 2 */
+
+/* JPEG decoding control register */
+#define JIFDCNT 0xa0
+#define JIFDCNT_SWAP_WB (3 << 1) /* from JPU */
+
+#define JIFDSA1 0xa4 /* decode source address register 1 */
+#define JIFDDMW 0xb0 /* decode destination memory width register */
+#define JIFDDVSZ 0xb4 /* decode destination vert. size register */
+#define JIFDDHSZ 0xb8 /* decode destination horiz. size register */
+#define JIFDDYA1 0xbc /* decode destination Y address register 1 */
+#define JIFDDCA1 0xc0 /* decode destination C address register 1 */
+
+#define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */
+#define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */
+#define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */
+
+/**
+ * struct jpu - JPEG IP abstraction
+ * @mutex: the mutex protecting this structure
+ * @lock: spinlock protecting the device contexts
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @curr: pointer to current context
+ * @irq_queue: interrupt handler waitqueue
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clk: JPEG IP clock
+ * @dev: JPEG IP struct device
+ * @alloc_ctx: videobuf2 memory allocator's context
+ * @ref_count: reference counter
+ */
+struct jpu {
+ struct mutex mutex;
+ spinlock_t lock;
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd_encoder;
+ struct video_device vfd_decoder;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct jpu_ctx *curr;
+ wait_queue_head_t irq_queue;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct clk *clk;
+ struct device *dev;
+ void *alloc_ctx;
+ int ref_count;
+};
+
+/**
+ * struct jpu_buffer - driver's specific video buffer
+ * @buf: m2m buffer
+ * @compr_quality: destination image quality in compression mode
+ * @subsampling: source image subsampling in decompression mode
+ */
+struct jpu_buffer {
+ struct v4l2_m2m_buffer buf;
+ unsigned short compr_quality;
+ unsigned char subsampling;
+};
+
+/**
+ * struct jpu_fmt - driver's internal format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @colorspace: the colorspace specifier
+ * @bpp: number of bits per pixel per plane
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @subsampling: (horizontal:4 | vertical:4) subsampling factor
+ * @num_planes: number of planes
+ * @types: types of queue this format is applicable to
+ */
+struct jpu_fmt {
+ u32 fourcc;
+ u32 colorspace;
+ u8 bpp[2];
+ u8 h_align;
+ u8 v_align;
+ u8 subsampling;
+ u8 num_planes;
+ u16 types;
+};
+
+/**
+ * jpu_q_data - parameters of one queue
+ * @fmtinfo: driver-specific format of this queue
+ * @format: multiplanar format of this queue
+ * @sequence: sequence number
+ */
+struct jpu_q_data {
+ struct jpu_fmt *fmtinfo;
+ struct v4l2_pix_format_mplane format;
+ unsigned int sequence;
+};
+
+/**
+ * jpu_ctx - the device context data
+ * @jpu: JPEG IP device for this context
+ * @encoder: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: file handler
+ * @ctrl_handler: controls handler
+ */
+struct jpu_ctx {
+ struct jpu *jpu;
+ bool encoder;
+ unsigned short compr_quality;
+ struct jpu_q_data out_q;
+ struct jpu_q_data cap_q;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * jpeg_buffer - description of memory containing input JPEG data
+ * @end: end position in the buffer
+ * @curr: current position in the buffer
+ */
+struct jpeg_buffer {
+ void *end;
+ void *curr;
+};
+
+static struct jpu_fmt jpu_formats[] = {
+ { V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG,
+ {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT },
+ { V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB,
+ {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB,
+ {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB,
+ {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB,
+ {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+};
+
+static const u8 zigzag[] = {
+ 0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09,
+ 0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06,
+ 0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21,
+ 0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f,
+ 0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27,
+ 0x1e, 0x15, 0x0c, 0x14, 0x1d, 0x26, 0x2f, 0x30,
+ 0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2d,
+ 0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c
+};
+
+#define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
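With 4-byte unsigned int these convert the byte sizes above into the u32 word
counts that jpu_set_tbl() expects:

	QTBL_SIZE   = ALIGN(0x40, 4) / 4 = 16 words	/*  64 bytes */
	HDCTBL_SIZE = ALIGN(0x1c, 4) / 4 =  7 words	/*  28 bytes */
	HACTBL_SIZE = ALIGN(0xb2, 4) / 4 = 45 words	/* 178 bytes, padded to 180 */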
+/*
+ * Start of image; Quantization tables
+ * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width,
+ * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr;
+ * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_BLOB { \
+ 0xff, SOI, 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
+ [JPU_JPEG_QTBL_LUM_OFFSET ... \
+ JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
+ 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
+ [JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \
+ JPU_JPEG_QTBL_SIZE - 1] = 0x00, 0xff, SOF0, 0x00, 0x11, 0x08, \
+ [JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \
+ [JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \
+ 0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \
+ 0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \
+ 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_LUM_OFFSET ... \
+ JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_LUM_OFFSET ... \
+ JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_CHR_OFFSET ... \
+ JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_CHR_OFFSET ... \
+ JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ [JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \
+}
+
+static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = {
+ [0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB
+};
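JPU_JPEG_HDR_BLOB relies on C99 designated initializers plus the GNU range
extension "[first ... last] = value". A self-contained illustration of the
syntax (demo is a made-up example, not driver code):

	static const unsigned char demo[8] = {
		0xff, 0xd8,		/* positional: demo[0], demo[1] */
		[4 ... 6] = 0xaa,	/* GNU range designator: demo[4..6] */
		[7] = 0xd9,		/* single designated element */
	};				/* demo[2] and demo[3] remain zero */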
+
+static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x14101927, 0x322e3e44, 0x10121726, 0x26354144,
+ 0x19171f26, 0x35414444, 0x27262635, 0x41444444,
+ 0x32263541, 0x44444444, 0x2e354144, 0x44444444,
+ 0x3e414444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e,
+ 0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40,
+ 0x1714171a, 0x27334040, 0x1b171a25, 0x33404040,
+ 0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040
+ },
+ {
+ 0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217,
+ 0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435,
+ 0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b,
+ 0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b
+ },
+ {
+ 0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f,
+ 0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823,
+ 0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727,
+ 0x100c1318, 0x22272727, 0x110f1823, 0x27272727
+ }
+};
+
+static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x15192026, 0x36444444, 0x191c1826, 0x36444444,
+ 0x2018202b, 0x42444444, 0x26262b35, 0x44444444,
+ 0x36424444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b,
+ 0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540,
+ 0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040,
+ 0x26232e35, 0x40404040, 0x302b3540, 0x40404040
+ },
+ {
+ 0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20,
+ 0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832,
+ 0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b,
+ 0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b
+ },
+ {
+ 0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116,
+ 0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21,
+ 0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727,
+ 0x1311171a, 0x21272727, 0x18161a21, 0x27272727
+ }
+};
+
+static const unsigned int hdctbl_lum[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hdctbl_chr[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hactbl_lum[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const unsigned int hactbl_chr[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const char *error_to_text[16] = {
+ "Normal",
+ "SOI not detected",
+ "SOF1 to SOFF detected",
+ "Subsampling not detected",
+ "SOF accuracy error",
+ "DQT accuracy error",
+ "Component error 1",
+ "Component error 2",
+ "SOF0, DQT, and DHT not detected when SOS detected",
+ "SOS not detected",
+ "EOI not detected",
+ "Restart interval data number error detected",
+ "Image size error",
+ "Last MCU data number error",
+ "Block data number error",
+ "Unknown"
+};
+
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_buffer *vb)
+{
+ struct v4l2_m2m_buffer *b =
+ container_of(vb, struct v4l2_m2m_buffer, vb);
+
+ return container_of(b, struct jpu_buffer, buf);
+}
+
+static u32 jpu_read(struct jpu *jpu, unsigned int reg)
+{
+ return ioread32(jpu->regs + reg);
+}
+
+static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
+{
+ iowrite32(val, jpu->regs + reg);
+}
+
+static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+ return container_of(c->handler, struct jpu_ctx, ctrl_handler);
+}
+
+static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct jpu_ctx, fh);
+}
+
+static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
+ unsigned int len) {
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ jpu_write(jpu, tbl[i], reg + (i << 2));
+}
+
+static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
+{
+ jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
+ jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
+}
+
+static void jpu_set_htbl(struct jpu *jpu)
+{
+ jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
+}
+
+static int jpu_wait_reset(struct jpu *jpu)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);
+
+ while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(jpu->dev, "timed out in reset\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static int jpu_reset(struct jpu *jpu)
+{
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ return jpu_wait_reset(jpu);
+}
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+static void put_qtbl(u8 *p, const u8 *qtbl)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++)
+ p[i] = *(qtbl + zigzag[i]);
+}
+
+static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < len; i += 4)
+ for (j = 0; j < 4 && (i + j) < len; ++j)
+ p[i + j] = htbl[i + 3 - j];
+}
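put_qtbl() and put_htbl() compensate for the same packing: the tables above
are stored as u32 words in register (big-endian byte) order, ready for
jpu_set_tbl(), so reading them bytewise on a little-endian CPU requires a
per-word byte reversal. zigzag[] appears to fold that reversal into the JPEG
zigzag scan; a sketch of the index mapping (be32_byte_index is a hypothetical
helper, not part of the driver):

	/* Byte offset of raster byte k within an array of big-endian-packed
	 * u32 words, as seen by a little-endian CPU. */
	static inline unsigned int be32_byte_index(unsigned int k)
	{
		return (k & ~3u) | (3u - (k & 3u));
	}
	/* be32_byte_index(0) == 0x03, matching zigzag[0]; put_htbl()'s
	 * htbl[i + 3 - j] applies the same reversal word by word. */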
+
+static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
+{
+ put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
+ put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
+ JPU_JPEG_HACTBL_SIZE);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
+ JPU_JPEG_HACTBL_SIZE);
+}
+
+static int get_byte(struct jpeg_buffer *buf)
+{
+ if (buf->curr >= buf->end)
+ return -1;
+
+ return *(u8 *)buf->curr++;
+}
+
+static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
+{
+ if (buf->end - buf->curr < 2)
+ return -1;
+
+ *word = get_unaligned_be16(buf->curr);
+ buf->curr += 2;
+
+ return 0;
+}
+
+static void skip(struct jpeg_buffer *buf, unsigned long len)
+{
+ buf->curr += min((unsigned long)(buf->end - buf->curr), len);
+}
+
+static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
+ unsigned int *height)
+{
+ struct jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ bool soi = false;
+
+ jpeg_buffer.end = buffer + size;
+ jpeg_buffer.curr = buffer;
+
+ /*
+ * basic size check and EOI check - we don't want to let the JPU cross
+ * buffer bounds in any case; we rely on it stopping at the EOI marker.
+ */
+ if (size < JPU_JPEG_MIN_SIZE || *(u8 *)(buffer + size - 1) != EOI)
+ return 0;
+
+ for (;;) {
+ int c;
+
+ /* skip preceding filler bytes */
+ do
+ c = get_byte(&jpeg_buffer);
+ while (c == 0xff || c == 0);
+
+ if (!soi && c == SOI) {
+ soi = true;
+ continue;
+ } else if (soi != (c != SOI))
+ return 0;
+
+ switch (c) {
+ case SOF0: /* SOF0: baseline JPEG */
+ skip(&jpeg_buffer, 3); /* segment length and bpp */
+ if (get_word_be(&jpeg_buffer, height) ||
+ get_word_be(&jpeg_buffer, width) ||
+ get_byte(&jpeg_buffer) != 3) /* YCbCr only */
+ return 0;
+
+ skip(&jpeg_buffer, 1);
+ return get_byte(&jpeg_buffer);
+ case DHT:
+ case DQT:
+ case COM:
+ case DRI:
+ case APP0 ... APP0 + 0x0f:
+ if (get_word_be(&jpeg_buffer, &word))
+ return 0;
+ skip(&jpeg_buffer, (long)word - 2);
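+ /* fall through - the segment payload has already been skipped */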
+ case 0:
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int jpu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->encoder)
+ strlcpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
+ else
+ strlcpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));
+
+ strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(ctx->jpu->dev));
+ cap->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | cap->device_caps;
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+
+ return 0;
+}
+
+static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
+ unsigned int fmt_type)
+{
+ unsigned int i, fmt_flag;
+
+ if (encoder)
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_ENC_OUTPUT :
+ JPU_ENC_CAPTURE;
+ else
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_DEC_OUTPUT :
+ JPU_DEC_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
+ struct jpu_fmt *fmt = &jpu_formats[i];
+
+ if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
+ if (jpu_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(jpu_formats))
+ return -EINVAL;
+
+ f->pixelformat = jpu_formats[i].fourcc;
+
+ return 0;
+}
+
+static int jpu_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
+ JPU_DEC_CAPTURE);
+}
+
+static int jpu_enum_fmt_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
+}
+
+static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+static void jpu_bound_align_image(u32 *w, unsigned int w_min,
+ unsigned int w_max, unsigned int w_align,
+ u32 *h, unsigned int h_min,
+ unsigned int h_max, unsigned int h_align)
+{
+ unsigned int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+
+ w_step = 1U << w_align;
+ h_step = 1U << h_align;
+ v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
+ h_align, 3);
+
+ if (*w < width && *w + w_step < w_max)
+ *w += w_step;
+ if (*h < height && *h + h_step < h_max)
+ *h += h_step;
+}
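v4l_bound_align_image() rounds to the nearest aligned value, which can shrink
the request; this wrapper then steps the result back up so the caller's image
always fits. A worked case with w_align = 2 (4-pixel steps):

	/* *w = 1001: v4l_bound_align_image() rounds it to 1000; since
	 * 1000 < 1001 and 1004 < w_max, the wrapper bumps it to 1004. */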
+
+static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct jpu_fmt *fmt;
+ unsigned int f_type, w, h;
+
+ f_type = V4L2_TYPE_IS_OUTPUT(type) ? JPU_FMT_TYPE_OUTPUT :
+ JPU_FMT_TYPE_CAPTURE;
+
+ fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
+ if (!fmt) {
+ unsigned int pixelformat;
+
+ dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
+ if (ctx->encoder)
+ pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ else
+ pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
+ }
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = fmt->colorspace;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->num_planes;
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
+ fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
+ JPU_HEIGHT_MAX, fmt->v_align);
+
+ w = pix->width;
+ h = pix->height;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+ /* ignore userspace's sizeimage for encoding */
+ if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
+ pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
+ (JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
+ pix->plane_fmt[0].bytesperline = 0;
+ memset(pix->plane_fmt[0].reserved, 0,
+ sizeof(pix->plane_fmt[0].reserved));
+ } else {
+ unsigned int i, bpl = 0;
+
+ for (i = 0; i < pix->num_planes; ++i)
+ bpl = max(bpl, pix->plane_fmt[i].bytesperline);
+
+ bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
+ bpl = round_up(bpl, JPU_MEMALIGN);
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = bpl;
+ pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+ }
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ return 0;
+}
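A worked example of the non-JPEG branch: a 1280x720 V4L2_PIX_FMT_NV12M request
(bpp = {8, 4}, dimensions already 4-aligned) clamps bytesperline to 1280, so:

	plane 0 (Y):    1280 * 720 * 8 / 8 = 921600 bytes
	plane 1 (CbCr): 1280 * 720 * 4 / 8 = 460800 bytes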
+
+static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
+}
+
+static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct jpu_fmt *fmtinfo;
+ struct jpu_q_data *q_data;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type);
+ if (ret < 0)
+ return ret;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+
+ q_data->format = f->fmt.pix_mp;
+ q_data->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_q_data *q_data;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+static int jpu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct jpu_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY)
+ ctx->compr_quality = ctrl->val;
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
+ .s_ctrl = jpu_s_ctrl,
+};
+
+static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+ enum v4l2_buf_type adj_type;
+
+ src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ if (ctx->encoder) {
+ adj = *src_q_data;
+ orig = src_q_data;
+ ref = dst_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ } else {
+ adj = *dst_q_data;
+ orig = dst_q_data;
+ ref = src_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ }
+
+ adj.format.width = ref->format.width;
+ adj.format.height = ref->format.height;
+
+ __jpu_try_fmt(ctx, NULL, &adj.format, adj_type);
+
+ if (adj.format.width != orig->format.width ||
+ adj.format.height != orig->format.height) {
+ dev_err(ctx->jpu->dev, "src and dst formats do not match.\n");
+ /* maybe we can return -EPIPE here? */
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
+ .vidioc_querycap = jpu_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = jpu_enum_fmt_cap,
+ .vidioc_enum_fmt_vid_out_mplane = jpu_enum_fmt_out,
+ .vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = jpu_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = jpu_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = jpu_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = jpu_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = jpu_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe
+};
+
+static int jpu_controls_create(struct jpu_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 0, JPU_MAX_QUALITY - 1, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
+
+ if (!ctx->encoder)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return 0;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
+}
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+static int jpu_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vq->type);
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++) {
+ unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;
+ unsigned int f_size = fmt ?
+ fmt->fmt.pix_mp.plane_fmt[i].sizeimage : 0;
+
+ if (fmt && f_size < q_size)
+ return -EINVAL;
+
+ sizes[i] = fmt ? f_size : q_size;
+ alloc_ctxs[i] = ctx->jpu->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int jpu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vb->v4l2_buf.field == V4L2_FIELD_ANY)
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ if (vb->v4l2_buf.field != V4L2_FIELD_NONE) {
+ dev_err(ctx->jpu->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(ctx->jpu->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* decoder capture queue */
+ if (!ctx->encoder && !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void jpu_buf_queue(struct vb2_buffer *vb)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+ struct jpu_q_data *q_data, adjust;
+ void *buffer = vb2_plane_vaddr(vb, 0);
+ unsigned long buf_size = vb2_get_plane_payload(vb, 0);
+ unsigned int width, height;
+
+ u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
+ &height);
+
+ /* check if JPEG data basic parsing was successful */
+ if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
+ goto format_error;
+
+ q_data = &ctx->out_q;
+
+ adjust = *q_data;
+ adjust.format.width = width;
+ adjust.format.height = height;
+
+ __jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ if (adjust.format.width != q_data->format.width ||
+ adjust.format.height != q_data->format.height)
+ goto format_error;
+
+ /*
+ * keep subsampling in buffer to check it
+ * for compatibility in device_run
+ */
+ jpu_buf->subsampling = subsampling;
+ }
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+
+ return;
+
+format_error:
+ dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void jpu_buf_finish(struct vb2_buffer *vb)
+{
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vb);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data = &ctx->out_q;
+ enum v4l2_buf_type type = vb->vb2_queue->type;
+ u8 *buffer;
+
+ if (vb->state == VB2_BUF_STATE_DONE)
+ vb->v4l2_buf.sequence = jpu_get_q_data(ctx, type)->sequence++;
+
+ if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
+ V4L2_TYPE_IS_OUTPUT(type))
+ return;
+
+ buffer = vb2_plane_vaddr(vb, 0);
+
+ memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
+ *(u16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
+ cpu_to_be16(q_data->format.height);
+ *(u16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
+ cpu_to_be16(q_data->format.width);
+ *(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
+}
+
+static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static void jpu_stop_streaming(struct vb2_queue *vq)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vb == NULL)
+ return;
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ }
+}
+
+static struct vb2_ops jpu_qops = {
+ .queue_setup = jpu_queue_setup,
+ .buf_prepare = jpu_buf_prepare,
+ .buf_queue = jpu_buf_queue,
+ .buf_finish = jpu_buf_finish,
+ .start_streaming = jpu_start_streaming,
+ .stop_streaming = jpu_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct jpu_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ src_vq->ops = &jpu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpu->mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ dst_vq->ops = &jpu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpu->mutex;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+static int jpu_open(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct jpu_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpu = jpu;
+ ctx->encoder = vfd == &jpu->vfd_encoder;
+
+ __jpu_try_fmt(ctx, &ctx->out_q.fmtinfo, &ctx->out_q.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ __jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto v4l_prepare_rollback;
+ }
+
+ ret = jpu_controls_create(ctx);
+ if (ret < 0)
+ goto v4l_prepare_rollback;
+
+ if (mutex_lock_interruptible(&jpu->mutex)) {
+ ret = -ERESTARTSYS;
+ goto v4l_prepare_rollback;
+ }
+
+ if (jpu->ref_count == 0) {
+ ret = clk_prepare_enable(jpu->clk);
+ if (ret < 0)
+ goto device_prepare_rollback;
+ /* ...issue software reset */
+ ret = jpu_reset(jpu);
+ if (ret)
+ goto device_prepare_rollback;
+ }
+
+ jpu->ref_count++;
+
+ mutex_unlock(&jpu->mutex);
+ return 0;
+
+device_prepare_rollback:
+ mutex_unlock(&jpu->mutex);
+v4l_prepare_rollback:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int jpu_release(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mutex_lock(&jpu->mutex);
+ if (--jpu->ref_count == 0)
+ clk_disable_unprepare(jpu->clk);
+ mutex_unlock(&jpu->mutex);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations jpu_fops = {
+ .owner = THIS_MODULE,
+ .open = jpu_open,
+ .release = jpu_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
+{
+ /* remove current buffers and finish job */
+ struct vb2_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+
+ /* ...and give it a chance on next run */
+ if (reset)
+ jpu_write(ctx->jpu, JCCMD_SRST, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void jpu_device_run(void *priv)
+{
+ struct jpu_ctx *ctx = priv;
+ struct jpu *jpu = ctx->jpu;
+ struct jpu_buffer *jpu_buf;
+ struct jpu_q_data *q_data;
+ struct vb2_buffer *src_buf, *dst_buf;
+ unsigned int w, h, bpl;
+ unsigned char num_planes, subsampling;
+ unsigned long flags;
+
+ /* ...wait until module reset completes; we have mutex locked here */
+ if (jpu_wait_reset(jpu)) {
+ jpu_cleanup(ctx, true);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ jpu->curr = ctx;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ if (ctx->encoder) {
+ jpu_buf = vb2_to_jpu_buffer(dst_buf);
+ q_data = &ctx->out_q;
+ } else {
+ jpu_buf = vb2_to_jpu_buffer(src_buf);
+ q_data = &ctx->cap_q;
+ }
+
+ w = q_data->format.width;
+ h = q_data->format.height;
+ bpl = q_data->format.plane_fmt[0].bytesperline;
+ num_planes = q_data->fmtinfo->num_planes;
+ subsampling = q_data->fmtinfo->subsampling;
+
+ if (ctx->encoder) {
+ unsigned long src_1_addr, src_2_addr, dst_addr;
+ unsigned int redu, inft;
+
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ src_1_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
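+		/* single memory plane: the chroma data follows the w * h luma plane */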
+ if (num_planes > 1)
+ src_2_addr = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ else
+ src_2_addr = src_1_addr + w * h;
+
+ jpu_buf->compr_quality = ctx->compr_quality;
+
+ if (subsampling == JPU_JPEG_420) {
+ redu = JCMOD_REDU_420;
+ inft = JIFECNT_INFT_420;
+ } else {
+ redu = JCMOD_REDU_422;
+ inft = JIFECNT_INFT_422;
+ }
+
+		/* only no-marker mode works for encoding */
+ jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu |
+ JCMOD_MSKIP_ENABLE, JCMOD);
+
+ jpu_write(jpu, JIFECNT_SWAP_WB | inft, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+ jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE);
+
+ /* Y and C components source addresses */
+ jpu_write(jpu, src_1_addr, JIFESYA1);
+ jpu_write(jpu, src_2_addr, JIFESCA1);
+
+ /* memory width */
+ jpu_write(jpu, bpl, JIFESMW);
+
+ jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU);
+ jpu_write(jpu, w & JCSZ_MASK, JCHSZD);
+
+ jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU);
+ jpu_write(jpu, h & JCSZ_MASK, JCVSZD);
+
+ jpu_write(jpu, w, JIFESHSZ);
+ jpu_write(jpu, h, JIFESVSZ);
+
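+		/* leave room for the pre-generated JPEG header in front of the compressed data */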
+ jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1);
+
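+		/* quantization table 0 for component 1 (Y), table 1 for components 2 and 3 (chroma) */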
+ jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) |
+ 1 << JCQTN_SHIFT(3), JCQTN);
+
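+		/* Huffman DC/AC table pair 0 for component 1 (Y), pair 1 for the chroma components */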
+ jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) |
+ 1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) |
+ 1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3),
+ JCHTN);
+
+ jpu_set_qtbl(jpu, ctx->compr_quality);
+ jpu_set_htbl(jpu);
+ } else {
+ unsigned long src_addr, dst_1_addr, dst_2_addr;
+
+ if (jpu_buf->subsampling != subsampling) {
+ dev_err(ctx->jpu->dev,
+ "src and dst formats do not match.\n");
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ jpu_cleanup(ctx, false);
+ return;
+ }
+
+ src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ dst_1_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (q_data->fmtinfo->num_planes > 1)
+ dst_2_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ else
+ dst_2_addr = dst_1_addr + w * h;
+
+ /* ...set up decoder operation */
+ jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD);
+ jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+
+ /* ...enable interrupts on transfer completion and d-g error */
+ jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE);
+
+ /* ...set source/destination addresses of encoded data */
+ jpu_write(jpu, src_addr, JIFDSA1);
+ jpu_write(jpu, dst_1_addr, JIFDDYA1);
+ jpu_write(jpu, dst_2_addr, JIFDDCA1);
+
+ jpu_write(jpu, bpl, JIFDDMW);
+ }
+
+ /* ...start encoder/decoder operation */
+ jpu_write(jpu, JCCMD_JSRT, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+}
+
+static int jpu_job_ready(void *priv)
+{
+ return 1;
+}
+
+static void jpu_job_abort(void *priv)
+{
+ struct jpu_ctx *ctx = priv;
+
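+	/* wait for the current job to complete; reset the hardware if it never does */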
+ if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr,
+ msecs_to_jiffies(JPU_JOB_TIMEOUT)))
+ jpu_cleanup(ctx, true);
+}
+
+static struct v4l2_m2m_ops jpu_m2m_ops = {
+ .device_run = jpu_device_run,
+ .job_ready = jpu_job_ready,
+ .job_abort = jpu_job_abort,
+};
+
+/*
+ * ============================================================================
+ * IRQ handler
+ * ============================================================================
+ */
+static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
+{
+ struct jpu *jpu = dev_id;
+ struct jpu_ctx *curr_ctx;
+ struct vb2_buffer *src_buf, *dst_buf;
+ unsigned int int_status;
+
+ int_status = jpu_read(jpu, JINTS);
+
+ /* ...spurious interrupt */
+ if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) &
+ int_status))
+ return IRQ_NONE;
+
+ /* ...clear interrupts */
+ jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS);
+ if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL))
+ jpu_write(jpu, JCCMD_JEND, JCCMD);
+
+ spin_lock(&jpu->lock);
+
+ if ((int_status & JINTS_PROCESS_COMPL) &&
+ !(int_status & JINTS_TRANSF_COMPL))
+ goto handled;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev);
+ if (!curr_ctx) {
+ /* ...instance is not running */
+ dev_err(jpu->dev, "no active context for m2m\n");
+ goto handled;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (int_status & JINTS_TRANSF_COMPL) {
+ if (curr_ctx->encoder) {
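+			/* the compressed data size is a 24-bit count split over three byte-wide registers */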
+ unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
+ | jpu_read(jpu, JCDTCM) << 8
+ | jpu_read(jpu, JCDTCD);
+ vb2_set_plane_payload(dst_buf, 0,
+ payload_size + JPU_JPEG_HDR_SIZE);
+ }
+
+ dst_buf->v4l2_buf.field = src_buf->v4l2_buf.field;
+ dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE)
+ dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+		/* carry over only the flags that describe the source frame */
+		dst_buf->v4l2_buf.flags = src_buf->v4l2_buf.flags &
+			(V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
+			 V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
+			 V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ } else if (int_status & JINTS_ERR) {
+ unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK;
+
+ dev_dbg(jpu->dev, "processing error: %#X: %s\n", error,
+ error_to_text[error]);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ jpu->curr = NULL;
+
+ /* ...reset JPU after completion */
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ spin_unlock(&jpu->lock);
+
+ v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
+
+ /* ...wakeup abort routine if needed */
+ wake_up(&jpu->irq_queue);
+
+ return IRQ_HANDLED;
+
+handled:
+ spin_unlock(&jpu->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+static const struct of_device_id jpu_dt_ids[] = {
+ { .compatible = "renesas,jpu-r8a7790" }, /* H2 */
+ { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
+ { .compatible = "renesas,jpu-r8a7792" }, /* V2H */
+ { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+ { },
+};
+MODULE_DEVICE_TABLE(of, jpu_dt_ids);
+
+static int jpu_probe(struct platform_device *pdev)
+{
+ struct jpu *jpu;
+ struct resource *res;
+ int ret;
+ unsigned int i;
+
+ jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL);
+ if (!jpu)
+ return -ENOMEM;
+
+ init_waitqueue_head(&jpu->irq_queue);
+ mutex_init(&jpu->mutex);
+ spin_lock_init(&jpu->lock);
+ jpu->dev = &pdev->dev;
+
+ /* memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpu->regs))
+ return PTR_ERR(jpu->regs);
+
+ /* interrupt service routine registration */
+ jpu->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
+ dev_name(&pdev->dev), jpu);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq);
+ return ret;
+ }
+
+ /* clocks */
+ jpu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(jpu->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(jpu->clk);
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ return ret;
+ }
+
+ /* mem2mem device */
+ jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops);
+ if (IS_ERR(jpu->m2m_dev)) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpu->m2m_dev);
+ goto device_register_rollback;
+ }
+
+ jpu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(jpu->alloc_ctx)) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to init memory allocator\n");
+ ret = PTR_ERR(jpu->alloc_ctx);
+ goto m2m_init_rollback;
+ }
+
+	/* fill in quantization and Huffman tables for the encoder */
+ for (i = 0; i < JPU_MAX_QUALITY; i++)
+ jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
+
+ strlcpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name));
+ jpu->vfd_encoder.fops = &jpu_fops;
+ jpu->vfd_encoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_encoder.minor = -1;
+ jpu->vfd_encoder.release = video_device_release_empty;
+ jpu->vfd_encoder.lock = &jpu->mutex;
+ jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto vb2_allocator_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_encoder, jpu);
+
+ strlcpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name));
+ jpu->vfd_decoder.fops = &jpu_fops;
+ jpu->vfd_decoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_decoder.minor = -1;
+ jpu->vfd_decoder.release = video_device_release_empty;
+ jpu->vfd_decoder.lock = &jpu->mutex;
+ jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M;
+
+ ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto enc_vdev_register_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_decoder, jpu);
+ platform_set_drvdata(pdev, jpu);
+
+ v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n",
+ jpu->vfd_encoder.num);
+ v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n",
+ jpu->vfd_decoder.num);
+
+ return 0;
+
+enc_vdev_register_rollback:
+ video_unregister_device(&jpu->vfd_encoder);
+
+vb2_allocator_rollback:
+ vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
+
+m2m_init_rollback:
+ v4l2_m2m_release(jpu->m2m_dev);
+
+device_register_rollback:
+ v4l2_device_unregister(&jpu->v4l2_dev);
+
+ return ret;
+}
+
+static int jpu_remove(struct platform_device *pdev)
+{
+ struct jpu *jpu = platform_get_drvdata(pdev);
+
+ video_unregister_device(&jpu->vfd_decoder);
+ video_unregister_device(&jpu->vfd_encoder);
+ vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
+ v4l2_m2m_release(jpu->m2m_dev);
+ v4l2_device_unregister(&jpu->v4l2_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int jpu_suspend(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
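+	/* the clock is only enabled while at least one file handle is open */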
+ if (jpu->ref_count == 0)
+ return 0;
+
+ clk_disable_unprepare(jpu->clk);
+
+ return 0;
+}
+
+static int jpu_resume(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
+ if (jpu->ref_count == 0)
+ return 0;
+
+	return clk_prepare_enable(jpu->clk);
+}
+#endif
+
+static const struct dev_pm_ops jpu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
+};
+
+static struct platform_driver jpu_driver = {
+ .probe = jpu_probe,
+ .remove = jpu_remove,
+ .driver = {
+ .of_match_table = jpu_dt_ids,
+ .name = DRV_NAME,
+ .pm = &jpu_pm_ops,
+ },
+};
+
+module_platform_driver(jpu_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index bfbf1575677c..9690f9dcb0ca 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2544,7 +2544,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
- goto enc_vdev_alloc_rollback;
+ video_device_release(jpeg->vfd_encoder);
+ goto vb2_allocator_rollback;
}
video_set_drvdata(jpeg->vfd_encoder, jpeg);
@@ -2572,7 +2573,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
- goto dec_vdev_alloc_rollback;
+ video_device_release(jpeg->vfd_decoder);
+ goto enc_vdev_register_rollback;
}
video_set_drvdata(jpeg->vfd_decoder, jpeg);
@@ -2589,15 +2591,9 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
return 0;
-dec_vdev_alloc_rollback:
- video_device_release(jpeg->vfd_decoder);
-
enc_vdev_register_rollback:
video_unregister_device(jpeg->vfd_encoder);
-enc_vdev_alloc_rollback:
- video_device_release(jpeg->vfd_encoder);
-
vb2_allocator_rollback:
vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
@@ -2622,9 +2618,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
pm_runtime_disable(jpeg->dev);
video_unregister_device(jpeg->vfd_decoder);
- video_device_release(jpeg->vfd_decoder);
video_unregister_device(jpeg->vfd_encoder);
- video_device_release(jpeg->vfd_encoder);
vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index f17609669b96..b1b149151d2d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -37,8 +37,12 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
{
struct s5p_mfc_cmd_args h2r_args;
struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
+ int ret;
+
+ ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
+ if (ret)
+ return ret;
- s5p_mfc_hw_call(dev->mfc_ops, alloc_dev_context_buffer, dev);
mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index e65993f4b901..2e57e9f45b85 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1819,11 +1819,12 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
- if (ctx->state != MFCINST_GOT_INST) {
- mfc_err("inavlid state: %d\n", ctx->state);
- return -EINVAL;
- }
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->state != MFCINST_GOT_INST) {
+ mfc_err("inavlid state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+
if (ctx->dst_fmt)
*plane_count = ctx->dst_fmt->num_planes;
else
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 00a1d8b2a8c2..1e7250260a9a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -37,10 +37,9 @@ void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
}
-int s5p_mfc_alloc_priv_buf(struct device *dev,
+int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
struct s5p_mfc_priv_buf *b)
{
-
mfc_debug(3, "Allocating priv: %zu\n", b->size);
b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
@@ -50,6 +49,14 @@ int s5p_mfc_alloc_priv_buf(struct device *dev,
return -ENOMEM;
}
+ if (b->dma < base) {
+ mfc_err("Invaling memory configuration!\n");
+ mfc_err("Allocated buffer (%pad) is lower than memory base address (%pad)\n",
+ &b->dma, &base);
+ dma_free_coherent(dev, b->size, b->virt, b->dma);
+ return -ENOMEM;
+ }
+
mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 22dfb3effda8..77a08b19b46d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -334,7 +334,7 @@ struct s5p_mfc_hw_ops {
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
void s5p_mfc_init_regs(struct s5p_mfc_dev *dev);
-int s5p_mfc_alloc_priv_buf(struct device *dev,
+int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
struct s5p_mfc_priv_buf *b);
void s5p_mfc_release_priv_buf(struct device *dev,
struct s5p_mfc_priv_buf *b);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 9a923b1a9bac..6402f76cc620 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -41,7 +41,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
int ret;
ctx->dsc.size = buf_size->dsc;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->dsc);
if (ret) {
mfc_err("Failed to allocate temporary buffer\n");
return ret;
@@ -172,7 +172,8 @@ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Allocate only if memory from bank 1 is necessary */
if (ctx->bank1.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+ &ctx->bank1);
if (ret) {
mfc_err("Failed to allocate Bank1 temporary buffer\n");
return ret;
@@ -181,7 +182,8 @@ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
}
/* Allocate only if memory from bank 2 is necessary */
if (ctx->bank2.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, dev->bank2,
+ &ctx->bank2);
if (ret) {
mfc_err("Failed to allocate Bank2 temporary buffer\n");
s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
@@ -212,7 +214,7 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
else
ctx->ctx.size = buf_size->non_h264_ctx;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
if (ret) {
mfc_err("Failed to allocate instance buffer\n");
return ret;
@@ -225,7 +227,7 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
/* Initialize shared memory */
ctx->shm.size = buf_size->shm;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->shm);
if (ret) {
mfc_err("Failed to allocate shared memory buffer\n");
s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 12497f5ed8e9..e5cb30e1f718 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -239,7 +239,8 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
/* Allocate only if memory from bank 1 is necessary */
if (ctx->bank1.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+ &ctx->bank1);
if (ret) {
mfc_err("Failed to allocate Bank1 memory\n");
return ret;
@@ -291,7 +292,7 @@ static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
break;
}
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
if (ret) {
mfc_err("Failed to allocate instance buffer\n");
return ret;
@@ -320,7 +321,8 @@ static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
mfc_debug_enter();
dev->ctx_buf.size = buf_size->dev_ctx;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
+ ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
+ &dev->ctx_buf);
if (ret) {
mfc_err("Failed to allocate device context buffer\n");
return ret;
@@ -1734,7 +1736,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
mfc_debug(1, "New context: %d\n", new_ctx);
ctx = dev->ctx[new_ctx];
- mfc_debug(1, "Seting new context to %p\n", ctx);
+ mfc_debug(1, "Setting new context to %p\n", ctx);
/* Got context to run in ctx */
mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index c2f2e35642f2..aae652351aa8 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -315,7 +315,6 @@ MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
static struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "s5p-hdmiphy",
- .owner = THIS_MODULE,
},
.probe = hdmiphy_probe,
.remove = hdmiphy_remove,
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
index b713403024ef..5127acb1e571 100644
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
@@ -357,17 +357,15 @@ void mxr_reg_streamoff(struct mxr_device *mdev)
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
- int ret;
+ long time_left;
clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
/* TODO: consider adding interruptible */
- ret = wait_event_timeout(mdev->event_queue,
- test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
- msecs_to_jiffies(1000));
- if (ret > 0)
+ time_left = wait_event_timeout(mdev->event_queue,
+ test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
+ msecs_to_jiffies(1000));
+ if (time_left > 0)
return 0;
- if (ret < 0)
- return ret;
mxr_warn(mdev, "no vsync detected - timeout\n");
return -ETIME;
}
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index db8c17bb4aaa..8d171310af8f 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -397,7 +397,6 @@ MODULE_DEVICE_TABLE(i2c, sii9234_id);
static struct i2c_driver sii9234_driver = {
.driver = {
.name = "sii9234",
- .owner = THIS_MODULE,
.pm = &sii9234_pm_ops,
},
.probe = sii9234_probe,
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 2554f3719b9e..f5e3eb3a20ff 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -211,7 +211,7 @@ static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV24:
- return V4L2_COLORSPACE_JPEG;
+ return V4L2_COLORSPACE_SMPTE170M;
case V4L2_PIX_FMT_RGB332:
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_RGB565:
@@ -958,6 +958,7 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &sh_veu_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->lock = &veu->fop_lock;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(src_vq);
if (ret < 0)
@@ -971,6 +972,7 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &sh_veu_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->lock = &veu->fop_lock;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
return vb2_queue_init(dst_vq);
}
@@ -1103,6 +1105,12 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
if (!src || !dst)
return IRQ_NONE;
+ dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
+ dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->v4l2_buf.flags |=
+ src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
+
spin_lock(&veu->lock);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 8b799bae01b8..fe5c8ab06bd5 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -27,7 +27,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mediabus.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
/* Mirror addresses are not available for all registers */
#define VOUER 0
@@ -57,31 +57,40 @@ enum sh_vou_status {
SH_VOU_RUNNING,
};
+#define VOU_MIN_IMAGE_WIDTH 16
#define VOU_MAX_IMAGE_WIDTH 720
-#define VOU_MAX_IMAGE_HEIGHT 576
+#define VOU_MIN_IMAGE_HEIGHT 16
+
+struct sh_vou_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_buffer *vb2)
+{
+ return container_of(vb2, struct sh_vou_buffer, vb);
+}
struct sh_vou_device {
struct v4l2_device v4l2_dev;
struct video_device vdev;
- atomic_t use_count;
struct sh_vou_pdata *pdata;
spinlock_t lock;
void __iomem *base;
/* State information */
struct v4l2_pix_format pix;
struct v4l2_rect rect;
- struct list_head queue;
+ struct list_head buf_list;
v4l2_std_id std;
int pix_idx;
- struct videobuf_buffer *active;
+ struct vb2_queue queue;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct sh_vou_buffer *active;
enum sh_vou_status status;
+ unsigned sequence;
struct mutex fop_lock;
};
-struct sh_vou_file {
- struct videobuf_queue vbq;
-};
-
/* Register access routines for sides A, B and mirror addresses */
static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
u32 value)
@@ -133,6 +142,7 @@ struct sh_vou_fmt {
u32 pfmt;
char *desc;
unsigned char bpp;
+ unsigned char bpl;
unsigned char rgb;
unsigned char yf;
unsigned char pkf;
@@ -143,6 +153,7 @@ static struct sh_vou_fmt vou_fmt[] = {
{
.pfmt = V4L2_PIX_FMT_NV12,
.bpp = 12,
+ .bpl = 1,
.desc = "YVU420 planar",
.yf = 0,
.rgb = 0,
@@ -150,6 +161,7 @@ static struct sh_vou_fmt vou_fmt[] = {
{
.pfmt = V4L2_PIX_FMT_NV16,
.bpp = 16,
+ .bpl = 1,
.desc = "YVYU planar",
.yf = 1,
.rgb = 0,
@@ -157,6 +169,7 @@ static struct sh_vou_fmt vou_fmt[] = {
{
.pfmt = V4L2_PIX_FMT_RGB24,
.bpp = 24,
+ .bpl = 3,
.desc = "RGB24",
.pkf = 2,
.rgb = 1,
@@ -164,6 +177,7 @@ static struct sh_vou_fmt vou_fmt[] = {
{
.pfmt = V4L2_PIX_FMT_RGB565,
.bpp = 16,
+ .bpl = 2,
.desc = "RGB565",
.pkf = 3,
.rgb = 1,
@@ -171,6 +185,7 @@ static struct sh_vou_fmt vou_fmt[] = {
{
.pfmt = V4L2_PIX_FMT_RGB565X,
.bpp = 16,
+ .bpl = 2,
.desc = "RGB565 byteswapped",
.pkf = 3,
.rgb = 1,
@@ -178,11 +193,11 @@ static struct sh_vou_fmt vou_fmt[] = {
};
static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
- struct videobuf_buffer *vb)
+ struct vb2_buffer *vb)
{
dma_addr_t addr1, addr2;
- addr1 = videobuf_to_dma_contig(vb);
+ addr1 = vb2_dma_contig_plane_dma_addr(vb, 0);
switch (vou_dev->pix.pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
@@ -196,8 +211,7 @@ static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
}
-static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
- struct videobuf_buffer *vb)
+static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
{
unsigned int row_coeff;
#ifdef __LITTLE_ENDIAN
@@ -224,167 +238,136 @@ static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
- sh_vou_schedule_next(vou_dev, vb);
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
- BUG_ON(in_interrupt());
-
- /* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
- videobuf_waiton(vq, vb, 0, 0);
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
}
/* Locking: caller holds fop_lock mutex */
-static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int sh_vou_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct video_device *vdev = vq->priv_data;
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
-
- *size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
- vou_dev->pix.height / 8;
-
- if (*count < 2)
- *count = 2;
-
- /* Taking into account maximum frame size, *count will stay >= 2 */
- if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
- *count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
- dev_dbg(vou_dev->v4l2_dev.dev, "%s(): count=%d, size=%d\n", __func__,
- *count, *size);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+ if (fmt && fmt->fmt.pix.sizeimage < pix->height * bytes_per_line)
+ return -EINVAL;
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : pix->height * bytes_per_line;
+ alloc_ctxs[0] = vou_dev->alloc_ctx;
return 0;
}
-/* Locking: caller holds fop_lock mutex */
-static int sh_vou_buf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int sh_vou_buf_prepare(struct vb2_buffer *vb)
{
- struct video_device *vdev = vq->priv_data;
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
struct v4l2_pix_format *pix = &vou_dev->pix;
- int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
- int ret;
+ unsigned bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ unsigned size = pix->height * bytes_per_line;
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- if (vb->width != pix->width ||
- vb->height != pix->height ||
- vb->field != pix->field) {
- vb->width = pix->width;
- vb->height = pix->height;
- vb->field = field;
- if (vb->state != VIDEOBUF_NEEDS_INIT)
- free_buffer(vq, vb);
- }
-
- vb->size = vb->height * bytes_per_line;
- if (vb->baddr && vb->bsize < vb->size) {
+ if (vb2_plane_size(vb, 0) < size) {
/* User buffer too small */
- dev_warn(vq->dev, "User buffer too small: [%zu] @ %lx\n",
- vb->bsize, vb->baddr);
+ dev_warn(vou_dev->v4l2_dev.dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
return -EINVAL;
}
- if (vb->state == VIDEOBUF_NEEDS_INIT) {
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret < 0) {
- dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
- vb->memory, ret);
- return ret;
- }
- vb->state = VIDEOBUF_PREPARED;
- }
-
- dev_dbg(vou_dev->v4l2_dev.dev,
- "%s(): fmt #%d, %u bytes per line, phys %pad, type %d, state %d\n",
- __func__, vou_dev->pix_idx, bytes_per_line,
- ({ dma_addr_t addr = videobuf_to_dma_contig(vb); &addr; }),
- vb->memory, vb->state);
-
+ vb2_set_plane_payload(vb, 0, size);
return 0;
}
/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
-static void sh_vou_buf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void sh_vou_buf_queue(struct vb2_buffer *vb)
{
- struct video_device *vdev = vq->priv_data;
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vb);
+ unsigned long flags;
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+ spin_lock_irqsave(&vou_dev->lock, flags);
+ list_add_tail(&shbuf->list, &vou_dev->buf_list);
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
+ int ret;
- vb->state = VIDEOBUF_QUEUED;
- list_add_tail(&vb->queue, &vou_dev->queue);
-
- if (vou_dev->status == SH_VOU_RUNNING) {
- return;
- } else if (!vou_dev->active) {
- vou_dev->active = vb;
- /* Start from side A: we use mirror addresses, so, set B */
- sh_vou_reg_a_write(vou_dev, VOURPR, 1);
- dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
- __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
- sh_vou_schedule_next(vou_dev, vb);
- /* Only activate VOU after the second buffer */
- } else if (vou_dev->active->queue.next == &vb->queue) {
- /* Second buffer - initialise register side B */
- sh_vou_reg_a_write(vou_dev, VOURPR, 0);
- sh_vou_stream_start(vou_dev, vb);
-
- /* Register side switching with frame VSYNC */
- sh_vou_reg_a_write(vou_dev, VOURCR, 5);
- dev_dbg(vou_dev->v4l2_dev.dev, "%s: second buffer status 0x%x\n",
- __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
-
- /* Enable End-of-Frame (VSYNC) interrupts */
- sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
- /* Two buffers on the queue - activate the hardware */
-
- vou_dev->status = SH_VOU_RUNNING;
- sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ vou_dev->sequence = 0;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ list_del(&buf->list);
+ }
+ vou_dev->active = NULL;
+ return ret;
}
+
+ buf = list_entry(vou_dev->buf_list.next, struct sh_vou_buffer, list);
+
+ vou_dev->active = buf;
+
+ /* Start from side A: we use mirror addresses, so, set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ buf = list_entry(buf->list.next, struct sh_vou_buffer, list);
+
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+
+ sh_vou_stream_config(vou_dev);
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+
+ /* Two buffers on the queue - activate the hardware */
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ return 0;
}
-static void sh_vou_buf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void sh_vou_stop_streaming(struct vb2_queue *vq)
{
- struct video_device *vdev = vq->priv_data;
- struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
unsigned long flags;
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 0);
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ msleep(50);
spin_lock_irqsave(&vou_dev->lock, flags);
-
- if (vou_dev->active == vb) {
- /* disable output */
- sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
- /* ...but the current frame will complete */
- sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
- vou_dev->active = NULL;
- }
-
- if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
- vb->state = VIDEOBUF_ERROR;
- list_del(&vb->queue);
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
}
-
+ vou_dev->active = NULL;
spin_unlock_irqrestore(&vou_dev->lock, flags);
-
- free_buffer(vq, vb);
}
-static struct videobuf_queue_ops sh_vou_video_qops = {
- .buf_setup = sh_vou_buf_setup,
- .buf_prepare = sh_vou_buf_prepare,
- .buf_queue = sh_vou_buf_queue,
- .buf_release = sh_vou_buf_release,
+static struct vb2_ops sh_vou_qops = {
+ .queue_setup = sh_vou_queue_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .start_streaming = sh_vou_start_streaming,
+ .stop_streaming = sh_vou_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* Video IOCTLs */
@@ -396,7 +379,10 @@ static int sh_vou_querycap(struct file *file, void *priv,
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, "sh-vou", sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -540,8 +526,10 @@ static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
img_height_max = 576;
/* Image width must be a multiple of 4 */
- v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
- &geo->in_height, 0, img_height_max, 1, 0);
+ v4l_bound_align_image(&geo->in_width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
/* Select scales to come as close as possible to the output image */
for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
@@ -671,34 +659,19 @@ static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
vou_scale_v_num[idx_v], vou_scale_v_den[idx_v], best);
}
-static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *fmt)
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
struct v4l2_pix_format *pix = &fmt->fmt.pix;
unsigned int img_height_max;
int pix_idx;
- struct sh_vou_geometry geo;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- /* Revisit: is this the correct code? */
- .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .format.field = V4L2_FIELD_INTERLACED,
- .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
- };
- struct v4l2_mbus_framefmt *mbfmt = &format.format;
- int ret;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
- vou_dev->rect.width, vou_dev->rect.height,
- pix->width, pix->height);
- if (pix->field == V4L2_FIELD_ANY)
- pix->field = V4L2_FIELD_NONE;
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
- pix->field != V4L2_FIELD_NONE)
- return -EINVAL;
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ pix->ycbcr_enc = pix->quantization = 0;
for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
@@ -712,9 +685,38 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
else
img_height_max = 576;
- /* Image width must be a multiple of 4 */
- v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
- &pix->height, 0, img_height_max, 1, 0);
+ v4l_bound_align_image(&pix->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+ pix->bytesperline = pix->width * vou_fmt[pix_idx].bpl;
+ pix->sizeimage = pix->height * ((pix->width * vou_fmt[pix_idx].bpp) >> 3);
+
+ return 0;
+}
+
+static int sh_vou_set_fmt_vid_out(struct sh_vou_device *vou_dev,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int img_height_max;
+ struct sh_vou_geometry geo;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ /* Revisit: is this the correct code? */
+ .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format.field = V4L2_FIELD_INTERLACED,
+ .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ struct v4l2_mbus_framefmt *mbfmt = &format.format;
+ int pix_idx;
+ int ret;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
geo.in_width = pix->width;
geo.in_height = pix->height;
@@ -733,6 +735,11 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
geo.output.width, geo.output.height, mbfmt->width, mbfmt->height);
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
/* Sanity checks */
if ((unsigned)mbfmt->width > VOU_MAX_IMAGE_WIDTH ||
(unsigned)mbfmt->height > img_height_max ||
@@ -765,109 +772,39 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
return 0;
}
-static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *fmt)
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- int i;
+ int ret = sh_vou_try_fmt_vid_out(file, priv, fmt);
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- pix->field = V4L2_FIELD_NONE;
-
- v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
- &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
-
- for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
- if (vou_fmt[i].pfmt == pix->pixelformat)
- return 0;
-
- pix->pixelformat = vou_fmt[0].pfmt;
-
- return 0;
+ if (ret)
+ return ret;
+ return sh_vou_set_fmt_vid_out(vou_dev, &fmt->fmt.pix);
}
-static int sh_vou_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req)
+static int sh_vou_enum_output(struct file *file, void *fh,
+ struct v4l2_output *a)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (a->index)
return -EINVAL;
-
- return videobuf_reqbufs(&vou_file->vbq, req);
-}
-
-static int sh_vou_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- return videobuf_querybuf(&vou_file->vbq, b);
-}
-
-static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- return videobuf_qbuf(&vou_file->vbq, b);
+ strlcpy(a->name, "Video Out", sizeof(a->name));
+ a->type = V4L2_OUTPUT_TYPE_ANALOG;
+ a->std = vou_dev->vdev.tvnorms;
+ return 0;
}
-static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+static int sh_vou_g_output(struct file *file, void *fh, unsigned int *i)
{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
+ *i = 0;
+ return 0;
}
-static int sh_vou_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buftype)
+static int sh_vou_s_output(struct file *file, void *fh, unsigned int i)
{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
- int ret;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
- video, s_stream, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- /* This calls our .buf_queue() (== sh_vou_buf_queue) */
- return videobuf_streamon(&vou_file->vbq);
-}
-
-static int sh_vou_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buftype)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = priv;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- /*
- * This calls buf_release from host driver's videobuf_queue_ops for all
- * remaining buffers. When the last buffer is freed, stop streaming
- */
- videobuf_streamoff(&vou_file->vbq);
- v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
-
- return 0;
+ return i ? -EINVAL : 0;
}
static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
@@ -892,8 +829,11 @@ static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);
- if (std_id & ~vou_dev->vdev.tvnorms)
- return -EINVAL;
+ if (std_id == vou_dev->std)
+ return 0;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
s_std_output, std_id);
@@ -901,13 +841,25 @@ static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
- if (std_id & V4L2_STD_525_60)
+ vou_dev->rect.top = vou_dev->rect.left = 0;
+ vou_dev->rect.width = VOU_MAX_IMAGE_WIDTH;
+ if (std_id & V4L2_STD_525_60) {
sh_vou_reg_ab_set(vou_dev, VOUCR,
sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
- else
+ vou_dev->rect.height = 480;
+ } else {
sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+ vou_dev->rect.height = 576;
+ }
+ vou_dev->pix.width = vou_dev->rect.width;
+ vou_dev->pix.height = vou_dev->rect.height;
+ vou_dev->pix.bytesperline =
+ vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpl;
+ vou_dev->pix.sizeimage = vou_dev->pix.height *
+ ((vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpp) >> 3);
vou_dev->std = std_id;
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
return 0;
}
@@ -923,24 +875,66 @@ static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
return 0;
}
-static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+static int sh_vou_log_status(struct file *file, void *priv)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+ pr_info("VOUER: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUER));
+ pr_info("VOUCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUCR));
+ pr_info("VOUSTR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSTR));
+ pr_info("VOUVCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVCR));
+ pr_info("VOUISR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUISR));
+ pr_info("VOUBCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUBCR));
+ pr_info("VOUDPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDPR));
+ pr_info("VOUDSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDSR));
+ pr_info("VOUVPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVPR));
+ pr_info("VOUIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUIR));
+ pr_info("VOUSRR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSRR));
+ pr_info("VOUMSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUMSR));
+ pr_info("VOUHIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUHIR));
+ pr_info("VOUDFR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDFR));
+ pr_info("VOUAD1R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD1R));
+ pr_info("VOUAD2R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD2R));
+ pr_info("VOUAIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAIR));
+ pr_info("VOUSWR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSWR));
+ pr_info("VOURCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURCR));
+ pr_info("VOURPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURPR));
+ return 0;
+}
- a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- a->c = vou_dev->rect;
+static int sh_vou_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vou_dev->rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = VOU_MAX_IMAGE_WIDTH;
+ if (vou_dev->std & V4L2_STD_525_60)
+ sel->r.height = 480;
+ else
+ sel->r.height = 576;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
/* Assume a dull encoder, do all the work ourselves. */
-static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
+static int sh_vou_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
{
- struct v4l2_crop a_writable = *a;
+ struct v4l2_rect *rect = &sel->r;
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct v4l2_rect *rect = &a_writable.c;
struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
struct v4l2_pix_format *pix = &vou_dev->pix;
struct sh_vou_geometry geo;
@@ -954,19 +948,22 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
unsigned int img_height_max;
int ret;
- dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
- rect->width, rect->height, rect->left, rect->top);
-
- if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
if (vou_dev->std & V4L2_STD_525_60)
img_height_max = 480;
else
img_height_max = 576;
- v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
- &rect->height, 0, img_height_max, 1, 0);
+ v4l_bound_align_image(&rect->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
@@ -1021,41 +1018,11 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
return 0;
}
-/*
- * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313, default rectangle
- * is the initial register values, height takes the interlaced format into
- * account. The actual image can only go up to 720 x 2 * 240, So, VOUVPR can
- * actually only meaningfully contain values <= 720 and <= 240 respectively, and
- * not <= 864 and <= 312.
- */
-static int sh_vou_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *a)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = VOU_MAX_IMAGE_WIDTH;
- a->bounds.height = VOU_MAX_IMAGE_HEIGHT;
- /* Default = max, set VOUDPR = 0, which is not hardware default */
- a->defrect.left = 0;
- a->defrect.top = 0;
- a->defrect.width = VOU_MAX_IMAGE_WIDTH;
- a->defrect.height = VOU_MAX_IMAGE_HEIGHT;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
-}
-
static irqreturn_t sh_vou_isr(int irq, void *dev_id)
{
struct sh_vou_device *vou_dev = dev_id;
static unsigned long j;
- struct videobuf_buffer *vb;
+ struct sh_vou_buffer *vb;
static int cnt;
u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
@@ -1068,7 +1035,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
}
spin_lock(&vou_dev->lock);
- if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+ if (!vou_dev->active || list_empty(&vou_dev->buf_list)) {
if (printk_timed_ratelimit(&j, 500))
dev_warn(vou_dev->v4l2_dev.dev,
"IRQ without active buffer: %x!\n", irq_status);
@@ -1090,33 +1057,30 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
sh_vou_reg_a_write(vou_dev, VOUIR, masked);
vb = vou_dev->active;
- list_del(&vb->queue);
-
- vb->state = VIDEOBUF_DONE;
- v4l2_get_timestamp(&vb->ts);
- vb->field_count++;
- wake_up(&vb->done);
-
- if (list_empty(&vou_dev->queue)) {
- /* Stop VOU */
- dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
- __func__, cnt);
- sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
- vou_dev->active = NULL;
- vou_dev->status = SH_VOU_INITIALISING;
- /* Disable End-of-Frame (VSYNC) interrupts */
- sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ if (list_is_singular(&vb->list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vb->vb);
spin_unlock(&vou_dev->lock);
return IRQ_HANDLED;
}
- vou_dev->active = list_entry(vou_dev->queue.next,
- struct videobuf_buffer, queue);
+ list_del(&vb->list);
+
+ v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp);
+ vb->vb.v4l2_buf.sequence = vou_dev->sequence++;
+ vb->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ vb2_buffer_done(&vb->vb, VB2_BUF_STATE_DONE);
- if (vou_dev->active->queue.next != &vou_dev->queue) {
- struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
- struct videobuf_buffer, queue);
- sh_vou_schedule_next(vou_dev, new);
+ vou_dev->active = list_entry(vou_dev->buf_list.next,
+ struct sh_vou_buffer, list);
+
+ if (list_is_singular(&vou_dev->buf_list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vou_dev->active->vb);
+ } else {
+ struct sh_vou_buffer *new = list_entry(vou_dev->active->list.next,
+ struct sh_vou_buffer, list);
+ sh_vou_schedule_next(vou_dev, &new->vb);
}
spin_unlock(&vou_dev->lock);
@@ -1156,6 +1120,8 @@ static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
/* Default - fixed HSYNC length, can be made configurable if required */
sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
return 0;
}
@@ -1163,96 +1129,47 @@ static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
static int sh_vou_open(struct file *file)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
- GFP_KERNEL);
-
- if (!vou_file)
- return -ENOMEM;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+ int err;
- if (mutex_lock_interruptible(&vou_dev->fop_lock)) {
- kfree(vou_file);
+ if (mutex_lock_interruptible(&vou_dev->fop_lock))
return -ERESTARTSYS;
- }
- if (atomic_inc_return(&vou_dev->use_count) == 1) {
- int ret;
+
+ err = v4l2_fh_open(file);
+ if (err)
+ goto done_open;
+ if (v4l2_fh_is_singular_file(file) &&
+ vou_dev->status == SH_VOU_INITIALISING) {
/* First open */
- vou_dev->status = SH_VOU_INITIALISING;
pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
- ret = sh_vou_hw_init(vou_dev);
- if (ret < 0) {
- atomic_dec(&vou_dev->use_count);
+ err = sh_vou_hw_init(vou_dev);
+ if (err < 0) {
pm_runtime_put(vou_dev->v4l2_dev.dev);
+ v4l2_fh_release(file);
+ } else {
vou_dev->status = SH_VOU_IDLE;
- mutex_unlock(&vou_dev->fop_lock);
- kfree(vou_file);
- return ret;
}
}
-
- videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
- vou_dev->v4l2_dev.dev, &vou_dev->lock,
- V4L2_BUF_TYPE_VIDEO_OUTPUT,
- V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer),
- &vou_dev->vdev, &vou_dev->fop_lock);
+done_open:
mutex_unlock(&vou_dev->fop_lock);
-
- file->private_data = vou_file;
-
- return 0;
+ return err;
}
static int sh_vou_release(struct file *file)
{
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = file->private_data;
+ bool is_last;
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- if (!atomic_dec_return(&vou_dev->use_count)) {
- mutex_lock(&vou_dev->fop_lock);
+ mutex_lock(&vou_dev->fop_lock);
+ is_last = v4l2_fh_is_singular_file(file);
+ _vb2_fop_release(file, NULL);
+ if (is_last) {
/* Last close */
- vou_dev->status = SH_VOU_IDLE;
+ vou_dev->status = SH_VOU_INITIALISING;
sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
pm_runtime_put(vou_dev->v4l2_dev.dev);
- mutex_unlock(&vou_dev->fop_lock);
}
-
- file->private_data = NULL;
- kfree(vou_file);
-
- return 0;
-}
-
-static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = file->private_data;
- int ret;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- if (mutex_lock_interruptible(&vou_dev->fop_lock))
- return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&vou_file->vbq, vma);
mutex_unlock(&vou_dev->fop_lock);
- return ret;
-}
-
-static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
-{
- struct sh_vou_device *vou_dev = video_drvdata(file);
- struct sh_vou_file *vou_file = file->private_data;
- unsigned int res;
-
- dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
-
- mutex_lock(&vou_dev->fop_lock);
- res = videobuf_poll_stream(file, &vou_file->vbq, wait);
- mutex_unlock(&vou_dev->fop_lock);
- return res;
+ return 0;
}
/* sh_vou display ioctl operations */
@@ -1262,17 +1179,23 @@ static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
.vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
.vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
.vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
- .vidioc_reqbufs = sh_vou_reqbufs,
- .vidioc_querybuf = sh_vou_querybuf,
- .vidioc_qbuf = sh_vou_qbuf,
- .vidioc_dqbuf = sh_vou_dqbuf,
- .vidioc_streamon = sh_vou_streamon,
- .vidioc_streamoff = sh_vou_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_g_output = sh_vou_g_output,
+ .vidioc_s_output = sh_vou_s_output,
+ .vidioc_enum_output = sh_vou_enum_output,
.vidioc_s_std = sh_vou_s_std,
.vidioc_g_std = sh_vou_g_std,
- .vidioc_cropcap = sh_vou_cropcap,
- .vidioc_g_crop = sh_vou_g_crop,
- .vidioc_s_crop = sh_vou_s_crop,
+ .vidioc_g_selection = sh_vou_g_selection,
+ .vidioc_s_selection = sh_vou_s_selection,
+ .vidioc_log_status = sh_vou_log_status,
};
static const struct v4l2_file_operations sh_vou_fops = {
@@ -1280,8 +1203,9 @@ static const struct v4l2_file_operations sh_vou_fops = {
.open = sh_vou_open,
.release = sh_vou_release,
.unlocked_ioctl = video_ioctl2,
- .mmap = sh_vou_mmap,
- .poll = sh_vou_poll,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .write = vb2_fop_write,
};
static const struct video_device sh_vou_video_template = {
@@ -1300,8 +1224,9 @@ static int sh_vou_probe(struct platform_device *pdev)
struct i2c_adapter *i2c_adap;
struct video_device *vdev;
struct sh_vou_device *vou_dev;
- struct resource *reg_res, *region;
+ struct resource *reg_res;
struct v4l2_subdev *subdev;
+ struct vb2_queue *q;
int irq, ret;
reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1312,16 +1237,16 @@ static int sh_vou_probe(struct platform_device *pdev)
return -ENODEV;
}
- vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+ vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL);
if (!vou_dev)
return -ENOMEM;
- INIT_LIST_HEAD(&vou_dev->queue);
+ INIT_LIST_HEAD(&vou_dev->buf_list);
spin_lock_init(&vou_dev->lock);
mutex_init(&vou_dev->fop_lock);
- atomic_set(&vou_dev->use_count, 0);
vou_dev->pdata = vou_pdata;
- vou_dev->status = SH_VOU_IDLE;
+ vou_dev->status = SH_VOU_INITIALISING;
+ vou_dev->pix_idx = 1;
rect = &vou_dev->rect;
pix = &vou_dev->pix;
@@ -1334,34 +1259,24 @@ static int sh_vou_probe(struct platform_device *pdev)
rect->height = 480;
pix->width = VOU_MAX_IMAGE_WIDTH;
pix->height = 480;
- pix->pixelformat = V4L2_PIX_FMT_YVYU;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = VOU_MAX_IMAGE_WIDTH * 2;
+ pix->pixelformat = V4L2_PIX_FMT_NV16;
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH;
pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * 480;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- region = request_mem_region(reg_res->start, resource_size(reg_res),
- pdev->name);
- if (!region) {
- dev_err(&pdev->dev, "VOU region already claimed\n");
- ret = -EBUSY;
- goto ereqmemreg;
- }
+ vou_dev->base = devm_ioremap_resource(&pdev->dev, reg_res);
+ if (IS_ERR(vou_dev->base))
+ return PTR_ERR(vou_dev->base);
- vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
- if (!vou_dev->base) {
- ret = -ENOMEM;
- goto emap;
- }
-
- ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+ ret = devm_request_irq(&pdev->dev, irq, sh_vou_isr, 0, "vou", vou_dev);
if (ret < 0)
- goto ereqirq;
+ return ret;
ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
if (ret < 0) {
dev_err(&pdev->dev, "Error registering v4l2 device\n");
- goto ev4l2devreg;
+ return ret;
}
vdev = &vou_dev->vdev;
@@ -1374,6 +1289,30 @@ static int sh_vou_probe(struct platform_device *pdev)
video_set_drvdata(vdev, vou_dev);
+ /* Initialize the vb2 queue */
+ q = &vou_dev->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = vou_dev;
+ q->buf_struct_size = sizeof(struct sh_vou_buffer);
+ q->ops = &sh_vou_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &vou_dev->fop_lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto einitctx;
+
+ vou_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(vou_dev->alloc_ctx)) {
+		dev_err(&pdev->dev, "Can't allocate buffer context\n");
+ ret = PTR_ERR(vou_dev->alloc_ctx);
+ goto einitctx;
+ }
+ vdev->queue = q;
+ INIT_LIST_HEAD(&vou_dev->buf_list);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
@@ -1405,41 +1344,27 @@ ei2cnd:
ereset:
i2c_put_adapter(i2c_adap);
ei2cgadap:
+ vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
+einitctx:
pm_runtime_disable(&pdev->dev);
v4l2_device_unregister(&vou_dev->v4l2_dev);
-ev4l2devreg:
- free_irq(irq, vou_dev);
-ereqirq:
- iounmap(vou_dev->base);
-emap:
- release_mem_region(reg_res->start, resource_size(reg_res));
-ereqmemreg:
- kfree(vou_dev);
return ret;
}
static int sh_vou_remove(struct platform_device *pdev)
{
- int irq = platform_get_irq(pdev, 0);
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct sh_vou_device *vou_dev = container_of(v4l2_dev,
struct sh_vou_device, v4l2_dev);
struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
struct v4l2_subdev, list);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct resource *reg_res;
- if (irq > 0)
- free_irq(irq, vou_dev);
pm_runtime_disable(&pdev->dev);
video_unregister_device(&vou_dev->vdev);
i2c_put_adapter(client->adapter);
+ vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
v4l2_device_unregister(&vou_dev->v4l2_dev);
- iounmap(vou_dev->base);
- reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (reg_res)
- release_mem_region(reg_res->start, resource_size(reg_res));
- kfree(vou_dev);
return 0;
}
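
For reference, the sh_vou conversion above follows the standard videobuf2
recipe: hang a vb2_queue off the video_device and let the generic
vb2_fop_*/vb2_ioctl_* helpers replace the hand-rolled file and buffer
operations. A minimal sketch of that recipe, with hypothetical
"my_dev"/"my_qops"/"my_buffer" names standing in for the driver specifics:

    static int my_init_queue(struct my_dev *dev)
    {
            struct vb2_queue *q = &dev->queue;

            q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            q->io_modes = VB2_MMAP | VB2_DMABUF;
            q->drv_priv = dev;
            q->buf_struct_size = sizeof(struct my_buffer);
            q->ops = &my_qops;                   /* queue_setup/buf_queue/... */
            q->mem_ops = &vb2_dma_contig_memops; /* contiguous DMA buffers */
            q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
            q->lock = &dev->fop_lock;            /* serialises fops and ioctls */
            dev->vdev.queue = q;  /* enables vb2_fop_mmap/poll and vb2_ioctl_* */
            return vb2_queue_init(q);
    }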
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 287902681164..90701726a06a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/atmel-isi.h>
@@ -34,7 +35,6 @@
#define VID_LIMIT_BYTES (16 * 1024 * 1024)
#define MIN_FRAME_RATE 15
#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
-#define ISI_DEFAULT_MCLK_FREQ 25000000
/* Frame buffer descriptor */
struct fbd {
@@ -82,8 +82,6 @@ struct atmel_isi {
struct completion complete;
	/* ISI peripheral clock */
struct clk *pclk;
- /* ISI_MCK, feed to camera sensor to generate pixel clock */
- struct clk *mck;
unsigned int irq;
struct isi_platform_data pdata;
@@ -386,10 +384,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct atmel_isi *isi = ici->priv;
int ret;
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
/* Reset ISI */
ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
if (ret < 0) {
dev_err(icd->parent, "Reset ISI timed out\n");
+ pm_runtime_put(ici->v4l2_dev.dev);
return ret;
}
/* Disable all interrupts */
@@ -431,11 +432,9 @@ static void stop_streaming(struct vb2_queue *vq)
time_before(jiffies, timeout))
msleep(1);
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout))
dev_err(icd->parent,
"Timeout waiting for finishing codec request\n");
- return;
- }
/* Disable interrupts */
isi_writel(isi, ISI_INTDIS,
@@ -445,6 +444,8 @@ static void stop_streaming(struct vb2_queue *vq)
ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
if (ret < 0)
dev_err(icd->parent, "Disable ISI timed out\n");
+
+ pm_runtime_put(ici->v4l2_dev.dev);
}
static struct vb2_ops isi_video_qops = {
@@ -516,7 +517,13 @@ static int isi_camera_set_fmt(struct soc_camera_device *icd,
if (mf->code != xlate->code)
return -EINVAL;
+	/* Enable PM and the peripheral clock before operating on ISI registers */
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+
+ pm_runtime_put(ici->v4l2_dev.dev);
+
if (ret < 0)
return ret;
@@ -730,37 +737,6 @@ static void isi_camera_remove_device(struct soc_camera_device *icd)
icd->devnum);
}
-/* Called with .host_lock held */
-static int isi_camera_clock_start(struct soc_camera_host *ici)
-{
- struct atmel_isi *isi = ici->priv;
- int ret;
-
- ret = clk_prepare_enable(isi->pclk);
- if (ret)
- return ret;
-
- if (!IS_ERR(isi->mck)) {
- ret = clk_prepare_enable(isi->mck);
- if (ret) {
- clk_disable_unprepare(isi->pclk);
- return ret;
- }
- }
-
- return 0;
-}
-
-/* Called with .host_lock held */
-static void isi_camera_clock_stop(struct soc_camera_host *ici)
-{
- struct atmel_isi *isi = ici->priv;
-
- if (!IS_ERR(isi->mck))
- clk_disable_unprepare(isi->mck);
- clk_disable_unprepare(isi->pclk);
-}
-
static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
@@ -855,9 +831,14 @@ static int isi_camera_set_bus_param(struct soc_camera_device *icd)
cfg1 |= ISI_CFG1_THMASK_BEATS_16;
+	/* Enable PM and the peripheral clock before operating on ISI registers */
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
isi_writel(isi, ISI_CFG1, cfg1);
+ pm_runtime_put(ici->v4l2_dev.dev);
+
return 0;
}
@@ -865,8 +846,6 @@ static struct soc_camera_host_ops isi_soc_camera_host_ops = {
.owner = THIS_MODULE,
.add = isi_camera_add_device,
.remove = isi_camera_remove_device,
- .clock_start = isi_camera_clock_start,
- .clock_stop = isi_camera_clock_stop,
.set_fmt = isi_camera_set_fmt,
.try_fmt = isi_camera_try_fmt,
.get_formats = isi_camera_get_formats,
@@ -889,6 +868,7 @@ static int atmel_isi_remove(struct platform_device *pdev)
sizeof(struct fbd) * MAX_BUFFER_NUM,
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -902,7 +882,6 @@ static int atmel_isi_probe_dt(struct atmel_isi *isi,
/* Default settings for ISI */
isi->pdata.full_mode = 1;
- isi->pdata.mck_hz = ISI_DEFAULT_MCLK_FREQ;
isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
np = of_graph_get_next_endpoint(np, NULL);
@@ -978,21 +957,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&isi->video_buffer_list);
INIT_LIST_HEAD(&isi->dma_desc_head);
- /* ISI_MCK is the sensor master clock. It should be handled by the
- * sensor driver directly, as the ISI has no use for that clock. Make
- * the clock optional here while platforms transition to the correct
- * model.
- */
- isi->mck = devm_clk_get(dev, "isi_mck");
- if (!IS_ERR(isi->mck)) {
- /* Set ISI_MCK's frequency, it should be faster than pixel
- * clock.
- */
- ret = clk_set_rate(isi->mck, isi->pdata.mck_hz);
- if (ret < 0)
- return ret;
- }
-
isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
&isi->fb_descriptors_phys,
@@ -1027,8 +991,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10)
isi->width_flags |= 1 << 9;
- isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
-
irq = platform_get_irq(pdev, 0);
if (IS_ERR_VALUE(irq)) {
ret = irq;
@@ -1049,6 +1011,9 @@ static int atmel_isi_probe(struct platform_device *pdev)
soc_host->v4l2_dev.dev = &pdev->dev;
soc_host->nr = pdev->id;
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
if (isi->pdata.asd_sizes) {
soc_host->asd = isi->pdata.asd;
soc_host->asd_sizes = isi->pdata.asd_sizes;
@@ -1062,6 +1027,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return 0;
err_register_soc_camera_host:
+ pm_runtime_disable(&pdev->dev);
err_req_irq:
err_ioremap:
vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
@@ -1074,6 +1040,30 @@ err_alloc_ctx:
return ret;
}
+static int atmel_isi_runtime_suspend(struct device *dev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(dev);
+ struct atmel_isi *isi = container_of(soc_host,
+ struct atmel_isi, soc_host);
+
+ clk_disable_unprepare(isi->pclk);
+
+ return 0;
+}
+static int atmel_isi_runtime_resume(struct device *dev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(dev);
+ struct atmel_isi *isi = container_of(soc_host,
+ struct atmel_isi, soc_host);
+
+ return clk_prepare_enable(isi->pclk);
+}
+
+static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
+ atmel_isi_runtime_resume, NULL)
+};
+
static const struct of_device_id atmel_isi_of_match[] = {
{ .compatible = "atmel,at91sam9g45-isi" },
{ }
@@ -1085,6 +1075,7 @@ static struct platform_driver atmel_isi_driver = {
.driver = {
.name = "atmel_isi",
.of_match_table = of_match_ptr(atmel_isi_of_match),
+ .pm = &atmel_isi_dev_pm_ops,
},
};
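
The atmel-isi changes above replace soc-camera's clock_start/clock_stop
hooks with runtime PM: the peripheral clock moves into the runtime
suspend/resume callbacks, and every path that touches registers brackets
the access with a get/put pair. A sketch of the pattern under those
assumptions ("my_*" names are placeholders, not driver API):

    static int my_runtime_suspend(struct device *dev)
    {
            struct my_priv *p = dev_get_drvdata(dev);

            clk_disable_unprepare(p->pclk);         /* gate the clock */
            return 0;
    }

    static int my_runtime_resume(struct device *dev)
    {
            struct my_priv *p = dev_get_drvdata(dev);

            return clk_prepare_enable(p->pclk);     /* ungate the clock */
    }

    static const struct dev_pm_ops my_pm_ops = {
            SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };

    /* at any register-access site: */
    pm_runtime_get_sync(dev);       /* clock guaranteed on from here */
    /* ... program registers ... */
    pm_runtime_put(dev);            /* clock may be gated again */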
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index db7700b0af7c..71dd71c0bd1f 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -98,6 +98,7 @@
#define VNMC_INF_YUV10_BT656 (2 << 16)
#define VNMC_INF_YUV10_BT601 (3 << 16)
#define VNMC_INF_YUV16 (5 << 16)
+#define VNMC_INF_RGB888 (6 << 16)
#define VNMC_VUP (1 << 10)
#define VNMC_IM_ODD (0 << 3)
#define VNMC_IM_ODD_EVEN (1 << 3)
@@ -540,6 +541,9 @@ static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
unsigned int bytes_per_line;
int ret;
+ if (fmt->fmt.pix.sizeimage < icd->sizeimage)
+ return -EINVAL;
+
xlate = soc_camera_xlate_by_fourcc(icd,
fmt->fmt.pix.pixelformat);
if (!xlate)
@@ -589,7 +593,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
struct soc_camera_device *icd = priv->ici.icd;
struct rcar_vin_cam *cam = icd->host_priv;
u32 vnmc, dmr, interrupts;
- bool progressive = false, output_is_yuv = false;
+ bool progressive = false, output_is_yuv = false, input_is_yuv = false;
switch (priv->field) {
case V4L2_FIELD_TOP:
@@ -623,16 +627,22 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
+ input_is_yuv = true;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vnmc |= VNMC_INF_RGB888;
break;
case MEDIA_BUS_FMT_YUYV10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+ input_is_yuv = true;
break;
default:
break;
@@ -676,7 +686,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
vnmc |= VNMC_VUP;
/* If input and output use the same colorspace, use bypass mode */
- if (output_is_yuv)
+ if (input_is_yuv == output_is_yuv)
vnmc |= VNMC_BPS;
/* progressive or interlaced mode */
@@ -1423,6 +1433,7 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
if (cam->extra_fmt)
break;
@@ -1783,6 +1794,7 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d", DRV_NAME, ici->nr);
return 0;
}
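
A note on the VNMC_BPS change above: while all supported inputs were YUV,
"output_is_yuv" alone implied that input and output shared a colour space,
so it doubled as the bypass test. Once RGB888 input is accepted, the bypass
condition must compare both sides explicitly:

    /* bypass the colour-space converter only when no conversion is needed */
    if (input_is_yuv == output_is_yuv)
            vnmc |= VNMC_BPS;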
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index c5c6c4e91f7b..efdeea4490e8 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1665,6 +1665,8 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
+ strlcpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -1773,6 +1775,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->max_height = pcdev->pdata->max_height;
pcdev->flags = pcdev->pdata->flags;
}
+ pcdev->field = V4L2_FIELD_NONE;
if (!pcdev->max_width) {
unsigned int v;
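
The querycap tweaks in the two drivers above fill in fields that
v4l2-compliance checks: driver, card and bus_info must all be set. A
complete handler of this shape, with hypothetical "my" names:

    static int my_querycap(struct file *file, void *priv,
                           struct v4l2_capability *cap)
    {
            strlcpy(cap->driver, "my_driver", sizeof(cap->driver));
            strlcpy(cap->card, "My Card", sizeof(cap->card));
            snprintf(cap->bus_info, sizeof(cap->bus_info),
                     "platform:my_driver");
            cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
            cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
            return 0;
    }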
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index d708df410f74..9087fed586fb 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -309,11 +309,14 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
+ struct soc_camera_device *icd = file->private_data;
+
if (inp->index != 0)
return -EINVAL;
/* default is camera */
inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = icd->vdev->tvnorms;
strcpy(inp->name, "Camera");
return 0;
@@ -381,9 +384,8 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
ret = vb2_reqbufs(&icd->vb2_vidq, p);
}
- if (!ret && !icd->streamer)
- icd->streamer = file;
-
+ if (!ret)
+ icd->streamer = p->count ? file : NULL;
return ret;
}
@@ -440,12 +442,19 @@ static int soc_camera_create_bufs(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
/* videobuf2 only */
if (ici->ops->init_videobuf)
- return -EINVAL;
- else
- return vb2_create_bufs(&icd->vb2_vidq, create);
+ return -ENOTTY;
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ ret = vb2_create_bufs(&icd->vb2_vidq, create);
+ if (!ret)
+ icd->streamer = file;
+ return ret;
}
static int soc_camera_prepare_buf(struct file *file, void *priv,
@@ -467,14 +476,13 @@ static int soc_camera_expbuf(struct file *file, void *priv,
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- if (icd->streamer != file)
- return -EBUSY;
-
/* videobuf2 only */
if (ici->ops->init_videobuf)
- return -EINVAL;
- else
- return vb2_expbuf(&icd->vb2_vidq, p);
+ return -ENOTTY;
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+ return vb2_expbuf(&icd->vb2_vidq, p);
}
/* Always entered with .host_lock held */
@@ -780,20 +788,21 @@ static int soc_camera_close(struct file *file)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
mutex_lock(&ici->host_lock);
+ if (icd->streamer == file) {
+ if (ici->ops->init_videobuf2)
+ vb2_queue_release(&icd->vb2_vidq);
+ icd->streamer = NULL;
+ }
icd->use_count--;
if (!icd->use_count) {
pm_runtime_suspend(&icd->vdev->dev);
pm_runtime_disable(&icd->vdev->dev);
- if (ici->ops->init_videobuf2)
- vb2_queue_release(&icd->vb2_vidq);
__soc_camera_power_off(icd);
soc_camera_remove_device(icd);
}
- if (icd->streamer == file)
- icd->streamer = NULL;
mutex_unlock(&ici->host_lock);
module_put(ici->ops->owner);
@@ -992,6 +1001,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
WARN_ON(priv != file->private_data);
@@ -1006,13 +1016,13 @@ static int soc_camera_streamoff(struct file *file, void *priv,
* remaining buffers. When the last buffer is freed, stop capture
*/
if (ici->ops->init_videobuf)
- videobuf_streamoff(&icd->vb_vidq);
+ ret = videobuf_streamoff(&icd->vb_vidq);
else
- vb2_streamoff(&icd->vb2_vidq, i);
+ ret = vb2_streamoff(&icd->vb2_vidq, i);
v4l2_subdev_call(sd, video, s_stream, 0);
- return 0;
+ return ret;
}
static int soc_camera_cropcap(struct file *file, void *fh,
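
The soc_camera changes above tighten "streamer" ownership: the file handle
that allocates buffers owns streaming, and REQBUFS with count == 0 hands
the queue back. The rule, in isolation:

    ret = vb2_reqbufs(&icd->vb2_vidq, p);
    if (!ret)
            icd->streamer = p->count ? file : NULL;  /* claim or release */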
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 18282a0f80c9..79c56356a7c7 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -116,6 +116,9 @@ static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
case BDISP_RGB565:
seq_puts(s, "RGB565 - ");
break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
case BDISP_XRGB8888:
seq_puts(s, "xRGB888 - ");
break;
@@ -185,6 +188,9 @@ static void bdisp_dbg_dump_sty(struct seq_file *s,
case BDISP_RGB565:
seq_puts(s, "RGB565 - ");
break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
case BDISP_XRGB8888:
seq_puts(s, "xRGB888 - ");
break;
@@ -420,6 +426,8 @@ static const char *bdisp_fmt_to_str(struct bdisp_frame frame)
return "NV12";
case V4L2_PIX_FMT_RGB565:
return "RGB16";
+ case V4L2_PIX_FMT_RGB24:
+ return "RGB24";
case V4L2_PIX_FMT_XBGR32:
return "XRGB";
case V4L2_PIX_FMT_ABGR32:
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 465828e859e2..052c932ac942 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -336,8 +336,8 @@ static int bdisp_hw_get_hv_inc(struct bdisp_ctx *ctx, u16 *h_inc, u16 *v_inc)
src_w = ctx->src.crop.width;
src_h = ctx->src.crop.height;
- dst_w = ctx->dst.width;
- dst_h = ctx->dst.height;
+ dst_w = ctx->dst.crop.width;
+ dst_h = ctx->dst.crop.height;
if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
@@ -483,9 +483,9 @@ static void bdisp_hw_build_node(struct bdisp_ctx *ctx,
src_rect.width -= src_x_offset;
src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);
- dst_x_offset = (src_x_offset * dst->width) / ctx->src.crop.width;
+ dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
dst_rect.left += dst_x_offset;
- dst_rect.width = (src_rect.width * dst->width) / ctx->src.crop.width;
+ dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;
/* General */
src_fmt = src->fmt->pixelformat;
@@ -768,12 +768,12 @@ static void bdisp_hw_save_request(struct bdisp_ctx *ctx)
/* Allocate memory if not done yet */
if (!copy_node[i]) {
copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
- sizeof(*copy_node),
+ sizeof(*copy_node[i]),
GFP_KERNEL);
if (!copy_node[i])
return;
}
- copy_node[i] = node[i];
+ *copy_node[i] = *node[i];
}
}
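
The bdisp_hw_save_request hunk above fixes two related slips: the
allocation was sized with sizeof(*copy_node), which is the size of a
pointer because copy_node is an array of pointers, and the node was then
pointer-assigned instead of copied, leaking the allocation and aliasing
the live node. Reduced to plain C (hypothetical struct, user-space sketch):

    #include <stdlib.h>

    struct node { unsigned int cfg[8]; };

    static struct node *save_node(const struct node *live, struct node *copy)
    {
            if (!copy)
                    copy = malloc(sizeof(*copy)); /* struct size, not pointer size */
            if (copy)
                    *copy = *live;  /* copy contents; "copy = live" would alias */
            return copy;
    }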
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 9e782ebe18da..df61355b46f1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -851,33 +851,56 @@ static int bdisp_g_selection(struct file *file, void *fh,
struct bdisp_frame *frame;
struct bdisp_ctx *ctx = fh_to_ctx(fh);
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- /* Composing / capture is not supported */
- dev_dbg(ctx->bdisp_dev->dev, "Not supported for capture\n");
- return -EINVAL;
- }
-
frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame)) {
dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
return PTR_ERR(frame);
}
- switch (s->target) {
- case V4L2_SEL_TGT_CROP:
- /* cropped frame */
- s->r = frame->crop;
+ switch (s->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* cropped frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- /* complete frame */
- s->r.left = 0;
- s->r.top = 0;
- s->r.width = frame->width;
- s->r.height = frame->height;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /* composed (cropped) frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
break;
+
default:
- dev_dbg(ctx->bdisp_dev->dev, "Invalid target\n");
+ dev_err(ctx->bdisp_dev->dev, "Invalid type\n");
return -EINVAL;
}
@@ -906,15 +929,18 @@ static int bdisp_s_selection(struct file *file, void *fh,
struct bdisp_frame *frame;
struct bdisp_ctx *ctx = fh_to_ctx(fh);
struct v4l2_rect *in, out;
+ bool valid = false;
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- /* Composing / capture is not supported */
- dev_dbg(ctx->bdisp_dev->dev, "Not supported for capture\n");
- return -EINVAL;
- }
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+ (s->target == V4L2_SEL_TGT_CROP))
+ valid = true;
+
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->target == V4L2_SEL_TGT_COMPOSE))
+ valid = true;
- if (s->target != V4L2_SEL_TGT_CROP) {
- dev_dbg(ctx->bdisp_dev->dev, "Invalid target\n");
+ if (!valid) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid type / target\n");
return -EINVAL;
}
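
The selection rework above follows the usual mem2mem convention: the
OUTPUT (source) queue is cropped and the CAPTURE (sink) queue is composed.
The s_selection validity check then reduces to:

    bool valid = (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
                  s->target == V4L2_SEL_TGT_CROP) ||
                 (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
                  s->target == V4L2_SEL_TGT_COMPOSE);
    if (!valid)
            return -EINVAL;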
diff --git a/drivers/media/platform/sti/c8sectpfe/Kconfig b/drivers/media/platform/sti/c8sectpfe/Kconfig
new file mode 100644
index 000000000000..641ad8f34956
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Kconfig
@@ -0,0 +1,28 @@
+config DVB_C8SECTPFE
+ tristate "STMicroelectronics C8SECTPFE DVB support"
+ depends on PINCTRL && DVB_CORE && I2C
+ depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
+ select FW_LOADER
+ select FW_LOADER_USER_HELPER_FALLBACK
+ select DEBUG_FS
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
+
+ ---help---
+	  This adds support for DVB front-end cards connected
+	  to the TS inputs of the STiH407/410 SoC.
+
+ The driver currently supports C8SECTPFE's TS input block,
+ memdma engine, and HW PID filtering.
+
+ Supported DVB front-end cards are:
+ - STMicroelectronics DVB-T B2100A (STV0367 + TDA18212)
+ - STMicroelectronics DVB-S/S2 STV0903 + STV6110 + LNBP24 board
+
+ To compile this driver as a module, choose M here: the
+ module will be called c8sectpfe.
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/sti/c8sectpfe/Makefile
new file mode 100644
index 000000000000..b578c7cb4c34
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/Makefile
@@ -0,0 +1,9 @@
+c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
+ c8sectpfe-debugfs.o
+
+obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
+
+ccflags-y += -Idrivers/media/i2c
+ccflags-y += -Idrivers/media/common
+ccflags-y += -Idrivers/media/dvb-core/ -Idrivers/media/dvb-frontends/ \
+ -Idrivers/media/tuners/
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
new file mode 100644
index 000000000000..95223ab71e19
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c
@@ -0,0 +1,265 @@
+/*
+ * c8sectpfe-common.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dvb/dmx.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+static int register_dvb(struct stdemux *demux, struct dvb_adapter *adap,
+ void *start_feed, void *stop_feed,
+ struct c8sectpfei *fei)
+{
+ int result;
+
+ demux->dvb_demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+
+ demux->dvb_demux.priv = demux;
+ demux->dvb_demux.filternum = C8SECTPFE_MAXCHANNEL;
+ demux->dvb_demux.feednum = C8SECTPFE_MAXCHANNEL;
+
+ demux->dvb_demux.start_feed = start_feed;
+ demux->dvb_demux.stop_feed = stop_feed;
+ demux->dvb_demux.write_to_decoder = NULL;
+
+ result = dvb_dmx_init(&demux->dvb_demux);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmx_init failed (errno = %d)\n",
+ result);
+ goto err_dmx;
+ }
+
+ demux->dmxdev.filternum = demux->dvb_demux.filternum;
+ demux->dmxdev.demux = &demux->dvb_demux.dmx;
+ demux->dmxdev.capabilities = 0;
+
+ result = dvb_dmxdev_init(&demux->dmxdev, adap);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmxdev_init failed (errno = %d)\n",
+ result);
+
+ goto err_dmxdev;
+ }
+
+ demux->hw_frontend.source = DMX_FRONTEND_0 + demux->tsin_index;
+
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (errno = %d)\n", result);
+ goto err_fe_hw;
+ }
+
+ demux->mem_frontend.source = DMX_MEMORY_FE;
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (%d)\n", result);
+ goto err_fe_mem;
+ }
+
+ result = demux->dvb_demux.dmx.connect_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "connect_frontend (%d)\n", result);
+ goto err_fe_con;
+ }
+
+ return 0;
+
+err_fe_con:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+err_fe_mem:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+err_fe_hw:
+ dvb_dmxdev_release(&demux->dmxdev);
+err_dmxdev:
+ dvb_dmx_release(&demux->dvb_demux);
+err_dmx:
+ return result;
+
+}
+
+static void unregister_dvb(struct stdemux *demux)
+{
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+
+ dvb_dmxdev_release(&demux->dmxdev);
+
+ dvb_dmx_release(&demux->dvb_demux);
+}
+
+static struct c8sectpfe *c8sectpfe_create(struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct c8sectpfe *c8sectpfe;
+ int result;
+ int i, j;
+
+ short int ids[] = { -1 };
+
+ c8sectpfe = kzalloc(sizeof(struct c8sectpfe), GFP_KERNEL);
+ if (!c8sectpfe)
+ goto err1;
+
+ mutex_init(&c8sectpfe->lock);
+
+ c8sectpfe->device = fei->dev;
+
+ result = dvb_register_adapter(&c8sectpfe->adapter, "STi c8sectpfe",
+ THIS_MODULE, fei->dev, ids);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_register_adapter failed (errno = %d)\n",
+ result);
+ goto err2;
+ }
+
+ c8sectpfe->adapter.priv = fei;
+
+ for (i = 0; i < fei->tsin_count; i++) {
+
+ c8sectpfe->demux[i].tsin_index = i;
+ c8sectpfe->demux[i].c8sectpfei = fei;
+
+ result = register_dvb(&c8sectpfe->demux[i], &c8sectpfe->adapter,
+ start_feed, stop_feed, fei);
+ if (result < 0) {
+ dev_err(fei->dev,
+ "register_dvb feed=%d failed (errno = %d)\n",
+				i, result);
+
+			/* we take an all-or-nothing approach */
+ for (j = 0; j < i; j++)
+ unregister_dvb(&c8sectpfe->demux[j]);
+ goto err3;
+ }
+ }
+
+ c8sectpfe->num_feeds = fei->tsin_count;
+
+ return c8sectpfe;
+err3:
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+err2:
+ kfree(c8sectpfe);
+err1:
+ return NULL;
+}
+
+static void c8sectpfe_delete(struct c8sectpfe *c8sectpfe)
+{
+ int i;
+
+ if (!c8sectpfe)
+ return;
+
+ for (i = 0; i < c8sectpfe->num_feeds; i++)
+ unregister_dvb(&c8sectpfe->demux[i]);
+
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+
+ kfree(c8sectpfe);
+}
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei)
+{
+ int n;
+ struct channel_info *tsin;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+
+ tsin = fei->channel_data[n];
+
+ if (tsin && tsin->frontend) {
+ dvb_unregister_frontend(tsin->frontend);
+ dvb_frontend_detach(tsin->frontend);
+ }
+
+ if (tsin && tsin->i2c_adapter)
+ i2c_put_adapter(tsin->i2c_adapter);
+
+ if (tsin && tsin->i2c_client) {
+ if (tsin->i2c_client->dev.driver->owner)
+ module_put(tsin->i2c_client->dev.driver->owner);
+ i2c_unregister_device(tsin->i2c_client);
+ }
+ }
+
+ c8sectpfe_delete(c8sectpfe);
+}
+
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct channel_info *tsin;
+ struct dvb_frontend *frontend;
+ int n, res;
+
+ *c8sectpfe = c8sectpfe_create(fei, start_feed, stop_feed);
+ if (!*c8sectpfe)
+ return -ENOMEM;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+ tsin = fei->channel_data[n];
+
+ res = c8sectpfe_frontend_attach(&frontend, *c8sectpfe, tsin, n);
+ if (res)
+ goto err;
+
+ res = dvb_register_frontend(&c8sectpfe[0]->adapter, frontend);
+ if (res < 0) {
+ dev_err(fei->dev, "dvb_register_frontend failed (%d)\n",
+ res);
+ goto err;
+ }
+
+ tsin->frontend = frontend;
+ }
+
+ return 0;
+
+err:
+ c8sectpfe_tuner_unregister_frontend(*c8sectpfe, fei);
+ return res;
+}
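
register_dvb() above uses the kernel's usual goto-ladder unwinding: each
successful acquisition adds one label, and an error jumps to the label
that releases everything obtained so far, in reverse order. The shape of
the pattern, with hypothetical step/undo helpers:

    int ret;

    ret = step_a();
    if (ret)
            goto err_a;     /* nothing acquired yet */
    ret = step_b();
    if (ret)
            goto err_b;     /* must undo step_a */
    return 0;

    err_b:
            undo_a();
    err_a:
            return ret;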
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
new file mode 100644
index 000000000000..da21c0ac0fc1
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h
@@ -0,0 +1,64 @@
+/*
+ * c8sectpfe-common.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_COMMON_H_
+#define _C8SECTPFE_COMMON_H_
+
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/gpio.h>
+#include <linux/version.h>
+
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+/* Maximum number of channels */
+#define C8SECTPFE_MAXADAPTER (4)
+#define C8SECTPFE_MAXCHANNEL 64
+#define STPTI_MAXCHANNEL 64
+
+#define MAX_INPUTBLOCKS 7
+
+struct c8sectpfe;
+struct stdemux;
+
+struct stdemux {
+ struct dvb_demux dvb_demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ int tsin_index;
+ int running_feed_count;
+ struct c8sectpfei *c8sectpfei;
+};
+
+struct c8sectpfe {
+ struct stdemux demux[MAX_INPUTBLOCKS];
+ struct mutex lock;
+ struct dvb_adapter adapter;
+ struct device *device;
+ int mapping;
+ int num_feeds;
+};
+
+/* Channel registration */
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed);
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei);
+
+#endif
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
new file mode 100644
index 000000000000..486aef50d99b
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -0,0 +1,1236 @@
+/*
+ * c8sectpfe-core.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-debugfs.h"
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
+MODULE_FIRMWARE(FIRMWARE_MEMDMA);
+
+#define PID_TABLE_SIZE 1024
+#define POLL_MSECS 50
+
+static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
+
+#define TS_PKT_SIZE 188
+#define HEADER_SIZE (4)
+#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
+
+#define FEI_ALIGNMENT (32)
+/* hw requires a minimum of 8*PACKET_SIZE, padded to an 8-byte boundary */
+#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
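+/*
+ * Worked size check: PACKET_SIZE = 188 + 4 = 192 bytes, so FEI_BUFFER_SIZE
+ * = 8 * 192 * 340 = 522240 bytes, a multiple of 8 * PACKET_SIZE (1536) and
+ * therefore also padded to an 8-byte boundary as required.
+ */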
+
+#define FIFO_LEN 1024
+
+static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+{
+ struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+ struct channel_info *channel;
+ int chan_num;
+
+ /* iterate through input block channels */
+ for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
+ channel = fei->channel_data[chan_num];
+
+ /* is this descriptor initialised and TP enabled */
+ if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
+ tasklet_schedule(&channel->tsklet);
+ }
+
+ fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+ add_timer(&fei->timer);
+}
+
+static void channel_swdemux_tsklet(unsigned long data)
+{
+ struct channel_info *channel = (struct channel_info *)data;
+	struct c8sectpfei *fei;
+ unsigned long wp, rp;
+ int pos, num_packets, n, size;
+ u8 *buf;
+
+	if (unlikely(!channel || !channel->irec))
+		return;
+
+	fei = channel->fei;
+
+ wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
+ rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
+
+ pos = rp - channel->back_buffer_busaddr;
+
+ /* has it wrapped */
+ if (wp < rp)
+ wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
+
+ size = wp - rp;
+ num_packets = size / PACKET_SIZE;
+
+ /* manage cache so data is visible to CPU */
+ dma_sync_single_for_cpu(fei->dev,
+ rp,
+ size,
+ DMA_FROM_DEVICE);
+
+ buf = (u8 *) channel->back_buffer_aligned;
+
+ dev_dbg(fei->dev,
+ "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
+ "rp=0x%lx, wp=0x%lx\n",
+ channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
+
+ for (n = 0; n < num_packets; n++) {
+ dvb_dmx_swfilter_packets(
+ &fei->c8sectpfe[0]->
+ demux[channel->demux_mapping].dvb_demux,
+ &buf[pos], 1);
+
+ pos += PACKET_SIZE;
+ }
+
+ /* advance the read pointer */
+ if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSRP_TP(0));
+ else
+		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
+}
+
+static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ u32 tmp;
+ unsigned long *bitmap;
+
+ switch (dvbdmxfeed->type) {
+ case DMX_TYPE_TS:
+ break;
+ case DMX_TYPE_SEC:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (dvbdmxfeed->type == DMX_TYPE_TS) {
+ switch (dvbdmxfeed->pes_type) {
+ case DMX_PES_VIDEO:
+ case DMX_PES_AUDIO:
+ case DMX_PES_TELETEXT:
+ case DMX_PES_PCR:
+ case DMX_PES_OTHER:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+ /* 8192 is a special PID */
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp &= ~C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+
+ } else {
+ bitmap_set(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so PID bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 1;
+
+ if (fei->global_feed_count == 0) {
+ fei->timer.expires = jiffies +
+			msecs_to_jiffies(POLL_MSECS);
+
+ add_timer(&fei->timer);
+ }
+
+ if (stdemux->running_feed_count == 0) {
+
+ dev_dbg(fei->dev, "Starting channel=%p\n", channel);
+
+ tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
+ (unsigned long) channel);
+
+ /* Reset the internal inputblock sram pointers */
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
+ writel(channel->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
+
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
+
+
+ /* reset read / write memdma ptrs for this channel */
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSWP_TP(0));
+
+ /* Issue a reset and enable InputBlock */
+ writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
+ , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+ /* and enable the tp */
+ writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
+
+ dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
+ , __func__, __LINE__, stdemux);
+ }
+
+ stdemux->running_feed_count++;
+ fei->global_feed_count++;
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ int idlereq;
+ u32 tmp;
+ int ret;
+ unsigned long *bitmap;
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ bitmap = (unsigned long *) channel->pid_buffer_aligned;
+
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp |= C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ } else {
+ bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ if (--stdemux->running_feed_count == 0) {
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ /* TP re-configuration on page 168 of functional spec */
+
+ /* disable IB (prevents more TS data going to memdma) */
+ writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+		/* disable this channel's descriptor */
+ writel(0, channel->irec + DMA_PRDS_TPENABLE);
+
+ tasklet_disable(&channel->tsklet);
+
+ /* now request memdma channel goes idle */
+ idlereq = (1 << channel->tsin_id) | IDLEREQ;
+ writel(idlereq, fei->io + DMA_IDLE_REQ);
+
+ /* wait for idle irq handler to signal completion */
+ ret = wait_for_completion_timeout(&channel->idle_completion,
+ msecs_to_jiffies(100));
+
+ if (ret == 0)
+ dev_warn(fei->dev,
+ "Timeout waiting for idle irq on tsin%d\n",
+ channel->tsin_id);
+
+ reinit_completion(&channel->idle_completion);
+
+ /* reset read / write ptrs for this channel */
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSWP_TP(0));
+
+ dev_dbg(fei->dev,
+ "%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
+ __func__, __LINE__, stdemux, channel->tsin_id);
+
+ /* turn off all PIDS in the bitmap */
+ memset((void *)channel->pid_buffer_aligned
+ , 0x00, PID_TABLE_SIZE);
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 0;
+ }
+
+ if (--fei->global_feed_count == 0) {
+ dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
+ , __func__, __LINE__, fei->global_feed_count);
+
+ del_timer(&fei->timer);
+ }
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
+{
+ int i;
+
+ for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
+ if (!fei->channel_data[i])
+ continue;
+
+ if (fei->channel_data[i]->tsin_id == tsin_num)
+ return fei->channel_data[i];
+ }
+
+ return NULL;
+}
+
+static void c8sectpfe_getconfig(struct c8sectpfei *fei)
+{
+ struct c8sectpfe_hw *hw = &fei->hw_stats;
+
+ hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
+ hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
+ hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
+ hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
+ hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
+ hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
+ hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
+
+ dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
+ dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
+ dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
+ dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
+ , hw->num_swts);
+ dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
+ dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
+ dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
+ dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
+ , hw->num_tp);
+}
+
+static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+ struct channel_info *chan;
+ int bit;
+ unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
+
+ /* page 168 of functional spec: Clear the idle request
+ by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
+
+ /* signal idle completion */
+ for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
+
+ chan = find_channel(fei, bit);
+
+ if (chan)
+ complete(&chan->idle_completion);
+ }
+
+ writel(0, fei->io + DMA_IDLE_REQ);
+
+ return IRQ_HANDLED;
+}
+
+
+static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
+{
+ if (!fei || !tsin)
+ return;
+
+ if (tsin->back_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
+ FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->back_buffer_start);
+
+ if (tsin->pid_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->pid_buffer_start);
+}
+
+#define MAX_NAME 20
+
+static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
+ struct channel_info *tsin)
+{
+ int ret;
+ u32 tmp;
+ char tsin_pin_name[MAX_NAME];
+
+ if (!fei || !tsin)
+ return -EINVAL;
+
+ dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
+ , __func__, __LINE__, tsin, tsin->tsin_id);
+
+ init_completion(&tsin->idle_completion);
+
+ tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
+ FEI_ALIGNMENT, GFP_KERNEL);
+
+ if (!tsin->back_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /* Ensure backbuffer is 32byte aligned */
+ tsin->back_buffer_aligned = tsin->back_buffer_start
+ + FEI_ALIGNMENT;
+
+ tsin->back_buffer_aligned = (void *)
+ (((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
+
+ tsin->back_buffer_busaddr = dma_map_single(fei->dev,
+ (void *)tsin->back_buffer_aligned,
+ FEI_BUFFER_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map back_buffer\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /*
+ * The pid buffer can be configured (in hw) for byte or bit
+ * per pid. By powers of deduction we conclude stih407 family
+ * is configured (at SoC design stage) for bit per pid.
+ */
+ tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);
+
+ if (!tsin->pid_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /*
+ * PID buffer needs to be aligned to size of the pid table
+ * which at bit per pid is 1024 bytes (8192 pids / 8).
+ * PIDF_BASE register enforces this alignment when writing
+ * the register.
+ */
+
+ tsin->pid_buffer_aligned = tsin->pid_buffer_start +
+ PID_TABLE_SIZE;
+
+ tsin->pid_buffer_aligned = (void *)
+ (((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
+
+ tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
+ tsin->pid_buffer_aligned,
+ PID_TABLE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map pid_bitmap\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /* manage cache so pid bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
+ (tsin->serial_not_parallel ? "serial" : "parallel"));
+
+ tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
+ if (IS_ERR(tsin->pstate)) {
+ dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
+ , __func__, tsin_pin_name);
+ ret = PTR_ERR(tsin->pstate);
+ goto err_unmap;
+ }
+
+ ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
+
+ if (ret) {
+ dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
+ , __func__);
+ goto err_unmap;
+ }
+
+ /* Enable this input block */
+ tmp = readl(fei->io + SYS_INPUT_CLKEN);
+ tmp |= BIT(tsin->tsin_id);
+ writel(tmp, fei->io + SYS_INPUT_CLKEN);
+
+ if (tsin->serial_not_parallel)
+ tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
+
+ if (tsin->invert_ts_clk)
+ tmp |= C8SECTPFE_INVERT_TSCLK;
+
+ if (tsin->async_not_sync)
+ tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
+
+ tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
+
+ writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
+
+ writel(C8SECTPFE_SYNC(0x9) |
+ C8SECTPFE_DROP(0x9) |
+ C8SECTPFE_TOKEN(0x47),
+ fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
+
+ writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
+
+	/* Place the FIFOs at the end of the irec descriptors */
+
+ tsin->fifo = (tsin->tsin_id * FIFO_LEN);
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
+ writel(tsin->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
+
+ writel(tsin->pid_buffer_busaddr,
+ fei->io + PIDF_BASE(tsin->tsin_id));
+
+ dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+ tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
+ &tsin->pid_buffer_busaddr);
+
+ /* Configure and enable HW PID filtering */
+
+ /*
+ * The PID value is created by assembling the first 8 bytes of
+ * the TS packet into a 64-bit word in big-endian format. A
+ * slice of that 64-bit word is taken from
+ * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
+ */
+ tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
+ | C8SECTPFE_PID_OFFSET(40));
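+	/*
+	 * Worked example of the slice just configured: in the big-endian
+	 * 64-bit word, byte 0 (the 0x47 sync byte) occupies bits 63..56,
+	 * byte 1 bits 55..48 and byte 2 bits 47..40. The 13-bit PID is the
+	 * low 5 bits of byte 1 plus all of byte 2, i.e. bits 52..40, which
+	 * is exactly PID_OFFSET(40) with PID_NUMBITS(13).
+	 */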
+
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
+
+ dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
+ tsin->tsin_id,
+ readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
+
+	/* Get base address of pointer record block from DMEM */
+ tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
+ readl(fei->io + DMA_PTRREC_BASE);
+
+ /* fill out pointer record data structure */
+
+ /* advance pointer record block to our channel */
+ tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
+
+ writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
+
+ writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
+
+	writel((TS_PKT_SIZE + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);
+
+ writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
+
+ /* read/write pointers with physical bus address */
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
+
+ /* initialize tasklet */
+ tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
+ (unsigned long) tsin);
+
+ return 0;
+
+err_unmap:
+ free_input_block(fei, tsin);
+ return ret;
+}
+
+static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+
+ dev_err(fei->dev, "%s: error handling not yet implemented\n"
+ , __func__);
+
+ /*
+ * TODO FIXME we should detect some error conditions here
+	 * and ideally do something about them!
+ */
+
+ return IRQ_HANDLED;
+}
+
+static int c8sectpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+ struct c8sectpfei *fei;
+ struct resource *res;
+ int ret, index = 0;
+ struct channel_info *tsin;
+
+ /* Allocate the c8sectpfei structure */
+ fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
+ if (!fei)
+ return -ENOMEM;
+
+ fei->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
+ fei->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->io))
+ return PTR_ERR(fei->io);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "c8sectpfe-ram");
+ fei->sram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->sram))
+ return PTR_ERR(fei->sram);
+
+	fei->sram_size = resource_size(res);
+
+ fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
+ if (fei->idle_irq < 0) {
+ dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
+ return fei->idle_irq;
+ }
+
+ fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
+ if (fei->error_irq < 0) {
+ dev_err(dev, "Can't get c8sectpfe-error-irq\n");
+ return fei->error_irq;
+ }
+
+ platform_set_drvdata(pdev, fei);
+
+ fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+ if (IS_ERR(fei->c8sectpfeclk)) {
+ dev_err(dev, "c8sectpfe clk not found\n");
+ return PTR_ERR(fei->c8sectpfeclk);
+ }
+
+ ret = clk_prepare_enable(fei->c8sectpfeclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable c8sectpfe clock\n");
+ return ret;
+ }
+
+	/* to save power, disable all IPs (they are on by default) */
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ /* Enable memdma clock */
+ writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
+
+ /* clear internal sram */
+ memset_io(fei->sram, 0x0, fei->sram_size);
+
+ c8sectpfe_getconfig(fei);
+
+ ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
+ 0, "c8sectpfe-idle-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ ret = devm_request_irq(dev, fei->error_irq,
+ c8sectpfe_error_irq_handler, 0,
+ "c8sectpfe-error-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ fei->tsin_count = of_get_child_count(np);
+
+ if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
+ fei->tsin_count > fei->hw_stats.num_ib) {
+
+ dev_err(dev, "More tsin declared than exist on SoC!\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ fei->pinctrl = devm_pinctrl_get(dev);
+
+ if (IS_ERR(fei->pinctrl)) {
+ dev_err(dev, "Error getting tsin pins\n");
+ ret = PTR_ERR(fei->pinctrl);
+ goto err_clk_disable;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct device_node *i2c_bus;
+
+ fei->channel_data[index] = devm_kzalloc(dev,
+ sizeof(struct channel_info),
+ GFP_KERNEL);
+
+ if (!fei->channel_data[index]) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ tsin = fei->channel_data[index];
+
+ tsin->fei = fei;
+
+ ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
+ if (ret) {
+ dev_err(&pdev->dev, "No tsin_num found\n");
+ goto err_clk_disable;
+ }
+
+ /* sanity check value */
+ if (tsin->tsin_id > fei->hw_stats.num_ib) {
+ dev_err(&pdev->dev,
+ "tsin-num %d specified greater than number\n\t"
+ "of input block hw in SoC! (%d)",
+ tsin->tsin_id, fei->hw_stats.num_ib);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ tsin->invert_ts_clk = of_property_read_bool(child,
+ "invert-ts-clk");
+
+ tsin->serial_not_parallel = of_property_read_bool(child,
+ "serial-not-parallel");
+
+ tsin->async_not_sync = of_property_read_bool(child,
+ "async-not-sync");
+
+ ret = of_property_read_u32(child, "dvb-card",
+ &tsin->dvb_card);
+ if (ret) {
+ dev_err(&pdev->dev, "No dvb-card found\n");
+ goto err_clk_disable;
+ }
+
+ i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
+ if (!i2c_bus) {
+ dev_err(&pdev->dev, "No i2c-bus found\n");
+ ret = -ENODEV;
+ goto err_clk_disable;
+ }
+ tsin->i2c_adapter =
+ of_find_i2c_adapter_by_node(i2c_bus);
+ if (!tsin->i2c_adapter) {
+ dev_err(&pdev->dev, "No i2c adapter found\n");
+ of_node_put(i2c_bus);
+ ret = -ENODEV;
+ goto err_clk_disable;
+ }
+ of_node_put(i2c_bus);
+
+ tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);
+
+ if (!gpio_is_valid(tsin->rst_gpio)) {
+ dev_err(dev,
+ "reset gpio for tsin%d not valid (gpio=%d)\n",
+ tsin->tsin_id, tsin->rst_gpio);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ ret = devm_gpio_request_one(dev, tsin->rst_gpio,
+ GPIOF_OUT_INIT_LOW, "NIM reset");
+ if (ret && ret != -EBUSY) {
+ dev_err(dev, "Can't request tsin%d reset gpio\n"
+ , fei->channel_data[index]->tsin_id);
+ goto err_clk_disable;
+ }
+
+ if (!ret) {
+ /* toggle reset lines */
+ gpio_direction_output(tsin->rst_gpio, 0);
+ usleep_range(3500, 5000);
+ gpio_direction_output(tsin->rst_gpio, 1);
+ usleep_range(3000, 5000);
+ }
+
+ tsin->demux_mapping = index;
+
+ dev_dbg(fei->dev,
+ "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
+ "serial-not-parallel=%d async-not-sync=%d dvb-card=%d\n",
+ fei->channel_data[index], index,
+ tsin->tsin_id, tsin->invert_ts_clk,
+ tsin->serial_not_parallel, tsin->async_not_sync,
+ tsin->dvb_card);
+
+ index++;
+ }
+
+ /* Setup timer interrupt */
+ init_timer(&fei->timer);
+ fei->timer.function = c8sectpfe_timer_interrupt;
+ fei->timer.data = (unsigned long)fei;
+
+ mutex_init(&fei->lock);
+
+ /* Register the tuner frontends */
+ ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
+ (void *)fei,
+ c8sectpfe_start_feed,
+ c8sectpfe_stop_feed);
+ if (ret) {
+ dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
+ ret);
+ goto err_clk_disable;
+ }
+
+ /* ensure all other init has been done before requesting firmware */
+ ret = load_c8sectpfe_fw_step1(fei);
+ if (ret) {
+ dev_err(dev, "Couldn't load slim core firmware\n");
+ goto err_clk_disable;
+ }
+
+ c8sectpfe_debugfs_init(fei);
+
+ return 0;
+
+err_clk_disable:
+ /* TODO uncomment when upstream has taken a reference on this clk */
+ /*clk_disable_unprepare(fei->c8sectpfeclk);*/
+ return ret;
+}
+
+static int c8sectpfe_remove(struct platform_device *pdev)
+{
+ struct c8sectpfei *fei = platform_get_drvdata(pdev);
+ struct channel_info *channel;
+ int i;
+
+ wait_for_completion(&fei->fw_ack);
+
+ c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
+
+ /*
+ * Now loop through and un-configure each of the InputBlock resources
+ */
+ for (i = 0; i < fei->tsin_count; i++) {
+ channel = fei->channel_data[i];
+ free_input_block(fei, channel);
+ }
+
+ c8sectpfe_debugfs_exit(fei);
+
+ dev_info(fei->dev, "Stopping memdma SLIM core\n");
+ if (readl(fei->io + DMA_CPU_RUN))
+ writel(0x0, fei->io + DMA_CPU_RUN);
+
+ /* disable clocks for all internal IPs */
+ if (readl(fei->io + SYS_INPUT_CLKEN))
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ if (readl(fei->io + SYS_OTHER_CLKEN))
+ writel(0, fei->io + SYS_OTHER_CLKEN);
+
+ /* TODO uncomment when upstream has taken a reference on this clk */
+ /*
+ if (fei->c8sectpfeclk)
+ clk_disable_unprepare(fei->c8sectpfeclk);
+ */
+
+ return 0;
+}
+
+
+static int configure_channels(struct c8sectpfei *fei)
+{
+ int index = 0, ret;
+ struct channel_info *tsin;
+ struct device_node *child, *np = fei->dev->of_node;
+
+ /* iterate round each tsin and configure memdma descriptor and IB hw */
+ for_each_child_of_node(np, child) {
+
+ tsin = fei->channel_data[index];
+
+ ret = configure_memdma_and_inputblock(fei, tsin);
+
+ if (ret) {
+ dev_err(fei->dev,
+ "configure_memdma_and_inputblock failed\n");
+ goto err_unmap;
+ }
+ index++;
+ }
+
+ return 0;
+
+err_unmap:
+ for (index = 0; index < fei->tsin_count; index++) {
+ tsin = fei->channel_data[index];
+ free_input_block(fei, tsin);
+ }
+ return ret;
+}
+
+static int
+c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ char class;
+
+ if (!fw) {
+ dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ /* We only support ELF32 at this point */
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32) {
+ dev_err(fei->dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+ dev_err(fei->dev, "Unsupported firmware endianness\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(fei->dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_type != ET_EXEC) {
+ dev_err(fei->dev, "Unsupported ELF header type\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phoff > fw->size) {
+ dev_err(fei->dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dest,
+ int seg_num)
+{
+ const u8 *imem_src = fw->data + phdr->p_offset;
+ int i;
+
+ /*
+ * For IMEM segments, the segment contains 24-bit
+ * instructions which must be padded to 32-bit
+ * instructions before being written. The written
+ * segment is padded with NOP instructions.
+ */
+
+ dev_dbg(fei->dev,
+ "Loading IMEM segment %d 0x%08x\n\t"
+ " (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
+ phdr->p_paddr, phdr->p_filesz,
+ dest, phdr->p_memsz + phdr->p_memsz / 3);
+
+ for (i = 0; i < phdr->p_filesz; i++) {
+
+ writeb(*imem_src, dest);
+
+ /*
+ * Every 3 bytes, add an additional padding zero
+ * in the destination.
+ */
+ if (i % 3 == 2) {
+ dest++;
+ writeb(0x00, dest);
+ }
+
+ dest++;
+ imem_src++;
+ }
+}
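+
+/*
+ * Illustrative sketch only (not driver code): with the 24-bit to 32-bit
+ * expansion above, three source instructions
+ *
+ * src: AA BB CC | DD EE FF | 11 22 33
+ *
+ * are written to IMEM as
+ *
+ * dst: AA BB CC 00 | DD EE FF 00 | 11 22 33 00
+ *
+ * so a segment of p_filesz bytes occupies p_filesz + p_filesz / 3 bytes
+ * of IMEM, matching the p_memsz + p_memsz / 3 size in the dev_dbg() above.
+ */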
+
+static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dst, int seg_num)
+{
+ /*
+ * For DMEM segments copy the segment data from the ELF
+ * file and pad segment with zeroes
+ */
+
+ dev_dbg(fei->dev,
+ "Loading DMEM segment %d 0x%08x\n\t"
+ "(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num, phdr->p_paddr, phdr->p_filesz,
+ dst, phdr->p_memsz);
+
+ memcpy_toio(dst, fw->data + phdr->p_offset, phdr->p_filesz);
+
+ memset_io(dst + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+}
+
+static int load_slim_core_fw(const struct firmware *fw, void *context)
+{
+ struct c8sectpfei *fei = context;
+ Elf32_Ehdr *ehdr;
+ Elf32_Phdr *phdr;
+ u8 __iomem *dst;
+ int err = 0, i;
+
+ if (!fw || !context)
+ return -EINVAL;
+
+ ehdr = (Elf32_Ehdr *)fw->data;
+ phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) {
+
+ /* Only consider LOAD segments */
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /*
+ * Check segment is contained within the fw->data buffer
+ */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(fei->dev,
+ "Segment %d is outside of firmware file\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Segments with the executable flag set are loaded into
+ * MEMDMA IMEM; all other segments are loaded into DMEM.
+ */
+
+ if (phdr->p_flags & PF_X) {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_imem_segment(fei, phdr, fw, dst, i);
+ } else {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_dmem_segment(fei, phdr, fw, dst, i);
+ }
+ }
+
+ release_firmware(fw);
+ return err;
+}
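+
+/*
+ * Worked example (illustrative assumption, not driver code): the Slim
+ * ELF uses 32-bit word addresses for load offsets, so a segment with
+ * p_paddr = 0x40000010 lands at byte offset
+ *
+ * (0x40000010 & 0xFFFFF) * sizeof(unsigned int) = 0x10 * 4 = 0x40
+ *
+ * from DMA_MEMDMA_IMEM when PF_X is set, or from DMA_MEMDMA_DMEM
+ * otherwise.
+ */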
+
+static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
+{
+ struct c8sectpfei *fei = context;
+ int err;
+
+ err = c8sectpfe_elf_sanity_check(fei, fw);
+ if (err) {
+ dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
+ , err);
+ goto err;
+ }
+
+ err = load_slim_core_fw(fw, context);
+ if (err) {
+ dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
+ goto err;
+ }
+
+ /* now the firmware is loaded, configure the input blocks */
+ err = configure_channels(fei);
+ if (err) {
+ dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
+ goto err;
+ }
+
+ /*
+ * STBus target port can access IMEM and DMEM ports
+ * without waiting for CPU
+ */
+ writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
+
+ dev_info(fei->dev, "Boot the memdma SLIM core\n");
+ writel(0x1, fei->io + DMA_CPU_RUN);
+
+ atomic_set(&fei->fw_loaded, 1);
+err:
+ complete_all(&fei->fw_ack);
+}
+
+static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
+{
+ int err;
+
+ dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+ init_completion(&fei->fw_ack);
+ atomic_set(&fei->fw_loaded, 0);
+
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
+ load_c8sectpfe_fw_cb);
+
+ if (err) {
+ dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
+ complete_all(&fei->fw_ack);
+ return err;
+ }
+
+ return 0;
+}
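+
+/*
+ * Flow sketch (for orientation only, inferred from the code above):
+ * probe calls load_c8sectpfe_fw_step1(), which requests FIRMWARE_MEMDMA
+ * asynchronously; load_c8sectpfe_fw_cb() then sanity-checks the ELF,
+ * loads the IMEM/DMEM segments, configures the input blocks, starts the
+ * SLIM core and completes fw_ack, on which c8sectpfe_remove() waits.
+ */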
+
+static const struct of_device_id c8sectpfe_match[] = {
+ { .compatible = "st,stih407-c8sectpfe" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, c8sectpfe_match);
+
+static struct platform_driver c8sectpfe_driver = {
+ .driver = {
+ .name = "c8sectpfe",
+ .of_match_table = of_match_ptr(c8sectpfe_match),
+ },
+ .probe = c8sectpfe_probe,
+ .remove = c8sectpfe_remove,
+};
+
+module_platform_driver(c8sectpfe_driver);
+
+MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
new file mode 100644
index 000000000000..39e7a221a941
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
@@ -0,0 +1,288 @@
+/*
+ * c8sectpfe-core.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_CORE_H_
+#define _C8SECTPFE_CORE_H_
+
+#define C8SECTPFEI_MAXCHANNEL 16
+#define C8SECTPFEI_MAXADAPTER 3
+
+#define C8SECTPFE_MAX_TSIN_CHAN 8
+
+struct channel_info {
+
+ int tsin_id;
+ bool invert_ts_clk;
+ bool serial_not_parallel;
+ bool async_not_sync;
+ int i2c;
+ int dvb_card;
+
+ int rst_gpio;
+
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter *tuner_i2c;
+ struct i2c_adapter *lnb_i2c;
+ struct i2c_client *i2c_client;
+ struct dvb_frontend *frontend;
+
+ struct pinctrl_state *pstate;
+
+ int demux_mapping;
+ int active;
+
+ void *back_buffer_start;
+ void *back_buffer_aligned;
+ dma_addr_t back_buffer_busaddr;
+
+ void *pid_buffer_start;
+ void *pid_buffer_aligned;
+ dma_addr_t pid_buffer_busaddr;
+
+ unsigned long fifo;
+
+ struct completion idle_completion;
+ struct tasklet_struct tsklet;
+
+ struct c8sectpfei *fei;
+ void __iomem *irec;
+
+};
+
+struct c8sectpfe_hw {
+ int num_ib;
+ int num_mib;
+ int num_swts;
+ int num_tsout;
+ int num_ccsc;
+ int num_ram;
+ int num_tp;
+};
+
+struct c8sectpfei {
+
+ struct device *dev;
+ struct pinctrl *pinctrl;
+
+ struct dentry *root;
+ struct debugfs_regset32 *regset;
+ struct completion fw_ack;
+ atomic_t fw_loaded;
+
+ int tsin_count;
+
+ struct c8sectpfe_hw hw_stats;
+
+ struct c8sectpfe *c8sectpfe[C8SECTPFEI_MAXADAPTER];
+
+ int mapping[C8SECTPFEI_MAXCHANNEL];
+
+ struct mutex lock;
+
+ struct timer_list timer; /* timer interrupts for outputs */
+
+ void __iomem *io;
+ void __iomem *sram;
+
+ unsigned long sram_size;
+
+ struct channel_info *channel_data[C8SECTPFE_MAX_TSIN_CHAN];
+
+ struct clk *c8sectpfeclk;
+ int nima_rst_gpio;
+ int nimb_rst_gpio;
+
+ int idle_irq;
+ int error_irq;
+
+ int global_feed_count;
+};
+
+/* C8SECTPFE SYS Regs list */
+
+#define SYS_INPUT_ERR_STATUS 0x0
+#define SYS_OTHER_ERR_STATUS 0x8
+#define SYS_INPUT_ERR_MASK 0x10
+#define SYS_OTHER_ERR_MASK 0x18
+#define SYS_DMA_ROUTE 0x20
+#define SYS_INPUT_CLKEN 0x30
+#define IBENABLE_MASK 0x7F
+
+#define SYS_OTHER_CLKEN 0x38
+#define TSDMAENABLE BIT(1)
+#define MEMDMAENABLE BIT(0)
+
+#define SYS_CFG_NUM_IB 0x200
+#define SYS_CFG_NUM_MIB 0x204
+#define SYS_CFG_NUM_SWTS 0x208
+#define SYS_CFG_NUM_TSOUT 0x20C
+#define SYS_CFG_NUM_CCSC 0x210
+#define SYS_CFG_NUM_RAM 0x214
+#define SYS_CFG_NUM_TP 0x218
+
+/* Input Block Regs */
+
+#define C8SECTPFE_INPUTBLK_OFFSET 0x1000
+#define C8SECTPFE_CHANNEL_OFFSET(x) ((x*0x40) + C8SECTPFE_INPUTBLK_OFFSET)
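+/* e.g. (illustrative) channel 2 registers begin at (2 * 0x40) + 0x1000 = 0x1080 */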
+
+#define C8SECTPFE_IB_IP_FMT_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x00)
+#define C8SECTPFE_IGNORE_ERR_AT_SOP BIT(7)
+#define C8SECTPFE_IGNORE_ERR_IN_PKT BIT(6)
+#define C8SECTPFE_IGNORE_ERR_IN_BYTE BIT(5)
+#define C8SECTPFE_INVERT_TSCLK BIT(4)
+#define C8SECTPFE_ALIGN_BYTE_SOP BIT(3)
+#define C8SECTPFE_ASYNC_NOT_SYNC BIT(2)
+#define C8SECTPFE_BYTE_ENDIANNESS_MSB BIT(1)
+#define C8SECTPFE_SERIAL_NOT_PARALLEL BIT(0)
+
+#define C8SECTPFE_IB_SYNCLCKDRP_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x04)
+#define C8SECTPFE_SYNC(x) (x & 0xf)
+#define C8SECTPFE_DROP(x) (((x) << 4) & 0xf0)
+#define C8SECTPFE_TOKEN(x) ((x<<8) & 0xff00)
+#define C8SECTPFE_SLDENDIANNESS BIT(16)
+
+#define C8SECTPFE_IB_TAGBYTES_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x08)
+#define C8SECTPFE_TAG_HEADER(x) (x << 16)
+#define C8SECTPFE_TAG_COUNTER(x) ((x<<1) & 0x7fff)
+#define C8SECTPFE_TAG_ENABLE BIT(0)
+
+#define C8SECTPFE_IB_PID_SET(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x0C)
+#define C8SECTPFE_PID_OFFSET(x) (x & 0x3f)
+#define C8SECTPFE_PID_NUMBITS(x) ((x << 6) & 0xfff)
+#define C8SECTPFE_PID_ENABLE BIT(31)
+
+#define C8SECTPFE_IB_PKT_LEN(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x10)
+
+#define C8SECTPFE_IB_BUFF_STRT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x14)
+#define C8SECTPFE_IB_BUFF_END(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x18)
+#define C8SECTPFE_IB_READ_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x1C)
+#define C8SECTPFE_IB_WRT_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x20)
+
+#define C8SECTPFE_IB_PRI_THRLD(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x24)
+#define C8SECTPFE_PRI_VALUE(x) (x & 0x7fffff)
+#define C8SECTPFE_PRI_LOWPRI(x) ((x & 0xf) << 24)
+#define C8SECTPFE_PRI_HIGHPRI(x) ((x & 0xf) << 28)
+
+#define C8SECTPFE_IB_STAT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x28)
+#define C8SECTPFE_STAT_FIFO_OVERFLOW(x) (x & 0x1)
+#define C8SECTPFE_STAT_BUFFER_OVERFLOW(x) (x & 0x2)
+#define C8SECTPFE_STAT_OUTOFORDERRP(x) (x & 0x4)
+#define C8SECTPFE_STAT_PID_OVERFLOW(x) (x & 0x8)
+#define C8SECTPFE_STAT_PKT_OVERFLOW(x) (x & 0x10)
+#define C8SECTPFE_STAT_ERROR_PACKETS(x) ((x >> 8) & 0xf)
+#define C8SECTPFE_STAT_SHORT_PACKETS(x) ((x >> 12) & 0xf)
+
+#define C8SECTPFE_IB_MASK(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x2C)
+#define C8SECTPFE_MASK_FIFO_OVERFLOW BIT(0)
+#define C8SECTPFE_MASK_BUFFER_OVERFLOW BIT(1)
+#define C8SECTPFE_MASK_OUTOFORDERRP(x) BIT(2)
+#define C8SECTPFE_MASK_PID_OVERFLOW(x) BIT(3)
+#define C8SECTPFE_MASK_PKT_OVERFLOW(x) BIT(4)
+#define C8SECTPFE_MASK_ERROR_PACKETS(x) ((x & 0xf) << 8)
+#define C8SECTPFE_MASK_SHORT_PACKETS(x) (((x) & 0xf) << 12)
+
+#define C8SECTPFE_IB_SYS(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x30)
+#define C8SECTPFE_SYS_RESET BIT(1)
+#define C8SECTPFE_SYS_ENABLE BIT(0)
+
+/*
+ * Pointer record data structure required for each input block;
+ * see Table 82 on page 167 of the functional specification.
+ */
+
+#define DMA_PRDS_MEMBASE 0x0 /* Internal sram base address */
+#define DMA_PRDS_MEMTOP 0x4 /* Internal sram top address */
+
+/*
+ * TS packet size, including any tag bytes added by the input block,
+ * rounded up to the next multiple of 8 bytes. This rounded size
+ * must be less than 255 bytes.
+ */
+#define DMA_PRDS_PKTSIZE 0x8
+#define DMA_PRDS_TPENABLE 0xc
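+
+/*
+ * Example (illustrative only): a standard 188-byte TS packet with an
+ * 8-byte tag added by the input block gives 196 bytes, which rounds up
+ * to a 200-byte DMA_PRDS_PKTSIZE, comfortably under the 255-byte limit
+ * noted above.
+ */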
+
+#define TP0_OFFSET 0x10
+#define DMA_PRDS_BUSBASE_TP(x) ((0x10*x) + TP0_OFFSET)
+#define DMA_PRDS_BUSTOP_TP(x) ((0x10*x) + TP0_OFFSET + 0x4)
+#define DMA_PRDS_BUSWP_TP(x) ((0x10*x) + TP0_OFFSET + 0x8)
+#define DMA_PRDS_BUSRP_TP(x) ((0x10*x) + TP0_OFFSET + 0xc)
+
+#define DMA_PRDS_SIZE (0x20)
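+
+/*
+ * Example (illustrative only): each TP has a 0x10-byte pointer record,
+ * so for TP 2 DMA_PRDS_BUSBASE_TP(2) = 0x30, with BUSTOP at 0x34,
+ * BUSWP at 0x38 and BUSRP at 0x3c within the pointer record area.
+ */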
+
+#define DMA_MEMDMA_OFFSET 0x4000
+#define DMA_IMEM_OFFSET 0x0
+#define DMA_DMEM_OFFSET 0x4000
+#define DMA_CPU 0x8000
+#define DMA_PER_OFFSET 0xb000
+
+#define DMA_MEMDMA_DMEM (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET)
+#define DMA_MEMDMA_IMEM (DMA_MEMDMA_OFFSET + DMA_IMEM_OFFSET)
+
+/* XP70 Slim core regs */
+#define DMA_CPU_ID (DMA_MEMDMA_OFFSET + DMA_CPU + 0x0)
+#define DMA_CPU_VCR (DMA_MEMDMA_OFFSET + DMA_CPU + 0x4)
+#define DMA_CPU_RUN (DMA_MEMDMA_OFFSET + DMA_CPU + 0x8)
+#define DMA_CPU_CLOCKGATE (DMA_MEMDMA_OFFSET + DMA_CPU + 0xc)
+#define DMA_CPU_PC (DMA_MEMDMA_OFFSET + DMA_CPU + 0x20)
+
+/* Enable Interrupt for a IB */
+#define DMA_PER_TPn_DREQ_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd00)
+/* Ack interrupt by setting corresponding bit */
+#define DMA_PER_TPn_DACK_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd80)
+#define DMA_PER_TPn_DREQ (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe00)
+#define DMA_PER_TPn_DACK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe80)
+#define DMA_PER_DREQ_MODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf80)
+#define DMA_PER_STBUS_SYNC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf88)
+#define DMA_PER_STBUS_ACCESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf8c)
+#define DMA_PER_STBUS_ADDRESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf90)
+#define DMA_PER_IDLE_INT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfa8)
+#define DMA_PER_PRIORITY (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfac)
+#define DMA_PER_MAX_OPCODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb0)
+#define DMA_PER_MAX_CHUNK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb4)
+#define DMA_PER_PAGE_SIZE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfbc)
+#define DMA_PER_MBOX_STATUS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc0)
+#define DMA_PER_MBOX_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc8)
+#define DMA_PER_MBOX_CLEAR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd0)
+#define DMA_PER_MBOX_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd8)
+#define DMA_PER_INJECT_PKT_SRC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe0)
+#define DMA_PER_INJECT_PKT_DEST (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe4)
+#define DMA_PER_INJECT_PKT_ADDR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe8)
+#define DMA_PER_INJECT_PKT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfec)
+#define DMA_PER_PAT_PTR_INIT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff0)
+#define DMA_PER_PAT_PTR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff4)
+#define DMA_PER_SLEEP_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff8)
+#define DMA_PER_SLEEP_COUNTER (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xffc)
+/* #define DMA_RF_CPUREGn DMA_RFBASEADDR (n = 0 to 15) slim regs */
+
+/* The following are from DMA_DMEM_BaseAddress */
+#define DMA_FIRMWARE_VERSION (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x0)
+#define DMA_PTRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x4)
+#define DMA_PTRREC_INPUT_OFFSET (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x8)
+#define DMA_ERRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0xc)
+#define DMA_ERROR_RECORD(n) ((n*4) + DMA_ERRREC_BASE + 0x4)
+#define DMA_IDLE_REQ (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x10)
+#define IDLEREQ BIT(31)
+
+#define DMA_FIRMWARE_CONFIG (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x14)
+
+/* Regs for PID Filter */
+
+#define PIDF_OFFSET 0x2800
+#define PIDF_BASE(n) ((n*4) + PIDF_OFFSET)
+#define PIDF_LEAK_ENABLE (PIDF_OFFSET + 0x100)
+#define PIDF_LEAK_STATUS (PIDF_OFFSET + 0x108)
+#define PIDF_LEAK_COUNT_RESET (PIDF_OFFSET + 0x110)
+#define PIDF_LEAK_COUNTER (PIDF_OFFSET + 0x114)
+
+#endif /* _C8SECTPFE_CORE_H_ */
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
new file mode 100644
index 000000000000..e9ba13db49cd
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -0,0 +1,271 @@
+/*
+ * c8sectpfe-debugfs.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "c8sectpfe-debugfs.h"
+
+#define dump_register(nm ...) \
+{ \
+ .name = #nm, \
+ .offset = nm, \
+}
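+
+/*
+ * For reference (illustrative expansion): dump_register(SYS_DMA_ROUTE)
+ * becomes { .name = "SYS_DMA_ROUTE", .offset = SYS_DMA_ROUTE }, so each
+ * entry below is named after its register offset macro.
+ */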
+
+static const struct debugfs_reg32 fei_sys_regs[] = {
+ dump_register(SYS_INPUT_ERR_STATUS),
+ dump_register(SYS_OTHER_ERR_STATUS),
+ dump_register(SYS_INPUT_ERR_MASK),
+ dump_register(SYS_DMA_ROUTE),
+ dump_register(SYS_INPUT_CLKEN),
+ dump_register(IBENABLE_MASK),
+ dump_register(SYS_OTHER_CLKEN),
+ dump_register(SYS_CFG_NUM_IB),
+ dump_register(SYS_CFG_NUM_MIB),
+ dump_register(SYS_CFG_NUM_SWTS),
+ dump_register(SYS_CFG_NUM_TSOUT),
+ dump_register(SYS_CFG_NUM_CCSC),
+ dump_register(SYS_CFG_NUM_RAM),
+ dump_register(SYS_CFG_NUM_TP),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(0)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(0)),
+ dump_register(C8SECTPFE_IB_PID_SET(0)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(0)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(0)),
+ dump_register(C8SECTPFE_IB_BUFF_END(0)),
+ dump_register(C8SECTPFE_IB_READ_PNT(0)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(0)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(0)),
+ dump_register(C8SECTPFE_IB_STAT(0)),
+ dump_register(C8SECTPFE_IB_MASK(0)),
+ dump_register(C8SECTPFE_IB_SYS(0)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(1)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(1)),
+ dump_register(C8SECTPFE_IB_PID_SET(1)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(1)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(1)),
+ dump_register(C8SECTPFE_IB_BUFF_END(1)),
+ dump_register(C8SECTPFE_IB_READ_PNT(1)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(1)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(1)),
+ dump_register(C8SECTPFE_IB_STAT(1)),
+ dump_register(C8SECTPFE_IB_MASK(1)),
+ dump_register(C8SECTPFE_IB_SYS(1)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(2)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(2)),
+ dump_register(C8SECTPFE_IB_PID_SET(2)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(2)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(2)),
+ dump_register(C8SECTPFE_IB_BUFF_END(2)),
+ dump_register(C8SECTPFE_IB_READ_PNT(2)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(2)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(2)),
+ dump_register(C8SECTPFE_IB_STAT(2)),
+ dump_register(C8SECTPFE_IB_MASK(2)),
+ dump_register(C8SECTPFE_IB_SYS(2)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(3)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(3)),
+ dump_register(C8SECTPFE_IB_PID_SET(3)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(3)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(3)),
+ dump_register(C8SECTPFE_IB_BUFF_END(3)),
+ dump_register(C8SECTPFE_IB_READ_PNT(3)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(3)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(3)),
+ dump_register(C8SECTPFE_IB_STAT(3)),
+ dump_register(C8SECTPFE_IB_MASK(3)),
+ dump_register(C8SECTPFE_IB_SYS(3)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(4)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(4)),
+ dump_register(C8SECTPFE_IB_PID_SET(4)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(4)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(4)),
+ dump_register(C8SECTPFE_IB_BUFF_END(4)),
+ dump_register(C8SECTPFE_IB_READ_PNT(4)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(4)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(4)),
+ dump_register(C8SECTPFE_IB_STAT(4)),
+ dump_register(C8SECTPFE_IB_MASK(4)),
+ dump_register(C8SECTPFE_IB_SYS(4)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(5)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(5)),
+ dump_register(C8SECTPFE_IB_PID_SET(5)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(5)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(5)),
+ dump_register(C8SECTPFE_IB_BUFF_END(5)),
+ dump_register(C8SECTPFE_IB_READ_PNT(5)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(5)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(5)),
+ dump_register(C8SECTPFE_IB_STAT(5)),
+ dump_register(C8SECTPFE_IB_MASK(5)),
+ dump_register(C8SECTPFE_IB_SYS(5)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(6)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(6)),
+ dump_register(C8SECTPFE_IB_PID_SET(6)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(6)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(6)),
+ dump_register(C8SECTPFE_IB_BUFF_END(6)),
+ dump_register(C8SECTPFE_IB_READ_PNT(6)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(6)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(6)),
+ dump_register(C8SECTPFE_IB_STAT(6)),
+ dump_register(C8SECTPFE_IB_MASK(6)),
+ dump_register(C8SECTPFE_IB_SYS(6)),
+
+ dump_register(DMA_CPU_ID),
+ dump_register(DMA_CPU_VCR),
+ dump_register(DMA_CPU_RUN),
+ dump_register(DMA_CPU_PC),
+
+ dump_register(DMA_PER_TPn_DREQ_MASK),
+ dump_register(DMA_PER_TPn_DACK_SET),
+ dump_register(DMA_PER_TPn_DREQ),
+ dump_register(DMA_PER_TPn_DACK),
+ dump_register(DMA_PER_DREQ_MODE),
+ dump_register(DMA_PER_STBUS_SYNC),
+ dump_register(DMA_PER_STBUS_ACCESS),
+ dump_register(DMA_PER_STBUS_ADDRESS),
+ dump_register(DMA_PER_IDLE_INT),
+ dump_register(DMA_PER_PRIORITY),
+ dump_register(DMA_PER_MAX_OPCODE),
+ dump_register(DMA_PER_MAX_CHUNK),
+ dump_register(DMA_PER_PAGE_SIZE),
+ dump_register(DMA_PER_MBOX_STATUS),
+ dump_register(DMA_PER_MBOX_SET),
+ dump_register(DMA_PER_MBOX_CLEAR),
+ dump_register(DMA_PER_MBOX_MASK),
+ dump_register(DMA_PER_INJECT_PKT_SRC),
+ dump_register(DMA_PER_INJECT_PKT_DEST),
+ dump_register(DMA_PER_INJECT_PKT_ADDR),
+ dump_register(DMA_PER_INJECT_PKT),
+ dump_register(DMA_PER_PAT_PTR_INIT),
+ dump_register(DMA_PER_PAT_PTR),
+ dump_register(DMA_PER_SLEEP_MASK),
+ dump_register(DMA_PER_SLEEP_COUNTER),
+
+ dump_register(DMA_FIRMWARE_VERSION),
+ dump_register(DMA_PTRREC_BASE),
+ dump_register(DMA_PTRREC_INPUT_OFFSET),
+ dump_register(DMA_ERRREC_BASE),
+
+ dump_register(DMA_ERROR_RECORD(0)),
+ dump_register(DMA_ERROR_RECORD(1)),
+ dump_register(DMA_ERROR_RECORD(2)),
+ dump_register(DMA_ERROR_RECORD(3)),
+ dump_register(DMA_ERROR_RECORD(4)),
+ dump_register(DMA_ERROR_RECORD(5)),
+ dump_register(DMA_ERROR_RECORD(6)),
+ dump_register(DMA_ERROR_RECORD(7)),
+ dump_register(DMA_ERROR_RECORD(8)),
+ dump_register(DMA_ERROR_RECORD(9)),
+ dump_register(DMA_ERROR_RECORD(10)),
+ dump_register(DMA_ERROR_RECORD(11)),
+ dump_register(DMA_ERROR_RECORD(12)),
+ dump_register(DMA_ERROR_RECORD(13)),
+ dump_register(DMA_ERROR_RECORD(14)),
+ dump_register(DMA_ERROR_RECORD(15)),
+ dump_register(DMA_ERROR_RECORD(16)),
+ dump_register(DMA_ERROR_RECORD(17)),
+ dump_register(DMA_ERROR_RECORD(18)),
+ dump_register(DMA_ERROR_RECORD(19)),
+ dump_register(DMA_ERROR_RECORD(20)),
+ dump_register(DMA_ERROR_RECORD(21)),
+ dump_register(DMA_ERROR_RECORD(22)),
+
+ dump_register(DMA_IDLE_REQ),
+ dump_register(DMA_FIRMWARE_CONFIG),
+
+ dump_register(PIDF_BASE(0)),
+ dump_register(PIDF_BASE(1)),
+ dump_register(PIDF_BASE(2)),
+ dump_register(PIDF_BASE(3)),
+ dump_register(PIDF_BASE(4)),
+ dump_register(PIDF_BASE(5)),
+ dump_register(PIDF_BASE(6)),
+ dump_register(PIDF_BASE(7)),
+ dump_register(PIDF_BASE(8)),
+ dump_register(PIDF_BASE(9)),
+ dump_register(PIDF_BASE(10)),
+ dump_register(PIDF_BASE(11)),
+ dump_register(PIDF_BASE(12)),
+ dump_register(PIDF_BASE(13)),
+ dump_register(PIDF_BASE(14)),
+ dump_register(PIDF_BASE(15)),
+ dump_register(PIDF_BASE(16)),
+ dump_register(PIDF_BASE(17)),
+ dump_register(PIDF_BASE(18)),
+ dump_register(PIDF_BASE(19)),
+ dump_register(PIDF_BASE(20)),
+ dump_register(PIDF_BASE(21)),
+ dump_register(PIDF_BASE(22)),
+ dump_register(PIDF_LEAK_ENABLE),
+ dump_register(PIDF_LEAK_STATUS),
+ dump_register(PIDF_LEAK_COUNT_RESET),
+ dump_register(PIDF_LEAK_COUNTER),
+};
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = debugfs_create_dir("c8sectpfe", NULL);
+ if (!root)
+ goto err;
+
+ fei->root = root;
+
+ fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
+ if (!fei->regset)
+ goto err;
+
+ fei->regset->regs = fei_sys_regs;
+ fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
+ fei->regset->base = fei->io;
+
+ file = debugfs_create_regset32("registers", S_IRUGO, root,
+ fei->regset);
+ if (!file) {
+ dev_err(fei->dev,
+ "%s: failed to create 'registers' debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ return;
+
+err:
+ debugfs_remove_recursive(root);
+}
+
+void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
+{
+ debugfs_remove_recursive(fei->root);
+ fei->root = NULL;
+}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
new file mode 100644
index 000000000000..8af1ac1378c8
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h
@@ -0,0 +1,26 @@
+/*
+ * c8sectpfe-debugfs.h - C8SECTPFE STi DVB driver debugfs header
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Authors: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __C8SECTPFE_DEBUG_H
+#define __C8SECTPFE_DEBUG_H
+
+#include "c8sectpfe-core.h"
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *);
+void c8sectpfe_debugfs_exit(struct c8sectpfei *);
+
+#endif /* __C8SECTPFE_DEBUG_H */
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
new file mode 100644
index 000000000000..69d7fe4471c2
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -0,0 +1,244 @@
+/*
+ * c8sectpfe-dvb.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+
+#include <dt-bindings/media/c8sectpfe.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+#include "dvb-pll.h"
+#include "lnbh24.h"
+#include "stv0367.h"
+#include "stv0367_priv.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "tda18212.h"
+
+static inline const char *dvb_card_str(unsigned int c)
+{
+ switch (c) {
+ case STV0367_TDA18212_NIMA_1: return "STV0367_TDA18212_NIMA_1";
+ case STV0367_TDA18212_NIMA_2: return "STV0367_TDA18212_NIMA_2";
+ case STV0367_TDA18212_NIMB_1: return "STV0367_TDA18212_NIMB_1";
+ case STV0367_TDA18212_NIMB_2: return "STV0367_TDA18212_NIMB_2";
+ case STV0903_6110_LNB24_NIMA: return "STV0903_6110_LNB24_NIMA";
+ case STV0903_6110_LNB24_NIMB: return "STV0903_6110_LNB24_NIMB";
+ default: return "unknown dvb frontend card";
+ }
+}
+
+static struct stv090x_config stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+ .xtal = 16000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+ .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+
+ .repeater_level = STV090x_RPTLEVEL_64,
+
+ .tuner_init = NULL,
+ .tuner_set_mode = NULL,
+ .tuner_set_frequency = NULL,
+ .tuner_get_frequency = NULL,
+ .tuner_set_bandwidth = NULL,
+ .tuner_get_bandwidth = NULL,
+ .tuner_set_bbgain = NULL,
+ .tuner_get_bbgain = NULL,
+ .tuner_set_refclk = NULL,
+ .tuner_get_status = NULL,
+};
+
+static struct stv6110x_config stv6110x_config = {
+ .addr = 0x60,
+ .refclk = 16000000,
+};
+
+#define NIMA 0
+#define NIMB 1
+
+static struct stv0367_config stv0367_tda18212_config[] = {
+ {
+ .demod_address = 0x1c,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1d,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1e,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ },
+};
+
+static struct tda18212_config tda18212_conf = {
+ .if_dvbt_6 = 4150,
+ .if_dvbt_7 = 4150,
+ .if_dvbt_8 = 4500,
+ .if_dvbc = 5000,
+};
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe,
+ struct channel_info *tsin, int chan_num)
+{
+ struct tda18212_config *tda18212;
+ struct stv6110x_devctl *fe2;
+ struct i2c_client *client;
+ struct i2c_board_info tda18212_info = {
+ .type = "tda18212",
+ .addr = 0x60,
+ };
+
+ if (!tsin)
+ return -EINVAL;
+
+ switch (tsin->dvb_card) {
+
+ case STV0367_TDA18212_NIMA_1:
+ case STV0367_TDA18212_NIMA_2:
+ case STV0367_TDA18212_NIMB_1:
+ case STV0367_TDA18212_NIMB_2:
+ if (tsin->dvb_card == STV0367_TDA18212_NIMA_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[0],
+ tsin->i2c_adapter);
+ else if (tsin->dvb_card == STV0367_TDA18212_NIMB_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[1],
+ tsin->i2c_adapter);
+ else
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[2],
+ tsin->i2c_adapter);
+
+ if (!*fe) {
+ dev_err(c8sectpfe->device,
+ "%s: stv0367ter_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ /*
+ * init the demod so that i2c gate_ctrl
+ * to the tuner works correctly
+ */
+ (*fe)->ops.init(*fe);
+
+ /* Allocate the tda18212 structure */
+ tda18212 = devm_kzalloc(c8sectpfe->device,
+ sizeof(struct tda18212_config),
+ GFP_KERNEL);
+ if (!tda18212) {
+ dev_err(c8sectpfe->device,
+ "%s: devm_kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(tda18212, &tda18212_conf,
+ sizeof(struct tda18212_config));
+
+ tda18212->fe = (*fe);
+
+ tda18212_info.platform_data = tda18212;
+
+ /* attach tuner */
+ request_module("tda18212");
+ client = i2c_new_device(tsin->i2c_adapter, &tda18212_info);
+ if (!client || !client->dev.driver) {
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ tsin->i2c_client = client;
+
+ break;
+
+ case STV0903_6110_LNB24_NIMA:
+ *fe = dvb_attach(stv090x_attach, &stv090x_config,
+ tsin->i2c_adapter, STV090x_DEMODULATOR_0);
+ if (!*fe) {
+ dev_err(c8sectpfe->device, "%s: stv090x_attach failed\n"
+ "\tfor NIM card %s\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ fe2 = dvb_attach(stv6110x_attach, *fe,
+ &stv6110x_config, tsin->i2c_adapter);
+ if (!fe2) {
+ dev_err(c8sectpfe->device,
+ "%s: stv6110x_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ stv090x_config.tuner_init = fe2->tuner_init;
+ stv090x_config.tuner_set_mode = fe2->tuner_set_mode;
+ stv090x_config.tuner_set_frequency = fe2->tuner_set_frequency;
+ stv090x_config.tuner_get_frequency = fe2->tuner_get_frequency;
+ stv090x_config.tuner_set_bandwidth = fe2->tuner_set_bandwidth;
+ stv090x_config.tuner_get_bandwidth = fe2->tuner_get_bandwidth;
+ stv090x_config.tuner_set_bbgain = fe2->tuner_set_bbgain;
+ stv090x_config.tuner_get_bbgain = fe2->tuner_get_bbgain;
+ stv090x_config.tuner_set_refclk = fe2->tuner_set_refclk;
+ stv090x_config.tuner_get_status = fe2->tuner_get_status;
+
+ dvb_attach(lnbh24_attach, *fe, tsin->i2c_adapter, 0, 0, 0x9);
+ break;
+
+ default:
+ dev_err(c8sectpfe->device,
+ "%s: DVB frontend card %s not yet supported\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ (*fe)->id = chan_num;
+
+ dev_info(c8sectpfe->device,
+ "DVB frontend card %s successfully attached\n",
+ dvb_card_str(tsin->dvb_card));
+ return 0;
+}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
new file mode 100644
index 000000000000..bd366dbc82b3
--- /dev/null
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h
@@ -0,0 +1,20 @@
+/*
+ * c8sectpfe-dvb.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+#ifndef _C8SECTPFE_DVB_H_
+#define _C8SECTPFE_DVB_H_
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe, struct channel_info *tsin,
+ int chan_num);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index c4268d1b2f82..ed0b8788a66f 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -1627,7 +1627,7 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
h_freq = (u32)bt->pixelclock / total_h_pixel;
if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
- if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync,
+ if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
bt->polarities, bt->interlaced, timings))
return true;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 0862c1f24f57..c404e275eae0 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -1124,15 +1124,26 @@ int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id)
return 0;
}
+static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ if ((bt->standards & (V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF)) &&
+ v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, NULL, NULL))
+ return true;
+
+ return false;
+}
+
int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
-
if (!vivid_is_hdmi_out(dev))
return -ENODATA;
if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
- 0, NULL, NULL))
+ 0, NULL, NULL) &&
+ !valid_cvt_gtf_timings(timings))
return -EINVAL;
if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0))
return 0;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 913485a90e97..4e61886384e3 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -1,7 +1,7 @@
/*
* vsp1_drv.c -- R-Car VSP1 Driver
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -403,7 +403,10 @@ static int vsp1_pm_suspend(struct device *dev)
if (vsp1->ref_count == 0)
return 0;
+ vsp1_pipelines_suspend(vsp1);
+
clk_disable_unprepare(vsp1->clock);
+
return 0;
}
@@ -413,10 +416,14 @@ static int vsp1_pm_resume(struct device *dev)
WARN_ON(mutex_is_locked(&vsp1->lock));
- if (vsp1->ref_count)
+ if (vsp1->ref_count == 0)
return 0;
- return clk_prepare_enable(vsp1->clock);
+ clk_prepare_enable(vsp1->clock);
+
+ vsp1_pipelines_resume(vsp1);
+
+ return 0;
}
#endif
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index a453bb4ddd37..fd95a75b04f4 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -24,22 +24,24 @@
bool vsp1_entity_is_streaming(struct vsp1_entity *entity)
{
+ unsigned long flags;
bool streaming;
- mutex_lock(&entity->lock);
+ spin_lock_irqsave(&entity->lock, flags);
streaming = entity->streaming;
- mutex_unlock(&entity->lock);
+ spin_unlock_irqrestore(&entity->lock, flags);
return streaming;
}
int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
{
+ unsigned long flags;
int ret;
- mutex_lock(&entity->lock);
+ spin_lock_irqsave(&entity->lock, flags);
entity->streaming = streaming;
- mutex_unlock(&entity->lock);
+ spin_unlock_irqrestore(&entity->lock, flags);
if (!streaming)
return 0;
@@ -49,9 +51,9 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler);
if (ret < 0) {
- mutex_lock(&entity->lock);
+ spin_lock_irqsave(&entity->lock, flags);
entity->streaming = false;
- mutex_unlock(&entity->lock);
+ spin_unlock_irqrestore(&entity->lock, flags);
}
return ret;
@@ -193,7 +195,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
if (i == ARRAY_SIZE(vsp1_routes))
return -EINVAL;
- mutex_init(&entity->lock);
+ spin_lock_init(&entity->lock);
entity->vsp1 = vsp1;
entity->source_pad = num_pads - 1;
@@ -228,6 +230,4 @@ void vsp1_entity_destroy(struct vsp1_entity *entity)
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
media_entity_cleanup(&entity->subdev.entity);
-
- mutex_destroy(&entity->lock);
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 62c768d1c6aa..8867a5787c28 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -14,7 +14,7 @@
#define __VSP1_ENTITY_H__
#include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <media/v4l2-subdev.h>
@@ -73,7 +73,7 @@ struct vsp1_entity {
struct vsp1_video *video;
- struct mutex lock; /* Protects the streaming field */
+ spinlock_t lock; /* Protects the streaming field */
bool streaming;
};
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index da3c573e1efc..25b48738b147 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -238,7 +238,7 @@
#define VI6_WPF_SZCLIP_EN (1 << 28)
#define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
#define VI6_WPF_SZCLIP_OFST_SHIFT 16
-#define VI6_WPF_SZCLIP_SIZE_MASK (0x1fff << 0)
+#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
#define VI6_WPF_SZCLIP_SIZE_SHIFT 0
#define VI6_WPF_OUTFMT 0x100c
@@ -304,9 +304,9 @@
#define VI6_DPR_HST_ROUTE 0x2044
#define VI6_DPR_HSI_ROUTE 0x2048
#define VI6_DPR_BRU_ROUTE 0x204c
-#define VI6_DPR_ROUTE_FXA_MASK (0xff << 8)
+#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
#define VI6_DPR_ROUTE_FXA_SHIFT 16
-#define VI6_DPR_ROUTE_FP_MASK (0xff << 8)
+#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
#define VI6_DPR_ROUTE_FP_SHIFT 8
#define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
#define VI6_DPR_ROUTE_RT_SHIFT 0
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index fa71f4695e16..9688c219b30e 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -197,6 +197,17 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
*/
format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SINK,
sel->which);
+
+ /* Restrict the crop rectangle coordinates to multiples of 2 to avoid
+ * shifting the color plane.
+ */
+ if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ sel->r.left = ALIGN(sel->r.left, 2);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = round_down(sel->r.width, 2);
+ sel->r.height = round_down(sel->r.height, 2);
+ }
+
sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
if (rwpf->entity.type == VSP1_ENTITY_WPF) {
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d91f19a9e1c1..3c124c14ce14 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1,7 +1,7 @@
/*
* vsp1_video.c -- R-Car VSP1 Video Node
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -245,7 +245,7 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
* the datasheet, strides not aligned to a multiple of 128 bytes result
* in image corruption.
*/
- for (i = 0; i < max(info->planes, 2U); ++i) {
+ for (i = 0; i < min(info->planes, 2U); ++i) {
unsigned int hsub = i > 0 ? info->hsub : 1;
unsigned int vsub = i > 0 ? info->vsub : 1;
unsigned int align = 128;
@@ -514,6 +514,18 @@ static void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
pipe->buffers_ready = 0;
}
+static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+ bool stopped;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return stopped;
+}
+
static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
{
struct vsp1_entity *entity;
@@ -525,7 +537,7 @@ static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
pipe->state = VSP1_PIPELINE_STOPPING;
spin_unlock_irqrestore(&pipe->irqlock, flags);
- ret = wait_event_timeout(pipe->wq, pipe->state == VSP1_PIPELINE_STOPPED,
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
msecs_to_jiffies(500));
ret = ret == 0 ? -ETIMEDOUT : 0;
@@ -703,6 +715,73 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
}
}
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ /* To avoid increasing the system suspend time needlessly, loop over the
+ * pipelines twice, first to set them all to the stopping state, and then
+ * to wait for the stop to complete.
+ */
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
+ wpf->entity.index);
+ }
+}
+
+void vsp1_pipelines_resume(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ /* Resume all running pipelines. */
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+ }
+}
+
/* -----------------------------------------------------------------------------
* videobuf2 Queue Operations
*/
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index fd2851a82e00..0887a4d2742c 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -1,7 +1,7 @@
/*
* vsp1_video.h -- R-Car VSP1 Video Node
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -149,4 +149,7 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
struct vsp1_entity *input,
unsigned int alpha);
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
+void vsp1_pipelines_resume(struct vsp1_device *vsp1);
+
#endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 98e50e446d57..e779c93cb015 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -699,8 +699,10 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
/* ... and the buffers queue... */
dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
- if (IS_ERR(dma->alloc_ctx))
+ if (IS_ERR(dma->alloc_ctx)) {
+ ret = PTR_ERR(dma->alloc_ctx);
goto error;
+ }
/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
* V4L2 APIs would be inefficient. Testing on the command line with a
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index cc3990111411..a1930b300c06 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -526,7 +526,6 @@ MODULE_DEVICE_TABLE(i2c, tea5764_id);
static struct i2c_driver tea5764_i2c_driver = {
.driver = {
.name = "radio-tea5764",
- .owner = THIS_MODULE,
},
.probe = tea5764_i2c_probe,
.remove = tea5764_i2c_remove,
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index ec805b09c608..ba8e357ba0a2 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -336,19 +336,7 @@ static const struct v4l2_ctrl_ops saa7706h_ctrl_ops = {
.s_ctrl = saa7706h_s_ctrl,
};
-static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
-};
-
-static const struct v4l2_subdev_ops saa7706h_ops = {
- .core = &saa7706h_core_ops,
-};
+static const struct v4l2_subdev_ops empty_ops = {};
/*
* Generic i2c probe
@@ -373,7 +361,7 @@ static int saa7706h_probe(struct i2c_client *client,
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
- v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
+ v4l2_i2c_subdev_init(sd, client, &empty_ops);
v4l2_ctrl_handler_init(&state->hdl, 4);
v4l2_ctrl_new_std(&state->hdl, &saa7706h_ctrl_ops,
@@ -429,7 +417,6 @@ MODULE_DEVICE_TABLE(i2c, saa7706h_id);
static struct i2c_driver saa7706h_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
},
.probe = saa7706h_probe,
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index a9319a24c7ef..9f879f0ec0ef 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -195,7 +195,6 @@ MODULE_DEVICE_TABLE(i2c, tef6862_id);
static struct i2c_driver tef6862_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
},
.probe = tef6862_probe,
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 704397f3c106..ebc73b034249 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -689,7 +689,6 @@ static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
struct fm_rdsdata_format *rds_format)
{
- u8 byte1;
u8 index = 0;
u8 *rds_buff;
@@ -701,9 +700,7 @@ static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
if (fmdev->asci_id != 0x6350) {
rds_buff = &rds_format->data.groupdatabuff.buff[0];
while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
- byte1 = rds_buff[index];
- rds_buff[index] = rds_buff[index + 1];
- rds_buff[index + 1] = byte1;
+ swap(rds_buff[index], rds_buff[index + 1]);
index += 2;
}
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index ddfab256b9a5..b6e13116c6f5 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -371,21 +371,21 @@ config RC_ST
tristate "ST remote control receiver"
depends on RC_CORE
depends on ARCH_STI || COMPILE_TEST
- help
- Say Y here if you want support for ST remote control driver
- which allows both IR and UHF RX.
- The driver passes raw pulse and space information to the LIRC decoder.
+ ---help---
+ Say Y here if you want support for ST remote control driver
+ which allows both IR and UHF RX.
+ The driver passes raw pulse and space information to the LIRC decoder.
- If you're not sure, select N here.
+ If you're not sure, select N here.
config IR_SUNXI
- tristate "SUNXI IR remote control"
- depends on RC_CORE
- depends on ARCH_SUNXI || COMPILE_TEST
- ---help---
- Say Y if you want to use sunXi internal IR Controller
-
- To compile this driver as a module, choose M here: the module will
- be called sunxi-ir.
+ tristate "SUNXI IR remote control"
+ depends on RC_CORE
+ depends on ARCH_SUNXI || COMPILE_TEST
+ ---help---
+ Say Y if you want to use sunXi internal IR Controller
+
+ To compile this driver as a module, choose M here: the module will
+ be called sunxi-ir.
endif #RC_DEVICES
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 98893a8332c7..a32659fcd266 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,9 +35,6 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!(dev->enabled_protocols & RC_BIT_LIRC))
- return 0;
-
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
return -EINVAL;
@@ -424,7 +421,7 @@ static int ir_lirc_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler lirc_handler = {
- .protocols = RC_BIT_LIRC,
+ .protocols = 0,
.decode = ir_lirc_decode,
.raw_register = ir_lirc_register,
.raw_unregister = ir_lirc_unregister,
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc5_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
- {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5X_SPACE,
- },
- {
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
- },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
- unsigned int data, xdata, command, commandx, system;
-
- /* Detect protocol and convert scancode to raw data */
- if (protocols & RC_BIT_RC5 &&
- !ir_rc5_validate_filter(scancode, 0x1f7f)) {
- /* decode scancode */
- command = (scancode->data & 0x003f) >> 0;
- commandx = (scancode->data & 0x0040) >> 6;
- system = (scancode->data & 0x1f00) >> 8;
- /* encode data */
- data = !commandx << 12 | system << 6 | command;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5X &&
- !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
- /* decode scancode */
- xdata = (scancode->data & 0x00003f) >> 0;
- command = (scancode->data & 0x003f00) >> 8;
- commandx = (scancode->data & 0x004000) >> 14;
- system = (scancode->data & 0x1f0000) >> 16;
- /* commandx and system overlap, bits must match when encoded */
- if (commandx == (system & 0x1))
- return -EINVAL;
- /* encode data */
- data = 1 << 18 | system << 12 | command << 6 | xdata;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
- CHECK_RC5X_NBITS,
- data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
- if (ret < 0)
- return ret;
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc5x_timings[1],
- RC5X_NBITS - CHECK_RC5X_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5_SZ &&
- !ir_rc5_validate_filter(scancode, 0x2fff)) {
- /* RC5-SZ scancode is raw enough for Manchester as it is */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
- RC5_SZ_NBITS, scancode->data & 0x2fff);
- if (ret < 0)
- return ret;
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc5_handler = {
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
- .encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)
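Editor's note: the removed ir_rc5_validate_filter() (and its RC6 twin further down) gates encoding on two invariants: every protocol-relevant bit must be covered by the filter mask, and no masked-in bit outside that set may be set in the data. A minimal user-space sketch of that rule, with hypothetical test values and rc_scancode_filter reduced to its two relevant fields:

/*
 * Validation rule from the removed ir_rc5_validate_filter():
 * meaningful bits must all be masked in, and any bit outside the
 * meaningful set must be zero in the data.
 */
#include <stdio.h>

struct rc_scancode_filter {
	unsigned int data;
	unsigned int mask;
};

static int validate_filter(const struct rc_scancode_filter *f,
			   unsigned int important_bits)
{
	if (~f->mask & important_bits)		/* meaningful bit left unmasked */
		return -1;
	if (f->mask & f->data & ~important_bits)	/* stray bit set in data */
		return -1;
	return 0;
}

int main(void)
{
	struct rc_scancode_filter ok  = { .data = 0x1e3f, .mask = 0x1f7f };
	struct rc_scancode_filter bad = { .data = 0x1e3f, .mask = 0x00ff };

	printf("ok:  %d\n", validate_filter(&ok, 0x1f7f));	/* 0 */
	printf("bad: %d\n", validate_filter(&bad, 0x1f7f));	/* -1 */
	return 0;
}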
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
- {
- .leader = RC6_PREFIX_PULSE,
- .pulse_space_start = 0,
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_PREFIX_SPACE,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT * 2,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_SUFFIX_SPACE,
- },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
-
- if (protocols & RC_BIT_RC6_0 &&
- !ir_rc6_validate_filter(scancode, 0xffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Mode-0) */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3], RC6_0_NBITS,
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
- RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
- !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Header-version 6) */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3 | 6));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3],
- fls(scancode->mask),
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
- .encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/keymaps/rc-lirc.c b/drivers/media/rc/keymaps/rc-lirc.c
index fbf08fa6f46e..e172f5db5803 100644
--- a/drivers/media/rc/keymaps/rc-lirc.c
+++ b/drivers/media/rc/keymaps/rc-lirc.c
@@ -20,7 +20,7 @@ static struct rc_map_list lirc_map = {
.map = {
.scan = lirc,
.size = ARRAY_SIZE(lirc),
- .rc_type = RC_TYPE_LIRC,
+ .rc_type = RC_TYPE_OTHER,
.name = RC_MAP_LIRC,
}
};
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index 51f18bb50a37..2b0027c41332 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -15,74 +15,74 @@
static struct rc_map_table lme2510_rc[] = {
/* Type 1 - 26 buttons */
- { 0x10ed45, KEY_0 },
- { 0x10ed5f, KEY_1 },
- { 0x10ed50, KEY_2 },
- { 0x10ed5d, KEY_3 },
- { 0x10ed41, KEY_4 },
- { 0x10ed0a, KEY_5 },
- { 0x10ed42, KEY_6 },
- { 0x10ed47, KEY_7 },
- { 0x10ed49, KEY_8 },
- { 0x10ed05, KEY_9 },
- { 0x10ed43, KEY_POWER },
- { 0x10ed46, KEY_SUBTITLE },
- { 0x10ed06, KEY_PAUSE },
- { 0x10ed03, KEY_MEDIA_REPEAT},
- { 0x10ed02, KEY_PAUSE },
- { 0x10ed5e, KEY_VOLUMEUP },
- { 0x10ed5c, KEY_VOLUMEDOWN },
- { 0x10ed09, KEY_CHANNELUP },
- { 0x10ed1a, KEY_CHANNELDOWN },
- { 0x10ed1e, KEY_PLAY },
- { 0x10ed1b, KEY_ZOOM },
- { 0x10ed59, KEY_MUTE },
- { 0x10ed5a, KEY_TV },
- { 0x10ed18, KEY_RECORD },
- { 0x10ed07, KEY_EPG },
- { 0x10ed01, KEY_STOP },
+ { 0xef12ba45, KEY_0 },
+ { 0xef12a05f, KEY_1 },
+ { 0xef12af50, KEY_2 },
+ { 0xef12a25d, KEY_3 },
+ { 0xef12be41, KEY_4 },
+ { 0xef12f50a, KEY_5 },
+ { 0xef12bd42, KEY_6 },
+ { 0xef12b847, KEY_7 },
+ { 0xef12b649, KEY_8 },
+ { 0xef12fa05, KEY_9 },
+ { 0xef12bc43, KEY_POWER },
+ { 0xef12b946, KEY_SUBTITLE },
+ { 0xef12f906, KEY_PAUSE },
+ { 0xef12fc03, KEY_MEDIA_REPEAT},
+ { 0xef12fd02, KEY_PAUSE },
+ { 0xef12a15e, KEY_VOLUMEUP },
+ { 0xef12a35c, KEY_VOLUMEDOWN },
+ { 0xef12f609, KEY_CHANNELUP },
+ { 0xef12e51a, KEY_CHANNELDOWN },
+ { 0xef12e11e, KEY_PLAY },
+ { 0xef12e41b, KEY_ZOOM },
+ { 0xef12a659, KEY_MUTE },
+ { 0xef12a55a, KEY_TV },
+ { 0xef12e718, KEY_RECORD },
+ { 0xef12f807, KEY_EPG },
+ { 0xef12fe01, KEY_STOP },
/* Type 2 - 20 buttons */
- { 0xbf15, KEY_0 },
- { 0xbf08, KEY_1 },
- { 0xbf09, KEY_2 },
- { 0xbf0a, KEY_3 },
- { 0xbf0c, KEY_4 },
- { 0xbf0d, KEY_5 },
- { 0xbf0e, KEY_6 },
- { 0xbf10, KEY_7 },
- { 0xbf11, KEY_8 },
- { 0xbf12, KEY_9 },
- { 0xbf00, KEY_POWER },
- { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
- { 0xbf1a, KEY_PAUSE }, /* Timeshift */
- { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0xbf01, KEY_CHANNELUP },
- { 0xbf05, KEY_CHANNELDOWN },
- { 0xbf14, KEY_ZOOM },
- { 0xbf18, KEY_RECORD },
- { 0xbf16, KEY_STOP },
+ { 0xff40ea15, KEY_0 },
+ { 0xff40f708, KEY_1 },
+ { 0xff40f609, KEY_2 },
+ { 0xff40f50a, KEY_3 },
+ { 0xff40f30c, KEY_4 },
+ { 0xff40f20d, KEY_5 },
+ { 0xff40f10e, KEY_6 },
+ { 0xff40ef10, KEY_7 },
+ { 0xff40ee11, KEY_8 },
+ { 0xff40ed12, KEY_9 },
+ { 0xff40ff00, KEY_POWER },
+ { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0xff40e51a, KEY_PAUSE }, /* Timeshift */
+ { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0xff40fe01, KEY_CHANNELUP },
+ { 0xff40fa05, KEY_CHANNELDOWN },
+ { 0xff40eb14, KEY_ZOOM },
+ { 0xff40e718, KEY_RECORD },
+ { 0xff40e916, KEY_STOP },
/* Type 3 - 20 buttons */
- { 0x1c, KEY_0 },
- { 0x07, KEY_1 },
- { 0x15, KEY_2 },
- { 0x09, KEY_3 },
- { 0x16, KEY_4 },
- { 0x19, KEY_5 },
- { 0x0d, KEY_6 },
- { 0x0c, KEY_7 },
- { 0x18, KEY_8 },
- { 0x5e, KEY_9 },
- { 0x45, KEY_POWER },
- { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
- { 0x4a, KEY_PAUSE }, /* Timeshift */
- { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0x46, KEY_CHANNELUP },
- { 0x40, KEY_CHANNELDOWN },
- { 0x08, KEY_ZOOM },
- { 0x42, KEY_RECORD },
- { 0x5a, KEY_STOP },
+ { 0xff00e31c, KEY_0 },
+ { 0xff00f807, KEY_1 },
+ { 0xff00ea15, KEY_2 },
+ { 0xff00f609, KEY_3 },
+ { 0xff00e916, KEY_4 },
+ { 0xff00e619, KEY_5 },
+ { 0xff00f20d, KEY_6 },
+ { 0xff00f30c, KEY_7 },
+ { 0xff00e718, KEY_8 },
+ { 0xff00a15e, KEY_9 },
+ { 0xff00ba45, KEY_POWER },
+ { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0xff00b54a, KEY_PAUSE }, /* Timeshift */
+ { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0xff00b946, KEY_CHANNELUP },
+ { 0xff00bf40, KEY_CHANNELDOWN },
+ { 0xff00f708, KEY_ZOOM },
+ { 0xff00bd42, KEY_RECORD },
+ { 0xff00a55a, KEY_STOP },
};
static struct rc_map_list lme2510_map = {
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
- const u8 *wakeup_sample_buf, int count)
-{
- int i = 0;
- u8 reg, reg_learn_mode;
- unsigned long flags;
- struct nvt_dev *nvt = dev->priv;
-
- nvt_dbg_wake("writing wakeup samples");
-
- reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
- reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
- reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
- /* Lock the learn area to prevent racing with wake-isr */
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
- /* Enable fifo writes */
- nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
- /* Clear cir wake rx fifo */
- nvt_clear_cir_wake_fifo(nvt);
-
- if (count > WAKE_FIFO_LEN) {
- nvt_dbg_wake("HW FIFO too small for all wake samples");
- count = WAKE_FIFO_LEN;
- }
-
- if (count)
- pr_info("Wake samples (%d) =", count);
- else
- pr_info("Wake sample fifo cleared");
-
- /* Write wake samples to fifo */
- for (i = 0; i < count; i++) {
- pr_cont(" %02x", wakeup_sample_buf[i]);
- nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
- CIR_WAKE_WR_FIFO_DATA);
- }
- pr_cont("\n");
-
- /* Switch cir to wakeup mode and disable fifo writing */
- nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
- /* Set number of bytes needed for wake */
- nvt_cir_wake_reg_write(nvt, count ? count :
- CIR_WAKE_FIFO_CMP_BYTES,
- CIR_WAKE_FIFO_CMP_DEEP);
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
- return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- u8 *reg_buf;
- u8 buf_val;
- int i, ret, count;
- unsigned int val;
- struct ir_raw_event *raw;
- bool complete;
-
- /* Require both mask and data to be set before actually committing */
- if (!sc_filter->mask || !sc_filter->data)
- return 0;
-
- raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
- if (!raw)
- return -ENOMEM;
-
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, WAKE_FIFO_LEN);
- complete = (ret != -ENOBUFS);
- if (!complete)
- ret = WAKE_FIFO_LEN;
- else if (ret < 0)
- goto out_raw;
-
- reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
- if (!reg_buf) {
- ret = -ENOMEM;
- goto out_raw;
- }
-
- /* Inspect the ir samples */
- for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
- val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
- /* Split too large values into several smaller ones */
- while (val > 0 && count < WAKE_FIFO_LEN) {
-
- /* Skip last value for better comparison tolerance */
- if (complete && i == ret - 1 && val < BUF_LEN_MASK)
- break;
-
- /* Clamp values to BUF_LEN_MASK at most */
- buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
- reg_buf[count] = buf_val;
- val -= buf_val;
- if ((raw[i]).pulse)
- reg_buf[count] |= BUF_PULSE_BIT;
- count++;
- }
- }
-
- ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
- kfree(reg_buf);
-out_raw:
- kfree(raw);
-
- return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
- u64 *rc_type)
-{
- return 0;
-}
-
/*
* nvt_tx_ir
*
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->encode_wakeup = true;
rdev->allowed_protocols = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
- rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
- rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
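Editor's note: the removed nvt_ir_raw_set_wakeup_filter() had one non-obvious step: each raw IR duration is scaled to SAMPLE_PERIOD units and, since the wake FIFO stores a 7-bit length per byte, anything longer is split across several entries that all carry the pulse flag. A standalone sketch of just that splitting loop; the 0x7f/0x80 layout mirrors the driver's BUF_LEN_MASK/BUF_PULSE_BIT and is an assumption here:

/*
 * A duration measured in sample periods is clamped to the 7-bit
 * length field and spilled into as many FIFO bytes as needed, each
 * tagged with the pulse bit.
 */
#include <stdio.h>

#define LEN_MASK  0x7f
#define PULSE_BIT 0x80

static int split_sample(unsigned int units, int pulse,
			unsigned char *out, int space)
{
	int n = 0;

	while (units > 0 && n < space) {
		unsigned char v = units > LEN_MASK ? LEN_MASK : units;

		out[n] = v | (pulse ? PULSE_BIT : 0);
		units -= v;
		n++;
	}
	return n;	/* FIFO bytes consumed */
}

int main(void)
{
	unsigned char buf[8];
	int i, n = split_sample(300, 1, buf, 8);	/* 127 + 127 + 46 */

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* ff ff ae */
	return 0;
}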
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
*/
#define TX_BUF_LEN 256
#define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
struct nvt_dev {
struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
- int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
- unsigned int pulse,
- u32 duration)
-{
- init_ir_raw_event(ev);
- ev->duration = duration;
- ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader: duration of leader pulse (if any) 0 if continuing
- * existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock: duration of each pulse/space in ns
- * @invert: if set clock logic is inverted
- * (0 = space + pulse, 1 = pulse + space)
- * @trailer_space: duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
- unsigned int leader;
- unsigned int pulse_space_start:1;
- unsigned int clock;
- unsigned int invert:1;
- unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data);
-
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..ad260520a9d4 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-static u64 encode_protocols;
static int ir_raw_event_thread(void *data)
{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
return protocols;
}
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
- u64 protocols;
-
- mutex_lock(&ir_raw_handler_lock);
- protocols = encode_protocols;
- mutex_unlock(&ir_raw_handler_lock);
- return protocols;
-}
-
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev: Pointer to pointer to next free event. *@ev is incremented for
- * each raw event filled.
- * @max: Maximum number of raw events to fill.
- * @timings: Manchester modulation timings.
- * @n: Number of bits of data.
- * @data: Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns: 0 on success.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * full encoded data. In this case all @max events will have been
- * written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data)
-{
- bool need_pulse;
- unsigned int i;
- int ret = -ENOBUFS;
-
- i = 1 << (n - 1);
-
- if (timings->leader) {
- if (!max--)
- return ret;
- if (timings->pulse_space_start) {
- init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
- if (!max--)
- return ret;
- init_ir_raw_event_duration((*ev), 0, timings->leader);
- } else {
- init_ir_raw_event_duration((*ev), 1, timings->leader);
- }
- i >>= 1;
- } else {
- /* continue existing signal */
- --(*ev);
- }
- /* from here on *ev will point to the last event rather than the next */
-
- while (n && i > 0) {
- need_pulse = !(data & i);
- if (timings->invert)
- need_pulse = !need_pulse;
- if (need_pulse == !!(*ev)->pulse) {
- (*ev)->duration += timings->clock;
- } else {
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), need_pulse,
- timings->clock);
- }
-
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), !need_pulse,
- timings->clock);
- i >>= 1;
- }
-
- if (timings->trailer_space) {
- if (!(*ev)->pulse)
- (*ev)->duration += timings->trailer_space;
- else if (!max--)
- goto nobufs;
- else
- init_ir_raw_event_duration(++(*ev), 0,
- timings->trailer_space);
- }
-
- ret = 0;
-nobufs:
- /* point to the next event rather than last event before returning */
- ++(*ev);
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols: permitted protocols
- * @scancode: scancode filter describing a single scancode
- * @events: array of raw events to write into
- * @max: max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid, or if no
- * compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- struct ir_raw_handler *handler;
- int ret = -EINVAL;
-
- mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list) {
- if (handler->protocols & protocols && handler->encode) {
- ret = handler->encode(protocols, scancode, events, max);
- if (ret >= 0 || ret == -ENOBUFS)
- break;
- }
- }
- mutex_unlock(&ir_raw_handler_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
/*
* Used to (un)register raw event clients
*/
@@ -406,7 +271,7 @@ int ir_raw_event_register(struct rc_dev *dev)
spin_lock_init(&dev->raw->lock);
dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
- "rc%ld", dev->devno);
+ "rc%u", dev->minor);
if (IS_ERR(dev->raw->thread)) {
rc = PTR_ERR(dev->raw->thread);
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
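Editor's note: for reference, the deleted ir_raw_gen_manchester() turns each data bit into two half-clock events (space then pulse for a 1, pulse then space for a 0 when invert is clear) and merges adjacent events of equal polarity. A toy re-implementation in plain C, using abstract clock units rather than nanoseconds, shows the resulting stream for the 3-bit value 0b110:

/*
 * Each bit becomes two half-periods; adjacent events with the same
 * polarity are merged into one longer event, as the kernel helper did.
 */
#include <stdio.h>

struct ev { int pulse; unsigned int dur; };

static int manchester(unsigned int data, unsigned int nbits, int invert,
		      unsigned int clock, struct ev *out)
{
	int n = 0;
	unsigned int i;

	for (i = 1u << (nbits - 1); i; i >>= 1) {
		int first = !(data & i) ^ invert;	/* 1st half polarity */
		int half;

		for (half = 0; half < 2; half++, first = !first) {
			if (n && out[n - 1].pulse == first)
				out[n - 1].dur += clock;	/* merge */
			else
				out[n++] = (struct ev){ first, clock };
		}
	}
	return n;
}

int main(void)
{
	struct ev e[16];
	int i, n = manchester(0x6 /* 0b110 */, 3, 0, 1, e);

	for (i = 0; i < n; i++)
		printf("%s(%u) ", e[i].pulse ? "pulse" : "space", e[i].dur);
	printf("\n");	/* space(1) pulse(1) space(1) pulse(2) space(1) */
	return 0;
}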
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
return 0;
}
-static int loop_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- static const unsigned int max = 512;
- struct ir_raw_event *raw;
- int ret;
- int i;
-
- /* fine to disable filter */
- if (!sc_filter->mask)
- return 0;
-
- /* encode the specified filter and loop it back */
- raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, max);
- /* still loop back the partial raw IR even if it's incomplete */
- if (ret == -ENOBUFS)
- ret = max;
- if (ret >= 0) {
- /* do the loopback */
- for (i = 0; i < ret; ++i)
- ir_raw_event_store(dev, &raw[i]);
- ir_raw_event_handle(dev);
-
- ret = 0;
- }
-
- kfree(raw);
-
- return ret;
-}
-
static int __init loop_init(void)
{
struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..3f0f71adabb4 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -18,17 +18,15 @@
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include <linux/device.h>
#include <linux/module.h>
#include "rc-core-priv.h"
-/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
-#define IRRCV_NUM_DEVICES 256
-static DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
-
/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
#define IR_TAB_MIN_SIZE 256
#define IR_TAB_MAX_SIZE 8192
+#define RC_DEV_MAX 256
/* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */
#define IR_KEYPRESS_TIMEOUT 250
@@ -38,6 +36,9 @@ static LIST_HEAD(rc_map_list);
static DEFINE_SPINLOCK(rc_map_lock);
static struct led_trigger *led_feedback;
+/* Used to keep track of rc devices */
+static DEFINE_IDA(rc_ida);
+
static struct rc_map_list *seek_rc_map(const char *name)
{
struct rc_map_list *map = NULL;
@@ -799,7 +800,6 @@ static struct {
{ RC_BIT_SANYO, "sanyo" },
{ RC_BIT_SHARP, "sharp" },
{ RC_BIT_MCE_KBD, "mce_kbd" },
- { RC_BIT_LIRC, "lirc" },
{ RC_BIT_XMP, "xmp" },
};
@@ -828,6 +828,23 @@ struct rc_filter_attribute {
.mask = (_mask), \
}
+static bool lirc_is_present(void)
+{
+#if defined(CONFIG_LIRC_MODULE)
+ struct module *lirc;
+
+ mutex_lock(&module_mutex);
+ lirc = find_module("lirc_dev");
+ mutex_unlock(&module_mutex);
+
+ return lirc ? true : false;
+#elif defined(CONFIG_LIRC)
+ return true;
+#else
+ return false;
+#endif
+}
+
/**
* show_protocols() - shows the current/wakeup IR protocol(s)
* @device: the device descriptor
@@ -865,8 +882,6 @@ static ssize_t show_protocols(struct device *device,
} else {
enabled = dev->enabled_wakeup_protocols;
allowed = dev->allowed_wakeup_protocols;
- if (dev->encode_wakeup && !allowed)
- allowed = ir_raw_get_encode_protocols();
}
mutex_unlock(&dev->lock);
@@ -884,6 +899,9 @@ static ssize_t show_protocols(struct device *device,
allowed &= ~proto_names[i].type;
}
+ if (dev->driver_type == RC_DRIVER_IR_RAW && lirc_is_present())
+ tmp += sprintf(tmp, "[lirc] ");
+
if (tmp != buf)
tmp--;
*tmp = '\n';
@@ -935,8 +953,12 @@ static int parse_protocol_change(u64 *protocols, const char *buf)
}
if (i == ARRAY_SIZE(proto_names)) {
- IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- return -EINVAL;
+ if (!strcasecmp(tmp, "lirc"))
+ mask = 0;
+ else {
+ IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
+ return -EINVAL;
+ }
}
count++;
@@ -1193,9 +1215,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
- if (!dev || !dev->input_dev)
- return -ENODEV;
-
if (dev->rc_map.name)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
@@ -1314,7 +1333,9 @@ int rc_register_device(struct rc_dev *dev)
static bool raw_init = false; /* raw decoders loaded? */
struct rc_map *rc_map;
const char *path;
- int rc, devno, attr = 0;
+ int attr = 0;
+ int minor;
+ int rc;
if (!dev || !dev->map_name)
return -EINVAL;
@@ -1334,13 +1355,13 @@ int rc_register_device(struct rc_dev *dev)
if (dev->close)
dev->input_dev->close = ir_close;
- do {
- devno = find_first_zero_bit(ir_core_dev_number,
- IRRCV_NUM_DEVICES);
- /* No free device slots */
- if (devno >= IRRCV_NUM_DEVICES)
- return -ENOMEM;
- } while (test_and_set_bit(devno, ir_core_dev_number));
+ minor = ida_simple_get(&rc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+
+ dev->minor = minor;
+ dev_set_name(&dev->dev, "rc%u", dev->minor);
+ dev_set_drvdata(&dev->dev, dev);
dev->dev.groups = dev->sysfs_groups;
dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
@@ -1360,9 +1381,6 @@ int rc_register_device(struct rc_dev *dev)
*/
mutex_lock(&dev->lock);
- dev->devno = devno;
- dev_set_name(&dev->dev, "rc%ld", dev->devno);
- dev_set_drvdata(&dev->dev, dev);
rc = device_add(&dev->dev);
if (rc)
goto out_unlock;
@@ -1408,16 +1426,13 @@ int rc_register_device(struct rc_dev *dev)
path ? path : "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+ if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* Load raw decoders, if they aren't already */
if (!raw_init) {
IR_dprintk(1, "Loading raw decoders\n");
ir_raw_init();
raw_init = true;
}
- }
-
- if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* calls ir_register_device so unlock mutex here*/
mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
@@ -1428,8 +1443,6 @@ int rc_register_device(struct rc_dev *dev)
if (dev->change_protocol) {
u64 rc_type = (1ll << rc_map->rc_type);
- if (dev->driver_type == RC_DRIVER_IR_RAW)
- rc_type |= RC_BIT_LIRC;
rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_raw;
@@ -1438,8 +1451,8 @@ int rc_register_device(struct rc_dev *dev)
mutex_unlock(&dev->lock);
- IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n",
- dev->devno,
+ IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
+ dev->minor,
dev->driver_name ? dev->driver_name : "unknown",
rc_map->name ? rc_map->name : "unknown",
dev->driver_type == RC_DRIVER_IR_RAW ? "raw" : "cooked");
@@ -1458,7 +1471,7 @@ out_dev:
device_del(&dev->dev);
out_unlock:
mutex_unlock(&dev->lock);
- clear_bit(dev->devno, ir_core_dev_number);
+ ida_simple_remove(&rc_ida, minor);
return rc;
}
EXPORT_SYMBOL_GPL(rc_register_device);
@@ -1470,8 +1483,6 @@ void rc_unregister_device(struct rc_dev *dev)
del_timer_sync(&dev->timer_keyup);
- clear_bit(dev->devno, ir_core_dev_number);
-
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
@@ -1484,6 +1495,8 @@ void rc_unregister_device(struct rc_dev *dev)
device_del(&dev->dev);
+ ida_simple_remove(&rc_ida, dev->minor);
+
rc_free_device(dev);
}
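Editor's note: the minor-number change above swaps a DECLARE_BITMAP plus test_and_set_bit() loop for the IDA allocator, which hands out the lowest free ID and is serialized internally. The pattern in isolation (kernel context assumed, error handling trimmed):

/*
 * ida_simple_get() returns the lowest free number in [0, RC_DEV_MAX)
 * or a negative errno; ida_simple_remove() gives it back on
 * unregister or on the error path.
 */
#include <linux/idr.h>
#include <linux/gfp.h>

#define RC_DEV_MAX 256

static DEFINE_IDA(rc_ida);

static int rc_alloc_minor(void)
{
	return ida_simple_get(&rc_ida, 0, RC_DEV_MAX, GFP_KERNEL);
}

static void rc_free_minor(int minor)
{
	ida_simple_remove(&rc_ida, minor);
}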
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 8294af909174..05998f0254c6 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -15,7 +15,7 @@ config MEDIA_TUNER
select MEDIA_TUNER_MC44S803 if MEDIA_SUBDRV_AUTOSELECT
menu "Customize TV tuners"
- visible if !MEDIA_SUBDRV_AUTOSELECT
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
config MEDIA_TUNER_SIMPLE
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 03538f88f488..564a000f503e 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -752,7 +752,6 @@ MODULE_DEVICE_TABLE(i2c, e4000_id_table);
static struct i2c_driver e4000_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "e4000",
.suppress_bind_attrs = true,
},
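Editor's note: this and the following tuner hunks can drop .owner because the i2c core fills it in: i2c_add_driver() is a macro that passes THIS_MODULE to i2c_register_driver(), which sets driver.driver.owner. A minimal skeleton of the era's idiom, with illustrative names:

/*
 * module_i2c_driver() expands to i2c_add_driver()/i2c_del_driver(),
 * so no explicit .owner assignment is needed. "demo" is a placeholder.
 */
#include <linux/i2c.h>
#include <linux/module.h>

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	return 0;
}

static int demo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id demo_id[] = {
	{ "demo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo",	/* no .owner needed */
	},
	.probe    = demo_probe,
	.remove   = demo_remove,
	.id_table = demo_id,
};
module_i2c_driver(demo_driver);

MODULE_LICENSE("GPL");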
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 12f916e53150..f4d4665de168 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -632,7 +632,6 @@ MODULE_DEVICE_TABLE(i2c, fc2580_id_table);
static struct i2c_driver fc2580_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "fc2580",
.suppress_bind_attrs = true,
},
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index a076c87eda7a..5c96da693289 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -463,7 +463,6 @@ MODULE_DEVICE_TABLE(i2c, it913x_id_table);
static struct i2c_driver it913x_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "it913x",
},
.probe = it913x_probe,
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index d4c13fe6e7b3..504bfbc4027a 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -729,7 +729,6 @@ MODULE_DEVICE_TABLE(i2c, m88rs6000t_id);
static struct i2c_driver m88rs6000t_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "m88rs6000t",
},
.probe = m88rs6000t_probe,
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index a6245ef379c4..507382160e5e 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -469,7 +469,6 @@ MODULE_DEVICE_TABLE(i2c, si2157_id_table);
static struct i2c_driver si2157_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "si2157",
},
.probe = si2157_probe,
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index d93e0667b46b..7b8068354fea 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -277,7 +277,6 @@ MODULE_DEVICE_TABLE(i2c, tda18212_id);
static struct i2c_driver tda18212_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tda18212",
},
.probe = tda18212_probe,
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index d4f6ca0c4d92..9d70378fe2d3 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -267,7 +267,6 @@ MODULE_DEVICE_TABLE(i2c, tua9001_id_table);
static struct i2c_driver tua9001_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tua9001",
.suppress_bind_attrs = true,
},
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 4069234abed5..8f2e1c277c5f 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -937,9 +937,6 @@ static int airspy_set_if_gain(struct airspy *s)
ret = airspy_ctrl_msg(s, CMD_SET_VGA_GAIN, 0, s->if_gain->val,
&u8tmp, 1);
if (ret)
- goto err;
-err:
- if (ret)
dev_dbg(s->dev, "failed=%d\n", ret);
return ret;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index c6ff8968286a..9798160698a3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1875,7 +1875,7 @@ static int cx231xx_close(struct file *filp)
v4l2_fh_exit(&fh->fh);
kfree(fh);
dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
+ wake_up_interruptible(&dev->open);
return 0;
}
@@ -1908,7 +1908,7 @@ static int cx231xx_close(struct file *filp)
}
v4l2_fh_exit(&fh->fh);
kfree(fh);
- wake_up_interruptible_nr(&dev->open, 1);
+ wake_up_interruptible(&dev->open);
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 4cc55b3a0558..3721ee63b8fb 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -348,15 +348,16 @@ static void lme2510_int_response(struct urb *lme_urb)
switch (ibuf[0]) {
case 0xaa:
debug_data_snipet(1, "INT Remote data snipet", ibuf);
- if ((ibuf[4] + ibuf[5]) == 0xff) {
- key = RC_SCANCODE_NECX((ibuf[2] ^ 0xff) << 8 |
- (ibuf[3] > 0) ? (ibuf[3] ^ 0xff) : 0,
- ibuf[5]);
- deb_info(1, "INT Key =%08x", key);
- if (adap_to_d(adap)->rc_dev != NULL)
- rc_keydown(adap_to_d(adap)->rc_dev,
- RC_TYPE_NEC, key, 0);
- }
+ if (!adap_to_d(adap)->rc_dev)
+ break;
+
+ key = RC_SCANCODE_NEC32(ibuf[2] << 24 |
+ ibuf[3] << 16 |
+ ibuf[4] << 8 |
+ ibuf[5]);
+
+ deb_info(1, "INT Key = 0x%08x", key);
+ rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC, key, 0);
break;
case 0xbb:
switch (st->tuner_config) {
@@ -1344,7 +1345,7 @@ module_usb_driver(lme2510_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("2.06");
+MODULE_VERSION("2.07");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LME2510_C_S7395);
MODULE_FIRMWARE(LME2510_C_LG);
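Editor's note: the new keymap values in rc-lme2510.c above and this decoder change are two halves of the same fix: the four NEC bytes from the interrupt URB are now packed MSB-first into one 32-bit scancode instead of a truncated form. A standalone arithmetic check, taking the byte values from the Type-1 keymap rather than captured traffic:

/*
 * 0xef 0x12 0xba 0x45 packs to 0xef12ba45, the KEY_0 entry in the
 * Type-1 table above.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t nec32_scancode(const uint8_t *b)
{
	return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	       (uint32_t)b[2] << 8  | b[3];
}

int main(void)
{
	const uint8_t ibuf_2_5[4] = { 0xef, 0x12, 0xba, 0x45 };

	printf("0x%08x\n", nec32_scancode(ibuf_2_5));	/* 0xef12ba45 */
	return 0;
}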
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index d17618fe8f5c..ec397c4b7cc8 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -611,7 +611,7 @@ static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
return 0;
failed:
- memset(mac, 0, 6);
+ eth_zero_addr(mac);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 03f334d3a8f4..6c3c47722955 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -707,7 +707,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.stream = {
.type = USB_ISOC,
- .count = 8,
+ .count = 4,
.endpoint = 0x2,
.u = {
.isoc = {
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a38248360833..357be76c7a55 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -808,10 +808,6 @@ static struct tda18271_config em28xx_cxd2820r_tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
};
-static const struct a8293_config em28xx_a8293_config = {
- .i2c_addr = 0x08, /* (0x10 >> 1) */
-};
-
static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
.demod_address = (0x1e >> 1),
.disable_i2c_gate_ctrl = 1,
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 5c2a49534d2b..1466db150d82 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -629,7 +629,6 @@ MODULE_DEVICE_TABLE(i2c, s2250_id);
static struct i2c_driver s2250_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "s2250",
},
.probe = s2250_probe,
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
index 7cbc3a00bda8..bf6b215438e3 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
@@ -177,7 +177,7 @@ static int rotation_thread_function(void *data)
__s32 vflip, hflip;
set_current_state(TASK_INTERRUPTIBLE);
- while (!schedule_timeout(100)) {
+ while (!schedule_timeout(msecs_to_jiffies(100))) {
if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
break;
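Editor's note: the m5602 fix matters because schedule_timeout() counts jiffies, so a bare 100 means 100 ticks: 100 ms at HZ=1000 but a full second at HZ=100. A sketch of the corrected idiom (kernel context assumed):

/*
 * msecs_to_jiffies() pins the sleep to wall-clock time regardless of
 * CONFIG_HZ; a raw constant would be HZ-dependent.
 */
#include <linux/jiffies.h>
#include <linux/sched.h>

static void sleep_100ms_example(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));
}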
diff --git a/drivers/media/usb/gspca/sn9c2028.c b/drivers/media/usb/gspca/sn9c2028.c
index c75b7388a85c..4f2050a5ec94 100644
--- a/drivers/media/usb/gspca/sn9c2028.c
+++ b/drivers/media/usb/gspca/sn9c2028.c
@@ -140,7 +140,7 @@ static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command)
status = sn9c2028_read1(gspca_dev);
if (status < 0) {
pr_err("long command status read error %d\n", status);
- return (status < 0) ? status : -EIO;
+ return status;
}
memset(reading, 0, 4);
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 03504dcf3c52..1b6836f15370 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -162,7 +162,7 @@ static void stk1160_release(struct v4l2_device *v4l2_dev)
{
struct stk1160 *dev = container_of(v4l2_dev, struct stk1160, v4l2_dev);
- stk1160_info("releasing all resources\n");
+ stk1160_dbg("releasing all resources\n");
stk1160_i2c_unregister(dev);
@@ -363,9 +363,6 @@ static int stk1160_probe(struct usb_interface *interface,
dev->sd_saa7115 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"saa7115_auto", 0, saa7113_addrs);
- stk1160_info("driver ver %s successfully loaded\n",
- STK1160_VERSION);
-
/* i2c reset saa711x */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
diff --git a/drivers/media/usb/stk1160/stk1160-reg.h b/drivers/media/usb/stk1160/stk1160-reg.h
index 3e49da6e7edd..81ff3a15d96e 100644
--- a/drivers/media/usb/stk1160/stk1160-reg.h
+++ b/drivers/media/usb/stk1160/stk1160-reg.h
@@ -33,6 +33,40 @@
*/
#define STK1160_DCTRL 0x100
+/*
+ * Decimation Control Register:
+ * Byte 0x104: Horizontal Decimation Line Unit Count
+ * Byte 0x105: Vertical Decimation Line Unit Count
+ * Byte 0x106: Decimation Control
+ * Bit 0 - Horizontal Decimation Control
+ * 0 Horizontal decimation is disabled.
+ * 1 Horizontal decimation is enabled.
+ * Bit 1 - Decimates Half or More Column
+ * 0 Decimates less than half from original column,
+ * send count unit (0x105) before each unit skipped.
+ * 1 Decimates half or more from original column,
+ * skip count unit (0x105) before each unit sent.
+ * Bit 2 - Vertical Decimation Control
+ * 0 Vertical decimation is disabled.
+ * 1 Vertical decimation is enabled.
+ * Bit 3 - Vertical Greater or Equal to Half
+ * 0 Decimates less than half from original row,
+ * send count unit (0x105) before each unit skipped.
+ * 1 Decimates half or more from original row,
+ * skip count unit (0x105) before each unit sent.
+ * Bit 4 - Decimation Unit
+ * 0 Decimation will work with 2 rows or columns per unit.
+ * 1 Decimation will work with 4 rows or columns per unit.
+ */
+#define STK1160_DMCTRL_H_UNITS 0x104
+#define STK1160_DMCTRL_V_UNITS 0x105
+#define STK1160_DMCTRL 0x106
+#define STK1160_H_DEC_EN BIT(0)
+#define STK1160_H_DEC_MODE BIT(1)
+#define STK1160_V_DEC_EN BIT(2)
+#define STK1160_V_DEC_MODE BIT(3)
+#define STK1160_DEC_UNIT_SIZE BIT(4)
+
/* Capture Frame Start Position */
#define STK116_CFSPO 0x110
#define STK116_CFSPO_STX_L 0x110
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 4d313ed4c32e..e12b10352871 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -42,6 +42,17 @@ static bool keep_buffers;
module_param(keep_buffers, bool, 0644);
MODULE_PARM_DESC(keep_buffers, "don't release buffers upon stop streaming");
+enum stk1160_decimate_mode {
+ STK1160_DECIMATE_MORE_THAN_HALF,
+ STK1160_DECIMATE_LESS_THAN_HALF,
+};
+
+struct stk1160_decimate_ctrl {
+ bool col_en, row_en;
+ enum stk1160_decimate_mode col_mode, row_mode;
+ unsigned int col_n, row_n;
+};
+
/* supported video standards */
static struct stk1160_fmt format[] = {
{
@@ -51,6 +62,19 @@ static struct stk1160_fmt format[] = {
}
};
+/*
+ * Helper to find the next divisor that results in modulo being zero.
+ * This is required to guarantee valid decimation unit counts.
+ */
+static unsigned int
+div_round_integer(unsigned int x, unsigned int y)
+{
+ for (;; y++) {
+ if (x % y == 0)
+ return x / y;
+ }
+}
+
static void stk1160_set_std(struct stk1160 *dev)
{
int i;
@@ -106,6 +130,41 @@ static void stk1160_set_std(struct stk1160 *dev)
}
+static void stk1160_set_fmt(struct stk1160 *dev,
+ struct stk1160_decimate_ctrl *ctrl)
+{
+ u32 val = 0;
+
+ if (ctrl) {
+ /*
+ * Since the format is UYVY, the device must skip or send
+ * a number of rows/columns multiple of four. This way, the
+ * colour format is preserved. The STK1160_DEC_UNIT_SIZE bit
+ * does exactly this.
+ */
+ val |= STK1160_DEC_UNIT_SIZE;
+ val |= ctrl->col_en ? STK1160_H_DEC_EN : 0;
+ val |= ctrl->row_en ? STK1160_V_DEC_EN : 0;
+ val |= ctrl->col_mode ==
+ STK1160_DECIMATE_MORE_THAN_HALF ?
+ STK1160_H_DEC_MODE : 0;
+ val |= ctrl->row_mode ==
+ STK1160_DECIMATE_MORE_THAN_HALF ?
+ STK1160_V_DEC_MODE : 0;
+
+ /* Horizontal count units */
+ stk1160_write_reg(dev, STK1160_DMCTRL_H_UNITS, ctrl->col_n);
+ /* Vertical count units */
+ stk1160_write_reg(dev, STK1160_DMCTRL_V_UNITS, ctrl->row_n);
+
+ stk1160_dbg("decimate 0x%x, column units %d, row units %d\n",
+ val, ctrl->col_n, ctrl->row_n);
+ }
+
+ /* Decimation control */
+ stk1160_write_reg(dev, STK1160_DMCTRL, val);
+}
+
/*
* Set a new alternate setting.
 * Returns true if dev->max_pkt_size has changed, false otherwise.
@@ -136,7 +195,7 @@ static bool stk1160_set_alternate(struct stk1160 *dev)
dev->alt = i;
}
- stk1160_info("setting alternate %d\n", dev->alt);
+ stk1160_dbg("setting alternate %d\n", dev->alt);
if (dev->alt != prev_alt) {
stk1160_dbg("minimum isoc packet size: %u (alt=%d)\n",
@@ -194,6 +253,8 @@ static int stk1160_start_streaming(struct stk1160 *dev)
/* Start saa711x */
v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
+ dev->sequence = 0;
+
/* Start stk1160 */
stk1160_write_reg(dev, STK1160_DCTRL, 0xb3);
stk1160_write_reg(dev, STK1160_DCTRL+3, 0x00);
@@ -224,7 +285,7 @@ static void stk1160_stop_hw(struct stk1160 *dev)
/* set alternate 0 */
dev->alt = 0;
- stk1160_info("setting alternate %d\n", dev->alt);
+ stk1160_dbg("setting alternate %d\n", dev->alt);
usb_set_interface(dev->udev, 0, 0);
/* Stop stk1160 */
@@ -321,41 +382,134 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int stk1160_try_fmt(struct stk1160 *dev, struct v4l2_format *f,
+ struct stk1160_decimate_ctrl *ctrl)
{
- struct stk1160 *dev = video_drvdata(file);
+ unsigned int width, height;
+ unsigned int base_width, base_height;
+ unsigned int col_n, row_n;
+ enum stk1160_decimate_mode col_mode, row_mode;
+ bool col_en, row_en;
+
+ base_width = 720;
+ base_height = (dev->norm & V4L2_STD_525_60) ? 480 : 576;
+
+ /* Minimum width and height are 5% of the frame size */
+ width = clamp_t(unsigned int, f->fmt.pix.width,
+ base_width / 20, base_width);
+ height = clamp_t(unsigned int, f->fmt.pix.height,
+ base_height / 20, base_height);
+
+ /* Let's set default no decimation values */
+ col_n = 0;
+ row_n = 0;
+ col_en = false;
+ row_en = false;
+ f->fmt.pix.width = base_width;
+ f->fmt.pix.height = base_height;
+ row_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+ col_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+
+ if (width < base_width && width > base_width / 2) {
+ /*
+ * The device will send count units for each
+ * unit skipped. This means count unit is:
+ *
+ * n = width / (frame width - width)
+ *
+ * And the width is:
+ *
+ * width = (n / (n + 1)) * frame width
+ */
+ col_n = div_round_integer(width, base_width - width);
+ if (col_n > 0 && col_n <= 255) {
+ col_en = true;
+ col_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+ f->fmt.pix.width = (base_width * col_n) / (col_n + 1);
+ }
- /*
- * User can't choose size at his own will,
- * so we just return him the current size chosen
- * at standard selection.
- * TODO: Implement frame scaling?
- */
+ } else if (width <= base_width / 2) {
+
+ /*
+ * The device will skip count units for each
+ * unit sent. This means count is:
+ *
+ * n = (frame width / width) - 1
+ *
+ * And the width is:
+ *
+ * width = frame width / (n + 1)
+ */
+ col_n = div_round_integer(base_width, width) - 1;
+ if (col_n > 0 && col_n <= 255) {
+ col_en = true;
+ col_mode = STK1160_DECIMATE_MORE_THAN_HALF;
+ f->fmt.pix.width = base_width / (col_n + 1);
+ }
+ }
+
+ if (height < base_height && height > base_height / 2) {
+ row_n = div_round_integer(height, base_height - height);
+ if (row_n > 0 && row_n <= 255) {
+ row_en = true;
+ row_mode = STK1160_DECIMATE_LESS_THAN_HALF;
+ f->fmt.pix.height = (base_height * row_n) / (row_n + 1);
+ }
+
+ } else if (height <= base_height / 2) {
+ row_n = div_round_integer(base_height, height) - 1;
+ if (row_n > 0 && row_n <= 255) {
+ row_en = true;
+ row_mode = STK1160_DECIMATE_MORE_THAN_HALF;
+ f->fmt.pix.height = base_height / (row_n + 1);
+ }
+ }
f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.width = dev->width;
- f->fmt.pix.height = dev->height;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- f->fmt.pix.bytesperline = dev->width * 2;
- f->fmt.pix.sizeimage = dev->height * f->fmt.pix.bytesperline;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ if (ctrl) {
+ ctrl->col_en = col_en;
+ ctrl->col_n = col_n;
+ ctrl->col_mode = col_mode;
+ ctrl->row_en = row_en;
+ ctrl->row_n = row_n;
+ ctrl->row_mode = row_mode;
+ }
+
+ stk1160_dbg("width %d, height %d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
return 0;
}
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stk1160 *dev = video_drvdata(file);
+
+ return stk1160_try_fmt(dev, f, NULL);
+}
+
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct stk1160 *dev = video_drvdata(file);
struct vb2_queue *q = &dev->vb_vidq;
+ struct stk1160_decimate_ctrl ctrl;
+ int rc;
if (vb2_is_busy(q))
return -EBUSY;
- vidioc_try_fmt_vid_cap(file, priv, f);
-
- /* We don't support any format changes */
+ rc = stk1160_try_fmt(dev, f, &ctrl);
+ if (rc < 0)
+ return rc;
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ stk1160_set_fmt(dev, &ctrl);
return 0;
}
@@ -391,22 +545,15 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
return -ENODEV;
/* We need to set this now, before we call stk1160_set_std */
+ dev->width = 720;
+ dev->height = (norm & V4L2_STD_525_60) ? 480 : 576;
dev->norm = norm;
- /* This is taken from saa7115 video decoder */
- if (dev->norm & V4L2_STD_525_60) {
- dev->width = 720;
- dev->height = 480;
- } else if (dev->norm & V4L2_STD_625_50) {
- dev->width = 720;
- dev->height = 576;
- } else {
- stk1160_err("invalid standard\n");
- return -EINVAL;
- }
-
stk1160_set_std(dev);
+ /* Calling with NULL disables frame decimation */
+ stk1160_set_fmt(dev, NULL);
+
v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_std,
dev->norm);
@@ -538,8 +685,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *v4l_fmt,
sizes[0] = size;
- stk1160_info("%s: buffer count %d, each %ld bytes\n",
- __func__, *nbuffers, size);
+ stk1160_dbg("%s: buffer count %d, each %ld bytes\n",
+ __func__, *nbuffers, size);
return 0;
}
@@ -623,8 +770,8 @@ void stk1160_clear_queue(struct stk1160 *dev)
struct stk1160_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- stk1160_info("buffer [%p/%d] aborted\n",
- buf, buf->vb.v4l2_buf.index);
+ stk1160_dbg("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
}
/* It's important to release the current buffer */
@@ -633,8 +780,8 @@ void stk1160_clear_queue(struct stk1160 *dev)
dev->isoc_ctl.buf = NULL;
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- stk1160_info("buffer [%p/%d] aborted\n",
- buf, buf->vb.v4l2_buf.index);
+ stk1160_dbg("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
}
spin_unlock_irqrestore(&dev->buf_lock, flags);
}
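Editor's note: the two decimation branches in stk1160_try_fmt() invert each other's formula, and the achieved width always lands on an exact ratio thanks to the upward divisor search. A worked check with the same helper, for a 720-pixel line:

/*
 * Asking for 480 (> half) keeps 2 of every 3 units; asking for 240
 * (<= half) keeps 1 of every 3.
 */
#include <stdio.h>

static unsigned int div_round_integer(unsigned int x, unsigned int y)
{
	for (;; y++)
		if (x % y == 0)
			return x / y;
}

int main(void)
{
	unsigned int base = 720, want, n;

	want = 480;				/* > base/2 */
	n = div_round_integer(want, base - want);
	printf("send %u, skip 1 -> width %u\n", n, base * n / (n + 1));

	want = 240;				/* <= base/2 */
	n = div_round_integer(base, want) - 1;
	printf("skip %u, send 1 -> width %u\n", n, base / (n + 1));
	return 0;
}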
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 39f1aae209bc..940c3eaea507 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -96,9 +96,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
{
struct stk1160_buffer *buf = dev->isoc_ctl.buf;
- dev->field_count++;
-
- buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
+ buf->vb.v4l2_buf.sequence = dev->sequence++;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.bytesused = buf->bytesused;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index abdea484c998..72cc8e8cbef7 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -58,7 +58,6 @@
* new drivers should use.
*
*/
-#define DEBUG
#ifdef DEBUG
#define stk1160_dbg(fmt, args...) \
printk(KERN_DEBUG "stk1160: " fmt, ## args)
@@ -151,8 +150,7 @@ struct stk1160 {
v4l2_std_id norm; /* current norm */
struct stk1160_fmt *fmt; /* selected format */
- unsigned int field_count; /* not sure ??? */
- enum v4l2_field field; /* also not sure :/ */
+ unsigned int sequence;
/* i2c i/o */
struct i2c_adapter i2c_adap;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 322b53a4f1dd..7c3a7c55d969 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -593,14 +593,9 @@ static void ttusb_dec_process_packet(struct ttusb_dec *dec)
static void swap_bytes(u8 *b, int length)
{
- u8 c;
-
length -= length % 2;
- for (; length; b += 2, length -= 2) {
- c = *b;
- *b = *(b + 1);
- *(b + 1) = c;
- }
+ for (; length; b += 2, length -= 2)
+ swap(*b, *(b + 1));
}
static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
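Editor's note: swap() here is the kernel macro from <linux/kernel.h>; applied pairwise it byte-swaps a buffer exactly as the old three-assignment loop did. A self-contained sketch with a roughly equivalent typeof-based expansion:

/*
 * User-space stand-in for the kernel's swap() macro, exercised by the
 * same swap_bytes() shape as the cleaned-up driver function.
 */
#include <stdio.h>

#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

static void swap_bytes(unsigned char *b, int length)
{
	length -= length % 2;		/* ignore a trailing odd byte */
	for (; length; b += 2, length -= 2)
		swap(*b, *(b + 1));
}

int main(void)
{
	unsigned char buf[] = { 1, 2, 3, 4, 5 };
	int i;

	swap_bytes(buf, sizeof(buf));
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%d ", buf[i]);	/* 2 1 4 3 5 */
	printf("\n");
	return 0;
}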
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 7c04ef697fb6..dc3b4d5155c5 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1367,7 +1367,7 @@ static void usbvision_isoc_irq(struct urb *urb)
int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg)
{
int err_code = 0;
- unsigned char buffer[1];
+ unsigned char *buffer = usbvision->ctrl_urb_buffer;
if (!USBVISION_IS_OPERATIONAL(usbvision))
return -1;
@@ -1401,10 +1401,12 @@ int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
if (!USBVISION_IS_OPERATIONAL(usbvision))
return 0;
+ usbvision->ctrl_urb_buffer[0] = value;
err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
USBVISION_OP_CODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0, (__u16) reg, &value, 1, HZ);
+ USB_RECIP_ENDPOINT, 0, (__u16) reg,
+ usbvision->ctrl_urb_buffer, 1, HZ);
if (err_code < 0) {
dev_err(&usbvision->dev->dev,
@@ -1596,7 +1598,7 @@ static int usbvision_init_webcam(struct usb_usbvision *usbvision)
{ 0x27, 0x00, 0x00 }, { 0x28, 0x00, 0x00 }, { 0x29, 0x00, 0x00 }, { 0x08, 0x80, 0x60 },
{ 0x0f, 0x2d, 0x24 }, { 0x0c, 0x80, 0x80 }
};
- char value[3];
+ unsigned char *value = usbvision->ctrl_urb_buffer;
/* the only difference between PAL and NTSC init_values */
if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_NTSC)
@@ -1635,8 +1637,8 @@ static int usbvision_init_webcam(struct usb_usbvision *usbvision)
static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format)
{
static const char proc[] = "usbvision_set_video_format";
+ unsigned char *value = usbvision->ctrl_urb_buffer;
int rc;
- unsigned char value[2];
if (!USBVISION_IS_OPERATIONAL(usbvision))
return 0;
@@ -1677,7 +1679,7 @@ int usbvision_set_output(struct usb_usbvision *usbvision, int width,
int err_code = 0;
int usb_width, usb_height;
unsigned int frame_rate = 0, frame_drop = 0;
- unsigned char value[4];
+ unsigned char *value = usbvision->ctrl_urb_buffer;
if (!USBVISION_IS_OPERATIONAL(usbvision))
return 0;
@@ -1789,10 +1791,6 @@ int usbvision_frames_alloc(struct usb_usbvision *usbvision, int number_of_frames
usbvision->num_frames--;
}
- spin_lock_init(&usbvision->queue_lock);
- init_waitqueue_head(&usbvision->wait_frame);
- init_waitqueue_head(&usbvision->wait_stream);
-
/* Allocate all buffers */
for (i = 0; i < usbvision->num_frames; i++) {
usbvision->frame[i].index = i;
@@ -1872,7 +1870,7 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
{
static const char proc[] = "usbvision_set_compresion_params: ";
int rc;
- unsigned char value[6];
+ unsigned char *value = usbvision->ctrl_urb_buffer;
value[0] = 0x0F; /* Intra-Compression cycle */
value[1] = 0x01; /* Reg.45 one line per strip */
@@ -1946,7 +1944,7 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
{
static const char proc[] = "usbvision_set_input: ";
int rc;
- unsigned char value[8];
+ unsigned char *value = usbvision->ctrl_urb_buffer;
unsigned char dvi_yuv_value;
if (!USBVISION_IS_OPERATIONAL(usbvision))
@@ -2062,8 +2060,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
static int usbvision_set_dram_settings(struct usb_usbvision *usbvision)
{
+ unsigned char *value = usbvision->ctrl_urb_buffer;
int rc;
- unsigned char value[8];
if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) {
value[0] = 0x42;
@@ -2161,55 +2159,6 @@ int usbvision_power_on(struct usb_usbvision *usbvision)
/*
- * usbvision timer stuff
- */
-
-/* to call usbvision_power_off from task queue */
-static void call_usbvision_power_off(struct work_struct *work)
-{
- struct usb_usbvision *usbvision = container_of(work, struct usb_usbvision, power_off_work);
-
- PDEBUG(DBG_FUNC, "");
- if (mutex_lock_interruptible(&usbvision->v4l2_lock))
- return;
-
- if (usbvision->user == 0) {
- usbvision_i2c_unregister(usbvision);
-
- usbvision_power_off(usbvision);
- usbvision->initialized = 0;
- }
- mutex_unlock(&usbvision->v4l2_lock);
-}
-
-static void usbvision_power_off_timer(unsigned long data)
-{
- struct usb_usbvision *usbvision = (void *)data;
-
- PDEBUG(DBG_FUNC, "");
- del_timer(&usbvision->power_off_timer);
- INIT_WORK(&usbvision->power_off_work, call_usbvision_power_off);
- (void) schedule_work(&usbvision->power_off_work);
-}
-
-void usbvision_init_power_off_timer(struct usb_usbvision *usbvision)
-{
- setup_timer(&usbvision->power_off_timer, usbvision_power_off_timer,
- (unsigned long)usbvision);
-}
-
-void usbvision_set_power_off_timer(struct usb_usbvision *usbvision)
-{
- mod_timer(&usbvision->power_off_timer, jiffies + USBVISION_POWEROFF_TIME);
-}
-
-void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision)
-{
- if (timer_pending(&usbvision->power_off_timer))
- del_timer(&usbvision->power_off_timer);
-}
-
-/*
* usbvision_begin_streaming()
 * Sure you have to put bit 7 to 0, if not incoming frames are dropped, but no
* idea about the rest
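Editor's note: the deleted timer-plus-workqueue chain existed because timer callbacks run in atomic context while the power-off path sleeps on a mutex. Were the deferred power-off ever wanted back, a single delayed_work covers both roles; a hedged sketch with illustrative names, not the driver's API:

/*
 * One delayed_work replaces the timer that scheduled a work item:
 * the callback already runs in process context and may sleep.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct delayed_work power_off_work;
};

static void demo_power_off(struct work_struct *work)
{
	struct demo_dev *dev =
		container_of(to_delayed_work(work), struct demo_dev,
			     power_off_work);
	/* may sleep here: take mutexes, talk to the device, ... */
	(void)dev;
}

static void demo_init(struct demo_dev *dev)
{
	INIT_DELAYED_WORK(&dev->power_off_work, demo_power_off);
}

static void demo_arm(struct demo_dev *dev)
{
	/* re-arms if already pending, like mod_timer() did */
	mod_delayed_work(system_wq, &dev->power_off_work, 3 * HZ);
}

static void demo_cancel(struct demo_dev *dev)
{
	cancel_delayed_work_sync(&dev->power_off_work);
}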
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 26dbcb1146af..120de2e020e1 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -343,7 +343,7 @@ static int usbvision_i2c_write_max4(struct usb_usbvision *usbvision,
{
int rc, retries;
int i;
- unsigned char value[6];
+ unsigned char *value = usbvision->ctrl_urb_buffer;
unsigned char ser_cont;
ser_cont = (len & 0x07) | 0x10;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 1c6d31f7c1b9..b693206f66dd 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -62,6 +62,7 @@
#include <media/saa7115.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/tuner.h>
#include <linux/workqueue.h>
@@ -122,8 +123,6 @@ static void usbvision_release(struct usb_usbvision *usbvision);
static int isoc_mode = ISOC_MODE_COMPRESS;
/* Set the default Debug Mode of the device driver */
static int video_debug;
-/* Set the default device to power on at startup */
-static int power_on_at_open = 1;
/* Sequential Number of Video Device */
static int video_nr = -1;
/* Sequential Number of Radio Device */
@@ -134,13 +133,11 @@ static int radio_nr = -1;
/* Showing parameters under SYSFS */
module_param(isoc_mode, int, 0444);
module_param(video_debug, int, 0444);
-module_param(power_on_at_open, int, 0444);
module_param(video_nr, int, 0444);
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(isoc_mode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
-MODULE_PARM_DESC(power_on_at_open, " Set the default device to power on when device is opened. Default: 1 (On)");
MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
@@ -351,11 +348,14 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
- usbvision_reset_power_off_timer(usbvision);
- if (usbvision->user)
+ if (usbvision->user) {
err_code = -EBUSY;
- else {
+ } else {
+ err_code = v4l2_fh_open(file);
+ if (err_code)
+ goto unlock;
+
/* Allocate memory for the scratch ring buffer */
err_code = usbvision_scratch_alloc(usbvision);
if (isoc_mode == ISOC_MODE_COMPRESS) {
@@ -372,11 +372,6 @@ static int usbvision_v4l2_open(struct file *file)
/* If so far no errors then we shall start the camera */
if (!err_code) {
- if (usbvision->power == 0) {
- usbvision_power_on(usbvision);
- usbvision_i2c_register(usbvision);
- }
-
/* Send init sequence only once, it's large! */
if (!usbvision->initialized) {
int setup_ok = 0;
@@ -392,18 +387,14 @@ static int usbvision_v4l2_open(struct file *file)
err_code = usbvision_init_isoc(usbvision);
/* device must be initialized before isoc transfer */
usbvision_muxsel(usbvision, 0);
+
+ /* prepare queues */
+ usbvision_empty_framequeues(usbvision);
usbvision->user++;
- } else {
- if (power_on_at_open) {
- usbvision_i2c_unregister(usbvision);
- usbvision_power_off(usbvision);
- usbvision->initialized = 0;
- }
}
}
- /* prepare queues */
- usbvision_empty_framequeues(usbvision);
+unlock:
mutex_unlock(&usbvision->v4l2_lock);
PDEBUG(DBG_IO, "success");
@@ -435,23 +426,16 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
-
- if (power_on_at_open) {
- /* power off in a little while
- to avoid off/on every close/open short sequences */
- usbvision_set_power_off_timer(usbvision);
- usbvision->initialized = 0;
- }
+ mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
}
- mutex_unlock(&usbvision->v4l2_lock);
PDEBUG(DBG_IO, "success");
- return 0;
+ return v4l2_fh_release(file);
}
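The open/close rework above wires the driver into the V4L2 file-handle framework, which is what makes per-handle event subscription (and thus control events) possible. A hedged sketch of the canonical pairing, with locking and driver specifics omitted:

#include <media/v4l2-fh.h>

static int sketch_open(struct file *file)
{
	int ret = v4l2_fh_open(file);	/* allocates and registers a struct v4l2_fh */

	if (ret)
		return ret;
	/* driver-specific setup; on failure, undo with v4l2_fh_release(file) */
	return 0;
}

static int sketch_release(struct file *file)
{
	/* driver-specific teardown first */
	return v4l2_fh_release(file);	/* frees the v4l2_fh and its event queue */
}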
@@ -503,18 +487,24 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *vc)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
strlcpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
sizeof(vc->card));
usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
- vc->capabilities = vc->device_caps | V4L2_CAP_DEVICE_CAPS;
+ vc->device_caps = usbvision->have_tuner ? V4L2_CAP_TUNER : 0;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ vc->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ else
+ vc->device_caps |= V4L2_CAP_RADIO;
+
+ vc->capabilities = vc->device_caps | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+ if (usbvision_device_data[usbvision->dev_model].radio)
+ vc->capabilities |= V4L2_CAP_RADIO;
return 0;
}
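This querycap change follows the V4L2 rule that device_caps describes only the node being queried (video vs. radio here), while capabilities is the union over all nodes of the physical device plus V4L2_CAP_DEVICE_CAPS. Reduced to its invariant, with illustrative caps:

/* per-node caps vs. whole-device caps */
vc->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
vc->capabilities = vc->device_caps | V4L2_CAP_RADIO |
		   V4L2_CAP_DEVICE_CAPS;	/* the union always sets this flag */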
@@ -540,7 +530,6 @@ static int vidioc_enum_input(struct file *file, void *priv,
} else {
strcpy(vi->name, "Television");
vi->type = V4L2_INPUT_TYPE_TUNER;
- vi->audioset = 1;
vi->tuner = chan;
vi->std = USBVISION_NORMS;
}
@@ -551,7 +540,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
strcpy(vi->name, "Green Video Input");
else
strcpy(vi->name, "Composite Video Input");
- vi->std = V4L2_STD_PAL;
+ vi->std = USBVISION_NORMS;
break;
case 2:
vi->type = V4L2_INPUT_TYPE_CAMERA;
@@ -559,12 +548,12 @@ static int vidioc_enum_input(struct file *file, void *priv,
strcpy(vi->name, "Yellow Video Input");
else
strcpy(vi->name, "S-Video Input");
- vi->std = V4L2_STD_PAL;
+ vi->std = USBVISION_NORMS;
break;
case 3:
vi->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(vi->name, "Red Video Input");
- vi->std = V4L2_STD_PAL;
+ vi->std = USBVISION_NORMS;
break;
}
return 0;
@@ -619,14 +608,13 @@ static int vidioc_g_tuner(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
- if (!usbvision->have_tuner || vt->index) /* Only tuner 0 */
+ if (vt->index) /* Only tuner 0 */
return -EINVAL;
- if (usbvision->radio) {
+ if (vt->type == V4L2_TUNER_RADIO)
strcpy(vt->name, "Radio");
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strcpy(vt->name, "Television");
- }
+
/* Let clients fill in the remainder of this struct */
call_all(usbvision, tuner, g_tuner, vt);
@@ -638,8 +626,8 @@ static int vidioc_s_tuner(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
- /* Only no or one tuner for now */
- if (!usbvision->have_tuner || vt->index)
+ /* Only one tuner for now */
+ if (vt->index)
return -EINVAL;
/* let clients handle this */
call_all(usbvision, tuner, s_tuner, vt);
@@ -652,12 +640,13 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
- freq->tuner = 0; /* Only one tuner */
- if (usbvision->radio)
- freq->type = V4L2_TUNER_RADIO;
+ /* Only one tuner */
+ if (freq->tuner)
+ return -EINVAL;
+ if (freq->type == V4L2_TUNER_RADIO)
+ freq->frequency = usbvision->radio_freq;
else
- freq->type = V4L2_TUNER_ANALOG_TV;
- freq->frequency = usbvision->freq;
+ freq->frequency = usbvision->tv_freq;
return 0;
}
@@ -666,65 +655,19 @@ static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *freq)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ struct v4l2_frequency new_freq = *freq;
- /* Only no or one tuner for now */
- if (!usbvision->have_tuner || freq->tuner)
+ /* Only one tuner for now */
+ if (freq->tuner)
return -EINVAL;
- usbvision->freq = freq->frequency;
call_all(usbvision, tuner, s_frequency, freq);
-
- return 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- if (usbvision->radio)
- strcpy(a->name, "Radio");
+ call_all(usbvision, tuner, g_frequency, &new_freq);
+ if (freq->type == V4L2_TUNER_RADIO)
+ usbvision->radio_freq = new_freq.frequency;
else
- strcpy(a->name, "TV");
-
- return 0;
-}
+ usbvision->tv_freq = new_freq.frequency;
-static int vidioc_s_audio(struct file *file, void *fh,
- const struct v4l2_audio *a)
-{
- if (a->index)
- return -EINVAL;
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *ctrl)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- call_all(usbvision, core, queryctrl, ctrl);
-
- if (!ctrl->type)
- return -EINVAL;
-
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- call_all(usbvision, core, g_ctrl, ctrl);
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- call_all(usbvision, core, s_ctrl, ctrl);
return 0;
}
@@ -937,6 +880,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
vf->fmt.pix.bytesperline = vf->fmt.pix.width*
usbvision->palette.bytes_per_pixel;
vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
+ vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
return 0;
}
@@ -1167,20 +1112,15 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ err_code = v4l2_fh_open(file);
+ if (err_code)
+ goto out;
if (usbvision->user) {
dev_err(&usbvision->rdev.dev,
"%s: Someone tried to open an already opened USBVision Radio!\n",
__func__);
err_code = -EBUSY;
} else {
- if (power_on_at_open) {
- usbvision_reset_power_off_timer(usbvision);
- if (usbvision->power == 0) {
- usbvision_power_on(usbvision);
- usbvision_i2c_register(usbvision);
- }
- }
-
/* Alternate interface 1 has the biggest frame size */
err_code = usbvision_set_alternate(usbvision);
if (err_code < 0) {
@@ -1195,14 +1135,6 @@ static int usbvision_radio_open(struct file *file)
usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
usbvision->user++;
}
-
- if (err_code) {
- if (power_on_at_open) {
- usbvision_i2c_unregister(usbvision);
- usbvision_power_off(usbvision);
- usbvision->initialized = 0;
- }
- }
out:
mutex_unlock(&usbvision->v4l2_lock);
return err_code;
@@ -1212,34 +1144,29 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
- int err_code = 0;
PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */
usbvision->iface_alt = 0;
- err_code = usb_set_interface(usbvision->dev, usbvision->iface,
+ usb_set_interface(usbvision->dev, usbvision->iface,
usbvision->iface_alt);
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
- if (power_on_at_open) {
- usbvision_set_power_off_timer(usbvision);
- usbvision->initialized = 0;
- }
-
if (usbvision->remove_pending) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
+ v4l2_fh_release(file);
usbvision_release(usbvision);
- return err_code;
+ return 0;
}
mutex_unlock(&usbvision->v4l2_lock);
PDEBUG(DBG_IO, "success");
- return err_code;
+ return v4l2_fh_release(file);
}
/* Video registration stuff */
@@ -1252,7 +1179,6 @@ static const struct v4l2_file_operations usbvision_fops = {
.read = usbvision_v4l2_read,
.mmap = usbvision_v4l2_mmap,
.unlocked_ioctl = video_ioctl2,
-/* .poll = video_poll, */
};
static const struct v4l2_ioctl_ops usbvision_ioctl_ops = {
@@ -1270,17 +1196,15 @@ static const struct v4l2_ioctl_ops usbvision_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -1301,23 +1225,19 @@ static const struct v4l2_file_operations usbvision_radio_fops = {
.owner = THIS_MODULE,
.open = usbvision_radio_open,
.release = usbvision_radio_close,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device usbvision_radio_template = {
@@ -1369,9 +1289,17 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
/* register video4linux devices */
static int usbvision_register_video(struct usb_usbvision *usbvision)
{
+ int res = -ENOMEM;
+
/* Video Device: */
usbvision_vdev_init(usbvision, &usbvision->vdev,
&usbvision_video_template, "USBVision Video");
+ if (!usbvision->have_tuner) {
+ v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_G_TUNER);
+ }
if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
@@ -1395,7 +1323,7 @@ static int usbvision_register_video(struct usb_usbvision *usbvision)
"USBVision[%d]: video_register_device() failed\n",
usbvision->nr);
usbvision_unregister_video(usbvision);
- return -1;
+ return res;
}
/*
@@ -1420,6 +1348,9 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev))
goto err_free;
+ if (v4l2_ctrl_handler_init(&usbvision->hdl, 4))
+ goto err_unreg;
+ usbvision->v4l2_dev.ctrl_handler = &usbvision->hdl;
mutex_init(&usbvision->v4l2_lock);
/* prepare control urb for control messages during interrupts */
@@ -1428,11 +1359,10 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
goto err_unreg;
init_waitqueue_head(&usbvision->ctrl_urb_wq);
- usbvision_init_power_off_timer(usbvision);
-
return usbvision;
err_unreg:
+ v4l2_ctrl_handler_free(&usbvision->hdl);
v4l2_device_unregister(&usbvision->v4l2_dev);
err_free:
kfree(usbvision);
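The new ctrl_handler is what backs the vidioc_log_status/subscribe_event hooks added to both ioctl tables: once v4l2_dev.ctrl_handler points at it, controls exposed by the tuner and decoder subdevs are inherited automatically. A sketch of the pattern, assuming nothing beyond the v4l2-ctrls API:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

static int sketch_init_ctrls(struct v4l2_device *v4l2_dev,
			     struct v4l2_ctrl_handler *hdl)
{
	/* the second argument is only a hash-table size hint,
	 * not a limit on the number of controls */
	int err = v4l2_ctrl_handler_init(hdl, 4);

	if (err)
		return err;
	/* subdevs registered later add their controls to this handler */
	v4l2_dev->ctrl_handler = hdl;
	return 0;
}

/* teardown: v4l2_ctrl_handler_free(hdl) is safe even after a failed init */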
@@ -1450,8 +1380,6 @@ static void usbvision_release(struct usb_usbvision *usbvision)
{
PDEBUG(DBG_PROBE, "");
- usbvision_reset_power_off_timer(usbvision);
-
usbvision->initialized = 0;
usbvision_remove_sysfs(&usbvision->vdev);
@@ -1460,6 +1388,7 @@ static void usbvision_release(struct usb_usbvision *usbvision)
usb_free_urb(usbvision->ctrl_urb);
+ v4l2_ctrl_handler_free(&usbvision->hdl);
v4l2_device_unregister(&usbvision->v4l2_dev);
kfree(usbvision);
@@ -1487,19 +1416,18 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
}
usbvision->tvnorm_id = usbvision_device_data[model].video_norm;
-
usbvision->video_inputs = usbvision_device_data[model].video_channels;
usbvision->ctl_input = 0;
+ usbvision->radio_freq = 87.5 * 16000;
+ usbvision->tv_freq = 400 * 16;
/* This should be here to make i2c clients to be able to register */
/* first switch off audio */
if (usbvision_device_data[model].audio_channels > 0)
usbvision_audio_off(usbvision);
- if (!power_on_at_open) {
- /* and then power up the noisy tuner */
- usbvision_power_on(usbvision);
- usbvision_i2c_register(usbvision);
- }
+ /* and then power up the tuner */
+ usbvision_power_on(usbvision);
+ usbvision_i2c_register(usbvision);
}
/*
@@ -1592,6 +1520,10 @@ static int usbvision_probe(struct usb_interface *intf,
usbvision->nr = usbvision_nr++;
+ spin_lock_init(&usbvision->queue_lock);
+ init_waitqueue_head(&usbvision->wait_frame);
+ init_waitqueue_head(&usbvision->wait_stream);
+
usbvision->have_tuner = usbvision_device_data[model].tuner;
if (usbvision->have_tuner)
usbvision->tuner_type = usbvision_device_data[model].tuner_type;
@@ -1646,11 +1578,7 @@ static void usbvision_disconnect(struct usb_interface *intf)
usbvision_stop_isoc(usbvision);
v4l2_device_disconnect(&usbvision->v4l2_dev);
-
- if (usbvision->power) {
- usbvision_i2c_unregister(usbvision);
- usbvision_power_off(usbvision);
- }
+ usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
usb_put_dev(usbvision->dev);
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 140a1f67566e..4f2e4fde38f2 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -36,6 +36,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/tuner.h>
#include <linux/videodev2.h>
@@ -357,6 +358,7 @@ extern struct usb_device_id usbvision_table[];
struct usb_usbvision {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
struct video_device vdev; /* Video Device */
struct video_device rdev; /* Radio Device */
@@ -376,7 +378,8 @@ struct usb_usbvision {
int bridge_type; /* NT1003, NT1004, NT1005 */
int radio;
int video_inputs; /* # of inputs */
- unsigned long freq;
+ unsigned long radio_freq;
+ unsigned long tv_freq;
int audio_mute;
int audio_channel;
int isoc_mode; /* format of video data for the usb isoc-transfer */
@@ -391,8 +394,6 @@ struct usb_usbvision {
unsigned char iface_alt; /* Alt settings */
unsigned char vin_reg2_preset;
struct mutex v4l2_lock;
- struct timer_list power_off_timer;
- struct work_struct power_off_work;
int power; /* is the device powered on? */
int user; /* user count for exclusive use */
int initialized; /* Had we already sent init sequence? */
@@ -510,9 +511,6 @@ int usbvision_muxsel(struct usb_usbvision *usbvision, int channel);
int usbvision_set_input(struct usb_usbvision *usbvision);
int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height);
-void usbvision_init_power_off_timer(struct usb_usbvision *usbvision);
-void usbvision_set_power_off_timer(struct usb_usbvision *usbvision);
-void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision);
int usbvision_power_off(struct usb_usbvision *usbvision);
int usbvision_power_on(struct usb_usbvision *usbvision);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b4b022933e29..82876a67f144 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -84,6 +84,7 @@ config VIDEOBUF2_CORE
config VIDEOBUF2_MEMOPS
tristate
+ select FRAME_VECTOR
config VIDEOBUF2_DMA_CONTIG
tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index dc3de00d68b5..d1dd440d9d9b 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -13,6 +13,9 @@ endif
ifeq ($(CONFIG_OF),y)
videodev-objs += v4l2-of.o
endif
+ifeq ($(CONFIG_TRACEPOINTS),y)
+ videodev-objs += v4l2-trace.o
+endif
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index abdcffabcb59..581e21ad6801 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -1366,7 +1366,6 @@ MODULE_DEVICE_TABLE(i2c, tuner_id);
static struct i2c_driver tuner_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tuner",
.pm = &tuner_pm_ops,
},
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index e3a3468002e6..b6b7dcc1b77d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1678,21 +1678,6 @@ static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
unsigned idx;
int err = 0;
- if (!ctrl->is_ptr) {
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_INTEGER:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_BITMASK:
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_CTRL_CLASS:
- case V4L2_CTRL_TYPE_INTEGER64:
- return ctrl->type_ops->validate(ctrl, 0, p_new);
- default:
- break;
- }
- }
for (idx = 0; !err && idx < ctrl->elems; idx++)
err = ctrl->type_ops->validate(ctrl, idx, p_new);
return err;
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 04dc71e3ebf0..6a83d6191684 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -256,6 +256,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
{
const struct v4l2_bt_timings *bt = &t->bt;
u32 htot, vtot;
+ u32 fps;
if (t->type != V4L2_DV_BT_656_1120)
return;
@@ -265,13 +266,15 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
if (bt->interlaced)
vtot /= 2;
+ fps = (htot * vtot) > 0 ? div_u64((100 * (u64)bt->pixelclock),
+ (htot * vtot)) : 0;
+
if (prefix == NULL)
prefix = "";
- pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
+ pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
- (htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
- htot, vtot);
+ fps / 100, fps % 100, htot, vtot);
if (!detailed)
return;
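Worked example of the new fixed-point print, assuming 1080p60 timings: pixelclock = 148500000, htot = 2200, vtot = 1125, so fps = (100 * 148500000) / (2200 * 1125) = 6000, printed as "60.00". The old integer division printed just "60", and fractional rates such as 59.94 were silently truncated to "59".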
@@ -290,9 +293,11 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->il_vsync, bt->il_vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
- pr_info("%s: flags (0x%x):%s%s%s%s%s\n", dev_prefix, bt->flags,
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s\n", dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
+ ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
+ bt->vsync == 8) ? " (V2)" : "",
(bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
" CAN_REDUCE_FPS" : "",
(bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
@@ -316,6 +321,7 @@ EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
*/
#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2 */
/* Normal blanking */
#define CVT_MIN_V_BPORCH 7 /* lines */
@@ -335,15 +341,22 @@ EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
/* Reduced Blanking */
#define CVT_RB_MIN_V_BPORCH 7 /* lines */
#define CVT_RB_V_FPORCH 3 /* lines */
-#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
#define CVT_RB_H_SYNC 32 /* pixels */
-#define CVT_RB_H_BPORCH 80 /* pixels */
#define CVT_RB_H_BLANK 160 /* pixels */
+/* Reduced blanking version 2 */
+#define CVT_RB_V2_H_BLANK 80 /* pixels */
+#define CVT_RB_MIN_V_FPORCH 3 /* lines */
+#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */
+#define CVT_RB_V_BPORCH 6 /* lines */
/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
* @frame_height - the total height of the frame (including blanking) in lines.
* @hfreq - the horizontal frequency in Hz.
* @vsync - the height of the vertical sync in lines.
+ * @active_width - active width of image (does not include blanking). This
+ * information is needed only for version 2 of reduced blanking;
+ * for other variants the parameter has no effect on the timings.
* @polarities - the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced - if this flag is true, it indicates interlaced format
@@ -352,20 +365,22 @@ EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
* in with the found CVT timings.
- *
- * TODO: VESA defined a new version 2 of their reduced blanking
- * formula. Support for that is currently missing in this CVT
- * detection function.
*/
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt)
+bool v4l2_detect_cvt(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ unsigned active_width,
+ u32 polarities,
+ bool interlaced,
+ struct v4l2_dv_timings *fmt)
{
int v_fp, v_bp, h_fp, h_bp, hsync;
int frame_width, image_height, image_width;
bool reduced_blanking;
+ bool rb_v2 = false;
unsigned pix_clk;
- if (vsync < 4 || vsync > 7)
+ if (vsync < 4 || vsync > 8)
return false;
if (polarities == V4L2_DV_VSYNC_POS_POL)
@@ -375,17 +390,35 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
else
return false;
+ if (reduced_blanking && vsync == 8)
+ rb_v2 = true;
+
+ if (rb_v2 && active_width == 0)
+ return false;
+
+ if (!rb_v2 && vsync > 7)
+ return false;
+
if (hfreq == 0)
return false;
/* Vertical */
if (reduced_blanking) {
- v_fp = CVT_RB_V_FPORCH;
- v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
- v_bp -= vsync + v_fp;
-
- if (v_bp < CVT_RB_MIN_V_BPORCH)
- v_bp = CVT_RB_MIN_V_BPORCH;
+ if (rb_v2) {
+ v_bp = CVT_RB_V_BPORCH;
+ v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_fp -= vsync + v_bp;
+
+ if (v_fp < CVT_RB_V2_MIN_V_FPORCH)
+ v_fp = CVT_RB_V2_MIN_V_FPORCH;
+ } else {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ }
} else {
v_fp = CVT_MIN_V_PORCH_RND;
v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync;
@@ -422,22 +455,32 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
else
return false;
break;
+ case 8:
+ image_width = active_width;
+ break;
default:
return false;
}
- image_width = image_width & ~7;
+ if (!rb_v2)
+ image_width = image_width & ~7;
/* Horizontal */
if (reduced_blanking) {
- pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+ int h_blank;
+ int clk_gran;
- h_bp = CVT_RB_H_BPORCH;
+ h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK;
+ clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / clk_gran) * clk_gran;
+
+ h_bp = h_blank / 2;
hsync = CVT_RB_H_SYNC;
- h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+ h_fp = h_blank - h_bp - hsync;
- frame_width = image_width + CVT_RB_H_BLANK;
+ frame_width = image_width + h_blank;
} else {
unsigned ideal_duty_cycle_per_myriad =
100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
@@ -665,7 +708,6 @@ EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
{
struct v4l2_fract aspect = { 16, 9 };
- u32 tmp;
u8 ratio;
/* Nothing filled in, fallback to 16:9 */
@@ -697,9 +739,7 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
if (hor_landscape)
return aspect;
/* The aspect ratio is for portrait, so swap numerator and denominator */
- tmp = aspect.denominator;
- aspect.denominator = aspect.numerator;
- aspect.numerator = tmp;
+ swap(aspect.denominator, aspect.numerator);
return aspect;
}
EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
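Every caller of v4l2_detect_cvt() must now pass the measured active width, since reduced-blanking v2 (signalled by an 8-line vsync) cannot derive the image width from the vsync code alone. A hedged usage sketch with illustrative measurements from a hypothetical HDMI receiver:

#include <linux/printk.h>
#include <media/v4l2-dv-timings.h>

static void sketch_detect(void)
{
	struct v4l2_dv_timings t;

	/* measured: 1125 total lines, 67.5 kHz line rate, 8-line vsync
	 * (vsync == 8 signals RB v2, so active_width is actually used) */
	if (v4l2_detect_cvt(1125, 67500, 8, 1920,
			    V4L2_DV_VSYNC_POS_POL, false, &t))
		pr_info("CVT RB v2 timings: %ux%u\n", t.bt.width, t.bt.height);
}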
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8761aab99de9..8d3171c6bee8 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -172,6 +172,9 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
unsigned long flags;
struct timespec timestamp;
+ if (vdev == NULL)
+ return;
+
ktime_get_ts(&timestamp);
spin_lock_irqsave(&vdev->fh_lock, flags);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 85de4557f696..4a384fc765b8 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -28,7 +28,6 @@
#include <media/v4l2-device.h>
#include <media/videobuf2-core.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/v4l2.h>
/* Zero out the end of the struct pointed to by p. Everything after, but
@@ -1025,8 +1024,9 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
* Drivers MUST fill in device_caps, so check for this and
* warn if it was forgotten.
*/
- WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
- !cap->device_caps);
+ WARN(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+ !cap->device_caps, "Bad caps for driver %s, %x %x",
+ cap->driver, cap->capabilities, cap->device_caps);
cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
return ret;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index dc853e57f91f..ec3ad4eb0c57 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -357,9 +357,16 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
- return vb2_reqbufs(vq, reqbufs);
+ ret = vb2_reqbufs(vq, reqbufs);
+ /* If count == 0, then the owner has released all buffers and is
+ no longer the owner of the queue. Otherwise we have an owner. */
+ if (ret == 0)
+ vq->owner = reqbufs->count ? file->private_data : NULL;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
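With this change a REQBUFS of count 0 on an m2m queue both frees the buffers and relinquishes queue ownership, so another file handle can take over. Seen from userspace the handover is just this (a sketch, error handling omitted):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void release_queue(int fd)
{
	struct v4l2_requestbuffers req = {
		.count = 0,	/* 0 frees the buffers and drops ownership */
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
	};

	ioctl(fd, VIDIOC_REQBUFS, &req);	/* fd: the handle giving up the queue */
}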
@@ -874,18 +881,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct v4l2_fh *fh = file->private_data;
- struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
- int ret;
-
- if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
-
- if (m2m_ctx->q_lock)
- mutex_unlock(m2m_ctx->q_lock);
-
- return ret;
+ return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 63596063b213..83615b8fb46a 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -588,3 +588,21 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
+
+/**
+ * v4l2_subdev_notify_event() - Delivers event notification for subdevice
+ * @sd: The subdev for which to deliver the event
+ * @ev: The event to deliver
+ *
+ * Will deliver the specified event to all userspace event listeners which are
+ * subscribed to the v4l2 subdev event queue as well as to the bridge driver
+ * using the notify callback. The notification type for the notify callback
+ * will be V4L2_DEVICE_NOTIFY_EVENT.
+ */
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+ const struct v4l2_event *ev)
+{
+ v4l2_event_queue(sd->devnode, ev);
+ v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
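A hedged sketch of how a subdev driver would use the new helper, signalling a source change to both userspace listeners and the bridge in one call (the event values are illustrative):

#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

static void sketch_signal_source_change(struct v4l2_subdev *sd)
{
	struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
	};

	v4l2_subdev_notify_event(sd, &ev);	/* queue to fhs + notify bridge */
}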
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
new file mode 100644
index 000000000000..ae10b0248c8e
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -0,0 +1,11 @@
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..4f59b7ec05d0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -30,6 +30,8 @@
#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>
+#include <trace/events/v4l2.h>
+
static int debug;
module_param(debug, int, 0644);
@@ -715,6 +717,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -1182,7 +1185,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
- state != VB2_BUF_STATE_QUEUED))
+ state != VB2_BUF_STATE_QUEUED &&
+ state != VB2_BUF_STATE_REQUEUEING))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1203,32 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
- /* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
- vb->state = state;
- if (state != VB2_BUF_STATE_QUEUED)
+ if (state == VB2_BUF_STATE_QUEUED ||
+ state == VB2_BUF_STATE_REQUEUEING) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
- if (state == VB2_BUF_STATE_QUEUED) {
+ trace_vb2_buf_done(q, vb);
+
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ case VB2_BUF_STATE_REQUEUEING:
if (q->start_streaming_called)
__enqueue_in_driver(vb);
return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
}
-
- /* Inform any processes that may be waiting for buffers */
- wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
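VB2_BUF_STATE_REQUEUEING lets a driver hand a buffer back without waking userspace: the core re-queues it to the driver immediately if streaming has started. A hedged sketch of a capture interrupt handler deciding what to do with a finished buffer:

#include <media/videobuf2-core.h>

static void sketch_buf_finish(struct vb2_buffer *vb, bool transfer_ok)
{
	if (transfer_ok) {
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);	/* wakes dqbuf waiters */
	} else {
		/* corrupt frame: give it straight back to the hardware,
		 * userspace never sees it */
		vb2_buffer_done(vb, VB2_BUF_STATE_REQUEUEING);
	}
}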
@@ -1244,19 +1258,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
- static bool __check_once __read_mostly;
+ static bool check_once;
- if (__check_once)
+ if (check_once)
return;
- __check_once = true;
- __WARN();
+ check_once = true;
+ WARN_ON(1);
- pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
else
- pr_warn_once("use the actual size instead.\n");
+ pr_warn("use the actual size instead.\n");
}
/**
@@ -1629,6 +1643,8 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->owned_by_drv_count);
+ trace_vb2_buf_queue(q, vb);
+
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
@@ -1675,9 +1691,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
- down_read(&current->mm->mmap_sem);
ret = __qbuf_userptr(vb, b);
- up_read(&current->mm->mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
@@ -1878,6 +1892,8 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
vb->v4l2_buf.timecode = b->timecode;
}
+ trace_vb2_qbuf(q, vb);
+
/*
* If already streaming, give the buffer to driver for processing.
* If not, the buffer will be given to driver on next streamon.
@@ -2123,6 +2139,9 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
q->queued_count--;
+
+ trace_vb2_dqbuf(q, vb);
+
if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
vb->v4l2_buf.flags & V4L2_BUF_FLAG_LAST)
q->last_buffer_dequeued = true;
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 94c1e6455d36..2397ceb1dc6b 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -32,15 +32,13 @@ struct vb2_dc_buf {
dma_addr_t dma_addr;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
+ struct frame_vector *vec;
/* MMAP related */
struct vb2_vmarea_handler handler;
atomic_t refcount;
struct sg_table *sgt_base;
- /* USERPTR related */
- struct vm_area_struct *vma;
-
/* DMABUF related */
struct dma_buf_attachment *db_attach;
};
@@ -49,24 +47,6 @@ struct vb2_dc_buf {
/* scatterlist table functions */
/*********************************************/
-
-static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
- void (*cb)(struct page *pg))
-{
- struct scatterlist *s;
- unsigned int i;
-
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
- struct page *page = sg_page(s);
- unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
- >> PAGE_SHIFT;
- unsigned int j;
-
- for (j = 0; j < n_pages; ++j, ++page)
- cb(page);
- }
-}
-
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
struct scatterlist *s;
@@ -429,92 +409,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
/* callbacks for USERPTR buffers */
/*********************************************/
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
- struct vm_area_struct *vma, unsigned long *res)
-{
- unsigned long pfn, start_pfn, prev_pfn;
- unsigned int i;
- int ret;
-
- if (!vma_is_io(vma))
- return -EFAULT;
-
- ret = follow_pfn(vma, start, &pfn);
- if (ret)
- return ret;
-
- start_pfn = pfn;
- start += PAGE_SIZE;
-
- for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
- prev_pfn = pfn;
- ret = follow_pfn(vma, start, &pfn);
-
- if (ret) {
- pr_err("no page for address %lu\n", start);
- return ret;
- }
- if (pfn != prev_pfn + 1)
- return -EINVAL;
- }
-
- *res = start_pfn;
- return 0;
-}
-
-static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
- int n_pages, struct vm_area_struct *vma,
- enum dma_data_direction dma_dir)
-{
- if (vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
- unsigned long pfn;
- int ret = follow_pfn(vma, start, &pfn);
-
- if (!pfn_valid(pfn))
- return -EINVAL;
-
- if (ret) {
- pr_err("no page for address %lu\n", start);
- return ret;
- }
- pages[i] = pfn_to_page(pfn);
- }
- } else {
- int n;
-
- n = get_user_pages(current, current->mm, start & PAGE_MASK,
- n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
- /* negative error means that no page was pinned */
- n = max(n, 0);
- if (n != n_pages) {
- pr_err("got only %d of %d user pages\n", n, n_pages);
- while (n)
- put_page(pages[--n]);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
-static void vb2_dc_put_dirty_page(struct page *page)
-{
- set_page_dirty_lock(page);
- put_page(page);
-}
-
static void vb2_dc_put_userptr(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ int i;
+ struct page **pages;
if (sgt) {
DEFINE_DMA_ATTRS(attrs);
@@ -526,13 +426,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
*/
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
buf->dma_dir, &attrs);
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
-
+ pages = frame_vector_pages(buf->vec);
+ /* sgt should exist only if vector contains pages... */
+ BUG_ON(IS_ERR(pages));
+ for (i = 0; i < frame_vector_count(buf->vec); i++)
+ set_page_dirty_lock(pages[i]);
sg_free_table(sgt);
kfree(sgt);
}
- vb2_put_vma(buf->vma);
+ vb2_destroy_framevec(buf->vec);
kfree(buf);
}
@@ -572,13 +474,10 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
{
struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
- unsigned long start;
- unsigned long end;
+ struct frame_vector *vec;
unsigned long offset;
- struct page **pages;
- int n_pages;
+ int n_pages, i;
int ret = 0;
- struct vm_area_struct *vma;
struct sg_table *sgt;
unsigned long contig_size;
unsigned long dma_align = dma_get_cache_alignment();
@@ -604,72 +503,43 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->dev = conf->dev;
buf->dma_dir = dma_dir;
- start = vaddr & PAGE_MASK;
offset = vaddr & ~PAGE_MASK;
- end = PAGE_ALIGN(vaddr + size);
- n_pages = (end - start) >> PAGE_SHIFT;
-
- pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- pr_err("failed to allocate pages table\n");
+ vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+ if (IS_ERR(vec)) {
+ ret = PTR_ERR(vec);
goto fail_buf;
}
+ buf->vec = vec;
+ n_pages = frame_vector_count(vec);
+ ret = frame_vector_to_pages(vec);
+ if (ret < 0) {
+ unsigned long *nums = frame_vector_pfns(vec);
- /* current->mm->mmap_sem is taken by videobuf2 core */
- vma = find_vma(current->mm, vaddr);
- if (!vma) {
- pr_err("no vma for address %lu\n", vaddr);
- ret = -EFAULT;
- goto fail_pages;
- }
-
- if (vma->vm_end < vaddr + size) {
- pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
- ret = -EFAULT;
- goto fail_pages;
- }
-
- buf->vma = vb2_get_vma(vma);
- if (!buf->vma) {
- pr_err("failed to copy vma\n");
- ret = -ENOMEM;
- goto fail_pages;
- }
-
- /* extract page list from userspace mapping */
- ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
- if (ret) {
- unsigned long pfn;
- if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
- buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
- buf->size = size;
- kfree(pages);
- return buf;
- }
-
- pr_err("failed to get user pages\n");
- goto fail_vma;
+ /*
+ * Failed to convert to pages... Check the memory is physically
+ * contiguous and use direct mapping
+ */
+ for (i = 1; i < n_pages; i++)
+ if (nums[i-1] + 1 != nums[i])
+ goto fail_pfnvec;
+ buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+ goto out;
}
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
pr_err("failed to allocate sg table\n");
ret = -ENOMEM;
- goto fail_get_user_pages;
+ goto fail_pfnvec;
}
- ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
offset, size, GFP_KERNEL);
if (ret) {
pr_err("failed to initialize sg table\n");
goto fail_sgt;
}
- /* pages are no longer needed */
- kfree(pages);
- pages = NULL;
-
/*
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
@@ -691,8 +561,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
}
buf->dma_addr = sg_dma_address(sgt->sgl);
- buf->size = size;
buf->dma_sgt = sgt;
+out:
+ buf->size = size;
return buf;
@@ -701,23 +572,13 @@ fail_map_sg:
buf->dma_dir, &attrs);
fail_sgt_init:
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, put_page);
sg_free_table(sgt);
fail_sgt:
kfree(sgt);
-fail_get_user_pages:
- if (pages && !vma_is_io(buf->vma))
- while (n_pages)
- put_page(pages[--n_pages]);
-
-fail_vma:
- vb2_put_vma(buf->vma);
-
-fail_pages:
- kfree(pages); /* kfree is NULL-proof */
+fail_pfnvec:
+ vb2_destroy_framevec(vec);
fail_buf:
kfree(buf);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 7289b81bd7b7..be7bd6535c9d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
struct device *dev;
void *vaddr;
struct page **pages;
+ struct frame_vector *vec;
int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
- struct vm_area_struct *vma;
struct dma_buf_attachment *db_attach;
};
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir)
{
struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
- unsigned long first, last;
- int num_pages_from_user;
- struct vm_area_struct *vma;
struct sg_table *sgt;
DEFINE_DMA_ATTRS(attrs);
+ struct frame_vector *vec;
dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
@@ -254,61 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
+ vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+ if (IS_ERR(vec))
+ goto userptr_fail_pfnvec;
+ buf->vec = vec;
- first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
- last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->num_pages = last - first + 1;
-
- buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto userptr_fail_alloc_pages;
-
- vma = find_vma(current->mm, vaddr);
- if (!vma) {
- dprintk(1, "no vma for address %lu\n", vaddr);
- goto userptr_fail_find_vma;
- }
-
- if (vma->vm_end < vaddr + size) {
- dprintk(1, "vma at %lu is too small for %lu bytes\n",
- vaddr, size);
- goto userptr_fail_find_vma;
- }
-
- buf->vma = vb2_get_vma(vma);
- if (!buf->vma) {
- dprintk(1, "failed to copy vma\n");
- goto userptr_fail_find_vma;
- }
-
- if (vma_is_io(buf->vma)) {
- for (num_pages_from_user = 0;
- num_pages_from_user < buf->num_pages;
- ++num_pages_from_user, vaddr += PAGE_SIZE) {
- unsigned long pfn;
-
- if (follow_pfn(vma, vaddr, &pfn)) {
- dprintk(1, "no page for address %lu\n", vaddr);
- break;
- }
- buf->pages[num_pages_from_user] = pfn_to_page(pfn);
- }
- } else
- num_pages_from_user = get_user_pages(current, current->mm,
- vaddr & PAGE_MASK,
- buf->num_pages,
- buf->dma_dir == DMA_FROM_DEVICE,
- 1, /* force */
- buf->pages,
- NULL);
-
- if (num_pages_from_user != buf->num_pages)
- goto userptr_fail_get_user_pages;
+ buf->pages = frame_vector_pages(vec);
+ if (IS_ERR(buf->pages))
+ goto userptr_fail_sgtable;
+ buf->num_pages = frame_vector_count(vec);
if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, buf->offset, size, 0))
- goto userptr_fail_alloc_table_from_pages;
+ goto userptr_fail_sgtable;
sgt = &buf->sg_table;
/*
@@ -324,17 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
userptr_fail_map:
sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
- dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- buf->num_pages, num_pages_from_user);
- if (!vma_is_io(buf->vma))
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
- vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
- kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+ vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
kfree(buf);
return NULL;
}
@@ -362,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
while (--i >= 0) {
if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]);
- if (!vma_is_io(buf->vma))
- put_page(buf->pages[i]);
}
- kfree(buf->pages);
- vb2_put_vma(buf->vma);
+ vb2_destroy_framevec(buf->vec);
kfree(buf);
}
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 81c1ad8b2cf1..48c6a49c4928 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -23,118 +23,62 @@
#include <media/videobuf2-memops.h>
/**
- * vb2_get_vma() - acquire and lock the virtual memory area
- * @vma: given virtual memory area
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start: Virtual user address where we start mapping
+ * @length: Length of a range to map
+ * @write: Should we map for writing into the area
*
- * This function attempts to acquire an area mapped in the userspace for
- * the duration of a hardware operation. The area is "locked" by performing
- * the same set of operation that are done when process calls fork() and
- * memory areas are duplicated.
- *
- * Returns a copy of a virtual memory region on success or NULL.
- */
-struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
-{
- struct vm_area_struct *vma_copy;
-
- vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
- if (vma_copy == NULL)
- return NULL;
-
- if (vma->vm_ops && vma->vm_ops->open)
- vma->vm_ops->open(vma);
-
- if (vma->vm_file)
- get_file(vma->vm_file);
-
- memcpy(vma_copy, vma, sizeof(*vma));
-
- vma_copy->vm_mm = NULL;
- vma_copy->vm_next = NULL;
- vma_copy->vm_prev = NULL;
-
- return vma_copy;
-}
-EXPORT_SYMBOL_GPL(vb2_get_vma);
-
-/**
- * vb2_put_userptr() - release a userspace virtual memory area
- * @vma: virtual memory region associated with the area to be released
- *
- * This function releases the previously acquired memory area after a hardware
- * operation.
+ * This function allocates and fills in a vector with pfns corresponding to
+ * virtual address range passed in arguments. If pfns have corresponding pages,
+ * page references are also grabbed to pin pages in memory. The function
+ * returns pointer to the vector on success and error pointer in case of
+ * failure. Returned vector needs to be freed via vb2_destroy_framevec().
*/
-void vb2_put_vma(struct vm_area_struct *vma)
+struct frame_vector *vb2_create_framevec(unsigned long start,
+ unsigned long length,
+ bool write)
{
- if (!vma)
- return;
-
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
-
- if (vma->vm_file)
- fput(vma->vm_file);
-
- kfree(vma);
+ int ret;
+ unsigned long first, last;
+ unsigned long nr;
+ struct frame_vector *vec;
+
+ first = start >> PAGE_SHIFT;
+ last = (start + length - 1) >> PAGE_SHIFT;
+ nr = last - first + 1;
+ vec = frame_vector_create(nr);
+ if (!vec)
+ return ERR_PTR(-ENOMEM);
+ ret = get_vaddr_frames(start, nr, write, 1, vec);
+ if (ret < 0)
+ goto out_destroy;
+ /* We accept only complete set of PFNs */
+ if (ret != nr) {
+ ret = -EFAULT;
+ goto out_release;
+ }
+ return vec;
+out_release:
+ put_vaddr_frames(vec);
+out_destroy:
+ frame_vector_destroy(vec);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(vb2_put_vma);
+EXPORT_SYMBOL(vb2_create_framevec);
/**
- * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
- * @vaddr: starting virtual address of the area to be verified
- * @size: size of the area
- * @res_paddr: will return physical address for the given vaddr
- * @res_vma: will return locked copy of struct vm_area for the given area
- *
- * This function will go through memory area of size @size mapped at @vaddr and
- * verify that the underlying physical pages are contiguous. If they are
- * contiguous the virtual memory area is locked and a @res_vma is filled with
- * the copy and @res_pa set to the physical address of the buffer.
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec: vector of pfns / pages to release
*
- * Returns 0 on success.
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
*/
-int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
- struct vm_area_struct **res_vma, dma_addr_t *res_pa)
+void vb2_destroy_framevec(struct frame_vector *vec)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long offset, start, end;
- unsigned long this_pfn, prev_pfn;
- dma_addr_t pa = 0;
-
- start = vaddr;
- offset = start & ~PAGE_MASK;
- end = start + size;
-
- vma = find_vma(mm, start);
-
- if (vma == NULL || vma->vm_end < end)
- return -EFAULT;
-
- for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
- int ret = follow_pfn(vma, start, &this_pfn);
- if (ret)
- return ret;
-
- if (prev_pfn == 0)
- pa = this_pfn << PAGE_SHIFT;
- else if (this_pfn != prev_pfn + 1)
- return -EFAULT;
-
- prev_pfn = this_pfn;
- }
-
- /*
- * Memory is contigous, lock vma and return to the caller
- */
- *res_vma = vb2_get_vma(vma);
- if (*res_vma == NULL)
- return -ENOMEM;
-
- *res_pa = pa + offset;
- return 0;
+ put_vaddr_frames(vec);
+ frame_vector_destroy(vec);
}
-EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
+EXPORT_SYMBOL(vb2_destroy_framevec);
/**
* vb2_common_vm_open() - increase refcount of the vma
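The two helpers above replace the per-allocator VMA walking with the generic frame-vector API; the contig, sg and vmalloc allocators all follow the same calling pattern. A hedged sketch of that pattern (sketch_pin is hypothetical; the mapping steps are elided):

#include <linux/err.h>
#include <linux/mm.h>
#include <media/videobuf2-memops.h>

static int sketch_pin(unsigned long vaddr, unsigned long size, bool write)
{
	struct frame_vector *vec = vb2_create_framevec(vaddr, size, write);
	unsigned int n_pages, i;

	if (IS_ERR(vec))
		return PTR_ERR(vec);
	n_pages = frame_vector_count(vec);

	if (frame_vector_to_pages(vec) < 0) {
		/* no struct pages behind these pfns (e.g. VM_PFNMAP): only
		 * usable when the range is physically contiguous */
		unsigned long *pfns = frame_vector_pfns(vec);

		for (i = 1; i < n_pages; i++)
			if (pfns[i - 1] + 1 != pfns[i])
				goto fail;
		/* ... map pfns[0] << PAGE_SHIFT directly ... */
	}
	/* else frame_vector_pages(vec) yields struct page pointers */

	vb2_destroy_framevec(vec);	/* unpins the pages and frees the vector */
	return 0;
fail:
	vb2_destroy_framevec(vec);
	return -EFAULT;
}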
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 2fe4c27f524a..ecb8f0c7f025 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -23,11 +23,9 @@
struct vb2_vmalloc_buf {
void *vaddr;
- struct page **pages;
- struct vm_area_struct *vma;
+ struct frame_vector *vec;
enum dma_data_direction dma_dir;
unsigned long size;
- unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
struct dma_buf *dbuf;
@@ -76,10 +74,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
- unsigned long first, last;
- int n_pages, offset;
- struct vm_area_struct *vma;
- dma_addr_t physp;
+ struct frame_vector *vec;
+ int n_pages, offset, i;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
@@ -88,51 +84,36 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
-
-
- vma = find_vma(current->mm, vaddr);
- if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
- if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
- goto fail_pages_array_alloc;
- buf->vma = vma;
- buf->vaddr = (__force void *)ioremap_nocache(physp, size);
- if (!buf->vaddr)
- goto fail_pages_array_alloc;
+ vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+ if (IS_ERR(vec))
+ goto fail_pfnvec_create;
+ buf->vec = vec;
+ n_pages = frame_vector_count(vec);
+ if (frame_vector_to_pages(vec) < 0) {
+ unsigned long *nums = frame_vector_pfns(vec);
+
+ /*
+ * We cannot get page pointers for these pfns. Check memory is
+ * physically contiguous and use direct mapping.
+ */
+ for (i = 1; i < n_pages; i++)
+ if (nums[i-1] + 1 != nums[i])
+ goto fail_map;
+ buf->vaddr = (__force void *)
+ ioremap_nocache(nums[0] << PAGE_SHIFT, size);
} else {
- first = vaddr >> PAGE_SHIFT;
- last = (vaddr + size - 1) >> PAGE_SHIFT;
- buf->n_pages = last - first + 1;
- buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto fail_pages_array_alloc;
-
- /* current->mm->mmap_sem is taken by videobuf2 core */
- n_pages = get_user_pages(current, current->mm,
- vaddr & PAGE_MASK, buf->n_pages,
- dma_dir == DMA_FROM_DEVICE,
- 1, /* force */
- buf->pages, NULL);
- if (n_pages != buf->n_pages)
- goto fail_get_user_pages;
-
- buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
+ buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
PAGE_KERNEL);
- if (!buf->vaddr)
- goto fail_get_user_pages;
}
+ if (!buf->vaddr)
+ goto fail_map;
buf->vaddr += offset;
return buf;
-fail_get_user_pages:
- pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
- buf->n_pages);
- while (--n_pages >= 0)
- put_page(buf->pages[n_pages]);
- kfree(buf->pages);
-
-fail_pages_array_alloc:
+fail_map:
+ vb2_destroy_framevec(vec);
+fail_pfnvec_create:
kfree(buf);
return NULL;
@@ -143,20 +124,21 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
struct vb2_vmalloc_buf *buf = buf_priv;
unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
unsigned int i;
+ struct page **pages;
+ unsigned int n_pages;
- if (buf->pages) {
+ if (!buf->vec->is_pfns) {
+ n_pages = frame_vector_count(buf->vec);
+ pages = frame_vector_pages(buf->vec);
if (vaddr)
- vm_unmap_ram((void *)vaddr, buf->n_pages);
- for (i = 0; i < buf->n_pages; ++i) {
- if (buf->dma_dir == DMA_FROM_DEVICE)
- set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
- }
- kfree(buf->pages);
+ vm_unmap_ram((void *)vaddr, n_pages);
+ if (buf->dma_dir == DMA_FROM_DEVICE)
+ for (i = 0; i < n_pages; i++)
+ set_page_dirty_lock(pages[i]);
} else {
- vb2_put_vma(buf->vma);
iounmap((__force void __iomem *)buf->vaddr);
}
+ vb2_destroy_framevec(buf->vec);
kfree(buf);
}
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 8406c668ecdc..c6a644b22af4 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,14 @@ menuconfig MEMORY
if MEMORY
+config ARM_PL172_MPMC
+ tristate "ARM PL172 MPMC driver"
+ depends on ARM_AMBA && OF
+ help
+ This selects the ARM PrimeCell PL172 MultiPort Memory Controller.
+ If you have an embedded system with an AMBA bus and a PL172
+ controller, say Y or M here.
+
config ATMEL_SDRAMC
bool "Atmel (Multi-port DDR-)SDRAM Controller"
default y
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b670441e3cdf..1c46af501610 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,6 +5,7 @@
ifeq ($(CONFIG_DDR),y)
obj-$(CONFIG_OF) += of_memory.o
endif
+obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 410c39749872..e87459f6d686 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -62,7 +62,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
return -ENODEV;
for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
return i;
@@ -79,16 +79,16 @@ static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
/*
* Clear all the common status and event registers
*/
- if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
/* enable all error and events */
- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
/* enable all error and event interrupts */
- out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
- out_be32(&ifc->cm_erattr0, 0x0);
- out_be32(&ifc->cm_erattr1, 0x0);
+ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
+ ifc_out32(0x0, &ifc->cm_erattr0);
+ ifc_out32(0x0, &ifc->cm_erattr1);
return 0;
}
@@ -127,9 +127,9 @@ static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
spin_lock_irqsave(&nand_irq_lock, flags);
- stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
if (stat) {
- out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
ctrl->nand_stat = stat;
wake_up(&ctrl->nand_wait);
}
@@ -161,16 +161,16 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
irqreturn_t ret = IRQ_NONE;
/* read for chip select error */
- cs_err = in_be32(&ifc->cm_evter_stat);
+ cs_err = ifc_in32(&ifc->cm_evter_stat);
if (cs_err) {
dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
"any memory bank 0x%08X\n", cs_err);
/* clear the chip select error */
- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
/* read error attribute registers print the error information */
- status = in_be32(&ifc->cm_erattr0);
- err_addr = in_be32(&ifc->cm_erattr1);
+ status = ifc_in32(&ifc->cm_erattr0);
+ err_addr = ifc_in32(&ifc->cm_erattr1);
if (status & IFC_CM_ERATTR0_ERTYP_READ)
dev_err(ctrl->dev, "Read transaction error"
@@ -231,6 +231,23 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
goto err;
}
+ version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+ FSL_IFC_VERSION_MASK;
+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+ version >> 24, (version >> 16) & 0xf, banks);
+
+ fsl_ifc_ctrl_dev->version = version;
+ fsl_ifc_ctrl_dev->banks = banks;
+
+ if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+ fsl_ifc_ctrl_dev->little_endian = true;
+ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
+ } else {
+ fsl_ifc_ctrl_dev->little_endian = false;
+ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
+ }
+
version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
FSL_IFC_VERSION_MASK;
banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
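
The ifc_in32()/ifc_out32() accessors introduced above select byte order at run time from the "little-endian" device-tree property. A sketch in the spirit of the helpers in include/linux/fsl_ifc.h (the real ones also have 16- and 8-bit variants and consult the same global fsl_ifc_ctrl_dev):

static inline u32 sketch_ifc_in32(void __iomem *addr)
{
	if (fsl_ifc_ctrl_dev->little_endian)
		return ioread32(addr);
	return ioread32be(addr);
}

static inline void sketch_ifc_out32(u32 val, void __iomem *addr)
{
	if (fsl_ifc_ctrl_dev->little_endian)
		iowrite32(val, addr);
	else
		iowrite32be(val, addr);
}
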
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 8911e51d410a..32ac049f2bc4 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1176,8 +1176,8 @@ static int gpmc_setup_irq(void)
gpmc_client_irq[i].irq = gpmc_irq_start + i;
irq_set_chip_and_handler(gpmc_client_irq[i].irq,
&gpmc_irq_chip, handle_simple_irq);
- set_irq_flags(gpmc_client_irq[i].irq,
- IRQF_VALID | IRQF_NOAUTOEN);
+ irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST,
+ IRQ_NOAUTOEN);
}
/* Disable interrupts */
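
Roughly, the old ARM-only set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN) cleared IRQ_NOREQUEST (making the IRQ requestable) and set IRQ_NOAUTOEN, which is exactly what the generic call now expresses:

/* Generic equivalent of set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN) */
irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
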
@@ -1200,7 +1200,6 @@ static int gpmc_free_irq(void)
for (i = 0; i < GPMC_NR_IRQ; i++) {
irq_set_handler(gpmc_client_irq[i].irq, NULL);
irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
- irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
}
irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
@@ -2074,14 +2073,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
ret = gpmc_probe_nand_child(pdev, child);
else if (of_node_cmp(child->name, "onenand") == 0)
ret = gpmc_probe_onenand_child(pdev, child);
- else if (of_node_cmp(child->name, "ethernet") == 0 ||
- of_node_cmp(child->name, "nor") == 0 ||
- of_node_cmp(child->name, "uart") == 0)
+ else
ret = gpmc_probe_generic_child(pdev, child);
-
- if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
- __func__, child->full_name))
- of_node_put(child);
}
return 0;
@@ -2251,6 +2244,9 @@ void omap3_gpmc_save_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2283,6 +2279,9 @@ void omap3_gpmc_restore_context(void)
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
new file mode 100644
index 000000000000..b2ef6072fbf4
--- /dev/null
+++ b/drivers/memory/pl172.c
@@ -0,0 +1,301 @@
+/*
+ * Memory controller driver for the ARM PrimeCell PL172
+ * MultiPort Memory Controller
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on:
+ * TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/time.h>
+
+#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * n)
+#define MPMC_STATIC_CFG_MW_8BIT 0x0
+#define MPMC_STATIC_CFG_MW_16BIT 0x1
+#define MPMC_STATIC_CFG_MW_32BIT 0x2
+#define MPMC_STATIC_CFG_PM BIT(3)
+#define MPMC_STATIC_CFG_PC BIT(6)
+#define MPMC_STATIC_CFG_PB BIT(7)
+#define MPMC_STATIC_CFG_EW BIT(8)
+#define MPMC_STATIC_CFG_B BIT(19)
+#define MPMC_STATIC_CFG_P BIT(20)
+#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WEN_MAX 0x0f
+#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * n)
+#define MPMC_STATIC_WAIT_OEN_MAX 0x0f
+#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * n)
+#define MPMC_STATIC_WAIT_RD_MAX 0x1f
+#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * n)
+#define MPMC_STATIC_WAIT_PAGE_MAX 0x1f
+#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WR_MAX 0x1f
+#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * n)
+#define MPMC_STATIC_WAIT_TURN_MAX 0x0f
+
+/* Maximum number of static chip selects */
+#define PL172_MAX_CS 4
+
+struct pl172_data {
+ void __iomem *base;
+ unsigned long rate;
+ struct clk *clk;
+};
+
+static int pl172_timing_prop(struct amba_device *adev,
+ const struct device_node *np, const char *name,
+ u32 reg_offset, u32 max, int start)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+ int cycles;
+ u32 val;
+
+ if (!of_property_read_u32(np, name, &val)) {
+ cycles = DIV_ROUND_UP(val * pl172->rate, NSEC_PER_MSEC) - start;
+ if (cycles < 0) {
+ cycles = 0;
+ } else if (cycles > max) {
+ dev_err(&adev->dev, "%s timing too tight\n", name);
+ return -EINVAL;
+ }
+
+ writel(cycles, pl172->base + reg_offset);
+ }
+
+ dev_dbg(&adev->dev, "%s: %u cycle(s)\n", name, start +
+ readl(pl172->base + reg_offset));
+
+ return 0;
+}
+
+static int pl172_setup_static(struct amba_device *adev,
+ struct device_node *np, u32 cs)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+ u32 cfg;
+ int ret;
+
+ /* MPMC static memory configuration */
+ if (!of_property_read_u32(np, "mpmc,memory-width", &cfg)) {
+ if (cfg == 8) {
+ cfg = MPMC_STATIC_CFG_MW_8BIT;
+ } else if (cfg == 16) {
+ cfg = MPMC_STATIC_CFG_MW_16BIT;
+ } else if (cfg == 32) {
+ cfg = MPMC_STATIC_CFG_MW_32BIT;
+ } else {
+ dev_err(&adev->dev, "invalid memory width cs%u\n", cs);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&adev->dev, "memory-width property required\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(np, "mpmc,async-page-mode"))
+ cfg |= MPMC_STATIC_CFG_PM;
+
+ if (of_property_read_bool(np, "mpmc,cs-active-high"))
+ cfg |= MPMC_STATIC_CFG_PC;
+
+ if (of_property_read_bool(np, "mpmc,byte-lane-low"))
+ cfg |= MPMC_STATIC_CFG_PB;
+
+ if (of_property_read_bool(np, "mpmc,extended-wait"))
+ cfg |= MPMC_STATIC_CFG_EW;
+
+ if (of_property_read_bool(np, "mpmc,buffer-enable"))
+ cfg |= MPMC_STATIC_CFG_B;
+
+ if (of_property_read_bool(np, "mpmc,write-protect"))
+ cfg |= MPMC_STATIC_CFG_P;
+
+ writel(cfg, pl172->base + MPMC_STATIC_CFG(cs));
+ dev_dbg(&adev->dev, "mpmc static config cs%u: 0x%08x\n", cs, cfg);
+
+ /* MPMC static memory timing */
+ ret = pl172_timing_prop(adev, np, "mpmc,write-enable-delay",
+ MPMC_STATIC_WAIT_WEN(cs),
+ MPMC_STATIC_WAIT_WEN_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,output-enable-delay",
+ MPMC_STATIC_WAIT_OEN(cs),
+ MPMC_STATIC_WAIT_OEN_MAX, 0);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,read-access-delay",
+ MPMC_STATIC_WAIT_RD(cs),
+ MPMC_STATIC_WAIT_RD_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,page-mode-read-delay",
+ MPMC_STATIC_WAIT_PAGE(cs),
+ MPMC_STATIC_WAIT_PAGE_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,write-access-delay",
+ MPMC_STATIC_WAIT_WR(cs),
+ MPMC_STATIC_WAIT_WR_MAX, 2);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,turn-round-delay",
+ MPMC_STATIC_WAIT_TURN(cs),
+ MPMC_STATIC_WAIT_TURN_MAX, 1);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ dev_err(&adev->dev, "failed to configure cs%u\n", cs);
+ return ret;
+}
+
+static int pl172_parse_cs_config(struct amba_device *adev,
+ struct device_node *np)
+{
+ u32 cs;
+
+ if (!of_property_read_u32(np, "mpmc,cs", &cs)) {
+ if (cs >= PL172_MAX_CS) {
+ dev_err(&adev->dev, "cs%u invalid\n", cs);
+ return -EINVAL;
+ }
+
+ return pl172_setup_static(adev, np, cs);
+ }
+
+ dev_err(&adev->dev, "cs property required\n");
+
+ return -EINVAL;
+}
+
+static const char * const pl172_revisions[] = {"r1", "r2", "r2p3", "r2p4"};
+
+static int pl172_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device_node *child_np, *np = adev->dev.of_node;
+ struct device *dev = &adev->dev;
+ static const char *rev = "?";
+ struct pl172_data *pl172;
+ int ret;
+
+ if (amba_part(adev) == 0x172) {
+ if (amba_rev(adev) < ARRAY_SIZE(pl172_revisions))
+ rev = pl172_revisions[amba_rev(adev)];
+ }
+
+ dev_info(dev, "ARM PL%x revision %s\n", amba_part(adev), rev);
+
+ pl172 = devm_kzalloc(dev, sizeof(*pl172), GFP_KERNEL);
+ if (!pl172)
+ return -ENOMEM;
+
+ pl172->clk = devm_clk_get(dev, "mpmcclk");
+ if (IS_ERR(pl172->clk)) {
+ dev_err(dev, "no mpmcclk provided clock\n");
+ return PTR_ERR(pl172->clk);
+ }
+
+ ret = clk_prepare_enable(pl172->clk);
+ if (ret) {
+ dev_err(dev, "unable to mpmcclk enable clock\n");
+ return ret;
+ }
+
+ pl172->rate = clk_get_rate(pl172->clk) / MSEC_PER_SEC;
+ if (!pl172->rate) {
+ dev_err(dev, "unable to get mpmcclk clock rate\n");
+ ret = -EINVAL;
+ goto err_clk_enable;
+ }
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret) {
+ dev_err(dev, "unable to request AMBA regions\n");
+ goto err_clk_enable;
+ }
+
+ pl172->base = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!pl172->base) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_no_ioremap;
+ }
+
+ amba_set_drvdata(adev, pl172);
+
+ /*
+	 * Loop through each child node, each of which represents a chip
+	 * select, and configure its parameters and timing. If successful,
+	 * populate the devices under that node.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = pl172_parse_cs_config(adev, child_np);
+ if (ret)
+ continue;
+
+ of_platform_populate(child_np, NULL, NULL, dev);
+ }
+
+ return 0;
+
+err_no_ioremap:
+ amba_release_regions(adev);
+err_clk_enable:
+ clk_disable_unprepare(pl172->clk);
+ return ret;
+}
+
+static int pl172_remove(struct amba_device *adev)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+
+ clk_disable_unprepare(pl172->clk);
+ amba_release_regions(adev);
+
+ return 0;
+}
+
+static const struct amba_id pl172_ids[] = {
+ {
+ .id = 0x07341172,
+ .mask = 0xffffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, pl172_ids);
+
+static struct amba_driver pl172_driver = {
+ .drv = {
+ .name = "memory-pl172",
+ },
+ .probe = pl172_probe,
+ .remove = pl172_remove,
+ .id_table = pl172_ids,
+};
+module_amba_driver(pl172_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("PL172 Memory Controller Driver");
+MODULE_LICENSE("GPL v2");
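
A worked example of the nanosecond-to-cycle conversion in pl172_timing_prop(), using hypothetical numbers: with a 90 MHz MPMC clock, pl172->rate = 90000000 / MSEC_PER_SEC = 90000, which is numerically the cycle count per millisecond, so the DIV_ROUND_UP over NSEC_PER_MSEC rounds a nanosecond delay up to whole clock cycles:

/* "mpmc,read-access-delay" = 70 (ns), start = 1:
 *
 *	cycles = DIV_ROUND_UP(70 * 90000, NSEC_PER_MSEC) - 1
 *	       = DIV_ROUND_UP(6300000, 1000000) - 1
 *	       = 7 - 1 = 6
 *
 * 6 is written to MPMC_STATIC_WAIT_RD(cs); the hardware adds the one
 * implicit start cycle back, giving the requested 7-cycle delay. */
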
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 6a0b9ac54f05..c2cb671ffc4a 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -4,6 +4,7 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index c71ede67e6c8..a1ae0cc2b86d 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -42,7 +42,6 @@
#define MC_ERR_STATUS_ADR_HI_MASK 0x3
#define MC_ERR_STATUS_SECURITY (1 << 17)
#define MC_ERR_STATUS_RW (1 << 16)
-#define MC_ERR_STATUS_CLIENT_MASK 0x7f
#define MC_ERR_ADR 0x0c
@@ -67,6 +66,9 @@ static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_132_SOC
{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
@@ -283,7 +285,7 @@ static irqreturn_t tegra_mc_irq(int irq, void *data)
else
secure = "";
- id = value & MC_ERR_STATUS_CLIENT_MASK;
+ id = value & mc->soc->client_id_mask;
for (i = 0; i < mc->soc->num_clients; i++) {
if (mc->soc->clients[i].id == id) {
@@ -410,6 +412,8 @@ static int tegra_mc_probe(struct platform_device *pdev)
return err;
}
+ WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
+
value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
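
The client ID field in MC_ERR_STATUS is wider on newer SoCs (Tegra210 client IDs run up to 0x89, past the old fixed 0x7f mask), which is why the mask moves into the per-SoC data. The decode in tegra_mc_irq() then becomes:

/* Per-SoC decode: 0x7f on Tegra30/114/124/132, 0xff on Tegra210 */
id = value & mc->soc->client_id_mask;
for (i = 0; i < mc->soc->num_clients; i++)
	if (mc->soc->clients[i].id == id)
		break;	/* found the offending client */
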
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index b7361b0a6696..ddb16676c3af 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -41,4 +41,8 @@ extern const struct tegra_mc_soc tegra124_mc_soc;
extern const struct tegra_mc_soc tegra132_mc_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_mc_soc tegra210_mc_soc;
+#endif
+
#endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 9f579589e800..ba8fff3d66a6 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra114-mc.h>
#include "mc.h"
@@ -914,20 +912,6 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
{ .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
};
-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
- .flush_dcache = tegra114_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra114_smmu_soc = {
.clients = tegra114_mc_clients,
.num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
.supports_round_robin_arbitration = false,
.supports_request_limit = false,
+ .num_tlb_lines = 32,
.num_asids = 4,
- .ops = &tegra114_smmu_ops,
};
const struct tegra_mc_soc tegra114_mc_soc = {
@@ -944,5 +928,6 @@ const struct tegra_mc_soc tegra114_mc_soc = {
.num_clients = ARRAY_SIZE(tegra114_mc_clients),
.num_address_bits = 32,
.atom_size = 32,
+ .client_id_mask = 0x7f,
.smmu = &tegra114_smmu_soc,
};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 8620355776fe..3dac7be39654 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1027,7 +1027,40 @@ static int emc_debug_rate_set(void *data, u64 rate)
DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get,
emc_debug_rate_set, "%lld\n");
-static void emc_debugfs_init(struct device *dev)
+static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ struct emc_timing *timing = &emc->timings[i];
+
+ seq_printf(s, "%s%lu", prefix, timing->rate);
+
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int emc_debug_supported_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, emc_debug_supported_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations emc_debug_supported_rates_fops = {
+ .open = emc_debug_supported_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
{
struct dentry *root, *file;
struct clk *clk;
@@ -1048,6 +1081,11 @@ static void emc_debugfs_init(struct device *dev)
&emc_debug_rate_fops);
if (!file)
dev_err(dev, "failed to create debugfs entry\n");
+
+ file = debugfs_create_file("supported_rates", S_IRUGO, root, emc,
+ &emc_debug_supported_rates_fops);
+ if (!file)
+ dev_err(dev, "failed to create debugfs entry\n");
}
static int tegra_emc_probe(struct platform_device *pdev)
@@ -1119,7 +1157,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, emc);
if (IS_ENABLED(CONFIG_DEBUG_FS))
- emc_debugfs_init(&pdev->dev);
+ emc_debugfs_init(&pdev->dev, emc);
return 0;
};
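
The empty-string/space "prefix" idiom in emc_debug_supported_rates_show() emits the rates space-separated with no leading or trailing separator. The same idiom as a standalone userspace illustration:

#include <stdio.h>

/* Print "a b c\n" with single spaces between items only. */
static void print_rates(const unsigned long *rates, int n)
{
	const char *prefix = "";
	int i;

	for (i = 0; i < n; i++) {
		printf("%s%lu", prefix, rates[i]);
		prefix = " ";
	}
	putchar('\n');
}
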
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 966e1557e6f4..21e7255e3d96 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra124-mc.h>
#include "mc.h"
@@ -1002,20 +1000,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
};
#ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
- .flush_dcache = tegra124_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra124_smmu_soc = {
.clients = tegra124_mc_clients,
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
.num_asids = 128,
- .ops = &tegra124_smmu_ops,
};
const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1032,6 +1015,7 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
.num_address_bits = 34,
.atom_size = 32,
+ .client_id_mask = 0x7f,
.smmu = &tegra124_smmu_soc,
.emem_regs = tegra124_mc_emem_regs,
.num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
@@ -1039,18 +1023,6 @@ const struct tegra_mc_soc tegra124_mc_soc = {
#endif /* CONFIG_ARCH_TEGRA_124_SOC */
#ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- void *virt = page_address(page) + offset;
-
- __flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
- .flush_dcache = tegra132_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra132_smmu_soc = {
.clients = tegra124_mc_clients,
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1058,8 +1030,8 @@ static const struct tegra_smmu_soc tegra132_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
+ .num_tlb_lines = 32,
.num_asids = 128,
- .ops = &tegra132_smmu_ops,
};
const struct tegra_mc_soc tegra132_mc_soc = {
@@ -1067,6 +1039,7 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.num_clients = ARRAY_SIZE(tegra124_mc_clients),
.num_address_bits = 34,
.atom_size = 32,
+ .client_id_mask = 0x7f,
.smmu = &tegra132_smmu_soc,
};
#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
new file mode 100644
index 000000000000..5e144abe4c18
--- /dev/null
+++ b/drivers/memory/tegra/tegra210.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (C) 2015 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+#include <dt-bindings/memory/tegra210-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra210_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc2,
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc6,
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x1c,
+ .name = "nvencsrd",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ }, {
+ .id = 0x2b,
+ .name = "nvencswr",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ }, {
+ .id = 0x44,
+ .name = "ispra",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ }, {
+ .id = 0x46,
+ .name = "ispwa",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x47,
+ .name = "ispwb",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x4e,
+ .name = "isprab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ }, {
+ .id = 0x50,
+ .name = "ispwab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x51,
+ .name = "ispwbb",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x56,
+ .name = "a9avpscr",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ }, {
+ .id = 0x57,
+ .name = "a9avpscw",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x58,
+ .name = "gpusrd",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ }, {
+ .id = 0x59,
+ .name = "gpuswr",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x5a,
+ .name = "displayt",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x60,
+ .name = "sdmmcra",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ }, {
+ .id = 0x61,
+ .name = "sdmmcraa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ }, {
+ .id = 0x62,
+ .name = "sdmmcr",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ }, {
+ .id = 0x63,
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .name = "sdmmcrab",
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ }, {
+ .id = 0x64,
+ .name = "sdmmcwa",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x65,
+ .name = "sdmmcwaa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x66,
+ .name = "sdmmcw",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x67,
+ .name = "sdmmcwab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x6c,
+ .name = "vicsrd",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ }, {
+ .id = 0x6d,
+ .name = "vicswr",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x72,
+ .name = "viw",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x73,
+ .name = "displayd",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ }, {
+ .id = 0x78,
+ .name = "nvdecsrd",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ }, {
+ .id = 0x79,
+ .name = "nvdecswr",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x7a,
+ .name = "aper",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ }, {
+ .id = 0x7b,
+ .name = "apew",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x7e,
+ .name = "nvjpgsrd",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ }, {
+ .id = 0x7f,
+ .name = "nvjpgswr",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .smmu = {
+ .reg = 0x234,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x80,
+ .name = "sesrd",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
+ }, {
+ .id = 0x81,
+ .name = "seswr",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0xb98,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x82,
+ .name = "axiapr",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ }, {
+ .id = 0x83,
+ .name = "axiapw",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x84,
+ .name = "etrr",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ }, {
+ .id = 0x85,
+ .name = "etrw",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ }, {
+ .id = 0x86,
+ .name = "tsecsrdb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ }, {
+ .id = 0x87,
+ .name = "tsecswrb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ }, {
+ .id = 0x88,
+ .name = "gpusrd2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ }, {
+ .id = 0x89,
+ .name = "gpuswr2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
+ { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
+ { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
+ { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
+ { .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
+ { .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
+ { .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+};
+
+static const struct tegra_smmu_soc tegra210_smmu_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .swgroups = tegra210_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra210_swgroups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 32,
+ .num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra210_mc_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 64,
+ .client_id_mask = 0xff,
+ .smmu = &tegra210_smmu_soc,
+};
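
Each .la descriptor above names a latency-allowance register field (register offset, bit shift, field mask) plus a default value. A hedged sketch of how the core applies them at probe time (cf. tegra_mc_setup_latency_allowance() in mc.c):

for (i = 0; i < mc->soc->num_clients; i++) {
	const struct tegra_mc_client *client = &mc->soc->clients[i];
	u32 value;

	/* Mask out the field, then shift the default into place */
	value = mc_readl(mc, client->la.reg);
	value &= ~(client->la.mask << client->la.shift);
	value |= (client->la.def & client->la.mask) << client->la.shift;
	mc_writel(mc, value, client->la.reg);
}
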
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 1abcd8f6f3ba..b44737840e70 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>
-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra30-mc.h>
#include "mc.h"
@@ -936,20 +934,6 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
{ .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
};
-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
- size_t size)
-{
- phys_addr_t phys = page_to_phys(page) + offset;
- void *virt = page_address(page) + offset;
-
- __cpuc_flush_dcache_area(virt, size);
- outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
- .flush_dcache = tegra30_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra30_smmu_soc = {
.clients = tegra30_mc_clients,
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
.supports_round_robin_arbitration = false,
.supports_request_limit = false,
+ .num_tlb_lines = 16,
.num_asids = 4,
- .ops = &tegra30_smmu_ops,
};
const struct tegra_mc_soc tegra30_mc_soc = {
@@ -966,5 +950,6 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
.num_address_bits = 32,
.atom_size = 16,
+ .client_id_mask = 0x7f,
.smmu = &tegra30_smmu_soc,
};
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 70bb7530b22c..fc7393729081 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1859,6 +1859,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Basic sanity checks to prevent underflows or integer overflows */
+ if (karg.maxReplyBytes < 0 ||
+ karg.dataInSize < 0 ||
+ karg.dataOutSize < 0 ||
+ karg.dataSgeOffset < 0 ||
+ karg.maxSenseBytes < 0 ||
+ karg.dataSgeOffset > ioc->req_sz / 4)
+ return -EINVAL;
+
/* Verify that the final request frame will not be too large.
*/
sz = karg.dataSgeOffset * 4;
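
The new mptctl checks reject negative, user-controlled sizes before they feed the request-frame arithmetic, where a negative int would otherwise convert to a huge unsigned length. An illustrative standalone sketch with a hypothetical struct, not the fusion driver's types:

struct user_cmd {
	int data_in_size;
	int data_out_size;
	int sge_offset;		/* in 32-bit words */
};

static int validate_cmd(const struct user_cmd *cmd, int req_sz)
{
	/* All fields are signed and came from userspace: check before
	 * they are multiplied or used as buffer lengths. */
	if (cmd->data_in_size < 0 || cmd->data_out_size < 0 ||
	    cmd->sge_offset < 0 || cmd->sge_offset > req_sz / 4)
		return -EINVAL;
	return 0;
}
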
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 841717a2842c..f2d9fb4c4e8e 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -609,7 +609,6 @@ static int pm800_remove(struct i2c_client *client)
static struct i2c_driver pm800_driver = {
.driver = {
.name = "88PM800",
- .owner = THIS_MODULE,
.pm = &pm80x_pm_ops,
},
.probe = pm800_probe,
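
The ".owner = THIS_MODULE" removals in this and the following I2C drivers are safe because the registration path already records the owning module; in include/linux/i2c.h (v4.3):

/* i2c_add_driver() passes THIS_MODULE down, and i2c_register_driver()
 * stores it in driver->driver.owner, so per-driver .owner is redundant. */
#define i2c_add_driver(driver) \
	i2c_register_driver(THIS_MODULE, driver)
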
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e9d50644660c..39f2302e137b 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -267,7 +267,6 @@ static int pm805_remove(struct i2c_client *client)
static struct i2c_driver pm805_driver = {
.driver = {
.name = "88PM805",
- .owner = THIS_MODULE,
.pm = &pm80x_pm_ops,
},
.probe = pm805_probe,
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index e03b7f45b8f7..3269a9990b24 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -558,11 +558,7 @@ static int pm860x_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_data(virq, d->host_data);
irq_set_chip_and_handler(virq, &pm860x_irq_chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
@@ -1258,7 +1254,6 @@ MODULE_DEVICE_TABLE(of, pm860x_dt_ids);
static struct i2c_driver pm860x_driver = {
.driver = {
.name = "88PM860x",
- .owner = THIS_MODULE,
.pm = &pm860x_pm_ops,
.of_match_table = pm860x_dt_ids,
},
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 653815950aa2..99d63675f073 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -97,6 +97,7 @@ config MFD_CROS_EC
select MFD_CORE
select CHROME_PLATFORMS
select CROS_EC_PROTO
+ depends on X86 || ARM || COMPILE_TEST
help
If you say Y here you get support for the ChromeOS Embedded
Controller (EC) providing keyboard, battery and power services.
@@ -105,7 +106,7 @@ config MFD_CROS_EC
config MFD_CROS_EC_I2C
tristate "ChromeOS Embedded Controller (I2C)"
- depends on MFD_CROS_EC && CROS_EC_PROTO && I2C
+ depends on MFD_CROS_EC && I2C
help
If you say Y here, you get support for talking to the ChromeOS
@@ -115,7 +116,7 @@ config MFD_CROS_EC_I2C
config MFD_CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+ depends on MFD_CROS_EC && SPI
---help---
If you say Y here, you get support for talking to the ChromeOS EC
@@ -186,6 +187,18 @@ config MFD_DA9055
This driver can be built as a module. If built as a module it will be
called "da9055"
+config MFD_DA9062
+ tristate "Dialog Semiconductor DA9062 PMIC Support"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C=y
+ help
+	  Say yes here to add support for the Dialog Semiconductor DA9062 PMIC.
+ This includes the I2C driver and core APIs.
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_DA9063
bool "Dialog Semiconductor DA9063 PMIC Support"
select MFD_CORE
@@ -318,6 +331,7 @@ config LPC_SCH
config INTEL_SOC_PMIC
bool "Support for Intel Atom SoC PMIC"
+ depends on GPIOLIB
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
@@ -328,6 +342,29 @@ config INTEL_SOC_PMIC
thermal, charger and related power management functions
on these systems.
+config MFD_INTEL_LPSS
+ tristate
+ select COMMON_CLK
+ select MFD_CORE
+
+config MFD_INTEL_LPSS_ACPI
+ tristate "Intel Low Power Subsystem support in ACPI mode"
+ select MFD_INTEL_LPSS
+ depends on X86 && ACPI
+ help
+ This driver supports Intel Low Power Subsystem (LPSS) devices such as
+ I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+ PCH) in ACPI mode.
+
+config MFD_INTEL_LPSS_PCI
+ tristate "Intel Low Power Subsystem support in PCI mode"
+ select MFD_INTEL_LPSS
+ depends on X86 && PCI
+ help
+ This driver supports Intel Low Power Subsystem (LPSS) devices such as
+ I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+ PCH) in PCI mode.
+
config MFD_INTEL_MSIC
bool "Intel MSIC"
depends on INTEL_SCU_IPC
@@ -375,12 +412,14 @@ config MFD_KEMPLD
device may provide functions like watchdog, GPIO, UART and I2C bus.
The following modules are supported:
+ * COMe-bBL6
* COMe-bHL6
* COMe-bIP#
* COMe-bPC2 (ETXexpress-PC)
* COMe-bSC# (ETXexpress-SC T#)
* COMe-cBL6
* COMe-cBT6
+ * COMe-cBW6
* COMe-cCT6
* COMe-cDC2 (microETXexpress-DC)
* COMe-cHL6
@@ -1356,6 +1395,12 @@ config MFD_WM8997
help
Support for Wolfson Microelectronics WM8997 low power audio SoC
+config MFD_WM8998
+ bool "Wolfson Microelectronics WM8998"
+ depends on MFD_ARIZONA
+ help
+ Support for Wolfson Microelectronics WM8998 low power audio SoC
+
config MFD_WM8400
bool "Wolfson Microelectronics WM8400"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ea40e076cb61..a59e3fcc8626 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -48,6 +48,9 @@ endif
ifeq ($(CONFIG_MFD_WM8997),y)
obj-$(CONFIG_MFD_ARIZONA) += wm8997-tables.o
endif
+ifeq ($(CONFIG_MFD_WM8998),y)
+obj-$(CONFIG_MFD_ARIZONA) += wm8998-tables.o
+endif
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
wm831x-objs += wm831x-auxadc.o
@@ -110,10 +113,11 @@ obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o
da9055-objs := da9055-core.o da9055-i2c.o
obj-$(CONFIG_MFD_DA9055) += da9055.o
-
+obj-$(CONFIG_MFD_DA9062) += da9062-core.o
da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
obj-$(CONFIG_MFD_DA9063) += da9063.o
obj-$(CONFIG_MFD_DA9150) += da9150-core.o
+
obj-$(CONFIG_MFD_MAX14577) += max14577.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
@@ -161,6 +165,9 @@ obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
+obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
+obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
+obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 4e6e03d63e12..29b6a2d4ac72 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -500,7 +500,6 @@ MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table);
static struct i2c_driver aat2870_i2c_driver = {
.driver = {
.name = "aat2870",
- .owner = THIS_MODULE,
.pm = &aat2870_pm_ops,
},
.probe = aat2870_i2c_probe,
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4659ac1db039..f0afb44271f8 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -972,7 +972,6 @@ MODULE_DEVICE_TABLE(i2c, ab3100_id);
static struct i2c_driver ab3100_driver = {
.driver = {
.name = "ab3100",
- .owner = THIS_MODULE,
},
.id_table = ab3100_id,
.probe = ab3100_probe,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 000da72a0ae9..fefbe4cfa61d 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -565,11 +565,7 @@ static int ab8500_irq_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &ab8500_irq_chip,
handle_simple_irq);
irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f495b8b57dd7..ae88654595dc 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -351,7 +351,6 @@ MODULE_DEVICE_TABLE(i2c, adp5520_id);
static struct i2c_driver adp5520_driver = {
.driver = {
.name = "adp5520",
- .owner = THIS_MODULE,
.pm = &adp5520_pm,
},
.probe = adp5520_probe,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..44cfdbb295db 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -30,7 +30,7 @@
#include "arizona.h"
-static const char *wm5102_core_supplies[] = {
+static const char * const wm5102_core_supplies[] = {
"AVDD",
"DBVDD1",
};
@@ -146,17 +146,31 @@ static irqreturn_t arizona_underclocked(int irq, void *data)
static irqreturn_t arizona_overclocked(int irq, void *data)
{
struct arizona *arizona = data;
- unsigned int val[2];
+ unsigned int val[3];
int ret;
-
+
ret = regmap_bulk_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_6,
- &val[0], 2);
+ &val[0], 3);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read overclock status: %d\n",
ret);
return IRQ_NONE;
}
+ switch (arizona->type) {
+ case WM8998:
+ case WM1814:
+ /* Some bits are shifted on WM8998,
+ * rearrange to match the standard bit layout
+ */
+ val[0] = ((val[0] & 0x60e0) >> 1) |
+ ((val[0] & 0x1e00) >> 2) |
+ (val[0] & 0x000f);
+ break;
+ default:
+ break;
+ }
+
if (val[0] & ARIZONA_PWM_OVERCLOCKED_STS)
dev_err(arizona->dev, "PWM overclocked\n");
if (val[0] & ARIZONA_FX_CORE_OVERCLOCKED_STS)
@@ -201,6 +215,9 @@ static irqreturn_t arizona_overclocked(int irq, void *data)
if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
dev_err(arizona->dev, "ISRC1 overclocked\n");
+ if (val[2] & ARIZONA_SPDIF_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "SPDIF overclocked\n");
+
return IRQ_HANDLED;
}
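
A worked example of the WM8998 status rearrangement above: a raw val[0] of 0x2000 (bit 13, inside the 0x60e0 group) becomes

	((0x2000 & 0x60e0) >> 1) | ((0x2000 & 0x1e00) >> 2) | (0x2000 & 0xf)
	  = 0x1000 | 0x0000 | 0x0000 = 0x1000

so the bit lands at position 12, matching the standard layout the ARIZONA_*_OVERCLOCKED_STS tests below expect.
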
@@ -392,7 +409,7 @@ err:
* Register patch to some of the CODECs internal write sequences
* to ensure a clean exit from the low power sleep state.
*/
-static const struct reg_default wm5110_sleep_patch[] = {
+static const struct reg_sequence wm5110_sleep_patch[] = {
{ 0x337A, 0xC100 },
{ 0x337B, 0x0041 },
{ 0x3300, 0xA210 },
@@ -550,9 +567,8 @@ static int arizona_runtime_resume(struct device *dev)
break;
default:
ret = arizona_wait_for_boot(arizona);
- if (ret != 0) {
+ if (ret != 0)
goto err;
- }
if (arizona->external_dcvdd) {
ret = regmap_update_bits(arizona->regmap,
@@ -651,7 +667,7 @@ static int arizona_runtime_suspend(struct device *dev)
arizona->has_fully_powered_off = true;
- disable_irq(arizona->irq);
+ disable_irq_nosync(arizona->irq);
arizona_enable_reset(arizona);
regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
@@ -759,8 +775,8 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
ret = of_property_read_u32_array(arizona->dev->of_node,
"wlf,gpio-defaults",
- arizona->pdata.gpio_defaults,
- ARRAY_SIZE(arizona->pdata.gpio_defaults));
+ pdata->gpio_defaults,
+ ARRAY_SIZE(pdata->gpio_defaults));
if (ret >= 0) {
/*
* All values are literal except out of range values
@@ -768,11 +784,11 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
* data which uses 0 as chip default and out of range
* as zero.
*/
- for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
- if (arizona->pdata.gpio_defaults[i] > 0xffff)
- arizona->pdata.gpio_defaults[i] = 0;
- else if (arizona->pdata.gpio_defaults[i] == 0)
- arizona->pdata.gpio_defaults[i] = 0x10000;
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+ if (pdata->gpio_defaults[i] > 0xffff)
+ pdata->gpio_defaults[i] = 0;
+ else if (pdata->gpio_defaults[i] == 0)
+ pdata->gpio_defaults[i] = 0x10000;
}
} else {
dev_err(arizona->dev, "Failed to parse GPIO defaults: %d\n",
@@ -781,20 +797,20 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
cur, val) {
- if (count == ARRAY_SIZE(arizona->pdata.inmode))
+ if (count == ARRAY_SIZE(pdata->inmode))
break;
- arizona->pdata.inmode[count] = val;
+ pdata->inmode[count] = val;
count++;
}
count = 0;
of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
cur, val) {
- if (count == ARRAY_SIZE(arizona->pdata.dmic_ref))
+ if (count == ARRAY_SIZE(pdata->dmic_ref))
break;
- arizona->pdata.dmic_ref[count] = val;
+ pdata->dmic_ref[count] = val;
count++;
}
@@ -806,6 +822,8 @@ const struct of_device_id arizona_of_match[] = {
{ .compatible = "wlf,wm5110", .data = (void *)WM5110 },
{ .compatible = "wlf,wm8280", .data = (void *)WM8280 },
{ .compatible = "wlf,wm8997", .data = (void *)WM8997 },
+ { .compatible = "wlf,wm8998", .data = (void *)WM8998 },
+ { .compatible = "wlf,wm1814", .data = (void *)WM1814 },
{},
};
EXPORT_SYMBOL_GPL(arizona_of_match);
@@ -820,7 +838,7 @@ static const struct mfd_cell early_devs[] = {
{ .name = "arizona-ldo1" },
};
-static const char *wm5102_supplies[] = {
+static const char * const wm5102_supplies[] = {
"MICVDD",
"DBVDD2",
"DBVDD3",
@@ -863,7 +881,7 @@ static const struct mfd_cell wm5110_devs[] = {
},
};
-static const char *wm8997_supplies[] = {
+static const char * const wm8997_supplies[] = {
"MICVDD",
"DBVDD2",
"CPVDD",
@@ -887,11 +905,28 @@ static const struct mfd_cell wm8997_devs[] = {
},
};
+static const struct mfd_cell wm8998_devs[] = {
+ {
+ .name = "arizona-extcon",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = 1, /* We only need MICVDD */
+ },
+ { .name = "arizona-gpio" },
+ { .name = "arizona-haptics" },
+ { .name = "arizona-pwm" },
+ {
+ .name = "wm8998-codec",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+ },
+ { .name = "arizona-micsupp" },
+};
+
int arizona_dev_init(struct arizona *arizona)
{
struct device *dev = arizona->dev;
const char *type_name;
- unsigned int reg, val;
+ unsigned int reg, val, mask;
int (*apply_patch)(struct arizona *) = NULL;
int ret, i;
@@ -911,6 +946,8 @@ int arizona_dev_init(struct arizona *arizona)
case WM5110:
case WM8280:
case WM8997:
+ case WM8998:
+ case WM1814:
for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
arizona->core_supplies[i].supply
= wm5102_core_supplies[i];
@@ -992,6 +1029,7 @@ int arizona_dev_init(struct arizona *arizona)
switch (reg) {
case 0x5102:
case 0x5110:
+ case 0x6349:
case 0x8997:
break;
default:
@@ -1093,6 +1131,27 @@ int arizona_dev_init(struct arizona *arizona)
apply_patch = wm8997_patch;
break;
#endif
+#ifdef CONFIG_MFD_WM8998
+ case 0x6349:
+ switch (arizona->type) {
+ case WM8998:
+ type_name = "WM8998";
+ break;
+
+ case WM1814:
+ type_name = "WM1814";
+ break;
+
+ default:
+ type_name = "WM8998";
+ dev_err(arizona->dev, "WM8998 registered as %d\n",
+ arizona->type);
+ arizona->type = WM8998;
+ }
+
+ apply_patch = wm8998_patch;
+ break;
+#endif
default:
dev_err(arizona->dev, "Unknown device ID %x\n", reg);
goto err_reset;
@@ -1141,10 +1200,6 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.gpio_defaults[i]);
}
- pm_runtime_set_autosuspend_delay(arizona->dev, 100);
- pm_runtime_use_autosuspend(arizona->dev);
- pm_runtime_enable(arizona->dev);
-
/* Chip default */
if (!arizona->pdata.clk32k_src)
arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1208,14 +1263,38 @@ int arizona_dev_init(struct arizona *arizona)
<< ARIZONA_IN1_DMIC_SUP_SHIFT;
if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
val |= 1 << ARIZONA_IN1_MODE_SHIFT;
- if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+ switch (arizona->type) {
+ case WM8998:
+ case WM1814:
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
+ ARIZONA_IN1L_SRC_SE_MASK,
+ (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+ << ARIZONA_IN1L_SRC_SE_SHIFT);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
+ ARIZONA_IN1R_SRC_SE_MASK,
+ (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+ << ARIZONA_IN1R_SRC_SE_SHIFT);
+
+ mask = ARIZONA_IN1_DMIC_SUP_MASK |
+ ARIZONA_IN1_MODE_MASK;
+ break;
+ default:
+ if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+ val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+ mask = ARIZONA_IN1_DMIC_SUP_MASK |
+ ARIZONA_IN1_MODE_MASK |
+ ARIZONA_IN1_SINGLE_ENDED_MASK;
+ break;
+ }
regmap_update_bits(arizona->regmap,
ARIZONA_IN1L_CONTROL + (i * 8),
- ARIZONA_IN1_DMIC_SUP_MASK |
- ARIZONA_IN1_MODE_MASK |
- ARIZONA_IN1_SINGLE_ENDED_MASK, val);
+ mask, val);
}
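The mask/val split in the hunk above works because regmap_update_bits() is a read-modify-write; in essence (a sketch, with hypothetical read_reg()/write_reg() accessors standing in for the bus ops):

/* regmap_update_bits(map, reg, mask, val) is, in essence: */
unsigned int old = read_reg(reg);			/* hypothetical accessor */
unsigned int updated = (old & ~mask) | (val & mask);

if (updated != old)
	write_reg(reg, updated);	/* bits outside mask never change */

So dropping ARIZONA_IN1_SINGLE_ENDED_MASK from the mask on WM8998/WM1814 leaves those bits untouched in IN1L_CONTROL; they are managed through the ADC digital volume registers instead.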
for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
@@ -1245,11 +1324,17 @@ int arizona_dev_init(struct arizona *arizona)
arizona->pdata.spk_fmt[i]);
}
+ pm_runtime_set_active(arizona->dev);
+ pm_runtime_enable(arizona->dev);
+
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
goto err_reset;
+ pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+ pm_runtime_use_autosuspend(arizona->dev);
+
arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
arizona_clkgen_err, arizona);
arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1271,6 +1356,11 @@ int arizona_dev_init(struct arizona *arizona)
ret = mfd_add_devices(arizona->dev, -1, wm8997_devs,
ARRAY_SIZE(wm8997_devs), NULL, 0, NULL);
break;
+ case WM8998:
+ case WM1814:
+ ret = mfd_add_devices(arizona->dev, -1, wm8998_devs,
+ ARRAY_SIZE(wm8998_devs), NULL, 0, NULL);
+ break;
}
if (ret != 0) {
@@ -1278,10 +1368,6 @@ int arizona_dev_init(struct arizona *arizona)
goto err_irq;
}
-#ifdef CONFIG_PM
- regulator_disable(arizona->dcvdd);
-#endif
-
return 0;
err_irq:
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index ff782a5de235..cea1b409fa27 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -53,6 +53,12 @@ static int arizona_i2c_probe(struct i2c_client *i2c,
regmap_config = &wm8997_i2c_regmap;
break;
#endif
+#ifdef CONFIG_MFD_WM8998
+ case WM8998:
+ case WM1814:
+ regmap_config = &wm8998_i2c_regmap;
+ break;
+#endif
default:
dev_err(&i2c->dev, "Unknown device type %ld\n",
id->driver_data);
@@ -90,6 +96,8 @@ static const struct i2c_device_id arizona_i2c_id[] = {
{ "wm5110", WM5110 },
{ "wm8280", WM8280 },
{ "wm8997", WM8997 },
+ { "wm8998", WM8998 },
+ { "wm1814", WM1814 },
{ }
};
MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
@@ -97,7 +105,6 @@ MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
static struct i2c_driver arizona_i2c_driver = {
.driver = {
.name = "arizona",
- .owner = THIS_MODULE,
.pm = &arizona_pm_ops,
.of_match_table = of_match_ptr(arizona_of_match),
},
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 2b9965d53e4e..2cac4f463f1e 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -174,14 +174,7 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, data);
irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
@@ -234,6 +227,15 @@ int arizona_irq_init(struct arizona *arizona)
arizona->ctrlif_error = false;
break;
#endif
+#ifdef CONFIG_MFD_WM8998
+ case WM8998:
+ case WM1814:
+ aod = &wm8998_aod;
+ irq = &wm8998_irq;
+
+ arizona->ctrlif_error = false;
+ break;
+#endif
default:
BUG_ON("Unknown Arizona class device" == NULL);
return -EINVAL;
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index fbe2843271c5..3af12e938f57 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -27,6 +27,8 @@ extern const struct regmap_config wm5110_spi_regmap;
extern const struct regmap_config wm8997_i2c_regmap;
+extern const struct regmap_config wm8998_i2c_regmap;
+
extern const struct dev_pm_ops arizona_pm_ops;
extern const struct of_device_id arizona_of_match[];
@@ -41,6 +43,9 @@ extern const struct regmap_irq_chip wm5110_revd_irq;
extern const struct regmap_irq_chip wm8997_aod;
extern const struct regmap_irq_chip wm8997_irq;
+extern struct regmap_irq_chip wm8998_aod;
+extern struct regmap_irq_chip wm8998_irq;
+
int arizona_dev_init(struct arizona *arizona);
int arizona_dev_exit(struct arizona *arizona);
int arizona_irq_init(struct arizona *arizona);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index d9706ede8d39..d001f7e238f5 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, as3711_i2c_id);
static struct i2c_driver as3711_i2c_driver = {
.driver = {
.name = "as3711",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(as3711_of_match),
},
.probe = as3711_i2c_probe,
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 39fa554f13bb..924ea90494ae 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -437,7 +437,6 @@ MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
static struct i2c_driver as3722_i2c_driver = {
.driver = {
.name = "as3722",
- .owner = THIS_MODULE,
.of_match_table = as3722_of_match,
},
.probe = as3722_i2c_probe,
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 120df5c08741..4b54128bc78e 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -411,7 +411,7 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
irq_set_chip_data(irq, asic);
irq_set_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
@@ -431,7 +431,7 @@ static void asic3_irq_remove(struct platform_device *pdev)
irq_base = asic->irq_base;
for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
- set_irq_flags(irq, 0);
+ irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
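The set_irq_flags() conversions in this and later hunks are mechanical; the correspondence used here (ARM-only helper on the left, generic IRQ status flags on the right):

/*
 * set_irq_flags(irq, IRQF_VALID | IRQF_PROBE)
 *	-> irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 * set_irq_flags(irq, 0)
 *	-> irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 */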
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index cfd58f4cc5c3..3fff6b5d0426 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
@@ -26,6 +27,10 @@
#define ATMEL_HLCDC_REG_MAX (0x4000 - 0x4)
+struct atmel_hlcdc_regmap {
+ void __iomem *regs;
+};
+
static const struct mfd_cell atmel_hlcdc_cells[] = {
{
.name = "atmel-hlcdc-pwm",
@@ -37,28 +42,62 @@ static const struct mfd_cell atmel_hlcdc_cells[] = {
},
};
+static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct atmel_hlcdc_regmap *hregmap = context;
+
+ if (reg <= ATMEL_HLCDC_DIS) {
+ u32 status;
+
+ readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
+ !(status & ATMEL_HLCDC_SIP), 1, 100);
+ }
+
+ writel(val, hregmap->regs + reg);
+
+ return 0;
+}
+
+static int regmap_atmel_hlcdc_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct atmel_hlcdc_regmap *hregmap = context;
+
+ *val = readl(hregmap->regs + reg);
+
+ return 0;
+}
+
static const struct regmap_config atmel_hlcdc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = ATMEL_HLCDC_REG_MAX,
+ .reg_write = regmap_atmel_hlcdc_reg_write,
+ .reg_read = regmap_atmel_hlcdc_reg_read,
+ .fast_io = true,
};
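The write hook above polls the SIP (Synchronization In Progress) bit with readl_poll_timeout() from <linux/iopoll.h> before touching configuration registers. A rough equivalent of that specific call, as a hypothetical helper (the real macro sleeps between reads and returns -ETIMEDOUT on expiry; note the driver deliberately ignores the result and issues the write regardless):

/* Poll until the SIP bit clears (sketch only). */
static int atmel_hlcdc_wait_sip(void __iomem *sr)
{
	unsigned int tries = 100;	/* ~100 us budget, 1 us steps */

	while (tries--) {
		if (!(readl(sr) & ATMEL_HLCDC_SIP))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}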
static int atmel_hlcdc_probe(struct platform_device *pdev)
{
+ struct atmel_hlcdc_regmap *hregmap;
struct device *dev = &pdev->dev;
struct atmel_hlcdc *hlcdc;
struct resource *res;
- void __iomem *regs;
+
+ hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
+ if (!hregmap)
+ return -ENOMEM;
hlcdc = devm_kzalloc(dev, sizeof(*hlcdc), GFP_KERNEL);
if (!hlcdc)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ hregmap->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hregmap->regs))
+ return PTR_ERR(hregmap->regs);
hlcdc->irq = platform_get_irq(pdev, 0);
if (hlcdc->irq < 0)
@@ -82,8 +121,8 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
return PTR_ERR(hlcdc->slow_clk);
}
- hlcdc->regmap = devm_regmap_init_mmio(dev, regs,
- &atmel_hlcdc_regmap_config);
+ hlcdc->regmap = devm_regmap_init(dev, NULL, hregmap,
+ &atmel_hlcdc_regmap_config);
if (IS_ERR(hlcdc->regmap))
return PTR_ERR(hlcdc->regmap);
@@ -102,7 +141,11 @@ static int atmel_hlcdc_remove(struct platform_device *pdev)
}
static const struct of_device_id atmel_hlcdc_match[] = {
+ { .compatible = "atmel,at91sam9n12-hlcdc" },
+ { .compatible = "atmel,at91sam9x5-hlcdc" },
+ { .compatible = "atmel,sama5d2-hlcdc" },
{ .compatible = "atmel,sama5d3-hlcdc" },
+ { .compatible = "atmel,sama5d4-hlcdc" },
{ /* sentinel */ },
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 6df91556faf3..3f576b76c322 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -30,19 +30,47 @@
#define AXP20X_OFF 0x80
static const char * const axp20x_model_names[] = {
+ "AXP152",
"AXP202",
"AXP209",
"AXP221",
"AXP288",
};
+static const struct regmap_range axp152_writeable_ranges[] = {
+ regmap_reg_range(AXP152_LDO3456_DC1234_CTRL, AXP152_IRQ3_STATE),
+ regmap_reg_range(AXP152_DCDC_MODE, AXP152_PWM1_DUTY_CYCLE),
+};
+
+static const struct regmap_range axp152_volatile_ranges[] = {
+ regmap_reg_range(AXP152_PWR_OP_MODE, AXP152_PWR_OP_MODE),
+ regmap_reg_range(AXP152_IRQ1_EN, AXP152_IRQ3_STATE),
+ regmap_reg_range(AXP152_GPIO_INPUT, AXP152_GPIO_INPUT),
+};
+
+static const struct regmap_access_table axp152_writeable_table = {
+ .yes_ranges = axp152_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp152_writeable_ranges),
+};
+
+static const struct regmap_access_table axp152_volatile_table = {
+ .yes_ranges = axp152_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp152_volatile_ranges),
+};
+
static const struct regmap_range axp20x_writeable_ranges[] = {
regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES),
+ regmap_reg_range(AXP20X_RDC_H, AXP20X_OCV(AXP20X_OCV_MAX)),
};
static const struct regmap_range axp20x_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_USB_OTG_STATUS),
+ regmap_reg_range(AXP20X_CHRG_CTRL1, AXP20X_CHRG_CTRL2),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+ regmap_reg_range(AXP20X_ACIN_V_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
+ regmap_reg_range(AXP20X_GPIO20_SS, AXP20X_GPIO3_CTRL),
+ regmap_reg_range(AXP20X_FG_RES, AXP20X_RDC_L),
};
static const struct regmap_access_table axp20x_writeable_table = {
@@ -93,6 +121,11 @@ static const struct regmap_access_table axp288_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(axp288_volatile_ranges),
};
+static struct resource axp152_pek_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
+ DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
+};
+
static struct resource axp20x_pek_resources[] = {
{
.name = "PEK_DBR",
@@ -107,6 +140,13 @@ static struct resource axp20x_pek_resources[] = {
},
};
+static struct resource axp20x_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_VALID, "VBUS_VALID"),
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
+};
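These DEFINE_RES_IRQ_NAMED() entries are shorthand for the open-coded form used by axp20x_pek_resources just above; for example:

/* DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN") expands to: */
{
	.name	= "VBUS_PLUGIN",
	.start	= AXP20X_IRQ_VBUS_PLUGIN,
	.end	= AXP20X_IRQ_VBUS_PLUGIN,	/* size 1, so end == start */
	.flags	= IORESOURCE_IRQ,
},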
+
static struct resource axp22x_pek_resources[] = {
{
.name = "PEK_DBR",
@@ -154,12 +194,21 @@ static struct resource axp288_fuel_gauge_resources[] = {
},
};
+static const struct regmap_config axp152_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .wr_table = &axp152_writeable_table,
+ .volatile_table = &axp152_volatile_table,
+ .max_register = AXP152_PWM1_DUTY_CYCLE,
+ .cache_type = REGCACHE_RBTREE,
+};
+
static const struct regmap_config axp20x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.wr_table = &axp20x_writeable_table,
.volatile_table = &axp20x_volatile_table,
- .max_register = AXP20X_FG_RES,
+ .max_register = AXP20X_OCV(AXP20X_OCV_MAX),
.cache_type = REGCACHE_RBTREE,
};
@@ -184,6 +233,26 @@ static const struct regmap_config axp288_regmap_config = {
#define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \
[_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
+static const struct regmap_irq axp152_regmap_irqs[] = {
+ INIT_REGMAP_IRQ(AXP152, LDO0IN_CONNECT, 0, 6),
+ INIT_REGMAP_IRQ(AXP152, LDO0IN_REMOVAL, 0, 5),
+ INIT_REGMAP_IRQ(AXP152, ALDO0IN_CONNECT, 0, 3),
+ INIT_REGMAP_IRQ(AXP152, ALDO0IN_REMOVAL, 0, 2),
+ INIT_REGMAP_IRQ(AXP152, DCDC1_V_LOW, 1, 5),
+ INIT_REGMAP_IRQ(AXP152, DCDC2_V_LOW, 1, 4),
+ INIT_REGMAP_IRQ(AXP152, DCDC3_V_LOW, 1, 3),
+ INIT_REGMAP_IRQ(AXP152, DCDC4_V_LOW, 1, 2),
+ INIT_REGMAP_IRQ(AXP152, PEK_SHORT, 1, 1),
+ INIT_REGMAP_IRQ(AXP152, PEK_LONG, 1, 0),
+ INIT_REGMAP_IRQ(AXP152, TIMER, 2, 7),
+ INIT_REGMAP_IRQ(AXP152, PEK_RIS_EDGE, 2, 6),
+ INIT_REGMAP_IRQ(AXP152, PEK_FAL_EDGE, 2, 5),
+ INIT_REGMAP_IRQ(AXP152, GPIO3_INPUT, 2, 3),
+ INIT_REGMAP_IRQ(AXP152, GPIO2_INPUT, 2, 2),
+ INIT_REGMAP_IRQ(AXP152, GPIO1_INPUT, 2, 1),
+ INIT_REGMAP_IRQ(AXP152, GPIO0_INPUT, 2, 0),
+};
+
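A worked expansion of the INIT_REGMAP_IRQ() helper defined just above, for one of these entries:

/* INIT_REGMAP_IRQ(AXP152, PEK_LONG, 1, 0) expands to: */
[AXP152_IRQ_PEK_LONG] = { .reg_offset = (1), .mask = BIT(0) },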
static const struct regmap_irq axp20x_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V, 0, 7),
INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN, 0, 6),
@@ -293,6 +362,7 @@ static const struct regmap_irq axp288_regmap_irqs[] = {
};
static const struct of_device_id axp20x_of_match[] = {
+ { .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
{ .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
{ .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
{ .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
@@ -317,6 +387,18 @@ static const struct acpi_device_id axp20x_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
+static const struct regmap_irq_chip axp152_regmap_irq_chip = {
+ .name = "axp152_irq_chip",
+ .status_base = AXP152_IRQ1_STATE,
+ .ack_base = AXP152_IRQ1_STATE,
+ .mask_base = AXP152_IRQ1_EN,
+ .mask_invert = true,
+ .init_ack_masked = true,
+ .irqs = axp152_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(axp152_regmap_irqs),
+ .num_regs = 3,
+};
+
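A note on the flags above (regmap-irq semantics; the write-1-to-clear detail is inferred from ack_base pointing at the status registers):

/*
 * .mask_invert = true tells regmap-irq that AXP152_IRQ1_EN is really an
 * enable register: writing 1 unmasks an interrupt rather than masking it.
 * .ack_base == .status_base means interrupts are acked by writing the
 * status bits back, i.e. the STATE registers are write-1-to-clear.
 */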
static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
.name = "axp20x_irq_chip",
.status_base = AXP20X_IRQ1_STATE,
@@ -357,11 +439,16 @@ static const struct regmap_irq_chip axp288_regmap_irq_chip = {
static struct mfd_cell axp20x_cells[] = {
{
- .name = "axp20x-pek",
- .num_resources = ARRAY_SIZE(axp20x_pek_resources),
- .resources = axp20x_pek_resources,
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp20x_pek_resources),
+ .resources = axp20x_pek_resources,
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .of_compatible = "x-powers,axp202-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_usb_power_supply_resources),
+ .resources = axp20x_usb_power_supply_resources,
},
};
@@ -375,6 +462,14 @@ static struct mfd_cell axp22x_cells[] = {
},
};
+static struct mfd_cell axp152_cells[] = {
+ {
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp152_pek_resources),
+ .resources = axp152_pek_resources,
+ },
+};
+
static struct resource axp288_adc_resources[] = {
{
.name = "GPADC",
@@ -513,6 +608,12 @@ static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
}
switch (axp20x->variant) {
+ case AXP152_ID:
+ axp20x->nr_cells = ARRAY_SIZE(axp152_cells);
+ axp20x->cells = axp152_cells;
+ axp20x->regmap_cfg = &axp152_regmap_config;
+ axp20x->regmap_irq_chip = &axp152_regmap_irq_chip;
+ break;
case AXP202_ID:
case AXP209_ID:
axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
@@ -613,7 +714,6 @@ static int axp20x_i2c_remove(struct i2c_client *i2c)
static struct i2c_driver axp20x_i2c_driver = {
.driver = {
.name = "axp20x",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(axp20x_of_match),
.acpi_match_table = ACPI_PTR(axp20x_acpi_match),
},
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index e334de000e8c..da2af5b4f855 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -117,7 +117,6 @@ MODULE_DEVICE_TABLE(i2c, bcm590xx_i2c_id);
static struct i2c_driver bcm590xx_i2c_driver = {
.driver = {
.name = "bcm590xx",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(bcm590xx_of_match),
},
.probe = bcm590xx_i2c_probe,
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index b9a0963ca5c3..d06e4b46db80 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -353,7 +353,6 @@ MODULE_DEVICE_TABLE(i2c, cros_ec_i2c_id);
static struct i2c_driver cros_ec_driver = {
.driver = {
.name = "cros-ec-i2c",
- .owner = THIS_MODULE,
.pm = &cros_ec_i2c_pm_ops,
},
.probe = cros_ec_i2c_probe,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 16f228dc243f..30a296b4e748 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -701,6 +701,12 @@ static int cros_ec_spi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
cros_ec_spi_resume);
+static const struct of_device_id cros_ec_spi_of_match[] = {
+ { .compatible = "google,cros-ec-spi", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
+
static const struct spi_device_id cros_ec_spi_id[] = {
{ "cros-ec-spi", 0 },
{ }
@@ -710,6 +716,7 @@ MODULE_DEVICE_TABLE(spi, cros_ec_spi_id);
static struct spi_driver cros_ec_driver_spi = {
.driver = {
.name = "cros-ec-spi",
+ .of_match_table = of_match_ptr(cros_ec_spi_of_match),
.owner = THIS_MODULE,
.pm = &cros_ec_spi_pm_ops,
},
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index e0a2e0ee603b..ef7fe2ae2fa4 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -550,7 +550,6 @@ static int da903x_remove(struct i2c_client *client)
static struct i2c_driver da903x_driver = {
.driver = {
.name = "da903x",
- .owner = THIS_MODULE,
},
.probe = da903x_probe,
.remove = da903x_remove,
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ec39287a245b..02887001e800 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -195,7 +195,6 @@ static struct i2c_driver da9052_i2c_driver = {
.id_table = da9052_i2c_id,
.driver = {
.name = "da9052",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = dialog_dt_ids,
#endif
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index d4d4c165eb95..b53e100f577c 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -79,7 +79,6 @@ static struct i2c_driver da9055_i2c_driver = {
.id_table = da9055_i2c_id,
.driver = {
.name = "da9055-pmic",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(da9055_of_match),
},
};
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
new file mode 100644
index 000000000000..f80d9471f2e7
--- /dev/null
+++ b/drivers/mfd/da9062-core.c
@@ -0,0 +1,533 @@
+/*
+ * Core, IRQ and I2C device driver for DA9062 PMIC
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+#include <linux/regulator/of_regulator.h>
+
+#define DA9062_REG_EVENT_A_OFFSET 0
+#define DA9062_REG_EVENT_B_OFFSET 1
+#define DA9062_REG_EVENT_C_OFFSET 2
+
+static struct regmap_irq da9062_irqs[] = {
+ /* EVENT A */
+ [DA9062_IRQ_ONKEY] = {
+ .reg_offset = DA9062_REG_EVENT_A_OFFSET,
+ .mask = DA9062AA_M_NONKEY_MASK,
+ },
+ [DA9062_IRQ_ALARM] = {
+ .reg_offset = DA9062_REG_EVENT_A_OFFSET,
+ .mask = DA9062AA_M_ALARM_MASK,
+ },
+ [DA9062_IRQ_TICK] = {
+ .reg_offset = DA9062_REG_EVENT_A_OFFSET,
+ .mask = DA9062AA_M_TICK_MASK,
+ },
+ [DA9062_IRQ_WDG_WARN] = {
+ .reg_offset = DA9062_REG_EVENT_A_OFFSET,
+ .mask = DA9062AA_M_WDG_WARN_MASK,
+ },
+ [DA9062_IRQ_SEQ_RDY] = {
+ .reg_offset = DA9062_REG_EVENT_A_OFFSET,
+ .mask = DA9062AA_M_SEQ_RDY_MASK,
+ },
+ /* EVENT B */
+ [DA9062_IRQ_TEMP] = {
+ .reg_offset = DA9062_REG_EVENT_B_OFFSET,
+ .mask = DA9062AA_M_TEMP_MASK,
+ },
+ [DA9062_IRQ_LDO_LIM] = {
+ .reg_offset = DA9062_REG_EVENT_B_OFFSET,
+ .mask = DA9062AA_M_LDO_LIM_MASK,
+ },
+ [DA9062_IRQ_DVC_RDY] = {
+ .reg_offset = DA9062_REG_EVENT_B_OFFSET,
+ .mask = DA9062AA_M_DVC_RDY_MASK,
+ },
+ [DA9062_IRQ_VDD_WARN] = {
+ .reg_offset = DA9062_REG_EVENT_B_OFFSET,
+ .mask = DA9062AA_M_VDD_WARN_MASK,
+ },
+ /* EVENT C */
+ [DA9062_IRQ_GPI0] = {
+ .reg_offset = DA9062_REG_EVENT_C_OFFSET,
+ .mask = DA9062AA_M_GPI0_MASK,
+ },
+ [DA9062_IRQ_GPI1] = {
+ .reg_offset = DA9062_REG_EVENT_C_OFFSET,
+ .mask = DA9062AA_M_GPI1_MASK,
+ },
+ [DA9062_IRQ_GPI2] = {
+ .reg_offset = DA9062_REG_EVENT_C_OFFSET,
+ .mask = DA9062AA_M_GPI2_MASK,
+ },
+ [DA9062_IRQ_GPI3] = {
+ .reg_offset = DA9062_REG_EVENT_C_OFFSET,
+ .mask = DA9062AA_M_GPI3_MASK,
+ },
+ [DA9062_IRQ_GPI4] = {
+ .reg_offset = DA9062_REG_EVENT_C_OFFSET,
+ .mask = DA9062AA_M_GPI4_MASK,
+ },
+};
+
+static struct regmap_irq_chip da9062_irq_chip = {
+ .name = "da9062-irq",
+ .irqs = da9062_irqs,
+ .num_irqs = DA9062_NUM_IRQ,
+ .num_regs = 3,
+ .status_base = DA9062AA_EVENT_A,
+ .mask_base = DA9062AA_IRQ_MASK_A,
+ .ack_base = DA9062AA_EVENT_A,
+};
+
+static struct resource da9062_core_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_VDD_WARN, 1, "VDD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_regulators_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_LDO_LIM, 1, "LDO_LIM", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_thermal_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_TEMP, 1, "THERMAL", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_wdt_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_WDG_WARN, 1, "WD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_rtc_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_ALARM, 1, "ALARM", IORESOURCE_IRQ),
+ DEFINE_RES_NAMED(DA9062_IRQ_TICK, 1, "TICK", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_onkey_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
+};
+
+static const struct mfd_cell da9062_devs[] = {
+ {
+ .name = "da9062-core",
+ .num_resources = ARRAY_SIZE(da9062_core_resources),
+ .resources = da9062_core_resources,
+ },
+ {
+ .name = "da9062-regulators",
+ .num_resources = ARRAY_SIZE(da9062_regulators_resources),
+ .resources = da9062_regulators_resources,
+ },
+ {
+ .name = "da9062-watchdog",
+ .num_resources = ARRAY_SIZE(da9062_wdt_resources),
+ .resources = da9062_wdt_resources,
+ .of_compatible = "dlg,da9062-wdt",
+ },
+ {
+ .name = "da9062-thermal",
+ .num_resources = ARRAY_SIZE(da9062_thermal_resources),
+ .resources = da9062_thermal_resources,
+ .of_compatible = "dlg,da9062-thermal",
+ },
+ {
+ .name = "da9062-rtc",
+ .num_resources = ARRAY_SIZE(da9062_rtc_resources),
+ .resources = da9062_rtc_resources,
+ .of_compatible = "dlg,da9062-rtc",
+ },
+ {
+ .name = "da9062-onkey",
+ .num_resources = ARRAY_SIZE(da9062_onkey_resources),
+ .resources = da9062_onkey_resources,
+ .of_compatible = "dlg,da9062-onkey",
+ },
+};
+
+static int da9062_clear_fault_log(struct da9062 *chip)
+{
+ int ret;
+ int fault_log;
+
+ ret = regmap_read(chip->regmap, DA9062AA_FAULT_LOG, &fault_log);
+ if (ret < 0)
+ return ret;
+
+ if (fault_log) {
+ if (fault_log & DA9062AA_TWD_ERROR_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: TWD_ERROR\n");
+ if (fault_log & DA9062AA_POR_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: POR\n");
+ if (fault_log & DA9062AA_VDD_FAULT_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: VDD_FAULT\n");
+ if (fault_log & DA9062AA_VDD_START_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: VDD_START\n");
+ if (fault_log & DA9062AA_TEMP_CRIT_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: TEMP_CRIT\n");
+ if (fault_log & DA9062AA_KEY_RESET_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: KEY_RESET\n");
+ if (fault_log & DA9062AA_NSHUTDOWN_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: NSHUTDOWN\n");
+ if (fault_log & DA9062AA_WAIT_SHUT_MASK)
+ dev_dbg(chip->dev, "Fault log entry detected: WAIT_SHUT\n");
+
+ ret = regmap_write(chip->regmap, DA9062AA_FAULT_LOG,
+ fault_log);
+ }
+
+ return ret;
+}
+
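One detail worth spelling out about the function above (inferred from its write-back pattern):

/*
 * DA9062AA_FAULT_LOG is write-1-to-clear: writing back the value just
 * read acknowledges exactly the faults reported above, so an event that
 * latches between the read and the write is preserved for the next pass.
 */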
+static int get_device_type(struct da9062 *chip)
+{
+ int device_id, variant_id, variant_mrc;
+ int ret;
+
+ ret = regmap_read(chip->regmap, DA9062AA_DEVICE_ID, &device_id);
+ if (ret < 0) {
+ dev_err(chip->dev, "Cannot read chip ID.\n");
+ return -EIO;
+ }
+ if (device_id != DA9062_PMIC_DEVICE_ID) {
+ dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(chip->regmap, DA9062AA_VARIANT_ID, &variant_id);
+ if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip variant ID.\n");
+ return -EIO;
+ }
+
+ dev_info(chip->dev,
+ "Device detected (device-ID: 0x%02X, var-ID: 0x%02X)\n",
+ device_id, variant_id);
+
+ variant_mrc = (variant_id & DA9062AA_MRC_MASK) >> DA9062AA_MRC_SHIFT;
+
+ if (variant_mrc < DA9062_PMIC_VARIANT_MRC_AA) {
+ dev_err(chip->dev,
+ "Cannot support variant MRC: 0x%02X\n", variant_mrc);
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
+static const struct regmap_range da9062_aa_readable_ranges[] = {
+ {
+ .range_min = DA9062AA_PAGE_CON,
+ .range_max = DA9062AA_STATUS_B,
+ }, {
+ .range_min = DA9062AA_STATUS_D,
+ .range_max = DA9062AA_EVENT_C,
+ }, {
+ .range_min = DA9062AA_IRQ_MASK_A,
+ .range_max = DA9062AA_IRQ_MASK_C,
+ }, {
+ .range_min = DA9062AA_CONTROL_A,
+ .range_max = DA9062AA_GPIO_4,
+ }, {
+ .range_min = DA9062AA_GPIO_WKUP_MODE,
+ .range_max = DA9062AA_BUCK4_CONT,
+ }, {
+ .range_min = DA9062AA_BUCK3_CONT,
+ .range_max = DA9062AA_BUCK3_CONT,
+ }, {
+ .range_min = DA9062AA_LDO1_CONT,
+ .range_max = DA9062AA_LDO4_CONT,
+ }, {
+ .range_min = DA9062AA_DVC_1,
+ .range_max = DA9062AA_DVC_1,
+ }, {
+ .range_min = DA9062AA_COUNT_S,
+ .range_max = DA9062AA_SECOND_D,
+ }, {
+ .range_min = DA9062AA_SEQ,
+ .range_max = DA9062AA_ID_4_3,
+ }, {
+ .range_min = DA9062AA_ID_12_11,
+ .range_max = DA9062AA_ID_16_15,
+ }, {
+ .range_min = DA9062AA_ID_22_21,
+ .range_max = DA9062AA_ID_32_31,
+ }, {
+ .range_min = DA9062AA_SEQ_A,
+ .range_max = DA9062AA_BUCK3_CFG,
+ }, {
+ .range_min = DA9062AA_VBUCK2_A,
+ .range_max = DA9062AA_VBUCK4_A,
+ }, {
+ .range_min = DA9062AA_VBUCK3_A,
+ .range_max = DA9062AA_VBUCK3_A,
+ }, {
+ .range_min = DA9062AA_VLDO1_A,
+ .range_max = DA9062AA_VLDO4_A,
+ }, {
+ .range_min = DA9062AA_VBUCK2_B,
+ .range_max = DA9062AA_VBUCK4_B,
+ }, {
+ .range_min = DA9062AA_VBUCK3_B,
+ .range_max = DA9062AA_VBUCK3_B,
+ }, {
+ .range_min = DA9062AA_VLDO1_B,
+ .range_max = DA9062AA_VLDO4_B,
+ }, {
+ .range_min = DA9062AA_BBAT_CONT,
+ .range_max = DA9062AA_BBAT_CONT,
+ }, {
+ .range_min = DA9062AA_INTERFACE,
+ .range_max = DA9062AA_CONFIG_E,
+ }, {
+ .range_min = DA9062AA_CONFIG_G,
+ .range_max = DA9062AA_CONFIG_K,
+ }, {
+ .range_min = DA9062AA_CONFIG_M,
+ .range_max = DA9062AA_CONFIG_M,
+ }, {
+ .range_min = DA9062AA_TRIM_CLDR,
+ .range_max = DA9062AA_GP_ID_19,
+ }, {
+ .range_min = DA9062AA_DEVICE_ID,
+ .range_max = DA9062AA_CONFIG_ID,
+ },
+};
+
+static const struct regmap_range da9062_aa_writeable_ranges[] = {
+ {
+ .range_min = DA9062AA_PAGE_CON,
+ .range_max = DA9062AA_PAGE_CON,
+ }, {
+ .range_min = DA9062AA_FAULT_LOG,
+ .range_max = DA9062AA_EVENT_C,
+ }, {
+ .range_min = DA9062AA_IRQ_MASK_A,
+ .range_max = DA9062AA_IRQ_MASK_C,
+ }, {
+ .range_min = DA9062AA_CONTROL_A,
+ .range_max = DA9062AA_GPIO_4,
+ }, {
+ .range_min = DA9062AA_GPIO_WKUP_MODE,
+ .range_max = DA9062AA_BUCK4_CONT,
+ }, {
+ .range_min = DA9062AA_BUCK3_CONT,
+ .range_max = DA9062AA_BUCK3_CONT,
+ }, {
+ .range_min = DA9062AA_LDO1_CONT,
+ .range_max = DA9062AA_LDO4_CONT,
+ }, {
+ .range_min = DA9062AA_DVC_1,
+ .range_max = DA9062AA_DVC_1,
+ }, {
+ .range_min = DA9062AA_COUNT_S,
+ .range_max = DA9062AA_ALARM_Y,
+ }, {
+ .range_min = DA9062AA_SEQ,
+ .range_max = DA9062AA_ID_4_3,
+ }, {
+ .range_min = DA9062AA_ID_12_11,
+ .range_max = DA9062AA_ID_16_15,
+ }, {
+ .range_min = DA9062AA_ID_22_21,
+ .range_max = DA9062AA_ID_32_31,
+ }, {
+ .range_min = DA9062AA_SEQ_A,
+ .range_max = DA9062AA_BUCK3_CFG,
+ }, {
+ .range_min = DA9062AA_VBUCK2_A,
+ .range_max = DA9062AA_VBUCK4_A,
+ }, {
+ .range_min = DA9062AA_VBUCK3_A,
+ .range_max = DA9062AA_VBUCK3_A,
+ }, {
+ .range_min = DA9062AA_VLDO1_A,
+ .range_max = DA9062AA_VLDO4_A,
+ }, {
+ .range_min = DA9062AA_VBUCK2_B,
+ .range_max = DA9062AA_VBUCK4_B,
+ }, {
+ .range_min = DA9062AA_VBUCK3_B,
+ .range_max = DA9062AA_VBUCK3_B,
+ }, {
+ .range_min = DA9062AA_VLDO1_B,
+ .range_max = DA9062AA_VLDO4_B,
+ }, {
+ .range_min = DA9062AA_BBAT_CONT,
+ .range_max = DA9062AA_BBAT_CONT,
+ }, {
+ .range_min = DA9062AA_GP_ID_0,
+ .range_max = DA9062AA_GP_ID_19,
+ },
+};
+
+static const struct regmap_range da9062_aa_volatile_ranges[] = {
+ {
+ .range_min = DA9062AA_PAGE_CON,
+ .range_max = DA9062AA_STATUS_B,
+ }, {
+ .range_min = DA9062AA_STATUS_D,
+ .range_max = DA9062AA_EVENT_C,
+ }, {
+ .range_min = DA9062AA_CONTROL_F,
+ .range_max = DA9062AA_CONTROL_F,
+ }, {
+ .range_min = DA9062AA_COUNT_S,
+ .range_max = DA9062AA_SECOND_D,
+ },
+};
+
+static const struct regmap_access_table da9062_aa_readable_table = {
+ .yes_ranges = da9062_aa_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9062_aa_readable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_writeable_table = {
+ .yes_ranges = da9062_aa_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9062_aa_writeable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_volatile_table = {
+ .yes_ranges = da9062_aa_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9062_aa_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9062_range_cfg[] = {
+ {
+ .range_min = DA9062AA_PAGE_CON,
+ .range_max = DA9062AA_CONFIG_ID,
+ .selector_reg = DA9062AA_PAGE_CON,
+ .selector_mask = 1 << DA9062_I2C_PAGE_SEL_SHIFT,
+ .selector_shift = DA9062_I2C_PAGE_SEL_SHIFT,
+ .window_start = 0,
+ .window_len = 256,
+ }
+};
+
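This range config is regmap's register paging; roughly what a single access to register R in [0, DA9062AA_CONFIG_ID] turns into (a sketch of regmap internals, not driver code):

/*
 * page   = R / 256;		window_len  == 256
 * offset = R % 256;		window_start == 0
 *
 * regmap first writes the page through the selector:
 *	update_bits(PAGE_CON, 1 << DA9062_I2C_PAGE_SEL_SHIFT,
 *		    page << DA9062_I2C_PAGE_SEL_SHIFT);
 * then performs the raw I2C access at 'offset'.
 */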
+static struct regmap_config da9062_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .ranges = da9062_range_cfg,
+ .num_ranges = ARRAY_SIZE(da9062_range_cfg),
+ .max_register = DA9062AA_CONFIG_ID,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &da9062_aa_readable_table,
+ .wr_table = &da9062_aa_writeable_table,
+ .volatile_table = &da9062_aa_volatile_table,
+};
+
+static int da9062_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct da9062 *chip;
+ unsigned int irq_base;
+ int ret;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, chip);
+ chip->dev = &i2c->dev;
+
+ if (!i2c->irq) {
+ dev_err(chip->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(i2c, &da9062_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(chip->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = da9062_clear_fault_log(chip);
+ if (ret < 0)
+ dev_warn(chip->dev, "Cannot clear fault log\n");
+
+ ret = get_device_type(chip);
+ if (ret)
+ return ret;
+
+ ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+ -1, &da9062_irq_chip,
+ &chip->regmap_irq);
+ if (ret) {
+ dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
+ i2c->irq, ret);
+ return ret;
+ }
+
+ irq_base = regmap_irq_chip_get_base(chip->regmap_irq);
+
+ ret = mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE, da9062_devs,
+ ARRAY_SIZE(da9062_devs), NULL, irq_base,
+ NULL);
+ if (ret) {
+ dev_err(chip->dev, "Cannot register child devices\n");
+ regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int da9062_i2c_remove(struct i2c_client *i2c)
+{
+ struct da9062 *chip = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(chip->dev);
+ regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+
+ return 0;
+}
+
+static const struct i2c_device_id da9062_i2c_id[] = {
+ { "da9062", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
+
+static const struct of_device_id da9062_dt_ids[] = {
+ { .compatible = "dlg,da9062", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
+static struct i2c_driver da9062_i2c_driver = {
+ .driver = {
+ .name = "da9062",
+ .of_match_table = of_match_ptr(da9062_dt_ids),
+ },
+ .probe = da9062_i2c_probe,
+ .remove = da9062_i2c_remove,
+ .id_table = da9062_i2c_id,
+};
+
+module_i2c_driver(da9062_i2c_driver);
+
+MODULE_DESCRIPTION("Core device driver for Dialog DA9062");
+MODULE_AUTHOR("Steve Twiss <stwiss.opensource@diasemi.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 6f3a7c0001f9..2d4e3e0f4e94 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -264,7 +264,6 @@ MODULE_DEVICE_TABLE(i2c, da9063_i2c_id);
static struct i2c_driver da9063_i2c_driver = {
.driver = {
.name = "da9063",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(da9063_dt_ids),
},
.probe = da9063_i2c_probe,
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index eaf1ec9208b2..26302634633c 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -77,6 +77,10 @@ static const struct regmap_irq da9063_irqs[] = {
.reg_offset = DA9063_REG_EVENT_B_OFFSET,
.mask = DA9063_M_UVOV,
},
+ [DA9063_IRQ_DVC_RDY] = {
+ .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+ .mask = DA9063_M_DVC_RDY,
+ },
[DA9063_IRQ_VDD_MON] = {
.reg_offset = DA9063_REG_EVENT_B_OFFSET,
.mask = DA9063_M_VDD_MON,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 8b14740f9fca..e6e4bacb09ee 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2654,7 +2654,6 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
{
irq_set_chip_and_handler(virq, &prcmu_irq_chip,
handle_simple_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 5991faddd3c6..a76eb6ef47a0 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -207,7 +207,7 @@ static void pcap_isr_work(struct work_struct *work)
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct pcap_chip *pcap = irq_get_handler_data(irq);
+ struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
desc->irq_data.chip->irq_ack(&desc->irq_data);
queue_work(pcap->workqueue, &pcap->isr_work);
@@ -463,11 +463,7 @@ static int ezx_pcap_probe(struct spi_device *spi)
for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
irq_set_chip_data(i, pcap);
-#ifdef CONFIG_ARM
- set_irq_flags(i, IRQF_VALID);
-#else
- irq_set_noprobe(i);
-#endif
+ irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
}
/* mask/ack all PCAP interrupts */
@@ -476,8 +472,7 @@ static int ezx_pcap_probe(struct spi_device *spi)
pcap->msr = PCAP_MASK_ALL_INTERRUPT;
irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
- irq_set_handler_data(spi->irq, pcap);
- irq_set_chained_handler(spi->irq, pcap_irq_handler);
+ irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
irq_set_irq_wake(spi->irq, 1);
/* ADC */
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 49f39feca784..9131cdcdc64a 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -350,11 +350,11 @@ static int __init egpio_probe(struct platform_device *pdev)
irq_set_chip_and_handler(irq, &egpio_muxed_chip,
handle_simple_irq);
irq_set_chip_data(irq, ei);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
- irq_set_handler_data(ei->chained_irq, ei);
- irq_set_chained_handler(ei->chained_irq, egpio_handler);
+ irq_set_chained_handler_and_data(ei->chained_irq,
+ egpio_handler, ei);
ack_irqs(ei);
device_init_wakeup(&pdev->dev, 1);
@@ -376,7 +376,7 @@ static int __exit egpio_remove(struct platform_device *pdev)
irq_end = ei->irq_start + ei->nirqs;
for (irq = ei->irq_start; irq < irq_end; irq++) {
irq_set_chip_and_handler(irq, NULL, NULL);
- set_irq_flags(irq, 0);
+ irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
irq_set_chained_handler(ei->chained_irq, NULL);
device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index b54baad30164..1bd5b042c8b3 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -330,11 +330,7 @@ static int htcpld_setup_chip_irq(
irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
handle_simple_irq);
irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#else
- irq_set_probe(irq);
-#endif
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
return ret;
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
new file mode 100644
index 000000000000..0d92d73bfa0e
--- /dev/null
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -0,0 +1,84 @@
+/*
+ * Intel LPSS ACPI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include "intel-lpss.h"
+
+static const struct intel_lpss_platform_info spt_info = {
+ .clk_rate = 120000000,
+};
+
+static const struct acpi_device_id intel_lpss_acpi_ids[] = {
+ /* SPT */
+ { "INT3446", (kernel_ulong_t)&spt_info },
+ { "INT3447", (kernel_ulong_t)&spt_info },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
+
+static int intel_lpss_acpi_probe(struct platform_device *pdev)
+{
+ struct intel_lpss_platform_info *info;
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->irq = platform_get_irq(pdev, 0);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return intel_lpss_probe(&pdev->dev, info);
+}
+
+static int intel_lpss_acpi_remove(struct platform_device *pdev)
+{
+ intel_lpss_remove(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
+
+static struct platform_driver intel_lpss_acpi_driver = {
+ .probe = intel_lpss_acpi_probe,
+ .remove = intel_lpss_acpi_remove,
+ .driver = {
+ .name = "intel-lpss",
+ .acpi_match_table = intel_lpss_acpi_ids,
+ .pm = &intel_lpss_acpi_pm_ops,
+ },
+};
+
+module_platform_driver(intel_lpss_acpi_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS ACPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
new file mode 100644
index 000000000000..9236dffeb4d6
--- /dev/null
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -0,0 +1,113 @@
+/*
+ * Intel LPSS PCI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-lpss.h"
+
+static int intel_lpss_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_lpss_platform_info *info;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->mem = &pdev->resource[0];
+ info->irq = pdev->irq;
+
+	/* It is probably enough to set this only for iDMA-capable devices */
+ pci_set_master(pdev);
+
+ ret = intel_lpss_probe(&pdev->dev, info);
+ if (ret)
+ return ret;
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static void intel_lpss_pci_remove(struct pci_dev *pdev)
+{
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ intel_lpss_remove(&pdev->dev);
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
+
+static const struct intel_lpss_platform_info spt_info = {
+ .clk_rate = 120000000,
+};
+
+static const struct intel_lpss_platform_info spt_uart_info = {
+ .clk_rate = 120000000,
+ .clk_con_id = "baudclk",
+};
+
+static const struct pci_device_id intel_lpss_pci_ids[] = {
+ /* SPT-LP */
+ { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
+ /* SPT-H */
+ { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+
+static struct pci_driver intel_lpss_pci_driver = {
+ .name = "intel-lpss",
+ .id_table = intel_lpss_pci_ids,
+ .probe = intel_lpss_pci_probe,
+ .remove = intel_lpss_pci_remove,
+ .driver = {
+ .pm = &intel_lpss_pci_pm_ops,
+ },
+};
+
+module_pci_driver(intel_lpss_pci_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
new file mode 100644
index 000000000000..fdf4d5c1add2
--- /dev/null
+++ b/drivers/mfd/intel-lpss.c
@@ -0,0 +1,524 @@
+/*
+ * Intel Sunrisepoint LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ * Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+
+#include "intel-lpss.h"
+
+#define LPSS_DEV_OFFSET 0x000
+#define LPSS_DEV_SIZE 0x200
+#define LPSS_PRIV_OFFSET 0x200
+#define LPSS_PRIV_SIZE 0x100
+#define LPSS_IDMA64_OFFSET 0x800
+#define LPSS_IDMA64_SIZE 0x800
+
+/* Offsets from lpss->priv */
+#define LPSS_PRIV_RESETS 0x04
+#define LPSS_PRIV_RESETS_FUNC BIT(2)
+#define LPSS_PRIV_RESETS_IDMA 0x3
+
+#define LPSS_PRIV_ACTIVELTR 0x10
+#define LPSS_PRIV_IDLELTR 0x14
+
+#define LPSS_PRIV_LTR_REQ BIT(15)
+#define LPSS_PRIV_LTR_SCALE_MASK 0xc00
+#define LPSS_PRIV_LTR_SCALE_1US 0x800
+#define LPSS_PRIV_LTR_SCALE_32US 0xc00
+#define LPSS_PRIV_LTR_VALUE_MASK 0x3ff
+
+#define LPSS_PRIV_SSP_REG 0x20
+#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN BIT(0)
+
+#define LPSS_PRIV_REMAP_ADDR_LO 0x40
+#define LPSS_PRIV_REMAP_ADDR_HI 0x44
+
+#define LPSS_PRIV_CAPS 0xfc
+#define LPSS_PRIV_CAPS_NO_IDMA BIT(8)
+#define LPSS_PRIV_CAPS_TYPE_SHIFT 4
+#define LPSS_PRIV_CAPS_TYPE_MASK (0xf << LPSS_PRIV_CAPS_TYPE_SHIFT)
+
+/* This matches the type field in CAPS register */
+enum intel_lpss_dev_type {
+ LPSS_DEV_I2C = 0,
+ LPSS_DEV_UART,
+ LPSS_DEV_SPI,
+};
+
+struct intel_lpss {
+ const struct intel_lpss_platform_info *info;
+ enum intel_lpss_dev_type type;
+ struct clk *clk;
+ struct clk_lookup *clock;
+ const struct mfd_cell *cell;
+ struct device *dev;
+ void __iomem *priv;
+ int devid;
+ u32 caps;
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs;
+};
+
+static const struct resource intel_lpss_dev_resources[] = {
+ DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
+ DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
+ DEFINE_RES_IRQ(0),
+};
+
+static const struct resource intel_lpss_idma64_resources[] = {
+ DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
+ DEFINE_RES_IRQ(0),
+};
+
+#define LPSS_IDMA64_DRIVER_NAME "idma64"
+
+/*
+ * Cells need to be ordered so that the iDMA device is created first. This is
+ * because we need to be sure the DMA engine is available when the host
+ * controller driver is probed.
+ */
+static const struct mfd_cell intel_lpss_idma64_cell = {
+ .name = LPSS_IDMA64_DRIVER_NAME,
+ .num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
+ .resources = intel_lpss_idma64_resources,
+};
+
+static const struct mfd_cell intel_lpss_i2c_cell = {
+ .name = "i2c_designware",
+ .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+ .resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_uart_cell = {
+ .name = "dw-apb-uart",
+ .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+ .resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_spi_cell = {
+ .name = "pxa2xx-spi",
+ .num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+ .resources = intel_lpss_dev_resources,
+};
+
+static DEFINE_IDA(intel_lpss_devid_ida);
+static struct dentry *intel_lpss_debugfs;
+
+static int intel_lpss_request_dma_module(const char *name)
+{
+ static bool intel_lpss_dma_requested;
+
+ if (intel_lpss_dma_requested)
+ return 0;
+
+ intel_lpss_dma_requested = true;
+ return request_module("%s", name);
+}
+
+static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
+{
+ lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+ lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
+}
+
+static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ /* Cache the values into lpss structure */
+ intel_lpss_cache_ltr(lpss);
+
+ debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
+ debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
+ debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);
+
+ lpss->debugfs = dir;
+ return 0;
+}
+
+static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
+{
+ debugfs_remove_recursive(lpss->debugfs);
+}
+
+static void intel_lpss_ltr_set(struct device *dev, s32 val)
+{
+ struct intel_lpss *lpss = dev_get_drvdata(dev);
+ u32 ltr;
+
+ /*
+	 * Program the latency tolerance (LTR) according to what the PM QoS
+	 * layer has asked for, or disable it if we were passed a negative
+	 * value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~LPSS_PRIV_LTR_REQ;
+ } else {
+ ltr |= LPSS_PRIV_LTR_REQ;
+ ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
+ ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;
+
+ if (val > LPSS_PRIV_LTR_VALUE_MASK)
+ ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
+ else
+ ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
+ }
+
+ if (ltr == lpss->active_ltr)
+ return;
+
+ writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
+ writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);
+
+ /* Cache the values into lpss structure */
+ intel_lpss_cache_ltr(lpss);
+}
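Two worked encodings for the scale selection above (values are illustrative):

/*
 * val = 100:  100 <= LPSS_PRIV_LTR_VALUE_MASK (0x3ff), so the 1 us scale
 *	encodes it exactly:
 *	ltr = LPSS_PRIV_LTR_REQ | LPSS_PRIV_LTR_SCALE_1US | 100;
 *
 * val = 3000: 3000 > 0x3ff, so switch to the 32 us scale and divide:
 *	ltr = LPSS_PRIV_LTR_REQ | LPSS_PRIV_LTR_SCALE_32US | (3000 >> 5);
 *	3000 >> 5 == 93, i.e. roughly 2976 us is actually requested.
 */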
+
+static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
+{
+ lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(lpss->dev);
+}
+
+static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
+{
+ dev_pm_qos_hide_latency_tolerance(lpss->dev);
+ lpss->dev->power.set_latency_tolerance = NULL;
+}
+
+static int intel_lpss_assign_devs(struct intel_lpss *lpss)
+{
+ unsigned int type;
+
+ type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
+ type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;
+
+ switch (type) {
+ case LPSS_DEV_I2C:
+ lpss->cell = &intel_lpss_i2c_cell;
+ break;
+ case LPSS_DEV_UART:
+ lpss->cell = &intel_lpss_uart_cell;
+ break;
+ case LPSS_DEV_SPI:
+ lpss->cell = &intel_lpss_spi_cell;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ lpss->type = type;
+
+ return 0;
+}
+
+static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
+{
+ return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
+}
+
+static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
+{
+ resource_size_t addr = lpss->info->mem->start;
+
+ writel(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR_LO);
+#if BITS_PER_LONG > 32
+ writel(addr >> 32, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#else
+ writel(0, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#endif
+}
+
+static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
+{
+ u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;
+
+ /* Bring out the device from reset */
+ writel(value, lpss->priv + LPSS_PRIV_RESETS);
+}
+
+static void intel_lpss_init_dev(const struct intel_lpss *lpss)
+{
+ u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+
+ intel_lpss_deassert_reset(lpss);
+
+ if (!intel_lpss_has_idma(lpss))
+ return;
+
+ intel_lpss_set_remap_addr(lpss);
+
+ /* Make sure that SPI multiblock DMA transfers are re-enabled */
+ if (lpss->type == LPSS_DEV_SPI)
+ writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
+}
+
+static void intel_lpss_unregister_clock_tree(struct clk *clk)
+{
+ struct clk *parent;
+
+ while (clk) {
+ parent = clk_get_parent(clk);
+ clk_unregister(clk);
+ clk = parent;
+ }
+}
+
+static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+ const char *devname,
+ struct clk **clk)
+{
+ char name[32];
+ struct clk *tmp = *clk;
+
+ snprintf(name, sizeof(name), "%s-enable", devname);
+ tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
+ lpss->priv, 0, 0, NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ snprintf(name, sizeof(name), "%s-div", devname);
+ tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
+ 0, lpss->priv, 1, 15, 16, 15, 0,
+ NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ *clk = tmp;
+
+ snprintf(name, sizeof(name), "%s-update", devname);
+ tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
+ CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ *clk = tmp;
+
+ return 0;
+}
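For orientation, the registrations above chain into this per-device clock tree (names derive from devname, e.g. "pxa2xx-spi.0"; bit positions come from the arguments passed above):

/*
 * <dev_name(lpss->dev)>	fixed-rate root @ info->clk_rate
 *   -> "<devname>-enable"	gate, priv bit 0
 *   -> "<devname>-div"		fractional divider, 15-bit M (shift 1)
 *				and 15-bit N (shift 16) fields
 *   -> "<devname>-update"	gate, priv bit 31, CLK_SET_RATE_PARENT
 */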
+
+static int intel_lpss_register_clock(struct intel_lpss *lpss)
+{
+ const struct mfd_cell *cell = lpss->cell;
+ struct clk *clk;
+ char devname[24];
+ int ret;
+
+ if (!lpss->info->clk_rate)
+ return 0;
+
+ /* Root clock */
+ clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL,
+ CLK_IS_ROOT, lpss->info->clk_rate);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);
+
+ /*
+	 * Register the clock divider only for device types that actually
+	 * use one (everything except I2C); otherwise the divider is
+	 * assumed to be unused.
+ */
+ if (lpss->type != LPSS_DEV_I2C) {
+ ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
+ if (ret)
+ goto err_clk_register;
+ }
+
+ ret = -ENOMEM;
+
+ /* Clock for the host controller */
+ lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
+ if (!lpss->clock)
+ goto err_clk_register;
+
+ lpss->clk = clk;
+
+ return 0;
+
+err_clk_register:
+ intel_lpss_unregister_clock_tree(clk);
+
+ return ret;
+}
+
+static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
+{
+ if (IS_ERR_OR_NULL(lpss->clk))
+ return;
+
+ clkdev_drop(lpss->clock);
+ intel_lpss_unregister_clock_tree(lpss->clk);
+}
+
+int intel_lpss_probe(struct device *dev,
+ const struct intel_lpss_platform_info *info)
+{
+ struct intel_lpss *lpss;
+ int ret;
+
+ if (!info || !info->mem || info->irq <= 0)
+ return -EINVAL;
+
+ lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
+ if (!lpss)
+ return -ENOMEM;
+
+ lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+ LPSS_PRIV_SIZE);
+ if (!lpss->priv)
+ return -ENOMEM;
+
+ lpss->info = info;
+ lpss->dev = dev;
+ lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);
+
+ dev_set_drvdata(dev, lpss);
+
+ ret = intel_lpss_assign_devs(lpss);
+ if (ret)
+ return ret;
+
+ intel_lpss_init_dev(lpss);
+
+ lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
+ if (lpss->devid < 0)
+ return lpss->devid;
+
+ ret = intel_lpss_register_clock(lpss);
+ if (ret)
+ goto err_clk_register;
+
+ intel_lpss_ltr_expose(lpss);
+
+ ret = intel_lpss_debugfs_add(lpss);
+ if (ret)
+ dev_warn(dev, "Failed to create debugfs entries\n");
+
+ if (intel_lpss_has_idma(lpss)) {
+ /*
+ * Ensure the DMA driver is loaded before the host
+ * controller device appears, so that the host controller
+ * driver can request its DMA channels as early as
+ * possible.
+ *
+ * If the DMA module is not there that's OK as well.
+ */
+ intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME);
+
+ ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
+ 1, info->mem, info->irq, NULL);
+ if (ret)
+			dev_warn(dev, "Failed to add %s, falling back to PIO\n",
+ LPSS_IDMA64_DRIVER_NAME);
+ }
+
+ ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
+ 1, info->mem, info->irq, NULL);
+ if (ret)
+ goto err_remove_ltr;
+
+ return 0;
+
+err_remove_ltr:
+ intel_lpss_debugfs_remove(lpss);
+ intel_lpss_ltr_hide(lpss);
+
+err_clk_register:
+ ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_probe);
+
+void intel_lpss_remove(struct device *dev)
+{
+ struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+ mfd_remove_devices(dev);
+ intel_lpss_debugfs_remove(lpss);
+ intel_lpss_ltr_hide(lpss);
+ intel_lpss_unregister_clock(lpss);
+ ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+}
+EXPORT_SYMBOL_GPL(intel_lpss_remove);
+
+static int resume_lpss_device(struct device *dev, void *data)
+{
+ pm_runtime_resume(dev);
+ return 0;
+}
+
+int intel_lpss_prepare(struct device *dev)
+{
+	 * Resume both child devices before entering system sleep. This
+	 * ensures that they are in a proper state before they get suspended.
+ * ensures that they are in proper state before they get suspended.
+ */
+ device_for_each_child_reverse(dev, NULL, resume_lpss_device);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_prepare);
+
+int intel_lpss_suspend(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+
+int intel_lpss_resume(struct device *dev)
+{
+ struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+ intel_lpss_init_dev(lpss);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_resume);
+
+static int __init intel_lpss_init(void)
+{
+ intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
+ return 0;
+}
+module_init(intel_lpss_init);
+
+static void __exit intel_lpss_exit(void)
+{
+ debugfs_remove(intel_lpss_debugfs);
+}
+module_exit(intel_lpss_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
new file mode 100644
index 000000000000..f28cb28a62f8
--- /dev/null
+++ b/drivers/mfd/intel-lpss.h
@@ -0,0 +1,62 @@
+/*
+ * Intel LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_INTEL_LPSS_H
+#define __MFD_INTEL_LPSS_H
+
+struct device;
+struct resource;
+
+struct intel_lpss_platform_info {
+ struct resource *mem;
+ int irq;
+ unsigned long clk_rate;
+ const char *clk_con_id;
+};
+
+int intel_lpss_probe(struct device *dev,
+ const struct intel_lpss_platform_info *info);
+void intel_lpss_remove(struct device *dev);
+
+#ifdef CONFIG_PM
+int intel_lpss_prepare(struct device *dev);
+int intel_lpss_suspend(struct device *dev);
+int intel_lpss_resume(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+#define INTEL_LPSS_SLEEP_PM_OPS \
+ .prepare = intel_lpss_prepare, \
+ .suspend = intel_lpss_suspend, \
+ .resume = intel_lpss_resume, \
+ .freeze = intel_lpss_suspend, \
+ .thaw = intel_lpss_resume, \
+ .poweroff = intel_lpss_suspend, \
+ .restore = intel_lpss_resume,
+#endif
+
+#define INTEL_LPSS_RUNTIME_PM_OPS \
+ .runtime_suspend = intel_lpss_suspend, \
+ .runtime_resume = intel_lpss_resume,
+
+#else /* !CONFIG_PM */
+#define INTEL_LPSS_SLEEP_PM_OPS
+#define INTEL_LPSS_RUNTIME_PM_OPS
+#endif /* CONFIG_PM */
+
+#define INTEL_LPSS_PM_OPS(name) \
+const struct dev_pm_ops name = { \
+ INTEL_LPSS_SLEEP_PM_OPS \
+ INTEL_LPSS_RUNTIME_PM_OPS \
+}
+
+#endif /* __MFD_INTEL_LPSS_H */
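
A glue driver is expected to instantiate its PM table with the INTEL_LPSS_PM_OPS() macro above, which expands to a dev_pm_ops that wires the shared intel_lpss_* callbacks into both the system-sleep and runtime-PM slots. A sketch of how a hypothetical PCI glue driver might consume it (names are illustrative, not from this patch):

#include <linux/pci.h>

#include "intel-lpss.h"

static INTEL_LPSS_PM_OPS(demo_lpss_pci_pm_ops);

static struct pci_driver demo_lpss_pci_driver = {
        .name = "demo-lpss-pci",
        /* .probe would fill a struct intel_lpss_platform_info from the BAR
         * and IRQ and call intel_lpss_probe(); .remove calls
         * intel_lpss_remove() */
        .driver = {
                .pm = &demo_lpss_pci_pm_ops,
        },
};
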
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 7b50b6b208a5..d9e15cf7c6c8 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -24,8 +24,25 @@
#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/gpio/machine.h>
+#include <linux/pwm.h>
#include "intel_soc_pmic_core.h"
+/* Lookup table for the Panel Enable/Disable line as a GPIO signal */
+static struct gpiod_lookup_table panel_gpio_table = {
+ /* Intel GFX is the consumer */
+ .dev_id = "0000:00:02.0",
+ .table = {
+ /* Panel EN/DISABLE */
+ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+ },
+};
+
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+ PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
static int intel_soc_pmic_find_gpio_irq(struct device *dev)
{
struct gpio_desc *desc;
@@ -85,6 +102,12 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
if (ret)
dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
+ /* Add lookup table binding for Panel Control to the GPIO Chip */
+ gpiod_add_lookup_table(&panel_gpio_table);
+
+ /* Add lookup table for crc-pwm */
+ pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
ret = mfd_add_devices(dev, -1, config->cell_dev,
config->n_cell_devs, NULL, 0,
regmap_irq_get_domain(pmic->irq_chip_data));
@@ -104,6 +127,12 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
+ /* Remove lookup table for Panel Control from the GPIO Chip */
+ gpiod_remove_lookup_table(&panel_gpio_table);
+
+ /* Remove the crc-pwm lookup table */
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
mfd_remove_devices(&i2c->dev);
return 0;
@@ -147,7 +176,7 @@ static const struct i2c_device_id intel_soc_pmic_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
#if defined(CONFIG_ACPI)
-static struct acpi_device_id intel_soc_pmic_acpi_match[] = {
+static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
{"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc},
{ },
};
@@ -157,7 +186,6 @@ MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match);
static struct i2c_driver intel_soc_pmic_i2c_driver = {
.driver = {
.name = "intel_soc_pmic_i2c",
- .owner = THIS_MODULE,
.pm = &intel_soc_pmic_pm_ops,
.acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
},
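
The gpiod lookup table registered above maps a named line to a physical GPIO, so the consumer (.dev_id "0000:00:02.0", the Intel graphics PCI device) can request it by con_id instead of a hard-coded number. Roughly, the consumer side pairs with it as in this hypothetical sketch:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_panel_enable(struct device *dev)
{
        struct gpio_desc *panel;

        /* matched against GPIO_LOOKUP("gpio_crystalcove", 94, "panel", ...) */
        panel = gpiod_get(dev, "panel", GPIOD_OUT_LOW);
        if (IS_ERR(panel))
                return PTR_ERR(panel);

        gpiod_set_value(panel, 1);      /* drive the panel enable line */
        gpiod_put(panel);
        return 0;
}
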
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 7436075e8983..4a7494872da2 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -109,6 +109,9 @@ static struct mfd_cell crystal_cove_dev[] = {
{
.name = "crystal_cove_pmic",
},
+ {
+ .name = "crystal_cove_pwm",
+ },
};
static const struct regmap_config crystal_cove_regmap_config = {
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 8df3266064e4..a41859c55bda 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -53,8 +53,8 @@ static void ipaq_micro_trigger_tx(struct ipaq_micro *micro)
tx->buf[bp++] = checksum;
tx->len = bp;
tx->index = 0;
- print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
- tx->buf, tx->len, true);
+ print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ tx->buf, tx->len, true);
/* Enable interrupt */
val = readl(micro->base + UTCR3);
@@ -242,7 +242,7 @@ static u16 ipaq_micro_to_u16(u8 *data)
return data[1] << 8 | data[0];
}
-static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
+static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
{
u8 dump[256];
char *str;
@@ -250,7 +250,7 @@ static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
ipaq_micro_eeprom_read(micro, 0, 128, dump);
str = ipaq_micro_str(dump, 10);
if (str) {
- dev_info(micro->dev, "HM version %s\n", str);
+ dev_info(micro->dev, "HW version %s\n", str);
kfree(str);
}
str = ipaq_micro_str(dump+10, 40);
@@ -281,8 +281,8 @@ static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
dev_info(micro->dev, "screen: %u x %u\n",
ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96));
- print_hex_dump(KERN_DEBUG, "eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
- dump, 256, true);
+ print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
+ dump, 256, true);
}
@@ -386,7 +386,7 @@ static int micro_resume(struct device *dev)
return 0;
}
-static int micro_probe(struct platform_device *pdev)
+static int __init micro_probe(struct platform_device *pdev)
{
struct ipaq_micro *micro;
struct resource *res;
@@ -448,21 +448,6 @@ static int micro_probe(struct platform_device *pdev)
return 0;
}
-static int micro_remove(struct platform_device *pdev)
-{
- struct ipaq_micro *micro = platform_get_drvdata(pdev);
- u32 val;
-
- mfd_remove_devices(&pdev->dev);
-
- val = readl(micro->base + UTCR3);
- val &= ~(UTCR3_RXE | UTCR3_RIE); /* disable receive interrupt */
- val &= ~(UTCR3_TXE | UTCR3_TIE); /* disable transmit interrupt */
- writel(val, micro->base + UTCR3);
-
- return 0;
-}
-
static const struct dev_pm_ops micro_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(NULL, micro_resume)
};
@@ -471,12 +456,7 @@ static struct platform_driver micro_device_driver = {
.driver = {
.name = "ipaq-h3xxx-micro",
.pm = &micro_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
- .probe = micro_probe,
- .remove = micro_remove,
- /* .shutdown = micro_suspend, // FIXME */
};
-module_platform_driver(micro_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("driver for iPAQ Atmel micro core and backlight");
+builtin_platform_driver_probe(micro_device_driver, micro_probe);
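
builtin_platform_driver_probe() is the non-modular counterpart of module_platform_driver_probe(): the driver is registered at device_initcall time with a one-shot probe routine that may live in __init memory, and because it can never be bound again the patch also sets .suppress_bind_attrs. A minimal hypothetical user of the same pattern:

#include <linux/init.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
        /* one-shot setup; this code is discarded once boot finishes */
        return 0;
}

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                .suppress_bind_attrs = true,    /* probe cannot run twice */
        },
};
builtin_platform_driver_probe(demo_driver, demo_probe);
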
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index b31c54e4ecb2..5bb49f08955d 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,12 +273,12 @@ static int jz4740_adc_probe(struct platform_device *pdev)
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_ack = irq_gc_ack_set_bit;
- irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+ irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0,
+ IRQ_NOPROBE | IRQ_LEVEL);
adc->gc = gc;
- irq_set_handler_data(adc->irq, gc);
- irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
+ irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc);
writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
@@ -308,8 +308,7 @@ static int jz4740_adc_remove(struct platform_device *pdev)
irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
kfree(adc->gc);
- irq_set_handler_data(adc->irq, NULL);
- irq_set_chained_handler(adc->irq, NULL);
+ irq_set_chained_handler_and_data(adc->irq, NULL, NULL);
iounmap(adc->base);
release_mem_region(adc->mem->start, resource_size(adc->mem));
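
irq_set_chained_handler_and_data() installs the flow handler and its handler data atomically, closing the window in the old two-call sequence where the demux handler could already fire and observe stale or NULL data. Sketched with a hypothetical demux handler of the same shape as the ones converted in this series:

#include <linux/irq.h>
#include <linux/irqdesc.h>

static void demo_demux(unsigned int irq, struct irq_desc *desc)
{
        void *priv = irq_desc_get_handler_data(desc);

        /* ... read the hardware status, generic_handle_irq() per set bit ... */
        (void)priv;
}

static void demo_attach(unsigned int parent_irq, void *priv)
{
        /* replaces irq_set_handler_data() + irq_set_chained_handler() */
        irq_set_chained_handler_and_data(parent_irq, demo_demux, priv);
}

static void demo_detach(unsigned int parent_irq)
{
        irq_set_chained_handler_and_data(parent_irq, NULL, NULL);
}
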
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 8057849d51ac..463f4eae20c1 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -501,6 +501,14 @@ static struct platform_driver kempld_driver = {
static struct dmi_system_id kempld_dmi_table[] __initdata = {
{
+ .ident = "BBL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-bBL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "BHL6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -517,6 +525,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CBW6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cBW6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CCR2",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index d42fbb667d8c..643f3750e830 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -640,7 +640,6 @@ MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
static struct i2c_driver lm3533_i2c_driver = {
.driver = {
.name = "lm3533",
- .owner = THIS_MODULE,
},
.id_table = lm3533_i2c_ids,
.probe = lm3533_i2c_probe,
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index 335b930112b2..eecbb13de1bd 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -154,7 +154,6 @@ static struct i2c_driver lp3943_driver = {
.remove = lp3943_remove,
.driver = {
.name = "lp3943",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lp3943_of_match),
},
.id_table = lp3943_ids,
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index a87f2b548f71..c7a9825aa4ce 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -141,12 +141,7 @@ static int lp8788_irq_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_data(virq, irqd);
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
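
This is the first of many hunks in this diff dropping set_irq_flags(), an ARM-only wrapper: passing IRQF_VALID amounted to clearing IRQ_NOREQUEST while leaving the line marked IRQ_NOPROBE, which the generic helpers express directly on every architecture. A sketch of the equivalent for a hypothetical freshly mapped virq:

#include <linux/irq.h>

static void demo_activate(unsigned int virq)
{
        /* requestable, but excluded from autoprobing, on all architectures */
        irq_clear_status_flags(virq, IRQ_NOREQUEST);
        irq_set_noprobe(virq);
}
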
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index a30bc15fe5ba..acf616559512 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -221,7 +221,6 @@ MODULE_DEVICE_TABLE(i2c, lp8788_ids);
static struct i2c_driver lp8788_driver = {
.driver = {
.name = "lp8788",
- .owner = THIS_MODULE,
},
.probe = lp8788_probe,
.remove = lp8788_remove,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 8de34398abc0..c5a9a08b5dfb 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -66,6 +66,7 @@
#include <linux/pci.h>
#include <linux/mfd/core.h>
#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
#define ACPIBASE 0x40
#define ACPIBASE_GPE_OFF 0x28
@@ -835,9 +836,31 @@ static void lpc_ich_enable_pmc_space(struct pci_dev *dev)
priv->actrl_pbase_save = reg_save;
}
-static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
+static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
{
+ struct itco_wdt_platform_data *pdata;
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+ struct lpc_ich_info *info;
+ struct mfd_cell *cell = &lpc_ich_cells[LPC_WDT];
+
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ info = &lpc_chipset_info[priv->chipset];
+
+ pdata->version = info->iTCO_version;
+ strlcpy(pdata->name, info->name, sizeof(pdata->name));
+
+ cell->platform_data = pdata;
+ cell->pdata_size = sizeof(*pdata);
+ return 0;
+}
+
+static void lpc_ich_finalize_gpio_cell(struct pci_dev *dev)
+{
+ struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+ struct mfd_cell *cell = &lpc_ich_cells[LPC_GPIO];
cell->platform_data = &lpc_chipset_info[priv->chipset];
cell->pdata_size = sizeof(struct lpc_ich_info);
@@ -933,7 +956,7 @@ gpe0_done:
lpc_chipset_info[priv->chipset].use_gpio = ret;
lpc_ich_enable_gpio_space(dev);
- lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+ lpc_ich_finalize_gpio_cell(dev);
ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
&lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
@@ -1007,7 +1030,10 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
res->end = base_addr + ACPIBASE_PMC_END;
}
- lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+ ret = lpc_ich_finalize_wdt_cell(dev);
+ if (ret)
+ goto wdt_done;
+
ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
&lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 3bf8def82f1e..56e216dedc91 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -532,7 +532,6 @@ static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume);
static struct i2c_driver max14577_i2c_driver = {
.driver = {
.name = "max14577",
- .owner = THIS_MODULE,
.pm = &max14577_pm,
.of_match_table = max14577_dt_match,
},
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 760d08d7923d..d19be64cd32b 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -391,7 +391,6 @@ static SIMPLE_DEV_PM_OPS(max77686_pm, max77686_suspend, max77686_resume);
static struct i2c_driver max77686_i2c_driver = {
.driver = {
.name = "max77686",
- .owner = THIS_MODULE,
.pm = &max77686_pm,
.of_match_table = of_match_ptr(max77686_pmic_dt_match),
},
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cb14afa97e6f..007f729e150b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -33,6 +33,7 @@
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/regulator/machine.h>
#include <linux/regmap.h>
@@ -193,22 +194,22 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
} else
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
- max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
- if (!max77693->muic) {
+ max77693->i2c_muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ if (!max77693->i2c_muic) {
dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
return -ENODEV;
}
- i2c_set_clientdata(max77693->muic, max77693);
+ i2c_set_clientdata(max77693->i2c_muic, max77693);
- max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
- if (!max77693->haptic) {
+ max77693->i2c_haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ if (!max77693->i2c_haptic) {
dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
ret = -ENODEV;
goto err_i2c_haptic;
}
- i2c_set_clientdata(max77693->haptic, max77693);
+ i2c_set_clientdata(max77693->i2c_haptic, max77693);
- max77693->regmap_haptic = devm_regmap_init_i2c(max77693->haptic,
+ max77693->regmap_haptic = devm_regmap_init_i2c(max77693->i2c_haptic,
&max77693_regmap_haptic_config);
if (IS_ERR(max77693->regmap_haptic)) {
ret = PTR_ERR(max77693->regmap_haptic);
@@ -222,7 +223,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
 * instance of the MUIC device when the max77693 IRQ is initialized
 * before the max77693-muic probe() function is called.
*/
- max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic,
+ max77693->regmap_muic = devm_regmap_init_i2c(max77693->i2c_muic,
&max77693_regmap_muic_config);
if (IS_ERR(max77693->regmap_muic)) {
ret = PTR_ERR(max77693->regmap_muic);
@@ -255,7 +256,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
IRQF_ONESHOT | IRQF_SHARED |
IRQF_TRIGGER_FALLING, 0,
&max77693_charger_irq_chip,
- &max77693->irq_data_charger);
+ &max77693->irq_data_chg);
if (ret) {
dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
goto err_irq_charger;
@@ -296,15 +297,15 @@ err_mfd:
err_intsrc:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
err_irq_muic:
- regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+ regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
err_irq_charger:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
err_irq_topsys:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
err_regmap:
- i2c_unregister_device(max77693->haptic);
+ i2c_unregister_device(max77693->i2c_haptic);
err_i2c_haptic:
- i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->i2c_muic);
return ret;
}
@@ -315,12 +316,12 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(max77693->dev);
regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
- regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+ regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
- i2c_unregister_device(max77693->muic);
- i2c_unregister_device(max77693->haptic);
+ i2c_unregister_device(max77693->i2c_muic);
+ i2c_unregister_device(max77693->i2c_haptic);
return 0;
}
@@ -372,7 +373,6 @@ static const struct of_device_id max77693_dt_match[] = {
static struct i2c_driver max77693_i2c_driver = {
.driver = {
.name = "max77693",
- .owner = THIS_MODULE,
.pm = &max77693_pm,
.of_match_table = of_match_ptr(max77693_dt_match),
},
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index a354ac677ec7..c52162ea3d0a 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -71,7 +72,7 @@ static const struct regmap_irq_chip max77843_irq_chip = {
};
/* Charger and Charger regulator use same regmap. */
-static int max77843_chg_init(struct max77843 *max77843)
+static int max77843_chg_init(struct max77693_dev *max77843)
{
int ret;
@@ -101,7 +102,7 @@ err_chg_i2c:
static int max77843_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct max77843 *max77843;
+ struct max77693_dev *max77843;
unsigned int reg_data;
int ret;
@@ -113,6 +114,7 @@ static int max77843_probe(struct i2c_client *i2c,
max77843->dev = &i2c->dev;
max77843->i2c = i2c;
max77843->irq = i2c->irq;
+ max77843->type = id->driver_data;
max77843->regmap = devm_regmap_init_i2c(i2c,
&max77843_regmap_config);
@@ -123,7 +125,7 @@ static int max77843_probe(struct i2c_client *i2c,
ret = regmap_add_irq_chip(max77843->regmap, max77843->irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
- 0, &max77843_irq_chip, &max77843->irq_data);
+ 0, &max77843_irq_chip, &max77843->irq_data_topsys);
if (ret) {
dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n");
return ret;
@@ -164,18 +166,18 @@ static int max77843_probe(struct i2c_client *i2c,
return 0;
err_pmic_id:
- regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
return ret;
}
static int max77843_remove(struct i2c_client *i2c)
{
- struct max77843 *max77843 = i2c_get_clientdata(i2c);
+ struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
mfd_remove_devices(max77843->dev);
- regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+ regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
i2c_unregister_device(max77843->i2c_chg);
@@ -188,7 +190,7 @@ static const struct of_device_id max77843_dt_match[] = {
};
static const struct i2c_device_id max77843_id[] = {
- { "max77843", },
+ { "max77843", TYPE_MAX77843, },
{ },
};
MODULE_DEVICE_TABLE(i2c, max77843_id);
@@ -196,7 +198,7 @@ MODULE_DEVICE_TABLE(i2c, max77843_id);
static int __maybe_unused max77843_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
- struct max77843 *max77843 = i2c_get_clientdata(i2c);
+ struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
disable_irq(max77843->irq);
if (device_may_wakeup(dev))
@@ -208,7 +210,7 @@ static int __maybe_unused max77843_suspend(struct device *dev)
static int __maybe_unused max77843_resume(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
- struct max77843 *max77843 = i2c_get_clientdata(i2c);
+ struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
if (device_may_wakeup(dev))
disable_irq_wake(max77843->irq);
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 232749c8813d..2974c8b1273b 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -321,7 +321,6 @@ MODULE_DEVICE_TABLE(i2c, max8907_i2c_id);
static struct i2c_driver max8907_i2c_driver = {
.driver = {
.name = "max8907",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max8907_of_match),
},
.probe = max8907_i2c_probe,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 8520bd68c1ff..fd8b15cd84fd 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -650,11 +650,8 @@ static int max8925_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_data(virq, d->host_data);
irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
+
return 0;
}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index c880c895c5a6..b0fe8103e401 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -245,7 +245,6 @@ MODULE_DEVICE_TABLE(of, max8925_dt_ids);
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
- .owner = THIS_MODULE,
.pm = &max8925_pm_ops,
.of_match_table = max8925_dt_ids,
},
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index d3025be57f39..b95a46d79b9d 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -113,14 +113,14 @@ static const struct max8997_irq_data max8997_irqs[] = {
static void max8997_irq_lock(struct irq_data *data)
{
- struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
mutex_lock(&max8997->irqlock);
}
static void max8997_irq_sync_unlock(struct irq_data *data)
{
- struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
@@ -140,26 +140,25 @@ static void max8997_irq_sync_unlock(struct irq_data *data)
}
static const inline struct max8997_irq_data *
-irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
+irq_to_max8997_irq(struct max8997_dev *max8997, struct irq_data *data)
{
- struct irq_data *data = irq_get_irq_data(irq);
return &max8997_irqs[data->hwirq];
}
static void max8997_irq_mask(struct irq_data *data)
{
- struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
- data->irq);
+ data);
max8997->irq_masks_cur[irq_data->group] |= irq_data->mask;
}
static void max8997_irq_unmask(struct irq_data *data)
{
- struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+ struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
- data->irq);
+ data);
max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
}
@@ -295,11 +294,8 @@ static int max8997_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, max8997);
irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
+
return 0;
}
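
Inside an irq_chip callback the struct irq_data is already in hand, so irq_data_get_irq_chip_data(data) reads the chip data directly instead of taking the Linux irq number out of data and looking the descriptor up all over again, as irq_get_chip_data(data->irq) does. A hypothetical mask callback in the new style:

#include <linux/bitops.h>
#include <linux/irq.h>

struct demo_chip {
        u8 mask_cache[4];       /* shadow of the hardware mask registers */
};

static void demo_irq_mask(struct irq_data *data)
{
        struct demo_chip *chip = irq_data_get_irq_chip_data(data);
        irq_hw_number_t hwirq = irqd_to_hwirq(data);

        chip->mask_cache[hwirq / 8] |= BIT(hwirq % 8);
}
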
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 595364ee178a..d3cfa9cf5c8f 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -508,7 +508,6 @@ static const struct dev_pm_ops max8997_pm = {
static struct i2c_driver max8997_i2c_driver = {
.driver = {
.name = "max8997",
- .owner = THIS_MODULE,
.pm = &max8997_pm,
.of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 3702056628a8..90bad9ffa7e2 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -98,9 +98,8 @@ static struct max8998_irq_data max8998_irqs[] = {
};
static inline struct max8998_irq_data *
-irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
+irq_to_max8998_irq(struct max8998_dev *max8998, struct irq_data *data)
{
- struct irq_data *data = irq_get_irq_data(irq);
return &max8998_irqs[data->hwirq];
}
@@ -134,8 +133,7 @@ static void max8998_irq_sync_unlock(struct irq_data *data)
static void max8998_irq_unmask(struct irq_data *data)
{
struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
- struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
- data->irq);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
@@ -143,8 +141,7 @@ static void max8998_irq_unmask(struct irq_data *data)
static void max8998_irq_mask(struct irq_data *data)
{
struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
- struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
- data->irq);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
@@ -206,11 +203,8 @@ static int max8998_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, max8998);
irq_set_chip_and_handler(irq, &max8998_irq_chip, handle_edge_irq);
irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
+
return 0;
}
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index a37cb7444b6e..a7afe3bf27fc 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -377,7 +377,6 @@ static const struct dev_pm_ops max8998_pm = {
static struct i2c_driver max8998_i2c_driver = {
.driver = {
.name = "max8998",
- .owner = THIS_MODULE,
.pm = &max8998_pm,
.of_match_table = of_match_ptr(max8998_dt_match),
},
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 68b844811566..67e4c9aa7d18 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -96,7 +96,6 @@ static int mc13xxx_i2c_remove(struct i2c_client *client)
static struct i2c_driver mc13xxx_i2c_driver = {
.id_table = mc13xxx_i2c_device_id,
.driver = {
- .owner = THIS_MODULE,
.name = "mc13xxx",
.of_match_table = mc13xxx_dt_ids,
},
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 14fd5cbcf0f2..c17635d3e504 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -302,7 +302,7 @@ void mfd_remove_devices(struct device *parent)
{
atomic_t *cnts = NULL;
- device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+ device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);
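
device_for_each_child_reverse() is a newly added driver-core helper that walks a parent's children in reverse order of creation, so MFD cells are now removed last-registered-first, mirroring the way a probe error path unwinds (intel_lpss_prepare() earlier in this diff uses the same helper). The calling convention matches device_for_each_child():

#include <linux/device.h>

static int demo_quiesce_one(struct device *child, void *data)
{
        /* per-child teardown work; returning non-zero stops the walk */
        return 0;
}

static void demo_quiesce_children(struct device *parent)
{
        device_for_each_child_reverse(parent, NULL, demo_quiesce_one);
}
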
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 03929a6c6fc4..1749c1c9f405 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -60,14 +60,14 @@ static const struct mfd_cell mt6397_devs[] = {
static void mt6397_irq_lock(struct irq_data *data)
{
- struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
mutex_lock(&mt6397->irqlock);
}
static void mt6397_irq_sync_unlock(struct irq_data *data)
{
- struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
@@ -77,7 +77,7 @@ static void mt6397_irq_sync_unlock(struct irq_data *data)
static void mt6397_irq_disable(struct irq_data *data)
{
- struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
int shift = data->hwirq & 0xf;
int reg = data->hwirq >> 4;
@@ -86,19 +86,38 @@ static void mt6397_irq_disable(struct irq_data *data)
static void mt6397_irq_enable(struct irq_data *data)
{
- struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+ struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
int shift = data->hwirq & 0xf;
int reg = data->hwirq >> 4;
mt6397->irq_masks_cur[reg] |= BIT(shift);
}
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+ struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
+ int shift = irq_data->hwirq & 0xf;
+ int reg = irq_data->hwirq >> 4;
+
+ if (on)
+ mt6397->wake_mask[reg] |= BIT(shift);
+ else
+ mt6397->wake_mask[reg] &= ~BIT(shift);
+
+ return 0;
+}
+#else
+#define mt6397_irq_set_wake NULL
+#endif
+
static struct irq_chip mt6397_irq_chip = {
.name = "mt6397-irq",
.irq_bus_lock = mt6397_irq_lock,
.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
.irq_enable = mt6397_irq_enable,
.irq_disable = mt6397_irq_disable,
+ .irq_set_wake = mt6397_irq_set_wake,
};
static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
@@ -142,11 +161,7 @@ static int mt6397_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, mt6397);
irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
return 0;
}
@@ -183,6 +198,35 @@ static int mt6397_irq_init(struct mt6397_chip *mt6397)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_suspend(struct device *dev)
+{
+ struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+ regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
+ regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+
+ enable_irq_wake(chip->irq);
+
+ return 0;
+}
+
+static int mt6397_irq_resume(struct device *dev)
+{
+ struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+ regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
+ regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+
+ disable_irq_wake(chip->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
+ mt6397_irq_resume);
+
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
@@ -237,6 +281,7 @@ static struct platform_driver mt6397_driver = {
.driver = {
.name = "mt6397",
.of_match_table = of_match_ptr(mt6397_of_match),
+ .pm = &mt6397_pm_ops,
},
};
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 28cb048f4760..8f8bacb67a15 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -719,7 +719,6 @@ static struct i2c_driver palmas_i2c_driver = {
.driver = {
.name = "palmas",
.of_match_table = of_palmas_match_tbl,
- .owner = THIS_MODULE,
},
.probe = palmas_i2c_probe,
.remove = palmas_i2c_remove,
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 5a92646a2ccb..59502d02cd15 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -236,11 +236,49 @@ static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
return pm8xxx_config_irq(chip, block, config);
}
+static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ unsigned int bits;
+ int irq_bit;
+ u8 block;
+ int rc;
+
+ if (which != IRQCHIP_STATE_LINE_LEVEL)
+ return -EINVAL;
+
+ block = pmirq / 8;
+ irq_bit = pmirq % 8;
+
+ spin_lock(&chip->pm_irq_lock);
+ rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+ if (rc) {
+ pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
+ goto bail;
+ }
+
+ rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+ if (rc) {
+ pr_err("Failed Reading Status rc=%d\n", rc);
+ goto bail;
+ }
+
+ *state = !!(bits & BIT(irq_bit));
+bail:
+ spin_unlock(&chip->pm_irq_lock);
+
+ return rc;
+}
+
static struct irq_chip pm8xxx_irq_chip = {
.name = "pm8xxx",
.irq_mask_ack = pm8xxx_irq_mask_ack,
.irq_unmask = pm8xxx_irq_unmask,
.irq_set_type = pm8xxx_irq_set_type,
+ .irq_get_irqchip_state = pm8xxx_irq_get_irqchip_state,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
@@ -251,11 +289,8 @@ static int pm8xxx_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
+
return 0;
}
@@ -336,14 +371,12 @@ static int pm8921_probe(struct platform_device *pdev)
if (!chip->irqdomain)
return -ENODEV;
- irq_set_handler_data(irq, chip);
- irq_set_chained_handler(irq, pm8xxx_irq_handler);
+ irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
irq_set_irq_wake(irq, 1);
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (rc) {
- irq_set_chained_handler(irq, NULL);
- irq_set_handler_data(irq, NULL);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
irq_domain_remove(chip->irqdomain);
}
@@ -362,8 +395,7 @@ static int pm8921_remove(struct platform_device *pdev)
struct pm_irq_chip *chip = platform_get_drvdata(pdev);
device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
- irq_set_chained_handler(irq, NULL);
- irq_set_handler_data(irq, NULL);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
irq_domain_remove(chip->irqdomain);
return 0;
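
Implementing .irq_get_irqchip_state lets other kernel code sample the raw line level of a PMIC interrupt without unmasking or acking it, via the generic irq_get_irqchip_state() helper. A hypothetical caller:

#include <linux/interrupt.h>
#include <linux/irq.h>

static bool demo_line_is_high(unsigned int irq)
{
        bool level = false;

        /* ends up in pm8xxx_irq_get_irqchip_state() for a pm8xxx virq */
        if (irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL, &level))
                return false;   /* query failed or unsupported */

        return level;
}
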
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 12e324319573..6afc9fabd94c 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -149,6 +149,7 @@ static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
[QCOM_RPM_USB_OTG_SWITCH] = { 210, 125, 82, 1 },
[QCOM_RPM_HDMI_SWITCH] = { 211, 126, 83, 1 },
[QCOM_RPM_DDR_DMM] = { 212, 127, 84, 2 },
+ [QCOM_RPM_QDSS_CLK] = { 214, ~0, 7, 1 },
[QCOM_RPM_VDDMIN_GPIO] = { 215, 131, 89, 1 },
};
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index bb8502020274..3f8812daa304 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -386,9 +386,7 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
irq_set_chip_and_handler(__irq, &rc5t583_irq_chip,
handle_simple_irq);
irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
+ irq_clear_status_flags(__irq, IRQ_NOREQUEST);
}
ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT,
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index df276ad9f40b..e10f02f5d551 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -322,7 +322,6 @@ MODULE_DEVICE_TABLE(i2c, rc5t583_i2c_id);
static struct i2c_driver rc5t583_i2c_driver = {
.driver = {
.name = "rc5t583",
- .owner = THIS_MODULE,
},
.probe = rc5t583_i2c_probe,
.remove = rc5t583_i2c_remove,
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 2d64430c719b..d4c114abeb75 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -311,7 +311,6 @@ MODULE_DEVICE_TABLE(i2c, retu_id);
static struct i2c_driver retu_driver = {
.driver = {
.name = "retu-mfd",
- .owner = THIS_MODULE,
},
.probe = retu_probe,
.remove = retu_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index db395a6c52bc..d60f91619c4a 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -124,6 +124,7 @@ static const struct of_device_id rt5033_dt_match[] = {
{ .compatible = "richtek,rt5033", },
{ }
};
+MODULE_DEVICE_TABLE(of, rt5033_dt_match);
static struct i2c_driver rt5033_driver = {
.driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 4a69afb425ad..d206a3e8fe87 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -486,7 +486,6 @@ MODULE_DEVICE_TABLE(i2c, sec_pmic_id);
static struct i2c_driver sec_pmic_driver = {
.driver = {
.name = "sec_pmic",
- .owner = THIS_MODULE,
.pm = &sec_pmic_pm_ops,
.of_match_table = of_match_ptr(sec_dt_match),
},
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index e3deb466628b..fb4ce6d04c30 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -873,7 +873,6 @@ MODULE_DEVICE_TABLE(i2c, si476x_id);
static struct i2c_driver si476x_core_driver = {
.driver = {
.name = "si476x-core",
- .owner = THIS_MODULE,
},
.probe = si476x_core_probe,
.remove = si476x_core_remove,
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 03246880d484..a4c0df71c8b3 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -98,7 +98,6 @@ MODULE_DEVICE_TABLE(i2c, smsc_i2c_id);
static struct i2c_driver smsc_i2c_driver = {
.driver = {
.name = "smsc",
- .owner = THIS_MODULE,
},
.probe = smsc_i2c_probe,
.remove = smsc_i2c_remove,
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 5c054031c3f8..c3f4aab53b07 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
*/
#include <linux/i2c.h>
@@ -112,7 +112,6 @@ MODULE_DEVICE_TABLE(i2c, stmpe_id);
static struct i2c_driver stmpe_i2c_driver = {
.driver = {
.name = "stmpe-i2c",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
#endif
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index a81badbaa917..618ba244d98a 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,13 +4,14 @@
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
*/
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/types.h>
#include "stmpe.h"
@@ -108,6 +109,17 @@ static int stmpe_spi_remove(struct spi_device *spi)
return stmpe_remove(stmpe);
}
+static const struct of_device_id stmpe_spi_of_match[] = {
+ { .compatible = "st,stmpe610", },
+ { .compatible = "st,stmpe801", },
+ { .compatible = "st,stmpe811", },
+ { .compatible = "st,stmpe1601", },
+ { .compatible = "st,stmpe2401", },
+ { .compatible = "st,stmpe2403", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stmpe_spi_of_match);
+
static const struct spi_device_id stmpe_spi_id[] = {
{ "stmpe610", STMPE610 },
{ "stmpe801", STMPE801 },
@@ -122,6 +134,7 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
.name = "stmpe-spi",
+ .of_match_table = of_match_ptr(stmpe_spi_of_match),
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
@@ -146,4 +159,4 @@ module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 18c4d72d1d2a..e971af86ce1e 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -971,20 +971,13 @@ static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_data(virq, stmpe);
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
{
-#ifdef CONFIG_ARM
- set_irq_flags(virq, 0);
-#endif
irq_set_chip_and_handler(virq, NULL, NULL);
irq_set_chip_data(virq, NULL);
}
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 7ceb3df09e25..ca613df36143 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -231,6 +231,7 @@ static const struct i2c_device_id stw481x_id[] = {
{ "stw481x", 0 },
{ },
};
+MODULE_DEVICE_TABLE(i2c, stw481x_id);
static const struct of_device_id stw481x_match[] = {
{ .compatible = "st,stw4810", },
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index c09fb5dccd50..16fc1adc4fa3 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -187,7 +187,7 @@ static struct mfd_cell t7l66xb_cells[] = {
/* Handle the T7L66XB interrupt mux */
static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
{
- struct t7l66xb *t7l66xb = irq_get_handler_data(irq);
+ struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
unsigned int isr;
unsigned int i, irq_base;
@@ -246,14 +246,10 @@ static void t7l66xb_attach_irq(struct platform_device *dev)
for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
irq_set_chip_data(irq, t7l66xb);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
}
irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
- irq_set_handler_data(t7l66xb->irq, t7l66xb);
- irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq);
+ irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
}
static void t7l66xb_detach_irq(struct platform_device *dev)
@@ -263,13 +259,9 @@ static void t7l66xb_detach_irq(struct platform_device *dev)
irq_base = t7l66xb->irq_base;
- irq_set_chained_handler(t7l66xb->irq, NULL);
- irq_set_handler_data(t7l66xb->irq, NULL);
+ irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
irq_set_chip(irq, NULL);
irq_set_chip_data(irq, NULL);
}
@@ -318,7 +310,7 @@ static int t7l66xb_probe(struct platform_device *dev)
struct resource *iomem, *rscr;
int ret;
- if (pdata == NULL)
+ if (!pdata)
return -EINVAL;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -371,7 +363,7 @@ static int t7l66xb_probe(struct platform_device *dev)
clk_prepare_enable(t7l66xb->clk48m);
- if (pdata && pdata->enable)
+ if (pdata->enable)
pdata->enable(dev);
/* Mask all interrupts */
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 96d420dfc15d..274bf39968aa 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -215,20 +215,13 @@ static int tc3589x_irq_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &dummy_irq_chip,
handle_edge_irq);
irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
{
-#ifdef CONFIG_ARM
- set_irq_flags(virq, 0);
-#endif
irq_set_chip_and_handler(virq, NULL, NULL);
irq_set_chip_data(virq, NULL);
}
@@ -492,7 +485,6 @@ MODULE_DEVICE_TABLE(i2c, tc3589x_id);
static struct i2c_driver tc3589x_driver = {
.driver = {
.name = "tc3589x",
- .owner = THIS_MODULE,
.pm = &tc3589x_dev_pm_ops,
.of_match_table = of_match_ptr(tc3589x_match),
},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 63458b39a97d..775b9aca871a 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -525,7 +525,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
static void
tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
{
- struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
+ struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
unsigned int isr;
unsigned int i, irq_base;
@@ -586,12 +586,12 @@ static void tc6393xb_attach_irq(struct platform_device *dev)
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
irq_set_chip_data(irq, tc6393xb);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
- irq_set_handler_data(tc6393xb->irq, tc6393xb);
- irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
+ irq_set_chained_handler_and_data(tc6393xb->irq, tc6393xb_irq,
+ tc6393xb);
}
static void tc6393xb_detach_irq(struct platform_device *dev)
@@ -599,13 +599,12 @@ static void tc6393xb_detach_irq(struct platform_device *dev)
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
unsigned int irq, irq_base;
- irq_set_chained_handler(tc6393xb->irq, NULL);
- irq_set_handler_data(tc6393xb->irq, NULL);
+ irq_set_chained_handler_and_data(tc6393xb->irq, NULL, NULL);
irq_base = tc6393xb->irq_base;
for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
- set_irq_flags(irq, 0);
+ irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip(irq, NULL);
irq_set_chip_data(irq, NULL);
}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index a2e1990c9de7..1ab3dd6c8adf 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -129,7 +129,6 @@ MODULE_DEVICE_TABLE(of, tps6507x_of_match);
static struct i2c_driver tps6507x_i2c_driver = {
.driver = {
.name = "tps6507x",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps6507x_of_match),
},
.probe = tps6507x_i2c_probe,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 14b62e11aff4..f88085ad9772 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -259,7 +259,6 @@ MODULE_DEVICE_TABLE(i2c, tps65090_id_table);
static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps65090_of_match),
},
.probe = tps65090_i2c_probe,
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 7d1cfc1d3ce0..55add0453ae9 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -156,6 +156,7 @@ static const struct of_device_id tps65217_of_match[] = {
{ .compatible = "ti,tps65217", .data = (void *)TPS65217 },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, tps65217_of_match);
static int tps65217_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
@@ -248,7 +249,6 @@ MODULE_DEVICE_TABLE(i2c, tps65217_id_table);
static struct i2c_driver tps65217_driver = {
.driver = {
.name = "tps65217",
- .owner = THIS_MODULE,
.of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 7af11a8b9753..80b9dc363cd8 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -211,6 +211,7 @@ static const struct of_device_id of_tps65218_match_table[] = {
{ .compatible = "ti,tps65218", },
{}
};
+MODULE_DEVICE_TABLE(of, of_tps65218_match_table);
static int tps65218_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
@@ -280,7 +281,6 @@ MODULE_DEVICE_TABLE(i2c, tps65218_id_table);
static struct i2c_driver tps65218_driver = {
.driver = {
.name = "tps65218",
- .owner = THIS_MODULE,
.of_match_table = of_tps65218_match_table,
},
.probe = tps65218_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e0a2583916ce..5628a6b5b19b 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -52,7 +52,7 @@
#define TPS6586X_VERSIONCRC 0xcd
/* Maximum register */
-#define TPS6586X_MAX_REGISTER (TPS6586X_VERSIONCRC + 1)
+#define TPS6586X_MAX_REGISTER TPS6586X_VERSIONCRC
struct tps6586x_irq_data {
u8 mask_reg;
@@ -299,14 +299,7 @@ static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, tps6586x);
irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
@@ -467,7 +460,7 @@ static bool is_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config tps6586x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = TPS6586X_MAX_REGISTER - 1,
+ .max_register = TPS6586X_MAX_REGISTER,
.volatile_reg = is_volatile_reg,
.cache_type = REGCACHE_RBTREE,
};
@@ -610,7 +603,6 @@ MODULE_DEVICE_TABLE(i2c, tps6586x_id_table);
static struct i2c_driver tps6586x_driver = {
.driver = {
.name = "tps6586x",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps6586x_of_match),
},
.probe = tps6586x_i2c_probe,
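
The tps6586x change above is a reminder that regmap's .max_register is the highest valid register address, inclusive; defining the constant as VERSIONCRC + 1 and then subtracting 1 at the use site cancelled out and only obscured that. The idiomatic form names the last register and uses it directly, as in this sketch for a hypothetical 8-bit device:

#include <linux/regmap.h>

#define DEMO_LAST_REG   0xcd    /* highest documented register, inclusive */

static const struct regmap_config demo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = DEMO_LAST_REG,
};
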
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 7612d89850dd..f7ab115483a9 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -544,7 +544,6 @@ MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
static struct i2c_driver tps65910_i2c_driver = {
.driver = {
.name = "tps65910",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 6a6343ee95fe..7e55640b3ed5 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -109,7 +109,6 @@ MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
static struct i2c_driver tps65912_i2c_driver = {
.driver = {
.name = "tps65912",
- .owner = THIS_MODULE,
},
.probe = tps65912_i2c_probe,
.remove = tps65912_i2c_remove,
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
index fbecec7f1e3d..db2c29cb709b 100644
--- a/drivers/mfd/tps65912-irq.c
+++ b/drivers/mfd/tps65912-irq.c
@@ -197,13 +197,7 @@ int tps65912_irq_init(struct tps65912 *tps65912, int irq,
irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
handle_edge_irq);
irq_set_nested_thread(cur_irq, 1);
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index ed6c5b0956e2..0812df3b0d47 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -549,7 +549,6 @@ MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
static struct i2c_driver tps80031_driver = {
.driver = {
.name = "tps80031",
- .owner = THIS_MODULE,
},
.probe = tps80031_probe,
.remove = tps80031_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 489674a2497e..831696ee2472 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -788,9 +788,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
- static struct regulator_consumer_supply usb3v1[] = {
- { .supply = "usb3v1" },
- { .supply = "bci3v1" },
+ static struct regulator_consumer_supply usb3v1 = {
+ .supply = "usb3v1",
};
/* First add the regulators so that they can be used by transceiver */
@@ -818,7 +817,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, usb3v1, 2,
+ &usb_fixed, &usb3v1, 1,
features);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -838,7 +837,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
usb1v5.dev_name = dev_name(child);
usb1v8.dev_name = dev_name(child);
- usb3v1[0].dev_name = dev_name(child);
+ usb3v1.dev_name = dev_name(child);
}
}
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index a3fa7f4f1fb4..40e51b0baa46 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -419,16 +419,7 @@ static int twl4030_init_sih_modules(unsigned line)
static inline void activate_irq(int irq)
{
-#ifdef CONFIG_ARM
- /*
- * ARM requires an extra step to clear IRQ_NOREQUEST, which it
- * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
- */
- set_irq_flags(irq, IRQF_VALID);
-#else
- /* same effect on other architectures */
- irq_set_noprobe(irq);
-#endif
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 20fb58179ada..53574508a613 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -231,7 +231,7 @@ static irqreturn_t twl6030_irq_thread(int irq, void *data)
static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
{
- struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+ struct twl6030_irq *pdata = irq_data_get_irq_chip_data(d);
if (on)
atomic_inc(&pdata->wakeirqs);
@@ -352,26 +352,13 @@ static int twl6030_irq_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &pdata->irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, true);
irq_set_parent(virq, pdata->twl_irq);
-
-#ifdef CONFIG_ARM
- /*
- * ARM requires an extra step to clear IRQ_NOREQUEST, which it
- * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
- */
- set_irq_flags(virq, IRQF_VALID);
-#else
- /* same effect on other architectures */
irq_set_noprobe(virq);
-#endif
return 0;
}
static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
{
-#ifdef CONFIG_ARM
- set_irq_flags(virq, 0);
-#endif
irq_set_chip_and_handler(virq, NULL, NULL);
irq_set_chip_data(virq, NULL);
}
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c5265c1262c5..a151ee2eed2a 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -86,7 +86,7 @@ static const struct reg_default twl6040_defaults[] = {
{ 0x2E, 0x00 }, /* REG_STATUS (ro) */
};
-static struct reg_default twl6040_patch[] = {
+static struct reg_sequence twl6040_patch[] = {
/*
* Select I2C bus access to dual access registers
* Interrupt register is cleared on read
@@ -801,7 +801,6 @@ MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
static struct i2c_driver twl6040_driver = {
.driver = {
.name = "twl6040",
- .owner = THIS_MODULE,
},
.probe = twl6040_probe,
.remove = twl6040_remove,
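
The reg_default to reg_sequence conversions here and in wm5102-tables.c below track a regmap API split: struct reg_default now describes cache default tables only, while struct reg_sequence describes ordered register writes such as the ones applied by regmap_register_patch(). A hypothetical patch table in the new type:

#include <linux/regmap.h>

static const struct reg_sequence demo_patch[] = {
        { 0x80, 0x0003 },       /* unlock */
        { 0x81, 0xe022 },
        { 0x80, 0x0000 },       /* lock again */
};

/* applied once at probe time:
 *      regmap_register_patch(map, demo_patch, ARRAY_SIZE(demo_patch));
 */
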
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 3591550598ad..9a2302129711 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
* SIBCLK to talk to the chip. We leave the clock running until
* we have finished processing all interrupts from the chip.
*/
-static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
+static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
{
struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
unsigned int isr, i;
@@ -292,7 +292,7 @@ static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
- for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++)
+ for (i = 0; i < 16 && isr; i++, isr >>= 1)
if (isr & 1)
generic_handle_irq(ucb->irq_base + i);
ucb1x00_disable(ucb);
@@ -562,7 +562,7 @@ static int ucb1x00_probe(struct mcp *mcp)
irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
irq_set_chip_data(irq, ucb);
- set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
}
irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index aeae6ec123b3..0386eaf6be32 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -21,7 +21,7 @@
#define WM5102_NUM_AOD_ISR 2
#define WM5102_NUM_ISR 5
-static const struct reg_default wm5102_reva_patch[] = {
+static const struct reg_sequence wm5102_reva_patch[] = {
{ 0x80, 0x0003 },
{ 0x221, 0x0090 },
{ 0x211, 0x0014 },
@@ -57,7 +57,7 @@ static const struct reg_default wm5102_reva_patch[] = {
{ 0x80, 0x0000 },
};
-static const struct reg_default wm5102_revb_patch[] = {
+static const struct reg_sequence wm5102_revb_patch[] = {
{ 0x19, 0x0001 },
{ 0x80, 0x0003 },
{ 0x081, 0xE022 },
@@ -80,7 +80,7 @@ static const struct reg_default wm5102_revb_patch[] = {
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
- const struct reg_default *wm5102_patch;
+ const struct reg_sequence *wm5102_patch;
int patch_size;
switch (arizona->rev) {
@@ -266,8 +266,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
{ 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
{ 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
- { 0x0000006E, 0x01FF }, /* R110 - Trigger Sequence Select 32 */
- { 0x0000006F, 0x01FF }, /* R111 - Trigger Sequence Select 33 */
{ 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
{ 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
{ 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
@@ -300,7 +298,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -318,7 +315,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -338,12 +334,9 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
- { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
- { 0x0000029F, 0x0000 }, /* R671 - Headphone Detect Test */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
- { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
{ 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
@@ -887,11 +880,11 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
{ 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
- { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
{ 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
{ 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
{ 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
@@ -991,6 +984,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x0400 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -998,7 +992,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
- { 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
};
static bool wm5102_readable_register(struct device *dev, unsigned int reg)
@@ -1008,12 +1001,10 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DEVICE_REVISION:
case ARIZONA_CTRL_IF_SPI_CFG_1:
case ARIZONA_CTRL_IF_I2C1_CFG_1:
- case ARIZONA_CTRL_IF_STATUS_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_0:
case ARIZONA_WRITE_SEQUENCER_CTRL_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_WRITE_SEQUENCER_CTRL_3:
- case ARIZONA_WRITE_SEQUENCER_PROM:
case ARIZONA_TONE_GENERATOR_1:
case ARIZONA_TONE_GENERATOR_2:
case ARIZONA_TONE_GENERATOR_3:
@@ -1034,8 +1025,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
- case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7:
- case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8:
case ARIZONA_COMFORT_NOISE_GENERATOR:
case ARIZONA_HAPTICS_CONTROL_1:
case ARIZONA_HAPTICS_CONTROL_2:
@@ -1176,7 +1165,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DAC_DIGITAL_VOLUME_4L:
case ARIZONA_OUT_VOLUME_4L:
case ARIZONA_NOISE_GATE_SELECT_4L:
- case ARIZONA_OUTPUT_PATH_CONFIG_4R:
case ARIZONA_DAC_DIGITAL_VOLUME_4R:
case ARIZONA_OUT_VOLUME_4R:
case ARIZONA_NOISE_GATE_SELECT_4R:
@@ -1184,7 +1172,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DAC_DIGITAL_VOLUME_5L:
case ARIZONA_DAC_VOLUME_LIMIT_5L:
case ARIZONA_NOISE_GATE_SELECT_5L:
- case ARIZONA_OUTPUT_PATH_CONFIG_5R:
case ARIZONA_DAC_DIGITAL_VOLUME_5R:
case ARIZONA_DAC_VOLUME_LIMIT_5R:
case ARIZONA_NOISE_GATE_SELECT_5R:
@@ -1195,8 +1182,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_NOISE_GATE_CONTROL:
case ARIZONA_PDM_SPK1_CTRL_1:
case ARIZONA_PDM_SPK1_CTRL_2:
- case ARIZONA_SPK_CTRL_2:
- case ARIZONA_SPK_CTRL_3:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
@@ -1228,7 +1213,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF1_FRAME_CTRL_18:
case ARIZONA_AIF1_TX_ENABLES:
case ARIZONA_AIF1_RX_ENABLES:
- case ARIZONA_AIF1_FORCE_WRITE:
case ARIZONA_AIF2_BCLK_CTRL:
case ARIZONA_AIF2_TX_PIN_CTRL:
case ARIZONA_AIF2_RX_PIN_CTRL:
@@ -1244,7 +1228,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF2_FRAME_CTRL_12:
case ARIZONA_AIF2_TX_ENABLES:
case ARIZONA_AIF2_RX_ENABLES:
- case ARIZONA_AIF2_FORCE_WRITE:
case ARIZONA_AIF3_BCLK_CTRL:
case ARIZONA_AIF3_TX_PIN_CTRL:
case ARIZONA_AIF3_RX_PIN_CTRL:
@@ -1260,7 +1243,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF3_FRAME_CTRL_12:
case ARIZONA_AIF3_TX_ENABLES:
case ARIZONA_AIF3_RX_ENABLES:
- case ARIZONA_AIF3_FORCE_WRITE:
case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
case ARIZONA_SLIMBUS_RATES_1:
case ARIZONA_SLIMBUS_RATES_2:
@@ -1586,22 +1568,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
- case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
- case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
- case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
- case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
- case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
- case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
- case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
- case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
- case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
- case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
- case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
- case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
- case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
- case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
- case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
- case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
@@ -1810,11 +1776,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DRC1_CTRL3:
case ARIZONA_DRC1_CTRL4:
case ARIZONA_DRC1_CTRL5:
- case ARIZONA_DRC2_CTRL1:
- case ARIZONA_DRC2_CTRL2:
- case ARIZONA_DRC2_CTRL3:
- case ARIZONA_DRC2_CTRL4:
- case ARIZONA_DRC2_CTRL5:
case ARIZONA_HPLPF1_1:
case ARIZONA_HPLPF1_2:
case ARIZONA_HPLPF2_1:
@@ -1832,9 +1793,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ISRC_2_CTRL_1:
case ARIZONA_ISRC_2_CTRL_2:
case ARIZONA_ISRC_2_CTRL_3:
- case ARIZONA_ISRC_3_CTRL_1:
- case ARIZONA_ISRC_3_CTRL_2:
- case ARIZONA_ISRC_3_CTRL_3:
case ARIZONA_DSP1_CONTROL_1:
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
@@ -1883,7 +1841,6 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_WRITE_SEQUENCER_CTRL_3:
case ARIZONA_OUTPUT_STATUS_1:
- case ARIZONA_RAW_OUTPUT_STATUS_1:
case ARIZONA_SLIMBUS_RX_PORT_STATUS:
case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_SAMPLE_RATE_1_STATUS:
@@ -1969,6 +1926,8 @@ const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
.max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
@@ -1983,6 +1942,8 @@ EXPORT_SYMBOL_GPL(wm5102_spi_regmap);
const struct regmap_config wm5102_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
.max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
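
Making the format endianness explicit pins down behaviour that previously fell through to the bus and core defaults (and could otherwise be overridden from the device tree). A minimal sketch, with a hypothetical register map:

    static const struct regmap_config foo_regmap = {
        .reg_bits = 32,
        .val_bits = 16,
        /* on-the-wire byte order, independent of CPU endianness */
        .reg_format_endian = REGMAP_ENDIAN_BIG,
        .val_format_endian = REGMAP_ENDIAN_BIG,
        .max_register = FOO_MAX_REGISTER, /* hypothetical */
    };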
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 12cad94b4035..c4b9374efd76 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -21,7 +21,7 @@
#define WM5110_NUM_AOD_ISR 2
#define WM5110_NUM_ISR 5
-static const struct reg_default wm5110_reva_patch[] = {
+static const struct reg_sequence wm5110_reva_patch[] = {
{ 0x80, 0x3 },
{ 0x44, 0x20 },
{ 0x45, 0x40 },
@@ -134,7 +134,7 @@ static const struct reg_default wm5110_reva_patch[] = {
{ 0x209, 0x002A },
};
-static const struct reg_default wm5110_revb_patch[] = {
+static const struct reg_sequence wm5110_revb_patch[] = {
{ 0x80, 0x3 },
{ 0x36e, 0x0210 },
{ 0x370, 0x0210 },
@@ -224,7 +224,7 @@ static const struct reg_default wm5110_revb_patch[] = {
{ 0x80, 0x0 },
};
-static const struct reg_default wm5110_revd_patch[] = {
+static const struct reg_sequence wm5110_revd_patch[] = {
{ 0x80, 0x3 },
{ 0x80, 0x3 },
{ 0x393, 0x27 },
@@ -249,6 +249,16 @@ static const struct reg_default wm5110_revd_patch[] = {
{ 0x80, 0x0 },
};
+/* Add extra headphone write sequence locations */
+static const struct reg_default wm5110_reve_patch[] = {
+ { 0x80, 0x3 },
+ { 0x80, 0x3 },
+ { 0x4b, 0x138 },
+ { 0x4c, 0x13d },
+ { 0x80, 0x0 },
+ { 0x80, 0x0 },
+};
+
/* We use a function so we can use ARRAY_SIZE() */
int wm5110_patch(struct arizona *arizona)
{
@@ -266,7 +276,9 @@ int wm5110_patch(struct arizona *arizona)
wm5110_revd_patch,
ARRAY_SIZE(wm5110_revd_patch));
default:
- return 0;
+ return regmap_register_patch(arizona->regmap,
+ wm5110_reve_patch,
+ ARRAY_SIZE(wm5110_reve_patch));
}
}
EXPORT_SYMBOL_GPL(wm5110_patch);
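
The reg_default-to-reg_sequence renames throughout this diff reflect the regmap API split: struct reg_default now describes cache defaults only, while struct reg_sequence describes ordered writes such as these patches. regmap_register_patch() applies the sequence immediately and re-applies it after every cache sync. A minimal sketch, with hypothetical register values:

    static const struct reg_sequence foo_reva_patch[] = {
        { 0x80, 0x0003 }, /* unlock test registers (hypothetical) */
        { 0x21, 0x0090 },
        { 0x80, 0x0000 }, /* lock again */
    };

    /* map is the device's struct regmap * */
    int ret = regmap_register_patch(map, foo_reva_patch,
                                    ARRAY_SIZE(foo_reva_patch));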
@@ -676,6 +688,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
{ 0x00000040, 0x0000 }, /* R64 - Wake control */
{ 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000042, 0x0000 }, /* R66 - Spare Triggers */
{ 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
{ 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
@@ -754,11 +767,9 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0028 }, /* R667 - Headphone Detect 1 */
- { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
- { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
{ 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
@@ -848,8 +859,6 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
- { 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */
- { 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
{ 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */
@@ -1508,7 +1517,6 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
{ 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
- { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
{ 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
{ 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
{ 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
@@ -1625,14 +1633,9 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000F00, 0x0000 }, /* R3840 - Clock Control */
{ 0x00000F01, 0x0000 }, /* R3841 - ANC_SRC */
{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
- { 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
{ 0x00001200, 0x0010 }, /* R4608 - DSP2 Control 1 */
- { 0x00001201, 0x0000 }, /* R4609 - DSP2 Clocking 1 */
{ 0x00001300, 0x0010 }, /* R4864 - DSP3 Control 1 */
- { 0x00001301, 0x0000 }, /* R4865 - DSP3 Clocking 1 */
{ 0x00001400, 0x0010 }, /* R5120 - DSP4 Control 1 */
- { 0x00001401, 0x0000 }, /* R5121 - DSP4 Clocking 1 */
- { 0x00001404, 0x0000 }, /* R5124 - DSP4 Status 1 */
};
static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
@@ -1716,6 +1719,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_PWM_DRIVE_3:
case ARIZONA_WAKE_CONTROL:
case ARIZONA_SEQUENCE_CONTROL:
+ case ARIZONA_SPARE_TRIGGERS:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
@@ -3007,6 +3011,8 @@ const struct regmap_config wm5110_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
.max_register = WM5110_MAX_REGISTER,
.readable_reg = wm5110_readable_register,
@@ -3021,6 +3027,8 @@ EXPORT_SYMBOL_GPL(wm5110_spi_regmap);
const struct regmap_config wm5110_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
.max_register = WM5110_MAX_REGISTER,
.readable_reg = wm5110_readable_register,
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index a4cbefe5430f..824bcbaa9624 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -93,7 +93,6 @@ static const struct dev_pm_ops wm831x_pm_ops = {
static struct i2c_driver wm831x_i2c_driver = {
.driver = {
.name = "wm831x",
- .owner = THIS_MODULE,
.pm = &wm831x_pm_ops,
},
.probe = wm831x_i2c_probe,
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3da81263c764..dfea8b9c2fe6 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -552,14 +552,7 @@ static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 6a16a8a6f9fa..9358f03b7938 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -69,7 +69,6 @@ MODULE_DEVICE_TABLE(i2c, wm8350_i2c_id);
static struct i2c_driver wm8350_i2c_driver = {
.driver = {
.name = "wm8350",
- .owner = THIS_MODULE,
},
.probe = wm8350_i2c_probe,
.remove = wm8350_i2c_remove,
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 813ff50f95b6..27054f357b8e 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -526,13 +526,7 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
handle_edge_irq);
irq_set_nested_thread(cur_irq, 1);
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
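
Note the asymmetry with the other conversions in this series: the old non-ARM branch here called irq_set_noprobe(), which sets IRQ_NOPROBE, while the replacement clears IRQ_NOPROBE along with IRQ_NOREQUEST, so these descriptors also become probe-able. The relevant helpers from <linux/irq.h>:

    irq_set_noprobe(irq);                  /* sets IRQ_NOPROBE on the desc */
    irq_clear_status_flags(irq,            /* clears the named status flags */
                           IRQ_NOREQUEST | IRQ_NOPROBE);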
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index c6fb5d16ca09..3bd44a45c378 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -194,7 +194,6 @@ MODULE_DEVICE_TABLE(i2c, wm8400_i2c_id);
static struct i2c_driver wm8400_i2c_driver = {
.driver = {
.name = "WM8400",
- .owner = THIS_MODULE,
},
.probe = wm8400_i2c_probe,
.remove = wm8400_i2c_remove,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 53ae5af5d6e4..7eec619a6023 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -243,21 +243,21 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
}
#endif
-static const struct reg_default wm8994_revc_patch[] = {
+static const struct reg_sequence wm8994_revc_patch[] = {
{ 0x102, 0x3 },
{ 0x56, 0x3 },
{ 0x817, 0x0 },
{ 0x102, 0x0 },
};
-static const struct reg_default wm8958_reva_patch[] = {
+static const struct reg_sequence wm8958_reva_patch[] = {
{ 0x102, 0x3 },
{ 0xcb, 0x81 },
{ 0x817, 0x0 },
{ 0x102, 0x0 },
};
-static const struct reg_default wm1811_reva_patch[] = {
+static const struct reg_sequence wm1811_reva_patch[] = {
{ 0x102, 0x3 },
{ 0x56, 0xc07 },
{ 0x5d, 0x7e },
@@ -326,7 +326,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
struct wm8994_pdata *pdata;
struct regmap_config *regmap_config;
- const struct reg_default *regmap_patch = NULL;
+ const struct reg_sequence *regmap_patch = NULL;
const char *devname;
int ret, i, patch_regs = 0;
int pulls = 0;
@@ -677,7 +677,6 @@ static const struct dev_pm_ops wm8994_pm_ops = {
static struct i2c_driver wm8994_i2c_driver = {
.driver = {
.name = "wm8994",
- .owner = THIS_MODULE,
.pm = &wm8994_pm_ops,
.of_match_table = of_match_ptr(wm8994_of_match),
},
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 55c380a67686..18710f3b5c53 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -172,14 +172,7 @@ static int wm8994_edge_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, wm8994);
irq_set_chip_and_handler(virq, &wm8994_edge_irq_chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
@@ -193,7 +186,7 @@ int wm8994_irq_init(struct wm8994 *wm8994)
{
int ret;
unsigned long irqflags;
- struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
+ struct wm8994_pdata *pdata = &wm8994->pdata;
if (!wm8994->irq) {
dev_warn(wm8994->dev,
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 300e9b6a2e96..c56b1600ef3e 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -19,7 +19,7 @@
#include "wm8994.h"
-static struct reg_default wm1811_defaults[] = {
+static const struct reg_default wm1811_defaults[] = {
{ 0x0001, 0x0000 }, /* R1 - Power Management (1) */
{ 0x0002, 0x6000 }, /* R2 - Power Management (2) */
{ 0x0003, 0x0000 }, /* R3 - Power Management (3) */
@@ -251,7 +251,7 @@ static struct reg_default wm1811_defaults[] = {
{ 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
};
-static struct reg_default wm8994_defaults[] = {
+static const struct reg_default wm8994_defaults[] = {
{ 0x0001, 0x0000 }, /* R1 - Power Management (1) */
{ 0x0002, 0x6000 }, /* R2 - Power Management (2) */
{ 0x0003, 0x0000 }, /* R3 - Power Management (3) */
@@ -470,7 +470,7 @@ static struct reg_default wm8994_defaults[] = {
{ 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
};
-static struct reg_default wm8958_defaults[] = {
+static const struct reg_default wm8958_defaults[] = {
{ 0x0001, 0x0000 }, /* R1 - Power Management (1) */
{ 0x0002, 0x6000 }, /* R2 - Power Management (2) */
{ 0x0003, 0x0000 }, /* R3 - Power Management (3) */
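
Constifying the default tables is safe because regmap_config takes them through a const pointer, and it lets these large arrays live in read-only data. A minimal sketch, with hypothetical defaults:

    static const struct reg_default foo_defaults[] = {
        { 0x0001, 0x0000 },
        { 0x0002, 0x6000 },
    };

    static const struct regmap_config foo_regmap = {
        .reg_bits = 16,
        .val_bits = 16,
        .reg_defaults = foo_defaults, /* const struct reg_default * */
        .num_reg_defaults = ARRAY_SIZE(foo_defaults),
    };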
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c0c25d75aacc..ca41a561bfd3 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -17,7 +17,7 @@
#include "arizona.h"
-static const struct reg_default wm8997_reva_patch[] = {
+static const struct reg_sequence wm8997_reva_patch[] = {
{ 0x80, 0x0003 },
{ 0x214, 0x0008 },
{ 0x458, 0x0000 },
@@ -243,7 +243,6 @@ static const struct reg_default wm8997_reg_default[] = {
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
- { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
{ 0x000002CB, 0x0000 }, /* R715 - Isolation control */
{ 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
@@ -684,7 +683,6 @@ static const struct reg_default wm8997_reg_default[] = {
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
{ 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
- { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
{ 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
{ 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
{ 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
@@ -788,8 +786,6 @@ static const struct reg_default wm8997_reg_default[] = {
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
- { 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
};
static bool wm8997_readable_register(struct device *dev, unsigned int reg)
@@ -1480,6 +1476,8 @@ static bool wm8997_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_MIC_DETECT_3:
case ARIZONA_HP_CTRL_1L:
case ARIZONA_HP_CTRL_1R:
@@ -1521,6 +1519,8 @@ static bool wm8997_volatile_register(struct device *dev, unsigned int reg)
const struct regmap_config wm8997_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
.max_register = WM8997_MAX_REGISTER,
.readable_reg = wm8997_readable_register,
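
Adding the FLL NCO test registers to the volatile list makes regmap bypass the cache for them, which is also why the matching entries disappear from the reg_default tables elsewhere in this diff: a volatile register has no business carrying a cached default. A minimal sketch of the callback shape:

    static bool foo_volatile_register(struct device *dev, unsigned int reg)
    {
        switch (reg) {
        case ARIZONA_FLL1_NCO_TEST_0:
        case ARIZONA_FLL2_NCO_TEST_0:
            return true;
        default:
            return false;
        }
    }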
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
new file mode 100644
index 000000000000..e6de3cd8a9aa
--- /dev/null
+++ b/drivers/mfd/wm8998-tables.c
@@ -0,0 +1,1594 @@
+/*
+ * wm8998-tables.c -- data tables for wm8998-class codecs
+ *
+ * Copyright 2014 Wolfson Microelectronics plc
+ *
+ * Author: Richard Fitzgerald <rf@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+#include <linux/device.h>
+
+#include "arizona.h"
+
+#define WM8998_NUM_AOD_ISR 2
+#define WM8998_NUM_ISR 5
+
+static const struct reg_default wm8998_rev_a_patch[] = {
+ { 0x0212, 0x0000 },
+ { 0x0211, 0x0014 },
+ { 0x04E4, 0x0E0D },
+ { 0x04E5, 0x0E0D },
+ { 0x04E6, 0x0E0D },
+ { 0x04EB, 0x060E },
+ { 0x0441, 0xC759 },
+ { 0x0442, 0x2A08 },
+ { 0x0443, 0x5CFA },
+ { 0x026E, 0x0064 },
+ { 0x026F, 0x00EA },
+ { 0x0270, 0x1F16 },
+ { 0x0410, 0x2080 },
+ { 0x0418, 0x2080 },
+ { 0x0420, 0x2080 },
+ { 0x04B8, 0x1120 },
+ { 0x047E, 0x080E },
+ { 0x0448, 0x03EF },
+};
+
+/* We use a function so we can use ARRAY_SIZE() */
+int wm8998_patch(struct arizona *arizona)
+{
+ return regmap_register_patch(arizona->regmap,
+ wm8998_rev_a_patch,
+ ARRAY_SIZE(wm8998_rev_a_patch));
+}
+
+static const struct regmap_irq wm8998_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+ .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+ },
+ [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+ .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+ },
+ [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+ [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+ [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+ [ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+struct regmap_irq_chip wm8998_aod = {
+ .name = "wm8998 AOD",
+ .status_base = ARIZONA_AOD_IRQ1,
+ .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+ .ack_base = ARIZONA_AOD_IRQ1,
+ .wake_base = ARIZONA_WAKE_CONTROL,
+ .wake_invert = 1,
+ .num_regs = 1,
+ .irqs = wm8998_aod_irqs,
+ .num_irqs = ARRAY_SIZE(wm8998_aod_irqs),
+};
+
+static const struct regmap_irq wm8998_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+ [ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+ [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+ [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+ [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
+ },
+ [ARIZONA_IRQ_SPK_OVERHEAT] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
+ },
+ [ARIZONA_IRQ_HPDET] = {
+ .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+ },
+ [ARIZONA_IRQ_MICDET] = {
+ .reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+ },
+ [ARIZONA_IRQ_WSEQ_DONE] = {
+ .reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DRC1_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_ASRC2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_ASRC1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_UNDERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_OVERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+ },
+
+ [ARIZONA_IRQ_ASRC_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF3_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF2_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF1_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CTRLIF_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+ },
+ [ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+ .reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+ },
+ [ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+ },
+
+ [ARIZONA_IRQ_BOOT_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+ },
+};
+
+struct regmap_irq_chip wm8998_irq = {
+ .name = "wm8998 IRQ",
+ .status_base = ARIZONA_INTERRUPT_STATUS_1,
+ .mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+ .ack_base = ARIZONA_INTERRUPT_STATUS_1,
+ .num_regs = 5,
+ .irqs = wm8998_irqs,
+ .num_irqs = ARRAY_SIZE(wm8998_irqs),
+};
+
+static const struct reg_default wm8998_reg_default[] = {
+ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
+ { 0x0000000B, 0x001A }, /* R11 - Ctrl IF I2C1 CFG 2 */
+ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
+ { 0x00000040, 0x0000 }, /* R64 - Wake control */
+ { 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
+ { 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
+ { 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
+ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
+ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
+ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
+ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
+ { 0x0000006E, 0x01FF }, /* R110 - Trigger Sequence Select 32 */
+ { 0x0000006F, 0x01FF }, /* R111 - Trigger Sequence Select 33 */
+ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
+ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
+ { 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
+ { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
+ { 0x00000149, 0x0000 }, /* R329 - Output system clock */
+ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
+ { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
+ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
+ { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
+ { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
+ { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
+ { 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
+ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
+ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
+ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
+ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
+ { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
+ { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
+ { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
+ { 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
+ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
+ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
+ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
+ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
+ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
+ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
+ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
+ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
+ { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x00000212, 0x0000 }, /* R530 - LDO1 Control 2 */
+ { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
+ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000293, 0x0080 }, /* R659 - Accessory Detect Mode 1 */
+ { 0x0000029B, 0x0000 }, /* R667 - Headphone Detect 1 */
+ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
+ { 0x000002A2, 0x0000 }, /* R674 - Micd Clamp control */
+ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
+ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
+ { 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
+ { 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
+ { 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
+ { 0x000002AB, 0x0000 }, /* R683 - Mic Detect 4 */
+ { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
+ { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x0000030C, 0x0002 }, /* R780 - HPF Control */
+ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
+ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
+ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
+ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
+ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
+ { 0x00000410, 0x2080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
+ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
+ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
+ { 0x00000418, 0x2080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
+ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
+ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
+ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
+ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
+ { 0x00000420, 0x2080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
+ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
+ { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
+ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
+ { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
+ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
+ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
+ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
+ { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
+ { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000441, 0xC759 }, /* R1089 - DRE Control 1 */
+ { 0x00000442, 0x2A08 }, /* R1090 - DRE Control 2 */
+ { 0x00000443, 0x5CFA }, /* R1091 - DRE Control 3 */
+ { 0x00000448, 0x03EF }, /* R1096 - EDRE Enable */
+ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
+ { 0x00000451, 0x0000 }, /* R1105 - DAC AEC Control 2 */
+ { 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
+ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
+ { 0x0000049A, 0x0000 }, /* R1178 - HP_TEST_CTRL_13 */
+ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
+ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
+ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
+ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
+ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
+ { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
+ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
+ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
+ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
+ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
+ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
+ { 0x0000054B, 0x0002 }, /* R1355 - AIF2 Frame Ctrl 5 */
+ { 0x0000054C, 0x0003 }, /* R1356 - AIF2 Frame Ctrl 6 */
+ { 0x0000054D, 0x0004 }, /* R1357 - AIF2 Frame Ctrl 7 */
+ { 0x0000054E, 0x0005 }, /* R1358 - AIF2 Frame Ctrl 8 */
+ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
+ { 0x00000553, 0x0002 }, /* R1363 - AIF2 Frame Ctrl 13 */
+ { 0x00000554, 0x0003 }, /* R1364 - AIF2 Frame Ctrl 14 */
+ { 0x00000555, 0x0004 }, /* R1365 - AIF2 Frame Ctrl 15 */
+ { 0x00000556, 0x0005 }, /* R1366 - AIF2 Frame Ctrl 16 */
+ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
+ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
+ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
+ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
+ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
+ { 0x000005C2, 0x0000 }, /* R1474 - SPD1 TX Control */
+ { 0x000005C3, 0x0000 }, /* R1475 - SPD1 TX Channel Status 1 */
+ { 0x000005C4, 0x0B01 }, /* R1476 - SPD1 TX Channel Status 2 */
+ { 0x000005C5, 0x0000 }, /* R1477 - SPD1 TX Channel Status 3 */
+ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
+ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
+ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
+ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
+ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
+ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
+ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
+ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
+ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
+ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
+ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
+ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
+ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
+ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
+ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
+ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
+ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
+ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
+ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
+ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
+ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
+ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
+ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
+ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
+ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
+ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
+ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
+ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
+ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
+ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
+ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
+ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
+ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
+ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
+ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
+ { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
+ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
+ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
+ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
+ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
+ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
+ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
+ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
+ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
+ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
+ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
+ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
+ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
+ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
+ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
+ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
+ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
+ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
+ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
+ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
+ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
+ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
+ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
+ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
+ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
+ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
+ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
+ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
+ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
+ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
+ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
+ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
+ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
+ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
+ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
+ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
+ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
+ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
+ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
+ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
+ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
+ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
+ { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000750, 0x0000 }, /* R1872 - AIF2TX3MIX Input 1 Source */
+ { 0x00000751, 0x0080 }, /* R1873 - AIF2TX3MIX Input 1 Volume */
+ { 0x00000752, 0x0000 }, /* R1874 - AIF2TX3MIX Input 2 Source */
+ { 0x00000753, 0x0080 }, /* R1875 - AIF2TX3MIX Input 2 Volume */
+ { 0x00000754, 0x0000 }, /* R1876 - AIF2TX3MIX Input 3 Source */
+ { 0x00000755, 0x0080 }, /* R1877 - AIF2TX3MIX Input 3 Volume */
+ { 0x00000756, 0x0000 }, /* R1878 - AIF2TX3MIX Input 4 Source */
+ { 0x00000757, 0x0080 }, /* R1879 - AIF2TX3MIX Input 4 Volume */
+ { 0x00000758, 0x0000 }, /* R1880 - AIF2TX4MIX Input 1 Source */
+ { 0x00000759, 0x0080 }, /* R1881 - AIF2TX4MIX Input 1 Volume */
+ { 0x0000075A, 0x0000 }, /* R1882 - AIF2TX4MIX Input 2 Source */
+ { 0x0000075B, 0x0080 }, /* R1883 - AIF2TX4MIX Input 2 Volume */
+ { 0x0000075C, 0x0000 }, /* R1884 - AIF2TX4MIX Input 3 Source */
+ { 0x0000075D, 0x0080 }, /* R1885 - AIF2TX4MIX Input 3 Volume */
+ { 0x0000075E, 0x0000 }, /* R1886 - AIF2TX4MIX Input 4 Source */
+ { 0x0000075F, 0x0080 }, /* R1887 - AIF2TX4MIX Input 4 Volume */
+ { 0x00000760, 0x0000 }, /* R1888 - AIF2TX5MIX Input 1 Source */
+ { 0x00000761, 0x0080 }, /* R1889 - AIF2TX5MIX Input 1 Volume */
+ { 0x00000762, 0x0000 }, /* R1890 - AIF2TX5MIX Input 2 Source */
+ { 0x00000763, 0x0080 }, /* R1891 - AIF2TX5MIX Input 2 Volume */
+ { 0x00000764, 0x0000 }, /* R1892 - AIF2TX5MIX Input 3 Source */
+ { 0x00000765, 0x0080 }, /* R1893 - AIF2TX5MIX Input 3 Volume */
+ { 0x00000766, 0x0000 }, /* R1894 - AIF2TX5MIX Input 4 Source */
+ { 0x00000767, 0x0080 }, /* R1895 - AIF2TX5MIX Input 4 Volume */
+ { 0x00000768, 0x0000 }, /* R1896 - AIF2TX6MIX Input 1 Source */
+ { 0x00000769, 0x0080 }, /* R1897 - AIF2TX6MIX Input 1 Volume */
+ { 0x0000076A, 0x0000 }, /* R1898 - AIF2TX6MIX Input 2 Source */
+ { 0x0000076B, 0x0080 }, /* R1899 - AIF2TX6MIX Input 2 Volume */
+ { 0x0000076C, 0x0000 }, /* R1900 - AIF2TX6MIX Input 3 Source */
+ { 0x0000076D, 0x0080 }, /* R1901 - AIF2TX6MIX Input 3 Volume */
+ { 0x0000076E, 0x0000 }, /* R1902 - AIF2TX6MIX Input 4 Source */
+ { 0x0000076F, 0x0080 }, /* R1903 - AIF2TX6MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
+ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
+ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
+ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
+ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
+ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
+ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
+ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
+ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
+ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
+ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
+ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
+ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
+ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
+ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
+ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
+ { 0x00000800, 0x0000 }, /* R2048 - SPDIF1TX1MIX Input 1 Source */
+ { 0x00000801, 0x0080 }, /* R2049 - SPDIF1TX1MIX Input 1 Volume */
+ { 0x00000808, 0x0000 }, /* R2056 - SPDIF1TX2MIX Input 1 Source */
+ { 0x00000809, 0x0080 }, /* R2057 - SPDIF1TX2MIX Input 1 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
+ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
+ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
+ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
+ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
+ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
+ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
+ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
+ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
+ { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
+ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
+ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
+ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
+ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
+ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
+ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
+ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
+ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
+ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
+ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
+ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
+ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000B10, 0x0000 }, /* R2832 - ISRC1DEC3MIX Input 1 Source */
+ { 0x00000B18, 0x0000 }, /* R2840 - ISRC1DEC4MIX Input 1 Source */
+ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
+ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
+ { 0x00000B30, 0x0000 }, /* R2864 - ISRC1INT3MIX Input 1 Source */
+ { 0x00000B38, 0x0000 }, /* R2872 - ISRC1INT4MIX Input 1 Source */
+ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
+ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
+ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
+ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
+ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
+ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
+ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
+ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
+ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
+ { 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
+ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
+ { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
+ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
+ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
+ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
+ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
+ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
+ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
+ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
+ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
+ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
+ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
+ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
+ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
+ { 0x00000D1C, 0xFEFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1D, 0xFFFF }, /* R3357 - IRQ2 Status 6 Mask */
+ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
+ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
+ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
+ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
+ { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
+ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
+ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
+ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
+ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
+ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
+ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
+ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
+ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
+ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
+ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
+ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
+ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
+ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
+ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
+ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
+ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
+ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
+ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
+ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
+ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
+ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
+ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
+ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
+ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
+ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
+ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
+ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
+ { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
+ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
+ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
+ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
+ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
+ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
+ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
+ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
+ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
+ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
+ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
+ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
+ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
+ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
+ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
+ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
+ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
+ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
+ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
+ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
+ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
+ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
+ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
+ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
+ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
+ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
+ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
+ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
+ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
+ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
+ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
+ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
+ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
+ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
+ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
+ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
+ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
+ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
+ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
+ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
+ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
+ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
+ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
+ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
+ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
+ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
+ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
+ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
+ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
+ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
+ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
+ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
+ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
+ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
+ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
+ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
+ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
+ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
+ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
+ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
+ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
+ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
+ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
+ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
+ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
+ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
+ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
+ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
+ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
+ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
+ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
+ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
+ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
+ { 0x00000EF1, 0x0001 }, /* R3825 - ISRC 1 CTRL 2 */
+ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
+ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
+ { 0x00000EF4, 0x0001 }, /* R3828 - ISRC 2 CTRL 2 */
+ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
+ { 0x00001700, 0x0000 }, /* R5888 - FRF_COEFF_1 */
+ { 0x00001701, 0x0000 }, /* R5889 - FRF_COEFF_2 */
+ { 0x00001702, 0x0000 }, /* R5890 - FRF_COEFF_3 */
+ { 0x00001703, 0x0000 }, /* R5891 - FRF_COEFF_4 */
+ { 0x00001704, 0x0000 }, /* R5892 - DAC_COMP_1 */
+ { 0x00001705, 0x0000 }, /* R5893 - DAC_COMP_2 */
+};
+
+static bool wm8998_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_CTRL_IF_SPI_CFG_1:
+ case ARIZONA_CTRL_IF_I2C1_CFG_1:
+ case ARIZONA_CTRL_IF_I2C1_CFG_2:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+ case ARIZONA_TONE_GENERATOR_1:
+ case ARIZONA_TONE_GENERATOR_2:
+ case ARIZONA_TONE_GENERATOR_3:
+ case ARIZONA_TONE_GENERATOR_4:
+ case ARIZONA_TONE_GENERATOR_5:
+ case ARIZONA_PWM_DRIVE_1:
+ case ARIZONA_PWM_DRIVE_2:
+ case ARIZONA_PWM_DRIVE_3:
+ case ARIZONA_WAKE_CONTROL:
+ case ARIZONA_SEQUENCE_CONTROL:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
+ case ARIZONA_HAPTICS_CONTROL_1:
+ case ARIZONA_HAPTICS_CONTROL_2:
+ case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_1_DURATION:
+ case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_2_DURATION:
+ case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_3_DURATION:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_CLOCK_32K_1:
+ case ARIZONA_SYSTEM_CLOCK_1:
+ case ARIZONA_SAMPLE_RATE_1:
+ case ARIZONA_SAMPLE_RATE_2:
+ case ARIZONA_SAMPLE_RATE_3:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_ASYNC_CLOCK_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_ASYNC_SAMPLE_RATE_2:
+ case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+ case ARIZONA_OUTPUT_ASYNC_CLOCK:
+ case ARIZONA_RATE_ESTIMATOR_1:
+ case ARIZONA_RATE_ESTIMATOR_2:
+ case ARIZONA_RATE_ESTIMATOR_3:
+ case ARIZONA_RATE_ESTIMATOR_4:
+ case ARIZONA_RATE_ESTIMATOR_5:
+ case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
+ case ARIZONA_FLL1_CONTROL_1:
+ case ARIZONA_FLL1_CONTROL_2:
+ case ARIZONA_FLL1_CONTROL_3:
+ case ARIZONA_FLL1_CONTROL_4:
+ case ARIZONA_FLL1_CONTROL_5:
+ case ARIZONA_FLL1_CONTROL_6:
+ case ARIZONA_FLL1_CONTROL_7:
+ case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL1_SYNCHRONISER_1:
+ case ARIZONA_FLL1_SYNCHRONISER_2:
+ case ARIZONA_FLL1_SYNCHRONISER_3:
+ case ARIZONA_FLL1_SYNCHRONISER_4:
+ case ARIZONA_FLL1_SYNCHRONISER_5:
+ case ARIZONA_FLL1_SYNCHRONISER_6:
+ case ARIZONA_FLL1_SYNCHRONISER_7:
+ case ARIZONA_FLL1_SPREAD_SPECTRUM:
+ case ARIZONA_FLL1_GPIO_CLOCK:
+ case ARIZONA_FLL2_CONTROL_1:
+ case ARIZONA_FLL2_CONTROL_2:
+ case ARIZONA_FLL2_CONTROL_3:
+ case ARIZONA_FLL2_CONTROL_4:
+ case ARIZONA_FLL2_CONTROL_5:
+ case ARIZONA_FLL2_CONTROL_6:
+ case ARIZONA_FLL2_CONTROL_7:
+ case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_NCO_TEST_0:
+ case ARIZONA_FLL2_SYNCHRONISER_1:
+ case ARIZONA_FLL2_SYNCHRONISER_2:
+ case ARIZONA_FLL2_SYNCHRONISER_3:
+ case ARIZONA_FLL2_SYNCHRONISER_4:
+ case ARIZONA_FLL2_SYNCHRONISER_5:
+ case ARIZONA_FLL2_SYNCHRONISER_6:
+ case ARIZONA_FLL2_SYNCHRONISER_7:
+ case ARIZONA_FLL2_SPREAD_SPECTRUM:
+ case ARIZONA_FLL2_GPIO_CLOCK:
+ case ARIZONA_MIC_CHARGE_PUMP_1:
+ case ARIZONA_LDO1_CONTROL_1:
+ case ARIZONA_LDO1_CONTROL_2:
+ case ARIZONA_LDO2_CONTROL_1:
+ case ARIZONA_MIC_BIAS_CTRL_1:
+ case ARIZONA_MIC_BIAS_CTRL_2:
+ case ARIZONA_MIC_BIAS_CTRL_3:
+ case ARIZONA_ACCESSORY_DETECT_MODE_1:
+ case ARIZONA_HEADPHONE_DETECT_1:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_MICD_CLAMP_CONTROL:
+ case ARIZONA_MIC_DETECT_1:
+ case ARIZONA_MIC_DETECT_2:
+ case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_MIC_DETECT_4:
+ case ARIZONA_MIC_DETECT_LEVEL_1:
+ case ARIZONA_MIC_DETECT_LEVEL_2:
+ case ARIZONA_MIC_DETECT_LEVEL_3:
+ case ARIZONA_MIC_DETECT_LEVEL_4:
+ case ARIZONA_ISOLATION_CONTROL:
+ case ARIZONA_JACK_DETECT_ANALOGUE:
+ case ARIZONA_INPUT_ENABLES:
+ case ARIZONA_INPUT_ENABLES_STATUS:
+ case ARIZONA_INPUT_RATE:
+ case ARIZONA_INPUT_VOLUME_RAMP:
+ case ARIZONA_HPF_CONTROL:
+ case ARIZONA_IN1L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+ case ARIZONA_DMIC1L_CONTROL:
+ case ARIZONA_IN1R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+ case ARIZONA_DMIC1R_CONTROL:
+ case ARIZONA_IN2L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+ case ARIZONA_DMIC2L_CONTROL:
+ case ARIZONA_OUTPUT_ENABLES_1:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_OUTPUT_RATE_1:
+ case ARIZONA_OUTPUT_VOLUME_RAMP:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+ case ARIZONA_NOISE_GATE_SELECT_1L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+ case ARIZONA_NOISE_GATE_SELECT_1R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+ case ARIZONA_NOISE_GATE_SELECT_2L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+ case ARIZONA_NOISE_GATE_SELECT_2R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+ case ARIZONA_NOISE_GATE_SELECT_3L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+ case ARIZONA_NOISE_GATE_SELECT_4L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+ case ARIZONA_NOISE_GATE_SELECT_4R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+ case ARIZONA_NOISE_GATE_SELECT_5L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+ case ARIZONA_NOISE_GATE_SELECT_5R:
+ case ARIZONA_DRE_ENABLE:
+ case ARIZONA_DRE_CONTROL_1:
+ case ARIZONA_DRE_CONTROL_2:
+ case ARIZONA_DRE_CONTROL_3:
+ case ARIZONA_EDRE_ENABLE:
+ case ARIZONA_DAC_AEC_CONTROL_1:
+ case ARIZONA_DAC_AEC_CONTROL_2:
+ case ARIZONA_NOISE_GATE_CONTROL:
+ case ARIZONA_PDM_SPK1_CTRL_1:
+ case ARIZONA_PDM_SPK1_CTRL_2:
+ case ARIZONA_HP_TEST_CTRL_13:
+ case ARIZONA_AIF1_BCLK_CTRL:
+ case ARIZONA_AIF1_TX_PIN_CTRL:
+ case ARIZONA_AIF1_RX_PIN_CTRL:
+ case ARIZONA_AIF1_RATE_CTRL:
+ case ARIZONA_AIF1_FORMAT:
+ case ARIZONA_AIF1_RX_BCLK_RATE:
+ case ARIZONA_AIF1_FRAME_CTRL_1:
+ case ARIZONA_AIF1_FRAME_CTRL_2:
+ case ARIZONA_AIF1_FRAME_CTRL_3:
+ case ARIZONA_AIF1_FRAME_CTRL_4:
+ case ARIZONA_AIF1_FRAME_CTRL_5:
+ case ARIZONA_AIF1_FRAME_CTRL_6:
+ case ARIZONA_AIF1_FRAME_CTRL_7:
+ case ARIZONA_AIF1_FRAME_CTRL_8:
+ case ARIZONA_AIF1_FRAME_CTRL_11:
+ case ARIZONA_AIF1_FRAME_CTRL_12:
+ case ARIZONA_AIF1_FRAME_CTRL_13:
+ case ARIZONA_AIF1_FRAME_CTRL_14:
+ case ARIZONA_AIF1_FRAME_CTRL_15:
+ case ARIZONA_AIF1_FRAME_CTRL_16:
+ case ARIZONA_AIF1_TX_ENABLES:
+ case ARIZONA_AIF1_RX_ENABLES:
+ case ARIZONA_AIF2_BCLK_CTRL:
+ case ARIZONA_AIF2_TX_PIN_CTRL:
+ case ARIZONA_AIF2_RX_PIN_CTRL:
+ case ARIZONA_AIF2_RATE_CTRL:
+ case ARIZONA_AIF2_FORMAT:
+ case ARIZONA_AIF2_RX_BCLK_RATE:
+ case ARIZONA_AIF2_FRAME_CTRL_1:
+ case ARIZONA_AIF2_FRAME_CTRL_2:
+ case ARIZONA_AIF2_FRAME_CTRL_3:
+ case ARIZONA_AIF2_FRAME_CTRL_4:
+ case ARIZONA_AIF2_FRAME_CTRL_5:
+ case ARIZONA_AIF2_FRAME_CTRL_6:
+ case ARIZONA_AIF2_FRAME_CTRL_7:
+ case ARIZONA_AIF2_FRAME_CTRL_8:
+ case ARIZONA_AIF2_FRAME_CTRL_11:
+ case ARIZONA_AIF2_FRAME_CTRL_12:
+ case ARIZONA_AIF2_FRAME_CTRL_13:
+ case ARIZONA_AIF2_FRAME_CTRL_14:
+ case ARIZONA_AIF2_FRAME_CTRL_15:
+ case ARIZONA_AIF2_FRAME_CTRL_16:
+ case ARIZONA_AIF2_TX_ENABLES:
+ case ARIZONA_AIF2_RX_ENABLES:
+ case ARIZONA_AIF3_BCLK_CTRL:
+ case ARIZONA_AIF3_TX_PIN_CTRL:
+ case ARIZONA_AIF3_RX_PIN_CTRL:
+ case ARIZONA_AIF3_RATE_CTRL:
+ case ARIZONA_AIF3_FORMAT:
+ case ARIZONA_AIF3_RX_BCLK_RATE:
+ case ARIZONA_AIF3_FRAME_CTRL_1:
+ case ARIZONA_AIF3_FRAME_CTRL_2:
+ case ARIZONA_AIF3_FRAME_CTRL_3:
+ case ARIZONA_AIF3_FRAME_CTRL_4:
+ case ARIZONA_AIF3_FRAME_CTRL_11:
+ case ARIZONA_AIF3_FRAME_CTRL_12:
+ case ARIZONA_AIF3_TX_ENABLES:
+ case ARIZONA_AIF3_RX_ENABLES:
+ case ARIZONA_SPD1_TX_CONTROL:
+ case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
+ case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
+ case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
+ case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+ case ARIZONA_SLIMBUS_RATES_1:
+ case ARIZONA_SLIMBUS_RATES_2:
+ case ARIZONA_SLIMBUS_RATES_5:
+ case ARIZONA_SLIMBUS_RATES_6:
+ case ARIZONA_SLIMBUS_RATES_7:
+ case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+ case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+ case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_GPIO1_CTRL:
+ case ARIZONA_GPIO2_CTRL:
+ case ARIZONA_GPIO3_CTRL:
+ case ARIZONA_GPIO4_CTRL:
+ case ARIZONA_GPIO5_CTRL:
+ case ARIZONA_IRQ_CTRL_1:
+ case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+ case ARIZONA_GP_SWITCH_1:
+ case ARIZONA_MISC_PAD_CTRL_1:
+ case ARIZONA_MISC_PAD_CTRL_2:
+ case ARIZONA_MISC_PAD_CTRL_3:
+ case ARIZONA_MISC_PAD_CTRL_4:
+ case ARIZONA_MISC_PAD_CTRL_5:
+ case ARIZONA_MISC_PAD_CTRL_6:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_INTERRUPT_STATUS_1_MASK:
+ case ARIZONA_INTERRUPT_STATUS_2_MASK:
+ case ARIZONA_INTERRUPT_STATUS_3_MASK:
+ case ARIZONA_INTERRUPT_STATUS_4_MASK:
+ case ARIZONA_INTERRUPT_STATUS_5_MASK:
+ case ARIZONA_INTERRUPT_CONTROL:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1_MASK:
+ case ARIZONA_IRQ2_STATUS_2_MASK:
+ case ARIZONA_IRQ2_STATUS_3_MASK:
+ case ARIZONA_IRQ2_STATUS_4_MASK:
+ case ARIZONA_IRQ2_STATUS_5_MASK:
+ case ARIZONA_IRQ2_CONTROL:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_MASK_IRQ1:
+ case ARIZONA_AOD_IRQ_MASK_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_JACK_DETECT_DEBOUNCE:
+ case ARIZONA_FX_CTRL1:
+ case ARIZONA_FX_CTRL2:
+ case ARIZONA_EQ1_1:
+ case ARIZONA_EQ1_2:
+ case ARIZONA_EQ1_3:
+ case ARIZONA_EQ1_4:
+ case ARIZONA_EQ1_5:
+ case ARIZONA_EQ1_6:
+ case ARIZONA_EQ1_7:
+ case ARIZONA_EQ1_8:
+ case ARIZONA_EQ1_9:
+ case ARIZONA_EQ1_10:
+ case ARIZONA_EQ1_11:
+ case ARIZONA_EQ1_12:
+ case ARIZONA_EQ1_13:
+ case ARIZONA_EQ1_14:
+ case ARIZONA_EQ1_15:
+ case ARIZONA_EQ1_16:
+ case ARIZONA_EQ1_17:
+ case ARIZONA_EQ1_18:
+ case ARIZONA_EQ1_19:
+ case ARIZONA_EQ1_20:
+ case ARIZONA_EQ1_21:
+ case ARIZONA_EQ2_1:
+ case ARIZONA_EQ2_2:
+ case ARIZONA_EQ2_3:
+ case ARIZONA_EQ2_4:
+ case ARIZONA_EQ2_5:
+ case ARIZONA_EQ2_6:
+ case ARIZONA_EQ2_7:
+ case ARIZONA_EQ2_8:
+ case ARIZONA_EQ2_9:
+ case ARIZONA_EQ2_10:
+ case ARIZONA_EQ2_11:
+ case ARIZONA_EQ2_12:
+ case ARIZONA_EQ2_13:
+ case ARIZONA_EQ2_14:
+ case ARIZONA_EQ2_15:
+ case ARIZONA_EQ2_16:
+ case ARIZONA_EQ2_17:
+ case ARIZONA_EQ2_18:
+ case ARIZONA_EQ2_19:
+ case ARIZONA_EQ2_20:
+ case ARIZONA_EQ2_21:
+ case ARIZONA_EQ3_1:
+ case ARIZONA_EQ3_2:
+ case ARIZONA_EQ3_3:
+ case ARIZONA_EQ3_4:
+ case ARIZONA_EQ3_5:
+ case ARIZONA_EQ3_6:
+ case ARIZONA_EQ3_7:
+ case ARIZONA_EQ3_8:
+ case ARIZONA_EQ3_9:
+ case ARIZONA_EQ3_10:
+ case ARIZONA_EQ3_11:
+ case ARIZONA_EQ3_12:
+ case ARIZONA_EQ3_13:
+ case ARIZONA_EQ3_14:
+ case ARIZONA_EQ3_15:
+ case ARIZONA_EQ3_16:
+ case ARIZONA_EQ3_17:
+ case ARIZONA_EQ3_18:
+ case ARIZONA_EQ3_19:
+ case ARIZONA_EQ3_20:
+ case ARIZONA_EQ3_21:
+ case ARIZONA_EQ4_1:
+ case ARIZONA_EQ4_2:
+ case ARIZONA_EQ4_3:
+ case ARIZONA_EQ4_4:
+ case ARIZONA_EQ4_5:
+ case ARIZONA_EQ4_6:
+ case ARIZONA_EQ4_7:
+ case ARIZONA_EQ4_8:
+ case ARIZONA_EQ4_9:
+ case ARIZONA_EQ4_10:
+ case ARIZONA_EQ4_11:
+ case ARIZONA_EQ4_12:
+ case ARIZONA_EQ4_13:
+ case ARIZONA_EQ4_14:
+ case ARIZONA_EQ4_15:
+ case ARIZONA_EQ4_16:
+ case ARIZONA_EQ4_17:
+ case ARIZONA_EQ4_18:
+ case ARIZONA_EQ4_19:
+ case ARIZONA_EQ4_20:
+ case ARIZONA_EQ4_21:
+ case ARIZONA_DRC1_CTRL1:
+ case ARIZONA_DRC1_CTRL2:
+ case ARIZONA_DRC1_CTRL3:
+ case ARIZONA_DRC1_CTRL4:
+ case ARIZONA_DRC1_CTRL5:
+ case ARIZONA_HPLPF1_1:
+ case ARIZONA_HPLPF1_2:
+ case ARIZONA_HPLPF2_1:
+ case ARIZONA_HPLPF2_2:
+ case ARIZONA_HPLPF3_1:
+ case ARIZONA_HPLPF3_2:
+ case ARIZONA_HPLPF4_1:
+ case ARIZONA_HPLPF4_2:
+ case ARIZONA_ASRC_ENABLE:
+ case ARIZONA_ASRC_STATUS:
+ case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
+ case ARIZONA_ISRC_1_CTRL_1:
+ case ARIZONA_ISRC_1_CTRL_2:
+ case ARIZONA_ISRC_1_CTRL_3:
+ case ARIZONA_ISRC_2_CTRL_1:
+ case ARIZONA_ISRC_2_CTRL_2:
+ case ARIZONA_ISRC_2_CTRL_3:
+ case ARIZONA_FRF_COEFF_1:
+ case ARIZONA_FRF_COEFF_2:
+ case ARIZONA_FRF_COEFF_3:
+ case ARIZONA_FRF_COEFF_4:
+ case ARIZONA_V2_DAC_COMP_1:
+ case ARIZONA_V2_DAC_COMP_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm8998_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_MIC_DETECT_4:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_INPUT_ENABLES_STATUS:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_FX_CTRL2:
+ case ARIZONA_ASRC_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#define WM8998_MAX_REGISTER 0x31ff
+
+const struct regmap_config wm8998_i2c_regmap = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = WM8998_MAX_REGISTER,
+ .readable_reg = wm8998_readable_register,
+ .volatile_reg = wm8998_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wm8998_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm8998_i2c_regmap);
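As a usage note (illustrative, not part of this patch): the matching bus driver is expected to hand this config to regmap at probe time. A minimal sketch follows, assuming the usual devm-based probe shape; regmap will reject accesses that fail wm8998_readable_register(), always go to the hardware for registers flagged by wm8998_volatile_register(), and serve the rest from the rbtree cache seeded by wm8998_reg_default.

	static int wm8998_i2c_probe(struct i2c_client *i2c,
				    const struct i2c_device_id *id)
	{
		struct regmap *regmap;

		/* Managed init: the regmap is freed automatically on unbind */
		regmap = devm_regmap_init_i2c(i2c, &wm8998_i2c_regmap);
		if (IS_ERR(regmap)) {
			dev_err(&i2c->dev, "Failed to allocate regmap: %ld\n",
				PTR_ERR(regmap));
			return PTR_ERR(regmap);
		}

		/* ... hand the regmap to the core driver from here ... */
		return 0;
	}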
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 42c38525904b..ccccc2943f2f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -271,6 +271,16 @@ config HP_ILO
To compile this driver as a module, choose M here: the
module will be called hpilo.
+config QCOM_COINCELL
+ tristate "Qualcomm coincell charger support"
+ depends on MFD_SPMI_PMIC || COMPILE_TEST
+ help
+	  This driver supports the coincell block found inside
+ Qualcomm PMICs. The coincell charger provides a means to
+ charge a coincell battery or backup capacitor which is used
+ to maintain PMIC register and RTC state in the absence of
+ external power.
+
config SGI_GRU
tristate "SGI GRU driver"
depends on X86_UV && SMP
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d056fb7186fe..537d7f3b78da 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
+obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
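As a usage note spanning the two hunks above (a hedged example, not part of the patch): a .config that satisfies the stated dependency and builds the new driver as a module would contain:

	CONFIG_MFD_SPMI_PMIC=y
	CONFIG_QCOM_COINCELL=m

Since the entry is tristate, =m yields qcom-coincell.ko via the obj-$(CONFIG_QCOM_COINCELL) rule just added.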
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 705b881e186d..d11187d36ddd 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -106,7 +106,6 @@ MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
static struct i2c_driver ad_dpot_i2c_driver = {
.driver = {
.name = "ad_dpot",
- .owner = THIS_MODULE,
},
.probe = ad_dpot_i2c_probe,
.remove = ad_dpot_i2c_remove,
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 3739ffa9cdf1..a3e789b85cc8 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1275,7 +1275,6 @@ static const struct dev_pm_ops apds990x_pm_ops = {
static struct i2c_driver apds990x_driver = {
.driver = {
.name = "apds990x",
- .owner = THIS_MODULE,
.pm = &apds990x_pm_ops,
},
.probe = apds990x_probe,
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index b756381b8250..753d7ecdadaa 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1396,7 +1396,6 @@ static const struct dev_pm_ops bh1770_pm_ops = {
static struct i2c_driver bh1770_driver = {
.driver = {
.name = "bh1770glc",
- .owner = THIS_MODULE,
.pm = &bh1770_pm_ops,
},
.probe = bh1770_probe,
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index a7c16295b816..f35c218aaa1a 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(i2c, bmp085_id);
static struct i2c_driver bmp085_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = BMP085_NAME,
},
.id_table = bmp085_id,
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index b6db9ebd52c2..8756d06e2bb8 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -11,11 +11,16 @@ config CXL_KERNEL_API
bool
default n
+config CXL_EEH
+ bool
+ default n
+
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
- depends on PPC_POWERNV && PCI_MSI
+ depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
select CXL_KERNEL_API
+ select CXL_EEH
default m
help
Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 14e3f8219a11..6f484dfe78f9 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Werror
+
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
cxl-y += vphb.o api.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 0c77240ae2fc..8af12c884b04 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -12,30 +12,57 @@
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
+#include <linux/fs.h>
#include "cxl.h"
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
+ struct address_space *mapping;
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
afu = cxl_pci_to_afu(dev);
+ get_device(&afu->dev);
ctx = cxl_context_alloc();
- if (IS_ERR(ctx))
- return ctx;
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_dev;
+ }
- /* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false, NULL);
- if (rc) {
- kfree(ctx);
- return ERR_PTR(-ENOMEM);
+ ctx->kernelapi = true;
+
+ /*
+ * Make our own address space since we won't have one from the
+ * filesystem like the user api has, and even if we do associate a file
+ * with this context we don't want to use the global anonymous inode's
+ * address space as that can invalidate unrelated users:
+ */
+ mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
+ if (!mapping) {
+ rc = -ENOMEM;
+ goto err_ctx;
}
+ address_space_init_once(mapping);
+
+ /* Make it a slave context. We can promote it later? */
+ rc = cxl_context_init(ctx, afu, false, mapping);
+ if (rc)
+ goto err_mapping;
+
cxl_assign_psn_space(ctx);
return ctx;
+
+err_mapping:
+ kfree(mapping);
+err_ctx:
+ kfree(ctx);
+err_dev:
+ put_device(&afu->dev);
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -57,9 +84,11 @@ EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
int cxl_release_context(struct cxl_context *ctx)
{
- if (ctx->status != CLOSED)
+ if (ctx->status >= STARTED)
return -EBUSY;
+ put_device(&ctx->afu->dev);
+
cxl_context_free(ctx);
return 0;
@@ -159,7 +188,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
}
ctx->status = STARTED;
- get_device(&ctx->afu->dev);
out:
mutex_unlock(&ctx->status_mutex);
return rc;
@@ -175,12 +203,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
- int rc;
-
- rc = __detach_context(ctx);
- if (!rc)
- put_device(&ctx->afu->dev);
- return rc;
+ return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
@@ -257,9 +280,16 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
file = anon_inode_getfile("cxl", fops, ctx, flags);
if (IS_ERR(file))
- put_unused_fd(fdtmp);
+ goto err_fd;
+
+ file->f_mapping = ctx->mapping;
+
*fd = fdtmp;
return file;
+
+err_fd:
+ put_unused_fd(fdtmp);
+ return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
@@ -329,3 +359,10 @@ int cxl_afu_reset(struct cxl_context *ctx)
return cxl_afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);
+
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+ bool perst_reloads_same_image)
+{
+ afu->adapter->perst_same_image = perst_reloads_same_image;
+}
+EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
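To summarise the reworked lifetimes (an illustrative sketch, not code from this patch; pdev, wed and the function name are placeholders): the AFU device reference is now taken in cxl_dev_context_init() and dropped in cxl_release_context(), so stopping a context no longer releases it, and release only refuses contexts that are still STARTED.

	static int example_afu_user(struct pci_dev *pdev, u64 wed)
	{
		struct cxl_context *ctx;
		int rc;

		ctx = cxl_dev_context_init(pdev); /* takes the afu->dev ref */
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		rc = cxl_start_context(ctx, wed, NULL); /* status -> STARTED */
		if (rc)
			goto release;

		/* ... drive the AFU, then detach ... */
		cxl_stop_context(ctx);		/* detach only; ref kept */
	release:
		cxl_release_context(ctx);	/* drops the afu->dev ref */
		return rc;
	}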
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2a4c80ac322a..e762f85ee233 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
- if (offset > ctx->afu->adapter->ps_size)
+ if (offset >= ctx->afu->adapter->ps_size)
return VM_FAULT_SIGBUS;
} else {
area = ctx->psn_phys;
- if (offset > ctx->psn_size)
+ if (offset >= ctx->psn_size)
return VM_FAULT_SIGBUS;
}
@@ -126,6 +126,18 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ctx->status != STARTED) {
mutex_unlock(&ctx->status_mutex);
pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ if (ctx->mmio_err_ff) {
+ if (!ctx->ff_page) {
+ ctx->ff_page = alloc_page(GFP_USER);
+ if (!ctx->ff_page)
+ return VM_FAULT_OOM;
+ memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
+ }
+ get_page(ctx->ff_page);
+ vmf->page = ctx->ff_page;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+ return 0;
+ }
return VM_FAULT_SIGBUS;
}
@@ -145,8 +157,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
*/
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
+ u64 start = vma->vm_pgoff << PAGE_SHIFT;
u64 len = vma->vm_end - vma->vm_start;
- len = min(len, ctx->psn_size);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ if (start + len > ctx->afu->adapter->ps_size)
+ return -EINVAL;
+ } else {
+ if (start + len > ctx->psn_size)
+ return -EINVAL;
+ }
if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
/* make sure there is a valid per process space for this AFU */
@@ -185,7 +205,11 @@ int __detach_context(struct cxl_context *ctx)
if (status != STARTED)
return -EBUSY;
- WARN_ON(cxl_detach_process(ctx));
+ /* Only warn if we detached while the link was OK.
+ * If detach fails when hw is down, we don't care.
+ */
+ WARN_ON(cxl_detach_process(ctx) &&
+ cxl_adapter_link_ok(ctx->afu->adapter));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
put_pid(ctx->pid);
cxl_ctx_put();
@@ -245,7 +269,11 @@ static void reclaim_ctx(struct rcu_head *rcu)
struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
free_page((u64)ctx->sstp);
+ if (ctx->ff_page)
+ __free_page(ctx->ff_page);
ctx->sstp = NULL;
+ if (ctx->kernelapi)
+ kfree(ctx->mapping);
kfree(ctx);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fd66cabde1e..1c30ef77073d 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -34,7 +34,7 @@ extern uint cxl_verbose;
* Bump version each time a user API change is made, whether it is
 * backwards compatible or not.
*/
-#define CXL_API_VERSION 1
+#define CXL_API_VERSION 2
#define CXL_API_VERSION_COMPATIBLE 1
/*
@@ -83,8 +83,10 @@ static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
/* 0x00C0:7EFF Implementation dependent area */
static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118};
static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
@@ -152,6 +154,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SPAP_Size_Shift 4
#define CXL_PSL_SPAP_V 0x0000000000000001ULL
+/****** CXL_PSL_Control ****************************************************/
+#define CXL_PSL_Control_tb 0x0000000000000001ULL
+
/****** CXL_PSL_DLCNTL *****************************************************/
#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
#define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
@@ -418,6 +423,9 @@ struct cxl_context {
/* Used to unmap any mmaps when force detaching */
struct address_space *mapping;
struct mutex mapping_lock;
+ struct page *ff_page;
+ bool mmio_err_ff;
+ bool kernelapi;
spinlock_t sste_lock; /* Protects segment table entries */
struct cxl_sste *sstp;
@@ -493,6 +501,7 @@ struct cxl {
bool user_image_loaded;
bool perst_loads_image;
bool perst_select_user;
+ bool perst_same_image;
};
int cxl_alloc_one_irq(struct cxl *adapter);
@@ -531,16 +540,33 @@ struct cxl_process_element {
__be32 software_state;
} __packed;
+static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(cxl->dev.parent);
+ return !pci_channel_offline(pdev);
+}
+
static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
{
WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
return cxl->p1_mmio + cxl_reg_off(reg);
}
-#define cxl_p1_write(cxl, reg, val) \
- out_be64(_cxl_p1_addr(cxl, reg), val)
-#define cxl_p1_read(cxl, reg) \
- in_be64(_cxl_p1_addr(cxl, reg))
+static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
+{
+ if (likely(cxl_adapter_link_ok(cxl)))
+ out_be64(_cxl_p1_addr(cxl, reg), val);
+}
+
+static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
+{
+ if (likely(cxl_adapter_link_ok(cxl)))
+ return in_be64(_cxl_p1_addr(cxl, reg));
+ else
+ return ~0ULL;
+}
static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
{
@@ -548,26 +574,56 @@ static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg
return afu->p1n_mmio + cxl_reg_off(reg);
}
-#define cxl_p1n_write(afu, reg, val) \
- out_be64(_cxl_p1n_addr(afu, reg), val)
-#define cxl_p1n_read(afu, reg) \
- in_be64(_cxl_p1n_addr(afu, reg))
+static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ out_be64(_cxl_p1n_addr(afu, reg), val);
+}
+
+static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ return in_be64(_cxl_p1n_addr(afu, reg));
+ else
+ return ~0ULL;
+}
static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
{
return afu->p2n_mmio + cxl_reg_off(reg);
}
-#define cxl_p2n_write(afu, reg, val) \
- out_be64(_cxl_p2n_addr(afu, reg), val)
-#define cxl_p2n_read(afu, reg) \
- in_be64(_cxl_p2n_addr(afu, reg))
+static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ out_be64(_cxl_p2n_addr(afu, reg), val);
+}
+
+static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ return in_be64(_cxl_p2n_addr(afu, reg));
+ else
+ return ~0ULL;
+}
+static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
+ ((cr) * (afu)->crs_len) + (off));
+ else
+ return ~0ULL;
+}
-#define cxl_afu_cr_read64(afu, cr, off) \
- in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
-#define cxl_afu_cr_read32(afu, cr, off) \
- in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
+{
+ if (likely(cxl_adapter_link_ok(afu->adapter)))
+ return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
+ ((cr) * (afu)->crs_len) + (off));
+ else
+ return 0xffffffff;
+}
u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
@@ -585,6 +641,9 @@ void unregister_cxl_calls(struct cxl_calls *calls);
int cxl_alloc_adapter_nr(struct cxl *adapter);
void cxl_remove_adapter_nr(struct cxl *adapter);
+int cxl_alloc_spa(struct cxl_afu *afu);
+void cxl_release_spa(struct cxl_afu *afu);
+
int cxl_file_init(void);
void cxl_file_exit(void);
int cxl_register_adapter(struct cxl *adapter);
@@ -675,6 +734,7 @@ int cxl_psl_purge(struct cxl_afu *afu);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
extern struct pci_driver cxl_pci_driver;
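One consequence worth illustrating (assumed caller code, not from the patch): because the accessors above return all-ones once the PCI channel is offline, a caller that reads ~0ULL can disambiguate a genuine register value from a dead link by consulting cxl_adapter_link_ok():

	static int example_check_dsisr(struct cxl_afu *afu)
	{
		u64 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);

		/* ~0ULL is both a legal register value and the dead-link
		 * sentinel, so confirm against the channel state. */
		if (dsisr == ~0ULL && !cxl_adapter_link_ok(afu->adapter))
			return -EIO;	/* EEH has frozen the device */

		/* ... normal handling ... */
		return 0;
	}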
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 825c412580bc..18df6f44af2a 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -48,7 +48,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x
static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
- return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
+ return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
}
int cxl_debugfs_adapter_add(struct cxl *adapter)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e3f4b69527a9..a30bf285b5bd 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -73,6 +73,11 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
if (!afu->current_mode)
goto err_put_afu;
+ if (!cxl_adapter_link_ok(adapter)) {
+ rc = -EIO;
+ goto err_put_afu;
+ }
+
if (!(ctx = cxl_context_alloc())) {
rc = -ENOMEM;
goto err_put_afu;
@@ -179,6 +184,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
if (work.flags & CXL_START_WORK_AMR)
amr = work.amr & mfspr(SPRN_UAMOR);
+ ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+
/*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
@@ -238,6 +245,9 @@ long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (ctx->status == CLOSED)
return -EIO;
+ if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ return -EIO;
+
pr_devel("afu_ioctl\n");
switch (cmd) {
case CXL_IOCTL_START_WORK:
@@ -251,7 +261,7 @@ long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
-long afu_compat_ioctl(struct file *file, unsigned int cmd,
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return afu_ioctl(file, cmd, arg);
@@ -265,6 +275,9 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
if (ctx->status != STARTED)
return -EIO;
+ if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ return -EIO;
+
return cxl_context_iomap(ctx, vm);
}
@@ -309,6 +322,9 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
int rc;
DEFINE_WAIT(wait);
+ if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ return -EIO;
+
if (count < CXL_READ_MIN_SIZE)
return -EINVAL;
@@ -319,6 +335,11 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
if (ctx_event_pending(ctx))
break;
+ if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ rc = -EIO;
+ goto out;
+ }
+
if (file->f_flags & O_NONBLOCK) {
rc = -EAGAIN;
goto out;
@@ -396,7 +417,7 @@ const struct file_operations afu_fops = {
.mmap = afu_mmap,
};
-const struct file_operations afu_master_fops = {
+static const struct file_operations afu_master_fops = {
.owner = THIS_MODULE,
.open = afu_master_open,
.poll = afu_poll,
@@ -519,7 +540,7 @@ int __init cxl_file_init(void)
 * If these change we really need to update the API. Either change some
* flags or update API version number CXL_API_VERSION.
*/
- BUILD_BUG_ON(CXL_API_VERSION != 1);
+ BUILD_BUG_ON(CXL_API_VERSION != 2);
BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
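For completeness, a hedged userspace sketch of opting into the new fill-with-ones semantics via the flag handled in afu_ioctl_start_work() above (afu_fd and the surrounding setup are assumed):

	/* userspace; needs <sys/ioctl.h>, <err.h> and the uapi misc/cxl.h */
	struct cxl_ioctl_start_work work = {
		.flags = CXL_START_WORK_ERR_FF,
	};

	if (ioctl(afu_fd, CXL_IOCTL_START_WORK, &work))
		err(1, "CXL_IOCTL_START_WORK");

	/* After an EEH error, reads of the problem-state mmap now return
	 * 0xFF bytes rather than delivering SIGBUS. */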
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 680cd263436d..583b42afeda2 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -30,12 +30,12 @@ static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u6
serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
- dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
- dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
- dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+ dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+ dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+ dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
cxl_stop_trace(ctx->afu->adapter);
@@ -54,10 +54,10 @@ irqreturn_t cxl_slice_irq_err(int irq, void *data)
fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
- dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+ dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
@@ -72,7 +72,7 @@ static irqreturn_t cxl_irq_err(int irq, void *data)
WARN(1, "CXL ERROR interrupt %i\n", irq);
err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
- dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);
+ dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
cxl_stop_trace(adapter);
@@ -80,7 +80,7 @@ static irqreturn_t cxl_irq_err(int irq, void *data)
fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
return IRQ_HANDLED;
}
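A note on the format strings rewritten throughout this file: for integer conversions, a precision ("%.16llx") and a zero-padded field width ("%016llx") print the same sixteen hex digits for every unsigned long long value, but the width form is the conventional kernel spelling — and the "%.llx" replaced just below was worse than unidiomatic, since a bare '.' means precision zero, which in standard C prints no digits at all when the value is zero. A userspace check of the two valid forms:

#include <stdio.h>

int main(void)
{
	unsigned long long v = 0xdeadbeefULL;

	printf("%.16llx\n", v);	/* precision: at least 16 digits -> 00000000deadbeef */
	printf("%016llx\n", v);	/* zero flag + width 16         -> 00000000deadbeef */
	return 0;
}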
@@ -147,7 +147,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
if (dsisr & CXL_PSL_DSISR_An_PE)
return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
if (dsisr & CXL_PSL_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
+ pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
if (ctx->pending_afu_err) {
/*
@@ -158,7 +158,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
* probably best that we log them somewhere:
*/
dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
- "undelivered to pe %i: %.llx\n",
+ "undelivered to pe %i: 0x%016llx\n",
ctx->pe, irq_info->afu_err);
} else {
spin_lock(&ctx->lock);
@@ -211,8 +211,8 @@ static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
}
rcu_read_unlock();
- WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
- " %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+ WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+ " %016llx\n(Possible AFU HW issue - was a term/remove acked"
" with outstanding transactions?)\n", ph, irq_info.dsisr,
irq_info.dar);
return fail_psl_irq(afu, &irq_info);
@@ -341,6 +341,9 @@ int cxl_register_psl_err_irq(struct cxl *adapter)
void cxl_release_psl_err_irq(struct cxl *adapter)
{
+ if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+ return;
+
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
cxl_unmap_irq(adapter->err_virq, adapter);
cxl_release_one_irq(adapter, adapter->err_hwirq);
@@ -374,6 +377,9 @@ int cxl_register_serr_irq(struct cxl_afu *afu)
void cxl_release_serr_irq(struct cxl_afu *afu)
{
+ if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+ return;
+
cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
cxl_unmap_irq(afu->serr_virq, afu);
cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
@@ -400,12 +406,15 @@ int cxl_register_psl_irq(struct cxl_afu *afu)
void cxl_release_psl_irq(struct cxl_afu *afu)
{
+ if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+ return;
+
cxl_unmap_irq(afu->psl_virq, afu);
cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
kfree(afu->psl_irq_name);
}
-void afu_irq_name_free(struct cxl_context *ctx)
+static void afu_irq_name_free(struct cxl_context *ctx)
{
struct cxl_irq_name *irq_name, *tmp;
@@ -421,6 +430,9 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
int rc, r, i, j = 1;
struct cxl_irq_name *irq_name;
+ /* Initialize the list head to hold irq names */
+ INIT_LIST_HEAD(&ctx->irq_names);
+
if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
return rc;
@@ -432,13 +444,12 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
sizeof(*ctx->irq_bitmap), GFP_KERNEL);
if (!ctx->irq_bitmap)
- return -ENOMEM;
+ goto out;
/*
* Allocate names first. If any fail, bail out before allocating
* actual hardware IRQs.
*/
- INIT_LIST_HEAD(&ctx->irq_names);
for (r = 1; r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
irq_name = kmalloc(sizeof(struct cxl_irq_name),
@@ -460,11 +471,12 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
return 0;
out:
+ cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
afu_irq_name_free(ctx);
return -ENOMEM;
}
-void afu_register_hwirqs(struct cxl_context *ctx)
+static void afu_register_hwirqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
struct cxl_irq_name *irq_name;
@@ -511,4 +523,8 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
afu_irq_name_free(ctx);
cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+
+ kfree(ctx->irq_bitmap);
+ ctx->irq_bitmap = NULL;
+ ctx->irq_count = 0;
}
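The guards added to the three release functions above, plus the kfree()/NULL pair at the end of afu_release_irqs(), make the whole IRQ teardown path idempotent — necessary once EEH recovery (added to pci.c later in this patch) can run the deconfigure path against an AFU that was already torn down. irq_find_mapping(NULL, hwirq) looks the hardware IRQ up in the default domain; if the stored virq no longer matches, the mapping is already gone and the function returns without touching it. The memory half of the pattern in miniature (my_teardown and its fields are illustrative):

static void my_teardown(struct my_ctx *ctx)
{
	/* Safe to call repeatedly: free once, then remember it is gone. */
	kfree(ctx->bitmap);	/* kfree(NULL) is a no-op */
	ctx->bitmap = NULL;
	ctx->count = 0;
}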
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 833348e2c9cb..9fde75ed4fac 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
spin_lock(&adapter->afu_list_lock);
for (slice = 0; slice < adapter->slices; slice++) {
afu = adapter->afu[slice];
- if (!afu->enabled)
+ if (!afu || !afu->enabled)
continue;
rcu_read_lock();
idr_for_each_entry(&afu->contexts_idr, ctx, id)
@@ -222,6 +222,7 @@ static void exit_cxl(void)
cxl_debugfs_exit();
cxl_file_exit();
unregister_cxl_calls(&cxl_calls);
+ idr_destroy(&cxl_adapter_idr);
}
module_init(init_cxl);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 10567f245818..b37f2e8004f5 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -41,7 +41,14 @@ static int afu_control(struct cxl_afu *afu, u64 command,
rc = -EBUSY;
goto out;
}
- pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
+
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ afu->enabled = enabled;
+ rc = -EIO;
+ goto out;
+ }
+
+ pr_devel_ratelimited("AFU control... (0x%016llx)\n",
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
@@ -85,6 +92,10 @@ int __cxl_afu_reset(struct cxl_afu *afu)
int cxl_afu_check_and_enable(struct cxl_afu *afu)
{
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ WARN(1, "Refusing to enable afu while link down!\n");
+ return -EIO;
+ }
if (afu->enabled)
return 0;
return afu_enable(afu);
@@ -103,6 +114,12 @@ int cxl_psl_purge(struct cxl_afu *afu)
pr_devel("PSL purge request\n");
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
+ rc = -EIO;
+ goto out;
+ }
+
if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
WARN(1, "psl_purge request while AFU not disabled!\n");
cxl_afu_disable(afu);
@@ -119,14 +136,19 @@ int cxl_psl_purge(struct cxl_afu *afu)
rc = -EBUSY;
goto out;
}
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ rc = -EIO;
+ goto out;
+ }
+
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
+ pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
if (dsisr & CXL_PSL_DSISR_TRANS) {
dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
+ dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
} else if (dsisr) {
- dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
+ dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
} else {
cpu_relax();
@@ -161,10 +183,8 @@ static int spa_max_procs(int spa_size)
return ((spa_size / 8) - 96) / 17;
}
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
{
- u64 spap;
-
/* Work out how many pages to allocate */
afu->spa_order = 0;
do {
@@ -183,6 +203,13 @@ static int alloc_spa(struct cxl_afu *afu)
pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+ return 0;
+}
+
+static void attach_spa(struct cxl_afu *afu)
+{
+ u64 spap;
+
afu->sw_command_status = (__be64 *)((char *)afu->spa +
((afu->spa_max_procs + 3) * 128));
@@ -191,14 +218,19 @@ static int alloc_spa(struct cxl_afu *afu)
spap |= CXL_PSL_SPAP_V;
pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
- return 0;
}
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
{
cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
- free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+ if (afu->spa) {
+ free_pages((unsigned long) afu->spa, afu->spa_order);
+ afu->spa = NULL;
+ }
}
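Splitting the old alloc_spa() into cxl_alloc_spa() and attach_spa(), with a separate detach_spa()/cxl_release_spa() pair, decouples the lifetime of the scheduled process area from AFU activation: the memory is allocated once and survives deactivate/reactivate (and, later in this patch, EEH deconfigure/reconfigure), while the PSL pointer registers are rewritten on every activation. The shape of the split (illustrative names):

static int ensure_attached(struct my_afu *afu)
{
	if (!afu->spa) {			/* allocate on first activation only */
		afu->spa = alloc_my_spa(afu);
		if (!afu->spa)
			return -ENOMEM;
	}
	point_hw_at_spa(afu, afu->spa);		/* re-attach is always safe */
	return 0;
}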
int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -215,6 +247,8 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
return -EBUSY;
}
+ if (!cxl_adapter_link_ok(adapter))
+ return -EIO;
cpu_relax();
}
@@ -224,6 +258,8 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
return -EBUSY;
}
+ if (!cxl_adapter_link_ok(adapter))
+ return -EIO;
cpu_relax();
}
return 0;
@@ -240,6 +276,11 @@ int cxl_afu_slbia(struct cxl_afu *afu)
dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
return -EBUSY;
}
+ /* If the adapter has gone down, we can assume that we
+ * will PERST it and that will invalidate everything.
+ */
+ if (!cxl_adapter_link_ok(afu->adapter))
+ return -EIO;
cpu_relax();
}
return 0;
@@ -279,6 +320,8 @@ static void slb_invalid(struct cxl_context *ctx)
cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
while (1) {
+ if (!cxl_adapter_link_ok(adapter))
+ break;
slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
if (!(slbia & CXL_TLB_SLB_P))
break;
@@ -308,6 +351,11 @@ static int do_process_element_cmd(struct cxl_context *ctx,
rc = -EBUSY;
goto out;
}
+ if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
+ rc = -EIO;
+ goto out;
+ }
state = be64_to_cpup(ctx->afu->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
@@ -355,8 +403,13 @@ static int terminate_process_element(struct cxl_context *ctx)
mutex_lock(&ctx->afu->spa_mutex);
pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
- rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
- CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
+ /* We could be asked to terminate when the hw is down. That
+ * should always succeed: it's not running if the hw has gone
+ * away and is being reset.
+ */
+ if (cxl_adapter_link_ok(ctx->afu->adapter))
+ rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
+ CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
ctx->elem->software_state = 0; /* Remove Valid bit */
pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
mutex_unlock(&ctx->afu->spa_mutex);
@@ -369,7 +422,14 @@ static int remove_process_element(struct cxl_context *ctx)
mutex_lock(&ctx->afu->spa_mutex);
pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
- if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
+
+ /* We could be asked to remove when the hw is down. Again, if
+ * the hw is down, the PE is gone, so we succeed.
+ */
+ if (cxl_adapter_link_ok(ctx->afu->adapter))
+ rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
+
+ if (!rc)
ctx->pe_inserted = false;
slb_invalid(ctx);
pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
@@ -397,8 +457,11 @@ static int activate_afu_directed(struct cxl_afu *afu)
dev_info(&afu->dev, "Activating AFU directed mode\n");
- if (alloc_spa(afu))
- return -ENOMEM;
+ if (afu->spa == NULL) {
+ if (cxl_alloc_spa(afu))
+ return -ENOMEM;
+ }
+ attach_spa(afu);
cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -492,9 +555,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
if ((result = cxl_afu_check_and_enable(ctx->afu)))
return result;
- add_process_element(ctx);
-
- return 0;
+ return add_process_element(ctx);
}
static int deactivate_afu_directed(struct cxl_afu *afu)
@@ -511,8 +572,6 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_afu_disable(afu);
cxl_psl_purge(afu);
- release_spa(afu);
-
return 0;
}
@@ -614,6 +673,11 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
if (!(mode & afu->modes_supported))
return -EINVAL;
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ WARN(1, "Device link is down, refusing to activate!\n");
+ return -EIO;
+ }
+
if (mode == CXL_MODE_DIRECTED)
return activate_afu_directed(afu);
if (mode == CXL_MODE_DEDICATED)
@@ -624,6 +688,11 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
+ if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ WARN(1, "Device link is down, refusing to attach process!\n");
+ return -EIO;
+ }
+
ctx->kernel = kernel;
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return attach_afu_directed(ctx, wed, amr);
@@ -668,6 +737,12 @@ int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
{
u64 pidtid;
+ /* If the adapter has gone away, we can't get any meaningful
+ * information.
+ */
+ if (!cxl_adapter_link_ok(afu->adapter))
+ return -EIO;
+
info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
@@ -684,7 +759,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
u64 dsisr;
- pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);
+ pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
/* Clear PSL_DSISR[PE] */
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
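The recurring change in this file: every register polling loop gains a cxl_adapter_link_ok() test beside its timeout test. Once EEH fences the PHB, MMIO reads return all-ones indefinitely, so a loop waiting for a specific bit pattern would otherwise spin until its full timeout — or, for the while (1) loop in slb_invalid(), forever. The resulting loop shape (illustrative names):

static int wait_for_done(struct my_dev *d, unsigned long timeout)
{
	while (!(read_status(d) & MY_DONE)) {
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;	/* hardware wedged */
		if (!my_link_ok(d))
			return -EIO;	/* hardware gone: stop waiting */
		cpu_relax();
	}
	return 0;
}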
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index c68ef5806dbe..02c85160bfe9 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include "cxl.h"
+#include <misc/cxl.h>
#define CXL_PCI_VSEC_ID 0x1280
@@ -133,7 +134,7 @@ u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
return (val >> ((off & 0x3) * 8)) & 0xff;
}
-static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
+static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
@@ -369,6 +370,55 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
return 0;
}
+#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
+#define _2048_250MHZ_CYCLES 1
+
+static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 psl_tb;
+ int delta;
+ unsigned int retry = 0;
+ struct device_node *np;
+
+ if (!(np = pnv_pci_get_phb_node(dev)))
+ return -ENODEV;
+
+ /* Do not fail when CAPP timebase sync is not supported by OPAL */
+ of_node_get(np);
+ if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+ of_node_put(np);
+ pr_err("PSL: Timebase sync: OPAL support missing\n");
+ return 0;
+ }
+ of_node_put(np);
+
+ /*
+ * Setup PSL Timebase Control and Status register
+ * with the recommended Timebase Sync Count value
+ */
+ cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+ TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+
+ /* Enable PSL Timebase */
+ cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
+ cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
+
+ /* Wait until CORE TB and PSL TB difference <= 16usecs */
+ do {
+ msleep(1);
+ if (retry++ > 5) {
+ pr_err("PSL: Timebase sync: giving up!\n");
+ return -EIO;
+ }
+ psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+ delta = mftb() - psl_tb;
+ if (delta < 0)
+ delta = -delta;
+ } while (cputime_to_usecs(delta) > 16);
+
+ return 0;
+}
+
static int init_implementation_afu_regs(struct cxl_afu *afu)
{
/* read/write masks for this slice */
@@ -539,10 +589,18 @@ err:
static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
- if (afu->p1n_mmio)
+ if (afu->p2n_mmio) {
iounmap(afu->p2n_mmio);
- if (afu->p1n_mmio)
+ afu->p2n_mmio = NULL;
+ }
+ if (afu->p1n_mmio) {
iounmap(afu->p1n_mmio);
+ afu->p1n_mmio = NULL;
+ }
+ if (afu->afu_desc_mmio) {
+ iounmap(afu->afu_desc_mmio);
+ afu->afu_desc_mmio = NULL;
+ }
}
static void cxl_release_afu(struct device *dev)
@@ -551,6 +609,9 @@ static void cxl_release_afu(struct device *dev)
pr_devel("cxl_release_afu\n");
+ idr_destroy(&afu->contexts_idr);
+ cxl_release_spa(afu);
+
kfree(afu);
}
@@ -656,7 +717,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
*/
reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
+ dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
if (__cxl_afu_reset(afu))
return -EIO;
if (cxl_afu_disable(afu))
@@ -677,7 +738,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
if (reg) {
- dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
+ dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
if (reg & CXL_PSL_DSISR_TRANS)
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
else
@@ -686,12 +747,12 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
if (reg) {
if (reg & ~0xffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
+ dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
}
reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
if (reg) {
- dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
+ dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
}
@@ -742,45 +803,70 @@ ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
return count;
}
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
- struct cxl_afu *afu;
- bool free = true;
int rc;
- if (!(afu = cxl_alloc_afu(adapter, slice)))
- return -ENOMEM;
-
- if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
- goto err1;
-
if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
- goto err1;
+ return rc;
if ((rc = sanitise_afu_regs(afu)))
- goto err2;
+ goto err1;
/* We need to reset the AFU before we can read the AFU descriptor */
if ((rc = __cxl_afu_reset(afu)))
- goto err2;
+ goto err1;
if (cxl_verbose)
dump_afu_descriptor(afu);
if ((rc = cxl_read_afu_descriptor(afu)))
- goto err2;
+ goto err1;
if ((rc = cxl_afu_descriptor_looks_ok(afu)))
- goto err2;
+ goto err1;
if ((rc = init_implementation_afu_regs(afu)))
- goto err2;
+ goto err1;
if ((rc = cxl_register_serr_irq(afu)))
- goto err2;
+ goto err1;
if ((rc = cxl_register_psl_irq(afu)))
- goto err3;
+ goto err2;
+
+ return 0;
+
+err2:
+ cxl_release_serr_irq(afu);
+err1:
+ cxl_unmap_slice_regs(afu);
+ return rc;
+}
+
+static void cxl_deconfigure_afu(struct cxl_afu *afu)
+{
+ cxl_release_psl_irq(afu);
+ cxl_release_serr_irq(afu);
+ cxl_unmap_slice_regs(afu);
+}
+
+static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+{
+ struct cxl_afu *afu;
+ int rc;
+
+ afu = cxl_alloc_afu(adapter, slice);
+ if (!afu)
+ return -ENOMEM;
+
+ rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
+ if (rc)
+ goto err_free;
+
+ rc = cxl_configure_afu(afu, adapter, dev);
+ if (rc)
+ goto err_free;
/* Don't care if this fails */
cxl_debugfs_afu_add(afu);
@@ -795,10 +881,6 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
if ((rc = cxl_sysfs_afu_add(afu)))
goto err_put1;
-
- if ((rc = cxl_afu_select_best_mode(afu)))
- goto err_put2;
-
adapter->afu[afu->slice] = afu;
if ((rc = cxl_pci_vphb_add(afu)))
@@ -806,21 +888,16 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
return 0;
-err_put2:
- cxl_sysfs_afu_remove(afu);
err_put1:
- device_unregister(&afu->dev);
- free = false;
+ cxl_deconfigure_afu(afu);
cxl_debugfs_afu_remove(afu);
- cxl_release_psl_irq(afu);
-err3:
- cxl_release_serr_irq(afu);
-err2:
- cxl_unmap_slice_regs(afu);
-err1:
- if (free)
- kfree(afu);
+ device_unregister(&afu->dev);
+ return rc;
+
+err_free:
+ kfree(afu);
return rc;
+
}
static void cxl_remove_afu(struct cxl_afu *afu)
@@ -840,10 +917,7 @@ static void cxl_remove_afu(struct cxl_afu *afu)
cxl_context_detach_all(afu);
cxl_afu_deactivate_mode(afu);
- cxl_release_psl_irq(afu);
- cxl_release_serr_irq(afu);
- cxl_unmap_slice_regs(afu);
-
+ cxl_deconfigure_afu(afu);
device_unregister(&afu->dev);
}
@@ -851,16 +925,15 @@ int cxl_reset(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
int rc;
- int i;
- u32 val;
-
- dev_info(&dev->dev, "CXL reset\n");
- for (i = 0; i < adapter->slices; i++) {
- cxl_pci_vphb_remove(adapter->afu[i]);
- cxl_remove_afu(adapter->afu[i]);
+ if (adapter->perst_same_image) {
+ dev_warn(&dev->dev,
+ "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
+ return -EINVAL;
}
+ dev_info(&dev->dev, "CXL reset\n");
+
/* pcie_warm_reset requests a fundamental pci reset which includes a
* PERST assert/deassert. PERST triggers a loading of the image
* if "user" or "factory" is selected in sysfs */
@@ -869,20 +942,6 @@ int cxl_reset(struct cxl *adapter)
return rc;
}
- /* the PERST done above fences the PHB. So, reset depends on EEH
- * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
- * the driver. Do an mmio read explictly to ensure EEH notices the
- * fenced PHB. Retry for a few seconds before giving up. */
- i = 0;
- while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
- (i < 5)) {
- msleep(500);
- i++;
- }
-
- if (val != 0xffffffff)
- dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
-
return rc;
}
@@ -893,7 +952,7 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
if (pci_request_region(dev, 0, "priv 1 regs"))
goto err2;
- pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
+ pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
@@ -917,10 +976,16 @@ err1:
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
- if (adapter->p1_mmio)
+ if (adapter->p1_mmio) {
iounmap(adapter->p1_mmio);
- if (adapter->p2_mmio)
+ adapter->p1_mmio = NULL;
+ pci_release_region(to_pci_dev(adapter->dev.parent), 2);
+ }
+ if (adapter->p2_mmio) {
iounmap(adapter->p2_mmio);
+ adapter->p2_mmio = NULL;
+ pci_release_region(to_pci_dev(adapter->dev.parent), 0);
+ }
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
@@ -949,7 +1014,6 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_loads_image = true;
adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
@@ -1009,81 +1073,138 @@ static void cxl_release_adapter(struct device *dev)
pr_devel("cxl_release_adapter\n");
+ cxl_remove_adapter_nr(adapter);
+
kfree(adapter);
}
-static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
+static struct cxl *cxl_alloc_adapter(void)
{
struct cxl *adapter;
if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
return NULL;
- adapter->dev.parent = &dev->dev;
- adapter->dev.release = cxl_release_adapter;
- pci_set_drvdata(dev, adapter);
spin_lock_init(&adapter->afu_list_lock);
+ if (cxl_alloc_adapter_nr(adapter))
+ goto err1;
+
+ if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+ goto err2;
+
return adapter;
+
+err2:
+ cxl_remove_adapter_nr(adapter);
+err1:
+ kfree(adapter);
+ return NULL;
}
+#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
+
static int sanitise_adapter_regs(struct cxl *adapter)
{
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ /* Clear PSL tberror bit by writing 1 to it */
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
return cxl_tlb_slb_invalidate(adapter);
}
-static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+/* This should contain *only* operations that can safely be done in
+ * both creation and recovery.
+ */
+static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
- struct cxl *adapter;
- bool free = true;
int rc;
+ adapter->dev.parent = &dev->dev;
+ adapter->dev.release = cxl_release_adapter;
+ pci_set_drvdata(dev, adapter);
- if (!(adapter = cxl_alloc_adapter(dev)))
- return ERR_PTR(-ENOMEM);
+ rc = pci_enable_device(dev);
+ if (rc) {
+ dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
+ return rc;
+ }
if ((rc = cxl_read_vsec(adapter, dev)))
- goto err1;
+ return rc;
if ((rc = cxl_vsec_looks_ok(adapter, dev)))
- goto err1;
+ return rc;
if ((rc = setup_cxl_bars(dev)))
- goto err1;
+ return rc;
if ((rc = switch_card_to_cxl(dev)))
- goto err1;
-
- if ((rc = cxl_alloc_adapter_nr(adapter)))
- goto err1;
-
- if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
- goto err2;
+ return rc;
if ((rc = cxl_update_image_control(adapter)))
- goto err2;
+ return rc;
if ((rc = cxl_map_adapter_regs(adapter, dev)))
- goto err2;
+ return rc;
if ((rc = sanitise_adapter_regs(adapter)))
- goto err2;
+ goto err;
if ((rc = init_implementation_adapter_regs(adapter, dev)))
- goto err3;
+ goto err;
if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
- goto err3;
+ goto err;
/* If recovery happened, the last step is to turn on snooping.
* In the non-recovery case this has no effect */
- if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
- goto err3;
- }
+ if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
+ goto err;
+
+ if ((rc = cxl_setup_psl_timebase(adapter, dev)))
+ goto err;
if ((rc = cxl_register_psl_err_irq(adapter)))
- goto err3;
+ goto err;
+
+ return 0;
+
+err:
+ cxl_unmap_adapter_regs(adapter);
+ return rc;
+
+}
+
+static void cxl_deconfigure_adapter(struct cxl *adapter)
+{
+ struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+
+ cxl_release_psl_err_irq(adapter);
+ cxl_unmap_adapter_regs(adapter);
+
+ pci_disable_device(pdev);
+}
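This configure/deconfigure split is the backbone of the EEH support added below: everything that must be redone after a PERST (PCI enable, VSEC parsing, BARs, MMIO maps, error IRQ) lives in cxl_configure_adapter(), with a matching idempotent teardown, while one-time state (adapter number, device name, sysfs, debugfs) stays in cxl_init_adapter()/cxl_remove_adapter(). First probe and slot reset can then share one path — a sketch (illustrative names):

static int my_probe(struct pci_dev *pdev)
{
	struct my_dev *d = my_alloc();		/* one-time state only */

	if (!d)
		return -ENOMEM;
	return my_configure(d, pdev);		/* everything redoable */
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	struct my_dev *d = pci_get_drvdata(pdev);

	/* After the reset, run the very same configure step again. */
	if (my_configure(d, pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}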
+
+static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+{
+ struct cxl *adapter;
+ int rc;
+
+ adapter = cxl_alloc_adapter();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set defaults for parameters which need to persist over
+ * configure/reconfigure
+ */
+ adapter->perst_loads_image = true;
+ adapter->perst_same_image = false;
+
+ rc = cxl_configure_adapter(adapter, dev);
+ if (rc) {
+ pci_disable_device(dev);
+ cxl_release_adapter(&adapter->dev);
+ return ERR_PTR(rc);
+ }
/* Don't care if this one fails: */
cxl_debugfs_adapter_add(adapter);
@@ -1101,37 +1222,25 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
return adapter;
err_put1:
- device_unregister(&adapter->dev);
- free = false;
+ /* This should mirror cxl_remove_adapter, except without the
+ * sysfs parts
+ */
cxl_debugfs_adapter_remove(adapter);
- cxl_release_psl_err_irq(adapter);
-err3:
- cxl_unmap_adapter_regs(adapter);
-err2:
- cxl_remove_adapter_nr(adapter);
-err1:
- if (free)
- kfree(adapter);
+ cxl_deconfigure_adapter(adapter);
+ device_unregister(&adapter->dev);
return ERR_PTR(rc);
}
static void cxl_remove_adapter(struct cxl *adapter)
{
- struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
- pr_devel("cxl_release_adapter\n");
+ pr_devel("cxl_remove_adapter\n");
cxl_sysfs_adapter_remove(adapter);
cxl_debugfs_adapter_remove(adapter);
- cxl_release_psl_err_irq(adapter);
- cxl_unmap_adapter_regs(adapter);
- cxl_remove_adapter_nr(adapter);
- device_unregister(&adapter->dev);
+ cxl_deconfigure_adapter(adapter);
- pci_release_region(pdev, 0);
- pci_release_region(pdev, 2);
- pci_disable_device(pdev);
+ device_unregister(&adapter->dev);
}
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -1145,21 +1254,21 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (cxl_verbose)
dump_cxl_config_space(dev);
- if ((rc = pci_enable_device(dev))) {
- dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
- return rc;
- }
-
adapter = cxl_init_adapter(dev);
if (IS_ERR(adapter)) {
dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
- pci_disable_device(dev);
return PTR_ERR(adapter);
}
for (slice = 0; slice < adapter->slices; slice++) {
- if ((rc = cxl_init_afu(adapter, slice, dev)))
+ if ((rc = cxl_init_afu(adapter, slice, dev))) {
dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
+ continue;
+ }
+
+ rc = cxl_afu_select_best_mode(adapter->afu[slice]);
+ if (rc)
+ dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
}
return 0;
@@ -1183,10 +1292,262 @@ static void cxl_remove(struct pci_dev *dev)
cxl_remove_adapter(adapter);
}
+static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
+ pci_channel_state_t state)
+{
+ struct pci_dev *afu_dev;
+ pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+ pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
+
+ /* There should only be one entry, but go through the list
+ * anyway
+ */
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (!afu_dev->driver)
+ continue;
+
+ afu_dev->error_state = state;
+
+ if (afu_dev->driver->err_handler)
+ afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
+ state);
+ /* Disconnect trumps all, NONE trumps NEED_RESET */
+ if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+ result = PCI_ERS_RESULT_DISCONNECT;
+ else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+ (result == PCI_ERS_RESULT_NEED_RESET))
+ result = PCI_ERS_RESULT_NONE;
+ }
+ return result;
+}
+
+static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct cxl *adapter = pci_get_drvdata(pdev);
+ struct cxl_afu *afu;
+ pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+ int i;
+
+ /* At this point, we could still have an interrupt pending.
+ * Let's try to get them out of the way before they do
+ * anything we don't like.
+ */
+ schedule();
+
+ /* If we're permanently dead, give up. */
+ if (state == pci_channel_io_perm_failure) {
+ /* Tell the AFU drivers; but we don't care what they
+ * say, we're going away.
+ */
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
+ cxl_vphb_error_detected(afu, state);
+ }
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Are we reflashing?
+ *
+ * If we reflash, we could come back as something entirely
+ * different, including a non-CAPI card. As such, by default
+ * we don't participate in the process. We'll be unbound and
+ * the slot re-probed. (TODO: check EEH doesn't blindly rebind
+ * us!)
+ *
+ * However, this isn't the entire story: for reliability
+ * reasons, we usually want to reflash the FPGA on PERST in
+ * order to get back to a more reliable known-good state.
+ *
+ * This causes us a bit of a problem: if we reflash we can't
+ * trust that we'll come back the same - we could have a new
+ * image and have been PERSTed in order to load that
+ * image. However, most of the time we actually *will* come
+ * back the same - for example a regular EEH event.
+ *
+ * Therefore, we allow the user to assert that the image is
+ * indeed the same and that we should continue on into EEH
+ * anyway.
+ */
+ if (adapter->perst_loads_image && !adapter->perst_same_image) {
+ /* TODO take the PHB out of CXL mode */
+ dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /*
+ * At this point, we want to try to recover. We'll always
+ * need a complete slot reset: we don't trust any other reset.
+ *
+ * Now, we go through each AFU:
+ * - We send the driver, if bound, an error_detected callback.
+ * We expect it to clean up, but it can also tell us to give
+ * up and permanently detach the card. To simplify things, if
+ * any bound AFU driver doesn't support EEH, we give up on EEH.
+ *
+ * - We detach all contexts associated with the AFU. This
+ * does not free them, but puts them into a CLOSED state
+ * which causes the associated files to return useful
+ * errors to userland. It also unmaps, but does not free,
+ * any IRQs.
+ *
+ * - We clean up our side: releasing and unmapping resources we hold
+ * so we can wire them up again when the hardware comes back up.
+ *
+ * Driver authors should note:
+ *
+ * - Any contexts you create in your kernel driver (except
+ * those associated with anonymous file descriptors) are
+ * your responsibility to free and recreate. Likewise with
+ * any attached resources.
+ *
+ * - We will take responsibility for re-initialising the
+ * device context (the one set up for you in
+ * cxl_pci_enable_device_hook and accessed through
+ * cxl_get_context). If you've attached IRQs or other
+ * resources to it, they remain yours to free.
+ *
+ * You can call the same functions to release resources as you
+ * normally would: we make sure that these functions continue
+ * to work when the hardware is down.
+ *
+ * Two examples:
+ *
+ * 1) If you normally free all your resources at the end of
+ * each request, or if you use anonymous FDs, your
+ * error_detected callback can simply set a flag to tell
+ * your driver not to start any new calls. You can then
+ * clear the flag in the resume callback.
+ *
+ * 2) If you normally allocate your resources on startup:
+ * * Set a flag in error_detected as above.
+ * * Let CXL detach your contexts.
+ * * In slot_reset, free the old resources and allocate new ones.
+ * * In resume, clear the flag to allow things to start.
+ */
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
+
+ result = cxl_vphb_error_detected(afu, state);
+
+ /* Only continue if everyone agrees on NEED_RESET */
+ if (result != PCI_ERS_RESULT_NEED_RESET)
+ return result;
+
+ cxl_context_detach_all(afu);
+ cxl_afu_deactivate_mode(afu);
+ cxl_deconfigure_afu(afu);
+ }
+ cxl_deconfigure_adapter(adapter);
+
+ return result;
+}
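The first of the two driver-author examples described in the comment above — set a flag in error_detected, clear it in resume — needs very little code. A skeleton (the my_afu_* names are illustrative, not part of the cxl API):

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct my_afu *drv = pci_get_drvdata(pdev);

	drv->frozen = true;		/* refuse to start new work */
	return PCI_ERS_RESULT_NEED_RESET;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	struct my_afu *drv = pci_get_drvdata(pdev);

	drv->frozen = false;		/* hardware is back: allow new calls */
}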
+
+static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct cxl *adapter = pci_get_drvdata(pdev);
+ struct cxl_afu *afu;
+ struct cxl_context *ctx;
+ struct pci_dev *afu_dev;
+ pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
+ pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+ int i;
+
+ if (cxl_configure_adapter(adapter, pdev))
+ goto err;
+
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
+
+ if (cxl_configure_afu(afu, adapter, pdev))
+ goto err;
+
+ if (cxl_afu_select_best_mode(afu))
+ goto err;
+
+ cxl_pci_vphb_reconfigure(afu);
+
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ /* Reset the device context.
+ * TODO: make this less disruptive
+ */
+ ctx = cxl_get_context(afu_dev);
+
+ if (ctx && cxl_release_context(ctx))
+ goto err;
+
+ ctx = cxl_dev_context_init(afu_dev);
+ if (!ctx)
+ goto err;
+
+ afu_dev->dev.archdata.cxl_ctx = ctx;
+
+ if (cxl_afu_check_and_enable(afu))
+ goto err;
+
+ afu_dev->error_state = pci_channel_io_normal;
+
+ /* If there's a driver attached, allow it to
+ * chime in on recovery. Drivers should check
+ * if everything has come back OK, but
+ * shouldn't start new work until we call
+ * their resume function.
+ */
+ if (!afu_dev->driver)
+ continue;
+
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->slot_reset)
+ afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
+
+ if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+ }
+ return result;
+
+err:
+ /* All the bits that happen in both error_detected and cxl_remove
+ * should be idempotent, so we don't need to worry about leaving a mix
+ * of unconfigured and reconfigured resources.
+ */
+ dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void cxl_pci_resume(struct pci_dev *pdev)
+{
+ struct cxl *adapter = pci_get_drvdata(pdev);
+ struct cxl_afu *afu;
+ struct pci_dev *afu_dev;
+ int i;
+
+ /* Everything is back now, so drivers should restart work.
+ * This is not the place to be checking if everything came back up
+ * properly, because there's no return value: do that in slot_reset.
+ */
+ for (i = 0; i < adapter->slices; i++) {
+ afu = adapter->afu[i];
+
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (afu_dev->driver && afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->resume)
+ afu_dev->driver->err_handler->resume(afu_dev);
+ }
+ }
+}
+
+static const struct pci_error_handlers cxl_err_handler = {
+ .error_detected = cxl_pci_error_detected,
+ .slot_reset = cxl_pci_slot_reset,
+ .resume = cxl_pci_resume,
+};
+
struct pci_driver cxl_pci_driver = {
.name = "cxl-pci",
.id_table = cxl_pci_tbl,
.probe = cxl_probe,
.remove = cxl_remove,
.shutdown = cxl_remove,
+ .err_handler = &cxl_err_handler,
};
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 31f38bc71a3d..25868c2ec03e 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -112,12 +112,38 @@ static ssize_t load_image_on_perst_store(struct device *device,
return count;
}
+static ssize_t perst_reloads_same_image_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
+}
+
+static ssize_t perst_reloads_same_image_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+ int rc;
+ int val;
+
+ rc = sscanf(buf, "%i", &val);
+ if ((rc != 1) || !(val == 1 || val == 0))
+ return -EINVAL;
+
+ adapter->perst_same_image = (val == 1 ? true : false);
+ return count;
+}
+
static struct device_attribute adapter_attrs[] = {
__ATTR_RO(caia_version),
__ATTR_RO(psl_revision),
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
__ATTR_RW(load_image_on_perst),
+ __ATTR_RW(perst_reloads_same_image),
__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};
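The new store routine parses with sscanf() and an explicit 0/1 check; the kernel's strtobool() (lib/string.c in this era, later renamed kstrtobool) would do the same in one call, at the cost of also accepting y/n spellings — a sketch using the same adapter field:

static ssize_t perst_reloads_same_image_store(struct device *device,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	bool val;

	if (strtobool(buf, &val))
		return -EINVAL;		/* not 0/1/y/n */
	adapter->perst_same_image = val;
	return count;
}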
@@ -443,12 +469,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
struct afu_config_record *cr = to_cr(kobj);
struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
- u64 i, j, val, size = afu->crs_len;
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
+ u64 i, j, val;
for (i = 0; i < count;) {
val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
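This hunk — and the matching removals in ds1682.c, eeprom.c, eeprom_93xx46.c, max6875.c and at24.c further below — drops open-coded offset checks that the sysfs core now performs itself: for a bin_attribute with a non-zero size, fs/sysfs/file.c clamps off and count against the attribute size before the driver callback runs, so the callback may assume 0 <= off and off + count <= size. The clamping, condensed into one helper for reference:

static size_t clamp_bin_io(size_t off, size_t count, size_t size)
{
	/* Condensed form of what the sysfs core does before ->read/->write */
	if (off >= size)
		return 0;		/* read: EOF; write: nothing to do */
	if (count > size - off)
		count = size - off;	/* truncate to the attribute */
	return count;
}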
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index ae434d87887e..6e1e2adfba8e 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -105,7 +105,7 @@ TRACE_EVENT(cxl_attach,
__entry->num_interrupts = num_interrupts;
),
- TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
+ TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
__entry->card,
__entry->afu,
__entry->pid,
@@ -177,7 +177,7 @@ TRACE_EVENT(cxl_psl_irq,
__entry->dar = dar;
),
- TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
+ TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
__entry->card,
__entry->afu,
__entry->pe,
@@ -233,7 +233,7 @@ TRACE_EVENT(cxl_ste_miss,
__entry->dar = dar;
),
- TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
+ TP_printk("afu%i.%i pe=%i dar=0x%016llx",
__entry->card,
__entry->afu,
__entry->pe,
@@ -264,7 +264,7 @@ TRACE_EVENT(cxl_ste_write,
__entry->v = v;
),
- TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
+ TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
__entry->card,
__entry->afu,
__entry->pe,
@@ -295,7 +295,7 @@ TRACE_EVENT(cxl_pte_miss,
__entry->dar = dar;
),
- TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
+ TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
__entry->card,
__entry->afu,
__entry->pe,
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index b1d1983a84a5..6dd16a6d153f 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
unsigned long addr;
phb = pci_bus_to_host(bus);
- afu = (struct cxl_afu *)phb->private_data;
if (phb == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
+ afu = (struct cxl_afu *)phb->private_data;
+
if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= (unsigned long)phb->cfg_data)
@@ -137,6 +138,26 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
return 0;
}
+
+static inline bool cxl_config_link_ok(struct pci_bus *bus)
+{
+ struct pci_controller *phb;
+ struct cxl_afu *afu;
+
+ /* Config space IO is based on phb->cfg_addr, which is based on
+ * afu_desc_mmio. This isn't safe to read/write when the link
+ * goes down, as EEH tears down MMIO space.
+ *
+ * Check if the link is OK before proceeding.
+ */
+
+ phb = pci_bus_to_host(bus);
+ if (phb == NULL)
+ return false;
+ afu = (struct cxl_afu *)phb->private_data;
+ return cxl_adapter_link_ok(afu->adapter);
+}
+
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
@@ -149,6 +170,9 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
if (rc)
return rc;
+ if (!cxl_config_link_ok(bus))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
/* Can only read 32 bits */
*val = (in_le32(ioaddr) >> shift) & mask;
return PCIBIOS_SUCCESSFUL;
@@ -166,6 +190,9 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
if (rc)
return rc;
+ if (!cxl_config_link_ok(bus))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
/* Can only write 32 bits so do read-modify-write */
mask <<= shift;
val <<= shift;
@@ -239,6 +266,14 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
return 0;
}
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
+{
+ /* When we are reconfigured, the AFU's MMIO space is unmapped
+ * and remapped. We need to reflect this in the PHB's view of
+ * the world.
+ */
+ afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+}
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index b909fb30232a..c7112276a039 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -148,12 +148,6 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
buf, off, count);
- if (off >= DS1682_EEPROM_SIZE)
- return 0;
-
- if (off + count > DS1682_EEPROM_SIZE)
- count = DS1682_EEPROM_SIZE - off;
-
rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
count, buf);
if (rc < 0)
@@ -171,12 +165,6 @@ static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
buf, off, count);
- if (off >= DS1682_EEPROM_SIZE)
- return -ENOSPC;
-
- if (off + count > DS1682_EEPROM_SIZE)
- count = DS1682_EEPROM_SIZE - off;
-
/* Write out to the device */
if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
count, buf) < 0)
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 9536852fd4c6..04f2e1fa9dd1 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -96,17 +96,4 @@ config EEPROM_DIGSY_MTC_CFG
If unsure, say N.
-config EEPROM_SUNXI_SID
- tristate "Allwinner sunxi security ID support"
- depends on ARCH_SUNXI && SYSFS
- help
- This is a driver for the 'security ID' available on various Allwinner
- devices.
-
- Due to the potential risks involved with changing e-fuses,
- this driver is read-only.
-
- This driver can also be built as a module. If so, the module
- will be called sunxi_sid.
-
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 9507aec95e94..fc1e81d29267 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -4,5 +4,4 @@ obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
-obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..c6cb7f8f325e 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -186,19 +186,11 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (count > io_limit)
count = io_limit;
- switch (at24->use_smbus) {
- case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (at24->use_smbus) {
/* Smaller eeproms can work given some SMBus extension calls */
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
- break;
- case I2C_SMBUS_WORD_DATA:
- count = 2;
- break;
- case I2C_SMBUS_BYTE_DATA:
- count = 1;
- break;
- default:
+ } else {
/*
* When we have a better choice than SMBus calls, use a
* combined I2C message. Write address; then read up to
@@ -229,27 +221,10 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
read_time = jiffies;
- switch (at24->use_smbus) {
- case I2C_SMBUS_I2C_BLOCK_DATA:
- status = i2c_smbus_read_i2c_block_data(client, offset,
- count, buf);
- break;
- case I2C_SMBUS_WORD_DATA:
- status = i2c_smbus_read_word_data(client, offset);
- if (status >= 0) {
- buf[0] = status & 0xff;
- buf[1] = status >> 8;
- status = count;
- }
- break;
- case I2C_SMBUS_BYTE_DATA:
- status = i2c_smbus_read_byte_data(client, offset);
- if (status >= 0) {
- buf[0] = status;
- status = count;
- }
- break;
- default:
+ if (at24->use_smbus) {
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+ count, buf);
+ } else {
status = i2c_transfer(client->adapter, msg, 2);
if (status == 2)
status = count;
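The collapsed switch relies on i2c_smbus_read_i2c_block_data_or_emulated(), a helper added to the i2c core around this release: it issues a native I2C_SMBUS_I2C_BLOCK_DATA transfer when the adapter supports one and otherwise emulates the block read with word or byte reads — exactly the fallbacks the old switch open-coded per driver. Typical call shape (reg is an illustrative register offset):

#include <linux/i2c.h>

static int read_block(struct i2c_client *client, u8 reg, u8 *buf, u8 len)
{
	/* Native SMBus block transfer when available, word/byte
	 * emulation otherwise; returns bytes read or a negative errno. */
	return i2c_smbus_read_i2c_block_data_or_emulated(client, reg, len, buf);
}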
@@ -438,9 +413,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
{
struct at24_data *at24;
- if (unlikely(off >= attr->size))
- return -EFBIG;
-
at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
return at24_write(at24, buf, off, count);
}
@@ -689,7 +661,6 @@ static int at24_remove(struct i2c_client *client)
static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
- .owner = THIS_MODULE,
},
.probe = at24_probe,
.remove = at24_remove,
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index b432873def96..7342fd637031 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -88,11 +88,6 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct eeprom_data *data = i2c_get_clientdata(client);
u8 slice;
- if (off > EEPROM_SIZE)
- return 0;
- if (off + count > EEPROM_SIZE)
- count = EEPROM_SIZE - off;
-
/* Only refresh slices which contain requested bytes */
for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
eeprom_update_client(client, slice);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 9ebeacdb8ec4..a6bd9e3fe9d3 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -48,13 +48,6 @@ eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
edev = dev_get_drvdata(dev);
- if (unlikely(off >= edev->bin.size))
- return 0;
- if ((off + count) > edev->bin.size)
- count = edev->bin.size - off;
- if (unlikely(!count))
- return count;
-
cmd_addr = OP_READ << edev->addrlen;
if (edev->addrlen == 7) {
@@ -200,13 +193,6 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
edev = dev_get_drvdata(dev);
- if (unlikely(off >= edev->bin.size))
- return -EFBIG;
- if ((off + count) > edev->bin.size)
- count = edev->bin.size - off;
- if (unlikely(!count))
- return count;
-
/* only write even number of bytes on 16-bit devices */
if (edev->addrlen == 6) {
step = 2;
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 580ff9df5529..e4dd93b2518c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -114,12 +114,6 @@ static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
struct max6875_data *data = i2c_get_clientdata(client);
int slice, max_slice;
- if (off > USER_EEPROM_SIZE)
- return 0;
-
- if (off + count > USER_EEPROM_SIZE)
- count = USER_EEPROM_SIZE - off;
-
/* refresh slices which contain requested bytes */
max_slice = (off + count - 1) >> SLICE_BITS;
for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
@@ -197,6 +191,7 @@ static const struct i2c_device_id max6875_id[] = {
{ "max6875", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, max6875_id);
static struct i2c_driver max6875_driver = {
.driver = {
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
deleted file mode 100644
index 8385177ff32b..000000000000
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
- * http://www.linux-sunxi.org
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver exposes the Allwinner security ID, efuses exported in byte-
- * sized chunks.
- */
-
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-#define DRV_NAME "sunxi-sid"
-
-struct sunxi_sid_data {
- void __iomem *reg_base;
- unsigned int keysize;
-};
-
-/* We read the entire key, due to a 32 bit read alignment requirement. Since we
- * want to return the requested byte, this results in somewhat slower code and
- * uses 4 times more reads as needed but keeps code simpler. Since the SID is
- * only very rarely probed, this is not really an issue.
- */
-static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data,
- const unsigned int offset)
-{
- u32 sid_key;
-
- if (offset >= sid_data->keysize)
- return 0;
-
- sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4));
- sid_key >>= (offset % 4) * 8;
-
- return sid_key; /* Only return the last byte */
-}
-
-static ssize_t sid_read(struct file *fd, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t pos, size_t size)
-{
- struct platform_device *pdev;
- struct sunxi_sid_data *sid_data;
- int i;
-
- pdev = to_platform_device(kobj_to_dev(kobj));
- sid_data = platform_get_drvdata(pdev);
-
- if (pos < 0 || pos >= sid_data->keysize)
- return 0;
- if (size > sid_data->keysize - pos)
- size = sid_data->keysize - pos;
-
- for (i = 0; i < size; i++)
- buf[i] = sunxi_sid_read_byte(sid_data, pos + i);
-
- return i;
-}
-
-static struct bin_attribute sid_bin_attr = {
- .attr = { .name = "eeprom", .mode = S_IRUGO, },
- .read = sid_read,
-};
-
-static int sunxi_sid_remove(struct platform_device *pdev)
-{
- device_remove_bin_file(&pdev->dev, &sid_bin_attr);
- dev_dbg(&pdev->dev, "driver unloaded\n");
-
- return 0;
-}
-
-static const struct of_device_id sunxi_sid_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
- { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
- {/* sentinel */},
-};
-MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
-
-static int sunxi_sid_probe(struct platform_device *pdev)
-{
- struct sunxi_sid_data *sid_data;
- struct resource *res;
- const struct of_device_id *of_dev_id;
- u8 *entropy;
- unsigned int i;
-
- sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data),
- GFP_KERNEL);
- if (!sid_data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(sid_data->reg_base))
- return PTR_ERR(sid_data->reg_base);
-
- of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev);
- if (!of_dev_id)
- return -ENODEV;
- sid_data->keysize = (int)of_dev_id->data;
-
- platform_set_drvdata(pdev, sid_data);
-
- sid_bin_attr.size = sid_data->keysize;
- if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
- return -ENODEV;
-
- entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL);
- for (i = 0; i < sid_data->keysize; i++)
- entropy[i] = sunxi_sid_read_byte(sid_data, i);
- add_device_randomness(entropy, sid_data->keysize);
- kfree(entropy);
-
- dev_dbg(&pdev->dev, "loaded\n");
-
- return 0;
-}
-
-static struct platform_driver sunxi_sid_driver = {
- .probe = sunxi_sid_probe,
- .remove = sunxi_sid_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = sunxi_sid_of_match,
- },
-};
-module_platform_driver(sunxi_sid_driver);
-
-MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
-MODULE_DESCRIPTION("Allwinner sunxi security id driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index c49d244265ec..70e62d6a3231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -418,7 +418,7 @@ static void genwqe_vma_close(struct vm_area_struct *vma)
kfree(dma_map);
}
-static struct vm_operations_struct genwqe_vma_ops = {
+static const struct vm_operations_struct genwqe_vma_ops = {
.open = genwqe_vma_open,
.close = genwqe_vma_close,
};
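Constifying genwqe_vma_ops is a small hardening win: vm_area_struct->vm_ops is a pointer to const anyway, and a method table that is never written can live in .rodata, where a stray write faults instead of silently redirecting a function pointer. The same one-word change applies to any ops table a driver only ever reads:

static const struct vm_operations_struct my_vma_ops = {
	.open  = my_vma_open,	/* illustrative handlers */
	.close = my_vma_close,
};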
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 12c30b486b27..976df0013633 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -465,7 +465,6 @@ MODULE_DEVICE_TABLE(i2c, isl29003_id);
static struct i2c_driver isl29003_driver = {
.driver = {
.name = ISL29003_DRV_NAME,
- .owner = THIS_MODULE,
.pm = ISL29003_PM_OPS,
},
.probe = isl29003_probe,
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index e3e7f1dc27ba..0c3bb7e3ee80 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -274,7 +274,6 @@ static const struct dev_pm_ops lis3_pm_ops = {
static struct i2c_driver lis3lv02d_i2c_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &lis3_pm_ops,
.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
},
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 518914a82b83..01447ca21c26 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -11,7 +11,7 @@ mei-objs += main.o
mei-objs += amthif.o
mei-objs += wd.o
mei-objs += bus.o
-mei-objs += nfc.o
+mei-objs += bus-fixup.o
mei-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
new file mode 100644
index 000000000000..3e536ca85f7d
--- /dev/null
+++ b/drivers/misc/mei/bus-fixup.c
@@ -0,0 +1,306 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <linux/mei_cl_bus.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
+ 0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
+
+static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
+
+#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
+ 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+#define MEI_UUID_ANY NULL_UUID_LE
+
+/**
+ * number_of_connections - determine whether a client can be on the bus
+ * according to its number of connections
+ * We support only clients:
+ * 1. with a single connection
+ * 2. and fixed clients (max_number_of_connections == 0)
+ *
+ * @cldev: me client device
+ */
+static void number_of_connections(struct mei_cl_device *cldev)
+{
+ dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+ __func__, mei_me_cl_uuid(cldev->me_cl));
+
+ if (cldev->me_cl->props.max_number_of_connections > 1)
+ cldev->do_match = 0;
+}
+
+/**
+ * blacklist - blacklist a client from the bus
+ *
+ * @cldev: me client device
+ */
+static void blacklist(struct mei_cl_device *cldev)
+{
+ dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+ __func__, mei_me_cl_uuid(cldev->me_cl));
+ cldev->do_match = 0;
+}
+
+struct mei_nfc_cmd {
+ u8 command;
+ u8 status;
+ u16 req_id;
+ u32 reserved;
+ u16 data_size;
+ u8 sub_command;
+ u8 data[];
+} __packed;
+
+struct mei_nfc_reply {
+ u8 command;
+ u8 status;
+ u16 req_id;
+ u32 reserved;
+ u16 data_size;
+ u8 sub_command;
+ u8 reply_status;
+ u8 data[];
+} __packed;
+
+struct mei_nfc_if_version {
+ u8 radio_version_sw[3];
+ u8 reserved[3];
+ u8 radio_version_hw[3];
+ u8 i2c_addr;
+ u8 fw_ivn;
+ u8 vendor_id;
+ u8 radio_type;
+} __packed;
+
+
+#define MEI_NFC_CMD_MAINTENANCE 0x00
+#define MEI_NFC_SUBCMD_IF_VERSION 0x01
+
+/* Vendors */
+#define MEI_NFC_VENDOR_INSIDE 0x00
+#define MEI_NFC_VENDOR_NXP 0x01
+
+/* Radio types */
+#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
+#define MEI_NFC_VENDOR_NXP_PN544 0x01
+
+/**
+ * mei_nfc_if_version - get NFC interface version
+ *
+ * @cl: host client (nfc info)
+ * @ver: NFC interface version to be filled in
+ *
+ * Return: 0 on success; < 0 otherwise
+ */
+static int mei_nfc_if_version(struct mei_cl *cl,
+ struct mei_nfc_if_version *ver)
+{
+ struct mei_device *bus;
+ struct mei_nfc_cmd cmd = {
+ .command = MEI_NFC_CMD_MAINTENANCE,
+ .data_size = 1,
+ .sub_command = MEI_NFC_SUBCMD_IF_VERSION,
+ };
+ struct mei_nfc_reply *reply = NULL;
+ size_t if_version_length;
+ int bytes_recv, ret;
+
+ bus = cl->dev;
+
+ WARN_ON(mutex_is_locked(&bus->device_lock));
+
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+ if (ret < 0) {
+ dev_err(bus->dev, "Could not send IF version cmd\n");
+ return ret;
+ }
+
+ /* allocate the reply on the heap rather than on the stack */
+ if_version_length = sizeof(struct mei_nfc_reply) +
+ sizeof(struct mei_nfc_if_version);
+
+ reply = kzalloc(if_version_length, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+
+ ret = 0;
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ dev_err(bus->dev, "Could not read IF version\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+
+ dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+ ver->fw_ivn, ver->vendor_id, ver->radio_type);
+
+err:
+ kfree(reply);
+ return ret;
+}
+
+/**
+ * mei_nfc_radio_name - derive nfc radio name from the interface version
+ *
+ * @ver: NFC radio version
+ *
+ * Return: radio name string
+ */
+static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
+{
+
+ if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
+ if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
+ return "microread";
+ }
+
+ if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
+ if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
+ return "pn544";
+ }
+
+ return NULL;
+}
+
+/**
+ * mei_nfc - The nfc fixup function. The function retrieves the nfc radio
+ * name and sets it as the device name so we can load
+ * the proper device driver for it
+ *
+ * @cldev: me client device (nfc)
+ */
+static void mei_nfc(struct mei_cl_device *cldev)
+{
+ struct mei_device *bus;
+ struct mei_cl *cl;
+ struct mei_me_client *me_cl = NULL;
+ struct mei_nfc_if_version ver;
+ const char *radio_name = NULL;
+ int ret;
+
+ bus = cldev->bus;
+
+ dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
+ __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+
+ mutex_lock(&bus->device_lock);
+ /* we need to connect to INFO GUID */
+ cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ if (IS_ERR(cl)) {
+ ret = PTR_ERR(cl);
+ cl = NULL;
+ dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
+ goto out;
+ }
+
+ me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
+ if (!me_cl) {
+ ret = -ENOTTY;
+ dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
+ goto out;
+ }
+
+ ret = mei_cl_connect(cl, me_cl, NULL);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ mutex_unlock(&bus->device_lock);
+
+ ret = mei_nfc_if_version(cl, &ver);
+ if (ret)
+ goto disconnect;
+
+ radio_name = mei_nfc_radio_name(&ver);
+
+ if (!radio_name) {
+ ret = -ENOENT;
+ dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
+ ret);
+ goto disconnect;
+ }
+
+ dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
+ strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+
+disconnect:
+ mutex_lock(&bus->device_lock);
+ if (mei_cl_disconnect(cl) < 0)
+ dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
+
+ mei_cl_flush_queues(cl, NULL);
+
+out:
+ mei_cl_unlink(cl);
+ mutex_unlock(&bus->device_lock);
+ mei_me_cl_put(me_cl);
+ kfree(cl);
+
+ if (ret)
+ cldev->do_match = 0;
+
+ dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
+}
+
+#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
+
+static struct mei_fixup {
+
+ const uuid_le uuid;
+ void (*hook)(struct mei_cl_device *cldev);
+} mei_fixups[] = {
+ MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
+ MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
+ MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+};
+
+/**
+ * mei_cl_dev_fixup - run fixup handlers
+ *
+ * @cldev: me client device
+ */
+void mei_cl_dev_fixup(struct mei_cl_device *cldev)
+{
+ struct mei_fixup *f;
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
+
+ f = &mei_fixups[i];
+ if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 ||
+ uuid_le_cmp(f->uuid, *uuid) == 0)
+ f->hook(cldev);
+ }
+}
+
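
Every device created on the MEI bus passes through the mei_fixups table above before it is enumerated: a MEI_UUID_ANY entry runs for all clients, a UUID-specific entry runs only for the matching client, and a hook can veto enumeration by clearing cldev->do_match. A minimal sketch of how an additional quirk would plug in; the UUID and hook name are hypothetical and not part of this patch:

/* hypothetical example: keep one more fixed client off the bus */
#define MEI_UUID_FOO UUID_LE(0x12345678, 0x1234, 0x1234, \
			     0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34)

static void foo_fixup(struct mei_cl_device *cldev)
{
	/* a fixup that clears do_match prevents driver binding */
	cldev->do_match = 0;
}

static struct mei_fixup {
	const uuid_le uuid;
	void (*hook)(struct mei_cl_device *cldev);
} mei_fixups[] = {
	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
	MEI_FIXUP(MEI_UUID_FOO, foo_fixup),	/* hypothetical entry */
};
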
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae4d207..eef1c6b46ad8 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -30,276 +30,29 @@
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
-static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- struct mei_cl_driver *driver = to_mei_cl_driver(drv);
- const struct mei_cl_device_id *id;
- const uuid_le *uuid;
- const char *name;
-
- if (!device)
- return 0;
-
- uuid = mei_me_cl_uuid(device->me_cl);
- name = device->name;
-
- if (!driver || !driver->id_table)
- return 0;
-
- id = driver->id_table;
-
- while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
-
- if (!uuid_le_cmp(*uuid, id->uuid)) {
- if (id->name[0]) {
- if (!strncmp(name, id->name, sizeof(id->name)))
- return 1;
- } else {
- return 1;
- }
- }
-
- id++;
- }
-
- return 0;
-}
-
-static int mei_cl_device_probe(struct device *dev)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- struct mei_cl_driver *driver;
- struct mei_cl_device_id id;
-
- if (!device)
- return 0;
-
- driver = to_mei_cl_driver(dev->driver);
- if (!driver || !driver->probe)
- return -ENODEV;
-
- dev_dbg(dev, "Device probe\n");
-
- strlcpy(id.name, device->name, sizeof(id.name));
-
- return driver->probe(device, &id);
-}
-
-static int mei_cl_device_remove(struct device *dev)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- struct mei_cl_driver *driver;
-
- if (!device || !dev->driver)
- return 0;
-
- if (device->event_cb) {
- device->event_cb = NULL;
- cancel_work_sync(&device->event_work);
- }
-
- driver = to_mei_cl_driver(dev->driver);
- if (!driver->remove) {
- dev->driver = NULL;
-
- return 0;
- }
-
- return driver->remove(device);
-}
-
-static ssize_t name_show(struct device *dev, struct device_attribute *a,
- char *buf)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- size_t len;
-
- len = snprintf(buf, PAGE_SIZE, "%s", device->name);
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(name);
-
-static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
- char *buf)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
- size_t len;
-
- len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(uuid);
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
- char *buf)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
- size_t len;
-
- len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
- device->name, MEI_CL_UUID_ARGS(uuid->b));
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *mei_cl_dev_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_uuid.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(mei_cl_dev);
-
-static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
- const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-
- if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
- return -ENOMEM;
-
- if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
- return -ENOMEM;
-
- if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
- device->name, MEI_CL_UUID_ARGS(uuid->b)))
- return -ENOMEM;
-
- return 0;
-}
-
-static struct bus_type mei_cl_bus_type = {
- .name = "mei",
- .dev_groups = mei_cl_dev_groups,
- .match = mei_cl_device_match,
- .probe = mei_cl_device_probe,
- .remove = mei_cl_device_remove,
- .uevent = mei_cl_uevent,
-};
-
-static void mei_cl_dev_release(struct device *dev)
-{
- struct mei_cl_device *device = to_mei_cl_device(dev);
-
- if (!device)
- return;
-
- mei_me_cl_put(device->me_cl);
- kfree(device);
-}
-
-static struct device_type mei_cl_device_type = {
- .release = mei_cl_dev_release,
-};
-
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
- uuid_le uuid)
-{
- struct mei_cl *cl;
-
- list_for_each_entry(cl, &dev->device_list, device_link) {
- if (cl->device && cl->device->me_cl &&
- !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
- return cl;
- }
-
- return NULL;
-}
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- struct mei_me_client *me_cl,
- struct mei_cl *cl,
- char *name)
-{
- struct mei_cl_device *device;
- int status;
-
- device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
- if (!device)
- return NULL;
-
- device->me_cl = mei_me_cl_get(me_cl);
- if (!device->me_cl) {
- kfree(device);
- return NULL;
- }
-
- device->cl = cl;
- device->dev.parent = dev->dev;
- device->dev.bus = &mei_cl_bus_type;
- device->dev.type = &mei_cl_device_type;
-
- strlcpy(device->name, name, sizeof(device->name));
-
- dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
-
- status = device_register(&device->dev);
- if (status) {
- dev_err(dev->dev, "Failed to register MEI device\n");
- mei_me_cl_put(device->me_cl);
- kfree(device);
- return NULL;
- }
-
- cl->device = device;
-
- dev_dbg(&device->dev, "client %s registered\n", name);
-
- return device;
-}
-EXPORT_SYMBOL_GPL(mei_cl_add_device);
-
-void mei_cl_remove_device(struct mei_cl_device *device)
-{
- device_unregister(&device->dev);
-}
-EXPORT_SYMBOL_GPL(mei_cl_remove_device);
-
-int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner)
-{
- int err;
-
- driver->driver.name = driver->name;
- driver->driver.owner = owner;
- driver->driver.bus = &mei_cl_bus_type;
-
- err = driver_register(&driver->driver);
- if (err)
- return err;
-
- pr_debug("mei: driver [%s] registered\n", driver->driver.name);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
-
-void mei_cl_driver_unregister(struct mei_cl_driver *driver)
-{
- driver_unregister(&driver->driver);
-
- pr_debug("mei: driver [%s] unregistered\n", driver->driver.name);
-}
-EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-
+/**
+ * __mei_cl_send - internal client send (write)
+ *
+ * @cl: host client
+ * @buf: buffer to send
+ * @length: buffer length
+ * @blocking: wait for write completion
+ *
+ * Return: written size in bytes or < 0 on error
+ */
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
- struct mei_device *dev;
+ struct mei_device *bus;
struct mei_cl_cb *cb = NULL;
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
- dev = cl->dev;
+ bus = cl->dev;
- mutex_lock(&dev->device_lock);
+ mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -327,16 +80,25 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
rets = mei_cl_write(cl, cb, blocking);
out:
- mutex_unlock(&dev->device_lock);
+ mutex_unlock(&bus->device_lock);
if (rets < 0)
mei_io_cb_free(cb);
return rets;
}
+/**
+ * __mei_cl_recv - internal client receive (read)
+ *
+ * @cl: host client
+ * @buf: buffer to receive the data into
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
{
- struct mei_device *dev;
+ struct mei_device *bus;
struct mei_cl_cb *cb;
size_t r_length;
ssize_t rets;
@@ -344,9 +106,9 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
- dev = cl->dev;
+ bus = cl->dev;
- mutex_lock(&dev->device_lock);
+ mutex_lock(&bus->device_lock);
cb = mei_cl_read_cb(cl, NULL);
if (cb)
@@ -356,9 +118,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
if (rets && rets != -EBUSY)
goto out;
+ /* wait on event only if there is no other waiter */
if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
- mutex_unlock(&dev->device_lock);
+ mutex_unlock(&bus->device_lock);
if (wait_event_interruptible(cl->rx_wait,
(!list_empty(&cl->rd_completed)) ||
@@ -369,7 +132,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
return -ERESTARTSYS;
}
- mutex_lock(&dev->device_lock);
+ mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
rets = -EBUSY;
@@ -396,14 +159,23 @@ copy:
free:
mei_io_cb_free(cb);
out:
- mutex_unlock(&dev->device_lock);
+ mutex_unlock(&bus->device_lock);
return rets;
}
-ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_send - me device send (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return: written size in bytes or < 0 on error
+ */
+ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
- struct mei_cl *cl = device->cl;
+ struct mei_cl *cl = cldev->cl;
if (cl == NULL)
return -ENODEV;
@@ -412,9 +184,18 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
}
EXPORT_SYMBOL_GPL(mei_cl_send);
-ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_recv - client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive the data into
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
+ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
- struct mei_cl *cl = device->cl;
+ struct mei_cl *cl = cldev->cl;
if (cl == NULL)
return -ENODEV;
@@ -423,151 +204,698 @@ ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
}
EXPORT_SYMBOL_GPL(mei_cl_recv);
+/**
+ * mei_bus_event_work - dispatch rx event for a bus device
+ * and schedule new work
+ *
+ * @work: work
+ */
static void mei_bus_event_work(struct work_struct *work)
{
- struct mei_cl_device *device;
+ struct mei_cl_device *cldev;
- device = container_of(work, struct mei_cl_device, event_work);
+ cldev = container_of(work, struct mei_cl_device, event_work);
- if (device->event_cb)
- device->event_cb(device, device->events, device->event_context);
+ if (cldev->event_cb)
+ cldev->event_cb(cldev, cldev->events, cldev->event_context);
- device->events = 0;
+ cldev->events = 0;
/* Prepare for the next read */
- mei_cl_read_start(device->cl, 0, NULL);
+ if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
+ mei_cl_read_start(cldev->cl, 0, NULL);
}
-int mei_cl_register_event_cb(struct mei_cl_device *device,
+/**
+ * mei_cl_bus_notify_event - schedule notify cb on bus client
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_notify_event(struct mei_cl *cl)
+{
+ struct mei_cl_device *cldev = cl->cldev;
+
+ if (!cldev || !cldev->event_cb)
+ return;
+
+ if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+ return;
+
+ if (!cl->notify_ev)
+ return;
+
+ set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
+
+ schedule_work(&cldev->event_work);
+
+ cl->notify_ev = false;
+}
+
+/**
+ * mei_cl_bus_rx_event - schedule rx event
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_rx_event(struct mei_cl *cl)
+{
+ struct mei_cl_device *cldev = cl->cldev;
+
+ if (!cldev || !cldev->event_cb)
+ return;
+
+ if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
+ return;
+
+ set_bit(MEI_CL_EVENT_RX, &cldev->events);
+
+ schedule_work(&cldev->event_work);
+}
+
+/**
+ * mei_cl_register_event_cb - register event callback
+ *
+ * @cldev: me client device
+ * @event_cb: callback function
+ * @events_mask: requested events bitmask
+ * @context: driver context data
+ *
+ * Return: 0 on success
+ * -EALREADY if a callback is already registered
+ * <0 on other errors
+ */
+int mei_cl_register_event_cb(struct mei_cl_device *cldev,
+ unsigned long events_mask,
mei_cl_event_cb_t event_cb, void *context)
{
- if (device->event_cb)
+ int ret;
+
+ if (cldev->event_cb)
return -EALREADY;
- device->events = 0;
- device->event_cb = event_cb;
- device->event_context = context;
- INIT_WORK(&device->event_work, mei_bus_event_work);
+ cldev->events = 0;
+ cldev->events_mask = events_mask;
+ cldev->event_cb = event_cb;
+ cldev->event_context = context;
+ INIT_WORK(&cldev->event_work, mei_bus_event_work);
- mei_cl_read_start(device->cl, 0, NULL);
+ if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
+ ret = mei_cl_read_start(cldev->cl, 0, NULL);
+ if (ret && ret != -EBUSY)
+ return ret;
+ }
+
+ if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
+ mutex_lock(&cldev->cl->dev->device_lock);
+ ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
+ mutex_unlock(&cldev->cl->dev->device_lock);
+ if (ret)
+ return ret;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(mei_cl_register_event_cb);
-void *mei_cl_get_drvdata(const struct mei_cl_device *device)
+/**
+ * mei_cl_get_drvdata - driver data getter
+ *
+ * @cldev: mei client device
+ *
+ * Return: driver private data
+ */
+void *mei_cl_get_drvdata(const struct mei_cl_device *cldev)
{
- return dev_get_drvdata(&device->dev);
+ return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cl_get_drvdata);
-void mei_cl_set_drvdata(struct mei_cl_device *device, void *data)
+/**
+ * mei_cl_set_drvdata - driver data setter
+ *
+ * @cldev: mei client device
+ * @data: data to store
+ */
+void mei_cl_set_drvdata(struct mei_cl_device *cldev, void *data)
{
- dev_set_drvdata(&device->dev, data);
+ dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cl_set_drvdata);
-int mei_cl_enable_device(struct mei_cl_device *device)
+/**
+ * mei_cl_enable_device - enable me client device
+ * create connection with me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_enable_device(struct mei_cl_device *cldev)
{
- int err;
- struct mei_device *dev;
- struct mei_cl *cl = device->cl;
-
- if (cl == NULL)
- return -ENODEV;
-
- dev = cl->dev;
-
- mutex_lock(&dev->device_lock);
+ struct mei_device *bus = cldev->bus;
+ struct mei_cl *cl;
+ int ret;
+
+ cl = cldev->cl;
+
+ if (!cl) {
+ mutex_lock(&bus->device_lock);
+ cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ mutex_unlock(&bus->device_lock);
+ if (IS_ERR(cl))
+ return PTR_ERR(cl);
+ /* update pointers */
+ cldev->cl = cl;
+ cl->cldev = cldev;
+ }
+ mutex_lock(&bus->device_lock);
if (mei_cl_is_connected(cl)) {
- mutex_unlock(&dev->device_lock);
- dev_warn(dev->dev, "Already connected");
- return -EBUSY;
+ ret = 0;
+ goto out;
}
- err = mei_cl_connect(cl, device->me_cl, NULL);
- if (err < 0) {
- mutex_unlock(&dev->device_lock);
- dev_err(dev->dev, "Could not connect to the ME client");
-
- return err;
+ if (!mei_me_cl_is_active(cldev->me_cl)) {
+ dev_err(&cldev->dev, "me client is not active\n");
+ ret = -ENOTTY;
+ goto out;
}
- mutex_unlock(&dev->device_lock);
+ ret = mei_cl_connect(cl, cldev->me_cl, NULL);
+ if (ret < 0)
+ dev_err(&cldev->dev, "cannot connect\n");
- if (device->event_cb)
- mei_cl_read_start(device->cl, 0, NULL);
+out:
+ mutex_unlock(&bus->device_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mei_cl_enable_device);
-int mei_cl_disable_device(struct mei_cl_device *device)
+/**
+ * mei_cl_disable_device - disable me client device
+ * disconnect from the me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_disable_device(struct mei_cl_device *cldev)
{
+ struct mei_device *bus;
+ struct mei_cl *cl;
int err;
- struct mei_device *dev;
- struct mei_cl *cl = device->cl;
- if (cl == NULL)
+ if (!cldev || !cldev->cl)
return -ENODEV;
- dev = cl->dev;
+ cl = cldev->cl;
+
+ bus = cldev->bus;
- device->event_cb = NULL;
+ cldev->event_cb = NULL;
- mutex_lock(&dev->device_lock);
+ mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- dev_err(dev->dev, "Already disconnected");
+ dev_err(bus->dev, "Already disconnected");
err = 0;
goto out;
}
err = mei_cl_disconnect(cl);
- if (err < 0) {
- dev_err(dev->dev, "Could not disconnect from the ME client");
- goto out;
- }
+ if (err < 0)
+ dev_err(bus->dev, "Could not disconnect from the ME client");
+out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
+ mei_cl_unlink(cl);
-out:
- mutex_unlock(&dev->device_lock);
- return err;
+ kfree(cl);
+ cldev->cl = NULL;
+ mutex_unlock(&bus->device_lock);
+ return err;
}
EXPORT_SYMBOL_GPL(mei_cl_disable_device);
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+/**
+ * mei_cl_device_find - find matching entry in the driver id table
+ *
+ * @cldev: me client device
+ * @cldrv: me client driver
+ *
+ * Return: id on success; NULL if no id matches
+ */
+static const
+struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev,
+ struct mei_cl_driver *cldrv)
+{
+ const struct mei_cl_device_id *id;
+ const uuid_le *uuid;
+
+ uuid = mei_me_cl_uuid(cldev->me_cl);
+
+ id = cldrv->id_table;
+ while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
+ if (!uuid_le_cmp(*uuid, id->uuid)) {
+
+ if (!cldev->name[0])
+ return id;
+
+ if (!strncmp(cldev->name, id->name, sizeof(id->name)))
+ return id;
+ }
+
+ id++;
+ }
+
+ return NULL;
+}
+
+/**
+ * mei_cl_device_match - device match function
+ *
+ * @dev: device
+ * @drv: driver
+ *
+ * Return: 1 if a matching device was found, 0 otherwise
+ */
+static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
+ const struct mei_cl_device_id *found_id;
+
+ if (!cldev)
+ return 0;
+
+ if (!cldev->do_match)
+ return 0;
+
+ if (!cldrv || !cldrv->id_table)
+ return 0;
+
+ found_id = mei_cl_device_find(cldev, cldrv);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * mei_cl_device_probe - bus probe function
+ *
+ * @dev: device
+ *
+ * Return: 0 on success; < 0 otherwise
+ */
+static int mei_cl_device_probe(struct device *dev)
+{
+ struct mei_cl_device *cldev;
+ struct mei_cl_driver *cldrv;
+ const struct mei_cl_device_id *id;
+
+ cldev = to_mei_cl_device(dev);
+ cldrv = to_mei_cl_driver(dev->driver);
+
+ if (!cldev)
+ return 0;
+
+ if (!cldrv || !cldrv->probe)
+ return -ENODEV;
+
+ id = mei_cl_device_find(cldev, cldrv);
+ if (!id)
+ return -ENODEV;
+
+ __module_get(THIS_MODULE);
+
+ return cldrv->probe(cldev, id);
+}
+
+/**
+ * mei_cl_device_remove - remove device from the bus
+ *
+ * @dev: device
+ *
+ * Return: 0 on success; < 0 otherwise
+ */
+static int mei_cl_device_remove(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct mei_cl_driver *cldrv;
+ int ret = 0;
+
+ if (!cldev || !dev->driver)
+ return 0;
+
+ if (cldev->event_cb) {
+ cldev->event_cb = NULL;
+ cancel_work_sync(&cldev->event_work);
+ }
+
+ cldrv = to_mei_cl_driver(dev->driver);
+ if (cldrv->remove)
+ ret = cldrv->remove(cldev);
+
+ module_put(THIS_MODULE);
+ dev->driver = NULL;
+ return ret;
+
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%s", cldev->name);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(uuid);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+ size_t len;
+
+ len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
+ cldev->name, MEI_CL_UUID_ARGS(uuid->b));
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *mei_cl_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_uuid.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mei_cl_dev);
+
+/**
+ * mei_cl_device_uevent - me client bus uevent handler
+ *
+ * @dev: device
+ * @env: uevent kobject
+ *
+ * Return: 0 on success, -ENOMEM when add_uevent_var fails
+ */
+static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+
+ if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
+ cldev->name, MEI_CL_UUID_ARGS(uuid->b)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static struct bus_type mei_cl_bus_type = {
+ .name = "mei",
+ .dev_groups = mei_cl_dev_groups,
+ .match = mei_cl_device_match,
+ .probe = mei_cl_device_probe,
+ .remove = mei_cl_device_remove,
+ .uevent = mei_cl_device_uevent,
+};
+
+static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
+{
+ if (bus)
+ get_device(bus->dev);
+
+ return bus;
+}
+
+static void mei_dev_bus_put(struct mei_device *bus)
{
- struct mei_cl_device *device = cl->device;
+ if (bus)
+ put_device(bus->dev);
+}
- if (!device || !device->event_cb)
+static void mei_cl_dev_release(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+
+ if (!cldev)
return;
- set_bit(MEI_CL_EVENT_RX, &device->events);
+ mei_me_cl_put(cldev->me_cl);
+ mei_dev_bus_put(cldev->bus);
+ kfree(cldev);
+}
+
+static struct device_type mei_cl_device_type = {
+ .release = mei_cl_dev_release,
+};
+
+/**
+ * mei_cl_dev_alloc - allocate and initialize a mei client device
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ *
+ * Return: allocated device structure or NULL on allocation failure
+ */
+static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus,
+ struct mei_me_client *me_cl)
+{
+ struct mei_cl_device *cldev;
+
+ cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+ if (!cldev)
+ return NULL;
+
+ device_initialize(&cldev->dev);
+ cldev->dev.parent = bus->dev;
+ cldev->dev.bus = &mei_cl_bus_type;
+ cldev->dev.type = &mei_cl_device_type;
+ cldev->bus = mei_dev_bus_get(bus);
+ cldev->me_cl = mei_me_cl_get(me_cl);
+ cldev->is_added = 0;
+ INIT_LIST_HEAD(&cldev->bus_list);
+
+ return cldev;
+}
+
+/**
+ * mei_cl_dev_setup - setup me client device
+ * run fixup routines and set the device name
+ *
+ * @bus: mei device
+ * @cldev: me client device
+ *
+ * Return: true if the device is eligible for enumeration
+ */
+static bool mei_cl_dev_setup(struct mei_device *bus,
+ struct mei_cl_device *cldev)
+{
+ cldev->do_match = 1;
+ mei_cl_dev_fixup(cldev);
+
+ if (cldev->do_match)
+ dev_set_name(&cldev->dev, "mei:%s:%pUl",
+ cldev->name, mei_me_cl_uuid(cldev->me_cl));
- schedule_work(&device->event_work);
+ return cldev->do_match == 1;
}
-void mei_cl_bus_remove_devices(struct mei_device *dev)
+/**
+ * mei_cl_bus_dev_add - add a me client device
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success; < 0 on failure
+ */
+static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
- struct mei_cl *cl, *next;
+ int ret;
+
+ dev_dbg(cldev->bus->dev, "adding %pUL\n", mei_me_cl_uuid(cldev->me_cl));
+ ret = device_add(&cldev->dev);
+ if (!ret)
+ cldev->is_added = 1;
- mutex_lock(&dev->device_lock);
- list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
- if (cl->device)
- mei_cl_remove_device(cl->device);
+ return ret;
+}
- list_del(&cl->device_link);
- mei_cl_unlink(cl);
- kfree(cl);
+/**
+ * mei_cl_bus_dev_stop - stop the driver
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
+{
+ if (cldev->is_added)
+ device_release_driver(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_dev_destroy - destroy the me client device object
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
+{
+ if (!cldev->is_added)
+ return;
+
+ device_del(&cldev->dev);
+
+ mutex_lock(&cldev->bus->cl_bus_lock);
+ list_del_init(&cldev->bus_list);
+ mutex_unlock(&cldev->bus->cl_bus_lock);
+
+ cldev->is_added = 0;
+ put_device(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_remove_device - remove a device from the bus
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
+{
+ mei_cl_bus_dev_stop(cldev);
+ mei_cl_bus_dev_destroy(cldev);
+}
+
+/**
+ * mei_cl_bus_remove_devices - remove all devices from the bus
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_remove_devices(struct mei_device *bus)
+{
+ struct mei_cl_device *cldev, *next;
+
+ list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
+ mei_cl_bus_remove_device(cldev);
+}
+
+
+/**
+ * mei_cl_dev_init - allocate and initialize a mei client device
+ * based on the me client
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ */
+static void mei_cl_dev_init(struct mei_device *bus, struct mei_me_client *me_cl)
+{
+ struct mei_cl_device *cldev;
+
+ dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
+
+ if (me_cl->bus_added)
+ return;
+
+ cldev = mei_cl_dev_alloc(bus, me_cl);
+ if (!cldev)
+ return;
+
+ mutex_lock(&cldev->bus->cl_bus_lock);
+ me_cl->bus_added = true;
+ list_add_tail(&cldev->bus_list, &bus->device_list);
+ mutex_unlock(&cldev->bus->cl_bus_lock);
+
+}
+
+/**
+ * mei_cl_bus_rescan - scan the me clients list and create
+ * devices for eligible clients
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_rescan(struct mei_device *bus)
+{
+ struct mei_cl_device *cldev, *n;
+ struct mei_me_client *me_cl;
+
+ down_read(&bus->me_clients_rwsem);
+ list_for_each_entry(me_cl, &bus->me_clients, list)
+ mei_cl_dev_init(bus, me_cl);
+ up_read(&bus->me_clients_rwsem);
+
+ mutex_lock(&bus->cl_bus_lock);
+ list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
+
+ if (!mei_me_cl_is_active(cldev->me_cl)) {
+ mei_cl_bus_remove_device(cldev);
+ continue;
+ }
+
+ if (cldev->is_added)
+ continue;
+
+ if (mei_cl_dev_setup(bus, cldev))
+ mei_cl_bus_dev_add(cldev);
+ else {
+ list_del_init(&cldev->bus_list);
+ put_device(&cldev->dev);
+ }
}
- mutex_unlock(&dev->device_lock);
+ mutex_unlock(&bus->cl_bus_lock);
+
+ dev_dbg(bus->dev, "rescan end");
}
+int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner)
+{
+ int err;
+
+ cldrv->driver.name = cldrv->name;
+ cldrv->driver.owner = owner;
+ cldrv->driver.bus = &mei_cl_bus_type;
+
+ err = driver_register(&cldrv->driver);
+ if (err)
+ return err;
+
+ pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
+
+void mei_cl_driver_unregister(struct mei_cl_driver *cldrv)
+{
+ driver_unregister(&cldrv->driver);
+
+ pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
+}
+EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
+
+
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
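
Taken together, the reworked bus gives client drivers a conventional driver-model flow: register a mei_cl_driver with an id table, enable the device in probe, and exchange data with mei_cl_send()/mei_cl_recv(), with events delivered through the callback registered via mei_cl_register_event_cb() and its new events mask argument. A minimal consumer sketch under those assumptions; the driver name, UUID, and buffer handling are hypothetical, and mei_cl_driver_register() is the usual THIS_MODULE wrapper from <linux/mei_cl_bus.h>:

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/mei_cl_bus.h>

/* hypothetical client UUID; a real driver uses the FW-assigned one */
#define SAMPLE_UUID UUID_LE(0x11111111, 0x2222, 0x3333, \
			    0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44)

static void sample_event_cb(struct mei_cl_device *cldev, u32 events,
			    void *context)
{
	u8 buf[64];

	/* only the RX bit was requested below, so only reads show up here */
	if (events & BIT(MEI_CL_EVENT_RX))
		mei_cl_recv(cldev, buf, sizeof(buf));
}

static int sample_probe(struct mei_cl_device *cldev,
			const struct mei_cl_device_id *id)
{
	int ret;

	ret = mei_cl_enable_device(cldev);
	if (ret)
		return ret;

	/* subscribe to RX only; BIT(MEI_CL_EVENT_NOTIF) may be or-ed in
	 * when the firmware supports notifications */
	ret = mei_cl_register_event_cb(cldev, BIT(MEI_CL_EVENT_RX),
				       sample_event_cb, NULL);
	if (ret)
		mei_cl_disable_device(cldev);
	return ret;
}

static int sample_remove(struct mei_cl_device *cldev)
{
	return mei_cl_disable_device(cldev);
}

static struct mei_cl_device_id sample_tbl[] = {
	{ .name = "sample", .uuid = SAMPLE_UUID },
	{ }
};
MODULE_DEVICE_TABLE(mei, sample_tbl);

static struct mei_cl_driver sample_driver = {
	.id_table = sample_tbl,
	.name = "sample",
	.probe = sample_probe,
	.remove = sample_remove,
};

static int __init sample_init(void)
{
	return mei_cl_driver_register(&sample_driver);
}

static void __exit sample_exit(void)
{
	mei_cl_driver_unregister(&sample_driver);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
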
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 6decbe136ea7..a6c87c713193 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -555,10 +555,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
+ init_waitqueue_head(&cl->ev_wait);
INIT_LIST_HEAD(&cl->rd_completed);
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
- INIT_LIST_HEAD(&cl->device_link);
cl->writing_state = MEI_IDLE;
cl->state = MEI_FILE_INITIALIZING;
cl->dev = dev;
@@ -690,16 +690,12 @@ void mei_host_client_init(struct work_struct *work)
mei_wd_host_init(dev, me_cl);
mei_me_cl_put(me_cl);
- me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
- if (me_cl)
- mei_nfc_host_init(dev, me_cl);
- mei_me_cl_put(me_cl);
-
-
dev->dev_state = MEI_DEV_ENABLED;
dev->reset_count = 0;
mutex_unlock(&dev->device_lock);
+ mei_cl_bus_rescan(dev);
+
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
pm_runtime_autosuspend(dev->dev);
@@ -841,45 +837,22 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
-
-
/**
- * mei_cl_disconnect - disconnect host client from the me one
+ * __mei_cl_disconnect - disconnect host client from the me one
+ * internal function; runtime pm has to be acquired already
*
* @cl: host client
*
- * Locking: called under "dev->device_lock" lock
- *
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_disconnect(struct mei_cl *cl)
+static int __mei_cl_disconnect(struct mei_cl *cl)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
int rets;
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
-
dev = cl->dev;
- cl_dbg(dev, cl, "disconnecting");
-
- if (!mei_cl_is_connected(cl))
- return 0;
-
- if (mei_cl_is_fixed_address(cl)) {
- mei_cl_set_disconnected(cl);
- return 0;
- }
-
- rets = pm_runtime_get(dev->dev);
- if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
- cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
- }
-
cl->state = MEI_FILE_DISCONNECTING;
cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
@@ -915,11 +888,52 @@ out:
if (!rets)
cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ cl_dbg(dev, cl, "disconnecting");
+
+ if (!mei_cl_is_connected(cl))
+ return 0;
+
+ if (mei_cl_is_fixed_address(cl)) {
+ mei_cl_set_disconnected(cl);
+ return 0;
+ }
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ rets = __mei_cl_disconnect(cl);
+
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- mei_io_cb_free(cb);
return rets;
}
@@ -1064,11 +1078,23 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
+ if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ /* ignore disconnect return value;
+ * in case of failure reset will be invoked
+ */
+ __mei_cl_disconnect(cl);
+ rets = -EFAULT;
+ goto out;
+ }
+
/* timeout or something went really wrong */
if (!cl->status)
cl->status = -EFAULT;
@@ -1181,6 +1207,221 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
}
/**
+ * mei_cl_notify_fop2req - convert fop to proper request
+ *
+ * @fop: client notification file operation type
+ *
+ * Return: MEI_HBM_NOTIFICATION_START/STOP
+ */
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
+{
+ if (fop == MEI_FOP_NOTIFY_START)
+ return MEI_HBM_NOTIFICATION_START;
+ else
+ return MEI_HBM_NOTIFICATION_STOP;
+}
+
+/**
+ * mei_cl_notify_req2fop - convert notification request to file operation type
+ *
+ * @req: hbm notification request type
+ *
+ * Return: MEI_FOP_NOTIFY_START/STOP
+ */
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
+{
+ if (req == MEI_HBM_NOTIFICATION_START)
+ return MEI_FOP_NOTIFY_START;
+ else
+ return MEI_FOP_NOTIFY_STOP;
+}
+
+/**
+ * mei_cl_irq_notify - send notification request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and an error code otherwise.
+ */
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+ bool request;
+
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ request = mei_cl_notify_fop2req(cb->fop_type);
+ ret = mei_hbm_cl_notify_req(dev, cl, request);
+ if (ret) {
+ cl->status = ret;
+ list_move_tail(&cb->list, &cmpl_list->list);
+ return ret;
+ }
+
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+ return 0;
+}
+
+/**
+ * mei_cl_notify_request - send notification stop/start request
+ *
+ * @cl: host client
+ * @file: associate request with file
+ * @request: 1 for start or 0 for stop
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and an error code otherwise.
+ */
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ enum mei_cb_file_ops fop_type;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!dev->hbm_f_ev_supported) {
+ cl_dbg(dev, cl, "notifications not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
+ fop_type = mei_cl_notify_req2fop(request);
+ cb = mei_io_cb_init(cl, fop_type, file);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ if (mei_hbuf_acquire(dev)) {
+ if (mei_hbm_cl_notify_req(dev, cl, request)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait, cl->notify_en == request,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+
+ if (cl->notify_en != request) {
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ if (!cl->status)
+ cl->status = -EFAULT;
+ }
+
+ rets = cl->status;
+
+out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_notify - raise notification
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
+void mei_cl_notify(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+
+ if (!cl || !cl->dev)
+ return;
+
+ dev = cl->dev;
+
+ if (!cl->notify_en)
+ return;
+
+ cl_dbg(dev, cl, "notify event");
+ cl->notify_ev = true;
+ wake_up_interruptible_all(&cl->ev_wait);
+
+ if (cl->ev_async)
+ kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
+
+ mei_cl_bus_notify_event(cl);
+}
+
+/**
+ * mei_cl_notify_get - get or wait for notification event
+ *
+ * @cl: host client
+ * @block: this request is blocking
+ * @notify_ev: true if notification event was received
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and an error code otherwise.
+ */
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
+{
+ struct mei_device *dev;
+ int rets;
+
+ *notify_ev = false;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
+ if (cl->notify_ev)
+ goto out;
+
+ if (!block)
+ return -EAGAIN;
+
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
+ mutex_lock(&dev->device_lock);
+
+ if (rets < 0)
+ return rets;
+
+out:
+ *notify_ev = cl->notify_ev;
+ cl->notify_ev = false;
+ return 0;
+}
+
+/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
@@ -1356,6 +1597,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
+ int size;
int rets;
@@ -1367,10 +1609,10 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
dev = cl->dev;
-
buf = &cb->buf;
+ size = buf->size;
- cl_dbg(dev, cl, "size=%d\n", buf->size);
+ cl_dbg(dev, cl, "size=%d\n", size);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
@@ -1394,21 +1636,21 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
- rets = buf->size;
+ rets = size;
goto out;
}
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
- rets = buf->size;
+ rets = size;
goto out;
}
/* Check for a maximum length */
- if (buf->size > mei_hbuf_max_len(dev)) {
+ if (size > mei_hbuf_max_len(dev)) {
mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
- mei_hdr.length = buf->size;
+ mei_hdr.length = size;
mei_hdr.msg_complete = 1;
}
@@ -1430,6 +1672,7 @@ out:
else
list_add_tail(&cb->list, &dev->write_list.list);
+ cb = NULL;
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock);
@@ -1444,7 +1687,7 @@ out:
}
}
- rets = buf->size;
+ rets = size;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
@@ -1486,6 +1729,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_CONNECT:
case MEI_FOP_DISCONNECT:
+ case MEI_FOP_NOTIFY_STOP:
+ case MEI_FOP_NOTIFY_START:
if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
@@ -1528,6 +1773,12 @@ void mei_cl_all_wakeup(struct mei_device *dev)
cl_dbg(dev, cl, "Waking up writing client!\n");
wake_up_interruptible(&cl->tx_wait);
}
+
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->ev_wait)) {
+ cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+ wake_up_interruptible(&cl->ev_wait);
+ }
}
}
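
The new notification plumbing in client.c splits into an arming side (mei_cl_notify_request(), which sends the start/stop request and waits for cl->notify_en to flip) and a consuming side (mei_cl_notify() raises cl->notify_ev and wakes waiters; mei_cl_notify_get() retrieves or blocks for the event). Both entry points expect dev->device_lock to be held, per their kernel-doc. A sketch of a caller, with the calling context hypothetical and error handling abbreviated:

/* sketch only: arm FW notifications for a connected client and block
 * until one event arrives */
static int sample_wait_for_notification(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	bool notify_ev = false;
	int ret;

	mutex_lock(&dev->device_lock);

	ret = mei_cl_notify_request(cl, NULL, 1);	/* 1 == start */
	if (ret)
		goto out;

	/* drops and re-takes device_lock internally while waiting */
	ret = mei_cl_notify_get(cl, true, &notify_ev);

out:
	mutex_unlock(&dev->device_lock);
	return ret;
}
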
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 8d7f057f1045..1c7cad07d731 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -219,6 +219,14 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
void mei_host_client_init(struct work_struct *work);
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
+void mei_cl_notify(struct mei_cl *cl);
+
void mei_cl_all_disconnect(struct mei_device *dev);
void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index eb868341247f..4b469cf9e60f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -154,6 +154,12 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
dev->hbm_f_pg_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
+ dev->hbm_f_dc_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
+ dev->hbm_f_dot_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
+ dev->hbm_f_ev_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a4f283165a33..8eec887c8f70 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -52,6 +52,7 @@ static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
MEI_CL_CS(ALREADY_STARTED);
MEI_CL_CS(OUT_OF_RESOURCES);
MEI_CL_CS(MESSAGE_SMALL);
+ MEI_CL_CS(NOT_ALLOWED);
default: return "unknown";
}
#undef MEI_CL_CCS
@@ -89,6 +90,7 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
+ case MEI_CL_CONN_NOT_ALLOWED: return -EBUSY;
default: return -EINVAL;
}
}
@@ -299,6 +301,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+ enum_req->allow_add = dev->hbm_f_dc_supported;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
if (ret) {
@@ -344,6 +347,180 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
}
/**
+ * mei_hbm_add_cl_resp - send response to fw on client add request
+ *
+ * @dev: the device structure
+ * @addr: me address
+ * @status: response status
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_add_client_response *resp;
+ const size_t len = sizeof(struct hbm_add_client_response);
+ int ret;
+
+ dev_dbg(dev->dev, "adding client response\n");
+
+ resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+
+ mei_hbm_hdr(mei_hdr, len);
+ memset(resp, 0, sizeof(struct hbm_add_client_response));
+
+ resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+ resp->me_addr = addr;
+ resp->status = status;
+
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret)
+ dev_err(dev->dev, "add client response write failed: ret = %d\n",
+ ret);
+ return ret;
+}
+
+/**
+ * mei_hbm_fw_add_cl_req - request from the fw to add a client
+ *
+ * @dev: the device structure
+ * @req: add client request
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
+ struct hbm_add_client_request *req)
+{
+ int ret;
+ u8 status = MEI_HBMS_SUCCESS;
+
+ BUILD_BUG_ON(sizeof(struct hbm_add_client_request) !=
+ sizeof(struct hbm_props_response));
+
+ ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
+ if (ret)
+ status = !MEI_HBMS_SUCCESS;
+
+ return mei_hbm_add_cl_resp(dev, req->me_addr, status);
+}
+
+/**
+ * mei_hbm_cl_notify_req - send notification request
+ *
+ * @dev: the device structure
+ * @cl: host client
+ * @start: true for start false for stop
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+ struct mei_cl *cl, u8 start)
+{
+
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_notification_request *req;
+ const size_t len = sizeof(struct hbm_notification_request);
+ int ret;
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+
+ req = (struct hbm_notification_request *)dev->wr_msg.data;
+ req->start = start;
+
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret)
+ dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * notify_res_to_fop - convert notification response to the proper
+ * notification FOP
+ *
+ * @cmd: client notification response command
+ *
+ * Return: MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP;
+ */
+static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd)
+{
+ struct hbm_notification_response *rs =
+ (struct hbm_notification_response *)cmd;
+
+ return mei_cl_notify_req2fop(rs->start);
+}
+
+/**
+ * mei_hbm_cl_notify_start_res - update the client state according
+ * to the notify start response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification start response command
+ */
+static void mei_hbm_cl_notify_start_res(struct mei_device *dev,
+ struct mei_cl *cl,
+ struct mei_hbm_cl_cmd *cmd)
+{
+ struct hbm_notification_response *rs =
+ (struct hbm_notification_response *)cmd;
+
+ cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
+
+ if (rs->status == MEI_HBMS_SUCCESS ||
+ rs->status == MEI_HBMS_ALREADY_STARTED) {
+ cl->notify_en = true;
+ cl->status = 0;
+ } else {
+ cl->status = -EINVAL;
+ }
+}
+
+/**
+ * mei_hbm_cl_notify_stop_res - update the client state according
+ * to the notify stop response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification stop response command
+ */
+static void mei_hbm_cl_notify_stop_res(struct mei_device *dev,
+ struct mei_cl *cl,
+ struct mei_hbm_cl_cmd *cmd)
+{
+ struct hbm_notification_response *rs =
+ (struct hbm_notification_response *)cmd;
+
+ cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
+
+ if (rs->status == MEI_HBMS_SUCCESS ||
+ rs->status == MEI_HBMS_NOT_STARTED) {
+ cl->notify_en = false;
+ cl->status = 0;
+ } else {
+ /* TODO: spec is not clear yet about other possible issues */
+ cl->status = -EINVAL;
+ }
+}
+
+/**
+ * mei_hbm_cl_notify - signal notification event
+ *
+ * @dev: the device structure
+ * @cmd: notification client message
+ */
+static void mei_hbm_cl_notify(struct mei_device *dev,
+ struct mei_hbm_cl_cmd *cmd)
+{
+ struct mei_cl *cl;
+
+ cl = mei_hbm_cl_find_by_cmd(dev, cmd);
+ if (cl)
+ mei_cl_notify(cl);
+}
+
+/**
* mei_hbm_prop_req - request property for a single client
*
* @dev: the device structure
@@ -610,8 +787,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
if (rs->status == MEI_CL_CONN_SUCCESS)
cl->state = MEI_FILE_CONNECTED;
- else
+ else {
cl->state = MEI_FILE_DISCONNECT_REPLY;
+ if (rs->status == MEI_CL_CONN_NOT_FOUND)
+ mei_me_cl_del(dev, cl->me_cl);
+ }
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -654,6 +834,12 @@ static void mei_hbm_cl_res(struct mei_device *dev,
case MEI_FOP_DISCONNECT:
mei_hbm_cl_disconnect_res(dev, cl, rs);
break;
+ case MEI_FOP_NOTIFY_START:
+ mei_hbm_cl_notify_start_res(dev, cl, rs);
+ break;
+ case MEI_FOP_NOTIFY_STOP:
+ mei_hbm_cl_notify_stop_res(dev, cl, rs);
+ break;
default:
return;
}
@@ -694,6 +880,79 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
}
/**
+ * mei_hbm_pg_enter_res - PG enter response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_enter_res(struct mei_device *dev)
+{
+ if (mei_pg_state(dev) != MEI_PG_OFF ||
+ dev->pg_event != MEI_PG_EVENT_WAIT) {
+ dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+ mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+ return -EPROTO;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ wake_up(&dev->wait_pg);
+
+ return 0;
+}
+
+/**
+ * mei_hbm_pg_resume - process the PG resume
+ *
+ * @dev: the device structure.
+ */
+void mei_hbm_pg_resume(struct mei_device *dev)
+{
+ pm_request_resume(dev->dev);
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
+
+/**
+ * mei_hbm_pg_exit_res - PG exit response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_exit_res(struct mei_device *dev)
+{
+ if (mei_pg_state(dev) != MEI_PG_ON ||
+ (dev->pg_event != MEI_PG_EVENT_WAIT &&
+ dev->pg_event != MEI_PG_EVENT_IDLE)) {
+ dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+ mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+ return -EPROTO;
+ }
+
+ switch (dev->pg_event) {
+ case MEI_PG_EVENT_WAIT:
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ wake_up(&dev->wait_pg);
+ break;
+ case MEI_PG_EVENT_IDLE:
+ /*
+ * If the driver is not waiting on this then
+ * this is HW initiated exit from PG.
+ * Start runtime pm resume sequence to exit from PG.
+ */
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ mei_hbm_pg_resume(dev);
+ break;
+ default:
+ WARN(1, "hbm: pg exit response: unexpected pg event = %d\n",
+ dev->pg_event);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
* mei_hbm_config_features - check what hbm features and commands
* are supported by the fw
*
@@ -709,6 +968,17 @@ static void mei_hbm_config_features(struct mei_device *dev)
if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
dev->hbm_f_pg_supported = 1;
+
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
+ dev->hbm_f_dc_supported = 1;
+
+ /* disconnect on connect timeout instead of link reset */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
+ dev->hbm_f_dot_supported = 1;
+
+ /* Notification Event Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
+ dev->hbm_f_ev_supported = 1;
}
/**
@@ -740,6 +1010,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
struct hbm_host_version_response *version_res;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
+ struct hbm_add_client_request *add_cl_req;
+ int ret;
struct mei_hbm_cl_cmd *cl_cmd;
struct hbm_client_connect_request *disconnect_req;
@@ -828,24 +1100,17 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
- dev_dbg(dev->dev, "power gate isolation entry response received\n");
- dev->pg_event = MEI_PG_EVENT_RECEIVED;
- if (waitqueue_active(&dev->wait_pg))
- wake_up(&dev->wait_pg);
+ dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
+ ret = mei_hbm_pg_enter_res(dev);
+ if (ret)
+ return ret;
break;
case MEI_PG_ISOLATION_EXIT_REQ_CMD:
- dev_dbg(dev->dev, "power gate isolation exit request received\n");
- dev->pg_event = MEI_PG_EVENT_RECEIVED;
- if (waitqueue_active(&dev->wait_pg))
- wake_up(&dev->wait_pg);
- else
- /*
- * If the driver is not waiting on this then
- * this is HW initiated exit from PG.
- * Start runtime pm resume sequence to exit from PG.
- */
- pm_request_resume(dev->dev);
+ dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
+ ret = mei_hbm_pg_exit_res(dev);
+ if (ret)
+ return ret;
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
@@ -937,6 +1202,39 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
return -EIO;
}
break;
+
+ case MEI_HBM_ADD_CLIENT_REQ_CMD:
+ dev_dbg(dev->dev, "hbm: add client request received\n");
+ /*
+ * after the host receives the enum_resp
+ * message clients may be added or removed
+ */
+ if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+ dev->hbm_state >= MEI_HBM_STOPPED) {
+ dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+ add_cl_req = (struct hbm_add_client_request *)mei_msg;
+ ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
+ if (ret) {
+ dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
+ ret);
+ return -EIO;
+ }
+ dev_dbg(dev->dev, "hbm: add client request processed\n");
+ break;
+
+ case MEI_HBM_NOTIFY_RES_CMD:
+ dev_dbg(dev->dev, "hbm: notify response received\n");
+ mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
+ break;
+
+ case MEI_HBM_NOTIFICATION_CMD:
+ dev_dbg(dev->dev, "hbm: notification\n");
+ mei_hbm_cl_notify(dev, cl_cmd);
+ break;
+
default:
BUG();
break;
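
Note the symmetry that makes notify_res_to_fop() work: the direction is encoded by mei_cl_notify_fop2req() on the request side and decoded back by mei_cl_notify_req2fop(), and the firmware echoes the start/stop byte in the response, so the driver can recover which file operation completed. A standalone model of that round trip; the numeric values of the HBM constants are illustrative, not taken from the driver headers:

#include <assert.h>

enum fop { FOP_NOTIFY_START, FOP_NOTIFY_STOP };

static unsigned char fop2req(enum fop f)
{
	/* mirrors mei_cl_notify_fop2req: start maps to the start request */
	return f == FOP_NOTIFY_START ? 1 : 0;
}

static enum fop req2fop(unsigned char req)
{
	/* mirrors mei_cl_notify_req2fop: decode the echoed byte */
	return req ? FOP_NOTIFY_START : FOP_NOTIFY_STOP;
}

int main(void)
{
	/* the response carries the request byte back (rs->start), so the
	 * completed file operation is recoverable */
	assert(req2fop(fop2req(FOP_NOTIFY_START)) == FOP_NOTIFY_START);
	assert(req2fop(fop2req(FOP_NOTIFY_STOP)) == FOP_NOTIFY_STOP);
	return 0;
}
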
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 2544db7d1649..a2025a5083a3 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -54,6 +54,9 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
bool mei_hbm_version_is_supported(struct mei_device *dev);
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
+void mei_hbm_pg_resume(struct mei_device *dev);
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+ struct mei_cl *cl, u8 request);
#endif /* _MEI_HBM_H_ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9eb7ed70ace2..a8a68acd3267 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,12 +117,17 @@
#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */
+#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */
+#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
+#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
/*
* MEI HW Section
*/
/* Host Firmware Status Registers in PCI Config Space */
#define PCI_CFG_HFS_1 0x40
+# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
#define PCI_CFG_HFS_4 0x64
@@ -140,7 +145,8 @@
#define ME_CSR_HA 0xC
/* H_HGC_CSR - PGI register */
#define H_HPG_CSR 0x10
-
+/* H_D0I3C - D0I3 Control */
+#define H_D0I3C 0x800
/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
@@ -159,7 +165,14 @@
#define H_IS 0x00000002
/* Host Interrupt Enable */
#define H_IE 0x00000001
+/* Host D0I3 Interrupt Enable */
+#define H_D0I3C_IE 0x00000020
+/* Host D0I3 Interrupt Status */
+#define H_D0I3C_IS 0x00000040
+/* H_CSR masks */
+#define H_CSR_IE_MASK (H_IE | H_D0I3C_IE)
+#define H_CSR_IS_MASK (H_IS | H_D0I3C_IS)
/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
@@ -183,8 +196,14 @@ access to ME_CBD */
#define ME_IE_HRA 0x00000001
-/* register bits - H_HPG_CSR */
-#define H_HPG_CSR_PGIHEXR 0x00000001
-#define H_HPG_CSR_PGI 0x00000002
+/* H_HPG_CSR register bits */
+#define H_HPG_CSR_PGIHEXR 0x00000001
+#define H_HPG_CSR_PGI 0x00000002
+
+/* H_D0I3C register bits */
+#define H_D0I3C_CIP 0x00000001
+#define H_D0I3C_IR 0x00000002
+#define H_D0I3C_I3 0x00000004
+#define H_D0I3C_RR 0x00000008
#endif /* _MEI_HW_MEI_REGS_H_ */
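The four H_D0I3C bits added above encode a request/acknowledge protocol: software sets or clears I3 to request a power-state change (optionally with IR to get a completion interrupt), and CIP reports that a change is still in flight. A minimal sketch of that handshake, reusing the mei_me_d0i3c_read/write accessors introduced in the hw-me.c hunks below; the function name is illustrative only, and the real driver blocks on the H_D0I3C_IS interrupt rather than returning -EINPROGRESS:

static int mei_me_d0i3_request(struct mei_device *dev, bool enter)
{
	u32 reg = mei_me_d0i3c_read(dev);

	if (enter)
		reg |= H_D0I3C_I3;	/* request entry into D0i3 */
	else
		reg &= ~H_D0I3C_I3;	/* request exit from D0i3 */
	reg |= H_D0I3C_IR;		/* ask for an interrupt on completion */
	mei_me_d0i3c_write(dev, reg);

	reg = mei_me_d0i3c_read(dev);	/* read back to flush the posted write */
	if (!(reg & H_D0I3C_CIP))	/* no change in progress: done already */
		return 0;

	return -EINPROGRESS;		/* caller waits for the H_D0I3C_IS event */
}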
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 43d7101ff993..65511d39d89b 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -134,11 +134,40 @@ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
*/
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
- reg &= ~H_IS;
+ reg &= ~H_CSR_IS_MASK;
mei_hcsr_write(dev, reg);
}
/**
+ * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
+ *
+ * @dev: the device structure
+ *
+ * Return: H_D0I3C register value (u32)
+ */
+static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
+{
+ u32 reg;
+
+ reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
+ trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
+
+ return reg;
+}
+
+/**
+ * mei_me_d0i3c_write - writes H_D0I3C register to device
+ *
+ * @dev: the device structure
+ * @reg: new register value
+ */
+static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
+{
+ trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
+ mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
+}
+
+/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
@@ -176,12 +205,25 @@ static int mei_me_fw_status(struct mei_device *dev,
*/
static void mei_me_hw_config(struct mei_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
- u32 hcsr = mei_hcsr_read(dev);
+ u32 hcsr, reg;
+
/* Doesn't change in runtime */
+ hcsr = mei_hcsr_read(dev);
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+ reg = 0;
+ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ hw->d0i3_supported =
+ ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
+
hw->pg_state = MEI_PG_OFF;
+ if (hw->d0i3_supported) {
+ reg = mei_me_d0i3c_read(dev);
+ if (reg & H_D0I3C_I3)
+ hw->pg_state = MEI_PG_ON;
+ }
}
/**
@@ -208,7 +250,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- if ((hcsr & H_IS) == H_IS)
+ if (hcsr & H_CSR_IS_MASK)
mei_hcsr_write(dev, hcsr);
}
/**
@@ -220,7 +262,7 @@ static void mei_me_intr_enable(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr |= H_IE;
+ hcsr |= H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
@@ -233,7 +275,7 @@ static void mei_me_intr_disable(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr &= ~H_IE;
+ hcsr &= ~H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
@@ -253,57 +295,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
/* complete this write before we set host ready on another CPU */
mmiowb();
}
-/**
- * mei_me_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @intr_enable: if interrupt should be enabled after reset.
- *
- * Return: always 0
- */
-static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
-{
- u32 hcsr = mei_hcsr_read(dev);
-
- /* H_RST may be found lit before reset is started,
- * for example if preceding reset flow hasn't completed.
- * In that case asserting H_RST will be ignored, therefore
- * we need to clean H_RST bit to start a successful reset sequence.
- */
- if ((hcsr & H_RST) == H_RST) {
- dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
- hcsr &= ~H_RST;
- mei_hcsr_set(dev, hcsr);
- hcsr = mei_hcsr_read(dev);
- }
-
- hcsr |= H_RST | H_IG | H_IS;
-
- if (intr_enable)
- hcsr |= H_IE;
- else
- hcsr &= ~H_IE;
-
- dev->recvd_hw_ready = false;
- mei_hcsr_write(dev, hcsr);
-
- /*
- * Host reads the H_CSR once to ensure that the
- * posted write to H_CSR completes.
- */
- hcsr = mei_hcsr_read(dev);
-
- if ((hcsr & H_RST) == 0)
- dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
-
- if ((hcsr & H_RDY) == H_RDY)
- dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
-
- if (intr_enable == false)
- mei_me_hw_reset_release(dev);
-
- return 0;
-}
/**
* mei_me_host_set_ready - enable device
@@ -314,7 +305,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr |= H_IE | H_IG | H_RDY;
+ hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
mei_hcsr_set(dev, hcsr);
}
@@ -601,13 +592,13 @@ static void mei_me_pg_unset(struct mei_device *dev)
}
/**
- * mei_me_pg_enter_sync - perform pg entry procedure
+ * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
*
* @dev: the device structure
*
* Return: 0 on success, an error code otherwise
*/
-int mei_me_pg_enter_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -638,13 +629,13 @@ int mei_me_pg_enter_sync(struct mei_device *dev)
}
/**
- * mei_me_pg_exit_sync - perform pg exit procedure
+ * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
*
* @dev: the device structure
*
* Return: 0 on success, an error code otherwise
*/
-int mei_me_pg_exit_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -712,8 +703,12 @@ static bool mei_me_pg_in_transition(struct mei_device *dev)
*/
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
+ struct mei_me_hw *hw = to_me_hw(dev);
u32 reg = mei_me_mecsr_read(dev);
+ if (hw->d0i3_supported)
+ return true;
+
if ((reg & ME_PGIC_HRA) == 0)
goto notsupported;
@@ -723,7 +718,8 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev)
return true;
notsupported:
- dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
+ dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
+ hw->d0i3_supported,
!!(reg & ME_PGIC_HRA),
dev->version.major_version,
dev->version.minor_version,
@@ -734,11 +730,211 @@ notsupported:
}
/**
- * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ * mei_me_d0i3_set - write d0i3 register bit on mei device.
*
* @dev: the device structure
+ * @intr: ask for interrupt
+ *
+ * Return: D0I3C register value
*/
-static void mei_me_pg_intr(struct mei_device *dev)
+static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
+{
+ u32 reg = mei_me_d0i3c_read(dev);
+
+ reg |= H_D0I3C_I3;
+ if (intr)
+ reg |= H_D0I3C_IR;
+ else
+ reg &= ~H_D0I3C_IR;
+ mei_me_d0i3c_write(dev, reg);
+ /* read it to ensure HW consistency */
+ reg = mei_me_d0i3c_read(dev);
+ return reg;
+}
+
+/**
+ * mei_me_d0i3_unset - clear d0i3 register bit on mei device.
+ *
+ * @dev: the device structure
+ *
+ * Return: D0I3C register value
+ */
+static u32 mei_me_d0i3_unset(struct mei_device *dev)
+{
+ u32 reg = mei_me_d0i3c_read(dev);
+
+ reg &= ~H_D0I3C_I3;
+ reg |= H_D0I3C_IR;
+ mei_me_d0i3c_write(dev, reg);
+ /* read it to ensure HW consistency */
+ reg = mei_me_d0i3c_read(dev);
+ return reg;
+}
+
+/**
+ * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_enter_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+ unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ int ret;
+ u32 reg;
+
+ reg = mei_me_d0i3c_read(dev);
+ if (reg & H_D0I3C_I3) {
+ /* we are in d0i3, nothing to do */
+ dev_dbg(dev->dev, "d0i3 set not needed\n");
+ ret = 0;
+ goto on;
+ }
+
+ /* PGI entry procedure */
+ dev->pg_event = MEI_PG_EVENT_WAIT;
+
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+ if (ret)
+ /* FIXME: should we reset here? */
+ goto out;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+ ret = -ETIME;
+ goto out;
+ }
+ /* end PGI entry procedure */
+
+ dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+ reg = mei_me_d0i3_set(dev, true);
+ if (!(reg & H_D0I3C_CIP)) {
+ dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
+ ret = 0;
+ goto on;
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+ reg = mei_me_d0i3c_read(dev);
+ if (!(reg & H_D0I3C_I3)) {
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ ret = 0;
+on:
+ hw->pg_state = MEI_PG_ON;
+out:
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
+ return ret;
+}
+
+/**
+ * mei_me_d0i3_enter - perform d0i3 entry procedure:
+ * no hbm PG handshake, no waiting for confirmation;
+ * runs with interrupts disabled
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_enter(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg;
+
+ reg = mei_me_d0i3c_read(dev);
+ if (reg & H_D0I3C_I3) {
+ /* we are in d0i3, nothing to do */
+ dev_dbg(dev->dev, "already d0i3 : set not needed\n");
+ goto on;
+ }
+
+ mei_me_d0i3_set(dev, false);
+on:
+ hw->pg_state = MEI_PG_ON;
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ dev_dbg(dev->dev, "d0i3 enter\n");
+ return 0;
+}
+
+/**
+ * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_exit_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+ int ret;
+ u32 reg;
+
+ dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+ reg = mei_me_d0i3c_read(dev);
+ if (!(reg & H_D0I3C_I3)) {
+ /* we are not in d0i3, nothing to do */
+ dev_dbg(dev->dev, "d0i3 exit not needed\n");
+ ret = 0;
+ goto off;
+ }
+
+ reg = mei_me_d0i3_unset(dev);
+ if (!(reg & H_D0I3C_CIP)) {
+ dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
+ ret = 0;
+ goto off;
+ }
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+ reg = mei_me_d0i3c_read(dev);
+ if (reg & H_D0I3C_I3) {
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ ret = 0;
+off:
+ hw->pg_state = MEI_PG_OFF;
+out:
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+
+ dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
+ return ret;
+}
+
+/**
+ * mei_me_pg_legacy_intr - perform legacy pg processing
+ * in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_legacy_intr(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
@@ -752,6 +948,162 @@ static void mei_me_pg_intr(struct mei_device *dev)
}
/**
+ * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_d0i3_intr(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
+ (hw->intr_source & H_D0I3C_IS)) {
+ dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+ if (hw->pg_state == MEI_PG_ON) {
+ hw->pg_state = MEI_PG_OFF;
+ if (dev->hbm_state != MEI_HBM_IDLE) {
+ /*
+ * force H_RDY because it could be
+ * wiped off during PG
+ */
+ dev_dbg(dev->dev, "d0i3 set host ready\n");
+ mei_me_host_set_ready(dev);
+ }
+ } else {
+ hw->pg_state = MEI_PG_ON;
+ }
+
+ wake_up(&dev->wait_pg);
+ }
+
+ if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+ /*
+ * HW sent some data and we are in D0i3, so
+ * we got here because of HW initiated exit from D0i3.
+ * Start runtime pm resume sequence to exit low power state.
+ */
+ dev_dbg(dev->dev, "d0i3 want resume\n");
+ mei_hbm_pg_resume(dev);
+ }
+}
+
+/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (hw->d0i3_supported)
+ mei_me_d0i3_intr(dev);
+ else
+ mei_me_pg_legacy_intr(dev);
+}
+
+/**
+ * mei_me_pg_enter_sync - perform runtime pm entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int mei_me_pg_enter_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (hw->d0i3_supported)
+ return mei_me_d0i3_enter_sync(dev);
+ else
+ return mei_me_pg_legacy_enter_sync(dev);
+}
+
+/**
+ * mei_me_pg_exit_sync - perform runtime pm exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int mei_me_pg_exit_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (hw->d0i3_supported)
+ return mei_me_d0i3_exit_sync(dev);
+ else
+ return mei_me_pg_legacy_exit_sync(dev);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ int ret;
+ u32 hcsr;
+
+ if (intr_enable) {
+ mei_me_intr_enable(dev);
+ if (hw->d0i3_supported) {
+ ret = mei_me_d0i3_exit_sync(dev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ hcsr = mei_hcsr_read(dev);
+ /* H_RST may be found lit before reset is started,
+ * for example if preceding reset flow hasn't completed.
+ * In that case asserting H_RST will be ignored, therefore
+ * we need to clean H_RST bit to start a successful reset sequence.
+ */
+ if ((hcsr & H_RST) == H_RST) {
+ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ hcsr &= ~H_RST;
+ mei_hcsr_set(dev, hcsr);
+ hcsr = mei_hcsr_read(dev);
+ }
+
+ hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
+
+ if (!intr_enable)
+ hcsr &= ~H_CSR_IE_MASK;
+
+ dev->recvd_hw_ready = false;
+ mei_hcsr_write(dev, hcsr);
+
+ /*
+ * Host reads the H_CSR once to ensure that the
+ * posted write to H_CSR completes.
+ */
+ hcsr = mei_hcsr_read(dev);
+
+ if ((hcsr & H_RST) == 0)
+ dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
+
+ if ((hcsr & H_RDY) == H_RDY)
+ dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+
+ if (!intr_enable) {
+ mei_me_hw_reset_release(dev);
+ if (hw->d0i3_supported) {
+ ret = mei_me_d0i3_enter(dev);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -759,16 +1111,20 @@ static void mei_me_pg_intr(struct mei_device *dev)
*
* Return: irqreturn_t
*/
-
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
- struct mei_device *dev = (struct mei_device *) dev_id;
- u32 hcsr = mei_hcsr_read(dev);
+ struct mei_device *dev = (struct mei_device *)dev_id;
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr;
- if ((hcsr & H_IS) != H_IS)
+ hcsr = mei_hcsr_read(dev);
+ if (!(hcsr & H_CSR_IS_MASK))
return IRQ_NONE;
- /* clear H_IS bit in H_CSR */
+ hw->intr_source = hcsr & H_CSR_IS_MASK;
+ dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
+
+ /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
mei_hcsr_write(dev, hcsr);
return IRQ_WAKE_THREAD;
@@ -796,11 +1152,6 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
- /* Ack the interrupt here
- * In case of MSI we don't go through the quick handler */
- if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
- mei_clear_interrupts(dev);
-
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
dev_warn(dev->dev, "FW not ready: resetting.\n");
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 6022d52af6f6..2ee14dc1b2ea 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -50,13 +50,17 @@ struct mei_cfg {
* struct mei_me_hw - me hw specific data
*
* @cfg: per device generation config and ops
- * @mem_addr: io memory address
- * @pg_state: power gating state
+ * @mem_addr: io memory address
+ * @intr_source: interrupt source
+ * @pg_state: power gating state
+ * @d0i3_supported: d0i3 support
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
+ u32 intr_source;
enum mei_pg_state pg_state;
+ bool d0i3_supported;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 16fef6dc4dd7..4cebde85924f 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,14 +31,15 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
-#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
-#define MEI_HBM_TIMEOUT 1 /* 1 second */
+#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
+#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
+#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* MEI Version
*/
-#define HBM_MINOR_VERSION 1
-#define HBM_MAJOR_VERSION 1
+#define HBM_MINOR_VERSION 0
+#define HBM_MAJOR_VERSION 2
/*
* MEI version with PGI support
@@ -46,6 +47,24 @@
#define HBM_MINOR_VERSION_PGI 1
#define HBM_MAJOR_VERSION_PGI 1
+/*
+ * MEI version with Dynamic clients support
+ */
+#define HBM_MINOR_VERSION_DC 0
+#define HBM_MAJOR_VERSION_DC 2
+
+/*
+ * MEI version with disconnect on connection timeout support
+ */
+#define HBM_MINOR_VERSION_DOT 0
+#define HBM_MAJOR_VERSION_DOT 2
+
+/*
+ * MEI version with notification support
+ */
+#define HBM_MINOR_VERSION_EV 0
+#define HBM_MAJOR_VERSION_EV 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -81,6 +100,13 @@
#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b
#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b
+#define MEI_HBM_ADD_CLIENT_REQ_CMD 0x0f
+#define MEI_HBM_ADD_CLIENT_RES_CMD 0x8f
+
+#define MEI_HBM_NOTIFY_REQ_CMD 0x10
+#define MEI_HBM_NOTIFY_RES_CMD 0x90
+#define MEI_HBM_NOTIFICATION_CMD 0x11
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -136,6 +162,7 @@ enum mei_cl_connect_status {
MEI_CL_CONN_ALREADY_STARTED = MEI_HBMS_ALREADY_EXISTS,
MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED,
MEI_CL_CONN_MESSAGE_SMALL = MEI_HBMS_INVALID_PARAMETER,
+ MEI_CL_CONN_NOT_ALLOWED = MEI_HBMS_NOT_ALLOWED,
};
/*
@@ -213,9 +240,17 @@ struct hbm_me_stop_request {
u8 reserved[2];
} __packed;
+/**
+ * struct hbm_host_enum_request - enumeration request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @allow_add: allow dynamic clients to be added (HBM version >= 2.0)
+ * @reserved: reserved
+ */
struct hbm_host_enum_request {
u8 hbm_cmd;
- u8 reserved[3];
+ u8 allow_add;
+ u8 reserved[2];
} __packed;
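The new allow_add byte is how the host opts in to dynamic clients when it sends the enumeration request. The sending side is not part of this hunk, so the assignment below is an assumption; wr_msg is the hbm control-message buffer documented in the mei_dev.h hunks later in this patch:

	struct hbm_host_enum_request *enum_req;

	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
	memset(enum_req, 0, sizeof(*enum_req));
	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
	/* assumed: advertise dynamic client support only when the host has it */
	enum_req->allow_add = dev->hbm_f_dc_supported;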
struct hbm_host_enum_response {
@@ -248,6 +283,38 @@ struct hbm_props_response {
} __packed;
/**
+ * struct hbm_add_client_request - request to add a client
+ * might be sent by fw after enumeration has already completed
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @reserved: reserved
+ * @client_properties: client properties
+ */
+struct hbm_add_client_request {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 reserved[2];
+ struct mei_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_add_client_response - response to add a client
+ * sent by the host to report client addition status to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @status: if HBMS_SUCCESS then the client can now accept connections.
+ * @reserved: reserved
+ */
+struct hbm_add_client_response {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 status;
+ u8 reserved[1];
+} __packed;
+
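mei_hbm_fw_add_cl_req, called from the dispatch hunk in hbm.c above, is not itself shown in this diff. A hedged sketch of the response half it implies: answer the firmware with an hbm_add_client_response carrying a status byte. mei_hbm_hdr and mei_write_message are the existing hbm.c/mei_dev.h helpers, assumed here:

static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
	struct hbm_add_client_response *resp =
		(struct hbm_add_client_response *)dev->wr_msg.data;

	mei_hbm_hdr(&dev->wr_msg.hdr, sizeof(*resp));
	memset(resp, 0, sizeof(*resp));
	resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
	resp->me_addr = addr;
	resp->status = status;	/* MEI_HBMS_SUCCESS: client may accept connections */

	return mei_write_message(dev, &dev->wr_msg.hdr, dev->wr_msg.data);
}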
+/**
* struct hbm_power_gate - power gate request/response
*
* @hbm_cmd: bus message command header
@@ -298,5 +365,62 @@ struct hbm_flow_control {
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
} __packed;
+#define MEI_HBM_NOTIFICATION_START 1
+#define MEI_HBM_NOTIFICATION_STOP 0
+/**
+ * struct hbm_notification_request - start/stop notification request
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @start: 1 to start or 0 to stop asynchronous notifications
+ */
+struct hbm_notification_request {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 start;
+} __packed;
+
+/**
+ * struct hbm_notification_response - start/stop notification response
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @status: (mei_hbm_status) response status for the request
+ * - MEI_HBMS_SUCCESS: successful stop/start
+ * - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found.
+ * - MEI_HBMS_ALREADY_STARTED: for start requests for a previously
+ * started notification.
+ * - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom
+ * asynchronous notifications are currently disabled.
+ *
+ * @start: 1 to start or 0 to stop asynchronous notifications
+ * @reserved: reserved
+ */
+struct hbm_notification_response {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 status;
+ u8 start;
+ u8 reserved[3];
+} __packed;
+
+/**
+ * struct hbm_notification - notification event
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @reserved: reserved for alignment
+ */
+struct hbm_notification {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 reserved[1];
+} __packed;
#endif
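Tying the three notification structures to the driver side: mei_hbm_cl_notify_req, declared in the hbm.h hunk earlier, plausibly fills an hbm_notification_request as sketched below. The header helper and send path are assumed from the existing hbm.c code, and cl->me_cl->client_id is the fw address documented in mei_dev.h:

	struct hbm_notification_request *req =
		(struct hbm_notification_request *)dev->wr_msg.data;

	mei_hbm_hdr(&dev->wr_msg.hdr, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->hbm_cmd = MEI_HBM_NOTIFY_REQ_CMD;
	req->me_addr = cl->me_cl->client_id;
	req->host_addr = cl->host_client_id;
	/* @request is MEI_HBM_NOTIFICATION_START or MEI_HBM_NOTIFICATION_STOP */
	req->start = request;

	return mei_write_message(dev, &dev->wr_msg.hdr, dev->wr_msg.data);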
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2c7a50..e374661652cd 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -331,8 +331,6 @@ void mei_stop(struct mei_device *dev)
mei_cancel_work(dev);
- mei_nfc_host_exit(dev);
-
mei_cl_bus_remove_devices(dev);
mutex_lock(&dev->device_lock);
@@ -392,6 +390,7 @@ void mei_device_init(struct mei_device *dev,
INIT_LIST_HEAD(&dev->me_clients);
mutex_init(&dev->device_lock);
init_rwsem(&dev->me_clients_rwsem);
+ mutex_init(&dev->cl_bus_lock);
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3f3405269c39..c418d7888994 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -403,6 +403,13 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (ret)
return ret;
break;
+
+ case MEI_FOP_NOTIFY_START:
+ case MEI_FOP_NOTIFY_STOP:
+ ret = mei_cl_irq_notify(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
default:
BUG();
}
@@ -424,6 +431,24 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
+/**
+ * mei_connect_timeout - connect/disconnect timeouts
+ *
+ * @cl: host client
+ */
+static void mei_connect_timeout(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ if (cl->state == MEI_FILE_CONNECTING) {
+ if (dev->hbm_f_dot_supported) {
+ cl->state = MEI_FILE_DISCONNECT_REQUIRED;
+ wake_up(&cl->wait);
+ return;
+ }
+ }
+ mei_reset(dev);
+}
/**
* mei_timer - timer function.
@@ -464,7 +489,7 @@ void mei_timer(struct work_struct *work)
if (cl->timer_count) {
if (--cl->timer_count == 0) {
dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
- mei_reset(dev);
+ mei_connect_timeout(cl);
goto out;
}
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..b2f2486b3d75 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -446,6 +446,45 @@ end:
}
/**
+ * mei_ioctl_client_notify_request -
+ * propagate event notification request to client
+ *
+ * @file: pointer to file structure
+ * @request: 0 - disable, 1 - enable
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+{
+ struct mei_cl *cl = file->private_data;
+
+ return mei_cl_notify_request(cl, file, request);
+}
+
+/**
+ * mei_ioctl_client_notify_get - wait for a pending notification event
+ *
+ * @file: pointer to file structure
+ * @notify_get: on output, 1 if an event was pending, 0 otherwise
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+{
+ struct mei_cl *cl = file->private_data;
+ bool notify_ev;
+ bool block = (file->f_flags & O_NONBLOCK) == 0;
+ int rets;
+
+ rets = mei_cl_notify_get(cl, block, &notify_ev);
+ if (rets)
+ return rets;
+
+ *notify_get = notify_ev ? 1 : 0;
+ return 0;
+}
+
+/**
* mei_ioctl - the IOCTL function
*
* @file: pointer to file structure
@@ -459,6 +498,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
struct mei_device *dev;
struct mei_cl *cl = file->private_data;
struct mei_connect_client_data connect_data;
+ u32 notify_get, notify_req;
int rets;
@@ -499,6 +539,33 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
break;
+ case IOCTL_MEI_NOTIFY_SET:
+ dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
+ if (copy_from_user(&notify_req,
+ (char __user *)data, sizeof(notify_req))) {
+ dev_dbg(dev->dev, "failed to copy data from userland\n");
+ rets = -EFAULT;
+ goto out;
+ }
+ rets = mei_ioctl_client_notify_request(file, notify_req);
+ break;
+
+ case IOCTL_MEI_NOTIFY_GET:
+ dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
+ rets = mei_ioctl_client_notify_get(file, &notify_get);
+ if (rets)
+ goto out;
+
+ dev_dbg(dev->dev, "copy connect data to user\n");
+ if (copy_to_user((char __user *)data,
+ &notify_get, sizeof(notify_get))) {
+ dev_dbg(dev->dev, "failed to copy data to userland\n");
+ rets = -EFAULT;
+ goto out;
+ }
+ break;
+
default:
dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
rets = -ENOIOCTLCMD;
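Seen from userspace, the two new ioctls pair up: IOCTL_MEI_NOTIFY_SET arms or disarms event notification on a connected /dev/mei handle, and IOCTL_MEI_NOTIFY_GET consumes one pending event, blocking unless the fd was opened O_NONBLOCK. A hedged usage sketch — uapi macro names as added to linux/mei.h by this series, with handle_event hypothetical:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/mei.h>

	uint32_t enable = 1, pending = 0;

	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) == -1)
		return -1;	/* e.g. fw without HBM >= 2.0 event support */

	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) == -1)
		return -1;	/* EAGAIN when O_NONBLOCK and nothing pending */

	if (pending)
		handle_event(fd);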
@@ -541,6 +608,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
unsigned int mask = 0;
+ bool notify_en;
if (WARN_ON(!cl || !cl->dev))
return POLLERR;
@@ -549,6 +617,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
mutex_lock(&dev->device_lock);
+ notify_en = cl->notify_en && (req_events & POLLPRI);
if (dev->dev_state != MEI_DEV_ENABLED ||
!mei_cl_is_connected(cl)) {
@@ -561,6 +630,12 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
goto out;
}
+ if (notify_en) {
+ poll_wait(file, &cl->ev_wait, wait);
+ if (cl->notify_ev)
+ mask |= POLLPRI;
+ }
+
if (req_events & (POLLIN | POLLRDNORM)) {
poll_wait(file, &cl->rx_wait, wait);
@@ -576,6 +651,26 @@ out:
}
/**
+ * mei_fasync - asynchronous io support
+ *
+ * @fd: file descriptor
+ * @file: pointer to file structure
+ * @band: band bitmap
+ *
+ * Return: POLLERR if the client is not connected, the fasync_helper() result otherwise
+ */
+static int mei_fasync(int fd, struct file *file, int band)
+{
+
+
+ if (!mei_cl_is_connected(cl))
+ return POLLERR;
+
+ return fasync_helper(fd, file, band, &cl->ev_async);
+}
+
+/**
* fw_status_show - mei device attribute show method
*
* @device: device pointer
@@ -627,6 +722,7 @@ static const struct file_operations mei_fops = {
.release = mei_release,
.write = mei_write,
.poll = mei_poll,
+ .fasync = mei_fasync,
.llseek = no_llseek
};
@@ -682,7 +778,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
cdev_init(&dev->cdev, &mei_fops);
- dev->cdev.owner = mei_fops.owner;
+ dev->cdev.owner = parent->driver->owner;
/* Add the device */
ret = cdev_add(&dev->cdev, devno, 1);
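With the poll and fasync hooks above, an application can learn that an event is latched before calling IOCTL_MEI_NOTIFY_GET, either via POLLPRI or via SIGIO after enabling FASYNC. A sketch of the poll-based consumer, assuming the fd was armed with IOCTL_MEI_NOTIFY_SET as in the previous example:

	#include <poll.h>

	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	uint32_t pending = 0;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		/* an event is pending; retrieve and clear it */
		if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) == 0 && pending)
			handle_event(fd);	/* hypothetical consumer */
	}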
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 453f6a333b42..e25ee16c658e 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -89,6 +89,7 @@ enum file_state {
MEI_FILE_CONNECTED,
MEI_FILE_DISCONNECTING,
MEI_FILE_DISCONNECT_REPLY,
+ MEI_FILE_DISCONNECT_REQUIRED,
MEI_FILE_DISCONNECTED,
};
@@ -135,6 +136,8 @@ enum mei_wd_states {
* @MEI_FOP_CONNECT: connect
* @MEI_FOP_DISCONNECT: disconnect
* @MEI_FOP_DISCONNECT_RSP: disconnect response
+ * @MEI_FOP_NOTIFY_START: start notification
+ * @MEI_FOP_NOTIFY_STOP: stop notification
*/
enum mei_cb_file_ops {
MEI_FOP_READ = 0,
@@ -142,6 +145,8 @@ enum mei_cb_file_ops {
MEI_FOP_CONNECT,
MEI_FOP_DISCONNECT,
MEI_FOP_DISCONNECT_RSP,
+ MEI_FOP_NOTIFY_START,
+ MEI_FOP_NOTIFY_STOP,
};
/*
@@ -178,7 +183,7 @@ struct mei_fw_status {
* @client_id: me client id
* @mei_flow_ctrl_creds: flow control credits
* @connect_count: number connections to this client
- * @reserved: reserved
+ * @bus_added: added to bus
*/
struct mei_me_client {
struct list_head list;
@@ -187,7 +192,7 @@ struct mei_me_client {
u8 client_id;
u8 mei_flow_ctrl_creds;
u8 connect_count;
- u8 reserved;
+ u8 bus_added;
};
@@ -230,18 +235,21 @@ struct mei_cl_cb {
* @tx_wait: wait queue for tx completion
* @rx_wait: wait queue for rx completion
* @wait: wait queue for management operation
+ * @ev_wait: notification wait queue
+ * @ev_async: event async notification
* @status: connection status
* @me_cl: fw client connected
* @host_client_id: host id
* @mei_flow_ctrl_creds: transmit flow credentials
* @timer_count: watchdog timer for operation completion
* @reserved: reserved for alignment
+ * @notify_en: notification - enabled/disabled
+ * @notify_ev: pending notification event
* @writing_state: state of the tx
* @rd_pending: pending read credits
* @rd_completed: completed read
*
- * @device: device on the mei client bus
- * @device_link: link to bus clients
+ * @cldev: device on the mei client bus
*/
struct mei_cl {
struct list_head link;
@@ -250,19 +258,21 @@ struct mei_cl {
wait_queue_head_t tx_wait;
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
+ wait_queue_head_t ev_wait;
+ struct fasync_struct *ev_async;
int status;
struct mei_me_client *me_cl;
u8 host_client_id;
u8 mei_flow_ctrl_creds;
u8 timer_count;
u8 reserved;
+ u8 notify_en;
+ u8 notify_ev;
enum mei_file_transaction_states writing_state;
struct list_head rd_pending;
struct list_head rd_completed;
- /* MEI CL bus data */
- struct mei_cl_device *device;
- struct list_head device_link;
+ struct mei_cl_device *cldev;
};
/** struct mei_hw_ops
@@ -329,21 +339,16 @@ struct mei_hw_ops {
};
/* MEI bus API*/
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
- struct mei_me_client *me_cl,
- struct mei_cl *cl,
- char *name);
-void mei_cl_remove_device(struct mei_cl_device *device);
-
+void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_remove_devices(struct mei_device *dev);
+void mei_cl_bus_notify_event(struct mei_cl *cl);
+void mei_cl_bus_remove_devices(struct mei_device *bus);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
/**
* enum mei_pg_event - power gating transition events
@@ -416,7 +421,10 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @wr_msg : the buffer for hbm control messages
*
* @version : HBM protocol version in use
- * @hbm_f_pg_supported : hbm feature pgi protocol
+ * @hbm_f_pg_supported : hbm feature pgi protocol
+ * @hbm_f_dc_supported : hbm feature dynamic clients
+ * @hbm_f_dot_supported : hbm feature disconnect on timeout
+ * @hbm_f_ev_supported : hbm feature event notification
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -447,6 +455,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @reset_work : work item for the device reset
*
* @device_list : mei client bus list
+ * @cl_bus_lock : client bus list lock
*
* @dbgfs_dir : debugfs mei root directory
*
@@ -509,6 +518,9 @@ struct mei_device {
struct hbm_version version;
unsigned int hbm_f_pg_supported:1;
+ unsigned int hbm_f_dc_supported:1;
+ unsigned int hbm_f_dot_supported:1;
+ unsigned int hbm_f_ev_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
@@ -543,6 +555,7 @@ struct mei_device {
/* List of bus devices */
struct list_head device_list;
+ struct mutex cl_bus_lock;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
deleted file mode 100644
index b983c4ecad38..000000000000
--- a/drivers/misc/mei/nfc.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include <linux/mei_cl_bus.h>
-
-#include "mei_dev.h"
-#include "client.h"
-
-struct mei_nfc_cmd {
- u8 command;
- u8 status;
- u16 req_id;
- u32 reserved;
- u16 data_size;
- u8 sub_command;
- u8 data[];
-} __packed;
-
-struct mei_nfc_reply {
- u8 command;
- u8 status;
- u16 req_id;
- u32 reserved;
- u16 data_size;
- u8 sub_command;
- u8 reply_status;
- u8 data[];
-} __packed;
-
-struct mei_nfc_if_version {
- u8 radio_version_sw[3];
- u8 reserved[3];
- u8 radio_version_hw[3];
- u8 i2c_addr;
- u8 fw_ivn;
- u8 vendor_id;
- u8 radio_type;
-} __packed;
-
-struct mei_nfc_connect {
- u8 fw_ivn;
- u8 vendor_id;
-} __packed;
-
-struct mei_nfc_connect_resp {
- u8 fw_ivn;
- u8 vendor_id;
- u16 me_major;
- u16 me_minor;
- u16 me_hotfix;
- u16 me_build;
-} __packed;
-
-struct mei_nfc_hci_hdr {
- u8 cmd;
- u8 status;
- u16 req_id;
- u32 reserved;
- u16 data_size;
-} __packed;
-
-#define MEI_NFC_CMD_MAINTENANCE 0x00
-#define MEI_NFC_CMD_HCI_SEND 0x01
-#define MEI_NFC_CMD_HCI_RECV 0x02
-
-#define MEI_NFC_SUBCMD_CONNECT 0x00
-#define MEI_NFC_SUBCMD_IF_VERSION 0x01
-
-#define MEI_NFC_HEADER_SIZE 10
-
-/**
- * struct mei_nfc_dev - NFC mei device
- *
- * @me_cl: NFC me client
- * @cl: NFC host client
- * @cl_info: NFC info host client
- * @init_work: perform connection to the info client
- * @fw_ivn: NFC Interface Version Number
- * @vendor_id: NFC manufacturer ID
- * @radio_type: NFC radio type
- * @bus_name: bus name
- *
- */
-struct mei_nfc_dev {
- struct mei_me_client *me_cl;
- struct mei_cl *cl;
- struct mei_cl *cl_info;
- struct work_struct init_work;
- u8 fw_ivn;
- u8 vendor_id;
- u8 radio_type;
- char *bus_name;
-};
-
-/* UUIDs for NFC F/W clients */
-const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
- 0x94, 0xd4, 0x50, 0x26,
- 0x67, 0x23, 0x77, 0x5c);
-
-static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
- 0x48, 0xa4, 0xef, 0xab,
- 0xba, 0x8a, 0x12, 0x06);
-
-/* Vendors */
-#define MEI_NFC_VENDOR_INSIDE 0x00
-#define MEI_NFC_VENDOR_NXP 0x01
-
-/* Radio types */
-#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
-#define MEI_NFC_VENDOR_NXP_PN544 0x01
-
-static void mei_nfc_free(struct mei_nfc_dev *ndev)
-{
- if (!ndev)
- return;
-
- if (ndev->cl) {
- list_del(&ndev->cl->device_link);
- mei_cl_unlink(ndev->cl);
- kfree(ndev->cl);
- }
-
- if (ndev->cl_info) {
- list_del(&ndev->cl_info->device_link);
- mei_cl_unlink(ndev->cl_info);
- kfree(ndev->cl_info);
- }
-
- mei_me_cl_put(ndev->me_cl);
- kfree(ndev);
-}
-
-static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
-{
- struct mei_device *dev;
-
- if (!ndev->cl)
- return -ENODEV;
-
- dev = ndev->cl->dev;
-
- switch (ndev->vendor_id) {
- case MEI_NFC_VENDOR_INSIDE:
- switch (ndev->radio_type) {
- case MEI_NFC_VENDOR_INSIDE_UREAD:
- ndev->bus_name = "microread";
- return 0;
-
- default:
- dev_err(dev->dev, "Unknown radio type 0x%x\n",
- ndev->radio_type);
-
- return -EINVAL;
- }
-
- case MEI_NFC_VENDOR_NXP:
- switch (ndev->radio_type) {
- case MEI_NFC_VENDOR_NXP_PN544:
- ndev->bus_name = "pn544";
- return 0;
- default:
- dev_err(dev->dev, "Unknown radio type 0x%x\n",
- ndev->radio_type);
-
- return -EINVAL;
- }
-
- default:
- dev_err(dev->dev, "Unknown vendor ID 0x%x\n",
- ndev->vendor_id);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
-
- struct mei_nfc_cmd cmd;
- struct mei_nfc_reply *reply = NULL;
- struct mei_nfc_if_version *version;
- size_t if_version_length;
- int bytes_recv, ret;
-
- cl = ndev->cl_info;
- dev = cl->dev;
-
- memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
- cmd.command = MEI_NFC_CMD_MAINTENANCE;
- cmd.data_size = 1;
- cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
-
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
- if (ret < 0) {
- dev_err(dev->dev, "Could not send IF version cmd\n");
- return ret;
- }
-
- /* to be sure on the stack we alloc memory */
- if_version_length = sizeof(struct mei_nfc_reply) +
- sizeof(struct mei_nfc_if_version);
-
- reply = kzalloc(if_version_length, GFP_KERNEL);
- if (!reply)
- return -ENOMEM;
-
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
- dev_err(dev->dev, "Could not read IF version\n");
- ret = -EIO;
- goto err;
- }
-
- version = (struct mei_nfc_if_version *)reply->data;
-
- ndev->fw_ivn = version->fw_ivn;
- ndev->vendor_id = version->vendor_id;
- ndev->radio_type = version->radio_type;
-
-err:
- kfree(reply);
- return ret;
-}
-
-static void mei_nfc_init(struct work_struct *work)
-{
- struct mei_device *dev;
- struct mei_cl_device *cldev;
- struct mei_nfc_dev *ndev;
- struct mei_cl *cl_info;
- struct mei_me_client *me_cl_info;
-
- ndev = container_of(work, struct mei_nfc_dev, init_work);
-
- cl_info = ndev->cl_info;
- dev = cl_info->dev;
-
- mutex_lock(&dev->device_lock);
-
- /* check for valid client id */
- me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
- if (!me_cl_info) {
- mutex_unlock(&dev->device_lock);
- dev_info(dev->dev, "nfc: failed to find the info client\n");
- goto err;
- }
-
- if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
- mei_me_cl_put(me_cl_info);
- mutex_unlock(&dev->device_lock);
- dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
-
- goto err;
- }
- mei_me_cl_put(me_cl_info);
- mutex_unlock(&dev->device_lock);
-
- if (mei_nfc_if_version(ndev) < 0) {
- dev_err(dev->dev, "Could not get the NFC interface version");
-
- goto err;
- }
-
- dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
- ndev->fw_ivn, ndev->vendor_id, ndev->radio_type);
-
- mutex_lock(&dev->device_lock);
-
- if (mei_cl_disconnect(cl_info) < 0) {
- mutex_unlock(&dev->device_lock);
- dev_err(dev->dev, "Could not disconnect the NFC INFO ME client");
-
- goto err;
- }
-
- mutex_unlock(&dev->device_lock);
-
- if (mei_nfc_build_bus_name(ndev) < 0) {
- dev_err(dev->dev, "Could not build the bus ID name\n");
- return;
- }
-
- cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
- ndev->bus_name);
- if (!cldev) {
- dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
-
- goto err;
- }
-
- cldev->priv_data = ndev;
-
-
- return;
-
-err:
- mutex_lock(&dev->device_lock);
- mei_nfc_free(ndev);
- mutex_unlock(&dev->device_lock);
-
-}
-
-
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
- struct mei_nfc_dev *ndev;
- struct mei_cl *cl_info, *cl;
- int ret;
-
-
- /* in case of internal reset bail out
- * as the device is already setup
- */
- cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
- if (cl)
- return 0;
-
- ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
- if (!ndev) {
- ret = -ENOMEM;
- goto err;
- }
-
- ndev->me_cl = mei_me_cl_get(me_cl);
- if (!ndev->me_cl) {
- ret = -ENODEV;
- goto err;
- }
-
- cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
- if (IS_ERR(cl_info)) {
- ret = PTR_ERR(cl_info);
- goto err;
- }
-
- list_add_tail(&cl_info->device_link, &dev->device_list);
-
- ndev->cl_info = cl_info;
-
- cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
- if (IS_ERR(cl)) {
- ret = PTR_ERR(cl);
- goto err;
- }
-
- list_add_tail(&cl->device_link, &dev->device_list);
-
- ndev->cl = cl;
-
- INIT_WORK(&ndev->init_work, mei_nfc_init);
- schedule_work(&ndev->init_work);
-
- return 0;
-
-err:
- mei_nfc_free(ndev);
-
- return ret;
-}
-
-void mei_nfc_host_exit(struct mei_device *dev)
-{
- struct mei_nfc_dev *ndev;
- struct mei_cl *cl;
- struct mei_cl_device *cldev;
-
- cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
- if (!cl)
- return;
-
- cldev = cl->device;
- if (!cldev)
- return;
-
- ndev = (struct mei_nfc_dev *)cldev->priv_data;
- if (ndev)
- cancel_work_sync(&ndev->init_work);
-
- cldev->priv_data = NULL;
-
- mutex_lock(&dev->device_lock);
- /* Need to remove the device here
- * since mei_nfc_free will unlink the clients
- */
- mei_cl_remove_device(cldev);
- mei_nfc_free(ndev);
- mutex_unlock(&dev->device_lock);
-}
-
-
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 23f71f5ce4fb..27678d8154e0 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -82,6 +82,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
@@ -128,6 +133,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
struct mei_device *dev;
struct mei_me_hw *hw;
+ unsigned int irqflags;
int err;
@@ -180,17 +186,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_msi(pdev);
/* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_me_irq_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
+ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
+ err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
+ irqflags, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
@@ -319,6 +320,7 @@ static int mei_me_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
+ unsigned int irqflags;
int err;
dev = pci_get_drvdata(pdev);
@@ -327,17 +329,13 @@ static int mei_me_pci_resume(struct device *device)
pci_enable_msi(pdev);
+ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
/* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_me_irq_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
+ err = request_threaded_irq(pdev->irq,
mei_me_irq_quick_handler,
mei_me_irq_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
+ irqflags, KBUILD_MODNAME, dev);
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2bc0f5089f82..b346638833b0 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -364,6 +364,7 @@ int mei_watchdog_register(struct mei_device *dev)
int ret;
+ amt_wd_dev.parent = dev->dev;
/* unlock to preserve correct locking order */
mutex_unlock(&dev->device_lock);
ret = watchdog_register_device(&amt_wd_dev);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
}
static struct scatterlist *
-scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
{
struct scatterlist *sg;
struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
return NULL;
sg_init_table(sg, page_cnt);
for (i = 0; i < page_cnt; i++) {
- page = vmalloc_to_page((void __force *)va);
- if (!page)
- goto p2p_sg_err;
+ page = pfn_to_page(pa >> PAGE_SHIFT);
sg_set_page(&sg[i], page, page_size, 0);
- va += page_size;
+ pa += page_size;
}
return sg;
-p2p_sg_err:
- kfree(sg);
- return NULL;
}
/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
if (!p2p)
return NULL;
- p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+ p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
PAGE_SIZE, num_mmio_pages);
if (!p2p->ppi_sg[SCIF_PPI_MMIO])
goto free_p2p;
p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
- p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+ p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
1 << sg_page_shift,
num_aper_chunks);
p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c
new file mode 100644
index 000000000000..7b4a2da487a5
--- /dev/null
+++ b/drivers/misc/qcom-coincell.c
@@ -0,0 +1,152 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct qcom_coincell {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 base_addr;
+};
+
+#define QCOM_COINCELL_REG_RSET 0x44
+#define QCOM_COINCELL_REG_VSET 0x45
+#define QCOM_COINCELL_REG_ENABLE 0x46
+
+#define QCOM_COINCELL_ENABLE BIT(7)
+
+static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
+static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
+/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
+
+/* if enable==0, rset and vset are ignored */
+static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
+ int vset, bool enable)
+{
+ int i, j, rc;
+
+ /* if disabling, just do that and skip other operations */
+ if (!enable)
+ return regmap_write(chgr->regmap,
+ chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
+
+ /* find index for current-limiting resistor */
+ for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
+ if (rset == qcom_rset_map[i])
+ break;
+
+ if (i >= ARRAY_SIZE(qcom_rset_map)) {
+ dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
+ return -EINVAL;
+ }
+
+ /* find index for charge voltage */
+ for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
+ if (vset == qcom_vset_map[j])
+ break;
+
+ if (j >= ARRAY_SIZE(qcom_vset_map)) {
+ dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
+ return -EINVAL;
+ }
+
+ rc = regmap_write(chgr->regmap,
+ chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
+ if (rc) {
+ /*
+ * This is mainly to flag a bad base_addr (reg) from dts.
+ * Other failures writing to the registers should be
+ * extremely rare, or indicative of problems that
+ * should be reported elsewhere (eg. spmi failure).
+ */
+ dev_err(chgr->dev, "could not write to RSET register\n");
+ return rc;
+ }
+
+ rc = regmap_write(chgr->regmap,
+ chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
+ if (rc)
+ return rc;
+
+ /* set 'enable' register */
+ return regmap_write(chgr->regmap,
+ chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
+ QCOM_COINCELL_ENABLE);
+}
+
+static int qcom_coincell_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct qcom_coincell chgr;
+ u32 rset, vset;
+ bool enable;
+ int rc;
+
+ chgr.dev = &pdev->dev;
+
+ chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chgr.regmap) {
+ dev_err(chgr.dev, "Unable to get regmap\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(node, "reg", &chgr.base_addr);
+ if (rc)
+ return rc;
+
+ enable = !of_property_read_bool(node, "qcom,charger-disable");
+
+ if (enable) {
+ rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
+ if (rc) {
+ dev_err(chgr.dev,
+ "can't find 'qcom,rset-ohms' in DT block");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
+ if (rc) {
+ dev_err(chgr.dev,
+ "can't find 'qcom,vset-millivolts' in DT block");
+ return rc;
+ }
+ }
+
+ return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
+}
+
+static const struct of_device_id qcom_coincell_match_table[] = {
+ { .compatible = "qcom,pm8941-coincell", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
+
+static struct platform_driver qcom_coincell_driver = {
+ .driver = {
+ .name = "qcom-spmi-coincell",
+ .of_match_table = qcom_coincell_match_table,
+ },
+ .probe = qcom_coincell_probe,
+};
+
+module_platform_driver(qcom_coincell_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");
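A worked example of the table lookups in qcom_coincell_chgr_config above: rset = 2100 ohms is index 0 of qcom_rset_map and vset = 3000 mV is index 3 of qcom_vset_map, so enabling the charger performs three regmap writes, as the hypothetical call below illustrates:

	int rc;

	/* 2.1 kohm current limit, 3.0 V target, charging enabled */
	rc = qcom_coincell_chgr_config(&chgr, 2100, 3000, true);
	/* -> RSET (base + 0x44) = 0, VSET (base + 0x45) = 3,
	 *    ENABLE (base + 0x46) = QCOM_COINCELL_ENABLE (BIT(7)) */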
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 95c894482fdd..340b44d9e8cf 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -239,7 +239,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = alloc_pages_exact_node(nid,
+ page = __alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 15c33cc34a80..431e1dd528bc 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -186,10 +186,10 @@ static int sram_probe(struct platform_device *pdev)
if (IS_ERR(sram->virt_base))
return PTR_ERR(sram->virt_base);
- sram->pool = devm_gen_pool_create(sram->dev,
- ilog2(SRAM_GRANULARITY), -1);
- if (!sram->pool)
- return -ENOMEM;
+ sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
+ NUMA_NO_NODE, NULL);
+ if (IS_ERR(sram->pool))
+ return PTR_ERR(sram->pool);
ret = sram_reserve_regions(sram, res);
if (ret)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5027b8ffae43..71b64550b591 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -36,8 +36,6 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -45,9 +43,6 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
-struct ti_st_plat_data *dt_pdata;
-static struct ti_st_plat_data *get_platform_data(struct device *dev);
-
/**
* st_get_plat_device -
* function which returns the reference to the platform device
@@ -469,12 +464,7 @@ long st_kim_start(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
- if (kim_gdata->kim_pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = kim_gdata->kim_pdev->dev.platform_data;
- }
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
do {
/* platform specific enabling code here */
@@ -482,9 +472,9 @@ long st_kim_start(void *kim_data)
pdata->chip_enable(kim_gdata);
/* Configure BT nShutdown to HIGH state */
- gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
mdelay(5); /* FIXME: a proper toggle */
- gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(100);
/* re-initialize the completion */
reinit_completion(&kim_gdata->ldisc_installed);
@@ -534,18 +524,12 @@ long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
- struct ti_st_plat_data *pdata;
+ struct ti_st_plat_data *pdata =
+ kim_gdata->kim_pdev->dev.platform_data;
struct tty_struct *tty = kim_gdata->core_data->tty;
reinit_completion(&kim_gdata->ldisc_installed);
- if (kim_gdata->kim_pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else
- pdata = kim_gdata->kim_pdev->dev.platform_data;
-
-
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
@@ -566,11 +550,11 @@ long st_kim_stop(void *kim_data)
}
/* By default configure BT nShutdown to LOW state */
- gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
mdelay(1);
- gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(1);
- gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
/* platform specific disable */
if (pdata->chip_disable)
@@ -737,52 +721,13 @@ static const struct file_operations list_debugfs_fops = {
* board-*.c file
*/
-static const struct of_device_id kim_of_match[] = {
-{
- .compatible = "kim",
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, kim_of_match);
-
-static struct ti_st_plat_data *get_platform_data(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- const u32 *dt_property;
- int len;
-
- dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
- if (!dt_pdata)
- return NULL;
-
- dt_property = of_get_property(np, "dev_name", &len);
- if (dt_property)
- memcpy(&dt_pdata->dev_name, dt_property, len);
- of_property_read_u32(np, "nshutdown_gpio",
- &dt_pdata->nshutdown_gpio);
- of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
- of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
-
- return dt_pdata;
-}
-
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
struct kim_data_s *kim_gdata;
- struct ti_st_plat_data *pdata;
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
int err;
- if (pdev->dev.of_node)
- pdata = get_platform_data(&pdev->dev);
- else
- pdata = pdev->dev.platform_data;
-
- if (pdata == NULL) {
- dev_err(&pdev->dev, "Platform Data is missing\n");
- return -ENXIO;
- }
-
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
st_kim_devices[pdev->id] = pdev;
@@ -863,16 +808,9 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
- struct ti_st_plat_data *pdata;
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
- if (pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = pdev->dev.platform_data;
- }
-
kim_gdata = platform_get_drvdata(pdev);
/* Free the Bluetooth/FM/GPIO
@@ -890,22 +828,12 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
- kfree(dt_pdata);
- dt_pdata = NULL;
-
return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ti_st_plat_data *pdata;
-
- if (pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = pdev->dev.platform_data;
- }
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
if (pdata->suspend)
return pdata->suspend(pdev, state);
@@ -915,14 +843,7 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state)
static int kim_resume(struct platform_device *pdev)
{
- struct ti_st_plat_data *pdata;
-
- if (pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = pdev->dev.platform_data;
- }
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
if (pdata->resume)
return pdata->resume(pdev);
@@ -939,8 +860,6 @@ static struct platform_driver kim_platform_driver = {
.resume = kim_resume,
.driver = {
.name = "kim",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(kim_of_match),
},
};
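With the device-tree lookup removed, the driver now relies purely on platform data supplied from a board file, as the surviving "board-*.c file" comment above suggests. A minimal registration sketch, with the wilink_* names, GPIO number, TTY name, and baud rate all assumed for illustration (not taken from the patch):

/* Hypothetical board-file snippet; needs <linux/platform_device.h>
 * and <linux/ti_wilink_st.h>. All values below are assumptions. */
static struct ti_st_plat_data wilink_pdata = {
	.nshutdown_gpio	= 137,			/* BT_EN GPIO */
	.dev_name	= "/dev/ttyO1",		/* UART the chip sits on */
	.flow_cntrl	= 1,			/* hardware flow control */
	.baud_rate	= 3000000,
};

static struct platform_device wilink_device = {
	.name	= "kim",			/* matches kim_platform_driver */
	.id	= -1,
	.dev	= {
		.platform_data = &wilink_pdata,
	},
};

/* registered from board init code, e.g.:
 *	platform_device_register(&wilink_device);
 */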
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 518e1b7f2f95..93b4d67cc4a3 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -26,7 +26,6 @@
#include <linux/ti_wilink_st.h>
/**********************************************************************/
-
/* internal functions */
static void send_ll_cmd(struct st_data_s *st_data,
unsigned char cmd)
@@ -54,13 +53,7 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
/* communicate to platform about chip asleep */
kim_data = st_data->kim_data;
- if (kim_data->kim_pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = kim_data->kim_pdev->dev.platform_data;
- }
-
+ pdata = kim_data->kim_pdev->dev.platform_data;
if (pdata->chip_asleep)
pdata->chip_asleep(NULL);
}
@@ -93,13 +86,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
- if (kim_data->kim_pdev->dev.of_node) {
- pr_debug("use device tree data");
- pdata = dt_pdata;
- } else {
- pdata = kim_data->kim_pdev->dev.platform_data;
- }
-
+ pdata = kim_data->kim_pdev->dev.platform_data;
if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index b00335652e52..87a13374fdc0 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(i2c, tsl2550_id);
static struct i2c_driver tsl2550_driver = {
.driver = {
.name = TSL2550_DRV_NAME,
- .owner = THIS_MODULE,
.pm = TSL2550_PM_OPS,
},
.probe = tsl2550_probe,
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 191617492181..ffb56340d0c7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -46,7 +46,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.3-k");
+MODULE_VERSION("1.3.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -110,9 +110,18 @@ MODULE_LICENSE("GPL");
*/
#define VMW_BALLOON_HV_PORT 0x5670
#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
-#define VMW_BALLOON_PROTOCOL_VERSION 2
#define VMW_BALLOON_GUEST_ID 1 /* Linux */
+enum vmwballoon_capabilities {
+ /*
+ * Bit 0 is reserved and not associated with any capability.
+ */
+ VMW_BALLOON_BASIC_CMDS = (1 << 1),
+ VMW_BALLOON_BATCHED_CMDS = (1 << 2)
+};
+
+#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS)
+
#define VMW_BALLOON_CMD_START 0
#define VMW_BALLOON_CMD_GET_TARGET 1
#define VMW_BALLOON_CMD_LOCK 2
@@ -120,32 +129,36 @@ MODULE_LICENSE("GPL");
#define VMW_BALLOON_CMD_GUEST_ID 4
/* error codes */
-#define VMW_BALLOON_SUCCESS 0
-#define VMW_BALLOON_FAILURE -1
-#define VMW_BALLOON_ERROR_CMD_INVALID 1
-#define VMW_BALLOON_ERROR_PPN_INVALID 2
-#define VMW_BALLOON_ERROR_PPN_LOCKED 3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
-#define VMW_BALLOON_ERROR_PPN_PINNED 5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
-#define VMW_BALLOON_ERROR_RESET 7
-#define VMW_BALLOON_ERROR_BUSY 8
-
-#define VMWARE_BALLOON_CMD(cmd, data, result) \
-({ \
- unsigned long __stat, __dummy1, __dummy2; \
- __asm__ __volatile__ ("inl %%dx" : \
- "=a"(__stat), \
- "=c"(__dummy1), \
- "=d"(__dummy2), \
- "=b"(result) : \
- "0"(VMW_BALLOON_HV_MAGIC), \
- "1"(VMW_BALLOON_CMD_##cmd), \
- "2"(VMW_BALLOON_HV_PORT), \
- "3"(data) : \
- "memory"); \
- result &= -1UL; \
- __stat & -1UL; \
+#define VMW_BALLOON_SUCCESS 0
+#define VMW_BALLOON_FAILURE -1
+#define VMW_BALLOON_ERROR_CMD_INVALID 1
+#define VMW_BALLOON_ERROR_PPN_INVALID 2
+#define VMW_BALLOON_ERROR_PPN_LOCKED 3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
+#define VMW_BALLOON_ERROR_PPN_PINNED 5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
+#define VMW_BALLOON_ERROR_RESET 7
+#define VMW_BALLOON_ERROR_BUSY 8
+
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
+
+#define VMWARE_BALLOON_CMD(cmd, data, result) \
+({ \
+ unsigned long __status, __dummy1, __dummy2; \
+ __asm__ __volatile__ ("inl %%dx" : \
+ "=a"(__status), \
+ "=c"(__dummy1), \
+ "=d"(__dummy2), \
+ "=b"(result) : \
+ "0"(VMW_BALLOON_HV_MAGIC), \
+ "1"(VMW_BALLOON_CMD_##cmd), \
+ "2"(VMW_BALLOON_HV_PORT), \
+ "3"(data) : \
+ "memory"); \
+ if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
+ result = __dummy1; \
+ result &= -1UL; \
+ __status & -1UL; \
})
#ifdef CONFIG_DEBUG_FS
@@ -223,11 +236,12 @@ static struct vmballoon balloon;
*/
static bool vmballoon_send_start(struct vmballoon *b)
{
- unsigned long status, dummy;
+ unsigned long status, capabilities;
STATS_INC(b->stats.start);
- status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+ status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
+ capabilities);
if (status == VMW_BALLOON_SUCCESS)
return true;
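For context, a hedged sketch of the capability handshake the reworked macro enables; vmballoon_probe_caps() is a hypothetical helper, not part of this patch:

/* Hypothetical helper built on the macro above: issue START with our
 * capability mask and return the bits the host granted (0 on failure). */
static unsigned long vmballoon_probe_caps(void)
{
	unsigned long status, capabilities;

	/* For START, the macro copies %ecx (the host's capability mask)
	 * into 'result' instead of the usual %ebx value. */
	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
				    capabilities);

	if (status == VMW_BALLOON_SUCCESS_WITH_CAPABILITIES)
		return capabilities;
	if (status == VMW_BALLOON_SUCCESS)
		return VMW_BALLOON_BASIC_CMDS;	/* presumed legacy host */
	return 0;
}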
@@ -402,55 +416,37 @@ static void vmballoon_reset(struct vmballoon *b)
}
/*
- * Allocate (or reserve) a page for the balloon and notify the host. If host
- * refuses the page put it on "refuse" list and allocate another one until host
- * is satisfied. "Refused" pages are released at the end of inflation cycle
- * (when we allocate b->rate_alloc pages).
+ * Notify the host of a ballooned page. If the host rejects the page, put it on
+ * the refuse list; those refused pages are then released at the end of the
+ * inflation cycle.
*/
-static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
{
- struct page *page;
- gfp_t flags;
- unsigned int hv_status;
- int locked;
- flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
-
- do {
- if (!can_sleep)
- STATS_INC(b->stats.alloc);
- else
- STATS_INC(b->stats.sleep_alloc);
+ int locked, hv_status;
- page = alloc_page(flags);
- if (!page) {
- if (!can_sleep)
- STATS_INC(b->stats.alloc_fail);
- else
- STATS_INC(b->stats.sleep_alloc_fail);
- return -ENOMEM;
- }
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
+ if (locked > 0) {
+ STATS_INC(b->stats.refused_alloc);
- /* inform monitor */
- locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
- if (locked > 0) {
- STATS_INC(b->stats.refused_alloc);
-
- if (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
- __free_page(page);
- return -EIO;
- }
+ if (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ __free_page(page);
+ return -EIO;
+ }
- /*
- * Place page on the list of non-balloonable pages
- * and retry allocation, unless we already accumulated
- * too many of them, in which case take a breather.
- */
+ /*
+ * Place page on the list of non-balloonable pages
+ * and retry allocation, unless we already accumulated
+ * too many of them, in which case take a breather.
+ */
+ if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
+ b->n_refused_pages++;
list_add(&page->lru, &b->refused_pages);
- if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
- return -EIO;
+ } else {
+ __free_page(page);
}
- } while (locked != 0);
+ return -EIO;
+ }
/* track allocated page */
list_add(&page->lru, &b->pages);
@@ -512,7 +508,7 @@ static void vmballoon_inflate(struct vmballoon *b)
unsigned int i;
unsigned int allocations = 0;
int error = 0;
- bool alloc_can_sleep = false;
+ gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
@@ -543,19 +539,16 @@ static void vmballoon_inflate(struct vmballoon *b)
__func__, goal, rate, b->rate_alloc);
for (i = 0; i < goal; i++) {
+ struct page *page;
- error = vmballoon_reserve_page(b, alloc_can_sleep);
- if (error) {
- if (error != -ENOMEM) {
- /*
- * Not a page allocation failure, stop this
- * cycle. Maybe we'll get new target from
- * the host soon.
- */
- break;
- }
+ if (flags == VMW_PAGE_ALLOC_NOSLEEP)
+ STATS_INC(b->stats.alloc);
+ else
+ STATS_INC(b->stats.sleep_alloc);
- if (alloc_can_sleep) {
+ page = alloc_page(flags);
+ if (!page) {
+ if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
/*
* CANSLEEP page allocation failed, so guest
* is under severe memory pressure. Quickly
@@ -563,8 +556,10 @@ static void vmballoon_inflate(struct vmballoon *b)
*/
b->rate_alloc = max(b->rate_alloc / 2,
VMW_BALLOON_RATE_ALLOC_MIN);
+ STATS_INC(b->stats.sleep_alloc_fail);
break;
}
+ STATS_INC(b->stats.alloc_fail);
/*
* NOSLEEP page allocation failed, so the guest is
@@ -579,11 +574,16 @@ static void vmballoon_inflate(struct vmballoon *b)
if (i >= b->rate_alloc)
break;
- alloc_can_sleep = true;
+ flags = VMW_PAGE_ALLOC_CANSLEEP;
/* Lower rate for sleeping allocations. */
rate = b->rate_alloc;
+ continue;
}
+ error = vmballoon_lock_page(b, page);
+ if (error)
+ break;
+
if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
cond_resched();
allocations = 0;
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index a721b5d8a9da..9ec262a52656 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -1031,14 +1031,9 @@ int __init vmci_host_init(void)
void __exit vmci_host_exit(void)
{
- int error;
-
vmci_host_device_initialized = false;
- error = misc_deregister(&vmci_host_miscdev);
- if (error)
- pr_warn("Error unregistering character device: %d\n", error);
-
+ misc_deregister(&vmci_host_miscdev);
vmci_ctx_destroy(host_context);
vmci_qp_broker_exit();
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..c742cfd7674e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,10 +47,13 @@
#include "queue.h"
MODULE_ALIAS("mmc:block");
+
+#ifdef KERNEL
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
+#endif
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -208,6 +211,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ mmc_blk_put(md);
+
return ret;
}
@@ -2384,6 +2389,7 @@ force_ro_fail:
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
static const struct mmc_fixup blk_fixups[] =
{
@@ -2406,6 +2412,10 @@ static const struct mmc_fixup blk_fixups[] =
*
* N.B. This doesn't affect SD cards.
*/
+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
@@ -2442,6 +2452,15 @@ static const struct mmc_fixup blk_fixups[] =
MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ /*
+ * On some Kingston eMMCs, performing trim can occasionally result in
+ * unrecoverable data corruption due to a firmware bug.
+ */
+ MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
END_FIXUP
};
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b5a2b145d89f..6f4323c6d653 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -165,7 +165,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
return;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- q->limits.max_discard_sectors = max_discard;
+ blk_queue_max_discard_sectors(q, max_discard);
if (card->erased_byte == 0 && !mmc_can_discard(card))
q->limits.discard_zeroes_data = 1;
q->limits.discard_granularity = card->pref_erase << 9;
@@ -467,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
sg_set_buf(__sg, buf + offset, len);
offset += len;
remain -= len;
- (__sg++)->page_link &= ~0x02;
+ sg_unmark_end(__sg++);
sg_len++;
} while (remain);
}
@@ -475,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
list_for_each_entry(req, &packed->list, queuelist) {
sg_len += blk_rq_map_sg(mq->queue, req, __sg);
__sg = sg + (sg_len - 1);
- (__sg++)->page_link &= ~0x02;
+ sg_unmark_end(__sg++);
}
sg_mark_end(sg + (sg_len - 1));
return sg_len;
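The two conversions above replace an open-coded page_link manipulation with the named helper; sg_unmark_end() is essentially (a sketch of the scatterlist helper as implemented in this kernel era):

static inline void sg_unmark_end(struct scatterlist *sg)
{
	/* bit 1 of page_link flags the final entry of an sg list;
	 * clearing it lets the chain continue past this entry */
	sg->page_link &= ~0x02;
}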
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9ad73f30f744..0520064dc33b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -358,8 +358,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
- mrq->host->context_info.is_done_rcv = true;
- wake_up_interruptible(&mrq->host->context_info.wait);
+ struct mmc_context_info *context_info = &mrq->host->context_info;
+
+ context_info->is_done_rcv = true;
+ wake_up_interruptible(&context_info->wait);
}
static void mmc_wait_done(struct mmc_request *mrq)
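The change above reads mrq->host exactly once; a hedged note on the presumed race it avoids (the hunk itself does not spell it out):

/* Presumed rationale: once is_done_rcv is set, the thread sleeping on
 * context_info->wait may wake, finish the request, and free 'mrq'.
 * The old code dereferenced mrq->host a second time for the wake_up
 * call; caching the context_info pointer up front keeps every access
 * to 'mrq' before the flag is published.
 */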
@@ -2168,6 +2170,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg)
{
unsigned int rem, to = from + nr;
+ int err;
if (!(card->host->caps & MMC_CAP_ERASE) ||
!(card->csd.cmdclass & CCC_ERASE))
@@ -2218,6 +2221,22 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
/* 'from' and 'to' are inclusive */
to -= 1;
+ /*
+ * Special case where only one erase-group fits in the timeout budget:
+ * If the region crosses an erase-group boundary in this particular
+ * case, we will be trimming more than one erase-group, which does not
+ * fit in the timeout budget of the controller, so we need to split it
+ * and call mmc_do_erase() twice if necessary. This special case is
+ * identified by the card->eg_boundary flag.
+ */
+ rem = card->erase_size - (from % card->erase_size);
+ if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+ err = mmc_do_erase(card, from, from + rem - 1, arg);
+ from += rem;
+ if ((err) || (to <= from))
+ return err;
+ }
+
return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
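To make the split concrete, a worked example with assumed numbers (not from the patch):

/* Assume card->erase_size = 1024 sectors, card->eg_boundary = 1,
 * and a trim request of from = 1000, nr = 100 (so to = 1099):
 *
 *	rem = 1024 - (1000 % 1024) = 24;	nr > rem, so split:
 *	mmc_do_erase(card, 1000, 1023, arg);	first erase-group
 *	from = 1024; to = 1099 > from, so continue:
 *	mmc_do_erase(card, 1024, 1099, arg);	the remainder
 *
 * Each call now touches a single erase-group's worth of boundary,
 * staying within the controller's timeout budget.
 */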
@@ -2233,7 +2252,8 @@ EXPORT_SYMBOL(mmc_can_erase);
int mmc_can_trim(struct mmc_card *card)
{
- if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+ if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+ (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
return 1;
return 0;
}
@@ -2313,16 +2333,28 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
if (!qty)
return 0;
+ /*
+ * When specifying a sector range to trim, chances are we might cross
+ * an erase-group boundary even if the number of sectors is less than
+ * one erase-group.
+ * If we can only fit one erase-group in the controller timeout budget,
+ * we have to make sure that erase-group boundaries are not crossed by a
+ * single trim operation. We flag that special case with "eg_boundary".
+ * In all other cases we can just decrement qty and pretend that we
+ * always touch (qty + 1) erase-groups as a simple optimization.
+ */
if (qty == 1)
- return 1;
+ card->eg_boundary = 1;
+ else
+ qty--;
/* Convert qty to sectors */
if (card->erase_shift)
- max_discard = --qty << card->erase_shift;
+ max_discard = qty << card->erase_shift;
else if (mmc_card_sd(card))
- max_discard = qty;
+ max_discard = qty + 1;
else
- max_discard = --qty * card->erase_size;
+ max_discard = qty * card->erase_size;
return max_discard;
}
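A worked example of the qty adjustment, with assumed numbers:

/* Suppose the timeout budget yields qty = 4 erase-groups of
 * card->erase_size = 1024 sectors (erase_shift = 10):
 *
 *	qty > 1, so qty-- = 3  ->  max_discard = 3 << 10 = 3072 sectors
 *
 * A 3072-sector trim can straddle at most 4 erase-groups, which still
 * fits the 4-group budget. With qty = 1 that decrement would give 0,
 * so eg_boundary is flagged instead and mmc_erase() splits any
 * boundary-crossing trim into two operations (see above).
 */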
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 99a9c9011c50..abd933b7029b 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -398,7 +398,7 @@ int mmc_of_parse(struct mmc_host *host)
{
struct device_node *np;
u32 bus_width;
- int len, ret;
+ int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -445,12 +445,12 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
- if (of_find_property(np, "non-removable", &len)) {
+ if (of_property_read_bool(np, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
cd_cap_invert = of_property_read_bool(np, "cd-inverted");
- if (of_find_property(np, "broken-cd", &len))
+ if (of_property_read_bool(np, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -491,41 +491,41 @@ int mmc_of_parse(struct mmc_host *host)
if (ro_cap_invert ^ ro_gpio_invert)
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- if (of_find_property(np, "cap-sd-highspeed", &len))
+ if (of_property_read_bool(np, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
- if (of_find_property(np, "cap-mmc-highspeed", &len))
+ if (of_property_read_bool(np, "cap-mmc-highspeed"))
host->caps |= MMC_CAP_MMC_HIGHSPEED;
- if (of_find_property(np, "sd-uhs-sdr12", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr12"))
host->caps |= MMC_CAP_UHS_SDR12;
- if (of_find_property(np, "sd-uhs-sdr25", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr25"))
host->caps |= MMC_CAP_UHS_SDR25;
- if (of_find_property(np, "sd-uhs-sdr50", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr50"))
host->caps |= MMC_CAP_UHS_SDR50;
- if (of_find_property(np, "sd-uhs-sdr104", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr104"))
host->caps |= MMC_CAP_UHS_SDR104;
- if (of_find_property(np, "sd-uhs-ddr50", &len))
+ if (of_property_read_bool(np, "sd-uhs-ddr50"))
host->caps |= MMC_CAP_UHS_DDR50;
- if (of_find_property(np, "cap-power-off-card", &len))
+ if (of_property_read_bool(np, "cap-power-off-card"))
host->caps |= MMC_CAP_POWER_OFF_CARD;
- if (of_find_property(np, "cap-sdio-irq", &len))
+ if (of_property_read_bool(np, "cap-sdio-irq"))
host->caps |= MMC_CAP_SDIO_IRQ;
- if (of_find_property(np, "full-pwr-cycle", &len))
+ if (of_property_read_bool(np, "full-pwr-cycle"))
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
- if (of_find_property(np, "keep-power-in-suspend", &len))
+ if (of_property_read_bool(np, "keep-power-in-suspend"))
host->pm_caps |= MMC_PM_KEEP_POWER;
- if (of_find_property(np, "enable-sdio-wakeup", &len))
+ if (of_property_read_bool(np, "enable-sdio-wakeup"))
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
- if (of_find_property(np, "mmc-ddr-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
- if (of_find_property(np, "mmc-ddr-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-ddr-1_2v"))
host->caps |= MMC_CAP_1_2V_DDR;
- if (of_find_property(np, "mmc-hs200-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-hs200-1_8v"))
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
- if (of_find_property(np, "mmc-hs200-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-hs200-1_2v"))
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
- if (of_find_property(np, "mmc-hs400-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-hs400-1_8v"))
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
- if (of_find_property(np, "mmc-hs400-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
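The conversions above are behavior-preserving: of_property_read_bool() is a thin presence test (a sketch of the helper as implemented around this kernel version):

static inline bool of_property_read_bool(const struct device_node *np,
					 const char *propname)
{
	struct property *prop = of_find_property(np, propname, NULL);

	return prop ? true : false;
}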
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..8a1e3498261e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -129,6 +129,14 @@ config MMC_SDHCI_OF_ARASAN
If unsure, say N.
+config MMC_SDHCI_OF_AT91
+ tristate "SDHCI OF support for the Atmel SDMMC controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Atmel SDMMC driver.
+
config MMC_SDHCI_OF_ESDHC
tristate "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_PLTFM
@@ -779,6 +787,7 @@ config MMC_TOSHIBA_PCI
config MMC_MTK
tristate "MediaTek SD/MMC Card Interface support"
+ depends on HAS_DMA
help
This selects the MediaTek(R) Secure Digital and MultiMedia Card Interface.
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e928d61c5f4b..4f3452afa6ca 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
+obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index b1eac719a4cc..dca5518b0139 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -118,7 +118,7 @@ struct goldfish_mmc_host {
struct mmc_host *mmc;
struct device *dev;
unsigned char id; /* 16xx chips have 2 MMC blocks */
- void __iomem *virt_base;
+ void *virt_base;
unsigned int phys_base;
int irq;
unsigned char bus_mode;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9a39e0b7e583..bf62e429f7fc 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
-#include <linux/platform_data/atmel.h>
#include <linux/platform_data/mmc-atmel-mci.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index de15121bba7d..bc76aa22473e 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -73,6 +73,9 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
+ /* This quirk is needed on all Rockchip SoCs */
+ host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
+
return 0;
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 40e9d8e45f25..fcbf5524fd31 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -99,6 +99,9 @@ struct idmac_desc {
__le32 des3; /* buffer 2 physical address */
};
+
+/* Each descriptor can transfer up to 4KB of data in chained mode */
+#define DW_MCI_DESC_DATA_LENGTH 0x1000
#endif /* CONFIG_MMC_DW_IDMAC */
static bool dw_mci_reset(struct dw_mci *host);
@@ -235,8 +238,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
- cmd->error = -EINPROGRESS;
+ cmd->error = -EINPROGRESS;
cmdr = cmd->opcode;
if (cmd->opcode == MMC_STOP_TRANSMISSION ||
@@ -371,7 +374,7 @@ static void dw_mci_start_command(struct dw_mci *host,
cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
- wmb();
+ wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
@@ -380,6 +383,7 @@ static void dw_mci_start_command(struct dw_mci *host,
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+
dw_mci_start_command(host, stop, host->stop_cmdr);
}
@@ -462,69 +466,102 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
unsigned int sg_len)
{
+ unsigned int desc_len;
int i;
+
if (host->dma_64bit_address == 1) {
- struct idmac_desc_64addr *desc = host->sg_cpu;
+ struct idmac_desc_64addr *desc_first, *desc_last, *desc;
- for (i = 0; i < sg_len; i++, desc++) {
+ desc_first = desc_last = desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
+
u64 mem_addr = sg_dma_address(&data->sg[i]);
- /*
- * Set the OWN bit and disable interrupts for this
- * descriptor
- */
- desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
- IDMAC_DES0_CH;
- /* Buffer length */
- IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
-
- /* Physical address to DMA to/from */
- desc->des4 = mem_addr & 0xffffffff;
- desc->des5 = mem_addr >> 32;
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+ IDMAC_DES0_CH;
+
+ /* Buffer length */
+ IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
+
+ /* Physical address to DMA to/from */
+ desc->des4 = mem_addr & 0xffffffff;
+ desc->des5 = mem_addr >> 32;
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
}
/* Set first descriptor */
- desc = host->sg_cpu;
- desc->des0 |= IDMAC_DES0_FD;
+ desc_first->des0 |= IDMAC_DES0_FD;
/* Set last descriptor */
- desc = host->sg_cpu + (i - 1) *
- sizeof(struct idmac_desc_64addr);
- desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
- desc->des0 |= IDMAC_DES0_LD;
+ desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc_last->des0 |= IDMAC_DES0_LD;
} else {
- struct idmac_desc *desc = host->sg_cpu;
+ struct idmac_desc *desc_first, *desc_last, *desc;
- for (i = 0; i < sg_len; i++, desc++) {
+ desc_first = desc_last = desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
+
u32 mem_addr = sg_dma_address(&data->sg[i]);
- /*
- * Set the OWN bit and disable interrupts for this
- * descriptor
- */
- desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
- IDMAC_DES0_DIC | IDMAC_DES0_CH);
- /* Buffer length */
- IDMAC_SET_BUFFER1_SIZE(desc, length);
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+ IDMAC_DES0_DIC |
+ IDMAC_DES0_CH);
- /* Physical address to DMA to/from */
- desc->des2 = cpu_to_le32(mem_addr);
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
+
+ /* Physical address to DMA to/from */
+ desc->des2 = cpu_to_le32(mem_addr);
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
}
/* Set first descriptor */
- desc = host->sg_cpu;
- desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+ desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
/* Set last descriptor */
- desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
- desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
- desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+ desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
+ IDMAC_DES0_DIC));
+ desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
}
- wmb();
+ wmb(); /* drain writebuffer */
}
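A worked example of the new per-descriptor chunking, with assumed numbers:

/* Assume one scatterlist entry of length 0x2800 (10 KiB) at DMA
 * address 0x80000000, with DW_MCI_DESC_DATA_LENGTH = 0x1000 (4 KiB):
 *
 *	desc[0]: addr 0x80000000, len 0x1000
 *	desc[1]: addr 0x80001000, len 0x1000
 *	desc[2]: addr 0x80002000, len 0x0800	<- becomes desc_last
 *
 * Only desc_last of the full list gets IDMAC_DES0_LD (and loses
 * IDMAC_DES0_CH/DIC). The old loop used one descriptor per segment,
 * which cannot describe a segment larger than the 4 KiB a chained
 * descriptor can carry.
 */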
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
@@ -542,6 +579,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
temp |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, temp);
+ /* drain writebuffer */
wmb();
/* Enable the IDMAC */
@@ -589,7 +627,9 @@ static int dw_mci_idmac_init(struct dw_mci *host)
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
+ for (i = 0, p = host->sg_cpu;
+ i < host->ring_size - 1;
+ i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
p->des1 = 0;
@@ -718,7 +758,7 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
- int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
+ int idx = ARRAY_SIZE(mszs) - 1;
tx_wmark = (host->fifo_depth) / 2;
tx_wmark_invers = host->fifo_depth - tx_wmark;
@@ -843,6 +883,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
unsigned long irqflags;
+ int flags = SG_MITER_ATOMIC;
u32 temp;
data->error = -EINPROGRESS;
@@ -859,7 +900,6 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
}
if (dw_mci_submit_data_dma(host, data)) {
- int flags = SG_MITER_ATOMIC;
if (host->data->flags & MMC_DATA_READ)
flags |= SG_MITER_TO_SG;
else
@@ -906,7 +946,7 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
unsigned int cmd_status = 0;
mci_writel(host, CMDARG, arg);
- wmb();
+ wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd);
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
@@ -1019,7 +1059,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
if (data) {
dw_mci_submit_data(host, data);
- wmb();
+ wmb(); /* drain writebuffer */
}
dw_mci_start_command(host, cmd, cmdflags);
@@ -1384,14 +1424,15 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int err = -ENOSYS;
+ int err = -EINVAL;
if (drv_data && drv_data->execute_tuning)
err = drv_data->execute_tuning(slot);
return err;
}
-static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
+ struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
@@ -1533,6 +1574,20 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
return data->error;
}
+static void dw_mci_set_drto(struct dw_mci *host)
+{
+ unsigned int drto_clks;
+ unsigned int drto_ms;
+
+ drto_clks = mci_readl(host, TMOUT) >> 8;
+ drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+
+ /* add a bit of spare time */
+ drto_ms += 10;
+
+ mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+}
+
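The timeout arithmetic in dw_mci_set_drto(), worked through with assumed numbers:

/* Assume host->bus_hz = 50 MHz and the TMOUT data-timeout field at
 * its maximum, so drto_clks = 0xFFFFFF = 16777215:
 *
 *	drto_ms = DIV_ROUND_UP(16777215, 50000000 / 1000)
 *	        = DIV_ROUND_UP(16777215, 50000) = 336
 *
 * With the extra 10 ms of slack, the software timer fires ~346 ms
 * after arming if the controller never raises DTO on its own.
 */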
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -1610,8 +1665,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
- &host->pending_events))
+ &host->pending_events)) {
+ /*
+ * If the data-related interrupts don't all arrive within the
+ * given time while in the reading-data state, arm the DRTO timer.
+ */
+ if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+ (host->dir_status == DW_MCI_RECV_STATUS))
+ dw_mci_set_drto(host);
break;
+ }
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
@@ -1644,8 +1707,17 @@ static void dw_mci_tasklet_func(unsigned long priv)
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events))
+ &host->pending_events)) {
+ /*
+ * If the data error interrupt comes but the data over
+ * interrupt doesn't arrive within the given time while in
+ * the reading-data state, arm the DRTO timer as well.
+ */
+ if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+ (host->dir_status == DW_MCI_RECV_STATUS))
+ dw_mci_set_drto(host);
break;
+ }
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
@@ -1743,7 +1815,7 @@ static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
- cnt = min(cnt, (int)host->part_buf_count);
+ cnt = min_t(int, cnt, host->part_buf_count);
if (cnt) {
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
cnt);
@@ -1769,6 +1841,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
if (host->part_buf_count == 2) {
@@ -1795,6 +1868,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
#endif
{
u16 *pdata = buf;
+
for (; cnt >= 2; cnt -= 2)
mci_fifo_writew(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1819,6 +1893,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
@@ -1830,6 +1905,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
#endif
{
u16 *pdata = buf;
+
for (; cnt >= 2; cnt -= 2)
*pdata++ = mci_fifo_readw(host->fifo_reg);
buf = pdata;
@@ -1848,6 +1924,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
if (host->part_buf_count == 4) {
@@ -1874,6 +1951,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
#endif
{
u32 *pdata = buf;
+
for (; cnt >= 4; cnt -= 4)
mci_fifo_writel(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1898,6 +1976,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
@@ -1909,6 +1988,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
#endif
{
u32 *pdata = buf;
+
for (; cnt >= 4; cnt -= 4)
*pdata++ = mci_fifo_readl(host->fifo_reg);
buf = pdata;
@@ -1927,6 +2007,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
@@ -1954,6 +2035,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
#endif
{
u64 *pdata = buf;
+
for (; cnt >= 8; cnt -= 8)
mci_fifo_writeq(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1978,6 +2060,7 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
@@ -1990,6 +2073,7 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
#endif
{
u64 *pdata = buf;
+
for (; cnt >= 8; cnt -= 8)
*pdata++ = mci_fifo_readq(host->fifo_reg);
buf = pdata;
@@ -2065,7 +2149,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -2119,7 +2203,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -2128,7 +2212,7 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
if (!host->cmd_status)
host->cmd_status = status;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
@@ -2192,7 +2276,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
}
@@ -2200,16 +2284,19 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
+ if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+ del_timer(&host->dto_timer);
+
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
host->data_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host, true);
@@ -2383,27 +2470,20 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (ret)
goto err_host_allocated;
- if (host->pdata->blk_settings) {
- mmc->max_segs = host->pdata->blk_settings->max_segs;
- mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
- mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
- mmc->max_req_size = host->pdata->blk_settings->max_req_size;
- mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
- } else {
- /* Useful defaults if platform data is unset. */
-#ifdef CONFIG_MMC_DW_IDMAC
+ /* Useful defaults if platform data is unset. */
+ if (host->use_dma) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65536;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
-#else
+ } else {
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
- mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size *
+ mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
-#endif /* CONFIG_MMC_DW_IDMAC */
}
if (dw_mci_get_cd(mmc))
@@ -2473,8 +2553,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
if (host->dma_ops->init && host->dma_ops->start &&
host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
- dev_err(host->dev, "%s: Unable to initialize "
- "DMA Controller.\n", __func__);
+ dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
+ __func__);
goto no_dma;
}
} else {
@@ -2488,7 +2568,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
no_dma:
dev_info(host->dev, "Using PIO mode.\n");
host->use_dma = 0;
- return;
}
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2542,6 +2621,7 @@ static bool dw_mci_reset(struct dw_mci *host)
if (host->use_dma) {
unsigned long timeout = jiffies + msecs_to_jiffies(500);
u32 status;
+
do {
status = mci_readl(host, STATUS);
if (!(status & SDMMC_STATUS_DMA_REQ))
@@ -2551,8 +2631,8 @@ static bool dw_mci_reset(struct dw_mci *host)
if (status & SDMMC_STATUS_DMA_REQ) {
dev_err(host->dev,
- "%s: Timeout waiting for dma_req to "
- "clear during reset\n", __func__);
+ "%s: Timeout waiting for dma_req to clear during reset\n",
+ __func__);
goto ciu_out;
}
@@ -2563,8 +2643,8 @@ static bool dw_mci_reset(struct dw_mci *host)
} else {
/* if the controller reset bit did clear, then set clock regs */
if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
- dev_err(host->dev, "%s: fifo/dma reset bits didn't "
- "clear but ciu was reset, doing clock update\n",
+ dev_err(host->dev,
+ "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
__func__);
goto ciu_out;
}
@@ -2598,6 +2678,28 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
+static void dw_mci_dto_timer(unsigned long arg)
+{
+ struct dw_mci *host = (struct dw_mci *)arg;
+
+ switch (host->state) {
+ case STATE_SENDING_DATA:
+ case STATE_DATA_BUSY:
+ /*
+ * If the DTO interrupt does NOT arrive while in a data-transfer
+ * state, notify the driver to terminate the current transfer
+ * and report a data timeout to the core.
+ */
+ host->data_status = SDMMC_INT_DRTO;
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ break;
+ default:
+ break;
+ }
+}
+
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
char *quirk;
@@ -2625,8 +2727,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
/* find out number of slots supported */
if (of_property_read_u32(dev->of_node, "num-slots",
&pdata->num_slots)) {
- dev_info(dev, "num-slots property not found, "
- "assuming 1 slot is available\n");
+ dev_info(dev,
+ "num-slots property not found, assuming 1 slot is available\n");
pdata->num_slots = 1;
}
@@ -2636,8 +2738,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
pdata->quirks |= of_quirks[idx].id;
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
- dev_info(dev, "fifo-depth property not found, using "
- "value of FIFOTH register as default\n");
+ dev_info(dev,
+ "fifo-depth property not found, using value of FIFOTH register as default\n");
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
@@ -2650,8 +2752,10 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(ret);
}
- if (of_find_property(np, "supports-highspeed", NULL))
+ if (of_find_property(np, "supports-highspeed", NULL)) {
+ dev_info(dev, "supports-highspeed property is deprecated.\n");
pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+ }
return pdata;
}
@@ -2706,7 +2810,7 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (host->pdata->num_slots > 1) {
+ if (host->pdata->num_slots < 1) {
dev_err(host->dev,
"Platform data must supply num_slots.\n");
return -ENODEV;
@@ -2774,6 +2878,10 @@ int dw_mci_probe(struct dw_mci *host)
host->quirks = host->pdata->quirks;
+ if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+ setup_timer(&host->dto_timer,
+ dw_mci_dto_timer, (unsigned long)host);
+
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
@@ -2874,11 +2982,11 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
- mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+ /* Enable mci interrupt */
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
- dev_info(host->dev, "DW MMC controller at irq %d, "
- "%d bit host data width, "
- "%u deep fifo\n",
+ dev_info(host->dev,
+ "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
host->irq, width, fifo_size);
/* We need at least one slot to succeed */
@@ -2893,8 +3001,9 @@ int dw_mci_probe(struct dw_mci *host)
if (init_slots) {
dev_info(host->dev, "%d slots initialized\n", init_slots);
} else {
- dev_dbg(host->dev, "attempted to initialize %d slots, "
- "but failed on all\n", host->num_slots);
+ dev_dbg(host->dev,
+ "attempted to initialize %d slots, but failed on all\n",
+ host->num_slots);
goto err_dmaunmap;
}
@@ -2992,6 +3101,7 @@ int dw_mci_resume(struct dw_mci *host)
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
+
if (!slot)
continue;
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 68dd6c79c378..b763b11ed9e1 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -948,6 +948,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
struct mmc_data *data = req->data;
int i, use_dma = 1, block_size;
+ struct scatterlist *sg;
unsigned sg_len;
host->data = data;
@@ -972,8 +973,8 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
sg_len = (data->blocks == 1) ? 1 : data->sg_len;
/* Only do DMA for entire blocks */
- for (i = 0; i < sg_len; i++) {
- if ((data->sg[i].length % block_size) != 0) {
+ for_each_sg(data->sg, sg, sg_len, i) {
+ if ((sg->length % block_size) != 0) {
use_dma = 0;
break;
}
@@ -1419,8 +1420,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->reg_shift = (mmc_omap7xx() ? 1 : 2);
host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!host->mmc_omap_wq)
+ if (!host->mmc_omap_wq) {
+ ret = -ENOMEM;
goto err_plat_cleanup;
+ }
for (i = 0; i < pdata->nr_slots; i++) {
ret = mmc_omap_new_slot(host, i);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..781e4db31767 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -181,18 +181,9 @@ struct omap_hsmmc_host {
struct mmc_data *data;
struct clk *fclk;
struct clk *dbclk;
- /*
- * vcc == configured supply
- * vcc_aux == optional
- * - MMC1, supply for DAT4..DAT7
- * - MMC2/MMC2, external level shifter voltage supply, for
- * chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
- */
- struct regulator *vcc;
- struct regulator *vcc_aux;
struct regulator *pbias;
- bool pbias_enabled;
void __iomem *base;
+ int vqmmc_enabled;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
unsigned int dma_len;
@@ -213,7 +204,6 @@ struct omap_hsmmc_host {
int context_loss;
int protect_card;
int reqs_blocked;
- int use_reg;
int req_in_progress;
unsigned long clk_rate;
unsigned int flags;
@@ -254,32 +244,133 @@ static int omap_hsmmc_get_cover_state(struct device *dev)
return mmc_gpio_get_cd(host->mmc);
}
-#ifdef CONFIG_REGULATOR
+static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
+{
+ int ret;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ struct mmc_ios *ios = &mmc->ios;
+
+ if (mmc->supply.vmmc) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable interface voltage rail, if needed */
+ if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
+ goto err_vqmmc;
+ }
+ host->vqmmc_enabled = 1;
+ }
+
+ return 0;
+
+err_vqmmc:
+ if (mmc->supply.vmmc)
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ return ret;
+}
+
+static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
+{
+ int ret;
+ int status;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mmc->supply.vqmmc && host->vqmmc_enabled) {
+ ret = regulator_disable(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
+ return ret;
+ }
+ host->vqmmc_enabled = 0;
+ }
+
+ if (mmc->supply.vmmc) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (ret)
+ goto err_set_ocr;
+ }
+
+ return 0;
+
+err_set_ocr:
+ if (mmc->supply.vqmmc) {
+ status = regulator_enable(mmc->supply.vqmmc);
+ if (status)
+ dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
+ }
+
+ return ret;
+}
+
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
+ int vdd)
+{
+ int ret;
+
+ if (!host->pbias)
+ return 0;
+
+ if (power_on) {
+ if (vdd <= VDD_165_195)
+ ret = regulator_set_voltage(host->pbias, VDD_1V8,
+ VDD_1V8);
+ else
+ ret = regulator_set_voltage(host->pbias, VDD_3V0,
+ VDD_3V0);
+ if (ret < 0) {
+ dev_err(host->dev, "pbias set voltage fail\n");
+ return ret;
+ }
+
+ if (!regulator_is_enabled(host->pbias)) {
+ ret = regulator_enable(host->pbias);
+ if (ret) {
+ dev_err(host->dev, "pbias reg enable fail\n");
+ return ret;
+ }
+ }
+ } else {
+ if (regulator_is_enabled(host->pbias)) {
+ ret = regulator_disable(host->pbias);
+ if (ret) {
+ dev_err(host->dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
struct omap_hsmmc_host *host =
platform_get_drvdata(to_platform_device(dev));
+ struct mmc_host *mmc = host->mmc;
int ret = 0;
+ if (mmc_pdata(host)->set_power)
+ return mmc_pdata(host)->set_power(dev, power_on, vdd);
+
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
*/
- if (!host->vcc)
+ if (!mmc->supply.vmmc)
return 0;
if (mmc_pdata(host)->before_set_reg)
mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
- if (host->pbias) {
- if (host->pbias_enabled == 1) {
- ret = regulator_disable(host->pbias);
- if (!ret)
- host->pbias_enabled = 0;
- }
- regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
- }
+ ret = omap_hsmmc_set_pbias(host, false, 0);
+ if (ret)
+ return ret;
/*
* Assume Vcc regulator is used only to power the card ... OMAP
@@ -295,129 +386,138 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- if (host->vcc)
- ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
- /* Enable interface voltage rail, if needed */
- if (ret == 0 && host->vcc_aux) {
- ret = regulator_enable(host->vcc_aux);
- if (ret < 0 && host->vcc)
- ret = mmc_regulator_set_ocr(host->mmc,
- host->vcc, 0);
- }
- } else {
- /* Shut down the rail */
- if (host->vcc_aux)
- ret = regulator_disable(host->vcc_aux);
- if (host->vcc) {
- /* Then proceed to shut down the local regulator */
- ret = mmc_regulator_set_ocr(host->mmc,
- host->vcc, 0);
- }
- }
-
- if (host->pbias) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0)
- goto error_set_power;
+ ret = omap_hsmmc_enable_supply(mmc);
+ if (ret)
+ return ret;
- if (host->pbias_enabled == 0) {
- ret = regulator_enable(host->pbias);
- if (!ret)
- host->pbias_enabled = 1;
- }
+ ret = omap_hsmmc_set_pbias(host, true, vdd);
+ if (ret)
+ goto err_set_voltage;
+ } else {
+ ret = omap_hsmmc_disable_supply(mmc);
+ if (ret)
+ return ret;
}
if (mmc_pdata(host)->after_set_reg)
mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
-error_set_power:
+ return 0;
+
+err_set_voltage:
+ omap_hsmmc_disable_supply(mmc);
+
return ret;
}
-static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
{
- struct regulator *reg;
- int ocr_value = 0;
+ int ret;
- reg = devm_regulator_get(host->dev, "vmmc");
- if (IS_ERR(reg)) {
- dev_err(host->dev, "unable to get vmmc regulator %ld\n",
- PTR_ERR(reg));
- return PTR_ERR(reg);
- } else {
- host->vcc = reg;
- ocr_value = mmc_regulator_get_ocrmask(reg);
- if (!mmc_pdata(host)->ocr_mask) {
- mmc_pdata(host)->ocr_mask = ocr_value;
- } else {
- if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
- dev_err(host->dev, "ocrmask %x is not supported\n",
- mmc_pdata(host)->ocr_mask);
- mmc_pdata(host)->ocr_mask = 0;
- return -EINVAL;
- }
- }
+ if (!reg)
+ return 0;
+
+ if (regulator_is_enabled(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(reg);
+ if (ret)
+ return ret;
}
- mmc_pdata(host)->set_power = omap_hsmmc_set_power;
- /* Allow an aux regulator */
- reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
- host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+ return 0;
+}
- reg = devm_regulator_get_optional(host->dev, "pbias");
- host->pbias = IS_ERR(reg) ? NULL : reg;
+static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
- /* For eMMC do not power off when not in sleep state */
- if (mmc_pdata(host)->no_regulator_off_init)
- return 0;
/*
- * To disable boot_on regulator, enable regulator
- * to increase usecount and then disable it.
+ * Disable any regulators left enabled during boot and get the
+ * use-count right, so that regulators can later be enabled/disabled
+ * by checking the return value of regulator_is_enabled().
*/
- if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
- (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
- int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
+ ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc);
+ if (ret) {
+ dev_err(host->dev, "fail to disable boot enabled vmmc reg\n");
+ return ret;
+ }
+
+ ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(host->dev,
+ "fail to disable boot enabled vmmc_aux reg\n");
+ return ret;
+ }
- mmc_pdata(host)->set_power(host->dev, 1, vdd);
- mmc_pdata(host)->set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_disable_boot_regulator(host->pbias);
+ if (ret) {
+ dev_err(host->dev,
+ "failed to disable boot enabled pbias reg\n");
+ return ret;
}
return 0;
}
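A hedged note on why the helper enables before disabling (the use-count point made in the comment above):

/* Sketch of the presumed bookkeeping: a regulator left on by the
 * bootloader is physically enabled while its framework use-count is
 * still 0. Calling regulator_disable() directly would unbalance that
 * count, so the helper first takes a reference with
 * regulator_enable() (count 0 -> 1) and then drops it (1 -> 0),
 * leaving the rail off and regulator_is_enabled() reporting false.
 */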
-static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
- mmc_pdata(host)->set_power = NULL;
-}
+ int ocr_value = 0;
+ int ret;
+ struct mmc_host *mmc = host->mmc;
-static inline int omap_hsmmc_have_reg(void)
-{
- return 1;
-}
+ if (mmc_pdata(host)->set_power)
+ return 0;
-#else
+ mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
+ if (IS_ERR(mmc->supply.vmmc)) {
+ ret = PTR_ERR(mmc->supply.vmmc);
+ if (ret != -ENODEV)
+ return ret;
+ dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
+ PTR_ERR(mmc->supply.vmmc));
+ mmc->supply.vmmc = NULL;
+ } else {
+ ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ocr_value > 0)
+ mmc_pdata(host)->ocr_mask = ocr_value;
+ }
-static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
-{
- return -EINVAL;
-}
+ /* Allow an aux regulator */
+ mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ ret = PTR_ERR(mmc->supply.vqmmc);
+ if (ret != -ENODEV)
+ return ret;
+ dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
+ PTR_ERR(mmc->supply.vqmmc));
+ mmc->supply.vqmmc = NULL;
+ }
-static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
-{
-}
+ host->pbias = devm_regulator_get_optional(host->dev, "pbias");
+ if (IS_ERR(host->pbias)) {
+ ret = PTR_ERR(host->pbias);
+ if (ret != -ENODEV)
+ return ret;
+ dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
+ PTR_ERR(host->pbias));
+ host->pbias = NULL;
+ }
+
+ /* For eMMC do not power off when not in sleep state */
+ if (mmc_pdata(host)->no_regulator_off_init)
+ return 0;
+
+ ret = omap_hsmmc_disable_boot_regulators(host);
+ if (ret)
+ return ret;
-static inline int omap_hsmmc_have_reg(void)
-{
return 0;
}
-#endif
-
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
@@ -1062,9 +1162,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
if (status & (CTO_EN | CCRC_EN))
end_cmd = 1;
+ if (host->data || host->response_busy) {
+ end_trans = !end_cmd;
+ host->response_busy = 0;
+ }
if (status & (CTO_EN | DTO_EN))
hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
- else if (status & (CCRC_EN | DCRC_EN))
+ else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+ BADA_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
if (status & ACE_EN) {
@@ -1081,10 +1186,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
}
- if (host->data || host->response_busy) {
- end_trans = !end_cmd;
- host->response_busy = 0;
- }
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
@@ -1148,11 +1249,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = mmc_pdata(host)->set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_set_power(host->dev, 0, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = mmc_pdata(host)->set_power(host->dev, 1, vdd);
+ ret = omap_hsmmc_set_power(host->dev, 1, vdd);
pm_runtime_get_sync(host->dev);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1551,10 +1652,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- mmc_pdata(host)->set_power(host->dev, 0, 0);
+ omap_hsmmc_set_power(host->dev, 0, 0);
break;
case MMC_POWER_UP:
- mmc_pdata(host)->set_power(host->dev, 1, ios->vdd);
+ omap_hsmmc_set_power(host->dev, 1, ios->vdd);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
@@ -1952,7 +2053,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->base = base + pdata->reg_offset;
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
- host->pbias_enabled = 0;
+ host->vqmmc_enabled = 0;
ret = omap_hsmmc_gpio_init(mmc, host, pdata);
if (ret)
@@ -2077,12 +2178,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
- if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
- ret = omap_hsmmc_reg_get(host);
- if (ret)
- goto err_irq;
- host->use_reg = 1;
- }
+ ret = omap_hsmmc_reg_get(host);
+ if (ret)
+ goto err_irq;
mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
@@ -2124,8 +2222,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
err_slot_name:
mmc_remove_host(mmc);
- if (host->use_reg)
- omap_hsmmc_reg_put(host);
err_irq:
device_init_wakeup(&pdev->dev, false);
if (host->tx_chan)
@@ -2149,8 +2245,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
- if (host->use_reg)
- omap_hsmmc_reg_put(host);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1b6d0bfe35f5..1420f29628c7 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -22,7 +22,9 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
@@ -37,7 +39,6 @@
#include <asm/sizes.h>
#include <mach/hardware.h>
-#include <mach/dma.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
@@ -58,7 +59,6 @@ struct pxamci_host {
struct clk *clk;
unsigned long clkrate;
int irq;
- int dma;
unsigned int clkrt;
unsigned int cmdat;
unsigned int imask;
@@ -69,8 +69,10 @@ struct pxamci_host {
struct mmc_command *cmd;
struct mmc_data *data;
+ struct dma_chan *dma_chan_rx;
+ struct dma_chan *dma_chan_tx;
+ dma_cookie_t dma_cookie;
dma_addr_t sg_dma;
- struct pxa_dma_desc *sg_cpu;
unsigned int dma_len;
unsigned int dma_dir;
@@ -173,14 +175,18 @@ static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void pxamci_dma_irq(void *param);
+
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
+ struct dma_async_tx_descriptor *tx;
+ enum dma_data_direction direction;
+ struct dma_slave_config config;
+ struct dma_chan *chan;
unsigned int nob = data->blocks;
unsigned long long clks;
unsigned int timeout;
- bool dalgn = 0;
- u32 dcmd;
- int i;
+ int ret;
host->data = data;
@@ -195,54 +201,48 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
writel((timeout + 255) / 256, host->base + MMC_RDTO);
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = host->res->start + MMC_RXFIFO;
+ config.dst_addr = host->res->start + MMC_TXFIFO;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
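+ /*
+ * One slave config carries both directions; the dmaengine driver
+ * only uses the src_* or dst_* fields matching the direction
+ * chosen below.
+ */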
+
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
- dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
- DRCMR(host->dma_drcmrtx) = 0;
- DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
+ direction = DMA_DEV_TO_MEM;
+ chan = host->dma_chan_rx;
} else {
host->dma_dir = DMA_TO_DEVICE;
- dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
- DRCMR(host->dma_drcmrrx) = 0;
- DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
+ direction = DMA_MEM_TO_DEV;
+ chan = host->dma_chan_tx;
}
- dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
+ config.direction = direction;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret < 0) {
+ dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
+ return;
+ }
- host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
host->dma_dir);
- for (i = 0; i < host->dma_len; i++) {
- unsigned int length = sg_dma_len(&data->sg[i]);
- host->sg_cpu[i].dcmd = dcmd | length;
- if (length & 31 && !(data->flags & MMC_DATA_READ))
- host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
- /* Not aligned to 8-byte boundary? */
- if (sg_dma_address(&data->sg[i]) & 0x7)
- dalgn = 1;
- if (data->flags & MMC_DATA_READ) {
- host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
- host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
- } else {
- host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
- host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
- }
- host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
- sizeof(struct pxa_dma_desc);
+ tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ return;
}
- host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
- wmb();
- /*
- * The PXA27x DMA controller encounters overhead when working with
- * unaligned (to 8-byte boundaries) data, so switch on byte alignment
- * mode only if we have unaligned data.
- */
- if (dalgn)
- DALGN |= (1 << host->dma);
- else
- DALGN &= ~(1 << host->dma);
- DDADR(host->dma) = host->sg_dma;
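+ /*
+ * Only writes need the completion callback: pxamci_dma_irq() pushes
+ * the last partial buffer to the card via MMC_PRTBUF. Reads complete
+ * from the MMC data-done interrupt instead.
+ */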
+ if (!(data->flags & MMC_DATA_READ)) {
+ tx->callback = pxamci_dma_irq;
+ tx->callback_param = host;
+ }
+
+ host->dma_cookie = dmaengine_submit(tx);
/*
* workaround for erratum #91:
@@ -251,7 +251,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
* before starting DMA.
*/
if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
- DCSR(host->dma) = DCSR_RUN;
+ dma_async_issue_pending(chan);
}
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
@@ -343,7 +343,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
* enable DMA late
*/
if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
- DCSR(host->dma) = DCSR_RUN;
+ dma_async_issue_pending(host->dma_chan_tx);
} else {
pxamci_finish_request(host, host->mrq);
}
@@ -354,13 +354,17 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
+ struct dma_chan *chan;
if (!data)
return 0;
- DCSR(host->dma) = 0;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- host->dma_dir);
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_chan_rx;
+ else
+ chan = host->dma_chan_tx;
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len, host->dma_dir);
if (stat & STAT_READ_TIME_OUT)
data->error = -ETIMEDOUT;
@@ -552,20 +556,37 @@ static const struct mmc_host_ops pxamci_ops = {
.enable_sdio_irq = pxamci_enable_sdio_irq,
};
-static void pxamci_dma_irq(int dma, void *devid)
+static void pxamci_dma_irq(void *param)
{
- struct pxamci_host *host = devid;
- int dcsr = DCSR(dma);
- DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
+ struct pxamci_host *host = param;
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dma_chan *chan;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data)
+ goto out_unlock;
- if (dcsr & DCSR_ENDINTR) {
+ if (host->data->flags & MMC_DATA_READ)
+ chan = host->dma_chan_rx;
+ else
+ chan = host->dma_chan_tx;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, &state);
+
+ if (likely(status == DMA_COMPLETE)) {
writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
} else {
- pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
- mmc_hostname(host->mmc), dma, dcsr);
+ pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
+ host->data->flags & MMC_DATA_READ ? "rx" : "tx");
host->data->error = -EIO;
pxamci_data_done(host, 0);
}
+
+out_unlock:
+ spin_unlock_irqrestore(&host->lock, flags);
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
@@ -625,7 +646,9 @@ static int pxamci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
struct resource *r, *dmarx, *dmatx;
+ struct pxad_param param_rx, param_tx;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+ dma_cap_mask_t mask;
ret = pxamci_of_init(pdev);
if (ret)
@@ -671,7 +694,6 @@ static int pxamci_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
- host->dma = -1;
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
@@ -702,12 +724,6 @@ static int pxamci_probe(struct platform_device *pdev)
MMC_CAP_SD_HIGHSPEED;
}
- host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
- if (!host->sg_cpu) {
- ret = -ENOMEM;
- goto out;
- }
-
spin_lock_init(&host->lock);
host->res = r;
host->irq = irq;
@@ -728,32 +744,45 @@ static int pxamci_probe(struct platform_device *pdev)
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
- host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
- pxamci_dma_irq, host);
- if (host->dma < 0) {
- ret = -EBUSY;
- goto out;
- }
-
ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
if (ret)
goto out;
platform_set_drvdata(pdev, mmc);
- dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmarx) {
- ret = -ENXIO;
+ if (!pdev->dev.of_node) {
+ dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!dmarx || !dmatx) {
+ ret = -ENXIO;
+ goto out;
+ }
+ param_rx.prio = PXAD_PRIO_LOWEST;
+ param_rx.drcmr = dmarx->start;
+ param_tx.prio = PXAD_PRIO_LOWEST;
+ param_tx.drcmr = dmatx->start;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_chan_rx =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param_rx, &pdev->dev, "rx");
+ if (host->dma_chan_rx == NULL) {
+ dev_err(&pdev->dev, "unable to request rx dma channel\n");
+ ret = -ENODEV;
goto out;
}
- host->dma_drcmrrx = dmarx->start;
- dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmatx) {
- ret = -ENXIO;
+ host->dma_chan_tx =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param_tx, &pdev->dev, "tx");
+ if (host->dma_chan_tx == NULL) {
+ dev_err(&pdev->dev, "unable to request tx dma channel\n");
+ ret = -ENODEV;
goto out;
}
- host->dma_drcmrtx = dmatx->start;
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
@@ -814,12 +843,12 @@ err_gpio_ro:
gpio_free(gpio_power);
out:
if (host) {
- if (host->dma >= 0)
- pxa_free_dma(host->dma);
+ if (host->dma_chan_rx)
+ dma_release_channel(host->dma_chan_rx);
+ if (host->dma_chan_tx)
+ dma_release_channel(host->dma_chan_tx);
if (host->base)
iounmap(host->base);
- if (host->sg_cpu)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->clk)
clk_put(host->clk);
}
@@ -863,13 +892,12 @@ static int pxamci_remove(struct platform_device *pdev)
END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
host->base + MMC_I_MASK);
- DRCMR(host->dma_drcmrrx) = 0;
- DRCMR(host->dma_drcmrtx) = 0;
-
free_irq(host->irq, host);
- pxa_free_dma(host->dma);
+ dmaengine_terminate_all(host->dma_chan_rx);
+ dmaengine_terminate_all(host->dma_chan_tx);
+ dma_release_channel(host->dma_chan_rx);
+ dma_release_channel(host->dma_chan_tx);
iounmap(host->base);
- dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
clk_put(host->clk);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..886d230f41d0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,7 @@
#include "sdhci-esdhc.h"
#define ESDHC_CTRL_D3CD 0x08
+#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC 0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
@@ -44,6 +45,7 @@
#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
+#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
/* Tuning bits */
@@ -60,10 +62,21 @@
#define ESDHC_TUNE_CTRL_MIN 0
#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
+/* strobe dll register */
+#define ESDHC_STROBE_DLL_CTRL 0x70
+#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
+#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
+#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
+
+#define ESDHC_STROBE_DLL_STATUS 0x74
+#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
+#define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1
+
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP 0x1
+#define ESDHC_TUNING_STEP_SHIFT 16
/* pinctrl state */
#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
@@ -120,6 +133,11 @@
#define ESDHC_FLAG_ERR004536 BIT(7)
/* The IP supports HS200 mode */
#define ESDHC_FLAG_HS200 BIT(8)
+/* The IP supports HS400 mode */
+#define ESDHC_FLAG_HS400 BIT(9)
+
+/* A clock frequency higher than this rate requires strobe dll control */
+#define ESDHC_STROBE_DLL_CLK_FREQ 100000000
struct esdhc_soc_data {
u32 flags;
@@ -156,6 +174,12 @@ static struct esdhc_soc_data usdhc_imx6sx_data = {
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
};
+static struct esdhc_soc_data usdhc_imx7d_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400,
+};
+
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
@@ -199,6 +223,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+ { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -274,6 +299,9 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
| SDHCI_USE_SDR50_TUNING;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ val |= SDHCI_SUPPORT_HS400;
}
}
@@ -448,6 +476,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ u32 tuning_ctrl;
if (val & SDHCI_CTRL_TUNED_CLK) {
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
} else {
@@ -458,6 +487,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
if (val & SDHCI_CTRL_EXEC_TUNING) {
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
+ if (imx_data->boarddata.tuning_step)
+ tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
+ writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
@@ -581,13 +615,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
- return boarddata->f_max;
- else
- return pltfm_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -779,6 +808,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
break;
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
pinctrl = imx_data->pins_200mhz;
break;
default:
@@ -789,24 +819,68 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
}
+/*
+ * For HS400 eMMC there is a data_strobe line: this signal is generated
+ * by the device and used for data output and CRC status response output
+ * in HS400 mode. The frequency of this signal follows the frequency of
+ * CLK generated by the host, and the host latches data aligned to the
+ * edge of the data_strobe line. Because of the time delay between the
+ * CLK line and the data_strobe line, if that delay grows larger than
+ * one clock cycle the two lines become misaligned and read errors show
+ * up. So when CLK runs above 100MHz, where each clock cycle is short
+ * enough for this to matter, the host should configure the delay target.
+ */
+static void esdhc_set_strobe_dll(struct sdhci_host *host)
+{
+ u32 v;
+
+ if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
+ /* force a reset on strobe dll */
+ writel(ESDHC_STROBE_DLL_CTRL_RESET,
+ host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /*
+ * enable strobe dll ctrl and adjust the delay target
+ * for the uSDHC loopback read clock
+ */
+ v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+ (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+ writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /* wait 1us to make sure the strobe dll status register is stable */
+ udelay(1);
+ v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
+ if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status REF not lock!\n");
+ if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status SLV not lock!\n");
+ }
+}
+
static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
+ u32 m;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ /* disable ddr mode and disable HS400 mode */
+ m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN);
+ imx_data->is_ddr = 0;
+
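+ /*
+ * The switch below re-enables only the mode bits each timing needs
+ * on top of this cleared state.
+ */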
switch (timing) {
case MMC_TIMING_UHS_SDR12:
case MMC_TIMING_UHS_SDR25:
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
break;
case MMC_TIMING_UHS_DDR50:
case MMC_TIMING_MMC_DDR52:
- writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
- ESDHC_MIX_CTRL_DDREN,
- host->ioaddr + ESDHC_MIX_CTRL);
+ m |= ESDHC_MIX_CTRL_DDREN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 1;
if (boarddata->delay_line) {
u32 v;
@@ -818,6 +892,12 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
writel(v, host->ioaddr + ESDHC_DLL_CTRL);
}
break;
+ case MMC_TIMING_MMC_HS400:
+ m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 1;
+ esdhc_set_strobe_dll(host);
+ break;
}
esdhc_change_pinstate(host, timing);
@@ -878,33 +958,20 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return -ENODEV;
-
- if (of_get_property(np, "non-removable", NULL))
- boarddata->cd_type = ESDHC_CD_PERMANENT;
-
- if (of_get_property(np, "fsl,cd-controller", NULL))
- boarddata->cd_type = ESDHC_CD_CONTROLLER;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int ret;
if (of_get_property(np, "fsl,wp-controller", NULL))
boarddata->wp_type = ESDHC_WP_CONTROLLER;
- boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- if (gpio_is_valid(boarddata->cd_gpio))
- boarddata->cd_type = ESDHC_CD_GPIO;
-
boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
- of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
- of_property_read_u32(np, "max-frequency", &boarddata->f_max);
+ of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
@@ -916,29 +983,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
+ /* SDR50 and SDR104 need 1.8V signal voltage to work */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+ !IS_ERR(imx_data->pins_default)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /*
+ * fall back to not support uhs by specify no 1.8v quirk
+ */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
/* call to generic mmc_of_parse to support additional capabilities */
- return mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ return 0;
}
#else
static inline int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
return -ENODEV;
}
#endif
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct pltfm_imx_data *imx_data)
+{
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int err;
+
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ return -EINVAL;
+ }
+
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ return err;
+ }
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* card_detect */
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request card-detect gpio!\n");
+ return err;
+ }
+ /* fall through */
+
+ case ESDHC_CD_CONTROLLER:
+ /* we have working card detection */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
+ }
+
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+ return 0;
+}
+
static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- struct esdhc_platform_data *boarddata;
int err;
struct pltfm_imx_data *imx_data;
- bool dt = true;
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
if (IS_ERR(host))
@@ -1003,10 +1160,26 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to something insane. Change it back here.
*/
if (esdhc_is_usdhc(imx_data)) {
- writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+ writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL);
+
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ /*
+ * The ROM code clears the burst_length_enable bit if this usdhc
+ * is chosen to boot the system. Change it back here, otherwise
+ * performance suffers badly. This bit enables/disables the burst
+ * length for the external AHB2AXI bridge; it matters especially
+ * for INCR transfers, because without the burst length indicator
+ * the AHB2AXI bridge does not know the burst length in advance,
+ * and AHB INCR transfers can then only be converted to singles on
+ * the AXI side.
+ */
+ writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+ | ESDHC_BURST_LEN_EN_INCR,
+ host->ioaddr + SDHCI_HOST_CONTROL);
+
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
@@ -1030,84 +1203,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- boarddata = &imx_data->boarddata;
- if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
- if (!host->mmc->parent->platform_data) {
- dev_err(mmc_dev(host->mmc), "no board data!\n");
- err = -EINVAL;
- goto disable_clk;
- }
- imx_data->boarddata = *((struct esdhc_platform_data *)
- host->mmc->parent->platform_data);
- dt = false;
- }
- /* write_protect */
- if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
- err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request write-protect gpio!\n");
- goto disable_clk;
- }
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }
-
- /* card_detect */
- switch (boarddata->cd_type) {
- case ESDHC_CD_GPIO:
- if (dt)
- break;
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request card-detect gpio!\n");
- goto disable_clk;
- }
- /* fall through */
-
- case ESDHC_CD_CONTROLLER:
- /* we have a working card_detect back */
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- break;
-
- case ESDHC_CD_PERMANENT:
- host->mmc->caps |= MMC_CAP_NONREMOVABLE;
- break;
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
- case ESDHC_CD_NONE:
- break;
- }
-
- switch (boarddata->max_bus_width) {
- case 8:
- host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
- break;
- case 4:
- host->mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- default:
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- break;
- }
-
- /* sdr50 and sdr104 needs work on 1.8v signal voltage */
- if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
- !IS_ERR(imx_data->pins_default)) {
- imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_100MHZ);
- imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /* fall back to not support uhs by specify no 1.8v quirk */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
+ if (of_id)
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ else
+ err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
+ if (err)
+ goto disable_clk;
err = sdhci_add_host(host);
if (err)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..163ac9974d91 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -21,7 +21,8 @@
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
- SDHCI_QUIRK_PIO_NEEDS_DELAY)
+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+ SDHCI_QUIRK_NO_HISPD_BIT)
#define ESDHC_SYSTEM_CONTROL 0x2c
#define ESDHC_CLOCK_MASK 0x0000fff0
@@ -45,6 +46,6 @@
#define ESDHC_DMA_SYSCTL 0x40c
#define ESDHC_DMA_SNOOP 0x00000040
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4a09f7608c66..4bcee033feda 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -489,6 +489,11 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto pclk_disable;
}
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(msm_host->clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
ret = clk_prepare_enable(msm_host->clk);
if (ret)
goto pclk_disable;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 21c0c08dfe54..75379cb0fb35 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -63,6 +63,9 @@ static struct sdhci_ops sdhci_arasan_ops = {
static struct sdhci_pltfm_data sdhci_arasan_pdata = {
.ops = &sdhci_arasan_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
#ifdef CONFIG_PM_SLEEP
@@ -214,6 +217,7 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
static const struct of_device_id sdhci_arasan_of_match[] = {
{ .compatible = "arasan,sdhci-8.9a" },
+ { .compatible = "arasan,sdhci-5.1" },
{ .compatible = "arasan,sdhci-4.9a" },
{ }
};
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
new file mode 100644
index 000000000000..d1556643a41d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -0,0 +1,191 @@
+/*
+ * Atmel SDMMC controller driver.
+ *
+ * Copyright (C) 2015 Atmel,
+ * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDMMC_CACR 0x230
+#define SDMMC_CACR_CAPWREN BIT(0)
+#define SDMMC_CACR_KEY (0x46 << 8)
+
+struct sdhci_at91_priv {
+ struct clk *hclock;
+ struct clk *gck;
+ struct clk *mainck;
+};
+
+static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+ .ops = &sdhci_at91_sama5d2_ops,
+};
+
+static const struct of_device_id sdhci_at91_dt_match[] = {
+ { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ {}
+};
+
+static int sdhci_at91_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct sdhci_pltfm_data *soc_data;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_at91_priv *priv;
+ unsigned int caps0, caps1;
+ unsigned int clk_base, clk_mul;
+ unsigned int gck_rate, real_gck_rate;
+ int ret;
+
+ match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ soc_data = match->data;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
+ if (IS_ERR(priv->mainck)) {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ return PTR_ERR(priv->mainck);
+ }
+
+ priv->hclock = devm_clk_get(&pdev->dev, "hclock");
+ if (IS_ERR(priv->hclock)) {
+ dev_err(&pdev->dev, "failed to get hclock\n");
+ return PTR_ERR(priv->hclock);
+ }
+
+ priv->gck = devm_clk_get(&pdev->dev, "multclk");
+ if (IS_ERR(priv->gck)) {
+ dev_err(&pdev->dev, "failed to get multclk\n");
+ return PTR_ERR(priv->gck);
+ }
+
+ host = sdhci_pltfm_init(pdev, soc_data, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ /*
+ * The mult clock is provided as a generated clock (gck) by the PMC
+ * controller. In order to set the rate of gck, we have to get the
+ * base clock rate and the clock mult from the capabilities.
+ */
+ clk_prepare_enable(priv->hclock);
+ caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
+ caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
+ clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+ clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
+ gck_rate = clk_base * 1000000 * (clk_mul + 1);
+ ret = clk_set_rate(priv->gck, gck_rate);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set gck");
+ goto hclock_disable_unprepare;
+ return -EINVAL;
+ }
+ /*
+ * We need to check whether we got the requested rate for gck,
+ * because in some cases this rate may not be supported. If that
+ * happens, the rate set is the closest one gck can provide, and
+ * we have to update the value of clk mul accordingly.
+ */
+ real_gck_rate = clk_get_rate(priv->gck);
+ if (real_gck_rate != gck_rate) {
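+ /*
+ * Invert gck_rate = clk_base * 1000000 * (clk_mul + 1) to
+ * derive the multiplier matching the rate actually granted.
+ */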
+ clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
+ caps1 &= (~SDHCI_CLOCK_MUL_MASK);
+ caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK);
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+ dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n",
+ clk_mul, real_gck_rate);
+ }
+
+ clk_prepare_enable(priv->mainck);
+ clk_prepare_enable(priv->gck);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = priv;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto clocks_disable_unprepare;
+
+ sdhci_get_of_property(pdev);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto clocks_disable_unprepare;
+
+ return 0;
+
+clocks_disable_unprepare:
+ clk_disable_unprepare(priv->gck);
+ clk_disable_unprepare(priv->mainck);
+hclock_disable_unprepare:
+ clk_disable_unprepare(priv->hclock);
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_at91_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = pltfm_host->priv;
+
+ sdhci_pltfm_unregister(pdev);
+
+ clk_disable_unprepare(priv->gck);
+ clk_disable_unprepare(priv->hclock);
+ clk_disable_unprepare(priv->mainck);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_at91_driver = {
+ .driver = {
+ .name = "sdhci-at91",
+ .of_match_table = sdhci_at91_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_at91_probe,
+ .remove = sdhci_at91_remove,
+};
+
+module_platform_driver(sdhci_at91_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for at91");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 797be7549a15..653f335bef15 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -208,6 +208,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
return;
+ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ temp = esdhc_readw(host, SDHCI_HOST_VERSION);
+ temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (temp < VENDOR_V_23)
+ pre_div = 2;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 94f54d2772e8..b3b0a3e4fca1 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -618,6 +618,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
static const struct sdhci_pci_fixes sdhci_o2 = {
.probe = sdhci_pci_o2_probe,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
.probe_slot = sdhci_pci_o2_probe_slot,
.resume = sdhci_pci_o2_resume,
};
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
goto err_of_parse;
sdhci_get_of_property(pdev);
pdata = pxav3_get_mmc_pdata(dev);
+ pdev->dev.platform_data = pdata;
} else if (pdata) {
/* on-chip device */
if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 0110bae25b7e..884294576356 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -161,8 +161,8 @@ static struct sdhci_pltfm_data sdhci_sirf_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_DELAY_AFTER_POWER,
+ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static int sdhci_sirf_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index df088343d60f..255a896769b8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Inspired by sdhci-pltfm.c
*
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..64b7fdbd1a9c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -54,8 +54,7 @@ static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data,
- struct sdhci_host_next *next);
+ struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
@@ -207,8 +206,7 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT))
+ if (!sdhci_do_get_cd(host))
return;
}
@@ -496,7 +494,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
goto fail;
BUG_ON(host->align_addr & host->align_mask);
- host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
+ host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
goto unmap_align;
@@ -635,9 +633,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
}
}
- if (!data->host_cookie)
+ if (data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, direction);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -833,7 +833,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
} else {
int sg_cnt;
- sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+ sg_cnt = sdhci_pre_dma_transfer(host, data);
if (sg_cnt <= 0) {
/*
* This only happens when someone fed
@@ -949,11 +949,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
if (host->flags & SDHCI_USE_ADMA)
sdhci_adma_table_post(host, data);
else {
- if (!data->host_cookie)
+ if (data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(mmc_dev(host->mmc),
data->sg, data->sg_len,
(data->flags & MMC_DATA_READ) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
}
@@ -1132,6 +1134,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
break;
case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
break;
case MMC_TIMING_MMC_HS400:
@@ -1152,6 +1155,7 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
+ bool switch_base_clk = false;
host->mmc->actual_clock = 0;
@@ -1189,15 +1193,25 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
<= clock)
break;
}
- /*
- * Set Programmable Clock Mode in the Clock
- * Control register.
- */
- clk = SDHCI_PROG_CLOCK_MODE;
- real_div = div;
- clk_mul = host->clk_mul;
- div--;
- } else {
+ if ((host->max_clk * host->clk_mul / div) <= clock) {
+ /*
+ * Set Programmable Clock Mode in the Clock
+ * Control register.
+ */
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
+ div--;
+ } else {
+ /*
+ * Divisor can be too small to reach clock
+ * speed requirement. Then use the base clock.
+ */
+ switch_base_clk = true;
+ }
+ }
+
+ if (!host->clk_mul || switch_base_clk) {
/* Version 3.00 divisors must be a multiple of 2. */
if (host->max_clk <= clock)
div = 1;
@@ -1210,6 +1224,9 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
real_div = div;
div >>= 1;
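+ /*
+ * A divider of zero means "base clock untouched";
+ * controllers with SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN
+ * cannot do that, so run at half the base clock when
+ * the base clock is already slow enough.
+ */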
+ if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
+ && !div && host->max_clk <= 25000000)
+ div = 1;
}
} else {
/* Version 2.00 divisors must be a power of 2. */
@@ -1559,7 +1576,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
(ios->timing == MMC_TIMING_UHS_SDR25) ||
(ios->timing == MMC_TIMING_UHS_SDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50))) {
+ (ios->timing == MMC_TIMING_UHS_DDR50) ||
+ (ios->timing == MMC_TIMING_MMC_DDR52))) {
u16 preset;
sdhci_enable_preset_value(host, true);
@@ -1601,15 +1619,21 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
- /* If polling/nonremovable, assume that the card is always present. */
- if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ /* If nonremovable, assume that the card is always present. */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
return 1;
- /* Try slot gpio detect */
+ /*
+ * Try slot gpio detect; if defined, it takes precedence over
+ * the built-in controller functionality.
+ */
if (!IS_ERR_VALUE(gpio_cd))
return !!gpio_cd;
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ return 1;
+
/* Host native card detect */
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
@@ -2097,49 +2121,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->flags & SDHCI_REQ_USE_DMA) {
- if (data->host_cookie)
+ if (data->host_cookie == COOKIE_GIVEN ||
+ data->host_cookie == COOKIE_MAPPED)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
data->flags & MMC_DATA_WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mrq->data->host_cookie = 0;
+ data->host_cookie = COOKIE_UNMAPPED;
}
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data,
- struct sdhci_host_next *next)
+ struct mmc_data *data)
{
int sg_count;
- if (!next && data->host_cookie &&
- data->host_cookie != host->next_data.cookie) {
- pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
- __func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
+ if (data->host_cookie == COOKIE_MAPPED) {
+ data->host_cookie = COOKIE_GIVEN;
+ return data->sg_count;
}
- /* Check if next job is already prepared */
- if (next ||
- (!next && data->host_cookie != host->next_data.cookie)) {
- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- } else {
- sg_count = host->next_data.sg_count;
- host->next_data.sg_count = 0;
- }
+ WARN_ON(data->host_cookie == COOKIE_GIVEN);
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (sg_count == 0)
- return -EINVAL;
+ return -ENOSPC;
- if (next) {
- next->sg_count = sg_count;
- data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
- } else
- host->sg_count = sg_count;
+ data->sg_count = sg_count;
+ data->host_cookie = COOKIE_MAPPED;
return sg_count;
}
@@ -2149,16 +2160,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct sdhci_host *host = mmc_priv(mmc);
- if (mrq->data->host_cookie) {
- mrq->data->host_cookie = 0;
- return;
- }
+ mrq->data->host_cookie = COOKIE_UNMAPPED;
if (host->flags & SDHCI_REQ_USE_DMA)
- if (sdhci_pre_dma_transfer(host,
- mrq->data,
- &host->next_data) < 0)
- mrq->data->host_cookie = 0;
+ sdhci_pre_dma_transfer(host, mrq->data);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2866,6 +2871,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
+ u32 max_clk;
int ret;
WARN_ON(host == NULL);
@@ -2978,8 +2984,11 @@ int sdhci_add_host(struct sdhci_host *host)
GFP_KERNEL);
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
+ if (host->adma_table)
+ dma_free_coherent(mmc_dev(mmc),
+ host->adma_table_sz,
+ host->adma_table,
+ host->adma_addr);
kfree(host->align_buffer);
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
@@ -3026,7 +3035,6 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk = host->ops->get_max_clock(host);
}
- host->next_data.cookie = 1;
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -3047,18 +3055,22 @@ int sdhci_add_host(struct sdhci_host *host)
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
- mmc->f_max = host->max_clk;
+ max_clk = host->max_clk;
+
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
- mmc->f_max = host->max_clk * host->clk_mul;
+ max_clk = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ if (!mmc->f_max || mmc->f_max > max_clk)
+ mmc->f_max = max_clk;
+
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
@@ -3118,7 +3130,8 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !(mmc->caps & MMC_CAP_NONREMOVABLE))
+ !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5521d29368e4..7c02ff46c8ac 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
*/
#define SDHCI_MAX_SEGS 128
-struct sdhci_host_next {
- unsigned int sg_count;
- s32 cookie;
+enum sdhci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_MAPPED,
+ COOKIE_GIVEN,
};
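+
+/*
+ * host_cookie lifecycle: pre_req maps the sg list and marks it MAPPED;
+ * the request path turns MAPPED into GIVEN when it consumes the mapping;
+ * post_req or data completion unmaps it and returns it to UNMAPPED.
+ */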
struct sdhci_host {
@@ -409,6 +410,8 @@ struct sdhci_host {
#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13)
/* Controller broken with using ACMD23 */
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
+/* Broken Clock divider zero in controller */
+#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -503,7 +506,6 @@ struct sdhci_host {
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
- struct sdhci_host_next next_data;
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5a1fdd405b1a..ad9ffea7d659 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1632,7 +1632,9 @@ static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ pm_runtime_put(dev);
return 0;
}
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 4d3e1ffe5508..a7b7a6771598 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -595,7 +595,7 @@ static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id)
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
- unsigned long expire = jiffies + msecs_to_jiffies(250);
+ unsigned long expire = jiffies + msecs_to_jiffies(750);
u32 rval;
rval = mmc_readl(host, REG_CLKCR);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index e3dcf31a8bd6..a10fde40b6c3 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -83,6 +83,8 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
return --host->sg_len;
}
+#define CMDREQ_TIMEOUT 5000
+
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a, status, i) \
@@ -230,7 +232,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
*/
if (IS_ERR_OR_NULL(mrq)
|| time_is_after_jiffies(host->last_req_ts +
- msecs_to_jiffies(2000))) {
+ msecs_to_jiffies(CMDREQ_TIMEOUT))) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@@ -818,7 +820,7 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
ret = tmio_mmc_start_command(host, mrq->cmd);
if (!ret) {
schedule_delayed_work(&host->delayed_reset_work,
- msecs_to_jiffies(2000));
+ msecs_to_jiffies(CMDREQ_TIMEOUT));
return;
}
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 54b082b1804a..4498e92116b8 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1611,7 +1611,7 @@ static irqreturn_t usdhi6_cd(int irq, void *dev_id)
return IRQ_NONE;
/* Ack */
- usdhi6_write(host, USDHI6_SD_INFO1, !status);
+ usdhi6_write(host, USDHI6_SD_INFO1, ~status);
if (!work_pending(&mmc->detect.work) &&
(((status & USDHI6_SD_INFO1_CARD_INSERT) &&
@@ -1634,6 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
+ struct scatterlist *sg = host->sg ?: data->sg;
dev_warn(mmc_dev(host->mmc),
"%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
@@ -1669,7 +1670,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
host->offset, data->blocks, data->blksz, data->sg_len,
- sg_dma_len(host->sg), host->sg->offset);
+ sg_dma_len(sg), sg->offset);
usdhi6_sg_unmap(host, true);
/*
* If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
@@ -1715,12 +1716,14 @@ static int usdhi6_probe(struct platform_device *pdev)
if (!mmc)
return -ENOMEM;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto e_free_mmc;
+
ret = mmc_of_parse(mmc);
if (ret < 0)
goto e_free_mmc;
- mmc_regulator_get_supply(mmc);
-
host = mmc_priv(mmc);
host->mmc = mmc;
host->wait = USDHI6_WAIT_FOR_REQUEST;
@@ -1734,8 +1737,10 @@ static int usdhi6_probe(struct platform_device *pdev)
}
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
goto e_free_mmc;
+ }
host->imclk = clk_get_rate(host->clk);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index d313f948b96c..9cd3631170ef 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -223,8 +223,6 @@ static int m25p_probe(struct spi_device *spi)
*/
if (data && data->type)
flash_name = data->type;
- else if (!strcmp(spi->modalias, "spi-nor"))
- flash_name = NULL; /* auto-detect */
else
flash_name = spi->modalias;
@@ -289,19 +287,25 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
- /*
- * Generic support for SPI NOR that can be identified by the JEDEC READ
- * ID opcode (0x9F). Use this, if possible.
- */
- {"spi-nor"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
+static const struct of_device_id m25p_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, m25p_of_table);
+
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
.owner = THIS_MODULE,
+ .of_match_table = m25p_of_table,
},
.id_table = m25p_ids,
.probe = m25p_probe,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0099aba72a8b..df6f61137376 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -102,6 +102,7 @@ static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,dataflash", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
/* ......................................................................... */
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 2fc4957cbe7f..a70eb83e68f1 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -41,7 +41,7 @@
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index eadcfffc4f9c..a577ef8553d0 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -385,20 +385,28 @@ static int __init nettel_init(void)
}
rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
num_intel_partitions);
+ if (rc)
+ goto out_map_destroy;
#endif
if (amd_mtd) {
rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
num_amd_partitions);
+ if (rc)
+ goto out_mtd_unreg;
}
#ifdef CONFIG_MTD_CFI_INTELEXT
register_reboot_notifier(&nettel_notifier_block);
#endif
- return(rc);
+ return rc;
+out_mtd_unreg:
#ifdef CONFIG_MTD_CFI_INTELEXT
+ mtd_device_unregister(intel_mtd);
+out_map_destroy:
+ map_destroy(intel_mtd);
out_unmap1:
iounmap(nettel_intel_map.virt);
#endif
@@ -407,8 +415,7 @@ out_unmap2:
iounmap(nettel_mmcrp);
iounmap(nettel_amd_map.virt);
- return(rc);
-
+ return rc;
}
/****************************************************************************/
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 774b32fd29e6..3e614e9119d5 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -130,6 +130,8 @@ static const char * const *of_get_probes(struct device_node *dp)
count++;
res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
count = 0;
while (cplen > 0) {
res[count] = cp;
@@ -311,6 +313,10 @@ static int of_flash_probe(struct platform_device *dev)
ppdata.of_node = dp;
part_probe_types = of_get_probes(dp);
+ if (!part_probe_types) {
+ err = -ENOMEM;
+ goto err_out;
+ }
mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
NULL, 0);
of_free_probes(part_probe_types);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 41acc507b22e..44dc965a2f7c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -97,14 +97,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_flags & REQ_DISCARD)
return tr->discard(dev, block, nsect);
- switch(rq_data_dir(req)) {
- case READ:
+ if (rq_data_dir(req) == READ) {
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
return 0;
- case WRITE:
+ } else {
if (!tr->writesect)
return -EIO;
@@ -113,9 +112,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
- default:
- printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return -EIO;
}
}
@@ -423,7 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->discard) {
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
- new->rq->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(new->rq, UINT_MAX);
}
gd->queue = new->rq;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b2806a7e5f7..3324281d1f53 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -42,23 +42,20 @@ config MTD_SM_COMMON
default n
config MTD_NAND_DENALI
- tristate "Support Denali NAND controller"
- depends on HAS_DMA
- help
- Enable support for the Denali NAND controller. This should be
- combined with either the PCI or platform drivers to provide device
- registration.
+ tristate
config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
- depends on PCI && MTD_NAND_DENALI
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && PCI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
config MTD_NAND_DENALI_DT
tristate "Support Denali NAND controller as a DT device"
- depends on HAVE_CLK && MTD_NAND_DENALI
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && HAVE_CLK
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1f897ec3c242..075a027632b5 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -26,7 +26,8 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
-obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
index a20c73630b7b..169f99e38a26 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/brcmnand/brcmnand.h
@@ -50,7 +50,7 @@ static inline u32 brcmnand_readl(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -59,7 +59,7 @@ static inline u32 brcmnand_readl(void __iomem *addr)
static inline void brcmnand_writel(u32 val, void __iomem *addr)
{
/* See brcmnand_readl() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
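
The brcmnand fix is subtle: IS_ENABLED(x) only evaluates to 1 when x is
a Kconfig-style macro defined to 1 (or when CONFIG_x=m). __BIG_ENDIAN,
where the byteorder headers define it, carries a byte-order constant
(4321), so IS_ENABLED(__BIG_ENDIAN) was always false and the __raw
accessors were never used, even on big-endian MIPS. Testing the real
Kconfig symbol restores the intent — a sketch of the corrected predicate:

	#include <linux/kconfig.h>
	#include <linux/types.h>

	static inline bool uses_native_be_io(void)
	{
		/* both operands are genuine CONFIG_* symbols */
		return IS_ENABLED(CONFIG_MIPS) &&
		       IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	}
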
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feb6d18de78d..b90801302df4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -520,6 +520,32 @@ static struct nand_ecclayout hwecc4_2048 = {
},
};
+/*
+ * An ECC layout for using 4-bit ECC with large-page (4096 bytes) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_4096 = {
+ .eccbytes = 80,
+ .eccpos = {
+ /* at the end of spare sector */
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ /* 2 bytes at offset 0 hold manufacturer badblock markers */
+ {.offset = 2, .length = 46, },
+ /* 5 bytes at offset 8 hold BBT markers */
+ /* 8 bytes at offset 16 hold JFFS2 clean markers */
+ },
+};
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -796,18 +822,12 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
goto syndrome_done;
}
+ if (chunks == 8) {
+ info->ecclayout = hwecc4_4096;
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ goto syndrome_done;
+ }
- /* 4KiB page chips are not yet supported. The eccpos from
- * nand_ecclayout cannot hold 80 bytes and change to eccpos[]
- * breaks userspace ioctl interface with mtd-utils. Once we
- * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
- * for the 4KiB page chips.
- *
- * TODO: Note that nand_ecclayout has now been expanded and can
- * hold plenty of OOB entries.
- */
- dev_warn(&pdev->dev, "no 4-bit ECC support yet "
- "for 4KiB-page NAND\n");
ret = -EIO;
goto err;
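
The davinci hunk wires up the 4KiB-page case the old comment said was
impossible, now that nand_ecclayout can hold the 80 ECC positions. The
layout arithmetic is mechanical — 10 ECC bytes per 512-byte chunk,
packed at the tail of the 128-byte spare area. A standalone check of
the numbers:

	#include <stdio.h>

	int main(void)
	{
		int writesize = 4096, oobsize = 128;
		int chunks = writesize / 512;	/* 8 */
		int eccbytes = chunks * 10;	/* 80 */
		int first = oobsize - eccbytes;	/* 48 */

		printf("chunks=%d eccbytes=%d eccpos=%d..%d\n",
		       chunks, eccbytes, first, oobsize - 1);
		return 0;
	}
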
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 6e2f387b823f..de31514df282 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -30,19 +30,19 @@ MODULE_DEVICE_TABLE(pci, denali_pci_ids);
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret = -ENODEV;
+ int ret;
resource_size_t csr_base, mem_base;
unsigned long csr_len, mem_len;
struct denali_nand_info *denali;
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
if (!denali)
return -ENOMEM;
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret) {
- pr_err("Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
+ dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+ return ret;
}
if (id->driver_data == INTEL_CE4100) {
@@ -69,20 +69,19 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = pci_request_regions(dev, DENALI_NAND_NAME);
if (ret) {
- pr_err("Spectra: Unable to request memory regions\n");
- goto failed_enable_dev;
+ dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+ return ret;
}
denali->flash_reg = ioremap_nocache(csr_base, csr_len);
if (!denali->flash_reg) {
- pr_err("Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
+ dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+ return -ENOMEM;
}
denali->flash_mem = ioremap_nocache(mem_base, mem_len);
if (!denali->flash_mem) {
- pr_err("Spectra: ioremap_nocache failed!");
+ dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
ret = -ENOMEM;
goto failed_remap_reg;
}
@@ -99,13 +98,6 @@ failed_remap_mem:
iounmap(denali->flash_mem);
failed_remap_reg:
iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
-
return ret;
}
@@ -117,9 +109,6 @@ static void denali_pci_remove(struct pci_dev *dev)
denali_remove(denali);
iounmap(denali->flash_reg);
iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- kfree(denali);
}
static struct pci_driver denali_pci_driver = {
@@ -129,14 +118,4 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
-static int denali_init_pci(void)
-{
- return pci_register_driver(&denali_pci_driver);
-}
-module_init(denali_init_pci);
-
-static void denali_exit_pci(void)
-{
- pci_unregister_driver(&denali_pci_driver);
-}
-module_exit(denali_exit_pci);
+module_pci_driver(denali_pci_driver);
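
The denali_pci conversion removes every manual cleanup: devm_kzalloc()
and pcim_enable_device() tie the allocation, the device enable and the
requested regions to the device's lifetime, so the error ladder and
remove() shrink accordingly. The closing module_pci_driver() line is
pure boilerplate removal — it expands to roughly the init/exit pair the
hunk deletes:

	static int __init denali_pci_driver_init(void)
	{
		return pci_register_driver(&denali_pci_driver);
	}
	module_init(denali_pci_driver_init);

	static void __exit denali_pci_driver_exit(void)
	{
		pci_unregister_driver(&denali_pci_driver);
	}
	module_exit(denali_pci_driver_exit);
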
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 7da266a53979..0802158a3f75 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -24,7 +24,7 @@
#include <linux/rslib.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 51394e59901b..a4e27e891153 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -238,8 +238,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
- iowrite32be(page_addr, &ifc->ifc_nand.row0);
- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+ ifc_out32(page_addr, &ifc->ifc_nand.row0);
+ ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
@@ -301,19 +301,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int i;
/* set the chip select for NAND Transaction */
- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
- &ifc->ifc_nand.nand_csel);
+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
- ioread32be(&ifc->ifc_nand.nand_fir0),
- ioread32be(&ifc->ifc_nand.nand_fcr0));
+ ifc_in32(&ifc->ifc_nand.nand_fir0),
+ ifc_in32(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -336,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int sector_end = sector + chip->ecc.steps - 1;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -376,33 +376,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
-
- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
} else {
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
- iowrite32be(NAND_CMD_READOOB <<
- IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
else
- iowrite32be(NAND_CMD_READ0 <<
- IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
}
}
@@ -422,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
switch (command) {
/* READ0 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -437,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -453,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (command == NAND_CMD_PARAM)
timing = IFC_FIR_OP_RBCD;
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (timing << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(column, &ifc->ifc_nand.row3);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(column, &ifc->ifc_nand.row3);
/*
* although currently it's 8 bytes for READID, we always read
* the maximum 256 bytes (for PARAM)
*/
- iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
@@ -480,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
@@ -506,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
- iowrite32be(
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
- (IFC_FIR_OP_RDSTAT <<
- IFC_NAND_FIR1_OP6_SHIFT) |
- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
- &ifc->ifc_nand.nand_fir1);
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
@@ -527,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_STATUS <<
IFC_NAND_FCR0_CMD3_SHIFT));
- iowrite32be(
+ ifc_out32(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
- iowrite32be(
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
- (IFC_FIR_OP_RDSTAT <<
- IFC_NAND_FIR1_OP7_SHIFT) |
- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
- &ifc->ifc_nand.nand_fir1);
+ ifc_out32(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -555,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
@@ -563,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
- iowrite32be(ifc_nand_ctrl->index -
- ifc_nand_ctrl->column,
- &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
} else {
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
return;
}
- case NAND_CMD_STATUS:
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ case NAND_CMD_STATUS: {
+ void __iomem *addr;
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
@@ -590,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
+ addr = ifc_nand_ctrl->addr;
if (chip->options & NAND_BUSWIDTH_16)
- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
else
- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
return;
+ }
case NAND_CMD_RESET:
- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
@@ -658,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
offset = ifc_nand_ctrl->index++;
- return in_8(ifc_nand_ctrl->addr + offset);
+ return ifc_in8(ifc_nand_ctrl->addr + offset);
}
dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
@@ -680,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
@@ -726,18 +728,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
/*
* The chip always seems to report that it is
@@ -829,34 +831,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = ioread32be(&ifc->csor_cs[cs].csor);
- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+ csor = ifc_in32(&ifc->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
/* change PageSize 8K and SpareSize 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
+ ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
/* READID */
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(0x0, &ifc->ifc_nand.row3);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(0x0, &ifc->ifc_nand.row3);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- iowrite32be(0x0, &ifc->ifc_nand.row0);
- iowrite32be(0x0, &ifc->ifc_nand.col0);
+ ifc_out32(0x0, &ifc->ifc_nand.row0);
+ ifc_out32(0x0, &ifc->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
/* start read seq */
- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -866,8 +868,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- iowrite32be(csor, &ifc->csor_cs[cs].csor);
- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor, &ifc->csor_cs[cs].csor);
+ ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -884,7 +886,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -898,13 +900,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+ ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->options = NAND_NO_SUBPAGE_WRITE;
- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -917,7 +919,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+ csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
@@ -1006,7 +1008,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
phys_addr_t addr)
{
- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -1092,16 +1094,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
dev_set_drvdata(priv->dev, priv);
- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
- IFC_NAND_EVTER_EN_FTOER_EN |
- IFC_NAND_EVTER_EN_WPER_EN,
- &ifc->ifc_nand.nand_evter_en);
+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
- IFC_NAND_EVTER_INTR_FTOERIR_EN |
- IFC_NAND_EVTER_INTR_WPERIR_EN,
- &ifc->ifc_nand.nand_evter_intr_en);
+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
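
The bulk of the fsl_ifc_nand diff swaps hard-coded big-endian accessors
(iowrite32be()/in_be16()) for ifc_out32()/ifc_in32() wrappers, so the
same driver can serve IFC instances that are wired little-endian (the
real helpers live in the IFC header and pick the accessor from how the
controller was probed). A sketch of the indirection, with the endianness
flag passed explicitly for illustration:

	#include <linux/io.h>
	#include <linux/types.h>

	static inline u32 example_ifc_in32(void __iomem *addr, bool little_endian)
	{
		return little_endian ? ioread32(addr) : ioread32be(addr);
	}

	static inline void example_ifc_out32(u32 val, void __iomem *addr,
					     bool little_endian)
	{
		if (little_endian)
			iowrite32(val, addr);
		else
			iowrite32be(val, addr);
	}
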
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 7124400d903b..a8804a3da076 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -29,6 +29,10 @@ struct nand_flash_dev nand_flash_ids[] = {
* listed by full ID. We list them first so that we can easily identify
* the most specific match.
*/
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
+ 2 },
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 52c0c1a3899c..95d0cc49cfc2 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -649,7 +649,8 @@ static void free_device(struct nandsim *ns)
kmem_cache_free(ns->nand_pages_slab,
ns->pages[i].byte);
}
- kmem_cache_destroy(ns->nand_pages_slab);
+ if (ns->nand_pages_slab)
+ kmem_cache_destroy(ns->nand_pages_slab);
vfree(ns->pages);
}
}
@@ -729,8 +730,7 @@ static int init_nandsim(struct mtd_info *mtd)
/* Fill the partition_info structure */
if (parts_num > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
remains = ns->geom.totsz;
next_offset = 0;
@@ -739,14 +739,12 @@ static int init_nandsim(struct mtd_info *mtd)
if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
ns->partitions[i].name = get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = part_sz;
@@ -757,14 +755,12 @@ static int init_nandsim(struct mtd_info *mtd)
if (remains) {
if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
ns->partitions[i].name = get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = remains;
@@ -792,24 +788,18 @@ static int init_nandsim(struct mtd_info *mtd)
printk("options: %#x\n", ns->options);
if ((ret = alloc_device(ns)) != 0)
- goto error;
+ return ret;
/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->buf.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
ns->geom.pgszoob);
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
return 0;
-
-error:
- free_device(ns);
-
- return ret;
}
/*
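
The nandsim hunks fix a double free: init_nandsim() used to call
free_device() on its own failure paths, but its caller also tears the
device down on error, so partially-initialized state was released twice.
The fix makes the helper return bare errors and leaves cleanup to the
single owner. A sketch of that convention (struct ctx and the
alloc/release helpers are hypothetical):

	struct ctx;
	static int alloc_a(struct ctx *c), alloc_b(struct ctx *c);
	static void release_all(struct ctx *c);

	static int helper_init(struct ctx *c)
	{
		int ret;

		ret = alloc_a(c);
		if (ret)
			return ret;	/* nothing of ours to undo */

		ret = alloc_b(c);
		if (ret)
			return ret;	/* caller's release_all() frees a */

		return 0;
	}

	static int caller(struct ctx *c)
	{
		int ret = helper_init(c);

		if (ret)
			release_all(c);	/* one cleanup point, no double free */
		return ret;
	}
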
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
index 376bfe19104f..235ec7992b4c 100644
--- a/drivers/mtd/nand/omap_elm.c
+++ b/drivers/mtd/nand/omap_elm.c
@@ -574,5 +574,5 @@ module_platform_driver(elm_driver);
MODULE_DESCRIPTION("ELM driver for BCH error correction");
MODULE_AUTHOR("Texas Instruments");
-MODULE_ALIAS("platform: elm");
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
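
A module alias has to match the uevent string byte for byte: the
platform bus emits "platform:<driver name>" with no space, so the old
"platform: elm" alias could never trigger autoloading. Deriving the
alias from the same macro as the driver name keeps the two from
drifting apart — a sketch with an illustrative name:

	#define DRIVER_NAME	"example-elm"	/* illustrative value */

	/* udev matches "platform:example-elm" exactly */
	MODULE_ALIAS("platform:" DRIVER_NAME);
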
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1259cc558ce9..740983a34626 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -45,10 +45,13 @@
/*
* Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM. The largest of these is the PARAM command,
- * needing 256 bytes.
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
*/
-#define INIT_BUFFER_SIZE 256
+#define INIT_BUFFER_SIZE 2048
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
@@ -126,6 +129,13 @@
#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES 7
+
/* macros for registers read/write */
#define nand_writel(info, off, val) \
writel_relaxed((val), (info)->mmio_base + (off))
@@ -173,8 +183,6 @@ struct pxa3xx_nand_host {
/* calculated from pxa3xx_nand_flash data */
unsigned int col_addr_cycles;
unsigned int row_addr_cycles;
- size_t read_id_bytes;
-
};
struct pxa3xx_nand_info {
@@ -439,8 +447,8 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
ndcr |= NDCR_ND_RUN;
/* clear status bits and run */
- nand_writel(info, NDCR, 0);
nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, 0);
nand_writel(info, NDCR, ndcr);
}
@@ -675,8 +683,14 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
is_ready = 1;
}
+ /*
+	 * Clear all status bits before issuing the next command, which
+	 * can and will alter the status bits and deserve a new interrupt
+	 * on its own. This also lets the controller exit the IRQ.
+ */
+ nand_writel(info, NDSR, status);
+
if (status & NDSR_WRCMDREQ) {
- nand_writel(info, NDSR, NDSR_WRCMDREQ);
status &= ~NDSR_WRCMDREQ;
info->state = STATE_CMD_HANDLE;
@@ -697,8 +711,6 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb3);
}
- /* clear NDSR to let the controller exit the IRQ */
- nand_writel(info, NDSR, status);
if (is_completed)
complete(&info->cmd_complete);
if (is_ready)
@@ -899,18 +911,18 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_PARAM:
- info->buf_count = 256;
+ info->buf_count = INIT_BUFFER_SIZE;
info->ndcb0 |= NDCB0_CMD_TYPE(0)
| NDCB0_ADDR_CYC(1)
| NDCB0_LEN_OVRD
| command;
info->ndcb1 = (column & 0xFF);
- info->ndcb3 = 256;
- info->data_size = 256;
+ info->ndcb3 = INIT_BUFFER_SIZE;
+ info->data_size = INIT_BUFFER_SIZE;
break;
case NAND_CMD_READID:
- info->buf_count = host->read_id_bytes;
+ info->buf_count = READ_ID_BYTES;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
| command;
@@ -1247,9 +1259,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
return -EINVAL;
}
- /* calculate flash information */
- host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
-
/* calculate addressing information */
host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
@@ -1265,7 +1274,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
- ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
+ ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
ndcr |= NDCR_SPARE_EN; /* enable spare by default */
info->reg_ndcr = ndcr;
@@ -1276,23 +1285,10 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
- /*
- * We set 0 by hard coding here, for we don't support keep_config
- * when there is more than one chip attached to the controller
- */
- struct pxa3xx_nand_host *host = info->host[0];
uint32_t ndcr = nand_readl(info, NDCR);
- if (ndcr & NDCR_PAGE_SZ) {
- /* Controller's FIFO size */
- info->chunk_size = 2048;
- host->read_id_bytes = 4;
- } else {
- info->chunk_size = 512;
- host->read_id_bytes = 2;
- }
-
/* Set an initial chunk size */
+ info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
@@ -1473,6 +1469,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
+ /* Set a default chunk size */
+ info->chunk_size = 512;
+
ret = pxa3xx_nand_sensing(info);
if (ret) {
dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 77e96d2df96c..cc6bac537f5a 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -466,7 +466,7 @@ static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
- uint16_t ecc_reg;
+ uint32_t ecc_reg;
uint8_t ecc_status, err_byte;
int i, error = 0;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 6f93b2990d25..f97a58d6aae1 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -99,6 +99,15 @@
NFC_CMD_INT_ENABLE | \
NFC_DMA_INT_ENABLE)
+/* define bit use in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
+ (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
+ (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
+ (((tCAD) & 0x7) << 8))
+
/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE GENMASK(15, 8)
@@ -208,6 +217,7 @@ struct sunxi_nand_hw_ecc {
* @nand: base NAND chip structure
* @mtd: base MTD structure
* @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
* @selected: current active CS
* @nsels: number of CS lines required by the NAND chip
* @sels: array of CS lines descriptions
@@ -217,6 +227,8 @@ struct sunxi_nand_chip {
struct nand_chip nand;
struct mtd_info mtd;
unsigned long clk_rate;
+ u32 timing_cfg;
+ u32 timing_ctl;
int selected;
int nsels;
struct sunxi_nand_chip_sel sels[0];
@@ -403,6 +415,8 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
}
}
+ writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
writel(ctl, nfc->regs + NFC_REG_CTL);
sunxi_nand->selected = chip;
@@ -807,10 +821,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
return 0;
}
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+ u32 clk_period)
+{
+ u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (clk_cycles <= lut[i])
+ return i;
+ }
+
+ /* Doesn't fit */
+ return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
+
static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
const struct nand_sdr_timings *timings)
{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
u32 min_clk_period = 0;
+ s32 tWB, tADL, tWHR, tRHW, tCAD;
/* T1 <=> tCLS */
if (timings->tCLS_min > min_clk_period)
@@ -872,6 +909,48 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
if (timings->tWC_min > (min_clk_period * 2))
min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+ /* T16 - T19 + tCAD */
+ tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+ min_clk_period);
+ if (tWB < 0) {
+ dev_err(nfc->dev, "unsupported tWB\n");
+ return tWB;
+ }
+
+ tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+ if (tADL > 3) {
+ dev_err(nfc->dev, "unsupported tADL\n");
+ return -EINVAL;
+ }
+
+ tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+ if (tWHR > 3) {
+ dev_err(nfc->dev, "unsupported tWHR\n");
+ return -EINVAL;
+ }
+
+ tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+ min_clk_period);
+ if (tRHW < 0) {
+ dev_err(nfc->dev, "unsupported tRHW\n");
+ return tRHW;
+ }
+
+ /*
+ * TODO: according to ONFI specs this value only applies for DDR NAND,
+ * but Allwinner seems to set this to 0x7. Mimic them for now.
+ */
+ tCAD = 0x7;
+
+ /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+ chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns.
+ */
+ chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
@@ -884,8 +963,6 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
*/
chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
- /* TODO: configure T16-T19 */
-
return 0;
}
@@ -1376,13 +1453,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nfc);
- /*
- * TODO: replace these magic values with proper flags as soon as we
- * know what they are encoding.
- */
- writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
- writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
-
ret = sunxi_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 32a216d31141..ab7bda0bb245 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -18,7 +18,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
+#include <linux/io.h>
/*
* Note: Driver name and platform data format have been updated!
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 64a4f0edabc7..89bf4c1faa2b 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -26,6 +26,18 @@ config SPI_FSL_QUADSPI
depends on ARCH_MXC
help
This enables support for the Quad SPI controller in master mode.
- We only connect the NOR to this controller now.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an MTD device.
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 6a7ce1462247..e53333ef8582 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 52a872fa1b6e..d32b7e04ccca 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -26,6 +26,20 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+
+/* Controller needs the driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
+/*
+ * TKT253890: the controller needs the driver to fill the TX FIFO up to
+ * 16 bytes to trigger a data transfer, even though the extra data will
+ * not be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 (1 << 2)
+/* Controller cannot wake up from wait mode, TKT245618 */
+#define QUADSPI_QUIRK_TKT245618 (1 << 3)
/* The registers */
#define QUADSPI_MCR 0x00
@@ -191,9 +205,13 @@
#define SEQID_EN4B 10
#define SEQID_BRWR 11
+#define QUADSPI_MIN_IOMAP SZ_4M
+
enum fsl_qspi_devtype {
FSL_QUADSPI_VYBRID,
FSL_QUADSPI_IMX6SX,
+ FSL_QUADSPI_IMX7D,
+ FSL_QUADSPI_IMX6UL,
};
struct fsl_qspi_devtype_data {
@@ -201,20 +219,42 @@ struct fsl_qspi_devtype_data {
int rxfifo;
int txfifo;
int ahb_buf_size;
+ int driver_data;
};
static struct fsl_qspi_devtype_data vybrid_data = {
.devtype = FSL_QUADSPI_VYBRID,
.rxfifo = 128,
.txfifo = 64,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
};
static struct fsl_qspi_devtype_data imx6sx_data = {
.devtype = FSL_QUADSPI_IMX6SX,
.rxfifo = 128,
.txfifo = 512,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_4X_INT_CLK
+ | QUADSPI_QUIRK_TKT245618,
+};
+
+static struct fsl_qspi_devtype_data imx7d_data = {
+ .devtype = FSL_QUADSPI_IMX7D,
+ .rxfifo = 512,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static struct fsl_qspi_devtype_data imx6ul_data = {
+ .devtype = FSL_QUADSPI_IMX6UL,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
};
#define FSL_QSPI_MAX_CHIP 4
@@ -222,8 +262,10 @@ struct fsl_qspi {
struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
struct spi_nor nor[FSL_QSPI_MAX_CHIP];
void __iomem *iobase;
- void __iomem *ahb_base; /* Used when read from AHB bus */
+ void __iomem *ahb_addr;
u32 memmap_phy;
+ u32 memmap_offs;
+ u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
@@ -233,16 +275,28 @@ struct fsl_qspi {
u32 clk_rate;
unsigned int chip_base_addr; /* We may support two chips. */
bool has_second_chip;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
};
-static inline int is_vybrid_qspi(struct fsl_qspi *q)
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
}
-static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
}
/*
@@ -251,7 +305,7 @@ static inline int is_imx6sx_qspi(struct fsl_qspi *q)
*/
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
- return is_vybrid_qspi(q) ? __swab32(a) : a;
+ return needs_swap_endian(q) ? __swab32(a) : a;
}
static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
@@ -343,14 +397,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* Erase a sector */
lut_base = SEQID_SE * 4;
- if (q->nor_size <= SZ_16M) {
- cmd = SPINOR_OP_SE;
- addrlen = ADDR24BIT;
- } else {
- /* use the 4-byte address */
- cmd = SPINOR_OP_SE;
- addrlen = ADDR32BIT;
- }
+ cmd = q->nor[0].erase_opcode;
+ addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
@@ -419,6 +467,8 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
case SPINOR_OP_BRWR:
return SEQID_BRWR;
default:
+ if (cmd == q->nor[0].erase_opcode)
+ return SEQID_SE;
dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
break;
}
@@ -537,7 +587,7 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
/* clear the TX FIFO. */
tmp = readl(q->iobase + QUADSPI_MCR);
- writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+ writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
/* fill the TX data to the FIFO */
for (j = 0, i = ((count + 3) / 4); j < i; j++) {
@@ -546,6 +596,11 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
txbuf++;
}
+	/* fill the TX FIFO up to 16 bytes for i.MX7D */
+ if (needs_fill_txfifo(q))
+ for (; i < 4; i++)
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
@@ -606,6 +661,38 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
q->iobase + QUADSPI_BFGENCR);
}
+/* This function is used to prepare and enable the QSPI clock */
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+/* This function is used to disable and unprepare the QSPI clock */
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+}
+
/* We use this function to do some basic init for spi_nor_scan(). */
static int fsl_qspi_nor_setup(struct fsl_qspi *q)
{
@@ -613,11 +700,23 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
u32 reg;
int ret;
- /* the default frequency, we will change it in the future.*/
+	/* disable and unprepare clock to avoid a glitch reaching the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ /* the default frequency, we will change it in the future. */
ret = clk_set_rate(q->clk, 66000000);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
/* Init the LUT table. */
fsl_qspi_init_lut(q);
@@ -635,6 +734,9 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
+ /* clear all interrupt status */
+ writel(0xffffffff, q->iobase + QUADSPI_FR);
+
/* enable the interrupt */
writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
@@ -646,13 +748,20 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
unsigned long rate = q->clk_rate;
int ret;
- if (is_imx6sx_qspi(q))
+ if (needs_4x_clock(q))
rate *= 4;
+	/* disable and unprepare clock to avoid a glitch reaching the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
ret = clk_set_rate(q->clk, rate);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
/* Init the LUT table again. */
fsl_qspi_init_lut(q);
@@ -665,6 +774,8 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -730,11 +841,42 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
- dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
- cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+	/* if necessary, ioremap the buffer before the AHB read */
+ if (!q->ahb_addr) {
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+	/* remap if the requested data is outside the current mapping */
+ } else if (q->chip_base_addr + from < q->memmap_offs
+ || q->chip_base_addr + from + len >
+ q->memmap_offs + q->memmap_len) {
+ iounmap(q->ahb_addr);
+
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ }
+
+	dev_dbg(q->dev, "cmd [%x], read from 0x%p, len:%d\n",
+ cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
/* Read out the data directly from the AHB buffer.*/
- memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+ memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
*retlen += len;
return 0;
@@ -761,26 +903,26 @@ static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
struct fsl_qspi *q = nor->priv;
int ret;
- ret = clk_enable(q->clk_en);
- if (ret)
- return ret;
+ mutex_lock(&q->lock);
- ret = clk_enable(q->clk);
- if (ret) {
- clk_disable(q->clk_en);
- return ret;
- }
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ goto err_mutex;
fsl_qspi_set_base_addr(q, nor);
return 0;
+
+err_mutex:
+ mutex_unlock(&q->lock);
+ return ret;
}
static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
struct fsl_qspi *q = nor->priv;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
+ mutex_unlock(&q->lock);
}
static int fsl_qspi_probe(struct platform_device *pdev)
@@ -804,6 +946,10 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
return -ENODEV;
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
/* find the resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
q->iobase = devm_ioremap_resource(dev, res);
@@ -812,9 +958,11 @@ static int fsl_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- q->ahb_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->ahb_base))
- return PTR_ERR(q->ahb_base);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
q->memmap_phy = res->start;
@@ -827,15 +975,9 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (IS_ERR(q->clk))
return PTR_ERR(q->clk);
- ret = clk_prepare_enable(q->clk_en);
- if (ret) {
- dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(q->clk);
+ ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
- dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
+		dev_err(dev, "cannot enable the clock\n");
goto clk_failed;
}
@@ -853,10 +995,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
goto irq_failed;
}
- q->dev = dev;
- q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
- platform_set_drvdata(pdev, q);
-
ret = fsl_qspi_nor_setup(q);
if (ret)
goto irq_failed;
@@ -864,6 +1002,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
q->has_second_chip = true;
+ mutex_init(&q->lock);
+
/* iterate the subnodes. */
for_each_available_child_of_node(dev->of_node, np) {
char modalias[40];
@@ -892,24 +1032,24 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = of_modalias_node(np, modalias, sizeof(modalias));
if (ret < 0)
- goto irq_failed;
+ goto mutex_failed;
ret = of_property_read_u32(np, "spi-max-frequency",
&q->clk_rate);
if (ret < 0)
- goto irq_failed;
+ goto mutex_failed;
/* set the chip address for READID */
fsl_qspi_set_base_addr(q, nor);
ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
ppdata.of_node = np;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
/* Set the correct NOR size now. */
if (q->nor_size == 0) {
@@ -939,8 +1079,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (ret)
goto last_init_failed;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
return 0;
last_init_failed:
@@ -950,10 +1089,12 @@ last_init_failed:
i *= 2;
mtd_device_unregister(&q->mtd[i]);
}
+mutex_failed:
+ mutex_destroy(&q->lock);
irq_failed:
- clk_disable_unprepare(q->clk);
+ fsl_qspi_clk_disable_unprep(q);
clk_failed:
- clk_disable_unprepare(q->clk_en);
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
@@ -973,8 +1114,11 @@ static int fsl_qspi_remove(struct platform_device *pdev)
writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
writel(0x0, q->iobase + QUADSPI_RSER);
- clk_unprepare(q->clk);
- clk_unprepare(q->clk_en);
+ mutex_destroy(&q->lock);
+
+ if (q->ahb_addr)
+ iounmap(q->ahb_addr);
+
return 0;
}
@@ -985,12 +1129,19 @@ static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
static int fsl_qspi_resume(struct platform_device *pdev)
{
+ int ret;
struct fsl_qspi *q = platform_get_drvdata(pdev);
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
fsl_qspi_nor_setup(q);
fsl_qspi_set_map_addr(q);
fsl_qspi_nor_setup_last(q);
+ fsl_qspi_clk_disable_unprep(q);
+
return 0;
}
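
The fsl-quadspi rework replaces per-SoC predicates (is_vybrid_qspi(),
is_imx6sx_qspi()) with capability bits carried in the devtype data, so
adding i.MX7D and i.MX6UL support is mostly a matter of declaring their
quirks rather than touching every call site. A trimmed sketch of the
pattern:

	#define QUIRK_SWAP_ENDIAN	(1 << 0)
	#define QUIRK_4X_INT_CLK	(1 << 1)

	struct devtype_data {
		int driver_data;	/* OR of QUIRK_* bits */
	};

	/* a new SoC only needs a new table entry */
	static const struct devtype_data new_soc_data = {
		.driver_data = QUIRK_SWAP_ENDIAN | QUIRK_4X_INT_CLK,
	};

	static inline int needs_swap_endian(const struct devtype_data *d)
	{
		return d->driver_data & QUIRK_SWAP_ENDIAN;
	}
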
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
new file mode 100644
index 000000000000..9ad1dd0896c0
--- /dev/null
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -0,0 +1,482 @@
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL 0x000
+#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
+#define SPIFI_CTRL_MODE3 BIT(23)
+#define SPIFI_CTRL_DUAL BIT(28)
+#define SPIFI_CTRL_FBCLK BIT(30)
+#define SPIFI_CMD 0x004
+#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT BIT(15)
+#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op) ((op) << 24)
+#define SPIFI_ADDR 0x008
+#define SPIFI_IDATA 0x00c
+#define SPIFI_CLIMIT 0x010
+#define SPIFI_DATA 0x014
+#define SPIFI_MCMD 0x018
+#define SPIFI_STAT 0x01c
+#define SPIFI_STAT_MCINIT BIT(0)
+#define SPIFI_STAT_CMD BIT(1)
+#define SPIFI_STAT_RESET BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct nxp_spifi {
+ struct device *dev;
+ struct clk *clk_spifi;
+ struct clk *clk_reg;
+ void __iomem *io_base;
+ void __iomem *flash_base;
+ struct mtd_info mtd;
+ struct spi_nor nor;
+ bool memory_mode;
+ u32 mcmd;
+};
+
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_CMD), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "command timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_RESET), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "state reset timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+ int ret;
+
+ if (!spifi->memory_mode)
+ return 0;
+
+ ret = nxp_spifi_reset(spifi);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter command mode\n");
+ else
+ spifi->memory_mode = false;
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ if (spifi->memory_mode)
+ return 0;
+
+ writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ stat & SPIFI_STAT_MCINIT, 10, 30);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter memory mode\n");
+ else
+ spifi->memory_mode = true;
+
+ return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ *buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len, int write_enable)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_on(spifi);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, spifi->flash_base + from, len);
+ *retlen += len;
+
+ return 0;
+}
+
+static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return;
+
+ writel(to, spifi->io_base + SPIFI_ADDR);
+ *retlen += len;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->program_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(offs, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->erase_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+ switch (spifi->nor.flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+ break;
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ /* Memory mode supports address length between 1 and 4 */
+ if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ return -EINVAL;
+
+ spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+ SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+ return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+}
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+ struct device_node *np)
+{
+ struct mtd_part_parser_data ppdata;
+ enum read_mode flash_read;
+ u32 ctrl, property;
+ u16 mode = 0;
+ int ret;
+
+ if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+ switch (property) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported rx-bus-width\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_find_property(np, "spi-cpha", NULL))
+ mode |= SPI_CPHA;
+
+ if (of_find_property(np, "spi-cpol", NULL))
+ mode |= SPI_CPOL;
+
+ /* Setup control register defaults */
+ ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+ SPIFI_CTRL_CSHIGH(15) |
+ SPIFI_CTRL_FBCLK;
+
+ if (mode & SPI_RX_DUAL) {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_DUAL;
+ } else if (mode & SPI_RX_QUAD) {
+ ctrl &= ~SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_QUAD;
+ } else {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_NORMAL;
+ }
+
+ switch (mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ ctrl &= ~SPIFI_CTRL_MODE3;
+ break;
+ case SPI_MODE_3:
+ ctrl |= SPIFI_CTRL_MODE3;
+ break;
+ default:
+ dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+ return -EINVAL;
+ }
+
+ writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+ spifi->mtd.priv = &spifi->nor;
+ spifi->nor.mtd = &spifi->mtd;
+ spifi->nor.dev = spifi->dev;
+ spifi->nor.priv = spifi;
+ spifi->nor.read = nxp_spifi_read;
+ spifi->nor.write = nxp_spifi_write;
+ spifi->nor.erase = nxp_spifi_erase;
+ spifi->nor.read_reg = nxp_spifi_read_reg;
+ spifi->nor.write_reg = nxp_spifi_write_reg;
+
+ /*
+ * The first read on a hard reset isn't reliable, so do a
+ * dummy read of the ID before calling spi_nor_scan().
+ * The reason for this problem is unknown.
+ *
+ * The official NXP spifilib uses more or less the same
+ * workaround that is applied here by reading the device
+ * id multiple times.
+ */
+ nxp_spifi_dummy_id_read(&spifi->nor);
+
+ ret = spi_nor_scan(&spifi->nor, NULL, flash_read);
+ if (ret) {
+ dev_err(spifi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ ret = nxp_spifi_setup_memory_cmd(spifi);
+ if (ret) {
+ dev_err(spifi->dev, "memory command setup failed\n");
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&spifi->mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(spifi->dev, "mtd device parse failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct nxp_spifi *spifi;
+ struct resource *res;
+ int ret;
+
+ spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+ if (!spifi)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+ spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->io_base))
+ return PTR_ERR(spifi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+ spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->flash_base))
+ return PTR_ERR(spifi->flash_base);
+
+ spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ if (IS_ERR(spifi->clk_spifi)) {
+ dev_err(&pdev->dev, "spifi clock not found\n");
+ return PTR_ERR(spifi->clk_spifi);
+ }
+
+ spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(spifi->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found\n");
+ return PTR_ERR(spifi->clk_reg);
+ }
+
+ ret = clk_prepare_enable(spifi->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spifi->clk_spifi);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable spifi clock\n");
+ goto dis_clk_reg;
+ }
+
+ spifi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spifi);
+
+ /* Initialize and reset device */
+ nxp_spifi_reset(spifi);
+ writel(0, spifi->io_base + SPIFI_IDATA);
+ writel(0, spifi->io_base + SPIFI_MCMD);
+ nxp_spifi_reset(spifi);
+
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ ret = -ENODEV;
+ goto dis_clks;
+ }
+
+ ret = nxp_spifi_setup_flash(spifi, flash_np);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to setup flash chip\n");
+ goto dis_clks;
+ }
+
+ return 0;
+
+dis_clks:
+ clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+ clk_disable_unprepare(spifi->clk_reg);
+ return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+ struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&spifi->mtd);
+ clk_disable_unprepare(spifi->clk_spifi);
+ clk_disable_unprepare(spifi->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+ {.compatible = "nxp,lpc1773-spifi"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+ .probe = nxp_spifi_probe,
+ .remove = nxp_spifi_remove,
+ .driver = {
+ .name = "nxp-spifi",
+ .of_match_table = nxp_spifi_match,
+ },
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d78831b4422b..f59aedfe1462 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -29,6 +29,8 @@
#define SPI_NOR_MAX_ID_LEN 6
struct flash_info {
+ char *name;
+
/*
* This array stores the ID bytes.
* The first three bytes are the JEDEC ID.
@@ -59,7 +61,7 @@ struct flash_info {
#define JEDEC_MFR(info) ((info)->id[0])
-static const struct spi_device_id *spi_nor_match_id(const char *name);
+static const struct flash_info *spi_nor_match_id(const char *name);
/*
* Read the status register, returning its value in the location
@@ -169,7 +171,7 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
int enable)
{
int status;
@@ -469,7 +471,6 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -481,11 +482,9 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -498,17 +497,14 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = (_flags), \
- })
+ .flags = (_flags),
/* NOTE: double check command sets and memory organization when you add
* more nor chips. This current list focusses on newer chips, which
@@ -521,7 +517,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* For historical (and compatibility) reasons (before we got above config) some
* old entries may be missing 4K flag.
*/
-static const struct spi_device_id spi_nor_ids[] = {
+static const struct flash_info spi_nor_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
@@ -589,7 +585,8 @@ static const struct spi_device_id spi_nor_ids[] = {
/* Micron */
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -606,7 +603,7 @@ static const struct spi_device_id spi_nor_ids[] = {
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -614,8 +611,8 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
@@ -626,6 +623,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -636,6 +634,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
@@ -702,11 +701,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ },
};
-static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
u8 id[SPI_NOR_MAX_ID_LEN];
- struct flash_info *info;
+ const struct flash_info *info;
tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
@@ -715,7 +714,7 @@ static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
}
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = (void *)spi_nor_ids[tmp].driver_data;
+ info = &spi_nor_ids[tmp];
if (info->id_len) {
if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
@@ -961,7 +960,7 @@ static int micron_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
+static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
@@ -1003,8 +1002,7 @@ static int spi_nor_check(struct spi_nor *nor)
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
- const struct spi_device_id *id = NULL;
- struct flash_info *info;
+ const struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = nor->mtd;
struct device_node *np = dev->of_node;
@@ -1015,27 +1013,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
- /* Try to auto-detect if chip name wasn't specified */
- if (!name)
- id = spi_nor_read_id(nor);
- else
- id = spi_nor_match_id(name);
- if (IS_ERR_OR_NULL(id))
+ if (name)
+ info = spi_nor_match_id(name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
return -ENOENT;
- info = (void *)id->driver_data;
-
/*
* If the caller has specified a flash model name that can normally be
* detected using JEDEC, let's verify it.
*/
if (name && info->id_len) {
- const struct spi_device_id *jid;
+ const struct flash_info *jinfo;
- jid = spi_nor_read_id(nor);
- if (IS_ERR(jid)) {
- return PTR_ERR(jid);
- } else if (jid != id) {
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return PTR_ERR(jinfo);
+ } else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
@@ -1044,9 +1040,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
* information, even if it's not 100% accurate.
*/
dev_warn(dev, "found %s, expected %s\n",
- jid->name, id->name);
- id = jid;
- info = (void *)jid->driver_data;
+ jinfo->name, info->name);
+ info = jinfo;
}
}
@@ -1196,7 +1191,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
- dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
dev_dbg(dev,
@@ -1219,11 +1214,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
-static const struct spi_device_id *spi_nor_match_id(const char *name)
+static const struct flash_info *spi_nor_match_id(const char *name)
{
- const struct spi_device_id *id = spi_nor_ids;
+ const struct flash_info *id = spi_nor_ids;
- while (id->name[0]) {
+ while (id->name) {
if (!strcmp(name, id->name))
return id;
id++;
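
With driver_data gone, each INFO()/INFO6() entry now expands to plain
designated initializers inside a flash_info, with the chip name as a
first-class member. As a sketch of the expansion (hand-written, not
preprocessor output), the n25q064 line above becomes roughly:

/* { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) } */
{
	.name		= "n25q064",
	.id		= { 0x20, 0xba, 0x17 },	/* bytes of JEDEC ID 0x20ba17 */
	.id_len		= 3,			/* no extended ID */
	.sector_size	= 64 * 1024,
	.n_sectors	= 128,
	.page_size	= 256,
	.flags		= SECT_4K | SPI_NOR_QUAD_READ,
},

This is why spi_nor_read_id() and spi_nor_match_id() can now return
const struct flash_info * directly instead of casting driver_data.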
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 8e8525f0202f..31762120eb56 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -125,7 +125,8 @@ static int write_whole_device(void)
* Display the address, offset and data bytes at comparison failure.
* Return number of bitflips encountered.
*/
-static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
+ const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res;
@@ -135,8 +136,9 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
res = *su1 ^ *su2;
if (res) {
- pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
- (unsigned long)addr, i, *su1, *su2, res);
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, *su2, res);
bitflips += hweight8(res);
}
}
@@ -144,6 +146,9 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
return bitflips;
}
+#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
+ (count))
+
/*
* Compare with 0xff and show the address, offset and data bytes at
* comparison failure. Return number of bitflips encountered.
@@ -228,9 +233,10 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- bitflips = memcmpshow(addr, readbuf + use_offset,
- writebuf + (use_len_max * i) + use_offset,
- use_len);
+ bitflips = memcmpshowoffset(addr, use_offset,
+ readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
/* verify pre-offset area for 0xff */
bitflips += memffshow(addr, 0, readbuf, use_offset);
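
The bitflip count in memcmpshowoffset() is simply the population count of
the XOR of each byte pair, which is what hweight8() computes. A minimal
userspace sketch of that counting core (hypothetical helper names):

#include <stddef.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's hweight8(). */
static unsigned int popcount8(uint8_t v)
{
	unsigned int bits = 0;

	for (; v; v >>= 1)
		bits += v & 1;
	return bits;
}

/* Count differing bits between two equal-length buffers. */
static size_t count_bitflips(const uint8_t *a, const uint8_t *b, size_t n)
{
	size_t flips = 0;

	while (n--)
		flips += popcount8(*a++ ^ *b++);
	return flips;
}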
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c18f9e62a9fa..d18eb607bee6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -180,8 +180,8 @@ config VXLAN
will be called vxlan.
config GENEVE
- tristate "Generic Network Virtualization Encapsulation netdev"
- depends on INET && GENEVE_CORE
+ tristate "Generic Network Virtualization Encapsulation"
+ depends on INET && NET_UDP_TUNNEL
select NET_IP_TUNNEL
---help---
This allows one to create geneve virtual interfaces that provide
@@ -282,7 +282,6 @@ config VETH
config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
- select AVERAGE
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
@@ -297,6 +296,13 @@ config NLMON
diagnostics, etc. This is mostly intended for developers or support
to debug netlink issues. If unsure, say N.
+config NET_VRF
+ tristate "Virtual Routing and Forwarding (Lite)"
+ depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+ ---help---
+ This option enables support for mapping interfaces into VRFs. The
+ support enables the creation of VRF devices.
+
endif # NET_CORE
config SUNGEM_PHY
@@ -407,6 +413,13 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+config FUJITSU_ES
+ tristate "FUJITSU Extended Socket Network Device driver"
+ depends on ACPI
+ help
+ This driver provides support for the Extended Socket network device
+ on Extended Partitioning of the FUJITSU PRIMEQUEST 2000 E2 series.
+
source "drivers/net/hyperv/Kconfig"
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c12cb22478a7..900b0c5320bb 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_NET_VRF) += vrf.o
#
# Networking Drivers
@@ -67,3 +68,5 @@ obj-$(CONFIG_USB_NET_DRIVERS) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
+
+obj-$(CONFIG_FUJITSU_ES) += fjes/
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7fde4d5c2b28..3c45358844eb 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port)
{
- marker = NULL;
- port = NULL;
/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 19eb990d398c..771a449d2f56 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
+static struct slave *bond_get_old_active(struct bonding *bond,
+ struct slave *new_active)
+{
+ struct slave *slave;
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave == new_active)
+ continue;
+
+ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+ return slave;
+ }
+
+ return NULL;
+}
+
/* bond_do_fail_over_mac
*
* Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!new_active)
return;
+ if (!old_active)
+ old_active = bond_get_old_active(bond, new_active);
+
if (old_active) {
ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
ether_addr_copy(saddr.sa_data,
@@ -689,40 +709,57 @@ out:
}
-static bool bond_should_change_active(struct bonding *bond)
+static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
- if (!prim || !curr || curr->link != BOND_LINK_UP)
- return true;
+ if (!prim || prim->link != BOND_LINK_UP) {
+ if (!curr || curr->link != BOND_LINK_UP)
+ return NULL;
+ return curr;
+ }
+
if (bond->force_primary) {
bond->force_primary = false;
- return true;
+ return prim;
+ }
+
+ if (!curr || curr->link != BOND_LINK_UP)
+ return prim;
+
+ /* At this point, prim and curr are both up */
+ switch (bond->params.primary_reselect) {
+ case BOND_PRI_RESELECT_ALWAYS:
+ return prim;
+ case BOND_PRI_RESELECT_BETTER:
+ if (prim->speed < curr->speed)
+ return curr;
+ if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
+ return curr;
+ return prim;
+ case BOND_PRI_RESELECT_FAILURE:
+ return curr;
+ default:
+ netdev_err(bond->dev, "impossible primary_reselect %d\n",
+ bond->params.primary_reselect);
+ return curr;
}
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
- (prim->speed < curr->speed ||
- (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
- return false;
- if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
- return false;
- return true;
}
/**
- * find_best_interface - select the best available slave to be the active one
+ * bond_find_best_slave - select the best available slave to be the active one
* @bond: our bonding struct
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *slave, *bestslave = NULL, *primary;
+ struct slave *slave, *bestslave = NULL;
struct list_head *iter;
int mintime = bond->params.updelay;
- primary = rtnl_dereference(bond->primary_slave);
- if (primary && primary->link == BOND_LINK_UP &&
- bond_should_change_active(bond))
- return primary;
+ slave = bond_choose_primary_or_current(bond);
+ if (slave)
+ return slave;
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
@@ -749,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ !netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
@@ -941,7 +979,6 @@ static void bond_poll_controller(struct net_device *bond_dev)
if (bond_3ad_get_active_agg_info(bond, &ad_info))
return;
- rcu_read_lock_bh();
bond_for_each_slave_rcu(bond, slave, iter) {
ops = slave->dev->netdev_ops;
if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
@@ -962,7 +999,6 @@ static void bond_poll_controller(struct net_device *bond_dev)
ops->ndo_poll_controller(slave->dev);
up(&ni->dev_lock);
}
- rcu_read_unlock_bh();
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -1708,9 +1744,16 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
- if (!bond_has_slaves(bond) &&
- ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
- eth_hw_addr_random(bond_dev);
+ if (!bond_has_slaves(bond)) {
+ if (ether_addr_equal_64bits(bond_dev->dev_addr,
+ slave_dev->dev_addr))
+ eth_hw_addr_random(bond_dev);
+ if (bond_dev->type != ARPHRD_ETHER) {
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+ }
return res;
}
@@ -1899,6 +1942,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond %s\n",
bond_dev->name);
+ bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
return ret;
@@ -3051,7 +3095,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
int noff, proto = -1;
if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect_flow_keys(skb, fk);
+ return skb_flow_dissect_flow_keys(skb, fk, 0);
fk->ports.ports = 0;
noff = skb_network_offset(skb);
@@ -3734,7 +3778,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
struct slave *slave;
struct list_head *iter;
struct bond_up_slave *new_arr, *old_arr;
- int slaves_in_agg;
int agg_id = 0;
int ret = 0;
@@ -3765,7 +3808,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
}
goto out;
}
- slaves_in_agg = ad_info.ports;
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
@@ -4076,9 +4118,8 @@ void bond_setup(struct net_device *bond_dev)
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
/* Initialize the device options */
- bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
- bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+ bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* don't acquire bond device's netif_tx_lock when transmitting */
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1bda29249d12..db760e84119f 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 },
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
+ [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
-
if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
int port_key =
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
-
if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
return -EINVAL;
@@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+ int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+ bond_opt_initval(&newval, dynamic_lb);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
0;
}
@@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.ad_select))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+ bond->params.tlb_dynamic_lb))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
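
The IFLA_BOND_TLB_DYNAMIC_LB hunks above show the full checklist for
exposing a bonding option over netlink: the attribute policy, the
changelink parser, the dump size estimate and the dump itself. Condensed
into one place for a hypothetical u8 attribute IFLA_BOND_FOO (sketch only,
the FOO names are made up):

/* 1. bond_policy[]: validate the attribute */
[IFLA_BOND_FOO] = { .type = NLA_U8 },

/* 2. bond_changelink(): apply it through the option framework */
if (data[IFLA_BOND_FOO]) {
	bond_opt_initval(&newval, nla_get_u8(data[IFLA_BOND_FOO]));
	err = __bond_opt_set(bond, BOND_OPT_FOO, &newval);
	if (err)
		return err;
}

/* 3. bond_get_size(): reserve room in the dump */
nla_total_size(sizeof(u8)) +	/* IFLA_BOND_FOO */

/* 4. bond_fill_info(): report the current value */
if (nla_put_u8(skb, IFLA_BOND_FOO, bond->params.foo))
	goto nla_put_failure;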
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e9c624d54dd4..55e93b6b6d21 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_user_port_key_tbl,
.set = bond_option_ad_user_port_key_set,
+ },
+ [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+ .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+ .name = "num_grat_arp",
+ .desc = "Number of peer notifications to send on failover event",
+ .values = bond_num_peer_notif_tbl,
+ .set = bond_option_num_peer_notif_set
}
};
@@ -730,19 +737,6 @@ static int bond_option_mode_set(struct bonding *bond,
return 0;
}
-static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
- struct slave *slave)
-{
- return bond_uses_primary(bond) && slave ? slave->dev : NULL;
-}
-
-struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
-{
- struct slave *slave = rcu_dereference(bond->curr_active_slave);
-
- return __bond_option_active_slave_get(bond, slave);
-}
-
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 31835a4dab57..f4ae72086215 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
bonding_show_ad_select, bonding_sysfs_store_option);
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
struct bonding *bond = to_bond(d);
return sprintf(buf, "%d\n", bond->params.num_peer_notif);
}
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
- bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+ bonding_show_num_peer_notif, bonding_sysfs_store_option);
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
- bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+ bonding_show_num_peer_notif, bonding_sysfs_store_option);
/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b3b922adc0e4..615c65da39be 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1120,7 +1120,7 @@ static void cfhsi_setup(struct net_device *dev)
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = free_netdev;
dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 9da06537237f..c2dea4916e5d 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -427,7 +427,7 @@ static void caifdev_setup(struct net_device *dev)
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CAIF_MAX_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = free_netdev;
skb_queue_head_init(&serdev->head);
serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 72ea9ff9bb9c..de3962014af7 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -710,7 +710,7 @@ static void cfspi_setup(struct net_device *dev)
dev->netdev_ops = &cfspi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->mtu = SPI_MAX_PAYLOAD_SIZE;
dev->destructor = free_netdev;
skb_queue_head_init(&cfspi->qhead);
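
The three CAIF hunks above are the same one-line conversion: instead of
abusing a zero tx_queue_len, a virtual device now sets IFF_NO_QUEUE to ask
the core for the noqueue discipline. In a setup callback the pattern looks
like this (sketch, hypothetical device):

static void demo_setup(struct net_device *dev)
{
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	/* Request the noqueue qdisc; leave tx_queue_len at its default. */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->destructor = free_netdev;
}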
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
}
/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
}
at91_read_mb(dev, mb, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
return 0;
at91_poll_err_frame(dev, cf, reg_sr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
return;
at91_irq_err_state(dev, cf, new_state);
- netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
priv->can.state = new_state;
}
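
The reordering in these at91 hunks, repeated across the CAN drivers that
follow, is a correctness fix rather than style: netif_rx() and
netif_receive_skb() hand the skb to the stack, which may free it, so
reading cf->can_dlc afterwards risks a use-after-free. The safe shape is
(sketch):

/* Finish every access to the frame before giving the skb away. */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;	/* cf is still valid here */
netif_receive_skb(skb);		/* skb, and cf with it, may be freed now */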
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
priv->can.state = state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 041525d2595c..5d214d135332 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int err;
+ struct pinctrl *p;
/* basic c_can configuration */
err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* activate pins */
- pinctrl_pm_select_default_state(dev->dev.parent);
+ /* Attempt to use "active" if available else use "default" */
+ p = pinctrl_get_select(priv->device, "active");
+ if (!IS_ERR(p))
+ pinctrl_put(p);
+ else
+ pinctrl_pm_select_default_state(priv->device);
+
return 0;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
for (i = 0; i < cf->can_dlc; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e9b1810d319f..aede704605c6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
struct can_frame *cf = (struct can_frame *)skb->data;
u8 dlc = cf->can_dlc;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx(priv->echo_skb[idx]);
priv->echo_skb[idx] = NULL;
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
if (unlikely(!skb))
return NULL;
- __net_timestamp(skb);
skb->protocol = htons(ETH_P_CANFD);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
*cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
memset(*cfd, 0, sizeof(struct canfd_frame));
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..c83f0f03482b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
return 0;
do_bus_err(dev, cf, reg_esr);
- netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- netif_receive_skb(skb);
-
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
}
flexcan_read_fifo(dev, cf);
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -806,7 +805,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
reg |= FLEXCAN_CTRL_SMP;
- netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
flexcan_write(reg, &regs->ctrl);
/* print chip status */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
cf->data[i] = (u8)(slot[j] >> shift);
}
}
- netif_receive_skb(skb);
/* Update statistics and read pointer */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
}
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7deb80dcbe8c..7bd54191f962 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
err = clk_prepare_enable(priv->clk);
if (err) {
- netdev_err(ndev, "failed to enable periperal clock, error %d\n",
+ netdev_err(ndev,
+ "failed to enable peripheral clock, error %d\n",
err);
goto out;
}
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+ netdev_err(ndev, "request_irq(%d) failed, error %d\n",
+ ndev->irq, err);
goto out_close;
}
can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
+ err = irq;
goto fail;
}
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "clkp1");
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
goto fail_clk;
}
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
+ dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
devm_can_led_init(ndev);
- dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
priv->regs, ndev->irq);
return 0;
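
The probe fix above follows from platform_get_irq() returning the IRQ
number on success and a negative errno on failure, so "if (!irq)" both
misses real errors and reports the wrong code. The conventional pattern
(sketch):

irq = platform_get_irq(pdev, 0);
if (irq < 0)
	return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */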
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f64f5290d6f8..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
if (!skb)
return;
- __net_timestamp(skb);
skb->dev = sl->dev;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -215,13 +214,14 @@ static void slc_bump(struct slcan *sl)
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
- netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
+ netif_rx_ni(skb);
}
/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->power = devm_regulator_get(&spi->dev, "vdd");
- priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+ if (priv->after_suspend & AFTER_SUSPEND_POWER)
mcp251x_power_enable(priv->power, 1);
+
+ if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
- if (priv->after_suspend & AFTER_SUSPEND_UP) {
- mcp251x_power_enable(priv->transceiver, 1);
- queue_work(priv->wq, &priv->restart_work);
- } else {
- priv->after_suspend = 0;
- }
+ priv->after_suspend = 0;
}
+
priv->force_quit = 0;
enable_irq(spi->irq);
return 0;
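
Switching to devm_regulator_get_optional() makes the vdd and xceiver
supplies genuinely optional: when no supply is described, the call returns
an error pointer instead of a dummy regulator, and only -EPROBE_DEFER needs
to abort the probe. A hedged sketch of the consumer-side pattern:

struct regulator *reg;
int ret;

reg = devm_regulator_get_optional(&spi->dev, "vdd");
if (IS_ERR(reg)) {
	if (PTR_ERR(reg) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* supply exists but isn't ready */
	reg = NULL;			/* treat anything else as "absent" */
}
if (reg) {
	ret = regulator_enable(reg);
	if (ret)
		return ret;
}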
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
cf->data[i] = msg->msg.can_msg.msg[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
stats->rx_errors++;
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[7] = rxerr;
}
- netif_rx(skb);
-
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
cf->data[i] = msg->msg.rx.data[i];
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
return;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 8b4d3e6875eb..5eee62badf45 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -162,7 +162,7 @@ struct gs_can {
struct can_bittiming_const bt_const;
unsigned int channel; /* channel number */
- /* This lock prevents a race condition between xmit and recieve. */
+ /* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -274,7 +274,7 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
struct gs_can *dev;
@@ -376,7 +376,7 @@ static void gs_usb_recieve_bulk_callback(struct urb *urb)
usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
hf,
sizeof(struct gs_host_frame),
- gs_usb_recieve_bulk_callback,
+ gs_usb_receive_bulk_callback,
usbcan
);
@@ -605,7 +605,7 @@ static int gs_can_open(struct net_device *netdev)
GSUSB_ENDPOINT_IN),
buf,
sizeof(struct gs_host_frame),
- gs_usb_recieve_bulk_callback,
+ gs_usb_receive_bulk_callback,
parent);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..838545ce468d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
hwts->hwtstamp = timeval_to_ktime(tv);
}
- netif_rx(skb);
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- /* push the skb */
- netif_rx(skb);
-
/* update statistics */
mc->netdev->stats.rx_packets++;
mc->netdev->stats.rx_bytes += cf->can_dlc;
+ /* push the skb */
+ netif_rx(skb);
return 0;
@@ -855,6 +854,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB adapter
*/
+static const struct can_bittiming_const pcan_usb_const = {
+ .name = "pcan_usb",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
@@ -863,17 +874,7 @@ const struct peak_usb_adapter pcan_usb = {
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
- .bittiming_const = {
- .name = "pcan_usb",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 64,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 7921cff93a63..5a2e341a6d1e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
dev->can.clock = peak_usb_adapter->clock;
- dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+ dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+ dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 9e624f05ad4d..506fe506c9d3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -48,8 +48,8 @@ struct peak_usb_adapter {
u32 device_id;
u32 ctrlmode_supported;
struct can_clock clock;
- const struct can_bittiming_const bittiming_const;
- const struct can_bittiming_const data_bittiming_const;
+ const struct can_bittiming_const * const bittiming_const;
+ const struct can_bittiming_const * const data_bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
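
Replacing the embedded bittiming structs with const pointers lets the
adapter descriptors live entirely in read-only data and lets several
adapters share one timing table instead of each carrying its own copy. The
resulting idiom, as the pcan_usb_*.c hunks show (sketch with made-up
names):

static const struct can_bittiming_const demo_bt_const = {
	.name      = "demo",
	.tseg1_min = 1, .tseg1_max = 16,
	.tseg2_min = 1, .tseg2_max = 8,
	.sjw_max   = 4,
	.brp_min   = 1, .brp_max = 1024, .brp_inc = 1,
};

static const struct peak_usb_adapter demo_adapter = {
	.name            = "Demo adapter",
	.bittiming_const = &demo_bt_const,	/* shared, read-only */
};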
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 09d14e70abd7..ce44a033f63b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
}
/* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_fd = {
.name = "PCAN-USB FD",
.device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_fd",
- .tseg1_min = 1,
- .tseg1_max = 64,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
- .data_bittiming_const = {
- .name = "pcan_usb_fd",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_fd_const,
+ .data_bittiming_const = &pcan_usb_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
};
/* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_pro_fd = {
.name = "PCAN-USB Pro FD",
.device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_pro_fd",
- .tseg1_min = 1,
- .tseg1_max = 64,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
- .data_bittiming_const = {
- .name = "pcan_usb_pro_fd",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_pro_fd_const,
+ .data_bittiming_const = &pcan_usb_pro_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..bbdd6058cd2f 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
hwts = skb_hwtstamps(skb);
hwts->hwtstamp = timeval_to_ktime(tv);
- netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
+ netif_rx(skb);
return 0;
}
@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
/*
* describe the PCAN-USB Pro adapter
*/
+static const struct can_bittiming_const pcan_usb_pro_const = {
+ .name = "pcan_usb_pro",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_pro = {
.name = "PCAN-USB Pro",
.device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
.clock = {
.freq = PCAN_USBPRO_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_pro",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_pro_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
else
memcpy(cf->data, msg->data, cf->can_dlc);
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
} else {
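
This hunk and the two pcan_usb_pro hunks above make the same ordering fix: once netif_rx() has been called, the skb belongs to the network stack and may be freed at any time, so the rx_bytes accounting that dereferences the CAN frame inside it must happen first. A minimal sketch of the corrected completion order; the helper and its arguments are illustrative, not from the driver:

    #include <linux/can.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void can_rx_complete(struct net_device *netdev, struct sk_buff *skb,
                                const struct can_frame *cf)
    {
            netdev->stats.rx_packets++;
            netdev->stats.rx_bytes += cf->can_dlc; /* frame still owned by us */
            netif_rx(skb);                         /* ownership passes here   */
            /* neither skb nor cf may be touched beyond this point */
    }
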
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0ce868de855d..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!(skb->tstamp.tv64))
- __net_timestamp(skb);
-
netif_rx_ni(skb);
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7ad0a4d8e475..4c483d937481 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -46,13 +46,13 @@ config NET_DSA_MV88E6171
ethernet switch chips.
config NET_DSA_MV88E6352
- tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+ tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6172, 88E6176 and 88E6352
- ethernet switch chips.
+ This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+ 88E6321 and 88E6352 ethernet switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..9d56515f4c4d 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -418,7 +418,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
core_writel(priv, port, CORE_FAST_AGE_PORT);
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
+ reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
@@ -432,6 +432,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
if (!timeout)
return -ETIMEDOUT;
+ core_writel(priv, 0, CORE_FAST_AGE_CTRL);
+
return 0;
}
@@ -507,7 +509,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
u32 reg;
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
- cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
+ cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
switch (state) {
case BR_STATE_DISABLED:
@@ -531,10 +533,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
}
/* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding state to Disabled, Blocking or Listening state
+ * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
+ * state (hw_state)
*/
if (cur_hw_state != hw_state) {
- if (cur_hw_state & 4 && !(hw_state & 4)) {
+ if (cur_hw_state >= G_MISTP_LEARN_STATE &&
+ hw_state <= G_MISTP_LISTEN_STATE) {
ret = bcm_sf2_sw_fast_age_port(ds, port);
if (ret) {
pr_err("%s: fast-ageing failed\n", __func__);
@@ -696,9 +700,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
}
/* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround
+ * divert reads towards our workaround. This is only required for
+ * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+ * that we can use the regular SWITCH_MDIO master controller instead.
+ *
+ * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+ * to have a 1:1 mapping between Port address and PHY address in order
+ * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+ * not what we want here, so we initialize phys_mii_mask to 0 to always
+ * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
*/
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ if (of_machine_is_compatible("brcm,bcm7445d0"))
+ ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ else
+ ds->phys_mii_mask = 0;
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
@@ -890,15 +905,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- u32 duplex, pause, speed;
+ u32 duplex, pause;
u32 reg;
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
- speed = core_readl(priv, CORE_SPDSTS);
-
- speed >>= (port * SPDSTS_SHIFT);
- speed &= SPDSTS_MASK;
status->link = 0;
@@ -933,18 +944,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
reg &= ~LINK_STS;
core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
- switch (speed) {
- case SPDSTS_10:
- status->speed = SPEED_10;
- break;
- case SPDSTS_100:
- status->speed = SPEED_100;
- break;
- case SPDSTS_1000:
- status->speed = SPEED_1000;
- break;
- }
-
if ((pause & (1 << port)) &&
(pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
status->asym_pause = 1;
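
Taken together, the bcm_sf2 fast-age hunks yield the sequence below: select the port, start the operation with EN_AGE_DYNAMIC set so only dynamically learned ARL entries are flushed, poll the done bit with a bounded timeout, and finally clear the control register so stale bits cannot leak into a later operation. This is a reconstruction from the fragments shown, assuming the elided polling loop follows the usual read-poll idiom:

    static int bcm_sf2_fast_age_port_sketch(struct bcm_sf2_priv *priv, int port)
    {
            unsigned int timeout = 1000;
            u32 reg;

            core_writel(priv, port, CORE_FAST_AGE_PORT);

            reg = core_readl(priv, CORE_FAST_AGE_CTRL);
            reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
            core_writel(priv, reg, CORE_FAST_AGE_CTRL);

            do {
                    reg = core_readl(priv, CORE_FAST_AGE_CTRL);
                    if (!(reg & FAST_AGE_STR_DONE))
                            break;
                    cpu_relax();
            } while (--timeout);

            if (!timeout)
                    return -ETIMEDOUT;

            /* the fix: leave the control register clean for the next user */
            core_writel(priv, 0, CORE_FAST_AGE_CTRL);
            return 0;
    }
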
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 22e2ebf31333..789d7b7737da 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
-static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \
- u64 val) \
+static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
+ u32 off) \
{ \
spin_lock(&priv->indir_lock); \
reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
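
The parameter swap above aligns the generated name##_writeq accessor with the kernel-wide writeq(value, address) convention, so the read and write halves of the pair compose naturally. Assuming the macro is instantiated with the name "core" (as the core_readl/core_writel calls elsewhere in the driver suggest), usage looks like this; REG_EXAMPLE and SOME_BIT are placeholders, not real register definitions:

    u64 val;

    val = core_readq(priv, REG_EXAMPLE);            /* (priv, offset)        */
    core_writeq(priv, val | SOME_BIT, REG_EXAMPLE); /* (priv, value, offset) */
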
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 71a29a7ce538..3de2a6d73fdc 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -129,6 +129,7 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .adjust_link = mv88e6xxx_adjust_link,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 32f4a08e9bc9..3e8386529965 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -182,6 +182,7 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .adjust_link = mv88e6xxx_adjust_link,
};
MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1c7808495a9d..c2daaf087761 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -108,6 +108,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .adjust_link = mv88e6xxx_adjust_link,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
@@ -116,9 +117,14 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
- .fdb_add = mv88e6xxx_port_fdb_add,
- .fdb_del = mv88e6xxx_port_fdb_del,
- .fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_pvid_get = mv88e6xxx_port_pvid_get,
+ .port_pvid_set = mv88e6xxx_port_pvid_set,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_getnext = mv88e6xxx_vlan_getnext,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 632815c10a40..1f5129c105fb 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
return "Marvell 88E6172";
if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
return "Marvell 88E6176";
+ if (ret == PORT_SWITCH_ID_6320_A1)
+ return "Marvell 88E6320 (A1)";
+ if (ret == PORT_SWITCH_ID_6320_A2)
+ return "Marvell 88e6320 (A2)";

+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+ return "Marvell 88E6320";
+ if (ret == PORT_SWITCH_ID_6321_A1)
+ return "Marvell 88E6321 (A1)";
+ if (ret == PORT_SWITCH_ID_6321_A2)
+ return "Marvell 88e6321 (A2)";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+ return "Marvell 88E6321";
if (ret == PORT_SWITCH_ID_6352_A0)
return "Marvell 88E6352 (A0)";
if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
return 0;
}
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
- int ret;
-
- *temp = 0;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
- if (ret < 0)
- return ret;
-
- *temp = (ret & 0xff) - 25;
-
- return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
- int ret;
-
- *temp = 0;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
-
- *temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
- return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
- int ret;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
- temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
- (ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
- int ret;
-
- *alarm = false;
-
- ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
- if (ret < 0)
- return ret;
-
- *alarm = !!(ret & 0x40);
-
- return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
static int mv88e6352_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -171,8 +123,9 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
mutex_lock(&ps->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
- 0xc000 | (addr & 0xff));
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_READ |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
goto error;
@@ -180,7 +133,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
error:
mutex_unlock(&ps->eeprom_mutex);
return ret;
@@ -253,11 +206,11 @@ static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
{
int ret;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
if (ret < 0)
return ret;
- if (!(ret & 0x0400))
+ if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
return -EROFS;
return 0;
@@ -271,12 +224,13 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
mutex_lock(&ps->eeprom_mutex);
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
if (ret < 0)
goto error;
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
- 0xb000 | (addr & 0xff));
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_WRITE |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
if (ret < 0)
goto error;
@@ -374,13 +328,14 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .adjust_link = mv88e6xxx_adjust_link,
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6352_get_temp,
- .get_temp_limit = mv88e6352_get_temp_limit,
- .set_temp_limit = mv88e6352_set_temp_limit,
- .get_temp_alarm = mv88e6352_get_temp_alarm,
+ .get_temp = mv88e6xxx_get_temp,
+ .get_temp_limit = mv88e6xxx_get_temp_limit,
+ .set_temp_limit = mv88e6xxx_set_temp_limit,
+ .get_temp_alarm = mv88e6xxx_get_temp_alarm,
#endif
.get_eeprom = mv88e6352_get_eeprom,
.set_eeprom = mv88e6352_set_eeprom,
@@ -389,10 +344,18 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
- .fdb_add = mv88e6xxx_port_fdb_add,
- .fdb_del = mv88e6xxx_port_fdb_del,
- .fdb_getnext = mv88e6xxx_port_fdb_getnext,
+ .port_pvid_get = mv88e6xxx_port_pvid_get,
+ .port_pvid_set = mv88e6xxx_port_pvid_set,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_getnext = mv88e6xxx_vlan_getnext,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_getnext = mv88e6xxx_port_fdb_getnext,
};
-MODULE_ALIAS("platform:mv88e6352");
MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..6f13f7206762 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2,6 +2,9 @@
* net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
* Copyright (c) 2008 Marvell Semiconductor
*
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ * Added support for VLAN Table Unit operations
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -11,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
@@ -391,6 +395,7 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
for (i = 0; i < DSA_MAX_PORTS; i++) {
struct net_device *dev;
int uninitialized_var(port_status);
+ int pcs_ctrl;
int link;
int speed;
int duplex;
@@ -400,6 +405,10 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
if (dev == NULL)
continue;
+ pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
+ if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
+ continue;
+
link = 0;
if (dev->flags & IFF_UP) {
port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
@@ -517,6 +526,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
return false;
}
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6320:
+ case PORT_SWITCH_ID_6321:
+ return true;
+ }
+ return false;
+}
+
static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -545,6 +566,73 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
return false;
}
+/* We expect the switch to perform auto-negotiation if there is a real
+ * PHY. However, in the case of a fixed-link PHY, we force the port
+ * settings from the fixed-link settings.
+ */
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u32 ret, reg;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ if (ret < 0)
+ goto out;
+
+ reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX |
+ PORT_PCS_CTRL_UNFORCED);
+
+ reg |= PORT_PCS_CTRL_FORCE_LINK;
+ if (phydev->link)
+ reg |= PORT_PCS_CTRL_LINK_UP;
+
+ if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+ goto out;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ reg |= PORT_PCS_CTRL_1000;
+ break;
+ case SPEED_100:
+ reg |= PORT_PCS_CTRL_100;
+ break;
+ case SPEED_10:
+ reg |= PORT_PCS_CTRL_10;
+ break;
+ default:
+ pr_info("Unknown speed");
+ goto out;
+ }
+
+ reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= PORT_PCS_CTRL_DUPLEX_FULL;
+
+ if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
+ (port >= ps->num_ports - 2)) {
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+ PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+ }
+ _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+
+out:
+ mutex_unlock(&ps->smi_mutex);
+}
+
/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
@@ -565,7 +653,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
- if (mv88e6xxx_6352_family(ds))
+ if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +884,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
}
-#ifdef CONFIG_NET_DSA_HWMON
-
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int val;
-
- *temp = 0;
-
- mutex_lock(&ps->smi_mutex);
-
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
- if (ret < 0)
- goto error;
-
- /* Enable temperature sensor */
- ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
- if (ret < 0)
- goto error;
-
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
- if (ret < 0)
- goto error;
-
- /* Wait for temperature to stabilize */
- usleep_range(10000, 12000);
-
- val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
- if (val < 0) {
- ret = val;
- goto error;
- }
-
- /* Disable temperature sensor */
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
- if (ret < 0)
- goto error;
-
- *temp = ((val & 0x1f) - 5) * 5;
-
-error:
- _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
u16 mask)
@@ -1000,7 +1040,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
if (ret < 0)
return ret;
@@ -1127,7 +1167,7 @@ int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
ps->bridge_mask[fid] = br_port_mask;
if (fid != ps->fid[port]) {
- ps->fid_mask |= 1 << ps->fid[port];
+ clear_bit(ps->fid[port], ps->fid_bitmap);
ps->fid[port] = fid;
ret = _mv88e6xxx_update_bridge_config(ds, fid);
}
@@ -1161,9 +1201,16 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
mutex_lock(&ps->smi_mutex);
- newfid = __ffs(ps->fid_mask);
+ newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
+ if (unlikely(newfid > ps->num_ports)) {
+ netdev_err(ds->ports[port], "the first %d FIDs are all in use\n",
+ ps->num_ports);
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
ps->fid[port] = newfid;
- ps->fid_mask &= (1 << newfid);
+ set_bit(newfid, ps->fid_bitmap);
ps->bridge_mask[fid] &= ~(1 << port);
ps->bridge_mask[newfid] = 1 << port;
@@ -1171,6 +1218,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
if (!ret)
ret = _mv88e6xxx_update_bridge_config(ds, newfid);
+unlock:
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -1210,8 +1258,476 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
return 0;
}
-static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
- const unsigned char *addr)
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+{
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+ if (ret < 0)
+ return ret;
+
+ *pvid = ret & PORT_DEFAULT_VLAN_MASK;
+
+ return 0;
+}
+
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+{
+ return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+ pvid & PORT_DEFAULT_VLAN_MASK);
+}
+
+static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+{
+ return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+ GLOBAL_VTU_OP_BUSY);
+}
+
+static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+{
+ int ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_wait(ds);
+}
+
+static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+{
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+}
+
+static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+ struct mv88e6xxx_vtu_stu_entry *entry,
+ unsigned int nibble_offset)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 regs[3];
+ int i;
+ int ret;
+
+ for (i = 0; i < 3; ++i) {
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ GLOBAL_VTU_DATA_0_3 + i);
+ if (ret < 0)
+ return ret;
+
+ regs[i] = ret;
+ }
+
+ for (i = 0; i < ps->num_ports; ++i) {
+ unsigned int shift = (i % 4) * 4 + nibble_offset;
+ u16 reg = regs[i / 4];
+
+ entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+ }
+
+ return 0;
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+ struct mv88e6xxx_vtu_stu_entry *entry,
+ unsigned int nibble_offset)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 regs[3] = { 0 };
+ int i;
+ int ret;
+
+ for (i = 0; i < ps->num_ports; ++i) {
+ unsigned int shift = (i % 4) * 4 + nibble_offset;
+ u8 data = entry->data[i];
+
+ regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
+ }
+
+ for (i = 0; i < 3; ++i) {
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+ GLOBAL_VTU_DATA_0_3 + i, regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
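
The two helpers above pack one 4-bit nibble per port into the three GLOBAL_VTU_DATA registers, four ports per 16-bit register; nibble_offset selects the low bit pair of each nibble (VTU member tags) or the high bit pair (STU port states). A worked example for port 5 with nibble_offset 2:

    /* regs[5 / 4] == regs[1], i.e. GLOBAL_VTU_DATA_4_7;
     * shift = (5 % 4) * 4 + 2 = 6, so the state sits in bits 7:6
     */
    regs[1] |= (state & GLOBAL_VTU_STU_DATA_MASK) << 6;  /* pack   */
    state = (regs[1] >> 6) & GLOBAL_VTU_STU_DATA_MASK;   /* unpack */
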
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct mv88e6xxx_vtu_stu_entry next = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ if (ret < 0)
+ return ret;
+
+ next.vid = ret & GLOBAL_VTU_VID_MASK;
+ next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+ if (next.valid) {
+ ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+ if (ret < 0)
+ return ret;
+
+ if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+ mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ GLOBAL_VTU_FID);
+ if (ret < 0)
+ return ret;
+
+ next.fid = ret & GLOBAL_VTU_FID_MASK;
+
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ GLOBAL_VTU_SID);
+ if (ret < 0)
+ return ret;
+
+ next.sid = ret & GLOBAL_VTU_SID_MASK;
+ }
+ }
+
+ *entry = next;
+ return 0;
+}
+
+static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ u16 reg = 0;
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (!entry->valid)
+ goto loadpurge;
+
+ /* Write port member tags */
+ ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+ if (ret < 0)
+ return ret;
+
+ if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+ mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ reg = entry->sid & GLOBAL_VTU_SID_MASK;
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ if (ret < 0)
+ return ret;
+
+ reg = entry->fid & GLOBAL_VTU_FID_MASK;
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+ reg |= entry->vid & GLOBAL_VTU_VID_MASK;
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct mv88e6xxx_vtu_stu_entry next = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+ sid & GLOBAL_VTU_SID_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+ if (ret < 0)
+ return ret;
+
+ next.sid = ret & GLOBAL_VTU_SID_MASK;
+
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ if (ret < 0)
+ return ret;
+
+ next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+ if (next.valid) {
+ ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+ if (ret < 0)
+ return ret;
+ }
+
+ *entry = next;
+ return 0;
+}
+
+static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ u16 reg = 0;
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (!entry->valid)
+ goto loadpurge;
+
+ /* Write port states */
+ ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+ if (ret < 0)
+ return ret;
+
+ reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ if (ret < 0)
+ return ret;
+
+ reg = entry->sid & GLOBAL_VTU_SID_MASK;
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan = {
+ .valid = true,
+ .vid = vid,
+ };
+ int i;
+
+ /* exclude all ports except the CPU */
+ for (i = 0; i < ps->num_ports; ++i)
+ vlan.data[i] = dsa_is_cpu_port(ds, i) ?
+ GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
+ GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+ if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+ mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ struct mv88e6xxx_vtu_stu_entry vstp;
+ int err;
+
+ /* Adding a VTU entry requires a valid STU entry. As VSTP is not
+ * implemented, only one STU entry is needed to cover all VTU
+ * entries. Thus, validate SID 0.
+ */
+ vlan.sid = 0;
+ err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+ if (err)
+ return err;
+
+ if (vstp.sid != vlan.sid || !vstp.valid) {
+ memset(&vstp, 0, sizeof(vstp));
+ vstp.valid = true;
+ vstp.sid = vlan.sid;
+
+ err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+ if (err)
+ return err;
+ }
+
+ /* Non-bridged ports and bridge groups use FIDs from 1 to
+ * num_ports; VLANs use FIDs from num_ports+1 to 4095.
+ */
+ vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
+ ps->num_ports + 1);
+ if (unlikely(vlan.fid == VLAN_N_VID)) {
+ pr_err("no more FIDs available for VLAN %d\n", vid);
+ return -ENOSPC;
+ }
+
+ err = _mv88e6xxx_flush_fid(ds, vlan.fid);
+ if (err)
+ return err;
+
+ set_bit(vlan.fid, ps->fid_bitmap);
+ }
+
+ *entry = vlan;
+ return 0;
+}
+
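
The FID allocation above partitions the new 4096-bit bitmap: FID 0 stays unused, FIDs 1..num_ports are reserved for non-bridged ports and bridge groups, and VLANs draw from num_ports+1 upward. The allocate/release cycle, sketched for a hypothetical 7-port switch:

    /* allocate: first free FID in the VLAN range (8..4095 here) */
    fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, ps->num_ports + 1);
    if (fid == VLAN_N_VID)
            return -ENOSPC;           /* the whole VLAN range is in use */
    set_bit(fid, ps->fid_bitmap);

    /* release, e.g. when the last port leaves the VLAN */
    clear_bit(fid, ps->fid_bitmap);
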
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ bool untagged)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
+
+ mutex_lock(&ps->smi_mutex);
+ err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+ if (err)
+ goto unlock;
+
+ if (vlan.vid != vid || !vlan.valid) {
+ err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
+ if (err)
+ goto unlock;
+ }
+
+ vlan.data[port] = untagged ?
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
+ GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
+ err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
+}
+
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ bool keep = false;
+ int i, err;
+
+ mutex_lock(&ps->smi_mutex);
+
+ err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+ if (err)
+ goto unlock;
+
+ if (vlan.vid != vid || !vlan.valid ||
+ vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+ /* keep the VLAN unless all ports are excluded */
+ for (i = 0; i < ps->num_ports; ++i) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+
+ if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ keep = true;
+ break;
+ }
+ }
+
+ vlan.valid = keep;
+ err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!keep)
+ clear_bit(vlan.fid, ps->fid_bitmap);
+
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
+}
+
+static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ int err;
+
+ do {
+ if (vid == 4095)
+ return -ENOENT;
+
+ err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return -ENOENT;
+
+ vid = entry->vid;
+ } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
+ entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
+
+ return 0;
+}
+
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+ unsigned long *ports, unsigned long *untagged)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry next;
+ int port;
+ int err;
+
+ if (*vid == 4095)
+ return -ENOENT;
+
+ mutex_lock(&ps->smi_mutex);
+ err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+ mutex_unlock(&ps->smi_mutex);
+
+ if (err)
+ return err;
+
+ if (!next.valid)
+ return -ENOENT;
+
+ *vid = next.vid;
+
+ for (port = 0; port < ps->num_ports; ++port) {
+ clear_bit(port, ports);
+ clear_bit(port, untagged);
+
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
+ next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ set_bit(port, ports);
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ set_bit(port, untagged);
+ }
+
+ return 0;
+}
+
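
mv88e6xxx_vlan_getnext is a cursor-style API: each call advances *vid to the next valid VTU entry, fills the two port bitmaps, and returns -ENOENT once the table is exhausted or *vid has reached 4095. A hypothetical caller enumerating every configured VLAN can therefore seed the cursor with 0, which is never a valid entry itself:

    DECLARE_BITMAP(ports, DSA_MAX_PORTS);
    DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
    u16 vid = 0;

    while (mv88e6xxx_vlan_getnext(ds, &vid, ports, untagged) == 0)
            pr_info("vlan %u, first member port %lu\n",
                    vid, find_first_bit(ports, DSA_MAX_PORTS));
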
+static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+ const unsigned char *addr)
{
int i, ret;
@@ -1226,7 +1742,7 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
return 0;
}
-static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
int i, ret;
@@ -1242,29 +1758,83 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
return 0;
}
-static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
- const unsigned char *addr, int state)
+static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+ struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u8 fid = ps->fid[port];
+ u16 reg = 0;
int ret;
ret = _mv88e6xxx_atu_wait(ds);
if (ret < 0)
return ret;
- ret = __mv88e6xxx_write_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
- (0x10 << port) | state);
- if (ret)
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (entry->trunk) {
+ reg |= GLOBAL_ATU_DATA_TRUNK;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+
+ reg |= (entry->portv_trunkid << shift) & mask;
+ }
+
+ reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
+ if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+ return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
- return ret;
+static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
+
+ if (vid == 0)
+ return ps->fid[port];
+
+ err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
+ if (err)
+ return err;
+
+ if (vlan.vid == vid)
+ return vlan.fid;
+
+ return -ENOENT;
+}
+
+static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
+ if (ret < 0)
+ return ret;
+
+ entry.fid = ret;
+ entry.state = state;
+ ether_addr_copy(entry.mac, addr);
+ if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.trunk = false;
+ entry.portv_trunkid = BIT(port);
+ }
+
+ return _mv88e6xxx_atu_load(ds, &entry);
}
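
_mv88e6xxx_atu_load above serializes an ATU entry into the GLOBAL_ATU_DATA word: the state occupies the low nibble, and bits 13:4 carry either a port vector or, when GLOBAL_ATU_DATA_TRUNK is set, a trunk ID in bits 7:4. A worked example for a static unicast entry reachable through port 2:

    /* portv_trunkid = BIT(2) = 0x0004, shifted left by 4 -> 0x0040 */
    reg  = (BIT(2) << GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT) &
           GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
    reg |= GLOBAL_ATU_DATA_STATE_UC_STATIC;  /* state nibble, bits 3:0 */
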
int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
@@ -1277,7 +1847,7 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+ ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -1290,61 +1860,105 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+ ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
GLOBAL_ATU_DATA_STATE_UNUSED);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static)
+static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+ const unsigned char *addr,
+ struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u8 fid = ps->fid[port];
- int ret, state;
+ struct mv88e6xxx_atu_entry next = { 0 };
+ int ret;
+
+ next.fid = fid;
ret = _mv88e6xxx_atu_wait(ds);
if (ret < 0)
return ret;
- ret = __mv88e6xxx_write_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_write(ds, addr);
if (ret < 0)
return ret;
- do {
- ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
+ ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (ret < 0)
- return ret;
- state = ret & GLOBAL_ATU_DATA_STATE_MASK;
- if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
- return -ENOENT;
- } while (!(((ret >> 4) & 0xff) & (1 << port)));
+ ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+ if (ret < 0)
+ return ret;
- ret = __mv88e6xxx_read_addr(ds, addr);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
if (ret < 0)
return ret;
- *is_static = state == (is_multicast_ether_addr(addr) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
+ next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ next.trunk = true;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ next.trunk = false;
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+
+ next.portv_trunkid = (ret & mask) >> shift;
+ }
+ *entry = next;
return 0;
}
/* get next entry for port */
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static)
+ unsigned char *addr, u16 *vid, bool *is_static)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_atu_entry next;
+ u16 fid;
int ret;
mutex_lock(&ps->smi_mutex);
- ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+
+ ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
+ if (ret < 0)
+ goto unlock;
+ fid = ret;
+
+ do {
+ if (is_broadcast_ether_addr(addr)) {
+ struct mv88e6xxx_vtu_stu_entry vtu;
+
+ ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
+ if (ret < 0)
+ goto unlock;
+
+ *vid = vtu.vid;
+ fid = vtu.fid;
+ }
+
+ ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
+ if (ret < 0)
+ goto unlock;
+
+ ether_addr_copy(addr, next.mac);
+
+ if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ continue;
+ } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+
+ *is_static = next.state == (is_multicast_ether_addr(addr) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+unlock:
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -1377,7 +1991,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6065_family(ds)) {
+ mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
/* MAC Forcing register: don't force link, speed,
* duplex or flow control state to any particular
* values on physical ports, but force the CPU port
@@ -1385,8 +1999,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* full duplex.
*/
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
- if (dsa_is_cpu_port(ds, port) ||
- ds->dsa_port_mask & (1 << port)) {
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
@@ -1423,7 +2036,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds))
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +2044,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
reg |= PORT_CONTROL_DSA_TAG;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
@@ -1441,16 +2055,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
- if (ds->dsa_port_mask & (1 << port))
+ if (dsa_is_dsa_port(ds, port)) {
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ }
+
if (port == dsa_upstream_port(ds))
reg |= PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
@@ -1462,22 +2080,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
goto abort;
}
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
+ /* Port Control 2: don't force a good FCS, set the maximum frame size to
+ * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
+ * untagged frames on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't send a
+ * copy of all transmitted/received frames on this port to the CPU.
*/
reg = 0;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds))
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
reg = PORT_CONTROL_2_MAP_DA;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds))
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
reg |= PORT_CONTROL_2_JUMBO_10240;
if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1490,6 +2106,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
}
+ reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+
if (reg) {
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
PORT_CONTROL_2, reg);
@@ -1514,7 +2132,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
goto abort;
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Do not limit the period of time that this port can
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
@@ -1564,7 +2183,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Rate Control: disable ingress rate limiting. */
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
@@ -1584,9 +2204,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
*/
- fid = __ffs(ps->fid_mask);
+ fid = port + 1;
ps->fid[port] = fid;
- ps->fid_mask &= ~(1 << fid);
+ set_bit(fid, ps->fid_bitmap);
if (!dsa_is_cpu_port(ds, port))
ps->bridge_mask[fid] = 1 << port;
@@ -1683,7 +2303,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
unsigned char addr[6];
int ret, data, state;
- ret = __mv88e6xxx_write_addr(ds, bcast);
+ ret = _mv88e6xxx_atu_mac_write(ds, bcast);
if (ret < 0)
return ret;
@@ -1698,7 +2318,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
state = data & GLOBAL_ATU_DATA_STATE_MASK;
if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
break;
- ret = __mv88e6xxx_read_addr(ds, addr);
+ ret = _mv88e6xxx_atu_mac_read(ds, addr);
if (ret < 0)
return ret;
mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
@@ -1885,8 +2505,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
- ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
-
INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
@@ -1913,6 +2531,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
int i;
/* Set the default address aging time to 5 minutes, and
@@ -1976,7 +2595,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
(i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
@@ -1995,7 +2615,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6320_family(ds)) {
/* Disable ingress rate limiting by resetting all
* ingress rate limit registers to their initial
* state.
@@ -2009,9 +2630,17 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
/* Wait for the flush to complete. */
- _mv88e6xxx_stats_wait(ds);
+ mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ goto unlock;
- return 0;
+ /* Clear all the VTU and STU entries */
+ ret = _mv88e6xxx_vtu_stu_flush(ds);
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
}
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
@@ -2162,6 +2791,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
return ret;
}
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (ret < 0)
+ goto error;
+
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+ _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ mutex_unlock(&ps->smi_mutex);
+ return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+ if (ret < 0)
+ return ret;
+
+ *temp = (ret & 0xff) - 25;
+
+ return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ return mv88e63xx_get_temp(ds, temp);
+
+ return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+ return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
+}
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+ int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ return -EOPNOTSUPP;
+
+ *alarm = false;
+
+ ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *alarm = !!(ret & 0x40);
+
+ return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
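
The limit/alarm helpers above share one register layout: the current temperature is an 8-bit value offset by 25 degrees C, while the limit in bits 12:8 is stored in 5-degree steps with the same offset. The set-side conversion in mv88e6xxx_set_temp_limit is the exact inverse of the read-side decode, worked through here for a 75 degrees C target:

    /* encode: DIV_ROUND_CLOSEST(75, 5) + 5 == 20 == 0x14 */
    temp = clamp_val(DIV_ROUND_CLOSEST(75, 5) + 5, 0, 0x1f);
    /* decode recovers the target: ((0x14 & 0x1f) * 5) - 25 == 75 */
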
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index a650b2656de9..9b6f3d9d5ae1 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,6 +11,8 @@
#ifndef __MV88E6XXX_H
#define __MV88E6XXX_H
+#include <linux/if_vlan.h>
+
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
#endif
@@ -44,6 +46,8 @@
#define PORT_STATUS_TX_PAUSED BIT(5)
#define PORT_STATUS_FLOW_CTRL BIT(4)
#define PORT_PCS_CTRL 0x01
+#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
+#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
#define PORT_PCS_CTRL_FC BIT(7)
#define PORT_PCS_CTRL_FORCE_FC BIT(6)
#define PORT_PCS_CTRL_LINK_UP BIT(5)
@@ -89,7 +93,12 @@
#define PORT_SWITCH_ID_6182 0x1a60
#define PORT_SWITCH_ID_6185 0x1a70
#define PORT_SWITCH_ID_6240 0x2400
-#define PORT_SWITCH_ID_6320 0x1250
+#define PORT_SWITCH_ID_6320 0x1150
+#define PORT_SWITCH_ID_6320_A1 0x1151
+#define PORT_SWITCH_ID_6320_A2 0x1152
+#define PORT_SWITCH_ID_6321 0x3100
+#define PORT_SWITCH_ID_6321_A1 0x3101
+#define PORT_SWITCH_ID_6321_A2 0x3102
#define PORT_SWITCH_ID_6350 0x3710
#define PORT_SWITCH_ID_6351 0x3750
#define PORT_SWITCH_ID_6352 0x3520
@@ -124,6 +133,7 @@
#define PORT_CONTROL_1 0x05
#define PORT_BASE_VLAN 0x06
#define PORT_DEFAULT_VLAN 0x07
+#define PORT_DEFAULT_VLAN_MASK 0xfff
#define PORT_CONTROL_2 0x08
#define PORT_CONTROL_2_IGNORE_FCS BIT(15)
#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14)
@@ -132,6 +142,11 @@
#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12)
#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12)
#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12)
+#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10)
+#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10)
+#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10)
+#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10)
+#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10)
#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9)
#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
#define PORT_CONTROL_2_MAP_DA BIT(7)
@@ -164,6 +179,11 @@
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
+#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID_MASK 0xfff
+#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_SID_MASK 0x3f
#define GLOBAL_CONTROL 0x04
#define GLOBAL_CONTROL_SW_RESET BIT(15)
#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
@@ -180,10 +200,27 @@
#define GLOBAL_CONTROL_TCAM_EN BIT(1)
#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
#define GLOBAL_VTU_OP 0x05
+#define GLOBAL_VTU_OP_BUSY BIT(15)
+#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
#define GLOBAL_VTU_VID 0x06
+#define GLOBAL_VTU_VID_MASK 0xfff
+#define GLOBAL_VTU_VID_VALID BIT(12)
#define GLOBAL_VTU_DATA_0_3 0x07
#define GLOBAL_VTU_DATA_4_7 0x08
#define GLOBAL_VTU_DATA_8_11 0x09
+#define GLOBAL_VTU_STU_DATA_MASK 0x03
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01
+#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02
+#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03
+#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00
+#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01
+#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02
+#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03
#define GLOBAL_ATU_CONTROL 0x0a
#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3)
#define GLOBAL_ATU_OP 0x0b
@@ -198,6 +235,8 @@
#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
#define GLOBAL_ATU_DATA 0x0c
#define GLOBAL_ATU_DATA_TRUNK BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4
#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
@@ -280,8 +319,12 @@
#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
#define GLOBAL2_EEPROM_OP 0x14
-#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
-#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
+#define GLOBAL2_EEPROM_OP_WRITE ((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_READ ((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_OP_WRITE_EN BIT(10)
+#define GLOBAL2_EEPROM_OP_ADDR_MASK 0xff
#define GLOBAL2_EEPROM_DATA 0x15
#define GLOBAL2_PTP_AVB_OP 0x16
#define GLOBAL2_PTP_AVB_DATA 0x17
@@ -304,6 +347,25 @@
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
+struct mv88e6xxx_atu_entry {
+ u16 fid;
+ u8 state;
+ bool trunk;
+ u16 portv_trunkid;
+ u8 mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_stu_entry {
+ /* VTU only */
+ u16 vid;
+ u16 fid;
+
+ /* VTU and STU */
+ u8 sid;
+ bool valid;
+ u8 data[DSA_MAX_PORTS];
+};
+
struct mv88e6xxx_priv_state {
/* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
@@ -342,9 +404,9 @@ struct mv88e6xxx_priv_state {
/* hw bridging */
- u32 fid_mask;
- u8 fid[DSA_MAX_PORTS];
- u16 bridge_mask[DSA_MAX_PORTS];
+ DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */
+ u16 fid[DSA_MAX_PORTS]; /* per (non-bridged) port FID */
+ u16 bridge_mask[DSA_MAX_PORTS]; /* br groups (indexed by FID) */
unsigned long port_state_update_mask;
u8 port_state[DSA_MAX_PORTS];
@@ -386,10 +448,15 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data);
int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -401,15 +468,23 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ bool untagged);
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+ unsigned long *ports, unsigned long *untagged);
int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
- unsigned char *addr, bool *is_static);
+ unsigned char *addr, u16 *vid, bool *is_static);
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int reg, int val);
+
extern struct dsa_switch_driver mv88e6131_switch_driver;
extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
extern struct dsa_switch_driver mv88e6352_switch_driver;
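
The new GLOBAL2_EEPROM_OP_READ/WRITE definitions fold the opcode and the busy bit into one named constant, which is what let the mv88e6352 hunks earlier drop their bare 0xc000 and 0xb000 literals. The arithmetic, spelled out:

    /* GLOBAL2_EEPROM_OP_READ  == (4 << 12) | BIT(15) == 0x4000 | 0x8000 == 0xc000
     * GLOBAL2_EEPROM_OP_WRITE == (3 << 12) | BIT(15) == 0x3000 | 0x8000 == 0xb000
     */
    ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
                              GLOBAL2_EEPROM_OP_READ |
                              (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
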
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 49adbf1b7574..815eb94990f5 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -144,10 +144,9 @@ static void dummy_setup(struct net_device *dev)
dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
- dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
eth_hw_addr_random(dev);
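For context on the flag swap above: zeroing dev->tx_queue_len used to be the informal way for virtual devices to opt out of queueing, and IFF_NO_QUEUE makes that explicit so the core can attach the noqueue qdisc by default. A hedged sketch of what a queueless setup now looks like (not the driver's exact code):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_noqueue_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* opt out of qdisc queueing explicitly, rather than via
	 * dev->tx_queue_len = 0 as drivers did before IFF_NO_QUEUE
	 */
	dev->priv_flags |= IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}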
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad97f..2839af00f20c 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1726,6 +1726,7 @@ vortex_up(struct net_device *dev)
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
netif_start_queue (dev);
+ netdev_reset_queue(dev);
err_out:
return err;
}
@@ -1763,16 +1764,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1776,13 @@ vortex_open(struct net_device *dev)
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
@@ -1936,16 +1936,18 @@ static void vortex_tx_timeout(struct net_device *dev)
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
- if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+ if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
netif_wake_queue (dev);
+ netdev_reset_queue (dev);
+ }
if (vp->drv_flags & IS_BOOMERANG)
iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
dev->stats.tx_dropped++;
netif_wake_queue(dev);
+ netdev_reset_queue(dev);
}
-
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -2064,6 +2066,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
+ int skblen = skb->len;
/* Put out the doubleword header... */
iowrite32(skb->len, ioaddr + TX_FIFO);
@@ -2095,6 +2098,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ netdev_sent_queue(dev, skblen);
/* Clear the Tx status stack. */
{
@@ -2126,6 +2130,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
+ int skblen = skb->len;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
dma_addr_t dma_addr;
@@ -2231,6 +2236,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
vp->cur_tx++;
+ netdev_sent_queue(dev, skblen);
+
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
@@ -2268,6 +2275,7 @@ vortex_interrupt(int irq, void *dev_id)
int status;
int work_done = max_interrupt_work;
int handled = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
@@ -2315,6 +2323,8 @@ vortex_interrupt(int irq, void *dev_id)
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ pkts_compl++;
+ bytes_compl += vp->tx_skb->len;
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
@@ -2359,6 +2369,7 @@ vortex_interrupt(int irq, void *dev_id)
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
spin_unlock(&vp->window_lock);
if (vortex_debug > 4)
@@ -2382,6 +2393,8 @@ boomerang_interrupt(int irq, void *dev_id)
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
+ int handled = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
@@ -2400,6 +2413,7 @@ boomerang_interrupt(int irq, void *dev_id)
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
+ handled = 1;
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
@@ -2454,6 +2468,8 @@ boomerang_interrupt(int irq, void *dev_id)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
+ pkts_compl++;
+ bytes_compl += skb->len;
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
@@ -2494,6 +2510,7 @@ boomerang_interrupt(int irq, void *dev_id)
iowrite32(0x8000, vp->cb_fn_base + 4);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
if (vortex_debug > 4)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
@@ -2501,7 +2518,7 @@ boomerang_interrupt(int irq, void *dev_id)
handler_exit:
vp->handling_irq = 0;
spin_unlock(&vp->lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
static int vortex_rx(struct net_device *dev)
@@ -2695,7 +2712,8 @@ vortex_down(struct net_device *dev, int final_down)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
- netif_stop_queue (dev);
+ netdev_reset_queue(dev);
+ netif_stop_queue(dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
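The 3c59x hunks above wire the driver into Byte Queue Limits (BQL). The invariant: every byte reported through netdev_sent_queue() must later be matched by netdev_completed_queue(), and any path that discards in-flight TX state (open, timeout recovery, down) must call netdev_reset_queue() so the counters agree again. A sketch of the three call sites, with example_ names as placeholders:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* xmit path: snapshot the length before the skb can be freed */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... post skb to the NIC's TX ring here ... */
	netdev_sent_queue(dev, len);
	return NETDEV_TX_OK;
}

/* completion path (IRQ or NAPI): report what the NIC finished */
static void example_tx_complete(struct net_device *dev,
				unsigned int pkts_compl,
				unsigned int bytes_compl)
{
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* reset paths: restart the accounting from zero */
static void example_tx_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);
}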
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb1784066b..05aa7597dab9 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b0464f..ddfc808110a1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
@@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index bab01c849165..48ce83e443c2 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -28,6 +28,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
#include "sun4i-emac.h"
@@ -857,11 +858,17 @@ static int emac_probe(struct platform_device *pdev)
clk_prepare_enable(db->clk);
+ ret = sunxi_sram_claim(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Error: couldn't map SRAM to device\n");
+ goto out;
+ }
+
db->phy_node = of_parse_phandle(np, "phy", 0);
if (!db->phy_node) {
dev_err(&pdev->dev, "no associated PHY\n");
ret = -ENODEV;
- goto out;
+ goto out_release_sram;
}
/* Read MAC-address from DT */
@@ -893,7 +900,7 @@ static int emac_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Registering netdev failed!\n");
ret = -ENODEV;
- goto out;
+ goto out_release_sram;
}
dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
@@ -901,6 +908,8 @@ static int emac_probe(struct platform_device *pdev)
return 0;
+out_release_sram:
+ sunxi_sram_release(&pdev->dev);
out:
dev_err(db->dev, "not found (%d).\n", ret);
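The sun4i-emac hunks above are a textbook instance of ordered error unwinding: every failure after sunxi_sram_claim() jumps to a label that releases everything acquired so far, in reverse order. Reduced to its shape, with all example_ functions hypothetical stand-ins:

/* hypothetical stand-ins for the driver's acquisition steps */
static int example_claim_sram(void);
static int example_register_netdev(void);
static void example_release_sram(void);

static int example_probe(void)
{
	int ret;

	ret = example_claim_sram();		/* first resource */
	if (ret)
		goto out;

	ret = example_register_netdev();	/* later step may fail */
	if (ret)
		goto out_release_sram;

	return 0;

out_release_sram:
	example_release_sram();			/* undo in reverse order */
out:
	return ret;
}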
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 580553d42d34..88ef67a998b4 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv)
SGDMA_CTRLREG_INTEN |
SGDMA_CTRLREG_ILASTD;
- priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
INIT_LIST_HEAD(&priv->txlisthd);
INIT_LIST_HEAD(&priv->rxlisthd);
@@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_FROM_DEVICE);
pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
- priv->sgdmadesclen, DMA_TO_DEVICE);
+ SGDMA_DESC_LEN, DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
priv->tx_dma_csr,
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index 85bc33b218d9..bbd52f02330b 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -50,6 +50,7 @@ struct sgdma_descrip {
u8 control;
} __packed;
+#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip)
#define SGDMA_STATUS_ERR BIT(0)
#define SGDMA_STATUS_LENGTH_ERR BIT(1)
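The altera_sgdma change above replaces per-device state (priv->sgdmadesclen) with this compile-time constant, which is possible because the descriptor layout is fixed by the hardware. The same pattern in miniature, under assumed names:

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct example_desc {
	u32 raddress;
	u32 waddress;
	u16 length;
	u8 control;
} __packed;

/* fixed layout, so nothing needs to be cached per instance */
#define EXAMPLE_DESC_LEN sizeof(struct example_desc)

static void example_sync_desc(struct device *dev, dma_addr_t desc_phys)
{
	dma_sync_single_for_cpu(dev, desc_phys, EXAMPLE_DESC_LEN,
				DMA_FROM_DEVICE);
}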
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 2adb24d4523c..103c30ddddf7 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -458,7 +458,6 @@ struct altera_tse_private {
u32 rxctrlreg;
dma_addr_t rxdescphys;
dma_addr_t txdescphys;
- size_t sgdmadesclen;
struct list_head txlisthd;
struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index da48e66377b5..fe644823ceaf 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
@@ -1518,6 +1517,7 @@ static int altera_tse_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
+ netif_carrier_off(ndev);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 661cdaa7ea96..b3bc87fe3764 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
get_page(pa->pages);
bd->pa = *pa;
- bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 506e832c9e9a..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
unsigned int rx_usecs = pdata->rx_usecs;
unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
+ dma_addr_t hdr_dma, buf_dma;
if (!rx_usecs && !rx_frames) {
/* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+ hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+ buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
/* Start with the header buffer which may contain just the header
* or the header plus data
*/
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
len -= copy_len;
if (len) {
/* Add the remaining data as a frag */
- dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
- rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
if (!skb)
error = 1;
} else if (rdesc_len) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.buf.dma,
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 63d72a140053..8c9d01ef730d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -140,7 +140,7 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
-/* Descriptors required for maximum contigous TSO/GSO packet */
+/* Descriptors required for maximum contiguous TSO/GSO packet */
#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
- dma_addr_t dma;
+ dma_addr_t dma_base;
+ unsigned long dma_off;
unsigned int dma_len;
};
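The dma_base/dma_off split above matters because the streaming DMA API expects sync calls to be given the handle returned by the original mapping plus an explicit offset, not a pre-added address; dma_sync_single_range_for_cpu() exists for exactly this shape. A sketch:

#include <linux/dma-mapping.h>

static void example_sync_rx_slice(struct device *dev, dma_addr_t dma_base,
				  unsigned long dma_off, unsigned int len)
{
	/* dma_base is exactly what dma_map_page()/dma_map_single()
	 * returned; (dma_off, len) names the slice handed to the CPU
	 */
	dma_sync_single_range_for_cpu(dev, dma_base, dma_off, len,
				      DMA_FROM_DEVICE);
}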
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index a626c4315a89..cfa37041ab71 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -801,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 541bed056012..ff05bbcff26d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -193,12 +193,16 @@ enum xgene_enet_rm {
#define USERINFO_LEN 32
#define FPQNUM_POS 32
#define FPQNUM_LEN 12
+#define NV_POS 50
+#define NV_LEN 1
+#define LL_POS 51
+#define LL_LEN 1
#define LERR_POS 60
#define LERR_LEN 3
#define STASH_POS 52
#define STASH_LEN 2
#define BUFDATALEN_POS 48
-#define BUFDATALEN_LEN 12
+#define BUFDATALEN_LEN 15
#define DATAADDR_POS 0
#define DATAADDR_LEN 42
#define COHERENT_POS 63
@@ -215,9 +219,19 @@ enum xgene_enet_rm {
#define IPHDR_LEN 6
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
+#define ET_POS 23 /* Enable TSO */
#define IS_POS 24 /* IP protocol select */
#define IS_LEN 1
#define TYPE_ETH_WORK_MESSAGE_POS 44
+#define LL_BYTES_MSB_POS 56
+#define LL_BYTES_MSB_LEN 8
+#define LL_BYTES_LSB_POS 48
+#define LL_BYTES_LSB_LEN 12
+#define LL_LEN_POS 48
+#define LL_LEN_LEN 8
+#define DATALEN_MASK GENMASK(11, 0)
+
+#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
struct xgene_enet_raw_desc {
__le64 m0;
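The POS/LEN pairs above describe bitfields packed into the 64-bit descriptor words. A worked sketch of the decode, mirroring the GET_VAL() helper in xgene_enet_main.h (helper names here are illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

static u64 example_get_field(u64 src, int pos, int len)
{
	return (src >> pos) & (BIT_ULL(len) - 1);
}

/* BUFDATALEN is now 15 bits at bit 48, but only the low 12 bits carry
 * the byte count; this is why the rx path applies DATALEN_MASK before
 * stripping the 4-byte CRC.
 */
static u32 example_rx_datalen(u64 m1)
{
	return example_get_field(m1, 48, 15) & GENMASK(11, 0);
}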
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 299eb4315fe6..e47298faf78d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -147,18 +147,27 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
{
struct sk_buff *skb;
struct device *dev;
+ skb_frag_t *frag;
+ dma_addr_t *frag_dma_addr;
u16 skb_index;
u8 status;
- int ret = 0;
+ int i, ret = 0;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
+ frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
dev = ndev_to_dev(cp_ring->ndev);
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
- GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
+ skb_headlen(skb),
DMA_TO_DEVICE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -179,12 +188,16 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct iphdr *iph;
- u8 l3hlen, l4hlen = 0;
- u8 csum_enable = 0;
- u8 proto = 0;
- u8 ethhdr;
- u64 hopinfo;
+ u8 l3hlen = 0, l4hlen = 0;
+ u8 ethhdr, proto = 0, csum_enable = 0;
+ u64 hopinfo = 0;
+ u32 hdr_len, mss = 0;
+ u32 i, len, nr_frags;
+
+ ethhdr = xgene_enet_hdr_len(skb->data);
if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
unlikely(skb->protocol != htons(ETH_P_8021Q)))
@@ -201,14 +214,40 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
l4hlen = tcp_hdrlen(skb) >> 2;
csum_enable = 1;
proto = TSO_IPPROTO_TCP;
+ if (ndev->features & NETIF_F_TSO) {
+ hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+
+ if (skb_is_nonlinear(skb)) {
+ len = skb_headlen(skb);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < 2 && i < nr_frags; i++)
+ len += skb_shinfo(skb)->frags[i].size;
+
+ /* HW requires the header to reside within the first 3 buffers */
+ if (unlikely(hdr_len > len)) {
+ if (skb_linearize(skb))
+ return 0;
+ }
+ }
+
+ if (!mss || ((skb->len - hdr_len) <= mss))
+ goto out;
+
+ if (mss != pdata->mss) {
+ pdata->mss = mss;
+ pdata->mac_ops->set_mss(pdata);
+ }
+ hopinfo |= SET_BIT(ET);
+ }
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
csum_enable = 1;
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- ethhdr = xgene_enet_hdr_len(skb->data);
- hopinfo = SET_VAL(TCPHDR, l4hlen) |
+ hopinfo |= SET_VAL(TCPHDR, l4hlen) |
SET_VAL(IPHDR, l3hlen) |
SET_VAL(ETHHDR, ethhdr) |
SET_VAL(EC, csum_enable) |
@@ -219,35 +258,170 @@ out:
return hopinfo;
}
+static u16 xgene_enet_encode_len(u16 len)
+{
+ return (len == BUFLEN_16K) ? 0 : len;
+}
+
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+ desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+ SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+ __le64 *exp_bufs;
+
+ exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+ memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+ ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+ return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+ return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
- dma_addr_t dma_addr;
+ __le64 *exp_desc = NULL, *exp_bufs = NULL;
+ dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+ skb_frag_t *frag;
u16 tail = tx_ring->tail;
u64 hopinfo;
+ u32 len, hw_len;
+ u8 ll = 0, nv = 0, idx = 0;
+ bool split = false;
+ u32 size, offset, ell_bytes = 0;
+ u32 i, fidx, nr_frags, count = 1;
raw_desc = &tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ hopinfo = xgene_enet_work_msg(skb);
+ if (!hopinfo)
+ return -EINVAL;
+ raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+ hopinfo);
+
+ len = skb_headlen(skb);
+ hw_len = xgene_enet_encode_len(len);
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
netdev_err(tx_ring->ndev, "DMA mapping error\n");
return -EINVAL;
}
/* Hardware expects descriptor in little endian format */
- raw_desc->m0 = cpu_to_le64(tail);
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
- SET_VAL(BUFDATALEN, skb->len) |
+ SET_VAL(BUFDATALEN, hw_len) |
SET_BIT(COHERENT));
- hopinfo = xgene_enet_work_msg(skb);
- raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
- hopinfo);
- tx_ring->cp_ring->cp_skb[tail] = skb;
- return 0;
+ if (!skb_is_nonlinear(skb))
+ goto out;
+
+ /* scatter gather */
+ nv = 1;
+ exp_desc = (void *)&tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
+ memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (i = nr_frags; i < 4; i++)
+ exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+ frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+ for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+ if (!split) {
+ frag = &skb_shinfo(skb)->frags[fidx];
+ size = skb_frag_size(frag);
+ offset = 0;
+
+ pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pbuf_addr))
+ return -EINVAL;
+
+ frag_dma_addr[fidx] = pbuf_addr;
+ fidx++;
+
+ if (size > BUFLEN_16K)
+ split = true;
+ }
+
+ if (size > BUFLEN_16K) {
+ len = BUFLEN_16K;
+ size -= BUFLEN_16K;
+ } else {
+ len = size;
+ split = false;
+ }
+
+ dma_addr = pbuf_addr + offset;
+ hw_len = xgene_enet_encode_len(len);
+
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+ break;
+ case 3:
+ if (split || (fidx != nr_frags)) {
+ exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+ xgene_set_addr_len(exp_bufs, idx, dma_addr,
+ hw_len);
+ idx++;
+ ell_bytes += len;
+ } else {
+ xgene_set_addr_len(exp_desc, i, dma_addr,
+ hw_len);
+ }
+ break;
+ default:
+ xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+ idx++;
+ ell_bytes += len;
+ break;
+ }
+
+ if (split)
+ offset += BUFLEN_16K;
+ }
+ count++;
+
+ if (idx) {
+ ll = 1;
+ dma_addr = dma_map_single(dev, exp_bufs,
+ sizeof(u64) * MAX_EXP_BUFFS,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+ i = ell_bytes >> LL_BYTES_LSB_LEN;
+ exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+ SET_VAL(LL_BYTES_MSB, i) |
+ SET_VAL(LL_LEN, idx));
+ raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+ }
+
+out:
+ raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+ SET_VAL(USERINFO, tx_ring->tail));
+ tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+ tx_ring->tail = tail;
+
+ return count;
}
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
@@ -257,6 +431,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
+ int count;
tx_level = pdata->ring_ops->len(tx_ring);
cq_level = pdata->ring_ops->len(cp_ring);
@@ -266,14 +441,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
+ if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+ return NETDEV_TX_OK;
+
+ count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- pdata->ring_ops->wr_cmd(tx_ring, 1);
+ pdata->ring_ops->wr_cmd(tx_ring, count);
skb_tx_timestamp(skb);
- tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
pdata->stats.tx_packets++;
pdata->stats.tx_bytes += skb->len;
@@ -326,7 +504,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
/* strip off CRC as HW isn't doing this */
datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
- datalen -= 4;
+ datalen = (datalen & DATALEN_MASK) - 4;
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, datalen);
@@ -358,26 +536,41 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
int budget)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
- struct xgene_enet_raw_desc *raw_desc;
+ struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
- int ret, count = 0;
+ int ret, count = 0, processed = 0;
do {
raw_desc = &ring->raw_desc[head];
+ exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
/* read fpqnum field after dataaddr field */
dma_rmb();
+ if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+ head = (head + 1) & slots;
+ exp_desc = &ring->raw_desc[head];
+
+ if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+ head = (head - 1) & slots;
+ break;
+ }
+ dma_rmb();
+ count++;
+ }
if (is_rx_desc(raw_desc))
ret = xgene_enet_rx_frame(ring, raw_desc);
else
ret = xgene_enet_tx_completion(ring, raw_desc);
xgene_enet_mark_desc_slot_empty(raw_desc);
+ if (exp_desc)
+ xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
+ processed++;
if (ret)
break;
@@ -393,7 +586,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
}
}
- return count;
+ return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -738,12 +931,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
+ dma_addr_t dma_exp_bufs;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
- int ret;
+ int ret, size;
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
@@ -794,6 +988,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
ret = -ENOMEM;
goto err;
}
+
+ size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+ tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+ GFP_KERNEL);
+ if (!tx_ring->exp_bufs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring = tx_ring;
if (!pdata->cq_cnt) {
@@ -818,6 +1021,16 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
ret = -ENOMEM;
goto err;
}
+
+ size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+ cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+ size, GFP_KERNEL);
+ if (!cp_ring->frag_dma_addr) {
+ devm_kfree(dev, cp_ring->cp_skb);
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
@@ -905,40 +1118,6 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda
return ret;
}
-static int xgene_get_mac_address(struct device *dev,
- unsigned char *addr)
-{
- int ret;
-
- ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
- if (ret)
- ret = device_property_read_u8_array(dev, "mac-address",
- addr, 6);
- if (ret)
- return -ENODEV;
-
- return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
- int i, ret;
- char *modestr;
-
- ret = device_property_read_string(dev, "phy-connection-type",
- (const char **)&modestr);
- if (ret)
- ret = device_property_read_string(dev, "phy-mode",
- (const char **)&modestr);
- if (ret)
- return -ENODEV;
-
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
- if (!strcasecmp(modestr, phy_modes(i)))
- return i;
- }
- return -ENODEV;
-}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
@@ -998,12 +1177,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = xgene_get_phy_mode(dev);
+ pdata->phy_mode = device_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -1207,7 +1386,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_set_ethtool_ops(ndev);
ndev->features |= NETIF_F_IP_CSUM |
NETIF_F_GSO |
- NETIF_F_GRO;
+ NETIF_F_GRO |
+ NETIF_F_SG;
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
@@ -1233,6 +1413,12 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ ndev->features |= NETIF_F_TSO;
+ pdata->mss = XGENE_ENET_MSS;
+ }
+ ndev->hw_features = ndev->features;
+
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
@@ -1277,9 +1463,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
- xgene_enet_mdio_remove(pdata);
- xgene_enet_delete_desc_rings(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
+ xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
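Two small idioms in the TX rework above are easy to miss: xgene_enet_encode_len() relies on the hardware reading a zero length field as 16K, and every ring index advances by masking, which only works because the slot counts are powers of two. In isolation (illustrative helpers):

#include <linux/types.h>

#define EXAMPLE_BUFLEN_16K	(16 * 1024)

/* a full 16K buffer is encoded as 0; smaller lengths pass through */
static u16 example_encode_len(u32 len)
{
	return (len == EXAMPLE_BUFLEN_16K) ? 0 : len;
}

/* wrap-around by masking; valid only when slots is a power of two */
static u16 example_ring_advance(u16 index, u16 slots)
{
	return (index + 1) & (slots - 1);
}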
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 1c85fc87703a..50f92c39ed2a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -40,8 +40,12 @@
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define BUFLEN_16K (16 * 1024)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define MAX_EXP_BUFFS 256
+#define XGENE_ENET_MSS 1448
+#define XGENE_MIN_ENET_FRAME_SIZE 60
#define START_CPU_BUFNUM_0 0
#define START_ETH_BUFNUM_0 2
@@ -79,6 +83,7 @@ struct xgene_enet_desc_ring {
u16 num;
u16 head;
u16 tail;
+ u16 exp_buf_tail;
u16 slots;
u16 irq;
char irq_name[IRQ_ID_SIZE];
@@ -93,6 +98,7 @@ struct xgene_enet_desc_ring {
u8 nbufpool;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
+ dma_addr_t *frag_dma_addr;
enum xgene_enet_ring_cfgsize cfgsize;
struct xgene_enet_desc_ring *cp_ring;
struct xgene_enet_desc_ring *buf_pool;
@@ -102,6 +108,7 @@ struct xgene_enet_desc_ring {
struct xgene_enet_raw_desc *raw_desc;
struct xgene_enet_raw_desc16 *raw_desc16;
};
+ __le64 *exp_bufs;
};
struct xgene_mac_ops {
@@ -112,6 +119,7 @@ struct xgene_mac_ops {
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata);
void (*link_state)(struct work_struct *work);
};
@@ -170,6 +178,7 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
+ u32 mss;
};
struct xgene_indirect_ctl {
@@ -204,6 +213,9 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_VAL(field, src) \
xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
static inline struct device *ndev_to_dev(struct net_device *ndev)
{
return ndev->dev.parent;
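A usage sketch for the GET_BIT() helper added above: it is simply a one-bit GET_VAL(), as used by the NV check in xgene_enet_process_ring(). This assumes the driver's own headers for NV_POS and struct xgene_enet_raw_desc:

/* true when descriptor word m0 flags a following (exp) descriptor */
static bool example_desc_has_exp(const struct xgene_enet_raw_desc *raw_desc)
{
	return GET_BIT(NV, le64_to_cpu(raw_desc->m0));
}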
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 05edb847cf26..7a28a48cb2c7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -184,6 +184,11 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+{
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+}
+
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -204,8 +209,8 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
- xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
xgene_xgmac_set_mac_addr(pdata);
+ xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
@@ -329,6 +334,7 @@ struct xgene_mac_ops xgene_xgmac_ops = {
.rx_disable = xgene_xgmac_rx_disable,
.tx_disable = xgene_xgmac_tx_disable,
.set_mac_addr = xgene_xgmac_set_mac_addr,
+ .set_mss = xgene_xgmac_set_mss,
.link_state = xgene_enet_link_state
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index bf0a99435737..f8f908dbf51c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -62,7 +62,9 @@
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214
#define XG_LINK_STATUS_ADDR 0x0228
+#define XG_TSIF_MSS_REG0_ADDR 0x02a4
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 932bd1862f7a..2795d6db10e1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
+
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
ring_count);
@@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 hw_next_to_clean;
u16 reg;
+ unsigned int total_bytes = 0, total_packets = 0;
reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
@@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->skb->len;
+ total_packets++;
+ }
atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
+ netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
if (netif_queue_stopped(adapter->netdev) &&
netif_carrier_ok(adapter->netdev)) {
netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
+ netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8be9eab73320..e930aa9a3cfb 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,6 +139,16 @@ config BNX2X_SRIOV
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
+config BNX2X_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for the VXLAN protocol over
+ the NetXtremeII series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 909ad7a0d480..b9a5a97ed4dd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->irq0);
+ bcm_sysport_rx_isr(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_set_features = bcm_sysport_set_features,
.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
.ndo_set_mac_address = bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcm_sysport_poll_controller,
+#endif
};
#define REV_FMT "v%2x.%02x"
@@ -1793,7 +1811,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
macaddr = of_get_mac_address(dn);
if (!macaddr || !is_valid_ether_addr(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
} else {
ether_addr_copy(dev->dev_addr, macaddr);
}
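The ndo_poll_controller hook added above follows the usual netpoll pattern: when the kernel must move packets with interrupts unavailable (netconsole being the common user), each IRQ line is masked and its handler invoked synchronously. The generic shape, as a sketch:

#include <linux/interrupt.h>

static void example_poll_one_irq(int irq, irq_handler_t handler,
				 void *cookie)
{
	disable_irq(irq);	/* waits for any in-flight handler */
	handler(irq, cookie);
	enable_irq(irq);
}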
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 21e3c38c7c75..28f7610b03fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1447,7 +1447,7 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
bgmac_err(bgmac, "Failed to register fixed PHY device\n");
return -ENODEV;
@@ -1549,11 +1549,20 @@ static int bgmac_probe(struct bcma_device *core)
struct net_device *net_dev;
struct bgmac *bgmac;
struct ssb_sprom *sprom = &core->bus->sprom;
- u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+ u8 *mac;
int err;
- /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
- if (core->core_unit > 1) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
pr_err("Unsupported core_unit %d\n", core->core_unit);
return -ENOTSUPP;
}
@@ -1588,8 +1597,17 @@ static int bgmac_probe(struct bcma_device *core)
}
bgmac->cmn = core->bus->drv_gmac_cmn.core;
- bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
- sprom->et0phyaddr;
+ switch (core->core_unit) {
+ case 0:
+ bgmac->phyaddr = sprom->et0phyaddr;
+ break;
+ case 1:
+ bgmac->phyaddr = sprom->et1phyaddr;
+ break;
+ case 2:
+ bgmac->phyaddr = sprom->et2phyaddr;
+ break;
+ }
bgmac->phyaddr &= BGMAC_PHY_MASK;
if (bgmac->phyaddr == BGMAC_PHY_MASK) {
bgmac_err(bgmac, "No PHY found\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cd4ae76bbff2..ba936635322a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.710.51-0"
+#define DRV_MODULE_VERSION "1.712.30-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
@@ -1227,6 +1229,10 @@ struct bnx2x_slowpath {
} mac_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -1386,6 +1392,8 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
+ BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ BNX2X_SP_RTNL_DEL_VXLAN_PORT,
};
enum bnx2x_iov_flag {
@@ -1408,6 +1416,9 @@ struct bnx2x_sp_objs {
/* Queue State object */
struct bnx2x_queue_sp_obj q_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
};
struct bnx2x_fp_stats {
@@ -1422,6 +1433,13 @@ enum {
SUB_MF_MODE_UNKNOWN = 0,
SUB_MF_MODE_UFP,
SUB_MF_MODE_NPAR1_DOT_5,
+ SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+ struct list_head link;
+ u16 vid;
+ bool hw;
};
struct bnx2x {
@@ -1636,6 +1654,8 @@ struct bnx2x {
u8 mf_sub_mode;
#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_BD)
u8 wol;
@@ -1860,8 +1880,6 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
-
- /* used only in sriov */
struct bnx2x_credit_pool_obj vlans_pool;
struct bnx2x_credit_pool_obj macs_pool;
@@ -1924,6 +1942,11 @@ struct bnx2x {
u16 rx_filter;
struct bnx2x_link_report_data vf_link_vars;
+ struct list_head vlan_reg;
+ u16 vlan_cnt;
+ u16 vlan_credit;
+ u16 vxlan_dst_port;
+ bool accept_any_vlan;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1974,14 @@ extern int num_queues;
#define RSS_IPV6_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
-/* func init flags */
-#define FUNC_FLG_RSS 0x0001
-#define FUNC_FLG_STATS 0x0002
-/* removed FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA 0x0008
-#define FUNC_FLG_SPQ 0x0010
-#define FUNC_FLG_LEADING 0x0020 /* PF only */
-#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
- dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
- dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+ bool spq_active;
+ dma_addr_t spq_map;
+ u16 spq_prod;
- u16 func_flgs;
u16 func_id; /* abs fid */
u16 pf_id;
- u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
#define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2091,11 @@ struct bnx2x_func_init_params {
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags);
+
/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
@@ -2481,6 +2500,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1
#define VF_ACQUIRE_MC_FILTERS 10
+#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
@@ -2553,6 +2573,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
#define SET_FLAG(value, mask, flag) \
do {\
@@ -2577,6 +2601,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
void bnx2x_update_mng_version(struct bnx2x *bp);
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
@@ -2589,4 +2615,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..44173be5cbf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -262,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -1188,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
/* Calculate the current MAX line speed limit for the MF
* devices
*/
- if (IS_MF_SI(bp))
+ if (IS_MF_PERCENT_BW(bp))
line_speed = (line_speed * maxCfg) / 100;
else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
@@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
+ /* valid only for TUNN_MODE_VXLAN tunnel mode */
+ __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
/* valid only for TUNN_MODE_GRE tunnel mode */
- __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+ __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+ }
} else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
}
@@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->mode = TPA_MODE_DISABLED;
}
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+ u32 cur;
+
+ if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+ return;
+
+ cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+ DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+ cur, state);
+
+ SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
int bnx2x_load_cnic(struct bnx2x *bp)
{
int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
+ /* Re-configure vlan filters */
+ rc = bnx2x_vlan_reconfigure_vid(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+
/* Initialize Rx filter. */
bnx2x_set_rx_mode_inner(bp);
@@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ val &= ~DRV_FLAGS_MTU_MASK;
+ val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
+ /* Update driver data for On-Chip MFW dump. */
+ if (IS_PF(bp))
+ bnx2x_update_mfw_dump(bp);
+
/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
bnx2x_dcbx_init(bp, false);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
return 0;
@@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
/* mark driver is unloaded in shmem2 */
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
@@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
pbd2->fw_ip_hdr_to_payload_w =
hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
pbd_e2->data.tunnel_data.flags |=
- ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+ ETH_TUNNEL_DATA_IPV6_OUTER;
}
pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+ int mfw_vn = BP_FW_MB_IDX(bp);
+ u32 tmp;
+
+ /* If the shmem shouldn't affect configuration, use an identity map */
+ if (!IS_MF_BD(bp)) {
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+ c2s_map[i] = i;
+ *c2s_default = 0;
+
+ return;
+ }
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[0] = tmp & 0xff;
+ c2s_map[1] = (tmp >> 8) & 0xff;
+ c2s_map[2] = (tmp >> 16) & 0xff;
+ c2s_map[3] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[4] = tmp & 0xff;
+ c2s_map[5] = (tmp >> 8) & 0xff;
+ c2s_map[6] = (tmp >> 16) & 0xff;
+ c2s_map[7] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
/**
* bnx2x_setup_tc - routine to configure net_device for multi tc
*
@@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
- int cos, prio, count, offset;
struct bnx2x *bp = netdev_priv(dev);
+ u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+ int cos, prio, count, offset;
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
@@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return -EINVAL;
}
+ bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
- netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ int outer_prio = c2s_map[prio];
+
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping priority %d to tc %d\n",
- prio, bp->prio_to_cos[prio]);
+ outer_prio, bp->prio_to_cos[outer_prio]);
}
/* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return rc;
}
@@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return bnx2x_reload_if_running(dev);
}
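bnx2x_get_c2s_mapping() above unpacks eight one-byte priority entries from two 32-bit shmem words after the big-endian fixup; the shift-and-mask sequence reduces to a loop (illustrative helper):

#include <linux/types.h>

static void example_unpack_c2s(u32 lower, u32 upper, u8 map[8])
{
	int i;

	for (i = 0; i < 4; i++) {
		map[i]     = (lower >> (8 * i)) & 0xff;	/* entries 0..3 */
		map[i + 4] = (upper >> (8 * i)) & 0xff;	/* entries 4..7 */
	}
}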
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 03b7404d5b9b..b7d32e8412f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*/
void bnx2x_tx_timeout(struct net_device *dev);
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ * @bp: driver handle
+ * @c2s_map: should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default: entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,35 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
start_params->mf_mode = bp->mf_mode;
start_params->sd_vlan_tag = bp->mf_ov;
+ /* Configure Ethertype for BD mode */
+ if (IS_MF_BD(bp)) {
+ DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+ start_params->sd_vlan_eth_type = ETH_P_8021AD;
+ REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+ bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+ &start_params->c2s_pri_default);
+ start_params->c2s_pri_valid = 1;
+
+ DP(NETIF_MSG_IFUP,
+ "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+ start_params->c2s_pri[0], start_params->c2s_pri[1],
+ start_params->c2s_pri[2], start_params->c2s_pri[3],
+ start_params->c2s_pri[4], start_params->c2s_pri[5],
+ start_params->c2s_pri[6], start_params->c2s_pri[7],
+ start_params->c2s_pri_default);
+ }
+
if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
start_params->network_cos_mode = STATIC_COS;
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->tunnel_mode = TUNN_MODE_GRE;
- start_params->gre_tunnel_type = IPGRE_TUNNEL;
- start_params->inner_gre_rss_en = 1;
+ start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
+ start_params->inner_rss = 1;
if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
start_params->class_fail_ethtype = ETH_P_FIP;
@@ -1037,6 +1068,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
&bp->macs_pool);
+
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+ fp->cl_id, fp->cid, BP_FUNC(bp),
+ bnx2x_sp(bp, vlan_rdata),
+ bnx2x_sp_mapping(bp, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->vlans_pool);
}
/**
@@ -1096,7 +1136,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
- bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
/* RSS configuration object */
@@ -1106,6 +1146,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_sp_mapping(bp, rss_rdata),
BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
BNX2X_OBJ_TYPE_RX);
+
+ bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1381,23 @@ void bnx2x_squeeze_objects(struct bnx2x *bp);
void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
u32 verbose);
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp: driver handle
+ * @state: OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp: driver handle
+ * @offset: byte offset in nvram
+ * @ret_buf: pointer to buffer where data is to be stored
+ * @buf_size: Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6e4294ed1fc9..7ccf6684e0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
if (bp->dcbx_port_params.ets.cos_params[cos].
pri_bitmask & pri_bit)
tt2cos[pri].cos = cos;
+
+ pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri];
}
/* we never want the FW to add a 0 vlan tag */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index c6939ecb02c5..9a9517c0f703 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 741aa130c19f..eccfa13b0f2d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..aeb7ce64452e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
} else
bp->wol = 0;
+ if (SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return 0;
}
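
The SHMEM2_HAS()/SHMEM2_WR() pair above is the idiom this patch applies to
every optional shmem2 field: older management firmware exposes a shorter
shmem2 layout, so a field's presence has to be tested before each access. The
same guard appears verbatim around another field this patch adds,
netproc_fw_ver, in a bnx2x_main.c hunk further down:

	/* Only touch an optional shmem2 field when the running MFW actually
	 * exposes it; an unguarded access could land past the end of an
	 * older, shorter shmem2 layout.
	 */
	if (SHMEM2_HAS(bp, netproc_fw_ver))
		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
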
@@ -1343,8 +1348,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
return rc;
}
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
- int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
{
int rc;
u32 cmd_flags;
@@ -1718,6 +1723,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+ /* At the end of each 4KB page, release the NVRAM lock to give the
+ * MFW a chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
@@ -3562,17 +3583,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 7636e3c18771..226ab29f4cb6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -372,7 +374,7 @@
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 8aafd9b5d6a2..9e3b5a1e9f4f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,8 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 058bc7328220..cafd5de675cf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
@@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500
#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
@@ -2064,6 +2069,45 @@ struct ncsi_oem_fcoe_features {
#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
};
+enum curr_cfg_method_e {
+ CURR_CFG_MET_NONE = 0, /* default config */
+ CURR_CFG_MET_OS = 1,
+ CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+ struct bdn_fc_npiv_cfg fc_npiv_cfg;
+ struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+ u32 epoc;
+ u32 drv_ver;
+ u32 fw_ver;
+
+ u32 valid_dump;
+ #define FIRST_DUMP_VALID (1 << 0)
+ #define SECOND_DUMP_VALID (1 << 1)
+
+ u32 flags;
+ #define ENABLE_ALL_TRIGGERS (0x7fffffff)
+ #define TRIGGER_MDUMP_ONCE (1 << 31)
+};
+
struct ncsi_oem_data {
u32 driver_version[4];
struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2231,8 @@ struct shmem2_region {
#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+#define DRV_FLAGS_MTU_MASK 0xffff0000
+#define DRV_FLAGS_MTU_SHIFT 16
u32 extended_dev_info_shared_cfg_size;
@@ -2251,6 +2297,7 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_ATTR_84858 0x00000002
#define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
#define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
#define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
@@ -2268,6 +2315,74 @@ struct shmem2_region {
/* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+ union { /* For various OEMs */ /* Offset 0x1a0 */
+ u8 storage_boot_prog[E2_FUNC_MAX];
+ #define STORAGE_BOOT_PROG_MASK 0x000000FF
+ #define STORAGE_BOOT_PROG_NONE 0x00000000
+ #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002
+ #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002
+ #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004
+ #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008
+ #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008
+ #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010
+ #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020
+ #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040
+ #define STORAGE_BOOT_PROG_COMPLETED 0x00000080
+
+ u32 oem_i2c_data_addr;
+ };
+
+ /* 9 entries for the C2S PCP map: one for each inner VLAN PCP + 1 default */
+ /* For PCP values 0-3 use the lower map */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF - PCP 3
+ */
+ u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */
+
+ /* For PCP values 4-7 use the upper map */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+ */
+ u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */
+
+ /* The default PCP value is held in the MSB of the default map */
+ u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */
+
+ /* FC_NPIV table offset in NVRAM */
+ u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */
+
+ /* Shows last method that changed configuration of this device */
+ enum curr_cfg_method_e curr_cfg; /* 0x1dc */
+
+ /* Storm FW version, should be kept in the format 0xMMmmbbdd:
+ * MM - Major, mm - Minor, bb - Build, dd - Drop
+ */
+ u32 netproc_fw_ver; /* 0x1e0 */
+
+ /* Option ROM SMASH CLP version */
+ u32 clp_ver; /* 0x1e4 */
+
+ u32 pcie_bus_num; /* 0x1e8 */
+
+ u32 sriov_switch_mode; /* 0x1ec */
+ #define SRIOV_SWITCH_MODE_NONE 0x0
+ #define SRIOV_SWITCH_MODE_VEB 0x1
+ #define SRIOV_SWITCH_MODE_VEPA 0x2
+
+ u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */
+
+ u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */
+
+ u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */
+
+ u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */
+ #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */
+ #define OS_DRIVER_STATE_LOADING 1 /* transition state */
+ #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */
+ #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */
+
+ /* mini dump driver info */
+ struct mdump_driver_info drv_info; /* 0x218 */
};
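
The layout comments above pack the C2S (inner-to-outer) PCP maps one byte per
PCP, MSB first, four entries per u32. A hedged sketch of the extraction with a
hypothetical helper name (the driver itself goes through
bnx2x_get_c2s_mapping(), used in the bnx2x_cmn.h hunk at the top of this
patch):

	/* Hypothetical helper: return the outer PCP mapped to inner PCP
	 * 'pri' (0-7). Per the comments above, PCP 0/4 lives in bits 31-24
	 * of the lower/upper map and PCP 3/7 in bits 7-0.
	 */
	static u8 example_c2s_pri(u32 map_lower, u32 map_upper, u8 pri)
	{
		u32 map = (pri < 4) ? map_lower : map_upper;

		return (u8)(map >> (8 * (3 - (pri & 3))));
	}
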
@@ -2898,8 +3013,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 10
-#define BCM_5710_FW_REVISION_VERSION 51
+#define BCM_5710_FW_MINOR_VERSION 12
+#define BCM_5710_FW_REVISION_VERSION 30
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3901,7 +4016,11 @@ struct eth_fast_path_rx_cqe {
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
- __le32 reserved1[7];
+ u8 tunn_type;
+ u8 tunn_inner_hdrs_offset;
+ __le16 reserved1;
+ __le32 tunn_tenant_id;
+ __le32 padding[5];
u32 marker;
};
@@ -4012,8 +4131,8 @@ struct eth_tunnel_data {
__le16 pseudo_csum;
u8 ip_hdr_start_inner_w;
u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
};
@@ -4120,16 +4239,12 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
u8 rss_result_mask;
u8 reserved3;
__le16 reserved4;
@@ -4314,6 +4429,18 @@ enum eth_tunnel_non_lso_csum_location {
MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
};
+enum eth_tunn_type {
+ TUNN_TYPE_NONE,
+ TUNN_TYPE_VXLAN,
+ TUNN_TYPE_L2_GRE,
+ TUNN_TYPE_IPV4_GRE,
+ TUNN_TYPE_IPV6_GRE,
+ TUNN_TYPE_L2_GENEVE,
+ TUNN_TYPE_IPV4_GENEVE,
+ TUNN_TYPE_IPV6_GENEVE,
+ MAX_ETH_TUNN_TYPE
+};
+
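
Together with the tunn_type and tunn_inner_hdrs_offset fields added to
eth_fast_path_rx_cqe in an earlier hunk of this file, this enum lets the RX
path see which encapsulation the firmware classified for each completion. A
hedged sketch of a consumer (the predicate is illustrative, not a driver
function):

	/* Hypothetical predicate over the FW-reported classification */
	static bool example_rx_is_tunneled(const struct eth_fast_path_rx_cqe *cqe)
	{
		switch (cqe->tunn_type) {
		case TUNN_TYPE_VXLAN:
		case TUNN_TYPE_L2_GRE:
		case TUNN_TYPE_IPV4_GRE:
		case TUNN_TYPE_IPV6_GRE:
		case TUNN_TYPE_L2_GENEVE:
		case TUNN_TYPE_IPV4_GENEVE:
		case TUNN_TYPE_IPV6_GENEVE:
			return true;
		default:
			return false;
		}
	}
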
/*
* Tx regular BD structure
*/
@@ -4758,6 +4885,9 @@ struct afex_vif_list_ramrod_data {
__le16 reserved1;
};
+struct c2s_pri_trans_table_entry {
+ u8 val[MAX_VLAN_PRIORITIES];
+};
/*
* cfc delete event data
@@ -5246,6 +5376,7 @@ struct flow_control_configuration {
u8 dont_add_pri_0_en;
u8 reserved1;
__le32 reserved2;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
@@ -5260,18 +5391,25 @@ struct function_start_data {
u8 path_id;
u8 network_cos_mode;
u8 dmae_cmd_id;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
- u8 sd_accept_mf_clss_fail;
+ u8 no_added_tags;
+ __le16 reserved0;
+ __le32 reserved1;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
+ u8 sd_accept_mf_clss_fail;
+ u8 sd_accept_mf_clss_fail_match_ethtype;
__le16 sd_accept_mf_clss_fail_ethtype;
__le16 sd_vlan_eth_type;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
- u8 sd_accept_mf_clss_fail_match_ethtype;
- u8 no_added_tags;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+ u8 reserved2[6];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
struct function_update_data {
@@ -5289,11 +5427,12 @@ struct function_update_data {
u8 tx_switch_suspend;
u8 echo;
u8 update_tunn_cfg_flg;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
u8 sd_vlan_force_pri_change_flg;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
@@ -5302,6 +5441,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
+ __le16 reserved0;
+ __le32 reserved2;
};
/*
@@ -5330,15 +5471,6 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
- NVGRE_TUNNEL,
- L2GRE_TUNNEL,
- IPGRE_TUNNEL,
- MAX_GRE_TUNNEL_TYPE
-};
-
/*
* Dynamic Host-Coalescing - Driver(host) counters
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index d6e1975b7b69..46ee2c01f4c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
 * Structures and macros needed during the initialization.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 5669ed2e87d0..1835d2e451c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a0b03c27e0a3..d946bba43726 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+ return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct bnx2x *bp,
u8 port)
@@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
};
u16 fw_ver1;
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
phy->ver_addr);
@@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
/* Always write this if this is not 84833/4.
* For 84833/4, write it only when it's a forced speed.
*/
- if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+ if (!bnx2x_is_8483x_8485x(phy) ||
((autoneg_val & (1<<12)) == 0))
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ int idx;
+ u16 val;
+ struct bnx2x *bp = params->bp;
+
+ /* Step 1: Poll the STATUS register to see whether the previous command
+ * is in progress or the system is busy (CMD_IN_PROGRESS or
+ * SYSTEM_BUSY). If the previous command is in progress or the system
+ * is busy, keep polling until the command finishes and the system is
+ * free to accept a new command.
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+ (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Step 2: If any parameters are required for the function, write them
+ * to the required DATA registers
+ */
+
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+
+ /* Step 3: When the firmware is ready for commands, write the 'Command
+ * code' to the CMD register
+ */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+ /* Step 4: Once the command has been written, poll the STATUS register
+ * to check whether the command has completed (CMD_COMPLETE_PASS or
+ * CMD_COMPLETE_ERROR).
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+ return -EINVAL;
+ }
+ /* Step 5: Once the command has completed, read the specified DATA
+ * registers for any saved results, if applicable
+ */
+
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+
+ return 0;
+}
+
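
The five step comments above spell out the 84858 firmware mailbox: wait for
idle, stage the arguments, write the command, poll for completion, read the
results back. Reduced to its generic shape as a sketch; the accessors below
are placeholders for the MDIO_CTL_DEVAD reads and writes, not bnx2x helpers:

	/* Placeholder register accessors, NOT bnx2x functions */
	extern int poll_until_idle(void);
	extern int poll_until_done(void);
	extern void write_arg(int idx, u16 val);
	extern void write_cmd(u16 cmd);
	extern u16 read_arg(int idx);

	static int example_mbox_exchange(u16 fw_cmd, u16 *args, int argc)
	{
		int i;

		if (poll_until_idle())			/* step 1 */
			return -EINVAL;
		for (i = 0; i < argc; i++)		/* step 2 */
			write_arg(i, args[i]);
		write_cmd(fw_cmd);			/* step 3 */
		if (poll_until_done())			/* step 4 */
			return -EINVAL;
		for (i = 0; i < argc; i++)		/* step 5 */
			args[i] = read_arg(i);
		return 0;
	}
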
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params, u16 fw_cmd,
u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
usleep_range(1000, 2000);
}
- if (idx >= PHY84833_CMDHDLR_WAIT) {
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
@@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
/* Prepare argument(s) and issue command */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
usleep_range(1000, 2000);
}
- if ((idx >= PHY84833_CMDHDLR_WAIT) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
/* Gather returning data */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ struct bnx2x *bp = params->bp;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+ (REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port])) &
+ LINK_ATTR_84858)) {
+ return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ } else {
+ return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ }
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 pair_swap;
- u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
int status;
struct bnx2x *bp = params->bp;
@@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
/* Only the second argument is used for this command */
data[1] = (u16)pair_swap;
- status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+ status = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_PAIR_SWAP, data,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
/* Prevent Phy from working in EEE and advertising it */
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE disable failed.\n");
return rc;
@@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE enable failed.\n");
return rc;
@@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u8 port, initialize = 1;
u16 val;
u32 actual_phy_selection;
- u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
int rc = 0;
usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to come out of reset */
msleep(50);
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* BCM84823 requires that XGXS links up first @ 10G for normal
* behavior.
*/
@@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
vars->line_speed = temp;
}
+ /* Check if this is actually BCM84858 */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ u16 hw_rev;
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+ if (hw_rev == BCM84858_PHY_ID) {
+ params->link_attr_sync |= LINK_ATTR_84858;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ }
+ }
+
+ /* Set dual-media configuration according to the multi-phy configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
+ if (bnx2x_is_8483x_8485x(phy)) {
+ bnx2x_848xx_pair_swap_cfg(phy, params, vars);
/* Keep AutogrEEEn disabled. */
cmd_args[0] = 0x0;
cmd_args[1] = 0x0;
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args,
- PHY84833_CMDHDLR_MAX_ARGS);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
@@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read_and_write(bp, phy,
MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = {
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
+static const struct bnx2x_phy phy_84858 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
static const struct bnx2x_phy phy_54618se = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
.addr = 0xff,
@@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
*phy = phy_84834;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+ *phy = phy_84858;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
@@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
- (phy->ver_addr)) {
+ if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
/* Remove 100Mb link supported for BCM84833/4 when phy fw
* version lower than or equal to 1.39
*/
@@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
/* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
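
One subtlety worth pulling out of the bnx2x_link.c hunks above: a board
strapped as BCM84833/84834 may actually carry a BCM84858.
bnx2x_848x3_config_init() probes MDIO_AN_REG_848xx_ID_MSB and, on a
BCM84858_PHY_ID match, latches LINK_ATTR_84858 into shmem, so every later
firmware command is routed to the 84858 mailbox. The dispatch in
bnx2x_848xx_cmd_hdlr(), condensed to a sketch (link_attr_sync abbreviates the
shmem2 read in the real code):

	/* Strap type OR the runtime-detected shmem flag selects the 84858
	 * handshake; everything else keeps the legacy 84833 protocol with
	 * its CMD_OPEN_OVERRIDE request.
	 */
	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 ||
	    (link_attr_sync & LINK_ATTR_84858))
		rc = bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args, argc);
	else
		rc = bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, argc);
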
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d9cce4c3899b..b7d251108c19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c27af12314ed..e3da2bddf143 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,11 +83,11 @@
#define TX_TIMEOUT (5*HZ)
static char version[] =
- "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
@@ -163,27 +165,27 @@ enum bnx2x_board_type {
static struct {
char *name;
} board_info[] = {
- [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
- [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
- [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
- [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
- [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
- [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
- [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+ [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -264,11 +266,14 @@ static const struct pci_device_id bnx2x_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
@@ -2492,7 +2497,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
else {
u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
- if (IS_MF_SI(bp)) {
+ if (IS_MF_PERCENT_BW(bp)) {
/* maxCfg in percents of linkspeed */
vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
} else /* SD modes */
@@ -2916,7 +2921,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
- if (IS_MF_UFP(bp)) {
+ if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -2943,16 +2948,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
bp->mf_ov);
goto fail;
+ } else {
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+ bp->mf_ov);
}
-
- DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
- bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
- return;
+ } else {
+ goto fail;
}
- /* not supported by SW yet */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+ return;
fail:
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}
@@ -3065,7 +3070,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
storm_memset_func_en(bp, p->func_id, 1);
/* spq */
- if (p->func_flgs & FUNC_FLG_SPQ) {
+ if (p->spq_active) {
storm_memset_spq_addr(bp, p->spq_map, p->func_id);
REG_WR(bp, XSEM_REG_FAST_MEMORY +
XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3286,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct event_ring_data eq_data = { {0} };
- u16 flags;
if (!CHIP_IS_E1x(bp)) {
/* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3302,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
BP_FUNC(bp) : BP_VN(bp))*4, 0);
}
- /* function setup flags */
- flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
- /* This flag is relevant for E1x only.
- * E2 doesn't have a TPA configuration in a function level.
- */
- flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
- func_init.func_flgs = flags;
+ func_init.spq_active = true;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = BP_FUNC(bp);
func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3703,34 @@ out:
ethver, iscsiver, fcoever);
}
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+ struct timeval epoc;
+ u32 drv_ver;
+ u32 valid_dump;
+
+ if (!SHMEM2_HAS(bp, drv_info))
+ return;
+
+ /* Update Driver load time */
+ do_gettimeofday(&epoc);
+ SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+ drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+ SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+ /* Check & notify On-Chip dump. */
+ valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+ if (valid_dump & FIRST_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+ if (valid_dump & SECOND_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
u32 cmd_ok, cmd_fail;
@@ -5274,6 +5298,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+ vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+ break;
case BNX2X_FILTER_MCAST_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
@@ -5568,6 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPENING_WAIT4_PORT):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
cid = elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK;
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5615,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
bnx2x_handle_classification_eqe(bp, elem);
break;
@@ -6173,6 +6203,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_ALLMULTI:
__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6219,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_PROMISC:
/* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6244,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
else
__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
break;
default:
BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
return -EINVAL;
}
- /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
- if (rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
- }
-
return 0;
}
@@ -7429,6 +7466,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ if (SHMEM2_HAS(bp, netproc_fw_ver))
+ SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
return 0;
}
@@ -8406,6 +8446,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
return rc;
}
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+ if (rc == -EEXIST) {
+ /* Do not treat adding same vlan as error. */
+ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+ rc = 0;
+ } else if (rc < 0) {
+ BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+ }
+
+ return rc;
+}
+
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -10002,6 +10078,81 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
+
+ switch_update_params = &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vxlan_dst_port = port;
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc)
+ BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+ port, rc);
+ return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!netif_running(bp->dev))
+ return;
+
+ if (bp->vxlan_dst_port || !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+ return;
+ }
+
+ bp->vxlan_dst_port = port;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+ return;
+ }
+
+ if (netif_running(bp->dev)) {
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+ } else {
+ bp->vxlan_dst_port = 0;
+ netdev_info(bp->dev, "Deleted vxlan dest port %d\n", port);
+ }
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
static int bnx2x_close(struct net_device *dev);
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10010,6 +10161,9 @@ static int bnx2x_close(struct net_device *dev);
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+ u16 port;
+#endif
rtnl_lock();
@@ -10108,6 +10262,27 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
+#ifdef CONFIG_BNX2X_VXLAN
+ port = bp->vxlan_dst_port;
+ if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, port))
+ netdev_info(bp->dev, "Added vxlan dest port %d\n", port);
+ else
+ bp->vxlan_dst_port = 0;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, 0)) {
+ netdev_info(bp->dev,
+ "Deleted vxlan dest port %d\n", port);
+ bp->vxlan_dst_port = 0;
+ vxlan_get_rx_port(bp->dev);
+ }
+ }
+#endif
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -11678,7 +11853,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn;
+ int vn, mfw_vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -11768,6 +11943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
+ mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +12000,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_BD;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+
+ if (SHMEM2_HAS(bp, mtu_size)) {
+ int mtu_idx = BP_FW_MB_IDX(bp);
+ u16 mtu_size;
+ u32 mtu;
+
+ mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+ mtu_size = (u16)mtu;
+ DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+ mtu_size, mtu);
+
+ /* if valid: update device mtu */
+ if (((mtu_size + ETH_HLEN) >=
+ ETH_MIN_PACKET_SIZE) &&
+ (mtu_size <=
+ ETH_MAX_JUMBO_PACKET_SIZE))
+ bp->dev->mtu = mtu_size;
+ }
+ break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
bp->mf_mode = MULTI_FUNCTION_SD;
bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +12072,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
- } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+ } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+ (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
dev_err(&bp->pdev->dev,
- "Unexpected - no valid MF OV for func %d in UFP mode\n",
+ "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
func);
bp->path_has_ovlan = true;
} else {
@@ -12078,6 +12280,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->drv_info_mutex);
sema_init(&bp->stats_lock, 1);
bp->drv_info_mng_owner = false;
+ INIT_LIST_HEAD(&bp->vlan_reg);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12278,6 +12481,12 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+ if (IS_PF(bp))
+ vxlan_get_rx_port(dev);
+#endif
+
return 0;
}
@@ -12596,6 +12805,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
return vxlan_features_check(skb, features);
}
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+ int rc;
+
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+ add, &ramrod_flags);
+ } else {
+ rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+ }
+
+ return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!bp->vlan_cnt) {
+ DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+ return 0;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ /* Prepare for cleanup in case of errors */
+ if (rc) {
+ vlan->hw = false;
+ continue;
+ }
+
+ if (!vlan->hw)
+ continue;
+
+ DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+ vlan->hw = false;
+ rc = -EINVAL;
+ continue;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ bool hw = false;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+ vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ bp->vlan_cnt++;
+ if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+ DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+ bp->accept_any_vlan = true;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ } else if (bp->vlan_cnt <= bp->vlan_credit) {
+ rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+ hw = true;
+ }
+
+ vlan->vid = vid;
+ vlan->hw = hw;
+
+ if (!rc) {
+ list_add(&vlan->link, &bp->vlan_reg);
+ } else {
+ bp->vlan_cnt--;
+ kfree(vlan);
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+ return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+ if (!bp->vlan_cnt) {
+ BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ if (vlan->vid == vid)
+ break;
+
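+ /* If nothing matched, the iterator stopped at the list head and
+  * vlan points at container memory rather than a real entry; the
+  * vid mismatch below doubles as the not-found test.
+  */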
+ if (vlan->vid != vid) {
+ BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+ return -EINVAL;
+ }
+
+ if (vlan->hw)
+ rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+ list_del(&vlan->link);
+ kfree(vlan);
+
+ bp->vlan_cnt--;
+
+ if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+ /* Configure all non-configured entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ if (vlan->hw)
+ continue;
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to config VLAN %d\n",
+ vlan->vid);
+ continue;
+ }
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+ vlan->vid);
+ vlan->hw = true;
+ }
+ DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+ bp->accept_any_vlan = false;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+ return rc;
+}
+
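[Editor's note] Taken together, add_vid and kill_vid implement a small state machine around the hardware credit: each VID gets a real filter while the count fits the credit, the first VID past the credit flips the device into accept-any-VLAN, and falling back under the credit re-programs the deferred entries and clears the flag. The policy in isolation, as a standalone model:

    #include <stdbool.h>

    struct vlan_state {
        int  cnt;        /* registered VIDs */
        int  credit;     /* hardware filter slots */
        bool accept_any; /* accept-any-VLAN fallback engaged */
    };

    /* Returns true when the new VID gets a dedicated hardware filter. */
    static bool vlan_add(struct vlan_state *s)
    {
        s->cnt++;
        if (s->cnt > s->credit && !s->accept_any) {
            s->accept_any = true;  /* out of credit: accept everything */
            return false;
        }
        return s->cnt <= s->credit;
    }

    /* Returns true when the fallback can be lifted after this removal. */
    static bool vlan_kill(struct vlan_state *s)
    {
        s->cnt--;
        if (s->cnt <= s->credit && s->accept_any) {
            s->accept_any = false; /* re-program deferred entries */
            return true;
        }
        return false;
    }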
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12609,6 +12981,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout,
+ .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
@@ -12628,6 +13002,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+ .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
+ .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
+#endif
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12819,6 +13197,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+ /* VF with OLD Hypervisor or old PF do not support filtering */
+ if (IS_PF(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bp->accept_any_vlan = true;
+ else
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+ } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+ }
+
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13561,6 +13951,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bnx2x_register_phc(bp);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
return 0;
init_one_exit:
@@ -13623,6 +14016,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
/* Power on: we can't let PCI layer write to us while we are in D3 */
if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
/* Set endianity registers to reset values in case next driver
* boots in different endianty environment.
@@ -14371,6 +14765,90 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
rc = -EINVAL;
}
+ /* For storage-only interfaces, change driver state */
+ if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+ switch (ctl->drv_state) {
+ case DRV_NOP:
+ break;
+ case DRV_ACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_ACTIVE);
+ break;
+ case DRV_INACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_DISABLED);
+ break;
+ case DRV_UNLOADED:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_NOT_LOADED);
+ break;
+ default:
+ BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+ }
+ }
+
+ return rc;
+}
+
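[Editor's note] The drv_state switch above is driven from the cnic (storage) side through the drv_ctl hook exported below. A hedged sketch of what a caller might look like, assuming only the drv_ctl signature visible in this file and the companion cnic_if.h change that adds the drv_state field and the DRV_* constants:

    /* Sketch: storage driver announces it has gone active. */
    struct drv_ctl_info info = { .drv_state = DRV_ACTIVE };

    cp->drv_ctl(dev, &info); /* cp from bnx2x_cnic_probe() */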
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+ struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bdn_fc_npiv_tbl *tbl = NULL;
+ u32 offset, entries;
+ int rc = -EINVAL;
+ int i;
+
+ if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+ goto out;
+
+ DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl) {
+ BNX2X_ERR("Failed to allocate fc_npiv table\n");
+ goto out;
+ }
+
+ offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+ DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+ /* Read the table contents from nvram */
+ if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+ BNX2X_ERR("Failed to read FC-NPIV table\n");
+ goto out;
+ }
+
+ /* Since bnx2x_nvram_read() returns data in be32, we need to convert
+ * the number of entries back to cpu endianness.
+ */
+ entries = tbl->fc_npiv_cfg.num_of_npiv;
+ entries = (__force u32)be32_to_cpu((__force __be32)entries);
+ tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+ if (!tbl->fc_npiv_cfg.num_of_npiv) {
+ DP(BNX2X_MSG_MCP,
+ "No FC-NPIV table [valid, simply not present]\n");
+ goto out;
+ } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+ BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ goto out;
+ } else {
+ DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ }
+
+ /* Copy the data into cnic-provided struct */
+ cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+ for (i = 0; i < cnic_tbl->count; i++) {
+ memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+ memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+ }
+
+ rc = 0;
+out:
+ kfree(tbl);
return rc;
}
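[Editor's note] bnx2x_nvram_read() fills the buffer with raw big-endian words, so only the one field used arithmetically (the entry count) is byte-swapped before the bounds check; the WWN byte arrays are copied verbatim. The conversion-plus-validation pattern in isolation, standalone, using the userspace swap helper in place of be32_to_cpu (same semantics); the table bound is an assumed illustrative value:

    #include <arpa/inet.h> /* ntohl(): big-endian to host order */
    #include <stdint.h>

    #define MAX_NUMBER_NPIV 64 /* assumed for illustration */

    /* Swap the raw count once, then range-check it before it is ever
     * used to index the table.
     */
    static int npiv_count_check(uint32_t raw_be, uint32_t *out)
    {
        uint32_t n = ntohl(raw_be);

        if (n == 0 || n > MAX_NUMBER_NPIV)
            return -1; /* absent or corrupt table */
        *out = n;
        return 0;
    }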
@@ -14516,6 +14994,7 @@ static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index caf1aef651eb..a91ccbf36345 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 49d511092c82..4dead49bd5cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2137,6 +2139,10 @@
/* [RW 1] When this bit is set; the LLH will expect all packets to be with
e1hov */
#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028
/* [RW 1] When this bit is set; the LLH will classify the packet before
sending it to the BRB or calculating WoL on it. */
#define NIG_REG_LLH_MF_MODE 0x16024
@@ -2953,7 +2959,12 @@
#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
-#define PB_REG_CONTROL 0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0 0x15c06c
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
/* [R 2] Interrupt register #0 read */
@@ -3372,6 +3383,12 @@
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0 0x401a8
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
@@ -7240,6 +7257,9 @@ The other bits are reserved and should be zero*/
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
+#define BCM84858_PHY_ID 0x600d
+#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
@@ -7283,31 +7303,31 @@ The other bits are reserved and should be zero*/
#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
-#define PHY84833_CMD_GET_EEE_MODE 0x8008
-#define PHY84833_CMD_SET_EEE_MODE 0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
+#define PHY848xx_CMD_GET_EEE_MODE 0x8008
+#define PHY848xx_CMD_SET_EEE_MODE 0x8009
+/* Mailbox status set used by 84833 only */
#define PHY84833_STATUS_CMD_RECEIVED 0x0001
#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
@@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED 0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
+
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 4ad415ac8cfe..c9bd7f16018e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
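[Editor's note] The get/put pair above is the classic two-resource protocol: take the MAC credit, then the VLAN credit, and roll the first back if the second fails, so a VLAN-MAC entry never holds half its budget. The shape of the pattern, standalone:

    #include <stdbool.h>

    struct pool { int avail; };

    static bool pool_get(struct pool *p)
    {
        if (!p->avail)
            return false;
        p->avail--;
        return true;
    }

    static void pool_put(struct pool *p)
    {
        p->avail++;
    }

    /* Acquire from both pools or from neither. */
    static bool get_both(struct pool *macs, struct pool *vlans)
    {
        if (!pool_get(macs))
            return false;
        if (!pool_get(vlans)) {
            pool_put(macs); /* roll back the first acquisition */
            return false;
        }
        return true;
    }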
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return -EEXIST;
+
+ return 0;
+}
+
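[Editor's note] The duplicate scan above (and the DEL lookup that follows) compares three keys, not two: the VID, the MAC, and the inner-MAC flag, so the same address may legitimately exist once as an outer and once as an inner (tunnelled) classification. The predicate factored out as a standalone helper; the struct is a stand-in for the ramrod data:

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct vlan_mac_key {
        unsigned short vlan;
        unsigned char  mac[ETH_ALEN];
        bool           is_inner_mac;
    };

    /* Equality test shared by the ADD (reject duplicate) and DEL
     * (find victim) walks.
     */
    static bool vlan_mac_key_eq(const struct vlan_mac_key *a,
                                const struct vlan_mac_key *b)
    {
        return a->vlan == b->vlan &&
               !memcmp(a->mac, b->mac, ETH_ALEN) &&
               a->is_inner_mac == b->is_inner_mac;
    }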
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD);
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+ u16 inner_mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ target_obj = elem->cmd_data.vlan_mac.target_obj;
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set a VLAN itself */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ }
+
+ /* Set the ramrod data header */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - set one VLAN-MAC pair rule via the E1H CAM
+ *
+ * @bp: device handle
+ * @o: vlan_mac object the rule belongs to
+ * @elem: execution queue element describing the command
+ * @rule_idx: index of the rule within the current ramrod
+ * @cam_offset: CAM line to write the rule to
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd ==
+ elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /* CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+ BNX2X_ERR("Do not support chips others than E2\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
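[Editor's note] A hedged sketch of how a queue-setup path might wire the new object, built only from the prototype above; 'q', the rdata variables, and the chosen state/type constants are placeholders for the real call site, not taken from this patch:

    bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
                            q->cl_id, q->cid, BP_FUNC(bp),
                            rdata, rdata_mapping,
                            BNX2X_FILTER_VLAN_MAC_PENDING,
                            &bp->sp_state, BNX2X_OBJ_TYPE_RX_TX,
                            &bp->macs_pool, &bp->vlans_pool);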
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true(
* If credit is negative pool operations will always succeed (unlimited pool).
*
*/
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
- int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
{
/* Zero the object first */
memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
/* CAM credit is equaly divided between all active functions
* on the PATH.
*/
- if ((func_num > 0)) {
+ if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(bp))
- cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
else
cam_sz = BNX2X_CAM_SIZE_EMUL;
@@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
* on the PATH.
*/
if (func_num > 0) {
- int credit = MAX_VLAN_CREDIT_E2 / func_num;
- bnx2x_init_credit_pool(p, func_id * credit, credit);
+ int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+ bnx2x_init_credit_pool(p, -1 /* unused for E2 */, credit);
} else
/* this should never happen! Block VLAN operations. */
bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
- if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
- caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
/* RSS keys */
if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
- rdata->tunnel_mode = start_params->tunnel_mode;
- rdata->gre_tunnel_type = start_params->gre_tunnel_type;
- rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+
+ rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
+ rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
+ rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+ rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+ rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+ rdata->inner_rss = start_params->inner_rss;
+
rdata->sd_accept_mf_clss_fail = start_params->class_fail;
if (start_params->class_fail_ethtype) {
rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
cpu_to_le16(0x8100);
rdata->no_added_tags = start_params->no_added_tags;
+
+ rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+ if (rdata->c2s_pri_tt_valid) {
+ memcpy(rdata->c2s_pri_trans_table.val,
+ start_params->c2s_pri,
+ MAX_VLAN_PRIORITIES);
+ rdata->c2s_pri_default = start_params->c2s_pri_default;
+ }
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
@@ -5750,15 +6016,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ &switch_update_params->changes))
+ rdata->inner_clss_l2gre = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ &switch_update_params->changes))
+ rdata->inner_clss_vxlan = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
&switch_update_params->changes))
- rdata->tunn_clss_en = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ rdata->inner_clss_l2geneve = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
&switch_update_params->changes))
- rdata->inner_gre_rss_en = 1;
- rdata->tunnel_mode = switch_update_params->tunnel_mode;
- rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+ rdata->inner_rss = 1;
+ rdata->vxlan_dst_port =
+ cpu_to_le16(switch_update_params->vxlan_dst_port);
+ rdata->geneve_dst_port =
+ cpu_to_le16(switch_update_params->geneve_dst_port);
}
rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6158,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+ rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb7c60c..4048fc594cce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@ enum {
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
BNX2X_RSS_IPV6_UDP,
- BNX2X_RSS_GRE_INNER_HDRS,
+
+ BNX2X_RSS_IPV4_VXLAN,
+ BNX2X_RSS_IPV6_VXLAN,
+ BNX2X_RSS_TUNN_INNER_HDRS,
};
struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@ enum {
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
- BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
- BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
};
/* Allowed Function states */
@@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
- /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
- u8 tunnel_mode;
+ /* UDP dest port for VXLAN */
+ u16 vxlan_dst_port;
- /* tunneling classification enablement */
- u8 tunn_clss_en;
+ /* UDP dest port for Geneve */
+ u16 geneve_dst_port;
- /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
- u8 gre_tunnel_type;
+ /* Enable inner Rx classifications for L2GRE packets */
+ u8 inner_clss_l2gre;
- /* Enables Inner GRE RSS on the function, depends on the client RSS
- * capailities
- */
- u8 inner_gre_rss_en;
+ /* Enable inner Rx classifications for L2-Geneve packets */
+ u8 inner_clss_l2geneve;
+
+ /* Enable inner Rx classifications for VXLAN packets */
+ u8 inner_clss_vxlan;
+
+ /* Enable RSS according to inner header */
+ u8 inner_rss;
/* Allows accepting of packets failing MF classification, possibly
* only matching a given ethertype
@@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params {
/* Prevent inner vlans from being added by FW */
u8 no_added_tags;
+
+ /* Inner-to-Outer vlan priority mapping */
+ u8 c2s_pri[MAX_VLAN_PRIORITIES];
+ u8 c2s_pri_default;
+ u8 c2s_pri_valid;
};
struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params {
u16 vlan;
u16 vlan_eth_type;
u8 vlan_force_prio;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params {
u8 dcb_enabled;
u8 dcb_version;
u8 dont_add_pri_0_en;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *p, u8 func_id,
u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit);
/****************** RSS CONFIGURATION ****************/
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+#define PF_MAC_CREDIT_E2(bp, func_num) \
+ ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num) \
+ ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
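[Editor's note] Worked numbers make the two-term credit macros easier to audit. With values assumed purely for illustration (MAX_MAC_CREDIT_E2 = 272, 64 VFs per path, one MAC credit per VF, eight PFs sharing the path, eight VFs under this PF): the path-wide VF reservation is subtracted before the per-PF split, then this PF's own VFs' share is added back, giving (272 - 64)/8 + 8 = 34. Checked at compile time:

    /* Standalone C11 check of the arithmetic with the assumed values. */
    #define EX_MAX_MAC_CREDIT 272
    #define EX_VFS_PER_PATH   64
    #define EX_VF_CREDIT_CNT  1
    #define EX_FUNC_NUM       8
    #define EX_VFS_PER_PF     8

    _Static_assert((EX_MAX_MAC_CREDIT - EX_VFS_PER_PATH * EX_VF_CREDIT_CNT)
                   / EX_FUNC_NUM + EX_VFS_PER_PF * EX_VF_CREDIT_CNT == 34,
                   "PF MAC credit example");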
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f67348d16966..9d027348cd09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
setup_p->gen_params.fp_hsi = vf->fp_hsi;
- /* Setup-op pause params:
- * Nothing to do, the pause thresholds are set by default to 0 which
- * effectively turns off the feature for this queue. We don't want
- * one queue (VF) to interfering with another queue (another VF)
- */
- if (vf->cfg_flags & VF_CFG_FW_FC)
- BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
- vf->abs_vfid);
/* Setup-op flags:
* collect statistics, zero statistics, local-switching, security,
* OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
}
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
- int qid, bool drv_only, bool mac)
+ int qid, bool drv_only, int type)
{
struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (mac) {
+ if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ } else if (type == BNX2X_VF_FILTER_MAC) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
} else {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
&ramrod.ramrod_flags);
if (rc) {
BNX2X_ERR("Failed to delete all %s\n",
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
return rc;
}
- /* Clear the vlan counters */
- if (!mac)
- atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
return 0;
}
@@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
vf->abs_vfid, filter->add ? "Adding" : "Deleting",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (filter->type == BNX2X_VF_FILTER_VLAN) {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
+ if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
ramrod.user_req.u.vlan.vlan = filter->vid;
} else {
@@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
- /* Verify there are available vlan credits */
- if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
- (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
- vf_vlan_rules_cnt(vf))) {
- BNX2X_ERR("No credits for vlan [%d >= %d]\n",
- atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
- vf_vlan_rules_cnt(vf));
- return -ENOMEM;
- }
-
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
if (rc && rc != -EEXIST) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
- "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+ "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ?
+ "MAC" : "VLAN");
return rc;
}
- /* Update the vlan counters */
- if (filter->type == BNX2X_VF_FILTER_VLAN)
- bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
- &bnx2x_vfq(vf, qid, vlan_count));
-
return 0;
}
@@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
if (rc)
goto op_err;
- /* Configure vlan0 for leading queue */
- if (!qid) {
- struct bnx2x_vf_mac_vlan_filter filter;
-
- memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
- filter.type = BNX2X_VF_FILTER_VLAN;
- filter.add = true;
- filter.vid = 0;
- rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
- if (rc)
- goto op_err;
- }
-
/* Schedule the configuration of any pending vlan filters */
- vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV);
return 0;
@@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* If needed, clean the filtering data base */
if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false)) {
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
}
@@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
/* Remove filtering if feasible */
if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, false);
+ false,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, true);
+ false,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
- if (vf->cfg_flags & VF_CFG_INT_SIMD)
- val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0;
}
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- int new)
-{
- int num = vf_vlan_rules_cnt(vf);
- int diff = new - num;
- bool rc = true;
-
- DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
- vf->abs_vfid, new, num);
-
- if (diff > 0)
- rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
- else if (diff < 0)
- rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
- if (rc)
- vf_vlan_rules_cnt(vf) = new;
- else
- DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
- vf->abs_vfid);
-}
-
/* must be called after the number of PF queues and the number of VFs are
* both known
*/
@@ -875,21 +833,13 @@ static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct vf_pf_resc_request *resc = &vf->alloc_resc;
- u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculations for macs (just yet) */
- resc->num_mac_filters = 1;
-
- /* divvy up vlan rules */
- bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
- vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
- vlan_count = 1 << ilog2(vlan_count);
- bnx2x_iov_re_set_vlan_filters(bp, vf,
- vlan_count / BNX2X_NR_VIRTFN(bp));
+ resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+ resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
/* no real limitation */
resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
mutex_init(&bp->vfdb->bulletin_mutex);
+ if (SHMEM2_HAS(bp, sriov_switch_mode))
+ SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+ bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+ vf_vlan_rules_cnt(vf));
+ bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+ vf_mac_rules_cnt(vf));
+
/* init mcast object - This object will be re-initialized
* during VF-ACQUIRE with the proper cl_id and cid.
* It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
- /* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
- (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
@@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_sb_count(vf) = resc->num_sbs;
vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
- if (resc->num_mac_filters)
- vf_mac_rules_cnt(vf) = resc->num_mac_filters;
- /* Add an additional vlan filter credit for the hypervisor */
- bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf),
- vf_vlan_rules_visible_cnt(vf));
+ vf_vlan_rules_cnt(vf));
/* Initialize the queues */
if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
struct bnx2x_func_init_params func_init = {0};
- u16 flags = 0;
int i;
/* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
/* reset IGU VF statistics: MSIX */
REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
- /* vf init */
- if (vf->cfg_flags & VF_CFG_STATS)
- flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
- if (vf->cfg_flags & VF_CFG_TPA)
- flags |= FUNC_FLG_TPA;
-
- if (is_vf_multi(vf))
- flags |= FUNC_FLG_RSS;
-
/* function setup */
- func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
- func_init.fw_stat_map = vf->fw_stat_map;
- func_init.spq_map = vf->spq_map;
- func_init.spq_prod = 0;
bnx2x_func_init(bp, &func_init);
/* Enable the vf */
@@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
for_each_vf(bp, vfidx) {
- bulletin = BP_VF_BULLETIN(bp, vfidx);
- if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
}
}
@@ -2808,20 +2746,58 @@ out:
return rc;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, bool accept)
+{
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+ unsigned long accept_flags;
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (accept)
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u16 vlan, bool add)
{
- struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_update_params *update_params;
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+ : BNX2X_VLAN_MAC_DEL;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
struct pf_vf_bulletin_content *bulletin = NULL;
- struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_mac_obj *vlan_obj;
unsigned long vlan_mac_flags = 0;
unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- unsigned long accept_flags;
- int rc;
+ int i, rc;
if (vlan > 4095) {
BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc)
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
mutex_unlock(&bp->vfdb->bulletin_mutex);
/* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
goto out;
}
- /* need to remove/add the VF's accept_any_vlan bit */
- accept_flags = bnx2x_leading_vfq(vf, accept_flags);
- if (vlan)
- clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
- else
- set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
- bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
- accept_flags);
- bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
- bnx2x_config_rx_mode(bp, &rx_ramrod);
+ /* clear accept_any_vlan when HV forces vlan, otherwise
+ * according to VF capabilities
+ */
+ if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+ bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
- /* configure the new vlan to device */
- memset(&ramrod_param, 0, sizeof(ramrod_param));
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod_param.user_req.vlan_mac_flags);
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
+ rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+ if (rc)
goto out;
- }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrods to configure default vlan and
+ * silent vlan removal
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- &update_params->update_flags);
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
&update_params->update_flags);
- update_params->def_vlan = vlan;
- update_params->silent_removal_value =
- vlan & VLAN_VID_MASK;
- update_params->silent_removal_mask = VLAN_VID_MASK;
- }
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+ i);
+ goto out;
+ }
}
-
-
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "updated VF[%d] vlan configuration (vlan = %d)\n",
+ vfidx, vlan);
+
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 66ee62a0401a..670a581ffabc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
- atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* VLAN-MACs object */
+ struct bnx2x_vlan_mac_obj vlan_mac_obj;
+
unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
@@ -103,8 +108,10 @@ struct bnx2x_virtf;
struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VF_FILTER_MAC 1
-#define BNX2X_VF_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC BIT(0)
+#define BNX2X_VF_FILTER_VLAN BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+ (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
bool add;
u8 *mac;
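[Editor's note] Turning the filter types into BIT() flags is what makes the VLAN_MAC shortcut above work: a combined filter satisfies both single-type bit tests, so shared code paths can key off the individual bits instead of enumerating every combination. A standalone illustration:

    #include <assert.h>

    #define BIT(n) (1U << (n))

    #define FILTER_MAC      BIT(0)
    #define FILTER_VLAN     BIT(1)
    #define FILTER_VLAN_MAC (FILTER_MAC | FILTER_VLAN)

    int main(void)
    {
        unsigned int type = FILTER_VLAN_MAC;

        assert(type & FILTER_MAC);  /* MAC handling applies */
        assert(type & FILTER_VLAN); /* VLAN handling applies */
        return 0;
    }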
@@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
/* vf context */
struct bnx2x_virtf {
u16 cfg_flags;
-#define VF_CFG_STATS 0x0001
-#define VF_CFG_FW_FC 0x0002
-#define VF_CFG_TPA 0x0004
-#define VF_CFG_INT_SIMD 0x0008
-#define VF_CACHE_LINE 0x0010
-#define VF_CFG_VLAN 0x0020
-#define VF_CFG_STATS_COALESCE 0x0040
-#define VF_CFG_EXT_BULLETIN 0x0080
+#define VF_CFG_STATS_COALESCE 0x1
+#define VF_CFG_EXT_BULLETIN 0x2
+#define VF_CFG_VLAN_FILTER 0x4
u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO
* IFLA_VF_LINK_STATE_ENABLE
* IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@ struct bnx2x_virtf {
bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
- dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t fw_stat_map;
u16 stats_stride;
- dma_addr_t spq_map;
dma_addr_t bulletin_map;
/* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
- /* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */
@@ -207,6 +206,9 @@ struct bnx2x_virtf {
enum channel_tlvs op_current;
u8 fp_hsi;
+
+ struct bnx2x_credit_pool_obj vf_vlans_pool;
+ struct bnx2x_credit_pool_obj vf_macs_pool;
};
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@ struct bnx2x_virtf {
#define FW_VF_HANDLE(abs_vfid) \
(abs_vfid + FW_PF_MAX_HANDLE)
+#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+ : 0)
+#define VF_MAC_CREDIT_CNT 1
+#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */
+
/* locking and unlocking the channel mutex */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv);
@@ -274,6 +282,10 @@ struct bnx2x_vf_sp {
} vlan_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_mac_rdata;
+
+ union {
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
#else /* CONFIG_BNX2X_SRIOV */
+#define GET_NUM_VFS_PER_PATH(bp) 0
+#define GET_NUM_VFS_PER_PF(bp) 0
+#define VF_MAC_CREDIT_CNT 0
+#define VF_VLAN_CREDIT_CNT 0
+
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@ struct pf_vf_bulletin_content;
static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
bool support_long) {}
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 69d699f0730a..7e0919aa450e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 965539a9dabe..b2644ed13d06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 06b8c0d8fd3b..1374e5394a79 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->resc_request.num_sbs = bp->igu_sb_cnt;
req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+ req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* Bulletin support for bulletin board with length > legacy length */
req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+ /* vlan filtering is supported */
+ req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
/* add list termination tlv */
bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_MAC_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
+ &vf->vf_macs_pool);
/* vlan */
bnx2x_init_vlan_obj(bp, &q->vlan_obj,
cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_VLAN_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
+ &vf->vf_vlans_pool);
+ /* vlan-mac */
+ bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_macs_pool,
+ &vf->vf_vlans_pool);
/* mcast */
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
q->cid, func_id, func_id,
@@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
if (set)
- req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
/* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@ out:
return 0;
}
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc = 0;
+
+ if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+ DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+ return 0;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+ if (add)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+ /* sample bulletin board for hypervisor vlan */
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+ BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req->filters[0].vlan_tag = vid;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+ vid);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ if (mode == BNX2X_RX_MODE_PROMISC)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
}
+ if (bp->accept_any_vlan)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
req->vf_qid = 0;
@@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
PFVF_CAP_TPA |
- PFVF_CAP_TPA_UPDATE);
+ PFVF_CAP_TPA_UPDATE |
+ PFVF_CAP_VLAN_FILTER);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf);
- resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
}
+ if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+ DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+ vf->abs_vfid);
+ vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+ } else {
+ vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+ }
+
out:
/* response */
bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
int rc;
/* record ghost addresses from vf message */
- vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
if ((msg_filter->flags & type_flag) != type_flag)
continue;
- if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+ if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VF_FILTER_MAC;
- } else {
+ fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+ }
+ if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+ fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
}
- fl->filters[j].add =
- (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
- true : false;
+ fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
fl->count++;
+ j++;
}
if (!fl->count)
kfree(fl);
@@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
return 0;
}
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+ u32 flags)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ if ((filters->filters[i].flags & flags) == flags)
+ cnt++;
+
+ return cnt;
+}
+
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
struct vfpf_q_mac_vlan_filter *filter)
{
@@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
@@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+ /* build vlan-mac list */
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
+ VFPF_VLAN_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set mac list */
+ /* set vlan-mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
@@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
- /* build vlan list */
+ /* build mac list */
fl = NULL;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
+ VFPF_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set vlan list */
+ /* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
}
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
- /* A packet arriving the vf's mac should be accepted
- * with any vlan, unless a vlan has already been
- * configured.
+ /* any_vlan is not configured if HV is forcing VLAN
+ * any_vlan is configured if
+ * 1. VF does not support vlan filtering
+ * OR
+ * 2. VF supports vlan filtering and explicitly requested it
*/
- if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+ (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+ msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
@@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
* since queue was not set up.
*/
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
- /* once a mac was set by ndo can only accept a single mac... */
- if (filters->n_mac_vlan_filters > 1) {
- BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
+ struct vfpf_q_mac_vlan_filter *filter = NULL;
+ int i;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (!(filters->filters[i].flags &
+ VFPF_Q_FILTER_DEST_MAC_VALID))
+ continue;
+
+ /* once a mac was set by ndo, we can only accept
+ * a single mac...
+ */
+ if (filter) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+ vf->abs_vfid,
+ filters->n_mac_vlan_filters);
+ rc = -EPERM;
+ goto response;
+ }
+
+ filter = &filters->filters[i];
}
/* ...and only the mac set by the ndo */
- if (filters->n_mac_vlan_filters == 1 &&
- !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+ if (filter &&
+ !ether_addr_equal(filter->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
@@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
- int i;
-
/* search for vlan filters */
- for (i = 0; i < filters->n_mac_vlan_filters; i++) {
- if (filters->filters[i].flags &
- VFPF_Q_FILTER_VLAN_TAG_VALID) {
- BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
- }
+
+ if (bnx2x_vf_filters_contain(filters,
+ VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index b86479fc0d2f..64f2b52c5829 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
*
* Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
*
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is
+ * not derived from this software. The special exception does not apply to any
+ * modifications of the software.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@ struct hw_sb_info {
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020
+
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_CONTENT_LEGACY_SIZE (32)
#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@ struct vfpf_acquire_tlv {
u8 fp_hsi_ver;
u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1)
} vfdev_info;
struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv {
struct pf_vf_pfdev_info {
u32 chip_num;
u32 pf_cap;
-#define PFVF_CAP_RSS 0x00000001
-#define PFVF_CAP_DHC 0x00000002
-#define PFVF_CAP_TPA 0x00000004
-#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_VLAN_FILTER 0x00000010
+
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
-#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+#define VFPF_Q_FILTER_SET 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 17c145fdf3ff..b69dc58faeab 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -192,6 +192,7 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
@@ -206,6 +207,7 @@ static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
@@ -219,6 +221,7 @@ static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
@@ -236,6 +239,7 @@ static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
@@ -249,13 +253,14 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct fcoe_capabilities *fcoe_cap =
&info.data.register_data.fcoe_features;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
}
info.data.ulp_type = ulp_type;
+ info.drv_state = state;
ethdev->drv_ctl(dev->netdev, &info);
}
@@ -286,6 +293,7 @@ static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
- cnic_ulp_ctl(dev, ulp_type, true);
+ cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
return 0;
@@ -636,7 +644,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
- cnic_ulp_ctl(dev, ulp_type, false);
+ if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+ else
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
return 0;
}
@@ -4267,6 +4278,7 @@ static void cnic_delete_task(struct work_struct *work)
cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
cp->ethdev->drv_ctl(dev->netdev, &info);
}
@@ -5433,6 +5445,23 @@ static void cnic_free_dev(struct cnic_dev *dev)
kfree(dev);
}
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+ struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ int ret;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+ return -EINVAL;
+
+ ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+ return ret;
+}
+
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
struct pci_dev *pdev)
{
@@ -5451,6 +5480,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->register_device = cnic_register_device;
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+ cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ef6125b0ee3e..789e5c7e9311 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -15,8 +15,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.21"
-#define CNIC_MODULE_RELDATE "January 29, 2015"
+#define CNIC_MODULE_VERSION "2.5.22"
+#define CNIC_MODULE_RELDATE "July 20, 2015"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -151,6 +151,11 @@ struct drv_ctl_register_data {
struct drv_ctl_info {
int cmd;
+ int drv_state;
+#define DRV_NOP 0
+#define DRV_ACTIVE 1
+#define DRV_INACTIVE 2
+#define DRV_UNLOADED 3
union {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
@@ -161,6 +166,15 @@ struct drv_ctl_info {
} data;
};
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+ u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u32 count;
+};
+
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
@@ -226,6 +240,8 @@ struct cnic_eth_dev {
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+ int (*drv_get_fc_npiv_tbl)(struct net_device *,
+ struct cnic_fc_npiv_tbl *);
unsigned long reserved1[2];
union drv_info_to_mcp *addr_drv_info_to_mcp;
};
@@ -314,6 +330,7 @@ struct cnic_dev {
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
+ int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b43b2cb9b830..fadbd0088d3e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
if (mode == GENET_POWER_PASSIVE)
- bcmgenet_mii_reset(priv->dev);
+ bcmgenet_phy_power_set(priv->dev, true);
}
/* ioctl handle special commands that are not present in ethtool. */
@@ -1230,7 +1229,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
new_skb = skb_realloc_headroom(skb, sizeof(*status));
dev_kfree_skb(skb);
if (!new_skb) {
- dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return NULL;
}
@@ -1465,7 +1463,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
goto next;
}
@@ -1493,7 +1490,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -1515,7 +1511,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dev->stats.rx_frame_errors++;
if (dma_flag & DMA_RX_LG)
dev->stats.rx_length_errors++;
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -1729,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv)
int0_enable |= UMAC_IRQ_TXDMA_DONE;
/* Monitor cable plug/unplugged event for internal PHY */
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2130,6 +2125,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
int ret = 0;
int timeout = 0;
u32 reg;
+ u32 dma_ctrl;
+ int i;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2173,6 +2170,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
ret = -ETIMEDOUT;
}
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return ret;
}
@@ -2393,6 +2404,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Invoke the main RX/TX interrupt handler */
+ disable_irq(priv->irq0);
+ bcmgenet_isr0(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ /* And the interrupt handler for RX/TX priority queues */
+ disable_irq(priv->irq1);
+ bcmgenet_isr1(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -2630,13 +2658,12 @@ static int bcmgenet_open(struct net_device *dev)
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
/* Turn on the clock */
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
/* take MAC out of reset */
@@ -2655,7 +2682,7 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2691,23 +2718,24 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
- /* Re-configure the port multiplexer towards the PHY device */
- bcmgenet_mii_config(priv->dev, false);
-
- phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ ret = bcmgenet_mii_probe(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_irq1;
+ }
bcmgenet_netif_start(dev);
return 0;
+err_irq1:
+ free_irq(priv->irq1, priv);
err_irq0:
- free_irq(priv->irq0, dev);
+ free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2761,11 +2789,10 @@ static int bcmgenet_close(struct net_device *dev)
free_irq(priv->irq0, priv);
free_irq(priv->irq1, priv);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2824,8 +2851,6 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- bcmgenet_disable_tx_napi(priv);
-
for (q = 0; q < priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2841,8 +2866,6 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_enable_tx_napi(priv);
-
dev->trans_start = jiffies;
dev->stats.tx_errors++;
@@ -2945,6 +2968,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_do_ioctl = bcmgenet_ioctl,
.ndo_set_features = bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcmgenet_poll_controller,
+#endif
};
/* Array of GENET hardware parameters/characteristics */
@@ -3218,11 +3244,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->version = pd->genet_version;
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
- if (IS_ERR(priv->clk))
+ if (IS_ERR(priv->clk)) {
dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+ priv->clk = NULL;
+ }
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
bcmgenet_set_hw_params(priv);
@@ -3233,8 +3260,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
- if (IS_ERR(priv->clk_wol))
+ if (IS_ERR(priv->clk_wol)) {
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ priv->clk_wol = NULL;
+ }
priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
@@ -3260,8 +3289,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
if (err)
@@ -3270,8 +3298,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
return err;
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err:
free_netdev(dev);
return err;
@@ -3323,7 +3350,7 @@ static int bcmgenet_suspend(struct device *d)
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
clk_prepare_enable(priv->clk_wol);
- } else if (phy_is_internal(priv->phydev)) {
+ } else if (priv->internal_phy) {
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
}
@@ -3352,7 +3379,7 @@ static int bcmgenet_resume(struct device *d)
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
bcmgenet_umac_reset(priv);
@@ -3367,14 +3394,14 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev, false);
+ bcmgenet_mii_config(priv->dev);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6159deab8c98..7299d1075422 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -593,6 +593,7 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
struct phy_device *phydev;
+ bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index adf23d2ac488..c8affad76f36 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,14 +163,13 @@ void bcmgenet_mii_setup(struct net_device *dev)
phy_print_status(phydev);
}
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
+ if (dev && dev->phydev && status)
+ status->link = dev->phydev->link;
- if (priv->phydev) {
- phy_init_hw(priv->phydev);
- phy_start_aneg(priv->phydev);
- }
+ return 0;
}
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_PWR_DN_EN_LD;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(dev);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
reg |= LED_ACT_SOURCE_MAC;
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ fixed_phy_set_link_update(priv->phydev,
+ bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
u32 port_ctrl;
u32 reg;
- priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->phy_interface = PHY_INTERFACE_MODE_NA;
switch (priv->phy_interface) {
@@ -259,7 +261,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
phy_name = "internal PHY";
bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ dev_info_once(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->old_pause = -1;
if (dn) {
- if (priv->phydev) {
- pr_info("PHY already attached\n");
- return 0;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
- ret = of_phy_register_fixed_link(dn);
- if (ret)
- return ret;
-
- priv->phy_dn = of_node_get(dn);
- }
-
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
phy_flags, priv->phy_interface);
if (!phydev) {
@@ -386,7 +371,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev, true);
+ ret = bcmgenet_mii_config(dev);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
@@ -397,14 +382,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
else
priv->mii_bus->irq[phydev->addr] = PHY_POLL;
- pr_info("attached PHY at address %d [%s]\n",
- phydev->addr, phydev->drv->name);
-
return 0;
}
@@ -490,7 +472,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
+ const char *phy_mode_str = NULL;
+ struct phy_device *phydev = NULL;
char *compat;
+ int phy_mode;
int ret;
compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Fetch the PHY phandle */
priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = of_node_get(dn);
+ }
+
/* Get the link mode */
- priv->phy_interface = of_get_phy_mode(dn);
+ phy_mode = of_get_phy_mode(dn);
+ priv->phy_interface = phy_mode;
- return 0;
-}
+ /* We need to specifically look up whether this PHY interface is internal
+ * or not *before* we even try to probe the PHY driver over MDIO as we
+ * may have shut down the internal PHY for power saving purposes.
+ */
+ if (phy_mode < 0) {
+ ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+ if (ret < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return ret;
+ }
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
- struct fixed_phy_status *status)
-{
- if (dev && dev->phydev && status)
- status->link = dev->phydev->link;
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+ if (!strcasecmp(phy_mode_str, "internal"))
+ priv->internal_phy = true;
+ }
+
+ /* Make sure we initialize MoCA PHYs with a link down */
+ if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+ phydev = of_phy_find_device(dn);
+ if (phydev)
+ phydev->link = 0;
+ }
return 0;
}
@@ -574,18 +585,15 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phydev || IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
- ret = fixed_phy_set_link_update(
- phydev, bcmgenet_fixed_phy_link_update);
- if (!ret)
- phydev->link = 0;
- }
+ /* Make sure we initialize MoCA PHYs with a link down */
+ phydev->link = 0;
+
}
priv->phydev = phydev;
@@ -615,10 +623,6 @@ int bcmgenet_mii_init(struct net_device *dev)
ret = bcmgenet_mii_bus_init(priv);
if (ret)
- goto out_free;
-
- ret = bcmgenet_mii_probe(dev);
- if (ret)
goto out;
return 0;
@@ -626,7 +630,6 @@ int bcmgenet_mii_init(struct net_device *dev)
out:
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
-out_free:
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
return ret;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ac27e24264a5..f557a2aaec23 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
__raw_writeq(reg, port);
port = s->sbm_base + R_MAC_ETHERNET_ADDR;
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
- /*
- * Pass1 SOCs do not receive packets addressed to the
- * destination address in the R_MAC_ETHERNET_ADDR register.
- * Set the value to zero.
- */
- __raw_writeq(0, port);
-#else
__raw_writeq(reg, port);
-#endif
/*
* Set the receive filter for no packets, and write values
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 73c934cf6c61..79789d8e52da 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
tg3_ape_scratchpad_read(tp, &temperature, attr->index,
sizeof(temperature));
spin_unlock_bh(&tp->lock);
- return sprintf(buf, "%u\n", temperature);
+ return sprintf(buf, "%u\n", temperature * 1000);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..88c1e1a834f8 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
+/* I/O accessors */
+static u32 hw_readl_native(struct macb *bp, int offset)
+{
+ return __raw_readl(bp->regs + offset);
+}
+
+static void hw_writel_native(struct macb *bp, int offset, u32 value)
+{
+ __raw_writel(value, bp->regs + offset);
+}
+
+static u32 hw_readl(struct macb *bp, int offset)
+{
+ return readl_relaxed(bp->regs + offset);
+}
+
+static void hw_writel(struct macb *bp, int offset, u32 value)
+{
+ writel_relaxed(value, bp->regs + offset);
+}
+
+/*
+ * Find the CPU endianness by using the loopback bit of the NCR register. When
+ * the CPU is in big endian mode we need to program swapped mode for management
+ * descriptor access.
+ */
+static bool hw_is_native_io(void __iomem *addr)
+{
+ u32 value = MACB_BIT(LLB);
+
+ __raw_writel(value, addr + MACB_NCR);
+ value = __raw_readl(addr + MACB_NCR);
+
+ /* Write 0 back to disable everything */
+ __raw_writel(0, addr + MACB_NCR);
+
+ return value == MACB_BIT(LLB);
+}
+
+static bool hw_is_gem(void __iomem *addr, bool native_io)
+{
+ u32 id;
+
+ if (native_io)
+ id = __raw_readl(addr + MACB_MID);
+ else
+ id = readl_relaxed(addr + MACB_MID);
+
+ return MACB_BFEXT(IDNUM, id) >= 0x2;
+}
+
static void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
}
}
- netdev_info(bp->dev, "invalid hw address, using random\n");
+ dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
eth_hw_addr_random(bp->dev);
}
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
unsigned long flags;
-
int status_change = 0;
spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 __iomem *reg = bp->regs + MACB_PFR;
u32 *p = &bp->hw_stats.macb.rx_pause_frames;
u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, reg++)
- *p += readl_relaxed(reg);
+ for(; p < end; p++, offset += 4)
+ *p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
}
#endif
-static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
- unsigned int len)
-{
- return (len + bp->max_tx_length - 1) / bp->max_tx_length;
-}
-
static unsigned int macb_tx_map(struct macb *bp,
struct macb_queue *queue,
struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
* socket buffer: skb fragments of jumbo frames may need to be
* split into many buffer descriptors.
*/
- count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
- count += macb_count_tx_descriptors(bp, frag_size);
+ count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
static void macb_configure_dma(struct macb *bp)
{
u32 dmacfg;
- u32 tmp, ncr;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
dmacfg &= ~GEM_BIT(ENDIA_PKT);
- /* Find the CPU endianness by using the loopback bit of net_ctrl
- * register. save it first. When the CPU is in big endian we
- * need to program swaped mode for management descriptor access.
- */
- ncr = macb_readl(bp, NCR);
- __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
- tmp = __raw_readl(bp->regs + MACB_NCR);
-
- if (tmp == MACB_BIT(LLB))
+ if (bp->native_io)
dmacfg &= ~GEM_BIT(ENDIA_DESC);
else
dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
- /* Restore net_ctrl */
- macb_writel(bp, NCR, ncr);
-
if (bp->dev->features & NETIF_F_HW_CSUM)
dmacfg |= GEM_BIT(TXCOEN);
else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- int i;
+ unsigned int i;
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
- u64 val = readl_relaxed(bp->regs + offset);
+ u64 val = bp->macb_reg_readl(bp, offset);
bp->ethtool_stats[i] += val;
*p += val;
if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
/* Add GEM_OCTTXH, GEM_OCTRXH */
- val = readl_relaxed(bp->regs + offset + 4);
+ val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
*(++p) += val;
}
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
- int i;
+ unsigned int i;
switch (sset) {
case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
if (dt_conf)
bp->caps = dt_conf->caps;
- if (macb_is_gem_hw(bp->regs)) {
+ if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
bp->caps |= MACB_CAPS_FIFO_MODE;
}
- netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+ dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
+ bool native_io,
unsigned int *queue_mask,
unsigned int *num_queues)
{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
* we are early in the probe process and don't have the
* MACB_CAPS_MACB_IS_GEM flag positioned
*/
- if (!macb_is_gem_hw(mem))
+ if (!hw_is_gem(mem, native_io))
return;
/* bit 0 is never set but queue 0 always exists */
@@ -2741,8 +2774,7 @@ static const struct macb_config emac_config = {
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -2750,8 +2782,7 @@ static const struct macb_config zynqmp_config = {
};
static const struct macb_config zynq_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_NO_GIGABIT_HALF,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -2786,6 +2817,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *pclk, *hclk, *tx_clk;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
+ bool native_io;
struct phy_device *phydev;
struct net_device *dev;
struct resource *regs;
@@ -2794,6 +2826,11 @@ static int macb_probe(struct platform_device *pdev)
struct macb *bp;
int err;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
if (np) {
const struct of_device_id *match;
@@ -2809,14 +2846,9 @@ static int macb_probe(struct platform_device *pdev)
if (err)
return err;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(mem)) {
- err = PTR_ERR(mem);
- goto err_disable_clocks;
- }
+ native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, &queue_mask, &num_queues);
+ macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -2831,6 +2863,14 @@ static int macb_probe(struct platform_device *pdev)
bp->pdev = pdev;
bp->dev = dev;
bp->regs = mem;
+ bp->native_io = native_io;
+ if (native_io) {
+ bp->macb_reg_readl = hw_readl_native;
+ bp->macb_reg_writel = hw_writel_native;
+ } else {
+ bp->macb_reg_readl = hw_readl;
+ bp->macb_reg_writel = hw_writel;
+ }
bp->num_queues = num_queues;
bp->queue_mask = queue_mask;
if (macb_config)
@@ -2838,9 +2878,8 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
- if (macb_config->jumbo_max_len) {
+ if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
- }
spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..6e1faea00ca8 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000008
+#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
@@ -429,18 +429,12 @@
| GEM_BF(name, value))
/* Register access macros */
-#define macb_readl(port,reg) \
- readl_relaxed((port)->regs + MACB_##reg)
-#define macb_writel(port,reg,value) \
- writel_relaxed((value), (port)->regs + MACB_##reg)
-#define gem_readl(port, reg) \
- readl_relaxed((port)->regs + GEM_##reg)
-#define gem_writel(port, reg, value) \
- writel_relaxed((value), (port)->regs + GEM_##reg)
-#define queue_readl(queue, reg) \
- readl_relaxed((queue)->bp->regs + (queue)->reg)
-#define queue_writel(queue, reg, value) \
- writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
+#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
+#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
+#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
+#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
+#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
+#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
struct macb {
void __iomem *regs;
+ bool native_io;
+
+ /* hardware IO accessors */
+ u32 (*macb_reg_readl)(struct macb *bp, int offset);
+ void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
unsigned int rx_tail;
unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
+ int link;
+ int speed;
+ int duplex;
u32 caps;
unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
-static inline bool macb_is_gem_hw(void __iomem *addr)
-{
- return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
-}
-
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..9b35d142f47a 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
depends on 64BIT
- default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -29,14 +28,14 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
- default ARCH_THUNDER
+ select PHYLIB
+ select MDIO_OCTEON
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 0660deecc2c9..f683d97d7614 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -818,10 +818,9 @@ static int setup_glist(struct lio *lio)
INIT_LIST_HEAD(&lio->glist);
for (i = 0; i < lio->tx_qsize; i++) {
- g = kmalloc(sizeof(*g), GFP_KERNEL);
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- memset(g, 0, sizeof(struct octnic_gather));
g->sg_size =
((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..d3950b20feb9 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,7 +125,17 @@
*/
#define NICPF_CLK_PER_INT_TICK 2
+/* Time to wait before we decide that an SQ is stuck.
+ *
+ * Since both packet Rx and Tx notifications are done on the same CQ,
+ * when packets are received at a very high rate (e.g. L2 forwarding),
+ * freeing of transmitted skbs is delayed and the watchdog kicks in,
+ * resetting the interface. Hence keep this value high.
+ */
+#define NICVF_TX_TIMEOUT (50 * HZ)
+
struct nicvf_cq_poll {
+ struct nicvf *nicvf;
u8 cq_idx; /* Completion queue index */
struct napi_struct napi;
};
@@ -181,10 +191,10 @@ enum tx_stats_reg_offset {
};
struct nicvf_hw_stats {
- u64 rx_bytes_ok;
- u64 rx_ucast_frames_ok;
- u64 rx_bcast_frames_ok;
- u64 rx_mcast_frames_ok;
+ u64 rx_bytes;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
u64 rx_fcs_errors;
u64 rx_l2_errors;
u64 rx_drop_red;
@@ -195,6 +205,31 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+ u64 rx_bgx_truncated_pkts;
+ u64 rx_jabber_errs;
+ u64 rx_fcs_errs;
+ u64 rx_bgx_errs;
+ u64 rx_prel2_errs;
+ u64 rx_l2_hdr_malformed;
+ u64 rx_oversize;
+ u64 rx_undersize;
+ u64 rx_l2_len_mismatch;
+ u64 rx_l2_pclp;
+ u64 rx_ip_ver_errs;
+ u64 rx_ip_csum_errs;
+ u64 rx_ip_hdr_malformed;
+ u64 rx_ip_payload_malformed;
+ u64 rx_ip_ttl_errs;
+ u64 rx_l3_pclp;
+ u64 rx_l4_malformed;
+ u64 rx_l4_csum_errs;
+ u64 rx_udp_len_errs;
+ u64 rx_l4_port_errs;
+ u64 rx_tcp_flag_errs;
+ u64 rx_tcp_offset_errs;
+ u64 rx_l4_pclp;
+ u64 rx_truncated_pkts;
+
u64 tx_bytes_ok;
u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok;
@@ -213,21 +248,34 @@ struct nicvf_drv_stats {
u64 rx_frames_1518;
u64 rx_frames_jumbo;
u64 rx_drops;
+
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
- u64 tx_busy;
u64 tx_tso;
+ u64 txq_stop;
+ u64 txq_wake;
};
struct nicvf {
+ struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
u8 vf_id;
u8 node;
- u8 tns_mode;
+ u8 tns_mode:1;
+ u8 sqs_mode:1;
+ u8 loopback_supported:1;
u16 mtu;
struct queue_set *qs;
+#define MAX_SQS_PER_VF_SINGLE_NODE 5
+#define MAX_SQS_PER_VF 11
+ u8 sqs_id;
+ u8 sqs_count; /* Secondary Qset count */
+ struct nicvf *snicvf[MAX_SQS_PER_VF];
+ u8 rx_queues;
+ u8 tx_queues;
+ u8 max_queues;
void __iomem *reg_base;
bool link_up;
u8 duplex;
@@ -247,7 +295,7 @@ struct nicvf {
u32 cq_coalesce_usecs;
u32 msg_enable;
- struct nicvf_hw_stats stats;
+ struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
struct work_struct reset_task;
@@ -259,10 +307,9 @@ struct nicvf {
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
- bool pf_ready_to_rcv_msg;
+ /* VF <-> PF mailbox communication */
bool pf_acked;
bool pf_nacked;
- bool bgx_stats_acked;
bool set_mac_pending;
} ____cacheline_aligned_in_smp;
@@ -294,14 +341,21 @@ struct nicvf {
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
-#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
+#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
+#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send SQS's nicvf ptr to primary VF */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg {
u8 msg;
u8 vf_id;
- u8 tns_mode;
u8 node_id;
+ u8 tns_mode:1;
+ u8 sqs_mode:1;
+ u8 loopback_supported:1;
u8 mac_addr[ETH_ALEN];
};
@@ -309,6 +363,7 @@ struct nic_cfg_msg {
struct qs_cfg_msg {
u8 msg;
u8 num;
+ u8 sqs_count;
u64 cfg;
};
@@ -325,6 +380,7 @@ struct sq_cfg_msg {
u8 msg;
u8 qs_num;
u8 sq_num;
+ bool sqs_mode;
u64 cfg;
};
@@ -384,6 +440,28 @@ struct bgx_link_status {
u32 speed;
};
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+ u8 msg;
+ u8 vf_id;
+ u8 qs_count;
+};
+
+struct nicvf_ptr {
+ u8 msg;
+ u8 vf_id;
+ bool sqs_mode;
+ u8 sqs_id;
+ u64 nicvf;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ u8 msg;
+ u8 vf_id;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -398,6 +476,9 @@ union nic_mbx {
struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
+ struct sqs_alloc sqs_alloc;
+ struct nicvf_ptr nicvf;
+ struct set_loopback lbk;
};
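+
+/* Whichever member is used, a message always travels as this union's
+ * two u64 words; a minimal sketch of the VF-side copy (the mailbox
+ * register name is assumed from the VF register map):
+ *
+ *	u64 *w = (u64 *)mbx;
+ *
+ *	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, w[0]);
+ *	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, w[1]);
+ */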
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6e0c03169a55..b3a5947a2cc0 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -28,6 +28,11 @@ struct nicpf {
u8 num_vf_en; /* No of VF enabled */
bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
void __iomem *reg_base; /* Register start address */
+ u8 num_sqs_en; /* Secondary qsets enabled */
+ u64 nicvf[MAX_NUM_VFS_SUPPORTED];
+ u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+ u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
+ bool sqs_used[MAX_NUM_VFS_SUPPORTED];
struct pkind_cfg pkind;
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
@@ -139,14 +144,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
- bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
- if (mac)
- ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+ if (vf < MAX_LMAC) {
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+ }
+	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en);
mbx.nic_cfg.node_id = nic->node;
+
+ mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -329,6 +339,10 @@ static void nic_init_hw(struct nicpf *nic)
/* Timer config */
nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+
+ /* Enable VLAN ethertype matching and stripping */
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+ (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
}
/* Channel parse index configuration */
@@ -429,6 +443,12 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
qset = cfg->vf_id;
for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ u8 svf = cfg->ind_tbl[idx] >> 3;
+
+ if (svf)
+ qset = nic->vf_sqs[cfg->vf_id][svf - 1];
+ else
+ qset = cfg->vf_id;
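+		/* Each ind_tbl entry packs a queue index in bits 2:0 and
+		 * a Qset selector above them: 0 is the VF's own Qset, N
+		 * its (N - 1)th secondary Qset, mirroring the register
+		 * write below.
+		 */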
nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
(qset << 3) | (cfg->ind_tbl[idx] & 0x7));
idx++;
@@ -452,19 +472,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
* VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
* VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
*/
-static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+ struct sq_cfg_msg *sq)
{
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
+ u8 sq_idx = sq->sq_num;
+ u8 pqs_vnic;
+
+ if (sq->sqs_mode)
+ pqs_vnic = nic->pqs_vf[vnic];
+ else
+ pqs_vnic = vnic;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
tl4 += sq_idx;
+ if (sq->sqs_mode)
+ tl4 += vnic * 8;
+
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
@@ -485,6 +517,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
+/* Send primary nicvf pointer to secondary QS's VF */
+static void nic_send_pnicvf(struct nicpf *nic, int sqs)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
+ nic_send_msg_to_vf(nic, sqs, &mbx);
+}
+
+/* Send SQS's nicvf pointer to primary QS's VF */
+static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
+{
+ union nic_mbx mbx = {};
+ int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.sqs_id = nicvf->sqs_id;
+ mbx.nicvf.nicvf = nic->nicvf[sqs_id];
+ nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
+}
+
+/* Find next available Qset that can be assigned as a
+ * secondary Qset to a VF.
+ */
+static int nic_nxt_avail_sqs(struct nicpf *nic)
+{
+ int sqs;
+
+	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
+		if (nic->sqs_used[sqs])
+			continue;
+		nic->sqs_used[sqs] = true;
+		return sqs + nic->num_vf_en;
+	}
+ return -1;
+}
+
+/* Allocate additional Qsets for requested VF */
+static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
+{
+ union nic_mbx mbx = {};
+ int idx, alloc_qs = 0;
+ int sqs_id;
+
+ if (!nic->num_sqs_en)
+ goto send_mbox;
+
+ for (idx = 0; idx < sqs->qs_count; idx++) {
+ sqs_id = nic_nxt_avail_sqs(nic);
+ if (sqs_id < 0)
+ break;
+ nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
+ nic->pqs_vf[sqs_id] = sqs->vf_id;
+ alloc_qs++;
+ }
+
+send_mbox:
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = sqs->vf_id;
+ mbx.sqs_alloc.qs_count = alloc_qs;
+ nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+ int bgx_idx, lmac_idx;
+
+ if (lbk->vf_id > MAX_LMAC)
+ return -1;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+ lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+ bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+ return 0;
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -492,6 +604,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
u64 *mbx_data;
u64 mbx_addr;
u64 reg_addr;
+ u64 cfg;
int bgx, lmac;
int i;
int ret = 0;
@@ -512,15 +625,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- nic->link[vf] = 0;
- nic->duplex[vf] = 0;
- nic->speed[vf] = 0;
+ if (vf < MAX_LMAC) {
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ }
ret = 1;
break;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
- nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ cfg = mbx.qs.cfg;
+		/* Check if it's a secondary Qset */
+ if (vf >= nic->num_vf_en) {
+ cfg = cfg & (~0x7FULL);
+ /* Assign this Qset to primary Qset's VF */
+ cfg |= nic->pqs_vf[vf];
+ }
+ nic_reg_write(nic, reg_addr, cfg);
break;
case NIC_MBOX_MSG_RQ_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
@@ -548,9 +670,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.sq.cfg);
- nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
+ if (vf >= nic->num_vf_en)
+ break;
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -577,10 +701,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
nic->vf_enabled[vf] = false;
+ if (vf >= nic->num_vf_en)
+ nic->sqs_used[vf - nic->num_vf_en] = false;
+ nic->pqs_vf[vf] = 0;
break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic_alloc_sqs(nic, &mbx.sqs_alloc);
+ goto unlock;
+ case NIC_MBOX_MSG_NICVF_PTR:
+ nic->nicvf[vf] = mbx.nicvf.nicvf;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ nic_send_pnicvf(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ nic_send_snicvf(nic, &mbx.nicvf);
+ goto unlock;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
goto unlock;
+ case NIC_MBOX_MSG_LOOPBACK:
+ ret = nic_config_loopback(nic, &mbx.lbk);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -606,8 +748,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
if (intr & (1ULL << vf)) {
dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
vf + (mbx * vf_per_mbx_reg));
- if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
- break;
+
nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
nic_clear_mbx_intr(nic, vf, mbx);
}
@@ -713,9 +854,24 @@ static void nic_unregister_interrupts(struct nicpf *nic)
nic_disable_msix(nic);
}
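+
+/* Sizing sketch for the helper below, with assumed example numbers:
+ * one node, 8 primary VFs and an SR-IOV TotalVFs of 128 gives
+ * min(128 - 8, 8 * 5) = 40 secondary Qsets.
+ */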
+static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
+{
+ int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
+ u16 total_vf;
+
+	/* Check if it's a multi-node environment */
+ if (nr_node_ids > 1)
+ sqs_per_vf = MAX_SQS_PER_VF;
+
+ pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
+ return min(total_vf - vf_en, vf_en * sqs_per_vf);
+}
+
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
int pos = 0;
+ int vf_en;
int err;
u16 total_vf_cnt;
@@ -732,16 +888,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
if (!total_vf_cnt)
return 0;
- err = pci_enable_sriov(pdev, nic->num_vf_en);
+ vf_en = nic->num_vf_en;
+ nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
+ vf_en += nic->num_sqs_en;
+
+ err = pci_enable_sriov(pdev, vf_en);
if (err) {
dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
- nic->num_vf_en);
+ vf_en);
nic->num_vf_en = 0;
return err;
}
dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
- nic->num_vf_en);
+ vf_en);
nic->flags |= NIC_SRIOV_ENABLED;
return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..af54c10945c2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -35,10 +35,10 @@ struct nicvf_stat {
}
static const struct nicvf_stat nicvf_hw_stats[] = {
- NICVF_HW_STAT(rx_bytes_ok),
- NICVF_HW_STAT(rx_ucast_frames_ok),
- NICVF_HW_STAT(rx_bcast_frames_ok),
- NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_ucast_frames),
+ NICVF_HW_STAT(rx_bcast_frames),
+ NICVF_HW_STAT(rx_mcast_frames),
NICVF_HW_STAT(rx_fcs_errors),
NICVF_HW_STAT(rx_l2_errors),
NICVF_HW_STAT(rx_drop_red),
@@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(rx_bgx_truncated_pkts),
+ NICVF_HW_STAT(rx_jabber_errs),
+ NICVF_HW_STAT(rx_fcs_errs),
+ NICVF_HW_STAT(rx_bgx_errs),
+ NICVF_HW_STAT(rx_prel2_errs),
+ NICVF_HW_STAT(rx_l2_hdr_malformed),
+ NICVF_HW_STAT(rx_oversize),
+ NICVF_HW_STAT(rx_undersize),
+ NICVF_HW_STAT(rx_l2_len_mismatch),
+ NICVF_HW_STAT(rx_l2_pclp),
+ NICVF_HW_STAT(rx_ip_ver_errs),
+ NICVF_HW_STAT(rx_ip_csum_errs),
+ NICVF_HW_STAT(rx_ip_hdr_malformed),
+ NICVF_HW_STAT(rx_ip_payload_malformed),
+ NICVF_HW_STAT(rx_ip_ttl_errs),
+ NICVF_HW_STAT(rx_l3_pclp),
+ NICVF_HW_STAT(rx_l4_malformed),
+ NICVF_HW_STAT(rx_l4_csum_errs),
+ NICVF_HW_STAT(rx_udp_len_errs),
+ NICVF_HW_STAT(rx_l4_port_errs),
+ NICVF_HW_STAT(rx_tcp_flag_errs),
+ NICVF_HW_STAT(rx_tcp_offset_errs),
+ NICVF_HW_STAT(rx_l4_pclp),
+ NICVF_HW_STAT(rx_truncated_pkts),
NICVF_HW_STAT(tx_bytes_ok),
NICVF_HW_STAT(tx_ucast_frames_ok),
NICVF_HW_STAT(tx_bcast_frames_ok),
@@ -66,9 +90,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(rx_frames_jumbo),
NICVF_DRV_STAT(rx_drops),
NICVF_DRV_STAT(tx_frames_ok),
- NICVF_DRV_STAT(tx_busy),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_drops),
+ NICVF_DRV_STAT(txq_stop),
+ NICVF_DRV_STAT(txq_wake),
};
static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -124,9 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
nic->msg_enable = lvl;
}
-static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
int stats, qidx;
+ int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stats;
+ int sqs;
if (sset != ETH_SS_STATS)
return;
@@ -141,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
data += ETH_GSTRING_LEN;
}
- for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
- for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
- sprintf(data, "rxq%d: %s", qidx,
- nicvf_queue_stats[stats].name);
- data += ETH_GSTRING_LEN;
- }
- }
+ nicvf_get_qset_strings(nic, &data, 0);
- for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
- for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
- sprintf(data, "txq%d: %s", qidx,
- nicvf_queue_stats[stats].name);
- data += ETH_GSTRING_LEN;
- }
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
}
for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
@@ -170,20 +211,59 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qstats_count;
+ int sqs;
+
if (sset != ETH_SS_STATS)
return -EINVAL;
+ qstats_count = nicvf_n_queue_stats *
+ (nic->qs->rq_cnt + nic->qs->sq_cnt);
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ struct nicvf *snic;
+
+ snic = nic->snicvf[sqs];
+ if (!snic)
+ continue;
+ qstats_count += nicvf_n_queue_stats *
+ (snic->qs->rq_cnt + snic->qs->sq_cnt);
+ }
+
return nicvf_n_hw_stats + nicvf_n_drv_stats +
- (nicvf_n_queue_stats *
- (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+ qstats_count +
BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}
+static void nicvf_get_qset_stats(struct nicvf *nic,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!nic)
+ return;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ nicvf_update_rq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ nicvf_update_sq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+}
+
static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stat, qidx;
+ int stat;
+ int sqs;
nicvf_update_stats(nic);
@@ -191,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
nicvf_update_lmac_stats(nic);
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
- *(data++) = ((u64 *)&nic->stats)
+ *(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
for (stat = 0; stat < nicvf_n_drv_stats; stat++)
*(data++) = ((u64 *)&nic->drv_stats)
[nicvf_drv_stats[stat].index];
- for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
- for (stat = 0; stat < nicvf_n_queue_stats; stat++)
- *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
- [nicvf_queue_stats[stat].index];
- }
+ nicvf_get_qset_stats(nic, stats, &data);
- for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
- for (stat = 0; stat < nicvf_n_queue_stats; stat++)
- *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
- [nicvf_queue_stats[stat].index];
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
}
for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
@@ -365,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = nic->qs->rq_cnt;
+ info->data = nic->rx_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
@@ -497,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
- rss->enable = false;
- rss->hash_bits = 0;
- return -EIO;
- }
-
- /* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- rss->enable = true;
+ if (!rss->enable) {
+ netdev_err(nic->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
if (indir) {
for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = indir[idx];
@@ -530,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev,
memset(channel, 0, sizeof(*channel));
- channel->max_rx = MAX_RCV_QUEUES_PER_QS;
- channel->max_tx = MAX_SND_QUEUES_PER_QS;
+ channel->max_rx = nic->max_queues;
+ channel->max_tx = nic->max_queues;
- channel->rx_count = nic->qs->rq_cnt;
- channel->tx_count = nic->qs->sq_cnt;
+ channel->rx_count = nic->rx_queues;
+ channel->tx_count = nic->tx_queues;
}
/* Set no of Tx, Rx queues to be used */
@@ -543,29 +617,43 @@ static int nicvf_set_channels(struct net_device *dev,
{
struct nicvf *nic = netdev_priv(dev);
int err = 0;
+ bool if_up = netif_running(dev);
+ int cqcount;
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
- if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ if (channel->rx_count > nic->max_queues)
return -EINVAL;
- if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ if (channel->tx_count > nic->max_queues)
return -EINVAL;
- nic->qs->rq_cnt = channel->rx_count;
- nic->qs->sq_cnt = channel->tx_count;
+ if (if_up)
+ nicvf_stop(dev);
+
+ cqcount = max(channel->rx_count, channel->tx_count);
+
+ if (cqcount > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
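+	/* e.g. 12 queues with 8 CQs per Qset (limit assumed): roundup()
+	 * gives 16, so 16 / 8 - 1 = 1 secondary Qset beside the primary.
+	 */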
+
+ nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
- err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ nic->rx_queues = channel->rx_count;
+ nic->tx_queues = channel->tx_count;
+ err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err)
return err;
- if (!netif_running(dev))
- return err;
+ if (if_up)
+ nicvf_open(dev);
- nicvf_stop(dev);
- nicvf_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
- nic->qs->sq_cnt, nic->qs->rq_cnt);
+ nic->tx_queues, nic->rx_queues);
return err;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..b63e579aeb12 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
@@ -50,6 +51,14 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
+{
+ if (nic->sqs_mode)
+ return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+ else
+ return qidx;
+}
+
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
struct sk_buff *skb)
{
@@ -105,7 +114,6 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
}
/* VF -> PF mailbox communication */
-
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
u64 *msg = (u64 *)mbx;
@@ -147,26 +155,15 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
*/
static int nicvf_check_pf_ready(struct nicvf *nic)
{
- int timeout = 5000, sleep = 20;
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_READY;
-
- nic->pf_ready_to_rcv_msg = false;
-
- nicvf_write_to_mbx(nic, &mbx);
-
- while (!nic->pf_ready_to_rcv_msg) {
- msleep(sleep);
- if (nic->pf_ready_to_rcv_msg)
- break;
- timeout -= sleep;
- if (!timeout) {
- netdev_err(nic->netdev,
- "PF didn't respond to READY msg\n");
- return 0;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
}
+
return 1;
}
@@ -197,13 +194,15 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
- nic->pf_ready_to_rcv_msg = true;
+ nic->pf_acked = true;
nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
ether_addr_copy(nic->netdev->dev_addr,
mbx.nic_cfg.mac_addr);
+ nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
nic->duplex = 0;
nic->speed = 0;
@@ -221,7 +220,6 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_BGX_STATS:
nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
nic->pf_acked = true;
- nic->bgx_stats_acked = true;
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
@@ -234,7 +232,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->duplex == DUPLEX_FULL ?
"Full duplex" : "Half duplex");
netif_carrier_on(nic->netdev);
- netif_tx_wake_all_queues(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
} else {
netdev_info(nic->netdev, "%s: Link is Down\n",
nic->netdev->name);
@@ -242,6 +240,26 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netif_tx_stop_all_queues(nic->netdev);
}
break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic->sqs_count = mbx.sqs_alloc.qs_count;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ /* Primary VF: make note of secondary VF's pointer
+			 * to be used during packet transmission.
+ */
+ nic->snicvf[mbx.nicvf.sqs_id] =
+ (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ /* Secondary VF/Qset: make note of primary VF's pointer
+			 * to be used during packet reception, to hand packets
+			 * over to the primary VF's netdev.
+ */
+ nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
default:
netdev_err(nic->netdev,
"Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
@@ -326,7 +344,7 @@ static int nicvf_rss_init(struct nicvf *nic)
nicvf_get_rss_size(nic);
- if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ if (cpi_alg != CPI_ALG_NONE) {
rss->enable = false;
rss->hash_bits = 0;
return 0;
@@ -350,11 +368,100 @@ static int nicvf_rss_init(struct nicvf *nic)
for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
- nic->qs->rq_cnt);
+ nic->rx_queues);
nicvf_config_rss(nic);
return 1;
}
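+
+/* Queue spill-over example for the request below (per-Qset limit of 8
+ * assumed): 20 Rx queues leave 12 beyond the primary Qset, so
+ * secondary Qset 0 takes 8 and secondary Qset 1 the remaining 4.
+ */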
+/* Request PF to allocate additional Qsets */
+static void nicvf_request_sqs(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ int sqs;
+ int sqs_count = nic->sqs_count;
+ int rx_queues = 0, tx_queues = 0;
+
+ /* Only primary VF should request */
+ if (nic->sqs_mode || !nic->sqs_count)
+ return;
+
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = nic->vf_id;
+ mbx.sqs_alloc.qs_count = nic->sqs_count;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ /* No response from PF */
+ nic->sqs_count = 0;
+ return;
+ }
+
+ /* Return if no Secondary Qsets available */
+ if (!nic->sqs_count)
+ return;
+
+ if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
+ rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
+ if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
+ tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+ /* Set no of Rx/Tx queues in each of the SQsets */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.vf_id = nic->vf_id;
+ mbx.nicvf.sqs_id = sqs;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nic->snicvf[sqs]->sqs_id = sqs;
+ if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
+ rx_queues -= MAX_RCV_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
+ rx_queues = 0;
+ }
+
+ if (tx_queues > MAX_SND_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
+ tx_queues -= MAX_SND_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
+ tx_queues = 0;
+ }
+
+ nic->snicvf[sqs]->qs->cq_cnt =
+ max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
+
+ /* Initialize secondary Qset's queues and its interrupts */
+ nicvf_open(nic->snicvf[sqs]->netdev);
+ }
+
+ /* Update stack with actual Rx/Tx queue count allocated */
+ if (sqs_count != nic->sqs_count)
+ nicvf_set_real_num_queues(nic->netdev,
+ nic->tx_queues, nic->rx_queues);
+}
+
+/* Send this Qset's nicvf pointer to PF.
+ * PF in turn sends the primary VF's nicvf struct to secondary Qsets/VFs
+ * so that packets received by these Qsets can use the primary VF's netdev.
+ */
+static void nicvf_send_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
+ mbx.nicvf.sqs_mode = nic->sqs_mode;
+ mbx.nicvf.nicvf = (u64)nic;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_primary_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
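+
+/* Rough shape of the pointer hand-off, assembled from the mailbox
+ * handlers above (directions assumed):
+ *
+ *   every VF      --NICVF_PTR-->   PF  (PF records nic->nicvf[vf])
+ *   primary VF    --SNICVF_PTR-->  PF  (PF replies with each secondary)
+ *   secondary VF  --PNICVF_PTR-->  PF  (PF replies with the primary)
+ */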
+
int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues)
{
@@ -425,7 +532,36 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
if (skb) {
prefetch(skb);
dev_consume_skb_any(skb);
+ sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
+ }
+}
+
+static inline void nicvf_set_rxhash(struct net_device *netdev,
+ struct cqe_rx_t *cqe_rx,
+ struct sk_buff *skb)
+{
+ u8 hash_type;
+ u32 hash;
+
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (cqe_rx->rss_alg) {
+ case RSS_ALG_TCP_IP:
+ case RSS_ALG_UDP_IP:
+ hash_type = PKT_HASH_TYPE_L4;
+ hash = cqe_rx->rss_tag;
+ break;
+ case RSS_ALG_IP:
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe_rx->rss_tag;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_NONE;
+ hash = 0;
}
+
+ skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
@@ -436,6 +572,15 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
int err = 0;
+ int rq_idx;
+
+ rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ if (nic->sqs_mode) {
+ /* Use primary VF's 'nicvf' struct */
+ nic = nic->pnicvf;
+ netdev = nic->netdev;
+ }
/* Check for errors */
err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
@@ -455,9 +600,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->data, skb->len, true);
}
+ /* If error packet, drop it here */
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
nicvf_set_rx_frame_cnt(nic, skb);
- skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ nicvf_set_rxhash(netdev, cqe_rx, skb);
+
+ skb_record_rx_queue(skb, rq_idx);
if (netdev->hw_features & NETIF_F_RXCSUM) {
/* HW by default verifies TCP/UDP/SCTP checksums */
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -467,6 +620,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->protocol = eth_type_trans(skb, netdev);
+ /* Check for stripped VLAN */
+ if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ ntohs((__force __be16)cqe_rx->vlan_tci));
+
if (napi && (netdev->features & NETIF_F_GRO))
napi_gro_receive(napi, skb);
else
@@ -476,12 +634,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct napi_struct *napi, int budget)
{
- int processed_cqe, work_done = 0;
+ int processed_cqe, work_done = 0, tx_done = 0;
int cqe_count, cqe_head;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
+ struct netdev_queue *txq;
spin_lock_bh(&cq->lock);
loop:
@@ -496,8 +655,8 @@ loop:
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cqe_head &= 0xFFFF;
- netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
- __func__, cqe_count, cqe_head);
+ netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
+ __func__, cq_idx, cqe_count, cqe_head);
while (processed_cqe < cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +670,8 @@ loop:
break;
}
- netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
- cq_desc->cqe_type);
+ netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
+ cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +681,7 @@ loop:
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
(void *)cq_desc, CQE_TYPE_SEND);
+ tx_done++;
break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
@@ -532,8 +692,9 @@ loop:
}
processed_cqe++;
}
- netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
- __func__, processed_cqe, work_done, budget);
+ netdev_dbg(nic->netdev,
+ "%s CQ%d processed_cqe %d work_done %d budget %d\n",
+ __func__, cq_idx, processed_cqe, work_done, budget);
/* Ring doorbell to inform H/W to reuse processed CQEs */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +704,22 @@ loop:
goto loop;
done:
+	/* Wake up the TXQ if it was stopped earlier because the SQ was full */
+ if (tx_done) {
+ netdev = nic->pnicvf->netdev;
+ txq = netdev_get_tx_queue(netdev,
+ nicvf_netdev_qidx(nic, cq_idx));
+ nic = nic->pnicvf;
+ if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
+ netif_tx_start_queue(txq);
+ nic->drv_stats.txq_wake++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit queue wakeup SQ%d\n",
+ netdev->name, cq_idx);
+ }
+ }
+
spin_unlock_bh(&cq->lock);
return work_done;
}
@@ -554,15 +731,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_cq_poll *cq;
- struct netdev_queue *txq;
cq = container_of(napi, struct nicvf_cq_poll, napi);
work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
- txq = netdev_get_tx_queue(netdev, cq->cq_idx);
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
-
if (work_done < budget) {
/* Slow packet rate, exit polling */
napi_complete(napi);
@@ -612,11 +784,20 @@ static void nicvf_handle_qs_err(unsigned long data)
nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
+static void nicvf_dump_intr_status(struct nicvf *nic)
+{
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+}
+
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
struct nicvf *nic = (struct nicvf *)nicvf_irq;
u64 intr;
+ nicvf_dump_intr_status(nic);
+
intr = nicvf_reg_read(nic, NIC_VF_INT);
/* Check for spurious interrupt */
if (!(intr & NICVF_INTR_MBOX_MASK))
@@ -627,59 +808,58 @@ static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
return IRQ_HANDLED;
}
-static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
+{
+ struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
+ struct nicvf *nic = cq_poll->nicvf;
+ int qidx = cq_poll->cq_idx;
+
+ nicvf_dump_intr_status(nic);
+
+ /* Disable interrupts */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Schedule NAPI */
+ napi_schedule(&cq_poll->napi);
+
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
- u64 qidx, intr, clear_intr = 0;
- u64 cq_intr, rbdr_intr, qs_err_intr;
struct nicvf *nic = (struct nicvf *)nicvf_irq;
- struct queue_set *qs = nic->qs;
- struct nicvf_cq_poll *cq_poll = NULL;
+ u8 qidx;
- intr = nicvf_reg_read(nic, NIC_VF_INT);
- if (netif_msg_intr(nic))
- netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
- nic->netdev->name, intr);
-
- qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
- if (qs_err_intr) {
- /* Disable Qset err interrupt and schedule softirq */
- nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
- tasklet_hi_schedule(&nic->qs_err_task);
- clear_intr |= qs_err_intr;
- }
- /* Disable interrupts and start polling */
- cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
- for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
- if (!(cq_intr & (1 << qidx)))
- continue;
- if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ nicvf_dump_intr_status(nic);
+
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
- nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
- clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+ return IRQ_HANDLED;
+}
- cq_poll = nic->napi[qidx];
- /* Schedule NAPI */
- if (cq_poll)
- napi_schedule(&cq_poll->napi);
- }
+static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
- /* Handle RBDR interrupts */
- rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
- if (rbdr_intr) {
- /* Disable RBDR interrupt and schedule softirq */
- for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
- if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
- continue;
- nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
- tasklet_hi_schedule(&nic->rbdr_task);
- clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
- }
- }
+ nicvf_dump_intr_status(nic);
+
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
- /* Clear interrupts */
- nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
return IRQ_HANDLED;
}
@@ -713,7 +893,7 @@ static void nicvf_disable_msix(struct nicvf *nic)
static int nicvf_register_interrupts(struct nicvf *nic)
{
- int irq, free, ret = 0;
+ int irq, ret = 0;
int vector;
for_each_cq_irq(irq)
@@ -728,44 +908,42 @@ static int nicvf_register_interrupts(struct nicvf *nic)
sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
nic->vf_id, irq - NICVF_INTR_ID_RBDR);
- /* Register all interrupts except mailbox */
- for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ /* Register CQ interrupts */
+ for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
vector = nic->msix_entries[irq].vector;
ret = request_irq(vector, nicvf_intr_handler,
- 0, nic->irq_name[irq], nic);
+ 0, nic->irq_name[irq], nic->napi[irq]);
if (ret)
- break;
+ goto err;
nic->irq_allocated[irq] = true;
}
- for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+ /* Register RBDR interrupt */
+ for (irq = NICVF_INTR_ID_RBDR;
+ irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_intr_handler,
+ ret = request_irq(vector, nicvf_rbdr_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
- break;
+ goto err;
nic->irq_allocated[irq] = true;
}
+ /* Register QS error interrupt */
sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
"NICVF%d Qset error", nic->vf_id);
- if (!ret) {
- vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
- irq = NICVF_INTR_ID_QS_ERR;
- ret = request_irq(vector, nicvf_intr_handler,
- 0, nic->irq_name[irq], nic);
- if (!ret)
- nic->irq_allocated[irq] = true;
- }
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_qs_err_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
- if (ret) {
- netdev_err(nic->netdev, "Request irq failed\n");
- for (free = 0; free < irq; free++)
- free_irq(nic->msix_entries[free].vector, nic);
- return ret;
- }
+err:
+ if (ret)
+ netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
- return 0;
+ return ret;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
@@ -774,8 +952,14 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
/* Free registered interrupts */
for (irq = 0; irq < nic->num_vec; irq++) {
- if (nic->irq_allocated[irq])
+ if (!nic->irq_allocated[irq])
+ continue;
+
+ if (irq < NICVF_INTR_ID_SQ)
+ free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+ else
free_irq(nic->msix_entries[irq].vector, nic);
+
nic->irq_allocated[irq] = false;
}
@@ -833,20 +1017,33 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+ if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
- nic->drv_stats.tx_busy++;
+ nic->drv_stats.txq_stop++;
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
netdev->name, qid);
-
return NETDEV_TX_BUSY;
}
return NETDEV_TX_OK;
}
+static inline void nicvf_free_cq_poll(struct nicvf *nic)
+{
+ struct nicvf_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ kfree(cq_poll);
+ }
+}
+
int nicvf_stop(struct net_device *netdev)
{
int irq, qidx;
@@ -859,7 +1056,17 @@ int nicvf_stop(struct net_device *netdev)
nicvf_send_msg_to_pf(nic, &mbx);
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+
+ /* Teardown secondary qsets first */
+ if (!nic->sqs_mode) {
+ for (qidx = 0; qidx < nic->sqs_count; qidx++) {
+ if (!nic->snicvf[qidx])
+ continue;
+ nicvf_stop(nic->snicvf[qidx]->netdev);
+ nic->snicvf[qidx] = NULL;
+ }
+ }
/* Disable RBDR & QS error interrupts */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -882,7 +1089,6 @@ int nicvf_stop(struct net_device *netdev)
cq_poll = nic->napi[qidx];
if (!cq_poll)
continue;
- nic->napi[qidx] = NULL;
napi_synchronize(&cq_poll->napi);
/* CQ intr is enabled while napi_complete,
* so disable it now
@@ -891,9 +1097,10 @@ int nicvf_stop(struct net_device *netdev)
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
- kfree(cq_poll);
}
+ netif_tx_disable(netdev);
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -905,6 +1112,12 @@ int nicvf_stop(struct net_device *netdev)
nicvf_unregister_interrupts(nic);
+ nicvf_free_cq_poll(nic);
+
+ /* Clear multiqset info */
+ nic->pnicvf = nic;
+ nic->sqs_count = 0;
+
return 0;
}
@@ -931,6 +1144,7 @@ int nicvf_open(struct net_device *netdev)
goto napi_del;
}
cq_poll->cq_idx = qidx;
+ cq_poll->nicvf = nic;
netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
NAPI_POLL_WEIGHT);
napi_enable(&cq_poll->napi);
@@ -959,10 +1173,16 @@ int nicvf_open(struct net_device *netdev)
	/* Configure CPI algorithm */
nic->cpi_alg = cpi_alg;
- nicvf_config_cpi(nic);
+ if (!nic->sqs_mode)
+ nicvf_config_cpi(nic);
+
+ nicvf_request_sqs(nic);
+ if (nic->sqs_mode)
+ nicvf_get_primary_vf_struct(nic);
/* Configure receive side scaling */
- nicvf_rss_init(nic);
+ if (!nic->sqs_mode)
+ nicvf_rss_init(nic);
err = nicvf_register_interrupts(nic);
if (err)
@@ -988,6 +1208,9 @@ int nicvf_open(struct net_device *netdev)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nic->drv_stats.txq_stop = 0;
+ nic->drv_stats.txq_wake = 0;
+
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -995,6 +1218,8 @@ int nicvf_open(struct net_device *netdev)
cleanup:
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
+ tasklet_kill(&nic->qs_err_task);
+ tasklet_kill(&nic->rbdr_task);
napi_del:
for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
cq_poll = nic->napi[qidx];
@@ -1002,9 +1227,8 @@ napi_del:
continue;
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
- kfree(cq_poll);
- nic->napi[qidx] = NULL;
}
+ nicvf_free_cq_poll(nic);
return err;
}
@@ -1061,7 +1285,6 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
{
int stat = 0;
union nic_mbx mbx = {};
- int timeout;
if (!netif_running(nic->netdev))
return;
@@ -1071,14 +1294,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Rx stats */
mbx.bgx_stats.rx = 1;
while (stat < BGX_RX_STATS_COUNT) {
- nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat;
- nicvf_send_msg_to_pf(nic, &mbx);
- timeout = 0;
- while ((!nic->bgx_stats_acked) && (timeout < 10)) {
- msleep(2);
- timeout++;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
stat++;
}
@@ -1087,14 +1305,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Tx stats */
mbx.bgx_stats.rx = 0;
while (stat < BGX_TX_STATS_COUNT) {
- nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat;
- nicvf_send_msg_to_pf(nic, &mbx);
- timeout = 0;
- while ((!nic->bgx_stats_acked) && (timeout < 10)) {
- msleep(2);
- timeout++;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
stat++;
}
}
@@ -1102,7 +1315,7 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic)
{
int qidx;
- struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
struct queue_set *qs = nic->qs;
@@ -1111,14 +1324,16 @@ void nicvf_update_stats(struct nicvf *nic)
#define GET_TX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
- stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
- stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
- stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
- stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_bytes = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
@@ -1130,9 +1345,6 @@ void nicvf_update_stats(struct nicvf *nic)
stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
- stats->rx_bcast_frames_ok +
- stats->rx_mcast_frames_ok;
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
@@ -1151,14 +1363,15 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nicvf *nic = netdev_priv(netdev);
- struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
- stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_bytes = hw_stats->rx_bytes;
stats->rx_packets = drv_stats->rx_frames_ok;
stats->rx_dropped = drv_stats->rx_drops;
+ stats->multicast = hw_stats->rx_mcast_frames;
stats->tx_bytes = hw_stats->tx_bytes_ok;
stats->tx_packets = drv_stats->tx_frames_ok;
@@ -1192,6 +1405,45 @@ static void nicvf_reset_task(struct work_struct *work)
nic->netdev->trans_start = jiffies;
}
+static int nicvf_config_loopback(struct nicvf *nic,
+ netdev_features_t features)
+{
+ union nic_mbx mbx = {};
+
+ mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+ mbx.lbk.vf_id = nic->vf_id;
+ mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static netdev_features_t nicvf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if ((features & NETIF_F_LOOPBACK) &&
+ netif_running(netdev) && !nic->loopback_supported)
+ features &= ~NETIF_F_LOOPBACK;
+
+ return features;
+}
+
+static int nicvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ nicvf_config_vlan_stripping(nic, features);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return nicvf_config_loopback(nic, features);
+
+ return 0;
+}
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1200,6 +1452,8 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_set_mac_address = nicvf_set_mac_address,
.ndo_get_stats64 = nicvf_get_stats64,
.ndo_tx_timeout = nicvf_tx_timeout,
+ .ndo_fix_features = nicvf_fix_features,
+ .ndo_set_features = nicvf_set_features,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1207,8 +1461,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct net_device *netdev;
struct nicvf *nic;
- struct queue_set *qs;
- int err;
+ int err, qcount;
err = pci_enable_device(pdev);
if (err) {
@@ -1234,9 +1487,17 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
- MAX_RCV_QUEUES_PER_QS,
- MAX_SND_QUEUES_PER_QS);
+ qcount = MAX_CMP_QUEUES_PER_QS;
+
+	/* Restrict multiqset support to host-bound VFs only */
+ if (pdev->is_virtfn) {
+ /* Set max number of queues per VF */
+ qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
+ qcount = min(qcount,
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
if (!netdev) {
err = -ENOMEM;
goto err_release_regions;
@@ -1249,6 +1510,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic = netdev_priv(netdev);
nic->netdev = netdev;
nic->pdev = pdev;
+ nic->pnicvf = nic;
+ nic->max_queues = qcount;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1262,22 +1525,34 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_free_netdev;
- qs = nic->qs;
-
- err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
- if (err)
- goto err_free_netdev;
-
/* Check if PF is alive and get MAC address for this VF */
err = nicvf_register_misc_interrupt(nic);
if (err)
goto err_free_netdev;
- netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_GRO);
- netdev->hw_features = netdev->features;
+ nicvf_send_vf_struct(nic);
+
+ /* Check if this VF is in QS only mode */
+ if (nic->sqs_mode)
+ return 0;
+
+ err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
+ if (err)
+ goto err_unregister_interrupts;
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
+
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->netdev_ops = &nicvf_netdev_ops;
+ netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
INIT_WORK(&nic->reset_task, nicvf_reset_task);
@@ -1309,8 +1584,13 @@ static void nicvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nicvf *nic = netdev_priv(netdev);
+ struct net_device *pnetdev = nic->pnicvf->netdev;
- unregister_netdev(netdev);
+	/* Check if this Qset is assigned to a different VF.
+	 * If yes, clean up the primary and all secondary Qsets.
+	 */
+ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
+ unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
@@ -1318,11 +1598,17 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void nicvf_shutdown(struct pci_dev *pdev)
+{
+ nicvf_remove(pdev);
+}
+
static struct pci_driver nicvf_driver = {
.name = DRV_NAME,
.id_table = nicvf_id_table,
.probe = nicvf_probe,
.remove = nicvf_remove,
+ .shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..e404ea837727 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
/* Allocate a new page */
if (!nic->rb_page) {
- nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
if (!nic->rb_page) {
- netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+ netdev_err(nic->netdev,
+ "Failed to allocate new rcv buffer\n");
return -ENOMEM;
}
nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
return;
if (sq->tso_hdrs)
- dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len,
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
kfree(sq->skbuff);
@@ -472,6 +475,27 @@ static void nicvf_reclaim_rbdr(struct nicvf *nic,
return;
}
+void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
+{
+ u64 rq_cfg;
+ int sqs;
+
+ rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
+
+ /* Enable first VLAN stripping */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ rq_cfg |= (1ULL << 25);
+ else
+ rq_cfg &= ~(1ULL << 25);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+
+ /* Configure Secondary Qsets, if any */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++)
+ if (nic->snicvf[sqs])
+ nicvf_queue_reg_write(nic->snicvf[sqs],
+ NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+}
+
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
@@ -521,7 +545,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
- nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+ if (!nic->sqs_mode)
+ nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */
rq_cfg.ena = 1;
@@ -595,6 +621,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
mbx.sq.qs_num = qs->vnic_id;
mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -676,6 +703,7 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
/* Send a mailbox msg to PF to config Qset */
mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
mbx.qs.num = qs->vnic_id;
+ mbx.qs.sqs_count = nic->sqs_count;
mbx.qs.cfg = 0;
qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
@@ -756,6 +784,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
qs->rbdr_len = RCV_BUF_COUNT;
qs->sq_len = SND_QUEUE_LEN;
qs->cq_len = CMP_QUEUE_LEN;
+
+ nic->rx_queues = qs->rq_cnt;
+ nic->tx_queues = qs->sq_cnt;
+
return 0;
}
@@ -863,10 +895,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
continue;
}
skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
atomic64_add(hdr->tot_len,
(atomic64_t *)&netdev->stats.tx_bytes);
- dev_kfree_skb_any(skb);
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
}
@@ -957,9 +990,6 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->protocol != htons(ETH_P_IP))
- return;
-
hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
@@ -992,7 +1022,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
memset(gather, 0, SND_QUEUE_DESC_SIZE);
gather->subdesc_type = SQ_DESC_TYPE_GATHER;
- gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
gather->size = size;
gather->addr = data;
}
@@ -1001,7 +1031,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
* them to SQ for transfer
*/
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
- int qentry, struct sk_buff *skb)
+ int sq_num, int qentry, struct sk_buff *skb)
{
struct tso_t tso;
int seg_subdescs = 0, desc_cnt = 0;
@@ -1048,7 +1078,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
}
nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
seg_subdescs - 1, skb, seg_len);
- sq->skbuff[hdr_qentry] = 0;
+ sq->skbuff[hdr_qentry] = (u64)NULL;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
desc_cnt += seg_subdescs;
@@ -1061,7 +1091,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Inform HW to xmit all TSO segments */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- skb_get_queue_mapping(skb), desc_cnt);
+ sq_num, desc_cnt);
+ nic->drv_stats.tx_tso++;
return 1;
}
@@ -1071,10 +1102,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
int i, size;
int subdesc_cnt;
int sq_num, qentry;
- struct queue_set *qs = nic->qs;
+ struct queue_set *qs;
struct snd_queue *sq;
sq_num = skb_get_queue_mapping(skb);
+ if (sq_num >= MAX_SND_QUEUES_PER_QS) {
+ /* Get secondary Qset's SQ structure */
+ i = sq_num / MAX_SND_QUEUES_PER_QS;
+ if (!nic->snicvf[i - 1]) {
+ netdev_warn(nic->netdev,
+ "Secondary Qset#%d's ptr not initialized\n",
+ i - 1);
+ return 1;
+ }
+ nic = (struct nicvf *)nic->snicvf[i - 1];
+ sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
+ }
+
+ qs = nic->qs;
sq = &qs->sq[sq_num];
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
@@ -1085,7 +1130,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Check if its a TSO packet */
if (skb_shinfo(skb)->gso_size)
- return nicvf_sq_append_tso(nic, sq, qentry, skb);
+ return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
@@ -1121,6 +1166,8 @@ doorbell:
return 1;
append_fail:
+ /* Use original PCI dev for debug log */
+ nic = nic->pnicvf;
netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
return 0;
}
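The sq_num remapping in the hunk above spreads TX queues across secondary Qsets. As a
worked example, assuming MAX_SND_QUEUES_PER_QS is 8 (per nicvf_queues.h):

	/* Illustrative only: global TX queue 19 with 8 SQs per Qset. */
	int qidx = 19;		/* from skb_get_queue_mapping() */
	int qset = qidx / 8;	/* 2 -> use secondary Qset snicvf[qset - 1] */
	int sq_num = qidx % 8;	/* 3 -> local send queue within that Qset */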
@@ -1366,10 +1413,11 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
- struct cmp_queue_stats *stats = &cq->stats;
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- stats->rx.errop.good++;
+ drv_stats->rx_frames_ok++;
return 0;
}
@@ -1379,111 +1427,78 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic,
nic->netdev->name,
cqe_rx->err_level, cqe_rx->err_opcode);
- switch (cqe_rx->err_level) {
- case CQ_ERRLVL_MAC:
- stats->rx.errlvl.mac_errs++;
- break;
- case CQ_ERRLVL_L2:
- stats->rx.errlvl.l2_errs++;
- break;
- case CQ_ERRLVL_L3:
- stats->rx.errlvl.l3_errs++;
- break;
- case CQ_ERRLVL_L4:
- stats->rx.errlvl.l4_errs++;
- break;
- }
-
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
- stats->rx.errop.partial_pkts++;
+ stats->rx_bgx_truncated_pkts++;
break;
case CQ_RX_ERROP_RE_JABBER:
- stats->rx.errop.jabber_errs++;
+ stats->rx_jabber_errs++;
break;
case CQ_RX_ERROP_RE_FCS:
- stats->rx.errop.fcs_errs++;
- break;
- case CQ_RX_ERROP_RE_TERMINATE:
- stats->rx.errop.terminate_errs++;
+ stats->rx_fcs_errs++;
break;
case CQ_RX_ERROP_RE_RX_CTL:
- stats->rx.errop.bgx_rx_errs++;
+ stats->rx_bgx_errs++;
break;
case CQ_RX_ERROP_PREL2_ERR:
- stats->rx.errop.prel2_errs++;
- break;
- case CQ_RX_ERROP_L2_FRAGMENT:
- stats->rx.errop.l2_frags++;
- break;
- case CQ_RX_ERROP_L2_OVERRUN:
- stats->rx.errop.l2_overruns++;
- break;
- case CQ_RX_ERROP_L2_PFCS:
- stats->rx.errop.l2_pfcs++;
- break;
- case CQ_RX_ERROP_L2_PUNY:
- stats->rx.errop.l2_puny++;
+ stats->rx_prel2_errs++;
break;
case CQ_RX_ERROP_L2_MAL:
- stats->rx.errop.l2_hdr_malformed++;
+ stats->rx_l2_hdr_malformed++;
break;
case CQ_RX_ERROP_L2_OVERSIZE:
- stats->rx.errop.l2_oversize++;
+ stats->rx_oversize++;
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
- stats->rx.errop.l2_undersize++;
+ stats->rx_undersize++;
break;
case CQ_RX_ERROP_L2_LENMISM:
- stats->rx.errop.l2_len_mismatch++;
+ stats->rx_l2_len_mismatch++;
break;
case CQ_RX_ERROP_L2_PCLP:
- stats->rx.errop.l2_pclp++;
+ stats->rx_l2_pclp++;
break;
case CQ_RX_ERROP_IP_NOT:
- stats->rx.errop.non_ip++;
+ stats->rx_ip_ver_errs++;
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
- stats->rx.errop.ip_csum_err++;
+ stats->rx_ip_csum_errs++;
break;
case CQ_RX_ERROP_IP_MAL:
- stats->rx.errop.ip_hdr_malformed++;
+ stats->rx_ip_hdr_malformed++;
break;
case CQ_RX_ERROP_IP_MALD:
- stats->rx.errop.ip_payload_malformed++;
+ stats->rx_ip_payload_malformed++;
break;
case CQ_RX_ERROP_IP_HOP:
- stats->rx.errop.ip_hop_errs++;
- break;
- case CQ_RX_ERROP_L3_ICRC:
- stats->rx.errop.l3_icrc_errs++;
+ stats->rx_ip_ttl_errs++;
break;
case CQ_RX_ERROP_L3_PCLP:
- stats->rx.errop.l3_pclp++;
+ stats->rx_l3_pclp++;
break;
case CQ_RX_ERROP_L4_MAL:
- stats->rx.errop.l4_malformed++;
+ stats->rx_l4_malformed++;
break;
case CQ_RX_ERROP_L4_CHK:
- stats->rx.errop.l4_csum_errs++;
+ stats->rx_l4_csum_errs++;
break;
case CQ_RX_ERROP_UDP_LEN:
- stats->rx.errop.udp_len_err++;
+ stats->rx_udp_len_errs++;
break;
case CQ_RX_ERROP_L4_PORT:
- stats->rx.errop.bad_l4_port++;
+ stats->rx_l4_port_errs++;
break;
case CQ_RX_ERROP_TCP_FLAG:
- stats->rx.errop.bad_tcp_flag++;
+ stats->rx_tcp_flag_errs++;
break;
case CQ_RX_ERROP_TCP_OFFSET:
- stats->rx.errop.tcp_offset_errs++;
+ stats->rx_tcp_offset_errs++;
break;
case CQ_RX_ERROP_L4_PCLP:
- stats->rx.errop.l4_pclp++;
+ stats->rx_l4_pclp++;
break;
case CQ_RX_ERROP_RBDR_TRUNC:
- stats->rx.errop.pkt_truncated++;
+ stats->rx_truncated_pkts++;
break;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..fb4957d09914 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
-#define SND_QSIZE SND_QUEUE_SIZE4
+#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
/* Since timestamp not enabled, otherwise 2 */
#define MAX_CQE_PER_PKT_XMIT 1
-#define CMP_QSIZE CMP_QUEUE_SIZE4
+/* Keep CQ and SQ sizes the same; if timestamping
+ * is enabled this equation will change.
+ */
+#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
-#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+/* Calculate the number of CQEs to reserve for all SQEs.
+ * The drop level is programmed in units of 1/256th of the CQ size.
+ * '+ 1' accounts for pipelining.
+ */
+#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
+ (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
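A worked example of the macro above, assuming the usual size encodings (CMP_QUEUE_SIZE2
and SND_QUEUE_SIZE2 both encode 4K entries via 1 << (2 + 10), MIN_SQ_DESC_PER_PKT_XMIT
is 2, MAX_CQE_PER_PKT_XMIT is 1 as defined above):

	/* CMP_QUEUE_LEN   = 1 << (2 + 10)           = 4096
	 * MAX_CQES_FOR_TX = (4096 / 2) * 1          = 2048
	 * RQ_CQ_DROP      = 256 / (4096 / 2048) + 1 = 129
	 * so the RQ drop level is about 129/256ths of the CQ, i.e. roughly
	 * half the CQ stays reserved for TX completions.
	 */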
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
@@ -173,47 +181,6 @@ enum CQ_TX_ERROP_E {
};
struct cmp_queue_stats {
- struct rx_stats {
- struct {
- u64 mac_errs;
- u64 l2_errs;
- u64 l3_errs;
- u64 l4_errs;
- } errlvl;
- struct {
- u64 good;
- u64 partial_pkts;
- u64 jabber_errs;
- u64 fcs_errs;
- u64 terminate_errs;
- u64 bgx_rx_errs;
- u64 prel2_errs;
- u64 l2_frags;
- u64 l2_overruns;
- u64 l2_pfcs;
- u64 l2_puny;
- u64 l2_hdr_malformed;
- u64 l2_oversize;
- u64 l2_undersize;
- u64 l2_len_mismatch;
- u64 l2_pclp;
- u64 non_ip;
- u64 ip_csum_err;
- u64 ip_hdr_malformed;
- u64 ip_payload_malformed;
- u64 ip_hop_errs;
- u64 l3_icrc_errs;
- u64 l3_pclp;
- u64 l4_malformed;
- u64 l4_csum_errs;
- u64 udp_len_err;
- u64 bad_l4_port;
- u64 bad_tcp_flag;
- u64 tcp_offset_errs;
- u64 l4_pclp;
- u64 pkt_truncated;
- } errop;
- } rx;
struct tx_stats {
u64 good;
u64 desc_fault;
@@ -284,6 +251,7 @@ struct cmp_queue {
void *desc;
struct q_desc_mem dmem;
struct cmp_queue_stats stats;
+ int irq;
} ____cacheline_aligned_in_smp;
struct snd_queue {
@@ -339,6 +307,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+void nicvf_config_vlan_stripping(struct nicvf *nic,
+ netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..574c49278900 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -26,7 +27,7 @@
struct lmac {
struct bgx *bgx;
int dmac;
- unsigned char mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -328,6 +329,37 @@ static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
}
}
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable)
+{
+ struct bgx *bgx;
+ struct lmac *lmac;
+ u64 cfg;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmac_idx];
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= PCS_MRX_CTL_LOOPBACK1;
+ else
+ cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+ bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= SPU_CTL_LOOPBACK;
+ else
+ cfg &= ~SPU_CTL_LOOPBACK;
+ bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+ }
+}
+EXPORT_SYMBOL(bgx_lmac_internal_loopback);
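A hedged usage sketch of the new export; node/BGX/LMAC indices are illustrative, and
in-tree the expected caller is the nicvf driver's loopback support:

	/* Put LMAC 0 of BGX 0 on node 0 into internal loopback, then restore. */
	bgx_lmac_internal_loopback(0, 0, 0, true);
	/* ... run loopback traffic ... */
	bgx_lmac_internal_loopback(0, 0, 0, false);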
+
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
u64 cfg;
@@ -673,7 +705,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
bgx_flush_dmac_addrs(bgx, lmacid);
- if (lmac->phydev)
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
lmac->phydev = NULL;
@@ -832,18 +867,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
}
}
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+ "mac-address", mac, ETH_ALEN);
+ if (ret)
+ goto out;
+
+ if (!is_valid_ether_addr(mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dst, mac, ETH_ALEN);
+out:
+ return ret;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ struct bgx *bgx = context;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ goto out;
+
+ acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+ bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+ bgx->lmac_count++;
+ return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+ void *context, void **ret_val)
+{
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bgx *bgx = context;
+ char bgx_sel[5];
+
+ snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+ pr_warn("Invalid link device\n");
+ return AE_OK;
+ }
+
+ if (strncmp(string.pointer, bgx_sel, 4))
+ return AE_OK;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ bgx_acpi_register_phy, NULL, bgx, NULL);
+
+ kfree(string.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+ return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ struct device_node *np;
struct device_node *np_child;
u8 lmac = 0;
+ char bgx_sel[5];
+ const char *mac;
- for_each_child_of_node(np, np_child) {
- struct device_node *phy_np;
- const char *mac;
+ /* Get BGX node from DT */
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (!np)
+ return -ENODEV;
- phy_np = of_parse_phandle(np_child, "phy-handle", 0);
- if (phy_np)
- bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np = of_parse_phandle(np_child,
+ "phy-handle", 0);
+ if (!phy_np)
+ continue;
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
mac = of_get_mac_address(np_child);
if (mac)
@@ -855,6 +980,24 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np)
if (lmac == MAX_LMAC_PER_BGX)
break;
}
+ return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+ if (!acpi_disabled)
+ return bgx_init_acpi_phy(bgx);
+
+ return bgx_init_of_phy(bgx);
}
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -862,8 +1005,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
- struct device_node *np;
- char bgx_sel[5];
u8 lmac;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -899,10 +1040,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_vnic[bgx->bgx_id] = bgx;
bgx_get_qlm_mode(bgx);
- snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
- np = of_find_node_by_name(NULL, bgx_sel);
- if (np)
- bgx_init_of(bgx, np);
+ err = bgx_init_phy(bgx);
+ if (err)
+ goto err_enable;
bgx_init_hw(bgx);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index ba4f53b7cc2c..07b7ec66c60d 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -72,6 +72,7 @@
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
@@ -126,6 +127,7 @@
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
@@ -186,6 +188,8 @@ int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable);
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 629f75d70353..fa0c7b54ec7a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -767,6 +767,11 @@ struct adapter {
bool tid_release_task_busy;
struct dentry *debugfs_root;
+ u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
+ u32 trace_rss; /* 1: use a different RSS flit per trace filter;
+ * 0: use the default RSS flit for all
+ * 4 filters.
+ */
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1284,6 +1289,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
@@ -1440,6 +1446,10 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+ int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+ int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6074680bc985..052c660aca80 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = {
"Auto Negotiated"
};
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+ if (state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ state == CXGB4_DCB_STATE_HOST)
+ return true;
+ else
+ return false;
+}
+
/* Initialize a port's Data Center Bridging state. Typically used after a
* Link Down event.
*/
@@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(dcb->state) ||
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
@@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
struct adapter *adap = pi->adapter;
int err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
priority >= CXGB4_MAX_PRIORITY)
return;
@@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return false;
return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
struct adapter *adap = pi->adapter;
int i;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 0;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
*/
static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
{
- return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+ /* Convert app_idtype to firmware format before querying */
+ return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+ app_idtype : 3, app_id, 0);
}
/* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
int i, err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return -EINVAL;
/* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb_subtype && !(dcb->msgs & dcb_subtype))
- return 0;
+ if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+ return 0;
- return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+ return (cxgb4_dcb_state_synced(dcb->state) &&
(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}
@@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
/* Can't enable DCB if we haven't successfully negotiated it.
*/
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
/* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev,
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
info->willing = 0;
@@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
if (!pcmd.u.dcb.app_priority.protocolid)
break;
- table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+ table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
table[i].protocol =
be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
table[i].priority =
@@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+ pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
return 0;
}
@@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
*/
pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
+ pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 484eb8c37489..0a87a3247464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
return 0;
}
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC LS0Stat "
+ "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+ (p[9] >> 16) & 0xff, /* Status */
+ p[9] & 0xffff, p[8] >> 16, /* Inst */
+ p[8] & 0xffff, p[7] >> 16, /* Data */
+ p[7] & 0xffff, p[6] >> 16, /* PC */
+ p[2], p[1], p[0], /* LS0 Stat, Addr and Data */
+ p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */
+ }
+ return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %08x %08x %08x\n",
+ p[3] & 0xff, p[2], p[1], p[0]);
+ seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n",
+ (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+ p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n",
+ (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+ p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+ p[6] >> 16);
+ }
+ return 0;
+}
+
static int cim_la_open(struct inode *inode, struct file *file)
{
int ret;
@@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file)
if (ret)
return ret;
- p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
- cfg & UPDBGLACAPTPCONLY_F ?
- cim_la_show_3in1 : cim_la_show);
+ if (is_t6(adap->params.chip)) {
+ /* +1 to account for integer division of CIMLA_SIZE/10 */
+ p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+ 10 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ?
+ cim_la_show_pc_t6 : cim_la_show_t6);
+ } else {
+ p = seq_open_tab(file, adap->params.cim_la_size / 8,
+ 8 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+ cim_la_show);
+ }
if (!p)
return -ENOMEM;
@@ -298,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
if (is_t4(adap->params.chip)) {
i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
- wr = obq_wr_t4;
+ wr = obq_wr_t4;
} else {
i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
- wr = obq_wr_t5;
+ wr = obq_wr_t5;
}
}
if (i)
@@ -952,16 +1000,23 @@ static int devlog_show(struct seq_file *seq, void *v)
* eventually have to put a format interpreter in here ...
*/
seq_printf(seq, "%10d %15llu %8s %8s ",
- e->seqno, e->timestamp,
+ be32_to_cpu(e->seqno),
+ be64_to_cpu(e->timestamp),
(e->level < ARRAY_SIZE(devlog_level_strings)
? devlog_level_strings[e->level]
: "UNKNOWN"),
(e->facility < ARRAY_SIZE(devlog_facility_strings)
? devlog_facility_strings[e->facility]
: "UNKNOWN"));
- seq_printf(seq, e->fmt, e->params[0], e->params[1],
- e->params[2], e->params[3], e->params[4],
- e->params[5], e->params[6], e->params[7]);
+ seq_printf(seq, e->fmt,
+ be32_to_cpu(e->params[0]),
+ be32_to_cpu(e->params[1]),
+ be32_to_cpu(e->params[2]),
+ be32_to_cpu(e->params[3]),
+ be32_to_cpu(e->params[4]),
+ be32_to_cpu(e->params[5]),
+ be32_to_cpu(e->params[6]),
+ be32_to_cpu(e->params[7]));
}
return 0;
}
@@ -1043,23 +1098,17 @@ static int devlog_open(struct inode *inode, struct file *file)
return ret;
}
- /* Translate log multi-byte integral elements into host native format
- * and determine where the first entry in the log is.
+ /* Find the earliest (lowest Sequence Number) log entry in the
+ * circular Device Log.
*/
for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
struct fw_devlog_e *e = &dinfo->log[index];
- int i;
__u32 seqno;
if (e->timestamp == 0)
continue;
- e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
seqno = be32_to_cpu(e->seqno);
- for (i = 0; i < 8; i++)
- e->params[i] =
- (__force __be32)be32_to_cpu(e->params[i]);
-
if (seqno < fseqno) {
fseqno = seqno;
dinfo->first = index;
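The devlog rework above stops converting entries in place at open time; translation now
happens per field while rendering, so the cached firmware log stays in its native
big-endian layout. A minimal sketch of the display-time conversion:

	/* Sketch: copy-convert while printing instead of mutating the buffer. */
	u32 seqno = be32_to_cpu(e->seqno);
	u64 stamp = be64_to_cpu(e->timestamp);

	seq_printf(seq, "%10u %15llu ", seqno, stamp);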
@@ -1152,6 +1201,299 @@ static const struct file_operations mbox_debugfs_fops = {
.write = mbox_write
};
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+ int enabled, i;
+ struct trace_params tp;
+ unsigned int trcidx = (uintptr_t)seq->private & 3;
+ struct adapter *adap = seq->private - trcidx;
+
+ t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+ if (!enabled) {
+ seq_puts(seq, "tracer is disabled\n");
+ return 0;
+ }
+
+ if (tp.skip_ofst * 8 >= TRACE_LEN) {
+ dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+ return -EINVAL;
+ }
+ if (tp.port < 8) {
+ i = adap->chan_map[tp.port & 3];
+ if (i >= MAX_NPORTS) {
+ dev_err(adap->pdev_dev, "tracer %u is assigned "
+ "to non-existing port\n", trcidx);
+ return -EINVAL;
+ }
+ seq_printf(seq, "tracer is capturing %s %s, ",
+ adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+ } else
+ seq_printf(seq, "tracer is capturing loopback %d, ",
+ tp.port - 8);
+ seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+ tp.min_len);
+ seq_printf(seq, "packets captured %smatch filter\n",
+ tp.invert ? "do not " : "");
+
+ if (tp.skip_ofst) {
+ seq_puts(seq, "filter pattern: ");
+ for (i = 0; i < tp.skip_ofst * 2; i += 2)
+ seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+ seq_putc(seq, '/');
+ for (i = 0; i < tp.skip_ofst * 2; i += 2)
+ seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+ seq_puts(seq, "@0\n");
+ }
+
+ seq_puts(seq, "filter pattern: ");
+ for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+ seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+ seq_putc(seq, '/');
+ for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+ seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+ seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+ return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+ return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter. Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one
+ * of the NIC's response qid obtained from sge_qinfo and pattern has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified. If 2 are supplied the first one
+ * must be anchored at 0. An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int i, enable, ret;
+ u32 *data, *mask;
+ struct trace_params tp;
+ const struct inode *ino;
+ unsigned int trcidx;
+ char *s, *p, *word, *end;
+ struct adapter *adap;
+ u32 j;
+
+ ino = file_inode(file);
+ trcidx = (uintptr_t)ino->i_private & 3;
+ adap = ino->i_private - trcidx;
+
+ /* Don't accept more than 1K of input; nothing that long can be valid
+ * except lots of whitespace, so cap it there.
+ */
+ if (count > 1024)
+ return -EFBIG;
+ p = s = kzalloc(count + 1, GFP_USER);
+ if (!s)
+ return -ENOMEM;
+ if (copy_from_user(s, buf, count)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (s[count - 1] == '\n')
+ s[count - 1] = '\0';
+
+ enable = strcmp("disable", s) != 0;
+ if (!enable)
+ goto apply;
+
+ /* enable or disable trace multi rss filter */
+ if (adap->trace_rss)
+ t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+ else
+ t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+ memset(&tp, 0, sizeof(tp));
+ tp.port = TRC_PORT_NONE;
+ i = 0; /* counts pattern nibbles */
+
+ while (p) {
+ while (isspace(*p))
+ p++;
+ word = strsep(&p, " ");
+ if (!*word)
+ break;
+
+ if (!strncmp(word, "qid=", 4)) {
+ end = (char *)word + 4;
+ ret = kstrtouint(end, 10, &j);
+ if (ret)
+ goto out;
+ if (!adap->trace_rss) {
+ t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+ continue;
+ }
+
+ switch (trcidx) {
+ case 0:
+ t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+ break;
+ case 1:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+ break;
+ case 2:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+ break;
+ case 3:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+ break;
+ }
+ continue;
+ }
+ if (!strncmp(word, "snaplen=", 8)) {
+ end = (char *)word + 8;
+ ret = kstrtouint(end, 10, &j);
+ if (ret || j > 9600) {
+inval: count = -EINVAL;
+ goto out;
+ }
+ tp.snap_len = j;
+ continue;
+ }
+ if (!strncmp(word, "minlen=", 7)) {
+ end = (char *)word + 7;
+ ret = kstrtouint(end, 10, &j);
+ if (ret || j > TFMINPKTSIZE_M)
+ goto inval;
+ tp.min_len = j;
+ continue;
+ }
+ if (!strcmp(word, "not")) {
+ tp.invert = !tp.invert;
+ continue;
+ }
+ if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+ if (word[8] < '0' || word[8] > '3' || word[9])
+ goto inval;
+ tp.port = word[8] - '0' + 8;
+ continue;
+ }
+ if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+ if (word[2] < '0' || word[2] > '3' || word[3])
+ goto inval;
+ tp.port = word[2] - '0' + 4;
+ if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+ goto inval;
+ continue;
+ }
+ if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+ if (word[2] < '0' || word[2] > '3' || word[3])
+ goto inval;
+ tp.port = word[2] - '0';
+ if (adap->chan_map[tp.port] >= MAX_NPORTS)
+ goto inval;
+ continue;
+ }
+ if (!isxdigit(*word))
+ goto inval;
+
+ /* we have found a trace pattern */
+ if (i) { /* split pattern */
+ if (tp.skip_len) /* too many splits */
+ goto inval;
+ tp.skip_ofst = i / 16;
+ }
+
+ data = &tp.data[i / 8];
+ mask = &tp.mask[i / 8];
+ j = i;
+
+ while (isxdigit(*word)) {
+ if (i >= TRACE_LEN * 2) {
+ count = -EFBIG;
+ goto out;
+ }
+ *data = (*data << 4) + xdigit2int(*word++);
+ if (++i % 8 == 0)
+ data++;
+ }
+ if (*word == '/') {
+ word++;
+ while (isxdigit(*word)) {
+ if (j >= i) /* mask longer than data */
+ goto inval;
+ *mask = (*mask << 4) + xdigit2int(*word++);
+ if (++j % 8 == 0)
+ mask++;
+ }
+ if (i != j) /* mask shorter than data */
+ goto inval;
+ } else { /* no mask, use all 1s */
+ for ( ; i - j >= 8; j += 8)
+ *mask++ = 0xffffffff;
+ if (i % 8)
+ *mask = (1 << (i % 8) * 4) - 1;
+ }
+ if (*word == '@') {
+ end = (char *)word + 1;
+ ret = kstrtouint(end, 10, &j);
+ if (*end && *end != '\n')
+ goto inval;
+ if (j & 7) /* doesn't start at multiple of 8 */
+ goto inval;
+ j /= 8;
+ if (j < tp.skip_ofst) /* overlaps earlier pattern */
+ goto inval;
+ if (j - tp.skip_ofst > 31) /* skip too big */
+ goto inval;
+ tp.skip_len = j - tp.skip_ofst;
+ }
+ if (i % 8) {
+ *data <<= (8 - i % 8) * 4;
+ *mask <<= (8 - i % 8) * 4;
+ i = (i + 15) & ~15; /* 8-byte align */
+ }
+ }
+
+ if (tp.port == TRC_PORT_NONE)
+ goto inval;
+
+apply:
+ i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+ if (i)
+ count = i;
+out:
+ kfree(s);
+ return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mps_trc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mps_trc_write
+};
+
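Given the syntax described above, a hedged userspace example; the debugfs path is
illustrative and depends on the adapter's PCI address:

	#include <stdio.h>

	int main(void)
	{
		/* Capture Rx on port 0: snap 128 bytes, ignore runts below 60. */
		FILE *f = fopen("/sys/kernel/debug/cxgb4/0000:02:00.4/trace0", "w");

		if (!f)
			return 1;
		fprintf(f, "rx0 snaplen=128 minlen=60\n");
		fclose(f);
		return 0;
	}

Writing the single word "disable" to the same file turns the tracer back off.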
static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -1894,13 +2236,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int toe_idx = r - eth_entries;
- int rdma_idx = toe_idx - toe_entries;
+ int iscsi_idx = r - eth_entries;
+ int rdma_idx = iscsi_idx - iscsi_entries;
int ciq_idx = rdma_idx - rdma_entries;
int ctrl_idx = ciq_idx - ciq_entries;
int fq_idx = ctrl_idx - ctrl_entries;
@@ -1916,8 +2258,12 @@ do { \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
#define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
#define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
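To make the new macro layering concrete, one expansion (illustrative):

	/* RL("RxDrops:", stats.rx_drops)
	 *   -> R3("lu", "RxDrops:", stats.rx_drops)
	 *   -> S3("lu", "RxDrops:", rx[i].stats.rx_drops)
	 * printing one "%lu" column per queue in the group of up to four.
	 */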
if (r < eth_entries) {
int base_qset = r * 4;
@@ -1956,12 +2302,30 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
- } else if (toe_idx < toe_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
- const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxCSO:", stats.rx_cso);
+ RL("VLANxtract:", stats.vlan_ex);
+ RL("LROmerged:", stats.lro_merged);
+ RL("LROpackets:", stats.lro_pkts);
+ RL("RxDrops:", stats.rx_drops);
+ TL("TSO:", tso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
+ } else if (iscsi_idx < iscsi_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.ofldrxq[iscsi_idx * 4];
+ const struct sge_ofld_txq *tx =
+ &adap->sge.ofldtxq[iscsi_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
- S("QType:", "TOE");
+ S("QType:", "iSCSI");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
@@ -1981,6 +2345,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (rdma_idx < rdma_entries) {
const struct sge_ofld_rxq *rx =
&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2003,6 +2374,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (ciq_idx < ciq_entries) {
const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2018,6 +2396,9 @@ do { \
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:",
adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ RL("RxAN:", stats.an);
+ RL("RxNoMem:", stats.nomem);
+
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2028,6 +2409,8 @@ do { \
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
} else if (fq_idx == 0) {
const struct sge_rspq *evtq = &adap->sge.fw_evtq;
@@ -2043,10 +2426,14 @@ do { \
adap->sge.counter_val[evtq->pktcnt_idx]);
}
#undef R
+#undef RL
#undef T
+#undef TL
#undef S
+#undef R3
+#undef T3
#undef S3
-return 0;
+ return 0;
}
static int sge_queue_entries(const struct adapter *adap)
@@ -2163,6 +2550,73 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+ const struct tid_info *t = &adap->tids;
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ unsigned int sb;
+
+ if (chip <= CHELSIO_T5)
+ sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+ else
+ sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+ if (sb) {
+ seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else if (adap->flags & FW_OFLD_CONN) {
+ seq_printf(seq, "TID range: %u..%u/%u..%u",
+ t->aftid_base,
+ t->aftid_end,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else {
+ seq_printf(seq, "TID range: %u..%u",
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->hash_tids_in_use));
+ }
+ } else if (t->ntids) {
+ seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->tids_in_use));
+ }
+
+ if (t->nstids)
+ seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+ (!t->stid_base &&
+ (chip <= CHELSIO_T5)) ?
+ t->stid_base + 1 : t->stid_base,
+ t->stid_base + t->nstids - 1, t->stids_in_use);
+ if (t->natids)
+ seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+ t->natids - 1, t->atids_in_use);
+ seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+ t->ftid_base + t->nftids - 1);
+ if (t->nsftids)
+ seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+ t->sftid_base, t->sftid_base + t->nsftids - 2,
+ t->sftids_in_use);
+ if (t->ntids)
+ seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -2226,6 +2680,290 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
+struct mem_desc {
+ unsigned int base;
+ unsigned int limit;
+ unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct mem_desc *)a)->base -
+ ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+ unsigned int from, unsigned int to)
+{
+ char buf[40];
+
+ string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+ sizeof(buf));
+ seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
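For reference, an illustrative call and the line it produces (exact spacing and the
rendered size string depend on the %-15s field width and string_get_size()):

	/* mem_region_show(seq, "EDC0:", 0x0, 0x3fffff) prints something like:
	 * EDC0:           0x0-0x3fffff [4.00 MiB]
	 */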
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+ static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+ "MC0:", "MC1:"};
+ static const char * const region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+ };
+
+ int i, n;
+ u32 lo, hi, used, alloc;
+ struct mem_desc avail[4];
+ struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
+ struct mem_desc *md = mem;
+ struct adapter *adap = seq->private;
+
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ mem[i].limit = 0;
+ mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ avail[i].base = EDRAM0_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+ avail[i].idx = 0;
+ i++;
+ }
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ avail[i].base = EDRAM1_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+ avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(adap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+ avail[i].idx = 3;
+ i++;
+ }
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+ avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+ avail[i].idx = 2;
+ i++;
+ }
+ }
+ if (!i) /* no memory available */
+ return 0;
+ sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(adap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region);
+ if (!is_t4(adap->params.chip)) {
+ u32 size = 0;
+ u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+ u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+ if (is_t5(adap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(adap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = adap->vres.ocq.start;
+ if (adap->vres.ocq.size)
+ md->limit = md->base + adap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (avail[n].limit < avail[n + 1].base)
+ (md++)->base = avail[n].limit;
+ if (avail[n].limit)
+ (md++)->base = avail[n].limit;
+
+ n = md - mem;
+ sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ for (lo = 0; lo < i; lo++)
+ mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+ avail[lo].limit - 1);
+
+ seq_putc(seq, '\n');
+ for (i = 0; i < n; i++) {
+ if (mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+ if (!mem[i].limit)
+ mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, region[mem[i].idx], mem[i].base,
+ mem[i].limit);
+ }
+
+ seq_putc(seq, '\n');
+ lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP RAM:", lo, hi);
+
+ lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+ lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+ seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+ PMRXMAXPAGE_G(lo),
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+ (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+ lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+ seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+ PMTXMAXPAGE_G(lo),
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+ hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+ seq_printf(seq, "%u p-structs\n\n",
+ t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ for (i = 0; i < adap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq,
+ "Loopback %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+ .owner = THIS_MODULE,
+ .open = meminfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2263,6 +3001,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
{ "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
{ "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+ { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+ { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+ { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+ { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
{ "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
{ "rss", &rss_debugfs_fops, S_IRUSR, 0 },
@@ -2292,7 +3034,9 @@ int t4_setup_debugfs(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+ { "meminfo", &meminfo_fops, S_IRUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2331,14 +3075,19 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size));
}
} else {
- if (i & EXT_MEM_ENABLE_F)
+ if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
&flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+ adap->debugfs_root, &adap->use_bd);
+ debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+ adap->debugfs_root, &adap->trace_rss);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf71fa15..5eedb98ff581 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
unsigned int mbox = PCIE_FW_MASTER_M + 1;
+ u32 pcie_fw;
+ unsigned int master;
+ u8 master_vld = 0;
+
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ master = PCIE_FW_MASTER_G(pcie_fw);
+ if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+ master_vld = 1;
+ /* If csiostor is the master, return */
+ if (master_vld && (master != adap->pf)) {
+ dev_warn(adap->pdev_dev,
+ "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+ return -EOPNOTSUPP;
+ }
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 351f3b1bf800..f5dcde27e402 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1548,7 +1548,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
t->stid_tab[stid].data = data;
stid -= t->nstids;
stid += t->sftid_base;
- t->stids_in_use++;
+ t->sftids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1573,10 +1573,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- if (family == PF_INET)
- t->stids_in_use--;
- else
- t->stids_in_use -= 4;
+ if (stid < t->nstids) {
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
+ } else {
+ t->sftids_in_use--;
+ }
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@ static void process_tid_release_list(struct work_struct *work)
*/
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
- void *old;
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
- old = t->tid_tab[tid];
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_dec(&t->hash_tids_in_use);
+ else
+ atomic_dec(&t->tids_in_use);
+ }
+
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
- t->tid_tab[tid] = NULL;
mk_tid_release(skb, chan, tid);
t4_ofld_send(adap, skb);
} else
cxgb4_queue_tid_release(t, chan, tid);
- if (old)
- atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
@@ -1702,9 +1711,11 @@ static int tid_init(struct tid_info *t)
spin_lock_init(&t->atid_lock);
t->stids_in_use = 0;
+ t->sftids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
+ atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -3657,6 +3668,10 @@ static int adap_init0(struct adapter *adap)
*/
t4_get_fw_version(adap, &adap->params.fw_vers);
t4_get_tp_version(adap, &adap->params.tp_vers);
+ ret = t4_check_fw_version(adap);
+ /* If firmware is too old (not supported by driver) force an update. */
+ if (ret == -EFAULT)
+ state = DEV_STATE_UNINIT;
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
struct fw_hdr *card_fw;
@@ -4551,6 +4566,27 @@ static void free_some_resources(struct adapter *adapter)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+ u16 device_id;
+
+ /* Retrieve adapter's device ID */
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+
+ switch (device_id >> 12) {
+ case CHELSIO_T4:
+ return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ case CHELSIO_T5:
+ return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ case CHELSIO_T6:
+ return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ default:
+ dev_err(&pdev->dev, "Device %d is not supported\n",
+ device_id);
+ }
+ return -EINVAL;
+}
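The top nibble of the PCI device ID encodes the chip generation; a worked example with
a hypothetical ID:

	/* device_id 0x5410: 0x5410 >> 12 == 0x5 -> CHELSIO_T5, so the
	 * function returns CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev).
	 */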
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4594,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bool highdma = false;
struct adapter *adapter = NULL;
void __iomem *regs;
+ u32 whoami, pl_rev;
+ enum chip_type chip;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4586,7 +4624,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap_bar0;
/* We control everything through one PF */
- func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+ whoami = readl(regs + PL_WHOAMI_A);
+ pl_rev = REV_G(readl(regs + PL_REV_A));
+ chip = get_chip_type(pdev, pl_rev);
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
@@ -4677,8 +4719,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
@@ -4690,6 +4730,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_unmap_bar;
+ /* configure SGE_STAT_CFG_A to read WC stats */
+ if (!is_t4(adapter->params.chip))
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
+
for_each_port(adapter, i) {
struct net_device *netdev;
@@ -4757,7 +4802,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
- adapter->l2t = t4_init_l2t();
+ adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4782,6 +4827,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+ if (is_offload(adapter)) {
+ if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+ u32 hash_base, hash_reg;
+
+ if (chip <= CHELSIO_T5) {
+ hash_reg = LE_DB_TID_HASHBASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ } else {
+ hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base;
+ }
+ }
+ }
+
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
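The hash_base setup above is generation dependent: on T4/T5 the raw
LE_DB_TID_HASHBASE_A value is divided by four to obtain a TID index, while
the T6 register already holds an index. With an assumed T5 reading of
0x12000, for example, hash_base becomes 0x4800, and the TID accounting in
cxgb4_insert_tid()/cxgb4_remove_tid() then classifies every TID at or above
0x4800 as a hash-region TID.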
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d4f787..c3a8be5541e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@ struct tid_info {
unsigned long *stid_bmap;
unsigned int nstids;
unsigned int stid_base;
+ unsigned int hash_base;
union aopen_entry *atid_tab;
unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
spinlock_t stid_lock;
unsigned int stids_in_use;
+ unsigned int sftids_in_use;
+ /* TIDs in the TCAM */
atomic_t tids_in_use;
+ /* TIDs in the HASH */
+ atomic_t hash_tids_in_use;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid)
{
t->tid_tab[tid] = data;
- atomic_inc(&t->tids_in_use);
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_inc(&t->hash_tids_in_use);
+ else
+ atomic_inc(&t->tids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc29321f..ac27898c6ab0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
#define VLAN_NONE 0xfff
/* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR (1 << 12)
-
-enum {
- L2T_STATE_VALID, /* entry is up to date */
- L2T_STATE_STALE, /* entry may be used but needs revalidation */
- L2T_STATE_RESOLVING, /* entry needs address resolution */
- L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
- /* when state is one of the below the entry is not hashed */
- L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
- L2T_STATE_UNUSED /* entry not in use */
-};
+#define SYNC_WR_S 12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F SYNC_WR_V(1)
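SYNC_WR adopts the _S/_V/_F macro convention used across the driver's
register headers: _S is the bit position, _V(x) shifts a value into place,
and _F is the one-bit flag form. Expanded:

	SYNC_WR_V(1)  ==  1 << 12  ==  0x1000  ==  SYNC_WR_F

which is exactly the bit the old F_SYNC_WR constant encoded, so the CPL TID
carries the sync-vs-async marker unchanged.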
struct l2t_data {
+ unsigned int l2t_start; /* start index of our piece of the L2T */
+ unsigned int l2t_size; /* number of entries in l2tab */
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[L2T_SIZE];
+ struct l2t_entry l2tab[0]; /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
/*
* To avoid having to check address families we do not allow v4 and v6
* neighbors to be on the same hash chain. We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second. We need at least two
+ * entries in our L2T for this scheme to work.
*/
enum {
- L2T_SZ_HALF = L2T_SIZE / 2,
- L2T_HASH_MASK = L2T_SZ_HALF - 1
+ L2T_MIN_HASH_BUCKETS = 2,
};
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
- return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+ unsigned int l2t_size_half = d->l2t_size / 2;
+
+ return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
+ unsigned int l2t_size_half = d->l2t_size / 2;
u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
- return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+ return (l2t_size_half +
+ (jhash_2words(xor, ifindex, 0) % l2t_size_half));
}
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+ int addr_len, int ifindex)
{
- return addr_len == 4 ? arp_hash(addr, ifindex) :
- ipv6_hash(addr, ifindex);
+ return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+ ipv6_hash(d, addr, ifindex);
}
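With the table sized at run time, the halving scheme works for any size of
at least two entries: IPv4 keys land in buckets [0, size/2) and IPv6 keys
in [size/2, size), selected by modulo because l2t_size is no longer
guaranteed to be a power of two. A standalone restatement of the
arithmetic, for illustration only:

	static unsigned int bucket_of(unsigned int hash,
				      unsigned int l2t_size, int ipv6)
	{
		/* half >= 1 is guaranteed by L2T_MIN_HASH_BUCKETS */
		unsigned int half = l2t_size / 2;

		return (ipv6 ? half : 0) + hash % half;
	}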
/*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
*/
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
+ struct l2t_data *d = adap->l2t;
+ unsigned int l2t_idx = e->idx + d->l2t_start;
struct sk_buff *skb;
struct cpl_l2t_write_req *req;
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
- e->idx | (sync ? F_SYNC_WR : 0) |
+ l2t_idx | (sync ? SYNC_WR_F : 0) |
TID_QID_V(adap->sge.fw_evtq.abs_id)));
req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
- req->l2t_idx = htons(e->idx);
+ req->l2t_idx = htons(l2t_idx);
req->vlan = htons(e->vlan);
if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
*/
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
+ struct l2t_data *d = adap->l2t;
unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid & (L2T_SIZE - 1);
+ unsigned int l2t_idx = tid % L2T_SIZE;
if (unlikely(rpl->status != CPL_ERR_NONE)) {
dev_err(adap->pdev_dev,
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
- rpl->status, idx);
+ rpl->status, l2t_idx);
return;
}
- if (tid & F_SYNC_WR) {
- struct l2t_entry *e = &adap->l2t->l2tab[idx];
+ if (tid & SYNC_WR_F) {
+ struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
spin_lock(&e->lock);
if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
return NULL;
/* there's definitely a free entry */
- for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *)neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
if (neigh->dev->flags & IFF_LOOPBACK)
lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
return write_l2e(adap, e, 0);
}
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
+ unsigned int l2t_size;
int i;
struct l2t_data *d;
- d = t4_alloc_mem(sizeof(*d));
+ if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+ return NULL;
+ l2t_size = l2t_end - l2t_start + 1;
+ if (l2t_size < L2T_MIN_HASH_BUCKETS)
+ return NULL;
+
+ d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
if (!d)
return NULL;
+ d->l2t_start = l2t_start;
+ d->l2t_size = l2t_size;
+
d->rover = d->l2tab;
- atomic_set(&d->nfree, L2T_SIZE);
+ atomic_set(&d->nfree, l2t_size);
rwlock_init(&d->lock);
- for (i = 0; i < L2T_SIZE; ++i) {
+ for (i = 0; i < d->l2t_size; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
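The l2tab[0] trailing array turns l2t_data into a header plus a run-time
sized table carved from a single allocation, which is why the field must
stay last. The pattern in isolation, with toy types (kernel context
assumed; current kernels would spell this with a true flexible array member
and struct_size()):

	struct entry { int val; };

	struct table {
		unsigned int size;
		struct entry tab[0];	/* MUST BE LAST */
	};

	static struct table *alloc_table(unsigned int n)
	{
		/* one allocation covers the header and all n entries */
		struct table *d = kzalloc(sizeof(*d) + n * sizeof(struct entry),
					  GFP_KERNEL);

		if (d)
			d->size = n;
		return d;
	}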
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
- struct l2t_entry *l2tab = seq->private;
+ struct l2t_data *d = seq->private;
- return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+ return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}
static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
"Ethernet address VLAN/P LP State Users Port\n");
else {
char ip[60];
+ struct l2t_data *d = seq->private;
struct l2t_entry *e = v;
spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
else
sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
- e->idx, ip, e->dmac,
+ e->idx + d->l2t_start, ip, e->dmac,
e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
l2e_state(e), atomic_read(&e->refcnt),
e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
struct adapter *adap = inode->i_private;
struct seq_file *seq = file->private_data;
- seq->private = adap->l2t->l2tab;
+ seq->private = adap->l2t;
}
return rc;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126ce90cb..b38dc526aad5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
#include <linux/if_ether.h>
#include <linux/atomic.h>
+enum { L2T_SIZE = 4096 }; /* # of L2T entries */
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+ L2T_STATE_NOARP, /* Netdev down or removed */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
struct adapter;
struct l2t_data;
struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
*/
struct l2t_entry {
u16 state; /* entry state */
- u16 idx; /* entry index */
+ u16 idx; /* entry index within in-memory table */
u32 addr[4]; /* next hop IP or IPv6 address */
int ifindex; /* neighbor's net_device's ifindex */
struct neighbour *neigh; /* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db078f33a..9162746d7729 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -807,7 +807,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* message or, if we're doing a Large Send Offload, an LSO CPL message
* with an embedded TX Packet Write CPL message.
*/
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core) +
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid;
+ u32 wr_mid, ctrl0;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free: dev_kfree_skb_any(skb);
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
- TXPKT_INTF_V(pi->tx_chan) |
- TXPKT_PF_V(adap->pf));
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (is_t4(adap->params.chip))
+ ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+ else
+ ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+ cpl->ctrl0 = htonl(ctrl0);
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
struct fw_wr_hdr *wr;
unsigned int ndesc = skb->priority; /* previously saved */
- /*
- * Write descriptors and free skbs outside the lock to limit
+ written += ndesc;
+ /* Write descriptors and free skbs outside the lock to limit
* wait times. q->full is still set so new skbs will be queued.
*/
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
- written += ndesc;
- txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
unsigned long old = q->q.stops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae7ec86..44806253c178 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,6 +37,7 @@
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
+#include "t4fw_version.h"
/**
* t4_wait_op_done_val - wait until an operation is completed
@@ -345,6 +346,43 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
FW_CMD_MAX_TIMEOUT);
}
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+ u32 edc_ecc_err_addr_reg;
+ u32 rdata_reg;
+
+ if (is_t4(adap->params.chip)) {
+ CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+ return 0;
+ }
+ if (idx != 0 && idx != 1) {
+ CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+ return 0;
+ }
+
+ edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+ rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+ CH_WARN(adap,
+ "edc%d err addr 0x%x: 0x%x.\n",
+ idx, edc_ecc_err_addr_reg,
+ t4_read_reg(adap, edc_ecc_err_addr_reg));
+ CH_WARN(adap,
+ "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+ rdata_reg,
+ (unsigned long long)t4_read_reg64(adap, rdata_reg),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+ return 0;
+}
+
/**
* t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
* @adap: the adapter
@@ -1322,9 +1360,10 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
};
static const unsigned int t6_reg_ranges[] = {
- 0x1008, 0x114c,
+ 0x1008, 0x1124,
+ 0x1138, 0x114c,
0x1180, 0x11b4,
- 0x11fc, 0x1250,
+ 0x11fc, 0x1254,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
@@ -1345,18 +1384,18 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
0x5c10, 0x5ec0,
- 0x5ec8, 0x5ec8,
+ 0x5ec8, 0x5ecc,
0x6000, 0x6040,
- 0x6058, 0x6154,
+ 0x6058, 0x619c,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
0x7b00, 0x7c54,
0x7d00, 0x7efc,
- 0x8dc0, 0x8de0,
+ 0x8dc0, 0x8de4,
0x8df8, 0x8e84,
0x8ea0, 0x8f88,
- 0x8fb8, 0x911c,
+ 0x8fb8, 0x9124,
0x9400, 0x9470,
0x9600, 0x971c,
0x9800, 0x9808,
@@ -1371,20 +1410,21 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd31c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
- 0x11048, 0x11110,
- 0x11118, 0x1117c,
- 0x11190, 0x11260,
+ 0x11048, 0x1117c,
+ 0x11190, 0x11270,
0x11300, 0x1130c,
- 0x12000, 0x1205c,
+ 0x12000, 0x1206c,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
0x19150, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192b8,
+ 0x19238, 0x192bc,
0x193f8, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
@@ -1461,12 +1501,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30070,
- 0x30100, 0x3015c,
- 0x30190, 0x301d0,
- 0x30200, 0x30318,
+ 0x30100, 0x301d0,
+ 0x30200, 0x30320,
0x30400, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x3088c,
+ 0x30800, 0x30890,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
@@ -1539,12 +1578,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x33c24, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34070,
- 0x34100, 0x3415c,
- 0x34190, 0x341d0,
- 0x34200, 0x34318,
+ 0x34100, 0x341d0,
+ 0x34200, 0x34320,
0x34400, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x3488c,
+ 0x34800, 0x34890,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
@@ -2129,6 +2167,61 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers)
return 0;
}
+/**
+ * t4_check_fw_version - check if the FW is supported by this driver
+ * @adap: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, or a negative error if the version could
+ * not be read or there's a major version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+ int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+ minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+ micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ exp_major = T4FW_MIN_VERSION_MAJOR;
+ exp_minor = T4FW_MIN_VERSION_MINOR;
+ exp_micro = T4FW_MIN_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = T5FW_MIN_VERSION_MAJOR;
+ exp_minor = T5FW_MIN_VERSION_MINOR;
+ exp_micro = T5FW_MIN_VERSION_MICRO;
+ break;
+ case CHELSIO_T6:
+ exp_major = T6FW_MIN_VERSION_MAJOR;
+ exp_minor = T6FW_MIN_VERSION_MINOR;
+ exp_micro = T6FW_MIN_VERSION_MICRO;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+ adap->chip);
+ return -EINVAL;
+ }
+
+ if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+ (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+ dev_err(adap->pdev_dev,
+ "Card has firmware version %u.%u.%u, minimum "
+ "supported firmware is %u.%u.%u.\n", major, minor,
+ micro, exp_major, exp_minor, exp_micro);
+ return -EFAULT;
+ }
+ return 0;
+}
+
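The minimum-version gate above is a lexicographic comparison of the
(major, minor, micro) triple. The same test in isolation:

	/* nonzero when version a.b.c predates x.y.z */
	static int fw_older_than(int a, int b, int c, int x, int y, int z)
	{
		return a < x ||
		       (a == x && b < y) ||
		       (a == x && b == y && c < z);
	}

adap_init0() treats the resulting -EFAULT as a cue to force a firmware
upgrade by dropping the device state back to DEV_STATE_UNINIT.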
/* Is the given firmware API compatible with the one the driver was compiled
* with?
*/
@@ -3281,6 +3374,8 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
if (v & ECC_CE_INT_CAUSE_F) {
u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
+ t4_edc_err_read(adapter, idx);
+
t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
@@ -3488,7 +3583,9 @@ int t4_slow_intr_handler(struct adapter *adapter)
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +3610,9 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3786,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
return 0;
}
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+ return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
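t4_use_ldst() gates the TP PIO accessors below: go through the firmware
LDST mailbox whenever the firmware is alive, and fall back to direct
("backdoor") register access only when the firmware is down and the
adapter's use_bd flag asks for it. As a truth table:

	FW_OK | use_bd | access path
	------+--------+----------------------------
	 set  |  any   | t4_fw_tp_pio_rw() via LDST
	clear |   0    | t4_fw_tp_pio_rw() via LDST
	clear |   1    | t4_read/write_indirect()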
/**
* t4_fw_tp_pio_rw - Access TP PIO through LDST
* @adap: the adapter
@@ -3730,7 +3834,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
else
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3864,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
else
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3893,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, valp, 1,
TP_RSS_PF0_CONFIG_A + index, 1);
else
@@ -3829,7 +3933,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- if (adapter->flags & FW_OK) {
+ if (t4_use_ldst(adapter)) {
t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
} else {
@@ -3850,7 +3954,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3972,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +4028,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
*/
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 8,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 4,
- TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 4,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 4,
- TP_MIB_TCP_V6IN_ERR_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 2,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_chan_drops, 2,
- TP_MIB_OFD_CHN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 2,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
&st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
}
@@ -3974,16 +4060,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
*/
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 8, TP_MIB_CPL_IN_REQ_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 2, TP_MIB_CPL_IN_REQ_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
- 2, TP_MIB_CPL_OUT_RSP_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ nchan, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ nchan, TP_MIB_CPL_OUT_RSP_0_A);
}
/**
@@ -4238,6 +4321,119 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
}
/**
+ * t4_set_trace_filter - configure one of the tracing filters
+ * @adap: the adapter
+ * @tp: the desired trace filter parameters
+ * @idx: which filter to configure
+ * @enable: whether to enable or disable the filter
+ *
+ * Configures one of the tracing filters available in HW. If @enable is
+ * %0, @tp is not examined and may be %NULL. The user is responsible for
+ * setting the single/multiple trace mode by writing to the MPS_TRC_CFG_A
+ * register.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+ int idx, int enable)
+{
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg, cfg;
+ u32 multitrc = TRCMULTIFILTER_F;
+
+ if (!enable) {
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+ return 0;
+ }
+
+ cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+ if (cfg & TRCMULTIFILTER_F) {
+ /* If multiple tracers are enabled, then maximum
+ * capture size is 2.5KB (FIFO size of a single channel)
+ * minus 2 flits for CPL_TRACE_PKT header.
+ */
+ if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+ return -EINVAL;
+ } else {
+ /* If multiple tracers are disabled, to avoid deadlocks
+ * maximum packet capture size of 9600 bytes is recommended.
+ * Also in this mode, only trace0 can be enabled and running.
+ */
+ multitrc = 0;
+ if (tp->snap_len > 9600 || idx)
+ return -EINVAL;
+ }
+
+ if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+ tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+ tp->min_len > TFMINPKTSIZE_M)
+ return -EINVAL;
+
+ /* stop the tracer we'll be changing */
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+ idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+ data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ t4_write_reg(adap, data_reg, tp->data[i]);
+ t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+ }
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+ TFCAPTUREMAX_V(tp->snap_len) |
+ TFMINPKTSIZE_V(tp->min_len));
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+ TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+ (is_t4(adap->params.chip) ?
+ TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+ T5_TFPORT_V(tp->port) | T5_TFEN_F |
+ T5_TFINVERTMATCH_V(tp->invert)));
+
+ return 0;
+}
+
+/**
+ * t4_get_trace_filter - query one of the tracing filters
+ * @adap: the adapter
+ * @tp: the current trace filter parameters
+ * @idx: which trace filter to query
+ * @enabled: non-zero if the filter is enabled
+ *
+ * Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+ int *enabled)
+{
+ u32 ctla, ctlb;
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg;
+
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+ if (is_t4(adap->params.chip)) {
+ *enabled = !!(ctla & TFEN_F);
+ tp->port = TFPORT_G(ctla);
+ tp->invert = !!(ctla & TFINVERTMATCH_F);
+ } else {
+ *enabled = !!(ctla & T5_TFEN_F);
+ tp->port = T5_TFPORT_G(ctla);
+ tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+ }
+ tp->snap_len = TFCAPTUREMAX_G(ctlb);
+ tp->min_len = TFMINPKTSIZE_G(ctlb);
+ tp->skip_ofst = TFOFFSET_G(ctla);
+ tp->skip_len = TFLENGTH_G(ctla);
+
+ ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+ data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+ tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+ }
+}
+
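The set/get pair above is symmetric around the hardware's don't-care
encoding: the set path writes ~tp->mask into the DONT_CARE registers, and
the get path re-inverts on the way out and strips don't-care bits from the
match data. A round trip with assumed values:

	u32 mask = 0xffff0000;		/* caller matches only the top 16 bits */
	/* set: writes ~mask == 0x0000ffff into the DONT_CARE register */
	u32 back = ~0x0000ffffU;	/* get: recovers tp->mask == mask */
	u32 data = 0x12345678 & back;	/* 0x12340000, don't-cares cleared */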
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6294,7 +6490,7 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- if (adap->flags & FW_OK) {
+ if (t4_use_ldst(adap)) {
t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, 1);
t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c8488f430d19..640369df8b3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- L2T_SIZE = 4096, /* # of L2T entries */
PM_NSTATS = 5, /* # of PM stats */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 132cb8fc0bf7..b99144afd4ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -660,6 +660,9 @@ struct cpl_tx_pkt {
#define TXPKT_OVLAN_IDX_S 12
#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+#define TXPKT_T5_OVLAN_IDX_S 12
+#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S)
+
#define TXPKT_INTF_S 16
#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index d7ca106927b0..8353a6cbfcc2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+ /* T6 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x6001),
+ CH_PCI_ID_TABLE_FENTRY(0x6002),
+ CH_PCI_ID_TABLE_FENTRY(0x6003),
+ CH_PCI_ID_TABLE_FENTRY(0x6004),
+ CH_PCI_ID_TABLE_FENTRY(0x6005),
+ CH_PCI_ID_TABLE_FENTRY(0x6006),
+ CH_PCI_ID_TABLE_FENTRY(0x6007),
+ CH_PCI_ID_TABLE_FENTRY(0x6009),
+ CH_PCI_ID_TABLE_FENTRY(0x600d),
+ CH_PCI_ID_TABLE_FENTRY(0x6010),
+ CH_PCI_ID_TABLE_FENTRY(0x6011),
+ CH_PCI_ID_TABLE_FENTRY(0x6014),
+ CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825573b0..fc3044c8ac1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -136,6 +136,20 @@
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
& INGPACKBOUNDARY_M)
+#define VFIFO_ENABLE_S 10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S 6
+#define DBVFIFO_SIZE_M 0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S 0
+#define T6_DBVFIFO_SIZE_M 0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
@@ -303,6 +317,8 @@
#define SGE_FL_BUFFER_SIZE7_A 0x1060
#define SGE_FL_BUFFER_SIZE8_A 0x1064
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24
@@ -338,6 +354,11 @@
#define EGRTHRESHOLDPACKING_G(x) \
(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+#define T6_EGRTHRESHOLDPACKING_S 16
+#define T6_EGRTHRESHOLDPACKING_M 0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+ (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
#define SGE_TIMESTAMP_LO_A 0x1098
#define SGE_TIMESTAMP_HI_A 0x109c
@@ -352,6 +373,7 @@
#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
#define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
@@ -864,6 +886,10 @@
/* registers for module MA */
#define MA_EDRAM0_BAR_A 0x77c0
+#define EDRAM0_BASE_S 16
+#define EDRAM0_BASE_M 0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
#define EDRAM0_SIZE_S 0
#define EDRAM0_SIZE_M 0xfffU
#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
@@ -871,6 +897,10 @@
#define MA_EDRAM1_BAR_A 0x77c4
+#define EDRAM1_BASE_S 16
+#define EDRAM1_BASE_M 0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
#define EDRAM1_SIZE_S 0
#define EDRAM1_SIZE_M 0xfffU
#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
@@ -878,6 +908,11 @@
#define MA_EXT_MEMORY_BAR_A 0x77c8
+#define EXT_MEM_BASE_S 16
+#define EXT_MEM_BASE_M 0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
#define EXT_MEM_SIZE_S 0
#define EXT_MEM_SIZE_M 0xfffU
#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
@@ -885,6 +920,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define EXT_MEM1_BASE_S 16
+#define EXT_MEM1_BASE_M 0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
#define EXT_MEM1_SIZE_S 0
#define EXT_MEM1_SIZE_M 0xfffU
#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
@@ -892,6 +931,10 @@
#define MA_EXT_MEMORY0_BAR_A 0x77c8
+#define EXT_MEM0_BASE_S 16
+#define EXT_MEM0_BASE_M 0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
#define EXT_MEM0_SIZE_S 0
#define EXT_MEM0_SIZE_M 0xfffU
#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
@@ -973,6 +1016,10 @@
/* registers for module CIM */
#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
#define BOOTADDR_M 0xffffff00U
@@ -1231,6 +1278,33 @@
#define TP_OUT_CONFIG_A 0x7d04
#define TP_GLOBAL_CONFIG_A 0x7d08
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S 31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S 30
+#define PMTXNUMCHN_M 0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S 0
+#define PMTXMAXPAGE_M 0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S 0
+#define PMRXMAXPAGE_M 0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
#define DBGLAMODE_S 14
#define DBGLAMODE_M 0x3U
#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
@@ -1338,6 +1412,9 @@
#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
#define TP_RSS_LKP_TABLE_A 0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
#define LKPTBLROWVLD_S 31
#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
@@ -1483,6 +1560,11 @@
#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
+#define ULP_TX_TPT_LLIMIT_A 0x8dd4
+#define ULP_TX_TPT_ULIMIT_A 0x8dd8
+#define ULP_TX_PBL_LLIMIT_A 0x8ddc
+#define ULP_TX_PBL_ULIMIT_A 0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
#define PBL_BOUND_ERR_CH3_S 31
#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
@@ -1804,6 +1886,9 @@
#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
#define MPS_TRC_RSS_CONTROL_A 0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A 0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A 0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A 0xa004
#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
#define RSSCONTROL_S 16
@@ -1812,6 +1897,59 @@
#define QUEUENUMBER_S 0
#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+#define TFINVERTMATCH_S 24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F TFINVERTMATCH_V(1U)
+
+#define TFEN_S 22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F TFEN_V(1U)
+
+#define TFPORT_S 18
+#define TFPORT_M 0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S 8
+#define TFLENGTH_M 0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S 0
+#define TFOFFSET_M 0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S 25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S 23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F T5_TFEN_V(1U)
+
+#define T5_TFPORT_S 18
+#define T5_TFPORT_M 0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S 16
+#define TFMINPKTSIZE_M 0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S 0
+#define TFCAPTUREMAX_M 0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
#define TP_RSS_CONFIG_A 0x7df0
#define TNL4TUPENIPV6_S 31
@@ -2247,12 +2385,32 @@
#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
#define MATCHSRAM_F MATCHSRAM_V(1U)
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define USED_S 16
+#define USED_M 0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S 0
+#define ALLOC_M 0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S 16
+#define T5_USED_M 0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S 0
+#define T5_ALLOC_M 0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
#define DMACH_S 0
#define DMACH_M 0xffffU
#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
@@ -2410,8 +2568,21 @@
#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
#define ULP_RX_ISCSI_TAGMASK_A 0x19164
#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
#define ULP_RX_LA_CTL_A 0x1923c
#define ULP_RX_LA_RDPTR_A 0x19240
#define ULP_RX_LA_RDDATA_A 0x19244
@@ -2473,6 +2644,10 @@
#define SOURCEPF_M 0x7U
#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+#define T6_SOURCEPF_S 9
+#define T6_SOURCEPF_M 0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
#define PL_INT_CAUSE_A 0x1940c
#define ULP_TX_S 27
@@ -2612,7 +2787,20 @@
#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S 20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F HASHEN_V(1U)
#define REQQPARERR_S 16
#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
@@ -2634,6 +2822,10 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define BASEADDR_S 3
+#define BASEADDR_M 0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
#define TCAMINTPERR_S 13
#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
#define TCAMINTPERR_F TCAMINTPERR_V(1U)
@@ -2740,10 +2932,11 @@
#define EDC_H_BIST_DATA_PATTERN_A 0x50010
#define EDC_H_BIST_STATUS_RDATA_A 0x50028
+#define EDC_H_ECC_ERR_ADDR_A 0x50084
#define EDC_T51_BASE_ADDR 0x50800
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
#define PL_VF_REV_A 0x4
#define PL_VF_WHOAMI_A 0x0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ab4674684acc..a32de30ea663 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -762,8 +762,6 @@ enum fw_ldst_func_mod_index {
struct fw_ldst_cmd {
__be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE_S 0
-#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
@@ -788,6 +786,13 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
+ struct fw_ldst_cim_rq {
+ u8 req_first64[8];
+ u8 req_second64[8];
+ u8 resp_first64[8];
+ u8 resp_second64[8];
+ __be32 r3[2];
+ } cim_rq;
union fw_ldst_mps {
struct fw_ldst_mps_rplc {
__be16 fid_idx;
@@ -828,9 +833,33 @@ struct fw_ldst_cmd {
__be16 nset_pkd;
__be32 data[12];
} pcie;
+ struct fw_ldst_i2c_deprecated {
+ u8 pid_pkd;
+ u8 base;
+ u8 boffset;
+ u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ u8 pid;
+ u8 did;
+ u8 boffset;
+ u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ u8 val[33];
+ u8 r11[7];
+ } le;
} u;
};
+#define FW_LDST_CMD_ADDRSPACE_S 0
+#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
+
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 32b213559b02..c4b262ca7d43 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,18 +36,29 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0D
-#define T4FW_VERSION_MICRO 0x20
+#define T4FW_VERSION_MINOR 0x0E
+#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_BUILD 0x00
+#define T4FW_MIN_VERSION_MAJOR 0x01
+#define T4FW_MIN_VERSION_MINOR 0x04
+#define T4FW_MIN_VERSION_MICRO 0x00
+
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0D
-#define T5FW_VERSION_MICRO 0x20
+#define T5FW_VERSION_MINOR 0x0E
+#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_BUILD 0x00
+#define T5FW_MIN_VERSION_MAJOR 0x00
+#define T5FW_MIN_VERSION_MINOR 0x00
+#define T5FW_MIN_VERSION_MICRO 0x00
+
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0D
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MINOR 0x0E
+#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_BUILD 0x00
+#define T6FW_MIN_VERSION_MAJOR 0x00
+#define T6FW_MIN_VERSION_MINOR 0x00
+#define T6FW_MIN_VERSION_MICRO 0x00
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index ad53e5ad2acd..fa3786a9d30e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
rspq->unhandled_irqs++;
val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
- if (is_t4(rspq->adapter->params.chip)) {
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!rspq->bar2_addr)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter)
}
val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
- if (is_t4(adapter->params.chip))
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!intrq->bar2_addr)) {
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V(intrq->cntxt_id));
- else {
+ } else {
writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
wmb();
@@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter)
* give it more Free List entries. (Note that the SGE's Egress
* Congestion Threshold is in units of 2 Free List pointers.)
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+ switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+ case CHELSIO_T4:
+ s->fl_starve_thres =
+ EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
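The SGE counts its egress congestion threshold in units of two free-list
pointers, hence the closing * 2 + 1. With an assumed threshold field of 64,
fl_starve_thres becomes 129 entries: one past the congestion point, so the
driver refills a free list strictly before the hardware would stall on it.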
/*
* Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 0db6dc9e9ed2..63dd5fdac5b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -619,7 +619,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
*/
whoami = t4_read_reg(adapter,
T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
- pf = SOURCEPF_G(whoami);
+ pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 84b6a2b46aec..8b53f7d4bebf 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.83"
+#define DRV_VERSION "2.3.0.12"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -191,6 +191,25 @@ struct enic {
struct vnic_gen_stats gen_stats;
};
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+ struct enic *enic = vdev->priv;
+
+ return enic->netdev;
+}
+
+/* wrapper macros for kernel logging
+ * Make sure a variable vdev of type struct vnic_dev * is in scope wherever
+ * these macros are used
+ */
+#define vdev_info(args...) dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...) dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args)
+
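Because each vdev_*() wrapper expands to a reference to a local named vdev,
the macros only compile where such a variable is in scope, as the comment
warns. A hypothetical call site (the function name is illustrative):

	static int example_init(struct vnic_dev *vdev)
	{
		vdev_info("firmware handshake done\n");	/* dev_info() on &vdev->pdev->dev */
		vdev_neterr("queue setup failed\n");	/* netdev_err() on the bound netdev */
		return 0;
	}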
static inline struct device *enic_get_dev(struct enic *enic)
{
return &(enic->pdev->dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index d106186f4f4a..3c677ed3c29e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -177,7 +177,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
- res = skb_flow_dissect_flow_keys(skb, &keys);
+ res = skb_flow_dissect_flow_keys(skb, &keys, 0);
if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
(keys.basic.ip_proto != IPPROTO_TCP &&
keys.basic.ip_proto != IPPROTO_UDP))
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f3f1601a76f3..f44a39c40642 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
if (rxcoal->use_adaptive_rx_coalesce)
ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev,
return 0;
}
+static int enic_coalesce_valid(struct enic *enic,
+ struct ethtool_coalesce *ec)
+{
+ u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+ u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_high);
+ u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_low);
+
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -EINVAL;
+
+ if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+ ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+ netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+ coalesce_usecs_max);
+
+ if (ec->rx_coalesce_usecs_high &&
+ (rx_coalesce_usecs_high <
+ rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+
+ return 0;
+}
+
static int enic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
@@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev,
u32 rx_coalesce_usecs_high;
u32 coalesce_usecs_max;
unsigned int i, intr;
+ int ret;
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ ret = enic_coalesce_valid(enic, ecmd);
+ if (ret)
+ return ret;
coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
coalesce_usecs_max);
@@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev,
rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
coalesce_usecs_max);
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- if (ecmd->rx_coalesce_usecs_high &&
- (rx_coalesce_usecs_high <
- rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
- return -EINVAL;
-
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- rxcoal->use_adaptive_rx_coalesce =
- !!ecmd->use_adaptive_rx_coalesce;
- if (!rxcoal->use_adaptive_rx_coalesce)
- enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
- if (ecmd->rx_coalesce_usecs_high) {
- rxcoal->range_end = rx_coalesce_usecs_high;
- rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
- rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
- ENIC_AIC_LARGE_PKT_DIFF;
+ tx_coalesce_usecs);
}
- break;
- default:
- break;
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ }
+ rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+ if (!rxcoal->use_adaptive_rx_coalesce)
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ if (ecmd->rx_coalesce_usecs_high) {
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
}
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
enic->rx_coalesce_usecs = rx_coalesce_usecs;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index da2004e2a741..3352d027ab89 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
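A worked example of the rate conversion described in the comment above: bytes accumulated over delta microseconds turn directly into Mbps, because bits-per-microsecond equals megabits-per-second. A minimal standalone sketch with plain C types (values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t bytes_to_mbps(uint64_t bytes, uint64_t delta_us)
{
	uint64_t bits = bytes << 3;		/* traffic *= 8 */

	if (!delta_us || delta_us > UINT32_MAX)	/* mirrors the guard above */
		return 0;
	return (uint32_t)(bits / delta_us);	/* bits/us == Mbps */
}

int main(void)
{
	/* 125 MB observed over one second (1000000 us) -> 1000 Mbps */
	printf("%u Mbps\n", bytes_to_mbps(125000000ULL, 1000000ULL));
	return 0;
}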
static int enic_poll(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1170,7 +1228,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
wq_work_done,
0 /* dont unmask intr */,
0 /* dont reset intr timer */);
- return rq_work_done;
+ return budget;
}
if (budget > 0)
@@ -1191,6 +1249,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1198,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (err)
rq_work_done = rq_work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
+ */
+ enic_calc_int_moderation(enic, &enic->rq[0]);
if (rq_work_done < rq_work_to_do) {
@@ -1206,71 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
return rq_work_done;
}
-static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- unsigned int intr = enic_msix_rq_intr(enic, rq->index);
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- u32 timer = cq->tobe_rx_coal_timeval;
-
- if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
- vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
- cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
- }
-}
-
-static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
- int index;
- u32 timer;
- u32 range_start;
- u32 traffic;
- u64 delta;
- ktime_t now = ktime_get();
-
- delta = ktime_us_delta(now, cq->prev_ts);
- if (delta < ENIC_AIC_TS_BREAK)
- return;
- cq->prev_ts = now;
-
- traffic = pkt_size_counter->large_pkt_bytes_cnt +
- pkt_size_counter->small_pkt_bytes_cnt;
- /* The table takes Mbps
- * traffic *= 8 => bits
- * traffic *= (10^6 / delta) => bps
- * traffic /= 10^6 => Mbps
- *
- * Combining, traffic *= (8 / delta)
- */
-
- traffic <<= 3;
- traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
-
- for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
- if (traffic < mod_table[index].rx_rate)
- break;
- range_start = (pkt_size_counter->small_pkt_bytes_cnt >
- pkt_size_counter->large_pkt_bytes_cnt << 1) ?
- rx_coal->small_pkt_range_start :
- rx_coal->large_pkt_range_start;
- timer = range_start + ((rx_coal->range_end - range_start) *
- mod_table[index].range_percent / 100);
- /* Damping */
- cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
-
- pkt_size_counter->large_pkt_bytes_cnt = 0;
- pkt_size_counter->small_pkt_bytes_cnt = 0;
-}
-
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
@@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- /* Call the function which refreshes
- * the intr coalescing timer value based on
- * the traffic. This is supported only in
- * the case of MSI-x mode
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
@@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic)
int index = -1;
struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- /* If intr mode is not MSIX, do not do adaptive coalescing */
- if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
- netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
- return;
- }
-
/* 1. Read the link speed from fw
* 2. Pick the default range for the speed
* 3. Update it in enic->rx_coalesce_setting
@@ -2485,6 +2484,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ err = vnic_devcmd_init(enic->vdev);
+
+ if (err)
+ goto err_out_vnic_unregister;
+
#ifdef CONFIG_PCI_IOV
/* Get number of subvnics */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2659,8 +2663,8 @@ err_out_disable_sriov_pp:
pci_disable_sriov(pdev);
enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
}
-err_out_vnic_unregister:
#endif
+err_out_vnic_unregister:
vnic_dev_unregister(enic->vdev);
err_out_iounmap:
enic_iounmap(enic);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 0daa1c7073cb..abeda2a9ea27 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -24,6 +24,7 @@
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include "enic.h"
void vnic_cq_free(struct vnic_cq *cq)
{
@@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- pr_err("Failed to hook CQ[%d] resource\n", index);
+ vdev_err("Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 62f7b7baf93c..a3badefaf360 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -27,46 +27,9 @@
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
+#include "vnic_wq.h"
#include "vnic_stats.h"
-
-enum vnic_proxy_type {
- PROXY_NONE,
- PROXY_BY_BDF,
- PROXY_BY_INDEX,
-};
-
-struct vnic_res {
- void __iomem *vaddr;
- dma_addr_t bus_addr;
- unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
- u32 mul;
- u32 div;
- u32 max_usec;
-};
-
-struct vnic_dev {
- void *priv;
- struct pci_dev *pdev;
- struct vnic_res res[RES_TYPE_MAX];
- enum vnic_dev_intr_mode intr_mode;
- struct vnic_devcmd __iomem *devcmd;
- struct vnic_devcmd_notify *notify;
- struct vnic_devcmd_notify notify_copy;
- dma_addr_t notify_pa;
- u32 notify_sz;
- dma_addr_t linkstatus_pa;
- struct vnic_stats *stats;
- dma_addr_t stats_pa;
- struct vnic_devcmd_fw_info *fw_info;
- dma_addr_t fw_info_pa;
- enum vnic_proxy_type proxy;
- u32 proxy_index;
- u64 args[VNIC_DEVCMD_NARGS];
- struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
- pr_err("vNIC BAR0 res hdr length error\n");
+ vdev_err("vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
mrh = bar->vaddr;
if (!rh) {
- pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
@@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
(ioread32(&rh->version) != VNIC_RES_VERSION)) {
if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
- pr_err("vNIC BAR0 res magic/version error "
- "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
- VNIC_RES_MAGIC, VNIC_RES_VERSION,
- MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
- ioread32(&rh->magic), ioread32(&rh->version));
+ vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
}
@@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
- pr_err("vNIC BAR0 resource %d "
- "out-of-bounds, offset 0x%x + "
- "size 0x%x > bar len 0x%lx\n",
- type, bar_offset,
- len,
- bar[bar_num].len);
+ vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+ type, bar_offset, len,
+ bar[bar_num].len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
+ case RES_TYPE_DEVCMD2:
len = count;
break;
default:
@@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
- pr_err("Failed to allocate ring (size=%d), aborting\n",
- (int)ring->size);
+ vdev_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
return -ENOMEM;
}
@@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -ENODEV;
}
if (status & STAT_BUSY) {
- pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
@@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d devcmd %d\n",
- err, _CMD_N(cmd));
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
return -err;
}
@@ -330,10 +290,162 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
- pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ unsigned int i;
+ int delay, err;
+ u32 fetch_index, new_posted;
+ u32 posted = dc2c->posted;
+
+ fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF)
+ return -ENODEV;
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+ if (new_posted == fetch_index) {
+ vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+ _CMD_N(cmd), fetch_index, posted);
+ return -EBUSY;
+ }
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+ /* Adding write memory barrier prevents compiler and/or CPU reordering,
+ * thus avoiding descriptor posting before descriptor is initialized.
+ * Otherwise, hardware can read stale descriptor fields.
+ */
+ wmb();
+ iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+ dc2c->posted = new_posted;
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+ if (result->color == dc2c->color) {
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+ if (result->error) {
+ err = result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return -err;
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+ for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+ vdev->args[i] = result->results[i];
+
+ return 0;
+ }
+ udelay(100);
+ }
+
+ vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+ return -ETIMEDOUT;
+}
+
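The posting path above treats the devcmd2 work queue as a classic one-slot-open ring: "full" is detected by checking whether advancing the posted index would collide with the hardware's fetch index, so at most DEVCMD2_RING_SIZE - 1 commands can ever be outstanding. A standalone sketch of that invariant (illustrative only; the constant mirrors vnic_devcmd.h):

#include <stdbool.h>
#include <stdint.h>

#define DEVCMD2_RING_SIZE 32

static bool devcmd2_ring_full(uint32_t posted, uint32_t fetch_index)
{
	/* one slot is sacrificed so that full and empty differ */
	uint32_t new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	return new_posted == fetch_index;
}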
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ return -ENODEV;
+ vdev->devcmd_rtn = _vnic_dev_cmd;
+
+ return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ int err;
+ unsigned int fetch_index;
+
+ if (vdev->devcmd2)
+ return 0;
+
+ vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+ if (!vdev->devcmd2)
+ return -ENOMEM;
+
+ vdev->devcmd2->color = 1;
+ vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+ err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+
+ return -ENODEV;
+ }
+
+ enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+ 0);
+ vdev->devcmd2->posted = fetch_index;
+ vnic_wq_enable(&vdev->devcmd2->wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_wq;
+
+ vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+ vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+ vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+ vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+ VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+ if (err)
+ goto err_free_desc_ring;
+
+ vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+ return 0;
+
+err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+ kfree(vdev->devcmd2);
+ vdev->devcmd2 = NULL;
+
+ return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+ kfree(vdev->devcmd2);
+}
+
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
@@ -348,7 +460,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
vdev->args[2] = *a0;
vdev->args[3] = *a1;
- err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
if (err)
return err;
@@ -357,7 +469,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ vdev_neterr("Error %d proxy devcmd %d\n", err,
+ _CMD_N(cmd));
return err;
}
@@ -375,7 +488,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
vdev->args[0] = *a0;
vdev->args[1] = *a1;
- err = _vnic_dev_cmd(vdev, cmd, wait);
+ err = vdev->devcmd_rtn(vdev, cmd, wait);
*a0 = vdev->args[0];
*a1 = vdev->args[1];
@@ -650,7 +763,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
- pr_err("Can't set packet filter\n");
+ vdev_neterr("Can't set packet filter\n");
return err;
}
@@ -667,7 +780,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- pr_err("Can't add addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
return err;
}
@@ -684,7 +797,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- pr_err("Can't del addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
return err;
}
@@ -728,7 +841,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
- pr_err("notify block %p still allocated", vdev->notify);
+ vdev_neterr("notify block %p still allocated", vdev->notify);
return -EINVAL;
}
@@ -838,7 +951,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
memset(vdev->args, 0, sizeof(vdev->args));
if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
- err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+ err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
else
err = ERR_ECMDUNKNOWN;
@@ -847,7 +960,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
- pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+ vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
@@ -938,6 +1051,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
+ if (vdev->devcmd2)
+ vnic_dev_deinit_devcmd2(vdev);
+
kfree(vdev);
}
}
@@ -959,10 +1075,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
if (vnic_dev_discover_res(vdev, bar, num_bars))
goto err_out;
- vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
- if (!vdev->devcmd)
- goto err_out;
-
return vdev;
err_out:
@@ -977,6 +1089,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+ void __iomem *res;
+ int err;
+
+ res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (res) {
+ err = vnic_dev_init_devcmd2(vdev);
+ if (err)
+ vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+ err);
+ else
+ return 0;
+ } else {
+ vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+ }
+ err = vnic_dev_init_devcmd1(vdev);
+ if (err)
+ vdev_err("DEVCMD1 initialization failed: %d", err);
+
+ return err;
+}
+
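With vnic_devcmd_init() in place, callers no longer choose a command transport themselves: probe discovers resources, then a single call tries DEVCMD2 and falls back to DEVCMD1 internally. A hedged sketch of the intended call order, with labels as in the probe error path shown earlier in this diff (the exact vnic_dev_register() arguments are schematic):

	vdev = vnic_dev_register(NULL, enic, pdev, bar, num_bars);
	if (!vdev)
		goto err_out_iounmap;

	err = vnic_devcmd_init(vdev);	/* DEVCMD2 first, DEVCMD1 fallback */
	if (err)
		goto err_out_vnic_unregister;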
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
u64 a0, a1 = len;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 1fb214efceba..b013b6a78e87 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -70,7 +70,48 @@ struct vnic_dev_ring {
unsigned int desc_avail;
};
-struct vnic_dev;
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ struct devcmd2_controller *devcmd2;
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
+};
+
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -135,5 +176,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 435d0cd96c22..2a812880b884 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -365,6 +365,12 @@ enum vnic_devcmd_cmd {
*/
CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+ /* Initialization for the devcmd2 interface.
+ * in: (u64) a0 = host result buffer physical address
+ * in: (u16) a1 = number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
/* Add a filter.
* in: (u64) a0= filter address
* (u32) a1= size of filter
@@ -629,4 +635,26 @@ struct vnic_devcmd {
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd;
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index;
+ u8 error;
+ u8 color;
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
#endif /* _VNIC_DEVCMD_H_ */
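Both descriptor layouts are sized to match DEVCMD2_DESC_SIZE exactly, which is what lets the command ring and the results ring share one descriptor allocator. A quick standalone check of that arithmetic (assuming VNIC_DEVCMD_NARGS is 15, as defined earlier in this header; the mirror structs below are illustrative):

#include <assert.h>
#include <stdint.h>

#define NARGS 15	/* stand-in for VNIC_DEVCMD_NARGS */

struct devcmd2_desc {		/* mirrors struct vnic_devcmd2 */
	uint16_t pad;
	uint16_t flags;
	uint32_t cmd;
	uint64_t args[NARGS];
};				/* 2 + 2 + 4 + 8 * 15 = 128 bytes */

struct devcmd2_res {		/* mirrors struct devcmd2_result */
	uint64_t results[NARGS];
	uint32_t pad;
	uint16_t completed_index;
	uint8_t error;
	uint8_t color;
};				/* 120 + 4 + 2 + 1 + 1 = 128 bytes */

int main(void)
{
	assert(sizeof(struct devcmd2_desc) == 128);	/* DEVCMD2_DESC_SIZE */
	assert(sizeof(struct devcmd2_res) == 128);
	return 0;
}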
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 0ca107f7bc8c..942759d9cb3c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -25,6 +25,7 @@
#include "vnic_dev.h"
#include "vnic_intr.h"
+#include "enic.h"
void vnic_intr_free(struct vnic_intr *intr)
{
@@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
- pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index e0a73f1ca6f4..4e45f88ac1d4 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -48,6 +48,13 @@ enum vnic_res_type {
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
RES_TYPE_MAX, /* Count of resource types */
};
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index c4b2183bf352..cce2777dfc41 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -26,6 +26,7 @@
#include "vnic_dev.h"
#include "vnic_rq.h"
+#include "enic.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
@@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- pr_err("Failed to hook RQ[%d] resource\n", index);
+ vdev_err("Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
@@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq)
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
+ struct vnic_dev *vdev = rq->vdev;
iowrite32(0, &rq->ctrl->enable);
@@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
udelay(10);
}
- pr_err("Failed to disable RQ[%d]\n", rq->index);
+ vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index b5a1c937fad2..05ad16a7e872 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -26,6 +26,7 @@
#include "vnic_dev.h"
#include "vnic_wq.h"
+#include "enic.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
@@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
- pr_err("Failed to hook WQ[%d] resource\n", index);
+ vdev_err("Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
@@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
return 0;
}
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- unsigned int fetch_index, unsigned int posted_index,
- unsigned int error_interrupt_enable,
- unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (!wq->ctrl)
+ return -EINVAL;
+ vnic_wq_disable(wq);
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+ return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- vnic_wq_init_start(wq, cq_index, 0, 0,
+ enic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
}
@@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq)
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
+ struct vnic_dev *vdev = wq->vdev;
iowrite32(0, &wq->ctrl->enable);
@@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
udelay(10);
}
- pr_err("Failed to disable WQ[%d]\n", wq->index);
+ vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 296154351823..01209613d57d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -88,6 +88,18 @@ struct vnic_wq {
unsigned int pkts_outstanding;
};
+struct devcmd2_controller {
+ struct vnic_wq_ctrl __iomem *wq_ctrl;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+ struct vnic_dev_ring results_ring;
+ struct vnic_wq wq;
+ u32 posted;
+};
+
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
@@ -174,5 +186,11 @@ void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c0a7813603c3..cf94b72dbacd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1226,7 +1226,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
if (int_status & ISR_PRS)
dm9000_rx(dev);
- /* Trnasmit Interrupt check */
+ /* Transmit Interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index d1017509b08a..f7b42483921c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -604,19 +604,7 @@ static struct pci_driver pci_driver = {
.probe = ec_bhf_probe,
.remove = ec_bhf_remove,
};
-
-static int __init ec_bhf_init(void)
-{
- return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
- pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
module_param(polling_frequency, long, S_IRUGO);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
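The ec_bhf conversion above is purely mechanical: module_pci_driver() generates the same register/unregister boilerplate that was deleted. Roughly equivalent to the following (a simplified expansion; the exact form lives in the module_driver() macro):

static int __init pci_driver_init(void)
{
	return pci_register_driver(&pci_driver);
}
module_init(pci_driver_init);

static void __exit pci_driver_exit(void)
{
	pci_unregister_driver(&pci_driver);
}
module_exit(pci_driver_exit);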
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d12b41b3b19..0a27805cbbbd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.2"
+#define DRV_VER "10.6.0.3"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -105,6 +105,8 @@
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */
+#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
#define RSS_INDIR_TABLE_LEN 128
#define RSS_HASH_KEY_LEN 40
@@ -228,6 +230,7 @@ struct be_mcc_obj {
struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
+ u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
ulong tx_jiffies;
@@ -275,6 +278,7 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
+ u64 rx_vxlan_offload_pkts;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
@@ -590,6 +594,7 @@ struct be_adapter {
struct rss_info rss_info;
/* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
+ u16 serial_num[CNTL_SERIAL_NUM_WORDS];
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9eac3227d2ca..eb323913cd39 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb)
return wrb->payload.embedded_payload;
}
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
if (be_check_error(adapter, BE_ERROR_ANY))
- return;
+ return -EIO;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
wmb();
iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+ return 0;
}
/* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
return;
}
+ if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
opcode == OPCODE_COMMON_WRITE_OBJECT) &&
subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto out;
status = be_mcc_wait_compl(adapter);
if (status == -EIO)
@@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
else
hdr->version = 2;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
sizeof(*req), wrb, NULL);
- be_mcc_notify(adapter);
-
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
- goto err;
+ goto err_unlock;
}
req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req->loopback_type = loopback_type;
req->loopback_state = enable;
- status = be_mcc_notify_wait(adapter);
-err:
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+ status = -ETIMEDOUT;
+
+ return status;
+
+err_unlock:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
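Together with the be_async_cmd_process() hunk earlier in this file, this turns loopback-mode setup into a post-then-wait pattern: the MCC lock is a BH-disabled spinlock and wait_for_completion_timeout() sleeps, so the command is posted, the lock is dropped, and the async completion wakes the waiter. A hedged sketch of the shape of that pattern (names from the hunks above, simplified):

	status = be_mcc_notify(adapter);	/* post the command */
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);	/* never sleep under the lock */

	/* be_async_cmd_process() calls complete(&adapter->et_cmd_compl)
	 * when the SET_LOOPBACK_MODE response arrives; allow it
	 * SET_LB_MODE_TIMEOUT milliseconds.
	 */
	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
		status = -ETIMEDOUT;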
@@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
spin_unlock_bh(&adapter->mcc_lock);
@@ -2818,10 +2852,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
struct be_cmd_req_cntl_attribs *req;
struct be_cmd_resp_cntl_attribs *resp;
- int status;
+ int status, i;
int payload_len = max(sizeof(*req), sizeof(*resp));
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
+ u32 *serial_num;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2852,6 +2887,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!status) {
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ serial_num = attribs->hba_attribs.controller_serial_number;
+ for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+ adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+ (BIT_MASK(16) - 1);
}
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..7d178bdb112e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
@@ -1495,6 +1500,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
#define BE_PME_D3COLD_CAP 0x80
/********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT 12000
+
struct be_cmd_req_loopback_test {
struct be_cmd_req_hdr hdr;
u32 loopback_type;
@@ -1635,10 +1642,12 @@ struct be_cmd_req_set_qos {
struct mgmt_hba_attribs {
u32 rsvd0[24];
u8 controller_model_number[32];
- u32 rsvd1[79];
- u8 rsvd2[3];
+ u32 rsvd1[16];
+ u32 controller_serial_number[8];
+ u32 rsvd2[55];
+ u8 rsvd3[3];
u8 phy_port;
- u32 rsvd3[13];
+ u32 rsvd4[13];
} __packed;
struct mgmt_controller_attrib {
@@ -1758,6 +1767,7 @@ struct be_cmd_req_set_mac_list {
/*********************** HSW Config ***********************/
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define PORT_FWD_TYPE_PASSTHRU 0x1
#define ENABLE_MAC_SPOOFCHK 0x2
#define DISABLE_MAC_SPOOFCHK 0x3
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b2476dbfd103..2c9ed1710ba6 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = {
static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_internal_parity_err)},
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
/* Number of skbs queued for transmission by the driver */

{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@ err:
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+ int ret;
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ if (ret)
+ return ret;
+
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500, 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ if (ret)
+ return ret;
+
return *status;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..12687bf52b95 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -677,11 +681,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
+ u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
stats->tx_bytes += skb->len;
- stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+ stats->tx_pkts += tx_pkts;
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ stats->tx_vxlan_offload_pkts += tx_pkts;
u64_stats_update_end(&stats->sync);
}
@@ -1254,7 +1261,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
if (is_udp_pkt((*skb))) {
struct udphdr *udp = udp_hdr((*skb));
- switch (udp->dest) {
+ switch (ntohs(udp->dest)) {
case DHCP_CLIENT_PORT:
os2bmc = is_dhcp_client_filt_enabled(adapter);
goto done;
@@ -1957,6 +1964,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_compl++;
stats->rx_bytes += rxcp->pkt_size;
stats->rx_pkts++;
+ if (rxcp->tunneled)
+ stats->rx_vxlan_offload_pkts++;
if (rxcp->pkt_type == BE_MULTICAST_PACKET)
stats->rx_mcast_pkts++;
if (rxcp->err)
@@ -2447,10 +2456,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2510,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2589,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2594,13 +2607,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2616,6 +2623,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -3354,13 +3369,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3429,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3450,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3477,6 +3534,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return 0;
}
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3572,10 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3529,15 +3615,15 @@ err:
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
+ struct device *dev = &adapter->pdev->dev;
struct be_dma_mem cmd;
- int status = 0;
u8 mac[ETH_ALEN];
+ int status;
eth_zero_addr(mac);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -3546,24 +3632,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET,
PCICFG_PM_CONTROL_MASK);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan\n");
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
- cmd.dma);
- return status;
+ dev_err(dev, "Could not enable Wake-on-lan\n");
+ goto err;
}
- status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr,
- &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
}
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -3686,16 +3766,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3770,8 +3840,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3782,25 +3852,11 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3877,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -4194,15 +4254,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4342,6 +4395,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4364,8 +4418,11 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4391,11 +4448,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -4924,7 +4976,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
- return -1;
+ return false;
}
/* First letter of the build version is used to identify
@@ -5079,9 +5131,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int status = 0;
u8 hsw_mode;
- if (!sriov_enabled(adapter))
- return 0;
-
/* BE and Lancer chips support VEB mode only */
if (BEx_chip(adapter) || lancer_chip(adapter)) {
hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5091,6 +5140,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
NULL);
if (status)
return 0;
+
+ if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+ return 0;
}
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5121,7 +5173,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5220,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
@@ -5225,6 +5277,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
}
#endif
+static int be_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 *id;
+
+ if (MAX_PHYS_ITEM_ID_LEN < id_len)
+ return -ENOSPC;
+
+ ppid->id[0] = adapter->hba_port_num + 1;
+ id = &ppid->id[1];
+ for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+ i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+ memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+ ppid->id_len = id_len;
+
+ return 0;
+}
+
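The ID built above is 1 + 8 * 2 = 17 bytes: byte 0 is the HBA port number plus one, followed by the eight 16-bit controller serial number words copied highest-word-first. A standalone sketch of the same packing (plain C types; illustrative only):

#include <stdint.h>
#include <string.h>

#define SERIAL_WORDS 8	/* stand-in for CNTL_SERIAL_NUM_WORDS */

static size_t pack_phys_port_id(uint8_t port_num,
				const uint16_t serial[SERIAL_WORDS],
				uint8_t *out)
{
	uint8_t *p = out;
	int i;

	*p++ = port_num + 1;			/* byte 0: port number */
	for (i = SERIAL_WORDS - 1; i >= 0; i--) {	/* highest word first */
		memcpy(p, &serial[i], sizeof(serial[i]));
		p += sizeof(serial[i]);
	}
	return p - out;				/* 17 bytes total */
}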
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -5255,6 +5328,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_del_vxlan_port = be_del_vxlan_port,
.ndo_features_check = be_features_check,
#endif
+ .ndo_get_phys_port_id = be_get_phys_port_id,
};
static void be_netdev_init(struct net_device *netdev)
@@ -5813,7 +5887,6 @@ static int be_pci_resume(struct pci_dev *pdev)
if (status)
return status;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
status = be_resume(adapter);
@@ -5893,7 +5966,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 442410cd2ca4..a2c96fd88393 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1132,10 +1132,6 @@ static int ethoc_probe(struct platform_device *pdev)
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
- priv->phy_id = -1;
-
-#ifdef CONFIG_OF
- {
const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
@@ -1143,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
NULL);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
- }
-#endif
+ priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 24a85b292007..63c2bcf8031a 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -150,6 +150,9 @@ static void nps_enet_tx_handler(struct net_device *ndev)
if (!priv->tx_packet_sent || tx_ctrl.ct)
return;
+ /* Ack Tx ctrl register */
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
/* Check Tx transmit error */
if (unlikely(tx_ctrl.et)) {
ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
ndev->stats.tx_bytes += tx_ctrl.nt;
}
- if (priv->tx_skb) {
- dev_kfree_skb(priv->tx_skb);
- priv->tx_skb = NULL;
- }
-
+ dev_kfree_skb(priv->tx_skb);
priv->tx_packet_sent = false;
if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_enable buf_int_enable;
u32 work_done;
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
+ struct nps_enet_buf_int_enable buf_int_enable;
+
napi_complete(napi);
+ buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+ buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
buf_int_enable.value);
}
@@ -211,12 +211,13 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_cause buf_int_cause;
+ struct nps_enet_rx_ctl rx_ctrl;
+ struct nps_enet_tx_ctl tx_ctrl;
- buf_int_cause.value =
- nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+ rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+ if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
/* Discard packets bigger than the max frame length */
max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
- if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
ge_mac_cfg_3->max_len = max_frame_length;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
- ge_mac_cfg_3->value);
- }
/* Enable interrupts */
buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+ ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
/* Enable Rx and Tx */
ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+ ge_mac_cfg_3->value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
ge_mac_cfg_0.value);
}
@@ -527,10 +528,10 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
/* This driver handles one frame at a time */
netif_stop_queue(ndev);
- nps_enet_send_frame(ndev, skb);
-
priv->tx_skb = skb;
+ nps_enet_send_frame(ndev, skb);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index fc45c9daa1c2..6703674d679c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -36,7 +36,6 @@
#define NPS_ENET_REG_RX_CTL 0x810
#define NPS_ENET_REG_RX_BUF 0x818
#define NPS_ENET_REG_BUF_INT_ENABLE 0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE 0x8C4
#define NPS_ENET_REG_GE_MAC_CFG_0 0x1000
#define NPS_ENET_REG_GE_MAC_CFG_1 0x1004
#define NPS_ENET_REG_GE_MAC_CFG_2 0x1008
@@ -108,25 +107,6 @@ struct nps_enet_buf_int_enable {
};
};
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
- union {
- /* tx_done: Interrupt in the case when current frame was
- * read from TX buffer.
- * rx_rdy: Interrupt in the case when new frame is ready
- * in RX buffer.
- */
- struct {
- u32
- __reserved:30,
- tx_done:1,
- rx_rdy:1;
- };
-
- u32 value;
- };
-};
-
/* Gbps Eth MAC Configuration 0 register */
struct nps_enet_ge_mac_cfg_0 {
union {
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
};
void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..dd4ca39d5d8f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
static struct platform_device_id fec_devtype[] = {
{
@@ -362,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-static int
+static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
struct net_device *ndev)
@@ -437,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp->cbd_sc = status;
}
- txq->cur_tx = bdp;
-
- return 0;
-
+ return bdp;
dma_mapping_error:
bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
@@ -448,7 +447,7 @@ dma_mapping_error:
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
}
- return NETDEV_TX_OK;
+ return ERR_PTR(-ENOMEM);
}
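Changing the return type lets the caller learn which descriptor ended the fragment chain, while mapping failures travel through the same pointer via ERR_PTR()/IS_ERR(), the standard kernel idiom for pointer-returning functions. A minimal sketch of the calling convention (map_fragments() is a hypothetical helper):

#include <linux/err.h>

static struct bufdesc *submit_frags(struct fec_enet_priv_tx_q *txq)
{
	struct bufdesc *bdp = map_fragments(txq);	/* hypothetical */

	if (!bdp)
		return ERR_PTR(-ENOMEM);	/* errno encoded in pointer */
	return bdp;
}

static int caller(struct fec_enet_priv_tx_q *txq)
{
	struct bufdesc *last_bdp = submit_frags(txq);

	if (IS_ERR(last_bdp))
		return PTR_ERR(last_bdp);	/* recover the errno */
	return 0;
}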
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -465,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
unsigned int estatus = 0;
unsigned int index;
int entries_free;
- int ret;
entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -483,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */
bdp = txq->cur_tx;
+ last_bdp = bdp;
status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
@@ -511,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
}
if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
- if (ret)
- return ret;
+ last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+ if (IS_ERR(last_bdp))
+ return NETDEV_TX_OK;
} else {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex) {
@@ -542,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = estatus;
}
- last_bdp = txq->cur_tx;
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
@@ -561,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
skb_tx_timestamp(skb);
+ /* Make sure the updates to bdp and tx_skbuff are performed before
+ * cur_tx.
+ */
+ wmb();
txq->cur_tx = bdp;
/* Trigger transmission start */
@@ -1216,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
- while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
- /* current queue is empty */
- if (bdp == txq->cur_tx)
+ while (bdp != READ_ONCE(txq->cur_tx)) {
+ /* Order the load of cur_tx and cbd_sc */
+ rmb();
+ status = READ_ONCE(bdp->cbd_sc);
+ if (status & BD_ENET_TX_READY)
break;
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1273,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
+ /* Make sure the updates to bdp and tx_skbuff are performed
+ * before dirty_tx.
+ */
+ wmb();
txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
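Together the two hunks above turn cur_tx and dirty_tx into lockless producer/consumer cursors: the xmit path publishes its descriptor and tx_skbuff writes before advancing cur_tx, and the cleanup path orders its read of cur_tx before reading descriptor contents. A reduced sketch of the barrier pairing, assuming one producer and one consumer:

/* producer (xmit path) */
	txq->tx_skbuff[index] = skb;	/* 1. fill descriptor state */
	bdp->cbd_sc = status;
	wmb();				/* 2. publish before the cursor */
	txq->cur_tx = bdp;		/* 3. now visible to the consumer */

/* consumer (completion path) */
	while (bdp != READ_ONCE(txq->cur_tx)) {
		rmb();			/* order cursor read vs. contents */
		status = READ_ONCE(bdp->cbd_sc);
		if (status & BD_ENET_TX_READY)	/* HW still owns it */
			break;
		bdp = next_desc(bdp);	/* hypothetical advance helper */
	}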
@@ -1400,6 +1407,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
if ((status & BD_ENET_RX_LAST) == 0)
netdev_err(ndev, "rcv is not +last\n");
+ writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1767,10 +1775,16 @@ static void fec_enet_adjust_link(struct net_device *ndev)
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a read op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1783,21 +1797,35 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- /* return value */
- return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+ else
+ ret = 0;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a write op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
@@ -1811,10 +1839,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
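With the ipg clock handed over to runtime PM (see the fec_runtime_suspend/resume callbacks added near the end of this file), every MDIO transaction must bracket its register access with a get/put pair so the clock is guaranteed to be on. The shape of the pattern, reduced to its essentials (do_mdio_transfer() is a hypothetical placeholder):

static int foo_mdio_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume device, clock on */
	if (ret < 0)
		return ret;

	ret = do_mdio_transfer(dev);	/* hypothetical register access */

	pm_runtime_mark_last_busy(dev);	/* restart autosuspend timer */
	pm_runtime_put_autosuspend(dev);/* drop ref, suspend after delay */

	return ret;
}

The switch from init_completion() to reinit_completion() in the same hunks is the matching cleanup: the completion is initialized once at probe time, and only its counter needs resetting before each reuse.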
@@ -1826,9 +1857,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
if (fep->clk_enet_out) {
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
@@ -1852,7 +1880,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
} else {
clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1874,8 +1901,6 @@ failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
clk_disable_unprepare(fep->clk_ahb);
return ret;
@@ -2847,10 +2872,14 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ ret = pm_runtime_get_sync(&fep->pdev->dev);
+ if (ret < 0)
+ return ret;
+
pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret)
- return ret;
+ goto clk_enable;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
@@ -2881,6 +2910,9 @@ err_enet_mii_probe:
fec_enet_free_buffers(ndev);
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
+clk_enable:
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
}
@@ -2903,6 +2935,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+
fec_enet_free_buffers(ndev);
return 0;
@@ -2996,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p)
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
}
+ /* Add a netif status check here to avoid a system hang in this case:
+ * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+ * After ethx is down, all FEC clocks are gated off and a subsequent
+ * register access would hang the system.
+ */
+ if (!netif_running(ndev))
+ return 0;
+
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
@@ -3115,8 +3158,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
- GFP_KERNEL);
+ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
if (!cbd_base) {
return -ENOMEM;
}
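Two fixes land in this hunk: the ring is now allocated against the real device (passing NULL to dma_alloc_coherent() breaks on platforms with per-device DMA ops), and the dmam_ variant attaches the allocation to the device's managed-resource (devres) list, so it is released automatically on detach and the error and remove paths need no explicit dma_free_coherent(). The managed idiom in isolation:

	/* devres-managed: freed automatically when the driver detaches */
	cbd_base = dmam_alloc_coherent(&pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;	/* nothing to unwind by hand */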
@@ -3388,6 +3431,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -3400,6 +3447,12 @@ fec_probe(struct platform_device *pdev)
fep->reg_phy = NULL;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
fec_reset_phy(pdev);
if (fep->bufdesc_ex)
@@ -3447,6 +3500,10 @@ fec_probe(struct platform_device *pdev)
fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
failed_register:
@@ -3454,9 +3511,12 @@ failed_register:
failed_mii_init:
failed_irq:
failed_init:
+ fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
@@ -3473,14 +3533,12 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- cancel_delayed_work_sync(&fep->time_keep);
cancel_work_sync(&fep->tx_timeout_work);
+ fec_ptp_stop(pdev);
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->ptp_clock)
- ptp_clock_unregister(fep->ptp_clock);
of_node_put(fep->phy_node);
free_netdev(ndev);
@@ -3568,7 +3626,28 @@ failed_clk:
return ret;
}
-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_disable_unprepare(fep->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return clk_prepare_enable(fep->clk_ipg);
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
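SIMPLE_DEV_PM_OPS() only fills in the system-sleep hooks, so it is replaced by an open-coded dev_pm_ops combining both callback families. The probe-side sequence added earlier (get_noresume/set_active/enable) is the standard bring-up for a device that probes with its clock already running; a hedged sketch of that ordering:

	/* probe(): hardware is already powered, tell the PM core so */
	pm_runtime_set_autosuspend_delay(dev, 100);	/* ms, driver choice */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);	/* hold a ref without resuming */
	pm_runtime_set_active(dev);	/* mark state RPM_ACTIVE */
	pm_runtime_enable(dev);

	/* end of probe: drop the initial reference */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);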
static struct platform_driver fec_driver = {
.driver = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..1543cf0e8ef6 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
break;
default:
- /*
- * register RXMTRL must be set in order to do V1 packets,
- * therefore it is not possible to time stamp both V1 Sync and
- * Delay_Req messages and hardware does not support
- * timestamping all packets => return error
- */
fep->hwts_rx_en = 1;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
@@ -604,6 +598,16 @@ void fec_ptp_init(struct platform_device *pdev)
schedule_delayed_work(&fep->time_keep, HZ);
}
+void fec_ptp_stop(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ cancel_delayed_work_sync(&fep->time_keep);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+}
+
/**
* fec_ptp_check_pps_event
* @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..4b69d061d90f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -109,15 +109,15 @@
#define TX_TIMEOUT (1*HZ)
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
- dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bdp->lstatus = cpu_to_be32(lstatus);
}
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
- struct rxbd8 *rxbdp;
u32 __iomem *rfbptr;
int i, j;
- dma_addr_t bufaddr;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->cur_rx = rx_queue->rx_bd_base;
- rx_queue->skb_currx = 0;
- rxbdp = rx_queue->rx_bd_base;
- for (j = 0; j < rx_queue->rx_ring_size; j++) {
- struct sk_buff *skb = rx_queue->rx_skbuff[j];
+ rx_queue->next_to_clean = 0;
+ rx_queue->next_to_use = 0;
+ rx_queue->next_to_alloc = 0;
- if (skb) {
- bufaddr = be32_to_cpu(rxbdp->bufPtr);
- } else {
- skb = gfar_new_skb(ndev, &bufaddr);
- if (!skb) {
- netdev_err(ndev, "Can't allocate RX buffers\n");
- return -ENOMEM;
- }
- rx_queue->rx_skbuff[j] = skb;
- }
-
- gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
- rxbdp++;
- }
+ /* make sure next_to_clean != next_to_use after this
+ * by leaving at least 1 unused descriptor
+ */
+ gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
rx_queue->rfbptr = rfbptr;
rfbptr += 2;
}
-
- return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
void *vaddr;
dma_addr_t addr;
- int i, j, k;
+ int i, j;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
- rx_queue->dev = ndev;
+ rx_queue->ndev = ndev;
+ rx_queue->dev = dev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
if (!tx_queue->tx_skbuff)
goto cleanup;
- for (k = 0; k < tx_queue->tx_ring_size; k++)
- tx_queue->tx_skbuff[k] = NULL;
+ for (j = 0; j < tx_queue->tx_ring_size; j++)
+ tx_queue->tx_skbuff[j] = NULL;
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff =
- kmalloc_array(rx_queue->rx_ring_size,
- sizeof(*rx_queue->rx_skbuff),
- GFP_KERNEL);
- if (!rx_queue->rx_skbuff)
+ rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_buff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_buff)
goto cleanup;
-
- for (j = 0; j < rx_queue->rx_ring_size; j++)
- rx_queue->rx_skbuff[j] = NULL;
}
- if (gfar_init_bds(ndev))
- goto cleanup;
+ gfar_init_bds(ndev);
return 0;
@@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv)
}
}
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
{
- int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
@@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv)
if (priv->hwts_rx_en)
priv->uses_rxfcb = 1;
-
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
-
- frame_size += priv->padding;
-
- frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
-
- priv->rx_buffer_size = frame_size;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -565,22 +532,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
}
}
-static void lock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_lock(&priv->tx_queue[i]->txlock);
-}
-
-static void unlock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
-}
-
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
@@ -609,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
if (!priv->rx_queue[i])
return -ENOMEM;
- priv->rx_queue[i]->rx_skbuff = NULL;
priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = priv->ndev;
+ priv->rx_queue[i]->ndev = priv->ndev;
}
return 0;
}
@@ -1203,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv)
udelay(3);
- /* Compute rx_buff_size based on config flags */
- gfar_rx_buff_size_config(priv);
+ gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+ gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1216,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv)
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
+ /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+ * are marked as truncated. Avoid this by setting MACCFG2[Huge Frame]=1,
+ * checking RxBD[LG], and discarding frames larger than MAXFRM.
+ */
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(&regs->maccfg2, tempval);
@@ -1376,7 +1324,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
- spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(ofdev, priv);
@@ -1432,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
- priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1470,9 +1415,8 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- device_init_wakeup(&dev->dev,
- priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&dev->dev, priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1484,37 @@ static int gfar_suspend(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
-
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (!netif_running(ndev))
+ return 0;
+
+ disable_napi(priv);
+ netif_tx_lock(ndev);
netif_device_detach(ndev);
+ netif_tx_unlock(ndev);
- if (netif_running(ndev)) {
+ gfar_halt(priv);
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
- gfar_halt_nodisable(priv);
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
- /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ /* re-enable the Rx block */
tempval = gfar_read(&regs->maccfg1);
-
- tempval &= ~MACCFG1_TX_EN;
-
- if (!magic_packet)
- tempval &= ~MACCFG1_RX_EN;
-
+ tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- disable_napi(priv);
-
- if (magic_packet) {
- /* Enable interrupt on Magic Packet */
- gfar_write(&regs->imask, IMASK_MAG);
-
- /* Enable Magic Packet mode */
- tempval = gfar_read(&regs->maccfg2);
- tempval |= MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
- } else {
- phy_stop(priv->phydev);
- }
+ } else {
+ phy_stop(priv->phydev);
}
return 0;
@@ -1592,37 +1525,26 @@ static int gfar_resume(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
- if (!netif_running(ndev)) {
- netif_device_attach(ndev);
+ if (!netif_running(ndev))
return 0;
- }
- if (!magic_packet && priv->phydev)
+ if (magic_packet) {
+ /* Disable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+ } else {
phy_start(priv->phydev);
-
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- tempval = gfar_read(&regs->maccfg2);
- tempval &= ~MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
+ }
gfar_start(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
netif_device_attach(ndev);
-
enable_napi(priv);
return 0;
@@ -1639,10 +1561,7 @@ static int gfar_restore(struct device *dev)
return 0;
}
- if (gfar_init_bds(ndev)) {
- free_skb_resources(priv);
- return -ENOMEM;
- }
+ gfar_init_bds(ndev);
gfar_mac_reset(priv);
@@ -1933,26 +1852,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
- struct rxbd8 *rxbdp;
- struct gfar_private *priv = netdev_priv(rx_queue->dev);
int i;
- rxbdp = rx_queue->rx_bd_base;
+ struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+ if (rx_queue->skb)
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
- if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
- priv->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
- rx_queue->rx_skbuff[i] = NULL;
- }
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
+
+ if (!rxb->page)
+ continue;
+
+ dma_unmap_single(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+
+ rxb->page = NULL;
}
- kfree(rx_queue->rx_skbuff);
- rx_queue->rx_skbuff = NULL;
+
+ kfree(rx_queue->rx_buff);
+ rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
@@ -1977,7 +1902,7 @@ static void free_skb_resources(struct gfar_private *priv)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if (rx_queue->rx_skbuff)
+ if (rx_queue->rx_buff)
free_skb_rx_queue(rx_queue);
}
@@ -2045,7 +1970,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +1994,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto rx_irq_fail;
}
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2140,6 +2067,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
@@ -2169,8 +2101,6 @@ static int gfar_enet_open(struct net_device *dev)
if (err)
return err;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
-
return err;
}
@@ -2535,7 +2465,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
struct gfar_private *priv = netdev_priv(dev);
int frame_size = new_mtu + ETH_HLEN;
- if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
@@ -2589,15 +2519,6 @@ static void gfar_timeout(struct net_device *dev)
schedule_work(&priv->reset_task);
}
-static void gfar_align_skb(struct sk_buff *skb)
-{
- /* We need the data buffer to be aligned properly. We will reserve
- * as many bytes as needed to align the data properly
- */
- skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
@@ -2655,7 +2576,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
- u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+ ~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2704,49 +2626,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
+ struct page *page;
+ dma_addr_t addr;
- skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
- if (!skb)
- return NULL;
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
- gfar_align_skb(skb);
+ addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+ __free_page(page);
- return skb;
+ return false;
+ }
+
+ rxb->dma = addr;
+ rxb->page = page;
+ rxb->page_offset = 0;
+
+ return true;
}
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
- dma_addr_t addr;
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+ struct gfar_extra_stats *estats = &priv->extra_stats;
- skb = gfar_alloc_skb(dev);
- if (!skb)
- return NULL;
+ netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+ atomic64_inc(&estats->rx_alloc_err);
+}
- addr = dma_map_single(priv->dev, skb->data,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, addr))) {
- dev_kfree_skb_any(skb);
- return NULL;
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt)
+{
+ struct rxbd8 *bdp;
+ struct gfar_rx_buff *rxb;
+ int i;
+
+ i = rx_queue->next_to_use;
+ bdp = &rx_queue->rx_bd_base[i];
+ rxb = &rx_queue->rx_buff[i];
+
+ while (alloc_cnt--) {
+ /* try to reuse the page; allocate a fresh one if none is left */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+ gfar_rx_alloc_err(rx_queue);
+ break;
+ }
+ }
+
+ /* Setup the new RxBD */
+ gfar_init_rxbdp(rx_queue, bdp,
+ rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+ /* Update to the next pointer */
+ bdp++;
+ rxb++;
+
+ if (unlikely(++i == rx_queue->rx_ring_size)) {
+ i = 0;
+ bdp = rx_queue->rx_bd_base;
+ rxb = rx_queue->rx_buff;
+ }
}
- *bufaddr = addr;
- return skb;
+ rx_queue->next_to_use = i;
+ rx_queue->next_to_alloc = i;
}
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors matter */
- if (status & RXBD_TRUNCATED) {
+ if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
stats->rx_length_errors++;
atomic64_inc(&estats->rx_trunc);
@@ -2754,25 +2712,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
return;
}
/* Count the errors, if there were any */
- if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
stats->rx_length_errors++;
- if (status & RXBD_LARGE)
+ if (lstatus & BD_LFLAG(RXBD_LARGE))
atomic64_inc(&estats->rx_large);
else
atomic64_inc(&estats->rx_short);
}
- if (status & RXBD_NONOCTET) {
+ if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
stats->rx_frame_errors++;
atomic64_inc(&estats->rx_nonoctet);
}
- if (status & RXBD_CRCERR) {
+ if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
- if (status & RXBD_OVERRUN) {
+ if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
atomic64_inc(&estats->rx_overrun);
- stats->rx_crc_errors++;
+ stats->rx_over_errors++;
}
}
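count_errors() now receives the full 32-bit lstatus word rather than the 16-bit status half: the descriptor packs status flags into the upper 16 bits and the frame length into the lower 16, and BD_LFLAG() lifts the RXBD_* masks into the upper half. A standalone illustration of that layout (the flag value here is illustrative; only the shift convention is taken from the driver):

#include <assert.h>
#include <stdint.h>

#define BD_LENGTH_MASK	0x0000ffffu
#define BD_LFLAG(flags)	((uint32_t)(flags) << 16)
#define RXBD_TRUNCATED	0x0001u		/* illustrative flag value */

int main(void)
{
	/* truncated frame with a reported length of 1518 bytes */
	uint32_t lstatus = BD_LFLAG(RXBD_TRUNCATED) | 1518;

	assert(lstatus & BD_LFLAG(RXBD_TRUNCATED));
	assert((lstatus & BD_LENGTH_MASK) == 1518);
	return 0;
}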
@@ -2823,6 +2781,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+ struct sk_buff *skb, bool first)
+{
+ unsigned int size = lstatus & BD_LENGTH_MASK;
+ struct page *page = rxb->page;
+
+ /* Remove the FCS from the packet length */
+ if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ size -= ETH_FCS_LEN;
+
+ if (likely(first))
+ skb_put(skb, size);
+ else
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+
+ /* try to reuse the page */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* change offset to the other half */
+ rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+ atomic_inc(&page->_count);
+
+ return true;
+}
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+ struct gfar_rx_buff *old_rxb)
+{
+ struct gfar_rx_buff *new_rxb;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rxb = &rxq->rx_buff[nta];
+
+ /* find next buf that can reuse a page */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+ /* copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+ old_rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+ u32 lstatus, struct sk_buff *skb)
+{
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+ struct page *page = rxb->page;
+ bool first = false;
+
+ if (likely(!skb)) {
+ void *buff_addr = page_address(page) + rxb->page_offset;
+
+ skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ gfar_rx_alloc_err(rx_queue);
+ return NULL;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT);
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+ if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+ /* reuse the free half of the page */
+ gfar_reuse_rx_page(rx_queue, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear rxb content */
+ rxb->page = NULL;
+
+ return skb;
+}
+
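The helpers above implement the half-page recycling scheme used by several Ethernet drivers: each RX buffer owns one DMA-mapped page, the hardware writes into one 2 KiB half (GFAR_RXB_TRUESIZE), and once a frame is consumed the offset flips to the other half so the same mapping can serve the next frame. The page_count() == 1 test is the ownership check; a sketch of that decision, mirroring the tail of gfar_add_rx_frag() with the _count field as named in this kernel generation:

static bool can_recycle_half_page(struct page *page,
				  struct gfar_rx_buff *rxb)
{
	/* refcount > 1: the stack still references the half we just
	 * attached, so the page cannot go back to hardware yet
	 */
	if (page_count(page) != 1)
		return false;

	rxb->page_offset ^= GFAR_RXB_TRUESIZE;	/* flip 0 <-> 2048 */
	atomic_inc(&page->_count);		/* reference for the HW half */
	return true;
}

In the common case this halves page allocations and replaces full map/unmap cycles with the cheaper dma_sync_single_range_for_{cpu,device} calls seen above.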
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
@@ -2837,10 +2882,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(ndev);
struct rxfcb *fcb = NULL;
/* fcb is at the beginning if exists */
@@ -2849,10 +2893,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Remove the FCB from the skb
* Remove the padded bytes, if there are any
*/
- if (amount_pull) {
- skb_record_rx_queue(skb, fcb->rq);
- skb_pull(skb, amount_pull);
- }
+ if (priv->uses_rxfcb)
+ skb_pull(skb, GMAC_FCB_LEN);
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
@@ -2866,24 +2908,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->padding)
skb_pull(skb, priv->padding);
- if (dev->features & NETIF_F_RXCSUM)
+ if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo-randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
-
- /* Send the packet up the stack */
- napi_gro_receive(napi, skb);
-
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2892,91 +2930,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
- struct net_device *dev = rx_queue->dev;
- struct rxbd8 *bdp, *base;
- struct sk_buff *skb;
- int pkt_len;
- int amount_pull;
- int howmany = 0;
- struct gfar_private *priv = netdev_priv(dev);
+ struct net_device *ndev = rx_queue->ndev;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct rxbd8 *bdp;
+ int i, howmany = 0;
+ struct sk_buff *skb = rx_queue->skb;
+ int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+ unsigned int total_bytes = 0, total_pkts = 0;
/* Get the first full descriptor */
- bdp = rx_queue->cur_rx;
- base = rx_queue->rx_bd_base;
+ i = rx_queue->next_to_clean;
- amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+ while (rx_work_limit--) {
+ u32 lstatus;
- while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
- struct sk_buff *newskb;
- dma_addr_t bufaddr;
+ if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+ cleaned_cnt = 0;
+ }
+
+ bdp = &rx_queue->rx_bd_base[i];
+ lstatus = be32_to_cpu(bdp->lstatus);
+ if (lstatus & BD_LFLAG(RXBD_EMPTY))
+ break;
+ /* order rx buffer descriptor reads */
rmb();
- /* Add another skb for the future */
- newskb = gfar_new_skb(dev, &bufaddr);
+ /* fetch next to clean buffer from the ring */
+ skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+ if (unlikely(!skb))
+ break;
- skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+ cleaned_cnt++;
+ howmany++;
- dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
- priv->rx_buffer_size, DMA_FROM_DEVICE);
-
- if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
- be16_to_cpu(bdp->length) > priv->rx_buffer_size))
- bdp->status = cpu_to_be16(RXBD_LARGE);
-
- /* We drop the frame if we failed to allocate a new buffer */
- if (unlikely(!newskb ||
- !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
- be16_to_cpu(bdp->status) & RXBD_ERR)) {
- count_errors(be16_to_cpu(bdp->status), dev);
-
- if (unlikely(!newskb)) {
- newskb = skb;
- bufaddr = be32_to_cpu(bdp->bufPtr);
- } else if (skb)
- dev_kfree_skb(skb);
- } else {
- /* Increment the number of packets */
- rx_queue->stats.rx_packets++;
- howmany++;
-
- if (likely(skb)) {
- pkt_len = be16_to_cpu(bdp->length) -
- ETH_FCS_LEN;
- /* Remove the FCS from the packet length */
- skb_put(skb, pkt_len);
- rx_queue->stats.rx_bytes += pkt_len;
- skb_record_rx_queue(skb, rx_queue->qindex);
- gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi_rx);
+ if (unlikely(++i == rx_queue->rx_ring_size))
+ i = 0;
- } else {
- netif_warn(priv, rx_err, dev, "Missing skb!\n");
- rx_queue->stats.rx_dropped++;
- atomic64_inc(&priv->extra_stats.rx_skbmissing);
- }
+ rx_queue->next_to_clean = i;
+
+ /* fetch next buffer if not the last in frame */
+ if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+ continue;
+ if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+ count_errors(lstatus, ndev);
+
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+ continue;
}
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+ /* Increment the number of packets */
+ total_pkts++;
+ total_bytes += skb->len;
- /* Setup the new bdp */
- gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+ skb_record_rx_queue(skb, rx_queue->qindex);
- /* Update Last Free RxBD pointer for LFC */
- if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ gfar_process_frame(ndev, skb);
- /* Update to the next pointer */
- bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+ /* Send the packet up the stack */
+ napi_gro_receive(&rx_queue->grp->napi_rx, skb);
- /* update to point at the next skb */
- rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ skb = NULL;
}
- /* Update the current rxbd pointer to be the next one */
- rx_queue->cur_rx = bdp;
+ /* Store incomplete frames for completion */
+ rx_queue->skb = skb;
+
+ rx_queue->stats.rx_packets += total_pkts;
+ rx_queue->stats.rx_bytes += total_bytes;
+
+ if (cleaned_cnt)
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(priv->tx_actual_en)) {
+ u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+ gfar_write(rx_queue->rfbptr, bdp_dma);
+ }
return howmany;
}
@@ -3494,7 +3530,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
struct phy_device *phydev = priv->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
- struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
@@ -3551,15 +3586,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
/* Turn last free buffer recording on */
if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
for (i = 0; i < priv->num_rx_queues; i++) {
+ u32 bdp_dma;
+
rx_queue = priv->rx_queue[i];
- bdp = rx_queue->cur_rx;
- /* skip to previous bd */
- bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
- rx_queue->rx_bd_base,
- rx_queue->rx_ring_size);
-
- if (rx_queue->rfbptr)
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+ gfar_write(rx_queue->rfbptr, bdp_dma);
}
priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..8c1994856e93 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
/* Number of bytes to align the rx bufs to */
#define RXBUF_ALIGNMENT 64
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers. ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
#define PHY_INIT_TIMEOUT 100000
#define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_TX_RING_SIZE 256
#define DEFAULT_RX_RING_SIZE 256
+#define GFAR_RX_BUFF_ALLOC 16
+
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define DEFAULT_RX_BUFFER_SIZE 1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
#define DEFAULT_FIFO_TX_THR 0x100
#define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@ struct rmon_mib
};
struct gfar_extra_stats {
+ atomic64_t rx_alloc_err;
atomic64_t rx_large;
atomic64_t rx_short;
atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@ struct gfar_extra_stats {
atomic64_t eberr;
atomic64_t tx_babt;
atomic64_t tx_underrun;
- atomic64_t rx_skbmissing;
atomic64_t tx_timeout;
};
@@ -1012,34 +1012,42 @@ struct rx_q_stats {
unsigned long rx_dropped;
};
+struct gfar_rx_buff {
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+};
+
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rx_skbuff: skb pointers
- * @skb_currx: currently use skb pointer
+ * @rx_buff: Array of buffer info metadata structs
* @rx_bd_base: First rx buffer descriptor
- * @cur_rx: Next free rx ring entry
+ * @next_to_use: index of the next buffer to be alloc'd
+ * @next_to_clean: index of the next buffer to be cleaned
* @qindex: index of this queue
- * @dev: back pointer to the dev structure
+ * @ndev: back pointer to net_device
* @rx_ring_size: Rx ring size
* @rxcoalescing: enable/disable rx-coalescing
* @rxic: receive interrupt coalescing value
*/
struct gfar_priv_rx_q {
- struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
- dma_addr_t rx_bd_dma_base;
+ struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
struct rxbd8 *rx_bd_base;
- struct rxbd8 *cur_rx;
- struct net_device *dev;
- struct gfar_priv_grp *grp;
+ struct net_device *ndev;
+ struct device *dev;
+ u16 rx_ring_size;
+ u16 qindex;
+ struct gfar_priv_grp *grp;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+ struct sk_buff *skb;
struct rx_q_stats stats;
- u16 skb_currx;
- u16 qindex;
- unsigned int rx_ring_size;
- /* RX Coalescing values */
+ u32 __iomem *rfbptr;
unsigned char rxcoalescing;
unsigned long rxic;
- u32 __iomem *rfbptr;
+ dma_addr_t rx_bd_dma_base;
};
enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@ struct gfar_private {
struct device *dev;
struct net_device *ndev;
enum gfar_errata errata;
- unsigned int rx_buffer_size;
u16 uses_rxfcb;
u16 padding;
@@ -1145,9 +1152,6 @@ struct gfar_private {
int oldduplex;
int oldlink;
- /* Bitfield update lock */
- spinlock_t bflock;
-
uint32_t msg_enable;
struct work_struct reset_task;
@@ -1295,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
bdp->lstatus = cpu_to_be32(lstatus);
}
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+ if (rxq->next_to_clean > rxq->next_to_use)
+ return rxq->next_to_clean - rxq->next_to_use - 1;
+
+ return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+ struct rxbd8 *bdp;
+ u32 bdp_dma;
+ int i;
+
+ i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+ bdp = &rxq->rx_bd_base[i];
+ bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+ bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+ return bdp_dma;
+}
+
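gfar_rxbd_unused() deliberately keeps one slot empty so that next_to_clean == next_to_use can only mean "nothing to refill", never "ring full". Worked example for a 256-entry ring, verified standalone:

#include <assert.h>
#include <stdint.h>

static int rxbd_unused(uint16_t ring_size, uint16_t next_to_clean,
		       uint16_t next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return ring_size + next_to_clean - next_to_use - 1;
}

int main(void)
{
	assert(rxbd_unused(256, 200, 10) == 189);	/* linear case */
	assert(rxbd_unused(256, 10, 200) == 65);	/* wrapped case */
	assert(rxbd_unused(256, 0, 0) == 255);		/* empty ring */
	return 0;
}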
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..6bdc89179b72 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+ /* extra stats */
+ "rx-allocation-errors",
"rx-large-frame-errors",
"rx-short-frame-errors",
"rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"ethernet-bus-error",
"tx-babbling-errors",
"tx-underrun-errors",
- "rx-skb-missing-errors",
"tx-timeout-errors",
+ /* rmon stats */
"tx-rx-64-frames",
"tx-rx-65-127-frames",
"tx-rx-128-255-frames",
@@ -653,7 +655,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
wol->wolopts != 0)
@@ -664,9 +665,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
- spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = !!device_may_wakeup(&dev->dev);
- spin_unlock_irqrestore(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
return 0;
}
@@ -903,27 +902,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1273,310 +1251,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses hardwares clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entries self and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete in from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* A not set AND indicates the end of a depended block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries.
- * This is done by sorting the masks of a dependent block. A dependent block
- * is identified by glued ANDs or CLE. The sorting order toggles after every
- * block. Of course, entries in the scope of a mask must move along with it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1586,11 +1260,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1624,7 +1297,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1649,17 +1321,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1725,13 +1386,14 @@ static int gfar_add_cls(struct gfar_private *priv,
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
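
The gianfar hunks above delete the filer-table clustering and mask-optimization passes and reorder the rx_list.count update so the rule is counted before gfar_process_filer_changes() commits the table, with a matching decrement on the error path. A minimal sketch of that count-then-commit-then-rollback ordering, using illustrative stubs rather than the driver's real types:

    #include <stddef.h>

    struct rule_list { size_t count; };

    /* Hypothetical stand-in for gfar_process_filer_changes(). */
    static int rebuild_hw_table(struct rule_list *l) { (void)l; return 0; }

    static int add_rule(struct rule_list *list)
    {
        list->count++;                /* count the new rule first ...       */
        if (rebuild_hw_table(list)) { /* ... so the rebuild accounts for it */
            list->count--;            /* roll back if the commit fails      */
            return -1;
        }
        return 0;
    }
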
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d49bee38cd31..cc2d8b4b18e3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = {
.remove = hip04_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = hip04_mac_match,
},
};
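
This hunk (and the identical one for hip04_mdio just below) drops the explicit .owner initializer: the platform core fills in the owning module when the driver is registered, so setting it by hand is redundant. A sketch with hypothetical example_probe/example_remove/example_match symbols:

    static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
            .name           = "example",
            .of_match_table = example_match,
            /* no .owner: the registration path sets it to THIS_MODULE */
        },
    };
    module_platform_driver(example_driver);
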
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
index b3bac25db99c..fca0a5be1f0f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = {
.remove = hip04_mdio_remove,
.driver = {
.name = "hip04-mdio",
- .owner = THIS_MODULE,
.of_match_table = hip04_mdio_match,
},
};
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 29bbb628d712..7af870a3c549 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+ "Use old large send method on firmware that supports the new method");
+
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
@@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
- { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+ { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};
/* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ long ret1, ret2;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+
+ if (data)
+ set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+ else
+ clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+ ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret2 != H_SUCCESS) {
+ netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+ data, ret2);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ rc1 = -EIO;
+
+ } else {
+ adapter->fw_large_send_support = data;
+ adapter->large_send = data;
+ }
+ } else {
+ /* Older firmware versions of large send offload do not
+ * support TCP over IPv6
+ */
+ if (data == 1) {
+ dev->features &= ~NETIF_F_TSO6;
+ netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ }
+ adapter->large_send = data;
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
+
static int ibmveth_set_features(struct net_device *dev,
netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
- int rc;
- netdev_features_t changed = features ^ dev->features;
-
- if (features & NETIF_F_TSO & changed)
- netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+ int rc1 = 0, rc2 = 0;
- if (rx_csum == adapter->rx_csum)
- return 0;
+ if (rx_csum != adapter->rx_csum) {
+ rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc1 && !adapter->rx_csum)
+ dev->features =
+ features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ }
- rc = ibmveth_set_csum_offload(dev, rx_csum);
- if (rc && !adapter->rx_csum)
- dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ if (large_send != adapter->large_send) {
+ rc2 = ibmveth_set_tso(dev, large_send);
+ if (rc2 && !adapter->large_send)
+ dev->features =
+ features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
- return rc;
+ return rc1 ? rc1 : rc2;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs)
+ union ibmveth_buf_desc *descs, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
descs[0].desc, descs[1].desc,
descs[2].desc, descs[3].desc,
descs[4].desc, descs[5].desc,
- correlator, &correlator);
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
int last, i;
int force_bounce = 0;
dma_addr_t dma_addr;
+ unsigned long mss = 0;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags = IBMVETH_BUF_VALID;
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1007,7 +1084,7 @@ retry_bounce:
descs[0].fields.flags_len = desc_flags | skb->len;
descs[0].fields.address = adapter->bounce_buffer_dma;
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, 0)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1041,16 +1118,23 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
- /* Put -1 in the IP checksum to tell phyp it
- * is a largesend packet and put the mss in the TCP checksum.
- */
- ip_hdr(skb)->check = 0xffff;
- tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
- adapter->tx_large_packets++;
+ if (skb_is_gso(skb)) {
+ if (adapter->fw_large_send_support) {
+ mss = (unsigned long)skb_shinfo(skb)->gso_size;
+ adapter->tx_large_packets++;
+ } else if (!skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp it
+ * is a largesend packet. Put the mss in
+ * the TCP checksum.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check =
+ cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
}
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
+ long ret;
+ unsigned long ret_attr;
dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
dev->unit_address);
@@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
netdev->features |= netdev->hw_features;
- /* TSO is disabled by default */
- netdev->hw_features |= NETIF_F_TSO;
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ /* If running older firmware, TSO should not be enabled by default */
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features |= netdev->hw_features;
+ } else {
+ netdev->hw_features |= NETIF_F_TSO;
+ }
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 41dedb1fb2ae..4eade67fe30c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -40,6 +40,8 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
@@ -59,13 +61,20 @@
static inline long h_send_logical_lan(unsigned long unit_address,
unsigned long desc1, unsigned long desc2, unsigned long desc3,
unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out)
+ unsigned long corellator_in, unsigned long *corellator_out,
+ unsigned long mss, unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
- desc2, desc3, desc4, desc5, desc6, corellator_in);
+ if (large_send_support)
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in, mss);
+ else
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in);
*corellator_out = retbuf[0];
@@ -147,11 +156,13 @@ struct ibmveth_adapter {
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
+ int large_send;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
@@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields {
#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
+#define IBMVETH_BUF_LRG_SND 0x04000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
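
The ibmveth changes add a second large-send path: firmware that advertises IBMVETH_ILLAN_LRG_SND_SUPPORT takes the MSS as an extra h_send_logical_lan argument together with the IBMVETH_BUF_LRG_SND descriptor flag, while older firmware keeps the legacy encoding of -1 in the IP checksum and the MSS in the TCP checksum field. A simplified sketch of the selection, with stand-in fields for the skb/adapter state:

    #include <stdint.h>

    struct tso_ctx {
        int      fw_large_send;  /* new method advertised by firmware */
        uint16_t gso_size;
        uint16_t *ip_check;      /* points into the packet headers */
        uint16_t *tcp_check;
    };

    /* Returns the mss to pass to the send hcall (0 for the old method). */
    static unsigned long prepare_large_send(struct tso_ctx *c, uint32_t *flags)
    {
        if (c->fw_large_send) {
            *flags |= 0x04000000;        /* IBMVETH_BUF_LRG_SND */
            return c->gso_size;          /* mss rides in the hcall */
        }
        *c->ip_check  = 0xffff;          /* legacy largesend marker */
        *c->tcp_check = c->gso_size;     /* byte swapping elided here */
        return 0;
    }
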
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d2657a412768..068789e694c9 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr = pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr))
+ if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
return -ENOMEM;
+ }
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
+ if (!nic->cbs_pool) {
+ netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+ err = -ENOMEM;
+ goto err_out_pool;
+ }
netif_info(nic, probe, nic->netdev,
"addr 0x%llx, irq %d, MAC addr %pM\n",
(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_pool:
+ unregister_netdev(netdev);
err_out_free:
e100_free(nic);
err_out_iounmap:
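
Two e100 robustness fixes: a DMA mapping failure now frees the skb instead of leaking it, and the pci_pool allocation in e100_probe() is checked, with a new unwind label. A compact sketch of the goto-unwind style the probe path uses, with stub functions standing in for the real setup calls:

    /* Stubs standing in for the real registration/allocation calls. */
    static int  register_dev(void)   { return 0; }  /* 0 = success */
    static void unregister_dev(void) { }
    static int  create_pool(void)    { return 1; }  /* nonzero = success */
    static void free_rest(void)      { }

    static int probe(void)
    {
        if (register_dev())
            goto err_out_free;
        if (!create_pool())      /* the newly checked allocation */
            goto err_out_pool;   /* unwind what was set up above */
        return 0;

    err_out_pool:
        unregister_dev();
    err_out_free:
        free_rest();
        return -12;              /* -ENOMEM */
    }
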
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 26459853c6be..34c551e322eb 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -106,14 +106,14 @@
#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /* NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d8f263..faf4b3f3d0b5 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
- writel(0, rx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(rx_ring, 0);
- else
- writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- writel(0, tx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(tx_ring, 0);
- else
- writel(0, tx_ring->tail);
}
/**
@@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
+ writel(0, tx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring, 0);
+ else
+ writel(0, tx_ring->tail);
+
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
/* Tx irq moderation */
@@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
+ writel(0, rx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, 0);
+ else
+ writel(0, rx_ring->tail);
+
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
if (adapter->netdev->features & NETIF_F_RXCSUM)
@@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
+ u32 systimel_1, systimel_2, systimeh;
cycle_t systim, systim_next;
- /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
- * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
- * to occur between reads, so if we read a value close to overflow, we
- * wait for overflow to occur and read both registers when it's safe.
+ /* SYSTIMH latching upon SYSTIML read does not work well.
+ * This means that if SYSTIML overflows after we read it but before
+ * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+ * will see a huge non-linear jump in the systime value. To fix that,
+ * we test for overflow and, if one occurred, re-read systime.
+ */
- u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
- do {
- systim = (cycle_t)er32(SYSTIML);
- } while (systim > systim_overflow_latch_fix);
- systim |= (cycle_t)er32(SYSTIMH) << 32;
+ systimel_1 = er32(SYSTIML);
+ systimeh = er32(SYSTIMH);
+ systimel_2 = er32(SYSTIML);
+ /* Check for overflow. If there was no overflow, use the values */
+ if (systimel_1 < systimel_2) {
+ systim = (cycle_t)systimel_1;
+ systim |= (cycle_t)systimeh << 32;
+ } else {
+ /* There was an overflow; read SYSTIMH again and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systim = (cycle_t)systimel_2;
+ systim |= (cycle_t)systimeh << 32;
+ }
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
u64 incvalue, time_delta, rem, temp;
@@ -4588,6 +4599,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
+ pm_qos_remove_request(&adapter->pm_qos_req);
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -6316,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return retval;
}
+ /* Ensure that the appropriate bits are set in LPI_CTRL
+ * for EEE in Sx
+ */
+ if ((hw->phy.type >= e1000_phy_i217) &&
+ adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+ u16 lpi_ctrl = 0;
+
+ retval = hw->phy.ops.acquire(hw);
+ if (!retval) {
+ retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+ &lpi_ctrl);
+ if (!retval) {
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_100_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+ lpi_ctrl);
+ }
+ }
+ hw->phy.ops.release(hw);
+ }
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6465,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
pci_set_master(pdev);
@@ -6743,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
+ e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
err = pci_enable_device_mem(pdev);
if (err) {
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index b24e5fee17f2..1d5e0b77062a 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,8 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
@@ -125,7 +125,6 @@
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
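
fm10k switches from poking page->pfmemalloc directly to the page_is_pfmemalloc() accessor, which hides how the flag is actually stored inside struct page. The shape of the accessor pattern, with a simplified stand-in type and an illustrative encoding:

    #include <stdbool.h>

    struct page_stub { long flag_word; };

    /* Illustrative only: the real encoding lives inside struct page
     * and is the accessor's business, not the driver's. */
    static bool page_is_pfmemalloc_stub(const struct page_stub *p)
    {
        return p->flag_word == -1;
    }
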
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3fa3a04..e7462793d48d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -79,10 +79,13 @@
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+ (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
@@ -98,7 +101,7 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +292,42 @@ struct i40e_pf {
struct work_struct service_task;
u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_PTP (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
+#define I40E_FLAG_PTP BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27)
#endif
-#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */
@@ -362,6 +372,7 @@ struct i40e_pf {
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ bool cur_promisc;
u16 instance; /* A unique number per i40e_pf instance in the system */
@@ -432,6 +443,8 @@ struct i40e_veb {
bool stat_offsets_loaded;
struct i40e_eth_stats stats;
struct i40e_eth_stats stats_offsets;
+ struct i40e_veb_tc_stats tc_stats;
+ struct i40e_veb_tc_stats tc_stats_offsets;
};
/* struct that defines a VSI, associated with a dev */
@@ -443,8 +456,8 @@ struct i40e_vsi {
u32 current_netdev_flags;
unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
struct list_head mac_filter_list;
@@ -550,6 +563,7 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
+ bool arm_wb_state;
} ____cacheline_internodealigned_in_smp;
/* lan device */
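
The i40e.h rework replaces the open-coded (u64)(1 << n) flag definitions with BIT()/BIT_ULL(). Beyond readability, this matters for the new flags at bit 32 and above: shifting the int literal 1 by 32 or more is undefined before the cast ever happens, whereas BIT_ULL() widens first. A minimal demonstration, reproducing the helpers for illustration:

    #include <stdint.h>

    #define BIT(nr)      (1UL  << (nr))
    #define BIT_ULL(nr)  (1ULL << (nr))

    /* (u64)(1 << 32) shifts a 32-bit int first -- undefined behavior;
     * BIT_ULL(32) widens to 64 bits before shifting. */
    static const uint64_t FLAG_HW_ATR_EVICT_CAPABLE = BIT_ULL(32);
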
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 929e3d72a01e..95d23bfbcbf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -262,7 +257,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -274,8 +272,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -509,7 +505,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -532,7 +529,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -826,8 +825,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1068,6 +1071,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -2064,6 +2068,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
@@ -2177,6 +2187,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
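
The new RSS key/LUT admin-queue commands (opcodes 0x0B02-0x0B05) are indirect: the descriptor carries only the VSI id, flags, and the DMA address of a caller-supplied buffer, split across the little-endian addr_high/addr_low fields. A sketch of that address split, with a generic helper in place of the driver's byte-order macros:

    #include <stdint.h>

    /* Split a 64-bit buffer address across the descriptor's two
     * 32-bit address fields (endianness conversion elided). */
    static void set_indirect_addr(uint32_t *addr_high, uint32_t *addr_low,
                                  uint64_t dma)
    {
        *addr_high = (uint32_t)(dma >> 32);
        *addr_low  = (uint32_t)(dma & 0xFFFFFFFFu);
    }
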
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0bae22da014d..114dc6450183 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -54,6 +54,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
* i40e_debug_aq
* @hw: pointer to the hardware structure
* @mask: debug mask
@@ -177,6 +392,169 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -563,6 +941,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+ case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1187,9 +1566,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
blink = false;
if (blink)
- gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
- gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -2391,7 +2770,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
@@ -2416,6 +2795,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ u8 major_rev;
u32 i = 0;
u16 id;
@@ -2433,6 +2813,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
+ major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2888,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_MFP_MODE_1:
- if (number == 1)
- p->mfp_mode_1 = true;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
break;
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2557,7 +2950,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* Software override ensuring FCoE is disabled if in npar or mfp
* mode, because it is not supported in these modes.
*/
- if (p->npar_enable || p->mfp_mode_1)
+ if (p->npar_enable || p->flex10_enable)
p->fcoe = false;
/* count the enabled ports (aka the "not disabled" ports) */
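
i40e_common.c gains i40e_stat_str() and i40e_aq_str(), which the ethtool hunks further down use to turn raw status codes into readable log messages; unknown codes fall back to a decimal string rendered into hw->err_str. The pattern in miniature, with illustrative codes only:

    #include <stdio.h>

    /* Known codes map to names; anything else is formatted into
     * caller-provided storage. */
    static const char *status_str(int code, char *buf, size_t len)
    {
        switch (code) {
        case 0:   return "OK";
        case -53: return "ERR_TIMEOUT";
        }
        snprintf(buf, len, "%d", code);
        return buf;
    }
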
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 2547aa21b2ca..90de46aef557 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -588,6 +588,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
if (!ret) {
/* CEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le16_to_cpu(cee_v1_cfg.tlv_status);
i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
&hw->local_dcbx_config);
}
@@ -597,6 +599,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
if (!ret) {
/* CEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le32_to_cpu(cee_cfg.tlv_status);
i40e_cee_to_dcb_config(&cee_cfg,
&hw->local_dcbx_config);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3fac8ee..50fc894a4cde 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -58,9 +58,9 @@
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
-#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
-#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
@@ -79,9 +79,9 @@
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
-#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
-#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d5c1b6..1c51f736a8d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
/* Set up all the App TLVs if DCBx is negotiated */
for (i = 0; i < dcbxcfg->numapps; i++) {
prio = dcbxcfg->app[i].priority;
- tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+ tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
/* Add APP only if the TC is enabled for this VSI */
if (tc_map & vsi->tc_config.enabled_tc) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf478af0..d7c15d17faa6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
pf->auto_disable_flags |= flag;
}
dev_info(&pf->pdev->dev, "requesting a PF reset\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
}
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd579e6..f141e78d409e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
- (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
- ret_code = i40e_validate_nvm_checksum(hw, NULL);
- } else {
- ret_code = I40E_ERR_DIAG_TEST_FAILED;
- }
-
- return ret_code;
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a68c65b17ea..e972b5ecbf0b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -114,7 +114,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
- I40E_PF_STAT("crc_errors", stats.crc_errors),
+ I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
@@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -195,7 +197,14 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+ / sizeof(u64))
#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
@@ -679,15 +688,17 @@ static int i40e_set_settings(struct net_device *netdev,
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
- netdev_info(netdev, "Set phy config failed with error %d.\n",
- status);
+ netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EAGAIN;
}
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
- netdev_info(netdev, "Updating link info failed with error %d\n",
- status);
+ netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +718,9 @@ static int i40e_nway_reset(struct net_device *netdev)
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -820,18 +832,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1009,7 +1024,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
/* register returns value in power of 2, 64Kbyte chunks. */
- val = (64 * 1024) * (1 << val);
+ val = (64 * 1024) * BIT(val);
return val;
}
@@ -1249,7 +1264,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
int len = I40E_PF_STATS_LEN(netdev);
if (pf->lan_veb != I40E_NO_VEB)
- len += I40E_VEB_STATS_LEN;
+ len += I40E_VEB_STATS_TOTAL;
return len;
} else {
return I40E_VSI_STATS_LEN(netdev);
@@ -1400,6 +1415,20 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "port.%s",
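
The per-TC VEB strings added above are written into a flat buffer of fixed-width slots; ethtool locates stat name N at offset N * ETH_GSTRING_LEN. A standalone sketch of that packing (ETH_GSTRING_LEN is 32 in <linux/ethtool.h>; MAX_TC stands in for I40E_MAX_TRAFFIC_CLASS, which is 8):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32
#define MAX_TC 8

int main(void)
{
	char strings[MAX_TC * 4 * ETH_GSTRING_LEN];
	char *p = strings;
	unsigned int i;

	memset(strings, 0, sizeof(strings));
	for (i = 0; i < MAX_TC; i++) {
		/* each snprintf fills one fixed-width slot, truncating
		 * safely; the pointer then jumps to the next slot
		 */
		snprintf(p, ETH_GSTRING_LEN, "veb.tc_%u_tx_packets", i);
		p += ETH_GSTRING_LEN;
		snprintf(p, ETH_GSTRING_LEN, "veb.tc_%u_tx_bytes", i);
		p += ETH_GSTRING_LEN;
		snprintf(p, ETH_GSTRING_LEN, "veb.tc_%u_rx_packets", i);
		p += ETH_GSTRING_LEN;
		snprintf(p, ETH_GSTRING_LEN, "veb.tc_%u_rx_bytes", i);
		p += ETH_GSTRING_LEN;
	}
	printf("first slot: %s\n", strings);
	return 0;
}
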
@@ -1462,20 +1491,11 @@ static int i40e_get_ts_info(struct net_device *dev,
else
info->phc_index = -1;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
@@ -1560,6 +1580,21 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
return false;
}
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+ struct i40e_vsi **vsi = pf->vsi;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!vsi[i])
+ continue;
+ if (vsi[i]->type == I40E_VSI_VMDQ2)
+ return true;
+ }
+
+ return false;
+}
+
static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
@@ -1573,9 +1608,9 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, &pf->state);
- if (i40e_active_vfs(pf)) {
+ if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
- "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
@@ -1591,11 +1626,13 @@ static void i40e_diag_test(struct net_device *netdev,
/* indicate we're in test mode */
dev_close(netdev);
else
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ /* This reset does not affect link - if it is
+ * changed to a type of reset that does affect
+ * link then the following link test would have
+ * to be moved to before the reset
+ */
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
- /* Link test performed before hardware reset
- * so autoneg doesn't interfere with test result
- */
if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1613,7 +1650,7 @@ static void i40e_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
dev_open(netdev);
@@ -1646,7 +1683,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+ if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1679,7 +1716,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if (((1 << hw->port) & wol_nvm_bits))
+ if (BIT(hw->port) & wol_nvm_bits)
return -EOPNOTSUPP;
/* only magic packet is supported */
@@ -2025,10 +2062,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -2037,10 +2074,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -2049,12 +2086,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -2063,12 +2100,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -2081,7 +2118,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2090,15 +2127,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -2509,7 +2546,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @indir: indirection table
* @key: hash key
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
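
A recurring change in this file (and throughout i40e_main.c below) replaces numeric "err %d aq_err %d" logging with i40e_stat_str()/i40e_aq_str(), which decode the driver status and admin-queue return codes into readable strings. A minimal sketch of that decoding pattern, with hypothetical codes and a hypothetical stat_str() helper (the real lookup tables live in the driver's common code):

#include <stdio.h>

/* illustrative subset of status codes, not the driver's enum */
enum status { STAT_OK = 0, STAT_TIMEOUT = 1, STAT_NO_MEMORY = 2 };

static const char *stat_str(enum status s)
{
	switch (s) {
	case STAT_OK:        return "OK";
	case STAT_TIMEOUT:   return "ERR_TIMEOUT";
	case STAT_NO_MEMORY: return "ERR_NO_MEMORY";
	}
	return "UNKNOWN";
}

int main(void)
{
	enum status ret = STAT_TIMEOUT;

	if (ret)
		/* same shape as the dev_info()/netdev_info() calls above */
		printf("set fc failed, err %s\n", stat_str(ret));
	return 0;
}
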
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e0e7cd..5ea75dd537d6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
/* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1));
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val);
@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
- pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
- I40E_DMA_CNTX_BASE_SIZE;
+ pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+ I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
- (1 << I40E_HASH_FILTER_SIZE_16K) *
+ BIT(I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE;
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT(tc);
break;
}
}
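
The FCoE hash-enable update above writes I40E_PFQF_HENA(1), the upper half of a 64-bit enable mask exposed as two 32-bit registers, which is why bit positions at or above 32 are written as BIT(pctype - 32). A sketch of splitting and reassembling such a mask (the pctype value 49 is illustrative):

#include <stdio.h>
#include <stdint.h>

#define BIT(nr)     (1U << (nr))
#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	uint32_t hena_lo = 0, hena_hi = 0;
	unsigned int pctype = 49;	/* illustrative upper-half bit */
	uint64_t hena;

	/* route the bit to whichever 32-bit register holds it */
	if (pctype >= 32)
		hena_hi |= BIT(pctype - 32);
	else
		hena_lo |= BIT(pctype);

	/* reassemble the logical 64-bit mask from the two halves */
	hena = (uint64_t)hena_lo | ((uint64_t)hena_hi << 32);
	printf("hena = 0x%016llx, bit set: %d\n",
	       (unsigned long long)hena, !!(hena & BIT_ULL(pctype)));
	return 0;
}
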
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d15d40..a93174ddeaba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 9b987ccc9e82..5ebe12d56ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -116,6 +116,7 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one.
*
* This function:
* 1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
**/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index)
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
- /* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
- I40E_HMC_PAGED_BP_SIZE,
- I40E_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
- pd_entry->bp.addr = mem;
+ pd_entry->bp.addr = *page;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
- page_desc = mem.pa | 0x1;
+ page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
if (ret_code)
goto exit;
if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
- i40e_status ret_code = 0;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}
/**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- /* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
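
The i40e_add_pd_table_entry() change above threads an optional caller-supplied DMA page (rsrc_pg) through the allocator: borrow it if provided, otherwise allocate, and record ownership in pd_entry->rsrc_pg so the remove path only frees pages the HMC itself allocated. A minimal sketch of that ownership pattern, with simplified stand-in types:

#include <stdlib.h>

struct page_mem { void *va; };	/* stand-in for i40e_dma_mem */

struct pd_entry {
	struct page_mem bp;
	int rsrc_pg;		/* set when the caller owns the page */
};

static int add_entry(struct pd_entry *e, struct page_mem *rsrc_pg)
{
	if (rsrc_pg) {
		e->rsrc_pg = 1;		/* borrowed: caller frees it */
		e->bp = *rsrc_pg;
	} else {
		e->bp.va = malloc(4096);	/* stand-in 4K backing page */
		if (!e->bp.va)
			return -1;
		e->rsrc_pg = 0;		/* owned: remove path frees it */
	}
	return 0;
}

static void remove_entry(struct pd_entry *e)
{
	if (!e->rsrc_pg)	/* mirrors the i40e_remove_pd_bp() check */
		free(e->bp.va);
	e->bp.va = NULL;
}

int main(void)
{
	struct pd_entry e = { { 0 }, 0 };

	if (!add_entry(&e, NULL))	/* NULL means allocate internally */
		remove_entry(&e);
	return 0;
}

Callers that do not preallocate simply pass NULL, as the i40e_lan_hmc.c call site further down shows.
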
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a02660330..d90669211392 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 0079ad7bcd0e..fa371a2a40c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num;
obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* update the pd table entry */
ret_code = i40e_add_pd_table_entry(hw,
info->hmc_info,
- i);
+ i, NULL);
if (ret_code) {
pd_error = true;
break;
@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
mask = ~(u32)0;
@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
mask = ~(u64)0;
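
Note that the i40e_write_qword() hunk keeps the explicit width < 64 check even after switching to BIT_ULL(), because shifting a 64-bit value by 64 is undefined in C rather than zero; full-width fields need the mask special-cased. A sketch (width_mask is a hypothetical helper name):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

static uint64_t width_mask(unsigned int width)
{
	/* BIT_ULL(64) would shift by the operand width: undefined
	 * behavior, not zero, so full width takes the ~0 branch
	 */
	if (width < 64)
		return BIT_ULL(width) - 1;
	return ~(uint64_t)0;
}

int main(void)
{
	printf("width 5  -> 0x%llx\n", (unsigned long long)width_mask(5));
	printf("width 64 -> 0x%llx\n", (unsigned long long)width_mask(64));
	return 0;
}
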
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48a52b35b614..851c1a159be8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 9
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -76,6 +76,9 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
/* required last entry */
{0, }
};
@@ -520,7 +523,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
if (likely(new_data >= *offset))
*stat = new_data - *offset;
else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat = (new_data + BIT_ULL(48)) - *offset;
*stat &= 0xFFFFFFFFFFFFULL;
}
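
i40e_stat_update48() reads a free-running 48-bit hardware counter that can wrap between polls: when the new reading falls below the stored offset, one full 2^48 period is added back before subtracting, and the result is masked to 48 bits. The arithmetic in isolation (delta48 is a hypothetical name):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

static uint64_t delta48(uint64_t new_data, uint64_t offset)
{
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else	/* counter wrapped: restore one full 48-bit period */
		stat = (new_data + BIT_ULL(48)) - offset;
	return stat & 0xFFFFFFFFFFFFULL;
}

int main(void)
{
	/* offset near the top of the range, new reading just past the wrap:
	 * the true delta of 10 survives the wrap-around
	 */
	printf("delta = %llu\n",
	       (unsigned long long)delta48(5, 0xFFFFFFFFFFFFULL - 4));
	return 0;
}
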
@@ -543,7 +546,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
if (likely(new_data >= *offset))
*stat = (u32)(new_data - *offset);
else
- *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+ *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
@@ -621,11 +624,15 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- int idx = 0;
+ struct i40e_veb_tc_stats *veb_oes;
+ struct i40e_veb_tc_stats *veb_es;
+ int i, idx = 0;
idx = veb->stats_idx;
es = &veb->stats;
oes = &veb->stats_offsets;
+ veb_es = &veb->tc_stats;
+ veb_oes = &veb->tc_stats_offsets;
/* Gather up the stats that the hw collects */
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
@@ -661,6 +668,28 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
veb->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+ I40E_GLVEBTC_RPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_packets[i],
+ &veb_es->tc_rx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+ I40E_GLVEBTC_RBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_bytes[i],
+ &veb_es->tc_rx_bytes[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+ I40E_GLVEBTC_TPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_packets[i],
+ &veb_es->tc_tx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+ I40E_GLVEBTC_TBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_bytes[i],
+ &veb_es->tc_tx_bytes[i]);
+ }
veb->stat_offsets_loaded = true;
}
@@ -1123,6 +1152,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ nsd->fd_sb_status = true;
+ else
+ nsd->fd_sb_status = false;
+
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ nsd->fd_atr_status = true;
+ else
+ nsd->fd_atr_status = false;
+
pf->stat_offsets_loaded = true;
}
@@ -1240,6 +1281,8 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
struct i40e_mac_filter *f;
list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (vsi->info.pvid)
+ f->vlan = le16_to_cpu(vsi->info.pvid);
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
@@ -1264,7 +1307,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status aq_ret;
+ i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1318,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (aq_ret)
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (ret)
return -ENOENT;
return 0;
@@ -1514,7 +1557,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i)) /* TC is enabled */
+ if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
numtc++;
}
if (!numtc) {
@@ -1533,14 +1576,18 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* vectors available and so we need to lower the used
* q count.
*/
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ else
+ qcount = vsi->alloc_queue_pairs;
num_tc_qps = qcount / numtc;
- num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+ num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
- if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+ /* TC is enabled */
int pow, num_qps;
switch (vsi->type) {
@@ -1566,7 +1613,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
- while (num_qps && ((1 << pow) < qcount)) {
+ while (num_qps && (BIT_ULL(pow) < qcount)) {
pow++;
num_qps >>= 1;
}
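
The queue-map hunk above walks pow upward until BIT_ULL(pow) reaches qcount, effectively computing the next power of two so the queue count written into the VSI context is always a power of two. The same loop in isolation, with an illustrative qcount:

#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	int qcount = 6;		/* illustrative queue-pair count */
	int num_qps = qcount;
	int pow = 0;

	/* find the next higher power-of-2 of num queue pairs */
	while (num_qps && (BIT_ULL(pow) < (unsigned long long)qcount)) {
		pow++;
		num_qps >>= 1;
	}
	printf("qcount %d -> 2^%d = %llu queues\n",
	       qcount, pow, (unsigned long long)BIT_ULL(pow));
	return 0;
}
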
@@ -1596,7 +1643,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
- else
+ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
}
@@ -1716,10 +1763,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_err = 0;
u16 cmd_flags;
/* empty array typed pointers, kcalloc later */
@@ -1771,31 +1819,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid, del_list, num_del,
- NULL);
+ ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret &&
- pf->hw.aq.asq_last_status !=
- I40E_AQ_RC_ENOENT)
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
- aq_ret,
- pf->hw.aq.asq_last_status);
+ "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- if (aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "ignoring delete macvlan error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
kfree(del_list);
@@ -1833,29 +1881,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add,
- NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
- if (aq_ret)
+ if (ret)
break;
memset(add_list, 0, sizeof(*add_list));
}
}
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
}
kfree(add_list);
add_list = NULL;
- if (add_happened && aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+ if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
dev_info(&pf->pdev->dev,
- "add filter failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "add filter failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
@@ -1871,34 +1921,60 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_multipromisc,
- NULL);
- if (aq_ret)
+ ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multi promisc failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
- dev_info(&pf->pdev->dev,
- "set uni promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
+ if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (pf->cur_promisc != cur_promisc) {
+ pf->cur_promisc = cur_promisc;
+ i40e_do_reset_safe(pf,
+ BIT(__I40E_PF_RESET_REQUESTED));
+ }
+ } else {
+ ret = i40e_aq_set_vsi_unicast_promiscuous(
+ &vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = i40e_aq_set_vsi_multicast_promiscuous(
+ &vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2070,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2023,8 +2101,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2294,7 +2374,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status aq_ret;
+ i40e_status ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2384,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ctxt.seid = vsi->seid;
ctxt.info = vsi->info;
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "add pvid failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -2696,9 +2778,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
#endif /* I40E_FCOE */
/* round up for the chip's needs */
vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2810,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
}
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
- if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
continue;
qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -2877,6 +2959,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
if (pf->flags & I40E_FLAG_PTP)
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
@@ -3167,6 +3252,13 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ }
+
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -3373,7 +3465,7 @@ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
* @v_idx: vector index
* @qp_idx: queue pair index
**/
-static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
@@ -3427,7 +3519,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
q_vector->tx.ring = NULL;
while (num_ringpairs--) {
- map_vector_to_qp(vsi, v_start, qp_idx);
+ i40e_map_vector_to_qp(vsi, v_start, qp_idx);
qp_idx++;
qp_remaining--;
}
@@ -3929,6 +4021,7 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
+ vsi->current_netdev_flags = 0;
}
/**
@@ -4073,7 +4166,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
if (app.selector == I40E_APP_SEL_TCPIP &&
app.protocolid == I40E_APP_PROTOID_ISCSI) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT_ULL(tc);
break;
}
}
@@ -4122,7 +4215,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
u8 i;
for (i = 0; i < num_tc; i++)
- enabled_tc |= 1 << i;
+ enabled_tc |= BIT(i);
return enabled_tc;
}
@@ -4157,7 +4250,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
num_tc++;
}
return num_tc;
@@ -4179,11 +4272,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
/* Find the first enabled TC */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
break;
}
- return 1 << i;
+ return BIT(i);
}
/**
@@ -4221,26 +4314,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ i40e_status ret;
u32 tc_bw_max;
int i;
/* Get the VSI level BW configuration */
- aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
/* Get the VSI level BW configuration per TC */
- aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -4279,16 +4374,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status aq_ret;
+ i40e_status ret;
int i;
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+ NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4432,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
* will set the numtc for netdev as 2 that will be
* referenced by the netdev layer as TC 0 and 1.
*/
- if (vsi->tc_config.enabled_tc & (1 << i))
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i))
netdev_set_tc_queue(netdev,
vsi->tc_config.tc_info[i].netdev_tc,
vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4494,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_share[i] = 1;
}
@@ -4423,8 +4518,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Update vsi tc config failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -4435,8 +4532,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "Failed updating vsi bw info, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Failed updating vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
@@ -4469,7 +4568,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_data.tc_bw_share_credits[i] = 1;
}
@@ -4477,8 +4576,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "veb bw config failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "VEB bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -4486,8 +4586,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "Failed getting veb bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4574,8 +4675,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "AQ command Resume Port Tx failed = %d\n",
- pf->hw.aq.asq_last_status);
+ "Resume Port Tx failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -4627,8 +4729,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
}
} else {
dev_info(&pf->pdev->dev,
- "AQ Querying DCB configuration failed: aq_err %d\n",
- pf->hw.aq.asq_last_status);
+ "Query for DCB configuration failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4859,7 +4962,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
/* Generate TC map for number of tc requested */
for (i = 0; i < tc; i++)
- enabled_tc |= (1 << i);
+ enabled_tc |= BIT_ULL(i);
/* Requesting same TC configuration as already enabled */
if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5101,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return err;
}
@@ -5066,7 +5169,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
- if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
/* Request a Global Reset
*
@@ -5081,7 +5184,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
/* Request a Core Reset
*
@@ -5093,7 +5196,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
*
@@ -5106,7 +5209,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
- } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
/* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5226,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* no further action needed, so return now */
return;
- } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
/* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5356,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto exit;
}
@@ -5761,23 +5867,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
}
if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
}
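
i40e_reset_subtask() drains each requested-reset bit from pf->state into a local reset_flags word under rtnl_lock, then i40e_do_reset() issues only the biggest reset those flags call for. A simplified sketch of the drain-then-dispatch pattern, with plain bit ops standing in for the kernel's test_bit()/clear_bit():

#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))

enum { PF_RESET_REQUESTED = 0, CORE_RESET_REQUESTED = 1,
       GLOBAL_RESET_REQUESTED = 2 };

int main(void)
{
	unsigned long long state = BIT_ULL(PF_RESET_REQUESTED) |
				   BIT_ULL(GLOBAL_RESET_REQUESTED);
	unsigned long long reset_flags = 0;
	int bit;

	/* move every pending request out of the live state word */
	for (bit = PF_RESET_REQUESTED; bit <= GLOBAL_RESET_REQUESTED; bit++) {
		if (state & BIT_ULL(bit)) {
			reset_flags |= BIT_ULL(bit);
			state &= ~BIT_ULL(bit);
		}
	}

	/* dispatch only the biggest reset requested */
	if (reset_flags & BIT_ULL(GLOBAL_RESET_REQUESTED))
		printf("global reset\n");
	else if (reset_flags & BIT_ULL(CORE_RESET_REQUESTED))
		printf("core reset\n");
	else if (reset_flags & BIT_ULL(PF_RESET_REQUESTED))
		printf("PF reset\n");
	return 0;
}
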
@@ -5983,27 +6089,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6017,27 +6125,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6097,7 +6207,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
ret = i40e_add_vsi(ctl_vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "rebuild of owner VSI failed: %d\n", ret);
+ "rebuild of veb_idx %d owner VSI failed: %d\n",
+ veb->idx, ret);
goto end_reconstitute;
}
i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6287,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
dev_info(&pf->pdev->dev,
- "capability discovery failed: aq=%d\n",
- pf->hw.aq.asq_last_status);
+ "capability discovery failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -6363,7 +6476,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -6373,11 +6488,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
- if (ret) {
- dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
- ret);
+ if (ret)
goto end_core_reset;
- }
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
@@ -6418,12 +6530,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure our flow control settings are restored */
ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
if (ret)
- dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+ dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -6484,8 +6600,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6765,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_vxlan_bitmap & (1 << i)) {
- pf->pending_vxlan_bitmap &= ~(1 << i);
+ if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+ pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
port = pf->vxlan_ports[i];
if (port)
ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6777,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
- "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
port ? "add" : "delete",
- ntohs(port), i, ret,
- pf->hw.aq.asq_last_status);
+ ntohs(port), i,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
pf->vxlan_ports[i] = 0;
}
}
@@ -7013,6 +7133,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+ tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -7411,62 +7535,139 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
}
/**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool pf_lut = false;
+ u8 *rss_lut;
+ int ret, i;
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
+
+ rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ /* Populate the LUT with the max number of queues in round-robin fashion */
+ for (i = 0; i < vsi->rss_table_size; i++)
+ rss_lut[i] = i % vsi->rss_size;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+ }
+
+ if (vsi->type == I40E_VSI_MAIN)
+ pf_lut = true;
+
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+ vsi->rss_table_size);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_pf *pf = vsi->back;
+
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
* @pf: board private structure
+ * @seed: RSS hash seed
**/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
{
- u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 current_queue = 0;
u32 lut = 0;
int i, j;
- u64 hena;
- u32 reg_val;
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ /* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (current_queue == vsi->rss_size)
+ current_queue = 0;
+ lut |= ((current_queue) << (8 * j));
+ current_queue++;
+ }
+ wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= I40E_DEFAULT_RSS_HENA;
+ hena |= i40e_pf_get_default_rss_hena(pf);
+
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
- /* Check capability and Set table size and register per hw expectation*/
+ /* Determine the RSS table size based on the hardware capabilities */
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (pf->rss_table_size == 512)
- reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- else
- reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+ reg_val = (pf->rss_table_size == 512) ?
+ (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+ (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
wr32(hw, I40E_PFQF_CTL_0, reg_val);
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
- /* The assumption is that lan qp count will be the highest
- * qp count for any PF VSI that needs RSS.
- * If multiple VSIs need RSS support, all the qp counts
- * for those VSIs should be a power of 2 for RSS to work.
- * If LAN VSI is the only consumer for RSS then this requirement
- * is not necessary.
- */
- if (j == vsi->rss_size)
- j = 0;
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
- i40e_flush(hw);
-
- return 0;
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed);
+
+ return i40e_config_rss_reg(pf, seed);
}
/**
@@ -7533,7 +7734,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
i40e_status status;
/* Set the valid bit for this PF */
- bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
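+ /* Note: BIT(n) expands to (1UL << (n)) and BIT_ULL(n) to
+ * (1ULL << (n)), so the BIT()/BIT_ULL() conversions throughout
+ * this patch are purely cosmetic and do not change behavior.
+ */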
bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
@@ -7567,8 +7768,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for read access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7583,8 +7785,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
- ret, last_aq_status);
+ dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7596,8 +7799,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for write access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
/* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7819,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %d aq_err %d\n",
- ret, last_aq_status);
+ "BW settings NOT SAVED, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
return ret;
@@ -7662,7 +7867,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
- pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
pf->rss_size = 1;
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7878,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_npar_bw_setting(pf))
@@ -7703,9 +7908,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
if (pf->hw.func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
}
#ifdef I40E_FCOE
@@ -7723,6 +7927,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+ I40E_FLAG_128_QP_RSS_CAPABLE |
+ I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+ I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+ I40E_FLAG_WB_ON_ITR_CAPABLE |
+ I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+ }
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
@@ -7812,7 +8024,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return 0;
}
@@ -7875,10 +8087,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
- pf->pending_vxlan_bitmap |= (1 << next_idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
}
/**
@@ -7906,7 +8116,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
- pf->pending_vxlan_bitmap |= (1 << idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8191,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-#ifdef HAVE_BRIDGE_ATTRIBS
/**
* i40e_ndo_bridge_setlink - Set the hardware bridge mode
* @dev: the netdev being configured
@@ -7995,7 +8204,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* bridge mode enabled.
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh,
+ u16 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8276,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
* Return the mode in which the hardware bridge is operating in
* i.e VEB or VEPA.
**/
-#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8302,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
nlflags, 0, 0, filter_mask, NULL);
}
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @netdev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation &&
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ I40E_MAX_TUNNEL_HDR_LEN))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
+}
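+
+/* Illustration of the length check above (header sizes assumed): for
+ * plain VXLAN, skb_inner_mac_header() - skb_transport_header() spans
+ * the outer UDP header (8 bytes) plus the VXLAN header (8 bytes), well
+ * under I40E_MAX_TUNNEL_HDR_LEN; only when the outer L4-plus-tunnel
+ * region exceeds 80 bytes are checksum and GSO offloads masked off so
+ * the stack falls back to software.
+ */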
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
@@ -8133,10 +8356,9 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -8304,8 +8526,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -8327,8 +8551,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "update vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8345,9 +8571,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
- enabled_tc, ret,
- pf->hw.aq.asq_last_status);
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ enabled_tc,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
}
}
@@ -8438,8 +8666,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "add vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8484,8 +8714,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -8615,6 +8846,11 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
+ /* In Legacy mode, we do not have to get any other vector since we
+ * piggyback on the misc/ICR0 for queue interrupts.
+ */
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return ret;
if (vsi->num_q_vectors)
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
vsi->num_q_vectors, vsi->idx);
@@ -8658,7 +8894,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
- "failed to get tracking for %d queues for VSI %d err=%d\n",
+ "failed to get tracking for %d queues for VSI %d err %d\n",
vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
@@ -8857,6 +9093,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vsi->type == I40E_VSI_VMDQ2)) {
+ ret = i40e_vsi_config_rss(vsi);
+ }
return vsi;
err_rings:
@@ -8896,8 +9136,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -8905,8 +9146,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw ets config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -9090,36 +9332,40 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = false;
+ struct i40e_pf *pf = veb->pf;
+ bool is_default = pf->cur_promisc;
bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
- ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
veb->enabled_tc, is_default,
is_cloud, &veb->seid, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't add VEB, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't add VEB, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
/* get statistics counter */
- ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB statistics idx, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB bw info, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
- i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -9325,8 +9571,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed %d aq_err=%x\n",
- ret, pf->hw.aq.asq_last_status);
+ "get switch config failed err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -9367,8 +9615,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't fetch switch config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -9743,7 +9992,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_shared_code(hw);
if (err) {
- dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
goto err_pf_reset;
}
@@ -9910,15 +10160,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10260,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
- err);
+ dev_info(&pf->pdev->dev,
+ "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* print a string summarizing features */
@@ -10247,6 +10503,19 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM,
+ (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC,
+ (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
i40e_clear_interrupt_scheme(pf);
if (system_state == SYSTEM_POWER_OFF) {
@@ -10267,9 +10536,6 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
- i40e_fdir_teardown(pf);
rtnl_lock();
i40e_prep_for_reset(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d02683..9b83abc0e774 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */
- nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
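+ /* e.g. (encoding assumed): GENS_SR_SIZE = 4 gives BIT(4) = 16,
+ * i.e. a 16KB Shadow RAM = 16 * 512 = 8192 sixteen-bit words.
+ */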
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
/* Write the address and start reading */
- sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
- (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -212,6 +212,74 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit word buffer from the Shadow RAM using the AdminQ command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ i40e_status ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot span two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot span two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
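+
+/* Boundary illustration for the checks above (assuming
+ * I40E_SR_SECTOR_SIZE_IN_WORDS = 2048, i.e. 4KB sectors): a read at
+ * offset = 2040 for words = 16 would end in the next sector
+ * ((2040 + 15) / 2048 = 1 while 2040 / 2048 = 0) and is rejected,
+ * whereas words = 8 stays within sector 0 and is issued to the AQ.
+ */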
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ
+ * (i40e_read_nvm_aq()) method.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = le16_to_cpu(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
* i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -222,6 +290,8 @@ read_nvm_exit:
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
}
@@ -257,6 +327,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method, splitting the request at sector boundaries; the caller is
+ * responsible for taking and releasing NVM ownership around the read.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ do {
+ /* Calculate the number of words to read in this step. The FVL
+ * AQ does not allow reading more than one sector (4KB) at a
+ * time or crossing sector boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
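+
+/* Chunking illustration (assuming 2048-word sectors): offset = 2040,
+ * *words = 100 issues two AQ reads: min(100, 2048 - 2040) = 8 words up
+ * to the sector boundary, then min(100 - 8, 2048) = 92 words from
+ * offset 2048, with last_cmd set only on the second read.
+ */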
+
+/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -270,6 +397,8 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e660ea..dcb72a8ee8e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b7725dec3..8c40d6ea15fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
- I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
/**
@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
- if (!(prttsyn_stat & (1 << index)))
+ if (!(prttsyn_stat & BIT(index)))
return;
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 522d6df51330..dc0402fe3370 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -873,6 +873,13 @@
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -3366,4 +3373,1933 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
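The *_SHIFT/*_MASK pairs above all follow one idiom: I40E_MASK() produces the field's bit mask already shifted into position, so a field is extracted by masking first and then shifting down. Below is a minimal sketch of that idiom against the VPLAN_QBASE fields just defined; i40e_get_field() is a hypothetical helper added for illustration (not part of this patch), and rd32() is assumed to be the driver's usual 32-bit register-read accessor:

static inline u32 i40e_get_field(u32 reg_val, u32 mask, u32 shift)
{
	/* the mask is pre-shifted by I40E_MASK(), so mask first, then shift */
	return (reg_val & mask) >> shift;
}

/* e.g. the number of queues mapped to VF 'vf':
 *	u32 qbase = rd32(hw, I40E_VPLAN_QBASE(vf));
 *	u32 numq  = i40e_get_field(qbase, I40E_VPLAN_QBASE_VFNUMQ_MASK,
 *				   I40E_VPLAN_QBASE_VFNUMQ_SHIFT);
 */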
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
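
The block above is the whole pattern of this register header: every field gets a _SHIFT for its bit offset and a _MASK built by I40E_MASK(max_value, shift). A minimal standalone sketch of how such pairs are used to extract and update a field (I40E_MASK is re-declared locally so the sketch compiles on its own; the register value is a made-up constant, not something from this patch):

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK(mask, shift)	((uint32_t)(mask) << (shift))

#define I40E_VSIQF_HLUT_LUT1_SHIFT	8
#define I40E_VSIQF_HLUT_LUT1_MASK	I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)

int main(void)
{
	uint32_t reg = 0x00000a00;	/* hypothetical register read */

	/* extract: mask the field, then shift it down to bit 0 */
	uint32_t lut1 = (reg & I40E_VSIQF_HLUT_LUT1_MASK) >>
			I40E_VSIQF_HLUT_LUT1_SHIFT;

	/* read-modify-write: clear the field, then insert a new value */
	reg &= ~I40E_VSIQF_HLUT_LUT1_MASK;
	reg |= (0x3u << I40E_VSIQF_HLUT_LUT1_SHIFT) & I40E_VSIQF_HLUT_LUT1_MASK;

	printf("lut1=%u reg=0x%08x\n", (unsigned)lut1, (unsigned)reg);
	return 0;
}
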
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc70cd2..738aca68f665 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_info(&pdev->dev,
"FD filter programming failed due to incorrect filter parameters\n");
}
- } else if (error ==
- (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
@@ -854,15 +853,40 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
**/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ } else {
+ u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
}
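
The rework above splits forced descriptor write-back into three paths: rings with write-back-on-ITR (the new X722 feature) merely arm hardware write-back and latch q_vector->arm_wb_state so the register is written once per poll cycle, MSI-X vectors raise a software interrupt through DYN_CTLN, and legacy/MSI falls back to DYN_CTL0. A condensed sketch of the first path, assuming the driver's wr32() helper and the DYN_CTLN macros; an illustration, not a drop-in replacement:

	/* X722 path: ask HW to write back completed descriptors at the
	 * next ITR expiry instead of raising a software interrupt, and
	 * do it at most once per poll cycle.
	 */
	if ((q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) &&
	    !q_vector->arm_wb_state) {
		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK);
		q_vector->arm_wb_state = true;
	}

The state is cleared again in i40e_napi_poll() below; note that the clearing site tests I40E_TXR_FLAGS_WB_ON_ITR, a per-ring flag bit, against vsi->back->flags, while the setting site here reads it from q_vector->tx.ring[0].flags, which looks like the intended operand in both places.
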
/**
@@ -892,7 +916,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* 20-1249MB/s bulk (8000 ints/s)
*/
bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +929,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
case I40E_BULK_LATENCY:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ default:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
@@ -923,42 +952,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
+ if (new_itr != rc->itr)
+ rc->itr = new_itr;
rc->total_bytes = 0;
rc->total_packets = 0;
}
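
Two fixes land in this hunk: the classifier now switches on new_latency_range instead of rc->itr (an ITR interval in register units could never match the latency-range enum values, so the old switch behaved like the default case), and the result is stored back into rc->latency_range before the ITR value is chosen, with the exponential smoothing dropped in favor of adopting the new value directly. A standalone sketch of the bytes-per-interrupt classifier; the enum names are local stand-ins and the low-latency thresholds follow the driver's 10/20 boundaries shown above:

#include <stdio.h>

enum latency_range { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

/* Simplified classifier: move between latency ranges based on how
 * many bytes arrived per interrupt since the last adjustment.
 */
static enum latency_range classify(enum latency_range cur,
				   unsigned int bytes_per_int)
{
	switch (cur) {
	case LOWEST_LATENCY:
		return bytes_per_int > 10 ? LOW_LATENCY : cur;
	case LOW_LATENCY:
		if (bytes_per_int > 20)
			return BULK_LATENCY;
		return bytes_per_int <= 10 ? LOWEST_LATENCY : cur;
	default:	/* BULK_LATENCY and anything unexpected */
		return bytes_per_int <= 20 ? LOW_LATENCY : cur;
	}
}

int main(void)
{
	/* 15 bytes/int while in lowest-latency mode -> low latency */
	printf("%d\n", classify(LOWEST_LATENCY, 15));
	return 0;
}
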
/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
* i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
* @rx_desc: the rx descriptor written back by HW
@@ -1386,7 +1387,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1402,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed; in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1429,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel) {
+ if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+ (ipv4_tunnel)) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1543,7 +1545,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1586,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1639,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1649,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1669,7 +1671,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1730,7 +1732,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1755,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1773,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
@@ -1802,7 +1804,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1827,6 +1829,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
}
/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ u16 old_itr;
+ int vector;
+ u32 val;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr) {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_RX_ITR <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->rx.itr <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+ } else {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+ } else {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ }
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr) {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_TX_ITR <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (q_vector->tx.itr <<
+ I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+ } else {
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ } else {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ }
+}
+
+/**
* i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1880,35 +1944,29 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state)) {
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_irq_dynamic_enable(vsi,
- q_vector->v_idx + vsi->base_vector);
- } else {
- struct i40e_hw *hw = &vsi->back->hw;
- /* We re-enable the queue 0 cause, but
- * don't worry about dynamic_enable
- * because we left it on for the other
- * possible interrupts during napi
- */
- u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
- qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), qval);
-
- qval = rd32(hw, I40E_QINT_TQCTL(0));
- qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), qval);
-
- i40e_irq_dynamic_enable_icr0(vsi->back);
- }
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_update_enable_itr(vsi, q_vector);
+ } else { /* Legacy mode */
+ struct i40e_hw *hw = &vsi->back->hw;
+ /* We re-enable the queue 0 cause, but
+ * don't worry about dynamic_enable
+ * because we left it on for the other
+ * possible interrupts during napi
+ */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+ qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
-
return 0;
}
@@ -1982,6 +2040,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+ /* HW ATR eviction will take care of removing filters on FIN
+ * and RST packets.
+ */
+ if (th->fin || th->rst)
+ return;
+ }
tx_ring->atr_count++;
@@ -2037,6 +2102,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
@@ -2244,11 +2312,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
@@ -2285,6 +2357,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
*tx_flags &= ~I40E_TX_FLAGS_IPV4;
*tx_flags |= I40E_TX_FLAGS_IPV6;
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2616,6 +2697,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)))
writel(i, tx_ring->tail);
+ else
+ prefetchw(tx_desc + 1);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc9ca61..f1385a1989fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -66,17 +66,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
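
The hash-enable word (HENA) is a 64-bit bitmap with one bit per packet-classifier type, so the BIT_ULL() conversion makes the width explicit where (u64)1 << n did it by cast. i40e_pf_get_default_rss_hena() then selects the expanded set only when the PF reports the extra X722 PCTYPEs. A short usage sketch, assuming the driver's wr32() helper and the I40E_PFQF_HENA(n) register pair, which live outside this hunk:

	/* program the default hash-enable set for this PF */
	u64 hena = i40e_pf_get_default_rss_hena(pf);

	wr32(&pf->hw, I40E_PFQF_HENA(0), lower_32_bits(hena));
	wr32(&pf->hw, I40E_PFQF_HENA(1), upper_32_bits(hena));
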
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -129,17 +141,17 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_TSYN BIT(8)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
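
The BIT() rewrite of the Tx flag set changes no values; it only states the intent that the low bits are independent flags while, per the unchanged masks above, the top 16 bits carry the VLAN tag. A standalone sketch of packing and unpacking one of these words (BIT() and a VLAN shift of 16 are re-declared locally so it compiles on its own):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_SHIFT	16

int main(void)
{
	uint32_t tx_flags = 0;

	/* pack: request HW VLAN insertion and stash the tag up top */
	tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	tx_flags |= (100u << I40E_TX_FLAGS_VLAN_SHIFT) &
		    I40E_TX_FLAGS_VLAN_MASK;

	/* unpack */
	if (tx_flags & I40E_TX_FLAGS_HW_VLAN)
		printf("vlan %u\n",
		       (unsigned)((tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
				  I40E_TX_FLAGS_VLAN_SHIFT));
	return 0;
}
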
@@ -253,6 +265,10 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9a5a75b1e2bc..4842239ee777 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -47,6 +47,11 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -213,7 +220,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -423,6 +440,7 @@ struct i40e_dcbx_config {
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
struct i40e_dcb_ets_config etsrec;
struct i40e_dcb_pfc_config pfc;
@@ -487,11 +505,13 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -588,19 +608,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ /* Note: For non-tunnel packets, INT_UDP_0 is the correct status bit
+ * for the UDP header.
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +632,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -743,8 +767,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -920,12 +943,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -937,6 +960,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -955,15 +980,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -990,8 +1024,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+ BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1009,14 +1043,17 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1069,6 +1106,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
#ifdef I40E_FCOE
/* Statistics collected per function for FCoE */
struct i40e_fcoe_stats {
@@ -1134,6 +1179,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af290fbf..0f8d4156f8b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
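
With API 1.1 the VF advertises a capability bitmap and the PF answers with the subset it actually grants, using these same bits in vf_offload_flags. A small sketch of the PF-side grant decision; it condenses the RSS branch added to i40e_vc_get_vf_resources_msg() further down in this patch, and the helper name is hypothetical:

	/* caps in from the VF, granted flags out */
	static u32 grant_offloads(u32 driver_caps, bool pf_has_rss_aq)
	{
		u32 granted = I40E_VIRTCHNL_VF_OFFLOAD_L2;

		if (pf_has_rss_aq &&
		    (driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			granted |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			granted |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;

		return granted;
	}
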
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 23f95cdbdfcc..d99c116032f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
}
/**
@@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
}
tempmap = vecmap->rxq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES *
- vsi_queue_id));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
}
tempmap = vecmap->txq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
- + 1));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id + 1));
}
next_q = find_first_bit(&linklistmap,
@@ -337,11 +330,23 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
reg = (vector_id) |
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
wr32(hw, reg_idx, reg);
}
+ /* If the VF is running in polling mode and using interrupt zero,
+ * we need to disable auto-masking when enabling interrupt zero.
+ */
+ if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+ (vector_id == 0)) {
+ reg = rd32(hw, I40E_GLINT_CTL);
+ if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+ reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ wr32(hw, I40E_GLINT_CTL, reg);
+ }
+ }
+
irq_list_done:
i40e_flush(hw);
}
@@ -542,11 +547,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id, true, false);
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF MAC addr\n");
- f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ f = i40e_add_filter(vsi, brdcast,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
if (!f)
dev_info(&pf->pdev->dev,
@@ -835,6 +842,7 @@ complete_reset:
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +907,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -925,8 +933,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to enable SR-IOV, error %d.\n", ret);
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -1123,12 +1129,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
*
* called from the VF to request the API version used by the PF
**/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_version_info info = {
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
};
+ vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(vf))
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
sizeof(struct
@@ -1143,7 +1153,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
*
* called from the VF to request its resources
**/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
@@ -1167,12 +1177,24 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
len = 0;
goto err;
}
+ if (VF_IS_V11(vf))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ } else {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1773,9 +1795,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(vf))
+ valid_len = sizeof(u32);
+ else
+ valid_len = 0;
+ break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
@@ -1888,10 +1915,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
- ret = i40e_vc_get_version_msg(vf);
+ ret = i40e_vc_get_version_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- ret = i40e_vc_get_vf_resources_msg(vf);
+ ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1996,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & (1 << bit_idx)) {
+ if (reg & BIT(bit_idx)) {
/* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
if (!test_bit(__I40E_DOWN, &pf->state))
i40e_reset_vf(vf, true);
@@ -2023,7 +2050,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ i40e_del_filter(vsi, vf->default_lan_addr.addr,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
/* Delete all the filters for this VSI - we're going to kill it
@@ -2088,7 +2116,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ if (le16_to_cpu(vsi->info.pvid) ==
+ (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+ /* duplicate request, so just return success */
+ goto error_pvid;
+
+ if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1aae54..736f6f08b4f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -42,6 +42,9 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
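
These two macros key all of the PF's version-dependent behavior off the i40e_virtchnl_version_info the VF sent: 1.0 VFs get a 1.0 answer back and send GET_VF_RESOURCES with no payload, while 1.1 VFs send a u32 capability bitmap. A one-line sketch of how the message-length validation falls out of them (mirrors the i40e_vc_validate_vf_msg() hunk earlier in this file's diff; the helper name is hypothetical):

	static u16 vf_resources_msg_len(struct i40e_vf *vf)
	{
		/* 1.1 VFs carry their capability bitmap, 1.0 VFs nothing */
		return VF_IS_V11(vf) ? sizeof(u32) : 0;
	}
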
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@ struct i40e_vf {
u16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
+ struct i40e_virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */
u16 stag;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c1d25f8c1abc..f08450b90774 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -60,17 +60,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
hw->aq.arq.len = I40E_VF_ARQLEN1;
hw->aq.arq.bal = I40E_VF_ARQBAL1;
hw->aq.arq.bah = I40E_VF_ARQBAH1;
- } else {
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
}
}
@@ -308,7 +297,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
- I40E_PF_ATQLEN_ATQENABLE_MASK));
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
@@ -337,7 +326,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
- I40E_PF_ARQLEN_ARQENABLE_MASK));
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
@@ -899,7 +888,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
mutex_lock(&hw->aq.arq_mutex);
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e715bccfb5d2..c8022092d369 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,8 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
-#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -133,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -260,7 +254,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -272,8 +269,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -507,7 +502,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -530,7 +526,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -824,8 +822,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1066,6 +1068,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -2093,6 +2096,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure_A0 {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1dc4ea6..d45d0ae6bd3b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -54,6 +54,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
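
The two helpers above turn driver status codes and AQ return codes into printable strings for log messages; any code missing from the switch falls back to printing the raw number into hw->err_str, which is why struct i40e_hw grew a 16-byte err_str field in this patch. A typical (hypothetical) call site, assuming a status/asq_last_status pair from a just-issued AQ command:

	if (status)
		dev_err(&pdev->dev,
			"AQ command failed, err %s aq_err %s\n",
			i40evf_stat_str(hw, status),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
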
+/**
* i40evf_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -146,7 +361,7 @@ bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
else
return false;
}
@@ -177,6 +392,169 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+ status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+ cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+ status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
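
The four exported wrappers added above all funnel into the two static get/set helpers, so callers only ever touch the thin public API. A minimal caller sketch, not part of this patch: the function name, the 64-entry LUT, and the four-queue spread are illustrative assumptions.

	/* Hypothetical example: program a per-VSI RSS key and LUT using the
	 * wrappers introduced in this patch. vsi_id and the LUT geometry are
	 * made up for illustration. */
	static int example_config_rss(struct i40e_hw *hw, u16 vsi_id)
	{
		struct i40e_aqc_get_set_rss_key_data key = {};
		u8 lut[64];
		i40e_status status;
		int i;

		/* Spread flows round-robin across four queues. */
		for (i = 0; i < sizeof(lut); i++)
			lut[i] = i % 4;

		/* Fill the standard hash key with random bytes. */
		netdev_rss_key_fill(key.standard_rss_key,
				    sizeof(key.standard_rss_key));

		status = i40evf_aq_set_rss_key(hw, vsi_id, &key);
		if (status)
			return -EIO;

		/* pf_lut == false selects the per-VSI table. */
		status = i40evf_aq_set_rss_lut(hw, vsi_id, false,
					       lut, sizeof(lut));
		return status ? -EIO : 0;
	}
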
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c88044300..00ed24bfce13 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
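
The (1 << n) to BIT()/BIT_ULL() conversion in the hunks above is mechanical: BIT(n) expands to (1UL << (n)) and BIT_ULL(n) to (1ULL << (n)) in linux/bitops.h, so the computed values are unchanged, while the 64-bit variant can never shift into the sign bit of a plain int. A self-contained sketch of the equivalence (the function is illustrative, not from this patch):

	#include <linux/bitops.h>

	/* Demonstration only: old open-coded shift vs. new macro form for
	 * the PMSDWR bit (shift 31); both produce the same value. */
	static inline u64 sdcmd_value(u32 sd_index)
	{
		u64 old_val = sd_index | (1ULL << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);
		u64 new_val = sd_index | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);

		return old_val == new_val ? new_val : 0; /* always new_val */
	}
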
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a44b80a..55ae4b0f8192 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
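
The i40evf_stat_str()/i40evf_aq_str() prototypes added here mirror the PF driver's decoding helpers, turning numeric status codes into readable strings. A hedged sketch of the intended call pattern (the wrapper function and message text are illustrative only; hw->aq.asq_last_status holds the firmware's last admin queue return code):

	/* Hypothetical error-reporting helper built on the new prototypes. */
	static void example_report_aq_error(struct i40e_hw *hw, i40e_status status)
	{
		if (status)
			pr_err("admin queue command failed: %s, aq_err %s\n",
			       i40evf_stat_str(hw, status),
			       i40evf_aq_str(hw, hw->aq.asq_last_status));
	}
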
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc737629bf7..10febcfd7cd8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -27,1580 +27,6 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
-#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
-#define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
-#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
-#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
-#define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
-#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
-#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
-#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
-#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
-#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
-#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
-#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
-#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
-#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
-#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
-#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
-#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
-#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
-#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
-#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
-#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
-#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
-#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
-#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
-#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
-#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
-#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
-#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
-#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
-#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
-#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
-#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
-#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
-#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
-#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
-#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
-#define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
-#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
-#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
-#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
-#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
-#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
-#define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
-#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
#define I40E_VFMSIX_PBA1_MAX_INDEX 19
#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
@@ -1623,1525 +49,6 @@
#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
-#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
-#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
-#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
-#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
-#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
-#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
-#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
-#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
-#define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
-#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
-#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
-#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
-#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 25
-#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
-#define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
-#define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
-#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
-#define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
-#define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
-#define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
-#define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
@@ -3366,4 +273,64 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
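Everything removed above follows the header's uniform three-line pattern: an address macro (optionally indexed by (_i)), a field _SHIFT, and a field _MASK built with I40E_MASK(value, shift); the patch prunes the PF-only statistics, timesync and wake-up registers out of the VF copy of the header while adding the VF protocol-engine (VFPE) and WB_ON_ITR definitions kept above. A minimal sketch of how such a triple is consumed, using macros added in this hunk and assuming rd32() is the driver's 32-bit MMIO read helper and I40E_MASK(v, s) expands to ((v) << (s)), both as in the i40e headers (the function name is illustrative only):

	/* Hedged sketch: read the CQP major error code out of CQPERRCODES1. */
	static u16 i40e_vf_cqp_major_code(struct i40e_hw *hw)
	{
		u32 reg = rd32(hw, I40E_VFPE_CQPERRCODES1);

		/* mask the field in place, then shift it down to bit 0 */
		return (reg & I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK) >>
		       I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT;
	}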
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f226c0..7e91d825c760 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -366,15 +366,32 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
**/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else {
+ u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ }
}
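The rewritten i40e_force_wb() above now has two paths. When the Tx ring has WB_ON_ITR enabled it arms descriptor write-back instead of firing a software interrupt, and arm_wb_state ensures the vector is armed at most once per poll cycle. The value written in that branch, per the macro added earlier in this patch, works out to

	I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK == I40E_MASK(0x1, 30) == 0x40000000

i.e. only bit 30 is set and INTENA (bit 0) stays clear, so the hardware writes back completed descriptors at the next ITR expiry without raising an interrupt. The else branch keeps the old behaviour: trigger a software interrupt with the ITR index forced to no-ITR.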
/**
@@ -404,7 +421,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* 20-1249MB/s bulk (8000 ints/s)
*/
bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +434,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
case I40E_BULK_LATENCY:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ default:
+ if (bytes_per_int <= 20)
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
@@ -435,42 +457,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
+ if (new_itr != rc->itr)
+ rc->itr = new_itr;
rc->total_bytes = 0;
rc->total_packets = 0;
}
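Two fixes land in i40e_set_new_dynamic_itr() here. First, the state machine used to switch on rc->itr — an interval in microseconds — rather than on the latency-range enum, so the LOWEST/LOW/BULK cases effectively never matched; it now switches on new_latency_range and gains a default case. Second, the exponential smoothing of the new interval is dropped in favour of a direct assignment. For reference, the removed blend was biased toward the old value; assuming an old ITR of 100 and a new target of 20:

	new = (10 * 20 * 100) / (9 * 20 + 100)	/* = 20000 / 280 ≈ 71 */

so each pass moved the ITR only part of the way toward its target, whereas the new code converges in a single update.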
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/*
* i40evf_setup_tx_descriptors - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
*
@@ -873,7 +867,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
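This hunk and most of the small ones that follow mechanically convert open-coded single-bit tests to the kernel's BIT() helper from include/linux/bitops.h:

	#define BIT(nr) (1UL << (nr))

BIT(x) promotes to unsigned long, but every descriptor status/error shift touched here is well below bit 31, so the converted expressions are value-identical to the old (1 << x) forms; the change is for readability and consistency with the rest of the driver.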
@@ -888,25 +882,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1021,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1057,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1110,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1120,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1141,7 +1135,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1202,7 +1196,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1214,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1232,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
@@ -1262,7 +1256,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1281,6 +1275,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
}
/**
+ * i40e_update_enable_itr - Update ITR and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which ITR is being updated and interrupt enabled
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ u16 old_itr;
+ int vector;
+ u32 val;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr) {
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (I40E_RX_ITR <<
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (q_vector->rx.itr <<
+ I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+ } else {
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+ } else {
+ i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ }
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr) {
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (I40E_TX_ITR <<
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (q_vector->tx.itr <<
+ I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+ } else {
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (I40E_ITR_NONE <<
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+ }
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+ } else {
+ i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ }
+}
+
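The writes above compose a single VFINT_DYN_CTLN1 value that re-enables the interrupt, clears the pending-bit array, and optionally loads a new interval into one of the ITR registers. A minimal userspace sketch of that composition, assuming the bit layout from i40e_register.h (INTENA at bit 0, CLEARPBA at bit 1, the ITR index at bits 3-4, the interval from bit 5, commonly in 2 µs units); the macro names below are local stand-ins, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the I40E_VFINT_DYN_CTLN1_* layout. */
    #define DYN_CTLN1_INTENA         (1u << 0)  /* re-enable the interrupt */
    #define DYN_CTLN1_CLEARPBA       (1u << 1)  /* clear pending-bit array */
    #define DYN_CTLN1_ITR_INDX_SHIFT 3          /* which ITR register to load */
    #define DYN_CTLN1_INTERVAL_SHIFT 5          /* new interval value */

    static uint32_t dyn_ctl_val(uint32_t itr_indx, uint32_t interval)
    {
            return DYN_CTLN1_INTENA | DYN_CTLN1_CLEARPBA |
                   (itr_indx << DYN_CTLN1_ITR_INDX_SHIFT) |
                   (interval << DYN_CTLN1_INTERVAL_SHIFT);
    }

    int main(void)
    {
            /* Rx ITR index 0, interval 50 (~100 us at 2 us granularity) */
            printf("0x%08x\n", (unsigned)dyn_ctl_val(0, 50));
            return 0;
    }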
+/**
* i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1334,15 +1389,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ q_vector->arm_wb_state = false;
+
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state))
- i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+ i40e_update_enable_itr(vsi, q_vector);
return 0;
}
@@ -1476,11 +1528,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
@@ -1519,6 +1575,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -1841,6 +1906,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)))
writel(i, tx_ring->tail);
+ else
+ prefetchw(tx_desc + 1);
return;
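The outer-UDP seeding in the hunk above stores ~csum_tcpudp_magic(...) — the uncomplemented, folded IPv4 pseudo-header sum — in the outer UDP checksum field, so the hardware only has to fold in the payload to produce the final checksum. A standalone approximation of that pseudo-header fold (a simplification for illustration, not the kernel helper's implementation; byte order is ignored here):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold the IPv4 pseudo header (saddr, daddr, proto, length) to 16 bits,
     * roughly the quantity the driver seeds into oudph->check.
     */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint32_t len, uint8_t proto)
    {
            uint64_t sum = 0;

            sum += (saddr >> 16) + (saddr & 0xffff);
            sum += (daddr >> 16) + (daddr & 0xffff);
            sum += proto;
            sum += (len >> 16) + (len & 0xffff);

            while (sum >> 16)               /* end-around carry */
                    sum = (sum >> 16) + (sum & 0xffff);
            return (uint16_t)sum;
    }

    int main(void)
    {
            /* 192.0.2.1 -> 192.0.2.2, 1500 bytes of UDP (proto 17) */
            printf("0x%04x\n", pseudo_hdr_sum(0xC0000201, 0xC0000202, 1500, 17));
            return 0;
    }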
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f899f2c..9a30f5d8c089 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
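BIT_ULL() rather than BIT() matters throughout these HENA masks because several pctypes (32, 39, 40, 42) index bits at or beyond 31: the kernel's BIT() expands to 1UL << n, which is only 32 bits wide on 32-bit builds, while BIT_ULL() is 1ULL << n. A minimal demonstration, with local definitions mirroring include/linux/bitops.h:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)      (1UL << (n))    /* unsigned long: 32 bits on ILP32 */
    #define BIT_ULL(n)  (1ULL << (n))   /* always at least 64 bits wide    */

    int main(void)
    {
            /* pctype 42 (IPv6 TCP SYN-no-ACK) needs a 64-bit enable mask */
            uint64_t hena = BIT_ULL(41) | BIT_ULL(42) | BIT_ULL(43);

            printf("hena = 0x%016llx\n", (unsigned long long)hena);
            return 0;
    }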
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -129,16 +141,16 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -250,6 +262,10 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c463ec41579c..24a2693869a1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -47,6 +47,11 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -213,7 +220,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -417,6 +434,7 @@ struct i40e_ieee_app_priority_table {
struct i40e_dcbx_config {
u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
struct i40e_ieee_ets_config etscfg;
struct i40e_ieee_ets_recommend etsrec;
struct i40e_ieee_pfc_config pfc;
@@ -481,11 +499,13 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -582,19 +602,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ /* Note: for non-tunnel packets, INT_UDP_0 is the right status bit
+ * for the UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +626,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -737,8 +761,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -914,12 +937,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -931,6 +954,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -949,15 +974,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -984,8 +1018,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+ BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1003,8 +1037,7 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1063,6 +1096,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1109,6 +1150,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0e65dd..e6db20e8a395 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with a u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
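Under the version-1.1 protocol these offload bits are advertised by the VF in its GET_VF_RESOURCES request and echoed back, masked, by the PF; the VF then keys features off the returned flags. A toy sketch of that check, using the values from the defines above but with local names and illustrative control flow:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative subset of the I40E_VIRTCHNL_VF_OFFLOAD_* bitmap. */
    #define VF_OFFLOAD_L2       0x00000001
    #define VF_OFFLOAD_RSS_AQ   0x00000008
    #define VF_OFFLOAD_VLAN     0x00010000

    int main(void)
    {
            /* Pretend this is the bitmap the PF returned to the VF. */
            uint32_t caps = VF_OFFLOAD_L2 | VF_OFFLOAD_RSS_AQ | VF_OFFLOAD_VLAN;

            if (caps & VF_OFFLOAD_RSS_AQ)
                    puts("configure RSS via admin queue");
            else
                    puts("fall back to direct register writes");
            return 0;
    }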
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75a9a35..3817cbbf45e6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -101,6 +101,8 @@ struct i40e_vsi {
#define MAX_RX_QUEUES 8
#define MAX_TX_QUEUES MAX_RX_QUEUES
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
@@ -115,6 +117,7 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
+ bool arm_wb_state;
cpumask_var_t affinity_mask;
};
@@ -207,33 +210,39 @@ struct i40evf_adapter {
struct msix_entry *msix_entries;
u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
-/* duplcates for common code */
+#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
+#define I40EVF_FLAG_RESET_PENDING BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED BIT(10)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
+/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
/* flags for admin queue service task */
u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* OS defined structs */
struct net_device *netdev;
@@ -249,8 +258,17 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+ ((_a)->pf_version.minor == 1))
u16 msg_enable;
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
@@ -264,6 +282,7 @@ extern const char i40evf_driver_version[];
int i40evf_up(struct i40evf_adapter *adapter);
void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
void i40evf_reset(struct i40evf_adapter *adapter);
void i40evf_set_ethtool_ops(struct net_device *netdev);
void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c870e7f1..4790437a50ac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
break;
case TCP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4ab4ebba07a1..e85849b9ff98 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,10 +34,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.5"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 - 2014 Intel Corporation.";
+ "Copyright (c) 2013 - 2015 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -49,6 +49,7 @@ static const char i40evf_copyright[] =
*/
static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
/* required last entry */
{0, }
};
@@ -203,7 +204,7 @@ static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
- wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
/* read flush */
rd32(hw, I40E_VFGEN_RSTAT);
@@ -240,11 +241,11 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << (i - 1))) {
+ if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
}
}
}
@@ -262,17 +263,17 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
if (mask & 1) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
}
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << i)) {
+ if (mask & BIT(i)) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
}
}
@@ -312,7 +313,7 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
val = rd32(hw, I40E_VFINT_DYN_CTL01);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
/* schedule work on the private workqueue */
@@ -377,7 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= (1 << t_idx);
+ q_vector->ring_mask |= BIT(t_idx);
}
/**
@@ -406,7 +407,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
/* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (q_vectors == (rxr_remaining * 2)) {
+ if (q_vectors >= (rxr_remaining * 2)) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -892,8 +893,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
break;
}
}
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+ found = true;
}
- if (found) {
+ if (!found) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
@@ -1170,6 +1173,113 @@ out:
}
/**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_hw *hw = &adapter->hw;
+ int ret = 0, i;
+ u8 *rss_lut;
+
+ if (!vsi->id)
+ return;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
+
+ rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+ if (!rss_lut)
+ return;
+
+ /* Populate the LUT with the max number of PF queues in
+ * round-robin fashion
+ */
+ for (i = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++)
+ rss_lut[i] = i % adapter->num_active_queues;
+
+ ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return;
+ }
+
+ ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+ (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+ if (ret)
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS using direct register writes
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+ const u8 *seed)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 cqueue = 0;
+ u32 lut = 0;
+ int i, j;
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+ /* Populate the LUT with the max number of PF queues in round-robin fashion */
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (cqueue == adapter->num_active_queues)
+ cqueue = 0;
+ lut |= ((cqueue) << (8 * j));
+ cqueue++;
+ }
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+}
+
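Each I40E_VFQF_HLUT register packs four one-byte queue indices, which is why the inner loop above shifts by 8 * j. A standalone sketch of the same round-robin packing, with an illustrative register count standing in for I40E_VFQF_HLUT_MAX_INDEX + 1:

    #include <stdint.h>
    #include <stdio.h>

    #define HLUT_REGS 16    /* illustrative: I40E_VFQF_HLUT_MAX_INDEX + 1 */

    int main(void)
    {
            unsigned int num_queues = 3;    /* e.g. 3 active queues */
            unsigned int cqueue = 0;
            uint32_t lut;
            int i, j;

            for (i = 0; i < HLUT_REGS; i++) {
                    lut = 0;
                    for (j = 0; j < 4; j++) {   /* 4 entries per register */
                            if (cqueue == num_queues)
                                    cqueue = 0;
                            lut |= cqueue++ << (8 * j);
                    }
                    printf("HLUT[%2d] = 0x%08x\n", i, lut);
            }
            return 0;
    }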
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ if (RSS_AQ(adapter))
+ i40evf_configure_rss_aq(&adapter->vsi, seed);
+ else
+ i40evf_configure_rss_reg(adapter, seed);
+}
+
+/**
* i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -1369,6 +1479,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+ i40evf_send_vf_config_msg(adapter);
+ goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
i40evf_disable_queues(adapter);
@@ -1410,6 +1524,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+ /* This message goes straight to the firmware, not the
+ * PF, so we don't have to set current_op as we will
+ * not get a response through the ARQ.
+ */
+ i40evf_configure_rss(adapter);
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ goto watchdog_done;
+ }
+
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
@@ -1432,45 +1556,6 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
- u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
- struct i40e_hw *hw = &adapter->hw;
- u32 cqueue = 0;
- u32 lut = 0;
- int i, j;
- u64 hena;
-
- /* Hash type is configured by the PF - we just supply the key */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
- /* Fill out hash function seed */
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- lut = 0;
- for (j = 0; j < 4; j++) {
- if (cqueue == adapter->num_active_queues)
- cqueue = 0;
- lut |= ((cqueue) << (8 * j));
- cqueue++;
- }
- wr32(hw, I40E_VFQF_HLUT(i), lut);
- }
- i40e_flush(hw);
-}
-
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
@@ -1604,7 +1689,8 @@ continue_reset:
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
- i40evf_map_queues(adapter);
+ adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1700,7 @@ continue_reset:
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->add = true;
}
- adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
@@ -1693,34 +1779,34 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
oldval = val;
- if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
}
- if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
}
- if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.arq.len, val);
val = rd32(hw, hw->aq.asq.len);
oldval = val;
- if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
}
- if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
}
- if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
@@ -1856,6 +1942,7 @@ static int i40evf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
err = i40evf_up_complete(adapter);
@@ -1979,6 +2066,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
}
/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ adapter->vsi.netdev = adapter->netdev;
+ return 0;
+}
+
+/**
* i40evf_init_task - worker thread to perform delayed initialization
* @work: pointer to work_struct containing our data
*
@@ -1996,10 +2139,9 @@ static void i40evf_init_task(struct work_struct *work)
struct i40evf_adapter,
init_task.work);
struct net_device *netdev = adapter->netdev;
- struct i40evf_mac_filter *f;
struct i40e_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- int i, err, bufsz;
+ int err, bufsz;
switch (adapter->state) {
case __I40EVF_STARTUP:
@@ -2050,6 +2192,12 @@ static void i40evf_init_task(struct work_struct *work)
if (err) {
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
+ else
+ dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+ adapter->pf_version.major,
+ adapter->pf_version.minor,
+ I40E_VIRTCHNL_VERSION_MAJOR,
+ I40E_VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2233,15 @@ static void i40evf_init_task(struct work_struct *work)
default:
goto err_alloc;
}
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&pdev->dev, "No LAN VSI found\n");
+ if (i40evf_process_config(adapter))
goto err_alloc;
- }
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2251,6 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- goto err_sw_init;
-
- ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
- f->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
- list_add(&f->list, &adapter->mac_filter_list);
-
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &i40evf_watchdog_timer;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2154,24 +2265,14 @@ static void i40evf_init_task(struct work_struct *work)
if (err)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
- i40evf_configure_rss(adapter);
+ if (!RSS_AQ(adapter))
+ i40evf_configure_rss(adapter);
err = i40evf_request_misc_irq(adapter);
if (err)
goto err_sw_init;
netif_carrier_off(netdev);
- adapter->vsi.id = adapter->vsi_res->vsi_id;
- adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
-
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
if (err)
@@ -2190,6 +2291,13 @@ static void i40evf_init_task(struct work_struct *work)
adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ } else {
+ i40evf_configure_rss(adapter);
+ }
return;
restart:
schedule_delayed_work(&adapter->init_task,
@@ -2299,7 +2407,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
adapter->state = __I40EVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e090558f31..d4eb1a5e7d42 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
- op, err, hw->aq.asq_last_status);
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+ op, i40evf_stat_str(hw, err),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
- (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ adapter->pf_version = *pf_vvi;
+
+ if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
@@ -145,8 +149,24 @@ out:
**/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
+ u32 caps;
+
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (PF_IS_V11(adapter))
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
+ else
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
}
/**
@@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
- __func__, v_retval, v_opcode);
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+ __func__, v_retval,
+ i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
}
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats;
}
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource);
+ memcpy(adapter->vf_res, msg, min(msglen, len));
+ i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+ i40evf_process_config(adapter);
+ }
+ break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_free_all_rx_resources(adapter);
break;
case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b0182dd31346..7a73510e547c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* reset page to 0 */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
- if (ret_val)
- return ret_val;
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (port && (hw->dev_spec._82575.media_port != port)) {
hw->dev_spec._82575.media_port = port;
hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ igb_check_for_link_82575(hw);
} else {
- ret_val = igb_check_for_link_82575(hw);
+ igb_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
}
return 0;
@@ -223,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -235,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;
@@ -258,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->mac.ops.check_for_link =
igb_check_for_link_media_swap;
}
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = igb_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -889,6 +903,7 @@ out:
**/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
+ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
/* This isn't a true "hard" reset, but is the only reset
@@ -905,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
goto out;
ret_val = igb_phy_sw_reset(hw);
+ if (ret_val)
+ goto out;
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = igb_initialize_M88E1512_phy(hw);
out:
return ret_val;
}
@@ -1579,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -2621,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
@@ -2701,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa285be..b1915043bc0c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -604,6 +604,10 @@
#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
#define E1000_M88E1112_PAGE_ADDR 0x16
#define E1000_M88E1112_STATUS 0x01
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
@@ -861,6 +865,7 @@
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d8366f..23ec28f43f6d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
static const u16 e1000_igp_2_cable_length_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
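Dropping these hand-rolled *_TABLE_SIZE macros in favor of ARRAY_SIZE() (used in the hunks below) removes the failure mode where a table and its size macro drift apart. The kernel macro is essentially the sizeof ratio plus a compile-time array check; a portable approximation:

    #include <stdio.h>

    /* Approximation of the kernel's ARRAY_SIZE(); the real macro also adds
     * __must_be_array() so it fails to compile when handed a pointer.
     */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const unsigned short cable_len[] = { 0, 50, 80, 110, 140, 140 };

    int main(void)
    {
            printf("%zu entries\n", ARRAY_SIZE(cable_len));     /* 6 */
            return 0;
    }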
/**
* igb_check_reset_block - Check if PHY reset is blocked
@@ -1268,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
reset_dsp = false;
break;
@@ -1276,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
reset_dsp = false;
break;
}
- if (!reset_dsp)
+ if (!reset_dsp) {
hw_dbg("Link taking longer than expected.\n");
- else {
+ } else {
/* We didn't get link.
* Reset the DSP and cross our fingers.
*/
@@ -1303,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID ||
+ hw->phy.id == M88E1543_E_PHY_ID ||
+ hw->phy.id == M88E1512_E_PHY_ID ||
hw->phy.id == I210_I_PHY_ID)
goto out;
@@ -1700,7 +1698,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1743,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1796,7 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1840,7 +1839,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
- u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1862,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
IGP02E1000_AGC_LENGTH_MASK;
/* Array index bound check. */
- if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
(cur_agc_index == 0)) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -2195,6 +2194,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
}
/**
+ * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ /* msec_delay(1000); */
+ usleep_range(1000, 2000);
+out:
+ return ret_val;
+}
+
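The page-0xFF portion of this init script is a run of identical write-then-check steps; read as a register/value table it condenses to the hedged sketch below (values taken from the hunk above, but this is not how the driver actually structures it):

	static const struct { u32 reg; u16 val; } m88e1512_page_ff[] = {
		{ E1000_M88E1512_CFG_REG_2, 0x214B },
		{ E1000_M88E1512_CFG_REG_1, 0x2144 },
		{ E1000_M88E1512_CFG_REG_2, 0x0C28 },
		{ E1000_M88E1512_CFG_REG_1, 0x2146 },
		{ E1000_M88E1512_CFG_REG_2, 0xB233 },
		{ E1000_M88E1512_CFG_REG_1, 0x214D },
		{ E1000_M88E1512_CFG_REG_2, 0xCC0C },
		{ E1000_M88E1512_CFG_REG_1, 0x2159 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(m88e1512_page_ff); i++) {
		ret_val = phy->ops.write_reg(hw, m88e1512_page_ff[i].reg,
					     m88e1512_page_ff[i].val);
		if (ret_val)
			goto out;
	}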
+/**
* igb_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 7af4ffab0285..24d55edbb0e3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -61,6 +61,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
void igb_power_up_phy_copper(struct e1000_hw *hw);
void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 6f0490d0e981..4af2870e49f8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -104,6 +104,8 @@
#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */
#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c2bd4f98a837..212d668dabb3 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d5673eb90c54..74262768b09b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -ENOTSUPP;
+
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
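After this filter only the plain rx-usecs/tx-usecs style settings reach the range checks below; a hedged sketch of a request that now fails early:

	struct ethtool_coalesce ec = {
		.rx_coalesce_usecs = 100,	/* still validated below */
		.use_adaptive_rx_coalesce = 1,	/* any filtered field => -ENOTSUPP */
	};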
@@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev,
info->rx_filters |=
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
@@ -2991,6 +3008,7 @@ static int igb_set_channels(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int count = ch->combined_count;
+ unsigned int max_combined = 0;
/* Verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
@@ -3001,11 +3019,13 @@ static int igb_set_channels(struct net_device *netdev,
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
- if (count > igb_max_channels(adapter))
+ max_combined = igb_max_channels(adapter);
+ if (count > max_combined)
return -EINVAL;
if (count != adapter->rss_queues) {
adapter->rss_queues = count;
+ igb_set_flag_queue_pairs(adapter, max_combined);
/* Hardware has to reinitialize queues and interrupts to
* match the new configuration.
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..e174fbbdba40 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
#include "igb.h"
#define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif
#ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
+ if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
- else
+ } else if (size > ksize(q_vector)) {
+ kfree_rcu(q_vector, rcu);
+ q_vector = kzalloc(size, GFP_KERNEL);
+ } else {
memset(q_vector, 0, size);
+ }
if (!q_vector)
return -ENOMEM;
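The branch above reuses the old q_vector whenever ksize() shows the slab object is already large enough for the new layout; a generalized sketch of the pattern (hypothetical helper, not part of the patch; needs <linux/slab.h>):

	static void *kzalloc_reuse(void *old, size_t size)
	{
		/* the slab may have rounded the old allocation up past size */
		if (old && size <= ksize(old)) {
			memset(old, 0, size);
			return old;
		}
		kfree(old);	/* igb uses kfree_rcu(): readers may still hold it */
		return kzalloc(size, GFP_KERNEL);
	}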
@@ -2645,7 +2651,11 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+ igb_disable_sriov(pdev);
+#endif
pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -2805,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev)
*/
igb_release_hw_control(adapter);
- unregister_netdev(netdev);
-
- igb_clear_interrupt_scheme(adapter);
-
#ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev);
#endif
+ unregister_netdev(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_pci_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -2888,6 +2898,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
/* Determine if we need to pair queues. */
switch (hw->mac.type) {
case e1000_82575:
@@ -2968,6 +2986,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
+ igb_probe_vfs(adapter);
+
igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3000,6 @@ static int igb_sw_init(struct igb_adapter *adapter)
return -ENOMEM;
}
- igb_probe_vfs(adapter);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
@@ -6566,7 +6584,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
@@ -6621,22 +6639,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
+ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+ if (likely(size <= IGB_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6669,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
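A worked example of the pull path above, assuming a 64-bit build (sizeof(long) == 8):

	/* Plain TCPv4 frame: eth_get_headlen() walks
	 * 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 header bytes;
	 * ALIGN(54, sizeof(long)) rounds the memcpy up to 56 bytes,
	 * and the remaining payload stays in the page fragment.
	 */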
@@ -6791,62 +6825,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
}
/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
-
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* igb_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -6873,10 +6851,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -7445,6 +7419,7 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ rtnl_unlock();
return -ENOMEM;
}
@@ -7538,6 +7513,7 @@ static int igb_sriov_reinit(struct pci_dev *dev)
igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c3a9392cbc19..5982f28d521a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
{
static const u32 aux0_sel_sdp[IGB_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
};
+ static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+ TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+ };
+ static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+ TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+ };
static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
tssdp &= ~AUX1_TS_SDP_EN;
tssdp &= ~ts_sdp_sel_clr[pin];
- if (chan == 1)
- tssdp |= ts_sdp_sel_tt1[pin];
- else
- tssdp |= ts_sdp_sel_tt0[pin];
-
+ if (freq) {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_fc1[pin];
+ else
+ tssdp |= ts_sdp_sel_fc0[pin];
+ } else {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_tt1[pin];
+ else
+ tssdp |= ts_sdp_sel_tt0[pin];
+ }
tssdp |= ts_sdp_en[pin];
wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct igb_adapter *igb =
container_of(ptp, struct igb_adapter, ptp_caps);
struct e1000_hw *hw = &igb->hw;
- u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
unsigned long flags;
struct timespec ts;
- int pin = -1;
+ int use_freq = 0, pin = -1;
s64 ns;
switch (rq->type) {
@@ -511,40 +525,58 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
ts.tv_nsec = rq->perout.period.nsec;
ns = timespec_to_ns(&ts);
ns = ns >> 1;
- if (on && ns < 500000LL) {
- /* 2k interrupts per second is an awful lot. */
- return -EINVAL;
+ if (on && ns <= 70000000LL) {
+ if (ns < 8LL)
+ return -EINVAL;
+ use_freq = 1;
}
ts = ns_to_timespec(ns);
if (rq->perout.index == 1) {
- tsauxc_mask = TSAUXC_EN_TT1;
- tsim_mask = TSINTR_TT1;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ }
trgttiml = E1000_TRGTTIML1;
trgttimh = E1000_TRGTTIMH1;
+ freqout = E1000_FREQOUT1;
} else {
- tsauxc_mask = TSAUXC_EN_TT0;
- tsim_mask = TSINTR_TT0;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ }
trgttiml = E1000_TRGTTIML0;
trgttimh = E1000_TRGTTIMH0;
+ freqout = E1000_FREQOUT0;
}
spin_lock_irqsave(&igb->tmreg_lock, flags);
tsauxc = rd32(E1000_TSAUXC);
tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
if (on) {
int i = rq->perout.index;
-
- igb_pin_perout(igb, i, pin);
+ igb_pin_perout(igb, i, pin, use_freq);
igb->perout[i].start.tv_sec = rq->perout.start.sec;
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
wr32(trgttimh, rq->perout.start.sec);
wr32(trgttiml, rq->perout.start.nsec);
+ if (use_freq)
+ wr32(freqout, ns);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
- } else {
- tsauxc &= ~tsauxc_mask;
- tsim &= ~tsim_mask;
}
wr32(E1000_TSAUXC, tsauxc);
wr32(E1000_TSIM, tsim);
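Worked examples of the new threshold, following the ns = period / 2 computation above:

	/* 1 Hz  request: ns = 500000000 >  70000000 -> target-time (interrupt) mode
	 * 1 kHz request: ns = 500000    <= 70000000 -> FREQOUT (hardware clock) mode
	 * period < 16 ns (> 62.5 MHz):  ns < 8      -> -EINVAL
	 */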
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 95af14e139d7..686fa7184179 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ac3ac2a20386..edf1fb913209 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -630,6 +630,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
@@ -644,6 +645,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#ifdef CONFIG_IXGBE_VXLAN
+#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
+#endif
/* Tx fast path data */
int num_tx_queues;
@@ -757,7 +761,9 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+#ifdef CONFIG_IXGBE_VXLAN
u16 vxlan_port;
+#endif
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -967,4 +973,5 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
+void ixgbe_store_reta(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6b87d9634614..dd7062fed61a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -504,16 +504,12 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
**/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg, fwsm;
+ u32 autoc2_reg;
u16 ee_ctrl_2 = 0;
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- /* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-
- if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
- !hw->wol_enabled &&
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
@@ -1246,6 +1242,25 @@ mac_reset_top:
}
/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return 0;
+ udelay(10);
+ }
+
+ return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
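The helper bounds how long callers wait for the hardware:

	/* Poll budget: IXGBE_FDIRCMD_CMD_POLL (10) iterations x udelay(10)
	 * gives FDIRCMD.CMD roughly 100 us to clear before
	 * IXGBE_ERR_FDIR_CMD_INCOMPLETE is returned.
	 */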
+/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
@@ -1253,6 +1268,8 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ u32 fdircmd;
+ s32 err;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1260,15 +1277,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* Before starting reinitialization process,
* FDIRCMD.CMD must be zero.
*/
- for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- IXGBE_FDIRCMD_CMD_MASK))
- break;
- udelay(10);
- }
- if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
@@ -1394,14 +1406,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
/*
* Continue setup of fdirctrl register bits:
* Turn perfect match filtering on
- * Report hash in RSS field of Rx wb descriptor
* Initialize the drop queue
* Move the flexible bytes to use the ethertype - shift 6 words
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 (0x4 * 16) filters are left
*/
fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
- IXGBE_FDIRCTRL_REPORT_STATUS |
(IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
(0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
@@ -1509,20 +1519,28 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @input: unique input dword
* @common: compressed common input dword
* @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when hardware
+ * tunneling support is not present.
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
{
- u64 fdirhashcmd;
- u32 fdircmd;
+ u64 fdirhashcmd;
+ u8 flow_type;
+ bool tunnel;
+ u32 fdircmd;
/*
* Get the flow_type in order to program FDIRCMD properly
* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- switch (input.formatted.flow_type) {
+ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+ flow_type = input.formatted.flow_type &
+ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+ switch (flow_type) {
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1538,8 +1556,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ if (tunnel)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
/*
* The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
@@ -1760,6 +1780,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ s32 err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1808,6 +1829,11 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director command did not complete!\n");
+ return err;
+ }
return 0;
}
@@ -1817,9 +1843,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id)
{
u32 fdirhash;
- u32 fdircmd = 0;
- u32 retry_count;
- s32 err = 0;
+ u32 fdircmd;
+ s32 err;
/* configure FDIRHASH register */
fdirhash = input->formatted.bkt_hash;
@@ -1832,18 +1857,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
- for (retry_count = 10; retry_count; retry_count--) {
- /* allow 10us for query to process */
- udelay(10);
- /* verify query completed successfully */
- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
- break;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director command did not complete!\n");
+ return err;
}
- if (!retry_count)
- err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
/* if filter exists in hardware then remove it */
if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1852,7 +1871,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
}
- return err;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c1c26732b67..3f56a8080118 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3905,3 +3905,18 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
}
}
}
+
+/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ **/
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return false;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ fwsm &= IXGBE_FWSM_MODE_MASK;
+ return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index ec015fed8fa7..2f779f35dc4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -113,6 +113,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ec7b2324b77b..ab2edc8e7703 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -166,6 +166,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
ecmd->supported |= SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
@@ -177,6 +179,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
@@ -286,6 +290,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_10GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
@@ -2868,6 +2875,14 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2907,6 +2922,44 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
+static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+ u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+
+ if (hfunc)
+ return -EINVAL;
+
+ /* Fill out the redirection table */
+ if (indir) {
+ int max_queues = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+
+ /* Allow at least 2 queues w/ SR-IOV. */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ (max_queues < 2))
+ max_queues = 2;
+
+ /* Verify user input. */
+ for (i = 0; i < reta_entries; i++)
+ if (indir[i] >= max_queues)
+ return -EINVAL;
+
+ for (i = 0; i < reta_entries; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+ }
+
+ /* Fill out the rss hash key */
+ if (key)
+ memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+
+ ixgbe_store_reta(adapter);
+
+ return 0;
+}
+
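A hedged sketch of a caller building a valid table for this hook, an identity spread over the active Rx queues (roughly what `ethtool -X ethX equal N` requests):

	for (i = 0; i < reta_entries; i++)
		indir[i] = i % adapter->num_rx_queues;	/* entry stays below the active queue count */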
static int ixgbe_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -2938,14 +2991,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
@@ -3167,6 +3212,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_indir_size = ixgbe_rss_indir_size,
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..63b2cfe9416b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -65,6 +65,9 @@
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_VXLAN
+#include <net/vxlan.h>
+#endif
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -79,7 +82,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "4.0.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2014 Intel Corporation.";
+ "Copyright (c) 1999-2015 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -243,13 +246,20 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
int expected_gts)
{
+ struct ixgbe_hw *hw = &adapter->hw;
int max_gts = 0;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
struct pci_dev *pdev;
- /* determine whether to use the the parent device
+ /* Some devices are not connected over PCIe and thus do not negotiate
+ * speed. These devices do not have valid bus info, and thus any report
+ * we generate may not be correct.
*/
+ if (hw->bus.type == ixgbe_bus_type_internal)
+ return;
+
+ /* determine whether to use the parent device */
if (ixgbe_pcie_from_parent(&adapter->hw))
pdev = adapter->pdev->bus->parent->self;
else
@@ -1360,14 +1370,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
}
#endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb_set_hash(skb,
- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
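The mask turns the four-way L4-type test into a single bit test:

	/* rss_type == IXGBE_RXDADV_RSSTYPE_IPV4_TCP: its bit lies inside
	 * IXGBE_RSS_L4_TYPES_MASK, so the hash is reported as PKT_HASH_TYPE_L4;
	 * a bare IPv4 hash type misses the mask and is reported as L3;
	 * rss_type == 0 means no hash was computed, so none is set.
	 */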
#ifdef IXGBE_FCOE
@@ -1414,7 +1441,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
(hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
encap_pkt = true;
skb->encapsulation = 1;
- skb->ip_summed = CHECKSUM_NONE;
}
/* if IP and error */
@@ -1832,7 +1858,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
@@ -3287,7 +3313,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
*
* Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
*/
-static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
+void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
@@ -4245,6 +4271,21 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
}
}
+static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+#ifdef CONFIG_IXGBE_VXLAN
+ adapter->vxlan_port = 0;
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_configure_dcb - Configure DCB hardware
@@ -5286,6 +5327,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_VXLAN
+ adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
+#endif
break;
default:
break;
@@ -5737,10 +5781,11 @@ static int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
-#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+ ixgbe_clear_vxlan_port(adapter);
+#ifdef CONFIG_IXGBE_VXLAN
vxlan_get_rx_port(netdev);
-
#endif
+
return 0;
err_set_queues:
@@ -5761,7 +5806,15 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
{
ixgbe_ptp_suspend(adapter);
- ixgbe_down(adapter);
+ if (adapter->hw.phy.ops.enter_lplu) {
+ adapter->hw.phy.reset_disable = true;
+ ixgbe_down(adapter);
+ adapter->hw.phy.ops.enter_lplu(&adapter->hw);
+ adapter->hw.phy.reset_disable = false;
+ } else {
+ ixgbe_down(adapter);
+ }
+
ixgbe_free_irq(adapter);
ixgbe_free_all_tx_resources(adapter);
@@ -6327,6 +6380,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
struct net_device *upper;
struct list_head *iter;
u32 link_speed = adapter->link_speed;
+ const char *speed_str;
bool flow_rx, flow_tx;
/* only continue if link was previously down */
@@ -6364,14 +6418,24 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
- e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
- "10 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_100_FULL ?
- "100 Mbps" :
- "unknown speed"))),
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ speed_str = "10 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ speed_str = "2.5 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ speed_str = "1 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ speed_str = "100 Mbps";
+ break;
+ default:
+ speed_str = "unknown speed";
+ break;
+ }
+ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
((flow_rx && flow_tx) ? "RX/TX" :
(flow_rx ? "RX" :
(flow_tx ? "TX" : "None"))));
@@ -6800,6 +6864,12 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+#ifdef CONFIG_IXGBE_VXLAN
+ if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ vxlan_get_rx_port(adapter->netdev);
+ }
+#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -6896,31 +6966,55 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & IXGBE_TX_FLAGS_CC))
return;
+ vlan_macip_lens = skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
} else {
u8 l4_hdr = 0;
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
+ union {
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ u8 *raw;
+ } network_hdr;
+ union {
+ struct tcphdr *tcphdr;
+ u8 *raw;
+ } transport_hdr;
+
+ if (skb->encapsulation) {
+ network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr.raw = skb_inner_transport_header(skb);
+ vlan_macip_lens = skb_inner_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
+ } else {
+ network_hdr.raw = skb_network_header(skb);
+ transport_hdr.raw = skb_transport_header(skb);
+ vlan_macip_lens = skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
+ }
+
+ /* use first 4 bits to determine IP version */
+ switch (network_hdr.ipv4->version) {
+ case IPVERSION:
+ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
+ l4_hdr = network_hdr.ipv4->protocol;
break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
+ case 6:
+ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
+ l4_hdr = network_hdr.ipv6->nexthdr;
break;
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
+ "partial checksum but version=%d\n",
+ network_hdr.ipv4->version);
}
- break;
}
switch (l4_hdr) {
case IPPROTO_TCP:
type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
+ mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
IXGBE_ADVTXD_L4LEN_SHIFT;
break;
case IPPROTO_SCTP:
@@ -6946,7 +7040,6 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
}
/* vlan_macip_lens: MACLEN, VLAN tag */
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
@@ -7201,6 +7294,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ipv6hdr *ipv6;
} hdr;
struct tcphdr *th;
+ struct sk_buff *skb;
+#ifdef CONFIG_IXGBE_VXLAN
+ u8 encap = false;
+#endif /* CONFIG_IXGBE_VXLAN */
__be16 vlan_id;
/* if ring doesn't have a interrupt vector, cannot perform ATR */
@@ -7214,16 +7311,36 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
ring->atr_count++;
/* snag network header to get L4 type and address */
- hdr.network = skb_network_header(first->skb);
+ skb = first->skb;
+ hdr.network = skb_network_header(skb);
+ if (skb->encapsulation) {
+#ifdef CONFIG_IXGBE_VXLAN
+ struct ixgbe_adapter *adapter = q_vector->adapter;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != htons(ETH_P_IPV6) ||
- hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != htons(ETH_P_IP) ||
- hdr.ipv4->protocol != IPPROTO_TCP))
+ if (!adapter->vxlan_port)
+ return;
+ if (first->protocol != htons(ETH_P_IP) ||
+ hdr.ipv4->version != IPVERSION ||
+ hdr.ipv4->protocol != IPPROTO_UDP) {
+ return;
+ }
+ if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
+ return;
+ encap = true;
+ hdr.network = skb_inner_network_header(skb);
+ th = inner_tcp_hdr(skb);
+#else
return;
-
- th = tcp_hdr(first->skb);
+#endif /* CONFIG_IXGBE_VXLAN */
+ } else {
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((first->protocol != htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (first->protocol != htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ th = tcp_hdr(skb);
+ }
/* skip this packet since it is invalid or the socket is closing */
if (!th || th->fin)
@@ -7272,6 +7389,11 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.ipv6->daddr.s6_addr32[3];
}
+#ifdef CONFIG_IXGBE_VXLAN
+ if (encap)
+ input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
+#endif /* CONFIG_IXGBE_VXLAN */
+
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
input, common, ring->queue_index);
@@ -7737,9 +7859,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
bool pools;
/* Hardware supports up to 8 traffic classes */
- if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
- (hw->mac.type == ixgbe_mac_82598EB &&
- tc < MAX_TRAFFIC_CLASS))
+ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return -EINVAL;
+
+ if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
@@ -7898,12 +8021,23 @@ static int ixgbe_set_features(struct net_device *netdev,
need_reset = true;
netdev->features = features;
+
+#ifdef CONFIG_IXGBE_VXLAN
+ if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ else
+ ixgbe_clear_vxlan_port(adapter);
+ }
+#endif /* CONFIG_IXGBE_VXLAN */
+
if (need_reset)
ixgbe_do_reset(netdev);
return 0;
}
+#ifdef CONFIG_IXGBE_VXLAN
/**
* ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
* @dev: The port's netdev
@@ -7917,17 +8051,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
struct ixgbe_hw *hw = &adapter->hw;
u16 new_port = ntohs(port);
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port == new_port) {
- netdev_info(dev, "Port %d already offloaded\n", new_port);
+ if (adapter->vxlan_port == new_port)
return;
- }
if (adapter->vxlan_port) {
netdev_info(dev,
- "Hit Max num of UDP ports, not adding port %d\n",
+ "Hit Max num of VXLAN ports, not adding port %d\n",
new_port);
return;
}
@@ -7946,9 +8081,11 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
__be16 port)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_hw *hw = &adapter->hw;
u16 new_port = ntohs(port);
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
if (sa_family == AF_INET6)
return;
@@ -7958,9 +8095,10 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
return;
}
- adapter->vxlan_port = 0;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+ ixgbe_clear_vxlan_port(adapter);
+ adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
}
+#endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -8135,7 +8273,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
- fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+ fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
@@ -8191,6 +8329,21 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
+#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+static netdev_features_t
+ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!skb->encapsulation)
+ return features;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ IXGBE_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
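A worked example of the 80-byte budget, assuming plain VXLAN framing; skb_inner_mac_header() - skb_transport_header() spans the outer L4 and tunnel headers:

	/* outer UDP (8) + VXLAN header (8) = 16 bytes, well under
	 * IXGBE_MAX_TUNNEL_HDR_LEN (80), so NETIF_F_ALL_CSUM is kept;
	 * an encapsulation deeper than 80 bytes loses checksum offload
	 * for that skb instead of being mis-offloaded.
	 */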
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -8236,8 +8389,11 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
+#ifdef CONFIG_IXGBE_VXLAN
.ndo_add_vxlan_port = ixgbe_add_vxlan_port,
.ndo_del_vxlan_port = ixgbe_del_vxlan_port,
+#endif /* CONFIG_IXGBE_VXLAN */
+ .ndo_features_check = ixgbe_features_check,
};
/**
@@ -8597,17 +8753,24 @@ skip_sriov:
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+#ifdef CONFIG_IXGBE_VXLAN
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
break;
default:
break;
}
+#endif /* CONFIG_IXGBE_VXLAN */
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
@@ -8694,9 +8857,10 @@ skip_sriov:
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
/* pick up the PCI bus settings for reporting later */
- hw->mac.ops.get_bus_info(hw);
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
+ else
+ hw->mac.ops.get_bus_info(hw);
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
@@ -8859,12 +9023,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
#ifdef CONFIG_PCI_IOV
- /*
- * Only disable SR-IOV on unload if the user specified the now
- * deprecated max_vfs module parameter.
- */
- if (max_vfs)
- ixgbe_disable_sriov(adapter);
+ ixgbe_disable_sriov(adapter);
#endif
ixgbe_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 526a20bf7488..597d0b1c2370 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -243,9 +243,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
u16 ext_ability = 0;
if (!hw->phy.phy_semaphore_mask) {
- hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
- IXGBE_STATUS_LAN_ID_1;
- if (hw->phy.lan_id)
+ if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
@@ -608,12 +606,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
- u32 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
+ u32 gssr = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -737,39 +730,61 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * ixgbe_get_copper_speeds_supported - Get copper link speed from phy
* @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
*
- * Determines the link capabilities by reading the AUTOC register.
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
- s32 status;
u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
+ s32 status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
+ if (status)
+ return status;
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_100)
- *speed |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed_ability & MDIO_SPEED_10G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case ixgbe_mac_X550EM_x:
+ hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ break;
}
- /* Internal PHY does not support 100 Mbps */
- if (hw->mac.type == ixgbe_mac_X550EM_x)
- *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+ return 0;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = 0;
+
+ *autoneg = true;
+ if (!hw->phy.speeds_supported)
+ status = ixgbe_get_copper_speeds_supported(hw);
+ *speed = hw->phy.speeds_supported;
return status;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b6f424f3b1a8..63689192b149 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -848,6 +848,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
@@ -856,6 +857,24 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1305,6 +1324,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
@@ -1312,7 +1332,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
-
+#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
@@ -2041,6 +2062,11 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2540,9 +2566,11 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
#define IXGBE_FDIR_DROP_QUEUE 127
@@ -2833,12 +2861,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
/* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK 0x7fff
-#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_UDP 0x1
-#define IXGBE_ATR_L4TYPE_TCP 0x2
-#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
enum ixgbe_atr_flow_type {
IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
@@ -3035,9 +3064,8 @@ enum ixgbe_smart_speed {
/* PCI bus types */
enum ixgbe_bus_type {
ixgbe_bus_type_unknown = 0,
- ixgbe_bus_type_pci,
- ixgbe_bus_type_pcix,
ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_internal,
ixgbe_bus_type_reserved
};
@@ -3298,6 +3326,7 @@ struct ixgbe_phy_operations {
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
s32 (*handle_lasi)(struct ixgbe_hw *hw);
};
@@ -3308,6 +3337,7 @@ struct ixgbe_eeprom_info {
u16 word_size;
u16 address_bits;
u16 word_page_size;
+ u16 ctrl_word_3;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -3351,10 +3381,10 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
- u8 lan_id;
u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
+ ixgbe_link_speed speeds_supported;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -3460,16 +3490,21 @@ struct ixgbe_info {
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ BIT(5)
+#define IXGBE_FUSES0_REV1 BIT(6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
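The per-port KRM macros above select between the port-0 (0x4xxx) and port-1 (0x8xxx) register banks. A minimal userspace sketch, not driver code, showing that the new ((P) ? hi : lo) spelling matches the old ((P == 0) ? lo : hi) one for both LAN IDs (macro names here are hypothetical stand-ins):

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two macro spellings above. */
#define KRM_LINK_CTRL_1_OLD(P)	((P == 0) ? (0x420C) : (0x820C))
#define KRM_LINK_CTRL_1_NEW(P)	((P) ? 0x820C : 0x420C)

int main(void)
{
	int port;

	/* Both spellings agree for the two valid LAN IDs. */
	for (port = 0; port <= 1; port++) {
		assert(KRM_LINK_CTRL_1_OLD(port) == KRM_LINK_CTRL_1_NEW(port));
		printf("port %d -> 0x%04X\n", port, KRM_LINK_CTRL_1_NEW(port));
	}
	return 0;
}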
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 032a5870abd1..4e758435ece8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -54,6 +54,11 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* set_phy_power is NULL by default; set it when no MNG FW is present */
+ if (!ixgbe_mng_present(hw))
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 7581da13e92a..9fe9445cd73b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -26,6 +26,20 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with the X540 invariants, since the two are so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
* @hw: pointer to hardware structure
**/
@@ -597,6 +611,24 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+ hw->bus.type = ixgbe_bus_type_internal;
+ hw->bus.width = ixgbe_bus_width_unknown;
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+
+ hw->mac.ops.set_lan_id(hw);
+
+ return 0;
+}
+
/** ixgbe_disable_rx_x550 - Disable RX unit
*
* Disables the Rx DMA unit for x550
@@ -1444,6 +1476,144 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
return ixgbe_enable_lasi_ext_t_x550em(hw);
}
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
+ * @hw: pointer to hardware structure
+ * @lcd_speed: pointer to lowest common link speed
+ *
+ * Determine lowest common link speed with link partner.
+ **/
+static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *lcd_speed)
+{
+ u16 an_lp_status;
+ s32 status;
+ u16 word = hw->eeprom.ctrl_word_3;
+
+ *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_lp_status);
+ if (status)
+ return status;
+
+ /* If link partner advertised 1G, return 1G */
+ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+ *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return status;
+ }
+
+ /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
+ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+ (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+ return status;
+
+ /* Link partner not capable of lower speeds, return 10G */
+ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ return status;
+}
+
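A minimal userspace sketch of the LCD decision in ixgbe_get_lcd_t_x550em above, under the same rules: a link partner advertising 1G wins, an NVM D10GMP bit for this port yields no valid LCD, and 10G is the fallback. The constants mirror the defines above but the names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define LP_1000BASE_CAP	0x8000	/* link partner advertises 1G */
#define D10GMP_PORT0	0x40	/* NVM: 10G disabled on port 0 */
#define D10GMP_PORT1	0x100	/* NVM: 10G disabled on port 1 */

enum speed { SPEED_UNKNOWN, SPEED_1G, SPEED_10G };

/* Sketch of the decision order used by ixgbe_get_lcd_t_x550em(). */
static enum speed lcd_speed(uint16_t an_lp_status, uint16_t ctrl_word_3,
			    int lan_id)
{
	if (an_lp_status & LP_1000BASE_CAP)
		return SPEED_1G;
	if ((lan_id && (ctrl_word_3 & D10GMP_PORT1)) ||
	    (ctrl_word_3 & D10GMP_PORT0))
		return SPEED_UNKNOWN;	/* no valid LCD */
	return SPEED_10G;
}

int main(void)
{
	printf("%d\n", lcd_speed(0x8000, 0x00, 0));	/* 1 -> 1G */
	printf("%d\n", lcd_speed(0x0000, 0x40, 0));	/* 0 -> none */
	printf("%d\n", lcd_speed(0x0000, 0x00, 1));	/* 2 -> 10G */
	return 0;
}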
+/** ixgbe_enter_lplu_t_x550em - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
+ * the X557 PHY immediately prior to entering LPLU.
+ **/
+static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+ u16 an_10g_cntl_reg, autoneg_reg, speed;
+ s32 status;
+ ixgbe_link_speed lcd_speed;
+ u32 save_autoneg;
+ bool link_up;
+
+ /* SW LPLU not required on later HW revisions. */
+ if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+ return 0;
+
+ /* If blocked by MNG FW, then don't restart AN */
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status)
+ return status;
+
+ status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
+ &hw->eeprom.ctrl_word_3);
+ if (status)
+ return status;
+
+ /* If link is down, LPLU is disabled in NVM, or neither WoL nor
+ * manageability is enabled, then force link down by entering
+ * low power mode.
+ */
+ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+ !(hw->wol_enabled || ixgbe_mng_present(hw)))
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* Determine LCD */
+ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+ if (status)
+ return status;
+
+ /* If no valid LCD link speed, then force link down and exit. */
+ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status)
+ return status;
+
+ /* If no link now, speed is invalid so take link down */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* clear everything but the speed bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+ /* If current speed is already LCD, then exit. */
+ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+ return status;
+
+ /* Clear AN completed indication */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_10g_cntl_reg);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+ if (status)
+ return status;
+
+ save_autoneg = hw->phy.autoneg_advertised;
+
+ /* Set up link at the lowest common link speed */
+ status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+ /* restore autoneg from before setting lplu speed */
+ hw->phy.autoneg_advertised = save_autoneg;
+
+ return status;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1514,6 +1684,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
+ /* setup SW LPLU only for first revision */
+ if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+ IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
@@ -1760,7 +1935,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
.get_mac_addr = &ixgbe_get_mac_addr_generic, \
.get_device_caps = &ixgbe_get_device_caps_generic, \
.stop_adapter = &ixgbe_stop_adapter_generic, \
- .get_bus_info = &ixgbe_get_bus_info_generic, \
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
.read_analog_reg8 = NULL, \
.write_analog_reg8 = NULL, \
@@ -1809,6 +1983,7 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
.setup_sfp = NULL,
};
@@ -1820,6 +1995,7 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.get_wwn_prefix = NULL,
.setup_link = NULL, /* defined later */
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
};
@@ -1855,7 +2031,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_reg = &ixgbe_read_phy_reg_generic, \
.write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
- .set_phy_power = &ixgbe_set_copper_phy_power, \
+ .set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
@@ -1893,7 +2069,7 @@ struct ixgbe_info ixgbe_X550_info = {
struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
- .get_invariants = &ixgbe_get_invariants_X540,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_X550EM_x,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 770e21a64388..58434584b16d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
#define IXGBE_RXDADV_SPH 0x8000
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
IXGBE_RXD_ERR_LE | \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b161d792..d3e5f5b37999 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_82599_RETA_SIZE;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+ return IXGBEVF_X550_VFRETA_SIZE;
- return 0;
+ return IXGBEVF_82599_RETA_SIZE;
}
static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_RSS_HASH_KEY_SIZE;
-
- return 0;
+ return IXGBEVF_RSS_HASH_KEY_SIZE;
}
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- /* If neither indirection table nor hash key was requested - just
- * return a success avoiding taking any locks.
- */
- if (!indir && !key)
- return 0;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+ if (key)
+ memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
- spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
- adapter->num_rx_queues);
+ if (indir) {
+ int i;
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+ }
+ } else {
+ /* If neither the indirection table nor the hash key was
+ * requested, just return success without taking any locks.
+ */
+ if (!indir && !key)
+ return 0;
- spin_unlock_bh(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
+ if (indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ adapter->num_rx_queues);
+
+ if (!err && key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d08900949..04c7ec8446e0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
-#define IXGBEVF_82599_RETA_SIZE 128
+#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE 40
+#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {
spinlock_t mbx_lock;
unsigned long last_reset;
+
+ u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+ u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};
enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..149a0b4489be 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
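ixgbevf_rx_hash above turns the descriptor's RSS type code into a single bit and tests it against a mask of TCP/UDP types to pick PKT_HASH_TYPE_L4 over PKT_HASH_TYPE_L3. A small standalone sketch of that test, with a reduced, hypothetical subset of the type codes:

#include <stdio.h>

/* Subset of the RSS type codes from the defines.h hunk above. */
#define RSSTYPE_IPV4_TCP	1
#define RSSTYPE_IPV4		2
#define RSSTYPE_IPV4_UDP	7

/* Bit i set => RSS type i hashes over an L4 (TCP/UDP) header. */
#define RSS_L4_TYPES_MASK \
	((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP))

int main(void)
{
	unsigned int types[] = { RSSTYPE_IPV4_TCP, RSSTYPE_IPV4,
				 RSSTYPE_IPV4_UDP };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("rss_type %u -> %s hash\n", types[i],
		       (RSS_L4_TYPES_MASK & (1ul << types[i])) ? "L4" : "L3");
	return 0;
}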
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ ixgbevf_rx_hash(rx_ring, rx_desc, skb);
ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
}
/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* ixgbevf_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ixgbevf_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -765,7 +748,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
+ unsigned int pull_len;
- if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
+ if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
/* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page)))
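After the pull, the remaining payload is attached as a frag at the page offset of the advanced data pointer, computed with (unsigned long)va & ~PAGE_MASK. A tiny sketch of that masking, assuming 4 KiB pages (names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096UL
#define PAGE_MASK_SKETCH	(~(PAGE_SIZE_SKETCH - 1))

int main(void)
{
	/* A made-up buffer address somewhere inside a page. */
	uintptr_t va = 0x7f3a12345678UL;

	/* Offset of va within its page, as passed to skb_add_rx_frag(). */
	unsigned long off = (unsigned long)va & ~PAGE_MASK_SKETCH;

	printf("page offset = 0x%lx\n", off);	/* 0x678 */
	return 0;
}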
@@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfmrqc = 0, vfreta = 0;
- u32 rss_key[10];
u16 rss_i = adapter->num_rx_queues;
- int i, j;
+ u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+ netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+ for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
- /* Fill out redirection table */
- for (i = 0, j = 0; i < 64; i++, j++) {
+ for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
j = 0;
- vfreta = (vfreta << 8) | (j * 0x1);
- if ((i & 3) == 3)
+
+ adapter->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
}
/* Perform hash on these packet types */
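The rewritten RETA fill packs four one-byte queue indices into each 32-bit VFRETA register, shifting each entry by (i & 0x3) * 8 and flushing (then clearing) the accumulator every fourth entry. A standalone sketch of the packing, with printf standing in for the register write:

#include <stdio.h>

int main(void)
{
	unsigned int rss_i = 2;		/* number of RX queues */
	unsigned int vfreta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 8; i++, j++) {
		if (j == rss_i)
			j = 0;
		vfreta |= j << (i & 0x3) * 8;	/* byte lane i % 4 */
		if ((i & 3) == 3) {
			printf("VFRETA[%u] = 0x%08x\n", i >> 2, vfreta);
			vfreta = 0;		/* start next register */
		}
	}
	return 0;
}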
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 6e9a792097d3..060dd3922974 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -583,7 +583,7 @@ jme_setup_tx_resources(struct jme_adapter *jme)
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
- txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ txring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->tx_ring_size, GFP_ATOMIC);
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
@@ -592,8 +592,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
* Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
- memset(txring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
@@ -845,7 +843,7 @@ jme_setup_rx_resources(struct jme_adapter *jme)
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
- rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ rxring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->rx_ring_size, GFP_ATOMIC);
if (unlikely(!(rxring->bufinf)))
goto err_free_rxring;
@@ -853,8 +851,6 @@ jme_setup_rx_resources(struct jme_adapter *jme)
/*
* Initialize Receive Descriptors
*/
- memset(rxring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->rx_ring_size);
for (i = 0 ; i < jme->rx_ring_size ; ++i) {
if (unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);
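The jme hunks above fold the kmalloc + memset pair into a single zeroing kzalloc. A userspace analogue of the same refactor, with calloc in the role of kzalloc (the struct is a hypothetical stand-in for jme_buffer_info):

#include <stdlib.h>
#include <string.h>

struct buf_info {
	void *mapping;
	size_t len;
};

int main(void)
{
	size_t n = 256;

	/* Before: allocate, then zero in a separate step. */
	struct buf_info *a = malloc(n * sizeof(*a));
	if (a)
		memset(a, 0, n * sizeof(*a));

	/* After: one zeroing allocation. */
	struct buf_info *b = calloc(n, sizeof(*b));

	free(a);
	free(b);
	return 0;
}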
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d52639bc491f..960169efe636 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1859,14 +1859,11 @@ oom:
return;
}
- mc_spec = kmalloc(0x200, GFP_ATOMIC);
+ mc_spec = kzalloc(0x200, GFP_ATOMIC);
if (mc_spec == NULL)
goto oom;
mc_other = mc_spec + (0x100 >> 2);
- memset(mc_spec, 0, 0x100);
- memset(mc_other, 0, 0x100);
-
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..fe2299ac4f5c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
struct net_device *dev = pp->dev;
- int rx_done, rx_filled;
+ int rx_done;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_todo = rx_done;
rx_done = 0;
- rx_filled = 0;
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
int rx_bytes, err;
rx_done++;
- rx_filled++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
continue;
}
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ goto err_drop_frame;
+ }
+
skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
if (!skb)
goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
-
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
return rx_done;
}
@@ -3029,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev)
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
+ const char *managed;
int phy_mode;
- int fixed_phy = 0;
int err;
/* Our multiqueue support is not complete, so for now, only
@@ -3064,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot register fixed PHY\n");
goto err_free_irq;
}
- fixed_phy = 1;
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
@@ -3088,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
- pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
- fixed_phy;
+
+ err = of_property_read_string(dn, "managed", &managed);
+ pp->use_inband_status = (err == 0 &&
+ strcmp(managed, "in-band-status") == 0);
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
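mvpp2_txq_inc_put now records the descriptor's DMA address alongside the skb at the same put index, which remains a simple wrapping counter. A sketch of that wrap-around bookkeeping (ring size and names hypothetical):

#include <stdio.h>

#define RING_SIZE	4

/* Store value at put_index, then advance and wrap, as in
 * mvpp2_txq_inc_put(). */
static unsigned int put(unsigned int put_index, int entries[], int value)
{
	entries[put_index] = value;
	if (++put_index == RING_SIZE)
		put_index = 0;
	return put_index;
}

int main(void)
{
	int entries[RING_SIZE] = { 0 };
	unsigned int idx = 0;
	int v;

	for (v = 1; v <= 6; v++)
		idx = put(idx, entries, v);

	/* Six puts on a 4-entry ring leave the index wrapped to 2. */
	printf("put_index = %u\n", idx);
	return 0;
}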
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
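With the TX cause bits no longer shifted into the high half of the register, fls() on the raw cause now yields the highest pending queue directly. A userspace stand-in for fls(), which like the kernel's is 1-based with fls(0) == 0 (this sketch assumes the GCC/Clang __builtin_clz builtin):

#include <stdio.h>

/* 1-based index of the most-significant set bit; 0 if none set. */
static int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* Queues 0 and 3 pending: the highest one wins. */
	unsigned int cause = (1u << 3) | (1u << 0);

	printf("queue = %d\n", fls_sketch(cause) - 1);	/* 3 */
	return 0;
}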
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
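mvpp2_tx_proc_cb builds an all-queues cause mask with (1 << txq_number) - 1 before handing it to mvpp2_tx_done. A one-line sketch of that mask arithmetic:

#include <stdio.h>

int main(void)
{
	int n;

	/* (1 << n) - 1 sets the low n bits: one per TX queue. */
	for (n = 1; n <= 8; n <<= 1)
		printf("%d queues -> cause mask 0x%02x\n", n,
		       (unsigned int)((1 << n) - 1));
	return 0;
}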
/* Main RX/TX processing routines */
/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665b7abf..d54701047401 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef5e5f..2e2a5ec509ac 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
+ long ret_wait;
int err = 0;
down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
if (err)
goto out_reset;
- if (!wait_for_completion_timeout(&context->done,
- msecs_to_jiffies(timeout))) {
+ if (op == MLX4_CMD_SENSE_PORT) {
+ ret_wait =
+ wait_for_completion_interruptible_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ if (ret_wait < 0) {
+ context->fw_status = 0;
+ context->out_param = 0;
+ context->result = 0;
+ }
+ } else {
+ ret_wait = (long)wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ }
+ if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
if (op == MLX4_CMD_NOP) {
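The SENSE_PORT branch above relies on the three-way return convention of wait_for_completion_interruptible_timeout(): negative when interrupted by a signal, zero on timeout, positive (jiffies remaining) on completion. A sketch of dispatching on that convention (the sample values are illustrative only):

#include <stdio.h>

static const char *classify_wait(long ret)
{
	if (ret < 0)
		return "interrupted";	/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return "timed out";
	return "completed";		/* jiffies left on the timeout */
}

int main(void)
{
	long samples[] = { -512, 0, 37 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%ld -> %s\n", samples[i], classify_wait(samples[i]));
	return 0;
}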
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df872a4..eb8a4988de63 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -100,7 +100,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- char name[25];
int timestamp_en = 0;
bool assigned_eq = false;
@@ -119,8 +118,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
err = mlx4_assign_eq(mdev->dev, priv->port,
&cq->vector);
if (err) {
- mlx4_err(mdev, "Failed assigning an EQ to %s\n",
- name);
+ mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+ cq->vector);
goto free_eq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c50e585..f79d8124321e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
+ "phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+ bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+ bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
+ int ret = 0;
- if (bf_enabled_new == bf_enabled_old)
- return 0; /* Nothing to do */
+ if (bf_enabled_new != bf_enabled_old) {
+ if (bf_enabled_new) {
+ bool bf_supported = true;
- if (bf_enabled_new) {
- bool bf_supported = true;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ bf_supported &= priv->tx_ring[i]->bf_alloced;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ if (!bf_supported) {
+ en_err(priv, "BlueFlame is not supported\n");
+ return -EINVAL;
+ }
- if (!bf_supported) {
- en_err(priv, "BlueFlame is not supported\n");
- return -EINVAL;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+ } else {
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- } else {
- priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- }
-
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ priv->tx_ring[i]->bf_enabled = bf_enabled_new;
- en_info(priv, "BlueFlame %s\n",
- bf_enabled_new ? "Enabled" : "Disabled");
+ en_info(priv, "BlueFlame %s\n",
+ bf_enabled_new ? "Enabled" : "Disabled");
+ }
+ if (phv_enabled_new != phv_enabled_old) {
+ ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+ if (ret)
+ return ret;
+ else if (phv_enabled_new)
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ else
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+ en_info(priv, "PHV bit %s\n",
+ phv_enabled_new ? "Enabled" : "Disabled");
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 913b716ed2e1..a946e4bf71d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -224,6 +224,26 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
kfree(mdev);
}
+static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
+{
+ int i;
+ struct mlx4_en_dev *mdev = ctx;
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
+}
+
static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
@@ -297,21 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mutex_init(&mdev->state_lock);
mdev->device_up = true;
- /* Setup ports */
-
- /* Create a netdev for each port */
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
- mdev->pndev[i] = NULL;
- }
- /* register notifier */
- mdev->nb.notifier_call = mlx4_en_netdev_event;
- if (register_netdevice_notifier(&mdev->nb)) {
- mdev->nb.notifier_call = NULL;
- mlx4_err(mdev, "Failed to create notifier\n");
- }
-
return mdev;
err_mr:
@@ -335,6 +340,7 @@ static struct mlx4_interface mlx4_en_interface = {
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
.protocol = MLX4_PROT_ETH,
+ .activate = mlx4_en_activate,
};
static void mlx4_en_verify_params(void)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd1ce12..4726122ea76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+ * enable/disable, make sure the S-TAG flag is always in the same
+ * state as C-TAG.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+ !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
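The new ndo_fix_features hook couples the S-TAG RX strip flag to the C-TAG one whenever the device cannot skip the outer VLAN. A reduced sketch of the coupling rule, with hypothetical flag bits:

#include <stdio.h>

#define F_CTAG_RX	(1u << 0)
#define F_STAG_RX	(1u << 1)

/* Keep S-TAG RX strip in the same state as C-TAG RX strip. */
static unsigned int fix_features(unsigned int features)
{
	if (features & F_CTAG_RX)
		features |= F_STAG_RX;
	else
		features &= ~F_STAG_RX;
	return features;
}

int main(void)
{
	printf("0x%x\n", fix_features(F_CTAG_RX));	/* 0x3: both on */
	printf("0x%x\n", fix_features(F_STAG_RX));	/* 0x0: both off */
	return 0;
}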
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
+
+ if (mlx4_is_slave(mdev->dev)) {
+ int phv;
+
+ err = get_phv_bit(mdev->dev, port, &phv);
+ if (!err && phv) {
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ }
+ } else {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..4402a1e48c9b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
- BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
return ring->prod == ring->cons;
}
@@ -726,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
@@ -907,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ __vlan_hwaccel_put_tag(gro_skb,
+ htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
}
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(gro_skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
@@ -963,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) &&
+ MLX4_CQE_CVLAN_PRESENT_MASK) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1066,7 +1080,10 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ /* VLAN_HLEN is added twice, to support skbs VLAN tagged with multiple
+ * headers (for example, ETH_P_8021Q and ETH_P_8021AD).
+ */
+ int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
int buf_size = 0;
int i = 0;
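Worked numbers for the enlarged effective MTU, assuming the standard 1500-byte MTU, the 14-byte Ethernet header, and a 4-byte VLAN tag (local stand-in constants, not the kernel defines):

#include <stdio.h>

#define ETH_HLEN_SKETCH		14	/* Ethernet header */
#define VLAN_HLEN_SKETCH	4	/* one 802.1Q/802.1ad tag */

int main(void)
{
	int mtu = 1500;

	/* Room for a double-tagged (QinQ) frame: 1500 + 14 + 8 = 1522. */
	int eff_mtu = mtu + ETH_HLEN_SKETCH + 2 * VLAN_HLEN_SKETCH;

	printf("eff_mtu = %d\n", eff_mtu);
	return 0;
}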
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6ad96..494e7762fdb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
+ u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = skb_vlan_tag_get(skb);
-
+ vlan_proto = be16_to_cpu(skb->vlan_proto);
+ }
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->bf.offset ^= ring->bf.buf_size;
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!skb_vlan_tag_present(skb);
+ if (vlan_proto == ETH_P_8021AD)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
if (i == mlx4_master_func_num(dev))
continue;
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57ad7a1..e8ec1dec5789 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
+ [29] = "802.1ad offload support",
};
int i;
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT 0x40
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ if (dev->caps.phv_bit[port]) {
+ field = QUERY_FUNC_CAP_PHV_BIT;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ }
+
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+ if (field & 0x40)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+ if (byte_field & 0x2)
+ param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
+
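+/* Program the per-port PHV bit through the SET_PORT general context:
+ * mark the PHV field as valid and, when requested, set the enable bit.
+ */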
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN 0x80
+
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+
+ context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ if (phv_bit)
+ context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+ int err;
+ struct mlx4_func_cap func_cap;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+ int ret;
+
+ if (mlx4_is_slave(dev))
+ return -EPERM;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+ if (!ret)
+ dev->caps.phv_bit[port] = new_val;
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2461ad..08de5555c2f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
u8 rss_ip_frags;
+ u8 phv_check_en; /* for QUERY_HCA */
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0d80aed59043..0472941af820 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -63,8 +63,11 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
+ if (intf->activate)
+ intf->activate(&priv->dev, dev_ctx->context);
} else
kfree(dev_ctx);
+
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..006757f80988 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+ struct mlx4_init_hca_param hca_param;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+		/* Turn off the PHV_EN flag in case phv_check_en is set.
+		 * phv_check_en is a HW check that parses the packet and
+		 * verifies that the phv bit was reported correctly in the
+		 * wqe. To allow QinQ, the PHV_EN flag should be set and
+		 * phv_check_en must be cleared; otherwise QinQ packets will
+		 * be dropped by the HW.
+		 */
+ if (err || hca_param.phv_check_en)
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+ }
+
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2273,6 +2288,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
} else if (err == -ENOENT) {
err = 0;
continue;
+ } else if (mlx4_is_slave(dev) && err == -EINVAL) {
+ priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+ mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+ MLX4_SINK_COUNTER_INDEX(dev));
+ err = 0;
} else {
mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
__func__, port + 1, err);
@@ -2649,9 +2669,14 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
if (msi_x) {
int nreq = dev->caps.num_ports * num_online_cpus() + 1;
+ bool shared_ports = false;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
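+		/* If more completion vectors are wanted than MSI-X entries
+		 * exist, cap the request and let the ports share the
+		 * completion vectors.
+		 */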
+ if (nreq > MAX_MSIX) {
+ nreq = MAX_MSIX;
+ shared_ports = true;
+ }
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -2674,6 +2699,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
dev->caps.num_ports);
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
+ shared_ports = true;
+
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
if (i == MLX4_EQ_ASYNC)
continue;
@@ -2681,7 +2709,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
priv->eq_table.eq[i].irq =
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
- if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ if (shared_ports) {
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
dev->caps.num_ports);
/* We don't set affinity hint when there
@@ -2907,6 +2935,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
{
u64 dev_flags = dev->flags;
int err = 0;
+ int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+ MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2932,6 +2962,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ if (total_vfs > fw_enabled_sriov_vfs) {
+ mlx4_err(dev, "requested VFs (%d) > available VFs (%d); continuing without SR-IOV\n",
+ total_vfs, fw_enabled_sriov_vfs);
+ err = -ENOMEM;
+ goto disable_sriov;
+ }
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
@@ -3413,20 +3449,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
goto err_disable_pdev;
}
}
- if (total_vfs >= MLX4_MAX_NUM_VF) {
+ if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) than allowed (%d)\n",
- total_vfs, MLX4_MAX_NUM_VF - 1);
+ "Requested more VF's (%d) than allowed by hw (%d)\n",
+ total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
- if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
- MLX4_MAX_NUM_VF_P_PORT - 1);
+ MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c34d43..232b2b55f23b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
u8 pprx;
u8 pfcrx;
u16 reserved4;
+ u32 reserved5;
+ u8 phv_en;
+ u8 reserved6[3];
};
struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d1669eb52..defcf8c395bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
*/
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV 2
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b497511f..6cb38304669f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
* register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
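+/* dma_zalloc_coherent() allocates on the device's own NUMA node, so
+ * temporarily retarget the device to the requested node (serialized by
+ * alloc_mutex) and restore it once the allocation is done.
+ */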
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+ size_t size, dma_addr_t *dma_handle,
+ int node)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int original_node;
+ void *cpu_handle;
+
+ mutex_lock(&priv->alloc_mutex);
+ original_node = dev_to_node(&dev->pdev->dev);
+ set_dev_node(&dev->pdev->dev, node);
+ cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+ dma_handle, GFP_KERNEL);
+ set_dev_node(&dev->pdev->dev, original_node);
+ mutex_unlock(&priv->alloc_mutex);
+ return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
if (!buf->direct.buf)
return -ENOMEM;
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
return 0;
}
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+ return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+ int node)
{
struct mlx5_db_pgdir *pgdir;
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
return NULL;
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
- pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, GFP_KERNEL);
+
+ pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+ &pgdir->db_dma, node);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
return 0;
}
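+/* Doorbell records are sub-allocated from page directories: scan the
+ * existing pgdirs first and only allocate a new node-local page on a miss.
+ */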
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
- pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+ pgdir = mlx5_alloc_db_pgdir(dev, node);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -145,6 +171,12 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd657e3c..0983a208b299 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -42,24 +42,27 @@
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET 16
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
@@ -91,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
+ "rx_csum_sw",
"tx_csum_offload",
"tx_queue_stopped",
"tx_queue_wake",
@@ -128,18 +132,94 @@ struct mlx5e_vport_stats {
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
+ u64 rx_csum_sw;
u64 tx_csum_offload;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
-#define NUM_VPORT_COUNTERS 31
+#define NUM_VPORT_COUNTERS 32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+ /* IEEE802.3 counters */
+ "frames_tx",
+ "frames_rx",
+ "check_seq_err",
+ "alignment_err",
+ "octets_tx",
+ "octets_received",
+ "multicast_xmitted",
+ "broadcast_xmitted",
+ "multicast_rx",
+ "broadcast_rx",
+ "in_range_len_errors",
+ "out_of_range_len",
+ "too_long_errors",
+ "symbol_err",
+ "mac_control_tx",
+ "mac_control_rx",
+ "unsupported_op_rx",
+ "pause_ctrl_rx",
+ "pause_ctrl_tx",
+
+ /* RFC2863 counters */
+ "in_octets",
+ "in_ucast_pkts",
+ "in_discards",
+ "in_errors",
+ "in_unknown_protos",
+ "out_octets",
+ "out_ucast_pkts",
+ "out_discards",
+ "out_errors",
+ "in_multicast_pkts",
+ "in_broadcast_pkts",
+ "out_multicast_pkts",
+ "out_broadcast_pkts",
+
+ /* RFC2819 counters */
+ "drop_events",
+ "octets",
+ "pkts",
+ "broadcast_pkts",
+ "multicast_pkts",
+ "crc_align_errors",
+ "undersize_pkts",
+ "oversize_pkts",
+ "fragments",
+ "jabbers",
+ "collisions",
+ "p64octets",
+ "p65to127octets",
+ "p128to255octets",
+ "p256to511octets",
+ "p512to1023octets",
+ "p1024to1518octets",
+ "p1519to2047octets",
+ "p2048to4095octets",
+ "p4096to8191octets",
+ "p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS 19
+#define NUM_RFC_2863_COUNTERS 13
+#define NUM_RFC_2819_COUNTERS 21
+#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
+ NUM_RFC_2863_COUNTERS + \
+ NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+ __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+ __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
};
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"csum_none",
+ "csum_sw",
"lro_packets",
"lro_bytes",
"wqe_err"
@@ -148,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_rq_stats {
u64 packets;
u64 csum_none;
+ u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -179,6 +260,7 @@ struct mlx5e_sq_stats {
struct mlx5e_stats {
struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
};
struct mlx5e_params {
@@ -192,9 +274,12 @@ struct mlx5e_params {
u16 tx_cq_moderation_usec;
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
- u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
+ u16 tx_max_inline;
+ u8 rss_hfunc;
+ u8 toeplitz_hash_key[40];
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
};
enum {
@@ -214,6 +299,7 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -237,6 +323,7 @@ struct mlx5e_rq {
struct mlx5_wq_ctrl wq_ctrl;
u32 rqn;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_skb_cb {
@@ -266,7 +353,9 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
- u32 bf_offset;
+ u16 bf_offset;
+ u16 prev_cc;
+ u8 bf_budget;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -279,9 +368,10 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
+ void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
- u32 bf_buf_size;
+ u16 bf_buf_size;
u16 max_inline;
u16 edge;
struct device *pdev;
@@ -315,7 +405,6 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
unsigned long flags;
- int tc_to_txq_map[MLX5E_MAX_NUM_TC];
/* control */
struct mlx5e_priv *priv;
@@ -324,20 +413,24 @@ struct mlx5e_channel {
};
enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP = 0,
- MLX5E_TT_IPV6_TCP = 1,
- MLX5E_TT_IPV4_UDP = 2,
- MLX5E_TT_IPV6_UDP = 3,
- MLX5E_TT_IPV4 = 4,
- MLX5E_TT_IPV6 = 5,
- MLX5E_TT_ANY = 6,
- MLX5E_NUM_TT = 7,
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
};
-enum {
- MLX5E_RQT_SPREADING = 0,
- MLX5E_RQT_DEFAULT_RQ = 1,
- MLX5E_NUM_RQT = 2,
+enum mlx5e_rqt_ix {
+ MLX5E_INDIRECTION_RQT,
+ MLX5E_SINGLE_RQ_RQT,
+ MLX5E_NUM_RQT,
};
struct mlx5e_eth_addr_info {
@@ -362,10 +455,10 @@ struct mlx5e_eth_addr_db {
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
};
struct mlx5e_vlan_db {
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID];
u32 untagged_rule_ft_ix;
u32 any_vlan_rule_ft_ix;
@@ -379,9 +472,9 @@ struct mlx5e_flow_table {
struct mlx5e_priv {
/* priv data path fields - start */
- int num_tc;
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
+ int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
unsigned long state;
@@ -390,10 +483,11 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mr mr;
+ struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn;
+ u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft;
@@ -470,10 +564,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +575,17 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe)
+ struct mlx5e_tx_wqe *wqe, int bf_sz)
{
+ u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
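+	/* bf_sz != 0: copy the WQE control segment through the
+	 * write-combining BlueFlame buffer, sparing the device a DMA read
+	 * of the descriptor; bf_sz == 0: only ring the doorbell through the
+	 * regular UAR mapping.
+	 */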
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -503,9 +596,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
- mlx5_write64((__be32 *)&wqe->ctrl,
- sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
- NULL);
+ if (bf_sz) {
+ __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+ /* flush the write-combining mapped buffer */
+ wmb();
+
+ } else {
+ mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ }
sq->bf_offset ^= sq->bf_buf_size;
}
@@ -519,3 +618,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 388938482ff9..bce912688ca8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS +
+ return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->num_tc *
+ priv->params.num_channels * priv->params.num_tc *
NUM_SQ_STATS;
/* fallthrough */
default:
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_strings[i]);
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_strings[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@ static void mlx5e_get_strings(struct net_device *dev,
"rx%d_%s", i, rq_stats_strings[j]);
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
((u64 *)&priv->channel[i]->rq.stats)[j];
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -264,7 +272,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_params new_params;
+ bool was_opened;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -316,11 +324,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.log_rq_size = log_rq_size;
- new_params.log_sq_size = log_sq_size;
- new_params.min_rx_wqes = min_rx_wqes;
- err = mlx5e_update_priv_params(priv, &new_params);
+
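+	/* The ring sizes are fixed when the channels are created, so apply
+	 * the new parameters by closing and reopening the device if it was
+	 * up.
+	 */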
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.log_rq_size = log_rq_size;
+ priv->params.log_sq_size = log_sq_size;
+ priv->params.min_rx_wqes = min_rx_wqes;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -342,7 +357,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
unsigned int count = ch->combined_count;
- struct mlx5e_params new_params;
+ bool was_opened;
int err = 0;
if (!count) {
@@ -365,9 +380,16 @@ static int mlx5e_set_channels(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.num_channels = count;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.num_channels = count;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -606,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev,
u32 link_modes;
u32 speed;
u32 eth_proto_cap, eth_proto_admin;
- u8 port_status;
+ enum mlx5_port_status ps;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -640,25 +662,197 @@ static int mlx5e_set_settings(struct net_device *netdev,
if (link_modes == eth_proto_admin)
goto out;
- err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
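+	/* The new proto admin mask takes effect on the next link
+	 * transition, so bounce the port, but only if it was
+	 * administratively up.
+	 */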
+ mlx5_query_port_admin_status(mdev, &ps);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+out:
+ return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (indir)
+ memcpy(indir, priv->params.indirection_rqt,
+ sizeof(priv->params.indirection_rqt));
+
+ if (key)
+ memcpy(key, priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc)
+ *hfunc = priv->params.rss_hfunc;
+
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool close_open;
+ int err = 0;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+ (hfunc != ETH_RSS_HASH_XOR) &&
+ (hfunc != ETH_RSS_HASH_TOP))
+ return -EINVAL;
+
+ mutex_lock(&priv->state_lock);
+
+ if (indir) {
+ memcpy(priv->params.indirection_rqt, indir,
+ sizeof(priv->params.indirection_rqt));
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ }
+
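+	/* The indirection table can be applied on the fly through the RQT;
+	 * a new hash key or hash function is only programmed while bringing
+	 * the channels up, hence the close/open cycle below.
+	 */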
+ close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+ test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (close_open)
+ mlx5e_close_locked(dev);
+
+ if (key)
+ memcpy(priv->params.toeplitz_hash_key, key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->params.rss_hfunc = hfunc;
+
+ if (close_open)
+ err = mlx5e_open_locked(priv->netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->params.num_channels;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ const struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ *(u32 *)data = priv->params.tx_max_inline;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
+ u32 val;
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val > mlx5e_get_max_inline_cap(mdev)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.tx_max_inline = val;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
+ mutex_unlock(&priv->state_lock);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+ &pauseparam->tx_pause);
if (err) {
- netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
- goto out;
}
+}
- err = mlx5_query_port_status(mdev, &port_status);
- if (err)
- goto out;
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
- if (port_status == MLX5_PORT_DOWN)
- return 0;
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+ err = mlx5_set_port_pause(mdev,
+ pauseparam->rx_pause ? 1 : 0,
+ pauseparam->tx_pause ? 1 : 0);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ __func__, err);
+ }
- err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
- if (err)
- goto out;
- err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
-out:
return err;
}
@@ -676,4 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
+ .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
+ .get_pauseparam = mlx5e_get_pauseparam,
+ .set_pauseparam = mlx5e_set_pauseparam,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80c47aa..e71563ce05d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
void *ft = priv->ft.main;
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
- if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
switch (eth_addr_type) {
case MLX5E_UC:
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
case MLX5E_ALLMULTI:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
u8 *match_criteria_dmac;
void *ft = priv->ft.main;
u32 *tirn = priv->tirn;
+ u32 *ft_ix;
u32 tt_vec;
int err;
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
- if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+ if (tt_vec & BIT(MLX5E_TT_ANY)) {
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_ANY]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_ANY]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
- if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+ if (tt_vec & BIT(MLX5E_TT_IPV4)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+ if (tt_vec & BIT(MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_UDP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_TCP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_AH);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_ESP);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
+
+err_del_ai:
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+ return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -498,44 +594,28 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (!priv->vlan.filter_disabled)
+ return;
- if (priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = false;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = false;
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (priv->vlan.filter_disabled)
+ return;
- if (!priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = true;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = true;
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
-
- mutex_lock(&priv->state_lock);
-
- set_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- mutex_unlock(&priv->state_lock);
-
- return err;
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +623,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mutex_lock(&priv->state_lock);
-
- clear_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
- mutex_unlock(&priv->state_lock);
-
- return 0;
-}
-
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
- int err;
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- if (err)
- return err;
- }
-
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- return err;
-
- if (priv->vlan.filter_disabled) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- if (err)
- return err;
- }
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
return 0;
}
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
-
- if (priv->vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +691,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5e_sync_netdev_addr(priv);
mlx5e_apply_netdev_addr(priv);
}
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
struct net_device *ndev = priv->netdev;
- bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
@@ -700,17 +738,6 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
ea->broadcast_enabled = broadcast_enabled;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
-
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_set_rx_mode_core(priv);
- mutex_unlock(&priv->state_lock);
-}
-
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +752,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
if (!g)
return -ENOMEM;
- g[0].log_sz = 2;
+ g[0].log_sz = 3;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.ethertype);
@@ -833,7 +860,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
mlx5_destroy_flow_table(priv->ft.vlan);
}
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
int err;
@@ -845,16 +872,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_table;
+
return 0;
+err_destroy_vlan_flow_table:
+ mlx5e_destroy_vlan_flow_table(priv);
+
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
return err;
}
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da1f9d7..59874d666cff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ u16 max_inline;
};
struct mlx5e_cq_param {
@@ -81,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
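+/* Read the per-port (PPORT) counters: one PPCNT access-register query per
+ * counter group (IEEE 802.3, RFC 2863, RFC 2819), copying each counter set
+ * out of the returned register payload.
+ */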
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *s = &priv->stats.pport;
+ u32 *in;
+ u32 *out;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ in = mlx5_vzalloc(sz);
+ out = mlx5_vzalloc(sz);
+ if (!in || !out)
+ goto free_out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->IEEE_802_3_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->IEEE_802_3_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2863_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2863_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2819_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2819_counters));
+
+free_out:
+ kvfree(in);
+ kvfree(out);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -107,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets = 0;
s->lro_bytes = 0;
s->rx_csum_none = 0;
+ s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
@@ -114,9 +157,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
- for (j = 0; j < priv->num_tc; j++) {
+ for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tso_packets += sq_stats->tso_packets;
@@ -199,8 +243,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none -
+ s->rx_csum_sw;
+ mlx5e_update_pport_counters(priv);
free_out:
kvfree(out);
}
@@ -272,6 +318,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
@@ -304,6 +352,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->channel = c;
rq->ix = c->ix;
+ rq->priv = c->priv;
return 0;
@@ -321,8 +370,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
@@ -342,11 +390,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
memcpy(rqc, param->rqc, sizeof(param->rqc));
- MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -389,11 +437,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_rq(mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -502,6 +546,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
return err;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
@@ -509,7 +555,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
+ sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+ sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -518,11 +566,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- sq->pdev = c->pdev;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
@@ -569,7 +618,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, user_index, sq->tc);
MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
@@ -579,7 +627,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +750,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
int err;
u32 i;
- param->wq.numa = cpu_to_node(c->cpu);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +781,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
}
cq->channel = c;
+ cq->priv = priv;
return 0;
}
@@ -743,8 +793,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -773,7 +822,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +839,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -901,13 +949,13 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
mlx5e_close_sq(&c->sq[tc]);
}
-static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
- int num_channels)
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
- c->tc_to_txq_map[i] = c->ix + i * num_channels;
+ priv->channeltc_to_txq_map[ix][i] =
+ ix + i * priv->params.num_channels;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
@@ -929,9 +977,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mr.key);
- c->num_tc = priv->num_tc;
+ c->num_tc = priv->params.num_tc;
- mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+ mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -1000,7 +1048,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
@@ -1014,7 +1062,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1108,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
+ int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
int j;
- priv->channel = kcalloc(priv->params.num_channels,
- sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+ GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;
mlx5e_build_channel_param(priv, &cparam);
- for (i = 0; i < priv->params.num_channels; i++) {
+ for (i = 0; i < nch; i++) {
err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
- for (j = 0; j < priv->params.num_channels; j++) {
+ for (j = 0; j < nch; j++) {
err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
if (err)
goto err_close_channels;
@@ -1109,67 +1159,73 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
kfree(priv->channel);
}
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
- memset(in, 0, sizeof(in));
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
- MLX5_SET(tisc, tisc, prio, tc);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return inv;
}
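For reference, mlx5e_bits_invert() above reverses the low 'size' bits of its
argument, which is how XOR-hash RSS entries get spread across the indirection
table. A minimal standalone sketch of the same transform, illustrative only
and outside the patch:

	#include <assert.h>

	/* Reverse the low 'size' bits of 'a', as mlx5e_bits_invert() does. */
	static unsigned int bits_invert(unsigned long a, int size)
	{
		unsigned int inv = 0;
		int i;

		for (i = 0; i < size; i++)
			inv |= ((a >> (size - i - 1)) & 1) << i;
		return inv;
	}

	int main(void)
	{
		assert(bits_invert(0x1, 3) == 0x4);	/* 0b001 becomes 0b100 */
		assert(bits_invert(0x6, 3) == 0x3);	/* 0b110 becomes 0b011 */
		return 0;
	}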
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
-}
+ int i;
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int err;
- int tc;
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ int ix = i;
- for (tc = 0; tc < num_tc; tc++) {
- err = mlx5e_open_tis(priv, tc);
- if (err)
- goto err_close_tises;
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+ ix = priv->params.indirection_rqt[ix];
+ ix = ix % priv->params.num_channels;
+ MLX5_SET(rqtc, rqtc, rq_num[i],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn);
}
+}
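/* Worked example, hypothetical sizes: with ETH_RSS_HASH_XOR and a log2
 * table size of 7, entry i == 1 reads indirection_rqt[64], because
 * mlx5e_bits_invert(1, 7) == 64; adjacent table entries therefore pull
 * from slots spread far apart, and each resolves to a live channel RQN,
 * or to the drop RQ while the device is closed.
 */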
- return 0;
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+ enum mlx5e_rqt_ix rqt_ix)
+{
-err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_close_tis(priv, tc);
+ switch (rqt_ix) {
+ case MLX5E_INDIRECTION_RQT:
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- return err;
-}
+ break;
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int tc;
+ default: /* MLX5E_SINGLE_RQ_RQT */
+ MLX5_SET(rqtc, rqtc, rq_num[0],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[0]->rq.rqn :
+ priv->drop_rq.rqn);
- for (tc = 0; tc < num_tc; tc++)
- mlx5e_close_tis(priv, tc);
+ break;
+ }
}
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
void *rqtc;
int inlen;
- int err;
int sz;
- int i;
+ int err;
- sz = 1 << priv->params.rx_hash_log_tbl_sz;
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1181,198 +1237,101 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- for (i = 0; i < sz; i++) {
- int ix = i % priv->params.num_channels;
-
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
- }
-
- MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
- if (!err)
- priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
kvfree(in);
return err;
}
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *rqtc;
+ int inlen;
+ int sz;
+ int err;
- memset(in, 0, sizeof(in));
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
- MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
- MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
- mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
- sizeof(out));
-}
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
- if (priv->params.lro_en) {
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[3]));
- }
+ err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn,
- priv->channel[0]->rq.rqn);
- break;
- default:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key),
- MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key));
- break;
- }
+ kvfree(in);
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+ return err;
+}
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+ mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
+}
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+ if (!priv->params.lro_en)
+ return;
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- }
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[2]));
}
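To make the payload-size scaling concrete, a small sketch assuming a 64KB LRO
WQE size (an assumption; the driver reads priv->params.lro_wqe_sz):

	#include <stdio.h>

	int main(void)
	{
		unsigned int lro_wqe_sz = 64 * 1024;	/* assumed value */
		unsigned int rough_hdr = 256;		/* ROUGH_MAX_L2_L3_HDR_SZ */

		/* The field is programmed in 256-byte units; this prints 255. */
		printf("%u\n", (lro_wqe_sz - rough_hdr) >> 8);
		return 0;
	}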
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+
+ void *in;
void *tirc;
int inlen;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+ tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
-{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
-{
- int err;
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_open_tir(priv, i);
- if (err)
- goto err_close_tirs;
- }
-
- return 0;
-
-err_close_tirs:
- for (i--; i >= 0; i--)
- mlx5e_close_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_close_tir(priv, i);
-}
-
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1400,6 +1359,8 @@ int mlx5e_open_locked(struct net_device *netdev)
int num_txqs;
int err;
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1408,74 +1369,19 @@ int mlx5e_open_locked(struct net_device *netdev)
if (err)
return err;
- err = mlx5e_open_tises(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
- __func__, err);
- return err;
- }
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- goto err_close_tises;
- }
-
- err = mlx5e_open_rqt(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
-
- err = mlx5e_open_tirs(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
- __func__, err);
- goto err_close_rqls;
- }
-
- err = mlx5e_open_flow_table(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
- __func__, err);
- goto err_close_tirs;
- }
-
- err = mlx5e_add_all_vlan_rules(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
- __func__, err);
- goto err_close_flow_table;
+ return err;
}
- mlx5e_init_eth_addr(priv);
-
- set_bit(MLX5E_STATE_OPENED, &priv->state);
-
mlx5e_update_carrier(priv);
- mlx5e_set_rx_mode_core(priv);
+ mlx5e_redirect_rqts(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
- return 0;
-
-err_close_flow_table:
- mlx5e_close_flow_table(priv);
-
-err_close_tirs:
- mlx5e_close_tirs(priv);
-
-err_close_rqls:
- mlx5e_close_rqt(priv);
-err_close_channels:
- mlx5e_close_channels(priv);
-
-err_close_tises:
- mlx5e_close_tises(priv);
-
- return err;
+ return 0;
}
static int mlx5e_open(struct net_device *netdev)
@@ -1496,14 +1402,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_set_rx_mode_core(priv);
- mlx5e_del_all_vlan_rules(priv);
+ mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
- mlx5e_close_flow_table(priv);
- mlx5e_close_tirs(priv);
- mlx5e_close_rqt(priv);
mlx5e_close_channels(priv);
- mlx5e_close_tises(priv);
return 0;
}
@@ -1520,26 +1421,341 @@ static int mlx5e_close(struct net_device *netdev)
return err;
}
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- int err = 0;
- int was_opened;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int err;
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ param->wq.db_numa_node = param->wq.buf_numa_node;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
- priv->params = *new_params;
+ rq->priv = priv;
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ cq->priv = priv;
+
+ return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+ struct mlx5e_cq_param cq_param;
+ struct mlx5e_rq_param rq_param;
+ struct mlx5e_rq *rq = &priv->drop_rq;
+ struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ int err;
+
+ memset(&cq_param, 0, sizeof(cq_param));
+ memset(&rq_param, 0, sizeof(rq_param));
+ mlx5e_build_rx_cq_param(priv, &cq_param);
+ mlx5e_build_rq_param(priv, &rq_param);
+
+ err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, &cq_param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ if (err)
+ goto err_disable_cq;
+
+ err = mlx5e_enable_rq(rq, &rq_param);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+ return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+ mlx5e_disable_rq(&priv->drop_rq);
+ mlx5e_destroy_rq(&priv->drop_rq);
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++) {
+ err = mlx5e_create_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_INDIRECTION_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_create_tir(priv, i);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_destroy_tir(priv, i);
return err;
}
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_destroy_tir(priv, i);
+}
+
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -1589,20 +1805,26 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
netdev_features_t changes = features ^ netdev->features;
- struct mlx5e_params new_params;
- bool update_params = false;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
if (changes & NETIF_F_LRO) {
- new_params.lro_en = !!(features & NETIF_F_LRO);
- update_params = true;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.lro_en = !!(features & NETIF_F_LRO);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
}
- if (update_params)
- mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,8 +1833,6 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- mutex_unlock(&priv->state_lock);
-
return 0;
}
@@ -1620,8 +1840,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
int max_mtu;
- int err;
+ int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -1633,8 +1854,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
}
mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(netdev);
+
netdev->mtu = new_mtu;
- err = mlx5e_update_priv_params(priv, &priv->params);
+
+ if (was_opened)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -1673,11 +1902,21 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
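A worked example of the capability arithmetic above, as a sketch with assumed
values (both log_bf_reg_size and the WQE prologue size are hypothetical here):

	static int example_max_inline(void)
	{
		int log_bf_reg_size = 9;			/* hypothetical cap */
		int bf_buf_size = (1 << log_bf_reg_size) / 2;	/* 256 bytes */

		/* Assuming a 64-byte struct mlx5e_tx_wqe: inline headers may
		 * use whatever the BlueFlame buffer holds beyond the fixed
		 * WQE, plus the 2-byte inline_hdr_start counted back in.
		 */
		return bf_buf_size - 64 + 2;			/* 194 bytes */
	}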
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- int num_comp_vectors)
+ int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1691,24 +1930,25 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
- priv->params.rx_hash_log_tbl_sz =
- (order_base_2(num_comp_vectors) >
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
- order_base_2(num_comp_vectors) :
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
+ priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
+
+ netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ priv->params.indirection_rqt[i] = i % num_channels;
- priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_comp_vectors;
- priv->num_tc = priv->params.num_tc;
+ priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1973,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (priv->num_tc > 1) {
+ if (priv->params.num_tc > 1)
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
- }
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
@@ -1798,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
int err;
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev_priv(mdev, netdev, nch);
mlx5e_build_netdev(netdev);
netif_carrier_off(netdev);
@@ -1819,43 +2059,95 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
- netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
- err = register_netdev(netdev);
+ err = mlx5e_create_tises(priv);
if (err) {
- netdev_err(netdev, "%s: register_netdev failed, %d\n",
- __func__, err);
+ mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
}
+ err = mlx5e_open_drop_rq(priv);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_tises;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ goto err_close_drop_rq;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+ goto err_destroy_rqt_indir;
+ }
+
+ err = mlx5e_create_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+ goto err_destroy_rqt_single;
+ }
+
+ err = mlx5e_create_flow_tables(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_destroy_flow_tables;
+ }
+
mlx5e_enable_async_events(priv);
+ schedule_work(&priv->set_rx_mode_work);
return priv;
+err_destroy_flow_tables:
+ mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+ mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+ mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
@@ -1879,13 +2171,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
struct net_device *netdev = priv->netdev;
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+ schedule_work(&priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
unregister_netdev(netdev);
+ mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_tirs(priv);
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_close_drop_rq(priv);
+ mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- mlx5e_disable_async_events(priv);
- flush_scheduled_work();
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9a9374131f5b..cf0098596e85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct iphdr));
ipv6 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct ipv6hdr));
ipv4 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
}
if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+ __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ goto csum_none;
+
+ if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (is_first_ethertype_ip(skb)) {
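+		/* For IP packets the HW delivers the full packet checksum,
+		 * so pass it up as CHECKSUM_COMPLETE and count it under the
+		 * csum_sw statistic.
+		 */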
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ rq->stats.csum_sw++;
+ } else {
+ goto csum_none;
+ }
+
+ return;
+
+csum_none:
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+}
+
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
- skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
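+		/* e.g. a 45000-byte aggregate of 31 segments (hypothetical
+		 * numbers) yields gso_size 1452, the actual per-segment
+		 * payload, instead of the fixed LRO WQE size.
+		 */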
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
- if (likely(netdev->features & NETIF_F_RXCSUM) &&
- (cqe->hds_ip_ext & CQE_L2_OK) &&
- (cqe->hds_ip_ext & CQE_L3_OK) &&
- (cqe->hds_ip_ext & CQE_L4_OK)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- rq->stats.csum_none++;
- }
+ mlx5e_handle_csum(netdev, cqe, rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f438e55..b73672f32e2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, 0);
}
}
@@ -106,13 +106,21 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
priv->default_vlan_prio;
int tc = netdev_get_prio_tc_map(dev, up);
- return priv->channel[channel_ix]->tc_to_txq_map[tc];
+ return priv->channeltc_to_txq_map[channel_ix][tc];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool bf)
{
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ /* Some NIC TX decisions, e.g. loopback, are based on the packet
+ * headers and occur before the data gather.
+ * Therefore these headers must be copied into the WQE.
+ */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
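+	/* A BlueFlame candidate inlines its whole linear header so the
+	 * entire WQE can be written out through the BlueFlame register.
+	 */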
+ if (bf && (skb_headlen(skb) <= sq->max_inline))
+ return skb_headlen(skb);
+
return MLX5E_MIN_INLINE;
}
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ bool bf = false;
u16 headlen;
u16 ds_cnt;
u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
else
sq->stats.csum_offload_none++;
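+	/* Replenish the BlueFlame budget only once the SQ has fully drained
+	 * (cc == pc); while completions are still outstanding keep it at zero.
+	 */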
+ if (sq->cc != sq->prev_cc) {
+ sq->prev_cc = sq->cc;
+ sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+ }
+
if (skb_is_gso(skb)) {
u32 payload_len;
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
- ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ bf = sq->bf_budget &&
+ !skb->xmit_more &&
+ !skb_shinfo(skb)->nr_frags;
+ ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
ETH_ZLEN);
}
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ int bf_sz = 0;
+
+ if (bf && sq->uar_bf_map)
+ bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
+ sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
sq->stats.packets++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 9335e5ae18cc..aa0d5ffe92d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,3 +200,25 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
return err;
}
+
+int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
+{
+ struct mlx5_cmd_query_special_contexts_mbox_in in;
+ struct mlx5_cmd_query_special_contexts_mbox_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ *rsvd_lkey = be32_to_cpu(out.resd_lkey);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..03aabdd79abe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
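+	/* Program log_uar_page_sz relative to 4KB so UAR pages track the
+	 * kernel PAGE_SIZE (PAGE_SHIFT - 12 == 0 on 4KB-page systems).
+	 */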
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
err = set_caps(dev, set_ctx, set_sz);
query_ex:
@@ -455,7 +457,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = dev_to_node(&mdev->pdev->dev);
+ int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,6 +656,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
}
#endif
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+ resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+ resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
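+	/* Map BAR 0 write-combined so BlueFlame doorbell payloads can be
+	 * burst-written to the device.
+	 */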
+ dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+ return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+ if (dev->priv.bf_mapping)
+ io_mapping_free(dev->priv.bf_mapping);
+}
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -668,6 +686,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
+ mutex_init(&priv->alloc_mutex);
+
+ priv->numa_node = dev_to_node(&dev->pdev->dev);
+
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
@@ -804,10 +826,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ if (map_bf_area(dev))
+ dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_free_comp_eqs;
+ goto err_unmap_bf_area;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -819,7 +844,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
-err_free_comp_eqs:
+err_unmap_bf_area:
+ unmap_bf_area(dev);
+
free_comp_eqs(dev);
err_stop_eqs:
@@ -877,6 +904,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+ unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88ecaecb4b..566a70488db1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
- mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ int err;
+
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ if (err)
+ return err;
+
return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 70147999f657..821caaab9bfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
+
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
- *status = MLX5_GET(paos_reg, out, oper_status);
+ *status = MLX5_GET(paos_reg, out, admin_status);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
int *max_mtu, int *oper_mtu, u8 port)
@@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+ MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (rx_pause)
+ *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+ if (tx_pause)
+ *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03026d5..b4c87c7b0cf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+ MLX5_SET(modify_tir_in, in, tirn, tirn);
+ MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -358,3 +370,44 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244710d5..74cae51436e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
@@ -61,4 +63,10 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef85873ceea..eb05c845ece9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
goto err_free_uar;
}
+ if (mdev->priv.bf_mapping)
+ uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+ uar->index << PAGE_SHIFT);
+
return 0;
err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
+ io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
mlx5_cmd_free_uar(mdev, uar->index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411582cf..ce21ee5b2357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
wq->sz_m1 = (1 << wq->log_sz) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69fb429..6c2a8f95093c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
struct mlx5_wq_param {
int linear;
- int numa;
+ int buf_numa_node;
+ int db_numa_node;
};
struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 000000000000..2941d9c5ae48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+ tristate "Mellanox Technologies Switch ASICs support"
+ ---help---
+ This driver supports the Mellanox Technologies Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_core.
+
+config MLXSW_PCI
+ tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+ depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ default m
+ ---help---
+ This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+ tristate "Mellanox Technologies SwitchX-2 support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports the Mellanox Technologies SwitchX-2 Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 000000000000..0a05f65ee814
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
+mlxsw_core-objs := core.o
+obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
+mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
+mlxsw_switchx2-objs := switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 000000000000..770db17eb03f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE 4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+ return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+ kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+ memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod, char *in_mbox,
+ size_t in_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod,
+ bool out_mbox_direct,
+ char *out_mbox, size_t out_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+ out_mbox_direct, NULL, 0,
+ out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+ MLXSW_CMD_OPCODE_QUERY_FW = 0x004,
+ MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006,
+ MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003,
+ MLXSW_CMD_OPCODE_MAP_FA = 0xFFF,
+ MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE,
+ MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100,
+ MLXSW_CMD_OPCODE_ACCESS_REG = 0x040,
+ MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201,
+ MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202,
+ MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E,
+ MLXSW_CMD_OPCODE_QUERY_DQ = 0x022,
+ MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016,
+ MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017,
+ MLXSW_CMD_OPCODE_QUERY_CQ = 0x018,
+ MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
+ MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
+ MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+ switch (opcode) {
+ case MLXSW_CMD_OPCODE_QUERY_FW:
+ return "QUERY_FW";
+ case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+ return "QUERY_BOARDINFO";
+ case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+ return "QUERY_AQ_CAP";
+ case MLXSW_CMD_OPCODE_MAP_FA:
+ return "MAP_FA";
+ case MLXSW_CMD_OPCODE_UNMAP_FA:
+ return "UNMAP_FA";
+ case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+ return "CONFIG_PROFILE";
+ case MLXSW_CMD_OPCODE_ACCESS_REG:
+ return "ACCESS_REG";
+ case MLXSW_CMD_OPCODE_SW2HW_DQ:
+ return "SW2HW_DQ";
+ case MLXSW_CMD_OPCODE_HW2SW_DQ:
+ return "HW2SW_DQ";
+ case MLXSW_CMD_OPCODE_2ERR_DQ:
+ return "2ERR_DQ";
+ case MLXSW_CMD_OPCODE_QUERY_DQ:
+ return "QUERY_DQ";
+ case MLXSW_CMD_OPCODE_SW2HW_CQ:
+ return "SW2HW_CQ";
+ case MLXSW_CMD_OPCODE_HW2SW_CQ:
+ return "HW2SW_CQ";
+ case MLXSW_CMD_OPCODE_QUERY_CQ:
+ return "QUERY_CQ";
+ case MLXSW_CMD_OPCODE_SW2HW_EQ:
+ return "SW2HW_EQ";
+ case MLXSW_CMD_OPCODE_HW2SW_EQ:
+ return "HW2SW_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_EQ:
+ return "QUERY_EQ";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum mlxsw_cmd_status {
+ /* Command execution succeeded. */
+ MLXSW_CMD_STATUS_OK = 0x00,
+ /* Internal error (e.g. bus error) occurred while processing command. */
+ MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported. */
+ MLXSW_CMD_STATUS_BAD_OP = 0x02,
+ /* Parameter not supported, parameter out of range. */
+ MLXSW_CMD_STATUS_BAD_PARAM = 0x03,
+ /* System was not enabled or bad system state. */
+ MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource, or resource in
+ * inappropriate ownership.
+ */
+ MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command. */
+ MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits. */
+ MLXSW_CMD_STATUS_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership. */
+ MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09,
+ /* Index out of range (might be beyond table size or attempt to
+ * access a reserved resource).
+ */
+ MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
+ /* NVMEM checksum/CRC failed. */
+ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Bad management packet (silently discarded). */
+ MLXSW_CMD_STATUS_BAD_PKT = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_CMD_STATUS_OK:
+ return "OK";
+ case MLXSW_CMD_STATUS_INTERNAL_ERR:
+ return "INTERNAL_ERR";
+ case MLXSW_CMD_STATUS_BAD_OP:
+ return "BAD_OP";
+ case MLXSW_CMD_STATUS_BAD_PARAM:
+ return "BAD_PARAM";
+ case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+ return "BAD_SYS_STATE";
+ case MLXSW_CMD_STATUS_BAD_RESOURCE:
+ return "BAD_RESOURCE";
+ case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+ return "RESOURCE_BUSY";
+ case MLXSW_CMD_STATUS_EXCEED_LIM:
+ return "EXCEED_LIM";
+ case MLXSW_CMD_STATUS_BAD_RES_STATE:
+ return "BAD_RES_STATE";
+ case MLXSW_CMD_STATUS_BAD_INDEX:
+ return "BAD_INDEX";
+ case MLXSW_CMD_STATUS_BAD_NVMEM:
+ return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_BAD_PKT:
+ return "BAD_PKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
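+
+/* For example (a sketch, not driver code): after a successful QUERY_FW,
+ * a caller would read this field through the accessor generated by the
+ * MLXSW_ITEM32() macro above:
+ *
+ *	u32 fw_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(out_mbox);
+ */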
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is incremented
+ * whenever a non-backward-compatible change is made to the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports. Offset from the
+ * error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
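+
+/* Sketch (illustrative only): the PSID is copied out of the mailbox with
+ * the buffer accessor generated by MLXSW_ITEM_BUF() above:
+ *
+ *	char psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+ *
+ *	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(out_mbox, psid);
+ */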
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 vpm_entries_count)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+ 0, vpm_entries_count,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
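+
+/* Sketch of filling a single VPM entry (index 0) before calling
+ * mlxsw_cmd_map_fa() with vpm_entries_count == 1; "dma_addr" is a
+ * hypothetical DMA-mapped, pinned page address, and log2size 0 maps
+ * one 4KB page:
+ *
+ *	mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, 0, dma_addr);
+ *	mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, 0, 0);
+ */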
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be performed before the MAP_FA
+ * command can be executed again.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; therefore a software reset of the device is
+ * required following an unsuccessful completion of the command. A
+ * software reset is also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+ char *in_mbox)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+ 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
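+
+/* Sketch: a profile value only takes effect when its capability bit is
+ * set, so a caller requesting e.g. 64 LAG IDs would set both before
+ * issuing CONFIG_PROFILE (illustrative only):
+ *
+ *	mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ *	mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, 64);
+ */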
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * value for the Switch Partition is taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+ 0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+ 0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+ 0x60, 0, 8, 0x08, 0x00, false);
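+
+/* Sketch: enabling Ethernet on switch partition (swid) 0 would set the
+ * "set type" mask bit and the Ethernet type value (illustrative only):
+ *
+ *	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, 0, BIT(0));
+ *	mlxsw_cmd_mbox_config_profile_swid_config_type_set(mbox, 0, 2);
+ */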
+
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, char *out_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+ 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+ opcode_mod, dq_number,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+ MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+ MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
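+
+/* Sketch of a minimal SDQ mailbox setup before mlxsw_cmd_sw2hw_sdq()
+ * (illustrative; "dq_pa" is a hypothetical physical address of the
+ * descriptor queue memory, and log2_dq_sz 3 means eight 4KB pages):
+ *
+ *	mlxsw_cmd_mbox_sw2hw_dq_cq_set(in_mbox, cq_number);
+ *	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(in_mbox, 3);
+ *	mlxsw_cmd_mbox_sw2hw_dq_pa_set(in_mbox, 0, dq_pa);
+ */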
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * software should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from its
+ * current state. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+ opcode_mod, dq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+ 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: updates of the CQ consumer counter
+ * (poll for completion) or request completion notification (arm CQ)
+ * doorbells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
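+
+/* Sketch of a minimal CQ mailbox setup before mlxsw_cmd_sw2hw_cq()
+ * (illustrative only; "eq_number" is the completion EQ, "cq_pa" is a
+ * hypothetical physical address, and log_cq_size 4 means 16 entries):
+ *
+ *	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, eq_number);
+ *	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, 4);
+ *	mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, 0, cq_pa);
+ */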
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+ u32 cq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+ 0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+ 0, cq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+ 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The HW2SW_EQ command transfers ownership of an EQ context entry from
+ * hardware to software.
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+ u32 eq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+ 0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The QUERY_EQ command retrieves a snapshot of the current EQ context entry.
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+ 0, eq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif /* _MLXSW_CMD_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 000000000000..dbcaf5df8967
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1295 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+ u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
+ u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+ u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
+ u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+ struct u64_stats_sync syncp;
+ u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+ u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+ u32 trap_rx_invalid;
+ u32 port_rx_invalid;
+};
+
+struct mlxsw_core {
+ struct mlxsw_driver *driver;
+ const struct mlxsw_bus *bus;
+ void *bus_priv;
+ const struct mlxsw_bus_info *bus_info;
+ struct list_head rx_listener_list;
+ struct list_head event_listener_list;
+ struct {
+ struct sk_buff *resp_skb;
+ u64 tid;
+ wait_queue_head_t wait;
+ bool trans_active;
+ struct mutex lock; /* One EMAD transaction at a time. */
+ bool use_emad;
+ } emad;
+ struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+ struct dentry *dbg_dir;
+ struct {
+ struct debugfs_blob_wrapper vsd_blob;
+ struct debugfs_blob_wrapper psid_blob;
+ } dbg;
+ unsigned long driver_priv[0];
+ /* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+ struct list_head list;
+ struct mlxsw_rx_listener rxl;
+ void *priv;
+};
+
+struct mlxsw_event_listener_item {
+ struct list_head list;
+ struct mlxsw_event_listener el;
+ void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
+enum mlxsw_core_reg_access_type {
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+ switch (type) {
+ case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+ return "query";
+ case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+ return "write";
+ }
+ BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+ mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+ mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+ const struct mlxsw_reg_info *reg,
+ char *payload)
+{
+ mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+ mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+ memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+ const struct mlxsw_reg_info *reg,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+ mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+ mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+ mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+ if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+ else
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+ mlxsw_emad_op_tlv_class_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+ char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+ mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+ mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+ mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+ mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+ mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+ skb_reset_mac_header(skb);
+
+ return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ char *buf;
+
+ buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_end_tlv(buf);
+
+ buf = skb_push(skb, reg->len + sizeof(u32));
+ mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+ buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+ mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+ return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ int err;
+ int ret;
+
+ err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+ if (err) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ dev_kfree_skb(skb);
+ return err;
+ }
+
+ mlxsw_core->emad.trans_active = true;
+ ret = wait_event_timeout(mlxsw_core->emad.wait,
+ !(mlxsw_core->emad.trans_active),
+ msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+ if (!ret) {
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core->emad.trans_active = false;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+ char *op_tlv)
+{
+ enum mlxsw_emad_op_tlv_status status;
+ u64 tid;
+
+ status = mlxsw_emad_op_tlv_status_get(op_tlv);
+ tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return 0;
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EAGAIN;
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ default:
+ dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EIO;
+ }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb)
+{
+ return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct sk_buff *trans_skb;
+ int n_retry;
+ int err;
+
+ n_retry = 0;
+retry:
+ /* We copy the EMAD to a new skb, since we might need
+ * to retransmit it in case of failure.
+ */
+ trans_skb = skb_copy(skb, GFP_KERNEL);
+ if (!trans_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+ if (!err) {
+ struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+ err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+ if (err)
+ dev_kfree_skb(resp_skb);
+ if (!err || err != -EAGAIN)
+ goto out;
+ }
+ if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+
+out:
+ dev_kfree_skb(skb);
+ mlxsw_core->emad.tid++;
+ return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ if (mlxsw_emad_is_resp(skb) &&
+ mlxsw_core->emad.trans_active &&
+ mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+ mlxsw_core->emad.resp_skb = skb;
+ mlxsw_core->emad.trans_active = false;
+ wake_up(&mlxsw_core->emad.wait);
+ } else {
+ dev_kfree_skb(skb);
+ }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+ .func = mlxsw_emad_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ /* Set the upper 32 bits of the transaction ID field to a random
+ * number. This allows us to discard EMADs addressed to other
+ * devices.
+ */
+ get_random_bytes(&mlxsw_core->emad.tid, 4);
+ mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+ init_waitqueue_head(&mlxsw_core->emad.wait);
+ mlxsw_core->emad.trans_active = false;
+ mutex_init(&mlxsw_core->emad.lock);
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_emad_traps_set(mlxsw_core);
+ if (err)
+ goto err_emad_trap_set;
+
+ mlxsw_core->emad.use_emad = true;
+
+ return 0;
+
+err_emad_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+ u16 reg_len)
+{
+ struct sk_buff *skb;
+ u16 emad_len;
+
+ emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+ (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+ sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+ return NULL;
+
+ skb = netdev_alloc_skb(NULL, emad_len);
+ if (!skb)
+ return NULL;
+ memset(skb->data, 0, emad_len);
+ skb_reserve(skb, emad_len);
+
+ return skb;
+}
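+
+/* The frame that mlxsw_emad_construct() assembles on such an skb ends up
+ * laid out as follows (a sketch; the TX header accounted for in emad_len
+ * above is prepended later by the bus driver):
+ *
+ *	| Ethernet hdr | OP TLV | REG TLV hdr + payload | END TLV |
+ */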
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_core *mlxsw_core = file->private;
+ struct mlxsw_core_pcpu_stats *p;
+ u64 rx_packets, rx_bytes;
+ u64 tmp_rx_packets, tmp_rx_bytes;
+ u32 rx_dropped, rx_invalid;
+ unsigned int start;
+ int i;
+ int j;
+ static const char hdr[] =
+ " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
+
+ seq_printf(file, hdr);
+ for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->trap_rx_packets[i];
+ tmp_rx_bytes = p->trap_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->trap_rx_dropped[i];
+ }
+ seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->trap_rx_invalid;
+ }
+ seq_printf(file, "trap INV %10u\n",
+ rx_invalid);
+
+ for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->port_rx_packets[i];
+ tmp_rx_bytes = p->port_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->port_rx_dropped[i];
+ }
+ seq_printf(file, "port %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->port_rx_invalid;
+ }
+ seq_printf(file, "port INV %10u\n",
+ rx_invalid);
+ return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+ struct mlxsw_core *mlxsw_core = inode->i_private;
+
+ return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = mlxsw_core_rx_stats_dbg_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_del(&mlxsw_driver->list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+ if (strcmp(mlxsw_driver->kind, kind) == 0)
+ return mlxsw_driver;
+ }
+ return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ if (!mlxsw_driver) {
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ }
+ if (mlxsw_driver) {
+ if (!try_module_get(mlxsw_driver->owner))
+ mlxsw_driver = NULL;
+ }
+
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ if (!mlxsw_driver)
+ return;
+ module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+ const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+ mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+ mlxsw_core_dbg_root);
+ if (!mlxsw_core->dbg_dir)
+ return -ENOMEM;
+ debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+ mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+ mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+ mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+ debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.vsd_blob);
+ mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+ mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+ debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.psid_blob);
+ return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+ debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv)
+{
+ const char *device_kind = mlxsw_bus_info->device_kind;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_driver *mlxsw_driver;
+ size_t alloc_size;
+ int err;
+
+ mlxsw_driver = mlxsw_core_driver_get(device_kind);
+ if (!mlxsw_driver)
+ return -EINVAL;
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_core) {
+ err = -ENOMEM;
+ goto err_core_alloc;
+ }
+
+ INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+ INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+ mlxsw_core->driver = mlxsw_driver;
+ mlxsw_core->bus = mlxsw_bus;
+ mlxsw_core->bus_priv = bus_priv;
+ mlxsw_core->bus_info = mlxsw_bus_info;
+
+ mlxsw_core->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+ if (!mlxsw_core->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ if (err)
+ goto err_bus_init;
+
+ err = mlxsw_emad_init(mlxsw_core);
+ if (err)
+ goto err_emad_init;
+
+ err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+ mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+
+ err = mlxsw_core_debugfs_init(mlxsw_core);
+ if (err)
+ goto err_debugfs_init;
+
+ return 0;
+
+err_debugfs_init:
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+ mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
+ free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+ kfree(mlxsw_core);
+err_core_alloc:
+ mlxsw_core_driver_put(device_kind);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+ const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+ mlxsw_core_debugfs_fini(mlxsw_core);
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_emad_fini(mlxsw_core);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ free_percpu(mlxsw_core->pcpu_stats);
+ kfree(mlxsw_core);
+ mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+ return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+ const struct mlxsw_rx_listener *rxl_b)
+{
+ return (rxl_a->func == rxl_b->func &&
+ rxl_a->local_port == rxl_b->local_port &&
+ rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+ rxl_item->priv == priv)
+ return rxl_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (rxl_item)
+ return -EEXIST;
+ rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+ if (!rxl_item)
+ return -ENOMEM;
+ rxl_item->rxl = *rxl;
+ rxl_item->priv = priv;
+
+ list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (!rxl_item)
+ return;
+ list_del_rcu(&rxl_item->list);
+ synchronize_rcu();
+ kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_reg_info reg;
+ char *payload;
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+ reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+ payload = mlxsw_emad_reg_payload(op_tlv);
+ event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+ dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+ const struct mlxsw_event_listener *el_b)
+{
+ return (el_a->func == el_b->func &&
+ el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+
+ list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+ if (__is_event_listener_equal(&el_item->el, el) &&
+ el_item->priv == priv)
+ return el_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ int err;
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (el_item)
+ return -EEXIST;
+ el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+ if (!el_item)
+ return -ENOMEM;
+ el_item->el = *el;
+ el_item->priv = priv;
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ if (err)
+ goto err_rx_listener_register;
+
+ /* Save the item only now that an RX listener was successfully
+ * registered for it.
+ */
+ list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+ return 0;
+
+err_rx_listener_register:
+ kfree(el_item);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
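+
+/* Editor's sketch (not part of this patch): registering an event listener.
+ * The core wraps it in an RX listener on MLXSW_PORT_DONT_CARE, so the
+ * callback fires for the trap regardless of port. The trap ID and the
+ * "example_" names are hypothetical.
+ */
+#if 0
+static void example_event_func(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv)
+{
+ /* reg->id, reg->len and payload were decoded from the event
+ * EMAD's OP and REG TLVs by mlxsw_core_event_listener_func().
+ */
+}
+
+static int example_events_init(struct mlxsw_core *mlxsw_core)
+{
+ static const struct mlxsw_event_listener example_el = {
+ .func = example_event_func,
+ .trap_id = MLXSW_TRAP_ID_EXAMPLE, /* hypothetical */
+ };
+
+ return mlxsw_core_event_listener_register(mlxsw_core,
+ &example_el, NULL);
+}
+#endif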
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (!el_item)
+ return;
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ list_del(&el_item->list);
+ kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err;
+ char *op_tlv;
+ struct sk_buff *skb;
+ struct mlxsw_tx_info tx_info = {
+ .local_port = MLXSW_PORT_CPU_PORT,
+ .is_emad = true,
+ };
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+ mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+ err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+ if (!err) {
+ op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+ memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+ reg->len);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+ mlxsw_core->emad.tid - 1);
+ mlxsw_core_buf_dump_dbg(mlxsw_core,
+ mlxsw_core->emad.resp_skb->data,
+ mlxsw_core->emad.resp_skb->len);
+
+ dev_kfree_skb(mlxsw_core->emad.resp_skb);
+ }
+
+ return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err, n_retry;
+ char *in_mbox, *out_mbox, *tmp;
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox) {
+ err = -ENOMEM;
+ goto free_in_mbox;
+ }
+
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+ mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+ n_retry = 0;
+retry:
+ err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ if (!err) {
+ err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ }
+
+ if (!err)
+ memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ reg->len);
+
+ mlxsw_core->emad.tid++;
+ mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+ mlxsw_cmd_mbox_free(in_mbox);
+ return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ u64 cur_tid;
+ int err;
+
+ if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+ dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+ return -EINTR;
+ }
+
+ cur_tid = mlxsw_core->emad.tid;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ /* During initialization the EMAD interface is not available to us,
+ * so we default to the command interface. We switch to the EMAD
+ * interface once the appropriate traps have been set.
+ */
+ if (!mlxsw_core->emad.use_emad)
+ err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+ payload, type);
+ else
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type);
+
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ mutex_unlock(&mlxsw_core->emad.lock);
+ return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
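+
+/* Editor's sketch (not part of this patch): the typical read-modify-write
+ * cycle through the two wrappers above. The register and its accessors are
+ * hypothetical; MLXSW_REG() and the *_pl payload convention are assumed
+ * from reg.h.
+ */
+#if 0
+static int example_reg_rmw(struct mlxsw_core *mlxsw_core)
+{
+ char example_pl[MLXSW_REG_EXAMPLE_LEN]; /* hypothetical */
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(example), example_pl);
+ if (err)
+ return err;
+ mlxsw_reg_example_field_set(example_pl, 1);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(example), example_pl);
+}
+#endif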
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+ const struct mlxsw_rx_listener *rxl;
+ struct mlxsw_core_pcpu_stats *pcpu_stats;
+ u8 local_port = rx_info->sys_port;
+ bool found = false;
+
+ dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+ __func__, rx_info->sys_port, rx_info->trap_id);
+
+ if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+ (local_port >= MLXSW_PORT_MAX_PORTS))
+ goto drop;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ rxl = &rxl_item->rxl;
+ if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+ rxl->local_port == local_port) &&
+ rxl->trap_id == rx_info->trap_id) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ goto drop;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->port_rx_packets[local_port]++;
+ pcpu_stats->port_rx_bytes[local_port] += skb->len;
+ pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+ pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ rxl->func(skb, local_port, rxl_item->priv);
+ return;
+
+drop:
+ if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+ if (local_port >= MLXSW_PORT_MAX_PORTS)
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size)
+{
+ u8 status;
+ int err;
+
+ BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+ if (!mlxsw_core->bus->cmd_exec)
+ return -EOPNOTSUPP;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+ if (in_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+ }
+
+ err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+ opcode_mod, in_mod, out_mbox_direct,
+ in_mbox, in_mbox_size,
+ out_mbox, out_mbox_size, &status);
+
+ if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod, status, mlxsw_cmd_status_str(status));
+ } else if (err == -ETIMEDOUT) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod);
+ }
+
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
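+
+/* Editor's sketch (not part of this patch): cmd.h builds thin static
+ * inline wrappers on top of mlxsw_cmd_exec(); a hypothetical one taking
+ * only an input mailbox could look like this (MLXSW_CMD_MBOX_SIZE is
+ * assumed from cmd.h, the opcode is made up).
+ */
+#if 0
+static inline int example_cmd(struct mlxsw_core *mlxsw_core, char *in_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, EXAMPLE_OPCODE, 0, 0, false,
+ in_mbox, MLXSW_CMD_MBOX_SIZE, NULL, 0);
+}
+#endif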
+
+static int __init mlxsw_core_module_init(void)
+{
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+ debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000000..165808471188
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind) \
+ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+ u8 local_port;
+ bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+ void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+ u8 local_port;
+ u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+ void (*func)(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv);
+ enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+ u16 sys_port;
+ int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+ u8 used_type:1,
+ used_properties:1;
+ u8 type;
+ u8 properties;
+};
+
+struct mlxsw_config_profile {
+ u16 used_max_vepa_channels:1,
+ used_max_lag:1,
+ used_max_port_per_lag:1,
+ used_max_mid:1,
+ used_max_pgt:1,
+ used_max_system_port:1,
+ used_max_vlan_groups:1,
+ used_max_regions:1,
+ used_flood_tables:1,
+ used_flood_mode:1,
+ used_max_ib_mc:1,
+ used_max_pkey:1,
+ used_ar_sec:1,
+ used_adaptive_routing_group_cap:1;
+ u8 max_vepa_channels;
+ u16 max_lag;
+ u16 max_port_per_lag;
+ u16 max_mid;
+ u16 max_pgt;
+ u16 max_system_port;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u8 max_flood_tables;
+ u8 max_vid_flood_tables;
+ u8 flood_mode;
+ u16 max_ib_mc;
+ u16 max_pkey;
+ u8 ar_sec;
+ u16 adaptive_routing_group_cap;
+ u8 arn;
+ struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+ struct list_head list;
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info);
+ void (*fini)(void *driver_priv);
+ void (*txhdr_construct)(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ u8 txhdr_len;
+ const struct mlxsw_config_profile *profile;
+};
+
+struct mlxsw_bus {
+ const char *kind;
+ int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile);
+ void (*fini)(void *bus_priv);
+ bool (*skb_transmit_busy)(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info);
+ int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+ const char *device_kind;
+ const char *device_name;
+ struct device *dev;
+ struct {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+ } fw_rev;
+ u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+ u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
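+/* Editor's sketch (not part of this patch): the shape of a device driver
+ * built against this header. All "example_" names are hypothetical.
+ */
+#if 0
+static struct mlxsw_driver example_driver = {
+ .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct example_priv),
+ .init = example_init,
+ .fini = example_fini,
+ .txhdr_construct = example_txhdr_construct,
+ .txhdr_len = EXAMPLE_TXHDR_LEN,
+ .profile = &example_config_profile,
+};
+
+/* The module would call mlxsw_core_driver_register(&example_driver) from
+ * its init and declare MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2)
+ * so that mlxsw_core_driver_get() can request_module() it by device kind.
+ */
+#endif
+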
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000000..97b6bb5d9185
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in bytes */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in bytes */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+ MLXSW_EMAD_TLV_TYPE_END,
+ MLXSW_EMAD_TLV_TYPE_OP,
+ MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_USERDATA,
+ MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 units */
+
+enum {
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+ MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+ MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+ MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+ MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+ MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+ MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+ MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+ MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline const char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return "operation performed";
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ return "device is busy";
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ return "version not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ return "unknown TLV";
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ return "register not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ return "class not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ return "method not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ return "bad parameter";
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ return "resource not available";
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return "acknowledged. retransmit";
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum {
+ MLXSW_EMAD_OP_TLV_REQUEST,
+ MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+ MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+ MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 units */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 000000000000..ffd55d030ce2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+ unsigned short offset; /* bytes in container */
+ unsigned short step; /* step in bytes for indexed items */
+ unsigned short in_step_offset; /* offset within one step */
+ unsigned char shift; /* shift in bits */
+ unsigned char element_size; /* size of element in bit array */
+ bool no_real_shift;
+ union {
+ unsigned char bits;
+ unsigned short bytes;
+ } size;
+ const char *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+ size_t typesize)
+{
+ BUG_ON(index && !item->step);
+ if (item->offset % typesize != 0 ||
+ item->step % typesize != 0 ||
+ item->in_step_offset % typesize != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+ item->name, item->offset, item->step,
+ item->in_step_offset, typesize);
+ BUG();
+ }
+
+ return ((item->offset + item->step * index + item->in_step_offset) /
+ typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 tmp;
+
+ tmp = be16_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+ unsigned short index, u16 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u16 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be16_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 tmp;
+
+ tmp = be32_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+ unsigned short index, u32 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u32 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be32_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be32(tmp);
+}
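+
+/* Editor's note (worked example, not in the original): for a 3-bit item at
+ * shift 8, mask = GENMASK(2, 0) << 8 = 0x700. Setting val = 5 clears bits
+ * 10:8 of the big-endian word and ORs in 5 << 8 = 0x500, leaving every
+ * other bit of the u32 untouched.
+ */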
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 tmp;
+
+ tmp = be64_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+ unsigned short index, u64 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+ u64 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be64_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+ struct mlxsw_item *item)
+{
+ memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+ struct mlxsw_item *item)
+{
+ memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+ u16 max_index, be_index;
+ u16 offset; /* byte offset inside the array */
+
+ BUG_ON(index && !item->element_size);
+ if (item->offset % sizeof(u32) != 0 ||
+ BITS_PER_BYTE % item->element_size != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+ item->name, item->offset, item->element_size);
+ BUG();
+ }
+
+ max_index = (item->size.bytes << 3) / item->element_size - 1;
+ be_index = max_index - index;
+ offset = be_index * item->element_size >> 3;
+ *shift = index % (BITS_PER_BYTE / item->element_size) * item->element_size;
+
+ return item->offset + offset;
+}
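+
+/* Editor's note (worked example, not in the original): for a 4-byte item
+ * of 2-bit elements there are 16 elements. Element 0 maps to be_index 15,
+ * i.e. byte offset 3 at shift 0 (bits 1:0 of the last byte); element 3
+ * sits in byte 3 at shift 6; element 4 starts byte 2 at shift 0. Elements
+ * therefore fill the buffer from its last byte backwards, low bits first.
+ */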
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+ u16 index)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+ tmp = buf[offset];
+ tmp >>= shift;
+ tmp &= GENMASK(item->element_size - 1, 0);
+ return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+ u16 index, u8 val)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+ u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+ val <<= shift;
+ val &= mask;
+ tmp = buf[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname) \
+ mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
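+
+/* Editor's sketch (not part of this patch): one MLXSW_ITEM32() invocation
+ * and what it generates for its user. The register and field below are
+ * hypothetical.
+ */
+#if 0
+MLXSW_ITEM32(reg, example, field, 0x04, 16, 8);
+/* ...expands to:
+ * u32 mlxsw_reg_example_field_get(char *buf);
+ * void mlxsw_reg_example_field_set(char *buf, u32 val);
+ * accessing an 8-bit value at bits 23:16 of the big-endian u32 found at
+ * byte offset 0x04 of the register payload.
+ */
+#endif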
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
+ _sizebits, _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+ _element_size) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .element_size = _element_size, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
+{ \
+ return __mlxsw_item_bit_array_get(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
+{ \
+ __mlxsw_item_bit_array_set(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 000000000000..462cea31ecbb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1826 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+ switch (id->device) {
+ case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+ return MLXSW_DEVICE_KIND_SWITCHX2;
+ default:
+ BUG();
+ }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+ ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+ MLXSW_PCI_QUEUE_TYPE_SDQ,
+ MLXSW_PCI_QUEUE_TYPE_RDQ,
+ MLXSW_PCI_QUEUE_TYPE_CQ,
+ MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+ switch (q_type) {
+ case MLXSW_PCI_QUEUE_TYPE_SDQ:
+ return "sdq";
+ case MLXSW_PCI_QUEUE_TYPE_RDQ:
+ return "rdq";
+ case MLXSW_PCI_QUEUE_TYPE_CQ:
+ return "cq";
+ case MLXSW_PCI_QUEUE_TYPE_EQ:
+ return "eq";
+ }
+ BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+ MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+ MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+ 0, /* unused */
+ 0, /* unused */
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+ char *buf;
+ dma_addr_t mapaddr;
+ size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+ char *elem; /* pointer to actual dma mapped element mem chunk */
+ union {
+ struct {
+ struct sk_buff *skb;
+ } sdq;
+ struct {
+ struct sk_buff *skb;
+ } rdq;
+ } u;
+};
+
+struct mlxsw_pci_queue {
+ spinlock_t lock; /* for queue accesses */
+ struct mlxsw_pci_mem_item mem_item;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u16 producer_counter;
+ u16 consumer_counter;
+ u16 count; /* number of elements in queue */
+ u8 num; /* queue number */
+ u8 elem_size; /* size of one element */
+ enum mlxsw_pci_queue_type type;
+ struct tasklet_struct tasklet; /* queue processing tasklet */
+ struct mlxsw_pci *pci;
+ union {
+ struct {
+ u32 comp_sdq_count;
+ u32 comp_rdq_count;
+ } cq;
+ struct {
+ u32 ev_cmd_count;
+ u32 ev_comp_count;
+ u32 ev_other_count;
+ } eq;
+ } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+ struct mlxsw_pci_queue *q;
+ u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+ u32 doorbell_offset;
+ struct msix_entry msix_entry;
+ struct mlxsw_core *core;
+ struct {
+ u16 num_pages;
+ struct mlxsw_pci_mem_item *items;
+ } fw_area;
+ struct {
+ struct mlxsw_pci_mem_item out_mbox;
+ struct mlxsw_pci_mem_item in_mbox;
+ struct mutex lock; /* Lock access to command registers */
+ bool nopoll;
+ wait_queue_head_t wait;
+ bool wait_done;
+ struct {
+ u8 status;
+ u64 out_param;
+ } comp;
+ } cmd;
+ struct mlxsw_bus_info bus_info;
+ struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+ tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+ size_t elem_size, int elem_index)
+{
+ return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->producer_counter & (q->count - 1);
+
+ if ((q->producer_counter - q->consumer_counter) == q->count)
+ return NULL;
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->consumer_counter & (q->count - 1);
+
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+ return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+ u32 (*get_elem_owner_func)(char *))
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = get_elem_owner_func(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
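+
+/* Editor's note (not in the original): q->count is a power of two, so
+ * (counter & (q->count - 1)) is the ring index while (counter & q->count)
+ * toggles each time the counter wraps around the ring. Hardware writes the
+ * owner bit of each element it completes; mlxsw_pci_elem_hw_owned()
+ * reports an element as still hardware-owned while its owner bit differs
+ * from the current wrap generation, and the helper above returns NULL in
+ * that case instead of consuming it.
+ */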
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+ return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+ return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_type_offset[q->type],
+ q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_arm_type_offset[q->type],
+ q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+ q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+ int page_index)
+{
+ return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Use the CQ with the same number as this SDQ. */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, char *frag_data, size_t frag_len,
+ int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ dma_addr_t mapaddr;
+
+ mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+ if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+ return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+ dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+ if (!frag_len)
+ return;
+ pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ size_t buf_len = MLXSW_PORT_MAX_MTU;
+ char *wqe = elem_info->elem;
+ struct sk_buff *skb;
+ int err;
+
+ elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Assume that wqe was previously zeroed. */
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+ goto err_frag_map;
+
+ elem_info->u.rdq.skb = skb;
+ return 0;
+
+err_frag_map:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ struct sk_buff *skb;
+ char *wqe;
+
+ skb = elem_info->u.rdq.skb;
+ wqe = elem_info->elem;
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Use the CQ whose number is this RDQ's number plus
+ * MLXSW_PCI_SDQS_COUNT, as the lower CQ numbers are assigned to SDQs.
+ */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ BUG_ON(!elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ goto rollback;
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ }
+
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+ return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_cqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.cq.comp_sdq_count,
+ q->u.cq.comp_rdq_count, q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.sdq.skb;
+ wqe = elem_info->elem;
+ for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ elem_info->u.sdq.skb = NULL;
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+ spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ struct mlxsw_rx_info rx_info;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.sdq.skb;
+ if (!skb)
+ return;
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+ /* We do not support LAG yet */
+ if (mlxsw_pci_cqe_lag_get(cqe))
+ goto drop;
+
+ rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+ rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe))
+ byte_count -= ETH_FCS_LEN;
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+ memset(wqe, 0, q->elem_size);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err && net_ratelimit())
+ dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return;
+
+drop:
+ dev_kfree_skb_any(skb);
+ goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
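+/* Completed elements are handed back to software through the per-element
+ * owner bit: mlxsw_pci_queue_sw_elem_get() (defined earlier in this file)
+ * returns the next element only once its owner bit shows that the hardware
+ * has finished writing it.
+ */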
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ char *cqe;
+ int items = 0;
+ int credits = q->count >> 1;
+
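+ /* Consume at most half of the ring per run; anything left over is
+ * handled on a later run, after the doorbells below are rung.
+ */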
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+ if (sendq) {
+ struct mlxsw_pci_queue *sdq;
+
+ sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_sdq_count++;
+ } else {
+ struct mlxsw_pci_queue *rdq;
+
+ rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_rdq_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.eq.ev_cmd_count,
+ q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+ mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+ mlxsw_pci->cmd.comp.out_param =
+ ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+ mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+ mlxsw_pci->cmd.wait_done = true;
+ wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
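+/* The EQ tasklet dispatches events: a command event completes the pending
+ * command-interface call, while a completion event only marks its CQ in a
+ * local bitmap. The corresponding CQ tasklets are scheduled after the EQ
+ * doorbells are rung, so each CQ is scheduled at most once per run.
+ */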
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+ char *eqe;
+ u8 cqn;
+ bool cq_handle = false;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ memset(&active_cqns, 0, sizeof(active_cqns));
+
+ while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+ switch (event_type) {
+ case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+ q->u.eq.ev_cmd_count++;
+ break;
+ case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
+ cq_handle = true;
+ q->u.eq.ev_comp_count++;
+ break;
+ default:
+ q->u.eq.ev_other_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+
+ if (!cq_handle)
+ return;
+ for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+}
+
+struct mlxsw_pci_queue_ops {
+ const char *name;
+ enum mlxsw_pci_queue_type type;
+ int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q);
+ void (*fini)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ void (*tasklet)(unsigned long data);
+ int (*dbg_read)(struct seq_file *s, void *data);
+ u16 elem_count;
+ u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
+ .init = mlxsw_pci_sdq_init,
+ .fini = mlxsw_pci_sdq_fini,
+ .dbg_read = mlxsw_pci_sdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
+ .init = mlxsw_pci_rdq_init,
+ .fini = mlxsw_pci_rdq_fini,
+ .dbg_read = mlxsw_pci_rdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .init = mlxsw_pci_cq_init,
+ .fini = mlxsw_pci_cq_fini,
+ .tasklet = mlxsw_pci_cq_tasklet,
+ .dbg_read = mlxsw_pci_cq_dbg_read,
+ .elem_count = MLXSW_PCI_CQE_COUNT,
+ .elem_size = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_EQ,
+ .init = mlxsw_pci_eq_init,
+ .fini = mlxsw_pci_eq_fini,
+ .tasklet = mlxsw_pci_eq_tasklet,
+ .dbg_read = mlxsw_pci_eq_dbg_read,
+ .elem_count = MLXSW_PCI_EQE_COUNT,
+ .elem_size = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q, u8 q_num)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+ int i;
+ int err;
+
+ spin_lock_init(&q->lock);
+ q->num = q_num;
+ q->count = q_ops->elem_count;
+ q->elem_size = q_ops->elem_size;
+ q->type = q_ops->type;
+ q->pci = mlxsw_pci;
+
+ if (q_ops->tasklet)
+ tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+ mem_item->size = MLXSW_PCI_AQ_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf)
+ return -ENOMEM;
+ memset(mem_item->buf, 0, mem_item->size);
+
+ q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+ if (!q->elem_info) {
+ err = -ENOMEM;
+ goto err_elem_info_alloc;
+ }
+
+ /* Initialize the DMA-mapped element info array (elem_info) so that
+ * each element can be accessed directly later.
+ */
+ for (i = 0; i < q->count; i++) {
+ struct mlxsw_pci_queue_elem_info *elem_info;
+
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ elem_info->elem =
+ __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ }
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = q_ops->init(mlxsw_pci, mbox, q);
+ if (err)
+ goto err_q_ops_init;
+ return 0;
+
+err_q_ops_init:
+ kfree(q->elem_info);
+err_elem_info_alloc:
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+ q_ops->fini(mlxsw_pci, q);
+ kfree(q->elem_info);
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ u8 num_qs)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_type_group *queue_group;
+ char tmp[16];
+ int i;
+ int err;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+ if (!queue_group->q)
+ return -ENOMEM;
+
+ for (i = 0; i < num_qs; i++) {
+ err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+ &queue_group->q[i], i);
+ if (err)
+ goto err_queue_init;
+ }
+ queue_group->count = num_qs;
+
+ sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+ debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+ q_ops->dbg_read);
+
+ return 0;
+
+err_queue_init:
+ for (i--; i >= 0; i--)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+ return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ for (i = 0; i < queue_group->count; i++)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ u8 num_sdqs;
+ u8 sdq_log2sz;
+ u8 num_rdqs;
+ u8 rdq_log2sz;
+ u8 num_cqs;
+ u8 cq_log2sz;
+ u8 num_eqs;
+ u8 eq_log2sz;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+
+ num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+ sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+ num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+ rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+ num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+ cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+ eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+ if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+ (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+ (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+ (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of queues\n");
+ return -EINVAL;
+ }
+
+ if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+ num_eqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize event queues\n");
+ return err;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+ num_cqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+ goto err_cqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+ num_sdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+ goto err_sdqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+ num_rdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+ goto err_rdqs_init;
+ }
+
+ /* Until now the command interface had to be polled; the event queues
+ * are initialized, so switch it to event mode.
+ */
+ mlxsw_pci->cmd.nopoll = true;
+ return 0;
+
+err_rdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+ return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci->cmd.nopoll = false;
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+ char *mbox, int index,
+ const struct mlxsw_swid_config *swid)
+{
+ u8 mask = 0;
+
+ if (swid->used_type) {
+ mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+ mbox, index, swid->type);
+ mask |= 1;
+ }
+ if (swid->used_properties) {
+ mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+ mbox, index, swid->properties);
+ mask |= 2;
+ }
+ mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_config_profile *profile)
+{
+ int i;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ if (profile->used_max_vepa_channels) {
+ mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+ mbox, profile->max_vepa_channels);
+ }
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(
+ mbox, profile->max_lag);
+ }
+ if (profile->used_max_port_per_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+ mbox, profile->max_port_per_lag);
+ }
+ if (profile->used_max_mid) {
+ mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_mid_set(
+ mbox, profile->max_mid);
+ }
+ if (profile->used_max_pgt) {
+ mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pgt_set(
+ mbox, profile->max_pgt);
+ }
+ if (profile->used_max_system_port) {
+ mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_system_port_set(
+ mbox, profile->max_system_port);
+ }
+ if (profile->used_max_vlan_groups) {
+ mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+ mbox, profile->max_vlan_groups);
+ }
+ if (profile->used_max_regions) {
+ mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_regions_set(
+ mbox, profile->max_regions);
+ }
+ if (profile->used_flood_tables) {
+ mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+ mbox, profile->max_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+ mbox, profile->max_vid_flood_tables);
+ }
+ if (profile->used_flood_mode) {
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(
+ mbox, profile->flood_mode);
+ }
+ if (profile->used_max_ib_mc) {
+ mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+ mbox, profile->max_ib_mc);
+ }
+ if (profile->used_max_pkey) {
+ mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pkey_set(
+ mbox, profile->max_pkey);
+ }
+ if (profile->used_ar_sec) {
+ mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ar_sec_set(
+ mbox, profile->ar_sec);
+ }
+ if (profile->used_adaptive_routing_group_cap) {
+ mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+ mbox, profile->adaptive_routing_group_cap);
+ }
+
+ for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+ &profile->swid_config[i]);
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+ mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+ mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+ return 0;
+}
+
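+/* Firmware area: hand num_pages pages of MLXSW_PCI_PAGE_SIZE bytes to the
+ * device through the MAP_FA command. Each mailbox entry carries one page
+ * DMA address with log2size 0, i.e. exactly one page per entry.
+ */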
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ u16 num_pages)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+ int err;
+
+ mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+ GFP_KERNEL);
+ if (!mlxsw_pci->fw_area.items)
+ return -ENOMEM;
+ mlxsw_pci->fw_area.num_pages = num_pages;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ for (i = 0; i < num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ mem_item->size = MLXSW_PCI_PAGE_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+ }
+
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+ if (err)
+ goto err_cmd_map_fa;
+
+ return 0;
+
+err_cmd_map_fa:
+err_alloc:
+ for (i--; i >= 0; i--) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+ return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+
+ mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+ for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_id;
+ struct mlxsw_pci_queue *q;
+ int i;
+
+ for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ int err = 0;
+
+ mbox->size = MLXSW_CMD_MBOX_SIZE;
+ mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr);
+ if (!mbox->buf) {
+ dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+
+ pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char *mbox;
+ u16 num_pages;
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ mlxsw_pci->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto mbox_put;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto err_query_fw;
+
+ mlxsw_pci->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+ dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+ err = -EINVAL;
+ goto err_iface_rev;
+ }
+ if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+ err = -EINVAL;
+ goto err_doorbell_page_bar;
+ }
+
+ mlxsw_pci->doorbell_offset =
+ mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+ num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+ if (err)
+ goto err_fw_area_init;
+
+ err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+ if (err)
+ goto err_boardinfo;
+
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ if (err)
+ goto err_config_profile;
+
+ err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+ if (err)
+ goto err_aqs_init;
+
+ err = request_irq(mlxsw_pci->msix_entry.vector,
+ mlxsw_pci_eq_irq_handler, 0,
+ mlxsw_pci_driver_name, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_request_eq_irq;
+ }
+
+ goto mbox_put;
+
+err_request_eq_irq:
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+}
+
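+/* A simple modulo on the local port number maps each port to a fixed send
+ * descriptor queue.
+ */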
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_tx_info *tx_info)
+{
+ u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+ return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+ return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ int i;
+ int err;
+
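+ /* The WQE has MLXSW_PCI_WQE_SG_ENTRIES scatter/gather slots, one of
+ * which is used for the linear part of the skb, so an skb with more
+ * fragments than the remaining slots must be linearized first.
+ */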
+ if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ }
+
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ spin_lock_bh(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ if (!elem_info) {
+ /* queue is full */
+ err = -EAGAIN;
+ goto unlock;
+ }
+ elem_info->u.sdq.skb = skb;
+
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+ mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (err)
+ goto unmap_frags;
+ }
+
+ /* Set unused sq entries byte count to zero. */
+ for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+ /* Everything is set up, ring producer doorbell to get HW going */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ goto unlock;
+
+unmap_frags:
+ for (; i >= 0; i--)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+ spin_unlock_bh(&q->lock);
+ return err;
+}
+
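+/* Execute a command-interface command. Two completion modes exist: until
+ * the event queues are up (cmd.nopoll is false) the GO bit in CIR_CTRL is
+ * polled, and afterwards the command is issued with the EVREQ bit set and
+ * the caller sleeps until mlxsw_pci_eq_cmd_event() signals completion.
+ */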
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ bool evreq = mlxsw_pci->cmd.nopoll;
+ unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+ bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ int err;
+
+ *p_status = MLXSW_CMD_STATUS_OK;
+
+ err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+ if (err)
+ return err;
+
+ if (in_mbox)
+ memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+ mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+ *p_wait_done = false;
+
+ wmb(); /* all needs to be written before we write control register */
+ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+ MLXSW_PCI_CIR_CTRL_GO_BIT |
+ (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+ (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+ opcode);
+
+ if (!evreq) {
+ unsigned long end;
+
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
+ } else {
+ wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+ *p_status = mlxsw_pci->cmd.comp.status;
+ }
+
+ err = 0;
+ if (*p_wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && out_mbox && out_mbox_direct) {
+ /* Some commands do not use the output parameter as a mailbox
+ * address but store their output directly in registers. In that
+ * case, copy the registers into the mbox buffer.
+ */
+ __be32 tmp;
+
+ if (!evreq) {
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+ }
+ } else if (!err && out_mbox)
+ memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+
+ mutex_unlock(&mlxsw_pci->cmd.lock);
+
+ return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ /* Current firmware does not let us know when the reset is done.
+ * So we just wait for a fixed amount of time and hope for the best.
+ */
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlxsw_pci *mlxsw_pci;
+ int err;
+
+ mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+ if (!mlxsw_pci)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!mlxsw_pci->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ mlxsw_pci->pdev = pdev;
+ pci_set_drvdata(pdev, mlxsw_pci);
+
+ err = mlxsw_pci_sw_reset(mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "Software reset failed\n");
+ goto err_sw_reset;
+ }
+
+ err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+ mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+ mlxsw_pci->bus_info.dev = &pdev->dev;
+
+ mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+ mlxsw_pci_dbg_root);
+ if (!mlxsw_pci->dbg_dir) {
+ dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+ err = -ENOMEM;
+ goto err_dbg_create_dir;
+ }
+
+ err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+ &mlxsw_pci_bus, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register bus device\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+ pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+ iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(mlxsw_pci);
+ return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+ pci_disable_msix(mlxsw_pci->pdev);
+ iounmap(mlxsw_pci->hw_addr);
+ pci_release_regions(mlxsw_pci->pdev);
+ pci_disable_device(mlxsw_pci->pdev);
+ kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+ .name = mlxsw_pci_driver_name,
+ .id_table = mlxsw_pci_id_table,
+ .probe = mlxsw_pci_probe,
+ .remove = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+ int err;
+
+ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+ if (!mlxsw_pci_dbg_root)
+ return -ENOMEM;
+ err = pci_register_driver(&mlxsw_pci_driver);
+ if (err)
+ goto err_register_driver;
+ return 0;
+
+err_register_driver:
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+ return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+ pci_unregister_driver(&mlxsw_pci_driver);
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000000..1ef9664b4512
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,243 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
+
+#define MLXSW_PCI_SW_RESET 0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
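+
+/* For example, the doorbell of CQ number 5 lives at
+ * MLXSW_PCI_DOORBELL(doorbell_offset, MLXSW_PCI_DOORBELL_CQ_OFFSET, 5),
+ * i.e. doorbell_offset + 0x400 + 5 * 4, where doorbell_offset is the
+ * doorbell page offset reported by the QUERY_FW command.
+ */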
+
+#define MLXSW_PCI_RDQS_COUNT 24
+#define MLXSW_PCI_SDQS_COUNT 24
+#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
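+
+/* Completion events are steered to MLXSW_PCI_EQ_COMP_NUM through the c_eqn
+ * field of SW2HW_CQ; MLXSW_PCI_EQ_ASYNC_NUM is left for command and other
+ * asynchronous events.
+ */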
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
+
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
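+/* The MLXSW_ITEM*() definitions below expand (see item.h) into typed
+ * accessors such as mlxsw_pci_wqe_c_set() and mlxsw_pci_cqe_owner_get(),
+ * which pci.c uses to read and write the raw descriptor memory.
+ */
+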
+/* pci_wqe_c
+ * If set, it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000000..726f5435b32f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MID 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS 0x40
+#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET 10
+#define MLXSW_PORT_PHY_BITS_OFFSET 4
+#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000000..096e1c12175a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1359 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
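+/* A register payload is typically used as follows (sketch; assumes the
+ * mlxsw_reg_write() helper provided by the core, which is not part of
+ * this header):
+ *
+ * char payload[MLXSW_REG_SGCR_LEN];
+ *
+ * mlxsw_reg_sgcr_pack(payload, true);
+ * err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), payload);
+ */
+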
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+ .id = MLXSW_REG_SGCR_ID,
+ .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as
+ * broadcast packets, ignoring the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+ .id = MLXSW_REG_SPAD_ID,
+ .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain a mapping
+ * from Multicast ID (MID) to a list of local ports. This mapping is used
+ * in all the devices other than the ingress device, and is implemented as
+ * part of the FDB. The MID record maps a MID, which is a unique identifier
+ * of the multicast group within the stacking domain, to a list of local
+ * ports to which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+ .id = MLXSW_REG_SMID_ID,
+ .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+ MLXSW_REG_ZERO(smid, payload);
+ mlxsw_reg_smid_swid_set(payload, 0);
+ mlxsw_reg_smid_mid_set(payload, mid);
+ mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+ .id = MLXSW_REG_SSPR_ID,
+ .len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). That number will be reported as the source system
+ * port when packets are forwarded to the CPU. Only one master port is allowed
+ * per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(sspr, payload);
+ mlxsw_reg_sspr_m_set(payload, 1);
+ mlxsw_reg_sspr_local_port_set(payload, local_port);
+ mlxsw_reg_sspr_sub_port_set(payload, 0);
+ mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+ .id = MLXSW_REG_SPMS_ID,
+ .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+ MLXSW_REG_SPMS_STATE_NO_CHANGE,
+ MLXSW_REG_SPMS_STATE_DISCARDING,
+ MLXSW_REG_SPMS_STATE_LEARNING,
+ MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
+ MLXSW_REG_ZERO(spms, payload);
+ mlxsw_reg_spms_local_port_set(payload, local_port);
+ mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+ .id = MLXSW_REG_SFGC_ID,
+ .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+ MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+ MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+ MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int flood_table)
+{
+ MLXSW_REG_ZERO(sfgc, payload);
+ mlxsw_reg_sfgc_type_set(payload, type);
+ mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfgc_table_type_set(payload, table_type);
+ mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+ .id = MLXSW_REG_SFTR_ID,
+ .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range)
+{
+ MLXSW_REG_ZERO(sftr, payload);
+ mlxsw_reg_sftr_swid_set(payload, 0);
+ mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr_index_set(payload, index);
+ mlxsw_reg_sftr_table_type_set(payload, table_type);
+ mlxsw_reg_sftr_range_set(payload, range);
+ mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+ .id = MLXSW_REG_SPMLR_ID,
+ .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+ MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+ MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ MLXSW_REG_ZERO(spmlr, payload);
+ mlxsw_reg_spmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spmlr_sub_port_set(payload, 0);
+ mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
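+
+/* Illustrative sketch only ('core' is a placeholder for a struct
+ * mlxsw_core handle): disable MAC learning on local port 1.
+ *
+ *	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_spmlr_pack(spmlr_pl, 1, MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(spmlr), spmlr_pl);
+ */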
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+ .id = MLXSW_REG_PMLP_ID,
+ .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pmlp, payload);
+ mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+ .id = MLXSW_REG_PMTU_ID,
+ .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When the port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported; otherwise, the minimum of the max_mtu values of the different
+ * port types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If port type is Infiniband, then port must be disabled, when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+ u16 new_mtu)
+{
+ MLXSW_REG_ZERO(pmtu, payload);
+ mlxsw_reg_pmtu_local_port_set(payload, local_port);
+ mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+ mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+ mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
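+
+/* Changing the MTU is naturally a two-step operation: query max_mtu
+ * first, then write the desired admin_mtu. A sketch (illustrative only;
+ * 'core', 'local_port' and 'new_mtu' are placeholders):
+ *
+ *	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(pmtu), pmtu_pl);
+ *	if (!err && new_mtu <= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl)) {
+ *		mlxsw_reg_pmtu_pack(pmtu_pl, local_port, new_mtu);
+ *		err = mlxsw_reg_write(core, MLXSW_REG(pmtu), pmtu_pl);
+ *	}
+ */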
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+ .id = MLXSW_REG_PTYS_ID,
+ .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+ u32 proto_admin)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_adm)
+ *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
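+
+/* A usage sketch (illustrative only; 'core' and 'local_port' are
+ * placeholders): query the Ethernet capabilities of a port.
+ *
+ *	char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *	u32 eth_proto_cap;
+ *	int err;
+ *
+ *	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(ptys), ptys_pl);
+ *	if (!err)
+ *		mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+ */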
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+ .id = MLXSW_REG_PPAD_ID,
+ .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved.
+ *     HW will assign the MAC addresses incrementally.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+ u8 local_port)
+{
+ MLXSW_REG_ZERO(ppad, payload);
+ mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+ mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+ .id = MLXSW_REG_PAOS_ID,
+ .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number, where swid is the
+ * only indication of the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+ enum mlxsw_port_admin_status status)
+{
+ MLXSW_REG_ZERO(paos, payload);
+ mlxsw_reg_paos_swid_set(payload, 0);
+ mlxsw_reg_paos_local_port_set(payload, local_port);
+ mlxsw_reg_paos_admin_status_set(payload, status);
+ mlxsw_reg_paos_oper_status_set(payload, 0);
+ mlxsw_reg_paos_ase_set(payload, 1);
+ mlxsw_reg_paos_ee_set(payload, 1);
+ mlxsw_reg_paos_e_set(payload, 1);
+}
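+
+/* Illustrative sketch only ('core' and 'local_port' are placeholders):
+ * administratively enable a port.
+ *
+ *	char paos_pl[MLXSW_REG_PAOS_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(paos), paos_pl);
+ */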
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+ .id = MLXSW_REG_PPCNT_ID,
+ .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID with which to associate the port.
+ * Switch partitions are numbered from 0 to 7 inclusive.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counters;
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counters;
+ * valid values: 0 to cap_max_tclass - 1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+ 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+ 0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+ 0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+ 0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+ 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+ 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+ 0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+ 0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+ 0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+ 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+ 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+ 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+ 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+ 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+ 0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+ 0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+ 0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+ 0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+ 0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(ppcnt, payload);
+ mlxsw_reg_ppcnt_swid_set(payload, 0);
+ mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+ mlxsw_reg_ppcnt_pnat_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
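+
+/* A usage sketch (illustrative only; 'core' and 'local_port' are
+ * placeholders): read one counter from the IEEE 802.3 group.
+ *
+ *	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *	u64 rx_frames;
+ *	int err;
+ *
+ *	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *	if (!err)
+ *		rx_frames = mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ */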
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+ .id = MLXSW_REG_PSPA_ID,
+ .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+ MLXSW_REG_ZERO(pspa, payload);
+ mlxsw_reg_pspa_swid_set(payload, swid);
+ mlxsw_reg_pspa_local_port_set(payload, local_port);
+ mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+ .id = MLXSW_REG_HTGT_ID,
+ .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+ MLXSW_REG_HTGT_POLICER_DISABLE,
+ MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+ u8 swid, rdq;
+
+ MLXSW_REG_ZERO(htgt, payload);
+	if (trap_group == MLXSW_REG_HTGT_TRAP_GROUP_EMAD) {
+ swid = MLXSW_PORT_SWID_ALL_SWIDS;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+ } else {
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ }
+ mlxsw_reg_htgt_swid_set(payload, swid);
+ mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+ mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+ mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+ mlxsw_reg_htgt_pid_set(payload, 0);
+ mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+ mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+ mlxsw_reg_htgt_priority_set(payload, 0);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
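+
+/* Illustrative sketch only ('core' is a placeholder): set up the RX
+ * trap group so traps can later be bound to it via HPKT.
+ *
+ *	char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
+ */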
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+ .id = MLXSW_REG_HPKT_ID,
+ .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+ MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+ MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+ MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+ MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+ u8 trap_group, u16 trap_id)
+{
+ MLXSW_REG_ZERO(hpkt, payload);
+ mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+ mlxsw_reg_hpkt_action_set(payload, action);
+ mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+ mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+ mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
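+
+/* A usage sketch (illustrative only; 'core' is a placeholder and
+ * MLXSW_TRAP_ID_ETHEMAD is assumed to come from trap.h): trap EMAD
+ * responses to the CPU via the EMAD trap group.
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *	int err;
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ *			    MLXSW_TRAP_ID_ETHEMAD);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */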
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+ switch (reg_id) {
+ case MLXSW_REG_SGCR_ID:
+ return "SGCR";
+ case MLXSW_REG_SPAD_ID:
+ return "SPAD";
+ case MLXSW_REG_SMID_ID:
+ return "SMID";
+ case MLXSW_REG_SSPR_ID:
+ return "SSPR";
+ case MLXSW_REG_SPMS_ID:
+ return "SPMS";
+ case MLXSW_REG_SFGC_ID:
+ return "SFGC";
+ case MLXSW_REG_SFTR_ID:
+ return "SFTR";
+ case MLXSW_REG_SPMLR_ID:
+ return "SPMLR";
+ case MLXSW_REG_PMLP_ID:
+ return "PMLP";
+ case MLXSW_REG_PMTU_ID:
+ return "PMTU";
+ case MLXSW_REG_PTYS_ID:
+ return "PTYS";
+ case MLXSW_REG_PPAD_ID:
+ return "PPAD";
+ case MLXSW_REG_PAOS_ID:
+ return "PAOS";
+ case MLXSW_REG_PPCNT_ID:
+ return "PPCNT";
+ case MLXSW_REG_PSPA_ID:
+ return "PSPA";
+ case MLXSW_REG_HTGT_ID:
+ return "HTGT";
+ case MLXSW_REG_HPKT_ID:
+ return "HPKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ * port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 000000000000..3e52ee93438c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+ struct mlxsw_sx_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sx_port {
+ struct net_device *dev;
+ struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sx *mlxsw_sx;
+ u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ bool is_emad = tx_info->is_emad;
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ /* We currently set default values for the egress tclass (QoS). */
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	default:
+		return -EINVAL;
+	}
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+ mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+ MLXSW_TXHDR_RDQ_OTHER);
+ mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+ mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+ mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+ mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+ MLXSW_TXHDR_NOT_EMAD);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+ return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_usable)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+	*p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+ return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sx_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+ .ndo_open = mlxsw_sx_port_open,
+ .ndo_stop = mlxsw_sx_port_stop,
+ .ndo_start_xmit = mlxsw_sx_port_xmit,
+ .ndo_change_mtu = mlxsw_sx_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sx->bus_info->fw_rev.major,
+ mlxsw_sx->bus_info->fw_rev.minor,
+ mlxsw_sx->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SX_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct mlxsw_sx_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+ speed = mlxsw_sx_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+		netdev_err(dev, "Failed to get proto\n");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sx_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sx_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+		netdev_err(dev, "Failed to get proto\n");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+		netdev_err(dev, "Unsupported proto admin requested\n");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+		netdev_err(dev, "Failed to set proto admin\n");
+ return err;
+ }
+
+ err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+ if (err) {
+		netdev_err(dev, "Failed to get oper status\n");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err) {
+		netdev_err(dev, "Failed to set admin status\n");
+ return err;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err) {
+		netdev_err(dev, "Failed to set admin status\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sx_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sx_port_get_strings,
+ .get_ethtool_stats = mlxsw_sx_port_get_stats,
+ .get_sset_count = mlxsw_sx_port_get_sset_count,
+ .get_settings = mlxsw_sx_port_get_settings,
+ .set_settings = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+ return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct net_device *dev = mlxsw_sx_port->dev;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte value in the base MAC address is guaranteed
+	 * to be such that it does not overflow when the local_port
+	 * value is added.
+ */
+ dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+ return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u32 speed)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+ mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sx_port = netdev_priv(dev);
+ mlxsw_sx_port->dev = dev;
+ mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+ mlxsw_sx_port->local_port = local_port;
+
+ mlxsw_sx_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+ if (!mlxsw_sx_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+ dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+ err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_sx_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sx_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+ MLXSW_PORT_DEFAULT_VID,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_stp_state_set;
+ }
+
+ err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mac_learning_mode_set;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sx_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+ return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
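mlxsw_sx_port_create() is a textbook goto-ladder: each setup step that can fail jumps to a label placed so that only the steps which already succeeded are undone, in reverse order. A minimal userspace sketch of the shape, with two placeholder allocations standing in for the real setup steps:

#include <stdlib.h>

/* Illustrative userspace sketch of the kernel goto-ladder, not real code. */
struct demo_port {
	void *stats;
	void *dev;
};

static struct demo_port *demo_port_create(void)
{
	struct demo_port *port;

	port = calloc(1, sizeof(*port));
	if (!port)
		return NULL;

	port->stats = malloc(64);	/* placeholder: per-CPU stats */
	if (!port->stats)
		goto err_alloc_stats;

	port->dev = malloc(64);		/* placeholder: netdev setup */
	if (!port->dev)
		goto err_alloc_dev;

	return port;

	/* Labels run in reverse order of setup; each undoes exactly the
	 * steps that completed before the failing one. */
err_alloc_dev:
	free(port->stats);
err_alloc_stats:
	free(port);
	return NULL;
}

int main(void)
{
	struct demo_port *port = demo_port_create();

	if (!port)
		return 1;
	free(port->dev);
	free(port->stats);
	free(port);
	return 0;
}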
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+ if (!mlxsw_sx_port)
+ return;
+ unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+ free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sx->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sx_port_create(mlxsw_sx, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+ return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ if (!mlxsw_sx_port) {
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sx_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sx_port->dev);
+ } else {
+ netdev_info(mlxsw_sx_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sx_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+ .func = mlxsw_sx_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	default:
+		/* Avoid using 'el' uninitialized for an unexpected trap. */
+		return -EINVAL;
+	}
+ err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	default:
+		/* Avoid using 'el' uninitialized for an unexpected trap. */
+		return;
+	}
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sx_port)) {
+ if (net_ratelimit())
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sx_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
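The listener charges received packets to per-CPU counters inside a u64_stats_update_begin()/end() pair, so 64-bit counters stay torn-free even on 32-bit readers. The other half of the scheme is folding the per-CPU copies into one total when statistics are read; a simplified single-threaded sketch with the seqcount machinery elided:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* illustrative; not the kernel's NR_CPUS */

struct pcpu_stats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static struct pcpu_stats stats[NR_CPUS];

/* Writer side: each CPU touches only its own slot, so there is no
 * shared-counter contention on the hot RX path. */
static void rx_account(int cpu, uint64_t len)
{
	stats[cpu].rx_packets++;
	stats[cpu].rx_bytes += len;
}

/* Reader side: fold all per-CPU slots into one total. */
static struct pcpu_stats stats_fold(void)
{
	struct pcpu_stats total = { 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		total.rx_packets += stats[cpu].rx_packets;
		total.rx_bytes += stats[cpu].rx_bytes;
	}
	return total;
}

int main(void)
{
	struct pcpu_stats total;

	rx_account(0, 64);
	rx_account(1, 1500);
	total = stats_fold();
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)total.rx_packets,
	       (unsigned long long)total.rx_bytes);
	return 0;
}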
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+ return err;
+}
+
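mlxsw_sx_traps_init() shows the usual partial-failure rollback: if a sub-step for entry i fails after its listener registered, err_rx_trap_set first unregisters entry i itself, then the for (i--; i >= 0; i--) loop unwinds every entry that fully completed. A compact standalone sketch of the rollback loop, with register_one()/unregister_one() as placeholders:

#include <stdio.h>

#define N_ENTRIES 5	/* illustrative table size */

static int register_one(int i)
{
	return (i == 3) ? -1 : 0;	/* simulate failure on entry 3 */
}

static void unregister_one(int i)
{
	printf("rolled back entry %d\n", i);
}

static int register_all(void)
{
	int i, err;

	for (i = 0; i < N_ENTRIES; i++) {
		err = register_one(i);
		if (err)
			goto err_register;
	}
	return 0;

err_register:
	/* Unwind only entries [0, i); entry i itself never registered. */
	for (i--; i >= 0; i--)
		unregister_one(i);
	return err;
}

int main(void)
{
	return register_all() ? 1 : 0;
}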
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ char *smid_pl;
+ char *sftr_pl;
+ int err;
+
+	/* Due to a FW bug, we must configure SMID. */
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+ mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ if (err)
+ return err;
+
+	/* Configure a flooding table, which includes only the CPU port. */
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err)
+ return err;
+
+ /* Flood different packet types using the flooding table. */
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
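The five sfgc writes above differ only in the packet type being flooded; the same configuration can be expressed table-driven, trading the repeated blocks for a loop over an array of types. A hedged sketch of that alternative — the enum values and the sfgc_write() helper below are stand-ins, not the mlxsw definitions:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the MLXSW_REG_SFGC_TYPE_* values. */
enum sfgc_type {
	SFGC_TYPE_UNKNOWN_UNICAST,
	SFGC_TYPE_BROADCAST,
	SFGC_TYPE_UNREG_MC_NON_IP,
	SFGC_TYPE_UNREG_MC_IPV6,
	SFGC_TYPE_UNREG_MC_IPV4,
};

static const enum sfgc_type flood_types[] = {
	SFGC_TYPE_UNKNOWN_UNICAST,
	SFGC_TYPE_BROADCAST,
	SFGC_TYPE_UNREG_MC_NON_IP,
	SFGC_TYPE_UNREG_MC_IPV6,
	SFGC_TYPE_UNREG_MC_IPV4,
};

/* Stand-in for packing and writing one SFGC entry to the device. */
static int sfgc_write(enum sfgc_type type)
{
	printf("flood type %d -> single table, offset 0\n", (int)type);
	return 0;
}

int main(void)
{
	size_t i;
	int err;

	for (i = 0; i < sizeof(flood_types) / sizeof(flood_types[0]); i++) {
		err = sfgc_write(flood_types[i]);
		if (err)
			return 1;
	}
	return 0;
}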
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ int err;
+
+ mlxsw_sx->core = mlxsw_core;
+ mlxsw_sx->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sx_hw_id_get(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
+ err = mlxsw_sx_ports_create(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sx_traps_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sx_flood_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ return 0;
+
+err_flood_init:
+ mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sx_ports_remove(mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+
+ mlxsw_sx_traps_fini(mlxsw_sx);
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 48000,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .max_flood_tables = 2,
+ .max_vid_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+ .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sx),
+ .init = mlxsw_sx_init,
+ .fini = mlxsw_sx_fini,
+ .txhdr_construct = mlxsw_sx_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+ return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 000000000000..53a9550be75e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+ /* Ethernet EMAD and FDB miss */
+ MLXSW_TRAP_ID_FDB_MC = 0x01,
+ MLXSW_TRAP_ID_ETHEMAD = 0x05,
+ /* L2 traps for specific packet types */
+ MLXSW_TRAP_ID_STP = 0x10,
+ MLXSW_TRAP_ID_LACP = 0x11,
+ MLXSW_TRAP_ID_EAPOL = 0x12,
+ MLXSW_TRAP_ID_LLDP = 0x13,
+ MLXSW_TRAP_ID_MMRP = 0x14,
+ MLXSW_TRAP_ID_MVRP = 0x15,
+ MLXSW_TRAP_ID_RPVST = 0x16,
+ MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+ MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+ MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+ MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+ MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+ MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+ /* Port Up/Down event generated by hardware */
+ MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 000000000000..06fc46c78a0b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+ MLXSW_TXHDR_ETH_CTL,
+ MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+ MLXSW_TXHDR_ETCLASS_0,
+ MLXSW_TXHDR_ETCLASS_1,
+ MLXSW_TXHDR_ETCLASS_2,
+ MLXSW_TXHDR_ETCLASS_3,
+ MLXSW_TXHDR_ETCLASS_4,
+ MLXSW_TXHDR_ETCLASS_5,
+ MLXSW_TXHDR_ETCLASS_6,
+ MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+ MLXSW_TXHDR_RDQ_OTHER,
+ MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+ MLXSW_TXHDR_NOT_EMAD,
+ MLXSW_TXHDR_EMAD,
+};
+
+enum {
+ MLXSW_TXHDR_TYPE_DATA,
+ MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c28111749e1f..2d1b94274079 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
- return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
- pci_unregister_driver(&s2io_driver);
- DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
struct tcphdr **tcp, struct RxD_t *rxdp,
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d89b6ed82c51..6c5997dc8afc 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static int s2io_starter(void);
-static void s2io_closer(void);
static void s2io_tx_watchdog(struct net_device *dev);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 055f3763e577..06bcc734fe8d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
#include <linux/mii.h>
#include <linux/timer.h>
#include <linux/irq.h>
-
#include <linux/vmalloc.h>
-
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
@@ -39,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID "5.3.63"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -926,6 +924,7 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13
#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
@@ -2291,8 +2290,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30
#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2319,7 +2319,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
(device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
(device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2335,7 +2336,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2351,7 +2353,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
}
static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36b5e9d..9f0bdd993955 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+ {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -916,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
mbx->cmd_op = type;
@@ -3513,6 +3513,31 @@ out:
qlcnic_free_mbx_args(&cmd);
}
+#define QLCNIC_83XX_ADD_PORT0 BIT_0
+#define QLCNIC_83XX_ADD_PORT1 BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+ cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+ cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+			"failed to issue the extend iSCSI minidump capability command\n");
+
+ return err;
+}
+
int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
{
u32 major, minor, sub;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828eb42cf..331ae2c20f40 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/etherdevice.h>
+
#include "qlcnic_hw.h"
#define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..bf892160dd5f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
size_t size;
u64 addr;
- temp = kzalloc(fw->size, GFP_KERNEL);
+ temp = vzalloc(fw->size);
if (!temp) {
release_firmware(fw);
fw_info->fw = NULL;
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (fw->size & 0xF) {
addr = dest + size;
for (i = 0; i < (fw->size & 0xF); i++)
- data[i] = temp[size + i];
+ data[i] = ((u8 *)temp)[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
exit:
release_firmware(fw);
fw_info->fw = NULL;
- kfree(temp);
+ vfree(temp);
return ret;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 6e6f18fc5d76..a5f422f26cb4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = type;
break;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4ced51..509b596cf1e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bitops.h>
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399c30a0..4bb33af8e2b3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37
#define QLCNIC_INTRPT_INTX 1
#define QLCNIC_INTRPT_MSIX 3
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc423ab1d..8b08b20e8b30 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
#include <net/vxlan.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static const struct pci_device_id qlcnic_pci_tbl[] = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
- ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
@@ -1149,6 +1149,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -2403,7 +2404,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
qlcnic_free_tx_rings(adapter);
return -ENOMEM;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
spin_lock_init(&tx_ring->tx_clean_lock);
}
@@ -2492,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a3f430..cda9e604a95f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <net/ip.h>
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include "qlcnic_83xx_hw.h"
#include "qlcnic_hw.h"
-#include <net/ip.h>
-
#define QLC_83XX_MINIDUMP_FLASH 0x520000
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
@@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
netdev_info(adapter->netdev,
- "Dump data %d bytes captured, template header size %d bytes\n",
- fw_dump->size, fw_dump->tmpl_hdr_size);
+ "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+ fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+ fw_dump->tmpl_hdr);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
return 0;
}
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+	/* For special adapters (with the 0x8830 device ID), where the iSCSI
+	 * firmware dump needs to be captured as part of the regular firmware
+	 * dump collection process, the firmware exports its capability
+	 * through the capability registers.
+	 */
+ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+ (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
u32 prev_version, current_version;
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
struct pci_dev *pdev = adapter->pdev;
+ bool extended = false;
prev_version = adapter->fw_version;
current_version = qlcnic_83xx_get_fw_version(adapter);
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
vfree(fw_dump->tmpl_hdr);
+
+ if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ extended = !qlcnic_83xx_extend_md_capab(adapter);
+
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+		/* Once we have a minidump template with the extended iSCSI
+		 * dump capability, update the minidump capture mask to 0x1f
+		 * as per the FW requirement.
+		 */
+ if (extended) {
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ hdr->drv_cap_mask = 0x1f;
+ fw_dump->cap_mask = 0x1f;
+ dev_info(&pdev->dev,
+ "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2edccca..017d8c2c8285 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
#ifndef _QLCNIC_83XX_SRIOV_H_
#define _QLCNIC_83XX_SRIOV_H_
-#include "qlcnic.h"
#include <linux/types.h>
#include <linux/pci.h>
+#include "qlcnic.h"
+
extern const u32 qlcnic_83xx_reg_tbl[];
extern const u32 qlcnic_83xx_ext_reg_tbl[];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e6312465fe45..7327b729ba2e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
#define QLC_BC_COMMAND 0
#define QLC_BC_RESPONSE 1
@@ -728,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b86edf..afd687e5e779 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
-#include <linux/types.h>
#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 05c28f2c6df7..ccbb04503b27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -24,6 +20,9 @@
#include <linux/hwmon-sysfs.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..2b32e0c5a0b4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -637,6 +637,9 @@ enum rtl_register_content {
/* _TBICSRBit */
TBILinkOK = 0x02000000,
+ /* ResetCounterCommand */
+ CounterReset = 0x1,
+
/* DumpCounterCommand */
CounterDump = 0x8,
@@ -747,6 +750,13 @@ struct rtl8169_counters {
__le16 tx_underun;
};
+struct rtl8169_tc_offsets {
+ bool inited;
+ __le64 tx_errors;
+ __le32 tx_multi_collision;
+ __le16 tx_aborted;
+};
+
enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
@@ -823,7 +833,9 @@ struct rtl8169_private {
unsigned features;
struct mii_if_info mii;
- struct rtl8169_counters counters;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
u32 opts1_mask;
@@ -2183,65 +2195,121 @@ DECLARE_RTL_COND(rtl_counters_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R32(CounterAddrLow) & CounterDump;
+ return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
-static void rtl8169_update_counters(struct net_device *dev)
+static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct device *d = &tp->pci_dev->dev;
- struct rtl8169_counters *counters;
- dma_addr_t paddr;
+ dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
+ bool ret;
+
+ RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ cmd = (u64)paddr & DMA_BIT_MASK(32);
+ RTL_W32(CounterAddrLow, cmd);
+ RTL_W32(CounterAddrLow, cmd | counter_cmd);
+
+ ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
+
+ return ret;
+}
+
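rtl8169_do_counters() hands the chip a 64-bit DMA address through two 32-bit registers: the high half first, then the low half, then the low half again with the command bit ORed in to start the operation. A small sketch of just that split, with the register writes replaced by prints and an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

#define COUNTER_DUMP 0x8	/* same value the driver uses for CounterDump */

static void program_counter_addr(uint64_t paddr, uint32_t cmd_bit)
{
	uint32_t hi = (uint32_t)(paddr >> 32);
	uint32_t lo = (uint32_t)(paddr & 0xffffffffULL);

	/* Write order mirrors the driver: high half, low half, then the
	 * low half again with the command bit set to start the dump. */
	printf("CounterAddrHigh = %#x\n", (unsigned int)hi);
	printf("CounterAddrLow  = %#x\n", (unsigned int)lo);
	printf("CounterAddrLow  = %#x\n", (unsigned int)(lo | cmd_bit));
}

int main(void)
{
	program_counter_addr(0x1c0ffee000ULL, COUNTER_DUMP); /* example addr */
	return 0;
}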
+static bool rtl8169_reset_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ /*
+ * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
+ * tally counters.
+ */
+ if (tp->mac_version < RTL_GIGA_MAC_VER_19)
+ return true;
+
+ return rtl8169_do_counters(dev, CounterReset);
+}
+
+static bool rtl8169_update_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
/*
* Some chips are unable to dump tally counters when the receiver
* is disabled.
*/
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
- return;
+ return true;
- counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
- if (!counters)
- return;
+ return rtl8169_do_counters(dev, CounterDump);
+}
- RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
- cmd = (u64)paddr & DMA_BIT_MASK(32);
- RTL_W32(CounterAddrLow, cmd);
- RTL_W32(CounterAddrLow, cmd | CounterDump);
+static bool rtl8169_init_counter_offsets(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
+ bool ret = false;
+
+ /*
+ * rtl8169_init_counter_offsets is called from rtl_open. On chip
+ * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
+ * reset by a power cycle, while the counter values collected by the
+ * driver are reset at every driver unload/load cycle.
+ *
+ * To make sure the HW values returned by @get_stats64 match the SW
+ * values, we collect the initial values at first open(*) and use them
+ * as offsets to normalize the values returned by @get_stats64.
+ *
+ * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
+ * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
+ * set at open time by rtl_hw_start.
+ */
- if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
- memcpy(&tp->counters, counters, sizeof(*counters));
+ if (tp->tc_offset.inited)
+ return true;
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
+	/* If both reset and update fail, propagate the failure to the caller. */
+ if (rtl8169_reset_counters(dev))
+ ret = true;
+
+ if (rtl8169_update_counters(dev))
+ ret = true;
- dma_free_coherent(d, sizeof(*counters), counters, paddr);
+ tp->tc_offset.tx_errors = counters->tx_errors;
+ tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
+ tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.inited = true;
+
+ return ret;
}
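The offset scheme reduces to simple arithmetic: snapshot the hardware tally counters once at first open, then report hw minus snapshot, so counters that survive a driver reload never leak into the per-load statistics. A minimal single-counter sketch under that assumption, without any locking:

#include <stdint.h>
#include <stdio.h>

struct tally { uint64_t tx_errors; };

static struct tally hw = { 120 };	/* pretend 120 errors survived a reload */
static struct tally offset;
static int offset_inited;

static void init_counter_offsets(void)
{
	if (offset_inited)
		return;
	offset = hw;		/* snapshot once, at first open */
	offset_inited = 1;
}

static uint64_t tx_errors_since_load(void)
{
	/* Report only what accumulated after this driver load. */
	return hw.tx_errors - offset.tx_errors;
}

int main(void)
{
	init_counter_offsets();
	hw.tx_errors += 7;	/* errors that happen after open */
	printf("tx_errors since load: %llu\n",
	       (unsigned long long)tx_errors_since_load());
	return 0;
}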
static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
rtl8169_update_counters(dev);
- data[0] = le64_to_cpu(tp->counters.tx_packets);
- data[1] = le64_to_cpu(tp->counters.rx_packets);
- data[2] = le64_to_cpu(tp->counters.tx_errors);
- data[3] = le32_to_cpu(tp->counters.rx_errors);
- data[4] = le16_to_cpu(tp->counters.rx_missed);
- data[5] = le16_to_cpu(tp->counters.align_errors);
- data[6] = le32_to_cpu(tp->counters.tx_one_collision);
- data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
- data[8] = le64_to_cpu(tp->counters.rx_unicast);
- data[9] = le64_to_cpu(tp->counters.rx_broadcast);
- data[10] = le32_to_cpu(tp->counters.rx_multicast);
- data[11] = le16_to_cpu(tp->counters.tx_aborted);
- data[12] = le16_to_cpu(tp->counters.tx_underun);
+ data[0] = le64_to_cpu(counters->tx_packets);
+ data[1] = le64_to_cpu(counters->rx_packets);
+ data[2] = le64_to_cpu(counters->tx_errors);
+ data[3] = le32_to_cpu(counters->rx_errors);
+ data[4] = le16_to_cpu(counters->rx_missed);
+ data[5] = le16_to_cpu(counters->align_errors);
+ data[6] = le32_to_cpu(counters->tx_one_collision);
+ data[7] = le32_to_cpu(counters->tx_multi_collision);
+ data[8] = le64_to_cpu(counters->rx_unicast);
+ data[9] = le64_to_cpu(counters->rx_broadcast);
+ data[10] = le32_to_cpu(counters->rx_multicast);
+ data[11] = le16_to_cpu(counters->tx_aborted);
+ data[12] = le16_to_cpu(counters->tx_underun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -4875,10 +4943,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
@@ -7365,6 +7435,9 @@ process_pkt:
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
}
release_descriptor:
desc->opts2 = 0;
@@ -7629,6 +7702,9 @@ static int rtl_open(struct net_device *dev)
rtl_hw_start(dev);
+ if (!rtl8169_init_counter_offsets(dev))
+ netif_warn(tp, hw, dev, "counter reset/update failed\n");
+
netif_start_queue(dev);
rtl_unlock_work(tp);
@@ -7661,6 +7737,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct rtl8169_counters *counters = tp->counters;
unsigned int start;
if (netif_running(dev))
@@ -7672,7 +7749,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_bytes = tp->rx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
-
do {
start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
@@ -7686,6 +7762,24 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_crc_errors = dev->stats.rx_crc_errors;
stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
stats->rx_missed_errors = dev->stats.rx_missed_errors;
+ stats->multicast = dev->stats.multicast;
+
+ /*
+	 * Fetch additional counter values missing from the stats the driver
+	 * collects, using the hardware tally counters.
+ */
+ rtl8169_update_counters(dev);
+
+ /*
+	 * Subtract values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for a description of why we do that.
+ */
+ stats->tx_errors = le64_to_cpu(counters->tx_errors) -
+ le64_to_cpu(tp->tc_offset.tx_errors);
+ stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
+ le32_to_cpu(tp->tc_offset.tx_multi_collision);
+ stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
+ le16_to_cpu(tp->tc_offset.tx_aborted);
return stats;
}
@@ -7886,6 +7980,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
+ dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
+ tp->counters, tp->counters_phys_addr);
+
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@@ -8311,9 +8408,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+	tp->counters = dma_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr, GFP_KERNEL);
+ if (!tp->counters) {
+ rc = -ENOMEM;
+ goto err_out_msi_4;
+ }
+
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_msi_4;
+ goto err_out_cnt_5;
pci_set_drvdata(pdev, dev);
@@ -8347,6 +8451,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
+err_out_cnt_5:
+ dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
+ tp->counters_phys_addr);
err_out_msi_4:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac4e2d6..a157aaaaff6a 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@ struct ravb_desc {
__le32 dptr; /* Descriptor pointer */
};
+#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */
+
enum DIE_DT {
/* Frame data */
DT_FMID = 0x40,
@@ -739,6 +741,7 @@ enum RAVB_QUEUE {
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define NUM_TX_DESC 2 /* TX descriptors per packet */
struct ravb_tstamp_skb {
struct list_head list;
@@ -777,9 +780,9 @@ struct ravb_private {
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ void *tx_align[NUM_TX_QUEUE];
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
- void **tx_buffers[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
struct net_device_stats stats[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..450899e9cea2 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
/* Free aligned TX buffers */
- if (priv->tx_buffers[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- kfree(priv->tx_buffers[q][i]);
- }
- kfree(priv->tx_buffers[q]);
- priv->tx_buffers[q] = NULL;
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
if (priv->tx_ring[q]) {
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] + 1);
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
@@ -223,14 +219,13 @@ static void ravb_ring_free(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc = NULL;
- struct ravb_tx_desc *tx_desc = NULL;
- struct ravb_desc *desc = NULL;
+ struct ravb_ex_rx_desc *rx_desc;
+ struct ravb_tx_desc *tx_desc;
+ struct ravb_desc *desc;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
- struct sk_buff *skb;
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ NUM_TX_DESC;
dma_addr_t dma_addr;
- void *buffer;
int i;
priv->cur_rx[q] = 0;
@@ -241,45 +236,33 @@ static void ravb_ring_format(struct net_device *ndev, int q)
memset(priv->rx_ring[q], 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- priv->rx_skb[q][i] = NULL;
- skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
- if (!skb)
- break;
- ravb_set_buffer_align(skb);
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
/* The size of the buffer should be on 16-byte boundary. */
rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb(skb);
- break;
- }
- priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping, which
+		 * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = &priv->rx_ring[q][i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
- priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
- for (i = 0; i < priv->num_tx_ring[q]; i++) {
- priv->tx_skb[q][i] = NULL;
- priv->tx_buffers[q][i] = NULL;
- buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
- if (!buffer)
- break;
- /* Aligned TX buffer */
- priv->tx_buffers[q][i] = buffer;
- tx_desc = &priv->tx_ring[q][i];
+ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+ i++, tx_desc++) {
+ tx_desc->die_dt = DT_EEMPTY;
+ tx_desc++;
tx_desc->die_dt = DT_EEMPTY;
}
- tx_desc = &priv->tx_ring[q][i];
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -298,7 +281,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
int ring_size;
+ int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,10 +293,18 @@ static int ravb_ring_init(struct net_device *ndev, int q)
if (!priv->rx_skb[q] || !priv->tx_skb[q])
goto error;
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ goto error;
+ ravb_set_buffer_align(skb);
+ priv->rx_skb[q][i] = skb;
+ }
+
/* Allocate rings for the aligned buffers */
- priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
- sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
- if (!priv->tx_buffers[q])
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
goto error;
/* Allocate all RX descriptors. */
@@ -325,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
/* Allocate all TX descriptors. */
- ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
@@ -435,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q)
struct net_device_stats *stats = &priv->stats[q];
struct ravb_tx_desc *desc;
int free_num = 0;
- int entry = 0;
+ int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
desc = &priv->tx_ring[q][entry];
if (desc->die_dt != DT_FEMPTY)
break;
@@ -447,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q)
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
- if (priv->tx_skb[q][entry]) {
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ stats->tx_packets++;
+ }
free_num++;
}
- stats->tx_packets++;
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
@@ -508,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
- u16 pkt_len = 0;
u8 desc_status;
+ u16 pkt_len;
int limit;
boguscnt = min(boguscnt, *quota);
@@ -524,6 +523,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (--boguscnt < 0)
break;
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
if (desc_status & MSC_MC)
stats->multicast++;
@@ -543,10 +546,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
- dma_sync_single_for_cpu(&ndev->dev,
- le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +586,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
- DMA_FROM_DEVICE);
dma_addr = dma_map_single(&ndev->dev, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
- dev_kfree_skb_any(skb);
- break;
- }
+			/* We just set the data size to 0 for a failed mapping,
+			 * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
}
@@ -1272,45 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work)
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_tstamp_skb *ts_skb = NULL;
u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
u32 dma_addr;
void *buffer;
u32 entry;
- u32 tccr;
+ u32 len;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+ NUM_TX_DESC) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_BUSY;
}
- entry = priv->cur_tx[q] % priv->num_tx_ring[q];
- priv->tx_skb[q][entry] = skb;
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
if (skb_put_padto(skb, ETH_ZLEN))
goto drop;
- buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
- memcpy(buffer, skb->data, skb->len);
- desc = &priv->tx_ring[q][entry];
- desc->ds_tagl = cpu_to_le16(skb->len);
- dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr))
goto drop;
+
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ goto unmap;
+
+ desc++;
+ desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
- dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ desc--;
+ dma_unmap_single(&ndev->dev, dma_addr, len,
DMA_TO_DEVICE);
- goto drop;
+ goto unmap;
}
ts_skb->skb = skb;
ts_skb->tag = priv->ts_skb_tag++;
@@ -1326,15 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Descriptor type must be set after all the above writes */
dma_wmb();
- desc->die_dt = DT_FSINGLE;
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
- tccr = ravb_read(ndev, TCCR);
- if (!(tccr & (TCCR_TSRQ0 << q)))
- ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+ ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
- priv->cur_tx[q]++;
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
- !ravb_tx_free(ndev, q))
+ priv->cur_tx[q] += NUM_TX_DESC;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
netif_stop_subqueue(ndev, q);
exit:
@@ -1342,9 +1357,12 @@ exit:
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
+unmap:
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
- priv->tx_skb[q][entry] = NULL;
+ priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
goto exit;
}
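[Editor's aside: in the reworked ravb_start_xmit() above, each packet is split across two descriptors so that both DMA pointers are DPTR_ALIGN-aligned: the unaligned head is copied into a per-entry bounce area (descriptor 0) and the now-aligned remainder is mapped in place (descriptor 1). Note also the publication order: DT_FEND is written before DT_FSTART so the hardware never observes a half-built chain. A standalone demo of the split arithmetic; the DPTR_ALIGN value and lengths are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>

#define DPTR_ALIGN 4	/* assumed hardware alignment requirement */

static char *ptr_align(char *p, uintptr_t a)
{
	return (char *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	char buf[64];
	char *data = buf + 3;		/* deliberately misaligned skb->data */
	size_t skb_len = 60;
	size_t head = ptr_align(data, DPTR_ALIGN) - data;

	printf("desc 0 (bounce copy):    %zu bytes\n", head);
	printf("desc 1 (mapped in situ): %zu bytes\n", skb_len - head);
	return 0;
}
]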
@@ -1641,7 +1659,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->dma = -1;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- error = -ENODEV;
+ error = irq;
goto out_release;
}
ndev->irq = irq;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7fb244f565b2..257ea713b4c1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3089,10 +3089,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- ret = -ENODEV;
+ if (ret < 0)
goto out_release;
- }
ndev->irq = ret;
SET_NETDEV_DEV(ndev, &pdev->dev);
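[Editor's aside: the ravb and sh_eth probe hunks above converge on the same idiom, sketched below. Propagating the raw negative errno matters because platform_get_irq() can return -EPROBE_DEFER, which a blanket -ENODEV would mask; this is a sketch of the pattern, not a further change:

irq = platform_get_irq(pdev, 0);
if (irq < 0)
	return irq;	/* preserve -EPROBE_DEFER and friends */
ndev->irq = irq;
]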
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..34ac41ac9e61 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -202,6 +202,7 @@ enum {
ROCKER_CTRL_IPV4_MCAST,
ROCKER_CTRL_IPV6_MCAST,
ROCKER_CTRL_DFLT_BRIDGING,
+ ROCKER_CTRL_DFLT_OVS,
ROCKER_CTRL_MAX,
};
@@ -323,7 +324,14 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
- return !!rocker_port->bridge_dev;
+ return rocker_port->bridge_dev &&
+ netif_is_bridge_master(rocker_port->bridge_dev);
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+ return rocker_port->bridge_dev &&
+ netif_is_ovs_master(rocker_port->bridge_dev);
}
#define ROCKER_OP_FLAG_REMOVE BIT(0)
@@ -1818,6 +1826,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
}
static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ int mtu = *(int *)priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+ mtu))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
@@ -1874,6 +1906,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
macaddr, NULL, NULL);
}
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+ int mtu)
+{
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ rocker_cmd_set_port_settings_mtu_prep,
+ &mtu, NULL, NULL);
+}
+
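[Editor's aside: an illustrative layout of the message the new MTU prep callback emits, inferred from the rocker_tlv_* calls above; offsets and encoding details are omitted:

ROCKER_TLV_CMD_TYPE = SET_PORT_SETTINGS          (u16)
ROCKER_TLV_CMD_INFO {
        ROCKER_TLV_CMD_PORT_SETTINGS_PPORT = pport   (u32)
        ROCKER_TLV_CMD_PORT_SETTINGS_MTU   = mtu     (u16)
}
]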
static int rocker_port_set_learning(struct rocker_port *rocker_port,
enum switchdev_trans trans)
{
@@ -3243,6 +3283,12 @@ static struct rocker_ctrl {
.bridge = true,
.copy_to_cpu = true,
},
+ [ROCKER_CTRL_DFLT_OVS] = {
+ /* pass all pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .acl = true,
+ },
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3801,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
break;
case BR_STATE_LEARNING:
case BR_STATE_FORWARDING:
- want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ if (!rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
want[ROCKER_CTRL_IPV4_MCAST] = true;
want[ROCKER_CTRL_IPV6_MCAST] = true;
if (rocker_port_is_bridged(rocker_port))
want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+ else if (rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_DFLT_OVS] = true;
else
want[ROCKER_CTRL_LOCAL_ARP] = true;
break;
@@ -3983,7 +4032,8 @@ static int rocker_port_open(struct net_device *dev)
napi_enable(&rocker_port->napi_tx);
napi_enable(&rocker_port->napi_rx);
- rocker_port_set_enable(rocker_port, true);
+ if (!dev->proto_down)
+ rocker_port_set_enable(rocker_port, true);
netif_start_queue(dev);
return 0;
@@ -4102,8 +4152,11 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb_headlen(skb));
if (err)
goto nest_cancel;
- if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
- goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+ err = skb_linearize(skb);
+ if (err)
+ goto unmap_frags;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -4152,6 +4205,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int running = netif_running(dev);
+ int err;
+
+#define ROCKER_PORT_MIN_MTU 68
+#define ROCKER_PORT_MAX_MTU 9000
+
+ if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+ return -EINVAL;
+
+ if (running)
+ rocker_port_stop(dev);
+
+ netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+ if (err)
+ return err;
+
+ if (running)
+ err = rocker_port_open(dev);
+
+ return err;
+}
+
static int rocker_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
@@ -4167,11 +4248,33 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
+static int rocker_port_change_proto_down(struct net_device *dev,
+ bool proto_down)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_port->dev->flags & IFF_UP)
+ rocker_port_set_enable(rocker_port, !proto_down);
+ rocker_port->dev->proto_down = proto_down;
+ return 0;
+}
+
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+ struct rocker_port *rocker_port = netdev_priv(n->dev);
+ int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
+ __be32 ip_addr = *(__be32 *)n->primary_key;
+
+ rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+ flags, ip_addr, n->ha);
+}
+
static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_open = rocker_port_open,
.ndo_stop = rocker_port_stop,
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_change_mtu = rocker_port_change_mtu,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4179,6 +4282,8 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+ .ndo_change_proto_down = rocker_port_change_proto_down,
+ .ndo_neigh_destroy = rocker_port_neigh_destroy,
};
/********************
@@ -4445,6 +4550,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
if (found->key.pport != rocker_port->pport)
continue;
fdb->addr = found->key.addr;
+ fdb->ndm_state = NUD_REACHABLE;
fdb->vid = rocker_port_vlan_to_vid(rocker_port,
found->key.vlan_id);
err = obj->cb(rocker_port->dev, obj);
@@ -4726,6 +4832,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
+ u16 rx_flags = 0;
if (!skb)
return -ENOENT;
@@ -4733,6 +4840,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
return -EINVAL;
+ if (attrs[ROCKER_TLV_RX_FLAGS])
+ rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
rocker_dma_rx_ring_skb_unmap(rocker, attrs);
@@ -4740,6 +4849,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
skb_put(skb, rx_len);
skb->protocol = eth_type_trans(skb, rocker_port->dev);
+ if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+ skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
rocker_port->dev->stats.rx_packets++;
rocker_port->dev->stats.rx_bytes += skb->len;
@@ -4821,6 +4933,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
@@ -4868,7 +4981,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
err = register_netdev(dev);
if (err) {
@@ -4877,11 +4990,13 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
+ switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
- dev_err(&pdev->dev, "install ig port table failed\n");
+ netdev_err(rocker_port->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
@@ -4901,6 +5016,7 @@ err_untagged_vlan:
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
+ rocker->ports[port_number] = NULL;
unregister_netdev(dev);
err_register_netdev:
free_netdev(dev);
@@ -5073,7 +5189,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_ports;
}
- dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+ dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+ (int)sizeof(rocker->hw.id), &rocker->hw.id);
return 0;
@@ -5156,6 +5273,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
+ switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
untagged_vid, 0);
@@ -5176,6 +5294,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
+ switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+ false);
rocker_port->bridge_dev = NULL;
err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5190,46 +5310,77 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
return err;
}
-static int rocker_port_master_changed(struct net_device *dev)
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ int err;
+
+ rocker_port->bridge_dev = master;
+
+ err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ if (err)
+ return err;
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+ return err;
+}
+
+static int rocker_port_master_linked(struct rocker_port *rocker_port,
+ struct net_device *master)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
- /* There are currently three cases handled here:
- * 1. Joining a bridge
- * 2. Leaving a previously joined bridge
- * 3. Other, e.g. being added to or removed from a bond or openvswitch,
- * in which case nothing is done
- */
- if (master && master->rtnl_link_ops &&
- !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ if (netif_is_bridge_master(master))
err = rocker_port_bridge_join(rocker_port, master);
- else if (rocker_port_is_bridged(rocker_port))
- err = rocker_port_bridge_leave(rocker_port);
+ else if (netif_is_ovs_master(master))
+ err = rocker_port_ovs_changed(rocker_port, master);
+ return err;
+}
+static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
+{
+ int err = 0;
+
+ if (rocker_port_is_bridged(rocker_port))
+ err = rocker_port_bridge_leave(rocker_port);
+ else if (rocker_port_is_ovsed(rocker_port))
+ err = rocker_port_ovs_changed(rocker_port, NULL);
return err;
}
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct rocker_port *rocker_port;
int err;
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_CHANGEUPPER:
- dev = netdev_notifier_info_to_dev(ptr);
- if (!rocker_port_dev_check(dev))
- return NOTIFY_DONE;
- err = rocker_port_master_changed(dev);
- if (err)
- netdev_warn(dev,
- "failed to reflect master change (err %d)\n",
- err);
+ info = ptr;
+ if (!info->master)
+ goto out;
+ rocker_port = netdev_priv(dev);
+ if (info->linking) {
+ err = rocker_port_master_linked(rocker_port,
+ info->upper_dev);
+ if (err)
+ netdev_warn(dev, "failed to reflect master linked (err %d)\n",
+ err);
+ } else {
+ err = rocker_port_master_unlinked(rocker_port);
+ if (err)
+ netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
+ err);
+ }
break;
}
-
+out:
return NOTIFY_DONE;
}
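[Editor's aside: the notifier rework above is the general NETDEV_CHANGEUPPER pattern for switch drivers on this kernel, using netdev_notifier_changeupper_info to tell link from unlink events. A hedged sketch; the my_* helpers are hypothetical stand-ins for a driver's own functions:

static int my_netdevice_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (!my_port_dev_check(dev))	/* hypothetical ownership test */
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER && info->master) {
		if (info->linking)
			my_master_linked(netdev_priv(dev), info->upper_dev);
		else
			my_master_unlinked(netdev_priv(dev));
	}
	return NOTIFY_DONE;
}
]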
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index c61fbf968036..12490b2f6504 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -159,6 +159,7 @@ enum {
ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@ enum {
#define ROCKER_RX_FLAGS_TCP BIT(5)
#define ROCKER_RX_FLAGS_UDP BIT(6)
#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8)
enum {
ROCKER_TLV_TX_UNSPEC,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 847643455468..ff649ebef637 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -49,6 +49,12 @@ enum {
*/
#define HUNT_FILTER_TBL_ROWS 8192
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+ u8 addr[ETH_ALEN];
+ u16 id;
+};
+
struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@ struct efx_ef10_filter_table {
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX 32
#define EFX_EF10_FILTER_DEV_MC_MAX 256
- struct {
- u8 addr[ETH_ALEN];
- u16 id;
- } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
- dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count; /* negative for PROMISC */
- int dev_mc_count; /* negative for PROMISC/ALLMULTI */
+ struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+ u16 ucdef_id;
+ u16 bcast_id;
+ u16 mcdef_id;
};
/* An arbitrary search limit for the software hash table */
@@ -101,6 +108,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
return resource_size(&efx->pci_dev->resource[bar]);
}
+static bool efx_ef10_is_vf(struct efx_nic *efx)
+{
+ return efx->type->is_vf;
+}
+
static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -283,11 +295,11 @@ static int efx_ef10_probe(struct efx_nic *efx)
/* We can have one VI for each 8K region. However, until we
* use TX option descriptors we need two TX queues per channel.
*/
- efx->max_channels =
- min_t(unsigned int,
- EFX_MAX_CHANNELS,
- efx_ef10_mem_map_size(efx) /
- (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ efx->max_channels = min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ efx_ef10_mem_map_size(efx) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ efx->max_tx_channels = efx->max_channels;
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -382,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
* First try to enable it, then if we get EPERM, just
* ask if it's already enabled
*/
- rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
if (rc == 0) {
nic_data->workaround_35388 = true;
} else if (rc == -EPERM) {
@@ -677,6 +689,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
@@ -770,11 +824,13 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int uc_mem_map_size, wc_mem_map_size;
- unsigned int min_vis, pio_write_vi_base, max_vis;
+ unsigned int min_vis = max(EFX_TXQ_TYPES,
+ efx_separate_tx_channels ? 2 : 1);
+ unsigned int channel_vis, pio_write_vi_base, max_vis;
void __iomem *membase;
int rc;
- min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -808,11 +864,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* page size is >4K). So we may allocate some extra VIs just
* for writing PIO buffers through.
*
- * The UC mapping contains (min_vis - 1) complete VIs and the
+ * The UC mapping contains (channel_vis - 1) complete VIs and the
* first half of the next VI. Then the WC mapping begins with
* the second half of this last VI.
*/
- uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
/* pio_write_vi_base rounds down to give the number of complete
@@ -827,7 +883,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
} else {
pio_write_vi_base = 0;
wc_mem_map_size = 0;
- max_vis = min_vis;
+ max_vis = channel_vis;
}
/* In case the last attached driver failed to free VIs, do it now */
@@ -839,6 +895,23 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
if (rc != 0)
return rc;
+ if (nic_data->n_allocated_vis < channel_vis) {
+ netif_info(efx, drv, efx->net_dev,
+ "Could not allocate enough VIs to satisfy RSS"
+ " requirements. Performance may not be optimal.\n");
+ /* We didn't get the VIs to populate our channels.
+ * We could keep what we got but then we'd have more
+ * interrupts than we need.
+ * Instead calculate new max_channels and restart
+ */
+ efx->max_channels = nic_data->n_allocated_vis;
+ efx->max_tx_channels =
+ nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+
+ efx_ef10_free_vis(efx);
+ return -EAGAIN;
+ }
+
/* If we didn't get enough VIs to map all the PIO buffers, free the
* PIO buffers
*/
@@ -937,12 +1010,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+ unsigned int i;
+#endif
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+ if (nic_data->vf)
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].vport_id = 0;
+#endif
}
static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -987,6 +1072,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
int rc = efx_mcdi_reset(efx, reset_type);
+ /* Unprivileged functions return -EPERM, but need to return success
+ * here so that the datapath is brought back up.
+ */
+ if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+ rc = 0;
+
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
@@ -1235,7 +1326,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
}
}
- if (core_stats) {
+ if (!core_stats)
+ return stats_count;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+ /* Use vadaptor stats. */
core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
stats[EF10_STAT_rx_multicast] +
stats[EF10_STAT_rx_broadcast];
@@ -1255,6 +1351,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
core_stats->rx_errors = core_stats->rx_crc_errors;
core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ } else {
+ /* Use port stats. */
+ core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_port_rx_gtjumbo] +
+ stats[EF10_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF10_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
}
return stats_count;
@@ -1511,10 +1627,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
- /* Driver-created vswitches and vports must be re-created */
- nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -2150,6 +2262,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel)
GFP_KERNEL);
}
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
static int efx_ef10_ev_init(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf,
@@ -2161,6 +2296,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
struct efx_ef10_nic_data *nic_data;
bool supports_rx_merge;
size_t inlen, outlen;
+ unsigned int enabled, implemented;
dma_addr_t dma_addr;
int rc;
int i;
@@ -2201,30 +2337,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
/* IRQ return is ignored */
- return rc;
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
+ if (channel->channel || rc)
+ return rc;
- if (rc && rc != -EALREADY)
+ /* Successfully created event queue on channel 0 */
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before the bug26807
+ * workaround, thus the latter must be unavailable in this fw
+ */
+ nic_data->workaround_26807 = false;
+ rc = 0;
+ } else if (rc) {
goto fail;
+ } else {
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+ !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+ /* MC's boot count has incremented */
+ ++nic_data->warm_boot_count;
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ }
- return;
+ if (!rc)
+ return 0;
fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
+ efx_ef10_ev_fini(channel);
+ return rc;
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3178,6 +3336,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
filter_id, false);
}
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+ return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, true);
+}
+
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
@@ -3551,6 +3722,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
goto fail;
}
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
return 0;
@@ -3653,145 +3828,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+ if (id != EFX_EF10_FILTER_ID_INVALID) { \
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+ WARN_ON(!table->entry[filter_idx].spec); \
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+ }
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_filter_spec spec;
- bool remove_failed = false;
- struct netdev_hw_addr *uc;
- struct netdev_hw_addr *mc;
- unsigned int filter_idx;
- int i, n, rc;
-
- if (!efx_dev_registered(efx))
- return;
+ unsigned int filter_idx, i;
if (!table)
return;
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
- n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
+ for (i = 0; i < table->dev_uc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ for (i = 0; i < table->dev_mc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->dev_uc_count = -1;
- } else {
- table->dev_uc_count = 1 + netdev_uc_count(net_dev);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ int addr_count;
+ unsigned int i;
+
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ addr_count = netdev_uc_count(net_dev);
+ if (net_dev->flags & IFF_PROMISC)
+ *promisc = true;
+ table->dev_uc_count = 1 + addr_count;
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->dev_mc_count = -1;
- } else {
- table->dev_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->dev_mc_list[0].addr);
- i = 1;
- netdev_for_each_mc_addr(mc, net_dev) {
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i, addr_count;
+
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ *promisc = true;
+
+ addr_count = netdev_mc_count(net_dev);
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- netif_addr_unlock_bh(net_dev);
- /* Insert/renew unicast filters */
- if (table->dev_uc_count >= 0) {
- for (i = 0; i < table->dev_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_uc_list[i].addr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- /* Fall back to unicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
+ table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_dev_addr *addr_list;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ }
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ addr_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_ef10_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->dev_uc_list[i].id);
- table->dev_uc_count = -1;
- break;
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* mark as not inserted, and carry on */
+ rc = EFX_EF10_FILTER_ID_INVALID;
}
- table->dev_uc_list[i].id = rc;
}
+ addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- if (table->dev_uc_count < 0) {
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
- efx_filter_set_uc_def(&spec);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- WARN_ON(1);
- table->dev_uc_count = 0;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
} else {
- table->dev_uc_list[0].id = rc;
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
- /* Insert/renew multicast filters */
- if (table->dev_mc_count >= 0) {
- for (i = 0; i < table->dev_mc_count; i++) {
+ return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+ bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "%scast mismatch filter insert failed rc=%d\n",
+ multicast ? "Multi" : "Uni", rc);
+ } else if (multicast) {
+ table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ if (!nic_data->workaround_26807) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
+ eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_mc_list[i].addr);
+ baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- /* Fall back to multicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_AUTO,
- table->dev_mc_list[i].id);
- table->dev_mc_count = -1;
- break;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ table->mcdef_id);
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- table->dev_mc_list[i].id = rc;
- }
- }
- if (table->dev_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_mc_def(&spec);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- WARN_ON(1);
- table->dev_mc_count = 0;
- } else {
- table->dev_mc_list[0].id = rc;
}
+ rc = 0;
+ } else {
+ table->ucdef_id = rc;
+ rc = 0;
}
+ return rc;
+}
+
+/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ bool remove_failed = false;
+ int i;
- /* Remove filters that weren't renewed. Since nothing else
- * changes the AUTO_OLD flag or removes these filters, we
- * don't need to hold the filter_lock while scanning for
- * these filters.
- */
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3804,6 +4067,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
+static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac_old[ETH_ALEN];
+ int rc, rc2;
+
+ /* Only reconfigure a PF-created vport */
+ if (is_zero_ether_addr(nic_data->vport_mac))
+ return 0;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+
+ rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ if (rc)
+ goto restore_filters;
+
+ ether_addr_copy(mac_old, nic_data->vport_mac);
+ rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ nic_data->vport_mac);
+ if (rc)
+ goto restore_vadaptor;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ if (!rc) {
+ ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
+ } else {
+ rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ if (rc2) {
+ /* Failed to add original MAC, so clear vport_mac */
+ eth_zero_addr(nic_data->vport_mac);
+ goto reset_nic;
+ }
+ }
+
+restore_vadaptor:
+ rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc2)
+ goto reset_nic;
+restore_filters:
+ down_write(&efx->filter_sem);
+ rc2 = efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc2)
+ goto reset_nic;
+
+ rc2 = efx_net_open(efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(efx->net_dev);
+
+ return rc;
+
+reset_nic:
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore when changing MAC address - scheduling reset\n");
+ efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
+
+ return rc ? rc : rc2;
+}
+
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ bool uc_promisc = false, mc_promisc = false;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+ efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (uc_promisc) {
+ efx_ef10_filter_insert_def(efx, false, false);
+ efx_ef10_filter_insert_addr_list(efx, false, false);
+ } else {
+ /* If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, false, false))
+ efx_ef10_filter_insert_def(efx, false, false);
+ }
+
+ /* Insert/renew multicast filters */
+ /* If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ efx_ef10_filter_remove_old(efx);
+ if (mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /* If we failed to insert promiscuous filters, roll back
+ * and fall back to individual multicast filters
+ */
+ if (efx_ef10_filter_insert_def(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_ef10_filter_remove_old(efx);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If we failed to insert promiscuous filters, don't
+ * roll back. Regardless, also insert the mc_list
+ */
+ efx_ef10_filter_insert_def(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If any filters failed to insert, roll back and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_ef10_filter_remove_old(efx);
+ if (efx_ef10_filter_insert_def(efx, true, true))
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ }
+
+ efx_ef10_filter_remove_old(efx);
+ efx->mc_promisc = mc_promisc;
+}
+
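[Editor's aside: the refactored sync path above is a mark/renew/sweep scheme: mark every installed auto filter AUTO_OLD, re-insert the current address lists (which clears the flag on survivors), then sweep whatever is still marked. A standalone toy model of that lifecycle; table size, flag value and addresses are illustrative:

#include <stdio.h>
#include <string.h>

#define NFILTERS	8
#define FLAG_AUTO_OLD	1

struct filter {
	char addr[16];
	int used;
	int flags;
};

static struct filter tbl[NFILTERS];

static void mark_old(void)
{
	for (int i = 0; i < NFILTERS; i++)
		if (tbl[i].used)
			tbl[i].flags |= FLAG_AUTO_OLD;
}

/* Renews an existing entry (clearing AUTO_OLD) or takes a free slot */
static void insert(const char *addr)
{
	for (int i = 0; i < NFILTERS; i++)
		if (tbl[i].used && !strcmp(tbl[i].addr, addr)) {
			tbl[i].flags &= ~FLAG_AUTO_OLD;
			return;
		}
	for (int i = 0; i < NFILTERS; i++)
		if (!tbl[i].used) {
			snprintf(tbl[i].addr, sizeof(tbl[i].addr), "%s", addr);
			tbl[i].used = 1;
			tbl[i].flags = 0;
			return;
		}
}

static void remove_old(void)
{
	for (int i = 0; i < NFILTERS; i++)
		if (tbl[i].used && (tbl[i].flags & FLAG_AUTO_OLD))
			tbl[i].used = 0;
}

int main(void)
{
	insert("aa:aa"); insert("bb:bb");	/* initial rx mode */
	mark_old();				/* start of a resync */
	insert("bb:bb"); insert("cc:cc");	/* current address list */
	remove_old();				/* sweeps the stale "aa:aa" */

	for (int i = 0; i < NFILTERS; i++)
		if (tbl[i].used)
			printf("installed: %s\n", tbl[i].addr);
	return 0;
}
]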
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +4230,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
nic_data->vport_id);
- rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
@@ -3829,38 +4239,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
-#if !defined(CONFIG_SFC_SRIOV)
- if (rc == -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
-#else
- if (rc == -EPERM) {
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
- /* Switch to PF and change MAC address on vport */
- if (efx->pci_dev->is_virtfn && pci_dev_pf) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ if (rc == -EPERM) {
+ struct efx_nic *efx_pf;
- if (!efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr))
- return 0;
- }
- netif_err(efx, drv, efx->net_dev,
- "Cannot change MAC address; use sfboot to enable mac-spoofing"
- " on this interface\n");
- } else if (efx->pci_dev->is_virtfn) {
- /* Successfully changed by VF (with MAC spoofing), so update the
- * parent PF if possible.
- */
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ /* Switch to PF and change MAC address on vport */
+ efx_pf = pci_get_drvdata(pci_dev_pf);
- if (pci_dev_pf) {
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr);
+ } else if (!rc) {
struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
unsigned int i;
+ /* MAC address successfully changed by VF (with MAC
+ * spoofing) so update the parent PF if possible.
+ */
for (i = 0; i < efx_pf->vf_count; ++i) {
struct ef10_vf *vf = nic_data->vf + i;
@@ -3871,8 +4270,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
}
}
}
- }
+ } else
#endif
+ if (rc == -EPERM) {
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable"
+ " mac-spoofing on this interface\n");
+ } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+ /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
+ * fall-back to the method of changing the MAC address on the
+ * vport. This only applies to PFs because such versions of
+ * MCFW do not support VFs.
+ */
+ rc = efx_ef10_vport_set_mac_address(efx);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+ sizeof(inbuf), NULL, 0, rc);
+ }
+
return rc;
}
@@ -3967,6 +4382,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
out:
+ if (rc == -EPERM)
+ rc = 0;
rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
return rc ? rc : rc2;
}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 6c9b6e45509a..3c17f274e802 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
NULL, 0, NULL);
}
-static int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
-static int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
- ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
-
- return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
-}
-
static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
unsigned int vswitch_type)
{
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
NULL, 0, NULL);
}
-static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
- return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
vf->vlan, &vf->vport_id);
if (rc)
- goto reset_nic;
+ goto reset_nic_up_write;
restore_mac:
if (!is_zero_ether_addr(vf->mac)) {
rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
if (rc2) {
eth_zero_addr(vf->mac);
- goto reset_nic;
+ goto reset_nic_up_write;
}
}
restore_evb_port:
rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
else
vf->vport_assigned = 1;
@@ -662,14 +620,16 @@ restore_vadaptor:
if (vf->efx) {
rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
}
restore_filters:
if (vf->efx) {
rc2 = vf->efx->type->filter_table_probe(vf->efx);
if (rc2)
- goto reset_nic;
+ goto reset_nic_up_write;
+
+ up_write(&vf->efx->filter_sem);
up_write(&vf->efx->filter_sem);
@@ -681,9 +641,12 @@ restore_filters:
}
return rc;
+reset_nic_up_write:
+ if (vf->efx)
+ up_write(&vf->efx->filter_sem);
+
reset_nic:
if (vf->efx) {
- up_write(&vf->efx->filter_sem);
netif_err(efx, drv, efx->net_dev,
"Failed to restore VF - scheduling reset.\n");
efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index db4ef537c610..6d25b92cb45e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 804b9ad553d3..974637d3ae25 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -115,9 +115,9 @@ static struct workqueue_struct *reset_workqueue;
*
* This is only used in MSI-X interrupt mode
*/
-static bool separate_tx_channels;
-module_param(separate_tx_channels, bool, 0444);
-MODULE_PARM_DESC(separate_tx_channels,
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
+ struct efx_tx_queue *tx_queue;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
spent = efx_nic_process_eventq(channel, budget);
if (spent && efx_channel_has_rx_queue(channel)) {
struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_fast_push_rx_descriptors(rx_queue, true);
}
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl, tx_queue->bytes_compl);
+ }
+ }
+
return spent;
}
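[Editor's aside: the per-channel counters added above feed Byte Queue Limits. For that accounting to balance, the completion report must pair with a sent-queue report on the transmit side, which lives outside this hunk. A hedged sketch of the two ends, using the real netdev BQL API:

/* xmit path, per skb queued to the hardware ring: */
netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

/* completion path, once per NAPI poll (as added above): */
if (tx_queue->bytes_compl)
	netdev_tx_completed_queue(tx_queue->core_txq,
				  tx_queue->pkts_compl,
				  tx_queue->bytes_compl);
]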
@@ -1377,7 +1391,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
unsigned int n_channels;
n_channels = efx_wanted_parallelism(efx);
- if (separate_tx_channels)
+ if (efx_separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
n_channels = min(n_channels, efx->max_channels);
@@ -1404,13 +1418,16 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
- if (separate_tx_channels) {
- efx->n_tx_channels = max(n_channels / 2, 1U);
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels = min(max(n_channels / 2,
+ 1U),
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
} else {
- efx->n_tx_channels = n_channels;
+ efx->n_tx_channels = min(n_channels,
+ efx->max_tx_channels);
efx->n_rx_channels = n_channels;
}
for (i = 0; i < efx->n_channels; i++)
@@ -1436,7 +1453,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1610,7 +1627,8 @@ static void efx_set_channels(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
efx->tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
/* We need to mark which channels really have RX and TX
* queues, and adjust the TX queue numbers if we have separate
@@ -1639,17 +1657,34 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and queues by trying to hook
- * in MSI-X interrupts. */
- rc = efx_probe_interrupts(efx);
- if (rc)
- goto fail1;
+ do {
+ if (!efx->max_channels || !efx->max_tx_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources to allocate"
+ " any channels\n");
+ rc = -ENOSPC;
+ goto fail1;
+ }
- efx_set_channels(efx);
+ /* Determine the number of channels and queues by trying
+ * to hook in MSI-X interrupts.
+ */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
- rc = efx->type->dimension_resources(efx);
- if (rc)
- goto fail2;
+ efx_set_channels(efx);
+
+ /* dimension_resources can fail with EAGAIN */
+ rc = efx->type->dimension_resources(efx);
+ if (rc != 0 && rc != -EAGAIN)
+ goto fail2;
+
+ if (rc == -EAGAIN)
+ /* try again with new max_channels */
+ efx_remove_interrupts(efx);
+
+ } while (rc == -EAGAIN);
if (efx->n_channels > 1)
netdev_rss_key_fill(&efx->rx_hash_key,
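[Editor's aside: the probe loop above cooperates with the EF10 dimension_resources() hunk earlier in this diff: the NIC callback may shrink max_channels/max_tx_channels and return -EAGAIN, and efx_probe_nic() retries interrupt setup until the request fits what the firmware granted. A standalone model of that contract; all numbers are illustrative:

#include <errno.h>
#include <stdio.h>

static unsigned int allocated_vis = 5;	/* what the firmware granted */

static int dimension_resources(unsigned int *max_channels)
{
	unsigned int wanted = *max_channels * 2;  /* assume 2 VIs per channel */

	if (allocated_vis < wanted) {
		*max_channels = allocated_vis / 2;  /* shrink, caller retries */
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	unsigned int max_channels = 8;
	int rc;

	do {
		if (!max_channels) {
			fprintf(stderr, "no channels possible\n");
			return 1;
		}
		rc = dimension_resources(&max_channels);
		printf("max_channels=%u rc=%d\n", max_channels, rc);
	} while (rc == -EAGAIN);
	return 0;
}
]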
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index acb1e0718485..1aaf76c1ace8 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -35,6 +35,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
+extern bool efx_separate_tx_channels;
/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 80e69af21642..d790cb8d9db3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2371,6 +2371,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
EFX_MAX_CHANNELS);
+ efx->max_tx_channels = efx->max_channels;
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81640f8bb811..98d172b04f71 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
- return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (!flags)
+ return 0;
+
+ if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flags = 0;
+
+ return 0;
}
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
return 0;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+ * terrifying. The call site will have to deal with it though.
+ */
+ netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
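The reworked efx_mcdi_set_workaround() above sizes its response buffer for MC_CMD_WORKAROUND_EXT_OUT but only trusts the flags dword when the firmware actually returned that many bytes; older firmware replies with an empty payload. A standalone sketch of the same length guard, assuming a little-endian host (the helper name decode_flags is invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4          /* from mcdi_pcol.h */

/* Only read the flags dword when the response is long enough to
 * contain it; otherwise report no flags, as old firmware would. */
static void decode_flags(const uint8_t *outbuf, size_t outlen,
                         unsigned int *flags)
{
        uint32_t v;

        if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) {
                memcpy(&v, outbuf, sizeof(v));   /* dword 0, little-endian */
                *flags = v;
        } else {
                *flags = 0;
        }
}

int main(void)
{
        const uint8_t ext_out[4] = { 0x01, 0, 0, 0 };
        unsigned int flags;

        decode_flags(ext_out, sizeof(ext_out), &flags);
        printf("flags=%#x\n", flags);
        decode_flags(ext_out, 0, &flags);        /* legacy empty response */
        printf("flags=%#x\n", flags);
        return 0;
}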
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1838afe2da92..025d504c472b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 45fca9fc66b7..4cc772164a79 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -26,6 +26,10 @@
* Unlike a warm boot, assume DMEM has been reloaded, so that
* the MC persistent data must be reinitialised. */
#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
/* BIST state has been initialized */
#define MC_FW_BIST_INIT_OK (128)
@@ -169,6 +173,8 @@
#define MC_CMD_ERR_EINTR 4
/* I/O failure */
#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
/* Try again */
#define MC_CMD_ERR_EAGAIN 11
/* Out of memory */
@@ -181,6 +187,10 @@
#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
/* Out of range */
#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
@@ -226,6 +236,43 @@
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
/* The datapath is disabled. */
#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the corresponding PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * is not awaiting authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
#define MC_CMD_ERR_CODE_OFST 0
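MC_CMD_ERR_PROXY_PENDING is unusual in that the error response carries extra payload: a 32-bit handle at byte offset MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST, which the driver must remember so it can match the later PROXY_RESPONSE event before resending. A small sketch of pulling that handle out of a raw little-endian error buffer (the dword_at helper is invented):

#include <stdint.h>
#include <stdio.h>

#define MC_CMD_ERR_PROXY_PENDING             0x1010  /* from mcdi_pcol.h */
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4

/* Read a little-endian dword at a byte offset of an MCDI response. */
static uint32_t dword_at(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

int main(void)
{
        /* Fabricated response: error code 0x1010, then handle 0xabcd. */
        const uint8_t err[8] = { 0x10, 0x10, 0, 0, 0xcd, 0xab, 0, 0 };

        if (dword_at(err, 0) == MC_CMD_ERR_PROXY_PENDING)
                printf("await PROXY_RESPONSE for handle %#x\n",
                       (unsigned int)dword_at(err,
                                MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST));
        return 0;
}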
@@ -275,6 +322,11 @@
MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
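EVB_STACK_ID() simply places a 1-255 stack ID in bits 16-23 so it can be ORed into an EVB port ID. For example (the port-ID constant below is a placeholder, not defined in this hunk):

#include <stdint.h>
#include <stdio.h>

#define EVB_STACK_ID(n) (((n) & 0xff) << 16)     /* from mcdi_pcol.h */
#define EXAMPLE_EVB_PORT_ID 0x1000000            /* placeholder value */

int main(void)
{
        uint32_t port = EXAMPLE_EVB_PORT_ID | EVB_STACK_ID(7);

        printf("EVB port with stack 7: %#x\n", (unsigned int)port);
        return 0;
}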
/* Version 2 adds an optional argument to error returns: the errno value
* may be followed by the (0-based) number of the first argument that
@@ -394,6 +446,8 @@
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
/* enum: DDR ECC status update */
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -408,6 +462,16 @@
#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -416,6 +480,8 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
/* enum: Bad assert. */
#define MCDI_EVENT_CODE_BADSSERT 0x1
/* enum: PM Notice. */
@@ -470,6 +536,14 @@
#define MCDI_EVENT_CODE_MC_BIST 0x19
/* enum: PTP tick event providing current NIC time */
#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -537,6 +611,33 @@
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
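When sync-status reporting is enabled, a PTP_TIME event repurposes bits 36 and 37 as clock-validity and in-sync flags and narrows the extra minor-time bits to 38-43. A sketch decoding such an event with the usual LBN/WIDTH convention (the event value is fabricated):

#include <stdint.h>
#include <stdio.h>

/* From mcdi_pcol.h */
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
#define MCDI_EVENT_CODE_PTP_TIME 0x1a
#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6

/* Bit fields are numbered from bit 0 of the 64-bit event. */
static uint64_t ev_bits(uint64_t ev, unsigned int lbn, unsigned int width)
{
        return (ev >> lbn) & ((1ULL << width) - 1);
}

int main(void)
{
        uint64_t ev =
            ((uint64_t)MCDI_EVENT_CODE_PTP_TIME << MCDI_EVENT_CODE_LBN) |
            (1ULL << MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN) |
            (1ULL << MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN) |
            (0x15ULL << MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN);

        if (ev_bits(ev, MCDI_EVENT_CODE_LBN, MCDI_EVENT_CODE_WIDTH) ==
            MCDI_EVENT_CODE_PTP_TIME)
                printf("valid=%llu in_sync=%llu minor[26:21]=%#llx\n",
                       (unsigned long long)ev_bits(ev,
                                MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN, 1),
                       (unsigned long long)ev_bits(ev,
                                MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN, 1),
                       (unsigned long long)ev_bits(ev,
                                MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN,
                                MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH));
        return 0;
}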
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
#define FCDI_EVENT_CODE_PTP_TICK 0x7
/* enum: ECC error counters */
#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -631,6 +749,90 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
/***********************************/
/* MC_CMD_READ32
@@ -687,24 +889,34 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
*/
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
*/
#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
*/
#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
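The deprecated magic source addresses are thus re-expressed as combinations of the BOOT_MAGIC bits. A sketch composing a SRC_ADDR that sets BOOT_MAGIC_PRESENT together with the satellite-CPUs and ignore-config bits; this only illustrates the bit packing, not what any particular firmware accepts:

#include <stdint.h>
#include <stdio.h>

/* Bit positions from mcdi_pcol.h */
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3

int main(void)
{
        uint32_t src_addr =
            (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN) |
            (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN) |
            (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN);

        printf("COPYCODE SRC_ADDR = %#x\n", (unsigned int)src_addr);
        return 0;
}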
@@ -795,6 +1007,10 @@
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -802,7 +1018,8 @@
/***********************************/
/* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
*/
#define MC_CMD_LOG_CTRL 0x7
@@ -816,6 +1033,7 @@
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -955,8 +1173,12 @@
* input on the same NIC.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1b
+#define MC_CMD_PTP_OP_MAX 0x1c
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
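The subscribe QUEUE word therefore carries the event-queue index in its low 16 bits and the new REPORT_SYNC_STATUS flag in bit 31. A sketch of packing it (the queue index is fabricated):

#include <stdint.h>
#include <stdio.h>

/* From mcdi_pcol.h */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31

int main(void)
{
        unsigned int evq = 3;                    /* example EVQ index */
        uint32_t mask =
            (1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH) - 1;
        uint32_t queue =
            ((evq & mask) << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN) |
            (1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);

        printf("SUBSCRIBE QUEUE word = %#x\n", (unsigned int)queue);
        return 0;
}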
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
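A sketch of filling the 24-byte SET_SYNC_STATUS request body. The real driver would use the MCDI_SET_DWORD macros; here CMD and PERIPH_ID (offsets 0 and 4) are left zero purely for illustration, and the 60-second timeout is a fabricated example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* From mcdi_pcol.h */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12

/* Store a little-endian dword at a byte offset. */
static void put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst]     = (uint8_t)v;
        buf[ofst + 1] = (uint8_t)(v >> 8);
        buf[ofst + 2] = (uint8_t)(v >> 16);
        buf[ofst + 3] = (uint8_t)(v >> 24);
}

int main(void)
{
        uint8_t inbuf[MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN];

        memset(inbuf, 0, sizeof(inbuf));  /* CMD, PERIPH_ID, reserved */
        put_dword(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST,
                  MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC);
        put_dword(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST, 60);
        printf("built %zu-byte SET_SYNC_STATUS request\n", sizeof(inbuf));
        return 0;
}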
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1375,7 +1618,7 @@
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
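Because GET_ATTRIBUTES grew from 8 to 24 bytes, a driver should check the response length before trusting the CAPABILITIES word; older firmware still returns the short form. A standalone sketch of that check, assuming little-endian buffers (helper names invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* From mcdi_pcol.h */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0

static uint32_t dword_at(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

/* The capability word only exists when outlen covers it. */
static int report_sync_supported(const uint8_t *outbuf, size_t outlen)
{
        if (outlen < MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST + 4)
                return 0;
        return (dword_at(outbuf,
                         MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST) >>
                MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN) & 1;
}

int main(void)
{
        uint8_t outbuf[24] = { 0 };

        outbuf[8] = 0x01;                /* capability bit set */
        printf("new firmware: %d\n",
               report_sync_supported(outbuf, sizeof(outbuf)));
        printf("old firmware: %d\n", report_sync_supported(outbuf, 8));
        return 0;
}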
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
@@ -1415,6 +1665,9 @@
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1915,6 +2168,14 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
/* enum: Only this option is allowed for non-admin functions */
#define MC_CMD_FW_DONT_CARE 0xffffffff
@@ -2481,6 +2742,12 @@
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
/* enum: Near side of AOE Siena side port */
#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2552,12 +2819,8 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -2632,7 +2895,7 @@
#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
/* MC_CMD_SET_MAC_IN msgrequest */
-#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_LEN 28
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
* EtherII, VLAN, bug16011 padding).
*/
@@ -2649,13 +2912,20 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
/* enum: Flow control is off. */
-/* MC_CMD_FCNTL_OFF 0x0 */
+#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
-/* MC_CMD_FCNTL_RESPOND 0x1 */
+#define MC_CMD_FCNTL_RESPOND 0x1
/* enum: Respond to and Issue flow control. */
-/* MC_CMD_FCNTL_BIDIR 0x2 */
+#define MC_CMD_FCNTL_BIDIR 0x2
/* enum: Auto neg flow control. */
#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
* guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
* performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
@@ -2791,6 +3062,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
@@ -2890,8 +3162,8 @@
* PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
/* enum: FC Log. */
#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
/***********************************/
@@ -3407,6 +3681,8 @@
*/
#define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SCHEDINFO_IN msgrequest */
#define MC_CMD_SCHEDINFO_IN_LEN 0
@@ -3593,6 +3869,68 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
/* enum: Hotpoint temperature: degC */
#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
#define MC_CMD_SENSOR_STATE_BROKEN 0x3
/* enum: Sensor is working but does not currently have a reading. */
#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
#define MC_CMD_WORKAROUND_BUG35388 0x2
/* enum: Bug35017 workaround (A64 tables must be identity map) */
#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
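Putting the BUG26807 pieces together: the request selects the workaround type and an enable flag, and when the extended response comes back the FLR_DONE bit tells an admin caller that functions with installed filters were FLRed. An illustrative sketch with fabricated values:

#include <stdint.h>
#include <stdio.h>

/* From mcdi_pcol.h */
#define MC_CMD_WORKAROUND_BUG26807 0x6
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0

int main(void)
{
        uint32_t type = MC_CMD_WORKAROUND_BUG26807;
        uint32_t enabled = 1;            /* any non-zero value enables */
        uint32_t ext_out_flags = 0x1;    /* pretend firmware reply */

        printf("WORKAROUND_IN: TYPE=%#x ENABLED=%u\n",
               (unsigned int)type, (unsigned int)enabled);
        if ((ext_out_flags >> MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) & 1)
                printf("functions with filters installed were FLRed\n");
        return 0;
}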
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
@@ -4093,7 +4461,7 @@
/***********************************/
/* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
@@ -4115,6 +4483,527 @@
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
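Each dword of the READ_SENSORS response packs a 16-bit reading, an 8-bit state and an 8-bit type. A sketch unpacking one (the sample value is fabricated):

#include <stdint.h>
#include <stdio.h>

/* From mcdi_pcol.h */
#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24

int main(void)
{
        uint32_t data = 0x2d0104d2;      /* fabricated sensor dword */

        printf("reading=%u state=%u type=%#x\n",
               (unsigned int)((data >> MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN) & 0xffff),
               (unsigned int)((data >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff),
               (unsigned int)((data >> MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN) & 0xff));
        return 0;
}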
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -4203,6 +5092,30 @@
#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -4218,66 +5131,69 @@
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
/* enum: OpenOnload */
-#define LICENSED_APP_ID_ONLOAD 0x1
+#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
-#define LICENSED_APP_ID_PTP 0x2
+#define LICENSED_APP_ID_PTP 0x2
/* enum: SolarCapture Pro */
-#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event, ordinary TX completion, low or high part of TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
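+
+/* Illustrative sketch (an assumption, not part of the generated definitions):
+ * a host driver could reassemble the 32-bit timestamp from the two event
+ * halves, e.g.
+ *
+ *   u32 tstamp = (u32)hi_data << TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH |
+ *                lo_data;
+ *
+ * where lo_data and hi_data are the 16-bit TSTAMP_DATA fields taken from a
+ * TX_EV_TSTAMP_LO event and the following TX_EV_TSTAMP_HI event.
+ */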
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The
+ * YAML generation tools require this structure to be a whole number of bytes
+ * wide, but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
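+
+/* Illustrative sketch (an assumption, not part of the generated definitions):
+ * a 4-tuple hash selector sets all four bits, e.g.
+ *
+ *   u8 mode = 1 << RSS_MODE_HASH_SRC_ADDR_LBN |
+ *             1 << RSS_MODE_HASH_DST_ADDR_LBN |
+ *             1 << RSS_MODE_HASH_SRC_PORT_LBN |
+ *             1 << RSS_MODE_HASH_DST_PORT_LBN;   (== 0xf)
+ *
+ * while mode == 0 disables RSS spreading for that packet type.
+ */
+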
/***********************************/
@@ -4413,7 +5329,9 @@
#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy INIT_RXQ request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
@@ -4456,9 +5374,73 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
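+/* Illustrative note (an assumption, not part of the generated definitions):
+ * each PS_BUFF step above halves the packed stream buffer, i.e. buffer bytes
+ * == (1024 * 1024) >> enum value, from 1M down to 64K.
+ */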
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
/***********************************/
/* MC_CMD_INIT_TXQ
@@ -4467,7 +5449,9 @@
#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
@@ -4499,6 +5483,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4511,6 +5499,60 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
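+/* Illustrative note (an assumption, not part of the generated definitions):
+ * the 3-bit QBB_PRIORITY field would select one of the eight 802.1Qbb
+ * priority classes (0-7), honoured only when QBB_ENABLE is set.
+ */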
+
/* MC_CMD_INIT_TXQ_OUT msgresponse */
#define MC_CMD_INIT_TXQ_OUT_LEN 0
@@ -4617,6 +5659,132 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
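+/* Illustrative sketch (an assumption, not part of the generated definitions):
+ * the 16-byte status buffer viewed as a packed little-endian host structure:
+ *
+ *   struct mc_proxy_status_buffer {
+ *       __le32 handle;              (HANDLE_INVALID, 0x0, until assigned)
+ *       __le16 pf;
+ *       __le16 vf;                  (VF_NULL if the requester is a PF)
+ *       __le16 rid;
+ *       __le16 status;              (see MC_CMD_PROXY_COMPLETE)
+ *       __le32 granted_privileges;
+ *   };
+ */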
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
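+
+/* Illustrative note (an assumption, not part of the generated definitions):
+ * the total host allocation is NUM_BLOCKS * STATUS_BLOCK_SIZE bytes for the
+ * status buffer, NUM_BLOCKS * REQUEST_BLOCK_SIZE bytes for the request
+ * buffer, and, if provided, NUM_BLOCKS * REPLY_BLOCK_SIZE bytes for the
+ * reply buffer.
+ */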
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4688,6 +5856,44 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
/***********************************/
/* MC_CMD_FILTER_OP
@@ -4759,9 +5965,9 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
@@ -4778,9 +5984,7 @@
#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
/* transmit domain (reserved; set to 0) */
@@ -4835,6 +6039,235 @@
#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
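+/* Illustrative sketch (an assumption, not part of the generated definitions):
+ * matching VXLAN VNI 0x123456 would encode this field as
+ *
+ *   u32 vni_or_vsid =
+ *       0x123456 << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN |
+ *       MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN <<
+ *           MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN;
+ */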
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
@@ -4849,6 +6282,27 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
/***********************************/
@@ -4865,6 +6319,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -4884,6 +6342,17 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -4901,8 +6370,10 @@
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -4919,6 +6390,8 @@
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -5019,7 +6492,9 @@
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
@@ -5028,6 +6503,17 @@
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
/***********************************/
/* MC_CMD_FREE_VIS
@@ -5114,13 +6600,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
/***********************************/
@@ -5575,6 +7063,7 @@
#define MC_CMD_GET_CAPABILITIES 0xbe
#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5582,6 +7071,20 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
@@ -5600,8 +7103,14 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5609,6 +7118,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
@@ -5632,6 +7145,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
@@ -5642,22 +7159,69 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
@@ -5735,6 +7299,15 @@
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -5753,8 +7326,14 @@
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is inserted into when inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -5826,13 +7434,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if the existing configuration means we can't support
+ * attached v-ports with this number of tags.
+ */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
@@ -6136,8 +7757,13 @@
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
/***********************************/
@@ -6249,7 +7875,11 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES, and they override the _EN bits whenever any _MODE
+ * field is non-zero. See the RSS_MODE structure for the meaning of the mode
+ * bits.
+ */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6259,6 +7889,20 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
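+/* Illustrative sketch (an assumption, not part of the generated definitions):
+ * a 4-tuple hash on TCP/IPv4 via the newer mode bits (selector 0xf, see the
+ * RSS_MODE structuredef) would be
+ *
+ *   flags |= 0xf << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
+ */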
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
@@ -6279,7 +7923,12 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6289,6 +7938,20 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
/***********************************/
@@ -6311,8 +7974,13 @@
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
/***********************************/
@@ -6421,375 +8089,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
- */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
-
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -6877,7 +8176,7 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
@@ -6921,354 +8220,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -7296,22 +8247,66 @@
#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 12
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
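
With the expanded layout the request is seven little-endian dwords, one per clock domain, and a zero value (the *_DOMAIN_DONT_CHANGE enum) asks the MC to leave that domain alone. A minimal user-space sketch against the offsets above; mcdi_put_dword is a made-up helper, not part of the sfc driver:

#include <stdint.h>
#include <string.h>

/* Made-up helper: store a little-endian dword at a byte offset in an
 * MCDI request buffer.
 */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Ask for a 400 MHz DPCPU clock; the zeroed fields are *_DONT_CHANGE,
 * so every other domain is left as it is.
 */
static void set_clock_build(uint8_t req[MC_CMD_SET_CLOCK_IN_LEN])
{
	memset(req, 0, MC_CMD_SET_CLOCK_IN_LEN);
	mcdi_put_dword(req, MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST, 400);
}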
/***********************************/
@@ -7325,12 +8320,22 @@
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
/* enum: TxDPCPU1 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
* initialised to zero
*/
@@ -7418,6 +8423,25 @@
/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_CAP_BLK_READ
* Read multiple 64bit words from capture block memory
*/
@@ -7730,6 +8754,8 @@
* more data is returned.
*/
#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7762,20 +8788,32 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7865,6 +8903,8 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
/* enum: TX Slew Rate Fine control */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7955,6 +8995,20 @@
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
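
A sketch of building the READ_FOM request with the hypothetical mcdi_put_dword helper (and <string.h>) from the MC_CMD_SET_CLOCK example: the op code occupies the first byte, three reserved bytes pad to a dword boundary, and the lane number follows.

static void kr_tune_read_fom_lane(uint8_t req[MC_CMD_KR_TUNE_READ_FOM_IN_LEN],
				  uint32_t lane)
{
	memset(req, 0, MC_CMD_KR_TUNE_READ_FOM_IN_LEN);	/* zeroes KR_TUNE_RSVD */
	req[MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST] =
		MC_CMD_KR_TUNE_IN_READ_FOM;
	mcdi_put_dword(req, MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST, lane);
}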
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -8224,6 +9278,8 @@
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -8258,10 +9314,22 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
* configuration. A copy of all traffic delivered to the host (non-promiscuous
* mode) or all traffic arriving at the port (promiscuous mode) may be
@@ -8299,7 +9367,7 @@
/***********************************/
/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
* associated with the calling function. Only a privileged function may read
* the configuration.
*/
@@ -8330,4 +9398,673 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
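
For a single-value setting the request is twelve bytes (IN_LEN(1)): TYPE, ENTITY, then one VALUE dword. A rough sketch, reusing the hypothetical mcdi_put_dword helper from the MC_CMD_SET_CLOCK example; the queue handle is illustrative only:

static void parser_disp_enable_mcast_lookup(uint8_t req[12])
{
	/* TYPE: per-TXQ multicast UDP destination lookup enable */
	mcdi_put_dword(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST,
		       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
	/* ENTITY: TX queue handle (0x12 is a made-up value) */
	mcdi_put_dword(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST, 0x12);
	/* VALUE: a single boolean for this setting type */
	mcdi_put_dword(req, MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST, 1);
}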
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
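
A sketch of enabling TX sniffing to a single queue, again with the hypothetical mcdi_put_dword helper; the queue handle is made up, and since RX_CONTEXT is unused in SIMPLE mode the never-valid 0xFFFFFFFF is written there:

static void tx_sniff_enable(uint8_t req[MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN])
{
	/* FLAGS: set the ENABLE bit */
	mcdi_put_dword(req, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST,
		       1 << MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN);
	/* Deliver copies to queue handle 0x20 (illustrative only) */
	mcdi_put_dword(req, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, 0x20);
	mcdi_put_dword(req, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
		       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* RX_CONTEXT is unused in SIMPLE mode; 0xFFFFFFFF is never valid */
	mcdi_put_dword(req, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
		       0xffffffff);
}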
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+
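
The response is just two dword bitmasks; a workaround only matters when it is both implemented and enabled. A small sketch (mcdi_get_dword is a made-up little-endian reader, mirroring the earlier mcdi_put_dword):

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return buf[ofst] | (buf[ofst + 1] << 8) | (buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* True if bug 35388 (unsafe EVQ writes) is implemented and enabled. */
static int bug35388_active(const uint8_t rsp[MC_CMD_GET_WORKAROUNDS_OUT_LEN])
{
	uint32_t impl, en;

	impl = mcdi_get_dword(rsp, MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST);
	en = mcdi_get_dword(rsp, MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST);
	return (impl & en & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) != 0;
}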
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
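
A sketch of setting PF 0's mask to just the ONLOAD group, using the hypothetical mcdi_put_dword helper. Note the mask is replaced wholesale, and nothing changes unless the DO_CHANGE bit is set:

static void privilege_mask_set_onload(uint8_t req[MC_CMD_PRIVILEGE_MASK_IN_LEN])
{
	/* PF 0: PF index in the low 16 bits, VF_NULL in the top 16 */
	uint32_t fn = (uint32_t)MC_CMD_PRIVILEGE_MASK_IN_VF_NULL <<
		      MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN;

	mcdi_put_dword(req, MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST, fn);
	/* Without DO_CHANGE in the MSB this would only read the old mask */
	mcdi_put_dword(req, MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST,
		       MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE |
		       MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD);
}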
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; it must be a
+ * VF, e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
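
A sketch of a read-only query of VF 3 on PF 1, with the same hypothetical helper; writing the DO_NOT_CHANGE sentinel into NEW_MODE turns the command into a pure read:

static void link_state_mode_query_vf(uint8_t req[MC_CMD_LINK_STATE_MODE_IN_LEN])
{
	/* FUNCTION: PF 1 in the low 16 bits, VF 3 in the top 16 (0x00030001) */
	mcdi_put_dword(req, MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST,
		       (3u << MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN) | 1);
	/* Sentinel: read the current mode without modifying it */
	mcdi_put_dword(req, MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST,
		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
}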
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
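
A sketch of writing one 128-bit key sector with up to three fallback sectors, reusing the hypothetical mcdi_put_dword helper (and <string.h>); note TYPE takes its enum values from MC_CMD_XPM_READ_SECTOR_OUT, and the total request length is MC_CMD_XPM_WRITE_SECTOR_IN_LEN(16) == 28 bytes:

static void xpm_write_key128(uint8_t req[MC_CMD_XPM_WRITE_SECTOR_IN_LEN(16)],
			     const uint8_t key[16])
{
	memset(req, 0, MC_CMD_XPM_WRITE_SECTOR_IN_LEN(16));
	/* Try up to 3 following sectors on an uncorrectable write error */
	req[MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST] = 3;
	/* TYPE reuses the MC_CMD_XPM_READ_SECTOR_OUT enum values */
	mcdi_put_dword(req, MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST,
		       MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128);
	mcdi_put_dword(req, MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST, 16);
	memcpy(req + MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST, key, 16);
}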
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT if they cannot
+ * all fit into the MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
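
A hedged sketch of decoding this variable-length response: since OUT_LEN(num) = 4 + 2*(num), the number of addresses actually returned follows from the response length and may be smaller than BAD_COUNT (helper name illustrative, MCDI helpers assumed as before):

static void efx_xpm_log_bad_locations(struct efx_nic *efx,
				      efx_dword_t *outbuf, size_t outlen)
{
	unsigned int reported, bad_count, i;
	const __le16 *addr;

	bad_count = MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT);
	reported = (outlen - MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN) /
		   MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;
	addr = (const __le16 *)MCDI_PTR(outbuf, XPM_BLANK_CHECK_OUT_BAD_ADDR);

	for (i = 0; i < reported; i++)
		netif_err(efx, hw, efx->net_dev,
			  "XPM blank-check: bad location %#x\n",
			  le16_to_cpu(addr[i]));
	if (reported < bad_count)
		netif_err(efx, hw, efx->net_dev,
			  "XPM blank-check: %u further locations not reported\n",
			  bad_count - reported);
}
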
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. it can be run at most 16 times). The test picks the
+ * first available location to use, or fails with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d72f522bf9c3..c530e1c4cb4f 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -241,6 +241,8 @@ struct efx_tx_queue {
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -923,6 +925,7 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -969,6 +972,7 @@ struct efx_nic {
unsigned next_buffer_table;
unsigned int max_channels;
+ unsigned int max_tx_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -1070,6 +1074,7 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
+ bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 31ff9084d9a4..0b536e27d3b2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -506,6 +506,7 @@ enum {
* @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@ struct efx_ef10_nic_data {
bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
+ bool workaround_26807;
bool must_check_datapath_caps;
u32 datapath_caps;
unsigned int rx_dpcpu_fw_id;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index b605dfd5c7bc..9d78830da609 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
- tests->nvram = rc ? -1 : 1;
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ tests->nvram = rc ? -1 : 1;
}
return rc;
@@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ netif_info(efx, drv, efx->net_dev,
+ "%s phy selftest\n", rc ? "Failed" : "Passed");
+
return rc;
}
@@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
wmb();
kfree(state);
+ if (rc == -EPERM)
+ rc = 0;
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index b323b9167526..2219b5424d2b 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -262,6 +262,7 @@ static int siena_probe_nic(struct efx_nic *efx)
}
efx->max_channels = EFX_MAX_CHANNELS;
+ efx->max_tx_channels = EFX_MAX_CHANNELS;
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -1042,9 +1043,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5..1833a0146571 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 67d9fdeedd86..664f596971b5 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1031,36 +1031,8 @@ err_out:
static void print_packet( byte * buf, int length )
{
#if 0
- int i;
- int remainder;
- int lines;
-
- pr_dbg("Packet of length %d\n", length);
- lines = length / 16;
- remainder = length % 16;
-
- for ( i = 0; i < lines ; i ++ ) {
- int cur;
-
- printk(KERN_DEBUG);
- for ( cur = 0; cur < 8; cur ++ ) {
- byte a, b;
-
- a = *(buf ++ );
- b = *(buf ++ );
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
- }
- printk(KERN_DEBUG);
- for ( i = 0; i < remainder/2 ; i++ ) {
- byte a, b;
-
- a = *(buf ++ );
- b = *(buf ++ );
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
+ print_hex_dump_debug(DRV_NAME, DUMP_PREFIX_OFFSET, 16, 1,
+ buf, length, true);
#endif
}
#endif
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 959aeeade0c9..3b4cd8a263de 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,7 +59,9 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/acpi.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include "smsc911x.h"
@@ -2362,59 +2364,50 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
.tx_writefifo = smsc911x_tx_writefifo_shift,
};
-#ifdef CONFIG_OF
-static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
- struct device_node *np)
+static int smsc911x_probe_config(struct smsc911x_platform_config *config,
+ struct device *dev)
{
- const char *mac;
+ int phy_interface;
u32 width = 0;
+ int err;
- if (!np)
- return -ENODEV;
-
- config->phy_interface = of_get_phy_mode(np);
+ phy_interface = device_get_phy_mode(dev);
+ if (phy_interface < 0)
+ phy_interface = PHY_INTERFACE_MODE_NA;
+ config->phy_interface = phy_interface;
- mac = of_get_mac_address(np);
- if (mac)
- memcpy(config->mac, mac, ETH_ALEN);
+ device_get_mac_address(dev, config->mac, ETH_ALEN);
- of_property_read_u32(np, "reg-shift", &config->shift);
-
- of_property_read_u32(np, "reg-io-width", &width);
- if (width == 4)
+ err = device_property_read_u32(dev, "reg-io-width", &width);
+ if (err == -ENXIO)
+ return err;
+ if (!err && width == 4)
config->flags |= SMSC911X_USE_32BIT;
else
config->flags |= SMSC911X_USE_16BIT;
- if (of_get_property(np, "smsc,irq-active-high", NULL))
+ device_property_read_u32(dev, "reg-shift", &config->shift);
+
+ if (device_property_present(dev, "smsc,irq-active-high"))
config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
- if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ if (device_property_present(dev, "smsc,irq-push-pull"))
config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
- if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ if (device_property_present(dev, "smsc,force-internal-phy"))
config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
- if (of_get_property(np, "smsc,force-external-phy", NULL))
+ if (device_property_present(dev, "smsc,force-external-phy"))
config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
- if (of_get_property(np, "smsc,save-mac-address", NULL))
+ if (device_property_present(dev, "smsc,save-mac-address"))
config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
return 0;
}
-#else
-static inline int smsc911x_probe_config_dt(
- struct smsc911x_platform_config *config,
- struct device_node *np)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF */
static int smsc911x_drv_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
@@ -2435,7 +2428,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
res_size = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq == -EPROBE_DEFER) {
+ retval = -EPROBE_DEFER;
+ goto out_0;
+ } else if (irq <= 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
@@ -2478,7 +2474,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
goto out_disable_resources;
}
- retval = smsc911x_probe_config_dt(&pdata->config, np);
+ retval = smsc911x_probe_config(&pdata->config, &pdev->dev);
if (retval && config) {
/* copy config parameters across to pdata */
memcpy(&pdata->config, config, sizeof(pdata->config));
@@ -2654,6 +2650,12 @@ static const struct of_device_id smsc911x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
#endif
+static const struct acpi_device_id smsc911x_acpi_match[] = {
+ { "ARMH9118", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
.remove = smsc911x_drv_remove,
@@ -2661,6 +2663,7 @@ static struct platform_driver smsc911x_driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
.of_match_table = of_match_ptr(smsc911x_dt_ids),
+ .acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a44379..b1e5f24708c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
#include "stmmac.h"
#include "stmmac_platform.h"
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ if (pdev->dev.of_node) {
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+ } else {
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat) {
+ dev_err(&pdev->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ /* Set default value for multicast hash bins */
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat->unicast_filter_entries = 1;
+ }
+
+ /* Custom initialisation (if needed) */
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = {
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
static struct platform_driver dwmac_generic_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = dwmac_generic_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..9d89bdbf029f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return NULL;
}
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct ipq806x_gmac *gmac;
int val;
void *err;
+ val = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (val)
+ return val;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
- if (err) {
+ if (IS_ERR(err)) {
dev_err(dev, "device tree parsing error\n");
- return err;
+ return PTR_ERR(err);
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,13 +302,13 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
/* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
- val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
@@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
@@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
}
- return gmac;
-}
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
- struct ipq806x_gmac *gmac = priv;
-
- ipq806x_gmac_set_speed(gmac, speed);
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data ipq806x_gmac_data = {
- .has_gmac = 1,
- .setup = ipq806x_gmac_setup,
- .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
-};
-
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
- { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { .compatible = "qcom,ipq806x-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
static struct platform_driver ipq806x_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = ipq806x_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index cb888d3ebbdc..78e9d1861896 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -25,66 +25,53 @@
# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
-struct lpc18xx_dwmac_priv_data {
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct regmap *reg;
- int interface;
-};
+ u8 ethmode;
+ int ret;
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
-{
- struct lpc18xx_dwmac_priv_data *dwmac;
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
- dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
- dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
- if (dwmac->interface < 0)
- return ERR_PTR(dwmac->interface);
+ plat_dat->has_gmac = true;
- dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
- if (IS_ERR(dwmac->reg)) {
- dev_err(&pdev->dev, "Syscon lookup failed\n");
- return dwmac->reg;
+ reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(reg);
}
- return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct lpc18xx_dwmac_priv_data *dwmac = priv;
- u8 ethmode;
-
- if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
return -EINVAL;
}
- regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
- .has_gmac = 1,
- .setup = lpc18xx_dwmac_setup,
- .init = lpc18xx_dwmac_init,
-};
-
static const struct of_device_id lpc18xx_dwmac_match[] = {
- { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { .compatible = "nxp,lpc1850-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
static struct platform_driver lpc18xx_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = lpc18xx_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "lpc18xx-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 61a324a87d09..c1bac1912b37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, dwmac->reg);
}
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
struct resource *res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->reg))
- return ERR_CAST(dwmac->reg);
+ return PTR_ERR(dwmac->reg);
- return dwmac;
-}
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
-static const struct stmmac_of_data meson6_dwmac_data = {
- .setup = meson6_dwmac_setup,
- .fix_mac_speed = meson6_dwmac_fix_mac_speed,
-};
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id meson6_dwmac_match[] = {
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { .compatible = "amlogic,meson6-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
static struct platform_driver meson6_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = meson6_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "meson6-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e09d4f..11baa4b19779 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
- struct rk_gmac_ops *ops;
+ const struct rk_gmac_ops *ops;
bool clk_enabled;
bool clock_input;
@@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
}
static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
- struct rk_gmac_ops *ops)
+ const struct rk_gmac_ops *ops)
{
struct rk_priv_data *bsp_priv;
struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
static int rk_gmac_init(struct platform_device *pdev, void *priv)
{
struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed)
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-static const struct stmmac_of_data rk3288_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3288_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct rk_gmac_ops *data;
+ int ret;
-static const struct stmmac_of_data rk3368_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3368_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->has_gmac = true;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
+ plat_dat->fix_mac_speed = rk_fix_speed;
+
+ plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+
+ ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id rk_gmac_dwmac_match[] = {
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = rk_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "rk_gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b844ae..401383b252a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
return 0;
}
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- struct socfpga_dwmac *dwmac;
-
- dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
-
- ret = socfpga_dwmac_parse_data(dwmac, dev);
- if (ret) {
- dev_err(dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
- }
-
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ERR_PTR(ret);
- }
-
- return dwmac;
-}
-
static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct socfpga_dwmac *dwmac = priv;
@@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static const struct stmmac_of_data socfpga_gmac_data = {
- .setup = socfpga_dwmac_probe,
- .init = socfpga_dwmac_init,
- .exit = socfpga_dwmac_exit,
- .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ ret = socfpga_dwmac_setup(dwmac);
+ if (ret) {
+ dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+ return ret;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = socfpga_dwmac_init;
+ plat_dat->exit = socfpga_dwmac_exit;
+ plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+ ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id socfpga_dwmac_match[] = {
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { .compatible = "altr,socfpga-stmmac" },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = socfpga_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111c5d14..7f6f4a4fcc70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include "stmmac_platform.h"
@@ -128,6 +129,11 @@ struct sti_dwmac {
struct device *dev;
struct regmap *regmap;
u32 speed;
+ void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+ void (*fix_retime_src)(void *priv, unsigned int speed);
};
static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
}
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct sti_dwmac *dwmac = priv;
struct regmap *regmap = dwmac->regmap;
int iface = dwmac->interface;
struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
-
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stih4xx_fix_retime_src(priv, spd);
-
- return 0;
-}
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stid127_fix_retime_src(priv, spd);
+ dwmac->fix_retime_src(priv, dwmac->speed);
return 0;
}
@@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sti_dwmac_of_data *data;
+ struct stmmac_resources stmmac_res;
struct sti_dwmac *dwmac;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
+ return ret;
}
- return dwmac;
+ dwmac->fix_retime_src = data->fix_retime_src;
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
+ plat_dat->fix_mac_speed = data->fix_retime_src;
+
+ ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data stih4xx_dwmac_data = {
- .fix_mac_speed = stih4xx_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stix4xx_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+ .fix_retime_src = stih4xx_fix_retime_src,
};
-static const struct stmmac_of_data stid127_dwmac_data = {
- .fix_mac_speed = stid127_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stid127_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+ .fix_retime_src = stid127_fix_retime_src,
};
static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = {
MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sti_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca39759..52b8ed9bd87c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@ struct sunxi_priv_data {
struct regulator *regulator;
};
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
- struct sunxi_priv_data *gmac;
- struct device *dev = &pdev->dev;
-
- gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac)
- return ERR_PTR(-ENOMEM);
-
- gmac->interface = of_get_phy_mode(dev->of_node);
-
- gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "could not get tx clock\n");
- return gmac->tx_clk;
- }
-
- /* Optional regulator for PHY */
- gmac->regulator = devm_regulator_get_optional(dev, "phy");
- if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
- dev_info(dev, "no regulator found\n");
- gmac->regulator = NULL;
- }
-
- return gmac;
-}
-
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
@@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
}
}
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
- .has_gmac = 1,
- .tx_coe = 1,
- .fix_mac_speed = sun7i_fix_speed,
- .setup = sun7i_gmac_setup,
- .init = sun7i_gmac_init,
- .exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return PTR_ERR(gmac->tx_clk);
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ /* Platform data specifying hardware features and callbacks.
+ * Hardware features were copied from Allwinner drivers.
+ */
+ plat_dat->tx_coe = 1;
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun7i_gmac_init;
+ plat_dat->exit = sun7i_gmac_exit;
+ plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+ ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id sun7i_dwmac_match[] = {
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { .compatible = "allwinner,sun7i-a20-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sun7i_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..925f2f8659b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -837,8 +837,11 @@ static int stmmac_init_phy(struct net_device *dev)
interface);
}
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
+ if (!phydev)
+ return -ENODEV;
+
return PTR_ERR(phydev);
}
@@ -2843,7 +2846,7 @@ int stmmac_dvr_probe(struct device *device,
if (res->mac)
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
- dev_set_drvdata(device, priv);
+ dev_set_drvdata(device, priv->dev);
/* Verify driver arguments */
stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..d02691ba3d7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 * this function reads the driver parameters from the device tree and
 * sets the private fields that are used by the driver at runtime.
*/
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
- const struct of_device_id *device;
- struct device *dev = &pdev->dev;
-
- device = of_match_device(dev->driver->of_match_table, dev);
- if (device->data) {
- const struct stmmac_of_data *data = device->data;
- plat->has_gmac = data->has_gmac;
- plat->enh_desc = data->enh_desc;
- plat->tx_coe = data->tx_coe;
- plat->rx_coe = data->rx_coe;
- plat->bugged_jumbo = data->bugged_jumbo;
- plat->pmt = data->pmt;
- plat->riwt_off = data->riwt_off;
- plat->fix_mac_speed = data->fix_mac_speed;
- plat->bus_setup = data->bus_setup;
- plat->setup = data->setup;
- plat->free = data->free;
- plat->init = data->init;
- plat->exit = data->exit;
- }
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* If phy-handle is not specified, check if we have a fixed-phy */
if (!plat->phy_node && of_phy_is_fixed_link(np)) {
if ((of_phy_register_fixed_link(np) < 0))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
plat->phy_node = of_node_get(np);
}
@@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
*/
plat->maxmtu = JUMBO_LEN;
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
GFP_KERNEL);
if (!dma_cfg) {
of_node_put(np);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
- return 0;
+ return plat;
}
#else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res)
{
- struct stmmac_resources stmmac_res;
- int ret = 0;
struct resource *res;
- struct device *dev = &pdev->dev;
- struct plat_stmmacenet_data *plat_dat = NULL;
- memset(&stmmac_res, 0, sizeof(stmmac_res));
+ memset(stmmac_res, 0, sizeof(*stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER) {
- dev_err(dev,
+ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res->irq < 0) {
+ if (stmmac_res->irq != -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
"MAC IRQ configuration information not found\n");
}
- return stmmac_res.irq;
+ return stmmac_res->irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev)
 * If the wake-up interrupt is not passed from the platform,
 * the driver will continue to use the MAC irq (ndev->irq)
*/
- stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (stmmac_res.wol_irq < 0) {
- if (stmmac_res.wol_irq == -EPROBE_DEFER)
+ stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res->wol_irq < 0) {
+ if (stmmac_res->wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- stmmac_res.wol_irq = stmmac_res.irq;
+ stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+ stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res->lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(stmmac_res.addr))
- return PTR_ERR(stmmac_res.addr);
-
- plat_dat = dev_get_platdata(&pdev->dev);
-
- if (!plat_dat)
- plat_dat = devm_kzalloc(&pdev->dev,
- sizeof(struct plat_stmmacenet_data),
- GFP_KERNEL);
- if (!plat_dat) {
- pr_err("%s: ERROR: no memory", __func__);
- return -ENOMEM;
- }
-
- /* Set default value for multicast hash bins */
- plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
- /* Set default value for unicast filter entries */
- plat_dat->unicast_filter_entries = 1;
-
- if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
- if (ret) {
- pr_err("%s: main dt probe failed", __func__);
- return ret;
- }
- }
-
- /* Custom setup (if needed) */
- if (plat_dat->setup) {
- plat_dat->bsp_priv = plat_dat->setup(pdev);
- if (IS_ERR(plat_dat->bsp_priv))
- return PTR_ERR(plat_dat->bsp_priv);
- }
-
- /* Custom initialisation (if needed)*/
- if (plat_dat->init) {
- ret = plat_dat->init(pdev, plat_dat->bsp_priv);
- if (unlikely(ret))
- return ret;
- }
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_remove
@@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
- if (priv->plat->free)
- priv->plat->free(pdev, priv->plat->bsp_priv);
-
return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
@@ -413,3 +349,7 @@ static int stmmac_pltfr_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 71da86d7bd00..ffeb8d9e2b2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,7 +19,14 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res);
+
int stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
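
Taken together, the two helpers above plus stmmac_dvr_probe() define the new glue-driver shape that every dwmac variant converted in this patch follows. A minimal sketch (the probe name is illustrative; the three helpers are the real exports):

static int dwmac_example_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	/* Variant-specific plat_dat setup (bsp_priv, callbacks) goes here */

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
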
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, len);
- if (!skb_new) {
- rp->tx_errors++;
+ if (!skb_new)
goto out_drop;
- }
kfree_skb(skb);
skb = skb_new;
} else
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..8276ee5a7d54
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+ tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+ select PHYLIB
+ select CRC32
+ select MII
+ depends on OF && HAS_DMA
+ ---help---
+ This driver supports the DWC Ethernet QoS from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..7a375723fc18
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 000000000000..85b3326775b8
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/* Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ * This version introduced a lot of changes which break backwards
+ * compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ * drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ * interrupt bit fields. The driver could be made compatible with 4.00a if
+ * all relevant HW errata are handled.
+ *
+ * The GMAC is highly configurable at synthesis time. This driver has been
+ * developed for a subset of the total available feature set. Currently
+ * it supports:
+ * - TSO
+ * - Checksum offload for RX and TX.
+ * - Energy efficient ethernet.
+ * - GMII phy interface.
+ * - The statistics module.
+ * - Single RX and TX queue.
+ *
+ * Copyright (C) 2015 Axis Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME "dwceqos"
+#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION "0.9"
+
+#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN 8
+#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0bd0
+#define START_MTL_REG_OFFSET 0x0c00
+#define MAX_MTL_REG_OFFSET 0x0d7c
+#define START_DMA_REG_OFFSET 0x1000
+#define MAX_DMA_REG_OFFSET 0x117C
+
+#define REG_SPACE_SIZE 0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE 0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
+#define REG_DWCEQOS_DMA_IS 0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
+#define REG_DWCEQOS_DMA_CH0_IE 0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c
+#define REG_DWCEQOS_DMA_CH0_STA 0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR BIT(11)
+#define DWCEQOS_DMA_MODE_DA BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+ (((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+ (((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+ (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
+
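
A hedged sketch of composing REG_DWCEQOS_DMA_SYSBUS_MODE from the field macros above, using the stated defaults; the exact init policy is an assumption for illustration:

static void dwceqos_init_sysbus_mode_sketch(void __iomem *base)
{
	u32 mode;

	/* Address-aligned beats, the default burst set, and the default
	 * read/write outstanding-request limits.
	 */
	mode = DWCEQOS_DMA_SYSBUS_MODE_AAL |
	       DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT |
	       DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
			DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT) |
	       DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
			DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
	writel(mode, base + REG_DWCEQOS_DMA_SYSBUS_MODE);
}
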
+#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS BIT(16)
+#define DWCEQOS_DMA_IS_MACIS BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE BIT(7)
+#define DWCEQOS_DMA_RDES3_ES BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT 0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT BIT(30)
+#define DWCEQOS_DMA_TDES3_FD BIT(29)
+#define DWCEQOS_DMA_TDES3_LD BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP BIT(17)
+#define DWCEQOS_DMA_TDES3_CA 0x00030000
+#define DWCEQOS_DMA_TDES3_TSE BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
+
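
A hedged sketch of how the TDES bits above combine for a single-buffer, non-TSO transmit descriptor. The four-word descriptor layout, the frame length in the low bits of TDES3, and the buffer-1 length in the low bits of TDES2 are assumptions for illustration:

struct dwceqos_tx_desc_sketch {
	u32 des0;	/* buffer 1 DMA address (low 32 bits) */
	u32 des1;	/* buffer 2 DMA address, unused here */
	u32 des2;	/* buffer length + IOC */
	u32 des3;	/* control bits + frame length; OWN set last */
};

static void dwceqos_fill_tx_desc_sketch(struct dwceqos_tx_desc_sketch *d,
					dma_addr_t buf, size_t len)
{
	d->des0 = lower_32_bits(buf);
	d->des1 = 0;
	/* Assumed: buffer 1 length sits in the low bits of des2; IOC asks
	 * for an interrupt when this descriptor completes.
	 */
	d->des2 = len | DWCEQOS_DMA_TDES2_IOC;
	/* Single-buffer frame: first and last descriptor, full checksum
	 * insertion (CA), frame length assumed in the low bits.
	 */
	d->des3 = DWCEQOS_DMA_TDES3_FD | DWCEQOS_DMA_TDES3_LD |
		  DWCEQOS_DMA_TDES3_CA | len;
	/* Hand over to hardware only after all other fields are visible */
	wmb();
	d->des3 |= DWCEQOS_DMA_TDES3_OWN;
}
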
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED 0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER 0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
+
+#define REG_DWCEQOS_MTL_IS 0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512 0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG 0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG 0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT 0x0008
+#define REG_DWCEQOS_MAC_WD_TO 0x000c
+#define REG_DWCEQOS_HASTABLE_LO 0x0010
+#define REG_DWCEQOS_HASTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_IS 0x00b0
+#define REG_DWCEQOS_MAC_IE 0x00b4
+#define REG_DWCEQOS_MAC_STAT 0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS BIT(20)
+#define DWCEQOS_MAC_CFG_JD BIT(17)
+#define DWCEQOS_MAC_CFG_JE BIT(16)
+#define DWCEQOS_MAC_CFG_PS BIT(15)
+#define DWCEQOS_MAC_CFG_FES BIT(14)
+#define DWCEQOS_MAC_CFG_DM BIT(13)
+#define DWCEQOS_MAC_CFG_DO BIT(10)
+#define DWCEQOS_MAC_CFG_TE BIT(1)
+#define DWCEQOS_MAC_CFG_IPC BIT(27)
+#define DWCEQOS_MAC_CFG_RE BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8))
+#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
+
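+/* MDIO clock range (CR): selects the MDC divisor for the CSR clock. The _NN
+ * suffix is the highest supported apb_pclk rate in MHz (see
+ * dwceqos_mdio_set_csr()).
+ */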
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
+
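+/* The perfect-filter address count is encoded in HW_FEATURE1 bits 24:18 as
+ * the number of entries minus one.
+ */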
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+ (1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR BIT(0)
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL 0x0700
+#define REG_DWCEQOS_MMC_RXIRQ 0x0704
+#define REG_DWCEQOS_MMC_TXIRQ 0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR 0x07F0
+#define DWC_MMC_TXLPIUSCNTR 0x07EC
+#define DWC_MMC_TXOVERSIZE_G 0x0778
+#define DWC_MMC_TXVLANPACKETS_G 0x0774
+#define DWC_MMC_TXPAUSEPACKETS 0x0770
+#define DWC_MMC_TXEXCESSDEF 0x076C
+#define DWC_MMC_TXPACKETCOUNT_G 0x0768
+#define DWC_MMC_TXOCTETCOUNT_G 0x0764
+#define DWC_MMC_TXCARRIERERROR 0x0760
+#define DWC_MMC_TXEXCESSCOL 0x075C
+#define DWC_MMC_TXLATECOL 0x0758
+#define DWC_MMC_TXDEFERRED 0x0754
+#define DWC_MMC_TXMULTICOL_G 0x0750
+#define DWC_MMC_TXSINGLECOL_G 0x074C
+#define DWC_MMC_TXUNDERFLOWERROR 0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
+#define DWC_MMC_TX64OCTETS_GB 0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR 0x07F8
+#define DWC_MMC_RXLPIUSCNTR 0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
+#define DWC_MMC_RXRCVERROR 0x07E0
+#define DWC_MMC_RXWATCHDOG 0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
+#define DWC_MMC_RXPAUSEPACKETS 0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
+#define DWC_MMC_RXLENGTHERROR 0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
+#define DWC_MMC_RX64OCTETS_GB 0x07AC
+#define DWC_MMC_RXOVERSIZE_G 0x07A8
+#define DWC_MMC_RXUNDERSIZE_G 0x07A4
+#define DWC_MMC_RXJABBERERROR 0x07A0
+#define DWC_MMC_RXRUNTERROR 0x079C
+#define DWC_MMC_RXALIGNMENTERROR 0x0798
+#define DWC_MMC_RXCRCERROR 0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
+#define DWC_MMC_RXOCTETCOUNT_G 0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* Software ring descriptor: tracks the skb and DMA mapping backing each
+ * hardware DMA descriptor.
+ */
+struct ring_desc {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+ u32 des0;
+ u32 des1;
+ u32 des2;
+ u32 des3;
+} ____cacheline_aligned;
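+/* Each 16-byte descriptor is padded out to a full cache line; the DMA is
+ * programmed to skip the padding via the DSL field (see dwceqos_init_hw()).
+ */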
+
+struct dwceqos_mmc_counters {
+ __u64 txlpitranscntr;
+ __u64 txlpiuscntr;
+ __u64 txoversize_g;
+ __u64 txvlanpackets_g;
+ __u64 txpausepackets;
+ __u64 txexcessdef;
+ __u64 txpacketcount_g;
+ __u64 txoctetcount_g;
+ __u64 txcarriererror;
+ __u64 txexcesscol;
+ __u64 txlatecol;
+ __u64 txdeferred;
+ __u64 txmulticol_g;
+ __u64 txsinglecol_g;
+ __u64 txunderflowerror;
+ __u64 txbroadcastpackets_gb;
+ __u64 txmulticastpackets_gb;
+ __u64 txunicastpackets_gb;
+ __u64 tx1024tomaxoctets_gb;
+ __u64 tx512to1023octets_gb;
+ __u64 tx256to511octets_gb;
+ __u64 tx128to255octets_gb;
+ __u64 tx65to127octets_gb;
+ __u64 tx64octets_gb;
+ __u64 txmulticastpackets_g;
+ __u64 txbroadcastpackets_g;
+ __u64 txpacketcount_gb;
+ __u64 txoctetcount_gb;
+
+ __u64 rxlpitranscntr;
+ __u64 rxlpiuscntr;
+ __u64 rxctrlpackets_g;
+ __u64 rxrcverror;
+ __u64 rxwatchdog;
+ __u64 rxvlanpackets_gb;
+ __u64 rxfifooverflow;
+ __u64 rxpausepackets;
+ __u64 rxoutofrangetype;
+ __u64 rxlengtherror;
+ __u64 rxunicastpackets_g;
+ __u64 rx1024tomaxoctets_gb;
+ __u64 rx512to1023octets_gb;
+ __u64 rx256to511octets_gb;
+ __u64 rx128to255octets_gb;
+ __u64 rx65to127octets_gb;
+ __u64 rx64octets_gb;
+ __u64 rxoversize_g;
+ __u64 rxundersize_g;
+ __u64 rxjabbererror;
+ __u64 rxrunterror;
+ __u64 rxalignmenterror;
+ __u64 rxcrcerror;
+ __u64 rxmulticastpackets_g;
+ __u64 rxbroadcastpackets_g;
+ __u64 rxoctetcount_g;
+ __u64 rxoctetcount_gb;
+ __u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+ const char stat_name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define STAT_ITEM(name, var) \
+ {\
+ name,\
+ offsetof(struct dwceqos_mmc_counters, var),\
+ }
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+ STAT_ITEM("tx_bytes", txoctetcount_gb),
+ STAT_ITEM("tx_packets", txpacketcount_gb),
+ STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+ STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+ STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
+ STAT_ITEM("tx_pause_packets", txpausepackets),
+ STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+ STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+ STAT_ITEM("tx_underflow_errors", txunderflowerror),
+ STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+ STAT_ITEM("rx_bytes", rxoctetcount_gb),
+ STAT_ITEM("rx_packets", rxpacketcount_gb),
+ STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+ STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+ STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+ STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+ STAT_ITEM("rx_pause_packets", rxpausepackets),
+ STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+ STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+ STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+ STAT_ITEM("rx_oversize_packets", rxoversize_g),
+ STAT_ITEM("rx_undersize_packets", rxundersize_g),
+ STAT_ITEM("rx_jabbers", rxjabbererror),
+ STAT_ITEM("rx_align_errors", rxalignmenterror),
+ STAT_ITEM("rx_crc_errors", rxcrcerror),
+ STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+ /* Enable AXI low-power interface. */
+ bool en_lpi;
+ /* Limit on number of outstanding AXI write requests. */
+ u32 write_requests;
+ /* Limit on number of outstanding AXI read requests. */
+ u32 read_requests;
+ /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+ u32 burst_map;
+ /* DMA programmable burst lengths. */
+ u32 tx_pbl;
+ u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+ int autoneg;
+ int rx;
+ int rx_current;
+ int tx;
+ int tx_current;
+};
+
+struct net_local {
+ void __iomem *baseaddr;
+ struct clk *phy_ref_clk;
+ struct clk *apb_pclk;
+
+ struct device_node *phy_node;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+
+ u32 msg_enable;
+
+ struct tasklet_struct tx_bdreclaim_tasklet;
+ struct workqueue_struct *txtimeout_handler_wq;
+ struct work_struct txtimeout_reinit;
+
+ phy_interface_t phy_interface;
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct napi_struct napi;
+
+ /* DMA Descriptor Areas */
+ struct ring_desc *rx_skb;
+ struct ring_desc *tx_skb;
+
+ struct dwceqos_dma_desc *tx_descs;
+ struct dwceqos_dma_desc *rx_descs;
+
+ /* DMA-mapped descriptor areas. */
+ dma_addr_t tx_descs_addr;
+ dma_addr_t rx_descs_addr;
+ dma_addr_t tx_descs_tail_addr;
+ dma_addr_t rx_descs_tail_addr;
+
+ size_t tx_free;
+ size_t tx_next;
+ size_t rx_cur;
+ size_t tx_cur;
+
+ /* Spinlock for accessing the TX DMA descriptors. */
+ spinlock_t tx_lock;
+
+ /* Spinlock for register read-modify-writes. */
+ spinlock_t hw_lock;
+
+ u32 feature0;
+ u32 feature1;
+ u32 feature2;
+
+ struct dwceqos_bus_cfg bus_cfg;
+ bool en_tx_lpi_clockgating;
+
+ int eee_enabled;
+ int eee_active;
+ int csr_val;
+ u32 gso_size;
+
+ struct dwceqos_mmc_counters mmc_counters;
+ /* Protect the mmc_counter updates. */
+ spinlock_t stats_lock;
+ u32 mmc_rx_counters_mask;
+ u32 mmc_tx_counters_mask;
+
+ struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
+#define dwceqos_read(lp, reg) \
+ readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val) \
+ writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.rx_current = 0;
+ lp->flowcontrol.tx_current = 0;
+ lp->eee_active = 0;
+ lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+ struct dwceqos_dma_desc *dd;
+
+ if (tx)
+ dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+ else
+ dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+ pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+ index, dd);
+ pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+ dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+ size_t desci, i;
+
+ pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+ lp->tx_cur, lp->tx_next);
+
+ print_descriptor(lp, lp->rx_cur, 0);
+
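+ /* Dump the whole TX ring, starting ten entries before the current
+ * reclaim position. The unsigned wrap-around in the index math is
+ * safe assuming DWCEQOS_TX_DCNT is a power of two.
+ */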
+ for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+ i < DWCEQOS_TX_DCNT;
+ ++i) {
+ print_descriptor(lp, desci, 1);
+ desci = (desci + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ pr_info("DMA_Debug_Status0: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+ pr_info("DMA_CH0_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+ pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+ dwceqos_read(lp, 0x1144));
+ pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+ dwceqos_read(lp, 0x1154));
+ pr_info("MTL_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+ pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+ pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+ pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+ int rate = clk_get_rate(lp->apb_pclk);
+
+ if (rate <= 20000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+ else if (rate <= 35000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+ else if (rate <= 60000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+ else if (rate <= 100000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+ else if (rate <= 150000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+ else if (rate <= 250000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
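+ /* Rates above 250 MHz have no CR encoding; csr_val is left as-is. */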
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+ int data;
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+
+ data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+ if (i == 5) {
+ netdev_warn(lp->ndev, "MDIO read timed out\n");
+ data = 0xffff;
+ }
+
+ return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+ u16 value)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+ if (i == 5)
+ netdev_warn(lp->ndev, "MDIO write timed out\n");
+ return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(phydev, rq, cmd);
+ default:
+ dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link down to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link up to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+ /* Check for changed EEE capability */
+ if (!lp->eee_active && lp->eee_enabled) {
+ lp->eee_enabled = 0;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+ struct phy_device *phydev = lp->phy_dev;
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+ DWCEQOS_MAC_CFG_DM);
+
+ if (phydev->duplex)
+ regval |= DWCEQOS_MAC_CFG_DM;
+ if (phydev->speed == SPEED_10) {
+ regval |= DWCEQOS_MAC_CFG_PS;
+ } else if (phydev->speed == SPEED_100) {
+ regval |= DWCEQOS_MAC_CFG_PS |
+ DWCEQOS_MAC_CFG_FES;
+ } else if (phydev->speed != SPEED_1000) {
+ netdev_err(lp->ndev,
+ "unknown PHY speed %d\n",
+ phydev->speed);
+ return;
+ }
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((lp->speed != phydev->speed) ||
+ (lp->duplex != phydev->duplex)) {
+ dwceqos_set_speed(lp);
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+
+ if (lp->flowcontrol.autoneg) {
+ lp->flowcontrol.rx = phydev->pause ||
+ phydev->asym_pause;
+ lp->flowcontrol.tx = phydev->pause ||
+ phydev->asym_pause;
+ }
+
+ if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set rx flow to %d\n",
+ lp->flowcontrol.rx);
+ dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+ lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+ }
+ if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set tx flow to %d\n",
+ lp->flowcontrol.tx);
+ dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+ lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+ }
+ }
+
+ if (phydev->link != lp->link) {
+ lp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ lp->ndev->trans_start = jiffies;
+ dwceqos_link_up(lp);
+ } else {
+ dwceqos_link_down(lp);
+ }
+ phy_print_status(phydev);
+ }
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ if (lp->phy_node) {
+ phydev = of_phy_connect(lp->ndev,
+ lp->phy_node,
+ &dwceqos_adjust_link,
+ 0,
+ lp->phy_interface);
+
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+ } else {
+ netdev_err(ndev, "no PHY configured\n");
+ return -ENODEV;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(lp->ndev,
+ "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+ phydev, phydev->phy_id, phydev->addr);
+
+ phydev->supported &= PHY_GBIT_FEATURES;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->phy_dev = phydev;
+
+ if (netif_msg_probe(lp)) {
+ netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+ lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+ netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+ lp->phy_dev->drv->name);
+ }
+
+ return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+ goto err_out;
+ }
+
+ new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+ new_skb->data, DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ new_skb = NULL;
+ goto err_out;
+ }
+
+ lp->rx_descs[index].des0 = new_skb_baddr;
+ lp->rx_descs[index].des1 = 0;
+ lp->rx_descs[index].des2 = 0;
+ lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_BUF1V |
+ DWCEQOS_DMA_RDES3_OWN;
+
+ lp->rx_skb[index].mapping = new_skb_baddr;
+ lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+ lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+ int i;
+
+ if (lp->rx_skb) {
+ for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+ if (lp->rx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[i].mapping,
+ lp->rx_skb[i].len,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb(lp->rx_skb[i].skb);
+ lp->rx_skb[i].skb = NULL;
+ lp->rx_skb[i].mapping = 0;
+ }
+ }
+ }
+
+ if (lp->tx_skb) {
+ for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+ if (lp->tx_skb[i].skb) {
+ dev_kfree_skb(lp->tx_skb[i].skb);
+ lp->tx_skb[i].skb = NULL;
+ }
+ if (lp->tx_skb[i].mapping) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+ lp->tx_skb[i].mapping = 0;
+ }
+ }
+ }
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+ int size;
+
+ dwceqos_clean_rings(lp);
+
+ kfree(lp->tx_skb);
+ lp->tx_skb = NULL;
+ kfree(lp->rx_skb);
+ lp->rx_skb = NULL;
+
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->rx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->rx_descs), lp->rx_descs_addr);
+ lp->rx_descs = NULL;
+ }
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->tx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->tx_descs), lp->tx_descs_addr);
+ lp->tx_descs = NULL;
+ }
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+ int size;
+ u32 i;
+
+ lp->gso_size = 0;
+
+ lp->tx_skb = NULL;
+ lp->rx_skb = NULL;
+ lp->rx_descs = NULL;
+ lp->tx_descs = NULL;
+
+ /* Reset the DMA indexes */
+ lp->rx_cur = 0;
+ lp->tx_cur = 0;
+ lp->tx_next = 0;
+ lp->tx_free = DWCEQOS_TX_DCNT;
+
+ /* Allocate Ring descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+ lp->rx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->rx_skb)
+ goto err_out;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+ lp->tx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->tx_skb)
+ goto err_out;
+
+ /* Allocate DMA descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->rx_descs_addr, 0);
+ if (!lp->rx_descs)
+ goto err_out;
+ lp->rx_descs_tail_addr = lp->rx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->tx_descs_addr, 0);
+ if (!lp->tx_descs)
+ goto err_out;
+ lp->tx_descs_tail_addr = lp->tx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+ /* Initialize RX Ring Descriptors and buffers */
+ for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+ dwceqos_alloc_rxring_desc(lp, i);
+ if (!(lp->rx_skb[lp->rx_cur].skb))
+ goto err_out;
+ }
+
+ /* Initialize TX Descriptors */
+ for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+ lp->tx_descs[i].des0 = 0;
+ lp->tx_descs[i].des1 = 0;
+ lp->tx_descs[i].des2 = 0;
+ lp->tx_descs[i].des3 = 0;
+ }
+
+ /* Make descriptor writes visible to the DMA. */
+ wmb();
+
+ return 0;
+
+err_out:
+ dwceqos_descriptor_free(lp);
+ return -ENOMEM;
+}
+
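+/* A cleared OWN bit means the DMA has handed the current RX descriptor back
+ * to software, i.e. a received packet is available.
+ */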
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+ return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+ lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+ lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+ lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+ int ret = -ENXIO, i;
+ struct resource res;
+ struct device_node *mdionode;
+
+ mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+ if (!mdionode)
+ return 0;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (!lp->mii_bus) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "DWCEQOS MII bus";
+ lp->mii_bus->read = &dwceqos_mdio_read;
+ lp->mii_bus->write = &dwceqos_mdio_write;
+ lp->mii_bus->priv = lp;
+ lp->mii_bus->parent = &lp->ndev->dev;
+
+ lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ ret = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+ of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ if (of_mdiobus_register(lp->mii_bus, mdionode))
+ goto err_out_free_mdio_irq;
+
+ of_node_put(mdionode);
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ of_node_put(mdionode);
+ return ret;
+}
+
+/* DMA reset: when issued, it also resets all MTL and MAC registers. */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+ /* Wait (at most) 0.5 seconds for the DMA reset. */
+ int i = 5000;
+ u32 reg;
+
+ /* Force gigabit to guarantee a TX clock for GMII. */
+ reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+ reg |= DWCEQOS_MAC_CFG_DM;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+ do {
+ udelay(100);
+ i--;
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+ } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+ /* We might experience a timeout if the chip clock mux is broken */
+ if (!i)
+ netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+ netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+ if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+ netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+
+ /* A latched MMC interrupt cannot be masked; we must read
+ * all the counters while an interrupt is pending.
+ */
+ dwceqos_read_mmc_counters(lp,
+ dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+ dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+ u32 cause;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+ if (cause & DWCEQOS_MAC_IS_MMC_INT)
+ dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct net_local *lp = netdev_priv(ndev);
+
+ u32 cause;
+ u32 dma_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+ /* DMA Channel 0 Interrupt */
+ if (cause & DWCEQOS_DMA_IS_DC0IS) {
+ dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+ /* Transmit Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+ tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+ dwceqos_dma_disable_txirq(lp);
+ }
+
+ /* Receive Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+ /* Disable RX IRQs */
+ dwceqos_dma_disable_rxirq(lp);
+ napi_schedule(&lp->napi);
+ }
+
+ /* Fatal Bus Error interrupt */
+ if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+ dwceqos_fatal_bus_error(lp, dma_status);
+
+ /* errata 9000831707 */
+ dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+ DWCEQOS_DMA_CH0_IS_REB;
+ }
+
+ /* Ack all DMA Channel 0 IRQs */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MTLIS) {
+ u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MACIS) {
+ dwceqos_mac_interrupt(lp);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+ if (enable)
+ regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ else
+ regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ /* MTL flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+ if (enable)
+ regval |= DWCEQOS_MTL_RXQ_EHFC;
+ else
+ regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ /* MAC flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+ if (enable)
+ regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ else
+ regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+ int RQS, RFD, RFA;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+ /* The queue size is in units of 256 bytes. We want 512-byte units
+ * for the threshold fields.
+ */
+ RQS = ((regval >> 20) & 0x3FF) + 1;
+ RQS /= 2;
+
+ /* The thresholds are relative to a full queue, with a bias
+ * of 1 KiByte below full.
+ */
+ RFD = RQS / 2 - 2;
+ RFA = RQS / 8 - 2;
+
+ regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+ if (RFD >= 0 && RFA >= 0) {
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+ } else {
+ netdev_warn(lp->ndev, "FIFO too small for flow control.\n");
+ }
+
+ regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+ DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+ unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+ BUG_ON(!rate_mhz);
+
+ dwceqos_write(lp,
+ REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+ DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+ u32 sysbus_reg;
+
+ /* N.B. We do not support the Fixed Burst mode because it
+ * opens a race window by making HW access to DMA descriptors
+ * non-atomic.
+ */
+
+ sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+ if (lp->bus_cfg.en_lpi)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+ if (lp->bus_cfg.burst_map)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ lp->bus_cfg.burst_map);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+ if (lp->bus_cfg.read_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ lp->bus_cfg.read_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+ if (lp->bus_cfg.write_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ lp->bus_cfg.write_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+ if (netif_msg_hw(lp))
+ netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+ u32 regval;
+ u32 buswidth;
+ u32 dma_skip;
+
+ /* Software reset */
+ dwceqos_reset_hw(lp);
+
+ dwceqos_configure_bus(lp);
+
+ /* Probe data bus width, 32/64/128 bits. */
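+ /* Address bits below the bus width read back as zero, so
+ * (readback ^ 0xF) + 1 yields the width in bytes.
+ */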
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+ buswidth = (regval ^ 0xF) + 1;
+
+ /* Cache-align dma descriptors. */
+ dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+ DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+ DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+ /* Initialize DMA Channel 0 */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+ (u32)lp->tx_descs_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+ (u32)lp->rx_descs_addr);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ if (lp->bus_cfg.tx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ /* Enable TSO if the HW supports it */
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+ if (lp->bus_cfg.rx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_RX_BUF_SIZE);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ regval |= DWCEQOS_DMA_CH_CTRL_START;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ /* Initialize MTL Queues */
+ regval = DWCEQOS_MTL_SCHALG_STRICT;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+ regval = DWCEQOS_MTL_TXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+ DWCEQOS_MTL_TXQ_TTC512;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+ regval = DWCEQOS_MTL_RXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ dwceqos_configure_flow_control(lp);
+
+ /* Initialize MAC */
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ lp->eee_enabled = 0;
+
+ dwceqos_configure_clock(lp);
+
+ /* MMC counters */
+
+ /* probe implemented counters */
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+ lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+ lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+ dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+ DWCEQOS_MMC_CTRL_RSTONRD);
+ dwceqos_enable_mmc_interrupt(lp);
+
+ /* Enable Interrupts */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+ DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+ /* Start TX DMA */
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+ regval | DWCEQOS_DMA_CH_CTRL_START);
+
+ /* Enable MAC TX/RX */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+ regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned int tx_bytes = 0;
+ unsigned int tx_packets = 0;
+
+ spin_lock(&lp->tx_lock);
+
+ while (lp->tx_free < DWCEQOS_TX_DCNT) {
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+ struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+ /* Descriptor still being held by DMA ? */
+ if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+ break;
+
+ if (rd->mapping)
+ dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(rd->skb)) {
+ ++tx_packets;
+ tx_bytes += rd->skb->len;
+ dev_consume_skb_any(rd->skb);
+ }
+
+ rd->skb = NULL;
+ rd->mapping = 0;
+ lp->tx_free++;
+ lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
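+ /* The TX write-back error-summary bit shares bit position 15 with
+ * RDES3_ES, hence the RX macro in this TX path.
+ */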
+ if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+ (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+ if (netif_msg_tx_err(lp))
+ netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+ dd->des3);
+ if (netif_msg_hw(lp))
+ print_status(lp);
+ }
+ }
+ spin_unlock(&lp->tx_lock);
+
+ netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+ dwceqos_dma_enable_txirq(lp);
+ netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+ struct sk_buff *skb;
+ u32 tot_size = 0;
+ unsigned int n_packets = 0;
+ unsigned int n_descs = 0;
+ u32 len;
+
+ struct dwceqos_dma_desc *dd;
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ while (n_descs < budget) {
+ if (!dwceqos_packet_avail(lp))
+ break;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "no memory for new sk_buff\n");
+ break;
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ /* Read descriptor data after reading owner bit. */
+ dma_rmb();
+
+ dd = &lp->rx_descs[lp->rx_cur];
+ len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+ skb = lp->rx_skb[lp->rx_cur].skb;
+
+ /* Unmap old buffer */
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[lp->rx_cur].mapping,
+ lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+ /* Discard packet on reception error or bad checksum */
+ if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+ (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ } else {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+ case DWCEQOS_DMA_RDES1_PT_UDP:
+ case DWCEQOS_DMA_RDES1_PT_TCP:
+ case DWCEQOS_DMA_RDES1_PT_ICMP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(lp))
+ netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+ lp->rx_descs[lp->rx_cur].des3);
+ } else {
+ tot_size += skb->len;
+ n_packets++;
+
+ netif_receive_skb(skb);
+ }
+
+ lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+ lp->rx_descs[lp->rx_cur].des1 = 0;
+ lp->rx_descs[lp->rx_cur].des2 = 0;
+ /* The DMA must observe des0/1/2 written before des3. */
+ wmb();
+ lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_OWN |
+ DWCEQOS_DMA_RDES3_BUF1V;
+
+ lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+ lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+ lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+ n_descs++;
+ lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+ }
+
+ /* Make sure any ownership update is written to the descriptors before
+ * DMA wakeup.
+ */
+ wmb();
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+ /* Wake up RX by writing tail pointer */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct net_local *lp = container_of(napi, struct net_local, napi);
+ int work_done;
+
+ work_done = dwceqos_rx(lp, budget);
+
+ if (!dwceqos_packet_avail(lp) && work_done < budget) {
+ napi_complete(napi);
+ dwceqos_dma_enable_rxirq(lp);
+ } else {
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+/* Reinitialize function if a TX timed out */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+ struct net_local *lp = container_of(data, struct net_local,
+ txtimeout_reinit);
+
+ netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+ DWCEQOS_TX_TIMEOUT);
+
+ if (netif_msg_hw(lp))
+ print_status(lp);
+
+ rtnl_lock();
+ dwceqos_stop(lp->ndev);
+ dwceqos_open(lp->ndev);
+ rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct net_local *lp;
+ const void *mac_address;
+ struct dwceqos_bus_cfg *bus_cfg;
+ struct device_node *np = pdev->dev.of_node;
+
+ ndev = platform_get_drvdata(pdev);
+ lp = netdev_priv(ndev);
+ bus_cfg = &lp->bus_cfg;
+
+ /* Set the MAC address. */
+ mac_address = of_get_mac_address(pdev->dev.of_node);
+ if (mac_address)
+ ether_addr_copy(ndev->dev_addr, mac_address);
+
+ /* These are all optional parameters */
+ lp->en_tx_lpi_clockgating = of_property_read_bool(np,
+ "snps,en-tx-lpi-clockgating");
+ bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+ of_property_read_u32(np, "snps,write-requests",
+ &bus_cfg->write_requests);
+ of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+ of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+ of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+ of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+ netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+ bus_cfg->en_lpi,
+ bus_cfg->write_requests,
+ bus_cfg->read_requests,
+ bus_cfg->burst_map,
+ bus_cfg->rx_pbl,
+ bus_cfg->tx_pbl);
+
+ return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int res;
+
+ dwceqos_reset_state(lp);
+ res = dwceqos_descriptor_init(lp);
+ if (res) {
+ netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+ return res;
+ }
+ netdev_reset_queue(ndev);
+
+ napi_enable(&lp->napi);
+ phy_start(lp->phy_dev);
+ dwceqos_init_hw(lp);
+
+ netif_start_queue(ndev);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+ return 0;
+}
+
+static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
+{
+ u32 reg;
+
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+ reg = DMA_GET_TX_STATE_CH0(reg);
+
+ return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+ /* Wait for all pending TX buffers to be sent. Upper limit based
+ * on max frame size on a 10 Mbit link.
+ */
+ size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
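+ /* That budgets roughly 1.25 ms per descriptor (about one maximum-size
+ * frame time at 10 Mbit/s), polling every 100-200 us below.
+ */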
+
+ while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
+ usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ phy_stop(lp->phy_dev);
+
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi);
+
+ dwceqos_drain_dma(lp);
+
+ netif_tx_lock(lp->ndev);
+ dwceqos_reset_hw(lp);
+ dwceqos_descriptor_free(lp);
+ netif_tx_unlock(lp->ndev);
+
+ return 0;
+}
+
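+/* Emit a TSO context descriptor carrying the new MSS in des2; TCMSSV marks
+ * the MSS field as valid. It is only written when gso_size changes (see
+ * dwceqos_tx_linear()).
+ */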
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+ unsigned short gso_size)
+{
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+ dd->des0 = 0;
+ dd->des1 = 0;
+ dd->des2 = gso_size;
+ dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+ size_t nr_descriptors;
+ size_t initial_descriptor;
+ size_t last_descriptor;
+ size_t prev_gso_size;
+ size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ size_t n = 1;
+ size_t i;
+
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+ ++n;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+ BYTES_PER_DMA_DESC;
+ }
+
+ tx->nr_descriptors = n;
+ tx->initial_descriptor = lp->tx_next;
+ tx->last_descriptor = lp->tx_next;
+ tx->prev_gso_size = lp->gso_size;
+
+ tx->network_header_len = skb_transport_offset(skb);
+ if (skb_is_gso(skb))
+ tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd;
+ struct dwceqos_dma_desc *dd;
+ size_t payload_len;
+ dma_addr_t dma_handle;
+
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+ dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+ lp->gso_size = skb_shinfo(skb)->gso_size;
+ }
+
+ dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "TX DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ rd = &lp->tx_skb[lp->tx_next];
+ dd = &lp->tx_descs[lp->tx_next];
+
+ rd->skb = NULL;
+ rd->len = skb_headlen(skb);
+ rd->mapping = dma_handle;
+
+ /* Set up DMA Descriptor */
+ dd->des0 = dma_handle;
+
+ if (skb_is_gso(skb)) {
+ payload_len = skb_headlen(skb) - tx->network_header_len;
+
+ if (payload_len)
+ dd->des1 = dma_handle + tx->network_header_len;
+ dd->des2 = tx->network_header_len |
+ DWCEQOS_DMA_DES2_B2L(payload_len);
+ dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+ DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+ (skb->len - tx->network_header_len);
+ } else {
+ dd->des1 = 0;
+ dd->des2 = skb_headlen(skb);
+ dd->des3 = skb->len;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_PARTIAL:
+ dd->des3 |= DWCEQOS_DMA_TDES3_CA;
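+ /* fall through */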
+ case CHECKSUM_NONE:
+ case CHECKSUM_UNNECESSARY:
+ case CHECKSUM_COMPLETE:
+ default:
+ break;
+ }
+ }
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+ if (lp->tx_next != tx->initial_descriptor)
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+ return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd = NULL;
+ struct dwceqos_dma_desc *dd;
+ dma_addr_t dma_handle;
+ size_t i;
+
+ /* Setup more ring and DMA descriptor if the packet is fragmented */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ size_t frag_size;
+ size_t consumed_size;
+
+ /* Map DMA Area */
+ dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ /* order-3 fragments span more than one descriptor. */
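+ /* Chunks are capped at 16376 bytes, presumably the largest
+ * 8-byte-aligned length that fits the descriptor buffer-length
+ * field.
+ */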
+ frag_size = skb_frag_size(frag);
+ consumed_size = 0;
+ while (consumed_size < frag_size) {
+ size_t dma_size = min_t(size_t, 16376,
+ frag_size - consumed_size);
+
+ rd = &lp->tx_skb[lp->tx_next];
+ memset(rd, 0, sizeof(*rd));
+
+ dd = &lp->tx_descs[lp->tx_next];
+
+ /* Set DMA Descriptor fields */
+ dd->des0 = dma_handle;
+ dd->des1 = 0;
+ dd->des2 = dma_size;
+
+ if (skb_is_gso(skb))
+ dd->des3 = (skb->len - tx->network_header_len);
+ else
+ dd->des3 = skb->len;
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+ consumed_size += dma_size;
+ }
+
+ rd->len = skb_frag_size(frag);
+ rd->mapping = dma_handle;
+ }
+
+ return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+ lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+ lp->tx_skb[tx->last_descriptor].skb = skb;
+
+ /* Make all descriptor updates visible to the DMA before setting the
+ * owner bit.
+ */
+ wmb();
+
+ lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ /* Make the owner bit visible before TX wakeup. */
+ wmb();
+
+ dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+ size_t i = tx->initial_descriptor;
+
+ while (i != lp->tx_next) {
+ if (lp->tx_skb[i].mapping)
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+
+ lp->tx_skb[i].mapping = 0;
+ lp->tx_skb[i].skb = NULL;
+
+ memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+ i = (i + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ lp->tx_next = tx->initial_descriptor;
+ lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_tx trans;
+ int err;
+
+ dwceqos_tx_prepare(skb, lp, &trans);
+ if (lp->tx_free < trans.nr_descriptors) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ err = dwceqos_tx_linear(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ err = dwceqos_tx_frags(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ WARN_ON(lp->tx_next !=
+ ((trans.initial_descriptor + trans.nr_descriptors) %
+ DWCEQOS_TX_DCNT));
+
+ dwceqos_tx_finalize(skb, lp, &trans);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ spin_lock_bh(&lp->tx_lock);
+ lp->tx_free -= trans.nr_descriptors;
+ spin_unlock_bh(&lp->tx_lock);
+
+ ndev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+tx_error:
+ dwceqos_tx_rollback(lp, &trans);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+ return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+ data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+ /* Do not disable MAC address 0 */
+ if (reg_n != 0)
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval = 0;
+ u32 mc_filter[2];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ unsigned int max_mac_addr;
+
+ max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+ if (ndev->flags & IFF_PROMISC) {
+ regval = DWCEQOS_MAC_PKT_FILT_PR;
+ } else if (netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE ||
+ (ndev->flags & IFF_ALLMULTI)) {
+ regval = DWCEQOS_MAC_PKT_FILT_PM;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_HASHTABLE_LO, 0xffffffff);
+ dwceqos_write(lp, REG_DWCEQOS_MAC_HASHTABLE_HI, 0xffffffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ regval = DWCEQOS_MAC_PKT_FILT_HMC;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table.
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+ /* The most significant bit determines the register
+ * to use (H/L) while the other 5 bits determine
+ * the bit within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_MAC_HASHTABLE_LO, mc_filter[0]);
+ dwceqos_write(lp, REG_DWCEQOS_MAC_HASHTABLE_HI, mc_filter[1]);
+ }
+ if (netdev_uc_count(ndev) > max_mac_addr) {
+ regval |= DWCEQOS_MAC_PKT_FILT_PR;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ dwceqos_set_umac_addr(lp, ha->addr, reg);
+ reg++;
+ }
+ for (; reg < max_mac_addr; reg++)
+ dwceqos_disable_umac_addr(lp, reg);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ dwceqos_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask)
+{
+ if (tx_mask & BIT(27))
+ lp->mmc_counters.txlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+ if (tx_mask & BIT(26))
+ lp->mmc_counters.txlpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+ if (tx_mask & BIT(25))
+ lp->mmc_counters.txoversize_g +=
+ dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+ if (tx_mask & BIT(24))
+ lp->mmc_counters.txvlanpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+ if (tx_mask & BIT(23))
+ lp->mmc_counters.txpausepackets +=
+ dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+ if (tx_mask & BIT(22))
+ lp->mmc_counters.txexcessdef +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+ if (tx_mask & BIT(21))
+ lp->mmc_counters.txpacketcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+ if (tx_mask & BIT(20))
+ lp->mmc_counters.txoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+ if (tx_mask & BIT(19))
+ lp->mmc_counters.txcarriererror +=
+ dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+ if (tx_mask & BIT(18))
+ lp->mmc_counters.txexcesscol +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+ if (tx_mask & BIT(17))
+ lp->mmc_counters.txlatecol +=
+ dwceqos_read(lp, DWC_MMC_TXLATECOL);
+ if (tx_mask & BIT(16))
+ lp->mmc_counters.txdeferred +=
+ dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+ if (tx_mask & BIT(15))
+ lp->mmc_counters.txmulticol_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+ if (tx_mask & BIT(14))
+ lp->mmc_counters.txsinglecol_g +=
+ dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+ if (tx_mask & BIT(13))
+ lp->mmc_counters.txunderflowerror +=
+ dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+ if (tx_mask & BIT(12))
+ lp->mmc_counters.txbroadcastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+ if (tx_mask & BIT(11))
+ lp->mmc_counters.txmulticastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+ if (tx_mask & BIT(10))
+ lp->mmc_counters.txunicastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+ if (tx_mask & BIT(9))
+ lp->mmc_counters.tx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+ if (tx_mask & BIT(8))
+ lp->mmc_counters.tx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+ if (tx_mask & BIT(7))
+ lp->mmc_counters.tx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+ if (tx_mask & BIT(6))
+ lp->mmc_counters.tx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+ if (tx_mask & BIT(5))
+ lp->mmc_counters.tx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+ if (tx_mask & BIT(4))
+ lp->mmc_counters.tx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+ if (tx_mask & BIT(3))
+ lp->mmc_counters.txmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+ if (tx_mask & BIT(2))
+ lp->mmc_counters.txbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+ if (tx_mask & BIT(1))
+ lp->mmc_counters.txpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+ if (tx_mask & BIT(0))
+ lp->mmc_counters.txoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+ if (rx_mask & BIT(27))
+ lp->mmc_counters.rxlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+ if (rx_mask & BIT(26))
+ lp->mmc_counters.rxlpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+ if (rx_mask & BIT(25))
+ lp->mmc_counters.rxctrlpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+ if (rx_mask & BIT(24))
+ lp->mmc_counters.rxrcverror +=
+ dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+ if (rx_mask & BIT(23))
+ lp->mmc_counters.rxwatchdog +=
+ dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+ if (rx_mask & BIT(22))
+ lp->mmc_counters.rxvlanpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+ if (rx_mask & BIT(21))
+ lp->mmc_counters.rxfifooverflow +=
+ dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+ if (rx_mask & BIT(20))
+ lp->mmc_counters.rxpausepackets +=
+ dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+ if (rx_mask & BIT(19))
+ lp->mmc_counters.rxoutofrangetype +=
+ dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+ if (rx_mask & BIT(18))
+ lp->mmc_counters.rxlengtherror +=
+ dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+ if (rx_mask & BIT(17))
+ lp->mmc_counters.rxunicastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+ if (rx_mask & BIT(16))
+ lp->mmc_counters.rx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+ if (rx_mask & BIT(15))
+ lp->mmc_counters.rx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+ if (rx_mask & BIT(14))
+ lp->mmc_counters.rx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+ if (rx_mask & BIT(13))
+ lp->mmc_counters.rx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+ if (rx_mask & BIT(12))
+ lp->mmc_counters.rx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+ if (rx_mask & BIT(11))
+ lp->mmc_counters.rx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+ if (rx_mask & BIT(10))
+ lp->mmc_counters.rxoversize_g +=
+ dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+ if (rx_mask & BIT(9))
+ lp->mmc_counters.rxundersize_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+ if (rx_mask & BIT(8))
+ lp->mmc_counters.rxjabbererror +=
+ dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+ if (rx_mask & BIT(7))
+ lp->mmc_counters.rxrunterror +=
+ dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+ if (rx_mask & BIT(6))
+ lp->mmc_counters.rxalignmenterror +=
+ dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+ if (rx_mask & BIT(5))
+ lp->mmc_counters.rxcrcerror +=
+ dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+ if (rx_mask & BIT(4))
+ lp->mmc_counters.rxmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+ if (rx_mask & BIT(3))
+ lp->mmc_counters.rxbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+ if (rx_mask & BIT(2))
+ lp->mmc_counters.rxoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+ if (rx_mask & BIT(1))
+ lp->mmc_counters.rxoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+ if (rx_mask & BIT(0))
+ lp->mmc_counters.rxpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64 *
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+ unsigned long flags;
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ s->rx_packets = hwstats->rxpacketcount_gb;
+ s->rx_bytes = hwstats->rxoctetcount_gb;
+ s->rx_errors = hwstats->rxpacketcount_gb -
+ hwstats->rxbroadcastpackets_g -
+ hwstats->rxmulticastpackets_g -
+ hwstats->rxunicastpackets_g;
+ s->multicast = hwstats->rxmulticastpackets_g;
+ s->rx_length_errors = hwstats->rxlengtherror;
+ s->rx_crc_errors = hwstats->rxcrcerror;
+ s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+ s->tx_packets = hwstats->txpacketcount_gb;
+ s->tx_bytes = hwstats->txoctetcount_gb;
+
+ if (lp->mmc_tx_counters_mask & BIT(21))
+ s->tx_errors = hwstats->txpacketcount_gb -
+ hwstats->txpacketcount_g;
+ else
+ s->tx_errors = hwstats->txunderflowerror +
+ hwstats->txcarriererror;
+
+ return s;
+}
+
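The rx_errors arithmetic above works by elimination: rxpacketcount_gb counts good and bad frames together, while the three per-type counters count good frames only, so whatever remains after subtracting them must have been received in error. A toy check of that identity with made-up counter values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical counter snapshot: _gb = good + bad, _g = good only. */
	uint64_t rxpacketcount_gb = 1000;
	uint64_t rxunicastpackets_g = 900;
	uint64_t rxmulticastpackets_g = 60;
	uint64_t rxbroadcastpackets_g = 30;

	/* 1000 total - 990 good = 10 errored frames. */
	uint64_t rx_errors = rxpacketcount_gb - rxbroadcastpackets_g -
			     rxmulticastpackets_g - rxunicastpackets_g;

	assert(rx_errors == 10);
	return 0;
}
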
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ pp->autoneg = lp->flowcontrol.autoneg;
+ pp->tx_pause = lp->flowcontrol.tx;
+ pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int ret = 0;
+
+ lp->flowcontrol.autoneg = pp->autoneg;
+ if (pp->autoneg) {
+ lp->phy_dev->advertising |= ADVERTISED_Pause;
+ lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ } else {
+ lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+ lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ lp->flowcontrol.rx = pp->rx_pause;
+ lp->flowcontrol.tx = pp->tx_pause;
+ }
+
+ if (netif_running(ndev))
+ ret = phy_start_aneg(lp->phy_dev);
+
+ return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+ u8 *data)
+{
+ size_t i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ size_t i;
+ u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data,
+ mmcstat + dwceqos_ethtool_stats[i].offset,
+ sizeof(u64));
+ data++;
+ }
+}
+
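dwceqos_get_strings() and dwceqos_get_ethtool_stats() above walk one shared name-plus-offset table (dwceqos_ethtool_stats, defined earlier in the file), so the string order and the value order always agree. A minimal sketch of that table pattern, using a hypothetical two-field counter struct in place of dwceqos_mmc_counters:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct counters {	/* hypothetical stand-in for dwceqos_mmc_counters */
	uint64_t rx_packets;
	uint64_t tx_packets;
};

struct stat_desc {
	const char *stat_name;
	size_t offset;
};

static const struct stat_desc stats[] = {
	{ "rx_packets", offsetof(struct counters, rx_packets) },
	{ "tx_packets", offsetof(struct counters, tx_packets) },
};

int main(void)
{
	struct counters c = { .rx_packets = 42, .tx_packets = 7 };
	const uint8_t *base = (const uint8_t *)&c;
	uint64_t val;
	size_t i;

	/* Same walk as get_ethtool_stats(): copy each u64 out by offset. */
	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		memcpy(&val, base + stats[i].offset, sizeof(val));
		printf("%s: %llu\n", stats[i].stat_name,
		       (unsigned long long)val);
	}
	return 0;
}
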
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+ return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *space)
+{
+ const struct net_local *lp = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 lpi_status;
+ u32 lpi_enabled;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ edata->eee_active = lp->eee_active;
+ edata->eee_enabled = lp->eee_enabled;
+ edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+ lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+ edata->tx_lpi_enabled = lpi_enabled;
+
+ if (netif_msg_hw(lp)) {
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+ netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+ dwceqos_get_rx_lpi_state(regval),
+ dwceqos_get_tx_lpi_state(regval));
+ }
+
+ return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+ unsigned long flags;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ if (edata->eee_enabled && !lp->eee_active)
+ return -EOPNOTSUPP;
+
+ if (edata->tx_lpi_enabled) {
+ if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+ edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+ return -EINVAL;
+ }
+
+ lp->eee_enabled = edata->eee_enabled;
+
+ if (edata->eee_enabled && edata->tx_lpi_enabled) {
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+ edata->tx_lpi_timer);
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ if (lp->en_tx_lpi_clockgating)
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+
+ return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ lp->msg_enable = msglevel;
+}
+
+static struct ethtool_ops dwceqos_ethtool_ops = {
+ .get_settings = dwceqos_get_settings,
+ .set_settings = dwceqos_set_settings,
+ .get_drvinfo = dwceqos_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = dwceqos_get_pauseparam,
+ .set_pauseparam = dwceqos_set_pauseparam,
+ .get_strings = dwceqos_get_strings,
+ .get_ethtool_stats = dwceqos_get_ethtool_stats,
+ .get_sset_count = dwceqos_get_sset_count,
+ .get_regs = dwceqos_get_regs,
+ .get_regs_len = dwceqos_get_regs_len,
+ .get_eee = dwceqos_get_eee,
+ .set_eee = dwceqos_set_eee,
+ .get_msglevel = dwceqos_get_msglevel,
+ .set_msglevel = dwceqos_set_msglevel,
+};
+
+static struct net_device_ops netdev_ops = {
+ .ndo_open = dwceqos_open,
+ .ndo_stop = dwceqos_stop,
+ .ndo_start_xmit = dwceqos_start_xmit,
+ .ndo_set_rx_mode = dwceqos_set_rx_mode,
+ .ndo_set_mac_address = dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dwceqos_poll_controller,
+#endif
+ .ndo_do_ioctl = dwceqos_ioctl,
+ .ndo_tx_timeout = dwceqos_tx_timeout,
+ .ndo_get_stats64 = dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem = NULL;
+ struct net_device *ndev;
+ struct net_local *lp;
+ int ret = -ENXIO;
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_mem) {
+ dev_err(&pdev->dev, "no IO resource defined.\n");
+ return -ENXIO;
+ }
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&pdev->dev, "etherdev allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->pdev = pdev;
+ lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+ spin_lock_init(&lp->tx_lock);
+ spin_lock_init(&lp->hw_lock);
+ spin_lock_init(&lp->stats_lock);
+
+ lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(lp->apb_pclk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ ret = PTR_ERR(lp->apb_pclk);
+ goto err_out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->apb_pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+ goto err_out_free_netdev;
+ }
+
+ lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->baseaddr)) {
+ dev_err(&pdev->dev, "failed to map baseaddress.\n");
+ ret = PTR_ERR(lp->baseaddr);
+ goto err_out_clk_dis_aper;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+ ndev->netdev_ops = &netdev_ops;
+ ndev->ethtool_ops = &dwceqos_ethtool_ops;
+ ndev->base_addr = r_mem->start;
+
+ dwceqos_get_hwfeatures(lp);
+ dwceqos_mdio_set_csr(lp);
+
+ ndev->hw_features = NETIF_F_SG;
+
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+ ndev->hw_features |= NETIF_F_RXCSUM;
+
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_aper;
+ }
+
+ lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(lp->phy_ref_clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ ret = PTR_ERR(lp->phy_ref_clk);
+ goto err_out_unregister_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->phy_ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "phy-handle", 0);
+ if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+ ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "invalid fixed-link");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+ }
+
+ ret = of_get_phy_mode(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ lp->phy_interface = ret;
+
+ ret = dwceqos_mii_init(lp);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ ret = dwceqos_mii_probe(ndev);
+ if (ret != 0) {
+ netdev_err(ndev, "mii_probe fail.\n");
+ ret = -ENXIO;
+ goto err_out_unregister_clk_notifier;
+ }
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+ (unsigned long)ndev);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+ lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+ platform_set_drvdata(pdev, ndev);
+ ret = dwceqos_probe_config_dt(pdev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+ ret);
+ goto err_out_unregister_clk_notifier;
+ }
+ dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+ pdev->id, ndev->base_addr, ndev->irq);
+
+ ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+ ndev->irq, ret);
+ goto err_out_unregister_clk_notifier;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(ndev, "net_local@%p\n", lp);
+
+ return 0;
+
+err_out_unregister_clk_notifier:
+ clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+ unregister_netdev(ndev);
+err_out_clk_dis_aper:
+ clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp;
+
+ if (ndev) {
+ lp = netdev_priv(ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+
+ unregister_netdev(ndev);
+
+ clk_disable_unprepare(lp->phy_ref_clk);
+ clk_disable_unprepare(lp->apb_pclk);
+
+ free_netdev(ndev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+ .probe = dwceqos_probe,
+ .remove = dwceqos_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dwceq_of_match,
+ },
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index dd9430043536..cba3d9fcb465 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -41,6 +41,8 @@
#include <linux/gpio.h>
#include <linux/atomic.h>
+#include <asm/mach-ar7/ar7.h>
+
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 462820514fae..8fc90f1c872c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_enable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- enable_irq(priv->irqs_table[i]); \
- } while (0)
-#define cpsw_disable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- disable_irq_nosync(priv->irqs_table[i]); \
- } while (0)
-
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
priv->data.active_slave)
@@ -378,7 +365,8 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct napi_struct napi;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct device *dev;
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
@@ -399,10 +387,12 @@ struct cpsw_priv {
struct cpsw_ale *ale;
bool rx_pause;
bool tx_pause;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- bool irq_enabled;
struct cpts *cpts;
u32 emac_port;
};
@@ -509,9 +499,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
(func)(slave++, ##arg); \
} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__) \
- (priv->slaves[__slave_no__].ndev)
+ ((__slave_no__ < priv->data.slaves) ? \
+ priv->slaves[__slave_no__].ndev : NULL)
#define cpsw_get_slave_priv(priv, __slave_no__) \
- ((priv->slaves[__slave_no__].ndev) ? \
+ (((__slave_no__ < priv->data.slaves) && \
+ (priv->slaves[__slave_no__].ndev)) ? \
netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -763,13 +755,15 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ writel(0, &priv->wr_regs->tx_en);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
- cpdma_chan_process(priv->txch, 128);
- priv = cpsw_get_slave_priv(priv, 1);
- if (priv)
- cpdma_chan_process(priv->txch, 128);
+ if (priv->quirk_irq) {
+ disable_irq_nosync(priv->irqs_table[1]);
+ priv->tx_irq_disabled = true;
+ }
+ napi_schedule(&priv->napi_tx);
return IRQ_HANDLED;
}
@@ -778,52 +772,55 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
struct cpsw_priv *priv = dev_id;
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ writel(0, &priv->wr_regs->rx_en);
- cpsw_intr_disable(priv);
- if (priv->irq_enabled == true) {
- cpsw_disable_irq(priv);
- priv->irq_enabled = false;
+ if (priv->quirk_irq) {
+ disable_irq_nosync(priv->irqs_table[0]);
+ priv->rx_irq_disabled = true;
}
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
- }
+ napi_schedule(&priv->napi_rx);
+ return IRQ_HANDLED;
+}
- priv = cpsw_get_slave_priv(priv, 1);
- if (!priv)
- return IRQ_NONE;
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_priv *priv = napi_to_priv(napi_tx);
+ int num_tx;
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
+ num_tx = cpdma_chan_process(priv->txch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &priv->wr_regs->tx_en);
+ if (priv->quirk_irq && priv->tx_irq_disabled) {
+ priv->tx_irq_disabled = false;
+ enable_irq(priv->irqs_table[1]);
+ }
}
- return IRQ_NONE;
+
+ if (num_tx)
+ cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
+
+ return num_tx;
}
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi);
- int num_tx, num_rx;
-
- num_tx = cpdma_chan_process(priv->txch, 128);
+ struct cpsw_priv *priv = napi_to_priv(napi_rx);
+ int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
- struct cpsw_priv *prim_cpsw;
-
- napi_complete(napi);
- cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(priv);
+ napi_complete(napi_rx);
+ writel(0xff, &priv->wr_regs->rx_en);
+ if (priv->quirk_irq && priv->rx_irq_disabled) {
+ priv->rx_irq_disabled = false;
+ enable_irq(priv->irqs_table[0]);
}
}
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_rx)
+ cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
return num_rx;
}
@@ -1244,7 +1241,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_priv *prim_cpsw;
int i, ret;
u32 reg;
@@ -1274,6 +1270,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
ALE_ALL_PORTS << priv->host_port, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1287,6 +1285,19 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* Enable internal fifo flow control */
writel(0x7, &priv->regs->flow_control);
+ napi_enable(&priv_sl0->napi_rx);
+ napi_enable(&priv_sl0->napi_tx);
+
+ if (priv_sl0->tx_irq_disabled) {
+ priv_sl0->tx_irq_disabled = false;
+ enable_irq(priv->irqs_table[1]);
+ }
+
+ if (priv_sl0->rx_irq_disabled) {
+ priv_sl0->rx_irq_disabled = false;
+ enable_irq(priv->irqs_table[0]);
+ }
+
if (WARN_ON(!priv->data.rx_descs))
priv->data.rx_descs = 128;
@@ -1325,18 +1336,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_set_coalesce(ndev, &coal);
}
- napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
- prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(prim_cpsw);
- }
- }
-
if (priv->data.dual_emac)
priv->slaves[priv->emac_port].open_stat = true;
return 0;
@@ -1355,10 +1357,13 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
netif_stop_queue(priv->ndev);
- napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
if (cpsw_common_res_usage_state(priv) <= 1) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+ napi_disable(&priv_sl0->napi_rx);
+ napi_disable(&priv_sl0->napi_tx);
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
cpdma_ctlr_stop(priv->dma);
@@ -2141,7 +2146,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2155,6 +2159,44 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
return ret;
}
+#define CPSW_QUIRK_IRQ BIT(0)
+
+static struct platform_device_id cpsw_devtype[] = {
+ {
+ /* keep it for existing compatibles */
+ .name = "cpsw",
+ .driver_data = CPSW_QUIRK_IRQ,
+ }, {
+ .name = "am335x-cpsw",
+ .driver_data = CPSW_QUIRK_IRQ,
+ }, {
+ .name = "am4372-cpsw",
+ .driver_data = 0,
+ }, {
+ .name = "dra7-cpsw",
+ .driver_data = 0,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, cpsw_devtype);
+
+enum ti_cpsw_type {
+ CPSW = 0,
+ AM335X_CPSW,
+ AM4372_CPSW,
+ DRA7_CPSW,
+};
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
+ { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
+ { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
+ { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
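These tables tie each DT compatible to a platform_device_id whose driver_data carries quirk flags; cpsw_probe() below recovers it via of_match_device() and sets priv->quirk_irq from it. A standalone sketch of that compatible-to-quirk lookup, with hypothetical names standing in for the OF machinery:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_IRQ (1u << 0)	/* mirrors CPSW_QUIRK_IRQ */

struct dev_id {			/* hypothetical platform_device_id analogue */
	const char *name;
	unsigned long driver_data;
};

static const struct dev_id devtype[] = {
	{ "cpsw", QUIRK_IRQ },
	{ "am4372-cpsw", 0 },
	{ NULL, 0 }
};

/* Hypothetical analogue of the of_match_device() + id_entry lookup:
 * strip the vendor prefix, then match the device name exactly.
 */
static const struct dev_id *match(const char *compatible)
{
	const char *name = strchr(compatible, ',');
	const struct dev_id *id;

	name = name ? name + 1 : compatible;
	for (id = devtype; id->name; id++)
		if (!strcmp(name, id->name))
			return id;
	return NULL;
}

int main(void)
{
	const struct dev_id *id = match("ti,cpsw");
	bool quirk_irq = id && (id->driver_data & QUIRK_IRQ);

	printf("quirk_irq=%d\n", quirk_irq);
	return 0;
}
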
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data;
@@ -2164,6 +2206,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
struct resource *res, *ss_res;
+ const struct of_device_id *of_id;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i;
int irq;
@@ -2183,7 +2226,6 @@ static int cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- priv->irq_enabled = true;
if (!priv->cpts) {
dev_err(&pdev->dev, "error allocating cpts\n");
ret = -ENOMEM;
@@ -2355,6 +2397,13 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
+ of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+ if (pdev->id_entry->driver_data)
+ priv->quirk_irq = true;
+ }
+
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
* we will not request them.
@@ -2394,7 +2443,8 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2518,12 +2568,6 @@ static int cpsw_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
-static const struct of_device_id cpsw_of_mtable[] = {
- { .compatible = "ti,cpsw", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
-
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index aeebc0a7bf47..a21c77bc1b27 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2004,8 +2004,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (res_ctrl) {
priv->ctrl_base =
devm_ioremap_resource(&pdev->dev, res_ctrl);
- if (IS_ERR(priv->ctrl_base))
+ if (IS_ERR(priv->ctrl_base)) {
+ rc = PTR_ERR(priv->ctrl_base);
goto no_pdata;
+ }
} else {
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
}
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
/* SGMII functions */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..1a5aca55ea9f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -51,6 +52,8 @@
NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
NETIF_MSG_RX_STATUS)
+#define NETCP_EFUSE_ADDR_SWAP 2
+
#define knav_queue_get_id(q) knav_queue_device_control(q, \
KNAV_QUEUE_GET_ID, (unsigned long)NULL)
@@ -172,13 +175,22 @@ static void set_words(u32 *words, int num_words, u32 *desc)
}
/* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
unsigned int addr0, addr1;
addr1 = readl(efuse_mac + 4);
addr0 = readl(efuse_mac);
+ switch (swap) {
+ case NETCP_EFUSE_ADDR_SWAP:
+ addr0 = addr1;
+ addr1 = readl(efuse_mac);
+ break;
+ default:
+ break;
+ }
+
x[0] = (addr1 & 0x0000ff00) >> 8;
x[1] = addr1 & 0x000000ff;
x[2] = (addr0 & 0xff000000) >> 24;
@@ -804,30 +816,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1010,7 +1020,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
@@ -1546,8 +1556,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1627,11 @@ static int netcp_ndo_open(struct net_device *ndev)
}
mutex_unlock(&netcp_modules_lock);
- netcp_rxpool_refill(netcp);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
knav_queue_enable_notify(netcp->tx_compl_q);
knav_queue_enable_notify(netcp->rx_queue);
+ netcp_rxpool_refill(netcp);
netif_tx_wake_all_queues(ndev);
dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
return 0;
@@ -1902,7 +1912,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+ emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
@@ -1941,14 +1951,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2114,7 @@ probe_quit:
static int netcp_remove(struct platform_device *pdev)
{
struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+ struct netcp_intf *netcp_intf, *netcp_tmp;
struct netcp_inst_modpriv *inst_modpriv, *tmp;
struct netcp_module *module;
@@ -2123,10 +2126,17 @@ static int netcp_remove(struct platform_device *pdev)
list_del(&inst_modpriv->inst_list);
kfree(inst_modpriv);
}
- WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
- pdev->name);
- devm_kfree(&pdev->dev, netcp_device);
+ /* now that all modules are removed, clean up the interfaces */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+ WARN(!list_empty(&netcp_device->interface_head),
+ "%s interface list not empty!\n", pdev->name);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
@@ -2142,7 +2152,6 @@ MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver netcp_driver = {
.driver = {
.name = "netcp-1.0",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
.probe = netcp_probe,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..6f16d6aaf7b7 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -295,8 +295,6 @@ struct xgbe_hw_stats {
u32 rx_dma_overruns;
};
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
struct gbenu_ss_regs {
u32 id_ver;
u32 synce_count; /* NU */
@@ -480,7 +478,6 @@ struct gbenu_hw_stats {
u32 tx_pri7_drop_bcnt;
};
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ 0x200
struct gbe_ss_regs {
@@ -615,7 +612,6 @@ struct gbe_hw_stats {
u32 rx_dma_overruns;
};
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
@@ -646,6 +642,7 @@ struct gbe_priv {
bool enable_ale;
u8 max_num_slaves;
u8 max_num_ports; /* max_num_slaves + 1 */
+ u8 num_stats_mods;
struct netcp_tx_pipe tx_pipe;
int host_port;
@@ -675,6 +672,7 @@ struct gbe_priv {
struct net_device *dummy_ndev;
u64 *hw_stats;
+ u32 *hw_stats_prev;
const struct netcp_ethtool_stat *et_stats;
int num_et_stats;
/* Lock for updating the hwstats */
@@ -874,7 +872,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
};
/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE 33
+#define GBENU_ET_STATS_HOST_SIZE 52
#define GBENU_STATS_HOST(field) \
{ \
@@ -883,8 +881,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
offsetof(struct gbenu_hw_stats, field) \
}
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE 46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE 65
#define GBENU_STATS_P1(field) \
{ \
@@ -976,7 +974,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_HOST(ale_unknown_mcast_bytes),
GBENU_STATS_HOST(ale_unknown_bcast),
GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+ GBENU_STATS_HOST(ale_pol_match),
+ GBENU_STATS_HOST(ale_pol_match_red),
+ GBENU_STATS_HOST(ale_pol_match_yellow),
GBENU_STATS_HOST(tx_mem_protect_err),
+ GBENU_STATS_HOST(tx_pri0_drop),
+ GBENU_STATS_HOST(tx_pri1_drop),
+ GBENU_STATS_HOST(tx_pri2_drop),
+ GBENU_STATS_HOST(tx_pri3_drop),
+ GBENU_STATS_HOST(tx_pri4_drop),
+ GBENU_STATS_HOST(tx_pri5_drop),
+ GBENU_STATS_HOST(tx_pri6_drop),
+ GBENU_STATS_HOST(tx_pri7_drop),
+ GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri7_drop_bcnt),
/* GBENU Module 1 */
GBENU_STATS_P1(rx_good_frames),
GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P1(ale_unknown_mcast_bytes),
GBENU_STATS_P1(ale_unknown_bcast),
GBENU_STATS_P1(ale_unknown_bcast_bytes),
+ GBENU_STATS_P1(ale_pol_match),
+ GBENU_STATS_P1(ale_pol_match_red),
+ GBENU_STATS_P1(ale_pol_match_yellow),
GBENU_STATS_P1(tx_mem_protect_err),
+ GBENU_STATS_P1(tx_pri0_drop),
+ GBENU_STATS_P1(tx_pri1_drop),
+ GBENU_STATS_P1(tx_pri2_drop),
+ GBENU_STATS_P1(tx_pri3_drop),
+ GBENU_STATS_P1(tx_pri4_drop),
+ GBENU_STATS_P1(tx_pri5_drop),
+ GBENU_STATS_P1(tx_pri6_drop),
+ GBENU_STATS_P1(tx_pri7_drop),
+ GBENU_STATS_P1(tx_pri0_drop_bcnt),
+ GBENU_STATS_P1(tx_pri1_drop_bcnt),
+ GBENU_STATS_P1(tx_pri2_drop_bcnt),
+ GBENU_STATS_P1(tx_pri3_drop_bcnt),
+ GBENU_STATS_P1(tx_pri4_drop_bcnt),
+ GBENU_STATS_P1(tx_pri5_drop_bcnt),
+ GBENU_STATS_P1(tx_pri6_drop_bcnt),
+ GBENU_STATS_P1(tx_pri7_drop_bcnt),
/* GBENU Module 2 */
GBENU_STATS_P2(rx_good_frames),
GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P2(ale_unknown_mcast_bytes),
GBENU_STATS_P2(ale_unknown_bcast),
GBENU_STATS_P2(ale_unknown_bcast_bytes),
+ GBENU_STATS_P2(ale_pol_match),
+ GBENU_STATS_P2(ale_pol_match_red),
+ GBENU_STATS_P2(ale_pol_match_yellow),
GBENU_STATS_P2(tx_mem_protect_err),
+ GBENU_STATS_P2(tx_pri0_drop),
+ GBENU_STATS_P2(tx_pri1_drop),
+ GBENU_STATS_P2(tx_pri2_drop),
+ GBENU_STATS_P2(tx_pri3_drop),
+ GBENU_STATS_P2(tx_pri4_drop),
+ GBENU_STATS_P2(tx_pri5_drop),
+ GBENU_STATS_P2(tx_pri6_drop),
+ GBENU_STATS_P2(tx_pri7_drop),
+ GBENU_STATS_P2(tx_pri0_drop_bcnt),
+ GBENU_STATS_P2(tx_pri1_drop_bcnt),
+ GBENU_STATS_P2(tx_pri2_drop_bcnt),
+ GBENU_STATS_P2(tx_pri3_drop_bcnt),
+ GBENU_STATS_P2(tx_pri4_drop_bcnt),
+ GBENU_STATS_P2(tx_pri5_drop_bcnt),
+ GBENU_STATS_P2(tx_pri6_drop_bcnt),
+ GBENU_STATS_P2(tx_pri7_drop_bcnt),
/* GBENU Module 3 */
GBENU_STATS_P3(rx_good_frames),
GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P3(ale_unknown_mcast_bytes),
GBENU_STATS_P3(ale_unknown_bcast),
GBENU_STATS_P3(ale_unknown_bcast_bytes),
+ GBENU_STATS_P3(ale_pol_match),
+ GBENU_STATS_P3(ale_pol_match_red),
+ GBENU_STATS_P3(ale_pol_match_yellow),
GBENU_STATS_P3(tx_mem_protect_err),
+ GBENU_STATS_P3(tx_pri0_drop),
+ GBENU_STATS_P3(tx_pri1_drop),
+ GBENU_STATS_P3(tx_pri2_drop),
+ GBENU_STATS_P3(tx_pri3_drop),
+ GBENU_STATS_P3(tx_pri4_drop),
+ GBENU_STATS_P3(tx_pri5_drop),
+ GBENU_STATS_P3(tx_pri6_drop),
+ GBENU_STATS_P3(tx_pri7_drop),
+ GBENU_STATS_P3(tx_pri0_drop_bcnt),
+ GBENU_STATS_P3(tx_pri1_drop_bcnt),
+ GBENU_STATS_P3(tx_pri2_drop_bcnt),
+ GBENU_STATS_P3(tx_pri3_drop_bcnt),
+ GBENU_STATS_P3(tx_pri4_drop_bcnt),
+ GBENU_STATS_P3(tx_pri5_drop_bcnt),
+ GBENU_STATS_P3(tx_pri6_drop_bcnt),
+ GBENU_STATS_P3(tx_pri7_drop_bcnt),
/* GBENU Module 4 */
GBENU_STATS_P4(rx_good_frames),
GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P4(ale_unknown_mcast_bytes),
GBENU_STATS_P4(ale_unknown_bcast),
GBENU_STATS_P4(ale_unknown_bcast_bytes),
+ GBENU_STATS_P4(ale_pol_match),
+ GBENU_STATS_P4(ale_pol_match_red),
+ GBENU_STATS_P4(ale_pol_match_yellow),
GBENU_STATS_P4(tx_mem_protect_err),
+ GBENU_STATS_P4(tx_pri0_drop),
+ GBENU_STATS_P4(tx_pri1_drop),
+ GBENU_STATS_P4(tx_pri2_drop),
+ GBENU_STATS_P4(tx_pri3_drop),
+ GBENU_STATS_P4(tx_pri4_drop),
+ GBENU_STATS_P4(tx_pri5_drop),
+ GBENU_STATS_P4(tx_pri6_drop),
+ GBENU_STATS_P4(tx_pri7_drop),
+ GBENU_STATS_P4(tx_pri0_drop_bcnt),
+ GBENU_STATS_P4(tx_pri1_drop_bcnt),
+ GBENU_STATS_P4(tx_pri2_drop_bcnt),
+ GBENU_STATS_P4(tx_pri3_drop_bcnt),
+ GBENU_STATS_P4(tx_pri4_drop_bcnt),
+ GBENU_STATS_P4(tx_pri5_drop_bcnt),
+ GBENU_STATS_P4(tx_pri6_drop_bcnt),
+ GBENU_STATS_P4(tx_pri7_drop_bcnt),
/* GBENU Module 5 */
GBENU_STATS_P5(rx_good_frames),
GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P5(ale_unknown_mcast_bytes),
GBENU_STATS_P5(ale_unknown_bcast),
GBENU_STATS_P5(ale_unknown_bcast_bytes),
+ GBENU_STATS_P5(ale_pol_match),
+ GBENU_STATS_P5(ale_pol_match_red),
+ GBENU_STATS_P5(ale_pol_match_yellow),
GBENU_STATS_P5(tx_mem_protect_err),
+ GBENU_STATS_P5(tx_pri0_drop),
+ GBENU_STATS_P5(tx_pri1_drop),
+ GBENU_STATS_P5(tx_pri2_drop),
+ GBENU_STATS_P5(tx_pri3_drop),
+ GBENU_STATS_P5(tx_pri4_drop),
+ GBENU_STATS_P5(tx_pri5_drop),
+ GBENU_STATS_P5(tx_pri6_drop),
+ GBENU_STATS_P5(tx_pri7_drop),
+ GBENU_STATS_P5(tx_pri0_drop_bcnt),
+ GBENU_STATS_P5(tx_pri1_drop_bcnt),
+ GBENU_STATS_P5(tx_pri2_drop_bcnt),
+ GBENU_STATS_P5(tx_pri3_drop_bcnt),
+ GBENU_STATS_P5(tx_pri4_drop_bcnt),
+ GBENU_STATS_P5(tx_pri5_drop_bcnt),
+ GBENU_STATS_P5(tx_pri6_drop_bcnt),
+ GBENU_STATS_P5(tx_pri7_drop_bcnt),
/* GBENU Module 6 */
GBENU_STATS_P6(rx_good_frames),
GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P6(ale_unknown_mcast_bytes),
GBENU_STATS_P6(ale_unknown_bcast),
GBENU_STATS_P6(ale_unknown_bcast_bytes),
+ GBENU_STATS_P6(ale_pol_match),
+ GBENU_STATS_P6(ale_pol_match_red),
+ GBENU_STATS_P6(ale_pol_match_yellow),
GBENU_STATS_P6(tx_mem_protect_err),
+ GBENU_STATS_P6(tx_pri0_drop),
+ GBENU_STATS_P6(tx_pri1_drop),
+ GBENU_STATS_P6(tx_pri2_drop),
+ GBENU_STATS_P6(tx_pri3_drop),
+ GBENU_STATS_P6(tx_pri4_drop),
+ GBENU_STATS_P6(tx_pri5_drop),
+ GBENU_STATS_P6(tx_pri6_drop),
+ GBENU_STATS_P6(tx_pri7_drop),
+ GBENU_STATS_P6(tx_pri0_drop_bcnt),
+ GBENU_STATS_P6(tx_pri1_drop_bcnt),
+ GBENU_STATS_P6(tx_pri2_drop_bcnt),
+ GBENU_STATS_P6(tx_pri3_drop_bcnt),
+ GBENU_STATS_P6(tx_pri4_drop_bcnt),
+ GBENU_STATS_P6(tx_pri5_drop_bcnt),
+ GBENU_STATS_P6(tx_pri6_drop_bcnt),
+ GBENU_STATS_P6(tx_pri7_drop_bcnt),
/* GBENU Module 7 */
GBENU_STATS_P7(rx_good_frames),
GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P7(ale_unknown_mcast_bytes),
GBENU_STATS_P7(ale_unknown_bcast),
GBENU_STATS_P7(ale_unknown_bcast_bytes),
+ GBENU_STATS_P7(ale_pol_match),
+ GBENU_STATS_P7(ale_pol_match_red),
+ GBENU_STATS_P7(ale_pol_match_yellow),
GBENU_STATS_P7(tx_mem_protect_err),
+ GBENU_STATS_P7(tx_pri0_drop),
+ GBENU_STATS_P7(tx_pri1_drop),
+ GBENU_STATS_P7(tx_pri2_drop),
+ GBENU_STATS_P7(tx_pri3_drop),
+ GBENU_STATS_P7(tx_pri4_drop),
+ GBENU_STATS_P7(tx_pri5_drop),
+ GBENU_STATS_P7(tx_pri6_drop),
+ GBENU_STATS_P7(tx_pri7_drop),
+ GBENU_STATS_P7(tx_pri0_drop_bcnt),
+ GBENU_STATS_P7(tx_pri1_drop_bcnt),
+ GBENU_STATS_P7(tx_pri2_drop_bcnt),
+ GBENU_STATS_P7(tx_pri3_drop_bcnt),
+ GBENU_STATS_P7(tx_pri4_drop_bcnt),
+ GBENU_STATS_P7(tx_pri5_drop_bcnt),
+ GBENU_STATS_P7(tx_pri6_drop_bcnt),
+ GBENU_STATS_P7(tx_pri7_drop_bcnt),
/* GBENU Module 8 */
GBENU_STATS_P8(rx_good_frames),
GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P8(ale_unknown_mcast_bytes),
GBENU_STATS_P8(ale_unknown_bcast),
GBENU_STATS_P8(ale_unknown_bcast_bytes),
+ GBENU_STATS_P8(ale_pol_match),
+ GBENU_STATS_P8(ale_pol_match_red),
+ GBENU_STATS_P8(ale_pol_match_yellow),
GBENU_STATS_P8(tx_mem_protect_err),
+ GBENU_STATS_P8(tx_pri0_drop),
+ GBENU_STATS_P8(tx_pri1_drop),
+ GBENU_STATS_P8(tx_pri2_drop),
+ GBENU_STATS_P8(tx_pri3_drop),
+ GBENU_STATS_P8(tx_pri4_drop),
+ GBENU_STATS_P8(tx_pri5_drop),
+ GBENU_STATS_P8(tx_pri6_drop),
+ GBENU_STATS_P8(tx_pri7_drop),
+ GBENU_STATS_P8(tx_pri0_drop_bcnt),
+ GBENU_STATS_P8(tx_pri1_drop_bcnt),
+ GBENU_STATS_P8(tx_pri2_drop_bcnt),
+ GBENU_STATS_P8(tx_pri3_drop_bcnt),
+ GBENU_STATS_P8(tx_pri4_drop_bcnt),
+ GBENU_STATS_P8(tx_pri5_drop_bcnt),
+ GBENU_STATS_P8(tx_pri6_drop_bcnt),
+ GBENU_STATS_P8(tx_pri7_drop_bcnt),
};
#define XGBE_STATS0_INFO(field) \
@@ -1554,70 +1723,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
}
}
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+ u32 __iomem *p_stats_entry;
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ if (gbe_dev->et_stats[i].type == stats_mod) {
+ p_stats_entry = base + gbe_dev->et_stats[i].offset;
+ gbe_dev->hw_stats[i] = 0;
+ gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+ }
+ }
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+ int et_stats_entry)
{
void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0;
+ u32 __iomem *p_stats_entry;
+ u32 curr, delta;
+
+ /* The hw_stats_regs pointers are already
+ * properly set to point to the right base.
+ */
+ base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+ p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+ curr = readl(p_stats_entry);
+ delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+ gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+ gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
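gbe_update_hw_stats_entry() folds a 32-bit hardware counter into a 64-bit software total by accumulating deltas, and the unsigned subtraction curr - prev stays correct even when the hardware counter wraps past 2^32 between reads (as long as it wraps at most once per polling interval). A small standalone check of that property:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t prev = 0xfffffff0u;	/* counter just before wrapping */
	uint32_t curr = 0x00000010u;	/* counter after 0x20 more events */
	uint64_t total = 0;

	/* Unsigned 32-bit subtraction wraps, so the delta is still 0x20. */
	total += (uint32_t)(curr - prev);
	assert(total == 0x20);
	return 0;
}
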
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
int i;
for (i = 0; i < gbe_dev->num_et_stats; i++) {
- base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
- p = base + gbe_dev->et_stats[i].offset;
- tmp = readl(p);
- gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+ gbe_update_hw_stats_entry(gbe_dev, i);
+
if (data)
data[i] = gbe_dev->hw_stats[i];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
}
}
-static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+ int stats_mod)
{
- void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
- void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
- u64 *hw_stats = &gbe_dev->hw_stats[0];
- void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
- int i, j, pair;
+ u32 val;
- for (pair = 0; pair < 2; pair++) {
- val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
- if (pair == 0)
- val &= ~GBE_STATS_CD_SEL;
- else
- val |= GBE_STATS_CD_SEL;
+ switch (stats_mod) {
+ case GBE_STATSA_MODULE:
+ case GBE_STATSB_MODULE:
+ val &= ~GBE_STATS_CD_SEL;
+ break;
+ case GBE_STATSC_MODULE:
+ case GBE_STATSD_MODULE:
+ val |= GBE_STATS_CD_SEL;
+ break;
+ default:
+ return;
+ }
- /* make the stat modules visible */
- writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ /* make the stat module visible */
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
- for (i = 0; i < pair_size; i++) {
- j = pair * pair_size + i;
- switch (gbe_dev->et_stats[j].type) {
- case GBE_STATSA_MODULE:
- case GBE_STATSC_MODULE:
- base = gbe_statsa;
- break;
- case GBE_STATSB_MODULE:
- case GBE_STATSD_MODULE:
- base = gbe_statsb;
- break;
- }
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+ gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+ int et_entry, j, pair;
+
+ for (pair = 0; pair < 2; pair++) {
+ gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+ GBE_STATSC_MODULE :
+ GBE_STATSA_MODULE));
+
+ for (j = 0; j < half_num_et_stats; j++) {
+ et_entry = pair * half_num_et_stats + j;
+ gbe_update_hw_stats_entry(gbe_dev, et_entry);
- p = base + gbe_dev->et_stats[j].offset;
- tmp = readl(p);
- hw_stats[j] += tmp;
if (data)
- data[j] = hw_stats[j];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
+ data[et_entry] = gbe_dev->hw_stats[et_entry];
}
}
}
@@ -1901,11 +2097,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+ struct gbe_slave *slave, bool set)
+{
+ void __iomem *sgmii_port_regs;
+
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
+
+ if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+ sgmii_port_regs = priv->sgmii_port34_regs;
+ else
+ sgmii_port_regs = priv->sgmii_port_regs;
+
+ netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+}
+
static void gbe_slave_stop(struct gbe_intf *intf)
{
struct gbe_priv *gbe_dev = intf->gbe_dev;
struct gbe_slave *slave = intf->slave;
+ gbe_sgmii_rtreset(gbe_dev, slave, true);
gbe_port_reset(slave);
/* Disable forwarding */
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +2160,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
gbe_sgmii_config(priv, slave);
gbe_port_reset(slave);
+ gbe_sgmii_rtreset(priv, slave, false);
gbe_port_config(priv, slave, priv->rx_packet_max);
gbe_set_slave_mac(slave, gbe_intf);
/* enable forwarding */
@@ -2189,14 +2403,15 @@ static void netcp_ethss_timer(unsigned long arg)
netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
- spin_lock_bh(&gbe_dev->hw_stats_lock);
+ /* The timer runs in BH context, so there is no need to disable BHs */
+ spin_lock(&gbe_dev->hw_stats_lock);
if (gbe_dev->ss_version == GBE_SS_VERSION_14)
gbe_update_stats_ver14(gbe_dev, NULL);
else
gbe_update_stats(gbe_dev, NULL);
- spin_unlock_bh(&gbe_dev->hw_stats_lock);
+ spin_unlock(&gbe_dev->hw_stats_lock);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
@@ -2490,10 +2705,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
struct gbe_slave *slave;
- for (;;) {
+ while (!list_empty(&gbe_dev->secondary_slaves)) {
slave = first_sec_slave(gbe_dev);
- if (!slave)
- break;
+
if (slave->phy)
phy_disconnect(slave->phy);
list_del(&slave->slave_list);
@@ -2554,15 +2768,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->xgbe_serdes_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = xgbe10_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- XGBE10_NUM_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->ss_version = XGBE_SS_VERSION_10;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
XGBE10_SGMII_MODULE_OFFSET;
@@ -2576,8 +2803,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = xgbe10_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -2662,30 +2887,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->switch_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+ gbe_dev->et_stats = gbe13_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBE13_NUM_HW_STAT_ENTRIES *
- gbe_dev->max_num_slaves * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
+ /* K2HK has only 2 hw stats modules visible at a time, so
+ * module 0 & 2 points to one base and
+ * module 1 & 3 points to the other base
+ */
for (i = 0; i < gbe_dev->max_num_slaves; i++) {
gbe_dev->hw_stats_regs[i] =
gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
- (GBE_HW_STATS_REG_MAP_SZ * i);
+ (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
}
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbe13_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
@@ -2712,15 +2952,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
void __iomem *regs;
int i, ret;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = gbenu_et_stats;
+
+ if (IS_SS_ID_NU(gbe_dev))
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+ else
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ GBENU_ET_STATS_PORT_SIZE;
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBENU_NUM_HW_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
@@ -2748,16 +3007,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbenu_et_stats;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
- if (IS_SS_ID_NU(gbe_dev))
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
- else
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- GBENU_ET_STATS_PORT_SIZE;
-
/* Subsystem registers */
GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -2787,7 +3038,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
struct cpsw_ale_params ale_params;
struct gbe_priv *gbe_dev;
u32 slave_num;
- int ret = 0;
+ int i, ret = 0;
if (!node) {
dev_err(dev, "device tree info unavailable\n");
@@ -2839,14 +3090,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
&gbe_dev->dma_chan_name);
if (ret < 0) {
dev_err(dev, "missing \"tx-channel\" parameter\n");
- ret = -ENODEV;
- goto quit;
+ return -EINVAL;
}
if (!strcmp(node->name, "gbe")) {
ret = get_gbe_resource_version(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
@@ -2857,22 +3107,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
else
ret = -ENODEV;
- if (ret)
- goto quit;
} else if (!strcmp(node->name, "xgbe")) {
ret = set_xgbe_ethss10_priv(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
gbe_dev->ss_regs);
- if (ret)
- goto quit;
} else {
dev_err(dev, "unknown GBE node(%s)\n", node->name);
ret = -ENODEV;
- goto quit;
}
+ if (ret)
+ return ret;
+
interfaces = of_get_child_by_name(node, "interfaces");
if (!interfaces)
dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +3128,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
if (ret)
- goto quit;
+ return ret;
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
if (ret)
- goto quit;
+ return ret;
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +3147,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
break;
}
+ of_node_put(interfaces);
if (!gbe_dev->num_slaves)
dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +3160,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
of_node_put(secondary_ports);
if (!gbe_dev->num_slaves) {
- dev_err(dev, "No network interface or secondary ports configured\n");
+ dev_err(dev,
+ "No network interface or secondary ports configured\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
}
memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +3177,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!gbe_dev->ale) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
} else {
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
}
@@ -2935,6 +3185,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
/* initialize host port */
gbe_init_host_port(gbe_dev);
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ gbe_reset_mod_stats_ver14(gbe_dev, i);
+ else
+ gbe_reset_mod_stats(gbe_dev, i);
+ }
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
init_timer(&gbe_dev->timer);
gbe_dev->timer.data = (unsigned long)gbe_dev;
gbe_dev->timer.function = netcp_ethss_timer;
@@ -2943,14 +3202,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
*inst_priv = gbe_dev;
return 0;
-quit:
- if (gbe_dev->hw_stats)
- devm_kfree(dev, gbe_dev->hw_stats);
- cpsw_ale_destroy(gbe_dev->ale);
- if (gbe_dev->ss_regs)
- devm_iounmap(dev, gbe_dev->ss_regs);
- of_node_put(interfaces);
- devm_kfree(dev, gbe_dev);
+free_sec_ports:
+ free_secondary_ports(gbe_dev);
return ret;
}
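The probe cleanup above relies on managed resources: memory from devm_kzalloc() and mappings from devm_ioremap_resource() are released by the driver core when probe returns an error, which is why the old quit: label full of devm_kfree()/devm_iounmap() calls could be dropped. A minimal sketch of the idiom, with hypothetical names:

	/* Sketch: devm_*-allocated resources need no manual unwind on error. */
	static int example_probe(struct platform_device *pdev)
	{
		u64 *stats = devm_kzalloc(&pdev->dev, 16 * sizeof(u64), GFP_KERNEL);

		if (!stats)
			return -ENOMEM;	/* freed automatically by the devres core */

		return 0;
	}

Only state outside devres tracking, such as the secondary ports registered during probe, still needs the explicit free_sec_ports unwind.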
@@ -3023,12 +3276,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
free_secondary_ports(gbe_dev);
if (!list_empty(&gbe_dev->gbe_intf_head))
- dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+ dev_alert(gbe_dev->dev,
+ "unreleased ethss interfaces present\n");
- devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
- devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
- memset(gbe_dev, 0x00, sizeof(*gbe_dev));
- devm_kfree(gbe_dev->dev, gbe_dev);
return 0;
}
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
#include "netcp.h"
+#define SGMII_SRESET_RESET BIT(0)
+#define SGMII_SRESET_RTRESET BIT(1)
+
#define SGMII_REG_STATUS_LOCK BIT(4)
#define SGMII_REG_STATUS_LINK BIT(0)
#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
{
/* Soft reset */
- sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
- while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+ sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+ SGMII_SRESET_RESET);
+
+ while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+ SGMII_SRESET_RESET) != 0x0)
;
+
return 0;
}
+/* port is 0-based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+ u32 reg;
+ bool oldval;
+
+	/* Read the current RT reset state, then set or clear it */
+ reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+ oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+ if (set)
+ reg |= SGMII_SRESET_RTRESET;
+ else
+ reg &= ~SGMII_SRESET_RTRESET;
+ sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+ wmb();
+
+ return oldval;
+}
+
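Two details in the SGMII hunks are worth spelling out. First, the reset poll now masks SGMII_SRESET_RESET instead of comparing the whole register with zero, so a port whose RTRESET bit is held high can still complete a soft reset. Second, netcp_sgmii_rtreset() returns the previous RTRESET state, which lets a caller save and restore it around a reconfiguration. A hypothetical usage sketch (the helper name is assumed, not from this patch):

	/* Sketch: hold the port in RT reset while reprogramming it. */
	bool was_held = netcp_sgmii_rtreset(sgmii_ofs, port, true);

	reconfigure_port(port);					/* hypothetical */
	netcp_sgmii_rtreset(sgmii_ofs, port, was_held);		/* restore prior state */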
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
{
u32 status = 0, link = 0;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index a3f7610002aa..0a15acc075b3 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -40,6 +40,7 @@
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/tick.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -2273,7 +2274,8 @@ static int __init tile_net_init_module(void)
tile_net_dev_init(name, mac);
if (!network_cpus_init())
- network_cpus_map = *cpu_online_mask;
+ cpumask_and(&network_cpus_map, housekeeping_cpumask(),
+ cpu_online_mask);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4208dd7ef101..d95f9aae95e7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
- if (!lp->regs) {
+ if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->regs);
goto free_netdev;
}
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
- if (!lp->dma_regs) {
+ if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->dma_regs);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
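The axienet fix above corrects a common devm_ioremap_resource() misuse: the function returns an ERR_PTR()-encoded error, never NULL, so a !ptr check can never fire. The idiomatic pattern, as now used by the driver:

	/* Sketch: devm_ioremap_resource() reports errors via ERR_PTR(). */
	void __iomem *regs = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* e.g. -EBUSY or -ENOMEM, not NULL */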
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 5924d4219e9e..4ca2341d7f06 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -74,15 +74,6 @@
#define NULL 0
#endif
-#ifdef LITTLE_ENDIAN
-#define HWM_REVERSE(x) (x)
-#else
-#define HWM_REVERSE(x) ((((x)<<24L)&0xff000000L) + \
- (((x)<< 8L)&0x00ff0000L) + \
- (((x)>> 8L)&0x0000ff00L) + \
- (((x)>>24L)&0x000000ffL))
-#endif
-
#define C_INDIC (1L<<25)
#define A_INDIC (1L<<26)
#define RD_FS_LOCAL 0x80
diff --git a/drivers/net/fjes/Makefile b/drivers/net/fjes/Makefile
new file mode 100644
index 000000000000..523e3d7cf7aa
--- /dev/null
+++ b/drivers/net/fjes/Makefile
@@ -0,0 +1,30 @@
+################################################################################
+#
+# FUJITSU Extended Socket Network Device driver
+# Copyright (c) 2015 FUJITSU LIMITED
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+################################################################################
+
+
+#
+# Makefile for the FUJITSU Extended Socket network device driver
+#
+
+obj-$(CONFIG_FUJITSU_ES) += fjes.o
+
+fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o
diff --git a/drivers/net/fjes/fjes.h b/drivers/net/fjes/fjes.h
new file mode 100644
index 000000000000..a592fe21c698
--- /dev/null
+++ b/drivers/net/fjes/fjes.h
@@ -0,0 +1,77 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_H_
+#define FJES_H_
+
+#include <linux/acpi.h>
+
+#include "fjes_hw.h"
+
+#define FJES_ACPI_SYMBOL "Extended Socket"
+#define FJES_MAX_QUEUES 1
+#define FJES_TX_RETRY_INTERVAL (20 * HZ)
+#define FJES_TX_RETRY_TIMEOUT (100)
+#define FJES_TX_TX_STALL_TIMEOUT (FJES_TX_RETRY_INTERVAL / 2)
+#define FJES_OPEN_ZONE_UPDATE_WAIT (300) /* msec */
+#define FJES_IRQ_WATCH_DELAY (HZ)
+
+/* board specific private data structure */
+struct fjes_adapter {
+ struct net_device *netdev;
+ struct platform_device *plat_dev;
+
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats64;
+
+ unsigned int tx_retry_count;
+ unsigned long tx_start_jiffies;
+ unsigned long rx_last_jiffies;
+ bool unset_rx_last;
+
+ struct work_struct force_close_task;
+ bool force_reset;
+ bool open_guard;
+
+ bool irq_registered;
+
+ struct workqueue_struct *txrx_wq;
+ struct workqueue_struct *control_wq;
+
+ struct work_struct tx_stall_task;
+ struct work_struct raise_intr_rxdata_task;
+
+ struct work_struct unshare_watch_task;
+ unsigned long unshare_watch_bitmask;
+
+ struct delayed_work interrupt_watch_task;
+ bool interrupt_watch_enable;
+
+ struct fjes_hw hw;
+};
+
+extern char fjes_driver_name[];
+extern char fjes_driver_version[];
+extern const u32 fjes_support_mtu[];
+
+void fjes_set_ethtool_ops(struct net_device *);
+
+#endif /* FJES_H_ */
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
new file mode 100644
index 000000000000..0119dd199276
--- /dev/null
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -0,0 +1,137 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/* ethtool support for fjes */
+
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+
+#include "fjes.h"
+
+struct fjes_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define FJES_STAT(name, stat) { \
+ .stat_string = name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \
+ .stat_offset = offsetof(struct fjes_adapter, stat) \
+}
+
+static const struct fjes_stats fjes_gstrings_stats[] = {
+ FJES_STAT("rx_packets", stats64.rx_packets),
+ FJES_STAT("tx_packets", stats64.tx_packets),
+ FJES_STAT("rx_bytes", stats64.rx_bytes),
+ FJES_STAT("tx_bytes", stats64.rx_bytes),
+ FJES_STAT("rx_dropped", stats64.rx_dropped),
+ FJES_STAT("tx_dropped", stats64.tx_dropped),
+};
+
+static void fjes_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ char *p;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+ p = (char *)adapter + fjes_gstrings_stats[i].stat_offset;
+ data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64))
+ ? *(u64 *)p : *(u32 *)p;
+ }
+}
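FJES_STAT() records each counter's size and byte offset within struct fjes_adapter, so the copy loop above can walk the table without naming any field. Roughly what one entry expands to (worked example, not literal preprocessor output):

	/* FJES_STAT("rx_packets", stats64.rx_packets) expands to roughly: */
	{
		.stat_string = "rx_packets",
		.sizeof_stat = sizeof(((struct fjes_adapter *)0)->stats64.rx_packets), /* 8 */
		.stat_offset = offsetof(struct fjes_adapter, stats64.rx_packets),
	}

and the loop then fetches *(u64 *)((char *)adapter + stat_offset) for 8-byte fields.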
+
+static void fjes_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+ memcpy(p, fjes_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int fjes_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(fjes_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void fjes_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct platform_device *plat_dev;
+
+ plat_dev = adapter->plat_dev;
+
+ strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, fjes_driver_version,
+ sizeof(drvinfo->version));
+
+ strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+ snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+ "platform:%s", plat_dev->name);
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static int fjes_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = 0;
+ ecmd->advertising = 0;
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+ ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */
+
+ return 0;
+}
+
+static const struct ethtool_ops fjes_ethtool_ops = {
+ .get_settings = fjes_get_settings,
+ .get_drvinfo = fjes_get_drvinfo,
+ .get_ethtool_stats = fjes_get_ethtool_stats,
+ .get_strings = fjes_get_strings,
+ .get_sset_count = fjes_get_sset_count,
+};
+
+void fjes_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &fjes_ethtool_ops;
+}
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
new file mode 100644
index 000000000000..b5f4a78da828
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.c
@@ -0,0 +1,1125 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "fjes_hw.h"
+#include "fjes.h"
+
+static void fjes_hw_update_zone_task(struct work_struct *);
+static void fjes_hw_epstop_task(struct work_struct *);
+
+/* supported MTU list */
+const u32 fjes_support_mtu[] = {
+ FJES_MTU_DEFINE(8 * 1024),
+ FJES_MTU_DEFINE(16 * 1024),
+ FJES_MTU_DEFINE(32 * 1024),
+ FJES_MTU_DEFINE(64 * 1024),
+ 0
+};
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
+{
+ u8 *base = hw->base;
+ u32 value = 0;
+
+ value = readl(&base[reg]);
+
+ return value;
+}
+
+static u8 *fjes_hw_iomap(struct fjes_hw *hw)
+{
+ u8 *base;
+
+ if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
+ fjes_driver_name)) {
+ pr_err("request_mem_region failed\n");
+ return NULL;
+ }
+
+ base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+
+ return base;
+}
+
+static void fjes_hw_iounmap(struct fjes_hw *hw)
+{
+ iounmap(hw->base);
+ release_mem_region(hw->hw_res.start, hw->hw_res.size);
+}
+
+int fjes_hw_reset(struct fjes_hw *hw)
+{
+ union REG_DCTL dctl;
+ int timeout;
+
+ dctl.reg = 0;
+ dctl.bits.reset = 1;
+ wr32(XSCT_DCTL, dctl.reg);
+
+ timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
+ dctl.reg = rd32(XSCT_DCTL);
+ while ((dctl.bits.reset == 1) && (timeout > 0)) {
+ msleep(1000);
+ dctl.reg = rd32(XSCT_DCTL);
+ timeout -= 1000;
+ }
+
+ return timeout > 0 ? 0 : -EIO;
+}
+
+static int fjes_hw_get_max_epid(struct fjes_hw *hw)
+{
+ union REG_MAX_EP info;
+
+ info.reg = rd32(XSCT_MAX_EP);
+
+ return info.bits.maxep;
+}
+
+static int fjes_hw_get_my_epid(struct fjes_hw *hw)
+{
+ union REG_OWNER_EPID info;
+
+ info.reg = rd32(XSCT_OWNER_EPID);
+
+ return info.bits.epid;
+}
+
+static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
+{
+ size_t size;
+
+ size = sizeof(struct fjes_device_shared_info) +
+ (sizeof(u8) * hw->max_epid);
+ hw->hw_info.share = kzalloc(size, GFP_KERNEL);
+ if (!hw->hw_info.share)
+ return -ENOMEM;
+
+ hw->hw_info.share->epnum = hw->max_epid;
+
+ return 0;
+}
+
+static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
+{
+ kfree(hw->hw_info.share);
+ hw->hw_info.share = NULL;
+}
+
+static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
+{
+ void *mem;
+
+ mem = vzalloc(EP_BUFFER_SIZE);
+ if (!mem)
+ return -ENOMEM;
+
+ epbh->buffer = mem;
+ epbh->size = EP_BUFFER_SIZE;
+
+ epbh->info = (union ep_buffer_info *)mem;
+ epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
+
+ return 0;
+}
+
+static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
+{
+ if (epbh->buffer)
+ vfree(epbh->buffer);
+
+ epbh->buffer = NULL;
+ epbh->size = 0;
+
+ epbh->info = NULL;
+ epbh->ring = NULL;
+}
+
+void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
+{
+ union ep_buffer_info *info = epbh->info;
+ u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+ int i;
+
+ for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+ vlan_id[i] = info->v1i.vlan_id[i];
+
+ memset(info, 0, sizeof(union ep_buffer_info));
+
+ info->v1i.version = 0; /* version 0 */
+
+ for (i = 0; i < ETH_ALEN; i++)
+ info->v1i.mac_addr[i] = mac_addr[i];
+
+ info->v1i.head = 0;
+ info->v1i.tail = 1;
+
+ info->v1i.info_size = sizeof(union ep_buffer_info);
+ info->v1i.buffer_size = epbh->size - info->v1i.info_size;
+
+ info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
+ info->v1i.count_max =
+ EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
+
+ for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+ info->v1i.vlan_id[i] = vlan_id[i];
+}
+
+void
+fjes_hw_init_command_registers(struct fjes_hw *hw,
+ struct fjes_device_command_param *param)
+{
+ /* Request Buffer length */
+ wr32(XSCT_REQBL, (__le32)(param->req_len));
+ /* Response Buffer Length */
+ wr32(XSCT_RESPBL, (__le32)(param->res_len));
+
+ /* Request Buffer Address */
+ wr32(XSCT_REQBAL,
+ (__le32)(param->req_start & GENMASK_ULL(31, 0)));
+ wr32(XSCT_REQBAH,
+ (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
+
+ /* Response Buffer Address */
+ wr32(XSCT_RESPBAL,
+ (__le32)(param->res_start & GENMASK_ULL(31, 0)));
+ wr32(XSCT_RESPBAH,
+ (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
+
+ /* Share status address */
+ wr32(XSCT_SHSTSAL,
+ (__le32)(param->share_start & GENMASK_ULL(31, 0)));
+ wr32(XSCT_SHSTSAH,
+ (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
+}
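The register init above splits each 64-bit physical address across a low/high 32-bit register pair using GENMASK_ULL. A worked example of the arithmetic, assuming req_start = 0x0000000123456000:

	/* low half:  0x123456000 & GENMASK_ULL(31, 0)          = 0x23456000 */
	/* high half: (0x123456000 & GENMASK_ULL(63, 32)) >> 32 = 0x00000001 */

which is equivalent to the kernel's lower_32_bits()/upper_32_bits() helpers.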
+
+static int fjes_hw_setup(struct fjes_hw *hw)
+{
+ u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct fjes_device_command_param param;
+ struct ep_share_mem_info *buf_pair;
+ size_t mem_size;
+ int result;
+ int epidx;
+ void *buf;
+
+ hw->hw_info.max_epid = &hw->max_epid;
+ hw->hw_info.my_epid = &hw->my_epid;
+
+ buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ hw->ep_shm_info = (struct ep_share_mem_info *)buf;
+
+ mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+ hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+ if (!(hw->hw_info.req_buf))
+ return -ENOMEM;
+
+ hw->hw_info.req_buf_size = mem_size;
+
+ mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+ hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+ if (!(hw->hw_info.res_buf))
+ return -ENOMEM;
+
+ hw->hw_info.res_buf_size = mem_size;
+
+ result = fjes_hw_alloc_shared_status_region(hw);
+ if (result)
+ return result;
+
+ hw->hw_info.buffer_share_bit = 0;
+ hw->hw_info.buffer_unshare_reserve_bit = 0;
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx != hw->my_epid) {
+ buf_pair = &hw->ep_shm_info[epidx];
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+ if (result)
+ return result;
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+ if (result)
+ return result;
+
+ fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+ fjes_support_mtu[0]);
+ fjes_hw_setup_epbuf(&buf_pair->rx, mac,
+ fjes_support_mtu[0]);
+ }
+ }
+
+ memset(&param, 0, sizeof(param));
+
+ param.req_len = hw->hw_info.req_buf_size;
+ param.req_start = __pa(hw->hw_info.req_buf);
+ param.res_len = hw->hw_info.res_buf_size;
+ param.res_start = __pa(hw->hw_info.res_buf);
+
+ param.share_start = __pa(hw->hw_info.share->ep_status);
+
+ fjes_hw_init_command_registers(hw, &param);
+
+ return 0;
+}
+
+static void fjes_hw_cleanup(struct fjes_hw *hw)
+{
+ int epidx;
+
+ if (!hw->ep_shm_info)
+ return;
+
+ fjes_hw_free_shared_status_region(hw);
+
+ kfree(hw->hw_info.req_buf);
+ hw->hw_info.req_buf = NULL;
+
+ kfree(hw->hw_info.res_buf);
+ hw->hw_info.res_buf = NULL;
+
+ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+ }
+
+ kfree(hw->ep_shm_info);
+ hw->ep_shm_info = NULL;
+}
+
+int fjes_hw_init(struct fjes_hw *hw)
+{
+ int ret;
+
+ hw->base = fjes_hw_iomap(hw);
+ if (!hw->base)
+ return -EIO;
+
+ ret = fjes_hw_reset(hw);
+ if (ret)
+ return ret;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+ INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
+ INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
+
+ mutex_init(&hw->hw_info.lock);
+
+ hw->max_epid = fjes_hw_get_max_epid(hw);
+ hw->my_epid = fjes_hw_get_my_epid(hw);
+
+ if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
+ return -ENXIO;
+
+ ret = fjes_hw_setup(hw);
+
+ return ret;
+}
+
+void fjes_hw_exit(struct fjes_hw *hw)
+{
+ int ret;
+
+ if (hw->base) {
+ ret = fjes_hw_reset(hw);
+ if (ret)
+ pr_err("%s: reset error", __func__);
+
+ fjes_hw_iounmap(hw);
+ hw->base = NULL;
+ }
+
+ fjes_hw_cleanup(hw);
+
+ cancel_work_sync(&hw->update_zone_task);
+ cancel_work_sync(&hw->epstop_task);
+}
+
+static enum fjes_dev_command_response_e
+fjes_hw_issue_request_command(struct fjes_hw *hw,
+ enum fjes_dev_command_request_type type)
+{
+ enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
+ union REG_CR cr;
+ union REG_CS cs;
+ int timeout;
+
+ cr.reg = 0;
+ cr.bits.req_start = 1;
+ cr.bits.req_code = type;
+ wr32(XSCT_CR, cr.reg);
+ cr.reg = rd32(XSCT_CR);
+
+ if (cr.bits.error == 0) {
+ timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
+ cs.reg = rd32(XSCT_CS);
+
+ while ((cs.bits.complete != 1) && timeout > 0) {
+ msleep(1000);
+ cs.reg = rd32(XSCT_CS);
+ timeout -= 1000;
+ }
+
+ if (cs.bits.complete == 1)
+ ret = FJES_CMD_STATUS_NORMAL;
+ else if (timeout <= 0)
+ ret = FJES_CMD_STATUS_TIMEOUT;
+
+ } else {
+ switch (cr.bits.err_info) {
+ case FJES_CMD_REQ_ERR_INFO_PARAM:
+ ret = FJES_CMD_STATUS_ERROR_PARAM;
+ break;
+ case FJES_CMD_REQ_ERR_INFO_STATUS:
+ ret = FJES_CMD_STATUS_ERROR_STATUS;
+ break;
+ default:
+ ret = FJES_CMD_STATUS_UNKNOWN;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int fjes_hw_request_info(struct fjes_hw *hw)
+{
+ union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+ union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+ enum fjes_dev_command_response_e ret;
+ int result;
+
+ memset(req_buf, 0, hw->hw_info.req_buf_size);
+ memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+ req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
+
+ res_buf->info.length = 0;
+ res_buf->info.code = 0;
+
+ ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
+
+ result = 0;
+
+ if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
+ res_buf->info.length) {
+ result = -ENOMSG;
+ } else if (ret == FJES_CMD_STATUS_NORMAL) {
+ switch (res_buf->info.code) {
+ case FJES_CMD_REQ_RES_CODE_NORMAL:
+ result = 0;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case FJES_CMD_STATUS_UNKNOWN:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_TIMEOUT:
+ result = -EBUSY;
+ break;
+ case FJES_CMD_STATUS_ERROR_PARAM:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_ERROR_STATUS:
+ result = -EPERM;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ }
+
+ return result;
+}
+
+int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
+ struct ep_share_mem_info *buf_pair)
+{
+ union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+ union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+ enum fjes_dev_command_response_e ret;
+ int page_count;
+ int timeout;
+ int i, idx;
+ void *addr;
+ int result;
+
+ if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+ return 0;
+
+ memset(req_buf, 0, hw->hw_info.req_buf_size);
+ memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+ req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
+ buf_pair->tx.size,
+ buf_pair->rx.size);
+ req_buf->share_buffer.epid = dest_epid;
+
+ idx = 0;
+ req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
+ page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
+ for (i = 0; i < page_count; i++) {
+ addr = ((u8 *)(buf_pair->tx.buffer)) +
+ (i * EP_BUFFER_INFO_SIZE);
+ req_buf->share_buffer.buffer[idx++] =
+ (__le64)(page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr));
+ }
+
+ req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
+ page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
+ for (i = 0; i < page_count; i++) {
+ addr = ((u8 *)(buf_pair->rx.buffer)) +
+ (i * EP_BUFFER_INFO_SIZE);
+ req_buf->share_buffer.buffer[idx++] =
+ (__le64)(page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr));
+ }
+
+ res_buf->share_buffer.length = 0;
+ res_buf->share_buffer.code = 0;
+
+ ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
+
+ timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+ while ((ret == FJES_CMD_STATUS_NORMAL) &&
+ (res_buf->share_buffer.length ==
+ FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
+ (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
+ (timeout > 0)) {
+ msleep(200 + hw->my_epid * 20);
+ timeout -= (200 + hw->my_epid * 20);
+
+ res_buf->share_buffer.length = 0;
+ res_buf->share_buffer.code = 0;
+
+ ret = fjes_hw_issue_request_command(
+ hw, FJES_CMD_REQ_SHARE_BUFFER);
+ }
+
+ result = 0;
+
+ if (res_buf->share_buffer.length !=
+ FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
+ result = -ENOMSG;
+ else if (ret == FJES_CMD_STATUS_NORMAL) {
+ switch (res_buf->share_buffer.code) {
+ case FJES_CMD_REQ_RES_CODE_NORMAL:
+ result = 0;
+ set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+ break;
+ case FJES_CMD_REQ_RES_CODE_BUSY:
+ result = -EBUSY;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case FJES_CMD_STATUS_UNKNOWN:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_TIMEOUT:
+ result = -EBUSY;
+ break;
+ case FJES_CMD_STATUS_ERROR_PARAM:
+ case FJES_CMD_STATUS_ERROR_STATUS:
+ default:
+ result = -EPERM;
+ break;
+ }
+ }
+
+ return result;
+}
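Because the EP buffers come from vzalloc(), they are virtually contiguous but physically scattered, so the share request above lists the physical address of every EP_BUFFER_INFO_SIZE (4 KiB) chunk individually. The per-chunk translation, in isolation:

	/* Sketch: physical address of one 4 KiB chunk of a vmalloc'ed buffer. */
	void *addr = (u8 *)buffer + i * EP_BUFFER_INFO_SIZE;
	phys_addr_t phys = page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);

offset_in_page(addr) is zero here on 4 KiB-page systems, since vmalloc allocations start page-aligned and the chunks step in page-sized units.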
+
+int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
+{
+ union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+ union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+ struct fjes_device_shared_info *share = hw->hw_info.share;
+ enum fjes_dev_command_response_e ret;
+ int timeout;
+ int result;
+
+ if (!hw->base)
+ return -EPERM;
+
+ if (!req_buf || !res_buf || !share)
+ return -EPERM;
+
+ if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+ return 0;
+
+ memset(req_buf, 0, hw->hw_info.req_buf_size);
+ memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+ req_buf->unshare_buffer.length =
+ FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
+ req_buf->unshare_buffer.epid = dest_epid;
+
+ res_buf->unshare_buffer.length = 0;
+ res_buf->unshare_buffer.code = 0;
+
+ ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+
+ timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+ while ((ret == FJES_CMD_STATUS_NORMAL) &&
+ (res_buf->unshare_buffer.length ==
+ FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
+ (res_buf->unshare_buffer.code ==
+ FJES_CMD_REQ_RES_CODE_BUSY) &&
+ (timeout > 0)) {
+ msleep(200 + hw->my_epid * 20);
+ timeout -= (200 + hw->my_epid * 20);
+
+ res_buf->unshare_buffer.length = 0;
+ res_buf->unshare_buffer.code = 0;
+
+ ret =
+ fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+ }
+
+ result = 0;
+
+ if (res_buf->unshare_buffer.length !=
+ FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
+ result = -ENOMSG;
+ } else if (ret == FJES_CMD_STATUS_NORMAL) {
+ switch (res_buf->unshare_buffer.code) {
+ case FJES_CMD_REQ_RES_CODE_NORMAL:
+ result = 0;
+ clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+ break;
+ case FJES_CMD_REQ_RES_CODE_BUSY:
+ result = -EBUSY;
+ break;
+ default:
+ result = -EPERM;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case FJES_CMD_STATUS_UNKNOWN:
+ result = -EPERM;
+ break;
+ case FJES_CMD_STATUS_TIMEOUT:
+ result = -EBUSY;
+ break;
+ case FJES_CMD_STATUS_ERROR_PARAM:
+ case FJES_CMD_STATUS_ERROR_STATUS:
+ default:
+ result = -EPERM;
+ break;
+ }
+ }
+
+ return result;
+}
+
+int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
+ enum REG_ICTL_MASK mask)
+{
+ u32 ig = mask | dest_epid;
+
+ wr32(XSCT_IG, cpu_to_le32(ig));
+
+ return 0;
+}
+
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
+{
+ u32 cur_is;
+
+ cur_is = rd32(XSCT_IS);
+
+ return cur_is;
+}
+
+void fjes_hw_set_irqmask(struct fjes_hw *hw,
+ enum REG_ICTL_MASK intr_mask, bool mask)
+{
+ if (mask)
+ wr32(XSCT_IMS, intr_mask);
+ else
+ wr32(XSCT_IMC, intr_mask);
+}
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
+{
+ if (epid >= hw->max_epid)
+ return false;
+
+ if ((hw->ep_shm_info[epid].es_status !=
+ FJES_ZONING_STATUS_ENABLE) ||
+ (hw->ep_shm_info[hw->my_epid].zone ==
+ FJES_ZONING_ZONE_TYPE_NONE))
+ return false;
+ else
+ return (hw->ep_shm_info[epid].zone ==
+ hw->ep_shm_info[hw->my_epid].zone);
+}
+
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
+ int dest_epid)
+{
+ int value = false;
+
+ if (dest_epid < share->epnum)
+ value = share->ep_status[dest_epid];
+
+ return value;
+}
+
+static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
+{
+ return test_bit(src_epid, &hw->txrx_stop_req_bit);
+}
+
+static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
+{
+ return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_DONE);
+}
+
+enum ep_partner_status
+fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
+{
+ enum ep_partner_status status;
+
+ if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
+ if (fjes_hw_epid_is_stop_requested(hw, epid)) {
+ status = EP_PARTNER_WAITING;
+ } else {
+ if (fjes_hw_epid_is_stop_process_done(hw, epid))
+ status = EP_PARTNER_COMPLETE;
+ else
+ status = EP_PARTNER_SHARED;
+ }
+ } else {
+ status = EP_PARTNER_UNSHARE;
+ }
+
+ return status;
+}
+
+void fjes_hw_raise_epstop(struct fjes_hw *hw)
+{
+ enum ep_partner_status status;
+ int epidx;
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ status = fjes_hw_get_partner_ep_status(hw, epidx);
+ switch (status) {
+ case EP_PARTNER_SHARED:
+ fjes_hw_raise_interrupt(hw, epidx,
+ REG_ICTL_MASK_TXRX_STOP_REQ);
+ break;
+ default:
+ break;
+ }
+
+ set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+ set_bit(epidx, &hw->txrx_stop_req_bit);
+
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_REQUEST;
+ }
+}
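raise_epstop() is one half of a handshake carried over the shared rx_status words, which fjes_hw_wait_epstop() below completes. A compressed view of the flow (illustrative summary, reconstructed from the code in this file):

	/* Handshake summary:
	 *   local:  tx.info->v1i.rx_status |= FJES_RX_STOP_REQ_REQUEST; raise IRQ
	 *   remote: its epstop_task sets FJES_RX_STOP_REQ_DONE in its tx buffer,
	 *           which is the local side's rx buffer
	 *   local:  wait_epstop() polls rx.info for DONE in 100 ms steps, up to
	 *           FJES_COMMAND_EPSTOP_WAIT_TIMEOUT seconds, then clears any
	 *           stragglers from buffer_unshare_reserve_bit
	 */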
+
+int fjes_hw_wait_epstop(struct fjes_hw *hw)
+{
+ enum ep_partner_status status;
+ union ep_buffer_info *info;
+ int wait_time = 0;
+ int epidx;
+
+ while (hw->hw_info.buffer_unshare_reserve_bit &&
+ (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ status = fjes_hw_epid_is_shared(hw->hw_info.share,
+ epidx);
+ info = hw->ep_shm_info[epidx].rx.info;
+ if ((!status ||
+ (info->v1i.rx_status &
+ FJES_RX_STOP_REQ_DONE)) &&
+ test_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit)) {
+ clear_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+ }
+
+ msleep(100);
+ wait_time += 100;
+ }
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
+ clear_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+
+ return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
+ ? 0 : -EBUSY;
+}
+
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
+{
+ union ep_buffer_info *info = epbh->info;
+
+ return (info->common.version == version);
+}
+
+bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
+{
+ union ep_buffer_info *info = epbh->info;
+
+ return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+}
+
+bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+ union ep_buffer_info *info = epbh->info;
+ bool ret = false;
+ int i;
+
+ if (vlan_id == 0) {
+ ret = true;
+ } else {
+ for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+ if (vlan_id == info->v1i.vlan_id[i]) {
+ ret = true;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+ union ep_buffer_info *info = epbh->info;
+ int i;
+
+ for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+ if (info->v1i.vlan_id[i] == 0) {
+ info->v1i.vlan_id[i] = vlan_id;
+ return true;
+ }
+ }
+ return false;
+}
+
+void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+ union ep_buffer_info *info = epbh->info;
+ int i;
+
+	if (vlan_id != 0) {
+ for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+ if (vlan_id == info->v1i.vlan_id[i])
+ info->v1i.vlan_id[i] = 0;
+ }
+ }
+}
+
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
+{
+ union ep_buffer_info *info = epbh->info;
+
+ if (info->v1i.count_max == 0)
+ return true;
+
+ return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
+ info->v1i.count_max);
+}
+
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
+ size_t *psize)
+{
+ union ep_buffer_info *info = epbh->info;
+ struct esmem_frame *ring_frame;
+ void *frame;
+
+ ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+ (info->v1i.head,
+ info->v1i.count_max) *
+ info->v1i.frame_max]);
+
+ *psize = (size_t)ring_frame->frame_size;
+
+ frame = ring_frame->frame_data;
+
+ return frame;
+}
+
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
+{
+ union ep_buffer_info *info = epbh->info;
+
+ if (fjes_hw_epbuf_rx_is_empty(epbh))
+ return;
+
+ EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
+}
+
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
+ void *frame, size_t size)
+{
+ union ep_buffer_info *info = epbh->info;
+ struct esmem_frame *ring_frame;
+
+ if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
+ return -ENOBUFS;
+
+ ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+ (info->v1i.tail - 1,
+ info->v1i.count_max) *
+ info->v1i.frame_max]);
+
+ ring_frame->frame_size = size;
+ memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
+
+ EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
+
+ return 0;
+}
+
+static void fjes_hw_update_zone_task(struct work_struct *work)
+{
+ struct fjes_hw *hw = container_of(work,
+ struct fjes_hw, update_zone_task);
+
+ struct my_s {u8 es_status; u8 zone; } *info;
+ union fjes_device_command_res *res_buf;
+ enum ep_partner_status pstatus;
+
+ struct fjes_adapter *adapter;
+ struct net_device *netdev;
+
+ ulong unshare_bit = 0;
+ ulong share_bit = 0;
+ ulong irq_bit = 0;
+
+ int epidx;
+ int ret;
+
+ adapter = (struct fjes_adapter *)hw->back;
+ netdev = adapter->netdev;
+ res_buf = hw->hw_info.res_buf;
+ info = (struct my_s *)&res_buf->info.info;
+
+ mutex_lock(&hw->hw_info.lock);
+
+ ret = fjes_hw_request_info(hw);
+ switch (ret) {
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ if (!work_pending(&adapter->force_close_task)) {
+ adapter->force_reset = true;
+ schedule_work(&adapter->force_close_task);
+ }
+ break;
+
+ case 0:
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid) {
+ hw->ep_shm_info[epidx].es_status =
+ info[epidx].es_status;
+ hw->ep_shm_info[epidx].zone =
+ info[epidx].zone;
+ continue;
+ }
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
+ switch (pstatus) {
+ case EP_PARTNER_UNSHARE:
+ default:
+ if ((info[epidx].zone !=
+ FJES_ZONING_ZONE_TYPE_NONE) &&
+ (info[epidx].es_status ==
+ FJES_ZONING_STATUS_ENABLE) &&
+ (info[epidx].zone ==
+ info[hw->my_epid].zone))
+ set_bit(epidx, &share_bit);
+ else
+ set_bit(epidx, &unshare_bit);
+ break;
+
+ case EP_PARTNER_COMPLETE:
+ case EP_PARTNER_WAITING:
+ if ((info[epidx].zone ==
+ FJES_ZONING_ZONE_TYPE_NONE) ||
+ (info[epidx].es_status !=
+ FJES_ZONING_STATUS_ENABLE) ||
+ (info[epidx].zone !=
+ info[hw->my_epid].zone)) {
+ set_bit(epidx,
+ &adapter->unshare_watch_bitmask);
+ set_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+ break;
+
+ case EP_PARTNER_SHARED:
+ if ((info[epidx].zone ==
+ FJES_ZONING_ZONE_TYPE_NONE) ||
+ (info[epidx].es_status !=
+ FJES_ZONING_STATUS_ENABLE) ||
+ (info[epidx].zone !=
+ info[hw->my_epid].zone))
+ set_bit(epidx, &irq_bit);
+ break;
+ }
+ }
+
+ hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
+ hw->ep_shm_info[epidx].zone = info[epidx].zone;
+
+ break;
+ }
+
+ mutex_unlock(&hw->hw_info.lock);
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ if (test_bit(epidx, &share_bit)) {
+ fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr, netdev->mtu);
+
+ mutex_lock(&hw->hw_info.lock);
+
+ ret = fjes_hw_register_buff_addr(
+ hw, epidx, &hw->ep_shm_info[epidx]);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ if (!work_pending(&adapter->force_close_task)) {
+ adapter->force_reset = true;
+ schedule_work(
+ &adapter->force_close_task);
+ }
+ break;
+ }
+ mutex_unlock(&hw->hw_info.lock);
+ }
+
+ if (test_bit(epidx, &unshare_bit)) {
+ mutex_lock(&hw->hw_info.lock);
+
+ ret = fjes_hw_unregister_buff_addr(hw, epidx);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ if (!work_pending(&adapter->force_close_task)) {
+ adapter->force_reset = true;
+ schedule_work(
+ &adapter->force_close_task);
+ }
+ break;
+ }
+
+ mutex_unlock(&hw->hw_info.lock);
+
+ if (ret == 0)
+ fjes_hw_setup_epbuf(
+ &hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr, netdev->mtu);
+ }
+
+ if (test_bit(epidx, &irq_bit)) {
+ fjes_hw_raise_interrupt(hw, epidx,
+ REG_ICTL_MASK_TXRX_STOP_REQ);
+
+ set_bit(epidx, &hw->txrx_stop_req_bit);
+ hw->ep_shm_info[epidx].tx.
+ info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_REQUEST;
+ set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+ }
+
+ if (irq_bit || adapter->unshare_watch_bitmask) {
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+}
+
+static void fjes_hw_epstop_task(struct work_struct *work)
+{
+ struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
+ struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+
+ ulong remain_bit;
+ int epid_bit;
+
+ while ((remain_bit = hw->epstop_req_bit)) {
+ for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
+ if (remain_bit & 1) {
+ hw->ep_shm_info[epid_bit].
+ tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+
+ clear_bit(epid_bit, &hw->epstop_req_bit);
+ set_bit(epid_bit,
+ &adapter->unshare_watch_bitmask);
+
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(
+ adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+ }
+ }
+}
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
new file mode 100644
index 000000000000..6d57b89a0ee8
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.h
@@ -0,0 +1,334 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_HW_H_
+#define FJES_HW_H_
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "fjes_regs.h"
+
+struct fjes_hw;
+
+#define EP_BUFFER_SUPPORT_VLAN_MAX 4
+#define EP_BUFFER_INFO_SIZE 4096
+
+#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT (5 + 1) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT (8 * 3) /* sec */
+#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT (1) /* sec */
+
+#define FJES_CMD_REQ_ERR_INFO_PARAM (0x0001)
+#define FJES_CMD_REQ_ERR_INFO_STATUS (0x0002)
+
+#define FJES_CMD_REQ_RES_CODE_NORMAL (0)
+#define FJES_CMD_REQ_RES_CODE_BUSY (1)
+
+#define FJES_ZONING_STATUS_DISABLE (0x00)
+#define FJES_ZONING_STATUS_ENABLE (0x01)
+#define FJES_ZONING_STATUS_INVALID (0xFF)
+
+#define FJES_ZONING_ZONE_TYPE_NONE (0xFF)
+
+#define FJES_TX_DELAY_SEND_NONE (0)
+#define FJES_TX_DELAY_SEND_PENDING (1)
+
+#define FJES_RX_STOP_REQ_NONE (0x0)
+#define FJES_RX_STOP_REQ_DONE (0x1)
+#define FJES_RX_STOP_REQ_REQUEST (0x2)
+#define FJES_RX_POLL_WORK (0x4)
+
+#define EP_BUFFER_SIZE \
+ (((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
+ / EP_BUFFER_INFO_SIZE) * EP_BUFFER_INFO_SIZE)
+
+#define EP_RING_NUM(buffer_size, frame_size) \
+ (u32)((buffer_size) / (frame_size))
+#define EP_RING_INDEX(_num, _max) (((_num) + (_max)) % (_max))
+#define EP_RING_INDEX_INC(_num, _max) \
+ ((_num) = EP_RING_INDEX((_num) + 1, (_max)))
+#define EP_RING_FULL(_head, _tail, _max) \
+ (0 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+#define EP_RING_EMPTY(_head, _tail, _max) \
+ (1 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
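These ring macros use modular head/tail arithmetic, which is also why fjes_hw_setup_epbuf() earlier in the patch initializes head = 0 and tail = 1: a ring is empty when tail - head == 1 (mod count_max) and full when it == 0. A worked example with count_max = 8:

	/* head = 0, tail = 1:  EP_RING_INDEX(1 - 0, 8) = 1  -> empty           */
	/* head = 0, tail = 4:  EP_RING_INDEX(4 - 0, 8) = 4  -> 3 frames queued */
	/* head = 0, tail = 0:  EP_RING_INDEX(0 - 0, 8) = 0  -> full            */

so one slot is always sacrificed to distinguish full from empty.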
+
+#define FJES_MTU_TO_BUFFER_SIZE(mtu) \
+ (ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)
+#define FJES_MTU_TO_FRAME_SIZE(mtu) \
+ (sizeof(struct esmem_frame) + FJES_MTU_TO_BUFFER_SIZE(mtu))
+#define FJES_MTU_DEFINE(size) \
+ ((size) - sizeof(struct esmem_frame) - \
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+#define FJES_DEV_COMMAND_INFO_REQ_LEN (4)
+#define FJES_DEV_COMMAND_INFO_RES_LEN(epnum) (8 + 2 * (epnum))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(txb, rxb) \
+ (24 + (8 * ((txb) / EP_BUFFER_INFO_SIZE + (rxb) / EP_BUFFER_INFO_SIZE)))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN (8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN (8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN (8)
+
+#define FJES_DEV_REQ_BUF_SIZE(maxep) \
+ FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(EP_BUFFER_SIZE, EP_BUFFER_SIZE)
+#define FJES_DEV_RES_BUF_SIZE(maxep) \
+ FJES_DEV_COMMAND_INFO_RES_LEN(maxep)
+
+/* Frame & MTU */
+struct esmem_frame {
+ __le32 frame_size;
+ u8 frame_data[];
+};
+
+/* EP partner status */
+enum ep_partner_status {
+ EP_PARTNER_UNSHARE,
+ EP_PARTNER_SHARED,
+ EP_PARTNER_WAITING,
+ EP_PARTNER_COMPLETE,
+ EP_PARTNER_STATUS_MAX,
+};
+
+/* shared status region */
+struct fjes_device_shared_info {
+ int epnum;
+ u8 ep_status[];
+};
+
+/* structures for command control request data*/
+union fjes_device_command_req {
+ struct {
+ __le32 length;
+ } info;
+ struct {
+ __le32 length;
+ __le32 epid;
+ __le64 buffer[];
+ } share_buffer;
+ struct {
+ __le32 length;
+ __le32 epid;
+ } unshare_buffer;
+ struct {
+ __le32 length;
+ __le32 mode;
+ __le64 buffer_len;
+ __le64 buffer[];
+ } start_trace;
+ struct {
+ __le32 length;
+ } stop_trace;
+};
+
+/* structures for command control response data */
+union fjes_device_command_res {
+ struct {
+ __le32 length;
+ __le32 code;
+ struct {
+ u8 es_status;
+ u8 zone;
+ } info[];
+ } info;
+ struct {
+ __le32 length;
+ __le32 code;
+ } share_buffer;
+ struct {
+ __le32 length;
+ __le32 code;
+ } unshare_buffer;
+ struct {
+ __le32 length;
+ __le32 code;
+ } start_trace;
+ struct {
+ __le32 length;
+ __le32 code;
+ } stop_trace;
+};
+
+/* request command type */
+enum fjes_dev_command_request_type {
+ FJES_CMD_REQ_INFO = 0x0001,
+ FJES_CMD_REQ_SHARE_BUFFER = 0x0002,
+ FJES_CMD_REQ_UNSHARE_BUFFER = 0x0004,
+};
+
+/* parameter for command control */
+struct fjes_device_command_param {
+ u32 req_len;
+ phys_addr_t req_start;
+ u32 res_len;
+ phys_addr_t res_start;
+ phys_addr_t share_start;
+};
+
+/* error code for command control */
+enum fjes_dev_command_response_e {
+ FJES_CMD_STATUS_UNKNOWN,
+ FJES_CMD_STATUS_NORMAL,
+ FJES_CMD_STATUS_TIMEOUT,
+ FJES_CMD_STATUS_ERROR_PARAM,
+ FJES_CMD_STATUS_ERROR_STATUS,
+};
+
+/* EP buffer information */
+union ep_buffer_info {
+ u8 raw[EP_BUFFER_INFO_SIZE];
+
+ struct _ep_buffer_info_common_t {
+ u32 version;
+ } common;
+
+ struct _ep_buffer_info_v1_t {
+ u32 version;
+ u32 info_size;
+
+ u32 buffer_size;
+ u16 count_max;
+
+ u16 _rsv_1;
+
+ u32 frame_max;
+ u8 mac_addr[ETH_ALEN];
+
+ u16 _rsv_2;
+ u32 _rsv_3;
+
+ u16 tx_status;
+ u16 rx_status;
+
+ u32 head;
+ u32 tail;
+
+ u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+
+ } v1i;
+
+};
+
+/* buffer pair for Extended Partition */
+struct ep_share_mem_info {
+ struct epbuf_handler {
+ void *buffer;
+ size_t size;
+ union ep_buffer_info *info;
+ u8 *ring;
+ } tx, rx;
+
+ struct rtnl_link_stats64 net_stats;
+
+ u16 tx_status_work;
+
+ u8 es_status;
+ u8 zone;
+};
+
+struct es_device_trace {
+ u32 record_num;
+ u32 current_record;
+ u32 status_flag;
+ u32 _rsv;
+
+ struct {
+ u16 epid;
+ u16 dir_offset;
+ u32 data;
+ u64 tsc;
+ } record[];
+};
+
+struct fjes_hw_info {
+ struct fjes_device_shared_info *share;
+ union fjes_device_command_req *req_buf;
+ u64 req_buf_size;
+ union fjes_device_command_res *res_buf;
+ u64 res_buf_size;
+
+ int *my_epid;
+ int *max_epid;
+
+ struct es_device_trace *trace;
+ u64 trace_size;
+
+	struct mutex lock; /* buffer lock */
+
+ unsigned long buffer_share_bit;
+ unsigned long buffer_unshare_reserve_bit;
+};
+
+struct fjes_hw {
+ void *back;
+
+ unsigned long txrx_stop_req_bit;
+ unsigned long epstop_req_bit;
+ struct work_struct update_zone_task;
+ struct work_struct epstop_task;
+
+ int my_epid;
+ int max_epid;
+
+ struct ep_share_mem_info *ep_shm_info;
+
+ struct fjes_hw_resource {
+ u64 start;
+ u64 size;
+ int irq;
+ } hw_res;
+
+ u8 *base;
+
+ struct fjes_hw_info hw_info;
+};
+
+int fjes_hw_init(struct fjes_hw *);
+void fjes_hw_exit(struct fjes_hw *);
+int fjes_hw_reset(struct fjes_hw *);
+int fjes_hw_request_info(struct fjes_hw *);
+int fjes_hw_register_buff_addr(struct fjes_hw *, int,
+ struct ep_share_mem_info *);
+int fjes_hw_unregister_buff_addr(struct fjes_hw *, int);
+void fjes_hw_init_command_registers(struct fjes_hw *,
+ struct fjes_device_command_param *);
+void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32);
+int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK);
+void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool);
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *);
+void fjes_hw_raise_epstop(struct fjes_hw *);
+int fjes_hw_wait_epstop(struct fjes_hw *);
+enum ep_partner_status
+ fjes_hw_get_partner_ep_status(struct fjes_hw *, int);
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *, int);
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *, int);
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *, u32);
+bool fjes_hw_check_mtu(struct epbuf_handler *, u32);
+bool fjes_hw_check_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_set_vlan_id(struct epbuf_handler *, u16);
+void fjes_hw_del_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *);
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *);
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *);
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t);
+
+#endif /* FJES_HW_H_ */
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
new file mode 100644
index 000000000000..0ddb54fe3d91
--- /dev/null
+++ b/drivers/net/fjes/fjes_main.c
@@ -0,0 +1,1383 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include "fjes.h"
+
+#define MAJ 1
+#define MIN 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
+#define DRV_NAME "fjes"
+char fjes_driver_name[] = DRV_NAME;
+char fjes_driver_version[] = DRV_VERSION;
+static const char fjes_driver_string[] =
+ "FUJITSU Extended Socket Network Device Driver";
+static const char fjes_copyright[] =
+ "Copyright (c) 2015 FUJITSU LIMITED";
+
+MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
+MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int fjes_request_irq(struct fjes_adapter *);
+static void fjes_free_irq(struct fjes_adapter *);
+
+static int fjes_open(struct net_device *);
+static int fjes_close(struct net_device *);
+static int fjes_setup_resources(struct fjes_adapter *);
+static void fjes_free_resources(struct fjes_adapter *);
+static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
+static void fjes_raise_intr_rxdata_task(struct work_struct *);
+static void fjes_tx_stall_task(struct work_struct *);
+static void fjes_force_close_task(struct work_struct *);
+static irqreturn_t fjes_intr(int, void*);
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+static int fjes_change_mtu(struct net_device *, int);
+static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
+static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
+static void fjes_tx_retry(struct net_device *);
+
+static int fjes_acpi_add(struct acpi_device *);
+static int fjes_acpi_remove(struct acpi_device *);
+static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
+
+static int fjes_probe(struct platform_device *);
+static int fjes_remove(struct platform_device *);
+
+static int fjes_sw_init(struct fjes_adapter *);
+static void fjes_netdev_setup(struct net_device *);
+static void fjes_irq_watch_task(struct work_struct *);
+static void fjes_watch_unshare_task(struct work_struct *);
+static void fjes_rx_irq(struct fjes_adapter *, int);
+static int fjes_poll(struct napi_struct *, int);
+
+static const struct acpi_device_id fjes_acpi_ids[] = {
+ {"PNP0C02", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
+
+static struct acpi_driver fjes_acpi_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = fjes_acpi_ids,
+ .ops = {
+ .add = fjes_acpi_add,
+ .remove = fjes_acpi_remove,
+ },
+};
+
+static struct platform_driver fjes_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = fjes_probe,
+ .remove = fjes_remove,
+};
+
+static struct resource fjes_resource[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0,
+ },
+ {
+ .flags = IORESOURCE_IRQ,
+ .start = 0,
+ .end = 0,
+ },
+};
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+ char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
+ struct platform_device *plat_dev;
+ union acpi_object *str;
+ acpi_status status;
+ int result;
+
+ status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ str = buffer.pointer;
+ result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
+ str->string.length, UTF16_LITTLE_ENDIAN,
+ str_buf, sizeof(str_buf) - 1);
+ str_buf[result] = 0;
+
+ if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
+ kfree(buffer.pointer);
+ return -ENODEV;
+ }
+ kfree(buffer.pointer);
+
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ fjes_get_acpi_resource, fjes_resource);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* create platform_device */
+ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+ ARRAY_SIZE(fjes_resource));
+ device->driver_data = plat_dev;
+
+ return 0;
+}
+
+static int fjes_acpi_remove(struct acpi_device *device)
+{
+ struct platform_device *plat_dev;
+
+ plat_dev = (struct platform_device *)acpi_driver_data(device);
+ platform_device_unregister(plat_dev);
+
+ return 0;
+}
+
+static acpi_status
+fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
+{
+ struct acpi_resource_address32 *addr;
+ struct acpi_resource_irq *irq;
+ struct resource *res = data;
+
+ switch (acpi_res->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ addr = &acpi_res->data.address32;
+ res[0].start = addr->address.minimum;
+ res[0].end = addr->address.minimum +
+ addr->address.address_length - 1;
+ break;
+
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = &acpi_res->data.irq;
+ if (irq->interrupt_count != 1)
+ return AE_ERROR;
+ res[1].start = irq->interrupts[0];
+ res[1].end = irq->interrupts[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return AE_OK;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int result = -1;
+
+ adapter->interrupt_watch_enable = true;
+ if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+
+ if (!adapter->irq_registered) {
+ result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+ IRQF_SHARED, netdev->name, adapter);
+ if (result)
+ adapter->irq_registered = false;
+ else
+ adapter->irq_registered = true;
+ }
+
+ return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ adapter->interrupt_watch_enable = false;
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+ if (adapter->irq_registered) {
+ free_irq(adapter->hw.hw_res.irq, adapter);
+ adapter->irq_registered = false;
+ }
+}
+
+static const struct net_device_ops fjes_netdev_ops = {
+ .ndo_open = fjes_open,
+ .ndo_stop = fjes_close,
+ .ndo_start_xmit = fjes_xmit_frame,
+ .ndo_get_stats64 = fjes_get_stats64,
+ .ndo_change_mtu = fjes_change_mtu,
+ .ndo_tx_timeout = fjes_tx_retry,
+ .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
+
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ int result;
+
+ if (adapter->open_guard)
+ return -ENXIO;
+
+ result = fjes_setup_resources(adapter);
+ if (result)
+ goto err_setup_res;
+
+ hw->txrx_stop_req_bit = 0;
+ hw->epstop_req_bit = 0;
+
+ napi_enable(&adapter->napi);
+
+ fjes_hw_capture_interrupt_status(hw);
+
+ result = fjes_request_irq(adapter);
+ if (result)
+ goto err_req_irq;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+
+ return 0;
+
+err_req_irq:
+ fjes_free_irq(adapter);
+ napi_disable(&adapter->napi);
+
+err_setup_res:
+ fjes_free_resources(adapter);
+ return result;
+}
+
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+ int epidx;
+
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+
+ fjes_hw_raise_epstop(hw);
+
+ napi_disable(&adapter->napi);
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
+ }
+
+ fjes_free_irq(adapter);
+
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ adapter->unshare_watch_bitmask = 0;
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
+
+ cancel_work_sync(&hw->update_zone_task);
+ cancel_work_sync(&hw->epstop_task);
+
+ fjes_hw_wait_epstop(hw);
+
+ fjes_free_resources(adapter);
+
+ return 0;
+}
+
+static int fjes_setup_resources(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ep_share_mem_info *buf_pair;
+ struct fjes_hw *hw = &adapter->hw;
+ int result;
+ int epidx;
+
+ mutex_lock(&hw->hw_info.lock);
+ result = fjes_hw_request_info(hw);
+ switch (result) {
+ case 0:
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ hw->ep_shm_info[epidx].es_status =
+ hw->hw_info.res_buf->info.info[epidx].es_status;
+ hw->ep_shm_info[epidx].zone =
+ hw->hw_info.res_buf->info.info[epidx].zone;
+ }
+ break;
+ default:
+ case -ENOMSG:
+ case -EBUSY:
+ adapter->force_reset = true;
+
+ mutex_unlock(&hw->hw_info.lock);
+ return result;
+ }
+ mutex_unlock(&hw->hw_info.lock);
+
+ for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+ if ((epidx != hw->my_epid) &&
+ (hw->ep_shm_info[epidx].es_status ==
+ FJES_ZONING_STATUS_ENABLE)) {
+ fjes_hw_raise_interrupt(hw, epidx,
+ REG_ICTL_MASK_INFO_UPDATE);
+ }
+ }
+
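+ /* Wait for every peer to handle the zone-update interrupt raised
+ * above; the delay scales with the number of endpoints.
+ */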
+ msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
+
+ for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ buf_pair = &hw->ep_shm_info[epidx];
+
+ fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
+ netdev->mtu);
+
+ if (fjes_hw_epid_is_same_zone(hw, epidx)) {
+ mutex_lock(&hw->hw_info.lock);
+ result =
+ fjes_hw_register_buff_addr(hw, epidx, buf_pair);
+ mutex_unlock(&hw->hw_info.lock);
+
+ switch (result) {
+ case 0:
+ break;
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ adapter->force_reset = true;
+ return result;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void fjes_free_resources(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_device_command_param param;
+ struct ep_share_mem_info *buf_pair;
+ struct fjes_hw *hw = &adapter->hw;
+ bool reset_flag = false;
+ int result;
+ int epidx;
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ mutex_lock(&hw->hw_info.lock);
+ result = fjes_hw_unregister_buff_addr(hw, epidx);
+ mutex_unlock(&hw->hw_info.lock);
+
+ if (result)
+ reset_flag = true;
+
+ buf_pair = &hw->ep_shm_info[epidx];
+
+ fjes_hw_setup_epbuf(&buf_pair->tx,
+ netdev->dev_addr, netdev->mtu);
+
+ clear_bit(epidx, &hw->txrx_stop_req_bit);
+ }
+
+ if (reset_flag || adapter->force_reset) {
+ result = fjes_hw_reset(hw);
+
+ adapter->force_reset = false;
+
+ if (result)
+ adapter->open_guard = true;
+
+ hw->hw_info.buffer_share_bit = 0;
+
+ memset((void *)&param, 0, sizeof(param));
+
+ param.req_len = hw->hw_info.req_buf_size;
+ param.req_start = __pa(hw->hw_info.req_buf);
+ param.res_len = hw->hw_info.res_buf_size;
+ param.res_start = __pa(hw->hw_info.res_buf);
+ param.share_start = __pa(hw->hw_info.share->ep_status);
+
+ fjes_hw_init_command_registers(hw, &param);
+ }
+}
+
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, tx_stall_task);
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_hw *hw = &adapter->hw;
+ int all_queue_available, sendable;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
+ union ep_buffer_info *info;
+ int i;
+
+ if (((long)jiffies -
+ (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+ netif_wake_queue(netdev);
+ return;
+ }
+
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
+
+ for (i = 0; i < 5; i++) {
+ all_queue_available = 1;
+
+ for (epid = 0; epid < max_epid; epid++) {
+ if (my_epid == epid)
+ continue;
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ sendable = (pstatus == EP_PARTNER_SHARED);
+ if (!sendable)
+ continue;
+
+ info = adapter->hw.ep_shm_info[epid].tx.info;
+
+ if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+ info->v1i.count_max)) {
+ all_queue_available = 0;
+ break;
+ }
+ }
+
+ if (all_queue_available) {
+ netif_wake_queue(netdev);
+ return;
+ }
+ }
+
+ usleep_range(50, 100);
+
+ queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
+}
+
+static void fjes_force_close_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, force_close_task);
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+}
+
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, raise_intr_rxdata_task);
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
+
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
+
+ for (epid = 0; epid < max_epid; epid++)
+ hw->ep_shm_info[epid].tx_status_work = 0;
+
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if (pstatus == EP_PARTNER_SHARED) {
+ hw->ep_shm_info[epid].tx_status_work =
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+
+ if (hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) {
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+ FJES_TX_DELAY_SEND_NONE;
+ }
+ }
+ }
+
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if ((hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) &&
+ (pstatus == EP_PARTNER_SHARED) &&
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+ fjes_hw_raise_interrupt(hw, epid,
+ REG_ICTL_MASK_RX_DATA);
+ }
+ }
+
+ usleep_range(500, 1000);
+}
+
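+/* fjes_tx_send() only stamps the shared buffer FJES_TX_DELAY_SEND_PENDING;
+ * the actual RX-data interrupt is deferred to fjes_raise_intr_rxdata_task(),
+ * which scans all endpoints and batches the notifications.
+ */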
+static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
+ void *data, size_t len)
+{
+ int retval;
+
+ retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
+ data, len);
+ if (retval)
+ return retval;
+
+ adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
+ FJES_TX_DELAY_SEND_PENDING;
+ if (!work_pending(&adapter->raise_intr_rxdata_task))
+ queue_work(adapter->txrx_wq,
+ &adapter->raise_intr_rxdata_task);
+
+ retval = 0;
+ return retval;
+}
+
+static netdev_tx_t
+fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ int max_epid, my_epid, dest_epid;
+ enum ep_partner_status pstatus;
+ struct netdev_queue *cur_queue;
+ char shortpkt[VLAN_ETH_HLEN];
+ bool is_multi, vlan;
+ struct ethhdr *eth;
+ u16 queue_no = 0;
+ u16 vlan_id = 0;
+ netdev_tx_t ret;
+ char *data;
+ int len;
+
+ ret = NETDEV_TX_OK;
+ is_multi = false;
+ cur_queue = netdev_get_tx_queue(netdev, queue_no);
+
+ eth = (struct ethhdr *)skb->data;
+ my_epid = hw->my_epid;
+
+ vlan = (vlan_get_tag(skb, &vlan_id) == 0);
+
+ data = skb->data;
+ len = skb->len;
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ dest_epid = 0;
+ max_epid = hw->max_epid;
+ is_multi = true;
+ } else if (is_local_ether_addr(eth->h_dest)) {
+ dest_epid = eth->h_dest[ETH_ALEN - 1];
+ max_epid = dest_epid + 1;
+
+ if (!((eth->h_dest[0] == 0x02) &&
+ ((eth->h_dest[1] | eth->h_dest[2] |
+ eth->h_dest[3] | eth->h_dest[4]) == 0x00) &&
+ (dest_epid < hw->max_epid))) {
+ dest_epid = 0;
+ max_epid = 0;
+ ret = NETDEV_TX_OK;
+
+ adapter->stats64.tx_packets += 1;
+ hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+ adapter->stats64.tx_bytes += len;
+ hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+ }
+ } else {
+ dest_epid = 0;
+ max_epid = 0;
+ ret = NETDEV_TX_OK;
+
+ adapter->stats64.tx_packets += 1;
+ hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+ adapter->stats64.tx_bytes += len;
+ hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+ }
+
+ for (; dest_epid < max_epid; dest_epid++) {
+ if (my_epid == dest_epid)
+ continue;
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
+ if (pstatus != EP_PARTNER_SHARED) {
+ ret = NETDEV_TX_OK;
+ } else if (!fjes_hw_check_epbuf_version(
+ &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
+ /* version is NOT 0 */
+ adapter->stats64.tx_carrier_errors += 1;
+ hw->ep_shm_info[my_epid].net_stats
+ .tx_carrier_errors += 1;
+
+ ret = NETDEV_TX_OK;
+ } else if (!fjes_hw_check_mtu(
+ &adapter->hw.ep_shm_info[dest_epid].rx,
+ netdev->mtu)) {
+ adapter->stats64.tx_dropped += 1;
+ hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+ adapter->stats64.tx_errors += 1;
+ hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+
+ ret = NETDEV_TX_OK;
+ } else if (vlan &&
+ !fjes_hw_check_vlan_id(
+ &adapter->hw.ep_shm_info[dest_epid].rx,
+ vlan_id)) {
+ ret = NETDEV_TX_OK;
+ } else {
+ if (len < VLAN_ETH_HLEN) {
+ memset(shortpkt, 0, VLAN_ETH_HLEN);
+ memcpy(shortpkt, skb->data, skb->len);
+ len = VLAN_ETH_HLEN;
+ data = shortpkt;
+ }
+
+ if (adapter->tx_retry_count == 0) {
+ adapter->tx_start_jiffies = jiffies;
+ adapter->tx_retry_count = 1;
+ } else {
+ adapter->tx_retry_count++;
+ }
+
+ if (fjes_tx_send(adapter, dest_epid, data, len)) {
+ if (is_multi) {
+ ret = NETDEV_TX_OK;
+ } else if (
+ ((long)jiffies -
+ (long)adapter->tx_start_jiffies) >=
+ FJES_TX_RETRY_TIMEOUT) {
+ adapter->stats64.tx_fifo_errors += 1;
+ hw->ep_shm_info[my_epid].net_stats
+ .tx_fifo_errors += 1;
+ adapter->stats64.tx_errors += 1;
+ hw->ep_shm_info[my_epid].net_stats
+ .tx_errors += 1;
+
+ ret = NETDEV_TX_OK;
+ } else {
+ netdev->trans_start = jiffies;
+ netif_tx_stop_queue(cur_queue);
+
+ if (!work_pending(&adapter->tx_stall_task))
+ queue_work(adapter->txrx_wq,
+ &adapter->tx_stall_task);
+
+ ret = NETDEV_TX_BUSY;
+ }
+ } else {
+ if (!is_multi) {
+ adapter->stats64.tx_packets += 1;
+ hw->ep_shm_info[my_epid].net_stats
+ .tx_packets += 1;
+ adapter->stats64.tx_bytes += len;
+ hw->ep_shm_info[my_epid].net_stats
+ .tx_bytes += len;
+ }
+
+ adapter->tx_retry_count = 0;
+ ret = NETDEV_TX_OK;
+ }
+ }
+ }
+
+ if (ret == NETDEV_TX_OK) {
+ dev_kfree_skb(skb);
+ if (is_multi) {
+ adapter->stats64.tx_packets += 1;
+ hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+ adapter->stats64.tx_bytes += len;
+ hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+ }
+ }
+
+ return ret;
+}
+
+static void fjes_tx_retry(struct net_device *netdev)
+{
+ struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+ netif_tx_wake_queue(queue);
+}
+
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+
+ memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
+
+ return stats;
+}
+
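+/* Round the requested MTU up to the next entry of fjes_support_mtu[];
+ * a zero entry terminates the table, so unsupported sizes fail with -EINVAL.
+ */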
+static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool running = netif_running(netdev);
+ int ret = 0;
+ int idx;
+
+ for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
+ if (new_mtu <= fjes_support_mtu[idx]) {
+ new_mtu = fjes_support_mtu[idx];
+ if (new_mtu == netdev->mtu)
+ return 0;
+
+ if (running)
+ fjes_close(netdev);
+
+ netdev->mtu = new_mtu;
+
+ if (running)
+ ret = fjes_open(netdev);
+
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fjes_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ bool ret = true;
+ int epid;
+
+ for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+ if (epid == adapter->hw.my_epid)
+ continue;
+
+ if (!fjes_hw_check_vlan_id(
+ &adapter->hw.ep_shm_info[epid].tx, vid))
+ ret = fjes_hw_set_vlan_id(
+ &adapter->hw.ep_shm_info[epid].tx, vid);
+ }
+
+ return ret ? 0 : -ENOSPC;
+}
+
+static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ int epid;
+
+ for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+ if (epid == adapter->hw.my_epid)
+ continue;
+
+ fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
+ }
+
+ return 0;
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ switch (status) {
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ break;
+ case EP_PARTNER_WAITING:
+ if (src_epid < hw->my_epid) {
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+ break;
+ case EP_PARTNER_SHARED:
+ if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_REQUEST) {
+ set_bit(src_epid, &hw->epstop_req_bit);
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq,
+ &hw->epstop_task);
+ }
+ break;
+ }
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+
+ set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ switch (status) {
+ case EP_PARTNER_WAITING:
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ /* fall through */
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ break;
+ case EP_PARTNER_SHARED:
+ set_bit(src_epid, &hw->epstop_req_bit);
+
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq, &hw->epstop_task);
+ break;
+ }
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ if (!work_pending(&hw->update_zone_task))
+ queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+ struct fjes_adapter *adapter = data;
+ struct fjes_hw *hw = &adapter->hw;
+ irqreturn_t ret;
+ u32 icr;
+
+ icr = fjes_hw_capture_interrupt_status(hw);
+
+ if (icr & REG_IS_MASK_IS_ASSERT) {
+ if (icr & REG_ICTL_MASK_RX_DATA)
+ fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+
+ if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
+ fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
+ fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+ fjes_hw_set_irqmask(hw,
+ REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+ if (icr & REG_ICTL_MASK_INFO_UPDATE)
+ fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
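+/* Scan the partner endpoints round-robin, starting just after start_epid,
+ * and return the first shared endpoint whose receive buffer is non-empty
+ * (or -1 if none).
+ */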
+static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
+ int start_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status pstatus;
+ int max_epid, cur_epid;
+ int i;
+
+ max_epid = hw->max_epid;
+ start_epid = (start_epid + 1 + max_epid) % max_epid;
+
+ for (i = 0; i < max_epid; i++) {
+ cur_epid = (start_epid + i) % max_epid;
+ if (cur_epid == hw->my_epid)
+ continue;
+
+ pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
+ if (pstatus == EP_PARTNER_SHARED) {
+ if (!fjes_hw_epbuf_rx_is_empty(
+ &hw->ep_shm_info[cur_epid].rx))
+ return cur_epid;
+ }
+ }
+ return -1;
+}
+
+static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
+ int *cur_epid)
+{
+ void *frame;
+
+ *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
+ if (*cur_epid < 0)
+ return NULL;
+
+ frame = fjes_hw_epbuf_rx_curpkt_get_addr(
+ &adapter->hw.ep_shm_info[*cur_epid].rx, psize);
+
+ return frame;
+}
+
+static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
+{
+ fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
+}
+
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+ adapter->unset_rx_last = true;
+ napi_schedule(&adapter->napi);
+}
+
+static int fjes_poll(struct napi_struct *napi, int budget)
+{
+ struct fjes_adapter *adapter =
+ container_of(napi, struct fjes_adapter, napi);
+ struct net_device *netdev = napi->dev;
+ struct fjes_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ int work_done = 0;
+ int cur_epid = 0;
+ int epidx;
+ size_t frame_len;
+ void *frame;
+
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
+ FJES_RX_POLL_WORK;
+ }
+
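+ /* FJES_RX_POLL_WORK set in the shared descriptor tells each peer that
+ * this endpoint is polling, so fjes_raise_intr_rxdata_task() on the
+ * peer can skip the RX interrupt; the flag is cleared again below once
+ * the endpoint goes idle.
+ */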
+ while (work_done < budget) {
+ prefetch(&adapter->hw);
+ frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
+
+ if (frame) {
+ skb = napi_alloc_skb(napi, frame_len);
+ if (!skb) {
+ adapter->stats64.rx_dropped += 1;
+ hw->ep_shm_info[cur_epid].net_stats
+ .rx_dropped += 1;
+ adapter->stats64.rx_errors += 1;
+ hw->ep_shm_info[cur_epid].net_stats
+ .rx_errors += 1;
+ } else {
+ memcpy(skb_put(skb, frame_len),
+ frame, frame_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ work_done++;
+
+ adapter->stats64.rx_packets += 1;
+ hw->ep_shm_info[cur_epid].net_stats
+ .rx_packets += 1;
+ adapter->stats64.rx_bytes += frame_len;
+ hw->ep_shm_info[cur_epid].net_stats
+ .rx_bytes += frame_len;
+
+ if (is_multicast_ether_addr(
+ ((struct ethhdr *)frame)->h_dest)) {
+ adapter->stats64.multicast += 1;
+ hw->ep_shm_info[cur_epid].net_stats
+ .multicast += 1;
+ }
+ }
+
+ fjes_rxframe_release(adapter, cur_epid);
+ adapter->unset_rx_last = true;
+ } else {
+ break;
+ }
+ }
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ if (adapter->unset_rx_last) {
+ adapter->rx_last_jiffies = jiffies;
+ adapter->unset_rx_last = false;
+ }
+
+ if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
+ napi_reschedule(napi);
+ } else {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
+ }
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
+ }
+ }
+
+ return work_done;
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+ struct fjes_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *res;
+ struct fjes_hw *hw;
+ int err;
+
+ err = -ENOMEM;
+ netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+ NET_NAME_UNKNOWN, fjes_netdev_setup,
+ FJES_MAX_QUEUES);
+
+ if (!netdev)
+ goto err_out;
+
+ SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+ dev_set_drvdata(&plat_dev->dev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->plat_dev = plat_dev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ /* setup the private structure */
+ err = fjes_sw_init(adapter);
+ if (err)
+ goto err_free_netdev;
+
+ INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+ adapter->force_reset = false;
+ adapter->open_guard = false;
+
+ adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
+ adapter->control_wq = create_workqueue(DRV_NAME "/control");
+
+ INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+ INIT_WORK(&adapter->raise_intr_rxdata_task,
+ fjes_raise_intr_rxdata_task);
+ INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+ adapter->unshare_watch_bitmask = 0;
+
+ INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+ adapter->interrupt_watch_enable = false;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_netdev;
+ }
+ hw->hw_res.start = res->start;
+ hw->hw_res.size = resource_size(res);
+ hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ err = fjes_hw_init(&adapter->hw);
+ if (err)
+ goto err_free_netdev;
+
+ /* setup MAC address (02:00:00:00:00:[epid]) */
+ netdev->dev_addr[0] = 2;
+ netdev->dev_addr[1] = 0;
+ netdev->dev_addr[2] = 0;
+ netdev->dev_addr[3] = 0;
+ netdev->dev_addr[4] = 0;
+ netdev->dev_addr[5] = hw->my_epid; /* EPID */
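+ /* e.g. an endpoint with my_epid == 3 appears as 02:00:00:00:00:03;
+ * fjes_xmit_frame() recovers the destination EPID from this final
+ * octet of the locally administered address.
+ */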
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_exit;
+
+ netif_carrier_off(netdev);
+
+ return 0;
+
+err_hw_exit:
+ fjes_hw_exit(&adapter->hw);
+err_free_netdev:
+ free_netdev(netdev);
+err_out:
+ return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
+ if (adapter->control_wq)
+ destroy_workqueue(adapter->control_wq);
+ if (adapter->txrx_wq)
+ destroy_workqueue(adapter->txrx_wq);
+
+ unregister_netdev(netdev);
+
+ fjes_hw_exit(hw);
+
+ netif_napi_del(&adapter->napi);
+
+ free_netdev(netdev);
+
+ return 0;
+}
+
+static int fjes_sw_init(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
+
+ return 0;
+}
+
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
+{
+ ether_setup(netdev);
+
+ netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+ netdev->netdev_ops = &fjes_netdev_ops;
+ fjes_set_ethtool_ops(netdev);
+ netdev->mtu = fjes_support_mtu[0];
+ netdev->flags |= IFF_BROADCAST;
+ netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+}
+
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+ struct fjes_adapter, interrupt_watch_task);
+
+ local_irq_disable();
+ fjes_intr(adapter->hw.hw_res.irq, adapter);
+ local_irq_enable();
+
+ if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+ napi_schedule(&adapter->napi);
+
+ if (adapter->interrupt_watch_enable) {
+ if (!delayed_work_pending(&adapter->interrupt_watch_task))
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+}
+
+static void fjes_watch_unshare_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter =
+ container_of(work, struct fjes_adapter, unshare_watch_task);
+
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_hw *hw = &adapter->hw;
+
+ int unshare_watch, unshare_reserve;
+ int max_epid, my_epid, epidx;
+ int stop_req, stop_req_done;
+ ulong unshare_watch_bitmask;
+ int wait_time = 0;
+ int is_shared;
+ int ret;
+
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
+
+ unshare_watch_bitmask = adapter->unshare_watch_bitmask;
+ adapter->unshare_watch_bitmask = 0;
+
+ while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
+ (wait_time < 3000)) {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
+ epidx);
+
+ stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
+
+ stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_DONE;
+
+ unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
+
+ unshare_reserve = test_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit);
+
+ if ((!stop_req ||
+ (is_shared && !stop_req_done)) &&
+ (is_shared || !unshare_watch || !unshare_reserve))
+ continue;
+
+ mutex_lock(&hw->hw_info.lock);
+ ret = fjes_hw_unregister_buff_addr(hw, epidx);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ if (!work_pending(
+ &adapter->force_close_task)) {
+ adapter->force_reset = true;
+ schedule_work(
+ &adapter->force_close_task);
+ }
+ break;
+ }
+ mutex_unlock(&hw->hw_info.lock);
+
+ fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr, netdev->mtu);
+
+ clear_bit(epidx, &hw->txrx_stop_req_bit);
+ clear_bit(epidx, &unshare_watch_bitmask);
+ clear_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+
+ msleep(100);
+ wait_time += 100;
+ }
+
+ if (hw->hw_info.buffer_unshare_reserve_bit) {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+
+ if (test_bit(epidx,
+ &hw->hw_info.buffer_unshare_reserve_bit)) {
+ mutex_lock(&hw->hw_info.lock);
+
+ ret = fjes_hw_unregister_buff_addr(hw, epidx);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMSG:
+ case -EBUSY:
+ default:
+ if (!work_pending(
+ &adapter->force_close_task)) {
+ adapter->force_reset = true;
+ schedule_work(
+ &adapter->force_close_task);
+ }
+ break;
+ }
+ mutex_unlock(&hw->hw_info.lock);
+
+ fjes_hw_setup_epbuf(
+ &hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr, netdev->mtu);
+
+ clear_bit(epidx, &hw->txrx_stop_req_bit);
+ clear_bit(epidx, &unshare_watch_bitmask);
+ clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+ }
+
+ if (test_bit(epidx, &unshare_watch_bitmask)) {
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+ ~FJES_RX_STOP_REQ_DONE;
+ }
+ }
+ }
+}
+
+/* fjes_init_module - Driver Registration Routine */
+static int __init fjes_init_module(void)
+{
+ int result;
+
+ pr_info("%s - version %s - %s\n",
+ fjes_driver_string, fjes_driver_version, fjes_copyright);
+
+ result = platform_driver_register(&fjes_driver);
+ if (result < 0)
+ return result;
+
+ result = acpi_bus_register_driver(&fjes_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+
+ return 0;
+
+fail_acpi_driver:
+ platform_driver_unregister(&fjes_driver);
+ return result;
+}
+
+module_init(fjes_init_module);
+
+/* fjes_exit_module - Driver Exit Cleanup Routine */
+static void __exit fjes_exit_module(void)
+{
+ acpi_bus_unregister_driver(&fjes_acpi_driver);
+ platform_driver_unregister(&fjes_driver);
+}
+
+module_exit(fjes_exit_module);
diff --git a/drivers/net/fjes/fjes_regs.h b/drivers/net/fjes/fjes_regs.h
new file mode 100644
index 000000000000..029c924dc175
--- /dev/null
+++ b/drivers/net/fjes/fjes_regs.h
@@ -0,0 +1,142 @@
+/*
+ * FUJITSU Extended Socket Network Device driver
+ * Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_REGS_H_
+#define FJES_REGS_H_
+
+#include <linux/bitops.h>
+
+#define XSCT_DEVICE_REGISTER_SIZE 0x1000
+
+/* register offset */
+/* Information registers */
+#define XSCT_OWNER_EPID 0x0000 /* Owner EPID */
+#define XSCT_MAX_EP 0x0004 /* Maximum EP */
+
+/* Device Control registers */
+#define XSCT_DCTL 0x0010 /* Device Control */
+
+/* Command Control registers */
+#define XSCT_CR 0x0020 /* Command request */
+#define XSCT_CS 0x0024 /* Command status */
+#define XSCT_SHSTSAL 0x0028 /* Share status address Low */
+#define XSCT_SHSTSAH 0x002C /* Share status address High */
+
+#define XSCT_REQBL 0x0034 /* Request Buffer length */
+#define XSCT_REQBAL 0x0038 /* Request Buffer Address Low */
+#define XSCT_REQBAH 0x003C /* Request Buffer Address High */
+
+#define XSCT_RESPBL 0x0044 /* Response Buffer Length */
+#define XSCT_RESPBAL 0x0048 /* Response Buffer Address Low */
+#define XSCT_RESPBAH 0x004C /* Response Buffer Address High */
+
+/* Interrupt Control registers */
+#define XSCT_IS 0x0080 /* Interrupt status */
+#define XSCT_IMS 0x0084 /* Interrupt mask set */
+#define XSCT_IMC 0x0088 /* Interrupt mask clear */
+#define XSCT_IG 0x008C /* Interrupt generator */
+#define XSCT_ICTL 0x0090 /* Interrupt control */
+
+/* register structure */
+/* Information registers */
+union REG_OWNER_EPID {
+ struct {
+ __le32 epid:16;
+ __le32:16;
+ } bits;
+ __le32 reg;
+};
+
+union REG_MAX_EP {
+ struct {
+ __le32 maxep:16;
+ __le32:16;
+ } bits;
+ __le32 reg;
+};
+
+/* Device Control registers */
+union REG_DCTL {
+ struct {
+ __le32 reset:1;
+ __le32 rsv0:15;
+ __le32 rsv1:16;
+ } bits;
+ __le32 reg;
+};
+
+/* Command Control registers */
+union REG_CR {
+ struct {
+ __le32 req_code:16;
+ __le32 err_info:14;
+ __le32 error:1;
+ __le32 req_start:1;
+ } bits;
+ __le32 reg;
+};
+
+union REG_CS {
+ struct {
+ __le32 req_code:16;
+ __le32 rsv0:14;
+ __le32 busy:1;
+ __le32 complete:1;
+ } bits;
+ __le32 reg;
+};
+
+/* Interrupt Control registers */
+union REG_ICTL {
+ struct {
+ __le32 automak:1;
+ __le32 rsv0:31;
+ } bits;
+ __le32 reg;
+};
+
+enum REG_ICTL_MASK {
+ REG_ICTL_MASK_INFO_UPDATE = 1 << 20,
+ REG_ICTL_MASK_DEV_STOP_REQ = 1 << 19,
+ REG_ICTL_MASK_TXRX_STOP_REQ = 1 << 18,
+ REG_ICTL_MASK_TXRX_STOP_DONE = 1 << 17,
+ REG_ICTL_MASK_RX_DATA = 1 << 16,
+ REG_ICTL_MASK_ALL = GENMASK(20, 16),
+};
+
+enum REG_IS_MASK {
+ REG_IS_MASK_IS_ASSERT = 1 << 31,
+ REG_IS_MASK_EPID = GENMASK(15, 0),
+};
+
+struct fjes_hw;
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg);
+
+#define wr32(reg, val) \
+do { \
+ u8 *base = hw->base; \
+ writel((val), &base[(reg)]); \
+} while (0)
+
+#define rd32(reg) (fjes_hw_rd32(hw, reg))
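+
+/* Usage sketch: both macros assume a local "struct fjes_hw *hw" is in
+ * scope, since they reference it implicitly, e.g.:
+ *
+ * wr32(XSCT_IMS, mask);
+ * val = rd32(XSCT_OWNER_EPID);
+ */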
+
+#endif /* FJES_REGS_H_ */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 78d49d186e05..da3259ce7c8d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -15,8 +15,11 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
+#include <net/dst_metadata.h>
+#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
+#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -32,12 +35,17 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+#define GENEVE_VER 0
+#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+
/* per-network namespace private data for this module */
struct geneve_net {
- struct list_head geneve_list;
- struct hlist_head vni_list[VNI_HASH_SIZE];
+ struct list_head geneve_list;
+ struct list_head sock_list;
};
+static int geneve_net_id;
+
/* Pseudo network device */
struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
@@ -49,9 +57,20 @@ struct geneve_dev {
u8 tos; /* TOS override */
struct sockaddr_in remote; /* IPv4 address for link partner */
struct list_head next; /* geneve's per namespace list */
+ __be16 dst_port;
+ bool collect_md;
+ struct gro_cells gro_cells;
};
-static int geneve_net_id;
+struct geneve_sock {
+ bool collect_md;
+ struct list_head list;
+ struct socket *sock;
+ struct rcu_head rcu;
+ int refcnt;
+ struct udp_offload udp_offloads;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+};
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
@@ -61,46 +80,98 @@ static inline __u32 geneve_net_vni_hash(u8 vni[3])
return hash_32(vnid, VNI_HASH_BITS);
}
+static __be64 vni_to_tunnel_id(const __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ return (vni[0] << 16) | (vni[1] << 8) | vni[2];
+#else
+ return (__force __be64)(((__force u64)vni[0] << 40) |
+ ((__force u64)vni[1] << 48) |
+ ((__force u64)vni[2] << 56));
+#endif
+}
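+/* e.g. VNI 0x123456 yields tunnel ID 0x123456: the three VNI octets occupy
+ * the low 24 bits of the 64-bit ID in network byte order.
+ */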
+
+static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
+ __be32 addr, u8 vni[])
+{
+ struct hlist_head *vni_list_head;
+ struct geneve_dev *geneve;
+ __u32 hash;
+
+ /* Find the device for this VNI */
+ hash = geneve_net_vni_hash(vni);
+ vni_list_head = &gs->vni_list[hash];
+ hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
+ if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+ addr == geneve->remote.sin_addr.s_addr)
+ return geneve;
+ }
+ return NULL;
+}
+
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
struct genevehdr *gnvh = geneve_hdr(skb);
- struct geneve_dev *dummy, *geneve = NULL;
- struct geneve_net *gn;
- struct iphdr *iph = NULL;
+ struct metadata_dst *tun_dst = NULL;
+ struct geneve_dev *geneve = NULL;
struct pcpu_sw_netstats *stats;
- struct hlist_head *vni_list_head;
- int err = 0;
- __u32 hash;
-
- iph = ip_hdr(skb); /* Still outer IP header... */
+ struct iphdr *iph;
+ u8 *vni;
+ __be32 addr;
+ int err;
- gn = gs->rcv_data;
+ if (gs->collect_md) {
+ static u8 zero_vni[3];
- /* Find the device for this VNI */
- hash = geneve_net_vni_hash(gnvh->vni);
- vni_list_head = &gn->vni_list[hash];
- hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
- if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
- iph->saddr == dummy->remote.sin_addr.s_addr) {
- geneve = dummy;
- break;
- }
+ vni = zero_vni;
+ addr = 0;
+ } else {
+ vni = gnvh->vni;
+ iph = ip_hdr(skb); /* Still outer IP header... */
+ addr = iph->saddr;
}
+
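+ /* collect_md sockets funnel every VNI into the single catch-all
+ * device registered with the all-zero VNI and remote address.
+ */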
+ geneve = geneve_lookup(gs, addr, vni);
if (!geneve)
goto drop;
- /* Drop packets w/ critical options,
- * since we don't support any...
- */
- if (gnvh->critical)
- goto drop;
+ if (ip_tunnel_collect_metadata() || gs->collect_md) {
+ __be16 flags;
+
+ flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
+ (gnvh->oam ? TUNNEL_OAM : 0) |
+ (gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+
+ tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
+ vni_to_tunnel_id(gnvh->vni),
+ gnvh->opt_len * 4);
+ if (!tun_dst)
+ goto drop;
+ /* Update tunnel dst according to Geneve options. */
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ gnvh->options, gnvh->opt_len * 4);
+ } else {
+ /* Drop packets w/ critical options,
+ * since we don't support any...
+ */
+ if (gnvh->critical)
+ goto drop;
+ }
skb_reset_mac_header(skb);
skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
skb->protocol = eth_type_trans(skb, geneve->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ if (tun_dst)
+ skb_dst_set(skb, &tun_dst->dst);
+
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
goto drop;
@@ -127,8 +198,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- netif_rx(skb);
-
+ gro_cells_receive(&geneve->gro_cells, skb);
return;
drop:
/* Consume bad packet */
@@ -138,32 +208,305 @@ drop:
/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ int err;
+
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
+ err = gro_cells_init(&geneve->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
return 0;
}
static void geneve_uninit(struct net_device *dev)
{
+ struct geneve_dev *geneve = netdev_priv(dev);
+
+ gro_cells_destroy(&geneve->gro_cells);
free_percpu(dev->tstats);
}
+/* Callback from net/ipv4/udp.c to receive packets */
+static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct genevehdr *geneveh;
+ struct geneve_sock *gs;
+ int opts_len;
+
+ /* Need Geneve and inner Ethernet header to be present */
+ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+ goto error;
+
+ /* Hand packets with an unexpected version or protocol type
+ * back to the UDP layer
+ */
+ geneveh = geneve_hdr(skb);
+ if (unlikely(geneveh->ver != GENEVE_VER))
+ goto error;
+
+ if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+ goto error;
+
+ opts_len = geneveh->opt_len * 4;
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+ htons(ETH_P_TEB)))
+ goto drop;
+
+ gs = rcu_dereference_sk_user_data(sk);
+ if (!gs)
+ goto drop;
+
+ geneve_rx(gs, skb);
+ return 0;
+
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+
+error:
+ /* Let the UDP layer deal with the skb */
+ return 1;
+}
+
+static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+ __be16 port)
+{
+ struct socket *sock;
+ struct udp_port_cfg udp_conf;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+ if (ipv6) {
+ udp_conf.family = AF_INET6;
+ } else {
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ }
+
+ udp_conf.local_udp_port = port;
+
+ /* Open UDP socket */
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+static void geneve_notify_add_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+ int err;
+
+ if (sa_family == AF_INET) {
+ err = udp_add_offload(&gs->udp_offloads);
+ if (err)
+ pr_warn("geneve: udp_add_offload failed with status %d\n",
+ err);
+ }
+}
+
+static int geneve_hlen(struct genevehdr *gh)
+{
+ return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct genevehdr *gh, *gh2;
+ unsigned int hlen, gh_len, off_gnv;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_gnv = skb_gro_offset(skb);
+ hlen = off_gnv + sizeof(*gh);
+ gh = skb_gro_header_fast(skb, off_gnv);
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ if (gh->ver != GENEVE_VER || gh->oam)
+ goto out;
+ gh_len = geneve_hlen(gh);
+
+ hlen = off_gnv + gh_len;
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ gh2 = (struct genevehdr *)(p->data + off_gnv);
+ if (gh->opt_len != gh2->opt_len ||
+ memcmp(gh, gh2, gh_len)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (!ptype) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+ struct udp_offload *uoff)
+{
+ struct genevehdr *gh;
+ struct packet_offload *ptype;
+ __be16 type;
+ int gh_len;
+ int err = -ENOSYS;
+
+ udp_tunnel_gro_complete(skb, nhoff);
+
+ gh = (struct genevehdr *)(skb->data + nhoff);
+ gh_len = geneve_hlen(gh);
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+ rcu_read_unlock();
+ return err;
+}
+
+/* Create new listen socket if needed */
+static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
+ bool ipv6)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_sock *gs;
+ struct socket *sock;
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ int h;
+
+ gs = kzalloc(sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return ERR_PTR(-ENOMEM);
+
+ sock = geneve_create_sock(net, ipv6, port);
+ if (IS_ERR(sock)) {
+ kfree(gs);
+ return ERR_CAST(sock);
+ }
+
+ gs->sock = sock;
+ gs->refcnt = 1;
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&gs->vni_list[h]);
+
+ /* Initialize the geneve udp offloads structure */
+ gs->udp_offloads.port = port;
+ gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
+ gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
+ geneve_notify_add_rx_port(gs);
+
+ /* Mark socket as an encapsulation socket */
+ tunnel_cfg.sk_user_data = gs;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+ list_add(&gs->list, &gn->sock_list);
+ return gs;
+}
+
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&gs->udp_offloads);
+}
+
+static void geneve_sock_release(struct geneve_sock *gs)
+{
+ if (--gs->refcnt)
+ return;
+
+ list_del(&gs->list);
+ geneve_notify_del_rx_port(gs);
+ udp_tunnel_sock_release(gs->sock);
+ kfree_rcu(gs, rcu);
+}
+
+static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
+ __be16 dst_port)
+{
+ struct geneve_sock *gs;
+
+ list_for_each_entry(gs, &gn->sock_list, list) {
+ if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
+ inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
+ return gs;
+ }
+ }
+ return NULL;
+}
+
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct net *net = geneve->net;
- struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
+ __u32 hash;
+
+ gs = geneve_find_sock(gn, geneve->dst_port);
+ if (gs) {
+ gs->refcnt++;
+ goto out;
+ }
- gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
- false, false);
+ gs = geneve_socket_create(net, geneve->dst_port, false);
if (IS_ERR(gs))
return PTR_ERR(gs);
+out:
+ gs->collect_md = geneve->collect_md;
geneve->sock = gs;
+ hash = geneve_net_vni_hash(geneve->vni);
+ hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
return 0;
}
@@ -172,74 +515,191 @@ static int geneve_stop(struct net_device *dev)
struct geneve_dev *geneve = netdev_priv(dev);
struct geneve_sock *gs = geneve->sock;
+ if (!hlist_unhashed(&geneve->hlist))
+ hlist_del_rcu(&geneve->hlist);
geneve_sock_release(gs);
-
return 0;
}
-static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool csum)
{
- struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs = geneve->sock;
- struct rtable *rt = NULL;
- const struct iphdr *iip; /* interior IP header */
- struct flowi4 fl4;
+ struct genevehdr *gnvh;
+ int min_headroom;
int err;
- __be16 sport;
- __u8 tos, ttl;
- iip = ip_hdr(skb);
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto free_rt;
+ }
- skb_reset_mac_header(skb);
+ skb = udp_tunnel_handle_offloads(skb, csum);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto free_rt;
+ }
- /* TODO: port min/max limits should be configurable */
- sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ gnvh->ver = GENEVE_VER;
+ gnvh->opt_len = opt_len / 4;
+ gnvh->oam = !!(tun_flags & TUNNEL_OAM);
+ gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+ gnvh->rsvd1 = 0;
+ memcpy(gnvh->vni, vni, 3);
+ gnvh->proto_type = htons(ETH_P_TEB);
+ gnvh->rsvd2 = 0;
+ memcpy(gnvh->options, opt, opt_len);
+
+ skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ return 0;
+
+free_rt:
+ ip_rt_put(rt);
+ return err;
+}
- tos = geneve->tos;
- if (tos == 1)
- tos = ip_tunnel_get_dsfield(iip, skb);
+static struct rtable *geneve_get_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi4 *fl4,
+ struct ip_tunnel_info *info)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct rtable *rt = NULL;
+ __u8 tos;
+
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_mark = skb->mark;
+ fl4->flowi4_proto = IPPROTO_UDP;
+
+ if (info) {
+ fl4->daddr = info->key.u.ipv4.dst;
+ fl4->saddr = info->key.u.ipv4.src;
+ fl4->flowi4_tos = RT_TOS(info->key.tos);
+ } else {
+ tos = geneve->tos;
+ if (tos == 1) {
+ const struct iphdr *iip = ip_hdr(skb);
+
+ tos = ip_tunnel_get_dsfield(iip, skb);
+ }
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_tos = RT_TOS(tos);
- fl4.daddr = geneve->remote.sin_addr.s_addr;
- rt = ip_route_output_key(geneve->net, &fl4);
+ fl4->flowi4_tos = RT_TOS(tos);
+ fl4->daddr = geneve->remote.sin_addr.s_addr;
+ }
+
+ rt = ip_route_output_key(geneve->net, fl4);
if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+ netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
dev->stats.tx_carrier_errors++;
- goto tx_error;
+ return rt;
}
if (rt->dst.dev == dev) { /* is this necessary? */
- netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+ netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
dev->stats.collisions++;
- goto rt_tx_error;
+ ip_rt_put(rt);
+ return ERR_PTR(-EINVAL);
}
+ return rt;
+}
- tos = ip_tunnel_ecn_encap(tos, iip, skb);
+/* Convert 64 bit tunnel ID to 24 bit VNI. */
+static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ vni[0] = (__force __u8)(tun_id >> 16);
+ vni[1] = (__force __u8)(tun_id >> 8);
+ vni[2] = (__force __u8)tun_id;
+#else
+ vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+ vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+ vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs = geneve->sock;
+ struct ip_tunnel_info *info = NULL;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+ __u8 tos, ttl;
+ __be16 sport;
+ bool udp_csum;
+ __be16 df;
+ int err;
- ttl = geneve->ttl;
- if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
- ttl = 1;
+ if (geneve->collect_md) {
+ info = skb_tunnel_info(skb);
+ if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
+ netdev_dbg(dev, "no tunnel metadata\n");
+ goto tx_error;
+ }
+ if (info && ip_tunnel_info_af(info) != AF_INET)
+ goto tx_error;
+ }
- ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ rt = geneve_get_rt(skb, dev, &fl4, info);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
- /* no need to handle local destination and encap bypass...yet... */
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ skb_reset_mac_header(skb);
- err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
- tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
- geneve->vni, 0, NULL, false,
- !net_eq(geneve->net, dev_net(geneve->dev)));
- if (err < 0)
- ip_rt_put(rt);
+ if (info) {
+ const struct ip_tunnel_key *key = &info->key;
+ u8 *opts = NULL;
+ u8 vni[3];
+
+ tunnel_id_to_vni(key->tun_id, vni);
+ if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ opts = ip_tunnel_info_opts(info);
+
+ udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+ err = geneve_build_skb(rt, skb, key->tun_flags, vni,
+ info->options_len, opts, udp_csum);
+ if (unlikely(err))
+ goto err;
+
+ tos = key->tos;
+ ttl = key->ttl;
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ } else {
+ const struct iphdr *iip; /* interior IP header */
+
+ udp_csum = false;
+ err = geneve_build_skb(rt, skb, 0, geneve->vni,
+ 0, NULL, udp_csum);
+ if (unlikely(err))
+ goto err;
+
+ iip = ip_hdr(skb);
+ tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+ ttl = geneve->ttl;
+ if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+ ttl = 1;
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ df = 0;
+ }
+ err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
+ tos, ttl, df, sport, geneve->dst_port,
+ !net_eq(geneve->net, dev_net(geneve->dev)),
+ !udp_csum);
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
-
return NETDEV_TX_OK;
-rt_tx_error:
- ip_rt_put(rt);
tx_error:
- dev->stats.tx_errors++;
dev_kfree_skb(skb);
+err:
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -283,7 +743,6 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
- dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
@@ -297,7 +756,8 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ eth_hw_addr_random(dev);
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
@@ -305,6 +765,8 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
[IFLA_GENEVE_TTL] = { .type = NLA_U8 },
[IFLA_GENEVE_TOS] = { .type = NLA_U8 },
+ [IFLA_GENEVE_PORT] = { .type = NLA_U16 },
+ [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -330,68 +792,117 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int geneve_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
+ __be16 dst_port,
+ __be32 rem_addr,
+ u8 vni[],
+ bool *tun_on_same_port,
+ bool *tun_collect_md)
+{
+ struct geneve_dev *geneve, *t;
+
+ *tun_on_same_port = false;
+ *tun_collect_md = false;
+ t = NULL;
+ list_for_each_entry(geneve, &gn->geneve_list, next) {
+ if (geneve->dst_port == dst_port) {
+ *tun_collect_md = geneve->collect_md;
+ *tun_on_same_port = true;
+ }
+ if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+ rem_addr == geneve->remote.sin_addr.s_addr &&
+ dst_port == geneve->dst_port)
+ t = geneve;
+ }
+ return t;
+}
+
+static int geneve_configure(struct net *net, struct net_device *dev,
+ __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
+ __u16 dst_port, bool metadata)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
- struct geneve_dev *dummy, *geneve = netdev_priv(dev);
- struct hlist_head *vni_list_head;
- struct sockaddr_in remote; /* IPv4 address for link partner */
- __u32 vni, hash;
+ struct geneve_dev *t, *geneve = netdev_priv(dev);
+ bool tun_collect_md, tun_on_same_port;
int err;
- if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
- return -EINVAL;
+ if (metadata) {
+ if (rem_addr || vni || tos || ttl)
+ return -EINVAL;
+ }
geneve->net = net;
geneve->dev = dev;
- vni = nla_get_u32(data[IFLA_GENEVE_ID]);
geneve->vni[0] = (vni & 0x00ff0000) >> 16;
geneve->vni[1] = (vni & 0x0000ff00) >> 8;
geneve->vni[2] = vni & 0x000000ff;
- geneve->remote.sin_addr.s_addr =
- nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+ geneve->remote.sin_addr.s_addr = rem_addr;
if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
return -EINVAL;
- remote = geneve->remote;
- hash = geneve_net_vni_hash(geneve->vni);
- vni_list_head = &gn->vni_list[hash];
- hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
- if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
- !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
- return -EBUSY;
+ geneve->ttl = ttl;
+ geneve->tos = tos;
+ geneve->dst_port = htons(dst_port);
+ geneve->collect_md = metadata;
+
+ t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
+ &tun_on_same_port, &tun_collect_md);
+ if (t)
+ return -EBUSY;
+
+ if (metadata) {
+ if (tun_on_same_port)
+ return -EPERM;
+ } else {
+ if (tun_collect_md)
+ return -EPERM;
}
- if (tb[IFLA_ADDRESS] == NULL)
- eth_hw_addr_random(dev);
-
err = register_netdevice(dev);
if (err)
return err;
+ list_add(&geneve->next, &gn->geneve_list);
+ return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ __u16 dst_port = GENEVE_UDP_PORT;
+ __u8 ttl = 0, tos = 0;
+ bool metadata = false;
+ __be32 rem_addr;
+ __u32 vni;
+
+ if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+ return -EINVAL;
+
+ vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+ rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+
if (data[IFLA_GENEVE_TTL])
- geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+ ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
if (data[IFLA_GENEVE_TOS])
- geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
- list_add(&geneve->next, &gn->geneve_list);
+ if (data[IFLA_GENEVE_PORT])
+ dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);
- hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+ if (data[IFLA_GENEVE_COLLECT_METADATA])
+ metadata = true;
- return 0;
+ return geneve_configure(net, dev, rem_addr, vni,
+ ttl, tos, dst_port, metadata);
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
struct geneve_dev *geneve = netdev_priv(dev);
- if (!hlist_unhashed(&geneve->hlist))
- hlist_del_rcu(&geneve->hlist);
-
list_del(&geneve->next);
unregister_netdevice_queue(dev, head);
}
@@ -402,6 +913,8 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */
+ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
0;
}
@@ -422,6 +935,14 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
+ goto nla_put_failure;
+
+ if (geneve->collect_md) {
+ if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
+ goto nla_put_failure;
+ }
+
return 0;
nla_put_failure:
@@ -441,16 +962,34 @@ static struct rtnl_link_ops geneve_link_ops __read_mostly = {
.fill_info = geneve_fill_info,
};
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+ u8 name_assign_type, u16 dst_port)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(tb, 0, sizeof(tb));
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &geneve_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ return dev;
+}
+EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+
static __net_init int geneve_init_net(struct net *net)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
- unsigned int h;
INIT_LIST_HEAD(&gn->geneve_list);
-
- for (h = 0; h < VNI_HASH_SIZE; ++h)
- INIT_HLIST_HEAD(&gn->vni_list[h]);
-
+ INIT_LIST_HEAD(&gn->sock_list);
return 0;
}
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 83c7cce0d172..72c9f1f352b4 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt)
#define GETTICK(x) \
({ \
if (cpu_has_tsc) \
- rdtscl(x); \
+ x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#define GETTICK(x)
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 7856b6ccf5c5..d95a50ae996d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
dev->flags = 0;
+ dev->features = NETIF_F_LLTX; /* Allow recursion */
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
- if ((err = ax_open(ax->dev))) {
+ err = ax_open(ax->dev);
+ if (err)
goto out_free_netdev;
- }
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free_buffers;
/* after register_netdev() - because otherwise printk smashes the kernel */
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dd4544085db3..5fa98f599b3d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -162,6 +162,7 @@ struct netvsc_device_info {
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
u32 max_num_vrss_chns;
+ u32 num_chn;
};
enum rndis_device_state {
@@ -541,6 +542,29 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+struct nvsp_4_send_vf_association {
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 allocated;
+
+ /* Serial number of the VF to team with */
+ u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+ NVSP_DATAPATH_SYNTHETIC = 0,
+ NVSP_DATAPATH_VF,
+ NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+ u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+ struct nvsp_4_send_vf_association vf_assoc;
+ struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
enum nvsp_subchannel_operation {
NVSP_SUBCHANNEL_NONE = 0,
NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +602,7 @@ union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_4_message_uber v4_msg;
union nvsp_5_message_uber v5_msg;
} __packed;
@@ -589,6 +614,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
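+/* 68 is the minimum MTU an IPv4 link must support (RFC 791) */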
+#define NETVSC_MTU_MIN 68
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
@@ -670,6 +696,8 @@ struct netvsc_device {
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
+ spinlock_t sc_lock; /* Protects num_sc_offered variable */
+ u32 num_sc_offered;
atomic_t queue_sends[NR_CPUS];
/* Holds rndis device info */
@@ -688,6 +716,11 @@ struct netvsc_device {
/* The net device context */
struct net_device_context *nd_ctx;
+
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 vf_alloc;
+ /* Serial number of the VF to team with */
+ u32 vf_serial;
};
/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 23126a74f357..51e4c0fd0a74 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device,
if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
- /* NVSPv2 only: Send NDIS config */
+ /* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+ init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
@@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device,
static void netvsc_send_table(struct hv_device *hdev,
- struct vmpacket_descriptor *vmpkt)
+ struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
struct net_device *ndev;
- struct nvsp_message *nvmsg;
int i;
u32 count, *tab;
@@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev,
return;
ndev = nvscdev->ndev;
- nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
- (vmpkt->offset8 << 3));
-
- if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
- return;
-
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev,
nvscdev->send_table[i] = tab[i];
}
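+/* Record the VF association (NVSP v4) advertised by the host. */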
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+ struct nvsp_message *nvmsg)
+{
+ nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+ nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
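+/* Dispatch inband NVSP messages: the send indirection table (v5) and
+ * the VF association (v4).
+ */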
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+ struct netvsc_device *nvdev,
+ struct nvsp_message *nvmsg)
+{
+ switch (nvmsg->hdr.msg_type) {
+ case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+ netvsc_send_table(hdev, nvmsg);
+ break;
+
+ case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+ netvsc_send_vf(nvdev, nvmsg);
+ break;
+ }
+}
+
void netvsc_channel_cb(void *context)
{
int ret;
@@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context)
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
+ struct nvsp_message *nvmsg;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context)
if (ret == 0) {
if (bytes_recvd > 0) {
desc = (struct vmpacket_descriptor *)buffer;
+ nvmsg = (struct nvsp_message *)((unsigned long)
+ desc + (desc->offset8 << 3));
switch (desc->type) {
case VM_PKT_COMP:
netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context)
break;
case VM_PKT_DATA_INBAND:
- netvsc_send_table(device, desc);
+ netvsc_receive_inband(device,
+ net_device,
+ nvmsg);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 358475ed9b59..409b48e1e589 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net)
return ret;
}
- netif_tx_start_all_queues(net);
+ netif_tx_wake_all_queues(net);
nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
@@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
int ret;
+ u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+ struct vmbus_channel *chn;
netif_tx_disable(net);
/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
cancel_work_sync(&net_device_ctx->work);
ret = rndis_filter_close(device_obj);
- if (ret != 0)
+ if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
+ return ret;
+ }
+
+ /* Ensure pending bytes in ring are read */
+ while (true) {
+ aread = 0;
+ for (i = 0; i < nvdev->num_chn; i++) {
+ chn = nvdev->chn_table[i];
+ if (!chn)
+ continue;
+
+ hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+ &awrite);
+
+ if (aread)
+ break;
+
+ hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+ &awrite);
+
+ if (aread)
+ break;
+ }
+
+ retry++;
+ if (retry > retry_max || aread == 0)
+ break;
+
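+		/* exponential backoff, capped at one second per retry */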
+ msleep(msec);
+
+ if (msec < 1000)
+ msec *= 2;
+ }
+
+ if (aread) {
+ netdev_err(net, "Ring buffer not empty after closing rndis\n");
+ ret = -ETIMEDOUT;
+ }
return ret;
}
@@ -198,7 +239,7 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
- if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
!(flow.basic.n_proto == htons(ETH_P_IP) ||
flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
@@ -729,6 +770,104 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+static int netvsc_set_channels(struct net_device *net,
+ struct ethtool_channels *channels)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device_info device_info;
+ u32 num_chn;
+ u32 max_chn;
+ int ret = 0;
+ bool recovering = false;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ num_chn = nvdev->num_chn;
+ max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
+ if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
+ pr_info("vRSS unsupported before NVSP Version 5\n");
+ return -EINVAL;
+ }
+
+ /* We do not support rx, tx, or other */
+ if (!channels ||
+ channels->rx_count ||
+ channels->tx_count ||
+ channels->other_count ||
+ (channels->combined_count < 1))
+ return -EINVAL;
+
+ if (channels->combined_count > max_chn) {
+ pr_info("combined channels too high, using %d\n", max_chn);
+ channels->combined_count = max_chn;
+ }
+
+ ret = netvsc_close(net);
+ if (ret)
+ goto out;
+
+ do_set:
+ nvdev->start_remove = true;
+ rndis_filter_device_remove(dev);
+
+ nvdev->num_chn = channels->combined_count;
+
+ net_device_ctx->device_ctx = dev;
+ hv_set_drvdata(dev, net);
+
+ memset(&device_info, 0, sizeof(device_info));
+ device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
+ device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
+
+ ret = rndis_filter_device_add(dev, &device_info);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ nvdev = hv_get_drvdata(dev);
+
+ ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
+ if (ret) {
+ if (recovering) {
+ netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
+ return ret;
+ }
+ goto recover;
+ }
+
+ out:
+ netvsc_open(net);
+
+ return ret;
+
+ recover:
+ /* If the above failed, we attempt to recover through the same
+ * process but with the original number of channels.
+ */
+ netdev_err(net, "could not set channels, recovering\n");
+ recovering = true;
+ channels->combined_count = num_chn;
+ goto do_set;
+}
+
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -736,6 +875,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
+ int ret = 0;
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
@@ -743,25 +883,31 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU - ETH_HLEN;
- /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
- if (mtu < ETH_DATA_LEN || mtu > limit)
+ if (mtu < NETVSC_MTU_MIN || mtu > limit)
return -EINVAL;
+ ret = netvsc_close(ndev);
+ if (ret)
+ goto out;
+
nvdev->start_remove = true;
- cancel_work_sync(&ndevctx->work);
- netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
+
+ memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
+ device_info.num_chn = nvdev->num_chn;
device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
- netif_tx_wake_all_queues(ndev);
- return 0;
+out:
+ netvsc_open(ndev);
+
+ return ret;
}
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
@@ -844,6 +990,7 @@ static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_channels = netvsc_get_channels,
+ .set_channels = netvsc_set_channels,
};
static const struct net_device_ops device_ops = {
@@ -977,6 +1124,7 @@ static int netvsc_probe(struct hv_device *dev,
net->needed_headroom = max_needed_headroom;
/* Notify the netvsc driver of the new device */
+ memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 236aeb76ef22..5931a799aa17 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
struct netvsc_device *nvscdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
+ unsigned long flags;
nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
+ spin_lock_irqsave(&nvscdev->sc_lock, flags);
+ nvscdev->num_sc_offered--;
+ spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+ if (nvscdev->num_sc_offered == 0)
+ complete(&nvscdev->channel_init_wait);
+
if (chn_index >= nvscdev->num_chn)
return;
@@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev,
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
u32 num_rss_qs;
+ u32 sc_delta;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
+ unsigned long flags;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
+ spin_lock_init(&net_device->sc_lock);
+
net_device->extension = rndis_device;
rndis_device->net_dev = net_device;
@@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_query_device(rndis_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
- if (ret == 0 && size == sizeof(u32))
+ if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
net_device->ndev->mtu = mtu;
/* Get the mac address */
@@ -1114,7 +1125,15 @@ int rndis_filter_device_add(struct hv_device *dev,
*/
node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
num_possible_rss_qs = cpumask_weight(node_cpu_mask);
- net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+ /* We will use the given number of channels if available. */
+ if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
+ net_device->num_chn = device_info->num_chn;
+ else
+ net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+ num_rss_qs = net_device->num_chn - 1;
+ net_device->num_sc_offered = num_rss_qs;
if (net_device->num_chn == 1)
goto out;
@@ -1157,11 +1176,25 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+	/*
+	 * Wait for the host to send us the sub-channel offers.
+	 * The host may accept fewer channels than requested, so
+	 * discount the offers that will never arrive before waiting.
+	 */
+ spin_lock_irqsave(&net_device->sc_lock, flags);
+ sc_delta = num_rss_qs - (net_device->num_chn - 1);
+ net_device->num_sc_offered -= sc_delta;
+ spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+ while (net_device->num_sc_offered != 0) {
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
+ if (t == 0)
+ WARN(1, "Netvsc: Waiting for sub-channel processing");
+ }
out:
if (ret) {
net_device->max_chn = 1;
net_device->num_chn = 1;
}
+
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f7bd9f3ddaac..6422caac8d40 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -97,9 +97,7 @@ struct at86rf230_local {
struct at86rf230_state_change irq;
- bool tx_aret;
unsigned long cal_timeout;
- s8 max_frame_retries;
bool is_tx;
bool is_tx_from_off;
u8 tx_retry;
@@ -545,7 +543,9 @@ at86rf230_async_state_delay(void *context)
}
/* Default delay is 1us in the most cases */
- tim = ktime_set(0, NSEC_PER_USEC);
+ udelay(1);
+ at86rf230_async_state_timer(&ctx->timer);
+ return;
change:
hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
@@ -649,7 +649,7 @@ at86rf230_tx_complete(void *context)
enable_irq(ctx->irq);
- ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
}
static void
@@ -758,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
{
if (lp->is_tx) {
lp->is_tx = 0;
-
- if (lp->tx_aret)
- at86rf230_async_state_change(lp, &lp->irq,
- STATE_FORCE_TX_ON,
- at86rf230_tx_trac_status,
- true);
- else
- at86rf230_async_state_change(lp, &lp->irq,
- STATE_RX_AACK_ON,
- at86rf230_tx_complete,
- true);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_FORCE_TX_ON,
+ at86rf230_tx_trac_status,
+ true);
} else {
at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
at86rf230_rx_trac_check, true);
@@ -874,24 +867,16 @@ at86rf230_xmit_start(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- /* In ARET mode we need to go into STATE_TX_ARET_ON after we
-	 * are in STATE_TX_ON. The path differs here, so we change
- * the complete handler.
- */
- if (lp->tx_aret) {
- if (lp->is_tx_from_off) {
- lp->is_tx_from_off = false;
- at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_write_frame,
- false);
- } else {
- at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_xmit_tx_on,
- false);
- }
+	/* check whether this transmit starts from the off state */
+ if (lp->is_tx_from_off) {
+ lp->is_tx_from_off = false;
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+ at86rf230_write_frame,
+ false);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_write_frame, false);
+ at86rf230_xmit_tx_on,
+ false);
}
}
@@ -1265,15 +1250,8 @@ static int
at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct at86rf230_local *lp = hw->priv;
- int rc = 0;
-
- lp->tx_aret = retries >= 0;
- lp->max_frame_retries = retries;
- if (retries >= 0)
- rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
- return rc;
+ return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
}
static int
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index b6fc29579667..c5b54a15fc4c 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi,
if (!spi_pdata)
return -ENOENT;
*pdata = *spi_pdata;
+ priv->fifo_pin = pdata->fifo;
return 0;
}
@@ -1151,7 +1152,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
static struct spi_driver cc2520_driver = {
.driver = {
.name = "cc2520",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(cc2520_of_ids),
},
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 2549760e039f..997724b8e434 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
static struct spi_driver mrf24j40_driver = {
.driver = {
.name = "mrf24j40",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = mrf24j40_ids,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 94570aace241..cc56fac3c3f8 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -38,69 +38,68 @@
#include <net/net_namespace.h>
#define TX_Q_LIMIT 32
-struct ifb_private {
+struct ifb_q_private {
+ struct net_device *dev;
struct tasklet_struct ifb_tasklet;
- int tasklet_pending;
-
- struct u64_stats_sync rsync;
+ int tasklet_pending;
+ int txqnum;
struct sk_buff_head rq;
- u64 rx_packets;
- u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync rsync;
struct u64_stats_sync tsync;
+ u64 tx_packets;
+ u64 tx_bytes;
struct sk_buff_head tq;
- u64 tx_packets;
- u64 tx_bytes;
-};
+} ____cacheline_aligned_in_smp;
-static int numifbs = 2;
+struct ifb_dev_private {
+ struct ifb_q_private *tx_private;
+};
-static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
{
- struct net_device *_dev = (struct net_device *)dev;
- struct ifb_private *dp = netdev_priv(_dev);
+ struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
struct netdev_queue *txq;
struct sk_buff *skb;
- txq = netdev_get_tx_queue(_dev, 0);
- if ((skb = skb_peek(&dp->tq)) == NULL) {
- if (__netif_tx_trylock(txq)) {
- skb_queue_splice_tail_init(&dp->rq, &dp->tq);
- __netif_tx_unlock(txq);
- } else {
- /* reschedule */
+ txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+ skb = skb_peek(&txp->tq);
+ if (!skb) {
+ if (!__netif_tx_trylock(txq))
goto resched;
- }
+ skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+ __netif_tx_unlock(txq);
}
- while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+ while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
- u64_stats_update_begin(&dp->tsync);
- dp->tx_packets++;
- dp->tx_bytes += skb->len;
- u64_stats_update_end(&dp->tsync);
+ u64_stats_update_begin(&txp->tsync);
+ txp->tx_packets++;
+ txp->tx_bytes += skb->len;
+ u64_stats_update_end(&txp->tsync);
rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+ skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
if (!skb->dev) {
rcu_read_unlock();
dev_kfree_skb(skb);
- _dev->stats.tx_dropped++;
- if (skb_queue_len(&dp->tq) != 0)
+ txp->dev->stats.tx_dropped++;
+ if (skb_queue_len(&txp->tq) != 0)
goto resched;
break;
}
rcu_read_unlock();
- skb->skb_iif = _dev->ifindex;
+ skb->skb_iif = txp->dev->ifindex;
if (from & AT_EGRESS) {
dev_queue_xmit(skb);
@@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev)
}
if (__netif_tx_trylock(txq)) {
- if ((skb = skb_peek(&dp->rq)) == NULL) {
- dp->tasklet_pending = 0;
- if (netif_queue_stopped(_dev))
- netif_wake_queue(_dev);
+ skb = skb_peek(&txp->rq);
+ if (!skb) {
+ txp->tasklet_pending = 0;
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
} else {
__netif_tx_unlock(txq);
goto resched;
@@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev)
__netif_tx_unlock(txq);
} else {
resched:
- dp->tasklet_pending = 1;
- tasklet_schedule(&dp->ifb_tasklet);
+ txp->tasklet_pending = 1;
+ tasklet_schedule(&txp->ifb_tasklet);
}
}
@@ -132,29 +132,58 @@ resched:
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ifb_private *dp = netdev_priv(dev);
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp = dp->tx_private;
unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&dp->rsync);
- stats->rx_packets = dp->rx_packets;
- stats->rx_bytes = dp->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
-
- do {
- start = u64_stats_fetch_begin_irq(&dp->tsync);
-
- stats->tx_packets = dp->tx_packets;
- stats->tx_bytes = dp->tx_bytes;
-
- } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+ u64 packets, bytes;
+ int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ do {
+ start = u64_stats_fetch_begin_irq(&txp->rsync);
+ packets = txp->rx_packets;
+ bytes = txp->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&txp->tsync);
+ packets = txp->tx_packets;
+ bytes = txp->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
return stats;
}
+static int ifb_dev_init(struct net_device *dev)
+{
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp;
+ int i;
+
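+	/* One ifb_q_private per tx queue: each queue runs its own
+	 * tasklet and keeps its own skb queues and stats.
+	 */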
+ txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+ if (!txp)
+ return -ENOMEM;
+ dp->tx_private = txp;
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ txp->txqnum = i;
+ txp->dev = dev;
+ __skb_queue_head_init(&txp->rq);
+ __skb_queue_head_init(&txp->tq);
+ u64_stats_init(&txp->rsync);
+ u64_stats_init(&txp->tsync);
+ tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+ (unsigned long)txp);
+ netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+ }
+ return 0;
+}
static const struct net_device_ops ifb_netdev_ops = {
.ndo_open = ifb_open,
@@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_get_stats64 = ifb_stats64,
.ndo_start_xmit = ifb_xmit,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_init = ifb_dev_init,
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
@@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = {
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
+static void ifb_dev_free(struct net_device *dev)
+{
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp = dp->tx_private;
+ int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+ tasklet_kill(&txp->ifb_tasklet);
+ __skb_queue_purge(&txp->rq);
+ __skb_queue_purge(&txp->tq);
+ }
+ kfree(dp->tx_private);
+ free_netdev(dev);
+}
+
static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
- dev->destructor = free_netdev;
dev->netdev_ops = &ifb_netdev_ops;
/* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
eth_hw_addr_random(dev);
+ dev->destructor = ifb_dev_free;
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
+ struct ifb_dev_private *dp = netdev_priv(dev);
u32 from = G_TC_FROM(skb->tc_verd);
+ struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
- u64_stats_update_begin(&dp->rsync);
- dp->rx_packets++;
- dp->rx_bytes += skb->len;
- u64_stats_update_end(&dp->rsync);
+ u64_stats_update_begin(&txp->rsync);
+ txp->rx_packets++;
+ txp->rx_bytes += skb->len;
+ u64_stats_update_end(&txp->rsync);
if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
dev_kfree_skb(skb);
@@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
- netif_stop_queue(dev);
- }
+ if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
- __skb_queue_tail(&dp->rq, skb);
- if (!dp->tasklet_pending) {
- dp->tasklet_pending = 1;
- tasklet_schedule(&dp->ifb_tasklet);
+ __skb_queue_tail(&txp->rq, skb);
+ if (!txp->tasklet_pending) {
+ txp->tasklet_pending = 1;
+ tasklet_schedule(&txp->ifb_tasklet);
}
return NETDEV_TX_OK;
@@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
static int ifb_close(struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
-
- tasklet_kill(&dp->ifb_tasklet);
- netif_stop_queue(dev);
- __skb_queue_purge(&dp->rq);
- __skb_queue_purge(&dp->tq);
+ netif_tx_stop_all_queues(dev);
return 0;
}
static int ifb_open(struct net_device *dev)
{
- struct ifb_private *dp = netdev_priv(dev);
-
- tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
- __skb_queue_head_init(&dp->rq);
- __skb_queue_head_init(&dp->tq);
- netif_start_queue(dev);
-
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
.kind = "ifb",
- .priv_size = sizeof(struct ifb_private),
+ .priv_size = sizeof(struct ifb_dev_private),
.setup = ifb_setup,
.validate = ifb_validate,
};
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
- struct ifb_private *dp;
int err;
- dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+ dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
NET_NAME_UNKNOWN, ifb_setup);
if (!dev_ifb)
return -ENOMEM;
- dp = netdev_priv(dev_ifb);
- u64_stats_init(&dp->rsync);
- u64_stats_init(&dp->tsync);
-
dev_ifb->rtnl_link_ops = &ifb_link_ops;
err = register_netdevice(dev_ifb);
if (err < 0)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
struct ipvl_port *port;
struct net_device *phy_dev;
struct list_head addrs;
- int ipv4cnt;
- int ipv6cnt;
struct ipvl_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
return rcu_dereference(d->rx_handler_data);
}
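+/* As ipvlan_port_get_rcu(), but for callers in softirq (BH) context,
+ * e.g. the xmit path.
+ */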
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+ return rcu_dereference_bh(d->rx_handler_data);
+}
+
static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
{
return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
hlist_del_init_rcu(&addr->hlnode);
- if (sync)
- synchronize_rcu();
}
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+ struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
if (!port)
goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..a9268db4e349 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_add(ipvlan, addr);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+
return dev_uc_add(phy_dev, phy_dev->dev_addr);
}
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_del(phy_dev, phy_dev->dev_addr);
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry(addr, &ipvlan->addrs, anode)
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- }
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr);
+
return 0;
}
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
INIT_LIST_HEAD(&ipvlan->addrs);
- ipvlan->ipv4cnt = 0;
- ipvlan->ipv6cnt = 0;
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
- list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
- ipvlan_ht_addr_del(addr, !dev->dismantle);
- list_del(&addr->anode);
- }
+ list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+ ipvlan_ht_addr_del(addr);
+ list_del(&addr->anode);
+ kfree_rcu(addr, rcu);
}
+
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -524,12 +520,11 @@ static void ipvlan_link_setup(struct net_device *dev)
ether_setup(dev);
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &ipvlan_header_ops;
dev->ethtool_ops = &ipvlan_ethtool_ops;
- dev->tx_queue_len = 0;
}
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
@@ -627,7 +622,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv6cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -645,10 +640,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv6cnt--;
- WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
return;
@@ -661,6 +654,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ /* FIXME IPv6 autoconf calls us from bh without RTNL */
+ if (in_softirq())
+ return NOTIFY_DONE;
+
if (!netif_is_ipvlan(dev))
return NOTIFY_DONE;
@@ -699,7 +696,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
list_add_tail(&addr->anode, &ipvlan->addrs);
- ipvlan->ipv4cnt++;
+
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
@@ -717,10 +714,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
if (!addr)
return;
- ipvlan_ht_addr_del(addr, true);
+ ipvlan_ht_addr_del(addr);
list_del(&addr->anode);
- ipvlan->ipv4cnt--;
- WARN_ON(ipvlan->ipv4cnt < 0);
kfree_rcu(addr, rcu);
return;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c76283c2f84a..dc7d970bd1c0 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -165,10 +165,9 @@ static void loopback_setup(struct net_device *dev)
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
- dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9f59f17dc317..47da43595ac2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1047,6 +1047,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
#endif
.ndo_get_iflink = macvlan_dev_get_iflink,
+ .ndo_features_check = passthru_features_check,
};
void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index f8370808a018..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
int copylen = 0;
+ int depth;
bool zerocopy = false;
size_t linear;
ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_probe_transport_header(skb, ETH_HLEN);
+ /* Move network header to the right position for VLAN tagged packets */
+ if ((skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) &&
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
/* copy skb_ubuf_info for callback when skb has no error */
@@ -1355,6 +1362,7 @@ static void macvtap_exit(void)
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+ idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 34924dfadd00..7b7c70e2341e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -130,7 +130,7 @@ static const struct net_device_ops nlmon_ops = {
static void nlmon_setup(struct net_device *dev)
{
dev->type = ARPHRD_NETLINK;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &nlmon_ops;
dev->ethtool_ops = &nlmon_ethtool_ops;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..a9acf7156855 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -61,11 +61,21 @@ MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
+/* Time in usecs for tx resource reaper */
+static unsigned int tx_time = 1;
+
+/* Number of descriptors to free before resuming tx */
+static unsigned int tx_start = 10;
+
+/* Number of descriptors still available before stopping upper-layer tx */
+static unsigned int tx_stop = 5;
+
struct ntb_netdev {
struct list_head list;
struct pci_dev *pdev;
struct net_device *ndev;
struct ntb_transport_qp *qp;
+ struct timer_list tx_timer;
};
#define NTB_TX_TIMEOUT_MS 1000
@@ -102,6 +112,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+ if (len < 0) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_length_errors++;
+ goto enqueue_again;
+ }
+
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
return;
}
+enqueue_again:
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
@@ -129,11 +146,42 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
}
}
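+/* Stop the queue, then re-check the free-entry count: a concurrent
+ * completion may have freed descriptors after the caller's test. If
+ * space opened up the queue is restarted at once; otherwise the tx
+ * reaper timer is armed.
+ */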
+static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
+ struct ntb_transport_qp *qp, int size)
+{
+ struct ntb_netdev *dev = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ /* Make sure to see the latest value of ntb_transport_tx_free_entry()
+ * since the queue was last started.
+ */
+ smp_mb();
+
+ if (likely(ntb_transport_tx_free_entry(qp) < size)) {
+ mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
+ return -EBUSY;
+ }
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
+ struct ntb_transport_qp *qp, int size)
+{
+ if (netif_queue_stopped(ndev) ||
+ (ntb_transport_tx_free_entry(qp) >= size))
+ return 0;
+
+ return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
+}
+
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len)
{
struct net_device *ndev = qp_data;
struct sk_buff *skb;
+ struct ntb_netdev *dev = netdev_priv(ndev);
skb = data;
if (!skb || !ndev)
@@ -148,6 +196,15 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
}
dev_kfree_skb(skb);
+
+ if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+ /* Make sure anybody stopping the queue after this sees the new
+ * value of ntb_transport_tx_free_entry()
+ */
+ smp_mb();
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
}
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
@@ -156,10 +213,15 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
struct ntb_netdev *dev = netdev_priv(ndev);
int rc;
+ ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
if (rc)
goto err;
+ /* check for next submit */
+ ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
return NETDEV_TX_OK;
err:
@@ -168,6 +230,23 @@ err:
return NETDEV_TX_BUSY;
}
+static void ntb_netdev_tx_timer(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
+ mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+ } else {
+ /* Make sure anybody stopping the queue after this sees the new
+ * value of ntb_transport_tx_free_entry()
+ */
+ smp_mb();
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
+}
+
static int ntb_netdev_open(struct net_device *ndev)
{
struct ntb_netdev *dev = netdev_priv(ndev);
@@ -184,14 +263,17 @@ static int ntb_netdev_open(struct net_device *ndev)
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
- if (rc == -EINVAL) {
+ if (rc) {
dev_kfree_skb(skb);
goto err;
}
}
+ setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);
+
netif_carrier_off(ndev);
ntb_transport_link_up(dev->qp);
+ netif_start_queue(ndev);
return 0;
@@ -212,6 +294,8 @@ static int ntb_netdev_close(struct net_device *ndev)
while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
dev_kfree_skb(skb);
+ del_timer_sync(&dev->tx_timer);
+
return 0;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cf18940f4e84..c5ad98ace5d0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -14,6 +14,11 @@ if PHYLIB
comment "MII PHY device drivers"
+config AQUANTIA_PHY
+ tristate "Drivers for the Aquantia PHYs"
+ ---help---
+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
config AT803X_PHY
tristate "Drivers for Atheros AT803X PHYs"
---help---
@@ -54,6 +59,11 @@ config VITESSE_PHY
---help---
Currently supports the vsc8244
+config TERANETICS_PHY
+ tristate "Drivers for the Teranetics PHYs"
+ ---help---
+ Currently supports the Teranetics TN2020
+
config SMSC_PHY
tristate "Drivers for SMSC PHYs"
---help---
@@ -117,6 +127,11 @@ config DP83867_PHY
---help---
Currently supports the DP83867 PHY.
+config MICROCHIP_PHY
+ tristate "Drivers for Microchip PHYs"
+ help
+ Supports the LAN88XX PHYs.
+
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -145,13 +160,13 @@ config MDIO_GPIO
will be called mdio-gpio.
config MDIO_OCTEON
- tristate "Support for MDIO buses on Octeon SOCs"
- depends on CAVIUM_OCTEON_SOC
- default y
+ tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+ depends on 64BIT
help
- This module provides a driver for the Octeon MDIO busses.
- It is required by the Octeon Ethernet device drivers.
+ This module provides a driver for the Octeon and ThunderX MDIO
+ busses. It is required by the Octeon and ThunderX ethernet device
+ drivers.
If in doubt, say Y.
@@ -191,7 +206,7 @@ config MDIO_BUS_MUX_GPIO
config MDIO_BUS_MUX_MMIOREG
tristate "Support for MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO
+ depends on OF_MDIO && HAS_IOMEM
select MDIO_BUS_MUX
help
This module provides a driver for MDIO bus multiplexers that
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcc25a0c45cd..87f079c4b2c7 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,12 +3,14 @@
libphy-objs := phy.o phy_device.o mdio_bus.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
+obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
@@ -35,3 +37,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644
index 000000000000..d6111affbcb6
--- /dev/null
+++ b/drivers/net/phy/aquantia.c
@@ -0,0 +1,201 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202 0x03a1b445
+#define PHY_ID_AQ2104 0x03a1b460
+#define PHY_ID_AQR105 0x03a1b4a2
+#define PHY_ID_AQR405 0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
+ SUPPORTED_1000baseT_Full | \
+ SUPPORTED_100baseT_Full | \
+ PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+ phydev->supported = PHY_AQUANTIA_FEATURES;
+ phydev->advertising = phydev->supported;
+
+ return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
+ } else {
+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
+ }
+
+ return err;
+}
+
+static int aquantia_ack_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
+ return (reg < 0) ? reg : 0;
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+ int reg;
+
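+	/* Link status in MDIO_STAT1 is latched; read twice so the
+	 * second read reflects the current state.
+	 */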
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
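+	/* Read the vendor speed status (0xc800) twice with a short
+	 * delay to get a settled value.
+	 */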
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+ mdelay(10);
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+ switch (reg) {
+ case 0x9:
+ phydev->speed = SPEED_2500;
+ break;
+ case 0x5:
+ phydev->speed = SPEED_1000;
+ break;
+ case 0x3:
+ phydev->speed = SPEED_100;
+ break;
+ case 0x7:
+ default:
+ phydev->speed = SPEED_10000;
+ break;
+ }
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+ .phy_id = PHY_ID_AQ1202,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQ1202",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQ2104,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQ2104",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQR105,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR105",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+{
+ .phy_id = PHY_ID_AQR405,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Aquantia AQR405",
+ .features = PHY_AQUANTIA_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .aneg_done = aquantia_aneg_done,
+ .config_aneg = aquantia_config_aneg,
+ .config_intr = aquantia_config_intr,
+ .ack_interrupt = aquantia_ack_interrupt,
+ .read_status = aquantia_read_status,
+ .driver = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+ return phy_drivers_register(aquantia_driver,
+ ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+ return phy_drivers_unregister(aquantia_driver,
+ ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+ { PHY_ID_AQ1202, 0xfffffff0 },
+ { PHY_ID_AQ2104, 0xfffffff0 },
+ { PHY_ID_AQR105, 0xfffffff0 },
+ { PHY_ID_AQR405, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 00cb41e71312..185b03c08e16 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..32f10662f4ac 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -123,12 +123,8 @@ static int dp83867_of_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = of_property_read_u32(of_node, "ti,fifo-depth",
+ return of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
- if (ret)
- return ret;
-
- return 0;
}
#else
static int dp83867_of_init(struct phy_device *phydev)
@@ -164,7 +160,7 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
- if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
(phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
DP83867_DEVADDR, phydev->addr);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1960b46add65..fb1299c6326e 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -22,6 +22,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/gpio.h>
#define MII_REGS_NUM 29
@@ -38,6 +39,7 @@ struct fixed_phy {
struct fixed_phy_status status;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
+ int link_gpio;
};
static struct platform_device *pdev;
@@ -52,58 +54,86 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
u16 lpagb = 0;
u16 lpa = 0;
- if (fp->status.duplex) {
- bmcr |= BMCR_FULLDPLX;
+ if (gpio_is_valid(fp->link_gpio))
+ fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
+ if (fp->status.duplex) {
switch (fp->status.speed) {
case 1000:
bmsr |= BMSR_ESTATEN;
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000FULL;
break;
case 100:
bmsr |= BMSR_100FULL;
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100FULL;
break;
case 10:
bmsr |= BMSR_10FULL;
- lpa |= LPA_10FULL;
break;
default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
+ break;
}
} else {
switch (fp->status.speed) {
case 1000:
bmsr |= BMSR_ESTATEN;
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000HALF;
break;
case 100:
bmsr |= BMSR_100HALF;
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100HALF;
break;
case 10:
bmsr |= BMSR_10HALF;
- lpa |= LPA_10HALF;
break;
default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
+ break;
}
}
- if (fp->status.link)
+ if (fp->status.link) {
bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
- if (fp->status.pause)
- lpa |= LPA_PAUSE_CAP;
+ if (fp->status.duplex) {
+ bmcr |= BMCR_FULLDPLX;
+
+ switch (fp->status.speed) {
+ case 1000:
+ bmcr |= BMCR_SPEED1000;
+ lpagb |= LPA_1000FULL;
+ break;
+ case 100:
+ bmcr |= BMCR_SPEED100;
+ lpa |= LPA_100FULL;
+ break;
+ case 10:
+ lpa |= LPA_10FULL;
+ break;
+ default:
+ pr_warn("fixed phy: unknown speed\n");
+ return -EINVAL;
+ }
+ } else {
+ switch (fp->status.speed) {
+ case 1000:
+ bmcr |= BMCR_SPEED1000;
+ lpagb |= LPA_1000HALF;
+ break;
+ case 100:
+ bmcr |= BMCR_SPEED100;
+ lpa |= LPA_100HALF;
+ break;
+ case 10:
+ lpa |= LPA_10HALF;
+ break;
+ default:
+ pr_warn("fixed phy: unknown speed\n");
+ return -EINVAL;
+ }
+ }
- if (fp->status.asym_pause)
- lpa |= LPA_PAUSE_ASYM;
+ if (fp->status.pause)
+ lpa |= LPA_PAUSE_CAP;
+
+ if (fp->status.asym_pause)
+ lpa |= LPA_PAUSE_ASYM;
+ }
fp->regs[MII_PHYSID1] = 0;
fp->regs[MII_PHYSID2] = 0;
@@ -213,7 +243,8 @@ int fixed_phy_update_state(struct phy_device *phydev,
EXPORT_SYMBOL(fixed_phy_update_state);
int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status)
+ struct fixed_phy_status *status,
+ int link_gpio)
{
int ret;
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -229,15 +260,26 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
fp->addr = phy_addr;
fp->status = *status;
+ fp->link_gpio = link_gpio;
+
+ if (gpio_is_valid(fp->link_gpio)) {
+ ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN,
+ "fixed-link-gpio-link");
+ if (ret)
+ goto err_regs;
+ }
ret = fixed_phy_update_regs(fp);
if (ret)
- goto err_regs;
+ goto err_gpio;
list_add_tail(&fp->node, &fmb->phys);
return 0;
+err_gpio:
+ if (gpio_is_valid(fp->link_gpio))
+ gpio_free(fp->link_gpio);
err_regs:
kfree(fp);
return ret;
@@ -252,6 +294,8 @@ void fixed_phy_del(int phy_addr)
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
list_del(&fp->node);
+ if (gpio_is_valid(fp->link_gpio))
+ gpio_free(fp->link_gpio);
kfree(fp);
return;
}
@@ -264,6 +308,7 @@ static DEFINE_SPINLOCK(phy_fixed_addr_lock);
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
+ int link_gpio,
struct device_node *np)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -280,7 +325,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
phy_addr = phy_fixed_addr++;
spin_unlock(&phy_fixed_addr_lock);
- ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+ ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
if (ret < 0)
return ERR_PTR(ret);
@@ -290,8 +335,30 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return ERR_PTR(-EINVAL);
}
+ /* propagate the fixed link values to struct phy_device */
+ phy->link = status->link;
+ if (status->link) {
+ phy->speed = status->speed;
+ phy->duplex = status->duplex;
+ phy->pause = status->pause;
+ phy->asym_pause = status->asym_pause;
+ }
+
of_node_get(np);
phy->dev.of_node = np;
+ phy->is_pseudo_fixed_link = true;
+
+ switch (status->speed) {
+ case SPEED_1000:
+ phy->supported = PHY_1000BT_FEATURES;
+ break;
+ case SPEED_100:
+ phy->supported = PHY_100BT_FEATURES;
+ break;
+ case SPEED_10:
+ default:
+ phy->supported = PHY_10BT_FEATURES;
+ }
ret = phy_device_register(phy);
if (ret) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f721444c2b0a..e6897b6a8a53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -48,8 +48,11 @@
#define MII_M1011_IMASK_CLEAR 0x0000
#define MII_M1011_PHY_SCR 0x10
+#define MII_M1011_PHY_SCR_MDI 0x0000
+#define MII_M1011_PHY_SCR_MDI_X 0x0020
#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060
+#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16
#define MII_M1145_PHY_EXT_SR 0x1b
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
@@ -159,6 +162,43 @@ static int marvell_config_intr(struct phy_device *phydev)
return err;
}
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+ int reg;
+ int err;
+ int val;
+
+ /* get the current settings */
+ reg = phy_read(phydev, MII_M1011_PHY_SCR);
+ if (reg < 0)
+ return reg;
+
+ val = reg;
+ val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+ switch (polarity) {
+ case ETH_TP_MDI:
+ val |= MII_M1011_PHY_SCR_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val |= MII_M1011_PHY_SCR_MDI_X;
+ break;
+ case ETH_TP_MDI_AUTO:
+ case ETH_TP_MDI_INVALID:
+ default:
+ val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+ break;
+ }
+
+ if (val != reg) {
+ /* Set the new polarity value in the register */
+ err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
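
marvell_set_polarity() follows the usual MDIO read-modify-write shape: read the register, clear the whole two-bit MDI field, set the new mode, and skip the (slow) bus write when nothing changed. A standalone sketch of that idiom, reusing the field values from the hunk above and a plain variable as a stand-in for the PHY register:

#include <stdio.h>

#define PHY_SCR_MDI_X      0x0020	/* force MDI-X (value from the hunk above) */
#define PHY_SCR_AUTO_CROSS 0x0060	/* mask covering the whole MDI field */

static int shadow = 0x1078;		/* pretend PHY register contents */

static int reg_read(void) { return shadow; }
static void reg_write(int v) { shadow = v; printf("bus write 0x%04x\n", v); }

int main(void)
{
	int reg = reg_read();
	int val = reg & ~PHY_SCR_AUTO_CROSS;	/* clear the field first */

	val |= PHY_SCR_MDI_X;			/* then set the new mode */
	if (val != reg)				/* avoid a pointless MDIO write */
		reg_write(val);
	return 0;
}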
+
static int marvell_config_aneg(struct phy_device *phydev)
{
int err;
@@ -191,8 +231,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_M1011_PHY_SCR,
- MII_M1011_PHY_SCR_AUTO_CROSS);
+ err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
@@ -514,6 +553,16 @@ static int m88e1111_config_init(struct phy_device *phydev)
err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
if (err < 0)
return err;
+
+ /* make sure copper is selected */
+ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
+ err & (~0xff));
+ if (err < 0)
+ return err;
}
if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c838ad6155f7..fcf4e4df7cc8 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -7,6 +7,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -14,11 +15,12 @@
#include <linux/phy.h>
#include <linux/io.h>
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
#define SMI_CMD 0x0
#define SMI_WR_DAT 0x8
@@ -26,6 +28,79 @@
#define SMI_CLK 0x18
#define SMI_EN 0x20
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#endif
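
OCT_MDIO_BITFIELD_FIELD mirrors the kernel's BITFIELD_FIELD trick: because the invocations nest, the field list is emitted most-significant-first on big-endian and in reverse on little-endian, so one source layout describes the same hardware register under either bitfield allocation order. A minimal userspace sketch (the 48/16 split is illustrative, not a real SMI register; the trailing ";))" mirrors the kernel idiom above):

#include <stdint.h>
#include <stdio.h>

#ifdef __BIG_ENDIAN_BITFIELD
#define BITFIELD_FIELD(field, more) field; more
#else
#define BITFIELD_FIELD(field, more) more field;
#endif

union demo_reg {
	uint64_t u64;
	struct {
		BITFIELD_FIELD(uint64_t reserved:48,
		BITFIELD_FIELD(uint64_t dat:16,
		;))
	} s;
};

int main(void)
{
	union demo_reg r = { .u64 = 0 };

	r.s.dat = 0xBEEF;	/* the 16-bit field lands in the low bits */
	printf("raw = 0x%016llx\n", (unsigned long long)r.u64);
	return 0;
}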
+
+union cvmx_smix_clk {
+ u64 u64;
+ struct cvmx_smix_clk_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+ OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+ OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+ ;))))))))))
+ } s;
+};
+
+union cvmx_smix_cmd {
+ u64 u64;
+ struct cvmx_smix_cmd_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+ ;))))))
+ } s;
+};
+
+union cvmx_smix_en {
+ u64 u64;
+ struct cvmx_smix_en_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+ OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+ ;))
+ } s;
+};
+
+union cvmx_smix_rd_dat {
+ u64 u64;
+ struct cvmx_smix_rd_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
+union cvmx_smix_wr_dat {
+ u64 u64;
+ struct cvmx_smix_wr_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
enum octeon_mdiobus_mode {
UNINIT = 0,
C22,
@@ -41,6 +116,21 @@ struct octeon_mdiobus {
int phy_irq[PHY_MAX_ADDR];
};
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+ cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+ return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr) readq_relaxed((void *)addr)
+#endif
+
static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
enum octeon_mdiobus_mode m)
{
@@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
if (m == p->mode)
return;
- smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+ smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
smi_clk.s.mode = (m == C45) ? 1 : 0;
smi_clk.s.preamble = 1;
- cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+ oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
p->mode = m;
}
@@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
smi_wr.u64 = 0;
smi_wr.s.dat = regnum & 0xffff;
- cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
regnum = (regnum >> 16) & 0x1f;
@@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
@@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
smi_cmd.s.phy_op = op;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+ smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
} while (smi_rd.s.pending && --timeout);
if (smi_rd.s.val)
@@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
smi_wr.u64 = 0;
smi_wr.s.dat = val;
- cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = op;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
/* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
__delay(1000);
- smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
@@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return -ENOMEM;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
if (res_mem == NULL) {
dev_err(&pdev->dev, "found no memory resource\n");
- err = -ENXIO;
- goto fail;
+ return -ENXIO;
}
+
bus->mdio_phys = res_mem->start;
bus->regsize = resource_size(res_mem);
+
if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
res_mem->name)) {
dev_err(&pdev->dev, "request_mem_region failed\n");
- goto fail;
+ return -ENXIO;
}
+
bus->register_base =
(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+ if (!bus->register_base) {
+ dev_err(&pdev->dev, "devm_ioremap failed\n");
+ return -ENOMEM;
+ }
bus->mii_bus = mdiobus_alloc();
-
if (!bus->mii_bus)
goto fail;
smi_en.u64 = 0;
smi_en.s.en = 1;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
bus->mii_bus->priv = bus;
bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@ fail_register:
mdiobus_free(bus->mii_bus);
fail:
smi_en.u64 = 0;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
}
@@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
mdiobus_unregister(bus->mii_bus);
mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
- cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..02a4615b65f8 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -303,12 +303,12 @@ void mdiobus_unregister(struct mii_bus *bus)
BUG_ON(bus->state != MDIOBUS_REGISTERED);
bus->state = MDIOBUS_UNREGISTERED;
- device_del(&bus->dev);
for (i = 0; i < PHY_MAX_ADDR; i++) {
if (bus->phy_map[i])
device_unregister(&bus->phy_map[i]->dev);
bus->phy_map[i] = NULL;
}
+ device_del(&bus->dev);
}
EXPORT_SYMBOL(mdiobus_unregister);
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
if (of_driver_match_device(dev, drv))
return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return (phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask);
+ if (phydev->is_c45) {
+ for (i = 1; i < num_ids; i++) {
+ if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+ continue;
+
+ if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->c45_ids.device_ids[i] &
+ phydrv->phy_id_mask))
+ return 1;
+ }
+ return 0;
+ } else {
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
+ }
}
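
The clause-45 branch above matches a driver against any MMD that is actually present in the package: bit i of devices_in_package gates whether device_ids[i] participates. A standalone sketch of that rule, with hypothetical driver/device structs in place of the real ones from <linux/phy.h>:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_IDS 8

struct drv { uint32_t phy_id, phy_id_mask; };
struct c45 { uint32_t devices_in_package; uint32_t device_ids[NUM_IDS]; };

static bool c45_match(const struct drv *d, const struct c45 *ids)
{
	for (int i = 1; i < NUM_IDS; i++) {
		if (!(ids->devices_in_package & (1u << i)))
			continue;	/* this MMD is absent from the package */
		if ((d->phy_id & d->phy_id_mask) ==
		    (ids->device_ids[i] & d->phy_id_mask))
			return true;	/* any present MMD ID may match */
	}
	return false;
}

int main(void)
{
	struct drv d = { .phy_id = 0x00a19410, .phy_id_mask = 0xffffffff };
	struct c45 ids = { .devices_in_package = 1u << 3 };

	ids.device_ids[3] = 0x00a19410;	/* the TN2020 ID used later in this patch */
	printf("match = %d\n", c45_match(&d, &ids));
	return 0;
}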
#ifdef CONFIG_PM
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
new file mode 100644
index 000000000000..c0a20ebd083b
--- /dev/null
+++ b/drivers/net/phy/microchip.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/microchipphy.h>
+
+#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC "Microchip LAN88XX PHY driver"
+
+struct lan88xx_priv {
+ int chip_id;
+ int chip_rev;
+ __u32 wolopts;
+};
+
+static int lan88xx_phy_config_intr(struct phy_device *phydev)
+{
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* unmask all sources and clear them before enabling */
+ rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
+ rc = phy_read(phydev, LAN88XX_INT_STS);
+ rc = phy_write(phydev, LAN88XX_INT_MASK,
+ LAN88XX_INT_MASK_MDINTPIN_EN_ |
+ LAN88XX_INT_MASK_LINK_CHANGE_);
+ } else {
+ rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+ }
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read(phydev, LAN88XX_INT_STS);
+
+ return rc < 0 ? rc : 0;
+}
+
+int lan88xx_suspend(struct phy_device *phydev)
+{
+ struct lan88xx_priv *priv = phydev->priv;
+
+ /* do not power down PHY when WOL is enabled */
+ if (!priv->wolopts)
+ genphy_suspend(phydev);
+
+ return 0;
+}
+
+static int lan88xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct lan88xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->wolopts = 0;
+
+ /* these values can be used to identify the internal PHY */
+ priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID,
+ 3, phydev->addr);
+ priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
+ 3, phydev->addr);
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static void lan88xx_remove(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct lan88xx_priv *priv = phydev->priv;
+
+ if (priv)
+ devm_kfree(dev, priv);
+}
+
+static int lan88xx_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan88xx_priv *priv = phydev->priv;
+
+ priv->wolopts = wol->wolopts;
+
+ return 0;
+}
+
+static struct phy_driver microchip_phy_driver[] = {
+{
+ .phy_id = 0x0007c130,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Microchip LAN88xx",
+
+ .features = (PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+ .probe = lan88xx_probe,
+ .remove = lan88xx_remove,
+
+ .config_init = genphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+
+ .ack_interrupt = lan88xx_phy_ack_interrupt,
+ .config_intr = lan88xx_phy_config_intr,
+
+ .suspend = lan88xx_suspend,
+ .resume = genphy_resume,
+ .set_wol = lan88xx_set_wol,
+
+ .driver = { .owner = THIS_MODULE, }
+} };
+
+module_phy_driver(microchip_phy_driver);
+
+static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+ { 0x0007c130, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, microchip_tbl);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..adb48abafc87 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->duplex = cmd->duplex;
+ phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
/* Restart the PHY */
phy_start_aneg(phydev);
@@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->autoneg = phydev->autoneg;
+ cmd->eth_tp_mdix_ctrl = phydev->mdix;
return 0;
}
@@ -811,6 +814,7 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
+ int old_link;
mutex_lock(&phydev->lock);
@@ -896,11 +900,18 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
+ /* Only register a CHANGE if we are polling or ignoring
+ * interrupts and the link changed since the last check.
*/
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ if (!phy_interrupt_is_valid(phydev)) {
+ old_link = phydev->link;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
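
The PHY_RUNNING change above stops unconditionally bouncing a polled PHY into PHY_CHANGELINK: the state machine now reads the link state itself and only transitions when it actually changed. The core of that is an edge detector over a polled boolean; a standalone sketch, with a hypothetical read_link() in place of phy_read_status():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical poll source: the link comes up on the third poll. */
static bool read_link(void)
{
	static int calls;
	return ++calls >= 3;
}

int main(void)
{
	bool link = false;		/* mirrors phydev->link */

	for (int i = 0; i < 5; i++) {
		bool old_link = link;

		link = read_link();	/* phy_read_status() analogue */
		if (old_link != link)	/* only react to an actual edge */
			printf("poll %d: link change -> %d\n", i, link);
	}
	return 0;
}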
@@ -1029,11 +1040,15 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
struct phy_driver *phydrv = phydev->drv;
int value = -1;
- if (phydrv->read_mmd_indirect == NULL) {
- mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+ if (!phydrv->read_mmd_indirect) {
+ struct mii_bus *bus = phydev->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
- value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+ value = bus->read(bus, addr, MII_MMD_DATA);
+ mutex_unlock(&bus->mdio_lock);
} else {
value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
}
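
Both MMD helpers now take bus->mdio_lock around the whole select-then-access sequence: the indirect protocol is two dependent bus operations, so another thread interleaving its own control/data writes between them would corrupt the transfer. A pthread sketch of the same rule, with a hypothetical two-register indirect device (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static int regs[16];			/* hypothetical device registers */
static int ctrl_sel;			/* currently selected register */

static void bus_write_ctrl(int sel) { ctrl_sel = sel; }
static int bus_read_data(void) { return regs[ctrl_sel]; }

static int indirect_read(int which)
{
	int val;

	pthread_mutex_lock(&bus_lock);	/* keep both steps atomic */
	bus_write_ctrl(which);		/* step 1: select the register */
	val = bus_read_data();		/* step 2: read through the selection */
	pthread_mutex_unlock(&bus_lock);
	return val;
}

int main(void)
{
	regs[7] = 42;
	printf("reg 7 = %d\n", indirect_read(7));
	return 0;
}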
@@ -1062,11 +1077,15 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
{
struct phy_driver *phydrv = phydev->drv;
- if (phydrv->write_mmd_indirect == NULL) {
- mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+ if (!phydrv->write_mmd_indirect) {
+ struct mii_bus *bus = phydev->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, prtad, devad, addr);
/* Write the data into MMD's selected register */
- phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+ bus->write(bus, addr, MII_MMD_DATA, data);
+ mutex_unlock(&bus->mdio_lock);
} else {
phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0302483de240..c0f211127274 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -156,8 +156,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
/* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (NULL == dev)
- return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
dev->dev.release = phy_device_release;
@@ -176,9 +176,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
if (c45_ids)
dev->c45_ids = *c45_ids;
dev->bus = bus;
- dev->dev.parent = bus->parent;
+ dev->dev.parent = &bus->dev;
dev->dev.bus = &mdio_bus_type;
- dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
+ dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL;
dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
dev->state = PHY_DOWN;
@@ -589,7 +589,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver.
*/
- if (NULL == d->driver) {
+ if (!d->driver) {
if (phydev->is_c45)
d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
else
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 45353613b2ed..43ab691362d4 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -137,6 +137,19 @@ static struct phy_driver realtek_drvs[] = {
.config_intr = &rtl8211b_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = 0x001cc914,
+ .name = "RTL8211DN Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = rtl821x_ack_interrupt,
+ .config_intr = rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
.phy_id = 0x001cc915,
.name = "RTL8211E Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -170,6 +183,7 @@ module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
+ { 0x001cc914, 0x001fffff },
{ 0x001cc915, 0x001fffff },
{ 0x001cc916, 0x001fffff },
{ }
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
}
/*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can rarely fail to set the ENERGYON bit when an Ethernet cable
+ * is plugged in while the PHY sits in Energy Detect Power-Down mode, which
+ * makes cable-plug detection unreliable.
+ * This workaround disables Energy Detect Power-Down mode and waits for a
+ * response to link pulses to detect the presence of a plugged-in cable.
+ * Energy Detect Power-Down mode is re-enabled at the end of the procedure to
+ * save approximately 220 mW of power while the cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
+ int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
if (rc < 0)
return rc;
- /* Sleep 64 ms to allow ~5 link test pulses to be sent */
- msleep(64);
+ /* Wait max 640 ms to detect energy */
+ for (i = 0; i < 64; i++) {
+ /* Sleep to allow link test pulses to be sent */
+ msleep(10);
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+ if (rc & MII_LAN83C185_ENERGYON)
+ break;
+ }
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 46530159256b..f091d691cf6f 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
return ks8995_start(ks);
}
-/* ------------------------------------------------------------------------ */
-
static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
@@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off > ks8995->regs_attr.size))
- return 0;
-
- if ((off + count) > ks8995->regs_attr.size)
- count = ks8995->regs_attr.size - off;
-
- if (unlikely(!count))
- return count;
-
return ks8995_read(ks8995, buf, off, count);
}
-
static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
@@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
dev = container_of(kobj, struct device, kobj);
ks8995 = dev_get_drvdata(dev);
- if (unlikely(off >= ks8995->regs_attr.size))
- return -EFBIG;
-
- if ((off + count) > ks8995->regs_attr.size)
- count = ks8995->regs_attr.size - off;
-
- if (unlikely(!count))
- return count;
-
return ks8995_write(ks8995, buf, off, count);
}
-
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644
index 000000000000..91e1bec6079f
--- /dev/null
+++ b/drivers/net/phy/teranetics.c
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020 0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \
+ MDIO_PHYXS_LNSTAT_SYNC1 | \
+ MDIO_PHYXS_LNSTAT_SYNC2 | \
+ MDIO_PHYXS_LNSTAT_SYNC3 | \
+ MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+ int reg;
+
+ /* The auto-negotiation state can only be checked on the copper
+ * port; when the fiber port is in use, simply report it as done.
+ */
+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+ }
+
+ return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+ int reg;
+
+ phydev->link = 1;
+
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+ if (reg < 0 ||
+ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
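
teranetics_read_status() treats the link as up only when every lane-status bit in MDIO_PHYXS_LANE_READY is set: (reg & mask) == mask is the all-bits test, as opposed to reg & mask, which is true if any bit is set. A tiny demonstration using the mask value from the defines above:

#include <stdbool.h>
#include <stdio.h>

#define LANE_READY 0x100f	/* SYNC0..SYNC3 | ALIGN, from the hunk above */

static bool all_set(int reg) { return (reg & LANE_READY) == LANE_READY; }
static bool any_set(int reg) { return (reg & LANE_READY) != 0; }

int main(void)
{
	int reg = 0x0007;	/* only three of the four lanes are synced */

	printf("any=%d all=%d\n", any_set(reg), all_set(reg));	/* any=1 all=0 */
	return 0;
}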
+
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+ .phy_id = PHY_ID_TN2020,
+ .phy_id_mask = 0xffffffff,
+ .name = "Teranetics TN2020",
+ .soft_reset = teranetics_soft_reset,
+ .aneg_done = teranetics_aneg_done,
+ .config_init = teranetics_config_init,
+ .config_aneg = teranetics_config_aneg,
+ .read_status = teranetics_read_status,
+ .match_phy_device = teranetics_match_phy_device,
+ .driver = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+ return phy_drivers_register(teranetics_driver,
+ ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+ return phy_drivers_unregister(teranetics_driver,
+ ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+ { PHY_ID_TN2020, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..0481daf9201a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -283,6 +283,8 @@ static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
+static const struct net_device_ops ppp_netdev_ops;
+
static struct class *ppp_class;
/* per net-namespace data */
@@ -392,8 +394,10 @@ static int ppp_release(struct inode *unused, struct file *file)
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
@@ -593,8 +597,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
@@ -838,11 +844,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, &err);
+ ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
- ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
@@ -916,6 +921,25 @@ static __net_init int ppp_init_net(struct net *net)
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
+ struct net_device *dev;
+ struct net_device *aux;
+ struct ppp *ppp;
+ LIST_HEAD(list);
+ int id;
+
+ rtnl_lock();
+ for_each_netdev_safe(net, dev, aux) {
+ if (dev->netdev_ops == &ppp_netdev_ops)
+ unregister_netdevice_queue(dev, &list);
+ }
+
+ idr_for_each_entry(&pn->units_idr, ppp, id)
+ /* Skip devices already unregistered by previous loop */
+ if (!net_eq(dev_net(ppp->dev), net))
+ unregister_netdevice_queue(ppp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
idr_destroy(&pn->units_idr);
}
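
ppp_exit_net() batches teardown: every matching device is queued with unregister_netdevice_queue() under one rtnl_lock, then unregister_netdevice_many() commits the whole list at once, amortizing the synchronization that per-device unregister_netdev() calls would each pay. A standalone sketch of the collect-then-commit shape, with a trivial array in place of the net_device list:

#include <stdio.h>

#define MAXQ 8

struct batch {
	int items[MAXQ];
	int n;
};

/* analogue of unregister_netdevice_queue(): cheap, just records work */
static void queue_teardown(struct batch *b, int id)
{
	b->items[b->n++] = id;
}

/* analogue of unregister_netdevice_many(): one expensive pass for all */
static void commit_teardown(struct batch *b)
{
	printf("single expensive sync, then freeing %d devices:", b->n);
	for (int i = 0; i < b->n; i++)
		printf(" %d", b->items[i]);
	printf("\n");
	b->n = 0;
}

int main(void)
{
	struct batch b = { .n = 0 };

	for (int id = 0; id < 3; id++)	/* the for_each_netdev_safe() pass */
		queue_teardown(&b, id);
	commit_teardown(&b);
	return 0;
}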
@@ -1004,6 +1028,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
proto = npindex_to_proto[npi];
put_unaligned_be16(proto, pp);
+ skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
skb_queue_tail(&ppp->file.xq, skb);
ppp_xmit_process(ppp);
return NETDEV_TX_OK;
@@ -1088,8 +1113,28 @@ static int ppp_dev_init(struct net_device *dev)
return 0;
}
+static void ppp_dev_uninit(struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+ ppp_lock(ppp);
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ unit_put(&pn->units_idr, ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ ppp->owner = NULL;
+
+ ppp->file.dead = 1;
+ wake_up_interruptible(&ppp->file.rwait);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -1104,7 +1149,6 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->features |= NETIF_F_NETNS_LOCAL;
netif_keep_dst(dev);
}
@@ -1867,6 +1911,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
skb->dev = ppp->dev;
skb->protocol = htons(npindex_to_ethertype[npi]);
skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
+ dev_net(ppp->dev)));
netif_rx(skb);
}
}
@@ -2667,8 +2713,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
@@ -2688,6 +2734,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2823,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
}
/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
- struct ppp_net *pn;
-
- pn = ppp_pernet(ppp->ppp_net);
- mutex_lock(&pn->all_ppp_mutex);
-
- /* This will call dev_close() for us. */
- ppp_lock(ppp);
- if (!ppp->closing) {
- ppp->closing = 1;
- ppp_unlock(ppp);
- unregister_netdev(ppp->dev);
- unit_put(&pn->units_idr, ppp->file.index);
- } else
- ppp_unlock(ppp);
-
- ppp->file.dead = 1;
- ppp->owner = NULL;
- wake_up_interruptible(&ppp->file.rwait);
-
- mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
* that reference the unit.
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index dac7a0d9bb46..01f08a7751f7 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -396,7 +396,7 @@ static int rionet_close(struct net_device *ndev)
return 0;
}
-static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
+static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
struct rio_dev *rdev = to_rio_dev(dev);
unsigned char netid = rdev->net->hport->id;
@@ -416,8 +416,6 @@ static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
}
}
}
-
- return 0;
}
static void rionet_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index daa054b3ff03..651d35ea22c5 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2051,9 +2051,9 @@ static void team_setup(struct net_device *dev)
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
dev->destructor = team_destructor;
- dev->tx_queue_len = 0;
dev->flags |= IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->priv_flags |= IFF_NO_QUEUE;
/*
* Indicate we support unicast address filtering. That way core won't
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 06a039414628..976aa9704297 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -961,6 +961,7 @@ static const struct net_device_ops tap_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void tun_flow_init(struct tun_struct *tun)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7ba8d0885f12..1610b79ae386 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -106,6 +106,16 @@ config USB_RTL8152
To compile this driver as a module, choose M here: the
module will be called r8152.
+config USB_LAN78XX
+ tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+ select MII
+ help
+ This option adds support for Microchip LAN78XX based USB 2
+ & USB 3 10/100/1000 Ethernet adapters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lan78xx.
+
config USB_USBNET
tristate "Multi-purpose USB Networking Framework"
select MII
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e2797f1e1b31..cf6a0e610a7f 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_RTL8152) += r8152.o
obj-$(CONFIG_USB_HSO) += hso.o
+obj-$(CONFIG_USB_LAN78XX) += lan78xx.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4545e78840b0..35a2bffe848a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define NVIDIA_VENDOR_ID 0x0955
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e4b7a47a825c..efc18e05af0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
- ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
if (ret)
goto err;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8067b8fbb0ee..db40175b1a0b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -6,7 +6,7 @@
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
* USB Host Driver for Network Control Model (NCM)
- * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
*
* The NCM encoding, decoding and initialization logic
* derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
+ kfree(ctx->delayed_ndp16);
+
kfree(ctx);
}
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
+ /* Device-specific flags */
+ ctx->drvflags = drvflags;
+
+ /* Allocate the delayed NDP if needed. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ dev_info(&intf->dev, "NDP will be placed at the end of the frame for this device.");
+ }
+
/* override ethtool_ops */
dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
- /* The NCM data altsetting is fixed */
- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
+ /* The NCM data altsetting is fixed, so we hard-code it here.
+ * Additionally, generic NCM devices are assumed to accept an
+ * arbitrarily placed NDP.
+ */
+ ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
/*
* We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+ /* If the NDP is to be moved to the end of the NCM package, we can't follow
+ * the NTH16 header as we normally would: the NDP hasn't been written to the
+ * SKB yet, so the wNdpIndex field in the header does not yet reflect reality.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ if (ctx->delayed_ndp16->dwSignature == sign)
+ return ctx->delayed_ndp16;
+
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
/* align new NDP */
- cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */
if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ else
+ ndp16 = ctx->delayed_ndp16;
+
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
+ u32 delayed_ndp_size;
+
+ /* When the NDP is written immediately in cdc_ncm_ndp(), skb_out->len is
+ * updated accordingly; when it is delayed, we must account for its size here.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ delayed_ndp_size = ctx->max_ndp_size;
+ else
+ delayed_ndp_size = 0;
/* if there is a remaining skb, it gets priority */
if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
+ if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ }
+
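
With CDC_NCM_FLAG_NDP_TO_END the NDP's bytes are not yet in skb_out when the room check runs, so the check earlier in this hunk adds delayed_ndp_size to what the frame will eventually hold. A small arithmetic sketch of that reservation (the sizes are illustrative, not real device NTB limits):

#include <stdbool.h>
#include <stdio.h>

#define TX_MAX       2048	/* total NTB budget (illustrative) */
#define MAX_NDP_SIZE 96		/* NDP appended later, not yet in cur_len */

static bool datagram_fits(int cur_len, int dgram_len, bool ndp_to_end)
{
	int delayed = ndp_to_end ? MAX_NDP_SIZE : 0;

	/* mirrors: skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max */
	return cur_len + dgram_len + delayed <= TX_MAX;
}

int main(void)
{
	/* 1980 + 40 fits the buffer today, but not once the NDP lands. */
	printf("naive check: %d\n", datagram_fits(1980, 40, false));
	printf("reserving NDP: %d\n", datagram_fits(1980, 40, true));
	return 0;
}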
/* If the collected data size is less than or equal to ctx->min_tx_pkt
 * bytes, we send the buffer as it is. If we get more data, it
 * would be more efficient for a USB HS mobile device with DMA
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 735f7dadb9a0..2680a65cd5e4 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret = -ENODEV;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ int drvflags = 0;
/* altsetting should always be 1 for NCM devices - so we hard-coded
- * it here
+ * it here. Some Huawei devices need the NDP part of the NCM package to
+ * be placed at the end of the frame.
*/
- ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+ drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+ ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644
index 000000000000..a39518fc93aa
--- /dev/null
+++ b/drivers/net/usb/lan78xx.c
@@ -0,0 +1,3494 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME "lan78xx"
+#define DRIVER_VERSION "1.0.0"
+
+#define TX_TIMEOUT_JIFFIES (5 * HZ)
+#define THROTTLE_JIFFIES (HZ / 8)
+#define UNLINK_TIMEOUT_MS 3
+
+#define RX_MAX_QUEUE_MEMORY (60 * 1518)
+
+#define SS_USB_PKT_SIZE (1024)
+#define HS_USB_PKT_SIZE (512)
+#define FS_USB_PKT_SIZE (64)
+
+#define MAX_RX_FIFO_SIZE (12 * 1024)
+#define MAX_TX_FIFO_SIZE (12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY (0x0800)
+#define MAX_SINGLE_PACKET_SIZE (9000)
+#define DEFAULT_TX_CSUM_ENABLE (true)
+#define DEFAULT_RX_CSUM_ENABLE (true)
+#define DEFAULT_TSO_CSUM_ENABLE (true)
+#define DEFAULT_VLAN_FILTER_ENABLE (true)
+#define INTERNAL_PHY_ID (2) /* 2: GMII */
+#define TX_OVERHEAD (8)
+#define RXW_PADDING 2
+
+#define LAN78XX_USB_VENDOR_ID (0x0424)
+#define LAN7800_USB_PRODUCT_ID (0x7800)
+#define LAN7850_USB_PRODUCT_ID (0x7850)
+#define LAN78XX_EEPROM_MAGIC (0x78A5)
+#define LAN78XX_OTP_MAGIC (0x78F3)
+
+#define MII_READ 1
+#define MII_WRITE 0
+
+#define EEPROM_INDICATOR (0xA5)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE 512
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
+ WAKE_MCAST | WAKE_BCAST | \
+ WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE 1
+#define BULK_OUT_PIPE 2
+
+/* default autosuspend delay (msec) */
+#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+ u32 rx_fcs_errors;
+ u32 rx_alignment_errors;
+ u32 rx_fragment_errors;
+ u32 rx_jabber_errors;
+ u32 rx_undersize_frame_errors;
+ u32 rx_oversize_frame_errors;
+ u32 rx_dropped_frames;
+ u32 rx_unicast_byte_count;
+ u32 rx_broadcast_byte_count;
+ u32 rx_multicast_byte_count;
+ u32 rx_unicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_bytes_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_1518_byte_frames;
+ u32 eee_rx_lpi_transitions;
+ u32 eee_rx_lpi_time;
+ u32 tx_fcs_errors;
+ u32 tx_excess_deferral_errors;
+ u32 tx_carrier_errors;
+ u32 tx_bad_byte_count;
+ u32 tx_single_collisions;
+ u32 tx_multiple_collisions;
+ u32 tx_excessive_collision;
+ u32 tx_late_collisions;
+ u32 tx_unicast_byte_count;
+ u32 tx_broadcast_byte_count;
+ u32 tx_multicast_byte_count;
+ u32 tx_unicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_bytes_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_1518_byte_frames;
+ u32 eee_tx_lpi_transitions;
+ u32 eee_tx_lpi_time;
+};
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+ struct lan78xx_net *dev;
+ u32 rfe_ctl;
+ u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+ u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+ u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+ struct mutex dataport_mutex; /* for dataport access */
+ spinlock_t rfe_ctl_lock; /* for rfe register access */
+ struct work_struct set_multicast;
+ struct work_struct set_vlan;
+ u32 wol;
+};
+
+enum skb_state {
+ illegal = 0,
+ tx_start,
+ tx_done,
+ rx_start,
+ rx_done,
+ rx_cleanup,
+ unlink_start
+};
+
+struct skb_data { /* skb->cb is one of these */
+ struct urb *urb;
+ struct lan78xx_net *dev;
+ enum skb_state state;
+ size_t length;
+};
+
+struct usb_context {
+ struct usb_ctrlrequest req;
+ struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT 0
+#define EVENT_RX_HALT 1
+#define EVENT_RX_MEMORY 2
+#define EVENT_STS_SPLIT 3
+#define EVENT_LINK_RESET 4
+#define EVENT_RX_PAUSED 5
+#define EVENT_DEV_WAKING 6
+#define EVENT_DEV_ASLEEP 7
+#define EVENT_DEV_OPEN 8
+
+struct lan78xx_net {
+ struct net_device *net;
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ void *driver_priv;
+
+ int rx_qlen;
+ int tx_qlen;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head done;
+ struct sk_buff_head rxq_pause;
+ struct sk_buff_head txq_pend;
+
+ struct tasklet_struct bh;
+ struct delayed_work wq;
+
+ struct usb_host_endpoint *ep_blkin;
+ struct usb_host_endpoint *ep_blkout;
+ struct usb_host_endpoint *ep_intr;
+
+ int msg_enable;
+
+ struct urb *urb_intr;
+ struct usb_anchor deferred;
+
+ struct mutex phy_mutex; /* for phy access */
+ unsigned pipe_in, pipe_out, pipe_intr;
+
+ u32 hard_mtu; /* count any extra framing */
+ size_t rx_urb_size; /* size for rx urbs */
+
+ unsigned long flags;
+
+ wait_queue_head_t *wait;
+ unsigned char suspend_count;
+
+ unsigned maxpacket;
+ struct timer_list delay;
+
+ unsigned long data[5];
+ struct mii_if_info mii;
+
+ int link_on;
+ u8 mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
+ u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_READ_REGISTER,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (likely(ret >= 0)) {
+ le32_to_cpus(buf);
+ *data = *buf;
+ } else {
+ netdev_warn(dev->net,
+ "Failed to read register index 0x%08x. ret = %d",
+ index, ret);
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+ u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = data;
+ cpu_to_le32s(buf);
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ netdev_warn(dev->net,
+ "Failed to write register index 0x%08x. ret = %d",
+ index, ret);
+ }
+
+ kfree(buf);
+
+ return ret;
+}
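
lan78xx_read_reg()/lan78xx_write_reg() heap-allocate the 4-byte transfer buffer (stack buffers are not DMA-safe for USB) and convert between CPU and little-endian order with cpu_to_le32s()/le32_to_cpus(), since the wire format is fixed regardless of host endianness. A userspace sketch of the conversion half, using the glibc <endian.h> analogues:

#include <endian.h>	/* htole32()/le32toh(), analogues of cpu_to_le32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg_val = 0x12345678;		/* CPU-order register value */
	uint32_t wire = htole32(reg_val);	/* what goes in the URB buffer */

	/* On a little-endian host this is a no-op; on big-endian it swaps. */
	printf("wire bytes: %02x %02x %02x %02x\n",
	       ((uint8_t *)&wire)[0], ((uint8_t *)&wire)[1],
	       ((uint8_t *)&wire)[2], ((uint8_t *)&wire)[3]);
	printf("round trip: 0x%08x\n", le32toh(wire));
	return 0;
}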
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+ struct lan78xx_statstage *data)
+{
+ int ret = 0;
+ int i;
+ struct lan78xx_statstage *stats;
+ u32 *src;
+ u32 *dst;
+
+ stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ USB_VENDOR_REQUEST_GET_STATS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ (void *)stats,
+ sizeof(*stats),
+ USB_CTRL_SET_TIMEOUT);
+ if (likely(ret >= 0)) {
+ src = (u32 *)stats;
+ dst = (u32 *)data;
+ for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+ le32_to_cpus(&src[i]);
+ dst[i] = src[i];
+ }
+ } else {
+ netdev_warn(dev->net,
+ "Failed to read stat ret = 0x%x", ret);
+ }
+
+ kfree(stats);
+
+ return ret;
+}
+
+/* Loop until the read is completed with timeout called with phy_mutex held */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, MII_ACC, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & MII_ACC_MII_BUSY_))
+ return 0;
+ } while (!time_after(jiffies, start_time + HZ));
+
+ return -EIO;
+}
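
lan78xx_phy_wait_not_busy() is the standard bounded-poll shape: loop on the busy flag against a deadline computed once from jiffies, and give up with -EIO after roughly one second (HZ jiffies). A userspace sketch of the same loop against a wall-clock deadline, with a hypothetical busy flag:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical busy flag: clears after a few polls. */
static bool hw_busy(void)
{
	static int polls;
	return ++polls < 4;
}

static int wait_not_busy(double timeout_s)
{
	time_t start = time(NULL);	/* analogue of start_time = jiffies */

	do {
		if (!hw_busy())
			return 0;	/* device became ready */
	} while (difftime(time(NULL), start) < timeout_s);

	return -1;			/* analogue of -EIO on timeout */
}

int main(void)
{
	printf("ret = %d\n", wait_not_busy(1.0));
	return 0;
}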
+
+static inline u32 mii_access(int id, int index, int read)
+{
+ u32 ret;
+
+ ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+ ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+ if (read)
+ ret |= MII_ACC_MII_READ_;
+ else
+ ret |= MII_ACC_MII_WRITE_;
+ ret |= MII_ACC_MII_BUSY_;
+
+ return ret;
+}
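
mii_access() packs the PHY address, the register index, the direction, and the BUSY trigger into a single MII_ACC word. The real shift/mask constants live in lan78xx.h and are not shown in this hunk, so the sketch below uses hypothetical bit positions purely to illustrate the shift-mask-or packing:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout, for illustration only; the real values are the
 * MII_ACC_* constants in lan78xx.h. */
#define PHY_ADDR_SHIFT 11
#define PHY_ADDR_MASK  (0x1f << PHY_ADDR_SHIFT)
#define REG_IDX_SHIFT  6
#define REG_IDX_MASK   (0x1f << REG_IDX_SHIFT)
#define OP_READ        (1u << 1)
#define BUSY           (1u << 0)

static uint32_t pack_mii_access(int id, int index)
{
	uint32_t ret;

	ret  = ((uint32_t)id << PHY_ADDR_SHIFT) & PHY_ADDR_MASK;
	ret |= ((uint32_t)index << REG_IDX_SHIFT) & REG_IDX_MASK;
	ret |= OP_READ;		/* direction: read from the PHY */
	ret |= BUSY;		/* writing BUSY kicks off the transaction */
	return ret;
}

int main(void)
{
	printf("MII_ACC word = 0x%08x\n", pack_mii_access(2, 1));
	return 0;
}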
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set the address, index & direction (read from PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = mii_access(phy_id, idx, MII_READ);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+ ret = (int)(val & 0xFFFF);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+ int idx, int regval)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ val = regval;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ /* set the address, index & direction (write to PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ idx &= dev->mii.reg_num_mask;
+ addr = mii_access(phy_id, idx, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+ int mmddev, int mmdidx, int regval)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ mmddev &= 0x1F;
+
+ /* set up device address for MMD */
+ ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register of MMD */
+ val = mmdidx;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register data for MMD */
+ val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* write to MMD */
+ val = regval;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+}
+
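+/* Indirect MMD read: same device/index setup as lan78xx_mmd_write(),
+ * but the final MII_ACC cycle reads PHY_MMD_REG_DATA back.
+ */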
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+ int mmddev, int mmdidx)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ u32 val, addr;
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ /* confirm MII not busy */
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set up device address for MMD */
+ ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register of MMD */
+ val = mmdidx;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* select register data for MMD */
+ val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+ ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* set the address, index & direction (read from PHY) */
+ phy_id &= dev->mii.phy_id_mask;
+ addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+ ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+ ret = lan78xx_phy_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* read from MMD */
+ ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+ ret = (int)(val & 0xFFFF);
+
+done:
+ mutex_unlock(&dev->phy_mutex);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+
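+/* Poll E2P_CMD until the EEPROM controller clears BUSY, reports
+ * TIMEOUT, or a one second deadline expires.
+ */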
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netdev_warn(dev->net, "EEPROM operation timeout");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+ int ret;
+
+ do {
+ ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netdev_warn(dev->net, "EEPROM is busy");
+ return -EIO;
+}
+
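+/* EEPROM bytes are read one at a time: issue an E2P_CMD READ for the
+ * current offset, wait, then take the low 8 bits of E2P_DATA.
+ */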
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u8 sig;
+ int ret;
+
+ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+ if ((ret == 0) && (sig == EEPROM_INDICATOR))
+ ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
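+/* EEPROM writes need a single write/erase enable (EWEN) command up
+ * front, followed by one E2P_CMD WRITE transaction per byte.
+ */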
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u32 val;
+ int i, ret;
+
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret)
+ return ret;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ ret = lan78xx_write_reg(dev, E2P_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ ret = lan78xx_write_reg(dev, E2P_CMD, val);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_wait_eeprom(dev);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ }
+
+ return 0;
+}
+
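+/* Raw OTP read: power the OTP block up if it is down, then for each
+ * byte program the split address registers (OTP_ADDR1/OTP_ADDR2),
+ * issue a READ via OTP_FUNC_CMD plus OTP_CMD_GO, and poll OTP_STATUS
+ * until the access completes.
+ */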
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ int i;
+ int ret;
+ u32 buf;
+ unsigned long timeout;
+
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+ if (buf & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+ timeout = jiffies + HZ;
+ do {
+ usleep_range(1, 10);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on OTP_PWR_DN");
+ return -EIO;
+ }
+ } while (buf & OTP_PWR_DN_PWRDN_N_);
+ }
+
+ for (i = 0; i < length; i++) {
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on OTP_STATUS");
+ return -EIO;
+ }
+ } while (buf & OTP_STATUS_BUSY_);
+
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+ data[i] = (u8)(buf & 0xFF);
+ }
+
+ return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+ u32 length, u8 *data)
+{
+ u8 sig;
+ int ret;
+
+ ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+ if (ret == 0) {
+ if (sig == OTP_INDICATOR_2)
+ offset += 0x100;
+ else if (sig != OTP_INDICATOR_1)
+ return -EINVAL;
+
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ }
+
+ return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < 100; i++) {
+ u32 dp_sel;
+
+ ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (dp_sel & DP_SEL_DPRDY_)
+ return 0;
+
+ usleep_range(40, 100);
+ }
+
+ netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+ return -EIO;
+}
+
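+/* Burst-write a buffer into one of the chip's internal RAMs (the
+ * "dataport"), e.g. the VLAN and multicast hash tables: select the
+ * RAM via DP_SEL, then write an address/data pair and kick DP_CMD
+ * for each word. Serialized by dataport_mutex.
+ */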
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+ u32 addr, u32 length, u32 *buf)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 dp_sel;
+ int i, ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return 0;
+
+ mutex_lock(&pdata->dataport_mutex);
+
+ ret = lan78xx_dataport_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+ dp_sel &= ~DP_SEL_RSEL_MASK_;
+ dp_sel |= ram_select;
+ ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+ for (i = 0; i < length; i++) {
+ ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+ ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+ ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+ ret = lan78xx_dataport_wait_not_busy(dev);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ mutex_unlock(&pdata->dataport_mutex);
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
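+/* Pack a MAC address into the two 32-bit words of a perfect-filter
+ * (MAF) slot; the high word holds bytes 4-5 plus the VALID and
+ * destination-type flags.
+ */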
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+ int index, u8 addr[ETH_ALEN])
+{
+ u32 temp;
+
+ if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+ temp = addr[3];
+ temp = addr[2] | (temp << 8);
+ temp = addr[1] | (temp << 8);
+ temp = addr[0] | (temp << 8);
+ pdata->pfilter_table[index][1] = temp;
+ temp = addr[5];
+ temp = addr[4] | (temp << 8);
+ temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+ pdata->pfilter_table[index][0] = temp;
+ }
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
+ return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+ struct lan78xx_priv *pdata =
+ container_of(param, struct lan78xx_priv, set_multicast);
+ struct lan78xx_net *dev = pdata->dev;
+ int i;
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+ pdata->rfe_ctl);
+
+ lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+ for (i = 1; i < NUM_OF_MAF; i++) {
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ }
+
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
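+/* Rebuild the RX filter state under rfe_ctl_lock (this can run in
+ * atomic context), then defer the actual register and dataport
+ * writes to a work item, since USB control transfers sleep.
+ */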
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+ RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+ for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+ pdata->mchash_table[i] = 0;
+ /* pfilter_table[0] has own HW address */
+ for (i = 1; i < NUM_OF_MAF; i++) {
+ pdata->pfilter_table[i][0] =
+ pdata->pfilter_table[i][1] = 0;
+ }
+
+ pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+ if (dev->net->flags & IFF_PROMISC) {
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+ pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+ } else {
+ if (dev->net->flags & IFF_ALLMULTI) {
+ netif_dbg(dev, drv, dev->net,
+ "receive all multicast enabled");
+ pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+ }
+ }
+
+ if (netdev_mc_count(dev->net)) {
+ struct netdev_hw_addr *ha;
+ int i;
+
+ netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+ pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* set first 32 into Perfect Filter */
+ if (i < 33) {
+ lan78xx_set_addr_filter(pdata, i, ha->addr);
+ } else {
+ u32 bitnum = lan78xx_hash(ha->addr);
+
+ pdata->mchash_table[bitnum / 32] |=
+ (1 << (bitnum % 32));
+ pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+ }
+ i++;
+ }
+ }
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_multicast);
+}
+
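+/* Resolve pause settings from the local and partner autoneg
+ * advertisements and program them. FCT_FLOW holds the flow-control
+ * thresholds; the values differ by USB speed, presumably to match
+ * the different burst capacities.
+ */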
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+ u16 lcladv, u16 rmtadv)
+{
+ u32 flow = 0, fct_flow = 0;
+ int ret;
+
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_TX)
+ flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+ if (cap & FLOW_CTRL_RX)
+ flow |= FLOW_CR_RX_FCEN_;
+
+ if (dev->udev->speed == USB_SPEED_SUPER)
+ fct_flow = 0x817;
+ else if (dev->udev->speed == USB_SPEED_HIGH)
+ fct_flow = 0x211;
+
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+ ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+ /* threshold value should be set before enabling flow */
+ ret = lan78xx_write_reg(dev, FLOW, flow);
+
+ return 0;
+}
+
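+/* Handle a PHY interrupt: ack the PHY and chip interrupt status,
+ * reset the MAC on link-down, and on link-up re-resolve
+ * speed/duplex, tune the USB U1/U2 low-power states for the
+ * negotiated speed, and update flow control.
+ */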
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ int ladv, radv, ret;
+ u32 buf;
+
+ /* clear PHY interrupt status */
+ /* VTSE PHY */
+ ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ /* clear LAN78xx interrupt status */
+ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+ if (unlikely(ret < 0))
+ return -EIO;
+
+ if (!mii_link_ok(mii) && dev->link_on) {
+ dev->link_on = false;
+ netif_carrier_off(dev->net);
+
+ /* reset MAC */
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ if (unlikely(ret < 0))
+ return -EIO;
+ buf |= MAC_CR_RST_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+ if (unlikely(ret < 0))
+ return -EIO;
+ } else if (mii_link_ok(mii) && !dev->link_on) {
+ dev->link_on = true;
+
+ mii_check_media(mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+
+ mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+ if (dev->udev->speed == USB_SPEED_SUPER) {
+ if (ethtool_cmd_speed(&ecmd) == 1000) {
+ /* disable U2 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ /* enable U1 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf |= USB_CFG1_DEV_U1_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ } else {
+ /* enable U1 & U2 */
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ buf |= USB_CFG1_DEV_U2_INIT_EN_;
+ buf |= USB_CFG1_DEV_U1_INIT_EN_;
+ ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ }
+ }
+
+ ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+ if (ladv < 0)
+ return ladv;
+
+ radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+ if (radv < 0)
+ return radv;
+
+ netif_dbg(dev, link, dev->net,
+ "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+ ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+ netif_carrier_on(dev->net);
+ }
+
+ return ret;
+}
+
+/* Some work can't be done in tasklets, so we use keventd.
+ *
+ * NOTE: annoying asymmetry: if the work is already scheduled,
+ * schedule_work() fails, but tasklet_schedule() doesn't; hope
+ * the failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+ set_bit(work, &dev->flags);
+ if (!schedule_delayed_work(&dev->wq, 0))
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
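+/* Interrupt-endpoint completion: the chip delivers a 4-byte
+ * little-endian status word; a PHY interrupt defers the link
+ * handling to keventd.
+ */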
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+ u32 intdata;
+
+ if (urb->actual_length != 4) {
+ netdev_warn(dev->net,
+ "unexpected urb length %d", urb->actual_length);
+ return;
+ }
+
+ memcpy(&intdata, urb->transfer_buffer, 4);
+ le32_to_cpus(&intdata);
+
+ if (intdata & INT_ENP_PHY_INT) {
+ netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+ } else
+ netdev_warn(dev->net,
+ "unexpected interrupt: 0x%08x\n", intdata);
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ ee->magic = LAN78XX_EEPROM_MAGIC;
+
+ return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ /* Allow entire eeprom update only */
+ if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == 512) &&
+ (data[0] == EEPROM_INDICATOR))
+ return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+ else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == 512) &&
+ (data[0] == OTP_INDICATOR_1))
+ return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+ return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(lan78xx_gstrings);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_statstage lan78xx_stat;
+ u32 *p;
+ int i;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+ p = (u32 *)&lan78xx_stat;
+ for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+ data[i] = p[i];
+ }
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+ u32 buf;
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ if (unlikely(ret < 0)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ if (buf & USB_CFG_RMT_WKP_) {
+ wol->supported = WAKE_ALL;
+ wol->wolopts = pdata->wol;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+ }
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ pdata->wol = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ pdata->wol |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ pdata->wol |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ pdata->wol |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ pdata->wol |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ pdata->wol |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ pdata->wol |= WAKE_ARP;
+
+ device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
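+/* EEE state lives in two places: the enable bit in MAC_CR and the
+ * advertisement registers in PHY MMD device 7.
+ */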
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+ u32 buf;
+ u32 adv, lpadv;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ if (buf & MAC_CR_EEE_EN_) {
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+ adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+ lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+ edata->eee_enabled = true;
+ edata->supported = true;
+ edata->eee_active = !!(adv & lpadv);
+ edata->advertised = adv;
+ edata->lp_advertised = lpadv;
+ edata->tx_lpi_enabled = true;
+ /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same microsecond unit */
+ ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+ edata->tx_lpi_timer = buf;
+ } else {
+ buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+ lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->supported = false;
+ edata->advertised = 0;
+ edata->lp_advertised = lpadv; /* already converted above */
+ edata->tx_lpi_enabled = false;
+ edata->tx_lpi_timer = 0;
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+ u32 buf;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ if (edata->eee_enabled) {
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ buf |= MAC_CR_EEE_EN_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+ buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+ PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+ } else {
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ buf &= ~MAC_CR_EEE_EN_;
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+int lan78xx_nway_reset(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+ int buf;
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ ret = mii_ethtool_gset(&dev->mii, cmd);
+
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+ buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+ buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
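+/* ethtool set: MDI/MDI-X control sits in the internal PHY's extended
+ * mode control register (selected via GPIO page space 1), while
+ * speed/duplex go through the generic MII helpers.
+ */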
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0;
+ int temp;
+
+ if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+ return -EOPNOTSUPP;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+ if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_MDI_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_MDI_X_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE,
+ PHY_EXT_GPIO_PAGE_SPACE_0);
+ }
+ }
+
+ /* change speed & duplex */
+ ret = mii_ethtool_sset(&dev->mii, cmd);
+
+ if (!cmd->autoneg) {
+ /* force link down */
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+ temp | BMCR_LOOPBACK);
+ mdelay(1);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+ }
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+ .get_link = lan78xx_get_link,
+ .nway_reset = lan78xx_nway_reset,
+ .get_drvinfo = lan78xx_get_drvinfo,
+ .get_msglevel = lan78xx_get_msglevel,
+ .set_msglevel = lan78xx_set_msglevel,
+ .get_settings = lan78xx_get_settings,
+ .set_settings = lan78xx_set_settings,
+ .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+ .get_eeprom = lan78xx_ethtool_get_eeprom,
+ .set_eeprom = lan78xx_ethtool_set_eeprom,
+ .get_ethtool_stats = lan78xx_get_stats,
+ .get_sset_count = lan78xx_get_sset_count,
+ .get_strings = lan78xx_get_strings,
+ .get_wol = lan78xx_get_wol,
+ .set_wol = lan78xx_set_wol,
+ .get_eee = lan78xx_get_eee,
+ .set_eee = lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
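+/* MAC address selection order: current RX_ADDR registers, then
+ * EEPROM/OTP, then a random address as last resort; the result is
+ * also mirrored into perfect-filter slot 0.
+ */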
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+ u32 addr_lo, addr_hi;
+ int ret;
+ u8 addr[6];
+
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
+ addr[0] = addr_lo & 0xFF;
+ addr[1] = (addr_lo >> 8) & 0xFF;
+ addr[2] = (addr_lo >> 16) & 0xFF;
+ addr[3] = (addr_lo >> 24) & 0xFF;
+ addr[4] = addr_hi & 0xFF;
+ addr[5] = (addr_hi >> 8) & 0xFF;
+
+ if (!is_valid_ether_addr(addr)) {
+ /* read MAC address from EEPROM or OTP */
+ if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+ addr) == 0) ||
+ (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+ addr) == 0)) &&
+ is_valid_ether_addr(addr)) {
+ /* eeprom values are valid so use them */
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from EEPROM");
+ } else {
+ /* generate random MAC */
+ random_ether_addr(addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address set to random addr");
+ }
+
+ /* program the chosen address; otherwise a random MAC
+ * generated after a failed EEPROM/OTP read would leave
+ * addr_lo/addr_hi stale for the MAF(0) write below
+ */
+ addr_lo = addr[0] | (addr[1] << 8) |
+ (addr[2] << 16) | (addr[3] << 24);
+ addr_hi = addr[4] | (addr[5] << 8);
+
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ }
+
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+ ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = lan78xx_mdio_read;
+ dev->mii.mdio_write = lan78xx_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = INTERNAL_PHY_ID;
+ dev->mii.supports_gmii = true;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+ int temp;
+ struct mii_if_info *mii = &dev->mii;
+
+ if ((!mii->mdio_write) || (!mii->mdio_read))
+ return -EOPNOTSUPP;
+
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+ temp |= ADVERTISE_ALL;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+ temp | ADVERTISE_CSMA |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ /* set to AUTOMDIX */
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+ temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+ temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+ mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+ temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+ mii->mdio_write(mii->dev, mii->phy_id,
+ PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+ dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* MAC doesn't support 1000HD */
+ temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+ mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+ temp & ~ADVERTISE_1000HALF);
+
+ /* clear interrupt */
+ mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+ mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+ PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+ PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+ return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+ int ret = 0;
+ u32 buf;
+ bool rxenabled;
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+ rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
+ if (rxenabled) {
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ }
+
+ /* add 4 to size for FCS */
+ buf &= ~MAC_RX_MAX_SIZE_MASK_;
+ buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ if (rxenabled) {
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ }
+
+ return 0;
+}
+
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ int count = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ struct skb_data *entry;
+ struct urb *urb;
+ int ret;
+
+ skb_queue_walk(q, skb) {
+ entry = (struct skb_data *)skb->cb;
+ if (entry->state != unlink_start)
+ goto found;
+ }
+ break;
+found:
+ entry->state = unlink_start;
+ urb = entry->urb;
+
+ /* Take a reference on the URB so it cannot be freed
+ * while usb_unlink_urb() runs; usb_unlink_urb() always
+ * races with the .complete handler (including defer_bh),
+ * which could otherwise trigger a use-after-free.
+ */
+ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
+ /* during some PM-driven resume scenarios,
+ * these (async) unlinks complete immediately
+ */
+ ret = usb_unlink_urb(urb);
+ if (ret != -EINPROGRESS && ret != 0)
+ netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+ else
+ count++;
+ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+ return count;
+}
+
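+/* Besides the MAX_SINGLE_PACKET_SIZE bound, reject MTUs whose
+ * USB-level frame length is an exact multiple of the bulk endpoint
+ * maxpacket, since every MTU-sized frame would then need a trailing
+ * zero-length packet.
+ */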
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ int ll_mtu = new_mtu + netdev->hard_header_len;
+ int old_hard_mtu = dev->hard_mtu;
+ int old_rx_urb_size = dev->rx_urb_size;
+ int ret;
+
+ if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+ return -EINVAL;
+
+ if (new_mtu <= 0)
+ return -EINVAL;
+ /* no second zero-length packet read wanted after mtu-sized packets */
+ if ((ll_mtu % dev->maxpacket) == 0)
+ return -EDOM;
+
+ ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+ netdev->mtu = new_mtu;
+
+ dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+ if (dev->rx_urb_size == old_hard_mtu) {
+ dev->rx_urb_size = dev->hard_mtu;
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ if (netif_running(dev->net)) {
+ unlink_urbs(dev, &dev->rxq);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+ }
+
+ return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ u32 addr_lo, addr_hi;
+ int ret;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+ addr_lo = netdev->dev_addr[0] |
+ netdev->dev_addr[1] << 8 |
+ netdev->dev_addr[2] << 16 |
+ netdev->dev_addr[3] << 24;
+ addr_hi = netdev->dev_addr[4] |
+ netdev->dev_addr[5] << 8;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+ return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ if (features & NETIF_F_RXCSUM) {
+ pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+ pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+ } else {
+ pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+ pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+ }
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+ else
+ pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+ return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+ struct lan78xx_priv *pdata =
+ container_of(param, struct lan78xx_priv, set_vlan);
+ struct lan78xx_net *dev = pdata->dev;
+
+ lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+ DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u16 vid_bit_index;
+ u16 vid_dword_index;
+
+ vid_dword_index = (vid >> 5) & 0x7F;
+ vid_bit_index = vid & 0x1F;
+
+ pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_vlan);
+
+ return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct lan78xx_net *dev = netdev_priv(netdev);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u16 vid_bit_index;
+ u16 vid_dword_index;
+
+ vid_dword_index = (vid >> 5) & 0x7F;
+ vid_bit_index = vid & 0x1F;
+
+ pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+ /* defer register writes to a sleepable context */
+ schedule_work(&pdata->set_vlan);
+
+ return 0;
+}
+
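+/* LTM (USB3 Latency Tolerance Messaging) setup: the six BELT/idle
+ * registers default to zero unless a 24-byte override table is
+ * found via the two-byte descriptor at EEPROM/OTP offset 0x3F
+ * (length, then location).
+ */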
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+ int ret;
+ u32 buf;
+ u32 regs[6] = { 0 };
+
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (buf & USB_CFG1_LTM_ENABLE_) {
+ u8 temp[2];
+ /* Get values from EEPROM first */
+ if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+ if (temp[0] == 24) {
+ ret = lan78xx_read_raw_eeprom(dev,
+ temp[1] * 2,
+ 24,
+ (u8 *)regs);
+ if (ret < 0)
+ return;
+ }
+ } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+ if (temp[0] == 24) {
+ ret = lan78xx_read_raw_otp(dev,
+ temp[1] * 2,
+ 24,
+ (u8 *)regs);
+ if (ret < 0)
+ return;
+ }
+ }
+ }
+
+ lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
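+/* Full chip bring-up: lite reset, MAC address setup, LTM and
+ * burst/FIFO configuration, RX filter defaults, PHY reset and init,
+ * then the MAC/FCT TX and RX enables, finishing with an autoneg
+ * restart.
+ */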
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 buf;
+ int ret = 0;
+ unsigned long timeout;
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ buf |= HW_CFG_LRST_;
+ ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+ timeout = jiffies + HZ;
+ do {
+ mdelay(1);
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net,
+ "timeout on completion of LiteReset");
+ return -EIO;
+ }
+ } while (buf & HW_CFG_LRST_);
+
+ lan78xx_init_mac_address(dev);
+
+ /* Respond to the IN token with a NAK */
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ buf |= USB_CFG_BIR_;
+ ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+ /* Init LTM */
+ lan78xx_init_ltm(dev);
+
+ /* hard_header_len already includes TX_OVERHEAD (added once at
+ * bind time), so only hard_mtu needs recomputing here
+ */
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ if (dev->udev->speed == USB_SPEED_SUPER) {
+ buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = 4;
+ dev->tx_qlen = 4;
+ } else if (dev->udev->speed == USB_SPEED_HIGH) {
+ buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+ } else {
+ buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+ dev->rx_qlen = 4;
+ }
+
+ ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+ ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ buf |= HW_CFG_MEF_;
+ ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+ ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ buf |= USB_CFG_BCE_;
+ ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+ /* set FIFO sizes */
+ buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+ ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+ buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+ ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+ ret = lan78xx_write_reg(dev, FLOW, 0);
+ ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+ /* Don't need rfe_ctl_lock during initialisation */
+ ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+ pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+ /* Enable or disable checksum offload engines */
+ lan78xx_set_features(dev->net, dev->net->features);
+
+ lan78xx_set_multicast(dev->net);
+
+ /* reset PHY */
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ buf |= PMT_CTL_PHY_RST_;
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ timeout = jiffies + HZ;
+ do {
+ mdelay(1);
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(dev->net, "timeout waiting for PHY Reset");
+ return -EIO;
+ }
+ } while (buf & PMT_CTL_PHY_RST_);
+
+ lan78xx_mii_init(dev);
+
+ ret = lan78xx_phy_init(dev);
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+ buf |= MAC_CR_GMII_EN_;
+ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+ ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+ /* enable on PHY */
+ if (buf & MAC_CR_EEE_EN_)
+ lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+ /* enable PHY interrupts */
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ buf |= INT_ENP_PHY_INT;
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf |= MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+ ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+ buf |= FCT_TX_CTL_EN_;
+ ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+ ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+ buf |= FCT_RX_CTL_EN_;
+ ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+ if (!mii_nway_restart(&dev->mii))
+ netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+ return 0;
+}
+
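+/* ifup: reset the chip, start the interrupt URB used for link
+ * events, then let keventd bring the link up.
+ */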
+static int lan78xx_open(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = lan78xx_reset(dev);
+ if (ret < 0)
+ goto done;
+
+ /* for Link Check */
+ if (dev->urb_intr) {
+ ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+ if (ret < 0) {
+ netif_err(dev, ifup, dev->net,
+ "intr submit %d\n", ret);
+ goto done;
+ }
+ }
+
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+ netif_start_queue(net);
+
+ dev->link_on = false;
+
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+ usb_autopm_put_interface(dev->intf);
+
+out:
+ return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+ DECLARE_WAITQUEUE(wait, current);
+ int temp;
+
+ /* ensure there are no more active urbs */
+ add_wait_queue(&unlink_wakeup, &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ dev->wait = &unlink_wakeup;
+ temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+ /* maybe wait for deletions to finish. */
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+ schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+ }
+ set_current_state(TASK_RUNNING);
+ dev->wait = NULL;
+ remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ netif_stop_queue(net);
+
+ netif_info(dev, ifdown, dev->net,
+ "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
+
+ lan78xx_terminate_urbs(dev);
+
+ usb_kill_urb(dev->urb_intr);
+
+ skb_queue_purge(&dev->rxq_pause);
+
+ /* deferred work (task, timer, softirq) must also stop.
+ * can't flush_scheduled_work() until we drop rtnl (later),
+ * else workers could deadlock; so make workers a NOP.
+ */
+ dev->flags = 0;
+ cancel_delayed_work_sync(&dev->wq);
+ tasklet_kill(&dev->bh);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+ return skb_linearize(skb);
+}
+
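+/* Prepend the two little-endian TX command words: TX_CMD_A carries
+ * the frame length plus the FCS, checksum-offload and LSO flags;
+ * TX_CMD_B carries the TSO MSS and any VLAN tag to insert.
+ */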
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+ struct sk_buff *skb, gfp_t flags)
+{
+ u32 tx_cmd_a, tx_cmd_b;
+
+ if (skb_headroom(skb) < TX_OVERHEAD) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ if (lan78xx_linearize(skb) < 0)
+ return NULL;
+
+ tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+ tx_cmd_b = 0;
+ if (skb_is_gso(skb)) {
+ u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+ tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+ tx_cmd_a |= TX_CMD_A_LSO_;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ tx_cmd_a |= TX_CMD_A_IVTG_;
+ tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+ }
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_b);
+ memcpy(skb->data, &tx_cmd_b, 4);
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_cmd_a);
+ memcpy(skb->data, &tx_cmd_a, 4);
+
+ return skb;
+}
+
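+/* Move an skb from its active queue to dev->done and record its new
+ * state, taking both queue locks; the tasklet is kicked only when
+ * the done queue transitions from empty.
+ */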
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+ struct sk_buff_head *list, enum skb_state state)
+{
+ unsigned long flags;
+ enum skb_state old_state;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+
+ spin_lock_irqsave(&list->lock, flags);
+ old_state = entry->state;
+ entry->state = state;
+
+ __skb_unlink(skb, list);
+ spin_unlock(&list->lock);
+ spin_lock(&dev->done.lock);
+
+ __skb_queue_tail(&dev->done, skb);
+ if (skb_queue_len(&dev->done) == 1)
+ tasklet_schedule(&dev->bh);
+ spin_unlock_irqrestore(&dev->done.lock, flags);
+
+ return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ struct lan78xx_net *dev = entry->dev;
+
+ if (urb->status == 0) {
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
+ } else {
+ dev->net->stats.tx_errors++;
+
+ switch (urb->status) {
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ break;
+
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
+ netif_stop_queue(dev->net);
+ break;
+ default:
+ netif_dbg(dev, tx_err, dev->net,
+ "tx err %d\n", entry->urb->status);
+ break;
+ }
+ }
+
+ usb_autopm_put_interface_async(dev->intf);
+
+ defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+ struct sk_buff *newsk, enum skb_state state)
+{
+ struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+ __skb_queue_tail(list, newsk);
+ entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct sk_buff *skb2 = NULL;
+
+ if (skb) {
+ skb_tx_timestamp(skb);
+ skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+ }
+
+ if (skb2) {
+ skb_queue_tail(&dev->txq_pend, skb2);
+
+ if (skb_queue_len(&dev->txq_pend) > 10)
+ netif_stop_queue(net);
+ } else {
+ netif_dbg(dev, tx_err, dev->net,
+ "lan78xx_tx_prep return NULL\n");
+ dev->net->stats.tx_errors++;
+ dev->net->stats.tx_dropped++;
+ }
+
+ tasklet_schedule(&dev->bh);
+
+ return NETDEV_TX_OK;
+}
+
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ int tmp;
+ struct usb_host_interface *alt = NULL;
+ struct usb_host_endpoint *in = NULL, *out = NULL;
+ struct usb_host_endpoint *status = NULL;
+
+ for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+ unsigned ep;
+
+ in = NULL;
+ out = NULL;
+ status = NULL;
+ alt = intf->altsetting + tmp;
+
+ for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ int intr = 0;
+
+ e = alt->endpoint + ep;
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_INT:
+ if (!usb_endpoint_dir_in(&e->desc))
+ continue;
+ intr = 1;
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_BULK:
+ break;
+ default:
+ continue;
+ }
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (!intr && !in)
+ in = e;
+ else if (intr && !status)
+ status = e;
+ } else {
+ if (!out)
+ out = e;
+ }
+ }
+ if (in && out)
+ break;
+ }
+ if (!alt || !in || !out)
+ return -EINVAL;
+
+ dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ dev->pipe_out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ dev->ep_intr = status;
+
+ return 0;
+}
+
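+/* One-time setup at probe: endpoint discovery, driver-private state
+ * (locks, deferred work, VLAN table), default feature flags, and an
+ * initial chip reset.
+ */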
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ struct lan78xx_priv *pdata = NULL;
+ int ret;
+ int i;
+
+ ret = lan78xx_get_endpoints(dev, intf);
+
+ dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+ pdata = (struct lan78xx_priv *)(dev->data[0]);
+ if (!pdata) {
+ netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+ return -ENOMEM;
+ }
+
+ pdata->dev = dev;
+
+ spin_lock_init(&pdata->rfe_ctl_lock);
+ mutex_init(&pdata->dataport_mutex);
+
+ INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+ for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+ pdata->vlan_table[i] = 0;
+
+ INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+ dev->net->features = 0;
+
+ if (DEFAULT_TX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_HW_CSUM;
+
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ if (DEFAULT_TSO_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+ dev->net->hw_features = dev->net->features;
+
+ /* account once for the 8 bytes of TX command words prepended
+ * to every frame; doing this in lan78xx_reset() instead would
+ * grow hard_header_len on every reset
+ */
+ dev->net->hard_header_len += TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ /* Init all registers */
+ ret = lan78xx_reset(dev);
+
+ dev->net->flags |= IFF_MULTICAST;
+
+ pdata->wol = WAKE_MAGIC;
+
+ return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+ if (pdata) {
+ netif_dbg(dev, ifdown, dev->net, "free pdata");
+ kfree(pdata);
+ pdata = NULL;
+ dev->data[0] = 0;
+ }
+}
+
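+/* The chip reports a 16-bit checksum in the upper bits of rx_cmd_b;
+ * hand it to the stack as CHECKSUM_COMPLETE unless RX checksum
+ * offload is disabled or the frame carries the RX_CMD_A_ICSM_ flag.
+ */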
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ u32 rx_cmd_a, u32 rx_cmd_b)
+{
+ if (!(dev->net->features & NETIF_F_RXCSUM) ||
+ unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ int status;
+
+ if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+ skb_queue_tail(&dev->rxq_pause, skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
+
+ netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+ memset(skb->cb, 0, sizeof(struct skb_data));
+
+ if (skb_defer_rx_timestamp(skb))
+ return;
+
+ status = netif_rx(skb);
+ if (status != NET_RX_SUCCESS)
+ netif_dbg(dev, rx_err, dev->net,
+ "netif_rx status %d\n", status);
+}
+
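+/* Parse one bulk-in buffer, which may batch several frames: each
+ * frame is preceded by the rx_cmd_a/b/c header words and padded so
+ * that the next header starts on a 4-byte boundary.
+ */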
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
+ while (skb->len > 0) {
+ u32 rx_cmd_a, rx_cmd_b, align_count, size;
+ u16 rx_cmd_c;
+ struct sk_buff *skb2;
+ unsigned char *packet;
+
+ memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+ le32_to_cpus(&rx_cmd_a);
+ skb_pull(skb, sizeof(rx_cmd_a));
+
+ memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+ le32_to_cpus(&rx_cmd_b);
+ skb_pull(skb, sizeof(rx_cmd_b));
+
+ memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+ le16_to_cpus(&rx_cmd_c);
+ skb_pull(skb, sizeof(rx_cmd_c));
+
+ packet = skb->data;
+
+ /* get the packet length */
+ size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+ align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ } else {
+ /* last frame in this batch */
+ if (skb->len == size) {
+ lan78xx_rx_csum_offload(dev, skb,
+ rx_cmd_a, rx_cmd_b);
+
+ skb_trim(skb, skb->len - 4); /* remove fcs */
+ skb->truesize = size + sizeof(struct sk_buff);
+
+ return 1;
+ }
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2)) {
+ netdev_warn(dev->net, "Error allocating skb");
+ return 0;
+ }
+
+ skb2->len = size;
+ skb2->data = packet;
+ skb_set_tail_pointer(skb2, size);
+
+ lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+ skb_trim(skb2, skb2->len - 4); /* remove fcs */
+ skb2->truesize = size + sizeof(struct sk_buff);
+
+ lan78xx_skb_return(dev, skb2);
+ }
+
+ skb_pull(skb, size);
+
+ /* padding bytes before the next frame starts */
+ if (skb->len)
+ skb_pull(skb, align_count);
+ }
+
+ if (unlikely(skb->len < 0)) {
+ netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+ if (!lan78xx_rx(dev, skb)) {
+ dev->net->stats.rx_errors++;
+ goto done;
+ }
+
+ if (skb->len) {
+ lan78xx_skb_return(dev, skb);
+ return;
+ }
+
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
+ dev->net->stats.rx_errors++;
+done:
+ skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
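+/* Allocate an IP-aligned skb, bind it to the URB, and submit it on
+ * the bulk-in pipe; submission is refused (-ENOLINK) while the
+ * interface is down, detached, halted, or suspended.
+ */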
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+ struct sk_buff *skb;
+ struct skb_data *entry;
+ unsigned long lockflags;
+ size_t size = dev->rx_urb_size;
+ int ret = 0;
+
+ skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!skb) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ entry = (struct skb_data *)skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = 0;
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+ skb->data, size, rx_complete, skb);
+
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+ if (netif_device_present(dev->net) &&
+ netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+ break;
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ break;
+ case -ENODEV:
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
+ netif_device_detach(dev->net);
+ break;
+ case -EHOSTUNREACH:
+ ret = -ENOLINK;
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", ret);
+ tasklet_schedule(&dev->bh);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+ ret = -ENOLINK;
+ }
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
+ }
+ return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ struct lan78xx_net *dev = entry->dev;
+ int urb_status = urb->status;
+ enum skb_state state;
+
+ skb_put(skb, urb->actual_length);
+ state = rx_done;
+ entry->urb = NULL;
+
+ switch (urb_status) {
+ case 0:
+ if (skb->len < dev->net->hard_header_len) {
+ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net,
+ "rx length %d\n", skb->len);
+ }
+ usb_mark_last_busy(dev->udev);
+ break;
+ case -EPIPE:
+ dev->net->stats.rx_errors++;
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ /* FALLTHROUGH */
+ case -ECONNRESET: /* async unlink */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "rx shutdown, code %d\n", urb_status);
+ state = rx_cleanup;
+ entry->urb = urb;
+ urb = NULL;
+ break;
+ case -EPROTO:
+ case -ETIME:
+ case -EILSEQ:
+ dev->net->stats.rx_errors++;
+ state = rx_cleanup;
+ entry->urb = urb;
+ urb = NULL;
+ break;
+
+ /* data overrun ... flush fifo? */
+ case -EOVERFLOW:
+ dev->net->stats.rx_over_errors++;
+ /* FALLTHROUGH */
+
+ default:
+ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+ break;
+ }
+
+ state = defer_bh(dev, skb, &dev->rxq, state);
+
+ if (urb) {
+ if (netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ state != unlink_start) {
+ rx_submit(dev, urb, GFP_ATOMIC);
+ return;
+ }
+ usb_free_urb(urb);
+ }
+ netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
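+/* TX bottom half: coalesce pending skbs (each padded to a 4-byte
+ * boundary) into one bulk URB up to MAX_SINGLE_PACKET_SIZE; GSO skbs
+ * are sent on their own. Under CONFIG_PM, URBs built while the
+ * device is asleep are anchored and submitted on resume.
+ */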
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+ int length;
+ struct urb *urb = NULL;
+ struct skb_data *entry;
+ unsigned long flags;
+ struct sk_buff_head *tqp = &dev->txq_pend;
+ struct sk_buff *skb, *skb2;
+ int ret;
+ int count, pos;
+ int skb_totallen, pkt_cnt;
+
+ skb_totallen = 0;
+ pkt_cnt = 0;
+ for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+ if (skb_is_gso(skb)) {
+ if (pkt_cnt) {
+ /* handle previous packets first */
+ break;
+ }
+ length = skb->len;
+ skb2 = skb_dequeue(tqp);
+ goto gso_skb;
+ }
+
+ if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+ break;
+ skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+ pkt_cnt++;
+ }
+
+ /* copy to a single skb */
+ skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+ if (!skb)
+ goto drop;
+
+ skb_put(skb, skb_totallen);
+
+ for (count = pos = 0; count < pkt_cnt; count++) {
+ skb2 = skb_dequeue(tqp);
+ if (skb2) {
+ memcpy(skb->data + pos, skb2->data, skb2->len);
+ pos += roundup(skb2->len, sizeof(u32));
+ dev_kfree_skb(skb2);
+ }
+ }
+
+ length = skb_totallen;
+
+gso_skb:
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netif_dbg(dev, tx_err, dev->net, "no urb\n");
+ goto drop;
+ }
+
+ entry = (struct skb_data *)skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = length;
+
+ spin_lock_irqsave(&dev->txq.lock, flags);
+ ret = usb_autopm_get_interface_async(dev->intf);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto drop;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+ skb->data, skb->len, tx_complete, skb);
+
+ if (length % dev->maxpacket == 0) {
+ /* send USB_ZERO_PACKET */
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+#ifdef CONFIG_PM
+ /* if this triggers, the device is still asleep */
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ /* transmission will be done in resume */
+ usb_anchor_urb(urb, &dev->deferred);
+ /* no use to process more packets */
+ netif_stop_queue(dev->net);
+ usb_put_urb(urb);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+ return;
+ }
+#endif
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ dev->net->trans_start = jiffies;
+ lan78xx_queue_skb(&dev->txq, skb, tx_start);
+ if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+ netif_stop_queue(dev->net);
+ break;
+ case -EPIPE:
+ netif_stop_queue(dev->net);
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ usb_autopm_put_interface_async(dev->intf);
+ break;
+ default:
+ usb_autopm_put_interface_async(dev->intf);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx: submit urb err %d\n", ret);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+ if (ret) {
+ netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+ dev->net->stats.tx_dropped++;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
+ } else {
+ netif_dbg(dev, tx_queued, dev->net,
+ "> tx, len %d, type 0x%x\n", length, skb->protocol);
+ }
+}
+
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+ struct urb *urb;
+ int i;
+
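+ /* Refill the rx queue with up to 10 new URBs per pass; if it is
+ * still short afterwards, reschedule the tasklet to keep filling.
+ */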
+ if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+ for (i = 0; i < 10; i++) {
+ if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+ break;
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb)
+ if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+ return;
+ }
+
+ if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+ tasklet_schedule(&dev->bh);
+ }
+ if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+ netif_wake_queue(dev->net);
+}
+
+static void lan78xx_bh(unsigned long param)
+{
+ struct lan78xx_net *dev = (struct lan78xx_net *)param;
+ struct sk_buff *skb;
+ struct skb_data *entry;
+
+ while ((skb = skb_dequeue(&dev->done))) {
+ entry = (struct skb_data *)(skb->cb);
+ switch (entry->state) {
+ case rx_done:
+ entry->state = rx_cleanup;
+ rx_process(dev, skb);
+ continue;
+ case tx_done:
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+ continue;
+ case rx_cleanup:
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+ continue;
+ default:
+ netdev_dbg(dev->net, "skb state %d\n", entry->state);
+ return;
+ }
+ }
+
+ if (netif_device_present(dev->net) && netif_running(dev->net)) {
+ if (!skb_queue_empty(&dev->txq_pend))
+ lan78xx_tx_bh(dev);
+
+ if (!timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ lan78xx_rx_bh(dev);
+ }
+}
+
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+ int status;
+ struct lan78xx_net *dev;
+
+ dev = container_of(work, struct lan78xx_net, wq.work);
+
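+ /* Process-context handler for events flagged from URB completion
+ * paths: clear halted endpoints and rerun the link reset.
+ */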
+ if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->txq);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto fail_pipe;
+ status = usb_clear_halt(dev->udev, dev->pipe_out);
+ usb_autopm_put_interface(dev->intf);
+ if (status < 0 &&
+ status != -EPIPE &&
+ status != -ESHUTDOWN) {
+ if (netif_msg_tx_err(dev))
+fail_pipe:
+ netdev_err(dev->net,
+ "can't clear tx halt, status %d\n",
+ status);
+ } else {
+ clear_bit(EVENT_TX_HALT, &dev->flags);
+ if (status != -ESHUTDOWN)
+ netif_wake_queue(dev->net);
+ }
+ }
+ if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->rxq);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto fail_halt;
+ status = usb_clear_halt(dev->udev, dev->pipe_in);
+ usb_autopm_put_interface(dev->intf);
+ if (status < 0 &&
+ status != -EPIPE &&
+ status != -ESHUTDOWN) {
+ if (netif_msg_rx_err(dev))
+fail_halt:
+ netdev_err(dev->net,
+ "can't clear rx halt, status %d\n",
+ status);
+ } else {
+ clear_bit(EVENT_RX_HALT, &dev->flags);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+
+ if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+ int ret = 0;
+
+ clear_bit(EVENT_LINK_RESET, &dev->flags);
+ status = usb_autopm_get_interface(dev->intf);
+ if (status < 0)
+ goto skip_reset;
+ ret = lan78xx_link_reset(dev);
+ if (ret < 0) {
+ usb_autopm_put_interface(dev->intf);
+skip_reset:
+ netdev_info(dev->net, "link reset failed (%d)\n",
+ ret);
+ } else {
+ usb_autopm_put_interface(dev->intf);
+ }
+ }
+}
+
+static void intr_complete(struct urb *urb)
+{
+ struct lan78xx_net *dev = urb->context;
+ int status = urb->status;
+
+ switch (status) {
+ /* success */
+ case 0:
+ lan78xx_status(dev, urb);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ENOENT: /* urb killed */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
+ return;
+
+ /* NOTE: not throttling like RX/TX, since this endpoint
+ * already polls infrequently
+ */
+ default:
+ netdev_dbg(dev->net, "intr status %d\n", status);
+ break;
+ }
+
+ if (!netif_running(dev->net))
+ return;
+
+ memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev;
+ struct usb_device *udev;
+ struct net_device *net;
+
+ dev = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+ if (!dev)
+ return;
+
+ udev = interface_to_usbdev(intf);
+
+ net = dev->net;
+ unregister_netdev(net);
+
+ cancel_delayed_work_sync(&dev->wq);
+
+ usb_scuttle_anchored_urbs(&dev->deferred);
+
+ lan78xx_unbind(dev, intf);
+
+ usb_kill_urb(dev->urb_intr);
+ usb_free_urb(dev->urb_intr);
+
+ free_netdev(net);
+ usb_put_dev(udev);
+}
+
+static void lan78xx_tx_timeout(struct net_device *net)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ unlink_urbs(dev, &dev->txq);
+ tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+ .ndo_open = lan78xx_open,
+ .ndo_stop = lan78xx_stop,
+ .ndo_start_xmit = lan78xx_start_xmit,
+ .ndo_tx_timeout = lan78xx_tx_timeout,
+ .ndo_change_mtu = lan78xx_change_mtu,
+ .ndo_set_mac_address = lan78xx_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = lan78xx_ioctl,
+ .ndo_set_rx_mode = lan78xx_set_multicast,
+ .ndo_set_features = lan78xx_set_features,
+ .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct lan78xx_net *dev;
+ struct net_device *netdev;
+ struct usb_device *udev;
+ int ret;
+ unsigned maxp;
+ unsigned period;
+ u8 *buf = NULL;
+
+ udev = interface_to_usbdev(intf);
+ udev = usb_get_dev(udev);
+
+ ret = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+ if (!netdev) {
+ dev_err(&intf->dev, "Error: OOM\n");
+ goto out1;
+ }
+
+ /* netdev_printk() needs this */
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ dev = netdev_priv(netdev);
+ dev->udev = udev;
+ dev->intf = intf;
+ dev->net = netdev;
+ dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+ | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+ skb_queue_head_init(&dev->rxq);
+ skb_queue_head_init(&dev->txq);
+ skb_queue_head_init(&dev->done);
+ skb_queue_head_init(&dev->rxq_pause);
+ skb_queue_head_init(&dev->txq_pend);
+ mutex_init(&dev->phy_mutex);
+
+ tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+ INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+ init_usb_anchor(&dev->deferred);
+
+ netdev->netdev_ops = &lan78xx_netdev_ops;
+ netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+ netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+ ret = lan78xx_bind(dev, intf);
+ if (ret < 0)
+ goto out2;
+ strcpy(netdev->name, "eth%d");
+
+ if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+ netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
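+ /* assumes the current altsetting lists the bulk-in, bulk-out and
+ * interrupt endpoints at fixed indexes 0, 1 and 2
+ */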
+ dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+ dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+ dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ dev->ep_intr->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ period = dev->ep_intr->desc.bInterval;
+
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ buf = kmalloc(maxp, GFP_KERNEL);
+ if (buf) {
+ dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb_intr) {
+ kfree(buf);
+ goto out3;
+ } else {
+ usb_fill_int_urb(dev->urb_intr, dev->udev,
+ dev->pipe_intr, buf, maxp,
+ intr_complete, dev, period);
+ }
+ }
+
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+ /* driver requires remote-wakeup capability during autosuspend. */
+ intf->needs_remote_wakeup = 1;
+
+ ret = register_netdev(netdev);
+ if (ret != 0) {
+ netif_err(dev, probe, netdev, "couldn't register the device\n");
+ goto out2;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ ret = device_set_wakeup_enable(&udev->dev, true);
+
+ /* The default autosuspend delay of 2 seconds adds more overhead
+ * than it saves; raise it to 10 seconds instead.
+ */
+ pm_runtime_set_autosuspend_delay(&udev->dev,
+ DEFAULT_AUTOSUSPEND_DELAY);
+
+ return 0;
+
+out3:
+ lan78xx_unbind(dev, intf);
+out2:
+ free_netdev(netdev);
+out1:
+ usb_put_dev(udev);
+
+ return ret;
+}
+
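+/* Bit-serial CRC-16 (polynomial 0x8005, LSB of each byte first) over a
+ * wakeup-frame pattern; the result feeds WUF_CFGX_CRC16_MASK_ below.
+ */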
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+ const u16 crc16poly = 0x8005;
+ int i;
+ u16 bit, crc, msb;
+ u8 data;
+
+ crc = 0xFFFF;
+ for (i = 0; i < len; i++) {
+ data = *buf++;
+ for (bit = 0; bit < 8; bit++) {
+ msb = crc >> 15;
+ crc <<= 1;
+
+ if (msb ^ (u16)(data & 1)) {
+ crc ^= crc16poly;
+ crc |= (u16)0x0001U;
+ }
+ data >>= 1;
+ }
+ }
+
+ return crc;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+ u32 buf;
+ int ret;
+ int mask_index;
+ u16 crc;
+ u32 temp_wucsr;
+ u32 temp_pmt_ctl;
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[2] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ temp_wucsr = 0;
+
+ temp_pmt_ctl = 0;
+ ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
+ for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
+ mask_index = 0;
+ if (wol & WAKE_PHY) {
+ temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_MAGIC) {
+ temp_wucsr |= WUCSR_MPEN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+ }
+ if (wol & WAKE_BCAST) {
+ temp_wucsr |= WUCSR_BCST_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_MCAST) {
+ temp_wucsr |= WUCSR_WAKE_EN_;
+
+ /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_MCAST_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
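+ /* mask bits 0-2: compare the first three DA bytes (01:00:5E) */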
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* for IPv6 Multicast */
+ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_MCAST_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
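+ /* mask bits 0-1: compare the two-byte 33:33 IPv6 multicast prefix */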
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_UCAST) {
+ temp_wucsr |= WUCSR_PFDA_EN_;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ if (wol & WAKE_ARP) {
+ temp_wucsr |= WUCSR_WAKE_EN_;
+
+ /* set WUF_CFG & WUF_MASK
+ * for packet type (offset 12,13) = ARP (0x0806)
+ */
+ crc = lan78xx_wakeframe_crc16(arp_type, 2);
+ ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ WUF_CFGX_EN_ |
+ WUF_CFGX_TYPE_ALL_ |
+ (0 << WUF_CFGX_OFFSET_SHIFT_) |
+ (crc & WUF_CFGX_CRC16_MASK_));
+
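+ /* mask bits 12-13: compare the EtherType bytes against 0x0806 */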
+ ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+
+ ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+ /* when multiple WOL bits are set */
+ if (hweight_long((unsigned long)wol) > 1) {
+ temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+ temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+ temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+ }
+ ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+ /* clear WUPS */
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ buf |= PMT_CTL_WUPS_MASK_;
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ return 0;
+}
+
+static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+ struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+ u32 buf;
+ int ret;
+ int event;
+
+ ret = 0;
+ event = message.event;
+
+ if (!dev->suspend_count++) {
+ spin_lock_irq(&dev->txq.lock);
+ /* don't autosuspend while transmitting */
+ if ((skb_queue_len(&dev->txq) ||
+ skb_queue_len(&dev->txq_pend)) &&
+ PMSG_IS_AUTO(message)) {
+ spin_unlock_irq(&dev->txq.lock);
+ ret = -EBUSY;
+ goto out;
+ } else {
+ set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+ spin_unlock_irq(&dev->txq.lock);
+ }
+
+ /* stop TX & RX */
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ /* empty out the rx and tx queues */
+ netif_device_detach(dev->net);
+ lan78xx_terminate_urbs(dev);
+ usb_kill_urb(dev->urb_intr);
+
+ /* reattach */
+ netif_device_attach(dev->net);
+ }
+
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ if (PMSG_IS_AUTO(message)) {
+ /* auto suspend (selective suspend) */
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf &= ~MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf &= ~MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ /* set good-frame wakeup */
+ ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+ buf |= WUCSR_RFE_WAKE_EN_;
+ buf |= WUCSR_STORE_WAKE_;
+
+ ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+ buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+ buf |= PMT_CTL_PHY_WAKE_EN_;
+ buf |= PMT_CTL_WOL_EN_;
+ buf &= ~PMT_CTL_SUS_MODE_MASK_;
+ buf |= PMT_CTL_SUS_MODE_3_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
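+ /* clear any stale wakeup status, mirroring the WUPS
+ * clear in lan78xx_set_suspend()
+ */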
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+ buf |= PMT_CTL_WUPS_MASK_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ buf |= MAC_RX_RXEN_;
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ } else {
+ lan78xx_set_suspend(dev, pdata->wol);
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int lan78xx_resume(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+ struct sk_buff *skb;
+ struct urb *res;
+ int ret;
+ u32 buf;
+
+ if (!--dev->suspend_count) {
+ /* resume interrupt URBs */
+ if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+ usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
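+ /* resubmit URBs deferred while asleep; drop any that fail
+ * and release their autopm reference
+ */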
+ spin_lock_irq(&dev->txq.lock);
+ while ((res = usb_get_from_anchor(&dev->deferred))) {
+ skb = (struct sk_buff *)res->context;
+ ret = usb_submit_urb(res, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_kfree_skb_any(skb);
+ usb_free_urb(res);
+ usb_autopm_put_interface_async(dev->intf);
+ } else {
+ dev->net->trans_start = jiffies;
+ lan78xx_queue_skb(&dev->txq, skb, tx_start);
+ }
+ }
+
+ clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+ spin_unlock_irq(&dev->txq.lock);
+
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+ netif_start_queue(dev->net);
+ tasklet_schedule(&dev->bh);
+ }
+ }
+
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+ ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+ WUCSR2_ARP_RCD_ |
+ WUCSR2_IPV6_TCPSYN_RCD_ |
+ WUCSR2_IPV4_TCPSYN_RCD_);
+
+ ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+ WUCSR_EEE_RX_WAKE_ |
+ WUCSR_PFDA_FR_ |
+ WUCSR_RFE_WAKE_FR_ |
+ WUCSR_WUFR_ |
+ WUCSR_MPR_ |
+ WUCSR_BCST_FR_);
+
+ ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ buf |= MAC_TX_TXEN_;
+ ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+ return 0;
+}
+
+static int lan78xx_reset_resume(struct usb_interface *intf)
+{
+ struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+ lan78xx_reset(dev);
+ return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+ {
+ /* LAN7800 USB Gigabit Ethernet Device */
+ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+ },
+ {
+ /* LAN7850 USB Gigabit Ethernet Device */
+ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = lan78xx_probe,
+ .disconnect = lan78xx_disconnect,
+ .suspend = lan78xx_suspend,
+ .resume = lan78xx_resume,
+ .reset_resume = lan78xx_reset_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644
index 000000000000..ae7562ee72ad
--- /dev/null
+++ b/drivers/net/usb/lan78xx.h
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1
+#define USB_VENDOR_REQUEST_GET_STATS 0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT BIT(24)
+#define INT_ENP_RDFO_INT BIT(22)
+#define INT_ENP_TXE_INT BIT(21)
+#define INT_ENP_TX_DIS_INT BIT(19)
+#define INT_ENP_RX_DIS_INT BIT(18)
+#define INT_ENP_PHY_INT BIT(17)
+#define INT_ENP_DP_INT BIT(16)
+#define INT_ENP_MAC_ERR_INT BIT(15)
+#define INT_ENP_TDFU_INT BIT(14)
+#define INT_ENP_TDFO_INT BIT(13)
+#define INT_ENP_UTX_FP_INT BIT(12)
+
+#define TX_PKT_ALIGNMENT 4
+#define RX_PKT_ALIGNMENT 4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_ (0x20000000)
+#define TX_CMD_A_ICE_ (0x10000000)
+#define TX_CMD_A_LSO_ (0x08000000)
+#define TX_CMD_A_IPE_ (0x04000000)
+#define TX_CMD_A_TPE_ (0x02000000)
+#define TX_CMD_A_IVTG_ (0x01000000)
+#define TX_CMD_A_RVTG_ (0x00800000)
+#define TX_CMD_A_FCS_ (0x00400000)
+#define TX_CMD_A_LEN_MASK_ (0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_ (16)
+#define TX_CMD_B_MSS_MASK_ (0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_ ((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_ (0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_ (0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_ (0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_ (0x00000FFF)
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_ (0x80000000)
+#define RX_CMD_A_TCE_ (0x40000000)
+#define RX_CMD_A_CSE_MASK_ (0xC0000000)
+#define RX_CMD_A_IPV_ (0x20000000)
+#define RX_CMD_A_PID_MASK_ (0x18000000)
+#define RX_CMD_A_PID_NONE_IP_ (0x00000000)
+#define RX_CMD_A_PID_TCP_IP_ (0x08000000)
+#define RX_CMD_A_PID_UDP_IP_ (0x10000000)
+#define RX_CMD_A_PID_IP_ (0x18000000)
+#define RX_CMD_A_PFF_ (0x04000000)
+#define RX_CMD_A_BAM_ (0x02000000)
+#define RX_CMD_A_MAM_ (0x01000000)
+#define RX_CMD_A_FVTG_ (0x00800000)
+#define RX_CMD_A_RED_ (0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_ (0xC03F0000)
+#define RX_CMD_A_RWT_ (0x00200000)
+#define RX_CMD_A_RUNT_ (0x00100000)
+#define RX_CMD_A_LONG_ (0x00080000)
+#define RX_CMD_A_RXE_ (0x00040000)
+#define RX_CMD_A_DRB_ (0x00020000)
+#define RX_CMD_A_FCS_ (0x00010000)
+#define RX_CMD_A_UAM_ (0x00008000)
+#define RX_CMD_A_ICSM_ (0x00004000)
+#define RX_CMD_A_LEN_MASK_ (0x00003FFF)
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_ (16)
+#define RX_CMD_B_CSUM_MASK_ (0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_ (0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_ (0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_ (0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_ (0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_ (15)
+#define RX_CMD_C_WAKE_ (0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_ (14)
+#define RX_CMD_C_REF_FAIL_ (0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS (193)
+
+#define ID_REV (0x00)
+#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_ (0x7800)
+
+#define FPGA_REV (0x04)
+#define FPGA_REV_MINOR_MASK_ (0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_ (0x000000FF)
+
+#define INT_STS (0x0C)
+#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_ (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_ (0x02000000)
+#define INT_STS_EEE_RX_LPI_ (0x01000000)
+#define INT_STS_RDFO_ (0x00400000)
+#define INT_STS_TXE_ (0x00200000)
+#define INT_STS_TX_DIS_ (0x00080000)
+#define INT_STS_RX_DIS_ (0x00040000)
+#define INT_STS_PHY_INT_ (0x00020000)
+#define INT_STS_DP_INT_ (0x00010000)
+#define INT_STS_MAC_ERR_ (0x00008000)
+#define INT_STS_TDFU_ (0x00004000)
+#define INT_STS_TDFO_ (0x00002000)
+#define INT_STS_UFX_FP_ (0x00001000)
+#define INT_STS_GPIO_MASK_ (0x00000FFF)
+#define INT_STS_GPIO11_ (0x00000800)
+#define INT_STS_GPIO10_ (0x00000400)
+#define INT_STS_GPIO9_ (0x00000200)
+#define INT_STS_GPIO8_ (0x00000100)
+#define INT_STS_GPIO7_ (0x00000080)
+#define INT_STS_GPIO6_ (0x00000040)
+#define INT_STS_GPIO5_ (0x00000020)
+#define INT_STS_GPIO4_ (0x00000010)
+#define INT_STS_GPIO3_ (0x00000008)
+#define INT_STS_GPIO2_ (0x00000004)
+#define INT_STS_GPIO1_ (0x00000002)
+#define INT_STS_GPIO0_ (0x00000001)
+
+#define HW_CFG (0x010)
+#define HW_CFG_CLK125_EN_ (0x02000000)
+#define HW_CFG_REFCLK25_EN_ (0x01000000)
+#define HW_CFG_LED3_EN_ (0x00800000)
+#define HW_CFG_LED2_EN_ (0x00400000)
+#define HW_CFG_LED1_EN_ (0x00200000)
+#define HW_CFG_LED0_EN_ (0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_ (0x00020000)
+#define HW_CFG_EEE_TSU_ (0x00010000)
+#define HW_CFG_NETDET_STS_ (0x00008000)
+#define HW_CFG_NETDET_EN_ (0x00004000)
+#define HW_CFG_EEM_ (0x00002000)
+#define HW_CFG_RST_PROTECT_ (0x00001000)
+#define HW_CFG_CONNECT_BUF_ (0x00000400)
+#define HW_CFG_CONNECT_EN_ (0x00000200)
+#define HW_CFG_CONNECT_POL_ (0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_ (0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2 (0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N (0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N (0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N (0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_ (0x00000020)
+#define HW_CFG_MEF_ (0x00000010)
+#define HW_CFG_ETC_ (0x00000008)
+#define HW_CFG_LRST_ (0x00000002)
+#define HW_CFG_SRST_ (0x00000001)
+
+#define PMT_CTL (0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_ (0x00002000)
+#define PMT_CTL_EEE_WUPS_ (0x00001000)
+#define PMT_CTL_MAC_SRST_ (0x00000800)
+#define PMT_CTL_PHY_PWRUP_ (0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_ (0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_ (0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_ (0x00000100)
+#define PMT_CTL_READY_ (0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_ (0x00000060)
+#define PMT_CTL_SUS_MODE_0_ (0x00000000)
+#define PMT_CTL_SUS_MODE_1_ (0x00000020)
+#define PMT_CTL_SUS_MODE_2_ (0x00000040)
+#define PMT_CTL_SUS_MODE_3_ (0x00000060)
+#define PMT_CTL_PHY_RST_ (0x00000010)
+#define PMT_CTL_WOL_EN_ (0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_ (0x00000004)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
+#define PMT_CTL_WUPS_MLT_ (0x00000003)
+#define PMT_CTL_WUPS_MAC_ (0x00000002)
+#define PMT_CTL_WUPS_PHY_ (0x00000001)
+
+#define GPIO_CFG0 (0x018)
+#define GPIO_CFG0_GPIOEN_MASK_ (0x0000F000)
+#define GPIO_CFG0_GPIOEN3_ (0x00008000)
+#define GPIO_CFG0_GPIOEN2_ (0x00004000)
+#define GPIO_CFG0_GPIOEN1_ (0x00002000)
+#define GPIO_CFG0_GPIOEN0_ (0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_ (0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_ (0x00000800)
+#define GPIO_CFG0_GPIOBUF2_ (0x00000400)
+#define GPIO_CFG0_GPIOBUF1_ (0x00000200)
+#define GPIO_CFG0_GPIOBUF0_ (0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_ (0x000000F0)
+#define GPIO_CFG0_GPIODIR3_ (0x00000080)
+#define GPIO_CFG0_GPIODIR2_ (0x00000040)
+#define GPIO_CFG0_GPIODIR1_ (0x00000020)
+#define GPIO_CFG0_GPIODIR0_ (0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_ (0x0000000F)
+#define GPIO_CFG0_GPIOD3_ (0x00000008)
+#define GPIO_CFG0_GPIOD2_ (0x00000004)
+#define GPIO_CFG0_GPIOD1_ (0x00000002)
+#define GPIO_CFG0_GPIOD0_ (0x00000001)
+
+#define GPIO_CFG1 (0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_ (0xFF000000)
+#define GPIO_CFG1_GPIOEN11_ (0x80000000)
+#define GPIO_CFG1_GPIOEN10_ (0x40000000)
+#define GPIO_CFG1_GPIOEN9_ (0x20000000)
+#define GPIO_CFG1_GPIOEN8_ (0x10000000)
+#define GPIO_CFG1_GPIOEN7_ (0x08000000)
+#define GPIO_CFG1_GPIOEN6_ (0x04000000)
+#define GPIO_CFG1_GPIOEN5_ (0x02000000)
+#define GPIO_CFG1_GPIOEN4_ (0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_ (0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_ (0x00800000)
+#define GPIO_CFG1_GPIOBUF10_ (0x00400000)
+#define GPIO_CFG1_GPIOBUF9_ (0x00200000)
+#define GPIO_CFG1_GPIOBUF8_ (0x00100000)
+#define GPIO_CFG1_GPIOBUF7_ (0x00080000)
+#define GPIO_CFG1_GPIOBUF6_ (0x00040000)
+#define GPIO_CFG1_GPIOBUF5_ (0x00020000)
+#define GPIO_CFG1_GPIOBUF4_ (0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_ (0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_ (0x00008000)
+#define GPIO_CFG1_GPIODIR10_ (0x00004000)
+#define GPIO_CFG1_GPIODIR9_ (0x00002000)
+#define GPIO_CFG1_GPIODIR8_ (0x00001000)
+#define GPIO_CFG1_GPIODIR7_ (0x00000800)
+#define GPIO_CFG1_GPIODIR6_ (0x00000400)
+#define GPIO_CFG1_GPIODIR5_ (0x00000200)
+#define GPIO_CFG1_GPIODIR4_ (0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_ (0x000000FF)
+#define GPIO_CFG1_GPIOD11_ (0x00000080)
+#define GPIO_CFG1_GPIOD10_ (0x00000040)
+#define GPIO_CFG1_GPIOD9_ (0x00000020)
+#define GPIO_CFG1_GPIOD8_ (0x00000010)
+#define GPIO_CFG1_GPIOD7_ (0x00000008)
+#define GPIO_CFG1_GPIOD6_ (0x00000004)
+#define GPIO_CFG1_GPIOD5_ (0x00000002)
+#define GPIO_CFG1_GPIOD4_ (0x00000001)
+
+#define GPIO_WAKE (0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_ (0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_ (0x08000000)
+#define GPIO_WAKE_GPIOPOL10_ (0x04000000)
+#define GPIO_WAKE_GPIOPOL9_ (0x02000000)
+#define GPIO_WAKE_GPIOPOL8_ (0x01000000)
+#define GPIO_WAKE_GPIOPOL7_ (0x00800000)
+#define GPIO_WAKE_GPIOPOL6_ (0x00400000)
+#define GPIO_WAKE_GPIOPOL5_ (0x00200000)
+#define GPIO_WAKE_GPIOPOL4_ (0x00100000)
+#define GPIO_WAKE_GPIOPOL3_ (0x00080000)
+#define GPIO_WAKE_GPIOPOL2_ (0x00040000)
+#define GPIO_WAKE_GPIOPOL1_ (0x00020000)
+#define GPIO_WAKE_GPIOPOL0_ (0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_ (0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_ (0x00000800)
+#define GPIO_WAKE_GPIOWK10_ (0x00000400)
+#define GPIO_WAKE_GPIOWK9_ (0x00000200)
+#define GPIO_WAKE_GPIOWK8_ (0x00000100)
+#define GPIO_WAKE_GPIOWK7_ (0x00000080)
+#define GPIO_WAKE_GPIOWK6_ (0x00000040)
+#define GPIO_WAKE_GPIOWK5_ (0x00000020)
+#define GPIO_WAKE_GPIOWK4_ (0x00000010)
+#define GPIO_WAKE_GPIOWK3_ (0x00000008)
+#define GPIO_WAKE_GPIOWK2_ (0x00000004)
+#define GPIO_WAKE_GPIOWK1_ (0x00000002)
+#define GPIO_WAKE_GPIOWK0_ (0x00000001)
+
+#define DP_SEL (0x024)
+#define DP_SEL_DPRDY_ (0x80000000)
+#define DP_SEL_RSEL_MASK_ (0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_ (0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_ (0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_ (0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_ (0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_ (0x00000005)
+#define DP_SEL_RSEL_TXFIFO_ (0x00000004)
+#define DP_SEL_RSEL_RXFIFO_ (0x00000003)
+#define DP_SEL_RSEL_LSO_ (0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_ (0x00000001)
+#define DP_SEL_RSEL_URXBUF_ (0x00000000)
+#define DP_SEL_VHF_HASH_LEN (16)
+#define DP_SEL_VHF_VLAN_LEN (128)
+
+#define DP_CMD (0x028)
+#define DP_CMD_WRITE_ (0x00000001)
+#define DP_CMD_READ_ (0x00000000)
+
+#define DP_ADDR (0x02C)
+#define DP_ADDR_MASK_ (0x00003FFF)
+
+#define DP_DATA (0x030)
+
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ (0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_ (0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000400)
+#define E2P_CMD_EPC_DL_ (0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_ (0x000000FF)
+
+#define BOS_ATTR (0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_ (0x000000FF)
+
+#define SS_ATTR (0x054)
+#define SS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define HS_ATTR (0x058)
+#define HS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define FS_ATTR (0x05C)
+#define FS_ATTR_POLL_INT_MASK_ (0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF)
+
+#define STR_ATTR0 (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_ (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_ (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_ (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_ (0x000000FF)
+
+#define STR_ATTR1 (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_ (0x000000FF)
+
+#define STR_FLAG_ATTR (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_ (0x000000FF)
+
+#define USB_CFG0 (0x080)
+#define USB_CFG_LPM_RESPONSE_ (0x80000000)
+#define USB_CFG_LPM_CAPABILITY_ (0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_ (0x20000000)
+#define USB_CFG_HIRD_THR_MASK_ (0x1F000000)
+#define USB_CFG_HIRD_THR_960_ (0x1C000000)
+#define USB_CFG_HIRD_THR_885_ (0x1B000000)
+#define USB_CFG_HIRD_THR_810_ (0x1A000000)
+#define USB_CFG_HIRD_THR_735_ (0x19000000)
+#define USB_CFG_HIRD_THR_660_ (0x18000000)
+#define USB_CFG_HIRD_THR_585_ (0x17000000)
+#define USB_CFG_HIRD_THR_510_ (0x16000000)
+#define USB_CFG_HIRD_THR_435_ (0x15000000)
+#define USB_CFG_HIRD_THR_360_ (0x14000000)
+#define USB_CFG_HIRD_THR_285_ (0x13000000)
+#define USB_CFG_HIRD_THR_210_ (0x12000000)
+#define USB_CFG_HIRD_THR_135_ (0x11000000)
+#define USB_CFG_HIRD_THR_60_ (0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_ (0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_ (0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_ (0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_ (0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_ (0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_ (0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_ (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_ (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_ (0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_ (0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_ (0x00000000)
+#define USB_CFG_BIR_ (0x00000040)
+#define USB_CFG_BCE_ (0x00000020)
+#define USB_CFG_PORT_SWAP_ (0x00000010)
+#define USB_CFG_LPM_EN_ (0x00000008)
+#define USB_CFG_RMT_WKP_ (0x00000004)
+#define USB_CFG_PWR_SEL_ (0x00000002)
+#define USB_CFG_STALL_BO_DIS_ (0x00000001)
+
+#define USB_CFG1 (0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_ (0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_ (0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_ (0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_ (0x00001000)
+#define USB_CFG1_DEV_U2_EN_ (0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_ (0x00000400)
+#define USB_CFG1_DEV_U1_EN_ (0x00000200)
+#define USB_CFG1_LTM_ENABLE_ (0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_ (0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_ (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_ (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_ (0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_ (0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_ (0x00000000)
+
+#define USB_CFG2 (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_ (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_ (0x0000FFFF)
+
+#define BURST_CAP (0x090)
+#define BURST_CAP_SIZE_MASK_ (0x000000FF)
+
+#define BULK_IN_DLY (0x094)
+#define BULK_IN_DLY_MASK_ (0x0000FFFF)
+
+#define INT_EP_CTL (0x098)
+#define INT_EP_INTEP_ON_ (0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_ (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_ (0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_ (0x01000000)
+#define INT_EP_RDFO_EN_ (0x00400000)
+#define INT_EP_TXE_EN_ (0x00200000)
+#define INT_EP_TX_DIS_EN_ (0x00080000)
+#define INT_EP_RX_DIS_EN_ (0x00040000)
+#define INT_EP_PHY_INT_EN_ (0x00020000)
+#define INT_EP_DP_INT_EN_ (0x00010000)
+#define INT_EP_MAC_ERR_EN_ (0x00008000)
+#define INT_EP_TDFU_EN_ (0x00004000)
+#define INT_EP_TDFO_EN_ (0x00002000)
+#define INT_EP_UTX_FP_EN_ (0x00001000)
+#define INT_EP_GPIO_EN_MASK_ (0x00000FFF)
+
+#define PIPE_CTL (0x09C)
+#define PIPE_CTL_TXSWING_ (0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_ (0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_ (0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001)
+
+#define U1_LATENCY (0xA0)
+#define U2_LATENCY (0xA4)
+
+#define USB_STATUS (0x0A8)
+#define USB_STATUS_REMOTE_WK_ (0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_ (0x00080000)
+#define USB_STATUS_LTM_ENABLE_ (0x00040000)
+#define USB_STATUS_U2_ENABLE_ (0x00020000)
+#define USB_STATUS_U1_ENABLE_ (0x00010000)
+#define USB_STATUS_SET_SEL_ (0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_ (0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_ (0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_ (0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_ (0x00000001)
+
+#define USB_CFG3 (0x0AC)
+#define USB_CFG3_EN_U2_LTM_ (0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_ (0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_ (0x10000000)
+#define USB_CFG3_LPM_NYET_THR_ (0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_ (0x00800000)
+#define USB_CFG3_LFPS_FILT_ (0x00400000)
+#define USB_CFG3_SKIP_RX_DET_ (0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_ (0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_ (0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_ (0x00010000)
+#define USB_CFG3_REQ_P1P2P3 (0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_ (0x00004000)
+#define USB_CFG3_DIS_SCRAMB_ (0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_ (0x00001FFF)
+
+#define RFE_CTL (0x0B0)
+#define RFE_CTL_IGMP_COE_ (0x00004000)
+#define RFE_CTL_ICMP_COE_ (0x00002000)
+#define RFE_CTL_TCPUDP_COE_ (0x00001000)
+#define RFE_CTL_IP_COE_ (0x00000800)
+#define RFE_CTL_BCAST_EN_ (0x00000400)
+#define RFE_CTL_MCAST_EN_ (0x00000200)
+#define RFE_CTL_UCAST_EN_ (0x00000100)
+#define RFE_CTL_VLAN_STRIP_ (0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_ (0x00000040)
+#define RFE_CTL_VLAN_FILTER_ (0x00000020)
+#define RFE_CTL_SA_FILTER_ (0x00000010)
+#define RFE_CTL_MCAST_HASH_ (0x00000008)
+#define RFE_CTL_DA_HASH_ (0x00000004)
+#define RFE_CTL_DA_PERFECT_ (0x00000002)
+#define RFE_CTL_RST_ (0x00000001)
+
+#define VLAN_TYPE (0x0B4)
+#define VLAN_TYPE_MASK_ (0x0000FFFF)
+
+#define FCT_RX_CTL (0x0C0)
+#define FCT_RX_CTL_EN_ (0x80000000)
+#define FCT_RX_CTL_RST_ (0x40000000)
+#define FCT_RX_CTL_SBF_ (0x02000000)
+#define FCT_RX_CTL_OVFL_ (0x01000000)
+#define FCT_RX_CTL_DROP_ (0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_ (0x00400000)
+#define FCT_RX_CTL_EMPTY_ (0x00200000)
+#define FCT_RX_CTL_DIS_ (0x00100000)
+#define FCT_RX_CTL_USED_MASK_ (0x0000FFFF)
+
+#define FCT_TX_CTL (0x0C4)
+#define FCT_TX_CTL_EN_ (0x80000000)
+#define FCT_TX_CTL_RST_ (0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_ (0x00400000)
+#define FCT_TX_CTL_EMPTY_ (0x00200000)
+#define FCT_TX_CTL_DIS_ (0x00100000)
+#define FCT_TX_CTL_USED_MASK_ (0x0000FFFF)
+
+#define FCT_RX_FIFO_END (0x0C8)
+#define FCT_RX_FIFO_END_MASK_ (0x0000007F)
+
+#define FCT_TX_FIFO_END (0x0CC)
+#define FCT_TX_FIFO_END_MASK_ (0x0000003F)
+
+#define FCT_FLOW (0x0D0)
+#define FCT_FLOW_OFF_MASK_ (0x00007F00)
+#define FCT_FLOW_ON_MASK_ (0x0000007F)
+
+#define RX_DP_STOR (0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_ (0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_ (0x0000FFFF)
+
+#define TX_DP_STOR (0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_ (0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_ (0x0000FFFF)
+
+#define LTM_BELT_IDLE0 (0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_ (0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_ (0x00000FFF)
+
+#define LTM_BELT_IDLE1 (0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_ (0x00000FFF)
+
+#define LTM_BELT_ACT0 (0x0E8)
+#define LTM_BELT_ACT0_ACT1000_ (0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_ (0x00000FFF)
+
+#define LTM_BELT_ACT1 (0x0EC)
+#define LTM_BELT_ACT1_ACT10_ (0x00000FFF)
+
+#define LTM_INACTIVE0 (0x0F0)
+#define LTM_INACTIVE0_TIMER1000_ (0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_ (0x0000FFFF)
+
+#define LTM_INACTIVE1 (0x0F4)
+#define LTM_INACTIVE1_TIMER10_ (0x0000FFFF)
+
+#define MAC_CR (0x100)
+#define MAC_CR_GMII_EN_ (0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_ (0x00040000)
+#define MAC_CR_EEE_EN_ (0x00020000)
+#define MAC_CR_EEE_TLAR_EN_ (0x00010000)
+#define MAC_CR_ADP_ (0x00002000)
+#define MAC_CR_AUTO_DUPLEX_ (0x00001000)
+#define MAC_CR_AUTO_SPEED_ (0x00000800)
+#define MAC_CR_LOOPBACK_ (0x00000400)
+#define MAC_CR_BOLMT_MASK_ (0x000000C0)
+#define MAC_CR_FULL_DUPLEX_ (0x00000008)
+#define MAC_CR_SPEED_MASK_ (0x00000006)
+#define MAC_CR_SPEED_1000_ (0x00000004)
+#define MAC_CR_SPEED_100_ (0x00000002)
+#define MAC_CR_SPEED_10_ (0x00000000)
+#define MAC_CR_RST_ (0x00000001)
+
+#define MAC_RX (0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_ (16)
+#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000)
+#define MAC_RX_FCS_STRIP_ (0x00000010)
+#define MAC_RX_VLAN_FSE_ (0x00000004)
+#define MAC_RX_RXD_ (0x00000002)
+#define MAC_RX_RXEN_ (0x00000001)
+
+#define MAC_TX (0x108)
+#define MAC_TX_BAD_FCS_ (0x00000004)
+#define MAC_TX_TXD_ (0x00000002)
+#define MAC_TX_TXEN_ (0x00000001)
+
+#define FLOW (0x10C)
+#define FLOW_CR_FORCE_FC_ (0x80000000)
+#define FLOW_CR_TX_FCEN_ (0x40000000)
+#define FLOW_CR_RX_FCEN_ (0x20000000)
+#define FLOW_CR_FPF_ (0x10000000)
+#define FLOW_CR_FCPT_MASK_ (0x0000FFFF)
+
+#define RAND_SEED (0x110)
+#define RAND_SEED_MASK_ (0x0000FFFF)
+
+#define ERR_STS (0x114)
+#define ERR_STS_FERR_ (0x00000100)
+#define ERR_STS_LERR_ (0x00000080)
+#define ERR_STS_RFERR_ (0x00000040)
+#define ERR_STS_ECERR_ (0x00000010)
+#define ERR_STS_ALERR_ (0x00000008)
+#define ERR_STS_URERR_ (0x00000004)
+
+#define RX_ADDRH (0x118)
+#define RX_ADDRH_MASK_ (0x0000FFFF)
+
+#define RX_ADDRL (0x11C)
+#define RX_ADDRL_MASK_ (0xFFFFFFFF)
+
+#define MII_ACC (0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_ (11)
+#define MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_ (6)
+#define MII_ACC_MIIRINDA_MASK_ (0x000007C0)
+#define MII_ACC_MII_READ_ (0x00000000)
+#define MII_ACC_MII_WRITE_ (0x00000002)
+#define MII_ACC_MII_BUSY_ (0x00000001)
+
+#define MII_DATA (0x124)
+#define MII_DATA_MASK_ (0x0000FFFF)
+
+#define MAC_RGMII_ID (0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_ (0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_ (0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY (0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_ (0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS (0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_ (0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_ (0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY (0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_ (0x00FFFFFF)
+
+#define WUCSR (0x140)
+#define WUCSR_TESTMODE_ (0x80000000)
+#define WUCSR_RFE_WAKE_EN_ (0x00004000)
+#define WUCSR_EEE_TX_WAKE_ (0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_ (0x00001000)
+#define WUCSR_EEE_RX_WAKE_ (0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_ (0x00000400)
+#define WUCSR_RFE_WAKE_FR_ (0x00000200)
+#define WUCSR_STORE_WAKE_ (0x00000100)
+#define WUCSR_PFDA_FR_ (0x00000080)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_BCST_FR_ (0x00000010)
+#define WUCSR_PFDA_EN_ (0x00000008)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
+#define WUCSR_BCST_EN_ (0x00000001)
+
+#define WK_SRC (0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_ (20)
+#define WK_SRC_GPIOX_INT_WK_MASK_ (0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_ (0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_ (0x00008000)
+#define WK_SRC_EEE_TX_WK_ (0x00004000)
+#define WK_SRC_EEE_RX_WK_ (0x00002000)
+#define WK_SRC_GOOD_FR_WK_ (0x00001000)
+#define WK_SRC_PFDA_FR_WK_ (0x00000800)
+#define WK_SRC_MP_FR_WK_ (0x00000400)
+#define WK_SRC_BCAST_FR_WK_ (0x00000200)
+#define WK_SRC_WU_FR_WK_ (0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_ (0x0000001F)
+
+#define WUF_CFG0 (0x150)
+#define NUM_OF_WUF_CFG (32)
+#define WUF_CFG_BEGIN (WUF_CFG0)
+#define WUF_CFG(index) (WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_ (0x80000000)
+#define WUF_CFGX_TYPE_MASK_ (0x03000000)
+#define WUF_CFGX_TYPE_MCAST_ (0x02000000)
+#define WUF_CFGX_TYPE_ALL_ (0x01000000)
+#define WUF_CFGX_TYPE_UCAST_ (0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_ (16)
+#define WUF_CFGX_OFFSET_MASK_ (0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_ (0x0000FFFF)
+
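+/* each of the 32 wakeup filters has a 128-bit byte mask held in four
+ * 32-bit WUF_MASKn registers spaced 0x10 apart
+ */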
+#define WUF_MASK0_0 (0x200)
+#define WUF_MASK0_1 (0x204)
+#define WUF_MASK0_2 (0x208)
+#define WUF_MASK0_3 (0x20C)
+#define NUM_OF_WUF_MASK (32)
+#define WUF_MASK0_BEGIN (WUF_MASK0_0)
+#define WUF_MASK1_BEGIN (WUF_MASK0_1)
+#define WUF_MASK2_BEGIN (WUF_MASK0_2)
+#define WUF_MASK3_BEGIN (WUF_MASK0_3)
+#define WUF_MASK0(index) (WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index) (WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index) (WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index) (WUF_MASK3_BEGIN + (0x10 * (index)))
+
+#define MAF_BASE (0x400)
+#define MAF_HIX (0x00)
+#define MAF_LOX (0x04)
+#define NUM_OF_MAF (33)
+#define MAF_HI_BEGIN (MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN (MAF_BASE + MAF_LOX)
+#define MAF_HI(index) (MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index) (MAF_BASE + (8 * (index)) + (MAF_LOX))
+#define MAF_HI_VALID_ (0x80000000)
+#define MAF_HI_TYPE_MASK_ (0x40000000)
+#define MAF_HI_TYPE_SRC_ (0x40000000)
+#define MAF_HI_TYPE_DST_ (0x00000000)
+#define MAF_HI_ADDR_MASK (0x0000FFFF)
+#define MAF_LO_ADDR_MASK (0xFFFFFFFF)
+
+#define WUCSR2 (0x600)
+#define WUCSR2_CSUM_DISABLE_ (0x80000000)
+#define WUCSR2_NA_SA_SEL_ (0x00000100)
+#define WUCSR2_NS_RCD_ (0x00000080)
+#define WUCSR2_ARP_RCD_ (0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_ (0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_ (0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_ (0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_ (0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_ (0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_ (0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0 (0x610)
+#define NS1_IPV6_ADDR_DEST1 (0x614)
+#define NS1_IPV6_ADDR_DEST2 (0x618)
+#define NS1_IPV6_ADDR_DEST3 (0x61C)
+
+#define NS1_IPV6_ADDR_SRC0 (0x620)
+#define NS1_IPV6_ADDR_SRC1 (0x624)
+#define NS1_IPV6_ADDR_SRC2 (0x628)
+#define NS1_IPV6_ADDR_SRC3 (0x62C)
+
+#define NS1_ICMPV6_ADDR0_0 (0x630)
+#define NS1_ICMPV6_ADDR0_1 (0x634)
+#define NS1_ICMPV6_ADDR0_2 (0x638)
+#define NS1_ICMPV6_ADDR0_3 (0x63C)
+
+#define NS1_ICMPV6_ADDR1_0 (0x640)
+#define NS1_ICMPV6_ADDR1_1 (0x644)
+#define NS1_ICMPV6_ADDR1_2 (0x648)
+#define NS1_ICMPV6_ADDR1_3 (0x64C)
+
+#define NS2_IPV6_ADDR_DEST0 (0x650)
+#define NS2_IPV6_ADDR_DEST1 (0x654)
+#define NS2_IPV6_ADDR_DEST2 (0x658)
+#define NS2_IPV6_ADDR_DEST3 (0x65C)
+
+#define NS2_IPV6_ADDR_SRC0 (0x660)
+#define NS2_IPV6_ADDR_SRC1 (0x664)
+#define NS2_IPV6_ADDR_SRC2 (0x668)
+#define NS2_IPV6_ADDR_SRC3 (0x66C)
+
+#define NS2_ICMPV6_ADDR0_0 (0x670)
+#define NS2_ICMPV6_ADDR0_1 (0x674)
+#define NS2_ICMPV6_ADDR0_2 (0x678)
+#define NS2_ICMPV6_ADDR0_3 (0x67C)
+
+#define NS2_ICMPV6_ADDR1_0 (0x680)
+#define NS2_ICMPV6_ADDR1_1 (0x684)
+#define NS2_ICMPV6_ADDR1_2 (0x688)
+#define NS2_ICMPV6_ADDR1_3 (0x68C)
+
+#define SYN_IPV4_ADDR_SRC (0x690)
+#define SYN_IPV4_ADDR_DEST (0x694)
+#define SYN_IPV4_TCP_PORTS (0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_ (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_ (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_ (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0 (0x69C)
+#define SYN_IPV6_ADDR_SRC1 (0x6A0)
+#define SYN_IPV6_ADDR_SRC2 (0x6A4)
+#define SYN_IPV6_ADDR_SRC3 (0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0 (0x6AC)
+#define SYN_IPV6_ADDR_DEST1 (0x6B0)
+#define SYN_IPV6_ADDR_DEST2 (0x6B4)
+#define SYN_IPV6_ADDR_DEST3 (0x6B8)
+
+#define SYN_IPV6_TCP_PORTS (0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_ (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_ (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_ (0x0000FFFF)
+
+#define ARP_SPA (0x6C0)
+#define ARP_TPA (0x6C4)
+
+#define PHY_DEV_ID (0x700)
+#define PHY_DEV_ID_REV_SHIFT_ (28)
+#define PHY_DEV_ID_REV_MASK_ (0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_ (22)
+#define PHY_DEV_ID_MODEL_MASK_ (0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_ (0x003FFFFF)
+
+#define OTP_BASE_ADDR (0x00001000)
+#define OTP_ADDR_RANGE_ (0x1FF)
+
+#define OTP_PWR_DN (OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_ (0x01)
+
+#define OTP_ADDR1 (OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11 (0x1F)
+
+#define OTP_ADDR2 (OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3 (0xFF)
+
+#define OTP_ADDR3 (OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0 (0x03)
+
+#define OTP_PRGM_DATA (OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE (OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_ (0x01)
+
+#define OTP_RD_DATA (OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD (OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_ (0x04)
+#define OTP_FUNC_CMD_PROGRAM_ (0x02)
+#define OTP_FUNC_CMD_READ_ (0x01)
+
+#define OTP_TST_CMD (OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_ (0x10)
+#define OTP_TST_CMD_PRGVRFY_ (0x08)
+#define OTP_TST_CMD_WRTEST_ (0x04)
+#define OTP_TST_CMD_TESTDEC_ (0x02)
+#define OTP_TST_CMD_BLANKCHECK_ (0x01)
+
+#define OTP_CMD_GO (OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_ (0x01)
+
+#define OTP_PASS_FAIL (OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_ (0x02)
+#define OTP_PASS_FAIL_FAIL_ (0x01)
+
+#define OTP_STATUS (OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_ (0x10)
+#define OTP_STATUS_WEB_ (0x08)
+#define OTP_STATUS_PGMEN (0x04)
+#define OTP_STATUS_CPUMPEN_ (0x02)
+#define OTP_STATUS_BUSY_ (0x01)
+
+#define OTP_MAX_PRG (OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG (0x1F)
+
+#define OTP_INTR_STATUS (OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_ (0x01)
+
+#define OTP_INTR_MASK (OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_ (0x01)
+
+#define OTP_RSTB_PW1 (OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2 (OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1 (OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2 (OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1 (OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2 (OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST (OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD (OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL (OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL (OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL (OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL (OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL (OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL (OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL (OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL (OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL (OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL (OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL (OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL (OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL (OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL (OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL (OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL (OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL (OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL (OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL (OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL (OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL (OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL (OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1 (0x02)
+#define PHY_ID2 (0x03)
+
+#define PHY_DEV_ID_OUI_VTSE (0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502 (0x23)
+
+#define PHY_AUTONEG_ADV (0x04)
+#define NWAY_AR_NEXT_PAGE_ (0x8000)
+#define NWAY_AR_REMOTE_FAULT_ (0x2000)
+#define NWAY_AR_ASM_DIR_ (0x0800)
+#define NWAY_AR_PAUSE_ (0x0400)
+#define NWAY_AR_100T4_CAPS_ (0x0200)
+#define NWAY_AR_100TX_FD_CAPS_ (0x0100)
+#define NWAY_AR_SELECTOR_FIELD_ (0x001F)
+#define NWAY_AR_100TX_HD_CAPS_ (0x0080)
+#define NWAY_AR_10T_FD_CAPS_ (0x0040)
+#define NWAY_AR_10T_HD_CAPS_ (0x0020)
+#define NWAY_AR_ALL_CAPS_ (NWAY_AR_10T_HD_CAPS_ | \
+ NWAY_AR_10T_FD_CAPS_ | \
+ NWAY_AR_100TX_HD_CAPS_ | \
+ NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
+
+#define PHY_LP_ABILITY (0x05)
+#define NWAY_LPAR_NEXT_PAGE_ (0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_ (0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_ (0x2000)
+#define NWAY_LPAR_ASM_DIR_ (0x0800)
+#define NWAY_LPAR_PAUSE_ (0x0400)
+#define NWAY_LPAR_100T4_CAPS_ (0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_ (0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_ (0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_ (0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_ (0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_ (0x001F)
+
+#define PHY_AUTONEG_EXP (0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_ (0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_ (0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_ (0x0004)
+#define NWAY_ER_PAGE_RXD_ (0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_ (0x0001)
+
+#define PHY_NEXT_PAGE_TX (0x07)
+#define NPTX_NEXT_PAGE_ (0x8000)
+#define NPTX_MSG_PAGE_ (0x2000)
+#define NPTX_ACKNOWLDGE2_ (0x1000)
+#define NPTX_TOGGLE_ (0x0800)
+#define NPTX_MSG_CODE_FIELD_ (0x0001)
+
+#define PHY_LP_NEXT_PAGE (0x08)
+#define LP_RNPR_NEXT_PAGE_ (0x8000)
+#define LP_RNPR_ACKNOWLDGE_ (0x4000)
+#define LP_RNPR_MSG_PAGE_ (0x2000)
+#define LP_RNPR_ACKNOWLDGE2_ (0x1000)
+#define LP_RNPR_TOGGLE_ (0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_ (0x0001)
+
+#define PHY_1000T_CTRL (0x09)
+#define CR_1000T_TEST_MODE_4_ (0x8000)
+#define CR_1000T_TEST_MODE_3_ (0x6000)
+#define CR_1000T_TEST_MODE_2_ (0x4000)
+#define CR_1000T_TEST_MODE_1_ (0x2000)
+#define CR_1000T_MS_ENABLE_ (0x1000)
+#define CR_1000T_MS_VALUE_ (0x0800)
+#define CR_1000T_REPEATER_DTE_ (0x0400)
+#define CR_1000T_FD_CAPS_ (0x0200)
+#define CR_1000T_HD_CAPS_ (0x0100)
+#define CR_1000T_ASYM_PAUSE_ (0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_ (0x0000)
+
+#define PHY_1000T_STATUS (0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_ (0x8000)
+#define SR_1000T_MS_CONFIG_RES_ (0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_ (0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_ (0x1000)
+#define SR_1000T_LP_FD_CAPS_ (0x0800)
+#define SR_1000T_LP_HD_CAPS_ (0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_ (0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_ (0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+#define PHY_EXT_STATUS (0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_ (0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_ (0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_ (0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_ (0x1000)
+#define PHY_TX_POLARITY_MASK_ (0x0100)
+#define PHY_TX_NORMAL_POLARITY_ (0x0000)
+#define AUTO_POLARITY_DISABLE_ (0x0010)
+
+#define PHY_MMD_CTL (0x0D)
+#define PHY_MMD_CTRL_OP_MASK_ (0xC000)
+#define PHY_MMD_CTRL_OP_REG_ (0x0000)
+#define PHY_MMD_CTRL_OP_DNI_ (0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_ (0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_ (0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_ (0x001F)
+
+#define PHY_MMD_REG_DATA (0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS (0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_ (0x0020)
+
+#define PHY_VTSE_INT_MASK (0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_ (0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_ (0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_ (0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_ (0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_ (0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_ (0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_ (0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_ (0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_ (0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_ (0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_ (0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_ (0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_ (0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_ (0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
+#define PHY_VTSE_INT_MASK_RX__ER_ (0x0001)
+
+#define PHY_VTSE_INT_STS (0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_ (0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_ (0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_ (0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_ (0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_ (0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_ (0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_ (0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_ (0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_ (0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_ (0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_ (0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_ (0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_ (0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_ (0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_ (0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE (0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0 (0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1 (0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2 (0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL (0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_ (0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_ (0x000C)
+
+#define PHY_ANA_10BASE_T_HD 0x01
+#define PHY_ANA_10BASE_T_FD 0x02
+#define PHY_ANA_100BASE_TX_HD 0x04
+#define PHY_ANA_100BASE_TX_FD 0x08
+#define PHY_ANA_1000BASE_T_FD 0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA (PHY_ANA_10BASE_T_HD | \
+ PHY_ANA_10BASE_T_FD | \
+ PHY_ANA_100BASE_TX_HD | \
+ PHY_ANA_100BASE_TX_FD | \
+ PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3 3
+
+#define PHY_EEE_PCS_STATUS (0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_ ((u16)0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_ ((u16)0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_ ((u16)0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_ ((u16)0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_ ((u16)0x0004)
+
+#define PHY_EEE_CAPABILITIES (0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_ ((u16)0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_ ((u16)0x0002)
+
+#define PHY_MMD_DEV_7 7
+
+#define PHY_EEE_ADVERTISEMENT (0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_ ((u16)0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_ ((u16)0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT (0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_ ((u16)0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_ ((u16)0x0002)
+#endif /* _LAN78XX_H */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..355842b85ee9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -752,11 +752,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
{QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
@@ -784,6 +785,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index aafa1a1898e4..d9427ca3dba7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,8 +26,13 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
-/* Version Information */
-#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
+/* Information for net-next */
+#define NETNEXT_VERSION "08"
+
+/* Information for net */
+#define NET_VERSION "2"
+
+#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -143,6 +148,7 @@
#define OCP_EEE_ABLE 0xa5c4
#define OCP_EEE_ADV 0xa5d0
#define OCP_EEE_LPABLE 0xa5d2
+#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_ADC_CFG 0xbc06
/* SRAM Register */
@@ -339,6 +345,7 @@
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
+#define RX_ZERO_EN 0x0080
/* USB_U2P3_CTRL */
#define U2P3_ENABLE 0x0001
@@ -426,6 +433,10 @@
/* OCP_DOWN_SPEED */
#define EN_10M_BGOFF 0x0080
+/* OCP_PHY_STATE */
+#define TXDIS_STATE 0x01
+#define ABD_STATE 0x02
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
@@ -494,6 +505,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -602,6 +614,7 @@ struct r8152 {
void (*unload)(struct r8152 *);
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
+ bool (*in_nway)(struct r8152 *);
} rtl_ops;
int intr_interval;
@@ -621,6 +634,7 @@ enum rtl_version {
RTL_VER_03,
RTL_VER_04,
RTL_VER_05,
+ RTL_VER_06,
RTL_VER_MAX
};
@@ -1901,11 +1915,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- int i;
netif_warn(tp, tx_err, netdev, "Tx timeout\n");
- for (i = 0; i < RTL8152_MAX_TX; i++)
- usb_unlink_urb(tp->tx_info[i].urb);
+
+ usb_queue_reset_device(tp->intf);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2074,7 +2087,6 @@ static int rtl_start_rx(struct r8152 *tp)
{
int i, ret = 0;
- napi_disable(&tp->napi);
INIT_LIST_HEAD(&tp->rx_done);
for (i = 0; i < RTL8152_MAX_RX; i++) {
INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2082,7 +2094,6 @@ static int rtl_start_rx(struct r8152 *tp)
if (ret)
break;
}
- napi_enable(&tp->napi);
if (ret && ++i < RTL8152_MAX_RX) {
struct list_head rx_queue;
@@ -2165,6 +2176,7 @@ static int rtl8153_enable(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
+ usb_disable_lpm(tp->udev);
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
r8153_set_rx_early_timeout(tp);
@@ -2336,11 +2348,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
device_set_wakeup_enable(&tp->udev->dev, false);
}
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
+{
+ u8 u1u2[8];
+
+ if (enable)
+ memset(u1u2, 0xff, sizeof(u1u2));
+ else
+ memset(u1u2, 0x00, sizeof(u1u2));
+
+ usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+ ocp_data |= U2P3_ENABLE;
+ else
+ ocp_data &= ~U2P3_ENABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~(PWR_EN | PHASE2_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static bool rtl_can_wakeup(struct r8152 *tp)
+{
+ struct usb_device *udev = tp->udev;
+
+ return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
+}
+
static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
{
if (enable) {
u32 ocp_data;
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2352,6 +2414,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
__rtl_set_wol(tp, tp->saved_wolopts);
+ r8153_u2p3en(tp, true);
+ r8153_u1u2en(tp, true);
}
}
@@ -2559,7 +2623,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
u32 ocp_data;
u16 data;
- ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+ tp->version == RTL_VER_05)
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
@@ -2598,46 +2665,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, bool enable)
-{
- u8 u1u2[8];
-
- if (enable)
- memset(u1u2, 0xff, sizeof(u1u2));
- else
- memset(u1u2, 0x00, sizeof(u1u2));
-
- usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
-}
-
-static void r8153_u2p3en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
- if (enable)
- ocp_data |= U2P3_ENABLE;
- else
- ocp_data &= ~U2P3_ENABLE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
-}
-
-static void r8153_power_cut_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
- if (enable)
- ocp_data |= PWR_EN | PHASE2_EN;
- else
- ocp_data &= ~(PWR_EN | PHASE2_EN);
- ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- ocp_data &= ~PCUT_STATUS;
- ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
@@ -2700,7 +2727,7 @@ static void r8153_first_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data &= ~RX_AGG_DISABLE;
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -2780,6 +2807,7 @@ static void rtl8153_disable(struct r8152 *tp)
r8153_disable_aldps(tp);
rtl_disable(tp);
r8153_enable_aldps(tp);
+ usb_enable_lpm(tp->udev);
}
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2900,9 +2928,13 @@ static void rtl8153_up(struct r8152 *tp)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+ r8153_u1u2en(tp, false);
r8153_disable_aldps(tp);
r8153_first_init(tp);
r8153_enable_aldps(tp);
+ r8153_u2p3en(tp, true);
+ r8153_u1u2en(tp, true);
+ usb_enable_lpm(tp->udev);
}
static void rtl8153_down(struct r8152 *tp)
@@ -2913,12 +2945,39 @@ static void rtl8153_down(struct r8152 *tp)
}
r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
r8153_enable_aldps(tp);
}
+static bool rtl8152_in_nway(struct r8152 *tp)
+{
+ u16 nway_state;
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000);
+ tp->ocp_base = 0x2000;
+ ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */
+ nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a);
+
+ /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */
+ if (nway_state & 0xc000)
+ return false;
+ else
+ return true;
+}
+
+static bool rtl8153_in_nway(struct r8152 *tp)
+{
+ u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff;
+
+ if (phy_state == TXDIS_STATE || phy_state == ABD_STATE)
+ return false;
+ else
+ return true;
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -2931,8 +2990,10 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
+ napi_enable(&tp->napi);
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3222,7 +3283,7 @@ static void r8152b_init(struct r8152 *tp)
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data &= ~RX_AGG_DISABLE;
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -3251,6 +3312,7 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
+ usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
if (tp->version == RTL_VER_04) {
@@ -3273,6 +3335,13 @@ static void r8153_init(struct r8152 *tp)
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+ } else if (tp->version == RTL_VER_06) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+ ocp_data &= ~DYNAMIC_BURST;
+ else
+ ocp_data |= DYNAMIC_BURST;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -3318,6 +3387,80 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
rtl_tally_reset(tp);
+ r8153_u2p3en(tp, true);
+}
+
+static int rtl8152_pre_reset(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev;
+
+ if (!tp)
+ return 0;
+
+ netdev = tp->netdev;
+ if (!netif_running(netdev))
+ return 0;
+
+ napi_disable(&tp->napi);
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (netif_carrier_ok(netdev)) {
+ netif_stop_queue(netdev);
+ mutex_lock(&tp->control);
+ tp->rtl_ops.disable(tp);
+ mutex_unlock(&tp->control);
+ }
+
+ return 0;
+}
+
+static int rtl8152_post_reset(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev;
+
+ if (!tp)
+ return 0;
+
+ netdev = tp->netdev;
+ if (!netif_running(netdev))
+ return 0;
+
+ set_bit(WORK_ENABLE, &tp->flags);
+ if (netif_carrier_ok(netdev)) {
+ mutex_lock(&tp->control);
+ tp->rtl_ops.enable(tp);
+ rtl8152_set_rx_mode(netdev);
+ mutex_unlock(&tp->control);
+ netif_wake_queue(netdev);
+ }
+
+ napi_enable(&tp->napi);
+
+ return 0;
+}
+
+static bool delay_autosuspend(struct r8152 *tp)
+{
+ bool sw_linking = !!netif_carrier_ok(tp->netdev);
+ bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);
+
+ /* A link change has occurred that the driver has not detected yet.
+ * If the driver has disabled tx/rx while the hardware link is up,
+ * the device would not wake on an incoming packet.
+ */
+ if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
+ return true;
+
+ /* If the link went down because autonegotiation (nway) restarted,
+ * the device may miss the link-change event and would not wake
+ * when the link comes back up.
+ */
+ if (!sw_linking && tp->rtl_ops.in_nway(tp))
+ return true;
+ else
+ return false;
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3329,7 +3472,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
mutex_lock(&tp->control);
if (PMSG_IS_AUTO(message)) {
- if (netif_running(netdev) && work_busy(&tp->schedule.work)) {
+ if (netif_running(netdev) && delay_autosuspend(tp)) {
ret = -EBUSY;
goto out1;
}
@@ -3373,9 +3516,11 @@ static int rtl8152_resume(struct usb_interface *intf)
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
+ napi_enable(&tp->napi);
} else {
tp->rtl_ops.up(tp);
rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3402,12 +3547,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (usb_autopm_get_interface(tp->intf) < 0)
return;
- mutex_lock(&tp->control);
-
- wol->supported = WAKE_ANY;
- wol->wolopts = __rtl_get_wol(tp);
-
- mutex_unlock(&tp->control);
+ if (!rtl_can_wakeup(tp)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ mutex_lock(&tp->control);
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+ mutex_unlock(&tp->control);
+ }
usb_autopm_put_interface(tp->intf);
}
@@ -3417,6 +3565,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct r8152 *tp = netdev_priv(dev);
int ret;
+ if (!rtl_can_wakeup(tp))
+ return -EOPNOTSUPP;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
@@ -3907,6 +4058,10 @@ static void r8152b_get_version(struct r8152 *tp)
tp->version = RTL_VER_05;
tp->mii.supports_gmii = 1;
break;
+ case 0x5c30:
+ tp->version = RTL_VER_06;
+ tp->mii.supports_gmii = 1;
+ break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
@@ -3947,11 +4102,13 @@ static int rtl_ops_init(struct r8152 *tp)
ops->unload = rtl8152_unload;
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
+ ops->in_nway = rtl8152_in_nway;
break;
case RTL_VER_03:
case RTL_VER_04:
case RTL_VER_05:
+ case RTL_VER_06:
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8153_disable;
@@ -3960,6 +4117,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->unload = rtl8153_unload;
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
+ ops->in_nway = rtl8153_in_nway;
break;
default:
@@ -4058,6 +4216,9 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+
tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
@@ -4117,6 +4278,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
@@ -4130,6 +4292,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .pre_reset = rtl8152_pre_reset,
+ .post_reset = rtl8152_post_reset,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
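
Two of the r8152 changes above cooperate: rtl8152_tx_timeout() now asks the USB core to reset the device via usb_queue_reset_device() instead of unlinking tx URBs by hand, and the new .pre_reset/.post_reset callbacks give the driver its quiesce/restart points around that reset. A minimal sketch of the generic pattern (the my_* names are placeholders, not r8152 code):

#include <linux/usb.h>

static int my_pre_reset(struct usb_interface *intf)
{
	/* quiesce I/O: stop queues, kill URBs, cancel deferred work */
	return 0;
}

static int my_post_reset(struct usb_interface *intf)
{
	/* re-program the hardware and restart I/O */
	return 0;
}

static struct usb_driver my_driver = {
	.name       = "my_usb_nic",
	.pre_reset  = my_pre_reset,
	.post_reset = my_post_reset,
	/* .probe, .disconnect, .id_table, ... */
};

/* From a watchdog path such as a tx timeout,
 *	usb_queue_reset_device(intf);
 * schedules the reset; the core then calls pre_reset, resets the
 * device, and calls post_reset.
 */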
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3c86b107275a..b4cf10781348 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -428,12 +428,18 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
old_state = entry->state;
entry->state = state;
__skb_unlink(skb, list);
- spin_unlock(&list->lock);
- spin_lock(&dev->done.lock);
+
+ /* defer_bh() is never called with list == &dev->done.
+ * spin_lock_nested() tells lockdep that it is OK to take
+ * dev->done.lock here with list->lock held.
+ */
+ spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
+
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
- spin_unlock_irqrestore(&dev->done.lock, flags);
+ spin_unlock(&dev->done.lock);
+ spin_unlock_irqrestore(&list->lock, flags);
return old_state;
}
@@ -749,6 +755,20 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
/*-------------------------------------------------------------------------*/
+static void wait_skb_queue_empty(struct sk_buff_head *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
@@ -762,14 +782,11 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
- while (!skb_queue_empty(&dev->rxq)
- && !skb_queue_empty(&dev->txq)
- && !skb_queue_empty(&dev->done)) {
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- netif_dbg(dev, ifdown, dev->net,
- "waited for %d urb completions\n", temp);
- }
+ wait_skb_queue_empty(&dev->rxq);
+ wait_skb_queue_empty(&dev->txq);
+ wait_skb_queue_empty(&dev->done);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
}
@@ -778,7 +795,7 @@ int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval, pm;
+ int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -809,6 +826,8 @@ int usbnet_stop (struct net_device *net)
usbnet_purge_paused_rxq(dev);
+ mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
/* deferred work (task, timer, softirq) must also stop.
* can't flush_scheduled_work() until we drop rtnl (later),
* else workers could deadlock; so make workers a NOP.
@@ -819,8 +838,7 @@ int usbnet_stop (struct net_device *net)
if (!pm)
usb_autopm_put_interface(dev->intf);
- if (info->manage_power &&
- !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+ if (info->manage_power && mpn)
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
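
The defer_bh() rework above holds list->lock while taking dev->done.lock; since every sk_buff_head lock belongs to the same lockdep class, the inner acquisition must be annotated with spin_lock_nested(..., SINGLE_DEPTH_NESTING), or lockdep would report a possible recursive deadlock. A generic sketch of the same annotation, assuming the two queues are guaranteed distinct (as the comment in the diff asserts for defer_bh):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Atomically move one skb between two distinct queues whose locks
 * share a lock class.
 */
static void move_skb(struct sk_buff_head *from, struct sk_buff_head *to,
		     struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&from->lock, flags);
	__skb_unlink(skb, from);
	spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
	__skb_queue_tail(to, skb);
	spin_unlock(&to->lock);
	spin_unlock_irqrestore(&from->lock, flags);
}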
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c8186ffda1a3..0ef4a5ad5557 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -290,6 +290,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_poll_controller = veth_poll_controller,
#endif
.ndo_get_iflink = veth_get_iflink,
+ .ndo_features_check = passthru_features_check,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
@@ -305,6 +306,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
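
IFF_NO_QUEUE marks virtual devices whose transmit completes synchronously into a peer, so a default qdisc would only add latency; passthru_features_check likewise passes the feature decision through unmodified. A sketch of the setup pattern for such a device (my_virt_setup is a placeholder):

static void my_virt_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* transmit hands the skb straight to the peer; nothing can
	 * back up here, so opt out of the default qdisc
	 */
	dev->priv_flags |= IFF_NO_QUEUE;
}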
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..d8838dedb7a4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,12 +40,12 @@ module_param(gso, bool, 0444);
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
*/
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
@@ -85,7 +85,7 @@ struct receive_queue {
struct page *pages;
/* Average packet length for mergeable receive buffers. */
- struct ewma mrg_avg_pkt_len;
+ struct ewma_pkt_len mrg_avg_pkt_len;
/* Page frag for packet buffer allocation. */
struct page_frag alloc_frag;
@@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
}
- ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
@@ -518,7 +518,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
skb_mark_napi_id(skb, &rq->napi);
- netif_receive_skb(skb);
+ napi_gro_receive(&rq->napi, skb);
return;
frame_err:
@@ -540,7 +540,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
skb_put(skb, GOOD_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, 2);
sg_set_buf(rq->sg, hdr, vi->hdr_len);
skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
@@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
return err;
}
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
unsigned int len;
- len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+ len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
@@ -756,7 +756,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
r = virtqueue_enable_cb_prepare(rq->vq);
- napi_complete(napi);
+ napi_complete_done(napi, received);
if (unlikely(virtqueue_poll(rq->vq, r)) &&
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
@@ -893,7 +893,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
- sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
@@ -1615,7 +1615,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
napi_hash_add(&vi->rq[i].napi);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
- ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+ ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
}
@@ -1658,7 +1658,7 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
{
struct virtnet_info *vi = netdev_priv(queue->dev);
unsigned int queue_index = get_netdev_rx_queue_index(queue);
- struct ewma *avg;
+ struct ewma_pkt_len *avg;
BUG_ON(queue_index >= vi->max_queue_pairs);
avg = &vi->rq[queue_index].mrg_avg_pkt_len;
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (csum)
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
- if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
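
DECLARE_EWMA(pkt_len, 1, 64) generates a struct ewma_pkt_len together with ewma_pkt_len_init/_add/_read, replacing the runtime-parameterized ewma_init(..., 1, RECEIVE_AVG_WEIGHT): the 1 is the fixed-point precision factor and 64 the weight, now compile-time constants. Conceptually the update is avg = (avg * (w - 1) + sample) / w with w = 64; a plain sketch of that recurrence (not the kernel's fixed-point implementation):

struct ewma_sketch {
	unsigned long avg;
};

static void ewma_sketch_add(struct ewma_sketch *e, unsigned long sample)
{
	if (!e->avg)
		e->avg = sample;	/* first sample seeds the average */
	else
		e->avg = (e->avg * 63 + sample) / 64;
}

With weight 64, a single outlier moves the average by only ~1/64 of its deviation, which is why the mergeable-buffer sizing stays stable across transient packet-size spikes.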
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da11bb5e9c7f..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
static const u32 rxprod_reg[2] = {
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
- u32 num_rxd = 0;
+ u32 num_pkts = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
struct vmxnet3_cmd_ring *ring = NULL;
- if (num_rxd >= quota) {
+ if (num_pkts >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
- num_rxd++;
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
+ num_pkts++;
}
rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
- return num_rxd;
+ return num_pkts;
}
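
The num_rxd to num_pkts rename is substantive: the counter is compared against the NAPI quota and returned as work done, and a packet that spans several completion descriptors (see the "EOP desc" comment in the hunk) must count once, at the point it is handed to the stack. A generic sketch of that accounting convention, with fetch_desc(), desc_is_eop() and build_skb_for() as hypothetical placeholders:

static int example_poll(struct napi_struct *napi, int budget)
{
	int pkts = 0;

	while (pkts < budget) {
		struct rx_desc *d = fetch_desc();

		if (!d)
			break;			/* ring empty */
		if (desc_is_eop(d)) {		/* last fragment of a packet */
			napi_gro_receive(napi, build_skb_for(d));
			pkts++;			/* budget is counted in packets */
		}
	}
	if (pkts < budget)
		napi_complete_done(napi, pkts);
	return pkts;
}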
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
new file mode 100644
index 000000000000..e7094fbd7568
--- /dev/null
+++ b/drivers/net/vrf.c
@@ -0,0 +1,710 @@
+/*
+ * vrf.c: device driver to encapsulate a VRF space
+ *
+ * Copyright (c) 2015 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * Based on dummy, team and ipvlan drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/hashtable.h>
+
+#include <linux/inetdevice.h>
+#include <net/arp.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/vrf.h>
+
+#define DRV_NAME "vrf"
+#define DRV_VERSION "1.0"
+
+#define vrf_is_slave(dev) ((dev)->flags & IFF_SLAVE)
+
+#define vrf_master_get_rcu(dev) \
+ ((struct net_device *)rcu_dereference(dev->rx_handler_data))
+
+struct pcpu_dstats {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drps;
+ u64 rx_pkts;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
+{
+ return dst;
+}
+
+static int vrf_ip_local_out(struct sk_buff *skb)
+{
+ return ip_local_out(skb);
+}
+
+static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
+{
+ /* TO-DO: return max ethernet size? */
+ return dst->dev->mtu;
+}
+
+static void vrf_dst_destroy(struct dst_entry *dst)
+{
+ /* our dst lives forever - or until the device is closed */
+}
+
+static unsigned int vrf_default_advmss(const struct dst_entry *dst)
+{
+ return 65535 - 40;
+}
+
+static struct dst_ops vrf_dst_ops = {
+ .family = AF_INET,
+ .local_out = vrf_ip_local_out,
+ .check = vrf_ip_check,
+ .mtu = vrf_v4_mtu,
+ .destroy = vrf_dst_destroy,
+ .default_advmss = vrf_default_advmss,
+};
+
+static bool is_ip_rx_frame(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ return true;
+ }
+ return false;
+}
+
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+ vrf_dev->stats.tx_errors++;
+ kfree_skb(skb);
+}
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+
+ if (is_ip_rx_frame(skb)) {
+ struct net_device *dev = vrf_master_get_rcu(skb->dev);
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->rx_pkts++;
+ dstats->rx_bytes += skb->len;
+ u64_stats_update_end(&dstats->syncp);
+
+ skb->dev = dev;
+
+ return RX_HANDLER_ANOTHER;
+ }
+ return RX_HANDLER_PASS;
+}
+
+static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_dstats *dstats;
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ unsigned int start;
+
+ dstats = per_cpu_ptr(dev->dstats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&dstats->syncp);
+ tbytes = dstats->tx_bytes;
+ tpkts = dstats->tx_pkts;
+ tdrops = dstats->tx_drps;
+ rbytes = dstats->rx_bytes;
+ rpkts = dstats->rx_pkts;
+ } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
+ return stats;
+}
+
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ vrf_tx_error(dev, skb);
+ return NET_XMIT_DROP;
+}
+
+static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
+ struct net_device *vrf_dev)
+{
+ struct rtable *rt;
+ int err = 1;
+
+ rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
+ if (IS_ERR(rt))
+ goto out;
+
+ /* TO-DO: what about broadcast? */
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto out;
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ err = 0;
+out:
+ return err;
+}
+
+static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+ struct net_device *vrf_dev)
+{
+ struct iphdr *ip4h = ip_hdr(skb);
+ int ret = NET_XMIT_DROP;
+ struct flowi4 fl4 = {
+ /* needed to match OIF rule */
+ .flowi4_oif = vrf_dev->ifindex,
+ .flowi4_iif = LOOPBACK_IFINDEX,
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+ .daddr = ip4h->daddr,
+ };
+
+ if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+ goto err;
+
+ if (!ip4h->saddr) {
+ ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
+ RT_SCOPE_LINK);
+ }
+
+ ret = ip_local_out(skb);
+ if (unlikely(net_xmit_eval(ret)))
+ vrf_dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+
+out:
+ return ret;
+err:
+ vrf_tx_error(vrf_dev, skb);
+ goto out;
+}
+
+static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ /* strip the Ethernet header added for pass-through via the VRF device */
+ __skb_pull(skb, skb_network_offset(skb));
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ return vrf_process_v4_outbound(skb, dev);
+ case htons(ETH_P_IPV6):
+ return vrf_process_v6_outbound(skb, dev);
+ default:
+ vrf_tx_error(dev, skb);
+ return NET_XMIT_DROP;
+ }
+}
+
+static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->tx_pkts++;
+ dstats->tx_bytes += skb->len;
+ u64_stats_update_end(&dstats->syncp);
+ } else {
+ this_cpu_inc(dev->dstats->tx_drps);
+ }
+
+ return ret;
+}
+
+/* modelled after ip_finish_output2 */
+static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct rtable *rt = (struct rtable *)dst;
+ struct net_device *dev = dst->dev;
+ unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ struct neighbour *neigh;
+ u32 nexthop;
+ int ret = -EINVAL;
+
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+
+ consume_skb(skb);
+ skb = skb2;
+ }
+
+ rcu_read_lock_bh();
+
+ nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+ neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+ if (!IS_ERR(neigh))
+ ret = dst_neigh_output(dst, neigh, skb);
+
+ rcu_read_unlock_bh();
+err:
+ if (unlikely(ret < 0))
+ vrf_tx_error(skb->dev, skb);
+ return ret;
+}
+
+static int vrf_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_IP);
+
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+ NULL, dev,
+ vrf_finish_output,
+ !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static void vrf_rtable_destroy(struct net_vrf *vrf)
+{
+ struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+
+ dst_destroy(dst);
+ vrf->rth = NULL;
+}
+
+static struct rtable *vrf_rtable_create(struct net_device *dev)
+{
+ struct rtable *rth;
+
+ rth = dst_alloc(&vrf_dst_ops, dev, 2,
+ DST_OBSOLETE_NONE,
+ (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ if (rth) {
+ rth->dst.output = vrf_output;
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev));
+ rth->rt_flags = 0;
+ rth->rt_type = RTN_UNICAST;
+ rth->rt_is_input = 0;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
+ rth->rt_uses_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
+ rth->rt_uncached_list = NULL;
+ }
+
+ return rth;
+}
+
+/**************************** device handling ********************/
+
+/* cycle interface to flush neighbor cache and move routes across tables */
+static void cycle_netdev(struct net_device *dev)
+{
+ unsigned int flags = dev->flags;
+ int ret;
+
+ if (!netif_running(dev))
+ return;
+
+ ret = dev_change_flags(dev, flags & ~IFF_UP);
+ if (ret >= 0)
+ ret = dev_change_flags(dev, flags);
+
+ if (ret < 0) {
+ netdev_err(dev,
+ "Failed to cycle device %s; route tables might be wrong!\n",
+ dev->name);
+ }
+}
+
+static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
+ struct net_device *dev)
+{
+ struct list_head *head = &queue->all_slaves;
+ struct slave *slave;
+
+ list_for_each_entry(slave, head, list) {
+ if (slave->dev == dev)
+ return slave;
+ }
+
+ return NULL;
+}
+
+/* inverse of __vrf_insert_slave */
+static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
+{
+ list_del(&slave->list);
+}
+
+static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
+{
+ list_add(&slave->list, &queue->all_slaves);
+}
+
+static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
+ struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct slave_queue *queue = &vrf->queue;
+ int ret = -ENOMEM;
+
+ if (!slave || !vrf_ptr)
+ goto out_fail;
+
+ slave->dev = port_dev;
+ vrf_ptr->ifindex = dev->ifindex;
+ vrf_ptr->tb_id = vrf->tb_id;
+
+ /* register the packet handler for slave ports */
+ ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
+ if (ret) {
+ netdev_err(port_dev,
+ "Device %s failed to register rx_handler\n",
+ port_dev->name);
+ goto out_fail;
+ }
+
+ ret = netdev_master_upper_dev_link(port_dev, dev);
+ if (ret < 0)
+ goto out_unregister;
+
+ port_dev->flags |= IFF_SLAVE;
+ __vrf_insert_slave(queue, slave);
+ rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
+ cycle_netdev(port_dev);
+
+ return 0;
+
+out_unregister:
+ netdev_rx_handler_unregister(port_dev);
+out_fail:
+ kfree(vrf_ptr);
+ kfree(slave);
+ return ret;
+}
+
+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+ return -EINVAL;
+
+ return do_vrf_add_slave(dev, port_dev);
+}
+
+/* inverse of do_vrf_add_slave */
+static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct slave_queue *queue = &vrf->queue;
+ struct slave *slave;
+
+ RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
+
+ netdev_upper_dev_unlink(port_dev, dev);
+ port_dev->flags &= ~IFF_SLAVE;
+
+ netdev_rx_handler_unregister(port_dev);
+
+ /* safe to free: netdev_rx_handler_unregister() did a synchronize_rcu() */
+ kfree(vrf_ptr);
+
+ cycle_netdev(port_dev);
+
+ slave = __vrf_find_slave_dev(queue, port_dev);
+ if (slave)
+ __vrf_remove_slave(queue, slave);
+
+ kfree(slave);
+
+ return 0;
+}
+
+static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ return do_vrf_del_slave(dev, port_dev);
+}
+
+static void vrf_dev_uninit(struct net_device *dev)
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct slave_queue *queue = &vrf->queue;
+ struct list_head *head = &queue->all_slaves;
+ struct slave *slave, *next;
+
+ vrf_rtable_destroy(vrf);
+
+ list_for_each_entry_safe(slave, next, head, list)
+ vrf_del_slave(dev, slave->dev);
+
+ free_percpu(dev->dstats);
+ dev->dstats = NULL;
+}
+
+static int vrf_dev_init(struct net_device *dev)
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&vrf->queue.all_slaves);
+
+ dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+ if (!dev->dstats)
+ goto out_nomem;
+
+ /* create the default dst which points back to us */
+ vrf->rth = vrf_rtable_create(dev);
+ if (!vrf->rth)
+ goto out_stats;
+
+ dev->flags = IFF_MASTER | IFF_NOARP;
+
+ return 0;
+
+out_stats:
+ free_percpu(dev->dstats);
+ dev->dstats = NULL;
+out_nomem:
+ return -ENOMEM;
+}
+
+static const struct net_device_ops vrf_netdev_ops = {
+ .ndo_init = vrf_dev_init,
+ .ndo_uninit = vrf_dev_uninit,
+ .ndo_start_xmit = vrf_xmit,
+ .ndo_get_stats64 = vrf_get_stats64,
+ .ndo_add_slave = vrf_add_slave,
+ .ndo_del_slave = vrf_del_slave,
+};
+
+static void vrf_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops vrf_ethtool_ops = {
+ .get_drvinfo = vrf_get_drvinfo,
+};
+
+static void vrf_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ /* Initialize the device structure. */
+ dev->netdev_ops = &vrf_netdev_ops;
+ dev->ethtool_ops = &vrf_ethtool_ops;
+ dev->destructor = free_netdev;
+
+ /* Fill in device structure with ethernet-generic values. */
+ eth_hw_addr_random(dev);
+
+ /* don't acquire vrf device's netif_tx_lock when transmitting */
+ dev->features |= NETIF_F_LLTX;
+
+ /* don't allow vrf devices to change network namespaces. */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static void vrf_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+
+ RCU_INIT_POINTER(dev->vrf_ptr, NULL);
+ kfree_rcu(vrf_ptr, rcu);
+ unregister_netdevice_queue(dev, head);
+}
+
+static int vrf_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct net_vrf_dev *vrf_ptr;
+ int err;
+
+ if (!data || !data[IFLA_VRF_TABLE])
+ return -EINVAL;
+
+ vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+
+ dev->priv_flags |= IFF_VRF_MASTER;
+
+ err = -ENOMEM;
+ vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+ if (!vrf_ptr)
+ goto out_fail;
+
+ vrf_ptr->ifindex = dev->ifindex;
+ vrf_ptr->tb_id = vrf->tb_id;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out_fail;
+
+ rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+ return 0;
+
+out_fail:
+ kfree(vrf_ptr);
+ free_netdev(dev);
+ return err;
+}
+
+static size_t vrf_nl_getsize(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
+}
+
+static int vrf_fillinfo(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+
+ return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
+}
+
+static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+ [IFLA_VRF_TABLE] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vrf_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct net_vrf),
+
+ .get_size = vrf_nl_getsize,
+ .policy = vrf_nl_policy,
+ .validate = vrf_validate,
+ .fill_info = vrf_fillinfo,
+
+ .newlink = vrf_newlink,
+ .dellink = vrf_dellink,
+ .setup = vrf_setup,
+ .maxtype = IFLA_VRF_MAX,
+};
+
+static int vrf_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ /* only care about unregister events to drop slave references */
+ if (event == NETDEV_UNREGISTER) {
+ struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+ struct net_device *vrf_dev;
+
+ if (!vrf_ptr || netif_is_vrf(dev))
+ goto out;
+
+ vrf_dev = netdev_master_upper_dev_get(dev);
+ vrf_del_slave(vrf_dev, dev);
+ }
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vrf_notifier_block __read_mostly = {
+ .notifier_call = vrf_device_event,
+};
+
+static int __init vrf_init_module(void)
+{
+ int rc;
+
+ vrf_dst_ops.kmem_cachep =
+ kmem_cache_create("vrf_ip_dst_cache",
+ sizeof(struct rtable), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!vrf_dst_ops.kmem_cachep)
+ return -ENOMEM;
+
+ register_netdevice_notifier(&vrf_notifier_block);
+
+ rc = rtnl_link_register(&vrf_link_ops);
+ if (rc < 0)
+ goto error;
+
+ return 0;
+
+error:
+ unregister_netdevice_notifier(&vrf_notifier_block);
+ kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+ return rc;
+}
+
+static void __exit vrf_cleanup_module(void)
+{
+ rtnl_link_unregister(&vrf_link_ops);
+ unregister_netdevice_notifier(&vrf_notifier_block);
+ kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+}
+
+module_init(vrf_init_module);
+module_exit(vrf_cleanup_module);
+MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
+MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
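
In the VRF design this driver implements, applications typically opt in by binding sockets to the master device, so route lookups start in the VRF's table rather than the main one. That step is not part of this diff, but a minimal user-space sketch using SO_BINDTODEVICE would look like the following (the device name is an example; the setsockopt needs CAP_NET_RAW):

#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int open_socket_in_vrf(const char *vrf_name)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	/* route lookups for this socket now start in the VRF's table */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       vrf_name, strlen(vrf_name) + 1) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}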
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 34c519eb1db5..cf8b7f0473b3 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -49,15 +49,12 @@
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
+#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
#define PORT_HASH_BITS 8
#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS 10
-#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS 8
-#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
@@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
static const u8 all_zeros_mac[ETH_ALEN];
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ bool no_share, u32 flags);
+
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -84,21 +85,6 @@ struct vxlan_net {
spinlock_t sock_lock;
};
-union vxlan_addr {
- struct sockaddr_in sin;
- struct sockaddr_in6 sin6;
- struct sockaddr sa;
-};
-
-struct vxlan_rdst {
- union vxlan_addr remote_ip;
- __be16 remote_port;
- u32 remote_vni;
- u32 remote_ifindex;
- struct list_head list;
- struct rcu_head rcu;
-};
-
/* Forwarding table entry */
struct vxlan_fdb {
struct hlist_node hlist; /* linked list of entries */
@@ -106,40 +92,21 @@ struct vxlan_fdb {
unsigned long updated; /* jiffies */
unsigned long used;
struct list_head remotes;
+ u8 eth_addr[ETH_ALEN];
u16 state; /* see ndm_state */
u8 flags; /* see ndm_flags */
- u8 eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
- struct hlist_node hlist; /* vni hash table */
- struct list_head next; /* vxlan's per namespace list */
- struct vxlan_sock *vn_sock; /* listening socket */
- struct net_device *dev;
- struct net *net; /* netns for packet i/o */
- struct vxlan_rdst default_dst; /* default destination */
- union vxlan_addr saddr; /* source address */
- __be16 dst_port;
- __u16 port_min; /* source port range */
- __u16 port_max;
- __u8 tos; /* TOS override */
- __u8 ttl;
- u32 flags; /* VXLAN_F_* in vxlan.h */
-
- unsigned long age_interval;
- struct timer_list age_timer;
- spinlock_t hash_lock;
- unsigned int addrcnt;
- unsigned int addrmax;
-
- struct hlist_head fdb_head[FDB_HASH_SIZE];
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+ return vs->flags & VXLAN_F_COLLECT_METADATA ||
+ ip_tunnel_collect_metadata();
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
@@ -269,7 +236,7 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port &&
- inet_sk(vs->sock->sk)->sk.sk_family == family &&
+ vxlan_get_sk_family(vs) == family &&
vs->flags == flags)
return vs;
}
@@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
goto nla_put_failure;
- if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+ if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
nla_put_be16(skb, NDA_PORT, rdst->remote_port))
goto nla_put_failure;
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -552,10 +519,10 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
u32 data, struct gro_remcsum *grc,
bool nopartial)
{
- size_t start, offset, plen;
+ size_t start, offset;
if (skb->remcsum_offload)
- return NULL;
+ return vh;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
@@ -565,17 +532,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
offsetof(struct udphdr, check) :
offsetof(struct tcphdr, check));
- plen = hdrlen + offset + sizeof(u16);
-
- /* Pull checksum that will be written */
- if (skb_gro_header_hard(skb, off + plen)) {
- vh = skb_gro_header_slow(skb, off + plen, off);
- if (!vh)
- return NULL;
- }
-
- skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
- start, offset, grc, nopartial);
+ vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
@@ -606,7 +564,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
goto out;
}
- skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
flags = ntohl(vh->vx_flags);
@@ -621,6 +578,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
goto out;
}
+ skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
flush = 0;
for (p = *head; p; p = p->next) {
@@ -658,7 +617,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
int err;
@@ -683,7 +642,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
@@ -749,7 +708,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+ if (vxlan->cfg.addrmax &&
+ vxlan->addrcnt >= vxlan->cfg.addrmax)
return -ENOSPC;
/* Disallow replace to add a multicast entry */
@@ -835,7 +795,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
return -EINVAL;
*port = nla_get_be16(tb[NDA_PORT]);
} else {
- *port = vxlan->dst_port;
+ *port = vxlan->cfg.dst_port;
}
if (tb[NDA_VNI]) {
@@ -963,10 +923,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
- if (idx < cb->args[0])
- goto skip;
-
list_for_each_entry_rcu(rd, &f->remotes, list) {
+ if (idx < cb->args[0])
+ goto skip;
+
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -974,9 +934,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
NLM_F_MULTI, rd);
if (err < 0)
goto out;
- }
skip:
- ++idx;
+ ++idx;
+ }
}
}
out:
@@ -1021,7 +981,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan_fdb_create(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
+ vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1022,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
return false;
}
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
{
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
@@ -1078,7 +1038,6 @@ void vxlan_sock_release(struct vxlan_sock *vs)
queue_work(vxlan_wq, &vs->del_work);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Update multicast group membership when first VNI on
* multicast address is brought up
@@ -1143,6 +1102,9 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
{
size_t start, offset, plen;
+ if (skb->remcsum_offload)
+ return vh;
+
start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
offset = start + ((data & VXLAN_RCO_UDP) ?
offsetof(struct udphdr, check) :
@@ -1161,13 +1123,111 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
return vh;
}
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+ struct vxlan_metadata *md, u32 vni,
+ struct metadata_dst *tun_dst)
+{
+ struct iphdr *oip = NULL;
+ struct ipv6hdr *oip6 = NULL;
+ struct vxlan_dev *vxlan;
+ struct pcpu_sw_netstats *stats;
+ union vxlan_addr saddr;
+ int err = 0;
+ union vxlan_addr *remote_ip;
+
+ /* For flow-based devices, map all packets to VNI 0 */
+ if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ vni = 0;
+
+ /* Is this VNI defined? */
+ vxlan = vxlan_vs_find_vni(vs, vni);
+ if (!vxlan)
+ goto drop;
+
+ remote_ip = &vxlan->default_dst.remote_ip;
+ skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+ skb->protocol = eth_type_trans(skb, vxlan->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ goto drop;
+
+ /* Re-examine inner Ethernet packet */
+ if (remote_ip->sa.sa_family == AF_INET) {
+ oip = ip_hdr(skb);
+ saddr.sin.sin_addr.s_addr = oip->saddr;
+ saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ oip6 = ipv6_hdr(skb);
+ saddr.sin6.sin6_addr = oip6->saddr;
+ saddr.sa.sa_family = AF_INET6;
+#endif
+ }
+
+ if (tun_dst) {
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ tun_dst = NULL;
+ }
+
+ if ((vxlan->flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+ goto drop;
+
+ skb_reset_network_header(skb);
+ /* In flow-based mode, GBP is carried in dst_metadata */
+ if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
+ skb->mark = md->gbp;
+
+ if (oip6)
+ err = IP6_ECN_decapsulate(oip6, skb);
+ if (oip)
+ err = IP_ECN_decapsulate(oip, skb);
+
+ if (unlikely(err)) {
+ if (log_ecn_error) {
+ if (oip6)
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &oip6->saddr);
+ if (oip)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &oip->saddr, oip->tos);
+ }
+ if (err > 1) {
+ ++vxlan->dev->stats.rx_frame_errors;
+ ++vxlan->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ stats = this_cpu_ptr(vxlan->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ gro_cells_receive(&vxlan->gro_cells, skb);
+
+ return;
+drop:
+ if (tun_dst)
+ dst_release((struct dst_entry *)tun_dst);
+
+ /* Consume bad packet */
+ kfree_skb(skb);
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
+ struct metadata_dst *tun_dst = NULL;
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
u32 flags, vni;
- struct vxlan_metadata md = {0};
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1202,6 +1262,18 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vni &= VXLAN_VNI_MASK;
}
+ if (vxlan_collect_metadata(vs)) {
+ tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
+ cpu_to_be64(vni >> 8), sizeof(*md));
+
+ if (!tun_dst)
+ goto drop;
+
+ md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ } else {
+ memset(md, 0, sizeof(*md));
+ }
+
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
@@ -1209,13 +1281,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct vxlanhdr_gbp *gbp;
gbp = (struct vxlanhdr_gbp *)vxh;
- md.gbp = ntohs(gbp->policy_id);
+ md->gbp = ntohs(gbp->policy_id);
+
+ if (tun_dst)
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
if (gbp->dont_learn)
- md.gbp |= VXLAN_GBP_DONT_LEARN;
+ md->gbp |= VXLAN_GBP_DONT_LEARN;
if (gbp->policy_applied)
- md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+ md->gbp |= VXLAN_GBP_POLICY_APPLIED;
flags &= ~VXLAN_GBP_USED_BITS;
}
@@ -1233,8 +1308,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto bad_flags;
}
- md.vni = vxh->vx_vni;
- vs->rcv(vs, skb, &md);
+ vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
return 0;
drop:
@@ -1247,93 +1321,13 @@ bad_flags:
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
error:
+ if (tun_dst)
+ dst_release((struct dst_entry *)tun_dst);
+
/* Return non vxlan pkt */
return 1;
}
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md)
-{
- struct iphdr *oip = NULL;
- struct ipv6hdr *oip6 = NULL;
- struct vxlan_dev *vxlan;
- struct pcpu_sw_netstats *stats;
- union vxlan_addr saddr;
- __u32 vni;
- int err = 0;
- union vxlan_addr *remote_ip;
-
- vni = ntohl(md->vni) >> 8;
- /* Is this VNI defined? */
- vxlan = vxlan_vs_find_vni(vs, vni);
- if (!vxlan)
- goto drop;
-
- remote_ip = &vxlan->default_dst.remote_ip;
- skb_reset_mac_header(skb);
- skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
- skb->protocol = eth_type_trans(skb, vxlan->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
- goto drop;
-
- /* Re-examine inner Ethernet packet */
- if (remote_ip->sa.sa_family == AF_INET) {
- oip = ip_hdr(skb);
- saddr.sin.sin_addr.s_addr = oip->saddr;
- saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- oip6 = ipv6_hdr(skb);
- saddr.sin6.sin6_addr = oip6->saddr;
- saddr.sa.sa_family = AF_INET6;
-#endif
- }
-
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
- goto drop;
-
- skb_reset_network_header(skb);
- skb->mark = md->gbp;
-
- if (oip6)
- err = IP6_ECN_decapsulate(oip6, skb);
- if (oip)
- err = IP_ECN_decapsulate(oip, skb);
-
- if (unlikely(err)) {
- if (log_ecn_error) {
- if (oip6)
- net_info_ratelimited("non-ECT from %pI6\n",
- &oip6->saddr);
- if (oip)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &oip->saddr, oip->tos);
- }
- if (err > 1) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
- goto drop;
- }
- }
-
- stats = this_cpu_ptr(vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- netif_rx(skb);
-
- return;
-drop:
- /* Consume bad packet */
- kfree_skb(skb);
-}
-
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1666,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port,
+ __be16 src_port, __be16 dst_port, __be32 vni,
struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
@@ -1722,7 +1716,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
+ vxh->vx_vni = vni;
if (type & SKB_GSO_TUNNEL_REMCSUM) {
u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1749,10 @@ err:
}
#endif
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
int min_headroom;
@@ -1801,7 +1795,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = md->vni;
+ vxh->vx_vni = vni;
if (type & SKB_GSO_TUNNEL_REMCSUM) {
u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1822,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
ttl, df, src_port, dst_port, xnet,
!(vxflags & VXLAN_F_UDP_CSUM));
}
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
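The vni parameter threaded through the transmit helpers above is already in on-wire form: callers pass htonl(vni << 8) because the 24-bit VNI occupies the upper three bytes of the 32-bit vx_vni field, with the low byte reserved. A small runnable demonstration of that packing:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned vni = 42;
	unsigned vx_vni = htonl(vni << 8);	/* reserved low byte stays zero */

	printf("vx_vni=0x%08x vni=%u\n", vx_vni, ntohl(vx_vni) >> 8);
	return 0;
}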
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
@@ -1878,22 +1871,48 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
+ struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk = vxlan->vn_sock->sock->sk;
+ unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
struct rtable *rt = NULL;
const struct iphdr *old_iph;
struct flowi4 fl4;
union vxlan_addr *dst;
- struct vxlan_metadata md;
+ union vxlan_addr remote_ip;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
__u8 tos, ttl;
int err;
+ u32 flags = vxlan->flags;
- dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
- vni = rdst->remote_vni;
- dst = &rdst->remote_ip;
+ info = skb_tunnel_info(skb);
+
+ if (rdst) {
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+ vni = rdst->remote_vni;
+ dst = &rdst->remote_ip;
+ } else {
+ if (!info) {
+ WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+ dev->name);
+ goto drop;
+ }
+ if (family != ip_tunnel_info_af(info))
+ goto drop;
+
+ dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ vni = be64_to_cpu(info->key.tun_id);
+ remote_ip.sa.sa_family = family;
+ if (family == AF_INET)
+ remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+ else
+ remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+ dst = &remote_ip;
+ }
if (vxlan_addr_any(dst)) {
if (did_rsc) {
@@ -1906,25 +1925,43 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
old_iph = ip_hdr(skb);
- ttl = vxlan->ttl;
+ ttl = vxlan->cfg.ttl;
if (!ttl && vxlan_addr_multicast(dst))
ttl = 1;
- tos = vxlan->tos;
+ tos = vxlan->cfg.tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
- src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
- vxlan->port_max, true);
+ src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
+
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+
+ ttl = info->key.ttl;
+ tos = info->key.tos;
+
+ if (info->options_len)
+ md = ip_tunnel_info_opts(info);
+ } else {
+ md->gbp = skb->mark;
+ }
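The block above establishes the transmit-time precedence: when an ip_tunnel_info is attached to the skb, its ttl/tos and checksum flag override the device configuration and any attached options replace the stack-local metadata; without it, skb->mark seeds the GBP id. A simplified stand-in for that logic (illustrative types, not the kernel structs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tun_key { uint8_t ttl, tos; bool csum; };

static void resolve_params(const struct tun_key *info, uint32_t mark,
			   uint8_t *ttl, uint8_t *tos, bool *csum,
			   uint32_t *gbp)
{
	if (info) {		/* metadata mode: per-flow values win */
		*ttl  = info->ttl;
		*tos  = info->tos;
		*csum = info->csum;
	} else {
		*gbp = mark;	/* legacy mode: mark seeds the GBP policy id */
	}
}

int main(void)
{
	uint8_t ttl = 64, tos = 0; bool csum = false; uint32_t gbp = 0;
	struct tun_key key = { .ttl = 10, .tos = 0x2e, .csum = true };

	resolve_params(&key, 0, &ttl, &tos, &csum, &gbp);
	printf("ttl=%u tos=0x%x csum=%d\n", ttl, tos, (int)csum);
	return 0;
}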
if (dst->sa.sa_family == AF_INET) {
+ if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+ df = htons(IP_DF);
+
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst->remote_ifindex;
+ fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
fl4.flowi4_tos = RT_TOS(tos);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = dst->sin.sin_addr.s_addr;
- fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+ fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
rt = ip_route_output_key(vxlan->net, &fl4);
if (IS_ERR(rt)) {
@@ -1958,14 +1995,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
-
err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, &md,
+ src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
- vxlan->flags);
+ flags);
if (err < 0) {
/* skb is already freed. */
skb = NULL;
@@ -1977,16 +2011,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
} else {
struct dst_entry *ndst;
struct flowi6 fl6;
- u32 flags;
+ u32 rt6i_flags;
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst->remote_ifindex;
+ fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
fl6.daddr = dst->sin6.sin6_addr;
- fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
- if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+ if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;
@@ -2002,9 +2036,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
/* Bypass encapsulation if the destination is local */
- flags = ((struct rt6_info *)ndst)->rt6i_flags;
- if (flags & RTF_LOCAL &&
- !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ if (rt6i_flags & RTF_LOCAL &&
+ !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
@@ -2018,13 +2052,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- md.vni = htonl(vni << 8);
- md.gbp = skb->mark;
-
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
- 0, ttl, src_port, dst_port, &md,
+ 0, ttl, src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
- vxlan->flags);
+ flags);
#endif
}
@@ -2051,11 +2082,14 @@ tx_free:
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ const struct ip_tunnel_info *info;
struct ethhdr *eth;
bool did_rsc = false;
struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
+ info = skb_tunnel_info(skb);
+
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
@@ -2078,6 +2112,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
}
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
+ info && info->mode & IP_TUNNEL_INFO_TX) {
+ vxlan_xmit_one(skb, dev, NULL, false);
+ return NETDEV_TX_OK;
+ }
+
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
@@ -2143,7 +2183,7 @@ static void vxlan_cleanup(unsigned long arg)
if (f->state & NUD_PERMANENT)
continue;
- timeout = f->used + vxlan->age_interval * HZ;
+ timeout = f->used + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
@@ -2207,8 +2247,8 @@ static int vxlan_open(struct net_device *dev)
struct vxlan_sock *vs;
int ret = 0;
- vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
- false, vxlan->flags);
+ vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+ vxlan->cfg.no_share, vxlan->flags);
if (IS_ERR(vs))
return PTR_ERR(vs);
@@ -2216,13 +2256,15 @@ static int vxlan_open(struct net_device *dev)
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
ret = vxlan_igmp_join(vxlan);
+ if (ret == -EADDRINUSE)
+ ret = 0;
if (ret) {
vxlan_sock_release(vs);
return ret;
}
}
- if (vxlan->age_interval)
+ if (vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
return ret;
@@ -2333,7 +2375,7 @@ void vxlan_get_rx_port(struct net_device *dev)
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
port = inet_sk(vs->sock->sk)->inet_sport;
- sa_family = vs->sock->sk->sk_family;
+ sa_family = vxlan_get_sk_family(vs);
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
port);
}
@@ -2359,7 +2401,6 @@ static void vxlan_setup(struct net_device *dev)
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
- dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
@@ -2371,7 +2412,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
@@ -2380,10 +2421,12 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- vxlan->dst_port = htons(vxlan_port);
+ vxlan->cfg.dst_port = htons(vxlan_port);
vxlan->dev = dev;
+ gro_cells_init(&vxlan->gro_cells, dev);
+
for (h = 0; h < FDB_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
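The tx_queue_len change above follows a tree-wide conversion: virtual devices that never congest signal this with IFF_NO_QUEUE instead of zeroing tx_queue_len, letting the core skip qdisc setup entirely. A minimal sketch of the pattern, in a hypothetical setup callback (kernel-module context):

#include <linux/netdevice.h>

static void demo_setup(struct net_device *dev)
{
	/* was: dev->tx_queue_len = 0; */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features   |= NETIF_F_LLTX;	/* lockless TX, as vxlan_setup also sets */
}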
@@ -2405,6 +2448,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
@@ -2484,6 +2528,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
udp_conf.family = AF_INET6;
udp_conf.use_udp6_rx_checksums =
!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+ udp_conf.ipv6_v6only = 1;
} else {
udp_conf.family = AF_INET;
}
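With ipv6_v6only set, the IPv6 tunnel socket no longer also absorbs IPv4-mapped traffic, so IPv4 endpoints need their own socket. A hedged sketch of that configuration through the udp_port_cfg/udp_sock_create interface the surrounding code uses (function name and port value are illustrative):

#include <net/udp_tunnel.h>

static int open_vxlan_sock6(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg udp_conf = {
		.family         = AF_INET6,
		.local_udp_port = htons(4789),	/* IANA VXLAN port */
		.ipv6_v6only    = 1,		/* the new behaviour above */
	};

	return udp_sock_create(net, &udp_conf, sockp);
}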
@@ -2500,7 +2545,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2529,8 +2573,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
vs->sock = sock;
atomic_set(&vs->refcnt, 1);
- vs->rcv = rcv;
- vs->data = data;
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
/* Initialize the vxlan udp offloads structure */
@@ -2554,9 +2596,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
return vs;
}
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ bool no_share, u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
@@ -2566,7 +2607,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
flags);
- if (vs && vs->rcv == rcv) {
+ if (vs) {
if (!atomic_add_unless(&vs->refcnt, 1, 0))
vs = ERR_PTR(-EBUSY);
spin_unlock(&vn->sock_lock);
@@ -2575,58 +2616,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
spin_unlock(&vn->sock_lock);
}
- return vxlan_socket_create(net, port, rcv, data, flags);
+ return vxlan_socket_create(net, port, flags);
}
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
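Socket sharing above hinges on atomic_add_unless(&vs->refcnt, 1, 0): a lookup takes a reference only while the count is still non-zero, so a socket that is mid-teardown cannot be resurrected. A runnable userspace rendition of the same pattern with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already on its way out */
}

int main(void)
{
	atomic_int refcnt = 1;

	printf("held: %d\n", get_unless_zero(&refcnt));	/* 1 */
	atomic_store(&refcnt, 0);
	printf("held: %d\n", get_unless_zero(&refcnt));	/* 0 */
	return 0;
}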
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
- __u32 vni;
int err;
bool use_ipv6 = false;
-
- if (!data[IFLA_VXLAN_ID])
- return -EINVAL;
+ __be16 default_port = vxlan->cfg.dst_port;
vxlan->net = src_net;
- vni = nla_get_u32(data[IFLA_VXLAN_ID]);
- dst->remote_vni = vni;
-
- /* Unless IPv6 is explicitly requested, assume IPv4 */
- dst->remote_ip.sa.sa_family = AF_INET;
- if (data[IFLA_VXLAN_GROUP]) {
- dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
- } else if (data[IFLA_VXLAN_GROUP6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
+ dst->remote_vni = conf->vni;
- dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
- dst->remote_ip.sa.sa_family = AF_INET6;
- use_ipv6 = true;
- }
+ memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
- if (data[IFLA_VXLAN_LOCAL]) {
- vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
- vxlan->saddr.sa.sa_family = AF_INET;
- } else if (data[IFLA_VXLAN_LOCAL6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ if (!dst->remote_ip.sa.sa_family)
+ dst->remote_ip.sa.sa_family = AF_INET;
- /* TODO: respect scope id */
- vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
- vxlan->saddr.sa.sa_family = AF_INET6;
+ if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+ vxlan->cfg.saddr.sa.sa_family == AF_INET6)
use_ipv6 = true;
- }
- if (data[IFLA_VXLAN_LINK] &&
- (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+ if (conf->remote_ifindex) {
struct net_device *lowerdev
- = __dev_get_by_index(src_net, dst->remote_ifindex);
+ = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+ dst->remote_ifindex = conf->remote_ifindex;
if (!lowerdev) {
pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2644,7 +2665,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
}
#endif
- if (!tb[IFLA_MTU])
+ if (!conf->mtu)
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
dev->needed_headroom = lowerdev->hard_header_len +
@@ -2652,101 +2673,188 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
} else if (use_ipv6)
vxlan->flags |= VXLAN_F_IPV6;
+ memcpy(&vxlan->cfg, conf, sizeof(*conf));
+ if (!vxlan->cfg.dst_port)
+ vxlan->cfg.dst_port = default_port;
+ vxlan->flags |= conf->flags;
+
+ if (!vxlan->cfg.age_interval)
+ vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+
+ if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+ vxlan->cfg.dst_port, vxlan->flags))
+ return -EEXIST;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE|NUD_PERMANENT,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+
+ return 0;
+}
+
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type, struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX+1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
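vxlan_dev_create() replaces the previously exported socket and transmit helpers as the module's kernel-facing API: other subsystems now create a full VXLAN net_device and transmit through it. A hedged sketch of a caller, assuming the struct vxlan_config fields used elsewhere in this diff (the device name and port are illustrative):

#include <net/vxlan.h>

static struct net_device *create_flow_based_vxlan(struct net *net)
{
	struct vxlan_config conf = {
		.dst_port = htons(4789),
		/* per-flow tunneling; pairs with IFLA_VXLAN_COLLECT_METADATA */
		.flags    = VXLAN_F_COLLECT_METADATA,
	};

	return vxlan_dev_create(net, "vxlan-demo", NET_NAME_USER, &conf);
}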
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vxlan_config conf;
+ int err;
+
+ if (!data[IFLA_VXLAN_ID])
+ return -EINVAL;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+ if (data[IFLA_VXLAN_GROUP]) {
+ conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+ conf.remote_ip.sa.sa_family = AF_INET6;
+ }
+
+ if (data[IFLA_VXLAN_LOCAL]) {
+ conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+ conf.saddr.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_LOCAL6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ /* TODO: respect scope id */
+ conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+ conf.saddr.sa.sa_family = AF_INET6;
+ }
+
+ if (data[IFLA_VXLAN_LINK])
+ conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
if (data[IFLA_VXLAN_TOS])
- vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+ conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
if (data[IFLA_VXLAN_TTL])
- vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+ conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
- vxlan->flags |= VXLAN_F_LEARN;
+ conf.flags |= VXLAN_F_LEARN;
if (data[IFLA_VXLAN_AGEING])
- vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
- else
- vxlan->age_interval = FDB_AGE_DEFAULT;
+ conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
- vxlan->flags |= VXLAN_F_PROXY;
+ conf.flags |= VXLAN_F_PROXY;
if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
- vxlan->flags |= VXLAN_F_RSC;
+ conf.flags |= VXLAN_F_RSC;
if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
- vxlan->flags |= VXLAN_F_L2MISS;
+ conf.flags |= VXLAN_F_L2MISS;
if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
- vxlan->flags |= VXLAN_F_L3MISS;
+ conf.flags |= VXLAN_F_L3MISS;
if (data[IFLA_VXLAN_LIMIT])
- vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+ conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+ if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+ nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+ conf.flags |= VXLAN_F_COLLECT_METADATA;
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
- vxlan->port_min = ntohs(p->low);
- vxlan->port_max = ntohs(p->high);
+ conf.port_min = ntohs(p->low);
+ conf.port_max = ntohs(p->high);
}
if (data[IFLA_VXLAN_PORT])
- vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
- vxlan->flags |= VXLAN_F_UDP_CSUM;
+ conf.flags |= VXLAN_F_UDP_CSUM;
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
- vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
- vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
if (data[IFLA_VXLAN_REMCSUM_TX] &&
nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
- vxlan->flags |= VXLAN_F_REMCSUM_TX;
+ conf.flags |= VXLAN_F_REMCSUM_TX;
if (data[IFLA_VXLAN_REMCSUM_RX] &&
nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
- vxlan->flags |= VXLAN_F_REMCSUM_RX;
+ conf.flags |= VXLAN_F_REMCSUM_RX;
if (data[IFLA_VXLAN_GBP])
- vxlan->flags |= VXLAN_F_GBP;
+ conf.flags |= VXLAN_F_GBP;
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
- vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
- if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
- vxlan->dst_port, vxlan->flags)) {
- pr_info("duplicate VNI %u\n", vni);
- return -EEXIST;
- }
+ conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
- dev->ethtool_ops = &vxlan_ethtool_ops;
+ err = vxlan_dev_configure(src_net, dev, &conf);
+ switch (err) {
+ case -ENODEV:
+ pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+ break;
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE|NUD_PERMANENT,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
+ case -EPERM:
+ pr_info("IPv6 is disabled via sysctl\n");
+ break;
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan);
- return err;
+ case -EEXIST:
+ pr_info("duplicate VNI %u\n", conf.vni);
+ break;
}
- list_add(&vxlan->next, &vn->vxlan_list);
-
- return 0;
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -2759,6 +2867,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
+ gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
@@ -2777,6 +2886,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2794,8 +2904,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
const struct vxlan_dev *vxlan = netdev_priv(dev);
const struct vxlan_rdst *dst = &vxlan->default_dst;
struct ifla_vxlan_port_range ports = {
- .low = htons(vxlan->port_min),
- .high = htons(vxlan->port_max),
+ .low = htons(vxlan->cfg.port_min),
+ .high = htons(vxlan->cfg.port_max),
};
if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2818,22 +2928,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
goto nla_put_failure;
- if (!vxlan_addr_any(&vxlan->saddr)) {
- if (vxlan->saddr.sa.sa_family == AF_INET) {
+ if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+ if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
- vxlan->saddr.sin.sin_addr.s_addr))
+ vxlan->cfg.saddr.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
- &vxlan->saddr.sin6.sin6_addr))
+ &vxlan->cfg.saddr.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
}
- if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
- nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+ if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2843,9 +2953,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
!!(vxlan->flags & VXLAN_F_L3MISS)) ||
- nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
- nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
- nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+ nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+ !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+ nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+ nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+ nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
!!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
@@ -2964,8 +3076,10 @@ static void __net_exit vxlan_exit_net(struct net *net)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net))
+ if (!net_eq(dev_net(vxlan->dev), net)) {
+ gro_cells_destroy(&vxlan->gro_cells);
unregister_netdevice_queue(vxlan->dev, &list);
+ }
}
unregister_netdevice_many(&list);
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
chan->netdev->base_addr = chan->cosa->datareg;
chan->netdev->irq = chan->cosa->irq;
chan->netdev->dma = chan->cosa->dma;
- if (register_hdlc_device(chan->netdev)) {
+ err = register_hdlc_device(chan->netdev);
+ if (err) {
netdev_warn(chan->netdev,
"register_hdlc_device() failed\n");
free_netdev(chan->netdev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 3ebed1c40abb..e92aaf615901 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1096,7 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
}
dev->netdev_ops = &pvc_ops;
dev->mtu = HDLC_MAX_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
if (register_netdevice(dev) != 0) {
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 758c4ba1e97c..8fef8d83436d 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1358,6 +1358,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
netdev_err(dev, "trying to enslave non-active device %s\n",
slave_name);
+ if (slave_dev)
+ dev_put(slave_dev);
return -EPERM;
}
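The sbni fix above is the classic reference-leak pattern: a device looked up by name holds a reference that every exit path, including error paths, must drop. A minimal sketch (kernel-module context, names illustrative):

#include <linux/netdevice.h>

static int enslave_by_name(struct net *net, const char *name)
{
	struct net_device *slave = dev_get_by_name(net, name);

	if (!slave)
		return -ENODEV;
	if (!(slave->flags & IFF_UP)) {
		dev_put(slave);		/* the fix: drop the lookup reference */
		return -EPERM;
	}
	/* ... use slave, keeping or dropping the reference as appropriate ... */
	dev_put(slave);
	return 0;
}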
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index feacc3b994b7..2f0bd6955f33 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
* @dev: The network device to attach
* @c: The Z8530 channel to configure in sync DMA mode.
*
- * Set up a Z85x30 device for synchronous DMA tranmission. One
+ * Set up a Z85x30 device for synchronous DMA transmission. One
* ISA DMA channel must be available for this to work. The receive
* side is run in PIO mode, but then it has the bigger FIFO.
*/
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9729e6941635..c04fb00e7930 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -11,7 +11,8 @@ ath10k_core-y += mac.o \
wmi-tlv.o \
bmi.o \
hw.o \
- p2p.o
+ p2p.o \
+ swap.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 31a990635490..df7c7616533b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -178,7 +178,7 @@ struct bmi_target_info {
};
/* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e508c65b6ba8..cf28fbebaedc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
+ struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 0eddb204d85b..5c903e15dd65 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -21,7 +21,7 @@
#include "hif.h"
/* Maximum number of Copy Engines supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB 2
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
struct ce_desc {
__le32 addr;
@@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+ ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+ ar->regs->ce_wrap_intr_sum_host_msi_mask
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 59496a90ad5e..b87b98617073 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -31,16 +31,19 @@
#include "wmi-ops.h"
unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -49,6 +52,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.has_shifted_cc_wraparound = true,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +68,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
.fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +84,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +100,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +112,69 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
+ {
+ .id = QCA99X0_HW_2_0_DEV_VERSION,
+ .name = "qca99x0 hw2.0",
+ .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 150000,
+ .fw = {
+ .dir = QCA99X0_HW_2_0_FW_DIR,
+ .fw = QCA99X0_HW_2_0_FW_FILE,
+ .otp = QCA99X0_HW_2_0_OTP_FILE,
+ .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ },
+};
+
+static const char *const ath10k_core_fw_feature_str[] = {
+ [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+ [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+ [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+ [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+ [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+ [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+ [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+ [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+ [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+ [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
};
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+ size_t buf_len,
+ enum ath10k_fw_features feat)
+{
+ if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+ WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+ return scnprintf(buf, buf_len, "bit%d", feat);
+ }
+
+ return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t buf_len)
+{
+ unsigned int len = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ if (test_bit(i, ar->fw_features)) {
+ if (len > 0)
+ len += scnprintf(buf + len, buf_len - len, ",");
+
+ len += ath10k_core_get_fw_feature_str(buf + len,
+ buf_len - len,
+ i);
+ }
+ }
+}
+
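A userspace rendition of the comma-joined feature string built above; snprintf stands in for the kernel's scnprintf (which returns the number of characters actually written, so the kernel version cannot run len past the buffer):

#include <stdio.h>

static const char *const features[] = { "wmi-10.x", "wowlan", "no-4addr-pad" };

int main(void)
{
	char buf[128];
	size_t len = 0;

	for (size_t i = 0; i < sizeof(features) / sizeof(features[0]); i++)
		len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
				len ? "," : "", features[i]);

	printf("features %s\n", buf);	/* wmi-10.x,wowlan,no-4addr-pad */
	return 0;
}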
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -164,6 +236,17 @@ static int ath10k_init_configure_target(struct ath10k *ar)
return ret;
}
+ /* Some devices have a special sanity check that verifies the PCI
+ * Device ID is written to this host interest var. It is known to be
+ * required to boot QCA6164.
+ */
+ ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+ ar->dev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -355,6 +438,7 @@ out:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
+ u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +464,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return ret;
}
- ret = ath10k_bmi_execute(ar, address, 0, &result);
+ ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
if (ret) {
ath10k_err(ar, "could not execute otp (%d)\n", ret);
return ret;
@@ -412,6 +496,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
data = ar->firmware_data;
data_len = ar->firmware_len;
mode_name = "normal";
+ ret = ath10k_swap_code_seg_configure(ar,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
+ }
break;
case ATH10K_FIRMWARE_MODE_UTF:
data = ar->testmode.utf->data;
@@ -451,6 +542,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
+ ath10k_swap_code_seg_release(ar);
+
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
@@ -464,6 +557,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware_len = 0;
ar->cal_file = NULL;
+
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +831,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
ar->htt.op_version);
break;
+ case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw code swap image ie (%zd B)\n",
+ ie_len);
+ ar->swap.firmware_codeswap_data = data;
+ ar->swap.firmware_codeswap_len = ie_len;
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -991,6 +1092,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return -EINVAL;
}
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+ switch (ath10k_cryptmode_param) {
+ case ATH10K_CRYPT_MODE_HW:
+ clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ case ATH10K_CRYPT_MODE_SW:
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ ar->fw_features)) {
+ ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+ return -EINVAL;
+ }
+
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ default:
+ ath10k_info(ar, "invalid cryptmode: %d\n",
+ ath10k_cryptmode_param);
+ return -EINVAL;
+ }
+
+ ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+ ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+ /* Workaround:
+ *
+ * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+ * and causes enormous performance issues (malformed frames,
+ * etc.).
+ *
+ * Disabling A-MSDU makes RAW mode stable with heavy traffic
+ * albeit a bit slower compared to regular operation.
+ */
+ ar->htt.max_num_amsdu = 1;
+ }
+
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
@@ -1014,6 +1155,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1165,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1176,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+ ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+ ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+ ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1210,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
@@ -1272,13 +1427,13 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
void ath10k_core_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_stop(ar);
/* try to suspend target */
if (ar->state != ATH10K_STATE_RESTARTING &&
ar->state != ATH10K_STATE_UTF)
ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
- ath10k_debug_stop(ar);
ath10k_hif_stop(ar);
ath10k_htt_tx_free(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
@@ -1330,6 +1485,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
+ ret = ath10k_swap_code_seg_init(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1632,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
ar->regs = &qca988x_regs;
+ ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
ar->regs = &qca6174_regs;
+ ar->hw_values = &qca6174_values;
+ break;
+ case ATH10K_HW_QCA99X0:
+ ar->regs = &qca99x0_regs;
+ ar->hw_values = &qca99x0_values;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
@@ -1497,6 +1665,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
if (!ar->workqueue)
goto err_free_mac;
+ ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+ if (!ar->workqueue_aux)
+ goto err_free_wq;
+
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
@@ -1517,10 +1689,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ret = ath10k_debug_create(ar);
if (ret)
- goto err_free_wq;
+ goto err_free_aux_wq;
return ar;
+err_free_aux_wq:
+ destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
@@ -1536,6 +1710,9 @@ void ath10k_core_destroy(struct ath10k *ar)
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
+ flush_workqueue(ar->workqueue_aux);
+ destroy_workqueue(ar->workqueue_aux);
+
ath10k_debug_destroy(ar);
ath10k_mac_destroy(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 78094f23c9dd..12542144fe12 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -36,6 +36,7 @@
#include "spectral.h"
#include "thermal.h"
#include "wow.h"
+#include "swap.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
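The MS()/SM() helpers above unpack and pack register fields from paired *_MASK/*_LSB constants, which this series starts resolving per hardware revision through ar->hw_values. A runnable demo with a made-up field:

#include <stdio.h>

#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

#define DEMO_FIELD_MASK 0x0000ff00
#define DEMO_FIELD_LSB  8

int main(void)
{
	unsigned reg = SM(0x5a, DEMO_FIELD);	/* pack: 0x00005a00 */

	printf("packed=0x%08x unpacked=0x%02x\n", reg, MS(reg, DEMO_FIELD));
	return 0;
}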
@@ -91,6 +92,7 @@ struct ath10k_skb_cb {
u8 tid;
u16 freq;
bool is_offchan;
+ bool nohwcrypt;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
@@ -151,6 +153,7 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
u32 num_mem_chunks;
+ u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
@@ -327,8 +330,8 @@ struct ath10k_vif {
u32 uapsd;
} sta;
struct {
- /* 127 stations; wmi limit */
- u8 tim_bitmap[16];
+ /* 512 stations */
+ u8 tim_bitmap[64];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
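Sizing note for the bitmap above: with one TIM bit per associated station, 512 stations need 512 / 8 = 64 bytes, whereas the old 16-byte array covered at most 128 bits (hence the former "127 stations" WMI limit).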
@@ -340,6 +343,7 @@ struct ath10k_vif {
} u;
bool use_cts_prot;
+ bool nohwcrypt;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
@@ -381,9 +385,6 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
- u8 htt_max_amsdu;
- u8 htt_max_ampdu;
-
struct ath10k_fw_crash_data *fw_crash_data;
};
@@ -452,16 +453,21 @@ enum ath10k_fw_features {
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* Don't trust error code from otp.bin */
- ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
* it 8 bytes long in Native Wifi Rx decap.
*/
- ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
/* Firmware supports bypassing PLL setting on init. */
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+ /* Raw mode support. If supported, FW supports receiving and transmitting
+ * frames in raw mode.
+ */
+ ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -475,6 +481,15 @@ enum ath10k_dev_flags {
* waiters should immediately cancel instead of waiting for a time out.
*/
ATH10K_FLAG_CRASH_FLUSH,
+
+ /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+ * Raw mode supports both hardware and software crypto. Native WiFi only
+ * supports hardware crypto.
+ */
+ ATH10K_FLAG_RAW_MODE,
+
+ /* Disable HW crypto engine */
+ ATH10K_FLAG_HW_CRYPTO_DISABLED,
};
enum ath10k_cal_mode {
@@ -483,6 +498,13 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_DT,
};
+enum ath10k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH10K_CRYPT_MODE_HW,
+ /* Only use software crypto engine */
+ ATH10K_CRYPT_MODE_SW,
+};
+
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
{
switch (mode) {
@@ -532,6 +554,7 @@ struct ath10k {
u8 mac_addr[ETH_ALEN];
enum ath10k_hw_rev hw_rev;
+ u16 dev_id;
u32 chip_id;
u32 target_version;
u8 fw_version_major;
@@ -545,6 +568,7 @@ struct ath10k {
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
+ u32 max_spatial_stream;
/* protected by conf_mutex */
bool ani_enabled;
@@ -560,6 +584,7 @@ struct ath10k {
struct completion target_suspend;
const struct ath10k_hw_regs *regs;
+ const struct ath10k_hw_values *hw_values;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
@@ -570,6 +595,7 @@ struct ath10k {
const char *name;
u32 patch_load_addr;
int uart_pin;
+ u32 otp_exe_param;
/* This is true if given HW chip has a quirky Cycle Counter
* wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +604,14 @@ struct ath10k {
*/
bool has_shifted_cc_wraparound;
+ /* Some chips expect the fragment descriptor to be contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware with such a requirement.
+ */
+ bool continuous_frag_desc;
+
+ u32 channel_counters_freq_hz;
+
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
@@ -602,6 +636,12 @@ struct ath10k {
const struct firmware *cal_file;
+ struct {
+ const void *firmware_codeswap_data;
+ size_t firmware_codeswap_len;
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+ } swap;
+
char spec_board_id[100];
bool spec_board_loaded;
@@ -617,6 +657,7 @@ struct ath10k {
bool is_roc;
int vdev_id;
int roc_freq;
+ bool roc_notify;
} scan;
struct {
@@ -656,6 +697,8 @@ struct ath10k {
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
+ /* Auxiliary workqueue */
+ struct workqueue_struct *workqueue_aux;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
@@ -675,6 +718,11 @@ struct ath10k {
int max_num_stations;
int max_num_vdevs;
int max_num_tdls_vdevs;
+ int num_active_peers;
+ int num_tids;
+
+ struct work_struct svc_rdy_work;
+ struct sk_buff *svc_rdy_skb;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -749,6 +797,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t max_len);
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8fa606a9c4dd..bf033f46f8aa 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ char fw_features[128] = {};
+
+ ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
@@ -137,8 +141,12 @@ void ath10k_print_driver_info(struct ath10k *ar)
ar->htt.target_version_major,
ar->htt.target_version_minor,
ar->wmi.op_version,
+ ar->htt.op_version,
ath10k_cal_mode_str(ar->cal_mode),
- ar->max_num_stations);
+ ar->max_num_stations,
+ test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+ !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
+ fw_features);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -315,7 +323,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
if (ret) {
ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
- goto unlock;
+ goto free;
}
/* Stat data may exceed htc-wmi buffer limit. In such case firmware
@@ -378,7 +386,6 @@ free:
ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
ath10k_debug_fw_stats_peers_free(&stats.peers);
-unlock:
spin_unlock_bh(&ar->data_lock);
}
@@ -1357,12 +1364,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
mutex_lock(&ar->conf_mutex);
- if (ar->debug.htt_max_amsdu)
- amsdu = ar->debug.htt_max_amsdu;
-
- if (ar->debug.htt_max_ampdu)
- ampdu = ar->debug.htt_max_ampdu;
-
+ amsdu = ar->htt.max_num_amsdu;
+ ampdu = ar->htt.max_num_ampdu;
mutex_unlock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@@ -1396,8 +1399,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
goto out;
res = count;
- ar->debug.htt_max_amsdu = amsdu;
- ar->debug.htt_max_ampdu = ampdu;
+ ar->htt.max_num_amsdu = amsdu;
+ ar->htt.max_num_ampdu = ampdu;
out:
mutex_unlock(&ar->conf_mutex);
@@ -1899,9 +1902,6 @@ void ath10k_debug_stop(struct ath10k *ar)
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
- ar->debug.htt_max_amsdu = 0;
- ar->debug.htt_max_ampdu = 0;
-
ath10k_wmi_pdev_pktlog_disable(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 85bfa2acb801..32d9ff1b19dc 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -145,8 +145,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
- if (ret)
+ if (ret) {
+ ret = -EIO;
goto err_credits;
+ }
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 6da6ef26143a..3e6ba63dfdff 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+ [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+ [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+ HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
switch (ar->htt.op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_4:
+ ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+ break;
case ATH10K_FW_HTT_OP_VERSION_10_1:
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -205,8 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
}
status = ath10k_htt_verify_version(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to verify htt version: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
- return ath10k_htt_send_rx_ring_cfg_ll(htt);
+ status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to setup rx ring: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+ htt->max_num_ampdu,
+ htt->max_num_amsdu);
+ if (status) {
+ ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 7e8a0d835663..573187512895 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -83,10 +83,39 @@ struct htt_ver_req {
* around the mask + shift defs.
*/
struct htt_data_tx_desc_frag {
- __le32 paddr;
- __le32 len;
+ union {
+ struct double_word_addr {
+ __le32 paddr;
+ __le32 len;
+ } __packed dword_addr;
+ struct triple_word_addr {
+ __le32 paddr_lo;
+ __le16 paddr_hi;
+ __le16 len_16;
+ } __packed tword_addr;
+ } __packed;
} __packed;
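The union above exists because QCA99X0 addresses fragments with a wider physical address and a 16-bit length, where older chips used a 32-bit paddr/32-bit len pair. A hedged sketch of filling either layout (fill_frag() is hypothetical, and the high half-word assumes the DMA address fits in 48 bits):

#include <linux/kernel.h>

static void fill_frag(struct htt_data_tx_desc_frag *frag,
		      dma_addr_t paddr, u16 len, bool wide_addr)
{
	if (!wide_addr) {
		frag->dword_addr.paddr = __cpu_to_le32(lower_32_bits(paddr));
		frag->dword_addr.len   = __cpu_to_le32(len);
	} else {
		frag->tword_addr.paddr_lo = __cpu_to_le32(lower_32_bits(paddr));
		frag->tword_addr.paddr_hi = __cpu_to_le16((u16)upper_32_bits(paddr));
		frag->tword_addr.len_16   = __cpu_to_le16(len);
	}
}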
+struct htt_msdu_ext_desc {
+ __le32 tso_flag[3];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
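The composite HTT_MSDU_CHECKSUM_ENABLE mask above simply ORs bits 0 through 4; a quick runnable check:

#include <stdio.h>

int main(void)
{
	unsigned mask = (1u << 0) | (1u << 1) | (1u << 2) |
			(1u << 3) | (1u << 4);

	printf("HTT_MSDU_CHECKSUM_ENABLE = 0x%x\n", mask);	/* 0x1f */
	return 0;
}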
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
@@ -255,6 +284,9 @@ struct htt_aggr_conf {
} __packed;
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+ __le32 rate;
+} __packed;
struct htt_mgmt_tx_desc {
u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@@ -263,6 +295,9 @@ struct htt_mgmt_tx_desc {
__le32 len;
__le32 vdev_id;
u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+ union {
+ struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+ } __packed;
} __packed;
enum htt_mgmt_tx_status {
@@ -349,6 +384,38 @@ enum htt_tlv_t2h_msg_type {
HTT_TLV_T2H_NUM_MSGS
};
+enum htt_10_4_t2h_msg_type {
+ HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
+ HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
+ HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+ HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
+ HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
+ HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
+ /* 0x19 to 0x2f are reserved */
+ HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30,
+ /* keep this last */
+ HTT_10_4_T2H_NUM_MSGS
+};
+
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +442,10 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_AGGR_CONF,
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
+ HTT_T2H_MSG_TYPE_EN_STATS,
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1325,6 +1396,8 @@ struct ath10k_htt {
u8 target_version_minor;
struct completion target_version_received;
enum ath10k_fw_htt_op_version op_version;
+ u8 max_num_amsdu;
+ u8 max_num_ampdu;
const enum htt_t2h_msg_type *t2h_msg_types;
u32 t2h_msg_types_max;
@@ -1430,6 +1503,11 @@ struct ath10k_htt {
/* rx_status template */
struct ieee80211_rx_status rx_status;
+
+ struct {
+ dma_addr_t paddr;
+ struct htt_msdu_ext_desc *vaddr;
+ } frag_desc;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1482,6 +1560,12 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
+/* These values are the defaults in most firmware revisions and apparently
+ * are a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);
@@ -1497,6 +1581,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
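The header hunk above splits the tx fragment descriptor into two layouts: the legacy double-word form (32-bit paddr plus 32-bit len) and a triple-word form carrying the address as paddr_lo/paddr_hi plus a 16-bit length. A hedged standalone sketch of filling the triple-word arm from a 64-bit bus address; endianness conversions (__cpu_to_le32/__cpu_to_le16) are omitted here, and note that the tx hunk later in this patch simply writes paddr_hi as 0:

	#include <stdint.h>

	/* Reduced, host-endian copies of the two union arms. */
	struct dword_frag { uint32_t paddr; uint32_t len; };
	struct tword_frag { uint32_t paddr_lo; uint16_t paddr_hi; uint16_t len_16; };

	/* Split a bus address into the triple-word layout; paddr_hi is
	 * assumed to carry bits 32..47 of the address. */
	static void fill_tword(struct tword_frag *f, uint64_t bus_addr, uint16_t len)
	{
		f->paddr_lo = (uint32_t)(bus_addr & 0xffffffffu);
		f->paddr_hi = (uint16_t)((bus_addr >> 32) & 0xffffu);
		f->len_16 = len;
	}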
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 89eb16b30fc4..1b7a04366256 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
@@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.info0 &
+ if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
@@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
bool is_last;
rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.info0 &
+ is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.info0 &
+ is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
@@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
skb_trim(msdu, msdu->len - FCS_LEN);
/* In most cases this will be true for sniffed frames. It makes sense
- * to deliver them as-is without stripping the crypto param. This would
- * also make sense for software based decryption (which is not
- * implemented in ath10k).
+ * to deliver them as-is without stripping the crypto param. This is
+ * necessary for software based decryption.
*
* If there's no error then the frame is decrypted. At least that is
* the case for frames that come in via fragmented rx indication.
@@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
- is_first = !!(rxd->msdu_end.info0 &
+ is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.info0 &
+ is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
@@ -1201,7 +1200,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
{
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format decap;
- struct ieee80211_hdr *hdr;
/* First msdu's decapped header:
* [802.11 header] <-- padded to 4 bytes long
@@ -1215,8 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
*/
rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
- decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
@@ -1246,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.info1);
+ info = __le32_to_cpu(rxd->msdu_start.common.info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1439,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
if (!chained)
@@ -1633,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
__le16 msdu_id;
int i;
- lockdep_assert_held(&htt->tx_lock);
-
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.no_ack = true;
@@ -1759,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
__skb_queue_tail(amsdu, msdu);
rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.info0 &
+ if (rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.info0 &
+ if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
@@ -2000,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
- spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
- spin_unlock_bh(&htt->tx_lock);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
- spin_lock_bh(&htt->tx_lock);
- __skb_queue_tail(&htt->tx_compl_q, skb);
- spin_unlock_bh(&htt->tx_lock);
+ skb_queue_tail(&htt->tx_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
@@ -2074,6 +2065,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
+ case HTT_T2H_MSG_TYPE_AGGR_CONF:
+ break;
+ case HTT_T2H_MSG_TYPE_EN_STATS:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+ case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
@@ -2093,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
struct htt_resp *resp;
struct sk_buff *skb;
- spin_lock_bh(&htt->tx_lock);
- while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+ while ((skb = skb_dequeue(&htt->tx_compl_q))) {
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
}
- spin_unlock_bh(&htt->tx_lock);
spin_lock_bh(&htt->rx_ring.lock);
while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
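The htt_rx.c hunks above drop htt->tx_lock around the tx completion queue by switching from the __skb_queue_tail/__skb_dequeue variants to skb_queue_tail/skb_dequeue, which take the queue's own internal spinlock. A minimal sketch of that pairing, assuming kernel context:

	#include <linux/skbuff.h>

	/* skb_queue_tail()/skb_dequeue() acquire q->lock internally, so
	 * the T2H handler (producer) and the txrx completion tasklet
	 * (consumer) no longer serialize on a shared external lock. */
	static void tx_compl_enqueue(struct sk_buff_head *q, struct sk_buff *skb)
	{
		skb_queue_tail(q, skb);		/* self-locking variant */
	}

	static struct sk_buff *tx_compl_dequeue(struct sk_buff_head *q)
	{
		return skb_dequeue(q);		/* returns NULL when empty */
	}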
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a60ef7d1d5fc..43aa5e2d1b87 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
lockdep_assert_held(&htt->tx_lock);
- ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+ ret = idr_alloc(&htt->pending_tx, skb, 0,
+ htt->max_num_pending_tx, GFP_ATOMIC);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
@@ -84,6 +85,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ int ret, size;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
@@ -94,11 +96,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
- idr_destroy(&htt->pending_tx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_idr_pending_tx;
+ }
+
+ if (!ar->hw_params.continuous_frag_desc)
+ goto skip_frag_desc_alloc;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_DMA);
+ if (!htt->frag_desc.vaddr) {
+ ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+ ret = -ENOMEM;
+ goto free_tx_pool;
}
+skip_frag_desc_alloc:
return 0;
+
+free_tx_pool:
+ dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+ idr_destroy(&htt->pending_tx);
+ return ret;
}
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -112,18 +134,25 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
- spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
- spin_unlock_bh(&htt->tx_lock);
return 0;
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
+ int size;
+
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
+
+ if (htt->frag_desc.vaddr) {
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+ htt->frag_desc.paddr);
+ }
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +230,49 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int ret, size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+ cmd->frag_desc_bank_cfg.info = 0;
+ cmd->frag_desc_bank_cfg.num_banks = 1;
+ cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+ cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+ __cpu_to_le32(htt->frag_desc.paddr);
+ cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
+ cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+ __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
@@ -355,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
- spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
@@ -371,11 +442,15 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
- if (res)
+ if (res) {
+ res = -EIO;
goto err_free_txdesc;
+ }
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
+ memset(cmd, 0, len);
+
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
@@ -422,6 +497,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
u16 msdu_id, flags1 = 0;
dma_addr_t paddr = 0;
u32 frags_paddr = 0;
+ struct htt_msdu_ext_desc *ext_desc = NULL;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -429,12 +505,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
- spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
- spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
@@ -450,14 +525,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control))
+ ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!skb_cb->htt.nohwcrypt &&
+ skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
- if (res)
+ if (res) {
+ res = -EIO;
goto err_free_txbuf;
+ }
switch (skb_cb->txmode) {
case ATH10K_HW_TXRX_RAW:
@@ -465,16 +546,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
- frags = skb_cb->htt.txbuf->frags;
-
- frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
- frags[0].len = __cpu_to_le32(msdu->len);
- frags[1].paddr = 0;
- frags[1].len = 0;
-
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(&htt->frag_desc.vaddr[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc));
+ frags = (struct htt_data_tx_desc_frag *)
+ &htt->frag_desc.vaddr[msdu_id].frags;
+ ext_desc = &htt->frag_desc.vaddr[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi = 0;
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+ } else {
+ frags = skb_cb->htt.txbuf->frags;
+ frags[0].dword_addr.paddr =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+ frags[1].dword_addr.paddr = 0;
+ frags[1].dword_addr.len = 0;
+
+ frags_paddr = skb_cb->htt.txbuf_paddr;
+ }
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
- frags_paddr = skb_cb->htt.txbuf_paddr;
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -508,14 +603,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
+ if (skb_cb->htt.nohwcrypt)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
- if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}
/* Prevent firmware from sending up tx inspection requests. There's
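The last htt_tx.c hunk gates hardware checksum offload on two conditions: the skb must be CHECKSUM_PARTIAL and the device must not be in raw (802.11) tx mode; with the continuous fragment descriptor the per-MSDU ext descriptor additionally gets HTT_MSDU_CHECKSUM_ENABLE. A reduced standalone model of that decision (the flag values here are illustrative stand-ins, not the real FLAGS1 bits):

	#include <stdbool.h>
	#include <stdint.h>

	#define L3_OFFLOAD	(1u << 0)	/* stand-in for the FLAGS1 L3 bit */
	#define L4_OFFLOAD	(1u << 1)	/* stand-in for the FLAGS1 L4 bit */

	static uint32_t tx_csum_flags(bool csum_partial, bool raw_mode)
	{
		/* Raw-mode frames bypass the offload engine, so the bits
		 * are only set for non-raw CHECKSUM_PARTIAL skbs. */
		if (!csum_partial || raw_mode)
			return 0;

		return L3_OFFLOAD | L4_OFFLOAD;
	}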
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 5997f00afe3b..7b84d08a5154 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = {
.ce7_base_address = 0x00059000,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00040000,
- .soc_chip_id_address = 0x00ec,
- .scratch_3_address = 0x0030,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00000030,
+ .fw_indicator_address = 0x00009030,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
};
const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = {
.ce7_base_address = 0x00036000,
.soc_reset_control_si0_rst_mask = 0x00000000,
.soc_reset_control_ce_rst_mask = 0x00000001,
- .soc_chip_id_address = 0x000f0,
- .scratch_3_address = 0x0028,
+ .soc_chip_id_address = 0x000000f0,
+ .scratch_3_address = 0x00000028,
+ .fw_indicator_address = 0x0003a028,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+ .rtc_state_cold_reset_mask = 0x00000400,
+ .rtc_soc_base_address = 0x00080000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00082000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+ * addresses of CE0 and CE1, no other copy engine is directly referenced
+ * in the code. It is not really necessary to assign addresses for the
+ * newly supported CEs in this address table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00040050,
+ .fw_indicator_address = 0x00040050,
+ .pcie_local_base_address = 0x00000000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+ .rtc_state_val_on = 5,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
};
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
@@ -74,6 +152,6 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
cc -= cc_prev - cc_fix;
rcc -= rcc_prev;
- survey->time = CCNT_TO_MSEC(cc);
- survey->time_busy = CCNT_TO_MSEC(rcc);
+ survey->time = CCNT_TO_MSEC(ar, cc);
+ survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
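The survey hunk above replaces the hard-coded 88 MHz conversion with a per-chip divisor taken from hw_params. Despite the _hz suffix, the divisor appears to be expressed in cycles per millisecond (88000 matching the old 88 MHz rate and its old /88000 divisor), so the division still yields milliseconds directly. A standalone arithmetic check under that assumption:

	#include <stdint.h>
	#include <stdio.h>

	#define CCNT_TO_MSEC(freq, x)	((x) / (freq))

	int main(void)
	{
		uint32_t freq = 88000;		/* cycles per ms at 88 MHz */
		uint32_t cc = 8800000;		/* 100 ms worth of cycles */

		printf("%u ms\n", CCNT_TO_MSEC(freq, cc));	/* prints 100 ms */
		return 0;
	}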
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 85cca29375fe..23afcda2de96 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
+#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
* FW API 5 and above.
*/
ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+ /* Code swap image for firmware binary */
+ ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
};
enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
ATH10K_FW_WMI_OP_VERSION_TLV = 4,
ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+ ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
/* keep last */
ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+ ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
/* keep last */
ATH10K_FW_HTT_OP_VERSION_MAX,
};
@@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
+ ATH10K_HW_QCA99X0,
};
struct ath10k_hw_regs {
@@ -164,26 +183,50 @@ struct ath10k_hw_regs {
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
u32 scratch_3_address;
+ u32 fw_indicator_address;
+ u32 pcie_local_base_address;
+ u32 ce_wrap_intr_sum_host_msi_lsb;
+ u32 ce_wrap_intr_sum_host_msi_mask;
+ u32 pcie_intr_fw_mask;
+ u32 pcie_intr_ce_mask_all;
+ u32 pcie_intr_clr_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+ u32 rtc_state_val_on;
+ u8 ce_count;
+ u8 msi_assign_ce_max;
+ u8 num_target_ce_config_wlan;
+ u16 ce_desc_meta_data_mask;
+ u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
/* Known peculiarities:
- * - current FW doesn't support raw rx mode (last tested v599)
- * - current FW dumps upon raw tx mode (last tested v599)
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
* param, llc/snap) are aligned to 4byte boundaries each */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
+
+ /* Native Wifi decap mode is used to align IP frames to 4-byte
+ * boundaries and avoid a very expensive re-alignment in mac80211.
+ */
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
@@ -245,10 +288,6 @@ enum ath10k_hw_rate_cck {
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
-
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -283,7 +322,6 @@ enum ath10k_hw_rate_cck {
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
-#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
@@ -310,8 +348,70 @@ enum ath10k_hw_rate_cck {
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+/* Diagnostic Window */
+#define CE_DIAG_PIPE 7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS 16
+#define TARGET_10_4_NUM_STATIONS 32
+#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \
+ (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS 0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
+#define TARGET_10_4_NUM_PEER_KEYS 2
+#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT 32
+#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \
+ BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \
+ BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS 4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS 0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10_4_MCAST2UCAST_MODE 0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
+#define TARGET_10_4_NUM_WDS_ENTRIES 32
+#define TARGET_10_4_DMA_BURST_SIZE 1
+#define TARGET_10_4_MAC_AGGR_DELIM 0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
+#define TARGET_10_4_MAX_PEER_EXT_STATS 16
+#define TARGET_10_4_SMART_ANT_CAP 0
+#define TARGET_10_4_BK_MIN_FREE 0
+#define TARGET_10_4_BE_MIN_FREE 0
+#define TARGET_10_4_VI_MIN_FREE 0
+#define TARGET_10_4_VO_MIN_FREE 0
+#define TARGET_10_4_RX_BATCH_MODE 1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0
+#define TARGET_10_4_ATF_CONFIG 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG 1
+#define TARGET_10_4_QWRAP_CONFIG 0
+
/* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +435,10 @@ enum ath10k_hw_rate_cck {
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
-#define MSI_ASSIGN_CE_MAX 7
+#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max
/* as of IP3.7.1 */
-#define RTC_STATE_V_ON 3
+#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
@@ -374,7 +474,7 @@ enum ath10k_hw_rate_cck {
#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
@@ -448,24 +548,25 @@ enum ath10k_hw_rate_cck {
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
-#define PCIE_INTR_CLR_ADDRESS 0x0014
+#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
-/* Cycle counters are running at 88MHz */
-#define CCNT_TO_MSEC(x) ((x) / 88000)
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
/* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK 0x00000400
-#define PCIE_INTR_CE_MASK_ALL 0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all
#define DRAM_BASE_ADDRESS 0x00400000
+#define PCIE_BAR_REG_ADDRESS 0x40030
+
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
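Several of the hw.h constants above (CE_COUNT, MSI_ASSIGN_CE_MAX, RTC_STATE_V_ON) now expand into a lookup through ar->hw_values rather than a literal. That keeps every existing call site unchanged at the cost of the macros implicitly requiring a variable named ar in scope. A reduced sketch of the pattern, with trimmed stand-in types:

	struct hw_values_like { unsigned char ce_count; };
	struct ath10k_like { const struct hw_values_like *hw_values; };

	/* Mirrors the header's style: no parameter, the expansion silently
	 * binds to whatever `ar` is visible at the use site. */
	#define CE_COUNT (ar->hw_values->ce_count)

	static int count_copy_engines(struct ath10k_like *ar)
	{
		return CE_COUNT;	/* expands against the local `ar` */
	}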
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 218b6af63447..64674c955d44 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
return -EOPNOTSUPP;
}
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
if (cmd == DISABLE_KEY) {
arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
@@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
reinit_completion(&ar->install_key_done);
+ if (arvif->nohwcrypt)
+ return 1;
+
ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
@@ -240,6 +247,10 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
+ if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+ arvif->vif->type != NL80211_IFTYPE_ADHOC))
+ return -EINVAL;
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
@@ -251,21 +262,34 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
if (arvif->wep_keys[i] == NULL)
continue;
- flags = 0;
- flags |= WMI_KEY_PAIRWISE;
+ switch (arvif->vif->type) {
+ case NL80211_IFTYPE_AP:
+ flags = WMI_KEY_PAIRWISE;
- ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr, flags);
- if (ret)
- return ret;
+ if (arvif->def_wep_key_idx == i)
+ flags |= WMI_KEY_TX_USAGE;
- flags = 0;
- flags |= WMI_KEY_GROUP;
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, flags);
+ if (ret < 0)
+ return ret;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr,
+ WMI_KEY_PAIRWISE);
+ if (ret < 0)
+ return ret;
- ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr, flags);
- if (ret)
- return ret;
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, WMI_KEY_GROUP);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
spin_lock_bh(&ar->data_lock);
peer->keys[i] = arvif->wep_keys[i];
@@ -280,6 +304,9 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
*
* FIXME: Revisit. Perhaps this can be done in a less hacky way.
*/
+ if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+ return 0;
+
if (arvif->def_wep_key_idx == -1)
return 0;
@@ -322,10 +349,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
DISABLE_KEY, addr, flags);
- if (ret && first_errno == 0)
+ if (ret < 0 && first_errno == 0)
first_errno = ret;
- if (ret)
+ if (ret < 0)
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
@@ -398,7 +425,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
break;
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
- if (ret && first_errno == 0)
+ if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret)
@@ -591,11 +618,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
enum wmi_peer_type peer_type)
{
+ struct ath10k_vif *arvif;
+ int num_peers = 0;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- if (ar->num_peers >= ar->max_num_peers)
+ num_peers = ar->num_peers;
+
+ /* Each vdev consumes a peer entry as well */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ num_peers++;
+
+ if (num_peers >= ar->max_num_peers)
return -ENOBUFS;
ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
@@ -671,20 +706,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
- struct ath10k *ar = arvif->ar;
- u32 vdev_param;
-
- if (value != 0xFFFFFFFF)
- value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
- ATH10K_FRAGMT_THRESHOLD_MIN,
- ATH10K_FRAGMT_THRESHOLD_MAX);
-
- vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
- return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
@@ -836,7 +857,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
- struct ieee80211_channel *channel = chandef->chan;
+ struct ieee80211_channel *channel = NULL;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -1668,7 +1689,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
return 0;
}
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int num = 0;
@@ -1676,7 +1697,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
- if (arvif->ps)
+ if (arvif->is_started)
num++;
return num;
@@ -1700,7 +1721,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = arvif->ps;
- if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+ if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
ar->fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -2502,6 +2523,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
u32 param;
u32 value;
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+ return 0;
+
if (!(ar->vht_cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -2995,6 +3019,8 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
IEEE80211_IFACE_ITER_RESUME_ALL,
ath10k_mac_tx_unlock_iter,
ar);
+
+ ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
}
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
@@ -3034,38 +3060,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->htt.tx_lock);
- switch (pause_id) {
- case WMI_TLV_TX_PAUSE_ID_MCC:
- case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
- case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
- case WMI_TLV_TX_PAUSE_ID_AP_PS:
- case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
- switch (action) {
- case WMI_TLV_TX_PAUSE_ACTION_STOP:
- ath10k_mac_vif_tx_lock(arvif, pause_id);
- break;
- case WMI_TLV_TX_PAUSE_ACTION_WAKE:
- ath10k_mac_vif_tx_unlock(arvif, pause_id);
- break;
- default:
- ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
- action, arvif->vdev_id);
- break;
- }
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
break;
- case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
- case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
- case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
- case WMI_TLV_TX_PAUSE_ID_HOST:
default:
- /* FIXME: Some pause_ids aren't vdev specific. Instead they
- * target peer_id and tid. Implementing these could improve
- * traffic scheduling fairness across multiple connected
- * stations in AP/IBSS modes.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac ignoring unsupported tx pause vdev %i id %d\n",
- arvif->vdev_id, pause_id);
+ ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
break;
}
}
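The refactor above drops the outer per-pause-id switch: vdev-specific and non-vdev pause ids no longer need to be distinguished here because the renamed caller targets a single vdev and the iterator (next hunk) filters on vdev_id. A reduced model of that filter-in-the-iterator pattern, with stand-in types:

	/* The iterator callback bails out early unless the vif matches
	 * the vdev the pause event was addressed to. */
	struct pause_arg { int vdev_id; int pause_id; int action; };
	struct vif_like { int vdev_id; };

	static void handle_tx_pause_iter(struct vif_like *vif,
					 const struct pause_arg *arg)
	{
		if (vif->vdev_id != arg->vdev_id)
			return;		/* event targets a different vdev */

		/* ... lock or wake this vif's tx queues per arg->action ... */
	}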
@@ -3082,12 +3086,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_mac_tx_pause *arg = data;
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
}
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
- enum wmi_tlv_tx_pause_id pause_id,
- enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
{
struct ath10k_mac_tx_pause arg = {
.vdev_id = vdev_id,
@@ -3168,13 +3175,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
* Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
* NativeWifi txmode - it selects AP key instead of peer key. It seems
* to work with Ethernet txmode so use it.
+ *
+ * FIXME: Check if raw mode works with TDLS.
*/
if (ieee80211_is_data_present(fc) && sta && sta->tdls)
return ATH10K_HW_TXRX_ETHERNET;
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ return ATH10K_HW_TXRX_RAW;
+
return ATH10K_HW_TXRX_NATIVE_WIFI;
}
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_INJECTED;
+
+ if ((info->flags & mask) == mask)
+ return false;
+
+ if (vif)
+ return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+
+ return true;
+}
+
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
@@ -3341,6 +3365,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
int vdev_id;
int ret;
unsigned long time_left;
+ bool tmp_peer_created = false;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
@@ -3378,6 +3403,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
+ tmp_peer_created = (ret == 0);
}
spin_lock_bh(&ar->data_lock);
@@ -3393,7 +3419,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
- if (!peer) {
+ if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@@ -3449,14 +3475,13 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_IDLE:
break;
case ATH10K_SCAN_RUNNING:
- if (ar->scan.is_roc)
- ieee80211_remain_on_channel_expired(ar->hw);
- /* fall through */
case ATH10K_SCAN_ABORTING:
if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
(ar->scan.state ==
ATH10K_SCAN_ABORTING));
+ else if (ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
/* fall through */
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
@@ -3620,6 +3645,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+ ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@@ -3635,12 +3661,11 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
- /* FIXME: Packet injection isn't implemented. It should be
- * doable with firmware 10.2 on qca988x.
- */
- WARN_ON_ONCE(1);
- ieee80211_free_txskb(hw, skb);
- return;
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -4039,6 +4064,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
return 1;
}
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath10k *ar = arvif->ar;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+ return 0;
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+ value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+ value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+ if (!value)
+ return 0;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ ar->wmi.vdev_param->txbf, value);
+}
+
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -4080,6 +4142,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
}
+ if (ar->num_peers >= ar->max_num_peers) {
+ ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
ret = -EBUSY;
@@ -4159,6 +4227,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
}
+ if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+ arvif->nohwcrypt = true;
+
+ if (arvif->nohwcrypt &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+ goto err;
+ }
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@@ -4257,16 +4333,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
}
- ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ ret = ath10k_mac_set_txbf_conf(arvif);
if (ret) {
- ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -4287,6 +4363,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
}
+ spin_lock_bh(&ar->htt.tx_lock);
+ if (!ar->tx_paused)
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -4641,9 +4722,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH10K_SCAN_ID;
- if (!req->no_cck)
- arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
if (req->ie_len) {
arg.ie_len = req->ie_len;
memcpy(arg.ie, req->ie, arg.ie_len);
@@ -4751,6 +4829,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return 1;
+ if (arvif->nohwcrypt)
+ return 1;
+
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -4820,6 +4901,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
+ WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
@@ -4835,13 +4917,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
if (ret) {
+ WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
peer_addr, flags);
- if (ret2)
+ if (ret2) {
+ WARN_ON(ret2 > 0);
ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret2);
+ }
goto exit;
}
}
@@ -5462,6 +5547,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ar->scan.is_roc = true;
ar->scan.vdev_id = arvif->vdev_id;
ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
ret = 0;
break;
case ATH10K_SCAN_STARTING:
@@ -5525,7 +5611,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
ath10k_scan_abort(ar);
+
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5561,12 +5653,27 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI enum for the fragmentation threshold, no
+ * known firmware actually implements it. Moreover it is not possible to
+ * delegate frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in frame control, making it impossible for
+ * remote devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
bool skip;
- int ret;
+ long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5685,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (ar->state == ATH10K_STATE_WEDGED)
goto skip;
- ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+ time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5699,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
- if (ret <= 0 || skip)
- ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
- skip, ar->state, ret);
+ if (time_left == 0 || skip)
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+ skip, ar->state, time_left);
skip:
mutex_unlock(&ar->conf_mutex);
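The ret to time_left rename in the flush path tracks wait_event_timeout()'s actual contract: it returns a long that is 0 on timeout and the remaining jiffies (at least 1) when the condition became true, and it is never negative, unlike the interruptible variant; hence the long type, the explicit == 0 test, and the %ld format. A minimal kernel-context sketch of the idiom:

	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	static bool flushed;
	static DECLARE_WAIT_QUEUE_HEAD(flush_wq);

	static void wait_for_flush(void)
	{
		long time_left;

		/* 0 => timed out; >= 1 => condition seen, jiffies left */
		time_left = wait_event_timeout(flush_wq, flushed,
					       msecs_to_jiffies(5000));
		if (time_left == 0)
			pr_warn("flush timed out\n");
	}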
@@ -6219,6 +6326,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = true;
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
if (vif->type == NL80211_IFTYPE_MONITOR) {
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
if (ret) {
@@ -6236,6 +6350,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
err_stop:
ath10k_vdev_stop(arvif);
arvif->is_started = false;
+ ath10k_mac_vif_setup_ps(arvif);
err:
mutex_unlock(&ar->conf_mutex);
@@ -6395,6 +6510,7 @@ static const struct ieee80211_ops ath10k_ops = {
.remain_on_channel = ath10k_remain_on_channel,
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
.set_rts_threshold = ath10k_set_rts_threshold,
+ .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
@@ -6565,8 +6681,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
{
.max = 2,
- .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
@@ -6576,6 +6695,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
{
.max = 1,
@@ -6594,7 +6733,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
.num_different_channels = 1,
- .max_interfaces = 3,
+ .max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
@@ -6608,11 +6747,17 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
{
.limits = ath10k_tlv_if_limit,
- .num_different_channels = 2,
- .max_interfaces = 3,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
},
{
+ .limits = ath10k_tlv_qcs_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+ },
+ {
.limits = ath10k_tlv_if_limit_ibss,
.num_different_channels = 1,
.max_interfaces = 2,
@@ -6620,6 +6765,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6844,7 +7016,6 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
- ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@@ -6852,6 +7023,9 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -6902,6 +7076,8 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@@ -6941,6 +7117,11 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10x_if_comb);
break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_if_comb);
+ break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
@@ -6948,7 +7129,8 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index b291f063705c..e3cefe4c7cfd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
- enum wmi_tlv_tx_pause_id pause_id,
- enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ea656e011a96..1046ab65b9ab 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,11 +58,15 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA99X0_2_0_DEVICE_ID (0x0040)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
{0}
};
@@ -72,16 +76,25 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
* because of that.
*/
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+ { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +103,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -155,6 +169,38 @@ static const struct ce_attr host_ce_config_wlan[] = {
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
},
+
+ /* CE8: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
};
/* Target firmware's Copy Engine configuration. */
@@ -232,6 +278,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
},
/* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host pktlog */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* It is not necessary to send the target a wlan configuration for
+ * CE10 & CE11, as these CEs are not actively used by the target.
+ */
};
/*
@@ -479,6 +557,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
+ if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(value), ar_pci->mem_len);
+ return;
+ }
+
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +580,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
u32 val;
int ret;
+ if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(val), ar_pci->mem_len);
+ return 0;
+ }
+
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
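The two hunks above add the same guard to both MMIO accessors using the newly recorded BAR length (see the ar_pci->mem_len addition to pci.h further down). A minimal standalone sketch of the pattern, with hypothetical names (dev_mmio, dev_write32), a printf standing in for ath10k_warn(), and the target-wake step omitted:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dev_mmio {
	volatile uint32_t *base;	/* mapped BAR, e.g. from pci_iomap() */
	size_t len;			/* pci_resource_len() of that BAR */
};

static void dev_write32(struct dev_mmio *dev, uint32_t offset, uint32_t value)
{
	/* Refuse any access that would run past the mapped region. */
	if (offset + sizeof(value) > dev->len) {
		printf("refusing out-of-bounds write at 0x%08x\n",
		       (unsigned)offset);
		return;
	}
	dev->base[offset / sizeof(uint32_t)] = value;
}

int main(void)
{
	uint32_t bar[4] = { 0 };
	struct dev_mmio dev = { .base = bar, .len = sizeof(bar) };

	dev_write32(&dev, 0x8, 0xcafe);		/* in bounds, lands in bar[2] */
	dev_write32(&dev, 0x10, 0xbeef);	/* rejected: 0x10 + 4 > 16 */
	return 0;
}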
@@ -678,6 +768,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS) &
+ 0x7ff) << 21;
+ break;
+ case ATH10K_HW_QCA99X0:
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ break;
+ }
+
+ val |= 0x100000 | (addr & 0xfffff);
+ return val;
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
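The new ath10k_pci_targ_cpu_to_ce_addr() above replaces the TARG_CPU_SPACE_TO_CE_SPACE macro (removed from pci.h later in this diff) because QCA99X0 takes its upper bits from PCIE_BAR_REG_ADDRESS rather than CORE_CTRL. The bit arithmetic of the QCA988X/QCA6174 branch can be checked standalone; this sketch hard-codes a hypothetical CORE_CTRL value in place of the register read:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t core_ctrl = 0x239;	/* hypothetical register contents */
	uint32_t addr = 0x0040a234;	/* target CPU virtual address */
	uint32_t val;

	/* The low 11 bits of CORE_CTRL select the upper PCIe BAR bits. */
	val = (core_ctrl & 0x7ff) << 21;

	/* Keep the low 20 address bits and set the CE window bit. */
	val |= 0x100000 | (addr & 0xfffff);

	printf("ce address: 0x%08x\n", (unsigned)val);	/* 0x4730a234 */
	return 0;
}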
@@ -740,8 +850,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* convert it from Target CPU virtual address space
* to CE address space
*/
- address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
- address);
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
@@ -899,7 +1008,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* to
* CE address space
*/
- address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = orig_nbytes;
ce_data = ce_data_base;
@@ -1331,20 +1440,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
- val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to mask irq/MSI.
+ */
+ break;
+ }
}
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
u32 val;
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
- val |= CORE_CTRL_PCIE_REG_31_MASK;
-
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA6174:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to unmask irq/MSI.
+ */
+ break;
+ }
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1637,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* masked. To prevent the device from asserting the interrupt reset it
* before proceeding with cleanup.
*/
- ath10k_pci_warm_reset(ar);
+ ath10k_pci_safe_chip_reset(ar);
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
@@ -1546,8 +1677,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
ret = dma_mapping_error(ar->dev, req_paddr);
- if (ret)
+ if (ret) {
+ ret = -EIO;
goto err_dma;
+ }
if (resp && resp_len) {
tresp = kzalloc(*resp_len, GFP_KERNEL);
@@ -1559,8 +1692,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ar->dev, resp_paddr);
- if (ret)
+ if (ret) {
+ ret = -EIO;
goto err_req;
+ }
xfer.wait_for_resp = true;
xfer.resp_len = 0;
@@ -1687,7 +1822,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
+ case QCA99X0_2_0_DEVICE_ID:
return 1;
+ case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
@@ -1757,7 +1894,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
target_ce_config_wlan,
- sizeof(target_ce_config_wlan));
+ sizeof(struct ce_pipe_config) *
+ NUM_TARGET_CE_CONFIG_WLAN);
if (ret != 0) {
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +2009,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
}
/* Last CE is Diagnostic Window */
- if (i == CE_COUNT - 1) {
+ if (i == CE_DIAG_PIPE) {
ar_pci->ce_diag = pipe->ce_hdl;
continue;
}
@@ -2016,6 +2154,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+ if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+ return ath10k_pci_warm_reset(ar);
+ } else if (QCA_REV_99X0(ar)) {
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+ } else {
+ return -ENOTSUPP;
+ }
+}
+
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
int i, ret;
@@ -2122,12 +2272,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+ return 0;
+}
+
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar))
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
+ else if (QCA_REV_99X0(ar))
+ return ath10k_pci_qca99x0_chip_reset(ar);
else
return -ENOTSUPP;
}
@@ -2602,7 +2778,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
- int i;
u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2618,23 +2793,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
val |= 1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
- RTC_STATE_COLD_RESET_MASK)
- break;
- msleep(1);
- }
+ /* After writing SOC_GLOBAL_RESET to put the device into reset, and
+ * again to pull it out, PCIe may not be stable right away; an
+ * immediate PCIe register access can cause a bus error. Delay before
+ * the next access to avoid this.
+ */
+ msleep(20);
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
- RTC_STATE_COLD_RESET_MASK))
- break;
- msleep(1);
- }
+ msleep(20);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
@@ -2679,6 +2849,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
/* Arrange for access to Target SoC registers. */
+ ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2742,9 +2913,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
break;
+ case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
break;
+ case QCA99X0_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA99X0;
+ break;
default:
WARN_ON(1);
return -ENOTSUPP;
@@ -2763,6 +2938,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->pdev = pdev;
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
+ ar->dev_id = pci_dev->device;
if (pdev->subsystem_vendor || pdev->subsystem_device)
scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d7696ddc03c4..8d364fb8f743 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -162,6 +162,7 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
+ size_t mem_len;
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
- (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
- CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
- 0x100000 | ((addr) & 0xfffff))
-
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 492b5a5af434..ca8d16884af1 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,6 +422,12 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
+
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
@@ -449,12 +455,23 @@ enum rx_msdu_decap_format {
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
-struct rx_msdu_start {
+struct rx_msdu_start_common {
__le32 info0; /* %RX_MSDU_START_INFO0_ */
__le32 flow_id_crc;
__le32 info1; /* %RX_MSDU_START_INFO1_ */
} __packed;
+struct rx_msdu_start_qca99x0 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
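The rx_msdu_start split above turns the descriptor into a common prefix plus a per-chip union, so code touching only common fields is unaffected while QCA99X0 code can reach the extra info2 dword. A standalone model of the layout (host-endian fields instead of __le32, shortened names, C11 anonymous union):

#include <stdint.h>
#include <stdio.h>

struct msdu_start_common { uint32_t info0, flow_id_crc, info1; };
struct msdu_start_qca99x0 { uint32_t info2; };

struct msdu_start {
	struct msdu_start_common common;
	union {
		struct msdu_start_qca99x0 qca99x0;
	};
};

int main(void)
{
	struct msdu_start rxd = {
		.common = { .info1 = 0x8000 },
		.qca99x0 = { .info2 = 0x7ff },	/* meaningful on QCA99X0 only */
	};

	printf("info1=0x%x info2=0x%x\n",
	       (unsigned)rxd.common.info1, (unsigned)rxd.qca99x0.info2);
	return 0;
}

The same split is applied to rx_msdu_end and rx_ppdu_end below, where rx_ppdu_end_common also shrinks because the QCA988X-specific tail fields move into the per-chip structs.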
@@ -540,7 +557,7 @@ struct rx_msdu_start {
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
-struct rx_msdu_end {
+struct rx_msdu_end_common {
__le16 ip_hdr_cksum;
__le16 tcp_hdr_cksum;
u8 key_id_octet;
@@ -549,6 +566,36 @@ struct rx_msdu_end {
__le32 info0;
} __packed;
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
+
+struct rx_msdu_end_qca99x0 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
@@ -870,7 +917,11 @@ struct rx_ppdu_start {
#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
+#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
struct rx_ppdu_end_common {
__le32 evm_p0;
@@ -891,13 +942,13 @@ struct rx_ppdu_end_common {
__le32 evm_p15;
__le32 tsf_timestamp;
__le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
u8 locationing_timestamp;
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
@@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x {
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
struct rx_ppdu_end_qca6174 {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
__le32 rtt; /* %RX_PPDU_END_RTT_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
+#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
+#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB 20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
+#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
+
+struct rx_pkt_end {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le32 phy_timestamp_1;
+ __le32 phy_timestamp_2;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+ RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
+ RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+ RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
+ RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
+ RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
+ RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
+ RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
+ RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
+ RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
+ RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
+ RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
+ RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
+ RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+ __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+ __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
+ struct rx_ppdu_end_qca99x0 qca99x0;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 8dcd424aa502..4671cfbcd8f7 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -57,7 +57,7 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
}
int ath10k_spectral_process_fft(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
@@ -73,6 +73,15 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
+ /* qca99x0 reports the bin size as 68 bytes (64 + 4) in report
+ * mode 2. The first 64 bytes carry the inband tones (-32 to +31)
+ * and the last 4 bytes carry band edge detection data (+32), used
+ * mainly for radar detection. Strip the last 4 bytes so the bin
+ * size is valid.
+ */
+ if (bin_len == 68)
+ bin_len -= 4;
+
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
@@ -118,15 +127,14 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
- freq1 = __le16_to_cpu(phyerr->freq1);
- freq2 = __le16_to_cpu(phyerr->freq2);
+ freq1 = phyerr->freq1;
+ freq2 = phyerr->freq2;
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
- fft_sample->noise = __cpu_to_be16(
- __le16_to_cpu(phyerr->nf_chains[chain_idx]));
+ fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
bins = (u8 *)fftr;
bins += sizeof(*fftr);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 042f5b302c75..89b0ad769d4f 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -47,7 +47,7 @@ enum ath10k_spectral_mode {
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
@@ -59,7 +59,7 @@ void ath10k_spectral_destroy(struct ath10k *ar);
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 000000000000..3ca3fae408a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the fw binary with an even smaller IRAM by using
+ * host memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info,
+ const void *data, size_t data_len)
+{
+ u8 *virt_addr = seg_info->virt_address[0];
+ u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+ const u8 *fw_data = data;
+ union ath10k_swap_code_seg_item *swap_item;
+ u32 length = 0;
+ u32 payload_len;
+ u32 total_payload_len = 0;
+ u32 size_left = data_len;
+
+ /* Parse the swap binary and copy its content into host-allocated
+ * memory. The format is address, length and value; the last 4 bytes
+ * are the target write address. The address field is currently
+ * unused.
+ */
+ seg_info->target_addr = -1;
+ while (size_left >= sizeof(*swap_item)) {
+ swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+ payload_len = __le32_to_cpu(swap_item->tlv.length);
+ if ((payload_len > size_left) ||
+ (payload_len == 0 &&
+ size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+ ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+ payload_len);
+ return -EINVAL;
+ }
+
+ if (payload_len == 0) {
+ if (memcmp(swap_item->tail.magic_signature, swap_magic,
+ ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+ ath10k_err(ar, "refusing an invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->target_addr =
+ __le32_to_cpu(swap_item->tail.bmi_write_addr);
+ break;
+ }
+
+ memcpy(virt_addr, swap_item->tlv.data, payload_len);
+ virt_addr += payload_len;
+ length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+ size_left -= length;
+ fw_data += length;
+ total_payload_len += payload_len;
+ }
+
+ if (seg_info->target_addr == -1) {
+ ath10k_err(ar, "failed to parse invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+ return 0;
+}
+
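The swap binary parsed above is a stream of <address:4><length:4><data:length> items, terminated by a zero-length item whose trailing bytes hold the magic signature and the BMI write address. A reduced standalone walk of that format, assuming a little-endian host and omitting the magic check:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct seg_tlv { uint32_t address, length; };	/* payload bytes follow */

static int walk(const uint8_t *buf, size_t len)
{
	struct seg_tlv tlv;

	while (len >= sizeof(tlv)) {
		memcpy(&tlv, buf, sizeof(tlv));
		if (tlv.length == 0)
			return 0;		/* tail reached */
		if (tlv.length > len - sizeof(tlv))
			return -1;		/* truncated item */

		/* the real code copies tlv.length payload bytes here */
		buf += sizeof(tlv) + tlv.length;
		len -= sizeof(tlv) + tlv.length;
	}
	return -1;				/* stream ended without a tail */
}

int main(void)
{
	/* one 4-byte item, then a zero-length terminator */
	uint8_t bin[] = { 0, 0, 0, 0,  4, 0, 0, 0,  0xde, 0xad, 0xbe, 0xef,
			  0, 0, 0, 0,  0, 0, 0, 0 };

	printf("walk: %d\n", walk(bin, sizeof(bin)));	/* prints 0 */
	return 0;
}

The driver additionally requires the terminating item to be exactly sizeof(struct ath10k_swap_code_seg_tail) of remaining data and verifies its magic bytes before trusting bmi_write_addr.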
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info)
+{
+ u32 seg_size;
+
+ if (!seg_info)
+ return;
+
+ if (!seg_info->virt_address[0])
+ return;
+
+ seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+ dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+ seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+ struct ath10k_swap_code_seg_info *seg_info;
+ void *virt_addr;
+ dma_addr_t paddr;
+
+ swap_bin_len = roundup(swap_bin_len, 2);
+ if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+ ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+ swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+ return NULL;
+ }
+
+ seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+ if (!seg_info)
+ return NULL;
+
+ virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+ GFP_KERNEL);
+ if (!virt_addr) {
+ ath10k_err(ar, "failed to allocate dma coherent memory\n");
+ return NULL;
+ }
+
+ seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+ seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.num_segs =
+ __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+ seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+ seg_info->virt_address[0] = virt_addr;
+ seg_info->paddr[0] = paddr;
+
+ return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ enum ath10k_swap_code_seg_bin_type type)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+ switch (type) {
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+ if (!ar->swap.firmware_swap_code_seg_info)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+ seg_info = ar->swap.firmware_swap_code_seg_info;
+ break;
+ default:
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+ case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+ ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+ type);
+ return 0;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+ &seg_info->seg_hw_info,
+ sizeof(seg_info->seg_hw_info));
+ if (ret) {
+ ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+ ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+ ar->swap.firmware_codeswap_data = NULL;
+ ar->swap.firmware_codeswap_len = 0;
+ ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info;
+
+ if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+ return 0;
+
+ seg_info = ath10k_swap_code_seg_alloc(ar,
+ ar->swap.firmware_codeswap_len);
+ if (!seg_info) {
+ ath10k_err(ar, "failed to allocate fw code swap segment\n");
+ return -ENOMEM;
+ }
+
+ ret = ath10k_swap_code_seg_fill(ar, seg_info,
+ ar->swap.firmware_codeswap_data,
+ ar->swap.firmware_codeswap_len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+ ret);
+ ath10k_swap_code_seg_free(ar, seg_info);
+ return ret;
+ }
+
+ ar->swap.firmware_swap_code_seg_info = seg_info;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 000000000000..5c89952dd20f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+
+struct ath10k_swap_code_seg_tlv {
+ __le32 address;
+ __le32 length;
+ u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+ u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+ __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+ struct ath10k_swap_code_seg_tlv tlv;
+ struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+ ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+ /* Swap binary image size */
+ __le32 swap_size;
+ __le32 num_segs;
+
+ /* Swap data size */
+ __le32 size;
+ __le32 size_log2;
+ __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+ __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+ struct ath10k_swap_code_seg_hw_info seg_hw_info;
+ void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+ u32 target_addr;
+ dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index a417aae52623..768bef629099 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -450,4 +450,7 @@ Fw Mode/SubMode Mask
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
+#define QCA99X0_BOARD_DATA_SZ 12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 826500bb2b1b..e4a9c4c8d0cb 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
- lockdep_assert_held(&htt->tx_lock);
-
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
tx_done->msdu_id, !!tx_done->discard,
@@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
return;
}
+ spin_lock_bh(&htt->tx_lock);
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
return;
}
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ __ath10k_htt_tx_dec_pending(htt);
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
+ spin_unlock_bh(&htt->tx_lock);
skb_cb = ATH10K_SKB_CB(msdu);
@@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
- goto exit;
+ return;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
-
-exit:
- ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt);
- if (htt->num_pending_tx == 0)
- wake_up(&htt->empty_tx_wq);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
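The tx_unref hunks above stop requiring the caller to hold htt->tx_lock across the whole completion: the idr lookup, msdu-id release and pending-count update become one locally locked critical section, and the slower mac80211 status reporting runs with the lock dropped. A minimal pthread model of that pattern (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *pending_tx[8];	/* stands in for the msdu idr */
static int num_pending_tx;

static void tx_unref(int msdu_id)
{
	const char *entry;
	int pending;

	/* lookup and retirement of the id form one critical section */
	pthread_mutex_lock(&tx_lock);
	entry = pending_tx[msdu_id];
	if (entry) {
		pending_tx[msdu_id] = NULL;
		num_pending_tx--;
	}
	pending = num_pending_tx;
	pthread_mutex_unlock(&tx_lock);

	if (entry)	/* slow completion work, lock not held */
		printf("completed %s, %d still pending\n", entry, pending);
}

int main(void)
{
	pending_tx[3] = "msdu 3";
	num_pending_tx = 1;
	tx_unref(3);
	return 0;
}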
@@ -147,9 +146,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
- int ret;
+ long time_left;
- ret = wait_event_timeout(ar->peer_mapping_wq, ({
+ time_left = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
@@ -160,7 +159,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
- if (ret <= 0)
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
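The s/int ret/long time_left/ change above matches wait_event_timeout()'s contract: it returns the remaining jiffies as a long, at least 1 when the condition became true, and 0 only when the timeout elapsed with the condition still false. The old "ret <= 0" test therefore suggested a negative outcome that cannot happen, and an int could truncate a large jiffies value. The idiomatic shape, as a sketch that is not runnable outside the kernel:

long time_left;

time_left = wait_event_timeout(wq, condition, 3 * HZ);
if (time_left == 0)
	return -ETIMEDOUT;	/* condition never became true */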
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 47fe2e756bec..248ffc3d6620 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -37,8 +37,10 @@ struct wmi_ops {
struct wmi_peer_kick_ev_arg *arg);
int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_swba_ev_arg *arg);
- int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
- struct wmi_phyerr_ev_arg *arg);
+ int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg);
+ int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg);
int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
@@ -49,6 +51,7 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
+ enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -260,13 +263,23 @@ ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
-ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
- struct wmi_phyerr_ev_arg *arg)
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr_hdr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg)
{
if (!ar->wmi.ops->pull_phyerr)
return -EOPNOTSUPP;
- return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+ return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}
static inline int
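Splitting pull_phyerr into a header op and a per-record op lets the event header (record count, TSF, total buffer length) be pulled once per skb while each packed record is parsed by buffer position. The caller loop this implies looks roughly as follows; it is a sketch of the call pattern rather than the driver's handler, and the hdr_len/buf_len stride fields of wmi_phyerr_ev_arg are assumptions here:

struct wmi_phyerr_hdr_arg hdr_arg;
struct wmi_phyerr_ev_arg phyerr_arg;
const void *buf;
int i, left_len, ret;

if (ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg))
	return;

buf = hdr_arg.phyerrs;
left_len = hdr_arg.buf_len;

for (i = 0; i < hdr_arg.num_phyerrs; i++) {
	ret = ath10k_wmi_pull_phyerr(ar, buf, left_len, &phyerr_arg);
	if (ret)
		break;

	/* ... dispatch phyerr_arg (radar, spectral scan, ...) ... */

	/* advance past this record: header plus payload (assumed fields) */
	buf += phyerr_arg.hdr_len + phyerr_arg.buf_len;
	left_len -= phyerr_arg.hdr_len + phyerr_arg.buf_len;
}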
@@ -319,6 +332,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+ if (!ar->wmi.ops->get_txbf_conf_scheme)
+ return WMI_TXBF_CONF_UNSUPPORTED;
+
+ return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8fdba3865c96..b5849b3fd2f0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
"wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
pause_id, action, vdev_map, peer_id, tid_map);
- for (vdev_id = 0; vdev_map; vdev_id++) {
- if (!(vdev_map & BIT(vdev_id)))
- continue;
-
- vdev_map &= ~BIT(vdev_id);
- ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+ action);
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause id %d\n",
+ pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unknown tx pause vdev %d\n",
+ pause_id);
+ break;
}
kfree(tb);
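The per-vdev branch above keeps the existing bitmap walk: test bits from 0 upward and clear each set bit until the map is empty, which bounds the loop even for sparse maps. Standalone, with BIT() expanded to a plain shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vdev_map = 0x35;	/* vdevs 0, 2, 4 and 5 paused */
	int vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & (1u << vdev_id)))
			continue;

		vdev_map &= ~(1u << vdev_id);
		/* ath10k_mac_handle_tx_pause_vdev() would run here */
		printf("pause vdev %d\n", vdev_id);
	}
	return 0;
}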
@@ -497,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_TLV_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_TLV_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_swba_parse *swba = data;
+ struct wmi_tim_info_arg *tim_info_arg;
+ const struct wmi_tim_info *tim_info_ev = ptr;
if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
return -EPROTO;
@@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
return -ENOBUFS;
- swba->arg->tim_info[swba->n_tim++] = ptr;
+ if (__le32_to_cpu(tim_info_ev->tim_len) >
+ sizeof(tim_info_ev->tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+ tim_info_arg->tim_len = tim_info_ev->tim_len;
+ tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+ tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+ tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+ tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+ swba->n_tim++;
+
return 0;
}
@@ -800,9 +838,9 @@ static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
return 0;
}
-static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
- struct sk_buff *skb,
- struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
{
const void **tb;
const struct wmi_tlv_phyerr_ev *ev;
@@ -824,10 +862,10 @@ static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
return -EPROTO;
}
- arg->num_phyerrs = ev->num_phyerrs;
- arg->tsf_l32 = ev->tsf_l32;
- arg->tsf_u32 = ev->tsf_u32;
- arg->buf_len = ev->buf_len;
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = __le32_to_cpu(ev->buf_len);
arg->phyerrs = phyerrs;
kfree(tb);
@@ -1241,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
return skb;
}
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
u32 param_value)
@@ -1335,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
- cfg->rx_decap_mode = __cpu_to_le32(1);
+ cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -3151,6 +3194,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3279,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3379,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
@@ -3274,12 +3407,14 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
- .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+ .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6c046c244705..ce01107ef37a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = {
.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
};
/* 10.X WMI cmd track */
@@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
};
/* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+ .init_cmdid = WMI_10_4_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_4_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+ .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+ .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ .wlan_peer_caching_add_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ .wlan_peer_caching_evict_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ .wlan_peer_caching_restore_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ .wlan_peer_caching_print_all_peers_info_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ .peer_add_proxy_sta_entry_cmdid =
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+ .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+ .nan_cmdid = WMI_10_4_NAN_CMDID,
+ .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+ .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+ .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ .pdev_smart_ant_set_rx_antenna_cmdid =
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ .peer_smart_ant_set_tx_antenna_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ .peer_smart_ant_set_train_info_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ .peer_smart_ant_set_node_config_ops_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ .pdev_set_antenna_switch_table_cmdid =
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ .pdev_ratepwr_chainmsk_table_cmdid =
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+ .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+ .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+ .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ .pdev_get_ani_ofdm_config_cmdid =
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+ .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+ .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+ .vdev_filter_neighbor_rx_packets_cmdid =
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+ .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
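
The per-version command maps above let code shared across firmware branches resolve a generic command to the ABI-specific id at send time, with WMI_CMD_UNSUPPORTED marking holes in a given branch. A minimal sketch of the guard this enables, assuming a hypothetical wrapper name (the real check sits in the driver's WMI send path):

/* Sketch only: wmi_send_cmd_checked is a hypothetical helper; the
 * driver performs an equivalent check when sending WMI commands. */
static int wmi_send_cmd_checked(struct ath10k *ar, struct sk_buff *skb,
				u32 cmd_id)
{
	/* A map entry of WMI_CMD_UNSUPPORTED means this firmware
	 * branch has no such command; drop instead of sending. */
	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		dev_kfree_skb_any(skb);
		return -EOPNOTSUPP;
	}

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}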
/* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+ .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10_4_VDEV_PARAM_WDS,
+ .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10_4_VDEV_PARAM_SGI,
+ .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10_4_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ .early_rx_bmiss_sample_cycle =
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+ .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+ .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
};
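
The vdev param maps (and the pdev param maps that follow) use the same convention: a generic parameter resolves through the per-ABI map, and WMI_VDEV_PARAM_UNSUPPORTED entries have to be rejected before a set-param command is generated. A hedged sketch; wmi_vdev_set_param_checked is a hypothetical name:

static int wmi_vdev_set_param_checked(struct ath10k *ar, u32 vdev_id,
				      u32 param_id, u32 value)
{
	/* param_id is the firmware-specific id already looked up via
	 * the per-ABI map, e.g. ar->wmi.vdev_param->rts_threshold. */
	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
		ath10k_warn(ar, "vdev param %u not supported by firmware\n",
			    param_id);
		return -EOPNOTSUPP;
	}

	return ath10k_wmi_vdev_set_param(ar, vdev_id, param_id, value);
}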
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
/* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_10_4_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ .smart_antenna_default_antenna =
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+ .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ .remove_mcast2ucast_buffer =
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ .peer_sta_ps_statechg_enable =
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+ .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
};
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
return "completed [preempted]";
case WMI_SCAN_REASON_TIMEDOUT:
return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
case WMI_SCAN_REASON_MAX:
break;
}
@@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
return "preempted";
case WMI_SCAN_EVENT_START_FAILED:
return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ return "foreign channel exit";
default:
return "unknown";
}
@@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SCAN_EVENT_DEQUEUED:
case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
default:
break;
}
@@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_10_4_mgmt_rx_event *ev;
+ struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+
+ ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+ ev_hdr = &ev->hdr;
+ pull_len = sizeof(*ev);
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ /* Make sure bytes added for padding are removed. */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+ arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ arg->rx_frame_count = ev->rx_frame_count;
+
+ return 0;
+}
+
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ch_info_ev_arg arg = {};
@@ -1656,8 +2412,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
ar->ch_info_can_report_survey = true;
}
- ar->survey_last_rx_clear_count = rx_clear_count;
- ar->survey_last_cycle_count = cycle_count;
+ if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+ ar->survey_last_rx_clear_count = rx_clear_count;
+ ar->survey_last_cycle_count = cycle_count;
+ }
exit:
spin_unlock_bh(&ar->data_lock);
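
The new pre-complete guard matters because these saved counters seed the survey deltas: only a final chan-info report should advance them, otherwise the next delta would be computed against a partial sample. Roughly how the counters are consumed (a sketch under assumptions, not the driver's exact survey code; cycles_per_ms depends on the target clock rate):

static void sketch_fill_survey(struct ath10k *ar, u32 cycle_count,
			       u32 rx_clear_count, u32 cycles_per_ms,
			       struct survey_info *survey)
{
	/* u32 arithmetic naturally handles counter wrap-around */
	u64 busy = rx_clear_count - ar->survey_last_rx_clear_count;
	u64 total = cycle_count - ar->survey_last_cycle_count;

	survey->time = div_u64(total, cycles_per_ms);
	survey->time_busy = div_u64(busy, cycles_per_ms);
	survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
}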
@@ -2149,33 +2907,42 @@ exit:
static void ath10k_wmi_update_tim(struct ath10k *ar,
struct ath10k_vif *arvif,
struct sk_buff *bcn,
- const struct wmi_tim_info *tim_info)
+ const struct wmi_tim_info_arg *tim_info)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
struct ieee80211_tim_ie *tim;
u8 *ies, *ie;
u8 ie_len, pvm_len;
__le32 t;
- u32 v;
+ u32 v, tim_len;
+
+ /* When FW reports 0 in tim_len, ensure at least the first byte
+ * in tim_bitmap is considered for pvm calculation.
+ */
+ tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
/* if the next SWBA has no tim_changed, the tim_bitmap is garbage;
* we must copy the bitmap upon change and reuse it later */
if (__le32_to_cpu(tim_info->tim_changed)) {
int i;
- BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
- sizeof(tim_info->tim_bitmap));
+ if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+ ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+ tim_len, sizeof(arvif->u.ap.tim_bitmap));
+ tim_len = sizeof(arvif->u.ap.tim_bitmap);
+ }
- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+ for (i = 0; i < tim_len; i++) {
t = tim_info->tim_bitmap[i / 4];
v = __le32_to_cpu(t);
arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
}
- /* FW reports either length 0 or 16
- * so we calculate this on our own */
+ /* FW reports either length 0 or a length based on the max supported
+ * number of stations, so we calculate this on our own.
+ */
arvif->u.ap.tim_len = 0;
- for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+ for (i = 0; i < tim_len; i++)
if (arvif->u.ap.tim_bitmap[i])
arvif->u.ap.tim_len = i;
@@ -2199,7 +2966,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
if (pvm_len < arvif->u.ap.tim_len) {
- int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+ int expand_size = tim_len - pvm_len;
int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
void *next_ie = ie + 2 + ie_len;
@@ -2214,7 +2981,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
}
}
- if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+ if (pvm_len > tim_len) {
ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
return;
}
@@ -2278,7 +3045,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
break;
- arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
i++;
}
@@ -2286,12 +3067,74 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+ u32 map, tim_len;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens, the firmware has changed and ath10k should
+ * update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+ if (tim_len) {
+ /* Exclude the 4-byte guard length */
+ tim_len -= 4;
+ arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+ } else {
+ arg->tim_info[i].tim_len = 0;
+ }
+
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+ /* 10.4 firmware doesn't have P2P support; notice-of-absence
+ * info can be ignored for now.
+ */
+
+ i++;
+ }
+
+ return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_swba_ev_arg arg = {};
u32 map;
int i = -1;
- const struct wmi_tim_info *tim_info;
+ const struct wmi_tim_info_arg *tim_info;
const struct wmi_p2p_noa_info *noa_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
@@ -2320,7 +3163,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
break;
}
- tim_info = arg.tim_info[i];
+ tim_info = &arg.tim_info[i];
noa_info = arg.noa_info[i];
ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3178,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(tim_info->tim_bitmap[1]),
__le32_to_cpu(tim_info->tim_bitmap[0]));
+ /* TODO: Only the first 4 words of tim_bitmap are dumped.
+ * Extend the debug code to dump the full tim_bitmap.
+ */
+
arvif = ath10k_get_arvif(ar, vdev_id);
if (arvif == NULL) {
ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -2391,6 +3238,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
+ ret = -EIO;
goto skip;
}
@@ -2424,7 +3272,7 @@ void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_radar_report *rr,
u64 tsf)
{
@@ -2468,7 +3316,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
}
/* report event to DFS pattern detector */
- tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
+ tsf32l = phyerr->tsf_timestamp;
tsf64 = tsf & (~0xFFFFFFFFULL);
tsf64 |= tsf32l;
@@ -2513,7 +3361,7 @@ radar_detected:
}
static int ath10k_dfs_fft_report(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
u64 tsf)
{
@@ -2551,7 +3399,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
}
void ath10k_wmi_event_dfs(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
@@ -2560,11 +3408,11 @@ void ath10k_wmi_event_dfs(struct ath10k *ar,
const struct phyerr_fft_report *fftr;
const u8 *tlv_buf;
- buf_len = __le32_to_cpu(phyerr->buf_len);
+ buf_len = phyerr->buf_len;
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
phyerr->phy_err_code, phyerr->rssi_combined,
- __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
+ phyerr->tsf_timestamp, tsf, buf_len);
/* Skip event if DFS disabled */
if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
@@ -2616,7 +3464,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar,
}
void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
@@ -2625,7 +3473,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
const struct phyerr_fft_report *fftr;
size_t fftr_len;
- buf_len = __le32_to_cpu(phyerr->buf_len);
+ buf_len = phyerr->buf_len;
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
@@ -2658,7 +3506,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
fftr, fftr_len,
tsf);
if (res < 0) {
- ath10k_warn(ar, "failed to process fft report: %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
res);
return;
}
@@ -2669,65 +3517,169 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
}
}
-static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
- struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
{
struct wmi_phyerr_event *ev = (void *)skb->data;
if (skb->len < sizeof(*ev))
return -EPROTO;
- arg->num_phyerrs = ev->num_phyerrs;
- arg->tsf_l32 = ev->tsf_l32;
- arg->tsf_u32 = ev->tsf_u32;
- arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len - sizeof(*ev);
arg->phyerrs = ev->phyerrs;
return 0;
}
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ /* 10.4 firmware always reports only one phyerr */
+ arg->num_phyerrs = 1;
+
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len;
+ arg->phyerrs = skb->data;
+
+ return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_phyerr *phyerr = phyerr_buf;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+ switch (phyerr->phy_err_code) {
+ case PHY_ERROR_GEN_SPECTRAL_SCAN:
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ break;
+ case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+ arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+ break;
+ case PHY_ERROR_GEN_RADAR:
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ break;
+ default:
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+ u32 phy_err_mask;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+ phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+ if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ else
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+ return 0;
+}
+
void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_phyerr_ev_arg arg = {};
- const struct wmi_phyerr *phyerr;
+ struct wmi_phyerr_hdr_arg hdr_arg = {};
+ struct wmi_phyerr_ev_arg phyerr_arg = {};
+ const void *phyerr;
u32 count, i, buf_len, phy_err_code;
u64 tsf;
int left_len, ret;
ATH10K_DFS_STAT_INC(ar, phy_errors);
- ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
+ ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
if (ret) {
- ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
+ ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
return;
}
- left_len = __le32_to_cpu(arg.buf_len);
-
/* Check number of included events */
- count = __le32_to_cpu(arg.num_phyerrs);
+ count = hdr_arg.num_phyerrs;
+
+ left_len = hdr_arg.buf_len;
- tsf = __le32_to_cpu(arg.tsf_u32);
+ tsf = hdr_arg.tsf_u32;
tsf <<= 32;
- tsf |= __le32_to_cpu(arg.tsf_l32);
+ tsf |= hdr_arg.tsf_l32;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
- phyerr = arg.phyerrs;
+ phyerr = hdr_arg.phyerrs;
for (i = 0; i < count; i++) {
- /* Check if we can read event header */
- if (left_len < sizeof(*phyerr)) {
- ath10k_warn(ar, "single event (%d) wrong head len\n",
+ ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
i);
return;
}
- left_len -= sizeof(*phyerr);
-
- buf_len = __le32_to_cpu(phyerr->buf_len);
- phy_err_code = phyerr->phy_err_code;
+ left_len -= phyerr_arg.hdr_len;
+ buf_len = phyerr_arg.buf_len;
+ phy_err_code = phyerr_arg.phy_err_code;
if (left_len < buf_len) {
ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
@@ -2738,20 +3690,20 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
switch (phy_err_code) {
case PHY_ERROR_RADAR:
- ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
break;
case PHY_ERROR_SPECTRAL_SCAN:
- ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
break;
case PHY_ERROR_FALSE_RADAR_EXT:
- ath10k_wmi_event_dfs(ar, phyerr, tsf);
- ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
break;
default:
break;
}
- phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
+ phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
}
}
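
Because each pull op now reports hdr_len, the loop above can stride over back-to-back phyerr records whose header size differs between ABIs (the 10.4 header carries phy_err_mask words and is larger). The same pattern in isolation, as a sketch with a hypothetical function name:

static void walk_phyerr_records(struct ath10k *ar, const void *buf,
				int left, u64 tsf)
{
	struct wmi_phyerr_ev_arg arg = {};
	const void *rec = buf;

	while (left > 0) {
		if (ath10k_wmi_pull_phyerr(ar, rec, left, &arg))
			break;			/* malformed header */
		if (left < arg.hdr_len + arg.buf_len)
			break;			/* truncated payload */
		ath10k_wmi_event_dfs(ar, &arg, tsf);
		rec += arg.hdr_len + arg.buf_len; /* per-ABI stride */
		left -= arg.hdr_len + arg.buf_len;
	}
}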
@@ -2949,7 +3901,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
pool_size,
&paddr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!ar->wmi.mem_chunks[idx].vaddr) {
ath10k_warn(ar, "failed to allocate memory chunk\n");
return -ENOMEM;
@@ -3038,12 +3990,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
{
+ struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+ struct sk_buff *skb = ar->svc_rdy_skb;
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
+ if (!skb) {
+ ath10k_warn(ar, "invalid service ready event skb\n");
+ return;
+ }
+
ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@@ -3075,10 +4034,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
if (ar->fw_api == 1 && ar->fw_version_build > 636)
set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ ar->num_rf_chains, ar->max_spatial_stream);
+ ar->num_rf_chains = ar->max_spatial_stream;
}
ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +4060,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+ TARGET_10_4_NUM_VDEVS;
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = ar->num_active_peers * 2;
+ ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+ }
+
+ /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+ * and WMI_SERVICE_IRAM_TIDS, etc.
+ */
+
for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
- if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
/* number of units to allocate is the number of
* peers, plus 1 extra for the self peer on the target */
/* this count needs to stay in step; host and target
* can otherwise get out of sync */
- num_units = TARGET_10X_NUM_PEERS + 1;
- else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
- num_units = TARGET_10X_NUM_VDEVS + 1;
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3144,9 +4122,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(arg.eeprom_rd),
__le32_to_cpu(arg.num_mem_reqs));
+ dev_kfree_skb(skb);
+ ar->svc_rdy_skb = NULL;
complete(&ar->wmi.service_ready);
}
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ ar->svc_rdy_skb = skb;
+ queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
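
Stashing the skb and deferring to ar->svc_rdy_work moves the service-ready parse into process context, which is what allows the mem-chunk allocation above to use GFP_KERNEL instead of GFP_ATOMIC. A sketch of the wiring this relies on, presumed to happen at WMI attach time (the exact location is outside this hunk):

/* presumed setup elsewhere in the driver, e.g. at attach/probe: */
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
/* ... error handling if the workqueue allocation fails ... */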
+
static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg)
{
@@ -3318,7 +4304,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@@ -3439,7 +4425,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_10X_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@@ -3550,7 +4536,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
- break;
+ return;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@@ -3576,6 +4562,76 @@ out:
dev_kfree_skb(skb);
}
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_4_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_10_4_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_4_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10_4_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10_4_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10_4_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_4_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_4_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_4_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10_4_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10_4_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
int ret;
@@ -3762,8 +4818,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
@@ -3831,8 +4886,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -3897,7 +4951,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
- config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -3950,6 +5004,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
return buf;
}
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_4 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10_4 config = {};
+ u32 len;
+
+ config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+ config.num_peers = __cpu_to_le32(ar->max_num_peers);
+ config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+ config.num_tids = __cpu_to_le32(ar->num_tids);
+
+ config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+ config.num_offload_reorder_buffs =
+ __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+ config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+ config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+ config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+ config.max_peer_ext_stats =
+ __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+ config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+ config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+ config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+ config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+ config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+ config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+ config.tt_support =
+ __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+ config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+ config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+ config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+ return buf;
+}
+
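As an aside on the function above: the 10.4 init command follows the usual variable-length WMI layout, a fixed resource-config struct followed by one descriptor per host memory chunk, so the skb length is a simple sum. A hedged, standalone sketch of that sizing arithmetic (struct names are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct mem_chunk_desc { unsigned int ptr, size, req_id; };
struct init_cmd_fixed { unsigned int resource_config[8]; };

/* fixed command header plus one descriptor per host memory chunk */
static size_t init_cmd_len(size_t num_mem_chunks)
{
	return sizeof(struct init_cmd_fixed) +
	       num_mem_chunks * sizeof(struct mem_chunk_desc);
}

int main(void)
{
	printf("len for 2 chunks: %zu\n", init_cmd_len(2));
	return 0;
}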
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
if (arg->ie_len && !arg->ie)
@@ -4172,7 +5308,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
| WMI_SCAN_EVENT_BSS_CHANNEL
| WMI_SCAN_EVENT_FOREIGN_CHANNEL
| WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->n_bssids = 1;
arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5170,6 +6305,7 @@ static const struct wmi_ops wmi_ops = {
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
@@ -5241,6 +6377,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5306,6 +6443,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5367,6 +6505,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5412,9 +6551,73 @@ static const struct wmi_ops wmi_10_2_4_ops = {
/* .gen_adaptive_qcs not implemented */
};
+static const struct wmi_ops wmi_10_4_ops = {
+ .rx = ath10k_wmi_10_4_op_rx,
+ .map_svc = wmi_10_4_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_10_4_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+
+ /* shared with 10.2 */
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
int ath10k_wmi_attach(struct ath10k *ar)
{
switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->wmi.ops = &wmi_10_4_ops;
+ ar->wmi.cmd = &wmi_10_4_cmd_map;
+ ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ break;
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->wmi.cmd = &wmi_10_2_4_cmd_map;
ar->wmi.ops = &wmi_10_2_4_ops;
@@ -5452,6 +6655,8 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
+ INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
return 0;
}
@@ -5459,6 +6664,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
{
int i;
+ cancel_work_sync(&ar->svc_rdy_work);
+
+ if (ar->svc_rdy_skb)
+ dev_kfree_skb(ar->svc_rdy_skb);
+
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index cf44a3d080a3..52d35032d53e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -150,6 +150,12 @@ enum wmi_service {
WMI_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_ATF,
WMI_SERVICE_COEX_GPIO,
+ WMI_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_TT,
+ WMI_SERVICE_PEER_CACHING,
+ WMI_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64,
/* keep last */
WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@ enum wmi_main_service {
WMI_MAIN_SERVICE_TX_ENCAP,
};
+enum wmi_10_4_service {
+ WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_10_4_SERVICE_AP_DFS,
+ WMI_10_4_SERVICE_11AC,
+ WMI_10_4_SERVICE_BLOCKACK,
+ WMI_10_4_SERVICE_PHYERR,
+ WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_10_4_SERVICE_RTT,
+ WMI_10_4_SERVICE_RATECTRL,
+ WMI_10_4_SERVICE_WOW,
+ WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_10_4_SERVICE_BURST,
+ WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_10_4_SERVICE_CHATTER,
+ WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10_4_SERVICE_GPIO,
+ WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_10_4_SERVICE_EARLY_RX,
+ WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_10_4_SERVICE_TT,
+ WMI_10_4_SERVICE_ATF,
+ WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
static inline char *wmi_service_name(int service_id)
{
#define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
SVCSTR(WMI_SERVICE_ATF);
SVCSTR(WMI_SERVICE_COEX_GPIO);
+ SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+ SVCSTR(WMI_SERVICE_TT);
+ SVCSTR(WMI_SERVICE_PEER_CACHING);
+ SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+ SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+ SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
default:
return NULL;
}
@@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_ENCAP, len);
}
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10_4_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10_4_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10_4_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10_4_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_ENHANCED_PROXY_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TT,
+ WMI_SERVICE_TT, len);
+ SVCMAP(WMI_10_4_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_SERVICE_PEER_CACHING, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
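Each SVCMAP() line above translates one firmware-specific service bit into the driver's abstract service enum: test a bit in the word array the firmware sent and, if set, mark the corresponding abstract service. A simplified standalone sketch of the bit test (plain uint32_t words instead of __le32, helper name invented):

#include <stdint.h>
#include <stdio.h>

/* is bit 'id' set in a word array of 'len' 32-bit words? */
static int svc_enabled(const uint32_t *in, unsigned int id, size_t len)
{
	if (id / 32 >= len)
		return 0;
	return (in[id / 32] >> (id % 32)) & 1;
}

int main(void)
{
	uint32_t fw_map[4] = { 0 };

	fw_map[1] |= 1u << 2;	/* firmware service bit 34 */
	printf("%d\n", svc_enabled(fw_map, 34, 4));	/* prints 1 */
	return 0;
}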
#undef SVCMAP
/* 2 word representation of MAC addr */
@@ -565,6 +711,48 @@ struct wmi_cmd_map {
u32 tdls_set_state_cmdid;
u32 tdls_peer_update_cmdid;
u32 adaptive_qcs_cmdid;
+ u32 scan_update_request_cmdid;
+ u32 vdev_standby_response_cmdid;
+ u32 vdev_resume_response_cmdid;
+ u32 wlan_peer_caching_add_peer_cmdid;
+ u32 wlan_peer_caching_evict_peer_cmdid;
+ u32 wlan_peer_caching_restore_peer_cmdid;
+ u32 wlan_peer_caching_print_all_peers_info_cmdid;
+ u32 peer_update_wds_entry_cmdid;
+ u32 peer_add_proxy_sta_entry_cmdid;
+ u32 rtt_keepalive_cmdid;
+ u32 oem_req_cmdid;
+ u32 nan_cmdid;
+ u32 vdev_ratemask_cmdid;
+ u32 qboost_cfg_cmdid;
+ u32 pdev_smart_ant_enable_cmdid;
+ u32 pdev_smart_ant_set_rx_antenna_cmdid;
+ u32 peer_smart_ant_set_tx_antenna_cmdid;
+ u32 peer_smart_ant_set_train_info_cmdid;
+ u32 peer_smart_ant_set_node_config_ops_cmdid;
+ u32 pdev_set_antenna_switch_table_cmdid;
+ u32 pdev_set_ctl_table_cmdid;
+ u32 pdev_set_mimogain_table_cmdid;
+ u32 pdev_ratepwr_table_cmdid;
+ u32 pdev_ratepwr_chainmsk_table_cmdid;
+ u32 pdev_fips_cmdid;
+ u32 tt_set_conf_cmdid;
+ u32 fwtest_cmdid;
+ u32 vdev_atf_request_cmdid;
+ u32 peer_atf_request_cmdid;
+ u32 pdev_get_ani_cck_config_cmdid;
+ u32 pdev_get_ani_ofdm_config_cmdid;
+ u32 pdev_reserve_ast_entry_cmdid;
+ u32 pdev_get_nfcal_power_cmdid;
+ u32 pdev_get_tpc_cmdid;
+ u32 pdev_get_ast_info_cmdid;
+ u32 vdev_set_dscp_tid_map_cmdid;
+ u32 pdev_get_info_cmdid;
+ u32 vdev_get_info_cmdid;
+ u32 vdev_filter_neighbor_rx_packets_cmdid;
+ u32 mu_cal_start_cmdid;
+ u32 set_cca_params_cmdid;
+ u32 pdev_bss_chan_info_request_cmdid;
};
/*
@@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id {
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
+enum wmi_10_4_cmd_id {
+ WMI_10_4_START_CMDID = 0x9000,
+ WMI_10_4_END_CMDID = 0x9FFF,
+ WMI_10_4_INIT_CMDID,
+ WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+ WMI_10_4_STOP_SCAN_CMDID,
+ WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_10_4_ECHO_CMDID,
+ WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_4_PDEV_SET_PARAM_CMDID,
+ WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_4_VDEV_CREATE_CMDID,
+ WMI_10_4_VDEV_DELETE_CMDID,
+ WMI_10_4_VDEV_START_REQUEST_CMDID,
+ WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_4_VDEV_UP_CMDID,
+ WMI_10_4_VDEV_STOP_CMDID,
+ WMI_10_4_VDEV_DOWN_CMDID,
+ WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_4_VDEV_SET_PARAM_CMDID,
+ WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ WMI_10_4_PEER_CREATE_CMDID,
+ WMI_10_4_PEER_DELETE_CMDID,
+ WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_4_PEER_SET_PARAM_CMDID,
+ WMI_10_4_PEER_ASSOC_CMDID,
+ WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ WMI_10_4_BCN_TX_CMDID,
+ WMI_10_4_PDEV_SEND_BCN_CMDID,
+ WMI_10_4_BCN_PRB_TMPL_CMDID,
+ WMI_10_4_BCN_FILTER_RX_CMDID,
+ WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_4_MGMT_TX_CMDID,
+ WMI_10_4_PRB_TMPL_CMDID,
+ WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_4_ADDBA_SEND_CMDID,
+ WMI_10_4_ADDBA_STATUS_CMDID,
+ WMI_10_4_DELBA_SEND_CMDID,
+ WMI_10_4_ADDBA_SET_RESP_CMDID,
+ WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_4_DBGLOG_CFG_CMDID,
+ WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_4_PDEV_QVIT_CMDID,
+ WMI_10_4_ROAM_SCAN_MODE,
+ WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_4_ROAM_SCAN_PERIOD,
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_4_ROAM_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_PERIOD,
+ WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_4_P2P_GO_SET_BEACON_IE,
+ WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_4_PDEV_SUSPEND_CMDID,
+ WMI_10_4_PDEV_RESUME_CMDID,
+ WMI_10_4_ADD_BCN_FILTER_CMDID,
+ WMI_10_4_RMV_BCN_FILTER_CMDID,
+ WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_4_WOW_ENABLE_CMDID,
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_4_RTT_MEASREQ_CMDID,
+ WMI_10_4_RTT_TSF_CMDID,
+ WMI_10_4_RTT_KEEPALIVE_CMDID,
+ WMI_10_4_OEM_REQ_CMDID,
+ WMI_10_4_NAN_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_4_REQUEST_STATS_CMDID,
+ WMI_10_4_GPIO_CONFIG_CMDID,
+ WMI_10_4_GPIO_OUTPUT_CMDID,
+ WMI_10_4_VDEV_RATEMASK_CMDID,
+ WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ WMI_10_4_GTK_OFFLOAD_CMDID,
+ WMI_10_4_QBOOST_CFG_CMDID,
+ WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_10_4_FORCE_FW_HANG_CMDID,
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_4_PDEV_FIPS_CMDID,
+ WMI_10_4_TT_SET_CONF_CMDID,
+ WMI_10_4_FWTEST_CMDID,
+ WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CMDID,
+ WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_4_PDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_10_4_MU_CAL_START_CMDID,
+ WMI_10_4_SET_CCA_PARAMS_CMDID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+ WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_4_READY_EVENTID,
+ WMI_10_4_DEBUG_MESG_EVENTID,
+ WMI_10_4_START_EVENTID = 0x9000,
+ WMI_10_4_END_EVENTID = 0x9FFF,
+ WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+ WMI_10_4_ECHO_EVENTID,
+ WMI_10_4_UPDATE_STATS_EVENTID,
+ WMI_10_4_INST_RSSI_STATS_EVENTID,
+ WMI_10_4_VDEV_START_RESP_EVENTID,
+ WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_4_VDEV_STOPPED_EVENTID,
+ WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_4_HOST_SWBA_EVENTID,
+ WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_4_MGMT_RX_EVENTID,
+ WMI_10_4_CHAN_INFO_EVENTID,
+ WMI_10_4_PHYERR_EVENTID,
+ WMI_10_4_ROAM_EVENTID,
+ WMI_10_4_PROFILE_MATCH,
+ WMI_10_4_DEBUG_PRINT_EVENTID,
+ WMI_10_4_PDEV_QVIT_EVENTID,
+ WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_4_RTT_KEEPALIVE_EVENTID,
+ WMI_10_4_OEM_CAPABILITY_EVENTID,
+ WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+ WMI_10_4_NAN_EVENTID,
+ WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+ WMI_10_4_DCS_INTERFERENCE_EVENTID,
+ WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_4_CSA_HANDLING_EVENTID,
+ WMI_10_4_GPIO_INPUT_EVENTID,
+ WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_4_GENERIC_BUFFER_EVENTID,
+ WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_10_4_WDS_PEER_EVENTID,
+ WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_4_PDEV_FIPS_EVENTID,
+ WMI_10_4_TT_STATS_EVENTID,
+ WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+ WMI_10_4_PDEV_TPC_EVENTID,
+ WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+ WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause {
/* Indicate reason for channel switch */
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
-#define WMI_MAX_SPATIAL_STREAM 3
+#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM 4
/* HT Capabilities*/
#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 {
__le32 feature_mask;
} __packed;
-#define NUM_UNITS_IS_NUM_VDEVS 0x1
-#define NUM_UNITS_IS_NUM_PEERS 0x2
+#define NUM_UNITS_IS_NUM_VDEVS BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2)
+
+struct wmi_resource_config_10_4 {
+ /* Number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* Number of peer nodes to support */
+ __le32 num_peers;
+
+ /* Number of active peer nodes to support */
+ __le32 num_active_peers;
+
+ /* In offload mode, the target supports features such as WOW, chatter
+ * and other protocol offloads. To support them, some functionality
+ * such as rx reorder buffering and PN checking must be done on the
+ * target. This determines the maximum number of peers supported by
+ * the target in offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* Number of reorder buffers available for target-based rx reorder
+ * buffering
+ */
+ __le32 num_offload_reorder_buffs;
+
+ /* Number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* Total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /* Max skid for resolving hash collisions.
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /* The nominal chain mask for transmit.
+ * The chain mask may be modified dynamically, e.g. to operate AP tx
+ * with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /* The nominal chain mask for receive.
+ * The chain mask may be modified dynamically, e.g. for a client to use
+ * a reduced number of chains for receive if the traffic to the client
+ * is low enough that it doesn't require downlink MIMO or antenna
+ * diversity. This configuration parameter specifies the nominal
+ * chain-mask that should be used when not operating with a reduced
+ * set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /* What rx reorder timeout (ms) to use for the AC.
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+ * been received. This parameter specifies the timeout in milliseconds
+ * for each class.
+ */
+ __le32 rx_timeout_pri[4];
+
+ /* What mode the rx should decap packets to.
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior; however, TX
+ * behavior can be modified on a per-VAP basis during VAP init.
+ */
+ __le32 rx_decap_mode;
+
+ __le32 scan_max_pending_req;
+
+ __le32 bmiss_offload_max_vdev;
+
+ __le32 roam_offload_max_vdev;
+
+ __le32 roam_offload_max_ap_profiles;
+
+ /* How many groups to use for mcast->ucast conversion.
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each peer
+ * within the multicast group. This num_mcast_groups configuration
+ * parameter tells the target how many multicast groups to provide
+ * storage for within its multicast group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /* Size to alloc for the mcast membership table.
+ * This num_mcast_table_elems configuration parameter tells the target
+ * how many peer elements it needs to provide storage for in its
+ * multicast group membership table. These multicast group membership
+ * table elements are shared by the multicast groups stored within
+ * the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /* Whether/how to do multicast->unicast conversion.
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group membership
+ * table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, transmit the
+ * frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /* How much memory to allocate for a tx PPDU dbg log.
+ * This parameter controls how much memory the target will allocate to
+ * store a log of tx PPDU meta-information (how large the PPDU was,
+ * when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* How many AST entries to allocate for WDS */
+ __le32 num_wds_entries;
+
+ /* MAC DMA burst size. 0 -default, 1 -256B */
+ __le32 dma_burst_size;
+
+ /* Fixed delimiters to be inserted after every MPDU to account for
+ * interface latency and avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /* Determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+ * is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /* Configuration for VoW: number of video nodes to be supported and
+ * the max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Maximum number of vdevs that can use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /* Max number of tx fragments per MSDU.
+ * This will be passed by the target as part of the WMI_SERVICE_READY
+ * event and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+
+ /* Max number of extended peer stats.
+ * This parameter controls the max number of peers for which extended
+ * statistics are supported by the target.
+ */
+ __le32 max_peer_ext_stats;
+
+ /* Smart antenna capabilities information.
+ * 1 - Smart antenna is enabled
+ * 0 - Smart antenna is disabled
+ * In the future, this can contain smart-antenna-specific capabilities.
+ */
+ __le32 smart_ant_cap;
+
+ /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+ * during init.
+ */
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+
+ /* Rx batch mode capability.
+ * 1 - Rx batch mode enabled
+ * 0 - Rx batch mode disabled
+ */
+ __le32 rx_batchmode;
+
+ /* Thermal throttling capability.
+ * 1 - Capable of thermal throttling
+ * 0 - Not capable of thermal throttling
+ */
+ __le32 tt_support;
+
+ /* ATF configuration.
+ * 1 - Enable ATF
+ * 0 - Disable ATF
+ */
+ __le32 atf_config;
+
+ /* Configure padding to manage IP header un-alignment
+ * 1 - Enable padding
+ * 0 - Disable padding
+ */
+ __le32 iphdr_pad_config;
+
+ /* qwrap configuration
+ * 1 - This is qwrap configuration
+ * 0 - This is not qwrap
+ */
+ __le32 qwrap_config;
+} __packed;
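The ast_skid_limit comment above describes bounded linear probing in a sparse address search table. A minimal hedged sketch of that scheme (table layout and helper are hypothetical, not target firmware code):

#include <stdint.h>

#define AST_TBL_SIZE 64

struct ast_entry { uint8_t mac[6]; int used; };

/* probe at most 'skid_limit' slots past the hash before giving up */
static int ast_find_slot(const struct ast_entry *tbl, uint32_t hash,
			 int skid_limit)
{
	int i;

	for (i = 0; i <= skid_limit; i++) {
		uint32_t idx = (hash + i) % AST_TBL_SIZE;

		if (!tbl[idx].used)
			return idx;
	}
	return -1;	/* too many collisions for this skid limit */
}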
/* structure describing host memory chunk. */
struct host_memory_chunk {
@@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 {
struct wmi_host_mem_chunks mem_chunks;
} __packed;
+struct wmi_init_cmd_10_4 {
+ struct wmi_resource_config_10_4 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
struct wmi_chan_list_entry {
__le16 freq;
u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@ enum wmi_bss_filter {
};
enum wmi_scan_event_type {
- WMI_SCAN_EVENT_STARTED = 0x1,
- WMI_SCAN_EVENT_COMPLETED = 0x2,
- WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
- WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
- WMI_SCAN_EVENT_DEQUEUED = 0x10,
- WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
- WMI_SCAN_EVENT_START_FAILED = 0x40,
- WMI_SCAN_EVENT_RESTARTED = 0x80,
- WMI_SCAN_EVENT_MAX = 0x8000
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+ WMI_SCAN_EVENT_MAX = BIT(15),
};
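With the scan event types now expressed as BIT() values, an event word is a mask and more than one type can be reported at once, so handlers test bits rather than comparing for equality. A trivial hedged illustration:

#include <stdint.h>

#define SCAN_EVENT_COMPLETED		(1u << 1)
#define SCAN_EVENT_FOREIGN_CHANNEL_EXIT	(1u << 8)

/* bitwise test, not equality: other bits may be set in the same word */
static int scan_completed(uint32_t ev)
{
	return !!(ev & SCAN_EVENT_COMPLETED);
}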
enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason {
WMI_SCAN_REASON_CANCELLED,
WMI_SCAN_REASON_PREEMPTED,
WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
WMI_SCAN_REASON_MAX,
};
@@ -2329,15 +2952,40 @@ struct wmi_mgmt_rx_event_v2 {
u8 buf[0];
} __packed;
+struct wmi_10_4_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ u8 rssi_ctl[4];
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+ struct wmi_10_4_mgmt_rx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
-#define PHY_ERROR_SPECTRAL_SCAN 0x26
-#define PHY_ERROR_FALSE_RADAR_EXT 0x24
-#define PHY_ERROR_RADAR 0x05
+#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_GEN_RADAR 0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK 0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000
+
+enum phy_err_type {
+ PHY_ERROR_UNKNOWN,
+ PHY_ERROR_SPECTRAL_SCAN,
+ PHY_ERROR_FALSE_RADAR_EXT,
+ PHY_ERROR_RADAR
+};
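Unlike earlier firmware, 10.4 reports PHY errors through phy_err_mask words rather than a single code, so the parser must reduce the mask to one of the generic phy_err_type values above. One plausible reduction, sketched with standalone types (assumption: radar takes precedence when several bits are set):

#include <stdint.h>

enum phy_err_type_sketch { ERR_UNKNOWN, ERR_SPECTRAL, ERR_RADAR };

#define RADAR_MASK		0x4
#define SPECTRAL_SCAN_MASK	0x4000000

static enum phy_err_type_sketch phyerr_mask_to_type(uint32_t mask)
{
	if (mask & RADAR_MASK)
		return ERR_RADAR;
	if (mask & SPECTRAL_SCAN_MASK)
		return ERR_SPECTRAL;
	return ERR_UNKNOWN;
}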
struct wmi_phyerr {
__le32 tsf_timestamp;
@@ -2360,6 +3008,23 @@ struct wmi_phyerr_event {
struct wmi_phyerr phyerrs[0];
} __packed;
+struct wmi_10_4_phyerr_event {
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 phy_err_mask[2];
+ __le32 tsf_timestamp;
+ __le32 buf_len;
+ u8 buf[0];
+} __packed;
+
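The event splits the 64-bit TSF into tsf_l32/tsf_u32 halves; recombining them is a shift-and-or once the words are converted to host order. A hedged one-liner on plain uint32_t values:

#include <stdint.h>

/* reassemble a 64-bit TSF from its two 32-bit halves (host order) */
static uint64_t tsf_from_halves(uint32_t tsf_l32, uint32_t tsf_u32)
{
	return ((uint64_t)tsf_u32 << 32) | tsf_l32;
}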
#define PHYERR_TLV_SIG 0xBB
#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
@@ -2613,6 +3278,48 @@ struct wmi_pdev_param_map {
u32 burst_dur;
u32 burst_enable;
u32 cal_period;
+ u32 aggr_burst;
+ u32 rx_decap_mode;
+ u32 smart_antenna_default_antenna;
+ u32 igmpmld_override;
+ u32 igmpmld_tid;
+ u32 antenna_gain;
+ u32 rx_filter;
+ u32 set_mcast_to_ucast_tid;
+ u32 proxy_sta_mode;
+ u32 set_mcast2ucast_mode;
+ u32 set_mcast2ucast_buffer;
+ u32 remove_mcast2ucast_buffer;
+ u32 peer_sta_ps_statechg_enable;
+ u32 igmpmld_ac_override;
+ u32 block_interbss;
+ u32 set_disable_reset_cmdid;
+ u32 set_msdu_ttl_cmdid;
+ u32 set_ppdu_duration_cmdid;
+ u32 txbf_sound_period_cmdid;
+ u32 set_promisc_mode_cmdid;
+ u32 set_burst_mode_cmdid;
+ u32 en_stats;
+ u32 mu_group_policy;
+ u32 noise_detection;
+ u32 noise_threshold;
+ u32 dpd_enable;
+ u32 set_mcast_bcast_echo;
+ u32 atf_strict_sch;
+ u32 atf_sched_duration;
+ u32 ant_plzn;
+ u32 mgmt_retry_limit;
+ u32 sensitivity_level;
+ u32 signed_txpower_2g;
+ u32 signed_txpower_5g;
+ u32 enable_per_tid_amsdu;
+ u32 enable_per_tid_ampdu;
+ u32 cca_threshold;
+ u32 rts_fixed_rate;
+ u32 pdev_reset;
+ u32 wapi_mbssid_offset;
+ u32 arp_srcaddr;
+ u32 arp_dstaddr;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2828,6 +3535,100 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_CAL_PERIOD
};
+enum wmi_10_4_pdev_param {
+ WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PMF_QOS,
+ WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_DCS,
+ WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ WMI_10_4_PDEV_PARAM_PROXY_STA,
+ WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_10_4_PDEV_PARAM_BURST_DUR,
+ WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10_4_PDEV_PARAM_RX_FILTER,
+ WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_EN_STATS,
+ WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
@@ -3506,6 +4307,22 @@ struct wmi_vdev_param_map {
u32 drop_unencry;
u32 tx_encap_type;
u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+ u32 rc_num_retries;
+ u32 cabq_maxdur;
+ u32 mfptest_set;
+ u32 rts_fixed_rate;
+ u32 vht_sgimask;
+ u32 vht80_ratemask;
+ u32 early_rx_adjust_enable;
+ u32 early_rx_tgt_bmiss_num;
+ u32 early_rx_bmiss_sample_cycle;
+ u32 early_rx_slop_step;
+ u32 early_rx_init_slop;
+ u32 early_rx_adjust_pause;
+ u32 proxy_sta;
+ u32 meru_vc;
+ u32 rx_decap_type;
+ u32 bw_nss_ratemask;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,11 +4581,85 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
};
+enum wmi_10_4_vdev_param {
+ WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ WMI_10_4_VDEV_PARAM_PREAMBLE,
+ WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_10_4_VDEV_PARAM_WDS,
+ WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ WMI_10_4_VDEV_PARAM_CHWIDTH,
+ WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_SGI,
+ WMI_10_4_VDEV_PARAM_LDPC,
+ WMI_10_4_VDEV_PARAM_TX_STBC,
+ WMI_10_4_VDEV_PARAM_RX_STBC,
+ WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ WMI_10_4_VDEV_PARAM_NSS,
+ WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_10_4_VDEV_PARAM_TXBF,
+ WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_10_4_VDEV_PARAM_PROXY_STA,
+ WMI_10_4_VDEV_PARAM_MERU_VC,
+ WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
+
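The LSB/MASK pairs above describe 4-bit subfields packed into the TXBF configuration word (STS capability at bits 7:4, sounding dimensions at bits 11:8). A hedged pack/unpack sketch using the same constants:

#include <stdint.h>

#define STS_CAP_LSB	4
#define STS_CAP_MASK	0xf0u

static uint32_t txbf_get_sts_cap(uint32_t conf)
{
	return (conf & STS_CAP_MASK) >> STS_CAP_LSB;
}

static uint32_t txbf_set_sts_cap(uint32_t conf, uint32_t sts)
{
	return (conf & ~STS_CAP_MASK) | ((sts << STS_CAP_LSB) & STS_CAP_MASK);
}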
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@@ -4305,6 +5196,14 @@ struct wmi_tim_info {
__le32 tim_num_ps_pending;
} __packed;
+struct wmi_tim_info_arg {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ const __le32 *tim_bitmap;
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
/* Maximum number of NOA Descriptors supported */
#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
@@ -4336,6 +5235,47 @@ struct wmi_host_swba_event {
struct wmi_bcn_info bcn_info[0];
} __packed;
+/* 16 words = 512 clients + 1 guard word */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
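The array size follows directly from the bitmap arithmetic in the comment: 512 stations at one bit each is 512/32 = 16 32-bit words, plus one guard word. A compile-time restatement (illustrative only):

#include <assert.h>

#define MAX_TIM_CLIENTS	512
#define BITS_PER_WORD	32

/* 16 data words + 1 guard word = 17 */
static_assert(MAX_TIM_CLIENTS / BITS_PER_WORD + 1 == 17,
	      "10.4 TIM bitmap sizing");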
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor
+ noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+ struct wmi_10_4_tim_info tim_info;
+ struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
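bcn_info[] is a dense array with one entry per set bit in vdev_map, so an SWBA handler walks the map and advances the array index only on set bits. A standalone sketch of that walk (callback name hypothetical):

#include <stdint.h>

static void handle_bcn_info(unsigned int vdev_id, unsigned int idx) { }

static void for_each_swba_vdev(uint32_t vdev_map)
{
	unsigned int bit, idx = 0;

	for (bit = 0; vdev_map; bit++, vdev_map >>= 1) {
		if (!(vdev_map & 1))
			continue;
		handle_bcn_info(bit, idx);	/* bcn_info[idx] is vdev 'bit' */
		idx++;
	}
}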
#define WMI_MAX_AP_VDEV 16
struct wmi_tbtt_offset_event {
@@ -4660,11 +5600,24 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+struct wmi_10_4_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+} __packed;
+
struct wmi_peer_sta_kickout_event {
struct wmi_mac_addr peer_macaddr;
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
@@ -4840,6 +5793,9 @@ struct wmi_ch_info_ev_arg {
__le32 noise_floor;
__le32 rx_clear_count;
__le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
};
struct wmi_vdev_start_ev_arg {
@@ -4855,16 +5811,29 @@ struct wmi_peer_kick_ev_arg {
struct wmi_swba_ev_arg {
__le32 vdev_map;
- const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+ struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
};
struct wmi_phyerr_ev_arg {
- __le32 num_phyerrs;
- __le32 tsf_l32;
- __le32 tsf_u32;
- __le32 buf_len;
- const struct wmi_phyerr *phyerrs;
+ u32 tsf_timestamp;
+ u16 freq1;
+ u16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u16 nf_chains[4];
+ u32 buf_len;
+ const u8 *buf;
+ u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+ u32 num_phyerrs;
+ u32 tsf_l32;
+ u32 tsf_u32;
+ u32 buf_len;
+ const void *phyerrs;
};
struct wmi_svc_rdy_ev_arg {
@@ -5085,6 +6054,12 @@ struct wmi_tdls_peer_capab_arg {
u32 pref_offchan_bw;
};
+enum wmi_txbf_conf {
+ WMI_TXBF_CONF_UNSUPPORTED,
+ WMI_TXBF_CONF_BEFORE_ASSOC,
+ WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
@@ -5136,9 +6111,9 @@ void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_dfs(struct ath10k *ar,
- const struct wmi_phyerr *phyerr, u64 tsf);
+ struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
- const struct wmi_phyerr *phyerr,
+ struct wmi_phyerr_ev_arg *phyerr,
u64 tsf);
void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
@@ -5167,5 +6142,6 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
-
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index a68d8fd853a3..8e02b381990f 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH10K_STATE_OFF:
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_UTF:
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
- return ret ? 1 : 0;
+ return ret;
}
int ath10k_wow_init(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 2399a3921762..b1278f9f24ba 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -5,7 +5,6 @@ config ATH5K
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
- select AVERAGE
select ATH5K_AHB if ATH25
select ATH5K_PCI if !ATH25
---help---
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 5c008757662b..38be2702c0e2 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -223,7 +223,7 @@ static void
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
- int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+ int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "ODFM" : "CCK");
@@ -309,7 +309,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
- int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+ int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index e22b0e778927..fa6e89e5c421 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1252,6 +1252,8 @@ struct ath5k_statistics {
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
/* Driver state associated with an instance of a device */
struct ath5k_hw {
struct ath_common common;
@@ -1432,7 +1434,7 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
/* average beacon RSSI in our BSS (used by ANI) */
- struct ewma ah_beacon_rssi_avg;
+ struct ewma_beacon_rssi ah_beacon_rssi_avg;
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
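The conversion above moves ath5k from the old runtime-parameterized ewma (initialized with factor 1024, weight 8) to a DECLARE_EWMA()-generated type with those parameters fixed at compile time. The underlying arithmetic is unchanged; a hedged userspace reimplementation of it, not the kernel's <linux/average.h>:

#include <stdio.h>

#define EWMA_FACTOR	1024UL	/* fixed-point scaling */
#define EWMA_WEIGHT	8UL	/* smoothing: a new sample counts 1/8 */

struct ewma_sketch { unsigned long internal; };

static void ewma_sketch_add(struct ewma_sketch *e, unsigned long val)
{
	unsigned long scaled = val * EWMA_FACTOR;

	e->internal = e->internal ?
		(e->internal * (EWMA_WEIGHT - 1) + scaled) / EWMA_WEIGHT :
		scaled;
}

static unsigned long ewma_sketch_read(const struct ewma_sketch *e)
{
	return e->internal / EWMA_FACTOR;
}

int main(void)
{
	struct ewma_sketch e = { 0 };
	int i;

	for (i = 0; i < 16; i++)
		ewma_sketch_add(&e, 40);
	printf("%lu\n", ewma_sketch_read(&e));	/* 40 for constant input */
	return 0;
}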
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 23552f43d125..342563a3706f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1430,7 +1430,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
trace_ath5k_rx(ah, skb);
if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
- ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+ ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
/* check beacons in IBSS mode */
if (ah->opmode == NL80211_IFTYPE_ADHOC)
@@ -2936,7 +2936,7 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
- ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+ ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
/* clear survey data and cycle counters */
memset(&ah->survey, 0, sizeof(ah->survey));
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index c70782e8f07b..654a1e33f827 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -722,7 +722,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
st->mib_intr);
len += snprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
- (int)ewma_read(&ah->ah_beacon_rssi_avg));
+ (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
#define CC_PRINT(_struct, _field) \
_struct._field, \
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 14cab1403dd6..112d8a9b8d43 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist {
};
/*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
* there are mandatory and optional codes that must be handled
*/
enum htc_credit_dist_reason {
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index b921005ad7ee..a5e1de75a4a3 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -154,7 +154,7 @@ struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
}
/* Performs DIX to 802.3 encapsulation for transmit packets.
- * Assumes the entire DIX header is contigous and that there is
+ * Assumes the entire DIX header is contiguous and that there is
* enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
*/
int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
@@ -449,7 +449,7 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
/*
* Performs 802.3 to DIX encapsulation for received packets.
- * Assumes the entire 802.3 header is contigous.
+ * Assumes the entire 802.3 header is contiguous.
*/
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fc595b92ac56..c5f8bc4b5595 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -455,7 +455,7 @@
#define AR_PHY_MODE (AR_SM_BASE + 0x8)
#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -495,7 +495,7 @@
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
-#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
#define AR_PHY_TEST_BBB_OBS_SEL_S 19
@@ -521,24 +521,29 @@
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
-#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
-#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
-#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2 (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3 (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a7a81b3969ce..c85c47978e1e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -172,14 +172,6 @@ struct ath_txq {
struct sk_buff_head complete_q;
};
-struct ath_atx_ac {
- struct ath_txq *txq;
- struct list_head list;
- struct list_head tid_q;
- bool clear_ps_filter;
- bool sched;
-};
-
struct ath_frame_info {
struct ath_buf *bf;
u16 framelen;
@@ -242,7 +234,7 @@ struct ath_atx_tid {
struct sk_buff_head buf_q;
struct sk_buff_head retry_q;
struct ath_node *an;
- struct ath_atx_ac *ac;
+ struct ath_txq *txq;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
u16 seq_start;
u16 seq_next;
@@ -252,8 +244,8 @@ struct ath_atx_tid {
int baw_tail; /* next unused tx buffer slot */
s8 bar_index;
- bool sched;
bool active;
+ bool clear_ps_filter;
};
struct ath_node {
@@ -261,7 +253,6 @@ struct ath_node {
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
- struct ath_atx_ac ac[IEEE80211_NUM_ACS];
u16 maxampdu;
u8 mpdudensity;
@@ -410,6 +401,12 @@ enum ath_offchannel_state {
ATH_OFFCHANNEL_ROC_DONE,
};
+enum ath_roc_complete_reason {
+ ATH_ROC_COMPLETE_EXPIRE,
+ ATH_ROC_COMPLETE_ABORT,
+ ATH_ROC_COMPLETE_CANCEL,
+};
+
struct ath_offchannel {
struct ath_chanctx chan;
struct timer_list timer;
@@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath_chanctx_set_next(struct ath_softc *sc, bool force);
void ath_offchannel_next(struct ath_softc *sc);
void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+ enum ath_roc_complete_reason reason);
struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
#else
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 206665059d66..90f5773a1a61 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc)
}
}
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- if (abort)
+ sc->offchannel.roc_vif = NULL;
+ sc->offchannel.roc_chan = NULL;
+
+ switch (reason) {
+ case ATH_ROC_COMPLETE_ABORT:
ath_dbg(common, CHAN_CTX, "RoC aborted\n");
- else
+ ieee80211_remain_on_channel_expired(sc->hw);
+ break;
+ case ATH_ROC_COMPLETE_EXPIRE:
ath_dbg(common, CHAN_CTX, "RoC expired\n");
+ ieee80211_remain_on_channel_expired(sc->hw);
+ break;
+ case ATH_ROC_COMPLETE_CANCEL:
+ ath_dbg(common, CHAN_CTX, "RoC canceled\n");
+ break;
+ }
- sc->offchannel.roc_vif = NULL;
- sc->offchannel.roc_chan = NULL;
- ieee80211_remain_on_channel_expired(sc->hw);
ath_offchannel_next(sc);
ath9k_ps_restore(sc);
}
@@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data)
case ATH_OFFCHANNEL_ROC_START:
case ATH_OFFCHANNEL_ROC_WAIT:
sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
- ath_roc_complete(sc, false);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
break;
default:
break;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index dbf8f4959642..da32c8faad94 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
[RESET_TYPE_MCI] = "MCI Reset",
[RESET_TYPE_CALIBRATION] = "Calibration error",
+ [RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+ [RESET_RX_DMA_ERROR] = "Rx DMA stop error",
};
int i;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a8e9319958e6..cd68c5f0e751 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -50,6 +50,8 @@ enum ath_reset_type {
RESET_TYPE_BEACON_STUCK,
RESET_TYPE_MCI,
RESET_TYPE_CALIBRATION,
+ RESET_TX_DMA_ERROR,
+ RESET_RX_DMA_ERROR,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index ffca918ff16a..c2ca57a2ed09 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
struct ath_node *an = file->private_data;
struct ath_softc *sc = an->sc;
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
u32 len = 0, size = 4096;
char *buf;
size_t retval;
- int tidno, acno;
+ int tidno;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@@ -49,26 +48,13 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
an->mpdudensity);
len += scnprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- txq = ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
- ath_txq_unlock(sc, txq);
- }
-
- len += scnprintf(buf + len, size - len,
"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- txq = tid->ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->active) {
len += scnprintf(buf + len, size - len,
@@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
tid->baw_head,
tid->baw_tail,
tid->bar_index,
- tid->sched);
+ !list_empty(&tid->list));
}
ath_txq_unlock(sc, txq);
}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index e98a9eaba7ff..1ece42c2443d 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -30,6 +30,157 @@ struct ath_radar_data {
u8 pulse_length_pri;
};
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of 50us
+ * - 5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN = 1;
+static const int BIN_DELTA_MAX = 10;
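+/* these two cases work out to chirp rates of 20/50 = 0.4 MHz/us and
+ * 5/100 = 0.05 MHz/us respectively
+ */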
+
+/* we need at least 3 deltas / 4 samples for a reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF = 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH = 20;
+static const int MAX_CHIRP_PULSE_WIDTH = 110;
+
+struct ath9k_dfs_fft_20 {
+ u8 bin[28];
+ u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+ u8 bin[64];
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+ return (bins[2] & 0xfc) >> 2;
+}
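+/* the 12-bit max magnitude is scattered over three report bytes:
+ * bits [7:6] of bins[0] hold the low two bits, bins[1] the middle
+ * eight, and bits [1:0] of bins[2] the top two
+ */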
+static inline int fft_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
+
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+ bool is_ctl, bool is_ext)
+{
+ const int DFS_UPPER_BIN_OFFSET = 64;
+ /* if detected radar on both channels, select the significant one */
+ if (is_ctl && is_ext) {
+ /* first check whether channels have 'strong' bins */
+ is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+ is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+ /* if still unclear, take higher magnitude */
+ if (is_ctl && is_ext) {
+ int mag_lower = fft_max_magnitude(fft->lower_bins);
+ int mag_upper = fft_max_magnitude(fft->upper_bins);
+ if (mag_upper > mag_lower)
+ is_ctl = false;
+ else
+ is_ext = false;
+ }
+ }
+ if (is_ctl)
+ return fft_max_index(fft->lower_bins);
+ return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+ int datalen, bool is_ctl, bool is_ext)
+{
+ int i;
+ int max_bin[FFT_NUM_SAMPLES];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int prev_delta;
+
+ if (IS_CHAN_HT40(ah->curchan)) {
+ struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+ int num_fft_packets = datalen / sizeof(*fft);
+ if (num_fft_packets == 0)
+ return false;
+
+ ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+ datalen, num_fft_packets);
+ if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ ath_dbg(common, DFS, "not enough packets for chirp\n");
+ return false;
+ }
+ /* HW sometimes adds 2 garbage bytes in front of FFT samples */
+ if ((datalen % sizeof(*fft)) == 2) {
+ fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+ ath_dbg(common, DFS, "fixing datalen by 2\n");
+ }
+ if (IS_CHAN_HT40MINUS(ah->curchan)) {
+ int temp = is_ctl;
+ is_ctl = is_ext;
+ is_ext = temp;
+ }
+ for (i = 0; i < FFT_NUM_SAMPLES; i++)
+ max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+ is_ext);
+ } else {
+ struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+ int num_fft_packets = datalen / sizeof(*fft);
+ if (num_fft_packets == 0)
+ return false;
+ ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+ datalen, num_fft_packets);
+ if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ ath_dbg(common, DFS, "not enough packets for chirp\n");
+ return false;
+ }
+ /* in ht20 the max index is a 6-bit signed number; flip the
+ * sign bit to map the range [-32, 31] onto [0, 63]
+ */
+ for (i = 0; i < FFT_NUM_SAMPLES; i++)
+ max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+ }
+ ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+ max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+ /* Check for chirp attributes within specs
+ * a) delta of adjacent max_bins is within range
+ * b) delta of adjacent deltas is within tolerance
+ */
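+ /* e.g. max_bin = [10, 13, 17, 20] gives deltas [3, 4, 3] (all inside
+ * [1, 10]) and ddeltas [1, -1] (both within the +/-2 tolerance), so
+ * the pulse is reported as chirping
+ */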
+ prev_delta = 0;
+ for (i = 0; i < NUM_DIFFS; i++) {
+ int ddelta = -1;
+ int delta = max_bin[i + 1] - max_bin[i];
+
+ /* ensure gradient is within valid range */
+ if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+ ath_dbg(common, DFS, "CHIRP: invalid delta %d "
+ "in sample %d\n", delta, i);
+ return false;
+ }
+ if (i == 0)
+ goto done;
+ ddelta = delta - prev_delta;
+ if (abs(ddelta) > MAX_DIFF) {
+ ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+ ddelta);
+ return false;
+ }
+done:
+ ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+ i, delta, ddelta);
+ prev_delta = delta;
+ }
+ return true;
+}
+/**** end: CHIRP **************************************************************/
+
/* convert pulse duration to usecs, considering clock mode */
static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
{
@@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
return false;
}
- /*
- * TODO: check chirping pulses
- * checks for chirping are dependent on the DFS regulatory domain
- * used, which is yet TBD
- */
-
/* convert duration to usecs */
pe->width = dur_to_usecs(sc->sc_ah, dur);
pe->rssi = rssi;
@@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
return;
+ if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+ pe.width < MAX_CHIRP_PULSE_WIDTH) {
+ bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+ bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
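+ /* the last 3 bytes of the PHY error frame hold the pulse
+ * summary parsed into 'ard' above; strip them from the
+ * FFT payload
+ */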
+ int clen = datalen - 3;
+ pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+ } else {
+ pe.chirp = false;
+ }
+
ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
sc->dfs_prev_pulse_ts = pe.ts;
if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
ath9k_dfs_process_radar_pulse(sc, &pe);
- if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+ if (IS_CHAN_HT40(ah->curchan) &&
+ ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
ath9k_dfs_process_radar_pulse(sc, &pe);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 39eaf9b6e9b4..1e84882f8c5b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = {
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
- int time_left;
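+ /* wait_for_completion_timeout() returns unsigned long, not int */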
+ unsigned long time_left;
if (atomic_read(&priv->htc->tgt_ready) > 0) {
atomic_dec(&priv->htc->tgt_ready);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index dab1323dfec7..172a9ff4aaab 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -794,8 +794,11 @@ void ath9k_htc_ani_work(struct work_struct *work)
common->ani.longcal_timer = timestamp;
}
- /* Short calibration applies only while caldone is false */
- if (!common->ani.caldone) {
+ /*
+ * Short calibration applies only while caldone
+ * is false or -ETIMEDOUT
+ */
+ if (common->ani.caldone <= 0) {
if ((timestamp - common->ani.shortcal_timer) >=
short_cal_interval) {
shortcal = true;
@@ -844,7 +847,11 @@ set_timer:
*/
cal_interval = ATH_LONG_CALINTERVAL;
cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
- if (!common->ani.caldone)
+ /*
+ * Short calibration applies only while caldone
+ * is false or -ETIMEDOUT
+ */
+ if (common->ani.caldone <= 0)
cal_interval = min(cal_interval, (u32)short_cal_interval);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d2408da38c1c..2294709ee8b0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target)
{
struct sk_buff *skb;
struct htc_config_pipe_msg *cp_msg;
- int ret, time_left;
+ int ret;
+ unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
@@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target)
{
struct sk_buff *skb;
struct htc_comp_msg *comp_msg;
- int ret = 0, time_left;
+ int ret = 0;
+ unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
@@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target,
struct sk_buff *skb;
struct htc_endpoint *endpoint;
struct htc_conn_svc_msg *conn_msg;
- int ret, time_left;
+ int ret;
+ unsigned long time_left;
/* Find an available endpoint */
endpoint = get_next_avail_ep(target->endpoint);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..1dd0339de372 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
return;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
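+ /* skip the generic AR_SREV probe below so this version
+ * is not overwritten
+ */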
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -3185,6 +3186,7 @@ static struct {
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
{ AR_SREV_VERSION_9531, "9531" },
+ { AR_SREV_VERSION_9561, "9561" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index eff0e5325e6a..57f95f2dca5b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit if_limits_multi[] = {
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
static const struct ieee80211_iface_combination if_comb_multi[] = {
{
.limits = if_limits_multi,
.n_limits = ARRAY_SIZE(if_limits_multi),
- .max_interfaces = 2,
+ .max_interfaces = 3,
.num_different_channels = 2,
.beacon_int_infra_match = true,
},
@@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
if (ath9k_ps_enable)
ieee80211_hw_set(hw, SUPPORTS_PS);
@@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
+ if (ath9k_is_chanctx_enabled())
+ hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 90631d768a60..5ad0feeebc86 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_tx_control txctl;
- int time_left;
+ unsigned long time_left;
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index cfd45cb8ccfc..c27143ba9ffb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath_softc *sc = hw->priv;
+ struct ath_chanctx *ctx;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
spin_lock_bh(&sc->chan_lock);
- sc->cur_chan->rxfilter = *total_flags;
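+ /* apply the filter to every channel context, the offchannel one
+ * included, so the active filter does not depend on which context
+ * is current
+ */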
+ ath_for_each_chanctx(sc, ctx)
+ ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ sc->offchannel.chan.rxfilter = *total_flags;
+#endif
spin_unlock_bh(&sc->chan_lock);
ath9k_ps_wakeup(sc);
@@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
del_timer_sync(&sc->offchannel.timer);
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
- ath_roc_complete(sc, true);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
}
if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
if (sc->offchannel.roc_vif) {
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
- ath_roc_complete(sc, true);
+ ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
}
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c75fb1ab77d..d3189daf9996 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc)
if (!(ah->ah_flags & AH_UNPLUGGED) &&
unlikely(!stopped)) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Could not stop RX, we could be "
- "confusing the DMA engine when we start RX up\n");
- ATH_DBG_WARN_ON_ONCE(!stopped);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "Failed to stop Rx DMA\n");
+ RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
}
return stopped && !reset;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index ca533b4321bd..9c16e2a6d185 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
sizeof(struct wmi_cmd_hdr);
struct sk_buff *skb;
u8 *data;
- int time_left, ret = 0;
+ unsigned long time_left;
+ int ret = 0;
if (ah->ah_flags & AH_UNPLUGGED)
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3ad79bb4f2c2..3e3dac3d7060 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
- struct ath_atx_ac *ac = tid->ac;
struct list_head *list;
struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
struct ath_chanctx *ctx = avp->chanctx;
@@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
if (!ctx)
return;
- if (tid->sched)
- return;
-
- tid->sched = true;
- list_add_tail(&tid->list, &ac->tid_q);
-
- if (ac->sched)
- return;
-
- ac->sched = true;
-
list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
- list_add_tail(&ac->list, list);
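+ /* a TID that is already queued keeps its round-robin position */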
+ if (list_empty(&tid->list))
+ list_add_tail(&tid->list, list);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = tid->ac->txq;
+ struct ath_txq *txq = tid->txq;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb, *tskb;
struct ath_buf *bf;
@@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = tid->ac->txq;
+ struct ath_txq *txq = tid->txq;
struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
@@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_queue_tid(sc, txq, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
- tid->ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
}
}
@@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, bt_aggr_limit, legacy = 0;
- int q = tid->ac->txq->mac80211_qnum;
+ int q = tid->txq->mac80211_qnum;
int i;
skb = bf->bf_mpdu;
@@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (list_empty(&bf_q))
return false;
- if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
- tid->ac->clear_ps_filter = false;
+ if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+ tid->clear_ps_filter = false;
tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
}
@@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
- txq = txtid->ac->txq;
+ txq = txtid->txq;
ath_txq_lock(sc, txq);
@@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
- struct ath_txq *txq = txtid->ac->txq;
+ struct ath_txq *txq = txtid->txq;
ath_txq_lock(sc, txq);
txtid->active = false;
@@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
@@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- if (!tid->sched) {
+ if (list_empty(&tid->list)) {
ath_txq_unlock(sc, txq);
continue;
}
buffered = ath_tid_has_buffered(tid);
- tid->sched = false;
- list_del(&tid->list);
-
- if (ac->sched) {
- ac->sched = false;
- list_del(&ac->list);
- }
+ list_del_init(&tid->list);
ath_txq_unlock(sc, txq);
@@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
struct ath_txq *txq;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(sc, txq, tid);
@@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
an = (struct ath_node *)sta->drv_priv;
tid = ATH_AN_2_TID(an, tidno);
- txq = tid->ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
@@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
tid = ATH_AN_2_TID(an, i);
- ath_txq_lock(sc, tid->ac->txq);
+ ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
@@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (an->sta && !ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
- ath_txq_unlock_complete(sc, tid->ac->txq);
+ ath_txq_unlock_complete(sc, tid->txq);
}
if (list_empty(&bf_q))
@@ -1883,8 +1862,11 @@ bool ath_drain_all_txq(struct ath_softc *sc)
npend |= BIT(i);
}
- if (npend)
- ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+ if (npend) {
+ RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+ ath_dbg(common, RESET,
+ "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+ }
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -1915,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
- struct list_head *ac_list;
+ struct list_head *tid_list;
bool sent = false;
if (txq->mac80211_qnum < 0)
@@ -1927,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
return;
spin_lock_bh(&sc->chan_lock);
- ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+ tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
- if (list_empty(ac_list)) {
+ if (list_empty(tid_list)) {
spin_unlock_bh(&sc->chan_lock);
return;
}
rcu_read_lock();
- last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
- while (!list_empty(ac_list)) {
+ last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+ while (!list_empty(tid_list)) {
bool stop = false;
if (sc->cur_chan->stopped)
break;
- ac = list_first_entry(ac_list, struct ath_atx_ac, list);
- last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
- list_del(&ac->list);
- ac->sched = false;
-
- while (!list_empty(&ac->tid_q)) {
-
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
- list);
- list_del(&tid->list);
- tid->sched = false;
-
- if (ath_tx_sched_aggr(sc, txq, tid, &stop))
- sent = true;
-
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (ath_tid_has_buffered(tid))
- ath_tx_queue_tid(sc, txq, tid);
+ tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+ list_del_init(&tid->list);
- if (stop || tid == last_tid)
- break;
- }
+ if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+ sent = true;
- if (!list_empty(&ac->tid_q) && !ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, ac_list);
- }
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (ath_tid_has_buffered(tid))
+ ath_tx_queue_tid(sc, txq, tid);
if (stop)
break;
- if (ac == last_ac) {
+ if (tid == last_tid) {
if (!sent)
break;
sent = false;
- last_ac = list_entry(ac_list->prev,
- struct ath_atx_ac, list);
+ last_tid = list_entry(tid_list->prev,
+ struct ath_atx_tid, list);
}
}
@@ -2373,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an && queue) {
- WARN_ON(tid->ac->txq != txctl->txq);
+ WARN_ON(tid->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
- tid->ac->clear_ps_filter = true;
+ tid->clear_ps_filter = true;
/*
* Add this frame to software queue for scheduling later
@@ -2470,8 +2433,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bf = list_first_entry(&bf_q, struct ath_buf, list);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
- if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
- hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
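+ /* frame_control is __le16; convert the flag, not the field */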
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
sizeof(*hdr), DMA_TO_DEVICE);
}
@@ -2870,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
int tidno, acno;
for (tidno = 0, tid = &an->tid[tidno];
@@ -2881,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->seq_start = tid->seq_next = 0;
tid->baw_size = WME_MAX_BA;
tid->baw_head = tid->baw_tail = 0;
- tid->sched = false;
tid->active = false;
+ tid->clear_ps_filter = true;
__skb_queue_head_init(&tid->buf_q);
__skb_queue_head_init(&tid->retry_q);
+ INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
- tid->ac = &an->ac[acno];
- }
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- ac->sched = false;
- ac->clear_ps_filter = true;
- ac->txq = sc->tx.txq_map[acno];
- INIT_LIST_HEAD(&ac->tid_q);
+ tid->txq = sc->tx.txq_map[acno];
}
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
- struct ath_atx_ac *ac;
struct ath_atx_tid *tid;
struct ath_txq *txq;
int tidno;
@@ -2908,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- ac = tid->ac;
- txq = ac->txq;
+ txq = tid->txq;
ath_txq_lock(sc, txq);
- if (tid->sched) {
- list_del(&tid->list);
- tid->sched = false;
- }
-
- if (ac->sched) {
- list_del(&ac->list);
- tid->ac->sched = false;
- }
+ if (!list_empty(&tid->list))
+ list_del_init(&tid->list);
ath_tid_drain(sc, txq, tid);
tid->active = false;
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 508eccf5d982..d59d83e0ce4b 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode)
return "P2P-CLIENT";
case NL80211_IFTYPE_P2P_GO:
return "P2P-GO";
+ case NL80211_IFTYPE_OCB:
+ return "OCB";
default:
return "UNKNOWN";
}
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 1b5ad1965607..cc5c592fc4c0 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
tmp_false_count++;
}
}
- if (ps.count < min_count)
+ if (ps.count <= min_count)
/* did not reach minimum count, drop sequence */
continue;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 050506f842e9..64b432625fbb 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
+wil6210-y += pm.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644
index 000000000000..c131b5e1292f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this Dword until the BL
+ * sets it to 1 (initial value
+ * should be 0)
+ */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le16 rf_type; /* 0x880A44 connected RF ID */
+ __le16 rf_status; /* 0x880A46 RF status,
+ * 0 is OK else error
+ */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+ u8 bl_version_major; /* 0x880A52 BL ver. major */
+ u8 bl_version_minor; /* 0x880A53 BL ver. minor */
+ __le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */
+ __le16 bl_version_build; /* 0x880A56 BL ver. build */
+ /* valid only for version 2 and above */
+ __le32 bl_assert_code; /* 0x880A58 BL Assert code */
+ __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
+ __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
+ __le32 bl_magic_number; /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this Dword until the BL
+ * sets it to 1 (initial value
+ * should be 0)
+ */
+#define BL_READY (1) /* ready indication */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le32 rf_type; /* 0x880A44 connected RF ID */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c79cfe02ec80..20d07ef679e8 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
else
wil_dbg_misc(wil, "Scan has no IE's\n");
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
- request->ie);
- if (rc) {
- wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+ if (rc)
goto out;
- }
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
* ies in FW.
*/
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
- if (rc) {
- wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ if (rc)
goto out;
- }
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
@@ -722,17 +717,98 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
{
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- int rc = 0;
if (bcon->probe_resp_len <= hlen)
return 0;
+/* always use IEs from the full probe frame, they have more info,
+ * notably RSN
+ */
+ bcon->proberesp_ies = f->u.probe_resp.variable;
+ bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
if (!bcon->assocresp_ies) {
- bcon->assocresp_ies = f->u.probe_resp.variable;
- bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
- rc = 1;
+ bcon->assocresp_ies = bcon->proberesp_ies;
+ bcon->assocresp_ies_len = bcon->proberesp_ies_len;
}
+ return 1;
+}
+
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+ struct cfg80211_beacon_data *bcon)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+#if 0 /* to use beacon IE's, remove this #if 0 */
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
+ return rc;
+}
+
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *ssid, size_t ssid_len, u32 privacy,
+ int bi, u8 chan,
+ struct cfg80211_beacon_data *bcon,
+ u8 hidden_ssid)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
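+ /* take the interface down and up to start from a clean state,
+ * then program the SSID and IEs before starting the PCP
+ */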
+ wil_set_recovery_state(wil, fw_recovery_idle);
+
+ mutex_lock(&wil->mutex);
+
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ if (rc)
+ goto out;
+
+ rc = wmi_set_ssid(wil, ssid_len, ssid);
+ if (rc)
+ goto out;
+
+ rc = _wil_cfg80211_set_ies(wiphy, bcon);
+ if (rc)
+ goto out;
+
+ wil->privacy = privacy;
+ wil->channel = chan;
+ wil->hidden_ssid = hidden_ssid;
+
+ netif_carrier_on(ndev);
+
+ rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+ if (rc)
+ goto err_pcp_start;
+
+ rc = wil_bcast_init(wil);
+ if (rc)
+ goto err_bcast;
+
+ goto out; /* success */
+
+err_bcast:
+ wmi_pcp_stop(wil);
+err_pcp_start:
+ netif_carrier_off(ndev);
+out:
+ mutex_unlock(&wil->mutex);
return rc;
}
@@ -741,63 +817,50 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
- size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- const u8 *pr_ies = NULL;
- size_t pr_ies_len = 0;
int rc;
+ u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
- if (bcon->probe_resp_len > hlen) {
- pr_ies = f->u.probe_resp.variable;
- pr_ies_len = bcon->probe_resp_len - hlen;
- }
-
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
}
- /* FW do not form regular beacon, so bcon IE's are not set
- * For the DMG bcon, when it will be supported, bcon IE's will
- * be reused; add something like:
- * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- * bcon->beacon_ies);
- */
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
- if (rc) {
- wil_err(wil, "set_ie(PROBE_RESP) failed\n");
- return rc;
- }
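+ /* an RSN element in the probe response IEs implies a protected BSS */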
+ if (bcon->proberesp_ies &&
+ cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+ bcon->proberesp_ies_len))
+ privacy = 1;
- rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
- bcon->assocresp_ies_len,
- bcon->assocresp_ies);
- if (rc) {
- wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
- return rc;
+ /* in case privacy has changed, need to restart the AP */
+ if (wil->privacy != privacy) {
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+ wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+ wil->privacy, privacy);
+
+ rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+ wdev->ssid_len, privacy,
+ wdev->beacon_interval,
+ wil->channel, bcon,
+ wil->hidden_ssid);
+ } else {
+ rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
- return 0;
+ return rc;
}
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
{
- int rc = 0;
+ int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
- u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
- struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
- size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- const u8 *pr_ies = NULL;
- size_t pr_ies_len = 0;
u8 hidden_ssid;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -807,6 +870,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
+ switch (info->hidden_ssid) {
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+ break;
+
+ default:
+ wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+ return -EOPNOTSUPP;
+ }
wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
channel->center_freq, info->privacy ? "secure" : "open");
wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
@@ -820,80 +900,16 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
- if (bcon->probe_resp_len > hlen) {
- pr_ies = f->u.probe_resp.variable;
- pr_ies_len = bcon->probe_resp_len - hlen;
- }
-
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
}
- wil_set_recovery_state(wil, fw_recovery_idle);
-
- mutex_lock(&wil->mutex);
-
- __wil_down(wil);
- rc = __wil_up(wil);
- if (rc)
- goto out;
-
- rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
- if (rc)
- goto out;
-
- /* IE's */
- /* bcon 'head IE's are not relevant for 60g band */
- /*
- * FW do not form regular beacon, so bcon IE's are not set
- * For the DMG bcon, when it will be supported, bcon IE's will
- * be reused; add something like:
- * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- * bcon->beacon_ies);
- */
- wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
- wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
- bcon->assocresp_ies);
-
- wil->privacy = info->privacy;
-
- switch (info->hidden_ssid) {
- case NL80211_HIDDEN_SSID_NOT_IN_USE:
- hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
- break;
-
- case NL80211_HIDDEN_SSID_ZERO_LEN:
- hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
- break;
-
- case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
- hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
- break;
-
- default:
- rc = -EOPNOTSUPP;
- goto out;
- }
-
- netif_carrier_on(ndev);
-
- rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
- channel->hw_value, hidden_ssid);
- if (rc)
- goto err_pcp_start;
+ rc = _wil_cfg80211_start_ap(wiphy, ndev,
+ info->ssid, info->ssid_len, info->privacy,
+ info->beacon_interval, channel->hw_value,
+ bcon, hidden_ssid);
- rc = wil_bcast_init(wil);
- if (rc)
- goto err_bcast;
-
- goto out; /* success */
-err_bcast:
- wmi_pcp_stop(wil);
-err_pcp_start:
- netif_carrier_off(ndev);
-out:
- mutex_unlock(&wil->mutex);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 75219a1b8805..d1a1e160ef31 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_printf(s, " swhead = %d\n", vring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
if (x) {
- v = ioread32(x);
+ v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
@@ -156,6 +156,12 @@ static const struct file_operations fops_vring = {
.llseek = seq_lseek,
};
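+/* dump a buffer via seq_hex_dump(): 16 bytes per row, single-byte
+ * groups, no offset prefix and no ASCII column
+ */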
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+ const char *prefix)
+{
+ seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
+}
+
static void wil_print_ring(struct seq_file *s, const char *prefix,
void __iomem *off)
{
@@ -212,8 +218,6 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
le16_to_cpu(hdr.seq), len,
le16_to_cpu(hdr.type), hdr.flags);
if (len <= MAX_MBOXITEM_SIZE) {
- int n = 0;
- char printbuf[16 * 3 + 2];
unsigned char databuf[MAX_MBOXITEM_SIZE];
void __iomem *src = wmi_buffer(wil, d.addr) +
sizeof(struct wil6210_mbox_hdr);
@@ -223,16 +227,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
* reading header
*/
wil_memcpy_fromio_32(databuf, src, len);
- while (n < len) {
- int l = min(len - n, 16);
-
- hex_dump_to_buffer(databuf + n, l,
- 16, 1, printbuf,
- sizeof(printbuf),
- false);
- seq_printf(s, " : %s\n", printbuf);
- n += l;
- }
+ wil_seq_hexdump(s, databuf, len, " : ");
}
} else {
seq_puts(s, "\n");
@@ -268,7 +263,7 @@ static const struct file_operations fops_mbox = {
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
- iowrite32(val, (void __iomem *)data);
+ writel(val, (void __iomem *)data);
wmb(); /* make sure write propagated to HW */
return 0;
@@ -276,7 +271,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val)
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
- *val = ioread32((void __iomem *)data);
+ *val = readl((void __iomem *)data);
return 0;
}
@@ -306,7 +301,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
- wil_debugfs_ulong_set, "%llu\n");
+ wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
@@ -477,7 +472,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
- seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
@@ -867,22 +862,6 @@ static const struct file_operations fops_wmi = {
.open = simple_open,
};
-static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
- const char *prefix)
-{
- char printbuf[16 * 3 + 2];
- int i = 0;
-
- while (i < len) {
- int l = min(len - i, 16);
-
- hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
- sizeof(printbuf), false);
- seq_printf(s, "%s%s\n", prefix, printbuf);
- i += l;
- }
-}
-
static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
{
int i = 0;
@@ -1344,6 +1323,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
{
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+ unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
r->head_seq_num);
@@ -1353,7 +1333,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
else
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
- seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+ seq_printf(s,
+ "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old, drop_dup, drop_old,
+ r->ssn_last_drop);
}
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 0ea695ff98ad..7053b62ca8d3 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
wil_dbg_misc(wil, "%s()\n", __func__);
- tx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+ tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
- tx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+ tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
- rx_itr_en = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+ rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
- rx_itr_val =
- ioread32(wil->csr +
- HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+ rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 4428345e5a47..82aae2d705b4 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -22,16 +22,6 @@
MODULE_FIRMWARE(WIL_FW_NAME);
MODULE_FIRMWARE(WIL_FW2_NAME);
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 157f5ef384e0..d30657ee7e83 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
FW_ADDR_CHECK(dst, block[i].addr, "address");
- x = ioread32(dst);
+ x = readl(dst);
y = (x & m) | (v & ~m);
wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
"(old 0x%08x val 0x%08x mask 0x%08x)\n",
le32_to_cpu(block[i].addr), y, x, v, m);
- iowrite32(y, dst);
+ writel(y, dst);
wmb(); /* finish before processing next record */
}
@@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
{
unsigned delay = 0;
- iowrite32(a, gwa_addr);
- iowrite32(gw_cmd, gwa_cmd);
+ writel(a, gwa_addr);
+ writel(gw_cmd, gwa_cmd);
wmb(); /* finish before activate gw */
- iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+ writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
do {
udelay(1); /* typical time is few usec */
if (delay++ > 100) {
wil_err_fw(wil, "gw timeout\n");
return -EINVAL;
}
- } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+ } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
return 0;
}
@@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
i, a, v);
- iowrite32(v, gwa_val);
+ writel(v, gwa_val);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
@@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
sizeof(v), false);
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- iowrite32(v[k], gwa_val[k]);
+ writel(v[k], gwa_val[k]);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 28ffc18466c4..a371f036d054 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
- iowrite32(x, addr);
+ writel(x, addr);
}
#endif /* defined(CONFIG_WIL6210_ISR_COR) */
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
{
- u32 x = ioread32(addr);
+ u32 x = readl(addr);
wil_icr_clear(x, addr);
@@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr)
static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMS));
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
- iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
- HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
clear_bit(wil_status_irqen, wil->status);
}
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_TX, wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX);
}
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_RX, wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
{
- iowrite32(WIL6210_IMC_MISC, wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMC));
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_MISC);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
set_bit(wil_status_irqen, wil->status);
- iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
- HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
}
void wil_mask_irq(struct wil6210_priv *wil)
@@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICC));
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICC));
- iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, ICC));
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
@@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil6210_unmask_irq_misc(wil);
}
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
return;
/* Disable and clear tx counter before (re)configuration */
- W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
- W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
wil->tx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_TX_CNT_CTL,
- BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+ BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear tx idle counter before (re)configuration */
- W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
- W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
wil->tx_interframe_timeout);
/* Configure TX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
- BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx counter before (re)configuration */
- W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
- W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
wil->rx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_RX_CNT_CTL,
- BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+ BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx idle counter before (re)configuration */
- W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
- W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
wil->rx_interframe_timeout);
/* Configure TX max burst duration timer to use usec units */
- W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
- BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}
-#undef W
-
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
u32 icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_rx = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ offsetof(struct RGF_ICR, IMV));
u32 icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_tx = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
u32 icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_misc = ioread32(wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, IMV));
+ u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ offsetof(struct RGF_ICR, IMV));
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
{
irqreturn_t rc = IRQ_HANDLED;
struct wil6210_priv *wil = cookie;
- u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
/**
* pseudo_cause is Clear-On-Read, no need to ACK
@@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
return rc;
}
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
- int rc;
- /*
- * IRQ's are in the following order:
- * - Tx
- * - Rx
- * - Misc
- */
-
- rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
- WIL_NAME"_tx", wil);
- if (rc)
- return rc;
-
- rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
- WIL_NAME"_rx", wil);
- if (rc)
- goto free0;
-
- rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
- wil6210_irq_misc_thread,
- IRQF_SHARED, WIL_NAME"_misc", wil);
- if (rc)
- goto free1;
-
- return 0;
- /* error branch */
-free1:
- free_irq(irq + 1, wil);
-free0:
- free_irq(irq, wil);
-
- return rc;
-}
-
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
- u32 x = ioread32(addr);
+ u32 x = readl(addr);
- iowrite32(x, addr);
+ writel(x, addr);
}
void wil6210_clear_irq(struct wil6210_priv *wil)
@@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
- wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+ wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
- if (wil->n_msi == 3)
- rc = wil6210_request_3msi(wil, irq);
- else
- rc = request_threaded_irq(irq, wil6210_hardirq,
- wil6210_thread_irq,
- wil->n_msi ? 0 : IRQF_SHARED,
- WIL_NAME, wil);
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ use_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
return rc;
}
@@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
wil_mask_irq(wil);
free_irq(irq, wil);
- if (wil->n_msi == 3) {
- free_irq(irq + 1, wil);
- free_irq(irq + 2, wil);
- }
}
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index e9c0673819c6..f7f948621951 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
- io.val = ioread32(a);
+ io.val = readl(a);
need_copy = true;
break;
case wil_mmio_write:
- iowrite32(io.val, a);
+ writel(io.val, a);
wmb(); /* make sure write propagated to HW */
break;
default:
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6ca6193ab8a6..2fb04c51da53 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,6 +21,7 @@
#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
+#include "boot_loader.h"
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x)
clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
- wil->recovery_state = fw_recovery_pending;
- schedule_work(&wil->fw_error_worker);
+ wil_fw_error_recovery(wil);
}
static int wil_wait_for_recovery(struct wil6210_priv *wil)
@@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil)
destroy_workqueue(wil->wmi_wq);
}
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
- W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
- W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
- W(RGF_USER_USER_CPU_0, 1);
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static int wil_target_reset(struct wil6210_priv *wil)
@@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
- S(RGF_HP_CTRL, BIT(15));
- S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
- S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+ wil_s(wil, RGF_HP_CTRL, BIT(15));
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_halt_cpu(wil);
/* clear all boot loader "ready" bits */
- W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
/* Clear Fw Download notification */
- C(RGF_USER_USAGE_6, BIT(0));
+ wil_c(wil, RGF_USER_USAGE_6, BIT(0));
- S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
/* XTAL stabilization should take about 3ms */
usleep_range(5000, 7000);
- x = R(RGF_CAF_PLL_LOCK_STATUS);
+ x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
wil_err(wil, "Xtal stabilization timeout\n"
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
return -ETIME;
}
/* switch 10k to XTAL*/
- C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
- W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+ /* reset A2 PCIE AHB */
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
do {
msleep(RST_DELAY);
- x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
x1 = x;
@@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
x);
return -ETIME;
}
- } while (x != BIT_BL_READY);
+ } while (x != BL_READY);
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
- S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
- BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+ wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+ BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
@@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- struct RGF_BL bl;
-
- wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
- le32_to_cpus(&bl.ready);
- le32_to_cpus(&bl.version);
- le32_to_cpus(&bl.rf_type);
- le32_to_cpus(&bl.baseband_type);
+ union {
+ struct bl_dedicated_registers_v0 bl0;
+ struct bl_dedicated_registers_v1 bl1;
+ } bl;
+ u32 bl_ver;
+ u8 *mac;
+ u16 rf_status;
+
+ wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+ sizeof(bl));
+ bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+ mac = bl.bl0.mac_address;
+
+ if (bl_ver == 0) {
+ le32_to_cpus(&bl.bl0.rf_type);
+ le32_to_cpus(&bl.bl0.baseband_type);
+ rf_status = 0; /* actually, unknown */
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl0.rf_type, bl.bl0.baseband_type);
+ wil_info(wil, "Boot Loader build unknown for struct v0\n");
+ } else {
+ le16_to_cpus(&bl.bl1.rf_type);
+ rf_status = le16_to_cpu(bl.bl1.rf_status);
+ le32_to_cpus(&bl.bl1.baseband_type);
+ le16_to_cpus(&bl.bl1.bl_version_subminor);
+ le16_to_cpus(&bl.bl1.bl_version_build);
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl1.rf_type, rf_status,
+ bl.bl1.baseband_type);
+ wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+ bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+ bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+ }
- if (!is_valid_ether_addr(bl.mac_address)) {
- wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "BL: Invalid MAC %pM\n", mac);
return -EINVAL;
}
- ether_addr_copy(ndev->perm_addr, bl.mac_address);
+ ether_addr_copy(ndev->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
- ether_addr_copy(ndev->dev_addr, bl.mac_address);
- wil_info(wil,
- "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
- bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (rf_status) { /* bad RF cable? */
+ wil_err(wil, "RF communication error 0x%04x",
+ rf_status);
+ return -EAGAIN;
+ }
return 0;
}
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+ u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+ u32 bl_ver = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+
+ if (bl_ver < 2)
+ return;
+
+ bl_assert_code = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_code));
+ bl_assert_blink = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_blink));
+ bl_magic_number = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_magic_number));
+
+ if (is_err) {
+ wil_err(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ } else {
+ wil_dbg_misc(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ }
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_dbg_misc(wil, "%s()\n", __func__);
- if (wil->hw_version == HW_VER_UNKNOWN)
- return -ENODEV;
-
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
@@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return 0;
}
+ if (wil->hw_version == HW_VER_UNKNOWN)
+ return -ENODEV;
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
@@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
+ wil_bl_crash_info(wil, false);
rc = wil_target_reset(wil);
wil_rx_fini(wil);
- if (rc)
+ if (rc) {
+ wil_bl_crash_info(wil, true);
return rc;
+ }
rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+ rc = 0;
if (rc)
return rc;
@@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
/* Mark FW as loaded from host */
- S(RGF_USER_USAGE_6, 1);
+ wil_s(wil, RGF_USER_USAGE_6, 1);
/* clear any interrupts which on-card-firmware
* may have set
@@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil6210_clear_irq(wil);
/* CAF_ICR - clear and mask */
/* it is W1C, clear by writing back same value */
- S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
- W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
wil_release_cpu(wil);
}
@@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
-#undef R
-#undef W
-#undef S
-#undef C
-
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ef18ace110f..e3b3c8fb4605 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev)
wil_set_ethtoolops(ndev);
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_SG | NETIF_F_GRO;
+ NETIF_F_SG | NETIF_F_GRO |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXHASH;
+
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aa3ecc607ca3..feff1ef10fb3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,16 +21,14 @@
#include "wil6210.h"
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
- " Use MSI interrupt: "
- "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
- u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+ u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
@@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
void wil_disable_irq(struct wil6210_priv *wil)
{
- int irq = wil->pdev->irq;
-
- disable_irq(irq);
- if (wil->n_msi == 3) {
- disable_irq(irq + 1);
- disable_irq(irq + 2);
- }
+ disable_irq(wil->pdev->irq);
}
void wil_enable_irq(struct wil6210_priv *wil)
{
- int irq = wil->pdev->irq;
-
- enable_irq(irq);
- if (wil->n_msi == 3) {
- enable_irq(irq + 1);
- enable_irq(irq + 2);
- }
+ enable_irq(wil->pdev->irq);
}
/* Bus ops */
@@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
+ bool _use_msi = use_msi;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
pci_set_master(pdev);
- /*
- * how many MSI interrupts to request?
- */
- switch (use_msi) {
- case 3:
- case 1:
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- break;
- case 0:
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
- break;
- default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
- use_msi = 1;
- }
-
- if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- use_msi = 1;
- }
+ wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
- if (use_msi == 1 && pci_enable_msi(pdev)) {
+ if (use_msi && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
- use_msi = 0;
+ _use_msi = false;
}
- wil->n_msi = use_msi;
-
- if ((wil->n_msi == 0) && msi_only) {
+ if (!_use_msi && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
- rc = wil6210_init_irq(wil, pdev->irq);
+ rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
if (rc)
goto stop_master;
@@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ rc = wil_can_suspend(wil, is_runtime);
+ if (rc)
+ goto out;
+
+ rc = wil_suspend(wil, is_runtime);
+ if (rc)
+ goto out;
+
+ /* TODO: how do I bring the card into a low power state? */
+
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+ return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ /* allow master */
+ pci_set_master(pdev);
+
+ rc = wil_resume(wil, is_runtime);
+ if (rc)
+ pci_clear_master(pdev);
+
+ return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+ return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+ return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
static struct pci_driver wil6210_driver = {
.probe = wil_pcie_probe,
.remove = wil_pcie_remove,
.id_table = wil6210_pcie_ids,
.name = WIL_NAME,
+ .driver = {
+ .pm = &wil6210_pm_ops,
+ },
};
static int __init wil6210_driver_init(void)
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644
index 000000000000..0b7ecbcac19c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ /* AP-like interface - can't suspend */
+ default:
+ wil_dbg_pm(wil, "AP-like interface\n");
+ rc = -EBUSY;
+ break;
+ }
+
+ wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+ is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ /* if netif up, hardware is alive, shut it down */
+ if (ndev->flags & IFF_UP) {
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down : %d\n", rc);
+ goto out;
+ }
+ }
+
+ if (wil->platform_ops.suspend)
+ rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+ wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ is_runtime ? "runtime" : "system", rc);
+ return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_pm(wil, "%s(%s)\n", __func__,
+ is_runtime ? "runtime" : "system");
+
+ if (wil->platform_ops.resume) {
+ rc = wil->platform_ops.resume(wil->platform_handle);
+ if (rc) {
+ wil_err(wil, "platform_ops.resume : %d\n", rc);
+ goto out;
+ }
+ }
+
+ /* if netif up, bring hardware up.
+ * During open(), IFF_UP is set after the actual device method
+ * invocation. This prevents a recursive call to wil_up()
+ */
+ if (ndev->flags & IFF_UP)
+ rc = wil_up(wil);
+
+out:
+ wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ is_runtime ? "runtime" : "system", rc);
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index ca10dcf0986e..9238c1ac23dd 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
goto out;
}
+ r->total++;
hseq = r->head_seq_num;
/** Due to the race between WMI events, where BACK establishment
@@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
r->ssn_last_drop = seq;
+ r->drop_old++;
+ wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+ seq, r->head_seq_num);
dev_kfree_skb(skb);
goto out;
}
@@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
/* check if we already stored this frame */
if (r->reorder_buf[index]) {
+ r->drop_dup++;
+ wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
dev_kfree_skb(skb);
goto out;
}
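The new total/drop_dup/drop_old counters hang off the 12-bit sequence-number window checks (seq_less() against head_seq_num). Below is a standalone sketch of that modular comparison; the SEQ_MODULO constants follow the 802.11 sequence-number space, and while the helper name mirrors the driver's, the code here is illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SEQ_MODULO 0x1000	/* 802.11 sequence numbers are 12 bits */
#define SEQ_MASK   (SEQ_MODULO - 1)

/* true if sq1 is "older" than sq2 in the modular sequence space */
static bool seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

int main(void)
{
	assert(seq_less(0x005, 0x006));		/* plainly old */
	assert(seq_less(0xfff, 0x002));		/* old across the wrap */
	assert(!seq_less(0x002, 0xfff));	/* new across the wrap */
	return 0;
}

A frame that tests older than head is what drop_old now accounts for; a frame landing on an occupied reorder slot increments drop_dup.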
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index aa20af86e1d6..6229110d558a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
break;
}
}
- iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+ wil_w(wil, v->hwtail, v->swtail);
return rc;
}
@@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_DROP] = "GRO_DROP",
};
+ if (ndev->features & NETIF_F_RXHASH)
+ /* fake L4 hash to ensure it won't be re-calculated later;
+ * set the hash to any non-zero value to activate the RPS
+ * mechanism - the core will be chosen according to the
+ * user-level RPS configuration.
+ */
+ skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
+
skb_orphan(skb);
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
- d->mac.d[2] |= ((nr_frags + 1) <<
- MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
- struct vring_tx_desc *d,
- struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+ struct sk_buff *skb,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len)
{
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+ d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+ /* Setup TSO: bit and desc type */
+ d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+ (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+ d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+ d->dma.ip_length = skb_net_hdr_len;
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note, if d==NULL, the function only returns the protocol result.
+ *
+ * It is very similar to wil_tx_desc_offload_setup_tso above; the copy
+ * is kept separate ("if unrolling") to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+ struct sk_buff *skb)
+{
int protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
return 0;
}
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+ d->dma.d0 |= wil_tso_type_lst <<
+ DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ /* point to descriptors in shared memory */
+ volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+ *_first_desc = NULL;
+
+ /* pointers to shadow descriptors */
+ struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+ *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+ *first_desc = &first_desc_mem;
+
+ /* pointer to shadow descriptors' context */
+ struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+ int descs_used = 0; /* total number of used descriptors */
+ int sg_desc_cnt = 0; /* number of descriptors for current mss*/
+
+ u32 swhead = vring->swhead;
+ int used, avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 1;
+ int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
+ int f, len, hdrlen, headlen;
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ uint i = swhead;
+ dma_addr_t pa;
+ const skb_frag_t *frag = NULL;
+ int rem_data = mss;
+ int lenmss;
+ bool hdr_compensation_need = true;
+ int desc_tso_type = wil_tso_type_first;
+ bool is_ipv4;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int gso_type;
+
+ wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+ __func__, skb->len, vring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ /* A typical 4K page holds 3-4 payloads; we assume each fragment
+ * is a full payload, that's how min_desc_required has been
+ * calculated. In reality we might need more or fewer descriptors;
+ * this is only an initial check.
+ */
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ vring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+ /* Header Length = MAC header len + IP header len + TCP header len */
+ hdrlen = ETH_HLEN +
+ (int)skb_network_header_len(skb) +
+ tcp_hdrlen(skb);
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ /* TCP v4, zero out the IP length and IPv4 checksum fields
+ * as required by the offloading doc
+ */
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ /* TCP v6, zero out the payload length */
+ ipv6_hdr(skb)->payload_len = 0;
+ is_ipv4 = false;
+ break;
+ default:
+ /* other than TCPv4 or TCPv6 types are not supported for TSO.
+ * It is also illegal for both to be set simultaneously
+ */
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* tcp header length and skb network header length are fixed for all
+ * of the packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ _hdr_desc = &vring->va[i].tx;
+
+ pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb head DMA map error\n");
+ goto err_exit;
+ }
+
+ wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+ wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len);
+ wil_tx_last_desc(hdr_desc);
+
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ hdr_ctx = &vring->ctx[i];
+
+ descs_used++;
+ headlen = skb_headlen(skb) - hdrlen;
+
+ for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+ if (headlen) {
+ len = headlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+ len);
+ } else {
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+ }
+
+ while (len) {
+ wil_dbg_txrx(wil,
+ "TSO: len %d, rem_data %d, descs_used %d\n",
+ len, rem_data, descs_used);
+
+ if (descs_used == avail) {
+ wil_err(wil, "TSO: ring overflow\n");
+ goto dma_error;
+ }
+
+ lenmss = min_t(int, rem_data, len);
+ i = (swhead + descs_used) % vring->size;
+ wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+ if (!headlen) {
+ pa = skb_frag_dma_map(dev, frag,
+ frag->size - len, lenmss,
+ DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
+ } else {
+ pa = dma_map_single(dev,
+ skb->data +
+ skb_headlen(skb) - headlen,
+ lenmss,
+ DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ headlen -= lenmss;
+ }
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+
+ _desc = &vring->va[i].tx;
+
+ if (!_first_desc) {
+ _first_desc = _desc;
+ first_ctx = &vring->ctx[i];
+ d = first_desc;
+ } else {
+ d = &desc_mem;
+ }
+
+ wil_tx_desc_map(d, pa, lenmss, vring_index);
+ wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+ is_ipv4, tcp_hdr_len,
+ skb_net_hdr_len);
+
+ /* use tso_type_first only once */
+ desc_tso_type = wil_tso_type_mid;
+
+ descs_used++; /* desc used so far */
+ sg_desc_cnt++; /* desc used for this segment */
+ len -= lenmss;
+ rem_data -= lenmss;
+
+ wil_dbg_txrx(wil,
+ "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+ len, rem_data, descs_used, sg_desc_cnt);
+
+ /* Close the segment if we reached mss size or the last frag */
+ if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+ if (hdr_compensation_need) {
+ /* first segment include hdr desc for
+ * release
+ */
+ hdr_ctx->nr_frags = sg_desc_cnt;
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt +
+ 1);
+ hdr_compensation_need = false;
+ } else {
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt);
+ }
+ first_ctx->nr_frags = sg_desc_cnt - 1;
+
+ wil_tx_last_desc(d);
+
+ /* first descriptor may also be the last
+ * for this mss - make sure not to copy
+ * it twice
+ */
+ if (first_desc != d)
+ *_first_desc = *first_desc;
+
+ /* last descriptor will be copied at the end
+ * of this TSO processing
+ */
+ if (f < nr_frags - 1 || len > 0)
+ *_desc = *d;
+
+ rem_data = mss;
+ _first_desc = NULL;
+ sg_desc_cnt = 0;
+ } else if (first_desc != d) /* update mid descriptor */
+ *_desc = *d;
+ }
+ }
+
+ /* first descriptor may also be the last.
+ * in this case d pointer is invalid
+ */
+ if (_first_desc == _desc)
+ d = first_desc;
+
+ /* Last data descriptor */
+ wil_set_tx_desc_last_tso(d);
+ *_desc = *d;
+
+ /* Fill the total number of descriptors in first desc (hdr)*/
+ wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+ *_hdr_desc = *hdr_desc;
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
+ /* performance monitoring */
+ used = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ vring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, vring->hwtail, vring->swhead);
+ return 0;
+
+dma_error:
+ wil_err(wil, "TSO: DMA map page error\n");
+ while (descs_used > 0) {
+ struct wil_ctx *ctx;
+
+ i = (swhead + descs_used - 1) % vring->size;
+ d = &desc_mem;
+ _desc = &vring->va[i].tx;
+ *d = *_desc;
+ _desc->dma.status = TX_DMA_STATUS_DU;
+ ctx = &vring->ctx[i];
+ wil_txdesc_unmap(dev, d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+
+err_exit:
+ return -EINVAL;
+}
+
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
@@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+ __func__, skb->len, vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
}
/* Process TCP/UDP checksum offloading */
- if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+ if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index);
goto dma_error;
}
vring->ctx[i].nr_frags = nr_frags;
- wil_tx_desc_set_nr_frags(d, nr_frags);
+ wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
for (; f < nr_frags; f++) {
@@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
- wil_tx_desc_offload_cksum_set(wil, d, skb);
+ wil_tx_desc_offload_setup(d, skb);
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
- iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, vring->hwtail, vring->swhead);
return 0;
dma_error:
@@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
int rc;
spin_lock(&txdata->lock);
- rc = __wil_tx_vring(wil, vring, skb);
+
+ rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+ (wil, vring, skb);
+
spin_unlock(&txdata->lock);
+
return rc;
}
@@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
* For the fragmented skb, HW will set DU bit only for the
- * last fragment. look for it
+ * last fragment; look for it.
+ * In TSO, the first DU will include the hdr desc
*/
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
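The heart of __wil_tx_vring_tso() is the carving loop: every descriptor takes lenmss = min(rem_data, len) bytes, and a segment closes whenever rem_data drops to zero. The sketch below models only that carving and the hdr/first/mid/last typing from the wil_tso_type_* enum added in txrx.h below; DMA mapping, the descriptor writes, and the nr_frags bookkeeping are left out:

#include <stdio.h>

enum { tso_hdr = 0, tso_first = 1, tso_mid = 2, tso_last = 3 };

/* Carve 'len' payload bytes into mss-sized segments the way the
 * TSO loop does; returns the number of descriptors consumed.
 */
static int carve(int len, int mss)
{
	int rem_data = mss, descs = 1;	/* desc 0 carries the headers */
	int type = tso_first;

	printf("desc 0: type %d (header)\n", tso_hdr);
	while (len) {
		int lenmss = rem_data < len ? rem_data : len;

		len -= lenmss;
		rem_data -= lenmss;
		printf("desc %d: type %d, %d bytes\n", descs++, type, lenmss);
		type = tso_mid;		/* tso_first is used only once */
		if (rem_data == 0)
			rem_data = mss;	/* segment closed, open a new one */
	}
	printf("last data desc re-marked as type %d\n", tso_last);
	return descs;
}

int main(void)
{
	carve(3000, 1460);	/* two full segments plus a short tail */
	return 0;
}

This also shows why the initial nr_frags + 1 availability check is only an estimate: a fragment larger than mss consumes several descriptors, which is what the in-loop descs_used == avail overflow check catches.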
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 0c4638487c74..82a8f9a030e7 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -291,6 +291,14 @@ struct vring_tx_dma {
__le16 length;
} __packed;
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+ wil_tso_type_hdr = 0,
+ wil_tso_type_first = 1,
+ wil_tso_type_mid = 2,
+ wil_tso_type_lst = 3,
+};
+
/* Rx descriptor - MAC part
* [dword 0]
* bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 275355d46a36..dd4ea926b8e3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -127,16 +127,6 @@ struct RGF_ICR {
u32 IMC; /* Mask Clear, write 1 to clear */
} __packed;
-struct RGF_BL {
- u32 ready; /* 0x880A3C bit [0] */
-#define BIT_BL_READY BIT(0)
- u32 version; /* 0x880A40 version of the BL struct */
- u32 rf_type; /* 0x880A44 ID of the connected RF */
- u32 baseband_type; /* 0x880A48 ID of the baseband */
- u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
- u8 pad[2];
-} __packed;
-
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
@@ -262,9 +252,8 @@ enum {
};
/* popular locations */
-#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
- offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX HOSTADDR(RGF_MBOX)
#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
/* ISR register bits */
@@ -434,12 +423,12 @@ struct pci_dev;
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
* @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true when this buffer used 1-st time
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u16 ssn_last_drop;
+ unsigned long long total; /* frames processed */
+ unsigned long long drop_dup;
+ unsigned long long drop_old;
u8 dialog_token;
bool first_time; /* is it 1-st time this buffer used? */
};
@@ -543,7 +535,6 @@ struct pmc_ctx {
struct wil6210_priv {
struct pci_dev *pdev;
- int n_msi;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
@@ -559,6 +550,8 @@ struct wil6210_priv {
/* profile */
u32 monitor_flags;
u32 privacy; /* secure connection? */
+ u8 hidden_ssid; /* relevant in AP mode */
+ u16 channel; /* relevant in AP mode */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
/* interrupt moderation */
@@ -654,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+ return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ writel(val, wil->csr + HOSTADDR(reg));
+ wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
@@ -744,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work);
void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
@@ -796,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
#endif /* __WIL6210_H__ */
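The four accessors above replace the per-file R/W/S/C macros with one shared implementation. Here is a userspace model of their semantics over an in-memory register file; the mock_* names are illustrative, plain volatile stores stand in for readl()/writel(), and there is no write barrier here:

#include <assert.h>
#include <stdint.h>

static volatile uint32_t regs[16];	/* stand-in for the CSR window */

static uint32_t mock_r(uint32_t reg)		 { return regs[reg]; }
static void	mock_w(uint32_t reg, uint32_t v) { regs[reg] = v; }

/* set = read, OR, write */
static void mock_s(uint32_t reg, uint32_t val)
{
	mock_w(reg, mock_r(reg) | val);
}

/* clear = read, AND with inverted, write */
static void mock_c(uint32_t reg, uint32_t val)
{
	mock_w(reg, mock_r(reg) & ~val);
}

int main(void)
{
	mock_w(3, 0x0f0);
	mock_s(3, 0x001);	/* -> 0x0f1 */
	mock_c(3, 0x0f0);	/* -> 0x001 */
	assert(mock_r(3) == 0x001);
	return 0;
}

Like the macros they replace, the set/clear helpers are plain read-modify-write sequences, not atomic operations; callers are expected to serialize access to a given register.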
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index de15f1422fe9..2e831bf20117 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "linux/device.h"
+#include <linux/device.h>
#include "wil_platform.h"
int __init wil_platform_modinit(void)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c759759afbb2..2f35d4c51f34 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
/* wait till FW finish with previous command */
for (retry = 5; retry > 0; retry--) {
- r->tail = ioread32(wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, tx.tail));
+ r->tail = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
if (next_head != r->tail)
break;
msleep(20);
@@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
/* mark entry as full */
- iowrite32(1, wil->csr + HOSTADDR(r->head) +
- offsetof(struct wil6210_mbox_ring_desc, sync));
+ wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
/* advance next ptr */
- iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, tx.head));
+ wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+ r->head = next_head);
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
- iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+ wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+ SW_INT_MBOX);
return 0;
}
@@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
- int ch_no = data->info.channel+1;
- u32 freq = ieee80211_channel_to_frequency(ch_no,
- IEEE80211_BAND_60GHZ);
- struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
- s32 signal = data->info.sqi;
- __le16 fc = rx_mgmt_frame->frame_control;
- u32 d_len = le32_to_cpu(data->info.len);
- u16 d_status = le16_to_cpu(data->info.status);
-
- wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+ int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ u16 d_status;
+
+ if (flen < 0) {
+ wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "MGMT Rx: length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = data->info.sqi;
+ d_status = le16_to_cpu(data->info.status);
+ fc = rx_mgmt_frame->frame_control;
+
+ wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.snr,
data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
@@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
}
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_tx_mgmt_packet_event *data = d;
+ struct ieee80211_mgmt *mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+ wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+ flen, true);
+}
+
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
@@ -659,6 +692,7 @@ static const struct {
{WMI_READY_EVENTID, wmi_evt_ready},
{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
@@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
u16 len;
bool q;
- r->head = ioread32(wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, rx.head));
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail == r->head)
break;
@@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
cmd = (void *)&evt->event.wmi;
wil_memcpy_fromio_32(cmd, src, len);
/* mark entry as empty */
- iowrite32(0, wil->csr + HOSTADDR(r->tail) +
- offsetof(struct wil6210_mbox_ring_desc, sync));
+ wil_w(wil, r->tail +
+ offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
/* advance tail */
r->tail = r->base + ((r->tail - r->base +
sizeof(struct wil6210_mbox_ring_desc)) % r->size);
- iowrite32(r->tail, wil->csr + HOST_MBOX +
- offsetof(struct wil6210_mbox_ctl, rx.tail));
+ wil_w(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
/* add to the pending list */
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -772,7 +806,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
u16 reply_id, void *reply, u8 reply_size, int to_msec)
{
int rc;
- int remain;
+ unsigned long remain;
mutex_lock(&wil->wmi_mutex);
@@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
{
+ static const char *const names[] = {
+ [WMI_FRAME_BEACON] = "BEACON",
+ [WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
+ [WMI_FRAME_PROBE_RESP] = "PROBE_RESP",
+ [WMI_FRAME_ASSOC_REQ] = "ASSOC_REQ",
+ [WMI_FRAME_ASSOC_RESP] = "ASSOC_RESP",
+ };
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (!ie)
ie_len = 0;
@@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
kfree(cmd);
+out:
+ if (rc) {
+ const char *name = type < ARRAY_SIZE(names) ?
+ names[type] : "??";
+ wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+ }
return rc;
}
@@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
{
+ int rc;
+ u16 reason_code;
struct wmi_disconnect_sta_cmd cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
ether_addr_copy(cmd.dst_mac, mac);
wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
- return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+ rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+ WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+ /* failure to disconnect in reasonable time treated as FW error */
+ if (rc) {
+ wil_fw_error_recovery(wil);
+ return rc;
+ }
+
+ /* call event handler manually after processing wmi_call,
+ * to avoid deadlock - disconnect event handler acquires wil->mutex
+ * while it is already held here
+ */
+ reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ reply.evt.bssid, reason_code,
+ reply.evt.disconnect_reason);
+
+ wil->sinfo_gen++;
+ wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+ return 0;
}
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
/* search for handler */
if (!wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi))) {
- wil_err(wil, "Unhandled event 0x%04x\n", id);
+ wil_info(wil, "Unhandled event 0x%04x\n", id);
}
} else {
wil_err(wil, "Unknown event type\n");
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 916123a3d74e..a335f94c72ff 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
b43_lo_write(dev, &cal->ctl);
}
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 3b27e20eff80..7b4df3883bc2 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
void b43_lo_g_cleanup(struct b43_wldev *dev);
void b43_lo_g_init(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 727ce6edb4b3..462310e6e88f 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
phy->rev == 1) {
//TODO: implement rev1 workaround
}
- b43_lo_g_maintanance_work(dev);
+ b43_lo_g_maintenance_work(dev);
b43_mac_enable(dev);
}
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
switch (phy->rev) {
case 6:
case 5:
- if (sprom->fem.ghz5.extpa_gain == 3)
+ if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
/* fall through */
case 4:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index d86d1f1f1c91..a293275c1b0b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -469,6 +469,36 @@ brcmf_find_wpsie(const u8 *parse, u32 len)
return NULL;
}
+static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif,
+ enum nl80211_iftype new_type)
+{
+ int iftype_num[NUM_NL80211_IFTYPES];
+ struct brcmf_cfg80211_vif *pos;
+
+ memset(&iftype_num[0], 0, sizeof(iftype_num));
+ list_for_each_entry(pos, &cfg->vif_list, list)
+ if (pos == vif)
+ iftype_num[new_type]++;
+ else
+ iftype_num[pos->wdev.iftype]++;
+
+ return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
+ enum nl80211_iftype new_type)
+{
+ int iftype_num[NUM_NL80211_IFTYPES];
+ struct brcmf_cfg80211_vif *pos;
+
+ memset(&iftype_num[0], 0, sizeof(iftype_num));
+ list_for_each_entry(pos, &cfg->vif_list, list)
+ iftype_num[pos->wdev.iftype]++;
+
+ iftype_num[new_type]++;
+ return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
struct brcmf_wsec_key_le *key_le)
@@ -663,8 +693,14 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
struct vif_params *params)
{
struct wireless_dev *wdev;
+ int err;
brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+ err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
+ if (err) {
+ brcmf_err("iface validation failed: err=%d\n", err);
+ return ERR_PTR(err);
+ }
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
@@ -823,8 +859,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
s32 ap = 0;
s32 err = 0;
- brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
-
+ brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+ err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
+ if (err) {
+ brcmf_err("iface validation failed: err=%d\n", err);
+ return err;
+ }
switch (type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
@@ -5695,63 +5735,132 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
}
};
+/**
+ * brcmf_setup_ifmodes() - determine interface modes and combinations.
+ *
+ * @wiphy: wiphy object.
+ * @ifp: interface object needed for feat module api.
+ *
+ * The interface modes and combinations are determined dynamically here
+ * based on firmware functionality.
+ *
+ * no p2p and no mbss:
+ *
+ * #STA <= 1, #AP <= 1, channels = 1, 2 total
+ *
+ * no p2p and mbss:
+ *
+ * #STA <= 1, #AP <= 1, channels = 1, 2 total
+ * #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, no mchan, and mbss:
+ *
+ * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
+ * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ * #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, mchan, and mbss:
+ *
+ * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ * #AP <= 4, matching BI, channels = 1, 4 total
+ */
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
struct ieee80211_iface_combination *combo = NULL;
- struct ieee80211_iface_limit *limits = NULL;
- int i = 0, max_iface_cnt;
+ struct ieee80211_iface_limit *c0_limits = NULL;
+ struct ieee80211_iface_limit *p2p_limits = NULL;
+ struct ieee80211_iface_limit *mbss_limits = NULL;
+ bool mbss, p2p;
+ int i, c, n_combos;
- combo = kzalloc(sizeof(*combo), GFP_KERNEL);
+ mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
+ p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+
+ n_combos = 1 + !!p2p + !!mbss;
+ combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
if (!combo)
goto err;
- limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL);
- if (!limits)
+ c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+ if (!c0_limits)
goto err;
+ if (p2p) {
+ p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+ if (!p2p_limits)
+ goto err;
+ }
+
+ if (mbss) {
+ mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+ if (!mbss_limits)
+ goto err;
+ }
+
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
- combo->num_different_channels = 2;
- else
- combo->num_different_channels = 1;
-
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
- limits[i].max = 1;
- limits[i++].types = BIT(NL80211_IFTYPE_STATION);
- limits[i].max = 4;
- limits[i++].types = BIT(NL80211_IFTYPE_AP);
- max_iface_cnt = 5;
- } else {
- limits[i].max = 2;
- limits[i++].types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP);
- max_iface_cnt = 2;
- }
-
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) {
+ c = 0;
+ i = 0;
+ combo[c].num_different_channels = 1;
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+ if (p2p) {
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+ combo[c].num_different_channels = 2;
wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE);
- limits[i].max = 1;
- limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
- limits[i].max = 1;
- limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
- max_iface_cnt += 2;
- }
- combo->max_interfaces = max_iface_cnt;
- combo->limits = limits;
- combo->n_limits = i;
-
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ } else {
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ }
+ combo[c].max_interfaces = i;
+ combo[c].n_limits = i;
+ combo[c].limits = c0_limits;
+
+ if (p2p) {
+ c++;
+ i = 0;
+ combo[c].num_different_channels = 1;
+ p2p_limits[i].max = 1;
+ p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+ p2p_limits[i].max = 1;
+ p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ p2p_limits[i].max = 1;
+ p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
+ p2p_limits[i].max = 1;
+ p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+ combo[c].max_interfaces = i;
+ combo[c].n_limits = i;
+ combo[c].limits = p2p_limits;
+ }
+
+ if (mbss) {
+ c++;
+ combo[c].beacon_int_infra_match = true;
+ combo[c].num_different_channels = 1;
+ mbss_limits[0].max = 4;
+ mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 4;
+ combo[c].n_limits = 1;
+ combo[c].limits = mbss_limits;
+ }
+ wiphy->n_iface_combinations = n_combos;
wiphy->iface_combinations = combo;
- wiphy->n_iface_combinations = 1;
return 0;
err:
- kfree(limits);
+ kfree(c0_limits);
+ kfree(p2p_limits);
+ kfree(mbss_limits);
kfree(combo);
return -ENOMEM;
}
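To make the combination matrix above concrete, here is a hedged sketch (not driver code) of what the first group reduces to when the firmware reports neither P2P nor MBSS: one STA plus one AP sharing a single channel.

	static const struct ieee80211_iface_limit example_limits[] = {
		{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
		{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	};

	static const struct ieee80211_iface_combination example_combo = {
		.num_different_channels = 1,
		.max_interfaces = 2,
		.limits = example_limits,
		.n_limits = ARRAY_SIZE(example_limits),
	};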
@@ -5785,7 +5894,10 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
+ const struct ieee80211_iface_combination *combo;
struct ieee80211_supported_band *band;
+ u16 max_interfaces = 0;
__le32 bandlist[3];
u32 n_bands;
int err, i;
@@ -5798,6 +5910,24 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (err)
return err;
+ for (i = 0, combo = wiphy->iface_combinations;
+ i < wiphy->n_iface_combinations; i++, combo++) {
+ max_interfaces = max(max_interfaces, combo->max_interfaces);
+ }
+
+ for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses);
+ i++) {
+ u8 *addr = drvr->addresses[i].addr;
+
+ memcpy(addr, drvr->mac, ETH_ALEN);
+ if (i) {
+ addr[0] |= BIT(1);
+ addr[ETH_ALEN - 1] ^= i;
+ }
+ }
+ wiphy->addresses = drvr->addresses;
+ wiphy->n_addresses = i;
+
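The loop above derives one address per potential interface from the device MAC: secondary addresses set the locally-administered bit and XOR the index into the last octet, so every virtual interface gets a unique yet related address. Distilled into a sketch, with base_mac and idx as illustrative names:

	u8 addr[ETH_ALEN];

	memcpy(addr, base_mac, ETH_ALEN);
	if (idx) {
		addr[0] |= BIT(1);		/* locally administered */
		addr[ETH_ALEN - 1] ^= idx;	/* unique per interface */
	}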
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = __wl_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
@@ -6059,11 +6189,15 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
static void brcmf_free_wiphy(struct wiphy *wiphy)
{
+ int i;
+
if (!wiphy)
return;
- if (wiphy->iface_combinations)
- kfree(wiphy->iface_combinations->limits);
+ if (wiphy->iface_combinations) {
+ for (i = 0; i < wiphy->n_iface_combinations; i++)
+ kfree(wiphy->iface_combinations[i].limits);
+ }
kfree(wiphy->iface_combinations);
if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index fd74a9c6e9ac..746304121cdb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -21,6 +21,7 @@
#ifndef BRCMFMAC_CORE_H
#define BRCMFMAC_CORE_H
+#include <net/cfg80211.h>
#include "fweh.h"
#define TOE_TX_CSUM_OL 0x00000001
@@ -118,6 +119,8 @@ struct brcmf_pub {
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
+ struct mac_address addresses[BRCMF_MAX_IFS];
+
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 743f16b6a072..971920f77b68 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/bcm47xx_nvram.h>
#include "debug.h"
#include "firmware.h"
@@ -426,18 +427,32 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
struct brcmf_fw *fwctx = ctx;
u32 nvram_length = 0;
void *nvram = NULL;
+ u8 *data = NULL;
+ size_t data_len;
+ bool raw_nvram;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
- if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
- goto fail;
+ if (fw && fw->data) {
+ data = (u8 *)fw->data;
+ data_len = fw->size;
+ raw_nvram = false;
+ } else {
+ data = bcm47xx_nvram_get_contents(&data_len);
+ if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+ raw_nvram = true;
+ }
- if (fw) {
- nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length,
+ if (data)
+ nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
fwctx->domain_nr, fwctx->bus_nr);
+
+ if (raw_nvram)
+ bcm47xx_nvram_release_contents(data);
+ if (fw)
release_firmware(fw);
- if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
- goto fail;
- }
+ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
kfree(fwctx);
@@ -473,15 +488,9 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
if (!ret)
return;
- /* when nvram is optional call .done() callback here */
- if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
- fwctx->done(fwctx->dev, fw, NULL, 0);
- kfree(fwctx);
- return;
- }
+ brcmf_fw_request_nvram_done(NULL, fwctx);
+ return;
- /* failed nvram request */
- release_firmware(fw);
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
device_release_driver(fwctx->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 59440631fec5..8d1ab4ab5be8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -194,11 +194,15 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
spin_lock_irqsave(&flow->block_lock, flags);
ring = flow->rings[flowid];
+ if (ring->blocked == blocked) {
+ spin_unlock_irqrestore(&flow->block_lock, flags);
+ return;
+ }
ifidx = brcmf_flowring_ifidx_get(flow, flowid);
currently_blocked = false;
for (i = 0; i < flow->nrofrings; i++) {
- if (flow->rings[i]) {
+ if ((flow->rings[i]) && (i != flowid)) {
ring = flow->rings[i];
if ((ring->status == RING_OPEN) &&
(brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
@@ -209,8 +213,8 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
}
}
}
- ring->blocked = blocked;
- if (currently_blocked == blocked) {
+ flow->rings[flowid]->blocked = blocked;
+ if (currently_blocked) {
spin_unlock_irqrestore(&flow->block_lock, flags);
return;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index cbf033f59109..1326898d608e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -85,7 +85,6 @@ struct brcmf_event;
BRCMF_ENUM_DEF(IF, 54) \
BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
BRCMF_ENUM_DEF(RSSI, 56) \
- BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
@@ -103,8 +102,7 @@ struct brcmf_event;
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
- BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -112,7 +110,11 @@ struct brcmf_event;
/* firmware event codes sent by the dongle */
enum brcmf_fweh_event_code {
BRCMF_FWEH_EVENT_ENUM_DEFLIST
- BRCMF_E_LAST
+ /* this determines the event mask length, which must match the
+ * minimum length check in the device firmware, so it is
+ * hard-coded here.
+ */
+ BRCMF_E_LAST = 139
};
#undef BRCMF_ENUM_DEF
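The pinned value matters because the event bitmask sent to firmware is sized from BRCMF_E_LAST. Assuming the same derivation the driver's fweh code uses, 139 events round up to an 18-byte mask:

	/* DIV_ROUND_UP(139, 8) == 18 bytes of event mask */
	#define EXAMPLE_EVENTING_MASK_LEN	DIV_ROUND_UP(BRCMF_E_LAST, 8)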
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 898c3801e658..7b2136c9badb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -1360,6 +1360,60 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
}
}
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct brcmf_commonring *commonring;
+ u16 i;
+ struct brcmf_flowring_ring *ring;
+ struct brcmf_flowring_hash *hash;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+ commonring->r_ptr, commonring->w_ptr, commonring->depth);
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+ seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
+ commonring->r_ptr, commonring->w_ptr, commonring->depth);
+ commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+ seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
+ commonring->r_ptr, commonring->w_ptr, commonring->depth);
+ commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+ seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
+ commonring->r_ptr, commonring->w_ptr, commonring->depth);
+ commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+ seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
+ commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+ seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+ BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+ seq_puts(seq, "Active flowrings:\n");
+ hash = msgbuf->flow->hash;
+ for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+ if (!msgbuf->flow->rings[i])
+ continue;
+ ring = msgbuf->flow->rings[i];
+ if (ring->status != RING_OPEN)
+ continue;
+ commonring = msgbuf->flowrings[i];
+ hash = &msgbuf->flow->hash[ring->hash_id];
+ seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+ " ifidx %u, fifo %u, da %pM\n",
+ i, commonring->r_ptr, commonring->w_ptr,
+ skb_queue_len(&ring->skblist), ring->blocked,
+ hash->ifidx, hash->fifo, hash->mac);
+ }
+
+ return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+ return 0;
+}
+#endif
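The new debugfs hook follows the usual seq_file pattern: a show function prints one snapshot and returns 0, and is registered at attach time via brcmf_debugfs_add_entry() as seen below. Skeleton of such a reader:

	static int example_stats_read(struct seq_file *seq, void *data)
	{
		/* print one snapshot; seq_file handles buffering and offsets */
		seq_printf(seq, "rings: %u\n", 0);
		return 0;
	}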
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
@@ -1460,6 +1514,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
spin_lock_init(&msgbuf->flowring_work_lock);
INIT_LIST_HEAD(&msgbuf->work_queue);
+ brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
return 0;
fail:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d36f5f3d931b..f990e3d0e696 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -2564,15 +2564,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
}
}
-static void atomic_orr(int val, atomic_t *v)
-{
- int old_val;
-
- old_val = atomic_read(v);
- while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
- old_val = atomic_read(v);
-}
-
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
struct brcmf_core *buscore;
@@ -2595,7 +2586,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
if (val) {
brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
- atomic_orr(val, &bus->intstatus);
+ atomic_or(val, &bus->intstatus);
}
return ret;
@@ -2712,7 +2703,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
/* Keep still-pending events for next scheduling */
if (intstatus)
- atomic_orr(intstatus, &bus->intstatus);
+ atomic_or(intstatus, &bus->intstatus);
brcmf_sdio_clrintr(bus);
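The deleted atomic_orr() was an open-coded version of the generic atomic_or() that is now available on all architectures: OR a value into an atomic_t without losing concurrent updates. Its cmpxchg loop, kept here as a reference sketch:

	static inline void example_atomic_or(int val, atomic_t *v)
	{
		int old;

		do {
			old = atomic_read(v);
		} while (atomic_cmpxchg(v, old, old | val) != old);
	}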
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index ab775a5d5b33..d2c5747e3ac9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1472,9 +1472,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
wl->timers = t;
#ifdef DEBUG
- t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
- if (t->name)
- strcpy(t->name, name);
+ t->name = kstrdup(name, GFP_ATOMIC);
#endif
return t;
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 7603546d2de3..29185aeccba8 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -467,7 +467,6 @@ static struct spi_driver spi_driver = {
.remove = cw1200_spi_disconnect,
.driver = {
.name = "cw1200_wlan_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &cw1200_pm_ops,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 01de1a3bf94e..80d4228ba754 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -865,7 +865,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
switch(type) {
case HOSTAP_INTERFACE_AP:
- dev->tx_queue_len = 0; /* use main radio device queue */
+ dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
dev->netdev_ops = &hostap_mgmt_netdev_ops;
dev->type = ARPHRD_IEEE80211;
dev->header_ops = &hostap_80211_ops;
@@ -874,7 +874,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
dev->netdev_ops = &hostap_master_ops;
break;
default:
- dev->tx_queue_len = 0; /* use main radio device queue */
+ dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
dev->netdev_ops = &hostap_netdev_ops;
}
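Background for the hostap change: a zero tx_queue_len no longer carries the special meaning of bypassing the qdisc; devices that want that behaviour set the IFF_NO_QUEUE private flag in their setup path instead. Minimal sketch:

	static void example_setup(struct net_device *dev)
	{
		dev->priv_flags |= IFF_NO_QUEUE;	/* use parent device's queue */
	}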
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 08eb229e7816..36818c7f30b9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
{
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
struct host_command cmd = {
.host_command = CARD_DISABLE_PHY_OFF,
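The old macro suffered integer truncation: HZ / 5000 evaluates to 0 jiffies for any HZ up to 5000, so the loop delay silently vanished. msecs_to_jiffies() keeps the intended pause HZ-independent:

	#define OLD_DELAY	(HZ / 5000)		/* 0 for HZ <= 5000 */
	#define NEW_DELAY	msecs_to_jiffies(50)	/* ~50 ms at any HZ */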
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index c6d78790cb0d..193947865efd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -746,7 +746,7 @@ struct ipw2100_priv {
#define IPW_REG_GPIO IPW_REG_DOMAIN_0_OFFSET + 0x0030
#define IPW_REG_FW_TYPE IPW_REG_DOMAIN_1_OFFSET + 0x0188
#define IPW_REG_FW_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x018C
-#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
+#define IPW_REG_FW_COMPATIBILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
#define IPW_REG_INDIRECT_ADDR_MASK 0x00FFFFFC
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 7f4cb692cc57..af1b3e6839fa 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr,
while (size && PAGE_SIZE - len) {
hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
+ PAGE_SIZE - len, true);
len = strlen(buf);
if (PAGE_SIZE - len)
buf[len++] = '\n';
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index 344010153196..908b9f4fef6f 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
eeprom_ver);
for (ofs = 0; ofs < eeprom_len; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+ ofs, ptr + ofs);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
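This nvm dump loop (and the identical one in dvm/debugfs.c further down) now leans on the %*ph printk extension, which renders a small buffer (at most 64 bytes) as space-separated hex and removes the manual hex_dump_to_buffer()/strlen() bookkeeping. Usage sketch:

	u8 row[16] = { 0xde, 0xad, 0xbe, 0xef };
	char line[80];

	/* yields "0x0000 de ad be ef 00 00 ...", 16 bytes from 'row' */
	scnprintf(line, sizeof(line), "0x%.4x %16ph\n", 0, row);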
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index c160dad03037..991def878881 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
void iwl_down(struct iwl_priv *priv);
void iwl_cancel_deferred_work(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);
@@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
@@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta);
@@ -480,7 +473,7 @@ do { \
} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
-extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1];
static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 0ffb6ff1a255..b15e44f8d1bd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
pos += scnprintf(buf + pos, buf_size - pos,
"NVM version: 0x%x\n", nvm_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+ ofs, ptr + ofs);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3811878ab9cd..0ba3e56d6015 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -669,6 +669,8 @@ struct iwl_priv {
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
+ struct napi_struct *napi;
+
struct list_head calib_results;
struct workqueue_struct *workqueue;
@@ -678,9 +680,8 @@ struct iwl_priv {
enum ieee80211_band band;
u8 valid_contexts;
- int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb);
struct iwl_notif_wait_data notif_wait;
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 1d2223df5cb0..ab45819c1fbb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
return need_update;
}
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
/* bt coex disabled */
- return 0;
+ return;
}
IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
/* FIXME: based on notification, adjust the prio_boost */
priv->bt_ci_compliance = coex->bt_ci_compliance;
- return 0;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 7acaa266b704..453f7c315ab5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv)
}
}
+ ret = iwl_trans_start_hw(priv->trans);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+ goto error;
+ }
+
ret = iwl_run_init_ucode(priv);
if (ret) {
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
goto error;
}
+ ret = iwl_trans_start_hw(priv->trans);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+ goto error;
+ }
+
ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
u32 error_id;
} err_info;
struct iwl_notification_wait status_wait;
- static const u8 status_cmd[] = {
+ static const u16 status_cmd[] = {
REPLY_WOWLAN_GET_STATUS,
};
struct iwlagn_wowlan_status status_data = {};
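The u8 to u16 change here (and in rxon.c and ucode.c below) tracks the notification-wait API: host command IDs grew to 16 bits so a group ID can ride in the upper byte. Illustrative encoding only, mirroring the iwl_cmd_header_wide tracing change later in this diff:

	static inline u16 example_wide_id(u8 group_id, u8 cmd_id)
	{
		return ((u16)group_id << 8) | cmd_id;
	}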
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 234e30f498b2..e7616f0ee6e8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2029,17 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
- ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -2052,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
- .napi_add = iwl_napi_add,
};
/*****************************************************************************
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 3bd7c86e90d9..cef921c1a623 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
/*
* Try to switch to new modulation mode from legacy
*/
-static int rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ int index)
{
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@ out:
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
- return 0;
-
}
/*
* Try to switch to new modulation mode from SISO
*/
-static int rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
-
- return 0;
}
/*
* Try to switch to new modulation mode from MIMO2
*/
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
@@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
- return 0;
-
}
/*
* Try to switch to new modulation mode from MIMO3
*/
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
- return 0;
+ return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
@@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
-
- return 0;
-
}
/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index debec963c610..4a45b0b594c7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
#define IWL_CMD_ENTRY(x) [x] = #x
-const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = {
IWL_CMD_ENTRY(REPLY_ALIVE),
IWL_CMD_ENTRY(REPLY_ERROR),
IWL_CMD_ENTRY(REPLY_ECHO),
@@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
*
******************************************************************************/
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
err_resp->cmd_id,
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_info));
- return 0;
}
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return 0;
+ return;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
@@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
- return 0;
}
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
if (!report->state) {
IWL_DEBUG_11H(priv,
"Spectrum Measure Notification: Start\n");
- return 0;
+ return;
}
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
- return 0;
}
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
- return 0;
}
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 __maybe_unused len = iwl_rx_packet_len(pkt);
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
- return 0;
}
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
- return 0;
}
/**
@@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
unsigned long stamp = jiffies;
const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
spin_unlock(&priv->statistics.lock);
- return 0;
+ return;
}
change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
priv->lib->temperature(priv);
spin_unlock(&priv->statistics.lock);
-
- return 0;
}
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
- iwlagn_rx_statistics(priv, rxb, cmd);
- return 0;
+
+ iwlagn_rx_statistics(priv, rxb);
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
- return 0;
}
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
if (!test_bit(STATUS_SCANNING, &priv->status))
iwl_init_sensitivity(priv);
}
- return 0;
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
priv->ampdu_ref++;
memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
- return 0;
}
/*
@@ -786,7 +763,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
+ ieee80211_rx_napi(priv->hw, skb, priv->napi);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
}
/* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
if (!priv->last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return 0;
+ return;
}
phy_res = &priv->last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
- return 0;
+ return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
- return 0;
+ return;
}
/* This will be used in several places later */
@@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
- return 0;
}
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
if (old_data)
kfree_rcu(old_data, rcu_head);
-
- return 0;
}
/**
@@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
- int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
handlers = priv->rx_handlers;
@@ -1102,12 +1073,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
iwlagn_bt_rx_handler_setup(priv);
}
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- int err = 0;
/*
* Do the notification wait before RX handlers so
@@ -1121,12 +1091,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
* rx_handlers table. See iwl_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
priv->rx_handlers_stats[pkt->hdr.cmd]++;
- err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+ priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
- return err;
}
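After this conversion the dispatch is a plain table of void handlers indexed by the command byte; there is no status to propagate any more. The shape of the mechanism, with assumed local names:

	void (*handlers[REPLY_MAX])(struct iwl_priv *priv,
				    struct iwl_rx_cmd_buffer *rxb);

	if (handlers[pkt->hdr.cmd])
		handlers[pkt->hdr.cmd](priv, rxb);	/* fire and forget */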
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6362ed..85ceceb34fcc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
- static const u8 deactivate_cmd[] = {
+ static const u16 deactivate_cmd[] = {
REPLY_WIPAN_DEACTIVATION_COMPLETE
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 43bef901e8f9..648159495bbc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
}
/* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
- return 0;
}
/* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
-
- return 0;
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- return 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
- return 0;
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 6ec86adbe4a1..0fa67d3b7235 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
return 0;
}
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
{
struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
- u8 sta_id = addsta->sta.sta_id;
- int ret = -EIO;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
+ IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- ret = iwl_sta_ucode_activate(priv, sta_id);
break;
case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
+ IWL_ERR(priv, "Adding station failed, no room in table.\n");
break;
case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv, "Adding station %d failed, no block ack "
- "resource.\n", sta_id);
+ IWL_ERR(priv,
+ "Adding station failed, no block ack resource.\n");
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
+ IWL_ERR(priv, "Attempting to modify non-existing station\n");
break;
default:
IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
break;
}
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
spin_unlock_bh(&priv->sta_lock);
-
- return ret;
}
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (!cmd)
- return 0;
-
- return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+ iwl_process_add_sta_resp(priv, pkt);
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
+ struct iwl_rx_packet *pkt;
+ struct iwl_add_sta_resp *add_sta_resp;
IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
@@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
- /*else the command was successfully sent in SYNC mode, need to free
- * the reply page */
- iwl_free_resp(&cmd);
+ pkt = cmd.resp_pkt;
+ add_sta_resp = (void *)pkt->data;
- if (cmd.handler_status)
- IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
- cmd.handler_status);
+ /* debug messages are printed in the handler */
+ if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+ spin_lock_bh(&priv->sta_lock);
+ ret = iwl_sta_ucode_activate(priv, sta_id);
+ spin_unlock_bh(&priv->sta_lock);
+ } else {
+ ret = -EIO;
+ }
- return cmd.handler_status;
+ iwl_free_resp(&cmd);
+
+ return ret;
}
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
struct iwl_rx_packet *pkt;
int ret;
struct iwl_rem_sta_cmd rm_sta_cmd;
+ struct iwl_rem_sta_resp *rem_sta_resp;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
return ret;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
+ rem_sta_resp = (void *)pkt->data;
- if (!ret) {
- struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
- switch (rem_sta_resp->status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_bh(&priv->sta_lock);
- iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_bh(&priv->sta_lock);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
+ switch (rem_sta_resp->status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_bh(&priv->sta_lock);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_bh(&priv->sta_lock);
}
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
}
+
iwl_free_resp(&cmd);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 275df12a6045..bddd19769035 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
}
}
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
skb = __skb_dequeue(&skbs);
ieee80211_tx_status(priv->hw, skb);
}
-
- return 0;
}
/**
@@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
if (scd_flow >= priv->cfg->base_params->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
- return 0;
+ return;
}
sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_bh(&priv->sta_lock);
- return 0;
+ return;
}
if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
spin_unlock_bh(&priv->sta_lock);
- return 0;
+ return;
}
__skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status(priv->hw, skb);
}
-
- return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 5244e43bfafb..931a8e4269ef 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -3,6 +3,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
- static const u8 alive_cmd[] = { REPLY_ALIVE };
+ static const u16 alive_cmd[] = { REPLY_ALIVE };
fw = iwl_get_ucode_image(priv, ucode_type);
if (WARN_ON(!fw))
@@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
int iwl_run_init_ucode(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
- static const u8 calib_complete[] = {
+ static const u16 calib_complete[] = {
CALIBRATION_RES_NOTIFICATION,
CALIBRATION_COMPLETE_NOTIFICATION
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index cc35f796d406..6951aba620eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,14 +69,14 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 15
+#define IWL7260_UCODE_API_MAX 17
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 10
+#define IWL7260_UCODE_API_MIN 12
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 72040cd0b979..197abe43ddc5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,13 +69,13 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 15
+#define IWL8000_UCODE_API_MAX 17
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 10
+#define IWL8000_UCODE_API_MIN 12
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@@ -97,8 +97,9 @@
#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO 28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO 21
+#define MAX_TX_AGG_SIZE_8260_SDIO 40
/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
@@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
+ .features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
@@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 08c14afeb148..939fa229c038 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
* mode set
* @d0i3: device uses d0i3 instead of d3
* @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@ struct iwl_cfg {
bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
+ netdev_features_t features;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index faa17f2e352a..543abeaffcf0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -200,6 +200,7 @@
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
@@ -210,6 +211,7 @@
CSR_INT_BIT_HW_ERR | \
CSR_INT_BIT_FH_TX | \
CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_PAGING | \
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
@@ -422,6 +424,7 @@ enum {
/* DRAM INT TABLE */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
index 04e6649340b8..71a78cede9b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
@@ -35,8 +35,8 @@
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
struct sk_buff *skb,
- void *data, size_t data_len),
- TP_ARGS(dev, skb, data, data_len),
+ u8 hdr_len, size_t data_len),
+ TP_ARGS(dev, skb, hdr_len, data_len),
TP_STRUCT__entry(
DEV_ENTRY
@@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
- memcpy(__get_dynamic_array(data), data, data_len);
+ skb_copy_bits(skb, hdr_len,
+ __get_dynamic_array(data), data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 948ce0802fa7..eb4b99a1c8cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -36,7 +36,7 @@
TRACE_EVENT(iwlwifi_dev_hcmd,
TP_PROTO(const struct device *dev,
struct iwl_host_cmd *cmd, u16 total_size,
- struct iwl_cmd_header *hdr),
+ struct iwl_cmd_header_wide *hdr),
TP_ARGS(dev, cmd, total_size, hdr),
TP_STRUCT__entry(
DEV_ENTRY
@@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
__field(u32, flags)
),
TP_fast_assign(
- int i, offset = sizeof(*hdr);
+ int i, offset = sizeof(struct iwl_cmd_header);
+
+ if (hdr->group_id)
+ offset = sizeof(struct iwl_cmd_header_wide);
DEV_ASSIGN;
__entry->flags = cmd->flags;
- memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+ memcpy(__get_dynamic_array(hcmd), hdr, offset);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
if (!cmd->len[i])
@@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
offset += cmd->len[i];
}
),
- TP_printk("[%s] hcmd %#.2x (%ssync)",
- __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+ TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+ ((u8 *)__get_dynamic_array(hcmd))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
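/*
 * A minimal standalone sketch (illustrative, not driver code) of the
 * header-length selection the tracepoint above performs. The struct
 * layouts mirror iwl_cmd_header (4 bytes) and iwl_cmd_header_wide
 * (8 bytes) defined later in iwl-trans.h; a zero group_id keeps the
 * legacy 4-byte copy for firmware that only speaks the narrow header.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr_narrow { uint8_t cmd, group_id; uint16_t sequence; };
struct hdr_wide   { uint8_t cmd, group_id; uint16_t sequence;
                    uint16_t length; uint8_t reserved, version; };

static size_t hcmd_trace_len(const struct hdr_wide *hdr)
{
	/* group 0 means the legacy (narrow) header layout */
	return hdr->group_id ? sizeof(struct hdr_wide)
			     : sizeof(struct hdr_narrow);
}

int main(void)
{
	struct hdr_wide h = { .cmd = 0x1c, .group_id = 0x01 };
	uint8_t buf[sizeof(h)];

	memcpy(buf, &h, hcmd_trace_len(&h)); /* copies 8 bytes here */
	printf("copied %zu bytes, hcmd %#.2x.%#.2x\n",
	       hcmd_trace_len(&h), buf[1], buf[0]);
	return 0;
}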
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6685259927f8..a86aa5bcee7d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+ const u32 len)
+{
+ struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+ struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+ if (len < sizeof(*fw_capa))
+ return -EINVAL;
+
+ capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+ capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+ capa->max_ap_cache_per_scan =
+ le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+ capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+ capa->max_scan_reporting_threshold =
+ le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+ capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+ capa->max_significant_change_aps =
+ le32_to_cpu(fw_capa->max_significant_change_aps);
+ capa->max_bssid_history_entries =
+ le32_to_cpu(fw_capa->max_bssid_history_entries);
+ return 0;
+}
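/*
 * The helper above follows a common TLV pattern: validate the length
 * first, then convert every little-endian wire field to host order.
 * A minimal sketch of that pattern with a hypothetical two-field TLV
 * (all names here are illustrative, not part of the driver):
 */
#include <stdint.h>
#include <string.h>

struct wire_capa { uint32_t a_le, b_le; } __attribute__((packed));
struct host_capa { uint32_t a, b; };

/* stand-in for le32_to_cpu(): build the host value byte by byte */
static uint32_t sketch_le32_to_cpu(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int store_capa(struct host_capa *out, const uint8_t *data, uint32_t len)
{
	struct wire_capa wire;

	if (len < sizeof(wire))	/* reject truncated TLVs before parsing */
		return -1;
	memcpy(&wire, data, sizeof(wire));
	out->a = sketch_le32_to_cpu(wire.a_le);
	out->b = sketch_le32_to_cpu(wire.b_le);
	return 0;
}

int main(void)
{
	const uint8_t tlv[8] = { 0x10, 0, 0, 0, 0x20, 0, 0, 0 };
	struct host_capa capa;

	return store_capa(&capa, tlv, sizeof(tlv)); /* capa.a=0x10, b=0x20 */
}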
+
/*
* Gets uCode section from tlv.
*/
@@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
size_t len = ucode_raw->size;
const u8 *data;
u32 tlv_len;
+ u32 usniffer_img;
enum iwl_ucode_tlv_type tlv_type;
const u8 *tlv_data;
char buildstr[25];
- u32 build;
+ u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_images = false;
bool usniffer_req = false;
+ bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_UCODE_REGULAR_USNIFFER,
tlv_len);
break;
+ case IWL_UCODE_TLV_PAGING:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+ IWL_DEBUG_FW(drv,
+ "Paging: paging enabled (size = %u bytes)\n",
+ paging_mem_size);
+
+ if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+ IWL_ERR(drv,
+ "Paging: driver supports up to %lu bytes for paging image\n",
+ MAX_PAGING_IMAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+ IWL_ERR(drv,
+ "Paging: image isn't multiple %lu\n",
+ FW_PAGING_SIZE);
+ return -EINVAL;
+ }
+
+ drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+ paging_mem_size;
+ usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+ drv->fw.img[usniffer_img].paging_mem_size =
+ paging_mem_size;
+ break;
case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.sdio_adma_addr =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+ if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+ goto invalid_tlv_len;
+ gscan_capa = true;
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
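/*
 * The masking check in the IWL_UCODE_TLV_PAGING case above relies on
 * FW_PAGING_SIZE being a power of two (4 KiB), so that
 * "size & (FW_PAGING_SIZE - 1)" is non-zero exactly when size is not a
 * whole number of pages. A small standalone illustration:
 */
#include <assert.h>

#define SKETCH_PAGE_SIZE 4096u	/* power of two, like FW_PAGING_SIZE */

static int is_page_multiple(unsigned int size)
{
	return (size & (SKETCH_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	assert(is_page_multiple(8 * SKETCH_PAGE_SIZE));
	assert(!is_page_multiple(SKETCH_PAGE_SIZE + 1));
	return 0;
}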
@@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
+ /*
+ * If ucode advertises that it supports GSCAN but GSCAN
+ * capabilities TLV is not present, warn and continue without GSCAN.
+ */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+ WARN(!gscan_capa,
+ "GSCAN is supported but capabilities TLV is unavailable\n"))
+ __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+ capa->_capa);
+
return 0;
invalid_tlv_len:
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 21302b6f2bfd..acc3d186c5c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
- while (chan->band != band && idx < n_channels)
+ while (idx < n_channels && chan->band != band)
chan = &data->channels[++idx];
sband->channels = &data->channels[idx];
- while (chan->band == band && idx < n_channels) {
+ while (idx < n_channels && chan->band == band) {
chan = &data->channels[++idx];
n++;
}
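/*
 * The fix above swaps the && operands so the bounds test runs before the
 * element is dereferenced; with the old order, the loop read
 * data->channels[n_channels] (one past the array) before noticing idx
 * had run out. A minimal illustration of the short-circuit guarantee:
 */
#include <stdio.h>

int main(void)
{
	int vals[3] = { 1, 1, 2 };
	int idx = 0, n = 3;

	/* safe: vals[idx] is only read once idx < n is already true */
	while (idx < n && vals[idx] == 1)
		idx++;
	printf("first non-1 entry at index %d\n", idx);
	return 0;
}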
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index e57dbd0ef2e1..af5b3201492c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -84,6 +84,8 @@
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ * &struct iwl_fw_error_dump_rb
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MEM = 9,
IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+ IWL_FW_ERROR_DUMP_RB = 11,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -223,6 +226,20 @@ struct iwl_fw_error_dump_mem {
};
/**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved, for alignment
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+ __le32 index;
+ __le32 rxq;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
* iwl_fw_error_next_data - advance fw error dump data pointer
* @data: previous data block
* Returns: next data block
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index a9b5ae4ebec0..84653e3d02ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWL_UCODE_TLV_PAGING = 32,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
};
struct iwl_ucode_tlv {
@@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
* @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- * regardless of the band or the number of the probes. FW will calculate
- * the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
* through the dedicated host command.
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -259,6 +259,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
* @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
* instead of 3.
+ * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ * (command version 3) that supports per-chain limits
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
@@ -266,7 +268,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
- IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
+ IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
@@ -274,6 +276,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
+ IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -284,6 +287,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +302,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -305,12 +310,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+ IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
@@ -320,10 +327,12 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
+ IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
};
/* The default calibrate table size if not specified by firmware file */
@@ -341,8 +350,9 @@ enum iwl_ucode_tlv_capa {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
@@ -412,6 +422,12 @@ enum iwl_fw_dbg_reg_operator {
PRPH_ASSIGN,
PRPH_SETBIT,
PRPH_CLEARBIT,
+
+ INDIRECT_ASSIGN,
+ INDIRECT_SETBIT,
+ INDIRECT_CLEARBIT,
+
+ PRPH_BLOCKBIT,
};
/**
@@ -485,10 +501,13 @@ struct iwl_fw_dbg_conf_hcmd {
*
* @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
* @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when the trigger occurs, collect
+ * only monitor data
*/
enum iwl_fw_dbg_trigger_mode {
IWL_FW_DBG_TRIGGER_START = BIT(0),
IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+ IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
};
/**
@@ -718,4 +737,28 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ * change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ * hold.
+ */
+struct iwl_fw_gscan_capabilities {
+ __le32 max_scan_cache_size;
+ __le32 max_scan_buckets;
+ __le32 max_ap_cache_per_scan;
+ __le32 max_rssi_sample_size;
+ __le32 max_scan_reporting_threshold;
+ __le32 max_hotlist_aps;
+ __le32 max_significant_change_aps;
+ __le32 max_bssid_history_entries;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 3e3c9d8b3c37..45e732150d28 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -133,6 +133,7 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
bool is_dual_cpus;
+ u32 paging_mem_size;
};
struct iwl_sf_region {
@@ -140,6 +141,48 @@ struct iwl_sf_region {
u32 size;
};
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging - FW paging block descriptor
+ * @fw_paging_phys: DMA (physical) address of the page block
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: size of the paging block, in bytes
+ */
+struct iwl_fw_paging {
+ dma_addr_t fw_paging_phys;
+ struct page *fw_paging_block;
+ u32 fw_paging_size;
+};
+
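/*
 * The exponents above compose as plain shifts; a sketch checking the
 * derived sizes (4 KiB page, 8-page group = 32 KiB block, 32 blocks =
 * 1 MiB maximum image) at compile time. The SK_ names are illustrative
 * stand-ins for the macros defined above:
 */
#define SK_PAGE_2_EXP		12
#define SK_FW_PAGING_SIZE	(1u << SK_PAGE_2_EXP)		/* 4096 */
#define SK_PAGES_PER_GROUP	(1u << 3)			/* 8 */
#define SK_BLOCK_SIZE		(SK_PAGES_PER_GROUP * SK_FW_PAGING_SIZE)
#define SK_BLOCKS_PER_IMAGE	(1u << 5)			/* 32 */
#define SK_MAX_IMAGE_SIZE	(SK_BLOCKS_PER_IMAGE * SK_BLOCK_SIZE)

_Static_assert(SK_BLOCK_SIZE == 32 * 1024, "block is 32 KiB");
_Static_assert(SK_MAX_IMAGE_SIZE == 1024 * 1024, "image caps at 1 MiB");

int main(void) { return 0; }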
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
@@ -151,6 +194,30 @@ struct iwl_fw_cscheme_list {
} __packed;
/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ * change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ * hold.
+ */
+struct iwl_gscan_capabilities {
+ u32 max_scan_cache_size;
+ u32 max_scan_buckets;
+ u32 max_ap_cache_per_scan;
+ u32 max_rssi_sample_size;
+ u32 max_scan_reporting_threshold;
+ u32 max_hotlist_aps;
+ u32 max_significant_change_aps;
+ u32 max_bssid_history_entries;
+};
+
+/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -208,6 +275,7 @@ struct iwl_fw {
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
+ struct iwl_gscan_capabilities gscan_capa;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index b5bc959b1dfe..6caf2affbbb5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
continue;
for (i = 0; i < w->n_cmds; i++) {
- if (w->cmds[i] == pkt->hdr.cmd) {
+ if (w->cmds[i] ==
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
found = true;
break;
}
@@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
- const u8 *cmds, int n_cmds,
+ const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data),
void *fn_data)
@@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
wait_entry->fn = fn;
wait_entry->fn_data = fn_data;
wait_entry->n_cmds = n_cmds;
- memcpy(wait_entry->cmds, cmds, n_cmds);
+ memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
wait_entry->triggered = false;
wait_entry->aborted = false;
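/*
 * The memcpy() fix above matters because memcpy() counts bytes, not
 * array elements; once cmds widened from u8 to u16, copying only n_cmds
 * bytes would silently truncate the command list in half. A sketch of
 * the element-size-safe idiom:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_cmds(uint16_t *dst, const uint16_t *src, int n_cmds)
{
	/* scale the byte count by the element size */
	memcpy(dst, src, n_cmds * sizeof(*dst));
}

int main(void)
{
	uint16_t src[2] = { 0x011c, 0x01ff }, dst[2];

	copy_cmds(dst, src, 2);
	printf("dst[1] = 0x%04x\n", dst[1]);	/* 0x01ff, nothing lost */
	return 0;
}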
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 95af97a6c2cf..dbe8234521de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@ struct iwl_notification_wait {
struct iwl_rx_packet *pkt, void *data);
void *fn_data;
- u8 cmds[MAX_NOTIF_CMDS];
+ u16 cmds[MAX_NOTIF_CMDS];
u8 n_cmds;
bool triggered, aborted;
};
@@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
struct iwl_notification_wait *wait_entry,
- const u8 *cmds, int n_cmds,
+ const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt, void *data),
void *fn_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
- /* The byte order is little endian 16 bit, meaning 214365 */
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
+ /*
+ * Store the MAC address from the MAO section.
+ * No byte swapping is required in the MAO section.
+ */
+ memcpy(data->hw_addr, hw_addr, ETH_ALEN);
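/*
 * The deleted lines above implemented the "214365" swizzle needed when
 * the address is read as little-endian 16-bit words; the MAO section
 * stores it byte-for-byte, so a plain memcpy suffices. A sketch of the
 * old swizzle, for comparison only:
 */
#include <stdint.h>

static void swizzle_16bit_le(uint8_t dst[6], const uint8_t src[6])
{
	int i;

	for (i = 0; i < 6; i += 2) {	/* swap each 16-bit pair */
		dst[i] = src[i + 1];
		dst[i + 1] = src[i];
	}
}

int main(void)
{
	const uint8_t mao[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint8_t out[6];

	swizzle_16bit_le(out, mao);	/* out = 22:11:44:33:66:55 */
	return 0;
}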
/*
* Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ce1cdd7604e8..b47fe9d6b97a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -116,10 +116,6 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- * but the higher layers need to know about it (in particular mac80211 to
- * to able to call the right NAPI RX functions); this function is needed
- * to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,13 +144,8 @@ struct iwl_op_mode_ops {
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
- int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
- void (*napi_add)(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight);
+ void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,11 +179,11 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
op_mode->ops->stop(op_mode);
}
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
- return op_mode->ops->rx(op_mode, rxb, cmd);
+ return op_mode->ops->rx(op_mode, napi, rxb);
}
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -260,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- if (!op_mode->ops->napi_add)
- return;
- op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5af1c776d2d4..3ab777f79e4f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -253,6 +253,7 @@
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18)
/* Context Data */
#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
@@ -291,6 +292,9 @@
/*********************** END TX SCHEDULER *************************************/
+/* tcp checksum offload */
+#define RX_EN_CSUM (0x00a00d88)
+
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
@@ -379,6 +383,8 @@ enum aux_misc_master1_en {
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
+#define SB_CPU_1_STATUS 0xA01E30
+#define SB_CPU_2_STATUS 0xA01E34
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
@@ -386,4 +392,10 @@ enum {
LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
};
+/* FW paging pass notification */
+#define LMPM_PAGE_PASS_NOTIF 0xA03824
+enum {
+ LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 87a230a7f4b6..c829c505e141 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -122,6 +122,40 @@
#define INDEX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+/*
+ * These functions retrieve specific information from the id field in
+ * the iwl_host_cmd struct, which contains the command id, the group id
+ * and the version of the command, and also build the id back up again.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+ return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+ return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+ return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP 1
+
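/*
 * A worked example of the id packing above: the 32-bit id is
 * opcode | group << 8 | version << 16, and WIDE_ID() keeps just the low
 * 16 bits for lookup. sk_cmd_id() is an illustrative stand-in for
 * iwl_cmd_id():
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sk_cmd_id(uint8_t op, uint8_t grp, uint8_t ver)
{
	return op + (grp << 8) + (ver << 16);
}

int main(void)
{
	uint32_t id = sk_cmd_id(0x1c, 0x01, 0x02);	/* 0x0002011c */

	assert((id & 0xFF) == 0x1c);		 /* iwl_cmd_opcode() */
	assert(((id & 0xFF00) >> 8) == 0x01);	 /* iwl_cmd_groupid() */
	assert(((id & 0xFF0000) >> 16) == 0x02); /* iwl_cmd_version() */
	assert((id & 0xFFFF) == ((0x01 << 8) | 0x1c)); /* WIDE_ID(1, 0x1c) */
	return 0;
}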
/**
* struct iwl_cmd_header
*
@@ -130,7 +164,7 @@
*/
struct iwl_cmd_header {
u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ u8 group_id;
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@ struct iwl_cmd_header {
__le16 sequence;
} __packed;
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears at the beginning of each command sent from
+ * the driver, and of each response/notification received from uCode.
+ * This is the wide version, which carries more information about the
+ * command, such as its length, version and type.
+ */
+struct iwl_cmd_header_wide {
+ u8 cmd;
+ u8 group_id;
+ __le16 sequence;
+ __le16 length;
+ u8 reserved;
+ u8 version;
+} __packed;
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
@@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
* @CMD_WAKE_UP_TRANS: The command response should wake up the trans
* (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ * check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
@@ -210,6 +259,8 @@ enum CMD_MODE {
CMD_SEND_IN_IDLE = BIT(4),
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
+
+ CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -222,8 +273,18 @@ enum CMD_MODE {
* aren't fully copied and use other TFD space.
*/
struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ union {
+ struct {
+ struct iwl_cmd_header hdr; /* uCode API */
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ };
+ struct {
+ struct iwl_cmd_header_wide hdr_wide;
+ u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+ sizeof(struct iwl_cmd_header_wide) +
+ sizeof(struct iwl_cmd_header)];
+ };
+ };
} __packed;
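/*
 * The payload_wide length is chosen so both union arms occupy the same
 * number of bytes, keeping TFD_MAX_PAYLOAD_SIZE stable: 4 + 320 = 324
 * for the legacy arm, 8 + (320 - 8 + 4) = 324 for the wide arm. A
 * compile-time check of that arithmetic (SK_ names are illustrative):
 */
#define SK_DEF_PAYLOAD	320
#define SK_HDR		4	/* sizeof(struct iwl_cmd_header) */
#define SK_HDR_WIDE	8	/* sizeof(struct iwl_cmd_header_wide) */

_Static_assert(SK_HDR + SK_DEF_PAYLOAD ==
	       SK_HDR_WIDE + (SK_DEF_PAYLOAD - SK_HDR_WIDE + SK_HDR),
	       "both union arms are the same size");

int main(void) { return 0; }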
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag {
* @resp_pkt: response packet, if %CMD_WANT_SKB was set
* @_rx_page_order: (internally used to free response packet)
* @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ * version and group as well
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
u32 _rx_page_order;
- int handler_status;
u32 flags;
+ u32 id;
u16 len[IWL_MAX_CMD_TBS_PER_TFD];
u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
- u8 id;
};
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +438,7 @@ enum iwl_trans_status {
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
* @command_names: array of command names, must be 256 entries
* (one for each command); for debugging only
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +456,7 @@ struct iwl_trans_config {
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
+ bool wide_cmd_header;
const char *const *command_names;
u32 sdio_adma_addr;
@@ -544,10 +605,12 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
- void (*suspend)(struct iwl_trans *trans);
+ int (*suspend)(struct iwl_trans *trans);
void (*resume)(struct iwl_trans *trans);
- struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+ struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv
+ *trigger);
};
/**
@@ -584,6 +647,8 @@ enum iwl_d0i3_mode {
* @cfg - pointer to the configuration
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -603,6 +668,12 @@ enum iwl_d0i3_mode {
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location where the FW will upload / download the
+ * pages from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging database; the pointer is set by
+ * the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ * downloading them to the FW. The buffer is allocated by the opmode
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -612,6 +683,7 @@ struct iwl_trans {
unsigned long status;
struct device *dev;
+ u32 max_skb_frags;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
@@ -639,6 +711,14 @@ struct iwl_trans {
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
+ /*
+ * Paging parameters - All of the parameters should be set by the
+ * opmode when paging is enabled
+ */
+ u32 paging_req_addr;
+ struct iwl_fw_paging *paging_db;
+ void *paging_download_buf;
+
enum iwl_d0i3_mode d0i3_mode;
bool wowlan_d0i3;
@@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
- trans->ops->d3_suspend(trans, test);
+ if (trans->ops->d3_suspend)
+ trans->ops->d3_suspend(trans, test);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
bool test)
{
might_sleep();
+ if (!trans->ops->d3_resume)
+ return 0;
+
return trans->ops->d3_resume(trans, status, test);
}
@@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
- if (trans->ops->suspend)
- trans->ops->suspend(trans);
+ if (!trans->ops->suspend)
+ return 0;
+
+ return trans->ops->suspend(trans);
}
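/*
 * The suspend hook above is now optional and returns a status; a missing
 * op defaults to success instead of being called unconditionally. A
 * minimal sketch of that guard-and-default pattern with a hypothetical
 * ops table:
 */
#include <stdio.h>

struct sk_ops { int (*suspend)(void *ctx); };

static int sk_suspend(const struct sk_ops *ops, void *ctx)
{
	if (!ops->suspend)	/* transport doesn't implement it: no-op */
		return 0;
	return ops->suspend(ctx);
}

int main(void)
{
	struct sk_ops ops = { .suspend = NULL };

	printf("suspend -> %d\n", sk_suspend(&ops, NULL));	/* 0 */
	return 0;
}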
static inline void iwl_trans_resume(struct iwl_trans *trans)
@@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
}
static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
if (!trans->ops->dump_data)
return NULL;
- return trans->ops->dump_data(trans);
+ return trans->ops->dump_data(trans, trigger);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 2d7c3ea3c4f8..8c2c3d13b092 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index b4737e296c92..e290ac67d975 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
}
}
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+ return;
+ }
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
-
- /*
- * This is an async handler for a notification, returning anything other
- * than 0 doesn't make sense even if HCMD failed.
- */
- return 0;
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+ return;
+ }
if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return 0;
+ return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
+ return;
if (ant_isolation == mvm->last_ant_isol)
- return 0;
+ return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
- return 0;
+ return;
mvm->last_corun_lut = lut;
@@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(cmd.corun_lut40));
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
- sizeof(cmd), &cmd);
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm,
+ "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 6ac6de2af977..61c07b05fcaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
iwl_mvm_bt_coex_notif_handle(mvm);
-
- /*
- * This is an async handler for a notification, returning anything other
- * than 0 doesn't make sense even if HCMD failed.
- */
- return 0;
}
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
- int ret;
u8 lut;
struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
};
if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return 0;
+ return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
+ return;
if (ant_isolation == mvm->last_ant_isol)
- return 0;
+ return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
- return 0;
+ return;
mvm->last_corun_lut = lut;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
- return 0;
+ return;
cmd.data[0] = bt_cmd;
bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
- ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (iwl_mvm_send_cmd(mvm, &cmd))
+ IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
kfree(bt_cmd);
- return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index beba375489f1..b8ee3121fbd2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -102,6 +102,7 @@
#define IWL_MVM_QUOTA_THRESHOLD 4
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
+#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4165d104e4c3..04264e417c1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_d3;
- static const u8 d3_notif[] = { D3_CONFIG_CMD };
+ static const u16 d3_notif[] = { D3_CONFIG_CMD };
int ret;
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@ remove_notif:
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ ret = iwl_trans_suspend(mvm->trans);
+ if (ret)
+ return ret;
- iwl_trans_suspend(mvm->trans);
mvm->trans->wowlan_d0i3 = wowlan->any;
if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
- int ret = iwl_mvm_enter_d0i3_sync(mvm);
+ ret = iwl_mvm_enter_d0i3_sync(mvm);
if (ret)
return ret;
@@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ iwl_trans_d3_suspend(mvm->trans, false);
+
return 0;
}
@@ -1935,28 +1942,59 @@ out:
return 1;
}
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ iwl_trans_resume(mvm->trans);
+
+ return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+ bool exit_now;
+ enum iwl_d3_status d3_status;
+
+ iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+ /*
+ * make sure to clear D0I3_DEFER_WAKEUP before
+ * calling iwl_trans_resume(), which might wait
+ * for d0i3 exit completion.
+ */
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now) {
+ IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+ _iwl_mvm_exit_d0i3(mvm);
+ }
iwl_trans_resume(mvm->trans);
- if (mvm->hw->wiphy->wowlan_config->any) {
- /* 'any' trigger means d0i3 usage */
- if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
- int ret = iwl_mvm_exit_d0i3(hw->priv);
+ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+ int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
- if (ret)
- return ret;
- /*
- * d0i3 exit will be deferred until reconfig_complete.
- * make sure there we are out of d0i3.
- */
- }
- return 0;
+ if (ret)
+ return ret;
+ /*
+ * d0i3 exit will be deferred until reconfig_complete;
+ * make sure we are really out of d0i3 by then.
+ */
}
+ return 0;
+}
- return __iwl_mvm_resume(mvm, false);
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* 'any' trigger means d0i3 was used */
+ if (hw->wiphy->wowlan_config->any)
+ return iwl_mvm_resume_d0i3(mvm);
+ else
+ return iwl_mvm_resume_d3(mvm);
}
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5c8a65de0e77..383a3162046c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -63,6 +63,7 @@
*
*****************************************************************************/
#include "mvm.h"
+#include "fw-api-tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+ int len = strlen(name);
+
+ return !strncmp(name, buf, len) ? buf + len : NULL;
+}
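/*
 * iwl_dbgfs_is_match() returns a pointer just past the "name=" prefix
 * on a hit, so the caller can parse the value directly, or NULL on a
 * miss. A standalone sketch of the same helper in use:
 */
#include <stdio.h>
#include <string.h>

static char *is_match(const char *name, char *buf)
{
	size_t len = strlen(name);

	return strncmp(name, buf, len) == 0 ? buf + len : NULL;
}

int main(void)
{
	char line[] = "burst_period=200";
	char *val = is_match("burst_period=", line);

	if (val)	/* val now points at "200" */
		printf("burst_period value: %s\n", val);
	return 0;
}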
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = -EINVAL;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tof_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.tof_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.one_sided_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_debug_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_buf=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_buf_required = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_config_cmd(mvm);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.tof_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+ cmd->tof_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+ cmd->one_sided_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+ cmd->is_debug_mode);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+ cmd->is_buf_required);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("burst_period=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (!ret)
+ mvm->tof_data.responder_cfg.burst_period =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("burst_duration=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.burst_duration = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("abort_responder=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.abort_responder = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("get_ch_est=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.get_ch_est = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("channel_num=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.channel_num = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bandwidth=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.bandwidth = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("rate=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.rate = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bssid=", buf);
+ if (data) {
+ u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("toa_offset=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.toa_offset =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_per_burst = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("asap_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.asap_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_responder_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.responder_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+ le16_to_cpu(cmd->burst_period));
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+ cmd->burst_duration);
+ pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+ cmd->bandwidth);
+ pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+ cmd->channel_num);
+ pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+ cmd->ctrl_ch_position);
+ pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+ cmd->bssid);
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+ cmd->num_of_burst_exp);
+ pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+ pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+ cmd->abort_responder);
+ pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+ cmd->get_ch_est);
+ pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+ cmd->recv_sta_req_params);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+ cmd->ftm_per_burst);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+ cmd->ftm_resp_ts_avail);
+ pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+ cmd->asap_mode);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msecs = %d\n",
+ le16_to_cpu(cmd->tsf_timer_offset_msecs));
+ pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+ le16_to_cpu(cmd->toa_offset));
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int value, ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("request_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.request_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("initiator=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.initiator = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.one_sided_los_disable = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("req_timeout=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.req_timeout = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("report_policy=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.report_policy = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_random=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.macaddr_random = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_ap=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.num_of_ap = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_template=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+ }
+
+ data = iwl_dbgfs_is_match("ap=", buf);
+ if (data) {
+ struct iwl_tof_range_req_ap_entry ap;
+ int size = sizeof(struct iwl_tof_range_req_ap_entry);
+ u16 burst_period;
+ u8 *mac = ap.bssid;
+ unsigned int i;
+
+ if (sscanf(data, "%u %hhd %hhx %hhx"
+ "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+ "%hhx %hhx %hx"
+ "%hhx %hhx %x"
+ "%hhx %hhx %hhx %hhx",
+ &i, &ap.channel_num, &ap.bandwidth,
+ &ap.ctrl_ch_position,
+ mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+ &ap.measure_type, &ap.num_of_bursts,
+ &burst_period,
+ &ap.samples_per_burst, &ap.retries_per_sample,
+ &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+ &ap.enable_dyn_ack, &ap.rssi) != 20) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (i >= IWL_MVM_TOF_MAX_APS) {
+ IWL_ERR(mvm, "Invalid AP index %d\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ap.burst_period = cpu_to_le16(burst_period);
+
+ memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_request=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_cmd *cmd;
+ int i;
+
+ cmd = &mvm->tof_data.range_req;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+ cmd->initiator);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+ cmd->one_sided_los_disable);
+ pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+ cmd->req_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+ cmd->report_policy);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+ cmd->macaddr_random);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+ cmd->macaddr_template);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+ cmd->macaddr_mask);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+ cmd->num_of_ap);
+ for (i = 0; i < cmd->num_of_ap; i++) {
+ struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: channel_num=%hhx bw=%hhx"
+ " control=%hhx bssid=%pM type=%hhx"
+ " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+ " retries=%hhx tsf_delta=%x location_req=%hhx "
+ " asap=%hhx enable=%hhx rssi=%hhx\n",
+ i, ap->channel_num, ap->bandwidth,
+ ap->ctrl_ch_position, ap->bssid,
+ ap->measure_type, ap->num_of_bursts,
+ ap->burst_period, ap->samples_per_burst,
+ ap->retries_per_sample, ap->tsf_delta,
+ ap->location_req, ap->asap_mode,
+ ap->enable_dyn_ack, ap->rssi);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_ext_cmd *cmd;
+
+ cmd = &mvm->tof_data.range_req_ext;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msec = %hx\n",
+ cmd->tsf_timer_offset_msec);
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw20M = %hhx\n",
+ cmd->ftm_format_and_bw20M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw40M = %hhx\n",
+ cmd->ftm_format_and_bw40M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw80M = %hhx\n",
+ cmd->ftm_format_and_bw80M);
+
+ mutex_unlock(&mvm->mutex);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ int abort_id;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("abort_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.last_abort_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_abort=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ abort_id = mvm->tof_data.last_abort_id;
+ ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[32];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ int last_abort_id;
+
+ mutex_lock(&mvm->mutex);
+ last_abort_id = mvm->tof_data.last_abort_id;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+ last_abort_id);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char *buf;
+ int pos = 0;
+ const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+ struct iwl_tof_range_rsp_ntfy *cmd;
+ int i, ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ cmd = &mvm->tof_data.range_resp;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+ cmd->request_status);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+ cmd->last_in_batch);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+ cmd->num_of_aps);
+ for (i = 0; i < cmd->num_of_aps; i++) {
+ struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+ " rtt=%x rtt_var=%x rtt_spread=%x"
+ " rssi=%hhx rssi_spread=%hhx"
+ " range=%x range_var=%x"
+ " time_stamp=%x\n",
+ i, ap->bssid, ap->measure_status,
+ ap->measure_bw,
+ ap->rtt, ap->rtt_variance, ap->rtt_spread,
+ ap->rssi, ap->rssi_spread, ap->range,
+ ap->range_variance, ap->timestamp);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
@@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+ !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+ mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+ S_IRUSR);
+ }
+
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index ffb4b5cef275..7d69a556bcc8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -949,9 +949,10 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
- int ret, conf_id;
+ unsigned int conf_id;
+ int ret;
- ret = kstrtoint(buf, 0, &conf_id);
+ ret = kstrtouint(buf, 0, &conf_id);
if (ret)
return ret;
@@ -974,7 +975,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (ret)
return ret;
- iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+ iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
@@ -1200,12 +1201,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
if (ptr) {
for (ofs = 0; ofs < len; ofs += 16) {
pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
- bufsz - pos, false);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
+ "0x%.4x %16ph\n", ofs, ptr + ofs);
}
} else {
pos += scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index d7658d16e965..20521bebb0b1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -339,8 +339,13 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
- /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
+ IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
+
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
struct iwl_wowlan_gtk_status {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index b1baa33cc19b..7005fa4be74a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -310,17 +312,22 @@ struct iwl_reduce_tx_power_cmd {
__le16 pwr_restriction;
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+enum iwl_dev_tx_power_cmd_mode {
+ IWL_TX_POWER_MODE_SET_MAC = 0,
+ IWL_TX_POWER_MODE_SET_DEVICE = 1,
+ IWL_TX_POWER_MODE_SET_CHAINS = 2,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
+
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v2 {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
@@ -329,6 +336,20 @@ struct iwl_dev_tx_power_cmd {
__le16 dev_52_high;
} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS 5
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v2: version 2 of the command, embedded here for easier software handling
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd {
+ /* v3 is just an extension of v2 - keep this here */
+ struct iwl_dev_tx_power_cmd_v2 v2;
+ __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
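Since v3 merely extends v2, a caller can send just the v2 prefix to firmware that only understands TX_REDUCED_POWER_API_S_VER_2. A hedged sketch of that pattern (tx_power_dbm and the fw_has_chain_limits capability check are illustrative stand-ins, not the driver's exact code):

	struct iwl_dev_tx_power_cmd cmd = {
		.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
		.v2.mac_context_id = cpu_to_le32(mvmvif->id),
		.v2.pwr_restriction = cpu_to_le16(8 * tx_power_dbm), /* 1/8 dBm units */
	};
	int len = sizeof(cmd);

	/* hypothetical capability flag: older firmware only knows VER_2 */
	if (!fw_has_chain_limits)
		len = sizeof(cmd.v2);

	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
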
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/**
@@ -413,7 +434,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..660cc1c93e19 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -87,41 +87,6 @@ struct iwl_ssid_ie {
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK: scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- * (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- * asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
- SCAN_COMP_STATUS_OK = 0x1,
- SCAN_COMP_STATUS_ABORT = 0x2,
- SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
- SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
- SCAN_COMP_STATUS_ERR_PROBE = 0x5,
- SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
- SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
- SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
- SCAN_COMP_STATUS_ERR_COEX = 0x9,
- SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
- SCAN_COMP_STATUS_ITERATION_END = 0x0B,
- SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
/* scan offload */
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
@@ -144,71 +109,6 @@ enum scan_framework_client {
};
/**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags: see enum iwl_scan_flags
- * @channel_count: channels in channel list
- * @quiet_time: dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th: quiet channel num of packets threshold
- * @good_CRC_th: passive to active promotion threshold
- * @rx_chain: RXON rx chain.
- * @max_out_time: max TUs to be out of associated channel
- * @suspend_time: pause scan this TUs when returning to service channel
- * @flags: RXON flags
- * @filter_flags: RXONfilter
- * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan: list of SSIDs for directed active scan
- * @scan_type: see enum iwl_scan_type.
- * @rep_count: repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
- __le16 len;
- u8 scan_flags;
- u8 channel_count;
- __le16 quiet_time;
- __le16 quiet_plcp_th;
- __le16 good_CRC_th;
- __le16 rx_chain;
- __le32 max_out_time;
- __le32 suspend_time;
- /* RX_ON_FLAGS_API_S_VER_1 */
- __le32 flags;
- __le32 filter_flags;
- struct iwl_tx_cmd tx_cmd[2];
- /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
- __le32 scan_type;
- __le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
- IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0),
- IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22),
- IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24),
- IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type: bitmap; bits 1-20 are for directed scan to i'th ssid and
- * see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd: scan command fixed part
- * @data: scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
- struct iwl_scan_offload_cmd scan_cmd;
- u8 data[0];
-} __packed;
-
-/**
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
* @reported_rssi: AP rssi reported to the host
@@ -298,35 +198,6 @@ enum iwl_scan_ebs_status {
};
/**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line: last schedule line executed (fast or regular)
- * @last_schedule_iteration: last scan iteration executed before scan abort
- * @status: enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
- u8 last_schedule_line;
- u8 last_schedule_iteration;
- u8 status;
- u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap: SSIDs indexes found in this iteration
- * @client_bitmap: clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
- __le16 ssid_bitmap;
- u8 client_bitmap;
- u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
-/**
* iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
* @tx_flags: combination of TX_CMD_FLG_*
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
@@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size: size of the command (not including header)
- * @reserved0: for future use and alignment
- * @ver: API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
- __le16 size;
- u8 reserved0;
- u8 ver;
-} __packed;
-
/* The maximum of either of these cannot exceed 8, because we use an
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
*/
@@ -621,7 +480,6 @@ enum iwl_channel_flags {
/**
* struct iwl_scan_config
- * @hdr: umac command header
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@ enum iwl_channel_flags {
* @channel_array: default supported channels
*/
struct iwl_scan_config {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -660,7 +517,8 @@ struct iwl_scan_config {
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
- * The low priority scan is aborted.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
*@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
@@ -734,7 +592,6 @@ struct iwl_scan_req_umac_tail {
/**
* struct iwl_scan_req_umac
- * @hdr: umac command header
* @flags: &enum iwl_umac_scan_flags
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -753,7 +610,6 @@ struct iwl_scan_req_umac_tail {
* &struct iwl_scan_req_umac_tail
*/
struct iwl_scan_req_umac {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 uid;
__le32 ooc_priority;
@@ -775,12 +631,10 @@ struct iwl_scan_req_umac {
/**
* struct iwl_umac_scan_abort
- * @hdr: umac command header
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
*/
struct iwl_umac_scan_abort {
- struct iwl_mvm_umac_cmd_hdr hdr;
__le32 uid;
__le32 flags;
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 21dd5b771660..493a8bdfbc9e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -366,8 +366,8 @@ struct iwl_mvm_rm_sta_cmd {
* ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag
* @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
* @sta_id: station ID that support IGTK
* @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644
index 000000000000..eed6271d01a3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+ TOF_RANGE_REQ_CMD = 0x1,
+ TOF_CONFIG_CMD = 0x2,
+ TOF_RANGE_ABORT_CMD = 0x3,
+ TOF_RANGE_REQ_EXT_CMD = 0x4,
+ TOF_RESPONDER_CONFIG_CMD = 0x5,
+ TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+ TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+ TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+ TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+ TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+ TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 tof_disabled;
+ u8 one_sided_disabled;
+ u8 is_debug_mode;
+ u8 is_buf_required;
+} __packed;
+
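Every command in this sub-group is carried inside the single TOF_CMD host command, with sub_grp_cmd_id as the first payload word selecting the operation. A minimal sketch of sending the configuration, assuming the ordinary send-PDU path (the function name is illustrative; the real sender lives in the ToF code, not in this header):

	static int iwl_mvm_tof_send_config(struct iwl_mvm *mvm)
	{
		struct iwl_tof_config_cmd cmd = {
			.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD),
			.tof_disabled = 0,		/* 0 - enabled */
			.one_sided_disabled = 0,
			.is_debug_mode = 0,
			.is_buf_required = 0,
		};

		return iwl_mvm_send_cmd_pdu(mvm, TOF_CMD, 0, sizeof(cmd), &cmd);
	}
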
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ * The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ * The minimum delay between two sequential FTM Responses
+ * in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ * The total time for all FTMs handshake in the same burst.
+ * Affects the duration of the time events in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ * The number of bursts for the current ToF request. Affects
+ * the number of event allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ * (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ * params and use the recommended Initiator params.
+ * 0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency.
+ * 40MHz 0 below center, 1 above center
+ * 80MHz bits [0..1]: 0 the near 20MHz to the center,
+ * 1 the far 20MHz to the center
+ * bit[2] as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ * '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 sub_grp_cmd_id;
+ __le16 burst_period;
+ u8 min_delta_ftm;
+ u8 burst_duration;
+ u8 num_of_burst_exp;
+ u8 get_ch_est;
+ u8 abort_responder;
+ u8 recv_sta_req_params;
+ u8 channel_num;
+ u8 bandwidth;
+ u8 rate;
+ u8 ctrl_ch_position;
+ u8 ftm_per_burst;
+ u8 ftm_resp_ts_avail;
+ u8 asap_mode;
+ u8 sta_id;
+ __le16 tsf_timer_offset_msecs;
+ __le16 toa_offset;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ * in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ * value be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ * value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+ __le32 sub_grp_cmd_id;
+ __le16 tsf_timer_offset_msec;
+ __le16 reserved;
+ u8 min_delta_ftm;
+ u8 ftm_format_and_bw20M;
+ u8 ftm_format_and_bw40M;
+ u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency.
+ * 40MHz 0 below center, 1 above center
+ * 80MHz bits [0..1]: 0 the near 20MHz to the center,
+ * 1 the far 20MHz to the center
+ * bit[2] as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
+ * number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity, in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ * 1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ * in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ * The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request: Bit[0] - LCI should be sent in the FTMR,
+ * Bit[1] - Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ * 0 - Initiator interacts with a regular AP
+ * 1 - Initiator interacts with a Responder machine: it needs to send
+ * its Acks with HT 40MHz / 80MHz, since the Responder should
+ * use them for its channel estimation measurement (this flag is
+ * set when we configure the opposite machine to be a Responder).
+ * @rssi: Last received value
+ * legal values: -128..0; values above 0 (e.g. 0x7f) indicate an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+ u8 channel_num;
+ u8 bandwidth;
+ u8 tsf_delta_direction;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ u8 measure_type;
+ u8 num_of_bursts;
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 retries_per_sample;
+ __le32 tsf_delta;
+ u8 location_req;
+ u8 asap_mode;
+ u8 enable_dyn_ack;
+ s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ * possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ * timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ * earlier of: measurements completion / timeout
+ * expiration.
+ */
+enum iwl_tof_response_mode {
+ IWL_MVM_TOF_RESPOSE_ASAP = 1,
+ IWL_MVM_TOF_RESPOSE_TIMEOUT,
+ IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @initiator: 0- NW initiated, 1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ * '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ * This is equivalent to the session time configured to the
+ * LMAC in Initiator Request
+ * @report_policy: Partially supported in this release: the range report
+ * will be uploaded as a batch when ready or when the session is done
+ * (successfully / partially). One of &enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ * '1' Use MAC address randomization according to the template and mask below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 request_id;
+ u8 initiator;
+ u8 one_sided_los_disable;
+ u8 req_timeout;
+ u8 report_policy;
+ u8 los_det_disable;
+ u8 num_of_ap;
+ u8 macaddr_random;
+ u8 macaddr_template[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
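To make the template/mask semantics concrete: a mask bit of 0 takes the corresponding bit from macaddr_template, while a mask bit of 1 is randomized. The randomization itself is done by the UMAC; the following only illustrates the documented bit rules, it is not driver code:

	u8 addr[ETH_ALEN], rnd[ETH_ALEN];
	int i;

	get_random_bytes(rnd, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = (tmpl[i] & ~mask[i]) | (rnd[i] & mask[i]);
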
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] when the Channel Estimation notification was
+ * uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ __le16 reserved;
+ __le32 range;
+ __le32 range_variance;
+ __le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+ u8 request_id;
+ u8 request_status;
+ u8 last_in_batch;
+ u8 num_of_aps;
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+ u8 token;
+ u8 role;
+ __le16 reserved;
+ u8 initiator_bssid[ETH_ALEN];
+ u8 responder_bssid[ETH_ALEN];
+ u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+ u8 bssid[ETH_ALEN];
+ u8 request_token;
+ u8 status;
+ __le16 report_ie_len;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+ __le32 sub_grp_cmd_id;
+ u8 request_id;
+ u8 reserved[3];
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 81c4ea3c6958..853698ab8b05 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -124,6 +124,18 @@ enum iwl_tx_flags {
TX_CMD_FLG_HCCA_CHUNK = BIT(31)
}; /* TX_FLAGS_BITS_API_S_VER_1 */
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+ PM_FRAME_NONE = 0,
+ PM_FRAME_MGMT = 2,
+ PM_FRAME_ASSOC = 3,
+};
+
/*
* TX command security control
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 16e9ef49397f..4af7513adda2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -75,6 +75,7 @@
#include "fw-api-coex.h"
#include "fw-api-scan.h"
#include "fw-api-stats.h"
+#include "fw-api-tof.h"
/* Tx queue numbers */
enum {
@@ -119,6 +120,9 @@ enum {
ADD_STA = 0x18,
REMOVE_STA = 0x19,
+ /* paging get item */
+ FW_GET_ITEM_CMD = 0x1a,
+
/* TX */
TX_CMD = 0x1c,
TXPATH_FLUSH = 0x1e,
@@ -148,6 +152,9 @@ enum {
LQ_CMD = 0x4e,
+ /* paging block to FW cpu2 */
+ FW_PAGING_BLOCK_CMD = 0x4f,
+
/* Scan offload */
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -163,6 +170,10 @@ enum {
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/* PHY_DB_CMD = 0x6c, */
+ /* ToF - 802.11mc FTM */
+ TOF_CMD = 0x10,
+ TOF_NOTIFICATION = 0x11,
+
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd {
u8 data[];
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
+#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwl_fw_paging_cmd {
+ __le32 flags;
+ __le32 block_size;
+ __le32 block_num;
+ __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * FW item IDs
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload or
+ * download
+ */
+enum iwl_fw_item_id {
+ IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+ __le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+ __le32 item_id;
+ __le32 item_byte_cnt;
+ __le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
/**
* struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
@@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info {
__le16 frame_time;
} __packed;
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+ CSUM_RXA_RESERVED_MASK = 0x000f,
+ CSUM_RXA_MICSIZE_MASK = 0x00f0,
+ CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+ CSUM_RXA_PADD = BIT(13),
+ CSUM_RXA_AMSDU = BIT(14),
+ CSUM_RXA_ENA = BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see &enum iwl_csum_rx_assist_info above
+ */
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
- __le16 reserved;
-} __packed;
+ __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
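Unpacking the assist word per the bit layout above is straightforward; a sketch, with res assumed to point at a struct iwl_rx_mpdu_res_start:

	u16 assist = le16_to_cpu(res->assist);
	u8 mic_crc_len = (assist & CSUM_RXA_MICSIZE_MASK) >> 4;	/* bits 4:7 */
	u8 mac_hdr_len = (assist & CSUM_RXA_HEADERLEN_MASK) >> 8;	/* bits 8:12 */
	bool padded = assist & CSUM_RXA_PADD;
	bool csum_offloaded = assist & CSUM_RXA_ENA;
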
/**
* enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
* @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
* @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
* @RX_MPDU_RES_STATUS_STA_ID_MSK:
* @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+ RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
+ RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index eb10c5ee4a14..4a0ce83315bd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+ int i;
+
+ if (!mvm->fw_paging_db[0].fw_paging_block)
+ return;
+
+ for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+ if (!mvm->fw_paging_db[i].fw_paging_block) {
+ IWL_DEBUG_FW(mvm,
+ "Paging: block %d already freed, continue to next page\n",
+ i);
+
+ continue;
+ }
+
+ __free_pages(mvm->fw_paging_db[i].fw_paging_block,
+ get_order(mvm->fw_paging_db[i].fw_paging_size));
+ }
+ kfree(mvm->trans->paging_download_buf);
+ memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+ int sec_idx, idx;
+ u32 offset = 0;
+
+ /*
+ * find where the paging image starts:
+ * if CPU2 exists and is in paging format, the image looks like:
+ * CPU1 sections (2 or more)
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1 sections
+ * from the CPU2 sections
+ * CPU2 sections (not paged)
+ * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
+ * sections from the CPU2 paging section
+ * CPU2 paging CSS
+ * CPU2 paging image (including instruction and data)
+ */
+ for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+ if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+ sec_idx++;
+ break;
+ }
+ }
+
+ if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+ IWL_ERR(mvm, "driver didn't find paging image\n");
+ iwl_free_fw_paging(mvm);
+ return -EINVAL;
+ }
+
+ /* copy the CSS block to the dram */
+ IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+ sec_idx);
+
+ memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+ image->sec[sec_idx].data,
+ mvm->fw_paging_db[0].fw_paging_size);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d CSS bytes to first block\n",
+ mvm->fw_paging_db[0].fw_paging_size);
+
+ sec_idx++;
+
+ /*
+ * Copy the paging blocks to DRAM. The loop index starts from 1
+ * since the CSS block was already copied to DRAM (CSS index is 0).
+ * The loop stops at num_of_paging_blk since the last block may not
+ * be full.
+ */
+ for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+ memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ mvm->fw_paging_db[idx].fw_paging_size);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d paging bytes to block %d\n",
+ mvm->fw_paging_db[idx].fw_paging_size,
+ idx);
+
+ offset += mvm->fw_paging_db[idx].fw_paging_size;
+ }
+
+ /* copy the last paging block */
+ if (mvm->num_of_pages_in_last_blk > 0) {
+ memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: copied %d pages in the last block %d\n",
+ mvm->num_of_pages_in_last_blk, idx);
+ }
+
+ return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+ const struct fw_img *image)
+{
+ struct page *block;
+ dma_addr_t phys = 0;
+ int blk_idx = 0;
+ int order, num_of_pages;
+ int dma_enabled;
+
+ if (mvm->fw_paging_db[0].fw_paging_block)
+ return 0;
+
+ dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+ /* ensure that PAGING_BLOCK_SIZE is 2^BLOCK_2_EXP_SIZE */
+ BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+ num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+ mvm->num_of_paging_blk = ((num_of_pages - 1) /
+ NUM_OF_PAGE_PER_GROUP) + 1;
+
+ mvm->num_of_pages_in_last_blk =
+ num_of_pages -
+ NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+ mvm->num_of_paging_blk,
+ mvm->num_of_pages_in_last_blk);
+
+ /* allocate block of 4Kbytes for paging CSS */
+ order = get_order(FW_PAGING_SIZE);
+ block = alloc_pages(GFP_KERNEL, order);
+ if (!block) {
+ /* free all the previous pages since we failed */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+
+ mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+ if (dma_enabled) {
+ phys = dma_map_page(mvm->trans->dev, block, 0,
+ PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mvm->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one since
+ * we failed to map_page.
+ */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+ } else {
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+ blk_idx << BLOCK_2_EXP_SIZE;
+ }
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+ order);
+
+ /*
+ * Allocate blocks in DRAM. Since the CSS is allocated in
+ * fw_paging_db[0], the loop starts from index 1.
+ */
+ for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ /* allocate block of PAGING_BLOCK_SIZE (32K) */
+ order = get_order(PAGING_BLOCK_SIZE);
+ block = alloc_pages(GFP_KERNEL, order);
+ if (!block) {
+ /* free all the previous pages since we failed */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+
+ mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+ if (dma_enabled) {
+ phys = dma_map_page(mvm->trans->dev, block, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mvm->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one
+ * since we failed to map_page.
+ */
+ iwl_free_fw_paging(mvm);
+ return -ENOMEM;
+ }
+ mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+ } else {
+ mvm->fw_paging_db[blk_idx].fw_paging_phys =
+ PAGING_ADDR_SIG |
+ blk_idx << BLOCK_2_EXP_SIZE;
+ }
+
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+ order);
+ }
+
+ return 0;
+}
+
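As a worked example of the block math above, assume a 1 MiB paging image with FW_PAGING_SIZE = 4 KiB and NUM_OF_PAGE_PER_GROUP = 8 (one 32 KiB block holds eight 4 KiB pages; these constants are defined outside this hunk, values assumed here):

	num_of_pages             = 1048576 / 4096     = 256
	num_of_paging_blk        = (256 - 1) / 8 + 1  = 32
	num_of_pages_in_last_blk = 256 - 8 * (32 - 1) = 8

Together with the CSS block that gives 33 blocks, matching NUM_OF_FW_PAGING_BLOCKS.
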
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+ const struct fw_img *fw)
+{
+ int ret;
+
+ ret = iwl_alloc_fw_paging_mem(mvm, fw);
+ if (ret)
+ return ret;
+
+ return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+ int blk_idx;
+ __le32 dev_phy_addr;
+ struct iwl_fw_paging_cmd fw_paging_cmd = {
+ .flags =
+ cpu_to_le32(PAGING_CMD_IS_SECURED |
+ PAGING_CMD_IS_ENABLED |
+ (mvm->num_of_pages_in_last_blk <<
+ PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+ .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+ .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+ };
+
+ /* loop over all paging blocks + CSS block */
+ for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ dev_phy_addr =
+ cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+ PAGE_2_EXP_SIZE);
+ fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
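Note that each device_phy_addr entry is the block address expressed in page units: assuming PAGE_2_EXP_SIZE is 12 (4 KiB pages), a block mapped at physical address 0x12345000 would be sent as:

	dev_phy_addr = cpu_to_le32(0x12345000 >> 12);	/* 0x12345 */
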
+/*
+ * Send paging item cmd to FW in case CPU2 has paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+ .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+ };
+
+ struct iwl_fw_get_item_resp *item_resp;
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &fw_get_item_cmd, },
+ };
+
+ cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+ if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+ IWL_ERR(mvm,
+ "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+ le32_to_cpu(item_resp->item_id));
+ ret = -EIO;
+ goto exit;
+ }
+
+ mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+ GFP_KERNEL);
+ if (!mvm->trans->paging_download_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+ mvm->trans->paging_db = mvm->fw_paging_db;
+ IWL_DEBUG_FW(mvm,
+ "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+ mvm->trans->paging_req_addr);
+
+exit:
+ iwl_free_resp(&cmd);
+
+ return ret;
+}
+
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
const struct fw_img *fw;
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
- static const u8 alive_cmd[] = { MVM_ALIVE };
+ static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+ iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@@ -269,6 +574,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
+ * Configure and operate the fw paging mechanism.
+ * The driver configures the paging flow only once; the CPU2 paging
+ * image is included in the IWL_UCODE_INIT image.
+ */
+ if (fw->paging_mem_size) {
+ /*
+ * When DMA is not enabled, the driver needs to copy / write
+ * the downloaded / uploaded page to / from the SMEM.
+ * This gets the location where the pages are stored.
+ */
+ if (!is_device_dma_capable(mvm->trans->dev)) {
+ ret = iwl_trans_get_paging_item(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "failed to get FW paging item\n");
+ return ret;
+ }
+ }
+
+ ret = iwl_save_fw_paging(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(mvm);
+ return ret;
+ }
+ }
+
+ /*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
* could be stopped, so wake them up. In firmware restart,
@@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait calib_wait;
- static const u8 init_complete[] = {
+ static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
CALIB_RES_NOTIF_PHY_DB
};
@@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
return;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
- pkt->hdr.flags);
- goto exit;
- }
-
mem_cfg = (void *)pkt->data;
mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
le32_to_cpu(mem_cfg->page_buff_size);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
-exit:
iwl_free_resp(&cmd);
}
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
- unsigned int delay)
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
+ unsigned int delay = 0;
+
+ if (trigger)
+ delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
return -EBUSY;
@@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
le32_to_cpu(desc->trig_desc.type));
mvm->fw_dump_desc = desc;
+ mvm->fw_dump_trig = trigger;
queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
@@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
}
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len, unsigned int delay)
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_mvm_dump_desc *desc;
@@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+ return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
}
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
- unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
@@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
len = strlen(buf) + 1;
}
- ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
- len, delay);
+ ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+ trigger);
+
if (ret)
return ret;
@@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
- iwl_mvm_get_shared_mem_conf(mvm);
+ iwl_mvm_get_shared_mem_conf(mvm);
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
@@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ if (iwl_mvm_is_csum_supported(mvm) &&
+ mvm->cfg->features & NETIF_F_RXCSUM)
+ iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_KILL_CARD_DISABLED) ?
"Reached" : "Not reached");
-
- return 0;
}
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
- return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 1812dd018af2..3424315dd876 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
}
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
}
}
-
- return 0;
}
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
}
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
mb);
- return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dfdab38e2d4a..aa8c2b7f23c7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
}
if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -649,6 +650,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
+ hw->netdev_features |= mvm->cfg->features;
+ if (!iwl_mvm_is_csum_supported(mvm))
+ hw->netdev_features &= ~NETIF_F_RXCSUM;
+
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
u32 file_len, fifo_data_len = 0;
u32 smem_len = mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len;
+ bool monitor_dump_only = false;
lockdep_assert_held(&mvm->mutex);
+ if (mvm->fw_dump_trig &&
+ mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+ monitor_dump_only = true;
+
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
@@ -1174,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
fifo_data_len +
sizeof(*dump_info);
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+ /* If we only want a monitor dump, reset the file length */
+ if (monitor_dump_only) {
+ file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+ sizeof(*dump_info);
+ }
+
/*
* In 8000 HW family B-step include the ICCM (which resides separately)
*/
@@ -1186,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
@@ -1239,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data = iwl_fw_error_next_data(dump_data);
}
+ /* In case we only want a monitor dump, skip to dumping transport data */
+ if (monitor_dump_only)
+ goto dump_trans_data;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -1282,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->data, IWL8260_ICCM_LEN);
}
- fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+ fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+ mvm->fw_dump_trig);
fw_error_dump->op_mode_len = file_len;
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
+ mvm->fw_dump_trig = NULL;
clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
@@ -1433,22 +1456,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
- bool exit_now;
-
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
- mutex_lock(&mvm->d0i3_suspend_mutex);
- __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
- exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
- &mvm->d0i3_suspend_flags);
- mutex_unlock(&mvm->d0i3_suspend_mutex);
-
- if (exit_now) {
- IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
- _iwl_mvm_exit_d0i3(mvm);
- }
-
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1585,20 +1595,23 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
- .set_mode = 0,
- .mac_context_id =
+ .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v2.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
};
+ int len = sizeof(cmd);
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
- sizeof(cmd), &cmd);
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
+ len = sizeof(cmd.v2);
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
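The hunk above picks the command payload length at runtime: firmware without IWL_UCODE_TLV_API_TX_POWER_CHAIN only parses the v2 prefix of the command, so only sizeof(cmd.v2) bytes are sent. Below is a minimal userspace sketch of that versioned-payload pattern; the struct layout and field names are illustrative stand-ins, not the real iwl_dev_tx_power_cmd.

#include <stddef.h>
#include <stdio.h>

struct cmd_v2 { unsigned int set_mode, mac_id; unsigned short pwr; };
struct cmd_v3 { struct cmd_v2 v2; unsigned short per_chain[2]; };

static size_t cmd_len(int fw_has_chain_api, const struct cmd_v3 *cmd)
{
	/* Older firmware parses only the v2 prefix of the struct */
	return fw_has_chain_api ? sizeof(*cmd) : sizeof(cmd->v2);
}

int main(void)
{
	struct cmd_v3 cmd = { .v2 = { .set_mode = 1 } };

	printf("len(new fw)=%zu len(old fw)=%zu\n",
	       cmd_len(1, &cmd), cmd_len(0, &cmd));
	return 0;
}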
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1664,6 +1677,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
}
+ mvmvif->features |= hw->netdev_features;
+
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
@@ -2880,10 +2895,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- /* fall-through */
- case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ break;
case WLAN_CIPHER_SUITE_AES_CMAC:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
@@ -3025,7 +3041,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
int res, time_reg = DEVICE_SYSTEM_TIME_REG;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
- static const u8 time_event_response[] = { HOT_SPOT_CMD };
+ static const u16 time_event_response[] = { HOT_SPOT_CMD };
struct iwl_notification_wait wait_time_event;
struct iwl_hs20_roc_req aux_roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2d4bad5fe825..b95a07ec9e36 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,6 +80,7 @@
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
+#include "tof.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5
@@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
* @tfd_q_hang_detect: enables the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- * Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
@@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data {
* # of received beacons accumulated over FW restart, and the current
* average signal of beacons retrieved from the firmware
* @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@ struct iwl_mvm_vif {
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
+
+ /* TCP Checksum Offload */
+ netdev_features_t features;
};
static inline struct iwl_mvm_vif *
@@ -606,6 +610,11 @@ struct iwl_mvm {
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
+ /* Paging section */
+ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+ u16 num_of_paging_blk;
+ u16 num_of_pages_in_last_blk;
+
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -686,6 +695,7 @@ struct iwl_mvm {
* can hold 16 keys at most. Reflect this fact.
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 fw_key_deleted[STA_KEY_MAX_NUM];
/* references taken by the driver and spinlock protecting them */
spinlock_t refs_lock;
@@ -698,6 +708,7 @@ struct iwl_mvm {
u8 fw_dbg_conf;
struct delayed_work fw_dump_wk;
struct iwl_mvm_dump_desc *fw_dump_desc;
+ struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -822,6 +833,7 @@ struct iwl_mvm {
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
u32 ciphers[6];
+ struct iwl_mvm_tof_data tof_data;
};
/* Extract MVM priv from op_mode and _hw */
@@ -941,6 +953,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
IWL_MVM_BT_COEX_RRC;
}
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -974,12 +992,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
u32 flags, u16 len, const void *data);
int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd,
u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -988,10 +1006,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag);
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
@@ -1003,6 +1017,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
flush_work(&mvm->async_handlers_wk);
@@ -1011,9 +1036,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
/* Statistics */
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
@@ -1059,27 +1083,20 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
*/
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1106,12 +1123,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1135,29 +1150,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies,
int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1196,9 +1206,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1254,9 +1263,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1274,9 +1282,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1285,9 +1292,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1376,9 +1382,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1390,9 +1395,8 @@ struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
enum iwl_mcc_source src_id);
int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
const char *alpha2,
enum iwl_mcc_source src_id,
@@ -1431,8 +1435,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@@ -1442,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len, unsigned int delay);
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
- unsigned int delay);
+ struct iwl_fw_dbg_trigger_tlv *trigger);
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2a6be350704a..328187da7541 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
return ret;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- goto exit;
- }
/* Extract NVM response */
nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
return ERR_PTR(ret);
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- goto exit;
- }
/* Extract MCC response */
mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return retval;
}
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
- return 0;
+ return;
mcc[0] = notif->mcc >> 8;
mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
mcc, src);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
if (IS_ERR_OR_NULL(regd))
- return 0;
+ return;
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
kfree(regd);
-
- return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index e4fa50075ffd..a37de3f410a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
}
struct iwl_rx_handlers {
- u8 cmd_id;
+ u16 cmd_id;
bool async;
- int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
#define RX_HANDLER(_cmd_id, _fn, _async) \
{ .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
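RX_HANDLER_GRP relies on WIDE_ID() to fold a group id and an opcode into the widened 16-bit cmd_id; the dispatch change further down compares against WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd). The macro itself is defined elsewhere in the driver headers; the stand-in below assumes the group sits in the high byte, which is the layout that comparison implies.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's WIDE_ID() macro */
#define WIDE_ID(grp, opcode) ((uint16_t)(((grp) << 8) | (opcode)))

int main(void)
{
	uint16_t id = WIDE_ID(0x01, 0x2a);

	printf("group=0x%02x cmd=0x%02x wide=0x%04x\n",
	       id >> 8, id & 0xff, id);
	return 0;
}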
/*
* Handlers for fw notifications
@@ -221,7 +222,6 @@ struct iwl_rx_handlers {
* called from a worker with mvm->mutex held.
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
- RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,12 +261,14 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
true),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+ RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
};
#undef RX_HANDLER
+#undef RX_HANDLER_GRP
#define CMD(x) [x] = #x
-static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
CMD(MVM_ALIVE),
CMD(REPLY_ERROR),
CMD(INIT_COMPLETE_NOTIF),
@@ -286,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(PHY_CONFIGURATION_CMD),
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
+ CMD(FW_PAGING_BLOCK_CMD),
CMD(ADD_STA_KEY),
CMD(ADD_STA),
+ CMD(FW_GET_ITEM_CMD),
CMD(REMOVE_STA),
CMD(LQ_CMD),
CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -470,6 +474,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+ trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIDE_CMD_HDR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
trans_cfg.bc_table_dword = true;
@@ -576,6 +582,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* rpm starts with a taken ref. only set the appropriate bit here. */
mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
+ iwl_mvm_tof_init(mvm);
+
return op_mode;
out_unregister:
@@ -623,14 +631,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_mvm_tof_clean(mvm);
+
ieee80211_free_hw(mvm->hw);
}
struct iwl_async_handler_entry {
struct list_head list;
struct iwl_rx_cmd_buffer rxb;
- int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +676,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->fn(mvm, &entry->rxb, NULL))
- IWL_WARN(mvm,
- "returned value from ASYNC handlers are ignored\n");
+ entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
kfree(entry);
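The async-handler worker above follows a common pattern: entries are queued under async_handlers_lock, and the worker splices the whole pending list onto a local head while holding the lock, then runs the handlers without it. A self-contained pthread sketch of the same splice-then-process idiom, with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int payload; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;

static void producer_add(int payload)
{
	struct entry *e = malloc(sizeof(*e));

	e->payload = payload;
	pthread_mutex_lock(&lock);
	e->next = pending;		/* LIFO order is fine for a sketch */
	pending = e;
	pthread_mutex_unlock(&lock);
}

static void worker_run(void)
{
	struct entry *local, *next;

	pthread_mutex_lock(&lock);
	local = pending;		/* splice the whole list out */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	for (; local; local = next) {	/* process without the lock */
		next = local->next;
		printf("handling %d\n", local->payload);
		free(local);
	}
}

int main(void)
{
	producer_add(1);
	producer_add(2);
	worker_run();
	return 0;
}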
@@ -698,24 +705,30 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
if (!cmds_trig->cmds[i].cmd_id)
break;
- if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+ if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+ cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
continue;
iwl_mvm_fw_dbg_collect_trig(mvm, trig,
- "CMD 0x%02x received",
- pkt->hdr.cmd);
+ "CMD 0x%02x.%02x received",
+ pkt->hdr.group_id, pkt->hdr.cmd);
break;
}
}
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 i;
+ if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+ iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+ return;
+ }
+
iwl_mvm_rx_check_trigger(mvm, pkt);
/*
@@ -729,16 +742,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
struct iwl_async_handler_entry *entry;
- if (rx_h->cmd_id != pkt->hdr.cmd)
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
continue;
- if (!rx_h->async)
- return rx_h->fn(mvm, rxb, cmd);
+ if (!rx_h->async) {
+ rx_h->fn(mvm, rxb);
+ return;
+ }
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
/* we can't do much... */
if (!entry)
- return 0;
+ return;
entry->rxb._page = rxb_steal_page(rxb);
entry->rxb._offset = rxb->_offset;
@@ -750,8 +765,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
schedule_work(&mvm->async_handlers_wk);
break;
}
-
- return 0;
}
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
@@ -903,7 +916,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->restart_fw && fw_error) {
- iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+ iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+ NULL);
} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@@ -1100,9 +1114,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
- /* make sure we have no running tx while configuring the qos */
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
- synchronize_net();
/*
* iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1130,6 +1142,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = false;
}
+ /* make sure we have no running tx while configuring the seqno */
+ synchronize_net();
+
iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
sizeof(wowlan_config_cmd),
@@ -1156,15 +1171,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+ struct iwl_mvm *mvm;
+ u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm *mvm = data;
+ struct iwl_mvm_wakeup_reason_iter_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
- mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- iwl_mvm_connection_loss(mvm, vif, "D0i3");
+ data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+ if (data->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+ iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+ else
+ ieee80211_beacon_loss(vif);
+ }
}
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1232,7 +1257,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
};
struct iwl_wowlan_status *status;
int ret;
- u32 disconnection_reasons, wakeup_reasons;
+ u32 handled_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
@@ -1249,13 +1274,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
- disconnection_reasons =
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
- if (wakeup_reasons & disconnection_reasons)
+ handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & handled_reasons) {
+ struct iwl_mvm_wakeup_reason_iter_data data = {
+ .mvm = mvm,
+ .wakeup_reasons = wakeup_reasons,
+ };
+
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d0i3_disconnect_iter, mvm);
+ iwl_mvm_d0i3_wakeup_reason_iter, &data);
+ }
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
@@ -1308,17 +1338,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
return _iwl_mvm_exit_d0i3(mvm);
}
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
- struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -1332,5 +1351,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
- .napi_add = iwl_mvm_napi_add,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d2c6ba9d326b..4645877882a6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
static
void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (vif->bss_conf.cqm_rssi_thold) {
+ if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
cmd->bf_energy_delta =
cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
/* fw uses an absolute value for this */
@@ -287,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
return true;
}
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
- int numerator;
- int dtim_interval = dtimper * bi;
-
- if (WARN_ON(!dtim_interval))
- return 0;
-
- if (dtimper == 1) {
- if (bi > 100)
- numerator = 408;
- else
- numerator = 510;
- } else if (dtimper < 10) {
- numerator = 612;
- } else {
- return 0;
- }
- return max(1, (numerator / dtim_interval));
-}
-
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
{
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -357,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- !mvmvif->pm_enabled)
+ if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+ (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -377,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->skip_dtim_periods =
- iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
- if (cmd->skip_dtim_periods)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->skip_dtim_periods = 3;
}
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
@@ -509,9 +486,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
ETH_ALEN);
}
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +496,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
- return 0;
}
struct iwl_power_vifs {
@@ -810,7 +784,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
if (!d0i3)
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index daff1d0a8e4a..5ae9c8aa868f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
- if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+ if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+ iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
return false;
if (mvm->nvm_data->sku_cap_mimo_disabled)
@@ -2403,7 +2404,7 @@ struct rs_init_rate_info {
u8 rate_idx;
};
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
{ -60, IWL_RATE_54M_INDEX },
{ -64, IWL_RATE_48M_INDEX },
{ -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2417,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
{ S8_MIN, IWL_RATE_1M_INDEX },
};
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
{ -60, IWL_RATE_54M_INDEX },
{ -64, IWL_RATE_48M_INDEX },
{ -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2428,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
{ S8_MIN, IWL_RATE_6M_INDEX },
};
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+ { -60, IWL_RATE_MCS_7_INDEX },
+ { -64, IWL_RATE_MCS_6_INDEX },
+ { -68, IWL_RATE_MCS_5_INDEX },
+ { -72, IWL_RATE_MCS_4_INDEX },
+ { -80, IWL_RATE_MCS_3_INDEX },
+ { -84, IWL_RATE_MCS_2_INDEX },
+ { -85, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+ { -60, IWL_RATE_MCS_8_INDEX },
+ { -64, IWL_RATE_MCS_7_INDEX },
+ { -68, IWL_RATE_MCS_6_INDEX },
+ { -72, IWL_RATE_MCS_5_INDEX },
+ { -80, IWL_RATE_MCS_4_INDEX },
+ { -84, IWL_RATE_MCS_3_INDEX },
+ { -85, IWL_RATE_MCS_2_INDEX },
+ { -87, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+ { -60, IWL_RATE_MCS_9_INDEX },
+ { -64, IWL_RATE_MCS_8_INDEX },
+ { -68, IWL_RATE_MCS_7_INDEX },
+ { -72, IWL_RATE_MCS_6_INDEX },
+ { -80, IWL_RATE_MCS_5_INDEX },
+ { -84, IWL_RATE_MCS_4_INDEX },
+ { -85, IWL_RATE_MCS_3_INDEX },
+ { -87, IWL_RATE_MCS_2_INDEX },
+ { -88, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+
+ if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+ /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+ if (is_mimo(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+ } else if (is_siso(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+ } else {
+ lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+ if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+ lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+ }
+ }
+
+ if (is_vht(rate)) {
+ if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+ lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+ }
+ } else if (is_ht(rate)) {
+ lq_sta->optimal_rates = rs_optimal_rates_ht;
+ lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+ }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+ int i;
+
+ rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+ BITS_PER_LONG);
+
+ for (i = 0; i < lq_sta->optimal_nentries; i++) {
+ int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+ if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+ (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+ rate->index = rate_idx;
+ break;
+ }
+ }
+
+ rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+ return rate;
+}
+
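rs_get_optimal_rate() above walks a threshold table ordered from strongest to weakest signal and returns the first rate whose threshold the measured RSSI clears, restricted to rates present in optimal_rate_mask. A standalone sketch of that lookup, with made-up thresholds and indices:

#include <stdio.h>

struct row { signed char rssi; int rate_idx; };

/* Ordered from strongest to weakest required signal */
static const struct row table[] = {
	{ -60, 7 }, { -68, 5 }, { -80, 3 }, { -128, 0 },
};

static int pick_rate(signed char rssi, unsigned long allowed_mask)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (rssi >= table[i].rssi &&
		    (allowed_mask & (1UL << table[i].rate_idx)))
			return table[i].rate_idx;
	return 0;		/* weakest rate as a fallback */
}

int main(void)
{
	/* -72 dBm clears only the -80 dBm threshold, so index 3 wins */
	printf("picked rate idx %d\n", pick_rate(-72, ~0UL));
	return 0;
}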
/* Choose an initial legacy rate and antenna to use based on the RSSI
* of last Rx
*/
@@ -2468,12 +2587,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
if (band == IEEE80211_BAND_5GHZ) {
rate->type = LQ_LEGACY_A;
- initial_rates = rs_init_rates_5ghz;
- nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+ initial_rates = rs_optimal_rates_5ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
} else {
rate->type = LQ_LEGACY_G;
- initial_rates = rs_init_rates_24ghz;
- nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+ initial_rates = rs_optimal_rates_24ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
}
if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2615,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_rx_status *rx_status)
{
+ int i;
+
lq_sta->pers.chains = rx_status->chains;
lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+ lq_sta->pers.last_rssi = S8_MIN;
+
+ for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+ if (!(lq_sta->pers.chains & BIT(i)))
+ continue;
+
+ if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+ lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+ }
}
/**
@@ -2538,6 +2668,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rate = &tbl->rate;
rs_get_initial_rate(mvm, lq_sta, band, rate);
+ rs_init_optimal_rate(mvm, sta, lq_sta);
WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
if (rate->ant == ANT_A)
@@ -2560,6 +2691,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct rs_rate *optimal_rate;
+ u32 last_ucode_rate;
if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
/* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2716,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
info->band, &info->control.rates[0]);
-
info->control.rates[0].count = 1;
+
+ /* Report the optimal rate based on rssi and STA caps if we haven't
+ * converged yet (too little traffic) or are still exploring other modulations
+ */
+ if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+ optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+ last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+ optimal_rate);
+ iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+ &txrc->reported_rate);
+ }
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2748,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
#endif
lq_sta->pers.chains = 0;
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
return &sta_priv->lq_sta;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 2a3da314305a..81314ad9ebe0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@ struct iwl_lq_sta {
u8 max_siso_rate_idx;
u8 max_mimo2_rate_idx;
+ /* Optimal rate based on RSSI and STA caps.
+ * Used only to reflect link speed to userspace.
+ */
+ struct rs_rate optimal_rate;
+ unsigned long optimal_rate_mask;
+ const struct rs_init_rate_info *optimal_rates;
+ int optimal_nentries;
+
u8 missed_rate_counter;
struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@ struct iwl_lq_sta {
#endif
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
struct iwl_mvm *drv;
} pers;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 8f1d93b7a13a..c37c10a423ce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -61,6 +61,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
@@ -71,8 +72,7 @@
* Copies the phy information into mvm->last_phy_info; it will be used when the
* actual data comes from the fw in the next packet.
*/
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
spin_unlock(&mvm->drv_stats_lock);
}
#endif
-
- return 0;
}
/*
@@ -96,6 +94,7 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* Adds the rxb to a new skb and gives it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
u32 ampdu_status, u8 crypt_len,
@@ -129,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
- ieee80211_rx(mvm->hw, skb);
+ ieee80211_rx_napi(mvm->hw, skb, napi);
}
/*
@@ -237,13 +236,26 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return 0;
}
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u32 status)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+ status & RX_MPDU_RES_STATUS_CSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
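Setting skb->ip_summed to CHECKSUM_UNNECESSARY, as iwl_mvm_rx_csum() does above, tells the stack that the device already validated the L4 checksum, so software verification is skipped. A toy model of that decision, borrowing only the flag semantics (everything else here is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

static bool l4_csum_ok(int ip_summed, const void *payload, int len)
{
	if (ip_summed == CHECKSUM_UNNECESSARY)
		return true;		/* hardware vouched for it */
	/* ... otherwise fold and verify the checksum in software ... */
	(void)payload; (void)len;
	return false;
}

int main(void)
{
	printf("hw-validated frame ok: %d\n",
	       l4_csum_ok(CHECKSUM_UNNECESSARY, NULL, 0));
	return 0;
}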
/*
* iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
*
* Handles the actual data of the Rx packet from the fw
*/
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
@@ -271,7 +283,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb) {
IWL_ERR(mvm, "alloc_skb failed\n");
- return 0;
+ return;
}
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +296,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
rx_pkt_status);
kfree_skb(skb);
- return 0;
+ return;
}
if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
kfree_skb(skb);
- return 0;
+ return;
}
/*
@@ -366,6 +378,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
}
}
+ if (sta && ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
rcu_read_unlock();
/* set the preamble flag if appropriate */
@@ -429,9 +444,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
iwl_mvm_update_frame_stats(mvm, rate_n_flags,
rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
- iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
crypt_len, rxb);
- return 0;
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +637,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
iwl_rx_packet_payload_len(pkt));
}
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
- return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..56559d4d34ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,10 +72,60 @@
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
-struct iwl_mvm_scan_params {
- u32 max_out_time;
+enum iwl_mvm_scan_type {
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+ u32 dwell_active;
+ u32 dwell_passive;
+ u32 dwell_fragmented;
u32 suspend_time;
- bool passive_fragmented;
+ u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .dwell_active = 10,
+ .dwell_passive = 110,
+ .dwell_fragmented = 44,
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+};
+
+struct iwl_mvm_scan_params {
+ enum iwl_mvm_scan_type type;
u32 n_channels;
u16 delay;
int n_ssids;
@@ -90,15 +140,7 @@ struct iwl_mvm_scan_params {
int n_match_sets;
struct iwl_scan_probe_req preq;
struct cfg80211_match_set *match_sets;
- struct _dwell {
- u16 passive;
- u16 active;
- u16 fragmented;
- } dwell[IEEE80211_NUM_BANDS];
- struct {
- u8 iterations;
- u8 full_scan_mul; /* not used for UMAC */
- } schedule[2];
+ u8 iterations[2];
};
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -147,34 +189,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
- enum ieee80211_band band, int n_ssids)
-{
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
- return 10;
- if (band == IEEE80211_BAND_2GHZ)
- return 20 + 3 * (n_ssids + 1);
- return 10 + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
- enum ieee80211_band band)
-{
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
- return 110;
- return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -186,90 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
*global_cnt += 1;
}
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
int global_cnt = 0;
- enum ieee80211_band band;
- u8 frag_passive_dwell = 0;
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
if (!global_cnt)
- goto not_bound;
-
- params->suspend_time = 30;
- params->max_out_time = 120;
-
- if (iwl_mvm_low_latency(mvm)) {
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
- params->suspend_time = 105;
- /*
- * If there is more than one active interface make
- * passive scan more fragmented.
- */
- frag_passive_dwell = 40;
- params->max_out_time = frag_passive_dwell;
- } else {
- params->suspend_time = 120;
- params->max_out_time = 120;
- }
- }
+ return IWL_SCAN_TYPE_UNASSOC;
- if (frag_passive_dwell &&
- fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
- /*
- * P2P device scan should not be fragmented to avoid negative
- * impact on P2P device discovery. Configure max_out_time to be
- * equal to dwell time on passive channel. Take a longest
- * possible value, one that corresponds to 2GHz band
- */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- u32 passive_dwell =
- iwl_mvm_get_passive_dwell(mvm,
- IEEE80211_BAND_2GHZ);
- params->max_out_time = passive_dwell;
- } else {
- params->passive_fragmented = true;
- }
- }
-
- if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
- (params->max_out_time > 200))
- params->max_out_time = 200;
+ load = iwl_mvm_get_traffic_load(mvm);
+ low_latency = iwl_mvm_low_latency(mvm);
-not_bound:
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+ return IWL_SCAN_TYPE_FRAGMENTED;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- if (params->passive_fragmented)
- params->dwell[band].fragmented = frag_passive_dwell;
-
- params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
- band);
- params->dwell[band].active =
- iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
- }
+ if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+ return IWL_SCAN_TYPE_MILD;
- IWL_DEBUG_SCAN(mvm,
- "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
- params->max_out_time, params->suspend_time,
- params->passive_fragmented);
- IWL_DEBUG_SCAN(mvm,
- "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
- params->dwell[IEEE80211_BAND_2GHZ].passive,
- params->dwell[IEEE80211_BAND_2GHZ].active,
- params->dwell[IEEE80211_BAND_2GHZ].fragmented);
- IWL_DEBUG_SCAN(mvm,
- "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
- params->dwell[IEEE80211_BAND_5GHZ].passive,
- params->dwell[IEEE80211_BAND_5GHZ].active,
- params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+ return IWL_SCAN_TYPE_WILD;
}
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -327,9 +290,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
return buf;
}
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +303,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
- return 0;
}
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
-
- return 0;
}
static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +326,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
}
}
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -395,6 +352,11 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
aborted ? "aborted" : "completed",
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -406,9 +368,14 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
aborted ? "aborted" : "completed",
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
@@ -426,8 +393,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->last_ebs_successful =
scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
- return 0;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,13 +716,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
- cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
- if (params->passive_fragmented)
- cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->max_out_time = cpu_to_le32(params->max_out_time);
- cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
}
@@ -794,7 +757,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
{
- return params->schedule[0].iterations + params->schedule[1].iterations;
+ return params->iterations[0] + params->iterations[1];
}
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -808,7 +771,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params->passive_fragmented)
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
@@ -861,11 +824,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ssid_bitmap <<= 1;
cmd->schedule[0].delay = cpu_to_le16(params->interval);
- cmd->schedule[0].iterations = params->schedule[0].iterations;
- cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+ cmd->schedule[0].iterations = params->iterations[0];
+ cmd->schedule[0].full_scan_mul = 1;
cmd->schedule[1].delay = cpu_to_le16(params->interval);
- cmd->schedule[1].iterations = params->schedule[1].iterations;
- cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+ cmd->schedule[1].iterations = params->iterations[1];
+ cmd->schedule[1].full_scan_mul = 1;
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
cmd->channel_opt[0].flags =
@@ -937,9 +900,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
int num_channels =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
- int ret, i, j = 0, cmd_size, data_size;
+ int ret, i, j = 0, cmd_size;
struct iwl_host_cmd cmd = {
- .id = SCAN_CFG_CMD,
+ .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +914,6 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (!scan_config)
return -ENOMEM;
- data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
- scan_config->hdr.size = cpu_to_le16(data_size);
scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,17 +974,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
- cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
- if (params->passive_fragmented)
- cmd->fragmented_dwell =
- params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->max_out_time = cpu_to_le32(params->max_out_time);
- cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
- if (iwl_mvm_scan_total_iterations(params) == 0)
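+	/* a total of one iteration means a one-shot regular scan */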
+ if (iwl_mvm_scan_total_iterations(params) == 1)
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
@@ -1059,7 +1018,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->passive_fragmented)
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1099,8 +1058,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return uid;
memset(cmd, 0, ksize(cmd));
- cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
- sizeof(struct iwl_mvm_umac_cmd_hdr));
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
@@ -1109,6 +1066,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
+ if (type == IWL_MVM_SCAN_SCHED)
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -1227,17 +1187,15 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.n_match_sets = 0;
params.match_sets = NULL;
- params.schedule[0].iterations = 1;
- params.schedule[0].full_scan_mul = 0;
- params.schedule[1].iterations = 0;
- params.schedule[1].full_scan_mul = 0;
+ params.iterations[0] = 1;
+ params.iterations[1] = 0;
- iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+ params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = SCAN_REQ_UMAC;
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
ret = iwl_mvm_scan_umac(mvm, vif, &params,
IWL_MVM_SCAN_REGULAR);
} else {
@@ -1310,10 +1268,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_match_sets = req->n_match_sets;
params.match_sets = req->match_sets;
- params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
- params.schedule[0].full_scan_mul = 1;
- params.schedule[1].iterations = 0xff;
- params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
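+	/* schedule[0] is unused (0 iterations); schedule[1] repeats 0xff (max) times */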
+ params.iterations[0] = 0;
+ params.iterations[1] = 0xff;
+
+ params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
if (req->interval > U16_MAX) {
IWL_DEBUG_SCAN(mvm,
@@ -1336,8 +1294,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.delay = req->delay;
}
- iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret)
return ret;
@@ -1345,7 +1301,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- hcmd.id = SCAN_REQ_UMAC;
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
} else {
hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1371,9 +1327,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1381,7 +1336,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
- return 0;
+ return;
/* if the scan is already stopping, we don't need to notify mac80211 */
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1392,26 +1347,26 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
IWL_DEBUG_SCAN(mvm,
"Scan completed, uid %u type %u, status %s, EBS status %s\n",
uid, mvm->scan_uid_status[uid],
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
iwl_mvm_ebs_status_str(notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time from last iteration %d\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
-
- return 0;
}
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1423,15 +1378,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
- return 0;
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
- struct iwl_umac_scan_abort cmd = {
- .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
- sizeof(struct iwl_mvm_umac_cmd_hdr)),
- };
+ struct iwl_umac_scan_abort cmd = {};
int uid, ret;
lockdep_assert_held(&mvm->mutex);
@@ -1448,7 +1399,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SCAN_ABORT_UMAC,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@@ -1458,7 +1412,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
struct iwl_notification_wait wait_scan_done;
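+	/* notification ids widened to u16 to make room for a command group byte */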
- static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
SCAN_OFFLOAD_COMPLETE, };
int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..df216cd0c98f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
- int i;
+ int i, max = -1, max_offs = -1;
lockdep_assert_held(&mvm->mutex);
- i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+ /* Pick the unused key offset with the highest 'deleted'
+ * counter. Every time a key is deleted, all the counters
+ * are incremented and the one that was just deleted is
+ * reset to zero. Thus, the highest counter is the one
+ * that was deleted longest ago. Pick that one.
+ */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (test_bit(i, mvm->fw_key_table))
+ continue;
+ if (mvm->fw_key_deleted[i] > max) {
+ max = mvm->fw_key_deleted[i];
+ max_offs = i;
+ }
+ }
- if (i == STA_KEY_MAX_NUM)
+ if (max_offs < 0)
return STA_KEY_IDX_INVALID;
- __set_bit(i, mvm->fw_key_table);
+ __set_bit(max_offs, mvm->fw_key_table);
- return i;
+ return max_offs;
}
static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1277,8 +1290,6 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
const u8 *pn;
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
- ieee80211_aes_cmac_calculate_k1_k2(keyconf,
- igtk_cmd.K1, igtk_cmd.K2);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -1401,6 +1412,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
int ret;
+ static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
lockdep_assert_held(&mvm->mutex);
@@ -1467,7 +1479,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta->addr, ret);
+ sta ? sta->addr : zero_addr, ret);
return ret;
}
@@ -1478,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
- int ret;
+ int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -1497,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
return -ENOENT;
}
+ /* track which key was deleted last */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (mvm->fw_key_deleted[i] < U8_MAX)
+ mvm->fw_key_deleted[i]++;
+ }
+ mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
if (sta_id == IWL_MVM_STATION_COUNT) {
IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
return 0;
@@ -1660,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1670,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
u32 sta_id = le32_to_cpu(notif->sta_id);
if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
- return 0;
+ return;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (!IS_ERR_OR_NULL(sta))
ieee80211_sta_eosp(sta);
rcu_read_unlock();
-
- return 0;
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 748f5dc3f9f4..eedb215eba3f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index a87b506c8c72..fe2fa5650443 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
pkt = cmd.resp_pkt;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
- pkt->hdr.flags);
- goto exit;
- }
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
- goto exit;
+ WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
/* we don't really care about the response at this point */
-exit:
iwl_free_resp(&cmd);
}
@@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
/* can fail sometimes */
if (!le32_to_cpu(notif->status)) {
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
- goto out;
+ return;
}
if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
- goto out;
+ return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
- goto out;
+ return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
@@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
msecs_to_jiffies(delay));
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
- return 0;
}
static int
@@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
- if (info->control.hw_key)
- iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+ hdr = (void *)skb->data;
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+ }
iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
mvmsta->sta_id);
- hdr = (void *)skb->data;
iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..dbd7d544575d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
{
lockdep_assert_held(&mvm->time_event_lock);
- if (te_data->id == TE_MAX)
+ if (!te_data->vif)
return;
list_del(&te_data->list);
@@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
/*
* The Rx handler for time event notifications
*/
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
}
unlock:
spin_unlock_bh(&mvm->time_event_lock);
-
- return 0;
}
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_cmd *te_cmd)
{
- static const u8 time_event_response[] = { TIME_EVENT_CMD };
+ static const u16 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
int ret;
@@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+ const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
struct iwl_notification_wait wait_te_notif;
struct iwl_time_event_cmd time_cmd = {};
@@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
- time_cmd.apply_time =
- cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+ time_cmd.apply_time = cpu_to_le32(0);
time_cmd.max_frags = TE_V2_FRAG_NONE;
time_cmd.max_delay = cpu_to_le32(max_delay);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index de4fbc6d57f1..cbdf8e52a5f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
/*
* iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
*/
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/**
* iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644
index 000000000000..380972f8fb82
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.c
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
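+/* request ids fit in a u8, so 256 safely marks "no active range request" */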
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+
+ tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (IWL_MVM_TOF_IS_RESPONDER) {
+ tof_data->responder_cfg.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+ tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+ }
+#endif
+
+ tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+ tof_data->range_req.req_timeout = 1;
+ tof_data->range_req.initiator = 1;
+ tof_data->range_req.report_policy = 3;
+
+ tof_data->range_req_ext.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *enabled = _data;
+
+	/* a non-BSS vif exists, so ToF must stay disabled */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+	bool enabled = true;	/* the iterator below only ever clears this */
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_tof_iterator, &enabled);
+ if (!enabled) {
+ IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+ return -EINVAL;
+ }
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+ struct iwl_tof_range_abort_cmd cmd = {
+ .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+ .request_id = id,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+ id, mvm->tof_data.active_range_request);
+ return -EINVAL;
+ }
+
+ /* after abort is sent there's no active request anymore */
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+ IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+ return -EIO;
+ }
+
+ cmd->sta_id = mvmvif->bcast_sta.sta_id;
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .len = { sizeof(mvm->tof_data.range_req), },
+ /* no copy because of the command size */
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+ return -EIO;
+ }
+
+ /* nesting of range requests is not supported in FW */
+ if (mvm->tof_data.active_range_request !=
+ IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+ IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+ mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+ cmd.data[0] = &mvm->tof_data.range_req;
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+ return -EIO;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(mvm->tof_data.range_req_ext),
+ &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+ if (resp->request_id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+ resp->request_id, mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ memcpy(&mvm->tof_data.range_resp, resp,
+ sizeof(struct iwl_tof_range_rsp_ntfy));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+ IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+ return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_neighbor_report *report =
+ (struct iwl_tof_neighbor_report *)data;
+
+ IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+ report->bssid, report->request_token, report->status);
+ return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+ case TOF_RANGE_RESPONSE_NOTIF:
+ iwl_mvm_tof_range_resp(mvm, resp->data);
+ break;
+ case TOF_MCSI_DEBUG_NOTIF:
+ iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+ break;
+ case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+ iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+			le32_to_cpu(resp->sub_grp_cmd_id));
+ break;
+ }
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644
index 000000000000..50ae8adaaa6e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+ struct iwl_tof_config_cmd tof_cfg;
+ struct iwl_tof_range_req_cmd range_req;
+ struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+ struct iwl_tof_range_rsp_ntfy range_resp;
+ u8 last_abort_id;
+ u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 80d07db6e7e8..fe7145c2c98a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -33,6 +33,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
return true;
}
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
int temp;
/* the notification is handled synchronously in ctkill, so skip here */
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
- return 0;
+ return;
temp = iwl_mvm_temp_notif_parse(mvm, pkt);
if (temp < 0)
- return 0;
+ return;
iwl_mvm_tt_temp_changed(mvm, temp);
-
- return 0;
}
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
int iwl_mvm_get_temp(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_temp_notif;
- static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+ static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
int ret, temp;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..6df5aada4f16 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+ else if (ieee80211_is_action(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
else
- tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
/* The spec allows Action frames in A-MPDU, but we don't support
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
- tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
} else {
- tx_cmd->pm_frame_timeout = 0;
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
}
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -252,7 +254,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
if (info->band == IEEE80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
- rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
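+		/* non_shared_ant is already an antenna bitmask, don't BIT() it */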
+ rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -268,19 +270,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
/*
* Sets the fields in the Tx cmd that are crypto related
*/
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int hdrlen)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u8 *crypto_hdr = skb_frag->data + hdrlen;
+ u64 pn;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+ pn = atomic64_inc_return(&keyconf->tx_pn);
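+		/* build the CCMP header: PN0, PN1, reserved, ExtIV | key idx, PN2..PN5 */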
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -308,7 +320,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
*/
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta, u8 sta_id)
+ int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +337,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
- iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
@@ -346,6 +358,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
return -1;
@@ -366,23 +379,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
/*
- * If the interface on which frame is sent is the P2P_DEVICE
+ * If the interface on which the frame is sent is the P2P_DEVICE
* or an AP/GO interface use the broadcast station associated
- * with it; otherwise use the AUX station.
+ * with it; otherwise if the interface is a managed interface
+ * use the AP station associated with it for multicast traffic
+	 * (this is not possible for unicast packets, as a TDLS discovery
+	 * response is sent without a station entry); otherwise use the
+ * AUX station.
*/
- if (info->control.vif &&
- (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info->control.vif->type == NL80211_IFTYPE_AP)) {
+ sta_id = mvm->aux_sta.sta_id;
+ if (info->control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
- sta_id = mvmvif->bcast_sta.sta_id;
- } else {
- sta_id = mvm->aux_sta.sta_id;
+
+ if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ info->control.vif->type == NL80211_IFTYPE_AP)
+ sta_id = mvmvif->bcast_sta.sta_id;
+ else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id != IWL_MVM_STATION_COUNT)
+ sta_id = ap_sta_id;
+ }
}
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
if (!dev_cmd)
return -1;
@@ -390,7 +414,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +440,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 tid = IWL_MAX_TID_COUNT;
u8 txq_id = info->hw_queue;
bool is_data_qos = false, is_ampdu = false;
+ int hdrlen;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -426,7 +452,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
return -1;
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
if (!dev_cmd)
goto drop;
@@ -458,7 +484,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
}
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
@@ -911,8 +937,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
rcu_read_unlock();
}
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +946,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
iwl_mvm_rx_tx_cmd_single(mvm, pkt);
else
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
- return 0;
}
static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +965,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
(void *)(uintptr_t)tid_data->rate_n_flags;
}
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +987,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
- return 0;
+ return;
rcu_read_lock();
@@ -974,7 +996,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
/* Reclaiming frames for a station that has been deleted ? */
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
- return 0;
+ return;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1007,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
"invalid BA notification: Q %d, tid %d, flow %d\n",
tid_data->txq_id, tid, scd_flow);
rcu_read_unlock();
- return 0;
+ return;
}
spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1094,6 @@ out:
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status(mvm->hw, skb);
}
-
- return 0;
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 03f8e06dded7..a7d434256423 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
return ret;
}
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
u32 flags, u16 len, const void *data)
{
struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
goto out_free_resp;
}
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- ret = -EIO;
- goto out_free_resp;
- }
-
resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
ret = -EIO;
@@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
/*
* We assume that the caller set the status to the success value
*/
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
const void *data, u32 *status)
{
struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
return fw_rate_idx_to_plcp[rate_idx];
}
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
le32_to_cpu(err_resp->error_service));
IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
le64_to_cpu(err_resp->timestamp));
- return 0;
}
/*
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..b0825c402c73 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
@@ -613,6 +614,7 @@ static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
@@ -630,20 +632,16 @@ static int iwl_pci_resume(struct device *device)
return 0;
/*
- * On suspend, ict is disabled, and the interrupt mask
- * gets cleared. Reconfigure them both in case of d0i3
- * image. Otherwise, only enable rfkill interrupt (in
- * order to keep track of the rfkill status)
+ * Enable rfkill interrupt (in order to keep track of
+ * the rfkill status)
*/
- if (trans->wowlan_d0i3) {
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
- } else {
- iwl_enable_rfkill_int(trans);
- }
+ iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
+
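+	/* serialize rfkill reporting with stop_device/start_fw */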
+ mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..feb2f7e81134 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,12 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
/*
* RX related structures and functions
*/
@@ -299,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @rx_buf_size_8k: 8 kB RX buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
* @rx_page_order: page order for receive buffer size
* @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
* @fw_mon_phys: physical address of the buffer for the firmware monitor
* @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -320,9 +328,11 @@ struct iwl_trans_pcie {
dma_addr_t ict_tbl_dma;
int ict_index;
bool use_ict;
+ bool is_down;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
+ struct mutex mutex;
u32 inta_mask;
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
@@ -349,6 +359,7 @@ struct iwl_trans_pcie {
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
+ bool wide_cmd_header;
u32 rx_page_order;
const char *const *command_names;
@@ -420,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status);
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..e06591f625c4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -281,12 +281,13 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* iwl_pcie_rx_alloc_page - allocates and returns a page.
*
*/
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+ gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct page *page;
- gfp_t gfp_mask = GFP_KERNEL;
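+	/* callers choose the priority: GFP_KERNEL from process context,
+	 * or an atomic variant when allocating from the RX path
+	 */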
+ gfp_t gfp_mask = priority;
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -324,7 +325,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -340,7 +341,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
spin_unlock(&rxq->lock);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
+ page = iwl_pcie_rx_alloc_page(trans, priority);
if (!page)
return;
@@ -414,7 +415,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
*/
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
- iwl_pcie_rxq_alloc_rbs(trans);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
iwl_pcie_rxq_restock(trans);
}
@@ -429,17 +430,22 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct list_head local_empty;
+ int pending = atomic_xchg(&rba->req_pending, 0);
- while (atomic_read(&rba->req_pending)) {
+ IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+ /* If we were scheduled - there is at least one request */
+ spin_lock(&rba->lock);
+ /* swap out the rba->rbd_empty to a local list */
+ list_replace_init(&rba->rbd_empty, &local_empty);
+ spin_unlock(&rba->lock);
+
+ while (pending) {
int i;
- struct list_head local_empty;
struct list_head local_allocated;
INIT_LIST_HEAD(&local_allocated);
- spin_lock(&rba->lock);
- /* swap out the entire rba->rbd_empty to a local list */
- list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
@@ -457,7 +463,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans);
+ page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
if (!page)
continue;
rxb->page = page;
@@ -481,16 +487,28 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
i++;
}
+ pending--;
+ if (!pending) {
+ pending = atomic_xchg(&rba->req_pending, 0);
+ IWL_DEBUG_RX(trans,
+ "Pending allocation requests = %d\n",
+ pending);
+ }
+
spin_lock(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
- /* add the unused rbds back to the allocator empty list */
- list_splice_tail(&local_empty, &rba->rbd_empty);
+ /* get more empty RBDs for current pending requests */
+ list_splice_tail_init(&rba->rbd_empty, &local_empty);
spin_unlock(&rba->lock);
- atomic_dec(&rba->req_pending);
atomic_inc(&rba->req_ready);
}
+
+ spin_lock(&rba->lock);
+ /* return unused rbds to the allocator empty list */
+ list_splice_tail(&local_empty, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
}
/*
@@ -507,13 +525,16 @@ static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
- if (atomic_dec_return(&rba->req_ready) < 0) {
- atomic_inc(&rba->req_ready);
- IWL_DEBUG_RX(trans,
- "Allocation request not ready, pending requests = %d\n",
- atomic_read(&rba->req_pending));
+ /*
+ * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+ * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+ * function will return -ENOMEM, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+	 * req_ready > 0, i.e. there are ready requests and the function
+ * hands one request to the caller.
+ */
+ if (atomic_dec_if_positive(&rba->req_ready) < 0)
return -ENOMEM;
- }
spin_lock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
@@ -777,18 +798,21 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
*/
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb,
- struct iwl_rxq *rxq)
+ struct iwl_rxq *rxq, bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- /* Count the used RBDs */
- rxq->used_count++;
-
/* Move the RBD to the used list, will be moved to allocator in batches
* before claiming or posting a request */
list_add_tail(&rxb->list, &rxq->rx_used);
+ if (unlikely(emergency))
+ return;
+
+ /* Count the allocator owned RBDs */
+ rxq->used_count++;
+
/* If we have RX_POST_REQ_ALLOC new released rx buffers -
* issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
* used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
@@ -807,7 +831,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_mem_buffer *rxb,
+ bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -823,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
struct iwl_rx_packet *pkt;
- struct iwl_device_cmd *cmd;
u16 sequence;
bool reclaim;
- int index, cmd_index, err, len;
+ int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
._rx_page_order = trans_pcie->rx_page_order,
@@ -874,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- if (reclaim)
- cmd = txq->entries[cmd_index].cmd;
- else
- cmd = NULL;
-
- err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+ iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
if (reclaim) {
kzfree(txq->entries[cmd_index].free_buf);
@@ -897,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (!rxcb._page_stolen)
- iwl_pcie_hcmd_complete(trans, &rxcb, err);
+ iwl_pcie_hcmd_complete(trans, &rxcb);
else
IWL_WARN(trans, "Claim null rxb?\n");
}
@@ -928,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
/*
@@ -944,7 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i, j;
+ u32 r, i, j, count = 0;
+ bool emergency = false;
restart:
spin_lock(&rxq->lock);
@@ -960,12 +980,15 @@ restart:
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
+ if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+ emergency = true;
+
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
r, i, rxb);
- iwl_pcie_rx_handle_rb(trans, rxb);
+ iwl_pcie_rx_handle_rb(trans, rxb, emergency);
i = (i + 1) & RX_QUEUE_MASK;
@@ -975,10 +998,16 @@ restart:
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
- /* Add the remaining 6 empty RBDs for allocator use */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+ !emergency) {
+ /* Add the remaining 6 empty RBDs
+ * for allocator use
+ */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used,
+ &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+ }
/* If not ready - continue, will try to reclaim later.
* No need to reschedule work - allocator exits only on
@@ -995,9 +1024,22 @@ restart:
}
}
}
- /* handle restock for two cases:
+ if (emergency) {
+ count++;
+ if (count == 8) {
+ count = 0;
+ if (rxq->used_count < RX_QUEUE_SIZE / 3)
+ emergency = false;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ spin_lock(&rxq->lock);
+ }
+ }
+ /* handle restock for three cases, which can all occur at once:
* - we just pulled buffers from the allocator
- * - we have 8+ unstolen pages accumulated */
+ * - we have 8+ unstolen pages accumulated
+ * - we are in emergency and allocated buffers
+ */
if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
rxq->read = i;
spin_unlock(&rxq->lock);
@@ -1010,6 +1052,21 @@ restart:
rxq->read = i;
spin_unlock(&rxq->lock);
+ /*
+ * Handle a case where in emergency there are some unallocated RBDs.
+ * Those RBDs are in the used list, but are not tracked by the queue's
+ * used_count, which only counts allocator-owned RBDs.
+ * Unallocated emergency RBDs must be allocated on exit, otherwise,
+ * when called again, this function may not be in emergency mode and
+ * they would be handed to the allocator with no tracking in the RBD
+ * allocator counters, which would lead to them never being claimed
+ * back by the queue.
+ * By allocating them here, they are now in the queue free list, and
+ * will be restocked by the next call of iwl_pcie_rxq_restock.
+ */
+ if (unlikely(emergency && count))
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
@@ -1020,6 +1077,7 @@ restart:
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
@@ -1043,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans);
local_bh_enable();
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+ del_timer(&trans_pcie->txq[i].stuck_timer);
+
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
}
@@ -1251,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rfkill++;
+ mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1443,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+ val |= CSR_DRAM_INT_TBL_ENABLE |
+ CSR_DRAM_INIT_TBL_WRAP_CHECK |
+ CSR_DRAM_INIT_TBL_WRITE_POINTER;
IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..6ba7d300b08f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
- if (!trans->cfg->apmg_not_supported)
+ if (trans->cfg->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
mdelay(5);
}
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ msleep(1);
+
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ if (ret >= 0) {
+ ret = 0;
+ goto out;
+ }
usleep_range(200, 1000);
t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
IWL_ERR(trans, "Couldn't prepare the card\n");
+out:
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
return ret;
}
@@ -764,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
last_read_idx = i;
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between
+ * CPU1 and CPU2.
+ * PAGING_SEPARATOR_SECTION delimiter - separates between
+ * CPU2 non-paged and CPU2 paging sections.
+ */
if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans,
"Break since Data not valid or Empty section, sec = %d\n",
i);
@@ -813,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
last_read_idx = i;
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between
+ * CPU1 and CPU2.
+ * PAGING_SEPARATOR_SECTION delimiter - separates between
+ * CPU2 non-paged and CPU2 paging sections.
+ */
if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans,
"Break since Data not valid or Empty section, sec = %d\n",
i);
@@ -881,6 +911,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
case PRPH_CLEARBIT:
iwl_clear_bits_prph(trans, addr, BIT(val));
break;
+ case PRPH_BLOCKBIT:
+ if (iwl_read_prph(trans, addr) & BIT(val)) {
+ IWL_ERR(trans,
+ "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+ val, addr);
+ goto monitor;
+ }
+ break;
default:
IWL_ERR(trans, "FW debug - unknown OP %d\n",
dest->reg_ops[i].op);
@@ -888,6 +926,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
}
+monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans_pcie->fw_mon_phys >> dest->base_shift);
@@ -982,13 +1021,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
iwl_enable_rfkill_int(trans);
@@ -1000,15 +1051,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
else
clear_bit(STATUS_RFKILL, &trans->status);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- if (hw_rfkill && !run_in_rfkill)
- return -ERFKILL;
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
ret = iwl_pcie_nic_init(trans);
if (ret) {
IWL_ERR(trans, "Unable to init nic\n");
- return ret;
+ goto out;
}
/* make sure rfkill handshake bits are cleared */
@@ -1026,9 +1079,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* Load the given image to the HW */
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- return iwl_pcie_load_given_ucode_8000(trans, fw);
+ ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
- return iwl_pcie_load_given_ucode(trans, fw);
+ ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1037,11 +1094,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_tx_start(trans, scd_addr);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, was_hw_rfkill;
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ trans_pcie->is_down = true;
+
was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
@@ -1131,14 +1195,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_prepare_card_hw(trans);
}
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+ _iwl_trans_pcie_stop_device(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+}
+
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
- iwl_trans_pcie_stop_device(trans, true);
+ _iwl_trans_pcie_stop_device(trans, true);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->wowlan_d0i3) {
+ /* Enable persistence mode to avoid reset */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ }
+
iwl_disable_interrupts(trans);
/*
@@ -1150,17 +1236,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
iwl_pcie_disable_ict(trans);
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- /*
- * reset TX queues -- some of their registers reset during S3
- * so if we don't reset everything here the D3 image would try
- * to execute some invalid memory upon resume
- */
- iwl_trans_pcie_tx_reset(trans);
+ if (!trans->wowlan_d0i3) {
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+ }
iwl_pcie_set_pwr(trans, true);
}
@@ -1202,12 +1292,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_set_pwr(trans, false);
- iwl_trans_pcie_tx_reset(trans);
+ if (trans->wowlan_d0i3) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ } else {
+ iwl_trans_pcie_tx_reset(trans);
- ret = iwl_pcie_rx_init(trans);
- if (ret) {
- IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
- return ret;
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans,
+ "Failed to resume the device (RX reset)\n");
+ return ret;
+ }
}
val = iwl_read32(trans, CSR_RESET);
@@ -1219,11 +1315,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
+ lockdep_assert_held(&trans_pcie->mutex);
+
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1240,20 +1339,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
+ /* Set is_down to false here so that... */
+ trans_pcie->is_down = false;
+
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
+ /* ... rfkill can call stop_device and set it false if needed */
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+ ret = _iwl_trans_pcie_start_hw(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+
+ return ret;
+}
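
The start_hw/stop_device split above is the usual locked-wrapper idiom: the underscore-prefixed helper asserts lockdep_assert_held(&trans_pcie->mutex) and the thin public wrapper takes the mutex, so a caller that already holds it (here iwl_trans_pcie_rf_kill(), invoked with the mutex held from the interrupt handler) can call the helper without deadlocking. In miniature (hypothetical struct foo):

	static void _do_work(struct foo *f)
	{
		lockdep_assert_held(&f->mutex);
		/* the actual work */
	}

	static void do_work(struct foo *f)
	{
		mutex_lock(&f->mutex);
		_do_work(f);
		mutex_unlock(&f->mutex);
	}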
+
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ mutex_lock(&trans_pcie->mutex);
+
/* disable interrupts - don't enable HW RF kill interrupt */
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
@@ -1266,6 +1383,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
+
+ mutex_unlock(&trans_pcie->mutex);
+
+ synchronize_irq(trans_pcie->pci_dev->irq);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1326,6 +1447,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
else
trans_pcie->rx_page_order = get_order(4 * 1024);
+ trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1338,11 +1460,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
* As this function may be called again in some corner cases don't
* do anything if NAPI was already initialized.
*/
- if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ if (!trans_pcie->napi.poll) {
init_dummy_netdev(&trans_pcie->napi_dev);
- iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
- &trans_pcie->napi_dev,
- iwl_pcie_dummy_napi_poll, 64);
+ netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+ iwl_pcie_dummy_napi_poll, 64);
}
}
@@ -2169,6 +2290,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
return prph_len;
}
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ int allocated_rb_nums)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ u32 i, r, j, rb_len = 0;
+
+ spin_lock(&rxq->lock);
+
+ r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+ for (i = rxq->read, j = 0;
+ i != r && j < allocated_rb_nums;
+ i = (i + 1) & RX_QUEUE_MASK, j++) {
+ struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+ struct iwl_fw_error_dump_rb *rb;
+
+ dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+ DMA_FROM_DEVICE);
+
+ rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+ (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+ rb = (void *)(*data)->data;
+ rb->index = cpu_to_le32(i);
+ memcpy(rb->data, page_address(rxb->page), max_len);
+ /* remap the page so rxb->page_dma stays valid when the page is later used or freed */
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+ max_len,
+ DMA_FROM_DEVICE);
+
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+ spin_unlock(&rxq->lock);
+
+ return rb_len;
+}
#define IWL_CSR_TO_DUMP (0x250)
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2238,17 +2400,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
return monitor_len;
}
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ u32 monitor_len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 len = 0;
+
+ if ((trans_pcie->fw_mon_page &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+ trans->dbg_dest_tlv) {
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->dbg_dest_tlv) {
+ write_ptr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+ fw_mon_data = (void *)(*data)->data;
+ fw_mon_data->fw_mon_wr_ptr =
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
+ fw_mon_data->fw_mon_cycle_cnt =
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+ fw_mon_data->fw_mon_base_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base));
+
+ len += sizeof(**data) + sizeof(*fw_mon_data);
+ if (trans_pcie->fw_mon_page) {
+ /*
+ * The firmware is now asserted, it won't write anything
+ * to the buffer. CPU can take ownership to fetch the
+ * data. The buffer will be handed back to the device
+ * before the firmware will be restarted.
+ */
+ dma_sync_single_for_cpu(trans->dev,
+ trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ memcpy(fw_mon_data->data,
+ page_address(trans_pcie->fw_mon_page),
+ trans_pcie->fw_mon_size);
+
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ iwl_trans_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ monitor_len =
+ iwl_trans_pci_dump_marbh_monitor(trans,
+ fw_mon_data,
+ monitor_len);
+ } else {
+ /* Didn't match anything - output no monitor data */
+ monitor_len = 0;
+ }
+
+ len += monitor_len;
+ (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+ }
+
+ return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len;
+ u32 len, num_rbs;
u32 monitor_len;
int i, ptr;
+ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
/* transport dump header */
len = sizeof(*dump_data);
@@ -2257,22 +2499,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += sizeof(*data) +
cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
- /* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
- /* PRPH registers */
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
- iwl_prph_dump_addr[i].start + 4;
-
- len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
- }
-
- /* FH registers */
- len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
/* FW monitor */
if (trans_pcie->fw_mon_page) {
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2300,6 +2526,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
monitor_len = 0;
}
+ if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ dump_data = vzalloc(len);
+ if (!dump_data)
+ return NULL;
+
+ data = (void *)dump_data->data;
+ len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ dump_data->len = len;
+
+ return dump_data;
+ }
+
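In other words, a trigger flagged IWL_FW_DBG_TRIGGER_MONITOR_ONLY short-circuits the dump to the transport header plus monitor data; the CSR, PRPH and FH register sections (and, on firmware error, the RX buffers) are sized and collected only on the full path below.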
+ /* CSR registers */
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+ /* PRPH registers */
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+
+ /* FH registers */
+ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+ if (dump_rbs) {
+ /* RBs */
+ num_rbs = le16_to_cpu(ACCESS_ONCE(
+ trans_pcie->rxq.rb_stts->closed_rb_num))
+ & 0x0FFF;
+ num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+ len += num_rbs * (sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_rb) +
+ (PAGE_SIZE << trans_pcie->rx_page_order));
+ }
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -2336,74 +2601,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += iwl_trans_pcie_dump_prph(trans, &data);
len += iwl_trans_pcie_dump_csr(trans, &data);
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
- /* data is already pointing to the next section */
-
- if ((trans_pcie->fw_mon_page &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
- trans->dbg_dest_tlv) {
- struct iwl_fw_error_dump_fw_mon *fw_mon_data;
- u32 base, write_ptr, wrap_cnt;
-
- /* If there was a dest TLV - use the values from there */
- if (trans->dbg_dest_tlv) {
- write_ptr =
- le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- } else {
- base = MON_BUFF_BASE_ADDR;
- write_ptr = MON_BUFF_WRPTR;
- wrap_cnt = MON_BUFF_CYCLE_CNT;
- }
+ if (dump_rbs)
+ len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
- fw_mon_data = (void *)data->data;
- fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, write_ptr));
- fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
- fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, base));
-
- len += sizeof(*data) + sizeof(*fw_mon_data);
- if (trans_pcie->fw_mon_page) {
- /*
- * The firmware is now asserted, it won't write anything
- * to the buffer. CPU can take ownership to fetch the
- * data. The buffer will be handed back to the device
- * before the firmware will be restarted.
- */
- dma_sync_single_for_cpu(trans->dev,
- trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
- memcpy(fw_mon_data->data,
- page_address(trans_pcie->fw_mon_page),
- trans_pcie->fw_mon_size);
-
- monitor_len = trans_pcie->fw_mon_size;
- } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
- /*
- * Update pointers to reflect actual values after
- * shifting
- */
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
- iwl_trans_read_mem(trans, base, fw_mon_data->data,
- monitor_len / sizeof(u32));
- } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
- monitor_len =
- iwl_trans_pci_dump_marbh_monitor(trans,
- fw_mon_data,
- monitor_len);
- } else {
- /* Didn't match anything - output no monitor data */
- monitor_len = 0;
- }
-
- len += monitor_len;
- data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
- }
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -2459,23 +2660,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
u16 pci_cmd;
- int err;
+ int ret;
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
spin_lock_init(&trans_pcie->ref_lock);
+ mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- err = pci_enable_device(pdev);
- if (err)
+ ret = pci_enable_device(pdev);
+ if (ret)
goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2695,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
- if (err) {
+ if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
@@ -2515,7 +2719,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
- err = -ENODEV;
+ ret = -ENODEV;
goto out_pci_release_regions;
}
@@ -2527,9 +2731,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- err = pci_enable_msi(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2751,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
*/
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
unsigned long flags;
- int ret;
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ ret = iwl_pcie_prepare_card_hw(trans);
+ if (ret) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ goto out_pci_disable_msi;
+ }
+
/*
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
@@ -2591,13 +2800,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- if (iwl_pcie_alloc_ict(trans))
+ ret = iwl_pcie_alloc_ict(trans);
+ if (ret)
goto out_pci_disable_msi;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
- if (err) {
+ if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -2617,5 +2827,5 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
iwl_trans_free(trans);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..a8c8a4a7420b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
@@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
/* first TB is never freed - it's the scratchbuf data */
- for (i = 1; i < num_tbs; i++)
- dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
- DMA_TO_DEVICE);
-
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ dma_unmap_page(trans->dev,
+ iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
+ }
tfd->num_tbs = 0;
}
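
The BIT(i + CMD_TB_BITMAP_POS) test above pairs with the data-TX path later in this patch, which sets out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS) for every fragment mapped with skb_frag_dma_map(). TBs with the bit set were page-mapped and need dma_unmap_page(); the rest were mapped with dma_map_single(). The convention in brief (dev/addr/len are placeholder names):

	/* map time: record that this TB came from a page fragment */
	meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);

	/* unmap time: pick the matching unmap primitive */
	if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
		dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);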
@@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
- return 0;
+ return num_tbs;
}
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
+ iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- copy_size = sizeof(out_cmd->hdr);
- cmd_size = sizeof(out_cmd->hdr);
+ if (WARN(!trans_pcie->wide_cmd_header &&
+ group_id > IWL_ALWAYS_LONG_GROUP,
+ "unsupported wide command %#x\n", cmd->id))
+ return -EINVAL;
+
+ if (group_id != 0) {
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ copy_size = sizeof(struct iwl_cmd_header);
+ cmd_size = sizeof(struct iwl_cmd_header);
+ }
/* need one for the header if the first is NOCOPY */
BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_meta->source = cmd;
/* set up the header */
-
- out_cmd->hdr.cmd = cmd->id;
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ if (group_id != 0) {
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size -
+ sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+ out_cmd->hdr.group_id = 0;
+
+ cmd_pos = sizeof(struct iwl_cmd_header);
+ copy_size = sizeof(struct iwl_cmd_header);
+ }
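
For orientation, the wide-header fields as exercised by the assignments above, reconstructed from this hunk alone (the authoritative definition lives in the iwlwifi headers, so treat the field order here as a sketch):

	struct iwl_cmd_header_wide {
		u8 cmd;			/* opcode within the group */
		u8 group_id;
		__le16 sequence;	/* queue/index, as in the legacy header */
		__le16 length;		/* payload size, wide header excluded */
		u8 reserved;
		u8 version;
	} __packed;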
/* and copy the data that needs to be copied */
- cmd_pos = offsetof(struct iwl_device_cmd, payload);
- copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy;
@@ -1464,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
IWL_DEBUG_HC(trans,
- "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
- out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ group_id, out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the scratchbuf */
@@ -1516,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
+ BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+ sizeof(out_meta->flags) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/*
* iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers)
*
* If an Rx buffer has an async callback associated with it the callback
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status)
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->source->resp_pkt = pkt;
meta->source->_rx_page_addr = (unsigned long)page_address(p);
meta->source->_rx_page_order = trans_pcie->rx_page_order;
- meta->source->handler_status = handler_status;
}
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1762,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr;
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
@@ -1771,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
void *tb1_addr;
u16 len, tb1_len, tb2_len;
bool wait_write_ptr;
- __le16 fc = hdr->frame_control;
- u8 hdr_len = ieee80211_hdrlen(fc);
+ __le16 fc;
+ u8 hdr_len;
u16 wifi_seq;
+ int i;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
@@ -1782,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
"TX on unused queue %d\n", txq_id))
return -EINVAL;
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ /* mac80211 always puts the full header into the SKB's head,
+ * so there's no need to check if it's readable there
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdr_len = ieee80211_hdrlen(fc);
+
spin_lock(&txq->lock);
/* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1812,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta->flags = 0;
/*
* The second TB (tb1) points to the remainder of the TX command
@@ -1845,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/*
* Set up TFD's third entry to point directly to remainder
- * of skb, if any (802.11 null frames have no payload).
+ * of skb's head, if any
*/
- tb2_len = skb->len - hdr_len;
+ tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
dma_addr_t tb2_phys = dma_map_single(trans->dev,
skb->data + hdr_len,
@@ -1860,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
+ /* set up the remaining entries to point to the data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ goto out_err;
+ }
+ tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+ skb_frag_size(frag), false);
+
+ out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ }
+
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
@@ -1869,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
&dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
- skb->data + hdr_len, tb2_len);
+ hdr_len, skb->len - hdr_len);
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr) {
- if (txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ if (txq->wd_timeout) {
+ /*
+ * If the TXQ is active, then set the timer, if not,
+ * set the timer in remainder so that the timer will
+ * be armed with the right value when the station will
+ * wake up.
+ */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
+ }
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
iwl_trans_pcie_ref(trans);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 99e873dc8684..520bef80747f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2399,6 +2399,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
@@ -2676,7 +2677,7 @@ static void hwsim_mon_setup(struct net_device *dev)
dev->netdev_ops = &hwsim_netdev_ops;
dev->destructor = free_netdev;
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
eth_zero_addr(dev->dev_addr);
dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@ static int hwsim_init_netlink(void)
goto failure;
rc = netlink_register_notifier(&hwsim_netlink_notifier);
- if (rc)
+ if (rc) {
+ genl_unregister_family(&hwsim_genl_family);
goto failure;
+ }
return 0;
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7217da4f1543..57a80cfa39b1 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -112,7 +112,9 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
if (!skb)
return;
- ieee80211_rx_ni(dev->hw, skb);
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->hw, skb);
+ spin_unlock(&dev->mac_lock);
}
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@ static void mt7601u_complete_tx(struct urb *urb)
skb = q->e[q->start].skb;
trace_mt_tx_dma_done(dev, skb);
- mt7601u_tx_status(dev, skb);
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
if (q->used == q->entries - q->entries / 8)
ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
q->start = (q->start + 1) % q->entries;
q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
- if (urb->status)
- goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
queue_delayed_work(dev->stat_wq, &dev->stat_work,
msecs_to_jiffies(10));
-out:
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt7601u_tx_status(dev, skb);
+ }
}
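
The point of the tasklet: mt7601u_complete_tx() runs in URB-completion (interrupt) context under dev->tx_lock, while mt7601u_tx_status() now ends up in ieee80211_tx_status() under the new dev->mac_lock, so status reporting is deferred rather than run from the hardirq path. The handoff in miniature (hypothetical queue and function names):

	/* IRQ context: park the skb and poke the tasklet */
	__skb_queue_tail(&dev->done_queue, skb);
	tasklet_schedule(&dev->done_tasklet);

	/* tasklet: drain a private copy of the queue outside the lock */
	skb_queue_splice_init(&dev->done_queue, &local_queue);
	while (!skb_queue_empty(&local_queue))
		report_tx_status(dev, __skb_dequeue(&local_queue));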
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@ int mt7601u_dma_init(struct mt7601u_dev *dev)
{
int ret = -ENOMEM;
+ tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@ void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
mt7601u_free_rx(dev);
mt7601u_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index df3dd56199a7..26190fd33407 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -454,8 +454,10 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
spin_lock_init(&dev->tx_lock);
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->mac_lock);
spin_lock_init(&dev->con_mon_lock);
atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
if (!dev->stat_wq) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index 7514bce1ac91..e21c53ed09fb 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -181,7 +181,11 @@ void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
}
mt76_mac_fill_tx_status(dev, &info, stat);
+
+ spin_lock_bh(&dev->mac_lock);
ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 9102be6b95cb..428bd2f10b7b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -141,12 +141,13 @@ enum {
/**
* struct mt7601u_dev - adapter structure
* @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
* @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
- flags in @state.
+ * flags in @state.
* @rx_lock: protects @rx_q.
* @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
* @mutex: ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex: ensures atomicity of vendor requests.
+ * @vendor_req_mutex: protects @vend_buf, ensures atomicity of split writes.
* @reg_atomic_mutex: ensures atomicity of indirect register accesses
* (accesses to RF and BBP).
* @hw_atomic_mutex: ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@ struct mt7601u_dev {
struct mt76_wcid __rcu *wcid[N_WCIDS];
spinlock_t lock;
+ spinlock_t mac_lock;
const u16 *beacon_offsets;
@@ -184,6 +186,8 @@ struct mt7601u_dev {
struct mt7601u_eeprom_params *ee;
struct mutex vendor_req_mutex;
+ void *vend_buf;
+
struct mutex reg_atomic_mutex;
struct mutex hw_atomic_mutex;
@@ -197,7 +201,9 @@ struct mt7601u_dev {
/* TX */
spinlock_t tx_lock;
+ struct tasklet_struct tx_tasklet;
struct mt7601u_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
atomic_t avg_ampdu_len;
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 0be2080ceab3..a0a33dc8f6bc 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -116,7 +116,10 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
ieee80211_tx_status(dev->hw, skb);
+ spin_unlock(&dev->mac_lock);
}
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 54dba4001865..416c6045ff31 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -92,10 +92,9 @@ void mt7601u_complete_urb(struct urb *urb)
complete(cmpl);
}
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
{
int i, ret;
struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
buf, buflen, ret);
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
if (ret >= 0 || ret == -ENODEV)
return ret;
@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
return ret;
}
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen)
-{
- int ret;
-
- mutex_lock(&dev->vendor_req_mutex);
-
- ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
- buf, buflen);
- if (ret == -ENODEV)
- set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
- mutex_unlock(&dev->vendor_req_mutex);
-
- return ret;
-}
-
void mt7601u_vendor_reset(struct mt7601u_dev *dev)
{
mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev)
u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
{
int ret;
- __le32 reg;
- u32 val;
+ u32 val = ~0;
WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+ mutex_lock(&dev->vendor_req_mutex);
+
ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
- 0, offset, &reg, sizeof(reg));
- val = le32_to_cpu(reg);
- if (ret > 0 && ret != sizeof(reg)) {
+ 0, offset, dev->vend_buf, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(dev->vend_buf);
+ else if (ret > 0)
dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
ret, offset);
- val = ~0;
- }
+
+ mutex_unlock(&dev->vendor_req_mutex);
trace_reg_read(dev, offset, val);
return val;
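
Two things happen in the mt7601u_rr() rework: the read now lands in dev->vend_buf, a devm_kmalloc'ed buffer allocated at probe time, instead of an on-stack __le32 (USB control transfers may be DMA'd, and DMA to stack buffers is not permitted), and vendor_req_mutex moved from the request helper into the callers so that mt7601u_vendor_single_wr()'s pair of 16-bit writes stays atomic as a unit. get_unaligned_le32() then extracts the value without assuming alignment of the raw buffer.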
@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
{
int ret;
+ mutex_lock(&dev->vendor_req_mutex);
+
ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
val & 0xffff, offset, NULL, 0);
- if (ret)
- return ret;
- return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
- val >> 16, offset + 2, NULL, 0);
+ if (!ret)
+ ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
}
void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
+ dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+ if (!dev->vend_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ret = mt7601u_assign_pipes(usb_intf, dev);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
index 49e188fa3798..bc182022b9d6 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.h
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -23,6 +23,8 @@
#define MT_VEND_DEV_MODE_RESET 1
+#define MT_VEND_BUF sizeof(__le32)
+
enum mt_vendor_req {
MT_VEND_DEV_MODE = 1,
MT_VEND_WRITE = 2,
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 48edf387683e..317d99189556 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -9,36 +9,36 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
config MWIFIEX_PCIE
- tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+ tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
depends on MWIFIEX && PCI
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8766/8897 chipsets with PCIe interface.
+ 8766/8897/8997 chipsets with PCIe interface.
If you choose to build it as a module, it will be called
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+ tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
depends on MWIFIEX && USB
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8797/8897 chipset with USB interface.
+ 8797/8897/8997 chipset with USB interface.
If you choose to build it as a module, it will be called
mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index b15e4c7acbec..ff63cb5632eb 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -19,6 +19,7 @@
#include "cfg80211.h"
#include "main.h"
+#include "11n.h"
static char *reg_alpha2;
module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
},
};
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
.limits = mwifiex_ap_sta_limits,
.num_different_channels = 1,
.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
.max_interfaces = MWIFIEX_MAX_BSS_NUM,
.beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 1,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct
+ieee80211_iface_combination mwifiex_iface_comb_ap_sta_drcs = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 2,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
};
/*
@@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
* - Country codes
* - Sub bands (first channel, number of channels, maximum Tx power)
*/
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
{
u8 no_of_triplet = 0;
struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
+ case NL80211_IFTYPE_P2P_GO:
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+ break;
case NL80211_IFTYPE_AP:
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
switch (type) {
case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_init_p2p_client(priv))
+ if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
+ priv->adapter->curr_iface_comb.p2p_intf--;
+ priv->adapter->curr_iface_comb.sta_intf++;
dev->ieee80211_ptr->iftype = type;
break;
case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
struct mwifiex_adapter *adapter = priv->adapter;
+ struct sk_buff *skb, *tmp;
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
MWIFIEX_MEF_MAX_BYTESEQ)) {
mwifiex_dbg(priv->adapter, ERROR,
"Pattern not supported\n");
- kfree(mef_entry);
return -EOPNOTSUPP;
}
@@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
- if (wowlan->n_patterns || wowlan->magic_pkt)
+ if (wowlan->n_patterns || wowlan->magic_pkt) {
ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
&mef_entry[1], wowlan);
+ if (ret)
+ goto err;
+ }
if (!mef_cfg.criteria)
mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
HostCmd_ACT_GEN_SET, 0,
&mef_cfg, true);
+
+err:
kfree(mef_entry);
return ret;
}
@@ -3360,6 +3400,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
}
static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+ u16 chan;
+ u8 second_chan_offset, band;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (!sta_ptr) {
+ wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+ __func__, addr);
+ return -ENOENT;
+ }
+
+ if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+ WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+ wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
+ return -ENOENT;
+ }
+
+ if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+ sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+ wiphy_err(wiphy, "channel switch is running, abort request\n");
+ return -EALREADY;
+ }
+
+ chan = chandef->chan->hw_value;
+ second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+ band = chandef->chan->band;
+ mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+ return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (!sta_ptr) {
+ wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+ __func__, addr);
+ } else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+ sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+ sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+ wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
+ addr);
+ } else
+ mwifiex_stop_tdls_cs(priv, addr);
+}
+
+static int
mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
@@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_coalesce = mwifiex_cfg80211_set_coalesce,
.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
.tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+ .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
.add_station = mwifiex_cfg80211_add_station,
.change_station = mwifiex_cfg80211_change_station,
.get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
else
wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
- wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+ if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+ else if (adapter->is_hw_11ac_capable)
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+ else
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
wiphy->n_iface_combinations = 1;
/* Initialize cipher suits */
@@ -3709,6 +3822,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
if (adapter->fw_api_ver == MWIFIEX_FW_V15)
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 207da40500f4..45ae38e32621 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -167,8 +167,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
mwifiex_dbg(adapter, ERROR,
"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
- if (cmd_node->wait_q_enabled)
- mwifiex_complete_cmd(adapter, cmd_node);
mwifiex_recycle_cmd_node(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
return -1;
@@ -809,17 +807,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
adapter->is_cmd_timedout = 0;
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
- if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
- mwifiex_dbg(adapter, ERROR,
- "CMD_RESP: %#x been canceled\n",
- le16_to_cpu(resp->command));
- mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- return -1;
- }
-
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -ETIMEDOUT;
- wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
}
}
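+ /* A timeout while the hardware is still initializing only needs to
+ * complete the init sequence; the device dump and recovery handling
+ * below are skipped.
+ */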
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
mwifiex_init_fw_complete(adapter);
+ return;
+ }
if (adapter->if_ops.device_dump)
adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
adapter->curr_cmd->wait_q_enabled = false;
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ /* do not recycle: the command is probably still waiting for its response */
}
/* Cancel all pending command */
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
- if (cmd_node->wait_q_enabled) {
+ if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, cmd_node);
- cmd_node->wait_q_enabled = false;
- }
mwifiex_recycle_cmd_node(adapter, cmd_node);
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
@@ -1094,12 +1080,18 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
(adapter->curr_cmd->wait_q_enabled)) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
cmd_node = adapter->curr_cmd;
- cmd_node->wait_q_enabled = false;
- cmd_node->cmd_flag |= CMD_F_CANCELED;
- mwifiex_recycle_cmd_node(adapter, cmd_node);
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ /* Setting curr_cmd to NULL is quite dangerous, because
+ * mwifiex_process_cmdresp checks curr_cmd to be != NULL
+ * at the beginning, then relies on it and dereferences
+ * it at will.
+ * This probably works since mwifiex_cmd_timeout_func
+ * is the only caller of this function and no response
+ * can be processed at that point.
+ */
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+ mwifiex_recycle_cmd_node(adapter, cmd_node);
}
/* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
}
}
}
- adapter->cmd_wait_q.status = -1;
}
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 51e344789ba2..098e1f14dc9a 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -141,6 +141,9 @@ enum mwifiex_tdls_status {
TDLS_SETUP_COMPLETE,
TDLS_SETUP_FAILURE,
TDLS_LINK_TEARDOWN,
+ TDLS_CHAN_SWITCHING,
+ TDLS_IN_BASE_CHAN,
+ TDLS_IN_OFF_CHAN,
};
enum mwifiex_tdls_error_code {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index cd09051710e6..3ec2ac82e394 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183)
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197)
#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198)
#define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202)
#define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_CONFIG 0x0100
+#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
@@ -432,7 +438,6 @@ enum P2P_MODES {
#define CMD_F_HOSTCMD (1 << 0)
-#define CMD_F_CANCELED (1 << 1)
#define HostCmd_CMD_ID_MASK 0x0fff
@@ -509,8 +514,10 @@ enum P2P_MODES {
#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_RADAR_DETECTED 0x00000053
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
+#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
+#define EVENT_MULTI_CHAN_INFO 0x0000006a
#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
@@ -545,7 +552,27 @@ enum P2P_MODES {
#define ACT_TDLS_DELETE 0x00
#define ACT_TDLS_CREATE 0x01
#define ACT_TDLS_CONFIG 0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN 3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN 3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT 7
+#define TDLS_EVENT_START_CHAN_SWITCH 8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL 0
+#define TDLS_OFF_CHANNEL 1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT 0x06
+#define ACT_TDLS_CS_STOP 0x07
+#define ACT_TDLS_CS_PARAMS 0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME 2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK 10
+#define MWIFIEX_DEF_THR_DIRECTLINK 0
+#define MWIFIEX_DEF_CS_TIME 10
+#define MWIFIEX_DEF_CS_TIMEOUT 16
+#define MWIFIEX_DEF_CS_REG_CLASS 12
+#define MWIFIEX_DEF_CS_PERIODICITY 1
#define MWIFIEX_FW_V15 15
@@ -658,6 +685,7 @@ struct mwifiex_fw_chan_stats {
enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_PASSIVE_SCAN = BIT(0),
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+ MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
};
struct mwifiex_chan_scan_param_set {
@@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query {
u8 ht_info;
} __packed;
+struct mwifiex_tx_pause_tlv {
+ struct mwifiex_ie_types_header header;
+ u8 peermac[ETH_ALEN];
+ u8 tx_pause;
+ u8 pkt_cnt;
+} __packed;
+
enum Host_Sleep_Action {
HS_CONFIGURE = 0x0001,
HS_ACTIVATE = 0x0002,
@@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper {
u8 peer_mac[ETH_ALEN];
} __packed;
+struct mwifiex_tdls_config {
+ __le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+ u8 unit_time;
+ u8 thr_otherlink;
+ u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+ u8 peer_mac[ETH_ALEN];
+ u8 primary_chan;
+ u8 second_chan_offset;
+ u8 band;
+ __le16 switch_time;
+ __le16 switch_timeout;
+ u8 reg_class;
+ u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+ u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+ __le16 tdls_action;
+ u8 tdls_data[1];
+} __packed;
+
struct mwifiex_chan_desc {
__le16 start_freq;
u8 chan_width;
@@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext {
u8 tlv_buffer[1];
} __packed;
+struct mwifiex_ie_types_bss_mode {
+ struct mwifiex_ie_types_header header;
+ u8 bss_mode;
+} __packed;
+
struct mwifiex_ie_types_bss_scan_rsp {
struct mwifiex_ie_types_header header;
u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event {
__le32 passed;
} __packed;
+struct mwifiex_ie_types_multi_chan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 status;
+ u8 tlv_buffer[0];
+} __packed;
+
struct meas_rpt_map {
u8 rssi:3;
u8 unmeasured:1;
@@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct chan_switch_result {
+ u8 cur_chan;
+ u8 status;
+ u8 reason;
+} __packed;
+
struct mwifiex_tdls_generic_event {
__le16 type;
u8 peer_mac[ETH_ALEN];
union {
+ struct chan_switch_result switch_result;
+ u8 cs_stop_reason;
__le16 reason_code;
__le16 reserved;
} u;
@@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg {
struct coalesce_receive_filt_rule rule[0];
} __packed;
+struct host_cmd_ds_multi_chan_policy {
+ __le16 action;
+ __le16 policy;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2035,9 +2124,11 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_list sta_list;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_config tdls_config;
struct host_cmd_ds_tdls_oper tdls_oper;
struct host_cmd_ds_chan_rpt_req chan_rpt_req;
struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+ struct host_cmd_ds_multi_chan_policy mc_policy;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 0ba894509413..abf52d25b981 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
int ret;
ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+ if (ret)
return ret;
return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
ar_ie, &priv->assocresp_idx);
done:
+ kfree(gen_ie);
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index df7fdc09d38c..5d3ae63baea4 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->media_connected = false;
eth_broadcast_addr(priv->curr_addr);
-
+ priv->port_open = false;
priv->pkt_tx_ctrl = 0;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->data_rate = 0; /* Initially indicate the rate as auto */
@@ -301,7 +301,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+ adapter->active_scan_triggered = false;
setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
(unsigned long)adapter);
}
@@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->sta_list);
INIT_LIST_HEAD(&priv->auto_tdls_list);
skb_queue_head_init(&priv->tdls_txq);
+ skb_queue_head_init(&priv->bypass_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -550,11 +551,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
}
- if (adapter->if_ops.init_fw_port) {
- if (adapter->if_ops.init_fw_port(adapter))
- return -1;
- }
-
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 56b024a6aaa5..3cda1f956f0b 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->scan_block = true;
+ else
+ priv->port_open = true;
done:
/* Need to indicate IOCTL complete */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3ba4e0e04223..278dc94eaecb 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -276,6 +276,7 @@ process_start:
!adapter->pm_wakeup_fw_try) &&
(is_command_pending(adapter) ||
!skb_queue_empty(&adapter->tx_data_q) ||
+ !mwifiex_bypass_txlist_empty(adapter) ||
!mwifiex_wmm_lists_empty(adapter))) {
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@ process_start:
if ((!adapter->scan_chan_gap_enabled &&
adapter->scan_processing) || adapter->data_sent ||
+ mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_STA)) ||
(mwifiex_wmm_lists_empty(adapter) &&
+ mwifiex_bypass_txlist_empty(adapter) &&
skb_queue_empty(&adapter->tx_data_q))) {
if (adapter->cmd_sent || adapter->curr_cmd ||
+ !mwifiex_is_send_cmd_allowed
+ (mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_STA)) ||
(!is_command_pending(adapter)))
break;
}
@@ -342,7 +350,9 @@ process_start:
continue;
}
- if (!adapter->cmd_sent && !adapter->curr_cmd) {
+ if (!adapter->cmd_sent && !adapter->curr_cmd &&
+ mwifiex_is_send_cmd_allowed
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
if (mwifiex_exec_next_cmd(adapter) == -1) {
ret = -1;
break;
@@ -365,7 +375,25 @@ process_start:
if ((adapter->scan_chan_gap_enabled ||
!adapter->scan_processing) &&
- !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+ !adapter->data_sent &&
+ !mwifiex_bypass_txlist_empty(adapter) &&
+ !mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+ mwifiex_process_bypass_tx(adapter);
+ if (adapter->hs_activated) {
+ adapter->is_hs_configured = false;
+ mwifiex_hs_activated_event
+ (mwifiex_get_priv
+ (adapter, MWIFIEX_BSS_ROLE_ANY),
+ false);
+ }
+ }
+
+ if ((adapter->scan_chan_gap_enabled ||
+ !adapter->scan_processing) &&
+ !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+ !mwifiex_is_tdls_chan_switching
+ (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@ process_start:
if (adapter->delay_null_pkt && !adapter->cmd_sent &&
!adapter->curr_cmd && !is_command_pending(adapter) &&
(mwifiex_wmm_lists_empty(adapter) &&
+ mwifiex_bypass_txlist_empty(adapter) &&
skb_queue_empty(&adapter->tx_data_q))) {
if (!mwifiex_send_null_packet
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev)
return 0;
}
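+/* Checks whether a frame must skip the WMM queues: EAPOL and management
+ * frames always do, and TDLS frames do when the STA role supports TDLS,
+ * so that they are not held up behind queued data traffic.
+ */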
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+ if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+ mwifiex_is_skb_mgmt_frame(skb) ||
+ (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+ mwifiex_dbg(priv->adapter, DATA,
+ "bypass txqueue; eth type %#x, mgmt %d\n",
+ ntohs(eth_hdr->h_proto),
+ mwifiex_is_skb_mgmt_frame(skb));
+ return true;
+ }
+
+ return false;
+}
/*
* Add buffer into wmm tx queue and queue work to transmit it.
*/
@@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
}
}
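+ /* high-priority frames take the bypass queue, the rest go through WMM */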
- atomic_inc(&priv->adapter->tx_pending);
- mwifiex_wmm_add_buf_txqueue(priv, skb);
+ if (mwifiex_bypass_tx_queue(priv, skb)) {
+ atomic_inc(&priv->adapter->tx_pending);
+ atomic_inc(&priv->adapter->bypass_tx_pending);
+ mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+ } else {
+ atomic_inc(&priv->adapter->tx_pending);
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
+ }
mwifiex_queue_main_work(priv->adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ae98b5b83b1f..6b9512140e7a 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl {
u8 amsdu_in_ampdu;
u16 total_pkt_count;
bool tdls_link;
+ bool tx_paused;
};
struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@ struct mwifiex_tid_tbl {
struct mwifiex_wmm_desc {
struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
u32 packets_out[MAX_NUM_TID];
+ u32 pkts_paused[MAX_NUM_TID];
/* spin lock to protect ra_list */
spinlock_t ra_list_spinlock;
struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@ struct mwifiex_private {
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
+ u8 port_open;
u32 num_tx_timeout;
/* track consecutive timeout */
u8 tx_timeout_cnt;
@@ -662,6 +665,8 @@ struct mwifiex_private {
struct cfg80211_beacon_data beacon_after;
struct mwifiex_11h_intf_state state_11h;
struct mwifiex_ds_mem_rw mem_rw;
+ struct sk_buff_head bypass_txq;
+ struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
};
@@ -768,6 +773,7 @@ struct mwifiex_sta_node {
u8 tdls_status;
struct mwifiex_tdls_capab tdls_cap;
struct mwifiex_station_stats stats;
+ u8 tx_pause;
};
struct mwifiex_auto_tdls_peer {
@@ -831,6 +837,7 @@ struct mwifiex_adapter {
wait_queue_head_t init_wait_q;
void *card;
struct mwifiex_if_ops if_ops;
+ atomic_t bypass_tx_pending;
atomic_t rx_pending;
atomic_t tx_pending;
atomic_t cmd_pending;
@@ -979,6 +986,8 @@ struct mwifiex_adapter {
u8 coex_win_size;
u8 coex_tx_win_size;
u8 coex_rx_win_size;
+ bool drcs_enabled;
+ u8 active_scan_triggered;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1339,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv)
return 0;
}
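+/* A TDLS link counts as "set up" from setup completion onwards, including
+ * while a channel switch is in progress or while the link sits on the
+ * base or off channel.
+ */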
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+ switch (status) {
+ case TDLS_SETUP_COMPLETE:
+ case TDLS_CHAN_SWITCHING:
+ case TDLS_IN_BASE_CHAN:
+ case TDLS_IN_OFF_CHAN:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1482,9 @@ struct mwifiex_sta_node *
mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1515,13 @@ void mwifiex_check_auto_tdls(unsigned long context);
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+ u8 primary_chan, u8 second_chan_offset, u8 band);
+
int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf);
@@ -1522,6 +1556,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+ struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 77b9055a2d14..408b68460716 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -266,12 +266,17 @@ static const struct pci_device_id mwifiex_ids[] = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- .driver_data = (unsigned long) &mwifiex_pcie8766,
+ .driver_data = (unsigned long)&mwifiex_pcie8766,
},
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- .driver_data = (unsigned long) &mwifiex_pcie8897,
+ .driver_data = (unsigned long)&mwifiex_pcie8897,
+ },
+ {
+ PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long)&mwifiex_pcie8997,
},
{},
};
@@ -1082,6 +1087,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
card->txbd_rdptr++;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
card->txbd_rdptr += reg->ring_tx_start_ptr;
break;
}
@@ -1179,6 +1185,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
card->txbd_wrptr++;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
card->txbd_wrptr += reg->ring_tx_start_ptr;
break;
}
@@ -1807,6 +1814,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (!card->evt_buf_list[rdptr]) {
skb_push(skb, INTF_HEADER_LEN);
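+ /* Re-extend the recycled skb to the full event size and zero it
+ * before re-mapping the buffer for DMA.
+ */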
+ skb_put(skb, MAX_EVENT_SIZE - skb->len);
+ memset(skb->data, 0, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
@@ -2731,3 +2740,4 @@ MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 0e7ee8b72358..48e549c3b285 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -30,10 +30,12 @@
#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42)
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
@@ -197,7 +199,38 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
.sleep_cookie = 0,
.fw_dump_ctrl = 0xcf4,
.fw_dump_start = 0xcf8,
- .fw_dump_end = 0xcff
+ .fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = 0xC1A4,
+ .tx_wrptr = 0xC1A8,
+ .rx_rdptr = 0xC1A8,
+ .rx_wrptr = 0xC1A4,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x0FFF0000,
+ .tx_wrap_mask = 0x01FF0000,
+ .rx_mask = 0x00000FFF,
+ .rx_wrap_mask = 0x000001FF,
+ .tx_rollover_ind = BIT(28),
+ .rx_rollover_ind = BIT(12),
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
};
struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.can_ext_scan = true,
};
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ .firmware = PCIE8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_8997,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = false,
+ .can_ext_scan = true,
+};
+
struct mwifiex_evt_buf_desc {
u64 paddr;
u16 len;
@@ -325,6 +367,7 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
return 1;
break;
case PCIE_DEVICE_ID_MARVELL_88W8897:
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
if (((card->txbd_wrptr & reg->tx_mask) !=
(card->txbd_rdptr & reg->tx_mask)) ||
((card->txbd_wrptr & reg->tx_rollover_ind) ==
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index baf9715ddc10..5847863a2d6b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -527,7 +527,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
- |= MWIFIEX_PASSIVE_SCAN;
+ |= (MWIFIEX_PASSIVE_SCAN |
+ MWIFIEX_HIDDEN_SSID_REPORT);
else
scan_chan_list[chan_idx].chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
@@ -823,6 +824,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
int i;
u8 ssid_filter;
struct mwifiex_ie_types_htcap *ht_cap;
+ struct mwifiex_ie_types_bss_mode *bss_mode;
/* The tlv_buf_len is calculated for each scan command. The TLVs added
in this routine will be preserved since the routine that sends the
@@ -908,6 +910,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
wildcard_ssid_tlv->max_ssid_length =
IEEE80211_MAX_SSID_LEN;
+ if (!memcmp(user_scan_in->ssid_list[i].ssid,
+ "DIRECT-", 7))
+ wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
memcpy(wildcard_ssid_tlv->ssid,
user_scan_in->ssid_list[i].ssid, ssid_len);
@@ -968,6 +974,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
else
*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
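+ /* extended scan: pass the interface's BSS mode to the FW as a TLV */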
+ if (adapter->ext_scan) {
+ bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+ bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+ bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+ bss_mode->bss_mode = scan_cfg_out->bss_mode;
+ tlv_pos += sizeof(bss_mode->header) +
+ le16_to_cpu(bss_mode->header.len);
+ }
+
/* If the input config or adapter has the number of Probes set,
add tlv */
if (num_probes) {
@@ -1035,7 +1050,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
- |= MWIFIEX_PASSIVE_SCAN;
+ |= (MWIFIEX_PASSIVE_SCAN |
+ MWIFIEX_HIDDEN_SSID_REPORT);
else
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
@@ -1586,6 +1602,62 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
return ret;
}
+/* This function checks if SSID string contains all zeroes or length is zero */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+ int idx;
+
+ for (idx = 0; idx < ssid->ssid_len; idx++) {
+ if (ssid->ssid[idx])
+ return false;
+ }
+
+ return true;
+}
+
+/* This function checks if any hidden SSID was found on passive scan channels
+ * and saves those channels for a later SSID-specific active scan
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+ struct cfg80211_bss *bss)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ int ret;
+ int chid;
+
+ /* Allocate and fill new bss descriptor */
+ bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+ if (!bss_desc)
+ return -ENOMEM;
+
+ ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+ if (ret)
+ goto done;
+
+ if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+ mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+ for (chid = 0; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+ if (priv->hidden_chan[chid].chan_number ==
+ bss->channel->hw_value)
+ break;
+
+ if (!priv->hidden_chan[chid].chan_number) {
+ priv->hidden_chan[chid].chan_number =
+ bss->channel->hw_value;
+ priv->hidden_chan[chid].radio_type =
+ bss->channel->band;
+ priv->hidden_chan[chid].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+ break;
+ }
+ }
+ }
+
+done:
+ kfree(bss_desc);
+ return 0;
+}
+
static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
@@ -1775,6 +1847,14 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv, bss);
cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+ if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
+ mwifiex_dbg(adapter, INFO,
+ "radar or passive channel %d\n",
+ channel);
+ mwifiex_save_hidden_ssid_channels(priv, bss);
+ }
}
} else {
mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1798,6 +1878,57 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
}
}
+/* This function checks if any hidden SSID was found on passive scan channels
+ * and performs an SSID-specific active scan on those channels
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+ int ret;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 id = 0;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
+
+ if (adapter->active_scan_triggered) {
+ adapter->active_scan_triggered = false;
+ return 0;
+ }
+
+ if (!priv->hidden_chan[0].chan_number) {
+ mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+ return 0;
+ }
+ user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+ if (!user_scan_cfg)
+ return -ENOMEM;
+
+ for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+ if (!priv->hidden_chan[id].chan_number)
+ break;
+ memcpy(&user_scan_cfg->chan_list[id],
+ &priv->hidden_chan[id],
+ sizeof(struct mwifiex_user_scan_chan));
+ }
+
+ adapter->active_scan_triggered = true;
+ user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+ user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+ ret = mwifiex_scan_networks(priv, user_scan_cfg);
+ kfree(user_scan_cfg);
+
+ memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+ if (ret) {
+ dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1811,6 +1942,8 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ mwifiex_active_scan_req_for_passive_chan(priv);
+
if (!adapter->ext_scan)
mwifiex_complete_scan(priv);
@@ -1837,15 +1970,17 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- if (priv->scan_request) {
- mwifiex_dbg(adapter, INFO,
- "info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- mwifiex_dbg(adapter, INFO,
- "info: scan already aborted\n");
+ if (!adapter->active_scan_triggered) {
+ if (priv->scan_request) {
+ mwifiex_dbg(adapter, INFO,
+ "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
+ }
}
} else {
/* Get scan command from scan_pending_q and put to
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index a0b121f3460c..5d05c6fe6429 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,10 @@ static unsigned long iface_work_flags;
static struct semaphore add_remove_card_sem;
+static struct memory_type_mapping generic_mem_type_map[] = {
+ {"DUMP", NULL, 0, 0xDD},
+};
+
static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"ITCM", NULL, 0, 0xF0},
{"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
return -ENOMEM;
card->func = func;
+ card->device_id = id;
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
@@ -107,6 +112,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->can_dump_fw = data->can_dump_fw;
+ card->fw_dump_enh = data->fw_dump_enh;
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
}
@@ -287,6 +293,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
/* Device ID for SD8801 */
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
/* WLAN IDs */
@@ -303,6 +311,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
};
@@ -910,6 +920,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (!fwbuf)
return -ENOMEM;
+ sdio_claim_host(card->func);
+
/* Perform firmware data transfer */
do {
/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
+ sdio_release_host(card->func);
+
mwifiex_dbg(adapter, MSG,
"info: FW download over, size %d bytes\n", offset);
@@ -1964,8 +1978,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->dev = &func->dev;
strcpy(adapter->fw_name, card->firmware);
- adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
- adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+ if (card->fw_dump_enh) {
+ adapter->mem_type_mapping_tbl = generic_mem_type_map;
+ adapter->num_mem_types = 1;
+ } else {
+ adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+ adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+ }
return 0;
}
@@ -2107,26 +2126,46 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
port, card->mp_data_port_mask);
}
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+ struct sdio_func *func = card->func;
+ const struct sdio_device_id *device_id = card->device_id;
+
+ /* TODO: mmc_hw_reset does not require destroying and re-probing the
+ * whole adapter. Hence there is no need for this Rube Goldberg
+ * design to reload the fw from an external workqueue. If we don't
+ * destroy the adapter we could reload the fw from
+ * mwifiex_main_work_queue directly.
+ * The real difficulty with fw reset is to restore all the user
+ * settings applied through ioctl. By destroying and recreating the
+ * adapter, we take the easy way out, since we rely on user space to
+ * restore them. We assume that user space will treat the new
+ * incarnation of the adapter (interfaces) as if they had been just
+ * discovered and initialize them from scratch.
+ */
+
+ mwifiex_sdio_remove(func);
+
+ /* power cycle the adapter */
+ sdio_claim_host(func);
+ mmc_hw_reset(func->card->host);
+ sdio_release_host(func);
+
+ mwifiex_sdio_probe(func, device_id);
+}
+
static struct mwifiex_adapter *save_adapter;
static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- struct mmc_host *target = card->func->card->host;
-
- /* The actual reset operation must be run outside of driver thread.
- * This is because mmc_remove_host() will cause the device to be
- * instantly destroyed, and the driver then needs to end its thread,
- * leading to a deadlock.
- *
- * We run it in a totally independent workqueue.
- */
- mwifiex_dbg(adapter, WARN, "Resetting card...\n");
- mmc_remove_host(target);
- /* 200ms delay is based on experiment with sdhci controller */
- mdelay(200);
- target->rescan_entered = 0; /* rescan non-removable cards */
- mmc_add_host(target);
+ /* TODO: the card pointer is unprotected. If the adapter is removed
+ * physically, the sdio core might trigger mwifiex_sdio_remove before
+ * this workqueue is run, which will destroy the adapter struct. When
+ * this workqueue eventually executes, it will dereference an invalid
+ * adapter pointer.
+ */
+ mwifiex_recreate_adapter(card);
}
/* This function read/write firmware */
@@ -2138,8 +2177,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
int ret, tries;
u8 ctrl_data = 0;
- sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
- &ret);
+ sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+ card->reg->fw_dump_ctrl, &ret);
if (ret) {
mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
return RDWR_STATUS_FAILURE;
@@ -2155,10 +2194,10 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
break;
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
- if (ctrl_data != FW_DUMP_HOST_READY) {
+ if (ctrl_data != card->reg->fw_dump_host_ready) {
mwifiex_dbg(adapter, WARN,
- "The ctrl reg was changed, re-try again!\n");
- sdio_writeb(card->func, FW_DUMP_HOST_READY,
+ "The ctrl reg was changed, re-try again\n");
+ sdio_writeb(card->func, card->reg->fw_dump_host_ready,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
}
usleep_range(100, 200);
}
- if (ctrl_data == FW_DUMP_HOST_READY) {
+ if (ctrl_data == card->reg->fw_dump_host_ready) {
mwifiex_dbg(adapter, ERROR,
"Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@ done:
sdio_release_host(card->func);
}
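+/* Enhanced firmware dump (fw_dump_enh): instead of the fixed per-region
+ * table, a single "DUMP" buffer is read back through the dump registers
+ * and grown in 0x4000-byte steps until the firmware signals completion.
+ */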
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ struct memory_type_mapping *entry = &generic_mem_type_map[0];
+ unsigned int reg, reg_start, reg_end;
+ u8 start_flag = 0, done_flag = 0;
+ u8 *dbg_ptr, *end_ptr;
+ enum rdwr_status stat;
+ int ret = -1, tries;
+
+ if (!card->fw_dump_enh)
+ return;
+
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+
+ mwifiex_pm_wakeup_card(adapter);
+ sdio_claim_host(card->func);
+
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+ stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+
+ reg_start = card->reg->fw_dump_start;
+ reg_end = card->reg->fw_dump_end;
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+ start_flag = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
+ goto done;
+ }
+ if (start_flag == 0)
+ break;
+ if (tries == MAX_POLL_TRIES - 1) {
+ mwifiex_dbg(adapter, ERROR,
+ "FW not ready to dump\n");
+ ret = -1;
+ goto done;
+ }
+ }
+ usleep_range(100, 200);
+ }
+
+ entry->mem_ptr = vmalloc(0xf0000 + 1);
+ if (!entry->mem_ptr) {
+ ret = -1;
+ goto done;
+ }
+ dbg_ptr = entry->mem_ptr;
+ entry->mem_size = 0xf0000;
+ end_ptr = dbg_ptr + entry->mem_size;
+
+ done_flag = entry->done_flag;
+ mwifiex_dbg(adapter, DUMP,
+ "Start %s output, please wait...\n", entry->mem_name);
+
+ while (true) {
+ stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ *dbg_ptr = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
+ goto done;
+ }
+ dbg_ptr++;
+ if (dbg_ptr >= end_ptr) {
+ u8 *tmp_ptr;
+
+ tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+ if (!tmp_ptr)
+ goto done;
+
+ memcpy(tmp_ptr, entry->mem_ptr,
+ entry->mem_size);
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = tmp_ptr;
+ tmp_ptr = NULL;
+ dbg_ptr = entry->mem_ptr + entry->mem_size;
+ entry->mem_size += 0x4000;
+ end_ptr = entry->mem_ptr + entry->mem_size;
+ }
+ }
+ if (stat == RDWR_STATUS_DONE) {
+ entry->mem_size = dbg_ptr - entry->mem_ptr;
+ mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+ entry->mem_name, entry->mem_size);
+ ret = 0;
+ break;
+ }
+ }
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+ }
+ sdio_release_host(card->func);
+}
+
static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
{
+ struct sdio_mmc_card *card = adapter->card;
+
mwifiex_drv_info_dump(adapter);
- mwifiex_sdio_fw_dump(adapter);
+ if (card->fw_dump_enh)
+ mwifiex_sdio_generic_fw_dump(adapter);
+ else
+ mwifiex_sdio_fw_dump(adapter);
mwifiex_upload_device_dump(adapter);
}
@@ -2510,3 +2668,4 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6f645cf47369..b9fbc5cf6262 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -35,6 +35,7 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
@@ -222,6 +223,7 @@ struct mwifiex_sdio_card_reg {
u8 cmd_cfg_1;
u8 cmd_cfg_2;
u8 cmd_cfg_3;
+ u8 fw_dump_host_ready;
u8 fw_dump_ctrl;
u8 fw_dump_start;
u8 fw_dump_end;
@@ -257,11 +259,15 @@ struct sdio_mmc_card {
bool supports_sdio_new_mode;
bool has_control_mask;
bool can_dump_fw;
+ bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
+
+ /* needed for card reset */
+ const struct sdio_device_id *device_id;
};
struct mwifiex_sdio_device {
@@ -275,6 +281,7 @@ struct mwifiex_sdio_device {
bool supports_sdio_new_mode;
bool has_control_mask;
bool can_dump_fw;
+ bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
};
@@ -350,6 +357,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
.cmd_cfg_1 = 0xb9,
.cmd_cfg_2 = 0xba,
.cmd_cfg_3 = 0xbb,
+ .fw_dump_host_ready = 0xee,
.fw_dump_ctrl = 0xe2,
.fw_dump_start = 0xe3,
.fw_dump_end = 0xea,
@@ -361,6 +369,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
0x59, 0x5c, 0x5d},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
.start_rd_port = 0,
.start_wr_port = 0,
@@ -469,6 +530,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ .firmware = SD8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8997,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.firmware = SD8887_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8887,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 037adcd1f484..a49a80dd773e 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
#include "11n.h"
#include "11ac.h"
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
static bool disable_auto_ds;
module_param(disable_auto_ds, bool, 0);
MODULE_PARM_DESC(disable_auto_ds,
@@ -1512,6 +1516,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
}
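+/* Prepare HostCmd_CMD_MC_POLICY; data_buf points to the u16 DRCS policy
+ * value (1: multi-channel, 0: single-channel, per the drcs module param).
+ */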
static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+ const u16 *drcs_info = data_buf;
+
+ mc_pol->action = cpu_to_le16(cmd_action);
+ mc_pol->policy = cpu_to_le16(*drcs_info);
+ cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+ S_DS_GEN);
+ return 0;
+}
+
+static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, void *data_buf)
@@ -1576,6 +1596,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
}
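+/* Prepare HostCmd_CMD_TDLS_CONFIG; cmd_action selects which of the
+ * mwifiex_tdls_*_cs_params payloads gets copied into tdls_data.
+ */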
static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+ struct mwifiex_tdls_init_cs_params *config;
+ struct mwifiex_tdls_config *init_config;
+ u16 len;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ tdls_config->tdls_action = cpu_to_le16(cmd_action);
+ le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+ switch (cmd_action) {
+ case ACT_TDLS_CS_ENABLE_CONFIG:
+ init_config = data_buf;
+ len = sizeof(*init_config);
+ memcpy(tdls_config->tdls_data, init_config, len);
+ break;
+ case ACT_TDLS_CS_INIT:
+ config = data_buf;
+ len = sizeof(*config);
+ memcpy(tdls_config->tdls_data, config, len);
+ break;
+ case ACT_TDLS_CS_STOP:
+ len = sizeof(struct mwifiex_tdls_stop_cs_params);
+ memcpy(tdls_config->tdls_data, data_buf, len);
+ break;
+ case ACT_TDLS_CS_PARAMS:
+ len = sizeof(struct mwifiex_tdls_config_cs_params);
+ memcpy(tdls_config->tdls_data, data_buf, len);
+ break;
+ default:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS configuration\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, len);
+ return 0;
+}
+
+static int
mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf)
@@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
cmd_ptr->params.bss_mode.con_type =
CONNECTION_TYPE_ADHOC;
- else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+ else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
cmd_ptr->params.bss_mode.con_type =
CONNECTION_TYPE_INFRA;
- else if (priv->bss_mode == NL80211_IFTYPE_AP)
+ else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_GO)
cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
cmd_ptr->size = cpu_to_le16(sizeof(struct
host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_TDLS_CONFIG:
+ ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
data_buf);
@@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_MC_POLICY:
+ ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
}
+
+ if (drcs) {
+ adapter->drcs_enabled = true;
+ if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_MC_POLICY,
+ HostCmd_ACT_GEN_SET, 0,
+ &adapter->drcs_enabled,
+ true);
+ if (ret)
+ return -1;
+ }
}
/* get tx rate */
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b645884b3b97..87b69d8ad120 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
"info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
+ priv->port_open = true;
}
}
@@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
+ priv->port_open = true;
}
}
@@ -893,7 +895,7 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
case ACT_TDLS_DELETE:
if (reason) {
if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
- mwifiex_dbg(priv->adapter, ERROR,
+ mwifiex_dbg(priv->adapter, MSG,
"TDLS link delete for %pM failed: reason %d\n",
cmd_tdls_oper->peer_mac, reason);
else
@@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_TDLS_OPER:
ret = mwifiex_ret_tdls_oper(priv, resp);
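+ /* fall through - no response handling needed for MC_POLICY */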
+ case HostCmd_CMD_MC_POLICY:
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
break;
case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
break;
+ case HostCmd_CMD_TDLS_CONFIG:
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 848de2621958..3d18c585e543 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->media_connected = false;
priv->scan_block = false;
+ priv->port_open = false;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_tdls_generic_event *tdls_evt =
(void *)event_skb->data + sizeof(adapter->event_cause);
+ u8 *mac = tdls_evt->peer_mac;
/* reserved 2 bytes are not mandatory in tdls event */
if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
le16_to_cpu(tdls_evt->u.reason_code),
GFP_KERNEL);
break;
+ case TDLS_EVENT_CHAN_SWITCH_RESULT:
+ mwifiex_dbg(adapter, EVENT, "tdls channel switch result :\n");
+ mwifiex_dbg(adapter, EVENT,
+ "status=0x%x, reason=0x%x cur_chan=%d\n",
+ tdls_evt->u.switch_result.status,
+ tdls_evt->u.switch_result.reason,
+ tdls_evt->u.switch_result.cur_chan);
+
+ /* tdls channel switch failed */
+ if (tdls_evt->u.switch_result.status != 0) {
+ switch (tdls_evt->u.switch_result.cur_chan) {
+ case TDLS_BASE_CHANNEL:
+ sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+ break;
+ case TDLS_OFF_CHANNEL:
+ sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+ break;
+ default:
+ break;
+ }
+ return ret;
+ }
+
+ /* tdls channel switch success */
+ switch (tdls_evt->u.switch_result.cur_chan) {
+ case TDLS_BASE_CHANNEL:
+ if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+ break;
+ mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+ false);
+ sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+ break;
+ case TDLS_OFF_CHANNEL:
+ if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+ break;
+ mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+ true);
+ sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case TDLS_EVENT_START_CHAN_SWITCH:
+ mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+ sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+ break;
+ case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+ mwifiex_dbg(adapter, EVENT,
+ "tdls chan switch stopped, reason=%d\n",
+ tdls_evt->u.cs_stop_reason);
+ break;
default:
break;
}
@@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
return ret;
}
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_header *tlv)
+{
+ struct mwifiex_tx_pause_tlv *tp;
+ struct mwifiex_sta_node *sta_ptr;
+ unsigned long flags;
+
+ tp = (void *)tlv;
+ mwifiex_dbg(priv->adapter, EVENT,
+ "uap tx_pause: %pM pause=%d, pkts=%d\n",
+ tp->peermac, tp->tx_pause,
+ tp->pkt_cnt);
+
+ if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+ if (tp->tx_pause)
+ priv->port_open = false;
+ else
+ priv->port_open = true;
+ } else if (is_multicast_ether_addr(tp->peermac)) {
+ mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+ } else {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+ sta_ptr->tx_pause = tp->tx_pause;
+ mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+ tp->tx_pause);
+ }
+ }
+}
+
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+ struct mwifiex_ie_types_header *tlv)
+{
+ struct mwifiex_tx_pause_tlv *tp;
+ struct mwifiex_sta_node *sta_ptr;
+ int status;
+ unsigned long flags;
+
+ tp = (void *)tlv;
+ mwifiex_dbg(priv->adapter, EVENT,
+ "sta tx_pause: %pM pause=%d, pkts=%d\n",
+ tp->peermac, tp->tx_pause,
+ tp->pkt_cnt);
+
+ if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+ if (tp->tx_pause)
+ priv->port_open = false;
+ else
+ priv->port_open = true;
+ } else {
+ if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return;
+
+ status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+ if (mwifiex_is_tdls_link_setup(status)) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+ sta_ptr->tx_pause = tp->tx_pause;
+ mwifiex_update_ralist_tx_pause(priv,
+ tp->peermac,
+ tp->tx_pause);
+ }
+ }
+ }
+}
+
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_ie_types_multi_chan_info *chan_info;
+ u16 status;
+
+ chan_info = (void *)event_skb->data + sizeof(u32);
+
+ if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unknown TLV in chan_info event\n");
+ return;
+ }
+
+ status = le16_to_cpu(chan_info->status);
+
+ if (status) {
+ mwifiex_dbg(priv->adapter, EVENT,
+ "multi-channel operation started\n");
+ } else {
+ mwifiex_dbg(priv->adapter, EVENT,
+ "multi-channel operation over\n");
+ }
+}
+
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_ie_types_header *tlv;
+ u16 tlv_type, tlv_len;
+ int tlv_buf_left;
+
+ if (!priv->media_connected) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tx_pause event while disconnected; bss_role=%d\n",
+ priv->bss_role);
+ return;
+ }
+
+ tlv_buf_left = event_skb->len - sizeof(u32);
+ tlv = (void *)event_skb->data + sizeof(u32);
+
+ while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_len = le16_to_cpu(tlv->len);
+ if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+ tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+ tlv_len, tlv_buf_left);
+ break;
+ }
+ if (tlv_type == TLV_TYPE_TX_PAUSE) {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+ mwifiex_process_sta_tx_pause(priv, tlv);
+ else
+ mwifiex_process_uap_tx_pause(priv, tlv);
+ }
+
+ tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+ tlv_len;
+ tlv = (void *)((u8 *)tlv + tlv_len +
+ sizeof(struct mwifiex_ie_types_header));
+ }
+
+}
+
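Both parsers above assume the same event framing: a 32-bit event cause followed by a chain of TLVs. A minimal sketch of that layout (illustrative only; the real definitions live in the mwifiex firmware headers):

	/*
	 * event_skb->data:
	 *   [ __le32 event_cause ][ type | len | value ][ type | len | value ] ...
	 *                          \_ struct mwifiex_ie_types_header + payload
	 *
	 * tlv_buf_left starts at event_skb->len - sizeof(u32), and each step
	 * advances by sizeof(struct mwifiex_ie_types_header) +
	 * le16_to_cpu(tlv->len).
	 */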
/*
* This function handles coex events generated by firmware
*/
@@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PS_AWAKE:
mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
- if (!adapter->pps_uapsd_mode &&
+ if (!adapter->pps_uapsd_mode && priv->port_open &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PORT_RELEASE:
mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+ priv->port_open = true;
break;
case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
break;
+ case EVENT_TX_DATA_PAUSE:
+ mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+ mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+ break;
+
+ case EVENT_MULTI_CHAN_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+ mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+ break;
+
case EVENT_TX_STATUS_REPORT:
mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d8b7d9c20450..a6c8a4f7bfe9 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
- mwifiex_dbg(adapter, ERROR,
- "cmd_wait_q terminated: %d\n", status);
+ mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+ status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 2faa1bc42abe..b3e163de9899 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
tid = skb->priority;
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
- if (status == TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup(status)) {
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
ra_list->tdls_link = true;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -164,7 +164,7 @@ static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
pos = (void *)skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2;
- *pos++ = le16_to_cpu(assoc_rsp->a_id);
+ memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
return;
}
@@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
extcap->ieee_hdr.len = 8;
memset(extcap->ext_capab, 0, 8);
extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+ extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
if (priv->adapter->is_hw_11ac_capable)
extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
}
+ if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+ WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+ mwifiex_config_tdls_enable(priv);
+ mwifiex_config_tdls_cs_params(priv);
+ }
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
list_for_each_entry(sta_ptr, &priv->sta_list, list) {
- if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
peer++;
count++;
@@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
if ((link_status == TDLS_NOT_SETUP) &&
(peer->tdls_status == TDLS_SETUP_INPROGRESS))
peer->failure_count++;
- else if (link_status == TDLS_SETUP_COMPLETE)
+ else if (mwifiex_is_tdls_link_setup(link_status))
peer->failure_count = 0;
peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context)
if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
!tdls_peer->rssi) &&
- tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+ mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
mwifiex_dbg(priv->adapter, MSG,
"teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
mwifiex_flush_auto_tdls_list(priv);
}
}
+
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+ struct mwifiex_tdls_config config;
+
+ config.enable = cpu_to_le16(enable);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+ return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+ return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+ struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+ config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+ config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+ config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_PARAMS, 0,
+ &config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+ struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+ ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_STOP, 0,
+ &stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+ u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+ struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+ ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+ start_tdls_cs_params.primary_chan = primary_chan;
+ start_tdls_cs_params.second_chan_offset = second_chan_offset;
+ start_tdls_cs_params.band = band;
+
+ start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+ start_tdls_cs_params.switch_timeout =
+ cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+ start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+ start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+ ACT_TDLS_CS_INIT, 0,
+ &start_tdls_cs_params, true);
+}
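A rough sketch of how a channel-switch request might drive these helpers (illustrative only; the actual cfg80211 ops wiring is outside this hunk):

	/* sketch only: restart a TDLS channel switch on a new channel */
	static int sketch_restart_tdls_cs(struct mwifiex_private *priv,
					  const u8 *peer, u8 chan,
					  u8 offset, u8 band)
	{
		mwifiex_stop_tdls_cs(priv, peer);
		return mwifiex_start_tdls_cs(priv, peer, chan, offset, band);
	}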
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 5ed9b794053e..8b1e5b5d47fe 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
/* consumes ack_skb */
skb_complete_wifi_ack(ack_skb, !tx_status->status);
} else {
+		/* Remove the broadcast address which was added by the driver */
+ memmove(ack_skb->data +
+ sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+ ack_skb->data +
+ sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+ ETH_ALEN, ack_skb->len -
+ (sizeof(struct ieee80211_hdr_3addr) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+ ETH_ALEN));
+ ack_skb->len = ack_skb->len - ETH_ALEN;
+		/* Remove the driver's proprietary header, including the
+		 * 2-byte packet length, and pass the actual management
+		 * frame buffer to cfg80211.
+		 */
cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
- ack_skb->data, ack_skb->len,
+ ack_skb->data +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ sizeof(u16), ack_skb->len -
+ (MWIFIEX_MGMT_FRAME_HEADER_SIZE
+ + sizeof(u16)),
!tx_status->status, GFP_ATOMIC);
dev_kfree_skb_any(ack_skb);
}
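The offset arithmetic above implies the following buffer layout for a driver-built management frame (a sketch inferred from the code, not an authoritative format description):

	/*
	 * ack_skb->data:
	 *   [ MWIFIEX_MGMT_FRAME_HEADER_SIZE ][ u16 pkt_len ]
	 *   [ struct ieee80211_hdr_3addr ][ 6-byte address added by the driver ]
	 *   [ rest of the management frame body ]
	 *
	 * The memmove() closes the 6-byte gap, and the tx-status report then
	 * starts after the proprietary header plus the length field.
	 */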
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b74930054b8c..4d5a6e3b6361 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef)
{
- u8 config_bands = 0;
+ u8 config_bands = 0, old_bands = priv->adapter->config_bands;
priv->bss_chandef = chandef;
@@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
}
priv->adapter->config_bands = config_bands;
+
+ if (old_bands != config_bands) {
+ mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+ mwifiex_dnld_txpwr_table(priv);
+ }
}
int mwifiex_config_start_uap(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7bc1f850e3b7..46c972a650a4 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -41,6 +41,8 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
	mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);
+ skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
while ((evt_len >= sizeof(tlv_hdr->header))) {
tlv_hdr = (struct mwifiex_ie_types_data *)curr;
tlv_len = le16_to_cpu(tlv_hdr->header.len);
@@ -176,6 +178,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;
+ priv->port_open = false;
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +188,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_BSS_ACTIVE:
priv->media_connected = true;
+ priv->port_open = true;
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +196,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
case EVENT_UAP_BSS_START:
mwifiex_dbg(adapter, EVENT,
"AP EVENT: event id: %#x\n", eventcause);
+ priv->port_open = false;
memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
ETH_ALEN);
if (priv->hist_data)
@@ -297,6 +302,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
+ case EVENT_TX_DATA_PAUSE:
+ mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+ mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+ break;
+
+ case EVENT_MULTI_CHAN_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+ mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+ break;
+
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index aada93425f80..5e789b2e06ea 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -47,6 +47,11 @@ static struct usb_device_id mwifiex_usb_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+ /* 8997 */
+ {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
{ } /* Terminating entry */
};
@@ -244,9 +249,11 @@ setup_for_next:
if (card->rx_cmd_ep == context->ep) {
mwifiex_usb_submit_rx_urb(context, size);
} else {
- context->skb = NULL;
- if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+		if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
 			mwifiex_usb_submit_rx_urb(context, size);
+		} else {
+			context->skb = NULL;
+		}
}
return;
@@ -380,12 +387,14 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
case USB8797_PID_1:
case USB8801_PID_1:
case USB8897_PID_1:
+ case USB8997_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
case USB8766_PID_2:
case USB8797_PID_2:
case USB8801_PID_2:
case USB8897_PID_2:
+ case USB8997_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
break;
default:
@@ -812,6 +821,12 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->dev = &card->udev->dev;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+ case USB8997_PID_1:
+ case USB8997_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+ adapter->ext_scan = true;
+ break;
case USB8897_PID_1:
case USB8897_PID_2:
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
@@ -868,8 +883,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Allocate memory for transmit */
fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
- if (!fwdata)
+ if (!fwdata) {
+ ret = -ENOMEM;
goto fw_exit;
+ }
/* Allocate memory for receive */
recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
@@ -1119,3 +1136,4 @@ MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 57e1a5736318..f0051f8c8981 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -32,6 +32,8 @@
#define USB8897_PID_2 0x2046
#define USB8801_PID_1 0x2049
#define USB8801_PID_2 0x204a
+#define USB8997_PID_1 0x204d
+#define USB8997_PID_2 0x204e
#define USB8XXX_FW_DNLD 1
@@ -46,6 +48,7 @@
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 790e61953abf..0cec8a64473e 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -126,6 +126,10 @@ static int num_of_items = ARRAY_SIZE(items);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
{
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+ if (adapter->if_ops.init_fw_port)
+ adapter->if_ops.init_fw_port(adapter);
+
adapter->init_wait_q_woken = true;
wake_up_interruptible(&adapter->init_wait_q);
return 0;
@@ -496,16 +500,12 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- mwifiex_dbg(adapter, CMD,
- "cmd completed: status=%d\n",
+ WARN_ON(!cmd_node->wait_q_enabled);
+ mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
adapter->cmd_wait_q.status);
- *(cmd_node->condition) = true;
-
- if (adapter->cmd_wait_q.status == -ETIMEDOUT)
- mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
- else
- wake_up_interruptible(&adapter->cmd_wait_q.wait);
+ *cmd_node->condition = true;
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
return 0;
}
@@ -531,6 +531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
return NULL;
}
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+ struct mwifiex_sta_node *node;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (node->tdls_status == status)
+ return node;
+ }
+
+ return NULL;
+}
+
+/* If a TDLS channel switch is ongoing, tx data traffic should be
+ * blocked until the switch has completed.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return false;
+
+ sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+ if (sta_ptr)
+ return true;
+
+ return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return false;
+
+ sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+ if (sta_ptr)
+ return true;
+
+ return false;
+}
+
+/* If a TDLS channel switch is ongoing, or the TDLS link is operating
+ * off-channel, the cmd path should be blocked until TDLS switches back
+ * to the base channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+ if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ return true;
+
+ if (mwifiex_is_tdls_chan_switching(priv) ||
+ mwifiex_is_tdls_off_chan(priv))
+ return false;
+
+ return true;
+}
+
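A plausible call site for this check on the command path (a sketch; the real gate sits in the command submission code, which this hunk does not show):

	/* sketch only */
	if (!mwifiex_is_send_cmd_allowed(priv)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "cmd blocked: TDLS channel switch in progress\n");
		return -1;
	}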
/* This function will add a sta_node entry to associated station list
* table with the given mac address.
* If entry exist already, existing entry is returned.
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index a8ea21c3340c..173d3663c2e0 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->tdls_link = false;
ra_list->ba_status = BA_SETUP_NONE;
ra_list->amsdu_in_ampdu = false;
+ ra_list->tx_paused = false;
if (!mwifiex_queuing_ra_based(priv)) {
- if (mwifiex_get_tdls_link_status(priv, ra) ==
- TDLS_SETUP_COMPLETE) {
+ if (mwifiex_is_tdls_link_setup
+ (mwifiex_get_tdls_link_status(priv, ra))) {
ra_list->tdls_link = true;
ra_list->is_11n_enabled =
mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
}
}
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+ return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+}
+
/*
* This function checks if WMM Tx queue is empty.
*/
@@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
+ if (priv && !priv->port_open)
+ continue;
if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
return false;
}
@@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
idr_destroy(&priv->ack_status_frames);
}
@@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
return NULL;
}
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+ u8 tx_pause)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ u32 pkt_cnt = 0, tx_pkts_queued;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+ if (ra_list && ra_list->tx_paused != tx_pause) {
+ pkt_cnt += ra_list->total_pkt_count;
+ ra_list->tx_paused = tx_pause;
+ if (tx_pause)
+ priv->wmm.pkts_paused[i] +=
+ ra_list->total_pkt_count;
+ else
+ priv->wmm.pkts_paused[i] -=
+ ra_list->total_pkt_count;
+ }
+ }
+
+ if (pkt_cnt) {
+ tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+ if (tx_pause)
+ tx_pkts_queued -= pkt_cnt;
+ else
+ tx_pkts_queued += pkt_cnt;
+
+ atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates the non-TDLS peers' ralist tx_pause while a
+ * TDLS channel switch is in progress.
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+ u8 *mac, u8 tx_pause)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ u32 pkt_cnt = 0, tx_pkts_queued;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+ list) {
+ if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+ continue;
+
+ if (ra_list && ra_list->tx_paused != tx_pause) {
+ pkt_cnt += ra_list->total_pkt_count;
+ ra_list->tx_paused = tx_pause;
+ if (tx_pause)
+ priv->wmm.pkts_paused[i] +=
+ ra_list->total_pkt_count;
+ else
+ priv->wmm.pkts_paused[i] -=
+ ra_list->total_pkt_count;
+ }
+ }
+ }
+
+ if (pkt_cnt) {
+ tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+ if (tx_pause)
+ tx_pkts_queued -= pkt_cnt;
+ else
+ tx_pkts_queued += pkt_cnt;
+
+ atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
/*
* This function retrieves an RA list node for a given TID and
* RA address pair.
@@ -670,6 +764,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv,
}
/*
+ * This function adds a packet to the bypass TX queue.
+ * This is a special TX queue for packets which can be sent even when
+ * port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&priv->bypass_txq, skb);
+}
+
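One way the TX path could use this queue for frames that must go out while port_open is false, such as EAPOL handshake frames (a sketch; mwifiex_is_bypass_pkt() is a hypothetical predicate, not part of this patch):

	/* sketch only; mwifiex_is_bypass_pkt() is hypothetical */
	if (mwifiex_is_bypass_pkt(priv, skb)) {
		atomic_inc(&priv->adapter->bypass_tx_pending);
		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
	} else {
		mwifiex_wmm_add_buf_txqueue(priv, skb);
	}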
+/*
* This function adds a packet to WMM queue.
*
* In disconnected state the packet is immediately dropped and the
@@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
!mwifiex_is_skb_mgmt_frame(skb)) {
switch (tdls_status) {
case TDLS_SETUP_COMPLETE:
+ case TDLS_CHAN_SWITCHING:
+ case TDLS_IN_BASE_CHAN:
+ case TDLS_IN_OFF_CHAN:
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
ra);
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
atomic_set(&priv->wmm.highest_queued_prio,
priv->tos_to_tid_inv[tid_down]);
- atomic_inc(&priv->wmm.tx_pkts_queued);
+ if (ra_list->tx_paused)
+ priv->wmm.pkts_paused[tid_down]++;
+ else
+ atomic_inc(&priv->wmm.tx_pkts_queued);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
@@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
- if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+ if (!priv_tmp->port_open ||
+ (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
continue;
/* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
list_for_each_entry(ptr, &tid_ptr->ra_list,
list) {
- if (!skb_queue_empty(&ptr->skb_head))
+ if (!ptr->tx_paused &&
+ !skb_queue_empty(&ptr->skb_head))
/* holds both locks */
goto found;
}
@@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
return 0;
}
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_tx_param tx_param;
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct mwifiex_private *priv;
+ int i;
+
+ if (adapter->data_sent || adapter->tx_lock_flag)
+ return;
+
+ for (i = 0; i < adapter->priv_num; ++i) {
+ priv = adapter->priv[i];
+
+ if (skb_queue_empty(&priv->bypass_txq))
+ continue;
+
+ skb = skb_dequeue(&priv->bypass_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ /* no aggregation for bypass packets */
+ tx_param.next_pkt_len = 0;
+
+ if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+ skb_queue_head(&priv->bypass_txq, skb);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+ } else {
+ atomic_dec(&adapter->bypass_tx_pending);
+ }
+ }
+}
+
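And a sketch of how the main work loop might drain the bypass queue ahead of the regular WMM queues (assumed placement; the actual hook lives in the driver main loop, outside this hunk):

	/* sketch only; mirrors the function's own data_sent/tx_lock_flag gate */
	if (!mwifiex_bypass_txlist_empty(adapter) &&
	    !adapter->data_sent && !adapter->tx_lock_flag)
		mwifiex_process_bypass_tx(adapter);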
/*
* This function transmits the highest priority packet awaiting in the
* WMM Queues.
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 48ece0b35591..38f09762bd2f 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra, int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list, int tid);
@@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
const u8 *ra_addr);
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+ u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+ u8 *mac, u8 tx_pause);
struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
*priv, u8 tid, const u8 *ra_addr);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 77361af68b18..9420fc61c2e6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5019,35 +5019,36 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
rcu_read_unlock();
- }
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
- !priv->ap_fw) {
- rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
- if (rc)
- goto out;
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (!priv->ap_fw) {
+ rc = mwl8k_cmd_set_rate(hw, vif,
+ ap_legacy_rates,
+ ap_mcs_rates);
+ if (rc)
+ goto out;
- rc = mwl8k_cmd_use_fixed_rate_sta(hw);
- if (rc)
- goto out;
- } else {
- if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
- priv->ap_fw) {
- int idx;
- int rate;
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+ if (rc)
+ goto out;
+ } else {
+ int idx;
+ int rate;
- /* Use AP firmware specific rate command.
- */
- idx = ffs(vif->bss_conf.basic_rates);
- if (idx)
- idx--;
+ /* Use AP firmware specific rate command.
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rate = mwl8k_rates_24[idx].hw_value;
- else
- rate = mwl8k_rates_50[idx].hw_value;
+ if (hw->conf.chandef.chan->band ==
+ IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
- mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
}
}
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index c410180479e6..7b5c554323c7 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2321,8 +2321,6 @@ void free_orinocodev(struct orinoco_private *priv)
struct orinoco_rx_data *rx_data, *temp;
struct orinoco_scan_data *sd, *sdtemp;
- wiphy_unregister(wiphy);
-
/* If the tasklet is scheduled when we call tasklet_kill it
* will run one final time. However the tasklet will only
* drain priv->rx_list if the hw is still available. */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index c0a27377d9e2..a956f965a1e5 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -118,6 +118,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
orinoco_cs_release(link);
+ wiphy_unregister(priv_to_wiphy(priv));
free_orinocodev(priv);
} /* orinoco_cs_detach */
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 1b543e30eff7..048693b6c6c2 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -223,13 +223,15 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
@@ -263,6 +265,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
iowrite16(0, card->bridge_io + 10);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 74219d59d7e1..4938a2208a37 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -173,13 +173,15 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
@@ -203,6 +205,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
struct orinoco_private *priv = pci_get_drvdata(pdev);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 8b045236b6e0..221352027779 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -262,13 +262,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
+ goto fail_wiphy;
}
pci_set_drvdata(pdev, priv);
return 0;
+ fail_wiphy:
+ wiphy_unregister(priv_to_wiphy(priv));
fail:
free_irq(pdev->irq, priv);
@@ -299,6 +301,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
struct orinoco_pci_card *card = priv->card;
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(priv));
free_irq(pdev->irq, priv);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 91f05442de28..26a57d773d30 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1502,6 +1502,7 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
if (upriv->dev) {
struct orinoco_private *priv = ndev_priv(upriv->dev);
orinoco_if_del(priv);
+ wiphy_unregister(priv_to_wiphy(upriv));
free_orinocodev(priv);
}
}
@@ -1695,6 +1696,7 @@ static int ezusb_probe(struct usb_interface *interface,
if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
upriv->dev = NULL;
err("%s: orinoco_if_add() failed", __func__);
+ wiphy_unregister(priv_to_wiphy(priv));
goto error;
}
upriv->dev = priv->ndev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..40d72312f3df 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
+ const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -200,6 +201,12 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw) {
+ status = -ENOMEM;
+ goto out;
+ }
len = fw_entry->size;
if (len % 4)
@@ -210,7 +217,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
+
+out:
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..de4900862836 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,12 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
return status;
}
+ /* Copy firmware into DMA-accessible memory */
fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw) {
+ status = -ENOMEM;
+ goto out;
+ }
len = fw_entry->size;
if (len % 4)
@@ -158,6 +163,9 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
+
+out:
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2b4ef256c6b9..de62f5dcb62f 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -240,7 +240,6 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
- select AVERAGE
config RT2X00_LIB_FIRMWARE
bool
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index afba0739c3b8..78cc035b2d17 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -54,7 +54,7 @@
#define CSR_REG_BASE 0x0400
#define CSR_REG_SIZE 0x0100
#define EEPROM_BASE 0x0000
-#define EEPROM_SIZE 0x006a
+#define EEPROM_SIZE 0x006e
#define BBP_BASE 0x0000
#define BBP_SIZE 0x0060
#define RF_BASE 0x0004
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 9bb398bed9bb..3282ddb766f4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -254,6 +254,8 @@ struct link_qual {
int tx_failed;
};
+DECLARE_EWMA(rssi, 1024, 8)
+
/*
* Antenna settings about the currently active link.
*/
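For reference, DECLARE_EWMA(rssi, 1024, 8) generates struct ewma_rssi plus the accessors used in the rt2x00link.c hunk below, with factor 1024 and weight 8; roughly (see include/linux/average.h for the authoritative definition):

	struct ewma_rssi {
		unsigned long internal;
	};
	static inline void ewma_rssi_init(struct ewma_rssi *e);
	static inline void ewma_rssi_add(struct ewma_rssi *e, unsigned long val);
	static inline unsigned long ewma_rssi_read(struct ewma_rssi *e);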
@@ -285,7 +287,7 @@ struct link_ant {
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct ewma rssi_ant;
+ struct ewma_rssi rssi_ant;
};
/*
@@ -314,7 +316,7 @@ struct link {
/*
* Currently active average RSSI value
*/
- struct ewma avg_rssi;
+ struct ewma_rssi avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 9b941c0c1264..017188e5a736 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -33,15 +33,11 @@
*/
#define DEFAULT_RSSI -128
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR 1024
-#define RT2X00_EWMA_WEIGHT 8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
{
unsigned long avg;
- avg = ewma_read(ewma);
+ avg = ewma_rssi_read(ewma);
if (avg)
return -avg;
@@ -76,8 +72,7 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
- RT2X00_EWMA_WEIGHT);
+ ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -225,12 +220,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
/*
* Update global RSSI
*/
- ewma_add(&link->avg_rssi, -rxdesc->rssi);
+ ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+ ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -285,8 +280,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
- ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
- RT2X00_EWMA_WEIGHT);
+ ewma_rssi_init(&rt2x00dev->link.avg_rssi);
/*
* Restore the VGC level as stored in the registers,
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct rtl_tcb_desc tcb_desc;
- if (skb)
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+ if (skb) {
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+ }
}
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index c8058aa73ecf..629125658b87 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -200,7 +200,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
@@ -209,7 +209,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -219,10 +219,10 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x), Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
index 05e944e451f4..21bd4a5337ab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
@@ -37,7 +37,7 @@
#define FW_8192C_POLLING_TIMEOUT_COUNT 3000
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFFF) == 0x88E1)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_88E_RSVDPAGE_LOC_LEN 5
@@ -131,25 +131,6 @@
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl92c_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8188e_h2c_cmd {
H2C_88E_RSVDPAGE = 0,
H2C_88E_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 0aca6f47487c..03cbe4cf110b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -39,6 +39,7 @@
#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK 0x00ffffff
#define RTLPRIV (struct rtl_priv *)
#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
@@ -312,7 +313,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
struct dig_t *digtable = &rtlpriv->dm_digtable;
u32 isbt;
- /* modify DIG lower bound, deal with abnorally large false alarm */
+ /* modify DIG lower bound, deal with abnormally large false alarm */
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
digtable->large_fa_hit++;
if (digtable->forbidden_igi < digtable->cur_igvalue) {
@@ -1536,13 +1537,11 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
return false;
bt_state = rtl_read_byte(rtlpriv, 0x4fd);
- bt_tx = rtl_read_dword(rtlpriv, 0x488);
- bt_tx = bt_tx & 0x00ffffff;
- bt_pri = rtl_read_dword(rtlpriv, 0x48c);
- bt_pri = bt_pri & 0x00ffffff;
+ bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+ bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
polling = rtl_read_dword(rtlpriv, 0x490);
- if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+ if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
polling == 0xffffffff && bt_state == 0xff)
return false;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 14b819ea8b71..43fcb25c885f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -221,7 +221,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
@@ -230,19 +230,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x),Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- rtlhal->fw_version = pfwheader->version;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
_rtl92c_enable_fw_download(hw, true);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index e9f4281f5067..864806c19ca7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -69,25 +69,6 @@
((GET_CVID_CUT_VERSION(version) == \
CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
-struct rtl92c_firmware_header {
- __le16 signature;
- u8 category;
- u8 function;
- __le16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- __le16 ramcodeSize;
- __le16 rsvd2;
- __le32 svnindex;
- __le32 rsvd3;
- __le32 rsvd4;
- __le32 rsvd5;
-};
-
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index c940a87175ca..74a479ac323d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -32,24 +32,15 @@
/*-------------------------------------------------------------------------
* Chip specific
*-------------------------------------------------------------------------*/
-#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
#define NORMAL_CHIP BIT(4)
#define CHIP_VENDOR_UMC BIT(5)
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
-#define IS_8723_SERIES(version) \
- (((version) & CHIP_8723) ? true : false)
-
#define IS_92C_1T2R(version) \
(((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
-#define IS_VENDOR_8723_A_CUT(version) \
- (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
- false : true) : false)
-
#define CHIP_BONDING_92C_1T2R 0x1
#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 767358a553fb..25db369b5d18 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -818,26 +818,29 @@ static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
{
- u16 value16;
-
+ u16 value16;
+ u32 value32;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
- RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
- RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
- rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
/* Accept all multicast address */
rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
/* Accept all management frames */
value16 = 0xFFFF;
- rtl92c_set_mgt_filter(hw, value16);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+ (u8 *)(&value16));
/* Reject all control frame - default value is 0 */
- rtl92c_set_ctrl_filter(hw, 0x0);
+ value16 = 0x0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+ (u8 *)(&value16));
/* Accept all data frames */
value16 = 0xFFFF;
- rtl92c_set_data_filter(hw, value16);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+ (u8 *)(&value16));
}
static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
@@ -988,17 +991,6 @@ static void _InitPABias(struct ieee80211_hw *hw)
}
}
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
- mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
- mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
- mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
- mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
int rtl92cu_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1068,7 +1060,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
}
_rtl92cu_hw_configure(hw);
_InitPABias(hw);
- _update_mac_setting(hw);
rtl92c_dm_init(hw);
exit:
local_irq_restore(flags);
@@ -1620,7 +1611,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
enum wireless_mode wirelessmode = mac->mode;
u8 idx = 0;
@@ -1829,63 +1819,10 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u4b_ac_param);
break;
default:
- RT_ASSERT(false,
- "SetHwReg8185(): invalid aci: %d !\n",
+ RT_ASSERT(false, "invalid aci: %d !\n",
e_aci);
break;
}
- if (rtlusb->acm_method != EACMWAY2_SW)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL, &e_aci);
- break;
- }
- case HW_VAR_ACM_CTRL:{
- u8 e_aci = *val;
- union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
- (&(mac->ac[0].aifs));
- u8 acm = p_aci_aifsn->f.acm;
- u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
- acm_ctrl =
- acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
- if (acm) {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl |= AcmHw_BeqEn;
- break;
- case AC2_VI:
- acm_ctrl |= AcmHw_ViqEn;
- break;
- case AC3_VO:
- acm_ctrl |= AcmHw_VoqEn;
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
- break;
- }
- } else {
- switch (e_aci) {
- case AC0_BE:
- acm_ctrl &= (~AcmHw_BeqEn);
- break;
- case AC2_VI:
- acm_ctrl &= (~AcmHw_ViqEn);
- break;
- case AC3_VO:
- acm_ctrl &= (~AcmHw_VoqEn);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
- }
- }
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
- rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
case HW_VAR_RCR:{
@@ -1999,12 +1936,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_MGT_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+ mac->rx_mgt_filter = *(u16 *)val;
break;
case HW_VAR_CTRL_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+ mac->rx_ctrl_filter = *(u16 *)val;
break;
case HW_VAR_DATA_FILTER:
rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+ mac->rx_data_filter = *(u16 *)val;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -2280,7 +2220,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
u8 u1tmp = 0;
bool actuallyset = false;
@@ -2357,20 +2296,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
/* Enable register area 0x0-0xc. */
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
- if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
- /*
- * We should configure HW PDn source for WiFi
- * ONLY, and then our HW will be set in
- * power-down mode if PDn source from all
- * functions are configured.
- */
- u1tmp = rtl_read_byte(rtlpriv,
- REG_MULTI_FUNC_CTRL);
- rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
- (u1tmp|WL_HWPDN_EN));
- } else {
- rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
- }
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
}
if (e_rfpowerstate_toset == ERFOFF) {
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 490a7cf7c702..035713311a4a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
chip_version = NORMAL_CHIP;
chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
- /* RTL8723 with BT function. */
- chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
if (IS_VENDOR_UMC(chip_version))
chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
- } else if (IS_8723_SERIES(chip_version)) {
- value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
- chip_version |= ((value32 & RF_RL_ID) ?
- CHIP_8723_DRV_REV : 0);
}
}
rtlhal->version = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
break;
- case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
- versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
- break;
- case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
- versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
- break;
case VERSION_TEST_CHIP_92C:
versionid = "TEST_CHIP_92C";
break;
@@ -405,59 +393,9 @@ void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 u4b_ac_param;
rtl92c_dm_init_edca_turbo(hw);
- u4b_ac_param = (u32) mac->ac[aci].aifs;
- u4b_ac_param |=
- ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
- AC_PARAM_ECW_MIN_OFFSET;
- u4b_ac_param |=
- ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
- AC_PARAM_ECW_MAX_OFFSET;
- u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
- AC_PARAM_TXOP_OFFSET;
- RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
- aci, u4b_ac_param);
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
- break;
- case AC0_BE:
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
- u32 i;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- for (i = 0 ; i < ETH_ALEN ; i++)
- rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
- rtl_read_byte(rtlpriv, REG_MACID),
- rtl_read_byte(rtlpriv, REG_MACID+1),
- rtl_read_byte(rtlpriv, REG_MACID+2),
- rtl_read_byte(rtlpriv, REG_MACID+3),
- rtl_read_byte(rtlpriv, REG_MACID+4),
- rtl_read_byte(rtlpriv, REG_MACID+5));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
}
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -656,47 +594,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
}
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
/*==============================================================*/
static u8 _rtl92c_query_rxpwrpercentage(char antpower)
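For reference, since rtl92c_set_qos() now simply forwards to the HW_VAR_AC_PARAM handler, here is a minimal sketch of the EDCA register packing the removed body performed; the AC_PARAM_* offsets and REG_EDCA_*_PARAM registers are taken from the deleted code, and the helper name is invented for illustration:

	/* Sketch of the packing the removed rtl92c_set_qos() body did:
	 * AIFS in the low byte, 4-bit ECWmin/ECWmax fields, TXOP on top.
	 */
	static u32 pack_edca_param(u8 aifs, u16 cw_min, u16 cw_max, u16 tx_op)
	{
		u32 v = aifs;

		v |= ((u32)cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
		v |= ((u32)cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
		v |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
		return v;	/* written to the queue's REG_EDCA_*_PARAM */
	}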
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index e34f0f14ccd7..553a4bfac668 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -48,7 +48,6 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
/*---------------------------------------------------------------
* Hardware init functions
*---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
void rtl92c_init_interrupt(struct ieee80211_hw *hw);
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
@@ -73,15 +72,6 @@ void rtl92c_init_retry_function(struct ieee80211_hw *hw);
void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
struct rx_fwinfo_92c {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 23806c243a53..fd4a5353d216 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NetGear WNA1000Mv2*/
{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 587b8c505a76..7c1db7e7572d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -420,7 +420,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
"dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
de_digtable->recover_cnt, de_digtable->rx_gain_min);
- /* deal with abnorally large false alarm */
+ /* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG(): Abnormally false alarm case\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index 1646e7c3d0f8..8a38daa316cb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -110,28 +110,6 @@
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
-struct rtl92d_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
-
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
-
- u32 svnindex;
- u32 rsvd3;
-
- u32 rsvd4;
- u32 rsvd5;
-};
-
int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 1961b8e28dc1..bb06fe836fe7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
rfpath++) {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
BIT(18), 0);
/* RF0x0b[16:14] =3b'111 */
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
0x1c000, 0x07);
} else {
- /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
BIT(16) | BIT(18),
(BIT(16) | BIT(8)) >> 8);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index 232865cc3ffd..0708eedd9671 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -198,7 +198,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl92c_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
@@ -207,8 +207,8 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- rtlhal->fw_version = pfwheader->version;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -219,10 +219,10 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware Version(%d), Signature(%#x),Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl92c_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
- fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
} else {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"Firmware no Header, Signature(%#x)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
index 3e2a48e5fb4d..069da1e7e80a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
@@ -33,7 +33,7 @@
#define FW_8192C_POLLING_TIMEOUT_COUNT 3000
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92E0)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
#define USE_OLD_WOWLAN_DEBUG_FW 0
#define H2C_92E_RSVDPAGE_LOC_LEN 5
@@ -89,25 +89,6 @@
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl92c_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8192e_h2c_cmd {
H2C_92E_RSVDPAGE = 0,
H2C_92E_MSRRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
index a863a44f9e16..018340aedf09 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
@@ -449,7 +449,7 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d\n", band);
@@ -489,7 +489,7 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d()\n", band);
@@ -853,7 +853,7 @@ static u8 _rtl92ee_get_rate_section_index(u32 regaddr)
else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
index = (u8)((regaddr - 0xE20) / 4);
break;
- };
+ }
return index;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 8280bab43df4..3859b3e3d158 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -205,9 +205,9 @@ bool rtl8723e_get_btc_status(void)
return true;
}
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
{
- return (hdr->signature & 0xfff0) == 0x2300;
+ return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
}
static struct rtl_hal_ops rtl8723e_hal_ops = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..d091f1d5f91e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -209,9 +209,9 @@ bool rtl8723be_get_btc_status(void)
return true;
}
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
{
- return (hdr->signature & 0xfff0) == 0x5300;
+ return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
}
static struct rtl_hal_ops rtl8723be_hal_ops = {
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
index dd698e7e9ace..a2f5e89bedfe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -253,7 +253,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8723e_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
@@ -263,7 +263,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
if (!rtlhal->pfirmware)
return 1;
- pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+ pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -275,10 +275,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
"Firmware Version(%d), Signature(%#x), Size(%d)\n",
pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl8723e_firmware_header));
+ (int)sizeof(struct rtlwifi_firmware_header));
- pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
index 3ebafc80972f..8ea372d1626e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -50,25 +50,6 @@ enum version_8723e {
VERSION_UNKNOWN = 0xFF,
};
-struct rtl8723e_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodesize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8723be_cmd {
H2C_8723BE_RSVDPAGE = 0,
H2C_8723BE_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
index 95e95626b632..525eb234627c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
@@ -210,7 +210,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8821a_firmware_header *pfwheader;
+ struct rtlwifi_firmware_header *pfwheader;
u8 *pfwdata;
u32 fwsize;
int err;
@@ -228,8 +228,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
return 1;
pfwheader =
- (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
- rtlhal->fw_version = pfwheader->version;
+ (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->wowlan_firmware;
fwsize = rtlhal->wowlan_fwsize;
@@ -238,8 +238,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
return 1;
pfwheader =
- (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
- rtlhal->fw_version = pfwheader->version;
+ (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -255,8 +255,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
"Firmware Version(%d), Signature(%#x)\n",
pfwheader->version, pfwheader->signature);
- pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+ pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+ fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
}
if (rtlhal->mac_func_enable) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
index 591c14c0b9b5..8f5b4aade3c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
@@ -34,10 +34,10 @@
#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000
#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x9500)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x2100)
+ ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
#define USE_OLD_WOWLAN_DEBUG_FW 0
@@ -137,25 +137,6 @@
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0
-struct rtl8821a_firmware_header {
- u16 signature;
- u8 category;
- u8 function;
- u16 version;
- u8 subversion;
- u8 rsvd1;
- u8 month;
- u8 date;
- u8 hour;
- u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
-};
-
enum rtl8812_c2h_evt {
C2H_8812_DBG = 0,
C2H_8812_LB = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 3236d44b459d..b7f18e2155eb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, MSR, bt_msr);
rtlpriv->cfg->ops->led_control(hw, ledaction);
- if ((bt_msr & 0xfc) == MSR_AP)
+ if ((bt_msr & MSR_MASK) == MSR_AP)
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
else
rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
index 53668fc8f23e..1d6110f9c1fb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
@@ -429,6 +429,7 @@
#define MSR_ADHOC 0x01
#define MSR_INFRA 0x02
#define MSR_AP 0x03
+#define MSR_MASK 0x03
#define RRSR_RSC_OFFSET 21
#define RRSR_SHORT_OFFSET 23
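The new MSR_MASK makes the hw.c test above correct: the network type lives in the two low-order bits of MSR (MSR_NOLINK through MSR_AP are 0x00 to 0x03), so the old mask 0xfc cleared exactly the bits being compared and (bt_msr & 0xfc) == MSR_AP could never hold. For example, with bt_msr = 0x13 (MSR_AP plus an unrelated upper bit), 0x13 & 0xfc = 0x10, which never equals 0x03, while 0x13 & MSR_MASK = 0x03 matches MSR_AP as intended.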
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 2b770b5e2620..b90ca618b123 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -222,6 +222,25 @@ enum rf_tx_num {
#define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9)
#define WOL_REASON_REALWOW_V2_ACKLOST BIT(10)
+struct rtlwifi_firmware_header {
+ __le16 signature;
+ u8 category;
+ u8 function;
+ __le16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+};
+
struct txpower_info_2g {
u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -2064,16 +2083,12 @@ struct rtl_tcb_desc {
bool tx_enable_sw_calc_duration;
};
-struct rtl92c_firmware_header;
-
struct rtl_wow_pattern {
u8 type;
u16 crc;
u32 mask[4];
};
-struct rtl8723e_firmware_header;
-
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -2177,7 +2192,7 @@ struct rtl_hal_ops {
void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
bool (*get_btc_status) (void);
- bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+ bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
u32 (*rx_command_packet)(struct ieee80211_hw *hw,
struct rtl_stats status, struct sk_buff *skb);
void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
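The point of folding the four per-chip header structs into this one definition with __le16/__le32 members is that the firmware header is a little-endian on-disk format: every multi-byte field has to pass through le16_to_cpu()/le32_to_cpu() on load, which is exactly what the fw.c hunks above add, and what the old plain-u16 structs silently got wrong on big-endian hosts. A minimal sketch of a parse under those rules (hypothetical helper, blob assumed at least header-sized):

	/* Sketch: endian-safe read of the common firmware header. */
	static void parse_fw_header(const u8 *fw, u16 *version, u32 *svn)
	{
		const struct rtlwifi_firmware_header *hdr =
			(const struct rtlwifi_firmware_header *)fw;

		/* __le16/__le32 fields must be converted on load; plain
		 * assignment would be wrong on big-endian machines, and
		 * the sparse annotations now flag such misuse.
		 */
		*version = le16_to_cpu(hdr->version);
		*svn = le32_to_cpu(hdr->svnindex);
	}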
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 0c0d5cd98514..7c355fff2c5e 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -118,7 +118,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- cmd->params.role_id = wlvif->role_id;
+ /* scan on the dev role if the regular one is not started */
+ if (wlcore_is_p2p_mgmt(wlvif))
+ cmd->params.role_id = wlvif->dev_role_id;
+ else
+ cmd->params.role_id = wlvif->role_id;
if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
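This pairs with the wlcore/main.c changes further below: an NL80211_IFTYPE_P2P_DEVICE interface only ever enables WL1271_ROLE_DEVICE, so its wlvif->role_id stays WL12XX_INVALID_ROLE_ID and scan commands have to be issued on dev_role_id instead. The same fallback is added to the wl18xx scan path in this series.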
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 67f2a0eec854..4be0409308cb 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -282,3 +282,30 @@ out:
kfree(acx);
return ret;
}
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+ struct acx_dynamic_fw_traces_cfg *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+ wl->dynamic_fw_traces);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+ ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+ goto out;
+ }
+out:
+ kfree(acx);
+ return ret;
+}
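This follows the usual alloc/fill/configure/free shape of the wl18xx ACX helpers; note that the allocation-failure branch also reaches kfree(acx), which is harmless since kfree(NULL) is a no-op. A sketch of a typical call site (cf. the wl18xx_hw_init() hunk below, which pushes the cached bitmap at init time; the bitmap value here is purely illustrative):

	wl->dynamic_fw_traces = 0x3;	/* illustrative trace bitmap */
	ret = wl18xx_acx_dynamic_fw_traces(wl);
	if (ret < 0)
		wl1271_warning("failed to set fw traces: %d", ret);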
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 4afccd4b9467..342a2993ef98 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -35,7 +35,8 @@ enum {
ACX_PEER_CAP = 0x0056,
ACX_INTERRUPT_NOTIFY = 0x0057,
ACX_RX_BA_FILTER = 0x0058,
- ACX_AP_SLEEP_CFG = 0x0059
+ ACX_AP_SLEEP_CFG = 0x0059,
+ ACX_DYNAMIC_TRACES_CFG = 0x005A,
};
/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -92,27 +93,26 @@ struct wl18xx_acx_checksum_state {
struct wl18xx_acx_error_stats {
- u32 error_frame;
- u32 error_null_Frame_tx_start;
- u32 error_numll_frame_cts_start;
- u32 error_bar_retry;
- u32 error_frame_cts_nul_flid;
-} __packed;
-
-struct wl18xx_acx_debug_stats {
- u32 debug1;
- u32 debug2;
- u32 debug3;
- u32 debug4;
- u32 debug5;
- u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
- u32 prepared_descs;
- u32 tx_cmplt;
+ u32 error_frame_non_ctrl;
+ u32 error_frame_ctrl;
+ u32 error_frame_during_protection;
+ u32 null_frame_tx_start;
+ u32 null_frame_cts_start;
+ u32 bar_retry;
+ u32 num_frame_cts_nul_flid;
+ u32 tx_abort_failure;
+ u32 tx_resume_failure;
+ u32 rx_cmplt_db_overflow_cnt;
+ u32 elp_while_rx_exch;
+ u32 elp_while_tx_exch;
+ u32 elp_while_tx;
+ u32 elp_while_nvic_pending;
+ u32 rx_excessive_frame_len;
+ u32 burst_mismatch;
+ u32 tbc_exch_mismatch;
} __packed;
+#define NUM_OF_RATES_INDEXES 30
struct wl18xx_acx_tx_stats {
u32 tx_prepared_descs;
u32 tx_cmplt;
@@ -122,7 +122,7 @@ struct wl18xx_acx_tx_stats {
u32 tx_data_programmed;
u32 tx_burst_programmed;
u32 tx_starts;
- u32 tx_imm_resp;
+ u32 tx_stop;
u32 tx_start_templates;
u32 tx_start_int_templates;
u32 tx_start_fw_gen;
@@ -131,13 +131,14 @@ struct wl18xx_acx_tx_stats {
u32 tx_exch;
u32 tx_retry_template;
u32 tx_retry_data;
+ u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
u32 tx_exch_pending;
u32 tx_exch_expiry;
u32 tx_done_template;
u32 tx_done_data;
u32 tx_done_int_template;
- u32 tx_frame_checksum;
- u32 tx_checksum_result;
+ u32 tx_cfe1;
+ u32 tx_cfe2;
u32 frag_called;
u32 frag_mpdu_alloc_failed;
u32 frag_init_called;
@@ -165,11 +166,8 @@ struct wl18xx_acx_rx_stats {
u32 rx_cmplt_task;
u32 rx_phy_hdr;
u32 rx_timeout;
+ u32 rx_rts_timeout;
u32 rx_timeout_wa;
- u32 rx_wa_density_dropped_frame;
- u32 rx_wa_ba_not_expected;
- u32 rx_frame_checksum;
- u32 rx_checksum_result;
u32 defrag_called;
u32 defrag_init_called;
u32 defrag_in_process_called;
@@ -179,6 +177,7 @@ struct wl18xx_acx_rx_stats {
u32 decrypt_key_not_found;
u32 defrag_need_decrypt;
u32 rx_tkip_replays;
+ u32 rx_xfr;
} __packed;
struct wl18xx_acx_isr_stats {
@@ -193,21 +192,13 @@ struct wl18xx_acx_pwr_stats {
u32 connection_out_of_sync;
u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
- u32 calibration;
- u32 rx_mismatch;
- u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
- u32 ps_poll_timeouts;
- u32 upsd_timeouts;
- u32 upsd_max_ap_turn;
- u32 ps_poll_max_ap_turn;
- u32 ps_poll_utilization;
- u32 upsd_utilization;
+ u32 sleep_time_count;
+ u32 sleep_time_avg;
+ u32 sleep_cycle_avg;
+ u32 sleep_percent;
+ u32 ap_sleep_active_conf;
+ u32 ap_sleep_user_conf;
+ u32 ap_sleep_counter;
} __packed;
struct wl18xx_acx_rx_filter_stats {
@@ -227,11 +218,11 @@ struct wl18xx_acx_rx_rate_stats {
} __packed;
#define AGGR_STATS_TX_AGG 16
-#define AGGR_STATS_TX_RATE 16
#define AGGR_STATS_RX_SIZE_LEN 16
struct wl18xx_acx_aggr_stats {
- u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+ u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+ u32 tx_agg_len[AGGR_STATS_TX_AGG];
u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
} __packed;
@@ -240,8 +231,6 @@ struct wl18xx_acx_aggr_stats {
struct wl18xx_acx_pipeline_stats {
u32 hs_tx_stat_fifo_int;
u32 hs_rx_stat_fifo_int;
- u32 tcp_tx_stat_fifo_int;
- u32 tcp_rx_stat_fifo_int;
u32 enc_tx_stat_fifo_int;
u32 enc_rx_stat_fifo_int;
u32 rx_complete_stat_fifo_int;
@@ -249,38 +238,61 @@ struct wl18xx_acx_pipeline_stats {
u32 post_proc_swi;
u32 sec_frag_swi;
u32 pre_to_defrag_swi;
- u32 defrag_to_csum_swi;
- u32 csum_to_rx_xfer_swi;
+ u32 defrag_to_rx_xfer_swi;
u32 dec_packet_in;
u32 dec_packet_in_fifo_full;
u32 dec_packet_out;
- u32 cs_rx_packet_in;
- u32 cs_rx_packet_out;
u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+ u16 padding;
+} __packed;
+
+#define DIVERSITY_STATS_NUM_OF_ANT 2
+
+struct wl18xx_acx_diversity_stats {
+ u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+ u32 total_num_of_toggles;
} __packed;
-struct wl18xx_acx_mem_stats {
- u32 rx_free_mem_blks;
- u32 tx_free_mem_blks;
- u32 fwlog_free_mem_blks;
- u32 fw_gen_free_mem_blks;
+struct wl18xx_acx_thermal_stats {
+ u16 irq_thr_low;
+ u16 irq_thr_high;
+ u16 tx_stop;
+ u16 tx_resume;
+ u16 false_irq;
+ u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+ u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+ u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+ s32 rssi_level;
+} __packed;
+
+struct wl18xx_dfs_stats {
+ u32 num_of_radar_detections;
} __packed;
struct wl18xx_acx_statistics {
struct acx_header header;
struct wl18xx_acx_error_stats error;
- struct wl18xx_acx_debug_stats debug;
struct wl18xx_acx_tx_stats tx;
struct wl18xx_acx_rx_stats rx;
struct wl18xx_acx_isr_stats isr;
struct wl18xx_acx_pwr_stats pwr;
- struct wl18xx_acx_ps_poll_stats ps_poll;
struct wl18xx_acx_rx_filter_stats rx_filter;
struct wl18xx_acx_rx_rate_stats rx_rate;
struct wl18xx_acx_aggr_stats aggr_size;
struct wl18xx_acx_pipeline_stats pipeline;
- struct wl18xx_acx_mem_stats mem;
+ struct wl18xx_acx_diversity_stats diversity;
+ struct wl18xx_acx_thermal_stats thermal;
+ struct wl18xx_acx_calib_failure_stats calib;
+ struct wl18xx_roaming_stats roaming;
+ struct wl18xx_dfs_stats dfs;
} __packed;
struct wl18xx_acx_clear_statistics {
@@ -367,6 +379,15 @@ struct acx_ap_sleep_cfg {
u8 idle_conn_thresh;
} __packed;
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * configure the FW dynamic traces
+ */
+struct acx_dynamic_fw_traces_cfg {
+ struct acx_header header;
+ __le32 dynamic_fw_traces;
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
@@ -380,5 +401,6 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
#endif /* __WL18XX_ACX_H__ */
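All of these statistics structures are __packed and mirror a firmware-defined layout, so any field that is added, removed, or reordered shifts every counter that follows it; that is why the debugfs.c hunk below has to be updated in lockstep with this header.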
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 5fbd2230f372..4edfe28395f0 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -36,18 +36,23 @@
DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
@@ -57,7 +62,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
@@ -66,13 +71,15 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+ NUM_OF_RATES_INDEXES);
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
@@ -97,11 +104,8 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
@@ -111,6 +115,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
@@ -120,14 +125,13 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
@@ -141,14 +145,14 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
- AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+ AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+ AGGR_STATS_TX_AGG);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
AGGR_STATS_RX_SIZE_LEN);
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
@@ -156,21 +160,32 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
PIPE_STATS_HW_FIFO);
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+ DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+ WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
static ssize_t conf_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -281,6 +296,55 @@ static const struct file_operations radar_detection_ops = {
.llseek = default_llseek,
};
+static ssize_t dynamic_fw_traces_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl->dynamic_fw_traces = value;
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_acx_dynamic_fw_traces(wl);
+ if (ret < 0)
+ count = ret;
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ return wl1271_format_buffer(userbuf, count, ppos,
+ "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+ .read = dynamic_fw_traces_read,
+ .write = dynamic_fw_traces_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
@@ -301,18 +365,23 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(clear_fw_stats, stats);
- DEBUGFS_FWSTATS_ADD(debug, debug1);
- DEBUGFS_FWSTATS_ADD(debug, debug2);
- DEBUGFS_FWSTATS_ADD(debug, debug3);
- DEBUGFS_FWSTATS_ADD(debug, debug4);
- DEBUGFS_FWSTATS_ADD(debug, debug5);
- DEBUGFS_FWSTATS_ADD(debug, debug6);
-
- DEBUGFS_FWSTATS_ADD(error, error_frame);
- DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
- DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
- DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
- DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+ DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+ DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+ DEBUGFS_FWSTATS_ADD(error, bar_retry);
+ DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+ DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+ DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+ DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+ DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+ DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+ DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+ DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
@@ -322,7 +391,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_starts);
- DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+ DEBUGFS_FWSTATS_ADD(tx, tx_stop);
DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
@@ -331,13 +400,14 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(tx, tx_exch);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
- DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
- DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
DEBUGFS_FWSTATS_ADD(tx, frag_called);
DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
@@ -362,11 +432,8 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+ DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
- DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
- DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
- DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
- DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
DEBUGFS_FWSTATS_ADD(rx, defrag_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
@@ -376,6 +443,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+ DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
DEBUGFS_FWSTATS_ADD(isr, irqs);
@@ -384,13 +452,13 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
- DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
- DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+ DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+ DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
@@ -404,12 +472,11 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
- DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
- DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
- DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
@@ -417,22 +484,33 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
- DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
- DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
- DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
- DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
- DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
- DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+ DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+ DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+ DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+ DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+ DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+ DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+ DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+ DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+ DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+ DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+ DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
DEBUGFS_ADD(conf, moddir);
DEBUGFS_ADD(radar_detection, moddir);
+ DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
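Once registered under moddir, the new entry behaves like any other debugfs file: reading it returns the cached wl->dynamic_fw_traces value, and writing a number stores it and, if the firmware is up, immediately pushes it via wl18xx_acx_dynamic_fw_traces(). The exact path depends on where debugfs is mounted and on the device's directory, so it is not spelled out here.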
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 548bb9e7e91e..09c7e098f460 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -112,6 +112,14 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
return 0;
}
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+ u32 clock;
+ /* convert the MSB+LSB to a u32 TSF value */
+ clock = (tsf_msb << 16) | tsf_lsb;
+ wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
int wl18xx_process_mailbox_events(struct wl1271 *wl)
{
struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -128,6 +136,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_scan_completed(wl, wl->scan_wlvif);
}
+ if (vector & TIME_SYNC_EVENT_ID)
+ wlcore_event_time_sync(wl,
+ mbox->time_sync_tsf_msb,
+ mbox->time_sync_tsf_lsb);
+
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
mbox->radar_channel,
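A quick worked example of the MSB/LSB recombination in wlcore_event_time_sync() above: with time_sync_tsf_msb = 0x0001 and time_sync_tsf_lsb = 0x86a0, clock = (0x0001 << 16) | 0x86a0 = 0x000186a0 = 100000, the 32-bit clock value that gets logged.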
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 266ee87834e4..f3d4f13379cb 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,8 +38,9 @@ enum {
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
- SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
- SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
+ SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
+ SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
+ TIME_SYNC_EVENT_ID = BIT(24),
};
enum wl18xx_radar_types {
@@ -95,13 +96,16 @@ struct wl18xx_event_mailbox {
/* smart config sync channel */
u8 sc_sync_channel;
u8 sc_sync_band;
- u8 padding2[2];
+ /* time sync msb */
+ u16 time_sync_tsf_msb;
/* radar detect */
u8 radar_channel;
u8 radar_type;
- u8 padding3[2];
+ /* time sync lsb */
+ u16 time_sync_tsf_lsb;
+
} __packed;
int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 49aca2cf7605..abbf054fb6da 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -422,6 +422,8 @@ static struct wlcore_conf wl18xx_conf = {
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
+ .num_short_intervals = SCAN_MAX_SHORT_INTERVALS,
+ .long_interval = 30000,
},
.ht = {
.rx_ba_win_size = 32,
@@ -1026,8 +1028,8 @@ static int wl18xx_boot(struct wl1271 *wl)
CHANNEL_SWITCH_COMPLETE_EVENT_ID |
DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
SMART_CONFIG_SYNC_EVENT_ID |
- SMART_CONFIG_DECODE_EVENT_ID;
-;
+ SMART_CONFIG_DECODE_EVENT_ID |
+ TIME_SYNC_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
@@ -1159,6 +1161,11 @@ static int wl18xx_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /* set the dynamic fw traces bitmap */
+ ret = wl18xx_acx_dynamic_fw_traces(wl);
+ if (ret < 0)
+ return ret;
+
if (checksum_param) {
ret = wl18xx_acx_set_checksum_state(wl);
if (ret != 0)
@@ -1797,7 +1804,7 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
{
- .max = 3,
+ .max = 2,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
@@ -1806,6 +1813,10 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT),
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
};
static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
@@ -1813,6 +1824,48 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
.max = 2,
.types = BIT(NL80211_IFTYPE_AP),
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
};
static const struct ieee80211_iface_combination
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 98666f235a12..c938c494c785 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -51,7 +51,11 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
goto out;
}
- cmd->role_id = wlvif->role_id;
+ /* scan on the dev role if the regular one is not started */
+ if (wlcore_is_p2p_mgmt(wlvif))
+ cmd->role_id = wlvif->dev_role_id;
+ else
+ cmd->role_id = wlvif->role_id;
if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
@@ -223,9 +227,20 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
SCAN_TYPE_PERIODIC);
wl18xx_adjust_channels(cmd, cmd_channels);
- cmd->short_cycles_sec = 0;
- cmd->long_cycles_sec = cpu_to_le16(req->interval);
- cmd->short_cycles_count = 0;
+ if (c->num_short_intervals && c->long_interval &&
+ c->long_interval > req->interval) {
+ cmd->short_cycles_msec = cpu_to_le16(req->interval);
+ cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+ cmd->short_cycles_count = c->num_short_intervals;
+ } else {
+ cmd->short_cycles_msec = 0;
+ cmd->long_cycles_msec = cpu_to_le16(req->interval);
+ cmd->short_cycles_count = 0;
+ }
+ wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+ le16_to_cpu(cmd->short_cycles_msec),
+ le16_to_cpu(cmd->long_cycles_msec),
+ cmd->short_cycles_count);
cmd->total_cycles = 0;
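A worked example of the interval selection above: with a user-requested req->interval of 10000 ms and the wl18xx defaults added in main.c earlier (long_interval = 30000, num_short_intervals = SCAN_MAX_SHORT_INTERVALS), the firmware runs SCAN_MAX_SHORT_INTERVALS cycles 10 s apart and then drops to 30 s cycles; if the requested interval is not shorter than long_interval, or either conf value is zero, every cycle simply uses the requested interval, matching the old behaviour.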
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
index 2e636aa5dba9..66a763f644d2 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.h
+++ b/drivers/net/wireless/ti/wl18xx/scan.h
@@ -74,8 +74,8 @@ struct wl18xx_cmd_scan_params {
u8 dfs; /* number of dfs channels in 5ghz */
u8 passive_active; /* number of passive before active channels 2.4ghz */
- __le16 short_cycles_sec;
- __le16 long_cycles_sec;
+ __le16 short_cycles_msec;
+ __le16 long_cycles_msec;
u8 short_cycles_count;
u8 total_cycles; /* 0 - infinite */
u8 padding[2];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 68919f8d4310..f01d24baff7c 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -2003,12 +2003,15 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wlvif->bss_type == BSS_TYPE_IBSS)))
return -EINVAL;
- ret = wl12xx_cmd_role_enable(wl,
- wl12xx_wlvif_to_vif(wlvif)->addr,
- WL1271_ROLE_DEVICE,
- &wlvif->dev_role_id);
- if (ret < 0)
- goto out;
+ /* the dev role is already started for p2p mgmt interfaces */
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_enable(wl,
+ wl12xx_wlvif_to_vif(wlvif)->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
if (ret < 0)
@@ -2023,7 +2026,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
out_stop:
wl12xx_cmd_role_stop_dev(wl, wlvif);
out_disable:
- wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (!wlcore_is_p2p_mgmt(wlvif))
+ wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
out:
return ret;
}
@@ -2052,10 +2056,42 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (ret < 0)
goto out;
- ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
- if (ret < 0)
- goto out;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
out:
return ret;
}
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 feature, u8 enable, u8 value)
+{
+ struct wlcore_cmd_generic_cfg *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD,
+ "cmd generic cfg (role %d feature %d enable %d value %d)",
+ wlvif->role_id, feature, enable, value);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->role_id = wlvif->role_id;
+ cmd->feature = feature;
+ cmd->enable = enable;
+ cmd->value = value;
+
+ ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send generic cfg command");
+ goto out_free;
+ }
+out_free:
+ kfree(cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
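A minimal sketch of a call site for the newly exported helper, using the only feature id this series defines (WLCORE_CFG_FEATURE_RADAR_DEBUG, in cmd.h below); the enable/value encoding is firmware-defined, so the values here are assumptions for illustration:

	/* Illustrative only: toggle radar debug on this vif's role. */
	ret = wlcore_cmd_generic_cfg(wl, wlvif,
				     WLCORE_CFG_FEATURE_RADAR_DEBUG,
				     1 /* enable */, 0 /* value */);
	if (ret < 0)
		wl1271_error("generic cfg failed: %d", ret);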
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e14cd407a6ae..8dc46c0a489a 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -92,6 +92,8 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 feature, u8 enable, u8 value);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -652,6 +654,19 @@ struct wl12xx_cmd_regdomain_dfs_config {
u8 padding[3];
} __packed;
+enum wlcore_generic_cfg_feature {
+ WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+ struct wl1271_cmd_header header;
+
+ u8 role_id;
+ u8 feature;
+ u8 enable;
+ u8 value;
+} __packed;
+
struct wl12xx_cmd_config_fwlog {
struct wl1271_cmd_header header;
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 166add00b50f..52a9d1b14020 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1186,6 +1186,15 @@ struct conf_sched_scan_settings {
/* SNR threshold to be used for filtering */
s8 snr_threshold;
+
+ /*
+ * number of short intervals scheduled scan cycles before
+ * switching to long intervals
+ */
+ u8 num_short_intervals;
+
+ /* interval between each long scheduled scan cycle (in ms) */
+ u16 long_interval;
} __packed;
struct conf_ht_setting {
@@ -1352,7 +1361,7 @@ struct conf_recovery_settings {
* version, the two LSB are the lower driver's private conf
* version.
*/
-#define WLCORE_CONF_VERSION (0x0006 << 16)
+#define WLCORE_CONF_VERSION (0x0007 << 16)
#define WLCORE_CONF_MASK 0xffff0000
#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
sizeof(struct wlcore_conf))
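The WLCORE_CONF_VERSION bump (0x0006 to 0x0007 in the upper 16 bits) goes hand in hand with the new conf_sched_scan_settings fields above: struct wlcore_conf changed size and layout, so a previously cached binary conf no longer matches and the version check keeps it from being applied.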
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5ca1fb161a50..e92f2639af2c 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -348,7 +348,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
}
/* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h
index a45fbfddec19..fd1cdb6bc3e4 100644
--- a/drivers/net/wireless/ti/wlcore/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -35,5 +35,6 @@ int wl1271_hw_init(struct wl1271 *wl);
int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 337223b9f6f8..e819369d8f8f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1792,6 +1792,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
wl1271_configure_resume(wl, wlvif);
}
@@ -2256,6 +2262,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_DEVICE:
wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@ static void wlcore_hw_queue_iter(void *data, u8 *mac,
{
struct wlcore_hw_queue_iter_data *iter_data = data;
- if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
return;
if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@ static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
struct wlcore_hw_queue_iter_data iter_data = {};
int i, q_base;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return 0;
+ }
+
iter_data.vif = vif;
/* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ret = wl12xx_cmd_role_enable(wl, vif->addr,
- role_type, &wlvif->role_id);
- if (ret < 0)
- goto out;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
- ret = wl1271_init_vif_specific(wl, vif);
- if (ret < 0)
- goto out;
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ } else {
+ ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+
+ /* needed mainly for configuring rate policies */
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ goto out;
+ }
list_add(&wlvif->list, &wl->wlvif_list);
set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl12xx_stop_dev(wl, wlvif);
}
- ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
- if (ret < 0)
- goto deinit;
+ if (!wlcore_is_p2p_mgmt(wlvif)) {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+ if (ret < 0)
+ goto deinit;
+ } else {
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (ret < 0)
+ goto deinit;
+ }
wl1271_ps_elp_sleep(wl);
}
@@ -3088,6 +3120,9 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
{
int ret;
+ if (wlcore_is_p2p_mgmt(wlvif))
+ return 0;
+
if (conf->power_level != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
@@ -3207,6 +3242,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlcore_is_p2p_mgmt(wlvif))
+ continue;
+
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
u8 ps_scheme;
int ret = 0;
+ if (wlcore_is_p2p_mgmt(wlvif))
+ return 0;
+
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->max_sched_scan_ssids = 16;
wl->hw->wiphy->max_match_sets = 16;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index e125974285cc..5b2927391d1c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,7 +74,14 @@ static void wl1271_rx_status(struct wl1271 *wl,
if (desc->rate <= wl->hw_min_ht_rate)
status->flag |= RX_FLAG_HT;
- status->signal = desc->rssi;
+ /*
+ * Read the signal level and the antenna diversity indication.
+ * The MSB of the signal level is forced on because the RSSI is
+ * always a negative number.
+ * The antenna indication is carried in the MSB of the raw rssi
+ * byte.
+ */
+ status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+ status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
/*
* FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
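A worked example of the decode above, assuming a raw desc->rssi byte of 0xb5: the level bits are 0xb5 & 0x7f = 0x35, and forcing the MSB gives 0xb5, which as a signed 8-bit value is -75 dBm for status->signal; the antenna indication is (0xb5 & 0x80) >> 7 = 1, i.e. the frame arrived on the second antenna.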
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index a3b1618db27c..f5a7087cfb97 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -30,6 +30,9 @@
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
+#define RSSI_LEVEL_BITMASK 0x7F
+#define ANT_DIVERSITY_BITMASK BIT(7)
+
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 4dadd0c62cde..782eb297c196 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -83,6 +83,12 @@ struct wl1271_cmd_trigger_scan_to {
#define MAX_CHANNELS_5GHZ 42
#define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW interval table can take up to 16 entries.
+ * The 1st entry isn't used (the first scan is immediate), and the last
+ * entry is reserved for the long_interval, which leaves 14 short
+ * intervals.
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
#define SCAN_MAX_BANDS 3
enum {
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index ea7e07abca4e..c172da56b550 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+ ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+ if (ret)
goto out_free_glue;
/* if sdio can keep power while host is suspended, enable wow */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 7f363fa566a3..a1b6040e6491 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -500,6 +500,9 @@ struct wl1271 {
/* interface combinations supported by the hw */
const struct ieee80211_iface_combination *iface_combinations;
u8 n_iface_combinations;
+
+ /* dynamic fw traces */
+ u32 dynamic_fw_traces;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 39efc6d78b10..27c56876b2c1 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -503,6 +503,11 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
}
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+ return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
#define wl12xx_for_each_wlvif(wl, wlvif) \
list_for_each_entry(wlvif, &wl->wlvif_list, list)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b318b6f..a7bf74727116 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -200,22 +200,27 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct xenvif_stats stats;
};
-/* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
- */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
-
enum state_bit_shift {
/* This bit marks that the vif is connected */
VIF_STATUS_CONNECTED,
};
+struct xenvif_mcast_addr {
+ struct list_head entry;
+ struct rcu_head rcu;
+ u8 addr[6];
+};
+
+#define XEN_NETBK_MCAST_MAX 64
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
u8 fe_dev_addr[6];
+ struct list_head fe_mcast_addr;
+ unsigned int fe_mcast_count;
/* Frontend feature information. */
int gso_mask;
@@ -224,6 +229,7 @@ struct xenvif {
u8 can_sg:1;
u8 ip_csum:1;
u8 ipv6_csum:1;
+ u8 multicast_control:1;
/* Is this interface disabled? True when backend discovers
* frontend is rogue.
@@ -306,11 +312,6 @@ int xenvif_dealloc_kthread(void *data);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
-/* Determine whether the needed number of slots (req) are available,
- * and set req_event if not.
- */
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
@@ -325,9 +326,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
queue->pending_prod + queue->pending_cons;
}
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
@@ -344,4 +342,8 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+/* Multicast control */
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
+void xenvif_mcast_addr_list_free(struct xenvif *vif);
+
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..e7bd63eb2876 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
+
+ /* Wake the dealloc thread _after_ decrementing inflight_packets so
+ * that if kthread_stop() has already been called, the dealloc thread
+ * does not wait forever with nothing to wake it.
+ */
+ wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
@@ -165,6 +171,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
!xenvif_schedulable(vif))
goto drop;
+ if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ if (!xenvif_mcast_match(vif, eth->h_dest))
+ goto drop;
+ }
+
cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + vif->drain_timeout;
@@ -421,6 +434,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->num_queues = 0;
spin_lock_init(&vif->lock);
+ INIT_LIST_HEAD(&vif->fe_mcast_addr);
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
@@ -655,6 +669,8 @@ void xenvif_disconnect(struct xenvif *vif)
xenvif_unmap_frontend_rings(queue);
}
+
+ xenvif_mcast_addr_list_free(vif);
}
/* Reverse the relevant parts of xenvif_init_queue().
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..ec98d43916a8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+ if (vif->gso_mask)
+ return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+ else
+ return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
+ int needed;
+
+ needed = xenvif_rx_ring_slots_needed(queue->vif);
do {
prod = queue->rx.sring->req_prod;
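A quick sanity check on the new per-vif computation (assuming 4 KiB pages): with GSO negotiated and the default 64 KiB gso_max_size, DIV_ROUND_UP(65536, 4096) + 1 = 17 slots are reserved per packet; without GSO and a 1500-byte MTU, DIV_ROUND_UP(1500, 4096) = 1 slot suffices. This replaces the fixed XEN_NETBK_RX_SLOTS_MAX estimate removed from common.h above.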
@@ -314,7 +325,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
} else {
copy_gop->source.domid = DOMID_SELF;
copy_gop->source.u.gmfn =
- virt_to_mfn(page_address(page));
+ virt_to_gfn(page_address(page));
}
copy_gop->source.offset = offset;
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
skb_queue_head_init(&rxq);
- while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+ while (xenvif_rx_ring_slots_available(queue)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
queue->last_rx_time = jiffies;
@@ -810,23 +821,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +846,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1170,14 +1168,89 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
return false;
}
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Too many multicast addresses\n");
+ return -ENOSPC;
+ }
+
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+ if (!mcast)
+ return -ENOMEM;
+
+ ether_addr_copy(mcast->addr, addr);
+ list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+ vif->fe_mcast_count++;
+
+ return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ --vif->fe_mcast_count;
+ list_del_rcu(&mcast->entry);
+ kfree_rcu(mcast, rcu);
+ break;
+ }
+ }
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+ struct xenvif_mcast_addr *mcast;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+ if (ether_addr_equal(addr, mcast->addr)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+ /* No need for locking or RCU here. NAPI poll and TX queue
+ * are stopped.
+ */
+ while (!list_empty(&vif->fe_mcast_addr)) {
+ struct xenvif_mcast_addr *mcast;
+
+ mcast = list_first_entry(&vif->fe_mcast_addr,
+ struct xenvif_mcast_addr,
+ entry);
+ --vif->fe_mcast_count;
+ list_del(&mcast->entry);
+ kfree(mcast);
+ }
+}
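For orientation, a sketch of the frontend half of this handshake (hypothetical here; the corresponding netfront changes are not part of this patch): after the guest sees feature-multicast-control in xenstore and writes request-multicast-control, it queues an extra-info slot that the MCAST_ADD branch below consumes.

	/* Hypothetical frontend-side sketch: subscribe to one multicast
	 * group, where addr is the 6-byte group MAC address. The slot is
	 * placed on the TX ring ahead of the normal packet requests. */
	struct xen_netif_extra_info extra;

	memset(&extra, 0, sizeof(extra));
	extra.type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
	memcpy(extra.u.mcast.addr, addr, 6);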
+
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1227,6 +1300,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+ ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq,
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+ push_tx_responses(queue);
+ continue;
+ }
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+ xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+ make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+ push_tx_responses(queue);
+ continue;
+ }
+
ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -1265,6 +1363,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1393,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1284,7 +1406,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
- virt_to_mfn(skb->data);
+ virt_to_gfn(skb->data);
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data);
@@ -1294,9 +1416,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1430,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1651,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
@@ -1566,13 +1680,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
pending_idx =
queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-queue->tx_unmap_ops] =
+ pending_idx_release[gop - queue->tx_unmap_ops] =
pending_idx;
- queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
idx_to_kaddr(queue, pending_idx),
@@ -1835,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return !queue->stalled
- && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+ return !queue->stalled && prod - cons < 1
&& time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
@@ -1848,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
- return queue->stalled
- && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+ return queue->stalled && prod - cons >= 1;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
- && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+ && xenvif_rx_ring_slots_available(queue))
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
@@ -2002,8 +2114,11 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs, by default */
- xenvif_max_queues = num_online_cpus();
+ /* Allow as many queues as there are CPUs if the user has not
+ * specified a value.
+ */
+ if (xenvif_max_queues == 0)
+ xenvif_max_queues = num_online_cpus();
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ec383b0f5443..929a6e7e5ecf 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,6 +327,14 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ /* We support multicast-control. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-multicast-control", "%d", 1);
+ if (err) {
+ message = "writing feature-multicast-control";
+ goto abort_transaction;
+ }
+
err = xenbus_transaction_end(xbt, 0);
} while (err == -EAGAIN);
@@ -1016,6 +1024,11 @@ static int read_xenbus_vif_flags(struct backend_info *be)
val = 0;
vif->ipv6_csum = !!val;
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
+ "%d", &val) < 0)
+ val = 0;
+ vif->multicast_control = !!val;
+
return 0;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f948c46d5132..f821a97d7827 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
struct sk_buff *skb;
unsigned short id;
grant_ref_t ref;
- unsigned long pfn;
+ unsigned long gfn;
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
BUG_ON((signed short)ref < 0);
queue->grant_rx_ref[id] = ref;
- pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+ gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
req = RING_GET_REQUEST(&queue->rx, req_prod);
gnttab_grant_foreign_access_ref(ref,
queue->info->xbdev->otherend_id,
- pfn_to_mfn(pfn),
+ gfn,
0);
req->id = id;
@@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
- gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
- page_to_mfn(page), GNTMAP_readonly);
+ gnttab_grant_foreign_access_ref(ref,
+ queue->info->xbdev->otherend_id,
+ xen_page_to_gfn(page),
+ GNTMAP_readonly);
queue->tx_skbs[id].skb = skb;
queue->grant_tx_page[id] = page;
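For reference, xen_page_to_gfn() hides the guest-type distinction that the old helpers got wrong: on auto-translated guests (ARM and x86 HVM) the guest frame number is simply the PFN, while on x86 PV it is the machine frame number, so the previous page_to_mfn()/pfn_to_mfn() calls were only correct for PV guests.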
@@ -1336,7 +1338,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
netif_carrier_off(info->netdev);
- for (i = 0; i < num_queues; ++i) {
+ for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -1348,7 +1350,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0;
- napi_synchronize(&queue->napi);
+ if (netif_running(info->netdev))
+ napi_synchronize(&queue->napi);
xennet_release_tx_bufs(queue);
xennet_release_rx_bufs(queue);
@@ -2101,7 +2104,8 @@ static int xennet_remove(struct xenbus_device *dev)
unregister_netdev(info->netdev);
- xennet_destroy_queues(info);
+ if (info->queues)
+ xennet_destroy_queues(info);
xennet_free_netdev(info->netdev);
return 0;
@@ -2130,8 +2134,11 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
- /* Allow as many queues as there are CPUs, by default */
- xennet_max_queues = num_online_cpus();
+ /* Allow as many queues as there are CPUs if the user has not
+ * specified a value.
+ */
+ if (xennet_max_queues == 0)
+ xennet_max_queues = num_online_cpus();
return xenbus_register_frontend(&netfront_driver);
}
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 722673cb785b..6639cd1cae36 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -74,4 +74,5 @@ source "drivers/nfc/nfcmrvl/Kconfig"
source "drivers/nfc/st21nfca/Kconfig"
source "drivers/nfc/st-nci/Kconfig"
source "drivers/nfc/nxp-nci/Kconfig"
+source "drivers/nfc/s3fwrn5/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 368b6dfe71b3..2757fe1b8aa5 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
obj-$(CONFIG_NFC_ST21NFCA) += st21nfca/
obj-$(CONFIG_NFC_ST_NCI) += st-nci/
obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/
+obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5/
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 2b77ccf77f81..754a9bb0f58d 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -355,7 +355,8 @@ static int nfc_mei_phy_enable(void *phy_id)
goto err;
}
- r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
+ r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX),
+ nfc_mei_event_cb, phy);
if (r) {
pr_err("Event cb registration failed %d\n", r);
goto err;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 2f77f1d03638..fac80c691914 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -318,19 +318,15 @@ static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy)
struct i2c_client *client = phy->i2c_dev;
struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
- gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
- gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
- gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+ gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW);
+ gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW);
+ gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
nfc_err(&client->dev, "No GPIOs\n");
return -EINVAL;
}
- gpiod_direction_output(gpiod_en, 0);
- gpiod_direction_output(gpiod_fw, 0);
- gpiod_direction_input(gpiod_irq);
-
client->irq = gpiod_to_irq(gpiod_irq);
if (client->irq < 0) {
nfc_err(&client->dev, "No IRQ\n");
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
new file mode 100644
index 000000000000..7e3b255b3f99
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -0,0 +1,19 @@
+config NFC_S3FWRN5
+ tristate
+ ---help---
+ Core driver for the Samsung S3FWRN5 NFC chip. It contains the
+ chip's core utilities and is intended to be used by PHY drivers
+ to avoid duplicating large amounts of common code.
+
+config NFC_S3FWRN5_I2C
+ tristate "Samsung S3FWRN5 I2C support"
+ depends on NFC_NCI && I2C
+ select NFC_S3FWRN5
+ default n
+ ---help---
+ This module adds support for an I2C interface to the S3FWRN5 chip.
+ Select this if your platform is using the I2C bus.
+
+ To compile this driver as a module, choose M here. The module will
+ be called s3fwrn5_i2c.ko.
+ Say N if unsure.
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
new file mode 100644
index 000000000000..3381c34faf62
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Samsung S3FWRN5 NFC driver
+#
+
+s3fwrn5-objs = core.o firmware.o nci.o
+s3fwrn5_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
+obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
new file mode 100644
index 000000000000..0d866ca295e3
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -0,0 +1,219 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <net/nfc/nci_core.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+#include "nci.h"
+
+#define S3FWRN5_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO14443_B_MASK | \
+ NFC_PROTO_ISO15693_MASK)
+
+static int s3fwrn5_firmware_update(struct s3fwrn5_info *info)
+{
+ bool need_update;
+ int ret;
+
+ s3fwrn5_fw_init(&info->fw_info, "sec_s3fwrn5_firmware.bin");
+
+ /* Update firmware */
+
+ s3fwrn5_set_wake(info, false);
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_FW);
+
+ ret = s3fwrn5_fw_setup(&info->fw_info);
+ if (ret < 0)
+ return ret;
+
+ need_update = s3fwrn5_fw_check_version(&info->fw_info,
+ info->ndev->manufact_specific_info);
+ if (!need_update)
+ goto out;
+
+ dev_info(&info->ndev->nfc_dev->dev, "Detected new firmware version\n");
+
+ ret = s3fwrn5_fw_download(&info->fw_info);
+ if (ret < 0)
+ goto out;
+
+ /* Update RF configuration */
+
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+
+ s3fwrn5_set_wake(info, true);
+ ret = s3fwrn5_nci_rf_configure(info, "sec_s3fwrn5_rfreg.bin");
+ s3fwrn5_set_wake(info, false);
+
+out:
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+ s3fwrn5_fw_cleanup(&info->fw_info);
+ return ret;
+}
+
+static int s3fwrn5_nci_open(struct nci_dev *ndev)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+ if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_COLD)
+ return -EBUSY;
+
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+ s3fwrn5_set_wake(info, true);
+
+ return 0;
+}
+
+static int s3fwrn5_nci_close(struct nci_dev *ndev)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+ s3fwrn5_set_wake(info, false);
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+ return 0;
+}
+
+static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+ int ret;
+
+ mutex_lock(&info->mutex);
+
+ if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+ mutex_unlock(&info->mutex);
+ return -EINVAL;
+ }
+
+ ret = s3fwrn5_write(info, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+
+ mutex_unlock(&info->mutex);
+ return ret;
+}
+
+static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+ int ret;
+
+ ret = s3fwrn5_firmware_update(info);
+ if (ret < 0)
+ goto out;
+
+ /* NCI core reset */
+
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+ s3fwrn5_set_wake(info, true);
+
+ ret = nci_core_reset(info->ndev);
+ if (ret < 0)
+ goto out;
+
+ ret = nci_core_init(info->ndev);
+
+out:
+ return ret;
+}
+
+static struct nci_ops s3fwrn5_nci_ops = {
+ .open = s3fwrn5_nci_open,
+ .close = s3fwrn5_nci_close,
+ .send = s3fwrn5_nci_send,
+ .post_setup = s3fwrn5_nci_post_setup,
+};
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+ struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+{
+ struct s3fwrn5_info *info;
+ int ret;
+
+ info = devm_kzalloc(pdev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->phy_id = phy_id;
+ info->pdev = pdev;
+ info->phy_ops = phy_ops;
+ info->max_payload = max_payload;
+ mutex_init(&info->mutex);
+
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+ s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
+ &s3fwrn5_nci_ops.n_prop_ops);
+
+ info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
+ S3FWRN5_NFC_PROTOCOLS, 0, 0);
+ if (!info->ndev)
+ return -ENOMEM;
+
+ nci_set_parent_dev(info->ndev, pdev);
+ nci_set_drvdata(info->ndev, info);
+
+ ret = nci_register_device(info->ndev);
+ if (ret < 0) {
+ nci_free_device(info->ndev);
+ return ret;
+ }
+
+ info->fw_info.ndev = info->ndev;
+
+ *ndev = info->ndev;
+
+ return ret;
+}
+EXPORT_SYMBOL(s3fwrn5_probe);
+
+void s3fwrn5_remove(struct nci_dev *ndev)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+ s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+}
+EXPORT_SYMBOL(s3fwrn5_remove);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+ enum s3fwrn5_mode mode)
+{
+ switch (mode) {
+ case S3FWRN5_MODE_NCI:
+ return nci_recv_frame(ndev, skb);
+ case S3FWRN5_MODE_FW:
+ return s3fwrn5_fw_recv_frame(ndev, skb);
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(s3fwrn5_recv_frame);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S3FWRN5 NFC driver");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
new file mode 100644
index 000000000000..64a90252c57f
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -0,0 +1,511 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+
+struct s3fwrn5_fw_version {
+ __u8 major;
+ __u8 build1;
+ __u8 build2;
+ __u8 target;
+};
+
+static int s3fwrn5_fw_send_msg(struct s3fwrn5_fw_info *fw_info,
+ struct sk_buff *msg, struct sk_buff **rsp)
+{
+ struct s3fwrn5_info *info =
+ container_of(fw_info, struct s3fwrn5_info, fw_info);
+ long ret;
+
+ reinit_completion(&fw_info->completion);
+
+ ret = s3fwrn5_write(info, msg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_interruptible_timeout(
+ &fw_info->completion, msecs_to_jiffies(1000));
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ENXIO;
+
+ if (!fw_info->rsp)
+ return -EINVAL;
+
+ *rsp = fw_info->rsp;
+ fw_info->rsp = NULL;
+
+ return 0;
+}
+
+static int s3fwrn5_fw_prep_msg(struct s3fwrn5_fw_info *fw_info,
+ struct sk_buff **msg, u8 type, u8 code, const void *data, u16 len)
+{
+ struct s3fwrn5_fw_header hdr;
+ struct sk_buff *skb;
+
+ hdr.type = type | fw_info->parity;
+ fw_info->parity ^= 0x80;
+ hdr.code = code;
+ hdr.len = len;
+
+ skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, S3FWRN5_FW_HDR_SIZE), &hdr, S3FWRN5_FW_HDR_SIZE);
+ if (len)
+ memcpy(skb_put(skb, len), data, len);
+
+ *msg = skb;
+
+ return 0;
+}
+
+static int s3fwrn5_fw_get_bootinfo(struct s3fwrn5_fw_info *fw_info,
+ struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+ struct sk_buff *msg, *rsp = NULL;
+ struct s3fwrn5_fw_header *hdr;
+ int ret;
+
+ /* Send GET_BOOTINFO command */
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+ S3FWRN5_FW_CMD_GET_BOOTINFO, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(bootinfo, rsp->data + S3FWRN5_FW_HDR_SIZE, 10);
+
+out:
+ kfree_skb(rsp);
+ return ret;
+}
+
+static int s3fwrn5_fw_enter_update_mode(struct s3fwrn5_fw_info *fw_info,
+ const void *hash_data, u16 hash_size,
+ const void *sig_data, u16 sig_size)
+{
+ struct s3fwrn5_fw_cmd_enter_updatemode args;
+ struct sk_buff *msg, *rsp = NULL;
+ struct s3fwrn5_fw_header *hdr;
+ int ret;
+
+ /* Send ENTER_UPDATE_MODE command */
+
+ args.hashcode_size = hash_size;
+ args.signature_size = sig_size;
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+ S3FWRN5_FW_CMD_ENTER_UPDATE_MODE, &args, sizeof(args));
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ kfree_skb(rsp);
+
+ /* Send hashcode data */
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+ hash_data, hash_size);
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ kfree_skb(rsp);
+
+ /* Send signature data */
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+ sig_data, sig_size);
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+ ret = -EPROTO;
+
+out:
+ kfree_skb(rsp);
+ return ret;
+}
+
+static int s3fwrn5_fw_update_sector(struct s3fwrn5_fw_info *fw_info,
+ u32 base_addr, const void *data)
+{
+ struct s3fwrn5_fw_cmd_update_sector args;
+ struct sk_buff *msg, *rsp = NULL;
+ struct s3fwrn5_fw_header *hdr;
+ int ret, i;
+
+ /* Send UPDATE_SECTOR command */
+
+ args.base_address = base_addr;
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+ S3FWRN5_FW_CMD_UPDATE_SECTOR, &args, sizeof(args));
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ kfree_skb(rsp);
+
+ /* Send data split into 256-byte packets */
+
+ for (i = 0; i < 16; ++i) {
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg,
+ S3FWRN5_FW_MSG_DATA, 0, data+256*i, 256);
+ if (ret < 0)
+ break;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ break;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ kfree_skb(rsp);
+ }
+
+ return ret;
+
+err:
+ kfree_skb(rsp);
+ return ret;
+}
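Note that the data loop above transfers exactly 16 * 256 bytes = 4 KiB per UPDATE_SECTOR command, so it is only correct when the sector_size reported by GET_BOOTINFO is 4 KiB.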
+
+static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
+{
+ struct sk_buff *msg, *rsp = NULL;
+ struct s3fwrn5_fw_header *hdr;
+ int ret;
+
+ /* Send COMPLETE_UPDATE_MODE command */
+
+ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+ S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+ kfree_skb(msg);
+ if (ret < 0)
+ return ret;
+
+ hdr = (struct s3fwrn5_fw_header *) rsp->data;
+ if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+ ret = -EPROTO;
+
+ kfree_skb(rsp);
+
+ return ret;
+}
+
+/*
+ * Firmware header structure:
+ *
+ * 0x00 - 0x0B : Date and time string (w/o NUL termination)
+ * 0x10 - 0x13 : Firmware version
+ * 0x14 - 0x17 : Signature address
+ * 0x18 - 0x1B : Signature size
+ * 0x1C - 0x1F : Firmware image address
+ * 0x20 - 0x23 : Firmware sectors count
+ * 0x24 - 0x27 : Custom signature address
+ * 0x28 - 0x2B : Custom signature size
+ */
+
+#define S3FWRN5_FW_IMAGE_HEADER_SIZE 44
+
+static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+ struct s3fwrn5_fw_image *fw = &fw_info->fw;
+ u32 sig_off;
+ u32 image_off;
+ u32 custom_sig_off;
+ int ret;
+
+ ret = request_firmware(&fw->fw, fw_info->fw_name,
+ &fw_info->ndev->nfc_dev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
+ return -EINVAL;
+
+ memcpy(fw->date, fw->fw->data + 0x00, 12);
+ fw->date[12] = '\0';
+
+ memcpy(&fw->version, fw->fw->data + 0x10, 4);
+
+ memcpy(&sig_off, fw->fw->data + 0x14, 4);
+ fw->sig = fw->fw->data + sig_off;
+ memcpy(&fw->sig_size, fw->fw->data + 0x18, 4);
+
+ memcpy(&image_off, fw->fw->data + 0x1C, 4);
+ fw->image = fw->fw->data + image_off;
+ memcpy(&fw->image_sectors, fw->fw->data + 0x20, 4);
+
+ memcpy(&custom_sig_off, fw->fw->data + 0x24, 4);
+ fw->custom_sig = fw->fw->data + custom_sig_off;
+ memcpy(&fw->custom_sig_size, fw->fw->data + 0x28, 4);
+
+ return 0;
+}
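The field-by-field parsing above follows the layout in the comment block; as a readability aid only, a hypothetical packed-struct view of the same 44-byte little-endian header:

	struct s3fwrn5_fw_image_hdr {
		char date[12];		/* 0x00: date/time, no NUL terminator */
		u8 reserved[4];		/* 0x0c */
		u32 version;		/* 0x10 */
		u32 sig_off;		/* 0x14 */
		u32 sig_size;		/* 0x18 */
		u32 image_off;		/* 0x1c */
		u32 image_sectors;	/* 0x20 */
		u32 custom_sig_off;	/* 0x24 */
		u32 custom_sig_size;	/* 0x28; header ends at 0x2c = 44 */
	} __packed;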
+
+static void s3fwrn5_fw_release_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+ release_firmware(fw_info->fw.fw);
+}
+
+static int s3fwrn5_fw_get_base_addr(
+ struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr)
+{
+ int i;
+ struct {
+ u8 version[4];
+ u32 base_addr;
+ } match[] = {
+ {{0x05, 0x00, 0x00, 0x00}, 0x00005000},
+ {{0x05, 0x00, 0x00, 0x01}, 0x00003000},
+ {{0x05, 0x00, 0x00, 0x02}, 0x00003000},
+ {{0x05, 0x00, 0x00, 0x03}, 0x00003000},
+ {{0x05, 0x00, 0x00, 0x05}, 0x00003000}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(match); ++i)
+ if (bootinfo->hw_version[0] == match[i].version[0] &&
+ bootinfo->hw_version[1] == match[i].version[1] &&
+ bootinfo->hw_version[3] == match[i].version[3]) {
+ *base_addr = match[i].base_addr;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline bool
+s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+ return !!bootinfo->hw_version[2];
+}
+
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
+{
+ struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
+ int ret;
+
+ /* Get firmware data */
+
+ ret = s3fwrn5_fw_request_firmware(fw_info);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Failed to get fw file, ret=%02x\n", ret);
+ return ret;
+ }
+
+ /* Get bootloader info */
+
+ ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Failed to get bootinfo, ret=%02x\n", ret);
+ goto err;
+ }
+
+ /* Match hardware version to obtain firmware base address */
+
+ ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Unknown hardware version\n");
+ goto err;
+ }
+
+ fw_info->sector_size = bootinfo.sector_size;
+
+ fw_info->sig_size = s3fwrn5_fw_is_custom(&bootinfo) ?
+ fw_info->fw.custom_sig_size : fw_info->fw.sig_size;
+ fw_info->sig = s3fwrn5_fw_is_custom(&bootinfo) ?
+ fw_info->fw.custom_sig : fw_info->fw.sig;
+
+ return 0;
+
+err:
+ s3fwrn5_fw_release_firmware(fw_info);
+ return ret;
+}
+
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+{
+ struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
+ struct s3fwrn5_fw_version *old = (void *) &version;
+
+ if (new->major > old->major)
+ return true;
+ if (new->build1 > old->build1)
+ return true;
+ if (new->build2 > old->build2)
+ return true;
+
+ return false;
+}
+
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
+{
+ struct s3fwrn5_fw_image *fw = &fw_info->fw;
+ u8 hash_data[SHA1_DIGEST_SIZE];
+ struct scatterlist sg;
+ struct hash_desc desc;
+ u32 image_size, off;
+ int ret;
+
+ image_size = fw_info->sector_size * fw->image_sectors;
+
+ /* Compute SHA of firmware data */
+
+ sg_init_one(&sg, fw->image, image_size);
+ desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+ crypto_hash_init(&desc);
+ crypto_hash_update(&desc, &sg, image_size);
+ crypto_hash_final(&desc, hash_data);
+ crypto_free_hash(desc.tfm);
+
+ /* Firmware update process */
+
+ dev_info(&fw_info->ndev->nfc_dev->dev,
+ "Firmware update: %s\n", fw_info->fw_name);
+
+ ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
+ SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Unable to enter update mode\n");
+ goto out;
+ }
+
+ for (off = 0; off < image_size; off += fw_info->sector_size) {
+ ret = s3fwrn5_fw_update_sector(fw_info,
+ fw_info->base_addr + off, fw->image + off);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Firmware update error (code=%d)\n", ret);
+ goto out;
+ }
+ }
+
+ ret = s3fwrn5_fw_complete_update_mode(fw_info);
+ if (ret < 0) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Unable to complete update mode\n");
+ goto out;
+ }
+
+ dev_info(&fw_info->ndev->nfc_dev->dev,
+ "Firmware update: success\n");
+
+out:
+ return ret;
+}
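The digest above uses the legacy synchronous crypto_hash API. For comparison, a hedged sketch of the same one-shot SHA-1 computation with the newer shash interface (not part of this patch; s3fwrn5_sha1 is a hypothetical helper name):

	#include <crypto/hash.h>

	static int s3fwrn5_sha1(const u8 *data, unsigned int len,
				u8 out[SHA1_DIGEST_SIZE])
	{
		struct crypto_shash *tfm;
		int ret;

		tfm = crypto_alloc_shash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			ret = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return ret;
	}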
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name)
+{
+ fw_info->parity = 0x00;
+ fw_info->rsp = NULL;
+ fw_info->fw.fw = NULL;
+ strcpy(fw_info->fw_name, fw_name);
+ init_completion(&fw_info->completion);
+}
+
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info)
+{
+ s3fwrn5_fw_release_firmware(fw_info);
+}
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+ struct s3fwrn5_fw_info *fw_info = &info->fw_info;
+
+ BUG_ON(fw_info->rsp);
+
+ fw_info->rsp = skb;
+
+ complete(&fw_info->completion);
+
+ return 0;
+}
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
new file mode 100644
index 000000000000..1ec0647ab917
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -0,0 +1,111 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_FIRMWARE_H_
+#define __LOCAL_S3FWRN5_FIRMWARE_H_
+
+/* FW Message Types */
+#define S3FWRN5_FW_MSG_CMD 0x00
+#define S3FWRN5_FW_MSG_RSP 0x01
+#define S3FWRN5_FW_MSG_DATA 0x02
+
+/* FW Return Codes */
+#define S3FWRN5_FW_RET_SUCCESS 0x00
+#define S3FWRN5_FW_RET_MESSAGE_TYPE_INVALID 0x01
+#define S3FWRN5_FW_RET_COMMAND_INVALID 0x02
+#define S3FWRN5_FW_RET_PAGE_DATA_OVERFLOW 0x03
+#define S3FWRN5_FW_RET_SECT_DATA_OVERFLOW 0x04
+#define S3FWRN5_FW_RET_AUTHENTICATION_FAIL 0x05
+#define S3FWRN5_FW_RET_FLASH_OPERATION_FAIL 0x06
+#define S3FWRN5_FW_RET_ADDRESS_OUT_OF_RANGE 0x07
+#define S3FWRN5_FW_RET_PARAMETER_INVALID 0x08
+
+/* ---- FW Packet structures ---- */
+#define S3FWRN5_FW_HDR_SIZE 4
+
+struct s3fwrn5_fw_header {
+ __u8 type;
+ __u8 code;
+ __u16 len;
+};
+
+#define S3FWRN5_FW_CMD_RESET 0x00
+
+#define S3FWRN5_FW_CMD_GET_BOOTINFO 0x01
+
+struct s3fwrn5_fw_cmd_get_bootinfo_rsp {
+ __u8 hw_version[4];
+ __u16 sector_size;
+ __u16 page_size;
+ __u16 frame_max_size;
+ __u16 hw_buffer_size;
+};
+
+#define S3FWRN5_FW_CMD_ENTER_UPDATE_MODE 0x02
+
+struct s3fwrn5_fw_cmd_enter_updatemode {
+ __u16 hashcode_size;
+ __u16 signature_size;
+};
+
+#define S3FWRN5_FW_CMD_UPDATE_SECTOR 0x04
+
+struct s3fwrn5_fw_cmd_update_sector {
+ __u32 base_address;
+};
+
+#define S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE 0x05
+
+struct s3fwrn5_fw_image {
+ const struct firmware *fw;
+
+ char date[13];
+ u32 version;
+ const void *sig;
+ u32 sig_size;
+ const void *image;
+ u32 image_sectors;
+ const void *custom_sig;
+ u32 custom_sig_size;
+};
+
+struct s3fwrn5_fw_info {
+ struct nci_dev *ndev;
+ struct s3fwrn5_fw_image fw;
+ char fw_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+ const void *sig;
+ u32 sig_size;
+ u32 sector_size;
+ u32 base_addr;
+
+ struct completion completion;
+ struct sk_buff *rsp;
+ char parity;
+};
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+
+#endif /* __LOCAL_S3FWRN5_FIRMWARE_H_ */
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
new file mode 100644
index 000000000000..b4dd7dd47473
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -0,0 +1,306 @@
+/*
+ * I2C Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+
+#include <net/nfc/nfc.h>
+
+#include "s3fwrn5.h"
+
+#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
+
+#define S3FWRN5_I2C_MAX_PAYLOAD 32
+#define S3FWRN5_EN_WAIT_TIME 150
+
+struct s3fwrn5_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct nci_dev *ndev;
+
+ unsigned int gpio_en;
+ unsigned int gpio_fw_wake;
+
+ struct mutex mutex;
+
+ enum s3fwrn5_mode mode;
+ unsigned int irq_skip:1;
+};
+
+static void s3fwrn5_i2c_set_wake(void *phy_id, bool wake)
+{
+ struct s3fwrn5_i2c_phy *phy = phy_id;
+
+ mutex_lock(&phy->mutex);
+ gpio_set_value(phy->gpio_fw_wake, wake);
+ msleep(S3FWRN5_EN_WAIT_TIME/2);
+ mutex_unlock(&phy->mutex);
+}
+
+static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode)
+{
+ struct s3fwrn5_i2c_phy *phy = phy_id;
+
+ mutex_lock(&phy->mutex);
+
+ if (phy->mode == mode)
+ goto out;
+
+ phy->mode = mode;
+
+ gpio_set_value(phy->gpio_en, 1);
+ gpio_set_value(phy->gpio_fw_wake, 0);
+ if (mode == S3FWRN5_MODE_FW)
+ gpio_set_value(phy->gpio_fw_wake, 1);
+
+ if (mode != S3FWRN5_MODE_COLD) {
+ msleep(S3FWRN5_EN_WAIT_TIME);
+ gpio_set_value(phy->gpio_en, 0);
+ msleep(S3FWRN5_EN_WAIT_TIME/2);
+ }
+
+ phy->irq_skip = true;
+
+out:
+ mutex_unlock(&phy->mutex);
+}
+
+static enum s3fwrn5_mode s3fwrn5_i2c_get_mode(void *phy_id)
+{
+ struct s3fwrn5_i2c_phy *phy = phy_id;
+ enum s3fwrn5_mode mode;
+
+ mutex_lock(&phy->mutex);
+
+ mode = phy->mode;
+
+ mutex_unlock(&phy->mutex);
+
+ return mode;
+}
+
+static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ struct s3fwrn5_i2c_phy *phy = phy_id;
+ int ret;
+
+ mutex_lock(&phy->mutex);
+
+ phy->irq_skip = false;
+
+ ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+ if (ret == -EREMOTEIO) {
+ /* Retry, chip was in standby */
+ usleep_range(110000, 120000);
+ ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+ }
+
+ mutex_unlock(&phy->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret != skb->len)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static struct s3fwrn5_phy_ops i2c_phy_ops = {
+ .set_wake = s3fwrn5_i2c_set_wake,
+ .set_mode = s3fwrn5_i2c_set_mode,
+ .get_mode = s3fwrn5_i2c_get_mode,
+ .write = s3fwrn5_i2c_write,
+};
+
+static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
+{
+ struct sk_buff *skb;
+ size_t hdr_size;
+ size_t data_len;
+ char hdr[4];
+ int ret;
+
+ hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
+ NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
+ ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
+ if (ret < 0)
+ return ret;
+
+ if (ret < hdr_size)
+ return -EBADMSG;
+
+ data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
+ ((struct nci_ctrl_hdr *)hdr)->plen :
+ ((struct s3fwrn5_fw_header *)hdr)->len;
+
+ skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, hdr_size), hdr, hdr_size);
+
+ if (data_len == 0)
+ goto out;
+
+ ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
+ if (ret != data_len) {
+ kfree_skb(skb);
+ return -EBADMSG;
+ }
+
+out:
+ return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
+}
+
+static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+ struct s3fwrn5_i2c_phy *phy = phy_id;
+ int ret = 0;
+
+ if (!phy || !phy->ndev) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ mutex_lock(&phy->mutex);
+
+ if (phy->irq_skip)
+ goto out;
+
+ switch (phy->mode) {
+ case S3FWRN5_MODE_NCI:
+ case S3FWRN5_MODE_FW:
+ ret = s3fwrn5_i2c_read(phy);
+ break;
+ case S3FWRN5_MODE_COLD:
+ ret = -EREMOTEIO;
+ break;
+ }
+
+out:
+ mutex_unlock(&phy->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
+{
+ struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+ struct device_node *np = client->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_en))
+ return -ENODEV;
+
+ phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_fw_wake))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int s3fwrn5_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct s3fwrn5_i2c_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ mutex_init(&phy->mutex);
+ phy->mode = S3FWRN5_MODE_COLD;
+ phy->irq_skip = true;
+
+ phy->i2c_dev = client;
+ i2c_set_clientdata(client, phy);
+
+ ret = s3fwrn5_i2c_parse_dt(client);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+ GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
+ if (ret < 0)
+ return ret;
+
+ ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw_wake,
+ GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
+ if (ret < 0)
+ return ret;
+
+ ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
+ S3FWRN5_I2C_MAX_PAYLOAD);
+ if (ret < 0)
+ return ret;
+
+ ret = request_threaded_irq(phy->i2c_dev->irq, NULL,
+ s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ S3FWRN5_I2C_DRIVER_NAME, phy);
+ if (ret)
+ s3fwrn5_remove(phy->ndev);
+
+ return ret;
+}
+
+static int s3fwrn5_i2c_remove(struct i2c_client *client)
+{
+ struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+
+ s3fwrn5_remove(phy->ndev);
+
+ return 0;
+}
+
+static struct i2c_device_id s3fwrn5_i2c_id_table[] = {
+ {S3FWRN5_I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
+
+static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+ { .compatible = "samsung,s3fwrn5-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
+
+static struct i2c_driver s3fwrn5_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = S3FWRN5_I2C_DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
+ },
+ .probe = s3fwrn5_i2c_probe,
+ .remove = s3fwrn5_i2c_remove,
+ .id_table = s3fwrn5_i2c_id_table,
+};
+
+module_i2c_driver(s3fwrn5_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for Samsung S3FWRN5");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
new file mode 100644
index 000000000000..ace0071c5339
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -0,0 +1,165 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+
+#include "s3fwrn5.h"
+#include "nci.h"
+
+static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nci_req_complete(ndev, status);
+ return 0;
+}
+
+static struct nci_prop_ops s3fwrn5_nci_prop_ops[] = {
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_AGAIN),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_GET_RFREG),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_SET_RFREG),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_GET_RFREG_VER),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_SET_RFREG_VER),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_START_RFREG),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_STOP_RFREG),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_FW_CFG),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+ {
+ .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+ NCI_PROP_WR_RESET),
+ .rsp = s3fwrn5_nci_prop_rsp,
+ },
+};
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n)
+{
+ *ops = s3fwrn5_nci_prop_ops;
+ *n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
+}
+
+#define S3FWRN5_RFREG_SECTION_SIZE 252
+
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
+{
+ const struct firmware *fw;
+ struct nci_prop_fw_cfg_cmd fw_cfg;
+ struct nci_prop_set_rfreg_cmd set_rfreg;
+ struct nci_prop_stop_rfreg_cmd stop_rfreg;
+ u32 checksum;
+ int i, len;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Compute rfreg checksum */
+
+ checksum = 0;
+ for (i = 0; i < fw->size; i += 4)
+ checksum += *((u32 *)(fw->data+i));
+
+ /* Set default clock configuration for external crystal */
+
+ fw_cfg.clk_type = 0x01;
+ fw_cfg.clk_speed = 0xff;
+ fw_cfg.clk_req = 0xff;
+ ret = nci_prop_cmd(info->ndev, NCI_PROP_FW_CFG,
+ sizeof(fw_cfg), (__u8 *)&fw_cfg);
+ if (ret < 0)
+ goto out;
+
+ /* Start rfreg configuration */
+
+ dev_info(&info->ndev->nfc_dev->dev,
+ "rfreg configuration update: %s\n", fw_name);
+
+ ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
+ if (ret < 0) {
+ dev_err(&info->ndev->nfc_dev->dev,
+ "Unable to start rfreg update\n");
+ goto out;
+ }
+
+ /* Update rfreg */
+
+ set_rfreg.index = 0;
+ for (i = 0; i < fw->size; i += S3FWRN5_RFREG_SECTION_SIZE) {
+ len = (fw->size - i < S3FWRN5_RFREG_SECTION_SIZE) ?
+ (fw->size - i) : S3FWRN5_RFREG_SECTION_SIZE;
+ memcpy(set_rfreg.data, fw->data+i, len);
+ ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
+ len+1, (__u8 *)&set_rfreg);
+ if (ret < 0) {
+ dev_err(&info->ndev->nfc_dev->dev,
+ "rfreg update error (code=%d)\n", ret);
+ goto out;
+ }
+ set_rfreg.index++;
+ }
+
+ /* Finish rfreg configuration */
+
+ stop_rfreg.checksum = checksum & 0xffff;
+ ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
+ sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
+ if (ret < 0) {
+ dev_err(&info->ndev->nfc_dev->dev,
+ "Unable to stop rfreg update\n");
+ goto out;
+ }
+
+ dev_info(&info->ndev->nfc_dev->dev,
+ "rfreg configuration update: success\n");
+out:
+ release_firmware(fw);
+ return ret;
+}
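
s3fwrn5_nci_rf_configure() streams the rfreg blob in fixed 252-byte sections, prefixing each with a running index byte. A minimal self-contained sketch of that chunking loop, with send_section() as a hypothetical stand-in for nci_prop_cmd():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTION_SIZE 252	/* matches S3FWRN5_RFREG_SECTION_SIZE */

struct set_rfreg {
	uint8_t index;
	uint8_t data[SECTION_SIZE];
};

/* Stand-in for nci_prop_cmd(..., NCI_PROP_SET_RFREG, ...). */
static int send_section(const struct set_rfreg *cmd, size_t len)
{
	printf("section %u: %zu bytes\n", cmd->index, len - 1);
	return 0;
}

static int send_rfreg(const uint8_t *fw, size_t size)
{
	struct set_rfreg cmd = { .index = 0 };
	size_t off, len;

	for (off = 0; off < size; off += SECTION_SIZE) {
		len = (size - off < SECTION_SIZE) ? size - off : SECTION_SIZE;
		memcpy(cmd.data, fw + off, len);
		/* len + 1 covers the leading index byte, as in the loop above */
		if (send_section(&cmd, len + 1) < 0)
			return -1;
		cmd.index++;
	}
	return 0;
}

int main(void)
{
	uint8_t fw[600] = { 0 };	/* 252 + 252 + 96 -> three sections */

	return send_rfreg(fw, sizeof(fw));
}
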
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
new file mode 100644
index 000000000000..0e68d439dde6
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -0,0 +1,89 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_NCI_H_
+#define __LOCAL_S3FWRN5_NCI_H_
+
+#include "s3fwrn5.h"
+
+#define NCI_PROP_AGAIN 0x01
+
+#define NCI_PROP_GET_RFREG 0x21
+#define NCI_PROP_SET_RFREG 0x22
+
+struct nci_prop_set_rfreg_cmd {
+ __u8 index;
+ __u8 data[252];
+};
+
+struct nci_prop_set_rfreg_rsp {
+ __u8 status;
+};
+
+#define NCI_PROP_GET_RFREG_VER 0x24
+
+struct nci_prop_get_rfreg_ver_rsp {
+ __u8 status;
+ __u8 data[8];
+};
+
+#define NCI_PROP_SET_RFREG_VER 0x25
+
+struct nci_prop_set_rfreg_ver_cmd {
+ __u8 data[8];
+};
+
+struct nci_prop_set_rfreg_ver_rsp {
+ __u8 status;
+};
+
+#define NCI_PROP_START_RFREG 0x26
+
+struct nci_prop_start_rfreg_rsp {
+ __u8 status;
+};
+
+#define NCI_PROP_STOP_RFREG 0x27
+
+struct nci_prop_stop_rfreg_cmd {
+ __u16 checksum;
+};
+
+struct nci_prop_stop_rfreg_rsp {
+ __u8 status;
+};
+
+#define NCI_PROP_FW_CFG 0x28
+
+struct nci_prop_fw_cfg_cmd {
+ __u8 clk_type;
+ __u8 clk_speed;
+ __u8 clk_req;
+};
+
+struct nci_prop_fw_cfg_rsp {
+ __u8 status;
+};
+
+#define NCI_PROP_WR_RESET 0x2f
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n);
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
+
+#endif /* __LOCAL_S3FWRN5_NCI_H_ */
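
All of the opcodes above live in the proprietary GID, so every table entry in nci.c packs the same GID with a different OID. A small sketch of the packing scheme (an assumed mirror of nci_opcode_pack(), for illustration rather than quoted from the kernel header):

#include <stdint.h>
#include <stdio.h>

/* Assumed mirror of the NCI core's opcode packing: GID in the high
 * byte, 6-bit OID in the low byte. */
#define GID_PROPRIETARY	0x0f
#define opcode_pack(gid, oid) \
	((uint16_t)((((uint16_t)(gid)) << 8) | ((uint16_t)(oid) & 0x3f)))

int main(void)
{
	/* 0x22 is NCI_PROP_SET_RFREG from the header above */
	printf("SET_RFREG opcode: 0x%04x\n",
	       opcode_pack(GID_PROPRIETARY, 0x22));
	return 0;
}
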
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
new file mode 100644
index 000000000000..89210d4828b8
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -0,0 +1,99 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_H_
+#define __LOCAL_S3FWRN5_H_
+
+#include <linux/nfc.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "firmware.h"
+
+enum s3fwrn5_mode {
+ S3FWRN5_MODE_COLD,
+ S3FWRN5_MODE_NCI,
+ S3FWRN5_MODE_FW,
+};
+
+struct s3fwrn5_phy_ops {
+ void (*set_wake)(void *id, bool sleep);
+ void (*set_mode)(void *id, enum s3fwrn5_mode);
+ enum s3fwrn5_mode (*get_mode)(void *id);
+ int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct s3fwrn5_info {
+ struct nci_dev *ndev;
+ void *phy_id;
+ struct device *pdev;
+
+ struct s3fwrn5_phy_ops *phy_ops;
+ unsigned int max_payload;
+
+ struct s3fwrn5_fw_info fw_info;
+
+ struct mutex mutex;
+};
+
+static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
+ enum s3fwrn5_mode mode)
+{
+ if (!info->phy_ops->set_mode)
+ return -ENOTSUPP;
+
+ info->phy_ops->set_mode(info->phy_id, mode);
+
+ return 0;
+}
+
+static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
+{
+ if (!info->phy_ops->get_mode)
+ return -ENOTSUPP;
+
+ return info->phy_ops->get_mode(info->phy_id);
+}
+
+static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
+{
+ if (!info->phy_ops->set_wake)
+ return -ENOTSUPP;
+
+ info->phy_ops->set_wake(info->phy_id, wake);
+
+ return 0;
+}
+
+static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
+{
+ if (!info->phy_ops->write)
+ return -ENOTSUPP;
+
+ return info->phy_ops->write(info->phy_id, skb);
+}
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+ struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+void s3fwrn5_remove(struct nci_dev *ndev);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+ enum s3fwrn5_mode mode);
+
+#endif /* __LOCAL_S3FWRN5_H_ */
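
The inline wrappers above make every op optional: a missing callback degrades to -ENOTSUPP instead of a NULL dereference. A hedged sketch of a bus driver filling in only the ops it supports (the dummy_* names are invented for illustration):

/* Sketch only: a phy implementing just write and get_mode; the
 * wrappers above turn the missing set_wake/set_mode into -ENOTSUPP. */
static int dummy_write(void *id, struct sk_buff *skb)
{
	/* a real phy would push skb onto its bus here */
	return 0;
}

static enum s3fwrn5_mode dummy_get_mode(void *id)
{
	return S3FWRN5_MODE_NCI;
}

static struct s3fwrn5_phy_ops dummy_phy_ops = {
	.write    = dummy_write,
	.get_mode = dummy_get_mode,
};
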
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
index fc3904c946ee..e7c6db9c5860 100644
--- a/drivers/nfc/st-nci/Kconfig
+++ b/drivers/nfc/st-nci/Kconfig
@@ -21,3 +21,14 @@ config NFC_ST_NCI_I2C
If you choose to build a module, it'll be called st-nci_i2c.
Say N if unsure.
+
+config NFC_ST_NCI_SPI
+ tristate "NFC ST NCI spi support"
+ depends on NFC_ST_NCI && SPI
+ ---help---
+	  This module adds support for an SPI interface to the
+	  STMicroelectronics NFC NCI chip family.
+	  Select this if your platform is using the SPI bus.
+
+ If you choose to build a module, it'll be called st-nci_spi.
+ Say N if unsure.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
index 0df157df3a94..348ce76f2177 100644
--- a/drivers/nfc/st-nci/Makefile
+++ b/drivers/nfc/st-nci/Makefile
@@ -7,3 +7,6 @@ obj-$(CONFIG_NFC_ST_NCI) += st-nci.o
st-nci_i2c-objs = i2c.o
obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
+
+st-nci_spi-objs = spi.o
+obj-$(CONFIG_NFC_ST_NCI_SPI) += st-nci_spi.o
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 06175ce769bb..707ed2eb5936 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -25,15 +25,15 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nfc.h>
-#include <linux/platform_data/st_nci.h>
+#include <linux/platform_data/st-nci.h>
#include "ndlc.h"
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
/* ndlc header */
-#define ST21NFCB_FRAME_HEADROOM 1
-#define ST21NFCB_FRAME_TAILROOM 0
+#define ST_NCI_FRAME_HEADROOM 1
+#define ST_NCI_FRAME_TAILROOM 0
#define ST_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */
#define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
@@ -118,15 +118,10 @@ static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
/*
* Reads an ndlc frame and returns it in a newly allocated sk_buff.
* returns:
- * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at
- * end of read)
- * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF
- * at end of read)
+ * 0 : if received frame is complete
* -EREMOTEIO : i2c read error (fatal)
* -EBADMSG : frame was incorrect and discarded
- * (value returned from st_nci_i2c_repack)
- * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
- * the read length end sequence
+ * -ENOMEM : cannot allocate skb, frame dropped
*/
static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
struct sk_buff **skb)
@@ -179,7 +174,7 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
/*
* Reads an ndlc frame from the chip.
*
- * On ST21NFCB, IRQ goes in idle state when read starts.
+ * On ST_NCI, the IRQ goes to the idle state when the read starts.
*/
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
@@ -325,12 +320,12 @@ static int st_nci_i2c_probe(struct i2c_client *client,
}
} else {
nfc_err(&client->dev,
- "st21nfcb platform resources not available\n");
+ "st_nci platform resources not available\n");
return -ENODEV;
}
r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
- ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+ ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
&phy->ndlc);
if (r < 0) {
nfc_err(&client->dev, "Unable to register ndlc layer\n");
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 56c6a4cb4c96..d2cf84e680c6 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -171,6 +171,8 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
switch (pcb & PCB_SYNC_MASK) {
case PCB_SYNC_ACK:
+ skb = skb_dequeue(&ndlc->ack_pending_q);
+ kfree_skb(skb);
del_timer_sync(&ndlc->t1_timer);
del_timer_sync(&ndlc->t2_timer);
ndlc->t2_active = false;
@@ -192,12 +194,13 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
msecs_to_jiffies(NDLC_TIMER_T1_WAIT));
break;
default:
- pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
kfree_skb(skb);
break;
}
- } else {
+ } else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) {
nci_recv_frame(ndlc->ndev, skb);
+ } else {
+ kfree_skb(skb);
}
}
}
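
The ndlc.c hunk above tightens the receive path: supervisor frames drive the ACK/timer machinery, dataframes go up the NCI stack, and anything else is now freed instead of leaking or being misdelivered. Reduced to its shape (handle_supervisor() is an invented placeholder for the ACK/NACK handling shown in the patch):

/* Sketch of the strict PCB dispatch the hunk above converges on. */
if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR)
	handle_supervisor(ndlc, skb, pcb);
else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME)
	nci_recv_frame(ndlc->ndev, skb);	/* payload goes up the stack */
else
	kfree_skb(skb);				/* unknown PCB type: drop it */
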
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
new file mode 100644
index 000000000000..598a58c4d6d1
--- /dev/null
+++ b/drivers/nfc/st-nci/spi.c
@@ -0,0 +1,392 @@
+/*
+ * SPI Link Layer for ST NCI based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/st-nci.h>
+
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+/* ndlc header */
+#define ST_NCI_FRAME_HEADROOM 1
+#define ST_NCI_FRAME_TAILROOM 0
+
+#define ST_NCI_SPI_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */
+
+#define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
+
+static struct spi_device_id st_nci_spi_id_table[] = {
+ {ST_NCI_SPI_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+
+struct st_nci_spi_phy {
+ struct spi_device *spi_dev;
+ struct llt_ndlc *ndlc;
+
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+};
+
+#define SPI_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "spi: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+static int st_nci_spi_enable(void *phy_id)
+{
+ struct st_nci_spi_phy *phy = phy_id;
+
+ gpio_set_value(phy->gpio_reset, 0);
+ usleep_range(10000, 15000);
+ gpio_set_value(phy->gpio_reset, 1);
+ usleep_range(80000, 85000);
+
+ if (phy->ndlc->powered == 0)
+ enable_irq(phy->spi_dev->irq);
+
+ return 0;
+}
+
+static void st_nci_spi_disable(void *phy_id)
+{
+ struct st_nci_spi_phy *phy = phy_id;
+
+ disable_irq_nosync(phy->spi_dev->irq);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero on success or a negative errno on error.
+ * In addition, it must not alter the skb.
+ */
+static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
+{
+ int r;
+ struct st_nci_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_dev;
+ struct sk_buff *skb_rx;
+ u8 buf[ST_NCI_SPI_MAX_SIZE];
+ struct spi_transfer spi_xfer = {
+ .tx_buf = skb->data,
+ .rx_buf = buf,
+ .len = skb->len,
+ };
+
+ SPI_DUMP_SKB("st_nci_spi_write", skb);
+
+ if (phy->ndlc->hard_fault != 0)
+ return phy->ndlc->hard_fault;
+
+ r = spi_sync_transfer(dev, &spi_xfer, 1);
+ /*
+	 * We may have received some valuable data on the MISO line.
+	 * Send it back into the ndlc state machine.
+ */
+ if (!r) {
+ skb_rx = alloc_skb(skb->len, GFP_KERNEL);
+ if (!skb_rx) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ skb_put(skb_rx, skb->len);
+ memcpy(skb_rx->data, buf, skb->len);
+ ndlc_recv(phy->ndlc, skb_rx);
+ }
+
+exit:
+ return r;
+}
+
+/*
+ * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+ * returns:
+ * 0 : if received frame is complete
+ * -EREMOTEIO : spi read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int st_nci_spi_read(struct st_nci_spi_phy *phy,
+ struct sk_buff **skb)
+{
+ int r;
+	u16 len;
+ u8 buf[ST_NCI_SPI_MAX_SIZE];
+ struct spi_device *dev = phy->spi_dev;
+ struct spi_transfer spi_xfer = {
+ .rx_buf = buf,
+ .len = ST_NCI_SPI_MIN_SIZE,
+ };
+
+ r = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (r < 0)
+ return -EREMOTEIO;
+
+ len = be16_to_cpu(*(__be16 *) (buf + 2));
+ if (len > ST_NCI_SPI_MAX_SIZE) {
+ nfc_err(&dev->dev, "invalid frame len\n");
+ phy->ndlc->hard_fault = 1;
+ return -EBADMSG;
+ }
+
+ *skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL);
+ if (*skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE);
+ skb_put(*skb, ST_NCI_SPI_MIN_SIZE);
+ memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE);
+
+ if (!len)
+ return 0;
+
+ spi_xfer.len = len;
+ r = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (r < 0) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ skb_put(*skb, len);
+ memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
+
+ SPI_DUMP_SKB("spi frame read", *skb);
+
+ return 0;
+}
+
+/*
+ * Reads an ndlc frame from the chip.
+ *
+ * On ST_NCI, the IRQ goes to the idle state when the read starts.
+ */
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
+{
+ struct st_nci_spi_phy *phy = phy_id;
+ struct spi_device *dev;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (!phy || !phy->ndlc || irq != phy->spi_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ dev = phy->spi_dev;
+ dev_dbg(&dev->dev, "IRQ\n");
+
+ if (phy->ndlc->hard_fault)
+ return IRQ_HANDLED;
+
+ if (!phy->ndlc->powered) {
+ st_nci_spi_disable(phy);
+ return IRQ_HANDLED;
+ }
+
+ r = st_nci_spi_read(phy, &skb);
+ if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
+ return IRQ_HANDLED;
+
+ ndlc_recv(phy->ndlc, skb);
+
+ return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops spi_phy_ops = {
+ .write = st_nci_spi_write,
+ .enable = st_nci_spi_enable,
+ .disable = st_nci_spi_disable,
+};
+
+#ifdef CONFIG_OF
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+ struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+ struct device_node *pp;
+ int gpio;
+ int r;
+
+ pp = dev->dev.of_node;
+ if (!pp)
+ return -ENODEV;
+
+ /* Get GPIO from device tree */
+ gpio = of_get_named_gpio(pp, "reset-gpios", 0);
+ if (gpio < 0) {
+ nfc_err(&dev->dev,
+ "Failed to retrieve reset-gpios from device tree\n");
+ return gpio;
+ }
+
+ /* GPIO request and configuration */
+ r = devm_gpio_request_one(&dev->dev, gpio,
+ GPIOF_OUT_INIT_HIGH, "clf_reset");
+ if (r) {
+ nfc_err(&dev->dev, "Failed to request reset pin\n");
+ return r;
+ }
+ phy->gpio_reset = gpio;
+
+ phy->irq_polarity = irq_get_trigger_type(dev->irq);
+
+ return 0;
+}
+#else
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int st_nci_spi_request_resources(struct spi_device *dev)
+{
+ struct st_nci_nfc_platform_data *pdata;
+ struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+ int r;
+
+ pdata = dev->dev.platform_data;
+ if (pdata == NULL) {
+ nfc_err(&dev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ /* store for later use */
+ phy->gpio_reset = pdata->gpio_reset;
+ phy->irq_polarity = pdata->irq_polarity;
+
+ r = devm_gpio_request_one(&dev->dev,
+ phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset");
+ if (r) {
+ pr_err("%s : reset gpio_request failed\n", __FILE__);
+ return r;
+ }
+
+ return 0;
+}
+
+static int st_nci_spi_probe(struct spi_device *dev)
+{
+ struct st_nci_spi_phy *phy;
+ struct st_nci_nfc_platform_data *pdata;
+ int r;
+
+	/* Check SPI platform functionalities */
+	if (!dev) {
+		pr_debug("%s: dev is NULL. Device is not accessible.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+	dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct st_nci_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->spi_dev = dev;
+
+ spi_set_drvdata(dev, phy);
+
+ pdata = dev->dev.platform_data;
+ if (!pdata && dev->dev.of_node) {
+ r = st_nci_spi_of_request_resources(dev);
+ if (r) {
+ nfc_err(&dev->dev, "No platform data\n");
+ return r;
+ }
+ } else if (pdata) {
+ r = st_nci_spi_request_resources(dev);
+ if (r) {
+ nfc_err(&dev->dev,
+ "Cannot get platform resources\n");
+ return r;
+ }
+ } else {
+ nfc_err(&dev->dev,
+ "st_nci platform resources not available\n");
+ return -ENODEV;
+ }
+
+ r = ndlc_probe(phy, &spi_phy_ops, &dev->dev,
+ ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
+ &phy->ndlc);
+ if (r < 0) {
+ nfc_err(&dev->dev, "Unable to register ndlc layer\n");
+ return r;
+ }
+
+ r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL,
+ st_nci_irq_thread_fn,
+ phy->irq_polarity | IRQF_ONESHOT,
+ ST_NCI_SPI_DRIVER_NAME, phy);
+ if (r < 0)
+ nfc_err(&dev->dev, "Unable to register IRQ handler\n");
+
+ return r;
+}
+
+static int st_nci_spi_remove(struct spi_device *dev)
+{
+ struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ ndlc_remove(phy->ndlc);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st_nci_spi_match[] = {
+ { .compatible = "st,st21nfcb-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st_nci_spi_match);
+#endif
+
+static struct spi_driver st_nci_spi_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = ST_NCI_SPI_DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_st_nci_spi_match),
+ },
+ .probe = st_nci_spi_probe,
+ .id_table = st_nci_spi_id_table,
+ .remove = st_nci_spi_remove,
+};
+
+module_spi_driver(st_nci_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
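
st_nci_spi_write() leans on SPI being full duplex: the one spi_transfer that clocks the frame out on MOSI also captures whatever the chip drove on MISO, which is why the write path can feed bytes straight back into ndlc_recv(). The core pattern, as a sketch:

/* Sketch: one full-duplex transfer; TX and RX share the same clock,
 * so rx_buf fills even though this is logically a "write". */
static int xfer_duplex(struct spi_device *spi,
		       const u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer t = {
		.tx_buf = tx,	/* frame going out on MOSI */
		.rx_buf = rx,	/* bytes sampled on MISO meanwhile */
		.len    = len,
	};

	return spi_sync_transfer(spi, &t, 1);
}
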
diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c
index 97addfa96c6f..c742ef65a05a 100644
--- a/drivers/nfc/st-nci/st-nci_se.c
+++ b/drivers/nfc/st-nci/st-nci_se.c
@@ -189,14 +189,14 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
ST_NCI_DEVICE_MGNT_GATE,
ST_NCI_DEVICE_MGNT_PIPE);
if (r < 0)
- goto free_info;
+ return r;
/* Get pipe list */
r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
&skb_pipe_list);
if (r < 0)
- goto free_info;
+ return r;
/* Complete the existing gate_pipe table */
for (i = 0; i < skb_pipe_list->len; i++) {
@@ -222,6 +222,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
pr_err("Unexpected apdu_reader pipe on host %x\n",
dm_pipe_info->src_host_id);
+ kfree_skb(skb_pipe_info);
continue;
}
@@ -241,13 +242,12 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
dm_pipe_info->src_host_id;
}
+ kfree_skb(skb_pipe_info);
}
memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
sizeof(st_nci_gates));
-free_info:
- kfree_skb(skb_pipe_info);
kfree_skb(skb_pipe_list);
return r;
}
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index d251f7229c4e..051286562fab 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
ST21NFCA_DEVICE_MGNT_GATE,
ST21NFCA_DEVICE_MGNT_PIPE);
if (r < 0)
- goto free_info;
+ return r;
/* Get pipe list */
r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
&skb_pipe_list);
if (r < 0)
- goto free_info;
+ return r;
/* Complete the existing gate_pipe table */
for (i = 0; i < skb_pipe_list->len; i++) {
@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
info->src_host_id != ST21NFCA_ESE_HOST_ID) {
pr_err("Unexpected apdu_reader pipe on host %x\n",
info->src_host_id);
+ kfree_skb(skb_pipe_info);
continue;
}
@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
hdev->pipes[st21nfca_gates[j].pipe].dest_host =
info->src_host_id;
}
+ kfree_skb(skb_pipe_info);
}
/*
@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
st21nfca_gates[i].gate,
st21nfca_gates[i].pipe);
if (r < 0)
- goto free_info;
+ goto free_list;
}
}
memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
-free_info:
- kfree_skb(skb_pipe_info);
+free_list:
kfree_skb(skb_pipe_list);
return r;
}
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 85b4d86772d8..70b0707fd9a9 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -336,7 +336,7 @@
#define TRF7970A_NFC_TARGET_LEVEL_RFDET(v) ((v) & 0x07)
#define TRF7970A_NFC_TARGET_LEVEL_HI_RF BIT(3)
-#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN BIT(3)
+#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN BIT(5)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES (0x0 << 6)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES (0x1 << 6)
#define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES (0x2 << 6)
@@ -629,7 +629,9 @@ static void trf7970a_send_upstream(struct trf7970a *trf)
}
if (trf->adjust_resp_len) {
- skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+ if (trf->rx_skb)
+ skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+
trf->adjust_resp_len = false;
}
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 87751cfd6f4f..865a3e3cc581 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -190,14 +190,17 @@ static inline int pdev_is_xeon(struct pci_dev *pdev)
case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
return 1;
}
return 0;
@@ -237,7 +240,7 @@ static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
- if (idx < 0 || idx > ndev->mw_count)
+ if (idx < 0 || idx >= ndev->mw_count)
return -EINVAL;
return ndev->reg->mw_bar[idx];
}
@@ -572,10 +575,13 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
"Connection Topology -\t%s\n",
ntb_topo_string(ndev->ntb.topo));
- off += scnprintf(buf + off, buf_size - off,
- "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
- off += scnprintf(buf + off, buf_size - off,
- "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
+ if (ndev->b2b_idx != UINT_MAX) {
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
+ off += scnprintf(buf + off, buf_size - off,
+ "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+ }
+
off += scnprintf(buf + off, buf_size - off,
"BAR4 Split -\t\t%s\n",
ndev->bar4_split ? "yes" : "no");
@@ -1484,7 +1490,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
pdev = ndev_pdev(ndev);
mmio = ndev->self_mmio;
- if (ndev->b2b_idx >= ndev->mw_count) {
+ if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
@@ -1776,6 +1782,13 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
else
ndev->b2b_idx = b2b_mw_idx;
+ if (ndev->b2b_idx >= ndev->mw_count) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b_mw_idx %d invalid for mw_count %u\n",
+ b2b_mw_idx, ndev->mw_count);
+ return -EINVAL;
+ }
+
dev_dbg(ndev_dev(ndev),
"setting up b2b mw idx %d means %d\n",
b2b_mw_idx, ndev->b2b_idx);
@@ -1843,6 +1856,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
break;
}
@@ -1857,6 +1873,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
break;
}
@@ -1878,6 +1897,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+ case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+ case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
break;
}
@@ -1996,7 +2018,7 @@ static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
ndev->ntb.ops = &intel_ntb_ops;
ndev->b2b_off = 0;
- ndev->b2b_idx = INT_MAX;
+ ndev->b2b_idx = UINT_MAX;
ndev->bar4_split = 0;
@@ -2234,14 +2256,17 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 7ddaf387b679..ea0612f797df 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -67,6 +67,9 @@
#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
/* Intel Xeon hardware */
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
- dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+ dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
ntb->ctx = NULL;
ntb->ctx_ops = NULL;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..6e3ee907d186 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,8 @@ struct ntb_transport_qp {
struct ntb_transport_ctx *transport;
struct ntb_dev *ndev;
void *cb_data;
- struct dma_chan *dma_chan;
+ struct dma_chan *tx_dma_chan;
+ struct dma_chan *rx_dma_chan;
bool client_ready;
bool link_is_up;
@@ -142,10 +143,11 @@ struct ntb_transport_qp {
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
+ struct list_head rx_post_q;
struct list_head rx_pend_q;
struct list_head rx_free_q;
- spinlock_t ntb_rx_pend_q_lock;
- spinlock_t ntb_rx_free_q_lock;
+ /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+ spinlock_t ntb_rx_q_lock;
void *rx_buff;
unsigned int rx_index;
unsigned int rx_max_entry;
@@ -211,6 +213,8 @@ struct ntb_transport_ctx {
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
+
+ struct dentry *debugfs_node_dir;
};
enum {
@@ -294,7 +298,7 @@ static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
- list_add(&nt->entry, &ntb_transport_list);
+ list_add_tail(&nt->entry, &ntb_transport_list);
return 0;
}
@@ -436,16 +440,20 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
char *buf;
ssize_t ret, out_offset, out_count;
+ qp = filp->private_data;
+
+ if (!qp || !qp->link_is_up)
+ return 0;
+
out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- qp = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "NTB QP stats\n");
+ "\nNTB QP stats:\n\n");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_bytes - \t%llu\n", qp->rx_bytes);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -463,11 +471,11 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_err_ver - \t%llu\n", qp->rx_err_ver);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "rx_buff - \t%p\n", qp->rx_buff);
+ "rx_buff - \t0x%p\n", qp->rx_buff);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_index - \t%u\n", qp->rx_index);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "rx_max_entry - \t%u\n", qp->rx_max_entry);
+ "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -482,15 +490,32 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "tx_mw - \t%p\n", qp->tx_mw);
+ "tx_mw - \t0x%p\n", qp->tx_mw);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "tx_index (H) - \t%u\n", qp->tx_index);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "tx_index - \t%u\n", qp->tx_index);
+ "RRI (T) - \t%u\n",
+ qp->remote_rx_info->entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_max_entry - \t%u\n", qp->tx_max_entry);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "free tx - \t%u\n",
+ ntb_transport_tx_free_entry(qp));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "\nQP Link %s\n",
+ "\n");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Using TX DMA - \t%s\n",
+ qp->tx_dma_chan ? "Yes" : "No");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Using RX DMA - \t%s\n",
+ qp->rx_dma_chan ? "Yes" : "No");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "QP Link - \t%s\n",
qp->link_is_up ? "Up" : "Down");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "\n");
+
if (out_offset > out_count)
out_offset = out_count;
@@ -528,12 +553,34 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
}
entry = list_first_entry(list, struct ntb_queue_entry, entry);
list_del(&entry->entry);
+
out:
spin_unlock_irqrestore(lock, flags);
return entry;
}
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+ struct list_head *list,
+ struct list_head *to_list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ if (list_empty(list)) {
+ entry = NULL;
+ } else {
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_move_tail(&entry->entry, to_list);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
@@ -601,13 +648,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
- unsigned int size)
+ resource_size_t size)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
- unsigned int xlat_size, buff_size;
+ size_t xlat_size, buff_size;
int rc;
+ if (!size)
+ return -EINVAL;
+
xlat_size = round_up(size, mw->xlat_align_size);
buff_size = round_up(size, mw->xlat_align);
@@ -627,7 +677,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
if (!mw->virt_addr) {
mw->xlat_size = 0;
mw->buff_size = 0;
- dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+ dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
buff_size);
return -ENOMEM;
}
@@ -867,6 +917,8 @@ static void ntb_qp_link_work(struct work_struct *work)
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
+
+ tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +975,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (nt_debugfs_dir) {
+ if (nt->debugfs_node_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- nt_debugfs_dir);
+ nt->debugfs_node_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
@@ -941,10 +993,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
- spin_lock_init(&qp->ntb_rx_pend_q_lock);
- spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_rx_q_lock);
spin_lock_init(&qp->ntb_tx_free_q_lock);
+ INIT_LIST_HEAD(&qp->rx_post_q);
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1083,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
goto err2;
}
+ if (nt_debugfs_dir) {
+ nt->debugfs_node_dir =
+ debugfs_create_dir(pci_name(ndev->pdev),
+ nt_debugfs_dir);
+ }
+
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
@@ -1107,22 +1165,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
kfree(nt);
}
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
- struct ntb_queue_entry *entry = data;
- struct ntb_transport_qp *qp = entry->qp;
- void *cb_data = entry->cb_data;
- unsigned int len = entry->len;
- struct ntb_payload_header *hdr = entry->rx_hdr;
+ struct ntb_queue_entry *entry;
+ void *cb_data;
+ unsigned int len;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+ while (!list_empty(&qp->rx_post_q)) {
+ entry = list_first_entry(&qp->rx_post_q,
+ struct ntb_queue_entry, entry);
+ if (!(entry->flags & DESC_DONE_FLAG))
+ break;
+
+ entry->rx_hdr->flags = 0;
+ iowrite32(entry->index, &qp->rx_info->entry);
+
+ cb_data = entry->cb_data;
+ len = entry->len;
- hdr->flags = 0;
+ list_move_tail(&entry->entry, &qp->rx_free_q);
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+ if (qp->rx_handler && qp->client_ready)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+ }
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
- iowrite32(entry->index, &qp->rx_info->entry);
+static void ntb_rx_copy_callback(void *data)
+{
+ struct ntb_queue_entry *entry = data;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ entry->flags |= DESC_DONE_FLAG;
- if (qp->rx_handler && qp->client_ready)
- qp->rx_handler(qp, qp->cb_data, cb_data, len);
+ ntb_complete_rxc(entry->qp);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,36 +1221,35 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
ntb_rx_copy_callback(entry);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
- size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
- struct dma_chan *chan = qp->dma_chan;
+ struct dma_chan *chan = qp->rx_dma_chan;
struct dma_device *device;
- size_t pay_off, buff_off;
+ size_t pay_off, buff_off, len;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- entry->len = len;
+ len = entry->len;
if (!chan)
goto err;
if (len < copy_bytes)
- goto err_wait;
+ goto err;
device = chan->device;
pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
- goto err_wait;
+ goto err;
unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (!unmap)
- goto err_wait;
+ goto err;
unmap->len = len;
unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
@@ -1210,12 +1292,6 @@ err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
-err_wait:
- /* If the callbacks come out of order, the writing of the index to the
- * last completed will be out of order. This may result in the
- * receive stalling forever.
- */
- dma_sync_wait(chan, qp->last_cookie);
err:
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
@@ -1226,7 +1302,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
- int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1330,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
return -EIO;
}
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
-
- rc = -ENOMEM;
- goto err;
+ return -EAGAIN;
}
+ entry->rx_hdr = hdr;
+ entry->index = qp->rx_index;
+
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
- rc = -EIO;
- goto err;
- }
+ entry->len = -EIO;
+ entry->flags |= DESC_DONE_FLAG;
- dev_dbg(&qp->ndev->pdev->dev,
- "RX OK index %u ver %u size %d into buf size %d\n",
- qp->rx_index, hdr->ver, hdr->len, entry->len);
+ ntb_complete_rxc(qp);
+ } else {
+ dev_dbg(&qp->ndev->pdev->dev,
+ "RX OK index %u ver %u size %d into buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
- entry->index = qp->rx_index;
- entry->rx_hdr = hdr;
+ entry->len = hdr->len;
- ntb_async_rx(entry, offset, hdr->len);
+ ntb_async_rx(entry, offset);
+ }
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
-
-err:
- /* FIXME: if this syncrhonous update of the rx_index gets ahead of
- * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
- * scenarios:
- *
- * 1) The peer might miss this update, but observe the update
- * from the memcpy completion callback. In this case, the buffer will
- * not be freed on the peer to be reused for a different packet. The
- * successful rx of a later packet would clear the condition, but the
- * condition could persist if several rx fail in a row.
- *
- * 2) The peer may observe this update before the asyncrhonous copy of
- * prior packets is completed. The peer may overwrite the buffers of
- * the prior packets before they are copied.
- *
- * 3) Both: the peer may observe the update, and then observe the index
- * decrement by the asynchronous completion callback. Who knows what
- * badness that will cause.
- */
- hdr->flags = 0;
- iowrite32(qp->rx_index, &qp->rx_info->entry);
-
- return rc;
}
static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,8 +1386,8 @@ static void ntb_transport_rxc_db(unsigned long data)
break;
}
- if (qp->dma_chan)
- dma_async_issue_pending(qp->dma_chan);
+ if (i && qp->rx_dma_chan)
+ dma_async_issue_pending(qp->rx_dma_chan);
if (i == qp->rx_max_entry) {
/* there is more work to do */
@@ -1401,7 +1454,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
{
struct ntb_payload_header __iomem *hdr;
struct dma_async_tx_descriptor *txd;
- struct dma_chan *chan = qp->dma_chan;
+ struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device;
size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap;
@@ -1594,14 +1647,27 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
dma_cap_set(DMA_MEMCPY, dma_mask);
if (use_dma) {
- qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
- (void *)(unsigned long)node);
- if (!qp->dma_chan)
- dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+ qp->tx_dma_chan =
+ dma_request_channel(dma_mask, ntb_dma_filter_fn,
+ (void *)(unsigned long)node);
+ if (!qp->tx_dma_chan)
+ dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
+
+ qp->rx_dma_chan =
+ dma_request_channel(dma_mask, ntb_dma_filter_fn,
+ (void *)(unsigned long)node);
+ if (!qp->rx_dma_chan)
+ dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
} else {
- qp->dma_chan = NULL;
+ qp->tx_dma_chan = NULL;
+ qp->rx_dma_chan = NULL;
}
- dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
+
+ dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
+ qp->tx_dma_chan ? "DMA" : "CPU");
+
+ dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
+ qp->rx_dma_chan ? "DMA" : "CPU");
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
@@ -1609,7 +1675,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
goto err1;
entry->qp = qp;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
@@ -1634,10 +1700,12 @@ err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
- if (qp->dma_chan)
- dma_release_channel(qp->dma_chan);
+ if (qp->tx_dma_chan)
+ dma_release_channel(qp->tx_dma_chan);
+ if (qp->rx_dma_chan)
+ dma_release_channel(qp->rx_dma_chan);
nt->qp_bitmap_free |= qp_bit;
err:
return NULL;
@@ -1652,7 +1720,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
@@ -1662,12 +1729,27 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
pdev = qp->ndev->pdev;
- if (qp->dma_chan) {
- struct dma_chan *chan = qp->dma_chan;
+ if (qp->tx_dma_chan) {
+ struct dma_chan *chan = qp->tx_dma_chan;
/* Putting the dma_chan to NULL will force any new traffic to be
 	 * processed by the CPU instead of the DMA engine
*/
- qp->dma_chan = NULL;
+ qp->tx_dma_chan = NULL;
+
+ /* Try to be nice and wait for any queued DMA engine
+ * transactions to process before smashing it with a rock
+ */
+ dma_sync_wait(chan, qp->last_cookie);
+ dmaengine_terminate_all(chan);
+ dma_release_channel(chan);
+ }
+
+ if (qp->rx_dma_chan) {
+ struct dma_chan *chan = qp->rx_dma_chan;
+ /* Putting the dma_chan to NULL will force any new traffic to be
+	 * processed by the CPU instead of the DMA engine
+ */
+ qp->rx_dma_chan = NULL;
/* Try to be nice and wait for any queued DMA engine
* transactions to process before smashing it with a rock
@@ -1689,18 +1771,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
qp->tx_handler = NULL;
qp->event_handler = NULL;
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
+ kfree(entry);
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
kfree(entry);
+ }
- while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
- dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
- nt->qp_bitmap_free |= qp_bit;
+ qp->transport->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1724,14 +1811,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
if (!qp || qp->client_ready)
return NULL;
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
if (!entry)
return NULL;
buf = entry->cb_data;
*len = entry->len;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
return buf;
}
@@ -1757,15 +1844,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
if (!qp)
return -EINVAL;
- entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
if (!entry)
return -ENOMEM;
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
+ entry->flags = 0;
+
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+ tasklet_schedule(&qp->rxc_db_work);
return 0;
}
@@ -1796,7 +1886,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (!entry) {
qp->tx_err_no_buf++;
- return -ENOMEM;
+ return -EBUSY;
}
entry->cb_data = cb;
@@ -1907,21 +1997,34 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
unsigned int max;
+ unsigned int copy_align;
if (!qp)
return 0;
- if (!qp->dma_chan)
+ if (!qp->tx_dma_chan && !qp->rx_dma_chan)
return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ copy_align = max(qp->tx_dma_chan->device->copy_align,
+ qp->rx_dma_chan->device->copy_align);
+
/* If DMA engine usage is possible, try to find the max size for that */
max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
- max -= max % (1 << qp->dma_chan->device->copy_align);
+ max -= max % (1 << copy_align);
return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+{
+ unsigned int head = qp->tx_index;
+ unsigned int tail = qp->remote_rx_info->entry;
+
+ return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+
static void ntb_transport_doorbell_callback(void *data, int vector)
{
struct ntb_transport_ctx *nt = data;
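
The new ntb_transport_tx_free_entry() is ordinary ring arithmetic between the local head (tx_index) and the tail the peer advertises in remote_rx_info; head == tail means the ring is fully free. A stand-alone check of the formula:

#include <stdio.h>

/* Free-slot count for a ring of 'size' entries, as computed above:
 * when head == tail the ring is considered fully free. */
static unsigned int free_entries(unsigned int head, unsigned int tail,
				 unsigned int size)
{
	return tail > head ? tail - head : size + tail - head;
}

int main(void)
{
	printf("%u\n", free_entries(0, 0, 8));	/* 8: nothing in flight */
	printf("%u\n", free_entries(3, 1, 8));	/* 6: two entries pending */
	return 0;
}
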
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 72226acb5c0f..53c11621d5b1 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -21,6 +21,7 @@ config BLK_DEV_PMEM
default LIBNVDIMM
depends on HAS_IOMEM
select ND_BTT if BTT
+ select ND_PFN if NVDIMM_PFN
help
Memory ranges for PMEM are described by either an NFIT
(NVDIMM Firmware Interface Table, see CONFIG_NFIT_ACPI), a
@@ -47,12 +48,16 @@ config ND_BLK
(CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode
capabilities.
+config ND_CLAIM
+ bool
+
config ND_BTT
tristate
config BTT
bool "BTT: Block Translation Table (atomic sector updates)"
default y if LIBNVDIMM
+ select ND_CLAIM
help
The Block Translation Table (BTT) provides atomic sector
update semantics for persistent memory devices, so that
@@ -65,4 +70,22 @@ config BTT
Select Y if unsure
+config ND_PFN
+ tristate
+
+config NVDIMM_PFN
+ bool "PFN: Map persistent (device) memory"
+ default LIBNVDIMM
+ depends on ZONE_DEVICE
+ select ND_CLAIM
+ help
+ Map persistent memory, i.e. advertise it to the memory
+ management sub-system. By default persistent memory does
+ not support direct I/O, RDMA, or any other usage that
+ requires a 'struct page' to mediate an I/O request. This
+ driver allocates and initializes the infrastructure needed
+ to support those use cases.
+
+ Select Y if unsure
+
endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 594bb97c867a..ea84d3c4e8e5 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
+obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
nd_pmem-y := pmem.o
@@ -9,6 +10,8 @@ nd_btt-y := btt.o
nd_blk-y := blk.o
+nd_e820-y := e820.o
+
libnvdimm-y := core.o
libnvdimm-y += bus.o
libnvdimm-y += dimm_devs.o
@@ -17,4 +20,6 @@ libnvdimm-y += region_devs.o
libnvdimm-y += region.o
libnvdimm-y += namespace_devs.o
libnvdimm-y += label.o
+libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
libnvdimm-$(CONFIG_BTT) += btt_devs.o
+libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 4f97b248c236..0df77cb07df6 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -180,7 +180,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
* another kernel subsystem, and we just pass it through.
*/
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
- err = -EIO;
+ bio->bi_error = -EIO;
goto out;
}
@@ -199,6 +199,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
+ bio->bi_error = err;
break;
}
}
@@ -206,7 +207,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
nd_iostat_end(bio, start);
out:
- bio_endio(bio, err);
+ bio_endio(bio);
}
static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
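
The blk.c (and, below, btt.c) hunks track the 4.3 block-layer change: completion status now travels in bio->bi_error, and bio_endio() no longer takes an error argument. The resulting completion idiom, sketched against that API:

/* Sketch of the 4.3-era completion idiom used in the hunks above. */
static void complete_bio(struct bio *bio, int err)
{
	if (err)
		bio->bi_error = err;	/* status rides in the bio itself */
	bio_endio(bio);			/* no error argument any more */
}
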
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 411c7b2bb37a..254239746020 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -583,33 +583,6 @@ static void free_arenas(struct btt *btt)
}
/*
- * This function checks if the metadata layout is valid and error free
- */
-static int arena_is_valid(struct arena_info *arena, struct btt_sb *super,
- u8 *uuid, u32 lbasize)
-{
- u64 checksum;
-
- if (memcmp(super->uuid, uuid, 16))
- return 0;
-
- checksum = le64_to_cpu(super->checksum);
- super->checksum = 0;
- if (checksum != nd_btt_sb_checksum(super))
- return 0;
- super->checksum = cpu_to_le64(checksum);
-
- if (lbasize != le32_to_cpu(super->external_lbasize))
- return 0;
-
- /* TODO: figure out action for this */
- if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
- dev_info(to_dev(arena), "Found arena with an error flag\n");
-
- return 1;
-}
-
-/*
* This function reads an existing valid btt superblock and
* populates the corresponding arena_info struct
*/
@@ -632,8 +605,9 @@ static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
arena->logoff = arena_off + le64_to_cpu(super->logoff);
arena->info2off = arena_off + le64_to_cpu(super->info2off);
- arena->size = (super->nextoff > 0) ? (le64_to_cpu(super->nextoff)) :
- (arena->info2off - arena->infooff + BTT_PG_SIZE);
+ arena->size = (le64_to_cpu(super->nextoff) > 0)
+ ? (le64_to_cpu(super->nextoff))
+ : (arena->info2off - arena->infooff + BTT_PG_SIZE);
arena->flags = le32_to_cpu(super->flags);
}
@@ -665,8 +639,7 @@ static int discover_arenas(struct btt *btt)
if (ret)
goto out;
- if (!arena_is_valid(arena, super, btt->nd_btt->uuid,
- btt->lbasize)) {
+ if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
if (remaining == btt->rawsize) {
btt->init_state = INIT_NOTFOUND;
dev_info(to_dev(arena), "No existing arenas\n");
@@ -755,10 +728,13 @@ static int create_arenas(struct btt *btt)
* It is only called for an uninitialized arena when a write
* to that arena occurs for the first time.
*/
-static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
+static int btt_arena_write_layout(struct arena_info *arena)
{
int ret;
+ u64 sum;
struct btt_sb *super;
+ struct nd_btt *nd_btt = arena->nd_btt;
+ const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
ret = btt_map_init(arena);
if (ret)
@@ -773,7 +749,8 @@ static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
return -ENOMEM;
strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
- memcpy(super->uuid, uuid, 16);
+ memcpy(super->uuid, nd_btt->uuid, 16);
+ memcpy(super->parent_uuid, parent_uuid, 16);
super->flags = cpu_to_le32(arena->flags);
super->version_major = cpu_to_le16(arena->version_major);
super->version_minor = cpu_to_le16(arena->version_minor);
@@ -794,7 +771,8 @@ static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid)
super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
super->flags = 0;
- super->checksum = cpu_to_le64(nd_btt_sb_checksum(super));
+ sum = nd_sb_checksum((struct nd_gen_sb *) super);
+ super->checksum = cpu_to_le64(sum);
ret = btt_info_write(arena, super);
@@ -813,7 +791,7 @@ static int btt_meta_init(struct btt *btt)
mutex_lock(&btt->init_lock);
list_for_each_entry(arena, &btt->arena_list, list) {
- ret = btt_arena_write_layout(arena, btt->nd_btt->uuid);
+ ret = btt_arena_write_layout(arena);
if (ret)
goto unlock;
@@ -1189,7 +1167,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
* another kernel subsystem, and we just pass it through.
*/
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
- err = -EIO;
+ bio->bi_error = -EIO;
goto out;
}
@@ -1211,6 +1189,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
+ bio->bi_error = err;
break;
}
}
@@ -1218,7 +1197,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
nd_iostat_end(bio, start);
out:
- bio_endio(bio, err);
+ bio_endio(bio);
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
@@ -1446,8 +1425,6 @@ static int __init nd_btt_init(void)
{
int rc;
- BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
-
btt_major = register_blkdev(0, "btt");
if (btt_major < 0)
return btt_major;
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 75b0d80a6bd9..b2f8651e5395 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -182,4 +182,7 @@ struct btt {
int init_state;
int num_arenas;
};
+
+bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super);
+
#endif
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 6ac8c0fea3ec..59ad54a63d9f 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -21,63 +21,13 @@
#include "btt.h"
#include "nd.h"
-static void __nd_btt_detach_ndns(struct nd_btt *nd_btt)
-{
- struct nd_namespace_common *ndns = nd_btt->ndns;
-
- dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex)
- || ndns->claim != &nd_btt->dev,
- "%s: invalid claim\n", __func__);
- ndns->claim = NULL;
- nd_btt->ndns = NULL;
- put_device(&ndns->dev);
-}
-
-static void nd_btt_detach_ndns(struct nd_btt *nd_btt)
-{
- struct nd_namespace_common *ndns = nd_btt->ndns;
-
- if (!ndns)
- return;
- get_device(&ndns->dev);
- device_lock(&ndns->dev);
- __nd_btt_detach_ndns(nd_btt);
- device_unlock(&ndns->dev);
- put_device(&ndns->dev);
-}
-
-static bool __nd_btt_attach_ndns(struct nd_btt *nd_btt,
- struct nd_namespace_common *ndns)
-{
- if (ndns->claim)
- return false;
- dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex)
- || nd_btt->ndns,
- "%s: invalid claim\n", __func__);
- ndns->claim = &nd_btt->dev;
- nd_btt->ndns = ndns;
- get_device(&ndns->dev);
- return true;
-}
-
-static bool nd_btt_attach_ndns(struct nd_btt *nd_btt,
- struct nd_namespace_common *ndns)
-{
- bool claimed;
-
- device_lock(&ndns->dev);
- claimed = __nd_btt_attach_ndns(nd_btt, ndns);
- device_unlock(&ndns->dev);
- return claimed;
-}
-
static void nd_btt_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_btt *nd_btt = to_nd_btt(dev);
dev_dbg(dev, "%s\n", __func__);
- nd_btt_detach_ndns(nd_btt);
+ nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
kfree(nd_btt->uuid);
kfree(nd_btt);
@@ -172,104 +122,15 @@ static ssize_t namespace_show(struct device *dev,
return rc;
}
-static int namespace_match(struct device *dev, void *data)
-{
- char *name = data;
-
- return strcmp(name, dev_name(dev)) == 0;
-}
-
-static bool is_nd_btt_idle(struct device *dev)
-{
- struct nd_region *nd_region = to_nd_region(dev->parent);
- struct nd_btt *nd_btt = to_nd_btt(dev);
-
- if (nd_region->btt_seed == dev || nd_btt->ndns || dev->driver)
- return false;
- return true;
-}
-
-static ssize_t __namespace_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct nd_btt *nd_btt = to_nd_btt(dev);
- struct nd_namespace_common *ndns;
- struct device *found;
- char *name;
-
- if (dev->driver) {
- dev_dbg(dev, "%s: -EBUSY\n", __func__);
- return -EBUSY;
- }
-
- name = kstrndup(buf, len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- strim(name);
-
- if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
- /* pass */;
- else {
- len = -EINVAL;
- goto out;
- }
-
- ndns = nd_btt->ndns;
- if (strcmp(name, "") == 0) {
- /* detach the namespace and destroy / reset the btt device */
- nd_btt_detach_ndns(nd_btt);
- if (is_nd_btt_idle(dev))
- nd_device_unregister(dev, ND_ASYNC);
- else {
- nd_btt->lbasize = 0;
- kfree(nd_btt->uuid);
- nd_btt->uuid = NULL;
- }
- goto out;
- } else if (ndns) {
- dev_dbg(dev, "namespace already set to: %s\n",
- dev_name(&ndns->dev));
- len = -EBUSY;
- goto out;
- }
-
- found = device_find_child(dev->parent, name, namespace_match);
- if (!found) {
- dev_dbg(dev, "'%s' not found under %s\n", name,
- dev_name(dev->parent));
- len = -ENODEV;
- goto out;
- }
-
- ndns = to_ndns(found);
- if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
- dev_dbg(dev, "%s too small to host btt\n", name);
- len = -ENXIO;
- goto out_attach;
- }
-
- WARN_ON_ONCE(!is_nvdimm_bus_locked(&nd_btt->dev));
- if (!nd_btt_attach_ndns(nd_btt, ndns)) {
- dev_dbg(dev, "%s already claimed\n",
- dev_name(&ndns->dev));
- len = -EBUSY;
- }
-
- out_attach:
- put_device(&ndns->dev); /* from device_find_child */
- out:
- kfree(name);
- return len;
-}
-
static ssize_t namespace_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
+ struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
device_lock(dev);
- rc = __namespace_store(dev, attr, buf, len);
+ rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
device_unlock(dev);
@@ -324,7 +185,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev->type = &nd_btt_device_type;
dev->groups = nd_btt_attribute_groups;
device_initialize(&nd_btt->dev);
- if (ndns && !__nd_btt_attach_ndns(nd_btt, ndns)) {
+ if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
__func__, dev_name(ndns->claim));
put_device(dev);
@@ -342,30 +203,54 @@ struct device *nd_btt_create(struct nd_region *nd_region)
return dev;
}
-/*
- * nd_btt_sb_checksum: compute checksum for btt info block
+static bool uuid_is_null(u8 *uuid)
+{
+ static const u8 null_uuid[16];
+
+ return (memcmp(uuid, null_uuid, 16) == 0);
+}
+
+/**
+ * nd_btt_arena_is_valid - check if the metadata layout is valid
+ * @nd_btt: device with BTT geometry and backing device info
+ * @super: pointer to the arena's info block being tested
+ *
+ * Check consistency of the btt info block with itself by validating
+ * the checksum, and with the parent namespace by comparing the
+ * parent_uuid contained in the info block with that of the parent
+ * namespace.
*
- * Returns a fletcher64 checksum of everything in the given info block
- * except the last field (since that's where the checksum lives).
+ * Returns:
+ * false for an invalid info block, true for a valid one
*/
-u64 nd_btt_sb_checksum(struct btt_sb *btt_sb)
+bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
{
- u64 sum;
- __le64 sum_save;
-
- sum_save = btt_sb->checksum;
- btt_sb->checksum = 0;
- sum = nd_fletcher64(btt_sb, sizeof(*btt_sb), 1);
- btt_sb->checksum = sum_save;
- return sum;
+ const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+ u64 checksum;
+
+ if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
+ return false;
+
+ if (!uuid_is_null(super->parent_uuid))
+ if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
+ return false;
+
+ checksum = le64_to_cpu(super->checksum);
+ super->checksum = 0;
+ if (checksum != nd_sb_checksum((struct nd_gen_sb *) super))
+ return false;
+ super->checksum = cpu_to_le64(checksum);
+
+ /* TODO: figure out action for this */
+ if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0)
+ dev_info(&nd_btt->dev, "Found arena with an error flag\n");
+
+ return true;
}
-EXPORT_SYMBOL(nd_btt_sb_checksum);
+EXPORT_SYMBOL(nd_btt_arena_is_valid);
static int __nd_btt_probe(struct nd_btt *nd_btt,
struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
{
- u64 checksum;
-
if (!btt_sb || !ndns || !nd_btt)
return -ENODEV;
@@ -375,14 +260,8 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
if (nvdimm_namespace_capacity(ndns) < SZ_16M)
return -ENXIO;
- if (memcmp(btt_sb->signature, BTT_SIG, BTT_SIG_LEN) != 0)
- return -ENODEV;
-
- checksum = le64_to_cpu(btt_sb->checksum);
- btt_sb->checksum = 0;
- if (checksum != nd_btt_sb_checksum(btt_sb))
+ if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
- btt_sb->checksum = cpu_to_le64(checksum);
nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
@@ -416,7 +295,9 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
rc == 0 ? dev_name(dev) : "<none>");
if (rc < 0) {
- __nd_btt_detach_ndns(to_nd_btt(dev));
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ __nd_detach_ndns(dev, &nd_btt->ndns);
put_device(dev);
}
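
One subtlety in the consolidated nd_btt_arena_is_valid() above: info blocks written before the parent_uuid field existed carry all zeroes there, which is why uuid_is_null() short-circuits that comparison rather than rejecting legacy arenas. The order of checks, summarized from the code above:

	/*
	 * 1. signature must equal BTT_SIG
	 * 2. parent_uuid must match the backing namespace, unless it is
	 *    all zeroes (pre-parent_uuid info block)
	 * 3. the fletcher64 checksum must verify, computed with the
	 *    checksum field temporarily zeroed
	 * 4. IB_FLAG_ERROR_MASK is only logged for now, not rejected
	 */
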
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8eb22c0ca7ce..7e2c43f701bc 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
__func__, dimm_name, cmd_name, i);
return -ENXIO;
}
- if (!access_ok(VERIFY_READ, p + in_len, in_size))
- return -EFAULT;
if (in_len < sizeof(in_env))
copy = min_t(u32, sizeof(in_env) - in_len, in_size);
else
@@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
__func__, dimm_name, cmd_name, i);
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
- return -EFAULT;
if (out_len < sizeof(out_env))
copy = min_t(u32, sizeof(out_env) - out_len, out_size);
else
@@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
buf_len = out_len + in_len;
- if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
- return -EFAULT;
-
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@ int __init nvdimm_bus_init(void)
nvdimm_major = rc;
nd_class = class_create(THIS_MODULE, "nd");
- if (IS_ERR(nd_class))
+ if (IS_ERR(nd_class)) {
+ rc = PTR_ERR(nd_class);
goto err_class;
+ }
return 0;
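
The access_ok() calls removed above were redundant rather than load-bearing: the copy_from_user()/copy_to_user() calls that __nd_ioctl() already makes for the actual transfers validate the user pointer internally and fail with the number of uncopied bytes. A sketch of the idiom (kbuf is illustrative; p, in_len and in_size are from the surrounding code):

	/* copy_from_user() performs the access_ok() check itself ... */
	if (copy_from_user(kbuf, p + in_len, in_size))
		return -EFAULT;	/* ... so a separate pre-check adds nothing */

The class_create() hunk is a real fix, though: previously a failure there reached the error exit with rc still holding the non-negative chrdev major, so callers never saw an error code.
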
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
new file mode 100644
index 000000000000..e8f03b0e95e4
--- /dev/null
+++ b/drivers/nvdimm/claim.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "btt.h"
+#include "nd.h"
+
+void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
+{
+ struct nd_namespace_common *ndns = *_ndns;
+
+ dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
+ || ndns->claim != dev,
+ "%s: invalid claim\n", __func__);
+ ndns->claim = NULL;
+ *_ndns = NULL;
+ put_device(&ndns->dev);
+}
+
+void nd_detach_ndns(struct device *dev,
+ struct nd_namespace_common **_ndns)
+{
+ struct nd_namespace_common *ndns = *_ndns;
+
+ if (!ndns)
+ return;
+ get_device(&ndns->dev);
+ device_lock(&ndns->dev);
+ __nd_detach_ndns(dev, _ndns);
+ device_unlock(&ndns->dev);
+ put_device(&ndns->dev);
+}
+
+bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ struct nd_namespace_common **_ndns)
+{
+ if (attach->claim)
+ return false;
+ dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
+ || *_ndns,
+ "%s: invalid claim\n", __func__);
+ attach->claim = dev;
+ *_ndns = attach;
+ get_device(&attach->dev);
+ return true;
+}
+
+bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ struct nd_namespace_common **_ndns)
+{
+ bool claimed;
+
+ device_lock(&attach->dev);
+ claimed = __nd_attach_ndns(dev, attach, _ndns);
+ device_unlock(&attach->dev);
+ return claimed;
+}
+
+static int namespace_match(struct device *dev, void *data)
+{
+ char *name = data;
+
+ return strcmp(name, dev_name(dev)) == 0;
+}
+
+static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct device *seed = NULL;
+
+ if (is_nd_btt(dev))
+ seed = nd_region->btt_seed;
+ else if (is_nd_pfn(dev))
+ seed = nd_region->pfn_seed;
+
+ if (seed == dev || ndns || dev->driver)
+ return false;
+ return true;
+}
+
+static void nd_detach_and_reset(struct device *dev,
+ struct nd_namespace_common **_ndns)
+{
+ /* detach the namespace and destroy / reset the device */
+ nd_detach_ndns(dev, _ndns);
+ if (is_idle(dev, *_ndns)) {
+ nd_device_unregister(dev, ND_ASYNC);
+ } else if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ nd_btt->lbasize = 0;
+ kfree(nd_btt->uuid);
+ nd_btt->uuid = NULL;
+ } else if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ kfree(nd_pfn->uuid);
+ nd_pfn->uuid = NULL;
+ nd_pfn->mode = PFN_MODE_NONE;
+ }
+}
+
+ssize_t nd_namespace_store(struct device *dev,
+ struct nd_namespace_common **_ndns, const char *buf,
+ size_t len)
+{
+ struct nd_namespace_common *ndns;
+ struct device *found;
+ char *name;
+
+ if (dev->driver) {
+ dev_dbg(dev, "%s: -EBUSY\n", __func__);
+ return -EBUSY;
+ }
+
+ name = kstrndup(buf, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ strim(name);
+
+ if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
+ /* pass */;
+ else {
+ len = -EINVAL;
+ goto out;
+ }
+
+ ndns = *_ndns;
+ if (strcmp(name, "") == 0) {
+ nd_detach_and_reset(dev, _ndns);
+ goto out;
+ } else if (ndns) {
+ dev_dbg(dev, "namespace already set to: %s\n",
+ dev_name(&ndns->dev));
+ len = -EBUSY;
+ goto out;
+ }
+
+ found = device_find_child(dev->parent, name, namespace_match);
+ if (!found) {
+ dev_dbg(dev, "'%s' not found under %s\n", name,
+ dev_name(dev->parent));
+ len = -ENODEV;
+ goto out;
+ }
+
+ ndns = to_ndns(found);
+ if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
+ dev_dbg(dev, "%s too small to host\n", name);
+ len = -ENXIO;
+ goto out_attach;
+ }
+
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
+ if (!nd_attach_ndns(dev, ndns, _ndns)) {
+ dev_dbg(dev, "%s already claimed\n",
+ dev_name(&ndns->dev));
+ len = -EBUSY;
+ }
+
+ out_attach:
+ put_device(&ndns->dev); /* from device_find_child */
+ out:
+ kfree(name);
+ return len;
+}
+
+/*
+ * nd_sb_checksum: compute checksum for a generic info block
+ *
+ * Returns a fletcher64 checksum of everything in the given info block
+ * except the last field (since that's where the checksum lives).
+ */
+u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
+{
+ u64 sum;
+ __le64 sum_save;
+
+ BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
+ BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
+ BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
+
+ sum_save = nd_gen_sb->checksum;
+ nd_gen_sb->checksum = 0;
+ sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
+ nd_gen_sb->checksum = sum_save;
+ return sum;
+}
+EXPORT_SYMBOL(nd_sb_checksum);
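
nd_sb_checksum() zeroes the checksum field before folding the block so the stored value does not participate in its own computation, then restores it. For reference, nd_fletcher64() is a Fletcher-64 over 32-bit words; a user-space sketch of what it is believed to compute on a little-endian host (check drivers/nvdimm/core.c before relying on the exact semantics):

	#include <stdint.h>
	#include <stddef.h>

	/* hedged sketch of a Fletcher-64 over 32-bit little-endian words */
	static uint64_t fletcher64(const void *addr, size_t len)
	{
		const uint32_t *buf = addr;
		uint32_t lo32 = 0;	/* wraps modulo 2^32 */
		uint64_t hi32 = 0;
		size_t i;

		for (i = 0; i < len / sizeof(uint32_t); i++) {
			lo32 += buf[i];
			hi32 += lo32;
		}
		return hi32 << 32 | lo32;
	}
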
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index c05eb807d674..651b8d19d324 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -241,10 +241,7 @@ void nvdimm_drvdata_release(struct kref *kref)
nvdimm_free_dpa(ndd, res);
nvdimm_bus_unlock(dev);
- if (ndd->data && is_vmalloc_addr(ndd->data))
- vfree(ndd->data);
- else
- kfree(ndd->data);
+ kvfree(ndd->data);
kfree(ndd);
put_device(dev);
}
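
kvfree() is the generic helper for exactly the branch deleted above: it dispatches to vfree() or kfree() based on the address, so the caller no longer needs to remember how the buffer was allocated. The replaced pattern, for comparison (p stands in for ndd->data):

	/* open-coded equivalent of kvfree(p) */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
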
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
new file mode 100644
index 000000000000..8282db2ef99e
--- /dev/null
+++ b/drivers/nvdimm/e820.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, Christoph Hellwig.
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#include <linux/platform_device.h>
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+
+static const struct attribute_group *e820_pmem_attribute_groups[] = {
+ &nvdimm_bus_attribute_group,
+ NULL,
+};
+
+static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
+ &nd_region_attribute_group,
+ &nd_device_attribute_group,
+ NULL,
+};
+
+static int e820_pmem_remove(struct platform_device *pdev)
+{
+ struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
+
+ nvdimm_bus_unregister(nvdimm_bus);
+ return 0;
+}
+
+static int e820_pmem_probe(struct platform_device *pdev)
+{
+ static struct nvdimm_bus_descriptor nd_desc;
+ struct device *dev = &pdev->dev;
+ struct nvdimm_bus *nvdimm_bus;
+ struct resource *p;
+
+ nd_desc.attr_groups = e820_pmem_attribute_groups;
+ nd_desc.provider_name = "e820";
+ nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
+ if (!nvdimm_bus)
+ goto err;
+ platform_set_drvdata(pdev, nvdimm_bus);
+
+ for (p = iomem_resource.child; p ; p = p->sibling) {
+ struct nd_region_desc ndr_desc;
+
+ if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+ continue;
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.res = p;
+ ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
+ ndr_desc.numa_node = NUMA_NO_NODE;
+ set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
+ if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
+ goto err;
+ }
+
+ return 0;
+
+ err:
+ nvdimm_bus_unregister(nvdimm_bus);
+ dev_err(dev, "failed to register legacy persistent memory ranges\n");
+ return -ENXIO;
+}
+
+static struct platform_driver e820_pmem_driver = {
+ .probe = e820_pmem_probe,
+ .remove = e820_pmem_remove,
+ .driver = {
+ .name = "e820_pmem",
+ },
+};
+
+static __init int e820_pmem_init(void)
+{
+ return platform_driver_register(&e820_pmem_driver);
+}
+
+static __exit void e820_pmem_exit(void)
+{
+ platform_driver_unregister(&e820_pmem_driver);
+}
+
+MODULE_ALIAS("platform:e820_pmem*");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+module_init(e820_pmem_init);
+module_exit(e820_pmem_exit);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index fef0dd80d4ad..0955b2cb10fe 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
@@ -76,22 +77,54 @@ static bool is_namespace_io(struct device *dev)
return dev ? dev->type == &namespace_io_device_type : false;
}
+bool pmem_should_map_pages(struct device *dev)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+
+ if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
+ return false;
+
+ if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
+ return false;
+
+ if (is_nd_pfn(dev) || is_nd_btt(dev))
+ return false;
+
+#ifdef ARCH_MEMREMAP_PMEM
+ return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
+#else
+ return false;
+#endif
+}
+EXPORT_SYMBOL(pmem_should_map_pages);
+
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name)
{
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
- const char *suffix = "";
+ const char *suffix = NULL;
- if (ndns->claim && is_nd_btt(ndns->claim))
- suffix = "s";
+ if (ndns->claim) {
+ if (is_nd_btt(ndns->claim))
+ suffix = "s";
+ else if (is_nd_pfn(ndns->claim))
+ suffix = "m";
+ else
+ dev_WARN_ONCE(&ndns->dev, 1,
+ "unknown claim type by %s\n",
+ dev_name(ndns->claim));
+ }
- if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
- sprintf(name, "pmem%d%s", nd_region->id, suffix);
- else if (is_namespace_blk(&ndns->dev)) {
+ if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
+ if (!suffix && pmem_should_map_pages(&ndns->dev))
+ suffix = "m";
+ sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
+ } else if (is_namespace_blk(&ndns->dev)) {
struct nd_namespace_blk *nsblk;
nsblk = to_nd_namespace_blk(&ndns->dev);
- sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
+ sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
+ suffix ? suffix : "");
} else {
return NULL;
}
@@ -100,6 +133,26 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
+const u8 *nd_dev_to_uuid(struct device *dev)
+{
+ static const u8 null_uuid[16];
+
+ if (!dev)
+ return null_uuid;
+
+ if (is_namespace_pmem(dev)) {
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+ return nspm->uuid;
+ } else if (is_namespace_blk(dev)) {
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+ return nsblk->uuid;
+ } else
+ return null_uuid;
+}
+EXPORT_SYMBOL(nd_dev_to_uuid);
+
static ssize_t nstype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1235,12 +1288,22 @@ static const struct attribute_group *nd_namespace_attribute_groups[] = {
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
+ struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
struct nd_namespace_common *ndns;
resource_size_t size;
- if (nd_btt) {
- ndns = nd_btt->ndns;
- if (!ndns)
+ if (nd_btt || nd_pfn) {
+ struct device *host = NULL;
+
+ if (nd_btt) {
+ host = &nd_btt->dev;
+ ndns = nd_btt->ndns;
+ } else if (nd_pfn) {
+ host = &nd_pfn->dev;
+ ndns = nd_pfn->ndns;
+ }
+
+ if (!ndns || !host)
return ERR_PTR(-ENODEV);
/*
@@ -1251,12 +1314,12 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
- dev_name(&nd_btt->dev));
+ dev_name(host));
return ERR_PTR(-EBUSY);
}
- if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
+ if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
"host (%s) vs claim (%s) mismatch\n",
- dev_name(&nd_btt->dev),
+ dev_name(host),
dev_name(ndns->claim)))
return ERR_PTR(-ENXIO);
} else {
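
With the suffix handling in nvdimm_namespace_disk_name() above, the block-device name now encodes how a pmem namespace is accessed. Illustrative names for region 0, following the format strings in the code:

	pmem0	raw namespace, not page-mapped
	pmem0m	page-mapped, via a pfn claim or pmem_should_map_pages()
	pmem0s	sector-atomic access through a claiming BTT
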
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index e1970c71ad1c..159aed532042 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -80,4 +80,13 @@ struct resource *nsblk_add_resource(struct nd_region *nd_region,
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
+void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
+void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
+bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ struct nd_namespace_common **_ndns);
+bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ struct nd_namespace_common **_ndns);
+ssize_t nd_namespace_store(struct device *dev,
+ struct nd_namespace_common **_ndns, const char *buf,
+ size_t len);
#endif /* __ND_CORE_H__ */
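
As is common in the kernel, each claim helper above comes in a locked and a double-underscore flavor: the plain versions take device_lock() on the namespace themselves, while the __ variants assert (via dev_WARN_ONCE in claim.c) that the caller already holds it. A hedged usage sketch; claim_dev and drv belong to a hypothetical claiming device, the helpers are the real ones:

	/* namespace lock not held: use the locking wrappers */
	if (!nd_attach_ndns(claim_dev, ndns, &drv->ndns))
		return -EBUSY;	/* already claimed by someone else */
	/* ... use the namespace ... */
	nd_detach_ndns(claim_dev, &drv->ndns);

	/* device_lock(&ndns->dev) already held: use the __ variant */
	__nd_detach_ndns(claim_dev, &drv->ndns);
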
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index c41f53e74277..417e521d299c 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -29,6 +29,13 @@ enum {
ND_MAX_LANES = 256,
SECTOR_SHIFT = 9,
INT_LBASIZE_ALIGNMENT = 64,
+#if IS_ENABLED(CONFIG_NVDIMM_PFN)
+ ND_PFN_ALIGN = PAGES_PER_SECTION * PAGE_SIZE,
+ ND_PFN_MASK = ND_PFN_ALIGN - 1,
+#else
+ ND_PFN_ALIGN = 0,
+ ND_PFN_MASK = 0,
+#endif
};
struct nvdimm_drvdata {
@@ -92,8 +99,11 @@ struct nd_region {
struct device dev;
struct ida ns_ida;
struct ida btt_ida;
+ struct ida pfn_ida;
+ unsigned long flags;
struct device *ns_seed;
struct device *btt_seed;
+ struct device *pfn_seed;
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
@@ -133,6 +143,22 @@ struct nd_btt {
int id;
};
+enum nd_pfn_mode {
+ PFN_MODE_NONE,
+ PFN_MODE_RAM,
+ PFN_MODE_PMEM,
+};
+
+struct nd_pfn {
+ int id;
+ u8 *uuid;
+ struct device dev;
+ unsigned long npfns;
+ enum nd_pfn_mode mode;
+ struct nd_pfn_sb *pfn_sb;
+ struct nd_namespace_common *ndns;
+};
+
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -159,14 +185,19 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
struct nd_btt *to_nd_btt(struct device *dev);
-struct btt_sb;
-u64 nd_btt_sb_checksum(struct btt_sb *btt_sb);
+
+struct nd_gen_sb {
+ char reserved[SZ_4K - 8];
+ __le64 checksum;
+};
+
+u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
-static inline nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
{
return -ENODEV;
}
@@ -180,8 +211,36 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
return NULL;
}
+#endif
+struct nd_pfn *to_nd_pfn(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_PFN)
+int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+bool is_nd_pfn(struct device *dev);
+struct device *nd_pfn_create(struct nd_region *nd_region);
+int nd_pfn_validate(struct nd_pfn *nd_pfn);
+#else
+static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+{
+ return -ENODEV;
+}
+
+static inline bool is_nd_pfn(struct device *dev)
+{
+ return false;
+}
+
+static inline struct device *nd_pfn_create(struct nd_region *nd_region)
+{
+ return NULL;
+}
+
+static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
+{
+ return -ENODEV;
+}
#endif
+
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -217,4 +276,6 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
}
void nd_iostat_end(struct bio *bio, unsigned long start);
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
+const u8 *nd_dev_to_uuid(struct device *dev);
+bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */
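
struct nd_gen_sb is a deliberately minimal overlay: reserved[] pads to the final eight bytes so that checksum lines up with the last field of any 4K info block, which lets one checksum routine serve both the BTT and pfn superblocks. Callers use it by casting, as btt.c and pmem.c do:

	/* seal an info block; the cast is valid because btt_sb and
	 * nd_pfn_sb are both exactly 4K with the checksum last */
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);
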
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
new file mode 100644
index 000000000000..cc243754acef
--- /dev/null
+++ b/drivers/nvdimm/pfn.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __NVDIMM_PFN_H
+#define __NVDIMM_PFN_H
+
+#include <linux/types.h>
+
+#define PFN_SIG_LEN 16
+#define PFN_SIG "NVDIMM_PFN_INFO\0"
+
+struct nd_pfn_sb {
+ u8 signature[PFN_SIG_LEN];
+ u8 uuid[16];
+ u8 parent_uuid[16];
+ __le32 flags;
+ __le16 version_major;
+ __le16 version_minor;
+ __le64 dataoff;
+ __le64 npfns;
+ __le32 mode;
+ u8 padding[4012];
+ __le64 checksum;
+};
+#endif /* __NVDIMM_PFN_H */
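
The 4012 bytes of padding are what make the info block exactly one 4K block with the checksum as its final quadword, as the BUILD_BUG_ONs in nd_sb_checksum() require: 16 + 16 + 16 + 4 + 2 + 2 + 8 + 8 + 4 + 4012 + 8 = 4096, with no implicit padding since each __le64 already falls on an 8-byte boundary. A stand-alone check (user-space sketch, assuming the kernel types map to their obvious fixed-width equivalents):

	/* mirrors BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K) in claim.c */
	_Static_assert(sizeof(struct nd_pfn_sb) == 4096,
			"pfn info block must fill exactly one 4K block");
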
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
new file mode 100644
index 000000000000..3fd7d0d81a47
--- /dev/null
+++ b/drivers/nvdimm/pfn_devs.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "nd.h"
+
+static void nd_pfn_release(struct device *dev)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
+ ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
+ kfree(nd_pfn->uuid);
+ kfree(nd_pfn);
+}
+
+static struct device_type nd_pfn_device_type = {
+ .name = "nd_pfn",
+ .release = nd_pfn_release,
+};
+
+bool is_nd_pfn(struct device *dev)
+{
+ return dev ? dev->type == &nd_pfn_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_pfn);
+
+struct nd_pfn *to_nd_pfn(struct device *dev)
+{
+ struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
+
+ WARN_ON(!is_nd_pfn(dev));
+ return nd_pfn;
+}
+EXPORT_SYMBOL(to_nd_pfn);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ switch (nd_pfn->mode) {
+ case PFN_MODE_RAM:
+ return sprintf(buf, "ram\n");
+ case PFN_MODE_PMEM:
+ return sprintf(buf, "pmem\n");
+ default:
+ return sprintf(buf, "none\n");
+ }
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc = 0;
+
+ device_lock(dev);
+ nvdimm_bus_lock(dev);
+ if (dev->driver)
+ rc = -EBUSY;
+ else {
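+			/* tolerate sysfs input with or without a trailing newline */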
+ size_t n = len - 1;
+
+ if (strncmp(buf, "pmem\n", n) == 0
+ || strncmp(buf, "pmem", n) == 0) {
+			/* TODO: support allocating the memmap from PMEM itself */
+ rc = -ENOTTY;
+ } else if (strncmp(buf, "ram\n", n) == 0
+ || strncmp(buf, "ram", n) == 0)
+ nd_pfn->mode = PFN_MODE_RAM;
+ else if (strncmp(buf, "none\n", n) == 0
+ || strncmp(buf, "none", n) == 0)
+ nd_pfn->mode = PFN_MODE_NONE;
+ else
+ rc = -EINVAL;
+ }
+ dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ nvdimm_bus_unlock(dev);
+ device_unlock(dev);
+
+ return rc ? rc : len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t uuid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ if (nd_pfn->uuid)
+ return sprintf(buf, "%pUb\n", nd_pfn->uuid);
+ return sprintf(buf, "\n");
+}
+
+static ssize_t uuid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
+ dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ device_unlock(dev);
+
+ return rc ? rc : len;
+}
+static DEVICE_ATTR_RW(uuid);
+
+static ssize_t namespace_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc;
+
+ nvdimm_bus_lock(dev);
+ rc = sprintf(buf, "%s\n", nd_pfn->ndns
+ ? dev_name(&nd_pfn->ndns->dev) : "");
+ nvdimm_bus_unlock(dev);
+ return rc;
+}
+
+static ssize_t namespace_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc;
+
+ nvdimm_bus_lock(dev);
+ device_lock(dev);
+ rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
+ dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ device_unlock(dev);
+ nvdimm_bus_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(namespace);
+
+static struct attribute *nd_pfn_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_namespace.attr,
+ &dev_attr_uuid.attr,
+ NULL,
+};
+
+static struct attribute_group nd_pfn_attribute_group = {
+ .attrs = nd_pfn_attributes,
+};
+
+static const struct attribute_group *nd_pfn_attribute_groups[] = {
+ &nd_pfn_attribute_group,
+ &nd_device_attribute_group,
+ &nd_numa_attribute_group,
+ NULL,
+};
+
+static struct device *__nd_pfn_create(struct nd_region *nd_region,
+ u8 *uuid, enum nd_pfn_mode mode,
+ struct nd_namespace_common *ndns)
+{
+ struct nd_pfn *nd_pfn;
+ struct device *dev;
+
+	/* we can only create pages for contiguous ranges of pmem */
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
+
+ nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
+ if (!nd_pfn)
+ return NULL;
+
+ nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
+ if (nd_pfn->id < 0) {
+ kfree(nd_pfn);
+ return NULL;
+ }
+
+ nd_pfn->mode = mode;
+ if (uuid)
+ uuid = kmemdup(uuid, 16, GFP_KERNEL);
+ nd_pfn->uuid = uuid;
+ dev = &nd_pfn->dev;
+ dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
+ dev->parent = &nd_region->dev;
+ dev->type = &nd_pfn_device_type;
+ dev->groups = nd_pfn_attribute_groups;
+ device_initialize(&nd_pfn->dev);
+ if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+ dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+ __func__, dev_name(ndns->claim));
+ put_device(dev);
+ return NULL;
+ }
+ return dev;
+}
+
+struct device *nd_pfn_create(struct nd_region *nd_region)
+{
+ struct device *dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE,
+ NULL);
+
+ if (dev)
+ __nd_device_register(dev);
+ return dev;
+}
+
+int nd_pfn_validate(struct nd_pfn *nd_pfn)
+{
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ struct nd_namespace_io *nsio;
+ u64 checksum, offset;
+
+ if (!pfn_sb || !ndns)
+ return -ENODEV;
+
+ if (!is_nd_pmem(nd_pfn->dev.parent))
+ return -ENODEV;
+
+ /* section alignment for simple hotplug */
+ if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN)
+ return -ENODEV;
+
+ if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
+ return -ENXIO;
+
+ if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
+ return -ENODEV;
+
+ checksum = le64_to_cpu(pfn_sb->checksum);
+ pfn_sb->checksum = 0;
+ if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
+ return -ENODEV;
+ pfn_sb->checksum = cpu_to_le64(checksum);
+
+ switch (le32_to_cpu(pfn_sb->mode)) {
+ case PFN_MODE_RAM:
+ break;
+ case PFN_MODE_PMEM:
+		/* TODO: support allocating the memmap from PMEM itself */
+ return -ENOTTY;
+ default:
+ return -ENXIO;
+ }
+
+ if (!nd_pfn->uuid) {
+ /* from probe we allocate */
+ nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
+ if (!nd_pfn->uuid)
+ return -ENOMEM;
+ } else {
+ /* from init we validate */
+ if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
+ return -EINVAL;
+ }
+
+ /*
+ * These warnings are verbose because they can only trigger in
+ * the case where the physical address alignment of the
+ * namespace has changed since the pfn superblock was
+ * established.
+ */
+ offset = le64_to_cpu(pfn_sb->dataoff);
+ nsio = to_nd_namespace_io(&ndns->dev);
+ if (nsio->res.start & ND_PFN_MASK) {
+ dev_err(&nd_pfn->dev,
+ "init failed: %s not section aligned\n",
+ dev_name(&ndns->dev));
+ return -EBUSY;
+ } else if (offset >= resource_size(&nsio->res)) {
+ dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
+ dev_name(&ndns->dev));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nd_pfn_validate);
+
+int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+{
+ int rc;
+ struct device *dev;
+ struct nd_pfn *nd_pfn;
+ struct nd_pfn_sb *pfn_sb;
+ struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
+
+ if (ndns->force_raw)
+ return -ENODEV;
+
+ nvdimm_bus_lock(&ndns->dev);
+ dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
+ nvdimm_bus_unlock(&ndns->dev);
+ if (!dev)
+ return -ENOMEM;
+ dev_set_drvdata(dev, drvdata);
+ pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn = to_nd_pfn(dev);
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn);
+ nd_pfn->pfn_sb = NULL;
+ kfree(pfn_sb);
+ dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
+ rc == 0 ? dev_name(dev) : "<none>");
+ if (rc < 0) {
+ __nd_detach_ndns(dev, &nd_pfn->ndns);
+ put_device(dev);
+ } else
+ __nd_device_register(&nd_pfn->dev);
+
+ return rc;
+}
+EXPORT_SYMBOL(nd_pfn_probe);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ade9eb917a4d..b9525385c0dc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -21,18 +21,24 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
+#include "pfn.h"
#include "nd.h"
struct pmem_device {
struct request_queue *pmem_queue;
struct gendisk *pmem_disk;
+ struct nd_namespace_common *ndns;
/* One contiguous memory region per device */
phys_addr_t phys_addr;
+ /* when non-zero this device is hosting a 'pfn' instance */
+ phys_addr_t data_offset;
void __pmem *virt_addr;
size_t size;
};
@@ -44,7 +50,7 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
sector_t sector)
{
void *mem = kmap_atomic(page);
- size_t pmem_off = sector << 9;
+ phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
if (rw == READ) {
@@ -77,7 +83,7 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
if (bio_data_dir(bio))
wmb_pmem();
- bio_endio(bio, 0);
+ bio_endio(bio);
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
@@ -92,19 +98,26 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, unsigned long *pfn, long size)
+ void __pmem **kaddr, unsigned long *pfn)
{
struct pmem_device *pmem = bdev->bd_disk->private_data;
- size_t offset = sector << 9;
-
- if (!pmem)
- return -ENODEV;
+ resource_size_t offset = sector * 512 + pmem->data_offset;
+ resource_size_t size;
+
+ if (pmem->data_offset) {
+ /*
+ * Limit the direct_access() size to what is covered by
+ * the memmap
+ */
+ size = (pmem->size - offset) & ~ND_PFN_MASK;
+ } else
+ size = pmem->size - offset;
/* FIXME convert DAX to comprehend that this mapping has a lifetime */
- *kaddr = (void __force *) pmem->virt_addr + offset;
+ *kaddr = pmem->virt_addr + offset;
*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
- return pmem->size - offset;
+ return size;
}
static const struct block_device_operations pmem_fops = {
@@ -119,27 +132,33 @@ static struct pmem_device *pmem_alloc(struct device *dev,
{
struct pmem_device *pmem;
- pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
+ pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
return ERR_PTR(-ENOMEM);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
- if (!arch_has_pmem_api())
+ if (!arch_has_wmb_pmem())
dev_warn(dev, "unable to guarantee persistence of writes\n");
- if (!request_mem_region(pmem->phys_addr, pmem->size, dev_name(dev))) {
+ if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
+ dev_name(dev))) {
dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
&pmem->phys_addr, pmem->size);
- kfree(pmem);
return ERR_PTR(-EBUSY);
}
- pmem->virt_addr = memremap_pmem(pmem->phys_addr, pmem->size);
- if (!pmem->virt_addr) {
- release_mem_region(pmem->phys_addr, pmem->size);
- kfree(pmem);
- return ERR_PTR(-ENXIO);
+ if (pmem_should_map_pages(dev)) {
+ void *addr = devm_memremap_pages(dev, res);
+
+ if (IS_ERR(addr))
+ return addr;
+ pmem->virt_addr = (void __pmem *) addr;
+ } else {
+ pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr,
+ pmem->size);
+ if (!pmem->virt_addr)
+ return ERR_PTR(-ENXIO);
}
return pmem;
@@ -147,13 +166,16 @@ static struct pmem_device *pmem_alloc(struct device *dev,
static void pmem_detach_disk(struct pmem_device *pmem)
{
+ if (!pmem->pmem_disk)
+ return;
+
del_gendisk(pmem->pmem_disk);
put_disk(pmem->pmem_disk);
blk_cleanup_queue(pmem->pmem_queue);
}
-static int pmem_attach_disk(struct nd_namespace_common *ndns,
- struct pmem_device *pmem)
+static int pmem_attach_disk(struct device *dev,
+ struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
struct gendisk *disk;
@@ -162,6 +184,7 @@ static int pmem_attach_disk(struct nd_namespace_common *ndns,
return -ENOMEM;
blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
+ blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
@@ -179,8 +202,8 @@ static int pmem_attach_disk(struct nd_namespace_common *ndns,
disk->queue = pmem->pmem_queue;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
- disk->driverfs_dev = &ndns->dev;
- set_capacity(disk, pmem->size >> 9);
+ disk->driverfs_dev = dev;
+ set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
pmem->pmem_disk = disk;
add_disk(disk);
@@ -209,11 +232,152 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
return 0;
}
-static void pmem_free(struct pmem_device *pmem)
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+ struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
+ struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_region *nd_region;
+ unsigned long npfns;
+ phys_addr_t offset;
+ u64 checksum;
+ int rc;
+
+ if (!pfn_sb)
+ return -ENOMEM;
+
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn);
+ if (rc == 0 || rc == -EBUSY)
+ return rc;
+
+ /* section alignment for simple hotplug */
+ if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
+ || pmem->phys_addr & ND_PFN_MASK)
+ return -ENODEV;
+
+ nd_region = to_nd_region(nd_pfn->dev.parent);
+ if (nd_region->ro) {
+ dev_info(&nd_pfn->dev,
+ "%s is read-only, unable to init metadata\n",
+ dev_name(&nd_region->dev));
+ goto err;
+ }
+
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+ npfns = (pmem->size - SZ_8K) / SZ_4K;
+ /*
+	 * Note: we use 64 here as the standard size of struct page;
+	 * debugging options may cause it to be larger, in which case the
+ * implementation will limit the pfns advertised through
+ * ->direct_access() to those that are included in the memmap.
+ */
+ if (nd_pfn->mode == PFN_MODE_PMEM)
+ offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
+ else if (nd_pfn->mode == PFN_MODE_RAM)
+ offset = SZ_8K;
+ else
+ goto err;
+
+ npfns = (pmem->size - offset) / SZ_4K;
+ pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+ pfn_sb->dataoff = cpu_to_le64(offset);
+ pfn_sb->npfns = cpu_to_le64(npfns);
+ memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+ memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+ pfn_sb->version_major = cpu_to_le16(1);
+ checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+ pfn_sb->checksum = cpu_to_le64(checksum);
+
+ rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+ if (rc)
+ goto err;
+
+ return 0;
+ err:
+ nd_pfn->pfn_sb = NULL;
+ kfree(pfn_sb);
+ return -ENXIO;
+}
+
+static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+ struct pmem_device *pmem;
+
+ /* free pmem disk */
+ pmem = dev_get_drvdata(&nd_pfn->dev);
+ pmem_detach_disk(pmem);
+
+ /* release nd_pfn resources */
+ kfree(nd_pfn->pfn_sb);
+ nd_pfn->pfn_sb = NULL;
+
+ return 0;
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
- memunmap_pmem(pmem->virt_addr);
- release_mem_region(pmem->phys_addr, pmem->size);
- kfree(pmem);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+ struct device *dev = &nd_pfn->dev;
+ struct vmem_altmap *altmap;
+ struct nd_region *nd_region;
+ struct nd_pfn_sb *pfn_sb;
+ struct pmem_device *pmem;
+ phys_addr_t offset;
+ int rc;
+
+ if (!nd_pfn->uuid || !nd_pfn->ndns)
+ return -ENODEV;
+
+ nd_region = to_nd_region(dev->parent);
+ rc = nd_pfn_init(nd_pfn);
+ if (rc)
+ return rc;
+
+ if (PAGE_SIZE != SZ_4K) {
+ dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
+ return -ENXIO;
+ }
+ if (nsio->res.start & ND_PFN_MASK) {
+ dev_err(dev, "%s not memory hotplug section aligned\n",
+ dev_name(&ndns->dev));
+ return -ENXIO;
+ }
+
+ pfn_sb = nd_pfn->pfn_sb;
+ offset = le64_to_cpu(pfn_sb->dataoff);
+ nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+ if (nd_pfn->mode == PFN_MODE_RAM) {
+ if (offset != SZ_8K)
+ return -EINVAL;
+ nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+ altmap = NULL;
+ } else {
+ rc = -ENXIO;
+ goto err;
+ }
+
+ /* establish pfn range for lookup, and switch to direct map */
+ pmem = dev_get_drvdata(dev);
+ memunmap_pmem(dev, pmem->virt_addr);
+ pmem->virt_addr = (void __pmem *)devm_memremap_pages(dev, &nsio->res);
+ if (IS_ERR(pmem->virt_addr)) {
+ rc = PTR_ERR(pmem->virt_addr);
+ goto err;
+ }
+
+ /* attach pmem disk in "pfn-mode" */
+ pmem->data_offset = offset;
+ rc = pmem_attach_disk(dev, ndns, pmem);
+ if (rc)
+ goto err;
+
+ return rc;
+ err:
+ nvdimm_namespace_detach_pfn(ndns);
+ return rc;
}
static int nd_pmem_probe(struct device *dev)
@@ -222,7 +386,6 @@ static int nd_pmem_probe(struct device *dev)
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
struct pmem_device *pmem;
- int rc;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
@@ -233,18 +396,27 @@ static int nd_pmem_probe(struct device *dev)
if (IS_ERR(pmem))
return PTR_ERR(pmem);
+ pmem->ndns = ndns;
dev_set_drvdata(dev, pmem);
ndns->rw_bytes = pmem_rw_bytes;
+
if (is_nd_btt(dev))
- rc = nvdimm_namespace_attach_btt(ndns);
- else if (nd_btt_probe(ndns, pmem) == 0) {
+ return nvdimm_namespace_attach_btt(ndns);
+
+ if (is_nd_pfn(dev))
+ return nvdimm_namespace_attach_pfn(ndns);
+
+ if (nd_btt_probe(ndns, pmem) == 0) {
/* we'll come back as btt-pmem */
- rc = -ENXIO;
- } else
- rc = pmem_attach_disk(ndns, pmem);
- if (rc)
- pmem_free(pmem);
- return rc;
+ return -ENXIO;
+ }
+
+ if (nd_pfn_probe(ndns, pmem) == 0) {
+ /* we'll come back as pfn-pmem */
+ return -ENXIO;
+ }
+
+ return pmem_attach_disk(dev, ndns, pmem);
}
static int nd_pmem_remove(struct device *dev)
@@ -252,10 +424,11 @@ static int nd_pmem_remove(struct device *dev)
struct pmem_device *pmem = dev_get_drvdata(dev);
if (is_nd_btt(dev))
- nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
+ nvdimm_namespace_detach_btt(pmem->ndns);
+ else if (is_nd_pfn(dev))
+ nvdimm_namespace_detach_pfn(pmem->ndns);
else
pmem_detach_disk(pmem);
- pmem_free(pmem);
return 0;
}
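
To make the reservation in nd_pfn_init() above concrete, a worked example assuming the 64-byte struct page its comment mentions and a 2 MiB PMD, for PFN_MODE_PMEM on a 16 GiB namespace:

	/*
	 * npfns  = (16 GiB - 8 KiB) / 4 KiB         ~= 4,194,302 pages
	 * memmap = 64 B * npfns                     ~= 256 MiB
	 * offset = ALIGN(8 KiB + memmap, 2 MiB)      = 258 MiB
	 *
	 * so roughly 1.6% of capacity is set aside for the page array,
	 * and pmem_attach_disk() exports pmem->size - data_offset.
	 */
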
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index f28f78ccff19..7da63eac78ee 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -53,6 +53,7 @@ static int nd_region_probe(struct device *dev)
return -ENODEV;
nd_region->btt_seed = nd_btt_create(nd_region);
+ nd_region->pfn_seed = nd_pfn_create(nd_region);
if (err == 0)
return 0;
@@ -84,6 +85,7 @@ static int nd_region_remove(struct device *dev)
nvdimm_bus_lock(dev);
nd_region->ns_seed = NULL;
nd_region->btt_seed = NULL;
+ nd_region->pfn_seed = NULL;
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..529f3f02e7b2 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -345,6 +345,23 @@ static ssize_t btt_seed_show(struct device *dev,
}
static DEVICE_ATTR_RO(btt_seed);
+static ssize_t pfn_seed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
+
+ nvdimm_bus_lock(dev);
+ if (nd_region->pfn_seed)
+ rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
+ else
+ rc = sprintf(buf, "\n");
+ nvdimm_bus_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(pfn_seed);
+
static ssize_t read_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -373,6 +390,7 @@ static struct attribute *nd_region_attributes[] = {
&dev_attr_nstype.attr,
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
+ &dev_attr_pfn_seed.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
@@ -458,10 +476,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nvdimm_bus_unlock(dev);
}
if (is_nd_btt(dev) && probe) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+ is_nd_blk(dev->parent))
+ nd_region_create_blk_seed(nd_region);
nvdimm_bus_unlock(dev);
}
}
@@ -735,10 +758,12 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region->provider_data = ndr_desc->provider_data;
nd_region->nd_set = ndr_desc->nd_set;
nd_region->num_lanes = ndr_desc->num_lanes;
+ nd_region->flags = ndr_desc->flags;
nd_region->ro = ro;
nd_region->numa_node = ndr_desc->numa_node;
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
+ ida_init(&nd_region->pfn_ida);
dev = &nd_region->dev;
dev_set_name(dev, "region%d", nd_region->id);
dev->parent = &nvdimm_bus->dev;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
new file mode 100644
index 000000000000..8db297821f78
--- /dev/null
+++ b/drivers/nvmem/Kconfig
@@ -0,0 +1,39 @@
+menuconfig NVMEM
+ tristate "NVMEM Support"
+ select REGMAP
+ help
+	  Support for NVMEM (Non-Volatile Memory) devices such as EEPROMs
+	  and eFuses.
+
+	  This framework is designed to provide a generic interface to NVMEM
+	  from both the Linux kernel and userspace.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_core.
+
+ If unsure, say no.
+
+if NVMEM
+
+config QCOM_QFPROM
+ tristate "QCOM QFPROM Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+	  Say y here to enable QFPROM support. The QFPROM driver provides
+	  access to QFPROM data for the rest of the drivers via the nvmem
+	  interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_qfprom.
+
+config NVMEM_SUNXI_SID
+ tristate "Allwinner SoCs SID support"
+ depends on ARCH_SUNXI
+ select REGMAP_MMIO
+ help
+ This is a driver for the 'security ID' available on various Allwinner
+ devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_sunxi_sid.
+
+endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
new file mode 100644
index 000000000000..4328b930ad9a
--- /dev/null
+++ b/drivers/nvmem/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for nvmem drivers.
+#
+
+obj-$(CONFIG_NVMEM) += nvmem_core.o
+nvmem_core-y := core.o
+
+# Devices
+obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
+nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y := sunxi_sid.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
new file mode 100644
index 000000000000..d3c6676b3c0c
--- /dev/null
+++ b/drivers/nvmem/core.c
@@ -0,0 +1,1083 @@
+/*
+ * nvmem framework core.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct nvmem_device {
+ const char *name;
+ struct regmap *regmap;
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int ncells;
+ int id;
+ int users;
+ size_t size;
+ bool read_only;
+};
+
+struct nvmem_cell {
+ const char *name;
+ int offset;
+ int bytes;
+ int bit_offset;
+ int nbits;
+ struct nvmem_device *nvmem;
+ struct list_head node;
+};
+
+static DEFINE_MUTEX(nvmem_mutex);
+static DEFINE_IDA(nvmem_ida);
+
+static LIST_HEAD(nvmem_cells);
+static DEFINE_MUTEX(nvmem_cells_mutex);
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ int rc;
+
+	/* Stop the user from reading past the end of the device */
+ if (pos > nvmem->size)
+ return 0;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
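+	/* regmap transfers whole words, so trim any trailing partial word */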
+ count = round_down(count, nvmem->word_size);
+
+ rc = regmap_raw_read(nvmem->regmap, pos, buf, count);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ int rc;
+
+	/* Stop the user from writing past the end of the device */
+ if (pos > nvmem->size)
+ return 0;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ rc = regmap_raw_write(nvmem->regmap, pos, buf, count);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return count;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = S_IWUSR | S_IRUGO,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_attributes[] = {
+ &bin_attr_rw_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_group = {
+ .bin_attrs = nvmem_bin_rw_attributes,
+};
+
+static const struct attribute_group *nvmem_rw_dev_groups[] = {
+ &nvmem_bin_rw_group,
+ NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = S_IRUGO,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_attributes[] = {
+ &bin_attr_ro_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_group = {
+ .bin_attrs = nvmem_bin_ro_attributes,
+};
+
+static const struct attribute_group *nvmem_ro_dev_groups[] = {
+ &nvmem_bin_ro_group,
+ NULL,
+};
+
+static void nvmem_release(struct device *dev)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ ida_simple_remove(&nvmem_ida, nvmem->id);
+ kfree(nvmem);
+}
+
+static const struct device_type nvmem_provider_type = {
+ .release = nvmem_release,
+};
+
+static struct bus_type nvmem_bus_type = {
+ .name = "nvmem",
+};
+
+static int of_nvmem_match(struct device *dev, void *nvmem_np)
+{
+ return dev->of_node == nvmem_np;
+}
+
+static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
+{
+ struct device *d;
+
+ if (!nvmem_np)
+ return NULL;
+
+ d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
+
+ if (!d)
+ return NULL;
+
+ return to_nvmem_device(d);
+}
+
+static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
+{
+ struct nvmem_cell *p;
+
+ list_for_each_entry(p, &nvmem_cells, node)
+ if (p && !strcmp(p->name, cell_id))
+ return p;
+
+ return NULL;
+}
+
+static void nvmem_cell_drop(struct nvmem_cell *cell)
+{
+ mutex_lock(&nvmem_cells_mutex);
+ list_del(&cell->node);
+ mutex_unlock(&nvmem_cells_mutex);
+ kfree(cell);
+}
+
+static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
+{
+ struct nvmem_cell *cell;
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &nvmem_cells) {
+ cell = list_entry(p, struct nvmem_cell, node);
+ if (cell->nvmem == nvmem)
+ nvmem_cell_drop(cell);
+ }
+}
+
+static void nvmem_cell_add(struct nvmem_cell *cell)
+{
+ mutex_lock(&nvmem_cells_mutex);
+ list_add_tail(&cell->node, &nvmem_cells);
+ mutex_unlock(&nvmem_cells_mutex);
+}
+
+static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ struct nvmem_cell *cell)
+{
+ cell->nvmem = nvmem;
+ cell->offset = info->offset;
+ cell->bytes = info->bytes;
+ cell->name = info->name;
+
+ cell->bit_offset = info->bit_offset;
+ cell->nbits = info->nbits;
+
+ if (cell->nbits)
+ cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+ BITS_PER_BYTE);
+
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(&nvmem->dev,
+ "cell %s unaligned to nvmem stride %d\n",
+ cell->name, nvmem->stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nvmem_add_cells(struct nvmem_device *nvmem,
+ const struct nvmem_config *cfg)
+{
+ struct nvmem_cell **cells;
+ const struct nvmem_cell_info *info = cfg->cells;
+ int i, rval;
+
+ cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->ncells; i++) {
+ cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
+ if (!cells[i]) {
+ rval = -ENOMEM;
+ goto err;
+ }
+
+ rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
+ if (IS_ERR_VALUE(rval)) {
+ kfree(cells[i]);
+ goto err;
+ }
+
+ nvmem_cell_add(cells[i]);
+ }
+
+ nvmem->ncells = cfg->ncells;
+ /* remove tmp array */
+ kfree(cells);
+
+ return 0;
+err:
+ while (i--)
+ nvmem_cell_drop(cells[i]);
+
+ kfree(cells);
+ return rval;
+}
+
+/**
+ * nvmem_register() - Register an nvmem device for the given nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+{
+ struct nvmem_device *nvmem;
+ struct device_node *np;
+ struct regmap *rm;
+ int rval;
+
+ if (!config->dev)
+ return ERR_PTR(-EINVAL);
+
+ rm = dev_get_regmap(config->dev, NULL);
+ if (!rm) {
+ dev_err(config->dev, "Regmap not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
+ if (!nvmem)
+ return ERR_PTR(-ENOMEM);
+
+ rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
+ if (rval < 0) {
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
+
+ nvmem->id = rval;
+ nvmem->regmap = rm;
+ nvmem->owner = config->owner;
+ nvmem->stride = regmap_get_reg_stride(rm);
+ nvmem->word_size = regmap_get_val_bytes(rm);
+ nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
+ nvmem->dev.type = &nvmem_provider_type;
+ nvmem->dev.bus = &nvmem_bus_type;
+ nvmem->dev.parent = config->dev;
+ np = config->dev->of_node;
+ nvmem->dev.of_node = np;
+ dev_set_name(&nvmem->dev, "%s%d",
+ config->name ? : "nvmem", config->id);
+
+ nvmem->read_only = of_property_read_bool(np, "read-only") |
+ config->read_only;
+
+ nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
+ nvmem_rw_dev_groups;
+
+ device_initialize(&nvmem->dev);
+
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
+ if (rval) {
+ ida_simple_remove(&nvmem_ida, nvmem->id);
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
+
+ if (config->cells)
+ nvmem_add_cells(nvmem, config);
+
+ return nvmem;
+}
+EXPORT_SYMBOL_GPL(nvmem_register);
+
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int nvmem_unregister(struct nvmem_device *nvmem)
+{
+ mutex_lock(&nvmem_mutex);
+ if (nvmem->users) {
+ mutex_unlock(&nvmem_mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ nvmem_device_remove_all_cells(nvmem);
+ device_del(&nvmem->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister);
+
+static struct nvmem_device *__nvmem_device_get(struct device_node *np,
+ struct nvmem_cell **cellp,
+ const char *cell_id)
+{
+ struct nvmem_device *nvmem = NULL;
+
+ mutex_lock(&nvmem_mutex);
+
+ if (np) {
+ nvmem = of_nvmem_find(np);
+ if (!nvmem) {
+ mutex_unlock(&nvmem_mutex);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ } else {
+ struct nvmem_cell *cell = nvmem_find_cell(cell_id);
+
+ if (cell) {
+ nvmem = cell->nvmem;
+ *cellp = cell;
+ }
+
+ if (!nvmem) {
+ mutex_unlock(&nvmem_mutex);
+ return ERR_PTR(-ENOENT);
+ }
+ }
+
+ nvmem->users++;
+ mutex_unlock(&nvmem_mutex);
+
+ if (!try_module_get(nvmem->owner)) {
+ dev_err(&nvmem->dev,
+ "could not increase module refcount for cell %s\n",
+ nvmem->name);
+
+ mutex_lock(&nvmem_mutex);
+ nvmem->users--;
+ mutex_unlock(&nvmem_mutex);
+
+ return ERR_PTR(-EINVAL);
+ }
+
+ return nvmem;
+}
+
+static void __nvmem_device_put(struct nvmem_device *nvmem)
+{
+ module_put(nvmem->owner);
+ mutex_lock(&nvmem_mutex);
+ nvmem->users--;
+ mutex_unlock(&nvmem_mutex);
+}
+
+static int nvmem_match(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), data);
+}
+
+static struct nvmem_device *nvmem_find(const char *name)
+{
+ struct device *d;
+
+ d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
+
+ if (!d)
+ return NULL;
+
+ return to_nvmem_device(d);
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @np: Device tree node that uses the nvmem device.
+ * @id: nvmem name from nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+{
+ struct device_node *nvmem_np;
+ int index;
+
+ index = of_property_match_string(np, "nvmem-names", id);
+
+ nvmem_np = of_parse_phandle(np, "nvmem", index);
+ if (!nvmem_np)
+ return ERR_PTR(-EINVAL);
+
+ return __nvmem_device_get(nvmem_np, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+#endif
+
+/**
+ * nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @dev: Device that uses the nvmem device.
+ * @dev_name: name of the requested nvmem device.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
+{
+ if (dev->of_node) { /* try dt first */
+ struct nvmem_device *nvmem;
+
+ nvmem = of_nvmem_device_get(dev->of_node, dev_name);
+
+ if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
+ return nvmem;
+ }
+
+ return nvmem_find(dev_name);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_get);
+
+static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
+{
+ struct nvmem_device **nvmem = res;
+
+ if (WARN_ON(!nvmem || !*nvmem))
+ return 0;
+
+ return *nvmem == data;
+}
+
+static void devm_nvmem_device_release(struct device *dev, void *res)
+{
+ nvmem_device_put(*(struct nvmem_device **)res);
+}
+
+/**
+ * devm_nvmem_device_put() - put a previously obtained nvmem device
+ *
+ * @dev: Device that holds the devres for this nvmem device.
+ * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
+ * that needs to be released.
+ */
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_nvmem_device_release,
+ devm_nvmem_device_match, nvmem);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
+
+/**
+ * nvmem_device_put() - put a previously obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device that needs to be released.
+ */
+void nvmem_device_put(struct nvmem_device *nvmem)
+{
+ __nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+/**
+ * devm_nvmem_device_get() - Get the nvmem device of a device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: name matching an entry in the nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be released automatically once the
+ * device is freed.
+ */
+struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+{
+ struct nvmem_device **ptr, *nvmem;
+
+ ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ nvmem = nvmem_device_get(dev, id);
+ if (!IS_ERR(nvmem)) {
+ *ptr = nvmem;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+
+static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+{
+ struct nvmem_cell *cell = NULL;
+ struct nvmem_device *nvmem;
+
+ nvmem = __nvmem_device_get(NULL, &cell, cell_id);
+ if (IS_ERR(nvmem))
+ return ERR_CAST(nvmem);
+
+ return cell;
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
+ *
+ * @np: Device tree node that uses the nvmem cell.
+ * @name: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell must be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+ const char *name)
+{
+ struct device_node *cell_np, *nvmem_np;
+ struct nvmem_cell *cell;
+ struct nvmem_device *nvmem;
+ const __be32 *addr;
+ int rval, len, index;
+
+ index = of_property_match_string(np, "nvmem-cell-names", name);
+
+ cell_np = of_parse_phandle(np, "nvmem-cells", index);
+ if (!cell_np)
+ return ERR_PTR(-EINVAL);
+
+ nvmem_np = of_get_next_parent(cell_np);
+ if (!nvmem_np)
+ return ERR_PTR(-EINVAL);
+
+ nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+ if (IS_ERR(nvmem))
+ return ERR_CAST(nvmem);
+
+ addr = of_get_property(cell_np, "reg", &len);
+ if (!addr || (len < 2 * sizeof(u32))) {
+ dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
+ cell_np->full_name);
+ rval = -EINVAL;
+ goto err_mem;
+ }
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell) {
+ rval = -ENOMEM;
+ goto err_mem;
+ }
+
+ cell->nvmem = nvmem;
+ cell->offset = be32_to_cpup(addr++);
+ cell->bytes = be32_to_cpup(addr);
+ cell->name = cell_np->name;
+
+ addr = of_get_property(cell_np, "bits", &len);
+ if (addr && len == (2 * sizeof(u32))) {
+ cell->bit_offset = be32_to_cpup(addr++);
+ cell->nbits = be32_to_cpup(addr);
+ }
+
+ if (cell->nbits)
+ cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+ BITS_PER_BYTE);
+
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(&nvmem->dev,
+ "cell %s unaligned to nvmem stride %d\n",
+ cell->name, nvmem->stride);
+ rval = -EINVAL;
+ goto err_sanity;
+ }
+
+ nvmem_cell_add(cell);
+
+ return cell;
+
+err_sanity:
+ kfree(cell);
+
+err_mem:
+ __nvmem_device_put(nvmem);
+
+ return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
+#endif
+
+/**
+ * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: nvmem cell name to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell must be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+{
+ struct nvmem_cell *cell;
+
+ if (dev->of_node) { /* try dt first */
+ cell = of_nvmem_cell_get(dev->of_node, cell_id);
+ if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+ return cell;
+ }
+
+ return nvmem_cell_get_from_list(cell_id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+ nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell will be released
+ * automatically once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+ struct nvmem_cell **ptr, *cell;
+
+ ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ cell = nvmem_cell_get(dev, id);
+ if (!IS_ERR(cell)) {
+ *ptr = cell;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+ struct nvmem_cell **c = res;
+
+ if (WARN_ON(!c || !*c))
+ return 0;
+
+ return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
+ * obtained with devm_nvmem_cell_get().
+ *
+ * @dev: Device that requested the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_nvmem_cell_release,
+ devm_nvmem_cell_match, cell);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: nvmem cell previously allocated with nvmem_cell_get().
+ */
+void nvmem_cell_put(struct nvmem_cell *cell)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+
+ __nvmem_device_put(nvmem);
+ nvmem_cell_drop(cell);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_put);
+
+static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
+ void *buf)
+{
+ u8 *p, *b;
+ int i, bit_offset = cell->bit_offset;
+
+ p = b = buf;
+ if (bit_offset) {
+ /* First shift */
+ *b++ >>= bit_offset;
+
+ /* setup rest of the bytes if any */
+ for (i = 1; i < cell->bytes; i++) {
+ /* Get bits from next byte and shift them towards msb */
+ *p |= *b << (BITS_PER_BYTE - bit_offset);
+
+ p = b;
+ *b++ >>= bit_offset;
+ }
+
+ /* result fits in fewer bytes */
+ if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
+ *p-- = 0;
+ }
+ /* clear msb bits if any leftover in the last byte */
+ if (cell->nbits % BITS_PER_BYTE)
+ *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
+}
+
+static int __nvmem_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell *cell,
+ void *buf, size_t *len)
+{
+ int rc;
+
+ rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ /* shift bits in-place */
+ if (cell->bit_offset || cell->nbits)
+ nvmem_shift_read_buffer_in_place(cell, buf);
+
+ *len = cell->bytes;
+
+ return 0;
+}
+
+/**
+ * nvmem_cell_read() - Read a given nvmem cell
+ *
+ * @cell: nvmem cell to be read.
+ * @len: pointer to length of cell which will be populated on successful read.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
+ * The buffer should be freed by the consumer with kfree().
+ */
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+ u8 *buf;
+ int rc;
+
+ if (!nvmem || !nvmem->regmap)
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(cell->bytes, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ rc = __nvmem_cell_read(nvmem, cell, buf, len);
+ if (IS_ERR_VALUE(rc)) {
+ kfree(buf);
+ return ERR_PTR(rc);
+ }
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read);
+
+static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+ u8 *_buf, int len)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+ int i, rc, nbits, bit_offset = cell->bit_offset;
+ u8 v, *p, *buf, *b, pbyte, pbits;
+
+ nbits = cell->nbits;
+ buf = kzalloc(cell->bytes, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(buf, _buf, len);
+ p = b = buf;
+
+ if (bit_offset) {
+ pbyte = *b;
+ *b <<= bit_offset;
+
+ /* setup the first byte with lsb bits from nvmem */
+ rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+ if (rc)
+ goto err;
+ *b++ |= GENMASK(bit_offset - 1, 0) & v;
+
+ /* setup rest of the byte if any */
+ for (i = 1; i < cell->bytes; i++) {
+ /* Get last byte bits and shift them towards lsb */
+ pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
+ pbyte = *b;
+ p = b;
+ *b <<= bit_offset;
+ *b++ |= pbits;
+ }
+ }
+
+ /* if it does not end on a byte boundary */
+ if ((nbits + bit_offset) % BITS_PER_BYTE) {
+ /* setup the last byte with msb bits from nvmem */
+ rc = regmap_raw_read(nvmem->regmap,
+ cell->offset + cell->bytes - 1, &v, 1);
+ if (rc)
+ goto err;
+ *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
+ }
+
+ return buf;
+
+err:
+ kfree(buf);
+ return ERR_PTR(rc);
+}
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: number of bytes written or a negative error code on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+ struct nvmem_device *nvmem = cell->nvmem;
+ int rc;
+
+ if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+ (cell->bit_offset == 0 && len != cell->bytes))
+ return -EINVAL;
+
+ if (cell->bit_offset || cell->nbits) {
+ buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ }
+
+ rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+ /* free the tmp buffer */
+ if (cell->bit_offset || cell->nbits)
+ kfree(buf);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_write);
+
+/**
+ * nvmem_device_cell_read() - Read a cell from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @info: nvmem cell info to be read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: number of bytes read on success or a negative
+ * error code on error.
+ */
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf)
+{
+ struct nvmem_cell cell;
+ int rc;
+ ssize_t len;
+
+ if (!nvmem || !nvmem->regmap)
+ return -EINVAL;
+
+ rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
+
+/**
+ * nvmem_device_cell_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @info: nvmem cell info to be written
+ * @buf: buffer to be written to cell.
+ *
+ * Return: number of bytes written or a negative error code on failure.
+ */
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf)
+{
+ struct nvmem_cell cell;
+ int rc;
+
+ if (!nvmem || !nvmem->regmap)
+ return -EINVAL;
+
+ rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return nvmem_cell_write(&cell, buf, cell.bytes);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
+
+/**
+ * nvmem_device_read() - Read from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: number of bytes read on success or a negative
+ * error code on error.
+ */
+int nvmem_device_read(struct nvmem_device *nvmem,
+ unsigned int offset,
+ size_t bytes, void *buf)
+{
+ int rc;
+
+ if (!nvmem || !nvmem->regmap)
+ return -EINVAL;
+
+ rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_read);
+
+/**
+ * nvmem_device_write() - Write to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to write.
+ * @buf: buffer to be written.
+ *
+ * Return: number of bytes written or a negative error code on failure.
+ */
+int nvmem_device_write(struct nvmem_device *nvmem,
+ unsigned int offset,
+ size_t bytes, void *buf)
+{
+ int rc;
+
+ if (!nvmem || !nvmem->regmap)
+ return -EINVAL;
+
+ rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_write);
+
+static int __init nvmem_init(void)
+{
+ return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+ bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("nvmem Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
new file mode 100644
index 000000000000..afb67e7eeee4
--- /dev/null
+++ b/drivers/nvmem/qfprom.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config qfprom_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .reg_stride = 1,
+};
+
+static struct nvmem_config econfig = {
+ .name = "qfprom",
+ .owner = THIS_MODULE,
+};
+
+static int qfprom_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qfprom_regmap_config.max_register = resource_size(res) - 1;
+
+ regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static const struct of_device_id qfprom_of_match[] = {
+ { .compatible = "qcom,qfprom",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+ .probe = qfprom_probe,
+ .remove = qfprom_remove,
+ .driver = {
+ .name = "qcom,qfprom",
+ .of_match_table = qfprom_of_match,
+ },
+};
+module_platform_driver(qfprom_driver);
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm QFPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
new file mode 100644
index 000000000000..14777dd5212d
--- /dev/null
+++ b/drivers/nvmem/sunxi_sid.c
@@ -0,0 +1,171 @@
+/*
+ * Allwinner sunXi SoCs Security ID support.
+ *
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+
+static struct nvmem_config econfig = {
+ .name = "sunxi-sid",
+ .read_only = true,
+ .owner = THIS_MODULE,
+};
+
+struct sunxi_sid {
+ void __iomem *base;
+};
+
+/*
+ * We read the entire key, due to a 32 bit read alignment requirement. Since
+ * we want to return the requested byte, this results in somewhat slower code
+ * and uses 4 times more reads than needed but keeps the code simpler. Since
+ * the SID is only very rarely probed, this is not really an issue.
+ */
+static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
+ const unsigned int offset)
+{
+ u32 sid_key;
+
+ sid_key = ioread32be(sid->base + round_down(offset, 4));
+ sid_key >>= (offset % 4) * 8;
+
+ return sid_key; /* Only return the last byte */
+}
+
+static int sunxi_sid_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct sunxi_sid *sid = context;
+ unsigned int offset = *(u32 *)reg;
+ u8 *buf = val;
+
+ while (val_size) {
+ *buf++ = sunxi_sid_read_byte(sid, offset);
+ val_size--;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int sunxi_sid_write(void *context, const void *data, size_t count)
+{
+ /* Unimplemented, dummy to keep regmap core happy */
+ return 0;
+}
+
+static struct regmap_bus sunxi_sid_bus = {
+ .read = sunxi_sid_read,
+ .write = sunxi_sid_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static struct regmap_config sunxi_sid_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .reg_stride = 1,
+ .writeable_reg = sunxi_sid_writeable_reg,
+};
+
+static int sunxi_sid_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct regmap *regmap;
+ struct sunxi_sid *sid;
+ int i, size;
+ char *randomness;
+
+ sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
+ if (!sid)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sid->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sid->base))
+ return PTR_ERR(sid->base);
+
+ size = resource_size(res) - 1;
+ sunxi_sid_regmap_config.max_register = size;
+
+ regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
+ &sunxi_sid_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ randomness = kzalloc(size, GFP_KERNEL);
+ if (!randomness) {
+ nvmem_unregister(nvmem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < size; i++)
+ randomness[i] = sunxi_sid_read_byte(sid, i);
+
+ add_device_randomness(randomness, size);
+ kfree(randomness);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int sunxi_sid_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id sunxi_sid_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-sid" },
+ { .compatible = "allwinner,sun7i-a20-sid" },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+
+static struct platform_driver sunxi_sid_driver = {
+ .probe = sunxi_sid_probe,
+ .remove = sunxi_sid_remove,
+ .driver = {
+ .name = "eeprom-sunxi-sid",
+ .of_match_table = sunxi_sid_of_match,
+ },
+};
+module_platform_driver(sunxi_sid_driver);
+
+MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
+MODULE_DESCRIPTION("Allwinner sunxi security id driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
config OF_ADDRESS
def_bool y
- depends on !SPARC
+ depends on !SPARC && HAS_IOMEM
select OF_ADDRESS_PCI if PCI
config OF_ADDRESS_PCI
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 8bfda6ade2c0..384574c3987c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -845,10 +845,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
struct resource res;
while (dn) {
- if (of_address_to_resource(dn, 0, &res))
- continue;
- if (res.start == base_address)
+ if (!of_address_to_resource(dn, 0, &res) &&
+ res.start == base_address)
return dn;
+
dn = of_find_matching_node(dn, matches);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 07496560e5b9..6e82bc42373b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -967,7 +967,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
-#define MAX_PHYS_ADDR ((phys_addr_t)~0)
+#ifndef MAX_MEMBLOCK_ADDR
+#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
+#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
@@ -984,16 +986,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
}
size &= PAGE_MASK;
- if (base > MAX_PHYS_ADDR) {
+ if (base > MAX_MEMBLOCK_ADDR) {
pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
- if (base + size - 1 > MAX_PHYS_ADDR) {
+ if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- ((u64)MAX_PHYS_ADDR) + 1, base + size);
- size = MAX_PHYS_ADDR - base + 1;
+ ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+ size = MAX_MEMBLOCK_ADDR - base + 1;
}
if (base + size < phys_offset) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 3cf7a01f557f..55317fa9c9dc 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -18,6 +18,7 @@
* driver.
*/
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -431,6 +432,7 @@ int of_irq_get_byname(struct device_node *dev, const char *name)
return of_irq_get(dev, index);
}
+EXPORT_SYMBOL_GPL(of_irq_get_byname);
/**
* of_irq_count - Count the number of IRQs a node uses
@@ -576,3 +578,23 @@ err:
kfree(desc);
}
}
+
+/**
+ * of_msi_configure - Set the msi_domain field of a device
+ * @dev: device structure to associate with an MSI irq domain
+ * @np: device node for that device
+ */
+void of_msi_configure(struct device *dev, struct device_node *np)
+{
+ struct device_node *msi_np;
+ struct irq_domain *d;
+
+ msi_np = of_parse_phandle(np, "msi-parent", 0);
+ if (!msi_np)
+ return;
+
+ d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI);
+ if (!d)
+ d = irq_find_host(msi_np);
+ dev_set_msi_domain(dev, d);
+}
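For illustration, a condensed sketch of how a bus core is expected to call of_msi_configure() while creating a device, mirroring the drivers/of/platform.c hunk further below (not part of this patch; the helper name is hypothetical and the declaration is assumed to live in linux/of_irq.h):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static void hypothetical_bus_setup_device(struct device *dev,
                                          struct device_node *np)
{
        dev->of_node = np;
        /* resolve the "msi-parent" phandle into the device's MSI irq domain */
        of_msi_configure(dev, np);
}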
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fdc60db60829..1350fa25cdb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -16,6 +16,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
@@ -266,7 +267,8 @@ EXPORT_SYMBOL(of_phy_attach);
bool of_phy_is_fixed_link(struct device_node *np)
{
struct device_node *dn;
- int len;
+ int len, err;
+ const char *managed;
/* New binding */
dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +277,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
return true;
}
+ err = of_property_read_string(np, "managed", &managed);
+ if (err == 0 && strcmp(managed, "auto") != 0)
+ return true;
+
/* Old binding */
if (of_get_property(np, "fixed-link", &len) &&
len == (5 * sizeof(__be32)))
@@ -289,8 +295,19 @@ int of_phy_register_fixed_link(struct device_node *np)
struct fixed_phy_status status = {};
struct device_node *fixed_link_node;
const __be32 *fixed_link_prop;
- int len;
+ int link_gpio;
+ int len, err;
struct phy_device *phy;
+ const char *managed;
+
+ err = of_property_read_string(np, "managed", &managed);
+ if (err == 0) {
+ if (strcmp(managed, "in-band-status") == 0) {
+ /* status is zeroed, namely its .link member */
+ phy = fixed_phy_register(PHY_POLL, &status, -1, np);
+ return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+ }
+ }
/* New binding */
fixed_link_node = of_get_child_by_name(np, "fixed-link");
@@ -303,8 +320,13 @@ int of_phy_register_fixed_link(struct device_node *np)
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
+ link_gpio = of_get_named_gpio_flags(fixed_link_node,
+ "link-gpios", 0, NULL);
of_node_put(fixed_link_node);
- phy = fixed_phy_register(PHY_POLL, &status, np);
+ if (link_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np);
return IS_ERR(phy) ? PTR_ERR(phy) : 0;
}
@@ -316,7 +338,7 @@ int of_phy_register_fixed_link(struct device_node *np)
status.speed = be32_to_cpu(fixed_link_prop[2]);
status.pause = be32_to_cpu(fixed_link_prop[3]);
status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
- phy = fixed_phy_register(PHY_POLL, &status, np);
+ phy = fixed_phy_register(PHY_POLL, &status, -1, np);
return IS_ERR(phy) ? PTR_ERR(phy) : 0;
}
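For illustration, a minimal sketch of how a MAC driver's probe path would pick up the extended fixed-link handling above (not part of this patch; the helper is hypothetical):

#include <linux/of.h>
#include <linux/of_mdio.h>

static int mac_setup_fixed_link(struct device_node *np)
{
        /*
         * True for the new binding, the old 5-cell binding, and now also
         * when a "managed" property other than "auto" is present.
         */
        if (!of_phy_is_fixed_link(np))
                return 0;

        /* may return -EPROBE_DEFER while a link-gpios GPIO materializes */
        return of_phy_register_fixed_link(np);
}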
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ddf8e42c9367..1001efaedcb8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -184,6 +184,7 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
of_dma_deconfigure(&dev->dev);
@@ -456,6 +457,15 @@ int of_platform_populate(struct device_node *root,
}
EXPORT_SYMBOL_GPL(of_platform_populate);
+int of_platform_default_populate(struct device_node *root,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ return of_platform_populate(root, of_default_bus_match_table, lookup,
+ parent);
+}
+EXPORT_SYMBOL_GPL(of_platform_default_populate);
+
static int of_platform_device_destroy(struct device *dev, void *data)
{
/* Do not touch devices not populated from the device tree */
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
.remove = unittest_remove,
.driver = {
.name = "unittest",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(unittest_match),
},
};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
static struct i2c_driver unittest_i2c_dev_driver = {
.driver = {
.name = "unittest-i2c-dev",
- .owner = THIS_MODULE,
},
.probe = unittest_i2c_dev_probe,
.remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
static struct i2c_driver unittest_i2c_mux_driver = {
.driver = {
.name = "unittest-i2c-mux",
- .owner = THIS_MODULE,
},
.probe = unittest_i2c_mux_probe,
.remove = unittest_i2c_mux_remove,
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 02ff84fcfa61..957b42198328 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1103,16 +1103,9 @@ static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
struct ioc *ioc = ioc_list;
while (ioc != NULL) {
- u32 *res_ptr = (u32 *)ioc->res_map;
- int j;
-
- for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
- if ((j & 7) == 0)
- seq_puts(m, "\n ");
- seq_printf(m, "%08x", *res_ptr);
- res_ptr++;
- }
- seq_puts(m, "\n\n");
+ seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
+ ioc->res_size, false);
+ seq_putc(m, '\n');
ioc = ioc->next;
break; /* XXX - remove me */
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index a0580afe1713..baec33c4e698 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,9 +560,6 @@ dino_fixup_bus(struct pci_bus *bus)
} else if (bus->parent) {
int i;
- pci_read_bridge_bases(bus);
-
-
for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
if((bus->self->resource[i].flags &
(IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 9ee04b4b68bf..144c77dfe4b1 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -691,7 +691,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d,
if (dest_cpu < 0)
return -1;
- cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+ cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index dceb9ddfd99a..7b9e89ba0465 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,7 +693,6 @@ lba_fixup_bus(struct pci_bus *bus)
if (bus->parent) {
int i;
/* PCI-PCI Bridge */
- pci_read_bridge_bases(bus);
for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
pci_claim_bridge_resource(bus->self, i);
} else {
@@ -1556,8 +1555,11 @@ lba_driver_probe(struct parisc_device *dev)
if (lba_dev->hba.lmmio_space.flags)
pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
lba_dev->hba.lmmio_space_offset);
- if (lba_dev->hba.gmmio_space.flags)
- pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+ if (lba_dev->hba.gmmio_space.flags) {
+ /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
+ pr_warn("LBA: Not registering GMMIO space %pR\n",
+ &lba_dev->hba.gmmio_space);
+ }
pci_add_resource(&resources, &lba_dev->hba.bus_num);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index f1441e466c06..225049b492e5 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1854,14 +1854,9 @@ sba_proc_bitmap_info(struct seq_file *m, void *p)
{
struct sba_device *sba_dev = sba_list;
struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
- unsigned int *res_ptr = (unsigned int *)ioc->res_map;
- int i;
- for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
- if ((i & 7) == 0)
- seq_puts(m, "\n ");
- seq_printf(m, " %08x", *res_ptr);
- }
+ seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
+ ioc->res_size, false);
seq_putc(m, '\n');
return 0;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->dev.release = free_pardevice;
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
- if (ret)
- goto err_put_dev;
+ if (ret) {
+ put_device(&par_dev->dev);
+ goto err_put_port;
+ }
/* Chain this onto the list */
par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
- goto err_put_dev;
+ device_unregister(&par_dev->dev);
+ goto err_put_port;
}
port->flags |= PARPORT_FLAG_EXCL;
}
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
return par_dev;
-err_put_dev:
- put_device(&par_dev->dev);
err_free_devname:
kfree(devname);
err_free_par_dev:
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 73e4af400a5a..be3f631c3f75 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
#
obj-$(CONFIG_ALPHA) += setup-irq.o
obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_ARM64) += setup-irq.o
obj-$(CONFIG_UNICORE32) += setup-irq.o
obj-$(CONFIG_SUPERH) += setup-irq.o
obj-$(CONFIG_MIPS) += setup-irq.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d9b64a175990..769f7e35f1a2 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -439,6 +439,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
.release = pci_vpd_pci22_release,
};
+static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_read_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_write_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static const struct pci_vpd_ops pci_vpd_f0_ops = {
+ .read = pci_vpd_f0_read,
+ .write = pci_vpd_f0_write,
+ .release = pci_vpd_pci22_release,
+};
+
+static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ int ret = 0;
+
+ if (!tdev)
+ return -ENODEV;
+ if (!tdev->vpd || !tdev->multifunction ||
+ dev->class != tdev->class || dev->vendor != tdev->vendor ||
+ dev->device != tdev->device)
+ ret = -ENODEV;
+
+ pci_dev_put(tdev);
+ return ret;
+}
+
int pci_vpd_pci22_init(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd;
@@ -447,12 +497,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
return -ENODEV;
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ int ret = pci_vpd_f0_dev_check(dev);
+
+ if (ret)
+ return ret;
+ }
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
return -ENOMEM;
vpd->base.len = PCI_VPD_PCI22_SIZE;
- vpd->base.ops = &pci_vpd_pci22_ops;
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+ vpd->base.ops = &pci_vpd_f0_ops;
+ else
+ vpd->base.ops = &pci_vpd_pci22_ops;
mutex_init(&vpd->lock);
vpd->cap = cap;
vpd->busy = false;
@@ -531,6 +590,14 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
+static bool pcie_downstream_port(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM;
+}
+
bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
@@ -546,10 +613,7 @@ bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
- int type = pci_pcie_type(dev);
-
- return (type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ return pcie_downstream_port(dev) &&
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
@@ -628,10 +692,9 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
- if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+ pos == PCI_EXP_SLTSTA)
*val = PCI_EXP_SLTSTA_PDS;
- }
return 0;
}
@@ -657,10 +720,9 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
return ret;
}
- if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+ pos == PCI_EXP_SLTSTA)
*val = PCI_EXP_SLTSTA_PDS;
- }
return 0;
}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index a8099d4d0c9d..eeb9fb2b47aa 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -17,34 +17,15 @@
#include "pci.h"
-static int ats_alloc_one(struct pci_dev *dev, int ps)
+void pci_ats_init(struct pci_dev *dev)
{
int pos;
- u16 cap;
- struct pci_ats *ats;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
- return -ENODEV;
-
- ats = kzalloc(sizeof(*ats), GFP_KERNEL);
- if (!ats)
- return -ENOMEM;
-
- ats->pos = pos;
- ats->stu = ps;
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
- ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
- dev->ats = ats;
-
- return 0;
-}
+ return;
-static void ats_free_one(struct pci_dev *dev)
-{
- kfree(dev->ats);
- dev->ats = NULL;
+ dev->ats_cap = pos;
}
/**
@@ -56,43 +37,36 @@ static void ats_free_one(struct pci_dev *dev)
*/
int pci_enable_ats(struct pci_dev *dev, int ps)
{
- int rc;
u16 ctrl;
+ struct pci_dev *pdev;
- BUG_ON(dev->ats && dev->ats->is_enabled);
-
- if (ps < PCI_ATS_MIN_STU)
+ if (!dev->ats_cap)
return -EINVAL;
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
- mutex_lock(&pdev->sriov->lock);
- if (pdev->ats)
- rc = pdev->ats->stu == ps ? 0 : -EINVAL;
- else
- rc = ats_alloc_one(pdev, ps);
-
- if (!rc)
- pdev->ats->ref_cnt++;
- mutex_unlock(&pdev->sriov->lock);
- if (rc)
- return rc;
- }
+ if (WARN_ON(dev->ats_enabled))
+ return -EBUSY;
- if (!dev->is_physfn) {
- rc = ats_alloc_one(dev, ps);
- if (rc)
- return rc;
- }
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+ /*
+ * Note that enabling ATS on a VF fails unless it's already enabled
+ * with the same STU on the PF.
+ */
ctrl = PCI_ATS_CTRL_ENABLE;
- if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 1;
+ if (dev->is_virtfn) {
+ pdev = pci_physfn(dev);
+ if (pdev->ats_stu != ps)
+ return -EINVAL;
+
+ atomic_inc(&pdev->ats_ref_cnt); /* count enabled VFs */
+ } else {
+ dev->ats_stu = ps;
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ }
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+ dev->ats_enabled = 1;
return 0;
}
EXPORT_SYMBOL_GPL(pci_enable_ats);
@@ -103,28 +77,25 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
*/
void pci_disable_ats(struct pci_dev *dev)
{
+ struct pci_dev *pdev;
u16 ctrl;
- BUG_ON(!dev->ats || !dev->ats->is_enabled);
-
- pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
- ctrl &= ~PCI_ATS_CTRL_ENABLE;
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 0;
+ if (WARN_ON(!dev->ats_enabled))
+ return;
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+ if (atomic_read(&dev->ats_ref_cnt))
+ return; /* VFs still enabled */
- mutex_lock(&pdev->sriov->lock);
- pdev->ats->ref_cnt--;
- if (!pdev->ats->ref_cnt)
- ats_free_one(pdev);
- mutex_unlock(&pdev->sriov->lock);
+ if (dev->is_virtfn) {
+ pdev = pci_physfn(dev);
+ atomic_dec(&pdev->ats_ref_cnt);
}
- if (!dev->is_physfn)
- ats_free_one(dev);
+ pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+
+ dev->ats_enabled = 0;
}
EXPORT_SYMBOL_GPL(pci_disable_ats);
@@ -132,16 +103,13 @@ void pci_restore_ats_state(struct pci_dev *dev)
{
u16 ctrl;
- if (!pci_ats_enabled(dev))
+ if (!dev->ats_enabled)
return;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
- BUG();
ctrl = PCI_ATS_CTRL_ENABLE;
if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
-
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(pci_restore_ats_state);
@@ -159,23 +127,16 @@ EXPORT_SYMBOL_GPL(pci_restore_ats_state);
*/
int pci_ats_queue_depth(struct pci_dev *dev)
{
- int pos;
u16 cap;
+ if (!dev->ats_cap)
+ return -EINVAL;
+
if (dev->is_virtfn)
return 0;
- if (dev->ats)
- return dev->ats->qdep;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-
- return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
+ pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}
EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
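For illustration, a sketch of the slimmed-down ATS API from a caller's (typically an IOMMU driver's) point of view (not part of this patch; the helper name is hypothetical, and PCI_ATS_MIN_STU corresponds to the 4KB smallest translation unit):

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int hypothetical_enable_ats(struct pci_dev *pdev)
{
        int qdep = pci_ats_queue_depth(pdev);

        if (qdep < 0)
                return qdep;    /* -EINVAL: no ATS capability */

        /* for a VF this succeeds only if the PF is enabled with the same STU */
        return pci_enable_ats(pdev, PCI_ATS_MIN_STU);
}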
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index c132bddc03f3..d5e58bae95cf 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -53,7 +53,7 @@ config PCI_RCAR_GEN2_PCIE
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on ARM && OF
+ depends on (ARM || ARM64) && OF
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -117,7 +117,7 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate "Broadcom iProc PCIe controller"
- depends on OF && ARM
+ depends on OF && (ARM || ARM64)
default n
help
This enables the iProc PCIe core controller support for Broadcom's
@@ -135,8 +135,8 @@ config PCIE_IPROC_PLATFORM
through the generic platform bus interface
config PCIE_IPROC_BCMA
- bool "Broadcom iProc PCIe BCMA bus driver"
- depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+ tristate "Broadcom iProc PCIe BCMA bus driver"
+ depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
select PCIE_IPROC
select BCMA
select PCI_DOMAINS
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 80db09e47800..199e29a044cd 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -17,6 +17,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -83,6 +84,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
+static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
+{
+ return readl(pp->dbi_base + offset);
+}
+
+static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
+ u32 value)
+{
+ writel(value, pp->dbi_base + offset);
+}
+
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -155,7 +167,6 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -325,6 +336,9 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
+ int gpio_sel;
+ enum of_gpio_flags flags;
+ unsigned long gpio_flags;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -382,9 +396,25 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync failed\n");
- goto err_phy;
+ goto err_get_sync;
+ }
+
+ gpio_sel = of_get_gpio_flags(dev->of_node, 0, &flags);
+ if (gpio_is_valid(gpio_sel)) {
+ gpio_flags = (flags & OF_GPIO_ACTIVE_LOW) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+ ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
+ "pcie_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+ gpio_sel, ret);
+ goto err_gpio;
+ }
+ } else if (gpio_sel == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio;
}
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
@@ -395,12 +425,14 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
- goto err_add_port;
+ goto err_gpio;
return 0;
-err_add_port:
+err_gpio:
pm_runtime_put(dev);
+
+err_get_sync:
pm_runtime_disable(dev);
err_phy:
@@ -431,6 +463,85 @@ static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct pcie_port *pp = &dra7xx->pp;
+ u32 val;
+
+ /* clear MSE */
+ val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val &= ~PCI_COMMAND_MEMORY;
+ dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct pcie_port *pp = &dra7xx->pp;
+ u32 val;
+
+ /* set MSE */
+ val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY;
+ dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int count = dra7xx->phy_count;
+
+ while (count--) {
+ phy_power_off(dra7xx->phy[count]);
+ phy_exit(dra7xx->phy[count]);
+ }
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int phy_count = dra7xx->phy_count;
+ int ret;
+ int i;
+
+ for (i = 0; i < phy_count; i++) {
+ ret = phy_init(dra7xx->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(dra7xx->phy[i]);
+ if (ret < 0) {
+ phy_exit(dra7xx->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(dra7xx->phy[i]);
+ phy_exit(dra7xx->phy[i]);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
+};
+
static const struct of_device_id of_dra7xx_pcie_match[] = {
{ .compatible = "ti,dra7-pcie", },
{},
@@ -442,6 +553,7 @@ static struct platform_driver dra7xx_pcie_driver = {
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
+ .pm = &dra7xx_pcie_pm_ops,
},
};
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index ba46e581db99..265dd25169bf 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -38,7 +38,16 @@ struct gen_pci_cfg_windows {
const struct gen_pci_cfg_bus_ops *ops;
};
+/*
+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
+ * sysdata. Add pci_sys_data as the first element in struct gen_pci so
+ * that when we use a gen_pci pointer as sysdata, it is also a pointer to
+ * a struct pci_sys_data.
+ */
struct gen_pci {
+#ifdef CONFIG_ARM
+ struct pci_sys_data sys;
+#endif
struct pci_host_bridge host;
struct gen_pci_cfg_windows cfg;
struct list_head resources;
@@ -48,8 +57,7 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = bus->sysdata;
resource_size_t idx = bus->number - pci->cfg.bus_range->start;
return pci->cfg.win[idx] + ((devfn << 8) | where);
@@ -64,8 +72,7 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = bus->sysdata;
resource_size_t idx = bus->number - pci->cfg.bus_range->start;
return pci->cfg.win[idx] + ((devfn << 12) | where);
@@ -198,13 +205,6 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
return 0;
}
-static int gen_pci_setup(int nr, struct pci_sys_data *sys)
-{
- struct gen_pci *pci = sys->private_data;
- list_splice_init(&pci->resources, &sys->resources);
- return 1;
-}
-
static int gen_pci_probe(struct platform_device *pdev)
{
int err;
@@ -214,13 +214,7 @@ static int gen_pci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- struct hw_pci hw = {
- .nr_controllers = 1,
- .private_data = (void **)&pci,
- .setup = gen_pci_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .ops = &gen_pci_ops,
- };
+ struct pci_bus *bus, *child;
if (!pci)
return -ENOMEM;
@@ -258,7 +252,27 @@ static int gen_pci_probe(struct platform_device *pdev)
return err;
}
- pci_common_init_dev(dev, &hw);
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+
+ bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources);
+ if (!bus) {
+ dev_err(dev, "Scanning rootbus failed");
+ return -ENODEV;
+ }
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ pci_bus_add_devices(bus);
return 0;
}
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 233a196c6e66..8f3a9813c4e5 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -117,11 +117,7 @@ static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
- ret = pcie_phy_poll_ack(dbi_base, 0);
- if (ret)
- return ret;
-
- return 0;
+ return pcie_phy_poll_ack(dbi_base, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
@@ -148,11 +144,7 @@ static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
/* deassert Read signal */
writel(0x00, dbi_base + PCIE_PHY_CTRL);
- ret = pcie_phy_poll_ack(dbi_base, 0);
- if (ret)
- return ret;
-
- return 0;
+ return pcie_phy_poll_ack(dbi_base, 0);
}
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index f34892e0edb4..e71da991949b 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -104,14 +104,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
u32 offset, reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
@@ -142,15 +141,14 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -164,15 +162,14 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -196,7 +193,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -277,7 +273,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
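The keystone changes replace an irq-number round trip (irq_get_msi_desc() on d->irq, then msi->dev->bus->sysdata) with accessors on the irq_data the core already passes in. The resulting pattern in an irq_chip callback, using only APIs that appear in this series:

static void sketch_msi_chip_op(struct irq_data *d)
{
	/* the MSI descriptor is attached to the irq_data itself */
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	/* and the host bridge private data hangs off the PCI bus */
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* register access keyed off d->irq / d->hwirq goes here */
	(void)ks_pcie;
}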
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 734da589cdfb..81253e70b1c5 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -110,8 +110,9 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return -EINVAL;
}
-static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
@@ -137,8 +138,10 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
* Traverse through pending legacy interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(unsigned int __irq,
+ struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
@@ -212,9 +215,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
/* Legacy IRQ */
for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
- irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
- ks_pcie_legacy_irq_handler);
+ irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ ks_pcie_legacy_irq_handler,
+ ks_pcie);
}
ks_dw_pcie_enable_legacy_irqs(ks_pcie);
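irq_set_chained_handler_and_data() installs the chained handler and its handler data in one operation, closing the window in which the handler could run while its data is still unset (or, on teardown, already cleared). That is the motivation for this conversion here and in the xgene hunks below:

static void sketch_attach_chained_irq(unsigned int virq,
				      struct keystone_pcie *ks_pcie)
{
	/* setup: handler and data become visible together */
	irq_set_chained_handler_and_data(virq, ks_pcie_legacy_irq_handler,
					 ks_pcie);
}

static void sketch_detach_chained_irq(unsigned int virq)
{
	/* teardown: NULL handler and NULL data, removed as a pair */
	irq_set_chained_handler_and_data(virq, NULL, NULL);
}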
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 70aa09556ec5..67ec5e1c99db 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -879,6 +879,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
return;
pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+ of_node_put(msi_node);
if (pcie->msi)
pcie->msi->dev = &pcie->pdev->dev;
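of_parse_phandle() returns its device_node with an elevated reference count, and the mvebu fix simply drops that reference once the lookup is done. The generic shape of the pattern:

#include <linux/of.h>
#include <linux/of_pci.h>

static struct msi_controller *sketch_find_msi_parent(struct device_node *np)
{
	struct device_node *msi_node;
	struct msi_controller *msi;

	msi_node = of_parse_phandle(np, "msi-parent", 0);
	if (!msi_node)
		return NULL;

	msi = of_pci_find_msi_chip_by_node(msi_node);
	of_node_put(msi_node);	/* balance the get from of_parse_phandle() */
	return msi;
}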
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 10c05718dbfd..81df0c1fe063 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -1248,7 +1248,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
tegra_cpuidle_pcie_irqs_in_use();
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 2d31d4d6fd08..996327cfa1e1 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -40,8 +40,8 @@ struct xgene_msi_group {
struct xgene_msi {
struct device_node *node;
- struct msi_controller mchip;
- struct irq_domain *domain;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
@@ -223,7 +223,6 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_info(domain, virq, msi_irq,
&xgene_msi_bottom_irq_chip, domain->host_data,
handle_simple_irq, NULL, NULL);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -252,17 +251,17 @@ static const struct irq_domain_ops msi_domain_ops = {
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->domain)
+ msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain)
return -ENOMEM;
- msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
- &xgene_msi_domain_info,
- msi->domain);
+ msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+ &xgene_msi_domain_info,
+ msi->inner_domain);
- if (!msi->mchip.domain) {
- irq_domain_remove(msi->domain);
+ if (!msi->msi_domain) {
+ irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -271,10 +270,10 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->mchip.domain)
- irq_domain_remove(msi->mchip.domain);
- if (msi->domain)
- irq_domain_remove(msi->domain);
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+ if (msi->inner_domain)
+ irq_domain_remove(msi->inner_domain);
}
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
@@ -340,7 +339,7 @@ static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
* CPU0
*/
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- virq = irq_find_mapping(xgene_msi->domain, hw_irq);
+ virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
WARN_ON(!virq);
if (virq != 0)
generic_handle_irq(virq);
@@ -368,10 +367,8 @@ static int xgene_msi_remove(struct platform_device *pdev)
for (i = 0; i < NR_HW_IRQS; i++) {
virq = msi->msi_groups[i].gic_irq;
- if (virq != 0) {
- irq_set_chained_handler(virq, NULL);
- irq_set_handler_data(virq, NULL);
- }
+ if (virq != 0)
+ irq_set_chained_handler_and_data(virq, NULL, NULL);
}
kfree(msi->msi_groups);
@@ -421,8 +418,8 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
}
if (err) {
- irq_set_chained_handler(msi_group->gic_irq, NULL);
- irq_set_handler_data(msi_group->gic_irq, NULL);
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ NULL, NULL);
return err;
}
}
@@ -441,8 +438,8 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq, NULL);
- irq_set_handler_data(msi_group->gic_irq, NULL);
+ irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+ NULL);
}
}
@@ -497,7 +494,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
goto error;
}
xgene_msi->msi_addr = res->start;
-
+ xgene_msi->node = pdev->dev.of_node;
xgene_msi->num_cpus = num_possible_cpus();
rc = xgene_msi_init_allocator(xgene_msi);
@@ -561,19 +558,10 @@ static int xgene_msi_probe(struct platform_device *pdev)
cpu_notifier_register_done();
- xgene_msi->mchip.of_node = pdev->dev.of_node;
- rc = of_pci_msi_chip_add(&xgene_msi->mchip);
- if (rc) {
- dev_err(&pdev->dev, "failed to add MSI controller chip\n");
- goto error_notifier;
- }
-
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
-error_notifier:
- unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
error:
xgene_msi_remove(pdev);
return rc;
@@ -582,7 +570,6 @@ error:
static struct platform_driver xgene_msi_driver = {
.driver = {
.name = "xgene-msi",
- .owner = THIS_MODULE,
.of_match_table = xgene_msi_match_table,
},
.probe = xgene_msi_probe,
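The xgene-msi rework drops struct msi_controller entirely and stacks two irq domains instead: an inner linear domain that models the hardware MSI vectors, and a PCI/MSI domain on top that the PCI core finds and uses. Consolidating the hunks above, the allocation boils down to:

static int sketch_allocate_domains(struct xgene_msi *msi)
{
	/* inner domain: one linear slot per hardware MSI vector */
	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	/* PCI/MSI domain stacked on top, keyed to the controller's OF node */
	msi->msi_domain = pci_msi_create_irq_domain(msi->node,
						    &xgene_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}
	return 0;
}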
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index a9dfb70d623a..0236ab9d5720 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -321,8 +321,16 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return ret;
break;
case IORESOURCE_MEM:
- xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
- res->start - window->offset);
+ if (res->flags & IORESOURCE_PREFETCH)
+ xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
+ res->start,
+ res->start -
+ window->offset);
+ else
+ xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
+ res->start,
+ res->start -
+ window->offset);
break;
case IORESOURCE_BUS:
break;
@@ -514,6 +522,7 @@ static int xgene_pcie_msi_enable(struct pci_bus *bus)
if (!bus->msi)
return -ENODEV;
+ of_node_put(msi_node);
bus->msi->dev = &bus->dev;
return 0;
}
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 69486be7181e..52aa6e34002b 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -255,7 +255,7 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+ struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
@@ -326,8 +326,8 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi(data);
- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+ struct msi_desc *msi = irq_data_get_msi_desc(data);
+ struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
clear_irq_range(pp, irq, 1, data->hwirq);
}
@@ -350,7 +350,6 @@ static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -388,7 +387,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
addrp = of_get_address(np, index, NULL, NULL);
pp->cfg0_mod_base = of_read_number(addrp, ns);
pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
- } else {
+ } else if (!pp->va_cfg0_base) {
dev_err(pp->dev, "missing *config* reg space\n");
}
@@ -526,7 +525,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
#ifdef CONFIG_PCI_MSI
dw_pcie_msi_chip.dev = pp->dev;
- dw_pci.msi_ctrl = &dw_pcie_msi_chip;
#endif
dw_pci.nr_controllers = 1;
@@ -708,8 +706,15 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
struct pcie_port *pp = sys_to_pcie(sys);
pp->root_bus_nr = sys->busnr;
- bus = pci_scan_root_bus(pp->dev, sys->busnr,
- &dw_pcie_ops, sys, &sys->resources);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops,
+ sys, &sys->resources,
+ &dw_pcie_msi_chip);
+ else
+ bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
+ sys, &sys->resources);
+
if (!bus)
return NULL;
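With msi_ctrl gone from struct hw_pci, the DesignWare core now associates its msi_controller with the root bus at scan time. pci_scan_root_bus_msi() is the variant introduced later in this diff (see the drivers/pci/probe.c hunks) that stores the controller in bus->msi before child enumeration; IS_ENABLED() keeps the selection a compile-time constant:

static struct pci_bus *sketch_scan_root(struct pcie_port *pp,
					struct pci_sys_data *sys)
{
	if (IS_ENABLED(CONFIG_PCI_MSI))
		return pci_scan_root_bus_msi(pp->dev, sys->busnr,
					     &dw_pcie_ops, sys,
					     &sys->resources,
					     &dw_pcie_msi_chip);

	return pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, sys,
				 &sys->resources);
}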
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index d77481ea553e..fe2efb141a9b 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -58,9 +58,17 @@
#define SYS_RC_INTX_EN 0x330
#define SYS_RC_INTX_MASK 0xf
-static inline struct iproc_pcie *sys_to_pcie(struct pci_sys_data *sys)
+static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
- return sys->private_data;
+ struct iproc_pcie *pcie;
+#ifdef CONFIG_ARM
+ struct pci_sys_data *sys = bus->sysdata;
+
+ pcie = sys->private_data;
+#else
+ pcie = bus->sysdata;
+#endif
+ return pcie;
}
/**
@@ -71,8 +79,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct iproc_pcie *pcie = sys_to_pcie(sys);
+ struct iproc_pcie *pcie = iproc_data(bus);
unsigned slot = PCI_SLOT(devfn);
unsigned fn = PCI_FUNC(devfn);
unsigned busno = bus->number;
@@ -186,32 +193,34 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
int ret;
+ void *sysdata;
struct pci_bus *bus;
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
- if (pcie->phy) {
- ret = phy_init(pcie->phy);
- if (ret) {
- dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
- return ret;
- }
-
- ret = phy_power_on(pcie->phy);
- if (ret) {
- dev_err(pcie->dev, "unable to power on PCIe PHY\n");
- goto err_exit_phy;
- }
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+ return ret;
+ }
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+ goto err_exit_phy;
}
iproc_pcie_reset(pcie);
+#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
+ sysdata = &pcie->sysdata;
+#else
+ sysdata = pcie;
+#endif
- bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
- &pcie->sysdata, res);
+ bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
dev_err(pcie->dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
@@ -229,7 +238,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
+#ifdef CONFIG_ARM
pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+#endif
pci_bus_add_devices(bus);
return 0;
@@ -239,12 +250,9 @@ err_rm_root_bus:
pci_remove_root_bus(bus);
err_power_off_phy:
- if (pcie->phy)
- phy_power_off(pcie->phy);
+ phy_power_off(pcie->phy);
err_exit_phy:
- if (pcie->phy)
- phy_exit(pcie->phy);
-
+ phy_exit(pcie->phy);
return ret;
}
EXPORT_SYMBOL(iproc_pcie_setup);
@@ -254,10 +262,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
pci_stop_root_bus(pcie->root_bus);
pci_remove_root_bus(pcie->root_bus);
- if (pcie->phy) {
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- }
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
return 0;
}
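The removed if (pcie->phy) guards are safe to drop because the generic PHY API is NULL-tolerant: phy_init(), phy_power_on(), phy_power_off() and phy_exit() all succeed as no-ops when handed a NULL phy, precisely to support optional PHYs. The setup/teardown pairing then reduces to:

#include <linux/phy/phy.h>

static int sketch_phy_up(struct phy *phy)	/* phy may be NULL */
{
	int ret;

	ret = phy_init(phy);		/* returns 0 for a NULL phy */
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);
	return ret;
}

static void sketch_phy_down(struct phy *phy)
{
	phy_power_off(phy);		/* both calls are NULL-safe */
	phy_exit(phy);
}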
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index ba0a108309cc..c9e4c10a462e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -21,7 +21,7 @@
* @dev: pointer to device data structure
* @base: PCIe host controller I/O register base
* @resources: linked list of all PCI resources
- * @sysdata: Per PCI controller data
+ * @sysdata: Per PCI controller data (ARM-specific)
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @irqs: interrupt IDs
@@ -29,7 +29,9 @@
struct iproc_pcie {
struct device *dev;
void __iomem *base;
+#ifdef CONFIG_ARM
struct pci_sys_data sysdata;
+#endif
struct pci_bus *root_bus;
struct phy *phy;
int irqs[IPROC_PCIE_MAX_NUM_IRQS];
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index c086210f2ffd..7678fe0820d7 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -664,7 +664,6 @@ static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index c49fbdc0f6e4..98d2683181bc 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -223,8 +223,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
status = readl(&app_reg->int_sts);
if (status & MSI_CTRL_INT) {
- if (!IS_ENABLED(CONFIG_PCI_MSI))
- BUG();
+ BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
dw_handle_msi_irq(pp);
}
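BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)) works because IS_ENABLED() is an ordinary integer constant (1 for =y or =m, 0 otherwise), so it composes with normal C expressions where an #ifdef cannot, and the compiler folds the dead branch away. A minimal illustration of the same idiom:

#include <linux/bug.h>
#include <linux/kconfig.h>
#include <linux/types.h>

static void sketch_require_msi_support(bool msi_pending)
{
	/* unreachable with CONFIG_PCI_MSI=y; loud failure without it */
	if (msi_pending)
		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
}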
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index f1a06a091ccb..3c7a0d580b1e 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -227,18 +227,16 @@ static struct pci_ops xilinx_pcie_ops = {
*/
static void xilinx_pcie_destroy_msi(unsigned int irq)
{
- struct irq_desc *desc;
struct msi_desc *msi;
struct xilinx_pcie_port *port;
- desc = irq_to_desc(irq);
- msi = irq_desc_get_msi_desc(desc);
- port = sys_to_pcie(msi->dev->bus->sysdata);
-
- if (!test_bit(irq, msi_irq_in_use))
+ if (!test_bit(irq, msi_irq_in_use)) {
+ msi = irq_get_msi_desc(irq);
+ port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
- else
+ } else {
clear_bit(irq, msi_irq_in_use);
+ }
}
/**
@@ -338,7 +336,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -377,7 +374,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -449,14 +445,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
return IRQ_HANDLED;
}
- /* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
- XILINX_PCIE_REG_RPIFR1);
+ if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
- /* Handle INTx Interrupt */
- val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
- XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
- generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+ /* Handle INTx Interrupt */
+ val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+ generic_handle_irq(irq_find_mapping(port->irq_domain,
+ val));
+ }
}
if (status & XILINX_PCIE_INTR_MSI) {
@@ -647,9 +646,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
struct pci_bus *bus;
port->root_busno = sys->busnr;
- bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
- sys, &sys->resources);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bus = pci_scan_root_bus_msi(port->dev, sys->busnr,
+ &xilinx_pcie_ops, sys,
+ &sys->resources,
+ &xilinx_pcie_msi_chip);
+ else
+ bus = pci_scan_root_bus(port->dev, sys->busnr,
+ &xilinx_pcie_ops, sys, &sys->resources);
return bus;
}
@@ -847,7 +852,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
- hw.msi_ctrl = &xilinx_pcie_msi_chip;
#endif
pci_common_init_dev(dev, &hw);
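The xilinx interrupt-handler hunk above fixes an ordering problem: the FIFO register was cleared and the INTx decode run even when the latched interrupt was an MSI. Factored out, the corrected INTx-only path looks like:

static void sketch_handle_intx(struct xilinx_pcie_port *port, u32 val)
{
	/* MSI entries are decoded elsewhere; only INTx is handled here */
	if (val & XILINX_PCIE_RPIFR1_MSI_INTR)
		return;

	/* clear interrupt FIFO register 1 */
	pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, XILINX_PCIE_REG_RPIFR1);

	/* INTA..INTD map to hwirqs 1..4 in the legacy domain */
	val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
	       XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
	generic_handle_irq(irq_find_mapping(port->irq_domain, val));
}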
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 56d8486dc167..d1fab97d6b01 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -83,12 +83,12 @@ GET_STATUS(attention_status, u8)
GET_STATUS(latch_status, u8)
GET_STATUS(adapter_status, u8)
-static ssize_t power_read_file(struct pci_slot *slot, char *buf)
+static ssize_t power_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_power_status(slot->hotplug, &value);
+ retval = get_power_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -140,22 +140,22 @@ static struct pci_slot_attribute hotplug_slot_attr_power = {
.store = power_write_file
};
-static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
+static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_attention_status(slot->hotplug, &value);
+ retval = get_attention_status(pci_slot->hotplug, &value);
if (retval)
return retval;
return sprintf(buf, "%d\n", value);
}
-static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
+static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
- struct hotplug_slot_ops *ops = slot->hotplug->ops;
+ struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
@@ -169,7 +169,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
goto exit;
}
if (ops->set_attention_status)
- retval = ops->set_attention_status(slot->hotplug, attention);
+ retval = ops->set_attention_status(pci_slot->hotplug, attention);
module_put(ops->owner);
exit:
@@ -184,12 +184,12 @@ static struct pci_slot_attribute hotplug_slot_attr_attention = {
.store = attention_write_file
};
-static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
+static ssize_t latch_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_latch_status(slot->hotplug, &value);
+ retval = get_latch_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -201,12 +201,12 @@ static struct pci_slot_attribute hotplug_slot_attr_latch = {
.show = latch_read_file,
};
-static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
+static ssize_t presence_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_adapter_status(slot->hotplug, &value);
+ retval = get_adapter_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -307,43 +307,43 @@ static bool has_test_file(struct pci_slot *pci_slot)
return false;
}
-static int fs_add_slot(struct pci_slot *slot)
+static int fs_add_slot(struct pci_slot *pci_slot)
{
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(slot);
+ pci_hp_create_module_link(pci_slot);
- if (has_power_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_power_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_attention_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_latch_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_adapter_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_test_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
goto exit_test;
@@ -352,45 +352,45 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_adapter_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_adapter_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+ if (has_latch_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_attention_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ if (has_power_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(slot);
+ pci_hp_remove_module_link(pci_slot);
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *slot)
+static void fs_remove_slot(struct pci_slot *pci_slot)
{
- if (has_power_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ if (has_power_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_attention_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+ if (has_latch_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_adapter_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+ if (has_test_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(slot);
+ pci_hp_remove_module_link(pci_slot);
}
static struct hotplug_slot *get_slot_from_name(const char *name)
@@ -467,37 +467,37 @@ EXPORT_SYMBOL_GPL(__pci_hp_register);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
- * @hotplug: pointer to the &struct hotplug_slot to deregister
+ * @slot: pointer to the &struct hotplug_slot to deregister
*
* The @slot must have been registered with the pci hotplug subsystem
* previously with a call to pci_hp_register().
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_deregister(struct hotplug_slot *hotplug)
+int pci_hp_deregister(struct hotplug_slot *slot)
{
struct hotplug_slot *temp;
- struct pci_slot *slot;
+ struct pci_slot *pci_slot;
- if (!hotplug)
+ if (!slot)
return -ENODEV;
mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(hotplug));
- if (temp != hotplug) {
+ temp = get_slot_from_name(hotplug_slot_name(slot));
+ if (temp != slot) {
mutex_unlock(&pci_hp_mutex);
return -ENODEV;
}
- list_del(&hotplug->slot_list);
+ list_del(&slot->slot_list);
- slot = hotplug->pci_slot;
- fs_remove_slot(slot);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
+ pci_slot = slot->pci_slot;
+ fs_remove_slot(pci_slot);
+ dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- hotplug->release(hotplug);
- slot->hotplug = NULL;
- pci_destroy_slot(slot);
+ slot->release(slot);
+ pci_slot->hotplug = NULL;
+ pci_destroy_slot(pci_slot);
mutex_unlock(&pci_hp_mutex);
return 0;
@@ -506,7 +506,7 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
- * @hotplug: pointer to the slot whose info has changed
+ * @slot: pointer to the slot whose info has changed
 * @info: pointer to the info to copy into the slot's info structure
*
* @slot must have been registered with the pci
@@ -514,13 +514,13 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+int pci_hp_change_slot_info(struct hotplug_slot *slot,
struct hotplug_slot_info *info)
{
- if (!hotplug || !info)
+ if (!slot || !info)
return -ENODEV;
- memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
+ memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
return 0;
}
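The wholesale rename in pci_hotplug_core.c is about disambiguation, not behavior: the file juggles struct pci_slot (the PCI core's sysfs-backed slot object) and struct hotplug_slot (the hotplug driver's handle), and naming locals of both types "slot" invited exactly the mix-ups untangled in pci_hp_deregister(). The two objects cross-reference each other:

static void sketch_slot_relationship(struct hotplug_slot *slot)
{
	/* hotplug driver object -> core slot object */
	struct pci_slot *pci_slot = slot->pci_slot;

	/* and back again: core slot object -> hotplug driver object */
	WARN_ON(pci_slot->hotplug != slot);
}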
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 57cd1327346f..62d6fe6c3714 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -101,18 +101,12 @@ struct controller {
unsigned int power_fault_detected;
};
-#define INT_BUTTON_IGNORE 0
#define INT_PRESENCE_ON 1
#define INT_PRESENCE_OFF 2
-#define INT_SWITCH_CLOSE 3
-#define INT_SWITCH_OPEN 4
-#define INT_POWER_FAULT 5
-#define INT_POWER_FAULT_CLEAR 6
-#define INT_BUTTON_PRESS 7
-#define INT_BUTTON_RELEASE 8
-#define INT_BUTTON_CANCEL 9
-#define INT_LINK_UP 10
-#define INT_LINK_DOWN 11
+#define INT_POWER_FAULT 3
+#define INT_BUTTON_PRESS 4
+#define INT_LINK_UP 5
+#define INT_LINK_DOWN 6
#define STATIC_STATE 0
#define BLINKINGON_STATE 1
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 2913f7e68a10..5c24e938042f 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -109,21 +109,23 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (slot_status & PCI_EXP_SLTSTA_CC) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_CC);
- return 1;
- }
- while (timeout > 0) {
- msleep(10);
- timeout -= 10;
+ while (true) {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n",
+ __func__);
+ return 0;
+ }
+
if (slot_status & PCI_EXP_SLTSTA_CC) {
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_CC);
return 1;
}
+ if (timeout < 0)
+ break;
+ msleep(10);
+ timeout -= 10;
}
return 0; /* timeout */
}
@@ -190,6 +192,11 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
pcie_wait_cmd(ctrl);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ if (slot_ctrl == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ goto out;
+ }
+
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
@@ -205,6 +212,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
if (wait)
pcie_wait_cmd(ctrl);
+out:
mutex_unlock(&ctrl->ctrl_lock);
}
@@ -535,7 +543,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
struct pci_dev *dev;
struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
- u8 open, present;
+ u8 present;
bool link;
/*
@@ -546,9 +554,14 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
intr_loc = 0;
do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
+ if (detected == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
detected &= ~intr_loc;
intr_loc |= detected;
@@ -581,15 +594,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
return IRQ_HANDLED;
- /* Check MRL Sensor Changed */
- if (intr_loc & PCI_EXP_SLTSTA_MRLSC) {
- pciehp_get_latch_status(slot, &open);
- ctrl_info(ctrl, "Latch %s on Slot(%s)\n",
- open ? "open" : "close", slot_name(slot));
- pciehp_queue_interrupt_event(slot, open ? INT_SWITCH_OPEN :
- INT_SWITCH_CLOSE);
- }
-
/* Check Attention Button Pressed */
if (intr_loc & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
@@ -649,13 +653,11 @@ void pcie_enable_notification(struct controller *ctrl)
cmd |= PCI_EXP_SLTCTL_ABPE;
else
cmd |= PCI_EXP_SLTCTL_PDCE;
- if (MRL_SENS(ctrl))
- cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
- PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+ PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
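Several pciehp hunks add the same defensive test: a config read returning all ones ((u16)~0) on Slot Status or Slot Control almost certainly means the device is gone (surprise removal) or not responding, because reads to an absent function complete with all bits set. As a helper, the guard would look like this (the helper is illustrative; the patch open-codes the check):

#include <linux/pci.h>

/* true if the Slot Status read came from a live device */
static bool sketch_slot_status_valid(struct pci_dev *pdev, u16 *status)
{
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, status);
	return *status != (u16)~0;
}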
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f66be868ad21..d4497141d083 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -39,14 +39,13 @@ struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
- struct irq_domain *domain = NULL;
+ struct irq_domain *domain;
- if (dev->bus->msi)
- domain = dev->bus->msi->domain;
- if (!domain)
- domain = arch_get_pci_msi_domain(dev);
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain)
+ return domain;
- return domain;
+ return arch_get_pci_msi_domain(dev);
}
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -77,24 +76,9 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
/* Arch hooks */
-struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
-{
- return NULL;
-}
-
-static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
-{
- struct msi_controller *msi_ctrl = dev->bus->msi;
-
- if (msi_ctrl)
- return msi_ctrl;
-
- return pcibios_msi_controller(dev);
-}
-
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
- struct msi_controller *chip = pci_msi_controller(dev);
+ struct msi_controller *chip = dev->bus->msi;
int err;
if (!chip || !chip->setup_irq)
@@ -131,7 +115,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
ret = arch_setup_msi_irq(dev, entry);
if (ret < 0)
return ret;
@@ -151,7 +135,7 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
int i;
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
@@ -168,7 +152,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
entry = NULL;
if (dev->msix_enabled) {
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (irq == entry->irq)
break;
}
@@ -208,7 +192,8 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
mask_bits &= ~mask;
mask_bits |= flag;
- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+ mask_bits);
return mask_bits;
}
@@ -249,7 +234,7 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
- struct msi_desc *desc = irq_data_get_msi(data);
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
@@ -282,13 +267,15 @@ void default_restore_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
default_restore_msi_irq(dev, entry->irq);
}
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- BUG_ON(entry->dev->current_state != PCI_D0);
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
@@ -298,7 +285,6 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 data;
@@ -318,7 +304,9 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- if (entry->dev->current_state != PCI_D0) {
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base;
@@ -329,7 +317,6 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 msgctl;
@@ -363,21 +350,22 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
+ struct list_head *msi_list = dev_to_msi_list(&dev->dev);
struct msi_desc *entry, *tmp;
struct attribute **msi_attrs;
struct device_attribute *dev_attr;
int i, count = 0;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
pci_msi_teardown_msi_irqs(dev);
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ list_for_each_entry_safe(entry, tmp, msi_list, list) {
if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
+ if (list_is_last(&entry->list, msi_list))
iounmap(entry->mask_base);
}
@@ -402,18 +390,6 @@ static void free_msi_irqs(struct pci_dev *dev)
}
}
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
-
- INIT_LIST_HEAD(&desc->list);
- desc->dev = dev;
-
- return desc;
-}
-
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
@@ -448,7 +424,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
@@ -456,7 +432,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
arch_restore_msi_irqs(dev);
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
msix_mask_irq(entry, entry->masked);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
@@ -501,7 +477,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
int count = 0;
/* Determine how many msi entries we have */
- list_for_each_entry(entry, &pdev->msi_list, list)
+ for_each_pci_msi_entry(entry, pdev)
++num_msi;
if (!num_msi)
return 0;
@@ -510,7 +486,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
if (!msi_attrs)
return -ENOMEM;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
goto error_attrs;
@@ -568,7 +544,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
struct msi_desc *entry;
/* MSI Entry Initialization */
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry)
return NULL;
@@ -599,7 +575,7 @@ static int msi_verify_entries(struct pci_dev *dev)
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (!dev->no_64bit_msi || !entry->msg.address_hi)
continue;
dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -636,7 +612,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
mask = msi_mask(entry->msi_attrib.multi_cap);
msi_mask_irq(entry, mask, mask);
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -665,6 +641,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
pci_msi_set_enable(dev, 1);
dev->msi_enabled = 1;
+ pcibios_free_irq(dev);
dev->irq = entry->irq;
return 0;
}
@@ -696,7 +673,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
int i;
for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry) {
if (!i)
iounmap(base);
@@ -713,7 +690,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->mask_base = base;
entry->nvec_used = 1;
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
}
return 0;
@@ -725,7 +702,7 @@ static void msix_program_entries(struct pci_dev *dev,
struct msi_desc *entry;
int i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
@@ -792,9 +769,9 @@ static int msix_capability_init(struct pci_dev *dev,
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
-
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+ pcibios_free_irq(dev);
return 0;
out_avail:
@@ -806,7 +783,7 @@ out_avail:
struct msi_desc *entry;
int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq != 0)
avail++;
}
@@ -895,8 +872,8 @@ void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+ desc = first_pci_msi_entry(dev);
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
@@ -909,6 +886,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
+ pcibios_alloc_irq(dev);
}
void pci_disable_msi(struct pci_dev *dev)
@@ -1001,7 +979,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
return;
/* Return the device with MSI-X masked as initial states */
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
__pci_msix_desc_mask_irq(entry, 1);
}
@@ -1009,6 +987,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
+ pcibios_alloc_irq(dev);
}
void pci_disable_msix(struct pci_dev *dev)
@@ -1040,7 +1019,6 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
- INIT_LIST_HEAD(&dev->msi_list);
}
/**
@@ -1137,6 +1115,19 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
}
EXPORT_SYMBOL(pci_enable_msix_range);
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+ return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
* pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
@@ -1145,7 +1136,7 @@ EXPORT_SYMBOL(pci_enable_msix_range);
*/
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
- struct msi_desc *desc = irq_data->msi_desc;
+ struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
/*
* For MSI-X desc->irq is always equal to irq_data->irq. For
@@ -1269,12 +1260,19 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
struct msi_domain_info *info,
struct irq_domain *parent)
{
+ struct irq_domain *domain;
+
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
pci_msi_domain_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
- return msi_create_irq_domain(node, info, parent);
+ domain = msi_create_irq_domain(node, info, parent);
+ if (!domain)
+ return NULL;
+
+ domain->bus_token = DOMAIN_BUS_PCI_MSI;
+ return domain;
}
/**
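The msi.c conversion moves descriptor storage off the PCI-private dev->msi_list and onto the generic struct device, which is why every list walk becomes for_each_pci_msi_entry() and why msi_desc_to_pci_dev() and msi_desc_to_pci_sysdata() exist: a descriptor now records a plain struct device *, and PCI code recovers its pci_dev via to_pci_dev(). A typical consumer after the conversion:

static int sketch_count_msi_vectors(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int n = 0;

	/* walks the device-generic descriptor list */
	for_each_pci_msi_entry(entry, dev)
		n += entry->nvec_used;

	return n;
}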
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index f0929934bb7a..2e99a500cb83 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
@@ -59,3 +60,32 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
+
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_IRQ_DOMAIN
+ struct device_node *np;
+ struct irq_domain *d;
+
+ if (!bus->dev.of_node)
+ return NULL;
+
+ /* Start looking for a phandle to an MSI controller. */
+ np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+
+ /*
+ * If we don't have an msi-parent property, look for a domain
+ * directly attached to the host bridge.
+ */
+ if (!np)
+ np = bus->dev.of_node;
+
+ d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ if (d)
+ return d;
+
+ return irq_find_host(np);
+#else
+ return NULL;
+#endif
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 314a625b78d6..a32ba753e413 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -594,7 +594,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
/**
* pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
* @pdev: the PCI device whose delay is to be updated
- * @adev: the companion ACPI device of this PCI device
+ * @handle: ACPI handle of this device
*
* Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
* control method of either the device itself or the PCI host bridge.
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3cb2210de553..dd652f2ae03d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -388,18 +388,31 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
+
static int pci_device_probe(struct device *dev)
{
- int error = 0;
- struct pci_driver *drv;
- struct pci_dev *pci_dev;
+ int error;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = to_pci_driver(dev->driver);
+
+ error = pcibios_alloc_irq(pci_dev);
+ if (error < 0)
+ return error;
- drv = to_pci_driver(dev->driver);
- pci_dev = to_pci_dev(dev);
pci_dev_get(pci_dev);
error = __pci_device_probe(drv, pci_dev);
- if (error)
+ if (error) {
+ pcibios_free_irq(pci_dev);
pci_dev_put(pci_dev);
+ }
return error;
}
@@ -415,6 +428,7 @@ static int pci_device_remove(struct device *dev)
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
+ pcibios_free_irq(pci_dev);
pci_dev->driver = NULL;
}
@@ -453,7 +467,7 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/*
* If this is a kexec reboot, turn off Bus Master bit on the
* device to tell it to not continue to do DMA. Don't touch
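pcibios_alloc_irq() and pcibios_free_irq() are deliberately __weak: the generic driver core gets no-op defaults, while an architecture can provide real definitions that claim a legacy IRQ only while a driver is bound, and the MSI code above releases or reacquires it around MSI enable and shutdown. A sketch of an arch-side override; the body is a placeholder:

#include <linux/pci.h>

/* a non-weak definition in arch code overrides the defaults at link time */
int pcibios_alloc_irq(struct pci_dev *dev)
{
	/* e.g. route an IRQ line and fill in dev->irq */
	return 0;
}

void pcibios_free_irq(struct pci_dev *dev)
{
	/* undo whatever pcibios_alloc_irq() claimed */
}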
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0008c950452c..6a9a1116f1eb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -81,7 +81,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -138,9 +138,22 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
+{
+ /*
+ * Make sure the BAR is actually a memory resource, not an IO resource
+ */
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ return ioremap_wc(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
-#define PCI_FIND_CAP_TTL 48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
@@ -196,8 +209,6 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
return PCI_CAPABILITY_LIST;
case PCI_HEADER_TYPE_CARDBUS:
return PCI_CB_CAPABILITY_LIST;
- default:
- return 0;
}
return 0;
@@ -972,7 +983,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (pos <= 0)
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
@@ -995,7 +1006,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (!save_state || pos <= 0)
+ if (!save_state || !pos)
return;
cap = (u16 *)&save_state->cap.data[0];
@@ -1092,6 +1103,9 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
+
+ /* Restore ACS and IOV configuration state */
+ pci_enable_acs(dev);
pci_restore_iov_state(dev);
dev->state_saved = false;
@@ -2159,7 +2173,7 @@ static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
else
pos = pci_find_capability(dev, cap);
- if (pos <= 0)
+ if (!pos)
return 0;
save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
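pci_ioremap_wc_bar() mirrors pci_ioremap_bar() but creates a write-combining mapping, appropriate for memory BARs with no read or write side effects (framebuffers, write-mostly buffers) where buffered, merged stores improve throughput; teardown is the usual iounmap(). Caller's view:

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *sketch_map_framebuffer(struct pci_dev *pdev, int bar)
{
	/* returns NULL (with a WARN) if the BAR is not a memory resource */
	return pci_ioremap_wc_bar(pdev, bar);
}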
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4ff0ff1c4088..24ba9dc8910a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_FIND_CAP_TTL 48
+
extern const unsigned char pcie_link_speed[];
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 2f0ce668a775..88122dc2e1b1 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -448,7 +448,7 @@ static int resume_iter(struct device *dev, void *data)
}
/**
- * pcie_port_device_suspend - resume port services associated with a PCIe port
+ * pcie_port_device_resume - resume port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
int pcie_port_device_resume(struct device *dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..0b2be174d981 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -326,8 +326,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
- IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
- IORESOURCE_SIZEALIGN;
+ IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
__pci_read_base(dev, pci_bar_mem32, res, rom);
}
}
@@ -661,6 +660,35 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
}
+static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+
+ /*
+ * Any firmware interface that can resolve the msi_domain
+ * should be called from here.
+ */
+ d = pci_host_bridge_of_msi_domain(bus);
+
+ return d;
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+
+ /*
+ * Either bus is the root, and we must obtain it from the
+ * firmware, or we inherit it from the bridge device.
+ */
+ if (pci_is_root_bus(bus))
+ d = pci_host_bridge_msi_domain(bus);
+ else
+ d = dev_get_msi_domain(&bus->self->dev);
+
+ dev_set_msi_domain(&bus->dev, d);
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -714,6 +742,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
bridge->subordinate = child;
add_dev:
+ pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
WARN_ON(ret < 0);
@@ -826,6 +855,9 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
child->bridge_ctl = bctl;
}
+ /* Read and initialize bridge resources */
+ pci_read_bridge_bases(child);
+
cmax = pci_scan_child_bus(child);
if (cmax > subordinate)
dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -886,6 +918,9 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
+
+ /* Read and initialize bridge resources */
+ pci_read_bridge_bases(child);
max = pci_scan_child_bus(child);
} else {
/*
@@ -997,7 +1032,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
parent = pci_upstream_bridge(pdev);
- if (!parent->has_secondary_link)
+
+ /*
+ * Usually there's an upstream device (Root Port or Switch
+ * Downstream Port), but we can't assume one exists.
+ */
+ if (parent && !parent->has_secondary_link)
pdev->has_secondary_link = 1;
}
}
@@ -1103,7 +1143,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
-static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
/*
* Disable the MSI hardware to avoid screaming interrupts
@@ -1133,7 +1173,6 @@ int pci_setup_device(struct pci_dev *dev)
{
u32 class;
u8 hdr_type;
- struct pci_slot *slot;
int pos = 0;
struct pci_bus_region region;
struct resource *res;
@@ -1149,10 +1188,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
- list_for_each_entry(slot, &dev->bus->slots, list)
- if (PCI_SLOT(dev->devfn) == slot->number)
- dev->slot = slot;
-
+ pci_dev_assign_slot(dev);
/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
set this higher, assuming the system even supports it. */
dev->dma_mask = 0xffffffff;
@@ -1268,13 +1304,51 @@ int pci_setup_device(struct pci_dev *dev)
bad:
dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
dev->class, dev->hdr_type);
- dev->class = PCI_CLASS_NOT_DEFINED;
+ dev->class = PCI_CLASS_NOT_DEFINED << 8;
}
/* We found a fine healthy device, go go go... */
return 0;
}
+static void pci_configure_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
+ int mps, p_mps, rc;
+
+ if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps == p_mps)
+ return;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+ return;
+ }
+
+ /*
+ * Fancier MPS configuration is done later by
+ * pcie_bus_configure_settings()
+ */
+ if (pcie_bus_config != PCIE_BUS_DEFAULT)
+ return;
+
+ rc = pcie_set_mps(dev, p_mps);
+ if (rc) {
+ dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ p_mps);
+ return;
+ }
+
+ dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
+ p_mps, mps, 128 << dev->pcie_mpss);
+}
+
static struct hpp_type0 pci_default_type0 = {
.revision = 1,
.cache_line_size = 8,
@@ -1396,6 +1470,8 @@ static void pci_configure_device(struct pci_dev *dev)
struct hotplug_params hpp;
int ret;
+ pci_configure_mps(dev);
+
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
if (ret)
@@ -1540,10 +1616,24 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Single Root I/O Virtualization */
pci_iov_init(dev);
+ /* Address Translation Services */
+ pci_ats_init(dev);
+
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
}
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+ /*
+ * If no domain has been set through the pcibios_add_device
+ * callback, inherit the default from the bus device.
+ */
+ if (!dev_get_msi_domain(&dev->dev))
+ dev_set_msi_domain(&dev->dev,
+ dev_get_msi_domain(&dev->bus->dev));
+}
+
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
int ret;
@@ -1585,6 +1675,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
+ /* Setup MSI irq domain */
+ pci_set_msi_domain(dev);
+
/* Notifier could use PCI capabilities */
dev->match_driver = false;
ret = device_add(&dev->dev);
@@ -1791,22 +1884,6 @@ static void pcie_write_mrrs(struct pci_dev *dev)
dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
}
-static void pcie_bus_detect_mps(struct pci_dev *dev)
-{
- struct pci_dev *bridge = dev->bus->self;
- int mps, p_mps;
-
- if (!bridge)
- return;
-
- mps = pcie_get_mps(dev);
- p_mps = pcie_get_mps(bridge);
-
- if (mps != p_mps)
- dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
- mps, pci_name(bridge), p_mps);
-}
-
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
int mps, orig_mps;
@@ -1814,10 +1891,9 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
- pcie_bus_detect_mps(dev);
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+ pcie_bus_config == PCIE_BUS_DEFAULT)
return 0;
- }
mps = 128 << *(u8 *)data;
orig_mps = pcie_get_mps(dev);
@@ -1975,6 +2051,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
b->bridge = get_device(&bridge->dev);
device_enable_async_suspend(b->bridge);
pci_set_bus_of_node(b);
+ pci_set_bus_msi_domain(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
@@ -2096,8 +2173,9 @@ void pci_bus_release_busn_res(struct pci_bus *b)
res, ret ? "can not be" : "is");
}
-struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources, struct msi_controller *msi)
{
struct resource_entry *window;
bool found = false;
@@ -2114,6 +2192,8 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
if (!b)
return NULL;
+ b->msi = msi;
+
if (!found) {
dev_info(&b->dev,
"No busn resource found for root bus, will use [bus %02x-ff]\n",
@@ -2128,6 +2208,13 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
return b;
}
+
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
+ NULL);
+}
EXPORT_SYMBOL(pci_scan_root_bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
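A minimal sketch of the Max Payload Size arithmetic behind pci_configure_mps() above (hypothetical helper, not part of this patch): PCIe encodes MPS as a power-of-two code, which is why the dev_info() above reports the device maximum as 128 << dev->pcie_mpss.

/* Decode a PCIe Max Payload Size code into bytes: 128 << code. */
static unsigned int mps_code_to_bytes(unsigned int code)
{
	return 128u << code;	/* 0 -> 128, 1 -> 256, ..., 5 -> 4096 */
}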
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e9fd0e90fa3b..6a30252cd79f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,7 +163,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- * and http://www.georgebreese.com/net/software/#PCI
* Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
* the info on which Mr Breese based his work.
*
@@ -424,10 +423,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
*/
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
- /*
- * Use 'USB Device' (0x0c03fe) instead of PCI header provided
- */
- pdev->class = 0x0c03fe;
+ u32 class = pdev->class;
+
+ /* Use "USB Device (not host controller)" class */
+ pdev->class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe;
+ dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
@@ -1569,6 +1570,18 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3
#endif
+static void quirk_jmicron_async_suspend(struct pci_dev *dev)
+{
+ if (dev->multifunction) {
+ device_disable_async_suspend(&dev->dev);
+ dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
+
#ifdef CONFIG_X86_IO_APIC
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
@@ -1894,6 +1907,15 @@ static void quirk_netmos(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+static void quirk_f0_vpd_link(struct pci_dev *dev)
+{
+ if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+ return;
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
+
static void quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
@@ -1986,14 +2008,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
- /* rev 1 ncr53c810 chips don't set the class at all which means
+ u32 class = dev->class;
+
+ /*
+ * rev 1 ncr53c810 chips don't set the class at all which means
* they don't get their resources remapped. Fix that here.
*/
+ if (class)
+ return;
- if (dev->class == PCI_CLASS_NOT_DEFINED) {
- dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
- dev->class = PCI_CLASS_STORAGE_SCSI;
- }
+ dev->class = PCI_CLASS_STORAGE_SCSI << 8;
+ dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
+ class, dev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
@@ -2241,7 +2267,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
* return 1 if a HT MSI capability is found and enabled */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2300,7 +2326,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
/* Force enable MSI mapping capability on HT bridges */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2379,7 +2405,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
static int ht_check_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
int found = 0;
/* check if there is HT MSI cap or enabled on this device */
@@ -2504,7 +2530,7 @@ out:
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2829,12 +2855,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
static void fixup_ti816x_class(struct pci_dev *dev)
{
+ u32 class = dev->class;
+
/* TI 816x devices do not have class code set when in PCIe boot mode */
- dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
- dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
+ dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
+ class, dev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
- PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+ PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
/* Some PCIe devices do not work reliably with the claimed maximum
* payload size supported.
@@ -2862,7 +2891,8 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
int err;
u16 rcc;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+ pcie_bus_config == PCIE_BUS_DEFAULT)
return;
/* Intel errata specifies bits to change but does not say what they are.
@@ -3028,7 +3058,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
-
+/* Intel Cherrytrail devices do not need 10ms d3_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
@@ -3326,28 +3365,6 @@ fs_initcall_sync(pci_apply_final_quirks);
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
-static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
-{
- int pos;
-
- /* only implement PCI_CLASS_SERIAL_USB at present */
- if (dev->class == PCI_CLASS_SERIAL_USB) {
- pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
- if (!pos)
- return -ENOTTY;
-
- if (probe)
- return 0;
-
- pci_write_config_byte(dev, pos + 0x4, 1);
- msleep(100);
-
- return 0;
- } else {
- return -ENOTTY;
- }
-}
-
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
/*
@@ -3506,8 +3523,6 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
- reset_intel_generic_dev },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
@@ -3655,6 +3670,28 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
/*
+ * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
+ * class code. Fix it.
+ */
+static void quirk_tw686x_class(struct pci_dev *pdev)
+{
+ u32 class = pdev->class;
+
+ /* Use "Multimedia controller" class */
+ pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
+ dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
+ class, pdev->class);
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -3848,6 +3885,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
+ /* I219 */
+ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
@@ -4008,3 +4048,88 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
}
}
}
+
+/*
+ * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * QuickAssist Technology (QAT) is prematurely terminated in hardware. The
+ * Next Capability pointer in the MSI Capability Structure should point to
+ * the PCIe Capability Structure, but is incorrectly hardwired to 0,
+ * terminating the list.
+ */
+static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+{
+ int pos, i = 0;
+ u8 next_cap;
+ u16 reg16, *cap;
+ struct pci_cap_saved_state *state;
+
+ /* Bail if the hardware bug is fixed */
+ if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ return;
+
+ /* Bail if MSI Capability Structure is not found for some reason */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (!pos)
+ return;
+
+ /*
+ * Bail if Next Capability pointer in the MSI Capability Structure
+ * is not the expected incorrect 0x00.
+ */
+ pci_read_config_byte(pdev, pos + 1, &next_cap);
+ if (next_cap)
+ return;
+
+ /*
+ * PCIe Capability Structure is expected to be at 0x50 and should
+ * terminate the list (Next Capability pointer is 0x00). Verify
+ * that the Capability ID and Next Capability pointer are as expected.
+ * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
+ * to correctly set kernel data structures which have already been
+ * set incorrectly due to the hardware bug.
+ */
+ pos = 0x50;
+ pci_read_config_word(pdev, pos, &reg16);
+ if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
+ u32 status;
+#ifndef PCI_EXP_SAVE_REGS
+#define PCI_EXP_SAVE_REGS 7
+#endif
+ int size = PCI_EXP_SAVE_REGS * sizeof(u16);
+
+ pdev->pcie_cap = pos;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_flags_reg = reg16;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+ if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+ PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
+ pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+ if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+ return;
+
+ /*
+ * Save PCIE cap
+ */
+ state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
+ if (!state)
+ return;
+
+ state->cap.cap_nr = PCI_CAP_ID_EXP;
+ state->cap.cap_extended = 0;
+ state->cap.size = size;
+ cap = (u16 *)&state->cap.data[0];
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
+ hlist_add_head(&state->next, &pdev->saved_cap_space);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
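A minimal sketch of the standard capability-list walk that quirk_intel_qat_vf_cap() works around (hypothetical helper; it uses only config accessors and constants that appear elsewhere in this patch): each capability entry stores a Next Capability pointer in its second byte, and a pointer of 0x00 ends the list, which is exactly where the broken MSI entry stops the walk on these VFs.

/*
 * Walk the conventional PCI capability list looking for @want_id.
 * A next pointer of 0x00 terminates the list; ttl guards against
 * malformed (looping) lists. Simplified: the real helpers also mask
 * the low bits of each pointer.
 */
static u8 find_cap_by_walk(struct pci_dev *pdev, u8 want_id)
{
	u8 pos, id;
	int ttl = PCI_FIND_CAP_TTL;

	pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	while (pos && ttl--) {
		pci_read_config_byte(pdev, pos + PCI_CAP_LIST_ID, &id);
		if (id == want_id)
			return pos;
		pci_read_config_byte(pdev, pos + PCI_CAP_LIST_NEXT, &pos);
	}
	return 0;
}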
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 396c200b9ddb..429d34c348b9 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -14,6 +14,7 @@
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
+static DEFINE_MUTEX(pci_slot_mutex);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -106,9 +107,11 @@ static void pci_slot_release(struct kobject *kobj)
dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
slot->number, pci_slot_name(slot));
+ down_read(&pci_bus_sem);
list_for_each_entry(dev, &slot->bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
+ up_read(&pci_bus_sem);
list_del(&slot->list);
@@ -191,12 +194,22 @@ static int rename_slot(struct pci_slot *slot, const char *name)
return result;
}
+void pci_dev_assign_slot(struct pci_dev *dev)
+{
+ struct pci_slot *slot;
+
+ mutex_lock(&pci_slot_mutex);
+ list_for_each_entry(slot, &dev->bus->slots, list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = slot;
+ mutex_unlock(&pci_slot_mutex);
+}
+
static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
{
struct pci_slot *slot;
- /*
- * We already hold pci_bus_sem so don't worry
- */
+
+ /* We already hold pci_slot_mutex */
list_for_each_entry(slot, &parent->slots, list)
if (slot->number == slot_nr) {
kobject_get(&slot->kobj);
@@ -253,7 +266,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
int err = 0;
char *slot_name = NULL;
- down_write(&pci_bus_sem);
+ mutex_lock(&pci_slot_mutex);
if (slot_nr == -1)
goto placeholder;
@@ -301,16 +314,18 @@ placeholder:
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
+ up_read(&pci_bus_sem);
dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
slot_nr, pci_slot_name(slot));
out:
kfree(slot_name);
- up_write(&pci_bus_sem);
+ mutex_unlock(&pci_slot_mutex);
return slot;
err:
kfree(slot);
@@ -332,9 +347,9 @@ void pci_destroy_slot(struct pci_slot *slot)
dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
- down_write(&pci_bus_sem);
+ mutex_lock(&pci_slot_mutex);
kobject_put(&slot->kobj);
- up_write(&pci_bus_sem);
+ mutex_unlock(&pci_slot_mutex);
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
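A minimal sketch of the locking split the slot.c hunks above converge on (hypothetical function; pci_slot_mutex is file-local to slot.c): slot-list updates now serialize on the dedicated mutex, while walks of bus->devices still take pci_bus_sem as a reader, mirroring the pci_create_slot() placeholder path.

/* Assign @slot to every matching device on @bus (sketch of the lock order). */
static void example_assign_slot(struct pci_bus *bus, struct pci_slot *slot)
{
	struct pci_dev *dev;

	mutex_lock(&pci_slot_mutex);		/* protects bus->slots */
	down_read(&pci_bus_sem);		/* protects bus->devices */
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;
	up_read(&pci_bus_sem);
	mutex_unlock(&pci_slot_mutex);
}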
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 8b7a900cd28b..c777b97207d5 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -265,7 +265,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
}
i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
/* Vector is useless at this point. */
op.msix_entries[i].vector = -1;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 984a8ff559d8..483f919e0d2e 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -296,20 +296,18 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
goto err0;
}
- clk = clk_get(&dev->dev, NULL);
+ clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(clk))
return -ENODEV;
pxa2xx_drv_pcmcia_ops(ops);
- sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
- if (!sinfo) {
- clk_put(clk);
+ sinfo = devm_kzalloc(&dev->dev, SKT_DEV_INFO_SIZE(ops->nr),
+ GFP_KERNEL);
+ if (!sinfo)
return -ENOMEM;
- }
sinfo->nskt = ops->nr;
- sinfo->clk = clk;
/* Initialize processor specific parameters */
for (i = 0; i < ops->nr; i++) {
@@ -332,8 +330,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
err1:
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
- clk_put(clk);
- kfree(sinfo);
+
err0:
return ret;
}
@@ -343,13 +340,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
struct skt_dev_info *sinfo = platform_get_drvdata(dev);
int i;
- platform_set_drvdata(dev, NULL);
-
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
- clk_put(sinfo->clk);
- kfree(sinfo);
return 0;
}
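The pcmcia conversions above lean on device-managed (devm_*) resources; a minimal sketch of why the explicit clk_put()/kfree() calls could be dropped (hypothetical probe function):

/*
 * devm_* allocations are released automatically when probe fails or the
 * device is unbound, so error paths need no manual clk_put()/kfree().
 */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
	void *buf;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	buf = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;	/* clk is dropped by the devm core */

	return 0;
}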
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 803945259da8..66acdc85727c 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,8 +93,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
- clk_put(sinfo->clk);
- kfree(sinfo);
return 0;
}
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 80b8e9d05275..a1531feb8460 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -135,8 +135,13 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
int (*add)(struct soc_pcmcia_socket *))
{
struct sa1111_pcmcia_socket *s;
+ struct clk *clk;
int i, ret = 0;
+ clk = devm_clk_get(&dev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
ops->socket_state = sa1111_pcmcia_socket_state;
for (i = 0; i < ops->nr; i++) {
@@ -145,12 +150,8 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
return -ENOMEM;
s->soc.nr = ops->first + i;
- s->soc.clk = clk_get(&dev->dev, NULL);
- if (IS_ERR(s->soc.clk)) {
- ret = PTR_ERR(s->soc.clk);
- kfree(s);
- return ret;
- }
+ s->soc.clk = clk;
+
soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
s->dev = dev;
if (s->soc.nr) {
@@ -226,7 +227,6 @@ static int pcmcia_remove(struct sa1111_dev *dev)
for (; s; s = next) {
next = s->next;
soc_pcmcia_remove_one(&s->soc);
- clk_put(s->soc.clk);
kfree(s);
}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index cf6de2c2b329..9f6ec87b9f9e 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -222,18 +222,17 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
int i, ret = 0;
struct clk *clk;
- clk = clk_get(dev, NULL);
+ clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
sa11xx_drv_pcmcia_ops(ops);
- sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
+ sinfo = devm_kzalloc(dev, SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->nskt = nr;
- sinfo->clk = clk;
/* Initialize processor specific parameters */
for (i = 0; i < nr; i++) {
@@ -251,8 +250,6 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
if (ret) {
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
- clk_put(clk);
- kfree(sinfo);
} else {
dev_set_drvdata(dev, sinfo);
}
@@ -261,16 +258,6 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
}
EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
-static int __init sa11xx_pcmcia_init(void)
-{
- return 0;
-}
-fs_initcall(sa11xx_pcmcia_init);
-
-static void __exit sa11xx_pcmcia_exit(void) {}
-
-module_exit(sa11xx_pcmcia_exit);
-
MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver");
MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index e6fcbea5b682..94762a54d731 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -68,7 +68,6 @@ struct soc_pcmcia_socket {
struct skt_dev_info {
int nskt;
- struct clk *clk;
struct soc_pcmcia_socket skt[0];
};
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
new file mode 100644
index 000000000000..d9de36ee165d
--- /dev/null
+++ b/drivers/perf/Kconfig
@@ -0,0 +1,15 @@
+#
+# Performance Monitor Drivers
+#
+
+menu "Performance monitor support"
+
+config ARM_PMU
+ depends on PERF_EVENTS && ARM
+ bool "ARM PMU framework"
+ default y
+ help
+ Say y if you want to use CPU performance monitors on ARM-based
+ systems.
+
+endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
new file mode 100644
index 000000000000..acd2397ded94
--- /dev/null
+++ b/drivers/perf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
new file mode 100644
index 000000000000..2365a32a595e
--- /dev/null
+++ b/drivers/perf/arm_pmu.c
@@ -0,0 +1,921 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result, ret;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+ if (ret == CACHE_OP_UNSUPPORTED)
+ return -ENOENT;
+
+ return ret;
+}
+
+static int
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+ return (int)(config & raw_event_mask);
+}
+
+int
+armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask)
+{
+ u64 config = event->attr.config;
+ int type = event->attr.type;
+
+ if (type == event->pmu->type)
+ return armpmu_map_raw_event(raw_event_mask, config);
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ return armpmu_map_hw_event(event_map, config);
+ case PERF_TYPE_HW_CACHE:
+ return armpmu_map_cache_event(cache_map, config);
+ case PERF_TYPE_RAW:
+ return armpmu_map_raw_event(raw_event_mask, config);
+ }
+
+ return -ENOENT;
+}
+
+int armpmu_event_set_period(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ /*
+ * Limit the maximum period to prevent the counter value
+ * from overtaking the one we are about to program. In
+ * effect we are reducing max_period to account for
+ * interrupt latency (and we are being very conservative).
+ */
+ if (left > (armpmu->max_period >> 1))
+ left = armpmu->max_period >> 1;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+u64 armpmu_event_update(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = armpmu->read_counter(event);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ return new_raw_count;
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+ armpmu_event_update(event);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * ARM pmu always has to update the counter, so ignore
+ * PERF_EF_UPDATE, see comments in armpmu_start().
+ */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ armpmu->disable(event);
+ armpmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static void armpmu_start(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * ARM pmu always has to reprogram the period, so ignore
+ * PERF_EF_RELOAD, see the comment below.
+ */
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+ /*
+ * Set the period again. Some counters can't be stopped, so when we
+ * were stopped we simply disabled the IRQ source and the counter
+ * may have been left counting. If we don't do this step then we may
+ * get an interrupt too soon or *way* too late if the overflow has
+ * happened since disabling.
+ */
+ armpmu_event_set_period(event);
+ armpmu->enable(event);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ armpmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+ if (armpmu->clear_event_idx)
+ armpmu->clear_event_idx(hw_events, event);
+
+ perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return -ENOENT;
+
+ perf_pmu_disable(event->pmu);
+
+	/* If we don't have space for the counter then finish early. */
+ idx = armpmu->get_event_idx(hw_events, event);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then make
+ * sure it is disabled.
+ */
+ event->hw.idx = idx;
+ armpmu->disable(event);
+ hw_events->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ armpmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static int
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ struct arm_pmu *armpmu;
+
+ if (is_software_event(event))
+ return 1;
+
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != pmu)
+ return 0;
+
+ if (event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ armpmu = to_arm_pmu(event->pmu);
+ return armpmu->get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct pmu_hw_events fake_pmu;
+
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
+
+ if (!validate_event(event->pmu, &fake_pmu, leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
+ return -EINVAL;
+ }
+
+ if (!validate_event(event->pmu, &fake_pmu, event))
+ return -EINVAL;
+
+ return 0;
+}
+
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+{
+ struct arm_pmu *armpmu;
+ struct platform_device *plat_device;
+ struct arm_pmu_platdata *plat;
+ int ret;
+ u64 start_clock, finish_clock;
+
+ /*
+	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
+	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
+	 * do any necessary shifting; we just need to perform the first
+	 * dereference.
+ */
+ armpmu = *(void **)dev;
+ plat_device = armpmu->plat_device;
+ plat = dev_get_platdata(&plat_device->dev);
+
+ start_clock = sched_clock();
+ if (plat && plat->handle_irq)
+ ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
+ else
+ ret = armpmu->handle_irq(irq, armpmu);
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+ return ret;
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+ armpmu->free_irq(armpmu);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+ int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+ if (err) {
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ atomic_t *active_events = &armpmu->active_events;
+ struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+ if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+ armpmu_release_hardware(armpmu);
+ mutex_unlock(pmu_reserve_mutex);
+ }
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+ return attr->exclude_idle || attr->exclude_user ||
+ attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int mapping;
+
+ mapping = armpmu->map_event(event);
+
+ if (mapping < 0) {
+ pr_debug("event %x:%llx not supported\n", event->attr.type,
+ event->attr.config);
+ return mapping;
+ }
+
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+ * clever allocation or constraints checking at this point.
+ */
+ hwc->idx = -1;
+ hwc->config_base = 0;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /*
+ * Check whether we need to exclude the counter from certain modes.
+ */
+ if ((!armpmu->set_event_filter ||
+ armpmu->set_event_filter(hwc, &event->attr)) &&
+ event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Store the event encoding into the config_base field.
+ */
+ hwc->config_base |= (unsigned long)mapping;
+
+ if (!is_sampling_event(event)) {
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ if (event->group_leader != event) {
+ if (validate_group(event) != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ int err = 0;
+ atomic_t *active_events = &armpmu->active_events;
+
+ /*
+ * Reject CPU-affine events for CPUs that are of a different class to
+ * that which this PMU handles. Process-following events (where
+ * event->cpu == -1) can be migrated between CPUs, and thus we have to
+ * reject them later (in armpmu_add) if they're scheduled on a
+ * different class of CPU.
+ */
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+ return -ENOENT;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (armpmu->map_event(event) == -ENOENT)
+ return -ENOENT;
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (!atomic_inc_not_zero(active_events)) {
+ mutex_lock(&armpmu->reserve_mutex);
+ if (atomic_read(active_events) == 0)
+ err = armpmu_reserve_hardware(armpmu);
+
+ if (!err)
+ atomic_inc(active_events);
+ mutex_unlock(&armpmu->reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+ int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
+
+ if (enabled)
+ armpmu->start(armpmu);
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+ /* For task-bound events we may be called on other CPUs */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return;
+
+ armpmu->stop(armpmu);
+}
+
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ unsigned int cpu = smp_processor_id();
+ return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+}
+
+static void armpmu_init(struct arm_pmu *armpmu)
+{
+ atomic_set(&armpmu->active_events, 0);
+ mutex_init(&armpmu->reserve_mutex);
+
+ armpmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ .filter_match = armpmu_filter_match,
+ };
+}
+
+int armpmu_register(struct arm_pmu *armpmu, int type)
+{
+ armpmu_init(armpmu);
+ pr_info("enabled with %s PMU driver, %d counters available\n",
+ armpmu->name, armpmu->num_events);
+ return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
+}
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+ if (!__oprofile_cpu_pmu)
+ return NULL;
+
+ return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ int max_events = 0;
+
+ if (__oprofile_cpu_pmu != NULL)
+ max_events = __oprofile_cpu_pmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+ int irq = *(int *)data;
+
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+ int irq = *(int *)data;
+
+ disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+ int i, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ }
+ }
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+ if (!pmu_device)
+ return -ENODEV;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
+		pr_warn_once("perf/ARM: no IRQs defined for PMU, sampling events not supported\n");
+ return 0;
+ }
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &hw_events->percpu_pmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
+ on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ if (cpu_pmu->irq_affinity)
+ cpu = cpu_pmu->irq_affinity[i];
+
+ /*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * keep using it. Otherwise, skip this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ continue;
+ }
+
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
+
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return NOTIFY_DONE;
+
+ if (pmu->reset)
+ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int err;
+ int cpu;
+ struct pmu_hw_events __percpu *cpu_hw_events;
+
+ cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!cpu_hw_events)
+ return -ENOMEM;
+
+ cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+ err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+ if (err)
+ goto out_hw_events;
+
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ events->percpu_pmu = cpu_pmu;
+ }
+
+ cpu_pmu->hw_events = cpu_hw_events;
+ cpu_pmu->request_irq = cpu_pmu_request_irq;
+ cpu_pmu->free_irq = cpu_pmu_free_irq;
+
+ /* Ensure the PMU has sane values out of reset. */
+ if (cpu_pmu->reset)
+ on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+ cpu_pmu, 1);
+
+ /* If no interrupts available, set the corresponding capability flag */
+ if (!platform_get_irq(cpu_pmu->plat_device, 0))
+ cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+ return 0;
+
+out_hw_events:
+ free_percpu(cpu_hw_events);
+ return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+ unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+ free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+ const struct pmu_probe_info *info)
+{
+ int cpu = get_cpu();
+ unsigned int cpuid = read_cpuid_id();
+ int ret = -ENODEV;
+
+ pr_info("probing PMU on CPU %d\n", cpu);
+
+ for (; info->init != NULL; info++) {
+ if ((cpuid & info->mask) != info->cpuid)
+ continue;
+ ret = info->init(pmu);
+ break;
+ }
+
+ put_cpu();
+ return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+ int *irqs, i = 0;
+ bool using_spi = false;
+ struct platform_device *pdev = pmu->plat_device;
+
+ irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ do {
+ struct device_node *dn;
+ int cpu, irq;
+
+ /* See if we have an affinity entry */
+ dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
+ if (!dn)
+ break;
+
+ /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
+ irq = platform_get_irq(pdev, i);
+ if (irq >= 0) {
+ bool spi = !irq_is_percpu(irq);
+
+ if (i > 0 && spi != using_spi) {
+ pr_err("PPI/SPI IRQ type mismatch for %s!\n",
+ dn->name);
+ kfree(irqs);
+ return -EINVAL;
+ }
+
+ using_spi = spi;
+ }
+
+ /* Now look up the logical CPU number */
+ for_each_possible_cpu(cpu)
+ if (dn == of_cpu_device_node_get(cpu))
+ break;
+
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("Failed to find logical CPU for %s\n",
+ dn->name);
+ of_node_put(dn);
+ cpumask_setall(&pmu->supported_cpus);
+ break;
+ }
+ of_node_put(dn);
+
+ /* For SPIs, we need to track the affinity per IRQ */
+ if (using_spi) {
+ if (i >= pdev->num_resources) {
+ of_node_put(dn);
+ break;
+ }
+
+ irqs[i] = cpu;
+ }
+
+ /* Keep track of the CPUs containing this PMU type */
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ of_node_put(dn);
+ i++;
+ } while (1);
+
+ /* If we didn't manage to parse anything, claim to support all CPUs */
+ if (cpumask_weight(&pmu->supported_cpus) == 0)
+ cpumask_setall(&pmu->supported_cpus);
+
+ /* If we matched up the IRQ affinities, use them to route the SPIs */
+ if (using_spi && i == pdev->num_resources)
+ pmu->irq_affinity = irqs;
+ else
+ kfree(irqs);
+
+ return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table)
+{
+ const struct of_device_id *of_id;
+ const int (*init_fn)(struct arm_pmu *);
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+ int ret = -ENODEV;
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+ if (!pmu) {
+ pr_info("failed to allocate PMU device!\n");
+ return -ENOMEM;
+ }
+
+ if (!__oprofile_cpu_pmu)
+ __oprofile_cpu_pmu = pmu;
+
+ pmu->plat_device = pdev;
+
+ if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+ init_fn = of_id->data;
+
+ ret = of_pmu_irq_cfg(pmu);
+ if (!ret)
+ ret = init_fn(pmu);
+ } else {
+ ret = probe_current_pmu(pmu, probe_table);
+ cpumask_setall(&pmu->supported_cpus);
+ }
+
+ if (ret) {
+ pr_info("failed to probe PMU!\n");
+ goto out_free;
+ }
+
+ ret = cpu_pmu_init(pmu);
+ if (ret)
+ goto out_free;
+
+ ret = armpmu_register(pmu, -1);
+ if (ret)
+ goto out_destroy;
+
+ return 0;
+
+out_destroy:
+ cpu_pmu_destroy(pmu);
+out_free:
+ pr_info("failed to register PMU devices!\n");
+ kfree(pmu);
+ return ret;
+}
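A worked example of the wraparound arithmetic in armpmu_event_set_period() and armpmu_event_update() above (values hypothetical): the counter is programmed with (u64)(-left) so it overflows after `left` increments, and the delta is masked with max_period so a read that crosses the wrap still yields the right count.

/* Sketch: a 32-bit counter wraps from 0xfffffff0 to 0x20 after 0x30 events. */
static u64 example_wrapped_delta(void)
{
	const u64 max_period = 0xffffffff;	/* 32-bit counter mask */
	u64 prev = 0xfffffff0, now = 0x20;

	return (now - prev) & max_period;	/* == 0x30 despite the wrap */
}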
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..47da573d0bab 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -54,8 +54,20 @@ config PHY_EXYNOS_MIPI_VIDEO
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
and EXYNOS SoCs.
+config PHY_LPC18XX_USB_OTG
+ tristate "NXP LPC18xx/43xx SoC USB OTG PHY driver"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on MFD_SYSCON
+ select GENERIC_PHY
+ help
+ Enable this to support NXP LPC18xx/43xx internal USB OTG PHY.
+
+	  This driver is needed for USB0 support on LPC18xx/43xx and takes
+	  care of PHY enabling and clock setup.
+
config PHY_PXA_28NM_HSIC
tristate "Marvell USB HSIC 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +78,7 @@ config PHY_PXA_28NM_HSIC
config PHY_PXA_28NM_USB2
tristate "Marvell USB 2.0 28nm PHY Driver"
+ depends on HAS_IOMEM
select GENERIC_PHY
help
Enable this to support Marvell USB 2.0 PHY driver for Marvell
@@ -197,6 +210,8 @@ config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
depends on RESET_CONTROLLER
+ depends on EXTCON
+ depends on POWER_SUPPLY
select GENERIC_PHY
help
Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index f344e1b2e825..a5b18c18fc12 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
+obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 8ccc3952c13d..1a3db288c0a9 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -51,7 +51,7 @@ static int armada375_usb_phy_init(struct phy *phy)
return 0;
}
-static struct phy_ops armada375_usb_phy_ops = {
+static const struct phy_ops armada375_usb_phy_ops = {
.init = armada375_usb_phy_init,
.owner = THIS_MODULE,
};
@@ -149,7 +149,6 @@ static struct platform_driver armada375_usb_phy_driver = {
.driver = {
.of_match_table = of_usb_cluster_table,
.name = "armada-375-usb-cluster",
- .owner = THIS_MODULE,
}
};
module_platform_driver(armada375_usb_phy_driver);
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
index ef2dc1aab2b9..7b67fe49e30b 100644
--- a/drivers/phy/phy-bcm-kona-usb2.c
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -91,7 +91,7 @@ static int bcm_kona_usb_phy_power_off(struct phy *gphy)
return 0;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = bcm_kona_usb_phy_init,
.power_on = bcm_kona_usb_phy_power_on,
.power_off = bcm_kona_usb_phy_power_off,
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 6f3e06d687de..0062027afb1e 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -176,7 +176,7 @@ static struct phy *phy_berlin_sata_phy_xlate(struct device *dev,
return priv->phys[i]->phy;
}
-static struct phy_ops phy_berlin_sata_ops = {
+static const struct phy_ops phy_berlin_sata_ops = {
.power_on = phy_berlin_sata_power_on,
.power_off = phy_berlin_sata_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..797ba17c404f 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
static const u32 phy_berlin_pll_dividers[] = {
/* Berlin 2 */
- CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
- /* Berlin 2CD */
CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+ /* Berlin 2CD/Q */
+ CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
};
struct phy_berlin_usb_priv {
@@ -147,12 +147,12 @@ static int phy_berlin_usb_power_on(struct phy *phy)
return 0;
}
-static struct phy_ops phy_berlin_usb_ops = {
+static const struct phy_ops phy_berlin_usb_ops = {
.power_on = phy_berlin_usb_power_on,
.owner = THIS_MODULE,
};
-static const struct of_device_id phy_berlin_sata_of_match[] = {
+static const struct of_device_id phy_berlin_usb_of_match[] = {
{
.compatible = "marvell,berlin2-usb-phy",
.data = &phy_berlin_pll_dividers[0],
@@ -163,12 +163,12 @@ static const struct of_device_id phy_berlin_sata_of_match[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
+MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
static int phy_berlin_usb_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
- of_match_device(phy_berlin_sata_of_match, &pdev->dev);
+ of_match_device(phy_berlin_usb_of_match, &pdev->dev);
struct phy_berlin_usb_priv *priv;
struct resource *res;
struct phy *phy;
@@ -207,9 +207,8 @@ static struct platform_driver phy_berlin_usb_driver = {
.probe = phy_berlin_usb_probe,
.driver = {
.name = "phy-berlin-usb",
- .owner = THIS_MODULE,
- .of_match_table = phy_berlin_sata_of_match,
- },
+ .of_match_table = phy_berlin_usb_of_match,
+ },
};
module_platform_driver(phy_berlin_usb_driver);
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index b7e303d28caf..8a2cb16a1937 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -122,7 +122,7 @@ static int brcm_sata_phy_init(struct phy *phy)
return 0;
}
-static struct phy_ops phy_ops_28nm = {
+static const struct phy_ops phy_ops_28nm = {
.init = brcm_sata_phy_init,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/phy-dm816x-usb.c b/drivers/phy/phy-dm816x-usb.c
index 7b42555ddd51..b4bbef664d20 100644
--- a/drivers/phy/phy-dm816x-usb.c
+++ b/drivers/phy/phy-dm816x-usb.c
@@ -113,7 +113,7 @@ static int dm816x_usb_phy_init(struct phy *x)
return 0;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = dm816x_usb_phy_init,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 179cbf9451aa..34b06154e5d9 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -48,7 +48,7 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
EXYNOS5_PHY_ENABLE, 0);
}
-static struct phy_ops exynos_dp_video_phy_ops = {
+static const struct phy_ops exynos_dp_video_phy_ops = {
.power_on = exynos_dp_video_phy_power_on,
.power_off = exynos_dp_video_phy_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index df7519a39ba0..2a54caba93b4 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -124,7 +124,7 @@ static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
return state->phys[args->args[0]].phy;
}
-static struct phy_ops exynos_mipi_video_phy_ops = {
+static const struct phy_ops exynos_mipi_video_phy_ops = {
.power_on = exynos_mipi_video_phy_power_on,
.power_off = exynos_mipi_video_phy_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index d72ef15b0d68..20696f53303f 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -537,7 +537,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
return phy_drd->phys[args->args[0]].phy;
}
-static struct phy_ops exynos5_usbdrd_phy_ops = {
+static const struct phy_ops exynos5_usbdrd_phy_ops = {
.init = exynos5_usbdrd_phy_init,
.exit = exynos5_usbdrd_phy_exit,
.power_on = exynos5_usbdrd_phy_power_on,
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/phy-exynos5250-sata.c
index bc858cc800a1..60e13afcd9b8 100644
--- a/drivers/phy/phy-exynos5250-sata.c
+++ b/drivers/phy/phy-exynos5250-sata.c
@@ -154,7 +154,7 @@ static int exynos_sata_phy_init(struct phy *phy)
return ret;
}
-static struct phy_ops exynos_sata_phy_ops = {
+static const struct phy_ops exynos_sata_phy_ops = {
.init = exynos_sata_phy_init,
.power_on = exynos_sata_phy_power_on,
.power_off = exynos_sata_phy_power_off,
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index d6b22659cac1..e5ab3aa78b9d 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -129,7 +129,7 @@ static int hix5hd2_sata_phy_init(struct phy *phy)
return 0;
}
-static struct phy_ops hix5hd2_sata_phy_ops = {
+static const struct phy_ops hix5hd2_sata_phy_ops = {
.init = hix5hd2_sata_phy_init,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/phy-lpc18xx-usb-otg.c b/drivers/phy/phy-lpc18xx-usb-otg.c
new file mode 100644
index 000000000000..3b7a71eb5b7e
--- /dev/null
+++ b/drivers/phy/phy-lpc18xx-usb-otg.c
@@ -0,0 +1,143 @@
+/*
+ * PHY driver for NXP LPC18xx/43xx internal USB OTG PHY
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* USB OTG PHY register offset and bit in CREG */
+#define LPC18XX_CREG_CREG0 0x004
+#define LPC18XX_CREG_CREG0_USB0PHY BIT(5)
+
+struct lpc18xx_usb_otg_phy {
+ struct phy *phy;
+ struct clk *clk;
+ struct regmap *reg;
+};
+
+static int lpc18xx_usb_otg_phy_init(struct phy *phy)
+{
+ struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+ int ret;
+
+ /* The PHY must be clocked at 480 MHz */
+ ret = clk_set_rate(lpc->clk, 480000000);
+ if (ret)
+ return ret;
+
+ return clk_prepare(lpc->clk);
+}
+
+static int lpc18xx_usb_otg_phy_exit(struct phy *phy)
+{
+ struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+
+ clk_unprepare(lpc->clk);
+
+ return 0;
+}
+
+static int lpc18xx_usb_otg_phy_power_on(struct phy *phy)
+{
+ struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_enable(lpc->clk);
+ if (ret)
+ return ret;
+
+ /* The bit in CREG is cleared to enable the PHY */
+ return regmap_update_bits(lpc->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_USB0PHY, 0);
+}
+
+static int lpc18xx_usb_otg_phy_power_off(struct phy *phy)
+{
+ struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regmap_update_bits(lpc->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_USB0PHY,
+ LPC18XX_CREG_CREG0_USB0PHY);
+ if (ret)
+ return ret;
+
+ clk_disable(lpc->clk);
+
+ return 0;
+}
+
+static const struct phy_ops lpc18xx_usb_otg_phy_ops = {
+ .init = lpc18xx_usb_otg_phy_init,
+ .exit = lpc18xx_usb_otg_phy_exit,
+ .power_on = lpc18xx_usb_otg_phy_power_on,
+ .power_off = lpc18xx_usb_otg_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int lpc18xx_usb_otg_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct lpc18xx_usb_otg_phy *lpc;
+
+ lpc = devm_kzalloc(&pdev->dev, sizeof(*lpc), GFP_KERNEL);
+ if (!lpc)
+ return -ENOMEM;
+
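+	/* The PHY node is a child of the CREG syscon, so use the parent's regmap */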
+ lpc->reg = syscon_node_to_regmap(pdev->dev.of_node->parent);
+ if (IS_ERR(lpc->reg)) {
+ dev_err(&pdev->dev, "failed to get syscon\n");
+ return PTR_ERR(lpc->reg);
+ }
+
+ lpc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lpc->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(lpc->clk);
+ }
+
+ lpc->phy = devm_phy_create(&pdev->dev, NULL, &lpc18xx_usb_otg_phy_ops);
+ if (IS_ERR(lpc->phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(lpc->phy);
+ }
+
+ phy_set_drvdata(lpc->phy, lpc);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id lpc18xx_usb_otg_phy_match[] = {
+ { .compatible = "nxp,lpc1850-usb-otg-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_usb_otg_phy_match);
+
+static struct platform_driver lpc18xx_usb_otg_phy_driver = {
+ .probe = lpc18xx_usb_otg_phy_probe,
+ .driver = {
+ .name = "lpc18xx-usb-otg-phy",
+ .of_match_table = lpc18xx_usb_otg_phy_match,
+ },
+};
+module_platform_driver(lpc18xx_usb_otg_phy_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx/43xx USB OTG PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 5e257ef7ac05..c47b56b4a2b8 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1132,7 +1132,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
return miphy_phy->phy;
}
-static struct phy_ops miphy28lp_ops = {
+static const struct phy_ops miphy28lp_ops = {
.init = miphy28lp_init,
.owner = THIS_MODULE,
};
@@ -1268,7 +1268,6 @@ static struct platform_driver miphy28lp_driver = {
.probe = miphy28lp_probe,
.driver = {
.name = "miphy28lp-phy",
- .owner = THIS_MODULE,
.of_match_table = miphy28lp_of_match,
}
};
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 0ff354d6e183..00a686a073ed 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -510,7 +510,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
return miphy_phy->phy;
}
-static struct phy_ops miphy365x_ops = {
+static const struct phy_ops miphy365x_ops = {
.init = miphy365x_init,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index 03b94f92e6f1..768ce92e81ce 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -75,7 +75,7 @@ static int phy_mvebu_sata_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops phy_mvebu_sata_ops = {
+static const struct phy_ops phy_mvebu_sata_ops = {
.power_on = phy_mvebu_sata_power_on,
.power_off = phy_mvebu_sata_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index c1a468686bdc..0fe80589ffbe 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -137,7 +137,7 @@ static int omap_usb_init(struct phy *x)
return 0;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = omap_usb_init,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
diff --git a/drivers/phy/phy-qcom-apq8064-sata.c b/drivers/phy/phy-qcom-apq8064-sata.c
index 4b243f7a10e4..69ce2afac015 100644
--- a/drivers/phy/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/phy-qcom-apq8064-sata.c
@@ -204,7 +204,7 @@ static int qcom_apq8064_sata_phy_exit(struct phy *generic_phy)
return 0;
}
-static struct phy_ops qcom_apq8064_sata_phy_ops = {
+static const struct phy_ops qcom_apq8064_sata_phy_ops = {
.init = qcom_apq8064_sata_phy_init,
.exit = qcom_apq8064_sata_phy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-qcom-ipq806x-sata.c b/drivers/phy/phy-qcom-ipq806x-sata.c
index 6f2fe2627916..0ad127cc9298 100644
--- a/drivers/phy/phy-qcom-ipq806x-sata.c
+++ b/drivers/phy/phy-qcom-ipq806x-sata.c
@@ -126,7 +126,7 @@ static int qcom_ipq806x_sata_phy_exit(struct phy *generic_phy)
return 0;
}
-static struct phy_ops qcom_ipq806x_sata_phy_ops = {
+static const struct phy_ops qcom_ipq806x_sata_phy_ops = {
.init = qcom_ipq806x_sata_phy_init,
.exit = qcom_ipq806x_sata_phy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 591a39175e8a..2bd5ce43a724 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -150,7 +150,7 @@ int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
struct ufs_qcom_phy *common_cfg,
- struct phy_ops *ufs_qcom_phy_gen_ops,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
struct ufs_qcom_phy_specific_ops *phy_spec_ops);
int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index f5fc50a9fce7..56631e77c11d 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -115,7 +115,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
return err;
}
-static struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
+static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.init = ufs_qcom_phy_qmp_14nm_init,
.exit = ufs_qcom_phy_exit,
.power_on = ufs_qcom_phy_power_on,
@@ -191,7 +191,6 @@ static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
.driver = {
.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
.name = "ufs_qcom_phy_qmp_14nm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 8332f96b2c4a..b16ea77d07b9 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -171,7 +171,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
return err;
}
-static struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
+static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.init = ufs_qcom_phy_qmp_20nm_init,
.exit = ufs_qcom_phy_exit,
.power_on = ufs_qcom_phy_power_on,
@@ -247,7 +247,6 @@ static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
.driver = {
.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
.name = "ufs_qcom_phy_qmp_20nm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index f9c618f0ab6e..49a1ed0cef56 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
struct ufs_qcom_phy *common_cfg,
- struct phy_ops *ufs_qcom_phy_gen_ops,
+ const struct phy_ops *ufs_qcom_phy_gen_ops,
struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
int err;
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 39d9b2995435..6e0d9fa8e1d1 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -184,7 +184,7 @@ static int rcar_gen2_phy_power_off(struct phy *p)
return 0;
}
-static struct phy_ops rcar_gen2_phy_ops = {
+static const struct phy_ops rcar_gen2_phy_ops = {
.init = rcar_gen2_phy_init,
.exit = rcar_gen2_phy_exit,
.power_on = rcar_gen2_phy_power_on,
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 7d4c33643768..5a5c073e72fe 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -84,7 +84,7 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
return 0;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.power_on = rockchip_usb_phy_power_on,
.power_off = rockchip_usb_phy_power_off,
.owner = THIS_MODULE,
@@ -146,7 +146,6 @@ static struct platform_driver rockchip_usb_driver = {
.probe = rockchip_usb_phy_probe,
.driver = {
.name = "rockchip-usb-phy",
- .owner = THIS_MODULE,
.of_match_table = rockchip_usb_phy_dt_ids,
},
};
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 55b6994932e3..f278a9c547e1 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -71,7 +71,7 @@ static int samsung_usb2_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops samsung_usb2_phy_ops = {
+static const struct phy_ops samsung_usb2_phy_ops = {
.power_on = samsung_usb2_phy_power_on,
.power_off = samsung_usb2_phy_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c
index 45d0005b2203..ed67e98e54ca 100644
--- a/drivers/phy/phy-spear1310-miphy.c
+++ b/drivers/phy/phy-spear1310-miphy.c
@@ -179,7 +179,7 @@ static const struct of_device_id spear1310_miphy_of_match[] = {
};
MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match);
-static struct phy_ops spear1310_miphy_ops = {
+static const struct phy_ops spear1310_miphy_ops = {
.init = spear1310_miphy_init,
.exit = spear1310_miphy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c
index 494240da4a39..97280c0cf612 100644
--- a/drivers/phy/phy-spear1340-miphy.c
+++ b/drivers/phy/phy-spear1340-miphy.c
@@ -189,7 +189,7 @@ static const struct of_device_id spear1340_miphy_of_match[] = {
};
MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match);
-static struct phy_ops spear1340_miphy_ops = {
+static const struct phy_ops spear1340_miphy_ops = {
.init = spear1340_miphy_init,
.exit = spear1340_miphy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
index c093b472b57d..0ac74639ad02 100644
--- a/drivers/phy/phy-stih41x-usb.c
+++ b/drivers/phy/phy-stih41x-usb.c
@@ -112,7 +112,7 @@ static int stih41x_usb_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops stih41x_usb_phy_ops = {
+static const struct phy_ops stih41x_usb_phy_ops = {
.init = stih41x_usb_phy_init,
.power_on = stih41x_usb_phy_power_on,
.power_off = stih41x_usb_phy_power_off,
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..731b395d6e6a 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -1,7 +1,7 @@
/*
* Allwinner sun4i USB phy driver
*
- * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2014-2015 Hans de Goede <hdegoede@redhat.com>
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
@@ -22,23 +22,30 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/extcon.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
+#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#define REG_ISCR 0x00
-#define REG_PHYCTL 0x04
+#define REG_PHYCTL_A10 0x04
#define REG_PHYBIST 0x08
#define REG_PHYTUNE 0x0c
+#define REG_PHYCTL_A33 0x10
#define PHYCTL_DATA BIT(7)
@@ -47,6 +54,17 @@
#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
#define SUNXI_ULPI_BYPASS_EN BIT(0)
+/* ISCR, Interface Status and Control bits */
+#define ISCR_ID_PULLUP_EN (1 << 17)
+#define ISCR_DPDM_PULLUP_EN (1 << 16)
+/* On sunxi the phy id/vbus pins are not connected, so we use the force bits */
+#define ISCR_FORCE_ID_MASK (3 << 14)
+#define ISCR_FORCE_ID_LOW (2 << 14)
+#define ISCR_FORCE_ID_HIGH (3 << 14)
+#define ISCR_FORCE_VBUS_MASK (3 << 12)
+#define ISCR_FORCE_VBUS_LOW (2 << 12)
+#define ISCR_FORCE_VBUS_HIGH (3 << 12)
+
/* Common Control Bits for Both PHYs */
#define PHY_PLL_BW 0x03
#define PHY_RES45_CAL_EN 0x0c
@@ -63,60 +81,124 @@
#define MAX_PHYS 3
+/*
+ * Note: do not raise the debounce time; we must report Vbus high within
+ * 100ms, otherwise we get Vbus errors.
+ */
+#define DEBOUNCE_TIME msecs_to_jiffies(50)
+#define POLL_TIME msecs_to_jiffies(250)
+
struct sun4i_usb_phy_data {
void __iomem *base;
struct mutex mutex;
int num_phys;
u32 disc_thresh;
+ bool has_a33_phyctl;
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
struct regulator *vbus;
struct reset_control *reset;
struct clk *clk;
+ bool regulator_on;
int index;
} phys[MAX_PHYS];
+ /* phy0 / otg related variables */
+ struct extcon_dev *extcon;
+ bool phy0_init;
+ bool phy0_poll;
+ struct gpio_desc *id_det_gpio;
+ struct gpio_desc *vbus_det_gpio;
+ struct power_supply *vbus_power_supply;
+ struct notifier_block vbus_power_nb;
+ bool vbus_power_nb_registered;
+ int id_det_irq;
+ int vbus_det_irq;
+ int id_det;
+ int vbus_det;
+ struct delayed_work detect;
};
#define to_sun4i_usb_phy_data(phy) \
container_of((phy), struct sun4i_usb_phy_data, phys[(phy)->index])
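+/* Read-modify-write helper for the ISCR (Interface Status and Control) reg */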
+static void sun4i_usb_phy0_update_iscr(struct phy *_phy, u32 clr, u32 set)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ u32 iscr;
+
+ iscr = readl(data->base + REG_ISCR);
+ iscr &= ~clr;
+ iscr |= set;
+ writel(iscr, data->base + REG_ISCR);
+}
+
+static void sun4i_usb_phy0_set_id_detect(struct phy *phy, u32 val)
+{
+ if (val)
+ val = ISCR_FORCE_ID_HIGH;
+ else
+ val = ISCR_FORCE_ID_LOW;
+
+ sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_ID_MASK, val);
+}
+
+static void sun4i_usb_phy0_set_vbus_detect(struct phy *phy, u32 val)
+{
+ if (val)
+ val = ISCR_FORCE_VBUS_HIGH;
+ else
+ val = ISCR_FORCE_VBUS_LOW;
+
+ sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_VBUS_MASK, val);
+}
+
static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
int len)
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
+ void __iomem *phyctl;
int i;
mutex_lock(&phy_data->mutex);
+ if (phy_data->has_a33_phyctl) {
+ phyctl = phy_data->base + REG_PHYCTL_A33;
+ /* A33 needs us to set phyctl to 0 explicitly */
+ writel(0, phyctl);
+ } else {
+ phyctl = phy_data->base + REG_PHYCTL_A10;
+ }
+
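+ /* Shift the value out lsb first, one phyctl write cycle per bit */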
for (i = 0; i < len; i++) {
- temp = readl(phy_data->base + REG_PHYCTL);
+ temp = readl(phyctl);
/* clear the address portion */
temp &= ~(0xff << 8);
/* set the address */
temp |= ((addr + i) << 8);
- writel(temp, phy_data->base + REG_PHYCTL);
+ writel(temp, phyctl);
/* set the data bit and clear usbc bit */
- temp = readb(phy_data->base + REG_PHYCTL);
+ temp = readb(phyctl);
if (data & 0x1)
temp |= PHYCTL_DATA;
else
temp &= ~PHYCTL_DATA;
temp &= ~usbc_bit;
- writeb(temp, phy_data->base + REG_PHYCTL);
+ writeb(temp, phyctl);
/* pulse usbc_bit */
- temp = readb(phy_data->base + REG_PHYCTL);
+ temp = readb(phyctl);
temp |= usbc_bit;
- writeb(temp, phy_data->base + REG_PHYCTL);
+ writeb(temp, phyctl);
- temp = readb(phy_data->base + REG_PHYCTL);
+ temp = readb(phyctl);
temp &= ~usbc_bit;
- writeb(temp, phy_data->base + REG_PHYCTL);
+ writeb(temp, phyctl);
data >>= 1;
}
@@ -171,12 +253,39 @@ static int sun4i_usb_phy_init(struct phy *_phy)
sun4i_usb_phy_passby(phy, 1);
+ if (phy->index == 0) {
+ data->phy0_init = true;
+
+ /* Enable pull-ups */
+ sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
+ sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
+
+ if (data->id_det_gpio) {
+ /* OTG mode, force ISCR and cable state updates */
+ data->id_det = -1;
+ data->vbus_det = -1;
+ queue_delayed_work(system_wq, &data->detect, 0);
+ } else {
+ /* Host only mode */
+ sun4i_usb_phy0_set_id_detect(_phy, 0);
+ sun4i_usb_phy0_set_vbus_detect(_phy, 1);
+ }
+ }
+
return 0;
}
static int sun4i_usb_phy_exit(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+ if (phy->index == 0) {
+ /* Disable pull-ups */
+ sun4i_usb_phy0_update_iscr(_phy, ISCR_DPDM_PULLUP_EN, 0);
+ sun4i_usb_phy0_update_iscr(_phy, ISCR_ID_PULLUP_EN, 0);
+ data->phy0_init = false;
+ }
sun4i_usb_phy_passby(phy, 0);
reset_control_assert(phy->reset);
@@ -185,23 +294,74 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
return 0;
}
+static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
+{
+ if (data->vbus_det_gpio)
+ return gpiod_get_value_cansleep(data->vbus_det_gpio);
+
+ if (data->vbus_power_supply) {
+ union power_supply_propval val;
+ int r;
+
+ r = power_supply_get_property(data->vbus_power_supply,
+ POWER_SUPPLY_PROP_PRESENT, &val);
+ if (r == 0)
+ return val.intval;
+ }
+
+ /* Fallback: report vbus as high */
+ return 1;
+}
+
+static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
+{
+ return data->vbus_det_gpio || data->vbus_power_supply;
+}
+
static int sun4i_usb_phy_power_on(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
- int ret = 0;
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ int ret;
+
+ if (!phy->vbus || phy->regulator_on)
+ return 0;
+
+ /* For phy0, only turn on Vbus if no external Vbus is present */
+ if (phy->index == 0 && sun4i_usb_phy0_have_vbus_det(data) &&
+ data->vbus_det)
+ return 0;
+
+ ret = regulator_enable(phy->vbus);
+ if (ret)
+ return ret;
+
+ phy->regulator_on = true;
- if (phy->vbus)
- ret = regulator_enable(phy->vbus);
+ /* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
+ if (phy->index == 0 && data->vbus_det_gpio && data->phy0_poll)
+ mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
- return ret;
+ return 0;
}
static int sun4i_usb_phy_power_off(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+ if (!phy->vbus || !phy->regulator_on)
+ return 0;
+
+ regulator_disable(phy->vbus);
+ phy->regulator_on = false;
- if (phy->vbus)
- regulator_disable(phy->vbus);
+ /*
+ * phy0's Vbus typically discharges slowly; sometimes this keeps the
+ * Vbus gpio from triggering an edge irq on Vbus off, so force a rescan.
+ */
+ if (phy->index == 0 && data->vbus_det_gpio && !data->phy0_poll)
+ mod_delayed_work(system_wq, &data->detect, POLL_TIME);
return 0;
}
@@ -212,8 +372,9 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
}
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
-static struct phy_ops sun4i_usb_phy_ops = {
+static const struct phy_ops sun4i_usb_phy_ops = {
.init = sun4i_usb_phy_init,
.exit = sun4i_usb_phy_exit,
.power_on = sun4i_usb_phy_power_on,
@@ -221,6 +382,95 @@ static struct phy_ops sun4i_usb_phy_ops = {
.owner = THIS_MODULE,
};
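+/* Delayed work: debounce the id/vbus detect pins and update extcon */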
+static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+{
+ struct sun4i_usb_phy_data *data =
+ container_of(work, struct sun4i_usb_phy_data, detect.work);
+ struct phy *phy0 = data->phys[0].phy;
+ int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
+
+ id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+ vbus_det = sun4i_usb_phy0_get_vbus_det(data);
+
+ mutex_lock(&phy0->mutex);
+
+ if (!data->phy0_init) {
+ mutex_unlock(&phy0->mutex);
+ return;
+ }
+
+ if (id_det != data->id_det) {
+ /*
+ * When a host cable (id == 0) gets plugged in on systems
+ * without vbus detection, report vbus low for long enough for
+ * the musb-ip to end the current device session.
+ */
+ if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+ sun4i_usb_phy0_set_vbus_detect(phy0, 0);
+ msleep(200);
+ sun4i_usb_phy0_set_vbus_detect(phy0, 1);
+ }
+ sun4i_usb_phy0_set_id_detect(phy0, id_det);
+ data->id_det = id_det;
+ id_notify = 1;
+ }
+
+ if (vbus_det != data->vbus_det) {
+ sun4i_usb_phy0_set_vbus_detect(phy0, vbus_det);
+ data->vbus_det = vbus_det;
+ vbus_notify = 1;
+ }
+
+ mutex_unlock(&phy0->mutex);
+
+ if (id_notify) {
+ extcon_set_cable_state_(data->extcon, EXTCON_USB_HOST,
+ !id_det);
+ /*
+ * When a host cable gets unplugged (id == 1) on systems
+ * without vbus detection, report vbus low for long enough for
+ * the musb-ip to end the current host session.
+ */
+ if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+ mutex_lock(&phy0->mutex);
+ sun4i_usb_phy0_set_vbus_detect(phy0, 0);
+ msleep(1000);
+ sun4i_usb_phy0_set_vbus_detect(phy0, 1);
+ mutex_unlock(&phy0->mutex);
+ }
+ }
+
+ if (vbus_notify)
+ extcon_set_cable_state_(data->extcon, EXTCON_USB, vbus_det);
+
+ if (data->phy0_poll)
+ queue_delayed_work(system_wq, &data->detect, POLL_TIME);
+}
+
+static irqreturn_t sun4i_usb_phy0_id_vbus_det_irq(int irq, void *dev_id)
+{
+ struct sun4i_usb_phy_data *data = dev_id;
+
+ /* vbus or id changed, let the pins settle and then scan them */
+ mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_usb_phy0_vbus_notify(struct notifier_block *nb,
+ unsigned long val, void *v)
+{
+ struct sun4i_usb_phy_data *data =
+ container_of(nb, struct sun4i_usb_phy_data, vbus_power_nb);
+ struct power_supply *psy = v;
+
+ /* Properties on the vbus_power_supply changed, scan vbus_det */
+ if (val == PSY_EVENT_PROP_CHANGED && psy == data->vbus_power_supply)
+ mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
+
+ return NOTIFY_OK;
+}
+
static struct phy *sun4i_usb_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -232,6 +482,29 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
return data->phys[args->args[0]].phy;
}
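+/* Also called from probe error paths to stop the detect work and free irqs */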
+static int sun4i_usb_phy_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
+
+ if (data->vbus_power_nb_registered)
+ power_supply_unreg_notifier(&data->vbus_power_nb);
+ if (data->id_det_irq >= 0)
+ devm_free_irq(dev, data->id_det_irq, data);
+ if (data->vbus_det_irq >= 0)
+ devm_free_irq(dev, data->vbus_det_irq, data);
+
+ cancel_delayed_work_sync(&data->detect);
+
+ return 0;
+}
+
+static const unsigned int sun4i_usb_phy0_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
static int sun4i_usb_phy_probe(struct platform_device *pdev)
{
struct sun4i_usb_phy_data *data;
@@ -240,35 +513,87 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
bool dedicated_clocks;
struct resource *res;
- int i;
+ int i, ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
mutex_init(&data->mutex);
+ INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan);
+ dev_set_drvdata(dev, data);
- if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy"))
+ if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
data->num_phys = 2;
else
data->num_phys = 3;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
- of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
- data->disc_thresh = 3;
- else
+ if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun7i-a20-usb-phy"))
data->disc_thresh = 2;
+ else
+ data->disc_thresh = 3;
- if (of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
dedicated_clocks = true;
else
dedicated_clocks = false;
+ if (of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
+ data->has_a33_phyctl = true;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
+ data->id_det_gpio = devm_gpiod_get(dev, "usb0_id_det", GPIOD_IN);
+ if (IS_ERR(data->id_det_gpio)) {
+ if (PTR_ERR(data->id_det_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ data->id_det_gpio = NULL;
+ }
+
+ data->vbus_det_gpio = devm_gpiod_get(dev, "usb0_vbus_det", GPIOD_IN);
+ if (IS_ERR(data->vbus_det_gpio)) {
+ if (PTR_ERR(data->vbus_det_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ data->vbus_det_gpio = NULL;
+ }
+
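+ /* Vbus presence may be reported by a power-supply instead of a gpio */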
+ if (of_find_property(np, "usb0_vbus_power-supply", NULL)) {
+ data->vbus_power_supply = devm_power_supply_get_by_phandle(dev,
+ "usb0_vbus_power-supply");
+ if (IS_ERR(data->vbus_power_supply))
+ return PTR_ERR(data->vbus_power_supply);
+
+ if (!data->vbus_power_supply)
+ return -EPROBE_DEFER;
+ }
+
+ /* vbus_det without id_det makes no sense, and is not supported */
+ if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
+ dev_err(dev, "usb0_id_det missing or invalid\n");
+ return -ENODEV;
+ }
+
+ if (data->id_det_gpio) {
+ data->extcon = devm_extcon_dev_allocate(dev,
+ sun4i_usb_phy0_cable);
+ if (IS_ERR(data->extcon))
+ return PTR_ERR(data->extcon);
+
+ ret = devm_extcon_dev_register(dev, data->extcon);
+ if (ret) {
+ dev_err(dev, "failed to register extcon: %d\n", ret);
+ return ret;
+ }
+ }
+
for (i = 0; i < data->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
@@ -318,10 +643,54 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy->phy, &data->phys[i]);
}
- dev_set_drvdata(dev, data);
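+ /* Fall back to polling when a detect gpio cannot deliver an irq */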
+ data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
+ data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
+ if ((data->id_det_gpio && data->id_det_irq < 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq < 0))
+ data->phy0_poll = true;
+
+ if (data->id_det_irq >= 0) {
+ ret = devm_request_irq(dev, data->id_det_irq,
+ sun4i_usb_phy0_id_vbus_det_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "usb0-id-det", data);
+ if (ret) {
+ dev_err(dev, "Err requesting id-det-irq: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (data->vbus_det_irq >= 0) {
+ ret = devm_request_irq(dev, data->vbus_det_irq,
+ sun4i_usb_phy0_id_vbus_det_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "usb0-vbus-det", data);
+ if (ret) {
+ dev_err(dev, "Err requesting vbus-det-irq: %d\n", ret);
+ data->vbus_det_irq = -1;
+ sun4i_usb_phy_remove(pdev); /* Stop detect work */
+ return ret;
+ }
+ }
+
+ if (data->vbus_power_supply) {
+ data->vbus_power_nb.notifier_call = sun4i_usb_phy0_vbus_notify;
+ data->vbus_power_nb.priority = 0;
+ ret = power_supply_reg_notifier(&data->vbus_power_nb);
+ if (ret) {
+ sun4i_usb_phy_remove(pdev); /* Stop detect work */
+ return ret;
+ }
+ data->vbus_power_nb_registered = true;
+ }
+
phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ sun4i_usb_phy_remove(pdev); /* Stop detect work */
+ return PTR_ERR(phy_provider);
+ }
- return PTR_ERR_OR_ZERO(phy_provider);
+ return 0;
}
static const struct of_device_id sun4i_usb_phy_of_match[] = {
@@ -329,12 +698,15 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = {
{ .compatible = "allwinner,sun5i-a13-usb-phy" },
{ .compatible = "allwinner,sun6i-a31-usb-phy" },
{ .compatible = "allwinner,sun7i-a20-usb-phy" },
+ { .compatible = "allwinner,sun8i-a23-usb-phy" },
+ { .compatible = "allwinner,sun8i-a33-usb-phy" },
{ },
};
MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
static struct platform_driver sun4i_usb_phy_driver = {
.probe = sun4i_usb_phy_probe,
+ .remove = sun4i_usb_phy_remove,
.driver = {
.of_match_table = sun4i_usb_phy_of_match,
.name = "sun4i-usb-phy",
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c
index 0095914a662c..ac4f31abefe3 100644
--- a/drivers/phy/phy-sun9i-usb.c
+++ b/drivers/phy/phy-sun9i-usb.c
@@ -114,7 +114,7 @@ static int sun9i_usb_phy_exit(struct phy *_phy)
return 0;
}
-static struct phy_ops sun9i_usb_phy_ops = {
+static const struct phy_ops sun9i_usb_phy_ops = {
.init = sun9i_usb_phy_init,
.exit = sun9i_usb_phy_exit,
.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..93bc1120af12 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,8 @@
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
@@ -53,6 +54,8 @@
#define PLL_LOCK 0x2
#define PLL_IDLE 0x1
+#define SATA_PLL_SOFT_RESET BIT(18)
+
/*
* This is an empirical value that works; need to confirm the actual
* value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -83,10 +86,9 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- bool enabled;
- spinlock_t lock; /* serialize clock enable/disable */
- /* the below flag is needed specifically for SATA */
- bool refclk_enabled;
+ struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
+ unsigned int dpll_reset_reg; /* reg. index within syscon */
+ bool sata_refclk_enabled;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +139,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
return NULL;
}
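+/* Forward declarations: clock handling is now done in the init/exit paths */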
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
static int ti_pipe3_power_off(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +222,7 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ ti_pipe3_enable_clocks(phy);
/*
* Set pcie_pcs register to 0x96 for proper functioning of phy
* as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,36 +256,49 @@ static int ti_pipe3_exit(struct phy *x)
u32 val;
unsigned long timeout;
- /* SATA DPLL can't be powered down due to Errata i783 and PCIe
- * does not have internal DPLL
+ /* If dpll_reset_syscon is not present we won't power down SATA DPLL
+ * due to Errata i783
*/
- if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
- of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+ !phy->dpll_reset_syscon)
return 0;
- /* Put DPLL in IDLE mode */
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val |= PLL_IDLE;
- ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+ /* PCIe doesn't have internal DPLL */
+ if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+ /* Put DPLL in IDLE mode */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
- /* wait for LDO and Oscillator to power down */
- timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
- do {
- cpu_relax();
- val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
- break;
- } while (!time_after(jiffies, timeout));
+ /* wait for LDO and Oscillator to power down */
+ timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+ dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+ val);
+ return -EBUSY;
+ }
+ }
- if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
- dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
- val);
- return -EBUSY;
+ /* i783: SATA needs control bit toggle after PLL unlock */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+ regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+ SATA_PLL_SOFT_RESET, 0);
}
+ ti_pipe3_disable_clocks(phy);
+
return 0;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = ti_pipe3_init,
.exit = ti_pipe3_exit,
.power_on = ti_pipe3_power_on,
@@ -306,7 +325,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
return -ENOMEM;
phy->dev = &pdev->dev;
- spin_lock_init(&phy->lock);
if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
} else {
phy->wkupclk = ERR_PTR(-ENODEV);
+ phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+ "syscon-pllreset");
+ if (IS_ERR(phy->dpll_reset_syscon)) {
+ dev_info(&pdev->dev,
+ "can't get syscon-pllreset, sata dpll won't idle\n");
+ phy->dpll_reset_syscon = NULL;
+ } else {
+ if (of_property_read_u32_index(node,
+ "syscon-pllreset", 1,
+ &phy->dpll_reset_reg)) {
+ dev_err(&pdev->dev,
+ "couldn't get pllreset reg. offset\n");
+ return -EINVAL;
+ }
+ }
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -403,6 +436,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
+ /*
+ * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+ */
+ if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+ if (!IS_ERR(phy->refclk)) {
+ clk_prepare_enable(phy->refclk);
+ phy->sata_refclk_enabled = true;
+ }
+ }
+
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
@@ -413,63 +456,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- pm_runtime_get(&pdev->dev);
-
return 0;
}
static int ti_pipe3_remove(struct platform_device *pdev)
{
- if (!pm_runtime_suspended(&pdev->dev))
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
{
- if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
- int ret;
+ int ret = 0;
+ if (!IS_ERR(phy->refclk)) {
ret = clk_prepare_enable(phy->refclk);
if (ret) {
dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
return ret;
}
- phy->refclk_enabled = true;
}
- return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
- if (!IS_ERR(phy->refclk))
- clk_disable_unprepare(phy->refclk);
-
- phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (phy->enabled)
- goto err1;
-
- ret = ti_pipe3_enable_refclk(phy);
- if (ret)
- goto err1;
-
if (!IS_ERR(phy->wkupclk)) {
ret = clk_prepare_enable(phy->wkupclk);
if (ret) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err2;
+ goto disable_refclk;
}
}
@@ -477,96 +490,43 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
ret = clk_prepare_enable(phy->div_clk);
if (ret) {
dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
- goto err3;
+ goto disable_wkupclk;
}
}
- phy->enabled = true;
- spin_unlock_irqrestore(&phy->lock, flags);
return 0;
-err3:
+disable_wkupclk:
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
-err2:
+disable_refclk:
if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- ti_pipe3_disable_refclk(phy);
-err1:
- spin_unlock_irqrestore(&phy->lock, flags);
return ret;
}
static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
- unsigned long flags;
-
- spin_lock_irqsave(&phy->lock, flags);
- if (!phy->enabled) {
- spin_unlock_irqrestore(&phy->lock, flags);
- return;
- }
-
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- /* Don't disable refclk for SATA PHY due to Errata i783 */
- if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
- ti_pipe3_disable_refclk(phy);
+ if (!IS_ERR(phy->refclk)) {
+ clk_disable_unprepare(phy->refclk);
+ /*
+ * SATA refclk needs an additional disable as we left it
+ * on in probe to avoid Errata i783
+ */
+ if (phy->sata_refclk_enabled) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
+ }
+
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
- phy->enabled = false;
- spin_unlock_irqrestore(&phy->lock, flags);
}
-static int ti_pipe3_runtime_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
-}
-
-static int ti_pipe3_runtime_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = ti_pipe3_enable_clocks(phy);
- return ret;
-}
-
-static int ti_pipe3_suspend(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
- ti_pipe3_disable_clocks(phy);
- return 0;
-}
-
-static int ti_pipe3_resume(struct device *dev)
-{
- struct ti_pipe3 *phy = dev_get_drvdata(dev);
- int ret;
-
- ret = ti_pipe3_enable_clocks(phy);
- if (ret)
- return ret;
-
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops ti_pipe3_pm_ops = {
- SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
- ti_pipe3_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
-};
-
static const struct of_device_id ti_pipe3_id_table[] = {
{
.compatible = "ti,phy-usb3",
@@ -592,7 +552,6 @@ static struct platform_driver ti_pipe3_driver = {
.remove = ti_pipe3_remove,
.driver = {
.name = "ti-pipe3",
- .pm = &ti_pipe3_pm_ops,
.of_match_table = ti_pipe3_id_table,
},
};
diff --git a/drivers/phy/phy-tusb1210.c b/drivers/phy/phy-tusb1210.c
index 07efdd318bdc..4f6d5e71507d 100644
--- a/drivers/phy/phy-tusb1210.c
+++ b/drivers/phy/phy-tusb1210.c
@@ -53,7 +53,7 @@ static int tusb1210_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops phy_ops = {
+static const struct phy_ops phy_ops = {
.power_on = tusb1210_power_on,
.power_off = tusb1210_power_off,
.owner = THIS_MODULE,
@@ -61,32 +61,26 @@ static struct phy_ops phy_ops = {
static int tusb1210_probe(struct ulpi *ulpi)
{
- struct gpio_desc *gpio;
struct tusb1210 *tusb;
u8 val, reg;
- int ret;
tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
if (!tusb)
return -ENOMEM;
- gpio = devm_gpiod_get(&ulpi->dev, "reset");
- if (!IS_ERR(gpio)) {
- ret = gpiod_direction_output(gpio, 0);
- if (ret)
- return ret;
- gpiod_set_value_cansleep(gpio, 1);
- tusb->gpio_reset = gpio;
- }
+ tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tusb->gpio_reset))
+ return PTR_ERR(tusb->gpio_reset);
- gpio = devm_gpiod_get(&ulpi->dev, "cs");
- if (!IS_ERR(gpio)) {
- ret = gpiod_direction_output(gpio, 0);
- if (ret)
- return ret;
- gpiod_set_value_cansleep(gpio, 1);
- tusb->gpio_cs = gpio;
- }
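+ /* gpiod calls are no-ops when the optional gpio is absent (NULL desc) */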
+ gpiod_set_value_cansleep(tusb->gpio_reset, 1);
+
+ tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tusb->gpio_cs))
+ return PTR_ERR(tusb->gpio_cs);
+
+ gpiod_set_value_cansleep(tusb->gpio_cs, 1);
/*
* VENDOR_SPECIFIC2 register in TUSB1210 can be used for configuring eye
diff --git a/drivers/phy/ulpi_phy.h b/drivers/phy/ulpi_phy.h
index ac49fb6285ee..f2ebe490a4bc 100644
--- a/drivers/phy/ulpi_phy.h
+++ b/drivers/phy/ulpi_phy.h
@@ -5,7 +5,7 @@
* and its controller, which is always the parent.
*/
static inline struct phy
-*ulpi_phy_create(struct ulpi *ulpi, struct phy_ops *ops)
+*ulpi_phy_create(struct ulpi *ulpi, const struct phy_ops *ops)
{
struct phy *phy;
int ret;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 100d9ac2ae1f..84dd2ed47a92 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,12 @@ config PINCTRL_AMD
Requires ACPI/FDT device enumeration code to set up a platform
device.
+config PINCTRL_DIGICOLOR
+ bool
+ depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
+ select PINMUX
+ select GENERIC_PINCONF
+
config PINCTRL_LANTIQ
bool
depends on LANTIQ
@@ -240,6 +246,7 @@ source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index f4216d9347e2..cad077c43fb7 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -2,12 +2,10 @@
subdir-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
-obj-$(CONFIG_PINCTRL) += core.o pinctrl-utils.o
+obj-y += core.o pinctrl-utils.o
obj-$(CONFIG_PINMUX) += pinmux.o
obj-$(CONFIG_PINCONF) += pinconf.o
-ifeq ($(CONFIG_OF),y)
-obj-$(CONFIG_PINCTRL) += devicetree.o
-endif
+obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
@@ -15,6 +13,7 @@ obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
@@ -51,5 +50,6 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..8efa235ca1c9 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
spin_lock_irqsave(&pc->irq_lock[bank], flags);
bcm2835_gpio_irq_config(pc, gpio, false);
+ /* Clear events that were latched prior to clearing event sources */
+ bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
@@ -584,9 +586,9 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(data->irq, handle_edge_irq);
+ irq_set_handler_locked(data, handle_edge_irq);
else
- __irq_set_handler_locked(data->irq, handle_level_irq);
+ irq_set_handler_locked(data, handle_level_irq);
spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
@@ -987,7 +989,6 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, pc);
- set_irq_flags(irq, IRQF_VALID);
}
for (i = 0; i < BCM2835_NUM_BANKS; i++) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 8b8f3a04c353..69723e07036b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -231,8 +231,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc = pin_desc_get(pctldev, number);
if (pindesc != NULL) {
- pr_err("pin %d already registered on %s\n", number,
- pctldev->desc->name);
+ dev_err(pctldev->dev, "pin %d already registered\n", number);
return -EINVAL;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 0bbf7d71b281..fe04e748dfe4 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -97,13 +97,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
{
- struct pinctrl_dev *pctldev;
-
- pctldev = get_pinctrl_dev_from_of_node(np);
- if (!pctldev)
- return NULL;
-
- return pctldev;
+ return get_pinctrl_dev_from_of_node(np);
}
static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 12ef544b4894..debe1219d76d 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -87,6 +87,13 @@ config PINCTRL_IMX6SX
help
Say Y here to enable the imx6sx pinctrl driver
+config PINCTRL_IMX6UL
+ bool "IMX6UL pinctrl driver"
+ depends on SOC_IMX6UL
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx6ul pinctrl driver
+
config PINCTRL_IMX7D
bool "IMX7D pinctrl driver"
depends on SOC_IMX7D
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 343cb436ab17..d44c9e253f21 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o
obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o
obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
+obj-$(CONFIG_PINCTRL_IMX6UL) += pinctrl-imx6ul.o
obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx1_pinctrl_soc_info *info = ipctl->info;
int i;
for (i = 0; i != num_configs; ++i) {
imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
- info->pins[pin_id].name);
+ pin_desc_get(pctldev, pin_id)->name);
}
return 0;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
new file mode 100644
index 000000000000..08e75764e7be
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
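+/* Pad ids; each id is also the index into imx6ul_pinctrl_pads[] below */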
+enum imx6ul_pads {
+ MX6UL_PAD_RESERVE0 = 0,
+ MX6UL_PAD_RESERVE1 = 1,
+ MX6UL_PAD_RESERVE2 = 2,
+ MX6UL_PAD_RESERVE3 = 3,
+ MX6UL_PAD_RESERVE4 = 4,
+ MX6UL_PAD_RESERVE5 = 5,
+ MX6UL_PAD_RESERVE6 = 6,
+ MX6UL_PAD_RESERVE7 = 7,
+ MX6UL_PAD_RESERVE8 = 8,
+ MX6UL_PAD_RESERVE9 = 9,
+ MX6UL_PAD_RESERVE10 = 10,
+ MX6UL_PAD_SNVS_TAMPER4 = 11,
+ MX6UL_PAD_RESERVE12 = 12,
+ MX6UL_PAD_RESERVE13 = 13,
+ MX6UL_PAD_RESERVE14 = 14,
+ MX6UL_PAD_RESERVE15 = 15,
+ MX6UL_PAD_RESERVE16 = 16,
+ MX6UL_PAD_JTAG_MOD = 17,
+ MX6UL_PAD_JTAG_TMS = 18,
+ MX6UL_PAD_JTAG_TDO = 19,
+ MX6UL_PAD_JTAG_TDI = 20,
+ MX6UL_PAD_JTAG_TCK = 21,
+ MX6UL_PAD_JTAG_TRST_B = 22,
+ MX6UL_PAD_GPIO1_IO00 = 23,
+ MX6UL_PAD_GPIO1_IO01 = 24,
+ MX6UL_PAD_GPIO1_IO02 = 25,
+ MX6UL_PAD_GPIO1_IO03 = 26,
+ MX6UL_PAD_GPIO1_IO04 = 27,
+ MX6UL_PAD_GPIO1_IO05 = 28,
+ MX6UL_PAD_GPIO1_IO06 = 29,
+ MX6UL_PAD_GPIO1_IO07 = 30,
+ MX6UL_PAD_GPIO1_IO08 = 31,
+ MX6UL_PAD_GPIO1_IO09 = 32,
+ MX6UL_PAD_UART1_TX_DATA = 33,
+ MX6UL_PAD_UART1_RX_DATA = 34,
+ MX6UL_PAD_UART1_CTS_B = 35,
+ MX6UL_PAD_UART1_RTS_B = 36,
+ MX6UL_PAD_UART2_TX_DATA = 37,
+ MX6UL_PAD_UART2_RX_DATA = 38,
+ MX6UL_PAD_UART2_CTS_B = 39,
+ MX6UL_PAD_UART2_RTS_B = 40,
+ MX6UL_PAD_UART3_TX_DATA = 41,
+ MX6UL_PAD_UART3_RX_DATA = 42,
+ MX6UL_PAD_UART3_CTS_B = 43,
+ MX6UL_PAD_UART3_RTS_B = 44,
+ MX6UL_PAD_UART4_TX_DATA = 45,
+ MX6UL_PAD_UART4_RX_DATA = 46,
+ MX6UL_PAD_UART5_TX_DATA = 47,
+ MX6UL_PAD_UART5_RX_DATA = 48,
+ MX6UL_PAD_ENET1_RX_DATA0 = 49,
+ MX6UL_PAD_ENET1_RX_DATA1 = 50,
+ MX6UL_PAD_ENET1_RX_EN = 51,
+ MX6UL_PAD_ENET1_TX_DATA0 = 52,
+ MX6UL_PAD_ENET1_TX_DATA1 = 53,
+ MX6UL_PAD_ENET1_TX_EN = 54,
+ MX6UL_PAD_ENET1_TX_CLK = 55,
+ MX6UL_PAD_ENET1_RX_ER = 56,
+ MX6UL_PAD_ENET2_RX_DATA0 = 57,
+ MX6UL_PAD_ENET2_RX_DATA1 = 58,
+ MX6UL_PAD_ENET2_RX_EN = 59,
+ MX6UL_PAD_ENET2_TX_DATA0 = 60,
+ MX6UL_PAD_ENET2_TX_DATA1 = 61,
+ MX6UL_PAD_ENET2_TX_EN = 62,
+ MX6UL_PAD_ENET2_TX_CLK = 63,
+ MX6UL_PAD_ENET2_RX_ER = 64,
+ MX6UL_PAD_LCD_CLK = 65,
+ MX6UL_PAD_LCD_ENABLE = 66,
+ MX6UL_PAD_LCD_HSYNC = 67,
+ MX6UL_PAD_LCD_VSYNC = 68,
+ MX6UL_PAD_LCD_RESET = 69,
+ MX6UL_PAD_LCD_DATA00 = 70,
+ MX6UL_PAD_LCD_DATA01 = 71,
+ MX6UL_PAD_LCD_DATA02 = 72,
+ MX6UL_PAD_LCD_DATA03 = 73,
+ MX6UL_PAD_LCD_DATA04 = 74,
+ MX6UL_PAD_LCD_DATA05 = 75,
+ MX6UL_PAD_LCD_DATA06 = 76,
+ MX6UL_PAD_LCD_DATA07 = 77,
+ MX6UL_PAD_LCD_DATA08 = 78,
+ MX6UL_PAD_LCD_DATA09 = 79,
+ MX6UL_PAD_LCD_DATA10 = 80,
+ MX6UL_PAD_LCD_DATA11 = 81,
+ MX6UL_PAD_LCD_DATA12 = 82,
+ MX6UL_PAD_LCD_DATA13 = 83,
+ MX6UL_PAD_LCD_DATA14 = 84,
+ MX6UL_PAD_LCD_DATA15 = 85,
+ MX6UL_PAD_LCD_DATA16 = 86,
+ MX6UL_PAD_LCD_DATA17 = 87,
+ MX6UL_PAD_LCD_DATA18 = 88,
+ MX6UL_PAD_LCD_DATA19 = 89,
+ MX6UL_PAD_LCD_DATA20 = 90,
+ MX6UL_PAD_LCD_DATA21 = 91,
+ MX6UL_PAD_LCD_DATA22 = 92,
+ MX6UL_PAD_LCD_DATA23 = 93,
+ MX6UL_PAD_NAND_RE_B = 94,
+ MX6UL_PAD_NAND_WE_B = 95,
+ MX6UL_PAD_NAND_DATA00 = 96,
+ MX6UL_PAD_NAND_DATA01 = 97,
+ MX6UL_PAD_NAND_DATA02 = 98,
+ MX6UL_PAD_NAND_DATA03 = 99,
+ MX6UL_PAD_NAND_DATA04 = 100,
+ MX6UL_PAD_NAND_DATA05 = 101,
+ MX6UL_PAD_NAND_DATA06 = 102,
+ MX6UL_PAD_NAND_DATA07 = 103,
+ MX6UL_PAD_NAND_ALE = 104,
+ MX6UL_PAD_NAND_WP_B = 105,
+ MX6UL_PAD_NAND_READY_B = 106,
+ MX6UL_PAD_NAND_CE0_B = 107,
+ MX6UL_PAD_NAND_CE1_B = 108,
+ MX6UL_PAD_NAND_CLE = 109,
+ MX6UL_PAD_NAND_DQS = 110,
+ MX6UL_PAD_SD1_CMD = 111,
+ MX6UL_PAD_SD1_CLK = 112,
+ MX6UL_PAD_SD1_DATA0 = 113,
+ MX6UL_PAD_SD1_DATA1 = 114,
+ MX6UL_PAD_SD1_DATA2 = 115,
+ MX6UL_PAD_SD1_DATA3 = 116,
+ MX6UL_PAD_CSI_MCLK = 117,
+ MX6UL_PAD_CSI_PIXCLK = 118,
+ MX6UL_PAD_CSI_VSYNC = 119,
+ MX6UL_PAD_CSI_HSYNC = 120,
+ MX6UL_PAD_CSI_DATA00 = 121,
+ MX6UL_PAD_CSI_DATA01 = 122,
+ MX6UL_PAD_CSI_DATA02 = 123,
+ MX6UL_PAD_CSI_DATA03 = 124,
+ MX6UL_PAD_CSI_DATA04 = 125,
+ MX6UL_PAD_CSI_DATA05 = 126,
+ MX6UL_PAD_CSI_DATA06 = 127,
+ MX6UL_PAD_CSI_DATA07 = 128,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15),
+ IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22),
+ IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE),
+ IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06),
+ IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07),
+};
+
+static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
+ .pins = imx6ul_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+};
+
+static const struct of_device_id imx6ul_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx6ul-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx6ul_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx6ul_pinctrl_info);
+}
+
+static struct platform_driver imx6ul_pinctrl_driver = {
+ .driver = {
+ .name = "imx6ul-pinctrl",
+ .of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+ },
+ .probe = imx6ul_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
+};
+
+static int __init imx6ul_pinctrl_init(void)
+{
+ return platform_driver_register(&imx6ul_pinctrl_driver);
+}
+arch_initcall(imx6ul_pinctrl_init);
+
+static void __exit imx6ul_pinctrl_exit(void)
+{
+ platform_driver_unregister(&imx6ul_pinctrl_driver);
+}
+module_exit(imx6ul_pinctrl_exit);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
+MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 2062c224e32f..dac4865f3203 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -12,11 +12,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#include <linux/kernel.h>
@@ -146,7 +141,7 @@ struct byt_gpio_pin_context {
struct byt_gpio {
struct gpio_chip chip;
struct platform_device *pdev;
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *reg_base;
struct pinctrl_gpio_range *range;
struct byt_gpio_pin_context *saved_context;
@@ -174,11 +169,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
unsigned long flags;
u32 value;
- spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&vg->lock, flags);
value = readl(reg);
value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
writel(value, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
}
static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
@@ -201,6 +196,9 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
struct byt_gpio *vg = to_byt_gpio(chip);
void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
/*
* In most cases, func pin mux 000 means GPIO function.
@@ -214,18 +212,16 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
value = readl(reg) & BYT_PIN_MUX;
gpio_mux = byt_get_gpio_mux(vg, offset);
if (WARN_ON(gpio_mux != value)) {
- unsigned long flags;
-
- spin_lock_irqsave(&vg->lock, flags);
value = readl(reg) & ~BYT_PIN_MUX;
value |= gpio_mux;
writel(value, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
dev_warn(&vg->pdev->dev,
"pin %u forcibly re-configured as GPIO\n", offset);
}
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
pm_runtime_get(&vg->pdev->dev);
return 0;
@@ -250,7 +246,7 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
if (offset >= vg->chip.ngpio)
return -EINVAL;
- spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&vg->lock, flags);
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@@ -265,11 +261,11 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
writel(value, reg);
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
return 0;
}
@@ -277,7 +273,15 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
{
void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
- return readl(reg) & BYT_LEVEL;
+ struct byt_gpio *vg = to_byt_gpio(chip);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+ val = readl(reg);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ return val & BYT_LEVEL;
}
static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -287,7 +291,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
u32 old_val;
- spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&vg->lock, flags);
old_val = readl(reg);
@@ -296,7 +300,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
writel(old_val & ~BYT_LEVEL, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
}
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -306,13 +310,13 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 value;
- spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&vg->lock, flags);
value = readl(reg) | BYT_DIR_MASK;
value &= ~BYT_INPUT_EN; /* active low */
writel(value, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
return 0;
}
@@ -326,7 +330,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned long flags;
u32 reg_val;
- spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&vg->lock, flags);
/*
* Before making any direction modifications, do a check if gpio
@@ -345,7 +349,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
else
writel(reg_val & ~BYT_LEVEL, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
return 0;
}
@@ -354,18 +358,19 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct byt_gpio *vg = to_byt_gpio(chip);
int i;
- unsigned long flags;
u32 conf0, val, offs;
- spin_lock_irqsave(&vg->lock, flags);
-
for (i = 0; i < vg->chip.ngpio; i++) {
const char *pull_str = NULL;
const char *pull = NULL;
+ unsigned long flags;
const char *label;
offs = vg->range->pins[i] * 16;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
val = readl(vg->reg_base + offs + BYT_VAL_REG);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
label = gpiochip_is_requested(chip, i);
if (!label)
@@ -418,7 +423,6 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_puts(s, "\n");
}
- spin_unlock_irqrestore(&vg->lock, flags);
}
static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -450,8 +454,10 @@ static void byt_irq_ack(struct irq_data *d)
unsigned offset = irqd_to_hwirq(d);
void __iomem *reg;
+ raw_spin_lock(&vg->lock);
reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
writel(BIT(offset % 32), reg);
+ raw_spin_unlock(&vg->lock);
}
static void byt_irq_unmask(struct irq_data *d)
@@ -463,9 +469,9 @@ static void byt_irq_unmask(struct irq_data *d)
void __iomem *reg;
u32 value;
- spin_lock_irqsave(&vg->lock, flags);
-
reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@@ -486,7 +492,7 @@ static void byt_irq_unmask(struct irq_data *d)
writel(value, reg);
- spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
}
static void byt_irq_mask(struct irq_data *d)
@@ -578,7 +584,7 @@ static int byt_gpio_probe(struct platform_device *pdev)
if (IS_ERR(vg->reg_base))
return PTR_ERR(vg->reg_base);
- spin_lock_init(&vg->lock);
+ raw_spin_lock_init(&vg->lock);
gc = &vg->chip;
gc->label = dev_name(&pdev->dev);
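
A note on the locking change running through this file: spinlock_t becomes a sleeping lock on PREEMPT_RT, but these locks are taken from irq-chip callbacks (ack, mask, unmask, set_type) that run in hard interrupt context, where sleeping is not allowed. Converting to raw_spinlock_t keeps the lock truly spinning in all configurations. The resulting pattern, reduced to a sketch (the lock and register are invented):

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* Read-modify-write of a shared MMIO register; safe from hard-irq
 * context even on PREEMPT_RT because the lock never sleeps. */
static void demo_set_bits(void __iomem *reg, u32 bits)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&demo_lock, flags);
	val = readl(reg);
	writel(val | bits, reg);
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}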
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3f737daa3fd2..2d5d3ddc36e5 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -174,7 +174,7 @@ struct chv_pinctrl {
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
void __iomem *regs;
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
@@ -720,13 +720,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
u32 ctrl0, ctrl1;
bool locked;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
locked = chv_pad_locked(pctrl, offset);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
seq_puts(s, "GPIO ");
@@ -789,14 +789,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
grp = &pctrl->community->groups[group];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Check first that the pad is not locked */
for (i = 0; i < grp->npins; i++) {
if (chv_pad_locked(pctrl, grp->pins[i])) {
dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
grp->pins[i]);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
@@ -839,7 +839,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -853,13 +853,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
if (chv_pad_locked(pctrl, offset)) {
value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
if (!(value & CHV_PADCTRL0_GPIOEN)) {
/* Locked so cannot enable */
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
} else {
@@ -899,7 +899,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
chv_writel(value, reg);
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -913,13 +913,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
void __iomem *reg;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
chv_writel(value, reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +931,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 ctrl0;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
if (input)
@@ -940,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
chv_writel(ctrl0, reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -965,10 +965,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
u16 arg = 0;
u32 term;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
@@ -1042,7 +1042,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
unsigned long flags;
u32 ctrl0, pull;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(reg);
switch (param) {
@@ -1065,7 +1065,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EINVAL;
}
@@ -1083,7 +1083,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
break;
default:
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EINVAL;
}
@@ -1091,12 +1091,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
break;
default:
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EINVAL;
}
chv_writel(ctrl0, reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -1169,9 +1169,12 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
int pin = chv_gpio_offset_to_pin(pctrl, offset);
+ unsigned long flags;
u32 ctrl0, cfg;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1189,7 +1192,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
void __iomem *reg;
u32 ctrl0;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
ctrl0 = readl(reg);
@@ -1201,7 +1204,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
chv_writel(ctrl0, reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1209,8 +1212,11 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
u32 ctrl0, direction;
+ unsigned long flags;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1248,14 +1254,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
u32 intr_line;
- spin_lock(&pctrl->lock);
+ raw_spin_lock(&pctrl->lock);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
- spin_unlock(&pctrl->lock);
+ raw_spin_unlock(&pctrl->lock);
}
static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1266,7 +1272,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
u32 value, intr_line;
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1279,7 +1285,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
value |= BIT(intr_line);
chv_writel(value, pctrl->regs + CHV_INTMASK);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1313,6 +1319,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
unsigned long flags;
u32 intsel, value;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1323,12 +1330,11 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
else
handler = handle_edge_irq;
- spin_lock_irqsave(&pctrl->lock, flags);
if (!pctrl->intr_lines[intsel]) {
- __irq_set_handler_locked(d->irq, handler);
+ irq_set_handler_locked(d, handler);
pctrl->intr_lines[intsel] = offset;
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
chv_gpio_irq_unmask(d);
@@ -1344,7 +1350,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
unsigned long flags;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/*
* Pins which can be used as shared interrupt are configured in
@@ -1389,11 +1395,11 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
pctrl->intr_lines[value] = offset;
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -1412,7 +1418,7 @@ static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
u32 intr_line;
@@ -1505,7 +1511,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (i == ARRAY_SIZE(chv_communities))
return -ENODEV;
- spin_lock_init(&pctrl->lock);
+ raw_spin_lock_init(&pctrl->lock);
pctrl->dev = &pdev->dev;
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index f9ee0d68b288..bb377c110541 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -758,9 +758,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
writel(value, reg);
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -840,7 +840,7 @@ static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int i;
chained_irq_enter(chip, desc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index d0c811d5f07b..ad271840d865 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -385,6 +385,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt8173-pinctrl",
.of_match_table = mt8173_pctrl_match,
+ .pm = &mtk_eint_pm_ops,
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index ad1ea1695b4a..7726c6caaf83 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -33,6 +33,7 @@
#include <linux/mfd/syscon.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <dt-bindings/pinctrl/mt65xx.h>
#include "../core.h"
@@ -702,7 +703,7 @@ static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
ret = mtk_pctrl_is_function_valid(pctl, g->pin, function);
if (!ret) {
- dev_err(pctl->dev, "invaild function %d on group %d .\n",
+ dev_err(pctl->dev, "invalid function %d on group %d .\n",
function, group);
return -EINVAL;
}
@@ -1062,6 +1063,77 @@ static int mtk_eint_set_type(struct irq_data *d,
return 0;
}
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ int shift = d->hwirq & 0x1f;
+ int reg = d->hwirq >> 5;
+
+ if (on)
+ pctl->wake_mask[reg] |= BIT(shift);
+ else
+ pctl->wake_mask[reg] &= ~BIT(shift);
+
+ return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
+ void __iomem *eint_reg_base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < chip->ports; port++) {
+ reg = eint_reg_base + (port << 2);
+ writel_relaxed(~buf[port], reg + chip->mask_set);
+ writel_relaxed(buf[port], reg + chip->mask_clr);
+ }
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
+ void __iomem *eint_reg_base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < chip->ports; port++) {
+ reg = eint_reg_base + chip->mask + (port << 2);
+		/* Mask is 0 when irq is enabled, and 1 when disabled. */
+		buf[port] = ~readl_relaxed(reg);
+ }
+}
+
+static int mtk_eint_suspend(struct device *device)
+{
+ void __iomem *reg;
+ struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+
+ reg = pctl->eint_reg_base;
+ mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
+ mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
+
+ return 0;
+}
+
+static int mtk_eint_resume(struct device *device)
+{
+ struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+ const struct mtk_eint_offsets *eint_offsets =
+ &pctl->devdata->eint_offsets;
+
+ mtk_eint_chip_write_mask(eint_offsets,
+ pctl->eint_reg_base, pctl->cur_mask);
+
+ return 0;
+}
+
+const struct dev_pm_ops mtk_eint_pm_ops = {
+ .suspend = mtk_eint_suspend,
+ .resume = mtk_eint_resume,
+};
+
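The suspend handler above snapshots the live mask into cur_mask and then programs wake_mask, so only EINTs marked as wakeup sources stay unmasked across suspend; resume writes cur_mask back. A consumer reaches mtk_eint_irq_set_wake() through the generic wake API; a hypothetical sketch (handler, name and trigger are invented):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Request the EINT and mark it as a wakeup source; the irqchip's
 * .irq_set_wake callback (mtk_eint_irq_set_wake() above) records
 * the corresponding bit in wake_mask. */
static int demo_setup_wake_irq(int irq)
{
	int ret;

	ret = request_irq(irq, demo_handler, IRQF_TRIGGER_FALLING,
			  "demo-wake", NULL);
	if (ret)
		return ret;

	return enable_irq_wake(irq);
}
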
static void mtk_eint_ack(struct irq_data *d)
{
struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
@@ -1076,10 +1148,12 @@ static void mtk_eint_ack(struct irq_data *d)
static struct irq_chip mtk_pinctrl_irq_chip = {
.name = "mt-eint",
+ .irq_disable = mtk_eint_mask,
.irq_mask = mtk_eint_mask,
.irq_unmask = mtk_eint_unmask,
.irq_ack = mtk_eint_ack,
.irq_set_type = mtk_eint_set_type,
+ .irq_set_wake = mtk_eint_irq_set_wake,
.irq_request_resources = mtk_pinctrl_irq_request_resources,
.irq_release_resources = mtk_pinctrl_irq_release_resources,
};
@@ -1118,8 +1192,8 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct mtk_pinctrl *pctl = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
unsigned int status, eint_num;
int offset, index, virq;
const struct mtk_eint_offsets *eint_offsets =
@@ -1202,12 +1276,6 @@ static int mtk_pctrl_build_state(struct platform_device *pdev)
return 0;
}
-static struct pinctrl_desc mtk_pctrl_desc = {
- .confops = &mtk_pconf_ops,
- .pctlops = &mtk_pctrl_ops,
- .pmxops = &mtk_pmx_ops,
-};
-
int mtk_pctrl_init(struct platform_device *pdev,
const struct mtk_pinctrl_devdata *data,
struct regmap *regmap)
@@ -1217,7 +1285,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node, *node;
struct property *prop;
struct resource *res;
- int i, ret, irq;
+ int i, ret, irq, ports_buf;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1265,12 +1333,17 @@ int mtk_pctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->devdata->npins; i++)
pins[i] = pctl->devdata->pins[i].pin;
- mtk_pctrl_desc.name = dev_name(&pdev->dev);
- mtk_pctrl_desc.owner = THIS_MODULE;
- mtk_pctrl_desc.pins = pins;
- mtk_pctrl_desc.npins = pctl->devdata->npins;
+
+ pctl->pctl_desc.name = dev_name(&pdev->dev);
+ pctl->pctl_desc.owner = THIS_MODULE;
+ pctl->pctl_desc.pins = pins;
+ pctl->pctl_desc.npins = pctl->devdata->npins;
+ pctl->pctl_desc.confops = &mtk_pconf_ops;
+ pctl->pctl_desc.pctlops = &mtk_pctrl_ops;
+ pctl->pctl_desc.pmxops = &mtk_pmx_ops;
pctl->dev = &pdev->dev;
- pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
+
+ pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
if (IS_ERR(pctl->pctl_dev)) {
dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
return PTR_ERR(pctl->pctl_dev);
@@ -1319,6 +1392,21 @@ int mtk_pctrl_init(struct platform_device *pdev,
goto chip_error;
}
+ ports_buf = pctl->devdata->eint_offsets.ports;
+ pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
+ sizeof(*pctl->wake_mask), GFP_KERNEL);
+ if (!pctl->wake_mask) {
+ ret = -ENOMEM;
+ goto chip_error;
+ }
+
+ pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
+ sizeof(*pctl->cur_mask), GFP_KERNEL);
+ if (!pctl->cur_mask) {
+ ret = -ENOMEM;
+ goto chip_error;
+ }
+
pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
sizeof(int), GFP_KERNEL);
if (!pctl->eint_dual_edges) {
@@ -1348,11 +1436,9 @@ int mtk_pctrl_init(struct platform_device *pdev,
irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, pctl);
- set_irq_flags(virq, IRQF_VALID);
}
irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
- set_irq_flags(irq, IRQF_VALID);
return 0;
chip_error:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 30213e514c2f..55a534338931 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -256,6 +256,7 @@ struct mtk_pinctrl_devdata {
struct mtk_pinctrl {
struct regmap *regmap1;
struct regmap *regmap2;
+ struct pinctrl_desc pctl_desc;
struct device *dev;
struct gpio_chip *chip;
struct mtk_pinctrl_group *groups;
@@ -266,6 +267,8 @@ struct mtk_pinctrl {
void __iomem *eint_reg_base;
struct irq_domain *domain;
int *eint_dual_edges;
+ u32 *wake_mask;
+ u32 *cur_mask;
};
int mtk_pctrl_init(struct platform_device *pdev,
@@ -281,4 +284,6 @@ int mtk_pconf_spec_set_ies_smt_range(struct regmap *regmap,
const struct mtk_pin_ies_smt_set *ies_smt_infos, unsigned int info_num,
unsigned int pin, unsigned char align, int value);
+extern const struct dev_pm_ops mtk_eint_pm_ops;
+
#endif /* __PINCTRL_MTK_COMMON_H */
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
.set_mux = abx500_pmx_set,
.gpio_request_enable = abx500_gpio_request_enable,
.gpio_disable_free = abx500_gpio_disable_free,
- .strict = true,
};
static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index c74840729648..8392083514fb 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -355,25 +355,6 @@ static const struct pinctrl_pin_desc nmk_db8500_pins[] = {
PINCTRL_PIN(DB8500_PIN_AC27, "GPIO267_AC27"),
};
-#define DB8500_GPIO_RANGE(a, b, c) { .name = "DB8500", .id = a, .base = b, \
- .pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8500_ranges[] = {
- DB8500_GPIO_RANGE(0, 0, 32),
- DB8500_GPIO_RANGE(1, 32, 5),
- DB8500_GPIO_RANGE(2, 64, 32),
- DB8500_GPIO_RANGE(3, 96, 2),
- DB8500_GPIO_RANGE(4, 128, 32),
- DB8500_GPIO_RANGE(5, 160, 12),
- DB8500_GPIO_RANGE(6, 192, 32),
- DB8500_GPIO_RANGE(7, 224, 7),
- DB8500_GPIO_RANGE(8, 256, 12),
-};
-
/*
* Read the pin group names like this:
* u0_a_1 = first groups of pins for uart0 on alt function a
@@ -1238,8 +1219,6 @@ static const u16 db8500_prcm_gpiocr_regs[] = {
};
static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
- .gpio_ranges = nmk_db8500_ranges,
- .gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges),
.pins = nmk_db8500_pins,
.npins = ARRAY_SIZE(nmk_db8500_pins),
.functions = nmk_db8500_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index d7ba5443bae0..2860eafd1b42 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
@@ -341,28 +341,6 @@ static const struct pinctrl_pin_desc nmk_db8540_pins[] = {
PINCTRL_PIN(DB8540_PIN_D17, "GPIO267_D17"),
};
-#define DB8540_GPIO_RANGE(a, b, c) { .name = "db8540", .id = a, .base = b, \
- .pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8540_ranges[] = {
- DB8540_GPIO_RANGE(0, 0, 18),
- DB8540_GPIO_RANGE(0, 22, 7),
- DB8540_GPIO_RANGE(1, 33, 6),
- DB8540_GPIO_RANGE(2, 64, 4),
- DB8540_GPIO_RANGE(2, 70, 18),
- DB8540_GPIO_RANGE(3, 116, 12),
- DB8540_GPIO_RANGE(4, 128, 32),
- DB8540_GPIO_RANGE(5, 160, 9),
- DB8540_GPIO_RANGE(6, 192, 23),
- DB8540_GPIO_RANGE(6, 219, 5),
- DB8540_GPIO_RANGE(7, 224, 9),
- DB8540_GPIO_RANGE(8, 256, 12),
-};
-
/*
* Read the pin group names like this:
* u0_a_1 = first groups of pins for uart0 on alt function a
@@ -1247,8 +1225,6 @@ static const u16 db8540_prcm_gpiocr_regs[] = {
};
static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
- .gpio_ranges = nmk_db8540_ranges,
- .gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
.pins = nmk_db8540_pins,
.npins = ARRAY_SIZE(nmk_db8540_pins),
.functions = nmk_db8540_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 2cd71470f270..587b222f12f3 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -264,20 +264,6 @@ static const struct pinctrl_pin_desc nmk_stn8815_pins[] = {
PINCTRL_PIN(STN8815_PIN_J22, "GPIO123_J22"),
};
-#define STN8815_GPIO_RANGE(a, b, c) { .name = "STN8815", .id = a, .base = b, \
- .pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
- STN8815_GPIO_RANGE(0, 0, 32),
- STN8815_GPIO_RANGE(1, 32, 32),
- STN8815_GPIO_RANGE(2, 64, 32),
- STN8815_GPIO_RANGE(3, 96, 28),
-};
-
/*
* Read the pin group names like this:
* u0_a_1 = first groups of pins for uart0 on alt function a
@@ -285,9 +271,11 @@ static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
*/
/* Altfunction A */
-static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
- STN8815_PIN_C5, STN8815_PIN_A4, STN8815_PIN_B5, STN8815_PIN_D6,
- STN8815_PIN_C6, STN8815_PIN_B6 };
+static const unsigned u0txrx_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5 };
+static const unsigned u0ctsrts_a_1_pins[] = { STN8815_PIN_C5, STN8815_PIN_B6 };
+/* Modem pins: DCD, DSR, RI, DTR */
+static const unsigned u0modem_a_1_pins[] = { STN8815_PIN_A4, STN8815_PIN_B5,
+ STN8815_PIN_D6, STN8815_PIN_C6 };
static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
@@ -304,7 +292,9 @@ static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
static const struct nmk_pingroup nmk_stn8815_groups[] = {
- STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+ STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+ STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+ STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
@@ -318,7 +308,7 @@ static const struct nmk_pingroup nmk_stn8815_groups[] = {
#define STN8815_FUNC_GROUPS(a, b...) \
static const char * const a##_groups[] = { b };
-STN8815_FUNC_GROUPS(u0, "u0_a_1");
+STN8815_FUNC_GROUPS(u0, "u0txrx_a_1", "u0ctsrts_a_1", "u0modem_a_1");
STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
@@ -342,8 +332,6 @@ static const struct nmk_function nmk_stn8815_functions[] = {
};
static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
- .gpio_ranges = nmk_stn8815_ranges,
- .gpio_num_ranges = ARRAY_SIZE(nmk_stn8815_ranges),
.pins = nmk_stn8815_pins,
.npins = ARRAY_SIZE(nmk_stn8815_pins),
.functions = nmk_stn8815_functions,
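
Splitting the old u0_a_1 group into txrx, ctsrts and modem subgroups lets a board claim just RX/TX without also muxing the modem-control pins. For reference, based on the STN8815_PIN_GROUP() macro shown above, each table entry expands roughly as follows (a sketch; the .name initializer is inferred):

	/* STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A) becomes roughly: */
	{
		.name = "u0txrx_a_1",
		.pins = u0txrx_a_1_pins,
		.npins = ARRAY_SIZE(u0txrx_a_1_pins),
		.altsetting = NMK_GPIO_ALT_A,
	},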
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 809d88445db5..352ede13a9e9 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -203,6 +203,7 @@ typedef unsigned long pin_cfg_t;
#define GPIO_BLOCK_SHIFT 5
#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)
/* Register in the logic block */
#define NMK_GPIO_DAT 0x00
@@ -282,8 +283,7 @@ struct nmk_pinctrl {
void __iomem *prcm_base;
};
-static struct nmk_gpio_chip *
-nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
@@ -843,10 +843,9 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
clk_disable(nmk_chip->clk);
}
-static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
- u32 status)
+static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
{
- struct irq_chip *host_chip = irq_get_chip(irq);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
chained_irq_enter(host_chip, desc);
@@ -871,17 +870,16 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
status = readl(nmk_chip->addr + NMK_GPIO_IS);
clk_disable(nmk_chip->clk);
- __nmk_gpio_irq_handler(irq, desc, status);
+ __nmk_gpio_irq_handler(desc, status);
}
-static void nmk_gpio_latent_irq_handler(unsigned int irq,
- struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
- __nmk_gpio_irq_handler(irq, desc, status);
+ __nmk_gpio_irq_handler(desc, status);
}
/* I/O Functions */
@@ -1012,6 +1010,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int irq = gpio_to_irq(gpio);
struct irq_desc *desc = irq_to_desc(irq);
int pullidx = 0;
+ int val;
if (pull)
pullidx = data_out ? 1 : 2;
@@ -1021,6 +1020,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
label ?: "(none)",
pulls[pullidx],
(mode < 0) ? "unknown" : modes[mode]);
+
+ val = nmk_gpio_get_input(chip, offset);
+ seq_printf(s, " VAL %d", val);
+
/*
* This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
@@ -1162,29 +1165,90 @@ void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
}
}
+/*
+ * We will allocate memory for the state container using devm* allocators
+ * binding to the first device reaching this point, it doesn't matter if
+ * it is the pin controller or GPIO driver. However we need to use the right
+ * platform device when looking up resources so pay attention to pdev.
+ */
+static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
+ struct platform_device *pdev)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct platform_device *gpio_pdev;
+ struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ u32 id;
+
+ gpio_pdev = of_find_device_by_node(np);
+ if (!gpio_pdev) {
+ pr_err("populate \"%s\": device not found\n", np->name);
+ return ERR_PTR(-ENODEV);
+ }
+ if (of_property_read_u32(np, "gpio-bank", &id)) {
+ dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Already populated? */
+ nmk_chip = nmk_gpio_chips[id];
+ if (nmk_chip)
+ return nmk_chip;
+
+ nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip)
+ return ERR_PTR(-ENOMEM);
+
+ nmk_chip->bank = id;
+ chip = &nmk_chip->chip;
+ chip->base = id * NMK_GPIO_PER_CHIP;
+ chip->ngpio = NMK_GPIO_PER_CHIP;
+ chip->label = dev_name(&gpio_pdev->dev);
+ chip->dev = &gpio_pdev->dev;
+
+ res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+		return ERR_CAST(base);
+ nmk_chip->addr = base;
+
+ clk = clk_get(&gpio_pdev->dev, NULL);
+ if (IS_ERR(clk))
+		return ERR_CAST(clk);
+ clk_prepare(clk);
+ nmk_chip->clk = clk;
+
+ BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+ nmk_gpio_chips[id] = nmk_chip;
+ return nmk_chip;
+}
+
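As the comment above explains, this function can be reached first from either the pin controller or the GPIO driver; whichever device gets there first allocates the state with devm* helpers and caches it in nmk_gpio_chips[], so the second caller simply receives the cached pointer. The same populate-once shape in isolation (all names invented):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

#define DEMO_MAX_BANKS 9

struct demo_state {
	u32 id;
};

static struct demo_state *demo_banks[DEMO_MAX_BANKS];

static struct demo_state *demo_populate(struct device *dev, u32 id)
{
	struct demo_state *st = demo_banks[id];

	if (st)			/* already populated by the other driver */
		return st;

	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	st->id = id;
	demo_banks[id] = st;
	return st;
}
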
static int nmk_gpio_probe(struct platform_device *dev)
{
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
struct irq_chip *irqchip;
- struct resource *res;
- struct clk *clk;
int latent_irq;
bool supports_sleepmode;
- void __iomem *base;
int irq;
int ret;
+ nmk_chip = nmk_gpio_populate_chip(np, dev);
+ if (IS_ERR(nmk_chip)) {
+ dev_err(&dev->dev, "could not populate nmk chip struct\n");
+ return PTR_ERR(nmk_chip);
+ }
+
if (of_get_property(np, "st,supports-sleepmode", NULL))
supports_sleepmode = true;
else
supports_sleepmode = false;
- if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
- dev_err(&dev->dev, "gpio-bank property not found\n");
- return -EINVAL;
- }
+ /* Correct platform device ID */
+ dev->id = nmk_chip->bank;
irq = platform_get_irq(dev, 0);
if (irq < 0)
@@ -1193,27 +1257,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
/* It's OK for this IRQ not to be present */
latent_irq = platform_get_irq(dev, 1);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&dev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- clk_prepare(clk);
-
- nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
- if (!nmk_chip)
- return -ENOMEM;
-
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->bank = dev->id;
- nmk_chip->clk = clk;
- nmk_chip->addr = base;
nmk_chip->parent_irq = irq;
nmk_chip->latent_parent_irq = latent_irq;
nmk_chip->sleepmode = supports_sleepmode;
@@ -1228,10 +1275,6 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->set = nmk_gpio_set_output;
chip->dbg_show = nmk_gpio_dbg_show;
chip->can_sleep = false;
- chip->base = dev->id * NMK_GPIO_PER_CHIP;
- chip->ngpio = NMK_GPIO_PER_CHIP;
- chip->label = dev_name(&dev->dev);
- chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
irqchip = &nmk_chip->irqchip;
@@ -1253,14 +1296,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
clk_disable(nmk_chip->clk);
chip->of_node = np;
- ret = gpiochip_add(&nmk_chip->chip);
+ ret = gpiochip_add(chip);
if (ret)
return ret;
- BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
-
- nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
-
platform_set_drvdata(dev, nmk_chip);
/*
@@ -1320,35 +1359,40 @@ static int nmk_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static struct pinctrl_gpio_range *
-nmk_match_gpio_range(struct pinctrl_dev *pctldev, unsigned offset)
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
{
- struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
int i;
+ struct nmk_gpio_chip *nmk_gpio;
- for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
- struct pinctrl_gpio_range *range;
-
- range = &npct->soc->gpio_ranges[i];
- if (offset >= range->pin_base &&
- offset <= (range->pin_base + range->npins - 1))
- return range;
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+ nmk_gpio = nmk_gpio_chips[i];
+ if (!nmk_gpio)
+ continue;
+ if (pin >= nmk_gpio->chip.base &&
+ pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+ return nmk_gpio;
}
return NULL;
}
+static struct gpio_chip *find_gc_from_pin(unsigned pin)
+{
+ struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+
+ if (nmk_gpio)
+ return &nmk_gpio->chip;
+ return NULL;
+}
+
static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned offset)
{
- struct pinctrl_gpio_range *range;
- struct gpio_chip *chip;
+ struct gpio_chip *chip = find_gc_from_pin(offset);
- range = nmk_match_gpio_range(pctldev, offset);
- if (!range || !range->gc) {
+ if (!chip) {
seq_printf(s, "invalid pin offset");
return;
}
- chip = range->gc;
nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
}
@@ -1693,25 +1737,16 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
}
for (i = 0; i < g->npins; i++) {
- struct pinctrl_gpio_range *range;
struct nmk_gpio_chip *nmk_chip;
- struct gpio_chip *chip;
unsigned bit;
- range = nmk_match_gpio_range(pctldev, g->pins[i]);
- if (!range) {
+ nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+ if (!nmk_chip) {
dev_err(npct->dev,
"invalid pin offset %d in group %s at index %d\n",
g->pins[i], g->name, i);
goto out_glitch;
}
- if (!range->gc) {
- dev_err(npct->dev, "GPIO chip missing in range for pin offset %d in group %s at index %d\n",
- g->pins[i], g->name, i);
- goto out_glitch;
- }
- chip = range->gc;
- nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
clk_enable(nmk_chip->clk);
@@ -1827,25 +1862,17 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
};
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
struct nmk_gpio_chip *nmk_chip;
- struct pinctrl_gpio_range *range;
- struct gpio_chip *chip;
unsigned bit;
pin_cfg_t cfg;
int pull, slpm, output, val, i;
bool lowemi, gpiomode, sleep;
- range = nmk_match_gpio_range(pctldev, pin);
- if (!range) {
- dev_err(npct->dev, "invalid pin offset %d\n", pin);
- return -EINVAL;
- }
- if (!range->gc) {
- dev_err(npct->dev, "GPIO chip missing in range for pin %d\n",
- pin);
+ nmk_chip = find_nmk_gpio_from_pin(pin);
+ if (!nmk_chip) {
+ dev_err(npct->dev,
+ "invalid pin offset %d\n", pin);
return -EINVAL;
}
- chip = range->gc;
- nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
for (i = 0; i < num_configs; i++) {
/*
@@ -1997,6 +2024,31 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (version == PINCTRL_NMK_DB8540)
nmk_pinctrl_db8540_init(&npct->soc);
+ /*
+ * Since we depend on the GPIO chips to provide clock and register base
+ * for the pin control operations, make sure that we have these
+ * populated before we continue. Follow the phandles to instantiate
+ * them. The GPIO portion of the actual hardware may be probed before
+ * or after this point: it shouldn't matter as the APIs are orthogonal.
+ */
+ for (i = 0; i < NMK_MAX_BANKS; i++) {
+ struct device_node *gpio_np;
+ struct nmk_gpio_chip *nmk_chip;
+
+ gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
+ if (gpio_np) {
+ dev_info(&pdev->dev,
+ "populate NMK GPIO %d \"%s\"\n",
+ i, gpio_np->name);
+ nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
+ if (IS_ERR(nmk_chip))
+ dev_err(&pdev->dev,
+					"could not populate nmk chip struct - continue anyway\n");
+ of_node_put(gpio_np);
+ }
+ }
+
prcm_np = of_parse_phandle(np, "prcm", 0);
if (prcm_np)
npct->prcm_base = of_iomap(prcm_np, 0);
@@ -2011,19 +2063,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
}
}
- /*
- * We need all the GPIO drivers to probe FIRST, or we will not be able
- * to obtain references to the struct gpio_chip * for them, and we
- * need this to proceed.
- */
- for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
- if (!nmk_gpio_chips[npct->soc->gpio_ranges[i].id]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- return -EPROBE_DEFER;
- }
- npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[npct->soc->gpio_ranges[i].id]->chip;
- }
-
nmk_pinctrl_desc.pins = npct->soc->pins;
nmk_pinctrl_desc.npins = npct->soc->npins;
npct->dev = &pdev->dev;
@@ -2034,10 +2073,6 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(npct->pctl);
}
- /* We will handle a range of GPIO pins */
- for (i = 0; i < npct->soc->gpio_num_ranges; i++)
- pinctrl_add_gpio_range(npct->pctl, &npct->soc->gpio_ranges[i]);
-
platform_set_drvdata(pdev, npct);
dev_info(&pdev->dev, "initialized Nomadik pin control driver\n");
@@ -2072,15 +2107,15 @@ static struct platform_driver nmk_pinctrl_driver = {
static int __init nmk_gpio_init(void)
{
- int ret;
+ return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
- ret = platform_driver_register(&nmk_gpio_driver);
- if (ret)
- return ret;
+static int __init nmk_pinctrl_init(void)
+{
return platform_driver_register(&nmk_pinctrl_driver);
}
-
-core_initcall(nmk_gpio_init);
+core_initcall(nmk_pinctrl_init);
MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
MODULE_DESCRIPTION("Nomadik GPIO Driver");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index d8215f1e70c7..30bba2a75a58 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -121,8 +121,6 @@ struct nmk_pingroup {
/**
* struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @gpio_ranges: An array of GPIO ranges for this SoC
- * @gpio_num_ranges: The number of GPIO ranges for this SoC
* @pins: An array describing all pins the pin controller affects.
* All pins which are also GPIOs must be listed first within the
* array, and be numbered identically to the GPIO controller's
@@ -137,8 +135,6 @@ struct nmk_pingroup {
* @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
*/
struct nmk_pinctrl_soc_data {
- struct pinctrl_gpio_range *gpio_ranges;
- unsigned gpio_num_ranges;
const struct pinctrl_pin_desc *pins;
unsigned npins;
const struct nmk_function *functions;
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 1fc09dc20199..29a7bb17a42f 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -61,8 +61,8 @@ int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
const struct pinconf_ops *ops = pctldev->desc->confops;
if (!ops || !ops->pin_config_get) {
- dev_dbg(pctldev->dev, "cannot get pin configuration, missing "
- "pin_config_get() function in driver\n");
+ dev_dbg(pctldev->dev,
+ "cannot get pin configuration, .pin_config_get missing in driver\n");
return -ENOTSUPP;
}
@@ -202,18 +202,34 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
#ifdef CONFIG_DEBUG_FS
-void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
+ unsigned long *configs, unsigned num_configs)
{
- struct pinctrl_dev *pctldev;
const struct pinconf_ops *confops;
int i;
- pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
if (pctldev)
confops = pctldev->desc->confops;
else
confops = NULL;
+ for (i = 0; i < num_configs; i++) {
+ seq_puts(s, "config ");
+ if (confops && confops->pin_config_config_dbg_show)
+ confops->pin_config_config_dbg_show(pctldev, s,
+ configs[i]);
+ else
+ seq_printf(s, "%08lx", configs[i]);
+ seq_puts(s, "\n");
+ }
+}
+
+void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+{
+ struct pinctrl_dev *pctldev;
+
+ pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+
switch (map->type) {
case PIN_MAP_TYPE_CONFIGS_PIN:
seq_printf(s, "pin ");
@@ -227,15 +243,8 @@ void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
seq_printf(s, "%s\n", map->data.configs.group_or_pin);
- for (i = 0; i < map->data.configs.num_configs; i++) {
- seq_printf(s, "config ");
- if (confops && confops->pin_config_config_dbg_show)
- confops->pin_config_config_dbg_show(pctldev, s,
- map->data.configs.configs[i]);
- else
- seq_printf(s, "%08lx", map->data.configs.configs[i]);
- seq_printf(s, "\n");
- }
+ pinconf_show_config(s, pctldev, map->data.configs.configs,
+ map->data.configs.num_configs);
}
void pinconf_show_setting(struct seq_file *s,
@@ -243,9 +252,7 @@ void pinconf_show_setting(struct seq_file *s,
{
struct pinctrl_dev *pctldev = setting->pctldev;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- const struct pinconf_ops *confops = pctldev->desc->confops;
struct pin_desc *desc;
- int i;
switch (setting->type) {
case PIN_MAP_TYPE_CONFIGS_PIN:
@@ -269,17 +276,8 @@ void pinconf_show_setting(struct seq_file *s,
* FIXME: We should really get the pin controller to dump the config
* values, so they can be decoded to something meaningful.
*/
- for (i = 0; i < setting->data.configs.num_configs; i++) {
- seq_printf(s, " ");
- if (confops && confops->pin_config_config_dbg_show)
- confops->pin_config_config_dbg_show(pctldev, s,
- setting->data.configs.configs[i]);
- else
- seq_printf(s, "%08lx",
- setting->data.configs.configs[i]);
- }
-
- seq_printf(s, "\n");
+ pinconf_show_config(s, pctldev, setting->data.configs.configs,
+ setting->data.configs.num_configs);
}
static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
@@ -412,10 +410,8 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
const struct pinctrl_map *map;
const struct pinctrl_map *found = NULL;
struct pinctrl_dev *pctldev;
- const struct pinconf_ops *confops = NULL;
struct dbg_cfg *dbg = &pinconf_dbg_conf;
int i, j;
- unsigned long config;
mutex_lock(&pinctrl_maps_mutex);
@@ -449,16 +445,10 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
}
pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
- config = *found->data.configs.configs;
- seq_printf(s, "Dev %s has config of %s in state %s: 0x%08lX\n",
- dbg->dev_name, dbg->pin_name,
- dbg->state_name, config);
-
- if (pctldev)
- confops = pctldev->desc->confops;
-
- if (confops && confops->pin_config_config_dbg_show)
- confops->pin_config_config_dbg_show(pctldev, s, config);
+ seq_printf(s, "Dev %s has config of %s in state %s:\n",
+ dbg->dev_name, dbg->pin_name, dbg->state_name);
+ pinconf_show_config(s, pctldev, found->data.configs.configs,
+ found->data.configs.num_configs);
exit:
mutex_unlock(&pinctrl_maps_mutex);
@@ -470,10 +460,12 @@ exit:
* pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl
* map, of a dev/pin/state entry based on user entries to pinconf-config
* @user_buf: contains the modification request with expected format:
- * modify config_pin <devicename> <state> <pinname> <newvalue>
+ * modify <config> <devicename> <state> <name> <newvalue>
* modify is literal string, alternatives like add/delete not supported yet
- * config_pin is literal, alternatives like config_mux not supported yet
- * <devicename> <state> <pinname> are values that should match the pinctrl-maps
+ * <config> is the configuration to be changed. Supported configs are
+ * "config_pin" or "config_group", alternatives like config_mux are not
+ * supported yet.
+ * <devicename> <state> <name> are values that should match the pinctrl-maps
* <newvalue> reflects the new config and is driver dependent
*/
static ssize_t pinconf_dbg_config_write(struct file *file,
@@ -511,13 +503,19 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
if (strcmp(token, "modify"))
return -EINVAL;
- /* Get arg type: "config_pin" type supported so far */
+ /*
+ * Get arg type: "config_pin" and "config_group"
+ * types are supported so far
+ */
token = strsep(&b, " ");
if (!token)
return -EINVAL;
- if (strcmp(token, "config_pin"))
+ if (!strcmp(token, "config_pin"))
+ dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
+ else if (!strcmp(token, "config_group"))
+ dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ else
return -EINVAL;
- dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
/* get arg 'device_name' */
token = strsep(&b, " ");
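
With config_group accepted alongside config_pin, a write to the pinconf-config debugfs file can now modify group maps as well as pin maps. A hypothetical invocation from userspace, assuming debugfs is mounted at /sys/kernel/debug and the file sits in the pinctrl debugfs root (the device, state and group names are made up):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/pinctrl/pinconf-config", "w");

	if (!f)
		return 1;
	/* modify <config> <devicename> <state> <name> <newvalue> */
	fprintf(f, "modify config_group pinctrl-foo default i2c0grp 0x4");
	fclose(f);
	return 0;
}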
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index 4cb59fe9be70..fcfa00821f12 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -394,25 +394,25 @@ static const unsigned short ppi2_16b_mux[] = {
static const unsigned short lp0_mux[] = {
P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
- 0
+ 0
};
static const unsigned short lp1_mux[] = {
P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
- 0
+ 0
};
static const unsigned short lp2_mux[] = {
P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
- 0
+ 0
};
static const unsigned short lp3_mux[] = {
P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
- 0
+ 0
};
static const struct adi_pin_group adi_pin_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index c3c3d2345fc6..a5976ebc4482 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -427,10 +427,10 @@ static int adi_gpio_irq_type(struct irq_data *d, unsigned int type)
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
writel(pintmask, &pint_regs->edge_set);
- __irq_set_handler_locked(irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
} else {
writel(pintmask, &pint_regs->edge_clear);
- __irq_set_handler_locked(irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
}
out:
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index d8e3f7c7fea3..5e86bb8ca80e 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -420,7 +420,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
@@ -428,7 +428,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
@@ -436,7 +436,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
@@ -445,7 +445,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
@@ -454,7 +454,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
break;
case IRQ_TYPE_NONE:
@@ -492,8 +492,9 @@ static struct irq_chip amd_gpio_irqchip = {
.irq_set_type = amd_gpio_irq_set_type,
};
-static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
u32 i;
u32 off;
u32 reg;
@@ -501,7 +502,7 @@ static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u64 reg64;
int handled = 0;
unsigned long flags;
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct amd_gpio *gpio_dev = to_amd_gpio(gc);
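
Several hunks in this patch make the same mechanical change: chained GPIO IRQ handlers stop deriving their state from the bare irq number (irq_get_chip(), irq_get_handler_data()) and use the irq_desc accessors instead, which prepares for dropping the irq argument from the handler prototype altogether. The target shape, reduced to a sketch (names invented, demux loop elided):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

static void demo_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/* read the bank status and generic_handle_irq() each set bit */
	chained_irq_exit(chip, desc);
}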
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a0824477072b..bae0012ee356 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = {
static void __iomem *pin_to_controller(struct at91_pinctrl *info,
unsigned int bank)
{
+ if (!gpio_chips[bank])
+ return NULL;
+
return gpio_chips[bank]->regbase;
}
@@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
pin = &pins_conf[i];
at91_pin_dbg(info->dev, pin);
pio = pin_to_controller(info, pin->bank);
+
+ if (!pio)
+ continue;
+
mask = pin_to_mask(pin->pin);
at91_mux_disable_interrupt(pio, mask);
switch (pin->mux) {
@@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
*config = 0;
dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+ if (!pio)
+ return -EINVAL;
+
pin = pin_id % MAX_NB_GPIO_PER_BANK;
if (at91_mux_get_multidrive(pio, pin))
@@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
"%s:%d, pin_id=%d, config=0x%lx",
__func__, __LINE__, pin_id, config);
pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+ if (!pio)
+ return -EINVAL;
+
pin = pin_id % MAX_NB_GPIO_PER_BANK;
mask = pin_to_mask(pin);
@@ -1444,22 +1459,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- __irq_set_handler_locked(d->irq, handle_simple_irq);
+ irq_set_handler_locked(d, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- __irq_set_handler_locked(d->irq, handle_simple_irq);
+ irq_set_handler_locked(d, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1468,7 +1483,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- __irq_set_handler_locked(d->irq, handle_simple_irq);
+ irq_set_handler_locked(d, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
@@ -1488,28 +1503,6 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-static int gpio_irq_request_res(struct irq_data *d)
-{
- struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
- unsigned pin = d->hwirq;
- int ret;
-
- ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
- if (ret)
- dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
- d->hwirq);
-
- return ret;
-}
-
-static void gpio_irq_release_res(struct irq_data *d)
-{
- struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
- unsigned pin = d->hwirq;
-
- gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
-}
-
#ifdef CONFIG_PM
static u32 wakeups[MAX_GPIO_BANKS];
@@ -1585,8 +1578,6 @@ void at91_pinctrl_gpio_resume(void)
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_ack = gpio_irq_ack,
- .irq_request_resources = gpio_irq_request_res,
- .irq_release_resources = gpio_irq_release_res,
.irq_disable = gpio_irq_mask,
.irq_mask = gpio_irq_mask,
.irq_unmask = gpio_irq_unmask,
@@ -1596,7 +1587,7 @@ static struct irq_chip gpio_irqchip = {
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
struct at91_gpio_chip *at91_gpio = container_of(gpio_chip,
struct at91_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 29cbbab8c3a6..3731cc67a88b 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,10 +519,11 @@ static struct irq_chip u300_gpio_irqchip = {
.irq_set_type = u300_gpio_irq_type,
};
-static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
{
- struct irq_chip *parent_chip = irq_get_chip(irq);
- struct gpio_chip *chip = irq_get_handler_data(irq);
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct u300_gpio *gpio = to_u300_gpio(chip);
struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
int pinoffset = port->number << 3; /* get the right stride */
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
new file mode 100644
index 000000000000..461fffc4c62a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -0,0 +1,378 @@
+/*
+ * Driver for Conexant Digicolor General Purpose Pin Mapping
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * TODO:
+ * - GPIO interrupt support
+ * - Pin pad configuration (pull up/down, strength)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME "pinctrl-digicolor"
+
+#define GP_CLIENTSEL(clct) ((clct)*8 + 0x20)
+#define GP_DRIVE0(clct) (GP_CLIENTSEL(clct) + 2)
+#define GP_OUTPUT0(clct) (GP_CLIENTSEL(clct) + 3)
+#define GP_INPUT(clct) (GP_CLIENTSEL(clct) + 6)
+
+#define PIN_COLLECTIONS ('R' - 'A' + 1)
+#define PINS_PER_COLLECTION 8
+#define PINS_COUNT (PIN_COLLECTIONS * PINS_PER_COLLECTION)
+
+struct dc_pinmap {
+ void __iomem *regs;
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+
+ struct pinctrl_desc *desc;
+ const char *pin_names[PINS_COUNT];
+
+ struct gpio_chip chip;
+ spinlock_t lock;
+};
+
+static int dc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return PINS_COUNT;
+}
+
+static const char *dc_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+ /* Exactly one group per pin */
+ return pmap->desc->pins[selector].name;
+}
+
+static int dc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pmap->desc->pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static struct pinctrl_ops dc_pinctrl_ops = {
+ .get_groups_count = dc_get_groups_count,
+ .get_group_name = dc_get_group_name,
+ .get_group_pins = dc_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static const char *const dc_functions[] = {
+ "gpio",
+ "client_a",
+ "client_b",
+ "client_c",
+};
+
+static int dc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(dc_functions);
+}
+
+static const char *dc_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+{
+ return dc_functions[selector];
+}
+
+static int dc_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pmap->pin_names;
+ *num_groups = PINS_COUNT;
+
+ return 0;
+}
+
+static void dc_client_sel(int pin_num, int *reg, int *bit)
+{
+ *bit = (pin_num % PINS_PER_COLLECTION) * 2;
+ *reg = GP_CLIENTSEL(pin_num/PINS_PER_COLLECTION);
+
+ if (*bit >= PINS_PER_COLLECTION) {
+ *bit -= PINS_PER_COLLECTION;
+ *reg += 1;
+ }
+}
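+
+/*
+ * Worked example (editor's illustration, not part of the original patch):
+ * each pin takes two CLIENTSEL bits, so one 8-bit register covers four
+ * pins and an 8-pin collection spans two registers. For GP_A5
+ * (pin_num = 5): bit = (5 % 8) * 2 = 10, which is >= 8, so the field
+ * spills into the second register: reg = GP_CLIENTSEL(0) + 1, bit = 2.
+ */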
+
+static int dc_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+ int bit_off, reg_off;
+ u8 reg;
+
+ dc_client_sel(group, &reg_off, &bit_off);
+
+ reg = readb_relaxed(pmap->regs + reg_off);
+ reg &= ~(3 << bit_off);
+ reg |= (selector << bit_off);
+ writeb_relaxed(reg, pmap->regs + reg_off);
+
+ return 0;
+}
+
+static int dc_pmx_request_gpio(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pcdev);
+ int bit_off, reg_off;
+ u8 reg;
+
+ dc_client_sel(offset, &reg_off, &bit_off);
+
+ reg = readb_relaxed(pmap->regs + reg_off);
+ if ((reg & (3 << bit_off)) != 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static struct pinmux_ops dc_pmxops = {
+ .get_functions_count = dc_get_functions_count,
+ .get_function_name = dc_get_fname,
+ .get_function_groups = dc_get_groups,
+ .set_mux = dc_set_mux,
+ .gpio_request_enable = dc_pmx_request_gpio,
+};
+
+static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ return pinctrl_request_gpio(chip->base + gpio);
+}
+
+static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ pinctrl_free_gpio(chip->base + gpio);
+}
+
+static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+ int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+ int bit_off = gpio % PINS_PER_COLLECTION;
+ u8 drive;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmap->lock, flags);
+ drive = readb_relaxed(pmap->regs + reg_off);
+ drive &= ~BIT(bit_off);
+ writeb_relaxed(drive, pmap->regs + reg_off);
+ spin_unlock_irqrestore(&pmap->lock, flags);
+
+ return 0;
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value);
+
+static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+ int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+ int bit_off = gpio % PINS_PER_COLLECTION;
+ u8 drive;
+ unsigned long flags;
+
+ dc_gpio_set(chip, gpio, value);
+
+ spin_lock_irqsave(&pmap->lock, flags);
+ drive = readb_relaxed(pmap->regs + reg_off);
+ drive |= BIT(bit_off);
+ writeb_relaxed(drive, pmap->regs + reg_off);
+ spin_unlock_irqrestore(&pmap->lock, flags);
+
+ return 0;
+}
+
+static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+ int reg_off = GP_INPUT(gpio/PINS_PER_COLLECTION);
+ int bit_off = gpio % PINS_PER_COLLECTION;
+ u8 input;
+
+ input = readb_relaxed(pmap->regs + reg_off);
+
+ return !!(input & BIT(bit_off));
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+ int reg_off = GP_OUTPUT0(gpio/PINS_PER_COLLECTION);
+ int bit_off = gpio % PINS_PER_COLLECTION;
+ u8 output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmap->lock, flags);
+ output = readb_relaxed(pmap->regs + reg_off);
+ if (value)
+ output |= BIT(bit_off);
+ else
+ output &= ~BIT(bit_off);
+ writeb_relaxed(output, pmap->regs + reg_off);
+ spin_unlock_irqrestore(&pmap->lock, flags);
+}
+
+static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+{
+ struct gpio_chip *chip = &pmap->chip;
+ int ret;
+
+ chip->label = DRIVER_NAME;
+ chip->dev = pmap->dev;
+ chip->request = dc_gpio_request;
+ chip->free = dc_gpio_free;
+ chip->direction_input = dc_gpio_direction_input;
+ chip->direction_output = dc_gpio_direction_output;
+ chip->get = dc_gpio_get;
+ chip->set = dc_gpio_set;
+ chip->base = -1;
+ chip->ngpio = PINS_COUNT;
+ chip->of_node = np;
+ chip->of_gpio_n_cells = 2;
+
+ spin_lock_init(&pmap->lock);
+
+ ret = gpiochip_add(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(pmap->dev), 0, 0,
+ PINS_COUNT);
+ if (ret < 0) {
+ gpiochip_remove(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dc_pinctrl_probe(struct platform_device *pdev)
+{
+ struct dc_pinmap *pmap;
+ struct resource *r;
+ struct pinctrl_pin_desc *pins;
+ struct pinctrl_desc *pctl_desc;
+ char *pin_names;
+ int name_len = strlen("GP_xx") + 1;
+ int i, j, ret;
+
+ pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
+ if (!pmap)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pmap->regs))
+ return PTR_ERR(pmap->regs);
+
+ pins = devm_kzalloc(&pdev->dev, sizeof(*pins)*PINS_COUNT, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+ pin_names = devm_kzalloc(&pdev->dev, name_len * PINS_COUNT,
+ GFP_KERNEL);
+ if (!pin_names)
+ return -ENOMEM;
+
+ for (i = 0; i < PIN_COLLECTIONS; i++) {
+ for (j = 0; j < PINS_PER_COLLECTION; j++) {
+ int pin_id = i*PINS_PER_COLLECTION + j;
+ char *name = &pin_names[pin_id * name_len];
+
+ snprintf(name, name_len, "GP_%c%c", 'A'+i, '0'+j);
+
+ pins[pin_id].number = pin_id;
+ pins[pin_id].name = name;
+ pmap->pin_names[pin_id] = name;
+ }
+ }
+
+ pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+ if (!pctl_desc)
+ return -ENOMEM;
+
+ pctl_desc->name = DRIVER_NAME;
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &dc_pinctrl_ops;
+ pctl_desc->pmxops = &dc_pmxops;
+ pctl_desc->npins = PINS_COUNT;
+ pctl_desc->pins = pins;
+ pmap->desc = pctl_desc;
+
+ pmap->dev = &pdev->dev;
+
+ pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+ if (!pmap->pctl) {
+ dev_err(&pdev->dev, "pinctrl driver registration failed\n");
+ return -EINVAL;
+ }
+
+ ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
+ if (ret < 0) {
+ pinctrl_unregister(pmap->pctl);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dc_pinctrl_remove(struct platform_device *pdev)
+{
+ struct dc_pinmap *pmap = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pmap->pctl);
+ gpiochip_remove(&pmap->chip);
+
+ return 0;
+}
+
+static const struct of_device_id dc_pinctrl_ids[] = {
+ { .compatible = "cnxt,cx92755-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pinctrl_ids);
+
+static struct platform_driver dc_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dc_pinctrl_ids,
+ },
+ .probe = dc_pinctrl_probe,
+ .remove = dc_pinctrl_remove,
+};
+module_platform_driver(dc_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..f0bebbe0682b 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -37,6 +37,9 @@
#define LPC18XX_SCU_PIN_EHD_MASK 0x300
#define LPC18XX_SCU_PIN_EHD_POS 8
+#define LPC18XX_SCU_USB1_EPD BIT(2)
+#define LPC18XX_SCU_USB1_EPWR BIT(4)
+
#define LPC18XX_SCU_I2C0_EFP BIT(0)
#define LPC18XX_SCU_I2C0_EHD BIT(2)
#define LPC18XX_SCU_I2C0_EZI BIT(3)
@@ -617,8 +620,31 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = {
static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
{
- /* TODO */
- return -ENOTSUPP;
+ switch (param) {
+ case PIN_CONFIG_LOW_POWER_MODE:
+ if (reg & LPC18XX_SCU_USB1_EPWR)
+ *arg = 0;
+ else
+ *arg = 1;
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (reg & LPC18XX_SCU_USB1_EPD)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (reg & LPC18XX_SCU_USB1_EPD)
+ *arg = 1;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static int lpc18xx_pconf_get_i2c0(enum pin_config_param param, int *arg, u32 reg,
@@ -782,8 +808,28 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
enum pin_config_param param,
u16 param_val, u32 *reg)
{
- /* TODO */
- return -ENOTSUPP;
+ switch (param) {
+ case PIN_CONFIG_LOW_POWER_MODE:
+ if (param_val)
+ *reg &= ~LPC18XX_SCU_USB1_EPWR;
+ else
+ *reg |= LPC18XX_SCU_USB1_EPWR;
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ *reg &= ~LPC18XX_SCU_USB1_EPD;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *reg |= LPC18XX_SCU_USB1_EPD;
+ break;
+
+ default:
+ dev_err(pctldev->dev, "Property not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
@@ -823,7 +869,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
else
*reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +922,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- if (param)
+ if (param_val)
*reg &= ~LPC18XX_SCU_PIN_ZIF;
else
*reg |= LPC18XX_SCU_PIN_ZIF;
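
A note on the pinconf convention the lpc18xx hunks rely on: for boolean
options such as PIN_CONFIG_BIAS_DISABLE, a get callback returns 0 when the
option is currently active and -EINVAL when it is not, which is how the
generic pinconf code decides what to report. A summary of the USB1 pad
bits as handled above (editor's restatement, not a literal excerpt):

	/*
	 * EPD clear  -> no bias:      BIAS_DISABLE reads back, PULL_DOWN is -EINVAL
	 * EPD set    -> pull-down:    PULL_DOWN reads back with arg = 1
	 * EPWR clear -> powered down: LOW_POWER_MODE reads back with arg = 1
	 */
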
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 63100be81015..3dc2ae15f3a1 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1310,9 +1310,11 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type)
return 0;
}
-static void pistachio_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(unsigned int __irq,
+ struct irq_desc *desc)
{
- struct gpio_chip *gc = irq_get_handler_data(irq);
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct pistachio_gpio_bank *bank = gc_to_bank(gc);
struct irq_chip *chip = irq_get_chip(irq);
unsigned long pending;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 9affcd725776..c5246c05f70c 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -945,6 +945,7 @@ static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
+ clk_enable(bank->clk);
spin_lock_irqsave(&bank->slock, flags);
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -956,6 +957,7 @@ static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
return 0;
}
@@ -1389,6 +1391,7 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
unsigned long flags;
u32 data;
+ clk_enable(bank->clk);
spin_lock_irqsave(&bank->slock, flags);
data = readl(reg);
@@ -1398,6 +1401,7 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
writel(data, reg);
spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
}
/*
@@ -1409,7 +1413,9 @@ static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset)
struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
u32 data;
+ clk_enable(bank->clk);
data = readl(bank->reg_base + GPIO_EXT_PORT);
+ clk_disable(bank->clk);
data >>= offset;
data &= 1;
return data;
@@ -1469,10 +1475,10 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
* Interrupt handling
*/
-static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
u32 pend;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1482,7 +1488,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
while (pend) {
- unsigned int virq;
+ unsigned int irq, virq;
irq = __ffs(pend);
pend &= ~BIT(irq);
@@ -1546,6 +1552,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
if (ret < 0)
return ret;
+ clk_enable(bank->clk);
spin_lock_irqsave(&bank->slock, flags);
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -1555,9 +1562,9 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
spin_unlock_irqrestore(&bank->slock, flags);
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
spin_lock_irqsave(&bank->slock, flags);
irq_gc_lock(gc);
@@ -1603,6 +1610,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
default:
irq_gc_unlock(gc);
spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
return -EINVAL;
}
@@ -1611,6 +1619,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
irq_gc_unlock(gc);
spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
return 0;
}
@@ -1620,8 +1629,10 @@ static void rockchip_irq_suspend(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct rockchip_pin_bank *bank = gc->private;
+ clk_enable(bank->clk);
bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
+ clk_disable(bank->clk);
}
static void rockchip_irq_resume(struct irq_data *d)
@@ -1629,7 +1640,27 @@ static void rockchip_irq_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct rockchip_pin_bank *bank = gc->private;
+ clk_enable(bank->clk);
irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
+ clk_disable(bank->clk);
+}
+
+static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ clk_enable(bank->clk);
+ irq_gc_mask_clr_bit(d);
+}
+
+void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ irq_gc_mask_set_bit(d);
+ clk_disable(bank->clk);
}
static int rockchip_interrupts_register(struct platform_device *pdev,
@@ -1640,7 +1671,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
int ret;
- int i;
+ int i, j;
for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
if (!bank->valid) {
@@ -1649,11 +1680,19 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
continue;
}
+ ret = clk_enable(bank->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
+ bank->name);
+ continue;
+ }
+
bank->domain = irq_domain_add_linear(bank->of_node, 32,
&irq_generic_chip_ops, NULL);
if (!bank->domain) {
dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
bank->name);
+ clk_disable(bank->clk);
continue;
}
@@ -1664,6 +1703,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
bank->name);
irq_domain_remove(bank->domain);
+ clk_disable(bank->clk);
continue;
}
@@ -1681,16 +1721,23 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc->chip_types[0].regs.mask = GPIO_INTMASK;
gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_mask = rockchip_irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask =
+ rockchip_irq_gc_mask_clr_bit;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
- irq_set_handler_data(bank->irq, bank);
- irq_set_chained_handler(bank->irq, rockchip_irq_demux);
+ irq_set_chained_handler_and_data(bank->irq,
+ rockchip_irq_demux, bank);
+
+ /* map the gpio irqs here, while the clock is still running */
+ for (j = 0 ; j < 32 ; j++)
+ irq_create_mapping(bank->domain, j);
+
+ clk_disable(bank->clk);
}
return 0;
@@ -1808,7 +1855,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
if (IS_ERR(bank->clk))
return PTR_ERR(bank->clk);
- return clk_prepare_enable(bank->clk);
+ return clk_prepare(bank->clk);
}
static const struct of_device_id rockchip_pinctrl_dt_match[];
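
The rockchip hunks above split clk_prepare_enable() at probe into a single
clk_prepare() plus clk_enable()/clk_disable() pairs around each register
access. clk_prepare() may sleep, so it is done once; clk_enable() and
clk_disable() are atomic and may be called in spinlocked paths, so the
bank clock can stay gated whenever the hardware is idle. A minimal sketch
of the discipline (illustrative, not a literal excerpt):

	ret = clk_prepare(bank->clk);	/* probe path: may sleep, done once */
	if (ret)
		return ret;

	/* around any later register access: */
	clk_enable(bank->clk);		/* atomic, safe under the bank spinlock */
	val = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
	clk_disable(bank->clk);

Note also that the mappings for all 32 gpio irqs per bank are created up
front, while the clock is still enabled, rather than lazily on first use.
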
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..bf548c2a7a9d 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1684,7 +1684,7 @@ static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
struct irq_chip *chip;
- chip = irq_get_chip(irq);
+ chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
pcs_irq_handle(pcs_soc);
/* REVISIT: export and add handle_bad_irq(irq, desc)? */
@@ -1716,12 +1716,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, pcs_soc);
irq_set_chip_and_handler(irq, &pcs->chip,
handle_level_irq);
-
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
irq_set_noprobe(irq);
-#endif
return 0;
}
@@ -1760,16 +1755,17 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
int res;
res = request_irq(pcs_soc->irq, pcs_irq_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD,
name, pcs_soc);
if (res) {
pcs_soc->irq = -1;
return res;
}
} else {
- irq_set_handler_data(pcs_soc->irq, pcs_soc);
- irq_set_chained_handler(pcs_soc->irq,
- pcs_irq_chain_handler);
+ irq_set_chained_handler_and_data(pcs_soc->irq,
+ pcs_irq_chain_handler,
+ pcs_soc);
}
/*
@@ -1982,7 +1978,6 @@ static const struct pcs_soc_data pinctrl_single_omap_wkup = {
};
static const struct pcs_soc_data pinctrl_single_dra7 = {
- .flags = PCS_QUIRK_SHARED_IRQ,
.irq_enable_mask = (1 << 24), /* WAKEUPENABLE */
.irq_status_mask = (1 << 25), /* WAKEUPEVENT */
};
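
Two tree-wide 4.3 conversions meet in the pinctrl-single hunks. First,
set_irq_flags(irq, IRQF_VALID) was an ARM-only way of marking an irq as
requestable; that is now the core default, so the unconditional
irq_set_noprobe() is all that remains. Second, the shared wakeup interrupt
gains IRQF_NO_THREAD, which exempts it from forced threading (the
threadirqs boot parameter); a handler that fans out wakeup events should
stay in hard-irq context. Sketch of the resulting request (illustrative):

	res = request_irq(pcs_soc->irq, pcs_irq_handler,
			  IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_NO_THREAD,
			  name, pcs_soc);
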
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index c262e5f35c28..f8338d2e6b6b 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1463,7 +1463,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
/* interrupt dedicated per bank */
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct st_gpio_bank *bank = gpio_chip_to_bank(gc);
@@ -1474,8 +1474,8 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct st_pinctrl *info = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct st_pinctrl *info = irq_desc_get_handler_data(desc);
unsigned long status;
int n;
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 0f982b829be1..0fd7fd2b0f72 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -624,6 +624,22 @@ static struct pinctrl_desc tegra_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static bool gpio_node_has_range(void)
+{
+ struct device_node *np;
+ bool has_prop = false;
+
+ np = of_find_compatible_node(NULL, NULL, "nvidia,tegra30-gpio");
+ if (!np)
+ return has_prop;
+
+ has_prop = of_find_property(np, "gpio-ranges", NULL);
+
+ of_node_put(np);
+
+ return has_prop;
+}
+
int tegra_pinctrl_probe(struct platform_device *pdev,
const struct tegra_pinctrl_soc_data *soc_data)
{
@@ -708,7 +724,8 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pmx->pctl);
}
- pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
+ if (!gpio_node_has_range())
+ pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 7ce23b6282ad..5aafea8c6590 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -706,10 +706,10 @@ static const char * const sdio1_wp_groups[] = {"gpio0_0_grp",
"gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
"gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
"gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"};
-static const char * const smc0_nor_groups[] = {"smc0_nor"};
+static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
-static const char * const smc0_nand_groups[] = {"smc0_nand"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
"can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
"can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index e7ae890dcf1a..67e08cb315c4 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -322,8 +322,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
selector++;
}
- pr_err("%s does not support function %s\n",
- pinctrl_dev_get_name(pctldev), function);
+ dev_err(pctldev->dev, "function '%s' not supported\n", function);
return -EINVAL;
}
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 58f5632b27f4..383263a92e59 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -63,6 +63,14 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_QDF2XXX
+ tristate "Qualcomm Technologies QDF2xxx pin controller driver"
+ depends on GPIOLIB && ACPI
+ select PINCTRL_MSM
+ help
+ This is the GPIO driver for the TLMM block found on the
+ Qualcomm Technologies QDF2xxx SoCs.
+
config PINCTRL_QCOM_SPMI_PMIC
tristate "Qualcomm SPMI PMIC pin controller driver"
depends on GPIOLIB && OF && SPMI
@@ -76,4 +84,16 @@ config PINCTRL_QCOM_SPMI_PMIC
which are using SPMI for communication with SoC. Example PMIC's
devices are pm8841, pm8941 and pma8084.
+config PINCTRL_QCOM_SSBI_PMIC
+ tristate "Qualcomm SSBI PMIC pin controller driver"
+ depends on GPIOLIB && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips that
+ use SSBI for communication with the SoC. Examples of such PMIC
+ devices are pm8058 and pm8921.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 3666c703ce88..13b190e72c21 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -7,5 +7,8 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e457d52302a2..492cdd51dc5c 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/reboot.h>
+#include <linux/pm.h>
#include "../core.h"
#include "../pinconf.h"
@@ -733,9 +734,9 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
spin_unlock_irqrestore(&pctrl->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
return 0;
}
@@ -764,12 +765,13 @@ static struct irq_chip msm_gpio_irq_chip = {
.irq_set_wake = msm_gpio_irq_set_wake,
};
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int irq_pin;
int handled = 0;
u32 val;
@@ -855,6 +857,13 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
+static struct msm_pinctrl *poweroff_pctrl;
+
+static void msm_ps_hold_poweroff(void)
+{
+ msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+}
+
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
{
int i;
@@ -867,6 +876,8 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
if (register_restart_handler(&pctrl->restart_nb))
dev_err(pctrl->dev,
"failed to setup restart handler.\n");
+ poweroff_pctrl = pctrl;
+ pm_power_off = msm_ps_hold_poweroff;
break;
}
}
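
The msm hunks also route system poweroff through the same PS_HOLD
mechanism used for restart. Since pm_power_off is a bare global function
pointer that takes no argument, the driver parks its context in a
file-scope variable first; a minimal sketch of the idiom, with
hypothetical names (foo_* is not from this patch):

	static struct foo_pinctrl *poweroff_ctx;	/* set once, at setup time */

	static void foo_poweroff(void)
	{
		foo_do_restart(&poweroff_ctx->restart_nb, 0, NULL);
	}

	/* during setup: */
	poweroff_ctx = pctrl;
	pm_power_off = foo_poweroff;
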
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
new file mode 100644
index 000000000000..e9ff3bc150bb
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * GPIO and pin control functions on this SOC are handled by the "TLMM"
+ * device. The driver which controls this device is pinctrl-msm.c. Each
+ * SOC with a TLMM is expected to create a client driver that registers
+ * with pinctrl-msm.c. This means that all TLMM drivers are pin control
+ * drivers.
+ *
+ * This pin control driver is intended to be used only on an ACPI-enabled
+ * system. As such, UEFI will handle all pin control configuration, so
+ * this driver does not provide pin control functions. It is effectively
+ * a GPIO-only driver. The alternative is to duplicate the GPIO code of
+ * pinctrl-msm.c into another driver.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/acpi.h>
+
+#include "pinctrl-msm.h"
+
+static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
+
+static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct pinctrl_pin_desc *pins;
+ struct msm_pingroup *groups;
+ unsigned int i;
+ u32 num_gpios;
+ int ret;
+
+ /* Query the number of GPIOs from ACPI */
+ ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
+ if (ret < 0)
+ return ret;
+
+ if (!num_gpios) {
+ dev_warn(&pdev->dev, "missing num-gpios property\n");
+ return -ENODEV;
+ }
+
+ pins = devm_kcalloc(&pdev->dev, num_gpios,
+ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+ groups = devm_kcalloc(&pdev->dev, num_gpios,
+ sizeof(struct msm_pingroup), GFP_KERNEL);
+ if (!pins || !groups)
+ return -ENOMEM;
+
+ for (i = 0; i < num_gpios; i++) {
+ pins[i].number = i;
+
+ groups[i].npins = 1;
+ groups[i].pins = &pins[i].number;
+ groups[i].ctl_reg = 0x10000 * i;
+ groups[i].io_reg = 0x04 + 0x10000 * i;
+ groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
+ groups[i].intr_status_reg = 0x0c + 0x10000 * i;
+ groups[i].intr_target_reg = 0x08 + 0x10000 * i;
+
+ groups[i].mux_bit = 2;
+ groups[i].pull_bit = 0;
+ groups[i].drv_bit = 6;
+ groups[i].oe_bit = 9;
+ groups[i].in_bit = 0;
+ groups[i].out_bit = 1;
+ groups[i].intr_enable_bit = 0;
+ groups[i].intr_status_bit = 0;
+ groups[i].intr_target_bit = 5;
+ groups[i].intr_target_kpss_val = 1;
+ groups[i].intr_raw_status_bit = 4;
+ groups[i].intr_polarity_bit = 1;
+ groups[i].intr_detection_bit = 2;
+ groups[i].intr_detection_width = 2;
+ }
+
+ qdf2xxx_pinctrl.pins = pins;
+ qdf2xxx_pinctrl.groups = groups;
+ qdf2xxx_pinctrl.npins = num_gpios;
+ qdf2xxx_pinctrl.ngroups = num_gpios;
+ qdf2xxx_pinctrl.ngpios = num_gpios;
+
+ return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+}
+
+static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
+ {"QCOM8001"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
+
+static struct platform_driver qdf2xxx_pinctrl_driver = {
+ .driver = {
+ .name = "qdf2xxx-pinctrl",
+ .acpi_match_table = ACPI_PTR(qdf2xxx_acpi_ids),
+ },
+ .probe = qdf2xxx_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init qdf2xxx_pinctrl_init(void)
+{
+ return platform_driver_register(&qdf2xxx_pinctrl_driver);
+}
+arch_initcall(qdf2xxx_pinctrl_init);
+
+static void __exit qdf2xxx_pinctrl_exit(void)
+{
+ platform_driver_unregister(&qdf2xxx_pinctrl_driver);
+}
+module_exit(qdf2xxx_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies QDF2xxx pin control driver");
+MODULE_LICENSE("GPL v2");
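
One detail worth calling out in the qdf2xxx probe above: each GPIO owns a
private 64 KiB register window, so every per-group offset is n * 0x10000
plus a small constant. The implied layout, read off the probe code (not
independently verified against hardware documentation):

	ctl_reg         = n * 0x10000 + 0x00
	io_reg          = n * 0x10000 + 0x04
	intr_cfg_reg    = n * 0x10000 + 0x08	/* also holds the target bits */
	intr_status_reg = n * 0x10000 + 0x0c
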
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 3121de9b6331..e3be3ce2cada 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -61,7 +61,9 @@
#define PMIC_MPP_REG_DIG_PULL_CTL 0x42
#define PMIC_MPP_REG_DIG_IN_CTL 0x43
#define PMIC_MPP_REG_EN_CTL 0x46
+#define PMIC_MPP_REG_AOUT_CTL 0x48
#define PMIC_MPP_REG_AIN_CTL 0x4a
+#define PMIC_MPP_REG_SINK_CTL 0x4c
/* PMIC_MPP_REG_MODE_CTL */
#define PMIC_MPP_REG_MODE_VALUE_MASK 0x1
@@ -85,11 +87,25 @@
#define PMIC_MPP_REG_AIN_ROUTE_SHIFT 0
#define PMIC_MPP_REG_AIN_ROUTE_MASK 0x7
+#define PMIC_MPP_MODE_DIGITAL_INPUT 0
+#define PMIC_MPP_MODE_DIGITAL_OUTPUT 1
+#define PMIC_MPP_MODE_DIGITAL_BIDIR 2
+#define PMIC_MPP_MODE_ANALOG_BIDIR 3
+#define PMIC_MPP_MODE_ANALOG_INPUT 4
+#define PMIC_MPP_MODE_ANALOG_OUTPUT 5
+#define PMIC_MPP_MODE_CURRENT_SINK 6
+
+#define PMIC_MPP_SELECTOR_NORMAL 0
+#define PMIC_MPP_SELECTOR_PAIRED 1
+#define PMIC_MPP_SELECTOR_DTEST_FIRST 4
+
#define PMIC_MPP_PHYSICAL_OFFSET 1
/* Qualcomm specific pin configurations */
#define PMIC_MPP_CONF_AMUX_ROUTE (PIN_CONFIG_END + 1)
-#define PMIC_MPP_CONF_ANALOG_MODE (PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_ANALOG_LEVEL (PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_DTEST_SELECTOR (PIN_CONFIG_END + 3)
+#define PMIC_MPP_CONF_PAIRED (PIN_CONFIG_END + 4)
/**
* struct pmic_mpp_pad - keep current MPP settings
@@ -99,13 +115,15 @@
* @out_value: Cached pin output value.
* @output_enabled: Set to true if MPP output logic is enabled.
* @input_enabled: Set to true if MPP input buffer logic is enabled.
- * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
- * Output or Bidirectional Analog mode.
+ * @paired: Pin operates in paired mode
* @num_sources: Number of power-sources supported by this MPP.
* @power_source: Current power-source used.
* @amux_input: Set the source for analog input.
+ * @aout_level: Analog output level
* @pullup: Pullup resistor value. Valid in Bidirectional mode only.
* @function: See pmic_mpp_functions[].
+ * @drive_strength: Amount of current in sink mode
+ * @dtest: DTEST route selector
*/
struct pmic_mpp_pad {
u16 base;
@@ -114,12 +132,15 @@ struct pmic_mpp_pad {
bool out_value;
bool output_enabled;
bool input_enabled;
- bool analog_mode;
+ bool paired;
unsigned int num_sources;
unsigned int power_source;
unsigned int amux_input;
+ unsigned int aout_level;
unsigned int pullup;
unsigned int function;
+ unsigned int drive_strength;
+ unsigned int dtest;
};
struct pmic_mpp_state {
@@ -129,25 +150,32 @@ struct pmic_mpp_state {
struct gpio_chip chip;
};
-struct pmic_mpp_bindings {
- const char *property;
- unsigned param;
+static const struct pinconf_generic_params pmic_mpp_bindings[] = {
+ {"qcom,amux-route", PMIC_MPP_CONF_AMUX_ROUTE, 0},
+ {"qcom,analog-level", PMIC_MPP_CONF_ANALOG_LEVEL, 0},
+ {"qcom,dtest", PMIC_MPP_CONF_DTEST_SELECTOR, 0},
+ {"qcom,paired", PMIC_MPP_CONF_PAIRED, 0},
};
-static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
- {"qcom,amux-route", PMIC_MPP_CONF_AMUX_ROUTE},
- {"qcom,analog-mode", PMIC_MPP_CONF_ANALOG_MODE},
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pmic_conf_items[] = {
+ PCONFDUMP(PMIC_MPP_CONF_AMUX_ROUTE, "analog mux", NULL, true),
+ PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
+ PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
+ PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
};
+#endif
static const char *const pmic_mpp_groups[] = {
"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
};
+#define PMIC_MPP_DIGITAL 0
+#define PMIC_MPP_ANALOG 1
+#define PMIC_MPP_SINK 2
+
static const char *const pmic_mpp_functions[] = {
- PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
- "reserved1", "reserved2",
- PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
- PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
+ "digital", "analog", "sink"
};
static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
@@ -204,118 +232,11 @@ static int pmic_mpp_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static int pmic_mpp_parse_dt_config(struct device_node *np,
- struct pinctrl_dev *pctldev,
- unsigned long **configs,
- unsigned int *nconfs)
-{
- struct pmic_mpp_bindings *par;
- unsigned long cfg;
- int ret, i;
- u32 val;
-
- for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
- par = &pmic_mpp_bindings[i];
- ret = of_property_read_u32(np, par->property, &val);
-
- /* property not found */
- if (ret == -EINVAL)
- continue;
-
- /* use zero as default value, when no value is specified */
- if (ret)
- val = 0;
-
- dev_dbg(pctldev->dev, "found %s with value %u\n",
- par->property, val);
-
- cfg = pinconf_to_config_packed(par->param, val);
-
- ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned *reserv, unsigned *nmaps,
- enum pinctrl_map_type type)
-{
- unsigned long *configs = NULL;
- unsigned nconfs = 0;
- struct property *prop;
- const char *group;
- int ret;
-
- ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
- if (ret < 0)
- return ret;
-
- if (!nconfs)
- return 0;
-
- ret = of_property_count_strings(np, "pins");
- if (ret < 0)
- goto exit;
-
- ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
- if (ret < 0)
- goto exit;
-
- of_property_for_each_string(np, "pins", prop, group) {
- ret = pinctrl_utils_add_map_configs(pctldev, map,
- reserv, nmaps, group,
- configs, nconfs, type);
- if (ret < 0)
- break;
- }
-exit:
- kfree(configs);
- return ret;
-}
-
-static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map, unsigned *nmaps)
-{
- struct device_node *np;
- enum pinctrl_map_type type;
- unsigned reserv;
- int ret;
-
- ret = 0;
- *map = NULL;
- *nmaps = 0;
- reserv = 0;
- type = PIN_MAP_TYPE_CONFIGS_GROUP;
-
- for_each_child_of_node(np_config, np) {
- ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
- &reserv, nmaps, type);
- if (ret)
- break;
-
- ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
- nmaps, type);
- if (ret)
- break;
- }
-
- if (ret < 0)
- pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
-
- return ret;
-}
-
static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
.get_groups_count = pmic_mpp_get_groups_count,
.get_group_name = pmic_mpp_get_group_name,
.get_group_pins = pmic_mpp_get_group_pins,
- .dt_node_to_map = pmic_mpp_dt_node_to_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinctrl_utils_dt_free_map,
};
@@ -340,6 +261,53 @@ static int pmic_mpp_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
+static int pmic_mpp_write_mode_ctl(struct pmic_mpp_state *state,
+ struct pmic_mpp_pad *pad)
+{
+ unsigned int mode;
+ unsigned int sel;
+ unsigned int val;
+ unsigned int en;
+
+ switch (pad->function) {
+ case PMIC_MPP_ANALOG:
+ if (pad->input_enabled && pad->output_enabled)
+ mode = PMIC_MPP_MODE_ANALOG_BIDIR;
+ else if (pad->input_enabled)
+ mode = PMIC_MPP_MODE_ANALOG_INPUT;
+ else
+ mode = PMIC_MPP_MODE_ANALOG_OUTPUT;
+ break;
+ case PMIC_MPP_DIGITAL:
+ if (pad->input_enabled && pad->output_enabled)
+ mode = PMIC_MPP_MODE_DIGITAL_BIDIR;
+ else if (pad->input_enabled)
+ mode = PMIC_MPP_MODE_DIGITAL_INPUT;
+ else
+ mode = PMIC_MPP_MODE_DIGITAL_OUTPUT;
+ break;
+ case PMIC_MPP_SINK:
+ default:
+ mode = PMIC_MPP_MODE_CURRENT_SINK;
+ break;
+ }
+
+ if (pad->dtest)
+ sel = PMIC_MPP_SELECTOR_DTEST_FIRST + pad->dtest - 1;
+ else if (pad->paired)
+ sel = PMIC_MPP_SELECTOR_PAIRED;
+ else
+ sel = PMIC_MPP_SELECTOR_NORMAL;
+
+ en = !!pad->out_value;
+
+ val = mode << PMIC_MPP_REG_MODE_DIR_SHIFT |
+ sel << PMIC_MPP_REG_MODE_FUNCTION_SHIFT |
+ en;
+
+ return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+}
+
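+/*
+ * Worked example of the encoding above (editor's illustration, not part
+ * of the original patch): an analog-input MPP routed to DTEST2 with a
+ * low output value gives
+ *   mode = PMIC_MPP_MODE_ANALOG_INPUT (4)
+ *   sel  = PMIC_MPP_SELECTOR_DTEST_FIRST + 2 - 1 = 5
+ *   val  = (4 << PMIC_MPP_REG_MODE_DIR_SHIFT) |
+ *          (5 << PMIC_MPP_REG_MODE_FUNCTION_SHIFT) | 0
+ */
+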
static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
unsigned pin)
{
@@ -352,31 +320,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pad->function = function;
- if (!pad->analog_mode) {
- val = 0; /* just digital input */
- if (pad->output_enabled) {
- if (pad->input_enabled)
- val = 2; /* digital input and output */
- else
- val = 1; /* just digital output */
- }
- } else {
- val = 4; /* just analog input */
- if (pad->output_enabled) {
- if (pad->input_enabled)
- val = 3; /* analog input and output */
- else
- val = 5; /* just analog output */
- }
- }
-
- val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
- val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
- val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
-
- ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
- if (ret < 0)
- return ret;
+ ret = pmic_mpp_write_mode_ctl(state, pad);
val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
@@ -433,11 +377,20 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
break;
+ case PMIC_MPP_CONF_DTEST_SELECTOR:
+ arg = pad->dtest;
+ break;
case PMIC_MPP_CONF_AMUX_ROUTE:
arg = pad->amux_input;
break;
- case PMIC_MPP_CONF_ANALOG_MODE:
- arg = pad->analog_mode;
+ case PMIC_MPP_CONF_PAIRED:
+ arg = pad->paired;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = pad->drive_strength;
+ break;
+ case PMIC_MPP_CONF_ANALOG_LEVEL:
+ arg = pad->aout_level;
break;
default:
return -EINVAL;
@@ -459,6 +412,9 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad = pctldev->desc->pins[pin].drv_data;
+ /* Make it possible to enable the pin, by not setting high impedance */
+ pad->is_enabled = true;
+
for (i = 0; i < nconfs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -497,13 +453,22 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad->output_enabled = true;
pad->out_value = arg;
break;
+ case PMIC_MPP_CONF_DTEST_SELECTOR:
+ pad->dtest = arg;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ pad->drive_strength = arg;
+ break;
case PMIC_MPP_CONF_AMUX_ROUTE:
if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
return -EINVAL;
pad->amux_input = arg;
break;
- case PMIC_MPP_CONF_ANALOG_MODE:
- pad->analog_mode = true;
+ case PMIC_MPP_CONF_ANALOG_LEVEL:
+ pad->aout_level = arg;
+ break;
+ case PMIC_MPP_CONF_PAIRED:
+ pad->paired = !!arg;
break;
default:
return -EINVAL;
@@ -528,29 +493,17 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
- if (!pad->analog_mode) {
- val = 0; /* just digital input */
- if (pad->output_enabled) {
- if (pad->input_enabled)
- val = 2; /* digital input and output */
- else
- val = 1; /* just digital output */
- }
- } else {
- val = 4; /* just analog input */
- if (pad->output_enabled) {
- if (pad->input_enabled)
- val = 3; /* analog input and output */
- else
- val = 5; /* just analog output */
- }
- }
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AOUT_CTL, pad->aout_level);
+ if (ret < 0)
+ return ret;
+
+ ret = pmic_mpp_write_mode_ctl(state, pad);
+ if (ret < 0)
+ return ret;
- val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
- val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
- val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+ val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
- return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+ return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
}
static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -558,20 +511,17 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
{
struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
struct pmic_mpp_pad *pad;
- int ret, val;
+ int ret;
static const char *const biases[] = {
"0.6kOhm", "10kOhm", "30kOhm", "Disabled"
};
-
pad = pctldev->desc->pins[pin].drv_data;
seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
- val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
-
- if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
+ if (!pad->is_enabled) {
seq_puts(s, " ---");
} else {
@@ -585,15 +535,20 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
}
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
- seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
seq_printf(s, " vin-%d", pad->power_source);
+ seq_printf(s, " %d", pad->aout_level);
seq_printf(s, " %-8s", biases[pad->pullup]);
seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ if (pad->dtest)
+ seq_printf(s, " dtest%d", pad->dtest);
+ if (pad->paired)
+ seq_puts(s, " paired");
}
}
static const struct pinconf_ops pmic_mpp_pinconf_ops = {
+ .is_generic = true,
.pin_config_group_get = pmic_mpp_config_get,
.pin_config_group_set = pmic_mpp_config_set,
.pin_config_group_dbg_show = pmic_mpp_config_dbg_show,
@@ -709,6 +664,7 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
struct pmic_mpp_pad *pad)
{
int type, subtype, val, dir;
+ unsigned int sel;
type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
if (type < 0)
@@ -751,43 +707,53 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
dir &= PMIC_MPP_REG_MODE_DIR_MASK;
switch (dir) {
- case 0:
+ case PMIC_MPP_MODE_DIGITAL_INPUT:
pad->input_enabled = true;
pad->output_enabled = false;
- pad->analog_mode = false;
+ pad->function = PMIC_MPP_DIGITAL;
break;
- case 1:
+ case PMIC_MPP_MODE_DIGITAL_OUTPUT:
pad->input_enabled = false;
pad->output_enabled = true;
- pad->analog_mode = false;
+ pad->function = PMIC_MPP_DIGITAL;
break;
- case 2:
+ case PMIC_MPP_MODE_DIGITAL_BIDIR:
pad->input_enabled = true;
pad->output_enabled = true;
- pad->analog_mode = false;
+ pad->function = PMIC_MPP_DIGITAL;
break;
- case 3:
+ case PMIC_MPP_MODE_ANALOG_BIDIR:
pad->input_enabled = true;
pad->output_enabled = true;
- pad->analog_mode = true;
+ pad->function = PMIC_MPP_ANALOG;
break;
- case 4:
+ case PMIC_MPP_MODE_ANALOG_INPUT:
pad->input_enabled = true;
pad->output_enabled = false;
- pad->analog_mode = true;
+ pad->function = PMIC_MPP_ANALOG;
break;
- case 5:
+ case PMIC_MPP_MODE_ANALOG_OUTPUT:
pad->input_enabled = false;
pad->output_enabled = true;
- pad->analog_mode = true;
+ pad->function = PMIC_MPP_ANALOG;
+ break;
+ case PMIC_MPP_MODE_CURRENT_SINK:
+ pad->input_enabled = false;
+ pad->output_enabled = true;
+ pad->function = PMIC_MPP_SINK;
break;
default:
dev_err(state->dev, "unknown MPP direction\n");
return -ENODEV;
}
- pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
- pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+ sel = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+ sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+
+ if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
+ pad->dtest = sel - PMIC_MPP_SELECTOR_DTEST_FIRST + 1;
+ else if (sel == PMIC_MPP_SELECTOR_PAIRED)
+ pad->paired = true;
val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
if (val < 0)
@@ -810,8 +776,24 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;
- /* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
- pad->is_enabled = true;
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_SINK_CTL);
+ if (val < 0)
+ return val;
+
+ pad->drive_strength = val;
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
+ if (val < 0)
+ return val;
+
+ pad->aout_level = val;
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
+ if (val < 0)
+ return val;
+
+ pad->is_enabled = !!val;
+
return 0;
}
@@ -866,6 +848,12 @@ static int pmic_mpp_probe(struct platform_device *pdev)
pctrldesc->pins = pindesc;
pctrldesc->npins = npins;
+ pctrldesc->num_custom_params = ARRAY_SIZE(pmic_mpp_bindings);
+ pctrldesc->custom_params = pmic_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+ pctrldesc->custom_conf_items = pmic_conf_items;
+#endif
+
for (i = 0; i < npins; i++, pindesc++) {
pad = &pads[i];
pindesc->drv_data = pad;
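
The spmi-mpp rework above replaces roughly a hundred lines of hand-rolled
device-tree parsing with generic pinconf: vendor properties are declared
once as custom parameters and the core's dt_node_to_map helpers do the
rest. A minimal sketch of the registration, with hypothetical names
(FOO_CONF_LEVEL and friends are not from this patch):

	static const struct pinconf_generic_params foo_bindings[] = {
		/* property name,     custom param,   default value */
		{ "vendor,foo-level", FOO_CONF_LEVEL, 0 },
	};

	#ifdef CONFIG_DEBUG_FS
	static const struct pin_config_item foo_conf_items[] = {
		PCONFDUMP(FOO_CONF_LEVEL, "foo level", NULL, true),
	};
	#endif

	/* at probe time: */
	pctrldesc->custom_params = foo_bindings;
	pctrldesc->num_custom_params = ARRAY_SIZE(foo_bindings);
	#ifdef CONFIG_DEBUG_FS
	pctrldesc->custom_conf_items = foo_conf_items;
	#endif

together with .dt_node_to_map = pinconf_generic_dt_node_to_map_group in
the driver's pinctrl_ops, exactly as the diff does.
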
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
new file mode 100644
index 000000000000..c978b311031b
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* mode */
+#define PM8XXX_GPIO_MODE_ENABLED BIT(0)
+#define PM8XXX_GPIO_MODE_INPUT 0
+#define PM8XXX_GPIO_MODE_OUTPUT 2
+
+/* output buffer */
+#define PM8XXX_GPIO_PUSH_PULL 0
+#define PM8XXX_GPIO_OPEN_DRAIN 1
+
+/* bias */
+#define PM8XXX_GPIO_BIAS_PU_30 0
+#define PM8XXX_GPIO_BIAS_PU_1P5 1
+#define PM8XXX_GPIO_BIAS_PU_31P5 2
+#define PM8XXX_GPIO_BIAS_PU_1P5_30 3
+#define PM8XXX_GPIO_BIAS_PD 4
+#define PM8XXX_GPIO_BIAS_NP 5
+
+/* GPIO registers */
+#define SSBI_REG_ADDR_GPIO_BASE 0x150
+#define SSBI_REG_ADDR_GPIO(n) (SSBI_REG_ADDR_GPIO_BASE + n)
+
+#define PM8XXX_BANK_WRITE BIT(7)
+
+#define PM8XXX_MAX_GPIOS 44
+
+/* custom pinconf parameters */
+#define PM8XXX_QCOM_DRIVE_STRENGH (PIN_CONFIG_END + 1)
+#define PM8XXX_QCOM_PULL_UP_STRENGTH (PIN_CONFIG_END + 2)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg: address of the control register
+ * @irq: IRQ from the PMIC interrupt controller
+ * @power_source: logical selected voltage source, mapping in static data
+ * is used to translate to register values
+ * @mode: operating mode for the pin (input/output)
+ * @open_drain: output buffer configured as open-drain (vs push-pull)
+ * @output_value: configured output value
+ * @bias: register view of configured bias
+ * @pull_up_strength: placeholder for selected pull up strength
+ * only used to configure bias when pull up is selected
+ * @output_strength: selector of output-strength
+ * @disable: pin disabled / configured as tristate
+ * @function: pinmux selector
+ * @inverted: pin logic is inverted
+ */
+struct pm8xxx_pin_data {
+ unsigned reg;
+ int irq;
+ u8 power_source;
+ u8 mode;
+ bool open_drain;
+ bool output_value;
+ u8 bias;
+ u8 pull_up_strength;
+ u8 output_strength;
+ bool disable;
+ u8 function;
+ bool inverted;
+};
+
+struct pm8xxx_gpio {
+ struct device *dev;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctrl;
+ struct gpio_chip chip;
+
+ struct pinctrl_desc desc;
+ unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
+ {"qcom,drive-strength", PM8XXX_QCOM_DRIVE_STRENGH, 0},
+ {"qcom,pull-up-strength", PM8XXX_QCOM_PULL_UP_STRENGTH, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[ARRAY_SIZE(pm8xxx_gpio_bindings)] = {
+ PCONFDUMP(PM8XXX_QCOM_DRIVE_STRENGH, "drive-strength", NULL, true),
+ PCONFDUMP(PM8XXX_QCOM_PULL_UP_STRENGTH, "pull up strength", NULL, true),
+};
+#endif
+
+static const char * const pm8xxx_groups[PM8XXX_MAX_GPIOS] = {
+ "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
+ "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+
+static const char * const pm8xxx_gpio_functions[] = {
+ PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
+ PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
+ PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
+ PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+};
+
+static int pm8xxx_read_bank(struct pm8xxx_gpio *pctrl,
+ struct pm8xxx_pin_data *pin, int bank)
+{
+ unsigned int val = bank << 4;
+ int ret;
+
+ ret = regmap_write(pctrl->regmap, pin->reg, val);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to select bank %d\n", bank);
+ return ret;
+ }
+
+ ret = regmap_read(pctrl->regmap, pin->reg, &val);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to read register %d\n", bank);
+ return ret;
+ }
+
+ return val;
+}
+
+static int pm8xxx_write_bank(struct pm8xxx_gpio *pctrl,
+ struct pm8xxx_pin_data *pin,
+ int bank,
+ u8 val)
+{
+ int ret;
+
+ val |= PM8XXX_BANK_WRITE;
+ val |= bank << 4;
+
+ ret = regmap_write(pctrl->regmap, pin->reg, val);
+ if (ret)
+ dev_err(pctrl->dev, "failed to write register\n");
+
+ return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return pm8xxx_groups[group];
+}
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pctrl->desc.pins[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+ .get_groups_count = pm8xxx_get_groups_count,
+ .get_group_name = pm8xxx_get_group_name,
+ .get_group_pins = pm8xxx_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pm8xxx_gpio_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ return pm8xxx_gpio_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pm8xxx_groups;
+ *num_groups = pctrl->npins;
+ return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+ u8 val;
+
+ pin->function = function;
+ val = pin->function << 1;
+
+ pm8xxx_write_bank(pctrl, pin, 4, val);
+
+ return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+ .get_functions_count = pm8xxx_get_functions_count,
+ .get_function_name = pm8xxx_get_function_name,
+ .get_function_groups = pm8xxx_get_function_groups,
+ .set_mux = pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ unsigned long *config)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ unsigned param = pinconf_to_config_param(*config);
+ unsigned arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+ break;
+ case PM8XXX_QCOM_PULL_UP_STRENGTH:
+ arg = pin->pull_up_strength;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ arg = pin->disable;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
+ arg = pin->output_value;
+ else
+ arg = 0;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ arg = pin->power_source;
+ break;
+	case PM8XXX_QCOM_DRIVE_STRENGTH:
+ arg = pin->output_strength;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ arg = !pin->open_drain;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ arg = pin->open_drain;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ unsigned param;
+ unsigned arg;
+ unsigned i;
+ u8 banks = 0;
+ u8 val;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ pin->bias = PM8XXX_GPIO_BIAS_NP;
+ banks |= BIT(2);
+ pin->disable = 0;
+ banks |= BIT(3);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pin->bias = PM8XXX_GPIO_BIAS_PD;
+ banks |= BIT(2);
+ pin->disable = 0;
+ banks |= BIT(3);
+ break;
+ case PM8XXX_QCOM_PULL_UP_STRENGTH:
+ if (arg > PM8XXX_GPIO_BIAS_PU_1P5_30) {
+ dev_err(pctrl->dev, "invalid pull-up strength\n");
+ return -EINVAL;
+ }
+ pin->pull_up_strength = arg;
+ /* FALLTHROUGH */
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pin->bias = pin->pull_up_strength;
+ banks |= BIT(2);
+ pin->disable = 0;
+ banks |= BIT(3);
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ pin->disable = 1;
+ banks |= BIT(3);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pin->mode = PM8XXX_GPIO_MODE_INPUT;
+ banks |= BIT(0) | BIT(1);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+ pin->output_value = !!arg;
+ banks |= BIT(0) | BIT(1);
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ pin->power_source = arg;
+ banks |= BIT(0);
+ break;
+		case PM8XXX_QCOM_DRIVE_STRENGTH:
+ if (arg > PMIC_GPIO_STRENGTH_LOW) {
+ dev_err(pctrl->dev, "invalid drive strength\n");
+ return -EINVAL;
+ }
+ pin->output_strength = arg;
+ banks |= BIT(3);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ pin->open_drain = 0;
+ banks |= BIT(1);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ pin->open_drain = 1;
+ banks |= BIT(1);
+ break;
+ default:
+ dev_err(pctrl->dev,
+ "unsupported config parameter: %x\n",
+ param);
+ return -EINVAL;
+ }
+ }
+
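+	/*
+	 * Write back only the banks touched above.  The layout mirrors
+	 * pm8xxx_pin_populate(): bank 0 holds the power source and the
+	 * mode-enable bit, bank 1 the direction, output buffer type and
+	 * output value, bank 2 the bias, bank 3 the drive strength and
+	 * disable bit, bank 4 the function mux and bank 5 the inversion
+	 * bit.
+	 */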
+ if (banks & BIT(0)) {
+ val = pin->power_source << 1;
+ val |= PM8XXX_GPIO_MODE_ENABLED;
+ pm8xxx_write_bank(pctrl, pin, 0, val);
+ }
+
+ if (banks & BIT(1)) {
+ val = pin->mode << 2;
+ val |= pin->open_drain << 1;
+ val |= pin->output_value;
+ pm8xxx_write_bank(pctrl, pin, 1, val);
+ }
+
+ if (banks & BIT(2)) {
+ val = pin->bias << 1;
+ pm8xxx_write_bank(pctrl, pin, 2, val);
+ }
+
+ if (banks & BIT(3)) {
+ val = pin->output_strength << 2;
+ val |= pin->disable;
+ pm8xxx_write_bank(pctrl, pin, 3, val);
+ }
+
+ if (banks & BIT(4)) {
+ val = pin->function << 1;
+ pm8xxx_write_bank(pctrl, pin, 4, val);
+ }
+
+ if (banks & BIT(5)) {
+ val = 0;
+ if (!pin->inverted)
+ val |= BIT(3);
+ pm8xxx_write_bank(pctrl, pin, 5, val);
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_group_get = pm8xxx_pin_config_get,
+ .pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+ .name = "pm8xxx_gpio",
+ .pctlops = &pm8xxx_pinctrl_ops,
+ .pmxops = &pm8xxx_pinmux_ops,
+ .confops = &pm8xxx_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int pm8xxx_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ u8 val;
+
+ pin->mode = PM8XXX_GPIO_MODE_INPUT;
+ val = pin->mode << 2;
+
+ pm8xxx_write_bank(pctrl, pin, 1, val);
+
+ return 0;
+}
+
+static int pm8xxx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ u8 val;
+
+ pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+ pin->output_value = !!value;
+
+ val = pin->mode << 2;
+ val |= pin->open_drain << 1;
+ val |= pin->output_value;
+
+ pm8xxx_write_bank(pctrl, pin, 1, val);
+
+ return 0;
+}
+
+static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ bool state;
+ int ret;
+
+ if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
+ ret = pin->output_value;
+ } else {
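+		/* the input level is read back through the PMIC interrupt controller */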
+ ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (!ret)
+ ret = !!state;
+ }
+
+ return ret;
+}
+
+static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ u8 val;
+
+ pin->output_value = !!value;
+
+ val = pin->mode << 2;
+ val |= pin->open_drain << 1;
+ val |= pin->output_value;
+
+ pm8xxx_write_bank(pctrl, pin, 1, val);
+}
+
+static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *gpio_desc,
+ u32 *flags)
+{
+ if (chip->of_gpio_n_cells < 2)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpio_desc->args[1];
+
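+	/* the DT binding numbers GPIOs from 1, the chip numbers them from 0 */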
+ return gpio_desc->args[0] - 1;
+}
+
+static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned offset,
+ unsigned gpio)
+{
+ struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ static const char * const modes[] = {
+ "in", "both", "out", "off"
+ };
+ static const char * const biases[] = {
+ "pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
+ "pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
+ };
+ static const char * const buffer_types[] = {
+ "push-pull", "open-drain"
+ };
+ static const char * const strengths[] = {
+ "no", "high", "medium", "low"
+ };
+
+ seq_printf(s, " gpio%-2d:", offset + 1);
+ if (pin->disable) {
+ seq_puts(s, " ---");
+ } else {
+ seq_printf(s, " %-4s", modes[pin->mode]);
+ seq_printf(s, " %-7s", pm8xxx_gpio_functions[pin->function]);
+ seq_printf(s, " VIN%d", pin->power_source);
+ seq_printf(s, " %-27s", biases[pin->bias]);
+ seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
+ seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+ seq_printf(s, " %-7s", strengths[pin->output_strength]);
+ if (pin->inverted)
+ seq_puts(s, " inverted");
+ }
+}
+
+static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned gpio = chip->base;
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ pm8xxx_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+#define pm8xxx_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_gpio_template = {
+ .direction_input = pm8xxx_gpio_direction_input,
+ .direction_output = pm8xxx_gpio_direction_output,
+ .get = pm8xxx_gpio_get,
+ .set = pm8xxx_gpio_set,
+ .of_xlate = pm8xxx_gpio_of_xlate,
+ .to_irq = pm8xxx_gpio_to_irq,
+ .dbg_show = pm8xxx_gpio_dbg_show,
+ .owner = THIS_MODULE,
+};
+
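+/*
+ * Read back the boot-time state of all six banks so that the cached pin
+ * configuration matches the hardware before any pinconf request is served.
+ */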
+static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl,
+ struct pm8xxx_pin_data *pin)
+{
+ int val;
+
+ val = pm8xxx_read_bank(pctrl, pin, 0);
+ if (val < 0)
+ return val;
+
+ pin->power_source = (val >> 1) & 0x7;
+
+ val = pm8xxx_read_bank(pctrl, pin, 1);
+ if (val < 0)
+ return val;
+
+ pin->mode = (val >> 2) & 0x3;
+ pin->open_drain = !!(val & BIT(1));
+ pin->output_value = val & BIT(0);
+
+ val = pm8xxx_read_bank(pctrl, pin, 2);
+ if (val < 0)
+ return val;
+
+ pin->bias = (val >> 1) & 0x7;
+ if (pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30)
+ pin->pull_up_strength = pin->bias;
+ else
+ pin->pull_up_strength = PM8XXX_GPIO_BIAS_PU_30;
+
+ val = pm8xxx_read_bank(pctrl, pin, 3);
+ if (val < 0)
+ return val;
+
+ pin->output_strength = (val >> 2) & 0x3;
+ pin->disable = val & BIT(0);
+
+ val = pm8xxx_read_bank(pctrl, pin, 4);
+ if (val < 0)
+ return val;
+
+ pin->function = (val >> 1) & 0x7;
+
+ val = pm8xxx_read_bank(pctrl, pin, 5);
+ if (val < 0)
+ return val;
+
+ pin->inverted = !(val & BIT(3));
+
+ return 0;
+}
+
+static const struct of_device_id pm8xxx_gpio_of_match[] = {
+ { .compatible = "qcom,pm8018-gpio", .data = (void *)6 },
+ { .compatible = "qcom,pm8038-gpio", .data = (void *)12 },
+ { .compatible = "qcom,pm8058-gpio", .data = (void *)40 },
+ { .compatible = "qcom,pm8917-gpio", .data = (void *)38 },
+ { .compatible = "qcom,pm8921-gpio", .data = (void *)44 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match);
+
+static int pm8xxx_gpio_probe(struct platform_device *pdev)
+{
+ struct pm8xxx_pin_data *pin_data;
+ struct pinctrl_pin_desc *pins;
+ struct pm8xxx_gpio *pctrl;
+ int ret;
+ int i;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+ pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+
+ pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pctrl->regmap) {
+ dev_err(&pdev->dev, "parent regmap unavailable\n");
+ return -ENXIO;
+ }
+
+ pctrl->desc = pm8xxx_pinctrl_desc;
+ pctrl->desc.npins = pctrl->npins;
+
+ pins = devm_kcalloc(&pdev->dev,
+ pctrl->desc.npins,
+ sizeof(struct pinctrl_pin_desc),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(&pdev->dev,
+ pctrl->desc.npins,
+ sizeof(struct pm8xxx_pin_data),
+ GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->desc.npins; i++) {
+ pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
+ pin_data[i].irq = platform_get_irq(pdev, i);
+ if (pin_data[i].irq < 0) {
+ dev_err(&pdev->dev,
+ "missing interrupts for pin %d\n", i);
+ return pin_data[i].irq;
+ }
+
+ ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+ if (ret)
+ return ret;
+
+ pins[i].number = i;
+ pins[i].name = pm8xxx_groups[i];
+ pins[i].drv_data = &pin_data[i];
+ }
+ pctrl->desc.pins = pins;
+
+ pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_gpio_bindings);
+ pctrl->desc.custom_params = pm8xxx_gpio_bindings;
+#ifdef CONFIG_DEBUG_FS
+ pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+ pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+ if (!pctrl->pctrl) {
+ dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
+ return -ENODEV;
+ }
+
+ pctrl->chip = pm8xxx_gpio_template;
+ pctrl->chip.base = -1;
+ pctrl->chip.dev = &pdev->dev;
+ pctrl->chip.of_node = pdev->dev.of_node;
+ pctrl->chip.of_gpio_n_cells = 2;
+ pctrl->chip.label = dev_name(pctrl->dev);
+ pctrl->chip.ngpio = pctrl->npins;
+ ret = gpiochip_add(&pctrl->chip);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to register gpiochip\n");
+ goto unregister_pinctrl;
+ }
+
+ ret = gpiochip_add_pin_range(&pctrl->chip,
+ dev_name(pctrl->dev),
+ 0, 0, pctrl->chip.ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add pin range\n");
+ goto unregister_gpiochip;
+ }
+
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_dbg(&pdev->dev, "Qualcomm pm8xxx gpio driver probed\n");
+
+ return 0;
+
+unregister_gpiochip:
+ gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+ pinctrl_unregister(pctrl->pctrl);
+
+ return ret;
+}
+
+static int pm8xxx_gpio_remove(struct platform_device *pdev)
+{
+ struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&pctrl->chip);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static struct platform_driver pm8xxx_gpio_driver = {
+ .driver = {
+ .name = "qcom-ssbi-gpio",
+ .of_match_table = pm8xxx_gpio_of_match,
+ },
+ .probe = pm8xxx_gpio_probe,
+ .remove = pm8xxx_gpio_remove,
+};
+
+module_platform_driver(pm8xxx_gpio_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
new file mode 100644
index 000000000000..2d1b69f171be
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* MPP registers */
+#define SSBI_REG_ADDR_MPP_BASE 0x50
+#define SSBI_REG_ADDR_MPP(n) (SSBI_REG_ADDR_MPP_BASE + n)
+
+/* MPP Type: type */
+#define PM8XXX_MPP_TYPE_D_INPUT 0
+#define PM8XXX_MPP_TYPE_D_OUTPUT 1
+#define PM8XXX_MPP_TYPE_D_BI_DIR 2
+#define PM8XXX_MPP_TYPE_A_INPUT 3
+#define PM8XXX_MPP_TYPE_A_OUTPUT 4
+#define PM8XXX_MPP_TYPE_SINK 5
+#define PM8XXX_MPP_TYPE_DTEST_SINK 6
+#define PM8XXX_MPP_TYPE_DTEST_OUTPUT 7
+
+/* Digital Input: control */
+#define PM8XXX_MPP_DIN_TO_INT 0
+#define PM8XXX_MPP_DIN_TO_DBUS1 1
+#define PM8XXX_MPP_DIN_TO_DBUS2 2
+#define PM8XXX_MPP_DIN_TO_DBUS3 3
+
+/* Digital Output: control */
+#define PM8XXX_MPP_DOUT_CTRL_LOW 0
+#define PM8XXX_MPP_DOUT_CTRL_HIGH 1
+#define PM8XXX_MPP_DOUT_CTRL_MPP 2
+#define PM8XXX_MPP_DOUT_CTRL_INV_MPP 3
+
+/* Bidirectional: control */
+#define PM8XXX_MPP_BI_PULLUP_1KOHM 0
+#define PM8XXX_MPP_BI_PULLUP_OPEN 1
+#define PM8XXX_MPP_BI_PULLUP_10KOHM 2
+#define PM8XXX_MPP_BI_PULLUP_30KOHM 3
+
+/* Analog Output: control */
+#define PM8XXX_MPP_AOUT_CTRL_DISABLE 0
+#define PM8XXX_MPP_AOUT_CTRL_ENABLE 1
+#define PM8XXX_MPP_AOUT_CTRL_MPP_HIGH_EN 2
+#define PM8XXX_MPP_AOUT_CTRL_MPP_LOW_EN 3
+
+/* Current Sink: control */
+#define PM8XXX_MPP_CS_CTRL_DISABLE 0
+#define PM8XXX_MPP_CS_CTRL_ENABLE 1
+#define PM8XXX_MPP_CS_CTRL_MPP_HIGH_EN 2
+#define PM8XXX_MPP_CS_CTRL_MPP_LOW_EN 3
+
+/* DTEST Current Sink: control */
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN1 0
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN2 1
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN3 2
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN4 3
+
+/* DTEST Digital Output: control */
+#define PM8XXX_MPP_DTEST_DBUS1 0
+#define PM8XXX_MPP_DTEST_DBUS2 1
+#define PM8XXX_MPP_DTEST_DBUS3 2
+#define PM8XXX_MPP_DTEST_DBUS4 3
+
+/* custom pinconf parameters */
+#define PM8XXX_CONFIG_AMUX (PIN_CONFIG_END + 1)
+#define PM8XXX_CONFIG_DTEST_SELECTOR (PIN_CONFIG_END + 2)
+#define PM8XXX_CONFIG_ALEVEL (PIN_CONFIG_END + 3)
+#define PM8XXX_CONFIG_PAIRED (PIN_CONFIG_END + 4)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg: address of the control register
+ * @irq: IRQ from the PMIC interrupt controller
+ * @mode: operating mode for the pin (digital, analog or current sink)
+ * @input: pin is input
+ * @output: pin is output
+ * @high_z: pin is floating
+ * @paired: mpp operates in paired mode
+ * @output_value: logical output value of the mpp
+ * @power_source: selected power source
+ * @dtest: DTEST route selector
+ * @amux: input muxing in analog mode
+ * @aout_level: selector of the output in analog mode
+ * @drive_strength: drive strength of the current sink
+ * @pullup: pull up value, when in digital bidirectional mode
+ */
+struct pm8xxx_pin_data {
+ unsigned reg;
+ int irq;
+
+ u8 mode;
+
+ bool input;
+ bool output;
+ bool high_z;
+ bool paired;
+ bool output_value;
+
+ u8 power_source;
+ u8 dtest;
+ u8 amux;
+ u8 aout_level;
+ u8 drive_strength;
+ unsigned pullup;
+};
+
+struct pm8xxx_mpp {
+ struct device *dev;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctrl;
+ struct gpio_chip chip;
+
+ struct pinctrl_desc desc;
+ unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_mpp_bindings[] = {
+ {"qcom,amux-route", PM8XXX_CONFIG_AMUX, 0},
+ {"qcom,analog-level", PM8XXX_CONFIG_ALEVEL, 0},
+ {"qcom,dtest", PM8XXX_CONFIG_DTEST_SELECTOR, 0},
+ {"qcom,paired", PM8XXX_CONFIG_PAIRED, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[] = {
+ PCONFDUMP(PM8XXX_CONFIG_AMUX, "analog mux", NULL, true),
+ PCONFDUMP(PM8XXX_CONFIG_ALEVEL, "analog level", NULL, true),
+ PCONFDUMP(PM8XXX_CONFIG_DTEST_SELECTOR, "dtest", NULL, true),
+ PCONFDUMP(PM8XXX_CONFIG_PAIRED, "paired", NULL, false),
+};
+#endif
+
+#define PM8XXX_MAX_MPPS 12
+static const char * const pm8xxx_groups[PM8XXX_MAX_MPPS] = {
+ "mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
+ "mpp9", "mpp10", "mpp11", "mpp12",
+};
+
+#define PM8XXX_MPP_DIGITAL 0
+#define PM8XXX_MPP_ANALOG 1
+#define PM8XXX_MPP_SINK 2
+
+static const char * const pm8xxx_mpp_functions[] = {
+ "digital", "analog", "sink",
+};
+
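+/*
+ * Each MPP is controlled by a single 8-bit register, encoded as
+ * type << 5 | level << 2 | ctrl.  The meaning of the 3-bit level field
+ * (power source, analog level, analog mux or sink current) and of the
+ * 2-bit ctrl field depends on the selected type.
+ */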
+static int pm8xxx_mpp_update(struct pm8xxx_mpp *pctrl,
+ struct pm8xxx_pin_data *pin)
+{
+ unsigned level;
+ unsigned ctrl;
+ unsigned type;
+ int ret;
+ u8 val;
+
+ switch (pin->mode) {
+ case PM8XXX_MPP_DIGITAL:
+ if (pin->dtest) {
+ type = PM8XXX_MPP_TYPE_DTEST_OUTPUT;
+ ctrl = pin->dtest - 1;
+ } else if (pin->input && pin->output) {
+ type = PM8XXX_MPP_TYPE_D_BI_DIR;
+ if (pin->high_z)
+ ctrl = PM8XXX_MPP_BI_PULLUP_OPEN;
+ else if (pin->pullup == 600)
+ ctrl = PM8XXX_MPP_BI_PULLUP_1KOHM;
+ else if (pin->pullup == 10000)
+ ctrl = PM8XXX_MPP_BI_PULLUP_10KOHM;
+ else
+ ctrl = PM8XXX_MPP_BI_PULLUP_30KOHM;
+ } else if (pin->input) {
+ type = PM8XXX_MPP_TYPE_D_INPUT;
+ if (pin->dtest)
+ ctrl = pin->dtest;
+ else
+ ctrl = PM8XXX_MPP_DIN_TO_INT;
+ } else {
+ type = PM8XXX_MPP_TYPE_D_OUTPUT;
+ ctrl = !!pin->output_value;
+ if (pin->paired)
+ ctrl |= BIT(1);
+ }
+
+ level = pin->power_source;
+ break;
+ case PM8XXX_MPP_ANALOG:
+ if (pin->output) {
+ type = PM8XXX_MPP_TYPE_A_OUTPUT;
+ level = pin->aout_level;
+ ctrl = pin->output_value;
+ if (pin->paired)
+ ctrl |= BIT(1);
+ } else {
+ type = PM8XXX_MPP_TYPE_A_INPUT;
+ level = pin->amux;
+ ctrl = 0;
+ }
+ break;
+ case PM8XXX_MPP_SINK:
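+		/* sink strength is programmed in 5 mA steps: 5..40 mA map to 0..7 */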
+ level = (pin->drive_strength / 5) - 1;
+ if (pin->dtest) {
+ type = PM8XXX_MPP_TYPE_DTEST_SINK;
+ ctrl = pin->dtest - 1;
+ } else {
+ type = PM8XXX_MPP_TYPE_SINK;
+ ctrl = pin->output_value;
+ if (pin->paired)
+ ctrl |= BIT(1);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = type << 5 | level << 2 | ctrl;
+ ret = regmap_write(pctrl->regmap, pin->reg, val);
+ if (ret)
+ dev_err(pctrl->dev, "failed to write register\n");
+
+ return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ return pm8xxx_groups[group];
+}
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pctrl->desc.pins[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+ .get_groups_count = pm8xxx_get_groups_count,
+ .get_group_name = pm8xxx_get_group_name,
+ .get_group_pins = pm8xxx_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pm8xxx_mpp_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ return pm8xxx_mpp_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pm8xxx_groups;
+ *num_groups = pctrl->npins;
+ return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+
+ pin->mode = function;
+ pm8xxx_mpp_update(pctrl, pin);
+
+ return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+ .get_functions_count = pm8xxx_get_functions_count,
+ .get_function_name = pm8xxx_get_function_name,
+ .get_function_groups = pm8xxx_get_function_groups,
+ .set_mux = pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ unsigned long *config)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ unsigned param = pinconf_to_config_param(*config);
+ unsigned arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pin->pullup;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ arg = pin->high_z;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pin->input;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ arg = pin->output_value;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ arg = pin->power_source;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = pin->drive_strength;
+ break;
+ case PM8XXX_CONFIG_DTEST_SELECTOR:
+ arg = pin->dtest;
+ break;
+ case PM8XXX_CONFIG_AMUX:
+ arg = pin->amux;
+ break;
+ case PM8XXX_CONFIG_ALEVEL:
+ arg = pin->aout_level;
+ break;
+ case PM8XXX_CONFIG_PAIRED:
+ arg = pin->paired;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ unsigned param;
+ unsigned arg;
+ unsigned i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pin->pullup = arg;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ pin->high_z = true;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pin->input = true;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pin->output = true;
+ pin->output_value = !!arg;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ pin->power_source = arg;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ pin->drive_strength = arg;
+ break;
+ case PM8XXX_CONFIG_DTEST_SELECTOR:
+ pin->dtest = arg;
+ break;
+ case PM8XXX_CONFIG_AMUX:
+ pin->amux = arg;
+ break;
+ case PM8XXX_CONFIG_ALEVEL:
+ pin->aout_level = arg;
+ break;
+ case PM8XXX_CONFIG_PAIRED:
+ pin->paired = !!arg;
+ break;
+ default:
+ dev_err(pctrl->dev,
+ "unsupported config parameter: %x\n",
+ param);
+ return -EINVAL;
+ }
+ }
+
+ pm8xxx_mpp_update(pctrl, pin);
+
+ return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_group_get = pm8xxx_pin_config_get,
+ .pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+ .name = "pm8xxx_mpp",
+ .pctlops = &pm8xxx_pinctrl_ops,
+ .pmxops = &pm8xxx_pinmux_ops,
+ .confops = &pm8xxx_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int pm8xxx_mpp_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ switch (pin->mode) {
+ case PM8XXX_MPP_DIGITAL:
+ pin->input = true;
+ break;
+ case PM8XXX_MPP_ANALOG:
+ pin->input = true;
+ pin->output = true;
+ break;
+ case PM8XXX_MPP_SINK:
+ return -EINVAL;
+ }
+
+ pm8xxx_mpp_update(pctrl, pin);
+
+ return 0;
+}
+
+static int pm8xxx_mpp_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ switch (pin->mode) {
+ case PM8XXX_MPP_DIGITAL:
+ pin->output = true;
+ break;
+ case PM8XXX_MPP_ANALOG:
+ pin->input = false;
+ pin->output = true;
+ break;
+ case PM8XXX_MPP_SINK:
+ pin->input = false;
+ pin->output = true;
+ break;
+ }
+
+ pm8xxx_mpp_update(pctrl, pin);
+
+ return 0;
+}
+
+static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ bool state;
+ int ret;
+
+ if (!pin->input)
+ return pin->output_value;
+
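+	/* for inputs, the line level is read back through the PMIC interrupt controller */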
+ ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (!ret)
+ ret = !!state;
+
+ return ret;
+}
+
+static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ pin->output_value = !!value;
+
+ pm8xxx_mpp_update(pctrl, pin);
+}
+
+static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *gpio_desc,
+ u32 *flags)
+{
+ if (chip->of_gpio_n_cells < 2)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpio_desc->args[1];
+
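+	/* the DT binding numbers MPPs from 1, the chip numbers them from 0 */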
+ return gpio_desc->args[0] - 1;
+}
+
+static int pm8xxx_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned offset,
+ unsigned gpio)
+{
+ struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+ struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+ static const char * const aout_lvls[] = {
+ "1v25", "1v25_2", "0v625", "0v3125", "mpp", "abus1", "abus2",
+ "abus3"
+ };
+
+ static const char * const amuxs[] = {
+ "amux5", "amux6", "amux7", "amux8", "amux9", "abus1", "abus2",
+ "abus3",
+ };
+
+ seq_printf(s, " mpp%-2d:", offset + 1);
+
+ switch (pin->mode) {
+ case PM8XXX_MPP_DIGITAL:
+ seq_puts(s, " digital ");
+ if (pin->dtest) {
+ seq_printf(s, "dtest%d\n", pin->dtest);
+ } else if (pin->input && pin->output) {
+ if (pin->high_z)
+ seq_puts(s, "bi-dir high-z");
+ else
+ seq_printf(s, "bi-dir %dOhm", pin->pullup);
+ } else if (pin->input) {
+ if (pin->dtest)
+ seq_printf(s, "in dtest%d", pin->dtest);
+ else
+ seq_puts(s, "in gpio");
+ } else if (pin->output) {
+ seq_puts(s, "out ");
+
+ if (!pin->paired) {
+ seq_puts(s, pin->output_value ?
+ "high" : "low");
+ } else {
+ seq_puts(s, pin->output_value ?
+ "inverted" : "follow");
+ }
+ }
+ break;
+ case PM8XXX_MPP_ANALOG:
+ seq_puts(s, " analog ");
+ if (pin->output) {
+ seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
+ if (!pin->paired) {
+ seq_puts(s, pin->output_value ?
+ "high" : "low");
+ } else {
+ seq_puts(s, pin->output_value ?
+ "inverted" : "follow");
+ }
+ } else {
+ seq_printf(s, "input mux %s", amuxs[pin->amux]);
+ }
+ break;
+ case PM8XXX_MPP_SINK:
+ seq_printf(s, " sink %dmA ", pin->drive_strength);
+ if (pin->dtest) {
+ seq_printf(s, "dtest%d", pin->dtest);
+ } else {
+ if (!pin->paired) {
+ seq_puts(s, pin->output_value ?
+ "high" : "low");
+ } else {
+ seq_puts(s, pin->output_value ?
+ "inverted" : "follow");
+ }
+ }
+ break;
+ }
+}
+
+static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned gpio = chip->base;
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ pm8xxx_mpp_dbg_show_one(s, NULL, chip, i, gpio);
+ seq_puts(s, "\n");
+ }
+}
+
+#else
+#define pm8xxx_mpp_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_mpp_template = {
+ .direction_input = pm8xxx_mpp_direction_input,
+ .direction_output = pm8xxx_mpp_direction_output,
+ .get = pm8xxx_mpp_get,
+ .set = pm8xxx_mpp_set,
+ .of_xlate = pm8xxx_mpp_of_xlate,
+ .to_irq = pm8xxx_mpp_to_irq,
+ .dbg_show = pm8xxx_mpp_dbg_show,
+ .owner = THIS_MODULE,
+};
+
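+/*
+ * Decode the hardware state of the MPP into the cached pin data; the
+ * interpretation of the level and ctrl fields follows the type field,
+ * mirroring the encoding in pm8xxx_mpp_update().
+ */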
+static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
+ struct pm8xxx_pin_data *pin)
+{
+ unsigned int val;
+ unsigned level;
+ unsigned ctrl;
+ unsigned type;
+ int ret;
+
+ ret = regmap_read(pctrl->regmap, pin->reg, &val);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to read register\n");
+ return ret;
+ }
+
+ type = (val >> 5) & 7;
+ level = (val >> 2) & 7;
+ ctrl = (val) & 3;
+
+ switch (type) {
+ case PM8XXX_MPP_TYPE_D_INPUT:
+ pin->mode = PM8XXX_MPP_DIGITAL;
+ pin->input = true;
+ pin->power_source = level;
+ pin->dtest = ctrl;
+ break;
+ case PM8XXX_MPP_TYPE_D_OUTPUT:
+ pin->mode = PM8XXX_MPP_DIGITAL;
+ pin->output = true;
+ pin->power_source = level;
+ pin->output_value = !!(ctrl & BIT(0));
+ pin->paired = !!(ctrl & BIT(1));
+ break;
+ case PM8XXX_MPP_TYPE_D_BI_DIR:
+ pin->mode = PM8XXX_MPP_DIGITAL;
+ pin->input = true;
+ pin->output = true;
+ pin->power_source = level;
+ switch (ctrl) {
+ case PM8XXX_MPP_BI_PULLUP_1KOHM:
+ pin->pullup = 600;
+ break;
+ case PM8XXX_MPP_BI_PULLUP_OPEN:
+ pin->high_z = true;
+ break;
+ case PM8XXX_MPP_BI_PULLUP_10KOHM:
+ pin->pullup = 10000;
+ break;
+ case PM8XXX_MPP_BI_PULLUP_30KOHM:
+ pin->pullup = 30000;
+ break;
+ }
+ break;
+ case PM8XXX_MPP_TYPE_A_INPUT:
+ pin->mode = PM8XXX_MPP_ANALOG;
+ pin->input = true;
+ pin->amux = level;
+ break;
+ case PM8XXX_MPP_TYPE_A_OUTPUT:
+ pin->mode = PM8XXX_MPP_ANALOG;
+ pin->output = true;
+ pin->aout_level = level;
+ pin->output_value = !!(ctrl & BIT(0));
+ pin->paired = !!(ctrl & BIT(1));
+ break;
+ case PM8XXX_MPP_TYPE_SINK:
+ pin->mode = PM8XXX_MPP_SINK;
+ pin->drive_strength = 5 * (level + 1);
+ pin->output_value = !!(ctrl & BIT(0));
+ pin->paired = !!(ctrl & BIT(1));
+ break;
+ case PM8XXX_MPP_TYPE_DTEST_SINK:
+ pin->mode = PM8XXX_MPP_SINK;
+ pin->dtest = ctrl + 1;
+ pin->drive_strength = 5 * (level + 1);
+ break;
+ case PM8XXX_MPP_TYPE_DTEST_OUTPUT:
+ pin->mode = PM8XXX_MPP_DIGITAL;
+ pin->power_source = level;
+ if (ctrl >= 1)
+ pin->dtest = ctrl;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id pm8xxx_mpp_of_match[] = {
+ { .compatible = "qcom,pm8018-mpp", .data = (void *)6 },
+ { .compatible = "qcom,pm8038-mpp", .data = (void *)6 },
+ { .compatible = "qcom,pm8917-mpp", .data = (void *)10 },
+ { .compatible = "qcom,pm8821-mpp", .data = (void *)4 },
+ { .compatible = "qcom,pm8921-mpp", .data = (void *)12 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match);
+
+static int pm8xxx_mpp_probe(struct platform_device *pdev)
+{
+ struct pm8xxx_pin_data *pin_data;
+ struct pinctrl_pin_desc *pins;
+ struct pm8xxx_mpp *pctrl;
+ int ret;
+ int i;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+ pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+
+ pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pctrl->regmap) {
+ dev_err(&pdev->dev, "parent regmap unavailable\n");
+ return -ENXIO;
+ }
+
+ pctrl->desc = pm8xxx_pinctrl_desc;
+ pctrl->desc.npins = pctrl->npins;
+
+ pins = devm_kcalloc(&pdev->dev,
+ pctrl->desc.npins,
+ sizeof(struct pinctrl_pin_desc),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(&pdev->dev,
+ pctrl->desc.npins,
+ sizeof(struct pm8xxx_pin_data),
+ GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->desc.npins; i++) {
+ pin_data[i].reg = SSBI_REG_ADDR_MPP(i);
+ pin_data[i].irq = platform_get_irq(pdev, i);
+ if (pin_data[i].irq < 0) {
+ dev_err(&pdev->dev,
+ "missing interrupts for pin %d\n", i);
+ return pin_data[i].irq;
+ }
+
+ ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+ if (ret)
+ return ret;
+
+ pins[i].number = i;
+ pins[i].name = pm8xxx_groups[i];
+ pins[i].drv_data = &pin_data[i];
+ }
+ pctrl->desc.pins = pins;
+
+ pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_mpp_bindings);
+ pctrl->desc.custom_params = pm8xxx_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+ pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+ pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+ if (!pctrl->pctrl) {
+ dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
+ return -ENODEV;
+ }
+
+ pctrl->chip = pm8xxx_mpp_template;
+ pctrl->chip.base = -1;
+ pctrl->chip.dev = &pdev->dev;
+ pctrl->chip.of_node = pdev->dev.of_node;
+ pctrl->chip.of_gpio_n_cells = 2;
+ pctrl->chip.label = dev_name(pctrl->dev);
+ pctrl->chip.ngpio = pctrl->npins;
+ ret = gpiochip_add(&pctrl->chip);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to register gpiochip\n");
+ goto unregister_pinctrl;
+ }
+
+ ret = gpiochip_add_pin_range(&pctrl->chip,
+ dev_name(pctrl->dev),
+ 0, 0, pctrl->chip.ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add pin range\n");
+ goto unregister_gpiochip;
+ }
+
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_dbg(&pdev->dev, "Qualcomm pm8xxx mpp driver probed\n");
+
+ return 0;
+
+unregister_gpiochip:
+ gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+ pinctrl_unregister(pctrl->pctrl);
+
+ return ret;
+}
+
+static int pm8xxx_mpp_remove(struct platform_device *pdev)
+{
+ struct pm8xxx_mpp *pctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&pctrl->chip);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static struct platform_driver pm8xxx_mpp_driver = {
+ .driver = {
+ .name = "qcom-ssbi-mpp",
+ .of_match_table = pm8xxx_mpp_of_match,
+ },
+ .probe = pm8xxx_mpp_probe,
+ .remove = pm8xxx_mpp_remove,
+};
+
+module_platform_driver(pm8xxx_mpp_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx MPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b18dabba03a4..5f45caaef46d 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -148,9 +148,9 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
}
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(irqd->irq, handle_edge_irq);
+ irq_set_handler_locked(irqd, handle_edge_irq);
else
- __irq_set_handler_locked(irqd->irq, handle_level_irq);
+ irq_set_handler_locked(irqd, handle_level_irq);
con = readl(d->virt_base + reg_con);
con &= ~(EXYNOS_EINT_CON_MASK << shift);
@@ -256,7 +256,6 @@ static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, b);
irq_set_chip_and_handler(virq, &b->irq_chip->chip,
handle_level_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -422,9 +421,9 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
/* interrupt handler for wakeup interrupts 0..15 */
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
- struct exynos_weint_data *eintd = irq_get_handler_data(irq);
+ struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
struct samsung_pin_bank *bank = eintd->bank;
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int eint_irq;
chained_irq_enter(chip, desc);
@@ -454,8 +453,8 @@ static inline void exynos_irq_demux_eint(unsigned long pend,
/* interrupt handler for wakeup interrupt 16 */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
unsigned long pend;
unsigned long mask;
@@ -542,8 +541,9 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
weint_data[idx].irq = idx;
weint_data[idx].bank = bank;
- irq_set_handler_data(irq, &weint_data[idx]);
- irq_set_chained_handler(irq, exynos_irq_eint0_15);
+ irq_set_chained_handler_and_data(irq,
+ exynos_irq_eint0_15,
+ &weint_data[idx]);
}
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index f5619fb50447..9ce0b8619d4c 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -44,9 +44,7 @@
#define PIN_NAME_LENGTH 10
#define GROUP_SUFFIX "-grp"
-#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
#define FUNCTION_SUFFIX "-mux"
-#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
/*
* pin configuration type and its value are packed together into a 16-bits.
@@ -205,22 +203,17 @@ static int exynos5440_dt_node_to_map(struct pinctrl_dev *pctldev,
/* Allocate memory for pin-map entries */
map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
- if (!map) {
- dev_err(dev, "could not alloc memory for pin-maps\n");
+ if (!map)
return -ENOMEM;
- }
*nmaps = 0;
/*
* Allocate memory for pin group name. The pin group name is derived
* from the node name from which these map entries are be created.
*/
- gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL);
- if (!gname) {
- dev_err(dev, "failed to alloc memory for group name\n");
+ gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
+ if (!gname)
goto free_map;
- }
- snprintf(gname, strlen(np->name) + 4, "%s%s", np->name, GROUP_SUFFIX);
/*
* don't have config options? then skip over to creating function
@@ -231,10 +224,8 @@ static int exynos5440_dt_node_to_map(struct pinctrl_dev *pctldev,
/* Allocate memory for config entries */
cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
- if (!cfg) {
- dev_err(dev, "failed to alloc memory for configs\n");
+ if (!cfg)
goto free_gname;
- }
/* Prepare a list of config settings */
for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
@@ -254,13 +245,10 @@ static int exynos5440_dt_node_to_map(struct pinctrl_dev *pctldev,
skip_cfgs:
/* create the function map entry */
if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
- fname = kzalloc(strlen(np->name) + FSUFFIX_LEN, GFP_KERNEL);
- if (!fname) {
- dev_err(dev, "failed to alloc memory for func name\n");
+ fname = kasprintf(GFP_KERNEL,
+ "%s%s", np->name, FUNCTION_SUFFIX);
+ if (!fname)
goto free_cfg;
- }
- snprintf(fname, strlen(np->name) + 4, "%s%s", np->name,
- FUNCTION_SUFFIX);
map[*nmaps].data.mux.group = gname;
map[*nmaps].data.mux.function = fname;
@@ -651,10 +639,8 @@ static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
}
*pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
- if (!*pin_list) {
- dev_err(dev, "failed to allocate memory for pin list\n");
+ if (!*pin_list)
return -ENOMEM;
- }
return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
*pin_list, *npins);
@@ -682,17 +668,15 @@ static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
return -EINVAL;
groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
- if (!groups) {
- dev_err(dev, "failed allocate memory for ping group list\n");
+ if (!groups)
return -EINVAL;
- }
+
grp = groups;
functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
- if (!functions) {
- dev_err(dev, "failed to allocate memory for function list\n");
+ if (!functions)
return -EINVAL;
- }
+
func = functions;
/*
@@ -710,14 +694,10 @@ static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
}
/* derive pin group name from the node name */
- gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN,
- GFP_KERNEL);
- if (!gname) {
- dev_err(dev, "failed to alloc memory for group name\n");
+ gname = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%s", cfg_np->name, GROUP_SUFFIX);
+ if (!gname)
return -ENOMEM;
- }
- snprintf(gname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
- GROUP_SUFFIX);
grp->name = gname;
grp->pins = pin_list;
@@ -731,22 +711,15 @@ skip_to_pin_function:
continue;
/* derive function name from the node name */
- fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN,
- GFP_KERNEL);
- if (!fname) {
- dev_err(dev, "failed to alloc memory for func name\n");
+ fname = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%s", cfg_np->name, FUNCTION_SUFFIX);
+ if (!fname)
return -ENOMEM;
- }
- snprintf(fname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
- FUNCTION_SUFFIX);
func->name = fname;
func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
- if (!func->groups) {
- dev_err(dev, "failed to alloc memory for group list "
- "in pin function");
+ if (!func->groups)
return -ENOMEM;
- }
func->groups[0] = gname;
func->num_groups = gname ? 1 : 0;
func->function = function;
@@ -774,10 +747,8 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
int pin, ret;
ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
- if (!ctrldesc) {
- dev_err(dev, "could not allocate memory for pinctrl desc\n");
+ if (!ctrldesc)
return -ENOMEM;
- }
ctrldesc->name = "exynos5440-pinctrl";
ctrldesc->owner = THIS_MODULE;
@@ -787,10 +758,8 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
EXYNOS5440_MAX_PINS, GFP_KERNEL);
- if (!pindesc) {
- dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+ if (!pindesc)
return -ENOMEM;
- }
ctrldesc->pins = pindesc;
ctrldesc->npins = EXYNOS5440_MAX_PINS;
@@ -804,10 +773,8 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
*/
pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
ctrldesc->npins, GFP_KERNEL);
- if (!pin_names) {
- dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+ if (!pin_names)
return -ENOMEM;
- }
/* for each pin, set the name of the pin */
for (pin = 0; pin < ctrldesc->npins; pin++) {
@@ -844,10 +811,8 @@ static int exynos5440_gpiolib_register(struct platform_device *pdev,
int ret;
gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc) {
- dev_err(&pdev->dev, "mem alloc for gpio_chip failed\n");
+ if (!gc)
return -ENOMEM;
- }
priv->gc = gc;
gc->base = 0;
@@ -929,7 +894,6 @@ static int exynos5440_gpio_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, d);
irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
handle_level_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -949,10 +913,8 @@ static int exynos5440_gpio_irq_init(struct platform_device *pdev,
intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
GFP_KERNEL);
- if (!intd) {
- dev_err(dev, "failed to allocate memory for gpio intr data\n");
+ if (!intd)
return -ENOMEM;
- }
for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
irq = irq_of_parse_and_map(dev->of_node, i);
@@ -995,10 +957,8 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev)
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "could not allocate memory for private data\n");
+ if (!priv)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 01b43dbfb795..019844d479bb 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -131,13 +131,13 @@ static int s3c24xx_eint_get_trigger(unsigned int type)
}
}
-static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
+static void s3c24xx_eint_set_handler(struct irq_data *d, unsigned int type)
{
/* Edge- and level-triggered interrupts need different handlers */
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else
- __irq_set_handler_locked(irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
}
static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
@@ -181,7 +181,7 @@ static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- s3c24xx_eint_set_handler(data->irq, type);
+ s3c24xx_eint_set_handler(data, type);
/* Set up interrupt trigger */
reg = d->virt_base + EINT_REG(index);
@@ -243,7 +243,7 @@ static struct irq_chip s3c2410_eint0_3_chip = {
static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+ struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
unsigned int virq;
/* the first 4 eints have a simple 1 to 1 mapping */
@@ -297,9 +297,9 @@ static struct irq_chip s3c2412_eint0_3_chip = {
static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
+ struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned int virq;
chained_irq_enter(chip, desc);
@@ -357,11 +357,11 @@ static struct irq_chip s3c24xx_eint_chip = {
.irq_set_type = s3c24xx_eint_type,
};
-static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
+static inline void s3c24xx_demux_eint(struct irq_desc *desc,
u32 offset, u32 range)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct s3c24xx_eint_data *data = irq_get_handler_data(irq);
+ struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
struct samsung_pinctrl_drv_data *d = data->drvdata;
unsigned int pend, mask;
@@ -374,7 +374,7 @@ static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
pend &= range;
while (pend) {
- unsigned int virq;
+ unsigned int virq, irq;
irq = __ffs(pend);
pend &= ~(1 << irq);
@@ -390,12 +390,12 @@ static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
{
- s3c24xx_demux_eint(irq, desc, 0, 0xf0);
+ s3c24xx_demux_eint(desc, 0, 0xf0);
}
static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
{
- s3c24xx_demux_eint(irq, desc, 8, 0xffff00);
+ s3c24xx_demux_eint(desc, 8, 0xffff00);
}
static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
@@ -437,7 +437,6 @@ static int s3c24xx_gpf_irq_map(struct irq_domain *h, unsigned int virq,
handle_edge_irq);
}
irq_set_chip_data(virq, bank);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -457,7 +456,6 @@ static int s3c24xx_gpg_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
irq_set_chip_data(virq, bank);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index ec8cc3b47621..f5ea40a69711 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -260,13 +260,13 @@ static int s3c64xx_irq_get_trigger(unsigned int type)
return trigger;
}
-static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
+static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
{
/* Edge- and level-triggered interrupts need different handlers */
if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
else
- __irq_set_handler_locked(irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
}
static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
@@ -356,7 +356,7 @@ static int s3c64xx_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- s3c64xx_irq_set_handler(irqd->irq, type);
+ s3c64xx_irq_set_handler(irqd, type);
/* Set up interrupt trigger */
reg = d->virt_base + EINTCON_REG(bank->eint_offset);
@@ -395,7 +395,6 @@ static int s3c64xx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_and_handler(virq,
&s3c64xx_gpio_irq_chip, handle_level_irq);
irq_set_chip_data(virq, bank);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -410,8 +409,8 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = {
static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct s3c64xx_eint_gpio_data *data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
chained_irq_enter(chip, desc);
@@ -567,7 +566,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- s3c64xx_irq_set_handler(irqd->irq, type);
+ s3c64xx_irq_set_handler(irqd, type);
/* Set up interrupt trigger */
reg = d->virt_base + EINT0CON0_REG;
@@ -599,11 +598,10 @@ static struct irq_chip s3c64xx_eint0_irq_chip = {
.irq_set_type = s3c64xx_eint0_irq_set_type,
};
-static inline void s3c64xx_irq_demux_eint(unsigned int irq,
- struct irq_desc *desc, u32 range)
+static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct s3c64xx_eint0_data *data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
unsigned int pend, mask;
@@ -616,11 +614,10 @@ static inline void s3c64xx_irq_demux_eint(unsigned int irq,
pend &= range;
while (pend) {
- unsigned int virq;
+ unsigned int virq, irq;
irq = fls(pend) - 1;
pend &= ~(1 << irq);
-
virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
/*
* Something must be really wrong if an unmapped EINT
@@ -636,22 +633,22 @@ static inline void s3c64xx_irq_demux_eint(unsigned int irq,
static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
{
- s3c64xx_irq_demux_eint(irq, desc, 0xf);
+ s3c64xx_irq_demux_eint(desc, 0xf);
}
static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
{
- s3c64xx_irq_demux_eint(irq, desc, 0xff0);
+ s3c64xx_irq_demux_eint(desc, 0xff0);
}
static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
{
- s3c64xx_irq_demux_eint(irq, desc, 0xff000);
+ s3c64xx_irq_demux_eint(desc, 0xff000);
}
static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
{
- s3c64xx_irq_demux_eint(irq, desc, 0xff00000);
+ s3c64xx_irq_demux_eint(desc, 0xff00000);
}
static irq_flow_handler_t s3c64xx_eint0_handlers[NUM_EINT0_IRQ] = {
@@ -673,7 +670,6 @@ static int s3c64xx_eint0_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_and_handler(virq,
&s3c64xx_eint0_irq_chip, handle_level_irq);
irq_set_chip_data(virq, ddata);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
#include "../core.h"
#include "pinctrl-samsung.h"
-#define GROUP_SUFFIX "-grp"
-#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
-#define FUNCTION_SUFFIX "-mux"
-#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
-
/* list of all possible config options supported */
static struct pin_config {
const char *property;
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 865d235612c5..fb9c44805234 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,24 +29,25 @@
static int sh_pfc_map_resources(struct sh_pfc *pfc,
struct platform_device *pdev)
{
- unsigned int num_windows = 0;
- unsigned int num_irqs = 0;
+ unsigned int num_windows, num_irqs;
struct sh_pfc_window *windows;
unsigned int *irqs = NULL;
struct resource *res;
unsigned int i;
+ int irq;
/* Count the MEM and IRQ resources. */
- for (i = 0; i < pdev->num_resources; ++i) {
- switch (resource_type(&pdev->resource[i])) {
- case IORESOURCE_MEM:
- num_windows++;
+ for (num_windows = 0;; num_windows++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, num_windows);
+ if (!res)
break;
-
- case IORESOURCE_IRQ:
- num_irqs++;
+ }
+ for (num_irqs = 0;; num_irqs++) {
+ irq = platform_get_irq(pdev, num_irqs);
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (irq < 0)
break;
- }
}
if (num_windows == 0)
@@ -72,22 +73,17 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
}
/* Fill them. */
- for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) {
- switch (resource_type(res)) {
- case IORESOURCE_MEM:
- windows->phys = res->start;
- windows->size = resource_size(res);
- windows->virt = devm_ioremap_resource(pfc->dev, res);
- if (IS_ERR(windows->virt))
- return -ENOMEM;
- windows++;
- break;
-
- case IORESOURCE_IRQ:
- *irqs++ = res->start;
- break;
- }
+ for (i = 0; i < num_windows; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ windows->phys = res->start;
+ windows->size = resource_size(res);
+ windows->virt = devm_ioremap_resource(pfc->dev, res);
+ if (IS_ERR(windows->virt))
+ return -ENOMEM;
+ windows++;
}
+ for (i = 0; i < num_irqs; i++)
+ *irqs++ = platform_get_irq(pdev, i);
return 0;
}
@@ -591,9 +587,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
}
static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_R8A7740
- { "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
-#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7778
{ "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
#endif
@@ -609,9 +602,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SH7269
{ "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
#endif
-#ifdef CONFIG_PINCTRL_PFC_SH73A0
- { "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
-#endif
#ifdef CONFIG_PINCTRL_PFC_SH7720
{ "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index d0bb1459783a..82ef1862dd1b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,10 +22,6 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
#include "core.h"
#include "sh_pfc.h"
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index baab81ead9ff..fc344a7c2b53 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -27,10 +27,27 @@
#include "core.h"
#include "sh_pfc.h"
+#define PORT_GP_30(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
+ PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
+ PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
+ PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
+ PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
+ PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
+ PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
+ PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
+ PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
+ PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
+ PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx)
+
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
- PORT_GP_32(1, fn, sfx), \
- PORT_GP_32(2, fn, sfx), \
+ PORT_GP_30(1, fn, sfx), \
+ PORT_GP_30(2, fn, sfx), \
PORT_GP_32(3, fn, sfx), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 3ddf23ec9f0b..25e8117f5a1a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -14,15 +14,30 @@
#include "core.h"
#include "sh_pfc.h"
+#define PORT_GP_26(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
+ PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
+ PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
+ PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
+ PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
+ PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
+ PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
+ PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
+ PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
+
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
- PORT_GP_32(1, fn, sfx), \
+ PORT_GP_26(1, fn, sfx), \
PORT_GP_32(2, fn, sfx), \
PORT_GP_32(3, fn, sfx), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx), \
PORT_GP_32(6, fn, sfx), \
- PORT_GP_32(7, fn, sfx)
+ PORT_GP_26(7, fn, sfx)
enum {
PINMUX_RESERVED = 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index bfdcac4b3bc4..5248685dbb4e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -2770,6 +2770,24 @@ static const unsigned int sdhi2_wp_pins[] = {
static const unsigned int sdhi2_wp_mux[] = {
SD2_WP_MARK,
};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ RCAR_GP_PIN(5, 24), /* PWEN */
+ RCAR_GP_PIN(5, 25), /* OVC */
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK,
+ USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+ RCAR_GP_PIN(5, 26), /* PWEN */
+ RCAR_GP_PIN(5, 27), /* OVC */
+};
+static const unsigned int usb1_mux[] = {
+ USB1_PWEN_MARK,
+ USB1_OVC_MARK,
+};
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(eth_link),
@@ -2945,6 +2963,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi2_ctrl),
SH_PFC_PIN_GROUP(sdhi2_cd),
SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
};
static const char * const eth_groups[] = {
@@ -3219,6 +3239,14 @@ static const char * const sdhi2_groups[] = {
"sdhi2_wp",
};
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
+static const char * const usb1_groups[] = {
+ "usb1",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(eth),
SH_PFC_FUNCTION(hscif0),
@@ -3253,6 +3281,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi0),
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d2efbfb776ac..097526576f88 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -26,10 +26,6 @@
#include <linux/regulator/machine.h>
#include <linux/slab.h>
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
#include "core.h"
#include "sh_pfc.h"
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index ff678966008b..863c3e30ce05 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -40,6 +40,10 @@ struct sh_pfc_pinctrl {
struct pinctrl_pin_desc *pins;
struct sh_pfc_pin_config *configs;
+
+ const char *func_prop_name;
+ const char *groups_prop_name;
+ const char *pins_prop_name;
};
static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
@@ -96,10 +100,13 @@ static int sh_pfc_map_add_config(struct pinctrl_map *map,
return 0;
}
-static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
+static int sh_pfc_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
struct pinctrl_map **map,
unsigned int *num_maps, unsigned int *index)
{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pmx->pfc->dev;
struct pinctrl_map *maps = *map;
unsigned int nmaps = *num_maps;
unsigned int idx = *index;
@@ -113,10 +120,27 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
const char *pin;
int ret;
+ /* Support both the old Renesas-specific properties and the new standard
+	 * properties. Mixing old and new properties isn't allowed, either
+	 * within a single subnode or across subnodes.
+ */
+ if (!pmx->func_prop_name) {
+ if (of_find_property(np, "groups", NULL) ||
+ of_find_property(np, "pins", NULL)) {
+ pmx->func_prop_name = "function";
+ pmx->groups_prop_name = "groups";
+ pmx->pins_prop_name = "pins";
+ } else {
+ pmx->func_prop_name = "renesas,function";
+ pmx->groups_prop_name = "renesas,groups";
+ pmx->pins_prop_name = "renesas,pins";
+ }
+ }
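+	/*
+	 * For example (subnode contents hypothetical), a node using the
+	 * standard bindings would contain:
+	 *     function = "scif0"; groups = "scif0_data";
+	 * while a legacy node would use "renesas,function" and
+	 * "renesas,groups" for the same purpose.
+	 */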
+
/* Parse the function and configuration properties. At least a function
* or one configuration must be specified.
*/
- ret = of_property_read_string(np, "renesas,function", &function);
+ ret = of_property_read_string(np, pmx->func_prop_name, &function);
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "Invalid function in DT\n");
return ret;
@@ -129,11 +153,12 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
if (!function && num_configs == 0) {
dev_err(dev,
"DT node must contain at least a function or config\n");
+ ret = -ENODEV;
goto done;
}
/* Count the number of pins and groups and reallocate mappings. */
- ret = of_property_count_strings(np, "renesas,pins");
+ ret = of_property_count_strings(np, pmx->pins_prop_name);
if (ret == -EINVAL) {
num_pins = 0;
} else if (ret < 0) {
@@ -143,7 +168,7 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
num_pins = ret;
}
- ret = of_property_count_strings(np, "renesas,groups");
+ ret = of_property_count_strings(np, pmx->groups_prop_name);
if (ret == -EINVAL) {
num_groups = 0;
} else if (ret < 0) {
@@ -174,7 +199,7 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
*num_maps = nmaps;
/* Iterate over pins and groups and create the mappings. */
- of_property_for_each_string(np, "renesas,groups", prop, group) {
+ of_property_for_each_string(np, pmx->groups_prop_name, prop, group) {
if (function) {
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
maps[idx].data.mux.group = group;
@@ -198,7 +223,7 @@ static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
goto done;
}
- of_property_for_each_string(np, "renesas,pins", prop, pin) {
+ of_property_for_each_string(np, pmx->pins_prop_name, prop, pin) {
ret = sh_pfc_map_add_config(&maps[idx], pin,
PIN_MAP_TYPE_CONFIGS_PIN,
configs, num_configs);
@@ -246,7 +271,7 @@ static int sh_pfc_dt_node_to_map(struct pinctrl_dev *pctldev,
index = 0;
for_each_child_of_node(np, child) {
- ret = sh_pfc_dt_subnode_to_map(dev, child, map, num_maps,
+ ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
&index);
if (ret < 0)
goto done;
@@ -254,7 +279,8 @@ static int sh_pfc_dt_node_to_map(struct pinctrl_dev *pctldev,
/* If no mapping has been found in child nodes try the config node. */
if (*num_maps == 0) {
- ret = sh_pfc_dt_subnode_to_map(dev, np, map, num_maps, &index);
+ ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps,
+ &index);
if (ret < 0)
goto done;
}
@@ -465,6 +491,9 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
+ case PIN_CONFIG_POWER_SOURCE:
+ return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
+
default:
return false;
}
@@ -477,7 +506,6 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
struct sh_pfc *pfc = pmx->pfc;
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned long flags;
- unsigned int bias;
if (!sh_pfc_pinconf_validate(pfc, _pin, param))
return -ENOTSUPP;
@@ -485,7 +513,9 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
- case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ unsigned int bias;
+
if (!pfc->info->ops || !pfc->info->ops->get_bias)
return -ENOTSUPP;
@@ -498,6 +528,24 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
*config = 0;
break;
+ }
+
+ case PIN_CONFIG_POWER_SOURCE: {
+ int ret;
+
+ if (!pfc->info->ops || !pfc->info->ops->get_io_voltage)
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+ ret = pfc->info->ops->get_io_voltage(pfc, _pin);
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ if (ret < 0)
+ return ret;
+
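+		/*
+		 * get_io_voltage() reports the pin I/O voltage (in mV, to
+		 * match set_io_voltage()); store it as the config argument.
+		 */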
+ *config = ret;
+ break;
+ }
default:
return -ENOTSUPP;
@@ -534,6 +582,24 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
break;
+ case PIN_CONFIG_POWER_SOURCE: {
+ unsigned int arg =
+ pinconf_to_config_argument(configs[i]);
+ int ret;
+
+ if (!pfc->info->ops || !pfc->info->ops->set_io_voltage)
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+ ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg);
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ if (ret)
+ return ret;
+
+ break;
+ }
+
default:
return -ENOTSUPP;
}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..15afd49fd4e3 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -12,6 +12,7 @@
#define __SH_PFC_H
#include <linux/bug.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/stringify.h>
enum {
@@ -26,6 +27,7 @@ enum {
#define SH_PFC_PIN_CFG_OUTPUT (1 << 1)
#define SH_PFC_PIN_CFG_PULL_UP (1 << 2)
#define SH_PFC_PIN_CFG_PULL_DOWN (1 << 3)
+#define SH_PFC_PIN_CFG_IO_VOLTAGE (1 << 4)
#define SH_PFC_PIN_CFG_NO_GPIO (1 << 31)
struct sh_pfc_pin {
@@ -121,6 +123,9 @@ struct sh_pfc_soc_operations {
unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
+ int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin);
+ int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin,
+ u16 voltage_mV);
};
struct sh_pfc_soc_info {
@@ -224,7 +229,7 @@ struct sh_pfc_soc_info {
/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
#define _GP_GPIO(bank, _pin, _name, sfx) \
- [(bank * 32) + _pin] = { \
+ { \
.pin = (bank * 32) + _pin, \
.name = __stringify(_name), \
.enum_id = _name##_DATA, \
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9384e0aa3996..9df0c5f25824 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -148,6 +148,19 @@ enum altas7_pad_type {
#define DIV_DISABLE 0x1
#define DIV_ENABLE 0x0
+/* Number of Function input disable registers */
+#define NUM_OF_IN_DISABLE_REG 0x2
+
+/* Offsets of the function input-disable registers */
+#define IN_DISABLE_0_REG_SET 0x0A00
+#define IN_DISABLE_0_REG_CLR 0x0A04
+#define IN_DISABLE_1_REG_SET 0x0A08
+#define IN_DISABLE_1_REG_CLR 0x0A0C
+#define IN_DISABLE_VAL_0_REG_SET 0x0A80
+#define IN_DISABLE_VAL_0_REG_CLR 0x0A84
+#define IN_DISABLE_VAL_1_REG_SET 0x0A88
+#define IN_DISABLE_VAL_1_REG_CLR 0x0A8C
+
struct dt_params {
const char *property;
int value;
@@ -197,6 +210,16 @@ struct atlas7_pad_config {
}
/**
+ * struct atlas7_pad_status - Atlas7 Pad status
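+ * @func: Saved function selector.
+ * @pull: Saved pull selector.
+ * @dstr: Saved drive strength selector.
+ * @reserved: Padding for alignment.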
+ */
+struct atlas7_pad_status {
+ u8 func;
+ u8 pull;
+ u8 dstr;
+ u8 reserved;
+};
+
+/**
* struct atlas7_pad_mux - Atlas7 mux
 * @bank: The register bank this pad's registers are in.
 * @pin: The ID of this pad.
@@ -285,6 +308,9 @@ struct atlas7_pinctrl_data {
/* Platform info of atlas7 pinctrl */
#define ATLAS7_PINCTRL_REG_BANKS 2
#define ATLAS7_PINCTRL_BANK_0_PINS 18
+#define ATLAS7_PINCTRL_BANK_1_PINS 141
+#define ATLAS7_PINCTRL_TOTAL_PINS \
+ (ATLAS7_PINCTRL_BANK_0_PINS + ATLAS7_PINCTRL_BANK_1_PINS)
/**
* Atlas7 GPIO Chip
@@ -316,6 +342,7 @@ struct atlas7_gpio_bank {
unsigned int gpio_offset;
unsigned int ngpio;
const unsigned int *gpio_pins;
+ u32 sleep_data[NGPIO_OF_BANK];
};
struct atlas7_gpio_chip {
@@ -343,6 +370,9 @@ struct atlas7_pmx {
struct pinctrl_desc pctl_desc;
struct atlas7_pinctrl_data *pctl_data;
void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS];
+ u32 status_ds[NUM_OF_IN_DISABLE_REG];
+ u32 status_dsv[NUM_OF_IN_DISABLE_REG];
+ struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS];
};
/*
@@ -3480,6 +3510,160 @@ struct atlas7_pinctrl_data atlas7_ioc_data = {
.confs_cnt = ARRAY_SIZE(atlas7_ioc_pad_confs),
};
+/* Simple map data structure */
+struct map_data {
+ u8 idx;
+ u8 data;
+};
+
+/**
+ * struct atlas7_pull_info - Atlas7 Pad pull info
+ * @pad_type: The type of this pad.
+ * @mask: The mask value of this pin's pull bits.
+ * @v2s: The map of pull register value to pull status.
+ * @s2v: The map of pull status to pull register value.
+ */
+struct atlas7_pull_info {
+ u8 pad_type;
+ u8 mask;
+ const struct map_data *v2s;
+ const struct map_data *s2v;
+};
+
+/* Pull Register value map to status */
+static const struct map_data p4we_pull_v2s[] = {
+ { P4WE_PULL_UP, PULL_UP },
+ { P4WE_HIGH_HYSTERESIS, HIGH_HYSTERESIS },
+ { P4WE_HIGH_Z, HIGH_Z },
+ { P4WE_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data p16st_pull_v2s[] = {
+ { P16ST_PULL_UP, PULL_UP },
+ { PD, PULL_UNKNOWN },
+ { P16ST_HIGH_Z, HIGH_Z },
+ { P16ST_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data pm31_pull_v2s[] = {
+ { PM31_PULL_DISABLED, PULL_DOWN },
+ { PM31_PULL_ENABLED, PULL_UP },
+};
+
+static const struct map_data pangd_pull_v2s[] = {
+ { PANGD_PULL_UP, PULL_UP },
+ { PD, PULL_UNKNOWN },
+ { PANGD_HIGH_Z, HIGH_Z },
+ { PANGD_PULL_DOWN, PULL_DOWN },
+};
+
+/* Pull status map to register value */
+static const struct map_data p4we_pull_s2v[] = {
+ { PULL_UP, P4WE_PULL_UP },
+ { HIGH_HYSTERESIS, P4WE_HIGH_HYSTERESIS },
+ { HIGH_Z, P4WE_HIGH_Z },
+ { PULL_DOWN, P4WE_PULL_DOWN },
+ { PULL_DISABLE, -1 },
+ { PULL_ENABLE, -1 },
+};
+
+static const struct map_data p16st_pull_s2v[] = {
+ { PULL_UP, P16ST_PULL_UP },
+ { HIGH_HYSTERESIS, -1 },
+ { HIGH_Z, P16ST_HIGH_Z },
+ { PULL_DOWN, P16ST_PULL_DOWN },
+ { PULL_DISABLE, -1 },
+ { PULL_ENABLE, -1 },
+};
+
+static const struct map_data pm31_pull_s2v[] = {
+ { PULL_UP, PM31_PULL_ENABLED },
+ { HIGH_HYSTERESIS, -1 },
+ { HIGH_Z, -1 },
+ { PULL_DOWN, PM31_PULL_DISABLED },
+ { PULL_DISABLE, -1 },
+ { PULL_ENABLE, -1 },
+};
+
+static const struct map_data pangd_pull_s2v[] = {
+ { PULL_UP, PANGD_PULL_UP },
+ { HIGH_HYSTERESIS, -1 },
+ { HIGH_Z, PANGD_HIGH_Z },
+ { PULL_DOWN, PANGD_PULL_DOWN },
+ { PULL_DISABLE, -1 },
+ { PULL_ENABLE, -1 },
+};
+
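+/* Indexed by pad type (struct atlas7_pad_config::type) */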
+static const struct atlas7_pull_info atlas7_pull_map[] = {
+ { PAD_T_4WE_PD, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+ { PAD_T_4WE_PU, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+ { PAD_T_16ST, P16ST_PULL_MASK, p16st_pull_v2s, p16st_pull_s2v },
+ { PAD_T_M31_0204_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+ { PAD_T_M31_0204_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+ { PAD_T_M31_0610_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+ { PAD_T_M31_0610_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+ { PAD_T_AD, PANGD_PULL_MASK, pangd_pull_v2s, pangd_pull_s2v },
+};
+
+/**
+ * struct atlas7_ds_ma_info - Atlas7 Pad DriveStrength & currents info
+ * @ma: The drive strength expressed as a current, in mA.
+ * @ds_16st: The corresponding raw value for a 16st pad.
+ * @ds_4we: The corresponding raw value for a 4we pad.
+ * @ds_0204m31: The corresponding raw value for a 0204m31 pad.
+ * @ds_0610m31: The corresponding raw value for a 0610m31 pad.
+ */
+struct atlas7_ds_ma_info {
+ u32 ma;
+ u32 ds_16st;
+ u32 ds_4we;
+ u32 ds_0204m31;
+ u32 ds_0610m31;
+};
+
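+/* DS_NULL marks a current value the corresponding pad type cannot drive. */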
+static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
+ { 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL },
+ { 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL },
+ { 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0 },
+ { 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL },
+ { 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1 },
+ { 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL },
+ { 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL },
+ { 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL },
+ { 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL },
+ { 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL },
+ { 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL },
+ { 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL },
+ { 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL },
+ { 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL },
+ { 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL },
+ { 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL },
+};
+
+/**
+ * struct atlas7_ds_info - Atlas7 Pad DriveStrength info
+ * @type: The type of this pad.
+ * @mask: The mask value of this pin's drive strength bits.
+ * @imval: The immediate value of the drive strength register.
+ */
+struct atlas7_ds_info {
+ u8 type;
+ u8 mask;
+ u8 imval;
+ u8 reserved;
+};
+
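+/* Indexed by pad type, like atlas7_pull_map above */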
+static const struct atlas7_ds_info atlas7_ds_map[] = {
+ { PAD_T_4WE_PD, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+ { PAD_T_4WE_PU, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+ { PAD_T_16ST, DS_4BIT_MASK, DS_4BIT_IM_VAL },
+ { PAD_T_M31_0204_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+ { PAD_T_M31_0204_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+ { PAD_T_M31_0610_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+ { PAD_T_M31_0610_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+ { PAD_T_AD, DS_NULL, DS_NULL },
+};
+
static inline u32 atlas7_pin_to_bank(u32 pin)
{
return (pin >= ATLAS7_PINCTRL_BANK_0_PINS) ? 1 : 0;
@@ -3682,49 +3866,22 @@ static int atlas7_pmx_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-struct atlas7_ds_info {
- u32 ma;
- u32 ds_16st;
- u32 ds_4we;
- u32 ds_0204m31;
- u32 ds_0610m31;
-};
-
-const struct atlas7_ds_info atlas7_ds_map[] = {
- { 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL},
- { 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL},
- { 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0},
- { 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL},
- { 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1},
- { 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL},
- { 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL},
- { 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL},
- { 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL},
- { 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL},
- { 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL},
- { 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL},
- { 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL},
- { 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL},
- { 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL},
- { 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL},
-};
-
static u32 convert_current_to_drive_strength(u32 type, u32 ma)
{
int idx;
- for (idx = 0; idx < ARRAY_SIZE(atlas7_ds_map); idx++) {
- if (atlas7_ds_map[idx].ma != ma)
+ for (idx = 0; idx < ARRAY_SIZE(atlas7_ma2ds_map); idx++) {
+ if (atlas7_ma2ds_map[idx].ma != ma)
continue;
if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU)
- return atlas7_ds_map[idx].ds_4we;
+ return atlas7_ma2ds_map[idx].ds_4we;
else if (type == PAD_T_16ST)
- return atlas7_ds_map[idx].ds_16st;
+ return atlas7_ma2ds_map[idx].ds_16st;
else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU)
- return atlas7_ds_map[idx].ds_0204m31;
+ return atlas7_ma2ds_map[idx].ds_0204m31;
else if (type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU)
- return atlas7_ds_map[idx].ds_0610m31;
+ return atlas7_ma2ds_map[idx].ds_0610m31;
}
return DS_NULL;
@@ -3735,78 +3892,21 @@ static int altas7_pinctrl_set_pull_sel(struct pinctrl_dev *pctldev,
{
struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
- u32 type = conf->type;
- u32 shift = conf->pupd_bit;
- u32 bank = atlas7_pin_to_bank(pin);
- void __iomem *pull_sel_reg, *pull_clr_reg;
+ const struct atlas7_pull_info *pull_info;
+ u32 bank;
+ unsigned long regv;
+ void __iomem *pull_sel_reg;
+ bank = atlas7_pin_to_bank(pin);
+ pull_info = &atlas7_pull_map[conf->type];
pull_sel_reg = pmx->regs[bank] + conf->pupd_reg;
- pull_clr_reg = CLR_REG(pull_sel_reg);
-
- if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
- writel(P4WE_PULL_MASK << shift, pull_clr_reg);
-
- if (sel == PULL_UP)
- writel(P4WE_PULL_UP << shift, pull_sel_reg);
- else if (sel == HIGH_HYSTERESIS)
- writel(P4WE_HIGH_HYSTERESIS << shift, pull_sel_reg);
- else if (sel == HIGH_Z)
- writel(P4WE_HIGH_Z << shift, pull_sel_reg);
- else if (sel == PULL_DOWN)
- writel(P4WE_PULL_DOWN << shift, pull_sel_reg);
- else {
- pr_err("Unknown Pull select type for 4WEPAD#%d\n",
- pin);
- return -ENOTSUPP;
- }
- } else if (type == PAD_T_16ST) {
- writel(P16ST_PULL_MASK << shift, pull_clr_reg);
-
- if (sel == PULL_UP)
- writel(P16ST_PULL_UP << shift, pull_sel_reg);
- else if (sel == HIGH_Z)
- writel(P16ST_HIGH_Z << shift, pull_sel_reg);
- else if (sel == PULL_DOWN)
- writel(P16ST_PULL_DOWN << shift, pull_sel_reg);
- else {
- pr_err("Unknown Pull select type for 16STPAD#%d\n",
- pin);
- return -ENOTSUPP;
- }
- } else if (type == PAD_T_M31_0204_PD ||
- type == PAD_T_M31_0204_PU ||
- type == PAD_T_M31_0610_PD ||
- type == PAD_T_M31_0610_PU) {
- writel(PM31_PULL_MASK << shift, pull_clr_reg);
-
- if (sel == PULL_UP)
- writel(PM31_PULL_ENABLED << shift, pull_sel_reg);
- else if (sel == PULL_DOWN)
- writel(PM31_PULL_DISABLED << shift, pull_sel_reg);
- else {
- pr_err("Unknown Pull select type for M31PAD#%d\n",
- pin);
- return -ENOTSUPP;
- }
- } else if (type == PAD_T_AD) {
- writel(PANGD_PULL_MASK << shift, pull_clr_reg);
-
- if (sel == PULL_UP)
- writel(PANGD_PULL_UP << shift, pull_sel_reg);
- else if (sel == HIGH_Z)
- writel(PANGD_HIGH_Z << shift, pull_sel_reg);
- else if (sel == PULL_DOWN)
- writel(PANGD_PULL_DOWN << shift, pull_sel_reg);
- else {
- pr_err("Unknown Pull select type for A/D PAD#%d\n",
- pin);
- return -ENOTSUPP;
- }
- } else {
- pr_err("Unknown Pad type[%d] for pull select PAD#%d\n",
- type, pin);
- return -ENOTSUPP;
- }
+
+	/* Retrieve the corresponding register value from the table, indexed by sel */
+ regv = pull_info->s2v[sel].data & pull_info->mask;
+
+	/* Clear the field, then set the new value in the pull register */
+ writel(pull_info->mask << conf->pupd_bit, CLR_REG(pull_sel_reg));
+ writel(regv << conf->pupd_bit, pull_sel_reg);
pr_debug("PIN_CFG ### SET PIN#%d PULL SELECTOR:%d == OK ####\n",
pin, sel);
@@ -3818,43 +3918,25 @@ static int __altas7_pinctrl_set_drive_strength_sel(struct pinctrl_dev *pctldev,
{
struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
- u32 type = conf->type;
- u32 shift = conf->drvstr_bit;
- u32 bank = atlas7_pin_to_bank(pin);
- void __iomem *ds_sel_reg, *ds_clr_reg;
-
- ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
- ds_clr_reg = CLR_REG(ds_sel_reg);
- if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
- if (sel & (~DS_2BIT_MASK))
- goto unsupport;
-
- writel(DS_2BIT_IM_VAL << shift, ds_clr_reg);
- writel(sel << shift, ds_sel_reg);
-
- return 0;
- } else if (type == PAD_T_16ST) {
- if (sel & (~DS_4BIT_MASK))
- goto unsupport;
+ const struct atlas7_ds_info *ds_info;
+ u32 bank;
+ void __iomem *ds_sel_reg;
- writel(DS_4BIT_IM_VAL << shift, ds_clr_reg);
- writel(sel << shift, ds_sel_reg);
+ ds_info = &atlas7_ds_map[conf->type];
+ if (sel & (~(ds_info->mask)))
+ goto unsupport;
- return 0;
- } else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU ||
- type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU) {
- if (sel & (~DS_1BIT_MASK))
- goto unsupport;
+ bank = atlas7_pin_to_bank(pin);
+ ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
- writel(DS_1BIT_IM_VAL << shift, ds_clr_reg);
- writel(sel << shift, ds_sel_reg);
+ writel(ds_info->imval << conf->drvstr_bit, CLR_REG(ds_sel_reg));
+ writel(sel << conf->drvstr_bit, ds_sel_reg);
- return 0;
- }
+ return 0;
unsupport:
pr_err("Pad#%d type[%d] doesn't support ds code[%d]!\n",
- pin, type, sel);
+ pin, conf->type, sel);
return -ENOTSUPP;
}
@@ -4101,14 +4183,135 @@ unmap_io:
return ret;
}
+#ifdef CONFIG_PM_SLEEP
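+/*
+ * The noirq phase is used so pad state is captured and restored while
+ * device interrupt handlers are disabled.
+ */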
+static int atlas7_pinmux_suspend_noirq(struct device *dev)
+{
+ struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+ struct atlas7_pad_status *status;
+ struct atlas7_pad_config *conf;
+ const struct atlas7_ds_info *ds_info;
+ const struct atlas7_pull_info *pull_info;
+ int idx;
+ u32 bank;
+ unsigned long regv;
+
+ for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+ /* Get this Pad's descriptor from PINCTRL */
+ conf = &pmx->pctl_data->confs[idx];
+ bank = atlas7_pin_to_bank(idx);
+ status = &pmx->sleep_data[idx];
+
+ /* Save Function selector */
+ regv = readl(pmx->regs[bank] + conf->mux_reg);
+ status->func = (regv >> conf->mux_bit) & FUNC_CLEAR_MASK;
+
+		/* Check whether the pad is routed to the analogue selector */
+ if (conf->ad_ctrl_reg == -1)
+ goto save_ds_sel;
+
+ regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
+ if (!(regv & (conf->ad_ctrl_bit << ANA_CLEAR_MASK)))
+ status->func = FUNC_ANALOGUE;
+
+save_ds_sel:
+ if (conf->drvstr_reg == -1)
+ goto save_pull_sel;
+
+ /* Save Drive Strength selector */
+ ds_info = &atlas7_ds_map[conf->type];
+ regv = readl(pmx->regs[bank] + conf->drvstr_reg);
+ status->dstr = (regv >> conf->drvstr_bit) & ds_info->mask;
+
+save_pull_sel:
+ /* Save Pull selector */
+ pull_info = &atlas7_pull_map[conf->type];
+ regv = readl(pmx->regs[bank] + conf->pupd_reg);
+ regv = (regv >> conf->pupd_bit) & pull_info->mask;
+ status->pull = pull_info->v2s[regv].data;
+ }
+
+ /*
+	 * Save the input-disable selectors; these apply to mux
+	 * functions rather than to individual pins.
+ */
+ for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+ pmx->status_ds[idx] = readl(pmx->regs[BANK_DS] +
+ IN_DISABLE_0_REG_SET + 0x8 * idx);
+ pmx->status_dsv[idx] = readl(pmx->regs[BANK_DS] +
+ IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+ }
+
+ return 0;
+}
+
+static int atlas7_pinmux_resume_noirq(struct device *dev)
+{
+ struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+ struct atlas7_pad_status *status;
+ struct atlas7_pad_config *conf;
+ int idx;
+ u32 bank;
+
+ for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+ /* Get this Pad's descriptor from PINCTRL */
+ conf = &pmx->pctl_data->confs[idx];
+ bank = atlas7_pin_to_bank(idx);
+ status = &pmx->sleep_data[idx];
+
+ /* Restore Function selector */
+ __atlas7_pmx_pin_enable(pmx, idx, (u32)status->func & 0xff);
+
+ if (FUNC_ANALOGUE == status->func)
+ goto restore_pull_sel;
+
+ /* Restore Drive Strength selector */
+ __altas7_pinctrl_set_drive_strength_sel(pmx->pctl, idx,
+ (u32)status->dstr & 0xff);
+
+restore_pull_sel:
+ /* Restore Pull selector */
+ altas7_pinctrl_set_pull_sel(pmx->pctl, idx,
+ (u32)status->pull & 0xff);
+ }
+
+ /*
+	 * Restore the input-disable selectors; these apply to mux
+	 * functions rather than to individual pins.
+ */
+ for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+ writel(~0, pmx->regs[BANK_DS] +
+ IN_DISABLE_0_REG_CLR + 0x8 * idx);
+ writel(pmx->status_ds[idx], pmx->regs[BANK_DS] +
+ IN_DISABLE_0_REG_SET + 0x8 * idx);
+ writel(~0, pmx->regs[BANK_DS] +
+ IN_DISABLE_VAL_0_REG_CLR + 0x8 * idx);
+ writel(pmx->status_dsv[idx], pmx->regs[BANK_DS] +
+ IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops atlas7_pinmux_pm_ops = {
+ .suspend_noirq = atlas7_pinmux_suspend_noirq,
+ .resume_noirq = atlas7_pinmux_resume_noirq,
+ .freeze_noirq = atlas7_pinmux_suspend_noirq,
+ .restore_noirq = atlas7_pinmux_resume_noirq,
+};
+#endif
+
static const struct of_device_id atlas7_pinmux_ids[] = {
{ .compatible = "sirf,atlas7-ioc",},
+ {},
};
static struct platform_driver atlas7_pinmux_driver = {
.driver = {
.name = "atlas7-ioc",
.of_match_table = atlas7_pinmux_ids,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &atlas7_pinmux_pm_ops,
+#endif
},
.probe = atlas7_pinmux_probe,
};
@@ -4286,14 +4489,15 @@ static struct irq_chip atlas7_gpio_irq_chip = {
.irq_set_type = atlas7_gpio_irq_type,
};
-static void atlas7_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
struct atlas7_gpio_bank *bank = NULL;
u32 status, ctrl;
int pin_in_bank = 0, idx;
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
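+	/* The __irq parameter is unused; derive the chip and irq from desc. */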
for (idx = 0; idx < a7gc->nbank; idx++) {
bank = &a7gc->banks[idx];
@@ -4496,6 +4700,7 @@ static void atlas7_gpio_set_value(struct gpio_chip *chip,
static const struct of_device_id atlas7_gpio_ids[] = {
{ .compatible = "sirf,atlas7-gpio", },
+ {},
};
static int atlas7_gpio_probe(struct platform_device *pdev)
@@ -4612,17 +4817,65 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
BUG_ON(!bank->pctldev);
}
+ platform_set_drvdata(pdev, a7gc);
dev_info(&pdev->dev, "add to system.\n");
return 0;
failed:
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_gpio_suspend_noirq(struct device *dev)
+{
+ struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+ struct atlas7_gpio_bank *bank;
+ void __iomem *ctrl_reg;
+ u32 idx, pin;
+
+ for (idx = 0; idx < a7gc->nbank; idx++) {
+ bank = &a7gc->banks[idx];
+ for (pin = 0; pin < bank->ngpio; pin++) {
+ ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+ bank->sleep_data[pin] = readl(ctrl_reg);
+ }
+ }
+
+ return 0;
+}
+
+static int atlas7_gpio_resume_noirq(struct device *dev)
+{
+ struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+ struct atlas7_gpio_bank *bank;
+ void __iomem *ctrl_reg;
+ u32 idx, pin;
+
+ for (idx = 0; idx < a7gc->nbank; idx++) {
+ bank = &a7gc->banks[idx];
+ for (pin = 0; pin < bank->ngpio; pin++) {
+ ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+ writel(bank->sleep_data[pin], ctrl_reg);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops atlas7_gpio_pm_ops = {
+ .suspend_noirq = atlas7_gpio_suspend_noirq,
+ .resume_noirq = atlas7_gpio_resume_noirq,
+ .freeze_noirq = atlas7_gpio_suspend_noirq,
+ .restore_noirq = atlas7_gpio_resume_noirq,
+};
+#endif
+
static struct platform_driver atlas7_gpio_driver = {
.driver = {
.name = "atlas7-gpio",
- .owner = THIS_MODULE,
.of_match_table = atlas7_gpio_ids,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &atlas7_gpio_pm_ops,
+#endif
},
.probe = atlas7_gpio_probe,
};
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 8ba26e45499a..f8bd9fb52033 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,14 +545,15 @@ static struct irq_chip sirfsoc_irq_chip = {
.irq_set_type = sirfsoc_gpio_irq_type,
};
-static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(gc);
struct sirfsoc_gpio_bank *bank;
u32 status, ctrl;
int idx = 0;
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int i;
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index f87a5eaf75da..0afaf79a4e51 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Inspired from:
* - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index dc8bf85ecb2a..27c2cc8d83ad 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index a7bdc537efa7..92611bb757ac 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void)
}
module_exit(spear1310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f43ec85a0328..f842e9dc40d0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1340 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void)
}
module_exit(spear1340_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index da8990a8eeef..d998a2ccff48 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr300 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void)
}
module_exit(spear300_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 31ede51e819b..609b18aceb16 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void)
}
module_exit(spear310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 506e40b641e0..c07114431bd4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr320 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void)
}
module_exit(spear320_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 12ee21af766b..d3119aafe709 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 7860b36053c4..ce19dcf8f08b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
* Header file for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 7376a97b5e65..862a096c5dba 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -135,7 +135,14 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0")), /* TX */
+ SUNXI_FUNCTION(0x2, "ir0"), /* TX */
+ /*
+ * The SPDIF block is not referenced at all in the A10 user
+	 * manual. However, it is described in leaked code, and the
+	 * pin descriptions are declared in the A20 user manual, which
+	 * is pin-compatible with this device.
+ */
+ SUNXI_FUNCTION(0x4, "spdif")), /* SPDIF MCLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -176,11 +183,15 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* DI */
- SUNXI_FUNCTION(0x3, "ac97")), /* DI */
+ SUNXI_FUNCTION(0x3, "ac97"), /* DI */
+ /* Undocumented mux function - See SPDIF MCLK above */
+ SUNXI_FUNCTION(0x4, "spdif")), /* SPDIF IN */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2")), /* CS1 */
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
+ /* Undocumented mux function - See SPDIF MCLK above */
+ SUNXI_FUNCTION(0x4, "spdif")), /* SPDIF OUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f09573e13203..fb4669c0ce0e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -588,7 +588,6 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
@@ -615,16 +614,17 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- if (type & IRQ_TYPE_LEVEL_MASK) {
- d->chip = &sunxi_pinctrl_level_irq_chip;
- desc->handle_irq = handle_fasteoi_irq;
- } else {
- d->chip = &sunxi_pinctrl_edge_irq_chip;
- desc->handle_irq = handle_edge_irq;
- }
-
spin_lock_irqsave(&pctl->lock, flags);
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ __irq_set_chip_handler_name_locked(d->irq,
+ &sunxi_pinctrl_level_irq_chip,
+ handle_fasteoi_irq, NULL);
+ else
+ __irq_set_chip_handler_name_locked(d->irq,
+ &sunxi_pinctrl_edge_irq_chip,
+ handle_edge_irq, NULL);
+
regval = readl(pctl->membase + reg);
regval &= ~(IRQ_CFG_IRQ_MASK << index);
writel(regval | (mode << index), pctl->membase + reg);
@@ -685,6 +685,7 @@ static void sunxi_pinctrl_irq_ack_unmask(struct irq_data *d)
}
static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
+ .name = "sunxi_pio_edge",
.irq_ack = sunxi_pinctrl_irq_ack,
.irq_mask = sunxi_pinctrl_irq_mask,
.irq_unmask = sunxi_pinctrl_irq_unmask,
@@ -695,6 +696,7 @@ static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
};
static struct irq_chip sunxi_pinctrl_level_irq_chip = {
+ .name = "sunxi_pio_level",
.irq_eoi = sunxi_pinctrl_irq_ack,
.irq_mask = sunxi_pinctrl_irq_mask,
.irq_unmask = sunxi_pinctrl_irq_unmask,
@@ -709,10 +711,42 @@ static struct irq_chip sunxi_pinctrl_level_irq_chip = {
IRQCHIP_EOI_IF_HANDLED,
};
-static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct sunxi_desc_function *desc;
+ int pin, base;
+
+ if (intsize < 3)
+ return -EINVAL;
+
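+	/* The three specifier cells are <bank pin-within-bank irq-type>. */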
+ base = PINS_PER_BANK * intspec[0];
+ pin = base + intspec[1];
+
+ desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data,
+ pin, "irq");
+ if (!desc)
+ return -EINVAL;
+
+ *out_hwirq = desc->irqbank * PINS_PER_BANK + desc->irqnum;
+ *out_type = intspec[2];
+
+ return 0;
+}
+
+static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
+ .xlate = sunxi_pinctrl_irq_of_xlate,
+};
+
+static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct sunxi_pinctrl *pctl = irq_desc_get_handler_data(desc);
unsigned long bank, reg, val;
for (bank = 0; bank < pctl->desc->irq_banks; bank++)
@@ -983,8 +1017,8 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctl->domain = irq_domain_add_linear(node,
pctl->desc->irq_banks * IRQ_PER_BANK,
- &irq_domain_simple_ops,
- NULL);
+ &sunxi_pinctrl_irq_domain_ops,
+ pctl);
if (!pctl->domain) {
dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
ret = -ENOMEM;
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
new file mode 100644
index 000000000000..eab23ef9ddbf
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -0,0 +1,32 @@
+if ARCH_UNIPHIER
+
+config PINCTRL_UNIPHIER_CORE
+ bool
+ select PINMUX
+ select GENERIC_PINCONF
+
+config PINCTRL_UNIPHIER_PH1_LD4
+ tristate "UniPhier PH1-LD4 SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO4
+ tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_SLD8
+ tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO5
+ tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PROXSTREAM2
+ tristate "UniPhier ProXstream2 SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_LD6B
+ tristate "UniPhier PH1-LD6b SoC pinctrl driver"
+ select PINCTRL_UNIPHIER_CORE
+
+endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
new file mode 100644
index 000000000000..e215b1097297
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_PINCTRL_UNIPHIER_CORE) += pinctrl-uniphier-core.o
+
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4) += pinctrl-ph1-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4) += pinctrl-ph1-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8) += pinctrl-ph1-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5) += pinctrl-ph1-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2) += pinctrl-proxstream2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B) += pinctrl-ph1-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
new file mode 100644
index 000000000000..7beb87e8f499
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_ld4_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "EA8", 0,
+ 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "EA9", 0,
+ 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "EA10", 0,
+ 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "EA11", 0,
+ 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "EA12", 0,
+ 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(12, "EA13", 0,
+ 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(13, "EA14", 0,
+ 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(14, "EA15", 0,
+ 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE,
+ 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+ 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+ 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 146, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 147, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 148, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 149, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 150, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 151, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_8_12_16_20,
+ 152, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_8_12_16_20,
+ 153, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_8_12_16_20,
+ 154, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_8_12_16_20,
+ 155, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6,
+ 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6,
+ 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6,
+ 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6,
+ 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6,
+ 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6,
+ 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6,
+ 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6,
+ 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(40, "MDC", 6,
+ 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(41, "MDIO", 6,
+ 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6,
+ 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6,
+ 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ 156, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ 157, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ 158, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ 159, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ 160, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ 161, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+ 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+ 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0,
+ 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0,
+ 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0,
+ 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0,
+ 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0,
+ 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "PCREG", 0,
+ 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0,
+ 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0,
+ 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0,
+ 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0,
+ 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0,
+ 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0,
+ 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0,
+ 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "PCWE", 0,
+ 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "PCOE", 0,
+ 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0,
+ 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0,
+ 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0,
+ 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0,
+ 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0,
+ 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0,
+ 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0,
+ 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0,
+ 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0,
+ 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0,
+ 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0,
+ 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0,
+ 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0,
+ 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0,
+ 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0,
+ 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0,
+ 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0,
+ 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0,
+ 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0,
+ 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0,
+ 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0,
+ 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0,
+ 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0,
+ 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0,
+ 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0,
+ 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "AGCI", 3,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "AGCR", 4,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0,
+ 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0,
+ 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+ 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+ 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+ 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
+ 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+ 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+ 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(112, "HIN", 1,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(113, "VIN", 2,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+ 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+ 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+ 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+ 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+ 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+ 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "TCON6", 0,
+ 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "TCON7", 0,
+ 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "PWMA", 0,
+ 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0,
+ 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0,
+ 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0,
+ 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0,
+ 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0,
+ 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0,
+ 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0,
+ 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0,
+ 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0,
+ 119, UNIPHIER_PIN_DRV_4_8,
+ 119, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0,
+ 120, UNIPHIER_PIN_DRV_4_8,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0,
+ 121, UNIPHIER_PIN_DRV_4_8,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0,
+ 122, UNIPHIER_PIN_DRV_4_8,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "PORT00", 0,
+ 123, UNIPHIER_PIN_DRV_4_8,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "PORT01", 0,
+ 124, UNIPHIER_PIN_DRV_4_8,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "PORT02", 0,
+ 125, UNIPHIER_PIN_DRV_4_8,
+ 125, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "PORT03", 0,
+ 126, UNIPHIER_PIN_DRV_4_8,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "PORT04", 0,
+ 127, UNIPHIER_PIN_DRV_4_8,
+ 127, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "PORT05", 0,
+ 128, UNIPHIER_PIN_DRV_4_8,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "PORT06", 0,
+ 129, UNIPHIER_PIN_DRV_4_8,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "PORT07", 0,
+ 130, UNIPHIER_PIN_DRV_4_8,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "PORT10", 0,
+ 131, UNIPHIER_PIN_DRV_4_8,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "PORT11", 0,
+ 132, UNIPHIER_PIN_DRV_4_8,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "PORT12", 0,
+ 133, UNIPHIER_PIN_DRV_4_8,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "PORT13", 0,
+ 134, UNIPHIER_PIN_DRV_4_8,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "PORT14", 0,
+ 135, UNIPHIER_PIN_DRV_4_8,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "PORT15", 0,
+ 136, UNIPHIER_PIN_DRV_4_8,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "PORT16", 0,
+ 137, UNIPHIER_PIN_DRV_4_8,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+ 138, UNIPHIER_PIN_DRV_4_8,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "PORT20", 0,
+ 139, UNIPHIER_PIN_DRV_4_8,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "PORT21", 0,
+ 140, UNIPHIER_PIN_DRV_4_8,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "PORT22", 0,
+ 141, UNIPHIER_PIN_DRV_4_8,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "PORT23", 0,
+ 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE,
+ 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "PORT25", 0,
+ 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "PORT26", 0,
+ 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_UP),
+};
+
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159,
+ 160, 161, 162, 163, 164};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {85, 88};
+static const unsigned uart0_muxvals[] = {1, 1};
+static const unsigned uart1_pins[] = {155, 156};
+static const unsigned uart1_muxvals[] = {13, 13};
+static const unsigned uart1b_pins[] = {69, 70};
+static const unsigned uart1b_muxvals[] = {23, 23};
+static const unsigned uart2_pins[] = {128, 129};
+static const unsigned uart2_muxvals[] = {13, 13};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {53, 54};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {55, 56};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {155, 156};
+static const unsigned usb2_muxvals[] = {4, 4};
+static const unsigned usb2b_pins[] = {67, 68};
+static const unsigned usb2b_muxvals[] = {23, 23};
+static const unsigned port_range0_pins[] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, /* PORT0x */
+ 143, 144, 145, 146, 147, 148, 149, 150, /* PORT1x */
+ 151, 152, 153, 154, 155, 156, 157, 0, /* PORT2x */
+ 1, 2, 3, 4, 5, 120, 121, 122, /* PORT3x */
+ 24, 25, 26, 27, 28, 29, 30, 31, /* PORT4x */
+ 40, 41, 42, 43, 44, 45, 46, 47, /* PORT5x */
+ 48, 49, 50, 51, 52, 53, 54, 55, /* PORT6x */
+ 56, 85, 84, 59, 82, 61, 64, 65, /* PORT7x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT8x */
+ 66, 67, 68, 69, 70, 71, 72, 73, /* PORT9x */
+ 74, 75, 89, 86, 78, 79, 80, 81, /* PORT10x */
+ 60, 83, 58, 57, 88, 87, 77, 76, /* PORT11x */
+ 90, 91, 92, 93, 94, 95, 96, 97, /* PORT12x */
+ 98, 99, 100, 6, 101, 114, 115, 116, /* PORT13x */
+ 103, 108, 21, 22, 23, 117, 118, 119, /* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* PORT0x */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* PORT1x */
+ 0, 0, 0, 0, 0, 0, 0, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT11x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	7, /* PORT165 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, /* PORT165 */
+};
+static const unsigned xirq_range0_pins[] = {
+ 151, 123, 124, 125, 126, 127, 128, 129, /* XIRQ0-7 */
+ 130, 131, 132, 133, 62, /* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+ 14, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */
+ 0, 0, 0, 0, 14, /* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+ 134, 63, /* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+ 0, 14, /* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart1b),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb2b),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2", "usb2b"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ "port110", "port111", "port112", "port113",
+ "port114", "port115", "port116", "port117",
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-164 missing */
+ /* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_ld4_pindata = {
+ .groups = ph1_ld4_groups,
+ .groups_count = ARRAY_SIZE(ph1_ld4_groups),
+ .functions = ph1_ld4_functions,
+ .functions_count = ARRAY_SIZE(ph1_ld4_functions),
+ .mux_bits = 8,
+ .reg_stride = 4,
+ .load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld4_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = ph1_ld4_pins,
+ .npins = ARRAY_SIZE(ph1_ld4_pins),
+ .owner = THIS_MODULE,
+};
+
+static int ph1_ld4_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc,
+ &ph1_ld4_pindata);
+}
+
+static const struct of_device_id ph1_ld4_pinctrl_match[] = {
+ { .compatible = "socionext,ph1-ld4-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
+
+static struct platform_driver ph1_ld4_pinctrl_driver = {
+ .probe = ph1_ld4_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ph1_ld4_pinctrl_match,
+ },
+};
+module_platform_driver(ph1_ld4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
new file mode 100644
index 000000000000..9720e697fbc1
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld6b-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+ 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+ 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+ 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+ 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 15, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 16, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 17, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 18, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 19, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 20, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 24, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 25, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 26, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 27, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 28, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 29, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+ 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+ 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+ 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+ 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+ 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+ 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 53, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 54, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 100, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(113, "SBO0", 0,
+ 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(114, "SBI0", 0,
+ 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+ 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+ 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 119, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 125, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 127, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+ 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+ 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+ 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+ 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+ 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+ 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+ 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+ 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+ 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+ 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+ 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+ 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+ 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+ 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+ 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+ 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+ 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 167, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 168, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 169, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 170, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 171, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 172, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 173, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 174, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 175, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 176, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 177, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 178, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 179, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 180, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 181, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 182, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 183, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 184, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 185, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 186, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0,
+ 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0,
+ 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0,
+ 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0,
+ 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0,
+ 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0,
+ 192, UNIPHIER_PIN_DRV_4_8,
+ 192, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0,
+ 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0,
+ 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0,
+ 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0,
+ 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0,
+ 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0,
+ 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE,
+ 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+ 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 203, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 204, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 205, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 206, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0,
+ 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0,
+ 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0,
+ 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE,
+ 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE,
+ 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE,
+ 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 213, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 214, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0,
+ 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0,
+ 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0,
+ 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0,
+ 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0,
+ 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0,
+ 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0,
+ 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0,
+ 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0,
+ 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0,
+ 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0,
+ 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0,
+ 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0,
+ 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0,
+ 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0,
+ 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0,
+ 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0,
+ 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0,
+ 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0,
+ 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned adinter_pins[] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234,
+};
+static const unsigned adinter_muxvals[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+};
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {115, 116};
+static const unsigned i2c2_muxvals[] = {1, 1};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+ 42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {135, 136};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart0b_pins[] = {11, 12};
+static const unsigned uart0b_muxvals[] = {2, 2};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart1b_pins[] = {113, 114};
+static const unsigned uart1b_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {2, 2};
+static const unsigned uart2b_pins[] = {86, 87};
+static const unsigned uart2b_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+ 127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */
+ 135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT2x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT3x */
+ 16, 17, 18, 19, 21, 22, 23, 24, /* PORT4x */
+ 25, 30, 31, 32, 33, 34, 35, 36, /* PORT5x */
+ 37, 38, 39, 40, 41, 42, 43, 44, /* PORT6x */
+ 45, 46, 47, 48, 49, 50, 51, 52, /* PORT7x */
+ 53, 54, 55, 56, 57, 58, 59, 60, /* PORT8x */
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */
+ 69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* PORT12x */
+ 89, 90, 95, 96, 97, 98, 99, 100, /* PORT13x */
+ 101, 102, 103, 104, 105, 106, 107, 108, /* PORT14x */
+ 118, 119, 120, 121, 122, 123, 124, 125, /* PORT15x */
+ 126, 72, 73, 92, 177, 93, 94, 176, /* PORT16x */
+ 74, 91, 27, 28, 29, 75, 20, 26, /* PORT17x */
+ 109, 110, 111, 112, 113, 114, 115, 116, /* PORT18x */
+ 117, 143, 144, 145, 146, 147, 148, 149, /* PORT19x */
+ 150, 151, 152, 153, 154, 155, 156, 157, /* PORT20x */
+ 158, 159, 160, 161, 162, 163, 164, 165, /* PORT21x */
+ 166, 178, 179, 180, 181, 182, 183, 184, /* PORT22x */
+ 185, 187, 188, 189, 190, 191, 192, 193, /* PORT23x */
+ 194, 195, 196, 197, 198, 199, 200, 201, /* PORT24x */
+ 202, 203, 204, 205, 206, 207, 208, 209, /* PORT25x */
+ 210, 211, 212, 213, 214, 215, 216, 217, /* PORT26x */
+ 218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */
+ 227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT15x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT16x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT17x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT25x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT26x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT27x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+ 118, 119, 120, 121, 122, 123, 124, 125, /* XIRQ0-7 */
+ 126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */
+ 74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+};
+
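+/*
+ * Besides the whole-range groups, the UNIPHIER_PINCTRL_GROUP_SINGLE()
+ * entries below expose each pin of port_range0, port_range1 and xirq
+ * as an individually selectable one-pin group, indexed by its offset
+ * within the corresponding range array.
+ */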
+static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(adinter),
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart0b),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart1b),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart2b),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const adinter_groups[] = {"adinter"};
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2", "uart2b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ /* port110-117 missing */
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ "port150", "port151", "port152", "port153",
+ "port154", "port155", "port156", "port157",
+ "port160", "port161", "port162", "port163",
+ "port164", "port165", "port166", "port167",
+ "port170", "port171", "port172", "port173",
+ "port174", "port175", "port176", "port177",
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ "port190", "port191", "port192", "port193",
+ "port194", "port195", "port196", "port197",
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ "port224", "port225", "port226", "port227",
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+ "port250", "port251", "port252", "port253",
+ "port254", "port255", "port256", "port257",
+ "port260", "port261", "port262", "port263",
+ "port264", "port265", "port266", "port267",
+ "port270", "port271", "port272", "port273",
+ "port274", "port275", "port276", "port277",
+ "port280", "port281", "port282", "port283",
+ "port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20", "xirq21", "xirq22", "xirq23",
+};
+
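+/* Each function selects among the group-name arrays defined above. */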
+static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(usb3),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
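+/*
+ * mux_bits and reg_stride describe the pinmux register layout: with
+ * 8 bits of mux setting per pin and a 4-byte register stride, each
+ * 32-bit register is expected to cover four consecutive pins.
+ */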
+static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
+ .groups = ph1_ld6b_groups,
+ .groups_count = ARRAY_SIZE(ph1_ld6b_groups),
+ .functions = ph1_ld6b_functions,
+ .functions_count = ARRAY_SIZE(ph1_ld6b_functions),
+ .mux_bits = 8,
+ .reg_stride = 4,
+ .load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = ph1_ld6b_pins,
+ .npins = ARRAY_SIZE(ph1_ld6b_pins),
+ .owner = THIS_MODULE,
+};
+
+static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
+ &ph1_ld6b_pindata);
+}
+
+static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
+ { .compatible = "socionext,ph1-ld6b-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
+
+static struct platform_driver ph1_ld6b_pinctrl_driver = {
+ .probe = ph1_ld6b_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ph1_ld6b_pinctrl_match,
+ },
+};
+module_platform_driver(ph1_ld6b_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
new file mode 100644
index 000000000000..96921e40da5f
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
@@ -0,0 +1,1554 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro4_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE,
+ 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE,
+ 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE,
+ 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE,
+ 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE,
+ 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE,
+ 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE,
+ 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE,
+ 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE,
+ 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE,
+ 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE,
+ 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+ 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+ 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+ 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+ 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE,
+ 39, UNIPHIER_PIN_DRV_NONE,
+ 39, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+ 2, UNIPHIER_PIN_DRV_8_12_16_20,
+ 40, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+ 3, UNIPHIER_PIN_DRV_8_12_16_20,
+ 41, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 42, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+ 5, UNIPHIER_PIN_DRV_8_12_16_20,
+ 43, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_8_12_16_20,
+ 44, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_8_12_16_20,
+ 45, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 46, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_8_12_16_20,
+ 47, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 51, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+ 1, UNIPHIER_PIN_DRV_8_12_16_20,
+ 53, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+ 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_NONE,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE,
+ 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+ 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+ 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE,
+ 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE,
+ 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE,
+ 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE,
+ 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+ 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+ 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+ 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+ 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+ 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+ 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+ 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+ 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+ 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+ 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+ 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+ 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+ 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+ 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+ 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+ 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+ 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+ 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+ 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE,
+ 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE,
+ 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE,
+ 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE,
+ 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE,
+ 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE,
+ 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+ 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE,
+ 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE,
+ 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE,
+ 119, UNIPHIER_PIN_DRV_4_8,
+ 119, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE,
+ 120, UNIPHIER_PIN_DRV_4_8,
+ 120, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 121, UNIPHIER_PIN_DRV_4_8,
+ 121, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 122, UNIPHIER_PIN_DRV_4_8,
+ 122, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+ 123, UNIPHIER_PIN_DRV_4_8,
+ 123, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+ 124, UNIPHIER_PIN_DRV_4_8,
+ 124, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+ 125, UNIPHIER_PIN_DRV_4_8,
+ 125, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+ 126, UNIPHIER_PIN_DRV_4_8,
+ 126, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE,
+ 127, UNIPHIER_PIN_DRV_4_8,
+ 127, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 128, UNIPHIER_PIN_DRV_4_8,
+ 128, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE,
+ 129, UNIPHIER_PIN_DRV_4_8,
+ 129, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE,
+ 130, UNIPHIER_PIN_DRV_4_8,
+ 130, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE,
+ 131, UNIPHIER_PIN_DRV_4_8,
+ 131, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 132, UNIPHIER_PIN_DRV_4_8,
+ 132, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE,
+ 133, UNIPHIER_PIN_DRV_4_8,
+ 133, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE,
+ 134, UNIPHIER_PIN_DRV_4_8,
+ 134, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE,
+ 135, UNIPHIER_PIN_DRV_4_8,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 136, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_8_12_16_20,
+ 137, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_8_12_16_20,
+ 138, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 15, UNIPHIER_PIN_DRV_8_12_16_20,
+ 139, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_8_12_16_20,
+ 141, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_8_12_16_20,
+ 140, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE,
+ 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE,
+ 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE,
+ 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE,
+ 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6,
+ 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6,
+ 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6,
+ 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6,
+ 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6,
+ 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6,
+ 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE,
+ 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6,
+ 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6,
+ 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6,
+ 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6,
+ 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6,
+ 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6,
+ 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6,
+ 159, UNIPHIER_PIN_DRV_4_8,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6,
+ 160, UNIPHIER_PIN_DRV_4_8,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6,
+ 161, UNIPHIER_PIN_DRV_4_8,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(176, "MDC", 6,
+ 162, UNIPHIER_PIN_DRV_4_8,
+ 162, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(177, "MDIO", 6,
+ 163, UNIPHIER_PIN_DRV_4_8,
+ 163, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6,
+ 164, UNIPHIER_PIN_DRV_4_8,
+ 164, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6,
+ 165, UNIPHIER_PIN_DRV_4_8,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 166, UNIPHIER_PIN_DRV_4_8,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ 167, UNIPHIER_PIN_DRV_4_8,
+ 167, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 168, UNIPHIER_PIN_DRV_4_8,
+ 168, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+ 169, UNIPHIER_PIN_DRV_4_8,
+ 169, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 170, UNIPHIER_PIN_DRV_4_8,
+ 170, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+ 171, UNIPHIER_PIN_DRV_4_8,
+ 171, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE,
+ 172, UNIPHIER_PIN_DRV_4_8,
+ 172, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 173, UNIPHIER_PIN_DRV_4_8,
+ 173, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+ 174, UNIPHIER_PIN_DRV_4_8,
+ 174, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 175, UNIPHIER_PIN_DRV_4_8,
+ 175, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE,
+ 176, UNIPHIER_PIN_DRV_4_8,
+ 176, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE,
+ 177, UNIPHIER_PIN_DRV_4_8,
+ 177, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE,
+ 178, UNIPHIER_PIN_DRV_4_8,
+ 178, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE,
+ 179, UNIPHIER_PIN_DRV_4_8,
+ 179, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE,
+ 180, UNIPHIER_PIN_DRV_4_8,
+ 180, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE,
+ 181, UNIPHIER_PIN_DRV_4_8,
+ 181, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE,
+ 182, UNIPHIER_PIN_DRV_4_8,
+ 182, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE,
+ 183, UNIPHIER_PIN_DRV_4_8,
+ 183, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE,
+ 184, UNIPHIER_PIN_DRV_4_8,
+ 184, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE,
+ 185, UNIPHIER_PIN_DRV_4_8,
+ 185, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE,
+ 186, UNIPHIER_PIN_DRV_4_8,
+ 186, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE,
+ 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE,
+ 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE,
+ 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE,
+ 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE,
+ 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE,
+ 192, UNIPHIER_PIN_DRV_NONE,
+ 192, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE,
+ 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE,
+ 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE,
+ 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE,
+ 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE,
+ 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE,
+ 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE,
+ 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE,
+ 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE,
+ 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE,
+ 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE,
+ 203, UNIPHIER_PIN_DRV_4_8,
+ 203, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE,
+ 204, UNIPHIER_PIN_DRV_4_8,
+ 204, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE,
+ 205, UNIPHIER_PIN_DRV_4_8,
+ 205, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE,
+ 206, UNIPHIER_PIN_DRV_4_8,
+ 206, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE,
+ 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE,
+ 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE,
+ 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE,
+ 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE,
+ 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE,
+ 213, UNIPHIER_PIN_DRV_4_8,
+ 213, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ 214, UNIPHIER_PIN_DRV_4_8,
+ 214, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE,
+ 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE,
+ 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE,
+ 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE,
+ 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE,
+ 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE,
+ 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE,
+ 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE,
+ 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE,
+ 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE,
+ 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE,
+ 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE,
+ 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE,
+ 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE,
+ 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE,
+ 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE,
+ 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE,
+ 235, UNIPHIER_PIN_DRV_4_8,
+ 235, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE,
+ 236, UNIPHIER_PIN_DRV_4_8,
+ 236, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE,
+ 237, UNIPHIER_PIN_DRV_4_8,
+ 237, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE,
+ 238, UNIPHIER_PIN_DRV_4_8,
+ 238, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE,
+ 239, UNIPHIER_PIN_DRV_4_8,
+ 239, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE,
+ 240, UNIPHIER_PIN_DRV_4_8,
+ 240, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE,
+ 241, UNIPHIER_PIN_DRV_4_8,
+ 241, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE,
+ 242, UNIPHIER_PIN_DRV_4_8,
+ 242, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE,
+ 243, UNIPHIER_PIN_DRV_4_8,
+ 243, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE,
+ 244, UNIPHIER_PIN_DRV_4_8,
+ 244, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE,
+ 245, UNIPHIER_PIN_DRV_4_8,
+ 245, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE,
+ 246, UNIPHIER_PIN_DRV_4_8,
+ 246, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE,
+ 247, UNIPHIER_PIN_DRV_4_8,
+ 247, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE,
+ 248, UNIPHIER_PIN_DRV_4_8,
+ 248, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE,
+ 249, UNIPHIER_PIN_DRV_4_8,
+ 249, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE,
+ 250, UNIPHIER_PIN_DRV_4_8,
+ 250, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE,
+ 251, UNIPHIER_PIN_DRV_4_8,
+ 251, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE,
+ 252, UNIPHIER_PIN_DRV_4_8,
+ 252, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE,
+ 253, UNIPHIER_PIN_DRV_4_8,
+ 253, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE,
+ 254, UNIPHIER_PIN_DRV_4_8,
+ 254, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE,
+ 255, UNIPHIER_PIN_DRV_4_8,
+ 255, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE,
+ 256, UNIPHIER_PIN_DRV_4_8,
+ 256, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE,
+ 257, UNIPHIER_PIN_DRV_4_8,
+ 257, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE,
+ 258, UNIPHIER_PIN_DRV_4_8,
+ 258, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE,
+ 259, UNIPHIER_PIN_DRV_4_8,
+ 259, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE,
+ 260, UNIPHIER_PIN_DRV_4_8,
+ 260, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE,
+ 261, UNIPHIER_PIN_DRV_4_8,
+ 261, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE,
+ 262, UNIPHIER_PIN_DRV_4_8,
+ 262, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE,
+ 263, UNIPHIER_PIN_DRV_4_8,
+ 263, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE,
+ 264, UNIPHIER_PIN_DRV_4_8,
+ 264, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+ 265, UNIPHIER_PIN_DRV_4_8,
+ 265, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE,
+ 266, UNIPHIER_PIN_DRV_4_8,
+ 266, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE,
+ 267, UNIPHIER_PIN_DRV_4_8,
+ 267, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ 268, UNIPHIER_PIN_DRV_4_8,
+ 268, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ 269, UNIPHIER_PIN_DRV_4_8,
+ 269, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ 270, UNIPHIER_PIN_DRV_4_8,
+ 270, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+ 271, UNIPHIER_PIN_DRV_4_8,
+ 271, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+ 272, UNIPHIER_PIN_DRV_4_8,
+ 272, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+ 273, UNIPHIER_PIN_DRV_4_8,
+ 273, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+ 274, UNIPHIER_PIN_DRV_4_8,
+ 274, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+ 275, UNIPHIER_PIN_DRV_4_8,
+ 275, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE,
+ 276, UNIPHIER_PIN_DRV_4_8,
+ 276, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE,
+ 277, UNIPHIER_PIN_DRV_4_8,
+ 277, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ 278, UNIPHIER_PIN_DRV_4_8,
+ 278, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ 279, UNIPHIER_PIN_DRV_4_8,
+ 279, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ 280, UNIPHIER_PIN_DRV_4_8,
+ 280, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE,
+ 281, UNIPHIER_PIN_DRV_4_8,
+ 281, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+ 282, UNIPHIER_PIN_DRV_4_8,
+ 282, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+ 284, UNIPHIER_PIN_DRV_4_8,
+ 284, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+ 285, UNIPHIER_PIN_DRV_4_8,
+ 285, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+ 286, UNIPHIER_PIN_DRV_4_8,
+ 286, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+ 287, UNIPHIER_PIN_DRV_4_8,
+ 287, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+ 288, UNIPHIER_PIN_DRV_4_8,
+ 288, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+ 289, UNIPHIER_PIN_DRV_4_8,
+ 289, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+ 290, UNIPHIER_PIN_DRV_4_8,
+ 290, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+ 291, UNIPHIER_PIN_DRV_4_8,
+ 291, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+ 292, UNIPHIER_PIN_DRV_4_8,
+ 292, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+ 293, UNIPHIER_PIN_DRV_4_8,
+ 293, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+ 294, UNIPHIER_PIN_DRV_4_8,
+ 294, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+ 295, UNIPHIER_PIN_DRV_4_8,
+ 295, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+ 296, UNIPHIER_PIN_DRV_4_8,
+ 296, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+ 297, UNIPHIER_PIN_DRV_4_8,
+ 297, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+ 298, UNIPHIER_PIN_DRV_4_8,
+ 298, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+ 299, UNIPHIER_PIN_DRV_4_8,
+ 299, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE,
+ 300, UNIPHIER_PIN_DRV_4_8,
+ 300, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE,
+ 301, UNIPHIER_PIN_DRV_4_8,
+ 301, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE,
+ 302, UNIPHIER_PIN_DRV_4_8,
+ 302, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 303, UNIPHIER_PIN_DRV_4_8,
+ 303, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 304, UNIPHIER_PIN_DRV_4_8,
+ 304, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 305, UNIPHIER_PIN_DRV_4_8,
+ 305, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 306, UNIPHIER_PIN_DRV_4_8,
+ 306, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE,
+ 307, UNIPHIER_PIN_DRV_4_8,
+ 307, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ 308, UNIPHIER_PIN_DRV_4_8,
+ 308, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE,
+ 309, UNIPHIER_PIN_DRV_4_8,
+ 309, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE,
+ 310, UNIPHIER_PIN_DRV_4_8,
+ 310, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE,
+ 311, UNIPHIER_PIN_DRV_4_8,
+ 311, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE,
+ 312, UNIPHIER_PIN_DRV_NONE,
+ 312, UNIPHIER_PIN_PULL_DOWN),
+};
+
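+/*
+ * Each *_pins[] array below is paired, index for index, with a *_muxvals[]
+ * array of the same length; when a group is selected, the common UniPhier
+ * core is expected to program muxvals[i] into the mux field of pins[i].
+ */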
+static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {142, 143};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {144, 145};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {146, 147};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {148, 149};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c6_pins[] = {308, 309};
+static const unsigned i2c6_muxvals[] = {6, 6};
+static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0};
+static const unsigned nand_cs1_pins[] = {131, 132};
+static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const unsigned uart0_pins[] = {127, 128};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {129, 130};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {131, 132};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {88, 89};
+static const unsigned uart3_muxvals[] = {2, 2};
+static const unsigned usb0_pins[] = {180, 181};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {182, 183};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {184, 185};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+ 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
+ 308, 309, 310, 311, 312, 313, 314, 315, /* PORT1x */
+ 316, 317, 318, 16, 17, 18, 19, 20, /* PORT2x */
+ 21, 22, 23, 4, 93, 94, 95, 63, /* PORT3x */
+ 123, 122, 124, 125, 126, 141, 202, 203, /* PORT4x */
+ 204, 226, 227, 290, 291, 233, 280, 281, /* PORT5x */
+ 8, 7, 10, 29, 30, 48, 49, 50, /* PORT6x */
+ 40, 41, 42, 43, 44, 45, 46, 47, /* PORT7x */
+ 54, 51, 52, 53, 127, 128, 129, 130, /* PORT8x */
+ 131, 132, 57, 60, 134, 133, 135, 136, /* PORT9x */
+ 138, 137, 140, 139, 64, 65, 66, 67, /* PORT10x */
+ 107, 106, 105, 104, 113, 112, 111, 110, /* PORT11x */
+ 68, 69, 70, 71, 72, 73, 74, 75, /* PORT12x */
+ 76, 77, 78, 79, 80, 81, 82, 83, /* PORT13x */
+ 84, 85, 86, 87, 88, 89, 90, 91, /* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT0x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT1x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT2x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT3x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT4x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT5x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT6x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT7x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT8x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT9x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT10x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT11x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT12x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT13x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+ 13, 14, 15, /* PORT175-177 */
+ 157, 158, 156, 154, 150, 151, 152, 153, /* PORT18x */
+ 326, 327, 325, 323, 319, 320, 321, 322, /* PORT19x */
+ 160, 161, 162, 163, 164, 165, 166, 167, /* PORT20x */
+ 168, 169, 170, 171, 172, 173, 174, 175, /* PORT21x */
+ 180, 181, 182, 183, 184, 185, 187, 188, /* PORT22x */
+ 193, 194, 195, 196, 197, 198, 199, 200, /* PORT23x */
+ 191, 192, 215, 216, 217, 218, 219, 220, /* PORT24x */
+ 222, 223, 224, 225, 228, 229, 230, 231, /* PORT25x */
+ 282, 283, 284, 285, 286, 287, 288, 289, /* PORT26x */
+ 292, 293, 294, 295, 296, 236, 237, 238, /* PORT27x */
+ 275, 276, 277, 278, 239, 240, 249, 250, /* PORT28x */
+ 251, 252, 261, 262, 263, 264, 273, 274, /* PORT29x */
+ 31, 32, 33, 34, 35, 36, 37, 38, /* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+ 7, 7, 7, /* PORT175-177 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT18x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT19x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT20x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT21x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT22x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT23x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT24x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT25x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT26x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT27x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT28x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT29x */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+ 11, 9, 12, 96, 97, 98, 108, 114, /* XIRQ0-7 */
+ 234, 186, 99, 100, 101, 102, 184, 301, /* XIRQ8-15 */
+ 302, 303, 304, 305, 306, /* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+ 7, 7, 7, 7, 7, 7, 7, 7, /* XIRQ0-7 */
+ 7, 7, 7, 7, 7, 7, 2, 2, /* XIRQ8-15 */
+ 2, 2, 2, 2, 2, /* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+ 184, 310, 316,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+ 2, 2, 2,
+};
+
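+/*
+ * UNIPHIER_PINCTRL_GROUP(foo) references the foo_pins/foo_muxvals pair
+ * above by token-pasting on the name; the GROUP_SINGLE() entries carve
+ * one-pin groups out of the port/xirq range arrays so each GPIO port and
+ * external interrupt line can be muxed individually.
+ */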
+static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(i2c6),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 2),
+};
+
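+/*
+ * Group-name tables consumed by the UNIPHIER_PINMUX_FUNCTION() entries
+ * further below: the function named "foo" exposes exactly the groups
+ * listed in foo_groups[].
+ */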
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ "port110", "port111", "port112", "port113",
+ "port114", "port115", "port116", "port117",
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-174 missing */
+ /* none */ "port175", "port176", "port177",
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ "port190", "port191", "port192", "port193",
+ "port194", "port195", "port196", "port197",
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ "port224", "port225", "port226", "port227",
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+ "port250", "port251", "port252", "port253",
+ "port254", "port255", "port256", "port257",
+ "port260", "port261", "port262", "port263",
+ "port264", "port265", "port266", "port267",
+ "port270", "port271", "port272", "port273",
+ "port274", "port275", "port276", "port277",
+ "port280", "port281", "port282", "port283",
+ "port284", "port285", "port286", "port287",
+ "port290", "port291", "port292", "port293",
+ "port294", "port295", "port296", "port297",
+ "port300", "port301", "port302", "port303",
+ "port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20",
+ "xirq14b", "xirq17b", "xirq18b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(i2c6),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(usb3),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
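+/*
+ * SoC data for the common UniPhier pinctrl core: 4 mux bits per pin, mux
+ * registers laid out at an 8-byte stride, and .load_pinctrl set because,
+ * on this SoC, new mux settings presumably take effect only after a write
+ * to the load register.
+ */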
+static struct uniphier_pinctrl_socdata ph1_pro4_pindata = {
+ .groups = ph1_pro4_groups,
+ .groups_count = ARRAY_SIZE(ph1_pro4_groups),
+ .functions = ph1_pro4_functions,
+ .functions_count = ARRAY_SIZE(ph1_pro4_functions),
+ .mux_bits = 4,
+ .reg_stride = 8,
+ .load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro4_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = ph1_pro4_pins,
+ .npins = ARRAY_SIZE(ph1_pro4_pins),
+ .owner = THIS_MODULE,
+};
+
+static int ph1_pro4_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc,
+ &ph1_pro4_pindata);
+}
+
+static const struct of_device_id ph1_pro4_pinctrl_match[] = {
+ { .compatible = "socionext,ph1-pro4-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
+
+static struct platform_driver ph1_pro4_pinctrl_driver = {
+ .probe = ph1_pro4_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ph1_pro4_pinctrl_match,
+ },
+};
+module_platform_driver(ph1_pro4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
new file mode 100644
index 000000000000..9af455978058
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
@@ -0,0 +1,1351 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro5-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro5_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0,
+ 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0,
+ 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0,
+ 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0,
+ 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+ 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+ 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+ 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+ 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+ 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+ 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+ 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+ 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+ 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+ 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+ 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+ 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+ 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(47, "TXD0", 0,
+ 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(48, "RXD0", 0,
+ 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(49, "TXD1", 0,
+ 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(50, "RXD1", 0,
+ 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+ 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(53, "TXD3", 0,
+ 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(54, "RXD3", 0,
+ 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0,
+ 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0,
+ 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0,
+ 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0,
+ 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0,
+ 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0,
+ 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0,
+ 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0,
+ 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0,
+ 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0,
+ 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "XSRST", 0,
+ 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+ 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "PWMA", 0,
+ 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0,
+ 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0,
+ 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0,
+ 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0,
+ 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0,
+ 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0,
+ 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0,
+ 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0,
+ 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0,
+ 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0,
+ 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0,
+ 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0,
+ 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0,
+ 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0,
+ 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0,
+ 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0,
+ 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "PORT00", 0,
+ 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "PORT01", 0,
+ 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "PORT02", 0,
+ 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "PORT03", 0,
+ 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "PORT04", 0,
+ 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "PORT05", 0,
+ 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "PORT06", 0,
+ 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "PORT07", 0,
+ 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "PORT10", 0,
+ 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "PORT11", 0,
+ 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "PORT12", 0,
+ 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "PORT13", 0,
+ 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "PORT14", 0,
+ 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "PORT15", 0,
+ 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "PORT16", 0,
+ 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "PORT17", 0,
+ 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0,
+ 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0,
+ 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0,
+ 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0,
+ 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(109, "XPERST", 0,
+ 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0,
+ 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0,
+ 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 112, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 113, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 114, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 115, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 116, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 117, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 118, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 119, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 125, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 127, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 129, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 167, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 168, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 169, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 170, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 171, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 172, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 173, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 174, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 175, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 176, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 177, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 178, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 179, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 180, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 181, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 182, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 183, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 184, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 185, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 186, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 187, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 188, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 189, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 190, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 191, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 192, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 193, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(194, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 194, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 195, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 196, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 197, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 198, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 199, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 200, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 201, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 202, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 203, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 204, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 205, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 206, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 207, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 208, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 209, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 210, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 211, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 212, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 213, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 214, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 215, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 216, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(217, "VI1C2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 217, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 218, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 219, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 220, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 221, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 222, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 223, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 224, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 225, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 226, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 227, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 228, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 229, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 230, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 231, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 232, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 233, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 234, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 235, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 236, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 237, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 238, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 239, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(240, "VO1D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 240, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 241, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 242, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 243, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 244, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 245, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 246, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 247, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 248, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 249, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+};
+
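+/*
+ * Pin groups are described by paired arrays: <group>_pins[] lists the pin
+ * numbers belonging to the group, and <group>_muxvals[] gives the value
+ * written into each pin's 4-bit pin-mux field (see .mux_bits below) to
+ * select that function on the pin.
+ */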
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned i2c0_pins[] = {112, 113};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {114, 115};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {116, 117};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c5_pins[] = {87, 88};
+static const unsigned i2c5_muxvals[] = {2, 2};
+static const unsigned i2c5b_pins[] = {196, 197};
+static const unsigned i2c5b_muxvals[] = {2, 2};
+static const unsigned i2c5c_pins[] = {215, 216};
+static const unsigned i2c5c_muxvals[] = {2, 2};
+static const unsigned i2c6_pins[] = {101, 102};
+static const unsigned i2c6_muxvals[] = {2, 2};
+static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
+ 31, 32, 33, 34, 35};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0};
+static const unsigned nand_cs1_pins[] = {26, 27};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {47, 48};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart0b_pins[] = {227, 228};
+static const unsigned uart0b_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {49, 50};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {51, 52};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {53, 54};
+static const unsigned uart3_muxvals[] = {0, 0};
+static const unsigned usb0_pins[] = {124, 125};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {126, 127};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {128, 129};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+ 89, 90, 91, 92, 93, 94, 95, 96, /* PORT0x */
+ 97, 98, 99, 100, 101, 102, 103, 104, /* PORT1x */
+ 251, 252, 253, 254, 255, 247, 248, 249, /* PORT2x */
+ 39, 40, 41, 42, 43, 44, 45, 46, /* PORT3x */
+ 156, 157, 158, 159, 160, 161, 162, 163, /* PORT4x */
+ 164, 165, 166, 167, 168, 169, 170, 171, /* PORT5x */
+ 190, 191, 192, 193, 194, 195, 196, 197, /* PORT6x */
+ 198, 199, 200, 201, 202, 203, 204, 205, /* PORT7x */
+ 120, 121, 122, 123, 55, 56, 57, 58, /* PORT8x */
+ 124, 125, 126, 127, 49, 50, 53, 54, /* PORT9x */
+ 148, 149, 150, 151, 152, 153, 154, 155, /* PORT10x */
+ 133, 134, 131, 130, 138, 139, 136, 135, /* PORT11x */
+ 28, 29, 30, 31, 32, 33, 34, 35, /* PORT12x */
+ 179, 180, 181, 182, 186, 187, 188, 189, /* PORT13x */
+ 4, 5, 6, 7, 8, 9, 10, 11, /* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT11x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+ 109, 110, 111, /* PORT175-177 */
+ 206, 207, 208, 209, 210, 211, 212, 213, /* PORT18x */
+ 12, 13, 14, 15, 16, 17, 107, 108, /* PORT19x */
+ 140, 141, 142, 143, 144, 145, 146, 147, /* PORT20x */
+ 59, 60, 61, 62, 63, 64, 65, 66, /* PORT21x */
+ 214, 215, 216, 217, 218, 219, 220, 221, /* PORT22x */
+ 222, 223, 224, 225, 226, 227, 228, 229, /* PORT23x */
+ 19, 20, 21, 22, 23, 24, 25, 26, /* PORT24x */
+ 230, 231, 232, 233, 234, 235, 236, 237, /* PORT25x */
+ 239, 240, 241, 242, 243, 244, 245, 246, /* PORT26x */
+ 172, 173, 174, 175, 176, 177, 178, 129, /* PORT27x */
+ 0, 1, 2, 67, 85, 86, 87, 88, /* PORT28x */
+ 105, 106, 18, 27, 36, 128, 132, 137, /* PORT29x */
+ 183, 184, 185, 84, 47, 48, 51, 52, /* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+ 15, 15, 15, /* PORT175-177 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT25x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT26x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT27x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT28x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT29x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+ 68, 69, 70, 71, 72, 73, 74, 75, /* XIRQ0-7 */
+ 76, 77, 78, 79, 80, 81, 82, 83, /* XIRQ8-15 */
+ 84, 85, 86, 87, 88, /* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
+ 14, 14, 14, 14, 14, /* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+ 91, 92, 239, 144, 240, 156, 241, 106, 128,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, 14,
+};
+
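+/*
+ * UNIPHIER_PINCTRL_GROUP(x) ties group "x" to its x_pins[]/x_muxvals[]
+ * arrays above.  UNIPHIER_PINCTRL_GROUP_SINGLE(x, range, ofs) carves a
+ * one-pin group out of range_pins[] at index ofs, which is how the
+ * per-port GPIO and per-line XIRQ groups below are generated.
+ */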
+static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(i2c5),
+ UNIPHIER_PINCTRL_GROUP(i2c5b),
+ UNIPHIER_PINCTRL_GROUP(i2c5c),
+ UNIPHIER_PINCTRL_GROUP(i2c6),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart0b),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 8),
+};
+
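+/*
+ * Function -> selectable group names.  These string arrays back the
+ * uniphier_pinmux_function table below; e.g. the "uart0" function can be
+ * routed to either the "uart0" or the "uart0b" group.
+ */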
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ "port110", "port111", "port112", "port113",
+ "port114", "port115", "port116", "port117",
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-174 missing */
+ /* none */ "port175", "port176", "port177",
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ "port190", "port191", "port192", "port193",
+ "port194", "port195", "port196", "port197",
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ "port224", "port225", "port226", "port227",
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+ "port250", "port251", "port252", "port253",
+ "port254", "port255", "port256", "port257",
+ "port260", "port261", "port262", "port263",
+ "port264", "port265", "port266", "port267",
+ "port270", "port271", "port272", "port273",
+ "port274", "port275", "port276", "port277",
+ "port280", "port281", "port282", "port283",
+ "port284", "port285", "port286", "port287",
+ "port290", "port291", "port292", "port293",
+ "port294", "port295", "port296", "port297",
+ "port300", "port301", "port302", "port303",
+ "port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20",
+ "xirq3b", "xirq4b", "xirq16b", "xirq17b", "xirq17c",
+ "xirq18b", "xirq18c", "xirq19b", "xirq20b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(i2c5),
+ UNIPHIER_PINMUX_FUNCTION(i2c6),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
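+/*
+ * SoC data: 4-bit mux fields packed into 32-bit registers laid out at an
+ * 8-byte stride.  load_pinctrl means a mux update only takes effect after
+ * the core driver writes the separate load register.
+ */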
+static struct uniphier_pinctrl_socdata ph1_pro5_pindata = {
+ .groups = ph1_pro5_groups,
+ .groups_count = ARRAY_SIZE(ph1_pro5_groups),
+ .functions = ph1_pro5_functions,
+ .functions_count = ARRAY_SIZE(ph1_pro5_functions),
+ .mux_bits = 4,
+ .reg_stride = 8,
+ .load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro5_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = ph1_pro5_pins,
+ .npins = ARRAY_SIZE(ph1_pro5_pins),
+ .owner = THIS_MODULE,
+};
+
+static int ph1_pro5_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc,
+ &ph1_pro5_pindata);
+}
+
+static const struct of_device_id ph1_pro5_pinctrl_match[] = {
+ { .compatible = "socionext,ph1-pro5-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
+
+static struct platform_driver ph1_pro5_pinctrl_driver = {
+ .probe = ph1_pro5_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ph1_pro5_pinctrl_match,
+ },
+};
+module_platform_driver(ph1_pro5_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
new file mode 100644
index 000000000000..7e9dae54fcb2
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-sld8-pinctrl"
+
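+/*
+ * UNIPHIER_PINCTRL_PIN(number, name, iectrl, drvctrl, drv_type,
+ * pupdctrl, pull_dir): drvctrl and pupdctrl are bit positions in the
+ * drive-strength and pull-up/down control registers; -1 marks a setting
+ * that is fixed in hardware and not software-controllable.
+ */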
+static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+ 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+ 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+ 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+ 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+ 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+ 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+ 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+ 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+ 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+ 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+ 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 119, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 120, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 121, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 122, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 123, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 124, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 24, UNIPHIER_PIN_DRV_8_12_16_20,
+ 125, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 28, UNIPHIER_PIN_DRV_8_12_16_20,
+ 126, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_8_12_16_20,
+ 127, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_8_12_16_20,
+ 128, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+ 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+ 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+ 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+ 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+ 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+ 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+ 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+ 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+ 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+ 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+ 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+ 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+ 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+ 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+ 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+ 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+ 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+ 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+ 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+ 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+ 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+ 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+ 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+ 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+ 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+ 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+ 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+ 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+ 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+ 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+ 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+ 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+ 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+ 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+ 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+ 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+ 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+ 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+ 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+ 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+ 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+ 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+ 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+ 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+ 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+ 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+ 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+ 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+ 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+ 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+ 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+ 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+ 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+ 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+ 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+ 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+ 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+ 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+ 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+ 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+ 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+ 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+ 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+ 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+ 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+ 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+ 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+ 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+ 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+ 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+ 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+ 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+ 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+};
+
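+/*
+ * Each <group>_pins[] array below lists the pin numbers that form a pin
+ * group; the <group>_muxvals[] array of the same length gives the mux value
+ * written for the corresponding pin to select that function.
+ */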
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26,
+ 27, 28, 29, 30, 31};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {70, 71};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {114, 115};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {112, 113};
+static const unsigned uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {41, 42};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {43, 44};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {114, 115};
+static const unsigned usb2_muxvals[] = {1, 1};
+static const unsigned port_range0_pins[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT0x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT1x */
+ 32, 33, 34, 35, 36, 37, 38, 39, /* PORT2x */
+ 59, 60, 61, 62, 63, 64, 65, 66, /* PORT3x */
+ 95, 96, 97, 98, 99, 100, 101, 57, /* PORT4x */
+ 70, 71, 72, 73, 74, 75, 76, 77, /* PORT5x */
+ 81, 83, 84, 85, 86, 89, 90, 91, /* PORT6x */
+ 118, 119, 120, 121, 122, 53, 54, 55, /* PORT7x */
+ 41, 42, 43, 44, 79, 80, 18, 19, /* PORT8x */
+ 110, 111, 112, 113, 114, 115, 16, 17, /* PORT9x */
+ 40, 67, 68, 69, 78, 92, 93, 94, /* PORT10x */
+ 48, 49, 46, 45, 123, 124, 125, 126, /* PORT11x */
+ 47, 127, 20, 56, 22, /* PORT120-124 */
+};
+static const unsigned port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT11x */
+ 15, 15, 15, 15, 15, /* PORT120-124 */
+};
+static const unsigned port_range1_pins[] = {
+ 116, 117, /* PORT130-131 */
+};
+static const unsigned port_range1_muxvals[] = {
+ 15, 15, /* PORT130-131 */
+};
+static const unsigned port_range2_pins[] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, /* PORT14x */
+};
+static const unsigned port_range2_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+};
+static const unsigned port_range3_pins[] = {
+ 23, /* PORT166 */
+};
+static const unsigned port_range3_muxvals[] = {
+ 15, /* PORT166 */
+};
+static const unsigned xirq_range0_pins[] = {
+ 128, 129, 130, 131, 132, 133, 134, 135, /* XIRQ0-7 */
+ 82, 87, 88, 50, 51, /* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, /* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+ 52, 58, /* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+ 14, 14, /* XIRQ14-15 */
+};
+
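+/*
+ * UNIPHIER_PINCTRL_GROUP(name) pairs <name>_pins[] with <name>_muxvals[];
+ * UNIPHIER_PINCTRL_GROUP_SINGLE(name, range, offset) carves a one-pin group
+ * out of the <range> arrays at index <offset>, so every GPIO port line and
+ * external interrupt can also be muxed individually.
+ */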
+static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range2, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range2, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range2, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range2, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range2, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range2, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range2, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range2, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range3, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ "port110", "port111", "port112", "port113",
+ "port114", "port115", "port116", "port117",
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-164 missing */
+	/* none */ "port166",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
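+/*
+ * With mux_bits = 8 and reg_stride = 4, each pin presumably takes an 8-bit
+ * mux field, i.e. four pins per 32-bit mux register.
+ */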
+static struct uniphier_pinctrl_socdata ph1_sld8_pindata = {
+ .groups = ph1_sld8_groups,
+ .groups_count = ARRAY_SIZE(ph1_sld8_groups),
+ .functions = ph1_sld8_functions,
+ .functions_count = ARRAY_SIZE(ph1_sld8_functions),
+ .mux_bits = 8,
+ .reg_stride = 4,
+ .load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_sld8_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = ph1_sld8_pins,
+ .npins = ARRAY_SIZE(ph1_sld8_pins),
+ .owner = THIS_MODULE,
+};
+
+static int ph1_sld8_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc,
+ &ph1_sld8_pindata);
+}
+
+static const struct of_device_id ph1_sld8_pinctrl_match[] = {
+ { .compatible = "socionext,ph1-sld8-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
+
+static struct platform_driver ph1_sld8_pinctrl_driver = {
+ .probe = ph1_sld8_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ph1_sld8_pinctrl_match,
+ },
+};
+module_platform_driver(ph1_sld8_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
new file mode 100644
index 000000000000..3f036e236ad9
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "proxstream2-pinctrl"
+
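+/*
+ * Each UNIPHIER_PINCTRL_PIN() entry describes one pin: pin number, pin name,
+ * input enable control, drive strength control bit (-1 where the strength is
+ * fixed), drive strength capability, pull up/down control bit (-1 where the
+ * bias is fixed), and the default pull state.
+ */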
+static const struct pinctrl_pin_desc proxstream2_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+ 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+ 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+ 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+ 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+ 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+ 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+ 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+ 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 15, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 16, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 17, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 18, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 19, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 20, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 21, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 22, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 23, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 24, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 25, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 26, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 27, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 28, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 29, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+ 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+ 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+ 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+ 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+ 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+ 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+ 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+ 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+ 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+ 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+ 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+ 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+ 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+ 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+ 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+ 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+ 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+ 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ -1, UNIPHIER_PIN_PULL_UP_FIXED),
+ UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 53, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 54, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 55, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 61, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 62, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 63, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 64, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 65, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 66, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 67, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 68, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 100, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(113, "TXD2", 0,
+ 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(114, "RXD2", 0,
+ 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+ 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+ 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 119, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 125, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 127, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+ 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+ 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+ 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+ 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+ 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+ 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+ 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+ 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+ 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+ 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+ 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+ 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+ 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+ 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+ 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+ 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+ 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 167, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 168, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 169, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 170, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 171, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 172, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 173, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 174, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 175, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 176, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 177, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 178, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 179, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 180, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 181, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 182, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 183, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 184, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 185, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 186, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0,
+ 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0,
+ 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0,
+ 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0,
+ 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0,
+ 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0,
+ 192, UNIPHIER_PIN_DRV_4_8,
+ 192, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0,
+ 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0,
+ 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(195, "PORT241", 0,
+ 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(196, "PORT242", 0,
+ 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(197, "PORT243", 0,
+ 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0,
+ 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE,
+ 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+ 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+ 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 203, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 204, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 205, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_8,
+ 206, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0,
+ 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0,
+ 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0,
+ 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE,
+ 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE,
+ 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE,
+ 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 213, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+ -1, UNIPHIER_PIN_DRV_FIXED_4,
+ 214, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE,
+ 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(216, "CK25O", 0,
+ 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(217, "TXD0", 0,
+ 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(218, "RXD0", 0,
+ 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(219, "TXD3", 0,
+ 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(220, "RXD3", 0,
+ 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(221, "PORT273", 0,
+ 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0,
+ 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(223, "PORT274", 0,
+ 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(224, "PORT275", 0,
+ 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(225, "PORT276", 0,
+ 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(226, "PORT277", 0,
+ 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(227, "PORT280", 0,
+ 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(228, "PORT281", 0,
+ 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(229, "PORT282", 0,
+ 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(230, "PORT283", 0,
+ 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(231, "PORT284", 0,
+ 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(232, "PORT285", 0,
+ 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0,
+ 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0,
+ 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {8, 8};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {8, 8};
+static const unsigned i2c2_pins[] = {171, 172};
+static const unsigned i2c2_muxvals[] = {8, 8};
+static const unsigned i2c3_pins[] = {159, 160};
+static const unsigned i2c3_muxvals[] = {8, 8};
+static const unsigned i2c5_pins[] = {183, 184};
+static const unsigned i2c5_muxvals[] = {11, 11};
+static const unsigned i2c6_pins[] = {185, 186};
+static const unsigned i2c6_muxvals[] = {11, 11};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+ 42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const unsigned uart0_pins[] = {217, 218};
+static const unsigned uart0_muxvals[] = {8, 8};
+static const unsigned uart0b_pins[] = {179, 180};
+static const unsigned uart0b_muxvals[] = {10, 10};
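+/* uart0b/uart3b presumably route the same UARTs to alternate pin locations */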
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {8, 8};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {8, 8};
+static const unsigned uart3_pins[] = {219, 220};
+static const unsigned uart3_muxvals[] = {8, 8};
+static const unsigned uart3b_pins[] = {181, 182};
+static const unsigned uart3b_muxvals[] = {10, 10};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {8, 8};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {8, 8};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {8, 8};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {8, 8};
+static const unsigned port_range0_pins[] = {
+ 127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */
+ 135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT2x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT3x */
+ 16, 17, 18, 19, 21, 22, 23, 24, /* PORT4x */
+ 25, 30, 31, 32, 33, 34, 35, 36, /* PORT5x */
+ 37, 38, 39, 40, 41, 42, 43, 44, /* PORT6x */
+ 45, 46, 47, 48, 49, 50, 51, 52, /* PORT7x */
+ 53, 54, 55, 56, 57, 58, 59, 60, /* PORT8x */
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */
+ 69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* PORT12x */
+ 89, 90, 95, 96, 97, 98, 99, 100, /* PORT13x */
+ 101, 102, 103, 104, 105, 106, 107, 108, /* PORT14x */
+ 118, 119, 120, 121, 122, 123, 124, 125, /* PORT15x */
+ 126, 72, 73, 92, 177, 93, 94, 176, /* PORT16x */
+ 74, 91, 27, 28, 29, 75, 20, 26, /* PORT17x */
+ 109, 110, 111, 112, 113, 114, 115, 116, /* PORT18x */
+ 117, 143, 144, 145, 146, 147, 148, 149, /* PORT19x */
+ 150, 151, 152, 153, 154, 155, 156, 157, /* PORT20x */
+ 158, 159, 160, 161, 162, 163, 164, 165, /* PORT21x */
+ 166, 178, 179, 180, 181, 182, 183, 184, /* PORT22x */
+ 185, 187, 188, 189, 190, 191, 192, 193, /* PORT23x */
+ 194, 195, 196, 197, 198, 199, 200, 201, /* PORT24x */
+ 202, 203, 204, 205, 206, 207, 208, 209, /* PORT25x */
+ 210, 211, 212, 213, 214, 215, 216, 217, /* PORT26x */
+ 218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */
+ 227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT15x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT16x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT17x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT25x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT26x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT27x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+ 118, 119, 120, 121, 122, 123, 124, 125, /* XIRQ0-7 */
+ 126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */
+ 74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+};
+
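+/*
+ * Mux value 15 appears to select the plain GPIO port function here, and
+ * mux value 14 the external interrupt (XIRQ) function.
+ */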
+static const struct uniphier_pinctrl_group proxstream2_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c2),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(i2c5),
+ UNIPHIER_PINCTRL_GROUP(i2c6),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(nand_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart0b),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(uart3b),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3", "uart3b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ /* port110-117 missing */
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ "port150", "port151", "port152", "port153",
+ "port154", "port155", "port156", "port157",
+ "port160", "port161", "port162", "port163",
+ "port164", "port165", "port166", "port167",
+ "port170", "port171", "port172", "port173",
+ "port174", "port175", "port176", "port177",
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ "port190", "port191", "port192", "port193",
+ "port194", "port195", "port196", "port197",
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ "port224", "port225", "port226", "port227",
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+ "port250", "port251", "port252", "port253",
+ "port254", "port255", "port256", "port257",
+ "port260", "port261", "port262", "port263",
+ "port264", "port265", "port266", "port267",
+ "port270", "port271", "port272", "port273",
+ "port274", "port275", "port276", "port277",
+ "port280", "port281", "port282", "port283",
+ "port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function proxstream2_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c2),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(i2c5),
+ UNIPHIER_PINMUX_FUNCTION(i2c6),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(usb3),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata proxstream2_pindata = {
+ .groups = proxstream2_groups,
+ .groups_count = ARRAY_SIZE(proxstream2_groups),
+ .functions = proxstream2_functions,
+ .functions_count = ARRAY_SIZE(proxstream2_functions),
+ .mux_bits = 8,
+ .reg_stride = 4,
+ .load_pinctrl = false,
+};
+
+static struct pinctrl_desc proxstream2_pinctrl_desc = {
+ .name = DRIVER_NAME,
+ .pins = proxstream2_pins,
+ .npins = ARRAY_SIZE(proxstream2_pins),
+ .owner = THIS_MODULE,
+};
+
+static int proxstream2_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc,
+ &proxstream2_pindata);
+}
+
+static const struct of_device_id proxstream2_pinctrl_match[] = {
+ { .compatible = "socionext,proxstream2-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
+
+static struct platform_driver proxstream2_pinctrl_driver = {
+ .probe = proxstream2_pinctrl_probe,
+ .remove = uniphier_pinctrl_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = proxstream2_pinctrl_match,
+ },
+};
+module_platform_driver(proxstream2_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
new file mode 100644
index 000000000000..918f3b643f1b
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-uniphier.h"
+
+struct uniphier_pinctrl_priv {
+ struct pinctrl_dev *pctldev;
+ struct regmap *regmap;
+ struct uniphier_pinctrl_socdata *socdata;
+};
+
+static int uniphier_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->socdata->groups_count;
+}
+
+static const char *uniphier_pctl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->socdata->groups[selector].name;
+}
+
+static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = priv->socdata->groups[selector].pins;
+ *num_pins = priv->socdata->groups[selector].num_pins;
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned offset)
+{
+ const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset];
+ const char *pull_dir, *drv_str;
+
+ switch (uniphier_pin_get_pull_dir(pin->drv_data)) {
+ case UNIPHIER_PIN_PULL_UP:
+ pull_dir = "UP";
+ break;
+ case UNIPHIER_PIN_PULL_DOWN:
+ pull_dir = "DOWN";
+ break;
+ case UNIPHIER_PIN_PULL_NONE:
+ pull_dir = "NONE";
+ break;
+ default:
+ BUG();
+ }
+
+ switch (uniphier_pin_get_drv_str(pin->drv_data)) {
+ case UNIPHIER_PIN_DRV_4_8:
+ drv_str = "4/8(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_8_12_16_20:
+ drv_str = "8/12/16/20(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_FIXED_4:
+ drv_str = "4(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_FIXED_5:
+ drv_str = "5(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_FIXED_8:
+ drv_str = "8(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_NONE:
+ drv_str = "NONE";
+ break;
+ default:
+ BUG();
+ }
+
+ seq_printf(s, " PULL_DIR=%s DRV_STR=%s", pull_dir, drv_str);
+}
+#endif
+
+static const struct pinctrl_ops uniphier_pctlops = {
+ .get_groups_count = uniphier_pctl_get_groups_count,
+ .get_group_name = uniphier_pctl_get_group_name,
+ .get_group_pins = uniphier_pctl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+ .pin_dbg_show = uniphier_pctl_pin_dbg_show,
+#endif
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin,
+ enum pin_config_param param)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ enum uniphier_pin_pull_dir pull_dir =
+ uniphier_pin_get_pull_dir(pin->drv_data);
+ unsigned int pupdctrl, reg, shift, val;
+ unsigned int expected = 1;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+ return 0;
+ if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+ pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+ return -EINVAL;
+ expected = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED)
+ return 0;
+ if (pull_dir != UNIPHIER_PIN_PULL_UP)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+ return 0;
+ if (pull_dir != UNIPHIER_PIN_PULL_DOWN)
+ return -EINVAL;
+ break;
+ default:
+ BUG();
+ }
+
+ pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+ reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+ shift = pupdctrl % 32;
+
+ ret = regmap_read(priv->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val >> shift) & 1;
+
+ return (val == expected) ? 0 : -EINVAL;
+}
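
The pull enables are packed one bit per pin across consecutive 32-bit registers, so the offset/shift pair above is plain divide-and-modulo arithmetic. A minimal sketch with a made-up pupdctrl index (70 is illustrative, not taken from any pin table), using the 0xa00 base defined in pinctrl-uniphier.h:

    /* Hypothetical PUPDCTRL indexing, mirroring the code above. */
    unsigned int pupdctrl = 70;                    /* example bit index   */
    unsigned int reg = 0xa00 + pupdctrl / 32 * 4;  /* 0xa00 + 2*4 = 0xa08 */
    unsigned int shift = pupdctrl % 32;            /* 70 % 32 = 6         */
    /* bit 6 of the register at 0xa08 carries this pin's pull enable */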
+
+static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin,
+ u16 *strength)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ enum uniphier_pin_drv_str drv_str =
+ uniphier_pin_get_drv_str(pin->drv_data);
+ const unsigned int strength_4_8[] = {4, 8};
+ const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20};
+ const unsigned int *supported_strength;
+ unsigned int drvctrl, reg, shift, mask, width, val;
+ int ret;
+
+ switch (drv_str) {
+ case UNIPHIER_PIN_DRV_4_8:
+ supported_strength = strength_4_8;
+ width = 1;
+ break;
+ case UNIPHIER_PIN_DRV_8_12_16_20:
+ supported_strength = strength_8_12_16_20;
+ width = 2;
+ break;
+ case UNIPHIER_PIN_DRV_FIXED_4:
+ *strength = 4;
+ return 0;
+ case UNIPHIER_PIN_DRV_FIXED_5:
+ *strength = 5;
+ return 0;
+ case UNIPHIER_PIN_DRV_FIXED_8:
+ *strength = 8;
+ return 0;
+ default:
+ /* drive strength control is not supported for this pin */
+ return -EINVAL;
+ }
+
+ drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+ drvctrl *= width;
+
+ reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+ UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+ reg += drvctrl / 32 * 4;
+ shift = drvctrl % 32;
+ mask = (1U << width) - 1;
+
+ ret = regmap_read(priv->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ *strength = supported_strength[(val >> shift) & mask];
+
+ return 0;
+}
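
Because the drive-strength field is 1 bit wide for 4/8 mA pins and 2 bits wide for 8/12/16/20 mA pins, the field index is scaled by the width before the same register arithmetic is applied. A worked example with a made-up drvctrl of 10 on a 2-bit pin:

    /* Hypothetical DRV2CTRL lookup for a UNIPHIER_PIN_DRV_8_12_16_20 pin. */
    unsigned int drvctrl = 10, width = 2;
    drvctrl *= width;                             /* 20: starting bit     */
    unsigned int reg = 0x900 + drvctrl / 32 * 4;  /* 20/32 = 0 -> 0x900   */
    unsigned int shift = drvctrl % 32;            /* 20                   */
    unsigned int mask = (1U << width) - 1;        /* 0x3                  */
    /* a field value of 2 then selects strength_8_12_16_20[2] == 16 mA */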
+
+static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+ unsigned int val;
+ int ret;
+
+ if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+ /* This pin is always input-enabled. */
+ return 0;
+
+ ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val);
+ if (ret)
+ return ret;
+
+ return val & BIT(iectrl) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs)
+{
+ const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+ enum pin_config_param param = pinconf_to_config_param(*configs);
+ bool has_arg = false;
+ u16 arg;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg);
+ has_arg = true;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc);
+ break;
+ default:
+ /* unsupported parameter */
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret == 0 && has_arg)
+ *configs = pinconf_to_config_packed(param, arg);
+
+ return ret;
+}
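
The configs[] entries are generic pinconf values; in kernels of this vintage the packing puts a 16-bit parameter in the low half and a 16-bit argument in the high half, which is why arg is a u16 here. A round-trip sketch, assuming that layout:

    /* Assumes the 16/16-bit pinconf packing used around v4.3. */
    unsigned long c = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 8);
    enum pin_config_param p = pinconf_to_config_param(c); /* DRIVE_STRENGTH */
    u16 arg = pinconf_to_config_argument(c);              /* 8 (mA)         */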
+
+static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin,
+ enum pin_config_param param,
+ u16 arg)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ enum uniphier_pin_pull_dir pull_dir =
+ uniphier_pin_get_pull_dir(pin->drv_data);
+ unsigned int pupdctrl, reg, shift;
+ unsigned int val = 1;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+ return 0;
+ if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+ pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED) {
+ dev_err(pctldev->dev,
+ "can not disable pull register for pin %u (%s)\n",
+ pin->number, pin->name);
+ return -EINVAL;
+ }
+ val = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED && arg != 0)
+ return 0;
+ if (pull_dir != UNIPHIER_PIN_PULL_UP) {
+ dev_err(pctldev->dev,
+ "pull-up is unsupported for pin %u (%s)\n",
+ pin->number, pin->name);
+ return -EINVAL;
+ }
+ if (arg == 0) {
+ dev_err(pctldev->dev, "pull-up can not be total\n");
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED && arg != 0)
+ return 0;
+ if (pull_dir != UNIPHIER_PIN_PULL_DOWN) {
+ dev_err(pctldev->dev,
+ "pull-down is unsupported for pin %u (%s)\n",
+ pin->number, pin->name);
+ return -EINVAL;
+ }
+ if (arg == 0) {
+ dev_err(pctldev->dev, "pull-down can not be total\n");
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ if (pull_dir == UNIPHIER_PIN_PULL_NONE) {
+ dev_err(pctldev->dev,
+ "pull-up/down is unsupported for pin %u (%s)\n",
+ pin->number, pin->name);
+ return -EINVAL;
+ }
+
+ if (arg == 0)
+ return 0; /* configuration ignored */
+ break;
+ default:
+ BUG();
+ }
+
+ pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+ reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+ shift = pupdctrl % 32;
+
+ return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift);
+}
+
+static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin,
+ u16 strength)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ enum uniphier_pin_drv_str drv_str =
+ uniphier_pin_get_drv_str(pin->drv_data);
+ const unsigned int strength_4_8[] = {4, 8, -1};
+ const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1};
+ const unsigned int *supported_strength;
+ unsigned int drvctrl, reg, shift, mask, width, val;
+
+ switch (drv_str) {
+ case UNIPHIER_PIN_DRV_4_8:
+ supported_strength = strength_4_8;
+ width = 1;
+ break;
+ case UNIPHIER_PIN_DRV_8_12_16_20:
+ supported_strength = strength_8_12_16_20;
+ width = 2;
+ break;
+ default:
+ dev_err(pctldev->dev,
+ "cannot change drive strength for pin %u (%s)\n",
+ pin->number, pin->name);
+ return -EINVAL;
+ }
+
+ for (val = 0; supported_strength[val] > 0; val++) {
+ if (supported_strength[val] > strength)
+ break;
+ }
+
+ if (val == 0) {
+ dev_err(pctldev->dev,
+ "unsupported drive strength %u mA for pin %u (%s)\n",
+ strength, pin->number, pin->name);
+ return -EINVAL;
+ }
+
+ val--;
+
+ drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+ drvctrl *= width;
+
+ reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+ UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+ reg += drvctrl / 32 * 4;
+ shift = drvctrl % 32;
+ mask = (1U << width) - 1;
+
+ return regmap_update_bits(priv->regmap, reg,
+ mask << shift, val << shift);
+}
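
Note that the -1 sentinels above live in unsigned arrays, so they wrap to UINT_MAX; the scan is therefore stopped by the supported_strength[val] > strength test rather than by the loop condition, and val - 1 ends up naming the largest supported strength not exceeding the request (an over-large request simply selects the maximum). For example:

    /* Hypothetical request of 14 mA against {8, 12, 16, 20, -1}. */
    const unsigned int tbl[] = {8, 12, 16, 20, -1};
    unsigned int strength = 14, val;
    for (val = 0; tbl[val] > 0; val++)
            if (tbl[val] > strength)
                    break;  /* stops at val == 2 (16 mA > 14 mA)  */
    val--;                  /* val == 1: the field value for 12 mA */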
+
+static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin,
+ u16 enable)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+
+ if (enable == 0) {
+ /*
+ * Multiple pins share one input enable, so per-pin disabling
+ * is impossible.
+ */
+ dev_err(pctldev->dev, "unable to disable input\n");
+ return -EINVAL;
+ }
+
+ if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+ /* This pin is always input-enabled. Nothing to do. */
+ return 0;
+
+ return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL,
+ BIT(iectrl), BIT(iectrl));
+}
+
+static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ enum pin_config_param param =
+ pinconf_to_config_param(configs[i]);
+ u16 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ ret = uniphier_conf_pin_bias_set(pctldev, pin_desc,
+ param, arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = uniphier_conf_pin_drive_set(pctldev, pin_desc,
+ arg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = uniphier_conf_pin_input_enable(pctldev,
+ pin_desc, arg);
+ break;
+ default:
+ dev_err(pctldev->dev,
+ "unsupported configuration parameter %u\n",
+ param);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *configs,
+ unsigned num_configs)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ const unsigned *pins = priv->socdata->groups[selector].pins;
+ unsigned num_pins = priv->socdata->groups[selector].num_pins;
+ int i, ret;
+
+ for (i = 0; i < num_pins; i++) {
+ ret = uniphier_conf_pin_config_set(pctldev, pins[i],
+ configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops uniphier_confops = {
+ .is_generic = true,
+ .pin_config_get = uniphier_conf_pin_config_get,
+ .pin_config_set = uniphier_conf_pin_config_set,
+ .pin_config_group_set = uniphier_conf_pin_config_group_set,
+};
+
+static int uniphier_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->socdata->functions_count;
+}
+
+static const char *uniphier_pmx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->socdata->functions[selector].name;
+}
+
+static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned *num_groups)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = priv->socdata->functions[selector].groups;
+ *num_groups = priv->socdata->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned muxval)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned mux_bits = priv->socdata->mux_bits;
+ unsigned reg_stride = priv->socdata->reg_stride;
+ unsigned reg, reg_end, shift, mask;
+ int ret;
+
+ reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
+ reg_end = reg + reg_stride;
+ shift = pin * mux_bits % 32;
+ mask = (1U << mux_bits) - 1;
+
+ /*
+ * If reg_stride is greater than 4, the MSBs of each pin-mux value are
+ * stored at offset + 4.
+ */
+ for (; reg < reg_end; reg += 4) {
+ ret = regmap_update_bits(priv->regmap, reg,
+ mask << shift, muxval << shift);
+ if (ret)
+ return ret;
+ muxval >>= mux_bits;
+ }
+
+ if (priv->socdata->load_pinctrl) {
+ ret = regmap_write(priv->regmap,
+ UNIPHIER_PINCTRL_LOAD_PINMUX, 1);
+ if (ret)
+ return ret;
+ }
+
+ /* some pins need input-enabling */
+ return uniphier_conf_pin_input_enable(pctldev,
+ &pctldev->desc->pins[pin], 1);
+}
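
With mux_bits = 8 (as in the ProXstream2 data above), four pin-mux fields share each 32-bit register, so pin N lands at byte offset N * 8 / 32 * reg_stride with a shift of (N * 8) % 32; when reg_stride is 8, the loop's second pass writes the next-higher byte of muxval at offset + 4, as the comment describes. A small worked example:

    /* Hypothetical: pin 5, mux_bits = 8, reg_stride = 4. */
    unsigned int reg = 0x0 + 5 * 8 / 32 * 4;  /* 40/32 = 1 -> reg 0x4 */
    unsigned int shift = 5 * 8 % 32;          /* 8: byte 1 of reg 0x4 */
    unsigned int mask = (1U << 8) - 1;        /* 0xff                 */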
+
+static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned func_selector,
+ unsigned group_selector)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct uniphier_pinctrl_group *grp =
+ &priv->socdata->groups[group_selector];
+ int i;
+ int ret;
+
+ for (i = 0; i < grp->num_pins; i++) {
+ ret = uniphier_pmx_set_one_mux(pctldev, grp->pins[i],
+ grp->muxvals[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct uniphier_pinctrl_group *groups = priv->socdata->groups;
+ int groups_count = priv->socdata->groups_count;
+ enum uniphier_pinmux_gpio_range_type range_type;
+ int i, j;
+
+ if (strstr(range->name, "irq"))
+ range_type = UNIPHIER_PINMUX_GPIO_RANGE_IRQ;
+ else
+ range_type = UNIPHIER_PINMUX_GPIO_RANGE_PORT;
+
+ for (i = 0; i < groups_count; i++) {
+ if (groups[i].range_type != range_type)
+ continue;
+
+ for (j = 0; j < groups[i].num_pins; j++)
+ if (groups[i].pins[j] == offset)
+ goto found;
+ }
+
+ dev_err(pctldev->dev, "pin %u does not support GPIO\n", offset);
+ return -EINVAL;
+
+found:
+ return uniphier_pmx_set_one_mux(pctldev, offset, groups[i].muxvals[j]);
+}
+
+static const struct pinmux_ops uniphier_pmxops = {
+ .get_functions_count = uniphier_pmx_get_functions_count,
+ .get_function_name = uniphier_pmx_get_function_name,
+ .get_function_groups = uniphier_pmx_get_function_groups,
+ .set_mux = uniphier_pmx_set_mux,
+ .gpio_request_enable = uniphier_pmx_gpio_request_enable,
+ .strict = true,
+};
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *desc,
+ struct uniphier_pinctrl_socdata *socdata)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pinctrl_priv *priv;
+
+ if (!socdata ||
+ !socdata->groups ||
+ !socdata->groups_count ||
+ !socdata->functions ||
+ !socdata->functions_count ||
+ !socdata->mux_bits ||
+ !socdata->reg_stride) {
+ dev_err(dev, "pinctrl socdata lacks necessary members\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "failed to get regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->socdata = socdata;
+ desc->pctlops = &uniphier_pctlops;
+ desc->pmxops = &uniphier_pmxops;
+ desc->confops = &uniphier_confops;
+
+ priv->pctldev = pinctrl_register(desc, dev, priv);
+ if (IS_ERR(priv->pctldev)) {
+ dev_err(dev, "failed to register UniPhier pinctrl driver\n");
+ return PTR_ERR(priv->pctldev);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev)
+{
+ struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(priv->pctldev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
new file mode 100644
index 000000000000..e1e98b868be5
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_UNIPHIER_H__
+#define __PINCTRL_UNIPHIER_H__
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define UNIPHIER_PINCTRL_PINMUX_BASE 0x0
+#define UNIPHIER_PINCTRL_LOAD_PINMUX 0x700
+#define UNIPHIER_PINCTRL_DRVCTRL_BASE 0x800
+#define UNIPHIER_PINCTRL_DRV2CTRL_BASE 0x900
+#define UNIPHIER_PINCTRL_PUPDCTRL_BASE 0xa00
+#define UNIPHIER_PINCTRL_IECTRL 0xd00
+
+/* input enable control register bit */
+#define UNIPHIER_PIN_IECTRL_SHIFT 0
+#define UNIPHIER_PIN_IECTRL_BITS 8
+#define UNIPHIER_PIN_IECTRL_MASK ((1UL << (UNIPHIER_PIN_IECTRL_BITS)) \
+ - 1)
+
+/* drive strength control register number */
+#define UNIPHIER_PIN_DRVCTRL_SHIFT ((UNIPHIER_PIN_IECTRL_SHIFT) + \
+ (UNIPHIER_PIN_IECTRL_BITS))
+#define UNIPHIER_PIN_DRVCTRL_BITS 9
+#define UNIPHIER_PIN_DRVCTRL_MASK ((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \
+ - 1)
+
+/* supported drive strength (mA) */
+#define UNIPHIER_PIN_DRV_STR_SHIFT ((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
+ (UNIPHIER_PIN_DRVCTRL_BITS))
+#define UNIPHIER_PIN_DRV_STR_BITS 3
+#define UNIPHIER_PIN_DRV_STR_MASK ((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \
+ - 1)
+
+/* pull-up / pull-down register number */
+#define UNIPHIER_PIN_PUPDCTRL_SHIFT ((UNIPHIER_PIN_DRV_STR_SHIFT) + \
+ (UNIPHIER_PIN_DRV_STR_BITS))
+#define UNIPHIER_PIN_PUPDCTRL_BITS 9
+#define UNIPHIER_PIN_PUPDCTRL_MASK ((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\
+ - 1)
+
+/* direction of pull register */
+#define UNIPHIER_PIN_PULL_DIR_SHIFT ((UNIPHIER_PIN_PUPDCTRL_SHIFT) + \
+ (UNIPHIER_PIN_PUPDCTRL_BITS))
+#define UNIPHIER_PIN_PULL_DIR_BITS 3
+#define UNIPHIER_PIN_PULL_DIR_MASK ((1UL << (UNIPHIER_PIN_PULL_DIR_BITS))\
+ - 1)
+
+#if UNIPHIER_PIN_PULL_DIR_SHIFT + UNIPHIER_PIN_PULL_DIR_BITS > BITS_PER_LONG
+#error "unable to pack pin attributes."
+#endif
+
+#define UNIPHIER_PIN_IECTRL_NONE (UNIPHIER_PIN_IECTRL_MASK)
+
+/* selectable drive strength */
+enum uniphier_pin_drv_str {
+ UNIPHIER_PIN_DRV_4_8, /* 2 level control: 4/8 mA */
+ UNIPHIER_PIN_DRV_8_12_16_20, /* 4 level control: 8/12/16/20 mA */
+ UNIPHIER_PIN_DRV_FIXED_4, /* fixed to 4mA */
+ UNIPHIER_PIN_DRV_FIXED_5, /* fixed to 5mA */
+ UNIPHIER_PIN_DRV_FIXED_8, /* fixed to 8mA */
+ UNIPHIER_PIN_DRV_NONE, /* no support (input-only pin) */
+};
+
+/* direction of pull register (no pin supports bi-directional pull biasing) */
+enum uniphier_pin_pull_dir {
+ UNIPHIER_PIN_PULL_UP, /* pull-up or disabled */
+ UNIPHIER_PIN_PULL_DOWN, /* pull-down or disabled */
+ UNIPHIER_PIN_PULL_UP_FIXED, /* always pull-up */
+ UNIPHIER_PIN_PULL_DOWN_FIXED, /* always pull-down */
+ UNIPHIER_PIN_PULL_NONE, /* no pull register */
+};
+
+#define UNIPHIER_PIN_IECTRL(x) \
+ (((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT))
+#define UNIPHIER_PIN_DRVCTRL(x) \
+ (((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT))
+#define UNIPHIER_PIN_DRV_STR(x) \
+ (((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT))
+#define UNIPHIER_PIN_PUPDCTRL(x) \
+ (((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT))
+#define UNIPHIER_PIN_PULL_DIR(x) \
+ (((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT))
+
+#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\
+ (UNIPHIER_PIN_IECTRL(iectrl) | \
+ UNIPHIER_PIN_DRVCTRL(drvctrl) | \
+ UNIPHIER_PIN_DRV_STR(drv_str) | \
+ UNIPHIER_PIN_PUPDCTRL(pupdctrl) | \
+ UNIPHIER_PIN_PULL_DIR(pull_dir))
+
+static inline unsigned int uniphier_pin_get_iectrl(void *drv_data)
+{
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_IECTRL_SHIFT) &
+ UNIPHIER_PIN_IECTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data)
+{
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_DRVCTRL_SHIFT) &
+ UNIPHIER_PIN_DRVCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drv_str(void *drv_data)
+{
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) &
+ UNIPHIER_PIN_DRV_STR_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data)
+{
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_PUPDCTRL_SHIFT) &
+ UNIPHIER_PIN_PUPDCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pull_dir(void *drv_data)
+{
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_PULL_DIR_SHIFT) &
+ UNIPHIER_PIN_PULL_DIR_MASK;
+}
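
All five attribute fields are packed into the pin's drv_data pointer: 8 IECTRL bits, 9 DRVCTRL bits, 3 drive-strength bits, 9 PUPDCTRL bits and 3 pull-direction bits, 32 bits in total, which is what the BITS_PER_LONG check above enforces. A round-trip sketch with purely illustrative field values:

    /* Hypothetical pack/unpack round trip. */
    void *d = (void *)UNIPHIER_PIN_ATTR_PACKED(3, 10, UNIPHIER_PIN_DRV_4_8,
                                               70, UNIPHIER_PIN_PULL_UP);
    unsigned int iectrl = uniphier_pin_get_iectrl(d);      /* 3  */
    unsigned int pupdctrl = uniphier_pin_get_pupdctrl(d);  /* 70 */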
+
+enum uniphier_pinmux_gpio_range_type {
+ UNIPHIER_PINMUX_GPIO_RANGE_PORT,
+ UNIPHIER_PINMUX_GPIO_RANGE_IRQ,
+ UNIPHIER_PINMUX_GPIO_RANGE_NONE,
+};
+
+struct uniphier_pinctrl_group {
+ const char *name;
+ const unsigned *pins;
+ unsigned num_pins;
+ const unsigned *muxvals;
+ enum uniphier_pinmux_gpio_range_type range_type;
+};
+
+struct uniphier_pinmux_function {
+ const char *name;
+ const char * const *groups;
+ unsigned num_groups;
+};
+
+struct uniphier_pinctrl_socdata {
+ const struct uniphier_pinctrl_group *groups;
+ int groups_count;
+ const struct uniphier_pinmux_function *functions;
+ int functions_count;
+ unsigned mux_bits;
+ unsigned reg_stride;
+ bool load_pinctrl;
+};
+
+#define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g) \
+{ \
+ .number = a, \
+ .name = b, \
+ .drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g), \
+}
+
+#define __UNIPHIER_PINCTRL_GROUP(grp, type) \
+ { \
+ .name = #grp, \
+ .pins = grp##_pins, \
+ .num_pins = ARRAY_SIZE(grp##_pins), \
+ .muxvals = grp##_muxvals + \
+ BUILD_BUG_ON_ZERO(ARRAY_SIZE(grp##_pins) != \
+ ARRAY_SIZE(grp##_muxvals)), \
+ .range_type = type, \
+ }
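
BUILD_BUG_ON_ZERO(e) evaluates to a size_t zero when e is false and triggers a compile error (a negative bit-field width) when e is true, so adding it to grp##_muxvals costs nothing at run time while refusing to build any group whose pins and muxvals arrays differ in length. For instance, a mismatched pair like this would be rejected:

    /* Hypothetical mismatch caught at compile time by the macro above. */
    static const unsigned bad_pins[] = {1, 2, 3};
    static const unsigned bad_muxvals[] = {0, 0};   /* one entry short */
    /* UNIPHIER_PINCTRL_GROUP(bad) -> error: negative width in bit-field */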
+
+#define UNIPHIER_PINCTRL_GROUP(grp) \
+ __UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_NONE)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(grp) \
+ __UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_PORT)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(grp) \
+ __UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_IRQ)
+
+#define UNIPHIER_PINCTRL_GROUP_SINGLE(grp, array, ofst) \
+ { \
+ .name = #grp, \
+ .pins = array##_pins + ofst, \
+ .num_pins = 1, \
+ .muxvals = array##_muxvals + ofst, \
+ }
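
UNIPHIER_PINCTRL_GROUP_SINGLE carves a one-pin group out of an existing range by pointing into that range's pins and muxvals arrays, which is how the long portNN/xirqN tables earlier in this patch are produced. Expanded by hand, one entry looks roughly like:

    /* Sketch of UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16). */
    {
            .name = "port20",
            .pins = port_range0_pins + 16,
            .num_pins = 1,
            .muxvals = port_range0_muxvals + 16,
    }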
+
+#define UNIPHIER_PINMUX_FUNCTION(func) \
+ { \
+ .name = #func, \
+ .groups = func##_groups, \
+ .num_groups = ARRAY_SIZE(func##_groups), \
+ }
+
+struct platform_device;
+struct pinctrl_desc;
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *desc,
+ struct uniphier_pinctrl_socdata *socdata);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev);
+
+#endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a04019ab9feb..02072749fff3 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -23,7 +23,7 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -111,6 +111,7 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = {
.irqflags = IRQF_TRIGGER_FALLING,
.t19_num_keys = ARRAY_SIZE(mxt_t19_keys),
.t19_keymap = mxt_t19_keys,
+ .suspend_mode = MXT_SUSPEND_T9_CTRL,
};
static struct i2c_board_info atmel_224s_tp_device = {
@@ -121,6 +122,7 @@ static struct i2c_board_info atmel_224s_tp_device = {
static struct mxt_platform_data atmel_1664s_platform_data = {
.irqflags = IRQF_TRIGGER_FALLING,
+ .suspend_mode = MXT_SUSPEND_T9_CTRL,
};
static struct i2c_board_info atmel_1664s_device = {
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6dc13e4de396..c69bb703f483 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -919,4 +919,9 @@ config INTEL_PMC_IPC
The PMC is an ARC processor which defines IPC commands for communication
with other entities in the CPU.
+config SURFACE_PRO3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3 tablet"
+ depends on ACPI && INPUT
+ ---help---
+ This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3 tablet.
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index dda95a985321..ada512819028 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
+obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index f6b280dbfb33..d773b9dc48a0 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -807,6 +807,7 @@ static const struct acpi_device_id norfkill_ids[] __initconst = {
{ "IBM0068", 0},
{ "LEN0068", 0},
{ "SNY5001", 0}, /* sony-laptop in charge */
+ { "HPQ6601", 0},
{ "", 0},
};
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 1ef02daddb60..460fa6708bfc 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -346,8 +346,7 @@ static void acerhdf_check_param(struct thermal_zone_device *thermal)
* as late as the polling interval is since we can't do that in the respective
* accessors of the module parameters.
*/
-static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
- unsigned long *t)
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t)
{
int temp, err = 0;
@@ -453,7 +452,7 @@ static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
}
static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
- unsigned long *temp)
+ int *temp)
{
if (trip != 0)
return -EINVAL;
@@ -464,7 +463,7 @@ static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
}
static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
- unsigned long *temp)
+ int *temp)
{
if (trip == 0)
*temp = fanon;
@@ -477,7 +476,7 @@ static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
}
static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temperature)
+ int *temperature)
{
*temperature = ACERHDF_TEMP_CRIT;
return 0;
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 58d29c4f2840..f2b5d0a8adf0 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -332,6 +332,7 @@ static const struct key_entry asus_keymap[] = {
{KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
{KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
{KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+ {KE_KEY, 0x6A, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad Fn + F9 */
{KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
{KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
{KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ed317ccac4a2..aaeeae81e3a9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
static struct calling_interface_buffer *buffer;
static DEFINE_MUTEX(buffer_mutex);
-static int hwswitch_state;
+static void clear_buffer(void)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+}
static void get_buffer(void)
{
mutex_lock(&buffer_mutex);
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ clear_buffer();
}
static void release_buffer(void)
@@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked)
int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
int hwswitch_bit = (unsigned long)data - 1;
+ int hwswitch;
+ int status;
+ int ret;
get_buffer();
+
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ status = buffer->output[1];
+
+ if (ret != 0)
+ goto out;
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ hwswitch = buffer->output[1];
/* If the hardware switch controls this radio, and the hardware
switch is disabled, always disable the radio */
- if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16)))
+ if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
+ (status & BIT(0)) && !(status & BIT(16)))
disable = 1;
+ clear_buffer();
+
buffer->input[0] = (1 | (radio<<8) | (disable << 16));
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ out:
release_buffer();
- return 0;
+ return dell_smi_error(ret);
}
/* Must be called with the buffer held */
@@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
if (status & BIT(0)) {
/* Has hw-switch, sync sw_state to BIOS */
int block = rfkill_blocked(rfkill);
+ clear_buffer();
buffer->input[0] = (1 | (radio << 8) | (block << 16));
dell_send_request(buffer, 17, 11);
} else {
@@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
}
static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
- int status)
+ int status, int hwswitch)
{
- if (hwswitch_state & (BIT(radio - 1)))
+ if (hwswitch & (BIT(radio - 1)))
rfkill_set_hw_state(rfkill, !(status & BIT(16)));
}
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
+ int radio = ((unsigned long)data & 0xF);
+ int hwswitch;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
- dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
+ if (ret != 0 || !(status & BIT(0))) {
+ release_buffer();
+ return;
+ }
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+ hwswitch = buffer->output[1];
release_buffer();
+
+ if (ret != 0)
+ return;
+
+ dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
}
static const struct rfkill_ops dell_rfkill_ops = {
@@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
{
+ int hwswitch_state;
+ int hwswitch_ret;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ hwswitch_ret = buffer->output[0];
+ hwswitch_state = buffer->output[1];
+
release_buffer();
+ seq_printf(s, "return:\t%d\n", ret);
seq_printf(s, "status:\t0x%X\n", status);
seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
status & BIT(0));
@@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
(status & BIT(21)) >> 21);
- seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
+ seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
+ seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
hwswitch_state & BIT(0));
seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
@@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ int hwswitch = 0;
int status;
+ int ret;
get_buffer();
+
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
+ if (ret != 0)
+ goto out;
+
+ clear_buffer();
+
+ buffer->input[0] = 0x2;
+ dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
+
+ if (ret == 0 && (status & BIT(0)))
+ hwswitch = buffer->output[1];
+
if (wifi_rfkill) {
- dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
+ dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
}
if (bluetooth_rfkill) {
- dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
+ dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
+ hwswitch);
dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
}
if (wwan_rfkill) {
- dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
+ dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
}
+ out:
release_buffer();
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void)
get_buffer();
dell_send_request(buffer, 17, 11);
+ ret = buffer->output[0];
status = buffer->output[1];
- buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
- hwswitch_state = buffer->output[1];
release_buffer();
- if (!(status & BIT(0))) {
- if (force_rfkill) {
- /* No hwsitch, clear all hw-controlled bits */
- hwswitch_state &= ~7;
- } else {
- /* rfkill is only tested on laptops with a hwswitch */
- return 0;
- }
- }
+ /* Dell wireless info SMBIOS call is not supported */
+ if (ret != 0)
+ return 0;
+
+ /* rfkill is only tested on laptops with a hwswitch */
+ if (!(status & BIT(0)) && !force_rfkill)
+ return 0;
if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
@@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
- int ret = 0;
+ int token;
+ int ret;
+
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token == -1)
+ return -ENODEV;
get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+ buffer->input[0] = token;
buffer->input[1] = bd->props.brightness;
- if (buffer->input[0] == -1) {
- ret = -ENODEV;
- goto out;
- }
-
if (power_supply_is_system_supplied() > 0)
dell_send_request(buffer, 1, 2);
else
dell_send_request(buffer, 1, 1);
- out:
+ ret = dell_smi_error(buffer->output[0]);
+
release_buffer();
return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
{
- int ret = 0;
+ int token;
+ int ret;
- get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token == -1)
+ return -ENODEV;
- if (buffer->input[0] == -1) {
- ret = -ENODEV;
- goto out;
- }
+ get_buffer();
+ buffer->input[0] = token;
if (power_supply_is_system_supplied() > 0)
dell_send_request(buffer, 0, 2);
else
dell_send_request(buffer, 0, 1);
- ret = buffer->output[1];
+ if (buffer->output[0])
+ ret = dell_smi_error(buffer->output[0]);
+ else
+ ret = buffer->output[1];
- out:
release_buffer();
return ret;
}
@@ -2036,6 +2112,7 @@ static void kbd_led_exit(void)
static int __init dell_init(void)
{
int max_intensity = 0;
+ int token;
int ret;
if (!dmi_check_system(dell_device_table))
@@ -2094,13 +2171,15 @@ static int __init dell_init(void)
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
- get_buffer();
- buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
- if (buffer->input[0] != -1) {
+ token = find_token_location(BRIGHTNESS_TOKEN);
+ if (token != -1) {
+ get_buffer();
+ buffer->input[0] = token;
dell_send_request(buffer, 0, 2);
- max_intensity = buffer->output[3];
+ if (buffer->output[0] == 0)
+ max_intensity = buffer->output[3];
+ release_buffer();
}
- release_buffer();
if (max_intensity) {
struct backlight_properties props;
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 4e4cc8bd7557..988eedbd7c63 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -114,14 +114,9 @@ static int __init hpwl_init(void)
pr_info("Initializing HPQ6001 module\n");
err = acpi_bus_register_driver(&hpwl_driver);
- if (err) {
+ if (err)
pr_err("Unable to register HP wireless control driver.\n");
- goto error_acpi_register;
- }
-
- return 0;
-error_acpi_register:
return err;
}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 76b57388d01b..fce49f3c6ed6 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -853,6 +853,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Yoga 3 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
+ },
+ },
+ {
+ .ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "Yoga2"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 3 Pro 1370",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 0944e834af8d..9f713b832ba3 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -132,7 +132,7 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
* to achieve very close approximate temp value with less than
* 0.5C error
*/
-static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
+static int adc_to_temp(int direct, uint16_t adc_val, int *tp)
{
int temp;
@@ -174,14 +174,13 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
*
* Can sleep
*/
-static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int mid_read_temp(struct thermal_zone_device *tzd, int *temp)
{
struct thermal_device_info *td_info = tzd->devdata;
uint16_t adc_val, addr;
uint8_t data = 0;
int ret;
- unsigned long curr_temp;
-
+ int curr_temp;
addr = td_info->chnl_addr;
@@ -453,7 +452,7 @@ static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
*
* Can sleep
*/
-static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int read_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
WARN_ON(tzd == NULL);
return mid_read_temp(tzd, temp);
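The unsigned long to int conversions above follow the thermal core, which now passes temperatures as plain int millidegrees Celsius so that sub-zero readings stay representable. A hedged caller sketch (assuming millidegree units, per that convention):

	int temp;

	if (!mid_read_temp(tzd, &temp))
		pr_info("zone temperature: %d.%03d C\n",
			temp / 1000, abs(temp) % 1000);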
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index d734763dab69..28b2a12bb26d 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,7 +33,7 @@
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <asm/intel_pmc_ipc.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
/*
* IPC registers
@@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev {
struct completion cmd_complete;
/* The following PMC BARs share the same ACPI device with the IPC */
- void *acpi_io_base;
+ resource_size_t acpi_io_base;
int acpi_io_size;
struct platform_device *tco_dev;
/* gcr */
- void *gcr_base;
+ resource_size_t gcr_base;
int gcr_size;
/* punit */
- void *punit_base;
+ resource_size_t punit_base;
int punit_size;
- void *punit_base2;
+ resource_size_t punit_base2;
int punit_size2;
struct platform_device *punit_dev;
} ipcdev;
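Switching the BAR fields from void * to resource_size_t stores these bus addresses in the type struct resource already uses, which is why every (resource_size_t) and (void *) cast disappears in the hunks below. Illustrative sketch only:

	/* A bus address is data, not a dereferenceable pointer. */
	struct resource r = { .flags = IORESOURCE_MEM };

	r.start = ipcdev.gcr_base;	/* no cast needed anymore */
	r.end = r.start + ipcdev.gcr_size - 1;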
@@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void)
return ret;
}
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
+/**
+ * intel_pmc_ipc_simple_command() - Simple IPC command
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ *
+ * Send a simple IPC command to the PMC when there is no need to
+ * specify input/output data or source/dest pointers.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_simple_command(int cmd, int sub)
{
@@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
}
EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
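Per the new kernel-doc, the simple form suits commands that carry no payload; the raw and command variants documented next layer data buffers and pointer registers on top of the same call. A hedged caller sketch (the command code below is a placeholder, not a real PMC opcode):

	#define MY_PMC_CMD	0x3	/* placeholder command code */

	int ret = intel_pmc_ipc_simple_command(MY_PMC_CMD, 0);

	if (ret)
		pr_err("PMC IPC command failed: %d\n", ret);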
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
+/**
+ * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ * @in: input data of this IPC command.
+ * @inlen: input data length in bytes.
+ * @out: output data of this IPC command.
+ * @outlen: output data length in dwords.
+ * @sptr: data writing to SPTR register.
+ * @dptr: data writing to DPTR register.
+ *
+ * Send an IPC command to PMC with input/output data and source/dest pointers.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
u32 outlen, u32 dptr, u32 sptr)
@@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
}
EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
+/**
+ * intel_pmc_ipc_command() - IPC command with input/output data
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ * @in: input data of this IPC command.
+ * @inlen: input data length in bytes.
+ * @out: output data of this IPC command.
+ * @outlen: output data length in dwords.
+ *
+ * Send an IPC command to PMC with input/output data.
+ *
+ * Return: an IPC error code or 0 on success.
*/
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen)
@@ -460,9 +473,9 @@ static struct resource tco_res[] = {
},
};
-static struct lpc_ich_info tco_info = {
+static struct itco_wdt_platform_data tco_info = {
.name = "Apollo Lake SoC",
- .iTCO_version = 3,
+ .version = 3,
};
static int ipc_create_punit_device(void)
@@ -480,11 +493,11 @@ static int ipc_create_punit_device(void)
pdev->dev.parent = ipcdev.dev;
res = punit_res;
- res->start = (resource_size_t)ipcdev.punit_base;
+ res->start = ipcdev.punit_base;
res->end = res->start + ipcdev.punit_size - 1;
res = punit_res + PUNIT_RESOURCE_INTER;
- res->start = (resource_size_t)ipcdev.punit_base2;
+ res->start = ipcdev.punit_base2;
res->end = res->start + ipcdev.punit_size2 - 1;
ret = platform_device_add_resources(pdev, punit_res,
@@ -522,15 +535,15 @@ static int ipc_create_tco_device(void)
pdev->dev.parent = ipcdev.dev;
res = tco_res + TCO_RESOURCE_ACPI_IO;
- res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET;
+ res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
res->end = res->start + TCO_REGS_SIZE - 1;
res = tco_res + TCO_RESOURCE_SMI_EN_IO;
- res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET;
+ res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
res->end = res->start + SMI_EN_SIZE - 1;
res = tco_res + TCO_RESOURCE_GCR_MEM;
- res->start = (resource_size_t)ipcdev.gcr_base;
+ res->start = ipcdev.gcr_base;
res->end = res->start + ipcdev.gcr_size - 1;
ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
@@ -539,8 +552,7 @@ static int ipc_create_tco_device(void)
goto err;
}
- ret = platform_device_add_data(pdev, &tco_info,
- sizeof(struct lpc_ich_info));
+ ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
if (ret) {
dev_err(ipcdev.dev, "Failed to add tco platform data\n");
goto err;
@@ -589,7 +601,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.acpi_io_base = (void *)res->start;
+ ipcdev.acpi_io_base = res->start;
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -601,7 +613,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.punit_base = (void *)res->start;
+ ipcdev.punit_base = res->start;
ipcdev.punit_size = size;
dev_info(&pdev->dev, "punit data res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -613,7 +625,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = resource_size(res);
- ipcdev.punit_base2 = (void *)res->start;
+ ipcdev.punit_base2 = res->start;
ipcdev.punit_size2 = size;
dev_info(&pdev->dev, "punit interface res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
@@ -637,7 +649,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
}
ipcdev.ipc_base = addr;
- ipcdev.gcr_base = (void *)(res->start + size);
+ ipcdev.gcr_base = res->start + size;
ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
dev_info(&pdev->dev, "ipc res: %llx %x\n",
(long long)res->start, (int)resource_size(res));
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 001b199a8c33..187d1086d15c 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
int nc;
u32 offset = 0;
int err;
- u8 cbuf[IPC_WWBUF_SIZE] = { };
+ u8 cbuf[IPC_WWBUF_SIZE];
u32 *wbuf = (u32 *)&cbuf;
- mutex_lock(&ipclock);
-
memset(cbuf, 0, sizeof(cbuf));
+ mutex_lock(&ipclock);
+
if (ipcdev.pdev == NULL) {
mutex_unlock(&ipclock);
return -ENODEV;
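The reorder above is a small critical-section trim: buffer initialization is private to the caller, so it can happen before ipclock is taken, and the mutex then covers only shared-device state. The same shape in general form (all names below are hypothetical):

	static int do_transfer(struct bus_dev *bus, const u8 *payload, size_t len)
	{
		u8 scratch[16] = { };	/* private data: no lock needed */

		memcpy(scratch, payload, min_t(size_t, len, sizeof(scratch)));

		mutex_lock(&bus->lock);	/* lock shared state only */
		/* ... program the hardware from scratch[] ... */
		mutex_unlock(&bus->lock);

		return 0;
	}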
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
new file mode 100644
index 000000000000..f7dade3fd2ab
--- /dev/null
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -0,0 +1,216 @@
+/*
+ * power/home/volume button support for
+ * Microsoft Surface Pro 3 tablet.
+ *
+ * Copyright (c) 2015 Intel Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <acpi/button.h>
+
+#define SURFACE_BUTTON_HID "MSHW0028"
+#define SURFACE_BUTTON_OBJ_NAME "VGBI"
+#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3 Buttons"
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
+#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_HOME 0xc4
+#define SURFACE_BUTTON_NOTIFY_RELEASE_HOME 0xc5
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
+
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
+#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
+
+ACPI_MODULE_NAME("surface pro 3 button");
+
+MODULE_AUTHOR("Chen Yu");
+MODULE_DESCRIPTION("Surface Pro3 Button Driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * Power, Home, and Volume button support is supposed to be covered
+ * by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface Pro 3 does not appear to obey that spec and
+ * instead uses the VGBI (MSHW0028) device to dispatch these events.
+ * We use an acpi_driver rather than a platform_driver or i2c_driver
+ * because, although VGBI has an I2C resource connected to an I2C
+ * controller, it is not embedded in any I2C controller's scope, so
+ * neither a platform_device nor an i2c_client will ever be
+ * enumerated; an acpi_driver is what remains.
+ */
+static const struct acpi_device_id surface_button_device_ids[] = {
+ {SURFACE_BUTTON_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, surface_button_device_ids);
+
+struct surface_button {
+ unsigned int type;
+ struct input_dev *input;
+ char phys[32]; /* for input device */
+ unsigned long pushed;
+ bool suspended;
+};
+
+static void surface_button_notify(struct acpi_device *device, u32 event)
+{
+ struct surface_button *button = acpi_driver_data(device);
+ struct input_dev *input;
+ int key_code = KEY_RESERVED;
+ bool pressed = false;
+
+ switch (event) {
+ /* Power button press/release */
+ case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
+ pressed = true;
+ /* fall through */
+ case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
+ key_code = KEY_POWER;
+ break;
+ /* Home button press/release */
+ case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
+ pressed = true;
+ /* fall through */
+ case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
+ key_code = KEY_LEFTMETA;
+ break;
+ /* Volume up button press/release */
+ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
+ pressed = true;
+ /* fall through */
+ case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
+ key_code = KEY_VOLUMEUP;
+ break;
+ /* Volume down button press/release */
+ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
+ pressed = true;
+ /* fall through */
+ case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
+ key_code = KEY_VOLUMEDOWN;
+ break;
+ default:
+ dev_info_ratelimited(&device->dev,
+ "Unsupported event [0x%x]\n", event);
+ break;
+ }
+ input = button->input;
+ if (key_code == KEY_RESERVED)
+ return;
+ if (pressed)
+ pm_wakeup_event(&device->dev, 0);
+ if (button->suspended)
+ return;
+ input_report_key(input, key_code, pressed ? 1 : 0);
+ input_sync(input);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int surface_button_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct surface_button *button = acpi_driver_data(device);
+
+ button->suspended = true;
+ return 0;
+}
+
+static int surface_button_resume(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct surface_button *button = acpi_driver_data(device);
+
+ button->suspended = false;
+ return 0;
+}
+#endif
+
+static int surface_button_add(struct acpi_device *device)
+{
+ struct surface_button *button;
+ struct input_dev *input;
+ const char *hid = acpi_device_hid(device);
+ char *name;
+ int error;
+
+ if (strncmp(acpi_device_bid(device), SURFACE_BUTTON_OBJ_NAME,
+ strlen(SURFACE_BUTTON_OBJ_NAME)))
+ return -ENODEV;
+
+ button = kzalloc(sizeof(struct surface_button), GFP_KERNEL);
+ if (!button)
+ return -ENOMEM;
+
+ device->driver_data = button;
+ button->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto err_free_button;
+ }
+
+ name = acpi_device_name(device);
+ strcpy(name, SURFACE_BUTTON_DEVICE_NAME);
+ snprintf(button->phys, sizeof(button->phys), "%s/buttons", hid);
+
+ input->name = name;
+ input->phys = button->phys;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &device->dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_KEY, KEY_LEFTMETA);
+ input_set_capability(input, EV_KEY, KEY_VOLUMEUP);
+ input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+ dev_info(&device->dev,
+ "%s [%s]\n", name, acpi_device_bid(device));
+ return 0;
+
+ err_free_input:
+ input_free_device(input);
+ err_free_button:
+ kfree(button);
+ return error;
+}
+
+static int surface_button_remove(struct acpi_device *device)
+{
+ struct surface_button *button = acpi_driver_data(device);
+
+ input_unregister_device(button->input);
+ kfree(button);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(surface_button_pm,
+ surface_button_suspend, surface_button_resume);
+
+static struct acpi_driver surface_button_driver = {
+ .name = "surface_pro3_button",
+ .class = "SurfacePro3",
+ .ids = surface_button_device_ids,
+ .ops = {
+ .add = surface_button_add,
+ .remove = surface_button_remove,
+ .notify = surface_button_notify,
+ },
+ .drv.pm = &surface_button_pm,
+};
+
+module_acpi_driver(surface_button_driver);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 33e488cf5569..131dd7464183 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -402,7 +402,7 @@ static const char *str_supported(int is_supported);
#else
static inline const char *str_supported(int is_supported) { return ""; }
#define vdbg_printk(a_dbg_level, format, arg...) \
- no_printk(format, ##arg)
+ do { if (0) no_printk(format, ##arg); } while (0)
#endif
static void tpacpi_log_usertask(const char * const what)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 3ad7b1fa24ce..6740c513919c 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -31,7 +31,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TOSHIBA_ACPI_VERSION "0.22"
+#define TOSHIBA_ACPI_VERSION "0.23"
#define PROC_INTERFACE_VERSION 1
#include <linux/kernel.h>
@@ -50,6 +50,8 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/toshiba.h>
#include <acpi/video.h>
MODULE_AUTHOR("John Belmonte");
@@ -91,6 +93,7 @@ MODULE_LICENSE("GPL");
/* Return codes */
#define TOS_SUCCESS 0x0000
+#define TOS_SUCCESS2 0x0001
#define TOS_OPEN_CLOSE_OK 0x0044
#define TOS_FAILURE 0x1000
#define TOS_NOT_SUPPORTED 0x8000
@@ -111,7 +114,6 @@ MODULE_LICENSE("GPL");
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
#define HCI_LCD_BRIGHTNESS 0x002a
-#define HCI_WIRELESS 0x0056
#define HCI_ACCELEROMETER 0x006d
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
@@ -140,10 +142,6 @@ MODULE_LICENSE("GPL");
#define HCI_VIDEO_OUT_LCD 0x1
#define HCI_VIDEO_OUT_CRT 0x2
#define HCI_VIDEO_OUT_TV 0x4
-#define HCI_WIRELESS_KILL_SWITCH 0x01
-#define HCI_WIRELESS_BT_PRESENT 0x0f
-#define HCI_WIRELESS_BT_ATTACH 0x40
-#define HCI_WIRELESS_BT_POWER 0x80
#define SCI_KBD_MODE_MASK 0x1f
#define SCI_KBD_MODE_FNZ 0x1
#define SCI_KBD_MODE_AUTO 0x2
@@ -170,6 +168,7 @@ struct toshiba_acpi_dev {
struct led_classdev led_dev;
struct led_classdev kbd_led;
struct led_classdev eco_led;
+ struct miscdevice miscdev;
int force_fan;
int last_key_event;
@@ -189,7 +188,6 @@ struct toshiba_acpi_dev {
unsigned int info_supported:1;
unsigned int tr_backlight_supported:1;
unsigned int kbd_illum_supported:1;
- unsigned int kbd_led_registered:1;
unsigned int touchpad_supported:1;
unsigned int eco_supported:1;
unsigned int accelerometer_supported:1;
@@ -200,6 +198,10 @@ struct toshiba_acpi_dev {
unsigned int panel_power_on_supported:1;
unsigned int usb_three_supported:1;
unsigned int sysfs_created:1;
+
+ bool kbd_led_registered;
+ bool illumination_led_registered;
+ bool eco_led_registered;
};
static struct toshiba_acpi_dev *toshiba_acpi;
@@ -248,16 +250,16 @@ static const struct key_entry toshiba_acpi_keymap[] = {
};
static const struct key_entry toshiba_acpi_alt_keymap[] = {
- { KE_KEY, 0x157, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
{ KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
{ KE_KEY, 0x139, { KEY_ZOOMRESET } },
- { KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x13c, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x13d, { KEY_BRIGHTNESSUP } },
- { KE_KEY, 0x158, { KEY_WLAN } },
+ { KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x13f, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, 0x157, { KEY_MUTE } },
+ { KE_KEY, 0x158, { KEY_WLAN } },
{ KE_END, 0 },
};
@@ -441,26 +443,24 @@ static u32 sci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
}
/* Illumination support */
-static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
+static void toshiba_illumination_available(struct toshiba_acpi_dev *dev)
{
u32 in[TCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
acpi_status status;
+ dev->illumination_supported = 0;
+ dev->illumination_led_registered = false;
+
if (!sci_open(dev))
- return 0;
+ return;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to query Illumination support failed\n");
- return 0;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("Illumination device not available\n");
- return 0;
- }
-
- return 1;
+ else if (out[0] == TOS_SUCCESS)
+ dev->illumination_supported = 1;
}
static void toshiba_illumination_set(struct led_classdev *cdev,
@@ -468,7 +468,8 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, led_dev);
- u32 state, result;
+ u32 result;
+ u32 state;
/* First request : initialize communication. */
if (!sci_open(dev))
@@ -478,13 +479,8 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
state = brightness ? 1 : 0;
result = sci_write(dev, SCI_ILLUMINATION, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call for illumination failed\n");
- return;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Illumination not supported\n");
- return;
- }
}
static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
@@ -500,11 +496,10 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
/* Check the illumination */
result = sci_read(dev, SCI_ILLUMINATION, &state);
sci_close(dev);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE) {
pr_err("ACPI call for illumination failed\n");
return LED_OFF;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Illumination not supported\n");
+ } else if (result != TOS_SUCCESS) {
return LED_OFF;
}
@@ -512,41 +507,40 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
}
/* KBD Illumination */
-static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
+static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
{
u32 in[TCI_WORDS] = { SCI_GET, SCI_KBD_ILLUM_STATUS, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
acpi_status status;
+ dev->kbd_illum_supported = 0;
+ dev->kbd_led_registered = false;
+
if (!sci_open(dev))
- return 0;
+ return;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query kbd illumination support failed\n");
- return 0;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("Keyboard illumination not available\n");
- return 0;
+ } else if (out[0] == TOS_SUCCESS) {
+ /*
+ * Check the keyboard backlight timeout max value:
+ * the previous kbd backlight implementation set it
+ * to 0x3c0003, while the new one sets it to
+ * 0x3c001a; use this to distinguish between them.
+ */
+ if (out[3] == SCI_KBD_TIME_MAX)
+ dev->kbd_type = 2;
+ else
+ dev->kbd_type = 1;
+ /* Get the current keyboard backlight mode */
+ dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+ /* Get the current time (1-60 seconds) */
+ dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+ /* Flag as supported */
+ dev->kbd_illum_supported = 1;
}
-
- /*
- * Check for keyboard backlight timeout max value,
- * previous kbd backlight implementation set this to
- * 0x3c0003, and now the new implementation set this
- * to 0x3c001a, use this to distinguish between them.
- */
- if (out[3] == SCI_KBD_TIME_MAX)
- dev->kbd_type = 2;
- else
- dev->kbd_type = 1;
- /* Get the current keyboard backlight mode */
- dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
- /* Get the current time (1-60 seconds) */
- dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
-
- return 1;
}
static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
@@ -558,15 +552,12 @@ static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
result = sci_write(dev, SCI_KBD_ILLUM_STATUS, time);
sci_close(dev);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set KBD backlight status failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Keyboard backlight status not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
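From this point on, nearly every toshiba_acpi accessor ends with the same three-way mapping of TCI status words to errnos. Collected in one place as a hypothetical helper (the patch open-codes it in each return statement instead):

	/* TOS_* status -> errno, as the returns above and below encode it. */
	static int tos_to_errno(u32 result)
	{
		if (result == TOS_NOT_SUPPORTED)
			return -ENODEV;
		if (result == TOS_SUCCESS || result == TOS_SUCCESS2)
			return 0;
		return -EIO;		/* TOS_FAILURE and anything else */
	}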
static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
@@ -578,30 +569,27 @@ static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
result = sci_read(dev, SCI_KBD_ILLUM_STATUS, time);
sci_close(dev);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to get KBD backlight status failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Keyboard backlight status not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, kbd_led);
- u32 state, result;
+ u32 result;
+ u32 state;
/* Check the keyboard backlight state */
result = hci_read(dev, HCI_KBD_ILLUMINATION, &state);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE) {
pr_err("ACPI call to get the keyboard backlight failed\n");
return LED_OFF;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Keyboard backlight not supported\n");
+ } else if (result != TOS_SUCCESS) {
return LED_OFF;
}
@@ -613,18 +601,14 @@ static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, kbd_led);
- u32 state, result;
+ u32 result;
+ u32 state;
/* Set the keyboard backlight state */
state = brightness ? 1 : 0;
result = hci_write(dev, HCI_KBD_ILLUMINATION, state);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set KBD Illumination mode failed\n");
- return;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Keyboard backlight not supported\n");
- return;
- }
}
/* TouchPad support */
@@ -637,14 +621,12 @@ static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_TOUCHPAD, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set the touchpad failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
@@ -656,28 +638,27 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_TOUCHPAD, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to query the touchpad failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
/* Eco Mode support */
-static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
{
acpi_status status;
u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
+ dev->eco_supported = 0;
+ dev->eco_led_registered = false;
+
status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get ECO led failed\n");
- } else if (out[0] == TOS_NOT_INSTALLED) {
- pr_info("ECO led not installed");
} else if (out[0] == TOS_INPUT_DATA_ERROR) {
/*
* If we receive 0x8300 (Input Data Error), it means that the
@@ -690,13 +671,11 @@ static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
*/
in[3] = 1;
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to get ECO led failed\n");
else if (out[0] == TOS_SUCCESS)
- return 1;
+ dev->eco_supported = 1;
}
-
- return 0;
}
static enum led_brightness
@@ -709,9 +688,11 @@ toshiba_eco_mode_get_status(struct led_classdev *cdev)
acpi_status status;
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get ECO led failed\n");
return LED_OFF;
+ } else if (out[0] != TOS_SUCCESS) {
+ return LED_OFF;
}
return out[2] ? LED_FULL : LED_OFF;
@@ -729,41 +710,32 @@ static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
/* Switch the Eco Mode led on/off */
in[2] = (brightness) ? 1 : 0;
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to set ECO led failed\n");
- return;
- }
}
/* Accelerometer support */
-static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
+static void toshiba_accelerometer_available(struct toshiba_acpi_dev *dev)
{
u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
acpi_status status;
+ dev->accelerometer_supported = 0;
+
/*
* Check if the accelerometer call exists,
* this call also serves as initialization
*/
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to query the accelerometer failed\n");
- return -EIO;
- } else if (out[0] == TOS_DATA_NOT_AVAILABLE ||
- out[0] == TOS_NOT_INITIALIZED) {
- pr_err("Accelerometer not initialized\n");
- return -EIO;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("Accelerometer not supported\n");
- return -ENODEV;
- }
-
- return 0;
+ else if (out[0] == TOS_SUCCESS)
+ dev->accelerometer_supported = 1;
}
static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
- u32 *xy, u32 *z)
+ u32 *xy, u32 *z)
{
u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
u32 out[TCI_WORDS];
@@ -771,15 +743,18 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
/* Check the Accelerometer status */
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query the accelerometer failed\n");
return -EIO;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ return -ENODEV;
+ } else if (out[0] == TOS_SUCCESS) {
+ *xy = out[2];
+ *z = out[4];
+ return 0;
}
- *xy = out[2];
- *z = out[4];
-
- return 0;
+ return -EIO;
}
/* Sleep (Charge and Music) utilities support */
@@ -789,7 +764,6 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
u32 out[TCI_WORDS];
acpi_status status;
- /* Set the feature to "not supported" in case of error */
dev->usb_sleep_charge_supported = 0;
if (!sci_open(dev))
@@ -801,7 +775,6 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
sci_close(dev);
return;
} else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
sci_close(dev);
return;
} else if (out[0] == TOS_SUCCESS) {
@@ -810,25 +783,15 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
in[5] = SCI_USB_CHARGE_BAT_LVL;
status = tci_raw(dev, in, out);
+ sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
- sci_close(dev);
- return;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
- sci_close(dev);
- return;
} else if (out[0] == TOS_SUCCESS) {
dev->usbsc_bat_level = out[2];
- /*
- * If we reach this point, it means that the laptop has support
- * for this feature and all values are initialized.
- * Set it as supported.
- */
+ /* Flag as supported */
dev->usb_sleep_charge_supported = 1;
}
- sci_close(dev);
}
static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
@@ -841,17 +804,12 @@ static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set USB S&C mode failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
@@ -864,17 +822,12 @@ static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set USB S&C mode failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
@@ -892,17 +845,14 @@ static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB S&C battery level failed\n");
- return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
return -ENODEV;
- } else if (out[0] == TOS_INPUT_DATA_ERROR) {
- return -EIO;
+ } else if (out[0] == TOS_SUCCESS) {
+ *mode = out[2];
+ return 0;
}
- *mode = out[2];
-
- return 0;
+ return -EIO;
}
static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
@@ -919,17 +869,12 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
in[5] = SCI_USB_CHARGE_BAT_LVL;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to set USB S&C battery level failed\n");
- return -EIO;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ else if (out[0] == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (out[0] == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return out[0] == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
@@ -947,16 +892,14 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB Rapid Charge failed\n");
- return -EIO;
- } else if (out[0] == TOS_NOT_SUPPORTED ||
- out[0] == TOS_INPUT_DATA_ERROR) {
- pr_info("USB Rapid Charge not supported\n");
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
return -ENODEV;
+ } else if (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) {
+ *state = out[2];
+ return 0;
}
- *state = out[2];
-
- return 0;
+ return -EIO;
}
static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
@@ -973,17 +916,12 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
in[5] = SCI_USB_CHARGE_RAPID_DSP;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
pr_err("ACPI call to set USB Rapid Charge failed\n");
- return -EIO;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Rapid Charge not supported\n");
+ else if (out[0] == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (out[0] == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO;
}
static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
@@ -995,17 +933,12 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to get Sleep and Music failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Sleep and Music not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1017,17 +950,12 @@ static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set Sleep and Music failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Sleep and Music not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
/* Keyboard function keys */
@@ -1040,15 +968,12 @@ static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
sci_close(dev);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to get KBD function keys failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("KBD function keys not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
@@ -1060,15 +985,12 @@ static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
sci_close(dev);
- if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set KBD function keys failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("KBD function keys not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- }
- return 0;
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
/* Panel Power ON */
@@ -1081,17 +1003,12 @@ static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_PANEL_POWER_ON, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to get Panel Power ON failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Panel Power on not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1103,17 +1020,12 @@ static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_PANEL_POWER_ON, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set Panel Power ON failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("Panel Power ON not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
/* USB Three */
@@ -1126,17 +1038,12 @@ static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_USB_THREE, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to get USB 3 failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB 3 not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -1148,17 +1055,12 @@ static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_USB_THREE, state);
sci_close(dev);
- if (result == TOS_FAILURE) {
+ if (result == TOS_FAILURE)
pr_err("ACPI call to set USB 3 failed\n");
- return -EIO;
- } else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB 3 not supported\n");
+ else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- } else if (result == TOS_INPUT_DATA_ERROR) {
- return -EIO;
- }
- return 0;
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
/* Hotkey Event type */
@@ -1172,35 +1074,39 @@ static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get System type failed\n");
- return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("System type not supported\n");
return -ENODEV;
+ } else if (out[0] == TOS_SUCCESS) {
+ *type = out[3];
+ return 0;
}
- *type = out[3];
-
- return 0;
+ return -EIO;
}
/* Transflective Backlight */
-static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
{
- u32 hci_result;
- u32 status;
+ u32 result = hci_read(dev, HCI_TR_BACKLIGHT, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Transflective Backlight failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
- hci_result = hci_read(dev, HCI_TR_BACKLIGHT, &status);
- *enabled = !status;
- return hci_result == TOS_SUCCESS ? 0 : -EIO;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
-static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 status)
{
- u32 hci_result;
- u32 value = !enable;
+ u32 result = hci_write(dev, HCI_TR_BACKLIGHT, !status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Transflective Backlight failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
- hci_result = hci_write(dev, HCI_TR_BACKLIGHT, value);
- return hci_result == TOS_SUCCESS ? 0 : -EIO;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static struct proc_dir_entry *toshiba_proc_dir;
@@ -1208,23 +1114,26 @@ static struct proc_dir_entry *toshiba_proc_dir;
/* LCD Brightness */
static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
{
- u32 hci_result;
+ u32 result;
u32 value;
int brightness = 0;
if (dev->tr_backlight_supported) {
- bool enabled;
- int ret = get_tr_backlight_status(dev, &enabled);
+ int ret = get_tr_backlight_status(dev, &value);
if (ret)
return ret;
- if (enabled)
+ if (value)
return 0;
brightness++;
}
- hci_result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
- if (hci_result == TOS_SUCCESS)
+ result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get LCD Brightness failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+ if (result == TOS_SUCCESS)
return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
return -EIO;
@@ -1240,8 +1149,8 @@ static int get_lcd_brightness(struct backlight_device *bd)
static int lcd_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
- int value;
int levels;
+ int value;
if (!dev->backlight_dev)
return -ENODEV;
@@ -1255,6 +1164,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
}
pr_err("Error reading LCD brightness\n");
+
return -EIO;
}
@@ -1265,11 +1175,10 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
- u32 hci_result;
+ u32 result;
if (dev->tr_backlight_supported) {
- bool enable = !value;
- int ret = set_tr_backlight_status(dev, enable);
+ int ret = set_tr_backlight_status(dev, !value);
if (ret)
return ret;
@@ -1278,8 +1187,13 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
}
value = value << HCI_LCD_BRIGHTNESS_SHIFT;
- hci_result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
- return hci_result == TOS_SUCCESS ? 0 : -EIO;
+ result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set LCD Brightness failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int set_lcd_status(struct backlight_device *bd)
@@ -1295,24 +1209,22 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char cmd[42];
size_t len;
- int value;
- int ret;
int levels = dev->backlight_dev->props.max_brightness + 1;
+ int value;
len = min(count, sizeof(cmd) - 1);
if (copy_from_user(cmd, buf, len))
return -EFAULT;
cmd[len] = '\0';
- if (sscanf(cmd, " brightness : %i", &value) == 1 &&
- value >= 0 && value < levels) {
- ret = set_lcd_brightness(dev, value);
- if (ret == 0)
- ret = count;
- } else {
- ret = -EINVAL;
- }
- return ret;
+ if (sscanf(cmd, " brightness : %i", &value) != 1 &&
+ value < 0 && value > levels)
+ return -EINVAL;
+
+ if (set_lcd_brightness(dev, value))
+ return -EIO;
+
+ return count;
}
static const struct file_operations lcd_proc_fops = {
@@ -1324,22 +1236,25 @@ static const struct file_operations lcd_proc_fops = {
.write = lcd_proc_write,
};
+/* Video-Out */
static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
{
- u32 hci_result;
+ u32 result = hci_read(dev, HCI_VIDEO_OUT, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Video-Out failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
- hci_result = hci_read(dev, HCI_VIDEO_OUT, status);
- return hci_result == TOS_SUCCESS ? 0 : -EIO;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int video_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
u32 value;
- int ret;
- ret = get_video_status(dev, &value);
- if (!ret) {
+ if (!get_video_status(dev, &value)) {
int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
@@ -1347,9 +1262,10 @@ static int video_proc_show(struct seq_file *m, void *v)
seq_printf(m, "lcd_out: %d\n", is_lcd);
seq_printf(m, "crt_out: %d\n", is_crt);
seq_printf(m, "tv_out: %d\n", is_tv);
+ return 0;
}
- return ret;
+ return -EIO;
}
static int video_proc_open(struct inode *inode, struct file *file)
@@ -1361,13 +1277,14 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
- char *cmd, *buffer;
- int ret;
- int value;
+ char *buffer;
+ char *cmd;
int remain = count;
int lcd_out = -1;
int crt_out = -1;
int tv_out = -1;
+ int value;
+ int ret;
u32 video_out;
cmd = kmalloc(count + 1, GFP_KERNEL);
@@ -1419,7 +1336,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
}
- return ret ? ret : count;
+ return ret ? -EIO : count;
}
static const struct file_operations video_proc_fops = {
@@ -1431,27 +1348,43 @@ static const struct file_operations video_proc_fops = {
.write = video_proc_write,
};
+/* Fan status */
static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
{
- u32 hci_result;
+ u32 result = hci_read(dev, HCI_FAN, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Fan status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
- hci_result = hci_read(dev, HCI_FAN, status);
- return hci_result == TOS_SUCCESS ? 0 : -EIO;
+ return result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_fan_status(struct toshiba_acpi_dev *dev, u32 status)
+{
+ u32 result = hci_write(dev, HCI_FAN, status);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Fan status failed\n");
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int fan_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
- int ret;
u32 value;
- ret = get_fan_status(dev, &value);
- if (!ret) {
- seq_printf(m, "running: %d\n", (value > 0));
- seq_printf(m, "force_on: %d\n", dev->force_fan);
- }
+ if (get_fan_status(dev, &value))
+ return -EIO;
- return ret;
+ seq_printf(m, "running: %d\n", (value > 0));
+ seq_printf(m, "force_on: %d\n", dev->force_fan);
+
+ return 0;
}
static int fan_proc_open(struct inode *inode, struct file *file)
@@ -1466,23 +1399,20 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
char cmd[42];
size_t len;
int value;
- u32 hci_result;
len = min(count, sizeof(cmd) - 1);
if (copy_from_user(cmd, buf, len))
return -EFAULT;
cmd[len] = '\0';
- if (sscanf(cmd, " force_on : %i", &value) == 1 &&
- value >= 0 && value <= 1) {
- hci_result = hci_write(dev, HCI_FAN, value);
- if (hci_result == TOS_SUCCESS)
- dev->force_fan = value;
- else
- return -EIO;
- } else {
+ if (sscanf(cmd, " force_on : %i", &value) != 1 &&
+ value != 0 && value != 1)
return -EINVAL;
- }
+
+ if (set_fan_status(dev, value))
+ return -EIO;
+
+ dev->force_fan = value;
return count;
}
@@ -1499,32 +1429,10 @@ static const struct file_operations fan_proc_fops = {
static int keys_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
- u32 hci_result;
- u32 value;
-
- if (!dev->key_event_valid && dev->system_event_supported) {
- hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
- if (hci_result == TOS_SUCCESS) {
- dev->key_event_valid = 1;
- dev->last_key_event = value;
- } else if (hci_result == TOS_FIFO_EMPTY) {
- /* Better luck next time */
- } else if (hci_result == TOS_NOT_SUPPORTED) {
- /*
- * This is a workaround for an unresolved issue on
- * some machines where system events sporadically
- * become disabled.
- */
- hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
- pr_notice("Re-enabled hotkeys\n");
- } else {
- pr_err("Error reading hotkey status\n");
- return -EIO;
- }
- }
seq_printf(m, "hotkey_ready: %d\n", dev->key_event_valid);
seq_printf(m, "hotkey: 0x%04x\n", dev->last_key_event);
+
return 0;
}
@@ -1641,7 +1549,6 @@ static ssize_t fan_store(struct device *dev,
const char *buf, size_t count)
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- u32 result;
int state;
int ret;
@@ -1652,11 +1559,9 @@ static ssize_t fan_store(struct device *dev,
if (state != 0 && state != 1)
return -EINVAL;
- result = hci_write(toshiba, HCI_FAN, state);
- if (result == TOS_FAILURE)
- return -EIO;
- else if (result == TOS_NOT_SUPPORTED)
- return -ENODEV;
+ ret = set_fan_status(toshiba, state);
+ if (ret)
+ return ret;
return count;
}
@@ -1682,7 +1587,6 @@ static ssize_t kbd_backlight_mode_store(struct device *dev,
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
int mode;
- int time;
int ret;
@@ -1713,7 +1617,7 @@ static ssize_t kbd_backlight_mode_store(struct device *dev,
/* Only make a change if the actual mode has changed */
if (toshiba->kbd_mode != mode) {
/* Shift the time to "base time" (0x3c0000 == 60 seconds) */
- time = toshiba->kbd_time << HCI_MISC_SHIFT;
+ int time = toshiba->kbd_time << HCI_MISC_SHIFT;
/* OR the "base time" to the actual method format */
if (toshiba->kbd_type == 1) {
@@ -2262,6 +2166,81 @@ static struct attribute_group toshiba_attr_group = {
};
/*
+ * Misc device
+ */
+static int toshiba_acpi_smm_bridge(SMMRegisters *regs)
+{
+ u32 in[TCI_WORDS] = { regs->eax, regs->ebx, regs->ecx,
+ regs->edx, regs->esi, regs->edi };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ status = tci_raw(toshiba_acpi, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query SMM registers failed\n");
+ return -EIO;
+ }
+
+ /* Fill out the SMM struct with the TCI call results */
+ regs->eax = out[0];
+ regs->ebx = out[1];
+ regs->ecx = out[2];
+ regs->edx = out[3];
+ regs->esi = out[4];
+ regs->edi = out[5];
+
+ return 0;
+}
+
+static long toshiba_acpi_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+ SMMRegisters regs;
+ int ret;
+
+ if (!argp)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TOSH_SMM:
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+ ret = toshiba_acpi_smm_bridge(&regs);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+ break;
+ case TOSHIBA_ACPI_SCI:
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+ /* Ensure we are being called with a SCI_{GET, SET} register */
+ if (regs.eax != SCI_GET && regs.eax != SCI_SET)
+ return -EINVAL;
+ if (!sci_open(toshiba_acpi))
+ return -EIO;
+ ret = toshiba_acpi_smm_bridge(&regs);
+ sci_close(toshiba_acpi);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct file_operations toshiba_acpi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = toshiba_acpi_ioctl,
+ .llseek = noop_llseek,
+};
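User space reaches this bridge through the /dev/toshiba_acpi misc node registered later in the patch. A hedged usage sketch, assuming the TOSH_SMM request and SMMRegisters layout from linux/toshiba.h (the register values are placeholders, not a recommended query):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/toshiba.h>

	int main(void)
	{
		SMMRegisters regs = { .eax = 0xfe00, .ebx = 0x001e };
		int fd = open("/dev/toshiba_acpi", O_RDWR);

		if (fd < 0 || ioctl(fd, TOSH_SMM, &regs) < 0) {
			perror("toshiba_acpi");
			return 1;
		}
		printf("eax=0x%x ecx=0x%x\n", regs.eax, regs.ecx);
		return 0;
	}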
+
+/*
* Hotkeys
*/
static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
@@ -2361,22 +2340,28 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
{
- u32 hci_result, value;
- int retries = 3;
- int scancode;
-
if (dev->info_supported) {
- scancode = toshiba_acpi_query_hotkey(dev);
- if (scancode < 0)
+ int scancode = toshiba_acpi_query_hotkey(dev);
+
+ if (scancode < 0) {
pr_err("Failed to query hotkey event\n");
- else if (scancode != 0)
+ } else if (scancode != 0) {
toshiba_acpi_report_hotkey(dev, scancode);
+ dev->key_event_valid = 1;
+ dev->last_key_event = scancode;
+ }
} else if (dev->system_event_supported) {
+ u32 result;
+ u32 value;
+ int retries = 3;
+
do {
- hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
- switch (hci_result) {
+ result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
+ switch (result) {
case TOS_SUCCESS:
toshiba_acpi_report_hotkey(dev, (int)value);
+ dev->key_event_valid = 1;
+ dev->last_key_event = value;
break;
case TOS_NOT_SUPPORTED:
/*
@@ -2384,15 +2369,15 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
* issue on some machines where system events
* sporadically become disabled.
*/
- hci_result =
- hci_write(dev, HCI_SYSTEM_EVENT, 1);
- pr_notice("Re-enabled hotkeys\n");
+ result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
+ if (result == TOS_SUCCESS)
+ pr_notice("Re-enabled hotkeys\n");
/* Fall through */
default:
retries--;
break;
}
- } while (retries && hci_result != TOS_FIFO_EMPTY);
+ } while (retries && result != TOS_FIFO_EMPTY);
}
}
@@ -2404,6 +2389,11 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
u32 hci_result;
int error;
+ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) {
+ pr_info("WMI event detected, hotkeys will not be monitored\n");
+ return 0;
+ }
+
error = toshiba_acpi_enable_hotkeys(dev);
if (error)
return error;
@@ -2496,7 +2486,6 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
struct backlight_properties props;
int brightness;
int ret;
- bool enabled;
/*
* Some machines don't support the backlight methods at all, and
@@ -2513,10 +2502,6 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
- /* Determine whether or not BIOS supports transflective backlight */
- ret = get_tr_backlight_status(dev, &enabled);
- dev->tr_backlight_supported = !ret;
-
/*
* Tell acpi-video-detect code to prefer vendor backlight on all
* systems with transflective backlight and on dmi matched systems.
@@ -2552,10 +2537,52 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
+static void print_supported_features(struct toshiba_acpi_dev *dev)
+{
+ pr_info("Supported laptop features:");
+
+ if (dev->hotkey_dev)
+ pr_cont(" hotkeys");
+ if (dev->backlight_dev)
+ pr_cont(" backlight");
+ if (dev->video_supported)
+ pr_cont(" video-out");
+ if (dev->fan_supported)
+ pr_cont(" fan");
+ if (dev->tr_backlight_supported)
+ pr_cont(" transflective-backlight");
+ if (dev->illumination_supported)
+ pr_cont(" illumination");
+ if (dev->kbd_illum_supported)
+ pr_cont(" keyboard-backlight");
+ if (dev->touchpad_supported)
+ pr_cont(" touchpad");
+ if (dev->eco_supported)
+ pr_cont(" eco-led");
+ if (dev->accelerometer_supported)
+ pr_cont(" accelerometer-axes");
+ if (dev->usb_sleep_charge_supported)
+ pr_cont(" usb-sleep-charge");
+ if (dev->usb_rapid_charge_supported)
+ pr_cont(" usb-rapid-charge");
+ if (dev->usb_sleep_music_supported)
+ pr_cont(" usb-sleep-music");
+ if (dev->kbd_function_keys_supported)
+ pr_cont(" special-function-keys");
+ if (dev->panel_power_on_supported)
+ pr_cont(" panel-power-on");
+ if (dev->usb_three_supported)
+ pr_cont(" usb3");
+
+ pr_cont("\n");
+}
+
static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ misc_deregister(&dev->miscdev);
+
remove_toshiba_proc_entries(dev);
if (dev->sysfs_created)
@@ -2574,13 +2601,13 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
backlight_device_unregister(dev->backlight_dev);
- if (dev->illumination_supported)
+ if (dev->illumination_led_registered)
led_classdev_unregister(&dev->led_dev);
if (dev->kbd_led_registered)
led_classdev_unregister(&dev->kbd_led);
- if (dev->eco_supported)
+ if (dev->eco_led_registered)
led_classdev_unregister(&dev->eco_led);
if (toshiba_acpi)
@@ -2627,6 +2654,17 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
return -ENOMEM;
dev->acpi_dev = acpi_dev;
dev->method_hci = hci_method;
+ dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev->miscdev.name = "toshiba_acpi";
+ dev->miscdev.fops = &toshiba_acpi_fops;
+
+ ret = misc_register(&dev->miscdev);
+ if (ret) {
+ pr_err("Failed to register miscdevice\n");
+ kfree(dev);
+ return ret;
+ }
+
acpi_dev->driver_data = dev;
dev_set_drvdata(&acpi_dev->dev, dev);
@@ -2643,29 +2681,35 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
if (toshiba_acpi_setup_keyboard(dev))
pr_info("Unable to activate hotkeys\n");
+ /* Determine whether or not BIOS supports transflective backlight */
+ ret = get_tr_backlight_status(dev, &dummy);
+ dev->tr_backlight_supported = !ret;
+
ret = toshiba_acpi_setup_backlight(dev);
if (ret)
goto error;
- if (toshiba_illumination_available(dev)) {
+ toshiba_illumination_available(dev);
+ if (dev->illumination_supported) {
dev->led_dev.name = "toshiba::illumination";
dev->led_dev.max_brightness = 1;
dev->led_dev.brightness_set = toshiba_illumination_set;
dev->led_dev.brightness_get = toshiba_illumination_get;
if (!led_classdev_register(&acpi_dev->dev, &dev->led_dev))
- dev->illumination_supported = 1;
+ dev->illumination_led_registered = true;
}
- if (toshiba_eco_mode_available(dev)) {
+ toshiba_eco_mode_available(dev);
+ if (dev->eco_supported) {
dev->eco_led.name = "toshiba::eco_mode";
dev->eco_led.max_brightness = 1;
dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
- dev->eco_supported = 1;
+ dev->eco_led_registered = true;
}
- dev->kbd_illum_supported = toshiba_kbd_illum_available(dev);
+ toshiba_kbd_illum_available(dev);
/*
* Only register the LED if KBD illumination is supported
* and the keyboard backlight operation mode is set to FN-Z
@@ -2676,14 +2720,13 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
- dev->kbd_led_registered = 1;
+ dev->kbd_led_registered = true;
}
ret = toshiba_touchpad_get(dev, &dummy);
dev->touchpad_supported = !ret;
- ret = toshiba_accelerometer_supported(dev);
- dev->accelerometer_supported = !ret;
+ toshiba_accelerometer_available(dev);
toshiba_usb_sleep_charge_available(dev);
@@ -2705,6 +2748,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ print_supported_features(dev);
+
/*
* Enable the "Special Functions" mode only if they are
* supported and if they are activated.
@@ -2738,6 +2783,14 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
switch (event) {
case 0x80: /* Hotkeys and some system events */
+ /*
+ * Machines with this WMI GUID aren't supported due to bugs in
+ * their AML.
+ *
+ * Return silently to avoid triggering a netlink event.
+ */
+ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ return;
toshiba_acpi_process_hotkeys(dev);
break;
case 0x81: /* Dock events */
@@ -2781,10 +2834,14 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
static int toshiba_acpi_suspend(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
- u32 result;
- if (dev->hotkey_dev)
+ if (dev->hotkey_dev) {
+ u32 result;
+
result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
+ if (result != TOS_SUCCESS)
+ pr_info("Unable to disable hotkeys\n");
+ }
return 0;
}
@@ -2792,10 +2849,10 @@ static int toshiba_acpi_suspend(struct device *device)
static int toshiba_acpi_resume(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
- int error;
if (dev->hotkey_dev) {
- error = toshiba_acpi_enable_hotkeys(dev);
+ int error = toshiba_acpi_enable_hotkeys(dev);
+
if (error)
pr_info("Unable to re-enable hotkeys\n");
}
@@ -2824,14 +2881,6 @@ static int __init toshiba_acpi_init(void)
{
int ret;
- /*
- * Machines with this WMI guid aren't supported due to bugs in
- * their AML. This check relies on wmi initializing before
- * toshiba_acpi to guarantee guids have been identified.
- */
- if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
- return -ENODEV;
-
toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
if (!toshiba_proc_dir) {
pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 9357aa779048..7ad3295752ef 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -97,8 +97,6 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
/* ??? rule->flags restricted to 8 bits, all tests bogus ??? */
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
res->flags |= IORESOURCE_READONLY;
- if (rule->flags & IORESOURCE_MEM_CACHEABLE)
- res->flags |= IORESOURCE_CACHEABLE;
if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
res->flags |= IORESOURCE_RANGELENGTH;
if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 515f33882ab8..49c1720df59a 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,7 +7,6 @@
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
-#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
{"", 0}
};
-#ifdef CONFIG_ACPI
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
- return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
-}
-#else
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
- struct resource *res;
-
- res = io ? request_region(start, length, desc) :
- request_mem_region(start, length, desc);
- if (res) {
- res->flags &= ~IORESOURCE_BUSY;
- return true;
- }
- return false;
-}
-#endif
-
static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
resource_size_t start = r->start, end = r->end;
- bool reserved;
+ struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
if (!regionid)
return;
snprintf(regionid, 16, "pnp %s", pnpid);
- reserved = __reserve_range(start, end - start + 1, !!port, regionid);
- if (!reserved)
+ if (port)
+ res = request_region(start, end - start + 1, regionid);
+ else
+ res = request_mem_region(start, end - start + 1, regionid);
+ if (res)
+ res->flags &= ~IORESOURCE_BUSY;
+ else
kfree(regionid);
/*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
* have double reservations.
*/
dev_info(&dev->dev, "%pR %s reserved\n", r,
- reserved ? "has been" : "could not be");
+ res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
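
With acpi_reserve_region() gone, reserve_range() above claims the range directly and then clears IORESOURCE_BUSY, so the reservation stays visible in /proc/ioports and /proc/iomem without blocking a later driver from requesting the region. A minimal sketch of that idiom — the helper name is illustrative, not part of the patch:

    #include <linux/ioport.h>
    #include <linux/types.h>

    /* Claim a range so it is listed as reserved, then drop the BUSY flag
     * so a real driver can still request it later. */
    static bool example_reserve(resource_size_t start, resource_size_t n,
    			    const char *name, bool io)
    {
    	struct resource *res;

    	res = io ? request_region(start, n, name)
    		 : request_mem_region(start, n, name);
    	if (!res)
    		return false;	/* someone else already owns the range */

    	res->flags &= ~IORESOURCE_BUSY;
    	return true;
    }
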
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 08beeed5485d..f8758d6febf8 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -333,7 +333,7 @@ config CHARGER_LP8788
config CHARGER_GPIO
tristate "GPIO charger"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y to include support for chargers which report their online status
through a GPIO pin.
@@ -391,26 +391,30 @@ config CHARGER_BQ2415X
config CHARGER_BQ24190
tristate "TI BQ24190 battery charger driver"
- depends on I2C && GPIOLIB
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y to enable support for the TI BQ24190 battery charger.
config CHARGER_BQ24257
tristate "TI BQ24257 battery charger driver"
- depends on I2C && GPIOLIB
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
depends on REGMAP_I2C
help
Say Y to enable support for the TI BQ24257 battery charger.
config CHARGER_BQ24735
tristate "TI BQ24735 battery charger support"
- depends on I2C && GPIOLIB
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y to enable support for the TI BQ24735 battery charger.
config CHARGER_BQ25890
tristate "TI BQ25890 battery charger driver"
- depends on I2C && GPIOLIB
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
select REGMAP_I2C
help
Say Y to enable support for the TI BQ25890 battery charger.
@@ -462,7 +466,8 @@ config BATTERY_RT5033
config CHARGER_RT9455
tristate "Richtek RT9455 battery charger driver"
- depends on I2C && GPIOLIB
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
select REGMAP_I2C
help
Say Y to enable support for Richtek RT9455 battery charger.
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 7f3d389bd601..a67eeace6a89 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,7 +13,7 @@ menuconfig POWER_AVS
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
- depends on ARCH_ROCKCHIP && OF
+ depends on POWER_AVS && ARCH_ROCKCHIP && OF
help
	  Say y here to enable support for IO domains on Rockchip SoCs. It is
	  necessary for the IO domain setting of the SoC to match the
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 3ae35d0590d2..2e300028f0f7 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -43,6 +43,10 @@
#define RK3288_SOC_CON2_FLASH0 BIT(7)
#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+#define RK3368_SOC_CON15 0x43c
+#define RK3368_SOC_CON15_FLASH0 BIT(14)
+#define RK3368_SOC_FLASH_SUPPLY_NUM 2
+
struct rockchip_iodomain;
/**
@@ -158,6 +162,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
+static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
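
The RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16) value follows the Rockchip GRF convention: the upper 16 bits of a write act as a write-enable mask, so only bits whose mask bit is set get modified. A short helper sketch making the idiom explicit — grf_set_bit() is not a kernel API, just an illustration:

    #include <linux/regmap.h>
    #include <linux/types.h>

    /* Hi-word-mask write: bit<<16 enables the write, bit sets/clears it. */
    static int grf_set_bit(struct regmap *grf, unsigned int reg,
    		       u32 bit, bool set)
    {
    	u32 val = (bit << 16) | (set ? bit : 0);

    	return regmap_write(grf, reg, val);
    }
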
/*
* On the rk3188 the io-domains are handled by a shared register with the
 * lower 8 bits still holding drive-strength settings.
@@ -201,6 +224,34 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
.init = rk3288_iodomain_init,
};
+static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
+ .grf_offset = 0x900,
+ .supply_names = {
+ NULL, /* reserved */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "wifi", /* APIO2_VDD (sdio0) */
+ NULL,
+ "audio", /* APIO3_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO4_VDD (gpujtag) */
+ },
+ .init = rk3368_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
+ .grf_offset = 0x100,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+		"pmu",	/* PMU IO domain */
+		"vop",	/* LCDC IO domain */
+ },
+};
+
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -210,6 +261,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rk3288-io-voltage-domain",
.data = (void *)&soc_data_rk3288
},
+ {
+ .compatible = "rockchip,rk3368-io-voltage-domain",
+ .data = (void *)&soc_data_rk3368
+ },
+ {
+ .compatible = "rockchip,rk3368-pmu-io-voltage-domain",
+ .data = (void *)&soc_data_rk3368_pmu
+ },
{ /* sentinel */ },
};
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index e98dcb661cc9..ec212b5be755 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -170,7 +170,7 @@ struct bq2415x_device {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct delayed_work work;
- struct power_supply *notify_psy;
+ struct device_node *notify_node;
struct notifier_block nb;
enum bq2415x_mode reported_mode;/* mode reported by hook function */
enum bq2415x_mode mode; /* currently configured mode */
@@ -792,22 +792,47 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
}
+static bool bq2415x_update_reported_mode(struct bq2415x_device *bq, int mA)
+{
+ enum bq2415x_mode mode;
+
+ if (mA == 0)
+ mode = BQ2415X_MODE_OFF;
+ else if (mA < 500)
+ mode = BQ2415X_MODE_NONE;
+ else if (mA < 1800)
+ mode = BQ2415X_MODE_HOST_CHARGER;
+ else
+ mode = BQ2415X_MODE_DEDICATED_CHARGER;
+
+ if (bq->reported_mode == mode)
+ return false;
+
+ bq->reported_mode = mode;
+ return true;
+}
+
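
bq2415x_update_reported_mode() buckets the CURRENT_MAX report (in mA) into charger modes and returns true only when the cached mode actually changed. A small illustration of the thresholds and the intended call pattern — this is a fragment mirroring how the probe and notifier paths use it, not new driver code:

    /* Threshold mapping, values in mA:
     *        0  -> BQ2415X_MODE_OFF
     *    1..499 -> BQ2415X_MODE_NONE
     *  500..1799 -> BQ2415X_MODE_HOST_CHARGER
     *     1800+ -> BQ2415X_MODE_DEDICATED_CHARGER
     */
    if (bq2415x_update_reported_mode(bq, prop.intval))
    	bq2415x_set_mode(bq, bq->reported_mode);	/* push only real changes */
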
static int bq2415x_notifier_call(struct notifier_block *nb,
unsigned long val, void *v)
{
struct bq2415x_device *bq =
container_of(nb, struct bq2415x_device, nb);
struct power_supply *psy = v;
- enum bq2415x_mode mode;
union power_supply_propval prop;
int ret;
- int mA;
if (val != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if (psy != bq->notify_psy)
- return NOTIFY_OK;
+	/* Ignore the event if it was not sent by notify_node/notify_device */
+ if (bq->notify_node) {
+ if (!psy->dev.parent ||
+ psy->dev.parent->of_node != bq->notify_node)
+ return NOTIFY_OK;
+ } else if (bq->init_data.notify_device) {
+ if (strcmp(psy->desc->name, bq->init_data.notify_device) != 0)
+ return NOTIFY_OK;
+ }
dev_dbg(bq->dev, "notifier call was called\n");
@@ -816,22 +841,9 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (ret != 0)
return NOTIFY_OK;
- mA = prop.intval;
-
- if (mA == 0)
- mode = BQ2415X_MODE_OFF;
- else if (mA < 500)
- mode = BQ2415X_MODE_NONE;
- else if (mA < 1800)
- mode = BQ2415X_MODE_HOST_CHARGER;
- else
- mode = BQ2415X_MODE_DEDICATED_CHARGER;
-
- if (bq->reported_mode == mode)
+ if (!bq2415x_update_reported_mode(bq, prop.intval))
return NOTIFY_OK;
- bq->reported_mode = mode;
-
/* if automode is not enabled do not tell about reported_mode */
if (bq->automode < 1)
return NOTIFY_OK;
@@ -1536,6 +1548,8 @@ static int bq2415x_probe(struct i2c_client *client,
struct device_node *np = client->dev.of_node;
struct bq2415x_platform_data *pdata = client->dev.platform_data;
const struct acpi_device_id *acpi_id = NULL;
+ struct power_supply *notify_psy = NULL;
+ union power_supply_propval prop;
if (!np && !pdata && !ACPI_HANDLE(&client->dev)) {
dev_err(&client->dev, "Neither devicetree, nor platform data, nor ACPI support\n");
@@ -1569,25 +1583,6 @@ static int bq2415x_probe(struct i2c_client *client,
goto error_2;
}
- if (np) {
- bq->notify_psy = power_supply_get_by_phandle(np,
- "ti,usb-charger-detection");
-
- if (IS_ERR(bq->notify_psy)) {
- dev_info(&client->dev,
- "no 'ti,usb-charger-detection' property (err=%ld)\n",
- PTR_ERR(bq->notify_psy));
- bq->notify_psy = NULL;
- } else if (!bq->notify_psy) {
- ret = -EPROBE_DEFER;
- goto error_2;
- }
- } else if (pdata && pdata->notify_device) {
- bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
- } else {
- bq->notify_psy = NULL;
- }
-
i2c_set_clientdata(client, bq);
bq->id = num;
@@ -1607,32 +1602,35 @@ static int bq2415x_probe(struct i2c_client *client,
"ti,current-limit",
&bq->init_data.current_limit);
if (ret)
- goto error_3;
+ goto error_2;
ret = device_property_read_u32(bq->dev,
"ti,weak-battery-voltage",
&bq->init_data.weak_battery_voltage);
if (ret)
- goto error_3;
+ goto error_2;
ret = device_property_read_u32(bq->dev,
"ti,battery-regulation-voltage",
&bq->init_data.battery_regulation_voltage);
if (ret)
- goto error_3;
+ goto error_2;
ret = device_property_read_u32(bq->dev,
"ti,charge-current",
&bq->init_data.charge_current);
if (ret)
- goto error_3;
+ goto error_2;
ret = device_property_read_u32(bq->dev,
"ti,termination-current",
&bq->init_data.termination_current);
if (ret)
- goto error_3;
+ goto error_2;
ret = device_property_read_u32(bq->dev,
"ti,resistor-sense",
&bq->init_data.resistor_sense);
if (ret)
- goto error_3;
+ goto error_2;
+ if (np)
+ bq->notify_node = of_parse_phandle(np,
+ "ti,usb-charger-detection", 0);
} else {
memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
}
@@ -1642,56 +1640,72 @@ static int bq2415x_probe(struct i2c_client *client,
ret = bq2415x_power_supply_init(bq);
if (ret) {
dev_err(bq->dev, "failed to register power supply: %d\n", ret);
- goto error_3;
+ goto error_2;
}
ret = bq2415x_sysfs_init(bq);
if (ret) {
dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
- goto error_4;
+ goto error_3;
}
ret = bq2415x_set_defaults(bq);
if (ret) {
dev_err(bq->dev, "failed to set default values: %d\n", ret);
- goto error_5;
+ goto error_4;
}
- if (bq->notify_psy) {
+ if (bq->notify_node || bq->init_data.notify_device) {
bq->nb.notifier_call = bq2415x_notifier_call;
ret = power_supply_reg_notifier(&bq->nb);
if (ret) {
dev_err(bq->dev, "failed to reg notifier: %d\n", ret);
- goto error_6;
+ goto error_4;
}
- /* Query for initial reported_mode and set it */
- bq2415x_notifier_call(&bq->nb, PSY_EVENT_PROP_CHANGED,
- bq->notify_psy);
- bq2415x_set_mode(bq, bq->reported_mode);
-
bq->automode = 1;
- dev_info(bq->dev, "automode enabled\n");
+ dev_info(bq->dev, "automode supported, waiting for events\n");
} else {
bq->automode = -1;
dev_info(bq->dev, "automode not supported\n");
}
+ /* Query for initial reported_mode and set it */
+ if (bq->nb.notifier_call) {
+ if (np) {
+ notify_psy = power_supply_get_by_phandle(np,
+ "ti,usb-charger-detection");
+ if (IS_ERR(notify_psy))
+ notify_psy = NULL;
+ } else if (bq->init_data.notify_device) {
+ notify_psy = power_supply_get_by_name(
+ bq->init_data.notify_device);
+ }
+ }
+ if (notify_psy) {
+ ret = power_supply_get_property(notify_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+ power_supply_put(notify_psy);
+
+ if (ret == 0) {
+ bq2415x_update_reported_mode(bq, prop.intval);
+ bq2415x_set_mode(bq, bq->reported_mode);
+ }
+ }
+
INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work);
bq2415x_set_autotimer(bq, 1);
dev_info(bq->dev, "driver registered\n");
return 0;
-error_6:
-error_5:
- bq2415x_sysfs_exit(bq);
error_4:
- bq2415x_power_supply_exit(bq);
+ bq2415x_sysfs_exit(bq);
error_3:
- if (bq->notify_psy)
- power_supply_put(bq->notify_psy);
+ bq2415x_power_supply_exit(bq);
error_2:
+ if (bq->notify_node)
+ of_node_put(bq->notify_node);
kfree(name);
error_1:
mutex_lock(&bq2415x_id_mutex);
@@ -1707,10 +1721,11 @@ static int bq2415x_remove(struct i2c_client *client)
{
struct bq2415x_device *bq = i2c_get_clientdata(client);
- if (bq->notify_psy) {
+ if (bq->nb.notifier_call)
power_supply_unreg_notifier(&bq->nb);
- power_supply_put(bq->notify_psy);
- }
+
+ if (bq->notify_node)
+ of_node_put(bq->notify_node);
bq2415x_sysfs_exit(bq);
bq2415x_power_supply_exit(bq);
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index 052db78c3736..469a452cbe10 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -902,7 +902,7 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
}
static enum power_supply_property bq24190_charger_properties[] = {
- POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -1515,6 +1515,7 @@ static const struct i2c_device_id bq24190_i2c_ids[] = {
{ "bq24190", BQ24190_REG_VPRS_PN_24190 },
{ },
};
+MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
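
Adding MODULE_DEVICE_TABLE(i2c, ...) exports the ID list into the module's .modinfo, so depmod records it in modules.alias and udev can autoload the driver when a matching i2c client appears. A comment-only sketch of the flow — the sysfs path and bus number are examples, not taken from the patch:

    /* Instantiating the client, e.g. from user space:
     *
     *   echo bq24190 0x6b > /sys/bus/i2c/devices/i2c-1/new_device
     *
     * makes the i2c core emit MODALIAS=i2c:bq24190, which udev resolves
     * through modules.alias and then modprobes this driver. */
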
#ifdef CONFIG_OF
static const struct of_device_id bq24190_of_match[] = {
@@ -1534,7 +1535,6 @@ static struct i2c_driver bq24190_driver = {
.id_table = bq24190_i2c_ids,
.driver = {
.name = "bq24190-charger",
- .owner = THIS_MODULE,
.pm = &bq24190_pm_ops,
.of_match_table = of_match_ptr(bq24190_of_match),
},
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c
index 961a18930027..eb2b3689de97 100644
--- a/drivers/power/bq24735-charger.c
+++ b/drivers/power/bq24735-charger.c
@@ -267,8 +267,9 @@ static int bq24735_charger_probe(struct i2c_client *client,
name = (char *)charger->pdata->name;
if (!name) {
- name = kasprintf(GFP_KERNEL, "bq24735@%s",
- dev_name(&client->dev));
+ name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "bq24735@%s",
+ dev_name(&client->dev));
if (!name) {
dev_err(&client->dev, "Failed to alloc device name\n");
return -ENOMEM;
@@ -296,23 +297,21 @@ static int bq24735_charger_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
ret);
- goto err_free_name;
+ return ret;
} else if (ret != 0x0040) {
dev_err(&client->dev,
"manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
- ret = -ENODEV;
- goto err_free_name;
+ return -ENODEV;
}
ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
if (ret < 0) {
dev_err(&client->dev, "Failed to read device id : %d\n", ret);
- goto err_free_name;
+ return ret;
} else if (ret != 0x000B) {
dev_err(&client->dev,
"device id mismatch. 0x000b != 0x%04x\n", ret);
- ret = -ENODEV;
- goto err_free_name;
+ return -ENODEV;
}
if (gpio_is_valid(charger->pdata->status_gpio)) {
@@ -331,7 +330,7 @@ static int bq24735_charger_probe(struct i2c_client *client,
ret = bq24735_config_charger(charger);
if (ret < 0) {
dev_err(&client->dev, "failed in configuring charger");
- goto err_free_name;
+ return ret;
}
/* check for AC adapter presence */
@@ -339,17 +338,17 @@ static int bq24735_charger_probe(struct i2c_client *client,
ret = bq24735_enable_charging(charger);
if (ret < 0) {
dev_err(&client->dev, "Failed to enable charging\n");
- goto err_free_name;
+ return ret;
}
}
- charger->charger = power_supply_register(&client->dev, supply_desc,
- &psy_cfg);
+ charger->charger = devm_power_supply_register(&client->dev, supply_desc,
+ &psy_cfg);
if (IS_ERR(charger->charger)) {
ret = PTR_ERR(charger->charger);
dev_err(&client->dev, "Failed to register power supply: %d\n",
ret);
- goto err_free_name;
+ return ret;
}
if (client->irq) {
@@ -364,34 +363,11 @@ static int bq24735_charger_probe(struct i2c_client *client,
dev_err(&client->dev,
"Unable to register IRQ %d err %d\n",
client->irq, ret);
- goto err_unregister_supply;
+ return ret;
}
}
return 0;
-err_unregister_supply:
- power_supply_unregister(charger->charger);
-err_free_name:
- if (name != charger->pdata->name)
- kfree(name);
-
- return ret;
-}
-
-static int bq24735_charger_remove(struct i2c_client *client)
-{
- struct bq24735 *charger = i2c_get_clientdata(client);
-
- if (charger->client->irq)
- devm_free_irq(&charger->client->dev, charger->client->irq,
- &charger->charger);
-
- power_supply_unregister(charger->charger);
-
- if (charger->charger_desc.name != charger->pdata->name)
- kfree(charger->charger_desc.name);
-
- return 0;
}
static const struct i2c_device_id bq24735_charger_id[] = {
@@ -409,11 +385,9 @@ MODULE_DEVICE_TABLE(of, bq24735_match_ids);
static struct i2c_driver bq24735_charger_driver = {
.driver = {
.name = "bq24735-charger",
- .owner = THIS_MODULE,
.of_match_table = bq24735_match_ids,
},
.probe = bq24735_charger_probe,
- .remove = bq24735_charger_remove,
.id_table = bq24735_charger_id,
};
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index b6b98378faa3..8287261fd978 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -39,47 +39,49 @@
#include <linux/power/bq27x00_battery.h>
-#define DRIVER_VERSION "1.2.0"
-
-#define BQ27x00_REG_TEMP 0x06
-#define BQ27x00_REG_VOLT 0x08
-#define BQ27x00_REG_AI 0x14
-#define BQ27x00_REG_FLAGS 0x0A
-#define BQ27x00_REG_TTE 0x16
-#define BQ27x00_REG_TTF 0x18
-#define BQ27x00_REG_TTECP 0x26
-#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
-#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
-#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
-#define BQ27x00_REG_AE 0x22 /* Available energy */
-#define BQ27x00_POWER_AVG 0x24
-
-#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
-#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
-#define BQ27000_FLAG_FC BIT(5)
-#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
-
-#define BQ27500_REG_SOC 0x2C
-#define BQ27500_REG_DCAP 0x3C /* Design capacity */
-#define BQ27500_FLAG_DSC BIT(0)
-#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
-#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
-#define BQ27500_FLAG_FC BIT(9)
-#define BQ27500_FLAG_OTC BIT(15)
-
-#define BQ27742_POWER_AVG 0x76
-
-#define BQ27510_REG_SOC 0x20
-#define BQ27510_REG_DCAP 0x2E /* Design capacity */
-#define BQ27510_REG_CYCT 0x1E /* Cycle count total */
+#define DRIVER_VERSION "1.2.0"
+
+#define BQ27XXX_MANUFACTURER "Texas Instruments"
+
+#define BQ27x00_REG_TEMP 0x06
+#define BQ27x00_REG_VOLT 0x08
+#define BQ27x00_REG_AI 0x14
+#define BQ27x00_REG_FLAGS 0x0A
+#define BQ27x00_REG_TTE 0x16
+#define BQ27x00_REG_TTF 0x18
+#define BQ27x00_REG_TTECP 0x26
+#define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */
+#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
+#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
+#define BQ27x00_REG_AE 0x22 /* Available energy */
+#define BQ27x00_POWER_AVG 0x24
+
+#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
+#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
+#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
+#define BQ27000_FLAG_FC BIT(5)
+#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
+
+#define BQ27500_REG_SOC 0x2C
+#define BQ27500_REG_DCAP 0x3C /* Design capacity */
+#define BQ27500_FLAG_DSC BIT(0)
+#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
+#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
+#define BQ27500_FLAG_FC BIT(9)
+#define BQ27500_FLAG_OTC BIT(15)
+
+#define BQ27742_POWER_AVG 0x76
+
+#define BQ27510_REG_SOC 0x20
+#define BQ27510_REG_DCAP 0x2E /* Design capacity */
+#define BQ27510_REG_CYCT 0x1E /* Cycle count total */
/* bq27425 register addresses are same as bq27x00 addresses minus 4 */
-#define BQ27425_REG_OFFSET 0x04
+#define BQ27425_REG_OFFSET 0x04
#define BQ27425_REG_SOC (0x1C + BQ27425_REG_OFFSET)
-#define BQ27425_REG_DCAP (0x3C + BQ27425_REG_OFFSET)
+#define BQ27425_REG_DCAP (0x3C + BQ27425_REG_OFFSET)
#define BQ27000_RS 20 /* Resistor sense */
#define BQ27x00_POWER_CONSTANT (256 * 29200 / 1000)
@@ -106,7 +108,7 @@ struct bq27x00_reg_cache {
};
struct bq27x00_device_info {
- struct device *dev;
+ struct device *dev;
int id;
enum bq27x00_chip chip;
@@ -142,6 +144,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
};
static enum power_supply_property bq27425_battery_props[] = {
@@ -156,6 +159,7 @@ static enum power_supply_property bq27425_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_MANUFACTURER,
};
static enum power_supply_property bq27742_battery_props[] = {
@@ -174,6 +178,7 @@ static enum power_supply_property bq27742_battery_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
};
static enum power_supply_property bq27510_battery_props[] = {
@@ -192,19 +197,20 @@ static enum power_supply_property bq27510_battery_props[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
};
static unsigned int poll_interval = 360;
module_param(poll_interval, uint, 0644);
-MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
- "0 disables polling");
+MODULE_PARM_DESC(poll_interval,
+ "battery poll interval in seconds - 0 disables polling");
/*
* Common code for BQ27x00 devices
*/
static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
- bool single)
+ bool single)
{
if (di->chip == BQ27425)
return di->bus.read(di, reg - BQ27425_REG_OFFSET, single);
@@ -313,8 +319,9 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
ilmd = bq27x00_read(di, BQ27510_REG_DCAP, false);
else
ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
- } else
+ } else {
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
+ }
if (ilmd < 0) {
dev_dbg(di->dev, "error reading initial last measured discharge\n");
@@ -445,7 +452,7 @@ static int bq27x00_battery_read_health(struct bq27x00_device_info *di)
return tval;
}
- if ((di->chip == BQ27500)) {
+ if (di->chip == BQ27500) {
if (tval & BQ27500_FLAG_SOCF)
tval = POWER_SUPPLY_HEALTH_DEAD;
else if (tval & BQ27500_FLAG_OTC)
@@ -559,7 +566,7 @@ static void bq27x00_battery_poll(struct work_struct *work)
* Or 0 if something fails.
*/
static int bq27x00_battery_current(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
int curr;
int flags;
@@ -587,7 +594,7 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
}
static int bq27x00_battery_status(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
int status;
@@ -615,7 +622,7 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
}
static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
int level;
@@ -649,7 +656,7 @@ static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
* Or < 0 if something fails.
*/
static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
int volt;
@@ -665,7 +672,7 @@ static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
}
static int bq27x00_simple_value(int value,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
if (value < 0)
return value;
@@ -749,6 +756,9 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_HEALTH:
ret = bq27x00_simple_value(di->cache.health, val);
break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ27XXX_MANUFACTURER;
+ break;
default:
return -EINVAL;
}
@@ -827,7 +837,6 @@ static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
mutex_destroy(&di->lock);
}
-
/* i2c specific code */
#ifdef CONFIG_BATTERY_BQ27X00_I2C
@@ -888,14 +897,12 @@ static int bq27x00_battery_probe(struct i2c_client *client,
name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
- dev_err(&client->dev, "failed to allocate device name\n");
retval = -ENOMEM;
goto batt_failed;
}
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di) {
- dev_err(&client->dev, "failed to allocate device info data\n");
retval = -ENOMEM;
goto batt_failed;
}
@@ -956,8 +963,9 @@ static struct i2c_driver bq27x00_battery_driver = {
static inline int bq27x00_battery_i2c_init(void)
{
int ret = i2c_add_driver(&bq27x00_battery_driver);
+
if (ret)
- printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n");
+ pr_err("Unable to register BQ27x00 i2c driver\n");
return ret;
}
@@ -978,7 +986,7 @@ static inline void bq27x00_battery_i2c_exit(void) {};
#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
- bool single)
+ bool single)
{
struct device *dev = di->dev;
struct bq27000_platform_data *pdata = dev->platform_data;
@@ -1028,10 +1036,8 @@ static int bq27000_battery_probe(struct platform_device *pdev)
}
di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "failed to allocate device info data\n");
+ if (!di)
return -ENOMEM;
- }
platform_set_drvdata(pdev, di);
@@ -1064,8 +1070,9 @@ static struct platform_driver bq27000_battery_driver = {
static inline int bq27x00_battery_platform_init(void)
{
int ret = platform_driver_register(&bq27000_battery_driver);
+
if (ret)
- printk(KERN_ERR "Unable to register BQ27000 platform driver\n");
+ pr_err("Unable to register BQ27000 platform driver\n");
return ret;
}
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 1c202ccbd2a6..907293e6f2a4 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -619,7 +619,7 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
#ifdef CONFIG_THERMAL
if (cm->tzd_batt) {
- ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
+ ret = thermal_zone_get_temp(cm->tzd_batt, temp);
if (!ret)
/* Calibrate temperature unit */
*temp /= 100;
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index a7a0427343f3..d3743d0ad55b 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -637,10 +637,6 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
- count = min_t(loff_t, count,
- DS2780_EEPROM_BLOCK1_END -
- DS2780_EEPROM_BLOCK1_START + 1 - off);
-
return ds2780_read_block(dev_info, buf,
DS2780_EEPROM_BLOCK1_START + off, count);
}
@@ -655,10 +651,6 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2780_EEPROM_BLOCK1_END -
- DS2780_EEPROM_BLOCK1_START + 1 - off);
-
ret = ds2780_write(dev_info, buf,
DS2780_EEPROM_BLOCK1_START + off, count);
if (ret < 0)
@@ -676,7 +668,7 @@ static struct bin_attribute ds2780_param_eeprom_bin_attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1,
+ .size = DS2780_PARAM_EEPROM_SIZE,
.read = ds2780_read_param_eeprom_bin,
.write = ds2780_write_param_eeprom_bin,
};
@@ -690,10 +682,6 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
- count = min_t(loff_t, count,
- DS2780_EEPROM_BLOCK0_END -
- DS2780_EEPROM_BLOCK0_START + 1 - off);
-
return ds2780_read_block(dev_info, buf,
DS2780_EEPROM_BLOCK0_START + off, count);
}
@@ -708,10 +696,6 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2780_EEPROM_BLOCK0_END -
- DS2780_EEPROM_BLOCK0_START + 1 - off);
-
ret = ds2780_write(dev_info, buf,
DS2780_EEPROM_BLOCK0_START + off, count);
if (ret < 0)
@@ -729,7 +713,7 @@ static struct bin_attribute ds2780_user_eeprom_bin_attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1,
+ .size = DS2780_USER_EEPROM_SIZE,
.read = ds2780_read_user_eeprom_bin,
.write = ds2780_write_user_eeprom_bin,
};
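
The min_t() clamping can go because the sysfs core already bounds binary-attribute accesses against .size before invoking the handler (shortening count and returning 0 at EOF) — which is also why the attribute sizes above now carry the real EEPROM block sizes instead of 0. A hedged sketch of a read handler written against that guarantee; the EXAMPLE_* names and example_read_block() are stand-ins:

    #include <linux/sysfs.h>

    /* With .size set, sysfs clamps (off, count) before this runs, so no
     * manual min_t() bounds check is needed here. */
    static ssize_t example_eeprom_read(struct file *filp, struct kobject *kobj,
    				   struct bin_attribute *attr,
    				   char *buf, loff_t off, size_t count)
    {
    	return example_read_block(buf, EXAMPLE_EEPROM_START + off, count);
    }

    static const struct bin_attribute example_eeprom_attr = {
    	.attr = { .name = "eeprom", .mode = 0444 },	/* read-only example */
    	.size = EXAMPLE_EEPROM_SIZE,	/* enables the sysfs-side clamping */
    	.read = example_eeprom_read,
    };
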
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index 56d583dae908..c3680024f399 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -639,8 +639,6 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
-
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
}
@@ -655,8 +653,6 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
-
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
if (ret < 0)
@@ -688,8 +684,6 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
-
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -705,8 +699,6 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
-
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
if (ret < 0)
diff --git a/drivers/power/ltc2941-battery-gauge.c b/drivers/power/ltc2941-battery-gauge.c
index daeb0860736c..4adf2ba021ce 100644
--- a/drivers/power/ltc2941-battery-gauge.c
+++ b/drivers/power/ltc2941-battery-gauge.c
@@ -14,7 +14,6 @@
#include <linux/swab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/idr.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -63,15 +62,11 @@ struct ltc294x_info {
struct power_supply_desc supply_desc; /* Supply description */
struct delayed_work work; /* Work scheduler */
int num_regs; /* Number of registers (chip type) */
- int id; /* Identifier of ltc294x chip */
int charge; /* Last charge register content */
int r_sense; /* mOhm */
int Qlsb; /* nAh */
};
-static DEFINE_IDR(ltc294x_id);
-static DEFINE_MUTEX(ltc294x_lock);
-
static inline int convert_bin_to_uAh(
const struct ltc294x_info *info, int Q)
{
@@ -371,10 +366,6 @@ static int ltc294x_i2c_remove(struct i2c_client *client)
cancel_delayed_work(&info->work);
power_supply_unregister(info->supply);
- kfree(info->supply_desc.name);
- mutex_lock(&ltc294x_lock);
- idr_remove(&ltc294x_id, info->id);
- mutex_unlock(&ltc294x_lock);
return 0;
}
@@ -384,44 +375,28 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
struct power_supply_config psy_cfg = {};
struct ltc294x_info *info;
int ret;
- int num;
u32 prescaler_exp;
s32 r_sense;
struct device_node *np;
- mutex_lock(&ltc294x_lock);
- ret = idr_alloc(&ltc294x_id, client, 0, 0, GFP_KERNEL);
- mutex_unlock(&ltc294x_lock);
- if (ret < 0)
- goto fail_id;
-
- num = ret;
-
info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
- if (info == NULL) {
- ret = -ENOMEM;
- goto fail_info;
- }
+ if (info == NULL)
+ return -ENOMEM;
i2c_set_clientdata(client, info);
- info->num_regs = id->driver_data;
- info->supply_desc.name = kasprintf(GFP_KERNEL, "%s-%d", client->name,
- num);
- if (!info->supply_desc.name) {
- ret = -ENOMEM;
- goto fail_name;
- }
-
np = of_node_get(client->dev.of_node);
+ info->num_regs = id->driver_data;
+ info->supply_desc.name = np->name;
+
	/* r_sense can be negative when sense+ is connected to the battery
	 * instead of sense-. This results in reversed measurements. */
ret = of_property_read_u32(np, "lltc,resistor-sense", &r_sense);
if (ret < 0) {
dev_err(&client->dev,
"Could not find lltc,resistor-sense in devicetree\n");
- goto fail_name;
+ return ret;
}
info->r_sense = r_sense;
@@ -446,7 +421,6 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
}
info->client = client;
- info->id = num;
info->supply_desc.type = POWER_SUPPLY_TYPE_BATTERY;
info->supply_desc.properties = ltc294x_properties;
if (info->num_regs >= LTC294X_REG_TEMPERATURE_LSB)
@@ -473,31 +447,19 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
ret = ltc294x_reset(info, prescaler_exp);
if (ret < 0) {
dev_err(&client->dev, "Communication with chip failed\n");
- goto fail_comm;
+ return ret;
}
info->supply = power_supply_register(&client->dev, &info->supply_desc,
&psy_cfg);
if (IS_ERR(info->supply)) {
dev_err(&client->dev, "failed to register ltc2941\n");
- ret = PTR_ERR(info->supply);
- goto fail_register;
+ return PTR_ERR(info->supply);
} else {
schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
}
return 0;
-
-fail_register:
- kfree(info->supply_desc.name);
-fail_comm:
-fail_name:
-fail_info:
- mutex_lock(&ltc294x_lock);
- idr_remove(&ltc294x_id, num);
- mutex_unlock(&ltc294x_lock);
-fail_id:
- return ret;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/max77693_charger.c b/drivers/power/max77693_charger.c
index 754879eb59f6..060cab5ae3aa 100644
--- a/drivers/power/max77693_charger.c
+++ b/drivers/power/max77693_charger.c
@@ -20,6 +20,7 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#define MAX77693_CHARGER_NAME "max77693-charger"
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index a944338a39de..9e29b1321648 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -521,11 +521,6 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
int ret;
int i;
- if (off >= EEPROM_SIZE)
- return 0;
- if (off + count > EEPROM_SIZE)
- count = EEPROM_SIZE - off;
-
for (i = 0; i < count; i++) {
ec_byte = EEPROM_START + off + i;
ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &buf[i], 1);
@@ -545,7 +540,7 @@ static struct bin_attribute olpc_bat_eeprom = {
.name = "eeprom",
.mode = S_IRUGO,
},
- .size = 0,
+ .size = EEPROM_SIZE,
.read = olpc_bat_eeprom_read,
};
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index cc0893ffbf7e..3a45cc0c4dce 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -1244,7 +1244,6 @@ static struct i2c_driver pm2xxx_charger_driver = {
.remove = pm2xxx_wall_charger_remove,
.driver = {
.name = "pm2xxx-wall_charger",
- .owner = THIS_MODULE,
.pm = PM2XXX_PM_OPS,
},
.id_table = pm2xxx_id,
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 869284c2e1e8..456987c88baa 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -557,7 +557,7 @@ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
#ifdef CONFIG_THERMAL
static int power_supply_read_temp(struct thermal_zone_device *tzd,
- unsigned long *temp)
+ int *temp)
{
struct power_supply *psy;
union power_supply_propval val;
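
This follows the 4.3 thermal-core change that switched temperature reporting from unsigned long to plain int (in millidegrees Celsius), so every get_temp callback and thermal_zone_get_temp() caller must use matching types — as the charger-manager hunk above also does. A minimal callback sketch under that API, with illustrative names:

    #include <linux/thermal.h>

    /* get_temp now takes an int pointer; negative temperatures are
     * representable, which the old unsigned long interface mangled. */
    static int example_get_temp(struct thermal_zone_device *tzd, int *temp)
    {
    	*temp = 42000;	/* 42.000 degrees Celsius, in millidegrees */
    	return 0;
    }
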
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 17d93a73c513..5a0189bf19bb 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -166,5 +166,12 @@ config POWER_RESET_RMOBILE
help
Reboot support for Renesas R-Mobile and SH-Mobile SoCs.
+config POWER_RESET_ZX
+ tristate "ZTE SoCs reset driver"
+ depends on ARCH_ZX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Reboot support for ZTE SoCs.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index dbe06c368743..096fa67047f6 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
+obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 36dc52fb2ec8..c378d4ec826f 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -123,6 +123,15 @@ static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
+static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+ at91_rstc_base);
+
+ return NOTIFY_DONE;
+}
+
static void __init at91_reset_status(struct platform_device *pdev)
{
u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
@@ -155,13 +164,13 @@ static void __init at91_reset_status(struct platform_device *pdev)
static const struct of_device_id at91_ramc_of_match[] = {
{ .compatible = "atmel,at91sam9260-sdramc", },
{ .compatible = "atmel,at91sam9g45-ddramc", },
- { .compatible = "atmel,sama5d3-ddramc", },
{ /* sentinel */ }
};
static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
{ .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
+ { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
{ /* sentinel */ }
};
@@ -181,13 +190,16 @@ static int at91_reset_of_probe(struct platform_device *pdev)
return -ENODEV;
}
- for_each_matching_node(np, at91_ramc_of_match) {
- at91_ramc_base[idx] = of_iomap(np, 0);
- if (!at91_ramc_base[idx]) {
- dev_err(&pdev->dev, "Could not map ram controller address\n");
- return -ENODEV;
+ if (!of_device_is_compatible(pdev->dev.of_node, "atmel,sama5d3-rstc")) {
+ /* we need to shutdown the ddr controller, so get ramc base */
+ for_each_matching_node(np, at91_ramc_of_match) {
+ at91_ramc_base[idx] = of_iomap(np, 0);
+ if (!at91_ramc_base[idx]) {
+ dev_err(&pdev->dev, "Could not map ram controller address\n");
+ return -ENODEV;
+ }
+ idx++;
}
- idx++;
}
match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
new file mode 100644
index 000000000000..a5b009673d0e
--- /dev/null
+++ b/drivers/power/reset/zx-reboot.c
@@ -0,0 +1,80 @@
+/*
+ * ZTE zx296702 SoC reset code
+ *
+ * Copyright (c) 2015 Linaro Ltd.
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+static void __iomem *base;
+static void __iomem *pcu_base;
+
+static int zx_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ writel_relaxed(1, base + 0xb0);
+ writel_relaxed(1, pcu_base + 0x34);
+
+ mdelay(50);
+ pr_emerg("Unable to restart system\n");
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block zx_restart_nb = {
+ .notifier_call = zx_restart_handler,
+ .priority = 128,
+};
+
+static int zx_reboot_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ WARN(1, "failed to map base address");
+ return -ENODEV;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "zte,zx296702-pcu");
+ pcu_base = of_iomap(np, 0);
+ if (!pcu_base) {
+ iounmap(base);
+ WARN(1, "failed to map pcu_base address");
+ return -ENODEV;
+ }
+
+ err = register_restart_handler(&zx_restart_nb);
+ if (err)
+ dev_err(&pdev->dev, "Register restart handler failed(err=%d)\n",
+ err);
+
+ return err;
+}
+
+static const struct of_device_id zx_reboot_of_match[] = {
+ { .compatible = "zte,sysctrl" },
+ {}
+};
+
+static struct platform_driver zx_reboot_driver = {
+ .probe = zx_reboot_probe,
+ .driver = {
+ .name = "zx-reboot",
+ .of_match_table = zx_reboot_of_match,
+ },
+};
+module_platform_driver(zx_reboot_driver);
diff --git a/drivers/power/rt5033_battery.c b/drivers/power/rt5033_battery.c
index a7a6877b4e16..bcdd83048492 100644
--- a/drivers/power/rt5033_battery.c
+++ b/drivers/power/rt5033_battery.c
@@ -165,7 +165,7 @@ static const struct i2c_device_id rt5033_battery_id[] = {
{ "rt5033-battery", },
{ }
};
-MODULE_DEVICE_TABLE(platform, rt5033_battery_id);
+MODULE_DEVICE_TABLE(i2c, rt5033_battery_id);
static struct i2c_driver rt5033_battery_driver = {
.driver = {
diff --git a/drivers/power/rt9455_charger.c b/drivers/power/rt9455_charger.c
index 08baac6e3ada..a49a9d44bdda 100644
--- a/drivers/power/rt9455_charger.c
+++ b/drivers/power/rt9455_charger.c
@@ -973,7 +973,6 @@ static int rt9455_irq_handler_check_irq2_register(struct rt9455_info *info,
if (irq2 & GET_MASK(F_CHRVPI)) {
dev_dbg(dev, "Charger fault occurred\n");
- alert_userspace = true;
/*
* CHRVPI bit is set in 2 cases:
* 1. when the power source is connected to the charger.
@@ -981,6 +980,9 @@ static int rt9455_irq_handler_check_irq2_register(struct rt9455_info *info,
* To identify the case, PWR_RDY bit is checked. Because
* PWR_RDY bit is set / cleared after CHRVPI interrupt is
		 * triggered, delayed_work is used to read the PWR_RDY bit later.
+		 * Also, do not set alert_userspace to true, because there is no
+		 * need to notify userspace when the CHRVPI interrupt occurs.
+ * Userspace will be notified after PWR_RDY bit is read.
*/
queue_delayed_work(system_power_efficient_wq,
&info->pwr_rdy_work,
@@ -1178,7 +1180,7 @@ static irqreturn_t rt9455_irq_handler_thread(int irq, void *data)
/*
* Sometimes, an interrupt occurs while rt9455_probe() function
* is executing and power_supply_register() is not yet called.
- * Do not call power_supply_charged() in this case.
+ * Do not call power_supply_changed() in this case.
*/
if (info->charger)
power_supply_changed(info->charger);
@@ -1478,6 +1480,11 @@ static void rt9455_pwr_rdy_work_callback(struct work_struct *work)
RT9455_MAX_CHARGING_TIME * HZ);
break;
}
+ /*
+ * Notify userspace that the charger has been either connected to or
+ * disconnected from the power source.
+ */
+ power_supply_changed(info->charger);
}
static void rt9455_max_charging_time_work_callback(struct work_struct *work)
@@ -1533,6 +1540,11 @@ static void rt9455_batt_presence_work_callback(struct work_struct *work)
if (ret)
dev_err(dev, "Failed to unmask BATAB interrupt\n");
}
+ /*
+ * Notify userspace that the battery is now connected to the
+ * charger.
+ */
+ power_supply_changed(info->charger);
}
}
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index ac6206951d58..af9383d23d12 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -215,7 +215,7 @@ static int rx51_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
di->dev = &pdev->dev;
- di->bat_desc.name = dev_name(&pdev->dev);
+ di->bat_desc.name = "rx51-battery";
di->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat_desc.properties = rx51_battery_props;
di->bat_desc.num_properties = ARRAY_SIZE(rx51_battery_props);
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 022b8910e443..f4f2c1f76c32 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -22,8 +22,10 @@
#include <linux/power_supply.h>
#include <linux/notifier.h>
#include <linux/usb/otg.h>
-#include <linux/regulator/machine.h>
+#include <linux/i2c/twl4030-madc.h>
+#define TWL4030_BCIMDEN 0x00
+#define TWL4030_BCIMDKEY 0x01
#define TWL4030_BCIMSTATEC 0x02
#define TWL4030_BCIICHG 0x08
#define TWL4030_BCIVAC 0x0a
@@ -32,11 +34,19 @@
#define TWL4030_BCIMFSTS4 0x10
#define TWL4030_BCICTL1 0x23
#define TWL4030_BB_CFG 0x12
+#define TWL4030_BCIIREF1 0x27
+#define TWL4030_BCIIREF2 0x28
+#define TWL4030_BCIMFKEY 0x11
+#define TWL4030_BCIMFEN3 0x14
+#define TWL4030_BCIMFTH8 0x1d
+#define TWL4030_BCIMFTH9 0x1e
+#define TWL4030_BCIWDKEY 0x21
#define TWL4030_BCIMFSTS1 0x01
#define TWL4030_BCIAUTOWEN BIT(5)
#define TWL4030_CONFIG_DONE BIT(4)
+#define TWL4030_CVENAC BIT(2)
#define TWL4030_BCIAUTOUSB BIT(1)
#define TWL4030_BCIAUTOAC BIT(0)
#define TWL4030_CGAIN BIT(5)
@@ -81,6 +91,21 @@
#define TWL4030_MSTATEC_COMPLETE1 0x0b
#define TWL4030_MSTATEC_COMPLETE4 0x0e
+#if IS_ENABLED(CONFIG_TWL4030_MADC)
+/*
+ * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
+ * then AC is available.
+ */
+static inline int ac_available(void)
+{
+ return twl4030_get_madc_conversion(11) > 4500;
+}
+#else
+static inline int ac_available(void)
+{
+ return 0;
+}
+#endif
static bool allow_usb;
module_param(allow_usb, bool, 0644);
MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
@@ -94,12 +119,39 @@ struct twl4030_bci {
struct work_struct work;
int irq_chg;
int irq_bci;
- struct regulator *usb_reg;
int usb_enabled;
+ /*
+ * ichg_* and *_cur values in uA. If any are 'large', we set
+ * CGAIN to '1' which doubles the range for half the
+ * precision.
+ */
+ unsigned int ichg_eoc, ichg_lo, ichg_hi;
+ unsigned int usb_cur, ac_cur;
+ bool ac_is_active;
+ int usb_mode, ac_mode; /* charging mode requested */
+#define CHARGE_OFF 0
+#define CHARGE_AUTO 1
+#define CHARGE_LINEAR 2
+
+ /* When setting the USB current we slowly increase the
+ * requested current until target is reached or the voltage
+ * drops below 4.75V. In the latter case we step back one
+ * step.
+ */
+ unsigned int usb_cur_target;
+ struct delayed_work current_worker;
+#define USB_CUR_STEP 20000 /* 20mA at a time */
+#define USB_MIN_VOLT 4750000 /* 4.75V */
+#define USB_CUR_DELAY msecs_to_jiffies(100)
+#define USB_MAX_CURRENT 1700000 /* TWL4030 caps at 1.7A */
+
unsigned long event;
};
+/* strings for 'usb_mode' values */
+static char *modes[] = { "off", "auto", "continuous" };
+
/*
* clear and set bits on an given register on a given module
*/
@@ -180,27 +232,233 @@ static int twl4030_is_battery_present(struct twl4030_bci *bci)
}
/*
- * Check if VBUS power is present
+ * TI provided formulas:
+ * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
+ * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
+ * Here we use integer approximation of:
+ * CGAIN == 0: val * 1.6618 - 0.85 * 1000
+ * CGAIN == 1: (val * 1.6618 - 0.85 * 1000) * 2
+ */
+/*
+ * convert twl register value for currents into uA
+ */
+static int regval2ua(int regval, bool cgain)
+{
+ if (cgain)
+ return (regval * 16618 - 8500 * 1000) / 5;
+ else
+ return (regval * 16618 - 8500 * 1000) / 10;
+}
+
+/*
+ * convert uA currents into twl register value
*/
-static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
+static int ua2regval(int ua, bool cgain)
{
int ret;
- u8 hwsts;
+ if (cgain)
+ ua /= 2;
+ ret = (ua * 10 + 8500 * 1000) / 16618;
+	/* guard against rounding below the 0x200 register floor */
+ if (ret < 512)
+ ret = 512;
+ return ret;
+}
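
As a sanity check of the two helpers above (CGAIN == 0 case), here is a small user-space program with the functions copied verbatim from the hunk; the asserted values follow from the integer formula and are worked examples, not driver output:

    #include <assert.h>
    #include <stdbool.h>

    static int regval2ua(int regval, bool cgain)
    {
    	if (cgain)
    		return (regval * 16618 - 8500 * 1000) / 5;
    	else
    		return (regval * 16618 - 8500 * 1000) / 10;
    }

    static int ua2regval(int ua, bool cgain)
    {
    	int ret;

    	if (cgain)
    		ua /= 2;
    	ret = (ua * 10 + 8500 * 1000) / 16618;
    	if (ret < 512)
    		ret = 512;
    	return ret;
    }

    int main(void)
    {
    	assert(regval2ua(512, false) == 841);	  /* ~0 mA at the 0x200 floor */
    	assert(regval2ua(1023, false) == 850021); /* ~850 mA full scale */
    	assert(ua2regval(500000, false) == 812);  /* 500 mA -> reg 0x32c */
    	assert(regval2ua(812, false) == 499381);  /* round-trips to ~499 mA */
    	return 0;
    }
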
- ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &hwsts,
- TWL4030_PM_MASTER_STS_HW_CONDITIONS);
- if (ret < 0)
- return 0;
+static int twl4030_charger_update_current(struct twl4030_bci *bci)
+{
+ int status;
+ int cur;
+ unsigned reg, cur_reg;
+ u8 bcictl1, oldreg, fullreg;
+ bool cgain = false;
+ u8 boot_bci;
- dev_dbg(bci->dev, "check_vbus: HW_CONDITIONS %02x\n", hwsts);
+ /*
+ * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
+ * and AC is enabled, set current for 'ac'
+ */
+ if (ac_available()) {
+ cur = bci->ac_cur;
+ bci->ac_is_active = true;
+ } else {
+ cur = bci->usb_cur;
+ bci->ac_is_active = false;
+ if (cur > bci->usb_cur_target) {
+ cur = bci->usb_cur_target;
+ bci->usb_cur = cur;
+ }
+ if (cur < bci->usb_cur_target)
+ schedule_delayed_work(&bci->current_worker, USB_CUR_DELAY);
+ }
+
+ /* First, check thresholds and see if cgain is needed */
+ if (bci->ichg_eoc >= 200000)
+ cgain = true;
+ if (bci->ichg_lo >= 400000)
+ cgain = true;
+ if (bci->ichg_hi >= 820000)
+ cgain = true;
+ if (cur > 852000)
+ cgain = true;
+
+ status = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+ if (status < 0)
+ return status;
+ if (twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &boot_bci,
+ TWL4030_PM_MASTER_BOOT_BCI) < 0)
+ boot_bci = 0;
+ boot_bci &= 7;
+
+ if ((!!cgain) != !!(bcictl1 & TWL4030_CGAIN))
+		/* Need to turn off charging while we change the
+ * CGAIN bit. Leave it off while everything is
+ * updated.
+ */
+ twl4030_clear_set_boot_bci(boot_bci, 0);
+
+ /*
+ * For ichg_eoc, the hardware only supports reg values matching
+ * 100XXXX000, and requires the XXXX be stored in the high nibble
+ * of TWL4030_BCIMFTH8.
+ */
+ reg = ua2regval(bci->ichg_eoc, cgain);
+ if (reg > 0x278)
+ reg = 0x278;
+ if (reg < 0x200)
+ reg = 0x200;
+ reg = (reg >> 3) & 0xf;
+ fullreg = reg << 4;
+
+ /*
+ * For ichg_lo, reg value must match 10XXXX0000.
+ * XXXX is stored in low nibble of TWL4030_BCIMFTH8.
+ */
+ reg = ua2regval(bci->ichg_lo, cgain);
+ if (reg > 0x2F0)
+ reg = 0x2F0;
+ if (reg < 0x200)
+ reg = 0x200;
+ reg = (reg >> 4) & 0xf;
+ fullreg |= reg;
+
+ /* ichg_eoc and ichg_lo live in same register */
+ status = twl4030_bci_read(TWL4030_BCIMFTH8, &oldreg);
+ if (status < 0)
+ return status;
+ if (oldreg != fullreg) {
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xF4,
+ TWL4030_BCIMFKEY);
+ if (status < 0)
+ return status;
+ twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+ fullreg, TWL4030_BCIMFTH8);
+ }
- /* in case we also have STS_USB_ID, VBUS is driven by TWL itself */
- if ((hwsts & TWL4030_STS_VBUS) && !(hwsts & TWL4030_STS_USB_ID))
- return 1;
+ /* ichg_hi threshold must be 1XXXX01100 (I think) */
+ reg = ua2regval(bci->ichg_hi, cgain);
+ if (reg > 0x3E0)
+ reg = 0x3E0;
+ if (reg < 0x200)
+ reg = 0x200;
+ fullreg = (reg >> 5) & 0xF;
+ fullreg <<= 4;
+ status = twl4030_bci_read(TWL4030_BCIMFTH9, &oldreg);
+ if (status < 0)
+ return status;
+ if ((oldreg & 0xF0) != fullreg) {
+ fullreg |= (oldreg & 0x0F);
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+ TWL4030_BCIMFKEY);
+ if (status < 0)
+ return status;
+ twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+ fullreg, TWL4030_BCIMFTH9);
+ }
+ /*
+ * And finally, set the current. This is stored in
+ * two registers.
+ */
+ reg = ua2regval(cur, cgain);
+ /* we have only 10 bits */
+ if (reg > 0x3ff)
+ reg = 0x3ff;
+ status = twl4030_bci_read(TWL4030_BCIIREF1, &oldreg);
+ if (status < 0)
+ return status;
+ cur_reg = oldreg;
+ status = twl4030_bci_read(TWL4030_BCIIREF2, &oldreg);
+ if (status < 0)
+ return status;
+ cur_reg |= oldreg << 8;
+	if (reg != cur_reg) {
+ /* disable write protection for one write access for
+ * BCIIREF */
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+ TWL4030_BCIMFKEY);
+ if (status < 0)
+ return status;
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+ (reg & 0x100) ? 3 : 2,
+ TWL4030_BCIIREF2);
+ if (status < 0)
+ return status;
+ /* disable write protection for one write access for
+ * BCIIREF */
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+ TWL4030_BCIMFKEY);
+ if (status < 0)
+ return status;
+ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+ reg & 0xff,
+ TWL4030_BCIIREF1);
+ }
+ if ((!!cgain) != !!(bcictl1 & TWL4030_CGAIN)) {
+ /* Flip CGAIN and re-enable charging */
+ bcictl1 ^= TWL4030_CGAIN;
+ twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+ bcictl1, TWL4030_BCICTL1);
+ twl4030_clear_set_boot_bci(0, boot_bci);
+ }
return 0;
}
+static int twl4030_charger_get_current(void);
+
+static void twl4030_current_worker(struct work_struct *data)
+{
+ int v, curr;
+ int res;
+ struct twl4030_bci *bci = container_of(data, struct twl4030_bci,
+ current_worker.work);
+
+ res = twl4030bci_read_adc_val(TWL4030_BCIVBUS);
+ if (res < 0)
+ v = 0;
+ else
+ /* BCIVBUS uses ADCIN8, 7/1023 V/step */
+ v = res * 6843;
+ curr = twl4030_charger_get_current();
+
+ dev_dbg(bci->dev, "v=%d cur=%d limit=%d target=%d\n", v, curr,
+ bci->usb_cur, bci->usb_cur_target);
+
+ if (v < USB_MIN_VOLT) {
+ /* Back up and stop adjusting. */
+ bci->usb_cur -= USB_CUR_STEP;
+ bci->usb_cur_target = bci->usb_cur;
+ } else if (bci->usb_cur >= bci->usb_cur_target ||
+ bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
+ /* Reached target and voltage is OK - stop */
+ return;
+ } else {
+ bci->usb_cur += USB_CUR_STEP;
+ schedule_delayed_work(&bci->current_worker, USB_CUR_DELAY);
+ }
+ twl4030_charger_update_current(bci);
+}
+
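The worker above implements a simple ramp: step the USB input current toward the target while VBUS stays healthy, back off one step and stop if it sags. A standalone sketch of that policy; the 6843 uV/step scale follows the 7/1023 V ADCIN8 comment, while the USB_* constants here are illustrative stand-ins, not the driver's values:

  #include <stdio.h>
  #include <stdbool.h>

  #define USB_MIN_VOLT_UV  4450000  /* assumed stand-in for USB_MIN_VOLT */
  #define USB_MAX_CUR_UA   1700000  /* assumed stand-in for USB_MAX_CURRENT */
  #define USB_CUR_STEP_UA  100000   /* assumed stand-in for USB_CUR_STEP */

  /* BCIVBUS uses ADCIN8: 7/1023 V per step == 6843 uV per step */
  static int adc_to_uv(int adc) { return adc * 6843; }

  /* Returns true if another ramp step should be scheduled. */
  static bool ramp_step(int vbus_uv, int *cur, int *target)
  {
          if (vbus_uv < USB_MIN_VOLT_UV) {
                  *cur -= USB_CUR_STEP_UA;  /* VBUS sagging: back off */
                  *target = *cur;           /* and stop adjusting */
                  return false;
          }
          if (*cur >= *target || *cur + USB_CUR_STEP_UA > USB_MAX_CUR_UA)
                  return false;             /* reached target, voltage OK */
          *cur += USB_CUR_STEP_UA;
          return true;
  }

  int main(void)
  {
          int cur = 500000, target = 800000;

          /* ADC reading 700 is ~4.79 V, healthy: step up to 600 mA */
          printf("again=%d cur=%d\n",
                 ramp_step(adc_to_uv(700), &cur, &target), cur);
          return 0;
  }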
/*
* Enable/Disable USB Charge functionality.
*/
@@ -208,45 +466,60 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
- if (enable) {
- /* Check for USB charger connected */
- if (!twl4030_bci_have_vbus(bci))
- return -ENODEV;
+ if (bci->usb_mode == CHARGE_OFF)
+ enable = false;
+ if (enable && !IS_ERR_OR_NULL(bci->transceiver)) {
- /*
- * Until we can find out what current the device can provide,
- * require a module param to enable USB charging.
- */
- if (!allow_usb) {
- dev_warn(bci->dev, "USB charging is disabled.\n");
- return -EACCES;
- }
+ twl4030_charger_update_current(bci);
- /* Need to keep regulator on */
+ /* Need to keep phy powered */
if (!bci->usb_enabled) {
- ret = regulator_enable(bci->usb_reg);
- if (ret) {
- dev_err(bci->dev,
- "Failed to enable regulator\n");
- return ret;
- }
+ pm_runtime_get_sync(bci->transceiver->dev);
bci->usb_enabled = 1;
}
- /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
- ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
- if (ret < 0)
- return ret;
+ if (bci->usb_mode == CHARGE_AUTO)
+ /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
+ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
+ if (bci->usb_mode == CHARGE_LINEAR) {
+ twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
+		/* Watchdog key: WOVF acknowledge */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
+ TWL4030_BCIWDKEY);
+ /* 0x24 + EKEY6: off mode */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x2a,
+ TWL4030_BCIMDKEY);
+ /* EKEY2: Linear charge: USB path */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x26,
+ TWL4030_BCIMDKEY);
+ /* WDKEY5: stop watchdog count */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xf3,
+ TWL4030_BCIWDKEY);
+ /* enable MFEN3 access */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x9c,
+ TWL4030_BCIMFKEY);
+ /* ICHGEOCEN - end-of-charge monitor (current < 80mA)
+ * (charging continues)
+ * ICHGLOWEN - current level monitor (charge continues)
+ * don't monitor over-current or heat save
+ */
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xf0,
+ TWL4030_BCIMFEN3);
+ }
} else {
ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+ ret |= twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x2a,
+ TWL4030_BCIMDKEY);
if (bci->usb_enabled) {
- regulator_disable(bci->usb_reg);
+ pm_runtime_mark_last_busy(bci->transceiver->dev);
+ pm_runtime_put_autosuspend(bci->transceiver->dev);
bci->usb_enabled = 0;
}
+ bci->usb_cur = 0;
}
return ret;
@@ -255,10 +528,13 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
/*
 * Enable/Disable AC Charge functionality.
*/
-static int twl4030_charger_enable_ac(bool enable)
+static int twl4030_charger_enable_ac(struct twl4030_bci *bci, bool enable)
{
int ret;
+ if (bci->ac_mode == CHARGE_OFF)
+ enable = false;
+
if (enable)
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOAC);
else
@@ -318,6 +594,9 @@ static irqreturn_t twl4030_charger_interrupt(int irq, void *arg)
struct twl4030_bci *bci = arg;
dev_dbg(bci->dev, "CHG_PRES irq\n");
+ /* reset current on each 'plug' event */
+ bci->ac_cur = 500000;
+ twl4030_charger_update_current(bci);
power_supply_changed(bci->ac);
power_supply_changed(bci->usb);
@@ -350,6 +629,7 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
power_supply_changed(bci->ac);
power_supply_changed(bci->usb);
}
+ twl4030_charger_update_current(bci);
/* various monitoring events, for now we just log them here */
if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1))
@@ -370,6 +650,63 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+/*
+ * Provide "max_current" attribute in sysfs.
+ */
+static ssize_t
+twl4030_bci_max_current_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+ int cur = 0;
+ int status = 0;
+ status = kstrtoint(buf, 10, &cur);
+ if (status)
+ return status;
+ if (cur < 0)
+ return -EINVAL;
+ if (dev == &bci->ac->dev)
+ bci->ac_cur = cur;
+ else
+ bci->usb_cur_target = cur;
+
+ twl4030_charger_update_current(bci);
+ return n;
+}
+
+/*
+ * sysfs max_current show
+ */
+static ssize_t twl4030_bci_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int status = 0;
+ int cur = -1;
+ u8 bcictl1;
+ struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+
+ if (dev == &bci->ac->dev) {
+ if (!bci->ac_is_active)
+ cur = bci->ac_cur;
+ } else {
+ if (bci->ac_is_active)
+ cur = bci->usb_cur_target;
+ }
+ if (cur < 0) {
+ cur = twl4030bci_read_adc_val(TWL4030_BCIIREF1);
+ if (cur < 0)
+ return cur;
+ status = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+ if (status < 0)
+ return status;
+ cur = regval2ua(cur, bcictl1 & TWL4030_CGAIN);
+ }
+ return scnprintf(buf, PAGE_SIZE, "%u\n", cur);
+}
+
+static DEVICE_ATTR(max_current, 0644, twl4030_bci_max_current_show,
+ twl4030_bci_max_current_store);
+
static void twl4030_bci_usb_work(struct work_struct *data)
{
struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
@@ -392,6 +729,12 @@ static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
dev_dbg(bci->dev, "OTG notify %lu\n", val);
+ /* reset current on each 'plug' event */
+ if (allow_usb)
+ bci->usb_cur_target = 500000;
+ else
+ bci->usb_cur_target = 100000;
+
bci->event = val;
schedule_work(&bci->work);
@@ -399,13 +742,66 @@ static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
}
/*
- * TI provided formulas:
- * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
- * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
- * Here we use integer approximation of:
- * CGAIN == 0: val * 1.6618 - 0.85
- * CGAIN == 1: (val * 1.6618 - 0.85) * 2
+ * sysfs charger enabled store
+ */
+static ssize_t
+twl4030_bci_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+ int mode;
+ int status;
+
+ if (sysfs_streq(buf, modes[0]))
+ mode = 0;
+ else if (sysfs_streq(buf, modes[1]))
+ mode = 1;
+ else if (sysfs_streq(buf, modes[2]))
+ mode = 2;
+ else
+ return -EINVAL;
+ if (dev == &bci->ac->dev) {
+ if (mode == 2)
+ return -EINVAL;
+ twl4030_charger_enable_ac(bci, false);
+ bci->ac_mode = mode;
+ status = twl4030_charger_enable_ac(bci, true);
+ } else {
+ twl4030_charger_enable_usb(bci, false);
+ bci->usb_mode = mode;
+ status = twl4030_charger_enable_usb(bci, true);
+ }
+ return (status == 0) ? n : status;
+}
+
+/*
+ * sysfs charger enabled show
*/
+static ssize_t
+twl4030_bci_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+ int len = 0;
+ int i;
+ int mode = bci->usb_mode;
+
+ if (dev == &bci->ac->dev)
+ mode = bci->ac_mode;
+
+ for (i = 0; i < ARRAY_SIZE(modes); i++)
+ if (mode == i)
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "[%s] ", modes[i]);
+ else
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "%s ", modes[i]);
+ buf[len-1] = '\n';
+ return len;
+}
+static DEVICE_ATTR(mode, 0644, twl4030_bci_mode_show,
+ twl4030_bci_mode_store);
+
static int twl4030_charger_get_current(void)
{
int curr;
@@ -420,11 +816,7 @@ static int twl4030_charger_get_current(void)
if (ret)
return ret;
- ret = (curr * 16618 - 850 * 10000) / 10;
- if (bcictl1 & TWL4030_CGAIN)
- ret *= 2;
-
- return ret;
+ return regval2ua(curr, bcictl1 & TWL4030_CGAIN);
}
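regval2ua() replaces the open-coded arithmetic removed here. A minimal sketch of the conversion, reconstructed from the removed TI-formula comment (integer approximation of ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85, doubled when CGAIN is set); the real helper may round differently:

  #include <stdio.h>

  static int regval2ua(int regval, int cgain)
  {
          int ua = (regval * 16618 - 850 * 10000) / 10;

          return cgain ? ua * 2 : ua;
  }

  int main(void)
  {
          /* A reading of 559 maps back to ~79 mA with CGAIN == 0,
           * close to the 80.1 mA EOC default set in probe(). */
          printf("%d uA\n", regval2ua(559, 0)); /* prints 78946 uA */
          return 0;
  }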
/*
@@ -476,6 +868,17 @@ static int twl4030_bci_get_property(struct power_supply *psy,
is_charging = state & TWL4030_MSTATEC_USB;
else
is_charging = state & TWL4030_MSTATEC_AC;
+ if (!is_charging) {
+ u8 s;
+ twl4030_bci_read(TWL4030_BCIMDEN, &s);
+ if (psy->desc->type == POWER_SUPPLY_TYPE_USB)
+ is_charging = s & 1;
+ else
+ is_charging = s & 2;
+ if (is_charging)
+ /* A little white lie */
+ state = TWL4030_MSTATEC_QUICK1;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -574,20 +977,31 @@ static const struct power_supply_desc twl4030_bci_usb_desc = {
.get_property = twl4030_bci_get_property,
};
-static int __init twl4030_bci_probe(struct platform_device *pdev)
+static int twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
int ret;
u32 reg;
- bci = kzalloc(sizeof(*bci), GFP_KERNEL);
+ bci = devm_kzalloc(&pdev->dev, sizeof(*bci), GFP_KERNEL);
if (bci == NULL)
return -ENOMEM;
if (!pdata)
pdata = twl4030_bci_parse_dt(&pdev->dev);
+ bci->ichg_eoc = 80100; /* Stop charging when current drops to here */
+ bci->ichg_lo = 241000; /* Low threshold */
+ bci->ichg_hi = 500000; /* High threshold */
+ bci->ac_cur = 500000; /* 500mA */
+ if (allow_usb)
+ bci->usb_cur_target = 500000; /* 500mA */
+ else
+ bci->usb_cur_target = 100000; /* 100mA */
+ bci->usb_mode = CHARGE_AUTO;
+ bci->ac_mode = CHARGE_AUTO;
+
bci->dev = &pdev->dev;
bci->irq_chg = platform_get_irq(pdev, 0);
bci->irq_bci = platform_get_irq(pdev, 1);
@@ -596,47 +1010,46 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
ret = twl4030_is_battery_present(bci);
if (ret) {
dev_crit(&pdev->dev, "Battery was not detected:%d\n", ret);
- goto fail_no_battery;
+ return ret;
}
platform_set_drvdata(pdev, bci);
- bci->ac = power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
- NULL);
+ bci->ac = devm_power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
+ NULL);
if (IS_ERR(bci->ac)) {
ret = PTR_ERR(bci->ac);
dev_err(&pdev->dev, "failed to register ac: %d\n", ret);
- goto fail_register_ac;
+ return ret;
}
- bci->usb_reg = regulator_get(bci->dev, "bci3v1");
-
- bci->usb = power_supply_register(&pdev->dev, &twl4030_bci_usb_desc,
- NULL);
+ bci->usb = devm_power_supply_register(&pdev->dev, &twl4030_bci_usb_desc,
+ NULL);
if (IS_ERR(bci->usb)) {
ret = PTR_ERR(bci->usb);
dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
- goto fail_register_usb;
+ return ret;
}
- ret = request_threaded_irq(bci->irq_chg, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, bci->irq_chg, NULL,
twl4030_charger_interrupt, IRQF_ONESHOT, pdev->name,
bci);
if (ret < 0) {
dev_err(&pdev->dev, "could not request irq %d, status %d\n",
bci->irq_chg, ret);
- goto fail_chg_irq;
+ return ret;
}
- ret = request_threaded_irq(bci->irq_bci, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, bci->irq_bci, NULL,
twl4030_bci_interrupt, IRQF_ONESHOT, pdev->name, bci);
if (ret < 0) {
dev_err(&pdev->dev, "could not request irq %d, status %d\n",
bci->irq_bci, ret);
- goto fail_bci_irq;
+ return ret;
}
INIT_WORK(&bci->work, twl4030_bci_usb_work);
+ INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
if (bci->dev->of_node) {
@@ -644,9 +1057,13 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
phynode = of_find_compatible_node(bci->dev->of_node->parent,
NULL, "ti,twl4030-usb");
- if (phynode)
+ if (phynode) {
bci->transceiver = devm_usb_get_phy_by_node(
bci->dev, phynode, &bci->usb_nb);
+ if (IS_ERR(bci->transceiver) &&
+ PTR_ERR(bci->transceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ }
}
/* Enable interrupts now. */
@@ -656,7 +1073,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
TWL4030_INTERRUPTS_BCIIMR1A);
if (ret < 0) {
dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
- goto fail_unmask_interrupts;
+ return ret;
}
reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
@@ -665,8 +1082,23 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
- twl4030_charger_enable_ac(true);
- twl4030_charger_enable_usb(bci, true);
+ twl4030_charger_update_current(bci);
+ if (device_create_file(&bci->usb->dev, &dev_attr_max_current))
+ dev_warn(&pdev->dev, "could not create sysfs file\n");
+ if (device_create_file(&bci->usb->dev, &dev_attr_mode))
+ dev_warn(&pdev->dev, "could not create sysfs file\n");
+ if (device_create_file(&bci->ac->dev, &dev_attr_mode))
+ dev_warn(&pdev->dev, "could not create sysfs file\n");
+ if (device_create_file(&bci->ac->dev, &dev_attr_max_current))
+ dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+ twl4030_charger_enable_ac(bci, true);
+ if (!IS_ERR_OR_NULL(bci->transceiver))
+ twl4030_bci_usb_ncb(&bci->usb_nb,
+ bci->transceiver->last_event,
+ NULL);
+ else
+ twl4030_charger_enable_usb(bci, false);
if (pdata)
twl4030_charger_enable_backup(pdata->bb_uvolt,
pdata->bb_uamp);
@@ -674,42 +1106,26 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
twl4030_charger_enable_backup(0, 0);
return 0;
-
-fail_unmask_interrupts:
- free_irq(bci->irq_bci, bci);
-fail_bci_irq:
- free_irq(bci->irq_chg, bci);
-fail_chg_irq:
- power_supply_unregister(bci->usb);
-fail_register_usb:
- power_supply_unregister(bci->ac);
-fail_register_ac:
-fail_no_battery:
- kfree(bci);
-
- return ret;
}
static int __exit twl4030_bci_remove(struct platform_device *pdev)
{
struct twl4030_bci *bci = platform_get_drvdata(pdev);
- twl4030_charger_enable_ac(false);
+ twl4030_charger_enable_ac(bci, false);
twl4030_charger_enable_usb(bci, false);
twl4030_charger_enable_backup(0, 0);
+ device_remove_file(&bci->usb->dev, &dev_attr_max_current);
+ device_remove_file(&bci->usb->dev, &dev_attr_mode);
+ device_remove_file(&bci->ac->dev, &dev_attr_max_current);
+ device_remove_file(&bci->ac->dev, &dev_attr_mode);
/* mask interrupts */
twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
TWL4030_INTERRUPTS_BCIIMR1A);
twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
TWL4030_INTERRUPTS_BCIIMR2A);
- free_irq(bci->irq_bci, bci);
- free_irq(bci->irq_chg, bci);
- power_supply_unregister(bci->usb);
- power_supply_unregister(bci->ac);
- kfree(bci);
-
return 0;
}
@@ -720,14 +1136,14 @@ static const struct of_device_id twl_bci_of_match[] = {
MODULE_DEVICE_TABLE(of, twl_bci_of_match);
static struct platform_driver twl4030_bci_driver = {
+ .probe = twl4030_bci_probe,
.driver = {
.name = "twl4030_bci",
.of_match_table = of_match_ptr(twl_bci_of_match),
},
.remove = __exit_p(twl4030_bci_remove),
};
-
-module_platform_driver_probe(twl4030_bci_driver, twl4030_bci_probe);
+module_platform_driver(twl4030_bci_driver);
MODULE_AUTHOR("Gražvydas Ignotas");
MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 482b22ddc7b2..5efacd050c7d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1096,11 +1096,13 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
+ RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
{}
};
@@ -1145,9 +1147,11 @@ static int rapl_unregister_powercap(void)
pr_debug("remove package, undo power limit on %d: %s\n",
rp->id, rd->name);
rapl_write_data_raw(rd, PL1_ENABLE, 0);
- rapl_write_data_raw(rd, PL2_ENABLE, 0);
rapl_write_data_raw(rd, PL1_CLAMP, 0);
- rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ if (find_nr_power_limit(rd) > 1) {
+ rapl_write_data_raw(rd, PL2_ENABLE, 0);
+ rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ }
if (rd->id == RAPL_DOMAIN_PACKAGE) {
rd_package = rd;
continue;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b1541f40fd8d..062630ab7424 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -111,6 +111,13 @@ config PWM_CLPS711X
To compile this driver as a module, choose M here: the module
will be called pwm-clps711x.
+config PWM_CRC
+ bool "Intel Crystalcove (CRC) PWM support"
+ depends on X86 && INTEL_SOC_PMIC
+ help
+	  Generic PWM framework driver for Crystalcove (CRC) PMIC-based PWM
+	  control.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX
@@ -173,6 +180,18 @@ config PWM_LP3943
To compile this driver as a module, choose M here: the module
will be called pwm-lp3943.
+config PWM_LPC18XX_SCT
+ tristate "LPC18xx/43xx PWM/SCT support"
+ depends on ARCH_LPC18XX
+ help
+ Generic PWM framework driver for NXP LPC18xx PWM/SCT which
+ supports 16 channels.
+ A maximum of 15 channels can be requested simultaneously and
+ must have the same period.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-lpc18xx-sct.
+
config PWM_LPC32XX
tristate "LPC32XX PWM support"
depends on ARCH_LPC32XX
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ec50eb5b5a8f..a0e00c09ead3 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,12 +8,14 @@ obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
+obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
+obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 3a7769fe53de..3f9df3ea3350 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -200,6 +200,8 @@ static void of_pwmchip_remove(struct pwm_chip *chip)
* pwm_set_chip_data() - set private chip data for a PWM
* @pwm: PWM device
* @data: pointer to chip-specific data
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
@@ -215,6 +217,8 @@ EXPORT_SYMBOL_GPL(pwm_set_chip_data);
/**
* pwm_get_chip_data() - get private chip data for a PWM
* @pwm: PWM device
+ *
+ * Returns: A pointer to the chip-private data for the PWM device.
*/
void *pwm_get_chip_data(struct pwm_device *pwm)
{
@@ -230,6 +234,8 @@ EXPORT_SYMBOL_GPL(pwm_get_chip_data);
* Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
* will be used. The initial polarity for all channels is specified by the
* @polarity parameter.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_add_with_polarity(struct pwm_chip *chip,
enum pwm_polarity polarity)
@@ -291,6 +297,8 @@ EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
*
* Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
* will be used. The initial polarity for all channels is normal.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_add(struct pwm_chip *chip)
{
@@ -304,6 +312,8 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
*
* Removes a PWM chip. This function may return busy if the PWM chip provides
* a PWM device that is still requested.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwmchip_remove(struct pwm_chip *chip)
{
@@ -338,10 +348,13 @@ EXPORT_SYMBOL_GPL(pwmchip_remove);
/**
* pwm_request() - request a PWM device
- * @pwm_id: global PWM device index
+ * @pwm: global PWM device index
* @label: PWM device label
*
* This function is deprecated, use pwm_get() instead.
+ *
+ * Returns: A pointer to a PWM device or an ERR_PTR()-encoded error code on
+ * failure.
*/
struct pwm_device *pwm_request(int pwm, const char *label)
{
@@ -376,9 +389,9 @@ EXPORT_SYMBOL_GPL(pwm_request);
* @index: per-chip index of the PWM to request
* @label: a literal description string of this PWM
*
- * Returns the PWM at the given index of the given PWM chip. A negative error
- * code is returned if the index is not valid for the specified PWM chip or
- * if the PWM device cannot be requested.
+ * Returns: A pointer to the PWM device at the given index of the given PWM
+ * chip. A negative error code is returned if the index is not valid for the
+ * specified PWM chip or if the PWM device cannot be requested.
*/
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
@@ -419,6 +432,8 @@ EXPORT_SYMBOL_GPL(pwm_free);
* @pwm: PWM device
* @duty_ns: "on" time (in nanoseconds)
* @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
@@ -443,7 +458,10 @@ EXPORT_SYMBOL_GPL(pwm_config);
* @pwm: PWM device
* @polarity: new polarity of the PWM signal
*
- * Note that the polarity cannot be configured while the PWM device is enabled
+ * Note that the polarity cannot be configured while the PWM device is
+ * enabled.
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
{
@@ -455,7 +473,7 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
if (!pwm->chip->ops->set_polarity)
return -ENOSYS;
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
return -EBUSY;
err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
@@ -471,6 +489,8 @@ EXPORT_SYMBOL_GPL(pwm_set_polarity);
/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
+ *
+ * Returns: 0 on success or a negative error code on failure.
*/
int pwm_enable(struct pwm_device *pwm)
{
@@ -524,6 +544,9 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
* lookup of the PWM index. This also means that the "pwm-names" property
* becomes mandatory for devices that look up the PWM device via the con_id
* parameter.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
{
@@ -630,6 +653,9 @@ void pwm_remove_table(struct pwm_lookup *table, size_t num)
*
* Once a PWM chip has been found the specified PWM device will be requested
* and is ready to be used.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *pwm_get(struct device *dev, const char *con_id)
{
@@ -752,6 +778,9 @@ static void devm_pwm_release(struct device *dev, void *res)
*
* This function performs like pwm_get() but the acquired PWM device will
* automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
{
@@ -781,6 +810,9 @@ EXPORT_SYMBOL_GPL(devm_pwm_get);
*
* This function performs like of_pwm_get() but the acquired PWM device will
* automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
*/
struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
const char *con_id)
@@ -832,7 +864,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
* pwm_can_sleep() - report whether PWM access will sleep
* @pwm: PWM device
*
- * It returns true if accessing the PWM can sleep, false otherwise.
+ * Returns: True if accessing the PWM can sleep, false otherwise.
*/
bool pwm_can_sleep(struct pwm_device *pwm)
{
@@ -853,7 +885,7 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
if (test_bit(PWMF_REQUESTED, &pwm->flags))
seq_puts(s, " requested");
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
seq_puts(s, " enabled");
seq_puts(s, "\n");
@@ -924,6 +956,5 @@ static int __init pwm_debugfs_init(void)
return 0;
}
-
subsys_initcall(pwm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
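The Returns: annotations above document the consumer-facing entry points. A minimal consumer sketch against that API; the "backlight" con_id and the surrounding driver are hypothetical:

  #include <linux/err.h>
  #include <linux/pwm.h>

  static int example_backlight_setup(struct device *dev)
  {
          struct pwm_device *pwm;
          int ret;

          pwm = devm_pwm_get(dev, "backlight"); /* con_id is an assumption */
          if (IS_ERR(pwm))
                  return PTR_ERR(pwm);          /* may be -EPROBE_DEFER */

          /* 50% duty at 1 kHz: duty_ns = 500000, period_ns = 1000000 */
          ret = pwm_config(pwm, 500000, 1000000);
          if (ret < 0)
                  return ret;

          return pwm_enable(pwm);
  }

The devm_ variant releases the PWM on driver detach, so no explicit pwm_put() is needed on the error paths.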
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index fa5feaba25a5..5df1db40fc07 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -218,6 +218,11 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
static const struct of_device_id atmel_hlcdc_dt_ids[] = {
{
+ .compatible = "atmel,at91sam9n12-hlcdc",
+ /* 9n12 has same errata as 9x5 HLCDC PWM */
+ .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
+ },
+ {
.compatible = "atmel,at91sam9x5-hlcdc",
.data = &atmel_hlcdc_pwm_at91sam9x5_errata,
},
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index d14e0677c92d..6da01b3bf6f4 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -347,7 +347,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
tcbpwm->duty = duty;
/* If the PWM is enabled, call enable to apply the new conf */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
atmel_tcb_pwm_enable(chip, pwm);
return 0;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index a947c9095d9d..0e4bd4e8e582 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -114,7 +114,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u32 val;
int ret;
- if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
+ if (pwm_is_enabled(pwm) && (period_ns != pwm_get_period(pwm))) {
dev_err(chip->dev, "cannot change PWM period while enabled\n");
return -EBUSY;
}
@@ -176,7 +176,7 @@ static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM channel is enabled, only update CDTY by using the update
* register, it needs to set bit 10 of CMR to 0
*/
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
return;
/*
* If the PWM channel is disabled, write value to duty and period
@@ -191,7 +191,7 @@ static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
/*
* If the PWM channel is enabled, using the duty update register
* to update the value.
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 7af8fea2dc5b..c63418322023 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -76,19 +76,36 @@ static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
return container_of(_chip, struct kona_pwmc, chip);
}
-static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+/*
+ * Clear trigger bit but set smooth bit to maintain old output.
+ */
+static void kona_pwmc_prepare_for_settings(struct kona_pwmc *kp,
+ unsigned int chan)
{
unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
- /* Clear trigger bit but set smooth bit to maintain old output */
value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
+ /*
+ * There must be a min 400ns delay between clearing trigger and setting
+ * it. Failing to do this may result in no PWM signal.
+ */
+ ndelay(400);
+}
+
+static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+{
+ unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
+
/* Set trigger bit and clear smooth bit to apply new settings */
value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+ /* Trigger bit must be held high for at least 400 ns. */
+ ndelay(400);
}
static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -133,8 +150,14 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- /* If the PWM channel is enabled, write the settings to the HW */
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * Don't apply settings if disabled. The period and duty cycle are
+ * always calculated above to ensure the new values are
+ * validated immediately instead of on enable.
+ */
+ if (pwm_is_enabled(pwm)) {
+ kona_pwmc_prepare_for_settings(kp, chan);
+
value = readl(kp->base + PRESCALE_OFFSET);
value &= ~PRESCALE_MASK(chan);
value |= prescale << PRESCALE_SHIFT(chan);
@@ -164,6 +187,8 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
+ kona_pwmc_prepare_for_settings(kp, chan);
+
value = readl(kp->base + PWM_CONTROL_OFFSET);
if (polarity == PWM_POLARITY_NORMAL)
@@ -175,9 +200,6 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
kona_pwmc_apply_settings(kp, chan);
- /* Wait for waveform to settle before gating off the clock */
- ndelay(400);
-
clk_disable_unprepare(kp->clk);
return 0;
@@ -194,7 +216,8 @@ static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return ret;
}
- ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period);
+ ret = kona_pwmc_config(chip, pwm, pwm_get_duty_cycle(pwm),
+ pwm_get_period(pwm));
if (ret < 0) {
clk_disable_unprepare(kp->clk);
return ret;
@@ -207,13 +230,20 @@ static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct kona_pwmc *kp = to_kona_pwmc(chip);
unsigned int chan = pwm->hwpwm;
+ unsigned int value;
+
+ kona_pwmc_prepare_for_settings(kp, chan);
/* Simulate a disable by configuring for zero duty */
writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
- kona_pwmc_apply_settings(kp, chan);
+ writel(0, kp->base + PERIOD_COUNT_OFFSET(chan));
- /* Wait for waveform to settle before gating off the clock */
- ndelay(400);
+ /* Set prescale to 0 for this channel */
+ value = readl(kp->base + PRESCALE_OFFSET);
+ value &= ~PRESCALE_MASK(chan);
+ writel(value, kp->base + PRESCALE_OFFSET);
+
+ kona_pwmc_apply_settings(kp, chan);
clk_disable_unprepare(kp->clk);
}
@@ -287,7 +317,7 @@ static int kona_pwmc_remove(struct platform_device *pdev)
unsigned int chan;
for (chan = 0; chan < kp->chip.npwm; chan++)
- if (test_bit(PWMF_ENABLED, &kp->chip.pwms[chan].flags))
+ if (pwm_is_enabled(&kp->chip.pwms[chan]))
clk_disable_unprepare(kp->clk);
return pwmchip_remove(&kp->chip);
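The prepare/apply split above encodes a timing contract: clear the trigger while keeping the output smooth, wait at least 400 ns, then set the trigger and hold it at least 400 ns. A sketch of that ordering on a plain control word; the SMOOTH/TRIGGER bit positions below are placeholders, not the real register layout:

  #include <stdio.h>

  /* Placeholder bit positions for illustration only. */
  #define SMOOTH_BIT(ch)   (1u << (24 + (ch)))
  #define TRIGGER_BIT(ch)  (1u << (ch))

  static unsigned int prepare_for_settings(unsigned int ctrl, int ch)
  {
          ctrl |= SMOOTH_BIT(ch);   /* hold the old output level */
          ctrl &= ~TRIGGER_BIT(ch); /* clear trigger; then wait >= 400 ns */
          return ctrl;
  }

  static unsigned int apply_settings(unsigned int ctrl, int ch)
  {
          ctrl &= ~SMOOTH_BIT(ch);
          ctrl |= TRIGGER_BIT(ch);  /* latch; hold trigger >= 400 ns */
          return ctrl;
  }

  int main(void)
  {
          unsigned int ctrl = 0;

          ctrl = prepare_for_settings(ctrl, 2);
          /* ... program prescale, period and duty registers here ... */
          ctrl = apply_settings(ctrl, 2);
          printf("ctrl = 0x%08x\n", ctrl);
          return 0;
  }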
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
new file mode 100644
index 000000000000..7101c7020bf4
--- /dev/null
+++ b/drivers/pwm/pwm-crc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pwm.h>
+
+#define PWM0_CLK_DIV 0x4B
+#define PWM_OUTPUT_ENABLE BIT(7)
+#define PWM_DIV_CLK_0 0x00 /* DIVIDECLK = BASECLK */
+#define PWM_DIV_CLK_100 0x63 /* DIVIDECLK = BASECLK/100 */
+#define PWM_DIV_CLK_128 0x7F /* DIVIDECLK = BASECLK/128 */
+
+#define PWM0_DUTY_CYCLE 0x4E
+#define BACKLIGHT_EN 0x51
+
+#define PWM_MAX_LEVEL 0xFF
+
+#define PWM_BASE_CLK 6000000 /* 6 MHz */
+#define PWM_MAX_PERIOD_NS	21333	/* 46.875 kHz */
+
+/**
+ * struct crystalcove_pwm - Crystal Cove PWM controller
+ * @chip: the abstract pwm_chip structure.
+ * @regmap: the regmap from the parent device.
+ */
+struct crystalcove_pwm {
+ struct pwm_chip chip;
+ struct regmap *regmap;
+};
+
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+{
+ return container_of(pc, struct crystalcove_pwm, chip);
+}
+
+static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+ struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+ regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+
+ return 0;
+}
+
+static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+ struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+ regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+}
+
+static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+ struct device *dev = crc_pwm->chip.dev;
+ int level;
+
+ if (period_ns > PWM_MAX_PERIOD_NS) {
+		dev_err(dev, "unsupported period_ns\n");
+ return -EINVAL;
+ }
+
+ if (pwm->period != period_ns) {
+ int clk_div;
+
+		/* changing the clk divisor, need to disable first */
+ crc_pwm_disable(c, pwm);
+ clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+
+ regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+ clk_div | PWM_OUTPUT_ENABLE);
+
+ /* enable back */
+ crc_pwm_enable(c, pwm);
+ }
+
+ /* change the pwm duty cycle */
+ level = duty_ns * PWM_MAX_LEVEL / period_ns;
+ regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+
+ return 0;
+}
+
+static const struct pwm_ops crc_pwm_ops = {
+ .config = crc_pwm_config,
+ .enable = crc_pwm_enable,
+ .disable = crc_pwm_disable,
+};
+
+static int crystalcove_pwm_probe(struct platform_device *pdev)
+{
+ struct crystalcove_pwm *pwm;
+ struct device *dev = pdev->dev.parent;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return -ENOMEM;
+
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.ops = &crc_pwm_ops;
+ pwm->chip.base = -1;
+ pwm->chip.npwm = 1;
+
+ /* get the PMIC regmap */
+ pwm->regmap = pmic->regmap;
+
+ platform_set_drvdata(pdev, pwm);
+
+ return pwmchip_add(&pwm->chip);
+}
+
+static int crystalcove_pwm_remove(struct platform_device *pdev)
+{
+ struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver crystalcove_pwm_driver = {
+ .probe = crystalcove_pwm_probe,
+ .remove = crystalcove_pwm_remove,
+ .driver = {
+ .name = "crystal_cove_pwm",
+ },
+};
+
+builtin_platform_driver(crystalcove_pwm_driver);
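A worked sketch of the two conversions in crc_pwm_config() above, restated as plain user-space arithmetic with the driver's constants; the 64-bit intermediate is used here only to keep the example's multiplication in range:

  #include <stdio.h>

  #define PWM_BASE_CLK   6000000  /* 6 MHz */
  #define PWM_MAX_LEVEL  0xFF
  #define NSEC_PER_SEC   1000000000LL

  int main(void)
  {
          int period_ns = 21333, duty_ns = 10666;

          /* Clock divider: at the maximum 21333 ns period this yields
           * 127, i.e. PWM_DIV_CLK_128 (DIVIDECLK = BASECLK/128). */
          long long clk_div =
                  (long long)PWM_BASE_CLK * period_ns / NSEC_PER_SEC;

          /* Duty level: 8-bit fraction of the period; ~50% gives 127 */
          int level = (int)((long long)duty_ns * PWM_MAX_LEVEL / period_ns);

          printf("clk_div=%lld level=%d\n", clk_div, level);
          return 0;
  }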
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index e593e9c45c51..bbf10ae02f0e 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -82,7 +82,7 @@ static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* The clock needs to be enabled to access the PWM registers.
* Configuration can be changed at any time.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
ret = clk_enable(ep93xx_pwm->clk);
if (ret)
return ret;
@@ -113,7 +113,7 @@ static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = -EINVAL;
}
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable(ep93xx_pwm->clk);
return ret;
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 66d6f0c5c421..d600fd5cd4ba 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -114,7 +114,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
unsigned long long c;
unsigned long period_cycles, duty_cycles, prescale;
unsigned int period_ms;
- bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
+ bool enable = pwm_is_enabled(pwm);
int wait_count = 0, fifoav;
u32 cr, sr;
@@ -129,7 +129,8 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
sr = readl(imx->mmio_base + MX3_PWMSR);
fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
- period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC);
+ period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
+ NSEC_PER_MSEC);
msleep(period_ms);
sr = readl(imx->mmio_base + MX3_PWMSR);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
new file mode 100644
index 000000000000..9163085101bc
--- /dev/null
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -0,0 +1,465 @@
+/*
+ * NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * Notes
+ * =====
+ * NXP LPC18xx provides a State Configurable Timer (SCT) which can be configured
+ * as a Pulse Width Modulator.
+ *
+ * SCT supports 16 outputs, 16 events and 16 registers. Each event will be
+ * triggered when its related register matches the SCT counter value, and it
+ * will set or clear a selected output.
+ *
+ * One of the events is preselected to generate the period, so the maximum
+ * number of simultaneous channels is limited to 15. Note that the period is
+ * global to all channels, so the PWM driver will refuse to set different
+ * values for it unless only one channel is requested.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+/* LPC18xx SCT registers */
+#define LPC18XX_PWM_CONFIG 0x000
+#define LPC18XX_PWM_CONFIG_UNIFY BIT(0)
+#define LPC18XX_PWM_CONFIG_NORELOAD BIT(7)
+
+#define LPC18XX_PWM_CTRL 0x004
+#define LPC18XX_PWM_CTRL_HALT BIT(2)
+#define LPC18XX_PWM_BIDIR BIT(4)
+#define LPC18XX_PWM_PRE_SHIFT 5
+#define LPC18XX_PWM_PRE_MASK (0xff << LPC18XX_PWM_PRE_SHIFT)
+#define LPC18XX_PWM_PRE(x) (x << LPC18XX_PWM_PRE_SHIFT)
+
+#define LPC18XX_PWM_LIMIT 0x008
+
+#define LPC18XX_PWM_RES_BASE 0x058
+#define LPC18XX_PWM_RES_SHIFT(_ch) (_ch * 2)
+#define LPC18XX_PWM_RES(_ch, _action) (_action << LPC18XX_PWM_RES_SHIFT(_ch))
+#define LPC18XX_PWM_RES_MASK(_ch) (0x3 << LPC18XX_PWM_RES_SHIFT(_ch))
+
+#define LPC18XX_PWM_MATCH_BASE 0x100
+#define LPC18XX_PWM_MATCH(_ch) (LPC18XX_PWM_MATCH_BASE + _ch * 4)
+
+#define LPC18XX_PWM_MATCHREL_BASE 0x200
+#define LPC18XX_PWM_MATCHREL(_ch) (LPC18XX_PWM_MATCHREL_BASE + _ch * 4)
+
+#define LPC18XX_PWM_EVSTATEMSK_BASE 0x300
+#define LPC18XX_PWM_EVSTATEMSK(_ch) (LPC18XX_PWM_EVSTATEMSK_BASE + _ch * 8)
+#define LPC18XX_PWM_EVSTATEMSK_ALL 0xffffffff
+
+#define LPC18XX_PWM_EVCTRL_BASE 0x304
+#define LPC18XX_PWM_EVCTRL(_ev) (LPC18XX_PWM_EVCTRL_BASE + _ev * 8)
+
+#define LPC18XX_PWM_EVCTRL_MATCH(_ch) _ch
+
+#define LPC18XX_PWM_EVCTRL_COMB_SHIFT 12
+#define LPC18XX_PWM_EVCTRL_COMB_MATCH (0x1 << LPC18XX_PWM_EVCTRL_COMB_SHIFT)
+
+#define LPC18XX_PWM_OUTPUTSET_BASE 0x500
+#define LPC18XX_PWM_OUTPUTSET(_ch) (LPC18XX_PWM_OUTPUTSET_BASE + _ch * 8)
+
+#define LPC18XX_PWM_OUTPUTCL_BASE 0x504
+#define LPC18XX_PWM_OUTPUTCL(_ch) (LPC18XX_PWM_OUTPUTCL_BASE + _ch * 8)
+
+/* LPC18xx SCT unified counter */
+#define LPC18XX_PWM_TIMER_MAX 0xffffffff
+
+/* LPC18xx SCT events */
+#define LPC18XX_PWM_EVENT_PERIOD 0
+#define LPC18XX_PWM_EVENT_MAX 16
+
+/* SCT conflict resolution */
+enum lpc18xx_pwm_res_action {
+ LPC18XX_PWM_RES_NONE,
+ LPC18XX_PWM_RES_SET,
+ LPC18XX_PWM_RES_CLEAR,
+ LPC18XX_PWM_RES_TOGGLE,
+};
+
+struct lpc18xx_pwm_data {
+ unsigned int duty_event;
+};
+
+struct lpc18xx_pwm_chip {
+ struct device *dev;
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *pwm_clk;
+ unsigned long clk_rate;
+ unsigned int period_ns;
+ unsigned int min_period_ns;
+ unsigned int max_period_ns;
+ unsigned int period_event;
+ unsigned long event_map;
+ struct mutex res_lock;
+ struct mutex period_lock;
+};
+
+static inline struct lpc18xx_pwm_chip *
+to_lpc18xx_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct lpc18xx_pwm_chip, chip);
+}
+
+static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ u32 reg, u32 val)
+{
+ writel(val, lpc18xx_pwm->base + reg);
+}
+
+static inline u32 lpc18xx_pwm_readl(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ u32 reg)
+{
+ return readl(lpc18xx_pwm->base + reg);
+}
+
+static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+ struct pwm_device *pwm,
+ enum lpc18xx_pwm_res_action action)
+{
+ u32 val;
+
+ mutex_lock(&lpc18xx_pwm->res_lock);
+
+ /*
+	 * Simultaneous set and clear may happen on an output; this is the case
+	 * when duty_ns == period_ns. The LPC18xx SCT allows a conflict
+	 * resolution action to be set for such a case.
+ */
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_RES_BASE);
+ val &= ~LPC18XX_PWM_RES_MASK(pwm->hwpwm);
+ val |= LPC18XX_PWM_RES(pwm->hwpwm, action);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_RES_BASE, val);
+
+ mutex_unlock(&lpc18xx_pwm->res_lock);
+}
+
+static void lpc18xx_pwm_config_period(struct pwm_chip *chip, int period_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ u64 val;
+
+ val = (u64)period_ns * lpc18xx_pwm->clk_rate;
+ do_div(val, NSEC_PER_SEC);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event),
+ (u32)val - 1);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event),
+ (u32)val - 1);
+}
+
+static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
+ struct pwm_device *pwm, int duty_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ u64 val;
+
+ val = (u64)duty_ns * lpc18xx_pwm->clk_rate;
+ do_div(val, NSEC_PER_SEC);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCH(lpc18xx_data->duty_event),
+ (u32)val);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event),
+ (u32)val);
+}
+
+static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ int requested_events, i;
+
+ if (period_ns < lpc18xx_pwm->min_period_ns ||
+ period_ns > lpc18xx_pwm->max_period_ns) {
+ dev_err(chip->dev, "period %d not in range\n", period_ns);
+ return -ERANGE;
+ }
+
+ mutex_lock(&lpc18xx_pwm->period_lock);
+
+ requested_events = bitmap_weight(&lpc18xx_pwm->event_map,
+ LPC18XX_PWM_EVENT_MAX);
+
+ /*
+ * The PWM supports only a single period for all PWM channels.
+ * Once the period is set, it can only be changed if no more than one
+ * channel is requested at that moment.
+ */
+ if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
+ lpc18xx_pwm->period_ns) {
+ dev_err(chip->dev, "conflicting period requested for PWM %u\n",
+ pwm->hwpwm);
+ mutex_unlock(&lpc18xx_pwm->period_lock);
+ return -EBUSY;
+ }
+
+ if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
+ !lpc18xx_pwm->period_ns) {
+ lpc18xx_pwm->period_ns = period_ns;
+ for (i = 0; i < chip->npwm; i++)
+ pwm_set_period(&chip->pwms[i], period_ns);
+ lpc18xx_pwm_config_period(chip, period_ns);
+ }
+
+ mutex_unlock(&lpc18xx_pwm->period_lock);
+
+ lpc18xx_pwm_config_duty(chip, pwm, duty_ns);
+
+ return 0;
+}
+
+static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ return 0;
+}
+
+static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ enum lpc18xx_pwm_res_action res_action;
+ unsigned int set_event, clear_event;
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event),
+ LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_data->duty_event) |
+ LPC18XX_PWM_EVCTRL_COMB_MATCH);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
+ LPC18XX_PWM_EVSTATEMSK_ALL);
+
+ if (pwm->polarity == PWM_POLARITY_NORMAL) {
+ set_event = lpc18xx_pwm->period_event;
+ clear_event = lpc18xx_data->duty_event;
+ res_action = LPC18XX_PWM_RES_SET;
+ } else {
+ set_event = lpc18xx_data->duty_event;
+ clear_event = lpc18xx_pwm->period_event;
+ res_action = LPC18XX_PWM_RES_CLEAR;
+ }
+
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm),
+ BIT(set_event));
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm),
+ BIT(clear_event));
+ lpc18xx_pwm_set_conflict_res(lpc18xx_pwm, pwm, res_action);
+
+ return 0;
+}
+
+static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event), 0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm), 0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm), 0);
+}
+
+static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+ unsigned long event;
+
+ event = find_first_zero_bit(&lpc18xx_pwm->event_map,
+ LPC18XX_PWM_EVENT_MAX);
+
+ if (event >= LPC18XX_PWM_EVENT_MAX) {
+ dev_err(lpc18xx_pwm->dev,
+ "maximum number of simultaneous channels reached\n");
+ return -EBUSY;
+	}
+
+ set_bit(event, &lpc18xx_pwm->event_map);
+ lpc18xx_data->duty_event = event;
+ lpc18xx_pwm_config_duty(chip, pwm, pwm_get_duty_cycle(pwm));
+
+ return 0;
+}
+
+static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+ struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+ pwm_disable(pwm);
+ pwm_set_duty_cycle(pwm, 0);
+ clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
+}
+
+static const struct pwm_ops lpc18xx_pwm_ops = {
+ .config = lpc18xx_pwm_config,
+ .set_polarity = lpc18xx_pwm_set_polarity,
+ .enable = lpc18xx_pwm_enable,
+ .disable = lpc18xx_pwm_disable,
+ .request = lpc18xx_pwm_request,
+ .free = lpc18xx_pwm_free,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id lpc18xx_pwm_of_match[] = {
+ { .compatible = "nxp,lpc1850-sct-pwm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
+
+static int lpc18xx_pwm_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm;
+ struct pwm_device *pwm;
+ struct resource *res;
+ int ret, i;
+ u64 val;
+
+ lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
+ GFP_KERNEL);
+ if (!lpc18xx_pwm)
+ return -ENOMEM;
+
+ lpc18xx_pwm->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lpc18xx_pwm->base))
+ return PTR_ERR(lpc18xx_pwm->base);
+
+ lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ if (IS_ERR(lpc18xx_pwm->pwm_clk)) {
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
+ return PTR_ERR(lpc18xx_pwm->pwm_clk);
+ }
+
+ ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
+ return ret;
+ }
+
+ lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
+
+ mutex_init(&lpc18xx_pwm->res_lock);
+ mutex_init(&lpc18xx_pwm->period_lock);
+
+ val = (u64)NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX;
+ do_div(val, lpc18xx_pwm->clk_rate);
+ lpc18xx_pwm->max_period_ns = val;
+
+ lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
+ lpc18xx_pwm->clk_rate);
+
+ lpc18xx_pwm->chip.dev = &pdev->dev;
+ lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
+ lpc18xx_pwm->chip.base = -1;
+ lpc18xx_pwm->chip.npwm = 16;
+ lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ lpc18xx_pwm->chip.of_pwm_n_cells = 3;
+
+ /* SCT counter must be in unify (32 bit) mode */
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
+ LPC18XX_PWM_CONFIG_UNIFY);
+
+ /*
+	 * Every time the timer counter reaches the period value, the related
+	 * event is triggered and the counter is reset to 0.
+ */
+ set_bit(LPC18XX_PWM_EVENT_PERIOD, &lpc18xx_pwm->event_map);
+ lpc18xx_pwm->period_event = LPC18XX_PWM_EVENT_PERIOD;
+
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVSTATEMSK(lpc18xx_pwm->period_event),
+ LPC18XX_PWM_EVSTATEMSK_ALL);
+
+ val = LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_pwm->period_event) |
+ LPC18XX_PWM_EVCTRL_COMB_MATCH;
+ lpc18xx_pwm_writel(lpc18xx_pwm,
+ LPC18XX_PWM_EVCTRL(lpc18xx_pwm->period_event), val);
+
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
+ BIT(lpc18xx_pwm->period_event));
+
+ ret = pwmchip_add(&lpc18xx_pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+ goto disable_pwmclk;
+ }
+
+ for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
+ pwm = &lpc18xx_pwm->chip.pwms[i];
+ pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev,
+ sizeof(struct lpc18xx_pwm_data),
+ GFP_KERNEL);
+ if (!pwm->chip_data) {
+ ret = -ENOMEM;
+ goto remove_pwmchip;
+ }
+ }
+
+ platform_set_drvdata(pdev, lpc18xx_pwm);
+
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+ val &= ~LPC18XX_PWM_BIDIR;
+ val &= ~LPC18XX_PWM_CTRL_HALT;
+ val &= ~LPC18XX_PWM_PRE_MASK;
+ val |= LPC18XX_PWM_PRE(0);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
+
+ return 0;
+
+remove_pwmchip:
+ pwmchip_remove(&lpc18xx_pwm->chip);
+disable_pwmclk:
+ clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+ return ret;
+}
+
+static int lpc18xx_pwm_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
+ u32 val;
+
+ val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+ lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
+ val | LPC18XX_PWM_CTRL_HALT);
+
+ clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+
+ return pwmchip_remove(&lpc18xx_pwm->chip);
+}
+
+static struct platform_driver lpc18xx_pwm_driver = {
+ .driver = {
+ .name = "lpc18xx-sct-pwm",
+ .of_match_table = lpc18xx_pwm_of_match,
+ },
+ .probe = lpc18xx_pwm_probe,
+ .remove = lpc18xx_pwm_remove,
+};
+module_platform_driver(lpc18xx_pwm_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx PWM driver");
+MODULE_LICENSE("GPL v2");
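A sketch of the match-register arithmetic used by lpc18xx_pwm_config_period(), lpc18xx_pwm_config_duty() and the range limits computed in probe(); the 180 MHz SCT clock rate is an assumption for the example, not a value from this patch:

  #include <stdio.h>
  #include <stdint.h>

  #define NSEC_PER_SEC           1000000000ULL
  #define LPC18XX_PWM_TIMER_MAX  0xffffffffULL

  int main(void)
  {
          uint64_t clk_rate = 180000000;  /* assumed SCT clock rate */

          /* Period match: counts - 1 is written to MATCH/MATCHREL */
          int period_ns = 1000000;        /* 1 kHz */
          uint64_t counts =
                  (uint64_t)period_ns * clk_rate / NSEC_PER_SEC;

          /* Range limits, computed as in probe() */
          uint64_t max_ns = NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX / clk_rate;
          uint64_t min_ns = (NSEC_PER_SEC + clk_rate - 1) / clk_rate;

          printf("counts=%llu (write %llu)\n",
                 (unsigned long long)counts,
                 (unsigned long long)(counts - 1));
          printf("period range: %llu..%llu ns\n",
                 (unsigned long long)min_ns, (unsigned long long)max_ns);
          return 0;
  }

At the assumed 180 MHz this gives a usable period of 6 ns up to roughly 23.86 s before the 32-bit unified counter wraps.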
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index b430811e14f5..9a596324ebef 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -77,7 +77,7 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM channel is disabled, make sure to turn on the clock
* before writing the register. Otherwise, keep it enabled.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
ret = clk_prepare_enable(mxs->clk);
if (ret)
return ret;
@@ -92,7 +92,7 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* If the PWM is not enabled, turn the clock off again to save power.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable_unprepare(mxs->clk);
return 0;
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 34b5c275a92a..70448a6079b0 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -2,6 +2,7 @@
* Driver for PCA9685 16-channel 12-bit PWM LED controller
*
* Copyright (C) 2013 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ * Copyright (C) 2015 Clemens Gruber <clemens.gruber@pqgruber.com>
*
* based on the pwm-twl-led.c driver
*
@@ -24,6 +25,15 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+
+/*
+ * Because the PCA9685 has only one prescaler per chip, changing the period of
+ * one channel affects the period of all 16 PWM outputs!
+ * However, the ratio between each configured duty cycle and the chip-wide
+ * period remains constant, because the OFF time is set in proportion to the
+ * counter range.
+ */
#define PCA9685_MODE1 0x00
#define PCA9685_MODE2 0x01
@@ -42,10 +52,18 @@
#define PCA9685_ALL_LED_OFF_H 0xFD
#define PCA9685_PRESCALE 0xFE
+#define PCA9685_PRESCALE_MIN 0x03 /* => max. frequency of 1526 Hz */
+#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
+
+#define PCA9685_COUNTER_RANGE 4096
+#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period_ns = 1/200 Hz */
+#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */
+
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
#define LED_FULL (1 << 4)
+#define MODE1_RESTART (1 << 7)
#define MODE1_SLEEP (1 << 4)
#define MODE2_INVRT (1 << 4)
#define MODE2_OUTDRV (1 << 2)
@@ -59,6 +77,8 @@ struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
int active_cnt;
+ int duty_ns;
+ int period_ns;
};
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
@@ -72,6 +92,47 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct pca9685 *pca = to_pca(chip);
unsigned long long duty;
unsigned int reg;
+ int prescale;
+
+ if (period_ns != pca->period_ns) {
+ prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns,
+ PCA9685_COUNTER_RANGE * 1000) - 1;
+
+ if (prescale >= PCA9685_PRESCALE_MIN &&
+ prescale <= PCA9685_PRESCALE_MAX) {
+ /* Put chip into sleep mode */
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, MODE1_SLEEP);
+
+ /* Change the chip-wide output frequency */
+ regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
+
+ /* Wake the chip up */
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, 0x0);
+
+ /* Wait 500us for the oscillator to be back up */
+ udelay(500);
+
+ pca->period_ns = period_ns;
+
+ /*
+ * If the duty cycle did not change, restart PWM with
+ * the same duty cycle to period ratio and return.
+ */
+ if (duty_ns == pca->duty_ns) {
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_RESTART, 0x1);
+ return 0;
+ }
+ } else {
+ dev_err(chip->dev,
+ "prescaler not set: period out of bounds!\n");
+ return -EINVAL;
+ }
+ }
+
+ pca->duty_ns = duty_ns;
if (duty_ns < 1) {
if (pwm->hwpwm >= PCA9685_MAXCHAN)
@@ -85,6 +146,22 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (duty_ns == period_ns) {
+ /* Clear both OFF registers */
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_OFF_L;
+ else
+ reg = LED_N_OFF_L(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0x0);
+
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_OFF_H;
+ else
+ reg = LED_N_OFF_H(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0x0);
+
+ /* Set the full ON bit */
if (pwm->hwpwm >= PCA9685_MAXCHAN)
reg = PCA9685_ALL_LED_ON_H;
else
@@ -95,7 +172,7 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- duty = 4096 * (unsigned long long)duty_ns;
+ duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns;
duty = DIV_ROUND_UP_ULL(duty, period_ns);
if (pwm->hwpwm >= PCA9685_MAXCHAN)
@@ -112,6 +189,14 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf);
+ /* Clear the full ON bit, otherwise the set OFF time has no effect */
+ if (pwm->hwpwm >= PCA9685_MAXCHAN)
+ reg = PCA9685_ALL_LED_ON_H;
+ else
+ reg = LED_N_ON_H(pwm->hwpwm);
+
+ regmap_write(pca->regmap, reg, 0);
+
return 0;
}
@@ -228,6 +313,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
ret);
return ret;
}
+ pca->duty_ns = 0;
+ pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
@@ -285,7 +372,6 @@ MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
- .owner = THIS_MODULE,
.of_match_table = pca9685_dt_ids,
},
.probe = pca9685_pwm_probe,
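The prescaler math added to pca9685_pwm_config() rounds to the nearest register value. A worked check against the constants from this diff, with DIV_ROUND_CLOSEST restated for positive operands:

  #include <stdio.h>

  #define PCA9685_OSC_CLOCK_MHZ  25
  #define PCA9685_COUNTER_RANGE  4096

  /* DIV_ROUND_CLOSEST for positive operands */
  static int div_round_closest(long long n, long long d)
  {
          return (int)((n + d / 2) / d);
  }

  int main(void)
  {
          int period_ns = 5000000;  /* PCA9685_DEFAULT_PERIOD, 200 Hz */
          int prescale = div_round_closest(
                  (long long)PCA9685_OSC_CLOCK_MHZ * period_ns,
                  PCA9685_COUNTER_RANGE * 1000LL) - 1;

          /* prescale == 30; actual rate = 25 MHz / (4096 * 31) ~= 197 Hz.
           * The 0x03..0xFF register window bounds the output to the
           * ~24..1526 Hz range noted in the defines above. */
          printf("prescale=%d\n", prescale);
          return 0;
  }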
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index ee63f9e9d0fb..075c1a764ba2 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -301,7 +301,7 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
pwm->duty = duty;
/* If the channel is disabled we're done. */
- if (!test_bit(PWMF_ENABLED, &_pwm->flags))
+ if (!pwm_is_enabled(_pwm))
return 0;
if (duty_only && pwm->timer_on) {
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 9442df244101..7d9cc9049522 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -83,7 +83,7 @@ static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
PWM_CONTINUOUS;
u32 val;
- if (pwm->polarity == PWM_POLARITY_INVERSED)
+ if (pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED)
enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
else
enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index cabd7d8e05cc..d4de0607b502 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -112,7 +112,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM channel is disabled, make sure to turn on the clock
* before writing the register. Otherwise, keep it enabled.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
err = clk_prepare_enable(pc->clk);
if (err < 0)
return err;
@@ -124,7 +124,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* If the PWM is not enabled, turn the clock off again to save power.
*/
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
clk_disable_unprepare(pc->clk);
return 0;
@@ -214,7 +214,7 @@ static int tegra_pwm_remove(struct platform_device *pdev)
for (i = 0; i < NUM_PWM; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
if (clk_prepare_enable(pc->clk) < 0)
continue;
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index e557befdf4e6..616af764a276 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -97,7 +97,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writew(reg_val, pc->mmio_base + ECCTL2);
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
/* Update active registers if not running */
writel(duty_cycles, pc->mmio_base + CAP2);
writel(period_cycles, pc->mmio_base + CAP1);
@@ -111,7 +111,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writel(period_cycles, pc->mmio_base + CAP3);
}
- if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (!pwm_is_enabled(pwm)) {
reg_val = readw(pc->mmio_base + ECCTL2);
/* Disable APWM mode to put APWM output Low */
reg_val &= ~ECCTL2_APWM_MODE;
@@ -179,7 +179,7 @@ static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
dev_warn(chip->dev, "Removing PWM device without disabling\n");
pm_runtime_put_sync(chip->dev);
}
@@ -306,7 +306,7 @@ static int ecap_pwm_suspend(struct device *dev)
ecap_pwm_save_context(pc);
/* Disable explicitly if PWM is running */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
pm_runtime_put_sync(dev);
return 0;
@@ -318,7 +318,7 @@ static int ecap_pwm_resume(struct device *dev)
struct pwm_device *pwm = pc->chip.pwms;
/* Enable explicitly if PWM was running */
- if (test_bit(PWMF_ENABLED, &pwm->flags))
+ if (pwm_is_enabled(pwm))
pm_runtime_get_sync(dev);
ecap_pwm_restore_context(pc);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 694b3cf7694b..6a41e66015b6 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -407,7 +407,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ if (pwm_is_enabled(pwm)) {
dev_warn(chip->dev, "Removing PWM device without disabling\n");
pm_runtime_put_sync(chip->dev);
}
@@ -565,7 +565,7 @@ static int ehrpwm_pwm_suspend(struct device *dev)
for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
continue;
/* Disable explicitly if PWM is running */
@@ -582,7 +582,7 @@ static int ehrpwm_pwm_resume(struct device *dev)
for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
- if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (!pwm_is_enabled(pwm))
continue;
/* Enable explicitly if PWM was running */
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 4bd0c639e16d..c472772f00a7 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -46,7 +46,7 @@ static ssize_t pwm_period_show(struct device *child,
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- return sprintf(buf, "%u\n", pwm->period);
+ return sprintf(buf, "%u\n", pwm_get_period(pwm));
}
static ssize_t pwm_period_store(struct device *child,
@@ -61,7 +61,7 @@ static ssize_t pwm_period_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, pwm->duty_cycle, val);
+ ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val);
return ret ? : size;
}
@@ -72,7 +72,7 @@ static ssize_t pwm_duty_cycle_show(struct device *child,
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- return sprintf(buf, "%u\n", pwm->duty_cycle);
+ return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm));
}
static ssize_t pwm_duty_cycle_store(struct device *child,
@@ -87,7 +87,7 @@ static ssize_t pwm_duty_cycle_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, val, pwm->period);
+ ret = pwm_config(pwm, val, pwm_get_period(pwm));
return ret ? : size;
}
@@ -97,7 +97,7 @@ static ssize_t pwm_enable_show(struct device *child,
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
- int enabled = test_bit(PWMF_ENABLED, &pwm->flags);
+ int enabled = pwm_is_enabled(pwm);
return sprintf(buf, "%d\n", enabled);
}
@@ -133,8 +133,19 @@ static ssize_t pwm_polarity_show(struct device *child,
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
+ const char *polarity = "unknown";
- return sprintf(buf, "%s\n", pwm->polarity ? "inversed" : "normal");
+ switch (pwm_get_polarity(pwm)) {
+ case PWM_POLARITY_NORMAL:
+ polarity = "normal";
+ break;
+
+ case PWM_POLARITY_INVERSED:
+ polarity = "inversed";
+ break;
+ }
+
+ return sprintf(buf, "%s\n", polarity);
}
static ssize_t pwm_polarity_store(struct device *child,
@@ -301,9 +312,9 @@ static struct attribute *pwm_chip_attrs[] = {
ATTRIBUTE_GROUPS(pwm_chip);
static struct class pwm_class = {
- .name = "pwm",
- .owner = THIS_MODULE,
- .dev_groups = pwm_chip_groups,
+ .name = "pwm",
+ .owner = THIS_MODULE,
+ .dev_groups = pwm_chip_groups,
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig
index f9da613052c2..4c3c67d13254 100644
--- a/drivers/ras/Kconfig
+++ b/drivers/ras/Kconfig
@@ -1,2 +1,35 @@
-config RAS
- bool
+menuconfig RAS
+ bool "Reliability, Availability and Serviceability (RAS) features"
+ help
+ Reliability, availability and serviceability (RAS) is a computer
+ hardware engineering term. Computers designed with higher levels
+ of RAS have a multitude of features that protect data integrity
+ and help them stay available for long periods of time without
+ failure.
+
+ Reliability can be defined as the probability that the system will
+ produce correct outputs up to some given time. Reliability is
+ enhanced by features that help to avoid, detect and repair hardware
+ faults.
+
+	  Availability is the probability that a system is operational at a
+	  given time, i.e. the amount of time a device is actually operating,
+	  expressed as a percentage of the total time it should be operating.
+
+ Serviceability or maintainability is the simplicity and speed with
+ which a system can be repaired or maintained; if the time to repair
+ a failed system increases, then availability will decrease.
+
+ Note that Reliability and Availability are distinct concepts:
+ Reliability is a measure of the ability of a system to function
+ correctly, including avoiding data corruption, whereas Availability
+ measures how often it is available for use, even though it may not
+ be functioning correctly. For example, a server may run forever and
+ so have ideal availability, but may be unreliable, with frequent
+ data corruption.
+
+if RAS
+
+source arch/x86/ras/Kconfig
+
+endif
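As a standard illustration of the availability definition in the help text
above (our addition, not text from the patch): steady-state availability is
commonly expressed as MTBF / (MTBF + MTTR).

#include <stdio.h>

/* A = MTBF / (MTBF + MTTR): fraction of time the system is operational. */
static double availability(double mtbf_hours, double mttr_hours)
{
	return mtbf_hours / (mtbf_hours + mttr_hours);
}

int main(void)
{
	/* 10000 h between failures, 1 h to repair -> ~0.9999 ("four nines"). */
	printf("%.4f\n", availability(10000.0, 1.0));
	return 0;
}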
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..a62a89674fb5 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -78,7 +78,6 @@ struct pm800_regulator_info {
};
struct pm800_regulators {
- struct regulator_dev *regulators[PM800_ID_RG_MAX];
struct pm80x_chip *chip;
struct regmap *map;
};
@@ -92,14 +91,16 @@ struct pm800_regulators {
* not the constant voltage table.
* n_volt - Number of available selectors
*/
-#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt) \
+#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
{ \
.desc = { \
- .name = #vreg, \
- .ops = &pm800_volt_range_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = PM800_ID_##vreg, \
- .owner = THIS_MODULE, \
+ .name = #vreg, \
+ .of_match = of_match_ptr(#match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &pm800_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
.n_voltages = n_volt, \
.linear_ranges = volt_ranges, \
.n_linear_ranges = ARRAY_SIZE(volt_ranges), \
@@ -108,7 +109,7 @@ struct pm800_regulators {
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
- .max_ua = (amax), \
+ .max_ua = (amax), \
}
/*
@@ -120,22 +121,24 @@ struct pm800_regulators {
* For all the LDOes, there are too many ranges. Using volt_table will be
* simpler and faster.
*/
-#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table) \
+#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
{ \
.desc = { \
- .name = #vreg, \
- .ops = &pm800_volt_table_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = PM800_ID_##vreg, \
- .owner = THIS_MODULE, \
- .n_voltages = ARRAY_SIZE(ldo_volt_table), \
- .vsel_reg = PM800_##vreg##_VOUT, \
- .vsel_mask = 0x1f, \
- .enable_reg = PM800_##ereg, \
- .enable_mask = 1 << (ebit), \
- .volt_table = ldo_volt_table, \
+ .name = #vreg, \
+ .of_match = of_match_ptr(#match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &pm800_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .vsel_reg = PM800_##vreg##_VOUT, \
+ .vsel_mask = 0xf, \
+ .enable_reg = PM800_##ereg, \
+ .enable_mask = 1 << (ebit), \
+ .volt_table = ldo_volt_table, \
}, \
- .max_ua = (amax), \
+ .max_ua = (amax), \
}
/* Ranges are sorted in ascending order. */
@@ -178,122 +181,66 @@ static int pm800_get_current_limit(struct regulator_dev *rdev)
}
static struct regulator_ops pm800_volt_range_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .get_current_limit = pm800_get_current_limit,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
};
static struct regulator_ops pm800_volt_table_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .get_current_limit = pm800_get_current_limit,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
};
/* The array is indexed by id(PM800_ID_XXX) */
static struct pm800_regulator_info pm800_regulator_info[] = {
- PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
- PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
- PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
-
- PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
- PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
- PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
- PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
- PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
-};
-
-#define PM800_REGULATOR_OF_MATCH(_name, _id) \
- [PM800_ID_##_id] = { \
- .name = #_name, \
- .driver_data = &pm800_regulator_info[PM800_ID_##_id], \
- }
-
-static struct of_regulator_match pm800_regulator_matches[] = {
- PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
- PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
- PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
- PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
- PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
- PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
- PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
- PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
- PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
- PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
- PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
- PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
- PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
- PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
- PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
- PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
- PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
- PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
- PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
- PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
- PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
- PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
- PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
- PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
+ PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+ PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+
+ PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+ PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+ PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+ PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
};
-static int pm800_regulator_dt_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- ret = of_regulator_match(&pdev->dev, np,
- pm800_regulator_matches,
- ARRAY_SIZE(pm800_regulator_matches));
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int pm800_regulator_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
struct pm800_regulators *pm800_data;
- struct pm800_regulator_info *info;
struct regulator_config config = { };
struct regulator_init_data *init_data;
int i, ret;
- if (!pdata || pdata->num_regulators == 0) {
- if (IS_ENABLED(CONFIG_OF)) {
- ret = pm800_regulator_dt_init(pdev);
- if (ret)
- return ret;
- } else {
- return -ENODEV;
- }
- } else if (pdata->num_regulators) {
+ if (pdata && pdata->num_regulators) {
unsigned int count = 0;
/* Check whether num_regulator is valid. */
@@ -303,8 +250,6 @@ static int pm800_regulator_probe(struct platform_device *pdev)
}
if (count != pdata->num_regulators)
return -EINVAL;
- } else {
- return -EINVAL;
}
pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
@@ -317,30 +262,27 @@ static int pm800_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pm800_data);
+ config.dev = chip->dev;
+ config.regmap = pm800_data->map;
for (i = 0; i < PM800_ID_RG_MAX; i++) {
- if (!pdata || pdata->num_regulators == 0)
- init_data = pm800_regulator_matches[i].init_data;
- else
+ struct regulator_dev *regulator;
+
+ if (pdata && pdata->num_regulators) {
init_data = pdata->regulators[i];
- if (!init_data)
- continue;
- info = pm800_regulator_matches[i].driver_data;
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.regmap = pm800_data->map;
- config.of_node = pm800_regulator_matches[i].of_node;
-
- pm800_data->regulators[i] =
- regulator_register(&info->desc, &config);
- if (IS_ERR(pm800_data->regulators[i])) {
- ret = PTR_ERR(pm800_data->regulators[i]);
- dev_err(&pdev->dev, "Failed to register %s\n",
- info->desc.name);
+ if (!init_data)
+ continue;
- while (--i >= 0)
- regulator_unregister(pm800_data->regulators[i]);
+ config.init_data = init_data;
+ }
+
+ config.driver_data = &pm800_regulator_info[i];
+ regulator = devm_regulator_register(&pdev->dev,
+ &pm800_regulator_info[i].desc, &config);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev, "Failed to register %s\n",
+ pm800_regulator_info[i].desc.name);
return ret;
}
}
@@ -348,23 +290,11 @@ static int pm800_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int pm800_regulator_remove(struct platform_device *pdev)
-{
- struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < PM800_ID_RG_MAX; i++)
- regulator_unregister(pm800_data->regulators[i]);
-
- return 0;
-}
-
static struct platform_driver pm800_regulator_driver = {
.driver = {
.name = "88pm80x-regulator",
},
.probe = pm800_regulator_probe,
- .remove = pm800_regulator_remove,
};
module_platform_driver(pm800_regulator_driver);
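The 88pm800 conversion above follows a common pattern: once of_match and
regulators_node are set in struct regulator_desc, the regulator core parses
the DT subnodes itself, so the driver's of_regulator_match() table and the
manual unregistration path (replaced by devm_regulator_register()) become
unnecessary. A minimal sketch with a hypothetical regulator name:

/* Hypothetical "example" regulator showing the desc fields the patch adds. */
static const struct regulator_desc example_desc = {
	.name		 = "EXAMPLE",
	.of_match	 = of_match_ptr("example"),	/* DT subnode name */
	.regulators_node = of_match_ptr("regulators"),	/* parent node */
	.type		 = REGULATOR_VOLTAGE,
	.owner		 = THIS_MODULE,
};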
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index bef3bde6971b..64bccff557be 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -209,13 +209,13 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
depends on I2C
select REGMAP_I2C
help
Say y here to support for the Dialog Semiconductor DA9211/DA9212
- /DA9213/DA9214.
- The DA9211/DA9212/DA9213/DA9214 is a multi-phase synchronous
+ /DA9213/DA9214/DA9215.
+ The DA9211/DA9212/DA9213/DA9214/DA9215 is a multi-phase synchronous
step down converter 12A or 16A DC-DC Buck controlled through an I2C
interface.
@@ -407,13 +407,13 @@ config REGULATOR_MAX77686
Exynos-4 chips to control VARM and VINT voltages.
config REGULATOR_MAX77693
- tristate "Maxim MAX77693 regulator"
- depends on MFD_MAX77693
+ tristate "Maxim 77693/77843 regulator"
+ depends on (MFD_MAX77693 || MFD_MAX77843)
help
- This driver controls a Maxim 77693 regulator via I2C bus.
+	  This driver controls Maxim 77693/77843 regulators via the I2C bus.
The regulators include two LDOs, 'SAFEOUT1', 'SAFEOUT2'
and one current regulator 'CHARGER'. This is suitable for
- Exynos-4x12 chips.
+ Exynos-4x12 (MAX77693) or Exynos5433 (MAX77843) SoC chips.
config REGULATOR_MAX77802
tristate "Maxim 77802 regulator"
@@ -424,14 +424,6 @@ config REGULATOR_MAX77802
Exynos5420/Exynos5800 SoCs to control various voltages.
It includes support for control of voltage and ramp speed.
-config REGULATOR_MAX77843
- tristate "Maxim 77843 regulator"
- depends on MFD_MAX77843
- help
- This driver controls a Maxim 77843 regulator.
- The regulator include two 'SAFEOUT' for USB(Universal Serial Bus)
- This is suitable for Exynos5433 SoC chips.
-
config REGULATOR_MC13XXX_CORE
tristate
@@ -451,6 +443,15 @@ config REGULATOR_MC13892
Say y here to support the regulators found on the Freescale MC13892
PMIC.
+config REGULATOR_MT6311
+ tristate "MediaTek MT6311 PMIC"
+ depends on I2C
+ help
+	  Say y here to enable the power regulator of the MediaTek MT6311
+	  PMIC.
+	  This driver supports the control of the device's different power
+	  rails through the regulator interface.
+
config REGULATOR_MT6397
tristate "MediaTek MT6397 PMIC"
depends on MFD_MT6397
@@ -522,6 +523,18 @@ config REGULATOR_QCOM_RPM
Qualcomm RPM as a module. The module will be named
"qcom_rpm-regulator".
+config REGULATOR_QCOM_SMD_RPM
+ tristate "Qualcomm SMD based RPM regulator driver"
+ depends on QCOM_SMD_RPM
+ help
+ If you say yes to this option, support will be included for the
+ regulators exposed by the Resource Power Manager found in Qualcomm
+ 8974 based devices.
+
+ Say M here if you want to include support for the regulators on the
+ Qualcomm RPM as a module. The module will be named
+ "qcom_smd-regulator".
+
config REGULATOR_QCOM_SPMI
tristate "Qualcomm SPMI regulator driver"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 91bf76267404..0f8174913c17 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -56,12 +56,13 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
-obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 2ff73d72ca34..896db168e4bd 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -530,7 +530,6 @@ MODULE_DEVICE_TABLE(i2c, act8865_ids);
static struct i2c_driver act8865_pmic_driver = {
.driver = {
.name = "act8865",
- .owner = THIS_MODULE,
},
.probe = act8865_pmic_probe,
.id_table = act8865_ids,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 48016a050d5f..ea50a886ba63 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -275,4 +275,3 @@ module_exit(ad5398_exit);
MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
MODULE_AUTHOR("Sonic Zhang");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad5398-regulator");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 646829132b59..01bf3476a791 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -405,3 +405,4 @@ module_platform_driver(axp20x_regulator_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_DESCRIPTION("Regulator Driver for AXP20X PMIC");
+MODULE_ALIAS("platform:axp20x-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..7a85ac9e32c5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,12 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
+static void _regulator_put(struct regulator *regulator);
+
+static struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -295,7 +301,7 @@ static int regulator_check_drms(struct regulator_dev *rdev)
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_dbg(rdev, "operation not allowed\n");
return -EPERM;
}
return 0;
@@ -640,6 +646,8 @@ static int drms_uA_update(struct regulator_dev *rdev)
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
+ lockdep_assert_held_once(&rdev->mutex);
+
/*
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
@@ -760,6 +768,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
/* locks held by caller */
static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
{
+ lockdep_assert_held_once(&rdev->mutex);
+
if (!rdev->constraints)
return -EINVAL;
@@ -1081,6 +1091,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ if (rdev->constraints->over_current_protection
+ && ops->set_over_current_protection) {
+ ret = ops->set_over_current_protection(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to set over current protection\n");
+ goto out;
+ }
+ }
+
print_constraints(rdev);
return 0;
out:
@@ -1105,6 +1124,9 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+ if (!try_module_get(supply_rdev->owner))
+ return -ENODEV;
+
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
if (rdev->supply == NULL) {
err = -ENOMEM;
@@ -1240,7 +1262,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator->debugfs = debugfs_create_dir(regulator->supply_name,
rdev->debugfs);
if (!regulator->debugfs) {
- rdev_warn(rdev, "Failed to create debugfs directory\n");
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
} else {
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
@@ -1381,9 +1403,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
if (!r) {
- dev_err(dev, "Failed to resolve %s-supply for %s\n",
- rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ if (have_full_constraints()) {
+ r = dummy_regulator_rdev;
+ } else {
+ dev_err(dev, "Failed to resolve %s-supply for %s\n",
+ rdev->supply_name, rdev->desc->name);
+ return -EPROBE_DEFER;
+ }
}
/* Recursively resolve the supply of the supply */
@@ -1398,8 +1424,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Cascade always-on state to supply */
if (_regulator_is_enabled(rdev)) {
ret = regulator_enable(rdev->supply);
- if (ret < 0)
+ if (ret < 0) {
+ if (rdev->supply)
+ _regulator_put(rdev->supply);
return ret;
+ }
}
return 0;
@@ -1584,9 +1613,11 @@ static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
- if (regulator == NULL || IS_ERR(regulator))
+ if (IS_ERR_OR_NULL(regulator))
return;
+ lockdep_assert_held_once(&regulator_list_mutex);
+
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@@ -1595,14 +1626,15 @@ static void _regulator_put(struct regulator *regulator)
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
mutex_lock(&rdev->mutex);
- kfree(regulator->supply_name);
list_del(&regulator->list);
- kfree(regulator);
rdev->open_count--;
rdev->exclusive = 0;
mutex_unlock(&rdev->mutex);
+ kfree(regulator->supply_name);
+ kfree(regulator);
+
module_put(rdev->owner);
}
@@ -1965,6 +1997,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret;
+ lockdep_assert_held_once(&rdev->mutex);
+
/* check voltage and requested load before enabling */
if (rdev->constraints &&
(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -2065,6 +2099,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
{
int ret = 0;
+ lockdep_assert_held_once(&rdev->mutex);
+
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
return -EIO;
@@ -2143,6 +2179,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
+ lockdep_assert_held_once(&rdev->mutex);
+
ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
REGULATOR_EVENT_PRE_DISABLE, NULL);
if (ret & NOTIFY_STOP_MASK)
@@ -2711,7 +2749,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
- * return succesfully even though the regulator does not support
+ * return successfully even though the regulator does not support
* changing the voltage.
*/
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
@@ -3439,6 +3477,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
+ lockdep_assert_held_once(&rdev->mutex);
+
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -3583,6 +3623,9 @@ static const struct attribute_group *regulator_dev_groups[] = {
static void regulator_dev_release(struct device *dev)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ kfree(rdev->constraints);
+ of_node_put(rdev->dev.of_node);
kfree(rdev);
}
@@ -3813,11 +3856,9 @@ void regulator_unregister(struct regulator_dev *rdev)
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
- kfree(rdev->constraints);
+ mutex_unlock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
- of_node_put(rdev->dev.of_node);
device_unregister(&rdev->dev);
- mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -4136,13 +4177,57 @@ static int __init regulator_init(void)
/* init early to allow our consumers to complete system booting */
core_initcall(regulator_init);
-static int __init regulator_init_complete(void)
+static int __init regulator_late_cleanup(struct device *dev, void *data)
{
- struct regulator_dev *rdev;
- const struct regulator_ops *ops;
- struct regulation_constraints *c;
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ const struct regulator_ops *ops = rdev->desc->ops;
+ struct regulation_constraints *c = rdev->constraints;
int enabled, ret;
+ if (c && c->always_on)
+ return 0;
+
+ if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+ return 0;
+
+ mutex_lock(&rdev->mutex);
+
+ if (rdev->use_count)
+ goto unlock;
+
+ /* If we can't read the status assume it's on. */
+ if (ops->is_enabled)
+ enabled = ops->is_enabled(rdev);
+ else
+ enabled = 1;
+
+ if (!enabled)
+ goto unlock;
+
+ if (have_full_constraints()) {
+ /* We log since this may kill the system if it goes
+ * wrong. */
+ rdev_info(rdev, "disabling\n");
+ ret = _regulator_do_disable(rdev);
+ if (ret != 0)
+ rdev_err(rdev, "couldn't disable: %d\n", ret);
+ } else {
+ /* The intention is that in future we will
+ * assume that full constraints are provided
+ * so warn even if we aren't going to do
+ * anything here.
+ */
+ rdev_warn(rdev, "incomplete constraints, leaving on\n");
+ }
+
+unlock:
+ mutex_unlock(&rdev->mutex);
+
+ return 0;
+}
+
+static int __init regulator_init_complete(void)
+{
/*
* Since DT doesn't provide an idiomatic mechanism for
* enabling full constraints and since it's much more natural
@@ -4152,58 +4237,13 @@ static int __init regulator_init_complete(void)
if (of_have_populated_dt())
has_full_constraints = true;
- mutex_lock(&regulator_list_mutex);
-
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
* for DT and ACPI as they have full constraints.
*/
- list_for_each_entry(rdev, &regulator_list, list) {
- ops = rdev->desc->ops;
- c = rdev->constraints;
-
- if (c && c->always_on)
- continue;
-
- if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
- continue;
-
- mutex_lock(&rdev->mutex);
-
- if (rdev->use_count)
- goto unlock;
-
- /* If we can't read the status assume it's on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- if (!enabled)
- goto unlock;
-
- if (have_full_constraints()) {
- /* We log since this may kill the system if it
- * goes wrong. */
- rdev_info(rdev, "disabling\n");
- ret = _regulator_do_disable(rdev);
- if (ret != 0)
- rdev_err(rdev, "couldn't disable: %d\n", ret);
- } else {
- /* The intention is that in future we will
- * assume that full constraints are provided
- * so warn even if we aren't going to do
- * anything here.
- */
- rdev_warn(rdev, "incomplete constraints, leaving on\n");
- }
-
-unlock:
- mutex_unlock(&rdev->mutex);
- }
-
- mutex_unlock(&regulator_list_mutex);
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_late_cleanup);
return 0;
}
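For reference, a sketch (with a hypothetical callback named show_rdev) of the
class-iteration pattern the late cleanup now uses instead of walking the
private regulator_list under regulator_list_mutex:

/* Callback invoked once per device registered in regulator_class. */
static int show_rdev(struct device *dev, void *data)
{
	struct regulator_dev *rdev = dev_to_rdev(dev);

	rdev_info(rdev, "present\n");
	return 0;	/* returning non-zero stops the iteration */
}

/* Usage, as in regulator_init_complete() above:
 * class_for_each_device(&regulator_class, NULL, NULL, show_rdev);
 */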
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index dd76da09b3c7..5638fe8d759d 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -818,7 +818,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
static struct platform_driver da9062_regulator_driver = {
.driver = {
.name = "da9062-regulators",
- .owner = THIS_MODULE,
},
.probe = da9062_regulator_probe,
};
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f0489cb9018b..b3517830edb6 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -22,6 +22,8 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -120,6 +122,55 @@ static int da9210_get_current_limit(struct regulator_dev *rdev)
return da9210_buck_limits[sel];
}
+static irqreturn_t da9210_irq_handler(int irq, void *data)
+{
+ struct da9210 *chip = data;
+ unsigned int val, handled = 0;
+ int error, ret = IRQ_NONE;
+
+ error = regmap_read(chip->regmap, DA9210_REG_EVENT_B, &val);
+ if (error < 0)
+ goto error_i2c;
+
+ if (val & DA9210_E_OVCURR) {
+ regulator_notifier_call_chain(chip->rdev,
+ REGULATOR_EVENT_OVER_CURRENT,
+ NULL);
+ handled |= DA9210_E_OVCURR;
+ }
+ if (val & DA9210_E_NPWRGOOD) {
+ regulator_notifier_call_chain(chip->rdev,
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
+ handled |= DA9210_E_NPWRGOOD;
+ }
+ if (val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT)) {
+ regulator_notifier_call_chain(chip->rdev,
+ REGULATOR_EVENT_OVER_TEMP, NULL);
+ handled |= val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT);
+ }
+ if (val & DA9210_E_VMAX) {
+ regulator_notifier_call_chain(chip->rdev,
+ REGULATOR_EVENT_REGULATION_OUT,
+ NULL);
+ handled |= DA9210_E_VMAX;
+ }
+ if (handled) {
+ /* Clear handled events */
+ error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
+ if (error < 0)
+ goto error_i2c;
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+
+error_i2c:
+	dev_err(regmap_get_device(chip->regmap), "I2C error: %d\n", error);
+ return ret;
+}
+
/*
* I2C driver interface functions
*/
@@ -168,6 +219,30 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
}
chip->rdev = rdev;
+ if (i2c->irq) {
+ error = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ da9210_irq_handler,
+ IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT | IRQF_SHARED,
+ "da9210", chip);
+ if (error) {
+ dev_err(&i2c->dev, "Failed to request IRQ%u: %d\n",
+ i2c->irq, error);
+ return error;
+ }
+
+ error = regmap_update_bits(chip->regmap, DA9210_REG_MASK_B,
+ DA9210_M_OVCURR | DA9210_M_NPWRGOOD |
+ DA9210_M_TEMP_WARN |
+ DA9210_M_TEMP_CRIT | DA9210_M_VMAX, 0);
+ if (error < 0) {
+ dev_err(&i2c->dev, "Failed to update mask reg: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ dev_warn(&i2c->dev, "No IRQ configured\n");
+ }
i2c_set_clientdata(i2c, chip);
@@ -184,7 +259,6 @@ MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
static struct i2c_driver da9210_regulator_driver = {
.driver = {
.name = "da9210",
- .owner = THIS_MODULE,
},
.probe = da9210_i2c_probe,
.id_table = da9210_i2c_id,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index df79e4b1946e..04ef65b7eb3d 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
/*
- * da9211-regulator.c - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ * da9211-regulator.c - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -32,6 +32,7 @@
/* DEVICE IDs */
#define DA9211_DEVICE_ID 0x22
#define DA9213_DEVICE_ID 0x23
+#define DA9215_DEVICE_ID 0x24
#define DA9211_BUCK_MODE_SLEEP 1
#define DA9211_BUCK_MODE_SYNC 2
@@ -90,6 +91,13 @@ static const int da9213_current_limits[] = {
3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};
+/* Current limits for DA9215 buck (uA); indices
+ * correspond to register values
+ */
+static const int da9215_current_limits[] = {
+ 4000000, 4200000, 4400000, 4600000, 4800000, 5000000, 5200000, 5400000,
+ 5600000, 5800000, 6000000, 6200000, 6400000, 6600000, 6800000, 7000000
+};
static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev)
{
@@ -157,6 +165,10 @@ static int da9211_set_current_limit(struct regulator_dev *rdev, int min,
current_limits = da9213_current_limits;
max_size = ARRAY_SIZE(da9213_current_limits)-1;
break;
+ case DA9215:
+ current_limits = da9215_current_limits;
+ max_size = ARRAY_SIZE(da9215_current_limits)-1;
+ break;
default:
return -EINVAL;
}
@@ -189,6 +201,9 @@ static int da9211_get_current_limit(struct regulator_dev *rdev)
case DA9213:
current_limits = da9213_current_limits;
break;
+ case DA9215:
+ current_limits = da9215_current_limits;
+ break;
default:
return -EINVAL;
}
@@ -350,13 +365,11 @@ static int da9211_regulator_init(struct da9211 *chip)
/* If configuration for 1/2 bucks is different between platform data
* and the register, driver should exit.
*/
- if ((chip->pdata->num_buck == 2 && data == 0x40)
- || (chip->pdata->num_buck == 1 && data == 0x00)) {
- if (data == 0)
- chip->num_regulator = 1;
- else
- chip->num_regulator = 2;
- } else {
+ if (chip->pdata->num_buck == 1 && data == 0x00)
+ chip->num_regulator = 1;
+ else if (chip->pdata->num_buck == 2 && data != 0x00)
+ chip->num_regulator = 2;
+ else {
dev_err(chip->dev, "Configuration is mismatched\n");
return -EINVAL;
}
@@ -438,6 +451,9 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
case DA9213_DEVICE_ID:
chip->chip_id = DA9213;
break;
+ case DA9215_DEVICE_ID:
+ chip->chip_id = DA9215;
+ break;
default:
dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data);
return -ENODEV;
@@ -478,6 +494,7 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
{"da9213", DA9213},
+ {"da9215", DA9215},
{},
};
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -486,6 +503,7 @@ MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[2] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -494,7 +512,6 @@ MODULE_DEVICE_TABLE(of, da9211_dt_ids);
static struct i2c_driver da9211_regulator_driver = {
.driver = {
.name = "da9211",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(da9211_dt_ids),
},
.probe = da9211_i2c_probe,
@@ -504,5 +521,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213");
-MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213/DA9215");
+MODULE_LICENSE("GPL");
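The chip-specific tables added above all feed the same selection logic; a
self-contained sketch of that table lookup (our simplification of
da9211_set_current_limit(), not the driver's exact code):

#include <errno.h>

/* Return the register selector for the largest table entry inside
 * [min_uA, max_uA], scanning from the top as the driver does. */
static int pick_current_limit(const int *table, int n, int min_uA, int max_uA)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		if (table[i] <= max_uA && table[i] >= min_uA)
			return i;

	return -EINVAL;	/* no entry fits the requested window */
}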
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 93fa9df2721c..d6ad96fc64d3 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,16 +1,16 @@
/*
- * da9211-regulator.h - Regulator definitions for DA9211/DA9213
- * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ * da9211-regulator.h - Regulator definitions for DA9211/DA9213/DA9215
+ * Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __DA9211_REGISTERS_H__
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 42865681c00b..4940e8287df6 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -439,6 +439,7 @@ static const struct i2c_device_id fan53555_id[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(i2c, fan53555_id);
static struct i2c_driver fan53555_regulator_driver = {
.driver = {
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 6e5da95fa025..4abd8e9c81e5 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -156,7 +156,6 @@ MODULE_DEVICE_TABLE(i2c, isl6271a_id);
static struct i2c_driver isl6271a_i2c_driver = {
.driver = {
.name = "isl6271a",
- .owner = THIS_MODULE,
},
.probe = isl6271a_probe,
.id_table = isl6271a_id,
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 6e3a15fe00f1..257c1943e753 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -183,6 +183,7 @@ static const struct of_device_id isl9305_dt_ids[] = {
{ .compatible = "isil,isl9305h" },
{},
};
+MODULE_DEVICE_TABLE(of, isl9305_dt_ids);
#endif
static const struct i2c_device_id isl9305_i2c_id[] = {
@@ -195,7 +196,6 @@ MODULE_DEVICE_TABLE(i2c, isl9305_i2c_id);
static struct i2c_driver isl9305_regulator_driver = {
.driver = {
.name = "isl9305",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
.probe = isl9305_i2c_probe,
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 66fd2330dca0..15c25c622edf 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -452,7 +452,6 @@ MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
static struct i2c_driver lp3971_i2c_driver = {
.driver = {
.name = "LP3971",
- .owner = THIS_MODULE,
},
.probe = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index aea485afcc1a..3a7e96e2c7b3 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -550,7 +550,6 @@ MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id);
static struct i2c_driver lp3972_i2c_driver = {
.driver = {
.name = "lp3972",
- .owner = THIS_MODULE,
},
.probe = lp3972_i2c_probe,
.id_table = lp3972_i2c_id,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 3de328ab41f3..e5af07208f9d 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -849,7 +849,7 @@ static struct lp872x_platform_data
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- goto out;
+ return ERR_PTR(-ENOMEM);
of_property_read_u8(np, "ti,general-config", &pdata->general_config);
if (of_find_property(np, "ti,update-config", NULL))
@@ -857,7 +857,7 @@ static struct lp872x_platform_data
pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL);
if (!pdata->dvs)
- goto out;
+ return ERR_PTR(-ENOMEM);
pdata->dvs->gpio = of_get_named_gpio(np, "ti,dvs-gpio", 0);
of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
@@ -903,15 +903,21 @@ static struct lp872x_platform_data
static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp872x *lp;
+ struct lp872x_platform_data *pdata;
int ret;
const int lp872x_num_regulators[] = {
[LP8720] = LP8720_NUM_REGULATORS,
[LP8725] = LP8725_NUM_REGULATORS,
};
- if (cl->dev.of_node)
- cl->dev.platform_data = lp872x_populate_pdata_from_dt(&cl->dev,
+ if (cl->dev.of_node) {
+ pdata = lp872x_populate_pdata_from_dt(&cl->dev,
(enum lp872x_id)id->driver_data);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ } else {
+ pdata = dev_get_platdata(&cl->dev);
+ }
lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
if (!lp)
@@ -927,7 +933,7 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
}
lp->dev = &cl->dev;
- lp->pdata = dev_get_platdata(&cl->dev);
+ lp->pdata = pdata;
lp->chipid = id->driver_data;
i2c_set_clientdata(cl, lp);
@@ -955,7 +961,6 @@ MODULE_DEVICE_TABLE(i2c, lp872x_ids);
static struct i2c_driver lp872x_driver = {
.driver = {
.name = "lp872x",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lp872x_dt_ids),
},
.probe = lp872x_probe,
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 0ce8e4e0fa73..972c386b2690 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -378,7 +378,7 @@ static bool ltc3589_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
-static struct reg_default ltc3589_reg_defaults[] = {
+static const struct reg_default ltc3589_reg_defaults[] = {
{ LTC3589_SCR1, 0x00 },
{ LTC3589_OVEN, 0x00 },
{ LTC3589_SCR2, 0x00 },
@@ -542,7 +542,6 @@ MODULE_DEVICE_TABLE(i2c, ltc3589_i2c_id);
static struct i2c_driver ltc3589_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = ltc3589_probe,
.id_table = ltc3589_i2c_id,
@@ -552,4 +551,3 @@ module_i2c_driver(ltc3589_driver);
MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3589(-1,2)");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:ltc3589");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index d2a8c64cae42..2c1228d5796a 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -304,7 +304,6 @@ static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
.driver = {
.name = "max1586",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max1586_of_match),
},
.id_table = max1586_id,
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 38722c8311a5..de730fd3f8a5 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -1,8 +1,9 @@
/*
- * max77693.c - Regulator driver for the Maxim 77693
+ * max77693.c - Regulator driver for the Maxim 77693 and 77843
*
- * Copyright (C) 2013 Samsung Electronics
+ * Copyright (C) 2013-2015 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,38 +30,64 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
+#include <linux/mfd/max77843-private.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
-#define CHGIN_ILIM_STEP_20mA 20000
+/*
+ * ID for MAX77843 regulators.
+ * There is no such need for MAX77693.
+ */
+enum max77843_regulator_type {
+ MAX77843_SAFEOUT1 = 0,
+ MAX77843_SAFEOUT2,
+ MAX77843_CHARGER,
+
+ MAX77843_NUM,
+};
+
+/* Register differences between chargers: MAX77693 and MAX77843 */
+struct chg_reg_data {
+ unsigned int linear_reg;
+ unsigned int linear_mask;
+ unsigned int uA_step;
+ unsigned int min_sel;
+};
/*
- * CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA
+ * MAX77693 CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA
* 0x00, 0x01, 0x2, 0x03 = 60 mA
* 0x04 ~ 0x7E = (60 + (X - 3) * 20) mA
+ * Actually for MAX77693 the driver manipulates the maximum input current,
+ * not the fast charge current (output). This should be fixed.
+ *
+ * On MAX77843 the calculation formula is the same (except for the values).
+ * Fortunately it properly manipulates the fast charge current.
*/
static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
{
+ const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
unsigned int chg_min_uA = rdev->constraints->min_uA;
unsigned int chg_max_uA = rdev->constraints->max_uA;
unsigned int reg, sel;
unsigned int val;
int ret;
- ret = regmap_read(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, &reg);
+ ret = regmap_read(rdev->regmap, reg_data->linear_reg, &reg);
if (ret < 0)
return ret;
- sel = reg & CHG_CNFG_09_CHGIN_ILIM_MASK;
+ sel = reg & reg_data->linear_mask;
/* the first four codes for charger current are all 60mA */
- if (sel <= 3)
+ if (sel <= reg_data->min_sel)
sel = 0;
else
- sel -= 3;
+ sel -= reg_data->min_sel;
- val = chg_min_uA + CHGIN_ILIM_STEP_20mA * sel;
+ val = chg_min_uA + reg_data->uA_step * sel;
if (val > chg_max_uA)
return -EINVAL;
@@ -70,23 +97,43 @@ static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
static int max77693_chg_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
{
+ const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
unsigned int chg_min_uA = rdev->constraints->min_uA;
int sel = 0;
- while (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel < min_uA)
+ while (chg_min_uA + reg_data->uA_step * sel < min_uA)
sel++;
- if (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel > max_uA)
+ if (chg_min_uA + reg_data->uA_step * sel > max_uA)
return -EINVAL;
/* the first four codes for charger current are all 60mA */
- sel += 3;
+ sel += reg_data->min_sel;
- return regmap_write(rdev->regmap,
- MAX77693_CHG_REG_CHG_CNFG_09, sel);
+ return regmap_write(rdev->regmap, reg_data->linear_reg, sel);
}
/* end of CHARGER regulator ops */
+/* Returns regmap suitable for given regulator on chosen device */
+static struct regmap *max77693_get_regmap(enum max77693_types type,
+ struct max77693_dev *max77693,
+ int reg_id)
+{
+ if (type == TYPE_MAX77693)
+ return max77693->regmap;
+
+ /* Else: TYPE_MAX77843 */
+ switch (reg_id) {
+ case MAX77843_SAFEOUT1:
+ case MAX77843_SAFEOUT2:
+ return max77693->regmap;
+ case MAX77843_CHARGER:
+ return max77693->regmap_chg;
+ default:
+ return max77693->regmap;
+ }
+}
+
static const unsigned int max77693_safeout_table[] = {
4850000,
4900000,
@@ -111,7 +158,7 @@ static struct regulator_ops max77693_charger_ops = {
.set_current_limit = max77693_chg_set_current_limit,
};
-#define regulator_desc_esafeout(_num) { \
+#define max77693_regulator_desc_esafeout(_num) { \
.name = "ESAFEOUT"#_num, \
.id = MAX77693_ESAFEOUT##_num, \
.of_match = of_match_ptr("ESAFEOUT"#_num), \
@@ -127,9 +174,9 @@ static struct regulator_ops max77693_charger_ops = {
.enable_mask = SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK , \
}
-static const struct regulator_desc regulators[] = {
- regulator_desc_esafeout(1),
- regulator_desc_esafeout(2),
+static const struct regulator_desc max77693_supported_regulators[] = {
+ max77693_regulator_desc_esafeout(1),
+ max77693_regulator_desc_esafeout(2),
{
.name = "CHARGER",
.id = MAX77693_CHARGER,
@@ -145,18 +192,86 @@ static const struct regulator_desc regulators[] = {
},
};
+static const struct chg_reg_data max77693_chg_reg_data = {
+ .linear_reg = MAX77693_CHG_REG_CHG_CNFG_09,
+ .linear_mask = CHG_CNFG_09_CHGIN_ILIM_MASK,
+ .uA_step = 20000,
+ .min_sel = 3,
+};
+
+#define max77843_regulator_desc_esafeout(num) { \
+ .name = "SAFEOUT" # num, \
+ .id = MAX77843_SAFEOUT ## num, \
+ .ops = &max77693_safeout_ops, \
+ .of_match = of_match_ptr("SAFEOUT" # num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(max77693_safeout_table), \
+ .volt_table = max77693_safeout_table, \
+ .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
+ .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num, \
+ .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
+ .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
+}
+
+static const struct regulator_desc max77843_supported_regulators[] = {
+ [MAX77843_SAFEOUT1] = max77843_regulator_desc_esafeout(1),
+ [MAX77843_SAFEOUT2] = max77843_regulator_desc_esafeout(2),
+ [MAX77843_CHARGER] = {
+ .name = "CHARGER",
+ .id = MAX77843_CHARGER,
+ .ops = &max77693_charger_ops,
+ .of_match = of_match_ptr("CHARGER"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
+ .enable_mask = MAX77843_CHG_MASK,
+ .enable_val = MAX77843_CHG_MASK,
+ },
+};
+
+static const struct chg_reg_data max77843_chg_reg_data = {
+ .linear_reg = MAX77843_CHG_REG_CHG_CNFG_02,
+ .linear_mask = MAX77843_CHG_FAST_CHG_CURRENT_MASK,
+ .uA_step = MAX77843_CHG_FAST_CHG_CURRENT_STEP,
+ .min_sel = 2,
+};
+
static int max77693_pmic_probe(struct platform_device *pdev)
{
+ enum max77693_types type = platform_get_device_id(pdev)->driver_data;
struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ const struct regulator_desc *regulators;
+ unsigned int regulators_size;
int i;
struct regulator_config config = { };
config.dev = iodev->dev;
- config.regmap = iodev->regmap;
- for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ switch (type) {
+ case TYPE_MAX77693:
+ regulators = max77693_supported_regulators;
+ regulators_size = ARRAY_SIZE(max77693_supported_regulators);
+ config.driver_data = (void *)&max77693_chg_reg_data;
+ break;
+ case TYPE_MAX77843:
+ regulators = max77843_supported_regulators;
+ regulators_size = ARRAY_SIZE(max77843_supported_regulators);
+ config.driver_data = (void *)&max77843_chg_reg_data;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device type: %u\n", type);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < regulators_size; i++) {
struct regulator_dev *rdev;
+ config.regmap = max77693_get_regmap(type, iodev,
+ regulators[i].id);
+
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
if (IS_ERR(rdev)) {
@@ -170,7 +285,8 @@ static int max77693_pmic_probe(struct platform_device *pdev)
}
static const struct platform_device_id max77693_pmic_id[] = {
- {"max77693-pmic", 0},
+ { "max77693-pmic", TYPE_MAX77693 },
+ { "max77843-regulator", TYPE_MAX77843 },
{},
};
@@ -184,8 +300,19 @@ static struct platform_driver max77693_pmic_driver = {
.id_table = max77693_pmic_id,
};
-module_platform_driver(max77693_pmic_driver);
+static int __init max77693_pmic_init(void)
+{
+ return platform_driver_register(&max77693_pmic_driver);
+}
+subsys_initcall(max77693_pmic_init);
+
+static void __exit max77693_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max77693_pmic_driver);
+}
+module_exit(max77693_pmic_cleanup);
-MODULE_DESCRIPTION("MAXIM MAX77693 regulator driver");
+MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
MODULE_LICENSE("GPL");
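A worked example of the linear code-to-current mapping described in the
driver comment above, using the MAX77693 values (60 mA base, 20 mA step,
codes 0x00-0x03 clamped to the base); an illustration of the quoted formula,
not the driver's exact code:

/* Map a MAX77693 charger register code to microamps per the
 * (60 + (X - 3) * 20) mA formula quoted in the driver comment. */
static unsigned int chg_sel_to_uA(unsigned int sel)
{
	if (sel <= 3)		/* codes 0x00-0x03 all mean 60 mA */
		sel = 0;
	else
		sel -= 3;

	return 60000 + 20000 * sel;
}

/* chg_sel_to_uA(0x04) == 80000 (80 mA); chg_sel_to_uA(0x03) == 60000. */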
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
deleted file mode 100644
index f4fd0d3cfa6e..000000000000
--- a/drivers/regulator/max77843.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * max77843.c - Regulator driver for the Maxim MAX77843
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- * Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/mfd/max77843-private.h>
-#include <linux/regulator/of_regulator.h>
-
-enum max77843_regulator_type {
- MAX77843_SAFEOUT1 = 0,
- MAX77843_SAFEOUT2,
- MAX77843_CHARGER,
-
- MAX77843_NUM,
-};
-
-static const unsigned int max77843_safeout_voltage_table[] = {
- 4850000,
- 4900000,
- 4950000,
- 3300000,
-};
-
-static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
-{
- struct regmap *regmap = rdev->regmap;
- unsigned int chg_min_uA = rdev->constraints->min_uA;
- unsigned int chg_max_uA = rdev->constraints->max_uA;
- unsigned int val;
- int ret;
- unsigned int reg, sel;
-
- ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
- if (ret) {
- dev_err(&rdev->dev, "Failed to read charger register\n");
- return ret;
- }
-
- sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
-
- if (sel < 0x03)
- sel = 0;
- else
- sel -= 2;
-
- val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
- if (val > chg_max_uA)
- return -EINVAL;
-
- return val;
-}
-
-static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct regmap *regmap = rdev->regmap;
- unsigned int chg_min_uA = rdev->constraints->min_uA;
- int sel = 0;
-
- while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
- sel++;
-
- if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
- return -EINVAL;
-
- sel += 2;
-
- return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
-}
-
-static struct regulator_ops max77843_charger_ops = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_current_limit = max77843_reg_get_current_limit,
- .set_current_limit = max77843_reg_set_current_limit,
-};
-
-static struct regulator_ops max77843_regulator_ops = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .list_voltage = regulator_list_voltage_table,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
-};
-
-#define MAX77843_SAFEOUT(num) { \
- .name = "SAFEOUT" # num, \
- .id = MAX77843_SAFEOUT ## num, \
- .ops = &max77843_regulator_ops, \
- .of_match = of_match_ptr("SAFEOUT" # num), \
- .regulators_node = of_match_ptr("regulators"), \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), \
- .volt_table = max77843_safeout_voltage_table, \
- .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
- .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num, \
- .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
- .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
-}
-
-static const struct regulator_desc max77843_supported_regulators[] = {
- [MAX77843_SAFEOUT1] = MAX77843_SAFEOUT(1),
- [MAX77843_SAFEOUT2] = MAX77843_SAFEOUT(2),
- [MAX77843_CHARGER] = {
- .name = "CHARGER",
- .id = MAX77843_CHARGER,
- .ops = &max77843_charger_ops,
- .of_match = of_match_ptr("CHARGER"),
- .regulators_node = of_match_ptr("regulators"),
- .type = REGULATOR_CURRENT,
- .owner = THIS_MODULE,
- .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
- .enable_mask = MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
- .enable_val = MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
- },
-};
-
-static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
-{
- switch (reg_id) {
- case MAX77843_SAFEOUT1:
- case MAX77843_SAFEOUT2:
- return max77843->regmap;
- case MAX77843_CHARGER:
- return max77843->regmap_chg;
- default:
- return max77843->regmap;
- }
-}
-
-static int max77843_regulator_probe(struct platform_device *pdev)
-{
- struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
- struct regulator_config config = {};
- int i;
-
- config.dev = max77843->dev;
- config.driver_data = max77843;
-
- for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
- struct regulator_dev *regulator;
-
- config.regmap = max77843_get_regmap(max77843,
- max77843_supported_regulators[i].id);
-
- regulator = devm_regulator_register(&pdev->dev,
- &max77843_supported_regulators[i], &config);
- if (IS_ERR(regulator)) {
- dev_err(&pdev->dev,
- "Failed to regiser regulator-%d\n", i);
- return PTR_ERR(regulator);
- }
- }
-
- return 0;
-}
-
-static const struct platform_device_id max77843_regulator_id[] = {
- { "max77843-regulator", },
- { /* sentinel */ },
-};
-
-static struct platform_driver max77843_regulator_driver = {
- .driver = {
- .name = "max77843-regulator",
- },
- .probe = max77843_regulator_probe,
- .id_table = max77843_regulator_id,
-};
-
-static int __init max77843_regulator_init(void)
-{
- return platform_driver_register(&max77843_regulator_driver);
-}
-subsys_initcall(max77843_regulator_init);
-
-static void __exit max77843_regulator_exit(void)
-{
- platform_driver_unregister(&max77843_regulator_driver);
-}
-module_exit(max77843_regulator_exit);
-
-MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
-MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4071d74fa828..b87f62dd484e 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -518,7 +518,6 @@ static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
.driver = {
.name = "max8660",
- .owner = THIS_MODULE,
},
.id_table = max8660_id,
};
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..5b75b7c2e3ea 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -75,6 +75,7 @@
#define MAX8973_DISCH_ENBABLE BIT(5)
#define MAX8973_FT_ENABLE BIT(4)
+#define MAX8973_CKKADV_TRIP_MASK 0xC
#define MAX8973_CKKADV_TRIP_DISABLE 0xC
#define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0
#define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4
@@ -282,6 +283,55 @@ static int max8973_set_ramp_delay(struct regulator_dev *rdev,
return ret;
}
+static int max8973_set_current_limit(struct regulator_dev *rdev,
+ int min_ua, int max_ua)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ unsigned int val;
+ int ret;
+
+ if (max_ua <= 9000000)
+ val = MAX8973_CKKADV_TRIP_75mV_PER_US;
+ else if (max_ua <= 12000000)
+ val = MAX8973_CKKADV_TRIP_150mV_PER_US;
+ else
+ val = MAX8973_CKKADV_TRIP_DISABLE;
+
+ ret = regmap_update_bits(max->regmap, MAX8973_CONTROL2,
+ MAX8973_CKKADV_TRIP_MASK, val);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d update failed: %d\n",
+ MAX8973_CONTROL2, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int max8973_get_current_limit(struct regulator_dev *rdev)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ unsigned int control2;
+ int ret;
+
+ ret = regmap_read(max->regmap, MAX8973_CONTROL2, &control2);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d read failed: %d\n",
+ MAX8973_CONTROL2, ret);
+ return ret;
+ }
+ switch (control2 & MAX8973_CKKADV_TRIP_MASK) {
+ case MAX8973_CKKADV_TRIP_DISABLE:
+ return 15000000;
+ case MAX8973_CKKADV_TRIP_150mV_PER_US:
+ return 12000000;
+ case MAX8973_CKKADV_TRIP_75mV_PER_US:
+ return 9000000;
+ default:
+ break;
+ }
+ return 9000000;
+}
+
static const struct regulator_ops max8973_dcdc_ops = {
.get_voltage_sel = max8973_dcdc_get_voltage_sel,
.set_voltage_sel = max8973_dcdc_set_voltage_sel,
@@ -421,6 +471,8 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
struct device_node *np = dev->of_node;
int ret;
u32 pval;
+ bool etr_enable;
+ bool etr_sensitivity_high;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -450,7 +502,24 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
if (of_property_read_bool(np, "maxim,enable-bias-control"))
- pdata->control_flags |= MAX8973_BIAS_ENABLE;
+ pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
+
+ etr_enable = of_property_read_bool(np, "maxim,enable-etr");
+ etr_sensitivity_high = of_property_read_bool(np,
+ "maxim,enable-high-etr-sensitivity");
+ if (etr_sensitivity_high)
+ etr_enable = true;
+
+ if (etr_enable) {
+ if (etr_sensitivity_high)
+ pdata->control_flags |=
+ MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US;
+ else
+ pdata->control_flags |=
+ MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US;
+ } else {
+ pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
+ }
return pdata;
}
@@ -568,6 +637,15 @@ static int max8973_probe(struct i2c_client *client,
max->lru_index[i] = i;
max->lru_index[0] = max->curr_vout_reg;
max->lru_index[max->curr_vout_reg] = 0;
+ } else {
+ /*
+ * If there is no DVS GPIO, the VOUT register
+ * address is fixed.
+ */
+ max->ops.set_voltage_sel = regulator_set_voltage_sel_regmap;
+ max->ops.get_voltage_sel = regulator_get_voltage_sel_regmap;
+ max->desc.vsel_reg = max->curr_vout_reg;
+ max->desc.vsel_mask = MAX8973_VOUT_MASK;
}
if (pdata_from_dt)
@@ -613,6 +691,8 @@ static int max8973_probe(struct i2c_client *client,
max->ops.enable = regulator_enable_regmap;
max->ops.disable = regulator_disable_regmap;
max->ops.is_enabled = regulator_is_enabled_regmap;
+ max->ops.set_current_limit = max8973_set_current_limit;
+ max->ops.get_current_limit = max8973_get_current_limit;
break;
default:
break;
@@ -652,7 +732,6 @@ static struct i2c_driver max8973_i2c_driver = {
.driver = {
.name = "max8973",
.of_match_table = of_max8973_match_tbl,
- .owner = THIS_MODULE,
},
.probe = max8973_probe,
.id_table = max8973_id,
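
The new set/get current-limit callbacks above map a requested maximum
current onto one of three peak-current trip levels. A standalone sketch
of that mapping, with values mirroring the MAX8973_CKKADV_TRIP_*
constants (the helper name here is hypothetical):

#include <stdio.h>

#define TRIP_75MV_PER_US	0x0	/* limits up to 9 A */
#define TRIP_150MV_PER_US	0x4	/* limits up to 12 A */
#define TRIP_DISABLE		0xC	/* anything higher; reads back 15 A */

static unsigned int trip_from_max_ua(int max_ua)
{
	if (max_ua <= 9000000)
		return TRIP_75MV_PER_US;
	if (max_ua <= 12000000)
		return TRIP_150MV_PER_US;
	return TRIP_DISABLE;
}

int main(void)
{
	int limits[] = { 8000000, 12000000, 15000000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%d uA -> trip 0x%X\n", limits[i],
		       trip_from_max_ua(limits[i]));
	return 0;
}
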
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
new file mode 100644
index 000000000000..02c4e5feca8e
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/mt6311.h>
+#include <linux/slab.h>
+#include "mt6311-regulator.h"
+
+static const struct regmap_config mt6311_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MT6311_FQMTR_CON4,
+};
+
+/* Default voltage limits measured in microvolts */
+#define MT6311_MIN_UV 600000
+#define MT6311_MAX_UV 1393750
+#define MT6311_STEP_UV 6250
+
+static const struct regulator_linear_range buck_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
+};
+
+static const struct regulator_ops mt6311_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops mt6311_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+#define MT6311_BUCK(_id) \
+{\
+ .name = #_id,\
+ .ops = &mt6311_buck_ops,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .type = REGULATOR_VOLTAGE,\
+ .id = MT6311_ID_##_id,\
+ .n_voltages = (MT6311_MAX_UV - MT6311_MIN_UV) / MT6311_STEP_UV + 1,\
+ .min_uV = MT6311_MIN_UV,\
+ .uV_step = MT6311_STEP_UV,\
+ .owner = THIS_MODULE,\
+ .linear_ranges = buck_volt_range, \
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
+ .enable_reg = MT6311_VDVFS11_CON9,\
+ .enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
+ .vsel_reg = MT6311_VDVFS11_CON12,\
+ .vsel_mask = MT6311_PMIC_VDVFS11_VOSEL_MASK,\
+}
+
+#define MT6311_LDO(_id) \
+{\
+ .name = #_id,\
+ .ops = &mt6311_ldo_ops,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .type = REGULATOR_VOLTAGE,\
+ .id = MT6311_ID_##_id,\
+ .owner = THIS_MODULE,\
+ .enable_reg = MT6311_LDO_CON3,\
+ .enable_mask = MT6311_PMIC_RG_VBIASN_EN_MASK,\
+}
+
+static const struct regulator_desc mt6311_regulators[] = {
+ MT6311_BUCK(VDVFS),
+ MT6311_LDO(VBIASN),
+};
+
+/*
+ * I2C driver interface functions
+ */
+static int mt6311_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i, ret;
+ unsigned int data;
+
+ regmap = devm_regmap_init_i2c(i2c, &mt6311_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_read(regmap, MT6311_SWCID, &data);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
+ return ret;
+ }
+
+ switch (data) {
+ case MT6311_E1_CID_CODE:
+ case MT6311_E2_CID_CODE:
+ case MT6311_E3_CID_CODE:
+ break;
+ default:
+ dev_err(&i2c->dev, "Unsupported device id = 0x%x.\n", data);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < MT6311_MAX_REGULATORS; i++) {
+ config.dev = &i2c->dev;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(&i2c->dev,
+ &mt6311_regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev,
+ "Failed to register MT6311 regulator\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id mt6311_i2c_id[] = {
+ {"mt6311", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt6311_dt_ids[] = {
+ { .compatible = "mediatek,mt6311-regulator",
+ .data = &mt6311_i2c_id[0] },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6311_dt_ids);
+#endif
+
+static struct i2c_driver mt6311_regulator_driver = {
+ .driver = {
+ .name = "mt6311",
+ .of_match_table = of_match_ptr(mt6311_dt_ids),
+ },
+ .probe = mt6311_i2c_probe,
+ .id_table = mt6311_i2c_id,
+};
+
+module_i2c_driver(mt6311_regulator_driver);
+
+MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
+MODULE_DESCRIPTION("Regulator device driver for Mediatek MT6311");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mt6311-regulator.h b/drivers/regulator/mt6311-regulator.h
new file mode 100644
index 000000000000..5218db46a798
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT6311_REGULATOR_H__
+#define __MT6311_REGULATOR_H__
+
+#define MT6311_SWCID 0x01
+
+#define MT6311_TOP_INT_CON 0x18
+#define MT6311_TOP_INT_MON 0x19
+
+#define MT6311_VDVFS11_CON0 0x87
+#define MT6311_VDVFS11_CON7 0x88
+#define MT6311_VDVFS11_CON8 0x89
+#define MT6311_VDVFS11_CON9 0x8A
+#define MT6311_VDVFS11_CON10 0x8B
+#define MT6311_VDVFS11_CON11 0x8C
+#define MT6311_VDVFS11_CON12 0x8D
+#define MT6311_VDVFS11_CON13 0x8E
+#define MT6311_VDVFS11_CON14 0x8F
+#define MT6311_VDVFS11_CON15 0x90
+#define MT6311_VDVFS11_CON16 0x91
+#define MT6311_VDVFS11_CON17 0x92
+#define MT6311_VDVFS11_CON18 0x93
+#define MT6311_VDVFS11_CON19 0x94
+
+#define MT6311_LDO_CON0 0xCC
+#define MT6311_LDO_OCFB0 0xCD
+#define MT6311_LDO_CON2 0xCE
+#define MT6311_LDO_CON3 0xCF
+#define MT6311_LDO_CON4 0xD0
+#define MT6311_FQMTR_CON0 0xD1
+#define MT6311_FQMTR_CON1 0xD2
+#define MT6311_FQMTR_CON2 0xD3
+#define MT6311_FQMTR_CON3 0xD4
+#define MT6311_FQMTR_CON4 0xD5
+
+#define MT6311_PMIC_RG_INT_POL_MASK 0x1
+#define MT6311_PMIC_RG_INT_EN_MASK 0x2
+#define MT6311_PMIC_RG_BUCK_OC_INT_STATUS_MASK 0x10
+
+#define MT6311_PMIC_VDVFS11_EN_CTRL_MASK 0x1
+#define MT6311_PMIC_VDVFS11_VOSEL_CTRL_MASK 0x2
+#define MT6311_PMIC_VDVFS11_EN_SEL_MASK 0x3
+#define MT6311_PMIC_VDVFS11_VOSEL_SEL_MASK 0xc
+#define MT6311_PMIC_VDVFS11_EN_MASK 0x1
+#define MT6311_PMIC_VDVFS11_VOSEL_MASK 0x7F
+#define MT6311_PMIC_VDVFS11_VOSEL_ON_MASK 0x7F
+#define MT6311_PMIC_VDVFS11_VOSEL_SLEEP_MASK 0x7F
+#define MT6311_PMIC_NI_VDVFS11_VOSEL_MASK 0x7F
+
+#define MT6311_PMIC_RG_VBIASN_EN_MASK 0x1
+
+#endif
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index b1c485b24ab2..250700c853bf 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -107,6 +107,9 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(np, "regulator-system-load", &pval))
constraints->system_load = pval;
+ constraints->over_current_protection = of_property_read_bool(np,
+ "regulator-over-current-protection");
+
for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
switch (i) {
case PM_SUSPEND_MEM:
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index bd2b75c0d1d1..4fa7bcaf454e 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -30,6 +30,7 @@
struct pbias_reg_info {
u32 enable;
u32 enable_mask;
+ u32 disable_val;
u32 vmode;
unsigned int enable_time;
char *name;
@@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.enable = BIT(1),
.enable_mask = BIT(1),
.vmode = BIT(0),
+ .disable_val = 0,
.enable_time = 100,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
static const struct pbias_reg_info pbias_mmc_omap4 = {
.enable = BIT(26) | BIT(22),
.enable_mask = BIT(26) | BIT(25) | BIT(22),
+ .disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
.name = "pbias_mmc_omap4"
@@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
static const struct pbias_reg_info pbias_mmc_omap5 = {
.enable = BIT(27) | BIT(26),
.enable_mask = BIT(27) | BIT(25) | BIT(26),
+ .disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
.name = "pbias_mmc_omap5"
@@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.enable_reg = res->start;
drvdata[data_idx].desc.enable_mask = info->enable_mask;
drvdata[data_idx].desc.enable_val = info->enable;
+ drvdata[data_idx].desc.disable_val = info->disable_val;
cfg.init_data = pbias_matches[idx].init_data;
cfg.driver_data = &drvdata[data_idx];
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8cc8d1877c44..2a44e5dd9c2a 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -643,7 +643,6 @@ static struct i2c_driver pfuze_driver = {
.id_table = pfuze_device_id,
.driver = {
.name = "pfuze100-regulator",
- .owner = THIS_MODULE,
.of_match_table = pfuze_dt_ids,
},
.probe = pfuze100_regulator_probe,
@@ -653,4 +652,3 @@ module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:pfuze100-regulator");
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index ffa96124a5e7..fc3166dfcbfa 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -21,9 +22,15 @@
#include <linux/pwm.h>
struct pwm_regulator_data {
- struct pwm_voltages *duty_cycle_table;
+ /* Shared */
struct pwm_device *pwm;
+
+ /* Voltage table */
+ struct pwm_voltages *duty_cycle_table;
int state;
+
+ /* Continuous voltage */
+ int volt_uV;
};
struct pwm_voltages {
@@ -31,6 +38,9 @@ struct pwm_voltages {
unsigned int dutycycle;
};
+/*
+ * Voltage table call-backs
+ */
static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -79,29 +89,129 @@ static int pwm_regulator_list_voltage(struct regulator_dev *rdev,
return drvdata->duty_cycle_table[selector].uV;
}
-static struct regulator_ops pwm_regulator_voltage_ops = {
+/*
+ * Continuous voltage call-backs
+ */
+static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
+{
+ int min_uV = rdev->constraints->min_uV;
+ int max_uV = rdev->constraints->max_uV;
+ int diff = max_uV - min_uV;
+
+ return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
+}
+
+static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+
+ return drvdata->volt_uV;
+}
+
+static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ unsigned int ramp_delay = rdev->constraints->ramp_delay;
+ unsigned int period = pwm_get_period(drvdata->pwm);
+ int duty_cycle;
+ int ret;
+
+ duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
+
+ ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to configure PWM\n");
+ return ret;
+ }
+
+ ret = pwm_enable(drvdata->pwm);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to enable PWM\n");
+ return ret;
+ }
+ drvdata->volt_uV = min_uV;
+
+ /* Delay required by PWM regulator to settle to the new voltage */
+ usleep_range(ramp_delay, ramp_delay + 1000);
+
+ return 0;
+}
+
+static struct regulator_ops pwm_regulator_voltage_table_ops = {
.set_voltage_sel = pwm_regulator_set_voltage_sel,
.get_voltage_sel = pwm_regulator_get_voltage_sel,
.list_voltage = pwm_regulator_list_voltage,
.map_voltage = regulator_map_voltage_iterate,
};
+static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
+ .get_voltage = pwm_regulator_get_voltage,
+ .set_voltage = pwm_regulator_set_voltage,
+};
+
static struct regulator_desc pwm_regulator_desc = {
.name = "pwm-regulator",
- .ops = &pwm_regulator_voltage_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.supply_name = "pwm",
};
+static int pwm_regulator_init_table(struct platform_device *pdev,
+ struct pwm_regulator_data *drvdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pwm_voltages *duty_cycle_table;
+ unsigned int length = 0;
+ int ret;
+
+ of_find_property(np, "voltage-table", &length);
+
+ if ((length < sizeof(*duty_cycle_table)) ||
+ (length % sizeof(*duty_cycle_table))) {
+ dev_err(&pdev->dev,
+ "voltage-table length(%d) is invalid\n",
+ length);
+ return -EINVAL;
+ }
+
+ duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
+ if (!duty_cycle_table)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "voltage-table",
+ (u32 *)duty_cycle_table,
+ length / sizeof(u32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read voltage-table\n");
+ return ret;
+ }
+
+ drvdata->duty_cycle_table = duty_cycle_table;
+ pwm_regulator_desc.ops = &pwm_regulator_voltage_table_ops;
+ pwm_regulator_desc.n_voltages = length / sizeof(*duty_cycle_table);
+
+ return 0;
+}
+
+static int pwm_regulator_init_continuous(struct platform_device *pdev,
+ struct pwm_regulator_data *drvdata)
+{
+ pwm_regulator_desc.ops = &pwm_regulator_voltage_continuous_ops;
+ pwm_regulator_desc.continuous_voltage_range = true;
+
+ return 0;
+}
+
static int pwm_regulator_probe(struct platform_device *pdev)
{
+ const struct regulator_init_data *init_data;
struct pwm_regulator_data *drvdata;
- struct property *prop;
struct regulator_dev *regulator;
struct regulator_config config = { };
struct device_node *np = pdev->dev.of_node;
- int length, ret;
+ int ret;
if (!np) {
dev_err(&pdev->dev, "Device Tree node missing\n");
@@ -112,44 +222,22 @@ static int pwm_regulator_probe(struct platform_device *pdev)
if (!drvdata)
return -ENOMEM;
- /* determine the number of voltage-table */
- prop = of_find_property(np, "voltage-table", &length);
- if (!prop) {
- dev_err(&pdev->dev, "No voltage-table\n");
- return -EINVAL;
- }
-
- if ((length < sizeof(*drvdata->duty_cycle_table)) ||
- (length % sizeof(*drvdata->duty_cycle_table))) {
- dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
- length);
- return -EINVAL;
- }
-
- pwm_regulator_desc.n_voltages = length / sizeof(*drvdata->duty_cycle_table);
-
- drvdata->duty_cycle_table = devm_kzalloc(&pdev->dev,
- length, GFP_KERNEL);
- if (!drvdata->duty_cycle_table)
- return -ENOMEM;
-
- /* read voltage table from DT property */
- ret = of_property_read_u32_array(np, "voltage-table",
- (u32 *)drvdata->duty_cycle_table,
- length / sizeof(u32));
- if (ret < 0) {
- dev_err(&pdev->dev, "read voltage-table failed\n");
+ if (of_find_property(np, "voltage-table", NULL))
+ ret = pwm_regulator_init_table(pdev, drvdata);
+ else
+ ret = pwm_regulator_init_continuous(pdev, drvdata);
+ if (ret)
return ret;
- }
- config.init_data = of_get_regulator_init_data(&pdev->dev, np,
- &pwm_regulator_desc);
- if (!config.init_data)
+ init_data = of_get_regulator_init_data(&pdev->dev, np,
+ &pwm_regulator_desc);
+ if (!init_data)
return -ENOMEM;
config.of_node = np;
config.dev = &pdev->dev;
config.driver_data = drvdata;
+ config.init_data = init_data;
drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(drvdata->pwm)) {
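
pwm_voltage_to_duty_cycle_percentage() above assumes the regulator
output falls as PWM duty rises. A standalone sketch using an equivalent
rearrangement of the same formula, with a worked example: for a
1.0-1.4 V range, requesting 1.1 V yields 100 - (0.1 / 0.4) * 100 = 75%
duty:

#include <stdio.h>

/* Equivalent to 100 - ((req_uV * 100 - min_uV * 100) / diff) above. */
static int duty_cycle_pct(int req_uV, int min_uV, int max_uV)
{
	int diff = max_uV - min_uV;

	return 100 - ((req_uV - min_uV) * 100) / diff;
}

int main(void)
{
	printf("1.1 V in [1.0 V, 1.4 V] -> %d%% duty\n",
	       duty_cycle_pct(1100000, 1000000, 1400000));
	return 0;
}
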
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
new file mode 100644
index 000000000000..9c6167dd2c8b
--- /dev/null
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+struct qcom_rpm_reg {
+ struct device *dev;
+
+ struct qcom_smd_rpm *rpm;
+
+ u32 type;
+ u32 id;
+
+ struct regulator_desc desc;
+
+ int is_enabled;
+ int uV;
+};
+
+struct rpm_regulator_req {
+ u32 key;
+ u32 nbytes;
+ u32 value;
+};
+
+#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
+#define RPM_KEY_UV 0x00007675 /* "uv" */
+#define RPM_KEY_MA 0x0000616d /* "ma" */
+
+static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
+ struct rpm_regulator_req *req,
+ size_t size)
+{
+ return qcom_rpm_smd_write(vreg->rpm,
+ QCOM_SMD_RPM_ACTIVE_STATE,
+ vreg->type,
+ vreg->id,
+ req, size);
+}
+
+static int rpm_reg_enable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_SWEN;
+ req.nbytes = sizeof(u32);
+ req.value = 1;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->is_enabled = 1;
+
+ return ret;
+}
+
+static int rpm_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->is_enabled;
+}
+
+static int rpm_reg_disable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_SWEN;
+ req.nbytes = sizeof(u32);
+ req.value = 0;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->is_enabled = 0;
+
+ return ret;
+}
+
+static int rpm_reg_get_voltage(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->uV;
+}
+
+static int rpm_reg_set_voltage(struct regulator_dev *rdev,
+ int min_uV,
+ int max_uV,
+ unsigned *selector)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret = 0;
+
+ req.key = RPM_KEY_UV;
+ req.nbytes = sizeof(u32);
+ req.value = min_uV;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->uV = min_uV;
+
+ return ret;
+}
+
+static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+
+ req.key = RPM_KEY_MA;
+ req.nbytes = sizeof(u32);
+ req.value = load_uA;
+
+ return rpm_reg_write_active(vreg, &req, sizeof(req));
+}
+
+static const struct regulator_ops rpm_smps_ldo_ops = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+
+ .get_voltage = rpm_reg_get_voltage,
+ .set_voltage = rpm_reg_set_voltage,
+
+ .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_switch_ops = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+};
+
+static const struct regulator_desc pm8x41_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8841_ftsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
+ REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 340,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_boost = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 16,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 100,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_lnldo = {
+ .fixed_uV = 1740000,
+ .n_voltages = 1,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_switch = {
+ .ops = &rpm_switch_ops,
+};
+
+struct rpm_regulator_data {
+ const char *name;
+ u32 type;
+ u32 id;
+ const struct regulator_desc *desc;
+ const char *supply;
+};
+
+static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPB, 3, &pm8x41_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPB, 4, &pm8841_ftsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPB, 5, &pm8841_ftsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPB, 6, &pm8841_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPB, 7, &pm8841_ftsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPB, 8, &pm8841_ftsmps, "vdd_s8" },
+ {}
+};
+
+static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8x41_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_BOOST, 1, &pm8941_boost },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8941_nldo, "vdd_l1_l3" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8941_nldo, "vdd_l2_lvs1_2_3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8941_nldo, "vdd_l1_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8941_nldo, "vdd_l4_l11" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8941_lnldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8941_lnldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8941_nldo, "vdd_l4_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8941_pldo, "vdd_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+ { "lvs3", QCOM_SMD_RPM_VSA, 3, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+
+ { "5vs1", QCOM_SMD_RPM_VSA, 4, &pm8941_switch, "vin_5vs" },
+ { "5vs2", QCOM_SMD_RPM_VSA, 5, &pm8941_switch, "vin_5vs" },
+
+ {}
+};
+
+static const struct of_device_id rpm_of_match[] = {
+ { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
+ { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rpm_of_match);
+
+static int rpm_reg_probe(struct platform_device *pdev)
+{
+ const struct rpm_regulator_data *reg;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct qcom_rpm_reg *vreg;
+ struct qcom_smd_rpm *rpm;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(rpm_of_match, &pdev->dev);
+ for (reg = match->data; reg->name; reg++) {
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->dev = &pdev->dev;
+ vreg->type = reg->type;
+ vreg->id = reg->id;
+ vreg->rpm = rpm;
+
+ memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc));
+
+ vreg->desc.id = -1;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.name = reg->name;
+ vreg->desc.supply_name = reg->supply;
+ vreg->desc.of_match = reg->name;
+
+ config.dev = &pdev->dev;
+ config.driver_data = vreg;
+ rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n", reg->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver rpm_reg_driver = {
+ .probe = rpm_reg_probe,
+ .driver = {
+ .name = "qcom_rpm_smd_regulator",
+ .of_match_table = rpm_of_match,
+ },
+};
+
+static int __init rpm_reg_init(void)
+{
+ return platform_driver_register(&rpm_reg_driver);
+}
+subsys_initcall(rpm_reg_init);
+
+static void __exit rpm_reg_exit(void)
+{
+ platform_driver_unregister(&rpm_reg_driver);
+}
+module_exit(rpm_reg_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 850a30a95b5b..88a5dc88badc 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -26,6 +26,70 @@
#include <linux/regmap.h>
#include <linux/list.h>
+/* Pin control enable input pins. */
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN0 0x01
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN1 0x02
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN2 0x04
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN3 0x08
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT 0x10
+
+/* Pin control high power mode input pins. */
+#define SPMI_REGULATOR_PIN_CTRL_HPM_NONE 0x00
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN0 0x01
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN1 0x02
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN2 0x04
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN3 0x08
+#define SPMI_REGULATOR_PIN_CTRL_HPM_SLEEP_B 0x10
+#define SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT 0x20
+
+/*
+ * Used with enable parameters to specify that hardware default register values
+ * should be left unaltered.
+ */
+#define SPMI_REGULATOR_USE_HW_DEFAULT 2
+
+/* Soft start strength of a voltage switch type regulator */
+enum spmi_vs_soft_start_str {
+ SPMI_VS_SOFT_START_STR_0P05_UA = 0,
+ SPMI_VS_SOFT_START_STR_0P25_UA,
+ SPMI_VS_SOFT_START_STR_0P55_UA,
+ SPMI_VS_SOFT_START_STR_0P75_UA,
+ SPMI_VS_SOFT_START_STR_HW_DEFAULT,
+};
+
+/**
+ * struct spmi_regulator_init_data - spmi-regulator initialization data
+ * @pin_ctrl_enable: Bit mask specifying which hardware pins should be
+ * used to enable the regulator, if any
+ * Value should be an ORing of
+ * SPMI_REGULATOR_PIN_CTRL_ENABLE_* constants. If
+ * the bit specified by
+ * SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
+ * set, then pin control enable hardware registers
+ * will not be modified.
+ * @pin_ctrl_hpm: Bit mask specifying which hardware pins should be
+ * used to force the regulator into high power
+ * mode, if any
+ * Value should be an ORing of
+ * SPMI_REGULATOR_PIN_CTRL_HPM_* constants. If
+ * the bit specified by
+ * SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
+ * set, then pin control mode hardware registers
+ * will not be modified.
+ * @vs_soft_start_strength: This parameter sets the soft start strength for
+ * voltage switch type regulators. Its value
+ * should be one of SPMI_VS_SOFT_START_STR_*. If
+ * its value is SPMI_VS_SOFT_START_STR_HW_DEFAULT,
+ * then the soft start strength will be left at its
+ * default hardware value.
+ */
+struct spmi_regulator_init_data {
+ unsigned pin_ctrl_enable;
+ unsigned pin_ctrl_hpm;
+ enum spmi_vs_soft_start_str vs_soft_start_strength;
+};
+
/* These types correspond to unique register layouts. */
enum spmi_regulator_logical_type {
SPMI_REGULATOR_LOGICAL_TYPE_SMPS,
@@ -458,6 +522,14 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
return spmi_regulator_common_enable(rdev);
}
+static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 reg = SPMI_VS_OCP_OVERRIDE;
+
+ return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
+}
+
static int spmi_regulator_common_disable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -504,8 +576,7 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = (uV - range->min_uV + range->step_uV - 1)
- / range->step_uV;
+ *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
uV = *voltage_sel * range->step_uV + range->min_uV;
if (uV > max_uV) {
@@ -792,6 +863,9 @@ static unsigned int spmi_regulator_common_get_mode(struct regulator_dev *rdev)
if (reg & SPMI_COMMON_MODE_HPM_MASK)
return REGULATOR_MODE_NORMAL;
+ if (reg & SPMI_COMMON_MODE_AUTO_MASK)
+ return REGULATOR_MODE_FAST;
+
return REGULATOR_MODE_IDLE;
}
@@ -799,11 +873,13 @@ static int
spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 mask = SPMI_COMMON_MODE_HPM_MASK;
+ u8 mask = SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
u8 val = 0;
if (mode == REGULATOR_MODE_NORMAL)
- val = mask;
+ val = SPMI_COMMON_MODE_HPM_MASK;
+ else if (mode == REGULATOR_MODE_FAST)
+ val = SPMI_COMMON_MODE_AUTO_MASK;
return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
}
@@ -973,6 +1049,7 @@ static struct regulator_ops spmi_vs_ops = {
.is_enabled = spmi_regulator_common_is_enabled,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
+ .set_over_current_protection = spmi_regulator_vs_ocp,
};
static struct regulator_ops spmi_boost_ops = {
@@ -1203,10 +1280,111 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
return ret;
}
+static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
+ const struct spmi_regulator_init_data *data)
+{
+ int ret;
+ enum spmi_regulator_logical_type type;
+ u8 ctrl_reg[8], reg, mask;
+
+ type = vreg->logical_type;
+
+ ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
+ if (ret)
+ return ret;
+
+ /* Set up enable pin control. */
+ if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO
+ || type == SPMI_REGULATOR_LOGICAL_TYPE_VS)
+ && !(data->pin_ctrl_enable
+ & SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+ ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
+ ~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
+ data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ }
+
+ /* Set up mode pin control. */
+ if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO)
+ && !(data->pin_ctrl_hpm
+ & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+ ~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+ data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+ }
+
+ if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS
+ && !(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+ ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+ data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ if ((type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && !(data->pin_ctrl_hpm
+ & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+ ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+ data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ /* Write back any control register values that were modified. */
+ ret = spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
+ if (ret)
+ return ret;
+
+ /* Set soft start strength and over current protection for VS. */
+ if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS) {
+ if (data->vs_soft_start_strength
+ != SPMI_VS_SOFT_START_STR_HW_DEFAULT) {
+ reg = data->vs_soft_start_strength
+ & SPMI_VS_SOFT_START_SEL_MASK;
+ mask = SPMI_VS_SOFT_START_SEL_MASK;
+ return spmi_vreg_update_bits(vreg,
+ SPMI_VS_REG_SOFT_START,
+ reg, mask);
+ }
+ }
+
+ return 0;
+}
+
+static void spmi_regulator_get_dt_config(struct spmi_regulator *vreg,
+ struct device_node *node, struct spmi_regulator_init_data *data)
+{
+ /*
+ * Initialize configuration parameters to use hardware default in case
+ * no value is specified via device tree.
+ */
+ data->pin_ctrl_enable = SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
+ data->pin_ctrl_hpm = SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
+ data->vs_soft_start_strength = SPMI_VS_SOFT_START_STR_HW_DEFAULT;
+
+ /* These bindings are optional, so it is okay if they aren't found. */
+ of_property_read_u32(node, "qcom,ocp-max-retries",
+ &vreg->ocp_max_retries);
+ of_property_read_u32(node, "qcom,ocp-retry-delay",
+ &vreg->ocp_retry_delay_ms);
+ of_property_read_u32(node, "qcom,pin-ctrl-enable",
+ &data->pin_ctrl_enable);
+ of_property_read_u32(node, "qcom,pin-ctrl-hpm", &data->pin_ctrl_hpm);
+ of_property_read_u32(node, "qcom,vs-soft-start-strength",
+ &data->vs_soft_start_strength);
+}
+
static unsigned int spmi_regulator_of_map_mode(unsigned int mode)
{
- if (mode)
+ if (mode == 1)
return REGULATOR_MODE_NORMAL;
+ if (mode == 2)
+ return REGULATOR_MODE_FAST;
return REGULATOR_MODE_IDLE;
}
@@ -1215,12 +1393,23 @@ static int spmi_regulator_of_parse(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
+ struct spmi_regulator_init_data data = { };
struct spmi_regulator *vreg = config->driver_data;
struct device *dev = config->dev;
int ret;
- vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
- vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+ spmi_regulator_get_dt_config(vreg, node, &data);
+
+ if (!vreg->ocp_max_retries)
+ vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
+ if (!vreg->ocp_retry_delay_ms)
+ vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+
+ ret = spmi_regulator_init_registers(vreg, &data);
+ if (ret) {
+ dev_err(dev, "common initialization failed, ret=%d\n", ret);
+ return ret;
+ }
if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
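
The DIV_ROUND_UP() change in spmi_regulator_select_voltage() above is a
ceiling division: rounding the requested offset up to the next step
guarantees the programmed set point is never below the requested
minimum. A standalone check with the kernel macro inlined:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int min_uV = 375000, step_uV = 12500;
	int uV = 380000;	/* not on a step boundary */
	int sel = DIV_ROUND_UP(uV - min_uV, step_uV);

	printf("sel = %d -> %d uV (>= requested %d uV)\n",
	       sel, min_uV + sel * step_uV, uV);
	return 0;
}
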
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3fd44353cc80..d86a3dcd61e2 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -16,12 +16,16 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/mfd/rk808.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/rk808.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/gpio/consumer.h>
/* Field Definitions */
#define RK808_BUCK_VSEL_MASK 0x3f
@@ -36,12 +40,25 @@
#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET)
#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET)
+#define RK808_DVS2_POL BIT(2)
+#define RK808_DVS1_POL BIT(1)
+
/* Offset from XXX_ON_VSEL to XXX_SLP_VSEL */
#define RK808_SLP_REG_OFFSET 1
+/* Offset from XXX_ON_VSEL to XXX_DVS_VSEL */
+#define RK808_DVS_REG_OFFSET 2
+
/* Offset from XXX_EN_REG to SLEEP_SET_OFF_XXX */
#define RK808_SLP_SET_OFF_REG_OFFSET 2
+/* Maximum number of selector steps per Buck1/2 voltage increase, equal to 100 mV */
+#define MAX_STEPS_ONE_TIME 8
+
+struct rk808_regulator_data {
+ struct gpio_desc *dvs_gpio[2];
+};
+
static const int rk808_buck_config_regs[] = {
RK808_BUCK1_CONFIG_REG,
RK808_BUCK2_CONFIG_REG,
@@ -70,6 +87,131 @@ static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
};
+static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+ struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+ int id = rdev->desc->id - RK808_ID_DCDC1;
+ struct gpio_desc *gpio = pdata->dvs_gpio[id];
+ unsigned int val;
+ int ret;
+
+ if (!gpio || gpiod_get_value(gpio) == 0)
+ return regulator_get_voltage_sel_regmap(rdev);
+
+ ret = regmap_read(rdev->regmap,
+ rdev->desc->vsel_reg + RK808_DVS_REG_OFFSET,
+ &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+
+static int rk808_buck1_2_i2c_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned sel)
+{
+ int ret, delta_sel;
+ unsigned int old_sel, tmp, val, mask = rdev->desc->vsel_mask;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ tmp = val & ~mask;
+ old_sel = val & mask;
+ old_sel >>= ffs(mask) - 1;
+ delta_sel = sel - old_sel;
+
+	/*
+	 * If we modified the register directly to change the voltage, we
+	 * would risk overshoot. Stepping the change in increments of at
+	 * most 100 mV effectively avoids this problem.
+	 */
+ while (delta_sel > MAX_STEPS_ONE_TIME) {
+ old_sel += MAX_STEPS_ONE_TIME;
+ val = old_sel << (ffs(mask) - 1);
+ val |= tmp;
+
+ /*
+ * i2c is 400kHz (2.5us per bit) and we must transmit _at least_
+ * 3 bytes (24 bits) plus start and stop so 26 bits. So we've
+ * got more than 65 us between each voltage change and thus
+ * won't ramp faster than ~1500 uV / us.
+ */
+ ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
+ delta_sel = sel - old_sel;
+ }
+
+ sel <<= ffs(mask) - 1;
+ val = tmp | sel;
+ ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
+
+	/*
+	 * When we change the voltage register directly, the ramp rate is
+	 * about 100000 uV/us; waiting 1 us is enough for the target voltage
+	 * to become stable, so no extra delay is needed afterwards.
+	 */
+ udelay(1);
+
+ return ret;
+}
+
+static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned sel)
+{
+ struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+ int id = rdev->desc->id - RK808_ID_DCDC1;
+ struct gpio_desc *gpio = pdata->dvs_gpio[id];
+ unsigned int reg = rdev->desc->vsel_reg;
+ unsigned old_sel;
+ int ret, gpio_level;
+
+ if (!gpio)
+ return rk808_buck1_2_i2c_set_voltage_sel(rdev, sel);
+
+ gpio_level = gpiod_get_value(gpio);
+ if (gpio_level == 0) {
+ reg += RK808_DVS_REG_OFFSET;
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &old_sel);
+ } else {
+ ret = regmap_read(rdev->regmap,
+ reg + RK808_DVS_REG_OFFSET,
+ &old_sel);
+ }
+
+ if (ret != 0)
+ return ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+ sel |= old_sel & ~rdev->desc->vsel_mask;
+
+ ret = regmap_write(rdev->regmap, reg, sel);
+ if (ret)
+ return ret;
+
+ gpiod_set_value(gpio, !gpio_level);
+
+ return ret;
+}
+
+static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+ int id = rdev->desc->id - RK808_ID_DCDC1;
+ struct gpio_desc *gpio = pdata->dvs_gpio[id];
+
+	/* If there is no dvs1/2 pin, we don't need to wait extra time here. */
+ if (!gpio)
+ return 0;
+
+ return regulator_set_voltage_time_sel(rdev, old_selector, new_selector);
+}
+
static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
@@ -137,8 +279,9 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
static struct regulator_ops rk808_buck1_2_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
+ .set_voltage_sel = rk808_buck1_2_set_voltage_sel,
+ .set_voltage_time_sel = rk808_buck1_2_set_voltage_time_sel,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -380,25 +523,69 @@ static struct of_regulator_match rk808_reg_matches[] = {
[RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" },
};
+static int rk808_regulator_dt_parse_pdata(struct device *dev,
+ struct device *client_dev,
+ struct regmap *map,
+ struct rk808_regulator_data *pdata)
+{
+ struct device_node *np;
+ int tmp, ret, i;
+
+ np = of_get_child_by_name(client_dev->of_node, "regulators");
+ if (!np)
+ return -ENXIO;
+
+ ret = of_regulator_match(dev, np, rk808_reg_matches,
+ RK808_NUM_REGULATORS);
+ if (ret < 0)
+ goto dt_parse_end;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
+ pdata->dvs_gpio[i] =
+ devm_gpiod_get_index_optional(client_dev, "dvs", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->dvs_gpio[i])) {
+ ret = PTR_ERR(pdata->dvs_gpio[i]);
+ dev_err(dev, "failed to get dvs%d gpio (%d)\n", i, ret);
+ goto dt_parse_end;
+ }
+
+ if (!pdata->dvs_gpio[i]) {
+ dev_warn(dev, "there is no dvs%d gpio\n", i);
+ continue;
+ }
+
+ tmp = i ? RK808_DVS2_POL : RK808_DVS1_POL;
+ ret = regmap_update_bits(map, RK808_IO_POL_REG, tmp,
+ gpiod_is_active_low(pdata->dvs_gpio[i]) ?
+ 0 : tmp);
+ }
+
+dt_parse_end:
+ of_node_put(np);
+ return ret;
+}
+
static int rk808_regulator_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
struct i2c_client *client = rk808->i2c;
- struct device_node *reg_np;
struct regulator_config config = {};
struct regulator_dev *rk808_rdev;
+ struct rk808_regulator_data *pdata;
int ret, i;
- reg_np = of_get_child_by_name(client->dev.of_node, "regulators");
- if (!reg_np)
- return -ENXIO;
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- ret = of_regulator_match(&pdev->dev, reg_np, rk808_reg_matches,
- RK808_NUM_REGULATORS);
- of_node_put(reg_np);
+ ret = rk808_regulator_dt_parse_pdata(&pdev->dev, &client->dev,
+ rk808->regmap, pdata);
if (ret < 0)
return ret;
+ platform_set_drvdata(pdev, pdata);
+
/* Instantiate the regulators */
for (i = 0; i < RK808_NUM_REGULATORS; i++) {
if (!rk808_reg_matches[i].init_data ||
@@ -406,7 +593,7 @@ static int rk808_regulator_probe(struct platform_device *pdev)
continue;
config.dev = &client->dev;
- config.driver_data = rk808;
+ config.driver_data = pdata;
config.regmap = rk808->regmap;
config.of_node = rk808_reg_matches[i].of_node;
config.init_data = rk808_reg_matches[i].init_data;
@@ -427,6 +614,7 @@ static struct platform_driver rk808_regulator_driver = {
.probe = rk808_regulator_probe,
.driver = {
.name = "rk808-regulator",
+ .owner = THIS_MODULE,
},
};
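
A back-of-the-envelope check of the stepping comments in
rk808_buck1_2_i2c_set_voltage_sel() above: one batch moves at most
8 selectors x 12.5 mV = 100 mV, and each I2C write of at least 26 bits
at 400 kHz takes 65 us, which bounds the ramp rate:

#include <stdio.h>

int main(void)
{
	int step_uv = 12500, max_steps = 8;
	double bit_us = 2.5, bits_per_write = 26;

	double batch_uv = (double)step_uv * max_steps;
	double write_us = bit_us * bits_per_write;

	/* Prints roughly 1538 uV/us, matching the ~1500 uV/us comment. */
	printf("batch = %.0f uV every %.1f us -> %.0f uV/us max ramp\n",
	       batch_uv, write_us, batch_uv / write_us);
	return 0;
}
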
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
struct s2mps11_info {
unsigned int rdev_num;
int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
* One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
* the suspend mode was enabled.
*/
- unsigned long long s2mps14_suspend_state:50;
+ DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
/* Array of size rdev_num with GPIO-s for external sleep control */
int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
switch (s2mps11->dev_type) {
case S2MPS13X:
case S2MPS14X:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPS14_ENABLE_SUSPEND;
else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
val = rdev->desc->enable_mask;
break;
case S2MPU02:
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPU02_ENABLE_SUSPEND;
else
val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+ set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
/*
* Don't enable suspend mode if regulator is already disabled because
* this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
case S2MPS11X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS13X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
regulators = s2mps13_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPS14X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
case S2MPU02:
s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
regulators = s2mpu02_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
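The suspend_state conversion above is the point of this patch: a 50-bit integer bitfield indexed with (1 << id) shift-wraps for ids above 31, while DECLARE_BITMAP() plus set_bit()/test_bit() works for any compile-time maximum. A standalone sketch of the idiom (the struct and MY_MAX_REGULATORS are illustrative, not from the driver):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MY_MAX_REGULATORS	50	/* anything above 32 breaks an int shift */

struct my_state {
	DECLARE_BITMAP(suspend_state, MY_MAX_REGULATORS);
};

static void mark_suspended(struct my_state *st, unsigned int id)
{
	set_bit(id, st->suspend_state);		/* valid for id 0..49 */
}

static bool is_suspended(struct my_state *st, unsigned int id)
{
	return test_bit(id, st->suspend_state);
}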
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index c213e37eb69e..572816e30095 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -362,7 +362,6 @@ MODULE_DEVICE_TABLE(i2c, tps51632_id);
static struct i2c_driver tps51632_i2c_driver = {
.driver = {
.name = "tps51632",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps51632_of_match),
},
.probe = tps51632_probe,
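The .owner removals in this and the following tps/bq32k/abb5zes3 hunks are safe because the I2C core fills the field in at registration time. A simplified sketch of the mechanism, abridged from include/linux/i2c.h and the i2c core (not the verbatim kernel code):

#define i2c_add_driver(driver) \
	i2c_register_driver(THIS_MODULE, driver)

int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
{
	driver->driver.owner = owner;	/* core sets .owner for every driver */
	driver->driver.bus = &i2c_bus_type;
	return driver_register(&driver->driver);
}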
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index a1fd626c6c96..f6a6d36a6533 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -515,7 +515,6 @@ MODULE_DEVICE_TABLE(i2c, tps62360_id);
static struct i2c_driver tps62360_i2c_driver = {
.driver = {
.name = "tps62360",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tps62360_of_match),
},
.probe = tps62360_probe,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index b941e564b3f3..5cc19b44974a 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -410,7 +410,6 @@ MODULE_DEVICE_TABLE(i2c, tps_65023_id);
static struct i2c_driver tps_65023_i2c_driver = {
.driver = {
.name = "tps65023",
- .owner = THIS_MODULE,
},
.probe = tps_65023_probe,
.id_table = tps_65023_id,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 2852de05d64d..9e9d22038017 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -422,12 +422,12 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
return NULL;
for (i = 0; i < num; i++) {
- int id;
+ uintptr_t id;
if (!tps6586x_matches[i].init_data)
continue;
pdata->reg_init_data[i] = tps6586x_matches[i].init_data;
- id = (int)tps6586x_matches[i].driver_data;
+ id = (uintptr_t)tps6586x_matches[i].driver_data;
if (id == TPS6586X_ID_SYS)
sys_rail = pdata->reg_init_data[i]->constraints.name;
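Casting the match-table pointer through uintptr_t instead of int matters on 64-bit builds, where a void * is 8 bytes wide and an int cast both truncates and warns. A minimal sketch of the round trip:

#include <linux/types.h>

static void *encode_id(uintptr_t id)
{
	return (void *)id;		/* small integer stored in a pointer slot */
}

static uintptr_t decode_id(const void *driver_data)
{
	return (uintptr_t)driver_data;	/* widens back without truncation */
}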
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 157d421f755b..85d5904e5480 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,5 +1,8 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ATH79) += reset-ath79.o
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
new file mode 100644
index 000000000000..9aaf646ece55
--- /dev/null
+++ b/drivers/reset/reset-ath79.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct ath79_reset {
+ struct reset_controller_dev rcdev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+static int ath79_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct ath79_reset *ath79_reset =
+ container_of(rcdev, struct ath79_reset, rcdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ath79_reset->lock, flags);
+ val = readl(ath79_reset->base);
+ if (assert)
+ val |= BIT(id);
+ else
+ val &= ~BIT(id);
+ writel(val, ath79_reset->base);
+ spin_unlock_irqrestore(&ath79_reset->lock, flags);
+
+ return 0;
+}
+
+static int ath79_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return ath79_reset_update(rcdev, id, true);
+}
+
+static int ath79_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return ath79_reset_update(rcdev, id, false);
+}
+
+static int ath79_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ath79_reset *ath79_reset =
+ container_of(rcdev, struct ath79_reset, rcdev);
+ u32 val;
+
+ val = readl(ath79_reset->base);
+
+ return !!(val & BIT(id));
+}
+
+static struct reset_control_ops ath79_reset_ops = {
+ .assert = ath79_reset_assert,
+ .deassert = ath79_reset_deassert,
+ .status = ath79_reset_status,
+};
+
+static int ath79_reset_probe(struct platform_device *pdev)
+{
+ struct ath79_reset *ath79_reset;
+ struct resource *res;
+
+ ath79_reset = devm_kzalloc(&pdev->dev,
+ sizeof(*ath79_reset), GFP_KERNEL);
+ if (!ath79_reset)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ath79_reset);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ath79_reset->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ath79_reset->base))
+ return PTR_ERR(ath79_reset->base);
+
+ spin_lock_init(&ath79_reset->lock);
+ ath79_reset->rcdev.ops = &ath79_reset_ops;
+ ath79_reset->rcdev.owner = THIS_MODULE;
+ ath79_reset->rcdev.of_node = pdev->dev.of_node;
+ ath79_reset->rcdev.of_reset_n_cells = 1;
+ ath79_reset->rcdev.nr_resets = 32;
+
+ return reset_controller_register(&ath79_reset->rcdev);
+}
+
+static int ath79_reset_remove(struct platform_device *pdev)
+{
+ struct ath79_reset *ath79_reset = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&ath79_reset->rcdev);
+
+ return 0;
+}
+
+static const struct of_device_id ath79_reset_dt_ids[] = {
+ { .compatible = "qca,ar7100-reset", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ath79_reset_dt_ids);
+
+static struct platform_driver ath79_reset_driver = {
+ .probe = ath79_reset_probe,
+ .remove = ath79_reset_remove,
+ .driver = {
+ .name = "ath79-reset",
+ .of_match_table = ath79_reset_dt_ids,
+ },
+};
+module_platform_driver(ath79_reset_driver);
+
+MODULE_AUTHOR("Alban Bedel <albeu@free.fr>");
+MODULE_DESCRIPTION("AR71xx Reset Controller Driver");
+MODULE_LICENSE("GPL");
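For context on how the new ath79 controller gets used: consumers never touch the reset register directly, they go through the reset framework, which dispatches to the ops registered above. A hypothetical consumer sketch (the device wiring and hold time are invented for illustration):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/reset.h>

static int my_device_hw_init(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get(dev, NULL);  /* via the DT "resets" phandle */
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);	/* lands in ath79_reset_assert() */
	if (ret)
		return ret;

	udelay(10);				/* hold time is device specific */
	return reset_control_deassert(rst);
}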
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
new file mode 100644
index 000000000000..70922e9ac27f
--- /dev/null
+++ b/drivers/reset/reset-lpc18xx.c
@@ -0,0 +1,258 @@
+/*
+ * Reset driver for NXP LPC18xx/43xx Reset Generation Unit (RGU).
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/* LPC18xx RGU registers */
+#define LPC18XX_RGU_CTRL0 0x100
+#define LPC18XX_RGU_CTRL1 0x104
+#define LPC18XX_RGU_ACTIVE_STATUS0 0x150
+#define LPC18XX_RGU_ACTIVE_STATUS1 0x154
+
+#define LPC18XX_RGU_RESETS_PER_REG 32
+
+/* Internal reset outputs */
+#define LPC18XX_RGU_CORE_RST 0
+#define LPC43XX_RGU_M0SUB_RST 12
+#define LPC43XX_RGU_M0APP_RST 56
+
+struct lpc18xx_rgu_data {
+ struct reset_controller_dev rcdev;
+ struct clk *clk_delay;
+ struct clk *clk_reg;
+ void __iomem *base;
+ spinlock_t lock;
+ u32 delay_us;
+};
+
+#define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
+
+static void __iomem *rgu_base;
+
+static int lpc18xx_rgu_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ writel(BIT(LPC18XX_RGU_CORE_RST), rgu_base + LPC18XX_RGU_CTRL0);
+ mdelay(2000);
+
+ pr_emerg("%s: unable to restart system\n", __func__);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lpc18xx_rgu_restart_nb = {
+ .notifier_call = lpc18xx_rgu_restart,
+ .priority = 192,
+};
+
+/*
+ * The LPC18xx RGU has mostly self-deasserting resets except for the
+ * two reset lines going to the internal Cortex-M0 cores.
+ *
+ * To prevent the M0 core resets from accidentally getting deasserted,
+ * the status register must be checked and bits in the control register
+ * set to preserve the state.
+ */
+static int lpc18xx_rgu_setclear_reset(struct reset_controller_dev *rcdev,
+ unsigned long id, bool set)
+{
+ struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+ u32 stat_offset = LPC18XX_RGU_ACTIVE_STATUS0;
+ u32 ctrl_offset = LPC18XX_RGU_CTRL0;
+ unsigned long flags;
+ u32 stat, rst_bit;
+
+ stat_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+ ctrl_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+ rst_bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
+
+ spin_lock_irqsave(&rc->lock, flags);
+ stat = ~readl(rc->base + stat_offset);
+ if (set)
+ writel(stat | rst_bit, rc->base + ctrl_offset);
+ else
+ writel(stat & ~rst_bit, rc->base + ctrl_offset);
+ spin_unlock_irqrestore(&rc->lock, flags);
+
+ return 0;
+}
+
+static int lpc18xx_rgu_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return lpc18xx_rgu_setclear_reset(rcdev, id, true);
+}
+
+static int lpc18xx_rgu_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return lpc18xx_rgu_setclear_reset(rcdev, id, false);
+}
+
+/* Only M0 cores require explicit reset deassert */
+static int lpc18xx_rgu_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+
+ lpc18xx_rgu_assert(rcdev, id);
+ udelay(rc->delay_us);
+
+ switch (id) {
+ case LPC43XX_RGU_M0SUB_RST:
+ case LPC43XX_RGU_M0APP_RST:
+ lpc18xx_rgu_setclear_reset(rcdev, id, false);
+ }
+
+ return 0;
+}
+
+static int lpc18xx_rgu_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+ u32 bit, offset = LPC18XX_RGU_ACTIVE_STATUS0;
+
+ offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+ bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
+
+ return !(readl(rc->base + offset) & bit);
+}
+
+static struct reset_control_ops lpc18xx_rgu_ops = {
+ .reset = lpc18xx_rgu_reset,
+ .assert = lpc18xx_rgu_assert,
+ .deassert = lpc18xx_rgu_deassert,
+ .status = lpc18xx_rgu_status,
+};
+
+static int lpc18xx_rgu_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_rgu_data *rc;
+ struct resource *res;
+ u32 fcclk, firc;
+ int ret;
+
+ rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rc->base))
+ return PTR_ERR(rc->base);
+
+ rc->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(rc->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found\n");
+ return PTR_ERR(rc->clk_reg);
+ }
+
+ rc->clk_delay = devm_clk_get(&pdev->dev, "delay");
+ if (IS_ERR(rc->clk_delay)) {
+ dev_err(&pdev->dev, "delay clock not found\n");
+ return PTR_ERR(rc->clk_delay);
+ }
+
+ ret = clk_prepare_enable(rc->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rc->clk_delay);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable delay clock\n");
+ goto dis_clk_reg;
+ }
+
+ fcclk = clk_get_rate(rc->clk_reg) / USEC_PER_SEC;
+ firc = clk_get_rate(rc->clk_delay) / USEC_PER_SEC;
+ if (fcclk == 0 || firc == 0)
+ rc->delay_us = 2;
+ else
+ rc->delay_us = DIV_ROUND_UP(fcclk, firc * firc);
+
+ spin_lock_init(&rc->lock);
+
+ rc->rcdev.owner = THIS_MODULE;
+ rc->rcdev.nr_resets = 64;
+ rc->rcdev.ops = &lpc18xx_rgu_ops;
+ rc->rcdev.of_node = pdev->dev.of_node;
+
+ platform_set_drvdata(pdev, rc);
+
+ ret = reset_controller_register(&rc->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto dis_clks;
+ }
+
+ rgu_base = rc->base;
+ ret = register_restart_handler(&lpc18xx_rgu_restart_nb);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to register restart handler\n");
+
+ return 0;
+
+dis_clks:
+ clk_disable_unprepare(rc->clk_delay);
+dis_clk_reg:
+ clk_disable_unprepare(rc->clk_reg);
+
+ return ret;
+}
+
+static int lpc18xx_rgu_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = unregister_restart_handler(&lpc18xx_rgu_restart_nb);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to unregister restart handler\n");
+
+ reset_controller_unregister(&rc->rcdev);
+
+ clk_disable_unprepare(rc->clk_delay);
+ clk_disable_unprepare(rc->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_rgu_match[] = {
+ { .compatible = "nxp,lpc1850-rgu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
+
+static struct platform_driver lpc18xx_rgu_driver = {
+ .probe = lpc18xx_rgu_probe,
+ .remove = lpc18xx_rgu_remove,
+ .driver = {
+ .name = "lpc18xx-reset",
+ .of_match_table = lpc18xx_rgu_match,
+ },
+};
+module_platform_driver(lpc18xx_rgu_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
+MODULE_LICENSE("GPL v2");
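The lpc18xx_rgu_restart hook above follows the generic restart-handler pattern: a notifier block whose priority (192 here, versus the 128 default) tells the reboot machinery to prefer it over weaker reset methods. A bare-bones sketch of the pattern, with the hardware poke left as a comment:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_restart(struct notifier_block *nb, unsigned long mode,
		      void *cmd)
{
	/* write the hardware reset register here */
	return NOTIFY_DONE;	/* only reached if the reset did not fire */
}

static struct notifier_block my_restart_nb = {
	.notifier_call	= my_restart,
	.priority	= 128,	/* default priority; raise to be preferred */
};

static int my_register_restart(void)
{
	return register_restart_handler(&my_restart_nb);
}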
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 0a8def35ea2e..1a6c5d66c83b 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -24,11 +24,11 @@
#include <linux/types.h>
#define NR_BANKS 4
-#define OFFSET_MODRST 0x10
struct socfpga_reset_data {
spinlock_t lock;
void __iomem *membase;
+ u32 modrst_offset;
struct reset_controller_dev rcdev;
};
@@ -45,8 +45,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
- writel(reg | BIT(offset), data->membase + OFFSET_MODRST +
+ reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + data->modrst_offset +
(bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
@@ -67,8 +67,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
- writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST +
+ reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + data->modrst_offset +
(bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
@@ -85,7 +85,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
int offset = id % BITS_PER_LONG;
u32 reg;
- reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+ reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
return !(reg & BIT(offset));
}
@@ -100,6 +100,8 @@ static int socfpga_reset_probe(struct platform_device *pdev)
{
struct socfpga_reset_data *data;
struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
/*
* The binding was mainlined without the required property.
@@ -120,6 +122,11 @@ static int socfpga_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
+ if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) {
+ dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
+ data->modrst_offset = 0x10;
+ }
+
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
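The altr,modrst-offset hunk above is the usual pattern for a property added after the binding shipped: of_property_read_u32() returns non-zero when the property is absent, so old device trees keep working behind a warning and a hard-coded default. Reduced to a sketch:

#include <linux/device.h>
#include <linux/of.h>

static u32 modrst_offset_or_default(struct device *dev)
{
	u32 offset;

	if (of_property_read_u32(dev->of_node, "altr,modrst-offset",
				 &offset)) {
		dev_warn(dev, "missing altr,modrst-offset, assuming 0x10\n");
		offset = 0x10;	/* the value OFFSET_MODRST used to hard-code */
	}
	return offset;
}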
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
new file mode 100644
index 000000000000..89318a5d5bd7
--- /dev/null
+++ b/drivers/reset/reset-zynq.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015, National Instruments Corp.
+ *
+ * Xilinx Zynq Reset controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct zynq_reset_data {
+ struct regmap *slcr;
+ struct reset_controller_dev rcdev;
+ u32 offset;
+};
+
+#define to_zynq_reset_data(p) \
+ container_of((p), struct zynq_reset_data, rcdev)
+
+static int zynq_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+
+ pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+ bank, offset);
+
+ return regmap_update_bits(priv->slcr,
+ priv->offset + (bank * 4),
+ BIT(offset),
+ BIT(offset));
+}
+
+static int zynq_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+
+ pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+ bank, offset);
+
+ return regmap_update_bits(priv->slcr,
+ priv->offset + (bank * 4),
+ BIT(offset),
+ ~BIT(offset));
+}
+
+static int zynq_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ int ret;
+ u32 reg;
+
+ pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+ bank, offset);
+
+ ret = regmap_read(priv->slcr, priv->offset + (bank * 4), &reg);
+ if (ret)
+ return ret;
+
+ return !!(reg & BIT(offset));
+}
+
+static struct reset_control_ops zynq_reset_ops = {
+ .assert = zynq_reset_assert,
+ .deassert = zynq_reset_deassert,
+ .status = zynq_reset_status,
+};
+
+static int zynq_reset_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct zynq_reset_data *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->slcr = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(priv->slcr)) {
+ dev_err(&pdev->dev, "unable to get zynq-slcr regmap");
+ return PTR_ERR(priv->slcr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing IO resource\n");
+ return -ENODEV;
+ }
+
+ priv->offset = res->start;
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_LONG;
+ priv->rcdev.ops = &zynq_reset_ops;
+ priv->rcdev.of_node = pdev->dev.of_node;
+ reset_controller_register(&priv->rcdev);
+
+ return 0;
+}
+
+static int zynq_reset_remove(struct platform_device *pdev)
+{
+ struct zynq_reset_data *priv = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&priv->rcdev);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_reset_dt_ids[] = {
+ { .compatible = "xlnx,zynq-reset", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver zynq_reset_driver = {
+ .probe = zynq_reset_probe,
+ .remove = zynq_reset_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = zynq_reset_dt_ids,
+ },
+};
+module_platform_driver(zynq_reset_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
+MODULE_DESCRIPTION("Zynq Reset Controller Driver");
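The Zynq driver leans on regmap_update_bits(map, reg, mask, val), which read-modify-writes only the bits selected by mask. That is why the deassert path above can pass ~BIT(offset) as val: regmap masks it down to 0 for the selected bit. A sketch of the two operations with explicit values:

#include <linux/bitops.h>
#include <linux/regmap.h>

static int set_reset_bit(struct regmap *map, unsigned int reg, unsigned int n)
{
	return regmap_update_bits(map, reg, BIT(n), BIT(n));	/* assert */
}

static int clear_reset_bit(struct regmap *map, unsigned int reg, unsigned int n)
{
	/* passing 0 is the clearer spelling of ~BIT(n), post-masking */
	return regmap_update_bits(map, reg, BIT(n), 0);		/* deassert */
}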
diff --git a/drivers/reset/sti/reset-stih407.c b/drivers/reset/sti/reset-stih407.c
index d83db5d72d08..827eb3dae47d 100644
--- a/drivers/reset/sti/reset-stih407.c
+++ b/drivers/reset/sti/reset-stih407.c
@@ -11,7 +11,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <dt-bindings/reset-controller/stih407-resets.h>
+#include <dt-bindings/reset/stih407-resets.h>
#include "reset-syscfg.h"
/* STiH407 Peripheral powerdown definitions. */
@@ -126,7 +126,7 @@ static const struct syscfg_reset_controller_data stih407_picophyreset_controller
.channels = stih407_picophyresets,
};
-static struct of_device_id stih407_reset_match[] = {
+static const struct of_device_id stih407_reset_match[] = {
{
.compatible = "st,stih407-powerdown",
.data = &stih407_powerdown_controller,
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
index 8dad603d863c..6f220cdbef46 100644
--- a/drivers/reset/sti/reset-stih415.c
+++ b/drivers/reset/sti/reset-stih415.c
@@ -13,7 +13,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <dt-bindings/reset-controller/stih415-resets.h>
+#include <dt-bindings/reset/stih415-resets.h>
#include "reset-syscfg.h"
@@ -89,7 +89,7 @@ static struct syscfg_reset_controller_data stih415_softreset_controller = {
.channels = stih415_softresets,
};
-static struct of_device_id stih415_reset_match[] = {
+static const struct of_device_id stih415_reset_match[] = {
{ .compatible = "st,stih415-powerdown",
.data = &stih415_powerdown_controller, },
{ .compatible = "st,stih415-softreset",
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
index 79aed70a26c0..c581d606ef0f 100644
--- a/drivers/reset/sti/reset-stih416.c
+++ b/drivers/reset/sti/reset-stih416.c
@@ -13,7 +13,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <dt-bindings/reset-controller/stih416-resets.h>
+#include <dt-bindings/reset/stih416-resets.h>
#include "reset-syscfg.h"
@@ -120,7 +120,7 @@ static struct syscfg_reset_controller_data stih416_softreset_controller = {
.channels = stih416_softresets,
};
-static struct of_device_id stih416_reset_match[] = {
+static const struct of_device_id stih416_reset_match[] = {
{ .compatible = "st,stih416-powerdown",
.data = &stih416_powerdown_controller, },
{ .compatible = "st,stih416-softreset",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 83b4b89b9d5a..9d4290617cee 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -945,11 +945,11 @@ config RTC_DRV_DA9055
will be called rtc-da9055
config RTC_DRV_DA9063
- tristate "Dialog Semiconductor DA9063 RTC"
- depends on MFD_DA9063
+ tristate "Dialog Semiconductor DA9063/DA9062 RTC"
+ depends on MFD_DA9063 || MFD_DA9062
help
If you say yes here you will get support for the RTC subsystem
- of the Dialog Semiconductor DA9063.
+ of the Dialog Semiconductor PMIC chips DA9063 and DA9062.
This driver can also be built as a module. If so, the module
will be called "rtc-da9063".
@@ -1116,6 +1116,13 @@ config RTC_DRV_OPAL
This driver can also be built as a module. If so, the module
will be called rtc-opal.
+config RTC_DRV_ZYNQMP
+ tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
+ depends on OF
+ help
+ If you say yes here you get support for the RTC controller found on
+ Xilinx Zynq Ultrascale+ MPSoC.
+
comment "on-CPU RTC drivers"
config RTC_DRV_DAVINCI
@@ -1306,11 +1313,13 @@ config RTC_DRV_GENERIC
just say Y.
config RTC_DRV_PXA
- tristate "PXA27x/PXA3xx"
- depends on ARCH_PXA
- help
- If you say Y here you will get access to the real time clock
- built into your PXA27x or PXA3xx CPU.
+ tristate "PXA27x/PXA3xx"
+ depends on ARCH_PXA
+ select RTC_DRV_SA1100
+ help
+ If you say Y here you will get access to the real time clock
+ built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
+ consisting of an SA1100 compatible RTC and the extended PXA RTC.
This RTC driver uses PXA RTC registers available since pxa27x
series (RDxR, RYxR) instead of legacy RCNR, RTAR.
@@ -1456,6 +1465,18 @@ config RTC_DRV_JZ4740
This driver can also be built as a module. If so, the module
will be called rtc-jz4740.
+config RTC_DRV_LPC24XX
+ tristate "NXP RTC for LPC178x/18xx/408x/43xx"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the NXP RTC which can be found on
+ NXP LPC178x/18xx/408x/43xx devices.
+
+ If you have one of the devices above enable this driver to use
+ the hardware RTC. This driver can also be built as a module. If
+ so, the module will be called rtc-lpc24xx.
+
config RTC_DRV_LPC32XX
depends on ARCH_LPC32XX
tristate "NXP LPC32XX RTC"
@@ -1523,6 +1544,7 @@ config RTC_DRV_MXC
config RTC_DRV_SNVS
tristate "Freescale SNVS RTC support"
+ select REGMAP_MMIO
depends on HAS_IOMEM
depends on OF
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 1b09a62fcf4b..e491eb524434 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
+obj-$(CONFIG_RTC_DRV_LPC24XX) += rtc-lpc24xx.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
obj-$(CONFIG_RTC_DRV_LOONGSON1) += rtc-ls1x.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
@@ -158,3 +159,4 @@ obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o
+obj-$(CONFIG_RTC_DRV_ZYNQMP) += rtc-zynqmp.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index ea2a315df6b7..de86578bcd6d 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -202,6 +202,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->max_user_freq = 64;
rtc->dev.parent = dev;
rtc->dev.class = rtc_class;
+ rtc->dev.groups = rtc_get_dev_attribute_groups();
rtc->dev.release = rtc_device_release;
mutex_init(&rtc->ops_lock);
@@ -234,12 +235,12 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
err = device_register(&rtc->dev);
if (err) {
+ /* This will free both memory and the ID */
put_device(&rtc->dev);
- goto exit_kfree;
+ goto exit;
}
rtc_dev_add_device(rtc);
- rtc_sysfs_add_device(rtc);
rtc_proc_add_device(rtc);
dev_info(dev, "rtc core: registered %s as %s\n",
@@ -247,9 +248,6 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
return rtc;
-exit_kfree:
- kfree(rtc);
-
exit_ida:
ida_simple_remove(&rtc_ida, id);
@@ -268,19 +266,17 @@ EXPORT_SYMBOL_GPL(rtc_device_register);
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
- if (get_device(&rtc->dev) != NULL) {
- mutex_lock(&rtc->ops_lock);
- /* remove innards of this RTC, then disable it, before
- * letting any rtc_class_open() users access it again
- */
- rtc_sysfs_del_device(rtc);
- rtc_dev_del_device(rtc);
- rtc_proc_del_device(rtc);
- device_unregister(&rtc->dev);
- rtc->ops = NULL;
- mutex_unlock(&rtc->ops_lock);
- put_device(&rtc->dev);
- }
+ mutex_lock(&rtc->ops_lock);
+ /*
+ * Remove innards of this RTC, then disable it, before
+ * letting any rtc_class_open() users access it again
+ */
+ rtc_dev_del_device(rtc);
+ rtc_proc_del_device(rtc);
+ device_del(&rtc->dev);
+ rtc->ops = NULL;
+ mutex_unlock(&rtc->ops_lock);
+ put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_device_unregister);
@@ -363,7 +359,6 @@ static int __init rtc_init(void)
}
rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
rtc_dev_init();
- rtc_sysfs_init(rtc_class);
return 0;
}
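The class.c change replaces the racy post-registration rtc_sysfs_add_device() with dev.groups, which the driver core materializes inside device_register() itself, so the device is never visible without its attributes. The idiom, sketched with one hypothetical read-only attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t label_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(label);

static struct attribute *my_attrs[] = {
	&dev_attr_label.attr,
	NULL,
};
ATTRIBUTE_GROUPS(my);		/* generates my_groups from my_attrs */

/* before device_register(&dev): dev.groups = my_groups; */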
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 11b639067312..5836751b8203 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -564,7 +564,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events)
{
- if (unlikely(IS_ERR_OR_NULL(rtc)))
+ if (IS_ERR_OR_NULL(rtc))
return;
pm_stay_awake(rtc->dev.parent);
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 7df0579d9852..466bf7f9a285 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -251,17 +251,26 @@ static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume);
static int pm80x_rtc_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm80x_platform_data *pm80x_pdata =
- dev_get_platdata(pdev->dev.parent);
- struct pm80x_rtc_pdata *pdata = NULL;
+ struct pm80x_rtc_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm80x_rtc_info *info;
+ struct device_node *node = pdev->dev.of_node;
struct rtc_time tm;
unsigned long ticks = 0;
int ret;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL)
- dev_warn(&pdev->dev, "No platform data!\n");
+ if (!pdata && !node) {
+ dev_err(&pdev->dev,
+ "pm80x-rtc requires platform data or of_node\n");
+ return -EINVAL;
+ }
+
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ }
info =
devm_kzalloc(&pdev->dev, sizeof(struct pm80x_rtc_info), GFP_KERNEL);
@@ -327,11 +336,8 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
PM800_RTC1_USE_XO);
- if (pm80x_pdata) {
- pdata = pm80x_pdata->rtc;
- if (pdata)
- info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
- }
+ /* remember whether this power up is caused by PMIC RTC or not */
+ info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
device_init_wakeup(&pdev->dev, 1);
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index b5cbc1bf5a3e..a319bf1e49de 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -1009,6 +1009,7 @@ static const struct of_device_id abb5zes3_dt_match[] = {
{ .compatible = "abracon,abb5zes3" },
{ },
};
+MODULE_DEVICE_TABLE(of, abb5zes3_dt_match);
#endif
static const struct i2c_device_id abb5zes3_id[] = {
@@ -1020,7 +1021,6 @@ MODULE_DEVICE_TABLE(i2c, abb5zes3_id);
static struct i2c_driver abb5zes3_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
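Several rtc hunks here only add MODULE_DEVICE_TABLE(of, ...). The macro exports the match table as module alias metadata, which is what lets udev/modprobe autoload the module when the kernel announces a device with a matching compatible string; without it the driver works built-in but never loads automatically as a module. In sketch form:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "vendor,my-device" },	/* hypothetical */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, my_dt_match);	/* emits the "of:..." module alias */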
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 133d2e2e1a25..51407c4c7bd2 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -445,7 +445,9 @@ static const struct rtc_class_ops ab8540_rtc_ops = {
static const struct platform_device_id ab85xx_rtc_ids[] = {
{ "ab8500-rtc", (kernel_ulong_t)&ab8500_rtc_ops, },
{ "ab8540-rtc", (kernel_ulong_t)&ab8540_rtc_ops, },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, ab85xx_rtc_ids);
static int ab8500_rtc_probe(struct platform_device *pdev)
{
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 4337c3bc6ace..afea84c7a155 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -28,7 +28,7 @@
#define ABX8XX_REG_WD 0x07
#define ABX8XX_REG_CTRL1 0x10
-#define ABX8XX_CTRL_WRITE BIT(1)
+#define ABX8XX_CTRL_WRITE BIT(0)
#define ABX8XX_CTRL_12_24 BIT(6)
#define ABX8XX_REG_CFG_KEY 0x1f
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 4b62d1a875e4..9a3f2a6f512e 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -40,13 +40,6 @@ struct armada38x_rtc {
void __iomem *regs;
void __iomem *regs_soc;
spinlock_t lock;
- /*
- * While setting the time, the RTC TIME register should not be
- * accessed. Setting the RTC time involves sleeping during
- * 100ms, so a mutex instead of a spinlock is used to protect
- * it
- */
- struct mutex mutex_time;
int irq;
};
@@ -64,9 +57,9 @@ static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset)
static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct armada38x_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time, time_check;
+ unsigned long time, time_check, flags;
- mutex_lock(&rtc->mutex_time);
+ spin_lock_irqsave(&rtc->lock, flags);
time = readl(rtc->regs + RTC_TIME);
/*
* WA for failing time set attempts. As stated in HW ERRATA if
@@ -77,7 +70,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
if ((time_check - time) > 1)
time_check = readl(rtc->regs + RTC_TIME);
- mutex_unlock(&rtc->mutex_time);
+ spin_unlock_irqrestore(&rtc->lock, flags);
rtc_time_to_tm(time_check, tm);
@@ -95,16 +88,16 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (ret)
goto out;
/*
- * Setting the RTC time not always succeeds. According to the
- * errata we need to first write on the status register and
- * then wait for 100ms before writing to the time register to be
- * sure that the data will be taken into account.
+ * According to errata FE-3124064, a write to the RTC TIME register
+ * may fail. As a workaround, after writing to the RTC TIME
+ * register, issue a dummy write of 0x0 twice to RTC Status
+ * register.
*/
- mutex_lock(&rtc->mutex_time);
- rtc_delayed_write(0, rtc, RTC_STATUS);
- msleep(100);
+ spin_lock_irqsave(&rtc->lock, flags);
rtc_delayed_write(time, rtc, RTC_TIME);
- mutex_unlock(&rtc->mutex_time);
+ rtc_delayed_write(0, rtc, RTC_STATUS);
+ rtc_delayed_write(0, rtc, RTC_STATUS);
+ spin_unlock_irqrestore(&rtc->lock, flags);
out:
return ret;
@@ -229,7 +222,6 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&rtc->lock);
- mutex_init(&rtc->mutex_time);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
rtc->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -303,6 +295,7 @@ static const struct of_device_id armada38x_rtc_of_match_table[] = {
{ .compatible = "marvell,armada-380-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table);
#endif
static struct platform_driver armada38x_rtc_driver = {
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 9f38eda69154..56cc5821118b 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -45,7 +45,7 @@ static void as3722_time_to_reg(u8 *rbuff, struct rtc_time *tm)
rbuff[1] = bin2bcd(tm->tm_min);
rbuff[2] = bin2bcd(tm->tm_hour);
rbuff[3] = bin2bcd(tm->tm_mday);
- rbuff[4] = bin2bcd(tm->tm_mon);
+ rbuff[4] = bin2bcd(tm->tm_mon + 1);
rbuff[5] = bin2bcd(tm->tm_year - (AS3722_RTC_START_YEAR - 1900));
}
@@ -55,7 +55,7 @@ static void as3722_reg_to_time(u8 *rbuff, struct rtc_time *tm)
tm->tm_min = bcd2bin(rbuff[1] & 0x7F);
tm->tm_hour = bcd2bin(rbuff[2] & 0x3F);
tm->tm_mday = bcd2bin(rbuff[3] & 0x3F);
- tm->tm_mon = bcd2bin(rbuff[4] & 0x1F);
+ tm->tm_mon = bcd2bin(rbuff[4] & 0x1F) - 1;
tm->tm_year = (AS3722_RTC_START_YEAR - 1900) + bcd2bin(rbuff[5] & 0x7F);
return;
}
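The as3722 hunk is a classic RTC off-by-one: struct rtc_time counts months 0..11 (like userspace struct tm), while BCD RTC hardware almost always stores 1..12, so each direction needs a +/-1. Isolated as a sketch:

#include <linux/bcd.h>
#include <linux/rtc.h>

static u8 month_to_hw(const struct rtc_time *tm)
{
	return bin2bcd(tm->tm_mon + 1);		/* 0..11 -> BCD 0x01..0x12 */
}

static void month_from_hw(u8 reg, struct rtc_time *tm)
{
	tm->tm_mon = bcd2bin(reg & 0x1f) - 1;	/* BCD 0x01..0x12 -> 0..11 */
}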
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 35efd3f75b18..cb62e214b52a 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -18,20 +18,21 @@
*
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/ioctl.h>
-#include <linux/completion.h>
#include <linux/io.h>
-#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
#include <linux/suspend.h>
+#include <linux/time.h>
#include <linux/uaccess.h>
#include "rtc-at91rm9200.h"
@@ -59,6 +60,7 @@ static bool suspended;
static DEFINE_SPINLOCK(suspended_lock);
static unsigned long cached_events;
static u32 at91_rtc_imr;
+static struct clk *sclk;
static void at91_rtc_write_ier(u32 mask)
{
@@ -407,6 +409,16 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
+
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable slow clock\n");
+ return ret;
+ }
+
at91_rtc_write(AT91_RTC_CR, 0);
at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
@@ -420,7 +432,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
"at91_rtc", pdev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
- return ret;
+ goto err_clk;
}
/* cpu init code should really have flagged this device as
@@ -431,8 +443,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&at91_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto err_clk;
+ }
platform_set_drvdata(pdev, rtc);
/* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
@@ -442,6 +456,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
+
+err_clk:
+ clk_disable_unprepare(sclk);
+
+ return ret;
}
/*
@@ -454,6 +473,8 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
+ clk_disable_unprepare(sclk);
+
return 0;
}
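Both AT91 patches make the same structural change: once clk_prepare_enable() has succeeded, early returns leak the enabled clock, so every later failure is rerouted through a goto label that disables it. The skeleton (do_more_setup() is a hypothetical stand-in for the IRQ and RTC registration steps):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int do_more_setup(struct platform_device *pdev);	/* hypothetical */

static int my_probe(struct platform_device *pdev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;	/* nothing enabled yet, a plain return is fine */

	ret = do_more_setup(pdev);
	if (ret)
		goto err_clk;	/* must undo the clock from here on */

	return 0;

err_clk:
	clk_disable_unprepare(clk);
	return ret;
}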
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 5ccaee32df72..7206e2fa4383 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -11,20 +11,20 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/time.h>
-#include <linux/rtc.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
-#include <linux/slab.h>
-#include <linux/platform_data/atmel.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
-#include <linux/clk.h>
+#include <linux/time.h>
/*
* This driver uses two configurable hardware resources that live in the
@@ -425,18 +425,19 @@ static int at91_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->sclk))
return PTR_ERR(rtc->sclk);
- sclk_rate = clk_get_rate(rtc->sclk);
- if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
- dev_err(&pdev->dev, "Invalid slow clock rate\n");
- return -EINVAL;
- }
-
ret = clk_prepare_enable(rtc->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
}
+ sclk_rate = clk_get_rate(rtc->sclk);
+ if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
+ dev_err(&pdev->dev, "Invalid slow clock rate\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
mr = rtt_readl(rtc, MR);
/* unless RTT is counting at 1 Hz, re-initialize it */
@@ -451,8 +452,10 @@ static int at91_rtc_probe(struct platform_device *pdev)
rtc->rtcdev = devm_rtc_device_register(&pdev->dev, pdev->name,
&at91_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtcdev))
- return PTR_ERR(rtc->rtcdev);
+ if (IS_ERR(rtc->rtcdev)) {
+ ret = PTR_ERR(rtc->rtcdev);
+ goto err_clk;
+ }
/* register irq handler after we know what name we'll use */
ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
@@ -460,7 +463,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
dev_name(&rtc->rtcdev->dev), rtc);
if (ret) {
dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
- return ret;
+ goto err_clk;
}
/* NOTE: sam9260 rev A silicon has a ROM bug which resets the
@@ -474,6 +477,11 @@ static int at91_rtc_probe(struct platform_device *pdev)
dev_name(&rtc->rtcdev->dev));
return 0;
+
+err_clk:
+ clk_disable_unprepare(rtc->sclk);
+
+ return ret;
}
/*
@@ -487,8 +495,7 @@ static int at91_rtc_remove(struct platform_device *pdev)
/* disable all interrupts */
rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
- if (!IS_ERR(rtc->sclk))
- clk_disable_unprepare(rtc->sclk);
+ clk_disable_unprepare(rtc->sclk);
return 0;
}
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 3d44b11721ea..535a5f9338d0 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -361,7 +361,7 @@ static int bfin_rtc_probe(struct platform_device *pdev)
/* Register our RTC with the RTC framework */
rtc->rtc_dev = devm_rtc_device_register(dev, pdev->name, &bfin_rtc_ops,
THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev)))
+ if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
/* Grab the IRQ and init the hardware */
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 92679df6d6e2..0299988b4f13 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -212,7 +212,7 @@ static int bq32k_probe(struct i2c_client *client,
if (error)
return error;
- if (client && client->dev.of_node)
+ if (client->dev.of_node)
trickle_charger_of_init(dev, client->dev.of_node);
rtc = devm_rtc_device_register(&client->dev, bq32k_driver.driver.name,
@@ -234,7 +234,6 @@ MODULE_DEVICE_TABLE(i2c, bq32k_id);
static struct i2c_driver bq32k_driver = {
.driver = {
.name = "bq32k",
- .owner = THIS_MODULE,
},
.probe = bq32k_probe,
.id_table = bq32k_id,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index a82556a0757a..8f7034ba7d9e 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -41,7 +41,6 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/dmi.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -51,6 +50,7 @@ struct cmos_rtc {
struct device *dev;
int irq;
struct resource *iomem;
+ time64_t alarm_expires;
void (*wake_on)(struct device *);
void (*wake_off)(struct device *);
@@ -377,53 +377,11 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
spin_unlock_irq(&rtc_lock);
- return 0;
-}
-
-/*
- * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
- */
-static bool alarm_disable_quirk;
+ cmos->alarm_expires = rtc_tm_to_time64(&t->time);
-static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
-{
- alarm_disable_quirk = true;
- pr_info("BIOS has alarm-disable quirk - RTC alarms disabled\n");
return 0;
}
-static const struct dmi_system_id rtc_quirks[] __initconst = {
- /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
- {
- .callback = set_alarm_disable_quirk,
- .ident = "IBM Truman",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
- },
- },
- /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
- {
- .callback = set_alarm_disable_quirk,
- .ident = "Gigabyte GA-990XA-UD3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "Gigabyte Technology Co., Ltd."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
- },
- },
- /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
- {
- .callback = set_alarm_disable_quirk,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {}
-};
-
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -432,9 +390,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (!is_valid_irq(cmos->irq))
return -EINVAL;
- if (alarm_disable_quirk)
- return 0;
-
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -512,13 +467,6 @@ cmos_nvram_read(struct file *filp, struct kobject *kobj,
{
int retval;
- if (unlikely(off >= attr->size))
- return 0;
- if (unlikely(off < 0))
- return -EINVAL;
- if ((off + count) > attr->size)
- count = attr->size - off;
-
off += NVRAM_OFFSET;
spin_lock_irq(&rtc_lock);
for (retval = 0; count; count--, off++, retval++) {
@@ -543,12 +491,6 @@ cmos_nvram_write(struct file *filp, struct kobject *kobj,
int retval;
cmos = dev_get_drvdata(container_of(kobj, struct device, kobj));
- if (unlikely(off >= attr->size))
- return -EFBIG;
- if (unlikely(off < 0))
- return -EINVAL;
- if ((off + count) > attr->size)
- count = attr->size - off;
/* NOTE: on at least PCs and Ataris, the boot firmware uses a
* checksum on part of the NVRAM data. That's currently ignored
@@ -860,6 +802,51 @@ static void __exit cmos_do_remove(struct device *dev)
cmos->dev = NULL;
}
+static int cmos_aie_poweroff(struct device *dev)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_time now;
+ time64_t t_now;
+ int retval = 0;
+ unsigned char rtc_control;
+
+ if (!cmos->alarm_expires)
+ return -EINVAL;
+
+ spin_lock_irq(&rtc_lock);
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
+ /* We only care about the situation where AIE is disabled. */
+ if (rtc_control & RTC_AIE)
+ return -EBUSY;
+
+ cmos_read_time(dev, &now);
+ t_now = rtc_tm_to_time64(&now);
+
+ /*
+ * When enabling "RTC wake-up" in BIOS setup, the machine reboots
+ * automatically right after shutdown on some buggy boxes.
+ * This automatic rebooting issue won't happen when the alarm
+ * time is later than now+1 seconds.
+ *
+ * If the alarm time is equal to now+1 seconds, the issue can be
+ * prevented by cancelling the alarm.
+ */
+ if (cmos->alarm_expires == t_now + 1) {
+ struct rtc_wkalrm alarm;
+
+ /* Cancel the AIE timer by configuring the past time. */
+ rtc_time64_to_tm(t_now - 1, &alarm.time);
+ alarm.enabled = 0;
+ retval = cmos_set_alarm(dev, &alarm);
+ } else if (cmos->alarm_expires > t_now + 1) {
+ retval = -EBUSY;
+ }
+
+ return retval;
+}
+
#ifdef CONFIG_PM
static int cmos_suspend(struct device *dev)
@@ -1094,8 +1081,12 @@ static void cmos_pnp_shutdown(struct pnp_dev *pnp)
struct device *dev = &pnp->dev;
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev))
- return;
+ if (system_state == SYSTEM_POWER_OFF) {
+ int retval = cmos_poweroff(dev);
+
+ if (cmos_aie_poweroff(dev) < 0 && !retval)
+ return;
+ }
cmos_do_shutdown(cmos->irq);
}
@@ -1200,8 +1191,12 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(dev))
- return;
+ if (system_state == SYSTEM_POWER_OFF) {
+ int retval = cmos_poweroff(dev);
+
+ if (cmos_aie_poweroff(dev) < 0 && !retval)
+ return;
+ }
cmos_do_shutdown(cmos->irq);
}
@@ -1243,8 +1238,6 @@ static int __init cmos_init(void)
platform_driver_registered = true;
}
- dmi_check_system(rtc_quirks);
-
if (retval == 0)
return 0;
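Reduced to its arithmetic, the cmos_aie_poweroff() decision above distinguishes three cases around the now+1 boundary. A sketch of just that comparison (the helper name is invented):

#include <linux/errno.h>
#include <linux/time64.h>

static int classify_alarm(time64_t alarm_expires, time64_t now)
{
	if (alarm_expires == now + 1)
		return 1;	/* cancel it by programming a past time */
	if (alarm_expires > now + 1)
		return -EBUSY;	/* genuine future alarm, keep AIE armed */
	return 0;		/* already expired, nothing to do */
}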
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 56343b2fbc68..101b7a240e0f 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -263,6 +263,7 @@ static const struct of_device_id coh901331_dt_match[] = {
{ .compatible = "stericsson,coh901331" },
{},
};
+MODULE_DEVICE_TABLE(of, coh901331_dt_match);
static struct platform_driver coh901331_driver = {
.driver = {
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 5f9df7430a22..a098aea197fc 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -48,23 +48,10 @@ static inline void rtc_proc_del_device(struct rtc_device *rtc)
#endif
#ifdef CONFIG_RTC_INTF_SYSFS
-
-extern void __init rtc_sysfs_init(struct class *);
-extern void rtc_sysfs_add_device(struct rtc_device *rtc);
-extern void rtc_sysfs_del_device(struct rtc_device *rtc);
-
+const struct attribute_group **rtc_get_dev_attribute_groups(void);
#else
-
-static inline void rtc_sysfs_init(struct class *rtc)
-{
-}
-
-static inline void rtc_sysfs_add_device(struct rtc_device *rtc)
+static inline const struct attribute_group **rtc_get_dev_attribute_groups(void)
{
+ return NULL;
}
-
-static inline void rtc_sysfs_del_device(struct rtc_device *rtc)
-{
-}
-
#endif
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 7ffc5707f8b9..00a8f7f4f87c 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -12,15 +12,18 @@
* Library General Public License for more details.
*/
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/regmap.h>
+
+#include <linux/mfd/da9062/registers.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9063/core.h>
@@ -29,99 +32,231 @@
#define YEARS_FROM_DA9063(year) ((year) + 100)
#define MONTHS_FROM_DA9063(month) ((month) - 1)
-#define RTC_ALARM_DATA_LEN (DA9063_AD_REG_ALARM_Y - DA9063_AD_REG_ALARM_MI + 1)
-
-#define RTC_DATA_LEN (DA9063_REG_COUNT_Y - DA9063_REG_COUNT_S + 1)
-#define RTC_SEC 0
-#define RTC_MIN 1
-#define RTC_HOUR 2
-#define RTC_DAY 3
-#define RTC_MONTH 4
-#define RTC_YEAR 5
-
-struct da9063_rtc {
- struct rtc_device *rtc_dev;
- struct da9063 *hw;
- struct rtc_time alarm_time;
- bool rtc_sync;
- int alarm_year;
- int alarm_start;
- int alarm_len;
- int data_start;
+enum {
+ RTC_SEC = 0,
+ RTC_MIN = 1,
+ RTC_HOUR = 2,
+ RTC_DAY = 3,
+ RTC_MONTH = 4,
+ RTC_YEAR = 5,
+ RTC_DATA_LEN
+};
+
+struct da9063_compatible_rtc_regmap {
+ /* REGS */
+ int rtc_enable_reg;
+ int rtc_enable_32k_crystal_reg;
+ int rtc_alarm_secs_reg;
+ int rtc_alarm_year_reg;
+ int rtc_count_secs_reg;
+ int rtc_count_year_reg;
+ int rtc_event_reg;
+ /* MASKS */
+ int rtc_enable_mask;
+ int rtc_crystal_mask;
+ int rtc_event_alarm_mask;
+ int rtc_alarm_on_mask;
+ int rtc_alarm_status_mask;
+ int rtc_tick_on_mask;
+ int rtc_ready_to_read_mask;
+ int rtc_count_sec_mask;
+ int rtc_count_min_mask;
+ int rtc_count_hour_mask;
+ int rtc_count_day_mask;
+ int rtc_count_month_mask;
+ int rtc_count_year_mask;
+ /* ALARM CONFIG */
+ int rtc_data_start;
+ int rtc_alarm_len;
+};
+
+struct da9063_compatible_rtc {
+ struct rtc_device *rtc_dev;
+ struct rtc_time alarm_time;
+ struct regmap *regmap;
+ const struct da9063_compatible_rtc_regmap *config;
+ bool rtc_sync;
+};
+
+static const struct da9063_compatible_rtc_regmap da9063_ad_regs = {
+ /* REGS */
+ .rtc_enable_reg = DA9063_REG_CONTROL_E,
+ .rtc_alarm_secs_reg = DA9063_AD_REG_ALARM_MI,
+ .rtc_alarm_year_reg = DA9063_AD_REG_ALARM_Y,
+ .rtc_count_secs_reg = DA9063_REG_COUNT_S,
+ .rtc_count_year_reg = DA9063_REG_COUNT_Y,
+ .rtc_event_reg = DA9063_REG_EVENT_A,
+ /* MASKS */
+ .rtc_enable_mask = DA9063_RTC_EN,
+ .rtc_crystal_mask = DA9063_CRYSTAL,
+ .rtc_enable_32k_crystal_reg = DA9063_REG_EN_32K,
+ .rtc_event_alarm_mask = DA9063_E_ALARM,
+ .rtc_alarm_on_mask = DA9063_ALARM_ON,
+ .rtc_alarm_status_mask = DA9063_ALARM_STATUS_ALARM |
+ DA9063_ALARM_STATUS_TICK,
+ .rtc_tick_on_mask = DA9063_TICK_ON,
+ .rtc_ready_to_read_mask = DA9063_RTC_READ,
+ .rtc_count_sec_mask = DA9063_COUNT_SEC_MASK,
+ .rtc_count_min_mask = DA9063_COUNT_MIN_MASK,
+ .rtc_count_hour_mask = DA9063_COUNT_HOUR_MASK,
+ .rtc_count_day_mask = DA9063_COUNT_DAY_MASK,
+ .rtc_count_month_mask = DA9063_COUNT_MONTH_MASK,
+ .rtc_count_year_mask = DA9063_COUNT_YEAR_MASK,
+ /* ALARM CONFIG */
+ .rtc_data_start = RTC_MIN,
+ .rtc_alarm_len = RTC_DATA_LEN - 1,
+};
+
+static const struct da9063_compatible_rtc_regmap da9063_bb_regs = {
+ /* REGS */
+ .rtc_enable_reg = DA9063_REG_CONTROL_E,
+ .rtc_alarm_secs_reg = DA9063_BB_REG_ALARM_S,
+ .rtc_alarm_year_reg = DA9063_BB_REG_ALARM_Y,
+ .rtc_count_secs_reg = DA9063_REG_COUNT_S,
+ .rtc_count_year_reg = DA9063_REG_COUNT_Y,
+ .rtc_event_reg = DA9063_REG_EVENT_A,
+ /* MASKS */
+ .rtc_enable_mask = DA9063_RTC_EN,
+ .rtc_crystal_mask = DA9063_CRYSTAL,
+ .rtc_enable_32k_crystal_reg = DA9063_REG_EN_32K,
+ .rtc_event_alarm_mask = DA9063_E_ALARM,
+ .rtc_alarm_on_mask = DA9063_ALARM_ON,
+ .rtc_alarm_status_mask = DA9063_ALARM_STATUS_ALARM |
+ DA9063_ALARM_STATUS_TICK,
+ .rtc_tick_on_mask = DA9063_TICK_ON,
+ .rtc_ready_to_read_mask = DA9063_RTC_READ,
+ .rtc_count_sec_mask = DA9063_COUNT_SEC_MASK,
+ .rtc_count_min_mask = DA9063_COUNT_MIN_MASK,
+ .rtc_count_hour_mask = DA9063_COUNT_HOUR_MASK,
+ .rtc_count_day_mask = DA9063_COUNT_DAY_MASK,
+ .rtc_count_month_mask = DA9063_COUNT_MONTH_MASK,
+ .rtc_count_year_mask = DA9063_COUNT_YEAR_MASK,
+ /* ALARM CONFIG */
+ .rtc_data_start = RTC_SEC,
+ .rtc_alarm_len = RTC_DATA_LEN,
+};
+
+static const struct da9063_compatible_rtc_regmap da9062_aa_regs = {
+ /* REGS */
+ .rtc_enable_reg = DA9062AA_CONTROL_E,
+ .rtc_alarm_secs_reg = DA9062AA_ALARM_S,
+ .rtc_alarm_year_reg = DA9062AA_ALARM_Y,
+ .rtc_count_secs_reg = DA9062AA_COUNT_S,
+ .rtc_count_year_reg = DA9062AA_COUNT_Y,
+ .rtc_event_reg = DA9062AA_EVENT_A,
+ /* MASKS */
+ .rtc_enable_mask = DA9062AA_RTC_EN_MASK,
+ .rtc_crystal_mask = DA9062AA_CRYSTAL_MASK,
+ .rtc_enable_32k_crystal_reg = DA9062AA_EN_32K,
+ .rtc_event_alarm_mask = DA9062AA_M_ALARM_MASK,
+ .rtc_alarm_on_mask = DA9062AA_ALARM_ON_MASK,
+ .rtc_alarm_status_mask = (0x02 << 6),
+ .rtc_tick_on_mask = DA9062AA_TICK_ON_MASK,
+ .rtc_ready_to_read_mask = DA9062AA_RTC_READ_MASK,
+ .rtc_count_sec_mask = DA9062AA_COUNT_SEC_MASK,
+ .rtc_count_min_mask = DA9062AA_COUNT_MIN_MASK,
+ .rtc_count_hour_mask = DA9062AA_COUNT_HOUR_MASK,
+ .rtc_count_day_mask = DA9062AA_COUNT_DAY_MASK,
+ .rtc_count_month_mask = DA9062AA_COUNT_MONTH_MASK,
+ .rtc_count_year_mask = DA9062AA_COUNT_YEAR_MASK,
+ /* ALARM CONFIG */
+ .rtc_data_start = RTC_SEC,
+ .rtc_alarm_len = RTC_DATA_LEN,
+};
+
+static const struct of_device_id da9063_compatible_reg_id_table[] = {
+ { .compatible = "dlg,da9063-rtc", .data = &da9063_bb_regs },
+ { .compatible = "dlg,da9062-rtc", .data = &da9062_aa_regs },
+ { },
};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
-static void da9063_data_to_tm(u8 *data, struct rtc_time *tm)
+static void da9063_data_to_tm(u8 *data, struct rtc_time *tm,
+ struct da9063_compatible_rtc *rtc)
{
- tm->tm_sec = data[RTC_SEC] & DA9063_COUNT_SEC_MASK;
- tm->tm_min = data[RTC_MIN] & DA9063_COUNT_MIN_MASK;
- tm->tm_hour = data[RTC_HOUR] & DA9063_COUNT_HOUR_MASK;
- tm->tm_mday = data[RTC_DAY] & DA9063_COUNT_DAY_MASK;
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
+
+ tm->tm_sec = data[RTC_SEC] & config->rtc_count_sec_mask;
+ tm->tm_min = data[RTC_MIN] & config->rtc_count_min_mask;
+ tm->tm_hour = data[RTC_HOUR] & config->rtc_count_hour_mask;
+ tm->tm_mday = data[RTC_DAY] & config->rtc_count_day_mask;
tm->tm_mon = MONTHS_FROM_DA9063(data[RTC_MONTH] &
- DA9063_COUNT_MONTH_MASK);
+ config->rtc_count_month_mask);
tm->tm_year = YEARS_FROM_DA9063(data[RTC_YEAR] &
- DA9063_COUNT_YEAR_MASK);
+ config->rtc_count_year_mask);
}
-static void da9063_tm_to_data(struct rtc_time *tm, u8 *data)
+static void da9063_tm_to_data(struct rtc_time *tm, u8 *data,
+ struct da9063_compatible_rtc *rtc)
{
- data[RTC_SEC] &= ~DA9063_COUNT_SEC_MASK;
- data[RTC_SEC] |= tm->tm_sec & DA9063_COUNT_SEC_MASK;
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
+
+ data[RTC_SEC] &= ~config->rtc_count_sec_mask;
+ data[RTC_SEC] |= tm->tm_sec & config->rtc_count_sec_mask;
- data[RTC_MIN] &= ~DA9063_COUNT_MIN_MASK;
- data[RTC_MIN] |= tm->tm_min & DA9063_COUNT_MIN_MASK;
+ data[RTC_MIN] &= ~config->rtc_count_min_mask;
+ data[RTC_MIN] |= tm->tm_min & config->rtc_count_min_mask;
- data[RTC_HOUR] &= ~DA9063_COUNT_HOUR_MASK;
- data[RTC_HOUR] |= tm->tm_hour & DA9063_COUNT_HOUR_MASK;
+ data[RTC_HOUR] &= ~config->rtc_count_hour_mask;
+ data[RTC_HOUR] |= tm->tm_hour & config->rtc_count_hour_mask;
- data[RTC_DAY] &= ~DA9063_COUNT_DAY_MASK;
- data[RTC_DAY] |= tm->tm_mday & DA9063_COUNT_DAY_MASK;
+ data[RTC_DAY] &= ~config->rtc_count_day_mask;
+ data[RTC_DAY] |= tm->tm_mday & config->rtc_count_day_mask;
- data[RTC_MONTH] &= ~DA9063_COUNT_MONTH_MASK;
+ data[RTC_MONTH] &= ~config->rtc_count_month_mask;
data[RTC_MONTH] |= MONTHS_TO_DA9063(tm->tm_mon) &
- DA9063_COUNT_MONTH_MASK;
+ config->rtc_count_month_mask;
- data[RTC_YEAR] &= ~DA9063_COUNT_YEAR_MASK;
+ data[RTC_YEAR] &= ~config->rtc_count_year_mask;
data[RTC_YEAR] |= YEARS_TO_DA9063(tm->tm_year) &
- DA9063_COUNT_YEAR_MASK;
+ config->rtc_count_year_mask;
}
static int da9063_rtc_stop_alarm(struct device *dev)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
- return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
- DA9063_ALARM_ON, 0);
+ return regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_year_reg,
+ config->rtc_alarm_on_mask,
+ 0);
}
static int da9063_rtc_start_alarm(struct device *dev)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
- return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
- DA9063_ALARM_ON, DA9063_ALARM_ON);
+ return regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_year_reg,
+ config->rtc_alarm_on_mask,
+ config->rtc_alarm_on_mask);
}
static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
unsigned long tm_secs;
unsigned long al_secs;
u8 data[RTC_DATA_LEN];
int ret;
- ret = regmap_bulk_read(rtc->hw->regmap, DA9063_REG_COUNT_S,
+ ret = regmap_bulk_read(rtc->regmap,
+ config->rtc_count_secs_reg,
data, RTC_DATA_LEN);
if (ret < 0) {
dev_err(dev, "Failed to read RTC time data: %d\n", ret);
return ret;
}
- if (!(data[RTC_SEC] & DA9063_RTC_READ)) {
+ if (!(data[RTC_SEC] & config->rtc_ready_to_read_mask)) {
dev_dbg(dev, "RTC not yet ready to be read by the host\n");
return -EINVAL;
}
- da9063_data_to_tm(data, tm);
+ da9063_data_to_tm(data, tm, rtc);
rtc_tm_to_time(tm, &tm_secs);
rtc_tm_to_time(&rtc->alarm_time, &al_secs);
@@ -137,12 +272,14 @@ static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int da9063_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
u8 data[RTC_DATA_LEN];
int ret;
- da9063_tm_to_data(tm, data);
- ret = regmap_bulk_write(rtc->hw->regmap, DA9063_REG_COUNT_S,
+ da9063_tm_to_data(tm, data, rtc);
+ ret = regmap_bulk_write(rtc->regmap,
+ config->rtc_count_secs_reg,
data, RTC_DATA_LEN);
if (ret < 0)
dev_err(dev, "Failed to set RTC time data: %d\n", ret);
@@ -152,26 +289,31 @@ static int da9063_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int da9063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
u8 data[RTC_DATA_LEN];
int ret;
unsigned int val;
data[RTC_SEC] = 0;
- ret = regmap_bulk_read(rtc->hw->regmap, rtc->alarm_start,
- &data[rtc->data_start], rtc->alarm_len);
+ ret = regmap_bulk_read(rtc->regmap,
+ config->rtc_alarm_secs_reg,
+ &data[config->rtc_data_start],
+ config->rtc_alarm_len);
if (ret < 0)
return ret;
- da9063_data_to_tm(data, &alrm->time);
+ da9063_data_to_tm(data, &alrm->time, rtc);
- alrm->enabled = !!(data[RTC_YEAR] & DA9063_ALARM_ON);
+ alrm->enabled = !!(data[RTC_YEAR] & config->rtc_alarm_on_mask);
- ret = regmap_read(rtc->hw->regmap, DA9063_REG_EVENT_A, &val);
+ ret = regmap_read(rtc->regmap,
+ config->rtc_event_reg,
+ &val);
if (ret < 0)
return ret;
- if (val & (DA9063_E_ALARM))
+ if (val & config->rtc_event_alarm_mask)
alrm->pending = 1;
else
alrm->pending = 0;
@@ -181,11 +323,12 @@ static int da9063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct da9063_rtc *rtc = dev_get_drvdata(dev);
+ struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
u8 data[RTC_DATA_LEN];
int ret;
- da9063_tm_to_data(&alrm->time, data);
+ da9063_tm_to_data(&alrm->time, data, rtc);
ret = da9063_rtc_stop_alarm(dev);
if (ret < 0) {
@@ -193,14 +336,16 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
- ret = regmap_bulk_write(rtc->hw->regmap, rtc->alarm_start,
- &data[rtc->data_start], rtc->alarm_len);
+ ret = regmap_bulk_write(rtc->regmap,
+ config->rtc_alarm_secs_reg,
+ &data[config->rtc_data_start],
+ config->rtc_alarm_len);
if (ret < 0) {
dev_err(dev, "Failed to write alarm: %d\n", ret);
return ret;
}
- da9063_data_to_tm(data, &rtc->alarm_time);
+ da9063_data_to_tm(data, &rtc->alarm_time, rtc);
if (alrm->enabled) {
ret = da9063_rtc_start_alarm(dev);
@@ -213,7 +358,8 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static int da9063_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+static int da9063_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
if (enabled)
return da9063_rtc_start_alarm(dev);
@@ -223,10 +369,13 @@ static int da9063_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static irqreturn_t da9063_alarm_event(int irq, void *data)
{
- struct da9063_rtc *rtc = data;
+ struct da9063_compatible_rtc *rtc = data;
+ const struct da9063_compatible_rtc_regmap *config = rtc->config;
- regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
- DA9063_ALARM_ON, 0);
+ regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_year_reg,
+ config->rtc_alarm_on_mask,
+ 0);
rtc->rtc_sync = true;
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -244,72 +393,92 @@ static const struct rtc_class_ops da9063_rtc_ops = {
static int da9063_rtc_probe(struct platform_device *pdev)
{
- struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
- struct da9063_rtc *rtc;
+ struct da9063_compatible_rtc *rtc;
+ const struct da9063_compatible_rtc_regmap *config;
+ const struct of_device_id *match;
int irq_alarm;
u8 data[RTC_DATA_LEN];
int ret;
- ret = regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_E,
- DA9063_RTC_EN, DA9063_RTC_EN);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to enable RTC\n");
- goto err;
- }
+ if (!pdev->dev.of_node)
+ return -ENXIO;
- ret = regmap_update_bits(da9063->regmap, DA9063_REG_EN_32K,
- DA9063_CRYSTAL, DA9063_CRYSTAL);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
- goto err;
- }
+ match = of_match_node(da9063_compatible_reg_id_table,
+ pdev->dev.of_node);
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- if (da9063->variant_code == PMIC_DA9063_AD) {
- rtc->alarm_year = DA9063_AD_REG_ALARM_Y;
- rtc->alarm_start = DA9063_AD_REG_ALARM_MI;
- rtc->alarm_len = RTC_ALARM_DATA_LEN;
- rtc->data_start = RTC_MIN;
- } else {
- rtc->alarm_year = DA9063_BB_REG_ALARM_Y;
- rtc->alarm_start = DA9063_BB_REG_ALARM_S;
- rtc->alarm_len = RTC_DATA_LEN;
- rtc->data_start = RTC_SEC;
+ rtc->config = match->data;
+ if (of_device_is_compatible(pdev->dev.of_node, "dlg,da9063-rtc")) {
+ struct da9063 *chip = dev_get_drvdata(pdev->dev.parent);
+
+ if (chip->variant_code == PMIC_DA9063_AD)
+ rtc->config = &da9063_ad_regs;
}
- ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
- DA9063_ALARM_STATUS_TICK | DA9063_ALARM_STATUS_ALARM,
- 0);
+ rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc->regmap) {
+ dev_warn(&pdev->dev, "Parent regmap unavailable.\n");
+ return -ENXIO;
+ }
+
+ config = rtc->config;
+ ret = regmap_update_bits(rtc->regmap,
+ config->rtc_enable_reg,
+ config->rtc_enable_mask,
+ config->rtc_enable_mask);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable RTC\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(rtc->regmap,
+ config->rtc_enable_32k_crystal_reg,
+ config->rtc_crystal_mask,
+ config->rtc_crystal_mask);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_secs_reg,
+ config->rtc_alarm_status_mask,
+ 0);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
- goto err;
+ return ret;
}
- ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
+ ret = regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_secs_reg,
DA9063_ALARM_STATUS_ALARM,
DA9063_ALARM_STATUS_ALARM);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
- goto err;
+ return ret;
}
- ret = regmap_update_bits(da9063->regmap, rtc->alarm_year,
- DA9063_TICK_ON, 0);
+ ret = regmap_update_bits(rtc->regmap,
+ config->rtc_alarm_year_reg,
+ config->rtc_tick_on_mask,
+ 0);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable TICKs\n");
- goto err;
+ return ret;
}
data[RTC_SEC] = 0;
- ret = regmap_bulk_read(da9063->regmap, rtc->alarm_start,
- &data[rtc->data_start], rtc->alarm_len);
+ ret = regmap_bulk_read(rtc->regmap,
+ config->rtc_alarm_secs_reg,
+ &data[config->rtc_data_start],
+ config->rtc_alarm_len);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n",
ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, rtc);
@@ -322,18 +491,16 @@ static int da9063_rtc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
- goto err;
+ return ret;
}
- rtc->hw = da9063;
rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
&da9063_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- da9063_data_to_tm(data, &rtc->alarm_time);
+ da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
-err:
return ret;
}
@@ -341,6 +508,7 @@ static struct platform_driver da9063_rtc_driver = {
.probe = da9063_rtc_probe,
.driver = {
.name = DA9063_DRVNAME_RTC,
+ .of_match_table = da9063_compatible_reg_id_table,
},
};
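
The da9063/da9062 rework above replaces hard-coded register and mask macros with a per-variant config struct hung off of_device_id.data. A distilled sketch of that pattern, with illustrative names rather than the driver's actual symbols:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Each compatible carries a pointer to its register/mask layout. */
struct chip_regmap_config {
	unsigned int enable_reg;
	unsigned int enable_mask;
};

static const struct chip_regmap_config chip_a_regs = {
	.enable_reg  = 0x10,
	.enable_mask = 0x01,
};

static const struct chip_regmap_config chip_b_regs = {
	.enable_reg  = 0x20,
	.enable_mask = 0x80,
};

static const struct of_device_id chip_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &chip_a_regs },
	{ .compatible = "vendor,chip-b", .data = &chip_b_regs },
	{ }
};

static int chip_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct chip_regmap_config *config;

	if (!pdev->dev.of_node)
		return -ENXIO;

	match = of_match_node(chip_of_match, pdev->dev.of_node);
	if (!match)
		return -ENXIO;

	config = match->data;	/* variant-specific layout from here on */
	return 0;
}
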
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 799c34bcb26f..a6d9434addf6 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -477,6 +477,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
+ rtc->char_dev.kobj.parent = &rtc->dev.kobj;
}
void rtc_dev_add_device(struct rtc_device *rtc)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 12b07158a366..baa5d047f9c8 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -538,15 +538,6 @@ ds1305_nvram_read(struct file *filp, struct kobject *kobj,
spi = container_of(kobj, struct spi_device, dev.kobj);
- if (unlikely(off >= DS1305_NVRAM_LEN))
- return 0;
- if (count >= DS1305_NVRAM_LEN)
- count = DS1305_NVRAM_LEN;
- if ((off + count) > DS1305_NVRAM_LEN)
- count = DS1305_NVRAM_LEN - off;
- if (unlikely(!count))
- return count;
-
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
@@ -569,15 +560,6 @@ ds1305_nvram_write(struct file *filp, struct kobject *kobj,
spi = container_of(kobj, struct spi_device, dev.kobj);
- if (unlikely(off >= DS1305_NVRAM_LEN))
- return -EFBIG;
- if (count >= DS1305_NVRAM_LEN)
- count = DS1305_NVRAM_LEN;
- if ((off + count) > DS1305_NVRAM_LEN)
- count = DS1305_NVRAM_LEN - off;
- if (unlikely(!count))
- return count;
-
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
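
This hunk (and the matching ones for ds1307, ds1343, ds1511, ds1553, ds1742, m48t59 and rp5c01 below) deletes open-coded offset/length checks from sysfs nvram accessors. The sysfs core already clamps a bin_attribute access against the attribute's declared size before calling the driver, so the checks were dead code. A simplified illustration of that clamping, assuming the attribute was registered with a correct size (not the kernel's verbatim code):

#include <linux/types.h>

/* What the sysfs layer effectively guarantees to bin_attribute
 * callbacks: off is within [0, size) and count never runs past size. */
static size_t clamp_bin_access(loff_t off, size_t count, size_t size)
{
	if (off >= (loff_t)size)
		return 0;
	if (off + count > size)
		count = size - off;
	return count;
}
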
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 6e76de1856fc..a705e6490808 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -11,14 +11,17 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/bcd.h>
+#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/rtc/ds1307.h>
+#include <linux/rtc.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/string.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/rtc/ds1307.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -114,7 +117,7 @@ struct ds1307 {
#define HAS_ALARM 1 /* bit 1 == irq claimed */
struct i2c_client *client;
struct rtc_device *rtc;
- struct work_struct work;
+ int wakeirq;
s32 (*read_block_data)(const struct i2c_client *client, u8 command,
u8 length, u8 *values);
s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -311,27 +314,17 @@ static s32 ds1307_native_smbus_read_block_data(const struct i2c_client *client,
/*----------------------------------------------------------------------*/
/*
- * The IRQ logic includes a "real" handler running in IRQ context just
- * long enough to schedule this workqueue entry. We need a task context
- * to talk to the RTC, since I2C I/O calls require that; and disable the
- * IRQ until we clear its status on the chip, so that this handler can
- * work with any type of triggering (not just falling edge).
- *
* The ds1337 and ds1339 both have two alarms, but we only use the first
 * one (with a "seconds" field). For ds1337 we expect nINTA to be our
 * alarm signal; ds1339 chips have only one alarm signal.
*/
-static void ds1307_work(struct work_struct *work)
+static irqreturn_t ds1307_irq(int irq, void *dev_id)
{
- struct ds1307 *ds1307;
- struct i2c_client *client;
- struct mutex *lock;
+ struct i2c_client *client = dev_id;
+ struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct mutex *lock = &ds1307->rtc->ops_lock;
int stat, control;
- ds1307 = container_of(work, struct ds1307, work);
- client = ds1307->client;
- lock = &ds1307->rtc->ops_lock;
-
mutex_lock(lock);
stat = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
if (stat < 0)
@@ -352,18 +345,8 @@ static void ds1307_work(struct work_struct *work)
}
out:
- if (test_bit(HAS_ALARM, &ds1307->flags))
- enable_irq(client->irq);
mutex_unlock(lock);
-}
-static irqreturn_t ds1307_irq(int irq, void *dev_id)
-{
- struct i2c_client *client = dev_id;
- struct ds1307 *ds1307 = i2c_get_clientdata(client);
-
- disable_irq_nosync(irq);
- schedule_work(&ds1307->work);
return IRQ_HANDLED;
}
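
The hunk above removes the handcrafted "disable the IRQ, schedule a work item, re-enable" dance: with a NULL hard handler and IRQF_ONESHOT, the genirq core keeps the line masked until the threaded handler returns, and the thread may sleep for I2C transfers. A minimal sketch of such a registration; the handler and name here are assumptions, not the driver's symbols:

#include <linux/interrupt.h>

/* Sketch: the hard handler is NULL, so the core's default primary
 * handler just wakes the thread; IRQF_ONESHOT keeps the line masked
 * until my_rtc_irq_thread() returns. */
static irqreturn_t my_rtc_irq_thread(int irq, void *dev_id)
{
	/* sleeping bus I/O (I2C/SPI) is safe in thread context */
	return IRQ_HANDLED;
}

static int my_rtc_setup_irq(struct device *dev, int irq, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, NULL, my_rtc_irq_thread,
					 IRQF_SHARED | IRQF_ONESHOT,
					 "my-rtc", dev_id);
}
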
@@ -634,13 +617,14 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
MCP794XX_BIT_ALMX_C1 | \
MCP794XX_BIT_ALMX_C2)
-static void mcp794xx_work(struct work_struct *work)
+static irqreturn_t mcp794xx_irq(int irq, void *dev_id)
{
- struct ds1307 *ds1307 = container_of(work, struct ds1307, work);
- struct i2c_client *client = ds1307->client;
+ struct i2c_client *client = dev_id;
+ struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ struct mutex *lock = &ds1307->rtc->ops_lock;
int reg, ret;
- mutex_lock(&ds1307->rtc->ops_lock);
+ mutex_lock(lock);
/* Check and clear alarm 0 interrupt flag. */
reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_ALARM0_CTRL);
@@ -665,9 +649,9 @@ static void mcp794xx_work(struct work_struct *work)
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
out:
- if (test_bit(HAS_ALARM, &ds1307->flags))
- enable_irq(client->irq);
- mutex_unlock(&ds1307->rtc->ops_lock);
+ mutex_unlock(lock);
+
+ return IRQ_HANDLED;
}
static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -798,13 +782,6 @@ ds1307_nvram_read(struct file *filp, struct kobject *kobj,
client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
- if (unlikely(off >= ds1307->nvram->size))
- return 0;
- if ((off + count) > ds1307->nvram->size)
- count = ds1307->nvram->size - off;
- if (unlikely(!count))
- return count;
-
result = ds1307->read_block_data(client, ds1307->nvram_offset + off,
count, buf);
if (result < 0)
@@ -824,13 +801,6 @@ ds1307_nvram_write(struct file *filp, struct kobject *kobj,
client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
- if (unlikely(off >= ds1307->nvram->size))
- return -EFBIG;
- if ((off + count) > ds1307->nvram->size)
- count = ds1307->nvram->size - off;
- if (unlikely(!count))
- return count;
-
result = ds1307->write_block_data(client, ds1307->nvram_offset + off,
count, buf);
if (result < 0) {
@@ -896,6 +866,8 @@ static int ds1307_probe(struct i2c_client *client,
bool want_irq = false;
unsigned char *buf;
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
+ irq_handler_t irq_handler = ds1307_irq;
+
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -962,8 +934,6 @@ static int ds1307_probe(struct i2c_client *client,
* running on Vbackup (BBSQI/BBSQW)
*/
if (ds1307->client->irq > 0 && chip->alarm) {
- INIT_WORK(&ds1307->work, ds1307_work);
-
ds1307->regs[0] |= DS1337_BIT_INTCN
| bbsqi_bitpos[ds1307->type];
ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
@@ -1053,7 +1023,7 @@ static int ds1307_probe(struct i2c_client *client,
case mcp794xx:
rtc_ops = &mcp794xx_rtc_ops;
if (ds1307->client->irq > 0 && chip->alarm) {
- INIT_WORK(&ds1307->work, mcp794xx_work);
+ irq_handler = mcp794xx_irq;
want_irq = true;
}
break;
@@ -1176,18 +1146,43 @@ read_rtc:
}
if (want_irq) {
- err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
- ds1307->rtc->name, client);
+ struct device_node *node = client->dev.of_node;
+
+ err = devm_request_threaded_irq(&client->dev,
+ client->irq, NULL, irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ ds1307->rtc->name, client);
if (err) {
client->irq = 0;
dev_err(&client->dev, "unable to request IRQ!\n");
- } else {
+ goto no_irq;
+ }
+
+ set_bit(HAS_ALARM, &ds1307->flags);
+ dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+
+ /* Currently supported by OF code only! */
+ if (!node)
+ goto no_irq;
+
+ err = of_irq_get(node, 1);
+ if (err <= 0) {
+ if (err == -EPROBE_DEFER)
+ goto exit;
+ goto no_irq;
+ }
+ ds1307->wakeirq = err;
- set_bit(HAS_ALARM, &ds1307->flags);
- dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
+ err = dev_pm_set_dedicated_wake_irq(&client->dev,
+ ds1307->wakeirq);
+ if (err) {
+ dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
+ err);
+ goto exit;
}
}
+no_irq:
if (chip->nvram_size) {
ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1231,10 +1226,8 @@ static int ds1307_remove(struct i2c_client *client)
{
struct ds1307 *ds1307 = i2c_get_clientdata(client);
- if (test_and_clear_bit(HAS_ALARM, &ds1307->flags)) {
- free_irq(client->irq, client);
- cancel_work_sync(&ds1307->work);
- }
+ if (ds1307->wakeirq)
+ dev_pm_clear_wake_irq(&client->dev);
if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
@@ -1245,7 +1238,6 @@ static int ds1307_remove(struct i2c_client *client)
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
- .owner = THIS_MODULE,
},
.probe = ds1307_probe,
.remove = ds1307_remove,
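
Besides the threaded-IRQ conversion, ds1307 now reads an optional second interrupt from its DT node and hands it to the PM core as a dedicated wake IRQ; the core arms it only across system suspend, and the remove path drops it with dev_pm_clear_wake_irq(). A sketch of the pairing, under the assumption that the node lists a second interrupt (the function name is illustrative):

#include <linux/of_irq.h>
#include <linux/pm_wakeirq.h>

static int my_setup_wakeirq(struct device *dev, struct device_node *node)
{
	int wakeirq = of_irq_get(node, 1);	/* second interrupt */

	if (wakeirq <= 0)	/* 0: no mapping; <0: error (may defer) */
		return wakeirq ? wakeirq : -ENODEV;

	/* the PM core enables this IRQ as a wake source during suspend */
	return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
}
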
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ae9f997223b1..79a06dd3c185 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -162,12 +162,6 @@ static ssize_t ds1343_nvram_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct ds1343_priv *priv = dev_get_drvdata(dev);
- if (unlikely(!count))
- return count;
-
- if ((count + off) > DS1343_NVRAM_LEN)
- count = DS1343_NVRAM_LEN - off;
-
address = DS1343_NVRAM + off;
ret = regmap_bulk_write(priv->map, address, buf, count);
@@ -187,12 +181,6 @@ static ssize_t ds1343_nvram_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct ds1343_priv *priv = dev_get_drvdata(dev);
- if (unlikely(!count))
- return count;
-
- if ((count + off) > DS1343_NVRAM_LEN)
- count = DS1343_NVRAM_LEN - off;
-
address = DS1343_NVRAM + off;
ret = regmap_bulk_read(priv->map, address, buf, count);
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 167783fa7ac1..3b3049c8c9e0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -664,11 +664,8 @@ static int ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
#ifdef CONFIG_RTC_DRV_DS1374_WDT
- int res;
-
- res = misc_deregister(&ds1374_miscdev);
- if (!res)
- ds1374_miscdev.parent = NULL;
+ misc_deregister(&ds1374_miscdev);
+ ds1374_miscdev.parent = NULL;
unregister_reboot_notifier(&ds1374_wdt_notifier);
#endif
@@ -689,7 +686,7 @@ static int ds1374_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- if (client->irq >= 0 && device_may_wakeup(&client->dev))
+ if (client->irq > 0 && device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
return 0;
}
@@ -698,7 +695,7 @@ static int ds1374_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- if (client->irq >= 0 && device_may_wakeup(&client->dev))
+ if (client->irq > 0 && device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
return 0;
}
@@ -709,7 +706,6 @@ static SIMPLE_DEV_PM_OPS(ds1374_pm, ds1374_suspend, ds1374_resume);
static struct i2c_driver ds1374_driver = {
.driver = {
.name = "rtc-ds1374",
- .owner = THIS_MODULE,
.pm = &ds1374_pm,
},
.probe = ds1374_probe,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 7415c2b4d6e8..da3d04ce83bd 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -64,7 +64,7 @@ enum ds1511reg {
#define DS1511_KIE 0x04
#define DS1511_WDE 0x02
#define DS1511_WDS 0x01
-#define DS1511_RAM_MAX 0xff
+#define DS1511_RAM_MAX 0x100
#define RTC_CMD DS1511_CONTROL_B
#define RTC_CMD1 DS1511_CONTROL_A
@@ -159,7 +159,7 @@ ds1511_wdog_set(unsigned long deciseconds)
/*
* set wdog enable and wdog 'steering' bit to issue a reset
*/
- rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD);
+ rtc_write(rtc_read(RTC_CMD) | DS1511_WDE | DS1511_WDS, RTC_CMD);
}
void
@@ -407,26 +407,10 @@ ds1511_nvram_read(struct file *filp, struct kobject *kobj,
{
ssize_t count;
- /*
- * if count is more than one, turn on "burst" mode
- * turn it off when you're done
- */
- if (size > 1)
- rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
-
- if (pos > DS1511_RAM_MAX)
- pos = DS1511_RAM_MAX;
-
- if (size + pos > DS1511_RAM_MAX + 1)
- size = DS1511_RAM_MAX - pos + 1;
-
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; size > 0; count++, size--)
+ for (count = 0; count < size; count++)
*buf++ = rtc_read(DS1511_RAMDATA);
- if (count > 1)
- rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
-
return count;
}
@@ -437,26 +421,10 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj,
{
ssize_t count;
- /*
- * if count is more than one, turn on "burst" mode
- * turn it off when you're done
- */
- if (size > 1)
- rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
-
- if (pos > DS1511_RAM_MAX)
- pos = DS1511_RAM_MAX;
-
- if (size + pos > DS1511_RAM_MAX + 1)
- size = DS1511_RAM_MAX - pos + 1;
-
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; size > 0; count++, size--)
+ for (count = 0; count < size; count++)
rtc_write(*buf++, DS1511_RAMDATA);
- if (count > 1)
- rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
-
return count;
}
@@ -490,7 +458,7 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
/*
* turn on the clock and the crystal, etc.
*/
- rtc_write(0, RTC_CMD);
+ rtc_write(DS1511_BME, RTC_CMD);
rtc_write(0, RTC_CMD1);
/*
* clear the wdog counter
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index a24e091bcb41..38422ab4ec5a 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -245,7 +245,7 @@ static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
- for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ for (count = 0; count < size; count++)
*buf++ = readb(ioaddr + pos++);
return count;
}
@@ -260,7 +260,7 @@ static ssize_t ds1553_nvram_write(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
- for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ for (count = 0; count < size; count++)
writeb(*buf++, ioaddr + pos++);
return count;
}
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 818a3635a8c8..05a51ef52703 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -2145,27 +2145,7 @@ static struct platform_driver ds1685_rtc_driver = {
.probe = ds1685_rtc_probe,
.remove = ds1685_rtc_remove,
};
-
-/**
- * ds1685_rtc_init - rtc module init.
- */
-static int __init
-ds1685_rtc_init(void)
-{
- return platform_driver_register(&ds1685_rtc_driver);
-}
-
-/**
- * ds1685_rtc_exit - rtc module exit.
- */
-static void __exit
-ds1685_rtc_exit(void)
-{
- platform_driver_unregister(&ds1685_rtc_driver);
-}
-
-module_init(ds1685_rtc_init);
-module_exit(ds1685_rtc_exit);
+module_platform_driver(ds1685_rtc_driver);
/* ----------------------------------------------------------------------- */
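
module_platform_driver() generates exactly the init/exit boilerplate deleted above. Roughly what the macro expands to for this driver, simplified from the kernel's module_driver() helper:

static int __init ds1685_rtc_driver_init(void)
{
	return platform_driver_register(&ds1685_rtc_driver);
}
module_init(ds1685_rtc_driver_init);

static void __exit ds1685_rtc_driver_exit(void)
{
	platform_driver_unregister(&ds1685_rtc_driver);
}
module_exit(ds1685_rtc_driver_exit);
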
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 0f8d8ace1515..c5168b3bcf1a 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -134,7 +134,7 @@ static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
- for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
+ for (count = 0; count < size; count++)
*buf++ = readb(ioaddr + pos++);
return count;
}
@@ -149,7 +149,7 @@ static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
- for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
+ for (count = 0; count < size; count++)
writeb(*buf++, ioaddr + pos++);
return count;
}
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 7e48e532214f..4e99ace66f74 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -443,7 +443,7 @@ static int ds3232_remove(struct i2c_client *client)
{
struct ds3232 *ds3232 = i2c_get_clientdata(client);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
mutex_lock(&ds3232->mutex);
ds3232->exiting = 1;
mutex_unlock(&ds3232->mutex);
@@ -463,7 +463,10 @@ static int ds3232_suspend(struct device *dev)
if (device_can_wakeup(dev)) {
ds3232->suspended = true;
- irq_set_irq_wake(client->irq, 1);
+ if (irq_set_irq_wake(client->irq, 1)) {
+ dev_warn_once(dev, "Cannot set wakeup source\n");
+ ds3232->suspended = false;
+ }
}
return 0;
@@ -500,7 +503,6 @@ MODULE_DEVICE_TABLE(i2c, ds3232_id);
static struct i2c_driver ds3232_driver = {
.driver = {
.name = "rtc-ds3232",
- .owner = THIS_MODULE,
.pm = &ds3232_pm_ops,
},
.probe = ds3232_probe,
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 83c3b3029fa7..576eadbba296 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -523,7 +523,6 @@ exit_free:
static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
- .owner = THIS_MODULE,
},
.probe = fm3130_probe,
.id_table = fm3130_id,
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index 35f4486738fc..e84184647d15 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -148,10 +148,7 @@ static int gemini_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev = rtc_device_register(pdev->name, dev,
&gemini_rtc_ops, THIS_MODULE);
- if (likely(IS_ERR(rtc->rtc_dev)))
- return PTR_ERR(rtc->rtc_dev);
-
- return 0;
+ return PTR_ERR_OR_ZERO(rtc->rtc_dev);
}
static int gemini_rtc_remove(struct platform_device *pdev)
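
For reference, PTR_ERR_OR_ZERO() is the err.h helper that collapses the old four-line tail of probe (which had also mislabeled the error path as likely()); its shape, with annotations trimmed:

static inline int PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}
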
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index e9da7959d3fe..097325d96db5 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -599,7 +599,6 @@ MODULE_DEVICE_TABLE(of, hym8563_dt_idtable);
static struct i2c_driver hym8563_driver = {
.driver = {
.name = "rtc-hym8563",
- .owner = THIS_MODULE,
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index f9b082784b90..839d1fd63cd7 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -151,12 +151,7 @@ static int isl12022_get_datetime(struct i2c_client *client, struct rtc_time *tm)
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
- /* The clock can give out invalid datetime, but we cannot return
- * -EINVAL otherwise hwclock will refuse to set the time on bootup. */
- if (rtc_valid_tm(tm) < 0)
- dev_err(&client->dev, "retrieved date and time is invalid.\n");
-
- return 0;
+ return rtc_valid_tm(tm);
}
static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
@@ -279,6 +274,7 @@ static const struct of_device_id isl12022_dt_match[] = {
{ .compatible = "isil,isl12022" },
{ },
};
+MODULE_DEVICE_TABLE(of, isl12022_dt_match);
#endif
static const struct i2c_device_id isl12022_id[] = {
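
Several hunks in this series do nothing but add a MODULE_DEVICE_TABLE(of, ...) line. The macro exports the match table into the module's metadata, from which depmod generates modalias entries so udev/modprobe can autoload the driver when a matching DT node turns up. A minimal sketch; the alias string in the comment is approximate:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-rtc" },
	{ }
};
/* depmod turns this into roughly:
 *   alias of:N*T*Cvendor,example-rtc* <module name>
 */
MODULE_DEVICE_TABLE(of, example_dt_match);
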
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index da818d3337ce..a0462e5430c7 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -648,6 +648,7 @@ static const struct of_device_id isl12057_dt_match[] = {
{ .compatible = "isil,isl12057" },
{ },
};
+MODULE_DEVICE_TABLE(of, isl12057_dt_match);
#endif
static const struct i2c_device_id isl12057_id[] = {
@@ -659,7 +660,6 @@ MODULE_DEVICE_TABLE(i2c, isl12057_id);
static struct i2c_driver isl12057_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &isl12057_rtc_pm_ops,
.of_match_table = of_match_ptr(isl12057_dt_match),
},
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
new file mode 100644
index 000000000000..59d99596fdeb
--- /dev/null
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -0,0 +1,310 @@
+/*
+ * RTC driver for NXP LPC178x/18xx/43xx Real-Time Clock (RTC)
+ *
+ * Copyright (C) 2011 NXP Semiconductors
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* LPC24xx RTC register offsets and bits */
+#define LPC24XX_ILR 0x00
+#define LPC24XX_RTCCIF BIT(0)
+#define LPC24XX_RTCALF BIT(1)
+#define LPC24XX_CTC 0x04
+#define LPC24XX_CCR 0x08
+#define LPC24XX_CLKEN BIT(0)
+#define LPC178X_CCALEN BIT(4)
+#define LPC24XX_CIIR 0x0c
+#define LPC24XX_AMR 0x10
+#define LPC24XX_ALARM_DISABLE 0xff
+#define LPC24XX_CTIME0 0x14
+#define LPC24XX_CTIME1 0x18
+#define LPC24XX_CTIME2 0x1c
+#define LPC24XX_SEC 0x20
+#define LPC24XX_MIN 0x24
+#define LPC24XX_HOUR 0x28
+#define LPC24XX_DOM 0x2c
+#define LPC24XX_DOW 0x30
+#define LPC24XX_DOY 0x34
+#define LPC24XX_MONTH 0x38
+#define LPC24XX_YEAR 0x3c
+#define LPC24XX_ALSEC 0x60
+#define LPC24XX_ALMIN 0x64
+#define LPC24XX_ALHOUR 0x68
+#define LPC24XX_ALDOM 0x6c
+#define LPC24XX_ALDOW 0x70
+#define LPC24XX_ALDOY 0x74
+#define LPC24XX_ALMON 0x78
+#define LPC24XX_ALYEAR 0x7c
+
+/* Macros to read fields in consolidated time (CT) registers */
+#define CT0_SECS(x) (((x) >> 0) & 0x3f)
+#define CT0_MINS(x) (((x) >> 8) & 0x3f)
+#define CT0_HOURS(x) (((x) >> 16) & 0x1f)
+#define CT0_DOW(x) (((x) >> 24) & 0x07)
+#define CT1_DOM(x) (((x) >> 0) & 0x1f)
+#define CT1_MONTH(x) (((x) >> 8) & 0x0f)
+#define CT1_YEAR(x) (((x) >> 16) & 0xfff)
+#define CT2_DOY(x) (((x) >> 0) & 0xfff)
+
+#define rtc_readl(dev, reg) readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val) writel((val), (dev)->rtc_base + (reg))
+
+struct lpc24xx_rtc {
+ void __iomem *rtc_base;
+ struct rtc_device *rtc;
+ struct clk *clk_rtc;
+ struct clk *clk_reg;
+};
+
+static int lpc24xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+
+ /* Disable RTC during update */
+ rtc_writel(rtc, LPC24XX_CCR, LPC178X_CCALEN);
+
+ rtc_writel(rtc, LPC24XX_SEC, tm->tm_sec);
+ rtc_writel(rtc, LPC24XX_MIN, tm->tm_min);
+ rtc_writel(rtc, LPC24XX_HOUR, tm->tm_hour);
+ rtc_writel(rtc, LPC24XX_DOW, tm->tm_wday);
+ rtc_writel(rtc, LPC24XX_DOM, tm->tm_mday);
+ rtc_writel(rtc, LPC24XX_DOY, tm->tm_yday);
+ rtc_writel(rtc, LPC24XX_MONTH, tm->tm_mon);
+ rtc_writel(rtc, LPC24XX_YEAR, tm->tm_year);
+
+ rtc_writel(rtc, LPC24XX_CCR, LPC24XX_CLKEN | LPC178X_CCALEN);
+
+ return 0;
+}
+
+static int lpc24xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 ct0, ct1, ct2;
+
+ ct0 = rtc_readl(rtc, LPC24XX_CTIME0);
+ ct1 = rtc_readl(rtc, LPC24XX_CTIME1);
+ ct2 = rtc_readl(rtc, LPC24XX_CTIME2);
+
+ tm->tm_sec = CT0_SECS(ct0);
+ tm->tm_min = CT0_MINS(ct0);
+ tm->tm_hour = CT0_HOURS(ct0);
+ tm->tm_wday = CT0_DOW(ct0);
+ tm->tm_mon = CT1_MONTH(ct1);
+ tm->tm_mday = CT1_DOM(ct1);
+ tm->tm_year = CT1_YEAR(ct1);
+ tm->tm_yday = CT2_DOY(ct2);
+
+ return rtc_valid_tm(tm);
+}
+
+static int lpc24xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &wkalrm->time;
+
+ tm->tm_sec = rtc_readl(rtc, LPC24XX_ALSEC);
+ tm->tm_min = rtc_readl(rtc, LPC24XX_ALMIN);
+ tm->tm_hour = rtc_readl(rtc, LPC24XX_ALHOUR);
+ tm->tm_mday = rtc_readl(rtc, LPC24XX_ALDOM);
+ tm->tm_wday = rtc_readl(rtc, LPC24XX_ALDOW);
+ tm->tm_yday = rtc_readl(rtc, LPC24XX_ALDOY);
+ tm->tm_mon = rtc_readl(rtc, LPC24XX_ALMON);
+ tm->tm_year = rtc_readl(rtc, LPC24XX_ALYEAR);
+
+ wkalrm->enabled = rtc_readl(rtc, LPC24XX_AMR) == 0;
+ wkalrm->pending = !!(rtc_readl(rtc, LPC24XX_ILR) & LPC24XX_RTCCIF);
+
+ return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc24xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &wkalrm->time;
+
+ /* Disable alarm irq during update */
+ rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+
+ rtc_writel(rtc, LPC24XX_ALSEC, tm->tm_sec);
+ rtc_writel(rtc, LPC24XX_ALMIN, tm->tm_min);
+ rtc_writel(rtc, LPC24XX_ALHOUR, tm->tm_hour);
+ rtc_writel(rtc, LPC24XX_ALDOM, tm->tm_mday);
+ rtc_writel(rtc, LPC24XX_ALDOW, tm->tm_wday);
+ rtc_writel(rtc, LPC24XX_ALDOY, tm->tm_yday);
+ rtc_writel(rtc, LPC24XX_ALMON, tm->tm_mon);
+ rtc_writel(rtc, LPC24XX_ALYEAR, tm->tm_year);
+
+ if (wkalrm->enabled)
+ rtc_writel(rtc, LPC24XX_AMR, 0);
+
+ return 0;
+}
+
+static int lpc24xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct lpc24xx_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enable)
+ rtc_writel(rtc, LPC24XX_AMR, 0);
+ else
+ rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+
+ return 0;
+}
+
+static irqreturn_t lpc24xx_rtc_interrupt(int irq, void *data)
+{
+ unsigned long events = RTC_IRQF;
+ struct lpc24xx_rtc *rtc = data;
+ u32 rtc_iir;
+
+ /* Check interrupt cause */
+ rtc_iir = rtc_readl(rtc, LPC24XX_ILR);
+ if (rtc_iir & LPC24XX_RTCALF) {
+ events |= RTC_AF;
+ rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+ }
+
+ /* Clear interrupt status and report event */
+ rtc_writel(rtc, LPC24XX_ILR, rtc_iir);
+ rtc_update_irq(rtc->rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc24xx_rtc_ops = {
+ .read_time = lpc24xx_rtc_read_time,
+ .set_time = lpc24xx_rtc_set_time,
+ .read_alarm = lpc24xx_rtc_read_alarm,
+ .set_alarm = lpc24xx_rtc_set_alarm,
+ .alarm_irq_enable = lpc24xx_rtc_alarm_irq_enable,
+};
+
+static int lpc24xx_rtc_probe(struct platform_device *pdev)
+{
+ struct lpc24xx_rtc *rtc;
+ struct resource *res;
+ int irq, ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->rtc_base))
+ return PTR_ERR(rtc->rtc_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "can't get interrupt resource\n");
+ return irq;
+ }
+
+ rtc->clk_rtc = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(rtc->clk_rtc)) {
+ dev_err(&pdev->dev, "error getting rtc clock\n");
+ return PTR_ERR(rtc->clk_rtc);
+ }
+
+ rtc->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(rtc->clk_reg)) {
+ dev_err(&pdev->dev, "error getting reg clock\n");
+ return PTR_ERR(rtc->clk_reg);
+ }
+
+ ret = clk_prepare_enable(rtc->clk_rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable rtc clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rtc->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ goto disable_rtc_clk;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ /* Clear any pending interrupts */
+ rtc_writel(rtc, LPC24XX_ILR, LPC24XX_RTCCIF | LPC24XX_RTCALF);
+
+ /* Enable RTC count */
+ rtc_writel(rtc, LPC24XX_CCR, LPC24XX_CLKEN | LPC178X_CCALEN);
+
+ ret = devm_request_irq(&pdev->dev, irq, lpc24xx_rtc_interrupt, 0,
+ pdev->name, rtc);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "can't request interrupt\n");
+ goto disable_clks;
+ }
+
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, "lpc24xx-rtc",
+ &lpc24xx_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ dev_err(&pdev->dev, "can't register rtc device\n");
+ ret = PTR_ERR(rtc->rtc);
+ goto disable_clks;
+ }
+
+ return 0;
+
+disable_clks:
+ clk_disable_unprepare(rtc->clk_reg);
+disable_rtc_clk:
+ clk_disable_unprepare(rtc->clk_rtc);
+ return ret;
+}
+
+static int lpc24xx_rtc_remove(struct platform_device *pdev)
+{
+ struct lpc24xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ /* Ensure all interrupt sources are masked */
+ rtc_writel(rtc, LPC24XX_AMR, LPC24XX_ALARM_DISABLE);
+ rtc_writel(rtc, LPC24XX_CIIR, 0);
+
+ rtc_writel(rtc, LPC24XX_CCR, LPC178X_CCALEN);
+
+ clk_disable_unprepare(rtc->clk_rtc);
+ clk_disable_unprepare(rtc->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id lpc24xx_rtc_match[] = {
+ { .compatible = "nxp,lpc1788-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc24xx_rtc_match);
+
+static struct platform_driver lpc24xx_rtc_driver = {
+ .probe = lpc24xx_rtc_probe,
+ .remove = lpc24xx_rtc_remove,
+ .driver = {
+ .name = "lpc24xx-rtc",
+ .of_match_table = lpc24xx_rtc_match,
+ },
+};
+module_platform_driver(lpc24xx_rtc_driver);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com>");
+MODULE_DESCRIPTION("RTC driver for the LPC178x/18xx/408x/43xx SoCs");
+MODULE_LICENSE("GPL");
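
The consolidated-time registers read in lpc24xx_rtc_read_time() pack several calendar fields into each 32-bit word, which the CT0_*/CT1_*/CT2_* macros shift and mask apart. A worked example of the CT0 layout (bits [5:0] seconds, [13:8] minutes, [20:16] hours, [26:24] day of week); the raw value is invented for illustration:

	u32 ct0 = 0x02151e2d;

	/*
	 * CT0_SECS(ct0)  == 0x2d == 45
	 * CT0_MINS(ct0)  == 0x1e == 30
	 * CT0_HOURS(ct0) == 0x15 == 21
	 * CT0_DOW(ct0)   == 2
	 *
	 * i.e. day-of-week 2, 21:30:45
	 */
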
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 90abb5bd589c..d99a705bec07 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -345,11 +345,12 @@ static ssize_t m48t59_nvram_read(struct file *filp, struct kobject *kobj,
ssize_t cnt = 0;
unsigned long flags;
- for (; size > 0 && pos < pdata->offset; cnt++, size--) {
- spin_lock_irqsave(&m48t59->lock, flags);
+ spin_lock_irqsave(&m48t59->lock, flags);
+
+ for (; cnt < size; cnt++)
*buf++ = M48T59_READ(cnt);
- spin_unlock_irqrestore(&m48t59->lock, flags);
- }
+
+ spin_unlock_irqrestore(&m48t59->lock, flags);
return cnt;
}
@@ -365,11 +366,12 @@ static ssize_t m48t59_nvram_write(struct file *filp, struct kobject *kobj,
ssize_t cnt = 0;
unsigned long flags;
- for (; size > 0 && pos < pdata->offset; cnt++, size--) {
- spin_lock_irqsave(&m48t59->lock, flags);
+ spin_lock_irqsave(&m48t59->lock, flags);
+
+ for (; cnt < size; cnt++)
M48T59_WRITE(*buf++, cnt);
- spin_unlock_irqrestore(&m48t59->lock, flags);
- }
+
+ spin_unlock_irqrestore(&m48t59->lock, flags);
return cnt;
}
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 9e02bcda0c09..db984d4bf952 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -521,6 +521,7 @@ static const struct platform_device_id rtc_id[] = {
{ "max8997-rtc", 0 },
{},
};
+MODULE_DEVICE_TABLE(platform, rtc_id);
static struct platform_driver max8997_rtc_driver = {
.driver = {
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index 73759c9a4527..07b30a373a92 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -312,6 +312,7 @@ static const struct of_device_id moxart_rtc_match[] = {
{ .compatible = "moxa,moxart-rtc" },
{ },
};
+MODULE_DEVICE_TABLE(of, moxart_rtc_match);
static struct platform_driver moxart_rtc_driver = {
.probe = moxart_rtc_probe,
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 1767e18d5bd4..4ca4daa0b8f3 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -406,6 +406,7 @@ static const struct of_device_id mpc5121_rtc_match[] = {
{ .compatible = "fsl,mpc5200-rtc", },
{},
};
+MODULE_DEVICE_TABLE(of, mpc5121_rtc_match);
#endif
static struct platform_driver mpc5121_rtc_driver = {
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index c0090b698ff3..06a5c52b292f 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
goto out_dispose_irq;
}
+ device_init_wakeup(&pdev->dev, 1);
+
rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev,
&mtk_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
@@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev)
goto out_free_irq;
}
- device_init_wakeup(&pdev->dev, 1);
-
return 0;
out_free_irq:
@@ -373,15 +373,42 @@ static int mtk_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_rtc_suspend(struct device *dev)
+{
+ struct mt6397_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+
+ return 0;
+}
+
+static int mt6397_rtc_resume(struct device *dev)
+{
+ struct mt6397_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
+ mt6397_rtc_resume);
+
static const struct of_device_id mt6397_rtc_of_match[] = {
{ .compatible = "mediatek,mt6397-rtc", },
{ }
};
+MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
static struct platform_driver mtk_rtc_driver = {
.driver = {
.name = "mt6397-rtc",
.of_match_table = mt6397_rtc_of_match,
+ .pm = &mt6397_pm_ops,
},
.probe = mtk_rtc_probe,
.remove = mtk_rtc_remove,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 7f50d2ef7f6e..79bb28617d45 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -324,6 +324,7 @@ static const struct of_device_id rtc_mv_of_match_table[] = {
{ .compatible = "marvell,orion-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
#endif
static struct platform_driver mv_rtc_driver = {
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5fc292c2dfdf..7bd89d90048f 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -16,6 +16,8 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#define RTC_INPUT_CLK_32768HZ (0x00 << 5)
#define RTC_INPUT_CLK_32000HZ (0x01 << 5)
@@ -79,7 +81,8 @@ struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
int irq;
- struct clk *clk;
+ struct clk *clk_ref;
+ struct clk *clk_ipg;
struct rtc_time g_rtc_alarm;
enum imx_rtc_type devtype;
};
@@ -97,6 +100,15 @@ static const struct platform_device_id imx_rtc_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, imx_rtc_devtype);
+#ifdef CONFIG_OF
+static const struct of_device_id imx_rtc_dt_ids[] = {
+ { .compatible = "fsl,imx1-rtc", .data = (const void *)IMX1_RTC },
+ { .compatible = "fsl,imx21-rtc", .data = (const void *)IMX21_RTC },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_rtc_dt_ids);
+#endif
+
static inline int is_imx1_rtc(struct rtc_plat_data *data)
{
return data->devtype == IMX1_RTC;
@@ -361,29 +373,45 @@ static int mxc_rtc_probe(struct platform_device *pdev)
u32 reg;
unsigned long rate;
int ret;
+ const struct of_device_id *of_id;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->devtype = pdev->id_entry->driver_data;
+ of_id = of_match_device(imx_rtc_dt_ids, &pdev->dev);
+ if (of_id)
+ pdata->devtype = (enum imx_rtc_type)of_id->data;
+ else
+ pdata->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
- dev_err(&pdev->dev, "unable to get clock!\n");
- return PTR_ERR(pdata->clk);
+ pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(pdata->clk_ipg)) {
+ dev_err(&pdev->dev, "unable to get ipg clock!\n");
+ return PTR_ERR(pdata->clk_ipg);
}
- ret = clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk_ipg);
if (ret)
return ret;
- rate = clk_get_rate(pdata->clk);
+ pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(pdata->clk_ref)) {
+ dev_err(&pdev->dev, "unable to get ref clock!\n");
+ ret = PTR_ERR(pdata->clk_ref);
+ goto exit_put_clk_ipg;
+ }
+
+ ret = clk_prepare_enable(pdata->clk_ref);
+ if (ret)
+ goto exit_put_clk_ipg;
+
+ rate = clk_get_rate(pdata->clk_ref);
if (rate == 32768)
reg = RTC_INPUT_CLK_32768HZ;
@@ -394,7 +422,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
else {
dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
ret = -EINVAL;
- goto exit_put_clk;
+ goto exit_put_clk_ref;
}
reg |= RTC_ENABLE_BIT;
@@ -402,7 +430,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
dev_err(&pdev->dev, "hardware module can't be enabled!\n");
ret = -EIO;
- goto exit_put_clk;
+ goto exit_put_clk_ref;
}
platform_set_drvdata(pdev, pdata);
@@ -424,15 +452,17 @@ static int mxc_rtc_probe(struct platform_device *pdev)
THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
- goto exit_put_clk;
+ goto exit_put_clk_ref;
}
pdata->rtc = rtc;
return 0;
-exit_put_clk:
- clk_disable_unprepare(pdata->clk);
+exit_put_clk_ref:
+ clk_disable_unprepare(pdata->clk_ref);
+exit_put_clk_ipg:
+ clk_disable_unprepare(pdata->clk_ipg);
return ret;
}
@@ -441,7 +471,8 @@ static int mxc_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- clk_disable_unprepare(pdata->clk);
+ clk_disable_unprepare(pdata->clk_ref);
+ clk_disable_unprepare(pdata->clk_ipg);
return 0;
}
@@ -473,6 +504,7 @@ static SIMPLE_DEV_PM_OPS(mxc_rtc_pm_ops, mxc_rtc_suspend, mxc_rtc_resume);
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
+ .of_match_table = of_match_ptr(imx_rtc_dt_ids),
.pm = &mxc_rtc_pm_ops,
},
.id_table = imx_rtc_devtype,
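
The i.MX RTC now takes two clocks, "ipg" for register access and "ref" as the 32 kHz counter reference, enabled in that order and unwound in reverse on failure. A condensed sketch of the acquire/enable/unwind pattern (clock names match the hunk above; the function name is illustrative):

#include <linux/clk.h>

static int my_enable_rtc_clocks(struct device *dev,
				struct clk **ipg, struct clk **ref)
{
	int ret;

	*ipg = devm_clk_get(dev, "ipg");
	if (IS_ERR(*ipg))
		return PTR_ERR(*ipg);

	ret = clk_prepare_enable(*ipg);
	if (ret)
		return ret;

	*ref = devm_clk_get(dev, "ref");
	if (IS_ERR(*ref)) {
		ret = PTR_ERR(*ref);
		goto err_ipg;
	}

	ret = clk_prepare_enable(*ref);
	if (ret)
		goto err_ipg;

	return 0;

err_ipg:
	clk_disable_unprepare(*ipg);
	return ret;
}
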
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8b6355ffaff9..ec2e9c5fb993 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
+#include <linux/clk.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -107,6 +108,7 @@
/* OMAP_RTC_OSC_REG bit fields: */
#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
+#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
/* OMAP_RTC_IRQWAKEEN bit fields: */
#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
@@ -132,10 +134,12 @@ struct omap_rtc_device_type {
struct omap_rtc {
struct rtc_device *rtc;
void __iomem *base;
+ struct clk *clk;
int irq_alarm;
int irq_timer;
u8 interrupts_reg;
bool is_pmic_controller;
+ bool has_ext_clk;
const struct omap_rtc_device_type *type;
};
@@ -553,6 +557,15 @@ static int omap_rtc_probe(struct platform_device *pdev)
if (rtc->irq_alarm <= 0)
return -ENOENT;
+ rtc->clk = devm_clk_get(&pdev->dev, "ext-clk");
+ if (!IS_ERR(rtc->clk))
+ rtc->has_ext_clk = true;
+ else
+ rtc->clk = devm_clk_get(&pdev->dev, "int-clk");
+
+ if (!IS_ERR(rtc->clk))
+ clk_prepare_enable(rtc->clk);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rtc->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rtc->base))
@@ -627,6 +640,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
if (reg != new_ctrl)
rtc_write(rtc, OMAP_RTC_CTRL_REG, new_ctrl);
+ /*
+ * If we have the external clock then switch to it so we can keep
+ * ticking across suspend.
+ */
+ if (rtc->has_ext_clk) {
+ reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+ rtc_write(rtc, OMAP_RTC_OSC_REG,
+ reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+ }
+
rtc->type->lock(rtc);
device_init_wakeup(&pdev->dev, true);
@@ -672,6 +695,7 @@ err:
static int __exit omap_rtc_remove(struct platform_device *pdev)
{
struct omap_rtc *rtc = platform_get_drvdata(pdev);
+ u8 reg;
if (pm_power_off == omap_rtc_power_off &&
omap_rtc_power_off_rtc == rtc) {
@@ -681,10 +705,19 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
+ if (!IS_ERR(rtc->clk))
+ clk_disable_unprepare(rtc->clk);
+
rtc->type->unlock(rtc);
/* leave rtc running, but disable irqs */
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
+ if (rtc->has_ext_clk) {
+ reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+ reg &= ~OMAP_RTC_OSC_SEL_32KCLK_SRC;
+ rtc_write(rtc, OMAP_RTC_OSC_REG, reg);
+ }
+
rtc->type->lock(rtc);
/* Disable the clock/module */
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 7061dcae2b09..6fbf9e617151 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -190,11 +190,9 @@ exit:
return rc;
}
-static const struct rtc_class_ops opal_rtc_ops = {
+static struct rtc_class_ops opal_rtc_ops = {
.read_time = opal_get_rtc_time,
.set_time = opal_set_rtc_time,
- .read_alarm = opal_get_tpo_time,
- .set_alarm = opal_set_tpo_time,
};
static int opal_rtc_probe(struct platform_device *pdev)
@@ -202,8 +200,11 @@ static int opal_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
- NULL))
+ NULL)) {
device_set_wakeup_capable(&pdev->dev, true);
+ opal_rtc_ops.read_alarm = opal_get_tpo_time;
+ opal_rtc_ops.set_alarm = opal_set_tpo_time;
+ }
rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
THIS_MODULE);
@@ -236,7 +237,6 @@ static struct platform_driver opal_rtc_driver = {
.id_table = opal_rtc_driver_ids,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.of_match_table = opal_rtc_match,
},
};
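
Dropping const from opal_rtc_ops lets probe fill in the TPO alarm callbacks only when firmware advertises "has-tpo"; while they stay NULL, the RTC core fails alarm operations with -EINVAL instead of calling into OPAL. A sketch of the same pattern with illustrative names and stub callbacks:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static int my_read_time(struct device *dev, struct rtc_time *tm) { return 0; }
static int my_set_time(struct device *dev, struct rtc_time *tm) { return 0; }
static int my_read_alarm(struct device *dev, struct rtc_wkalrm *a) { return 0; }
static int my_set_alarm(struct device *dev, struct rtc_wkalrm *a) { return 0; }

static struct rtc_class_ops my_rtc_ops = {	/* deliberately not const */
	.read_time = my_read_time,
	.set_time  = my_set_time,
};

static int my_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	if (of_property_read_bool(pdev->dev.of_node, "has-alarm")) {
		my_rtc_ops.read_alarm = my_read_alarm;
		my_rtc_ops.set_alarm  = my_set_alarm;
	}

	rtc = devm_rtc_device_register(&pdev->dev, "my-rtc",
				       &my_rtc_ops, THIS_MODULE);
	return PTR_ERR_OR_ZERO(rtc);
}
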
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 8a7556cbcb7f..1c47650fe624 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -165,13 +165,7 @@ static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
- /* the clock can give out invalid datetime, but we cannot return
- * -EINVAL otherwise hwclock will refuse to set the time on bootup.
- */
- if (rtc_valid_tm(tm) < 0)
- dev_err(dev, "retrieved date/time is not valid.\n");
-
- return 0;
+ return rtc_valid_tm(tm);
}
static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 9bd842e97749..4b11d31f7174 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -33,11 +33,14 @@
#define PCF2127_REG_MO (0x08)
#define PCF2127_REG_YR (0x09)
+#define PCF2127_OSF BIT(7) /* Oscillator Fail flag */
+
static struct i2c_driver pcf2127_driver;
struct pcf2127 {
struct rtc_device *rtc;
int voltage_low; /* indicates if a low_voltage was detected */
+ int oscillator_failed; /* OSF was detected and date is unreliable */
};
/*
@@ -59,7 +62,18 @@ static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
if (buf[PCF2127_REG_CTRL3] & 0x04) {
pcf2127->voltage_low = 1;
dev_info(&client->dev,
- "low voltage detected, date/time is not reliable.\n");
+ "low voltage detected, check/replace RTC battery.\n");
+ }
+
+ if (buf[PCF2127_REG_SC] & PCF2127_OSF) {
+ /*
+ * no need clear the flag here,
+		 * no need to clear the flag here,
+ */
+ pcf2127->oscillator_failed = 1;
+ dev_warn(&client->dev,
+ "oscillator stop detected, date/time is not reliable\n");
+ return -EINVAL;
}
dev_dbg(&client->dev,
@@ -88,17 +102,12 @@ static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
- /* the clock can give out invalid datetime, but we cannot return
- * -EINVAL otherwise hwclock will refuse to set the time on bootup.
- */
- if (rtc_valid_tm(tm) < 0)
- dev_err(&client->dev, "retrieved date/time is not valid.\n");
-
- return 0;
+ return rtc_valid_tm(tm);
}
static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
+ struct pcf2127 *pcf2127 = i2c_get_clientdata(client);
unsigned char buf[8];
int i = 0, err;
@@ -112,7 +121,7 @@ static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[i++] = PCF2127_REG_SC;
/* hours, minutes and seconds */
- buf[i++] = bin2bcd(tm->tm_sec);
+ buf[i++] = bin2bcd(tm->tm_sec); /* this will also clear OSF flag */
buf[i++] = bin2bcd(tm->tm_min);
buf[i++] = bin2bcd(tm->tm_hour);
buf[i++] = bin2bcd(tm->tm_mday);
@@ -132,6 +141,9 @@ static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return -EIO;
}
+ /* clear OSF flag in client data */
+ pcf2127->oscillator_failed = 0;
+
return 0;
}
@@ -144,7 +156,9 @@ static int pcf2127_rtc_ioctl(struct device *dev,
switch (cmd) {
case RTC_VL_READ:
if (pcf2127->voltage_low)
- dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+ dev_info(dev, "low voltage detected, check/replace battery\n");
+ if (pcf2127->oscillator_failed)
+ dev_info(dev, "oscillator stop detected, date/time is not reliable\n");
if (copy_to_user((void __user *)arg, &pcf2127->voltage_low,
sizeof(int)))
@@ -217,7 +231,6 @@ MODULE_DEVICE_TABLE(of, pcf2127_of_match);
static struct i2c_driver pcf2127_driver = {
.driver = {
.name = "rtc-pcf2127",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcf2127_of_match),
},
.probe = pcf2127_probe,
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 6a12bf62c504..b6d73dd881f2 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -189,7 +189,6 @@ MODULE_DEVICE_TABLE(of, pcf85063_of_match);
static struct i2c_driver pcf85063_driver = {
.driver = {
.name = "rtc-pcf85063",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcf85063_of_match),
},
.probe = pcf85063_probe,
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 4cdb64be061b..e7ebcc0b7e59 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -334,7 +334,6 @@ MODULE_DEVICE_TABLE(of, pcf8523_of_match);
static struct i2c_driver pcf8523_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcf8523_of_match),
},
.probe = pcf8523_probe,
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 8bba022be946..e569243db57e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -483,7 +483,6 @@ MODULE_DEVICE_TABLE(of, pcf8563_of_match);
static struct i2c_driver pcf8563_driver = {
.driver = {
.name = "rtc-pcf8563",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pcf8563_of_match),
},
.probe = pcf8563_probe,
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 5911a6dca291..7ca9e8871d77 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -309,7 +309,6 @@ MODULE_DEVICE_TABLE(i2c, pcf8583_id);
static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
- .owner = THIS_MODULE,
},
.probe = pcf8583_probe,
.id_table = pcf8583_id,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 99181fff88fd..41dcb7ddb906 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -476,6 +476,6 @@ static struct amba_driver pl031_driver = {
module_amba_driver(pl031_driver);
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net");
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("ARM AMBA PL031 RTC Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 4561f375327d..fe4985b54608 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -32,6 +32,8 @@
#include <mach/hardware.h>
+#include "rtc-sa1100.h"
+
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
#define MAXFREQ_PERIODIC 1000
@@ -86,10 +88,9 @@
__raw_writel((value), (pxa_rtc)->base + (reg))
struct pxa_rtc {
+ struct sa1100_rtc sa1100_rtc;
struct resource *ress;
void __iomem *base;
- int irq_1Hz;
- int irq_Alrm;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
};
@@ -184,25 +185,25 @@ static int pxa_rtc_open(struct device *dev)
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
int ret;
- ret = request_irq(pxa_rtc->irq_1Hz, pxa_rtc_irq, 0,
+ ret = request_irq(pxa_rtc->sa1100_rtc.irq_1hz, pxa_rtc_irq, 0,
"rtc 1Hz", dev);
if (ret < 0) {
- dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_1Hz,
- ret);
+ dev_err(dev, "can't get irq %i, err %d\n",
+ pxa_rtc->sa1100_rtc.irq_1hz, ret);
goto err_irq_1Hz;
}
- ret = request_irq(pxa_rtc->irq_Alrm, pxa_rtc_irq, 0,
+ ret = request_irq(pxa_rtc->sa1100_rtc.irq_alarm, pxa_rtc_irq, 0,
"rtc Alrm", dev);
if (ret < 0) {
- dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_Alrm,
- ret);
+ dev_err(dev, "can't get irq %i, err %d\n",
+ pxa_rtc->sa1100_rtc.irq_alarm, ret);
goto err_irq_Alrm;
}
return 0;
err_irq_Alrm:
- free_irq(pxa_rtc->irq_1Hz, dev);
+ free_irq(pxa_rtc->sa1100_rtc.irq_1hz, dev);
err_irq_1Hz:
return ret;
}
@@ -215,8 +216,8 @@ static void pxa_rtc_release(struct device *dev)
rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
spin_unlock_irq(&pxa_rtc->lock);
- free_irq(pxa_rtc->irq_Alrm, dev);
- free_irq(pxa_rtc->irq_1Hz, dev);
+ free_irq(pxa_rtc->sa1100_rtc.irq_1hz, dev);
+ free_irq(pxa_rtc->sa1100_rtc.irq_alarm, dev);
}
static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
@@ -320,12 +321,13 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa_rtc *pxa_rtc;
+ struct sa1100_rtc *sa1100_rtc;
int ret;
- u32 rttr;
pxa_rtc = devm_kzalloc(dev, sizeof(*pxa_rtc), GFP_KERNEL);
if (!pxa_rtc)
return -ENOMEM;
+ sa1100_rtc = &pxa_rtc->sa1100_rtc;
spin_lock_init(&pxa_rtc->lock);
platform_set_drvdata(pdev, pxa_rtc);
@@ -336,13 +338,13 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
return -ENXIO;
}
- pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0);
- if (pxa_rtc->irq_1Hz < 0) {
+ sa1100_rtc->irq_1hz = platform_get_irq(pdev, 0);
+ if (sa1100_rtc->irq_1hz < 0) {
dev_err(dev, "No 1Hz IRQ resource defined\n");
return -ENXIO;
}
- pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1);
- if (pxa_rtc->irq_Alrm < 0) {
+ sa1100_rtc->irq_alarm = platform_get_irq(pdev, 1);
+ if (sa1100_rtc->irq_alarm < 0) {
dev_err(dev, "No alarm IRQ resource defined\n");
return -ENXIO;
}
@@ -354,15 +356,14 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- /*
- * If the clock divider is uninitialized then reset it to the
- * default value to get the 1Hz clock.
- */
- if (rtc_readl(pxa_rtc, RTTR) == 0) {
- rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
- rtc_writel(pxa_rtc, RTTR, rttr);
- dev_warn(dev, "warning: initializing default clock"
- " divider/trim value\n");
+ sa1100_rtc->rcnr = pxa_rtc->base + 0x0;
+ sa1100_rtc->rtsr = pxa_rtc->base + 0x8;
+ sa1100_rtc->rtar = pxa_rtc->base + 0x4;
+ sa1100_rtc->rttr = pxa_rtc->base + 0xc;
+ ret = sa1100_rtc_init(pdev, sa1100_rtc);
+ if (ret) {
+ dev_err(dev, "Unable to init SA1100 RTC sub-device\n");
+ return ret;
}
rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
@@ -402,7 +403,7 @@ static int pxa_rtc_suspend(struct device *dev)
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(pxa_rtc->irq_Alrm);
+ enable_irq_wake(pxa_rtc->sa1100_rtc.irq_alarm);
return 0;
}
@@ -411,7 +412,7 @@ static int pxa_rtc_resume(struct device *dev)
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- disable_irq_wake(pxa_rtc->irq_Alrm);
+ disable_irq_wake(pxa_rtc->sa1100_rtc.irq_alarm);
return 0;
}
#endif
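
With the hunks above, rtc-pxa stops duplicating the SA1100 register handling: it embeds a `struct sa1100_rtc`, points the four register fields at the PXA's MMIO layout, and hands the whole thing to `sa1100_rtc_init()`, which registers the rtc_class_ops on the shared core. A condensed sketch of the hand-off, using the offsets from the hunk:

	struct sa1100_rtc *core = &pxa_rtc->sa1100_rtc;

	core->irq_1hz = platform_get_irq(pdev, 0);
	core->irq_alarm = platform_get_irq(pdev, 1);

	/* PXA layout: RCNR at +0x0, RTAR at +0x4, RTSR at +0x8, RTTR at +0xc */
	core->rcnr = pxa_rtc->base + 0x0;
	core->rtar = pxa_rtc->base + 0x4;
	core->rtsr = pxa_rtc->base + 0x8;
	core->rttr = pxa_rtc->base + 0xc;

	ret = sa1100_rtc_init(pdev, core);	/* 0 on success, -errno on failure */
	if (ret)
		return ret;
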
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index b548551f385c..026035373ae6 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -170,7 +170,7 @@ static ssize_t rp5c01_nvram_read(struct file *filp, struct kobject *kobj,
spin_lock_irq(&priv->lock);
- for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+ for (count = 0; count < size; count++) {
u8 data;
rp5c01_write(priv,
@@ -200,7 +200,7 @@ static ssize_t rp5c01_nvram_write(struct file *filp, struct kobject *kobj,
spin_lock_irq(&priv->lock);
- for (count = 0; size > 0 && pos < RP5C01_MODE; count++, size--) {
+ for (count = 0; count < size; count++) {
u8 data = *buf++;
rp5c01_write(priv,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index e6298e02b400..24c3d69ce1b9 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -18,13 +18,11 @@
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/bcd.h>
+#include <linux/bitops.h>
#include <linux/i2c.h>
-#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/rtc.h>
/* Register definitions */
@@ -48,17 +46,17 @@
#define RX8025_BIT_CTRL1_CT (7 << 0)
/* 1 Hz periodic level irq */
#define RX8025_BIT_CTRL1_CT_1HZ 4
-#define RX8025_BIT_CTRL1_TEST (1 << 3)
-#define RX8025_BIT_CTRL1_1224 (1 << 5)
-#define RX8025_BIT_CTRL1_DALE (1 << 6)
-#define RX8025_BIT_CTRL1_WALE (1 << 7)
-
-#define RX8025_BIT_CTRL2_DAFG (1 << 0)
-#define RX8025_BIT_CTRL2_WAFG (1 << 1)
-#define RX8025_BIT_CTRL2_CTFG (1 << 2)
-#define RX8025_BIT_CTRL2_PON (1 << 4)
-#define RX8025_BIT_CTRL2_XST (1 << 5)
-#define RX8025_BIT_CTRL2_VDET (1 << 6)
+#define RX8025_BIT_CTRL1_TEST BIT(3)
+#define RX8025_BIT_CTRL1_1224 BIT(5)
+#define RX8025_BIT_CTRL1_DALE BIT(6)
+#define RX8025_BIT_CTRL1_WALE BIT(7)
+
+#define RX8025_BIT_CTRL2_DAFG BIT(0)
+#define RX8025_BIT_CTRL2_WAFG BIT(1)
+#define RX8025_BIT_CTRL2_CTFG BIT(2)
+#define RX8025_BIT_CTRL2_PON BIT(4)
+#define RX8025_BIT_CTRL2_XST BIT(5)
+#define RX8025_BIT_CTRL2_VDET BIT(6)
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
@@ -74,84 +72,84 @@ MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
struct i2c_client *client;
struct rtc_device *rtc;
- struct work_struct work;
u8 ctrl1;
- unsigned exiting:1;
};
-static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value)
+static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
{
- int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08);
-
- if (ret < 0) {
- dev_err(&client->dev, "Unable to read register #%d\n", number);
- return ret;
- }
-
- *value = ret;
- return 0;
+ return i2c_smbus_read_byte_data(client, number << 4);
}
-static int rx8025_read_regs(struct i2c_client *client,
- int number, u8 length, u8 *values)
+static int rx8025_read_regs(const struct i2c_client *client,
+ u8 number, u8 length, u8 *values)
{
- int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08,
- length, values);
-
- if (ret != length) {
- dev_err(&client->dev, "Unable to read registers #%d..#%d\n",
- number, number + length - 1);
+ int ret = i2c_smbus_read_i2c_block_data(client, number << 4, length,
+ values);
+ if (ret != length)
return ret < 0 ? ret : -EIO;
- }
return 0;
}
-static int rx8025_write_reg(struct i2c_client *client, int number, u8 value)
+static s32 rx8025_write_reg(const struct i2c_client *client, u8 number,
+ u8 value)
{
- int ret = i2c_smbus_write_byte_data(client, number << 4, value);
-
- if (ret)
- dev_err(&client->dev, "Unable to write register #%d\n",
- number);
+ return i2c_smbus_write_byte_data(client, number << 4, value);
+}
- return ret;
+static s32 rx8025_write_regs(const struct i2c_client *client,
+ u8 number, u8 length, const u8 *values)
+{
+ return i2c_smbus_write_i2c_block_data(client, number << 4,
+ length, values);
}
-static int rx8025_write_regs(struct i2c_client *client,
- int number, u8 length, u8 *values)
+static int rx8025_check_validity(struct device *dev)
{
- int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08,
- length, values);
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ int ctrl2;
+
+ ctrl2 = rx8025_read_reg(rx8025->client, RX8025_REG_CTRL2);
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ if (ctrl2 & RX8025_BIT_CTRL2_VDET)
+ dev_warn(dev, "power voltage drop detected\n");
+
+ if (ctrl2 & RX8025_BIT_CTRL2_PON) {
+ dev_warn(dev, "power-on reset detected, date is invalid\n");
+ return -EINVAL;
+ }
- if (ret)
- dev_err(&client->dev, "Unable to write registers #%d..#%d\n",
- number, number + length - 1);
+ if (!(ctrl2 & RX8025_BIT_CTRL2_XST)) {
+ dev_warn(dev, "crystal stopped, date is invalid\n");
+ return -EINVAL;
+ }
- return ret;
+ return 0;
}
-static irqreturn_t rx8025_irq(int irq, void *dev_id)
+static int rx8025_reset_validity(struct i2c_client *client)
{
- struct i2c_client *client = dev_id;
- struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
- disable_irq_nosync(irq);
- schedule_work(&rx8025->work);
- return IRQ_HANDLED;
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
+
+ return rx8025_write_reg(client, RX8025_REG_CTRL2,
+ ctrl2 | RX8025_BIT_CTRL2_XST);
}
-static void rx8025_work(struct work_struct *work)
+static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
- struct rx8025_data *rx8025 = container_of(work, struct rx8025_data,
- work);
- struct i2c_client *client = rx8025->client;
- struct mutex *lock = &rx8025->rtc->ops_lock;
- u8 status;
-
- mutex_lock(lock);
+ struct i2c_client *client = dev_id;
+ struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ int status;
- if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status))
+ status = rx8025_read_reg(client, RX8025_REG_CTRL2);
+ if (status < 0)
goto out;
if (!(status & RX8025_BIT_CTRL2_XST))
@@ -161,9 +159,7 @@ static void rx8025_work(struct work_struct *work)
if (status & RX8025_BIT_CTRL2_CTFG) {
/* periodic */
status &= ~RX8025_BIT_CTRL2_CTFG;
- local_irq_disable();
rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF);
- local_irq_enable();
}
if (status & RX8025_BIT_CTRL2_DAFG) {
@@ -172,20 +168,11 @@ static void rx8025_work(struct work_struct *work)
if (rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE))
goto out;
- local_irq_disable();
rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF);
- local_irq_enable();
}
- /* acknowledge IRQ */
- rx8025_write_reg(client, RX8025_REG_CTRL2,
- status | RX8025_BIT_CTRL2_XST);
-
out:
- if (!rx8025->exiting)
- enable_irq(client->irq);
-
- mutex_unlock(lock);
+ return IRQ_HANDLED;
}
static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
@@ -194,6 +181,10 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
u8 date[7];
int err;
+ err = rx8025_check_validity(dev);
+ if (err)
+ return err;
+
err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
if (err)
return err;
@@ -213,10 +204,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f);
dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1;
- dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]);
-
- if (dt->tm_year < 70)
- dt->tm_year += 100;
+ dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]) + 100;
dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
dt->tm_sec, dt->tm_min, dt->tm_hour,
@@ -229,12 +217,10 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
{
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 date[7];
+ int ret;
- /*
- * BUG: The HW assumes every year that is a multiple of 4 to be a leap
- * year. Next time this is wrong is 2100, which will not be a leap
- * year.
- */
+ if ((dt->tm_year < 100) || (dt->tm_year > 199))
+ return -EINVAL;
/*
* Here the read-only bits are written as "0". I'm not sure if that
@@ -251,17 +237,21 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday);
date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday);
date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1);
- date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100);
+ date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year - 100);
dev_dbg(dev,
"%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
__func__,
date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
- return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+ ret = rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+ if (ret < 0)
+ return ret;
+
+ return rx8025_reset_validity(rx8025->client);
}
-static int rx8025_init_client(struct i2c_client *client, int *need_reset)
+static int rx8025_init_client(struct i2c_client *client)
{
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
@@ -275,38 +265,18 @@ static int rx8025_init_client(struct i2c_client *client, int *need_reset)
/* Keep test bit zero ! */
rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST;
- if (ctrl[1] & RX8025_BIT_CTRL2_PON) {
- dev_warn(&client->dev, "power-on reset was detected, "
- "you may have to readjust the clock\n");
- *need_reset = 1;
- }
-
- if (ctrl[1] & RX8025_BIT_CTRL2_VDET) {
- dev_warn(&client->dev, "a power voltage drop was detected, "
- "you may have to readjust the clock\n");
- *need_reset = 1;
- }
-
- if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) {
- dev_warn(&client->dev, "Oscillation stop was detected,"
- "you may have to readjust the clock\n");
- *need_reset = 1;
- }
-
if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) {
dev_warn(&client->dev, "Alarm was detected\n");
need_clear = 1;
}
- if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG))
+ if (ctrl[1] & RX8025_BIT_CTRL2_CTFG)
need_clear = 1;
- if (*need_reset || need_clear) {
- ctrl2 = ctrl[0];
- ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET |
- RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
+ if (need_clear) {
+ ctrl2 = ctrl[1];
+ ctrl2 &= ~(RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
RX8025_BIT_CTRL2_DAFG);
- ctrl2 |= RX8025_BIT_CTRL2_XST;
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
@@ -319,8 +289,8 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
struct i2c_client *client = rx8025->client;
- u8 ctrl2, ald[2];
- int err;
+ u8 ald[2];
+ int ctrl2, err;
if (client->irq <= 0)
return -EINVAL;
@@ -329,9 +299,9 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
if (err)
return err;
- err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2);
- if (err)
- return err;
+ ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
+ if (ctrl2 < 0)
+ return ctrl2;
dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n",
__func__, ald[0], ald[1], ctrl2);
@@ -452,12 +422,11 @@ static struct rtc_class_ops rx8025_rtc_ops = {
static int rx8025_get_clock_adjust(struct device *dev, int *adj)
{
struct i2c_client *client = to_i2c_client(dev);
- u8 digoff;
- int err;
+ int digoff;
- err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff);
- if (err)
- return err;
+ digoff = rx8025_read_reg(client, RX8025_REG_DIGOFF);
+ if (digoff < 0)
+ return digoff;
*adj = digoff >= 64 ? digoff - 128 : digoff;
if (*adj > 0)
@@ -539,88 +508,53 @@ static int rx8025_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rx8025_data *rx8025;
- int err, need_reset = 0;
+ int err = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_I2C_BLOCK)) {
dev_err(&adapter->dev,
"doesn't support required functionality\n");
- err = -EIO;
- goto errout;
+ return -EIO;
}
rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL);
if (!rx8025) {
- err = -ENOMEM;
- goto errout;
+ return -ENOMEM;
}
rx8025->client = client;
i2c_set_clientdata(client, rx8025);
- INIT_WORK(&rx8025->work, rx8025_work);
- err = rx8025_init_client(client, &need_reset);
+ err = rx8025_init_client(client);
if (err)
- goto errout;
-
- if (need_reset) {
- struct rtc_time tm;
- dev_info(&client->dev,
- "bad conditions detected, resetting date\n");
- rtc_time_to_tm(0, &tm); /* 1970/1/1 */
- rx8025_set_time(&client->dev, &tm);
- }
+ return err;
rx8025->rtc = devm_rtc_device_register(&client->dev, client->name,
&rx8025_rtc_ops, THIS_MODULE);
if (IS_ERR(rx8025->rtc)) {
- err = PTR_ERR(rx8025->rtc);
dev_err(&client->dev, "unable to register the class device\n");
- goto errout;
+ return PTR_ERR(rx8025->rtc);
}
if (client->irq > 0) {
dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
- err = request_irq(client->irq, rx8025_irq,
- 0, "rx8025", client);
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ rx8025_handle_irq, IRQF_ONESHOT, "rx8025",
+ client);
if (err) {
- dev_err(&client->dev, "unable to request IRQ\n");
- goto errout;
+ dev_err(&client->dev, "unable to request IRQ, alarms disabled\n");
+ client->irq = 0;
}
}
- rx8025->rtc->irq_freq = 1;
rx8025->rtc->max_user_freq = 1;
err = rx8025_sysfs_register(&client->dev);
- if (err)
- goto errout_irq;
-
- return 0;
-
-errout_irq:
- if (client->irq > 0)
- free_irq(client->irq, client);
-
-errout:
- dev_err(&adapter->dev, "probing for rx8025 failed\n");
return err;
}
static int rx8025_remove(struct i2c_client *client)
{
- struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- struct mutex *lock = &rx8025->rtc->ops_lock;
-
- if (client->irq > 0) {
- mutex_lock(lock);
- rx8025->exiting = 1;
- mutex_unlock(lock);
-
- free_irq(client->irq, client);
- cancel_work_sync(&rx8025->work);
- }
-
rx8025_sysfs_unregister(&client->dev);
return 0;
}
@@ -628,7 +562,6 @@ static int rx8025_remove(struct i2c_client *client)
static struct i2c_driver rx8025_driver = {
.driver = {
.name = "rtc-rx8025",
- .owner = THIS_MODULE,
},
.probe = rx8025_probe,
.remove = rx8025_remove,
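
The interrupt rework in the rx8025 hunks above removes the workqueue indirection. The old handler had to disable the IRQ and defer to `rx8025_work()` because SMBus transfers sleep; `devm_request_threaded_irq()` with a NULL primary handler gets the same effect directly, running `rx8025_handle_irq()` from a kernel thread where sleeping bus I/O is legal. Note that genirq rejects a NULL primary handler unless `IRQF_ONESHOT` is set (the line must stay masked until the thread completes), so the flag is required here; the devm_ variant also retires the `free_irq()`/`cancel_work_sync()` teardown in remove(). The request pattern, in brief:

	/* NULL hard handler: the core keeps the line masked (IRQF_ONESHOT)
	 * until rx8025_handle_irq() returns from its kernel thread. */
	err = devm_request_threaded_irq(&client->dev, client->irq,
					NULL, rx8025_handle_irq,
					IRQF_ONESHOT, "rx8025", client);
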
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index de8d9c427782..161e25d016c3 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -315,7 +315,6 @@ MODULE_DEVICE_TABLE(i2c, rx8581_id);
static struct i2c_driver rx8581_driver = {
.driver = {
.name = "rtc-rx8581",
- .owner = THIS_MODULE,
},
.probe = rx8581_probe,
.id_table = rx8581_id,
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0f832362199..7cc8f73a3fe8 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -39,6 +39,7 @@ struct s3c_rtc {
void __iomem *base;
struct clk *rtc_clk;
struct clk *rtc_src_clk;
+ bool clk_disabled;
struct s3c_rtc_data *data;
@@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info)
unsigned long irq_flags;
spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
- clk_enable(info->rtc_clk);
- if (info->data->needs_src_clk)
- clk_enable(info->rtc_src_clk);
+ if (info->clk_disabled) {
+ clk_enable(info->rtc_clk);
+ if (info->data->needs_src_clk)
+ clk_enable(info->rtc_src_clk);
+ info->clk_disabled = false;
+ }
spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
}
@@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
unsigned long irq_flags;
spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
- if (info->data->needs_src_clk)
- clk_disable(info->rtc_src_clk);
- clk_disable(info->rtc_clk);
+ if (!info->clk_disabled) {
+ if (info->data->needs_src_clk)
+ clk_disable(info->rtc_src_clk);
+ clk_disable(info->rtc_clk);
+ info->clk_disabled = true;
+ }
spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
}
@@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
s3c_rtc_disable_clk(info);
+ if (enabled)
+ s3c_rtc_enable_clk(info);
+ else
+ s3c_rtc_disable_clk(info);
+
return 0;
}
@@ -410,8 +422,9 @@ static int s3c_rtc_remove(struct platform_device *pdev)
s3c_rtc_setaie(info->dev, 0);
+ if (info->data->needs_src_clk)
+ clk_unprepare(info->rtc_src_clk);
clk_unprepare(info->rtc_clk);
- info->rtc_clk = NULL;
return 0;
}
@@ -482,6 +495,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (IS_ERR(info->rtc_src_clk)) {
dev_err(&pdev->dev,
"failed to find rtc source clock\n");
+ clk_disable_unprepare(info->rtc_clk);
return PTR_ERR(info->rtc_src_clk);
}
clk_prepare_enable(info->rtc_src_clk);
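
The `clk_disabled` flag added to rtc-s3c above exists because `clk_enable()`/`clk_disable()` are refcounted by the common clock framework: an extra disable underflows the enable count and WARNs. Since `s3c_rtc_setaie()` can now ask for a state the clock is already in, the driver makes the pair idempotent under its existing spinlock, tracking whether its single reference is currently dropped. Distilled:

	static void example_disable_clk(struct s3c_rtc *info)
	{
		unsigned long flags;

		spin_lock_irqsave(&info->alarm_clk_lock, flags);
		if (!info->clk_disabled) {	/* drop our one reference only once */
			clk_disable(info->rtc_clk);
			info->clk_disabled = true;
		}
		spin_unlock_irqrestore(&info->alarm_clk_lock, flags);
	}
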
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 8c70d785ba73..f2504b4eef34 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
case S2MPS13X:
data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
+ if (ret < 0)
+ break;
+
+ /*
+ * The WUDR and (RUDR or AUDR) bits must be set high after writing
+ * the RTC_CTRL register, just as when writing the alarm registers.
+ * The datasheet does not describe this, but the vendor code does it.
+ */
+ ret = s5m8767_rtc_set_alarm_reg(info);
break;
default:
@@ -797,6 +807,7 @@ static const struct platform_device_id s5m_rtc_id[] = {
{ "s2mps14-rtc", S2MPS14X },
{ },
};
+MODULE_DEVICE_TABLE(platform, s5m_rtc_id);
static struct platform_driver s5m_rtc_driver = {
.driver = {
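
The `MODULE_DEVICE_TABLE(platform, s5m_rtc_id)` line (and the `MODULE_DEVICE_TABLE(of, wmt_dt_ids)` added to rtc-vt8500 below) only matters when the driver is built as a module: the macro exports the ID table so depmod can generate aliases such as `platform:s5m-rtc`, letting udev autoload the module when a matching device is registered. Without it the driver binds only if loaded by hand. Roughly, the macro emits an alias symbol that modpost scans (simplified sketch):

	/* MODULE_DEVICE_TABLE(platform, s5m_rtc_id) is approximately: */
	extern const typeof(s5m_rtc_id) __mod_platform__s5m_rtc_id_device_table
		__attribute__((unused, alias("s5m_rtc_id")));
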
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index b6e1ca08c2c0..c2187bf6c7e4 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -35,24 +35,17 @@
#include <linux/bitops.h>
#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
+#define RTSR_HZE BIT(3) /* HZ interrupt enable */
+#define RTSR_ALE BIT(2) /* RTC alarm interrupt enable */
+#define RTSR_HZ BIT(1) /* HZ rising-edge detected */
+#define RTSR_AL BIT(0) /* RTC alarm detected */
-#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
-#include <mach/regs-rtc.h>
-#endif
+#include "rtc-sa1100.h"
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
#define RTC_FREQ 1024
-struct sa1100_rtc {
- spinlock_t lock;
- int irq_1hz;
- int irq_alarm;
- struct rtc_device *rtc;
- struct clk *clk;
-};
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
@@ -63,16 +56,16 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
spin_lock(&info->lock);
- rtsr = RTSR;
+ rtsr = readl_relaxed(info->rtsr);
/* clear interrupt sources */
- RTSR = 0;
+ writel_relaxed(0, info->rtsr);
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_probe(). */
if (rtsr & (RTSR_ALE | RTSR_HZE)) {
/* This is the original code, before there was the if test
* above. This code does not clear interrupts that were not
* enabled. */
- RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+ writel_relaxed((RTSR_AL | RTSR_HZ) & (rtsr >> 2), info->rtsr);
} else {
/* For some reason, it is possible to enter this routine
* without interrupts enabled; it has been tested with
@@ -81,13 +74,13 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
* This situation leads to an infinite "loop" of interrupt
* routine calling and as a result the processor seems to
* lock on its first call to open(). */
- RTSR = RTSR_AL | RTSR_HZ;
+ writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
}
/* clear alarm interrupt if it has occurred */
if (rtsr & RTSR_AL)
rtsr &= ~RTSR_ALE;
- RTSR = rtsr & (RTSR_ALE | RTSR_HZE);
+ writel_relaxed(rtsr & (RTSR_ALE | RTSR_HZE), info->rtsr);
/* update irq data & counter */
if (rtsr & RTSR_AL)
@@ -135,7 +128,7 @@ static void sa1100_rtc_release(struct device *dev)
struct sa1100_rtc *info = dev_get_drvdata(dev);
spin_lock_irq(&info->lock);
- RTSR = 0;
+ writel_relaxed(0, info->rtsr);
spin_unlock_irq(&info->lock);
free_irq(info->irq_alarm, dev);
@@ -144,39 +137,46 @@ static void sa1100_rtc_release(struct device *dev)
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
+ u32 rtsr;
struct sa1100_rtc *info = dev_get_drvdata(dev);
spin_lock_irq(&info->lock);
+ rtsr = readl_relaxed(info->rtsr);
if (enabled)
- RTSR |= RTSR_ALE;
+ rtsr |= RTSR_ALE;
else
- RTSR &= ~RTSR_ALE;
+ rtsr &= ~RTSR_ALE;
+ writel_relaxed(rtsr, info->rtsr);
spin_unlock_irq(&info->lock);
return 0;
}
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(RCNR, tm);
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(readl_relaxed(info->rcnr), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
unsigned long time;
int ret;
ret = rtc_tm_to_time(tm, &time);
if (ret == 0)
- RCNR = time;
+ writel_relaxed(time, info->rcnr);
return ret;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
u32 rtsr;
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
- rtsr = RTSR;
+ rtsr = readl_relaxed(info->rtsr);
alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
return 0;
@@ -192,12 +192,13 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ret = rtc_tm_to_time(&alrm->time, &time);
if (ret != 0)
goto out;
- RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
- RTAR = time;
+ writel_relaxed(readl_relaxed(info->rtsr) &
+ (RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr);
+ writel_relaxed(time, info->rtar);
if (alrm->enabled)
- RTSR |= RTSR_ALE;
+ writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr);
else
- RTSR &= ~RTSR_ALE;
+ writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr);
out:
spin_unlock_irq(&info->lock);
@@ -206,8 +207,10 @@ out:
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
- seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
- seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+ seq_printf(seq, "trim/divider\t\t: 0x%08x\n", readl_relaxed(info->rttr));
+ seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", readl_relaxed(info->rtsr));
return 0;
}
@@ -223,29 +226,18 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
};
-static int sa1100_rtc_probe(struct platform_device *pdev)
+int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
{
struct rtc_device *rtc;
- struct sa1100_rtc *info;
- int irq_1hz, irq_alarm, ret = 0;
+ int ret;
- irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
- irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
- if (irq_1hz < 0 || irq_alarm < 0)
- return -ENODEV;
+ spin_lock_init(&info->lock);
- info = devm_kzalloc(&pdev->dev, sizeof(struct sa1100_rtc), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to find rtc clock source\n");
return PTR_ERR(info->clk);
}
- info->irq_1hz = irq_1hz;
- info->irq_alarm = irq_alarm;
- spin_lock_init(&info->lock);
- platform_set_drvdata(pdev, info);
ret = clk_prepare_enable(info->clk);
if (ret)
@@ -257,22 +249,19 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
* If the clock divider is uninitialized then reset it to the
* default value to get the 1Hz clock.
*/
- if (RTTR == 0) {
- RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
+ if (readl_relaxed(info->rttr) == 0) {
+ writel_relaxed(RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16), info->rttr);
dev_warn(&pdev->dev, "warning: "
"initializing default clock divider/trim value\n");
/* The current RTC value probably doesn't make sense either */
- RCNR = 0;
+ writel_relaxed(0, info->rcnr);
}
- device_init_wakeup(&pdev->dev, 1);
-
rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
THIS_MODULE);
-
if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto err_dev;
+ clk_disable_unprepare(info->clk);
+ return PTR_ERR(rtc);
}
info->rtc = rtc;
@@ -298,12 +287,52 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
*
* Notice that clearing bits 1 and 0 is accomplished by writing ONES to
* the corresponding bits in RTSR. */
- RTSR = RTSR_AL | RTSR_HZ;
+ writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
return 0;
-err_dev:
- clk_disable_unprepare(info->clk);
- return ret;
+}
+EXPORT_SYMBOL_GPL(sa1100_rtc_init);
+
+static int sa1100_rtc_probe(struct platform_device *pdev)
+{
+ struct sa1100_rtc *info;
+ struct resource *iores;
+ void __iomem *base;
+ int irq_1hz, irq_alarm;
+
+ irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
+ irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
+ if (irq_1hz < 0 || irq_alarm < 0)
+ return -ENODEV;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct sa1100_rtc), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->irq_1hz = irq_1hz;
+ info->irq_alarm = irq_alarm;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (IS_ENABLED(CONFIG_ARCH_SA1100) ||
+ of_device_is_compatible(pdev->dev.of_node, "mrvl,sa1100-rtc")) {
+ info->rcnr = base + 0x04;
+ info->rtsr = base + 0x10;
+ info->rtar = base + 0x00;
+ info->rttr = base + 0x08;
+ } else {
+ info->rcnr = base + 0x0;
+ info->rtsr = base + 0x8;
+ info->rtar = base + 0x4;
+ info->rttr = base + 0xc;
+ }
+
+ platform_set_drvdata(pdev, info);
+ device_init_wakeup(&pdev->dev, 1);
+
+ return sa1100_rtc_init(pdev, info);
}
static int sa1100_rtc_remove(struct platform_device *pdev)
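
The core of the sa1100 rework is visible in the hunks above: the global RCNR/RTSR/RTAR/RTTR macros, which assumed a single statically mapped register block, become per-device `void __iomem *` pointers accessed with `readl_relaxed()`/`writel_relaxed()`. That is what lets rtc-pxa reuse this code with its own register offsets; the `_relaxed` accessors also skip the barrier implied by plain readl/writel, which is sufficient for a device-private register file guarded by the driver's spinlock. A representative read-modify-write:

	static void example_set_ale(struct sa1100_rtc *info, bool on)
	{
		u32 rtsr;

		spin_lock_irq(&info->lock);
		rtsr = readl_relaxed(info->rtsr);
		if (on)
			rtsr |= RTSR_ALE;
		else
			rtsr &= ~RTSR_ALE;
		writel_relaxed(rtsr, info->rtsr);
		spin_unlock_irq(&info->lock);
	}
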
diff --git a/drivers/rtc/rtc-sa1100.h b/drivers/rtc/rtc-sa1100.h
new file mode 100644
index 000000000000..2c79c0c57822
--- /dev/null
+++ b/drivers/rtc/rtc-sa1100.h
@@ -0,0 +1,23 @@
+#ifndef __RTC_SA1100_H__
+#define __RTC_SA1100_H__
+
+#include <linux/kernel.h>
+
+struct clk;
+struct platform_device;
+
+struct sa1100_rtc {
+ spinlock_t lock;
+ void __iomem *rcnr;
+ void __iomem *rtar;
+ void __iomem *rtsr;
+ void __iomem *rttr;
+ int irq_1hz;
+ int irq_alarm;
+ struct rtc_device *rtc;
+ struct clk *clk;
+};
+
+int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info);
+
+#endif
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index edc3b43282d4..7367f617145c 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/rtc/sirfsoc_rtciobrg.h>
@@ -48,12 +49,27 @@ struct sirfsoc_rtc_drv {
/* Overflow for every 8 years extra time */
u32 overflow_rtc;
spinlock_t lock;
+ struct regmap *regmap;
#ifdef CONFIG_PM
u32 saved_counter;
u32 saved_overflow_rtc;
#endif
};
+static u32 sirfsoc_rtc_readl(struct sirfsoc_rtc_drv *rtcdrv, u32 offset)
+{
+ u32 val;
+
+ regmap_read(rtcdrv->regmap, rtcdrv->rtc_base + offset, &val);
+ return val;
+}
+
+static void sirfsoc_rtc_writel(struct sirfsoc_rtc_drv *rtcdrv,
+ u32 offset, u32 val)
+{
+ regmap_write(rtcdrv->regmap, rtcdrv->rtc_base + offset, val);
+}
+
static int sirfsoc_rtc_read_alarm(struct device *dev,
struct rtc_wkalrm *alrm)
{
@@ -64,9 +80,9 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
spin_lock_irq(&rtcdrv->lock);
- rtc_count = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+ rtc_count = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
- rtc_alarm = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_ALARM0);
+ rtc_alarm = sirfsoc_rtc_readl(rtcdrv, RTC_ALARM0);
memset(alrm, 0, sizeof(struct rtc_wkalrm));
/*
@@ -82,8 +98,7 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
rtc_time_to_tm(rtcdrv->overflow_rtc
<< (BITS_PER_LONG - RTC_SHIFT)
| rtc_alarm >> RTC_SHIFT, &(alrm->time));
- if (sirfsoc_rtc_iobrg_readl(
- rtcdrv->rtc_base + RTC_STATUS) & SIRFSOC_RTC_AL0E)
+ if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E)
alrm->enabled = 1;
spin_unlock_irq(&rtcdrv->lock);
@@ -103,8 +118,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
spin_lock_irq(&rtcdrv->lock);
- rtc_status_reg = sirfsoc_rtc_iobrg_readl(
- rtcdrv->rtc_base + RTC_STATUS);
+ rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
/*
* An ongoing alarm in progress - ignore it and not
@@ -113,8 +127,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
dev_info(dev, "An old alarm was set, will be replaced by a new one\n");
}
- sirfsoc_rtc_iobrg_writel(
- rtc_alarm << RTC_SHIFT, rtcdrv->rtc_base + RTC_ALARM0);
+ sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, rtc_alarm << RTC_SHIFT);
rtc_status_reg &= ~0x07; /* mask out the lower status bits */
/*
* This bit RTC_AL sets it as a wake-up source for Sleep Mode
@@ -123,8 +136,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
rtc_status_reg |= SIRFSOC_RTC_AL0;
/* enable the RTC alarm interrupt */
rtc_status_reg |= SIRFSOC_RTC_AL0E;
- sirfsoc_rtc_iobrg_writel(
- rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+ sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
spin_unlock_irq(&rtcdrv->lock);
} else {
@@ -135,8 +147,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
*/
spin_lock_irq(&rtcdrv->lock);
- rtc_status_reg = sirfsoc_rtc_iobrg_readl(
- rtcdrv->rtc_base + RTC_STATUS);
+ rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
if (rtc_status_reg & SIRFSOC_RTC_AL0E) {
/* clear the RTC status register's alarm bit */
rtc_status_reg &= ~0x07;
@@ -145,8 +156,8 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
/* Clear the Alarm enable bit */
rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
- sirfsoc_rtc_iobrg_writel(rtc_status_reg,
- rtcdrv->rtc_base + RTC_STATUS);
+ sirfsoc_rtc_writel(rtcdrv, RTC_STATUS,
+ rtc_status_reg);
}
spin_unlock_irq(&rtcdrv->lock);
@@ -167,9 +178,9 @@ static int sirfsoc_rtc_read_time(struct device *dev,
* fail, read several times to make sure get stable value.
*/
do {
- tmp_rtc = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+ tmp_rtc = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
cpu_relax();
- } while (tmp_rtc != sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN));
+ } while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN));
rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) |
tmp_rtc >> RTC_SHIFT, tm);
@@ -187,10 +198,8 @@ static int sirfsoc_rtc_set_time(struct device *dev,
rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT);
- sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc,
- rtcdrv->rtc_base + RTC_SW_VALUE);
- sirfsoc_rtc_iobrg_writel(
- rtc_time << RTC_SHIFT, rtcdrv->rtc_base + RTC_CN);
+ sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
+ sirfsoc_rtc_writel(rtcdrv, RTC_CN, rtc_time << RTC_SHIFT);
return 0;
}
@@ -222,14 +231,13 @@ static int sirfsoc_rtc_alarm_irq_enable(struct device *dev,
spin_lock_irq(&rtcdrv->lock);
- rtc_status_reg = sirfsoc_rtc_iobrg_readl(
- rtcdrv->rtc_base + RTC_STATUS);
+ rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
if (enabled)
rtc_status_reg |= SIRFSOC_RTC_AL0E;
else
rtc_status_reg &= ~SIRFSOC_RTC_AL0E;
- sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+ sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
spin_unlock_irq(&rtcdrv->lock);
@@ -254,7 +262,7 @@ static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
spin_lock(&rtcdrv->lock);
- rtc_status_reg = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_STATUS);
+ rtc_status_reg = sirfsoc_rtc_readl(rtcdrv, RTC_STATUS);
/* this bit will be set ONLY if an alarm was active
* and it expired NOW
* So this is being used as an ASSERT
@@ -270,7 +278,8 @@ static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
/* Clear the Alarm enable bit */
rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
}
- sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
+
+ sirfsoc_rtc_writel(rtcdrv, RTC_STATUS, rtc_status_reg);
spin_unlock(&rtcdrv->lock);
@@ -287,6 +296,13 @@ static const struct of_device_id sirfsoc_rtc_of_match[] = {
{ .compatible = "sirf,prima2-sysrtc"},
{},
};
+
+static const struct regmap_config sysrtc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
MODULE_DEVICE_TABLE(of, sirfsoc_rtc_of_match);
static int sirfsoc_rtc_probe(struct platform_device *pdev)
@@ -314,27 +330,35 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
/* Register rtc alarm as a wakeup source */
device_init_wakeup(&pdev->dev, 1);
+ rtcdrv->regmap = devm_regmap_init_iobg(&pdev->dev,
+ &sysrtc_regmap_config);
+ if (IS_ERR(rtcdrv->regmap)) {
+ err = PTR_ERR(rtcdrv->regmap);
+ dev_err(&pdev->dev, "Failed to allocate register map: %d\n",
+ err);
+ return err;
+ }
+
/*
* Set the SYS_RTC counter in RTC_HZ units:
* with a 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1,
* so for RTC_HZ = 16 this gives RTC_DIV = 1023.
*/
rtc_div = ((32768 / RTC_HZ) / 2) - 1;
- sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
+ sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
/* 0x3 -> RTC_CLK */
- sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
- rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
+ sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
/* reset SYS RTC ALARM0 */
- sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0);
+ sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
/* reset SYS RTC ALARM1 */
- sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1);
+ sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
/* Restore RTC Overflow From Register After Command Reboot */
rtcdrv->overflow_rtc =
- sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
+ sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&sirfsoc_rtc_ops, THIS_MODULE);
@@ -372,10 +396,10 @@ static int sirfsoc_rtc_suspend(struct device *dev)
{
struct sirfsoc_rtc_drv *rtcdrv = dev_get_drvdata(dev);
rtcdrv->overflow_rtc =
- sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
+ sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
rtcdrv->saved_counter =
- sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+ sirfsoc_rtc_readl(rtcdrv, RTC_CN);
rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
if (device_may_wakeup(dev) && !enable_irq_wake(rtcdrv->irq))
rtcdrv->irq_wake = 1;
@@ -392,12 +416,10 @@ static int sirfsoc_rtc_resume(struct device *dev)
* if resuming from a snapshot and the rtc power was lost,
* restore the rtc settings
*/
- if (SIRFSOC_RTC_CLK != sirfsoc_rtc_iobrg_readl(
- rtcdrv->rtc_base + RTC_CLOCK_SWITCH)) {
+ if (SIRFSOC_RTC_CLK != sirfsoc_rtc_readl(rtcdrv, RTC_CLOCK_SWITCH)) {
u32 rtc_div;
/* 0x3 -> RTC_CLK */
- sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
- rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
+ sirfsoc_rtc_writel(rtcdrv, RTC_CLOCK_SWITCH, SIRFSOC_RTC_CLK);
/*
* Set the SYS_RTC counter in RTC_HZ units:
* with a 32K RTC crystal, RTC_DIV = (32768 / RTC_HZ / 2) - 1,
@@ -405,13 +427,13 @@ static int sirfsoc_rtc_resume(struct device *dev)
*/
rtc_div = ((32768 / RTC_HZ) / 2) - 1;
- sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
+ sirfsoc_rtc_writel(rtcdrv, RTC_DIV, rtc_div);
/* reset SYS RTC ALARM0 */
- sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM0);
+ sirfsoc_rtc_writel(rtcdrv, RTC_ALARM0, 0x0);
/* reset SYS RTC ALARM1 */
- sirfsoc_rtc_iobrg_writel(0x0, rtcdrv->rtc_base + RTC_ALARM1);
+ sirfsoc_rtc_writel(rtcdrv, RTC_ALARM1, 0x0);
}
rtcdrv->overflow_rtc = rtcdrv->saved_overflow_rtc;
@@ -419,15 +441,14 @@ static int sirfsoc_rtc_resume(struct device *dev)
* if the current counter is smaller than the previous one,
* it means the counter overflowed during sleep
*/
- tmp = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
+ tmp = sirfsoc_rtc_readl(rtcdrv, RTC_CN);
if (tmp <= rtcdrv->saved_counter)
rtcdrv->overflow_rtc++;
/*
* The PWRC value is changed across suspend; restore the overflow
* value from memory to the register
*/
- sirfsoc_rtc_iobrg_writel(rtcdrv->overflow_rtc,
- rtcdrv->rtc_base + RTC_SW_VALUE);
+ sirfsoc_rtc_writel(rtcdrv, RTC_SW_VALUE, rtcdrv->overflow_rtc);
if (device_may_wakeup(dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
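
The sirfsoc conversion routes every register access through a regmap backed by the SiRF I/O bridge (`devm_regmap_init_iobg()`), with the small `sirfsoc_rtc_readl()`/`sirfsoc_rtc_writel()` helpers keeping the call sites one-liners. The important detail in the config is `fast_io`: it makes regmap serialize with a spinlock rather than a mutex, so the helpers remain safe inside the driver's existing `spin_lock_irq()` regions and the interrupt handler. The configuration, annotated:

	static const struct regmap_config example_regmap_config = {
		.reg_bits = 32,		/* register addresses are 32 bits wide */
		.val_bits = 32,		/* register values are 32 bits wide */
		.fast_io = true,	/* spinlock, not mutex: usable in atomic context */
	};
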
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index d87a85cefb66..950c5d0b6dca 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -18,6 +18,10 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SNVS_LPREGISTER_OFFSET 0x34
/* These register offsets are relative to LP (Low Power) range */
#define SNVS_LPCR 0x04
@@ -37,31 +41,36 @@
struct snvs_rtc_data {
struct rtc_device *rtc;
- void __iomem *ioaddr;
+ struct regmap *regmap;
+ int offset;
int irq;
- spinlock_t lock;
struct clk *clk;
};
-static u32 rtc_read_lp_counter(void __iomem *ioaddr)
+static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
{
u64 read1, read2;
+ u32 val;
do {
- read1 = readl(ioaddr + SNVS_LPSRTCMR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+ read1 = val;
read1 <<= 32;
- read1 |= readl(ioaddr + SNVS_LPSRTCLR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+ read1 |= val;
- read2 = readl(ioaddr + SNVS_LPSRTCMR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+ read2 = val;
read2 <<= 32;
- read2 |= readl(ioaddr + SNVS_LPSRTCLR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+ read2 |= val;
} while (read1 != read2);
/* Convert 47-bit counter to 32-bit raw second count */
return (u32) (read1 >> CNTR_TO_SECS_SH);
}
-static void rtc_write_sync_lp(void __iomem *ioaddr)
+static void rtc_write_sync_lp(struct snvs_rtc_data *data)
{
u32 count1, count2, count3;
int i;
@@ -69,15 +78,15 @@ static void rtc_write_sync_lp(void __iomem *ioaddr)
/* Wait for 3 CKIL cycles */
for (i = 0; i < 3; i++) {
do {
- count1 = readl(ioaddr + SNVS_LPSRTCLR);
- count2 = readl(ioaddr + SNVS_LPSRTCLR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
} while (count1 != count2);
/* Now wait until counter value changes */
do {
do {
- count2 = readl(ioaddr + SNVS_LPSRTCLR);
- count3 = readl(ioaddr + SNVS_LPSRTCLR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
} while (count2 != count3);
} while (count3 == count1);
}
@@ -85,23 +94,14 @@ static void rtc_write_sync_lp(void __iomem *ioaddr)
static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
{
- unsigned long flags;
int timeout = 1000;
u32 lpcr;
- spin_lock_irqsave(&data->lock, flags);
-
- lpcr = readl(data->ioaddr + SNVS_LPCR);
- if (enable)
- lpcr |= SNVS_LPCR_SRTC_ENV;
- else
- lpcr &= ~SNVS_LPCR_SRTC_ENV;
- writel(lpcr, data->ioaddr + SNVS_LPCR);
-
- spin_unlock_irqrestore(&data->lock, flags);
+ regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_SRTC_ENV,
+ enable ? SNVS_LPCR_SRTC_ENV : 0);
while (--timeout) {
- lpcr = readl(data->ioaddr + SNVS_LPCR);
+ regmap_read(data->regmap, data->offset + SNVS_LPCR, &lpcr);
if (enable) {
if (lpcr & SNVS_LPCR_SRTC_ENV)
@@ -121,7 +121,7 @@ static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- unsigned long time = rtc_read_lp_counter(data->ioaddr);
+ unsigned long time = rtc_read_lp_counter(data);
rtc_time_to_tm(time, tm);
@@ -139,8 +139,8 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
snvs_rtc_enable(data, false);
/* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
- writel(time << CNTR_TO_SECS_SH, data->ioaddr + SNVS_LPSRTCLR);
- writel(time >> (32 - CNTR_TO_SECS_SH), data->ioaddr + SNVS_LPSRTCMR);
+ regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
+ regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
/* Enable RTC again */
snvs_rtc_enable(data, true);
@@ -153,10 +153,10 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
u32 lptar, lpsr;
- lptar = readl(data->ioaddr + SNVS_LPTAR);
+ regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
rtc_time_to_tm(lptar, &alrm->time);
- lpsr = readl(data->ioaddr + SNVS_LPSR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
return 0;
@@ -165,21 +165,12 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- u32 lpcr;
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
- lpcr = readl(data->ioaddr + SNVS_LPCR);
- if (enable)
- lpcr |= (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN);
- else
- lpcr &= ~(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN);
- writel(lpcr, data->ioaddr + SNVS_LPCR);
+ regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
+ (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
+ enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
- spin_unlock_irqrestore(&data->lock, flags);
-
- rtc_write_sync_lp(data->ioaddr);
+ rtc_write_sync_lp(data);
return 0;
}
@@ -189,24 +180,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &alrm->time;
unsigned long time;
- unsigned long flags;
- u32 lpcr;
rtc_tm_to_time(alrm_tm, &time);
- spin_lock_irqsave(&data->lock, flags);
-
- /* Have to clear LPTA_EN before programming new alarm time in LPTAR */
- lpcr = readl(data->ioaddr + SNVS_LPCR);
- lpcr &= ~SNVS_LPCR_LPTA_EN;
- writel(lpcr, data->ioaddr + SNVS_LPCR);
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- writel(time, data->ioaddr + SNVS_LPTAR);
+ regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
+ regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
/* Clear alarm interrupt status bit */
- writel(SNVS_LPSR_LPTA, data->ioaddr + SNVS_LPSR);
+ regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
}
@@ -226,7 +207,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
u32 lpsr;
u32 events = 0;
- lpsr = readl(data->ioaddr + SNVS_LPSR);
+ regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
if (lpsr & SNVS_LPSR_LPTA) {
events |= (RTC_AF | RTC_IRQF);
@@ -238,25 +219,48 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
}
/* clear interrupt status */
- writel(lpsr, data->ioaddr + SNVS_LPSR);
+ regmap_write(data->regmap, data->offset + SNVS_LPSR, lpsr);
return events ? IRQ_HANDLED : IRQ_NONE;
}
+static const struct regmap_config snvs_rtc_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int snvs_rtc_probe(struct platform_device *pdev)
{
struct snvs_rtc_data *data;
struct resource *res;
int ret;
+ void __iomem *mmio;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->ioaddr))
- return PTR_ERR(data->ioaddr);
+ data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
+
+ if (IS_ERR(data->regmap)) {
+ dev_warn(&pdev->dev, "snvs rtc: you use old dts file, please update it\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ data->regmap = devm_regmap_init_mmio(&pdev->dev, mmio, &snvs_rtc_config);
+ } else {
+ data->offset = SNVS_LPREGISTER_OFFSET;
+ of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
+ }
+
+ if (IS_ERR(data->regmap)) {
+ dev_err(&pdev->dev, "Can't find snvs syscon\n");
+ return PTR_ERR(data->regmap);
+ }
data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0)
@@ -276,13 +280,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- spin_lock_init(&data->lock);
-
/* Initialize glitch detect */
- writel(SNVS_LPPGDR_INIT, data->ioaddr + SNVS_LPPGDR);
+ regmap_write(data->regmap, data->offset + SNVS_LPPGDR, SNVS_LPPGDR_INIT);
/* Clear interrupt status */
- writel(0xffffffff, data->ioaddr + SNVS_LPSR);
+ regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
/* Enable RTC */
snvs_rtc_enable(data, true);
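
Two things change in rtc-snvs. First, the register window now comes from a syscon regmap looked up by phandle (with an optional "offset" property, defaulting to SNVS_LPREGISTER_OFFSET), so the LP registers can be shared with other SNVS users; the devm_ioremap_resource() path survives only as a fallback for old device trees. Second, the private spinlock disappears because `regmap_update_bits()` performs the read-modify-write atomically under regmap's own locking. The 47-bit counter read needs no lock either; it relies on the classic read-twice idiom, retrying until two passes agree so a carry between the MSB and LSB reads cannot produce a torn value. Condensed sketch:

	static u64 example_read_counter(struct snvs_rtc_data *data)
	{
		u32 msb, lsb;
		u64 a, b;

		do {
			regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
			a = ((u64)msb << 32) | lsb;

			regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
			b = ((u64)msb << 32) | lsb;
		} while (a != b);	/* retry if the counter carried between reads */

		return a;
	}
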
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 3f9d0acb81c7..74c0a336ceea 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -208,7 +208,7 @@ static int st_rtc_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* LPC can either run in RTC or WDT mode */
+ /* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_RTC)
return -ENODEV;
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 0e93b714ee41..ba6a83b5b5c9 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -254,7 +254,7 @@ static ssize_t stk17ta8_nvram_read(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
- for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ for (count = 0; count < size; count++)
*buf++ = readb(ioaddr + pos++);
return count;
}
@@ -269,7 +269,7 @@ static ssize_t stk17ta8_nvram_write(struct file *filp, struct kobject *kobj,
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
- for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ for (count = 0; count < size; count++)
writeb(*buf++, ioaddr + pos++);
return count;
}
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index babd43bf3ddc..7273855ed02e 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -122,20 +122,8 @@ hctosys_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(hctosys);
-static struct attribute *rtc_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_date.attr,
- &dev_attr_time.attr,
- &dev_attr_since_epoch.attr,
- &dev_attr_max_user_freq.attr,
- &dev_attr_hctosys.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(rtc);
-
static ssize_t
-rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
unsigned long alarm;
@@ -159,7 +147,7 @@ rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
+wakealarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
ssize_t retval;
@@ -221,45 +209,57 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
retval = rtc_set_alarm(rtc, &alm);
return (retval < 0) ? retval : n;
}
-static DEVICE_ATTR(wakealarm, S_IRUGO | S_IWUSR,
- rtc_sysfs_show_wakealarm, rtc_sysfs_set_wakealarm);
+static DEVICE_ATTR_RW(wakealarm);
+static struct attribute *rtc_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_date.attr,
+ &dev_attr_time.attr,
+ &dev_attr_since_epoch.attr,
+ &dev_attr_max_user_freq.attr,
+ &dev_attr_hctosys.attr,
+ &dev_attr_wakealarm.attr,
+ NULL,
+};
/* The reason to trigger an alarm with no process watching it (via sysfs)
* is its side effect: waking from a system state like suspend-to-RAM or
* suspend-to-disk. So: no attribute unless that side effect is possible.
* (Userspace may disable that mechanism later.)
*/
-static inline int rtc_does_wakealarm(struct rtc_device *rtc)
+static bool rtc_does_wakealarm(struct rtc_device *rtc)
{
if (!device_can_wakeup(rtc->dev.parent))
- return 0;
+ return false;
+
return rtc->ops->set_alarm != NULL;
}
-
-void rtc_sysfs_add_device(struct rtc_device *rtc)
+static umode_t rtc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- int err;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct rtc_device *rtc = to_rtc_device(dev);
+ umode_t mode = attr->mode;
- /* not all RTCs support both alarms and wakeup */
- if (!rtc_does_wakealarm(rtc))
- return;
+ if (attr == &dev_attr_wakealarm.attr)
+ if (!rtc_does_wakealarm(rtc))
+ mode = 0;
- err = device_create_file(&rtc->dev, &dev_attr_wakealarm);
- if (err)
- dev_err(rtc->dev.parent,
- "failed to create alarm attribute, %d\n", err);
+ return mode;
}
-void rtc_sysfs_del_device(struct rtc_device *rtc)
-{
- /* REVISIT did we add it successfully? */
- if (rtc_does_wakealarm(rtc))
- device_remove_file(&rtc->dev, &dev_attr_wakealarm);
-}
+static struct attribute_group rtc_attr_group = {
+ .is_visible = rtc_attr_is_visible,
+ .attrs = rtc_attrs,
+};
+
+static const struct attribute_group *rtc_attr_groups[] = {
+ &rtc_attr_group,
+ NULL
+};
-void __init rtc_sysfs_init(struct class *rtc_class)
+const struct attribute_group **rtc_get_dev_attribute_groups(void)
{
- rtc_class->dev_groups = rtc_groups;
+ return rtc_attr_groups;
}
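
The rtc-sysfs rewrite swaps the add/remove-time `device_create_file()` calls for a single static attribute group whose `.is_visible` callback decides, per device, whether `wakealarm` appears. Because the group is attached before the device is announced (the rtc core passes it to device creation via `rtc_get_dev_attribute_groups()`), the attribute exists by the time the uevent reaches udev, closing the window where the file was created after the device became visible; returning 0 from `.is_visible` simply hides the attribute on hardware that cannot wake the system. The shape of the callback:

	static umode_t example_attr_is_visible(struct kobject *kobj,
					       struct attribute *attr, int n)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct rtc_device *rtc = to_rtc_device(dev);

		if (attr == &dev_attr_wakealarm.attr && !rtc_does_wakealarm(rtc))
			return 0;	/* hide wakealarm, keep the other attributes */

		return attr->mode;
	}
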
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index cb7f94ede516..560d9a5e0225 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -199,8 +199,7 @@ static ssize_t tx4939_rtc_nvram_read(struct file *filp, struct kobject *kobj,
ssize_t count;
spin_lock_irq(&pdata->lock);
- for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
- count++, size--) {
+ for (count = 0; count < size; count++) {
__raw_writel(pos++, &rtcreg->adr);
*buf++ = __raw_readl(&rtcreg->dat);
}
@@ -218,8 +217,7 @@ static ssize_t tx4939_rtc_nvram_write(struct file *filp, struct kobject *kobj,
ssize_t count;
spin_lock_irq(&pdata->lock);
- for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
- count++, size--) {
+ for (count = 0; count < size; count++) {
__raw_writel(pos++, &rtcreg->adr);
__raw_writel(*buf++, &rtcreg->dat);
}
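
The nvram loop rewrites in the rp5c01, stk17ta8 and tx4939 hunks above all drop the manual `size > 0 && pos < LIMIT` bound and simply copy `size` bytes. This is safe because sysfs clamps binary-attribute I/O before the callback runs: for a bin_attribute registered with a fixed size, the core truncates `count` to what fits below `attr->size` and never passes an out-of-range `pos`. A simplified sketch of that clamping (paraphrased from the sysfs core, not part of this patch):

	/* done by sysfs before calling a bin_attribute ->read()/->write() */
	if (size) {
		if (pos > size)
			return 0;
		if (count > size - pos)
			count = size - pos;	/* never read/write past the end */
	}
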
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index a58b6d17e6f0..27e896995e9b 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -271,6 +271,7 @@ static const struct of_device_id wmt_dt_ids[] = {
{ .compatible = "via,vt8500-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, wmt_dt_ids);
static struct platform_driver vt8500_rtc_driver = {
.probe = vt8500_rtc_probe,
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
new file mode 100644
index 000000000000..8b28762f06df
--- /dev/null
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -0,0 +1,279 @@
+/*
+ * Xilinx Zynq Ultrascale+ MPSoC Real Time Clock Driver
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* RTC Registers */
+#define RTC_SET_TM_WR 0x00
+#define RTC_SET_TM_RD 0x04
+#define RTC_CALIB_WR 0x08
+#define RTC_CALIB_RD 0x0C
+#define RTC_CUR_TM 0x10
+#define RTC_CUR_TICK 0x14
+#define RTC_ALRM 0x18
+#define RTC_INT_STS 0x20
+#define RTC_INT_MASK 0x24
+#define RTC_INT_EN 0x28
+#define RTC_INT_DIS 0x2C
+#define RTC_CTRL 0x40
+
+#define RTC_FR_EN BIT(20)
+#define RTC_FR_DATSHIFT 16
+#define RTC_TICK_MASK 0xFFFF
+#define RTC_INT_SEC BIT(0)
+#define RTC_INT_ALRM BIT(1)
+#define RTC_OSC_EN BIT(24)
+
+#define RTC_CALIB_DEF 0x198233
+#define RTC_CALIB_MASK 0x1FFFFF
+#define RTC_SEC_MAX_VAL 0xFFFFFFFF
+
+struct xlnx_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *reg_base;
+ int alarm_irq;
+ int sec_irq;
+};
+
+static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long new_time;
+
+ new_time = rtc_tm_to_time64(tm);
+
+ if (new_time > RTC_SEC_MAX_VAL)
+ return -EINVAL;
+
+ writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
+
+ return 0;
+}
+
+static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+ rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+ rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_ALRM), &alrm->time);
+ alrm->enabled = readl(xrtcdev->reg_base + RTC_INT_MASK) & RTC_INT_ALRM;
+
+ return 0;
+}
+
+static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+
+ if (enabled)
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
+ else
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
+
+ return 0;
+}
+
+static int xlnx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long alarm_time;
+
+ alarm_time = rtc_tm_to_time64(&alrm->time);
+
+ if (alarm_time > RTC_SEC_MAX_VAL)
+ return -EINVAL;
+
+ writel((u32)alarm_time, (xrtcdev->reg_base + RTC_ALRM));
+
+ xlnx_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+ return 0;
+}
+
+static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev, u32 calibval)
+{
+ /*
+ * Based on a crystal frequency of 33.330 KHz,
+ * set the calibration (fractions) counter to the default value
+ * suggested by the design spec, to correct the RTC frequency
+ * drift over time.
+ */
+ calibval &= RTC_CALIB_MASK;
+ writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+}
+
+static const struct rtc_class_ops xlnx_rtc_ops = {
+ .set_time = xlnx_rtc_set_time,
+ .read_time = xlnx_rtc_read_time,
+ .read_alarm = xlnx_rtc_read_alarm,
+ .set_alarm = xlnx_rtc_set_alarm,
+ .alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
+{
+ struct xlnx_rtc_dev *xrtcdev = (struct xlnx_rtc_dev *)id;
+ unsigned int status;
+
+ status = readl(xrtcdev->reg_base + RTC_INT_STS);
+ /* Check if interrupt asserted */
+ if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
+ return IRQ_NONE;
+
+ /* Clear interrupt */
+ writel(status, xrtcdev->reg_base + RTC_INT_STS);
+
+ if (status & RTC_INT_SEC)
+ rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_UF);
+ if (status & RTC_INT_ALRM)
+ rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int xlnx_rtc_probe(struct platform_device *pdev)
+{
+ struct xlnx_rtc_dev *xrtcdev;
+ struct resource *res;
+ int ret;
+ unsigned int calibvalue;
+
+ xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
+ if (!xrtcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xrtcdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xrtcdev->reg_base))
+ return PTR_ERR(xrtcdev->reg_base);
+
+ xrtcdev->alarm_irq = platform_get_irq_byname(pdev, "alarm");
+ if (xrtcdev->alarm_irq < 0) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ return xrtcdev->alarm_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, xrtcdev->alarm_irq,
+ xlnx_rtc_interrupt, 0,
+ dev_name(&pdev->dev), xrtcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ return ret;
+ }
+
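+ /* the "sec" interrupt fires once per second and is reported as RTC_UF */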
+ xrtcdev->sec_irq = platform_get_irq_byname(pdev, "sec");
+ if (xrtcdev->sec_irq < 0) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ return xrtcdev->sec_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, xrtcdev->sec_irq,
+ xlnx_rtc_interrupt, 0,
+ dev_name(&pdev->dev), xrtcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+ &calibvalue);
+ if (ret)
+ calibvalue = RTC_CALIB_DEF;
+
+ xlnx_init_rtc(xrtcdev, calibvalue);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ xrtcdev->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &xlnx_rtc_ops, THIS_MODULE);
+ return PTR_ERR_OR_ZERO(xrtcdev->rtc);
+}
+
+static int xlnx_rtc_remove(struct platform_device *pdev)
+{
+ xlnx_rtc_alarm_irq_enable(&pdev->dev, 0);
+ device_init_wakeup(&pdev->dev, 0);
+
+ return 0;
+}
+
+static int __maybe_unused xlnx_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(xrtcdev->alarm_irq);
+ else
+ xlnx_rtc_alarm_irq_enable(dev, 0);
+
+ return 0;
+}
+
+static int __maybe_unused xlnx_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xlnx_rtc_dev *xrtcdev = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(xrtcdev->alarm_irq);
+ else
+ xlnx_rtc_alarm_irq_enable(dev, 1);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xlnx_rtc_pm_ops, xlnx_rtc_suspend, xlnx_rtc_resume);
+
+static const struct of_device_id xlnx_rtc_of_match[] = {
+ {.compatible = "xlnx,zynqmp-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_rtc_of_match);
+
+static struct platform_driver xlnx_rtc_driver = {
+ .probe = xlnx_rtc_probe,
+ .remove = xlnx_rtc_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &xlnx_rtc_pm_ops,
+ .of_match_table = xlnx_rtc_of_match,
+ },
+};
+
+module_platform_driver(xlnx_rtc_driver);
+
+MODULE_DESCRIPTION("Xilinx Zynq MPSoC RTC driver");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 specific device drivers
#
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1aec8ff0b587..f73d2f579a7e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
}
/*
+ * return 1 when device is not eligible for IO
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* dasd is being set offline. */
+ return 1;
+ }
+ if (device->stopped) {
+ if (device->stopped & mask) {
+ /* stopped and CQR will not change that. */
+ return 1;
+ }
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ /* CQR is not able to change device to
+ * operational. */
+ return 1;
+ }
+ /* CQR required to get device operational. */
+ }
+ return 0;
+}
+
+/*
* Take a look at the first request on the ccw queue and check
* if it needs to be started.
*/
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if (cqr->status != DASD_CQR_QUEUED)
return;
- /* when device is stopped, return request to previous layer
- * exception: only the disconnect or unresumed bits are set and the
- * cqr is a path verification request
- */
- if (device->stopped &&
- !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
- && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+ /* if device is not usable return request to upper layer */
+ if (__dasd_device_is_unusable(device, cqr)) {
cqr->intrc = -EAGAIN;
cqr->status = DASD_CQR_CLEARED;
dasd_schedule_device_bh(device);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index a2597e683e79..fe07f3139bf6 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -58,7 +58,7 @@ static struct alias_server *_find_server(struct dasd_uid *uid)
&& !strncmp(pos->uid.serial, uid->serial,
sizeof(uid->serial)))
return pos;
- };
+ }
return NULL;
}
@@ -69,7 +69,7 @@ static struct alias_lcu *_find_lcu(struct alias_server *server,
list_for_each_entry(pos, &server->lculist, lcu) {
if (pos->uid.ssid == uid->ssid)
return pos;
- };
+ }
return NULL;
}
@@ -97,7 +97,7 @@ static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
if (pos->uid.base_unit_addr == search_unit_addr &&
!strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
return pos;
- };
+ }
return NULL;
}
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
alias_priv = (struct dasd_eckd_private *) alias_device->private;
- if ((alias_priv->count < private->count) && !alias_device->stopped)
+ if ((alias_priv->count < private->count) && !alias_device->stopped &&
+ !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
return alias_device;
else
return NULL;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6215f6455eb8..62a323539226 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1036,7 +1036,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
- int rc, path_err;
+ int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
@@ -1068,6 +1068,17 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
path_data->opm |= lpm;
continue; /* no error */
}
+ /* translate path mask to position in mask */
+ pos = 8 - ffs(lpm);
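+ /* drop any stale conf data for this path before saving the new copy */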
+ kfree(private->path_conf_data[pos]);
+ if ((__u8 *)private->path_conf_data[pos] ==
+ private->conf_data) {
+ private->conf_data = NULL;
+ private->conf_len = 0;
+ conf_data_saved = 0;
+ }
+ private->path_conf_data[pos] =
+ (struct dasd_conf_data *) conf_data;
/* save first valid configuration data */
if (!conf_data_saved) {
kfree(private->conf_data);
@@ -1095,7 +1106,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
kfree(conf_data);
continue;
}
-
if (dasd_eckd_compare_path_uid(
device, &path_private)) {
uid = &path_private.uid;
@@ -1157,9 +1167,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
path_data->cablepm &= ~lpm;
path_data->hpfpm &= ~lpm;
path_data->cuirpm &= ~lpm;
-
- if (conf_data != private->conf_data)
- kfree(conf_data);
}
return path_err;
@@ -1259,7 +1266,11 @@ static void do_path_verification_work(struct work_struct *work)
schedule_work(work);
return;
}
-
+ /* check if path verification already running and delay if so */
+ if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
opm = 0;
npm = 0;
ppm = 0;
@@ -1402,7 +1413,7 @@ static void do_path_verification_work(struct work_struct *work)
device->path_data.hpfpm |= hpfpm;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
-
+ clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
dasd_put_device(device);
if (data->isglobal)
mutex_unlock(&dasd_path_verification_mutex);
@@ -1810,6 +1821,7 @@ out_err1:
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
+ int i;
private = (struct dasd_eckd_private *) device->private;
dasd_alias_disconnect_device_from_lcu(device);
@@ -1818,6 +1830,15 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->vdsneq = NULL;
private->gneq = NULL;
private->conf_len = 0;
+ for (i = 0; i < 8; i++) {
+ kfree(private->path_conf_data[i]);
+ if ((__u8 *)private->path_conf_data[i] ==
+ private->conf_data) {
+ private->conf_data = NULL;
+ private->conf_len = 0;
+ }
+ private->path_conf_data[i] = NULL;
+ }
kfree(private->conf_data);
private->conf_data = NULL;
}
@@ -3968,7 +3989,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
- if (is_compat_task() || sizeof(long) == 4) {
+ if (is_compat_task()) {
/* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
if ((usrparm.psf_data >> 32) != 0)
@@ -4525,12 +4546,13 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
- cqr->retries = 256;
cqr->expires = 10 * HZ;
-
- /* we need to check for messages on exactly this path */
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
- cqr->lpm = lpum;
+ /* dasd_sleep_on_immediatly does not do complex error
+ * recovery, so clear the ERP flag and set the retry
+ * counter to do basic ERP */
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ cqr->retries = 256;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
@@ -4605,10 +4627,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir->message_id = message_id;
psf_cuir->cssid = sch_id.cssid;
psf_cuir->ssid = sch_id.ssid;
-
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)(addr_t)psf_cuir;
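+ /* suppress incorrect length indication for this channel program */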
+ ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
cqr->startdev = device;
@@ -4618,6 +4640,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
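+ /* mark as path verification so the request may start on a stopped device */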
+ set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
rc = dasd_sleep_on(cqr);
@@ -4625,118 +4648,252 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
return rc;
}
-static int dasd_eckd_cuir_change_state(struct dasd_device *device, __u8 lpum)
+/*
+ * return configuration data that is referenced by record selector
+ * if a record selector is specified or per default return the
+ * conf_data pointer for the path specified by lpum
+ */
+static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
+ __u8 lpum,
+ struct dasd_cuir_message *cuir)
{
- unsigned long flags;
- __u8 tbcpm;
+ struct dasd_eckd_private *private;
+ struct dasd_conf_data *conf_data;
+ int path, pos;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- tbcpm = device->path_data.opm & ~lpum;
- if (tbcpm) {
- device->path_data.opm = tbcpm;
- device->path_data.cuirpm |= lpum;
+ private = (struct dasd_eckd_private *) device->private;
+ if (cuir->record_selector == 0)
+ goto out;
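+ /* scan the per-path conf data entries for a matching record selector */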
+ for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
+ conf_data = private->path_conf_data[pos];
+ if (conf_data->gneq.record_selector ==
+ cuir->record_selector)
+ return conf_data;
+ }
+out:
+ return private->path_conf_data[8 - ffs(lpum)];
+}
+
+/*
+ * This function determines the scope of a reconfiguration request by
+ * analysing the path and device selection data provided in the CUIR request.
+ * Returns a path mask containing the CUIR-affected paths for the given device.
+ *
+ * If the CUIR request does not contain the required information, return the
+ * path mask of the path on which the attention message for the CUIR request
+ * was received.
+ */
+static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ struct dasd_conf_data *ref_conf_data;
+ unsigned long bitmask = 0, mask = 0;
+ struct dasd_eckd_private *private;
+ struct dasd_conf_data *conf_data;
+ unsigned int pos, path;
+ char *ref_gneq, *gneq;
+ char *ref_ned, *ned;
+ int tbcpm = 0;
+
+ /* if the CUIR request does not specify the scope, use the
+ path the attention message was presented on */
+ if (!cuir->ned_map ||
+ !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
+ return lpum;
+
+ private = (struct dasd_eckd_private *) device->private;
+ /* get reference conf data */
+ ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
+ /* reference ned is determined by ned_map field */
+ pos = 8 - ffs(cuir->ned_map);
+ ref_ned = (char *)&ref_conf_data->neds[pos];
+ ref_gneq = (char *)&ref_conf_data->gneq;
+ /* transfer 24 bit neq_map to mask */
+ mask = cuir->neq_map[2];
+ mask |= cuir->neq_map[1] << 8;
+ mask |= cuir->neq_map[0] << 16;
+
+ for (path = 0x80; path; path >>= 1) {
+ /* initialise data per path */
+ bitmask = mask;
+ pos = 8 - ffs(path);
+ conf_data = private->path_conf_data[pos];
+ pos = 8 - ffs(cuir->ned_map);
+ ned = (char *) &conf_data->neds[pos];
+ /* compare reference ned and per path ned */
+ if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
+ continue;
+ gneq = (char *)&conf_data->gneq;
+ /* compare reference gneq and per-path gneq under the
+ 24-bit mask, where mask bit 0 corresponds to byte 31
+ of the gneq and mask bit 23 to byte 8 */
+ while (bitmask) {
+ pos = ffs(bitmask) - 1;
+ if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
+ != 0)
+ break;
+ clear_bit(pos, &bitmask);
+ }
+ if (bitmask)
+ continue;
+ /* device and path match the reference values;
+ add the path to the CUIR scope */
+ tbcpm |= path;
+ }
+ return tbcpm;
+}
+
+static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
+ unsigned long paths,
+ struct subchannel_id sch_id, int action)
+{
+ struct channel_path_desc *desc;
+ int pos;
+
+ while (paths) {
+ /* get position of bit in mask */
+ pos = ffs(paths) - 1;
+ /* get channel path descriptor from this position */
+ desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
+ if (action == CUIR_QUIESCE)
+ pr_warn("Service on the storage server caused path "
+ "%x.%02x to go offline", sch_id.cssid,
+ desc ? desc->chpid : 0);
+ else if (action == CUIR_RESUME)
+ pr_info("Path %x.%02x is back online after service "
+ "on the storage server", sch_id.cssid,
+ desc ? desc->chpid : 0);
+ kfree(desc);
+ clear_bit(pos, &paths);
}
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
- return tbcpm ? 0 : PSF_CUIR_LAST_PATH;
+}
+
+static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ unsigned long tbcpm;
+
+ tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
+ /* nothing to do if path is not in use */
+ if (!(device->path_data.opm & tbcpm))
+ return 0;
+ if (!(device->path_data.opm & ~tbcpm)) {
+ /* no path would be left if the CUIR action is
+ taken; return an error */
+ return -EINVAL;
+ }
+ /* remove device from operational path mask */
+ device->path_data.opm &= ~tbcpm;
+ device->path_data.cuirpm |= tbcpm;
+ return tbcpm;
}
/*
- * walk through all devices and quiesce them
- * if it is the last path return error
+ * walk through all devices and build a path mask to quiesce them
+ * return an error if the last path to a device would be removed
*
* if only part of the devices are quiesced and an error
* occurs, no onlining is necessary; the storage server will
* notify the already set-offline devices again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
- struct channel_path_desc *desc,
- struct subchannel_id sch_id)
+ struct subchannel_id sch_id,
+ struct dasd_cuir_message *cuir)
{
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
- int rc;
+ unsigned long paths = 0;
+ unsigned long flags;
+ int tbcpm;
private = (struct dasd_eckd_private *) device->private;
- rc = 0;
-
/* active devices */
- list_for_each_entry_safe(dev, n,
- &private->lcu->active_devices,
+ list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
- rc = dasd_eckd_cuir_change_state(dev, lpum);
- if (rc)
- goto out;
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
}
-
/* inactive devices */
- list_for_each_entry_safe(dev, n,
- &private->lcu->inactive_devices,
+ list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
alias_list) {
- rc = dasd_eckd_cuir_change_state(dev, lpum);
- if (rc)
- goto out;
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
}
-
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist, group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
- rc = dasd_eckd_cuir_change_state(dev, lpum);
- if (rc)
- goto out;
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
- rc = dasd_eckd_cuir_change_state(dev, lpum);
- if (rc)
- goto out;
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
}
}
-
- pr_warn("Service on the storage server caused path %x.%02x to go offline",
- sch_id.cssid, desc ? desc->chpid : 0);
- rc = PSF_CUIR_COMPLETED;
-out:
- return rc;
+ /* notify user about all paths affected by CUIR action */
+ dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
+ return 0;
+out_err:
+ return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
- struct channel_path_desc *desc,
- struct subchannel_id sch_id)
+ struct subchannel_id sch_id,
+ struct dasd_cuir_message *cuir)
{
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
+ unsigned long paths = 0;
+ int tbcpm;
- pr_info("Path %x.%02x is back online after service on the storage server",
- sch_id.cssid, desc ? desc->chpid : 0);
private = (struct dasd_eckd_private *) device->private;
-
/*
* the path may have been added through a generic path event before;
* only trigger path verification if the path is not already in use
*/
-
list_for_each_entry_safe(dev, n,
&private->lcu->active_devices,
alias_list) {
- if (!(dev->path_data.opm & lpum)) {
- dev->path_data.tbvpm |= lpum;
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dev->path_data.opm & tbcpm)) {
+ dev->path_data.tbvpm |= tbcpm;
dasd_schedule_device_bh(dev);
}
}
-
list_for_each_entry_safe(dev, n,
&private->lcu->inactive_devices,
alias_list) {
- if (!(dev->path_data.opm & lpum)) {
- dev->path_data.tbvpm |= lpum;
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dev->path_data.opm & tbcpm)) {
+ dev->path_data.tbvpm |= tbcpm;
dasd_schedule_device_bh(dev);
}
}
-
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
@@ -4744,21 +4901,27 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
list_for_each_entry_safe(dev, n,
&pavgroup->baselist,
alias_list) {
- if (!(dev->path_data.opm & lpum)) {
- dev->path_data.tbvpm |= lpum;
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dev->path_data.opm & tbcpm)) {
+ dev->path_data.tbvpm |= tbcpm;
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&pavgroup->aliaslist,
alias_list) {
- if (!(dev->path_data.opm & lpum)) {
- dev->path_data.tbvpm |= lpum;
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dev->path_data.opm & tbcpm)) {
+ dev->path_data.tbvpm |= tbcpm;
dasd_schedule_device_bh(dev);
}
}
}
- return PSF_CUIR_COMPLETED;
+ /* notify user about all paths affected by CUIR action */
+ dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+ return 0;
}
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
@@ -4768,8 +4931,12 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
struct channel_path_desc *desc;
struct subchannel_id sch_id;
int pos, response;
- ccw_device_get_schid(device->cdev, &sch_id);
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "CUIR request: %016llx %016llx %016llx %08x",
+ ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
+ ((u32 *)cuir)[3]);
+ ccw_device_get_schid(device->cdev, &sch_id);
/* get position of path in mask */
pos = 8 - ffs(lpum);
/* get channel path descriptor from this position */
@@ -4777,18 +4944,26 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
- response = dasd_eckd_cuir_quiesce(device, lpum, desc, sch_id);
+ if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+ response = PSF_CUIR_LAST_PATH;
+ else
+ response = PSF_CUIR_COMPLETED;
} else if (cuir->code == CUIR_RESUME) {
/* resume */
- response = dasd_eckd_cuir_resume(device, lpum, desc, sch_id);
+ dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+ response = PSF_CUIR_COMPLETED;
} else
response = PSF_CUIR_NOT_SUPPORTED;
- dasd_eckd_psf_cuir_response(device, response, cuir->message_id,
- desc, sch_id);
-
+ dasd_eckd_psf_cuir_response(device, response,
+ cuir->message_id, desc, sch_id);
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "CUIR response: %d on message ID %08x", response,
+ cuir->message_id);
/* free descriptor copy */
kfree(desc);
+ /* to make sure there is no attention left, schedule the work again */
+ device->discipline->check_attention(device, lpum);
}
static void dasd_eckd_check_attention_work(struct work_struct *work)
@@ -4800,22 +4975,18 @@ static void dasd_eckd_check_attention_work(struct work_struct *work)
data = container_of(work, struct check_attention_work_data, worker);
device = data->device;
-
messages = kzalloc(sizeof(*messages), GFP_KERNEL);
if (!messages) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate attention message buffer");
goto out;
}
-
rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
if (rc)
goto out;
-
if (messages->length == ATTENTION_LENGTH_CUIR &&
messages->format == ATTENTION_FORMAT_CUIR)
dasd_eckd_handle_cuir(device, messages, data->lpum);
-
out:
dasd_put_device(device);
kfree(messages);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ddab7df36e25..f8f91ee652d3 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -355,7 +355,8 @@ struct dasd_gneq {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
- __u8 reserved[5];
+ __u8 record_selector;
+ __u8 reserved[4];
struct {
__u8 value:2;
__u8 number:6;
@@ -492,10 +493,18 @@ struct alias_pav_group {
struct dasd_device *next;
};
+struct dasd_conf_data {
+ struct dasd_ned neds[5];
+ u8 reserved[64];
+ struct dasd_gneq gneq;
+} __packed;
+
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
+ /* per path configuration data */
+ struct dasd_conf_data *path_conf_data[8];
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 227e3dea3155..4aed5ed70836 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@ struct dasd_attention_data {
#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
+#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
#define DASD_SLEEPON_START_TAG ((void *) 1)
#define DASD_SLEEPON_END_TAG ((void *) 2)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index da212813f2d5..5ed44fe21380 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -29,7 +29,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void **kaddr, unsigned long *pfn, long size);
+ void __pmem **kaddr, unsigned long *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -548,10 +548,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
*/
num_of_segments = 0;
for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
- for (j = i; (buf[j] != ':') &&
+ for (j = i; j < count &&
+ (buf[j] != ':') &&
(buf[j] != '\0') &&
- (buf[j] != '\n') &&
- j < count; j++) {
+ (buf[j] != '\n'); j++) {
local_buf[j-i] = toupper(buf[j]);
}
local_buf[j-i] = '\0';
@@ -723,7 +723,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
/*
* parse input
*/
- for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
+ for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
local_buf[i] = toupper(buf[i]);
}
local_buf[i] = '\0';
@@ -826,6 +826,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
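+ /* split bios that exceed the queue limits before processing them */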
+ blk_queue_split(q, &bio, q->bio_split);
+
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
@@ -871,7 +873,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
bytes_done += bvec.bv_len;
}
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
fail:
bio_io_error(bio);
@@ -879,18 +881,20 @@ fail:
static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void **kaddr, unsigned long *pfn, long size)
+ void __pmem **kaddr, unsigned long *pfn)
{
struct dcssblk_dev_info *dev_info;
unsigned long offset, dev_sz;
+ void *addr;
dev_info = bdev->bd_disk->private_data;
if (!dev_info)
return -ENODEV;
dev_sz = dev_info->end - dev_info->start;
offset = secnum * 512;
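+ /* DCSS memory is directly addressable; compute the kernel address of this sector */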
- *kaddr = (void *) (dev_info->start + offset);
- *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+ addr = (void *) (dev_info->start + offset);
+ *pfn = virt_to_phys(addr) >> PAGE_SHIFT;
+ *kaddr = (void __pmem *) addr;
return dev_sz - offset;
}
@@ -904,10 +908,10 @@ dcssblk_check_params(void)
for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
i++) {
- for (j = i; (dcssblk_segments[j] != ',') &&
+ for (j = i; (j < DCSSBLK_PARM_LEN) &&
+ (dcssblk_segments[j] != ',') &&
(dcssblk_segments[j] != '\0') &&
- (dcssblk_segments[j] != '(') &&
- (j < DCSSBLK_PARM_LEN); j++)
+ (dcssblk_segments[j] != '('); j++)
{
buf[j-i] = dcssblk_segments[j];
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 7d4e9397ac31..02871f1db562 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,6 +190,8 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
unsigned long page_addr;
unsigned long bytes;
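+ /* split bios that exceed the queue limits before checking alignment */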
+ blk_queue_split(q, &bio, q->bio_split);
+
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
@@ -220,8 +222,7 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
index++;
}
}
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
+ bio_endio(bio);
return;
fail:
bio_io_error(bio);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 75ffe9980c3e..7c511add5aa7 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -413,6 +413,10 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
+ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+ /* Interrupt without an outstanding request -> update all */
+ cp->update_flags = CON_UPDATE_ALL;
+ con3270_set_timer(cp, 1);
}
return RAW3270_IO_DONE;
}
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 8de2deb176d7..f7d92584b993 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -14,15 +14,21 @@
#include "ctrlchar.h"
#ifdef CONFIG_MAGIC_SYSRQ
-static int ctrlchar_sysrq_key;
+static struct sysrq_work ctrlchar_sysrq;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
- handle_sysrq(ctrlchar_sysrq_key);
+ struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
+
+ handle_sysrq(sysrq->key);
}
-static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
+void schedule_sysrq_work(struct sysrq_work *sw)
+{
+ INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
+ schedule_work(&sw->work);
+}
#endif
@@ -51,8 +57,8 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
#ifdef CONFIG_MAGIC_SYSRQ
/* racy */
if (len == 3 && buf[1] == '-') {
- ctrlchar_sysrq_key = buf[2];
- schedule_work(&ctrlchar_work);
+ ctrlchar_sysrq.key = buf[2];
+ schedule_sysrq_work(&ctrlchar_sysrq);
return CTRLCHAR_SYSRQ;
}
#endif
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 1a53552f4981..59c2d6e55e55 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -7,6 +7,8 @@
*/
#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
extern unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
@@ -17,3 +19,13 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
#define CTRLCHAR_SYSRQ (3 << 8)
#define CTRLCHAR_MASK (~0xffu)
+
+
+#ifdef CONFIG_MAGIC_SYSRQ
+struct sysrq_work {
+ int key;
+ struct work_struct work;
+};
+
+void schedule_sysrq_work(struct sysrq_work *sw);
+#endif
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 93889632fdf9..12db8db04cdd 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -223,7 +223,7 @@ int diag_ftp_startup(void)
if (rc)
return rc;
- ctl_set_bit(0, 63 - 22);
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
return 0;
}
@@ -232,6 +232,6 @@ int diag_ftp_startup(void)
*/
void diag_ftp_shutdown(void)
{
- ctl_clear_bit(0, 63 - 22);
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 0da3ae3cd63b..b7d60306b0bc 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -95,7 +95,7 @@ static void dcss_mkname(char *ascii_name, char *ebcdic_name)
if (ascii_name[i] == '\0')
break;
ebcdic_name[i] = toupper(ascii_name[i]);
- };
+ }
for (; i < 8; i++)
ebcdic_name[i] = ' ';
ASCEBC(ebcdic_name, 8);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 5e20513c0587..f58bf4c6c3ee 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -53,7 +53,7 @@ static DECLARE_COMPLETION(sclp_request_queue_flushed);
/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
-int sclp_console_drop = 0;
+int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;
@@ -79,8 +79,8 @@ static int __init sclp_setup_console_drop(char *str)
int drop, rc;
rc = kstrtoint(str, 0, &drop);
- if (!rc && drop)
- sclp_console_drop = 1;
+ if (!rc)
+ sclp_console_drop = drop;
return 1;
}
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e9485fbbb373..806239c2cf2f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -25,6 +25,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
+#include <asm/numa.h>
#include "sclp.h"
@@ -388,11 +389,11 @@ static struct notifier_block sclp_mem_nb = {
};
static void __init align_to_block_size(unsigned long long *start,
- unsigned long long *size)
+ unsigned long long *size,
+ unsigned long long alignment)
{
- unsigned long long start_align, size_align, alignment;
+ unsigned long long start_align, size_align;
- alignment = memory_block_size_bytes();
start_align = roundup(*start, alignment);
size_align = rounddown(*start + *size, alignment) - start_align;
@@ -404,8 +405,8 @@ static void __init align_to_block_size(unsigned long long *start,
static void __init add_memory_merged(u16 rn)
{
+ unsigned long long start, size, addr, block_size;
static u16 first_rn, num;
- unsigned long long start, size;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
@@ -423,9 +424,12 @@ static void __init add_memory_merged(u16 rn)
goto skip_add;
if (memory_end_set && (start + size > memory_end))
size = memory_end - start;
- align_to_block_size(&start, &size);
- if (size)
- add_memory(0, start, size);
+ block_size = memory_block_size_bytes();
+ align_to_block_size(&start, &size, block_size);
+ if (!size)
+ goto skip_add;
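+ /* add memory block by block so each block can be assigned to its NUMA node */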
+ for (addr = start; addr < start + size; addr += block_size)
+ add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
skip_add:
first_rn = rn;
num = 1;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index aeed7969fd79..7bc6df3100ef 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -7,6 +7,7 @@
#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/errno.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index ae67386c03d3..68d6ee7ae504 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -12,6 +12,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
+#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -27,6 +28,7 @@
#include <asm/uaccess.h>
#include "sclp.h"
+#include "ctrlchar.h"
#define SCLP_VT220_MAJOR TTY_MAJOR
#define SCLP_VT220_MINOR 65
@@ -477,6 +479,53 @@ sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
#define SCLP_VT220_SESSION_STARTED 0x80
#define SCLP_VT220_SESSION_DATA 0x00
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static int sysrq_pressed;
+static struct sysrq_work sysrq;
+
+static void sclp_vt220_reset_session(void)
+{
+ sysrq_pressed = 0;
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ /* Handle magic sys request */
+ if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
+ /*
+ * If pressed again, reset sysrq_pressed
+ * and flip CTRL-O character
+ */
+ sysrq_pressed = !sysrq_pressed;
+ if (sysrq_pressed)
+ continue;
+ } else if (sysrq_pressed) {
+ sysrq.key = buffer[i];
+ schedule_sysrq_work(&sysrq);
+ sysrq_pressed = 0;
+ continue;
+ }
+ tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
+ }
+}
+
+#else
+
+static void sclp_vt220_reset_session(void)
+{
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+ tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+}
+
+#endif
+
/*
* Called by the SCLP to report incoming event buffers.
*/
@@ -492,12 +541,13 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
switch (*buffer) {
case SCLP_VT220_SESSION_ENDED:
case SCLP_VT220_SESSION_STARTED:
+ sclp_vt220_reset_session();
break;
case SCLP_VT220_SESSION_DATA:
/* Send input to line discipline */
buffer++;
count--;
- tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+ sclp_vt220_handle_input(buffer, count);
tty_flip_buffer_push(&sclp_vt220_port);
break;
}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e91b89dc6d1f..e96fc7fd9498 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -659,6 +659,10 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
+ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+ /* Interrupt without an outstanding request -> update all */
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
}
return RAW3270_IO_DONE;
}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e3bf885f4a6c..548a18916a31 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -21,6 +21,7 @@
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>
+#include <asm/ebcdic.h>
#include "css.h"
#include "cio.h"
@@ -272,36 +273,6 @@ static void s390_process_res_acc(struct chp_link *link)
css_schedule_reprobe();
}
-static int
-__get_chpid_from_lir(void *data)
-{
- struct lir {
- u8 iq;
- u8 ic;
- u16 sci;
- /* incident-node descriptor */
- u32 indesc[28];
- /* attached-node descriptor */
- u32 andesc[28];
- /* incident-specific information */
- u32 isinfo[28];
- } __attribute__ ((packed)) *lir;
-
- lir = data;
- if (!(lir->iq&0x80))
- /* NULL link incident record */
- return -EINVAL;
- if (!(lir->indesc[0]&0xc0000000))
- /* node descriptor not valid */
- return -EINVAL;
- if (!(lir->indesc[0]&0x10000000))
- /* don't handle device-type nodes - FIXME */
- return -EINVAL;
- /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
-
- return (u16) (lir->indesc[0]&0x000000ff);
-}
-
struct chsc_sei_nt0_area {
u8 flags;
u8 vf; /* validity flags */
@@ -341,22 +312,132 @@ struct chsc_sei {
} u;
} __packed;
+/*
+ * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
+ */
+
+#define ND_VALIDITY_VALID 0
+#define ND_VALIDITY_OUTDATED 1
+#define ND_VALIDITY_INVALID 2
+
+struct node_descriptor {
+ /* Flags. */
+ union {
+ struct {
+ u32 validity:3;
+ u32 reserved:5;
+ } __packed;
+ u8 byte0;
+ } __packed;
+
+ /* Node parameters. */
+ u32 params:24;
+
+ /* Node ID. */
+ char type[6];
+ char model[3];
+ char manufacturer[3];
+ char plant[2];
+ char seq[12];
+ u16 tag;
+} __packed;
+
+/*
+ * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
+ */
+
+#define LIR_IQ_CLASS_INFO 0
+#define LIR_IQ_CLASS_DEGRADED 1
+#define LIR_IQ_CLASS_NOT_OPERATIONAL 2
+
+struct lir {
+ struct {
+ u32 null:1;
+ u32 reserved:3;
+ u32 class:2;
+ u32 reserved2:2;
+ } __packed iq;
+ u32 ic:8;
+ u32 reserved:16;
+ struct node_descriptor incident_node;
+ struct node_descriptor attached_node;
+ u8 reserved2[32];
+} __packed;
+
+#define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */
+#define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+
+/* Copy EBCDIC text, convert to ASCII and optionally add a delimiter. */
+static char *store_ebcdic(char *dest, const char *src, unsigned long len,
+ char delim)
+{
+ memcpy(dest, src, len);
+ EBCASC(dest, len);
+
+ if (delim)
+ dest[len++] = delim;
+
+ return dest + len;
+}
+
+/* Format node ID and parameters for output in LIR log message. */
+static void format_node_data(char *params, char *id, struct node_descriptor *nd)
+{
+ memset(params, 0, PARAMS_LEN);
+ memset(id, 0, NODEID_LEN);
+
+ if (nd->validity != ND_VALIDITY_VALID) {
+ strncpy(params, "n/a", PARAMS_LEN - 1);
+ strncpy(id, "n/a", NODEID_LEN - 1);
+ return;
+ }
+
+ /* PARAMS=xx,xxxxxx */
+ snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
+ /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+ id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
+ id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
+ id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
+ id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
+ id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
+ sprintf(id, "%04X", nd->tag);
+}
+
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
- struct chp_id chpid;
- int id;
+ struct lir *lir = (struct lir *) &sei_area->ccdf;
+ char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
+ aunodeid[NODEID_LEN];
- CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
- sei_area->rs, sei_area->rsid);
- if (sei_area->rs != 4)
+ CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
+ sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
+
+ /* Ignore NULL Link Incident Records. */
+ if (lir->iq.null)
return;
- id = __get_chpid_from_lir(sei_area->ccdf);
- if (id < 0)
- CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
- else {
- chp_id_init(&chpid);
- chpid.id = id;
- chsc_chp_offline(chpid);
+
+ /* Inform the user that a link requires maintenance actions because it has
+ * become degraded or not operational. Note that this log message is
+ * the primary intention behind a Link Incident Record. */
+
+ format_node_data(iuparams, iunodeid, &lir->incident_node);
+ format_node_data(auparams, aunodeid, &lir->attached_node);
+
+ switch (lir->iq.class) {
+ case LIR_IQ_CLASS_DEGRADED:
+ pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
+ "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+ sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+ iunodeid, auparams, aunodeid);
+ break;
+ case LIR_IQ_CLASS_NOT_OPERATIONAL:
+ pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
+ "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+ sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+ iunodeid, auparams, aunodeid);
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f3c417943dad..6acd0b577694 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -540,7 +540,7 @@ int ccw_device_stlck(struct ccw_device *cdev)
if (rc)
goto out_unlock;
/* Perform operation. */
- cdev->private->state = DEV_STATE_STEAL_LOCK,
+ cdev->private->state = DEV_STATE_STEAL_LOCK;
ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
spin_unlock_irq(sch->lock);
/* Wait for operation to finish. */
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index bee8c11cd086..b3f44bc7f644 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -336,7 +336,6 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
struct eadm_private *private;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
@@ -356,7 +355,7 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
- return ret;
+ return 0;
}
static struct css_device_id eadm_subchannel_ids[] = {
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 559a9dcdb15d..d78b3d629d78 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1372,7 +1372,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
/* Wait for the test message to complete. */
for (i = 0; i < 6; i++) {
- mdelay(300);
+ msleep(300);
status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
if (status.response_code == AP_RESPONSE_NORMAL &&
psmid == 0x0102030405060708ULL)
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 08f1830cbfc4..4eb45546a3aa 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
@@ -1202,16 +1206,8 @@ static void sprinthx(unsigned char *title, struct seq_file *m,
static void sprinthx4(unsigned char *title, struct seq_file *m,
unsigned int *array, unsigned int len)
{
- int r;
-
seq_printf(m, "\n%s\n", title);
- for (r = 0; r < len; r++) {
- if ((r % 8) == 0)
- seq_printf(m, " ");
- seq_printf(m, "%08X ", array[r]);
- if ((r % 8) == 7)
- seq_putc(m, '\n');
- }
+ seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
seq_putc(m, '\n');
}
@@ -1373,6 +1369,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
static struct hwrng zcrypt_rng_dev = {
.name = "zcrypt",
.data_read = zcrypt_rng_data_read,
+ .quality = 990,
};
static int zcrypt_rng_device_add(void)
@@ -1387,6 +1384,8 @@ static int zcrypt_rng_device_add(void)
goto out;
}
zcrypt_rng_buffer_index = 0;
+ if (!zcrypt_hwrng_seed)
+ zcrypt_rng_dev.quality = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 899ffa19f5ec..f41852768953 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -182,7 +182,7 @@ static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
/* Wait for the test message to complete. */
for (i = 0; i < 6; i++) {
- mdelay(300);
+ msleep(300);
rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2e65b989a9ea..a8556692f632 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -390,10 +390,8 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
return rc;
}
-static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
- int rc = 0;
-
QETH_DBF_TEXT(SETUP , 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -427,7 +425,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
- return rc;
}
static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 70eb2f61bb92..a1aaa36e9ebb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2158,10 +2158,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
return card ;
}
-static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
{
- int rc = 0;
-
QETH_DBF_TEXT(SETUP, 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -2196,7 +2194,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
- return rc;
}
/*
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 01a73395a017..c00ac4650dce 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock);
- atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+ atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
return port;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index acde3f5d6e9e..3fb410977014 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
if (scsi_device_get(sdev))
return NULL;
- atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
if (!get_device(&port->dev))
return NULL;
zfcp_erp_action_dismiss_port(port);
- atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+ atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
erp_action->port = port;
@@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
kref_get(&adapter->ref);
zfcp_erp_action_dismiss_adapter(adapter);
- atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+ atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act)
goto out;
- atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+ atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
- atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+ atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
zfcp_dbf_rec_run("erpubl1", &port->erp_action);
- atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+ atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
- atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+ atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
read_lock_irqsave(&adapter->erp_lock, flags);
if (list_empty(&adapter->erp_ready_head) &&
list_empty(&adapter->erp_running_head)) {
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+ atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status);
wake_up(&adapter->erp_done_wqh);
}
@@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
int sleep = 1;
struct zfcp_adapter *adapter = erp_action->adapter;
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+ atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
for (retries = 7; retries; retries--) {
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action);
write_unlock_irq(&adapter->erp_lock);
if (zfcp_fsf_exchange_config_data(erp_action)) {
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
return ZFCP_ERP_FAILED;
}
@@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
sleep *= 2;
}
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
/* all ports and LUNs are closed */
zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
@@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
if (zfcp_qdio_open(adapter->qdio)) {
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status);
return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
return ZFCP_ERP_FAILED;
}
- atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+ atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
return ZFCP_ERP_SUCCEEDED;
}
@@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+ atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
&zfcp_sdev->status);
}
@@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
switch (erp_action->action) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
- atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->port->status);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+ atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->adapter->status);
break;
}
@@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
- atomic_set_mask(mask, &adapter->status);
+ atomic_or(mask, &adapter->status);
if (!common_mask)
return;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
- atomic_set_mask(common_mask, &port->status);
+ atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
- atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
@@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
- atomic_clear_mask(mask, &adapter->status);
+ atomic_andnot(mask, &adapter->status);
if (!common_mask)
return;
@@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
- atomic_clear_mask(common_mask, &port->status);
+ atomic_andnot(common_mask, &port->status);
if (clear_counter)
atomic_set(&port->erp_counter, 0);
}
@@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
- atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
@@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
unsigned long flags;
- atomic_set_mask(mask, &port->status);
+ atomic_or(mask, &port->status);
if (!common_mask)
return;
@@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
- atomic_set_mask(common_mask,
+ atomic_or(common_mask,
&sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
@@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
unsigned long flags;
- atomic_clear_mask(mask, &port->status);
+ atomic_andnot(mask, &port->status);
if (!common_mask)
return;
@@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
- atomic_clear_mask(common_mask,
+ atomic_andnot(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
@@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- atomic_set_mask(mask, &zfcp_sdev->status);
+ atomic_or(mask, &zfcp_sdev->status);
}
/**
@@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
- atomic_clear_mask(mask, &zfcp_sdev->status);
+ atomic_andnot(mask, &zfcp_sdev->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
atomic_set(&zfcp_sdev->erp_counter, 0);
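For reference, a minimal sketch of the generic bitmask helpers that replace the
s390-only atomic_set_mask()/atomic_clear_mask() throughout these zfcp hunks.
The wrapper function is illustrative; only atomic_or() and atomic_andnot()
come from <linux/atomic.h>:

#include <linux/atomic.h>

/* Set, then clear, status bits with one atomic read-modify-write each,
 * exactly what the converted call sites above do. */
static void example_status_bits(atomic_t *status, int mask)
{
	atomic_or(mask, status);	/* was atomic_set_mask(mask, status) */
	atomic_andnot(mask, status);	/* was atomic_clear_mask(mask, status) */
}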
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 25d49f32ca63..237688af179b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data)
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
- atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work)
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
goto out;
- atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
retval = zfcp_fc_adisc(port);
if (retval == 0)
return;
/* send of ADISC was not possible */
- atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
out:
@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
- atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+ atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
!list_empty(&port->unit_list))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 21ec5e2f584c..522a633c866a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return;
- atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+ atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
zfcp_scsi_schedule_rports_block(adapter);
@@ -204,7 +204,7 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
zfcp_fsf_link_down_info_eval(req, NULL);
- };
+ }
}
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
- atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status);
break;
case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
return;
}
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+ atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
/* avoids adapter shutdown to be able to recognize
* events such as LINK UP */
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+ atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break;
case FSF_GOOD:
port->handle = header->port_handle;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+ atomic_or(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+ atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
&port->status);
/* check whether D_ID has changed during open */
/*
@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
case FSF_PORT_BOXED:
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
- atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+ atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+ atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/
- atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+ atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+ atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
break;
}
@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
zfcp_sdev = sdev_to_zfcp(sdev);
- atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+ atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED,
&zfcp_sdev->status);
@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_GOOD:
zfcp_sdev->lun_handle = header->lun_handle;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+ atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
}
break;
case FSF_GOOD:
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+ atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break;
}
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 495e1cb3afa6..dbf2b54703f7 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
spin_lock_irq(&qdio->req_q_lock);
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+ atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
- atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+ atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);
if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
- atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
} else {
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+ atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
}
@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
/* set index of first available SBALS / number of available SBALS */
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
- atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+ atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
if (adapter->scsi_host) {
adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
rc = ccw_device_siosl(adapter->ccw_device);
if (!rc)
- atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+ atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status);
}
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 82abfce1cb42..a209c3418898 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -299,7 +299,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
&pScript, GFP_KERNEL);
if(memory == NULL) {
- printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
+ printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
return NULL;
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 456e1567841c..95f7a76cfafc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -345,6 +345,7 @@ source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
+source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 91209e3d27e3..1a8c9b53fafa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
+obj-$(CONFIG_CXLFLASH) += cxlflash/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
@@ -171,6 +172,7 @@ scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
+scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
hv_storvsc-y := storvsc_drv.o
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index cac6b37d7b1b..8086bd0ac9fd 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -888,7 +888,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
scb->sense_len = SENSE_SIZE;
scb->cdb_len = cmd->cmd_len;
if (scb->cdb_len >= IMAX_CDB) {
- printk("max cdb length= %x\b", cmd->cmd_len);
+ printk("max cdb length= %x\n", cmd->cmd_len);
scb->cdb_len = IMAX_CDB;
}
scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index c4829d84b335..64ab9eaec428 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -79,7 +79,7 @@ struct ahc_hard_error_entry {
static const struct ahc_hard_error_entry ahc_hard_errors[] = {
{ ILLHADDR, "Illegal Host Access" },
- { ILLSADDR, "Illegal Sequencer Address referrenced" },
+ { ILLSADDR, "Illegal Sequencer Address referenced" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
{ SQPARERR, "Sequencer Parity Error" },
{ DPARERR, "Data-path Parity Error" },
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 4b135cca42a1..f6c336b05d5b 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -100,15 +100,11 @@ static int asd_map_memio(struct asd_ha_struct *asd_ha)
pci_name(asd_ha->pcidev));
goto Err;
}
- if (io_handle->flags & IORESOURCE_CACHEABLE)
- io_handle->addr = ioremap(io_handle->start,
- io_handle->len);
- else
- io_handle->addr = ioremap_nocache(io_handle->start,
- io_handle->len);
+ io_handle->addr = ioremap(io_handle->start, io_handle->len);
if (!io_handle->addr) {
asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
pci_name(asd_ha->pcidev));
+ err = -ENOMEM;
goto Err_unreq;
}
}
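A minimal sketch of the simplified mapping pattern used in this hunk (and in
the arcmsr hunk below), assuming ioremap() already returns an uncached MMIO
mapping on the architectures these drivers support; the function and parameter
names are illustrative stand-ins for the driver's io_handle fields:

#include <linux/io.h>

/* One uncached MMIO mapping; no IORESOURCE_CACHEABLE branch needed. */
static void __iomem *example_map_bar(resource_size_t bar_start,
				     unsigned long bar_len)
{
	return ioremap(bar_start, bar_len);	/* NULL on failure */
}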
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index edb43fda9f36..c831e30411fa 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -983,7 +983,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
{
int err, i;
u32 offs, size;
- struct asd_ll_el *el;
+ struct asd_ll_el *el = NULL;
struct asd_ctrla_phy_settings *ps;
struct asd_ctrla_phy_settings dflt_ps;
@@ -1004,6 +1004,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
size = sizeof(struct asd_ctrla_phy_settings);
ps = &dflt_ps;
+ goto out_process;
}
if (size == 0)
@@ -1028,7 +1029,7 @@ static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
goto out2;
}
-
+out_process:
err = asd_process_ctrla_phy_settings(asd_ha, ps);
if (err) {
ASD_DPRINTK("couldn't process ctrla phy settings\n");
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 914c39f9f388..333db5953607 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -259,10 +259,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
addr = (unsigned long)pci_resource_start(pdev, 0);
range = pci_resource_len(pdev, 0);
flags = pci_resource_flags(pdev, 0);
- if (flags & IORESOURCE_CACHEABLE)
- mem_base0 = ioremap(addr, range);
- else
- mem_base0 = ioremap_nocache(addr, range);
+ mem_base0 = ioremap(addr, range);
if (!mem_base0) {
pr_notice("arcmsr%d: memory mapping region fail\n",
acb->host->host_no);
@@ -3264,7 +3261,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
reg->doneq_index = 0;
writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
+ printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
acb->host->host_no);
return 1;
}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 315d6d6dcfc8..98f7e8cca52d 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3665,19 +3665,19 @@ bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
if (sfp->state_query_cbfn)
sfp->state_query_cbfn(sfp->state_query_cbarg,
sfp->status);
- sfp->media = NULL;
- }
+ sfp->media = NULL;
+ }
- if (sfp->portspeed) {
- sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
- if (sfp->state_query_cbfn)
- sfp->state_query_cbfn(sfp->state_query_cbarg,
- sfp->status);
- sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
- }
+ if (sfp->portspeed) {
+ sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+ if (sfp->state_query_cbfn)
+ sfp->state_query_cbfn(sfp->state_query_cbarg,
+ sfp->status);
+ sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+ }
- sfp->state_query_lock = 0;
- sfp->state_query_cbfn = NULL;
+ sfp->state_query_lock = 0;
+ sfp->state_query_cbfn = NULL;
}
/*
@@ -3878,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
bfa_trc(sfp, sfp->data_valid);
if (sfp->data_valid) {
u32 size = sizeof(struct sfp_mem_s);
- u8 *des = (u8 *) &(sfp->sfpmem);
+ u8 *des = (u8 *)(sfp->sfpmem);
memcpy(des, sfp->dbuf_kva, size);
}
/*
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7223b0006740..8367c11d554b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -851,6 +851,8 @@ bfad_im_module_exit(void)
if (bfad_im_scsi_vport_transport_template)
fc_release_transport(bfad_im_scsi_vport_transport_template);
+
+ idr_destroy(&bfad_im_port_index);
}
void
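The idr_destroy() added above releases the IDR's internally cached layers at
module exit. A minimal sketch of the usual alloc/remove/destroy pairing,
assuming a module-scope IDR like bfad_im_port_index; the example_* functions
are hypothetical:

#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_probe(void *object)
{
	/* Bind object to the smallest free id >= 0 (no upper bound). */
	return idr_alloc(&example_idr, object, 0, 0, GFP_KERNEL);
}

static void example_remove(int id)
{
	idr_remove(&example_idr, id);
}

static void example_exit(void)
{
	idr_destroy(&example_idr);	/* free cached idr layers */
}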
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 98d06d151958..d5cdc4776707 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2051,9 +2051,49 @@ static int bnx2fc_disable(struct net_device *netdev)
return rc;
}
+static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
+ struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+ struct fc_vport_identifiers vpid;
+ uint i, created = 0;
+
+ if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
+ BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
+ goto done;
+ }
+
+ /* Sanity check the first entry to make sure it's not 0 */
+ if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
+ wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
+ BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
+ goto done;
+ }
+
+ vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vpid.vport_type = FC_PORTTYPE_NPIV;
+ vpid.disable = false;
+
+ for (i = 0; i < npiv_tbl->count; i++) {
+ vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+ vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
+ scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
+ "NPIV[%u]:%016llx-%016llx",
+ created, vpid.port_name, vpid.node_name);
+ if (fc_vport_create(lport->host, 0, &vpid))
+ created++;
+ else
+ BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
+ }
+done:
+ return created;
+}
+
static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+ struct bnx2fc_hba *hba;
+ struct cnic_fc_npiv_tbl npiv_tbl;
+ struct fc_lport *lport;
if (interface->enabled == false) {
if (!ctlr->lp) {
@@ -2064,6 +2104,32 @@ static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
interface->enabled = true;
}
}
+
+ /* Create static NPIV ports if any are contained in NVRAM */
+ hba = interface->hba;
+ lport = ctlr->lp;
+
+ if (!hba)
+ goto done;
+
+ if (!hba->cnic)
+ goto done;
+
+ if (!lport)
+ goto done;
+
+ if (!lport->host)
+ goto done;
+
+ if (!hba->cnic->get_fc_npiv_tbl)
+ goto done;
+
+ memset(&npiv_tbl, 0, sizeof(npiv_tbl));
+ if (hba->cnic->get_fc_npiv_tbl(hba->cnic, &npiv_tbl))
+ goto done;
+
+ bnx2fc_npiv_create_vports(lport, &npiv_tbl);
+done:
return 0;
}
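A minimal sketch of the NPIV vport creation performed by the new
bnx2fc_npiv_create_vports() above: populate a struct fc_vport_identifiers and
hand it to the FC transport. The function name and WWN parameters are
illustrative:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/* Create one NPIV initiator vport on channel 0 of @shost. */
static bool example_create_npiv_vport(struct Scsi_Host *shost,
				      u64 wwnn, u64 wwpn)
{
	struct fc_vport_identifiers vpid = {
		.roles		= FC_PORT_ROLE_FCP_INITIATOR,
		.vport_type	= FC_PORTTYPE_NPIV,
		.disable	= false,
		.node_name	= wwnn,
		.port_name	= wwpn,
	};

	return fc_vport_create(shost, 0, &vpid) != NULL;
}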
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
new file mode 100644
index 000000000000..c052104e523e
--- /dev/null
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -0,0 +1,11 @@
+#
+# IBM CXL-attached Flash Accelerator SCSI Driver
+#
+
+config CXLFLASH
+ tristate "Support for IBM CAPI Flash"
+ depends on PCI && SCSI && CXL && EEH
+ default m
+ help
+ Allows CAPI Accelerated IO to Flash.
+ If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
new file mode 100644
index 000000000000..9e39866d473b
--- /dev/null
+++ b/drivers/scsi/cxlflash/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CXLFLASH) += cxlflash.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
new file mode 100644
index 000000000000..1c56037146e1
--- /dev/null
+++ b/drivers/scsi/cxlflash/common.h
@@ -0,0 +1,208 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_COMMON_H
+#define _CXLFLASH_COMMON_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+
+
+#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
+
+#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
+#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
+/* SCSI expects max_sectors in units of 512-byte sectors (16 MB / 512 = 32768) */
+#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512)
+
+#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
+#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
+
+/* AFU command retry limit */
+#define MC_RETRY_CNT 5 /* sufficient for SCSI check and certain AFU errors */
+
+/* Command management definitions */
+/* Must be a power of two for alignment and efficient array index derivation */
+#define CXLFLASH_NUM_CMDS (2 * CXLFLASH_MAX_CMDS)
+
+#define CXLFLASH_MAX_CMDS 16
+#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
+
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
+}
+
+/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
+#define CMD_BUFSIZE SIZE_4K
+
+/* flags in IOA status area for host use */
+#define B_DONE 0x01
+#define B_ERROR 0x02 /* set with B_DONE */
+#define B_TIMEOUT 0x04 /* set with B_DONE & B_ERROR */
+
+enum cxlflash_lr_state {
+ LINK_RESET_INVALID,
+ LINK_RESET_REQUIRED,
+ LINK_RESET_COMPLETE
+};
+
+enum cxlflash_init_state {
+ INIT_STATE_NONE,
+ INIT_STATE_PCI,
+ INIT_STATE_AFU,
+ INIT_STATE_SCSI
+};
+
+enum cxlflash_state {
+ STATE_NORMAL, /* Normal running state, everything good */
+ STATE_LIMBO, /* Limbo running state, trying to reset/recover */
+ STATE_FAILTERM /* Failed/terminating state, error out users/threads */
+};
+
+/*
+ * Each context has its own set of resource handles that is visible
+ * only from that context.
+ */
+
+struct cxlflash_cfg {
+ struct afu *afu;
+ struct cxl_context *mcctx;
+
+ struct pci_dev *dev;
+ struct pci_device_id *dev_id;
+ struct Scsi_Host *host;
+
+ ulong cxlflash_regs_pci;
+
+ struct work_struct work_q;
+ enum cxlflash_init_state init_state;
+ enum cxlflash_lr_state lr_state;
+ int lr_port;
+
+ struct cxl_afu *cxl_afu;
+
+ struct pci_pool *cxlflash_cmd_pool;
+ struct pci_dev *parent_dev;
+
+ atomic_t recovery_threads;
+ struct mutex ctx_recovery_mutex;
+ struct mutex ctx_tbl_list_mutex;
+ struct ctx_info *ctx_tbl[MAX_CONTEXT];
+ struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
+ struct file_operations cxl_fops;
+
+ atomic_t num_user_contexts;
+
+ /* Parameters that are LUN table related */
+ int last_lun_index[CXLFLASH_NUM_FC_PORTS];
+ int promote_lun_index;
+ struct list_head lluns; /* list of llun_info structs */
+
+ wait_queue_head_t tmf_waitq;
+ bool tmf_active;
+ wait_queue_head_t limbo_waitq;
+ enum cxlflash_state state;
+};
+
+struct afu_cmd {
+ struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
+ struct sisl_ioasa sa; /* IOASA must follow IOARCB */
+ spinlock_t slock;
+ struct completion cevent;
+ char *buf; /* per command buffer */
+ struct afu *parent;
+ int slot;
+ atomic_t free;
+
+ u8 cmd_tmf:1;
+
+ /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
+ * However for performance reasons the IOARCB/IOASA should be
+ * cache line aligned.
+ */
+} __aligned(cache_line_size());
+
+struct afu {
+ /* Stuff requiring alignment go first. */
+
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
+ /*
+ * Command & data for AFU commands.
+ */
+ struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
+
+ /* Beware of alignment up to here. Preferably introduce new
+ * fields after this point
+ */
+
+ /* AFU HW */
+ struct cxl_ioctl_start_work work;
+ struct cxlflash_afu_map *afu_map; /* entire MMIO map */
+ struct sisl_host_map *host_map; /* MC host map */
+ struct sisl_ctrl_map *ctrl_map; /* MC control map */
+
+ ctx_hndl_t ctx_hndl; /* master's context handle */
+ u64 *hrrq_start;
+ u64 *hrrq_end;
+ u64 *hrrq_curr;
+ bool toggle;
+ bool read_room;
+ atomic64_t room;
+ u64 hb;
+ u32 cmd_couts; /* Number of command checkouts */
+ u32 internal_lun; /* User-desired LUN mode for this AFU */
+
+ char version[8];
+ u64 interface_version;
+
+ struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
+
+};
+
+static inline u64 lun_to_lunid(u64 lun)
+{
+ u64 lun_id;
+
+ int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
+ return swab64(lun_id);
+}
+
+int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
+void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
+int cxlflash_afu_reset(struct cxlflash_cfg *);
+struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
+void cxlflash_cmd_checkin(struct afu_cmd *);
+int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
+void cxlflash_list_init(void);
+void cxlflash_term_global_luns(void);
+void cxlflash_free_errpage(void);
+int cxlflash_ioctl(struct scsi_device *, int, void __user *);
+void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
+int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
+void cxlflash_term_local_luns(struct cxlflash_cfg *);
+void cxlflash_restore_luntable(struct cxlflash_cfg *);
+
+#endif /* ifndef _CXLFLASH_COMMON_H */
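The power-of-two constraint on CXLFLASH_NUM_CMDS above exists so command-slot
selection can use a mask instead of a modulo. A minimal sketch of that index
derivation; the names are illustrative:

#define EXAMPLE_POOL_SIZE 32u	/* must be a power of two */

/* counter & (N - 1) equals counter % N whenever N is a power of two,
 * and compiles to a single AND instruction. */
static unsigned int example_next_slot(unsigned int *counter)
{
	return (*counter)++ & (EXAMPLE_POOL_SIZE - 1);
}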
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
new file mode 100644
index 000000000000..d98ad0ff64c1
--- /dev/null
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -0,0 +1,266 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+/**
+ * create_local() - allocate and initialize a local LUN information structure
+ * @sdev: SCSI device associated with LUN.
+ * @wwid: World Wide Node Name for LUN.
+ *
+ * Return: Allocated local llun_info structure on success, NULL on failure
+ */
+static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
+{
+ struct llun_info *lli = NULL;
+
+ lli = kzalloc(sizeof(*lli), GFP_KERNEL);
+ if (unlikely(!lli)) {
+ pr_err("%s: could not allocate lli\n", __func__);
+ goto out;
+ }
+
+ lli->sdev = sdev;
+ lli->newly_created = true;
+ lli->host_no = sdev->host->host_no;
+ lli->in_table = false;
+
+ memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
+out:
+ return lli;
+}
+
+/**
+ * create_global() - allocate and initialize a global LUN information structure
+ * @sdev: SCSI device associated with LUN.
+ * @wwid: World Wide Node Name for LUN.
+ *
+ * Return: Allocated global glun_info structure on success, NULL on failure
+ */
+static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
+{
+ struct glun_info *gli = NULL;
+
+ gli = kzalloc(sizeof(*gli), GFP_KERNEL);
+ if (unlikely(!gli)) {
+ pr_err("%s: could not allocate gli\n", __func__);
+ goto out;
+ }
+
+ mutex_init(&gli->mutex);
+ memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
+out:
+ return gli;
+}
+
+/**
+ * refresh_local() - find and update local LUN information structure by WWID
+ * @cfg: Internal structure associated with the host.
+ * @wwid: WWID associated with LUN.
+ *
+ * If a LUN with the given WWID is found in the list, refresh its state by
+ * clearing its newly_created field.
+ *
+ * Return: Found local llun_info structure on success, NULL on failure
+ */
+static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
+{
+ struct llun_info *lli, *temp;
+
+ list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
+ if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
+ lli->newly_created = false;
+ return lli;
+ }
+
+ return NULL;
+}
+
+/**
+ * lookup_global() - find a global LUN information structure by WWID
+ * @wwid: WWID associated with LUN.
+ *
+ * Return: Found global lun_info structure on success, NULL on failure
+ */
+static struct glun_info *lookup_global(u8 *wwid)
+{
+ struct glun_info *gli, *temp;
+
+ list_for_each_entry_safe(gli, temp, &global.gluns, list)
+ if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
+ return gli;
+
+ return NULL;
+}
+
+/**
+ * find_and_create_lun() - find or create a local LUN information structure
+ * @sdev: SCSI device associated with LUN.
+ * @wwid: WWID associated with LUN.
+ *
+ * The LUN is kept both in a local list (per adapter) and in a global list
+ * (across all adapters). Certain attributes of the LUN are local to the
+ * adapter (such as index, port selection mask etc.).
+ * The block allocation map is shared across all adapters (i.e. associated
+ * with the global list). Since different attributes are associated with
+ * the per-adapter and global entries, allocate two separate structures for each
+ * LUN (one local, one global).
+ *
+ * Keep a pointer back from the local to the global entry.
+ *
+ * Return: Found/Allocated local lun_info structure on success, NULL on failure
+ */
+static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
+{
+ struct llun_info *lli = NULL;
+ struct glun_info *gli = NULL;
+ struct Scsi_Host *shost = sdev->host;
+ struct cxlflash_cfg *cfg = shost_priv(shost);
+
+ mutex_lock(&global.mutex);
+ if (unlikely(!wwid))
+ goto out;
+
+ lli = refresh_local(cfg, wwid);
+ if (lli)
+ goto out;
+
+ lli = create_local(sdev, wwid);
+ if (unlikely(!lli))
+ goto out;
+
+ gli = lookup_global(wwid);
+ if (gli) {
+ lli->parent = gli;
+ list_add(&lli->list, &cfg->lluns);
+ goto out;
+ }
+
+ gli = create_global(sdev, wwid);
+ if (unlikely(!gli)) {
+ kfree(lli);
+ lli = NULL;
+ goto out;
+ }
+
+ lli->parent = gli;
+ list_add(&lli->list, &cfg->lluns);
+
+ list_add(&gli->list, &global.gluns);
+
+out:
+ mutex_unlock(&global.mutex);
+ pr_debug("%s: returning %p\n", __func__, lli);
+ return lli;
+}
+
+/**
+ * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
+ * @cfg: Internal structure associated with the host.
+ */
+void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
+{
+ struct llun_info *lli, *temp;
+
+ mutex_lock(&global.mutex);
+ list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
+ list_del(&lli->list);
+ kfree(lli);
+ }
+ mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_list_init() - initializes the global LUN list
+ */
+void cxlflash_list_init(void)
+{
+ INIT_LIST_HEAD(&global.gluns);
+ mutex_init(&global.mutex);
+ global.err_page = NULL;
+}
+
+/**
+ * cxlflash_term_global_luns() - frees resources associated with global LUN list
+ */
+void cxlflash_term_global_luns(void)
+{
+ struct glun_info *gli, *temp;
+
+ mutex_lock(&global.mutex);
+ list_for_each_entry_safe(gli, temp, &global.gluns, list) {
+ list_del(&gli->list);
+ cxlflash_ba_terminate(&gli->blka.ba_lun);
+ kfree(gli);
+ }
+ mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_manage_lun() - handles LUN management activities
+ * @sdev: SCSI device associated with LUN.
+ * @manage: Manage ioctl data structure.
+ *
+ * This routine is used to notify the driver about a LUN's WWID and associate
+ * SCSI devices (sdev) with a global LUN instance. Additionally, it serves to
+ * change a LUN's operating mode: legacy or superpipe.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_manage_lun(struct scsi_device *sdev,
+ struct dk_cxlflash_manage_lun *manage)
+{
+ int rc = 0;
+ struct llun_info *lli = NULL;
+ u64 flags = manage->hdr.flags;
+ u32 chan = sdev->channel;
+
+ lli = find_and_create_lun(sdev, manage->wwid);
+ pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
+ __func__, get_unaligned_le64(&manage->wwid[0]),
+ get_unaligned_le64(&manage->wwid[8]),
+ manage->hdr.flags, lli);
+ if (unlikely(!lli)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
+ if (lli->newly_created)
+ lli->port_sel = CHAN2PORT(chan);
+ else
+ lli->port_sel = BOTH_PORTS;
+ /* Store off lun in unpacked, AFU-friendly format */
+ lli->lun_id[chan] = lun_to_lunid(sdev->lun);
+ sdev->hostdata = lli;
+ } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
+ if (lli->parent->mode != MODE_NONE)
+ rc = -EBUSY;
+ else
+ sdev->hostdata = NULL;
+ }
+
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
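Both teardown routines above walk their lists with the _safe iterator, which
caches the next node so the current entry can be freed mid-walk. A minimal,
self-contained sketch of the pattern with a hypothetical node type:

#include <linux/list.h>
#include <linux/slab.h>

struct example_node {
	struct list_head list;
};

/* Delete and free every entry on @head. */
static void example_drain(struct list_head *head)
{
	struct example_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, head, list) {
		list_del(&node->list);
		kfree(node);	/* safe: @tmp already holds the next node */
	}
}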
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
new file mode 100644
index 000000000000..3e3ccf16e7c2
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.c
@@ -0,0 +1,2494 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <asm/unaligned.h>
+
+#include <misc/cxl.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "main.h"
+#include "sislite.h"
+#include "common.h"
+
+MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
+MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+
+
+/**
+ * cxlflash_cmd_checkout() - checks out an AFU command
+ * @afu: AFU to checkout from.
+ *
+ * Commands are checked out in a round-robin fashion. Note that since
+ * the command pool is larger than the hardware queue, the majority of
+ * times we will only loop once or twice before getting a command. The
+ * buffer and CDB within the command are initialized (zeroed) prior to
+ * returning.
+ *
+ * Return: The checked out command or NULL when command pool is empty.
+ */
+struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
+{
+ int k, dec = CXLFLASH_NUM_CMDS;
+ struct afu_cmd *cmd;
+
+ while (dec--) {
+ k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
+
+ cmd = &afu->cmd[k];
+
+ if (!atomic_dec_if_positive(&cmd->free)) {
+ pr_debug("%s: returning found index=%d\n",
+ __func__, cmd->slot);
+ memset(cmd->buf, 0, CMD_BUFSIZE);
+ memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+ return cmd;
+ }
+ }
+
+ return NULL;
+}
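/*
 * Illustrative sketch (not part of the patch): the lock-free claim above
 * works because atomic_dec_if_positive() returns the decremented value and
 * never drops the counter below zero. Only the winning 1 -> 0 transition
 * yields 0, so at most one caller claims a given free slot.
 */
static inline bool example_try_claim(atomic_t *free)
{
	return atomic_dec_if_positive(free) == 0;
}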
+
+/**
+ * cxlflash_cmd_checkin() - checks in an AFU command
+ * @cmd: AFU command to checkin.
+ *
+ * Safe to pass commands that have already been checked in. Several
+ * internal tracking fields are reset as part of the checkin. Note
+ * that these are intentionally reset prior to toggling the free bit
+ * to avoid clobbering values in the event that the command is checked
+ * out right away.
+ */
+void cxlflash_cmd_checkin(struct afu_cmd *cmd)
+{
+ cmd->rcb.scp = NULL;
+ cmd->rcb.timeout = 0;
+ cmd->sa.ioasc = 0;
+ cmd->cmd_tmf = false;
+ cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
+
+ if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
+ pr_err("%s: Freeing cmd (%d) that is not in use!\n",
+ __func__, cmd->slot);
+ return;
+ }
+
+ pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
+}
+
+/**
+ * process_cmd_err() - command error handler
+ * @cmd: AFU command that experienced the error.
+ * @scp: SCSI command associated with the AFU command in error.
+ *
+ * Translates error bits from AFU command to SCSI command results.
+ */
+static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
+{
+ struct sisl_ioarcb *ioarcb;
+ struct sisl_ioasa *ioasa;
+
+ if (unlikely(!cmd))
+ return;
+
+ ioarcb = &(cmd->rcb);
+ ioasa = &(cmd->sa);
+
+ if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
+ pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
+ __func__, cmd, scp);
+ scp->result = (DID_ERROR << 16);
+ }
+
+ if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
+ pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
+ __func__, cmd, scp);
+ scp->result = (DID_ERROR << 16);
+ }
+
+ pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
+ "afu_extra=0x%X, scsi_entra=0x%X, fc_extra=0x%X\n",
+ __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
+ ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
+ ioasa->fc_extra);
+
+ if (ioasa->rc.scsi_rc) {
+ /* We have a SCSI status */
+ if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
+ memcpy(scp->sense_buffer, ioasa->sense_data,
+ SISL_SENSE_DATA_LEN);
+ scp->result = ioasa->rc.scsi_rc;
+ } else
+ scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
+ }
+
+ /*
+ * We encountered an error. Set scp->result based on nature
+ * of error.
+ */
+ if (ioasa->rc.fc_rc) {
+ /* We have an FC status */
+ switch (ioasa->rc.fc_rc) {
+ case SISL_FC_RC_LINKDOWN:
+ scp->result = (DID_REQUEUE << 16);
+ break;
+ case SISL_FC_RC_RESID:
+ /* This indicates an FCP resid underrun */
+ if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
+ /* If the SISL_RC_FLAGS_OVERRUN flag was set,
+ * then we will handle this error elsewhere.
+ * If not, we must handle it here.
+ * This is probably an AFU bug. We will
+ * attempt a retry to see if that resolves it.
+ */
+ scp->result = (DID_ERROR << 16);
+ }
+ break;
+ case SISL_FC_RC_RESIDERR:
+ /* Resid mismatch between adapter and device */
+ case SISL_FC_RC_TGTABORT:
+ case SISL_FC_RC_ABORTOK:
+ case SISL_FC_RC_ABORTFAIL:
+ case SISL_FC_RC_NOLOGI:
+ case SISL_FC_RC_ABORTPEND:
+ case SISL_FC_RC_WRABORTPEND:
+ case SISL_FC_RC_NOEXP:
+ case SISL_FC_RC_INUSE:
+ scp->result = (DID_ERROR << 16);
+ break;
+ }
+ }
+
+ if (ioasa->rc.afu_rc) {
+ /* We have an AFU error */
+ switch (ioasa->rc.afu_rc) {
+ case SISL_AFU_RC_NO_CHANNELS:
+ scp->result = (DID_MEDIUM_ERROR << 16);
+ break;
+ case SISL_AFU_RC_DATA_DMA_ERR:
+ switch (ioasa->afu_extra) {
+ case SISL_AFU_DMA_ERR_PAGE_IN:
+ /* Retry */
+ scp->result = (DID_IMM_RETRY << 16);
+ break;
+ case SISL_AFU_DMA_ERR_INVALID_EA:
+ default:
+ scp->result = (DID_ERROR << 16);
+ }
+ break;
+ case SISL_AFU_RC_OUT_OF_DATA_BUFS:
+ /* Retry */
+ scp->result = (DID_ALLOC_FAILURE << 16);
+ break;
+ default:
+ scp->result = (DID_ERROR << 16);
+ }
+ }
+}
+
+/**
+ * cmd_complete() - command completion handler
+ * @cmd: AFU command that has completed.
+ *
+ * Prepares and submits a command that has either completed or timed out to
+ * the SCSI stack. Checks the AFU command back into the command pool for
+ * non-internal (rcb.scp populated) commands.
+ */
+static void cmd_complete(struct afu_cmd *cmd)
+{
+ struct scsi_cmnd *scp;
+ u32 resid;
+ ulong lock_flags;
+ struct afu *afu = cmd->parent;
+ struct cxlflash_cfg *cfg = afu->parent;
+ bool cmd_is_tmf;
+
+ spin_lock_irqsave(&cmd->slock, lock_flags);
+ cmd->sa.host_use_b[0] |= B_DONE;
+ spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+ if (cmd->rcb.scp) {
+ scp = cmd->rcb.scp;
+ if (unlikely(cmd->sa.rc.afu_rc ||
+ cmd->sa.rc.scsi_rc ||
+ cmd->sa.rc.fc_rc))
+ process_cmd_err(cmd, scp);
+ else
+ scp->result = (DID_OK << 16);
+
+ resid = cmd->sa.resid;
+ cmd_is_tmf = cmd->cmd_tmf;
+ cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */
+
+ pr_debug("%s: calling scsi_set_resid, scp=%p "
+ "result=%X resid=%d\n", __func__,
+ scp, scp->result, resid);
+
+ scsi_set_resid(scp, resid);
+ scsi_dma_unmap(scp);
+ scp->scsi_done(scp);
+
+ if (cmd_is_tmf) {
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ cfg->tmf_active = false;
+ wake_up_all_locked(&cfg->tmf_waitq);
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
+ lock_flags);
+ }
+ } else
+ complete(&cmd->cevent);
+}
+
+/**
+ * send_tmf() - sends a Task Management Function (TMF)
+ * @afu: AFU to checkout from.
+ * @scp: SCSI command from stack.
+ * @tmfcmd: TMF command to send.
+ *
+ * Return:
+ * 0 on success
+ * SCSI_MLQUEUE_HOST_BUSY when host is busy
+ */
+static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
+{
+ struct afu_cmd *cmd;
+
+ u32 port_sel = scp->device->channel + 1;
+ short lflag = 0;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ ulong lock_flags;
+ int rc = 0;
+
+ cmd = cxlflash_cmd_checkout(afu);
+ if (unlikely(!cmd)) {
+ pr_err("%s: could not get a free command\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /* If a Task Management Function is active, do not send one more. */
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ if (cfg->tmf_active)
+ wait_event_interruptible_locked_irq(cfg->tmf_waitq,
+ !cfg->tmf_active);
+ cfg->tmf_active = true;
+ cmd->cmd_tmf = true;
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.port_sel = port_sel;
+ cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+
+ lflag = SISL_REQ_FLAGS_TMF_CMD;
+
+ cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
+ SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
+
+ /* Stash the scp in the reserved field, for reuse during interrupt */
+ cmd->rcb.scp = scp;
+
+ /* Copy the CDB from the cmd passed in */
+ memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
+
+ /* Send the command */
+ rc = cxlflash_send_cmd(afu, cmd);
+ if (unlikely(rc)) {
+ cxlflash_cmd_checkin(cmd);
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ cfg->tmf_active = false;
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+out:
+ return rc;
+}
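/*
 * Illustrative sketch (not part of the patch): send_tmf() serializes TMFs
 * by sleeping on tmf_waitq while holding the waitqueue's own lock, via the
 * *_locked_irq wait/wake variants. The distilled shape of that handshake:
 */
static void example_tmf_gate(struct cxlflash_cfg *cfg)
{
	ulong lock_flags;

	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
	if (cfg->tmf_active)	/* sleep until the in-flight TMF completes */
		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
						    !cfg->tmf_active);
	cfg->tmf_active = true;	/* claim the single TMF slot */
	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

	/* The completion path (cmd_complete) clears tmf_active and calls
	 * wake_up_all_locked(&cfg->tmf_waitq) under the same lock. */
}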
+
+/**
+ * cxlflash_driver_info() - information handler for this host driver
+ * @host: SCSI host associated with device.
+ *
+ * Return: A string describing the device.
+ */
+static const char *cxlflash_driver_info(struct Scsi_Host *host)
+{
+ return CXLFLASH_ADAPTER_NAME;
+}
+
+/**
+ * cxlflash_queuecommand() - sends a mid-layer request
+ * @host: SCSI host associated with device.
+ * @scp: SCSI command to send.
+ *
+ * Return:
+ * 0 on success
+ * SCSI_MLQUEUE_HOST_BUSY when host is busy
+ */
+static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu *afu = cfg->afu;
+ struct pci_dev *pdev = cfg->dev;
+ struct afu_cmd *cmd;
+ u32 port_sel = scp->device->channel + 1;
+ int nseg, i, ncount;
+ struct scatterlist *sg;
+ ulong lock_flags;
+ short lflag = 0;
+ int rc = 0;
+
+ pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
+ __func__, scp, host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ /* If a Task Management Function is active, wait for it to complete
+ * before continuing with regular commands.
+ */
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ if (cfg->tmf_active) {
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+ switch (cfg->state) {
+ case STATE_LIMBO:
+ dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
+ __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ case STATE_FAILTERM:
+ dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
+ __func__);
+ scp->result = (DID_NO_CONNECT << 16);
+ scp->scsi_done(scp);
+ rc = 0;
+ goto out;
+ default:
+ break;
+ }
+
+ cmd = cxlflash_cmd_checkout(afu);
+ if (unlikely(!cmd)) {
+ pr_err("%s: could not get a free command\n", __func__);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.port_sel = port_sel;
+ cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
+ lflag = SISL_REQ_FLAGS_HOST_WRITE;
+ else
+ lflag = SISL_REQ_FLAGS_HOST_READ;
+
+ cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
+ SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
+
+ /* Stash the scp in the reserved field, for reuse during interrupt */
+ cmd->rcb.scp = scp;
+
+ nseg = scsi_dma_map(scp);
+ if (unlikely(nseg < 0)) {
+ dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
+ __func__, nseg);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ ncount = scsi_sg_count(scp);
+ scsi_for_each_sg(scp, sg, ncount, i) {
+ cmd->rcb.data_len = sg_dma_len(sg);
+ cmd->rcb.data_ea = sg_dma_address(sg);
+ }
+
+ /* Copy the CDB from the scsi_cmnd passed in */
+ memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
+
+ /* Send the command */
+ rc = cxlflash_send_cmd(afu, cmd);
+ if (unlikely(rc)) {
+ cxlflash_cmd_checkin(cmd);
+ scsi_dma_unmap(scp);
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * cxlflash_eh_device_reset_handler() - reset a single LUN
+ * @scp: SCSI command to send.
+ *
+ * Return:
+ * SUCCESS as defined in scsi/scsi.h
+ * FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
+{
+ int rc = SUCCESS;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+ struct afu *afu = cfg->afu;
+ int rcr = 0;
+
+ pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+ host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ switch (cfg->state) {
+ case STATE_NORMAL:
+ rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+ if (unlikely(rcr))
+ rc = FAILED;
+ break;
+ case STATE_LIMBO:
+ wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+ if (cfg->state == STATE_NORMAL)
+ break;
+ /* fall through */
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_eh_host_reset_handler() - reset the host adapter
+ * @scp: SCSI command from stack identifying host.
+ *
+ * Return:
+ * SUCCESS as defined in scsi/scsi.h
+ * FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
+{
+ int rc = SUCCESS;
+ int rcr = 0;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+
+ pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+ host->host_no, scp->device->channel,
+ scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ switch (cfg->state) {
+ case STATE_NORMAL:
+ cfg->state = STATE_LIMBO;
+ scsi_block_requests(cfg->host);
+ cxlflash_mark_contexts_error(cfg);
+ rcr = cxlflash_afu_reset(cfg);
+ if (rcr) {
+ rc = FAILED;
+ cfg->state = STATE_FAILTERM;
+ } else
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->limbo_waitq);
+ scsi_unblock_requests(cfg->host);
+ break;
+ case STATE_LIMBO:
+ wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+ if (cfg->state == STATE_NORMAL)
+ break;
+ /* fall through */
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_change_queue_depth() - change the queue depth for the device
+ * @sdev: SCSI device destined for queue depth change.
+ * @qdepth: Requested queue depth value to set.
+ *
+ * The requested queue depth is capped to the maximum supported value.
+ *
+ * Return: The actual queue depth set.
+ */
+static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+
+ if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
+ qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
+
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+/**
+ * cxlflash_show_port_status() - queries and presents the current port status
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ char *disp_status;
+ int rc;
+ u32 port;
+ u64 status;
+ u64 *fc_regs;
+
+ rc = kstrtouint((attr->attr.name + 4), 10, &port);
+ if (rc || (port >= NUM_FC_PORTS))
+ return 0;
+
+ fc_regs = &afu->afu_map->global.fc_regs[port][0];
+ status =
+ (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);
+
+ if (status == FC_MTIP_STATUS_ONLINE)
+ disp_status = "online";
+ else if (status == FC_MTIP_STATUS_OFFLINE)
+ disp_status = "offline";
+ else
+ disp_status = "unknown";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
+}
+
+/**
+ * cxlflash_show_lun_mode() - presents the current LUN mode of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the lun mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_lun_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
+}
+
+/**
+ * cxlflash_store_lun_mode() - sets the LUN mode of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the lun mode.
+ * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * The CXL Flash AFU supports a dummy LUN mode where the external
+ * links and storage are not required. Space on the FPGA is used
+ * to create 1 or 2 small LUNs which are presented to the system
+ * as if they were a normal storage device. This feature is useful
+ * during development and also provides manufacturing with a way
+ * to test the AFU without an actual device.
+ *
+ * 0 = external LUN[s] (default)
+ * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
+ * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
+ * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
+ * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
+ *
+ * Return: The number of bytes consumed from @buf (@count).
+ */
+static ssize_t cxlflash_store_lun_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+ struct afu *afu = cfg->afu;
+ int rc;
+ u32 lun_mode;
+
+ rc = kstrtouint(buf, 10, &lun_mode);
+ if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+ afu->internal_lun = lun_mode;
+ cxlflash_afu_reset(cfg);
+ scsi_scan_host(cfg->host);
+ }
+
+ return count;
+}
+
+/**
+ * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the ioctl version.
+ * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_ioctl_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+}
+
+/**
+ * cxlflash_show_dev_mode() - presents the current mode of the device
+ * @dev: Generic device associated with the device.
+ * @attr: Device attribute representing the device mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_dev_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ sdev->hostdata ? "superpipe" : "legacy");
+}
+
+/**
+ * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
+{
+ struct pci_dev *pdev = cfg->dev;
+
+ if (pci_channel_offline(pdev))
+ wait_event_timeout(cfg->limbo_waitq,
+ !pci_channel_offline(pdev),
+ CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
+}
+
+/*
+ * Host attributes
+ */
+static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
+static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
+static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
+ cxlflash_store_lun_mode);
+static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
+
+static struct device_attribute *cxlflash_host_attrs[] = {
+ &dev_attr_port0,
+ &dev_attr_port1,
+ &dev_attr_lun_mode,
+ &dev_attr_ioctl_version,
+ NULL
+};
+
+/*
+ * Device attributes
+ */
+static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
+
+static struct device_attribute *cxlflash_dev_attrs[] = {
+ &dev_attr_mode,
+ NULL
+};
+
+/*
+ * Host template
+ */
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = CXLFLASH_ADAPTER_NAME,
+ .info = cxlflash_driver_info,
+ .ioctl = cxlflash_ioctl,
+ .proc_name = CXLFLASH_NAME,
+ .queuecommand = cxlflash_queuecommand,
+ .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
+ .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
+ .change_queue_depth = cxlflash_change_queue_depth,
+ .cmd_per_lun = 16,
+ .can_queue = CXLFLASH_MAX_CMDS,
+ .this_id = -1,
+ .sg_tablesize = SG_NONE, /* No scatter gather support. */
+ .max_sectors = CXLFLASH_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = cxlflash_host_attrs,
+ .sdev_attrs = cxlflash_dev_attrs,
+};
+
+/*
+ * Device dependent values
+ */
+static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+
+/*
+ * PCI device binding table
+ */
+static struct pci_device_id cxlflash_pci_table[] = {
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
+
+/**
+ * free_mem() - free memory associated with the AFU
+ * @cfg: Internal structure associated with the host.
+ */
+static void free_mem(struct cxlflash_cfg *cfg)
+{
+ int i;
+ char *buf = NULL;
+ struct afu *afu = cfg->afu;
+
+ if (cfg->afu) {
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+ buf = afu->cmd[i].buf;
+ if (!((u64)buf & (PAGE_SIZE - 1)))
+ free_page((ulong)buf);
+ }
+
+ free_pages((ulong)afu, get_order(sizeof(struct afu)));
+ cfg->afu = NULL;
+ }
+}
+
+/**
+ * stop_afu() - stops the AFU command timers and unmaps the MMIO space
+ * @cfg: Internal structure associated with the host.
+ *
+ * Safe to call with AFU in a partially allocated/initialized state.
+ */
+static void stop_afu(struct cxlflash_cfg *cfg)
+{
+ int i;
+ struct afu *afu = cfg->afu;
+
+ if (likely(afu)) {
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
+ complete(&afu->cmd[i].cevent);
+
+ if (likely(afu->afu_map)) {
+ cxl_psa_unmap((void *)afu->afu_map);
+ afu->afu_map = NULL;
+ }
+ }
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg: Internal structure associated with the host.
+ * @level: Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+{
+ int rc = 0;
+ struct afu *afu = cfg->afu;
+
+ if (!afu || !cfg->mcctx) {
+ pr_err("%s: returning from term_mc with NULL afu or MC\n",
+ __func__);
+ return;
+ }
+
+ switch (level) {
+ case UNDO_START:
+ rc = cxl_stop_context(cfg->mcctx);
+ BUG_ON(rc);
+ case UNMAP_THREE:
+ cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
+ case UNMAP_TWO:
+ cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
+ case UNMAP_ONE:
+ cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
+ case FREE_IRQ:
+ cxl_free_afu_irqs(cfg->mcctx);
+ case RELEASE_CONTEXT:
+ cfg->mcctx = NULL;
+ }
+}
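/*
 * Illustrative sketch (not part of the patch): term_mc() relies on switch
 * fall-through so a single enum records how deep setup got and teardown
 * runs every undo step from that depth downward. Generic shape:
 */
enum example_level { GOT_STEP_C, GOT_STEP_B, GOT_STEP_A };

static void example_teardown(enum example_level level)
{
	switch (level) {
	case GOT_STEP_C:
		/* undo step C, then fall through */
	case GOT_STEP_B:
		/* undo step B, then fall through */
	case GOT_STEP_A:
		/* undo step A */
		break;
	}
}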
+
+/**
+ * term_afu() - terminates the AFU
+ * @cfg: Internal structure associated with the host.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_afu(struct cxlflash_cfg *cfg)
+{
+ term_mc(cfg, UNDO_START);
+
+ if (cfg->afu)
+ stop_afu(cfg);
+
+ pr_debug("%s: returning\n", __func__);
+}
+
+/**
+ * cxlflash_remove() - PCI entry point to tear down host
+ * @pdev: PCI device associated with the host.
+ *
+ * Safe to use as a cleanup in partially allocated/initialized state.
+ */
+static void cxlflash_remove(struct pci_dev *pdev)
+{
+ struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+ ulong lock_flags;
+
+ /* If a Task Management Function is active, wait for it to complete
+ * before continuing with remove.
+ */
+ spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+ if (cfg->tmf_active)
+ wait_event_interruptible_locked_irq(cfg->tmf_waitq,
+ !cfg->tmf_active);
+ spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+ cfg->state = STATE_FAILTERM;
+ cxlflash_stop_term_user_contexts(cfg);
+
+ switch (cfg->init_state) {
+ case INIT_STATE_SCSI:
+ cxlflash_term_local_luns(cfg);
+ scsi_remove_host(cfg->host);
+ scsi_host_put(cfg->host);
+ /* Fall through */
+ case INIT_STATE_AFU:
+ term_afu(cfg);
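+ /* Fall through */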
+ case INIT_STATE_PCI:
+ pci_release_regions(cfg->dev);
+ pci_disable_device(pdev);
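+ /* Fall through */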
+ case INIT_STATE_NONE:
+ flush_work(&cfg->work_q);
+ free_mem(cfg);
+ break;
+ }
+
+ pr_debug("%s: returning\n", __func__);
+}
+
+/**
+ * alloc_mem() - allocates the AFU and its command pool
+ * @cfg: Internal structure associated with the host.
+ *
+ * A partially allocated state remains on failure.
+ *
+ * Return:
+ * 0 on success
+ * -ENOMEM on failure to allocate memory
+ */
+static int alloc_mem(struct cxlflash_cfg *cfg)
+{
+ int rc = 0;
+ int i;
+ char *buf = NULL;
+
+ /* This allocation is about 12K, i.e. only 1 64k page
+ * and up to 4 4k pages
+ */
+ cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(sizeof(struct afu)));
+ if (unlikely(!cfg->afu)) {
+ pr_err("%s: cannot get %d free pages\n",
+ __func__, get_order(sizeof(struct afu)));
+ rc = -ENOMEM;
+ goto out;
+ }
+ cfg->afu->parent = cfg;
+ cfg->afu->afu_map = NULL;
+
+ for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
+ if (!((u64)buf & (PAGE_SIZE - 1))) {
+ buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (unlikely(!buf)) {
+ pr_err("%s: Allocate command buffers fail!\n",
+ __func__);
+ rc = -ENOMEM;
+ free_mem(cfg);
+ goto out;
+ }
+ }
+
+ cfg->afu->cmd[i].buf = buf;
+ atomic_set(&cfg->afu->cmd[i].free, 1);
+ cfg->afu->cmd[i].slot = i;
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * init_pci() - initializes the host as a PCI device
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return:
+ * 0 on success
+ * -EIO on unable to communicate with device
+ * A return code from the PCI sub-routines
+ */
+static int init_pci(struct cxlflash_cfg *cfg)
+{
+ struct pci_dev *pdev = cfg->dev;
+ int rc = 0;
+
+ cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
+ rc = pci_request_regions(pdev, CXLFLASH_NAME);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "%s: Couldn't register memory range of registers\n",
+ __func__);
+ goto out;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc || pci_channel_offline(pdev)) {
+ if (pci_channel_offline(pdev)) {
+ cxlflash_wait_for_pci_err_recovery(cfg);
+ rc = pci_enable_device(pdev);
+ }
+
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
+ __func__);
+ cxlflash_wait_for_pci_err_recovery(cfg);
+ goto out_release_regions;
+ }
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc < 0) {
+ dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
+ __func__);
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
+ __func__);
+ goto out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_channel_offline(pdev)) {
+ cxlflash_wait_for_pci_err_recovery(cfg);
+ if (pci_channel_offline(pdev)) {
+ rc = -EIO;
+ goto out_msi_disable;
+ }
+ }
+
+ rc = pci_save_state(pdev);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
+ dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
+ __func__);
+ rc = -EIO;
+ goto cleanup_nolog;
+ }
+
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+
+cleanup_nolog:
+out_msi_disable:
+ cxlflash_wait_for_pci_err_recovery(cfg);
+out_disable:
+ pci_disable_device(pdev);
+out_release_regions:
+ pci_release_regions(pdev);
+ goto out;
+}
+
+/**
+ * init_scsi() - adds the host to the SCSI stack and kicks off host scan
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return:
+ * 0 on success
+ * A return code from adding the host
+ */
+static int init_scsi(struct cxlflash_cfg *cfg)
+{
+ struct pci_dev *pdev = cfg->dev;
+ int rc = 0;
+
+ rc = scsi_add_host(cfg->host, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
+ __func__, rc);
+ goto out;
+ }
+
+ scsi_scan_host(cfg->host);
+
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * set_port_online() - transitions the specified host FC port to online state
+ * @fc_regs: Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call. Online state means
+ * that the FC link layer has synced, completed the handshaking process, and
+ * is ready for login to start.
+ */
+static void set_port_online(u64 *fc_regs)
+{
+ u64 cmdcfg;
+
+ cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
+ cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
+ cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
+ writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
+}
+
+/**
+ * set_port_offline() - transitions the specified host FC port to offline state
+ * @fc_regs: Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call.
+ */
+static void set_port_offline(u64 *fc_regs)
+{
+ u64 cmdcfg;
+
+ cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
+ cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
+ cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
+ writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
+}
+
+/**
+ * wait_port_online() - waits for the specified host FC port to come online
+ * @fc_regs: Top of MMIO region defined for specified port.
+ * @delay_us: Number of microseconds to delay between reading port status.
+ * @nretry: Number of cycles to retry reading port status.
+ *
+ * The provided MMIO region must be mapped prior to call. This will timeout
+ * when the cable is not plugged in.
+ *
+ * Return:
+ * TRUE (1) when the specified port is online
+ * FALSE (0) when the specified port fails to come online after timeout
+ * -EINVAL when @delay_us is less than 1000
+ */
+static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
+{
+ u64 status;
+
+ if (delay_us < 1000) {
+ pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
+ return -EINVAL;
+ }
+
+ do {
+ msleep(delay_us / 1000);
+ status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
+ nretry--);
+
+ return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
+}
+
+/**
+ * wait_port_offline() - waits for the specified host FC port to go offline
+ * @fc_regs: Top of MMIO region defined for specified port.
+ * @delay_us: Number of microseconds to delay between reading port status.
+ * @nretry: Number of cycles to retry reading port status.
+ *
+ * The provided MMIO region must be mapped prior to call.
+ *
+ * Return:
+ * TRUE (1) when the specified port is offline
+ * FALSE (0) when the specified port fails to go offline after timeout
+ * -EINVAL when @delay_us is less than 1000
+ */
+static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
+{
+ u64 status;
+
+ if (delay_us < 1000) {
+ pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
+ return -EINVAL;
+ }
+
+ do {
+ msleep(delay_us / 1000);
+ status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
+ nretry--);
+
+ return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
+}
+
+/**
+ * afu_set_wwpn() - configures the WWPN for the specified host FC port
+ * @afu: AFU associated with the host that owns the specified FC port.
+ * @port: Port number being configured.
+ * @fc_regs: Top of MMIO region defined for specified port.
+ * @wwpn: The world-wide-port-number previously discovered for port.
+ *
+ * The provided MMIO region must be mapped prior to call. As part of the
+ * sequence to configure the WWPN, the port is toggled offline and then back
+ * online. This toggling action can cause this routine to delay up to a few
+ * seconds. When configured to use the internal LUN feature of the AFU, a
+ * failure to come online is overridden.
+ *
+ * Return:
+ * 0 when the WWPN is successfully written and the port comes back online
+ * -1 when the port fails to go offline or come back up online
+ */
+static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
+{
+ int ret = 0;
+
+ set_port_offline(fc_regs);
+
+ if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+ FC_PORT_STATUS_RETRY_CNT)) {
+ pr_debug("%s: wait on port %d to go offline timed out\n",
+ __func__, port);
+ ret = -1; /* but continue on to leave the port back online */
+ }
+
+ if (ret == 0)
+ writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
+
+ set_port_online(fc_regs);
+
+ if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+ FC_PORT_STATUS_RETRY_CNT)) {
+ pr_debug("%s: wait on port %d to go online timed out\n",
+ __func__, port);
+ ret = -1;
+
+ /*
+ * Override for internal lun!!!
+ */
+ if (afu->internal_lun) {
+ pr_debug("%s: Overriding port %d online timeout!!!\n",
+ __func__, port);
+ ret = 0;
+ }
+ }
+
+ pr_debug("%s: returning rc=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/**
+ * afu_link_reset() - resets the specified host FC port
+ * @afu: AFU associated with the host that owns the specified FC port.
+ * @port: Port number being configured.
+ * @fc_regs: Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call. The sequence to
+ * reset the port involves toggling it offline and then back online. This
+ * action can cause this routine to delay up to a few seconds. An effort
+ * is made to maintain the link with the device by switching the host to
+ * use the alternate port exclusively while the reset takes place.
+ */
+static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
+{
+ u64 port_sel;
+
+ /* first switch the AFU to the other links, if any */
+ port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
+ port_sel &= ~(1ULL << port);
+ writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
+ cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
+
+ set_port_offline(fc_regs);
+ if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+ FC_PORT_STATUS_RETRY_CNT))
+ pr_err("%s: wait on port %d to go offline timed out\n",
+ __func__, port);
+
+ set_port_online(fc_regs);
+ if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+ FC_PORT_STATUS_RETRY_CNT))
+ pr_err("%s: wait on port %d to go online timed out\n",
+ __func__, port);
+
+ /* switch back to include this port */
+ port_sel |= (1ULL << port);
+ writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
+ cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
+
+ pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
+}
+
+/*
+ * Asynchronous interrupt information table
+ */
+static const struct asyc_intr_info ainfo[] = {
+ {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
+ {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
+ {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
+ {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
+ {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
+ {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
+ {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
+ {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
+ {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
+ {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
+ {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
+ {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
+ {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
+ {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
+ {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
+ {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
+ {0x0, "", 0, 0} /* terminator */
+};
+
+/**
+ * find_ainfo() - locates and returns asynchronous interrupt information
+ * @status: Status code set by AFU on error.
+ *
+ * Return: The located information or NULL when the status code is invalid.
+ */
+static const struct asyc_intr_info *find_ainfo(u64 status)
+{
+ const struct asyc_intr_info *info;
+
+ for (info = &ainfo[0]; info->status; info++)
+ if (info->status == status)
+ return info;
+
+ return NULL;
+}
+
+/**
+ * afu_err_intr_init() - clears and initializes the AFU for error interrupts
+ * @afu: AFU associated with the host.
+ */
+static void afu_err_intr_init(struct afu *afu)
+{
+ int i;
+ u64 reg;
+
+ /* Global async interrupts: the AFU clears afu_ctrl on context exit
+ * if async interrupts were sent to that context. This prevents
+ * the AFU from sending further async interrupts when there is
+ * nobody to receive them.
+ */
+
+ /* mask all */
+ writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
+ /* set LISN# to send and point to master context */
+ reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
+
+ if (afu->internal_lun)
+ reg |= 1; /* Bit 63 indicates local lun */
+ writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
+ /* clear all */
+ writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
+ /* unmask bits that are of interest */
+ /* note: afu can send an interrupt after this step */
+ writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
+ /* clear again in case a bit came on after previous clear but before */
+ /* unmask */
+ writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
+
+ /* Clear/Set internal lun bits */
+ reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+ reg &= SISL_FC_INTERNAL_MASK;
+ if (afu->internal_lun)
+ reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
+ writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+
+ /* now clear FC errors */
+ for (i = 0; i < NUM_FC_PORTS; i++) {
+ writeq_be(0xFFFFFFFFU,
+ &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
+ writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
+ }
+
+ /* sync interrupts for master's IOARRIN write */
+ /* note that unlike asyncs, there can be no pending sync interrupts */
+ /* at this time (this is a fresh context and master has not written */
+ /* IOARRIN yet), so there is nothing to clear. */
+
+ /* set LISN#, it is always sent to the context that wrote IOARRIN */
+ writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
+ writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
+}
+
+/**
+ * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
+ * @irq: Interrupt number.
+ * @data: Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
+{
+ struct afu *afu = (struct afu *)data;
+ u64 reg;
+ u64 reg_unmasked;
+
+ reg = readq_be(&afu->host_map->intr_status);
+ reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
+
+ if (reg_unmasked == 0UL) {
+ pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
+ __func__, (u64)afu, reg);
+ goto cxlflash_sync_err_irq_exit;
+ }
+
+ pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
+ __func__, (u64)afu, reg);
+
+ writeq_be(reg_unmasked, &afu->host_map->intr_clear);
+
+cxlflash_sync_err_irq_exit:
+ pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
+ return IRQ_HANDLED;
+}
+
+/**
+ * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
+ * @irq: Interrupt number.
+ * @data: Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
+{
+ struct afu *afu = (struct afu *)data;
+ struct afu_cmd *cmd;
+ bool toggle = afu->toggle;
+ u64 entry,
+ *hrrq_start = afu->hrrq_start,
+ *hrrq_end = afu->hrrq_end,
+ *hrrq_curr = afu->hrrq_curr;
+
+ /* Process however many RRQ entries that are ready */
+ while (true) {
+ entry = *hrrq_curr;
+
+ if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
+ break;
+
+ cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+ cmd_complete(cmd);
+
+ /* Advance to next entry or wrap and flip the toggle bit */
+ if (hrrq_curr < hrrq_end)
+ hrrq_curr++;
+ else {
+ hrrq_curr = hrrq_start;
+ toggle ^= SISL_RESP_HANDLE_T_BIT;
+ }
+ }
+
+ afu->hrrq_curr = hrrq_curr;
+ afu->toggle = toggle;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
+ * @irq: Interrupt number.
+ * @data: Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
+{
+ struct afu *afu = (struct afu *)data;
+ struct cxlflash_cfg *cfg;
+ u64 reg_unmasked;
+ const struct asyc_intr_info *info;
+ struct sisl_global_map *global = &afu->afu_map->global;
+ u64 reg;
+ u8 port;
+ int i;
+
+ cfg = afu->parent;
+
+ reg = readq_be(&global->regs.aintr_status);
+ reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
+
+ if (reg_unmasked == 0) {
+ pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
+ __func__, reg);
+ goto out;
+ }
+
+ /* it is OK to clear AFU status before FC_ERROR */
+ writeq_be(reg_unmasked, &global->regs.aintr_clear);
+
+ /* check each bit that is on */
+ for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
+ info = find_ainfo(1ULL << i);
+ if (!(reg_unmasked & 0x1) || !info)
+ continue;
+
+ port = info->port;
+
+ pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+ __func__, port, info->desc,
+ readq_be(&global->fc_regs[port][FC_STATUS / 8]));
+
+ /*
+ * do link reset first, some OTHER errors will set FC_ERROR
+ * again if cleared before or w/o a reset
+ */
+ if (info->action & LINK_RESET) {
+ pr_err("%s: FC Port %d: resetting link\n",
+ __func__, port);
+ cfg->lr_state = LINK_RESET_REQUIRED;
+ cfg->lr_port = port;
+ schedule_work(&cfg->work_q);
+ }
+
+ if (info->action & CLR_FC_ERROR) {
+ reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
+
+ /*
+ * since all errors are unmasked, FC_ERROR and FC_ERRCAP
+ * should be the same and tracing one is sufficient.
+ */
+
+ pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
+ __func__, port, reg);
+
+ writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
+ writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
+ }
+ }
+
+out:
+ pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
+ return IRQ_HANDLED;
+}
+
+/**
+ * start_context() - starts the master context
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: A success or failure value from CXL services.
+ */
+static int start_context(struct cxlflash_cfg *cfg)
+{
+ int rc = 0;
+
+ rc = cxl_start_context(cfg->mcctx,
+ cfg->afu->work.work_element_descriptor,
+ NULL);
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * read_vpd() - obtains the WWPNs from VPD
+ * @cfg: Internal structure associated with the host.
+ * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV when VPD or WWPN keywords not found
+ */
+static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
+{
+ struct pci_dev *dev = cfg->parent_dev;
+ int rc = 0;
+ int ro_start, ro_size, i, j, k;
+ ssize_t vpd_size;
+ char vpd_data[CXLFLASH_VPD_LEN];
+ char tmp_buf[WWPN_BUF_LEN] = { 0 };
+ char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
+
+ /* Get the VPD data from the device */
+ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ if (unlikely(vpd_size <= 0)) {
+ pr_err("%s: Unable to read VPD (size = %ld)\n",
+ __func__, vpd_size);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* Get the read only section offset */
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
+ PCI_VPD_LRDT_RO_DATA);
+ if (unlikely(ro_start < 0)) {
+ pr_err("%s: VPD Read-only data not found\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* Get the read only section size, cap when extends beyond read VPD */
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ if (unlikely((i + j) > vpd_size)) {
+ pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
+ __func__, (i + j), vpd_size);
+ ro_size = vpd_size - i;
+ }
+
+ /*
+ * Find the offset of the WWPN tag within the read only
+ * VPD data and validate the found field (partials are
+ * no good to us). Convert the ASCII data to an integer
+ * value. Note that we must copy to a temporary buffer
+ * because the conversion service requires that the ASCII
+ * string be terminated.
+ */
+ for (k = 0; k < NUM_FC_PORTS; k++) {
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
+ if (unlikely(i < 0)) {
+ pr_err("%s: Port %d WWPN not found in VPD\n",
+ __func__, k);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+ pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
+ __func__, k);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
+ rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
+ if (unlikely(rc)) {
+ pr_err("%s: Fail to convert port %d WWPN to integer\n",
+ __func__, k);
+ rc = -ENODEV;
+ goto out;
+ }
+ }
+
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_context_reset() - timeout handler for AFU commands
+ * @cmd: AFU command that timed out.
+ *
+ * Sends a reset to the AFU.
+ */
+void cxlflash_context_reset(struct afu_cmd *cmd)
+{
+ int nretry = 0;
+ u64 rrin = 0x1;
+ u64 room = 0;
+ struct afu *afu = cmd->parent;
+ ulong lock_flags;
+
+ pr_debug("%s: cmd=%p\n", __func__, cmd);
+
+ spin_lock_irqsave(&cmd->slock, lock_flags);
+
+ /* Already completed? */
+ if (cmd->sa.host_use_b[0] & B_DONE) {
+ spin_unlock_irqrestore(&cmd->slock, lock_flags);
+ return;
+ }
+
+ cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
+ spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+ /*
+ * We really want to send this reset at all costs, so spread
+ * out wait time on successive retries for available room.
+ */
+ do {
+ room = readq_be(&afu->host_map->cmd_room);
+ atomic64_set(&afu->room, room);
+ if (room)
+ goto write_rrin;
+ udelay(nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ pr_err("%s: no cmd_room to send reset\n", __func__);
+ return;
+
+write_rrin:
+ nretry = 0;
+ writeq_be(rrin, &afu->host_map->ioarrin);
+ do {
+ rrin = readq_be(&afu->host_map->ioarrin);
+ if (rrin != 0x1)
+ break;
+ /* Double the delay each time (note: '^' is XOR, so shift instead) */
+ udelay(1 << nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+}
+
+/**
+ * init_pcr() - initializes the provisioning and control registers
+ * @cfg: Internal structure associated with the host.
+ *
+ * Also sets up fast access to the mapped registers and initializes AFU
+ * command fields that never change.
+ */
+void init_pcr(struct cxlflash_cfg *cfg)
+{
+ struct afu *afu = cfg->afu;
+ struct sisl_ctrl_map *ctrl_map;
+ int i;
+
+ for (i = 0; i < MAX_CONTEXT; i++) {
+ ctrl_map = &afu->afu_map->ctrls[i].ctrl;
+ /* disrupt any clients that could be running */
+ /* e. g. clients that survived a master restart */
+ writeq_be(0, &ctrl_map->rht_start);
+ writeq_be(0, &ctrl_map->rht_cnt_id);
+ writeq_be(0, &ctrl_map->ctx_cap);
+ }
+
+ /* copy frequently used fields into afu */
+ afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
+ /* ctx_hndl is 16 bits in CAIA */
+ afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
+ afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
+
+ /* Program the Endian Control for the master context */
+ writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
+
+ /* initialize cmd fields that never change */
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+ afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
+ afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
+ afu->cmd[i].rcb.rrq = 0x0;
+ }
+}
+
+/**
+ * init_global() - initializes the AFU global registers
+ * @cfg: Internal structure associated with the host.
+ */
+int init_global(struct cxlflash_cfg *cfg)
+{
+ struct afu *afu = cfg->afu;
+ u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
+ int i = 0, num_ports = 0;
+ int rc = 0;
+ u64 reg;
+
+ rc = read_vpd(cfg, &wwpn[0]);
+ if (rc) {
+ pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
+ goto out;
+ }
+
+ pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
+
+ /* set up RRQ in AFU for master issued cmds */
+ writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
+ writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
+
+ /* AFU configuration */
+ reg = readq_be(&afu->afu_map->global.regs.afu_config);
+ reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
+ /* enable all auto retry options and control endianness */
+ /* leave others at default: */
+ /* CTX_CAP write protected, mbox_r does not clear on read and */
+ /* checker on if dual afu */
+ writeq_be(reg, &afu->afu_map->global.regs.afu_config);
+
+ /* global port select: select either port */
+ if (afu->internal_lun) {
+ /* only use port 0 */
+ writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
+ num_ports = NUM_FC_PORTS - 1;
+ } else {
+ writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
+ num_ports = NUM_FC_PORTS;
+ }
+
+ for (i = 0; i < num_ports; i++) {
+ /* unmask all errors (but they are still masked at AFU) */
+ writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
+ /* clear CRC error cnt & set a threshold */
+ (void)readq_be(&afu->afu_map->global.
+ fc_regs[i][FC_CNT_CRCERR / 8]);
+ writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
+ [FC_CRC_THRESH / 8]);
+
+ /* set WWPNs. If already programmed, wwpn[i] is 0 */
+ if (wwpn[i] != 0 &&
+ afu_set_wwpn(afu, i,
+ &afu->afu_map->global.fc_regs[i][0],
+ wwpn[i])) {
+ pr_err("%s: failed to set WWPN on port %d\n",
+ __func__, i);
+ rc = -EIO;
+ goto out;
+ }
+ /* Programming WWPN back to back causes additional
+ * offline/online transitions and a PLOGI
+ */
+ msleep(100);
+
+ }
+
+ /* set up master's own CTX_CAP to allow real mode, host translation */
+ /* tbls, afu cmds and read/write GSCSI cmds. */
+ /* First, unlock ctx_cap write by reading mbox */
+ (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
+ writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
+ SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
+ SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
+ &afu->ctrl_map->ctx_cap);
+ /* init heartbeat */
+ afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
+
+out:
+ return rc;
+}
+
+/**
+ * start_afu() - initializes and starts the AFU
+ * @cfg: Internal structure associated with the host.
+ */
+static int start_afu(struct cxlflash_cfg *cfg)
+{
+ struct afu *afu = cfg->afu;
+ struct afu_cmd *cmd;
+
+ int i = 0;
+ int rc = 0;
+
+ for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+ cmd = &afu->cmd[i];
+
+ init_completion(&cmd->cevent);
+ spin_lock_init(&cmd->slock);
+ cmd->parent = afu;
+ }
+
+ init_pcr(cfg);
+
+ /* initialize RRQ pointers */
+ afu->hrrq_start = &afu->rrq_entry[0];
+ afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
+ afu->hrrq_curr = afu->hrrq_start;
+ afu->toggle = 1;
+
+ rc = init_global(cfg);
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * init_mc() - creates and registers as the master context
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return:
+ * 0 on success
+ * -ENOMEM when unable to obtain a context from CXL services
+ * A failure value from CXL services.
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+ struct cxl_context *ctx;
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ int rc = 0;
+ enum undo_level level;
+
+ ctx = cxl_get_context(cfg->dev);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ cfg->mcctx = ctx;
+
+ /* Set it up as a master with the CXL */
+ cxl_set_master(ctx);
+
+ /* During initialization reset the AFU to start from a clean slate */
+ rc = cxl_afu_reset(cfg->mcctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+ __func__, rc);
+ level = RELEASE_CONTEXT;
+ goto out;
+ }
+
+ rc = cxl_allocate_afu_irqs(ctx, 3);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
+ __func__, rc);
+ level = RELEASE_CONTEXT;
+ goto out;
+ }
+
+ rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
+ "SISL_MSI_SYNC_ERROR");
+ if (unlikely(rc <= 0)) {
+ dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
+ __func__);
+ level = FREE_IRQ;
+ goto out;
+ }
+
+ rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
+ "SISL_MSI_RRQ_UPDATED");
+ if (unlikely(rc <= 0)) {
+ dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
+ __func__);
+ level = UNMAP_ONE;
+ goto out;
+ }
+
+ rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
+ "SISL_MSI_ASYNC_ERROR");
+ if (unlikely(rc <= 0)) {
+ dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
+ __func__);
+ level = UNMAP_TWO;
+ goto out;
+ }
+
+ rc = 0;
+
+ /* This performs the equivalent of the CXL_IOCTL_START_WORK.
+ * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
+ * element (pe) that is embedded in the context (ctx).
+ */
+ rc = start_context(cfg);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
+ level = UNMAP_THREE;
+ goto out;
+ }
+ret:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+out:
+ term_mc(cfg, level);
+ goto ret;
+}
+
+/**
+ * init_afu() - sets up the master context and starts the AFU
+ * @cfg: Internal structure associated with the host.
+ *
+ * This routine is a higher level of control for configuring the
+ * AFU on probe and reset paths.
+ *
+ * Return:
+ * 0 on success
+ * -ENOMEM when unable to map the AFU MMIO space
+ * A failure value from internal services.
+ */
+static int init_afu(struct cxlflash_cfg *cfg)
+{
+ u64 reg;
+ int rc = 0;
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+
+ cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+
+ rc = init_mc(cfg);
+ if (rc) {
+ dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
+ __func__, rc);
+ goto err1;
+ }
+
+ /* Map the entire MMIO space of the AFU */
+ afu->afu_map = cxl_psa_map(cfg->mcctx);
+ if (!afu->afu_map) {
+ rc = -ENOMEM;
+ term_mc(cfg, UNDO_START);
+ dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+ goto err1;
+ }
+
+ /* don't byte reverse on reading afu_version, else the string form */
+ /* will be backwards */
+ reg = afu->afu_map->global.regs.afu_version;
+ memcpy(afu->version, &reg, 8);
+ afu->interface_version =
+ readq_be(&afu->afu_map->global.regs.interface_version);
+ pr_debug("%s: afu version %s, interface version 0x%llX\n",
+ __func__, afu->version, afu->interface_version);
+
+ rc = start_afu(cfg);
+ if (rc) {
+ dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
+ __func__, rc);
+ term_mc(cfg, UNDO_START);
+ cxl_psa_unmap((void *)afu->afu_map);
+ afu->afu_map = NULL;
+ goto err1;
+ }
+
+ afu_err_intr_init(cfg->afu);
+ atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+
+ /* Restore the LUN mappings */
+ cxlflash_restore_luntable(cfg);
+err1:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_send_cmd() - sends an AFU command
+ * @afu: AFU associated with the host.
+ * @cmd: AFU command to send.
+ *
+ * Return:
+ * 0 on success
+ * -1 on failure
+ */
+int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
+{
+ struct cxlflash_cfg *cfg = afu->parent;
+ int nretry = 0;
+ int rc = 0;
+ u64 room;
+ long newval;
+
+ /*
+ * This routine is used by critical users such as AFU sync and to
+ * send a task management function (TMF). Thus we want to retry a
+ * bit before returning an error. To avoid the performance penalty
+ * of MMIO, we spread the update of 'room' over multiple commands.
+ */
+retry:
+ newval = atomic64_dec_if_positive(&afu->room);
+ if (!newval) {
+ do {
+ room = readq_be(&afu->host_map->cmd_room);
+ atomic64_set(&afu->room, room);
+ if (room)
+ goto write_ioarrin;
+ udelay(nretry);
+ } while (nretry++ < MC_ROOM_RETRY_CNT);
+
+ pr_err("%s: no cmd_room to send 0x%X\n",
+ __func__, cmd->rcb.cdb[0]);
+
+ goto no_room;
+ } else if (unlikely(newval < 0)) {
+ /* This should be rare. i.e. Only if two threads race and
+ * decrement before the MMIO read is done. In this case
+ * just benefit from the other thread having updated
+ * afu->room.
+ */
+ if (nretry++ < MC_ROOM_RETRY_CNT) {
+ udelay(nretry);
+ goto retry;
+ }
+
+ goto no_room;
+ }
+
+write_ioarrin:
+ writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
+out:
+ pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
+ cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+ return rc;
+
+no_room:
+ afu->read_room = true;
+ schedule_work(&cfg->work_q);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+}
+
+/**
+ * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
+ * @afu: AFU associated with the host.
+ * @cmd: AFU command that was sent.
+ */
+void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
+{
+ ulong timeout = jiffies + (cmd->rcb.timeout * 2 * HZ);
+
+ timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
+ if (!timeout)
+ cxlflash_context_reset(cmd);
+
+ if (unlikely(cmd->sa.ioasc != 0))
+ pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
+ "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
+ cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
+ cmd->sa.rc.fc_rc);
+}
+
+/**
+ * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * @afu: AFU associated with the host.
+ * @ctx_hndl_u: Identifies context requesting sync.
+ * @res_hndl_u: Identifies resource requesting sync.
+ * @mode: Type of sync to issue (lightweight, heavyweight, global).
+ *
+ * The AFU can only take one sync command at a time. This routine enforces this
+ * limitation by using a mutex to provide exclusive access to the AFU during
+ * the sync. This design point requires calling threads not to run in interrupt
+ * context due to the possibility of sleeping during concurrent sync operations.
+ *
+ * AFU sync operations are only necessary and allowed when the device is
+ * operating normally. When not operating normally, sync requests can occur as
+ * part of cleaning up resources associated with an adapter prior to removal.
+ * In this scenario, these requests are simply ignored (safe due to the AFU
+ * going away).
+ *
+ * Return:
+ * 0 on success
+ * -1 on failure
+ */
+int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
+ res_hndl_t res_hndl_u, u8 mode)
+{
+ struct cxlflash_cfg *cfg = afu->parent;
+ struct afu_cmd *cmd = NULL;
+ int rc = 0;
+ int retry_cnt = 0;
+ static DEFINE_MUTEX(sync_active);
+
+ if (cfg->state != STATE_NORMAL) {
+ pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
+ return 0;
+ }
+
+ mutex_lock(&sync_active);
+retry:
+ cmd = cxlflash_cmd_checkout(afu);
+ if (unlikely(!cmd)) {
+ retry_cnt++;
+ udelay(1000 * retry_cnt);
+ if (retry_cnt < MC_RETRY_CNT)
+ goto retry;
+ pr_err("%s: could not get a free command\n", __func__);
+ rc = -1;
+ goto out;
+ }
+
+ pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+
+ memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+
+ cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ cmd->rcb.port_sel = 0x0; /* NA */
+ cmd->rcb.lun_id = 0x0; /* NA */
+ cmd->rcb.data_len = 0x0;
+ cmd->rcb.data_ea = 0x0;
+ cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
+
+ cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
+ cmd->rcb.cdb[1] = mode;
+
+ /* The cdb is aligned, no unaligned accessors required */
+ *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
+ *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
+
+ rc = cxlflash_send_cmd(afu, cmd);
+ if (unlikely(rc))
+ goto out;
+
+ cxlflash_wait_resp(afu, cmd);
+
+ /* set on timeout */
+ if (unlikely((cmd->sa.ioasc != 0) ||
+ (cmd->sa.host_use_b[0] & B_ERROR)))
+ rc = -1;
+out:
+ mutex_unlock(&sync_active);
+ if (cmd)
+ cxlflash_cmd_checkin(cmd);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_afu_reset() - resets the AFU
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return:
+ * 0 on success
+ * A failure value from internal services.
+ */
+int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
+{
+ int rc = 0;
+ /* Stop the context before the reset. Since the context is
+ * no longer available, restart it after the reset completes.
+ */
+
+ term_afu(cfg);
+
+ rc = init_afu(cfg);
+
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_worker_thread() - work thread handler for the AFU
+ * @work: Work structure contained within cxlflash associated with host.
+ *
+ * Handles the following events:
+ * - Link reset which cannot be performed on interrupt context due to
+ * blocking up to a few seconds
+ * - Read AFU command room
+ */
+static void cxlflash_worker_thread(struct work_struct *work)
+{
+ struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
+ work_q);
+ struct afu *afu = cfg->afu;
+ int port;
+ ulong lock_flags;
+
+ /* Avoid MMIO if the device has failed */
+
+ if (cfg->state != STATE_NORMAL)
+ return;
+
+ spin_lock_irqsave(cfg->host->host_lock, lock_flags);
+
+ if (cfg->lr_state == LINK_RESET_REQUIRED) {
+ port = cfg->lr_port;
+ if (port < 0)
+ pr_err("%s: invalid port index %d\n", __func__, port);
+ else {
+ spin_unlock_irqrestore(cfg->host->host_lock,
+ lock_flags);
+
+ /* The reset can block... */
+ afu_link_reset(afu, port,
+ &afu->afu_map->
+ global.fc_regs[port][0]);
+ spin_lock_irqsave(cfg->host->host_lock, lock_flags);
+ }
+
+ cfg->lr_state = LINK_RESET_COMPLETE;
+ }
+
+ if (afu->read_room) {
+ atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+ afu->read_room = false;
+ }
+
+ spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * cxlflash_probe() - PCI entry point to add host
+ * @pdev: PCI device associated with the host.
+ * @dev_id: PCI device id associated with device.
+ *
+ * Return: 0 on success / non-zero on failure
+ */
+static int cxlflash_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct Scsi_Host *host;
+ struct cxlflash_cfg *cfg = NULL;
+ struct device *phys_dev;
+ struct dev_dependent_vals *ddv;
+ int rc = 0;
+
+ dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
+ __func__, pdev->irq);
+
+ ddv = (struct dev_dependent_vals *)dev_id->driver_data;
+ driver_template.max_sectors = ddv->max_sectors;
+
+ host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
+ if (!host) {
+ dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+ __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
+ host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
+ host->max_channel = NUM_FC_PORTS - 1;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
+
+ cfg = (struct cxlflash_cfg *)host->hostdata;
+ cfg->host = host;
+ rc = alloc_mem(cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+ __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cfg->init_state = INIT_STATE_NONE;
+ cfg->dev = pdev;
+
+ /*
+ * The promoted LUNs move to the top of the LUN table. The rest stay
+ * on the bottom half. The bottom half grows from the end
+ * (index = 255), whereas the top half grows from the beginning
+ * (index = 0).
+ */
+ cfg->promote_lun_index = 0;
+ cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
+ cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
+
+ cfg->dev_id = (struct pci_device_id *)dev_id;
+ cfg->mcctx = NULL;
+
+ init_waitqueue_head(&cfg->tmf_waitq);
+ init_waitqueue_head(&cfg->limbo_waitq);
+
+ INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
+ cfg->lr_state = LINK_RESET_INVALID;
+ cfg->lr_port = -1;
+ mutex_init(&cfg->ctx_tbl_list_mutex);
+ mutex_init(&cfg->ctx_recovery_mutex);
+ INIT_LIST_HEAD(&cfg->ctx_err_recovery);
+ INIT_LIST_HEAD(&cfg->lluns);
+
+ pci_set_drvdata(pdev, cfg);
+
+ /* Use the special service provided to look up the physical
+ * PCI device, since we are called on the probe of the virtual
+ * PCI host bus (vphb)
+ */
+ phys_dev = cxl_get_phys_dev(pdev);
+ if (!dev_is_pci(phys_dev)) {
+ pr_err("%s: not a pci dev\n", __func__);
+ rc = -ENODEV;
+ goto out_remove;
+ }
+ cfg->parent_dev = to_pci_dev(phys_dev);
+
+ cfg->cxl_afu = cxl_pci_to_afu(pdev);
+
+ rc = init_pci(cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: call to init_pci "
+ "failed rc=%d!\n", __func__, rc);
+ goto out_remove;
+ }
+ cfg->init_state = INIT_STATE_PCI;
+
+ rc = init_afu(cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: call to init_afu "
+ "failed rc=%d!\n", __func__, rc);
+ goto out_remove;
+ }
+ cfg->init_state = INIT_STATE_AFU;
+
+ rc = init_scsi(cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: call to init_scsi "
+ "failed rc=%d!\n", __func__, rc);
+ goto out_remove;
+ }
+ cfg->init_state = INIT_STATE_SCSI;
+
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+
+out_remove:
+ cxlflash_remove(pdev);
+ goto out;
+}
+
+/**
+ * cxlflash_pci_error_detected() - called when a PCI error is detected
+ * @pdev: PCI device struct.
+ * @state: PCI channel state.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ int rc = 0;
+ struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+ struct device *dev = &cfg->dev->dev;
+
+ dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
+
+ switch (state) {
+ case pci_channel_io_frozen:
+ cfg->state = STATE_LIMBO;
+
+ /* Turn off legacy I/O */
+ scsi_block_requests(cfg->host);
+ rc = cxlflash_mark_contexts_error(cfg);
+ if (unlikely(rc))
+ dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
+ __func__, rc);
+ term_mc(cfg, UNDO_START);
+ stop_afu(cfg);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ cfg->state = STATE_FAILTERM;
+ wake_up_all(&cfg->limbo_waitq);
+ scsi_unblock_requests(cfg->host);
+ return PCI_ERS_RESULT_DISCONNECT;
+ default:
+ break;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * cxlflash_pci_slot_reset() - called when PCI slot has been reset
+ * @pdev: PCI device struct.
+ *
+ * This routine is called by the pci error recovery code after the PCI
+ * slot has been reset, just before we should resume normal operations.
+ *
+ * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
+{
+ int rc = 0;
+ struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+ struct device *dev = &cfg->dev->dev;
+
+ dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
+
+ rc = init_afu(cfg);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * cxlflash_pci_resume() - called when normal operation can resume
+ * @pdev: PCI device struct
+ */
+static void cxlflash_pci_resume(struct pci_dev *pdev)
+{
+ struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+ struct device *dev = &cfg->dev->dev;
+
+ dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
+
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->limbo_waitq);
+ scsi_unblock_requests(cfg->host);
+}
+
+static const struct pci_error_handlers cxlflash_err_handler = {
+ .error_detected = cxlflash_pci_error_detected,
+ .slot_reset = cxlflash_pci_slot_reset,
+ .resume = cxlflash_pci_resume,
+};
+
+/*
+ * PCI device structure
+ */
+static struct pci_driver cxlflash_driver = {
+ .name = CXLFLASH_NAME,
+ .id_table = cxlflash_pci_table,
+ .probe = cxlflash_probe,
+ .remove = cxlflash_remove,
+ .err_handler = &cxlflash_err_handler,
+};
+
+/**
+ * init_cxlflash() - module entry point
+ *
+ * Return: 0 on success / non-zero on failure
+ */
+static int __init init_cxlflash(void)
+{
+ pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
+ __func__, CXLFLASH_DRIVER_DATE);
+
+ cxlflash_list_init();
+
+ return pci_register_driver(&cxlflash_driver);
+}
+
+/**
+ * exit_cxlflash() - module exit point
+ */
+static void __exit exit_cxlflash(void)
+{
+ cxlflash_term_global_luns();
+ cxlflash_free_errpage();
+
+ pci_unregister_driver(&cxlflash_driver);
+}
+
+module_init(init_cxlflash);
+module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
new file mode 100644
index 000000000000..cf0e80938b13
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.h
@@ -0,0 +1,108 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_MAIN_H
+#define _CXLFLASH_MAIN_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+
+#define CXLFLASH_NAME "cxlflash"
+#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
+#define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
+
+#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
+#define CXLFLASH_SUBS_DEV_ID 0x04F0
+
+/* Since there is only one target, make it 0 */
+#define CXLFLASH_TARGET 0
+#define CXLFLASH_MAX_CDB_LEN 16
+
+/* Really only one target per bus since the Texan is directly attached */
+#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
+#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
+
+#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
+
+#define NUM_FC_PORTS CXLFLASH_NUM_FC_PORTS /* ports per AFU */
+
+/* FC defines */
+#define FC_MTIP_CMDCONFIG 0x010
+#define FC_MTIP_STATUS 0x018
+
+#define FC_PNAME 0x300
+#define FC_CONFIG 0x320
+#define FC_CONFIG2 0x328
+#define FC_STATUS 0x330
+#define FC_ERROR 0x380
+#define FC_ERRCAP 0x388
+#define FC_ERRMSK 0x390
+#define FC_CNT_CRCERR 0x538
+#define FC_CRC_THRESH 0x580
+
+#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
+#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
+
+#define FC_MTIP_STATUS_MASK 0x30ULL
+#define FC_MTIP_STATUS_ONLINE 0x20ULL
+#define FC_MTIP_STATUS_OFFLINE 0x10ULL
+
+/* TIMEOUT and RETRY definitions */
+
+/* AFU command timeout values */
+#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
+
+/* AFU command room retry limit */
+#define MC_ROOM_RETRY_CNT 10
+
+/* FC CRC clear periodic timer */
+#define MC_CRC_THRESH 100 /* threshold in 5 mins */
+
+#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
+#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
+
+/* VPD defines */
+#define CXLFLASH_VPD_LEN 256
+#define WWPN_LEN 16
+#define WWPN_BUF_LEN (WWPN_LEN + 1)
+
+enum undo_level {
+ RELEASE_CONTEXT = 0,
+ FREE_IRQ,
+ UNMAP_ONE,
+ UNMAP_TWO,
+ UNMAP_THREE,
+ UNDO_START
+};
+
+struct dev_dependent_vals {
+ u64 max_sectors;
+};
+
+struct asyc_intr_info {
+ u64 status;
+ char *desc;
+ u8 port;
+ u8 action;
+#define CLR_FC_ERROR 0x01
+#define LINK_RESET 0x02
+};
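+
+/*
+ * Example entry from the ainfo[] table in main.c: a CRC threshold event on
+ * port 0 is logged and schedules a link reset via the worker thread:
+ *
+ *	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET}
+ */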
+
+#ifndef CONFIG_CXL_EEH
+#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
+#endif
+
+#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
new file mode 100644
index 000000000000..63bf394fe78c
--- /dev/null
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -0,0 +1,472 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _SISLITE_H
+#define _SISLITE_H
+
+#include <linux/types.h>
+
+typedef u16 ctx_hndl_t;
+typedef u32 res_hndl_t;
+
+#define SIZE_4K 4096
+#define SIZE_64K 65536
+
+/*
+ * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
+ * except for SCSI CDB which remains big endian per SCSI standards.
+ */
+struct sisl_ioarcb {
+ u16 ctx_id; /* ctx_hndl_t */
+ u16 req_flags;
+#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
+#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
+
+#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
+
+#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
+#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
+#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
+#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
+
+#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
+
+#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
+
+#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
+#define SISL_REQ_FLAGS_HOST_READ 0x0000U
+
+ union {
+ u32 res_hndl; /* res_hndl_t */
+ u32 port_sel; /* this is a selection mask:
+ * 0x1 -> port#0 can be selected,
+ * 0x2 -> port#1 can be selected.
+ * Can be bitwise ORed.
+ */
+ };
+ u64 lun_id;
+ u32 data_len; /* 4K for read/write */
+ u32 ioadl_len;
+ union {
+ u64 data_ea; /* min 16 byte aligned */
+ u64 ioadl_ea;
+ };
+ u8 msi; /* LISN to send on RRQ write */
+#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
+#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
+#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
+#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
+
+ u8 rrq; /* 0 for a single RRQ */
+ u16 timeout; /* in units specified by req_flags */
+ u32 rsvd1;
+ u8 cdb[16]; /* must be in big endian */
+ struct scsi_cmnd *scp;
+} __packed;
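+
+/*
+ * Illustrative IOARCB fill-in (a sketch only), mirroring how
+ * cxlflash_afu_sync() in main.c builds an AFU-internal command;
+ * the field values are examples, not requirements:
+ *
+ *	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ *	cmd->rcb.port_sel = 0x0;	(not applicable to AFU commands)
+ *	cmd->rcb.timeout = 5;		(SISL_REQ_FLAGS_TIMEOUT_SECS units)
+ *	cmd->rcb.cdb[0] = 0xC0;		(AFU sync opcode)
+ */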
+
+struct sisl_rc {
+ u8 flags;
+#define SISL_RC_FLAGS_SENSE_VALID 0x80U
+#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
+#define SISL_RC_FLAGS_OVERRUN 0x20U
+#define SISL_RC_FLAGS_UNDERRUN 0x10U
+
+ u8 afu_rc;
+#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
+#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
+#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
+#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
+ may retry if afu_retry is off
+ possible on master exit
+ */
+#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
+#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
+#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
+#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
+ may retry if afu_retry is off
+ possible on master exit
+ */
+#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
+
+#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
+
+ /* NO_CHANNELS means the FC ports selected by dest_port in
+ * IOARCB or in the LXT entry are down when the AFU tried to select
+ * a FC port. If the port went down on an active IO, it will set
+ * fc_rc to 0x54 (NOLOGI) or 0x57 (LINKDOWN) instead.
+ */
+#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
+#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
+ afu reset/master restart
+ */
+#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
+#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
+ may retry if afu_retry is off
+ */
+
+ u8 scsi_rc; /* SCSI status byte, retry as appropriate */
+#define SISL_SCSI_RC_CHECK 0x02U
+#define SISL_SCSI_RC_BUSY 0x08u
+
+ u8 fc_rc; /* retry */
+ /*
+ * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
+ * commands that are in flight when a link goes down or is logged out.
+ * If the link is down or logged out before AFU selects the port, either
+ * it will choose the other port or we will get afu_rc=0x20 (no_channel)
+ * if there is no valid port to use.
+ *
+ * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
+ * would happen if a frame is dropped and something times out.
+ * NOLOGI or LINKDOWN can be retried if the other port is up.
+ * RESIDERR can be retried as well.
+ *
+ * ABORTFAIL might indicate that lots of frames are getting CRC errors.
+ * So it may be retried once; reset the link if it happens again.
+ * The link can also be reset on the CRC error threshold interrupt.
+ */
+#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
+#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
+#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
+#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
+#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
+#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
+#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
+#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
+#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
+#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
+ reported len, possibly due to dropped
+ frames */
+#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
+};
+
+#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
+
+/*
+ * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
+ * host native endianness
+ */
+struct sisl_ioasa {
+ union {
+ struct sisl_rc rc;
+ u32 ioasc;
+#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
+ };
+ u32 resid;
+ u8 port;
+ u8 afu_extra;
+ /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
+ * afu_extra contains the PSL response code. Useful codes are:
+ */
+#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
+ * Enabled N/A
+ * Disabled retry
+ */
+#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
+ * afu_rc Implies
+ * 0x04, 0x14 master exit.
+ * 0x31 user error.
+ */
+ /* when afu rc=0x20 (no channels):
+ * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
+ */
+#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
+#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
+
+ u8 scsi_extra;
+ u8 fc_extra;
+ u8 sense_data[SISL_SENSE_DATA_LEN];
+
+ /* These fields are defined by the SISlite architecture for the
+ * host to use as they see fit for their implementation.
+ */
+ union {
+ u64 host_use[4];
+ u8 host_use_b[32];
+ };
+} __packed;
+
+#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
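+
+/*
+ * An RRQ entry packs the host's command pointer together with the toggle
+ * in its low bit; a sketch of the decode performed by cxlflash_rrq_irq()
+ * in main.c:
+ *
+ *	if ((entry & SISL_RESP_HANDLE_T_BIT) == afu->toggle)
+ *		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+ */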
+
+/* MMIO space is required to support only 64-bit access */
+
+/*
+ * This AFU has two mechanisms to deal with endian-ness.
+ * One is a global configuration (in the afu_config) register
+ * below that specifies the endian-ness of the host.
+ * The other is a per context (i.e. application) specification
+ * controlled by the endian_ctrl field here. Since the master
+ * context is one such application the master context's
+ * endian-ness is set to be the same as the host.
+ *
+ * As per the SISlite spec, the MMIO registers are always
+ * big endian.
+ */
+#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
+#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
+
+#ifdef __BIG_ENDIAN
+#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
+#else
+#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
+#endif
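+
+/*
+ * The master context programs its own endianness to match the host before
+ * issuing commands; init_pcr() in main.c does exactly this:
+ *
+ *	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
+ */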
+
+/* per context host transport MMIO */
+struct sisl_host_map {
+ __be64 endian_ctrl; /* Per context Endian Control. The AFU will
+ * operate on whatever the context is of the
+ * host application.
+ */
+
+ __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
+ * Only recovery in a PERM_ERR is a context
+ * exit since there is no way to tell which
+ * command caused the error.
+ */
+#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
+#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
+#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
+#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
+ /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
+ * Same error in data/LXT/RHT access is reported via IOASA.
+ */
+#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can be generated
+ * only when AFU auto
+ * retry is disabled.
+ * If user can determine
+ * the command that
+ * caused the error, it
+ * can be retried.
+ */
+#define SISL_ISTATUS_UNMASK (0x001FULL) /* 1 means unmasked */
+#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
+
+ __be64 intr_clear;
+ __be64 intr_mask;
+ __be64 ioarrin; /* only write what cmd_room permits */
+ __be64 rrq_start; /* start & end are both inclusive */
+ __be64 rrq_end; /* write sequence: start followed by end */
+ __be64 cmd_room;
+ __be64 ctx_ctrl; /* least significant byte (b56:63) is LISN# */
+ __be64 mbox_w; /* restricted use */
+};
+
+/* per context provisioning & control MMIO */
+struct sisl_ctrl_map {
+ __be64 rht_start;
+ __be64 rht_cnt_id;
+ /* both cnt & ctx_id args must be ULL */
+#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
+
+ __be64 ctx_cap; /* afu_rc below is when the capability is violated */
+#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
+#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
+ __be64 mbox_r;
+};
+
+/* single copy global regs */
+struct sisl_global_regs {
+ __be64 aintr_status;
+ /* In cxlflash, each FC port/link gets a byte of status */
+#define SISL_ASTATUS_FC0_OTHER 0x8000ULL /* b48, other err,
+ FC_ERRCAP[31:20] */
+#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
+ while logged in */
+#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */
+#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state machine timed
+                                             out and retrying */
+#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed,
+ FC_ERROR[19:0] */
+#define SISL_ASTATUS_FC0_LOGI_S 0x0400ULL /* b53, login succeeded */
+#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */
+#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */
+
+#define SISL_ASTATUS_FC1_OTHER 0x0080ULL /* b56 */
+#define SISL_ASTATUS_FC1_LOGO 0x0040ULL /* b57 */
+#define SISL_ASTATUS_FC1_CRC_T 0x0020ULL /* b58 */
+#define SISL_ASTATUS_FC1_LOGI_R 0x0010ULL /* b59 */
+#define SISL_ASTATUS_FC1_LOGI_F 0x0008ULL /* b60 */
+#define SISL_ASTATUS_FC1_LOGI_S 0x0004ULL /* b61 */
+#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */
+#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */
+
+#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
+#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
+#define SISL_FC_INTERNAL_SHIFT 32
+
+#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */
+#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
+
+ __be64 aintr_clear;
+ __be64 aintr_mask;
+ __be64 afu_ctrl;
+ __be64 afu_hb;
+ __be64 afu_scratch_pad;
+ __be64 afu_port_sel;
+#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
+#define SISL_AFUCONF_AR_LXT 0x2000ULL
+#define SISL_AFUCONF_AR_RHT 0x1000ULL
+#define SISL_AFUCONF_AR_DATA 0x0800ULL
+#define SISL_AFUCONF_AR_RSRC 0x0400ULL
+#define SISL_AFUCONF_AR_IOASA 0x0200ULL
+#define SISL_AFUCONF_AR_RRQ 0x0100ULL
+/* Aggregate all Auto Retry Bits */
+#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
+ SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
+ SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
+ SISL_AFUCONF_AR_RRQ)
+#ifdef __BIG_ENDIAN
+#define SISL_AFUCONF_ENDIAN 0x0000ULL
+#else
+#define SISL_AFUCONF_ENDIAN 0x0020ULL
+#endif
+#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
+ __be64 afu_config;
+ __be64 rsvd[0xf8];
+ __be64 afu_version;
+ __be64 interface_version;
+};
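A short sketch of enabling the per-port async status interrupts above; the helper is hypothetical, and the polarity follows the comments (a 1 in SISL_ASTATUS_UNMASK means unmasked, so the mask register takes its complement):

/* Hypothetical sketch: clear then unmask the FC async status bits. */
static void enable_async_intr(struct sisl_global_regs __iomem *gregs)
{
	writeq_be(-1ULL, &gregs->aintr_clear);	/* clear anything pending */
	writeq_be(SISL_ASTATUS_MASK, &gregs->aintr_mask); /* b48:63 unmasked */
}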
+
+#define CXLFLASH_NUM_FC_PORTS 2
+#define CXLFLASH_MAX_CONTEXT 512 /* how many contexts per afu */
+#define CXLFLASH_NUM_VLUNS 512
+
+struct sisl_global_map {
+ union {
+ struct sisl_global_regs regs;
+ char page0[SIZE_4K]; /* page 0 */
+ };
+
+ char page1[SIZE_4K]; /* page 1 */
+
+ /* pages 2 & 3 */
+ __be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+
+ /* pages 4 & 5 (lun tbl) */
+ __be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+
+};
+
+/*
+ * CXL Flash Memory Map
+ *
+ * +-------------------------------+
+ * | 512 * 64 KB User MMIO |
+ * | (per context) |
+ * | User Accessible |
+ * +-------------------------------+
+ * | 512 * 128 B per context |
+ * | Provisioning and Control |
+ * | Trusted Process accessible |
+ * +-------------------------------+
+ * | 64 KB Global |
+ * | Trusted Process accessible |
+ * +-------------------------------+
+ */
+struct cxlflash_afu_map {
+ union {
+ struct sisl_host_map host;
+ char harea[SIZE_64K]; /* 64KB each */
+ } hosts[CXLFLASH_MAX_CONTEXT];
+
+ union {
+ struct sisl_ctrl_map ctrl;
+ char carea[cache_line_size()]; /* 128B each */
+ } ctrls[CXLFLASH_MAX_CONTEXT];
+
+ union {
+ struct sisl_global_map global;
+ char garea[SIZE_64K]; /* 64KB single block */
+ };
+};
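Given this layout, the per-context MMIO areas fall out of simple array indexing; afu_attach() and create_context() later in this file do the equivalent for the control map. A purely illustrative sketch:

/* Hypothetical sketch: locate a context's MMIO areas within the AFU map. */
static void locate_mmio(struct cxlflash_afu_map __iomem *map, int ctxid)
{
	struct sisl_host_map __iomem *hmap = &map->hosts[ctxid].host;
	struct sisl_ctrl_map __iomem *cmap = &map->ctrls[ctxid].ctrl;
	struct sisl_global_regs __iomem *gregs = &map->global.regs;

	(void)hmap; (void)cmap; (void)gregs;
}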
+
+/*
+ * LXT - LBA Translation Table
+ * LXT control blocks
+ */
+struct sisl_lxt_entry {
+ u64 rlba_base; /* bits 0:47 is base
+ * b48:55 is lun index
+ * b58:59 is write & read perms
+ * (if no perm, afu_rc=0x15)
+ * b60:63 is port_sel mask
+ */
+};
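Note that the comment uses IBM (big-endian) bit numbering, where bit 0 is the MSB; in shift-and-mask terms the fields decode as below. The helper is hypothetical:

/* Hypothetical sketch: decode an LXT entry per the layout above
 * (b60:63 is the low nibble of the u64, bits 0:47 are the top 48 bits).
 */
static void decode_lxt(const struct sisl_lxt_entry *lxt)
{
	u64 port_sel = lxt->rlba_base & 0xF;		/* b60:63 */
	u64 perms = (lxt->rlba_base >> 4) & 0x3;	/* b58:59 */
	u64 lun_idx = (lxt->rlba_base >> 8) & 0xFF;	/* b48:55 */
	u64 base = lxt->rlba_base >> 16;		/* bits 0:47 */

	(void)port_sel; (void)perms; (void)lun_idx; (void)base;
}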
+
+/*
+ * RHT - Resource Handle Table
+ * Per the SISlite spec, RHT entries are to be 16-byte aligned
+ */
+struct sisl_rht_entry {
+ struct sisl_lxt_entry *lxt_start;
+ u32 lxt_cnt;
+ u16 rsvd;
+ u8 fp; /* format & perm nibbles.
+ * (if no perm, afu_rc=0x05)
+ */
+ u8 nmask;
+} __packed __aligned(16);
+
+struct sisl_rht_entry_f1 {
+ u64 lun_id;
+ union {
+ struct {
+ u8 valid;
+ u8 rsvd[5];
+ u8 fp;
+ u8 port_sel;
+ };
+
+ u64 dw;
+ };
+} __packed __aligned(16);
+
+/* make the fp byte */
+#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
+
+/* make the fp byte for a clone from a source fp and clone flags
+ * flags must be only 2 LSB bits.
+ */
+#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
+
+#define RHT_PERM_READ 0x01U
+#define RHT_PERM_WRITE 0x02U
+#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
+
+/* extract the perm bits from a fp */
+#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
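A quick worked example of the fp helpers, with illustrative values:

/* Hypothetical sketch: build, clone and decode an fp byte. */
static void fp_example(void)
{
	u8 fp = SISL_RHT_FP(1U, RHT_PERM_RW);	/* 0x13: format 1, read/write */
	u8 ro = SISL_RHT_FP_CLONE(fp, RHT_PERM_READ);	/* 0x11: read-only */
	u8 perm = SISL_RHT_PERM(ro);		/* 0x01 == RHT_PERM_READ */

	(void)perm;
}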
+
+#define PORT0 0x01U
+#define PORT1 0x02U
+#define BOTH_PORTS (PORT0 | PORT1)
+
+/* AFU Sync Mode byte */
+#define AFU_LW_SYNC 0x0U
+#define AFU_HW_SYNC 0x1U
+#define AFU_GSYNC 0x2U
+
+/* Special Task Management Function CDB */
+#define TMF_LUN_RESET 0x1U
+#define TMF_CLEAR_ACA 0x2U
+
+#define SISLITE_MAX_WS_BLOCKS 512
+
+#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
new file mode 100644
index 000000000000..f1b62cea75b1
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -0,0 +1,2084 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+struct cxlflash_global global;
+
+/**
+ * marshal_rele_to_resize() - translate release to resize structure
+ * @release: Source structure from which to translate/copy.
+ * @resize: Destination structure for the translate/copy.
+ */
+static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
+ struct dk_cxlflash_resize *resize)
+{
+ resize->hdr = release->hdr;
+ resize->context_id = release->context_id;
+ resize->rsrc_handle = release->rsrc_handle;
+}
+
+/**
+ * marshal_det_to_rele() - translate detach to release structure
+ * @detach: Source structure from which to translate/copy.
+ * @release: Destination structure for the translate/copy.
+ */
+static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
+ struct dk_cxlflash_release *release)
+{
+ release->hdr = detach->hdr;
+ release->context_id = detach->context_id;
+}
+
+/**
+ * cxlflash_free_errpage() - frees resources associated with global error page
+ */
+void cxlflash_free_errpage(void)
+{
+ mutex_lock(&global.mutex);
+ if (global.err_page) {
+ __free_page(global.err_page);
+ global.err_page = NULL;
+ }
+ mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
+ * @cfg: Internal structure associated with the host.
+ *
+ * When the host needs to go down, all users must be quiesced and their
+ * memory freed. This is accomplished by putting the contexts in error
+ * state which will notify the user and let them 'drive' the tear-down.
+ * Meanwhile, this routine camps until all user contexts have been removed.
+ */
+void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ int i, found;
+
+ cxlflash_mark_contexts_error(cfg);
+
+ while (true) {
+ found = false;
+
+ for (i = 0; i < MAX_CONTEXT; i++)
+ if (cfg->ctx_tbl[i]) {
+ found = true;
+ break;
+ }
+
+ if (!found && list_empty(&cfg->ctx_err_recovery))
+ return;
+
+ dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
+ __func__);
+ wake_up_all(&cfg->limbo_waitq);
+ ssleep(1);
+ }
+}
+
+/**
+ * find_error_context() - locates a context by cookie on the error recovery list
+ * @cfg: Internal structure associated with the host.
+ * @rctxid: Desired context by id.
+ * @file: Desired context by file.
+ *
+ * Return: Found context on success, NULL on failure
+ */
+static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
+ struct file *file)
+{
+ struct ctx_info *ctxi;
+
+ list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
+ if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
+ return ctxi;
+
+ return NULL;
+}
+
+/**
+ * get_context() - obtains a validated and locked context reference
+ * @cfg: Internal structure associated with the host.
+ * @rctxid: Desired context (raw, un-decoded format).
+ * @arg: LUN information or file associated with request.
+ * @ctx_ctrl: Control information to 'steer' desired lookup.
+ *
+ * NOTE: despite the name pid, in Linux, current->pid actually refers
+ * to the lightweight process id (tid) and can change if the process is
+ * multithreaded. The tgid remains constant for the process and only
+ * changes when the process forks. For all intents and purposes, think
+ * of tgid as a pid in the traditional sense.
+ *
+ * Return: Validated context on success, NULL on failure
+ */
+struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
+ void *arg, enum ctx_ctrl ctx_ctrl)
+{
+ struct device *dev = &cfg->dev->dev;
+ struct ctx_info *ctxi = NULL;
+ struct lun_access *lun_access = NULL;
+ struct file *file = NULL;
+ struct llun_info *lli = arg;
+ u64 ctxid = DECODE_CTXID(rctxid);
+ int rc;
+ pid_t pid = current->tgid, ctxpid = 0;
+
+ if (ctx_ctrl & CTX_CTRL_FILE) {
+ lli = NULL;
+ file = (struct file *)arg;
+ }
+
+ if (ctx_ctrl & CTX_CTRL_CLONE)
+ pid = current->parent->tgid;
+
+ if (likely(ctxid < MAX_CONTEXT)) {
+ while (true) {
+ rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
+ if (rc)
+ goto out;
+
+ ctxi = cfg->ctx_tbl[ctxid];
+ if (ctxi)
+ if ((file && (ctxi->file != file)) ||
+ (!file && (ctxi->ctxid != rctxid)))
+ ctxi = NULL;
+
+ if ((ctx_ctrl & CTX_CTRL_ERR) ||
+ (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
+ ctxi = find_error_context(cfg, rctxid, file);
+ if (!ctxi) {
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ goto out;
+ }
+
+ /*
+ * Need to acquire ownership of the context while still
+ * under the table/list lock to serialize with a remove
+ * thread. Use the 'try' to avoid stalling the
+ * table/list lock for a single context.
+ *
+ * Note that the lock order is:
+ *
+ * cfg->ctx_tbl_list_mutex -> ctxi->mutex
+ *
+ * Therefore release ctx_tbl_list_mutex before retrying.
+ */
+ rc = mutex_trylock(&ctxi->mutex);
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ if (rc)
+ break; /* got the context's lock! */
+ }
+
+ if (ctxi->unavail)
+ goto denied;
+
+ ctxpid = ctxi->pid;
+ if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
+ if (pid != ctxpid)
+ goto denied;
+
+ if (lli) {
+ list_for_each_entry(lun_access, &ctxi->luns, list)
+ if (lun_access->lli == lli)
+ goto out;
+ goto denied;
+ }
+ }
+
+out:
+ dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
+ "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
+ ctx_ctrl);
+
+ return ctxi;
+
+denied:
+ mutex_unlock(&ctxi->mutex);
+ ctxi = NULL;
+ goto out;
+}
+
+/**
+ * put_context() - release a context that was retrieved from get_context()
+ * @ctxi: Context to release.
+ *
+ * For now, releasing the context equates to unlocking its mutex.
+ */
+void put_context(struct ctx_info *ctxi)
+{
+ mutex_unlock(&ctxi->mutex);
+}
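The intended calling pattern is a strict get/put pairing around any use of a context; a sketch, with the LUN argument and control flags varying per caller:

/* Hypothetical sketch: canonical get_context()/put_context() pairing. */
static int use_context(struct cxlflash_cfg *cfg, u64 rctxid,
		       struct llun_info *lli)
{
	struct ctx_info *ctxi = get_context(cfg, rctxid, lli, 0);

	if (unlikely(!ctxi))
		return -EINVAL;

	/* ctxi is validated and its mutex is held here */

	put_context(ctxi);	/* drops ctxi->mutex */
	return 0;
}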
+
+/**
+ * afu_attach() - attach a context to the AFU
+ * @cfg: Internal structure associated with the host.
+ * @ctxi: Context to attach.
+ *
+ * After setting the context capabilities, confirm them with a read-back
+ * operation, as the context might have been closed since the mailbox was
+ * unlocked. When this occurs, registration fails.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+{
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
+ int rc = 0;
+ u64 val;
+
+ /* Unlock cap and restrict user to read/write cmds in translated mode */
+ readq_be(&ctrl_map->mbox_r);
+ val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
+ writeq_be(val, &ctrl_map->ctx_cap);
+ val = readq_be(&ctrl_map->ctx_cap);
+ if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
+ dev_err(dev, "%s: ctx may be closed val=%016llX\n",
+ __func__, val);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* Set up MMIO registers pointing to the RHT */
+ writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
+ val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
+ writeq_be(val, &ctrl_map->rht_cnt_id);
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * read_cap16() - issues a SCSI READ_CAP16 command
+ * @sdev: SCSI device associated with LUN.
+ * @lli: LUN destined for capacity request.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct glun_info *gli = lli->parent;
+ u8 *cmd_buf = NULL;
+ u8 *scsi_cmd = NULL;
+ u8 *sense_buf = NULL;
+ int rc = 0;
+ int result = 0;
+ int retry_cnt = 0;
+ u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
+
+retry:
+ cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
+ scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
+ sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
+ scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
+ put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
+
+ dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
+ retry_cnt ? "re" : "", scsi_cmd[0]);
+
+ result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
+ CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+
+ if (driver_byte(result) == DRIVER_SENSE) {
+ result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+ if (result & SAM_STAT_CHECK_CONDITION) {
+ struct scsi_sense_hdr sshdr;
+
+ scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ switch (sshdr.sense_key) {
+ case NO_SENSE:
+ case RECOVERED_ERROR:
+ /* fall through */
+ case NOT_READY:
+ result &= ~SAM_STAT_CHECK_CONDITION;
+ break;
+ case UNIT_ATTENTION:
+ switch (sshdr.asc) {
+ case 0x29: /* Power on Reset or Device Reset */
+ /* fall through */
+ case 0x2A: /* Device capacity changed */
+ case 0x3F: /* Report LUNs changed */
+ /* Retry the command once more */
+ if (retry_cnt++ < 1) {
+ kfree(cmd_buf);
+ kfree(scsi_cmd);
+ kfree(sense_buf);
+ goto retry;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (result) {
+ dev_err(dev, "%s: command failed, result=0x%x\n",
+ __func__, result);
+ rc = -EIO;
+ goto out;
+ }
+
+ /*
+ * Read cap was successful, grab values from the buffer;
+ * note that we don't need to worry about unaligned access
+ * as the buffer is allocated on an aligned boundary.
+ */
+ mutex_lock(&gli->mutex);
+ gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
+ gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
+ mutex_unlock(&gli->mutex);
+
+out:
+ kfree(cmd_buf);
+ kfree(scsi_cmd);
+ kfree(sense_buf);
+
+ dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
+ __func__, gli->max_lba, gli->blk_len, rc);
+ return rc;
+}
+
+/**
+ * get_rhte() - obtains validated resource handle table entry reference
+ * @ctxi: Context owning the resource handle.
+ * @rhndl: Resource handle associated with entry.
+ * @lli: LUN associated with request.
+ *
+ * Return: Validated RHTE on success, NULL on failure
+ */
+struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
+ struct llun_info *lli)
+{
+ struct sisl_rht_entry *rhte = NULL;
+
+ if (unlikely(!ctxi->rht_start)) {
+ pr_debug("%s: Context does not have allocated RHT!\n",
+ __func__);
+ goto out;
+ }
+
+ if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
+ pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
+ goto out;
+ }
+
+ if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
+ pr_debug("%s: Bad resource handle LUN! (%d)\n",
+ __func__, rhndl);
+ goto out;
+ }
+
+ rhte = &ctxi->rht_start[rhndl];
+ if (unlikely(rhte->nmask == 0)) {
+ pr_debug("%s: Unopened resource handle! (%d)\n",
+ __func__, rhndl);
+ rhte = NULL;
+ goto out;
+ }
+
+out:
+ return rhte;
+}
+
+/**
+ * rhte_checkout() - obtains free/empty resource handle table entry
+ * @ctxi: Context owning the resource handle.
+ * @lli: LUN associated with request.
+ *
+ * Return: Free RHTE on success, NULL on failure
+ */
+struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
+ struct llun_info *lli)
+{
+ struct sisl_rht_entry *rhte = NULL;
+ int i;
+
+ /* Find a free RHT entry */
+ for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
+ if (ctxi->rht_start[i].nmask == 0) {
+ rhte = &ctxi->rht_start[i];
+ ctxi->rht_out++;
+ break;
+ }
+
+ if (likely(rhte))
+ ctxi->rht_lun[i] = lli;
+
+ pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
+ return rhte;
+}
+
+/**
+ * rhte_checkin() - releases a resource handle table entry
+ * @ctxi: Context owning the resource handle.
+ * @rhte: RHTE to release.
+ */
+void rhte_checkin(struct ctx_info *ctxi,
+ struct sisl_rht_entry *rhte)
+{
+ u32 rsrc_handle = rhte - ctxi->rht_start;
+
+ rhte->nmask = 0;
+ rhte->fp = 0;
+ ctxi->rht_out--;
+ ctxi->rht_lun[rsrc_handle] = NULL;
+ ctxi->rht_needs_ws[rsrc_handle] = false;
+}
+
+/**
+ * rht_format1() - populates an RHTE for format 1
+ * @rhte: RHTE to populate.
+ * @lun_id: LUN ID of LUN associated with RHTE.
+ * @perm: Desired permissions for RHTE.
+ * @port_sel: Port selection mask.
+ */
+static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
+ u32 port_sel)
+{
+ /*
+ * Populate the Format 1 RHT entry for direct access (physical
+ * LUN) using the synchronization sequence defined in the
+ * SISLite specification.
+ */
+ struct sisl_rht_entry_f1 dummy = { 0 };
+ struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
+
+ memset(rhte_f1, 0, sizeof(*rhte_f1));
+ rhte_f1->fp = SISL_RHT_FP(1U, 0);
+ dma_wmb(); /* Make setting of format bit visible */
+
+ rhte_f1->lun_id = lun_id;
+ dma_wmb(); /* Make setting of LUN id visible */
+
+ /*
+ * Use a dummy RHT Format 1 entry to build the second dword
+ * of the entry that must be populated in a single write when
+ * enabled (valid bit set to TRUE).
+ */
+ dummy.valid = 0x80;
+ dummy.fp = SISL_RHT_FP(1U, perm);
+ dummy.port_sel = port_sel;
+ rhte_f1->dw = dummy.dw;
+
+ dma_wmb(); /* Make remaining RHT entry fields visible */
+}
+
+/**
+ * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
+ * @gli: LUN to attach.
+ * @mode: Desired mode of the LUN.
+ * @locked: Mutex status on current thread.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
+{
+ int rc = 0;
+
+ if (!locked)
+ mutex_lock(&gli->mutex);
+
+ if (gli->mode == MODE_NONE)
+ gli->mode = mode;
+ else if (gli->mode != mode) {
+ pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
+ __func__, gli->mode, mode);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ gli->users++;
+ WARN_ON(gli->users <= 0);
+out:
+ pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
+ __func__, rc, gli->mode, gli->users);
+ if (!locked)
+ mutex_unlock(&gli->mutex);
+ return rc;
+}
+
+/**
+ * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
+ * @gli: LUN to detach.
+ *
+ * When resetting the mode, terminate block allocation resources as they
+ * are no longer required (service is safe to call even when block allocation
+ * resources were not present - such as when transitioning from physical mode).
+ * These resources will be reallocated when needed (subsequent transition to
+ * virtual mode).
+ */
+void cxlflash_lun_detach(struct glun_info *gli)
+{
+ mutex_lock(&gli->mutex);
+ WARN_ON(gli->mode == MODE_NONE);
+ if (--gli->users == 0) {
+ gli->mode = MODE_NONE;
+ cxlflash_ba_terminate(&gli->blka.ba_lun);
+ }
+ pr_debug("%s: gli->users=%u\n", __func__, gli->users);
+ WARN_ON(gli->users < 0);
+ mutex_unlock(&gli->mutex);
+}
+
+/**
+ * _cxlflash_disk_release() - releases the specified resource entry
+ * @sdev: SCSI device associated with LUN.
+ * @ctxi: Context owning resources.
+ * @release: Release ioctl data structure.
+ *
+ * For LUNs in virtual mode, the virtual LUN associated with the specified
+ * resource handle is resized to 0 prior to releasing the RHTE. Note that the
+ * AFU sync should _not_ be performed when the context is sitting on the error
+ * recovery list. A context on the error recovery list is not known to the AFU
+ * due to reset. When the context is recovered, it will be reattached and made
+ * known again to the AFU.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int _cxlflash_disk_release(struct scsi_device *sdev,
+ struct ctx_info *ctxi,
+ struct dk_cxlflash_release *release)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct afu *afu = cfg->afu;
+ bool put_ctx = false;
+
+ struct dk_cxlflash_resize size;
+ res_hndl_t rhndl = release->rsrc_handle;
+
+ int rc = 0;
+ u64 ctxid = DECODE_CTXID(release->context_id),
+ rctxid = release->context_id;
+
+ struct sisl_rht_entry *rhte;
+ struct sisl_rht_entry_f1 *rhte_f1;
+
+ dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
+ __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
+
+ if (!ctxi) {
+ ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%llu)\n",
+ __func__, ctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ put_ctx = true;
+ }
+
+ rhte = get_rhte(ctxi, rhndl, lli);
+ if (unlikely(!rhte)) {
+ dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+ __func__, rhndl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Resize virtual LUNs to 0 by setting the requested size to 0.
+ * This clears the LXT_START and LXT_CNT fields in the RHT entry
+ * and properly syncs with the AFU.
+ *
+ * Afterwards we clear the remaining fields.
+ */
+ switch (gli->mode) {
+ case MODE_VIRTUAL:
+ marshal_rele_to_resize(release, &size);
+ size.req_size = 0;
+ rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
+ if (rc) {
+ dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
+ goto out;
+ }
+
+ break;
+ case MODE_PHYSICAL:
+ /*
+ * Clear the Format 1 RHT entry for direct access
+ * (physical LUN) using the synchronization sequence
+ * defined in the SISLite specification.
+ */
+ rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
+
+ rhte_f1->valid = 0;
+ dma_wmb(); /* Make revocation of RHT entry visible */
+
+ rhte_f1->lun_id = 0;
+ dma_wmb(); /* Make clearing of LUN id visible */
+
+ rhte_f1->dw = 0;
+ dma_wmb(); /* Make RHT entry bottom-half clearing visible */
+
+ if (!ctxi->err_recovery_active)
+ cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ break;
+ default:
+ WARN(1, "Unsupported LUN mode!");
+ goto out;
+ }
+
+ rhte_checkin(ctxi, rhte);
+ cxlflash_lun_detach(gli);
+
+out:
+ if (put_ctx)
+ put_context(ctxi);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int cxlflash_disk_release(struct scsi_device *sdev,
+ struct dk_cxlflash_release *release)
+{
+ return _cxlflash_disk_release(sdev, NULL, release);
+}
+
+/**
+ * destroy_context() - releases a context
+ * @cfg: Internal structure associated with the host.
+ * @ctxi: Context to release.
+ *
+ * Note that the rht_lun member of the context was cut from a single
+ * allocation when the context was created and therefore does not need
+ * to be explicitly freed. Also note that we conditionally check for the
+ * existence of the context control map before clearing the RHT registers
+ * and context capabilities because it is possible to destroy a context
+ * while the context is in the error state (previous mapping was removed
+ * [so we don't have to worry about clearing] and context is waiting for
+ * a new mapping).
+ */
+static void destroy_context(struct cxlflash_cfg *cfg,
+ struct ctx_info *ctxi)
+{
+ struct afu *afu = cfg->afu;
+
+ WARN_ON(!list_empty(&ctxi->luns));
+
+ /* Clear RHT registers and drop all capabilities for this context */
+ if (afu->afu_map && ctxi->ctrl_map) {
+ writeq_be(0, &ctxi->ctrl_map->rht_start);
+ writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
+ writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+ }
+
+ /* Free memory associated with context */
+ free_page((ulong)ctxi->rht_start);
+ kfree(ctxi->rht_needs_ws);
+ kfree(ctxi->rht_lun);
+ kfree(ctxi);
+ atomic_dec_if_positive(&cfg->num_user_contexts);
+}
+
+/**
+ * create_context() - allocates and initializes a context
+ * @cfg: Internal structure associated with the host.
+ * @ctx: Previously obtained CXL context reference.
+ * @ctxid: Previously obtained process element associated with CXL context.
+ * @adap_fd: Previously obtained adapter fd associated with CXL context.
+ * @file: Previously obtained file associated with CXL context.
+ * @perms: User-specified permissions.
+ *
+ * The context's mutex is locked when an allocated context is returned.
+ *
+ * Return: Allocated context on success, NULL on failure
+ */
+static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
+ struct cxl_context *ctx, int ctxid,
+ int adap_fd, struct file *file,
+ u32 perms)
+{
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct ctx_info *ctxi = NULL;
+ struct llun_info **lli = NULL;
+ bool *ws = NULL;
+ struct sisl_rht_entry *rhte;
+
+ ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
+ lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
+ ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
+ if (unlikely(!ctxi || !lli || !ws)) {
+ dev_err(dev, "%s: Unable to allocate context!\n", __func__);
+ goto err;
+ }
+
+ rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
+ if (unlikely(!rhte)) {
+ dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
+ goto err;
+ }
+
+ ctxi->rht_lun = lli;
+ ctxi->rht_needs_ws = ws;
+ ctxi->rht_start = rhte;
+ ctxi->rht_perms = perms;
+
+ ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
+ ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->lfd = adap_fd;
+ ctxi->pid = current->tgid; /* tgid = pid */
+ ctxi->ctx = ctx;
+ ctxi->file = file;
+ mutex_init(&ctxi->mutex);
+ INIT_LIST_HEAD(&ctxi->luns);
+ INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
+
+ atomic_inc(&cfg->num_user_contexts);
+ mutex_lock(&ctxi->mutex);
+out:
+ return ctxi;
+
+err:
+ kfree(ws);
+ kfree(lli);
+ kfree(ctxi);
+ ctxi = NULL;
+ goto out;
+}
+
+/**
+ * _cxlflash_disk_detach() - detaches a LUN from a context
+ * @sdev: SCSI device associated with LUN.
+ * @ctxi: Context owning resources.
+ * @detach: Detach ioctl data structure.
+ *
+ * As part of the detach, all per-context resources associated with the LUN
+ * are cleaned up. When detaching the last LUN for a context, the context
+ * itself is cleaned up and released.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _cxlflash_disk_detach(struct scsi_device *sdev,
+ struct ctx_info *ctxi,
+ struct dk_cxlflash_detach *detach)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct lun_access *lun_access, *t;
+ struct dk_cxlflash_release rel;
+ bool put_ctx = false;
+
+ int i;
+ int rc = 0;
+ int lfd;
+ u64 ctxid = DECODE_CTXID(detach->context_id),
+ rctxid = detach->context_id;
+
+ dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
+
+ if (!ctxi) {
+ ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%llu)\n",
+ __func__, ctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ put_ctx = true;
+ }
+
+ /* Cleanup outstanding resources tied to this LUN */
+ if (ctxi->rht_out) {
+ marshal_det_to_rele(detach, &rel);
+ for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
+ if (ctxi->rht_lun[i] == lli) {
+ rel.rsrc_handle = i;
+ _cxlflash_disk_release(sdev, ctxi, &rel);
+ }
+
+ /* No need to loop further if we're done */
+ if (ctxi->rht_out == 0)
+ break;
+ }
+ }
+
+ /* Take our LUN out of context, free the node */
+ list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
+ if (lun_access->lli == lli) {
+ list_del(&lun_access->list);
+ kfree(lun_access);
+ lun_access = NULL;
+ break;
+ }
+
+ /* Tear down context following last LUN cleanup */
+ if (list_empty(&ctxi->luns)) {
+ ctxi->unavail = true;
+ mutex_unlock(&ctxi->mutex);
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
+ mutex_lock(&ctxi->mutex);
+
+ /* Might not have been in error list so conditionally remove */
+ if (!list_empty(&ctxi->list))
+ list_del(&ctxi->list);
+ cfg->ctx_tbl[ctxid] = NULL;
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ mutex_unlock(&ctxi->mutex);
+
+ lfd = ctxi->lfd;
+ destroy_context(cfg, ctxi);
+ ctxi = NULL;
+ put_ctx = false;
+
+ /*
+ * As a last step, clean up external resources when not
+ * already on an external cleanup thread, i.e.: close(adap_fd).
+ *
+ * NOTE: this will free up the context from the CXL services,
+ * allowing it to dole out the same context_id on a future
+ * (or even currently in-flight) disk_attach operation.
+ */
+ if (lfd != -1)
+ sys_close(lfd);
+ }
+
+out:
+ if (put_ctx)
+ put_context(ctxi);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int cxlflash_disk_detach(struct scsi_device *sdev,
+ struct dk_cxlflash_detach *detach)
+{
+ return _cxlflash_disk_detach(sdev, NULL, detach);
+}
+
+/**
+ * cxlflash_cxl_release() - release handler for adapter file descriptor
+ * @inode: File-system inode associated with fd.
+ * @file: File installed with adapter file descriptor.
+ *
+ * This routine is the release handler for the fops registered with
+ * the CXL services on an initial attach for a context. It is called
+ * when a close is performed on the adapter file descriptor returned
+ * to the user. Programmatically, the user is not required to perform
+ * the close, as it is handled internally via the detach ioctl when
+ * a context is being removed. Note that nothing prevents the user
+ * from performing a close, but the user should be aware that doing
+ * so is considered catastrophic and subsequent usage of the superpipe
+ * API with previously saved off tokens will fail.
+ *
+ * When initiated from an external close (either by the user or via
+ * a process tear down), the routine derives the context reference
+ * and calls detach for each LUN associated with the context. The
+ * final detach operation will cause the context itself to be freed.
+ * Note that the saved off lfd is reset prior to calling detach to
+ * signify that the final detach should not perform a close.
+ *
+ * When initiated from a detach operation as part of the tear down
+ * of a context, the context is first completely freed and then the
+ * close is performed. This routine will fail to derive the context
+ * reference (due to the context having already been freed) and then
+ * call into the CXL release entry point.
+ *
+ * Thus, with exception to when the CXL process element (context id)
+ * lookup fails (a case that should theoretically never occur), every
+ * call into this routine results in a complete freeing of a context.
+ *
+ * As part of the detach, all per-context resources associated with the LUN
+ * are cleaned up. When detaching the last LUN for a context, the context
+ * itself is cleaned up and released.
+ *
+ * Return: 0 on success
+ */
+static int cxlflash_cxl_release(struct inode *inode, struct file *file)
+{
+ struct cxl_context *ctx = cxl_fops_get_context(file);
+ struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+ cxl_fops);
+ struct device *dev = &cfg->dev->dev;
+ struct ctx_info *ctxi = NULL;
+ struct dk_cxlflash_detach detach = { { 0 }, 0 };
+ struct lun_access *lun_access, *t;
+ enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+ int ctxid;
+
+ ctxid = cxl_process_element(ctx);
+ if (unlikely(ctxid < 0)) {
+ dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ __func__, ctx, ctxid);
+ goto out;
+ }
+
+ ctxi = get_context(cfg, ctxid, file, ctrl);
+ if (unlikely(!ctxi)) {
+ ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
+ if (!ctxi) {
+ dev_dbg(dev, "%s: Context %d already free!\n",
+ __func__, ctxid);
+ goto out_release;
+ }
+
+ dev_dbg(dev, "%s: Another process owns context %d!\n",
+ __func__, ctxid);
+ put_context(ctxi);
+ goto out;
+ }
+
+ dev_dbg(dev, "%s: close(%d) for context %d\n",
+ __func__, ctxi->lfd, ctxid);
+
+ /* Reset the file descriptor to indicate we're on a close() thread */
+ ctxi->lfd = -1;
+ detach.context_id = ctxi->ctxid;
+ list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
+ _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
+out_release:
+ cxl_fd_release(inode, file);
+out:
+ dev_dbg(dev, "%s: returning\n", __func__);
+ return 0;
+}
+
+/**
+ * unmap_context() - clears a previously established mapping
+ * @ctxi: Context owning the mapping.
+ *
+ * This routine is used to switch between the error notification page
+ * (dummy page of all 1's) and the real mapping (established by the CXL
+ * fault handler).
+ */
+static void unmap_context(struct ctx_info *ctxi)
+{
+ unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
+}
+
+/**
+ * get_err_page() - obtains and allocates the error notification page
+ *
+ * Return: error notification page on success, NULL on failure
+ */
+static struct page *get_err_page(void)
+{
+ struct page *err_page = global.err_page;
+
+ if (unlikely(!err_page)) {
+ err_page = alloc_page(GFP_KERNEL);
+ if (unlikely(!err_page)) {
+ pr_err("%s: Unable to allocate err_page!\n", __func__);
+ goto out;
+ }
+
+ memset(page_address(err_page), -1, PAGE_SIZE);
+
+ /* Serialize update w/ other threads to avoid a leak */
+ mutex_lock(&global.mutex);
+ if (likely(!global.err_page))
+ global.err_page = err_page;
+ else {
+ __free_page(err_page);
+ err_page = global.err_page;
+ }
+ mutex_unlock(&global.mutex);
+ }
+
+out:
+ pr_debug("%s: returning err_page=%p\n", __func__, err_page);
+ return err_page;
+}
+
+/**
+ * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
+ * @vma: VM area associated with mapping.
+ * @vmf: VM fault associated with current fault.
+ *
+ * To support error notification via MMIO, faults are 'caught' by this routine
+ * that was inserted before passing back the adapter file descriptor on attach.
+ * When a fault occurs, this routine evaluates if error recovery is active and
+ * if so, installs the error page to 'notify' the user about the error state.
+ * During normal operation, the fault is simply handled by the original fault
+ * handler that was installed by CXL services as part of initializing the
+ * adapter file descriptor. The VMA's page protection bits are toggled to
+ * indicate cached/not-cached depending on the memory backing the fault.
+ *
+ * Return: 0 on success, VM_FAULT_SIGBUS on failure
+ */
+static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct file *file = vma->vm_file;
+ struct cxl_context *ctx = cxl_fops_get_context(file);
+ struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+ cxl_fops);
+ struct device *dev = &cfg->dev->dev;
+ struct ctx_info *ctxi = NULL;
+ struct page *err_page = NULL;
+ enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+ int rc = 0;
+ int ctxid;
+
+ ctxid = cxl_process_element(ctx);
+ if (unlikely(ctxid < 0)) {
+ dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ __func__, ctx, ctxid);
+ goto err;
+ }
+
+ ctxi = get_context(cfg, ctxid, file, ctrl);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+ goto err;
+ }
+
+ dev_dbg(dev, "%s: fault(%d) for context %d\n",
+ __func__, ctxi->lfd, ctxid);
+
+ if (likely(!ctxi->err_recovery_active)) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
+ } else {
+ dev_dbg(dev, "%s: err recovery active, use err_page!\n",
+ __func__);
+
+ err_page = get_err_page();
+ if (unlikely(!err_page)) {
+ dev_err(dev, "%s: Could not obtain error page!\n",
+ __func__);
+ rc = VM_FAULT_RETRY;
+ goto out;
+ }
+
+ get_page(err_page);
+ vmf->page = err_page;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+ }
+
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+
+err:
+ rc = VM_FAULT_SIGBUS;
+ goto out;
+}
+
+/*
+ * Local MMAP vmops to 'catch' faults
+ */
+static const struct vm_operations_struct cxlflash_mmap_vmops = {
+ .fault = cxlflash_mmap_fault,
+};
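From user space, the visible effect of the error page is that every read of the mapping returns all 1's; an application can therefore poll any mapped register that can never legitimately read as ~0. A hypothetical user-space sketch:

/* Hypothetical user-space sketch: detect the error-notification page. */
#include <stdbool.h>
#include <stdint.h>

static bool afu_in_error(const volatile uint64_t *mmio_word)
{
	/* the dummy page is memset to -1, so reads return all 1's */
	return *mmio_word == UINT64_MAX;
}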
+
+/**
+ * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
+ * @file: File installed with adapter file descriptor.
+ * @vma: VM area associated with mapping.
+ *
+ * Installs local mmap vmops to 'catch' faults for error notification support.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct cxl_context *ctx = cxl_fops_get_context(file);
+ struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+ cxl_fops);
+ struct device *dev = &cfg->dev->dev;
+ struct ctx_info *ctxi = NULL;
+ enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+ int ctxid;
+ int rc = 0;
+
+ ctxid = cxl_process_element(ctx);
+ if (unlikely(ctxid < 0)) {
+ dev_err(dev, "%s: Context %p was closed! (%d)\n",
+ __func__, ctx, ctxid);
+ rc = -EIO;
+ goto out;
+ }
+
+ ctxi = get_context(cfg, ctxid, file, ctrl);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+ rc = -EIO;
+ goto out;
+ }
+
+ dev_dbg(dev, "%s: mmap(%d) for context %d\n",
+ __func__, ctxi->lfd, ctxid);
+
+ rc = cxl_fd_mmap(file, vma);
+ if (likely(!rc)) {
+ /* Insert ourself in the mmap fault handler path */
+ ctxi->cxl_mmap_vmops = vma->vm_ops;
+ vma->vm_ops = &cxlflash_mmap_vmops;
+ }
+
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ return rc;
+}
+
+/*
+ * Local fops for adapter file descriptor
+ */
+static const struct file_operations cxlflash_cxl_fops = {
+ .owner = THIS_MODULE,
+ .mmap = cxlflash_cxl_mmap,
+ .release = cxlflash_cxl_release,
+};
+
+/**
+ * cxlflash_mark_contexts_error() - move contexts to error state and list
+ * @cfg: Internal structure associated with the host.
+ *
+ * A context is only moved over to the error list when there are no outstanding
+ * references to it. This ensures that a running operation has completed.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
+{
+ int i, rc = 0;
+ struct ctx_info *ctxi = NULL;
+
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
+
+ for (i = 0; i < MAX_CONTEXT; i++) {
+ ctxi = cfg->ctx_tbl[i];
+ if (ctxi) {
+ mutex_lock(&ctxi->mutex);
+ cfg->ctx_tbl[i] = NULL;
+ list_add(&ctxi->list, &cfg->ctx_err_recovery);
+ ctxi->err_recovery_active = true;
+ ctxi->ctrl_map = NULL;
+ unmap_context(ctxi);
+ mutex_unlock(&ctxi->mutex);
+ }
+ }
+
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ return rc;
+}
+
+/*
+ * Dummy NULL fops
+ */
+static const struct file_operations null_fops = {
+ .owner = THIS_MODULE,
+};
+
+/**
+ * cxlflash_disk_attach() - attach a LUN to a context
+ * @sdev: SCSI device associated with LUN.
+ * @attach: Attach ioctl data structure.
+ *
+ * Creates a context and attaches LUN to it. A LUN can only be attached
+ * one time to a context (subsequent attaches for the same context/LUN pair
+ * are not supported). Additional LUNs can be attached to a context by
+ * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_attach(struct scsi_device *sdev,
+ struct dk_cxlflash_attach *attach)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct cxl_ioctl_start_work *work;
+ struct ctx_info *ctxi = NULL;
+ struct lun_access *lun_access = NULL;
+ int rc = 0;
+ u32 perms;
+ int ctxid = -1;
+ u64 rctxid = 0UL;
+ struct file *file;
+
+ struct cxl_context *ctx;
+
+ int fd = -1;
+
+ /* On first attach set fileops */
+ if (atomic_read(&cfg->num_user_contexts) == 0)
+ cfg->cxl_fops = cxlflash_cxl_fops;
+
+ if (attach->num_interrupts > 4) {
+ dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
+ __func__, attach->num_interrupts);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (gli->max_lba == 0) {
+ dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
+ __func__, lli->lun_id[sdev->channel]);
+ rc = read_cap16(sdev, lli);
+ if (rc) {
+ dev_err(dev, "%s: Invalid device! (%d)\n",
+ __func__, rc);
+ rc = -ENODEV;
+ goto out;
+ }
+ dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
+ dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
+ }
+
+ if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
+ rctxid = attach->context_id;
+ ctxi = get_context(cfg, rctxid, NULL, 0);
+ if (!ctxi) {
+ dev_dbg(dev, "%s: Bad context! (%016llX)\n",
+ __func__, rctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(lun_access, &ctxi->luns, list)
+ if (lun_access->lli == lli) {
+ dev_dbg(dev, "%s: Already attached!\n",
+ __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
+ if (unlikely(!lun_access)) {
+ dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ lun_access->lli = lli;
+ lun_access->sdev = sdev;
+
+ /* Non-NULL context indicates reuse */
+ if (ctxi) {
+ dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
+ __func__, rctxid);
+ list_add(&lun_access->list, &ctxi->luns);
+ fd = ctxi->lfd;
+ goto out_attach;
+ }
+
+ ctx = cxl_dev_context_init(cfg->dev);
+ if (unlikely(IS_ERR_OR_NULL(ctx))) {
+ dev_err(dev, "%s: Could not initialize context %p\n",
+ __func__, ctx);
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ ctxid = cxl_process_element(ctx);
+ if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ rc = -EPERM;
+ goto err1;
+ }
+
+ file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ if (unlikely(fd < 0)) {
+ rc = -ENODEV;
+ dev_err(dev, "%s: Could not get file descriptor\n", __func__);
+ goto err1;
+ }
+
+ /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
+ perms = SISL_RHT_PERM(attach->hdr.flags + 1);
+
+ ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
+ if (unlikely(!ctxi)) {
+ dev_err(dev, "%s: Failed to create context! (%d)\n",
+ __func__, ctxid);
+ goto err2;
+ }
+
+ work = &ctxi->work;
+ work->num_interrupts = attach->num_interrupts;
+ work->flags = CXL_START_WORK_NUM_IRQS;
+
+ rc = cxl_start_work(ctx, work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err3;
+ }
+
+ rc = afu_attach(cfg, ctxi);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
+ goto err4;
+ }
+
+ /*
+ * No error paths after this point. Once the fd is installed it's
+ * visible to user space and can't be undone safely on this thread.
+ * There is no need to worry about a deadlock here because no one
+ * knows about us yet; we can be the only one holding our mutex.
+ */
+ list_add(&lun_access->list, &ctxi->luns);
+ mutex_unlock(&ctxi->mutex);
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
+ mutex_lock(&ctxi->mutex);
+ cfg->ctx_tbl[ctxid] = ctxi;
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ fd_install(fd, file);
+
+out_attach:
+ attach->hdr.return_flags = 0;
+ attach->context_id = ctxi->ctxid;
+ attach->block_size = gli->blk_len;
+ attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
+ attach->last_lba = gli->max_lba;
+ attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
+
+out:
+ attach->adap_fd = fd;
+
+ if (ctxi)
+ put_context(ctxi);
+
+ dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
+ __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
+ return rc;
+
+err4:
+ cxl_stop_context(ctx);
+err3:
+ put_context(ctxi);
+ destroy_context(cfg, ctxi);
+ ctxi = NULL;
+err2:
+ /*
+ * Here, we're overriding the fops with a dummy all-NULL fops because
+ * fput() calls the release fop, which will cause us to mistakenly
+ * call into the CXL code. Rather than try to add yet more complexity
+ * to that routine (cxlflash_cxl_release) we should try to fix the
+ * issue here.
+ */
+ file->f_op = &null_fops;
+ fput(file);
+ put_unused_fd(fd);
+ fd = -1;
+err1:
+ cxl_release_context(ctx);
+err0:
+ kfree(lun_access);
+ goto out;
+}
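For orientation, the user-space half of this attach flow looks roughly like the following; the device path, include path and error handling are illustrative, while the ioctl and structure fields are the ones populated above:

/* Hypothetical user-space sketch: attach to a LUN and map its MMIO area. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <scsi/cxlflash_ioctl.h>	/* installed copy of the uapi header */

static int attach_example(const char *dev_path)
{
	struct dk_cxlflash_attach attach;
	void *mmio;
	int fd = open(dev_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&attach, 0, sizeof(attach));
	attach.num_interrupts = 4;	/* driver cap per the check above */

	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) < 0)
		return -1;

	/* map the per-context host MMIO area through the adapter fd */
	mmio = mmap(NULL, attach.mmio_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, attach.adap_fd, 0);

	return mmio == MAP_FAILED ? -1 : 0;
}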
+
+/**
+ * recover_context() - recovers a context in error
+ * @cfg: Internal structure associated with the host.
+ * @ctxi: Context to recover.
+ *
+ * Re-establishes the state for a context in error.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+{
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+ int old_fd, fd = -1;
+ int ctxid = -1;
+ struct file *file;
+ struct cxl_context *ctx;
+ struct afu *afu = cfg->afu;
+
+ ctx = cxl_dev_context_init(cfg->dev);
+ if (unlikely(IS_ERR_OR_NULL(ctx))) {
+ dev_err(dev, "%s: Could not initialize context %p\n",
+ __func__, ctx);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ ctxid = cxl_process_element(ctx);
+ if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+ dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+ rc = -EPERM;
+ goto err1;
+ }
+
+ file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ if (unlikely(fd < 0)) {
+ rc = -ENODEV;
+ dev_err(dev, "%s: Could not get file descriptor\n", __func__);
+ goto err1;
+ }
+
+ rc = cxl_start_work(ctx, &ctxi->work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ /* Update with new MMIO area based on updated context id */
+ ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
+
+ rc = afu_attach(cfg, ctxi);
+ if (rc) {
+ dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
+ goto err3;
+ }
+
+ /*
+ * No error paths after this point. Once the fd is installed it's
+ * visible to user space and can't be undone safely on this thread.
+ */
+ old_fd = ctxi->lfd;
+ ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->lfd = fd;
+ ctxi->ctx = ctx;
+ ctxi->file = file;
+
+ /*
+ * Put context back in table (note the reinit of the context list);
+ * we must first drop the context's mutex and then acquire it in
+ * order with the table/list mutex to avoid a deadlock - safe to do
+ * here because no one can find us at this moment in time.
+ */
+ mutex_unlock(&ctxi->mutex);
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
+ mutex_lock(&ctxi->mutex);
+ list_del_init(&ctxi->list);
+ cfg->ctx_tbl[ctxid] = ctxi;
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ fd_install(fd, file);
+
+ /* Release the original adapter fd and associated CXL resources */
+ sys_close(old_fd);
+out:
+ dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
+ __func__, ctxid, fd, rc);
+ return rc;
+
+err3:
+ cxl_stop_context(ctx);
+err2:
+ fput(file);
+ put_unused_fd(fd);
+err1:
+ cxl_release_context(ctx);
+ goto out;
+}
+
+/**
+ * check_state() - checks and responds to the current adapter state
+ * @cfg: Internal structure associated with the host.
+ *
+ * This routine can block and should only be used in process context.
+ * Note that when waking up from waiting in limbo, the state is unknown
+ * and must be checked again before proceeding.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int check_state(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+retry:
+ switch (cfg->state) {
+ case STATE_LIMBO:
+ dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
+ rc = wait_event_interruptible(cfg->limbo_waitq,
+ cfg->state != STATE_LIMBO);
+ if (unlikely(rc))
+ break;
+ goto retry;
+ case STATE_FAILTERM:
+ dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+ rc = -ENODEV;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * cxlflash_afu_recover() - initiates AFU recovery
+ * @sdev: SCSI device associated with LUN.
+ * @recover: Recover ioctl data structure.
+ *
+ * Only a single recovery is allowed at a time to avoid exhausting CXL
+ * resources (leading to recovery failure) in the event that we're up
+ * against the maximum number of contexts limit. For similar reasons,
+ * a context recovery is retried if there are multiple recoveries taking
+ * place at the same time and the failure was due to CXL services being
+ * unable to keep up.
+ *
+ * Because a user can detect an error condition before the kernel, it is
+ * quite possible for this routine to act as the kernel's EEH detection
+ * source (MMIO read of mbox_r). Because of this, there is a window of
+ * time where an EEH might have been detected but not yet 'serviced'
+ * (callback invoked, causing the device to enter limbo state). To avoid
+ * looping in this routine during that window, a 1 second sleep is in place
+ * between the time the MMIO failure is detected and the time a wait on the
+ * limbo wait queue is attempted via check_state().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_afu_recover(struct scsi_device *sdev,
+ struct dk_cxlflash_recover_afu *recover)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct afu *afu = cfg->afu;
+ struct ctx_info *ctxi = NULL;
+ struct mutex *mutex = &cfg->ctx_recovery_mutex;
+ u64 ctxid = DECODE_CTXID(recover->context_id),
+ rctxid = recover->context_id;
+ long reg;
+ int lretry = 20; /* up to 2 seconds */
+ int rc = 0;
+
+ atomic_inc(&cfg->recovery_threads);
+ rc = mutex_lock_interruptible(mutex);
+ if (rc)
+ goto out;
+
+ dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
+ __func__, recover->reason, rctxid);
+
+retry:
+ /* Ensure that this process is attached to the context */
+ ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (ctxi->err_recovery_active) {
+retry_recover:
+ rc = recover_context(cfg, ctxi);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
+ __func__, ctxid, rc);
+ if ((rc == -ENODEV) &&
+ ((atomic_read(&cfg->recovery_threads) > 1) ||
+ (lretry--))) {
+ dev_dbg(dev, "%s: Going to try again!\n",
+ __func__);
+ mutex_unlock(mutex);
+ msleep(100);
+ rc = mutex_lock_interruptible(mutex);
+ if (rc)
+ goto out;
+ goto retry_recover;
+ }
+
+ goto out;
+ }
+
+ ctxi->err_recovery_active = false;
+ recover->context_id = ctxi->ctxid;
+ recover->adap_fd = ctxi->lfd;
+ recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
+ recover->hdr.return_flags |=
+ DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
+ goto out;
+ }
+
+ /* Test if in error state */
+ reg = readq_be(&afu->ctrl_map->mbox_r);
+ if (reg == -1) {
+ dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
+ __func__);
+ mutex_unlock(&ctxi->mutex);
+ ctxi = NULL;
+ ssleep(1);
+ rc = check_state(cfg);
+ if (unlikely(rc))
+ goto out;
+ goto retry;
+ }
+
+ dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ mutex_unlock(mutex);
+ atomic_dec_if_positive(&cfg->recovery_threads);
+ return rc;
+}
+
+/**
+ * process_sense() - evaluates and processes sense data
+ * @sdev: SCSI device associated with LUN.
+ * @verify: Verify ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int process_sense(struct scsi_device *sdev,
+ struct dk_cxlflash_verify *verify)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ u64 prev_lba = gli->max_lba;
+ struct scsi_sense_hdr sshdr = { 0 };
+ int rc = 0;
+
+ rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
+ DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
+ if (!rc) {
+ dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (sshdr.sense_key) {
+ case NO_SENSE:
+ case RECOVERED_ERROR:
+ /* fall through */
+ case NOT_READY:
+ break;
+ case UNIT_ATTENTION:
+ switch (sshdr.asc) {
+ case 0x29: /* Power on Reset or Device Reset */
+ /* fall through */
+ case 0x2A: /* Device settings/capacity changed */
+ rc = read_cap16(sdev, lli);
+ if (rc) {
+ rc = -ENODEV;
+ break;
+ }
+ if (prev_lba != gli->max_lba)
+ dev_dbg(dev, "%s: Capacity changed old=%lld "
+ "new=%lld\n", __func__, prev_lba,
+ gli->max_lba);
+ break;
+ case 0x3F: /* Report LUNs changed, Rescan. */
+ scsi_scan_host(cfg->host);
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+out:
+ dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
+ * @sdev: SCSI device associated with LUN.
+ * @verify: Verify ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_verify(struct scsi_device *sdev,
+ struct dk_cxlflash_verify *verify)
+{
+ int rc = 0;
+ struct ctx_info *ctxi = NULL;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct sisl_rht_entry *rhte = NULL;
+ res_hndl_t rhndl = verify->rsrc_handle;
+ u64 ctxid = DECODE_CTXID(verify->context_id),
+ rctxid = verify->context_id;
+ u64 last_lba = 0;
+
+ dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
+ "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
+ verify->hint, verify->hdr.flags);
+
+ ctxi = get_context(cfg, rctxid, lli, 0);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rhte = get_rhte(ctxi, rhndl, lli);
+ if (unlikely(!rhte)) {
+ dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+ __func__, rhndl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Look at the hint/sense to see if it requires us to redrive
+ * inquiry (i.e. the Unit attention is due to the WWN changing).
+ */
+ if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
+ rc = process_sense(sdev, verify);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Failed to validate sense data (%d)\n",
+ __func__, rc);
+ goto out;
+ }
+ }
+
+ switch (gli->mode) {
+ case MODE_PHYSICAL:
+ last_lba = gli->max_lba;
+ break;
+ case MODE_VIRTUAL:
+ /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
+ last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
+ last_lba /= CXLFLASH_BLOCK_SIZE;
+ last_lba--;
+ break;
+ default:
+ WARN(1, "Unsupported LUN mode!");
+ }
+
+ verify->last_lba = last_lba;
+
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
+ __func__, rc, verify->last_lba);
+ return rc;
+}
+
+/**
+ * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
+ * @cmd: The ioctl command to decode.
+ *
+ * Return: A string identifying the decoded ioctl.
+ */
+static char *decode_ioctl(int cmd)
+{
+ switch (cmd) {
+ case DK_CXLFLASH_ATTACH:
+ return __stringify_1(DK_CXLFLASH_ATTACH);
+ case DK_CXLFLASH_USER_DIRECT:
+ return __stringify_1(DK_CXLFLASH_USER_DIRECT);
+ case DK_CXLFLASH_USER_VIRTUAL:
+ return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
+ case DK_CXLFLASH_VLUN_RESIZE:
+ return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
+ case DK_CXLFLASH_RELEASE:
+ return __stringify_1(DK_CXLFLASH_RELEASE);
+ case DK_CXLFLASH_DETACH:
+ return __stringify_1(DK_CXLFLASH_DETACH);
+ case DK_CXLFLASH_VERIFY:
+ return __stringify_1(DK_CXLFLASH_VERIFY);
+ case DK_CXLFLASH_VLUN_CLONE:
+ return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
+ case DK_CXLFLASH_RECOVER_AFU:
+ return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
+ case DK_CXLFLASH_MANAGE_LUN:
+ return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * cxlflash_disk_direct_open() - opens a direct (physical) disk
+ * @sdev: SCSI device associated with LUN.
+ * @arg: UDirect ioctl data structure.
+ *
+ * On successful return, the user is informed of the resource handle
+ * to be used to identify the direct lun and the size (in blocks) of
+ * the direct lun in last LBA format.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+
+ struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
+
+ u64 ctxid = DECODE_CTXID(pphys->context_id),
+ rctxid = pphys->context_id;
+ u64 lun_size = 0;
+ u64 last_lba = 0;
+ u64 rsrc_handle = -1;
+ u32 port = CHAN2PORT(sdev->channel);
+
+ int rc = 0;
+
+ struct ctx_info *ctxi = NULL;
+ struct sisl_rht_entry *rhte = NULL;
+
+ pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+
+ rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
+ __func__);
+ goto out;
+ }
+
+ ctxi = get_context(cfg, rctxid, lli, 0);
+ if (unlikely(!ctxi)) {
+ dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ rhte = rhte_checkout(ctxi, lli);
+ if (unlikely(!rhte)) {
+ dev_dbg(dev, "%s: too many opens for this context\n", __func__);
+ rc = -EMFILE; /* too many opens */
+ goto err1;
+ }
+
+ rsrc_handle = (rhte - ctxi->rht_start);
+
+ rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
+ cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
+
+ last_lba = gli->max_lba;
+ pphys->hdr.return_flags = 0;
+ pphys->last_lba = last_lba;
+ pphys->rsrc_handle = rsrc_handle;
+
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
+ __func__, rsrc_handle, rc, last_lba);
+ return rc;
+
+err1:
+ cxlflash_lun_detach(gli);
+ goto out;
+}
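+
+/*
+ * Illustrative user-space sketch (assumed descriptor/context; not part
+ * of the driver): opening the physical LUN via DK_CXLFLASH_USER_DIRECT:
+ *
+ *	struct dk_cxlflash_udirect udirect;
+ *
+ *	memset(&udirect, 0, sizeof(udirect));
+ *	udirect.hdr.version = DK_CXLFLASH_VERSION_0;
+ *	udirect.context_id = rctxid;
+ *	if (ioctl(disk_fd, DK_CXLFLASH_USER_DIRECT, &udirect) == 0)
+ *		printf("rhndl=%llx last_lba=%llx\n",
+ *		       udirect.rsrc_handle, udirect.last_lba);
+ */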
+
+/**
+ * ioctl_common() - common IOCTL handler for driver
+ * @sdev: SCSI device associated with LUN.
+ * @cmd: IOCTL command.
+ *
+ * Handles common fencing operations that are valid for multiple ioctls. Always
+ * allow through ioctls that are cleanup-oriented, even when operating
+ * in a failed/terminating state.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ioctl_common(struct scsi_device *sdev, int cmd)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ int rc = 0;
+
+ if (unlikely(!lli)) {
+ dev_dbg(dev, "%s: Unknown LUN\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = check_state(cfg);
+ if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
+ switch (cmd) {
+ case DK_CXLFLASH_VLUN_RESIZE:
+ case DK_CXLFLASH_RELEASE:
+ case DK_CXLFLASH_DETACH:
+ dev_dbg(dev, "%s: Command override! (%d)\n",
+ __func__, rc);
+ rc = 0;
+ break;
+ }
+ }
+out:
+ return rc;
+}
+
+/**
+ * cxlflash_ioctl() - IOCTL handler for driver
+ * @sdev: SCSI device associated with LUN.
+ * @cmd: IOCTL command.
+ * @arg: Userspace ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ typedef int (*sioctl) (struct scsi_device *, void *);
+
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct dk_cxlflash_hdr *hdr;
+ char buf[sizeof(union cxlflash_ioctls)];
+ size_t size = 0;
+ bool known_ioctl = false;
+ int idx;
+ int rc = 0;
+ struct Scsi_Host *shost = sdev->host;
+ sioctl do_ioctl = NULL;
+
+ static const struct {
+ size_t size;
+ sioctl ioctl;
+ } ioctl_tbl[] = { /* NOTE: order matters here */
+ {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
+ {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
+ {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
+ {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
+ {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
+ {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
+ {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
+ {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
+ {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
+ {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
+ };
+
+ /* Restrict command set to physical support only for internal LUN */
+ if (afu->internal_lun)
+ switch (cmd) {
+ case DK_CXLFLASH_RELEASE:
+ case DK_CXLFLASH_USER_VIRTUAL:
+ case DK_CXLFLASH_VLUN_RESIZE:
+ case DK_CXLFLASH_VLUN_CLONE:
+ dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
+ __func__, decode_ioctl(cmd), afu->internal_lun);
+ rc = -EINVAL;
+ goto cxlflash_ioctl_exit;
+ }
+
+ switch (cmd) {
+ case DK_CXLFLASH_ATTACH:
+ case DK_CXLFLASH_USER_DIRECT:
+ case DK_CXLFLASH_RELEASE:
+ case DK_CXLFLASH_DETACH:
+ case DK_CXLFLASH_VERIFY:
+ case DK_CXLFLASH_RECOVER_AFU:
+ case DK_CXLFLASH_USER_VIRTUAL:
+ case DK_CXLFLASH_VLUN_RESIZE:
+ case DK_CXLFLASH_VLUN_CLONE:
+ dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
+ __func__, decode_ioctl(cmd), cmd, shost->host_no,
+ sdev->channel, sdev->id, sdev->lun);
+ rc = ioctl_common(sdev, cmd);
+ if (unlikely(rc))
+ goto cxlflash_ioctl_exit;
+
+ /* fall through */
+
+ case DK_CXLFLASH_MANAGE_LUN:
+ known_ioctl = true;
+ idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
+ size = ioctl_tbl[idx].size;
+ do_ioctl = ioctl_tbl[idx].ioctl;
+
+ if (likely(do_ioctl))
+ break;
+
+ /* fall through */
+ default:
+ rc = -EINVAL;
+ goto cxlflash_ioctl_exit;
+ }
+
+ if (unlikely(copy_from_user(&buf, arg, size))) {
+ dev_err(dev, "%s: copy_from_user() fail! "
+ "size=%lu cmd=%d (%s) arg=%p\n",
+ __func__, size, cmd, decode_ioctl(cmd), arg);
+ rc = -EFAULT;
+ goto cxlflash_ioctl_exit;
+ }
+
+ hdr = (struct dk_cxlflash_hdr *)&buf;
+ if (hdr->version != DK_CXLFLASH_VERSION_0) {
+ dev_dbg(dev, "%s: Version %u not supported for %s\n",
+ __func__, hdr->version, decode_ioctl(cmd));
+ rc = -EINVAL;
+ goto cxlflash_ioctl_exit;
+ }
+
+ if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
+ dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
+ rc = -EINVAL;
+ goto cxlflash_ioctl_exit;
+ }
+
+ rc = do_ioctl(sdev, (void *)&buf);
+ if (likely(!rc))
+ if (unlikely(copy_to_user(arg, &buf, size))) {
+ dev_err(dev, "%s: copy_to_user() fail! "
+ "size=%lu cmd=%d (%s) arg=%p\n",
+ __func__, size, cmd, decode_ioctl(cmd), arg);
+ rc = -EFAULT;
+ }
+
+ /* fall through to exit */
+
+cxlflash_ioctl_exit:
+ if (unlikely(rc && known_ioctl))
+ dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
+ "returned rc %d\n", __func__,
+ decode_ioctl(cmd), cmd, shost->host_no,
+ sdev->channel, sdev->id, sdev->lun, rc);
+ else
+ dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
+ "returned rc %d\n", __func__, decode_ioctl(cmd),
+ cmd, shost->host_no, sdev->channel, sdev->id,
+ sdev->lun, rc);
+ return rc;
+}
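+
+/*
+ * Note (illustrative): the ioctl_tbl lookup above relies on the
+ * DK_CXLFLASH_* commands carrying consecutive _IOC_NR values starting
+ * at DK_CXLFLASH_ATTACH, e.g.:
+ *
+ *	idx = _IOC_NR(DK_CXLFLASH_VERIFY) - _IOC_NR(DK_CXLFLASH_ATTACH);
+ *	size = ioctl_tbl[idx].size;  // sizeof(struct dk_cxlflash_verify)
+ *
+ * which is why the table order must mirror the command numbering.
+ */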
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
new file mode 100644
index 000000000000..d7dc88bc64a4
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -0,0 +1,147 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_SUPERPIPE_H
+#define _CXLFLASH_SUPERPIPE_H
+
+extern struct cxlflash_global global;
+
+/*
+ * Terminology: use afu (and not adapter) to refer to the HW.
+ * Adapter is the entire slot and includes PSL out of which
+ * only the AFU is visible to user space.
+ */
+
+/* Chunk size parms: note sislite minimum chunk size is
+ * 0x10000 LBAs, corresponding to an NMASK of 16.
+ */
+#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
+
+#define MC_DISCOVERY_TIMEOUT 5 /* 5 secs */
+
+#define CHAN2PORT(_x) ((_x) + 1)
+#define PORT2CHAN(_x) ((_x) - 1)
+
+enum lun_mode {
+ MODE_NONE = 0,
+ MODE_VIRTUAL,
+ MODE_PHYSICAL
+};
+
+/* Global (entire driver, spans adapters) lun_info structure */
+struct glun_info {
+ u64 max_lba; /* from read cap(16) */
+ u32 blk_len; /* from read cap(16) */
+ enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
+ int users; /* Number of users w/ references to LUN */
+
+ u8 wwid[16];
+
+ struct mutex mutex;
+
+ struct blka blka;
+ struct list_head list;
+};
+
+/* Local (per-adapter) lun_info structure */
+struct llun_info {
+ u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */
+ u32 lun_index; /* Index in the LUN table */
+ u32 host_no; /* host_no from Scsi_host */
+ u32 port_sel; /* What port to use for this LUN */
+ bool newly_created; /* Whether the LUN was just discovered */
+ bool in_table; /* Whether a LUN table entry was created */
+
+ u8 wwid[16]; /* Keep a duplicate copy here? */
+
+ struct glun_info *parent; /* Pointer to entry in global LUN structure */
+ struct scsi_device *sdev;
+ struct list_head list;
+};
+
+struct lun_access {
+ struct llun_info *lli;
+ struct scsi_device *sdev;
+ struct list_head list;
+};
+
+enum ctx_ctrl {
+ CTX_CTRL_CLONE = (1 << 1),
+ CTX_CTRL_ERR = (1 << 2),
+ CTX_CTRL_ERR_FALLBACK = (1 << 3),
+ CTX_CTRL_NOPID = (1 << 4),
+ CTX_CTRL_FILE = (1 << 5)
+};
+
+#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
+#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
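+
+/*
+ * Worked example (illustrative): with _ctx = 0x10 and _id = 0x345,
+ * ENCODE_CTXID() yields ((0x10 & 0xFFFFFFFF0) << 28) | 0x345 =
+ * 0x100000345; DECODE_CTXID() masks back to the low 32 bits, 0x345.
+ * The upper bits carry the CXL context, the lower 32 the per-context id.
+ */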
+
+struct ctx_info {
+ struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
+ struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
+ alloc/free on attach/detach */
+ u32 rht_out; /* Number of checked out RHT entries */
+ u32 rht_perms; /* User-defined permissions for RHT entries */
+ struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
+ bool *rht_needs_ws; /* User-desired write-same function per RHTE */
+
+ struct cxl_ioctl_start_work work;
+ u64 ctxid;
+ int lfd;
+ pid_t pid;
+ bool unavail;
+ bool err_recovery_active;
+ struct mutex mutex; /* Context protection */
+ struct cxl_context *ctx;
+ struct list_head luns; /* LUNs attached to this context */
+ const struct vm_operations_struct *cxl_mmap_vmops;
+ struct file *file;
+ struct list_head list; /* Link contexts in error recovery */
+};
+
+struct cxlflash_global {
+ struct mutex mutex;
+ struct list_head gluns;/* list of glun_info structs */
+ struct page *err_page; /* One page of all 0xF for error notification */
+};
+
+int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *);
+int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *,
+ struct dk_cxlflash_resize *);
+
+int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *);
+int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *,
+ struct dk_cxlflash_release *);
+
+int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *);
+
+int cxlflash_disk_virtual_open(struct scsi_device *, void *);
+
+int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool);
+void cxlflash_lun_detach(struct glun_info *);
+
+struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl);
+void put_context(struct ctx_info *);
+
+struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t,
+ struct llun_info *);
+
+struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *);
+void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *);
+
+void cxlflash_ba_terminate(struct ba_lun *);
+
+int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
+
+#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
new file mode 100644
index 000000000000..6155cb1d4ed3
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -0,0 +1,1243 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+#include <asm/bitsperlong.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+/**
+ * marshal_virt_to_resize() - translate uvirtual to resize structure
+ * @virt: Source structure from which to translate/copy.
+ * @resize: Destination structure for the translate/copy.
+ */
+static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
+ struct dk_cxlflash_resize *resize)
+{
+ resize->hdr = virt->hdr;
+ resize->context_id = virt->context_id;
+ resize->rsrc_handle = virt->rsrc_handle;
+ resize->req_size = virt->lun_size;
+ resize->last_lba = virt->last_lba;
+}
+
+/**
+ * marshal_clone_to_rele() - translate clone to release structure
+ * @clone: Source structure from which to translate/copy.
+ * @release: Destination structure for the translate/copy.
+ */
+static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
+ struct dk_cxlflash_release *release)
+{
+ release->hdr = clone->hdr;
+ release->context_id = clone->context_id_dst;
+}
+
+/**
+ * ba_init() - initializes a block allocator
+ * @ba_lun: Block allocator to initialize.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ba_init(struct ba_lun *ba_lun)
+{
+ struct ba_lun_info *bali = NULL;
+ int lun_size_au = 0, i = 0;
+ int last_word_underflow = 0;
+ u64 *lam;
+
+ pr_debug("%s: Initializing LUN: lun_id = %llX, "
+ "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n",
+ __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
+
+ /* Calculate bit map size */
+ lun_size_au = ba_lun->lsize / ba_lun->au_size;
+ if (lun_size_au == 0) {
+ pr_debug("%s: Requested LUN size of 0!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Allocate lun information container */
+ bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
+ if (unlikely(!bali)) {
+ pr_err("%s: Failed to allocate lun_info for lun_id %llX\n",
+ __func__, ba_lun->lun_id);
+ return -ENOMEM;
+ }
+
+ bali->total_aus = lun_size_au;
+ bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
+
+ if (lun_size_au % BITS_PER_LONG)
+ bali->lun_bmap_size++;
+
+ /* Allocate bitmap space */
+ bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
+ GFP_KERNEL);
+ if (unlikely(!bali->lun_alloc_map)) {
+ pr_err("%s: Failed to allocate lun allocation map: "
+ "lun_id = %llX\n", __func__, ba_lun->lun_id);
+ kfree(bali);
+ return -ENOMEM;
+ }
+
+ /* Initialize the free AU count and set all bits in the map to '1' */
+ bali->free_aun_cnt = lun_size_au;
+
+ for (i = 0; i < bali->lun_bmap_size; i++)
+ bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
+
+ /* If the last word is not fully utilized, mark extra bits as allocated */
+ last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
+ last_word_underflow -= bali->free_aun_cnt;
+ if (last_word_underflow > 0) {
+ lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
+ for (i = (HIBIT - last_word_underflow + 1);
+ i < BITS_PER_LONG;
+ i++)
+ clear_bit(i, (ulong *)lam);
+ }
+
+ /* Initialize high elevator index, low/curr already at 0 from kzalloc */
+ bali->free_high_idx = bali->lun_bmap_size;
+
+ /* Allocate clone map */
+ bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
+ GFP_KERNEL);
+ if (unlikely(!bali->aun_clone_map)) {
+ pr_err("%s: Failed to allocate clone map: lun_id = %llX\n",
+ __func__, ba_lun->lun_id);
+ kfree(bali->lun_alloc_map);
+ kfree(bali);
+ return -ENOMEM;
+ }
+
+ /* Pass the allocated lun info as a handle to the user */
+ ba_lun->ba_lun_handle = bali;
+
+ pr_debug("%s: Successfully initialized the LUN: "
+ "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n",
+ __func__, ba_lun->lun_id, bali->lun_bmap_size,
+ bali->free_aun_cnt);
+ return 0;
+}
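+
+/*
+ * Worked example (illustrative): a LUN with lsize = 0x200000 LBAs and
+ * au_size = 0x10000 LBAs gives lun_size_au = 0x20 allocation units, so
+ * lun_bmap_size rounds up to one 64-bit word and free_aun_cnt starts at
+ * 0x20. The underflow loop then clears bits 32..63 of that word, since
+ * only 32 AUs actually exist.
+ */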
+
+/**
+ * find_free_range() - locates a free bit within the block allocator
+ * @low: First word in block allocator to start search.
+ * @high: Last word in block allocator to search.
+ * @bali: LUN information structure owning the block allocator to search.
+ * @bit_word: Passes back the word in the block allocator owning the free bit.
+ *
+ * Return: The bit position within the passed back word, -1 on failure
+ */
+static int find_free_range(u32 low,
+ u32 high,
+ struct ba_lun_info *bali, int *bit_word)
+{
+ int i;
+ u64 bit_pos = -1;
+ ulong *lam, num_bits;
+
+ for (i = low; i < high; i++)
+ if (bali->lun_alloc_map[i] != 0) {
+ lam = (ulong *)&bali->lun_alloc_map[i];
+ num_bits = (sizeof(*lam) * BITS_PER_BYTE);
+ bit_pos = find_first_bit(lam, num_bits);
+
+ pr_devel("%s: Found free bit %llX in lun "
+ "map entry %llX at bitmap index = %X\n",
+ __func__, bit_pos, bali->lun_alloc_map[i],
+ i);
+
+ *bit_word = i;
+ bali->free_aun_cnt--;
+ clear_bit(bit_pos, lam);
+ break;
+ }
+
+ return bit_pos;
+}
+
+/**
+ * ba_alloc() - allocates a block from the block allocator
+ * @ba_lun: Block allocator from which to allocate a block.
+ *
+ * Return: The allocated block, -1 on failure
+ */
+static u64 ba_alloc(struct ba_lun *ba_lun)
+{
+ u64 bit_pos = -1;
+ int bit_word = 0;
+ struct ba_lun_info *bali = NULL;
+
+ bali = ba_lun->ba_lun_handle;
+
+ pr_debug("%s: Received block allocation request: "
+ "lun_id = %llX, free_aun_cnt = %llX\n",
+ __func__, ba_lun->lun_id, bali->free_aun_cnt);
+
+ if (bali->free_aun_cnt == 0) {
+ pr_debug("%s: No space left on LUN: lun_id = %llX\n",
+ __func__, ba_lun->lun_id);
+ return -1ULL;
+ }
+
+ /* Search to find a free entry, curr->high then low->curr */
+ bit_pos = find_free_range(bali->free_curr_idx,
+ bali->free_high_idx, bali, &bit_word);
+ if (bit_pos == -1) {
+ bit_pos = find_free_range(bali->free_low_idx,
+ bali->free_curr_idx,
+ bali, &bit_word);
+ if (bit_pos == -1) {
+ pr_debug("%s: Could not find an allocation unit on LUN:"
+ " lun_id = %llX\n", __func__, ba_lun->lun_id);
+ return -1ULL;
+ }
+ }
+
+ /* Update the free_curr_idx */
+ if (bit_pos == HIBIT)
+ bali->free_curr_idx = bit_word + 1;
+ else
+ bali->free_curr_idx = bit_word;
+
+ pr_debug("%s: Allocating AU number %llX, on lun_id %llX, "
+ "free_aun_cnt = %llX\n", __func__,
+ ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
+ bali->free_aun_cnt);
+
+ return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
+}
+
+/**
+ * validate_alloc() - validates the specified block has been allocated
+ * @bali: LUN info owning the block allocator.
+ * @aun: Block to validate.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int validate_alloc(struct ba_lun_info *bali, u64 aun)
+{
+ int idx = 0, bit_pos = 0;
+
+ idx = aun / BITS_PER_LONG;
+ bit_pos = aun % BITS_PER_LONG;
+
+ if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * ba_free() - frees a block from the block allocator
+ * @ba_lun: Block allocator from which to free a block.
+ * @to_free: Block to free.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int ba_free(struct ba_lun *ba_lun, u64 to_free)
+{
+ int idx = 0, bit_pos = 0;
+ struct ba_lun_info *bali = NULL;
+
+ bali = ba_lun->ba_lun_handle;
+
+ if (validate_alloc(bali, to_free)) {
+ pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n",
+ __func__, to_free, ba_lun->lun_id);
+ return -1;
+ }
+
+ pr_debug("%s: Received a request to free AU %llX on lun_id %llX, "
+ "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id,
+ bali->free_aun_cnt);
+
+ if (bali->aun_clone_map[to_free] > 0) {
+ pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone "
+ "count = %X\n", __func__, to_free, ba_lun->lun_id,
+ bali->aun_clone_map[to_free]);
+ bali->aun_clone_map[to_free]--;
+ return 0;
+ }
+
+ idx = to_free / BITS_PER_LONG;
+ bit_pos = to_free % BITS_PER_LONG;
+
+ set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
+ bali->free_aun_cnt++;
+
+ if (idx < bali->free_low_idx)
+ bali->free_low_idx = idx;
+ else if (idx > bali->free_high_idx)
+ bali->free_high_idx = idx;
+
+ pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on "
+ "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx,
+ ba_lun->lun_id, bali->free_aun_cnt);
+
+ return 0;
+}
+
+/**
+ * ba_clone() - clones a block within the block allocator
+ * @ba_lun: Block allocator in which to clone a block.
+ * @to_clone: Block to clone.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
+{
+ struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+ if (validate_alloc(bali, to_clone)) {
+ pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n",
+ __func__, to_clone, ba_lun->lun_id);
+ return -1;
+ }
+
+ pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n",
+ __func__, to_clone, ba_lun->lun_id);
+
+ if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
+ pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n",
+ __func__, to_clone, ba_lun->lun_id);
+ return -1;
+ }
+
+ bali->aun_clone_map[to_clone]++;
+
+ return 0;
+}
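+
+/*
+ * Note (illustrative): aun_clone_map is a per-AU reference count. After
+ * ba_clone(lun, aun), the first ba_free(lun, aun) only decrements the
+ * count and the AU stays allocated; a subsequent ba_free() returns the
+ * AU to the bitmap. MAX_AUN_CLONE_CNT caps outstanding clones per AU.
+ */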
+
+/**
+ * ba_space() - returns the amount of free space left in the block allocator
+ * @ba_lun: Block allocator.
+ *
+ * Return: Amount of free space in block allocator
+ */
+static u64 ba_space(struct ba_lun *ba_lun)
+{
+ struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+ return bali->free_aun_cnt;
+}
+
+/**
+ * cxlflash_ba_terminate() - frees resources associated with the block allocator
+ * @ba_lun: Block allocator.
+ *
+ * Safe to call in a partially allocated state.
+ */
+void cxlflash_ba_terminate(struct ba_lun *ba_lun)
+{
+ struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+ if (bali) {
+ kfree(bali->aun_clone_map);
+ kfree(bali->lun_alloc_map);
+ kfree(bali);
+ ba_lun->ba_lun_handle = NULL;
+ }
+}
+
+/**
+ * init_vlun() - initializes a LUN for virtual use
+ * @lli: Per-adapter LUN information structure that owns the block allocator.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_vlun(struct llun_info *lli)
+{
+ int rc = 0;
+ struct glun_info *gli = lli->parent;
+ struct blka *blka = &gli->blka;
+
+ memset(blka, 0, sizeof(*blka));
+ mutex_init(&blka->mutex);
+
+ /* LUN IDs are unique per port, save the index instead */
+ blka->ba_lun.lun_id = lli->lun_index;
+ blka->ba_lun.lsize = gli->max_lba + 1;
+ blka->ba_lun.lba_size = gli->blk_len;
+
+ blka->ba_lun.au_size = MC_CHUNK_SIZE;
+ blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
+
+ rc = ba_init(&blka->ba_lun);
+ if (unlikely(rc))
+ pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
+
+ pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
+ return rc;
+}
+
+/**
+ * write_same16() - sends a SCSI WRITE_SAME(16) command of zeroes to a LUN
+ * @sdev: SCSI device associated with LUN.
+ * @lba: Logical block address to start write same.
+ * @nblks: Number of logical blocks to write same.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int write_same16(struct scsi_device *sdev,
+ u64 lba,
+ u32 nblks)
+{
+ u8 *cmd_buf = NULL;
+ u8 *scsi_cmd = NULL;
+ u8 *sense_buf = NULL;
+ int rc = 0;
+ int result = 0;
+ int ws_limit = SISLITE_MAX_WS_BLOCKS;
+ u64 offset = lba;
+ int left = nblks;
+ u32 tout = sdev->request_queue->rq_timeout;
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+
+ cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
+ scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
+ sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ while (left > 0) {
+
+ scsi_cmd[0] = WRITE_SAME_16;
+ put_unaligned_be64(offset, &scsi_cmd[2]);
+ put_unaligned_be32(ws_limit < left ? ws_limit : left,
+ &scsi_cmd[10]);
+
+ result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
+ CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+ if (result) {
+ dev_err_ratelimited(dev, "%s: command failed for "
+ "offset %lld result=0x%x\n",
+ __func__, offset, result);
+ rc = -EIO;
+ goto out;
+ }
+ left -= ws_limit;
+ offset += ws_limit;
+ }
+
+out:
+ kfree(cmd_buf);
+ kfree(scsi_cmd);
+ kfree(sense_buf);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
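+
+/*
+ * Worked example (illustrative): with nblks equal to 2.5 times
+ * SISLITE_MAX_WS_BLOCKS, the loop above issues three WRITE_SAME(16)
+ * commands: two full-sized and one covering the remaining half, with
+ * offset advancing by ws_limit on each pass.
+ */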
+
+/**
+ * grow_lxt() - expands the translation table associated with the specified RHTE
+ * @afu: AFU associated with the host.
+ * @sdev: SCSI device associated with LUN.
+ * @ctxid: Context ID of context owning the RHTE.
+ * @rhndl: Resource handle associated with the RHTE.
+ * @rhte: Resource handle entry (RHTE).
+ * @new_size: Number of translation entries associated with RHTE.
+ *
+ * By design, this routine employs a 'best attempt' allocation and will
+ * truncate the requested size when the block allocator cannot satisfy
+ * the full request but still has some space available. The user is made
+ * aware of this by the returned size, which reflects what was actually
+ * allocated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int grow_lxt(struct afu *afu,
+ struct scsi_device *sdev,
+ ctx_hndl_t ctxid,
+ res_hndl_t rhndl,
+ struct sisl_rht_entry *rhte,
+ u64 *new_size)
+{
+ struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct blka *blka = &gli->blka;
+ u32 av_size;
+ u32 ngrps, ngrps_old;
+ u64 aun; /* chunk# allocated by block allocator */
+ u64 delta = *new_size - rhte->lxt_cnt;
+ u64 my_new_size;
+ int i, rc = 0;
+
+ /*
+ * Check what is available in the block allocator before re-allocating
+ * LXT array. This is done up front under the mutex which must not be
+ * released until after allocation is complete.
+ */
+ mutex_lock(&blka->mutex);
+ av_size = ba_space(&blka->ba_lun);
+ if (unlikely(av_size <= 0)) {
+ pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size);
+ mutex_unlock(&blka->mutex);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ if (av_size < delta)
+ delta = av_size;
+
+ lxt_old = rhte->lxt_start;
+ ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
+ ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
+
+ if (ngrps != ngrps_old) {
+ /* reallocate to fit new size */
+ lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+ GFP_KERNEL);
+ if (unlikely(!lxt)) {
+ mutex_unlock(&blka->mutex);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* copy over all old entries */
+ memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
+ } else
+ lxt = lxt_old;
+
+ /* nothing can fail from now on */
+ my_new_size = rhte->lxt_cnt + delta;
+
+ /* add new entries to the end */
+ for (i = rhte->lxt_cnt; i < my_new_size; i++) {
+ /*
+ * Due to the earlier check of available space, ba_alloc
+ * cannot fail here. If it did due to internal error,
+ * leave an rlba_base of -1u, which will likely be an
+ * invalid LUN (too large).
+ */
+ aun = ba_alloc(&blka->ba_lun);
+ if ((aun == -1ULL) || (aun >= blka->nchunk))
+ pr_debug("%s: ba_alloc error: allocated chunk# %llX, "
+ "max %llX\n", __func__, aun, blka->nchunk - 1);
+
+ /* select both ports, use r/w perms from RHT */
+ lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
+ (lli->lun_index << LXT_LUNIDX_SHIFT) |
+ (RHT_PERM_RW << LXT_PERM_SHIFT |
+ lli->port_sel));
+ }
+
+ mutex_unlock(&blka->mutex);
+
+ /*
+ * The following sequence is prescribed in the SISlite spec
+ * for syncing up with the AFU when adding LXT entries.
+ */
+ dma_wmb(); /* Make LXT updates visible */
+
+ rhte->lxt_start = lxt;
+ dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+ rhte->lxt_cnt = my_new_size;
+ dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+ cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+
+ /* free old lxt if reallocated */
+ if (lxt != lxt_old)
+ kfree(lxt_old);
+ *new_size = my_new_size;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
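+
+/*
+ * Worked example (illustrative): with MC_CHUNK_SHIFT = 16,
+ * LXT_LUNIDX_SHIFT = 8 and LXT_PERM_SHIFT = 4, an aun of 0x2, a
+ * lun_index of 0x1, RHT_PERM_RW and a port_sel of 0x3 (both ports)
+ * pack into rlba_base as:
+ *
+ *	(0x2 << 16) | (0x1 << 8) | (RHT_PERM_RW << 4) | 0x3
+ *
+ * i.e. physical chunk in the high bits, then LUN index, permissions,
+ * and the port-select mask in the low nibble.
+ */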
+
+/**
+ * shrink_lxt() - reduces translation table associated with the specified RHTE
+ * @afu: AFU associated with the host.
+ * @sdev: SCSI device associated with LUN.
+ * @rhndl: Resource handle associated with the RHTE.
+ * @rhte: Resource handle entry (RHTE).
+ * @ctxi: Context owning resources.
+ * @new_size: Number of translation entries associated with RHTE.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int shrink_lxt(struct afu *afu,
+ struct scsi_device *sdev,
+ res_hndl_t rhndl,
+ struct sisl_rht_entry *rhte,
+ struct ctx_info *ctxi,
+ u64 *new_size)
+{
+ struct sisl_lxt_entry *lxt, *lxt_old;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct blka *blka = &gli->blka;
+ ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
+ bool needs_ws = ctxi->rht_needs_ws[rhndl];
+ bool needs_sync = !ctxi->err_recovery_active;
+ u32 ngrps, ngrps_old;
+ u64 aun; /* chunk# allocated by block allocator */
+ u64 delta = rhte->lxt_cnt - *new_size;
+ u64 my_new_size;
+ int i, rc = 0;
+
+ lxt_old = rhte->lxt_start;
+ ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
+ ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
+
+ if (ngrps != ngrps_old) {
+ /* Reallocate to fit new size unless new size is 0 */
+ if (ngrps) {
+ lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+ GFP_KERNEL);
+ if (unlikely(!lxt)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Copy over old entries that will remain */
+ memcpy(lxt, lxt_old,
+ (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
+ } else
+ lxt = NULL;
+ } else
+ lxt = lxt_old;
+
+ /* Nothing can fail from now on */
+ my_new_size = rhte->lxt_cnt - delta;
+
+ /*
+ * The following sequence is prescribed in the SISlite spec
+ * for syncing up with the AFU when removing LXT entries.
+ */
+ rhte->lxt_cnt = my_new_size;
+ dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+ rhte->lxt_start = lxt;
+ dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+ if (needs_sync)
+ cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+
+ if (needs_ws) {
+ /*
+ * Mark the context as unavailable, so that we can release
+ * the mutex safely.
+ */
+ ctxi->unavail = true;
+ mutex_unlock(&ctxi->mutex);
+ }
+
+ /* Free LBAs allocated to freed chunks */
+ mutex_lock(&blka->mutex);
+ for (i = delta - 1; i >= 0; i--) {
+ /*
+ * Mask the higher 48 bits before shifting, even though
+ * it is a no-op.
+ */
+ aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
+ aun = (aun >> MC_CHUNK_SHIFT);
+ if (needs_ws)
+ write_same16(sdev, aun, MC_CHUNK_SIZE);
+ ba_free(&blka->ba_lun, aun);
+ }
+ mutex_unlock(&blka->mutex);
+
+ if (needs_ws) {
+ /* Make the context visible again */
+ mutex_lock(&ctxi->mutex);
+ ctxi->unavail = false;
+ }
+
+ /* Free old lxt if reallocated */
+ if (lxt != lxt_old)
+ kfree(lxt_old);
+ *new_size = my_new_size;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * _cxlflash_vlun_resize() - changes the size of a virtual lun
+ * @sdev: SCSI device associated with LUN owning virtual LUN.
+ * @ctxi: Context owning resources.
+ * @resize: Resize ioctl data structure.
+ *
+ * On successful return, the user is informed of the new size (in blocks)
+ * of the virtual lun in last LBA format. When the size of the virtual
+ * lun is zero, the last LBA is reflected as -1. See comment in the
+ * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
+ * on the error recovery list.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int _cxlflash_vlun_resize(struct scsi_device *sdev,
+ struct ctx_info *ctxi,
+ struct dk_cxlflash_resize *resize)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct afu *afu = cfg->afu;
+ bool put_ctx = false;
+
+ res_hndl_t rhndl = resize->rsrc_handle;
+ u64 new_size;
+ u64 nsectors;
+ u64 ctxid = DECODE_CTXID(resize->context_id),
+ rctxid = resize->context_id;
+
+ struct sisl_rht_entry *rhte;
+
+ int rc = 0;
+
+ /*
+ * The requested size (req_size) is always assumed to be in 4k blocks,
+ * so we have to convert it here from 4k to chunk size.
+ */
+ nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
+ new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
+
+ pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx,"
+ "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle,
+ resize->req_size, new_size);
+
+ if (unlikely(gli->mode != MODE_VIRTUAL)) {
+ pr_debug("%s: LUN mode does not support resize! (%d)\n",
+ __func__, gli->mode);
+ rc = -EINVAL;
+ goto out;
+
+ }
+
+ if (!ctxi) {
+ ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+ if (unlikely(!ctxi)) {
+ pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ put_ctx = true;
+ }
+
+ rhte = get_rhte(ctxi, rhndl, lli);
+ if (unlikely(!rhte)) {
+ pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (new_size > rhte->lxt_cnt)
+ rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
+ else if (new_size < rhte->lxt_cnt)
+ rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
+
+ resize->hdr.return_flags = 0;
+ resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
+ resize->last_lba /= CXLFLASH_BLOCK_SIZE;
+ resize->last_lba--;
+
+out:
+ if (put_ctx)
+ put_context(ctxi);
+ pr_debug("%s: resized to %lld returning rc=%d\n",
+ __func__, resize->last_lba, rc);
+ return rc;
+}
+
+int cxlflash_vlun_resize(struct scsi_device *sdev,
+ struct dk_cxlflash_resize *resize)
+{
+ return _cxlflash_vlun_resize(sdev, NULL, resize);
+}
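+
+/*
+ * Worked example (illustrative): with a 512-byte blk_len and a
+ * req_size of 8 (4K blocks), nsectors = (8 * 4096) / 512 = 64 and
+ * new_size = DIV_ROUND_UP(64, MC_CHUNK_SIZE) = 1 chunk; last_lba then
+ * comes back as (1 * 0x10000 * 512) / 4096 - 1 = 8191.
+ */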
+
+/**
+ * cxlflash_restore_luntable() - Restore LUN table to prior state
+ * @cfg: Internal structure associated with the host.
+ */
+void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
+{
+ struct llun_info *lli, *temp;
+ u32 chan;
+ u32 lind;
+ struct afu *afu = cfg->afu;
+ struct sisl_global_map *agm = &afu->afu_map->global;
+
+ mutex_lock(&global.mutex);
+
+ list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
+ if (!lli->in_table)
+ continue;
+
+ lind = lli->lun_index;
+
+ if (lli->port_sel == BOTH_PORTS) {
+ writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
+ writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
+ pr_debug("%s: Virtual LUN on slot %d id0=%llx, "
+ "id1=%llx\n", __func__, lind,
+ lli->lun_id[0], lli->lun_id[1]);
+ } else {
+ chan = PORT2CHAN(lli->port_sel);
+ writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
+ pr_debug("%s: Virtual LUN on slot %d chan=%d, "
+ "id=%llx\n", __func__, lind, chan,
+ lli->lun_id[chan]);
+ }
+ }
+
+ mutex_unlock(&global.mutex);
+}
+
+/**
+ * init_luntable() - write an entry in the LUN table
+ * @cfg: Internal structure associated with the host.
+ * @lli: Per adapter LUN information structure.
+ *
+ * On successful return, a LUN table entry is created: at the top of the
+ * table for LUNs visible on both ports, at the bottom for LUNs visible
+ * on only one port.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
+{
+ u32 chan;
+ u32 lind;
+ int rc = 0;
+ struct afu *afu = cfg->afu;
+ struct sisl_global_map *agm = &afu->afu_map->global;
+
+ mutex_lock(&global.mutex);
+
+ if (lli->in_table)
+ goto out;
+
+ if (lli->port_sel == BOTH_PORTS) {
+ /*
+ * If this LUN is visible from both ports, we will put
+ * it in the top half of the LUN table.
+ */
+ if ((cfg->promote_lun_index == cfg->last_lun_index[0]) ||
+ (cfg->promote_lun_index == cfg->last_lun_index[1])) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ lind = lli->lun_index = cfg->promote_lun_index;
+ writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
+ writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
+ cfg->promote_lun_index++;
+ pr_debug("%s: Virtual LUN on slot %d id0=%llx, id1=%llx\n",
+ __func__, lind, lli->lun_id[0], lli->lun_id[1]);
+ } else {
+ /*
+ * If this LUN is visible only from one port, we will put
+ * it in the bottom half of the LUN table.
+ */
+ chan = PORT2CHAN(lli->port_sel);
+ if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ lind = lli->lun_index = cfg->last_lun_index[chan];
+ writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
+ cfg->last_lun_index[chan]--;
+ pr_debug("%s: Virtual LUN on slot %d chan=%d, id=%llx\n",
+ __func__, lind, chan, lli->lun_id[chan]);
+ }
+
+ lli->in_table = true;
+out:
+ mutex_unlock(&global.mutex);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_disk_virtual_open() - open a virtual disk of specified size
+ * @sdev: SCSI device associated with LUN owning virtual LUN.
+ * @arg: UVirtual ioctl data structure.
+ *
+ * On successful return, the user is informed of the resource handle
+ * to be used to identify the virtual lun and the size (in blocks) of
+ * the virtual lun in last LBA format. When the size of the virtual lun
+ * is zero, the last LBA is reflected as -1.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct device *dev = &cfg->dev->dev;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+
+ struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
+ struct dk_cxlflash_resize resize;
+
+ u64 ctxid = DECODE_CTXID(virt->context_id),
+ rctxid = virt->context_id;
+ u64 lun_size = virt->lun_size;
+ u64 last_lba = 0;
+ u64 rsrc_handle = -1;
+
+ int rc = 0;
+
+ struct ctx_info *ctxi = NULL;
+ struct sisl_rht_entry *rhte = NULL;
+
+ pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+
+ mutex_lock(&gli->mutex);
+ if (gli->mode == MODE_NONE) {
+ /* Setup the LUN table and block allocator on first call */
+ rc = init_luntable(cfg, lli);
+ if (rc) {
+ dev_err(dev, "%s: call to init_luntable failed "
+ "rc=%d!\n", __func__, rc);
+ goto err0;
+ }
+
+ rc = init_vlun(lli);
+ if (rc) {
+ dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto err0;
+ }
+ }
+
+ rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n",
+ __func__);
+ goto err0;
+ }
+ mutex_unlock(&gli->mutex);
+
+ ctxi = get_context(cfg, rctxid, lli, 0);
+ if (unlikely(!ctxi)) {
+ dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ rhte = rhte_checkout(ctxi, lli);
+ if (unlikely(!rhte)) {
+ dev_err(dev, "%s: too many opens for this context\n", __func__);
+ rc = -EMFILE; /* too many opens */
+ goto err1;
+ }
+
+ rsrc_handle = (rhte - ctxi->rht_start);
+
+ /* Populate RHT format 0 */
+ rhte->nmask = MC_RHT_NMASK;
+ rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
+
+ /* Resize even if requested size is 0 */
+ marshal_virt_to_resize(virt, &resize);
+ resize.rsrc_handle = rsrc_handle;
+ rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
+ if (rc) {
+ dev_err(dev, "%s: resize failed rc %d\n", __func__, rc);
+ goto err2;
+ }
+ last_lba = resize.last_lba;
+
+ if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
+ ctxi->rht_needs_ws[rsrc_handle] = true;
+
+ virt->hdr.return_flags = 0;
+ virt->last_lba = last_lba;
+ virt->rsrc_handle = rsrc_handle;
+
+out:
+ if (likely(ctxi))
+ put_context(ctxi);
+ pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n",
+ __func__, rsrc_handle, rc, last_lba);
+ return rc;
+
+err2:
+ rhte_checkin(ctxi, rhte);
+err1:
+ cxlflash_lun_detach(gli);
+ goto out;
+err0:
+ /* Special common cleanup prior to successful LUN attach */
+ cxlflash_ba_terminate(&gli->blka.ba_lun);
+ mutex_unlock(&gli->mutex);
+ goto out;
+}
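+
+/*
+ * Illustrative user-space sketch (assumed descriptor/context; not part
+ * of the driver): provisioning a 1 GB virtual LUN. lun_size is in 4K
+ * blocks, so 1 GB = 262144 blocks:
+ *
+ *	struct dk_cxlflash_uvirtual virt;
+ *
+ *	memset(&virt, 0, sizeof(virt));
+ *	virt.hdr.version = DK_CXLFLASH_VERSION_0;
+ *	virt.context_id = rctxid;
+ *	virt.lun_size = 262144;
+ *	rc = ioctl(disk_fd, DK_CXLFLASH_USER_VIRTUAL, &virt);
+ *	// virt.last_lba reflects what was actually provisioned.
+ */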
+
+/**
+ * clone_lxt() - copies translation tables from source to destination RHTE
+ * @afu: AFU associated with the host.
+ * @blka: Block allocator associated with LUN.
+ * @ctxid: Context ID of context owning the RHTE.
+ * @rhndl: Resource handle associated with the RHTE.
+ * @rhte: Destination resource handle entry (RHTE).
+ * @rhte_src: Source resource handle entry (RHTE).
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int clone_lxt(struct afu *afu,
+ struct blka *blka,
+ ctx_hndl_t ctxid,
+ res_hndl_t rhndl,
+ struct sisl_rht_entry *rhte,
+ struct sisl_rht_entry *rhte_src)
+{
+ struct sisl_lxt_entry *lxt;
+ u32 ngrps;
+ u64 aun; /* chunk# allocated by block allocator */
+ int i, j;
+
+ ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
+
+ if (ngrps) {
+ /* allocate new LXTs for clone */
+ lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+ GFP_KERNEL);
+ if (unlikely(!lxt))
+ return -ENOMEM;
+
+ /* copy over */
+ memcpy(lxt, rhte_src->lxt_start,
+ (sizeof(*lxt) * rhte_src->lxt_cnt));
+
+ /* clone the LBAs in block allocator via ref_cnt */
+ mutex_lock(&blka->mutex);
+ for (i = 0; i < rhte_src->lxt_cnt; i++) {
+ aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
+ if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
+ /* free the clones already made */
+ for (j = 0; j < i; j++) {
+ aun = (lxt[j].rlba_base >>
+ MC_CHUNK_SHIFT);
+ ba_free(&blka->ba_lun, aun);
+ }
+
+ mutex_unlock(&blka->mutex);
+ kfree(lxt);
+ return -EIO;
+ }
+ }
+ mutex_unlock(&blka->mutex);
+ } else {
+ lxt = NULL;
+ }
+
+ /*
+ * The following sequence is prescribed in the SISlite spec
+ * for syncing up with the AFU when adding LXT entries.
+ */
+ dma_wmb(); /* Make LXT updates visible */
+
+ rhte->lxt_start = lxt;
+ dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+ rhte->lxt_cnt = rhte_src->lxt_cnt;
+ dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+ cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+
+ pr_debug("%s: returning\n", __func__);
+ return 0;
+}
+
+/**
+ * cxlflash_disk_clone() - clone a context by making snapshot of another
+ * @sdev: SCSI device associated with LUN owning virtual LUN.
+ * @clone: Clone ioctl data structure.
+ *
+ * This routine effectively performs the cxlflash_disk_open operation for each
+ * in-use virtual resource in the source context. Note that the destination
+ * context must be in pristine state and cannot have any resource handles
+ * open at the time of the clone.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_disk_clone(struct scsi_device *sdev,
+ struct dk_cxlflash_clone *clone)
+{
+ struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+ struct llun_info *lli = sdev->hostdata;
+ struct glun_info *gli = lli->parent;
+ struct blka *blka = &gli->blka;
+ struct afu *afu = cfg->afu;
+ struct dk_cxlflash_release release = { { 0 }, 0 };
+
+ struct ctx_info *ctxi_src = NULL,
+ *ctxi_dst = NULL;
+ struct lun_access *lun_access_src, *lun_access_dst;
+ u32 perms;
+ u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
+ ctxid_dst = DECODE_CTXID(clone->context_id_dst),
+ rctxid_src = clone->context_id_src,
+ rctxid_dst = clone->context_id_dst;
+ int adap_fd_src = clone->adap_fd_src;
+ int i, j;
+ int rc = 0;
+ bool found;
+ LIST_HEAD(sidecar);
+
+ pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
+ __func__, ctxid_src, ctxid_dst, adap_fd_src);
+
+ /* Do not clone yourself */
+ if (unlikely(rctxid_src == rctxid_dst)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(gli->mode != MODE_VIRTUAL)) {
+ rc = -EINVAL;
+ pr_debug("%s: Clone not supported on physical LUNs! (%d)\n",
+ __func__, gli->mode);
+ goto out;
+ }
+
+ ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
+ ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
+ if (unlikely(!ctxi_src || !ctxi_dst)) {
+ pr_debug("%s: Bad context! (%llu,%llu)\n", __func__,
+ ctxid_src, ctxid_dst);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(adap_fd_src != ctxi_src->lfd)) {
+ pr_debug("%s: Invalid source adapter fd! (%d)\n",
+ __func__, adap_fd_src);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Verify there is no open resource handle in the destination context */
+ for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
+ if (ctxi_dst->rht_start[i].nmask != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Clone LUN access list */
+ list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
+ found = false;
+ list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
+ if (lun_access_dst->sdev == lun_access_src->sdev) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ lun_access_dst = kzalloc(sizeof(*lun_access_dst),
+ GFP_KERNEL);
+ if (unlikely(!lun_access_dst)) {
+ pr_err("%s: Unable to allocate lun_access!\n",
+ __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *lun_access_dst = *lun_access_src;
+ list_add(&lun_access_dst->list, &sidecar);
+ }
+ }
+
+ if (unlikely(!ctxi_src->rht_out)) {
+ pr_debug("%s: Nothing to clone!\n", __func__);
+ goto out_success;
+ }
+
+ /* User specified permission on attach */
+ perms = ctxi_dst->rht_perms;
+
+ /*
+ * Copy over checked-out RHT (and their associated LXT) entries by
+ * hand, stopping after we've copied all outstanding entries and
+ * cleaning up if the clone fails.
+ *
+ * Note: This loop is equivalent to performing cxlflash_disk_open and
+ * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
+ * account by attaching after each successful RHT entry clone. In the
+ * event that a clone failure is experienced, the LUN detach is handled
+ * via the cleanup performed by _cxlflash_disk_release.
+ */
+ for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
+ if (ctxi_src->rht_out == ctxi_dst->rht_out)
+ break;
+ if (ctxi_src->rht_start[i].nmask == 0)
+ continue;
+
+ /* Consume a destination RHT entry */
+ ctxi_dst->rht_out++;
+ ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
+ ctxi_dst->rht_start[i].fp =
+ SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
+ ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
+
+ rc = clone_lxt(afu, blka, ctxid_dst, i,
+ &ctxi_dst->rht_start[i],
+ &ctxi_src->rht_start[i]);
+ if (rc) {
+ marshal_clone_to_rele(clone, &release);
+ for (j = 0; j < i; j++) {
+ release.rsrc_handle = j;
+ _cxlflash_disk_release(sdev, ctxi_dst,
+ &release);
+ }
+
+ /* Put back the one we failed on */
+ rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
+ goto err;
+ }
+
+ cxlflash_lun_attach(gli, gli->mode, false);
+ }
+
+out_success:
+ list_splice(&sidecar, &ctxi_dst->luns);
+ sys_close(adap_fd_src);
+
+ /* fall through */
+out:
+ if (ctxi_src)
+ put_context(ctxi_src);
+ if (ctxi_dst)
+ put_context(ctxi_dst);
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+
+err:
+ list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
+ kfree(lun_access_src);
+ goto out;
+}
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
new file mode 100644
index 000000000000..8b29a74946e4
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.h
@@ -0,0 +1,86 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_VLUN_H
+#define _CXLFLASH_VLUN_H
+
+/* RHT - Resource Handle Table */
+#define MC_RHT_NMASK 16 /* in bits */
+#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
+
+#define HIBIT (BITS_PER_LONG - 1)
+
+#define MAX_AUN_CLONE_CNT 0xFF
+
+/*
+ * LXT - LBA Translation Table
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+---+---+
+ * | RLBA_BASE |LUN_IDX| P |SEL|
+ * +-------+-------+-------+-------+-------+-------+-------+---+---+
+ *
+ * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
+ * The AFU ORs the low-order bits from the virtual LBA (offset into the chunk)
+ * with RLBA_BASE. The result is the physical LBA to be sent to storage.
+ * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
+ * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
+ * with a global port select bit-mask maintained by the driver.
+ * In addition, it has permission bits that are ANDed with the
+ * RHT permissions to arrive at the final permissions for the chunk.
+ *
+ * LXT tables are allocated dynamically in groups. This is done to avoid
+ * a malloc/free overhead each time the LXT has to grow or shrink.
+ *
+ * Based on the current lxt_cnt (used), it is always possible to know
+ * how many are allocated (used+free). The number of allocated entries is
+ * not stored anywhere.
+ *
+ * The LXT table is re-allocated whenever it needs to cross into another group.
+ */
+#define LXT_GROUP_SIZE 8
+#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
+#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
+#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
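+
+/*
+ * Example (illustrative): with lxt_cnt = 9, LXT_NUM_GROUPS(9) = 2 and
+ * 2 * LXT_GROUP_SIZE = 16 entries are allocated; growing to 16 entries
+ * needs no reallocation, while a 17th entry crosses into a third group.
+ */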
+
+struct ba_lun_info {
+ u64 *lun_alloc_map;
+ u32 lun_bmap_size;
+ u32 total_aus;
+ u64 free_aun_cnt;
+
+ /* indices to be used for elevator lookup of free map */
+ u32 free_low_idx;
+ u32 free_curr_idx;
+ u32 free_high_idx;
+
+ u8 *aun_clone_map;
+};
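+
+/*
+ * Note (illustrative): the three indices implement a simple elevator
+ * over the allocation bitmap. ba_alloc() scans free_curr_idx up to
+ * free_high_idx first, then wraps to free_low_idx; ba_free() widens
+ * the low/high bounds as units are returned.
+ */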
+
+struct ba_lun {
+ u64 lun_id;
+ u64 wwpn;
+ size_t lsize; /* LUN size in number of LBAs */
+ size_t lba_size; /* LBA size in number of bytes */
+ size_t au_size; /* Allocation Unit size in number of LBAs */
+ struct ba_lun_info *ba_lun_handle;
+};
+
+/* Block Allocator */
+struct blka {
+ struct ba_lun ba_lun;
+ u64 nchunk; /* number of chunks */
+ struct mutex mutex;
+};
+
+#endif /* ifndef _CXLFLASH_VLUN_H */
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 69abd0ad48e2..e5647d59224f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig SCSI_DH
- tristate "SCSI Device Handlers"
+ bool "SCSI Device Handlers"
depends on SCSI
default n
help
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index e1d2ea083e15..09866c50fbb4 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -1,7 +1,6 @@
#
# SCSI Device Handler
#
-obj-$(CONFIG_SCSI_DH) += scsi_dh.o
obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
deleted file mode 100644
index 1efebc9eedfb..000000000000
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * SCSI device handler infrastructure.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2007
- * Authors:
- * Chandra Seetharaman <sekharan@us.ibm.com>
- * Mike Anderson <andmike@linux.vnet.ibm.com>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <scsi/scsi_dh.h>
-#include "../scsi_priv.h"
-
-static DEFINE_SPINLOCK(list_lock);
-static LIST_HEAD(scsi_dh_list);
-
-static struct scsi_device_handler *get_device_handler(const char *name)
-{
- struct scsi_device_handler *tmp, *found = NULL;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (!strncmp(tmp->name, name, strlen(tmp->name))) {
- found = tmp;
- break;
- }
- }
- spin_unlock(&list_lock);
- return found;
-}
-
-/*
- * device_handler_match_function - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against the match function of all registered device_handler.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_function(struct scsi_device *sdev)
-{
- struct scsi_device_handler *tmp_dh, *found_dh = NULL;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
- if (tmp_dh->match && tmp_dh->match(sdev)) {
- found_dh = tmp_dh;
- break;
- }
- }
- spin_unlock(&list_lock);
- return found_dh;
-}
-
-/*
- * device_handler_match - Attach a device handler to a device
- * @scsi_dh - The device handler to match against or NULL
- * @sdev - SCSI device to be tested against @scsi_dh
- *
- * Tests @sdev against the device handler @scsi_dh or against
- * all registered device_handler if @scsi_dh == NULL.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match(struct scsi_device_handler *scsi_dh,
- struct scsi_device *sdev)
-{
- struct scsi_device_handler *found_dh;
-
- found_dh = device_handler_match_function(sdev);
-
- if (scsi_dh && found_dh != scsi_dh)
- found_dh = NULL;
-
- return found_dh;
-}
-
-/*
- * scsi_dh_handler_attach - Attach a device handler to a device
- * @sdev - SCSI device the device handler should attach to
- * @scsi_dh - The device handler to attach
- */
-static int scsi_dh_handler_attach(struct scsi_device *sdev,
- struct scsi_device_handler *scsi_dh)
-{
- struct scsi_dh_data *d;
-
- if (sdev->scsi_dh_data) {
- if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
- return -EBUSY;
-
- kref_get(&sdev->scsi_dh_data->kref);
- return 0;
- }
-
- if (!try_module_get(scsi_dh->module))
- return -EINVAL;
-
- d = scsi_dh->attach(sdev);
- if (IS_ERR(d)) {
- sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n",
- scsi_dh->name, PTR_ERR(d));
- module_put(scsi_dh->module);
- return PTR_ERR(d);
- }
-
- d->scsi_dh = scsi_dh;
- kref_init(&d->kref);
- d->sdev = sdev;
-
- spin_lock_irq(sdev->request_queue->queue_lock);
- sdev->scsi_dh_data = d;
- spin_unlock_irq(sdev->request_queue->queue_lock);
- return 0;
-}
-
-static void __detach_handler (struct kref *kref)
-{
- struct scsi_dh_data *scsi_dh_data =
- container_of(kref, struct scsi_dh_data, kref);
- struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
- struct scsi_device *sdev = scsi_dh_data->sdev;
-
- scsi_dh->detach(sdev);
-
- spin_lock_irq(sdev->request_queue->queue_lock);
- sdev->scsi_dh_data = NULL;
- spin_unlock_irq(sdev->request_queue->queue_lock);
-
- sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
- module_put(scsi_dh->module);
-}
-
-/*
- * scsi_dh_handler_detach - Detach a device handler from a device
- * @sdev - SCSI device the device handler should be detached from
- * @scsi_dh - Device handler to be detached
- *
- * Detach from a device handler. If a device handler is specified,
- * only detach if the currently attached handler matches @scsi_dh.
- */
-static void scsi_dh_handler_detach(struct scsi_device *sdev,
- struct scsi_device_handler *scsi_dh)
-{
- if (!sdev->scsi_dh_data)
- return;
-
- if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
- return;
-
- if (!scsi_dh)
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
-
- if (scsi_dh)
- kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
-}
-
-/*
- * Functions for sysfs attribute 'dh_state'
- */
-static ssize_t
-store_dh_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_device_handler *scsi_dh;
- int err = -EINVAL;
-
- if (sdev->sdev_state == SDEV_CANCEL ||
- sdev->sdev_state == SDEV_DEL)
- return -ENODEV;
-
- if (!sdev->scsi_dh_data) {
- /*
- * Attach to a device handler
- */
- if (!(scsi_dh = get_device_handler(buf)))
- return err;
- err = scsi_dh_handler_attach(sdev, scsi_dh);
- } else {
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
- if (!strncmp(buf, "detach", 6)) {
- /*
- * Detach from a device handler
- */
- scsi_dh_handler_detach(sdev, scsi_dh);
- err = 0;
- } else if (!strncmp(buf, "activate", 8)) {
- /*
- * Activate a device handler
- */
- if (scsi_dh->activate)
- err = scsi_dh->activate(sdev, NULL, NULL);
- else
- err = 0;
- }
- }
-
-	return err < 0 ? err : count;
-}
-
-static ssize_t
-show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- if (!sdev->scsi_dh_data)
- return snprintf(buf, 20, "detached\n");
-
- return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
-}
-
-static struct device_attribute scsi_dh_state_attr =
- __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
- store_dh_state);
-
-/*
- * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
- */
-static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
-{
- struct scsi_device *sdev;
- int err;
-
- if (!scsi_is_sdev_device(dev))
- return 0;
-
- sdev = to_scsi_device(dev);
-
- err = device_create_file(&sdev->sdev_gendev,
- &scsi_dh_state_attr);
-
- return 0;
-}
-
-/*
- * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
- */
-static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
-{
- struct scsi_device *sdev;
-
- if (!scsi_is_sdev_device(dev))
- return 0;
-
- sdev = to_scsi_device(dev);
-
- device_remove_file(&sdev->sdev_gendev,
- &scsi_dh_state_attr);
-
- return 0;
-}
-
-/*
- * scsi_dh_notifier - notifier chain callback
- */
-static int scsi_dh_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct scsi_device *sdev;
- int err = 0;
- struct scsi_device_handler *devinfo = NULL;
-
- if (!scsi_is_sdev_device(dev))
- return 0;
-
- sdev = to_scsi_device(dev);
-
- if (action == BUS_NOTIFY_ADD_DEVICE) {
- err = device_create_file(dev, &scsi_dh_state_attr);
- /* don't care about err */
- devinfo = device_handler_match(NULL, sdev);
- if (devinfo)
- err = scsi_dh_handler_attach(sdev, devinfo);
- } else if (action == BUS_NOTIFY_DEL_DEVICE) {
- device_remove_file(dev, &scsi_dh_state_attr);
- scsi_dh_handler_detach(sdev, NULL);
- }
- return err;
-}
-
-/*
- * scsi_dh_notifier_add - Callback for scsi_register_device_handler
- */
-static int scsi_dh_notifier_add(struct device *dev, void *data)
-{
- struct scsi_device_handler *scsi_dh = data;
- struct scsi_device *sdev;
-
- if (!scsi_is_sdev_device(dev))
- return 0;
-
- if (!get_device(dev))
- return 0;
-
- sdev = to_scsi_device(dev);
-
- if (device_handler_match(scsi_dh, sdev))
- scsi_dh_handler_attach(sdev, scsi_dh);
-
- put_device(dev);
-
- return 0;
-}
-
-/*
- * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
- */
-static int scsi_dh_notifier_remove(struct device *dev, void *data)
-{
- struct scsi_device_handler *scsi_dh = data;
- struct scsi_device *sdev;
-
- if (!scsi_is_sdev_device(dev))
- return 0;
-
- if (!get_device(dev))
- return 0;
-
- sdev = to_scsi_device(dev);
-
- scsi_dh_handler_detach(sdev, scsi_dh);
-
- put_device(dev);
-
- return 0;
-}
-
-/*
- * scsi_register_device_handler - register a device handler personality
- * module.
- * @scsi_dh - device handler to be registered.
- *
- * Returns 0 on success, -EBUSY if handler already registered.
- */
-int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
-{
-
- if (get_device_handler(scsi_dh->name))
- return -EBUSY;
-
- if (!scsi_dh->attach || !scsi_dh->detach)
- return -EINVAL;
-
- spin_lock(&list_lock);
- list_add(&scsi_dh->list, &scsi_dh_list);
- spin_unlock(&list_lock);
-
- bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
- printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
-
- return SCSI_DH_OK;
-}
-EXPORT_SYMBOL_GPL(scsi_register_device_handler);
-
-/*
- * scsi_unregister_device_handler - unregister a device handler personality
- * module.
- * @scsi_dh - device handler to be unregistered.
- *
- * Returns 0 on success, -ENODEV if handler not registered.
- */
-int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
-{
-
- if (!get_device_handler(scsi_dh->name))
- return -ENODEV;
-
- bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
- scsi_dh_notifier_remove);
-
- spin_lock(&list_lock);
- list_del(&scsi_dh->list);
- spin_unlock(&list_lock);
- printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
-
- return SCSI_DH_OK;
-}
-EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
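For context, a minimal handler built on the interface removed here might look as follows. This is a hedged sketch, not part of this patch; every "example_*" name is illustrative. Private data embeds struct scsi_dh_data, ->attach() returns it (or an ERR_PTR() on failure), and ->detach() recovers the container and frees it:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>

/* Hypothetical example handler; every name here is illustrative. */
struct example_dh_data {
	struct scsi_dh_data dh_data;
	int state;
};

static struct scsi_dh_data *example_attach(struct scsi_device *sdev)
{
	struct example_dh_data *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return ERR_PTR(-ENOMEM);
	return &h->dh_data;
}

static void example_detach(struct scsi_device *sdev)
{
	kfree(container_of(sdev->scsi_dh_data,
			   struct example_dh_data, dh_data));
}

static struct scsi_device_handler example_dh = {
	.name	= "example",
	.module	= THIS_MODULE,
	.attach	= example_attach,
	.detach	= example_detach,
};

static int __init example_init(void)
{
	/* -EBUSY if a handler of the same name is already registered */
	return scsi_register_device_handler(&example_dh);
}

static void __exit example_exit(void)
{
	scsi_unregister_device_handler(&example_dh);
}

module_init(example_init);
module_exit(example_exit);

This embedding-plus-container_of pattern is exactly what the per-handler diffs below (alua, emc, hp_sw, rdac) remove in favor of sdev->handler_data.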
-
-/*
- * scsi_dh_activate - activate the path associated with the scsi_device
- * corresponding to the given request queue.
- * Returns immediately without waiting for activation to be completed.
- * @q - Request queue that is associated with the scsi_device to be
- * activated.
- * @fn - Function to be called upon completion of the activation.
- * Function fn is called with data (below) and the error code.
- * Function fn may be called from the same calling context. So,
- * the caller must not hold any lock that fn may need.
- * @data - data passed to the function fn upon completion.
- *
- */
-int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
-{
- int err = 0;
- unsigned long flags;
- struct scsi_device *sdev;
- struct scsi_device_handler *scsi_dh = NULL;
- struct device *dev = NULL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (!sdev) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- err = SCSI_DH_NOSYS;
- if (fn)
- fn(data, err);
- return err;
- }
-
- if (sdev->scsi_dh_data)
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
- dev = get_device(&sdev->sdev_gendev);
- if (!scsi_dh || !dev ||
- sdev->sdev_state == SDEV_CANCEL ||
- sdev->sdev_state == SDEV_DEL)
- err = SCSI_DH_NOSYS;
- if (sdev->sdev_state == SDEV_OFFLINE)
- err = SCSI_DH_DEV_OFFLINED;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- if (err) {
- if (fn)
- fn(data, err);
- goto out;
- }
-
- if (scsi_dh->activate)
- err = scsi_dh->activate(sdev, fn, data);
-out:
- put_device(dev);
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_activate);
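As a usage illustration (hypothetical caller, not part of this patch): activation is fire-and-forget from the caller's point of view, and because the completion function may run in the caller's own context it must not depend on locks the caller holds:

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

/* Hypothetical completion callback; runs when activation finishes. */
static void example_activate_done(void *data, int err)
{
	if (err != SCSI_DH_OK)
		pr_warn("path activation failed: %d\n", err);
}

static int example_activate_path(struct request_queue *q)
{
	/* Returns immediately; example_activate_done() reports status. */
	return scsi_dh_activate(q, example_activate_done, NULL);
}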
-
-/*
- * scsi_dh_set_params - set the parameters for the device as per the
- * string specified in params.
- * @q - Request queue that is associated with the scsi_device for
- * which the parameters are to be set.
- * @params - parameters in the following format
- * "no_of_params\0param1\0param2\0param3\0...\0"
- * for example, the string for two parameters with values 10 and 21
- * is "2\0" "10\0" "21\0" (the separators are raw NUL bytes).
- */
-int scsi_dh_set_params(struct request_queue *q, const char *params)
-{
- int err = -SCSI_DH_NOSYS;
- unsigned long flags;
- struct scsi_device *sdev;
- struct scsi_device_handler *scsi_dh = NULL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (sdev && sdev->scsi_dh_data)
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
- if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
- err = 0;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- if (err)
- return err;
- err = scsi_dh->set_params(sdev, params);
- put_device(&sdev->sdev_gendev);
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_set_params);
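Because the separators above are raw NUL bytes rather than C escape sequences, building the string in C takes adjacent literals. A hedged sketch (hypothetical caller, not part of this patch) for two parameters with values 10 and 21:

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

static int example_set_two_params(struct request_queue *q)
{
	/* "2" NUL "10" NUL "21" NUL: count first, then each value. */
	static const char params[] = "2\0" "10\0" "21";

	return scsi_dh_set_params(q, params);
}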
-
-/*
- * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists
- * for the given name, FALSE(0) otherwise.
- * @name - name of the device handler.
- */
-int scsi_dh_handler_exist(const char *name)
-{
- return (get_device_handler(name) != NULL);
-}
-EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
-
-/*
- * scsi_dh_attach - Attach device handler
- * @q - Request queue that is associated with the scsi_device
- * the handler should be attached to
- * @name - name of the handler to attach
- */
-int scsi_dh_attach(struct request_queue *q, const char *name)
-{
- unsigned long flags;
- struct scsi_device *sdev;
- struct scsi_device_handler *scsi_dh;
- int err = 0;
-
- scsi_dh = get_device_handler(name);
- if (!scsi_dh)
- return -EINVAL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (!sdev || !get_device(&sdev->sdev_gendev))
- err = -ENODEV;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- if (!err) {
- err = scsi_dh_handler_attach(sdev, scsi_dh);
- put_device(&sdev->sdev_gendev);
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_attach);
-
-/*
- * scsi_dh_detach - Detach device handler
- * @q - Request queue that is associated with the scsi_device
- * the handler should be detached from
- *
- * This function will detach the device handler only
- * if the sdev is not part of the internal list, i.e.
- * if it has been attached manually.
- */
-void scsi_dh_detach(struct request_queue *q)
-{
- unsigned long flags;
- struct scsi_device *sdev;
- struct scsi_device_handler *scsi_dh = NULL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (!sdev || !get_device(&sdev->sdev_gendev))
- sdev = NULL;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- if (!sdev)
- return;
-
- if (sdev->scsi_dh_data) {
- scsi_dh = sdev->scsi_dh_data->scsi_dh;
- scsi_dh_handler_detach(sdev, scsi_dh);
- }
- put_device(&sdev->sdev_gendev);
-}
-EXPORT_SYMBOL_GPL(scsi_dh_detach);
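A hedged usage sketch covering scsi_dh_attach() and scsi_dh_detach() together (hypothetical caller; "alua" is the name registered by the ALUA handler patched below):

static int example_manual_attach(struct request_queue *q)
{
	int err;

	/* Attach by name; -EINVAL if no such handler is registered. */
	err = scsi_dh_attach(q, "alua");
	if (err)
		return err;

	/* ... the handler is now active for this device ... */

	/* Drop the manually taken attachment again. */
	scsi_dh_detach(q);
	return 0;
}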
-
-/*
- * scsi_dh_attached_handler_name - Get attached device handler's name
- * @q - Request queue that is associated with the scsi_device
- * that may have a device handler attached
- * @gfp - the GFP mask used in the kmalloc() call when allocating memory
- *
- * Returns name of attached handler, NULL if no handler is attached.
- * Caller must take care to free the returned string.
- */
-const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
-{
- unsigned long flags;
- struct scsi_device *sdev;
- const char *handler_name = NULL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (!sdev || !get_device(&sdev->sdev_gendev))
- sdev = NULL;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- if (!sdev)
- return NULL;
-
- if (sdev->scsi_dh_data)
- handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
-
- put_device(&sdev->sdev_gendev);
- return handler_name;
-}
-EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
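A hedged usage sketch (hypothetical caller, not part of this patch): the returned name is a kstrdup() copy, so the caller owns it and must kfree() it:

#include <linux/slab.h>
#include <scsi/scsi_dh.h>

static void example_log_handler_name(struct request_queue *q)
{
	const char *name;

	name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (name) {
		pr_info("attached device handler: %s\n", name);
		kfree(name);
	}
}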
-
-static struct notifier_block scsi_dh_nb = {
- .notifier_call = scsi_dh_notifier
-};
-
-static int __init scsi_dh_init(void)
-{
- int r;
-
- r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
-
- if (!r)
- bus_for_each_dev(&scsi_bus_type, NULL, NULL,
- scsi_dh_sysfs_attr_add);
-
- return r;
-}
-
-static void __exit scsi_dh_exit(void)
-{
- bus_for_each_dev(&scsi_bus_type, NULL, NULL,
- scsi_dh_sysfs_attr_remove);
- bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
-}
-
-module_init(scsi_dh_init);
-module_exit(scsi_dh_exit);
-
-MODULE_DESCRIPTION("SCSI device handler");
-MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 854b568b9931..cc2773b5de68 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -62,7 +62,6 @@
#define ALUA_OPTIMIZE_STPG 1
struct alua_dh_data {
- struct scsi_dh_data dh_data;
int group_id;
int rel_port;
int tpgs;
@@ -86,11 +85,6 @@ struct alua_dh_data {
static char print_alua_state(int);
static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
-static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
-{
- return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
-}
-
static int realloc_buffer(struct alua_dh_data *h, unsigned len)
{
if (h->buff && h->buff != h->inq)
@@ -708,7 +702,7 @@ out:
*/
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
- struct alua_dh_data *h = get_alua_data(sdev);
+ struct alua_dh_data *h = sdev->handler_data;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -746,7 +740,7 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than
static int alua_activate(struct scsi_device *sdev,
activate_complete fn, void *data)
{
- struct alua_dh_data *h = get_alua_data(sdev);
+ struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
int stpg = 0;
@@ -804,7 +798,7 @@ out:
*/
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
- struct alua_dh_data *h = get_alua_data(sdev);
+ struct alua_dh_data *h = sdev->handler_data;
int ret = BLKPREP_OK;
if (h->state == TPGS_STATE_TRANSITIONING)
@@ -819,23 +813,18 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
-static bool alua_match(struct scsi_device *sdev)
-{
- return (scsi_device_tpgs(sdev) != 0);
-}
-
/*
* alua_bus_attach - Attach device handler
* @sdev: device to be attached to
*/
-static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
+static int alua_bus_attach(struct scsi_device *sdev)
{
struct alua_dh_data *h;
int err;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
h->tpgs = TPGS_MODE_UNINITIALIZED;
h->state = TPGS_STATE_OPTIMIZED;
h->group_id = -1;
@@ -848,11 +837,11 @@ static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
goto failed;
- sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
- return &h->dh_data;
+ sdev->handler_data = h;
+ return 0;
failed:
kfree(h);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
@@ -861,10 +850,11 @@ failed:
*/
static void alua_bus_detach(struct scsi_device *sdev)
{
- struct alua_dh_data *h = get_alua_data(sdev);
+ struct alua_dh_data *h = sdev->handler_data;
if (h->buff && h->inq != h->buff)
kfree(h->buff);
+ sdev->handler_data = NULL;
kfree(h);
}
@@ -877,7 +867,6 @@ static struct scsi_device_handler alua_dh = {
.check_sense = alua_check_sense,
.activate = alua_activate,
.set_params = alua_set_params,
- .match = alua_match,
};
static int __init alua_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6ed1caadbc6a..e6fb97cb12f4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -72,7 +72,6 @@ static const char * lun_state[] =
};
struct clariion_dh_data {
- struct scsi_dh_data dh_data;
/*
* Flags:
* CLARIION_SHORT_TRESPASS
@@ -114,13 +113,6 @@ struct clariion_dh_data {
int current_sp;
};
-static inline struct clariion_dh_data
- *get_clariion_data(struct scsi_device *sdev)
-{
- return container_of(sdev->scsi_dh_data, struct clariion_dh_data,
- dh_data);
-}
-
/*
* Parse MODE_SELECT cmd reply.
*/
@@ -450,7 +442,7 @@ static int clariion_check_sense(struct scsi_device *sdev,
static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
{
- struct clariion_dh_data *h = get_clariion_data(sdev);
+ struct clariion_dh_data *h = sdev->handler_data;
int ret = BLKPREP_OK;
if (h->lun_state != CLARIION_LUN_OWNED) {
@@ -533,7 +525,7 @@ retry:
static int clariion_activate(struct scsi_device *sdev,
activate_complete fn, void *data)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ struct clariion_dh_data *csdev = sdev->handler_data;
int result;
result = clariion_send_inquiry(sdev, csdev);
@@ -574,7 +566,7 @@ done:
*/
static int clariion_set_params(struct scsi_device *sdev, const char *params)
{
- struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ struct clariion_dh_data *csdev = sdev->handler_data;
unsigned int hr = 0, st = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
@@ -622,42 +614,14 @@ done:
return result;
}
-static const struct {
- char *vendor;
- char *model;
-} clariion_dev_list[] = {
- {"DGC", "RAID"},
- {"DGC", "DISK"},
- {"DGC", "VRAID"},
- {NULL, NULL},
-};
-
-static bool clariion_match(struct scsi_device *sdev)
-{
- int i;
-
- if (scsi_device_tpgs(sdev))
- return false;
-
- for (i = 0; clariion_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
- strlen(clariion_dev_list[i].vendor)) &&
- !strncmp(sdev->model, clariion_dev_list[i].model,
- strlen(clariion_dev_list[i].model))) {
- return true;
- }
- }
- return false;
-}
-
-static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
+static int clariion_bus_attach(struct scsi_device *sdev)
{
struct clariion_dh_data *h;
int err;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
h->lun_state = CLARIION_LUN_UNINITIALIZED;
h->default_sp = CLARIION_UNBOUND_LU;
h->current_sp = CLARIION_UNBOUND_LU;
@@ -675,18 +639,19 @@ static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
CLARIION_NAME, h->current_sp + 'A',
h->port, lun_state[h->lun_state],
h->default_sp + 'A');
- return &h->dh_data;
+
+ sdev->handler_data = h;
+ return 0;
failed:
kfree(h);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void clariion_bus_detach(struct scsi_device *sdev)
{
- struct clariion_dh_data *h = get_clariion_data(sdev);
-
- kfree(h);
+ kfree(sdev->handler_data);
+ sdev->handler_data = NULL;
}
static struct scsi_device_handler clariion_dh = {
@@ -698,7 +663,6 @@ static struct scsi_device_handler clariion_dh = {
.activate = clariion_activate,
.prep_fn = clariion_prep_fn,
.set_params = clariion_set_params,
- .match = clariion_match,
};
static int __init clariion_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 485d99544a15..9406d5f4a3d3 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,7 +38,6 @@
#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
- struct scsi_dh_data dh_data;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int path_state;
int retries;
@@ -50,11 +49,6 @@ struct hp_sw_dh_data {
static int hp_sw_start_stop(struct hp_sw_dh_data *);
-static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
-{
- return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data);
-}
-
/*
* tur_done - Handle TEST UNIT READY return status
* @sdev: sdev the command has been sent to
@@ -267,7 +261,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ struct hp_sw_dh_data *h = sdev->handler_data;
int ret = BLKPREP_OK;
if (h->path_state != HP_SW_PATH_ACTIVE) {
@@ -292,7 +286,7 @@ static int hp_sw_activate(struct scsi_device *sdev,
activate_complete fn, void *data)
{
int ret = SCSI_DH_OK;
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ struct hp_sw_dh_data *h = sdev->handler_data;
ret = hp_sw_tur(sdev, h);
@@ -311,43 +305,14 @@ static int hp_sw_activate(struct scsi_device *sdev,
return 0;
}
-static const struct {
- char *vendor;
- char *model;
-} hp_sw_dh_data_list[] = {
- {"COMPAQ", "MSA1000 VOLUME"},
- {"COMPAQ", "HSV110"},
- {"HP", "HSV100"},
- {"DEC", "HSG80"},
- {NULL, NULL},
-};
-
-static bool hp_sw_match(struct scsi_device *sdev)
-{
- int i;
-
- if (scsi_device_tpgs(sdev))
- return false;
-
- for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
- strlen(hp_sw_dh_data_list[i].vendor)) &&
- !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
- strlen(hp_sw_dh_data_list[i].model))) {
- return true;
- }
- }
- return false;
-}
-
-static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
+static int hp_sw_bus_attach(struct scsi_device *sdev)
{
struct hp_sw_dh_data *h;
int ret;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
h->path_state = HP_SW_PATH_UNINITIALIZED;
h->retries = HP_SW_RETRIES;
h->sdev = sdev;
@@ -359,17 +324,18 @@ static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
"active":"passive");
- return &h->dh_data;
+
+ sdev->handler_data = h;
+ return 0;
failed:
kfree(h);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void hp_sw_bus_detach(struct scsi_device *sdev)
{
- struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
-
- kfree(h);
+ kfree(sdev->handler_data);
+ sdev->handler_data = NULL;
}
static struct scsi_device_handler hp_sw_dh = {
@@ -379,7 +345,6 @@ static struct scsi_device_handler hp_sw_dh = {
.detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
.prep_fn = hp_sw_prep_fn,
- .match = hp_sw_match,
};
static int __init hp_sw_init(void)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b46ace3d4bf0..361358134315 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -181,7 +181,6 @@ struct c2_inquiry {
};
struct rdac_dh_data {
- struct scsi_dh_data dh_data;
struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
unsigned lun;
@@ -260,11 +259,6 @@ do { \
sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0);
-static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
-{
- return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data);
-}
-
static struct request *get_rdac_req(struct scsi_device *sdev,
void *buffer, unsigned buflen, int rw)
{
@@ -544,7 +538,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
{
struct scsi_sense_hdr sense_hdr;
int err = SCSI_DH_IO, ret;
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
@@ -589,7 +583,7 @@ static void send_mode_select(struct work_struct *work)
container_of(work, struct rdac_controller, ms_work);
struct request *rq;
struct scsi_device *sdev = ctlr->ms_sdev;
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
struct request_queue *q = sdev->request_queue;
int err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
@@ -648,7 +642,7 @@ static int queue_mode_select(struct scsi_device *sdev,
if (!qdata)
return SCSI_DH_RETRY;
- qdata->h = get_rdac_data(sdev);
+ qdata->h = sdev->handler_data;
qdata->callback_fn = fn;
qdata->callback_data = data;
@@ -667,7 +661,7 @@ static int queue_mode_select(struct scsi_device *sdev,
static int rdac_activate(struct scsi_device *sdev,
activate_complete fn, void *data)
{
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
int act = 0;
@@ -702,7 +696,7 @@ done:
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
int ret = BLKPREP_OK;
if (h->state != RDAC_STATE_ACTIVE) {
@@ -716,7 +710,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
static int rdac_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
"I/O returned with sense %02x/%02x/%02x",
@@ -778,56 +772,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-static const struct {
- char *vendor;
- char *model;
-} rdac_dev_list[] = {
- {"IBM", "1722"},
- {"IBM", "1724"},
- {"IBM", "1726"},
- {"IBM", "1742"},
- {"IBM", "1745"},
- {"IBM", "1746"},
- {"IBM", "1813"},
- {"IBM", "1814"},
- {"IBM", "1815"},
- {"IBM", "1818"},
- {"IBM", "3526"},
- {"SGI", "TP9"},
- {"SGI", "IS"},
- {"STK", "OPENstorage D280"},
- {"STK", "FLEXLINE 380"},
- {"SUN", "CSM"},
- {"SUN", "LCSM100"},
- {"SUN", "STK6580_6780"},
- {"SUN", "SUN_6180"},
- {"SUN", "ArrayStorage"},
- {"DELL", "MD3"},
- {"NETAPP", "INF-01-00"},
- {"LSI", "INF-01-00"},
- {"ENGENIO", "INF-01-00"},
- {NULL, NULL},
-};
-
-static bool rdac_match(struct scsi_device *sdev)
-{
- int i;
-
- if (scsi_device_tpgs(sdev))
- return false;
-
- for (i = 0; rdac_dev_list[i].vendor; i++) {
- if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
- strlen(rdac_dev_list[i].vendor)) &&
- !strncmp(sdev->model, rdac_dev_list[i].model,
- strlen(rdac_dev_list[i].model))) {
- return true;
- }
- }
- return false;
-}
-
-static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
+static int rdac_bus_attach(struct scsi_device *sdev)
{
struct rdac_dh_data *h;
int err;
@@ -836,7 +781,7 @@ static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
@@ -861,7 +806,8 @@ static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
RDAC_NAME, h->lun, mode[(int)h->mode],
lun_state[(int)h->lun_state]);
- return &h->dh_data;
+ sdev->handler_data = h;
+ return 0;
clean_ctlr:
spin_lock(&list_lock);
@@ -870,12 +816,12 @@ clean_ctlr:
failed:
kfree(h);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void rdac_bus_detach(struct scsi_device *sdev)
{
- struct rdac_dh_data *h = get_rdac_data(sdev);
+ struct rdac_dh_data *h = sdev->handler_data;
if (h->ctlr && h->ctlr->ms_queued)
flush_workqueue(kmpath_rdacd);
@@ -884,6 +830,7 @@ static void rdac_bus_detach( struct scsi_device *sdev )
if (h->ctlr)
kref_put(&h->ctlr->kref, release_controller);
spin_unlock(&list_lock);
+ sdev->handler_data = NULL;
kfree(h);
}
@@ -895,7 +842,6 @@ static struct scsi_device_handler rdac_dh = {
.attach = rdac_bus_attach,
.detach = rdac_bus_detach,
.activate = rdac_activate,
- .match = rdac_match,
};
static int __init rdac_init(void)
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index f35ed53adaac..d4cda5e9600e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1924,6 +1924,9 @@ static void adpt_alpha_info(sysInfo_S* si)
#endif
#if defined __i386__
+
+#include <uapi/asm/vm86.h>
+
static void adpt_i386_info(sysInfo_S* si)
{
// This is all the info we need for now
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ec193a8357d7..d3eb80c46bbe 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -364,7 +364,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* on the ethertype for the given device
*/
fcoe->fcoe_packet_type.func = fcoe_rcv;
- fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE);
fcoe->fcoe_packet_type.dev = netdev;
dev_add_pack(&fcoe->fcoe_packet_type);
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index bdc89899561a..d7597c08fa11 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -58,7 +58,7 @@ MODULE_PARM_DESC(show, " Show attached FCoE transports");
module_param_call(create, fcoe_transport_create, NULL,
(void *)FIP_MODE_FABRIC, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an ethernet interface");
module_param_call(create_vn2vn, fcoe_transport_create, NULL,
(void *)FIP_MODE_VN2VN, S_IWUSR);
@@ -68,15 +68,15 @@ MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an ethernet interface");
module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
+MODULE_PARM_DESC(enable, " Enables fcoe on an ethernet interface.");
module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
+MODULE_PARM_DESC(disable, " Disables fcoe on an ethernet interface.");
/* notification function for packets from net device */
static struct notifier_block libfcoe_notifier = {
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.17"
+#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
+ int io_lock_acquired = 0;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
+ io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
	/* we hold the io_lock only if we actually issued the IO */
- if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+ if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1dafeb43333b..40669f8dd0df 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,7 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ * Copyright 2014-2015 PMC-Sierra, Inc.
+ * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ * Questions/Comments/Bugfixes to storagedev@pmcs.com
*
*/
@@ -132,6 +129,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
+ {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -190,6 +192,11 @@ static struct board_type products[] = {
{0x21CD103C, "Smart Array", &SA5_access},
{0x21CE103C, "Smart HBA", &SA5_access},
{0x05809005, "SmartHBA-SA", &SA5_access},
+ {0x05819005, "SmartHBA-SA 8i", &SA5_access},
+ {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
+ {0x05839005, "SmartHBA-SA 8e", &SA5_access},
+ {0x05849005, "SmartHBA-SA 16i", &SA5_access},
+ {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -267,6 +274,7 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
+static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -325,7 +333,7 @@ static int check_for_unit_attention(struct ctlr_info *h,
decode_sense_data(c->err_info->SenseInfo, sense_len,
&sense_key, &asc, &ascq);
- if (sense_key != UNIT_ATTENTION || asc == -1)
+ if (sense_key != UNIT_ATTENTION || asc == 0xff)
return 0;
switch (asc) {
@@ -717,12 +725,107 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
return snprintf(buf, 20, "%d\n", offload_enabled);
}
+#define MAX_PATHS 8
+#define PATH_STRING_LEN 50
+
+static ssize_t path_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ int i;
+ int output_len = 0;
+ u8 box;
+ u8 bay;
+ u8 path_map_index = 0;
+ char *active;
+ unsigned char phys_connector[2];
+ unsigned char path[MAX_PATHS][PATH_STRING_LEN];
+
+ memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->devlock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return -ENODEV;
+ }
+
+ bay = hdev->bay;
+ for (i = 0; i < MAX_PATHS; i++) {
+ path_map_index = 1<<i;
+ if (i == hdev->active_path_index)
+ active = "Active";
+ else if (hdev->path_map & path_map_index)
+ active = "Inactive";
+ else
+ continue;
+
+ output_len = snprintf(path[i],
+ PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
+ h->scsi_host->host_no,
+ hdev->bus, hdev->target, hdev->lun,
+ scsi_device_type(hdev->devtype));
+
+ if (is_ext_target(h, hdev) ||
+ (hdev->devtype == TYPE_RAID) ||
+ is_logical_dev_addr_mode(hdev->scsi3addr)) {
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN, "%s\n",
+ active);
+ continue;
+ }
+
+ box = hdev->box[i];
+ memcpy(&phys_connector, &hdev->phys_connector[i],
+ sizeof(phys_connector));
+ if (phys_connector[0] < '0')
+ phys_connector[0] = '0';
+ if (phys_connector[1] < '0')
+ phys_connector[1] = '0';
+ if (hdev->phys_connector[i] > 0)
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN,
+ "PORT: %.2s ",
+ phys_connector);
+ if (hdev->devtype == TYPE_DISK &&
+ hdev->expose_state != HPSA_DO_NOT_EXPOSE) {
+ if (box == 0 || box == 0xFF) {
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN,
+ "BAY: %hhu %s\n",
+ bay, active);
+ } else {
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN,
+ "BOX: %hhu BAY: %hhu %s\n",
+ box, bay, active);
+ }
+ } else if (box != 0 && box != 0xFF) {
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN, "BOX: %hhu %s\n",
+ box, active);
+ } else
+ output_len += snprintf(path[i] + output_len,
+ PATH_STRING_LEN, "%s\n", active);
+ }
+
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
host_show_hp_ssd_smart_path_status,
host_store_hp_ssd_smart_path_status);
@@ -744,6 +847,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_lunid,
&dev_attr_unique_id,
&dev_attr_hp_ssd_smart_path_enabled,
+ &dev_attr_path_info,
&dev_attr_lockup_detected,
NULL,
};
@@ -1083,17 +1187,19 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
/* This is a non-zero lun of a multi-lun device.
* Search through our list and find the device which
- * has the same 8 byte LUN address, excepting byte 4.
+ * has the same 8 byte LUN address, excepting byte 4 and 5.
* Assign the same bus and target for this new LUN.
* Use the logical unit number from the firmware.
*/
memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
+ addr1[5] = 0;
for (i = 0; i < n; i++) {
sd = h->dev[i];
memcpy(addr2, sd->scsi3addr, 8);
addr2[4] = 0;
- /* differ only in byte 4? */
+ addr2[5] = 0;
+ /* differ only in byte 4 and 5? */
if (memcmp(addr1, addr2, 8) == 0) {
device->bus = sd->bus;
device->target = sd->target;
@@ -1286,8 +1392,9 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
return 1;
if (dev1->offload_enabled != dev2->offload_enabled)
return 1;
- if (dev1->queue_depth != dev2->queue_depth)
- return 1;
+ if (!is_logical_dev_addr_mode(dev1->scsi3addr))
+ if (dev1->queue_depth != dev2->queue_depth)
+ return 1;
return 0;
}
@@ -1376,17 +1483,23 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
h->scsi_host->host_no,
sd->bus, sd->target, sd->lun);
break;
+ case HPSA_LV_NOT_AVAILABLE:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
case HPSA_LV_UNDERGOING_RPI:
dev_info(&h->pdev->dev,
- "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+ "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
h->scsi_host->host_no,
sd->bus, sd->target, sd->lun);
break;
case HPSA_LV_PENDING_RPI:
dev_info(&h->pdev->dev,
- "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
- h->scsi_host->host_no,
- sd->bus, sd->target, sd->lun);
+ "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
break;
case HPSA_LV_ENCRYPTED_NO_KEY:
dev_info(&h->pdev->dev,
@@ -2585,34 +2698,6 @@ out:
return rc;
}
-static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
- unsigned char *scsi3addr, unsigned char page,
- struct bmic_controller_parameters *buf, size_t bufsize)
-{
- int rc = IO_OK;
- struct CommandList *c;
- struct ErrorInfo *ei;
-
- c = cmd_alloc(h);
- if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
- page, scsi3addr, TYPE_CMD)) {
- rc = -1;
- goto out;
- }
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
- if (rc)
- goto out;
- ei = c->err_info;
- if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(h, c);
- rc = -1;
- }
-out:
- cmd_free(h, c);
- return rc;
-}
-
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
u8 reset_type, int reply_queue)
{
@@ -2749,11 +2834,10 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
lockup_detected(h));
if (unlikely(lockup_detected(h))) {
- dev_warn(&h->pdev->dev,
- "Controller lockup detected during reset wait\n");
- mutex_unlock(&h->reset_mutex);
- rc = -ENODEV;
- }
+ dev_warn(&h->pdev->dev,
+ "Controller lockup detected during reset wait\n");
+ rc = -ENODEV;
+ }
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
@@ -3186,6 +3270,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
/* Keep volume offline in certain cases: */
switch (ldstat) {
case HPSA_LV_UNDERGOING_ERASE:
+ case HPSA_LV_NOT_AVAILABLE:
case HPSA_LV_UNDERGOING_RPI:
case HPSA_LV_PENDING_RPI:
case HPSA_LV_ENCRYPTED_NO_KEY:
@@ -3562,29 +3647,6 @@ static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
return NULL;
}
-static int hpsa_hba_mode_enabled(struct ctlr_info *h)
-{
- int rc;
- int hba_mode_enabled;
- struct bmic_controller_parameters *ctlr_params;
- ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
- GFP_KERNEL);
-
- if (!ctlr_params)
- return -ENOMEM;
- rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
- sizeof(struct bmic_controller_parameters));
- if (rc) {
- kfree(ctlr_params);
- return rc;
- }
-
- hba_mode_enabled =
- ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
- kfree(ctlr_params);
- return hba_mode_enabled;
-}
-
/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
struct hpsa_scsi_dev_t *dev,
@@ -3615,6 +3677,31 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
atomic_set(&dev->reset_cmds_out, 0);
}
+static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
+ u8 *lunaddrbytes,
+ struct bmic_identify_physical_device *id_phys)
+{
+ if (PHYS_IOACCEL(lunaddrbytes)
+ && this_device->ioaccel_handle)
+ this_device->hba_ioaccel_enabled = 1;
+
+ memcpy(&this_device->active_path_index,
+ &id_phys->active_path_number,
+ sizeof(this_device->active_path_index));
+ memcpy(&this_device->path_map,
+ &id_phys->redundant_path_present_map,
+ sizeof(this_device->path_map));
+ memcpy(&this_device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(this_device->box));
+ memcpy(&this_device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(this_device->phys_connector));
+ memcpy(&this_device->bay,
+ &id_phys->phys_bay_in_box,
+ sizeof(this_device->bay));
+}
+
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
@@ -3637,7 +3724,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
int ncurrent = 0;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
- int rescan_hba_mode;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3653,17 +3739,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
}
memset(lunzerobits, 0, sizeof(lunzerobits));
- rescan_hba_mode = hpsa_hba_mode_enabled(h);
- if (rescan_hba_mode < 0)
- goto out;
-
- if (!h->hba_mode_enabled && rescan_hba_mode)
- dev_warn(&h->pdev->dev, "HBA mode enabled\n");
- else if (h->hba_mode_enabled && !rescan_hba_mode)
- dev_warn(&h->pdev->dev, "HBA mode disabled\n");
-
- h->hba_mode_enabled = rescan_hba_mode;
-
if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
logdev_list, &nlogicals))
goto out;
@@ -3739,9 +3814,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
/* do not expose masked devices */
if (MASKED_DEVICE(lunaddrbytes) &&
i < nphysicals + (raid_ctlr_position == 0)) {
- if (h->hba_mode_enabled)
- dev_warn(&h->pdev->dev,
- "Masked physical device detected\n");
this_device->expose_state = HPSA_DO_NOT_EXPOSE;
} else {
this_device->expose_state =
@@ -3761,30 +3833,21 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ncurrent++;
break;
case TYPE_DISK:
- if (i >= nphysicals) {
- ncurrent++;
- break;
- }
-
- if (h->hba_mode_enabled)
- /* never use raid mapper in HBA mode */
+ if (i < nphysicals + (raid_ctlr_position == 0)) {
+ /* The disk is in HBA mode. */
+ /* Never use RAID mapper in HBA mode. */
this_device->offload_enabled = 0;
- else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
- h->transMethod & CFGTBL_Trans_io_accel2))
- break;
-
- hpsa_get_ioaccel_drive_info(h, this_device,
- lunaddrbytes, id_phys);
- atomic_set(&this_device->ioaccel_cmds_out, 0);
+ hpsa_get_ioaccel_drive_info(h, this_device,
+ lunaddrbytes, id_phys);
+ hpsa_get_path_info(this_device, lunaddrbytes,
+ id_phys);
+ }
ncurrent++;
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
- ncurrent++;
- break;
case TYPE_ENCLOSURE:
- if (h->hba_mode_enabled)
- ncurrent++;
+ ncurrent++;
break;
case TYPE_RAID:
/* Only present the Smartarray HBA as a RAID controller.
@@ -5104,7 +5167,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
- char msg[40];
+ char msg[48];
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
@@ -5122,16 +5185,18 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
/* if controller locked up, we can guarantee command won't complete */
if (lockup_detected(h)) {
- sprintf(msg, "cmd %d RESET FAILED, lockup detected",
- hpsa_get_cmd_index(scsicmd));
+ snprintf(msg, sizeof(msg),
+ "cmd %d RESET FAILED, lockup detected",
+ hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
return FAILED;
}
/* this reset request might be the result of a lockup; check */
if (detect_controller_lockup(h)) {
- sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
- hpsa_get_cmd_index(scsicmd));
+ snprintf(msg, sizeof(msg),
+ "cmd %d RESET FAILED, new lockup detected",
+ hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
return FAILED;
}
@@ -5145,7 +5210,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
DEFAULT_REPLY_QUEUE);
- sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
+ snprintf(msg, sizeof(msg), "reset %s",
+ rc == 0 ? "completed successfully" : "failed");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
return rc == 0 ? SUCCESS : FAILED;
}
@@ -7989,7 +8055,6 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
h->ndevices = 0;
- h->hba_mode_enabled = 0;
spin_lock_init(&h->devlock);
rc = hpsa_put_ctlr_into_performant_mode(h);
@@ -8054,7 +8119,7 @@ reinit_after_soft_reset:
rc = hpsa_kdump_soft_reset(h);
if (rc)
/* Neither hard nor soft reset worked, we're hosed. */
- goto clean9;
+ goto clean7;
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
@@ -8100,8 +8165,6 @@ reinit_after_soft_reset:
h->heartbeat_sample_interval);
return 0;
-clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
- kfree(h->hba_inquiry_data);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
@@ -8209,6 +8272,14 @@ static void hpsa_remove_one(struct pci_dev *pdev)
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ /*
+ * Call before disabling interrupts.
+ * scsi_remove_host can trigger I/O operations especially
+ * when multipath is enabled. There can be SYNCHRONIZE CACHE
+ * operations which cannot complete and will hang the system.
+ */
+ if (h->scsi_host)
+ scsi_remove_host(h->scsi_host); /* init_one 8 */
/* includes hpsa_free_irqs - init_one 4 */
/* includes hpsa_disable_interrupt_mode - pci_init 2 */
hpsa_shutdown(pdev);
@@ -8217,8 +8288,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
kfree(h->hba_inquiry_data); /* init_one 10 */
h->hba_inquiry_data = NULL; /* init_one 10 */
- if (h->scsi_host)
- scsi_remove_host(h->scsi_host); /* init_one 8 */
hpsa_free_ioaccel2_sg_chain_blocks(h);
hpsa_free_performant_mode(h); /* init_one 7 */
hpsa_free_sg_chain_blocks(h); /* init_one 6 */
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6ee4da6b1153..27debb363529 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,7 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ * Copyright 2014-2015 PMC-Sierra, Inc.
+ * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ * Questions/Comments/Bugfixes to storagedev@pmcs.com
*
*/
#ifndef HPSA_H
@@ -53,6 +50,11 @@ struct hpsa_scsi_dev_t {
* device via "ioaccel" path.
*/
u32 ioaccel_handle;
+ u8 active_path_index;
+ u8 path_map;
+ u8 bay;
+ u8 box[8];
+ u16 phys_connector[8];
int offload_config; /* I/O accel RAID offload configured */
int offload_enabled; /* I/O accel RAID offload enabled */
int offload_to_be_enabled;
@@ -114,7 +116,6 @@ struct bmic_controller_parameters {
u8 automatic_drive_slamming;
u8 reserved1;
u8 nvram_flags;
-#define HBA_MODE_ENABLED_FLAG (1 << 3)
u8 cache_nvram_flags;
u8 drive_config_flags;
u16 reserved2;
@@ -153,7 +154,6 @@ struct ctlr_info {
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
- char hba_mode_enabled;
/* queue and queue Info */
unsigned int Qdepth;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index c601622cc98e..47c756ba8dce 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,7 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ * Copyright 2014-2015 PMC-Sierra, Inc.
+ * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ * Questions/Comments/Bugfixes to storagedev@pmcs.com
*
*/
#ifndef HPSA_CMD_H
@@ -167,6 +164,7 @@
/* Logical volume states */
#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
#define HPSA_LV_OK 0x0
+#define HPSA_LV_NOT_AVAILABLE 0x0b
#define HPSA_LV_UNDERGOING_ERASE 0x0F
#define HPSA_LV_UNDERGOING_RPI 0x12
#define HPSA_LV_PENDING_RPI 0x13
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e995218476ed..a83f705ed8a5 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.8";
+static const char driver_ver[] = "v1.10.0";
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -764,9 +764,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
scsi_set_resid(scp,
scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
scp->result = SAM_STAT_CHECK_CONDITION;
- memcpy(scp->sense_buffer, &req->sg_list,
- min_t(size_t, SCSI_SENSE_BUFFERSIZE,
- le32_to_cpu(req->dataxfer_length)));
+ memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
goto skip_resid;
break;
@@ -1037,8 +1035,9 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
scp->result = 0;
- if (scp->device->channel || scp->device->lun ||
- scp->device->id > hba->max_devices) {
+ if (scp->device->channel ||
+ (scp->device->id > hba->max_devices) ||
+ ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
scp->result = DID_BAD_TARGET << 16;
free_req(hba, _req);
goto cmd_done;
@@ -1168,6 +1167,14 @@ static struct device_attribute *hptiop_attrs[] = {
NULL
};
+static int hptiop_slave_config(struct scsi_device *sdev)
+{
+ if (sdev->type == TYPE_TAPE)
+ blk_queue_max_hw_sectors(sdev->request_queue, 8192);
+
+ return 0;
+}
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = driver_name,
@@ -1179,6 +1186,7 @@ static struct scsi_host_template driver_template = {
.use_clustering = ENABLE_CLUSTERING,
.proc_name = driver_name,
.shost_attrs = hptiop_attrs,
+ .slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
@@ -1323,6 +1331,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
}
hba = (struct hptiop_hba *)host->hostdata;
+ memset(hba, 0, sizeof(struct hptiop_hba));
hba->ops = iop_ops;
hba->pcidev = pcidev;
@@ -1336,7 +1345,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
init_waitqueue_head(&hba->reset_wq);
init_waitqueue_head(&hba->ioctl_wq);
- host->max_lun = 1;
+ host->max_lun = 128;
host->max_channel = 0;
host->io_port = 0;
host->n_io_port = 0;
@@ -1428,34 +1437,33 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
hba->req_size = req_size;
- start_virt = dma_alloc_coherent(&pcidev->dev,
- hba->req_size*hba->max_requests + 0x20,
- &start_phy, GFP_KERNEL);
+ hba->req_list = NULL;
- if (!start_virt) {
- printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
- hba->host->host_no);
- goto free_request_irq;
- }
+ for (i = 0; i < hba->max_requests; i++) {
+ start_virt = dma_alloc_coherent(&pcidev->dev,
+ hba->req_size + 0x20,
+ &start_phy, GFP_KERNEL);
+
+ if (!start_virt) {
+ printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
+ hba->host->host_no);
+ goto free_request_mem;
+ }
- hba->dma_coherent = start_virt;
- hba->dma_coherent_handle = start_phy;
+ hba->dma_coherent[i] = start_virt;
+ hba->dma_coherent_handle[i] = start_phy;
- if ((start_phy & 0x1f) != 0) {
- offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
- start_phy += offset;
- start_virt += offset;
- }
+ if ((start_phy & 0x1f) != 0) {
+ offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
+ start_phy += offset;
+ start_virt += offset;
+ }
- hba->req_list = NULL;
- for (i = 0; i < hba->max_requests; i++) {
hba->reqs[i].next = NULL;
hba->reqs[i].req_virt = start_virt;
hba->reqs[i].req_shifted_phy = start_phy >> 5;
hba->reqs[i].index = i;
free_req(hba, &hba->reqs[i]);
- start_virt = (char *)start_virt + hba->req_size;
- start_phy = start_phy + hba->req_size;
}
/* Enable Interrupt and start background task */
@@ -1474,11 +1482,16 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
return 0;
free_request_mem:
- dma_free_coherent(&hba->pcidev->dev,
- hba->req_size * hba->max_requests + 0x20,
- hba->dma_coherent, hba->dma_coherent_handle);
+ for (i = 0; i < hba->max_requests; i++) {
+ if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->req_size + 0x20,
+ hba->dma_coherent[i],
+ hba->dma_coherent_handle[i]);
+ else
+ break;
+ }
-free_request_irq:
free_irq(hba->pcidev->irq, hba);
unmap_pci_bar:
@@ -1546,6 +1559,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
{
struct Scsi_Host *host = pci_get_drvdata(pcidev);
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+ u32 i;
dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
@@ -1555,10 +1569,15 @@ static void hptiop_remove(struct pci_dev *pcidev)
free_irq(hba->pcidev->irq, hba);
- dma_free_coherent(&hba->pcidev->dev,
- hba->req_size * hba->max_requests + 0x20,
- hba->dma_coherent,
- hba->dma_coherent_handle);
+ for (i = 0; i < hba->max_requests; i++) {
+ if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->req_size + 0x20,
+ hba->dma_coherent[i],
+ hba->dma_coherent_handle[i]);
+ else
+ break;
+ }
hba->ops->internal_memfree(hba);
@@ -1653,6 +1672,14 @@ static struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
{},
};
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 020619d60b08..4d1c51153b70 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -327,8 +327,8 @@ struct hptiop_hba {
struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
/* used to free allocated dma area */
- void *dma_coherent;
- dma_addr_t dma_coherent_handle;
+ void *dma_coherent[HPTIOP_MAX_REQUESTS];
+ dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];
atomic_t reset_count;
atomic_t resetting;
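
Note: the hptiop hunks above replace one large coherent buffer with one dma_alloc_coherent() call per request, each padded by 0x20 bytes so the returned address can be rounded up to a 32-byte boundary; the matching free loops stop at the first slot that was never allocated. A minimal userspace sketch of just the rounding arithmetic (addresses hypothetical; dma_alloc_coherent() itself is not modelled):

    #include <stdint.h>
    #include <stdio.h>

    /* Round addr up to the next 32-byte boundary, as the driver does with
     * ((start_phy + 0x1f) & ~0x1f) after over-allocating by 0x20 bytes. */
    static uint64_t align32(uint64_t addr)
    {
            return (addr + 0x1f) & ~(uint64_t)0x1f;
    }

    int main(void)
    {
            uint64_t phy = 0x1004;          /* hypothetical unaligned address */
            uint64_t aligned = align32(phy);

            /* The adjustment is at most 0x1f, so 0x20 of padding always suffices. */
            printf("phy=0x%llx aligned=0x%llx offset=0x%llx\n",
                   (unsigned long long)phy,
                   (unsigned long long)aligned,
                   (unsigned long long)(aligned - phy));
            return 0;
    }
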
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..b62836ddbbee 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
{
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ unsigned int trace_index;
- trace_entry = &ioa_cfg->trace[atomic_add_return
- (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+ trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+ trace_entry = &ioa_cfg->trace[trace_index];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
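
Note: the trace-hook hunk above swaps a '%' on the raw atomic counter for an '&' with IPR_TRACE_INDEX_MASK. Because IPR_NUM_TRACE_ENTRIES is a power of two (see the ipr.h hunk further down), masking the unsigned value is equivalent to the modulo and stays in range even after the signed atomic_add_return() result wraps negative, where '%' can yield a negative index. A small standalone check (counter value hypothetical):

    #include <stdio.h>

    #define NUM_ENTRIES (1u << 8)            /* mirrors IPR_NUM_TRACE_ENTRIES */
    #define INDEX_MASK  (NUM_ENTRIES - 1u)   /* mirrors IPR_TRACE_INDEX_MASK  */

    int main(void)
    {
            int counter = -5;   /* a wrapped atomic_add_return() result */
            unsigned int masked = (unsigned int)counter & INDEX_MASK;

            printf("%% gives %d, & gives %u\n",
                   counter % (int)NUM_ENTRIES, masked);
            return 0;
    }

This prints "% gives -5, & gives 251": the modulo on the signed counter would index out of bounds, the mask never can.
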
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
+ unsigned int hrrq;
+
if (ioa_cfg->hrrq_num == 1)
- return 0;
- else
- return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+ hrrq = 0;
+ else {
+ hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+ hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+ }
+ return hrrq;
}
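
Note: the ipr_get_hrrq_index() rewrite keeps the selection policy — queue 0 when there is a single HRRQ, otherwise round-robin over queues 1..hrrq_num-1 with queue 0 reserved — but restructures it around one local and a single return. A sketch of that policy with hypothetical counter values (the atomic_add_return() result is stood in for by a plain counter):

    #include <stdio.h>

    /* Reserve queue 0; spread everything else over queues 1..num-1. */
    static unsigned int pick_hrrq(unsigned int counter, unsigned int num)
    {
            unsigned int hrrq;

            if (num == 1)
                    hrrq = 0;
            else
                    hrrq = (counter % (num - 1)) + 1;
            return hrrq;
    }

    int main(void)
    {
            for (unsigned int i = 0; i < 6; i++)
                    printf("counter=%u -> hrrq %u\n", i, pick_hrrq(i, 4));
            return 0;
    }
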
/**
@@ -1159,7 +1165,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
if (ioa_cfg->sis64) {
proto = cfgtew->u.cfgte64->proto;
- res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
+ res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
res->qmodel = IPR_QUEUEING_MODEL64(res);
res->type = cfgtew->u.cfgte64->res_type;
@@ -1307,8 +1314,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
int new_path = 0;
if (res->ioa_cfg->sis64) {
- res->flags = cfgtew->u.cfgte64->flags;
- res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
+ res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
res->type = cfgtew->u.cfgte64->res_type;
memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
@@ -1894,7 +1901,7 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* none
**/
-static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
int i;
@@ -2264,7 +2271,7 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
((unsigned long)fabric + be16_to_cpu(fabric->length));
}
- ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+ ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}
/**
@@ -2358,7 +2365,7 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
((unsigned long)fabric + be16_to_cpu(fabric->length));
}
- ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+ ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}
/**
@@ -4449,7 +4456,7 @@ static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *a
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ioa_cfg->sis64)
- len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
+ len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
else if (res)
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
@@ -4548,7 +4555,7 @@ static ssize_t ipr_store_raw_mode(struct device *dev,
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res) {
- if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+ if (ipr_is_af_dasd_device(res)) {
res->raw_mode = simple_strtoul(buf, NULL, 10);
len = strlen(buf);
if (res->sdev)
@@ -6263,21 +6270,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long hrrq_flags;
+ unsigned long lock_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock(&ipr_cmd->hrrq->_lock);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
}
@@ -6374,9 +6383,13 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
- if (res->raw_mode && ipr_is_af_dasd_device(res))
+ if (res->raw_mode && ipr_is_af_dasd_device(res)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
+ if (scsi_cmd->underflow == 0)
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+ }
+
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..e4fb17a58649 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.6.1"
-#define IPR_DRIVER_DATE "(March 12, 2015)"
+#define IPR_DRIVER_VERSION "2.6.2"
+#define IPR_DRIVER_DATE "(June 11, 2015)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1005,13 +1005,13 @@ struct ipr_hostrcb_type_24_error {
struct ipr_hostrcb_type_07_error {
u8 failure_reason[64];
struct ipr_vpd vpd;
- u32 data[222];
+ __be32 data[222];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_17_error {
u8 failure_reason[64];
struct ipr_ext_vpd vpd;
- u32 data[476];
+ __be32 data[476];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_config_element {
@@ -1289,18 +1289,17 @@ struct ipr_resource_entry {
(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
u8 ata_class;
-
- u8 flags;
- __be16 res_flags;
-
u8 type;
+ u16 flags;
+ u16 res_flags;
+
u8 qmodel;
struct ipr_std_inq_data std_inq_data;
__be32 res_handle;
__be64 dev_id;
- __be64 lun_wwn;
+ u64 lun_wwn;
struct scsi_lun dev_lun;
u8 res_path[8];
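
Note: these struct changes pair with the be16_to_cpu()/be64_to_cpu() conversions in the ipr.c hunks above: fields that are byte-swapped once at read time are now stored native-endian (u16 flags/res_flags, u64 lun_wwn), while buffers still holding raw wire data are annotated __be32 so sparse flags any access that skips the conversion. A userspace analogue of the read-time swap (ntohs() standing in for be16_to_cpu()):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* A 16-bit flags word as big-endian firmware would supply it. */
            uint16_t wire = htons(0x0102);

            printf("raw      : 0x%04x\n", wire);        /* swapped on LE hosts */
            printf("converted: 0x%04x\n", ntohs(wire)); /* 0x0102 everywhere   */
            return 0;
    }
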
@@ -1486,6 +1485,7 @@ struct ipr_ioa_cfg {
#define IPR_NUM_TRACE_INDEX_BITS 8
#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
char trace_start[8];
#define IPR_TRACE_START_LABEL "trace"
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
if (resp) {
resp(sp, fp, arg);
res = true;
- } else if (!IS_ERR(fp)) {
- fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that
* first.
*/
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
fc_exch_release(ep);
return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
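
Note: these fc_exch hunks move frame ownership to the callers: fc_invoke_resp() now only reports whether a handler consumed the frame, and fc_exch_recv_seq_resp()/fc_exch_abts_resp() free it themselves on false, rather than fc_invoke_resp() guessing from IS_ERR(). A toy model of that contract (types and handler hypothetical):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct frame { int id; };
    typedef void (*resp_fn)(struct frame *fp);

    /* True only if a handler took the frame; otherwise the caller owns it. */
    static bool invoke_resp(resp_fn resp, struct frame *fp)
    {
            if (resp) {
                    resp(fp);
                    return true;
            }
            return false;
    }

    static void consume(struct frame *fp)
    {
            printf("handler consumed frame %d\n", fp->id);
            free(fp);
    }

    int main(void)
    {
            struct frame *fp = malloc(sizeof(*fp));

            fp->id = 1;
            if (!invoke_resp(NULL, fp))     /* no handler: caller frees */
                    free(fp);

            fp = malloc(sizeof(*fp));
            fp->id = 2;
            if (!invoke_resp(consume, fp))  /* handler owns and frees it */
                    free(fp);
            return 0;
    }
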
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..5121272f28fd 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -191,7 +191,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
}
/**
- * fc_fcp_pkt_destory() - Release hold on a fcp_pkt
+ * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
@@ -1039,11 +1039,26 @@ restart:
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+ * TODO: dropping scsi_pkt_lock and reacquiring it
+ * around fc_fcp_cleanup_cmd() is required, since
+ * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp()
+ * and that function can sleep via schedule(). Perhaps
+ * schedule() and the related code should be removed
+ * instead of unlocking here, to avoid a
+ * scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
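
Note: the rework above claims FC_SRB_COMPL under scsi_pkt_lock, drops the lock across fc_fcp_cleanup_cmd() (which can sleep, per the TODO), then retakes it for fc_io_compl(). The same claim-drop-work-relock shape, modelled with a pthread mutex (names hypothetical; kernel spinlocks are not modelled):

    #include <pthread.h>
    #include <stdio.h>

    #define SRB_COMPL 0x1

    static pthread_mutex_t pkt_lock = PTHREAD_MUTEX_INITIALIZER;
    static int state;

    static void cleanup_cmd(void)
    {
            /* Stands in for fc_fcp_cleanup_cmd(): may block, so it must
             * not run with the packet lock held. */
            puts("cleanup outside the lock");
    }

    int main(void)
    {
            pthread_mutex_lock(&pkt_lock);
            if (!(state & SRB_COMPL)) {
                    state |= SRB_COMPL;              /* claim completion once  */
                    pthread_mutex_unlock(&pkt_lock); /* drop for blocking call */
                    cleanup_cmd();
                    pthread_mutex_lock(&pkt_lock);   /* retake for completion  */
                    puts("completion back under the lock");
            }
            pthread_mutex_unlock(&pkt_lock);
            return 0;
    }

Build with: cc -pthread demo.c
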
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..33c74d3436c9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -853,12 +853,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(1, sc->sense_buffer,
ILLEGAL_REQUEST, 0x10, ascq);
- sc->sense_buffer[7] = 0xc; /* Additional sense length */
- sc->sense_buffer[8] = 0; /* Information desc type */
- sc->sense_buffer[9] = 0xa; /* Additional desc length */
- sc->sense_buffer[10] = 0x80; /* Validity bit */
-
- put_unaligned_be64(sector, &sc->sense_buffer[12]);
+ scsi_set_sense_information(sc->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ sector);
goto out;
}
}
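
Note: the libiscsi hunk replaces the open-coded descriptor-format sense bytes with a single scsi_set_sense_information() call. The bytes the removed lines were setting form a standard information descriptor; reproducing them in plain C shows what the helper is expected to fill (offsets per the removed code; the sector value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror the removed open-coded path: descriptor-format sense with an
     * information descriptor starting at byte 8. */
    static void set_sense_info(uint8_t *sense, uint64_t info)
    {
            sense[7] = 0x0c;   /* additional sense length */
            sense[8] = 0x00;   /* descriptor type: information */
            sense[9] = 0x0a;   /* additional descriptor length */
            sense[10] = 0x80;  /* validity bit */
            for (int i = 0; i < 8; i++)        /* big-endian 64-bit field */
                    sense[12 + i] = (uint8_t)(info >> (56 - 8 * i));
    }

    int main(void)
    {
            uint8_t sense[32] = { 0 };

            set_sense_info(sense, 0x12345678ULL); /* hypothetical bad sector */
            for (int i = 12; i < 20; i++)
                    printf("%02x", sense[i]);
            putchar('\n');
            return 0;
    }
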
@@ -2941,10 +2938,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;
del_timer_sync(&conn->transport_timer);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
@@ -2956,28 +2953,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->frwd_lock);
- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- atomic_read(&session->host->host_busy),
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
@@ -2994,6 +2969,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
}
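
Note: the teardown hunks drop the poll-and-sleep loop that waited for host_busy to drain and instead hold session->eh_mutex across the whole cleanup, so teardown blocks until error handling is done rather than spinning and re-waking it. The polling-vs-blocking difference in miniature (pthread analogue; names hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t eh_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *error_handler(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&eh_mutex);
            puts("eh: working");
            sleep(1);                  /* in-flight error handling */
            pthread_mutex_unlock(&eh_mutex);
            return NULL;
    }

    int main(void)
    {
            pthread_t eh;

            pthread_create(&eh, NULL, error_handler, NULL);
            usleep(100 * 1000);        /* let the handler grab the mutex first */

            /* Teardown: block on the mutex instead of polling a busy count. */
            pthread_mutex_lock(&eh_mutex);
            puts("teardown: safe to free the connection");
            pthread_mutex_unlock(&eh_mutex);

            pthread_join(eh, NULL);
            return 0;
    }

Build with: cc -pthread demo.c
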
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ce96d5bf8ae7..759cbebed7c7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -701,7 +701,7 @@ lpfc_work_done(struct lpfc_hba *phba)
HA_RXMASK));
}
}
- if ((phba->sli_rev == LPFC_SLI_REV4) &
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
(!list_empty(&pring->txq)))
lpfc_drain_txq(phba);
/*
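
Note: the lpfc one-character fix swaps bitwise '&' for logical '&&'. Here both operands are already 0/1, so the old expression happened to evaluate to the right value; the fix restores intent and short-circuiting (list_empty() is no longer evaluated when the revision test fails). Where '&' really bites is when one side is a raw non-boolean, as this standalone example shows:

    #include <stdio.h>

    int main(void)
    {
            int rev_ok = 1;   /* a proper 0/1 comparison result */
            int pending = 2;  /* a raw count: truthy, but not 1 */

            printf("bitwise : %s\n", (rev_ok & pending) ? "taken" : "skipped");
            printf("logical : %s\n", (rev_ok && pending) ? "taken" : "skipped");
            return 0;
    }

1 & 2 is 0, so the bitwise form skips a branch that logically should be taken.
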
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index eb627724417e..4abb93a83e0f 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2284,7 +2284,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(struct lpfc_rdp_context *)(mbox->context2);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
- goto error;
+ goto error_mbuf_free;
lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
DMP_SFF_PAGE_A2_SIZE);
@@ -2299,13 +2299,14 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
- goto error;
+ goto error_cmd_free;
return;
-error:
+error_mbuf_free:
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
+error_cmd_free:
lpfc_sli4_mbox_cmd_free(phba, mbox);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
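
Note: splitting the single error: label above lets each failure point unwind exactly what it still owns — the early status failure frees the mbuf and falls through to free the mailbox command, while the late issue failure jumps past the mbuf free. A generic sketch of the layered-label idiom (resources hypothetical, not the lpfc objects themselves):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
            int fail_late = 1;  /* force the late-failure path for the demo */
            char *buf, *cmd;

            buf = malloc(64);
            if (!buf)
                    goto err;
            cmd = malloc(64);
            if (!cmd)
                    goto err_buf_free;

            if (fail_late)      /* e.g. a hand-off that failed afterwards */
                    goto err_cmd_free;
            return 0;

    err_cmd_free:
            free(cmd);          /* labels free in reverse acquisition order */
    err_buf_free:
            free(buf);          /* ...falling through to earlier resources */
    err:
            return -1;
    }

    int main(void)
    {
            printf("setup: %d\n", setup());
            return 0;
    }
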
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index bc7b34c02723..9d05302a3bcd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -268,8 +268,8 @@ mega_query_adapter(adapter_t *adapter)
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
if ((retval = issue_scb_block(adapter, raw_mbox)))
- printk(KERN_WARNING
- "megaraid: Product_info cmd failed with error: %d\n",
+ dev_warn(&adapter->dev->dev,
+ "Product_info cmd failed with error: %d\n",
retval);
pci_unmap_single(adapter->dev, prod_info_dma_handle,
@@ -334,7 +334,7 @@ mega_query_adapter(adapter_t *adapter)
adapter->bios_version[4] = 0;
}
- printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n",
+ dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
adapter->fw_version, adapter->bios_version, adapter->numldrv);
/*
@@ -342,7 +342,7 @@ mega_query_adapter(adapter_t *adapter)
*/
adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
if (adapter->support_ext_cdb)
- printk(KERN_NOTICE "megaraid: supports extended CDBs.\n");
+ dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
return 0;
@@ -678,11 +678,11 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
if(!(adapter->flag & (1L << cmd->device->channel))) {
- printk(KERN_NOTICE
- "scsi%d: scanning scsi channel %d ",
+ dev_notice(&adapter->dev->dev,
+ "scsi%d: scanning scsi channel %d "
+ "for logical drives\n",
adapter->host->host_no,
cmd->device->channel);
- printk("for logical drives.\n");
adapter->flag |= (1L << cmd->device->channel);
}
@@ -983,11 +983,11 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
case READ_CAPACITY:
if(!(adapter->flag & (1L << cmd->device->channel))) {
- printk(KERN_NOTICE
- "scsi%d: scanning scsi channel %d [P%d] ",
+ dev_notice(&adapter->dev->dev,
+ "scsi%d: scanning scsi channel %d [P%d] "
+ "for physical devices\n",
adapter->host->host_no,
cmd->device->channel, channel);
- printk("for physical devices.\n");
adapter->flag |= (1L << cmd->device->channel);
}
@@ -1045,11 +1045,11 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
case READ_CAPACITY:
if(!(adapter->flag & (1L << cmd->device->channel))) {
- printk(KERN_NOTICE
- "scsi%d: scanning scsi channel %d [P%d] ",
+ dev_notice(&adapter->dev->dev,
+ "scsi%d: scanning scsi channel %d [P%d] "
+ "for physical devices\n",
adapter->host->host_no,
cmd->device->channel, channel);
- printk("for physical devices.\n");
adapter->flag |= (1L << cmd->device->channel);
}
@@ -1241,7 +1241,7 @@ issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
return mbox->m_in.status;
bug_blocked_mailbox:
- printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n");
+ dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
udelay (1000);
return -1;
}
@@ -1454,9 +1454,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* Make sure f/w has completed a valid command
*/
if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
- printk(KERN_CRIT
- "megaraid: invalid command ");
- printk("Id %d, scb->state:%x, scsi cmd:%p\n",
+ dev_crit(&adapter->dev->dev, "invalid command "
+ "Id %d, scb->state:%x, scsi cmd:%p\n",
cmdid, scb->state, scb->cmd);
continue;
@@ -1467,8 +1466,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if( scb->state & SCB_ABORT ) {
- printk(KERN_WARNING
- "megaraid: aborted cmd [%x] complete.\n",
+ dev_warn(&adapter->dev->dev,
+ "aborted cmd [%x] complete\n",
scb->idx);
scb->cmd->result = (DID_ABORT << 16);
@@ -1486,8 +1485,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if( scb->state & SCB_RESET ) {
- printk(KERN_WARNING
- "megaraid: reset cmd [%x] complete.\n",
+ dev_warn(&adapter->dev->dev,
+ "reset cmd [%x] complete\n",
scb->idx);
scb->cmd->result = (DID_RESET << 16);
@@ -1553,8 +1552,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
if( sg_page(sgl) ) {
c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
- printk(KERN_WARNING
- "megaraid: invalid sg.\n");
+ dev_warn(&adapter->dev->dev, "invalid sg\n");
c = 0;
}
@@ -1902,11 +1900,10 @@ megaraid_reset(struct scsi_cmnd *cmd)
mc.opcode = MEGA_RESET_RESERVATIONS;
if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
- printk(KERN_WARNING
- "megaraid: reservation reset failed.\n");
+ dev_warn(&adapter->dev->dev, "reservation reset failed\n");
}
else {
- printk(KERN_INFO "megaraid: reservation reset.\n");
+ dev_info(&adapter->dev->dev, "reservation reset\n");
}
#endif
@@ -1939,7 +1936,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
struct list_head *pos, *next;
scb_t *scb;
- printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
+ dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
(aor == SCB_ABORT)? "ABORTING":"RESET",
cmd->cmnd[0], cmd->device->channel,
cmd->device->id, (u32)cmd->device->lun);
@@ -1963,8 +1960,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
*/
if( scb->state & SCB_ISSUED ) {
- printk(KERN_WARNING
- "megaraid: %s[%x], fw owner.\n",
+ dev_warn(&adapter->dev->dev,
+ "%s[%x], fw owner\n",
(aor==SCB_ABORT) ? "ABORTING":"RESET",
scb->idx);
@@ -1976,8 +1973,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
* Not yet issued! Remove from the pending
* list
*/
- printk(KERN_WARNING
- "megaraid: %s-[%x], driver owner.\n",
+ dev_warn(&adapter->dev->dev,
+ "%s-[%x], driver owner\n",
(aor==SCB_ABORT) ? "ABORTING":"RESET",
scb->idx);
@@ -2197,7 +2194,7 @@ proc_show_rebuild_rate(struct seq_file *m, void *v)
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
- printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
@@ -2241,7 +2238,7 @@ proc_show_battery(struct seq_file *m, void *v)
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
- printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
@@ -2350,7 +2347,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
- printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
@@ -2525,7 +2522,7 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
- printk(KERN_WARNING "megaraid: inquiry failed.\n");
+ dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
@@ -2799,7 +2796,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
dir = adapter->controller_proc_dir_entry =
proc_mkdir_data(string, 0, parent, adapter);
if(!dir) {
- printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+ dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
return;
}
@@ -2807,7 +2804,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
f->show);
if (!de) {
- printk(KERN_WARNING "\nmegaraid: proc_create failed\n");
+ dev_warn(&adapter->dev->dev, "proc_create failed\n");
return;
}
@@ -2874,9 +2871,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
return rval;
}
- printk(KERN_INFO
- "megaraid: invalid partition on this disk on channel %d\n",
- sdev->channel);
+ dev_info(&adapter->dev->dev,
+ "invalid partition on this disk on channel %d\n",
+ sdev->channel);
/* Default heads (64) & sectors (32) */
heads = 64;
@@ -2936,7 +2933,7 @@ mega_init_scb(adapter_t *adapter)
scb->sgl = (mega_sglist *)scb->sgl64;
if( !scb->sgl ) {
- printk(KERN_WARNING "RAID: Can't allocate sglist.\n");
+ dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
mega_free_sgl(adapter);
return -1;
}
@@ -2946,7 +2943,7 @@ mega_init_scb(adapter_t *adapter)
&scb->pthru_dma_addr);
if( !scb->pthru ) {
- printk(KERN_WARNING "RAID: Can't allocate passthru.\n");
+ dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
mega_free_sgl(adapter);
return -1;
}
@@ -2956,8 +2953,8 @@ mega_init_scb(adapter_t *adapter)
&scb->epthru_dma_addr);
if( !scb->epthru ) {
- printk(KERN_WARNING
- "Can't allocate extended passthru.\n");
+ dev_warn(&adapter->dev->dev,
+ "Can't allocate extended passthru\n");
mega_free_sgl(adapter);
return -1;
}
@@ -3154,8 +3151,8 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
* Do we support this feature
*/
if( !adapter->support_random_del ) {
- printk(KERN_WARNING "megaraid: logdrv ");
- printk("delete on non-supporting F/W.\n");
+ dev_warn(&adapter->dev->dev, "logdrv "
+ "delete on non-supporting F/W\n");
return (-EINVAL);
}
@@ -3179,7 +3176,7 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
- printk(KERN_WARNING "megaraid: rejected passthru.\n");
+ dev_warn(&adapter->dev->dev, "rejected passthru\n");
return (-EINVAL);
}
@@ -3683,11 +3680,11 @@ mega_enum_raid_scsi(adapter_t *adapter)
for( i = 0; i < adapter->product_info.nchannels; i++ ) {
if( (adapter->mega_ch_class >> i) & 0x01 ) {
- printk(KERN_INFO "megaraid: channel[%d] is raid.\n",
+ dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
i);
}
else {
- printk(KERN_INFO "megaraid: channel[%d] is scsi.\n",
+ dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
i);
}
}
@@ -3893,7 +3890,7 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
/* log this event */
if(rval) {
- printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv);
+ dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
return rval;
}
@@ -4161,7 +4158,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
* this information.
*/
if (rval && trace_level) {
- printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
+ dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
mc->cmd, mc->opcode, mc->subopcode, rval);
}
@@ -4244,11 +4241,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
subsysvid = pdev->subsystem_vendor;
subsysid = pdev->subsystem_device;
- printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:",
- id->vendor, id->device, pci_bus);
-
- printk("slot %d:func %d\n",
- PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func));
+ dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
+ id->vendor, id->device);
/* Read the base port and IRQ from PCI */
mega_baseport = pci_resource_start(pdev, 0);
@@ -4259,14 +4253,13 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
flag |= BOARD_MEMMAP;
if (!request_mem_region(mega_baseport, 128, "megaraid")) {
- printk(KERN_WARNING "megaraid: mem region busy!\n");
+ dev_warn(&pdev->dev, "mem region busy!\n");
goto out_disable_device;
}
mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
if (!mega_baseport) {
- printk(KERN_WARNING
- "megaraid: could not map hba memory\n");
+ dev_warn(&pdev->dev, "could not map hba memory\n");
goto out_release_region;
}
} else {
@@ -4285,7 +4278,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = (adapter_t *)host->hostdata;
memset(adapter, 0, sizeof(adapter_t));
- printk(KERN_NOTICE
+ dev_notice(&pdev->dev,
"scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
host->host_no, mega_baseport, irq);
@@ -4323,21 +4316,20 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
if (!adapter->mega_buffer) {
- printk(KERN_WARNING "megaraid: out of RAM.\n");
+ dev_warn(&pdev->dev, "out of RAM\n");
goto out_host_put;
}
adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
if (!adapter->scb_list) {
- printk(KERN_WARNING "megaraid: out of RAM.\n");
+ dev_warn(&pdev->dev, "out of RAM\n");
goto out_free_cmd_buffer;
}
if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
megaraid_isr_memmapped : megaraid_isr_iomapped,
IRQF_SHARED, "megaraid", adapter)) {
- printk(KERN_WARNING
- "megaraid: Couldn't register IRQ %d!\n", irq);
+ dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
goto out_free_scb_list;
}
@@ -4357,9 +4349,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!strcmp(adapter->fw_version, "3.00") ||
!strcmp(adapter->fw_version, "3.01")) {
- printk( KERN_WARNING
- "megaraid: Your card is a Dell PERC "
- "2/SC RAID controller with "
+ dev_warn(&pdev->dev,
+ "Your card is a Dell PERC "
+ "2/SC RAID controller with "
"firmware\nmegaraid: 3.00 or 3.01. "
"This driver is known to have "
"corruption issues\nmegaraid: with "
@@ -4390,12 +4382,12 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!strcmp(adapter->fw_version, "H01.07") ||
!strcmp(adapter->fw_version, "H01.08") ||
!strcmp(adapter->fw_version, "H01.09") ) {
- printk(KERN_WARNING
- "megaraid: Firmware H.01.07, "
+ dev_warn(&pdev->dev,
+ "Firmware H.01.07, "
"H.01.08, and H.01.09 on 1M/2M "
"controllers\n"
- "megaraid: do not support 64 bit "
- "addressing.\nmegaraid: DISABLING "
+ "do not support 64 bit "
+ "addressing.\nDISABLING "
"64 bit support.\n");
adapter->flag &= ~BOARD_64BIT;
}
@@ -4503,8 +4495,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
adapter->has_cluster = mega_support_cluster(adapter);
if (adapter->has_cluster) {
- printk(KERN_NOTICE
- "megaraid: Cluster driver, initiator id:%d\n",
+ dev_notice(&pdev->dev,
+ "Cluster driver, initiator id:%d\n",
adapter->this_id);
}
#endif
@@ -4571,7 +4563,7 @@ __megaraid_shutdown(adapter_t *adapter)
issue_scb_block(adapter, raw_mbox);
if (atomic_read(&adapter->pend_cmds) > 0)
- printk(KERN_WARNING "megaraid: pending commands!!\n");
+ dev_warn(&adapter->dev->dev, "pending commands!!\n");
/*
* Have a delibrate delay to make sure all the caches are
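
Note: the megaraid.c conversion throughout this file swaps bare printk(KERN_*) calls for dev_warn()/dev_notice()/dev_err(), which prefix the driver and device name automatically, so the hand-rolled "megaraid:" tags disappear from the format strings. A userspace stand-in for the shape of that change (struct device here is a stub, not the kernel's):

    #include <stdio.h>

    struct device { const char *driver; const char *name; };

    /* Stub: the real dev_warn() derives this prefix from the device core. */
    #define dev_warn(dev, fmt, ...) \
            printf("%s %s: " fmt, (dev)->driver, (dev)->name, ##__VA_ARGS__)

    int main(void)
    {
            struct device pci = { "megaraid", "0000:02:00.0" }; /* hypothetical */

            dev_warn(&pci, "Product_info cmd failed with error: %d\n", -5);
            return 0;
    }
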
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71b884dae27c..eaa81e552fd2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -216,7 +216,7 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
struct megasas_cmd, list);
list_del_init(&cmd->list);
} else {
- printk(KERN_ERR "megasas: Command pool empty!\n");
+ dev_err(&instance->pdev->dev, "Command pool empty!\n");
}
spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
@@ -273,6 +273,7 @@ static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
+
regs = instance->reg_set;
writel(0, &(regs)->outbound_intr_mask);
@@ -289,6 +290,7 @@ megasas_disable_intr_xscale(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
u32 mask = 0x1f;
+
regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
@@ -313,6 +315,7 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
{
u32 status;
u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -348,6 +351,7 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
unsigned long flags;
+
spin_lock_irqsave(&instance->hba_lock, flags);
writel((frame_phys_addr >> 3)|(frame_count),
&(regs)->inbound_queue_port);
@@ -364,15 +368,16 @@ megasas_adp_reset_xscale(struct megasas_instance *instance,
{
u32 i;
u32 pcidata;
+
writel(MFI_ADP_RESET, &regs->inbound_doorbell);
for (i = 0; i < 3; i++)
msleep(1000); /* sleep for 3 secs */
pcidata = 0;
pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
- printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+ dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
if (pcidata & 0x2) {
- printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+ dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
pcidata &= ~0x2;
pci_write_config_dword(instance->pdev,
MFI_1068_PCSR_OFFSET, pcidata);
@@ -383,9 +388,9 @@ megasas_adp_reset_xscale(struct megasas_instance *instance,
pcidata = 0;
pci_read_config_dword(instance->pdev,
MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
- printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+ dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
- printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+ dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
pcidata = 0;
pci_write_config_dword(instance->pdev,
MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
@@ -402,7 +407,6 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
-
if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
(le32_to_cpu(*instance->consumer) ==
MEGASAS_ADPRESET_INPROG_SIGN))
@@ -433,7 +437,7 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
/**
* The following functions are defined for ppc (deviceid : 0x60)
-* controllers
+* controllers
*/
/**
@@ -444,6 +448,7 @@ static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
+
regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
@@ -462,6 +467,7 @@ megasas_disable_intr_ppc(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+
regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
@@ -522,6 +528,7 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
unsigned long flags;
+
spin_lock_irqsave(&instance->hba_lock, flags);
writel((frame_phys_addr | (frame_count<<1))|1,
&(regs)->inbound_queue_port);
@@ -566,6 +573,7 @@ static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
+
regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
@@ -584,6 +592,7 @@ megasas_disable_intr_skinny(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+
regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
@@ -634,8 +643,8 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
writel(status, &regs->outbound_intr_status);
/*
- * dummy read to flush PCI
- */
+ * dummy read to flush PCI
+ */
readl(&regs->outbound_intr_status);
return mfiStatus;
@@ -654,6 +663,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
unsigned long flags;
+
spin_lock_irqsave(&instance->hba_lock, flags);
writel(upper_32_bits(frame_phys_addr),
&(regs)->inbound_high_queue_port);
@@ -706,6 +716,7 @@ static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
+
regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
@@ -725,6 +736,7 @@ megasas_disable_intr_gen2(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+
regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
@@ -750,6 +762,7 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
u32 status;
u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -786,6 +799,7 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
unsigned long flags;
+
spin_lock_irqsave(&instance->hba_lock, flags);
writel((frame_phys_addr | (frame_count<<1))|1,
&(regs)->inbound_queue_port);
@@ -800,10 +814,10 @@ static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *reg_set)
{
- u32 retry = 0 ;
- u32 HostDiag;
- u32 __iomem *seq_offset = &reg_set->seq_offset;
- u32 __iomem *hostdiag_offset = &reg_set->host_diag;
+ u32 retry = 0 ;
+ u32 HostDiag;
+ u32 __iomem *seq_offset = &reg_set->seq_offset;
+ u32 __iomem *hostdiag_offset = &reg_set->host_diag;
if (instance->instancet == &megasas_instance_template_skinny) {
seq_offset = &reg_set->fusion_seq_offset;
@@ -821,10 +835,10 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
HostDiag = (u32)readl(hostdiag_offset);
- while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
+ while (!(HostDiag & DIAG_WRITE_ENABLE)) {
msleep(100);
HostDiag = (u32)readl(hostdiag_offset);
- printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+ dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
if (retry++ >= 100)
@@ -832,17 +846,17 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
}
- printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+ dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
ssleep(10);
HostDiag = (u32)readl(hostdiag_offset);
- while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
+ while (HostDiag & DIAG_RESET_ADAPTER) {
msleep(100);
HostDiag = (u32)readl(hostdiag_offset);
- printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+ dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
if (retry++ >= 1000)
@@ -904,7 +918,6 @@ int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
int seconds;
-
struct megasas_header *frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
@@ -940,6 +953,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
+
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
instance->instancet->issue_dcmd(instance, cmd);
@@ -1120,7 +1134,7 @@ static u32 megasas_get_frame_count(struct megasas_instance *instance,
int num_cnt;
int sge_bytes;
u32 sge_sz;
- u32 frame_count=0;
+ u32 frame_count = 0;
sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
sizeof(struct megasas_sge32);
@@ -1151,14 +1165,14 @@ static u32 megasas_get_frame_count(struct megasas_instance *instance,
num_cnt = sge_count - 3;
}
- if(num_cnt>0){
+ if (num_cnt > 0) {
sge_bytes = sge_sz * num_cnt;
frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
}
/* Main frame */
- frame_count +=1;
+ frame_count += 1;
if (frame_count > 7)
frame_count = 8;
@@ -1215,9 +1229,9 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
/*
- * If the command is for the tape device, set the
- * pthru timeout to the os layer timeout value.
- */
+ * If the command is for the tape device, set the
+ * pthru timeout to the os layer timeout value.
+ */
if (scp->device->type == TYPE_TAPE) {
if ((scp->request->timeout / HZ) > 0xFFFF)
pthru->timeout = cpu_to_le16(0xFFFF);
@@ -1241,7 +1255,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
&pthru->sgl);
if (pthru->sge_count > instance->max_num_sge) {
- printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
+ dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
pthru->sge_count);
return 0;
}
@@ -1382,7 +1396,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
if (ldio->sge_count > instance->max_num_sge) {
- printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+ dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
ldio->sge_count);
return 0;
}
@@ -1435,7 +1449,7 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd)
/**
* megasas_dump_pending_frames - Dumps the frame address of all pending cmds
- * in FW
+ * in FW
* @instance: Adapter soft state
*/
static inline void
@@ -1449,63 +1463,60 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
u32 sgcount;
u32 max_cmd = instance->max_fw_cmds;
- printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
- printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+ dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
if (IS_DMA64)
- printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
else
- printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
- printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
- if(!cmd->scmd)
+ if (!cmd->scmd)
continue;
- printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+ dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
ldio = (struct megasas_io_frame *)cmd->frame;
mfi_sgl = &ldio->sgl;
sgcount = ldio->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
- }
- else {
+ } else {
pthru = (struct megasas_pthru_frame *) cmd->frame;
mfi_sgl = &pthru->sgl;
sgcount = pthru->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
}
- if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
- for (n = 0; n < sgcount; n++){
- if (IS_DMA64)
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
- le32_to_cpu(mfi_sgl->sge64[n].length),
- le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
- else
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
- le32_to_cpu(mfi_sgl->sge32[n].length),
- le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+ if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+ for (n = 0; n < sgcount; n++) {
+ if (IS_DMA64)
+ dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+ else
+ dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
}
}
- printk(KERN_ERR "\n");
} /*for max_cmd*/
- printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
- if(cmd->sync_cmd == 1){
- printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
- }
+ if (cmd->sync_cmd == 1)
+ dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
}
- printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+ dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
}
u32
@@ -1623,7 +1634,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
- printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+ dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1651,8 +1662,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
static int megasas_slave_configure(struct scsi_device *sdev)
{
/*
- * The RAID firmware may require extended timeouts.
- */
+ * The RAID firmware may require extended timeouts.
+ */
blk_queue_rq_timeout(sdev->request_queue,
MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
@@ -1661,8 +1672,9 @@ static int megasas_slave_configure(struct scsi_device *sdev)
static int megasas_slave_alloc(struct scsi_device *sdev)
{
- u16 pd_index = 0;
+ u16 pd_index = 0;
struct megasas_instance *instance ;
+
instance = megasas_lookup_instance(sdev->host->host_no);
if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
/*
@@ -1728,8 +1740,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
- writel(MFI_STOP_ADP,
- &instance->reg_set->doorbell);
+ writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
if (instance->mpio && instance->requestorId)
@@ -1783,7 +1794,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
unsigned long flags;
/* If we have already declared adapter dead, donot complete cmds */
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
return;
spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1794,7 +1805,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
while (consumer != producer) {
context = le32_to_cpu(instance->reply_queue[consumer]);
if (context >= instance->max_fw_cmds) {
- printk(KERN_ERR "Unexpected context value %x\n",
+ dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
context);
BUG();
}
@@ -1873,8 +1884,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
- "Failed to get cmd for scsi%d.\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
+ "Failed to get cmd for scsi%d\n",
instance->host->host_no);
return -ENOMEM;
}
@@ -1882,8 +1893,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
dcmd = &cmd->frame->dcmd;
if (!instance->vf_affiliation_111) {
- printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
- "affiliation for scsi%d.\n", instance->host->host_no);
+ dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d\n", instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -1897,8 +1908,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111),
&new_affiliation_111_h);
if (!new_affiliation_111) {
- printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
- "memory for new affiliation for scsi%d.\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d\n",
instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -1929,14 +1940,14 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
dcmd->sgl.sge32[0].length = cpu_to_le32(
sizeof(struct MR_LD_VF_AFFILIATION_111));
- printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
megasas_issue_blocked_cmd(instance, cmd, 0);
if (dcmd->cmd_status) {
- printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
- " failed with status 0x%x for scsi%d.\n",
+ dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
retval = 1; /* Do a scan if we couldn't get affiliation */
goto out;
@@ -1947,9 +1958,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
new_affiliation_111->map[ld].policy[thisVf]) {
- printk(KERN_WARNING "megasas: SR-IOV: "
- "Got new LD/VF affiliation "
- "for scsi%d.\n",
+ dev_warn(&instance->pdev->dev, "SR-IOV: "
+ "Got new LD/VF affiliation for scsi%d\n",
instance->host->host_no);
memcpy(instance->vf_affiliation_111,
new_affiliation_111,
@@ -1985,8 +1995,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
- "Failed to get cmd for scsi%d.\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
+ "Failed to get cmd for scsi%d\n",
instance->host->host_no);
return -ENOMEM;
}
@@ -1994,8 +2004,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
dcmd = &cmd->frame->dcmd;
if (!instance->vf_affiliation) {
- printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
- "affiliation for scsi%d.\n", instance->host->host_no);
+ dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d\n", instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -2010,8 +2020,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION),
&new_affiliation_h);
if (!new_affiliation) {
- printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
- "memory for new affiliation for scsi%d.\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d\n",
instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -2042,14 +2052,14 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION));
- printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
megasas_issue_blocked_cmd(instance, cmd, 0);
if (dcmd->cmd_status) {
- printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
- " failed with status 0x%x for scsi%d.\n",
+ dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
retval = 1; /* Do a scan if we couldn't get affiliation */
goto out;
@@ -2057,8 +2067,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
if (!initial) {
if (!new_affiliation->ldCount) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
- "affiliation for passive path for scsi%d.\n",
+ dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+ "affiliation for passive path for scsi%d\n",
instance->host->host_no);
retval = 1;
goto out;
@@ -2123,8 +2133,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
}
out:
if (doscan) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
- "affiliation for scsi%d.\n", instance->host->host_no);
+ dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+ "affiliation for scsi%d\n", instance->host->host_no);
memcpy(instance->vf_affiliation, new_affiliation,
new_affiliation->size);
retval = 1;
@@ -2164,8 +2174,8 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
- "Failed to get cmd for scsi%d.\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d\n",
instance->host->host_no);
return -ENOMEM;
}
@@ -2178,9 +2188,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
sizeof(struct MR_CTRL_HB_HOST_MEM),
&instance->hb_host_mem_h);
if (!instance->hb_host_mem) {
- printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
- " memory for heartbeat host memory for "
- "scsi%d.\n", instance->host->host_no);
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for scsi%d\n",
+ instance->host->host_no);
retval = -ENOMEM;
goto out;
}
@@ -2200,7 +2210,7 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
- printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
if (instance->ctrl_context && !instance->mask_interrupts)
@@ -2236,7 +2246,7 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
mod_timer(&instance->sriov_heartbeat_timer,
jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
} else {
- printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
"completed for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
}
@@ -2274,7 +2284,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
&clist_local);
spin_unlock_irqrestore(&instance->hba_lock, flags);
- printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+ dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
for (i = 0; i < wait_time; i++) {
msleep(1000);
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2285,28 +2295,28 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
- printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+ dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
spin_unlock_irqrestore(&instance->hba_lock, flags);
return FAILED;
}
- reset_index = 0;
+ reset_index = 0;
while (!list_empty(&clist_local)) {
- reset_cmd = list_entry((&clist_local)->next,
+ reset_cmd = list_entry((&clist_local)->next,
struct megasas_cmd, list);
list_del_init(&reset_cmd->list);
if (reset_cmd->scmd) {
reset_cmd->scmd->result = DID_RESET << 16;
- printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+ dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
reset_index, reset_cmd,
reset_cmd->scmd->cmnd[0]);
reset_cmd->scmd->scsi_done(reset_cmd->scmd);
megasas_return_cmd(instance, reset_cmd);
} else if (reset_cmd->sync_cmd) {
- printk(KERN_NOTICE "megasas:%p synch cmds"
+ dev_notice(&instance->pdev->dev, "%p synch cmds"
"reset queue\n",
reset_cmd);
@@ -2315,7 +2325,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
reset_cmd->frame_phys_addr,
0, instance->reg_set);
} else {
- printk(KERN_NOTICE "megasas: %p unexpected"
+ dev_notice(&instance->pdev->dev, "%p unexpected"
"cmds lst\n",
reset_cmd);
}
@@ -2326,14 +2336,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
for (i = 0; i < resetwaittime; i++) {
-
int outstanding = atomic_read(&instance->fw_outstanding);
if (!outstanding)
break;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
"commands to complete\n",i,outstanding);
/*
* Call cmd completion routine. Cmd to be
@@ -2365,10 +2374,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
i++;
} while (i <= 3);
- if (atomic_read(&instance->fw_outstanding) &&
- !kill_adapter_flag) {
+ if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
if (instance->disableOnlineCtrlReset == 0) {
-
megasas_do_ocr(instance);
/* wait for 5 secs to let FW finish the pending cmds */
@@ -2384,11 +2391,11 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
if (atomic_read(&instance->fw_outstanding) ||
(kill_adapter_flag == 2)) {
- printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+ dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
/*
- * Send signal to FW to stop processing any pending cmds.
- * The controller will be taken offline by the OS now.
- */
+ * Send signal to FW to stop processing any pending cmds.
+ * The controller will be taken offline by the OS now.
+ */
if ((instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
@@ -2401,12 +2408,12 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
megasas_dump_pending_frames(instance);
spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
spin_unlock_irqrestore(&instance->hba_lock, flags);
return FAILED;
}
- printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+ dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
return SUCCESS;
}
@@ -2430,16 +2437,15 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
scmd->cmnd[0], scmd->retries);
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
- printk(KERN_ERR "megasas: cannot recover from previous reset "
- "failures\n");
+ dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
return FAILED;
}
ret_val = megasas_wait_for_outstanding(instance);
if (ret_val == SUCCESS)
- printk(KERN_NOTICE "megasas: reset successful \n");
+ dev_notice(&instance->pdev->dev, "reset successful\n");
else
- printk(KERN_ERR "megasas: failed to do reset\n");
+ dev_err(&instance->pdev->dev, "failed to do reset\n");
return ret_val;
}
@@ -2481,14 +2487,10 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
*/
static int megasas_reset_device(struct scsi_cmnd *scmd)
{
- int ret;
-
/*
* First wait for all commands to complete
*/
- ret = megasas_generic_reset(scmd);
-
- return ret;
+ return megasas_generic_reset(scmd);
}
/**
@@ -2498,6 +2500,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
int ret;
struct megasas_instance *instance;
+
instance = (struct megasas_instance *)scmd->device->host->hostdata;
/*
@@ -2516,7 +2519,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
/**
* megasas_bios_param - Returns disk geometry for a disk
- * @sdev: device handle
+ * @sdev: device handle
* @bdev: block device
* @capacity: drive capacity
* @geom: geometry parameters
@@ -2529,6 +2532,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
int sectors;
sector_t cylinders;
unsigned long tmp;
+
/* Default heads (64) & sectors (32) */
heads = 64;
sectors = 32;
@@ -2575,6 +2579,7 @@ static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
unsigned long flags;
+
/*
* Don't signal app if it is just an aborted previously registered aen
*/
@@ -2595,9 +2600,10 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
if ((instance->unload == 0) &&
((instance->issuepend_done == 1))) {
struct megasas_aen_event *ev;
+
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev) {
- printk(KERN_ERR "megasas_service_aen: out of memory\n");
+ dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
} else {
ev->instance = instance;
instance->ev = ev;
@@ -2654,8 +2660,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
buff_addr = (unsigned long) buf;
- if (buff_offset >
- (instance->fw_crash_buffer_size * dmachunk)) {
+ if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
@@ -2667,7 +2672,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
(buff_offset % dmachunk);
- memcpy(buf, (void *)src_addr, size);
+ memcpy(buf, (void *)src_addr, size);
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
return size;
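
The crash dump lives in dmachunk-sized DMA pieces, so a byte offset splits into a chunk index and an intra-chunk remainder. A worked example, assuming a 1 MiB chunk purely for illustration:

    /* buff_offset = 5500000, dmachunk = 1048576:
     *   chunk index  = 5500000 / 1048576 = 5
     *   chunk offset = 5500000 % 1048576 = 257120
     * so the memcpy() above reads from crash_buf[5] + 257120
     */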
@@ -2727,6 +2732,7 @@ megasas_fw_crash_state_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
+
return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
}
@@ -2811,8 +2817,6 @@ megasas_complete_abort(struct megasas_instance *instance,
cmd->cmd_status_drv = 0;
wake_up(&instance->abort_cmd_wait_q);
}
-
- return;
}
/**
@@ -2820,10 +2824,10 @@ megasas_complete_abort(struct megasas_instance *instance,
* @instance: Adapter soft state
* @cmd: Command to be completed
* @alt_status: If non-zero, use this value as status to
- * SCSI mid-layer instead of the value returned
- * by the FW. This should be used if caller wants
- * an alternate status (as in the case of aborted
- * commands)
+ * SCSI mid-layer instead of the value returned
+ * by the FW. This should be used if caller wants
+ * an alternate status (as in the case of aborted
+ * commands)
*/
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -2847,10 +2851,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
when booting the kdump kernel. Ignore this command to
prevent a kernel panic on shutdown of the kdump kernel. */
- printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
- "completed.\n");
- printk(KERN_WARNING "megaraid_sas: If you have a controller "
- "other than PERC5, please upgrade your firmware.\n");
+ dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
+ "completed\n");
+ dev_warn(&instance->pdev->dev, "If you have a controller "
+ "other than PERC5, please upgrade your firmware\n");
break;
case MFI_CMD_PD_SCSI_IO:
case MFI_CMD_LD_SCSI_IO:
@@ -2918,7 +2922,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
break;
default:
- printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
hdr->cmd_status);
cmd->scmd->result = DID_ERROR << 16;
break;
@@ -2944,8 +2948,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
if (cmd->frame->hdr.cmd_status != 0) {
if (cmd->frame->hdr.cmd_status !=
MFI_STAT_NOT_FOUND)
- printk(KERN_WARNING "megasas: map sync"
- "failed, status = 0x%x.\n",
+					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
megasas_return_cmd(instance, cmd);
@@ -2997,7 +3000,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
break;
default:
- printk("megasas: Unknown command completed! [0x%X]\n",
+ dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
break;
}
@@ -3005,7 +3008,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
/**
* megasas_issue_pending_cmds_again - issue all pending cmds
- * in FW again because of the fw reset
+ * in FW again because of the fw reset
* @instance: Adapter soft state
*/
static inline void
@@ -3023,19 +3026,19 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
spin_unlock_irqrestore(&instance->hba_lock, flags);
while (!list_empty(&clist_local)) {
- cmd = list_entry((&clist_local)->next,
+ cmd = list_entry((&clist_local)->next,
struct megasas_cmd, list);
list_del_init(&cmd->list);
if (cmd->sync_cmd || cmd->scmd) {
- printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
- "detected to be pending while HBA reset.\n",
+			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
+				"detected to be pending while HBA reset\n",
cmd, cmd->scmd, cmd->sync_cmd);
cmd->retry_for_fw_reset++;
if (cmd->retry_for_fw_reset == 3) {
- printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
					"was tried multiple times during reset. "
					"Shutting down the HBA\n",
cmd, cmd->scmd, cmd->sync_cmd);
@@ -3048,18 +3051,18 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
if (cmd->sync_cmd == 1) {
if (cmd->scmd) {
- printk(KERN_NOTICE "megaraid_sas: unexpected"
+				dev_notice(&instance->pdev->dev, "unexpected "
"cmd attached to internal command!\n");
}
- printk(KERN_NOTICE "megasas: %p synchronous cmd"
+			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
				"on the internal reset queue, "
				"issue it again.\n", cmd);
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
instance->instancet->fire_cmd(instance,
- cmd->frame_phys_addr ,
+ cmd->frame_phys_addr,
0, instance->reg_set);
} else if (cmd->scmd) {
- printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
"detected on the internal queue, issue again.\n",
cmd, cmd->scmd->cmnd[0]);
@@ -3068,22 +3071,22 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
cmd->frame_phys_addr,
cmd->frame_count-1, instance->reg_set);
} else {
- printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
"internal reset defer list while re-issue!!\n",
cmd);
}
}
if (instance->aen_cmd) {
- printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+ dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
megasas_return_cmd(instance, instance->aen_cmd);
- instance->aen_cmd = NULL;
+ instance->aen_cmd = NULL;
}
/*
- * Initiate AEN (Asynchronous Event Notification)
- */
+ * Initiate AEN (Asynchronous Event Notification)
+ */
seq_num = instance->last_seq_num;
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
@@ -3110,17 +3113,17 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
u32 defer_index;
unsigned long flags;
- defer_index = 0;
+ defer_index = 0;
spin_lock_irqsave(&instance->mfi_pool_lock, flags);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
if (cmd->sync_cmd == 1 || cmd->scmd) {
- printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
"on the defer queue as internal\n",
defer_index, cmd, cmd->sync_cmd, cmd->scmd);
if (!list_empty(&cmd->list)) {
- printk(KERN_NOTICE "megaraid_sas: ERROR while"
+ dev_notice(&instance->pdev->dev, "ERROR while"
" moving this cmd:%p, %d %p, it was"
"discovered on some list?\n",
cmd, cmd->sync_cmd, cmd->scmd);
@@ -3145,13 +3148,13 @@ process_fw_state_change_wq(struct work_struct *work)
unsigned long flags;
if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
- printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
+ dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
instance->adprecovery);
return ;
}
if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
- printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
"state, restarting it...\n");
instance->instancet->disable_intr(instance);
@@ -3159,21 +3162,21 @@ process_fw_state_change_wq(struct work_struct *work)
atomic_set(&instance->fw_reset_no_pci_access, 1);
instance->instancet->adp_reset(instance, instance->reg_set);
- atomic_set(&instance->fw_reset_no_pci_access, 0 );
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
- printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
"initiating next stage...\n");
- printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
"state 2 starting...\n");
- /*waitting for about 20 second before start the second init*/
+		/* wait for about 20 seconds before starting the second init */
for (wait = 0; wait < 30; wait++) {
msleep(1000);
}
if (megasas_transition_to_ready(instance, 1)) {
- printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+ dev_notice(&instance->pdev->dev, "adapter not ready\n");
atomic_set(&instance->fw_reset_no_pci_access, 1);
megaraid_sas_kill_hba(instance);
@@ -3200,15 +3203,14 @@ process_fw_state_change_wq(struct work_struct *work)
megasas_issue_pending_cmds_again(instance);
instance->issuepend_done = 1;
}
- return ;
}
/**
* megasas_deplete_reply_queue - Processes all completed commands
* @instance: Adapter soft state
* @alt_status: Alternate status to be returned to
- * SCSI mid-layer instead of the status
- * returned by the FW
+ * SCSI mid-layer instead of the status
+ * returned by the FW
* Note: this must be called with hba lock held
*/
static int
@@ -3238,13 +3240,13 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
instance->reg_set) & MFI_STATE_MASK;
if (fw_state != MFI_STATE_FAULT) {
- printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+ dev_notice(&instance->pdev->dev, "fw state:%x\n",
fw_state);
}
if ((fw_state == MFI_STATE_FAULT) &&
(instance->disableOnlineCtrlReset == 0)) {
- printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+ dev_notice(&instance->pdev->dev, "wait adp restart\n");
if ((instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS1064R) ||
@@ -3265,14 +3267,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
atomic_set(&instance->fw_outstanding, 0);
megasas_internal_reset_defer_cmds(instance);
- printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+ dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
fw_state, instance->adprecovery);
schedule_work(&instance->work_init);
return IRQ_HANDLED;
} else {
- printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+ dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
fw_state, instance->disableOnlineCtrlReset);
}
}
@@ -3288,13 +3290,13 @@ static irqreturn_t megasas_isr(int irq, void *devp)
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
unsigned long flags;
- irqreturn_t rc;
+ irqreturn_t rc;
if (atomic_read(&instance->fw_reset_no_pci_access))
return IRQ_HANDLED;
spin_lock_irqsave(&instance->hba_lock, flags);
- rc = megasas_deplete_reply_queue(instance, DID_OK);
+ rc = megasas_deplete_reply_queue(instance, DID_OK);
spin_unlock_irqrestore(&instance->hba_lock, flags);
return rc;
@@ -3322,7 +3324,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
fw_state = abs_state & MFI_STATE_MASK;
if (fw_state != MFI_STATE_READY)
- printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+ dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
" state\n");
while (fw_state != MFI_STATE_READY) {
@@ -3330,7 +3332,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
switch (fw_state) {
case MFI_STATE_FAULT:
- printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
if (ocr) {
max_wait = MEGASAS_RESET_WAIT_TIME;
cur_state = MFI_STATE_FAULT;
@@ -3469,7 +3471,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
break;
default:
- printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
fw_state);
return -ENODEV;
}
@@ -3491,7 +3493,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
* Return error if fw_state hasn't changed after max_wait
*/
if (curr_abs_state == abs_state) {
- printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
"in %d secs\n", fw_state, max_wait);
return -ENODEV;
}
@@ -3499,7 +3501,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
abs_state = curr_abs_state;
fw_state = curr_abs_state & MFI_STATE_MASK;
}
- printk(KERN_INFO "megasas: FW now in Ready state\n");
+ dev_info(&instance->pdev->dev, "FW now in Ready state\n");
return 0;
}
@@ -3570,9 +3572,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
sizeof(struct megasas_sge32);
- if (instance->flag_ieee) {
+ if (instance->flag_ieee)
sge_sz = sizeof(struct megasas_sge_skinny);
- }
/*
* For MFI controllers.
@@ -3594,7 +3595,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
instance->pdev, total_sz, 256, 0);
if (!instance->frame_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
return -ENOMEM;
}
@@ -3602,7 +3603,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
instance->pdev, 128, 4, 0);
if (!instance->sense_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
pci_pool_destroy(instance->frame_dma_pool);
instance->frame_dma_pool = NULL;
@@ -3630,7 +3631,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
* whatever has been allocated
*/
if (!cmd->frame || !cmd->sense) {
- printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
megasas_teardown_frame_pool(instance);
return -ENOMEM;
}
@@ -3656,6 +3657,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
void megasas_free_cmds(struct megasas_instance *instance)
{
int i;
+
/* First free the MFI frame pool */
megasas_teardown_frame_pool(instance);
@@ -3708,7 +3710,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
if (!instance->cmd_list) {
- printk(KERN_DEBUG "megasas: out of memory\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
return -ENOMEM;
}
@@ -3744,7 +3746,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
* Create a frame pool and assign one frame to each cmd
*/
if (megasas_create_frame_pool(instance)) {
- printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds(instance);
}
@@ -3773,7 +3775,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
return -ENOMEM;
}
@@ -3783,7 +3785,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
if (!ci) {
- printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -3811,12 +3813,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
ret = megasas_issue_polled(instance, cmd);
/*
- * the following function will get the instance PD LIST.
- */
+ * the following function will get the instance PD LIST.
+ */
pd_addr = ci->addr;
- if ( ret == 0 &&
+ if (ret == 0 &&
(le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
@@ -3868,7 +3870,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
return -ENOMEM;
}
@@ -3879,7 +3881,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
&ci_h);
if (!ci) {
- printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -3954,8 +3956,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_WARNING
- "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ dev_warn(&instance->pdev->dev,
+ "megasas_ld_list_query: Failed to get cmd\n");
return -ENOMEM;
}
@@ -3965,8 +3967,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
if (!ci) {
- printk(KERN_WARNING
- "megasas: Failed to alloc mem for ld_list_query\n");
+ dev_warn(&instance->pdev->dev,
+ "Failed to alloc mem for ld_list_query\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -4052,11 +4054,11 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
"Legacy(64 VD) firmware");
- old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
+ old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
(sizeof(struct MR_LD_SPAN_MAP) *
(instance->fw_supported_vd_count - 1));
- new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
- fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
+ new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
(sizeof(struct MR_LD_SPAN_MAP) *
(instance->drv_supported_vd_count - 1));
@@ -4067,7 +4069,6 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->current_map_sz = new_map_sz;
else
fusion->current_map_sz = old_map_sz;
-
}
/**
@@ -4093,7 +4094,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
return -ENOMEM;
}
@@ -4103,7 +4104,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info), &ci_h);
if (!ci) {
- printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -4214,9 +4215,7 @@ static int
megasas_issue_init_mfi(struct megasas_instance *instance)
{
__le32 context;
-
struct megasas_cmd *cmd;
-
struct megasas_init_frame *init_frame;
struct megasas_init_queue_info *initq_info;
dma_addr_t init_frame_h;
@@ -4269,7 +4268,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
*/
if (megasas_issue_polled(instance, cmd)) {
- printk(KERN_ERR "megasas: Failed to init firmware\n");
+ dev_err(&instance->pdev->dev, "Failed to init firmware\n");
megasas_return_cmd(instance, cmd);
goto fail_fw_init;
}
@@ -4342,7 +4341,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
&instance->reply_queue_h);
if (!instance->reply_queue) {
- printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
goto fail_reply_queue;
}
@@ -4361,7 +4360,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
(instance->instancet->read_fw_status_reg(reg_set) &
0x04000000);
- printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
+	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
instance->fw_support_ieee);
if (instance->fw_support_ieee)
@@ -4505,7 +4504,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
if (pci_request_selected_regions(instance->pdev, instance->bar,
"megasas: LSI")) {
- printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
}
@@ -4513,7 +4512,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->reg_set = ioremap_nocache(base_addr, 8192);
if (!instance->reg_set) {
- printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
goto fail_ioremap;
}
@@ -4551,7 +4550,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
(instance, instance->reg_set);
atomic_set(&instance->fw_reset_no_pci_access, 0);
dev_info(&instance->pdev->dev,
- "megasas: FW restarted successfully from %s!\n",
+ "FW restarted successfully from %s!\n",
__func__);
	/* waiting for about 30 seconds before retry */
@@ -4652,16 +4651,15 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->instancet->enable_intr(instance);
- printk(KERN_ERR "megasas: INIT adapter done\n");
+ dev_err(&instance->pdev->dev, "INIT adapter done\n");
/** for passthrough
- * the following function will get the PD LIST.
- */
-
- memset(instance->pd_list, 0 ,
+ * the following function will get the PD LIST.
+ */
+ memset(instance->pd_list, 0,
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
if (megasas_get_pd_list(instance) < 0) {
- printk(KERN_ERR "megasas: failed to get PD list\n");
+ dev_err(&instance->pdev->dev, "failed to get PD list\n");
goto fail_get_pd_list;
}
@@ -4686,7 +4684,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
le16_to_cpu(ctrl_info->max_strips_per_io);
max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
- tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+ tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
@@ -4960,7 +4958,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
aen_cmd, 30);
if (ret_val) {
- printk(KERN_DEBUG "megasas: Failed to abort "
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
"previous AEN command\n");
return ret_val;
}
@@ -5051,7 +5049,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
static int megasas_io_attach(struct megasas_instance *instance)
{
struct Scsi_Host *host = instance->host;
- u32 error;
+ u32 error;
/*
* Export parameters required by SCSI mid-layer
@@ -5079,7 +5077,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
(max_sectors <= MEGASAS_MAX_SECTORS)) {
instance->max_sectors_per_req = max_sectors;
} else {
- printk(KERN_INFO "megasas: max_sectors should be > 0"
+		dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
"and <= %d (or < 1MB for GEN2 controller)\n",
instance->max_sectors_per_req);
}
@@ -5126,7 +5124,7 @@ static int
megasas_set_dma_mask(struct pci_dev *pdev)
{
/*
- * All our contollers are capable of performing 64-bit DMA
+ * All our controllers are capable of performing 64-bit DMA
*/
if (IS_DMA64) {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
@@ -5206,13 +5204,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
sizeof(struct megasas_instance));
if (!host) {
- printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+ dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
goto fail_alloc_instance;
}
instance = (struct megasas_instance *)host->hostdata;
memset(instance, 0, sizeof(*instance));
- atomic_set( &instance->fw_reset_no_pci_access, 0 );
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
instance->pdev = pdev;
switch (instance->pdev->device) {
@@ -5226,7 +5224,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
instance->ctrl_context_pages);
if (!instance->ctrl_context) {
- printk(KERN_DEBUG "megasas: Failed to allocate "
+ dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
"memory for Fusion context info\n");
goto fail_alloc_dma_buf;
}
@@ -5245,7 +5243,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
&instance->consumer_h);
if (!instance->producer || !instance->consumer) {
- printk(KERN_DEBUG "megasas: Failed to allocate"
+		dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
"memory for producer, consumer\n");
goto fail_alloc_dma_buf;
}
@@ -5276,7 +5274,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
CRASH_DMA_BUF_SIZE,
&instance->crash_dump_h);
if (!instance->crash_dump_buf)
- dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+ dev_err(&pdev->dev, "Can't allocate Firmware "
"crash dump DMA buffer\n");
megasas_poll_wait_aen = 0;
@@ -5292,7 +5290,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
&instance->evt_detail_h);
if (!instance->evt_detail) {
- printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
"event detail structure\n");
goto fail_alloc_dma_buf;
}
@@ -5356,7 +5354,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
&instance->vf_affiliation_111_h);
if (!instance->vf_affiliation_111)
- printk(KERN_WARNING "megasas: Can't allocate "
+ dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
} else {
instance->vf_affiliation =
@@ -5365,7 +5363,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
sizeof(struct MR_LD_VF_AFFILIATION),
&instance->vf_affiliation_h);
if (!instance->vf_affiliation)
- printk(KERN_WARNING "megasas: Can't allocate "
+ dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
}
}
@@ -5399,7 +5397,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance)) {
- printk(KERN_DEBUG "megasas: start aen failed\n");
+ dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
goto fail_start_aen;
}
@@ -5409,8 +5407,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
return 0;
- fail_start_aen:
- fail_io_attach:
+fail_start_aen:
+fail_io_attach:
megasas_mgmt_info.count--;
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
megasas_mgmt_info.max_index--;
@@ -5428,7 +5426,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (instance->msix_vectors)
pci_disable_msix(instance->pdev);
fail_init_mfi:
- fail_alloc_dma_buf:
+fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
@@ -5442,8 +5440,8 @@ fail_init_mfi:
instance->consumer_h);
scsi_host_put(host);
- fail_alloc_instance:
- fail_set_dma_mask:
+fail_alloc_instance:
+fail_set_dma_mask:
pci_disable_device(pdev);
return -ENODEV;
@@ -5485,8 +5483,6 @@ static void megasas_flush_cache(struct megasas_instance *instance)
" from %s\n", __func__);
megasas_return_cmd(instance, cmd);
-
- return;
}
/**
@@ -5532,8 +5528,6 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
"from %s\n", __func__);
megasas_return_cmd(instance, cmd);
-
- return;
}
#ifdef CONFIG_PM
@@ -5607,7 +5601,7 @@ megasas_resume(struct pci_dev *pdev)
rval = pci_enable_device_mem(pdev);
if (rval) {
- printk(KERN_ERR "megasas: Enable device failed\n");
+ dev_err(&pdev->dev, "Enable device failed\n");
return rval;
}
@@ -5686,7 +5680,7 @@ megasas_resume(struct pci_dev *pdev)
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
+ dev_err(&instance->pdev->dev, "Start AEN failed\n");
return 0;
@@ -5839,8 +5833,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
scsi_host_put(host);
pci_disable_device(pdev);
-
- return;
}
/**
@@ -5909,11 +5901,11 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
{
unsigned int mask;
unsigned long flags;
+
poll_wait(file, &megasas_poll_wait, wait);
spin_lock_irqsave(&poll_aen_lock, flags);
if (megasas_poll_wait_aen)
- mask = (POLLIN | POLLRDNORM);
-
+ mask = (POLLIN | POLLRDNORM);
else
mask = 0;
megasas_poll_wait_aen = 0;
@@ -5927,8 +5919,7 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
* @cmd: MFI command frame
*/
-static int megasas_set_crash_dump_params_ioctl(
- struct megasas_cmd *cmd)
+static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
{
struct megasas_instance *local_instance;
int i, error = 0;
@@ -5982,14 +5973,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memset(kbuff_arr, 0, sizeof(kbuff_arr));
if (ioc->sge_count > MAX_IOCTL_SGE) {
- printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
ioc->sge_count, MAX_IOCTL_SGE);
return -EINVAL;
}
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
return -ENOMEM;
}
@@ -6034,8 +6025,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
if (!kbuff_arr[i]) {
- printk(KERN_DEBUG "megasas: Failed to alloc "
- "kernel SGL buffer for IOCTL \n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
+ "kernel SGL buffer for IOCTL\n");
error = -ENOMEM;
goto out;
}
@@ -6108,7 +6099,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
sense, ioc->sense_len)) {
- printk(KERN_ERR "megasas: Failed to copy out to user "
+ dev_err(&instance->pdev->dev, "Failed to copy out to user "
"sense data\n");
error = -EFAULT;
goto out;
@@ -6120,11 +6111,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
*/
if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
&cmd->frame->hdr.cmd_status, sizeof(u8))) {
- printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
error = -EFAULT;
}
- out:
+out:
if (sense) {
dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
sense, sense_handle);
@@ -6180,7 +6171,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
}
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
- printk(KERN_ERR "Controller in crit error\n");
+ dev_err(&instance->pdev->dev, "Controller in crit error\n");
error = -ENODEV;
goto out_kfree_ioc;
}
@@ -6205,7 +6196,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
spin_unlock_irqrestore(&instance->hba_lock, flags);
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: waiting"
+				dev_notice(&instance->pdev->dev, "waiting "
"for controller reset to finish\n");
}
@@ -6216,7 +6207,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- printk(KERN_ERR "megaraid_sas: timed out while"
+		dev_err(&instance->pdev->dev, "timed out while "
"waiting for HBA to recover\n");
error = -ENODEV;
goto out_up;
@@ -6224,10 +6215,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
- out_up:
+out_up:
up(&instance->ioctl_sem);
- out_kfree_ioc:
+out_kfree_ioc:
kfree(ioc);
return error;
}
@@ -6275,7 +6266,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_unlock_irqrestore(&instance->hba_lock, flags);
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: waiting for"
+				dev_notice(&instance->pdev->dev, "waiting for "
"controller reset to finish\n");
}
@@ -6285,8 +6276,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
spin_lock_irqsave(&instance->hba_lock, flags);
if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- printk(KERN_ERR "megaraid_sas: timed out while waiting"
- "for HBA to recover.\n");
+		dev_err(&instance->pdev->dev, "timed out while waiting "
+ "for HBA to recover\n");
return -ENODEV;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
@@ -6462,7 +6453,8 @@ static ssize_t
megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
{
int retval = count;
- if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
+
+ if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
printk(KERN_ERR "megasas: could not set dbg_lvl\n");
retval = -EINVAL;
}
@@ -6502,7 +6494,7 @@ megasas_aen_polling(struct work_struct *work)
if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
break;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: %s waiting for "
+ dev_notice(&instance->pdev->dev, "%s waiting for "
"controller reset to finish for scsi%d\n",
__func__, instance->host->host_no);
}
@@ -6524,14 +6516,12 @@ megasas_aen_polling(struct work_struct *work)
pd_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 =
- scsi_device_lookup(host, i, j, 0);
+ sdev1 = scsi_device_lookup(host, i, j, 0);
if (instance->pd_list[pd_index].driveState
== MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
+ if (!sdev1)
scsi_add_device(host, i, j, 0);
- }
if (sdev1)
scsi_device_put(sdev1);
@@ -6552,14 +6542,12 @@ megasas_aen_polling(struct work_struct *work)
pd_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 =
- scsi_device_lookup(host, i, j, 0);
+ sdev1 = scsi_device_lookup(host, i, j, 0);
if (instance->pd_list[pd_index].driveState
== MR_PD_STATE_SYSTEM) {
- if (sdev1) {
+ if (sdev1)
scsi_device_put(sdev1);
- }
} else {
if (sdev1) {
scsi_remove_device(sdev1);
@@ -6644,13 +6632,13 @@ megasas_aen_polling(struct work_struct *work)
break;
}
} else {
- printk(KERN_ERR "invalid evt_detail!\n");
+ dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
kfree(ev);
return;
}
if (doscan) {
- printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
instance->host->host_no);
if (megasas_get_pd_list(instance) == 0) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -6705,7 +6693,7 @@ megasas_aen_polling(struct work_struct *work)
}
}
- if ( instance->aen_cmd != NULL ) {
+ if (instance->aen_cmd != NULL) {
kfree(ev);
return ;
}
@@ -6722,7 +6710,7 @@ megasas_aen_polling(struct work_struct *work)
mutex_unlock(&instance->aen_mutex);
if (error)
- printk(KERN_ERR "register aen failed error %x\n", error);
+ dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
kfree(ev);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 46a0f8f4f677..f0837cc3b163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -221,7 +221,7 @@ static void megasas_teardown_frame_pool_fusion(
struct megasas_cmd_fusion *cmd;
if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
- printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+ dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
"sense pool : %p\n", fusion->sg_dma_pool,
fusion->sense_dma_pool);
return;
@@ -332,8 +332,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
total_sz_chain_frame, 4,
0);
if (!fusion->sg_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup request pool "
- "fusion\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
return -ENOMEM;
}
fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
@@ -341,8 +340,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
SCSI_SENSE_BUFFERSIZE, 64, 0);
if (!fusion->sense_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup sense pool "
- "fusion\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
pci_pool_destroy(fusion->sg_dma_pool);
fusion->sg_dma_pool = NULL;
return -ENOMEM;
@@ -366,7 +364,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
* whatever has been allocated
*/
if (!cmd->sg_frame || !cmd->sense) {
- printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
megasas_teardown_frame_pool_fusion(instance);
return -ENOMEM;
}
@@ -412,7 +410,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
&fusion->req_frames_desc_phys, GFP_KERNEL);
if (!fusion->req_frames_desc) {
- printk(KERN_ERR "megasas; Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"request_frames\n");
goto fail_req_desc;
}
@@ -423,7 +421,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion->reply_alloc_sz * count, 16, 0);
if (!fusion->reply_frames_desc_pool) {
- printk(KERN_ERR "megasas; Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"reply_frame pool\n");
goto fail_reply_desc;
}
@@ -432,7 +430,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
&fusion->reply_frames_desc_phys);
if (!fusion->reply_frames_desc) {
- printk(KERN_ERR "megasas; Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"reply_frame pool\n");
pci_pool_destroy(fusion->reply_frames_desc_pool);
goto fail_reply_desc;
@@ -449,7 +447,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion->io_frames_alloc_sz, 16, 0);
if (!fusion->io_request_frames_pool) {
- printk(KERN_ERR "megasas: Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"io_request_frame pool\n");
goto fail_io_frames;
}
@@ -458,7 +456,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
&fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
- printk(KERN_ERR "megasas: Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"io_request_frames frames\n");
pci_pool_destroy(fusion->io_request_frames_pool);
goto fail_io_frames;
@@ -473,7 +471,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
* max_cmd, GFP_KERNEL);
if (!fusion->cmd_list) {
- printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
"memory for cmd_list_fusion\n");
goto fail_cmd_list;
}
@@ -483,7 +481,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
GFP_KERNEL);
if (!fusion->cmd_list[i]) {
- printk(KERN_ERR "Could not alloc cmd list fusion\n");
+ dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
for (j = 0; j < i; j++)
kfree(fusion->cmd_list[j]);
@@ -527,7 +525,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
* Create a frame pool and assign one frame to each cmd
*/
if (megasas_create_frame_pool_fusion(instance)) {
- printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds_fusion(instance);
goto fail_req_desc;
}
@@ -613,7 +611,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+ dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
ret = 1;
goto fail_get_cmd;
}
@@ -624,7 +622,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
&ioc_init_handle, GFP_KERNEL);
if (!IOCInitMessage) {
- printk(KERN_ERR "Could not allocate memory for "
+ dev_err(&instance->pdev->dev, "Could not allocate memory for "
"IOCInitMessage\n");
ret = 1;
goto fail_fw_init;
@@ -714,7 +712,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
ret = 1;
goto fail_fw_init;
}
- printk(KERN_ERR "megasas:IOC Init cmd success\n");
+ dev_err(&instance->pdev->dev, "Init cmd success\n");
ret = 0;
@@ -757,7 +755,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
return -ENOMEM;
}
@@ -776,7 +774,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
if (!ci) {
- printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
@@ -851,8 +849,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
- "info.\n");
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
return -ENOMEM;
}
@@ -1097,7 +1094,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
&fusion->ld_map_phys[i],
GFP_KERNEL);
if (!fusion->ld_map[i]) {
- printk(KERN_ERR "megasas: Could not allocate memory "
+ dev_err(&instance->pdev->dev, "Could not allocate memory "
"for map info\n");
goto fail_map_info;
}
@@ -1162,7 +1159,7 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
cmd->scmd->result = DID_IMM_RETRY << 16;
break;
default:
- printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+ dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
cmd->scmd->result = DID_ERROR << 16;
break;
}
@@ -1851,7 +1848,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
&io_request->SGL, cmd);
if (sge_count > instance->max_num_sge) {
- printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+ dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
"max (0x%x) allowed\n", sge_count,
instance->max_num_sge);
return 1;
@@ -1885,7 +1882,7 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
struct fusion_context *fusion;
if (index >= instance->max_fw_cmds) {
- printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
+		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x) request for "
"descriptor for scsi%d\n", index,
instance->host->host_no);
return NULL;
@@ -1927,7 +1924,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
if (megasas_build_io_fusion(instance, scmd, cmd)) {
megasas_return_cmd_fusion(instance, cmd);
- printk(KERN_ERR "megasas: Error building command.\n");
+ dev_err(&instance->pdev->dev, "Error building command\n");
cmd->request_desc = NULL;
return 1;
}
@@ -1937,7 +1934,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
if (cmd->io_request->ChainOffset != 0 &&
cmd->io_request->ChainOffset != 0xF)
- printk(KERN_ERR "megasas: The chain offset value is not "
+ dev_err(&instance->pdev->dev, "The chain offset value is not "
"correct : %x\n", cmd->io_request->ChainOffset);
/*
@@ -2025,7 +2022,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
if (reply_descript_type ==
MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
if (megasas_dbg_lvl == 5)
- printk(KERN_ERR "\nmegasas: FAST Path "
+ dev_err(&instance->pdev->dev, "\nFAST Path "
"IO Success\n");
}
/* Fall thru and complete IO */
@@ -2186,7 +2183,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
else if (fw_state == MFI_STATE_FAULT)
schedule_work(&instance->work_init);
} else if (fw_state == MFI_STATE_FAULT) {
- printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
"for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
}
@@ -2269,7 +2266,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
u16 index;
if (build_mpt_mfi_pass_thru(instance, cmd)) {
- printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
+ dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
return NULL;
}
@@ -2303,7 +2300,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
req_desc = build_mpt_cmd(instance, cmd);
if (!req_desc) {
- printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
+ dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
return;
}
megasas_fire_cmd_fusion(instance, req_desc);
@@ -2413,7 +2410,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
- printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+ dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
" will reset adapter scsi%d.\n",
instance->host->host_no);
retval = 1;
@@ -2436,7 +2433,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
hb_seconds_missed++;
if (hb_seconds_missed ==
(MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
- printk(KERN_WARNING "megasas: SR-IOV:"
+ dev_warn(&instance->pdev->dev, "SR-IOV:"
" Heartbeat never completed "
" while polling during I/O "
" timeout handling for "
@@ -2454,7 +2451,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
goto out;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
"commands to complete for scsi%d\n", i,
outstanding, instance->host->host_no);
megasas_complete_cmd_dpc_fusion(
@@ -2464,7 +2461,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
}
if (atomic_read(&instance->fw_outstanding)) {
- printk("megaraid_sas: pending commands remain after waiting, "
+ dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
instance->host->host_no);
retval = 1;
@@ -2564,7 +2561,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
mutex_lock(&instance->reset_mutex);
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
- printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+ dev_warn(&instance->pdev->dev, "Hardware critical error, "
"returning FAILED for scsi%d.\n",
instance->host->host_no);
mutex_unlock(&instance->reset_mutex);
@@ -2618,7 +2615,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
&convert)) {
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
- printk(KERN_WARNING "megaraid_sas: resetting fusion "
+ dev_warn(&instance->pdev->dev, "resetting fusion "
"adapter scsi%d.\n", instance->host->host_no);
if (convert)
iotimeout = 0;
@@ -2645,7 +2642,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (instance->disableOnlineCtrlReset ||
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
- printk(KERN_WARNING "megaraid_sas: Reset not supported"
+ dev_warn(&instance->pdev->dev, "Reset not supported"
", killing adapter scsi%d.\n",
instance->host->host_no);
megaraid_sas_kill_hba(instance);
@@ -2663,7 +2660,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
instance->hb_host_mem->HB.driverCounter)) {
instance->hb_host_mem->HB.driverCounter =
instance->hb_host_mem->HB.fwCounter;
- printk(KERN_WARNING "megasas: SR-IOV:"
+				dev_warn(&instance->pdev->dev, "SR-IOV: "
"Late FW heartbeat update for "
"scsi%d.\n",
instance->host->host_no);
@@ -2679,8 +2676,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
abs_state = status_reg &
MFI_STATE_MASK;
if (abs_state == MFI_STATE_READY) {
- printk(KERN_WARNING "megasas"
- ": SR-IOV: FW was found"
+ dev_warn(&instance->pdev->dev,
+						"SR-IOV: FW was found "
"to be in ready state "
"for scsi%d.\n",
instance->host->host_no);
@@ -2689,7 +2686,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
msleep(20);
}
if (abs_state != MFI_STATE_READY) {
- printk(KERN_WARNING "megasas: SR-IOV: "
+ dev_warn(&instance->pdev->dev, "SR-IOV: "
"FW not in ready state after %d"
" seconds for scsi%d, status_reg = "
"0x%x.\n",
@@ -2731,7 +2728,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
host_diag =
readl(&instance->reg_set->fusion_host_diag);
if (retry++ == 100) {
- printk(KERN_WARNING "megaraid_sas: "
+ dev_warn(&instance->pdev->dev,
"Host diag unlock failed! "
"for scsi%d\n",
instance->host->host_no);
@@ -2754,7 +2751,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
host_diag =
readl(&instance->reg_set->fusion_host_diag);
if (retry++ == 1000) {
- printk(KERN_WARNING "megaraid_sas: "
+ dev_warn(&instance->pdev->dev,
"Diag reset adapter never "
"cleared for scsi%d!\n",
instance->host->host_no);
@@ -2777,7 +2774,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
instance->reg_set) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
- printk(KERN_WARNING "megaraid_sas: firmware "
+ dev_warn(&instance->pdev->dev, "firmware "
"state < MFI_STATE_FW_INIT, state = "
"0x%x for scsi%d\n", abs_state,
instance->host->host_no);
@@ -2786,7 +2783,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
- printk(KERN_WARNING "megaraid_sas: Failed to "
+ dev_warn(&instance->pdev->dev, "Failed to "
"transition controller to ready "
"for scsi%d.\n",
instance->host->host_no);
@@ -2795,7 +2792,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
- printk(KERN_WARNING "megaraid_sas: "
+ dev_warn(&instance->pdev->dev,
"megasas_ioc_init_fusion() failed!"
" for scsi%d\n",
instance->host->host_no);
@@ -2836,7 +2833,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
}
/* Adapter reset completed successfully */
- printk(KERN_WARNING "megaraid_sas: Reset "
+ dev_warn(&instance->pdev->dev, "Reset "
"successful for scsi%d.\n",
instance->host->host_no);
@@ -2852,7 +2849,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
goto out;
}
/* Reset failed, kill the adapter */
- printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+ dev_warn(&instance->pdev->dev, "Reset failed, killing "
"adapter scsi%d.\n", instance->host->host_no);
megaraid_sas_kill_hba(instance);
instance->skip_heartbeat_timer_del = 1;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 11248de92b3b..c167911221e9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -112,9 +112,12 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
if (ret)
return ret;
+	/* global ioc spinlock to protect the controller list during list operations */
printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
+ spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
+ spin_unlock(&gioc_lock);
return 0;
}
@@ -1557,7 +1560,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
+ (!memap_sz || !pio_sz); i++) {
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
@@ -1572,16 +1576,17 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- printk(MPT2SAS_ERR_FMT "unable to map "
- "adapter memory!\n", ioc->name);
- r = -EINVAL;
- goto out_fail;
- }
}
}
}
+ if (ioc->chip == NULL) {
+		printk(MPT2SAS_ERR_FMT "unable to map adapter memory "
+		    "or resource not found\n", ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+
_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
@@ -4435,6 +4440,8 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
+	/* synchronize freeing of resources with the pci_access_mutex lock */
+ mutex_lock(&ioc->pci_access_mutex);
if (ioc->chip_phys && ioc->chip) {
_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
@@ -4454,6 +4461,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
+ mutex_unlock(&ioc->pci_access_mutex);
return;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index caff8d10cca4..97ea360c6920 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -238,6 +238,7 @@
* @flags: MPT_TARGET_FLAGS_XXX flags
 * @deleted: target flagged for deletion
* @tm_busy: target is busy with TM request.
+ * @sdev: The sas_device associated with this target
*/
struct MPT2SAS_TARGET {
struct scsi_target *starget;
@@ -248,6 +249,7 @@ struct MPT2SAS_TARGET {
u32 flags;
u8 deleted;
u8 tm_busy;
+ struct _sas_device *sdev;
};
@@ -376,8 +378,24 @@ struct _sas_device {
u8 phy;
u8 responding;
u8 pfa_led_on;
+ struct kref refcount;
};
+static inline void sas_device_get(struct _sas_device *s)
+{
+ kref_get(&s->refcount);
+}
+
+static inline void sas_device_free(struct kref *r)
+{
+ kfree(container_of(r, struct _sas_device, refcount));
+}
+
+static inline void sas_device_put(struct _sas_device *s)
+{
+ kref_put(&s->refcount, sas_device_free);
+}
+
/**
* struct _raid_device - raid volume link list
* @list: sas device list
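
A caller-side sketch of the refcounting just introduced, assuming the driver's existing sas_device_lock/sas_device_list fields and the list member of struct _sas_device (the lookup helper itself is hypothetical):

    static struct _sas_device *example_get_sdev_by_addr(
            struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
    {
            struct _sas_device *sas_device;
            unsigned long flags;

            spin_lock_irqsave(&ioc->sas_device_lock, flags);
            list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
                    if (sas_device->sas_address != sas_address)
                            continue;
                    /* take a reference while still holding the lock */
                    sas_device_get(sas_device);
                    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
                    return sas_device;
            }
            spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
            return NULL;
    }

Each such lookup is paired with a sas_device_put() in the caller, so the object cannot be freed while a cli/sysfs path still holds it.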
@@ -799,6 +817,12 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
 * @temp_sensors_count: flag to carry the number of temperature sensors
+ * @pci_access_mutex: Mutex to synchronize the ioctl/sysfs show paths with
+ * pci resource handling. Freeing PCI resources releases vital
+ * hardware/memory resources that might still be in use by the cli/sysfs
+ * path functions, resulting in a NULL pointer dereference followed by a
+ * kernel crash. To avoid this race condition we use mutex synchronization,
+ * which serializes the cli/sysfs_show paths against resource freeing
*/
struct MPT2SAS_ADAPTER {
struct list_head list;
@@ -1015,6 +1039,7 @@ struct MPT2SAS_ADAPTER {
u8 mfg_pg10_hide_flag;
u8 hide_drives;
+ struct mutex pci_access_mutex;
};
typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
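
A sketch of the protocol behind pci_access_mutex, using only fields visible in this patch (the function itself is hypothetical):

    static int example_ctl_path(struct MPT2SAS_ADAPTER *ioc)
    {
            int ret = 0;

            mutex_lock(&ioc->pci_access_mutex);
            if (ioc->pci_error_recovery || ioc->remove_host) {
                    ret = -EAGAIN;  /* resources gone or going away */
                    goto out;
            }
            /* PCI/memory resources are pinned here: the free path takes
             * the same mutex, so it cannot run until this path drops it.
             */
    out:
            mutex_unlock(&ioc->pci_access_mutex);
            return ret;
    }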
@@ -1023,6 +1048,17 @@ typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
/* base shared API */
extern struct list_head mpt2sas_ioc_list;
+/* spinlock on list operations over IOCs
+ * Case: when multiple warpdrive cards (IOCs) are in use.
+ * Each IOC is added to the ioc list structure on initialization.
+ * Watchdog threads run at regular intervals to check each IOC for any
+ * fault condition, which will trigger the dead_ioc thread to
+ * deallocate pci resources, deleting the IOC entry from the list.
+ * This deletion needs to be protected by a spinlock to ensure that
+ * IOC removal is synchronized; if it is not, it might lead to
+ * list_del corruption, as the ioc list is traversed in the cli path.
+ */
+extern spinlock_t gioc_lock;
void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
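
The gioc_lock comment above amounts to a simple rule: every add, delete, or traversal of mpt2sas_ioc_list takes the spinlock. A sketch with hypothetical helpers:

    static void example_register_ioc(struct MPT2SAS_ADAPTER *ioc)
    {
            spin_lock(&gioc_lock);
            list_add_tail(&ioc->list, &mpt2sas_ioc_list);
            spin_unlock(&gioc_lock);
    }

    static void example_unregister_ioc(struct MPT2SAS_ADAPTER *ioc)
    {
            /* a watchdog-triggered removal can no longer race a cli-path
             * list_for_each_entry() over mpt2sas_ioc_list
             */
            spin_lock(&gioc_lock);
            list_del(&ioc->list);
            spin_unlock(&gioc_lock);
    }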
@@ -1095,11 +1131,12 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *
u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
*ioc, u64 sas_address);
-struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address(
+struct _sas_device *mpt2sas_get_sdev_by_addr(
+ struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *__mpt2sas_get_sdev_by_addr(
struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc);
-
void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
/* config shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 4e509604b571..3694b63bd993 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -427,13 +427,16 @@ static int
_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
{
struct MPT2SAS_ADAPTER *ioc;
-
+ /* global ioc lock to protect controller on list operations */
+ spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
if (ioc->id != ioc_number)
continue;
+ spin_unlock(&gioc_lock);
*iocpp = ioc;
return ioc_number;
}
+ spin_unlock(&gioc_lock);
*iocpp = NULL;
return -1;
}
@@ -522,10 +525,15 @@ _ctl_poll(struct file *filep, poll_table *wait)
poll_wait(filep, &ctl_poll_wait, wait);
+ /* global ioc lock to protect controller on list operations */
+ spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
- if (ioc->aen_event_read_flag)
+ if (ioc->aen_event_read_flag) {
+ spin_unlock(&gioc_lock);
return POLLIN | POLLRDNORM;
+ }
}
+ spin_unlock(&gioc_lock);
return 0;
}
@@ -2168,16 +2176,23 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
return -ENODEV;
+ /* pci_access_mutex lock acquired by ioctl path */
+ mutex_lock(&ioc->pci_access_mutex);
if (ioc->shost_recovery || ioc->pci_error_recovery ||
- ioc->is_driver_loading)
- return -EAGAIN;
+ ioc->is_driver_loading || ioc->remove_host) {
+ ret = -EAGAIN;
+ goto out_unlock_pciaccess;
+ }
state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
if (state == NON_BLOCKING) {
- if (!mutex_trylock(&ioc->ctl_cmds.mutex))
- return -EAGAIN;
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
+ ret = -EAGAIN;
+ goto out_unlock_pciaccess;
+ }
} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
- return -ERESTARTSYS;
+ ret = -ERESTARTSYS;
+ goto out_unlock_pciaccess;
}
switch (cmd) {
@@ -2258,6 +2273,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
}
mutex_unlock(&ioc->ctl_cmds.mutex);
+out_unlock_pciaccess:
+ mutex_unlock(&ioc->pci_access_mutex);
return ret;
}
@@ -2711,6 +2728,12 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
"warpdrive\n", ioc->name, __func__);
goto out;
}
+ /* pci_access_mutex lock acquired by sysfs show path */
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ mutex_unlock(&ioc->pci_access_mutex);
+ return 0;
+ }
/* allocate upto GPIOVal 36 entries */
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
@@ -2749,6 +2772,7 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
out:
kfree(io_unit_pg3);
+ mutex_unlock(&ioc->pci_access_mutex);
return rc;
}
static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 3f26147bbc64..0ad09b2bff9c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -79,7 +79,8 @@ static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
/* global parameters */
LIST_HEAD(mpt2sas_ioc_list);
-
+/* global ioc lock for list operations */
+DEFINE_SPINLOCK(gioc_lock);
/* local parameters */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
@@ -176,9 +177,37 @@ struct fw_event_work {
u8 VP_ID;
u8 ignore;
u16 event;
+ struct kref refcount;
char event_data[0] __aligned(4);
};
+static void fw_event_work_free(struct kref *r)
+{
+ kfree(container_of(r, struct fw_event_work, refcount));
+}
+
+static void fw_event_work_get(struct fw_event_work *fw_work)
+{
+ kref_get(&fw_work->refcount);
+}
+
+static void fw_event_work_put(struct fw_event_work *fw_work)
+{
+ kref_put(&fw_work->refcount, fw_event_work_free);
+}
+
+static struct fw_event_work *alloc_fw_event_work(int len)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
+ if (!fw_event)
+ return NULL;
+
+ kref_init(&fw_event->refcount);
+ return fw_event;
+}
+
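
alloc_fw_event_work() pairs a flexible array member with kref_init() so the
variable-length event payload and its reference count share one allocation.
A hedged userspace sketch of that allocation shape (plain calloc/free and a
bare counter stand in for kzalloc/kfree and struct kref):

    #include <stdlib.h>
    #include <string.h>

    struct fw_event {
        int refcount;           /* stands in for struct kref */
        size_t len;
        char event_data[];      /* flexible array member, as in the driver */
    };

    static struct fw_event *alloc_fw_event(const void *payload, size_t len)
    {
        /* one zeroed allocation covers the header plus the payload */
        struct fw_event *ev = calloc(1, sizeof(*ev) + len);

        if (!ev)
            return NULL;
        ev->refcount = 1;       /* kref_init() starts the count at one */
        ev->len = len;
        if (payload)
            memcpy(ev->event_data, payload, len);
        return ev;
    }

    int main(void)
    {
        struct fw_event *ev = alloc_fw_event("data", 5);

        if (ev && --ev->refcount == 0)
            free(ev);
        return 0;
    }
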
/* raid transport support */
static struct raid_template *mpt2sas_raid_template;
@@ -293,8 +322,10 @@ _scsih_set_debug_level(const char *val, struct kernel_param *kp)
return ret;
printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
+ spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
ioc->logging_level = logging_level;
+ spin_unlock(&gioc_lock);
return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
@@ -526,8 +557,61 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
}
}
+static struct _sas_device *
+__mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
+ struct MPT2SAS_TARGET *tgt_priv)
+{
+ struct _sas_device *ret;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ ret = tgt_priv->sdev;
+ if (ret)
+ sas_device_get(ret);
+
+ return ret;
+}
+
+static struct _sas_device *
+mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc,
+ struct MPT2SAS_TARGET *tgt_priv)
+{
+ struct _sas_device *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ ret = __mpt2sas_get_sdev_from_target(ioc, tgt_priv);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return ret;
+}
+
+
+struct _sas_device *
+__mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ goto found_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ sas_device_get(sas_device);
+ return sas_device;
+}
+
/**
- * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search
+ * mpt2sas_get_sdev_by_addr - sas device search
* @ioc: per adapter object
* @sas_address: sas address
* Context: Calling function should acquire ioc->sas_device_lock
@@ -536,24 +620,44 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
* object.
*/
struct _sas_device *
-mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address)
{
struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return sas_device;
+}
+
+static struct _sas_device *
+__mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ assert_spin_locked(&ioc->sas_device_lock);
list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address)
- return sas_device;
+ if (sas_device->handle == handle)
+ goto found_device;
list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address)
- return sas_device;
+ if (sas_device->handle == handle)
+ goto found_device;
return NULL;
+
+found_device:
+ sas_device_get(sas_device);
+ return sas_device;
}
/**
- * _scsih_sas_device_find_by_handle - sas device search
+ * mpt2sas_get_sdev_by_handle - sas device search
* @ioc: per adapter object
* @handle: sas device handle (assigned by firmware)
* Context: Calling function should acquire ioc->sas_device_lock
@@ -562,19 +666,16 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
* object.
*/
static struct _sas_device *
-_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_device *sas_device;
+ unsigned long flags;
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->handle == handle)
- return sas_device;
-
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->handle == handle)
- return sas_device;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return NULL;
+ return sas_device;
}
/**
@@ -583,7 +684,7 @@ _scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* @sas_device: the sas_device object
* Context: This function will acquire ioc->sas_device_lock.
*
- * Removing object and freeing associated memory from the ioc->sas_device_list.
+ * If sas_device is on the list, remove it and decrement its reference count.
*/
static void
_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -594,9 +695,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
if (!sas_device)
return;
+ /*
+ * The lock serializes access to the list, but we still need to verify
+ * that nobody removed the entry while we were waiting on the lock.
+ */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_del(&sas_device->list);
- kfree(sas_device);
+ if (!list_empty(&sas_device->list)) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
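
Two rules make the lookups and the removal above safe together: a lookup
returns its match with an extra reference taken under the list lock, and
removal drops the list's reference only when the entry is still linked, with
list_del_init() leaving the node self-linked so a second remover becomes a
no-op. A small self-contained sketch of both rules, using a pthread mutex in
place of the driver's irq-safe spinlock:

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        int refcount;              /* protected by list_lock in this sketch */
        int handle;
        struct node *next, *prev;  /* intrusive list; self-linked == unlisted */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node head = { .next = &head, .prev = &head };

    static void node_put(struct node *n)
    {
        if (--n->refcount == 0)
            free(n);
    }

    /* lookup rule: return the match with an extra reference, taken under
     * the same lock that protects the list */
    static struct node *get_node_by_handle(int handle)
    {
        struct node *n, *ret = NULL;

        pthread_mutex_lock(&list_lock);
        for (n = head.next; n != &head; n = n->next) {
            if (n->handle == handle) {
                n->refcount++;     /* sas_device_get() */
                ret = n;
                break;
            }
        }
        pthread_mutex_unlock(&list_lock);
        return ret;
    }

    /* removal rule: drop the list's reference only if the entry is still
     * linked; re-linking the node to itself makes a second removal a
     * no-op, like list_del_init() */
    static void remove_node(struct node *n)
    {
        pthread_mutex_lock(&list_lock);
        if (n->next != n) {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            n->next = n->prev = n;
            node_put(n);           /* the reference the list held */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct node *n = calloc(1, sizeof(*n)), *found;

        if (!n)
            return 1;
        n->handle = 42;
        n->refcount = 1;           /* the list's reference */
        n->next = head.next;
        n->prev = &head;
        head.next->prev = n;
        head.next = n;

        found = get_node_by_handle(42);  /* refcount is now 2 */
        remove_node(found);              /* drops the list's reference */
        if (found)
            node_put(found);             /* drops the lookup's: frees */
        return 0;
    }
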
@@ -620,6 +727,7 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
sas_device->handle, (unsigned long long)sas_device->sas_address));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device_get(sas_device);
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -659,6 +767,7 @@ _scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
sas_device->handle, (unsigned long long)sas_device->sas_address));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device_get(sas_device);
list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
_scsih_determine_boot_device(ioc, sas_device, 0);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1208,12 +1317,15 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
goto not_sata;
if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
goto not_sata;
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device_priv_data->sas_target->sas_address);
- if (sas_device && sas_device->device_info &
- MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
- max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+ sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
+ if (sas_device) {
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
not_sata:
@@ -1271,18 +1383,20 @@ _scsih_target_alloc(struct scsi_target *starget)
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
rphy->identify.sas_address);
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_target_priv_data->sdev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
sas_device->channel = starget->channel;
if (test_bit(sas_device->handle, ioc->pd_handles))
sas_target_priv_data->flags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
+
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1324,13 +1438,21 @@ _scsih_target_destroy(struct scsi_target *starget)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
(sas_device->channel == starget->channel))
sas_device->starget = NULL;
+ if (sas_device) {
+ /*
+ * Corresponding get() is in _scsih_target_alloc()
+ */
+ sas_target_priv_data->sdev = NULL;
+ sas_device_put(sas_device);
+
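+ /* second put pairs with __mpt2sas_get_sdev_from_target() above */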
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
out:
@@ -1386,7 +1508,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
sas_target_priv_data->sas_address);
if (sas_device && (sas_device->starget == NULL)) {
sdev_printk(KERN_INFO, sdev,
@@ -1394,6 +1516,10 @@ _scsih_slave_alloc(struct scsi_device *sdev)
__func__, __LINE__);
sas_device->starget = starget;
}
+
+ if (sas_device)
+ sas_device_put(sas_device);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1428,10 +1554,13 @@ _scsih_slave_destroy(struct scsi_device *sdev)
if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_target_priv_data->sas_address);
+ sas_device = __mpt2sas_get_sdev_from_target(ioc,
+ sas_target_priv_data);
if (sas_device && !sas_target_priv_data->num_luns)
sas_device->starget = NULL;
+
+ if (sas_device)
+ sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -2078,7 +2207,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
sas_device_priv_data->sas_target->sas_address);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2112,17 +2241,18 @@ _scsih_slave_configure(struct scsi_device *sdev)
(unsigned long long) sas_device->enclosure_logical_id,
sas_device->slot);
+ sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!ssp_target)
_scsih_display_sata_capabilities(ioc, handle, sdev);
-
_scsih_change_queue_depth(sdev, qdepth);
if (ssp_target) {
sas_read_port_mode_page(sdev);
_scsih_enable_tlr(ioc, sdev);
}
+
return 0;
}
@@ -2509,8 +2639,7 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
device_str, (unsigned long long)priv_target->sas_address);
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- priv_target->sas_address);
+ sas_device = __mpt2sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
if (priv_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
@@ -2529,6 +2658,8 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
"enclosure_logical_id(0x%016llx), slot(%d)\n",
(unsigned long long)sas_device->enclosure_logical_id,
sas_device->slot);
+
+ sas_device_put(sas_device);
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -2604,12 +2735,12 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
- struct _sas_device *sas_device;
- unsigned long flags;
+ struct _sas_device *sas_device = NULL;
u16 handle;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
starget_printk(KERN_INFO, starget, "attempting device reset! "
"scmd(%p)\n", scmd);
@@ -2629,12 +2760,10 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
handle = 0;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc,
- sas_device_priv_data->sas_target->handle);
+ sas_device = mpt2sas_get_sdev_from_target(ioc,
+ target_priv_data);
if (sas_device)
handle = sas_device->volume_handle;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
} else
handle = sas_device_priv_data->sas_target->handle;
@@ -2651,6 +2780,10 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ if (sas_device)
+ sas_device_put(sas_device);
+
return r;
}
@@ -2665,11 +2798,11 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
{
struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT2SAS_DEVICE *sas_device_priv_data;
- struct _sas_device *sas_device;
- unsigned long flags;
+ struct _sas_device *sas_device = NULL;
u16 handle;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT2SAS_TARGET *target_priv_data = starget->hostdata;
starget_printk(KERN_INFO, starget, "attempting target reset! "
"scmd(%p)\n", scmd);
@@ -2689,12 +2822,10 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
handle = 0;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc,
- sas_device_priv_data->sas_target->handle);
+ sas_device = mpt2sas_get_sdev_from_target(ioc,
+ target_priv_data);
if (sas_device)
handle = sas_device->volume_handle;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
} else
handle = sas_device_priv_data->sas_target->handle;
@@ -2711,6 +2842,10 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ if (sas_device)
+ sas_device_put(sas_device);
+
return r;
}
@@ -2768,36 +2903,39 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
return;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
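+ /* the fw_event_list holds its own reference on the event */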
+ fw_event_work_get(fw_event);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
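+ /* the queued delayed work holds a second reference */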
+ fw_event_work_get(fw_event);
queue_delayed_work(ioc->firmware_event_thread,
&fw_event->delayed_work, 0);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/**
- * _scsih_fw_event_free - delete fw_event
+ * _scsih_fw_event_del_from_list - delete fw_event from the list
* @ioc: per adapter object
* @fw_event: object describing the event
* Context: This function will acquire ioc->fw_event_lock.
*
- * This removes firmware event object from link list, frees associated memory.
+ * If the fw_event is on the fw_event_list, remove it and do a put.
*
* Return nothing.
*/
static void
-_scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
+_scsih_fw_event_del_from_list(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
*fw_event)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
- list_del(&fw_event->list);
- kfree(fw_event);
+ if (!list_empty(&fw_event->list)) {
+ list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
+ }
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
-
/**
* _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
@@ -2812,13 +2950,14 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
if (ioc->is_driver_loading)
return;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
}
/**
@@ -2832,12 +2971,29 @@ mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
+}
+
+static struct fw_event_work *dequeue_next_fw_event(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct fw_event_work *fw_event = NULL;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ if (!list_empty(&ioc->fw_event_list)) {
+ fw_event = list_first_entry(&ioc->fw_event_list,
+ struct fw_event_work, list);
+ list_del_init(&fw_event->list);
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
+ return fw_event;
}
/**
@@ -2852,17 +3008,25 @@ mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
static void
_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
- struct fw_event_work *fw_event, *next;
+ struct fw_event_work *fw_event;
if (list_empty(&ioc->fw_event_list) ||
!ioc->firmware_event_thread || in_interrupt())
return;
- list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
- if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
- _scsih_fw_event_free(ioc, fw_event);
- continue;
- }
+ while ((fw_event = dequeue_next_fw_event(ioc))) {
+ /*
+ * Wait on the fw_event to complete. If this returns 1, then
+ * the event was never executed, and we need a put for the
+ * reference the delayed_work had on the fw_event.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from _firmware_event_work()
+ */
+ if (cancel_delayed_work_sync(&fw_event->delayed_work))
+ fw_event_work_put(fw_event);
+
+ fw_event_work_put(fw_event);
}
}
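
After dequeue_next_fw_event() the cleanup loop owns the reference the list
held (list_del_init() removed the entry without a put), and
cancel_delayed_work_sync() returning true means it also inherits the
reference the pending work held. A hedged userspace sketch of that
accounting, with a boolean standing in for the cancel return value:

    #include <stdbool.h>
    #include <stdlib.h>

    struct event {
        int refcount;
        bool queued;    /* stands in: the delayed work has not run yet */
    };

    static void event_put(struct event *ev)
    {
        if (--ev->refcount == 0)
            free(ev);
    }

    /* returns true if the work was cancelled before it ever ran, in
     * which case the caller inherits the reference the work held */
    static bool cancel_work_sync_sim(struct event *ev)
    {
        bool was_pending = ev->queued;

        ev->queued = false;
        return was_pending;
    }

    int main(void)
    {
        struct event *ev = calloc(1, sizeof(*ev));

        if (!ev)
            return 1;
        ev->refcount = 2;   /* one for the list, one for the queued work */
        ev->queued = true;

        /* drain path: dequeuing transferred the list's reference to us */
        if (cancel_work_sync_sim(ev))
            event_put(ev);  /* the work never ran: drop its reference */
        event_put(ev);      /* drop the list's reference: frees here */
        return 0;
    }
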
@@ -3002,15 +3166,15 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
list_for_each_entry(mpt2sas_port,
&sas_expander->sas_port_list, port_list) {
- if (mpt2sas_port->remote_identify.device_type ==
- SAS_END_DEVICE) {
+ if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device =
- mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- mpt2sas_port->remote_identify.sas_address);
- if (sas_device)
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
+ mpt2sas_port->remote_identify.sas_address);
+ if (sas_device) {
set_bit(sas_device->handle,
- ioc->blocking_handles);
+ ioc->blocking_handles);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
}
@@ -3080,7 +3244,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
u16 smid;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
u64 sas_address = 0;
unsigned long flags;
@@ -3110,7 +3274,7 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
@@ -3131,14 +3295,14 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
if (!smid) {
delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
if (!delayed_tr)
- return;
+ goto out;
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
"DELAYED:tr:handle(0x%04x), (open)\n",
ioc->name, handle));
- return;
+ goto out;
}
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
@@ -3150,6 +3314,9 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
mpt2sas_base_put_smid_hi_priority(ioc, smid);
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
}
@@ -4068,7 +4235,6 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
char *desc_scsi_state = ioc->tmp_string;
u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
struct _sas_device *sas_device = NULL;
- unsigned long flags;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT2SAS_TARGET *priv_target = starget->hostdata;
char *device_str = NULL;
@@ -4200,9 +4366,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
device_str, (unsigned long long)priv_target->sas_address);
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- priv_target->sas_address);
+ sas_device = mpt2sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), "
"phy(%d)\n", ioc->name, sas_device->sas_address,
@@ -4211,8 +4375,9 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
"\tenclosure_logical_id(0x%016llx), slot(%d)\n",
ioc->name, sas_device->enclosure_logical_id,
sas_device->slot);
+
+ sas_device_put(sas_device);
}
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), "
@@ -4259,7 +4424,7 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
Mpi2SepRequest_t mpi_request;
struct _sas_device *sas_device;
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
if (!sas_device)
return;
@@ -4274,7 +4439,7 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
&mpi_request)) != 0) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
__FILE__, __LINE__, __func__);
- return;
+ goto out;
}
sas_device->pfa_led_on = 1;
@@ -4284,8 +4449,10 @@ _scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
"enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
+ goto out;
}
+out:
+ sas_device_put(sas_device);
}
/**
@@ -4340,13 +4507,14 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
struct fw_event_work *fw_event;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
fw_event->device_handle = handle;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
}
/**
@@ -4370,19 +4538,17 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* only handle non-raid devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
+ goto out_unlock;
}
starget = sas_device->starget;
sas_target_priv_data = starget->hostdata;
if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
- ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
+ goto out_unlock;
+
starget_printk(KERN_WARNING, starget, "predicted fault\n");
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -4396,7 +4562,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- return;
+ goto out;
}
event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
@@ -4413,6 +4579,14 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
mpt2sas_ctl_add_to_event_log(ioc, event_reply);
kfree(event_reply);
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ goto out;
}
/**
@@ -5148,14 +5322,13 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
sas_address);
if (!sas_device) {
printk(MPT2SAS_ERR_FMT "device is not present "
"handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
+ goto out_unlock;
}
if (unlikely(sas_device->handle != handle)) {
@@ -5172,19 +5345,24 @@ _scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
printk(MPT2SAS_ERR_FMT "device is not present "
"handle(0x%04x), flags!!!\n", ioc->name, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
+ goto out_unlock;
}
/* check if there were any issues with discovery */
if (_scsih_check_access_status(ioc, sas_address, handle,
- sas_device_pg0.AccessStatus)) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ sas_device_pg0.AccessStatus))
+ goto out_unlock;
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
_scsih_ublock_io_device(ioc, sas_address);
+ if (sas_device)
+ sas_device_put(sas_device);
+ return;
+out_unlock:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ sas_device_put(sas_device);
}
/**
@@ -5208,7 +5386,6 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
u32 ioc_status;
__le64 sas_address;
u32 device_info;
- unsigned long flags;
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5250,14 +5427,13 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = mpt2sas_get_sdev_by_addr(ioc,
sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ if (sas_device) {
+ sas_device_put(sas_device);
return 0;
+ }
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
@@ -5267,6 +5443,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
+ kref_init(&sas_device->refcount);
sas_device->handle = handle;
if (_scsih_get_sas_address(ioc, le16_to_cpu
(sas_device_pg0.ParentDevHandle),
@@ -5296,6 +5473,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
else
_scsih_sas_device_add(ioc, sas_device);
+ sas_device_put(sas_device);
return 0;
}
@@ -5344,7 +5522,6 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
- kfree(sas_device);
}
/**
* _scsih_device_remove_by_handle - removing device object by handle
@@ -5363,12 +5540,17 @@ _scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- if (sas_device)
- list_del(&sas_device->list);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+
+ if (sas_device) {
_scsih_remove_device(ioc, sas_device);
+ sas_device_put(sas_device);
+ }
}
/**
@@ -5389,13 +5571,17 @@ mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_address);
- if (sas_device)
- list_del(&sas_device->list);
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc, sas_address);
+ if (sas_device) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+
+ if (sas_device) {
_scsih_remove_device(ioc, sas_device);
+ sas_device_put(sas_device);
+ }
}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
@@ -5716,26 +5902,28 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(event_data->SASAddress);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
sas_address);
- if (!sas_device || !sas_device->starget) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ if (!sas_device || !sas_device->starget)
+ goto out;
target_priv_data = sas_device->starget->hostdata;
- if (!target_priv_data) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ if (!target_priv_data)
+ goto out;
if (event_data->ReasonCode ==
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
target_priv_data->tm_busy = 1;
else
target_priv_data->tm_busy = 0;
+
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -6123,7 +6311,7 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
if (sas_device) {
sas_device->volume_handle = 0;
sas_device->volume_wwid = 0;
@@ -6142,6 +6330,8 @@ _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc,
/* exposing raid component */
if (starget)
starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+
+ sas_device_put(sas_device);
}
/**
@@ -6170,7 +6360,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
&volume_wwid);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle);
if (sas_device) {
set_bit(handle, ioc->pd_handles);
if (sas_device->starget && sas_device->starget->hostdata) {
@@ -6189,6 +6379,8 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
/* hiding raid component */
if (starget)
starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+
+ sas_device_put(sas_device);
}
/**
@@ -6221,7 +6413,6 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element)
{
struct _sas_device *sas_device;
- unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
@@ -6231,11 +6422,11 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
set_bit(handle, ioc->pd_handles);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device_put(sas_device);
return;
+ }
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6509,7 +6700,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
u16 handle, parent_handle;
u32 state;
struct _sas_device *sas_device;
- unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u32 ioc_status;
@@ -6542,12 +6732,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
if (!ioc->is_warpdrive)
set_bit(handle, ioc->pd_handles);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device)
+ sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device_put(sas_device);
return;
+ }
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
@@ -7015,6 +7204,7 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
struct _raid_device *raid_device, *raid_device_next;
struct list_head tmp_list;
unsigned long flags;
+ LIST_HEAD(head);
printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
ioc->name);
@@ -7022,14 +7212,29 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
/* removing unresponding end devices */
printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
ioc->name);
+
+ /*
+ * Iterate, pulling off devices marked as non-responding. We become the
+ * owner for the reference the list had on any object we prune.
+ */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry_safe(sas_device, sas_device_next,
- &ioc->sas_device_list, list) {
+ &ioc->sas_device_list, list) {
if (!sas_device->responding)
- mpt2sas_device_remove_by_sas_address(ioc,
- sas_device->sas_address);
+ list_move_tail(&sas_device->list, &head);
else
sas_device->responding = 0;
}
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ /*
+ * Now, uninitialize and remove the unresponding devices we pruned.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
+ _scsih_remove_device(ioc, sas_device);
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
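
The rewritten loop above holds sas_device_lock only long enough to move
stale entries onto a private list; the heavier teardown then runs with the
lock dropped, and the references the shared list held travel with the moved
entries. A minimal sketch of this splice-then-process idiom, with a pthread
mutex in place of the irq-safe spinlock and free() standing in for the put
of the list's reference:

    #include <pthread.h>
    #include <stdlib.h>

    struct dev {
        int responding;
        struct dev *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dev *dev_list;

    static void prune_unresponding(void)
    {
        struct dev **pp, *d, *stale = NULL;

        /* phase 1: under the lock, only unlink the stale entries */
        pthread_mutex_lock(&lock);
        for (pp = &dev_list; (d = *pp) != NULL; ) {
            if (!d->responding) {
                *pp = d->next;       /* unlink */
                d->next = stale;     /* collect on the private list */
                stale = d;
            } else {
                d->responding = 0;   /* re-arm for the next scan */
                pp = &d->next;
            }
        }
        pthread_mutex_unlock(&lock);

        /* phase 2: the heavier teardown runs with the lock dropped */
        while ((d = stale) != NULL) {
            stale = d->next;
            free(d);                 /* the list's reference goes here */
        }
    }

    int main(void)
    {
        struct dev *a = calloc(1, sizeof(*a));
        struct dev *b = calloc(1, sizeof(*b));

        if (!a || !b)
            return 1;
        a->responding = 1;
        a->next = b;
        b->responding = 0;
        dev_list = a;

        prune_unresponding();        /* frees b, keeps a */
        free(a);
        return 0;
    }
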
/* removing unresponding volumes */
if (ioc->ir_firmware) {
@@ -7179,11 +7384,11 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
}
phys_disk_num = pd_pg0.PhysDiskNum;
handle = le16_to_cpu(pd_pg0.DevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ sas_device = mpt2sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device_put(sas_device);
continue;
+ }
if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle) != 0)
@@ -7302,12 +7507,12 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
if (!(_scsih_is_end_device(
le32_to_cpu(sas_device_pg0.DeviceInfo))))
continue;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = mpt2sas_get_sdev_by_addr(ioc,
le64_to_cpu(sas_device_pg0.SASAddress));
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ if (sas_device) {
+ sas_device_put(sas_device);
continue;
+ }
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
@@ -7410,17 +7615,27 @@ _firmware_event_work(struct work_struct *work)
struct fw_event_work, delayed_work.work);
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
+ _scsih_fw_event_del_from_list(ioc, fw_event);
+
/* the queue is being flushed so ignore this event */
- if (ioc->remove_host ||
- ioc->pci_error_recovery) {
- _scsih_fw_event_free(ioc, fw_event);
+ if (ioc->remove_host || ioc->pci_error_recovery) {
+ fw_event_work_put(fw_event);
return;
}
switch (fw_event->event) {
case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
- while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ while (scsi_host_in_recovery(ioc->shost) ||
+ ioc->shost_recovery) {
+ /*
+ * If we're unloading, bail. Otherwise, this can become
+ * an infinite loop.
+ */
+ if (ioc->remove_host)
+ goto out;
+
ssleep(1);
+ }
_scsih_remove_unresponding_sas_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
break;
@@ -7469,7 +7684,8 @@ _firmware_event_work(struct work_struct *work)
_scsih_sas_ir_operation_status_event(ioc, fw_event);
break;
}
- _scsih_fw_event_free(ioc, fw_event);
+out:
+ fw_event_work_put(fw_event);
}
/**
@@ -7607,7 +7823,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
}
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(sz);
if (!fw_event) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -7620,6 +7836,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
fw_event->VP_ID = mpi_reply->VP_ID;
fw_event->event = event;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
return;
}
@@ -7867,7 +8084,9 @@ _scsih_remove(struct pci_dev *pdev)
sas_remove_host(shost);
scsi_remove_host(shost);
mpt2sas_base_detach(ioc);
+ spin_lock(&gioc_lock);
list_del(&ioc->list);
+ spin_unlock(&gioc_lock);
scsi_host_put(shost);
}
@@ -7966,6 +8185,48 @@ _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
}
}
+static struct _sas_device *get_next_sas_device(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ if (!list_empty(&ioc->sas_device_init_list)) {
+ sas_device = list_first_entry(&ioc->sas_device_init_list,
+ struct _sas_device, list);
+ sas_device_get(sas_device);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return sas_device;
+}
+
+static void sas_device_make_active(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+ * Since we dropped the lock during the call to port_add(), we need to
+ * be careful here that somebody else didn't move or delete this item
+ * while we were busy with other things.
+ *
+ * If it was on the list, we need a put() for the reference the list
+ * had. Either way, we need a get() for the destination list.
+ */
+ if (!list_empty(&sas_device->list)) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
+ sas_device_get(sas_device);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
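
get_next_sas_device() above, like dequeue_next_fw_event() earlier, peeks at
the head of a list under the lock and returns it with an extra reference so
the caller can work on it with the lock dropped; sas_device_make_active()
then re-checks membership before moving the entry, because the list may have
changed in the meantime. A hedged sketch of that pop-and-recheck loop on
singly linked lists:

    #include <pthread.h>
    #include <stdlib.h>

    struct item {
        int refcount;
        struct item *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *init_list, *active_list;

    static void item_put(struct item *it)
    {
        if (--it->refcount == 0)
            free(it);
    }

    /* peek at the head under the lock and return it with an extra ref */
    static struct item *get_next_item(void)
    {
        struct item *it;

        pthread_mutex_lock(&lock);
        it = init_list;
        if (it)
            it->refcount++;
        pthread_mutex_unlock(&lock);
        return it;
    }

    /* move the item between lists; transferring the list reference in
     * place is equivalent to the driver's put() for the old list
     * followed by get() for the new one */
    static void make_active(struct item *it)
    {
        pthread_mutex_lock(&lock);
        if (init_list == it) {     /* recheck: still where we left it? */
            init_list = it->next;
            it->next = active_list;
            active_list = it;
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct item *it = calloc(1, sizeof(*it));

        if (!it)
            return 1;
        it->refcount = 1;          /* the init list's reference */
        init_list = it;

        while ((it = get_next_item()) != NULL) {
            /* ...port setup would run here, with the lock dropped... */
            make_active(it);
            item_put(it);          /* drop the lookup reference */
        }

        while ((it = active_list) != NULL) {  /* teardown */
            active_list = it->next;
            item_put(it);
        }
        return 0;
    }
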
/**
* _scsih_probe_sas - reporting sas devices to sas transport
* @ioc: per adapter object
@@ -7975,34 +8236,30 @@ _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc)
static void
_scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
{
- struct _sas_device *sas_device, *next;
- unsigned long flags;
-
- /* SAS Device List */
- list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
- list) {
+ struct _sas_device *sas_device;
- if (ioc->hide_drives)
- continue;
+ if (ioc->hide_drives)
+ return;
+ while ((sas_device = get_next_sas_device(ioc))) {
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
- list_del(&sas_device->list);
- kfree(sas_device);
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ sas_device_put(sas_device);
continue;
} else if (!sas_device->starget) {
if (!ioc->is_driver_loading) {
mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- list_del(&sas_device->list);
- kfree(sas_device);
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ sas_device_put(sas_device);
continue;
}
}
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_move_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ sas_device_make_active(ioc, sas_device);
+ sas_device_put(sas_device);
}
}
@@ -8142,7 +8399,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc = shost_priv(shost);
memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
INIT_LIST_HEAD(&ioc->list);
+ spin_lock(&gioc_lock);
list_add_tail(&ioc->list, &mpt2sas_ioc_list);
+ spin_unlock(&gioc_lock);
ioc->shost = shost;
ioc->id = mpt_ids++;
sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
@@ -8167,6 +8426,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
+ /* initializing pci_access_mutex lock */
+ mutex_init(&ioc->pci_access_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
spin_lock_init(&ioc->scsi_lookup_lock);
spin_lock_init(&ioc->sas_device_lock);
@@ -8269,7 +8530,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_attach_fail:
destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
+ spin_lock(&gioc_lock);
list_del(&ioc->list);
+ spin_unlock(&gioc_lock);
scsi_host_put(shost);
return rv;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index ff2500ab9ba4..af868009395d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1323,15 +1323,17 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
rphy->identify.sas_address);
if (sas_device) {
*identifier = sas_device->enclosure_logical_id;
rc = 0;
+ sas_device_put(sas_device);
} else {
*identifier = 0;
rc = -ENXIO;
}
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return rc;
}
@@ -1351,12 +1353,14 @@ _transport_get_bay_identifier(struct sas_rphy *rphy)
int rc;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device = __mpt2sas_get_sdev_by_addr(ioc,
rphy->identify.sas_address);
- if (sas_device)
+ if (sas_device) {
rc = sas_device->slot;
- else
+ sas_device_put(sas_device);
+ } else {
rc = -ENXIO;
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return rc;
}
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index c34c1157907b..ec27ad2d186f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.31
+ * mpi2.h Version: 02.00.35
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -88,6 +88,10 @@
* Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
* 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
* 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -121,7 +125,7 @@
#define MPI2_VERSION_02_05 (0x0205)
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1F)
+#define MPI2_HEADER_VERSION_UNIT (0x23)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index e261a3153bb3..581fdb375db5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.26
+ * mpi2_cnfg.h Version: 02.00.29
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -165,6 +165,20 @@
* match the specification.
* 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
* future use.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ * more defines for the BiosOptions field.
* --------------------------------------------------------------------------
*/
@@ -724,6 +738,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008)
#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -1311,7 +1326,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
U32 BiosOptions; /*0x04 */
U32 IOCSettings; /*0x08 */
- U32 Reserved1; /*0x0C */
+ U8 SSUTimeout; /*0x0C */
+ U8 Reserved1; /*0x0D */
+ U16 Reserved2; /*0x0E */
U32 DeviceSettings; /*0x10 */
U16 NumberOfDevices; /*0x14 */
U16 UEFIVersion; /*0x16 */
@@ -1323,9 +1340,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
*PTR_MPI2_CONFIG_PAGE_BIOS_1,
Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
-#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800)
+#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300)
+#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100)
+#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300)
+
#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
@@ -2633,9 +2665,9 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
U8
ControlGroup; /*0x2E */
U8
- Reserved1; /*0x2F */
+ EnclosureLevel; /*0x2F */
U32
- Reserved2; /*0x30 */
+ ConnectorName[4]; /*0x30 */
U32
Reserved3; /*0x34 */
} MPI2_CONFIG_PAGE_SAS_DEV_0,
@@ -2643,7 +2675,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
Mpi2SasDevicePage0_t,
*pMpi2SasDevicePage0_t;
-#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
/*values for SAS Device Page 0 AccessStatus field */
#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
@@ -2683,6 +2715,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
@@ -3019,8 +3052,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
NumSlots; /*0x18 */
U16
StartSlot; /*0x1A */
- U16
+ U8
Reserved2; /*0x1C */
+ U8
+ EnclosureLevel; /*0x1D */
U16
SEPDevHandle; /*0x1E */
U32
@@ -3031,9 +3066,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
-#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 490830957806..d7598cc4bb8e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.23
+ * mpi2_ioc.h Version: 02.00.24
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -132,6 +132,7 @@
* Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
* Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
* Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
* --------------------------------------------------------------------------
*/
@@ -1598,6 +1599,7 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
/* values for HashImageType */
#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
/* values for HashAlgorithm */
#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 904910d8a737..1629e5bce7e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.11
+ * mpi2_tool.h Version: 02.00.12
*
* Version History
* ---------------
@@ -33,6 +33,7 @@
* 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
* it uses MPI Chain SGE as well as MPI Simple SGE.
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
* --------------------------------------------------------------------------
*/
@@ -100,6 +101,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 14a781b6b88d..d4f1dcdb8361 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -83,10 +83,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
-static int max_msix_vectors = 8;
+static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
- " max msix vectors - (default=8)");
+ " max msix vectors");
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1009,8 +1009,30 @@ _base_interrupt(int irq, void *bus_id)
}
wmb();
- writel(reply_q->reply_post_host_index | (msix_index <<
- MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+
+ /* Update the Reply Post Host Index.
+ * For HBAs that support the combined reply queue feature:
+ * 1. Get the correct Supplemental Reply Post Host Index Register,
+ * i.e. the (msix_index / 8)th entry of the Supplemental Reply
+ * Post Host Index Register address bank, replyPostRegisterIndex[].
+ * 2. Update that register with the new reply host index value in
+ * the ReplyPostIndex field, and with msix_index reduced to a
+ * value between 0 and 7 (modulo 8) in the MSIxIndex field, since
+ * each Supplemental Reply Post Host Index Register serves 8 MSI-X
+ * vectors.
+ *
+ * For other HBAs, just update the Reply Post Host Index register
+ * with the new reply host index value in the ReplyPostIndex field
+ * and msix_index in the MSIxIndex field.
+ */
+ if (ioc->msix96_vector)
+ writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT),
+ ioc->replyPostRegisterIndex[msix_index/8]);
+ else
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT),
+ &ioc->chip->ReplyPostHostIndex);
atomic_dec(&reply_q->busy);
return IRQ_HANDLED;
}
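
Both write paths derive everything from msix_index: the combined reply queue
path selects supplemental register msix_index / 8 and folds the vector into
that register's 0..7 MSIxIndex range, while the legacy path packs the full
index. A standalone sketch of the packing arithmetic; the shift value 24 is
an assumption standing in for MPI2_RPHI_MSIX_INDEX_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed value standing in for MPI2_RPHI_MSIX_INDEX_SHIFT: the
     * MSIxIndex field lives in the top byte of the register */
    #define RPHI_MSIX_INDEX_SHIFT 24

    int main(void)
    {
        uint32_t reply_post_host_index = 0x1234;
        unsigned int msix_index = 21;

        /* combined reply queue: supplemental register 21 / 8 = 2, and
         * the vector folded into that register's 0..7 range */
        unsigned int reg = msix_index / 8;
        uint32_t val = reply_post_host_index |
                       ((uint32_t)(msix_index & 7) << RPHI_MSIX_INDEX_SHIFT);

        printf("write 0x%08x to replyPostRegisterIndex[%u]\n", val, reg);

        /* legacy path: the full msix_index goes into the one register */
        val = reply_post_host_index |
              ((uint32_t)msix_index << RPHI_MSIX_INDEX_SHIFT);
        printf("write 0x%08x to ReplyPostHostIndex\n", val);
        return 0;
    }
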
@@ -1338,7 +1360,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (!sges_left) {
+ if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
"pci_map_sg failed: request for %d bytes!\n",
scsi_bufflen(scmd));
@@ -1407,7 +1429,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
fill_in_last_segment:
/* fill the last segment */
- while (sges_left) {
+ while (sges_left > 0) {
if (sges_left == 1)
_base_add_sg_single_ieee(sg_local,
simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
@@ -1560,8 +1582,6 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
pci_read_config_word(ioc->pdev, base + 2, &message_control);
ioc->msix_vector_count = (message_control & 0x3FF) + 1;
- if (ioc->msix_vector_count > 8)
- ioc->msix_vector_count = 8;
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"msix is supported, vector_count(%d)\n",
ioc->name, ioc->msix_vector_count));
@@ -1793,6 +1813,36 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * mpt3sas_base_unmap_resources - free controller resources
+ * @ioc: per adapter object
+ */
+void
+mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
+ ioc->name, __func__));
+
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+
+ if (ioc->msix96_vector)
+ kfree(ioc->replyPostRegisterIndex);
+
+ if (ioc->chip_phys) {
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+/**
* mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
* @ioc: per adapter object
*
@@ -1843,7 +1893,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
+ (!memap_sz || !pio_sz); i++) {
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
@@ -1856,15 +1907,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
- ioc->name);
- r = -EINVAL;
- goto out_fail;
- }
}
}
+ if (ioc->chip == NULL) {
+		pr_err(MPT3SAS_FMT
+		    "unable to map adapter memory or resource not found\n",
+		    ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+
_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
@@ -1880,6 +1932,36 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_fail;
+	/* Use the combined reply queue feature only on SAS3 C0 and newer
+	 * revision HBAs, and only when the reply queue count is greater
+	 * than 8.
+	 */
+ if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+		/* Determine the Supplemental Reply Post Host Index Register
+		 * addresses. The register bank starts at offset
+		 * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and each register
+		 * is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes after
+		 * the previous one.
+		 */
+ ioc->replyPostRegisterIndex = kcalloc(
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+ sizeof(resource_size_t *), GFP_KERNEL);
+ if (!ioc->replyPostRegisterIndex) {
+ dfailprintk(ioc, printk(MPT3SAS_FMT
+ "allocation for reply Post Register Index failed!!!\n",
+ ioc->name));
+ r = -ENOMEM;
+ goto out_fail;
+ }
+
+ for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+ ioc->replyPostRegisterIndex[i] = (resource_size_t *)
+ ((u8 *)&ioc->chip->Doorbell +
+ MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+ (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
+ }
+ } else
+ ioc->msix96_vector = 0;
+
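
A compact sketch of the address computation in the loop above, under the
register layout this patch assumes (a bank of registers at a fixed offset
from the mapped Doorbell register):

/* Sketch: address of the i-th Supplemental Reply Post Host Index
 * Register, mirroring the expression used in the loop. */
static resource_size_t *sup_reg_addr(struct MPT3SAS_ADAPTER *ioc,
	unsigned int i)
{
	return (resource_size_t *)((u8 *)&ioc->chip->Doorbell +
	    MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
	    (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}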
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -1895,12 +1977,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
return 0;
out_fail:
- if (ioc->chip_phys)
- iounmap(ioc->chip);
- ioc->chip_phys = 0;
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ mpt3sas_base_unmap_resources(ioc);
return r;
}
@@ -2290,6 +2367,99 @@ _base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
/**
+ * _base_display_dell_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_dell_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI25_MFGPAGE_DEVID_SAS3008:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT3SAS_DELL_12G_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_DELL_12G_HBA_BRANDING);
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
+ ioc->pdev->subsystem_device);
+ break;
+ }
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name,
+ ioc->pdev->subsystem_device);
+ break;
+ }
+}
+
+/**
+ * _base_display_cisco_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_cisco_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO)
+ return;
+
+ switch (ioc->pdev->device) {
+ case MPI25_MFGPAGE_DEVID_SAS3008:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+ break;
+ case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+ break;
+ case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+ break;
+ case MPI25_MFGPAGE_DEVID_SAS3108_1:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ break;
+ case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+ break;
+ default:
+ pr_info(MPT3SAS_FMT
+ "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->name, ioc->pdev->subsystem_device);
+ break;
+ }
+}
+
+/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*
@@ -2319,6 +2489,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
bios_version & 0x000000FF);
_base_display_intel_branding(ioc);
+ _base_display_dell_branding(ioc);
+ _base_display_cisco_branding(ioc);
pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
@@ -3137,6 +3309,9 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+
+static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
int sleep_flag)
{
@@ -3679,6 +3854,64 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
}
/**
+ * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int rc;
+
+ dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ dfailprintk(ioc, printk(MPT3SAS_FMT
+ "%s: host in pci error recovery\n", ioc->name, __func__));
+ return -EFAULT;
+ }
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
+ (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, printk(MPT3SAS_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ dfailprintk(ioc, printk(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state));
+ return -EFAULT;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, sleep_flag);
+ return rc;
+}
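
Note the control flow here: only the READY/OPERATIONAL fast path returns
early, so even a successful wait for the READY state falls through to the
issue_diag_reset label and resets the controller; a zero return does not
mean the reset was skipped.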
+
+/**
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
* @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3696,6 +3929,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
+ r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+ if (r) {
+ dfailprintk(ioc, printk(MPT3SAS_FMT
+ "%s: failed getting to correct state\n",
+ ioc->name, __func__));
+ return r;
+ }
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
memset(&mpi_request, 0, mpi_request_sz);
@@ -3781,7 +4021,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
mpi_request.VF_ID = 0; /* TODO */
mpi_request.VP_ID = 0;
- mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.MsgVersion = cpu_to_le16(MPI25_VERSION);
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
if (_base_is_controller_msix_enabled(ioc))
@@ -4522,8 +4762,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* initialize reply post host index */
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
- &ioc->chip->ReplyPostHostIndex);
+ if (ioc->msix96_vector)
+ writel((reply_q->msix_index & 7)<<
+ MPI2_RPHI_MSIX_INDEX_SHIFT,
+ ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
+ else
+ writel(reply_q->msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_host_index;
}
@@ -4562,8 +4809,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
- struct pci_dev *pdev = ioc->pdev;
-
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -4574,18 +4819,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->shost_recovery = 0;
}
- _base_free_irq(ioc);
- _base_disable_msix(ioc);
-
- if (ioc->chip_phys && ioc->chip)
- iounmap(ioc->chip);
- ioc->chip_phys = 0;
-
- if (pci_is_enabled(pdev)) {
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
- }
+ mpt3sas_base_unmap_resources(ioc);
return;
}
@@ -4600,6 +4834,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
int r, i;
int cpu_id, last_cpu_id = 0;
+ u8 revision;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -4619,6 +4854,20 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
}
+	/* Check whether the controller revision is C0 or above;
+	 * only C0 and later revision controllers support 96 MSI-X vectors.
+	 */
+ revision = ioc->pdev->revision;
+
+ if ((ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3004 ||
+ ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3008 ||
+ ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_1 ||
+ ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_2 ||
+ ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_5 ||
+ ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_6) &&
+ (revision >= 0x02))
+ ioc->msix96_vector = 1;
+
ioc->rdpq_array_enable_assigned = 0;
ioc->dma_mask = 0;
r = mpt3sas_base_map_resources(ioc);
@@ -4641,7 +4890,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
ioc->build_sg = &_base_build_sg_ieee;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
- ioc->mpi25 = 1;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
/*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index afa881682bef..f0e462b0880d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -71,8 +71,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "04.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 4
+#define MPT3SAS_DRIVER_VERSION "09.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 9
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -152,12 +152,49 @@
#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
/*
+ * Dell HBA branding
+ */
+#define MPT3SAS_DELL_12G_HBA_BRANDING \
+ "Dell 12Gbps HBA"
+
+/*
+ * Dell HBA SSDIDs
+ */
+#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46
+
+/*
+ * Cisco HBA branding
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \
+ "Cisco 9300-8E 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \
+ "Cisco 9300-8i 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \
+ "Cisco 12G Modular SAS Pass through Controller"
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \
+ "UCS C3X60 12G SAS Pass through Controller"
+/*
+ * Cisco HBA SSDIDs
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C
+#define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154
+#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156
+
+/*
* status bits for ioc->diag_buffer_status
*/
#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+/*
+ * Combined Reply Queue constants.
+ * There are twelve Supplemental Reply Post Host Index Registers,
+ * each 0x10 bytes after the previous one.
+ */
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
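
Taken together these two constants describe twelve registers serving
eight MSI-X vectors each, i.e. the 96 vectors that the new msix96_vector
flag refers to.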
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
@@ -173,6 +210,8 @@
#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+#define VIRTUAL_IO_FAILED_RETRY (0x32010081)
+
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
@@ -294,7 +333,8 @@ struct _internal_cmd {
* @responding: used in _scsih_sas_device_mark_responding
* @fast_path: fast path feature enable bit
* @pfa_led_on: flag for PFA LED status
- *
+ * @pend_sas_rphy_add: flag set while the device is inside the
+ *	sas_rphy_add() addition routine.
*/
struct _sas_device {
struct list_head list;
@@ -315,6 +355,9 @@ struct _sas_device {
u8 responding;
u8 fast_path;
u8 pfa_led_on;
+ u8 pend_sas_rphy_add;
+ u8 enclosure_level;
+ u8 connector_name[4];
};
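
One caveat worth flagging about the new fields: ConnectorName is copied
into connector_name as four raw bytes, so the "%s" prints in this series
are only safe if the firmware NUL-pads names shorter than four
characters; a name using all four bytes would leave the array without a
terminator.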
/**
@@ -728,7 +771,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 * is assigned only once
 * @reply_queue_count: number of reply queues
 * @reply_queue_list: linked list containing the reply queue info
- * @reply_post_host_index: head index in the pool where FW completes IO
+ * @msix96_vector: 96 MSI-X vector support
+ * @replyPostRegisterIndex: addresses of the Supplemental Reply Post Host
+ *	Index Registers
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
 * @temp_sensors_count: number of temperature sensors
@@ -814,7 +858,6 @@ struct MPT3SAS_ADAPTER {
MPT_BUILD_SG_SCMD build_sg_scmd;
MPT_BUILD_SG build_sg;
MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
- u8 mpi25;
u16 sge_size_ieee;
/* function ptr for MPI sg elements only */
@@ -937,6 +980,10 @@ struct MPT3SAS_ADAPTER {
u8 reply_queue_count;
struct list_head reply_queue_list;
+ u8 msix96_vector;
+ /* reply post register index */
+ resource_size_t **replyPostRegisterIndex;
+
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
u8 temp_sensors_count;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5a97e3286719..8ccef38523fa 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -585,6 +585,22 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!sas_device)
return;
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ if (sas_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+
+ if (sas_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_del(&sas_device->list);
@@ -663,6 +679,18 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot));
+
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__,
+ sas_device->enclosure_level, sas_device->connector_name));
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -704,6 +732,18 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
__func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot));
+
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, sas_device->enclosure_level,
+ sas_device->connector_name));
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
_scsih_determine_boot_device(ioc, sas_device, 0);
@@ -1772,10 +1812,16 @@ _scsih_slave_configure(struct scsi_device *sdev)
"sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
ds, handle, (unsigned long long)sas_device->sas_address,
sas_device->phy, (unsigned long long)sas_device->device_name);
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
- ds, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ if (sas_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+ ds, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ds, sas_device->enclosure_level,
+ sas_device->connector_name);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2189,10 +2235,17 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
sas_device->handle,
(unsigned long long)sas_device->sas_address,
sas_device->phy);
- starget_printk(KERN_INFO, starget,
- "enclosure_logical_id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+		if (sas_device->connector_name[0] != '\0')
+			starget_printk(KERN_INFO, starget,
+			    "enclosure level(0x%04x), connector name(%s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -2552,6 +2605,75 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_internal_device_block - block the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is blocked without error; if not,
+ * print an error.
+ */
+static void
+_scsih_internal_device_block(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+
+ r = scsi_internal_device_block(sdev);
+ if (r == -EINVAL)
+ sdev_printk(KERN_WARNING, sdev,
+ "device_block failed with return(%d) for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle, r);
+}
+
+/**
+ * _scsih_internal_device_unblock - unblock the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is unblocked without error; if not, retry
+ * by blocking and then unblocking.
+ */
+
+static void
+_scsih_internal_device_unblock(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
+ "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 0;
+ r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ if (r == -EINVAL) {
+ /* The device has been set to SDEV_RUNNING by SD layer during
+ * device addition but the request queue is still stopped by
+ * our earlier block call. We need to perform a block again
+ * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
+
+ sdev_printk(KERN_WARNING, sdev,
+ "device_unblock failed with return(%d) for handle(0x%04x) "
+ "performing a block followed by an unblock\n",
+ sas_device_priv_data->sas_target->handle, r);
+ sas_device_priv_data->block = 1;
+ r = scsi_internal_device_block(sdev);
+ if (r)
+ sdev_printk(KERN_WARNING, sdev, "retried device_block "
+ "failed with return(%d) for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle, r);
+
+ sas_device_priv_data->block = 0;
+ r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ if (r)
+ sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
+ " failed with return(%d) for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle, r);
+ }
+}
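
Condensed, the recovery path above amounts to the following sketch
(assuming only the -EINVAL behaviour described in the in-code comment):

/* Sketch: recover a device left in SDEV_RUNNING with a stopped request
 * queue by forcing one more block/unblock cycle. */
static void force_unblock(struct scsi_device *sdev)
{
	if (scsi_internal_device_unblock(sdev, SDEV_RUNNING) == -EINVAL) {
		scsi_internal_device_block(sdev);
		scsi_internal_device_unblock(sdev, SDEV_RUNNING);
	}
}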
+
+/**
* _scsih_ublock_io_all_device - unblock every device
* @ioc: per adapter object
*
@@ -2570,11 +2692,10 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
if (!sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 0;
dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
"device_running, handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle));
- scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ _scsih_internal_device_unblock(sdev, sas_device_priv_data);
}
}
@@ -2599,10 +2720,9 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
continue;
- if (sas_device_priv_data->block) {
- sas_device_priv_data->block = 0;
- scsi_internal_device_unblock(sdev, SDEV_RUNNING);
- }
+ if (sas_device_priv_data->block)
+ _scsih_internal_device_unblock(sdev,
+ sas_device_priv_data);
}
}
@@ -2625,10 +2745,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
continue;
if (sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 1;
- scsi_internal_device_block(sdev);
- sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
+ _scsih_internal_device_block(sdev, sas_device_priv_data);
}
}
@@ -2644,6 +2761,11 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
+ struct _sas_device *sas_device;
+
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
@@ -2653,10 +2775,9 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 1;
- scsi_internal_device_block(sdev);
- sdev_printk(KERN_INFO, sdev,
- "device_blocked, handle(0x%04x)\n", handle);
+ if (sas_device->pend_sas_rphy_add)
+ continue;
+ _scsih_internal_device_block(sdev, sas_device_priv_data);
}
}
@@ -2806,6 +2927,18 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
"setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, handle,
(unsigned long long)sas_address));
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+			"setting delete flag: enclosure logical id(0x%016llx),"
+ " slot(%d)\n", ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot));
+	if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: enclosure level(0x%04x),"
+ " connector name( %s)\n", ioc->name,
+ sas_device->enclosure_level,
+ sas_device->connector_name));
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -3821,10 +3954,19 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
"\tsas_address(0x%016llx), phy(%d)\n",
ioc->name, (unsigned long long)
sas_device->sas_address, sas_device->phy);
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ if (sas_device->enclosure_handle != 0)
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx),"
+ "slot(%d)\n", ioc->name,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0])
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure level(0x%04x),"
+ " connector name( %s)\n", ioc->name,
+ sas_device->enclosure_level,
+ sas_device->connector_name);
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -3999,7 +4141,16 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
}
- starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget, "predicted fault, "
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ starget_printk(KERN_WARNING, starget, "predicted fault, "
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4119,8 +4270,15 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
_scsih_smart_predicted_fault(ioc,
le16_to_cpu(mpi_reply->DevHandle));
mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
- }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+ ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
+ (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
+ (scmd->sense_buffer[2] == HARDWARE_ERROR)))
+ _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+#endif
+ }
switch (ioc_status) {
case MPI2_IOCSTATUS_BUSY:
case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
@@ -4146,6 +4304,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->device->expecting_cc_ua = 1;
}
break;
+ } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
+ scmd->result = DID_RESET << 16;
+ break;
}
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -4788,6 +4949,16 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
+ if (sas_device_pg0.Flags &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0.ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
}
/* check if device is present */
@@ -4894,14 +5065,24 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
ioc->name, __FILE__, __LINE__, __func__);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
- sas_device->slot =
- le16_to_cpu(sas_device_pg0.Slot);
+ if (sas_device->enclosure_handle != 0)
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
sas_device->device_info = device_info;
sas_device->sas_address = sas_address;
sas_device->phy = sas_device_pg0.PhyNum;
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ le16_to_cpu(sas_device_pg0.EnclosureLevel);
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0.ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
/* get enclosure_logical_id */
if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
@@ -4943,6 +5124,18 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot));
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__,
+ sas_device->enclosure_level,
+ sas_device->connector_name));
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
@@ -4959,12 +5152,34 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
"removing handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, sas_device->handle,
(unsigned long long) sas_device->sas_address);
+ if (sas_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+		"removing: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name,
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot));
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
+ ioc->name, __func__, sas_device->enclosure_level,
+ sas_device->connector_name));
kfree(sas_device);
}
@@ -6357,9 +6572,7 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
- * @sas_address: sas address
- * @slot: enclosure slot id
- * @handle: device handle
+ * @sas_device_pg0: SAS Device page 0
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_sas_devices.
@@ -6367,8 +6580,8 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
* Return nothing.
*/
static void
-_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u16 slot, u16 handle)
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
+Mpi2SasDevicePage0_t *sas_device_pg0)
{
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
@@ -6377,8 +6590,8 @@ _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->sas_address == sas_address &&
- sas_device->slot == slot) {
+ if ((sas_device->sas_address == sas_device_pg0->SASAddress) &&
+ (sas_device->slot == sas_device_pg0->Slot)) {
sas_device->responding = 1;
starget = sas_device->starget;
if (starget && starget->hostdata) {
@@ -6387,22 +6600,40 @@ _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
sas_target_priv_data->deleted = 0;
} else
sas_target_priv_data = NULL;
- if (starget)
+ if (starget) {
starget_printk(KERN_INFO, starget,
- "handle(0x%04x), sas_addr(0x%016llx), "
- "enclosure logical id(0x%016llx), "
- "slot(%d)\n", handle,
- (unsigned long long)sas_device->sas_address,
+ "handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device_pg0->DevHandle,
(unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->handle == handle)
+ sas_device->sas_address);
+
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx),"
+ " slot(%d)\n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ if (sas_device_pg0->Flags &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ le16_to_cpu(sas_device_pg0->EnclosureLevel);
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0->ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
+
+ if (sas_device->handle == sas_device_pg0->DevHandle)
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
- sas_device->handle = handle;
+ sas_device->handle = sas_device_pg0->DevHandle;
if (sas_target_priv_data)
- sas_target_priv_data->handle = handle;
+ sas_target_priv_data->handle =
+ sas_device_pg0->DevHandle;
goto out;
}
}
@@ -6441,13 +6672,15 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
- handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ handle = sas_device_pg0.DevHandle =
+ le16_to_cpu(sas_device_pg0.DevHandle);
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
continue;
- _scsih_mark_responding_sas_device(ioc,
- le64_to_cpu(sas_device_pg0.SASAddress),
- le16_to_cpu(sas_device_pg0.Slot), handle);
+ sas_device_pg0.SASAddress =
+ le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+ _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
out:
@@ -7854,8 +8087,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* event thread */
snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
"fw_event%d", ioc->id);
- ioc->firmware_event_thread = create_singlethread_workqueue(
- ioc->firmware_event_name);
+ ioc->firmware_event_thread = alloc_ordered_workqueue(
+ ioc->firmware_event_name, WQ_MEM_RECLAIM);
if (!ioc->firmware_event_thread) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index efb98afc46e0..70fd019e7ee5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -649,6 +649,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
unsigned long flags;
struct _sas_node *sas_node;
struct sas_rphy *rphy;
+ struct _sas_device *sas_device = NULL;
int i;
struct sas_port *port;
@@ -731,10 +732,27 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port->remote_identify.device_type);
rphy->identify = mpt3sas_port->remote_identify;
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ if (!sas_device) {
+ dfailprintk(ioc, printk(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__));
+ goto out_fail;
+ }
+ sas_device->pend_sas_rphy_add = 1;
+ }
+
if ((sas_rphy_add(rphy))) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
}
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ sas_device->pend_sas_rphy_add = 0;
+
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
dev_printk(KERN_INFO, &rphy->dev,
"add: handle(0x%04x), sas_addr(0x%016llx)\n",
@@ -1946,7 +1964,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_out) {
+ if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
ioc->name, __func__);
rc = -ENOMEM;
@@ -1968,7 +1986,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
} else {
dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_in) {
+ if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
ioc->name, __func__);
rc = -ENOMEM;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index d40d734aa53a..e2d555c1bffc 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -324,13 +324,9 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
goto err_out;
res_flag_ex = pci_resource_flags(pdev, bar_ex);
- if (res_flag_ex & IORESOURCE_MEM) {
- if (res_flag_ex & IORESOURCE_CACHEABLE)
- mvi->regs_ex = ioremap(res_start, res_len);
- else
- mvi->regs_ex = ioremap_nocache(res_start,
- res_len);
- } else
+ if (res_flag_ex & IORESOURCE_MEM)
+ mvi->regs_ex = ioremap(res_start, res_len);
+ else
mvi->regs_ex = (void *)res_start;
if (!mvi->regs_ex)
goto err_out;
@@ -338,14 +334,14 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
res_start = pci_resource_start(pdev, bar);
res_len = pci_resource_len(pdev, bar);
- if (!res_start || !res_len)
+ if (!res_start || !res_len) {
+ iounmap(mvi->regs_ex);
+ mvi->regs_ex = NULL;
goto err_out;
+ }
res_flag = pci_resource_flags(pdev, bar);
- if (res_flag & IORESOURCE_CACHEABLE)
- mvi->regs = ioremap(res_start, res_len);
- else
- mvi->regs = ioremap_nocache(res_start, res_len);
+ mvi->regs = ioremap(res_start, res_len);
if (!mvi->regs) {
if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 74a4bb9af07b..f14ec6e042b9 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -49,13 +49,15 @@ enum chip_flavors {
chip_8019,
chip_8074,
chip_8076,
- chip_8077
+ chip_8077,
+ chip_8006,
};
enum phy_speed {
PHY_SPEED_15 = 0x01,
PHY_SPEED_30 = 0x02,
PHY_SPEED_60 = 0x04,
+ PHY_SPEED_120 = 0x08,
};
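
These enum values appear to be the firmware's link-rate bit codes, one
bit per rate (0x01/0x02/0x04/0x08); the new PHY_SPEED_120 entry feeds the
12.0 Gbps mapping added to pm8001_get_lrate_mode() below.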
enum data_direction {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 96dcc097a463..04e67a190652 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2642,6 +2642,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
default:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
@@ -3263,6 +3264,10 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
struct sas_phy *sas_phy = phy->sas_phy.phy;
switch (link_rate) {
+ case PHY_SPEED_120:
+ phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
+ break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a132f2664d2f..5c0356fb6310 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -57,6 +57,7 @@ static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
[chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
+ [chip_8006] = {0, 16, &pm8001_80xx_dispatch,},
};
static int pm8001_id;
@@ -1107,6 +1108,8 @@ err_out_enable:
*/
static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
/* Support for SPC/SPCv/SPCve controllers */
{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
@@ -1217,7 +1220,7 @@ MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
MODULE_DESCRIPTION(
- "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+ "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077 "
"SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b93f289b42b3..949198c01ced 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -790,6 +790,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
ccb->device = pm8001_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
pm8001_dev, flag, task_tag, ccb_tag);
@@ -975,19 +976,27 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
phy = sas_get_local_phy(dev);
if (dev_is_sata(dev)) {
- DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (scsi_is_sas_phy_local(phy)) {
rc = 0;
goto out;
}
rc = sas_phy_reset(phy, 1);
+ if (rc) {
+ PM8001_EH_DBG(pm8001_ha,
+				pm8001_printk("phy reset failed for device "
+				"0x%x with rc %d\n", pm8001_dev->device_id, rc));
+ rc = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
- pm8001_dev->setds_completion = &completion_setstate;
- rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
- wait_for_completion(&completion_setstate);
+ if (rc) {
+ PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("task abort failed for device "
+				"0x%x with rc %d\n", pm8001_dev->device_id, rc));
+ rc = TMF_RESP_FUNC_FAILED;
+ }
} else {
rc = sas_phy_reset(phy, 1);
msleep(2000);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 8dd8b7840f04..e2e97db38ae8 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.37"
+#define DRV_VERSION "0.1.38"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -241,7 +241,7 @@ struct pm8001_chip_info {
struct pm8001_port {
struct asd_sas_port sas_port;
u8 port_attached;
- u8 wide_port_phymap;
+ u16 wide_port_phymap;
u8 port_state;
struct list_head list;
};
@@ -569,6 +569,14 @@ struct pm8001_fw_image_header {
#define NCQ_READ_LOG_FLAG 0x80000000
#define NCQ_ABORT_ALL_FLAG 0x40000000
#define NCQ_2ND_RLE_FLAG 0x20000000
+
+/* Device states */
+#define DS_OPERATIONAL 0x01
+#define DS_PORT_IN_RESET 0x02
+#define DS_IN_RECOVERY 0x03
+#define DS_IN_ERROR 0x04
+#define DS_NON_OPERATIONAL 0x07
+
/**
* brief param structure for firmware flash update.
*/
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 05cce463ab01..9a389f1508de 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -309,6 +309,9 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+ /* read port recover and reset timeout */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
+ pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
}
/**
@@ -585,6 +588,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+ PORT_RECOVERY_TIMEOUT;
+ pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
}
/**
@@ -843,6 +852,7 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+ u32 page_code;
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -851,8 +861,14 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
+
+ if (IS_SPCV_12G(pm8001_ha->pdev))
+ page_code = THERMAL_PAGE_CODE_7H;
+ else
+ page_code = THERMAL_PAGE_CODE_8H;
+
payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
- (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+ (THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
@@ -1593,6 +1609,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
+ case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
@@ -2314,6 +2337,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
default:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
@@ -2829,6 +2853,32 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op);
+static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
+ void *piomb)
+{
+ struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+ u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u8 deviceType = pPayload->sas_identify.dev_type;
+ u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+
+ if (deviceType == SAS_END_DEVICE) {
+ pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+ PHY_NOTIFY_ENABLE_SPINUP);
+ }
+
+ port->wide_port_phymap |= (1U << phy_id);
+ pm8001_get_lrate_mode(phy, link_rate);
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ phy->phy_attached = 1;
+}
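
In short, the new handler restores the phy state that a port-recover
event implies is still valid: it re-adds the phy to the wide-port bitmap,
restores the negotiated link rate, and, for end devices, re-enables
spin-up notification.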
+
/**
* hw_event_sas_phy_up -FW tells me a SAS phy up event.
* @pm8001_ha: our hba card information
@@ -2856,6 +2906,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
port->port_state = portstate;
+ port->wide_port_phymap |= (1U << phy_id);
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
"portid:%d; phyid:%d; linkrate:%d; "
@@ -2981,7 +3032,6 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
port->port_state = portstate;
- phy->phy_type = 0;
phy->identify.device_type = 0;
phy->phy_attached = 0;
memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
@@ -2993,9 +3043,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" PortInvalid portID %d\n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- port->port_attached = 0;
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ phy->phy_type = 0;
+ port->port_attached = 0;
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
+ sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
PM8001_MSG_DBG(pm8001_ha,
@@ -3003,22 +3057,26 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case PORT_NOT_ESTABLISHED:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+ pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- port->port_attached = 0;
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ if (phy->phy_type & PORT_TYPE_SATA) {
+ port->port_attached = 0;
+ phy->phy_type = 0;
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
+ sas_phy_disconnected(&phy->sas_phy);
break;
default:
port->port_attached = 0;
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" phy Down and(default) = 0x%x\n",
+ pm8001_printk(" Phy Down and(default) = 0x%x\n",
portstate));
break;
@@ -3084,7 +3142,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
*/
static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- unsigned long flags;
+ unsigned long flags, i;
struct hw_event_resp *pPayload =
(struct hw_event_resp *)(piomb + 4);
u32 lr_status_evt_portid =
@@ -3097,9 +3155,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
u8 status =
(u8)((lr_status_evt_portid & 0x0F000000) >> 24);
-
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
@@ -3125,7 +3183,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_DOWN:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PHY_DOWN\n"));
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ if (phy->phy_type & PORT_TYPE_SATA)
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
phy->phy_attached = 0;
phy->phy_state = 0;
hw_event_phy_down(pm8001_ha, piomb);
@@ -3169,9 +3229,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
- sas_phy_disconnected(sas_phy);
- phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
PM8001_MSG_DBG(pm8001_ha,
@@ -3179,9 +3236,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
- sas_phy_disconnected(sas_phy);
- phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
PM8001_MSG_DBG(pm8001_ha,
@@ -3189,9 +3243,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
- sas_phy_disconnected(sas_phy);
- phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
@@ -3199,9 +3250,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
- sas_phy_disconnected(sas_phy);
- phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
break;
case HW_EVENT_MALFUNCTION:
PM8001_MSG_DBG(pm8001_ha,
@@ -3257,13 +3305,19 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
- sas_phy_disconnected(sas_phy);
- phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ if (port->wide_port_phymap & (1 << i)) {
+ phy = &pm8001_ha->phy[i];
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ port->wide_port_phymap &= ~(1 << i);
+ }
+ }
break;
case HW_EVENT_PORT_RECOVER:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+ hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
PM8001_MSG_DBG(pm8001_ha,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 9970a385795d..7a443bad6163 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -177,7 +177,8 @@
/* Thermal related */
#define THERMAL_ENABLE 0x1
#define THERMAL_LOG_ENABLE 0x1
-#define THERMAL_OP_CODE 0x6
+#define THERMAL_PAGE_CODE_7H 0x6
+#define THERMAL_PAGE_CODE_8H 0x7
#define LTEMPHIL 70
#define RTEMPHIL 100
@@ -1174,7 +1175,7 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54
#define MPI_IO_RQE_BUSY_FULL 0x55
#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56
-#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57
+#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME 0x57
#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58
#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 33f60c92e20e..a0f732b138e4 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -32,10 +32,10 @@ config SCSI_QLA_FC
They are also included in the linux-firmware tree as well.
config TCM_QLA2XXX
- tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+ tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs"
depends on SCSI_QLA_FC && TARGET_CORE
depends on LIBFC
select BTREE
default n
---help---
- Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
+ Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..6b942d9e5b74 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
@@ -884,7 +884,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
int rval;
- uint16_t actual_size;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
return 0;
@@ -901,7 +900,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
}
do_read:
- actual_size = 0;
memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
@@ -1079,8 +1077,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- vha->hw->model_desc ? vha->hw->model_desc : "");
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
}
static ssize_t
@@ -1348,7 +1345,8 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
+ !IS_QLA27XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1537,6 +1535,20 @@ qla2x00_allow_cna_fw_dump_store(struct device *dev,
return strlen(buf);
}
+static ssize_t
+qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA27XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+ ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +1593,7 @@ static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
qla2x00_allow_cna_fw_dump_show,
qla2x00_allow_cna_fw_dump_store);
+static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1614,6 +1627,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_diag_megabytes,
&dev_attr_fw_dump_size,
&dev_attr_allow_cna_fw_dump,
+ &dev_attr_pep_version,
NULL,
};
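For reference, the pep_version attribute added above appears as a read-only file beneath the host's sysfs node once the driver registers it. A minimal userspace sketch for reading it follows; the sysfs path and host number are illustrative assumptions, not something this patch pins down:

/* Sketch: read the new pep_version attribute from userspace.
 * The path assumes SCSI host 0 is a qla2xxx ISP27xx adapter;
 * non-27xx parts report an empty line per the show routine above.
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/scsi_host/host0/pep_version", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("PEP version: %s", buf);	/* formatted "%d.%02d.%02d\n" */
	fclose(f);
	return 0;
}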
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2e2bb6f45ce6..c26acde797f0 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -405,7 +405,7 @@ done:
return rval;
}
-inline uint16_t
+static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
uint16_t iocbs;
@@ -1733,7 +1733,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
uint16_t req_sg_cnt = 0;
uint16_t rsp_sg_cnt = 0;
@@ -1790,8 +1789,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
goto done;
}
- thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
-
mutex_lock(&ha->selflogin_lock);
if (vha->self_login_loop_id == 0) {
/* Initialize all required fields of fcport */
@@ -2174,7 +2171,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
struct fc_rport *rport;
- fc_port_t *fcport = NULL;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
@@ -2183,7 +2179,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
- fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
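Aside from dropping unused locals, the qla_bsg.c hunks above give qla24xx_calc_ct_iocbs internal linkage. That helper maps a data-segment-descriptor (DSD) count onto the number of IOCBs needed to carry it: the command IOCB holds the first few descriptors and each continuation IOCB holds a few more. A reduced sketch of the packing arithmetic; the per-IOCB capacities are illustrative assumptions, not the driver's authoritative values:

/* Sketch: DSD-to-IOCB packing. The command IOCB carries the first
 * CMD_DSDS descriptors; every continuation IOCB carries CONT_DSDS
 * more. Capacities are assumed for illustration.
 */
#include <stdint.h>

#define CMD_DSDS	2
#define CONT_DSDS	5

static inline uint16_t calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;		/* the command IOCB itself */

	if (dsds > CMD_DSDS)		/* ceiling division for the rest */
		iocbs += (dsds - CMD_DSDS + CONT_DSDS - 1) / CONT_DSDS;
	return iocbs;
}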
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..34dc9a35670b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -19,14 +19,14 @@
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2099-0x20a4 |
- * | Queue Command and IO tracing | 0x3059 | 0x300b |
+ * | Queue Command and IO tracing | 0x3075 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
- * | Async Events | 0x5087 | 0x502b-0x502f |
+ * | Async Events | 0x508a | 0x502b-0x502f |
* | | | 0x5047 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
@@ -67,10 +67,10 @@
* | | | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe079 | |
- * | Target Mode Management | 0xf072 | 0xf002 |
+ * | Target Mode | 0xe080 | |
+ * | Target Mode Management | 0xf096 | 0xf002 |
* | | | 0xf046-0xf049 |
- * | Target Mode Task Management | 0x1000b | |
+ * | Target Mode Task Management | 0x1000d | |
* ----------------------------------------------------------------------
*/
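The table above is the driver's registry of debug-message IDs: each ql_dbg()/ql_log() call site carries a subsystem mask plus a unique hex ID, and the table tracks the highest ID in use per module so new messages don't collide. An in-context sketch of how the IDs are consumed (both calls mirror real ones appearing later in this diff):

/* Sketch: message-ID usage matching the table above. The mask picks
 * the subsystem; the hex ID uniquely tags this call site so a logged
 * line can be traced back to the code that emitted it.
 */
static void example_trace(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_init, vha, 0x00fd,	/* init-time message */
	    "MSIX entry configured.\n");
	ql_log(ql_log_warn, vha, 0xd002,	/* firmware-dump warning */
	    "No dump buffer available.\n");
}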
@@ -117,7 +117,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
{
int rval;
uint32_t cnt, stat, timer, dwords, idx;
- uint16_t mb0, mb1;
+ uint16_t mb0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
uint32_t *dump = (uint32_t *)ha->gid_list;
@@ -161,7 +161,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
&ha->mbx_cmd_flags);
mb0 = RD_REG_WORD(&reg->mailbox0);
- mb1 = RD_REG_WORD(&reg->mailbox1);
+ RD_REG_WORD(&reg->mailbox1);
WRT_REG_DWORD(&reg->hccr,
HCCRX_CLR_RISC_INT);
@@ -486,7 +486,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
return ptr;
*last_chain = &fcec->type;
- fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+ fcec->type = htonl(DUMP_CHAIN_FCE);
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
fce_calc_size(ha->fce_bufs));
fcec->size = htonl(fce_calc_size(ha->fce_bufs));
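The __constant_htonl() to htonl() conversions that run through the rest of this series are behavior-neutral: the generic byte-order macros already detect compile-time constants and fold the swap, so the explicit __constant_ spelling buys nothing. A simplified sketch of the mechanism, assuming a little-endian host; the real definitions live in the kernel's byteorder headers:

/* Simplified sketch: the generic macro checks for a compile-time
 * constant itself, so callers never need a __constant_ variant.
 * Little-endian host assumed for brevity.
 */
#include <stdint.h>

static inline uint32_t rt_swab32(uint32_t x)	/* runtime fallback */
{
	return __builtin_bswap32(x);
}

#define my_htonl(x) (__builtin_constant_p(x) ?			\
	(uint32_t)__builtin_bswap32(x) :	/* folded at compile time */ \
	rt_swab32(x))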
@@ -527,7 +527,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
/* aqp = ha->atio_q_map[que]; */
q = ptr;
*last_chain = &q->type;
- q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->type = htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
@@ -536,7 +536,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
/* Add header. */
qh = ptr;
- qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+ qh->queue = htonl(TYPE_ATIO_QUEUE);
qh->number = htonl(que);
qh->size = htonl(aqp->length * sizeof(request_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -571,7 +571,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
/* Add chain. */
q = ptr;
*last_chain = &q->type;
- q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->type = htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
@@ -580,7 +580,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
/* Add header. */
qh = ptr;
- qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+ qh->queue = htonl(TYPE_REQUEST_QUEUE);
qh->number = htonl(que);
qh->size = htonl(req->length * sizeof(request_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -599,7 +599,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
/* Add chain. */
q = ptr;
*last_chain = &q->type;
- q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->type = htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
@@ -608,7 +608,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
/* Add header. */
qh = ptr;
- qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+ qh->queue = htonl(TYPE_RESPONSE_QUEUE);
qh->number = htonl(que);
qh->size = htonl(rsp->length * sizeof(response_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -627,15 +627,15 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
uint32_t cnt, que_idx;
uint8_t que_cnt;
struct qla2xxx_mq_chain *mq = ptr;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
return ptr;
mq = ptr;
*last_chain = &mq->type;
- mq->type = __constant_htonl(DUMP_CHAIN_MQ);
- mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+ mq->type = htonl(DUMP_CHAIN_MQ);
+ mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
ha->max_req_queues : ha->max_rsp_queues;
@@ -695,8 +695,10 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd002,
@@ -832,8 +834,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_dump_post_process(base_vha, rval);
qla2300_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
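The #ifndef __CHECKER__ guards added to each firmware-dump routine exist for sparse, which defines __CHECKER__ and cannot prove lock balance when acquisition depends on a runtime flag. Hiding both conditional halves from the checker silences its context-imbalance warning without changing the built code. A reduced sketch of the pattern:

/* Sketch: conditional locking that sparse cannot verify. Lock and
 * unlock happen under the same runtime condition, so both halves are
 * hidden from the checker while real builds keep them.
 */
static void dump_regs(struct qla_hw_data *ha, int hardware_locked)
{
	unsigned long flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
#endif
	/* ... snapshot registers into the dump buffer ... */
#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
#endif
}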
/**
@@ -859,8 +865,10 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
mb0 = mb2 = 0;
flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd004,
@@ -1030,8 +1038,12 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_dump_post_process(base_vha, rval);
qla2100_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
void
@@ -1039,7 +1051,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
- uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
@@ -1047,7 +1058,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint16_t __iomem *mbx_reg;
unsigned long flags;
struct qla24xx_fw_dump *fw;
- uint32_t ext_mem_cnt;
void *nxt;
void *nxt_chain;
uint32_t *last_chain = NULL;
@@ -1056,12 +1066,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (IS_P3P_TYPE(ha))
return;
- risc_address = ext_mem_cnt = 0;
flags = 0;
ha->fw_dump_cap_flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd006,
@@ -1274,8 +1285,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
- ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
- *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
@@ -1285,8 +1296,12 @@ qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
qla24xx_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
void
@@ -1294,7 +1309,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
- uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
@@ -1302,17 +1316,17 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint16_t __iomem *mbx_reg;
unsigned long flags;
struct qla25xx_fw_dump *fw;
- uint32_t ext_mem_cnt;
void *nxt, *nxt_chain;
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = ext_mem_cnt = 0;
flags = 0;
ha->fw_dump_cap_flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd008,
@@ -1329,7 +1343,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
- ha->fw_dump->version = __constant_htonl(2);
+ ha->fw_dump->version = htonl(2);
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
@@ -1593,8 +1607,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
- ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
- *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
@@ -1604,8 +1618,12 @@ qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
qla25xx_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
void
@@ -1613,7 +1631,6 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
- uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
@@ -1621,17 +1638,17 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint16_t __iomem *mbx_reg;
unsigned long flags;
struct qla81xx_fw_dump *fw;
- uint32_t ext_mem_cnt;
void *nxt, *nxt_chain;
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = ext_mem_cnt = 0;
flags = 0;
ha->fw_dump_cap_flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00a,
@@ -1914,8 +1931,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
- ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
- *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
@@ -1925,16 +1942,19 @@ qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
qla81xx_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
void
qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
- uint32_t cnt, reg_data;
- uint32_t risc_address;
+ uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
@@ -1942,17 +1962,17 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint16_t __iomem *mbx_reg;
unsigned long flags;
struct qla83xx_fw_dump *fw;
- uint32_t ext_mem_cnt;
void *nxt, *nxt_chain;
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = ext_mem_cnt = 0;
flags = 0;
ha->fw_dump_cap_flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
@@ -1979,16 +1999,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
- reg_data = RD_REG_DWORD(dmp_reg);
+ RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
- reg_data = RD_REG_DWORD(dmp_reg);
+ RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
- reg_data = RD_REG_DWORD(dmp_reg);
+ RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
@@ -2420,8 +2440,8 @@ copy_queue:
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
- ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
- *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
@@ -2431,8 +2451,12 @@ qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
qla83xx_fw_dump_failed:
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+ ;
+#endif
}
/****************************************************************************/
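Note the shape of the unused-variable removals above: where mb1 and reg_data only captured a register value, the RD_REG_WORD()/RD_REG_DWORD() access itself is kept, because an MMIO read can have side effects (latching status, ordering against the following write). A reduced sketch of the idiom, reusing the driver's own accessors:

/* Sketch: drop the dead assignment, keep the read. The readback's
 * side effect, and its ordering against the write, is the point.
 */
static void clear_window(uint32_t __iomem *reg)
{
	RD_REG_DWORD(reg);	/* value intentionally discarded */
	WRT_REG_DWORD(reg, 0);
}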
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..388d79088b59 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
struct req_que;
+struct qla_tgt_sess;
/*
* (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
uint16_t port_id;
unsigned long retry_delay_timestamp;
+ struct qla_tgt_sess *tgt_session;
} fc_port_t;
#include "qla_mr.h"
@@ -3059,6 +3061,7 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
+#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261
uint32_t device_type;
#define DT_ISP2100 BIT_0
@@ -3082,7 +3085,8 @@ struct qla_hw_data {
#define DT_ISP8044 BIT_18
#define DT_ISP2071 BIT_19
#define DT_ISP2271 BIT_20
-#define DT_ISP_LAST (DT_ISP2271 << 1)
+#define DT_ISP2261 BIT_21
+#define DT_ISP_LAST (DT_ISP2261 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -3114,6 +3118,7 @@ struct qla_hw_data {
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
+#define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3122,7 +3127,7 @@ struct qla_hw_data {
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
-#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha))
+#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3154,16 +3159,17 @@ struct qla_hw_data {
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
((ha)->fw_attributes_ext[0] & BIT_0))
-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3286,6 +3292,7 @@ struct qla_hw_data {
uint8_t mpi_version[3];
uint32_t mpi_capabilities;
uint8_t phy_version[3];
+ uint8_t pep_version[3];
/* Firmware dump template */
void *fw_dump_template;
@@ -3418,9 +3425,9 @@ struct qla_hw_data {
mempool_t *ctx_mempool;
#define FCP_CMND_DMA_POOL_SIZE 512
- unsigned long nx_pcibase; /* Base I/O address */
- uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
- unsigned long nxdb_wr_ptr; /* Door bell write pointer */
+ void __iomem *nx_pcibase; /* Base I/O address */
+ void __iomem *nxdb_rd_ptr; /* Doorbell read pointer */
+ void __iomem *nxdb_wr_ptr; /* Doorbell write pointer */
uint32_t crb_win;
uint32_t curr_window;
@@ -3579,6 +3586,16 @@ typedef struct scsi_qla_host {
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
+ /* list of commands waiting on workqueue */
+ struct list_head qla_cmd_list;
+ struct list_head qla_sess_op_cmd_list;
+ spinlock_t cmd_list_lock;
+
+ /* Counter to detect races between ELS and RSCN events */
+ atomic_t generation_tick;
+ /* Time when global fcport update has been scheduled */
+ int total_fcport_update_gen;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
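The qla_def.h hunks show the full recipe for teaching the driver a new chip: a PCI device ID, a DT_ISPxxxx type bit (with DT_ISP_LAST moved up), and membership in the family macro that the feature tests key off. A reduced sketch of the scheme; DT_MASK's exact definition is an assumption here:

/* Sketch: one bit per ISP, DT_ISP_LAST bounds the range, and family
 * macros such as IS_QLA27XX() test the mask. DT_MASK is assumed to
 * strip the feature bits above the device-type range.
 */
#define DT_ISP2071	(1u << 19)
#define DT_ISP2271	(1u << 20)
#define DT_ISP2261	(1u << 21)	/* newly added part */
#define DT_ISP_LAST	(DT_ISP2261 << 1)

#define DT_MASK(ha)	((ha)->device_type & (DT_ISP_LAST - 1))
#define IS_QLA27XX(ha)	(DT_MASK(ha) & (DT_ISP2071 | DT_ISP2271 | DT_ISP2261))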
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index dccc4dcc39c8..94e8a8592f69 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -35,10 +35,10 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->entry_type = MS_IOCB_TYPE;
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
- ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+ ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
- ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+ ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+ ms_pkt->total_dsd_count = cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(req_size);
@@ -74,10 +74,10 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
+ ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
- ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
@@ -142,7 +142,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
case CS_DATA_UNDERRUN:
case CS_DATA_OVERRUN: /* Overrun? */
if (ct_rsp->header.response !=
- __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
+ cpu_to_be16(CT_ACCEPT_RESPONSE)) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
"%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
routine, vha->d_id.b.domain,
@@ -1153,10 +1153,10 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
ms_pkt->entry_type = MS_IOCB_TYPE;
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
- ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+ ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
- ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+ ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+ ms_pkt->total_dsd_count = cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(req_size);
@@ -1193,8 +1193,8 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
- ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
@@ -1281,19 +1281,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Prepare FDMI command arguments -- attribute block, attributes. */
memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
+ ct_req->req.rhba.entry_count = cpu_to_be32(1);
memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
size = 2 * WWN_SIZE + 4 + 4;
/* Attributes */
ct_req->req.rhba.attrs.count =
- __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
+ cpu_to_be32(FDMI_HBA_ATTR_COUNT);
entries = ct_req->req.rhba.hba_identifier;
/* Nodename. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+ eiter->len = cpu_to_be16(4 + WWN_SIZE);
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
@@ -1302,7 +1302,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Manufacturer. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
+ eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = strlen(QLA2XXX_MANUFACTURER);
snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
"%s", "QLogic Corporation");
@@ -1315,7 +1315,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Serial number. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+ eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
if (IS_FWI2_CAPABLE(ha))
qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
sizeof(eiter->a.serial_num));
@@ -1335,7 +1335,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Model name. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
snprintf(eiter->a.model, sizeof(eiter->a.model),
"%s", ha->model_number);
alen = strlen(eiter->a.model);
@@ -1348,7 +1348,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Model description. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+ eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
"%s", ha->model_desc);
alen = strlen(eiter->a.model_desc);
@@ -1361,7 +1361,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Hardware version. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+ eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
if (!IS_FWI2_CAPABLE(ha)) {
snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
"HW:%s", ha->adapter_id);
@@ -1385,7 +1385,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Driver version. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+ eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
"%s", qla2x00_version_str);
alen = strlen(eiter->a.driver_version);
@@ -1398,7 +1398,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Option ROM version. */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
"%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
alen = strlen(eiter->a.orom_version);
@@ -1411,7 +1411,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Firmware version */
eiter = entries + size;
- eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+ eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
sizeof(eiter->a.fw_version));
alen = strlen(eiter->a.fw_version);
@@ -2484,8 +2484,8 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
- ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+ ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+ ct_pkt->rsp_dsd_count = cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
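The FDMI RHBA hunks above build each attribute as a big-endian type/length/value entry and advance a running size offset past it; the cpu_to_be16() calls now lean on the same constant folding noted earlier. A hedged sketch of the append step; the field names and the 4-byte padding rule are illustrative assumptions:

/* Sketch: append one FDMI attribute. The header is BE type + BE
 * length (header plus padded payload); 'size' advances past the
 * whole entry, mirroring the running 'size' in the hunks above.
 */
struct attr_entry {
	__be16 type;
	__be16 len;
	uint8_t value[];
};

static void append_attr(void *entries, uint32_t *size, uint16_t type,
			const void *val, uint16_t vlen)
{
	struct attr_entry *e = entries + *size;
	uint16_t padded = (vlen + 3) & ~3;	/* assumed 4-byte alignment */

	e->type = cpu_to_be16(type);
	e->len = cpu_to_be16(4 + padded);	/* header + payload */
	memcpy(e->value, val, vlen);
	*size += 4 + padded;
}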
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..16a1935cc9c1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
QLA_LOGIO_LOGIN_RETRIED : 0;
qla2x00_post_async_login_done_work(fcport->vha, fcport,
lio->u.logio.data);
+ } else if (sp->type == SRB_LOGOUT_CMD) {
+ qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
}
}
@@ -497,7 +499,10 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
return;
}
@@ -1127,7 +1132,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t cnt, d2;
+ uint32_t cnt;
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
int rval = QLA_SUCCESS;
@@ -1159,7 +1164,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ RD_REG_WORD(&reg->mailbox0);
for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
@@ -1178,7 +1183,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->mailbox0));
/* Wait for soft-reset to complete. */
- d2 = RD_REG_DWORD(&reg->ctrl_status);
+ RD_REG_DWORD(&reg->ctrl_status);
for (cnt = 0; cnt < 6000000; cnt++) {
barrier();
if ((RD_REG_DWORD(&reg->ctrl_status) &
@@ -1221,7 +1226,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
RD_REG_DWORD(&reg->hccr);
- d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ RD_REG_WORD(&reg->mailbox0);
for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
@@ -1272,16 +1277,19 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = vha->hw;
uint32_t wd32 = 0;
uint delta_msec = 100;
uint elapsed_msec = 0;
uint timeout_msec;
ulong n;
- if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+ if (vha->hw->pdev->subsystem_device != 0x0175 &&
+ vha->hw->pdev->subsystem_device != 0x0240)
return;
+ WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+ udelay(100);
+
attempt:
timeout_msec = TIMEOUT_SEMAPHORE;
n = timeout_msec / delta_msec;
@@ -1538,7 +1546,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1558,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues.
@@ -1685,7 +1693,7 @@ allocate:
ha->fw_dump->signature[1] = 'L';
ha->fw_dump->signature[2] = 'G';
ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = __constant_htonl(1);
+ ha->fw_dump->version = htonl(1);
ha->fw_dump->fixed_size = htonl(fixed_size);
ha->fw_dump->mem_size = htonl(mem_size);
@@ -2065,8 +2073,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
struct rsp_que *rsp = ha->rsp_q_map[0];
/* Setup ring parameters in initialization control block. */
- ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
- ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
+ ha->init_cb->request_q_outpointer = cpu_to_le16(0);
+ ha->init_cb->response_q_inpointer = cpu_to_le16(0);
ha->init_cb->request_q_length = cpu_to_le16(req->length);
ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2085,7 +2093,7 @@ void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
+ device_reg_t *reg = ISP_QUE_REG(ha, 0);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
struct qla_msix_entry *msix;
struct init_cb_24xx *icb;
@@ -2095,8 +2103,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
/* Setup ring parameters in initialization control block. */
icb = (struct init_cb_24xx *)ha->init_cb;
- icb->request_q_outpointer = __constant_cpu_to_le16(0);
- icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_outpointer = cpu_to_le16(0);
+ icb->response_q_inpointer = cpu_to_le16(0);
icb->request_q_length = cpu_to_le16(req->length);
icb->response_q_length = cpu_to_le16(rsp->length);
icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2105,18 +2113,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
/* Setup ATIO queue dma pointers for target mode */
- icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+ icb->atio_q_inpointer = cpu_to_le16(0);
icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
if (IS_SHADOW_REG_CAPABLE(ha))
- icb->firmware_options_2 |=
- __constant_cpu_to_le32(BIT_30|BIT_29);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
- icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
- icb->rid = __constant_cpu_to_le16(rid);
+ icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+ icb->rid = cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
ql_dbg(ql_dbg_init, vha, 0x00fd,
@@ -2126,26 +2133,22 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
}
/* Use alternate PCI bus number */
if (MSB(rid))
- icb->firmware_options_2 |=
- __constant_cpu_to_le32(BIT_19);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_19);
/* Use alternate PCI devfn */
if (LSB(rid))
- icb->firmware_options_2 |=
- __constant_cpu_to_le32(BIT_18);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_18);
/* Use Disable MSIX Handshake mode for capable adapters */
if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
(ha->flags.msix_enabled)) {
- icb->firmware_options_2 &=
- __constant_cpu_to_le32(~BIT_22);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
ha->flags.disable_msix_handshake = 1;
ql_dbg(ql_dbg_init, vha, 0x00fe,
"MSIX Handshake Disable Mode turned on.\n");
} else {
- icb->firmware_options_2 |=
- __constant_cpu_to_le32(BIT_22);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_22);
}
- icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_23);
WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
@@ -2243,7 +2246,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
if (IS_FWI2_CAPABLE(ha)) {
- mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
+ mid_init_cb->options = cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->fw_xcb_count);
/* D-Port Status */
@@ -2672,8 +2675,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->frame_payload_size = 1024;
}
- nv->max_iocb_allocation = __constant_cpu_to_le16(256);
- nv->execution_throttle = __constant_cpu_to_le16(16);
+ nv->max_iocb_allocation = cpu_to_le16(256);
+ nv->execution_throttle = cpu_to_le16(16);
nv->retry_count = 8;
nv->retry_delay = 1;
@@ -2691,7 +2694,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->host_p[1] = BIT_2;
nv->reset_delay = 5;
nv->port_down_retry_count = 8;
- nv->max_luns_per_target = __constant_cpu_to_le16(8);
+ nv->max_luns_per_target = cpu_to_le16(8);
nv->link_down_timeout = 60;
rval = 1;
@@ -2819,7 +2822,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
- icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+ icb->execution_throttle = cpu_to_le16(0xFFFF);
ha->retry_count = nv->retry_count;
@@ -2871,10 +2874,10 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
- icb->lun_enables = __constant_cpu_to_le16(0);
+ icb->lun_enables = cpu_to_le16(0);
icb->command_resource_count = 0;
icb->immediate_notify_resource_count = 0;
- icb->timeout = __constant_cpu_to_le16(0);
+ icb->timeout = cpu_to_le16(0);
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Enable RIO */
@@ -2922,21 +2925,14 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
- scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport) {
+ if (rport)
fc_remote_port_delete(rport);
- /*
- * Release the target mode FC NEXUS in qla_target.c code
- * if target mod is enabled.
- */
- qlt_fc_port_deleted(vha, fcport);
- }
}
/**
@@ -3303,6 +3299,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
* Create target mode FC NEXUS in qla_target.c if target mode is
* enabled..
*/
+
qlt_fc_port_added(vha, fcport);
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3338,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- qla2x00_reg_remote_port(vha, fcport);
- return;
+ goto reg_port;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3346,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
- qla2x00_reg_remote_port(vha, fcport);
+
+reg_port:
+ if (qla_ini_mode_enabled(vha))
+ qla2x00_reg_remote_port(vha, fcport);
+ else {
+ /*
+ * Create target mode FC NEXUS in qla_target.c
+ */
+ qlt_fc_port_added(vha, fcport);
+ }
}
/*
@@ -3375,6 +3380,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
LIST_HEAD(new_fcports);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int discovery_gen;
/* If FL port exists, then SNS is present */
if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3451,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->scan_state = QLA_FCPORT_SCAN;
}
+ /* Mark the time right before querying FW for connected ports.
+ * This process is long, asynchronous and by the time it's done,
+ * the collected information might not be accurate anymore. E.g. a
+ * disconnected port might have re-connected and a brand new
+ * session has been created. In this case the session's generation
+ * will be newer than discovery_gen. */
+ qlt_do_generation_tick(vha, &discovery_gen);
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
@@ -3460,20 +3474,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (fcport->scan_state == QLA_FCPORT_SCAN &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if (qla_ini_mode_enabled(base_vha) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ha->isp_ops->fabric_logout(vha,
+ fcport->loop_id,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ qla2x00_clear_loop_id(fcport);
+ }
+ } else if (!qla_ini_mode_enabled(base_vha)) {
+ /*
+ * In target mode, explicitly kill
+ * sessions and log out of devices
+ * that are gone, so that we don't
+ * end up with an initiator using the
+ * wrong ACL (if the fabric recycles
+ * an FC address and we have a stale
+ * session around) and so that we don't
+ * report initiators that are no longer
+ * on the fabric.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
+ "port gone, logging out/killing session: "
+ "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
+ "scan_state %d\n",
+ fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ qlt_fc_port_deleted(vha, fcport,
+ discovery_gen);
}
}
}
@@ -3494,6 +3532,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
(fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
+ /*
+ * If we're not an initiator, skip looking for devices
+ * and logging in. There's no reason for us to do it,
+ * and it seems to actively cause problems in target
+ * mode if we race with the initiator logging into us
+ * (we might get the "port ID used" status back from
+ * our login command and log out the initiator, which
+ * seems to cause havoc).
+ */
+ if (!qla_ini_mode_enabled(base_vha)) {
+ if (fcport->scan_state == QLA_FCPORT_FOUND) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
+ "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
+ "scan_state %d (initiator mode disabled; skipping "
+ "login)\n", fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ }
+ continue;
+ }
+
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3580,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
+ /*
+ * If we're not an initiator, skip looking for devices
+ * and logging in. There's no reason for us to do it,
+ * and it seems to actively cause problems in target
+ * mode if we race with the initiator logging into us
+ * (we might get the "port ID used" status back from
+ * our login command and log out the initiator, which
+ * seems to cause havoc).
+ */
+ if (qla_ini_mode_enabled(base_vha)) {
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha,
+ fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport,
+ &next_loopid);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
+ "new port %8phC state 0x%x flags 0x%x fc4_type "
+ "0x%x scan_state %d (initiator mode disabled; "
+ "skipping login)\n",
+ fcport->port_name,
+ atomic_read(&fcport->state),
+ fcport->flags, fcport->fc4_type,
+ fcport->scan_state);
+ }
list_move_tail(&fcport->list, &vha->vp_fcports);
}
@@ -3725,11 +3807,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->fp_speed = new_fcport->fp_speed;
/*
- * If address the same and state FCS_ONLINE, nothing
- * changed.
+ * If address the same and state FCS_ONLINE
+ * (or in target mode), nothing changed.
*/
if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
+ (atomic_read(&fcport->state) == FCS_ONLINE ||
+ !qla_ini_mode_enabled(base_vha))) {
break;
}
@@ -3749,6 +3832,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
* Log it out if still logged in and mark it for
* relogin later.
*/
+ if (!qla_ini_mode_enabled(base_vha)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+ "port changed FC ID, %8phC"
+ " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+ fcport->port_name,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa,
+ fcport->loop_id,
+ new_fcport->d_id.b.domain,
+ new_fcport->d_id.b.area,
+ new_fcport->d_id.b.al_pa);
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ break;
+ }
+
fcport->d_id.b24 = new_fcport->d_id.b24;
fcport->flags |= FCF_LOGIN_NEEDED;
if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3867,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (found)
continue;
/* If device was not in our fcports list, then add it. */
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
list_add_tail(&new_fcport->list, new_fcports);
/* Allocate a new replacement fcport. */
@@ -3856,12 +3956,10 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
uint16_t *next_loopid)
{
int rval;
- int retry;
uint8_t opts;
struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
- retry = 0;
if (IS_ALOGIO_CAPABLE(ha)) {
if (fcport->flags & FCF_ASYNC_SENT)
@@ -4188,6 +4286,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
+
+ /*
+ * Release the target mode FC NEXUS in
+ * qla_target.c, if target mode is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport,
+ base_vha->total_fcport_update_gen);
+
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
@@ -5007,7 +5113,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
- nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+ nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -5020,12 +5126,12 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
* Set default initialization control block.
*/
memset(nv, 0, ha->nvram_size);
- nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
- nv->version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->nvram_version = cpu_to_le16(ICB_VERSION);
+ nv->version = cpu_to_le16(ICB_VERSION);
nv->frame_payload_size = 2048;
- nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
- nv->exchange_count = __constant_cpu_to_le16(0);
- nv->hard_address = __constant_cpu_to_le16(124);
+ nv->execution_throttle = cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0);
+ nv->hard_address = cpu_to_le16(124);
nv->port_name[0] = 0x21;
nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
@@ -5043,29 +5149,29 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->node_name[6] = 0x55;
nv->node_name[7] = 0x86;
qla24xx_nvram_wwn_from_ofw(vha, nv);
- nv->login_retry_count = __constant_cpu_to_le16(8);
- nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
- nv->login_timeout = __constant_cpu_to_le16(0);
+ nv->login_retry_count = cpu_to_le16(8);
+ nv->interrupt_delay_timer = cpu_to_le16(0);
+ nv->login_timeout = cpu_to_le16(0);
nv->firmware_options_1 =
- __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
- nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
- nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
- nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
- nv->efi_parameters = __constant_cpu_to_le32(0);
+ cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+ nv->firmware_options_2 = cpu_to_le32(2 << 4);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ nv->firmware_options_3 = cpu_to_le32(2 << 13);
+ nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+ nv->efi_parameters = cpu_to_le32(0);
nv->reset_delay = 5;
- nv->max_luns_per_target = __constant_cpu_to_le16(128);
- nv->port_down_retry_count = __constant_cpu_to_le16(30);
- nv->link_down_timeout = __constant_cpu_to_le16(30);
+ nv->max_luns_per_target = cpu_to_le16(128);
+ nv->port_down_retry_count = cpu_to_le16(30);
+ nv->link_down_timeout = cpu_to_le16(30);
rval = 1;
}
if (!qla_ini_mode_enabled(vha)) {
/* Don't enable full login after initial LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Don't enable LIP full login for initiator */
- nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ nv->host_p &= cpu_to_le32(~BIT_10);
}
qlt_24xx_config_nvram_stage1(vha, nv);
@@ -5099,14 +5205,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
qlt_24xx_config_nvram_stage2(vha, icb);
- if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ if (nv->host_p & cpu_to_le32(BIT_15)) {
/* Use alternate WWN? */
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
/* Prepare nodename */
- if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+ if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
/*
* Firmware will apply the following mask if the nodename was
* not provided.
@@ -5138,7 +5244,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
- icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+ icb->execution_throttle = cpu_to_le16(0xFFFF);
ha->retry_count = le16_to_cpu(nv->login_retry_count);
@@ -5146,7 +5252,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
if (le16_to_cpu(nv->login_timeout) < 4)
- nv->login_timeout = __constant_cpu_to_le16(4);
+ nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
icb->login_timeout = nv->login_timeout;
@@ -5197,7 +5303,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
le16_to_cpu(icb->interrupt_delay_timer): 2;
}
- icb->firmware_options_2 &= __constant_cpu_to_le32(
+ icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
@@ -5953,7 +6059,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
- nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+ nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -5967,11 +6073,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
* Set default initialization control block.
*/
memset(nv, 0, ha->nvram_size);
- nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
- nv->version = __constant_cpu_to_le16(ICB_VERSION);
+ nv->nvram_version = cpu_to_le16(ICB_VERSION);
+ nv->version = cpu_to_le16(ICB_VERSION);
nv->frame_payload_size = 2048;
- nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
- nv->exchange_count = __constant_cpu_to_le16(0);
+ nv->execution_throttle = cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21;
nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
@@ -5988,20 +6094,20 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->node_name[5] = 0x1c;
nv->node_name[6] = 0x55;
nv->node_name[7] = 0x86;
- nv->login_retry_count = __constant_cpu_to_le16(8);
- nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
- nv->login_timeout = __constant_cpu_to_le16(0);
+ nv->login_retry_count = cpu_to_le16(8);
+ nv->interrupt_delay_timer = cpu_to_le16(0);
+ nv->login_timeout = cpu_to_le16(0);
nv->firmware_options_1 =
- __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
- nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
- nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
- nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
- nv->efi_parameters = __constant_cpu_to_le32(0);
+ cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+ nv->firmware_options_2 = cpu_to_le32(2 << 4);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+ nv->firmware_options_3 = cpu_to_le32(2 << 13);
+ nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+ nv->efi_parameters = cpu_to_le32(0);
nv->reset_delay = 5;
- nv->max_luns_per_target = __constant_cpu_to_le16(128);
- nv->port_down_retry_count = __constant_cpu_to_le16(30);
- nv->link_down_timeout = __constant_cpu_to_le16(180);
+ nv->max_luns_per_target = cpu_to_le16(128);
+ nv->port_down_retry_count = cpu_to_le16(30);
+ nv->link_down_timeout = cpu_to_le16(180);
nv->enode_mac[0] = 0x00;
nv->enode_mac[1] = 0xC0;
nv->enode_mac[2] = 0xDD;
@@ -6060,13 +6166,13 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
qlt_81xx_config_nvram_stage2(vha, icb);
/* Use alternate WWN? */
- if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ if (nv->host_p & cpu_to_le32(BIT_15)) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
/* Prepare nodename */
- if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+ if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
/*
* Firmware will apply the following mask if the nodename was
* not provided.
@@ -6095,7 +6201,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
- icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+ icb->execution_throttle = cpu_to_le16(0xFFFF);
ha->retry_count = le16_to_cpu(nv->login_retry_count);
@@ -6103,7 +6209,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
if (le16_to_cpu(nv->login_timeout) < 4)
- nv->login_timeout = __constant_cpu_to_le16(4);
+ nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
icb->login_timeout = nv->login_timeout;
@@ -6149,7 +6255,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* if not running MSI-X we need handshaking on interrupts */
if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
- icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+ icb->firmware_options_2 |= cpu_to_le32(BIT_22);
/* Enable ZIO. */
if (!vha->flags.init_done) {
@@ -6158,7 +6264,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
le16_to_cpu(icb->interrupt_delay_timer): 2;
}
- icb->firmware_options_2 &= __constant_cpu_to_le32(
+ icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
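The generation_tick machinery threaded through qla_init.c gives the target code a way to spot stale fabric-scan results: the counter is sampled into discovery_gen before the long, asynchronous scan, and any session created afterwards carries a newer generation, so a "port gone" verdict from that scan can be ignored for it. A reduced sketch of the idea; qlt_do_generation_tick's body is an assumption here:

/* Sketch of the generation-tick race check. A session whose
 * generation is newer than the sampled discovery_gen was created
 * after the scan began, so the scan's view of that port is stale.
 */
static void do_generation_tick(scsi_qla_host_t *vha, int *dest)
{
	*dest = atomic_inc_return(&vha->generation_tick);
}

static bool scan_result_stale(int session_gen, int discovery_gen)
{
	return session_gen > discovery_gen;
}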
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..c49df34e9b35 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -108,8 +108,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
cont_pkt = (cont_entry_t *)req->ring_ptr;
/* Load packet defaults. */
- *((uint32_t *)(&cont_pkt->entry_type)) =
- __constant_cpu_to_le32(CONTINUE_TYPE);
+ *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
return (cont_pkt);
}
@@ -138,8 +137,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
- __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
- __constant_cpu_to_le32(CONTINUE_A64_TYPE);
+ cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
+ cpu_to_le32(CONTINUE_A64_TYPE);
return (cont_pkt);
}
@@ -204,11 +203,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
/* Update entry type to indicate Command Type 2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE);
+ cpu_to_le32(COMMAND_TYPE);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return;
}
@@ -261,12 +260,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_A64_TYPE);
+ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return;
}
@@ -310,7 +308,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
int
qla2x00_start_scsi(srb_t *sp)
{
- int ret, nseg;
+ int nseg;
unsigned long flags;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
@@ -327,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp)
struct rsp_que *rsp;
/* Setup device pointers. */
- ret = 0;
vha = sp->fcport->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
@@ -403,7 +400,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Set target ID and LUN number*/
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
- cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -454,7 +451,7 @@ void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ device_reg_t *reg = ISP_QUE_REG(ha, req->id);
if (IS_P3P_TYPE(ha)) {
qla82xx_start_iocbs(vha);
@@ -597,12 +594,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_6);
+ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return 0;
}
@@ -611,13 +607,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_WRITE_DATA);
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_READ_DATA);
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
vha->qla_stats.input_requests++;
}
@@ -680,7 +674,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
*
* Returns the number of DSD lists needed to store @dsds.
*/
-inline uint16_t
+static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
uint16_t dsd_lists = 0;
@@ -700,7 +694,7 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-inline void
+static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds)
{
@@ -710,32 +704,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
- struct req_que *req;
cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_7);
+ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return;
}
vha = sp->fcport->vha;
- req = vha->req;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd_pkt->task_mgmt_flags =
- __constant_cpu_to_le16(TMF_WRITE_DATA);
+ cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
- cmd_pkt->task_mgmt_flags =
- __constant_cpu_to_le16(TMF_READ_DATA);
+ cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
vha->qla_stats.input_requests++;
}
@@ -809,7 +798,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
* match LBA in CDB + N
*/
case SCSI_PROT_DIF_TYPE2:
- pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag = cpu_to_le16(0);
pkt->app_tag_mask[0] = 0x0;
pkt->app_tag_mask[1] = 0x0;
@@ -840,7 +829,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
case SCSI_PROT_DIF_TYPE1:
pkt->ref_tag = cpu_to_le32((uint32_t)
(0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag = cpu_to_le16(0);
pkt->app_tag_mask[0] = 0x0;
pkt->app_tag_mask[1] = 0x0;
@@ -933,11 +922,9 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
struct scsi_cmnd *cmd;
- struct scsi_qla_host *vha;
memset(&sgx, 0, sizeof(struct qla2_sgx));
if (sp) {
- vha = sp->fcport->vha;
cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
@@ -947,7 +934,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
sg_prot = scsi_prot_sglist(cmd);
} else if (tc) {
- vha = tc->vha;
prot_int = tc->blk_sz;
sgx.tot_bytes = tc->bufflen;
sgx.cur_sg = tc->sg;
@@ -1047,15 +1033,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int i;
uint16_t used_dsds = tot_dsds;
struct scsi_cmnd *cmd;
- struct scsi_qla_host *vha;
if (sp) {
cmd = GET_CMD_SP(sp);
sgl = scsi_sglist(cmd);
- vha = sp->fcport->vha;
} else if (tc) {
sgl = tc->sg;
- vha = tc->vha;
} else {
BUG();
return 1;
@@ -1231,7 +1214,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint32_t *cur_dsd, *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
- int sgc;
uint32_t total_bytes = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
@@ -1247,10 +1229,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd = GET_CMD_SP(sp);
- sgc = 0;
/* Update entry type to indicate Command Type CRC_2 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+ *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
vha = sp->fcport->vha;
ha = vha->hw;
@@ -1258,7 +1238,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* No data transfer */
data_bytes = scsi_bufflen(cmd);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return QLA_SUCCESS;
}
@@ -1267,10 +1247,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_WRITE_DATA);
+ cpu_to_le16(CF_WRITE_DATA);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_READ_DATA);
+ cpu_to_le16(CF_READ_DATA);
}
if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
@@ -1392,7 +1372,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
- crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
@@ -1400,13 +1380,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
*fcp_dl = htonl(total_bytes);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ cmd_pkt->byte_count = cpu_to_le32(0);
return QLA_SUCCESS;
}
/* Walks data segments */
- cmd_pkt->control_flags |=
- __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
if (!bundling && tot_prot_dsds) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
@@ -1418,8 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (bundling && tot_prot_dsds) {
/* Walks dif segments */
- cmd_pkt->control_flags |=
- __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
tot_prot_dsds, NULL))
@@ -1442,7 +1420,7 @@ crc_queuing_error:
int
qla24xx_start_scsi(srb_t *sp)
{
- int ret, nseg;
+ int nseg;
unsigned long flags;
uint32_t *clr_ptr;
uint32_t index;
@@ -1458,8 +1436,6 @@ qla24xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
- ret = 0;
-
qla25xx_set_que(sp, &rsp);
req = vha->req;
@@ -1753,7 +1729,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
cmd_pkt->entry_count = (uint8_t)req_cnt;
/* Specify response queue number where completion should happen */
cmd_pkt->entry_status = (uint8_t) rsp->id;
- cmd_pkt->timeout = __constant_cpu_to_le16(0);
+ cmd_pkt->timeout = cpu_to_le16(0);
wmb();
/* Adjust ring index. */
@@ -1819,7 +1795,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ device_reg_t *reg = ISP_QUE_REG(ha, req->id);
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
@@ -1943,6 +1919,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+ if (!sp->fcport->tgt_session ||
+ !sp->fcport->tgt_session->keep_nport_handle)
+ logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2041,10 +2020,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->fcport->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
- els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
els_iocb->opcode =
sp->type == SRB_ELS_CMD_RPT ?
@@ -2088,7 +2067,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
- int cont_iocb_prsnt = 0;
int entry_count = 1;
memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2096,13 +2074,13 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
ct_iocb->entry_status = 0;
ct_iocb->handle1 = sp->handle;
SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
- ct_iocb->status = __constant_cpu_to_le16(0);
- ct_iocb->control_flags = __constant_cpu_to_le16(0);
+ ct_iocb->status = cpu_to_le16(0);
+ ct_iocb->control_flags = cpu_to_le16(0);
ct_iocb->timeout = 0;
ct_iocb->cmd_dsd_count =
- __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ cpu_to_le16(bsg_job->request_payload.sg_cnt);
ct_iocb->total_dsd_count =
- __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
+ cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
ct_iocb->req_bytecount =
cpu_to_le32(bsg_job->request_payload.payload_len);
ct_iocb->rsp_bytecount =
@@ -2139,7 +2117,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
- cont_iocb_prsnt = 1;
entry_count++;
}
@@ -2167,7 +2144,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
- int cont_iocb_prsnt = 0;
int entry_count = 1;
ct_iocb->entry_type = CT_IOCB_TYPE;
@@ -2177,13 +2153,13 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ct_iocb->vp_index = sp->fcport->vha->vp_idx;
- ct_iocb->comp_status = __constant_cpu_to_le16(0);
+ ct_iocb->comp_status = cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
- __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ cpu_to_le16(bsg_job->request_payload.sg_cnt);
ct_iocb->timeout = 0;
ct_iocb->rsp_dsd_count =
- __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ct_iocb->rsp_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
ct_iocb->cmd_byte_count =
@@ -2214,7 +2190,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
- cont_iocb_prsnt = 1;
entry_count++;
}
@@ -2237,7 +2212,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int
qla82xx_start_scsi(srb_t *sp)
{
- int ret, nseg;
+ int nseg;
unsigned long flags;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
@@ -2257,7 +2232,6 @@ qla82xx_start_scsi(srb_t *sp)
struct rsp_que *rsp = NULL;
/* Setup device pointers. */
- ret = 0;
reg = &ha->iobase->isp82;
cmd = GET_CMD_SP(sp);
req = vha->req;
@@ -2536,16 +2510,12 @@ sufficient_dsds:
/* write, read and verify logic */
dbval = dbval | (req->id << 8) | (req->ring_index << 16);
if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
+ WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -2679,7 +2649,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
/*Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+ cpu_to_le32(COMMAND_BIDIRECTIONAL);
/* Set the transfer direction, in this case set both flags.
* Also set the BD_WRAP_BACK flag, firmware will take care
@@ -2687,8 +2657,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
*/
cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+ cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
BD_WRAP_BACK);
req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5559d5e75bbf..ccf6a7f99024 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -116,7 +116,7 @@ bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
/* Check for PCI disconnection */
- if (reg == 0xffffffff) {
+ if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
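
The amended check above treats an all-ones readback as a surprise disconnect only when the PCI core has not already marked the channel offline; if AER/EEH owns the error, its recovery path runs instead of a second teardown being queued. A reduced sketch of the pattern (pci_channel_offline() is real kernel API, the wrapper function is invented):

    #include <linux/pci.h>

    /* Sketch only: reads of a removed or dead PCI function return all
     * ones, but defer to the error-recovery core when it has already
     * flagged the channel offline. */
    static bool sketch_reg_reads_disconnected(struct pci_dev *pdev, u32 reg)
    {
    	return reg == 0xffffffff && !pci_channel_offline(pdev);
    }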
@@ -560,6 +560,17 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
return ret;
}
+static inline fc_port_t *
+qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+ fc_port_t *fcport;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ if (fcport->loop_id == loop_id)
+ return fcport;
+ return NULL;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -575,7 +586,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint32_t rscn_entry, host_pid, tmp_pid;
+ uint32_t rscn_entry, host_pid;
unsigned long flags;
fc_port_t *fcport = NULL;
@@ -897,11 +908,29 @@ skip_rio:
(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
break;
- /* Global event -- port logout or port unavailable. */
- if (mb[1] == 0xffff && mb[2] == 0x7) {
+ if (mb[2] == 0x7) {
ql_dbg(ql_dbg_async, vha, 0x5010,
- "Port unavailable %04x %04x %04x.\n",
+ "Port %s %04x %04x %04x.\n",
+ mb[1] == 0xffff ? "unavailable" : "logout",
mb[1], mb[2], mb[3]);
+
+ if (mb[1] == 0xffff)
+ goto global_port_update;
+
+ /* Port logout */
+ fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
+ if (!fcport)
+ break;
+ if (atomic_read(&fcport->state) != FCS_ONLINE)
+ break;
+ ql_dbg(ql_dbg_async, vha, 0x508a,
+ "Marking port lost loopid=%04x portid=%06x.\n",
+ fcport->loop_id, fcport->d_id.b24);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ break;
+
+global_port_update:
+ /* Port unavailable. */
ql_log(ql_log_warn, vha, 0x505e,
"Link is offline.\n");
@@ -998,7 +1027,6 @@ skip_rio:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (atomic_read(&fcport->state) != FCS_ONLINE)
continue;
- tmp_pid = fcport->d_id.b24;
if (fcport->d_id.b24 == rscn_entry) {
qla2x00_mark_device_lost(vha, fcport, 0, 0);
break;
@@ -1565,7 +1593,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
- } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
@@ -2045,14 +2073,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
/* Validate handle. */
- if (handle < req->num_outstanding_cmds)
+ if (handle < req->num_outstanding_cmds) {
sp = req->outstanding_cmds[handle];
- else
- sp = NULL;
-
- if (sp == NULL) {
+ if (!sp) {
+ ql_dbg(ql_dbg_io, vha, 0x3075,
+ "%s(%ld): Already returned command for status handle (0x%x).\n",
+ __func__, vha->host_no, sts->handle);
+ return;
+ }
+ } else {
ql_dbg(ql_dbg_io, vha, 0x3017,
- "Invalid status handle (0x%x).\n", sts->handle);
+ "Invalid status handle, out of range (0x%x).\n",
+ sts->handle);
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
if (IS_P3P_TYPE(ha))
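
The restructured validation above keeps the two failure modes apart: a handle beyond num_outstanding_cmds was never valid, while an in-range handle whose slot is NULL belongs to a command that already completed (a late or duplicate status entry). A compilable toy version of the same split, with invented names and bounds:

    #include <stdio.h>
    #include <stdint.h>

    #define SKETCH_MAX_CMDS 32
    static void *outstanding[SKETCH_MAX_CMDS];

    /* Hypothetical mirror of the reworked lookup: range-check first, then
     * distinguish an empty slot from a handle that was never valid. */
    static void *sketch_lookup_handle(uint32_t handle)
    {
    	if (handle >= SKETCH_MAX_CMDS) {
    		fprintf(stderr, "invalid handle, out of range (0x%x)\n",
    			(unsigned)handle);
    		return NULL;
    	}
    	if (!outstanding[handle])
    		fprintf(stderr, "already returned command (0x%x)\n",
    			(unsigned)handle);
    	return outstanding[handle];
    }

    int main(void)
    {
    	sketch_lookup_handle(7);	/* empty slot: already returned */
    	sketch_lookup_handle(64);	/* out of range: never valid */
    	return 0;
    }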
@@ -2339,12 +2371,12 @@ out:
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
"portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
cp->cmnd, scsi_bufflen(cp), rsp_info_len,
- resid_len, fw_resid_len);
+ resid_len, fw_resid_len, sp, cp);
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
@@ -2441,13 +2473,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
- "Error entry - invalid handle/queue.\n");
-
- if (IS_P3P_TYPE(ha))
- set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
- else
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ "Error entry - invalid handle/queue (%04x).\n", que);
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..cb11e04be568 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -555,7 +555,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
if (IS_QLA27XX(ha))
- mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
+ mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
+ MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;
+
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -571,6 +573,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
@@ -580,6 +583,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->phy_version[1] = mcp->mb[9] >> 8;
ha->phy_version[2] = mcp->mb[9] & 0xff;
}
+
if (IS_FWI2_CAPABLE(ha)) {
ha->fw_attributes_h = mcp->mb[15];
ha->fw_attributes_ext[0] = mcp->mb[16];
@@ -591,7 +595,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
}
+
if (IS_QLA27XX(ha)) {
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+ ha->pep_version[0] = mcp->mb[13] & 0xff;
+ ha->pep_version[1] = mcp->mb[14] >> 8;
+ ha->pep_version[2] = mcp->mb[14] & 0xff;
ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
}
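
The 27xx additions assemble 32-bit firmware addresses from adjacent 16-bit mailbox registers, low word in the even register. A quick arithmetic check of that packing, with made-up register values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint16_t mb18 = 0x2000, mb19 = 0x00fe;	/* invented mailbox words */
    	uint32_t shared_ram_start = ((uint32_t)mb19 << 16) | mb18;

    	assert(shared_ram_start == 0x00fe2000u);
    	return 0;
    }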
@@ -1135,20 +1146,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
}
/* If FA-WWN supported */
- if (mcp->mb[7] & BIT_14) {
- vha->port_name[0] = MSB(mcp->mb[16]);
- vha->port_name[1] = LSB(mcp->mb[16]);
- vha->port_name[2] = MSB(mcp->mb[17]);
- vha->port_name[3] = LSB(mcp->mb[17]);
- vha->port_name[4] = MSB(mcp->mb[18]);
- vha->port_name[5] = LSB(mcp->mb[18]);
- vha->port_name[6] = MSB(mcp->mb[19]);
- vha->port_name[7] = LSB(mcp->mb[19]);
- fc_host_port_name(vha->host) =
- wwn_to_u64(vha->port_name);
- ql_dbg(ql_dbg_mbx, vha, 0x10ca,
- "FA-WWN acquired %016llx\n",
- wwn_to_u64(vha->port_name));
+ if (IS_FAWWN_CAPABLE(vha->hw)) {
+ if (mcp->mb[7] & BIT_14) {
+ vha->port_name[0] = MSB(mcp->mb[16]);
+ vha->port_name[1] = LSB(mcp->mb[16]);
+ vha->port_name[2] = MSB(mcp->mb[17]);
+ vha->port_name[3] = LSB(mcp->mb[17]);
+ vha->port_name[4] = MSB(mcp->mb[18]);
+ vha->port_name[5] = LSB(mcp->mb[18]);
+ vha->port_name[6] = MSB(mcp->mb[19]);
+ vha->port_name[7] = LSB(mcp->mb[19]);
+ fc_host_port_name(vha->host) =
+ wwn_to_u64(vha->port_name);
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+ "FA-WWN acquired %016llx\n",
+ wwn_to_u64(vha->port_name));
+ }
}
}
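
Conversely, the FA-WWN block splits each 16-bit mailbox word into two WWN bytes, high byte first. The driver's MSB()/LSB() macros behave like the stand-ins below (values invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_MSB(w)	((uint8_t)((uint16_t)(w) >> 8))
    #define SKETCH_LSB(w)	((uint8_t)(w))

    int main(void)
    {
    	uint16_t mb16 = 0x2132;	/* invented: first word of the WWN */

    	assert(SKETCH_MSB(mb16) == 0x21);
    	assert(SKETCH_LSB(mb16) == 0x32);
    	return 0;
    }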
@@ -1239,7 +1252,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
"Entered %s.\n", __func__);
if (IS_P3P_TYPE(ha) && ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+ qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
if (ha->flags.npiv_supported)
@@ -1865,7 +1878,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
uint32_t iop[2];
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
- struct rsp_que *rsp;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
"Entered %s.\n", __func__);
@@ -1874,7 +1886,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
req = ha->req_q_map[0];
else
req = vha->req;
- rsp = req->rsp;
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -1888,11 +1899,11 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
- lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
+ lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (opt & BIT_0)
- lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
+ lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (opt & BIT_1)
- lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
+ lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
@@ -1907,7 +1918,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
"Failed to complete IOCB -- error status (%x).\n",
lg->entry_status);
rval = QLA_FUNCTION_FAILED;
- } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
iop[0] = le32_to_cpu(lg->io_parameter[0]);
iop[1] = le32_to_cpu(lg->io_parameter[1]);
@@ -1961,7 +1972,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
- if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+ if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
mb[10] |= BIT_7; /* Confirmed Completion
* Allowed
*/
@@ -2142,7 +2153,6 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
dma_addr_t lg_dma;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
- struct rsp_que *rsp;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
"Entered %s.\n", __func__);
@@ -2159,13 +2169,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
req = ha->req_q_map[0];
else
req = vha->req;
- rsp = req->rsp;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags =
- __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
+ cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
LCF_FREE_NPORT);
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
@@ -2181,7 +2190,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
"Failed to complete IOCB -- error status (%x).\n",
lg->entry_status);
rval = QLA_FUNCTION_FAILED;
- } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x1071,
"Failed to complete IOCB -- completion status (%x) "
"ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
@@ -2415,7 +2424,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
*orig_iocb_cnt = mcp->mb[10];
if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
- if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
+ if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
+ IS_QLA27XX(vha->hw)) && max_fcfs)
*max_fcfs = mcp->mb[12];
}
@@ -2672,7 +2682,7 @@ qla24xx_abort_command(srb_t *sp)
"Failed to complete IOCB -- error status (%x).\n",
abt->entry_status);
rval = QLA_FUNCTION_FAILED;
- } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
+ } else if (abt->nport_handle != cpu_to_le16(0)) {
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
@@ -2755,8 +2765,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
"Failed to complete IOCB -- error status (%x).\n",
sts->entry_status);
rval = QLA_FUNCTION_FAILED;
- } else if (sts->comp_status !=
- __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x1096,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(sts->comp_status));
@@ -2852,7 +2861,8 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -2890,7 +2900,8 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3482,7 +3493,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
"Failed to complete IOCB -- error status (%x).\n",
vpmod->comp_status);
rval = QLA_FUNCTION_FAILED;
- } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x10bf,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(vpmod->comp_status));
@@ -3541,7 +3552,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
vce->entry_type = VP_CTRL_IOCB_TYPE;
vce->entry_count = 1;
vce->command = cpu_to_le16(cmd);
- vce->vp_count = __constant_cpu_to_le16(1);
+ vce->vp_count = cpu_to_le16(1);
/* index map in firmware starts with 1; decrement index
* this is ok as we never use index 0
@@ -3561,7 +3572,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
"Failed to complete IOCB -- error status (%x).\n",
vce->entry_status);
rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x10c5,
"Failed to complet IOCB -- completion status (%x).\n",
le16_to_cpu(vce->comp_status));
@@ -3898,7 +3909,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(rsp->rsp_q_out, 0);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
WRT_REG_DWORD(rsp->rsp_q_in, 0);
}
@@ -5345,7 +5356,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cc94192511cf..c5dd594f6c31 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -371,7 +371,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
- int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags = 0;
@@ -392,7 +391,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
- ret = qla2x00_do_dpc_vp(vp);
+ qla2x00_do_dpc_vp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 6d190b4b82a0..b5029e543b91 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -862,7 +862,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
req->length = ha->req_que_len;
- req->ring = (void *)ha->iobase + ha->req_que_off;
+ req->ring = (void __force *)ha->iobase + ha->req_que_off;
req->dma = bar2_hdl + ha->req_que_off;
if ((!req->ring) || (req->length == 0)) {
ql_log_pci(ql_log_info, ha->pdev, 0x012f,
@@ -877,7 +877,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
ha->req_que_off, (u64)req->dma);
rsp->length = ha->rsp_que_len;
- rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+ rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
rsp->dma = bar2_hdl + ha->rsp_que_off;
if ((!rsp->ring) || (rsp->length == 0)) {
ql_log_pci(ql_log_info, ha->pdev, 0x0131,
@@ -1317,10 +1317,10 @@ int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
int rval;
- unsigned long flags, save_flags;
+ unsigned long flags;
rval = QLA_SUCCESS;
- save_flags = flags = vha->dpc_flags;
+ flags = vha->dpc_flags;
ql_dbg(ql_dbg_disc, vha, 0x2090,
"Configure devices -- dpc flags =0x%lx\n", flags);
@@ -1425,7 +1425,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __iomem *)&pkt->signature,
+ WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -2279,7 +2279,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
struct sts_entry_fx00 *sts;
__le16 comp_status;
__le16 scsi_status;
- uint16_t ox_id;
__le16 lscsi_status;
int32_t resid;
uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
@@ -2344,7 +2343,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
fcport = sp->fcport;
- ox_id = 0;
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
@@ -2528,12 +2526,12 @@ check_scsi_status:
ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
"tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+ "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
"par_sense_len=0x%x, rsp_info_len=0x%x\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->tgt_id,
lscsi_status, cp->cmnd, scsi_bufflen(cp),
- rsp_info_len, resid_len, fw_resid_len, sense_len,
+ rsp_info, resid_len, fw_resid_len, sense_len,
par_sense_len, rsp_info_len);
if (rsp->status_srb == NULL)
@@ -3009,7 +3007,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
/* No data transfer */
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ lcmd_pkt->byte_count = cpu_to_le32(0);
return;
}
@@ -3071,7 +3069,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
int
qlafx00_start_scsi(srb_t *sp)
{
- int ret, nseg;
+ int nseg;
unsigned long flags;
uint32_t index;
uint32_t handle;
@@ -3088,8 +3086,6 @@ qlafx00_start_scsi(srb_t *sp)
struct scsi_lun llun;
/* Setup device pointers. */
- ret = 0;
-
rsp = ha->rsp_q_map[0];
req = vha->req;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1620b0ec977b..eb0cc5475c45 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -347,32 +347,31 @@ char *qdev_state(uint32_t dev_state)
}
/*
- * In: 'off' is offset from CRB space in 128M pci map
- * Out: 'off' is 2M pci map addr
+ * In: 'off_in' is offset from CRB space in 128M pci map
+ * Out: 'off_out' is 2M pci map addr
* side effect: lock crb window
*/
static void
-qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
+ void __iomem **off_out)
{
u32 win_read;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- ha->crb_win = CRB_HI(*off);
- writel(ha->crb_win,
- (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ ha->crb_win = CRB_HI(off_in);
+ writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD((void __iomem *)
- (CRB_WINDOW_2M + ha->nx_pcibase));
+ win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
"!= Read crbwin (0x%x), off=0x%lx.\n",
- __func__, ha->crb_win, win_read, *off);
+ __func__, ha->crb_win, win_read, off_in);
}
- *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+ *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
static inline unsigned long
@@ -417,29 +416,30 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
}
static int
-qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
+ void __iomem **off_out)
{
struct crb_128M_2M_sub_block_map *m;
- if (*off >= QLA82XX_CRB_MAX)
+ if (off_in >= QLA82XX_CRB_MAX)
return -1;
- if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
- *off = (*off - QLA82XX_PCI_CAMQM) +
+ if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
+ *off_out = (off_in - QLA82XX_PCI_CAMQM) +
QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
return 0;
}
- if (*off < QLA82XX_PCI_CRBSPACE)
+ if (off_in < QLA82XX_PCI_CRBSPACE)
return -1;
- *off -= QLA82XX_PCI_CRBSPACE;
+ *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
/* Try direct map */
- m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+ m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
- if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
- *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
+ *off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
return 0;
}
/* Not in direct map, use crb window */
@@ -465,51 +465,61 @@ static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
}
int
-qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
{
+ void __iomem *off;
unsigned long flags = 0;
int rv;
- rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+ rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
BUG_ON(rv == -1);
if (rv == 1) {
+#ifndef __CHECKER__
write_lock_irqsave(&ha->hw_lock, flags);
+#endif
qla82xx_crb_win_lock(ha);
- qla82xx_pci_set_crbwindow_2M(ha, &off);
+ qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
writel(data, (void __iomem *)off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
}
return 0;
}
int
-qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
{
+ void __iomem *off;
unsigned long flags = 0;
int rv;
u32 data;
- rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+ rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
BUG_ON(rv == -1);
if (rv == 1) {
+#ifndef __CHECKER__
write_lock_irqsave(&ha->hw_lock, flags);
+#endif
qla82xx_crb_win_lock(ha);
- qla82xx_pci_set_crbwindow_2M(ha, &off);
+ qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
- data = RD_REG_DWORD((void __iomem *)off);
+ data = RD_REG_DWORD(off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
}
return data;
}
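
This refactor is what enables the cast cleanups elsewhere in the patch: the old interface mutated one ulong that entered as a CRB offset and left as a mapped address, so every caller had to cast it back to void __iomem *. Splitting it into a by-value input and a pointer-typed output lets sparse track the __iomem annotation end to end. Reduced to its shape (user-space types, invented window bound):

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch of the in/out split: the input stays an integer offset, the
     * output is born a pointer, and nothing is laundered through casts. */
    static int sketch_get_crb_addr(uintptr_t base, uintptr_t off_in,
    			       void **off_out)
    {
    	if (off_in > 0xffffu)
    		return -1;	/* invented: outside the mapped window */
    	*off_out = (void *)(base + off_in);
    	return 0;
    }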
@@ -547,9 +557,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha)
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}
-/* PCI Windowing for DDR regions. */
-#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
- (((addr) <= (high)) && ((addr) >= (low)))
/*
* check memory access boundary.
* used by test agent. support ddr access only for now
@@ -558,9 +565,9 @@ static unsigned long
qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
unsigned long long addr, int size)
{
- if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX) ||
- !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+ !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX) ||
((size != 1) && (size != 2) && (size != 4) && (size != 8)))
return 0;
@@ -577,7 +584,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
u32 win_read;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX)) {
/* DDR network side */
window = MN_WIN(addr);
@@ -592,7 +599,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
QLA82XX_ADDR_OCM0_MAX)) {
unsigned int temp1;
if ((addr & 0x00ff800) == 0xff800) {
@@ -615,7 +622,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
- } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ } else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
QLA82XX_P3_ADDR_QDR_NET_MAX)) {
/* QDR network side */
window = MS_WIN(addr);
@@ -656,16 +663,16 @@ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
/* DDR network side */
- if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX))
BUG();
- else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
QLA82XX_ADDR_OCM0_MAX))
return 1;
- else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+ else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
QLA82XX_ADDR_OCM1_MAX))
return 1;
- else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+ else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
/* QDR network side */
window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
if (ha->qdr_sn_window == window)
@@ -922,20 +929,18 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
- (off & 0xFFFF0000));
+ WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
/* Read back value to make sure write has gone through */
- RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD((void __iomem *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
- data);
+ WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+ data);
else
- rval = RD_REG_DWORD((void __iomem *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+ rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase);
return rval;
}
@@ -1663,8 +1668,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
}
len = pci_resource_len(ha->pdev, 0);
- ha->nx_pcibase =
- (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+ ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
if (!ha->nx_pcibase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
"Cannot remap pcibase MMIO, aborting.\n");
@@ -1673,17 +1677,13 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
/* Mapping of IO base pointer */
if (IS_QLA8044(ha)) {
- ha->iobase =
- (device_reg_t *)((uint8_t *)ha->nx_pcibase);
+ ha->iobase = ha->nx_pcibase;
} else if (IS_QLA82XX(ha)) {
- ha->iobase =
- (device_reg_t *)((uint8_t *)ha->nx_pcibase +
- 0xbc000 + (ha->pdev->devfn << 11));
+ ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
}
if (!ql2xdbwr) {
- ha->nxdb_wr_ptr =
- (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+ ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
(ha->pdev->devfn << 12)), 4);
if (!ha->nxdb_wr_ptr) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
@@ -1694,10 +1694,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
/* Mapping of IO base pointer,
* doorbell read and write pointers
*/
- ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+ ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
(ha->pdev->devfn * 8);
} else {
- ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+ ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
QLA82XX_CAMRAM_DB1 :
QLA82XX_CAMRAM_DB2);
}
@@ -1707,12 +1707,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- (void *)ha->nx_pcibase, ha->iobase,
+ ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- (void *)ha->nx_pcibase, ha->iobase,
+ ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
return 0;
@@ -1740,8 +1740,8 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
ql_dbg(ql_dbg_init, vha, 0x0043,
- "Chip revision:%d.\n",
- ha->chip_revision);
+ "Chip revision:%d; pci_set_mwi() returned %d.\n",
+ ha->chip_revision, ret);
return 0;
}
@@ -1768,8 +1768,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
/* Setup ring parameters in initialization control block. */
icb = (struct init_cb_81xx *)ha->init_cb;
- icb->request_q_outpointer = __constant_cpu_to_le16(0);
- icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_outpointer = cpu_to_le16(0);
+ icb->response_q_inpointer = cpu_to_le16(0);
icb->request_q_length = cpu_to_le16(req->length);
icb->response_q_length = cpu_to_le16(rsp->length);
icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -1777,9 +1777,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
- WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
- WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
- WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+ WRT_REG_DWORD(&reg->req_q_out[0], 0);
+ WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
+ WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
}
static int
@@ -2298,7 +2298,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
}
-inline void
+static inline void
qla82xx_set_idc_version(scsi_qla_host_t *vha)
{
int idc_ver;
@@ -2481,14 +2481,12 @@ try_blob_fw:
ql_log(ql_log_info, vha, 0x00a5,
"Firmware loaded successfully from binary blob.\n");
return QLA_SUCCESS;
- } else {
- ql_log(ql_log_fatal, vha, 0x00a6,
- "Firmware load failed for binary blob.\n");
- blob->fw = NULL;
- blob = NULL;
- goto fw_load_failed;
}
- return QLA_SUCCESS;
+
+ ql_log(ql_log_fatal, vha, 0x00a6,
+ "Firmware load failed for binary blob.\n");
+ blob->fw = NULL;
+ blob = NULL;
fw_load_failed:
return QLA_FUNCTION_FAILED;
@@ -2549,7 +2547,7 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
"Do ROM fast read failed.\n");
goto done_read;
}
- dwptr[i] = __constant_cpu_to_le32(val);
+ dwptr[i] = cpu_to_le32(val);
}
done_read:
return dwptr;
@@ -2671,7 +2669,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
{
int ret;
uint32_t liter;
- uint32_t sec_mask, rest_addr;
+ uint32_t rest_addr;
dma_addr_t optrom_dma;
void *optrom = NULL;
int page_mode = 0;
@@ -2693,7 +2691,6 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
}
rest_addr = ha->fdt_block_size - 1;
- sec_mask = ~rest_addr;
ret = qla82xx_unprotect_flash(ha);
if (ret) {
@@ -2789,7 +2786,6 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- struct device_reg_82xx __iomem *reg;
uint32_t dbval;
/* Adjust ring index. */
@@ -2800,18 +2796,16 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
} else
req->ring_ptr++;
- reg = &ha->iobase->isp82;
dbval = 0x04 | (ha->portnum << 5);
dbval = dbval | (req->id << 8) | (req->ring_index << 16);
if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3842,8 +3836,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD((void __iomem *)
- (r_addr + ha->nx_pcibase));
+ r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 000c57e4d033..007192d7bad8 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -462,12 +462,11 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
static void
qla8044_flash_unlock(scsi_qla_host_t *vha)
{
- int ret_val;
struct qla_hw_data *ha = vha->hw;
/* Reading FLASH_UNLOCK register unlocks the Flash */
qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
- ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+ qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
}
@@ -561,7 +560,7 @@ qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
return buf;
}
-inline int
+static inline int
qla8044_need_reset(struct scsi_qla_host *vha)
{
uint32_t drv_state, drv_active;
@@ -1130,9 +1129,9 @@ qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
}
for (i = 0; i < count; i++, addr += 16) {
- if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+ if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
QLA8044_ADDR_QDR_NET_MAX)) ||
- (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+ (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
QLA8044_ADDR_DDR_NET_MAX)))) {
ret_val = QLA_FUNCTION_FAILED;
goto exit_ms_mem_write_unlock;
@@ -1605,7 +1604,7 @@ qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
}
-inline void
+static inline void
qla8044_set_rst_ready(struct scsi_qla_host *vha)
{
uint32_t drv_state;
@@ -2992,7 +2991,7 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
uint32_t addr1, addr2, value, data, temp, wrVal;
uint8_t stride, stride2;
uint16_t count;
- uint32_t poll, mask, data_size, modify_mask;
+ uint32_t poll, mask, modify_mask;
uint32_t wait_count = 0;
uint32_t *data_ptr = *d_ptr;
@@ -3009,7 +3008,6 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
poll = rddfe->poll;
mask = rddfe->mask;
modify_mask = rddfe->modify_mask;
- data_size = rddfe->data_size;
addr2 = addr1 + stride;
@@ -3091,7 +3089,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
uint8_t stride1, stride2;
uint32_t addr3, addr4, addr5, addr6, addr7;
uint16_t count, loop_cnt;
- uint32_t poll, mask;
+ uint32_t mask;
uint32_t *data_ptr = *d_ptr;
struct qla8044_minidump_entry_rdmdio *rdmdio;
@@ -3105,7 +3103,6 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
stride2 = rdmdio->stride_2;
count = rdmdio->count;
- poll = rdmdio->poll;
mask = rdmdio->mask;
value2 = rdmdio->value_2;
@@ -3164,7 +3161,7 @@ error:
static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
{
- uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+ uint32_t addr1, addr2, value1, value2, poll, r_value;
uint32_t wait_count = 0;
struct qla8044_minidump_entry_pollwr *pollwr_hdr;
@@ -3175,7 +3172,6 @@ static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
value2 = pollwr_hdr->value_2;
poll = pollwr_hdr->poll;
- mask = pollwr_hdr->mask;
while (wait_count < poll) {
qla8044_rd_reg_indirect(vha, addr1, &r_value);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index ada36057d7cd..02fe3c4cdf55 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -58,8 +58,10 @@
#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
/* PCI Windowing for DDR regions. */
-#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
- (((addr) <= (high)) && ((addr) >= (low)))
+static inline bool addr_in_range(u64 addr, u64 low, u64 high)
+{
+ return addr <= high && addr >= low;
+}
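
Promoting the range test from a macro to a static inline costs nothing at -O2 and gains typed, single-evaluation arguments; it also lets qla_nx.c drop its identical QLA82XX_ADDR_IN_RANGE() copy, as seen earlier in the patch. The bounds stay inclusive on both ends, which a small self-check makes explicit:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static inline bool addr_in_range(uint64_t addr, uint64_t low, uint64_t high)
    {
    	return addr <= high && addr >= low;
    }

    int main(void)
    {
    	assert(addr_in_range(0x10, 0x10, 0x20));	/* low bound inclusive */
    	assert(addr_in_range(0x20, 0x10, 0x20));	/* high bound inclusive */
    	assert(!addr_in_range(0x21, 0x10, 0x20));
    	return 0;
    }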
/* Indirectly Mapped Registers */
#define QLA8044_FLASH_SPI_STATUS 0x2808E010
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..c2dd17b1d26f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -656,7 +656,7 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
sp, GET_CMD_SP(sp));
if (ql2xextended_error_logging & ql_dbg_io)
- BUG();
+ WARN_ON(atomic_read(&sp->ref_count) == 0);
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
@@ -958,8 +958,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_dbg(ql_dbg_taskm, vha, 0x8002,
- "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
- vha->host_no, id, lun, sp, cmd);
+ "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
+ vha->host_no, id, lun, sp, cmd, sp->handle);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
@@ -967,14 +967,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = ha->isp_ops->abort_command(sp);
if (rval) {
- if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
- /*
- * Decrement the ref_count since we can't find the
- * command
- */
- atomic_dec(&sp->ref_count);
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
ret = SUCCESS;
- } else
+ else
ret = FAILED;
ql_dbg(ql_dbg_taskm, vha, 0x8003,
@@ -986,12 +981,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- /*
- * Clear the slot in the oustanding_cmds array if we can't find the
- * command to reclaim the resources.
- */
- if (rval == QLA_FUNCTION_PARAMETER_ERROR)
- vha->req->outstanding_cmds[sp->handle] = NULL;
sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2219,6 +2208,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2261:
+ ha->device_type |= DT_ISP2261;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -2296,7 +2292,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2504,6 +2501,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2973,7 +2971,6 @@ qla2x00_shutdown(struct pci_dev *pdev)
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
- struct Scsi_Host *scsi_host;
scsi_qla_host_t *vha;
unsigned long flags;
@@ -2984,7 +2981,7 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
BUG_ON(base_vha->list.next == &ha->vp_list);
/* This assumes first entry in ha->vp_list is always base vha */
vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
- scsi_host = scsi_host_get(vha->host);
+ scsi_host_get(vha->host);
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@@ -3229,11 +3226,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_lock_irqsave(vha->host->host_lock, flags);
fcport->drport = rport;
spin_unlock_irqrestore(vha->host->host_lock, flags);
+ qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else {
- fc_remote_port_delete(rport);
- qlt_fc_port_deleted(vha, fcport);
+ int now;
+ if (rport)
+ fc_remote_port_delete(rport);
+ qlt_do_generation_tick(vha, &now);
+ qlt_fc_port_deleted(vha, fcport, now);
}
}
@@ -3270,9 +3271,10 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
if (!do_login)
return;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
if (fcport->login_retry == 0) {
fcport->login_retry = vha->hw->login_retry_count;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_disc, vha, 0x2067,
"Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
@@ -3763,8 +3765,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->vp_fcports);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
+ INIT_LIST_HEAD(&vha->qla_cmd_list);
+ INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
spin_lock_init(&vha->work_lock);
+ spin_lock_init(&vha->cmd_list_lock);
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
@@ -4793,7 +4798,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
static int
qla2x00_do_dpc(void *data)
{
- int rval;
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
@@ -5025,7 +5029,7 @@ loop_resync_check:
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
&base_vha->dpc_flags))) {
- rval = qla2x00_loop_resync(base_vha);
+ qla2x00_loop_resync(base_vha);
clear_bit(LOOP_RESYNC_ACTIVE,
&base_vha->dpc_flags);
@@ -5709,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..3272ed5bbcc7 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -316,7 +316,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
- __constant_cpu_to_le16(0x1234), 100000);
+ cpu_to_le16(0x1234), 100000);
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
if (stat != QLA_SUCCESS || wprot != 0x1234) {
/* Write enable. */
@@ -691,9 +691,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
region = (struct qla_flt_region *)&flt[1];
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
flt_addr << 2, OPTROM_BURST_SIZE);
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
- if (flt->version != __constant_cpu_to_le16(1)) {
+ if (flt->version != cpu_to_le16(1)) {
ql_log(ql_log_warn, vha, 0x0047,
"Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -892,7 +892,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
fdt = (struct qla_fdt_layout *)req->ring;
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
- if (*wptr == __constant_cpu_to_le16(0xffff))
+ if (*wptr == cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
fdt->sig[3] != 'D')
@@ -991,7 +991,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
QLA82XX_IDC_PARAM_ADDR , 8);
- if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ if (*wptr == cpu_to_le32(0xffffffff)) {
ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
} else {
@@ -1051,9 +1051,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
- if (hdr.version == __constant_cpu_to_le16(0xffff))
+ if (hdr.version == cpu_to_le16(0xffff))
return;
- if (hdr.version != __constant_cpu_to_le16(1)) {
+ if (hdr.version != cpu_to_le16(1)) {
ql_dbg(ql_dbg_user, vha, 0x7090,
"Unsupported NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
{
uint32_t led_select_value = 0;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
goto out;
if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..75514a15bea0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
/*
* Global Variables
*/
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+/* This API intentionally takes dest as a parameter rather than returning
+ * the new value, so the store and the wmb() that must follow it cannot be
+ * forgotten by the caller */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+ *dest = atomic_inc_return(&base_vha->generation_tick);
+	/* make the new generation visible to other CPUs before returning */
+ wmb();
+}
+
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_tgt *tgt = sess->tgt;
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ bool logout_started = false;
+ fc_port_t fcport;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+ "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+ " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+ __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->logout_on_delete, sess->keep_nport_handle,
+ sess->plogi_ack_needed);
BUG_ON(!tgt);
+
+ if (sess->logout_on_delete) {
+ int rc;
+
+ memset(&fcport, 0, sizeof(fcport));
+ fcport.loop_id = sess->loop_id;
+ fcport.d_id = sess->s_id;
+ memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+ fcport.vha = vha;
+ fcport.tgt_session = sess;
+
+ rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
+
/*
* Release the target session for FC Nexus from fabric module code.
*/
if (sess->se_sess != NULL)
ha->tgt.tgt_ops->free_session(sess);
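+	/*
+	 * If a LOGO was scheduled above, wait here until the firmware
+	 * completes it; qlt_logo_completion_handler() sets
+	 * logout_completed once the LOGO finishes.
+	 */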
+ if (logout_started) {
+ bool traced = false;
+
+ while (!ACCESS_ONCE(sess->logout_completed)) {
+ if (!traced) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+ "%s: waiting for sess %p logout\n",
+ __func__, sess);
+ traced = true;
+ }
+ msleep(100);
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+ "%s: sess %p logout completed\n",
+ __func__, sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (sess->plogi_ack_needed)
+ qlt_send_notify_ack(vha, &sess->tm_iocb,
+ 0, 0, 0, 0, 0, 0);
+
+ list_del(&sess->sess_list_entry);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
"Unregistration of sess %p finished\n", sess);
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- list_del(&sess->sess_list_entry);
- if (sess->deleted)
- list_del(&sess->del_list_entry);
+ if (!list_empty(&sess->del_list_entry))
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
/* Global event */
- atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
- qlt_clear_tgt_db(ha->tgt.qla_tgt);
+ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
struct qla_tgt *tgt = sess->tgt;
uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
- if (sess->deleted)
- return;
+ if (sess->deleted) {
+ /* Upgrade to unconditional deletion in case it was temporary */
+ if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+ list_del(&sess->del_list_entry);
+ else
+ return;
+ }
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
- sess->deleted = 1;
- if (immediate)
+ if (immediate) {
dev_loss_tmo = 0;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ list_add(&sess->del_list_entry, &tgt->del_sess_list);
+ } else {
+ sess->deleted = QLA_SESS_DELETION_PENDING;
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ }
sess->expires = jiffies + dev_loss_tmo * HZ;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
- "deletion in %u secs (expires: %lu) immed: %d\n",
- sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
- sess->expires, immediate);
+ "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+ " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+ sess->vha->vp_idx, sess->port_name, sess->loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+ sess->generation);
if (immediate)
- schedule_delayed_work(&tgt->sess_del_work, 0);
+ mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
- BUG_ON(!sess->deleted);
+ BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
- list_del(&sess->del_list_entry);
+ list_del_init(&sess->del_list_entry);
sess->deleted = 0;
}
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
del_list_entry);
elapsed = jiffies;
if (time_after_eq(elapsed, sess->expires)) {
- qlt_undelete_sess(sess);
+ /* No turning back */
+ list_del_init(&sess->del_list_entry);
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
fcport->d_id.b.al_pa, fcport->d_id.b.area,
fcport->loop_id);
+ /* Cannot undelete at this point */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ return NULL;
+ }
+
if (sess->deleted)
qlt_undelete_sess(sess);
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
if (sess->local && !local)
sess->local = 0;
+
+ qlt_do_generation_tick(vha, &sess->generation);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
sess->local = local;
+ INIT_LIST_HEAD(&sess->del_list_entry);
+
+	/* Under normal circumstances we want to log out from the firmware when
+	 * the session eventually ends, releasing the corresponding nport handle.
+	 * In exceptional cases (e.g. when a new PLOGI is waiting), the relevant
+	 * code will adjust these flags as necessary. */
+ sess->logout_on_delete = 1;
+ sess->keep_nport_handle = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
spin_lock_irqsave(&ha->hardware_lock, flags);
list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
vha->vha_tgt.qla_tgt->sess_count++;
+ qlt_do_generation_tick(vha, &sess->generation);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
/*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
*/
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ /* Point of no return */
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
} else {
kref_get(&sess->se_sess->sess_kref);
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies the maximum session generation
+ * at which this deletion request is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
- unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
return;
- if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ if (!tgt)
return;
- spin_lock_irqsave(&ha->hardware_lock, flags);
if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
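+	/*
+	 * Compare via signed subtraction so the check stays correct if
+	 * the generation counter wraps around.
+	 */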
+ if (max_gen - sess->generation < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+ "Ignoring stale deletion request for se_sess %p / sess %p"
+ " for port %8phC, req_gen %d, sess_gen %d\n",
+ sess->se_sess, sess, sess->port_name, max_gen,
+ sess->generation);
return;
}
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->local = 1;
qlt_schedule_sess_for_deletion(sess, false);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1023,7 +1141,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1081,7 +1199,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
resp->sof_type = abts->sof_type;
resp->exchange_address = abts->exchange_address;
resp->fcp_hdr_le = abts->fcp_hdr_le;
- f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
F_CTL_LAST_SEQ | F_CTL_END_SEQ |
F_CTL_SEQ_INITIATIVE);
p = (uint8_t *)&f_ctl;
@@ -1156,15 +1274,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->entry_count = 1;
ctio->nport_handle = entry->nport_handle;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
ctio->exchange_addr = entry->exchange_addr_to_abort;
- ctio->u.status1.flags =
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
/* Memory Barrier */
@@ -1175,6 +1292,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
FCP_TMF_CMPL, true);
}
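+/*
+ * Mark the command or pending session op matching the given exchange tag
+ * as aborted. Returns 1 if a match was found on either list, 0 otherwise.
+ */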
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+
+ spin_lock(&vha->cmd_list_lock);
+
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ if (tag == op->atio.u.isp24.exchange_addr) {
+ op->aborted = true;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ if (tag == cmd->atio.u.isp24.exchange_addr) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
+ spin_unlock(&vha->cmd_list_lock);
+ return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which lun reset was received
+ * XXX does not go through the lists of other ports (which may have cmds
+ * for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+ uint32_t lun, uint8_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+
+ key = sid_to_key(s_id);
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key;
+ uint32_t op_lun;
+
+ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ op_lun = scsilun_to_int(
+ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+ if (op_key == key && op_lun == lun)
+ op->aborted = true;
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key;
+ uint32_t cmd_lun;
+
+ cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ cmd_lun = scsilun_to_int(
+ (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+ if (cmd_key == key && cmd_lun == lun)
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ }
+ spin_unlock(&vha->cmd_list_lock);
+}
+
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1380,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
spin_unlock(&se_sess->sess_cmd_lock);
- if (!found_lun)
- return -ENOENT;
+ /* cmd not in LIO lists, look in qla list */
+ if (!found_lun) {
+ if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+ /* send TASK_ABORT response immediately */
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+ return 0;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+ "unable to find cmd in driver or LIO for tag 0x%x\n",
+ abts->exchange_addr_to_abort);
+ return -ENOENT;
+ }
+ }
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1476,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return;
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
rc = __qlt_24xx_handle_abts(vha, abts, sess);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1324,20 +1521,19 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio->nport_handle = mcmd->sess->loop_id;
- ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_SEND_STATUS);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
- __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
- ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = cpu_to_le16(8);
ctio->u.status1.sense_data[0] = resp_code;
/* Memory Barrier */
@@ -1588,7 +1784,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
- pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -1726,20 +1922,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (unlikely(cmd->aborted)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
- "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
- vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
- cmd->state = QLA_TGT_STATE_ABORTED;
- cmd->cmd_flags |= BIT_6;
-
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
- /* !! At this point cmd could be already freed !! */
- return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
- }
-
prm->cmd = cmd;
prm->tgt = tgt;
prm->rq_result = scsi_status;
@@ -1903,10 +2085,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
{
prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
(uint32_t)sizeof(ctio->u.status1.sense_data));
- ctio->u.status0.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
- ctio->u.status0.flags |= __constant_cpu_to_le16(
+ ctio->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
@@ -1923,17 +2104,17 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
"non GOOD status\n");
goto skip_explict_conf;
}
- ctio->u.status1.flags |= __constant_cpu_to_le16(
+ ctio->u.status1.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
skip_explict_conf:
ctio->u.status1.flags &=
- ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
ctio->u.status1.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
ctio->u.status1.scsi_status |=
- __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
for (i = 0; i < prm->sense_buffer_len/4; i++)
@@ -1953,9 +2134,9 @@ skip_explict_conf:
#endif
} else {
ctio->u.status1.flags &=
- ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
ctio->u.status1.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
ctio->u.status1.sense_length = 0;
memset(ctio->u.status1.sense_data, 0,
sizeof(ctio->u.status1.sense_data));
@@ -2077,7 +2258,6 @@ static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
uint32_t *cur_dsd;
- int sgc;
uint32_t transfer_length = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
@@ -2094,7 +2274,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t t16;
- sgc = 0;
ha = vha->hw;
pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
@@ -2184,7 +2363,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
- pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2200,9 +2379,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set transfer direction */
if (cmd->dma_data_direction == DMA_TO_DEVICE)
- pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+ pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
- pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+ pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
pkt->dseg_count = prm->tot_dsds;
@@ -2254,11 +2433,11 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
- crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Walks data segments */
- pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
@@ -2301,6 +2480,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
unsigned long flags = 0;
int res;
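+	/*
+	 * Do not post a response for a session that is being deleted:
+	 * either the exchange is already gone (logout completed) or it
+	 * must be terminated instead.
+	 */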
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ cmd->state = QLA_TGT_STATE_PROCESSED;
+ if (cmd->sess->logout_completed)
+ /* no need to terminate. FW already freed exchange. */
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
memset(&prm, 0, sizeof(prm));
qlt_check_srr_debug(cmd, &xmit_type);
@@ -2313,9 +2505,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
if (unlikely(res != 0)) {
- if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
- return 0;
-
return res;
}
@@ -2345,15 +2534,16 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_build_ctio_crc2_pkt(&prm, vha);
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
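+		/* Give back the request ring entries reserved earlier. */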
+ vha->req->cnt += full_req_cnt;
goto out_unmap_unlock;
-
+ }
pkt = (struct ctio7_to_24xx *)prm.pkt;
if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
pkt->u.status0.flags |=
- __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ cpu_to_le16(CTIO7_FLAGS_DATA_IN |
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2365,11 +2555,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cpu_to_le16(prm.rq_result);
pkt->u.status0.residual =
cpu_to_le32(prm.residual);
- pkt->u.status0.flags |= __constant_cpu_to_le16(
+ pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
if (qlt_need_explicit_conf(ha, cmd, 0)) {
pkt->u.status0.flags |=
- __constant_cpu_to_le16(
+ cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
}
@@ -2397,12 +2587,12 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
ctio->entry_count = 1;
ctio->entry_type = CTIO_TYPE7;
ctio->dseg_count = 0;
- ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ ctio->u.status1.flags &= ~cpu_to_le16(
CTIO7_FLAGS_DATA_IN);
/* Real finish is ctio_m1's finish */
pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
- pkt->u.status0.flags |= __constant_cpu_to_le16(
+ pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_DONT_RET_CTIO);
/* qlt_24xx_init_ctio_to_isp will correct
@@ -2461,7 +2651,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+ (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
/*
* Either a chip reset is active or this request was from
* previous life, just abort the processing.
@@ -2485,10 +2676,13 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
else
res = qlt_24xx_build_ctio_pkt(&prm, vha);
- if (unlikely(res != 0))
+ if (unlikely(res != 0)) {
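+		/* Give back the request ring entries reserved earlier. */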
+ vha->req->cnt += prm.req_cnt;
goto out_unlock_free_unmap;
+ }
+
pkt = (struct ctio7_to_24xx *)prm.pkt;
- pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2563,7 +2757,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* Update protection tag */
if (cmd->prot_sg_cnt) {
- uint32_t i, j = 0, k = 0, num_ent;
+ uint32_t i, k = 0, num_ent;
struct scatterlist *sg, *sgl;
@@ -2576,7 +2770,6 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
k += num_ent;
continue;
}
- j = blocks_done - k - 1;
k = blocks_done;
break;
}
@@ -2651,6 +2844,89 @@ out:
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
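+/*
+ * Send a NOTIFY_ACK with the terminate flag set for the given immediate
+ * notify IOCB. ha->hardware_lock is expected to be held on entry.
+ */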
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy)
+{
+ struct nack_to_isp *nack;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+ "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe080,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+		    cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+
+ /* terminate */
+ nack->u.isp24.flags |=
+	    cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+ unsigned long flags = 0;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+ goto done;
+ }
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0 /* Todo */
+ if (rc == -ENOMEM)
+ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
@@ -2687,14 +2963,14 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
- ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_TERMINATE);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -2715,7 +2991,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
- unsigned long flags;
+ unsigned long flags = 0;
int rc;
if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3007,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
rc = __qlt_send_term_exchange(vha, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
!cmd->cmd_sent_to_fw)) {
- if (!ha_locked && !in_interrupt())
- msleep(250); /* just in case */
-
- qlt_unmap_sg(vha, cmd);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
}
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
return;
}
@@ -2792,6 +3069,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
}
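+/*
+ * Terminate the exchange for a command aborted by the target core; replaces
+ * the aborted-command handling formerly done inline in qlt_pre_xmit_response().
+ */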
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange for aborted cmd=%p "
+ "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+ se_cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->cmd_flags |= BIT_6;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt_sess *sess = cmd->sess;
@@ -2915,7 +3210,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
- __constant_cpu_to_le16(OF_TERM_EXCH));
+ cpu_to_le16(OF_TERM_EXCH));
} else
term = 1;
@@ -3015,7 +3310,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
dump_stack();
}
- cmd->cmd_flags |= BIT_12;
+ cmd->cmd_flags |= BIT_17;
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3063,7 +3358,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
- const struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
@@ -3081,7 +3375,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
se_cmd = &cmd->se_cmd;
- tfo = se_cmd->se_tfo;
cmd->cmd_sent_to_fw = 0;
qlt_unmap_sg(vha, cmd);
@@ -3177,23 +3470,21 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- ;
+ cmd->cmd_flags |= BIT_12;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- int rx_status = 0;
-
cmd->state = QLA_TGT_STATE_DATA_IN;
- if (unlikely(status != CTIO_SUCCESS))
- rx_status = -EIO;
- else
+ if (status == CTIO_SUCCESS)
cmd->write_data_transferred = 1;
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ cmd->cmd_flags |= BIT_18;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
+ cmd->cmd_flags |= BIT_19;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3496,6 @@ skip_term:
dump_stack();
}
-
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3263,6 +3553,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (tgt->tgt_stop)
goto out_term;
+ if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+ "cmd with tag %u is aborted\n",
+ cmd->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3613,12 @@ out_term:
static void qlt_do_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&cmd->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
__qlt_do_work(cmd);
}
@@ -3345,6 +3648,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
+ cmd->cmd_flags = 0;
+ cmd->jiffies_at_alloc = get_jiffies_64();
+
+ cmd->reset_count = vha->hw->chip_reset;
+
return cmd;
}
@@ -3362,14 +3670,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&op->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ if (op->aborted) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+ "sess_op with tag %u is aborted\n",
+ op->atio.u.isp24.exchange_addr);
+ goto out_term;
+ }
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
if (op->atio.u.raw.entry_count > 1) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
+ "Dropping multy entry atio %p\n", &op->atio);
goto out_term;
}
@@ -3434,10 +3753,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
memcpy(&op->atio, atio, sizeof(*atio));
op->vha = vha;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
}
+
+ /* Another WWN used to have our s_id. Our PLOGI scheduled its
+ * session deletion, but it's still in sess_del_work wq */
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ ql_dbg(ql_dbg_io, vha, 0x3061,
+ "New command while old session %p is being deleted\n",
+ sess);
+ return -EFAULT;
+ }
+
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
@@ -3451,13 +3785,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- cmd->cmd_flags = 0;
- cmd->jiffies_at_alloc = get_jiffies_64();
-
- cmd->reset_count = vha->hw->chip_reset;
-
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
+
+ spin_lock(&vha->cmd_list_lock);
+ list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+ spin_unlock(&vha->cmd_list_lock);
+
INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work);
return 0;
@@ -3471,6 +3805,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res;
uint8_t tmr_func;
@@ -3511,6 +3846,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
"qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
tmr_func = TMR_LUN_RESET;
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
break;
case QLA_TGT_CLEAR_TS:
@@ -3580,12 +3916,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_tgt *tgt;
struct qla_tgt_sess *sess;
uint32_t lun, unpacked_lun;
- int lun_size, fn;
+ int fn;
tgt = vha->vha_tgt.qla_tgt;
lun = a->u.isp24.fcp_cmnd.lun;
- lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
a->u.isp24.fcp_hdr.s_id);
@@ -3599,6 +3934,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
sizeof(struct atio_from_isp));
}
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+ return -EFAULT;
+
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
@@ -3664,22 +4002,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
return __qlt_abort_task(vha, iocb, sess);
}
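+/*
+ * Completion callback for the async LOGO issued at session teardown; flags
+ * logout_completed so qlt_free_session_done() can stop waiting.
+ */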
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+ if (fcport->tgt_session) {
+ if (rc != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+ "%s: se_sess %p / sess %p from"
+ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+ " LOGO failed: %#x\n",
+ __func__,
+ fcport->tgt_session->se_sess,
+ fcport->tgt_session,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, rc);
+ }
+
+ fcport->tgt_session->logout_completed = 1;
+ }
+}
+
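+/* Swap the contents of two immediate notify IOCBs in place. */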
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+ struct imm_ntfy_from_isp *b)
+{
+ struct imm_ntfy_from_isp tmp;
+ memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+ memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+ memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+ *
+ * Schedules sessions with matching port_id/loop_id but different wwn for
+ * deletion. Returns the existing session with matching wwn if present,
+ * NULL otherwise.
+ */
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+ port_id_t port_id, uint16_t loop_id)
+{
+ struct qla_tgt_sess *sess = NULL, *other_sess;
+ uint64_t other_wwn;
+
+ list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+ other_wwn = wwn_to_u64(other_sess->port_name);
+
+ if (wwn == other_wwn) {
+ WARN_ON(sess);
+ sess = other_sess;
+ continue;
+ }
+
+ /* find other sess with nport_id collision */
+ if (port_id.b24 == other_sess->s_id.b24) {
+ if (loop_id != other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+ /*
+ * logout_on_delete is set by default, but another
+ * session that has the same s_id/loop_id combo
+				 * might have cleared it when it requested this
+				 * session's deletion, so don't touch it
+ */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ } else {
+ /*
+ * Another wwn used to have our s_id/loop_id
+ * combo - kill the session, but don't log out
+ */
+ sess->logout_on_delete = 0;
+ qlt_schedule_sess_for_deletion(other_sess,
+ true);
+ }
+ continue;
+ }
+
+ /* find other sess with nport handle collision */
+ if (loop_id == other_sess->loop_id) {
+ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+			/* Same loop_id but different s_id -
+			 * OK to kill and log out */
+ qlt_schedule_sess_for_deletion(other_sess, true);
+ }
+ }
+
+ return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+ struct qla_tgt_sess_op *op;
+ struct qla_tgt_cmd *cmd;
+ uint32_t key;
+ int count = 0;
+
+ key = (((u32)s_id->b.domain << 16) |
+ ((u32)s_id->b.area << 8) |
+ ((u32)s_id->b.al_pa));
+
+ spin_lock(&vha->cmd_list_lock);
+ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ if (op_key == key) {
+ op->aborted = true;
+ count++;
+ }
+ }
+ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+ uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+ if (cmd_key == key) {
+ cmd->state = QLA_TGT_STATE_ABORTED;
+ count++;
+ }
+ }
+ spin_unlock(&vha->cmd_list_lock);
+
+ return count;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id;
+ uint16_t wd3_lo;
int res = 0;
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
"qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+	/* res = 1 means the iocb is acked at the end of this thread;
+	 * res = 0 means it will be acked asynchronously later.
+ */
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
- case ELS_FLOGI:
+
+ /* Mark all stale commands in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn,
+ port_id, loop_id);
+
+ if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+ res = 1;
+ break;
+ }
+
+ if (sess->plogi_ack_needed) {
+ /*
+			 * Initiator sent another PLOGI before the last PLOGI
+			 * could finish. Swap the plogi iocbs and terminate the
+			 * old one without acking; the new one will be acked
+			 * when session deletion completes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf094,
+ "sess %p received double plogi.\n", sess);
+
+ qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+
+ res = 0;
+ break;
+ }
+
+ res = 0;
+
+ /*
+	 * Save the immediate notify IOCB; it will be acked once the
+	 * session deletion completes.
+ */
+ memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+ sess->plogi_ack_needed = 1;
+
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+	 * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->s_id.b24 == port_id.b24));
+ qlt_schedule_sess_for_deletion(sess, true);
+ break;
+
case ELS_PRLI:
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wwn)
+ sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+ loop_id);
+
+ if (sess != NULL) {
+ if (sess->deleted) {
+ /*
+				 * Impatient initiator sent PRLI before the last
+				 * PLOGI could finish. Force it to retry while
+				 * the previous one completes.
+ */
+ ql_log(ql_log_warn, sess->vha, 0xf095,
+ "sess %p PRLI received, before plogi ack.\n",
+ sess);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
+
+ /*
+ * This shouldn't happen under normal circumstances,
+ * since we have deleted the old session during PLOGI
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+ "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+ sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->s_id = port_id;
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ }
+ res = 1; /* send notify ack */
+
+ /* Make session global (not used in fabric mode) */
+ if (ha->current_topology != ISP_CFG_F) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+		/* TODO: create the session here. */
+ res = 1; /* send notify ack */
+ }
+
+ break;
+
case ELS_LOGO:
case ELS_PRLO:
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4293,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ case ELS_FLOGI: /* should never happen */
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
"qla_target(%d): Unsupported ELS command %x "
@@ -3968,16 +4565,20 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
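+	/* Conditional locking confuses sparse; hide it from __CHECKER__ runs. */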
+#ifndef __CHECKER__
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
NOTIFY_ACK_SRR_FLAGS_REJECT,
NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+#ifndef __CHECKER__
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#endif
kfree(imm);
}
@@ -4321,14 +4922,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
ctio24->nport_handle = sess->loop_id;
- ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- __constant_cpu_to_le16(
+ cpu_to_le16(
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
CTIO7_FLAGS_DONT_RET_CTIO);
/*
@@ -4656,7 +5257,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
if (atio->u.isp2x.status !=
- __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
"qla_target(%d): ATIO with error "
"status %x received\n", vha->vp_idx,
@@ -4730,7 +5331,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
le16_to_cpu(entry->u.isp2x.status));
tgt->notify_ack_expected--;
if (entry->u.isp2x.status !=
- __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe061,
"qla_target(%d): NOTIFY_ACK "
"failed %x\n", vha->vp_idx,
@@ -5012,6 +5613,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
@@ -5044,7 +5650,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
uint32_t lun, unpacked_lun;
- int lun_size, fn;
+ int fn;
void *iocb;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5066,12 +5672,16 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ sess = NULL;
+ goto out_term;
+ }
+
kref_get(&sess->se_sess->sess_kref);
}
iocb = a;
lun = a->u.isp24.fcp_cmnd.lun;
- lun_size = sizeof(lun);
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
@@ -5552,6 +6162,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
/* Adjust ring index */
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
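+	/* Read the register back to flush the posted write. */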
+ RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}
void
@@ -5594,19 +6205,19 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0xFFFF);
/* Enable target mode */
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
if (ql2xtgt_tape_enable)
/* Enable FC Tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -5615,9 +6226,9 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
/* Disable Full Login after LIP */
- nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -5639,12 +6250,12 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_8);
} else {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
- nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
}
}
@@ -5656,7 +6267,7 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
if (ha->tgt.node_name_set) {
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
- icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
}
@@ -5681,20 +6292,19 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+ nv->exchange_count = cpu_to_le16(0xFFFF);
/* Enable target mode */
- nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
- nv->firmware_options_1 |=
- __constant_cpu_to_le32(BIT_5);
+ nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
- nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
if (ql2xtgt_tape_enable)
/* Enable FC tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -5703,9 +6313,9 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
/* Disable Full Login after LIP */
- nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -5727,12 +6337,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
- nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 |= cpu_to_le32(BIT_8);
} else {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
- nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
}
}
@@ -5747,7 +6357,7 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
if (ha->tgt.node_name_set) {
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
- icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
}
@@ -5793,7 +6403,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
uint32_t srr_rel_offs;
uint16_t srr_ui;
uint16_t srr_ox_id;
- uint8_t reserved_4[19];
+ union {
+ struct {
+ uint8_t node_name[8];
+ } plogi; /* PLOGI/ADISC/PDISC */
+ struct {
+ /* PRLI word 3 bit 0-15 */
+ uint16_t wd3_lo;
+ uint8_t resv0[6];
+ } prli;
+ struct {
+ uint8_t port_id[3];
+ uint8_t resv1;
+ uint16_t nport_handle;
+ uint16_t resv2;
+ } req_els;
+ } u;
+ uint8_t port_name[8];
+ uint8_t resv3[3];
uint8_t vp_index;
uint32_t reserved_5;
uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
uint8_t reserved[2];
uint16_t ox_id;
} __packed;
+#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define FC_TM_REJECT 4
#define FC_TM_FAILED 5
-/*
- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
- * terminated, so no more actions is needed and success should be returned
- * to target.
- */
-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
-
#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
#define pci_dma_lo32(a) (a & 0xffffffff)
#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
struct atio_from_isp atio;
struct work_struct work;
+ struct list_head cmd_list;
+ bool aborted;
+};
+
+enum qla_sess_deletion {
+ QLA_SESS_DELETION_NONE = 0,
+ QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
+ * this one */
+ QLA_SESS_DELETION_IN_PROGRESS = 2,
};
/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
port_id_t s_id;
unsigned int conf_compl_supported:1;
- unsigned int deleted:1;
+ unsigned int deleted:2;
unsigned int local:1;
+ unsigned int logout_on_delete:1;
+ unsigned int plogi_ack_needed:1;
+ unsigned int keep_nport_handle:1;
+
+ unsigned char logout_completed;
+
+ int generation;
struct se_session *se_sess;
struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
uint8_t port_name[WWN_SIZE];
struct work_struct free_work;
+
+ union {
+ struct imm_ntfy_from_isp tm_iocb;
+ };
};
struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
unsigned int free_sg:1;
- unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
* BIT_14 - Back end data received/sent.
* BIT_15 - SRR prepare ctio
* BIT_16 - complete free
+ * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+ * BIT_18 - completion w/abort status
+ * BIT_19 - completion w/unknown status
*/
uint32_t cmd_flags;
};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
struct qla_tgt_cmd *cmd;
};
+/* Check for Switch reserved address */
+#define IS_SW_RESV_ADDR(_s_id) \
+ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct qla_tgt_sess *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
ha->host->active_mode |= MODE_INITIATOR;
}
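+/* Pack a big-endian 3-byte FC s_id (domain, area, al_pa) into a 24-bit key. */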
+static inline uint32_t sid_to_key(const uint8_t *s_id)
+{
+ uint32_t key;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ return key;
+}
+
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_abort_cmd(struct qla_tgt_cmd *);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern void qlt_logo_completion_handler(fc_port_t *, int);
+extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 962cb89fe0ae..ddbe2e7ac14d 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -137,39 +137,39 @@ qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
}
static inline void
-qla27xx_read8(void *window, void *buf, ulong *len)
+qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
uint8_t value = ~0;
if (buf) {
- value = RD_REG_BYTE((__iomem void *)window);
+ value = RD_REG_BYTE(window);
}
qla27xx_insert32(value, buf, len);
}
static inline void
-qla27xx_read16(void *window, void *buf, ulong *len)
+qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
uint16_t value = ~0;
if (buf) {
- value = RD_REG_WORD((__iomem void *)window);
+ value = RD_REG_WORD(window);
}
qla27xx_insert32(value, buf, len);
}
static inline void
-qla27xx_read32(void *window, void *buf, ulong *len)
+qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
uint32_t value = ~0;
if (buf) {
- value = RD_REG_DWORD((__iomem void *)window);
+ value = RD_REG_DWORD(window);
}
qla27xx_insert32(value, buf, len);
}
-static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+static inline void (*qla27xx_read_vector(uint width))(void __iomem *, void *, ulong *)
{
return
(width == 1) ? qla27xx_read8 :
@@ -181,7 +181,7 @@ static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
uint offset, void *buf, ulong *len)
{
- void *window = (void *)reg + offset;
+ void __iomem *window = (void __iomem *)reg + offset;
qla27xx_read32(window, buf, len);
}
@@ -202,8 +202,8 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
uint32_t addr, uint offset, uint count, uint width, void *buf,
ulong *len)
{
- void *window = (void *)reg + offset;
- void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+ void __iomem *window = (void __iomem *)reg + offset;
+ void (*readn)(void __iomem *, void *, ulong *) = qla27xx_read_vector(width);
qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
while (count--) {
@@ -805,9 +805,8 @@ static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
- int rval = 0;
- rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+ sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
v+0, v+1, v+2, v+3, v+4, v+5);
tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
@@ -940,8 +939,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
+#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+#endif
if (!vha->hw->fw_dump)
ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
@@ -954,6 +955,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
else
qla27xx_execute_fwdt_template(vha);
+#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2ed9ab90a455..6d31faa8c57b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.18-k"
+#define QLA2XXX_VERSION "8.07.00.26-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..ac65cb7b4886 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
-
+ cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 3000);
+ 3 * HZ);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -420,6 +420,12 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
+ if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ return cmd->state;
+ }
+
return 0;
}
@@ -541,12 +547,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
- cmd->cmd_flags |= BIT_3;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +575,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
if (cmd->cmd_flags & BIT_5) {
pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
dump_stack();
@@ -636,14 +639,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
-
- if (!cmd->sg_mapped)
- return;
-
- pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
- cmd->sg_mapped = 0;
+ qlt_abort_cmd(cmd);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1145,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
return NULL;
}
- key = (((unsigned long)s_id[0] << 16) |
- ((unsigned long)s_id[1] << 8) |
- (unsigned long)s_id[2]);
+ key = sid_to_key(s_id);
pr_debug("find_sess_by_s_id: 0x%06x\n", key);
se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1180,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
void *slot;
int rc;
- key = (((unsigned long)s_id[0] << 16) |
- ((unsigned long)s_id[1] << 8) |
- (unsigned long)s_id[2]);
+ key = sid_to_key(s_id);
pr_debug("set_sess_by_s_id: %06x\n", key);
slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1367,9 +1359,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
struct qla_hw_data *ha = tgt->ha;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
struct se_session *se_sess;
- struct se_node_acl *se_nacl;
struct tcm_qla2xxx_lport *lport;
- struct tcm_qla2xxx_nacl *nacl;
BUG_ON(in_interrupt());
@@ -1379,8 +1369,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
dump_stack();
return;
}
- se_nacl = se_sess->se_node_acl;
- nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
@@ -1544,6 +1532,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
}
sess->conf_compl_supported = conf_compl_supported;
+
+ /* Reset logout parameters to default */
+ sess->logout_on_delete = 1;
+ sess->keep_nport_handle = 0;
}
/*
@@ -1684,7 +1676,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
(struct tcm_qla2xxx_lport *)target_lport_ptr;
struct tcm_qla2xxx_lport *base_lport =
(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
- struct tcm_qla2xxx_tpg *base_tpg;
struct fc_vport_identifiers vport_id;
if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1697,7 +1688,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
pr_err("qla2xxx base_lport or tpg_1 not available\n");
return -EPERM;
}
- base_tpg = base_lport->tpg_1;
memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn;
@@ -1814,6 +1804,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.module = THIS_MODULE,
.name = "qla2xxx",
.node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
+ /*
+ * XXX: Limit assumes single page per scatter-gather-list entry.
+ * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
+ */
+ .max_data_sg_nents = 1200,
.get_fabric_name = tcm_qla2xxx_get_fabric_name,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
@@ -1962,7 +1957,7 @@ static void __exit tcm_qla2xxx_exit(void)
tcm_qla2xxx_deregister_configfs();
}
-MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 2ff092252b76..c126966130ab 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -5,6 +5,8 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -176,3 +178,110 @@ bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
return true;
}
EXPORT_SYMBOL(scsi_normalize_sense);
+
+/**
+ * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
+ * @sense_buffer: byte array of descriptor format sense data
+ * @sb_len: number of valid bytes in sense_buffer
+ * @desc_type: value of descriptor type to find
+ * (e.g. 0 -> information)
+ *
+ * Notes:
+ * only valid when sense data is in descriptor format
+ *
+ * Return value:
+ * pointer to start of (first) descriptor if found else NULL
+ */
+const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
+ int desc_type)
+{
+ int add_sen_len, add_len, desc_len, k;
+ const u8 * descp;
+
+ if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
+ return NULL;
+ if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
+ return NULL;
+ add_sen_len = (add_sen_len < (sb_len - 8)) ?
+ add_sen_len : (sb_len - 8);
+ descp = &sense_buffer[8];
+ for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
+ descp += desc_len;
+ add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
+ desc_len = add_len + 2;
+ if (descp[0] == desc_type)
+ return descp;
+ if (add_len < 0) // short descriptor ??
+ break;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(scsi_sense_desc_find);
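The walk relies on the descriptor sense layout: byte 7 of the buffer holds the additional sense length, and each descriptor starts with a type byte followed by its own additional-length byte. A hedged sketch of locating the information descriptor (type 0x00) in a completed command's sense data, where sense_buffer and sense_len are assumed caller state:

	const u8 *desc = scsi_sense_desc_find(sense_buffer, sense_len, 0);

	if (desc) {
		/* desc[0] == 0x00 (type), desc[1] == 0x0a (extra length);
		 * bytes 4..11 carry the 64-bit information field */
		u64 info = get_unaligned_be64(&desc[4]);
	}

get_unaligned_be64() comes from asm/unaligned.h, which this file now includes.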
+
+/**
+ * scsi_build_sense_buffer - build sense data in a buffer
+ * @desc: Sense format (non zero == descriptor format,
+ * 0 == fixed format)
+ * @buf: Where to build sense data
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
+{
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+EXPORT_SYMBOL(scsi_build_sense_buffer);
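For fixed format the routine writes response code 0x70, the sense key at byte 2, an additional sense length of 0x0a at byte 7 and the ASC/ASCQ pair at bytes 12/13. A small sketch using the common ILLEGAL REQUEST / INVALID FIELD IN CDB pairing (key 0x05, asc 0x24, ascq 0x00):

	u8 sense[SCSI_SENSE_BUFFERSIZE] = {};

	scsi_build_sense_buffer(0 /* fixed */, sense, ILLEGAL_REQUEST, 0x24, 0x00);
	/* sense[0] == 0x70, sense[2] == 0x05,
	 * sense[7] == 0x0a, sense[12] == 0x24, sense[13] == 0x00 */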
+
+/**
+ * scsi_set_sense_information - set the information field in a
+ * formatted sense data buffer
+ * @buf: Where to build sense data
+ * @buf_len: buffer length
+ * @info: 64-bit information value to be set
+ *
+ * Return value:
+ * 0 on success or EINVAL for invalid sense buffer length
+ **/
+int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
+{
+ if ((buf[0] & 0x7f) == 0x72) {
+ u8 *ucp, len;
+
+ len = buf[7];
+ ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
+ if (!ucp) {
+ buf[7] = len + 0xc;
+ ucp = buf + 8 + len;
+ }
+
+ if (buf_len < len + 0xc)
+ /* Not enough room for info */
+ return -EINVAL;
+
+ ucp[0] = 0;
+ ucp[1] = 0xa;
+ ucp[2] = 0x80; /* Valid bit */
+ ucp[3] = 0;
+ put_unaligned_be64(info, &ucp[4]);
+ } else if ((buf[0] & 0x7f) == 0x70) {
+ buf[0] |= 0x80;
+ put_unaligned_be64(info, &buf[3]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_set_sense_information);
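Callers hand in a buffer already formatted by scsi_build_sense_buffer() (or equivalent) plus its size, so the descriptor path can verify there is room for the 12-byte information descriptor it may append. A hedged sketch; bad_lba stands in for whatever value the caller wants reported:

	u8 sense[SCSI_SENSE_BUFFERSIZE] = {};

	scsi_build_sense_buffer(1 /* descriptor */, sense, ILLEGAL_REQUEST, 0x21, 0x00);
	if (scsi_set_sense_information(sense, sizeof(sense), bad_lba))
		pr_warn("no room for the information descriptor\n");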
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 30268bb2ddb6..dfcc45bb03b1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -25,6 +25,9 @@
* module options to "modprobe scsi_debug num_tgts=2" [20021221]
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -201,7 +204,6 @@ static const char *scsi_debug_version_date = "20141022";
/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
* or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
-#define SAM2_WLUN_REPORT_LUNS 0xc101
/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
* (for response) at one time. Can be reduced by max_queue option. Command
@@ -698,7 +700,7 @@ static void sdebug_max_tgts_luns(void)
else
hpnt->max_id = scsi_debug_num_tgts;
/* scsi_debug_max_luns; */
- hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
+ hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
}
spin_unlock(&sdebug_host_list_lock);
}
@@ -1288,7 +1290,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
- have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
+ have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
if (have_wlun)
pq_pdt = 0x1e; /* present, wlun */
else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1427,12 +1429,11 @@ static int resp_requests(struct scsi_cmnd * scp,
unsigned char * sbuff;
unsigned char *cmd = scp->cmnd;
unsigned char arr[SCSI_SENSE_BUFFERSIZE];
- bool dsense, want_dsense;
+ bool dsense;
int len = 18;
memset(arr, 0, sizeof(arr));
dsense = !!(cmd[1] & 1);
- want_dsense = dsense || scsi_debug_dsense;
sbuff = scp->sense_buffer;
if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
if (dsense) {
@@ -2446,8 +2447,7 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
if (sdt->guard_tag != csum) {
- pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
- __func__,
+ pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
(unsigned long)sector,
be16_to_cpu(sdt->guard_tag),
be16_to_cpu(csum));
@@ -2455,14 +2455,14 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
}
if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
- pr_err("%s: REF check failed on sector %lu\n",
- __func__, (unsigned long)sector);
+ pr_err("REF check failed on sector %lu\n",
+ (unsigned long)sector);
return 0x03;
}
if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
- pr_err("%s: REF check failed on sector %lu\n",
- __func__, (unsigned long)sector);
+ pr_err("REF check failed on sector %lu\n",
+ (unsigned long)sector);
return 0x03;
}
return 0;
@@ -2680,7 +2680,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
-void dump_sector(unsigned char *buf, int len)
+static void dump_sector(unsigned char *buf, int len)
{
int i, j, n;
@@ -3365,8 +3365,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
one_lun[i].scsi_lun[1] = lun & 0xff;
}
if (want_wlun) {
- one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
- one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
+ one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
+ one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
i++;
}
alloc_len = (unsigned char *)(one_lun + i) - arr;
@@ -3449,7 +3449,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
atomic_inc(&sdebug_completions);
qa_indx = indx;
if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
- pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+ pr_err("wild qa_indx=%d\n", qa_indx);
return;
}
spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3457,21 +3457,21 @@ static void sdebug_q_cmd_complete(unsigned long indx)
scp = sqcp->a_cmnd;
if (NULL == scp) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: scp is NULL\n", __func__);
+ pr_err("scp is NULL\n");
return;
}
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (devip)
atomic_dec(&devip->num_in_q);
else
- pr_err("%s: devip=NULL\n", __func__);
+ pr_err("devip=NULL\n");
if (atomic_read(&retired_max_queue) > 0)
retiring = 1;
sqcp->a_cmnd = NULL;
if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: Unexpected completion\n", __func__);
+ pr_err("Unexpected completion\n");
return;
}
@@ -3481,7 +3481,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
retval = atomic_read(&retired_max_queue);
if (qa_indx >= retval) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: index %d too large\n", __func__, retval);
+ pr_err("index %d too large\n", retval);
return;
}
k = find_last_bit(queued_in_use_bm, retval);
@@ -3509,7 +3509,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
atomic_inc(&sdebug_completions);
qa_indx = sd_hrtp->qa_indx;
if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
- pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+ pr_err("wild qa_indx=%d\n", qa_indx);
goto the_end;
}
spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3517,21 +3517,21 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
scp = sqcp->a_cmnd;
if (NULL == scp) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: scp is NULL\n", __func__);
+ pr_err("scp is NULL\n");
goto the_end;
}
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (devip)
atomic_dec(&devip->num_in_q);
else
- pr_err("%s: devip=NULL\n", __func__);
+ pr_err("devip=NULL\n");
if (atomic_read(&retired_max_queue) > 0)
retiring = 1;
sqcp->a_cmnd = NULL;
if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: Unexpected completion\n", __func__);
+ pr_err("Unexpected completion\n");
goto the_end;
}
@@ -3541,7 +3541,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
retval = atomic_read(&retired_max_queue);
if (qa_indx >= retval) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("%s: index %d too large\n", __func__, retval);
+ pr_err("index %d too large\n", retval);
goto the_end;
}
k = find_last_bit(queued_in_use_bm, retval);
@@ -3580,7 +3580,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
return devip;
sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
if (!sdbg_host) {
- pr_err("%s: Host info NULL\n", __func__);
+ pr_err("Host info NULL\n");
return NULL;
}
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
@@ -3596,8 +3596,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
if (!open_devip) { /* try and make a new one */
open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
if (!open_devip) {
- printk(KERN_ERR "%s: out of memory at line %d\n",
- __func__, __LINE__);
+ pr_err("out of memory at line %d\n", __LINE__);
return NULL;
}
}
@@ -3615,7 +3614,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
+ pr_info("slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
return 0;
@@ -3626,7 +3625,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
struct sdebug_dev_info *devip;
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
+ pr_info("slave_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
@@ -3646,7 +3645,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
(struct sdebug_dev_info *)sdp->hostdata;
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
+ pr_info("slave_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (devip) {
/* make this slot available for re-use */
@@ -3897,8 +3896,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
return;
if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
scsi_debug_num_parts = SDEBUG_MAX_PARTS;
- pr_warn("%s: reducing partitions to %d\n", __func__,
- SDEBUG_MAX_PARTS);
+ pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
}
num_sectors = (int)sdebug_store_sectors;
sectors_per_part = (num_sectors - sdebug_sectors_per)
@@ -3942,14 +3940,20 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
unsigned long iflags;
int k, num_in_q, qdepth, inject;
struct sdebug_queued_cmd *sqcp = NULL;
- struct scsi_device *sdp = cmnd->device;
+ struct scsi_device *sdp;
+
+ /* this should never happen */
+ if (WARN_ON(!cmnd))
+ return SCSI_MLQUEUE_HOST_BUSY;
- if (NULL == cmnd || NULL == devip) {
- pr_warn("%s: called with NULL cmnd or devip pointer\n",
- __func__);
+ if (NULL == devip) {
+ pr_warn("called devip == NULL\n");
/* no particularly good error to report back */
return SCSI_MLQUEUE_HOST_BUSY;
}
+
+ sdp = cmnd->device;
+
if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
__func__, scsi_result);
@@ -4383,8 +4387,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
fake_storep = vmalloc(sz);
if (NULL == fake_storep) {
- pr_err("%s: out of memory, 9\n",
- __func__);
+ pr_err("out of memory, 9\n");
return -ENOMEM;
}
memset(fake_storep, 0, sz);
@@ -4784,8 +4787,7 @@ static int __init scsi_debug_init(void)
atomic_set(&retired_max_queue, 0);
if (scsi_debug_ndelay >= 1000000000) {
- pr_warn("%s: ndelay must be less than 1 second, ignored\n",
- __func__);
+ pr_warn("ndelay must be less than 1 second, ignored\n");
scsi_debug_ndelay = 0;
} else if (scsi_debug_ndelay > 0)
scsi_debug_delay = DELAY_OVERRIDDEN;
@@ -4797,8 +4799,7 @@ static int __init scsi_debug_init(void)
case 4096:
break;
default:
- pr_err("%s: invalid sector_size %d\n", __func__,
- scsi_debug_sector_size);
+ pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
return -EINVAL;
}
@@ -4811,29 +4812,28 @@ static int __init scsi_debug_init(void)
break;
default:
- pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
+ pr_err("dif must be 0, 1, 2 or 3\n");
return -EINVAL;
}
if (scsi_debug_guard > 1) {
- pr_err("%s: guard must be 0 or 1\n", __func__);
+ pr_err("guard must be 0 or 1\n");
return -EINVAL;
}
if (scsi_debug_ato > 1) {
- pr_err("%s: ato must be 0 or 1\n", __func__);
+ pr_err("ato must be 0 or 1\n");
return -EINVAL;
}
if (scsi_debug_physblk_exp > 15) {
- pr_err("%s: invalid physblk_exp %u\n", __func__,
- scsi_debug_physblk_exp);
+ pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
return -EINVAL;
}
if (scsi_debug_lowest_aligned > 0x3fff) {
- pr_err("%s: lowest_aligned too big: %u\n", __func__,
- scsi_debug_lowest_aligned);
+ pr_err("lowest_aligned too big: %u\n",
+ scsi_debug_lowest_aligned);
return -EINVAL;
}
@@ -4863,7 +4863,7 @@ static int __init scsi_debug_init(void)
if (0 == scsi_debug_fake_rw) {
fake_storep = vmalloc(sz);
if (NULL == fake_storep) {
- pr_err("%s: out of memory, 1\n", __func__);
+ pr_err("out of memory, 1\n");
return -ENOMEM;
}
memset(fake_storep, 0, sz);
@@ -4877,11 +4877,10 @@ static int __init scsi_debug_init(void)
dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
dif_storep = vmalloc(dif_size);
- pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
- dif_storep);
+ pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
if (dif_storep == NULL) {
- pr_err("%s: out of mem. (DIX)\n", __func__);
+ pr_err("out of mem. (DIX)\n");
ret = -ENOMEM;
goto free_vm;
}
@@ -4903,18 +4902,17 @@ static int __init scsi_debug_init(void)
if (scsi_debug_unmap_alignment &&
scsi_debug_unmap_granularity <=
scsi_debug_unmap_alignment) {
- pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
- __func__);
+ pr_err("ERR: unmap_granularity <= unmap_alignment\n");
return -EINVAL;
}
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
- pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
+ pr_info("%lu provisioning blocks\n", map_size);
if (map_storep == NULL) {
- pr_err("%s: out of mem. (MAP)\n", __func__);
+ pr_err("out of mem. (MAP)\n");
ret = -ENOMEM;
goto free_vm;
}
@@ -4928,18 +4926,18 @@ static int __init scsi_debug_init(void)
pseudo_primary = root_device_register("pseudo_0");
if (IS_ERR(pseudo_primary)) {
- pr_warn("%s: root_device_register() error\n", __func__);
+ pr_warn("root_device_register() error\n");
ret = PTR_ERR(pseudo_primary);
goto free_vm;
}
ret = bus_register(&pseudo_lld_bus);
if (ret < 0) {
- pr_warn("%s: bus_register error: %d\n", __func__, ret);
+ pr_warn("bus_register error: %d\n", ret);
goto dev_unreg;
}
ret = driver_register(&sdebug_driverfs_driver);
if (ret < 0) {
- pr_warn("%s: driver_register error: %d\n", __func__, ret);
+ pr_warn("driver_register error: %d\n", ret);
goto bus_unreg;
}
@@ -4948,16 +4946,14 @@ static int __init scsi_debug_init(void)
for (k = 0; k < host_to_add; k++) {
if (sdebug_add_adapter()) {
- pr_err("%s: sdebug_add_adapter failed k=%d\n",
- __func__, k);
+ pr_err("sdebug_add_adapter failed k=%d\n", k);
break;
}
}
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
- pr_info("%s: built %d host(s)\n", __func__,
- scsi_debug_add_host);
- }
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ pr_info("built %d host(s)\n", scsi_debug_add_host);
+
return 0;
bus_unreg:
@@ -4965,10 +4961,8 @@ bus_unreg:
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
- if (map_storep)
- vfree(map_storep);
- if (dif_storep)
- vfree(dif_storep);
+ vfree(map_storep);
+ vfree(dif_storep);
vfree(fake_storep);
return ret;
@@ -4986,9 +4980,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
- if (dif_storep)
- vfree(dif_storep);
-
+ vfree(dif_storep);
vfree(fake_storep);
}
@@ -5012,8 +5004,7 @@ static int sdebug_add_adapter(void)
sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
if (NULL == sdbg_host) {
- printk(KERN_ERR "%s: out of memory at line %d\n",
- __func__, __LINE__);
+ pr_err("out of memory at line %d\n", __LINE__);
return -ENOMEM;
}
@@ -5023,8 +5014,7 @@ static int sdebug_add_adapter(void)
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
- printk(KERN_ERR "%s: out of memory at line %d\n",
- __func__, __LINE__);
+ pr_err("out of memory at line %d\n", __LINE__);
error = -ENOMEM;
goto clean;
}
@@ -5178,7 +5168,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
}
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
}
- has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
+ has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
return schedule_resp(scp, NULL, errsts_no_connect, 0);
@@ -5338,7 +5328,7 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
- pr_err("%s: scsi_host_alloc failed\n", __func__);
+ pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
return error;
}
@@ -5349,7 +5339,8 @@ static int sdebug_driver_probe(struct device * dev)
hpnt->max_id = scsi_debug_num_tgts + 1;
else
hpnt->max_id = scsi_debug_num_tgts;
- hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
+ /* = scsi_debug_max_luns; */
+ hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
host_prot = 0;
@@ -5381,7 +5372,7 @@ static int sdebug_driver_probe(struct device * dev)
scsi_host_set_prot(hpnt, host_prot);
- printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
+ pr_info("host protection%s%s%s%s%s%s%s\n",
(host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
(host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
(host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
@@ -5409,7 +5400,7 @@ static int sdebug_driver_probe(struct device * dev)
error = scsi_add_host(hpnt, &sdbg_host->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ pr_err("scsi_add_host failed\n");
error = -ENODEV;
scsi_host_put(hpnt);
} else
@@ -5426,8 +5417,7 @@ static int sdebug_driver_remove(struct device * dev)
sdbg_host = to_sdebug_host(dev);
if (!sdbg_host) {
- printk(KERN_ERR "%s: Unable to locate host info\n",
- __func__);
+ pr_err("Unable to locate host info\n");
return -ENODEV;
}
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
new file mode 100644
index 000000000000..edb044a7b56d
--- /dev/null
+++ b/drivers/scsi/scsi_dh.c
@@ -0,0 +1,437 @@
+/*
+ * SCSI device handler infrastructure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ * Authors:
+ * Chandra Seetharaman <sekharan@us.ibm.com>
+ * Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsi/scsi_dh.h>
+#include "scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+struct scsi_dh_blist {
+ const char *vendor;
+ const char *model;
+ const char *driver;
+};
+
+static const struct scsi_dh_blist scsi_dh_blist[] = {
+ {"DGC", "RAID", "clariion" },
+ {"DGC", "DISK", "clariion" },
+ {"DGC", "VRAID", "clariion" },
+
+ {"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
+ {"COMPAQ", "HSV110", "hp_sw" },
+ {"HP", "HSV100", "hp_sw"},
+ {"DEC", "HSG80", "hp_sw"},
+
+ {"IBM", "1722", "rdac", },
+ {"IBM", "1724", "rdac", },
+ {"IBM", "1726", "rdac", },
+ {"IBM", "1742", "rdac", },
+ {"IBM", "1745", "rdac", },
+ {"IBM", "1746", "rdac", },
+ {"IBM", "1813", "rdac", },
+ {"IBM", "1814", "rdac", },
+ {"IBM", "1815", "rdac", },
+ {"IBM", "1818", "rdac", },
+ {"IBM", "3526", "rdac", },
+ {"SGI", "TP9", "rdac", },
+ {"SGI", "IS", "rdac", },
+ {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "FLEXLINE 380", "rdac", },
+ {"SUN", "CSM", "rdac", },
+ {"SUN", "LCSM100", "rdac", },
+ {"SUN", "STK6580_6780", "rdac", },
+ {"SUN", "SUN_6180", "rdac", },
+ {"SUN", "ArrayStorage", "rdac", },
+ {"DELL", "MD3", "rdac", },
+ {"NETAPP", "INF-01-00", "rdac", },
+ {"LSI", "INF-01-00", "rdac", },
+ {"ENGENIO", "INF-01-00", "rdac", },
+ {NULL, NULL, NULL },
+};
+
+static const char *
+scsi_dh_find_driver(struct scsi_device *sdev)
+{
+ const struct scsi_dh_blist *b;
+
+ if (scsi_device_tpgs(sdev))
+ return "alua";
+
+ for (b = scsi_dh_blist; b->vendor; b++) {
+ if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) &&
+ !strncmp(sdev->model, b->model, strlen(b->model))) {
+ return b->driver;
+ }
+ }
+ return NULL;
+}
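The blist comparison is a prefix match bounded by the blist string's own length, so one entry covers a whole product family; devices that report target port group support bypass the table entirely and get "alua". An illustration with hypothetical INQUIRY strings:

	static const struct scsi_dh_blist ex = { "SGI", "TP9", "rdac" };
	const char *vendor = "SGI     ", *model = "TP9300";	/* hypothetical */

	if (!strncmp(vendor, ex.vendor, strlen(ex.vendor)) &&
	    !strncmp(model, ex.model, strlen(ex.model)))
		pr_info("would attach %s\n", ex.driver);	/* prints "rdac" */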
+
+
+static struct scsi_device_handler *__scsi_dh_lookup(const char *name)
+{
+ struct scsi_device_handler *tmp, *found = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (!strncmp(tmp->name, name, strlen(tmp->name))) {
+ found = tmp;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found;
+}
+
+static struct scsi_device_handler *scsi_dh_lookup(const char *name)
+{
+ struct scsi_device_handler *dh;
+
+ dh = __scsi_dh_lookup(name);
+ if (!dh) {
+ request_module(name);
+ dh = __scsi_dh_lookup(name);
+ }
+
+ return dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+ struct scsi_device_handler *scsi_dh)
+{
+ int error;
+
+ if (!try_module_get(scsi_dh->module))
+ return -EINVAL;
+
+ error = scsi_dh->attach(sdev);
+ if (error) {
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
+ scsi_dh->name, error);
+ module_put(scsi_dh->module);
+ } else
+ sdev->handler = scsi_dh;
+
+ return error;
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev)
+{
+ sdev->handler->detach(sdev);
+ sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name);
+ module_put(sdev->handler->module);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device_handler *scsi_dh;
+ int err = -EINVAL;
+
+ if (sdev->sdev_state == SDEV_CANCEL ||
+ sdev->sdev_state == SDEV_DEL)
+ return -ENODEV;
+
+ if (!sdev->handler) {
+ /*
+ * Attach to a device handler
+ */
+ scsi_dh = scsi_dh_lookup(buf);
+ if (!scsi_dh)
+ return err;
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+ } else {
+ if (!strncmp(buf, "detach", 6)) {
+ /*
+ * Detach from a device handler
+ */
+ sdev_printk(KERN_WARNING, sdev,
+ "can't detach handler %s.\n",
+ sdev->handler->name);
+ err = -EINVAL;
+ } else if (!strncmp(buf, "activate", 8)) {
+ /*
+ * Activate a device handler
+ */
+ if (sdev->handler->activate)
+ err = sdev->handler->activate(sdev, NULL, NULL);
+ else
+ err = 0;
+ }
+ }
+
+ return err<0?err:count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->handler)
+ return snprintf(buf, 20, "detached\n");
+
+ return snprintf(buf, 20, "%s\n", sdev->handler->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+ __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+ store_dh_state);
+
+int scsi_dh_add_device(struct scsi_device *sdev)
+{
+ struct scsi_device_handler *devinfo = NULL;
+ const char *drv;
+ int err;
+
+ err = device_create_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
+ if (err)
+ return err;
+
+ drv = scsi_dh_find_driver(sdev);
+ if (drv)
+ devinfo = scsi_dh_lookup(drv);
+ if (devinfo)
+ err = scsi_dh_handler_attach(sdev, devinfo);
+ return err;
+}
+
+void scsi_dh_remove_device(struct scsi_device *sdev)
+{
+ if (sdev->handler)
+ scsi_dh_handler_detach(sdev);
+ device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ * module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+ if (__scsi_dh_lookup(scsi_dh->name))
+ return -EBUSY;
+
+ if (!scsi_dh->attach || !scsi_dh->detach)
+ return -EINVAL;
+
+ spin_lock(&list_lock);
+ list_add(&scsi_dh->list, &scsi_dh_list);
+ spin_unlock(&list_lock);
+
+ printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
+
+ return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+/*
+ * scsi_unregister_device_handler - unregister a device handler personality
+ * module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+ if (!__scsi_dh_lookup(scsi_dh->name))
+ return -ENODEV;
+
+ spin_lock(&list_lock);
+ list_del(&scsi_dh->list);
+ spin_unlock(&list_lock);
+ printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
+
+ return SCSI_DH_OK;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return sdev;
+}
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ * corresponding to the given request queue.
+ * Returns immediately without waiting for activation to be completed.
+ * @q - Request queue that is associated with the scsi_device to be
+ * activated.
+ * @fn - Function to be called upon completion of the activation.
+ * Function fn is called with data (below) and the error code.
+ * Function fn may be called from the same calling context, so
+ * the caller must not hold any lock that fn itself may need.
+ * @data - data passed to the function fn upon completion.
+ *
+ */
+int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
+{
+ struct scsi_device *sdev;
+ int err = SCSI_DH_NOSYS;
+
+ sdev = get_sdev_from_queue(q);
+ if (!sdev) {
+ if (fn)
+ fn(data, err);
+ return err;
+ }
+
+ if (!sdev->handler)
+ goto out_fn;
+ err = SCSI_DH_NOTCONN;
+ if (sdev->sdev_state == SDEV_CANCEL ||
+ sdev->sdev_state == SDEV_DEL)
+ goto out_fn;
+
+ err = SCSI_DH_DEV_OFFLINED;
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ goto out_fn;
+
+ if (sdev->handler->activate)
+ err = sdev->handler->activate(sdev, fn, data);
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+ return err;
+
+out_fn:
+ if (fn)
+ fn(data, err);
+ goto out_put_device;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
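Because the completion function may run synchronously from scsi_dh_activate() itself, a caller must not enter holding any lock the callback takes. A hedged caller sketch in the dm-multipath style; struct my_path, its fields and pg_init_done() are all hypothetical:

	static void pg_init_done(void *data, int errors)
	{
		struct my_path *pgpath = data;

		if (errors != SCSI_DH_OK)
			pr_warn("path activation failed: %d\n", errors);
		complete(&pgpath->activated);
	}

	/* may call pg_init_done() before returning: drop locks first */
	scsi_dh_activate(bdev_get_queue(pgpath->bdev), pg_init_done, pgpath);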
+
+/*
+ * scsi_dh_set_params - set the parameters for the device as per the
+ * string specified in params.
+ * @q - Request queue that is associated with the scsi_device for
+ * which the parameters to be set.
+ * @params - parameters in the following format
+ * "no_of_params\0param1\0param2\0param3\0...\0"
+ * for example, string for 2 parameters with value 10 and 21
+ * is specified as "2\010\021\0".
+ */
+int scsi_dh_set_params(struct request_queue *q, const char *params)
+{
+ struct scsi_device *sdev;
+ int err = -SCSI_DH_NOSYS;
+
+ sdev = get_sdev_from_queue(q);
+ if (!sdev)
+ return err;
+
+ if (sdev->handler && sdev->handler->set_params)
+ err = sdev->handler->set_params(sdev, params);
+ put_device(&sdev->sdev_gendev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_set_params);
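One pitfall with the documented format: a single C literal spelled "2\010\021\0" is parsed as the octal escapes '\010' and '\021' rather than the intended NUL-separated fields. Adjacent string literals make the intent unambiguous; q is the device's request queue:

	/* "2" NUL "10" NUL "21" NUL: two parameters, values 10 and 21 */
	static const char params[] = "2\0" "10\0" "21";	/* [] adds the final NUL */

	scsi_dh_set_params(q, params);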
+
+/*
+ * scsi_dh_attach - Attach device handler
+ * @q - Request queue that is associated with the scsi_device
+ * the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh;
+ int err = 0;
+
+ sdev = get_sdev_from_queue(q);
+ if (!sdev)
+ return -ENODEV;
+
+ scsi_dh = scsi_dh_lookup(name);
+ if (!scsi_dh) {
+ err = -EINVAL;
+ goto out_put_device;
+ }
+
+ if (sdev->handler) {
+ if (sdev->handler != scsi_dh)
+ err = -EBUSY;
+ goto out_put_device;
+ }
+
+ err = scsi_dh_handler_attach(sdev, scsi_dh);
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_attached_handler_name - Get attached device handler's name
+ * @q - Request queue that is associated with the scsi_device
+ * that may have a device handler attached
+ * @gfp - the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns name of attached handler, NULL if no handler is attached.
+ * Caller must take care to free the returned string.
+ */
+const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
+{
+ struct scsi_device *sdev;
+ const char *handler_name = NULL;
+
+ sdev = get_sdev_from_queue(q);
+ if (!sdev)
+ return NULL;
+
+ if (sdev->handler)
+ handler_name = kstrdup(sdev->handler->name, gfp);
+ put_device(&sdev->sdev_gendev);
+ return handler_name;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
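The returned name is a kstrdup() copy owned by the caller. A short usage sketch, with q assumed:

	const char *name = scsi_dh_attached_handler_name(q, GFP_KERNEL);

	if (name)
		pr_info("handler %s attached\n", name);
	kfree(name);	/* caller frees; kfree(NULL) is a no-op */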
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..66a96cd98b97 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -34,9 +33,11 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_dh.h>
#include <scsi/sg.h>
#include "scsi_priv.h"
@@ -421,6 +422,10 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Mode parameters changed");
+ } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
+ evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Asymmetric access state changed");
} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
@@ -460,11 +465,10 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
- if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
- sdev->scsi_dh_data->scsi_dh->check_sense) {
+ if (sdev->handler && sdev->handler->check_sense) {
int rc;
- rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+ rc = sdev->handler->check_sense(sdev, &sshdr);
if (rc != SCSI_RETURN_NOT_HANDLED)
return rc;
/* handler does not care. Drop down to default handling */
@@ -944,7 +948,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
scmd->sdb.length);
scmd->sdb.table.sgl = &ses->sense_sgl;
scmd->sc_data_direction = DMA_FROM_DEVICE;
- scmd->sdb.table.nents = 1;
+ scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
scmd->cmnd[0] = REQUEST_SENSE;
scmd->cmnd[4] = scmd->sdb.length;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -1156,8 +1160,13 @@ int scsi_eh_get_sense(struct list_head *work_q,
struct Scsi_Host *shost;
int rtn;
+ /*
+ * If SCSI_EH_ABORT_SCHEDULED has been set, the command timed out
+ * and an abort was scheduled, so do not request sense for it.
+ */
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
+ (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
SCSI_SENSE_VALID(scmd))
continue;
@@ -2170,8 +2179,17 @@ int scsi_error_handler(void *data)
* We never actually get interrupted because kthread_run
* disables signal delivery for the created thread.
*/
- while (!kthread_should_stop()) {
+ while (true) {
+ /*
+ * The sequence in kthread_stop() sets the stop flag first,
+ * then wakes the process. To avoid missed wakeups, the task
+ * should always be in a non-running state before the stop
+ * flag is checked.
+ */
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != atomic_read(&shost->host_busy)) {
SCSI_LOG_ERROR_RECOVERY(1,
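The reordering above is the canonical race-free kthread loop: the task marks itself non-running before testing the stop flag, so the wakeup issued by kthread_stop() either arrives after the flag is observed or puts the task straight back on the run queue. The general shape, as a sketch with a hypothetical idle predicate:

	while (true) {
		set_current_state(TASK_INTERRUPTIBLE);	/* sleep-ready first */
		if (kthread_should_stop())		/* then test the flag */
			break;
		if (nothing_to_do())			/* hypothetical */
			schedule();
		__set_current_state(TASK_RUNNING);
		/* ... handle work ... */
	}
	__set_current_state(TASK_RUNNING);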
@@ -2408,45 +2426,6 @@ bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
EXPORT_SYMBOL(scsi_command_normalize_sense);
/**
- * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
- * @sense_buffer: byte array of descriptor format sense data
- * @sb_len: number of valid bytes in sense_buffer
- * @desc_type: value of descriptor type to find
- * (e.g. 0 -> information)
- *
- * Notes:
- * only valid when sense data is in descriptor format
- *
- * Return value:
- * pointer to start of (first) descriptor if found else NULL
- */
-const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
- int desc_type)
-{
- int add_sen_len, add_len, desc_len, k;
- const u8 * descp;
-
- if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
- return NULL;
- if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
- return NULL;
- add_sen_len = (add_sen_len < (sb_len - 8)) ?
- add_sen_len : (sb_len - 8);
- descp = &sense_buffer[8];
- for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
- descp += desc_len;
- add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
- desc_len = add_len + 2;
- if (descp[0] == desc_type)
- return descp;
- if (add_len < 0) // short descriptor ??
- break;
- }
- return NULL;
-}
-EXPORT_SYMBOL(scsi_sense_desc_find);
-
-/**
* scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
* @sense_buffer: byte array of sense data
* @sb_len: number of valid bytes in sense_buffer
@@ -2495,61 +2474,3 @@ int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);
-
-/**
- * scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
- * 0 == fixed format)
- * @buf: Where to build sense data
- * @key: Sense key
- * @asc: Additional sense code
- * @ascq: Additional sense code qualifier
- *
- **/
-void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
-{
- if (desc) {
- buf[0] = 0x72; /* descriptor, current */
- buf[1] = key;
- buf[2] = asc;
- buf[3] = ascq;
- buf[7] = 0;
- } else {
- buf[0] = 0x70; /* fixed, current */
- buf[2] = key;
- buf[7] = 0xa;
- buf[12] = asc;
- buf[13] = ascq;
- }
-}
-EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- * formatted sense data buffer
- * @buf: Where to build sense data
- * @info: 64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
- if ((buf[0] & 0x7f) == 0x72) {
- u8 *ucp, len;
-
- len = buf[7];
- ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
- if (!ucp) {
- buf[7] = len + 0xa;
- ucp = buf + 8 + len;
- }
- ucp[0] = 0;
- ucp[1] = 0xa;
- ucp[2] = 0x80; /* Valid bit */
- ucp[3] = 0;
- put_unaligned_be64(info, &ucp[4]);
- } else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
- }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..cbfc5990052b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -31,6 +31,7 @@
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_dh.h>
#include <trace/events/scsi.h>
@@ -583,7 +584,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
- if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+ if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
return;
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}
@@ -597,8 +598,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
if (mq) {
if (nents <= SCSI_MAX_SG_SEGMENTS) {
- sdb->table.nents = nents;
- sg_init_table(sdb->table.sgl, sdb->table.nents);
+ sdb->table.nents = sdb->table.orig_nents = nents;
+ sg_init_table(sdb->table.sgl, nents);
return 0;
}
first_chunk = sdb->table.sgl;
@@ -1248,9 +1249,8 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
- if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
- && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
- int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+ if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
+ int ret = sdev->handler->prep_fn(sdev, req);
if (ret != BLKPREP_OK)
return ret;
}
@@ -2423,7 +2423,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char cmd[12];
int use_10_for_ms;
int header_length;
- int result;
+ int result, retry_count = retries;
struct scsi_sense_hdr my_sshdr;
memset(data, 0, sizeof(*data));
@@ -2502,6 +2502,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
data->block_descriptor_length = buffer[3];
}
data->header_length = header_length;
+ } else if ((status_byte(result) == CHECK_CONDITION) &&
+ scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION && retry_count) {
+ retry_count--;
+ goto retry;
}
return result;
@@ -2707,6 +2712,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_LUN_CHANGE_REPORTED:
envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
break;
+ case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
+ break;
default:
/* do nothing */
break;
@@ -2810,6 +2818,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
case SDEV_EVT_LUN_CHANGE_REPORTED:
+ case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
default:
/* do nothing */
break;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
- int err;
+ int err = 0;
- err = blk_pre_runtime_suspend(sdev->request_queue);
- if (err)
- return err;
- if (pm && pm->runtime_suspend)
+ if (pm && pm->runtime_suspend) {
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
err = pm->runtime_suspend(dev);
- blk_post_runtime_suspend(sdev->request_queue, err);
-
+ blk_post_runtime_suspend(sdev->request_queue, err);
+ }
return err;
}
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- blk_pre_runtime_resume(sdev->request_queue);
- if (pm && pm->runtime_resume)
+ if (pm && pm->runtime_resume) {
+ blk_pre_runtime_resume(sdev->request_queue);
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
-
+ blk_post_runtime_resume(sdev->request_queue, err);
+ }
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e3902fc66278..644bb7339b55 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -170,6 +170,15 @@ static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
extern struct async_domain scsi_sd_pm_domain;
extern struct async_domain scsi_sd_probe_domain;
+/* scsi_dh.c */
+#ifdef CONFIG_SCSI_DH
+int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_remove_device(struct scsi_device *sdev);
+#else
+static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
+#endif
+
/*
* internal scsi timeout functions: for use by mid-layer and transport
* classes.
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1ac38e73df7e..b333389f248f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
depth = simple_strtoul(buf, NULL, 0);
- if (depth < 1 || depth > sht->can_queue)
+ if (depth < 1 || depth > sdev->host->can_queue)
return -EINVAL;
retval = sht->change_queue_depth(sdev, depth);
@@ -1030,11 +1030,20 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
"failed to add device: %d\n", error);
return error;
}
+
+ error = scsi_dh_add_device(sdev);
+ if (error) {
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add device handler: %d\n", error);
+ return error;
+ }
+
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
+ scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1074,6 +1083,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
+ scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 55647aae065c..e4b3d8f4fd85 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2042,6 +2042,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
session->transport = transport;
session->creator = -1;
session->recovery_tmo = 120;
+ session->recovery_tmo_sysfs_override = false;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->sess_list);
@@ -2786,7 +2787,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
- session->recovery_tmo = value;
+ if (!session->recovery_tmo_sysfs_override)
+ session->recovery_tmo = value;
break;
default:
err = transport->set_param(conn, ev->u.set_param.param,
@@ -3037,7 +3039,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
shost = scsi_host_lookup(ev->u.get_chap.host_no);
if (!shost) {
- printk(KERN_ERR "%s: failed. Cound not find host no %u\n",
+ printk(KERN_ERR "%s: failed. Could not find host no %u\n",
__func__, ev->u.get_chap.host_no);
return -ENODEV;
}
@@ -4049,13 +4051,15 @@ store_priv_session_##field(struct device *dev, \
if ((session->state == ISCSI_SESSION_FREE) || \
(session->state == ISCSI_SESSION_FAILED)) \
return -EBUSY; \
- if (strncmp(buf, "off", 3) == 0) \
+ if (strncmp(buf, "off", 3) == 0) { \
session->field = -1; \
- else { \
+ session->field##_sysfs_override = true; \
+ } else { \
val = simple_strtoul(buf, &cp, 0); \
if (*cp != '\0' && *cp != '\n') \
return -EINVAL; \
session->field = val; \
+ session->field##_sysfs_override = true; \
} \
return count; \
}
@@ -4066,6 +4070,7 @@ store_priv_session_##field(struct device *dev, \
static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
show_priv_session_##field, \
store_priv_session_##field)
+
iscsi_priv_session_rw_attr(recovery_tmo, "%d");
static struct attribute *iscsi_session_attrs[] = {
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9a058194b9bd..30d26e345dcc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1222,13 +1222,6 @@ show_sas_rphy_enclosure_identifier(struct device *dev,
u64 identifier;
int error;
- /*
- * Only devices behind an expander are supported, because the
- * enclosure identifier is a SMP feature.
- */
- if (scsi_is_sas_phy_local(phy))
- return -EINVAL;
-
error = i->f->get_enclosure_identifier(rphy, &identifier);
if (error)
return error;
@@ -1248,9 +1241,6 @@ show_sas_rphy_bay_identifier(struct device *dev,
struct sas_internal *i = to_sas_internal(shost->transportt);
int val;
- if (scsi_is_sas_phy_local(phy))
- return -EINVAL;
-
val = i->f->get_bay_identifier(rphy);
if (val < 0)
return val;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 31bbb0da3397..319868f3f674 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -786,10 +786,10 @@ spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
* IU, then QAS (if we can control them), then finally
* fall down the periods */
if (i->f->set_iu && spi_iu(starget)) {
- starget_printk(KERN_ERR, starget, "Domain Validation Disabing Information Units\n");
+ starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n");
DV_SET(iu, 0);
} else if (i->f->set_qas && spi_qas(starget)) {
- starget_printk(KERN_ERR, starget, "Domain Validation Disabing Quick Arbitration and Selection\n");
+ starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n");
DV_SET(qas, 0);
} else {
newperiod = spi_period(starget);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d..e3cd3ece4412 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
{
int res = 0;
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
return res;
}
+EXPORT_SYMBOL(srp_parse_tmo);
static ssize_t show_reconnect_delay(struct device *dev,
struct device_attribute *attr, char *buf)
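With the static dropped and the EXPORT_SYMBOL added, other SRP drivers can reuse the same "off"-or-integer parsing in their own sysfs hooks. A hedged sketch of such a store callback; my_tmo is invented for illustration, and the prototype is assumed to be declared in scsi_transport_srp.h:

/*
 * Hedged sketch: reusing the newly exported srp_parse_tmo() from a
 * driver-private sysfs store hook.
 */
#include <linux/device.h>
#include <scsi/scsi_transport_srp.h>

static int my_tmo = 60;

static ssize_t my_tmo_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, buf);	/* parse "off" or an integer timeout */
	if (res)
		return res;

	my_tmo = tmo;
	return count;
}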
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..3f370228bf31 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -647,7 +647,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
switch (mode) {
case SD_LBP_DISABLE:
- q->limits.max_discard_sectors = 0;
+ blk_queue_max_discard_sectors(q, 0);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
return;
@@ -675,7 +675,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
}
- q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+ blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- max_xfer);
- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ sdkp->disk->queue->limits.max_sectors =
+ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3f25b8fa921d..b37b9b00c4b4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -85,6 +85,7 @@ static int debug_flag;
static struct class st_sysfs_class;
static const struct attribute_group *st_dev_groups[];
+static const struct attribute_group *st_drv_groups[];
MODULE_AUTHOR("Kai Makisara");
MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -198,15 +199,13 @@ static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
static int st_probe(struct device *);
static int st_remove(struct device *);
-static int do_create_sysfs_files(void);
-static void do_remove_sysfs_files(void);
-
static struct scsi_driver st_template = {
.gendrv = {
.name = "st",
.owner = THIS_MODULE,
.probe = st_probe,
.remove = st_remove,
+ .groups = st_drv_groups,
},
};
@@ -1329,9 +1328,9 @@ static int st_open(struct inode *inode, struct file *filp)
spin_lock(&st_use_lock);
STp->in_use = 0;
spin_unlock(&st_use_lock);
- scsi_tape_put(STp);
if (resumed)
scsi_autopm_put_device(STp->device);
+ scsi_tape_put(STp);
return retval;
}
@@ -4404,14 +4403,8 @@ static int __init init_st(void)
if (err)
goto err_chrdev;
- err = do_create_sysfs_files();
- if (err)
- goto err_scsidrv;
-
return 0;
-err_scsidrv:
- scsi_unregister_driver(&st_template.gendrv);
err_chrdev:
unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
ST_MAX_TAPE_ENTRIES);
@@ -4422,11 +4415,11 @@ err_class:
static void __exit exit_st(void)
{
- do_remove_sysfs_files();
scsi_unregister_driver(&st_template.gendrv);
unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
ST_MAX_TAPE_ENTRIES);
class_unregister(&st_sysfs_class);
+ idr_destroy(&st_index_idr);
printk(KERN_INFO "st: Unloaded.\n");
}
@@ -4435,68 +4428,38 @@ module_exit(exit_st);
/* The sysfs driver interface. Read-only at the moment */
-static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
+static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
}
-static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
+static DRIVER_ATTR_RO(try_direct_io);
-static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
+static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
}
-static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
+static DRIVER_ATTR_RO(fixed_buffer_size);
-static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
+static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
}
-static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
+static DRIVER_ATTR_RO(max_sg_segs);
-static ssize_t st_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
+ return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
}
-static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
+static DRIVER_ATTR_RO(version);
-static int do_create_sysfs_files(void)
-{
- struct device_driver *sysfs = &st_template.gendrv;
- int err;
-
- err = driver_create_file(sysfs, &driver_attr_try_direct_io);
- if (err)
- return err;
- err = driver_create_file(sysfs, &driver_attr_fixed_buffer_size);
- if (err)
- goto err_try_direct_io;
- err = driver_create_file(sysfs, &driver_attr_max_sg_segs);
- if (err)
- goto err_attr_fixed_buf;
- err = driver_create_file(sysfs, &driver_attr_version);
- if (err)
- goto err_attr_max_sg;
-
- return 0;
-
-err_attr_max_sg:
- driver_remove_file(sysfs, &driver_attr_max_sg_segs);
-err_attr_fixed_buf:
- driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
-err_try_direct_io:
- driver_remove_file(sysfs, &driver_attr_try_direct_io);
- return err;
-}
-
-static void do_remove_sysfs_files(void)
-{
- struct device_driver *sysfs = &st_template.gendrv;
-
- driver_remove_file(sysfs, &driver_attr_version);
- driver_remove_file(sysfs, &driver_attr_max_sg_segs);
- driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
- driver_remove_file(sysfs, &driver_attr_try_direct_io);
-}
+static struct attribute *st_drv_attrs[] = {
+ &driver_attr_try_direct_io.attr,
+ &driver_attr_fixed_buffer_size.attr,
+ &driver_attr_max_sg_segs.attr,
+ &driver_attr_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(st_drv);
/* The sysfs simple class interface */
static ssize_t
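The conversion above replaces four manual driver_create_file()/driver_remove_file() pairs with a single .groups pointer in the driver template, letting the driver core create and remove the attributes itself. A hedged sketch of the same pattern for an invented driver:

/*
 * Hedged sketch of the .groups conversion above; the "demo" names are
 * invented. DRIVER_ATTR_RO(param) expects param_show() and defines
 * driver_attr_param, which ATTRIBUTE_GROUPS(demo_drv) then exposes as
 * demo_drv_groups, suitable for a struct device_driver .groups field.
 */
#include <linux/device.h>
#include <linux/kernel.h>

static int demo_param = 42;

static ssize_t param_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", demo_param);
}
static DRIVER_ATTR_RO(param);

static struct attribute *demo_drv_attrs[] = {
	&driver_attr_param.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_drv);	/* provides demo_drv_groups */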
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3c6584ff65c1..40c43aeb4ff3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -56,15 +56,18 @@
* V1 RC > 2008/1/31: 2.0
* Win7: 4.2
* Win8: 5.1
+ * Win8.1: 6.0
+ * Win10: 6.2
*/
+#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
+ (((MINOR_) & 0xff)))
-#define VMSTOR_WIN7_MAJOR 4
-#define VMSTOR_WIN7_MINOR 2
-
-#define VMSTOR_WIN8_MAJOR 5
-#define VMSTOR_WIN8_MINOR 1
-
+#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
+#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
+#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
+#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
+#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
@@ -148,21 +151,18 @@ struct hv_fc_wwn_packet {
/*
* Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use.
+ * variable to track the size we should use. This value will
+ * likely change during protocol negotiation but it is valid
+ * to start by assuming pre-Win8.
*/
-static int sense_buffer_size;
+static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
/*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply.
- */
-
-static int vmscsi_size_delta;
-static int vmstor_current_major;
-static int vmstor_current_minor;
+ * The storage protocol version is determined during the
+ * initial exchange with the host. It will indicate which
+ * storage functionality is available in the host.
+ */
+static int vmstor_proto_version;
struct vmscsi_win8_extension {
/*
@@ -207,6 +207,56 @@ struct vmscsi_request {
/*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply. This value
+ * will likely change during protocol negotiation but it is
+ * valid to start by assuming pre-Win8.
+ */
+static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+
+/*
+ * The list of storage protocols in order of preference.
+ */
+struct vmstor_protocol {
+ int protocol_version;
+ int sense_buffer_size;
+ int vmscsi_size_delta;
+};
+
+
+static const struct vmstor_protocol vmstor_protocols[] = {
+ {
+ VMSTOR_PROTO_VERSION_WIN10,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTO_VERSION_WIN8_1,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTO_VERSION_WIN8,
+ POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+ 0
+ },
+ {
+ VMSTOR_PROTO_VERSION_WIN7,
+ PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+ sizeof(struct vmscsi_win8_extension),
+ },
+ {
+ VMSTOR_PROTO_VERSION_WIN6,
+ PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+ sizeof(struct vmscsi_win8_extension),
+ }
+};
+
+
+/*
 * This structure is sent during the initialization phase to get the different
* properties of the channel.
*/
@@ -426,7 +476,6 @@ static void storvsc_host_scan(struct work_struct *work)
struct storvsc_scan_work *wrk;
struct Scsi_Host *host;
struct scsi_device *sdev;
- unsigned long flags;
wrk = container_of(work, struct storvsc_scan_work, work);
host = wrk->host;
@@ -443,14 +492,8 @@ static void storvsc_host_scan(struct work_struct *work)
* may have been removed this way.
*/
mutex_lock(&host->scan_mutex);
- spin_lock_irqsave(host->host_lock, flags);
- list_for_each_entry(sdev, &host->__devices, siblings) {
- spin_unlock_irqrestore(host->host_lock, flags);
+ shost_for_each_device(sdev, host)
scsi_test_unit_ready(sdev, 1, 1, NULL);
- spin_lock_irqsave(host->host_lock, flags);
- continue;
- }
- spin_unlock_irqrestore(host->host_lock, flags);
mutex_unlock(&host->scan_mutex);
/*
* Now scan the host to discover LUNs that may have been added.
@@ -481,18 +524,6 @@ done:
kfree(wrk);
}
-/*
- * Major/minor macros. Minor version is in LSB, meaning that earlier flat
- * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
- */
-
-static inline u16 storvsc_get_version(u8 major, u8 minor)
-{
- u16 version;
-
- version = ((major << 8) | minor);
- return version;
-}
/*
* We can get incoming messages from the host that are not in response to
@@ -885,7 +916,7 @@ static int storvsc_channel_init(struct hv_device *device)
struct storvsc_device *stor_device;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
- int ret, t;
+ int ret, t, i;
int max_chns;
bool process_sub_channels = false;
@@ -921,41 +952,65 @@ static int storvsc_channel_init(struct hv_device *device)
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ vstor_packet->status != 0) {
+ ret = -EINVAL;
goto cleanup;
+ }
- /* reuse the packet for version range supported */
- memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+ /* reuse the packet for version range supported */
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation =
+ VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstor_packet->version.major_minor =
- storvsc_get_version(vmstor_current_major, vmstor_current_minor);
+ vstor_packet->version.major_minor =
+ vmstor_protocols[i].protocol_version;
- /*
- * The revision number is only used in Windows; set it to 0.
- */
- vstor_packet->version.revision = 0;
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
- ret = vmbus_sendpacket(device->channel, vstor_packet,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
vmscsi_size_delta),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
+ if (ret != 0)
+ goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (vstor_packet->status == 0) {
+ vmstor_proto_version =
+ vmstor_protocols[i].protocol_version;
+
+ sense_buffer_size =
+ vmstor_protocols[i].sense_buffer_size;
+
+ vmscsi_size_delta =
+ vmstor_protocols[i].vmscsi_size_delta;
+
+ break;
+ }
}
- if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ if (vstor_packet->status != 0) {
+ ret = -EINVAL;
goto cleanup;
+ }
memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -979,8 +1034,10 @@ static int storvsc_channel_init(struct hv_device *device)
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ vstor_packet->status != 0) {
+ ret = -EINVAL;
goto cleanup;
+ }
/*
* Check to see if multi-channel support is there.
@@ -988,8 +1045,7 @@ static int storvsc_channel_init(struct hv_device *device)
* support multi-channel.
*/
max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
- if ((vmbus_proto_version != VERSION_WIN7) &&
- (vmbus_proto_version != VERSION_WS2008)) {
+ if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
if (vstor_packet->storage_channel_properties.flags &
STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
process_sub_channels = true;
@@ -1018,8 +1074,10 @@ static int storvsc_channel_init(struct hv_device *device)
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ vstor_packet->status != 0) {
+ ret = -EINVAL;
goto cleanup;
+ }
if (process_sub_channels)
handle_multichannel_storage(device, max_chns);
@@ -1428,15 +1486,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
/*
* If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
- * if the device is a MSFT virtual device.
+ * if the device is a MSFT virtual device. If the host is
+ * WIN10 or newer, allow write_same.
*/
if (!strncmp(sdevice->vendor, "Msft", 4)) {
- switch (vmbus_proto_version) {
- case VERSION_WIN8:
- case VERSION_WIN8_1:
+ switch (vmstor_proto_version) {
+ case VMSTOR_PROTO_VERSION_WIN8:
+ case VMSTOR_PROTO_VERSION_WIN8_1:
sdevice->scsi_level = SCSI_SPC_3;
break;
}
+
+ if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
+ sdevice->no_write_same = 0;
}
return 0;
@@ -1563,7 +1625,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
u32 payload_sz;
u32 length;
- if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+ if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
/*
* On legacy hosts filter unimplemented commands.
* Future hosts are expected to correctly handle
@@ -1598,10 +1660,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
vm_srb->data_in = READ_TYPE;
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
break;
- default:
+ case DMA_NONE:
vm_srb->data_in = UNKNOWN_TYPE;
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
break;
+ default:
+ /*
+ * This is DMA_BIDIRECTIONAL or something else we are never
+ * supposed to see here.
+ */
+ WARN(1, "Unexpected data direction: %d\n",
+ scmnd->sc_data_direction);
+ return -EINVAL;
}
@@ -1758,22 +1828,11 @@ static int storvsc_probe(struct hv_device *device,
* set state to properly communicate with the host.
*/
- switch (vmbus_proto_version) {
- case VERSION_WS2008:
- case VERSION_WIN7:
- sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
- vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
- vmstor_current_major = VMSTOR_WIN7_MAJOR;
- vmstor_current_minor = VMSTOR_WIN7_MINOR;
+ if (vmbus_proto_version < VERSION_WIN8) {
max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
max_targets = STORVSC_IDE_MAX_TARGETS;
max_channels = STORVSC_IDE_MAX_CHANNELS;
- break;
- default:
- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
- vmscsi_size_delta = 0;
- vmstor_current_major = VMSTOR_WIN8_MAJOR;
- vmstor_current_minor = VMSTOR_WIN8_MINOR;
+ } else {
max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
max_targets = STORVSC_MAX_TARGETS;
max_channels = STORVSC_MAX_CHANNELS;
@@ -1783,7 +1842,6 @@ static int storvsc_probe(struct hv_device *device,
* VCPUs in the guest.
*/
max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
- break;
}
scsi_driver.can_queue = (max_outstanding_req_per_channel *
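The negotiation rewrite above walks vmstor_protocols[] newest-first and locks in the first version the host completes successfully, replacing the single hard-coded QUERY_PROTOCOL_VERSION exchange. A hedged, transport-free sketch of that loop; try_version() is an invented stand-in for the vmbus round trip, and the table values are illustrative only:

/*
 * Hedged sketch of the newest-first negotiation added above. The table
 * mirrors the shape of vmstor_protocols[] with illustrative sense-buffer
 * sizes and size deltas.
 */
#include <linux/kernel.h>
#include <linux/types.h>

struct proto {
	int version;		/* (major << 8) | minor, as VMSTOR_PROTO_VERSION() */
	int sense_buffer_size;	/* illustrative */
	int size_delta;		/* illustrative */
};

static const struct proto protos[] = {
	{ 0x0602, 20, 0 },	/* Win10  (6.2) */
	{ 0x0600, 20, 0 },	/* Win8.1 (6.0) */
	{ 0x0501, 20, 0 },	/* Win8   (5.1) */
	{ 0x0402, 18, 8 },	/* Win7   (4.2) */
	{ 0x0200, 18, 8 },	/* Win6   (2.0) */
};

static bool try_version(int version)
{
	return version <= 0x0501;	/* pretend the host tops out at Win8 */
}

static const struct proto *negotiate(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(protos); i++)
		if (try_version(protos[i].version))
			return &protos[i];	/* first (newest) accepted wins */

	return NULL;	/* no common protocol: fail channel init */
}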
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index e26e81de7c45..d50c5ed8f428 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -12,9 +12,9 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <asm/sun3x.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/dvma.h>
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
- int err, host_prot;
+ int err;
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+ int host_prot;
+
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 61346aa73178..e3da1a2fdb66 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -590,7 +590,7 @@ static inline void wd719x_interrupt_SCB(struct wd719x *wd,
dev_dbg(&wd->pdev->dev, "selection timeout\n");
break;
case WD719X_SUE_RESET:
- dev_dbg(&wd->pdev->dev, "bus reset occured\n");
+ dev_dbg(&wd->pdev->dev, "bus reset occurred\n");
result = DID_RESET;
break;
case WD719X_SUE_BUSERROR:
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index fad22caf0eff..9dc8687bf048 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
unsigned int data_len = scsi_bufflen(sc);
unsigned int data_grants = 0, seg_grants = 0;
struct scatterlist *sg;
- unsigned long mfn;
struct scsiif_request_segment *seg;
ring_req->nr_segments = 0;
@@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
- mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- info->dev->otherend_id, mfn, 1);
+ info->dev->otherend_id,
+ xen_page_to_gfn(page), 1);
shadow->gref[ref_cnt] = ref;
ring_req->seg[ref_cnt].gref = ref;
ring_req->seg[ref_cnt].offset = (uint16_t)off;
@@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info,
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
- mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- info->dev->otherend_id, mfn, grant_ro);
+ info->dev->otherend_id,
+ xen_page_to_gfn(page),
+ grant_ro);
shadow->gref[ref_cnt] = ref;
seg->gref = ref;
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 46427b48e2f1..358df7510186 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -22,7 +22,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle)
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, data->affinity))
+ if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
@@ -50,7 +50,7 @@ static void intc_disable(struct irq_data *data)
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, data->affinity))
+ if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
@@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data,
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
- cpumask_copy(data->affinity, cpumask);
+ cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 156b790072b4..043419dcee92 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -67,7 +67,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level)
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
- generic_handle_irq((unsigned int)irq_get_handler_data(irq));
+ generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
}
static void __init intc_register_irq(struct intc_desc *desc,
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index f5f1b821241a..bafc51c6f0ba 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -83,12 +83,11 @@ EXPORT_SYMBOL_GPL(intc_irq_lookup);
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
- struct intc_virq_list **last, *entry;
- struct irq_data *data = irq_get_irq_data(irq);
+ struct intc_virq_list *entry;
+ struct intc_virq_list **last = NULL;
/* scan for duplicates */
- last = (struct intc_virq_list **)&data->handler_data;
- for_each_virq(entry, data->handler_data) {
+ for_each_virq(entry, irq_get_handler_data(irq)) {
if (entry->irq == virq)
return 0;
last = &entry->next;
@@ -102,14 +101,18 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
entry->irq = virq;
- *last = entry;
+ if (last)
+ *last = entry;
+ else
+ irq_set_handler_data(irq, entry);
return 0;
}
-static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
+static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
{
- struct irq_data *data = irq_get_irq_data(irq);
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
struct intc_desc_int *d = get_intc_desc(irq);
@@ -118,12 +121,14 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
for_each_virq(entry, vlist) {
unsigned long addr, handle;
+ struct irq_desc *vdesc = irq_to_desc(entry->irq);
- handle = (unsigned long)irq_get_handler_data(entry->irq);
- addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
-
- if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
- generic_handle_irq(entry->irq);
+ if (vdesc) {
+ handle = (unsigned long)irq_desc_get_handler_data(vdesc);
+ addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
+ if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
+ generic_handle_irq_desc(entry->irq, vdesc);
+ }
}
chip->irq_unmask(data);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 7dc7c0d8a2c1..0b12d777d3c4 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,6 +2,7 @@
# Makefile for the Linux Kernel SOC specific device drivers.
#
+obj-$(CONFIG_MACH_DOVE) += dove/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
diff --git a/drivers/soc/dove/Makefile b/drivers/soc/dove/Makefile
new file mode 100644
index 000000000000..2db8e65513a3
--- /dev/null
+++ b/drivers/soc/dove/Makefile
@@ -0,0 +1 @@
+obj-y += pmu.o
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
new file mode 100644
index 000000000000..6792aae9e2e5
--- /dev/null
+++ b/drivers/soc/dove/pmu.c
@@ -0,0 +1,412 @@
+/*
+ * Marvell Dove PMU support
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/dove/pmu.h>
+#include <linux/spinlock.h>
+
+#define NR_PMU_IRQS 7
+
+#define PMC_SW_RST 0x30
+#define PMC_IRQ_CAUSE 0x50
+#define PMC_IRQ_MASK 0x54
+
+#define PMU_PWR 0x10
+#define PMU_ISO 0x58
+
+struct pmu_data {
+ spinlock_t lock;
+ struct device_node *of_node;
+ void __iomem *pmc_base;
+ void __iomem *pmu_base;
+ struct irq_chip_generic *irq_gc;
+ struct irq_domain *irq_domain;
+#ifdef CONFIG_RESET_CONTROLLER
+ struct reset_controller_dev reset;
+#endif
+};
+
+/*
+ * The PMU contains a register to reset various subsystems within the
+ * SoC. Export this as a reset controller.
+ */
+#ifdef CONFIG_RESET_CONTROLLER
+#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
+
+static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = ~BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops pmu_reset_ops = {
+ .reset = pmu_reset_reset,
+ .assert = pmu_reset_assert,
+ .deassert = pmu_reset_deassert,
+};
+
+static struct reset_controller_dev pmu_reset __initdata = {
+ .ops = &pmu_reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+};
+
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+ int ret;
+
+ pmu->reset = pmu_reset;
+ pmu->reset.of_node = pmu->of_node;
+
+ ret = reset_controller_register(&pmu->reset);
+ if (ret)
+ pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
+}
+#else
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+}
+#endif
+
+struct pmu_domain {
+ struct pmu_data *pmu;
+ u32 pwr_mask;
+ u32 rst_mask;
+ u32 iso_mask;
+ struct generic_pm_domain base;
+};
+
+#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
+
+/*
+ * This deals with the "old" Marvell sequence of bringing a power domain
+ * down/up, which is: apply power, release reset, disable isolators.
+ *
+ * Later devices apparently use a different sequence: power up, disable
+ * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
+ * enable module clock, deassert reset.
+ *
+ * Note: reading the assembly, it seems that the IO accessors have an
+ * unfortunate side-effect - they cause memory already read into registers
+ * for the if () to be re-read for the bit-set or bit-clear operation.
+ * The code is written to avoid this.
+ */
+static int pmu_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Enable isolators */
+ if (pmu_dom->iso_mask) {
+ val = ~pmu_dom->iso_mask;
+ val &= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ /* Reset unit */
+ if (pmu_dom->rst_mask) {
+ val = ~pmu_dom->rst_mask;
+ val &= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Power down */
+ val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Power on */
+ val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ /* Release reset */
+ if (pmu_dom->rst_mask) {
+ val = pmu_dom->rst_mask;
+ val |= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Disable isolators */
+ if (pmu_dom->iso_mask) {
+ val = pmu_dom->iso_mask;
+ val |= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void __pmu_domain_register(struct pmu_domain *domain,
+ struct device_node *np)
+{
+ unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
+
+ domain->base.power_off = pmu_domain_power_off;
+ domain->base.power_on = pmu_domain_power_on;
+
+ pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
+
+ if (np)
+ of_genpd_add_provider_simple(np, &domain->base);
+}
+
+/* PMU IRQ controller */
+static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pmu_data *pmu = irq_get_handler_data(irq);
+ struct irq_chip_generic *gc = pmu->irq_gc;
+ struct irq_domain *domain = pmu->irq_domain;
+ void __iomem *base = gc->reg_base;
+ u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
+ u32 done = ~0;
+
+ if (stat == 0) {
+ handle_bad_irq(irq, desc);
+ return;
+ }
+
+ while (stat) {
+ u32 hwirq = fls(stat) - 1;
+
+ stat &= ~(1 << hwirq);
+ done &= ~(1 << hwirq);
+
+ generic_handle_irq(irq_find_mapping(domain, hwirq));
+ }
+
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race-free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
+ irq_gc_lock(gc);
+ done &= readl_relaxed(base + PMC_IRQ_CAUSE);
+ writel_relaxed(done, base + PMC_IRQ_CAUSE);
+ irq_gc_unlock(gc);
+}
+
+static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
+{
+ const char *name = "pmu_irq";
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ int ret;
+
+ /* mask and clear all interrupts */
+ writel(0, pmu->pmc_base + PMC_IRQ_MASK);
+ writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
+
+ domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
+ handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
+ irq_domain_remove(domain);
+ return ret;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = pmu->pmc_base;
+ gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ pmu->irq_domain = domain;
+ pmu->irq_gc = gc;
+
+ irq_set_handler_data(irq, pmu);
+ irq_set_chained_handler(irq, pmu_irq_handler);
+
+ return 0;
+}
+
+/*
+ * pmu: power-manager@d0000 {
+ * compatible = "marvell,dove-pmu";
+ * reg = <0xd0000 0x8000> <0xd8000 0x8000>;
+ * interrupts = <33>;
+ * interrupt-controller;
+ *	#reset-cells = <1>;
+ *	domains {
+ *		vpu_domain: vpu-domain {
+ *			#power-domain-cells = <0>;
+ *			marvell,pmu_pwr_mask = <0x00000008>;
+ *			marvell,pmu_iso_mask = <0x00000001>;
+ *			resets = <&pmu 16>;
+ *		};
+ *		gpu_domain: gpu-domain {
+ *			#power-domain-cells = <0>;
+ *			marvell,pmu_pwr_mask = <0x00000004>;
+ *			marvell,pmu_iso_mask = <0x00000002>;
+ *			resets = <&pmu 18>;
+ *		};
+ *	};
+ * };
+ */
+int __init dove_init_pmu(void)
+{
+ struct device_node *np_pmu, *domains_node, *np;
+ struct pmu_data *pmu;
+ int ret, parent_irq;
+
+ /* Lookup the PMU node */
+ np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
+ if (!np_pmu)
+ return 0;
+
+ domains_node = of_get_child_by_name(np_pmu, "domains");
+ if (!domains_node) {
+ pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ return 0;
+ }
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->of_node = np_pmu;
+ pmu->pmc_base = of_iomap(pmu->of_node, 0);
+ pmu->pmu_base = of_iomap(pmu->of_node, 1);
+ if (!pmu->pmc_base || !pmu->pmu_base) {
+ pr_err("%s: failed to map PMU\n", np_pmu->name);
+ iounmap(pmu->pmu_base);
+ iounmap(pmu->pmc_base);
+ kfree(pmu);
+ return -ENOMEM;
+ }
+
+ pmu_reset_init(pmu);
+
+ for_each_available_child_of_node(domains_node, np) {
+ struct of_phandle_args args;
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ break;
+
+ domain->pmu = pmu;
+ domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ if (!domain->base.name) {
+ kfree(domain);
+ break;
+ }
+
+ of_property_read_u32(np, "marvell,pmu_pwr_mask",
+ &domain->pwr_mask);
+ of_property_read_u32(np, "marvell,pmu_iso_mask",
+ &domain->iso_mask);
+
+ /*
+ * We parse the reset controller property directly here
+ * to ensure that we can operate when the reset controller
+ * support is not configured into the kernel.
+ */
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret == 0) {
+ if (args.np == pmu->of_node)
+ domain->rst_mask = BIT(args.args[0]);
+ of_node_put(args.np);
+ }
+
+ __pmu_domain_register(domain, np);
+ }
+ pm_genpd_poweroff_unused();
+
+ /* Loss of the interrupt controller is not a fatal error. */
+ parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
+ if (!parent_irq) {
+ pr_err("%s: no interrupt specified\n", np_pmu->name);
+ } else {
+ ret = dove_init_pmu_irq(pmu, parent_irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+ }
+
+ return 0;
+}
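Since the PMU now registers as a reset controller with 32 lines, consumers reach it through the generic reset API. A hedged consumer sketch; the probe function and device are invented, while the index 16 matches the vpu-domain binding example above:

/*
 * Hedged consumer sketch: taking the PMU reset line referenced by
 * "resets = <&pmu 16>;" through the generic reset framework.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int vpu_probe(struct platform_device *pdev)
{
	struct reset_control *rst = devm_reset_control_get(&pdev->dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* ends up toggling bit 16 of PMC_SW_RST via pmu_reset_ops */
	return reset_control_deassert(rst);
}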
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 3c1850332a90..9d5068248aa0 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -1,6 +1,15 @@
#
# MediaTek SoC drivers
#
+config MTK_INFRACFG
+ bool "MediaTek INFRACFG Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+ Say yes here to add support for the MediaTek INFRACFG controller. The
+ INFRACFG controller contains various infrastructure registers not
+	  directly associated with any device.
+
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on ARCH_MEDIATEK
@@ -10,3 +19,13 @@ config MTK_PMIC_WRAP
Say yes here to add support for MediaTek PMIC Wrapper found
on different MediaTek SoCs. The PMIC wrapper is proprietary
hardware used to connect the PMIC.
+
+config MTK_SCPSYS
+ bool "MediaTek SCPSYS Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ select MTK_INFRACFG
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Say yes here to add support for the MediaTek SCPSYS power domain
+ driver.
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index ecaf4defd7f6..12998b08819e 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1 +1,3 @@
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
new file mode 100644
index 000000000000..dba3055a9493
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <asm/processor.h>
+
+#define INFRA_TOPAXI_PROTECTEN 0x0220
+#define INFRA_TOPAXI_PROTECTSTA1 0x0228
+
+/**
+ * mtk_infracfg_set_bus_protection - enable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be enabled.
+ *
+ * This function enables the bus protection bits for disabled power
+ * domains so that the system does not hang when some unit accesses the
+ * bus while in power down.
+ */
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask)
+{
+ unsigned long expired;
+ u32 val;
+ int ret;
+
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, mask);
+
+ expired = jiffies + HZ;
+
+ while (1) {
+ ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+ if (ret)
+ return ret;
+
+ if ((val & mask) == mask)
+ break;
+
+ cpu_relax();
+ if (time_after(jiffies, expired))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mtk_infracfg_clear_bus_protection - disable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be disabled.
+ *
+ * This function disables the bus protection bits previously enabled with
+ * mtk_infracfg_set_bus_protection.
+ */
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask)
+{
+ unsigned long expired;
+ int ret;
+
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0);
+
+ expired = jiffies + HZ;
+
+ while (1) {
+ u32 val;
+
+ ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & mask))
+ break;
+
+ cpu_relax();
+ if (time_after(jiffies, expired))
+ return -EIO;
+ }
+
+ return 0;
+}
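A hedged caller sketch for the two helpers, bracketing a power transition the way mtk-scpsys below does; the mask bits and the node lookup are illustrative, and real callers use the MT8173_TOP_AXI_PROT_EN_* definitions from infracfg.h:

/*
 * Hedged caller sketch: assert bus protection before powering a domain
 * down and release it afterwards.
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/soc/mediatek/infracfg.h>

static int demo_power_cycle(struct device_node *np)
{
	struct regmap *infracfg;
	u32 mask = BIT(1) | BIT(2);	/* illustrative protection bits */
	int ret;

	infracfg = syscon_regmap_lookup_by_phandle(np, "infracfg");
	if (IS_ERR(infracfg))
		return PTR_ERR(infracfg);

	ret = mtk_infracfg_set_bus_protection(infracfg, mask);
	if (ret)
		return ret;

	/* ... domain is now safe to power down and back up ... */

	return mtk_infracfg_clear_bus_protection(infracfg, mask);
}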
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index f432291feee9..8bc7b41b09fd 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -926,7 +926,6 @@ err_out1:
static struct platform_driver pwrap_drv = {
.driver = {
.name = "mt-pmic-pwrap",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pwrap_match_tbl),
},
.probe = pwrap_probe,
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
new file mode 100644
index 000000000000..164a7d8439b1
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <dt-bindings/power/mt8173-power.h>
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22)
+#define PWR_STATUS_MFG_ASYNC BIT(23)
+#define PWR_STATUS_AUDIO BIT(24)
+#define PWR_STATUS_USB BIT(25)
+
+enum clk_id {
+ MT8173_CLK_MM,
+ MT8173_CLK_MFG,
+ MT8173_CLK_NONE,
+ MT8173_CLK_MAX = MT8173_CLK_NONE,
+};
+
+struct scp_domain_data {
+ const char *name;
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u32 bus_prot_mask;
+ enum clk_id clk_id;
+};
+
+static const struct scp_domain_data scp_domain_data[] __initconst = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = MT8173_CLK_MM,
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = MT8173_CLK_MM,
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = MT8173_CLK_MM,
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = MT8173_CLK_MM,
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = MT8173_CLK_MM,
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = MT8173_CLK_NONE,
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = MT8173_CLK_NONE,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = MT8173_CLK_MFG,
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = MT8173_CLK_NONE,
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = MT8173_CLK_NONE,
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
+
+struct scp;
+
+struct scp_domain {
+ struct generic_pm_domain genpd;
+ struct scp *scp;
+ struct clk *clk;
+ u32 sta_mask;
+ void __iomem *ctl_addr;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u32 bus_prot_mask;
+};
+
+struct scp {
+ struct scp_domain domains[NUM_DOMAINS];
+ struct genpd_onecell_data pd_data;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *infracfg;
+};
+
+static int scpsys_domain_is_on(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->sta_mask;
+ u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) & scpd->sta_mask;
+
+ /*
+	 * A domain is on when both status bits are set. If only one is set,
+	 * return an error; this happens transiently while a domain is powering up.
+ */
+
+ if (status && status2)
+ return true;
+ if (!status && !status2)
+ return false;
+
+ return -EINVAL;
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ unsigned long timeout;
+ bool expired;
+ void __iomem *ctl_addr = scpd->ctl_addr;
+ u32 sram_pdn_ack = scpd->sram_pdn_ack_bits;
+ u32 val;
+ int ret;
+
+ if (scpd->clk) {
+ ret = clk_prepare_enable(scpd->clk);
+ if (ret)
+ goto err_clk;
+ }
+
+ val = readl(ctl_addr);
+ val |= PWR_ON_BIT;
+ writel(val, ctl_addr);
+ val |= PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 1 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (1) {
+ ret = scpsys_domain_is_on(scpd);
+ if (ret > 0)
+ break;
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto err_pwr_ack;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ val &= ~PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~scpd->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* wait until SRAM_PDN_ACK all 0 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (sram_pdn_ack && (readl(ctl_addr) & sram_pdn_ack)) {
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto err_pwr_ack;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ if (scpd->bus_prot_mask) {
+ ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
+ scpd->bus_prot_mask);
+ if (ret)
+ goto err_pwr_ack;
+ }
+
+ return 0;
+
+err_pwr_ack:
+ clk_disable_unprepare(scpd->clk);
+err_clk:
+ dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ unsigned long timeout;
+ bool expired;
+ void __iomem *ctl_addr = scpd->ctl_addr;
+ u32 pdn_ack = scpd->sram_pdn_ack_bits;
+ u32 val;
+ int ret;
+
+ if (scpd->bus_prot_mask) {
+ ret = mtk_infracfg_set_bus_protection(scp->infracfg,
+ scpd->bus_prot_mask);
+ if (ret)
+ goto out;
+ }
+
+ val = readl(ctl_addr);
+ val |= scpd->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* wait until SRAM_PDN_ACK all 1 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (pdn_ack && (readl(ctl_addr) & pdn_ack) != pdn_ack) {
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ val |= PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 0 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (1) {
+ ret = scpsys_domain_is_on(scpd);
+ if (ret == 0)
+ break;
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ if (scpd->clk)
+ clk_disable_unprepare(scpd->clk);
+
+ return 0;
+
+out:
+ dev_err(scp->dev, "Failed to power off domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static int __init scpsys_probe(struct platform_device *pdev)
+{
+ struct genpd_onecell_data *pd_data;
+ struct resource *res;
+ int i, ret;
+ struct scp *scp;
+ struct clk *clk[MT8173_CLK_MAX];
+
+ scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
+ if (!scp)
+ return -ENOMEM;
+
+ scp->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(scp->base))
+ return PTR_ERR(scp->base);
+
+ pd_data = &scp->pd_data;
+
+ pd_data->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ if (!pd_data->domains)
+ return -ENOMEM;
+
+ clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
+ if (IS_ERR(clk[MT8173_CLK_MM]))
+ return PTR_ERR(clk[MT8173_CLK_MM]);
+
+ clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
+ if (IS_ERR(clk[MT8173_CLK_MFG]))
+ return PTR_ERR(clk[MT8173_CLK_MFG]);
+
+ scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "infracfg");
+ if (IS_ERR(scp->infracfg)) {
+ dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
+ PTR_ERR(scp->infracfg));
+ return PTR_ERR(scp->infracfg);
+ }
+
+ pd_data->num_domains = NUM_DOMAINS;
+
+ for (i = 0; i < NUM_DOMAINS; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
+ const struct scp_domain_data *data = &scp_domain_data[i];
+
+ pd_data->domains[i] = genpd;
+ scpd->scp = scp;
+
+ scpd->sta_mask = data->sta_mask;
+ scpd->ctl_addr = scp->base + data->ctl_offs;
+ scpd->sram_pdn_bits = data->sram_pdn_bits;
+ scpd->sram_pdn_ack_bits = data->sram_pdn_ack_bits;
+ scpd->bus_prot_mask = data->bus_prot_mask;
+ if (data->clk_id != MT8173_CLK_NONE)
+ scpd->clk = clk[data->clk_id];
+
+ genpd->name = data->name;
+ genpd->power_off = scpsys_power_off;
+ genpd->power_on = scpsys_power_on;
+
+ /*
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
+ */
+ genpd->power_on(genpd);
+
+ pm_genpd_init(genpd, NULL, false);
+ }
+
+ /*
+ * We are not allowed to fail here since there is no way to unregister
+ * a power domain. Once registered above we have to keep the domains
+ * valid.
+ */
+
+ ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
+ pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
+ if (ret && IS_ENABLED(CONFIG_PM))
+ dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+ ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D],
+ pd_data->domains[MT8173_POWER_DOMAIN_MFG]);
+ if (ret && IS_ENABLED(CONFIG_PM))
+ dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+
+ return 0;
+}
+
+static const struct of_device_id of_scpsys_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt8173-scpsys",
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver scpsys_drv = {
+ .driver = {
+ .name = "mtk-scpsys",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_scpsys_match_tbl),
+ },
+};
+
+module_platform_driver_probe(scpsys_drv, scpsys_probe);
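All four wait loops in scpsys_power_on()/scpsys_power_off() share one shape: poll, and only report -ETIMEDOUT after one further check past the deadline, so a poller preempted across the deadline cannot miss a success that happened in time. A hedged standalone helper capturing that shape (invented, not part of the driver):

/*
 * Hedged sketch of the polling shape used above. The 'expired' flag
 * guarantees exactly one more check after the deadline, so a long
 * preemption between checks cannot turn a late success into -ETIMEDOUT.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <asm/processor.h>

static int poll_until(bool (*done)(void *arg), void *arg, unsigned long tmo)
{
	unsigned long timeout = jiffies + tmo;
	bool expired = false;

	while (!done(arg)) {
		if (expired)
			return -ETIMEDOUT;

		cpu_relax();

		if (time_after(jiffies, timeout))
			expired = true;	/* one final check on the next pass */
	}

	return 0;
}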
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5eea374c8fa6..ba47b70f4d85 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -13,7 +13,38 @@ config QCOM_GSBI
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
+ select QCOM_SCM
help
QCOM Platform specific power driver to manage cores and L2 low power
modes. It interfaces with various system drivers to put the cores in
low power modes.
+
+config QCOM_SMD
+ tristate "Qualcomm Shared Memory Driver (SMD)"
+ depends on QCOM_SMEM
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Driver
+ providing communication channels to remote processors in Qualcomm
+ platforms.
+
+config QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on QCOM_SMD && OF
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 931d385386c5..10a93d168e0e 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_SMD) += smd.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 000000000000..1392ccf14a20
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct qcom_smd_channel *rpm_channel;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ u32 service_type;
+ u32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ u32 msg_id;
+ u32 flags;
+ u32 type;
+ u32 id;
+ u32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ u32 msg_type;
+ u32 length;
+ union {
+ u32 msg_id;
+ u8 message[0];
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @state: active/sleep state the request applies to (sets BIT(state) in flags)
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[count];
+ } pkt;
+
+ /* SMD packets to the RPM may not exceed 256 bytes */
+ if (WARN_ON(sizeof(pkt) >= 256))
+ return -EINVAL;
+
+ mutex_lock(&rpm->lock);
+
+ pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST;
+ pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
+
+ pkt.req.msg_id = msg_id++;
+ pkt.req.flags = BIT(state);
+ pkt.req.type = type;
+ pkt.req.id = id;
+ pkt.req.data_len = count;
+ memcpy(pkt.payload, buf, count);
+
+ ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt));
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
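+
+/*
+ * Usage sketch for qcom_rpm_smd_write(); the resource type, id and payload
+ * layout below are hypothetical examples for illustration, not part of this
+ * patch:
+ *
+ * u32 value = cpu_to_le32(1);
+ * int ret;
+ *
+ * // state 0 selects the first request set, since flags is set to BIT(state)
+ * ret = qcom_rpm_smd_write(rpm, 0, 0x616f646c, 3, &value, sizeof(value));
+ * if (ret)
+ * dev_err(dev, "RPM write failed: %d\n", ret);
+ */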
+
+static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ const struct qcom_rpm_header *hdr = data;
+ const struct qcom_rpm_message *msg;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr->length;
+ char msgbuf[32];
+ int status = 0;
+ u32 len;
+
+ if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST ||
+ hdr->length < sizeof(struct qcom_rpm_message)) {
+ dev_err(&qsdev->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ switch (msg->msg_type) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf));
+ memcpy_fromio(msgbuf, msg->message, len);
+ msgbuf[len - 1] = 0;
+
+ if (!strcmp(msgbuf, "resource does not exist"))
+ status = -ENXIO;
+ else
+ status = -EINVAL;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+{
+ struct qcom_smd_rpm *rpm;
+
+ rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->rpm_channel = sdev->channel;
+
+ dev_set_drvdata(&sdev->dev, rpm);
+
+ return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-msm8974" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct qcom_smd_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .driver = {
+ .name = "qcom_smd_rpm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+ return qcom_smd_driver_register(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+ qcom_smd_driver_unregister(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
new file mode 100644
index 000000000000..a6155c917d52
--- /dev/null
+++ b/drivers/soc/qcom/smd.c
@@ -0,0 +1,1327 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/wait.h>
+
+/*
+ * The Qualcomm Shared Memory communication solution provides point-to-point
+ * channels for clients to send and receive streaming or packet based data.
+ *
+ * Each channel consists of a control item (channel info) and a ring buffer
+ * pair. The channel info carries information related to channel state, flow
+ * control and the offsets within the ring buffer.
+ *
+ * All allocated channels are listed in an allocation table, identifying the
+ * pair of items by name, type and remote processor.
+ *
+ * Upon creating a new channel the remote processor allocates channel info and
+ * ring buffer items from the smem heap and populates the allocation table. An
+ * interrupt is sent to the other end of the channel, which should then scan
+ * for new channels. A channel never goes away; it only changes state.
+ *
+ * The remote processor signals its intent to bring up the communication
+ * channel by setting the state of its end of the channel to "opening" and
+ * sending out an interrupt. We detect this change and register an smd device
+ * to consume the channel. Upon finding a consumer we finish the handshake and
+ * the channel is up.
+ *
+ * Upon closing a channel, the remote processor will update the state of its
+ * end of the channel and signal us; we will then unregister any attached
+ * device and close our end of the channel.
+ *
+ * Devices attached to a channel can use the qcom_smd_send function to push
+ * data to the channel; this is done by copying the data into the tx ring
+ * buffer, updating the pointers in the channel info and signaling the remote
+ * processor.
+ *
+ * The remote processor does the equivalent when it transfers data, and upon
+ * receiving the interrupt we check the channel info for new data and deliver
+ * it to the attached device. If the device is not ready to receive the data,
+ * we leave it in the ring buffer.
+ */
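+
+/*
+ * A minimal client of this bus looks roughly like the sketch below (all
+ * "foo" names are hypothetical; see the smd-rpm driver in this patch for a
+ * real example). The callback runs for each received packet and may return
+ * a negative errno to leave the packet in the ring buffer:
+ *
+ * static int foo_callback(struct qcom_smd_device *qsdev,
+ * const void *data, size_t count)
+ * {
+ * return 0; // packet consumed
+ * }
+ *
+ * static int foo_probe(struct qcom_smd_device *qsdev)
+ * {
+ * return 0;
+ * }
+ *
+ * static struct qcom_smd_driver foo_driver = {
+ * .probe = foo_probe,
+ * .callback = foo_callback,
+ * .driver = {
+ * .name = "foo",
+ * .of_match_table = foo_of_match, // bus matching is OF based
+ * },
+ * };
+ *
+ * which is then registered with qcom_smd_driver_register(&foo_driver).
+ */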
+
+struct smd_channel_info;
+struct smd_channel_info_word;
+
+#define SMD_ALLOC_TBL_COUNT 2
+#define SMD_ALLOC_TBL_SIZE 64
+
+/*
+ * This lists the various smem heap items relevant for the allocation table and
+ * smd channel entries.
+ */
+static const struct {
+ unsigned alloc_tbl_id;
+ unsigned info_base_id;
+ unsigned fifo_base_id;
+} smem_items[SMD_ALLOC_TBL_COUNT] = {
+ {
+ .alloc_tbl_id = 13,
+ .info_base_id = 14,
+ .fifo_base_id = 338
+ },
+ {
+ .alloc_tbl_id = 14,
+ .info_base_id = 266,
+ .fifo_base_id = 202,
+ },
+};
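+
+/*
+ * Example: a channel with cid 5 allocated from the first table uses smem item
+ * 14 + 5 = 19 for its channel info pair and item 338 + 5 = 343 for its fifo
+ * pair; see qcom_discover_channels() below for the lookup.
+ */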
+
+/**
+ * struct qcom_smd_edge - representing a remote processor
+ * @smd: handle to qcom_smd
+ * @of_node: of_node handle for information related to this edge
+ * @edge_id: identifier of this edge
+ * @remote_pid: identifier of remote processor
+ * @irq: interrupt for signals on this edge
+ * @ipc_regmap: regmap handle holding the outgoing ipc register
+ * @ipc_offset: offset within @ipc_regmap of the register for ipc
+ * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
+ * @channels: list of all channels detected on this edge
+ * @channels_lock: guard for modifications of @channels
+ * @allocated: array of bitmaps representing already allocated channels
+ * @need_rescan: flag that the @work needs to scan smem for new channels
+ * @smem_available: last available amount of smem triggering a channel scan
+ * @work: work item for edge housekeeping
+ */
+struct qcom_smd_edge {
+ struct qcom_smd *smd;
+ struct device_node *of_node;
+ unsigned edge_id;
+ unsigned remote_pid;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head channels;
+ spinlock_t channels_lock;
+
+ DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
+
+ bool need_rescan;
+ unsigned smem_available;
+
+ struct work_struct work;
+};
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+ SMD_CHANNEL_CLOSED,
+ SMD_CHANNEL_OPENING,
+ SMD_CHANNEL_OPENED,
+ SMD_CHANNEL_FLUSHING,
+ SMD_CHANNEL_CLOSING,
+ SMD_CHANNEL_RESET,
+ SMD_CHANNEL_RESET_OPENING
+};
+
+/**
+ * struct qcom_smd_channel - smd channel struct
+ * @edge: qcom_smd_edge this channel is living on
+ * @qsdev: reference to an associated smd client device
+ * @name: name of the channel
+ * @state: local state of the channel
+ * @remote_state: remote state of the channel
+ * @tx_info: byte aligned outgoing channel info
+ * @rx_info: byte aligned incoming channel info
+ * @tx_info_word: word aligned outgoing channel info
+ * @rx_info_word: word aligned incoming channel info
+ * @tx_lock: lock to make writes to the channel mutually exclusive
+ * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
+ * @tx_fifo: pointer to the outgoing ring buffer
+ * @rx_fifo: pointer to the incoming ring buffer
+ * @fifo_size: size of each ring buffer
+ * @bounce_buffer: bounce buffer for reading wrapped packets
+ * @cb: callback function registered for this channel
+ * @recv_lock: guard for rx info modifications and cb pointer
+ * @pkt_size: size of the currently handled packet
+ * @list: list entry for @channels in qcom_smd_edge
+ */
+struct qcom_smd_channel {
+ struct qcom_smd_edge *edge;
+
+ struct qcom_smd_device *qsdev;
+
+ char *name;
+ enum smd_channel_state state;
+ enum smd_channel_state remote_state;
+
+ struct smd_channel_info *tx_info;
+ struct smd_channel_info *rx_info;
+
+ struct smd_channel_info_word *tx_info_word;
+ struct smd_channel_info_word *rx_info_word;
+
+ struct mutex tx_lock;
+ wait_queue_head_t fblockread_event;
+
+ void *tx_fifo;
+ void *rx_fifo;
+ int fifo_size;
+
+ void *bounce_buffer;
+ int (*cb)(struct qcom_smd_device *, const void *, size_t);
+
+ spinlock_t recv_lock;
+
+ int pkt_size;
+
+ struct list_head list;
+};
+
+/**
+ * struct qcom_smd - smd struct
+ * @dev: device struct
+ * @num_edges: number of entries in @edges
+ * @edges: array of edges to be handled
+ */
+struct qcom_smd {
+ struct device *dev;
+
+ unsigned num_edges;
+ struct qcom_smd_edge edges[0];
+};
+
+/*
+ * Format of the smd_info smem items, for byte aligned channels.
+ */
+struct smd_channel_info {
+ u32 state;
+ u8 fDSR;
+ u8 fCTS;
+ u8 fCD;
+ u8 fRI;
+ u8 fHEAD;
+ u8 fTAIL;
+ u8 fSTATE;
+ u8 fBLOCKREADINTR;
+ u32 tail;
+ u32 head;
+};
+
+/*
+ * Format of the smd_info smem items, for word aligned channels.
+ */
+struct smd_channel_info_word {
+ u32 state;
+ u32 fDSR;
+ u32 fCTS;
+ u32 fCD;
+ u32 fRI;
+ u32 fHEAD;
+ u32 fTAIL;
+ u32 fSTATE;
+ u32 fBLOCKREADINTR;
+ u32 tail;
+ u32 head;
+};
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+ (channel->rx_info_word ? \
+ channel->rx_info_word->param : \
+ channel->rx_info->param)
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+ (channel->rx_info_word ? \
+ (channel->rx_info_word->param = value) : \
+ (channel->rx_info->param = value))
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+ (channel->tx_info_word ? \
+ channel->tx_info_word->param : \
+ channel->tx_info->param)
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+ (channel->tx_info_word ? \
+ (channel->tx_info_word->param = value) : \
+ (channel->tx_info->param = value))
+
+/**
+ * struct qcom_smd_alloc_entry - channel allocation entry
+ * @name: channel name
+ * @cid: channel index
+ * @flags: channel flags and edge id
+ * @ref_count: reference count of the channel
+ */
+struct qcom_smd_alloc_entry {
+ u8 name[20];
+ u32 cid;
+ u32 flags;
+ u32 ref_count;
+} __packed;
+
+#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
+#define SMD_CHANNEL_FLAGS_STREAM BIT(8)
+#define SMD_CHANNEL_FLAGS_PACKET BIT(9)
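+
+/*
+ * Example: a flags value of 0x201 describes a packet channel (bit 9 set) on
+ * edge 1 (low byte).
+ */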
+
+/*
+ * Each smd packet contains a 20 byte header, with the first 4 being the length
+ * of the packet.
+ */
+#define SMD_PACKET_HEADER_LEN 20
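+
+/*
+ * On the wire a packet thus looks like the following; see qcom_smd_send(),
+ * which fills in only the first header word:
+ *
+ * u32 hdr[5]; // hdr[0] = payload length in bytes, hdr[1..4] = 0
+ * u8 payload[hdr[0]];
+ */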
+
+/*
+ * Signal the remote processor associated with 'channel'.
+ */
+static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+
+ regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
+}
+
+/*
+ * Initialize the tx channel info
+ */
+static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
+{
+ SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_TX_CHANNEL_INFO(channel, fDSR, 0);
+ SET_TX_CHANNEL_INFO(channel, fCTS, 0);
+ SET_TX_CHANNEL_INFO(channel, fCD, 0);
+ SET_TX_CHANNEL_INFO(channel, fRI, 0);
+ SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
+ SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+ SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_INFO(channel, head, 0);
+ SET_TX_CHANNEL_INFO(channel, tail, 0);
+
+ qcom_smd_signal_channel(channel);
+
+ channel->state = SMD_CHANNEL_CLOSED;
+ channel->pkt_size = 0;
+}
+
+/*
+ * Calculate the amount of data available in the rx fifo
+ */
+static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+
+ head = GET_RX_CHANNEL_INFO(channel, head);
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ return (head - tail) & (channel->fifo_size - 1);
+}
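+
+/*
+ * The mask arithmetic above relies on the fifo size being a power of two;
+ * e.g. for a 1024 byte fifo with head = 10 and tail = 1000,
+ * (10 - 1000) & 1023 = 34 bytes are available for reading.
+ */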
+
+/*
+ * Set tx channel state and inform the remote processor
+ */
+static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
+ int state)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+ bool is_open = state == SMD_CHANNEL_OPENED;
+
+ if (channel->state == state)
+ return;
+
+ dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+
+ SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
+ SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
+ SET_TX_CHANNEL_INFO(channel, fCD, is_open);
+
+ SET_TX_CHANNEL_INFO(channel, state, state);
+ SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+
+ channel->state = state;
+ qcom_smd_signal_channel(channel);
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that's required.
+ */
+static void smd_copy_to_fifo(void __iomem *_dst,
+ const void *_src,
+ size_t count,
+ bool word_aligned)
+{
+ u32 __iomem *dst = _dst;
+ const u32 *src = _src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ writel_relaxed(*src++, dst++);
+ } else {
+ memcpy_toio(_dst, _src, count);
+ }
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that is required.
+ */
+static void smd_copy_from_fifo(void *_dst,
+ const void __iomem *_src,
+ size_t count,
+ bool word_aligned)
+{
+ u32 *dst = _dst;
+ const u32 __iomem *src = _src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ *dst++ = readl_relaxed(src++);
+ } else {
+ memcpy_fromio(_dst, _src, count);
+ }
+}
+
+/*
+ * Read count bytes of data from the rx fifo into buf, but don't advance the
+ * tail.
+ */
+static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
+ void *buf, size_t count)
+{
+ bool word_aligned;
+ unsigned tail;
+ size_t len;
+
+ word_aligned = channel->rx_info_word != NULL;
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ len = min_t(size_t, count, channel->fifo_size - tail);
+ if (len) {
+ smd_copy_from_fifo(buf,
+ channel->rx_fifo + tail,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_from_fifo(buf + len,
+ channel->rx_fifo,
+ count - len,
+ word_aligned);
+ }
+
+ return count;
+}
+
+/*
+ * Advance the rx tail by count bytes.
+ */
+static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
+ size_t count)
+{
+ unsigned tail;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+ tail += count;
+ tail &= (channel->fifo_size - 1);
+ SET_RX_CHANNEL_INFO(channel, tail, tail);
+}
+
+/*
+ * Read out a single packet from the rx fifo and deliver it to the device
+ */
+static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev = channel->qsdev;
+ unsigned tail;
+ size_t len;
+ void *ptr;
+ int ret;
+
+ if (!channel->cb)
+ return 0;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ /* Use bounce buffer if the data wraps */
+ if (tail + channel->pkt_size >= channel->fifo_size) {
+ ptr = channel->bounce_buffer;
+ len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
+ } else {
+ ptr = channel->rx_fifo + tail;
+ len = channel->pkt_size;
+ }
+
+ ret = channel->cb(qsdev, ptr, len);
+ if (ret < 0)
+ return ret;
+
+ /* Only forward the tail if the client consumed the data */
+ qcom_smd_channel_advance(channel, len);
+
+ channel->pkt_size = 0;
+
+ return 0;
+}
+
+/*
+ * Per channel interrupt handling
+ */
+static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
+{
+ bool need_state_scan = false;
+ int remote_state;
+ u32 pktlen;
+ int avail;
+ int ret;
+
+ /* Handle state changes */
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != channel->remote_state) {
+ channel->remote_state = remote_state;
+ need_state_scan = true;
+ }
+ /* Indicate that we have seen any state change */
+ SET_RX_CHANNEL_INFO(channel, fSTATE, 0);
+
+ /* Signal waiting qcom_smd_send() about the interrupt */
+ if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /* Don't consume any data until we've opened the channel */
+ if (channel->state != SMD_CHANNEL_OPENED)
+ goto out;
+
+ /* Indicate that we've seen the new data */
+ SET_RX_CHANNEL_INFO(channel, fHEAD, 0);
+
+ /* Consume data */
+ for (;;) {
+ avail = qcom_smd_channel_get_rx_avail(channel);
+
+ if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
+ qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
+ qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
+ channel->pkt_size = pktlen;
+ } else if (channel->pkt_size && avail >= channel->pkt_size) {
+ ret = qcom_smd_channel_recv_single(channel);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ /* Indicate that we have seen and updated tail */
+ SET_RX_CHANNEL_INFO(channel, fTAIL, 1);
+
+ /* Signal the remote that we've consumed the data (if requested) */
+ if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+ }
+
+out:
+ return need_state_scan;
+}
+
+/*
+ * The edge interrupts are triggered by the remote processor on state changes,
+ * channel info updates or when new channels are created.
+ */
+static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
+{
+ struct qcom_smd_edge *edge = data;
+ struct qcom_smd_channel *channel;
+ unsigned available;
+ bool kick_worker = false;
+
+ /*
+ * Handle state changes or data on each of the channels on this edge
+ */
+ spin_lock(&edge->channels_lock);
+ list_for_each_entry(channel, &edge->channels, list) {
+ spin_lock(&channel->recv_lock);
+ kick_worker |= qcom_smd_channel_intr(channel);
+ spin_unlock(&channel->recv_lock);
+ }
+ spin_unlock(&edge->channels_lock);
+
+ /*
+ * Creating a new channel requires allocating an smem entry, so we only
+ * have to scan if the amount of available space in smem has changed
+ * since the last scan.
+ */
+ available = qcom_smem_get_free_space(edge->remote_pid);
+ if (available != edge->smem_available) {
+ edge->smem_available = available;
+ edge->need_rescan = true;
+ kick_worker = true;
+ }
+
+ if (kick_worker)
+ schedule_work(&edge->work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Deliver any outstanding packets in the rx fifo; used after probe of a
+ * client to deliver any packets that weren't delivered before the client
+ * was set up.
+ */
+static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ qcom_smd_channel_intr(channel);
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
+ * Calculate how much space is available in the tx fifo.
+ */
+static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+ unsigned mask = channel->fifo_size - 1;
+
+ head = GET_TX_CHANNEL_INFO(channel, head);
+ tail = GET_TX_CHANNEL_INFO(channel, tail);
+
+ return mask - ((head - tail) & mask);
+}
+
+/*
+ * Write count bytes of data into channel, possibly wrapping in the ring buffer
+ */
+static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
+ const void *data,
+ size_t count)
+{
+ bool word_aligned;
+ unsigned head;
+ size_t len;
+
+ word_aligned = channel->tx_info_word != NULL;
+ head = GET_TX_CHANNEL_INFO(channel, head);
+
+ len = min_t(size_t, count, channel->fifo_size - head);
+ if (len) {
+ smd_copy_to_fifo(channel->tx_fifo + head,
+ data,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_to_fifo(channel->tx_fifo,
+ data + len,
+ count - len,
+ word_aligned);
+ }
+
+ head += count;
+ head &= (channel->fifo_size - 1);
+ SET_TX_CHANNEL_INFO(channel, head, head);
+
+ return count;
+}
+
+/**
+ * qcom_smd_send - write data to smd channel
+ * @channel: channel handle
+ * @data: buffer of data to write
+ * @len: number of bytes to write
+ *
+ * This is a blocking write of len bytes into the channel's tx ring buffer,
+ * followed by a signal to the remote end. It will sleep until there is enough
+ * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
+ * mechanism to avoid polling.
+ */
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
+{
+ u32 hdr[5] = {len,};
+ int tlen = sizeof(hdr) + len;
+ int ret;
+
+ /* Word aligned channels only accept word size aligned data */
+ if (channel->rx_info_word != NULL && len % 4)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&channel->tx_lock);
+ if (ret)
+ return ret;
+
+ while (qcom_smd_get_tx_avail(channel) < tlen) {
+ if (channel->state != SMD_CHANNEL_OPENED) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+
+ ret = wait_event_interruptible(channel->fblockread_event,
+ qcom_smd_get_tx_avail(channel) >= tlen ||
+ channel->state != SMD_CHANNEL_OPENED);
+ if (ret)
+ goto out;
+
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+ }
+
+ SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+
+ qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
+ qcom_smd_write_fifo(channel, data, len);
+
+ SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
+
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+
+out:
+ mutex_unlock(&channel->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smd_send);
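+
+/*
+ * Usage sketch ("pkt" stands for any client-defined message; illustrative
+ * only). Note that this call may sleep and therefore must not be made from
+ * atomic context:
+ *
+ * ret = qcom_smd_send(channel, &pkt, sizeof(pkt));
+ * if (ret)
+ * dev_err(dev, "send failed: %d\n", ret); // -EPIPE if the channel closed
+ */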
+
+static struct qcom_smd_device *to_smd_device(struct device *dev)
+{
+ return container_of(dev, struct qcom_smd_device, dev);
+}
+
+static struct qcom_smd_driver *to_smd_driver(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
+}
+
+static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ size_t bb_size;
+ int ret;
+
+ /*
+ * Packets are at most 4k, but the bounce buffer is reduced if the fifo is smaller
+ */
+ bb_size = min(channel->fifo_size, SZ_4K);
+ channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+ if (!channel->bounce_buffer)
+ return -ENOMEM;
+
+ channel->cb = qsdrv->callback;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+ ret = qsdrv->probe(qsdev);
+ if (ret)
+ goto err;
+
+ qcom_smd_channel_resume(channel);
+
+ return 0;
+
+err:
+ dev_err(&qsdev->dev, "probe failed\n");
+
+ channel->cb = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ return ret;
+}
+
+/*
+ * Remove the smd client.
+ *
+ * The channel is going away, for some reason, so remove the smd client and
+ * reset the channel state.
+ */
+static int qcom_smd_dev_remove(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ unsigned long flags;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
+
+ /*
+ * Make sure we don't race with the code receiving data.
+ */
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = NULL;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+ /* Wake up any sleepers in qcom_smd_send() */
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /*
+ * We expect that the client might block in remove() waiting for any
+ * outstanding calls to qcom_smd_send() to wake up and finish.
+ */
+ if (qsdrv->remove)
+ qsdrv->remove(qsdev);
+
+ /*
+ * The client is now gone, cleanup and reset the channel state.
+ */
+ channel->qsdev = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+
+ qcom_smd_channel_reset(channel);
+
+ return 0;
+}
+
+static struct bus_type qcom_smd_bus = {
+ .name = "qcom_smd",
+ .match = qcom_smd_dev_match,
+ .probe = qcom_smd_dev_probe,
+ .remove = qcom_smd_dev_remove,
+};
+
+/*
+ * Release function for the qcom_smd_device object.
+ */
+static void qcom_smd_release_device(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ kfree(qsdev);
+}
+
+/*
+ * Finds the device_node for the smd child interested in this channel.
+ */
+static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
+ const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ const char *key;
+ int ret;
+
+ for_each_available_child_of_node(edge_node, child) {
+ key = "qcom,smd-channels";
+ ret = of_property_read_string(child, key, &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+/*
+ * Create an smd client device for a channel that is being opened.
+ */
+static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev;
+ struct qcom_smd_edge *edge = channel->edge;
+ struct device_node *node;
+ struct qcom_smd *smd = edge->smd;
+ int ret;
+
+ if (channel->qsdev)
+ return -EEXIST;
+
+ node = qcom_smd_match_channel(edge->of_node, channel->name);
+ if (!node) {
+ dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
+ return -ENXIO;
+ }
+
+ dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+
+ qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
+ if (!qsdev)
+ return -ENOMEM;
+
+ dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
+ qsdev->dev.parent = smd->dev;
+ qsdev->dev.bus = &qcom_smd_bus;
+ qsdev->dev.release = qcom_smd_release_device;
+ qsdev->dev.of_node = node;
+
+ qsdev->channel = channel;
+
+ channel->qsdev = qsdev;
+
+ ret = device_register(&qsdev->dev);
+ if (ret) {
+ dev_err(smd->dev, "device_register failed: %d\n", ret);
+ put_device(&qsdev->dev);
+ }
+
+ return ret;
+}
+
+/*
+ * Destroy an smd client device for a channel that's going away.
+ */
+static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
+{
+ struct device *dev;
+
+ BUG_ON(!channel->qsdev);
+
+ dev = &channel->qsdev->dev;
+
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+}
+
+/**
+ * qcom_smd_driver_register - register an smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
+{
+ qsdrv->driver.bus = &qcom_smd_bus;
+ return driver_register(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_register);
+
+/**
+ * qcom_smd_driver_unregister - unregister an smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
+{
+ driver_unregister(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_unregister);
+
+/*
+ * Allocate the qcom_smd_channel object for a newly found smd channel,
+ * retrieving and validating the smem items involved.
+ */
+static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
+ unsigned smem_info_item,
+ unsigned smem_fifo_item,
+ char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ size_t fifo_size;
+ size_t info_size;
+ void *fifo_base;
+ void *info;
+ int ret;
+
+ channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ channel->edge = edge;
+ channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ if (!channel->name)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&channel->tx_lock);
+ spin_lock_init(&channel->recv_lock);
+ init_waitqueue_head(&channel->fblockread_event);
+
+ ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info,
+ &info_size);
+ if (ret)
+ goto free_name_and_channel;
+
+ /*
+ * Use the size of the item to figure out which channel info struct to
+ * use.
+ */
+ if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
+ channel->tx_info_word = info;
+ channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
+ } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
+ channel->tx_info = info;
+ channel->rx_info = info + sizeof(struct smd_channel_info);
+ } else {
+ dev_err(smd->dev,
+ "channel info of size %zu not supported\n", info_size);
+ ret = -EINVAL;
+ goto free_name_and_channel;
+ }
+
+ ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base,
+ &fifo_size);
+ if (ret)
+ goto free_name_and_channel;
+
+ /* The channel consists of an rx and tx fifo of equal size */
+ fifo_size /= 2;
+
+ dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ name, info_size, fifo_size);
+
+ channel->tx_fifo = fifo_base;
+ channel->rx_fifo = fifo_base + fifo_size;
+ channel->fifo_size = fifo_size;
+
+ qcom_smd_channel_reset(channel);
+
+ return channel;
+
+free_name_and_channel:
+ devm_kfree(smd->dev, channel->name);
+ devm_kfree(smd->dev, channel);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * Scans the allocation table for any newly allocated channels, calls
+ * qcom_smd_create_channel() to create representations of these and add
+ * them to the edge's list of channels.
+ */
+static void qcom_discover_channels(struct qcom_smd_edge *edge)
+{
+ struct qcom_smd_alloc_entry *alloc_tbl;
+ struct qcom_smd_alloc_entry *entry;
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ unsigned long flags;
+ unsigned fifo_id;
+ unsigned info_id;
+ int ret;
+ int tbl;
+ int i;
+
+ for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
+ ret = qcom_smem_get(edge->remote_pid,
+ smem_items[tbl].alloc_tbl_id,
+ (void **)&alloc_tbl,
+ NULL);
+ if (ret < 0)
+ continue;
+
+ for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
+ entry = &alloc_tbl[i];
+ if (test_bit(i, edge->allocated[tbl]))
+ continue;
+
+ if (entry->ref_count == 0)
+ continue;
+
+ if (!entry->name[0])
+ continue;
+
+ if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
+ continue;
+
+ if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+ continue;
+
+ info_id = smem_items[tbl].info_base_id + entry->cid;
+ fifo_id = smem_items[tbl].fifo_base_id + entry->cid;
+
+ channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
+ if (IS_ERR(channel))
+ continue;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_add(&channel->list, &edge->channels);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ set_bit(i, edge->allocated[tbl]);
+ }
+ }
+
+ schedule_work(&edge->work);
+}
+
+/*
+ * This per-edge worker scans smem for any new channels and registers these. It
+ * then scans all registered channels for state changes that should be handled
+ * by creating or destroying smd client devices for the registered channels.
+ *
+ * LOCKING: edge->channels_lock need not be held during the traversal of the
+ * channels list, as the traversal is done synchronously with the only writer.
+ */
+static void qcom_channel_state_worker(struct work_struct *work)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = container_of(work,
+ struct qcom_smd_edge,
+ work);
+ unsigned remote_state;
+
+ /*
+ * Rescan smem if we have reason to believe that there are new channels.
+ */
+ if (edge->need_rescan) {
+ edge->need_rescan = false;
+ qcom_discover_channels(edge);
+ }
+
+ /*
+ * Register a device for any closed channel where the remote processor
+ * is showing interest in opening the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_CLOSED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != SMD_CHANNEL_OPENING &&
+ remote_state != SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_create_device(channel);
+ }
+
+ /*
+ * Unregister the device for any channel that is opened where the
+ * remote processor is closing the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_OPENING &&
+ channel->state != SMD_CHANNEL_OPENED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state == SMD_CHANNEL_OPENING ||
+ remote_state == SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+}
+
+/*
+ * Parses an of_node describing an edge.
+ */
+static int qcom_smd_parse_edge(struct device *dev,
+ struct device_node *node,
+ struct qcom_smd_edge *edge)
+{
+ struct device_node *syscon_np;
+ const char *key;
+ int irq;
+ int ret;
+
+ INIT_LIST_HEAD(&edge->channels);
+ spin_lock_init(&edge->channels_lock);
+
+ INIT_WORK(&edge->work, qcom_channel_state_worker);
+
+ edge->of_node = of_node_get(node);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
+ key = "qcom,smd-edge";
+ ret = of_property_read_u32(node, key, &edge->edge_id);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
+
+ edge->remote_pid = QCOM_SMEM_HOST_ANY;
+ key = "qcom,remote-pid";
+ of_property_read_u32(node, key, &edge->remote_pid);
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(edge->ipc_regmap))
+ return PTR_ERR(edge->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct qcom_smd_edge *edge;
+ struct device_node *node;
+ struct qcom_smd *smd;
+ size_t array_size;
+ int num_edges;
+ int ret;
+ int i = 0;
+
+ /* Wait for smem */
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ num_edges = of_get_available_child_count(pdev->dev.of_node);
+ array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+ smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
+ if (!smd)
+ return -ENOMEM;
+ smd->dev = &pdev->dev;
+
+ smd->num_edges = num_edges;
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ edge = &smd->edges[i++];
+ edge->smd = smd;
+
+ ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
+ if (ret)
+ continue;
+
+ edge->need_rescan = true;
+ schedule_work(&edge->work);
+ }
+
+ platform_set_drvdata(pdev, smd);
+
+ return 0;
+}
+
+/*
+ * Shut down all smd clients by making sure that each edge stops processing
+ * events and scanning for new channels, then call destroy on the devices.
+ */
+static int qcom_smd_remove(struct platform_device *pdev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge;
+ struct qcom_smd *smd = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < smd->num_edges; i++) {
+ edge = &smd->edges[i];
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->work);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (!channel->qsdev)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smd_of_match[] = {
+ { .compatible = "qcom,smd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
+
+static struct platform_driver qcom_smd_driver = {
+ .probe = qcom_smd_probe,
+ .remove = qcom_smd_remove,
+ .driver = {
+ .name = "qcom-smd",
+ .of_match_table = qcom_smd_of_match,
+ },
+};
+
+static int __init qcom_smd_init(void)
+{
+ int ret;
+
+ ret = bus_register(&qcom_smd_bus);
+ if (ret) {
+ pr_err("failed to register smd bus: %d\n", ret);
+ return ret;
+ }
+
+ return platform_driver_register(&qcom_smd_driver);
+}
+postcore_initcall(qcom_smd_init);
+
+static void __exit qcom_smd_exit(void)
+{
+ platform_driver_unregister(&qcom_smd_driver);
+ bus_unregister(&qcom_smd_bus);
+}
+module_exit(qcom_smd_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000000..52365188a1c2
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains meta data for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
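+
+/*
+ * Rough sketch of the main smem region described above (not to scale; the
+ * private partitions sit wherever the partition table places them):
+ *
+ * +---------------------------+ <- start of main region
+ * | struct smem_header |
+ * | global heap items |
+ * | ... |
+ * | private partitions |
+ * +---------------------------+ <- end - 4kB
+ * | struct smem_ptable |
+ * +---------------------------+ <- end of main region
+ */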
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version is
+ * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION 3
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_EXPECTED_VERSION 11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 9
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ u32 command;
+ u32 status;
+ u32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address of the memory region holding this item, or 0 for
+ * the default region; bits 0 and 1 are reserved
+ */
+struct smem_global_entry {
+ u32 allocated;
+ u32 offset;
+ u32 size;
+ u32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ u32 version[32];
+ u32 initialized;
+ u32 free_offset;
+ u32 available;
+ u32 reserved;
+ struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ u32 offset;
+ u32 size;
+ u32 flags;
+ u16 host0;
+ u16 host1;
+ u32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u32 magic;
+ u32 version;
+ u32 num_entries;
+ u32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u32 magic;
+ u16 host0;
+ u16 host1;
+ u32 size;
+ u32 offset_free_uncached;
+ u32 offset_free_cached;
+ u32 reserved[3];
+};
+#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary;
+ u16 item;
+ u32 size; /* includes padding bytes */
+ u16 padding_data;
+ u16 padding_hdr;
+ u32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ u32 aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @partitions: list of pointers to partitions affecting the current
+ * processor/host
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+ unsigned num_regions;
+ struct smem_region regions[0];
+};
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ size_t size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *hdr;
+ size_t alloc_size;
+ void *p;
+
+ phdr = smem->partitions[host];
+
+ p = (void *)phdr + sizeof(*phdr);
+ while (p < (void *)phdr + phdr->offset_free_uncached) {
+ hdr = p;
+
+ if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return -EINVAL;
+ }
+
+ if (hdr->item == item)
+ return -EEXIST;
+
+ p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ }
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr = p;
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = item;
+ hdr->size = ALIGN(size, 8);
+ hdr->padding_data = hdr->size - size;
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+ * that remote processors that do not take the remote spinlock still
+ * get a consistent view of the linked list.
+ */
+ wmb();
+ phdr->offset_free_uncached += alloc_size;
+
+ return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_header *header;
+ struct smem_global_entry *entry;
+
+ if (WARN_ON(item >= SMEM_ITEM_COUNT))
+ return -EINVAL;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > header->available))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = size;
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = 1;
+
+ header->free_offset += size;
+ header->available -= size;
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ret = qcom_smem_alloc_private(__smem, host, item, size);
+ else
+ ret = qcom_smem_alloc_global(__smem, item, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static int qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ void **ptr,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *area;
+ struct smem_global_entry *entry;
+ u32 aux_base;
+ unsigned i;
+
+ if (WARN_ON(item >= SMEM_ITEM_COUNT))
+ return -EINVAL;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return -ENXIO;
+
+ if (ptr != NULL) {
+ aux_base = entry->aux_base & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ area = &smem->regions[i];
+
+ if (area->aux_base == aux_base || !aux_base) {
+ *ptr = area->virt_base + entry->offset;
+ break;
+ }
+ }
+ }
+ if (size != NULL)
+ *size = entry->size;
+
+ return 0;
+}
+
+static int qcom_smem_get_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ void **ptr,
+ size_t *size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *hdr;
+ void *p;
+
+ phdr = smem->partitions[host];
+
+ p = (void *)phdr + sizeof(*phdr);
+ while (p < (void *)phdr + phdr->offset_free_uncached) {
+ hdr = p;
+
+ if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return -EINVAL;
+ }
+
+ if (hdr->item == item) {
+ if (ptr != NULL)
+ *ptr = p + sizeof(*hdr) + hdr->padding_hdr;
+
+ if (size != NULL)
+ *size = hdr->size - hdr->padding_data;
+
+ return 0;
+ }
+
+ p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * qcom_smem_get() - resolve ptr and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @ptr: pointer to be filled out with address of the item
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up pointer and size of a smem item.
+ */
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+ else
+ ret = qcom_smem_get_global(__smem, item, ptr, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get);
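+
+/*
+ * Typical client usage, combining the two calls above (the item number and
+ * the foo type are hypothetical):
+ *
+ * struct foo *p;
+ * size_t size;
+ * int ret;
+ *
+ * ret = qcom_smem_alloc(remote_pid, 442, sizeof(*p));
+ * if (ret < 0 && ret != -EEXIST)
+ * return ret;
+ *
+ * ret = qcom_smem_get(remote_pid, 442, (void **)&p, &size);
+ */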
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = phdr->offset_free_cached - phdr->offset_free_uncached;
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = header->available;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ unsigned *versions;
+ size_t size;
+ int ret;
+
+ ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
+ (void **)&versions, &size);
+ if (ret < 0) {
+ dev_err(smem->dev, "Unable to read the version item\n");
+ return -ENOENT;
+ }
+
+ if (size < sizeof(unsigned) * (SMEM_MASTER_SBL_VERSION_INDEX + 1)) {
+ dev_err(smem->dev, "Version item is too small\n");
+ return -EINVAL;
+ }
+
+ return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned remote_host;
+ int i;
+
+ ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ if (ptable->magic != SMEM_PTABLE_MAGIC)
+ return 0;
+
+ if (ptable->version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n",
+ ptable->version);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ptable->num_entries; i++) {
+ entry = &ptable->entry[i];
+
+ if (entry->host0 != local_host && entry->host1 != local_host)
+ continue;
+
+ if (!entry->offset)
+ continue;
+
+ if (!entry->size)
+ continue;
+
+ if (entry->host0 == local_host)
+ remote_host = entry->host1;
+ else
+ remote_host = entry->host0;
+
+ if (remote_host >= SMEM_HOST_COUNT) {
+ dev_err(smem->dev,
+ "Invalid remote host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ if (smem->partitions[remote_host]) {
+ dev_err(smem->dev,
+ "Already found a partition for host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + entry->offset;
+
+ if (header->magic != SMEM_PART_MAGIC) {
+ dev_err(smem->dev,
+ "Partition %d has invalid magic\n", i);
+ return -EINVAL;
+ }
+
+ if (header->host0 != local_host && header->host1 != local_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (header->host0 != remote_host && header->host1 != remote_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (header->size != entry->size) {
+ dev_err(smem->dev,
+ "Partition %d has invalid size\n", i);
+ return -EINVAL;
+ }
+
+ if (header->offset_free_uncached > header->size) {
+ dev_err(smem->dev,
+ "Partition %d has invalid free pointer\n", i);
+ return -EINVAL;
+ }
+
+ smem->partitions[remote_host] = header;
+ }
+
+ return 0;
+}
+
+static int qcom_smem_count_mem_regions(struct platform_device *pdev)
+{
+ struct resource *res;
+ int num_regions = 0;
+ int i;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = &pdev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_MEM)
+ num_regions++;
+ }
+
+ return num_regions;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct device_node *np;
+ struct qcom_smem *smem;
+ struct resource *res;
+ struct resource r;
+ size_t array_size;
+ int num_regions = 0;
+ int hwlock_id;
+ u32 version;
+ int ret;
+ int i;
+
+ num_regions = qcom_smem_count_mem_regions(pdev) + 1;
+
+ array_size = num_regions * sizeof(struct smem_region);
+ smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "No memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ smem->regions[0].aux_base = (u32)r.start;
+ smem->regions[0].size = resource_size(&r);
+ smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
+ r.start,
+ resource_size(&r));
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ for (i = 1; i < num_regions; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
+
+ smem->regions[i].aux_base = (u32)res->start;
+ smem->regions[i].size = resource_size(res);
+ smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
+ res->start,
+ resource_size(res));
+ if (!smem->regions[i].virt_base)
+ return -ENOMEM;
+ }
+
+ header = smem->regions[0].virt_base;
+ if (header->initialized != 1 || header->reserved) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ version = qcom_smem_get_sbl_version(smem);
+ if (version >> 16 != SMEM_EXPECTED_VERSION) {
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ return -EINVAL;
+ }
+
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0)
+ return ret;
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ __smem = smem;
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom-smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index cdaad9d53a05..ae857ff7d53d 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_ARCH_TEGRA) += fuse/
+obj-y += fuse/
-obj-$(CONFIG_ARCH_TEGRA) += common.o
-obj-$(CONFIG_ARCH_TEGRA) += pmc.o
+obj-y += common.o
+obj-y += pmc.o
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index a71cb74f3674..cd8f41351add 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -15,6 +15,8 @@ static const struct of_device_id tegra_machine_match[] = {
{ .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra132", },
+ { .compatible = "nvidia,tegra210", },
{ }
};
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
index 3af357da91f3..21bc27580178 100644
--- a/drivers/soc/tegra/fuse/Makefile
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += speedo-tegra210.o
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index c0d660f1aaac..de2c1bfe28b5 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -15,9 +15,10 @@
*
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
-#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -28,8 +29,6 @@
#include "fuse.h"
-static u32 (*fuse_readl)(const unsigned int offset);
-static int fuse_size;
struct tegra_sku_info tegra_sku_info;
EXPORT_SYMBOL(tegra_sku_info);
@@ -42,11 +41,11 @@ static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
[TEGRA_REVISION_A04] = "A04",
};
-static u8 fuse_readb(const unsigned int offset)
+static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
{
u32 val;
- val = fuse_readl(round_down(offset, 4));
+ val = fuse->read(fuse, round_down(offset, 4));
val >>= (offset % 4) * 8;
val &= 0xff;
@@ -54,19 +53,21 @@ static u8 fuse_readb(const unsigned int offset)
}
static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t pos, size_t size)
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t size)
{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tegra_fuse *fuse = dev_get_drvdata(dev);
int i;
- if (pos < 0 || pos >= fuse_size)
+ if (pos < 0 || pos >= attr->size)
return 0;
- if (size > fuse_size - pos)
- size = fuse_size - pos;
+ if (size > attr->size - pos)
+ size = attr->size - pos;
for (i = 0; i < size; i++)
- buf[i] = fuse_readb(pos + i);
+ buf[i] = fuse_readb(fuse, pos + i);
return i;
}
@@ -76,89 +77,239 @@ static struct bin_attribute fuse_bin_attr = {
.read = fuse_read,
};
+static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
+ const struct tegra_fuse_info *info)
+{
+ fuse_bin_attr.size = size;
+
+ return device_create_bin_file(dev, &fuse_bin_attr);
+}
+
static const struct of_device_id car_match[] __initconst = {
{ .compatible = "nvidia,tegra20-car", },
{ .compatible = "nvidia,tegra30-car", },
{ .compatible = "nvidia,tegra114-car", },
{ .compatible = "nvidia,tegra124-car", },
{ .compatible = "nvidia,tegra132-car", },
+ { .compatible = "nvidia,tegra210-car", },
{},
};
-static void tegra_enable_fuse_clk(void __iomem *base)
+static struct tegra_fuse *fuse = &(struct tegra_fuse) {
+ .base = NULL,
+ .soc = NULL,
+};
+
+static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc },
+#endif
+ { /* sentinel */ }
+};
+
+static int tegra_fuse_probe(struct platform_device *pdev)
{
- u32 reg;
+ void __iomem *base = fuse->base;
+ struct resource *res;
+ int err;
+
+ /* take over the memory region from the early initialization */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fuse->phys = res->start;
+	fuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fuse->base))
+ return PTR_ERR(fuse->base);
+
+ fuse->clk = devm_clk_get(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk)) {
+		dev_err(&pdev->dev, "failed to get FUSE clock: %ld\n",
+ PTR_ERR(fuse->clk));
+ return PTR_ERR(fuse->clk);
+ }
- reg = readl_relaxed(base + 0x48);
- reg |= 1 << 28;
- writel(reg, base + 0x48);
+ platform_set_drvdata(pdev, fuse);
+ fuse->dev = &pdev->dev;
- /*
- * Enable FUSE clock. This needs to be hardcoded because the clock
- * subsystem is not active during early boot.
- */
- reg = readl(base + 0x14);
- reg |= 1 << 7;
- writel(reg, base + 0x14);
+ if (fuse->soc->probe) {
+ err = fuse->soc->probe(fuse);
+ if (err < 0)
+ return err;
+ }
+
+ if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
+ fuse->soc->info))
+ return -ENODEV;
+
+ /* release the early I/O memory mapping */
+ iounmap(base);
+
+ return 0;
+}
+
+static struct platform_driver tegra_fuse_driver = {
+ .driver = {
+ .name = "tegra-fuse",
+ .of_match_table = tegra_fuse_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_fuse_probe,
+};
+module_platform_driver(tegra_fuse_driver);
+
+bool __init tegra_fuse_read_spare(unsigned int spare)
+{
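+	/* each spare bit occupies one 32-bit word above the spare base */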
+ unsigned int offset = fuse->soc->info->spare + spare * 4;
+
+ return fuse->read_early(fuse, offset) & 1;
+}
+
+u32 __init tegra_fuse_read_early(unsigned int offset)
+{
+ return fuse->read_early(fuse, offset);
}
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse_readl)
+ if (!fuse->read)
return -EPROBE_DEFER;
- *value = fuse_readl(offset);
+ *value = fuse->read(fuse, offset);
return 0;
}
EXPORT_SYMBOL(tegra_fuse_readl);
-int tegra_fuse_create_sysfs(struct device *dev, int size,
- u32 (*readl)(const unsigned int offset))
+static void tegra_enable_fuse_clk(void __iomem *base)
{
- if (fuse_size)
- return -ENODEV;
-
- fuse_bin_attr.size = size;
- fuse_bin_attr.read = fuse_read;
+ u32 reg;
- fuse_size = size;
- fuse_readl = readl;
+ reg = readl_relaxed(base + 0x48);
+ reg |= 1 << 28;
+ writel(reg, base + 0x48);
- return device_create_bin_file(dev, &fuse_bin_attr);
+ /*
+ * Enable FUSE clock. This needs to be hardcoded because the clock
+ * subsystem is not active during early boot.
+ */
+ reg = readl(base + 0x14);
+ reg |= 1 << 7;
+ writel(reg, base + 0x14);
}
static int __init tegra_init_fuse(void)
{
+ const struct of_device_id *match;
struct device_node *np;
- void __iomem *car_base;
-
- if (!soc_is_tegra())
- return 0;
+ struct resource regs;
tegra_init_apbmisc();
- np = of_find_matching_node(NULL, car_match);
- car_base = of_iomap(np, 0);
- if (car_base) {
- tegra_enable_fuse_clk(car_base);
- iounmap(car_base);
+ np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a FUSE node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a FUSE node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ u8 chip = tegra_get_chip_id();
+
+ regs.start = 0x7000f800;
+ regs.end = 0x7000fbff;
+ regs.flags = IORESOURCE_MEM;
+
+ switch (chip) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ case TEGRA20:
+ fuse->soc = &tegra20_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ case TEGRA30:
+ fuse->soc = &tegra30_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ case TEGRA114:
+ fuse->soc = &tegra114_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ case TEGRA124:
+ fuse->soc = &tegra124_fuse_soc;
+ break;
+#endif
+
+ default:
+ pr_warn("Unsupported SoC: %02x\n", chip);
+ break;
+ }
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
} else {
- pr_err("Could not enable fuse clk. ioremap tegra car failed.\n");
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get FUSE register\n");
+ return -ENXIO;
+ }
+
+ fuse->soc = match->data;
+ }
+
+ np = of_find_matching_node(NULL, car_match);
+ if (np) {
+ void __iomem *base = of_iomap(np, 0);
+ if (base) {
+ tegra_enable_fuse_clk(base);
+ iounmap(base);
+ } else {
+ pr_err("failed to map clock registers\n");
+ return -ENXIO;
+ }
+ }
+
+	fuse->phys = regs.start;
+	fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!fuse->base) {
+ pr_err("failed to map FUSE registers\n");
return -ENXIO;
}
- if (tegra_get_chip_id() == TEGRA20)
- tegra20_init_fuse_early();
- else
- tegra30_init_fuse_early();
+ fuse->soc->init(fuse);
- pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
tegra_revision_name[tegra_sku_info.revision],
tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
- tegra_sku_info.core_process_id);
- pr_debug("Tegra CPU Speedo ID %d, Soc Speedo ID %d\n",
- tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ tegra_sku_info.soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
return 0;
}
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 6acc2c44ee2c..294413a969a0 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -34,159 +34,107 @@
#include "fuse.h"
#define FUSE_BEGIN 0x100
-#define FUSE_SIZE 0x1f8
#define FUSE_UID_LOW 0x08
#define FUSE_UID_HIGH 0x0c
-static phys_addr_t fuse_phys;
-static struct clk *fuse_clk;
-static void __iomem __initdata *fuse_base;
-
-static DEFINE_MUTEX(apb_dma_lock);
-static DECLARE_COMPLETION(apb_dma_wait);
-static struct dma_chan *apb_dma_chan;
-static struct dma_slave_config dma_sconfig;
-static u32 *apb_buffer;
-static dma_addr_t apb_buffer_phys;
+static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
static void apb_dma_complete(void *args)
{
- complete(&apb_dma_wait);
+ struct tegra_fuse *fuse = args;
+
+ complete(&fuse->apbdma.wait);
}
-static u32 tegra20_fuse_readl(const unsigned int offset)
+static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
{
- int ret;
- u32 val = 0;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_async_tx_descriptor *dma_desc;
unsigned long time_left;
+ u32 value = 0;
+ int err;
+
+ mutex_lock(&fuse->apbdma.lock);
- mutex_lock(&apb_dma_lock);
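+	/* the DMA source is the fuse register aperture, not the bounce buffer */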
+	fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;
- dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
- ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
- if (ret)
+ err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
+ if (err)
goto out;
- dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
- sizeof(u32), DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
+ fuse->apbdma.phys,
+ sizeof(u32), DMA_DEV_TO_MEM,
+ flags);
if (!dma_desc)
goto out;
dma_desc->callback = apb_dma_complete;
- dma_desc->callback_param = NULL;
+ dma_desc->callback_param = fuse;
- reinit_completion(&apb_dma_wait);
+ reinit_completion(&fuse->apbdma.wait);
- clk_prepare_enable(fuse_clk);
+ clk_prepare_enable(fuse->clk);
dmaengine_submit(dma_desc);
- dma_async_issue_pending(apb_dma_chan);
- time_left = wait_for_completion_timeout(&apb_dma_wait,
+ dma_async_issue_pending(fuse->apbdma.chan);
+ time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
msecs_to_jiffies(50));
if (WARN(time_left == 0, "apb read dma timed out"))
- dmaengine_terminate_all(apb_dma_chan);
+ dmaengine_terminate_all(fuse->apbdma.chan);
else
- val = *apb_buffer;
+ value = *fuse->apbdma.virt;
- clk_disable_unprepare(fuse_clk);
-out:
- mutex_unlock(&apb_dma_lock);
+ clk_disable_unprepare(fuse->clk);
- return val;
+out:
+ mutex_unlock(&fuse->apbdma.lock);
+ return value;
}
-static const struct of_device_id tegra20_fuse_of_match[] = {
- { .compatible = "nvidia,tegra20-efuse" },
- {},
-};
-
-static int apb_dma_init(void)
+static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- apb_dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!apb_dma_chan)
+
+ fuse->apbdma.chan = dma_request_channel(mask, NULL, NULL);
+ if (!fuse->apbdma.chan)
return -EPROBE_DEFER;
- apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
- GFP_KERNEL);
- if (!apb_buffer) {
- dma_release_channel(apb_dma_chan);
+ fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
+ &fuse->apbdma.phys,
+ GFP_KERNEL);
+ if (!fuse->apbdma.virt) {
+ dma_release_channel(fuse->apbdma.chan);
return -ENOMEM;
}
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = 1;
- dma_sconfig.dst_maxburst = 1;
+ fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.src_maxburst = 1;
+ fuse->apbdma.config.dst_maxburst = 1;
- return 0;
-}
-
-static int tegra20_fuse_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int err;
-
- fuse_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(fuse_clk)) {
- dev_err(&pdev->dev, "missing clock");
- return PTR_ERR(fuse_clk);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- fuse_phys = res->start;
-
- err = apb_dma_init();
- if (err)
- return err;
-
- if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
- return -ENODEV;
-
- dev_dbg(&pdev->dev, "loaded\n");
+ init_completion(&fuse->apbdma.wait);
+ mutex_init(&fuse->apbdma.lock);
+ fuse->read = tegra20_fuse_read;
return 0;
}
-static struct platform_driver tegra20_fuse_driver = {
- .probe = tegra20_fuse_probe,
- .driver = {
- .name = "tegra20_fuse",
- .of_match_table = tegra20_fuse_of_match,
- }
+static const struct tegra_fuse_info tegra20_fuse_info = {
+ .read = tegra20_fuse_read,
+ .size = 0x1f8,
+ .spare = 0x100,
};
-static int __init tegra20_fuse_init(void)
-{
- return platform_driver_register(&tegra20_fuse_driver);
-}
-postcore_initcall(tegra20_fuse_init);
-
/* Early boot code. This code is called before the devices are created */
-u32 __init tegra20_fuse_early(const unsigned int offset)
-{
- return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-}
-
-bool __init tegra20_spare_fuse_early(int spare_bit)
-{
- u32 offset = spare_bit * 4;
- bool value;
-
- value = tegra20_fuse_early(offset + 0x100);
-
- return value;
-}
-
static void __init tegra20_fuse_add_randomness(void)
{
u32 randomness[7];
@@ -195,22 +143,27 @@ static void __init tegra20_fuse_add_randomness(void)
randomness[1] = tegra_read_straps();
randomness[2] = tegra_read_chipid();
randomness[3] = tegra_sku_info.cpu_process_id << 16;
- randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[3] |= tegra_sku_info.soc_process_id;
randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
randomness[4] |= tegra_sku_info.soc_speedo_id;
- randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
- randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);
+ randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
+ randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);
add_device_randomness(randomness, sizeof(randomness));
}
-void __init tegra20_init_fuse_early(void)
+static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
{
- fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+ fuse->read_early = tegra20_fuse_read_early;
tegra_init_revision();
- tegra20_init_speedo_data(&tegra_sku_info);
+ fuse->soc->speedo_init(&tegra_sku_info);
tegra20_fuse_add_randomness();
-
- iounmap(fuse_base);
}
+
+const struct tegra_fuse_soc tegra20_fuse_soc = {
+ .init = tegra20_fuse_init,
+ .speedo_init = tegra20_init_speedo_data,
+ .probe = tegra20_fuse_probe,
+ .info = &tegra20_fuse_info,
+};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 4d2f71bf65c5..882607bcaa6c 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -42,113 +42,33 @@
#define FUSE_HAS_REVISION_INFO BIT(0)
-enum speedo_idx {
- SPEEDO_TEGRA30 = 0,
- SPEEDO_TEGRA114,
- SPEEDO_TEGRA124,
-};
-
-struct tegra_fuse_info {
- int size;
- int spare_bit;
- enum speedo_idx speedo_idx;
-};
-
-static void __iomem *fuse_base;
-static struct clk *fuse_clk;
-static const struct tegra_fuse_info *fuse_info;
-
-u32 tegra30_fuse_readl(const unsigned int offset)
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
- u32 val;
-
- /*
- * early in the boot, the fuse clock will be enabled by
- * tegra_init_fuse()
- */
-
- if (fuse_clk)
- clk_prepare_enable(fuse_clk);
-
- val = readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-
- if (fuse_clk)
- clk_disable_unprepare(fuse_clk);
-
- return val;
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
}
-static const struct tegra_fuse_info tegra30_info = {
- .size = 0x2a4,
- .spare_bit = 0x144,
- .speedo_idx = SPEEDO_TEGRA30,
-};
-
-static const struct tegra_fuse_info tegra114_info = {
- .size = 0x2a0,
- .speedo_idx = SPEEDO_TEGRA114,
-};
-
-static const struct tegra_fuse_info tegra124_info = {
- .size = 0x300,
- .speedo_idx = SPEEDO_TEGRA124,
-};
-
-static const struct of_device_id tegra30_fuse_of_match[] = {
- { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_info },
- { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_info },
- { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_info },
- {},
-};
-
-static int tegra30_fuse_probe(struct platform_device *pdev)
+static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
{
- const struct of_device_id *of_dev_id;
-
- of_dev_id = of_match_device(tegra30_fuse_of_match, &pdev->dev);
- if (!of_dev_id)
- return -ENODEV;
+ u32 value;
+ int err;
- fuse_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(fuse_clk)) {
- dev_err(&pdev->dev, "missing clock");
- return PTR_ERR(fuse_clk);
+ err = clk_prepare_enable(fuse->clk);
+ if (err < 0) {
+ dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err);
+ return 0;
}
- platform_set_drvdata(pdev, NULL);
-
- if (tegra_fuse_create_sysfs(&pdev->dev, fuse_info->size,
- tegra30_fuse_readl))
- return -ENODEV;
+ value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
- dev_dbg(&pdev->dev, "loaded\n");
+ clk_disable_unprepare(fuse->clk);
- return 0;
-}
-
-static struct platform_driver tegra30_fuse_driver = {
- .probe = tegra30_fuse_probe,
- .driver = {
- .name = "tegra_fuse",
- .of_match_table = tegra30_fuse_of_match,
- }
-};
-
-static int __init tegra30_fuse_init(void)
-{
- return platform_driver_register(&tegra30_fuse_driver);
+ return value;
}
-postcore_initcall(tegra30_fuse_init);
-
-/* Early boot code. This code is called before the devices are created */
-
-typedef void (*speedo_f)(struct tegra_sku_info *sku_info);
-
-static speedo_f __initdata speedo_tbl[] = {
- [SPEEDO_TEGRA30] = tegra30_init_speedo_data,
- [SPEEDO_TEGRA114] = tegra114_init_speedo_data,
- [SPEEDO_TEGRA124] = tegra124_init_speedo_data,
-};
static void __init tegra30_fuse_add_randomness(void)
{
@@ -158,67 +78,83 @@ static void __init tegra30_fuse_add_randomness(void)
randomness[1] = tegra_read_straps();
randomness[2] = tegra_read_chipid();
randomness[3] = tegra_sku_info.cpu_process_id << 16;
- randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[3] |= tegra_sku_info.soc_process_id;
randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
randomness[4] |= tegra_sku_info.soc_speedo_id;
- randomness[5] = tegra30_fuse_readl(FUSE_VENDOR_CODE);
- randomness[6] = tegra30_fuse_readl(FUSE_FAB_CODE);
- randomness[7] = tegra30_fuse_readl(FUSE_LOT_CODE_0);
- randomness[8] = tegra30_fuse_readl(FUSE_LOT_CODE_1);
- randomness[9] = tegra30_fuse_readl(FUSE_WAFER_ID);
- randomness[10] = tegra30_fuse_readl(FUSE_X_COORDINATE);
- randomness[11] = tegra30_fuse_readl(FUSE_Y_COORDINATE);
+ randomness[5] = tegra_fuse_read_early(FUSE_VENDOR_CODE);
+ randomness[6] = tegra_fuse_read_early(FUSE_FAB_CODE);
+ randomness[7] = tegra_fuse_read_early(FUSE_LOT_CODE_0);
+ randomness[8] = tegra_fuse_read_early(FUSE_LOT_CODE_1);
+ randomness[9] = tegra_fuse_read_early(FUSE_WAFER_ID);
+ randomness[10] = tegra_fuse_read_early(FUSE_X_COORDINATE);
+ randomness[11] = tegra_fuse_read_early(FUSE_Y_COORDINATE);
add_device_randomness(randomness, sizeof(randomness));
}
-static void __init legacy_fuse_init(void)
+static void __init tegra30_fuse_init(struct tegra_fuse *fuse)
{
- switch (tegra_get_chip_id()) {
- case TEGRA30:
- fuse_info = &tegra30_info;
- break;
- case TEGRA114:
- fuse_info = &tegra114_info;
- break;
- case TEGRA124:
- case TEGRA132:
- fuse_info = &tegra124_info;
- break;
- default:
- return;
- }
+ fuse->read_early = tegra30_fuse_read_early;
+ fuse->read = tegra30_fuse_read;
- fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+ tegra_init_revision();
+ fuse->soc->speedo_init(&tegra_sku_info);
+ tegra30_fuse_add_randomness();
}
+#endif
-bool __init tegra30_spare_fuse(int spare_bit)
-{
- u32 offset = fuse_info->spare_bit + spare_bit * 4;
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static const struct tegra_fuse_info tegra30_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a4,
+ .spare = 0x144,
+};
- return tegra30_fuse_readl(offset) & 1;
-}
+const struct tegra_fuse_soc tegra30_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra30_init_speedo_data,
+ .info = &tegra30_fuse_info,
+};
+#endif
-void __init tegra30_init_fuse_early(void)
-{
- struct device_node *np;
- const struct of_device_id *of_match;
-
- np = of_find_matching_node_and_match(NULL, tegra30_fuse_of_match,
- &of_match);
- if (np) {
- fuse_base = of_iomap(np, 0);
- fuse_info = (struct tegra_fuse_info *)of_match->data;
- } else
- legacy_fuse_init();
-
- if (!fuse_base) {
- pr_warn("fuse DT node missing and unknown chip id: 0x%02x\n",
- tegra_get_chip_id());
- return;
- }
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct tegra_fuse_info tegra114_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a0,
+ .spare = 0x180,
+};
- tegra_init_revision();
- speedo_tbl[fuse_info->speedo_idx](&tegra_sku_info);
- tegra30_fuse_add_randomness();
-}
+const struct tegra_fuse_soc tegra114_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra114_init_speedo_data,
+ .info = &tegra114_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct tegra_fuse_info tegra124_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x200,
+};
+
+const struct tegra_fuse_soc tegra124_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra124_init_speedo_data,
+ .info = &tegra124_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_fuse_info tegra210_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra210_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra210_init_speedo_data,
+ .info = &tegra210_fuse_info,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 3a398bf3572c..10c2076d5089 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -19,53 +19,90 @@
#ifndef __DRIVERS_MISC_TEGRA_FUSE_H
#define __DRIVERS_MISC_TEGRA_FUSE_H
-#define TEGRA_FUSE_BASE 0x7000f800
-#define TEGRA_FUSE_SIZE 0x400
+#include <linux/dmaengine.h>
+#include <linux/types.h>
-int tegra_fuse_create_sysfs(struct device *dev, int size,
- u32 (*readl)(const unsigned int offset));
+struct tegra_fuse;
+
+struct tegra_fuse_info {
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ unsigned int size;
+ unsigned int spare;
+};
+
+struct tegra_fuse_soc {
+ void (*init)(struct tegra_fuse *fuse);
+ void (*speedo_init)(struct tegra_sku_info *info);
+ int (*probe)(struct tegra_fuse *fuse);
+
+ const struct tegra_fuse_info *info;
+};
+
+struct tegra_fuse {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t phys;
+ struct clk *clk;
+
+ u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ const struct tegra_fuse_soc *soc;
+
+ /* APBDMA on Tegra20 */
+ struct {
+ struct mutex lock;
+ struct completion wait;
+ struct dma_chan *chan;
+ struct dma_slave_config config;
+ dma_addr_t phys;
+ u32 *virt;
+ } apbdma;
+};
-bool tegra30_spare_fuse(int bit);
-u32 tegra30_fuse_readl(const unsigned int offset);
-void tegra30_init_fuse_early(void);
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
+bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_early(unsigned int offset);
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
-bool tegra20_spare_fuse_early(int spare_bit);
-void tegra20_init_fuse_early(void);
-u32 tegra20_fuse_early(const unsigned int offset);
-#else
-static inline void tegra20_init_speedo_data(struct tegra_sku_info *sku_info) {}
-static inline bool tegra20_spare_fuse_early(int spare_bit)
-{
- return false;
-}
-static inline void tegra20_init_fuse_early(void) {}
-static inline u32 tegra20_fuse_early(const unsigned int offset)
-{
- return 0;
-}
#endif
-
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra30_init_speedo_data(struct tegra_sku_info *sku_info) {}
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra114_init_speedo_data(struct tegra_sku_info *sku_info) {}
#endif
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra124_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+void tegra210_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_fuse_soc tegra20_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_fuse_soc tegra30_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_fuse_soc tegra114_fuse_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_fuse_soc tegra124_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_fuse_soc tegra210_fuse_soc;
#endif
#endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c
index 2a6ca036f09f..1ba41ebbb23d 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra114.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -22,7 +22,7 @@
#include "fuse.h"
-#define CORE_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
#define CPU_PROCESS_CORNERS 2
enum {
@@ -31,7 +31,7 @@ enum {
THRESHOLD_INDEX_COUNT,
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{1123, UINT_MAX},
{0, UINT_MAX},
};
@@ -74,8 +74,8 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
}
if (rev == TEGRA_REVISION_A01) {
- tmp = tegra30_fuse_readl(0x270) << 1;
- tmp |= tegra30_fuse_readl(0x26c);
+ tmp = tegra_fuse_read_early(0x270) << 1;
+ tmp |= tegra_fuse_read_early(0x26c);
if (!tmp)
sku_info->cpu_speedo_id = 0;
}
@@ -84,27 +84,27 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
{
u32 cpu_speedo_val;
- u32 core_speedo_val;
+ u32 soc_speedo_val;
int threshold;
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
rev_sku_to_speedo_ids(sku_info, &threshold);
- cpu_speedo_val = tegra30_fuse_readl(0x12c) + 1024;
- core_speedo_val = tegra30_fuse_readl(0x134);
+ cpu_speedo_val = tegra_fuse_read_early(0x12c) + 1024;
+ soc_speedo_val = tegra_fuse_read_early(0x134);
for (i = 0; i < CPU_PROCESS_CORNERS; i++)
if (cpu_speedo_val < cpu_process_speedos[threshold][i])
break;
sku_info->cpu_process_id = i;
- for (i = 0; i < CORE_PROCESS_CORNERS; i++)
- if (core_speedo_val < core_process_speedos[threshold][i])
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_val < soc_process_speedos[threshold][i])
break;
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
index 46362387d974..a63a134101ab 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -24,7 +24,7 @@
#define CPU_PROCESS_CORNERS 2
#define GPU_PROCESS_CORNERS 2
-#define CORE_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
#define FUSE_CPU_SPEEDO_0 0x14
#define FUSE_CPU_SPEEDO_1 0x2c
@@ -53,7 +53,7 @@ static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
{0, UINT_MAX},
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{2101, UINT_MAX},
{0, UINT_MAX},
};
@@ -119,19 +119,19 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
THRESHOLD_INDEX_COUNT);
BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- cpu_speedo_0_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_0);
+ cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
/* GPU Speedo is stored in CPU_SPEEDO_2 */
- sku_info->gpu_speedo_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_2);
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
- soc_speedo_0_value = tegra30_fuse_readl(FUSE_SOC_SPEEDO_0);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
- cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
- soc_iddq_value = tegra30_fuse_readl(FUSE_SOC_IDDQ);
- gpu_iddq_value = tegra30_fuse_readl(FUSE_GPU_IDDQ);
+ cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
+ soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
+ gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
sku_info->cpu_speedo_value = cpu_speedo_0_value;
@@ -143,7 +143,7 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
rev_sku_to_speedo_ids(sku_info, &threshold);
- sku_info->cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
+ sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
for (i = 0; i < GPU_PROCESS_CORNERS; i++)
if (sku_info->gpu_speedo_value <
@@ -157,11 +157,11 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
break;
sku_info->cpu_process_id = i;
- for (i = 0; i < CORE_PROCESS_CORNERS; i++)
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
if (soc_speedo_0_value <
- core_process_speedos[threshold][i])
+ soc_process_speedos[threshold][i])
break;
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c
index eff1b63f330d..5f7818bf6072 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra20.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -28,11 +28,11 @@
#define CPU_SPEEDO_REDUND_MSBIT 39
#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
-#define CORE_SPEEDO_LSBIT 40
-#define CORE_SPEEDO_MSBIT 47
-#define CORE_SPEEDO_REDUND_LSBIT 48
-#define CORE_SPEEDO_REDUND_MSBIT 55
-#define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT)
+#define SOC_SPEEDO_LSBIT 40
+#define SOC_SPEEDO_MSBIT 47
+#define SOC_SPEEDO_REDUND_LSBIT 48
+#define SOC_SPEEDO_REDUND_MSBIT 55
+#define SOC_SPEEDO_REDUND_OFFS (SOC_SPEEDO_REDUND_MSBIT - SOC_SPEEDO_MSBIT)
#define SPEEDO_MULT 4
@@ -56,7 +56,7 @@ static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
{316, 331, 383, UINT_MAX},
};
-static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = {
+static const u32 __initconst soc_process_speedos[][PROCESS_CORNERS_NUM] = {
{165, 195, 224, UINT_MAX},
{165, 195, 224, UINT_MAX},
{165, 195, 224, UINT_MAX},
@@ -69,7 +69,7 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) != SPEEDO_ID_COUNT);
if (SPEEDO_ID_SELECT_0(sku_info->revision))
sku_info->soc_speedo_id = SPEEDO_ID_0;
@@ -80,8 +80,8 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
val = 0;
for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
- reg = tegra20_spare_fuse_early(i) |
- tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS);
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + CPU_SPEEDO_REDUND_OFFS);
val = (val << 1) | (reg & 0x1);
}
val = val * SPEEDO_MULT;
@@ -94,17 +94,17 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
sku_info->cpu_process_id = i;
val = 0;
- for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) {
- reg = tegra20_spare_fuse_early(i) |
- tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS);
+ for (i = SOC_SPEEDO_MSBIT; i >= SOC_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + SOC_SPEEDO_REDUND_OFFS);
val = (val << 1) | (reg & 0x1);
}
val = val * SPEEDO_MULT;
pr_debug("Core speedo value %u\n", val);
for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
- if (val <= core_process_speedos[sku_info->soc_speedo_id][i])
+ if (val <= soc_process_speedos[sku_info->soc_speedo_id][i])
break;
}
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
new file mode 100644
index 000000000000..5373f4c16b54
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 3
+
+#define FUSE_CPU_SPEEDO_0 0x014
+#define FUSE_CPU_SPEEDO_1 0x02c
+#define FUSE_CPU_SPEEDO_2 0x030
+#define FUSE_SOC_SPEEDO_0 0x034
+#define FUSE_SOC_SPEEDO_1 0x038
+#define FUSE_SOC_SPEEDO_2 0x03c
+#define FUSE_CPU_IDDQ 0x018
+#define FUSE_SOC_IDDQ 0x040
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x028
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ { 2119, UINT_MAX },
+ { 2119, UINT_MAX },
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ { UINT_MAX, UINT_MAX },
+ { UINT_MAX, UINT_MAX },
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ { 1950, 2100, UINT_MAX },
+ { 1950, 2100, UINT_MAX },
+};
+
+static u8 __init get_speedo_revision(void)
+{
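+	/* the speedo revision is encoded in spare fuse bits 2..4 */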
+ return tegra_fuse_read_spare(4) << 2 |
+ tegra_fuse_read_spare(3) << 1 |
+ tegra_fuse_read_spare(2) << 0;
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+					 u8 speedo_rev, unsigned int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x07:
+ case 0x17:
+ case 0x27:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+ break;
+
+ case 0x13:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+
+ sku_info->cpu_speedo_id = 1;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+static int get_process_id(int value, const u32 *speedos, unsigned int num)
+{
+ unsigned int i;
+
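+	/* return the first corner whose threshold the value falls below */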
+ for (i = 0; i < num; i++)
+		if (value < speedos[i])
+ return i;
+
+ return -EINVAL;
+}
+
+void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ unsigned int index;
+ u8 speedo_revision;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ /* Read speedo/IDDQ fuses */
+ cpu_speedo[0] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+ cpu_speedo[1] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_1);
+ cpu_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+ soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
+	soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
+
+ cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
+ soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
+ gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
+
+ /*
+ * Determine CPU, GPU and SoC speedo values depending on speedo fusing
+ * revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
+ */
+ speedo_revision = get_speedo_revision();
+ pr_info("Speedo Revision %u\n", speedo_revision);
+
+ if (speedo_revision >= 3) {
+ sku_info->cpu_speedo_value = cpu_speedo[0];
+ sku_info->gpu_speedo_value = cpu_speedo[2];
+ sku_info->soc_speedo_value = soc_speedo[0];
+ } else if (speedo_revision == 2) {
+ sku_info->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10;
+ sku_info->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10;
+ sku_info->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10;
+ } else {
+ sku_info->cpu_speedo_value = 2100;
+ sku_info->gpu_speedo_value = cpu_speedo[2] - 75;
+ sku_info->soc_speedo_value = 1900;
+ }
+
+ if ((sku_info->cpu_speedo_value <= 0) ||
+ (sku_info->gpu_speedo_value <= 0) ||
+ (sku_info->soc_speedo_value <= 0)) {
+ WARN(1, "speedo value not fused\n");
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, speedo_revision, &index);
+
+ sku_info->gpu_process_id = get_process_id(sku_info->gpu_speedo_value,
+ gpu_process_speedos[index],
+ GPU_PROCESS_CORNERS);
+
+ sku_info->cpu_process_id = get_process_id(sku_info->cpu_speedo_value,
+ cpu_process_speedos[index],
+ CPU_PROCESS_CORNERS);
+
+ sku_info->soc_process_id = get_process_id(sku_info->soc_speedo_value,
+ soc_process_speedos[index],
+ SOC_PROCESS_CORNERS);
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c
index b17f0dcdfebe..9b010b3ef009 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra30.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -22,7 +22,7 @@
#include "fuse.h"
-#define CORE_PROCESS_CORNERS 1
+#define SOC_PROCESS_CORNERS 1
#define CPU_PROCESS_CORNERS 6
#define FUSE_SPEEDO_CALIB_0 0x14
@@ -54,7 +54,7 @@ enum {
THRESHOLD_INDEX_COUNT,
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{180},
{170},
{195},
@@ -93,25 +93,25 @@ static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
int bit_minus1;
int bit_minus2;
- reg = tegra30_fuse_readl(FUSE_SPEEDO_CALIB_0);
+ reg = tegra_fuse_read_early(FUSE_SPEEDO_CALIB_0);
*speedo_lp = (reg & 0xFFFF) * 4;
*speedo_g = ((reg >> 16) & 0xFFFF) * 4;
- ate_ver = tegra30_fuse_readl(FUSE_TEST_PROG_VER);
+ ate_ver = tegra_fuse_read_early(FUSE_TEST_PROG_VER);
pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10);
if (ate_ver >= 26) {
- bit_minus1 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1);
- bit_minus1 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1_R);
- bit_minus2 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2);
- bit_minus2 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2_R);
+ bit_minus1 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2_R);
*speedo_lp |= (bit_minus1 << 1) | bit_minus2;
- bit_minus1 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1);
- bit_minus1 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1_R);
- bit_minus2 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2);
- bit_minus2 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2_R);
+ bit_minus1 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2_R);
*speedo_g |= (bit_minus1 << 1) | bit_minus2;
} else {
*speedo_lp |= 0x3;
@@ -121,7 +121,7 @@ static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
{
- int package_id = tegra30_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F;
+ int package_id = tegra_fuse_read_early(FUSE_PACKAGE_INFO) & 0x0F;
switch (sku_info->revision) {
case TEGRA_REVISION_A01:
@@ -246,19 +246,19 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
{
u32 cpu_speedo_val;
- u32 core_speedo_val;
+ u32 soc_speedo_val;
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
rev_sku_to_speedo_ids(sku_info);
- fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val);
+ fuse_speedo_calib(&cpu_speedo_val, &soc_speedo_val);
pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
- pr_debug("Tegra Core speedo value %u\n", core_speedo_val);
+ pr_debug("Tegra Core speedo value %u\n", soc_speedo_val);
for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
@@ -273,16 +273,16 @@ void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
sku_info->cpu_speedo_id = 1;
}
- for (i = 0; i < CORE_PROCESS_CORNERS; i++) {
- if (core_speedo_val < core_process_speedos[threshold_index][i])
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++) {
+ if (soc_speedo_val < soc_process_speedos[threshold_index][i])
break;
}
- sku_info->core_process_id = i - 1;
+ sku_info->soc_process_id = i - 1;
- if (sku_info->core_process_id == -1) {
- pr_warn("Tegra CORE speedo value %3d out of range",
- core_speedo_val);
- sku_info->core_process_id = 0;
+ if (sku_info->soc_process_id == -1) {
+ pr_warn("Tegra SoC speedo value %3d out of range",
+ soc_speedo_val);
+ sku_info->soc_process_id = 0;
sku_info->soc_speedo_id = 1;
}
}
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 73fad05d8f2c..5b18f6ffa45c 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,11 +21,10 @@
#include <linux/io.h>
#include <soc/tegra/fuse.h>
+#include <soc/tegra/common.h>
#include "fuse.h"
-#define APBMISC_BASE 0x70000800
-#define APBMISC_SIZE 0x64
#define FUSE_SKU_INFO 0x10
#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
@@ -95,8 +94,8 @@ void __init tegra_init_revision(void)
rev = TEGRA_REVISION_A02;
break;
case 3:
- if (chip_id == TEGRA20 && (tegra20_spare_fuse_early(18) ||
- tegra20_spare_fuse_early(19)))
+ if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
+ tegra_fuse_read_spare(19)))
rev = TEGRA_REVISION_A03p;
else
rev = TEGRA_REVISION_A03;
@@ -110,27 +109,74 @@ void __init tegra_init_revision(void)
tegra_sku_info.revision = rev;
- if (chip_id == TEGRA20)
- tegra_sku_info.sku_id = tegra20_fuse_early(FUSE_SKU_INFO);
- else
- tegra_sku_info.sku_id = tegra30_fuse_readl(FUSE_SKU_INFO);
+ tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
}
void __init tegra_init_apbmisc(void)
{
+ struct resource apbmisc, straps;
struct device_node *np;
np = of_find_matching_node(NULL, apbmisc_match);
- apbmisc_base = of_iomap(np, 0);
- if (!apbmisc_base) {
- pr_warn("ioremap tegra apbmisc failed. using %08x instead\n",
- APBMISC_BASE);
- apbmisc_base = ioremap(APBMISC_BASE, APBMISC_SIZE);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * an APBMISC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain an APBMISC node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ /* APBMISC registers (chip revision, ...) */
+ apbmisc.start = 0x70000800;
+ apbmisc.end = 0x70000863;
+ apbmisc.flags = IORESOURCE_MEM;
+
+ /* strapping options */
+ if (tegra_get_chip_id() == TEGRA124) {
+ straps.start = 0x7000e864;
+ straps.end = 0x7000e867;
+ } else {
+ straps.start = 0x70000008;
+ straps.end = 0x7000000b;
+ }
+
+ straps.flags = IORESOURCE_MEM;
+
+ pr_warn("Using APBMISC region %pR\n", &apbmisc);
+ pr_warn("Using strapping options registers %pR\n",
+ &straps);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &apbmisc) < 0) {
+ pr_err("failed to get APBMISC registers\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 1, &straps) < 0) {
+ pr_err("failed to get strapping options registers\n");
+ return;
+ }
}
- strapping_base = of_iomap(np, 1);
+ apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base)
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
if (!strapping_base)
- pr_err("ioremap tegra strapping_base failed\n");
+ pr_err("failed to map strapping options registers\n");
long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 75d0457a77b7..bc34cf7482fb 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -17,6 +17,8 @@
*
*/
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
@@ -457,7 +459,6 @@ static int tegra_io_rail_prepare(int id, unsigned long *request,
unsigned long *status, unsigned int *bit)
{
unsigned long rate, value;
- struct clk *clk;
*bit = id % 32;
@@ -476,12 +477,7 @@ static int tegra_io_rail_prepare(int id, unsigned long *request,
*request = IO_DPD2_REQ;
}
- clk = clk_get_sys(NULL, "pclk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- rate = clk_get_rate(clk);
- clk_put(clk);
+ rate = clk_get_rate(pmc->clk);
tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
@@ -535,8 +531,10 @@ int tegra_io_rail_power_on(int id)
tegra_pmc_writel(value, request);
err = tegra_io_rail_poll(status, mask, 0, 250);
- if (err < 0)
+ if (err < 0) {
+ pr_info("tegra_io_rail_poll() failed: %d\n", err);
return err;
+ }
tegra_io_rail_unprepare();
@@ -551,8 +549,10 @@ int tegra_io_rail_power_off(int id)
int err;
err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err < 0)
+ if (err < 0) {
+ pr_info("tegra_io_rail_prepare() failed: %d\n", err);
return err;
+ }
mask = 1 << bit;
@@ -736,12 +736,12 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
u32 value, checksum;
if (!pmc->soc->has_tsense_reset)
- goto out;
+ return;
np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
if (!np) {
dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
- goto out;
+ return;
}
if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
@@ -801,7 +801,6 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
out:
of_node_put(np);
- return;
}
static int tegra_pmc_probe(struct platform_device *pdev)
@@ -1002,7 +1001,56 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.has_gpu_clamps = true,
};
+static const char * const tegra210_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+ [TEGRA_POWERGATE_NVDEC] = "nvdec",
+ [TEGRA_POWERGATE_NVJPG] = "nvjpg",
+ [TEGRA_POWERGATE_AUD] = "aud",
+ [TEGRA_POWERGATE_DFD] = "dfd",
+ [TEGRA_POWERGATE_VE2] = "ve2",
+};
+
+static const u8 tegra210_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra210_powergates),
+ .powergates = tegra210_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
+ .cpu_powergates = tegra210_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+};
+
static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
+ { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc },
{ .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
{ .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
{ .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
@@ -1035,25 +1083,44 @@ static int __init tegra_pmc_early_init(void)
bool invert;
u32 value;
- if (!soc_is_tegra())
- return 0;
-
np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
if (!np) {
- pr_warn("PMC device node not found, disabling powergating\n");
-
- regs.start = 0x7000e400;
- regs.end = 0x7000e7ff;
- regs.flags = IORESOURCE_MEM;
-
- pr_warn("Using memory region %pR\n", &regs);
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a PMC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a PMC node. Note that in this case the
+ * SoC data can't be matched and therefore powergating is
+ * disabled.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ pr_warn("DT node not found, powergating disabled\n");
+
+ regs.start = 0x7000e400;
+ regs.end = 0x7000e7ff;
+ regs.flags = IORESOURCE_MEM;
+
+ pr_warn("Using memory region %pR\n", &regs);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
} else {
- pmc->soc = match->data;
- }
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get PMC registers\n");
+ return -ENXIO;
+ }
- if (of_address_to_resource(np, 0, &regs) < 0) {
- pr_err("failed to get PMC registers\n");
- return -ENXIO;
+ pmc->soc = match->data;
}
pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
@@ -1064,6 +1131,10 @@ static int __init tegra_pmc_early_init(void)
mutex_init(&pmc->powergates_lock);
+ /*
+ * Invert the interrupt polarity if a PMC device tree node exists and
+ * contains the nvidia,invert-interrupt property.
+ */
invert = of_property_read_bool(np, "nvidia,invert-interrupt");
value = tegra_pmc_readl(PMC_CNTRL);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..4887f317ea58 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -326,6 +326,15 @@ config SPI_MESON_SPIFC
This enables master mode support for the SPIFC (SPI flash
controller) available in Amlogic Meson SoCs.
+config SPI_MT65XX
+ tristate "MediaTek SPI controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This selects the MediaTek(R) SPI bus driver.
+	  If you want to use the MediaTek(R) SPI interface,
+	  say Y or M here. If you are not sure, say N.
+	  This driver supports MediaTek MT65XX and MT81XX series ARM SoCs.
+
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
depends on GPIOLIB || COMPILE_TEST
@@ -598,6 +607,17 @@ config SPI_XILINX
Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+config SPI_XLP
+ tristate "Netlogic XLP SPI controller driver"
+ depends on CPU_XLP || COMPILE_TEST
+ help
+ Enable support for the SPI controller on the Netlogic XLP SoCs.
+ Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX
+ and XLP5XX.
+
+ If you have a Netlogic XLP platform say Y here.
+ If unsure, say N.
+
config SPI_XTENSA_XTFPGA
tristate "Xtensa SPI controller for xtfpga"
depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
@@ -612,7 +632,7 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on SPI_MASTER
+ depends on SPI_MASTER && HAS_DMA
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 1154dbac8f2c..6a7f6f9d0d1c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
@@ -88,5 +89,6 @@ obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index c9eca347787d..bf9ed380bb1c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -19,7 +19,6 @@
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/platform_data/atmel.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 59705ab23577..e7874a6171ec 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -480,7 +480,7 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs,
- unsigned long xfer_time_us)
+ unsigned long long xfer_time_us)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
@@ -531,7 +531,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
unsigned long spi_hz, clk_hz, cdiv;
- unsigned long spi_used_hz, xfer_time_us;
+ unsigned long spi_used_hz;
+ unsigned long long xfer_time_us;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* set clock */
@@ -553,13 +554,11 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
- /* handle all the modes */
+	/* handle 3-wire mode */
if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
cs |= BCM2835_SPI_CS_REN;
- if (spi->mode & SPI_CPOL)
- cs |= BCM2835_SPI_CS_CPOL;
- if (spi->mode & SPI_CPHA)
- cs |= BCM2835_SPI_CS_CPHA;
+ else
+ cs &= ~BCM2835_SPI_CS_REN;
/* for gpio_cs set dummy CS so that no HW-CS get changed
* we can not run this in bcm2835_spi_set_cs, as it does
@@ -575,9 +574,10 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
bs->rx_len = tfr->len;
/* calculate the estimated time in us the transfer runs */
- xfer_time_us = tfr->len
+ xfer_time_us = (unsigned long long)tfr->len
* 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
- * 1000000 / spi_used_hz;
+ * 1000000;
+ do_div(xfer_time_us, spi_used_hz);
	/* for short requests run polling */
if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
@@ -592,6 +592,25 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
}
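
The move to unsigned long long above fixes a 32-bit overflow: len * 9 * 1000000 exceeds the range of a 32-bit unsigned long once a transfer is longer than roughly 477 bytes, which made the polling/interrupt decision wrong for large transfers. A minimal standalone sketch of the corrected estimate, with plain 64-bit division standing in for the kernel's do_div() helper:

    #include <stdio.h>
    #include <stdint.h>

    /* Estimated transfer time in microseconds: the BCM2835 SPI HW spends
     * 9 clocks per byte (8 data bits plus 1 idle clock). */
    static uint64_t xfer_time_us(uint32_t len, uint32_t spi_used_hz)
    {
        /* the 64-bit cast matters: len * 9 * 1000000 overflows
         * 32 bits for len > ~477 */
        uint64_t t = (uint64_t)len * 9 * 1000000;

        return t / spi_used_hz; /* the kernel uses do_div() for 64/32 division */
    }

    int main(void)
    {
        /* 4096-byte transfer at ~488 kHz: the intermediate product is
         * about 3.7e10, well past UINT32_MAX */
        printf("%llu us\n", (unsigned long long)xfer_time_us(4096, 488281));
        return 0;
    }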
+static int bcm2835_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
+
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ return 0;
+}
+
static void bcm2835_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
@@ -739,6 +758,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->set_cs = bcm2835_spi_set_cs;
master->transfer_one = bcm2835_spi_transfer_one;
master->handle_err = bcm2835_spi_handle_err;
+ master->prepare_message = bcm2835_spi_prepare_message;
master->dev.of_node = pdev->dev.of_node;
bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index f5ca6dc3a157..55789f7cda92 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -76,6 +76,7 @@
#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+#define HSSPI_OP_MULTIBIT BIT(11)
#define HSSPI_OP_CODE_SHIFT 13
#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
@@ -171,9 +172,12 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
if (opcode != HSSPI_OP_READ)
step_size -= HSSPI_OPCODE_LEN;
- __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
- 2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
- 2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ __raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
+ 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
@@ -374,7 +378,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
master->num_chipselect = 8;
master->setup = bcm63xx_hsspi_setup;
master->transfer_one_message = bcm63xx_hsspi_transfer_one;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_RX_DUAL | SPI_TX_DUAL;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->auto_runtime_pm = true;
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 06b34e5bcfa3..47bb9b898dfd 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -49,7 +49,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
- bool oldbit = !(word & 1);
+ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
/* clock starts at inactive polarity */
for (word <<= (32 - bits); likely(bits); bits--) {
@@ -81,7 +81,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
- bool oldbit = !(word & (1 << 31));
+ u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
/* clock starts at inactive polarity */
for (word <<= (32 - bits); likely(bits); bits--) {
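
Both hunks above change oldbit from bool to u32 because the transmit word is pre-shifted so that its most significant data bit sits at bit 31: bit (bits - 1) of the unshifted word is the first bit to go out on MOSI, and oldbit must carry its inverted value at bit 31 so later comparisons against (word & (1 << 31)) are not truncated to 0/1 by a bool. A hedged sketch of just that computation (names are illustrative, not from the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Inverted first-out bit, positioned at bit 31 to match the word
     * after it has been shifted left by (32 - bits). */
    static uint32_t initial_oldbit(uint32_t word, unsigned int bits)
    {
        return (uint32_t)!(word & (1u << (bits - 1))) << 31;
    }

    int main(void)
    {
        /* 8-bit transfer of 0x80: the first bit out is 1, so oldbit is 0 */
        printf("0x%08x\n", initial_oldbit(0x80, 8));
        return 0;
    }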
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 987afebea093..3cf9faa6cc3f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -139,6 +139,8 @@ struct davinci_spi {
u32 (*get_tx)(struct davinci_spi *);
u8 *bytes_per_word;
+
+ u8 prescaler_limit;
};
static struct davinci_spi_config davinci_spi_default_cfg;
@@ -255,7 +257,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* This function calculates the prescale value that generates a clock rate
* less than or equal to the specified maximum.
*
- * Returns: calculated prescale - 1 for easy programming into SPI registers
+ * Returns: calculated prescale value for easy programming into SPI registers
 * or a negative error number if a valid prescaler cannot be obtained.
*/
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
@@ -263,12 +265,13 @@ static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
{
int ret;
- ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
+	/* Subtract 1 to match what will be programmed into the SPI register. */
+ ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;
- if (ret < 1 || ret > 256)
+ if (ret < dspi->prescaler_limit || ret > 255)
return -EINVAL;
- return ret - 1;
+ return ret;
}
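
A minimal userspace sketch of the reworked prescale computation above, assuming an illustrative 150 MHz functional clock (DIV_ROUND_UP matches the kernel's definition):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Returns the value programmed into the prescale field (divider - 1),
     * or -1 when it falls outside [limit, 255], mirroring the -EINVAL case. */
    static int get_prescale(unsigned long clk_rate, unsigned long max_speed_hz,
                            int limit)
    {
        int ret = (int)DIV_ROUND_UP(clk_rate, max_speed_hz) - 1;

        if (ret < limit || ret > 255)
            return -1;
        return ret;
    }

    int main(void)
    {
        /* 150 MHz clock, 10 MHz target: divider 15, programmed value 14 */
        printf("%d\n", get_prescale(150000000UL, 10000000UL, 2));
        return 0;
    }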
/**
@@ -832,13 +835,40 @@ rx_dma_failed:
}
#if defined(CONFIG_OF)
+
+/* OF SPI data structure */
+struct davinci_spi_of_data {
+ u8 version;
+ u8 prescaler_limit;
+};
+
+static const struct davinci_spi_of_data dm6441_spi_data = {
+ .version = SPI_VERSION_1,
+ .prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data da830_spi_data = {
+ .version = SPI_VERSION_2,
+ .prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data keystone_spi_data = {
+ .version = SPI_VERSION_1,
+ .prescaler_limit = 0,
+};
+
static const struct of_device_id davinci_spi_of_match[] = {
{
.compatible = "ti,dm6441-spi",
+ .data = &dm6441_spi_data,
},
{
.compatible = "ti,da830-spi",
- .data = (void *)SPI_VERSION_2,
+ .data = &da830_spi_data,
+ },
+ {
+ .compatible = "ti,keystone-spi",
+ .data = &keystone_spi_data,
},
{ },
};
@@ -857,21 +887,21 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
{
struct device_node *node = pdev->dev.of_node;
+ struct davinci_spi_of_data *spi_data;
struct davinci_spi_platform_data *pdata;
unsigned int num_cs, intr_line = 0;
const struct of_device_id *match;
pdata = &dspi->pdata;
- pdata->version = SPI_VERSION_1;
match = of_match_device(davinci_spi_of_match, &pdev->dev);
if (!match)
return -ENODEV;
- /* match data has the SPI version number for SPI_VERSION_2 */
- if (match->data == (void *)SPI_VERSION_2)
- pdata->version = SPI_VERSION_2;
+ spi_data = (struct davinci_spi_of_data *)match->data;
+ pdata->version = spi_data->version;
+ pdata->prescaler_limit = spi_data->prescaler_limit;
/*
* default num_cs is 1 and all chipsel are internal to the chip
* indicated by chip_sel being NULL or cs_gpios being NULL or
@@ -991,7 +1021,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->bitbang.chipselect = davinci_spi_chipselect;
dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
-
+ dspi->prescaler_limit = pdata->prescaler_limit;
dspi->version = pdata->version;
dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index eb03e1215195..7edede6e024b 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -74,6 +74,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->max_freq = clk_get_rate(dwsmmio->clk);
+ of_property_read_u32(pdev->dev.of_node, "reg-io-width",
+ &dws->reg_io_width);
+
num_cs = 4;
if (pdev->dev.of_node)
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 8d67d03c71eb..4fbfcdc5cb24 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -194,7 +194,7 @@ static void dw_writer(struct dw_spi *dws)
else
txw = *(u16 *)(dws->tx);
}
- dw_writel(dws, DW_SPI_DR, txw);
+ dw_write_io_reg(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
}
@@ -205,7 +205,7 @@ static void dw_reader(struct dw_spi *dws)
u16 rxw;
while (max--) {
- rxw = dw_readl(dws, DW_SPI_DR);
+ rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/* Care about rx only if the transfer's original "rx" is not null */
if (dws->rx_end - dws->len) {
if (dws->n_bytes == 1)
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 6c91391c1a4f..b75ed327d5a2 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -109,6 +109,7 @@ struct dw_spi {
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
+ u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
@@ -145,11 +146,45 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
return __raw_readl(dws->regs + offset);
}
+static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
+{
+ return __raw_readw(dws->regs + offset);
+}
+
static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
{
__raw_writel(val, dws->regs + offset);
}
+static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
+{
+ __raw_writew(val, dws->regs + offset);
+}
+
+static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
+{
+ switch (dws->reg_io_width) {
+ case 2:
+ return dw_readw(dws, offset);
+ case 4:
+ default:
+ return dw_readl(dws, offset);
+ }
+}
+
+static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
+{
+ switch (dws->reg_io_width) {
+ case 2:
+ dw_writew(dws, offset, val);
+ break;
+ case 4:
+ default:
+ dw_writel(dws, offset, val);
+ break;
+ }
+}
+
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
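
The reg-io-width value read from the device tree in spi-dw-mmio.c selects between these accessors at run time, so a DW SPI instance wired to a 16-bit bus still works. A self-contained sketch of the same dispatch pattern (a plain struct stands in for the memory-mapped data register; the 16-bit read behavior shown assumes a little-endian host):

    #include <stdio.h>
    #include <stdint.h>

    struct fake_dw {
        uint32_t reg_io_width;  /* 2 or 4, from the "reg-io-width" property */
        uint32_t dr;            /* stand-in for the memory-mapped DR register */
    };

    static uint32_t read_io_reg(struct fake_dw *dws)
    {
        switch (dws->reg_io_width) {
        case 2:
            return *(volatile uint16_t *)&dws->dr;  /* 16-bit access */
        case 4:
        default:
            return *(volatile uint32_t *)&dws->dr;  /* 32-bit access */
        }
    }

    int main(void)
    {
        struct fake_dw dws = { .reg_io_width = 2, .dr = 0xdeadbeef };

        printf("0x%x\n", read_io_reg(&dws)); /* 0xbeef on little-endian */
        return 0;
    }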
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d3f05a0525a4..c27124a5ec8e 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>
#include "spi-fsl-lib.h"
@@ -85,6 +86,8 @@ struct fsl_espi_transfer {
#define SPCOM_TRANLEN(x) ((x) << 0)
#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
+#define AUTOSUSPEND_TIMEOUT 2000
+
static void fsl_espi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
@@ -485,6 +488,8 @@ static int fsl_espi_setup(struct spi_device *spi)
mpc8xxx_spi = spi_master_get_devdata(spi->master);
reg_base = mpc8xxx_spi->reg_base;
+ pm_runtime_get_sync(mpc8xxx_spi->dev);
+
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(
&reg_base->csmode[spi->chip_select]);
@@ -507,6 +512,10 @@ static int fsl_espi_setup(struct spi_device *spi)
mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
retval = fsl_espi_setup_transfer(spi, NULL);
+
+ pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
+ pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
+
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
@@ -604,20 +613,14 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
return ret;
}
-static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
+#ifdef CONFIG_PM
+static int fsl_espi_runtime_suspend(struct device *dev)
{
- iounmap(mspi->reg_base);
-}
-
-static int fsl_espi_suspend(struct spi_master *master)
-{
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
u32 regval;
- mpc8xxx_spi = spi_master_get_devdata(master);
- reg_base = mpc8xxx_spi->reg_base;
-
regval = mpc8xxx_spi_read_reg(&reg_base->mode);
regval &= ~SPMODE_ENABLE;
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
@@ -625,21 +628,20 @@ static int fsl_espi_suspend(struct spi_master *master)
return 0;
}
-static int fsl_espi_resume(struct spi_master *master)
+static int fsl_espi_runtime_resume(struct device *dev)
{
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
u32 regval;
- mpc8xxx_spi = spi_master_get_devdata(master);
- reg_base = mpc8xxx_spi->reg_base;
-
regval = mpc8xxx_spi_read_reg(&reg_base->mode);
regval |= SPMODE_ENABLE;
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
return 0;
}
+#endif
static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
@@ -667,25 +669,23 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
master->setup = fsl_espi_setup;
master->cleanup = fsl_espi_cleanup;
master->transfer_one_message = fsl_espi_do_one_msg;
- master->prepare_transfer_hardware = fsl_espi_resume;
- master->unprepare_transfer_hardware = fsl_espi_suspend;
+ master->auto_runtime_pm = true;
mpc8xxx_spi = spi_master_get_devdata(master);
- mpc8xxx_spi->spi_remove = fsl_espi_remove;
- mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
- if (!mpc8xxx_spi->reg_base) {
- ret = -ENOMEM;
+ mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(mpc8xxx_spi->reg_base)) {
+ ret = PTR_ERR(mpc8xxx_spi->reg_base);
goto err_probe;
}
reg_base = mpc8xxx_spi->reg_base;
/* Register for SPI Interrupt */
- ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
+ ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
0, "fsl_espi", mpc8xxx_spi);
if (ret)
- goto free_irq;
+ goto err_probe;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
mpc8xxx_spi->rx_shift = 16;
@@ -731,18 +731,27 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
- ret = spi_register_master(master);
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = devm_spi_register_master(dev, master);
if (ret < 0)
- goto unreg_master;
+ goto err_pm;
dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return master;
-unreg_master:
- free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-free_irq:
- iounmap(mpc8xxx_spi->reg_base);
+err_pm:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
err_probe:
spi_master_put(master);
err:
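
The probe-time sequence above follows the usual pattern for drivers whose hardware is already powered when runtime PM is enabled: declare the device active before enabling runtime PM (so no spurious resume is issued), hold a reference across registration, then drop it with autosuspend. A hedged outline of that ordering, with printf stubs standing in for the real helpers from <linux/pm_runtime.h>:

    #include <stdio.h>

    /* Stubs only, to make the ordering visible; not the kernel API. */
    static void set_autosuspend_delay(int ms) { printf("autosuspend delay %d ms\n", ms); }
    static void use_autosuspend(void) { printf("use autosuspend\n"); }
    static void set_active(void)      { printf("set active (HW is already up)\n"); }
    static void enable_rpm(void)      { printf("enable runtime PM\n"); }
    static void get_sync(void)        { printf("get reference for registration\n"); }
    static void mark_last_busy(void)  { printf("mark last busy\n"); }
    static void put_autosuspend(void) { printf("put, device may suspend later\n"); }

    int main(void)
    {
        set_autosuspend_delay(2000);    /* AUTOSUSPEND_TIMEOUT in the driver */
        use_autosuspend();
        set_active();                   /* must precede enable_rpm() */
        enable_rpm();
        get_sync();
        /* ... devm_spi_register_master() would run here ... */
        mark_last_busy();
        put_autosuspend();
        return 0;
    }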
@@ -809,7 +818,9 @@ err:
static int of_fsl_espi_remove(struct platform_device *dev)
{
- return mpc8xxx_spi_remove(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -824,7 +835,11 @@ static int of_fsl_espi_suspend(struct device *dev)
return ret;
}
- return fsl_espi_suspend(master);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static int of_fsl_espi_resume(struct device *dev)
@@ -834,7 +849,7 @@ static int of_fsl_espi_resume(struct device *dev)
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_espi_reg *reg_base;
u32 regval;
- int i;
+ int i, ret;
mpc8xxx_spi = spi_master_get_devdata(master);
reg_base = mpc8xxx_spi->reg_base;
@@ -854,11 +869,17 @@ static int of_fsl_espi_resume(struct device *dev)
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops espi_pm = {
+ SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
+ fsl_espi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index cb35d2f0d0e6..1e43412cd9f8 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -114,25 +114,6 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
}
EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
-int mpc8xxx_spi_remove(struct device *dev)
-{
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct spi_master *master;
-
- master = dev_get_drvdata(dev);
- mpc8xxx_spi = spi_master_get_devdata(master);
-
- spi_unregister_master(master);
-
- free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-
- if (mpc8xxx_spi->spi_remove)
- mpc8xxx_spi->spi_remove(mpc8xxx_spi);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
-
int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 1326a392adca..84f5dcb7a897 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -54,9 +54,6 @@ struct mpc8xxx_spi {
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32(*get_tx) (struct mpc8xxx_spi *);
- /* hooks for different controller driver */
- void (*spi_remove) (struct mpc8xxx_spi *mspi);
-
unsigned int count;
unsigned int irq;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 60c590790854..8b290d9d7935 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -559,12 +559,6 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
return ret;
}
-static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
-{
- iounmap(mspi->reg_base);
- fsl_spi_cpm_free(mspi);
-}
-
static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -631,7 +625,6 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
master->transfer_one_message = fsl_spi_do_one_msg;
mpc8xxx_spi = spi_master_get_devdata(master);
- mpc8xxx_spi->spi_remove = fsl_spi_remove;
mpc8xxx_spi->max_bits_per_word = 32;
mpc8xxx_spi->type = fsl_spi_get_type(dev);
@@ -639,10 +632,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (ret)
goto err_cpm_init;
- mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
- if (mpc8xxx_spi->reg_base == NULL) {
- ret = -ENOMEM;
- goto err_ioremap;
+ mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(mpc8xxx_spi->reg_base)) {
+ ret = PTR_ERR(mpc8xxx_spi->reg_base);
+ goto err_probe;
}
if (mpc8xxx_spi->type == TYPE_GRLIB)
@@ -661,11 +654,11 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
&mpc8xxx_spi->tx_shift, 8, 1);
/* Register for SPI Interrupt */
- ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
- 0, "fsl_spi", mpc8xxx_spi);
+ ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_spi_irq,
+ 0, "fsl_spi", mpc8xxx_spi);
if (ret != 0)
- goto free_irq;
+ goto err_probe;
reg_base = mpc8xxx_spi->reg_base;
@@ -686,20 +679,16 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret < 0)
- goto unreg_master;
+ goto err_probe;
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return master;
-unreg_master:
- free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-free_irq:
- iounmap(mpc8xxx_spi->reg_base);
-err_ioremap:
+err_probe:
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
spi_master_put(master);
@@ -866,11 +855,8 @@ static int of_fsl_spi_remove(struct platform_device *ofdev)
{
struct spi_master *master = platform_get_drvdata(ofdev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- int ret;
- ret = mpc8xxx_spi_remove(&ofdev->dev);
- if (ret)
- return ret;
+ fsl_spi_cpm_free(mpc8xxx_spi);
if (mpc8xxx_spi->type == TYPE_FSL)
of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
@@ -916,7 +902,12 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
- return mpc8xxx_spi_remove(&pdev->dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+
+ fsl_spi_cpm_free(mpc8xxx_spi);
+
+ return 0;
}
MODULE_ALIAS("platform:mpc8xxx_spi");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..823cbc92d1e7 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
#define SPFI_CONTROL_SOFT_RESET BIT(11)
#define SPFI_CONTROL_SEND_DMA BIT(10)
#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_SE BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT 5
#define SPFI_CONTROL_TMODE_MASK 0x7
#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -104,6 +105,10 @@ struct img_spfi {
bool rx_dma_busy;
};
+struct img_spfi_device_data {
+ bool gpio_requested;
+};
+
static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
return readl(spfi->regs + reg);
@@ -266,15 +271,15 @@ static int img_spfi_start_pio(struct spi_master *master,
cpu_relax();
}
- ret = spfi_wait_all_done(spfi);
- if (ret < 0)
- return ret;
-
if (rx_bytes > 0 || tx_bytes > 0) {
dev_err(spfi->dev, "PIO transfer timed out\n");
return -ETIMEDOUT;
}
+ ret = spfi_wait_all_done(spfi);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -439,21 +444,50 @@ static int img_spfi_unprepare(struct spi_master *master,
static int img_spfi_setup(struct spi_device *spi)
{
- int ret;
-
- ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (ret)
- dev_err(&spi->dev, "can't request chipselect gpio %d\n",
+ int ret = -EINVAL;
+ struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
+
+ if (!spfi_data) {
+ spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
+ if (!spfi_data)
+ return -ENOMEM;
+ spfi_data->gpio_requested = false;
+ spi_set_ctldata(spi, spfi_data);
+ }
+ if (!spfi_data->gpio_requested) {
+ ret = gpio_request_one(spi->cs_gpio,
+ (spi->mode & SPI_CS_HIGH) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret)
+ dev_err(&spi->dev, "can't request chipselect gpio %d\n",
spi->cs_gpio);
-
+ else
+ spfi_data->gpio_requested = true;
+ } else {
+ if (gpio_is_valid(spi->cs_gpio)) {
+ int mode = ((spi->mode & SPI_CS_HIGH) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
+
+ ret = gpio_direction_output(spi->cs_gpio, mode);
+ if (ret)
+ dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
+ spi->cs_gpio, ret);
+ }
+ }
return ret;
}
static void img_spfi_cleanup(struct spi_device *spi)
{
- gpio_free(spi->cs_gpio);
+ struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
+
+ if (spfi_data) {
+ if (spfi_data->gpio_requested)
+ gpio_free(spi->cs_gpio);
+ kfree(spfi_data);
+ spi_set_ctldata(spi, NULL);
+ }
}
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
@@ -491,6 +525,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
xfer->rx_nbits == SPI_NBITS_QUAD)
val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ val |= SPFI_CONTROL_SE;
spfi_writel(spfi, val, SPFI_CONTROL);
}
@@ -546,6 +581,7 @@ static int img_spfi_probe(struct platform_device *pdev)
struct img_spfi *spfi;
struct resource *res;
int ret;
+ u32 max_speed_hz;
master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
if (!master)
@@ -610,6 +646,19 @@ static int img_spfi_probe(struct platform_device *pdev)
master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
+ /*
+	 * The maximum speed supported by the SPFI is the lower of 1/4 of the
+	 * SPFI clock and the "spfi-max-frequency" value defined in the device
+	 * tree. If no value is defined in the device tree, assume the maximum
+	 * supported speed is 1/4 of the SPFI clock.
+ */
+ if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
+ &max_speed_hz)) {
+ if (master->max_speed_hz > max_speed_hz)
+ master->max_speed_hz = max_speed_hz;
+ }
+
master->setup = img_spfi_setup;
master->cleanup = img_spfi_cleanup;
master->transfer_one = img_spfi_transfer_one;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
- && (transfer->len > spi_imx->tx_wml))
+ if (spi_imx->dma_is_inited
+ && transfer->len > spi_imx->rx_wml * sizeof(u32)
+ && transfer->len > spi_imx->tx_wml * sizeof(u32))
return true;
return false;
}
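
The sizeof(u32) factor above converts the watermark levels, which the i.MX ECSPI hardware counts in 32-bit FIFO words, into bytes so they compare correctly against transfer->len. A one-line sanity check under that assumption (the watermark value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rx_wml = 32;  /* watermark in 32-bit FIFO words (illustrative) */
        uint32_t len = 100;    /* transfer length in bytes */

        /* Comparing bytes against words used to let a 100-byte transfer
         * through; in bytes the real threshold is 128. */
        printf("old check: %d, fixed check: %d\n",
               len > rx_wml, len > rx_wml * (uint32_t)sizeof(uint32_t));
        return 0;
    }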
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 965d2bdcfdcc..1e75341689a6 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -30,11 +30,37 @@
#include <linux/gpio.h>
#include <asm/mpc52xx_psc.h>
+enum {
+ TYPE_MPC5121,
+ TYPE_MPC5125,
+};
+
+/*
+ * This macro abstracts the differences in the PSC register layout between
+ * MPC5121 (which uses a struct mpc52xx_psc) and MPC5125 (using mpc5125_psc).
+ */
+#define psc_addr(mps, regname) ({ \
+ void *__ret = NULL; \
+ switch (mps->type) { \
+ case TYPE_MPC5121: { \
+ struct mpc52xx_psc __iomem *psc = mps->psc; \
+ __ret = &psc->regname; \
+ }; \
+ break; \
+ case TYPE_MPC5125: { \
+ struct mpc5125_psc __iomem *psc = mps->psc; \
+ __ret = &psc->regname; \
+ }; \
+ break; \
+ } \
+ __ret; })
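
psc_addr() relies on GCC statement expressions, where a ({ ... }) block is an expression whose value is that of its last statement; this is what lets the macro run a switch and still "return" a pointer. A tiny self-contained illustration of the construct (GCC/Clang extension, illustrative names):

    #include <stdio.h>

    #define pick_field(s) ({                        \
        int __ret = 0;                              \
        switch ((s)->type) {                        \
        case 0: __ret = (s)->a; break;              \
        case 1: __ret = (s)->b; break;              \
        }                                           \
        __ret; })  /* the last expression is the macro's value */

    struct two_layouts { int type, a, b; };

    int main(void)
    {
        struct two_layouts s = { .type = 1, .a = 10, .b = 20 };

        printf("%d\n", pick_field(&s)); /* prints 20 */
        return 0;
    }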
+
struct mpc512x_psc_spi {
void (*cs_control)(struct spi_device *spi, bool on);
/* driver internal data */
- struct mpc52xx_psc __iomem *psc;
+ int type;
+ void __iomem *psc;
struct mpc512x_psc_fifo __iomem *fifo;
unsigned int irq;
u8 bits_per_word;
@@ -71,13 +97,12 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- struct mpc52xx_psc __iomem *psc = mps->psc;
u32 sicr;
u32 ccr;
int speed;
u16 bclkdiv;
- sicr = in_be32(&psc->sicr);
+ sicr = in_be32(psc_addr(mps, sicr));
/* Set clock phase and polarity */
if (spi->mode & SPI_CPHA)
@@ -94,9 +119,9 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
sicr |= 0x10000000;
else
sicr &= ~0x10000000;
- out_be32(&psc->sicr, sicr);
+ out_be32(psc_addr(mps, sicr), sicr);
- ccr = in_be32(&psc->ccr);
+ ccr = in_be32(psc_addr(mps, ccr));
ccr &= 0xFF000000;
speed = cs->speed_hz;
if (!speed)
@@ -104,7 +129,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
- out_be32(&psc->ccr, ccr);
+ out_be32(psc_addr(mps, ccr), ccr);
mps->bits_per_word = cs->bits_per_word;
if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
@@ -315,16 +340,15 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
- struct mpc52xx_psc __iomem *psc = mps->psc;
dev_dbg(&master->dev, "%s()\n", __func__);
/* Zero MR2 */
- in_8(&psc->mode);
- out_8(&psc->mode, 0x0);
+ in_8(psc_addr(mps, mr2));
+ out_8(psc_addr(mps, mr2), 0x0);
/* enable transmitter/receiver */
- out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
return 0;
}
@@ -332,13 +356,12 @@ static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
- struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
dev_dbg(&master->dev, "%s()\n", __func__);
/* disable transmitter/receiver and fifo interrupt */
- out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
out_be32(&fifo->tximr, 0);
return 0;
@@ -388,7 +411,6 @@ static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
static int mpc512x_psc_spi_port_config(struct spi_master *master,
struct mpc512x_psc_spi *mps)
{
- struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
u32 sicr;
u32 ccr;
@@ -396,12 +418,12 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
u16 bclkdiv;
/* Reset the PSC into a known state */
- out_8(&psc->command, MPC52xx_PSC_RST_RX);
- out_8(&psc->command, MPC52xx_PSC_RST_TX);
- out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_RST_RX);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_RST_TX);
+ out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
	/* Disable psc interrupts; all useful interrupts are in the fifo */
- out_be16(&psc->isr_imr.imr, 0);
+ out_be16(psc_addr(mps, isr_imr.imr), 0);
/* Disable fifo interrupts, will be enabled later */
out_be32(&fifo->tximr, 0);
@@ -417,18 +439,18 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
0x00004000 | /* MSTR = 1 -- SPI master */
0x00000800; /* UseEOF = 1 -- SS low until EOF */
- out_be32(&psc->sicr, sicr);
+ out_be32(psc_addr(mps, sicr), sicr);
- ccr = in_be32(&psc->ccr);
+ ccr = in_be32(psc_addr(mps, ccr));
ccr &= 0xFF000000;
speed = 1000000; /* default 1MHz */
bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
- out_be32(&psc->ccr, ccr);
+ out_be32(psc_addr(mps, ccr), ccr);
/* Set 2ms DTL delay */
- out_8(&psc->ctur, 0x00);
- out_8(&psc->ctlr, 0x82);
+ out_8(psc_addr(mps, ctur), 0x00);
+ out_8(psc_addr(mps, ctlr), 0x82);
/* we don't use the alarms */
out_be32(&fifo->rxalarm, 0xfff);
@@ -482,6 +504,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
dev_set_drvdata(dev, master);
mps = spi_master_get_devdata(master);
+ mps->type = (int)of_device_get_match_data(dev);
mps->irq = irq;
if (pdata == NULL) {
@@ -589,7 +612,8 @@ static int mpc512x_psc_spi_of_remove(struct platform_device *op)
}
static const struct of_device_id mpc512x_psc_spi_of_match[] = {
- { .compatible = "fsl,mpc5121-psc-spi", },
+ { .compatible = "fsl,mpc5121-psc-spi", .data = (void *)TYPE_MPC5121 },
+ { .compatible = "fsl,mpc5125-psc-spi", .data = (void *)TYPE_MPC5125 },
{},
};
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
new file mode 100644
index 000000000000..5f6315c47920
--- /dev/null
+++ b/drivers/spi/spi-mt65xx.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/spi-mt65xx.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define SPI_CFG0_REG 0x0000
+#define SPI_CFG1_REG 0x0004
+#define SPI_TX_SRC_REG 0x0008
+#define SPI_RX_DST_REG 0x000c
+#define SPI_TX_DATA_REG 0x0010
+#define SPI_RX_DATA_REG 0x0014
+#define SPI_CMD_REG 0x0018
+#define SPI_STATUS0_REG 0x001c
+#define SPI_PAD_SEL_REG 0x0024
+
+#define SPI_CFG0_SCK_HIGH_OFFSET 0
+#define SPI_CFG0_SCK_LOW_OFFSET 8
+#define SPI_CFG0_CS_HOLD_OFFSET 16
+#define SPI_CFG0_CS_SETUP_OFFSET 24
+
+#define SPI_CFG1_CS_IDLE_OFFSET 0
+#define SPI_CFG1_PACKET_LOOP_OFFSET 8
+#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
+#define SPI_CFG1_GET_TICK_DLY_OFFSET 30
+
+#define SPI_CFG1_CS_IDLE_MASK 0xff
+#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
+#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+
+#define SPI_CMD_ACT BIT(0)
+#define SPI_CMD_RESUME BIT(1)
+#define SPI_CMD_RST BIT(2)
+#define SPI_CMD_PAUSE_EN BIT(4)
+#define SPI_CMD_DEASSERT BIT(5)
+#define SPI_CMD_CPHA BIT(8)
+#define SPI_CMD_CPOL BIT(9)
+#define SPI_CMD_RX_DMA BIT(10)
+#define SPI_CMD_TX_DMA BIT(11)
+#define SPI_CMD_TXMSBF BIT(12)
+#define SPI_CMD_RXMSBF BIT(13)
+#define SPI_CMD_RX_ENDIAN BIT(14)
+#define SPI_CMD_TX_ENDIAN BIT(15)
+#define SPI_CMD_FINISH_IE BIT(16)
+#define SPI_CMD_PAUSE_IE BIT(17)
+
+#define MT8173_SPI_MAX_PAD_SEL 3
+
+#define MTK_SPI_PAUSE_INT_STATUS 0x2
+
+#define MTK_SPI_IDLE 0
+#define MTK_SPI_PAUSED 1
+
+#define MTK_SPI_MAX_FIFO_SIZE 32
+#define MTK_SPI_PACKET_SIZE 1024
+
+struct mtk_spi_compatible {
+ bool need_pad_sel;
+	/* Must explicitly send dummy Tx bytes to do an Rx-only transfer */
+ bool must_tx;
+};
+
+struct mtk_spi {
+ void __iomem *base;
+ u32 state;
+ u32 pad_sel;
+ struct clk *spi_clk, *parent_clk;
+ struct spi_transfer *cur_transfer;
+ u32 xfer_len;
+ struct scatterlist *tx_sgl, *rx_sgl;
+ u32 tx_sgl_len, rx_sgl_len;
+ const struct mtk_spi_compatible *dev_comp;
+};
+
+static const struct mtk_spi_compatible mt6589_compat;
+static const struct mtk_spi_compatible mt8135_compat;
+static const struct mtk_spi_compatible mt8173_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+};
+
+/*
+ * Default chip configuration, used unless the platform supplies one.
+ */
+static const struct mtk_chip_config mtk_default_chip_info = {
+ .rx_mlsb = 1,
+ .tx_mlsb = 1,
+};
+
+static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
+ { .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
+ { .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
+
+static void mtk_spi_reset(struct mtk_spi *mdata)
+{
+ u32 reg_val;
+
+ /* set the software reset bit in SPI_CMD_REG. */
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val |= SPI_CMD_RST;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_RST;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+}
+
+static void mtk_spi_config(struct mtk_spi *mdata,
+ struct mtk_chip_config *chip_config)
+{
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+
+	/* set the tx/rx MSB-first bits */
+ if (chip_config->tx_mlsb)
+ reg_val |= SPI_CMD_TXMSBF;
+ else
+ reg_val &= ~SPI_CMD_TXMSBF;
+ if (chip_config->rx_mlsb)
+ reg_val |= SPI_CMD_RXMSBF;
+ else
+ reg_val &= ~SPI_CMD_RXMSBF;
+
+ /* set the tx/rx endian */
+#ifdef __LITTLE_ENDIAN
+ reg_val &= ~SPI_CMD_TX_ENDIAN;
+ reg_val &= ~SPI_CMD_RX_ENDIAN;
+#else
+ reg_val |= SPI_CMD_TX_ENDIAN;
+ reg_val |= SPI_CMD_RX_ENDIAN;
+#endif
+
+	/* always enable the finish and pause interrupts */
+ reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
+
+ /* disable dma mode */
+ reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
+
+ /* disable deassert mode */
+ reg_val &= ~SPI_CMD_DEASSERT;
+
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ /* pad select */
+ if (mdata->dev_comp->need_pad_sel)
+ writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
+}
+
+static int mtk_spi_prepare_hardware(struct spi_master *master)
+{
+ struct spi_transfer *trans;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ struct spi_message *msg = master->cur_msg;
+
+ trans = list_first_entry(&msg->transfers, struct spi_transfer,
+ transfer_list);
+ if (!trans->cs_change) {
+ mdata->state = MTK_SPI_IDLE;
+ mtk_spi_reset(mdata);
+ }
+
+ return 0;
+}
+
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ u32 reg_val;
+ u8 cpha, cpol;
+ struct mtk_chip_config *chip_config;
+ struct spi_device *spi = msg->spi;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (cpha)
+ reg_val |= SPI_CMD_CPHA;
+ else
+ reg_val &= ~SPI_CMD_CPHA;
+ if (cpol)
+ reg_val |= SPI_CMD_CPOL;
+ else
+ reg_val &= ~SPI_CMD_CPOL;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ chip_config = spi->controller_data;
+ if (!chip_config) {
+ chip_config = (void *)&mtk_default_chip_info;
+ spi->controller_data = chip_config;
+ }
+ mtk_spi_config(mdata, chip_config);
+
+ return 0;
+}
+
+static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ u32 reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (!enable)
+ reg_val |= SPI_CMD_PAUSE_EN;
+ else
+ reg_val &= ~SPI_CMD_PAUSE_EN;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+}
+
+static void mtk_spi_prepare_transfer(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ spi_clk_hz = clk_get_rate(mdata->spi_clk);
+ if (xfer->speed_hz < spi_clk_hz / 2)
+ div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+ else
+ div = 1;
+
+ sck_time = (div + 1) / 2;
+ cs_time = sck_time * 2;
+
+ reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+ reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+}
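
A standalone sketch of the timing math above: the source clock is divided down to the requested SCK rate, each SCK phase gets half the divider, and the chip-select times span a full SCK period (clock rates are illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int spi_clk_hz = 109000000;  /* illustrative source clock */
        unsigned int speed_hz = 1000000;      /* requested SCK rate */
        unsigned int div, sck_time, cs_time;

        div = (speed_hz < spi_clk_hz / 2) ?
              DIV_ROUND_UP(spi_clk_hz, speed_hz) : 1;
        sck_time = (div + 1) / 2;  /* half the divider per SCK phase */
        cs_time = sck_time * 2;    /* CS setup/hold covers a full period */

        printf("div=%u sck_time=%u cs_time=%u\n", div, sck_time, cs_time);
        return 0;
    }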
+
+static void mtk_spi_setup_packet(struct spi_master *master)
+{
+ u32 packet_size, packet_loop, reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+ packet_loop = mdata->xfer_len / packet_size;
+
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+ reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+ reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+}
+
+static void mtk_spi_enable_transfer(struct spi_master *master)
+{
+ u32 cmd;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->state == MTK_SPI_IDLE)
+ cmd |= SPI_CMD_ACT;
+ else
+ cmd |= SPI_CMD_RESUME;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+}
+
+static int mtk_spi_get_mult_delta(u32 xfer_len)
+{
+ u32 mult_delta;
+
+ if (xfer_len > MTK_SPI_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+ else
+ mult_delta = 0;
+
+ return mult_delta;
+}
+
+static void mtk_spi_update_mdata_len(struct spi_master *master)
+{
+ int mult_delta;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
+ if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ mdata->rx_sgl_len = mult_delta;
+ mdata->tx_sgl_len -= mdata->xfer_len;
+ } else {
+ mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ mdata->tx_sgl_len = mult_delta;
+ mdata->rx_sgl_len -= mdata->xfer_len;
+ }
+ } else if (mdata->tx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ mdata->tx_sgl_len = mult_delta;
+ } else if (mdata->rx_sgl_len) {
+ mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ mdata->rx_sgl_len = mult_delta;
+ }
+}
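
The splitting above keeps every chunk programmed into the hardware a whole multiple of the 1024-byte packet size, deferring any remainder to the next round of the interrupt handler. A worked example of the mult_delta computation under that assumption:

    #include <stdio.h>

    #define PACKET_SIZE 1024

    static unsigned int get_mult_delta(unsigned int xfer_len)
    {
        return (xfer_len > PACKET_SIZE) ? xfer_len % PACKET_SIZE : 0;
    }

    int main(void)
    {
        /* 2500 bytes: program two full packets (2048 bytes) now and
         * defer the 452-byte tail to the next chunk. */
        unsigned int len = 2500, delta = get_mult_delta(len);

        printf("now=%u, deferred=%u\n", len - delta, delta);
        return 0;
    }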
+
+static void mtk_spi_setup_dma_addr(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (mdata->tx_sgl)
+ writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
+ if (mdata->rx_sgl)
+ writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
+}
+
+static int mtk_spi_fifo_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int cnt;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ mdata->cur_transfer = xfer;
+ mdata->xfer_len = xfer->len;
+ mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_setup_packet(master);
+
+ if (xfer->len % 4)
+ cnt = xfer->len / 4 + 1;
+ else
+ cnt = xfer->len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+ mtk_spi_enable_transfer(master);
+
+ return 1;
+}
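
The word-count rounding above is the open-coded form of the kernel's DIV_ROUND_UP() helper, needed because the FIFO is fed in 32-bit words; a quick check of the equivalence:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        for (unsigned int len = 1; len <= 8; len++) {
            unsigned int cnt = (len % 4) ? len / 4 + 1 : len / 4;

            printf("len=%u cnt=%u div_round_up=%u\n",
                   len, cnt, DIV_ROUND_UP(len, 4));
        }
        return 0;
    }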
+
+static int mtk_spi_dma_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int cmd;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ mdata->tx_sgl = NULL;
+ mdata->rx_sgl = NULL;
+ mdata->tx_sgl_len = 0;
+ mdata->rx_sgl_len = 0;
+ mdata->cur_transfer = xfer;
+
+ mtk_spi_prepare_transfer(master, xfer);
+
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ if (xfer->tx_buf)
+ cmd |= SPI_CMD_TX_DMA;
+ if (xfer->rx_buf)
+ cmd |= SPI_CMD_RX_DMA;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+
+ if (xfer->tx_buf)
+ mdata->tx_sgl = xfer->tx_sg.sgl;
+ if (xfer->rx_buf)
+ mdata->rx_sgl = xfer->rx_sg.sgl;
+
+ if (mdata->tx_sgl) {
+ xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
+ mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+ }
+ if (mdata->rx_sgl) {
+ xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
+ mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+ }
+
+ mtk_spi_update_mdata_len(master);
+ mtk_spi_setup_packet(master);
+ mtk_spi_setup_dma_addr(master, xfer);
+ mtk_spi_enable_transfer(master);
+
+ return 1;
+}
+
+static int mtk_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (master->can_dma(master, spi, xfer))
+ return mtk_spi_dma_transfer(master, spi, xfer);
+ else
+ return mtk_spi_fifo_transfer(master, spi, xfer);
+}
+
+static bool mtk_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
+}
+
+static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+{
+ u32 cmd, reg_val, cnt;
+ struct spi_master *master = dev_id;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ struct spi_transfer *trans = mdata->cur_transfer;
+
+ reg_val = readl(mdata->base + SPI_STATUS0_REG);
+ if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
+ mdata->state = MTK_SPI_PAUSED;
+ else
+ mdata->state = MTK_SPI_IDLE;
+
+ if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+ if (trans->rx_buf) {
+ if (mdata->xfer_len % 4)
+ cnt = mdata->xfer_len / 4 + 1;
+ else
+ cnt = mdata->xfer_len / 4;
+ ioread32_rep(mdata->base + SPI_RX_DATA_REG,
+ trans->rx_buf, cnt);
+ }
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+ }
+
+ if (mdata->tx_sgl)
+ trans->tx_dma += mdata->xfer_len;
+ if (mdata->rx_sgl)
+ trans->rx_dma += mdata->xfer_len;
+
+ if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
+ mdata->tx_sgl = sg_next(mdata->tx_sgl);
+ if (mdata->tx_sgl) {
+ trans->tx_dma = sg_dma_address(mdata->tx_sgl);
+ mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+ }
+ }
+ if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
+ mdata->rx_sgl = sg_next(mdata->rx_sgl);
+ if (mdata->rx_sgl) {
+ trans->rx_dma = sg_dma_address(mdata->rx_sgl);
+ mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+ }
+ }
+
+ if (!mdata->tx_sgl && !mdata->rx_sgl) {
+		/* disable DMA mode */
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ cmd &= ~SPI_CMD_TX_DMA;
+ cmd &= ~SPI_CMD_RX_DMA;
+ writel(cmd, mdata->base + SPI_CMD_REG);
+
+ spi_finalize_current_transfer(master);
+ return IRQ_HANDLED;
+ }
+
+ mtk_spi_update_mdata_len(master);
+ mtk_spi_setup_packet(master);
+ mtk_spi_setup_dma_addr(master, trans);
+ mtk_spi_enable_transfer(master);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mtk_spi *mdata;
+ const struct of_device_id *of_id;
+ struct resource *res;
+ int irq, ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
+ if (!master) {
+ dev_err(&pdev->dev, "failed to alloc spi master\n");
+ return -ENOMEM;
+ }
+
+ master->auto_runtime_pm = true;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ master->set_cs = mtk_spi_set_cs;
+ master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
+ master->prepare_message = mtk_spi_prepare_message;
+ master->transfer_one = mtk_spi_transfer_one;
+ master->can_dma = mtk_spi_can_dma;
+
+ of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to probe of_node\n");
+ ret = -EINVAL;
+ goto err_put_master;
+ }
+
+ mdata = spi_master_get_devdata(master);
+ mdata->dev_comp = of_id->data;
+ if (mdata->dev_comp->must_tx)
+ master->flags = SPI_MASTER_MUST_TX;
+
+ if (mdata->dev_comp->need_pad_sel) {
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "mediatek,pad-select",
+ &mdata->pad_sel);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read pad select: %d\n",
+ ret);
+ goto err_put_master;
+ }
+
+ if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
+ dev_err(&pdev->dev, "wrong pad-select: %u\n",
+ mdata->pad_sel);
+ ret = -EINVAL;
+ goto err_put_master;
+ }
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ goto err_put_master;
+ }
+
+ mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdata->base)) {
+ ret = PTR_ERR(mdata->base);
+ goto err_put_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
+ ret = irq;
+ goto err_put_master;
+ }
+
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
+ goto err_put_master;
+ }
+
+ mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
+ if (IS_ERR(mdata->spi_clk)) {
+ ret = PTR_ERR(mdata->spi_clk);
+ dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+ goto err_put_master;
+ }
+
+ mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk)) {
+ ret = PTR_ERR(mdata->parent_clk);
+ dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
+ goto err_put_master;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+ goto err_put_master;
+ }
+
+ ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+ goto err_disable_clk;
+ }
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+ goto err_put_master;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(mdata->spi_clk);
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int mtk_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mtk_spi_reset(mdata);
+ clk_disable_unprepare(mdata->spi_clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+
+static int mtk_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ret = spi_master_resume(master);
+ if (ret < 0)
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return 0;
+}
+
+static int mtk_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
+ SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
+ mtk_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_spi_driver = {
+ .driver = {
+ .name = "mtk-spi",
+ .pm = &mtk_spi_pm,
+ .of_match_table = mtk_spi_of_match,
+ },
+ .probe = mtk_spi_probe,
+ .remove = mtk_spi_remove,
+};
+
+module_platform_driver(mtk_spi_driver);
+
+MODULE_DESCRIPTION("MTK SPI Controller driver");
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-spi");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 58673841286c..3d09e0b69b73 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -245,6 +245,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
u32 l;
/* The controller handles the inverted chip selects
@@ -255,6 +256,12 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
enable = !enable;
if (spi->controller_state) {
+ int err = pm_runtime_get_sync(mcspi->dev);
+ if (err < 0) {
+ dev_err(mcspi->dev, "failed to get sync: %d\n", err);
+ return;
+ }
+
l = mcspi_cached_chconf0(spi);
if (enable)
@@ -263,6 +270,9 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
l |= OMAP2_MCSPI_CHCONF_FORCE;
mcspi_write_chconf0(spi, l);
+
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
}
}
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8cad107a5b3f..a87cfd4ba17b 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -41,6 +41,11 @@
#define ORION_SPI_DATA_OUT_REG 0x08
#define ORION_SPI_DATA_IN_REG 0x0c
#define ORION_SPI_INT_CAUSE_REG 0x10
+#define ORION_SPI_TIMING_PARAMS_REG 0x18
+
+#define ORION_SPI_TMISO_SAMPLE_MASK (0x3 << 6)
+#define ORION_SPI_TMISO_SAMPLE_1 (1 << 6)
+#define ORION_SPI_TMISO_SAMPLE_2 (2 << 6)
#define ORION_SPI_MODE_CPOL (1 << 11)
#define ORION_SPI_MODE_CPHA (1 << 12)
@@ -70,6 +75,7 @@ struct orion_spi_dev {
unsigned int min_divisor;
unsigned int max_divisor;
u32 prescale_mask;
+ bool is_errata_50mhz_ac;
};
struct orion_spi {
@@ -195,6 +201,41 @@ orion_spi_mode_set(struct spi_device *spi)
writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
}
+static void
+orion_spi_50mhz_ac_timing_erratum(struct spi_device *spi, unsigned int speed)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ /*
+ * Erratum description: (Erratum NO. FE-9144572) The device
+ * SPI interface supports frequencies of up to 50 MHz.
+ * However, due to this erratum, when the device core clock is
+	 * 250 MHz and the SPI interface is configured for a 50 MHz SPI
+	 * clock with CPOL=CPHA=1, data corruption might occur on
+ * reads from the SPI device.
+ * Erratum Workaround:
+ * Work in one of the following configurations:
+ * 1. Set CPOL=CPHA=0 in "SPI Interface Configuration
+ * Register".
+ * 2. Set TMISO_SAMPLE value to 0x2 in "SPI Timing Parameters 1
+ * Register" before setting the interface.
+ */
+ reg = readl(spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+ reg &= ~ORION_SPI_TMISO_SAMPLE_MASK;
+
+ if (clk_get_rate(orion_spi->clk) == 250000000 &&
+ speed == 50000000 && spi->mode & SPI_CPOL &&
+ spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_TMISO_SAMPLE_2;
+ else
+ reg |= ORION_SPI_TMISO_SAMPLE_1; /* This is the default value */
+
+ writel(reg, spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+}
+
/*
* called only when no transfer is active on the bus
*/
@@ -216,6 +257,9 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
orion_spi_mode_set(spi);
+ if (orion_spi->devdata->is_errata_50mhz_ac)
+ orion_spi_50mhz_ac_timing_erratum(spi, speed);
+
rc = orion_spi_baudrate_set(spi, speed);
if (rc)
return rc;
@@ -413,6 +457,14 @@ static const struct orion_spi_dev armada_375_spi_dev_data = {
.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
};
+static const struct orion_spi_dev armada_380_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .max_hz = 50000000,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+ .is_errata_50mhz_ac = true,
+};
+
static const struct of_device_id orion_spi_of_match_table[] = {
{
.compatible = "marvell,orion-spi",
@@ -428,7 +480,7 @@ static const struct of_device_id orion_spi_of_match_table[] = {
},
{
.compatible = "marvell,armada-380-spi",
- .data = &armada_xp_spi_dev_data,
+ .data = &armada_380_spi_dev_data,
},
{
.compatible = "marvell,armada-390-spi",
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 3cfd4357489a..d19d7f28aecb 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -7,7 +7,6 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dmaengine.h>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 7293d6d875c5..fdd791977041 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
@@ -97,6 +98,15 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
},
+ { /* LPSS_SPT_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 32,
+ .tx_threshold_hi = 56,
+ },
};
static inline const struct lpss_config
@@ -110,6 +120,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
switch (drv_data->ssp_type) {
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
+ case LPSS_SPT_SSP:
return true;
default:
return false;
@@ -1107,6 +1118,7 @@ static int setup(struct spi_device *spi)
break;
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
+ case LPSS_SPT_SSP:
config = lpss_get_config(drv_data);
tx_thres = config->tx_threshold_lo;
tx_hi_thres = config->tx_threshold_hi;
@@ -1276,6 +1288,31 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+/*
+ * PCI IDs of compound devices that integrate both host controller and private
+ * integrated DMA engine. Note that these IDs are not used for module
+ * autoloading or probing here; they only serve to match the LPSS SSP type.
+ */
+static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
+ /* SPT-LP */
+ { PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
+ /* SPT-H */
+ { PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+ { },
+};
+
+static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
+{
+ struct device *dev = param;
+
+ if (dev != chan->device->dev->parent)
+ return false;
+
+ return true;
+}
+
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
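The pci_match_id() call used below is essentially a scan of the table above for a matching vendor/device pair. A reduced standalone sketch, with the struct layout simplified for illustration (0x8086 is the Intel vendor ID):

#include <stdio.h>

enum ssp_type { LPSS_LPT_SSP, LPSS_BYT_SSP, LPSS_SPT_SSP };

struct pci_id { unsigned short vendor, device; enum ssp_type type; };

static const struct pci_id compound_ids[] = {
	{ 0x8086, 0x9d29, LPSS_SPT_SSP },	/* SPT-LP */
	{ 0x8086, 0x9d2a, LPSS_SPT_SSP },
	{ 0x8086, 0xa129, LPSS_SPT_SSP },	/* SPT-H */
	{ 0x8086, 0xa12a, LPSS_SPT_SSP },
	{ 0, 0, 0 },
};

/* Return the matching table entry, or NULL if there is none. */
static const struct pci_id *match_id(unsigned short vendor,
				     unsigned short device)
{
	const struct pci_id *id;

	for (id = compound_ids; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id;
	return NULL;
}

int main(void)
{
	const struct pci_id *id = match_id(0x8086, 0x9d29);

	printf("type = %d\n", id ? (int)id->type : -1);
	return 0;
}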
@@ -1283,16 +1320,25 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
- const struct acpi_device_id *id;
+ const struct acpi_device_id *adev_id = NULL;
+ const struct pci_device_id *pcidev_id = NULL;
int devid, type;
if (!ACPI_HANDLE(&pdev->dev) ||
acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
return NULL;
- id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
- if (id)
- type = (int)id->driver_data;
+ if (dev_is_pci(pdev->dev.parent))
+ pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
+ to_pci_dev(pdev->dev.parent));
+ else
+ adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+
+ if (adev_id)
+ type = (int)adev_id->driver_data;
+ else if (pcidev_id)
+ type = (int)pcidev_id->driver_data;
else
return NULL;
@@ -1311,6 +1357,12 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
if (IS_ERR(ssp->mmio_base))
return NULL;
+ if (pcidev_id) {
+ pdata->tx_param = pdev->dev.parent;
+ pdata->rx_param = pdev->dev.parent;
+ pdata->dma_filter = pxa2xx_spi_idma_filter;
+ }
+
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
ssp->type = type;
@@ -1362,8 +1414,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* Allocate master with space for drv_data and null dma buffer */
- master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
+ master = spi_alloc_master(dev, sizeof(struct driver_data));
if (!master) {
dev_err(&pdev->dev, "cannot alloc spi_master\n");
pxa_ssp_free(ssp);
@@ -1390,7 +1441,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
drv_data->ssp_type = ssp->type;
- drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
@@ -1424,8 +1474,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
/* Setup DMA if requested */
- drv_data->tx_channel = -1;
- drv_data->rx_channel = -1;
if (platform_info->enable_dma) {
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 9f01e9c9aa75..0a9b6390a817 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -36,11 +36,6 @@ struct driver_data {
/* PXA hookup */
struct pxa2xx_spi_master *master_info;
- /* PXA private DMA setup stuff */
- int rx_channel;
- int tx_channel;
- u32 *null_dma_buf;
-
/* SSP register addresses */
void __iomem *ioaddr;
u32 ssdr_physical;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 68e7efeb9a27..79a8bc4f6cec 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -645,7 +645,6 @@ static int rockchip_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
rs = spi_master_get_devdata(master);
- memset(rs, 0, sizeof(struct rockchip_spi));
/* Get basic io resource and map it */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index f9189a0c8cec..818843336932 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -383,7 +383,8 @@ static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
rspi_write8(rspi, data, reg);
}
-static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
+ unsigned int len)
{
unsigned int n;
@@ -724,25 +725,25 @@ static int rspi_rz_transfer_one(struct spi_master *master,
static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
u8 *rx, unsigned int len)
{
- int i, n, ret;
- int error;
+ unsigned int i, n;
+ int ret;
while (len > 0) {
n = qspi_set_send_trigger(rspi, len);
qspi_set_receive_trigger(rspi, len);
if (n == QSPI_BUFFER_SIZE) {
- error = rspi_wait_for_tx_empty(rspi);
- if (error < 0) {
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
- return error;
+ return ret;
}
for (i = 0; i < n; i++)
rspi_write_data(rspi, *tx++);
- error = rspi_wait_for_rx_full(rspi);
- if (error < 0) {
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
- return error;
+ return ret;
}
for (i = 0; i < n; i++)
*rx++ = rspi_read_data(rspi);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index f747ca269986..f36bc320a807 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -501,7 +501,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
}
hw = spi_master_get_devdata(master);
- memset(hw, 0, sizeof(struct s3c24xx_spi));
hw->master = master;
hw->pdata = pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 2a8c513c4d07..cd1cfac0447f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1191,8 +1191,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
- mem_res,
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
return 0;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d3370a612d84..a7934ab00b96 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -48,8 +48,8 @@ struct sh_msiof_spi_priv {
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
- int tx_fifo_size;
- int rx_fifo_size;
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
void *tx_dma_page;
void *rx_dma_page;
dma_addr_t tx_dma_addr;
@@ -95,8 +95,6 @@ struct sh_msiof_spi_priv {
#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
-#define MAX_WDLEN 256U
-
/* TSCR and RSCR */
#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
#define SCR_BRPS(i) (((i) - 1) << 8)
@@ -850,7 +848,12 @@ static int sh_msiof_transfer_one(struct spi_master *master,
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte or word swapping respectively.
*/
- unsigned int l = min(len, MAX_WDLEN * 4);
+ unsigned int l = 0;
+
+ if (tx_buf)
+ l = min(len, p->tx_fifo_size * 4);
+ if (rx_buf)
+ l = min(len, p->rx_fifo_size * 4);
if (bits <= 8) {
if (l & 3)
@@ -963,7 +966,7 @@ static const struct sh_msiof_chipdata sh_data = {
static const struct sh_msiof_chipdata r8a779x_data = {
.tx_fifo_size = 64,
- .rx_fifo_size = 256,
+ .rx_fifo_size = 64,
.master_flags = SPI_MASTER_MUST_TX,
};
@@ -1265,11 +1268,6 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
static const struct platform_device_id spi_driver_ids[] = {
{ "spi_sh_msiof", (kernel_ulong_t)&sh_data },
- { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
- { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
- { "spi_r8a7792_msiof", (kernel_ulong_t)&r8a779x_data },
- { "spi_r8a7793_msiof", (kernel_ulong_t)&r8a779x_data },
- { "spi_r8a7794_msiof", (kernel_ulong_t)&r8a779x_data },
{},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5c0616870358..aa6d284131e0 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -99,6 +99,8 @@ struct ti_qspi {
#define QSPI_INVAL (4 << 16)
#define QSPI_WC_CMD_INT_EN (1 << 14)
#define QSPI_FLEN(n) ((n - 1) << 0)
+#define QSPI_WLEN_MAX_BITS 128
+#define QSPI_WLEN_MAX_BYTES 16
/* STATUS REGISTER */
#define BUSY 0x01
@@ -217,14 +219,16 @@ static inline u32 qspi_is_busy(struct ti_qspi *qspi)
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
- int wlen, count;
+ int wlen, count, xfer_len;
unsigned int cmd;
const u8 *txbuf;
+ u32 data;
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
count = t->len;
wlen = t->bits_per_word >> 3; /* in bytes */
+ xfer_len = wlen;
while (count) {
if (qspi_is_busy(qspi))
@@ -234,7 +238,29 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
cmd, qspi->dc, *txbuf);
- writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *txp = (u32 *)txbuf;
+
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_3);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_2);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG_1);
+ data = cpu_to_be32(*txp++);
+ writel(data, qspi->base +
+ QSPI_SPI_DATA_REG);
+ xfer_len = QSPI_WLEN_MAX_BYTES;
+ cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
+ } else {
+ writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ xfer_len = wlen;
+ cmd |= QSPI_WLEN(wlen);
+ }
break;
case 2:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
@@ -254,8 +280,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
dev_err(qspi->dev, "write timed out\n");
return -ETIMEDOUT;
}
- txbuf += wlen;
- count -= wlen;
+ txbuf += xfer_len;
+ count -= xfer_len;
}
return 0;
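The 128-bit path above packs four 32-bit words per FIFO write, issued to DATA_REG_3 down to DATA_REG. A standalone sketch of the byte ordering, assuming a little-endian host where cpu_to_be32() byte-swaps:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-swap to big-endian, as cpu_to_be32() does on a little-endian CPU. */
static uint32_t to_be32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
	const uint8_t txbuf[16] = {
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
	};
	/* Write order in the driver: DATA_REG_3, _2, _1, then DATA_REG. */
	const char *regs[4] = {
		"DATA_REG_3", "DATA_REG_2", "DATA_REG_1", "DATA_REG",
	};
	uint32_t word;
	int i;

	for (i = 0; i < 4; i++) {
		memcpy(&word, txbuf + 4 * i, 4);	/* *txp++ */
		printf("%s <- %08x\n", regs[i], to_be32(word));
	}
	return 0;
}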
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index bb478dccf1d8..3c28e24b10f5 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -237,11 +237,11 @@ static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
};
+MODULE_DEVICE_TABLE(i2c, spi_xcomm_ids);
static struct i2c_driver spi_xcomm_driver = {
.driver = {
.name = "spi-xcomm",
- .owner = THIS_MODULE,
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 133f53a9c1d4..a339c1e9997a 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -249,19 +249,23 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->tx_ptr = t->tx_buf;
xspi->rx_ptr = t->rx_buf;
remaining_words = t->len / xspi->bytes_per_word;
- reinit_completion(&xspi->done);
if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ u32 isr;
use_irq = true;
- xspi->write_fn(XSPI_INTR_TX_EMPTY,
- xspi->regs + XIPIF_V123B_IISR_OFFSET);
- /* Enable the global IPIF interrupt */
- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
- xspi->regs + XIPIF_V123B_DGIER_OFFSET);
/* Inhibit irq to avoid spurious irqs on tx_empty*/
cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
xspi->regs + XSPI_CR_OFFSET);
+ /* ACK old irqs (if any) */
+ isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (isr)
+ xspi->write_fn(isr,
+ xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ /* Enable the global IPIF interrupt */
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ reinit_completion(&xspi->done);
}
while (remaining_words) {
@@ -302,8 +306,10 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
remaining_words -= n_words;
}
- if (use_irq)
+ if (use_irq) {
xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ }
return t->len;
}
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
new file mode 100644
index 000000000000..8f04feca6ee3
--- /dev/null
+++ b/drivers/spi/spi-xlp.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2003-2015 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (GPL v2)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+
+/* SPI Configuration Register */
+#define XLP_SPI_CONFIG 0x00
+#define XLP_SPI_CPHA BIT(0)
+#define XLP_SPI_CPOL BIT(1)
+#define XLP_SPI_CS_POL BIT(2)
+#define XLP_SPI_TXMISO_EN BIT(3)
+#define XLP_SPI_TXMOSI_EN BIT(4)
+#define XLP_SPI_RXMISO_EN BIT(5)
+#define XLP_SPI_CS_LSBFE BIT(10)
+#define XLP_SPI_RXCAP_EN BIT(11)
+
+/* SPI Frequency Divider Register */
+#define XLP_SPI_FDIV 0x04
+
+/* SPI Command Register */
+#define XLP_SPI_CMD 0x08
+#define XLP_SPI_CMD_IDLE_MASK 0x0
+#define XLP_SPI_CMD_TX_MASK 0x1
+#define XLP_SPI_CMD_RX_MASK 0x2
+#define XLP_SPI_CMD_TXRX_MASK 0x3
+#define XLP_SPI_CMD_CONT BIT(4)
+#define XLP_SPI_XFR_BITCNT_SHIFT 16
+
+/* SPI Status Register */
+#define XLP_SPI_STATUS 0x0c
+#define XLP_SPI_XFR_PENDING BIT(0)
+#define XLP_SPI_XFR_DONE BIT(1)
+#define XLP_SPI_TX_INT BIT(2)
+#define XLP_SPI_RX_INT BIT(3)
+#define XLP_SPI_TX_UF BIT(4)
+#define XLP_SPI_RX_OF BIT(5)
+#define XLP_SPI_STAT_MASK 0x3f
+
+/* SPI Interrupt Enable Register */
+#define XLP_SPI_INTR_EN 0x10
+#define XLP_SPI_INTR_DONE BIT(0)
+#define XLP_SPI_INTR_TXTH BIT(1)
+#define XLP_SPI_INTR_RXTH BIT(2)
+#define XLP_SPI_INTR_TXUF BIT(3)
+#define XLP_SPI_INTR_RXOF BIT(4)
+
+/* SPI FIFO Threshold Register */
+#define XLP_SPI_FIFO_THRESH 0x14
+
+/* SPI FIFO Word Count Register */
+#define XLP_SPI_FIFO_WCNT 0x18
+#define XLP_SPI_RXFIFO_WCNT_MASK 0xf
+#define XLP_SPI_TXFIFO_WCNT_MASK 0xf0
+#define XLP_SPI_TXFIFO_WCNT_SHIFT 4
+
+/* SPI Transmit Data FIFO Register */
+#define XLP_SPI_TXDATA_FIFO 0x1c
+
+/* SPI Receive Data FIFO Register */
+#define XLP_SPI_RXDATA_FIFO 0x20
+
+/* SPI System Control Register */
+#define XLP_SPI_SYSCTRL 0x100
+#define XLP_SPI_SYS_RESET BIT(0)
+#define XLP_SPI_SYS_CLKDIS BIT(1)
+#define XLP_SPI_SYS_PMEN BIT(8)
+
+#define SPI_CS_OFFSET 0x40
+#define XLP_SPI_TXRXTH 0x80
+#define XLP_SPI_FIFO_SIZE 8
+#define XLP_SPI_MAX_CS 4
+#define XLP_SPI_DEFAULT_FREQ 133333333
+#define XLP_SPI_FDIV_MIN 4
+#define XLP_SPI_FDIV_MAX 65535
+/*
+ * The controller can transfer only 28 bytes reliably at a time, so
+ * split larger transfers into 28-byte chunks.
+ */
+#define XLP_SPI_XFER_SIZE 28
+
+struct xlp_spi_priv {
+ struct device dev; /* device structure */
+ void __iomem *base; /* spi registers base address */
+ const u8 *tx_buf; /* tx data buffer */
+ u8 *rx_buf; /* rx data buffer */
+ int tx_len; /* tx xfer length */
+ int rx_len; /* rx xfer length */
+ int txerrors; /* TXFIFO underflow count */
+ int rxerrors; /* RXFIFO overflow count */
+ int cs; /* slave device chip select */
+ u32 spi_clk; /* spi clock frequency */
+ bool cmd_cont; /* cs active */
+ struct completion done; /* completion notification */
+};
+
+static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
+ int cs, int regoff)
+{
+ return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
+ int regoff, u32 val)
+{
+ writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
+ int regoff, u32 val)
+{
+ writel(val, priv->base + regoff);
+}
+
+/*
+ * Setup global SPI_SYSCTRL register for all SPI channels.
+ */
+static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
+{
+ int cs;
+
+ for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
+ xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
+ XLP_SPI_SYS_RESET << cs);
+ xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
+}
+
+static int xlp_spi_setup(struct spi_device *spi)
+{
+ struct xlp_spi_priv *xspi;
+ u32 fdiv, cfg;
+ int cs;
+
+ xspi = spi_master_get_devdata(spi->master);
+ cs = spi->chip_select;
+ /*
+ * The value of fdiv must be between 4 and 65535.
+ */
+ fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
+ if (fdiv > XLP_SPI_FDIV_MAX)
+ fdiv = XLP_SPI_FDIV_MAX;
+ else if (fdiv < XLP_SPI_FDIV_MIN)
+ fdiv = XLP_SPI_FDIV_MIN;
+
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
+ cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
+ if (spi->mode & SPI_CPHA)
+ cfg |= XLP_SPI_CPHA;
+ else
+ cfg &= ~XLP_SPI_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cfg |= XLP_SPI_CPOL;
+ else
+ cfg &= ~XLP_SPI_CPOL;
+ if (!(spi->mode & SPI_CS_HIGH))
+ cfg |= XLP_SPI_CS_POL;
+ else
+ cfg &= ~XLP_SPI_CS_POL;
+ if (spi->mode & SPI_LSB_FIRST)
+ cfg |= XLP_SPI_CS_LSBFE;
+ else
+ cfg &= ~XLP_SPI_CS_LSBFE;
+
+ cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
+ if (fdiv == 4)
+ cfg |= XLP_SPI_RXCAP_EN;
+ xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);
+
+ return 0;
+}
+
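A standalone sketch of the divider selection in xlp_spi_setup() above, with the kernel's DIV_ROUND_UP() reimplemented locally:

#include <stdio.h>

#define FDIV_MIN	4
#define FDIV_MAX	65535

/* Round-up division, as the kernel's DIV_ROUND_UP() does. */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* Frequency divider selection as in xlp_spi_setup(). */
static unsigned int pick_fdiv(unsigned int spi_clk, unsigned int max_speed)
{
	unsigned int fdiv = div_round_up(spi_clk, max_speed);

	if (fdiv > FDIV_MAX)
		fdiv = FDIV_MAX;
	else if (fdiv < FDIV_MIN)
		fdiv = FDIV_MIN;
	return fdiv;
}

int main(void)
{
	/* 133.33 MHz input clock, 10 MHz requested -> divider of 14. */
	printf("fdiv = %u\n", pick_fdiv(133333333, 10000000));
	/* Requests above clk/4 are clamped to the minimum divider. */
	printf("fdiv = %u\n", pick_fdiv(133333333, 133333333));
	return 0;
}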
+static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
+{
+ u32 rx_data, rxfifo_cnt;
+ int i, j, nbytes;
+
+ rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+ rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
+ while (rxfifo_cnt) {
+ rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
+ j = 0;
+ nbytes = min(xspi->rx_len, 4);
+ for (i = nbytes - 1; i >= 0; i--, j++)
+ xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;
+
+ xspi->rx_len -= nbytes;
+ xspi->rx_buf += nbytes;
+ rxfifo_cnt--;
+ }
+}
+
+static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
+{
+ u32 tx_data, txfifo_cnt;
+ int i, j, nbytes;
+
+ txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+ txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
+ txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
+ while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
+ j = 0;
+ tx_data = 0;
+ nbytes = min(xspi->tx_len, 4);
+ for (i = nbytes - 1; i >= 0; i--, j++)
+ tx_data |= xspi->tx_buf[i] << (j * 8);
+
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
+ xspi->tx_len -= nbytes;
+ xspi->tx_buf += nbytes;
+ txfifo_cnt++;
+ }
+}
+
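The FIFO fill loop above packs the first buffer byte into the most significant byte of each 32-bit word; a short tail packs into the low bytes. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Pack up to four bytes into one FIFO word, first byte in the most
 * significant position -- mirrors the loop in xlp_spi_fill_txfifo(). */
static uint32_t pack_word(const uint8_t *buf, int nbytes)
{
	uint32_t w = 0;
	int i, j;

	for (i = nbytes - 1, j = 0; i >= 0; i--, j++)
		w |= (uint32_t)buf[i] << (j * 8);
	return w;
}

int main(void)
{
	const uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	printf("%08x\n", pack_word(buf, 4));	/* deadbeef: buf[0] in MSB */
	printf("%08x\n", pack_word(buf, 2));	/* 0000dead: tail packs low */
	return 0;
}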
+static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
+{
+ struct xlp_spi_priv *xspi = dev_id;
+ u32 stat;
+
+ stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
+ XLP_SPI_STAT_MASK;
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & XLP_SPI_TX_INT) {
+ if (xspi->tx_len)
+ xlp_spi_fill_txfifo(xspi);
+ if (stat & XLP_SPI_TX_UF)
+ xspi->txerrors++;
+ }
+
+ if (stat & XLP_SPI_RX_INT) {
+ if (xspi->rx_len)
+ xlp_spi_read_rxfifo(xspi);
+ if (stat & XLP_SPI_RX_OF)
+ xspi->rxerrors++;
+ }
+
+ /* write status back to clear interrupts */
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
+ if (stat & XLP_SPI_XFR_DONE)
+ complete(&xspi->done);
+
+ return IRQ_HANDLED;
+}
+
+static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
+ int cmd_cont)
+{
+ u32 cmd = 0;
+
+ if (xspi->tx_buf)
+ cmd |= XLP_SPI_CMD_TX_MASK;
+ if (xspi->rx_buf)
+ cmd |= XLP_SPI_CMD_RX_MASK;
+ if (cmd_cont)
+ cmd |= XLP_SPI_CMD_CONT;
+ cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
+ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
+}
+
+static int xlp_spi_xfer_block(struct xlp_spi_priv *xs,
+ const unsigned char *tx_buf,
+ unsigned char *rx_buf, int xfer_len, int cmd_cont)
+{
+ int timeout;
+ u32 intr_mask = 0;
+
+ xs->tx_buf = tx_buf;
+ xs->rx_buf = rx_buf;
+ xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
+ xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
+ xs->txerrors = xs->rxerrors = 0;
+
+ /* fill TXDATA_FIFO, then send the CMD */
+ if (xs->tx_len)
+ xlp_spi_fill_txfifo(xs);
+
+ xlp_spi_send_cmd(xs, xfer_len, cmd_cont);
+
+ /*
+ * Spurious tx interrupts have been observed, so enable tx
+ * interrupts only when a tx transfer is in progress; for
+ * rx-only transfers enable just the rx interrupts.
+ */
+ if (xs->tx_len)
+ intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
+ XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+ else
+ intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+
+ intr_mask |= XLP_SPI_INTR_DONE;
+ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);
+
+ timeout = wait_for_completion_timeout(&xs->done,
+ msecs_to_jiffies(1000));
+ /* Disable interrupts */
+ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
+ if (!timeout) {
+ dev_err(&xs->dev, "xfer timed out!\n");
+ goto out;
+ }
+ if (xs->txerrors || xs->rxerrors)
+ dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
+ xs->rxerrors, xs->txerrors, xfer_len);
+
+ return xfer_len;
+out:
+ return -ETIMEDOUT;
+}
+
+static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
+{
+ int bytesleft, sz;
+ unsigned char *rx_buf;
+ const unsigned char *tx_buf;
+
+ tx_buf = t->tx_buf;
+ rx_buf = t->rx_buf;
+ bytesleft = t->len;
+ while (bytesleft) {
+ if (bytesleft > XLP_SPI_XFER_SIZE)
+ sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+ XLP_SPI_XFER_SIZE, 1);
+ else
+ sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+ bytesleft, xs->cmd_cont);
+ if (sz < 0)
+ return sz;
+ bytesleft -= sz;
+ if (tx_buf)
+ tx_buf += sz;
+ if (rx_buf)
+ rx_buf += sz;
+ }
+ return bytesleft;
+}
+
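xlp_spi_txrx_bufs() above splits a message into 28-byte blocks, keeping chip select asserted (cmd_cont=1) for every block except the last, which uses the message's own cmd_cont. A standalone sketch of the split:

#include <stdio.h>

#define XFER_SIZE	28	/* largest block the controller handles */

int main(void)
{
	int len = 100, off = 0;

	/* Full-size blocks force cmd_cont=1; the final short (or exactly
	 * sized) block falls through to the message's own setting. */
	while (len) {
		int sz = len > XFER_SIZE ? XFER_SIZE : len;
		int cont = len > XFER_SIZE;

		printf("block at %3d: %2d bytes, cmd_cont=%d\n",
		       off, sz, cont);
		off += sz;
		len -= sz;
	}
	return 0;
}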
+static int xlp_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ xspi->cs = spi->chip_select;
+ xspi->dev = spi->dev;
+
+ if (spi_transfer_is_last(master, t))
+ xspi->cmd_cont = 0;
+ else
+ xspi->cmd_cont = 1;
+
+ if (xlp_spi_txrx_bufs(xspi, t))
+ ret = -EIO;
+
+ spi_finalize_current_transfer(master);
+ return ret;
+}
+
+static int xlp_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct xlp_spi_priv *xspi;
+ struct resource *res;
+ struct clk *clk;
+ int irq, err;
+
+ xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
+ if (!xspi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->base))
+ return PTR_ERR(xspi->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ return -EINVAL;
+ }
+ err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
+ pdev->name, xspi);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request irq %d\n", irq);
+ return err;
+ }
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "could not get spi clock\n");
+ return -ENODEV;
+ }
+ xspi->spi_clk = clk_get_rate(clk);
+
+ master = spi_alloc_master(&pdev->dev, 0);
+ if (!master) {
+ dev_err(&pdev->dev, "could not alloc master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = 0;
+ master->num_chipselect = XLP_SPI_MAX_CS;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = xlp_spi_setup;
+ master->transfer_one = xlp_spi_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&xspi->done);
+ spi_master_set_devdata(master, xspi);
+ xlp_spi_sysctl_setup(xspi);
+
+ /* register spi controller */
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "spi register master failed!\n");
+ spi_master_put(master);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id xlp_spi_dt_id[] = {
+ { .compatible = "netlogic,xlp832-spi" },
+ { },
+};
+
+static struct platform_driver xlp_spi_driver = {
+ .probe = xlp_spi_probe,
+ .driver = {
+ .name = "xlp-spi",
+ .of_match_table = xlp_spi_dt_id,
+ },
+};
+module_platform_driver(xlp_spi_driver);
+
+MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
+MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
+ break;
case GQSPI_SELECT_FLASH_CS_UPPER:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
break;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9ce2f156d382..a5f53de813d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -67,11 +67,141 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
}
static DEVICE_ATTR_RO(modalias);
+#define SPI_STATISTICS_ATTRS(field, file) \
+static ssize_t spi_master_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct spi_master *master = container_of(dev, \
+ struct spi_master, dev); \
+ return spi_statistics_##field##_show(&master->statistics, buf); \
+} \
+static struct device_attribute dev_attr_spi_master_##field = { \
+ .attr = { .name = file, .mode = S_IRUGO }, \
+ .show = spi_master_##field##_show, \
+}; \
+static ssize_t spi_device_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct spi_device *spi = container_of(dev, \
+ struct spi_device, dev); \
+ return spi_statistics_##field##_show(&spi->statistics, buf); \
+} \
+static struct device_attribute dev_attr_spi_device_##field = { \
+ .attr = { .name = file, .mode = S_IRUGO }, \
+ .show = spi_device_##field##_show, \
+}
+
+#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+ char *buf) \
+{ \
+ unsigned long flags; \
+ ssize_t len; \
+ spin_lock_irqsave(&stat->lock, flags); \
+ len = sprintf(buf, format_string, stat->field); \
+ spin_unlock_irqrestore(&stat->lock, flags); \
+ return len; \
+} \
+SPI_STATISTICS_ATTRS(name, file)
+
+#define SPI_STATISTICS_SHOW(field, format_string) \
+ SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
+ field, format_string)
+
+SPI_STATISTICS_SHOW(messages, "%lu");
+SPI_STATISTICS_SHOW(transfers, "%lu");
+SPI_STATISTICS_SHOW(errors, "%lu");
+SPI_STATISTICS_SHOW(timedout, "%lu");
+
+SPI_STATISTICS_SHOW(spi_sync, "%lu");
+SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
+SPI_STATISTICS_SHOW(spi_async, "%lu");
+
+SPI_STATISTICS_SHOW(bytes, "%llu");
+SPI_STATISTICS_SHOW(bytes_rx, "%llu");
+SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+
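The statistics attributes above are generated by token pasting: each SPI_STATISTICS_SHOW() invocation emits one show function per counter. A simplified standalone version of the pattern, with the kernel types and sysfs plumbing dropped:

#include <stdio.h>

struct spi_stats { unsigned long messages, transfers, errors; };

/* Simplified SPI_STATISTICS_SHOW(): generate a show function per field. */
#define STAT_SHOW(field)						\
static int stat_##field##_show(const struct spi_stats *s, char *buf,	\
			       int len)					\
{									\
	return snprintf(buf, len, "%lu\n", s->field);			\
}

STAT_SHOW(messages)
STAT_SHOW(transfers)
STAT_SHOW(errors)

int main(void)
{
	struct spi_stats st = { .messages = 3, .transfers = 7, .errors = 0 };
	char buf[32];

	stat_messages_show(&st, buf, sizeof(buf));
	printf("messages: %s", buf);
	stat_transfers_show(&st, buf, sizeof(buf));
	printf("transfers: %s", buf);
	return 0;
}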
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
-ATTRIBUTE_GROUPS(spi_dev);
+
+static const struct attribute_group spi_dev_group = {
+ .attrs = spi_dev_attrs,
+};
+
+static struct attribute *spi_device_statistics_attrs[] = {
+ &dev_attr_spi_device_messages.attr,
+ &dev_attr_spi_device_transfers.attr,
+ &dev_attr_spi_device_errors.attr,
+ &dev_attr_spi_device_timedout.attr,
+ &dev_attr_spi_device_spi_sync.attr,
+ &dev_attr_spi_device_spi_sync_immediate.attr,
+ &dev_attr_spi_device_spi_async.attr,
+ &dev_attr_spi_device_bytes.attr,
+ &dev_attr_spi_device_bytes_rx.attr,
+ &dev_attr_spi_device_bytes_tx.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_device_statistics_group = {
+ .name = "statistics",
+ .attrs = spi_device_statistics_attrs,
+};
+
+static const struct attribute_group *spi_dev_groups[] = {
+ &spi_dev_group,
+ &spi_device_statistics_group,
+ NULL,
+};
+
+static struct attribute *spi_master_statistics_attrs[] = {
+ &dev_attr_spi_master_messages.attr,
+ &dev_attr_spi_master_transfers.attr,
+ &dev_attr_spi_master_errors.attr,
+ &dev_attr_spi_master_timedout.attr,
+ &dev_attr_spi_master_spi_sync.attr,
+ &dev_attr_spi_master_spi_sync_immediate.attr,
+ &dev_attr_spi_master_spi_async.attr,
+ &dev_attr_spi_master_bytes.attr,
+ &dev_attr_spi_master_bytes_rx.attr,
+ &dev_attr_spi_master_bytes_tx.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_master_statistics_group = {
+ .name = "statistics",
+ .attrs = spi_master_statistics_attrs,
+};
+
+static const struct attribute_group *spi_master_groups[] = {
+ &spi_master_statistics_group,
+ NULL,
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+ struct spi_transfer *xfer,
+ struct spi_master *master)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+
+ stats->transfers++;
+
+ stats->bytes += xfer->len;
+ if ((xfer->tx_buf) &&
+ (xfer->tx_buf != master->dummy_tx))
+ stats->bytes_tx += xfer->len;
+ if ((xfer->rx_buf) &&
+ (xfer->rx_buf != master->dummy_rx))
+ stats->bytes_rx += xfer->len;
+
+ spin_unlock_irqrestore(&stats->lock, flags);
+}
+EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
@@ -249,6 +379,9 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
+
+ spin_lock_init(&spi->statistics.lock);
+
device_initialize(&spi->dev);
return spi;
}
@@ -476,21 +609,30 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
enum dma_data_direction dir)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
- const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
- const int sgs = DIV_ROUND_UP(len, desc_len);
+ int desc_len;
+ int sgs;
struct page *vm_page;
void *sg_buf;
size_t min;
int i, ret;
+ if (vmalloced_buf) {
+ desc_len = PAGE_SIZE;
+ sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
+ } else {
+ desc_len = master->max_dma_len;
+ sgs = DIV_ROUND_UP(len, desc_len);
+ }
+
ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
if (ret != 0)
return ret;
for (i = 0; i < sgs; i++) {
- min = min_t(size_t, len, desc_len);
if (vmalloced_buf) {
+ min = min_t(size_t,
+ len, desc_len - offset_in_page(buf));
vm_page = vmalloc_to_page(buf);
if (!vm_page) {
sg_free_table(sgt);
@@ -499,6 +641,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
sg_set_page(&sgt->sgl[i], vm_page,
min, offset_in_page(buf));
} else {
+ min = min_t(size_t, len, desc_len);
sg_buf = buf;
sg_set_buf(&sgt->sgl[i], sg_buf, min);
}
@@ -539,8 +682,15 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
if (!master->can_dma)
return 0;
- tx_dev = master->dma_tx->device->dev;
- rx_dev = master->dma_rx->device->dev;
+ if (master->dma_tx)
+ tx_dev = master->dma_tx->device->dev;
+ else
+ tx_dev = &master->dev;
+
+ if (master->dma_rx)
+ rx_dev = master->dma_rx->device->dev;
+ else
+ rx_dev = &master->dev;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -579,8 +729,15 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
if (!master->cur_msg_mapped || !master->can_dma)
return 0;
- tx_dev = master->dma_tx->device->dev;
- rx_dev = master->dma_rx->device->dev;
+ if (master->dma_tx)
+ tx_dev = master->dma_tx->device->dev;
+ else
+ tx_dev = &master->dev;
+
+ if (master->dma_rx)
+ rx_dev = master->dma_rx->device->dev;
+ else
+ rx_dev = &master->dev;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -689,17 +846,29 @@ static int spi_transfer_one_message(struct spi_master *master,
bool keep_cs = false;
int ret = 0;
unsigned long ms = 1;
+ struct spi_statistics *statm = &master->statistics;
+ struct spi_statistics *stats = &msg->spi->statistics;
spi_set_cs(msg->spi, true);
+ SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
+ SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
+ spi_statistics_add_transfer_stats(statm, xfer, master);
+ spi_statistics_add_transfer_stats(stats, xfer, master);
+
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&master->xfer_completion);
ret = master->transfer_one(master, msg->spi, xfer);
if (ret < 0) {
+ SPI_STATISTICS_INCREMENT_FIELD(statm,
+ errors);
+ SPI_STATISTICS_INCREMENT_FIELD(stats,
+ errors);
dev_err(&msg->spi->dev,
"SPI transfer failed: %d\n", ret);
goto out;
@@ -715,6 +884,10 @@ static int spi_transfer_one_message(struct spi_master *master,
}
if (ms == 0) {
+ SPI_STATISTICS_INCREMENT_FIELD(statm,
+ timedout);
+ SPI_STATISTICS_INCREMENT_FIELD(stats,
+ timedout);
dev_err(&msg->spi->dev,
"SPI transfer timed out\n");
msg->status = -ETIMEDOUT;
@@ -1416,10 +1589,10 @@ static struct class spi_master_class = {
.name = "spi_master",
.owner = THIS_MODULE,
.dev_release = spi_master_release,
+ .dev_groups = spi_master_groups,
};
-
/**
* spi_alloc_master - allocate SPI master controller
* @dev: the controller, possibly using the platform_bus
@@ -1584,6 +1757,8 @@ int spi_register_master(struct spi_master *master)
goto done;
}
}
+ /* add statistics */
+ spin_lock_init(&master->statistics.lock);
mutex_lock(&board_lock);
list_add_tail(&master->list, &spi_master_list);
@@ -1739,6 +1914,20 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
* other core methods are currently defined as inline functions.
*/
+static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
+{
+ if (master->bits_per_word_mask) {
+ /* Only 32 bits fit in the mask */
+ if (bits_per_word > 32)
+ return -EINVAL;
+ if (!(master->bits_per_word_mask &
+ SPI_BPW_MASK(bits_per_word)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
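The helper above relies on SPI_BPW_MASK(bits) expanding to BIT(bits - 1), as defined in <linux/spi/spi.h>, so the controller's mask is a bitmap of supported word sizes. A standalone sketch:

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define SPI_BPW_MASK(b)	BIT((b) - 1)

/* Mirror of __spi_validate_bits_per_word(): 0 on success. */
static int validate_bpw(unsigned long mask, unsigned int bpw)
{
	if (mask) {
		if (bpw > 32)	/* only 32 bits fit in the mask */
			return -1;
		if (!(mask & SPI_BPW_MASK(bpw)))
			return -1;
	}
	return 0;
}

int main(void)
{
	/* A controller supporting 8- and 16-bit words. */
	unsigned long mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

	printf("8  -> %d\n", validate_bpw(mask, 8));	/* accepted */
	printf("12 -> %d\n", validate_bpw(mask, 12));	/* rejected */
	printf("empty mask -> %d\n", validate_bpw(0, 12)); /* no check */
	return 0;
}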
/**
* spi_setup - setup SPI mode and clock rate
* @spi: the device whose settings are being modified
@@ -1797,6 +1986,9 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
+ if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
+ return -EINVAL;
+
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->master->max_speed_hz;
@@ -1859,19 +2051,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
+ if (!xfer->speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
if (master->max_speed_hz &&
xfer->speed_hz > master->max_speed_hz)
xfer->speed_hz = master->max_speed_hz;
- if (master->bits_per_word_mask) {
- /* Only 32 bits fit in the mask */
- if (xfer->bits_per_word > 32)
- return -EINVAL;
- if (!(master->bits_per_word_mask &
- BIT(xfer->bits_per_word - 1)))
- return -EINVAL;
- }
+ if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
+ return -EINVAL;
/*
* SPI transfer length should be multiple of SPI word size
@@ -1938,6 +2126,9 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
message->spi = spi;
+ SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
+
trace_spi_message_submit(message);
return master->transfer(spi, message);
@@ -2074,6 +2265,9 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
message->context = &done;
message->spi = spi;
+ SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
+
if (!bus_locked)
mutex_lock(&master->bus_lock_mutex);
@@ -2101,8 +2295,13 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
/* Push out the messages in the calling context if we
* can.
*/
- if (master->transfer == spi_queued_transfer)
+ if (master->transfer == spi_queued_transfer) {
+ SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+ spi_sync_immediate);
+ SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
+ spi_sync_immediate);
__spi_pump_messages(master, false);
+ }
wait_for_completion(&done);
status = message->status;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..fba92a526531 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -602,11 +602,11 @@ static int spidev_open(struct inode *inode, struct file *filp)
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
- status = -ENOMEM;
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
goto err_find_dev;
- }
}
+ }
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
@@ -693,6 +693,7 @@ static struct class *spidev_class;
#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "rohm,dh2228fv" },
+ { .compatible = "lineartechnology,ltc2488" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
@@ -708,7 +709,7 @@ static int spidev_probe(struct spi_device *spi)
/*
* spidev should never be referenced in DT without a specific
- * compatbile string, it is a Linux implementation thing
+ * compatible string, it is a Linux implementation thing
* rather than a description of the hardware.
*/
if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 982580af1d16..0d3b70b3bda8 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -12,7 +12,7 @@ if SPMI
config SPMI_MSM_PMIC_ARB
tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
- depends on IRQ_DOMAIN
+ select IRQ_DOMAIN
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
default ARCH_QCOM
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d7119db49cfe..bdfb3c84c3cb 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -453,8 +453,8 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
{
- struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *intr = pa->intr;
int first = pa->min_apid >> 5;
int last = pa->max_apid >> 5;
@@ -575,6 +575,22 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
return 0;
}
+static int qpnpint_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ u8 irq = d->hwirq >> 8;
+ u8 status = 0;
+
+ if (which != IRQCHIP_STATE_LINE_LEVEL)
+ return -EINVAL;
+
+ qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
+ *state = !!(status & BIT(irq));
+
+ return 0;
+}
+
static struct irq_chip pmic_arb_irqchip = {
.name = "pmic_arb",
.irq_enable = qpnpint_irq_enable,
@@ -582,6 +598,7 @@ static struct irq_chip pmic_arb_irqchip = {
.irq_mask = qpnpint_irq_mask,
.irq_unmask = qpnpint_irq_unmask,
.irq_set_type = qpnpint_irq_set_type,
+ .irq_get_irqchip_state = qpnpint_get_irqchip_state,
.flags = IRQCHIP_MASK_ON_SUSPEND
| IRQCHIP_SKIP_SET_WAKE,
};
@@ -928,8 +945,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- irq_set_handler_data(pa->irq, pa);
- irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
+ irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
err = spmi_controller_add(ctrl);
if (err)
@@ -938,8 +954,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
return 0;
err_domain_remove:
- irq_set_chained_handler(pa->irq, NULL);
- irq_set_handler_data(pa->irq, NULL);
+ irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
irq_domain_remove(pa->domain);
err_put_ctrl:
spmi_controller_put(ctrl);
@@ -951,8 +966,7 @@ static int spmi_pmic_arb_remove(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
spmi_controller_remove(ctrl);
- irq_set_chained_handler(pa->irq, NULL);
- irq_set_handler_data(pa->irq, NULL);
+ irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
irq_domain_remove(pa->domain);
spmi_controller_put(ctrl);
return 0;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 94938436aef9..11467e17bdd8 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
#include <linux/pm_runtime.h>
#include <dt-bindings/spmi/spmi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/spmi.h>
static DEFINE_IDA(ctrl_ida);
@@ -96,28 +98,42 @@ EXPORT_SYMBOL_GPL(spmi_device_remove);
static inline int
spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
{
+ int ret;
+
if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
- return ctrl->cmd(ctrl, opcode, sid);
+ ret = ctrl->cmd(ctrl, opcode, sid);
+ trace_spmi_cmd(opcode, sid, ret);
+ return ret;
}
static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
u8 sid, u16 addr, u8 *buf, size_t len)
{
+ int ret;
+
if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
- return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_read_begin(opcode, sid, addr);
+ ret = ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_read_end(opcode, sid, addr, ret, len, buf);
+ return ret;
}
static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
u8 sid, u16 addr, const u8 *buf, size_t len)
{
+ int ret;
+
if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
- return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_write_begin(opcode, sid, addr, len, buf);
+ ret = ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_write_end(opcode, sid, addr, ret);
+ return ret;
}
/**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7f6cae5beb90..39d950584c9f 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -56,8 +56,6 @@ source "drivers/staging/vt6656/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/sm7xxfb/Kconfig"
-
source "drivers/staging/sm750fb/Kconfig"
source "drivers/staging/xgifb/Kconfig"
@@ -74,12 +72,12 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
+source "drivers/staging/rdma/Kconfig"
+
source "drivers/staging/android/Kconfig"
source "drivers/staging/board/Kconfig"
-source "drivers/staging/ozwpan/Kconfig"
-
source "drivers/staging/gdm72xx/Kconfig"
source "drivers/staging/gdm724x/Kconfig"
@@ -112,4 +110,6 @@ source "drivers/staging/fsl-mc/Kconfig"
source "drivers/staging/wilc1000/Kconfig"
+source "drivers/staging/most/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 347f6477aa3e..e4f33d91872b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
@@ -30,9 +29,9 @@ obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_STAGING_RDMA) += rdma/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_STAGING_BOARD) += board/
-obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
@@ -48,3 +47,4 @@ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
+obj-$(CONFIG_MOST) += most/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 24d657b3ab99..68307121c9c1 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -20,7 +20,8 @@ config ANDROID_TIMED_OUTPUT
config ANDROID_TIMED_GPIO
tristate "Android timed gpio driver"
- depends on GPIOLIB && ANDROID_TIMED_OUTPUT
+ depends on GPIOLIB || COMPILE_TEST
+ depends on ANDROID_TIMED_OUTPUT
default n
---help---
Unlike generic gpio is to allow programs to access and manipulate gpio
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 06954cdf3dba..20288fc53946 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -2,16 +2,8 @@ TODO:
- checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- - make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
- - kuid_t should never be exposed to user space as it is
- kernel internal type. Data structure for this kuid_t is:
- typedef struct {
- uid_t val;
- } kuid_t;
- - This bug is introduced by Xiong Zhou in the patch bd471258f2e09
- - ("staging: android: logger: use kuid_t instead of uid_t")
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
+Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index c5c037ccf32c..60200a3da821 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -388,7 +388,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
/* ... and allocate the backing shmem file */
vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
- if (unlikely(IS_ERR(vmfile))) {
+ if (IS_ERR(vmfile)) {
ret = PTR_ERR(vmfile);
goto out;
}
@@ -660,7 +660,7 @@ restart:
if (page_range_subsumed_by_range(range, pgstart, pgend))
return 0;
if (page_range_in_range(range, pgstart, pgend)) {
- pgstart = min_t(size_t, range->pgstart, pgstart),
+ pgstart = min_t(size_t, range->pgstart, pgstart);
pgend = max_t(size_t, range->pgend, pgend);
purged |= range->purged;
range_del(range);
@@ -863,14 +863,9 @@ static int __init ashmem_init(void)
static void __exit ashmem_exit(void)
{
- int ret;
-
unregister_shrinker(&ashmem_shrinker);
- ret = misc_deregister(&ashmem_misc);
- if (unlikely(ret))
- pr_err("failed to unregister misc device!\n");
-
+ misc_deregister(&ashmem_misc);
kmem_cache_destroy(ashmem_range_cachep);
kmem_cache_destroy(ashmem_area_cachep);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 6f4811263557..217aa537c4eb 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -997,7 +997,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
mutex_unlock(&buffer->lock);
}
-static struct vm_operations_struct ion_vma_ops = {
+static const struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
@@ -1103,10 +1103,10 @@ static struct dma_buf_ops dma_buf_ops = {
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_handle *handle)
{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
@@ -1466,7 +1466,6 @@ static const struct file_operations debug_heap_fops = {
.release = single_release,
};
-#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
@@ -1474,15 +1473,14 @@ static int debug_shrink_set(void *data, u64 val)
int objs;
sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
+ sc.nr_to_scan = val;
- if (!val)
- return 0;
-
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
+ if (!val) {
+ objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+ }
- heap->shrinker.shrink(&heap->shrinker, &sc);
+ heap->shrinker.scan_objects(&heap->shrinker, &sc);
return 0;
}
@@ -1495,14 +1493,13 @@ static int debug_shrink_get(void *data, u64 *val)
sc.gfp_mask = -1;
sc.nr_to_scan = 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
*val = objs;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
-#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
@@ -1540,8 +1537,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
path, heap->name);
}
-#ifdef DEBUG_HEAP_SHRINKER
- if (heap->shrinker.shrink) {
+ if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
@@ -1556,7 +1552,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
path, debug_name);
}
}
-#endif
+
up_write(&dev->lock);
}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 54746157d799..195c41d7bd53 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
err:
sg = table->sgl;
for (i -= 1; i >= 0; i--) {
- gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
sg->length);
sg = sg_next(sg);
}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
- gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
sg->length);
}
chunk_heap->allocated -= allocated_size;
@@ -173,8 +173,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
- heap_data->size, heap_data->align);
+ pr_debug("%s: base %lu size %zu align %ld\n", __func__,
+ chunk_heap->base, heap_data->size, heap_data->align);
return &chunk_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index f4211f1be488..0b2448c32495 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -73,8 +73,8 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (!info->table)
goto free_mem;
- if (dma_common_get_sgtable
- (dev, info->table, info->cpu_addr, info->handle, len))
+ if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
+ len))
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 4b88f11e52d3..19ad3aba499a 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -116,7 +116,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan)
{
- int freed;
+ int freed = 0;
bool high;
if (current_is_kswapd())
@@ -127,7 +127,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
- for (freed = 0; freed < nr_to_scan; freed++) {
+ while (freed < nr_to_scan) {
struct page *page;
mutex_lock(&pool->mutex);
@@ -141,6 +141,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
}
mutex_unlock(&pool->mutex);
ion_page_pool_free_pages(pool, page);
+ freed += (1 << pool->order);
}
return freed;
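With the fix above, freed is accounted in pages rather than pool entries: each entry releases 2^order pages, so the loop exits once enough pages, not entries, have been reclaimed. A standalone sketch of the accounting:

#include <stdio.h>

int main(void)
{
	int order = 2;		/* pool of order-2 (4-page) allocations */
	int nr_to_scan = 10;	/* shrinker asks for 10 pages */
	int freed = 0, entries = 0;

	while (freed < nr_to_scan) {
		entries++;
		freed += 1 << order;	/* one entry frees 2^order pages */
	}
	printf("freed %d pages by releasing %d pool entries\n",
	       freed, entries);
	return 0;
}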
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index da2a63c0a9ba..7a7a9a047230 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -212,14 +212,26 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
{
struct ion_system_heap *sys_heap;
int nr_total = 0;
- int i;
+ int i, nr_freed;
+ int only_scan = 0;
sys_heap = container_of(heap, struct ion_system_heap, heap);
+ if (!nr_to_scan)
+ only_scan = 1;
+
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->pools[i];
- nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ nr_total += nr_freed;
+
+ if (!only_scan) {
+ nr_to_scan -= nr_freed;
+ /* shrink completed */
+ if (nr_to_scan <= 0)
+ break;
+ }
}
return nr_total;
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 7d6e6b6bc894..b8dcf5a26cc4 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -269,7 +269,8 @@ static int ion_test_remove(struct platform_device *pdev)
if (!testdev)
return -ENODATA;
- return misc_deregister(&testdev->misc);
+ misc_deregister(&testdev->misc);
+ return 0;
}
static struct platform_device *ion_test_pdev;
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index a21b79fb4c8e..61f8a3aede96 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -337,11 +337,11 @@ int sync_fence_wait(struct sync_fence *fence, long timeout);
#ifdef CONFIG_DEBUG_FS
-extern void sync_timeline_debug_add(struct sync_timeline *obj);
-extern void sync_timeline_debug_remove(struct sync_timeline *obj);
-extern void sync_fence_debug_add(struct sync_fence *fence);
-extern void sync_fence_debug_remove(struct sync_fence *fence);
-extern void sync_dump(void);
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_fence_debug_add(struct sync_fence *fence);
+void sync_fence_debug_remove(struct sync_fence *fence);
+void sync_dump(void);
#else
# define sync_timeline_debug_add(obj)
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index 938a35cd99bb..ce11726f1a6c 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -61,9 +61,9 @@ static int gpio_get_time(struct timed_output_dev *dev)
static void gpio_enable(struct timed_output_dev *dev, int value)
{
- struct timed_gpio_data *data =
+ struct timed_gpio_data *data =
container_of(dev, struct timed_gpio_data, dev);
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index b8ee81840666..3f287c48e082 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,6 +1,6 @@
config STAGING_BOARD
bool "Staging Board Support"
- depends on OF_ADDRESS
+ depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
help
Select to enable per-board staging support code.
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 81df77bd55cc..9c41652ee908 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
.pdev = &lcdc0_device,
.clocks = lcdc0_clocks,
.nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "a4lc",
+ .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
},
};
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 29d456e29f38..3eb5eb8f069c 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -135,6 +135,40 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
return error;
}
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static int board_staging_add_dev_domain(struct platform_device *pdev,
+ const char *domain)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ struct device_node *np;
+
+ np = of_find_node_by_path(domain);
+ if (!np) {
+ pr_err("Cannot find domain node %s\n", domain);
+ return -ENOENT;
+ }
+
+ pd_args.np = np;
+ pd_args.args_count = 0;
+ pd = of_genpd_get_from_provider(&pd_args);
+ if (IS_ERR(pd)) {
+ pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
+ return PTR_ERR(pd);
+
+ }
+ pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
+
+ return pm_genpd_add_device(pd, &pdev->dev);
+}
+#else
+static inline int board_staging_add_dev_domain(struct platform_device *pdev,
+ const char *domain)
+{
+ return 0;
+}
+#endif
+
int __init board_staging_register_device(const struct board_staging_dev *dev)
{
struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@ int __init board_staging_register_device(const struct board_staging_dev *dev)
}
if (dev->domain)
- __pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL);
+ board_staging_add_dev_domain(pdev, dev->domain);
return error;
}
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 5455bf3d5a91..b8e2f611fd47 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -19,6 +19,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7dee73dfbf88..57e71f9f14a2 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,7 +2,7 @@ config COMEDI
tristate "Data acquisition support (comedi)"
depends on m
---help---
- Enable support a wide range of data acquisition devices
+ Enable support for a wide range of data acquisition devices
for Linux.
if COMEDI
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 25848244c4b1..f356386d833a 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -202,7 +202,8 @@ static int get_compat_cmd(struct comedi_cmd __user *cmd,
err |= __get_user(temp.uint, &cmd32->stop_arg);
err |= __put_user(temp.uint, &cmd->stop_arg);
err |= __get_user(temp.uptr, &cmd32->chanlist);
- err |= __put_user(compat_ptr(temp.uptr), &cmd->chanlist);
+ err |= __put_user((unsigned int __force *)compat_ptr(temp.uptr),
+ &cmd->chanlist);
err |= __get_user(temp.uint, &cmd32->chanlist_len);
err |= __put_user(temp.uint, &cmd->chanlist_len);
err |= __get_user(temp.uptr, &cmd32->data);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 985d94b6cbfd..0e8a45102933 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -215,7 +215,6 @@ static struct comedi_subdevice
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
- BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
mutex_lock(&comedi_subdevice_minor_table_lock);
s = comedi_subdevice_minor_table[i];
if (s && s->device != dev)
@@ -228,7 +227,6 @@ static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
{
struct comedi_device *dev;
- BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
mutex_lock(&comedi_board_minor_table_lock);
dev = comedi_dev_get(comedi_board_minor_table[minor]);
mutex_unlock(&comedi_board_minor_table_lock);
@@ -241,7 +239,6 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
- BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
mutex_lock(&comedi_subdevice_minor_table_lock);
s = comedi_subdevice_minor_table[i];
dev = comedi_dev_get(s ? s->device : NULL);
@@ -2159,7 +2156,7 @@ static void comedi_vm_close(struct vm_area_struct *area)
comedi_buf_map_put(bm);
}
-static struct vm_operations_struct comedi_vm_ops = {
+static const struct vm_operations_struct comedi_vm_ops = {
.open = comedi_vm_open,
.close = comedi_vm_close,
};
@@ -2599,14 +2596,14 @@ static int comedi_open(struct inode *inode, struct file *file)
cfp->dev = dev;
mutex_lock(&dev->mutex);
- if (!dev->attached && !capable(CAP_NET_ADMIN)) {
- dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
+ if (!dev->attached && !capable(CAP_SYS_ADMIN)) {
+ dev_dbg(dev->class_dev, "not attached and not CAP_SYS_ADMIN\n");
rc = -ENODEV;
goto out;
}
if (dev->attached && dev->use_count == 0) {
if (!try_module_get(dev->driver->module)) {
- rc = -ENOSYS;
+ rc = -ENXIO;
goto out;
}
if (dev->open) {
@@ -2777,12 +2774,6 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
return dev;
}
-static void comedi_free_board_minor(unsigned minor)
-{
- BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
- comedi_free_board_dev(comedi_clear_board_minor(minor));
-}
-
void comedi_release_hardware_device(struct device *hardware_device)
{
int minor;
@@ -2838,12 +2829,10 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
if (!s)
return;
- if (s->minor < 0)
+ if (s->minor < COMEDI_NUM_BOARD_MINORS ||
+ s->minor >= COMEDI_NUM_MINORS)
return;
- BUG_ON(s->minor >= COMEDI_NUM_MINORS);
- BUG_ON(s->minor < COMEDI_NUM_BOARD_MINORS);
-
i = s->minor - COMEDI_NUM_BOARD_MINORS;
mutex_lock(&comedi_subdevice_minor_table_lock);
if (s == comedi_subdevice_minor_table[i])
@@ -2857,10 +2846,13 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
static void comedi_cleanup_board_minors(void)
{
+ struct comedi_device *dev;
unsigned i;
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++)
- comedi_free_board_minor(i);
+ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
+ dev = comedi_clear_board_minor(i);
+ comedi_free_board_dev(dev);
+ }
}
static int __init comedi_init(void)
@@ -2932,14 +2924,7 @@ module_init(comedi_init);
static void __exit comedi_cleanup(void)
{
- int i;
-
comedi_cleanup_board_minors();
- for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i)
- BUG_ON(comedi_board_minor_table[i]);
- for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i)
- BUG_ON(comedi_subdevice_minor_table[i]);
-
class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index ed0b60c925de..b03bc6639f79 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -820,7 +820,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"driver '%s' does not support attach using comedi_config\n",
driv->driver_name);
module_put(driv->module);
- ret = -ENOSYS;
+ ret = -EIO;
goto out;
}
dev->driver = driv;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index fa99c8ca4f95..f0c0d58383ca 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -1,22 +1,3 @@
-/* Digital Input IRQ Function Selection */
-#define APCI1564_DI_INT_OR (0 << 1)
-#define APCI1564_DI_INT_AND (1 << 1)
-
-/* Digital Input Interrupt Enable Disable. */
-#define APCI1564_DI_INT_ENABLE 0x4
-#define APCI1564_DI_INT_DISABLE 0xfffffffb
-
-/* Digital Output Interrupt Enable Disable. */
-#define APCI1564_DO_VCC_INT_ENABLE 0x1
-#define APCI1564_DO_VCC_INT_DISABLE 0xfffffffe
-#define APCI1564_DO_CC_INT_ENABLE 0x2
-#define APCI1564_DO_CC_INT_DISABLE 0xfffffffd
-
-/* TIMER COUNTER WATCHDOG DEFINES */
-#define ADDIDATA_TIMER 0
-#define ADDIDATA_COUNTER 1
-#define ADDIDATA_WATCHDOG 2
-
static int apci1564_timer_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -27,15 +8,16 @@ static int apci1564_timer_insn_config(struct comedi_device *dev,
devpriv->tsk_current = current;
- /* First Stop The Timer */
+ /* Stop the timer */
ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= 0xfffff9fe;
- /* Stop The Timer */
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_ENA);
outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
if (data[1] == 1) {
/* Enable timer int & disable all the other int sources */
- outl(0x02, devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(ADDI_TCW_CTRL_IRQ_ENA,
+ devpriv->timer + ADDI_TCW_CTRL_REG);
outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
@@ -59,9 +41,11 @@ static int apci1564_timer_insn_config(struct comedi_device *dev,
outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= 0xfff719e2;
- ctrl |= (2 << 13) | 0x10;
- /* mode 2 */
+ ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
+ ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+ ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
+ ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
return insn->n;
@@ -76,13 +60,13 @@ static int apci1564_timer_insn_write(struct comedi_device *dev,
unsigned int ctrl;
ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
switch (data[1]) {
case 0: /* Stop The Timer */
- ctrl &= 0xfffff9fe;
+ ctrl &= ~ADDI_TCW_CTRL_ENA;
break;
case 1: /* Enable the Timer */
- ctrl &= 0xfffff9ff;
- ctrl |= 0x1;
+ ctrl |= ADDI_TCW_CTRL_ENA;
break;
}
outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
@@ -98,7 +82,8 @@ static int apci1564_timer_insn_read(struct comedi_device *dev,
struct apci1564_private *devpriv = dev->private;
/* Stores the status of the Timer */
- data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) & 0x1;
+ data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) &
+ ADDI_TCW_STATUS_OVERFLOW;
/* Stores the Actual value of the Timer */
data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
@@ -118,35 +103,34 @@ static int apci1564_counter_insn_config(struct comedi_device *dev,
devpriv->tsk_current = current;
- /* First Stop The Counter */
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- ctrl &= 0xfffff9fe;
/* Stop the counter */
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_ENA);
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
/* Set the reload value */
outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
- /* - Enable the counter mode */
-
- ctrl &= 0xfffc19e2;
- ctrl |= 0x80000 | (data[4] << 16);
+ /* Set the mode */
+ ctrl &= ~(ADDI_TCW_CTRL_EXT_CLK_MASK | ADDI_TCW_CTRL_MODE_MASK |
+ ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+ ADDI_TCW_CTRL_WARN_ENA);
+ ctrl |= ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE(data[4]);
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
/* Enable or Disable Interrupt */
- ctrl &= 0xfffff9fd;
- ctrl |= (data[1] << 1);
+ if (data[1])
+ ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
+ else
+ ctrl &= ~ADDI_TCW_CTRL_IRQ_ENA;
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
/* Set the Up/Down selection */
- ctrl &= 0xfffbf9ff;
- ctrl |= (data[6] << 18);
+ if (data[6])
+ ctrl |= ADDI_TCW_CTRL_CNT_UP;
+ else
+ ctrl &= ~ADDI_TCW_CTRL_CNT_UP;
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
return insn->n;
@@ -163,17 +147,16 @@ static int apci1564_counter_insn_write(struct comedi_device *dev,
unsigned int ctrl;
ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
switch (data[1]) {
case 0: /* Stops the Counter subdevice */
ctrl = 0;
break;
case 1: /* Start the Counter subdevice */
- ctrl &= 0xfffff9ff;
- ctrl |= 0x1;
+ ctrl |= ADDI_TCW_CTRL_ENA;
break;
case 2: /* Clears the Counter subdevice */
- ctrl &= 0xfffff9ff;
- ctrl |= 0x400;
+ ctrl |= ADDI_TCW_CTRL_GATE;
break;
}
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
@@ -195,10 +178,10 @@ static int apci1564_counter_insn_read(struct comedi_device *dev,
data[0] = inl(iobase + ADDI_TCW_VAL_REG);
status = inl(iobase + ADDI_TCW_STATUS_REG);
- data[1] = (status >> 1) & 1; /* software trigger status */
- data[2] = (status >> 2) & 1; /* hardware trigger status */
- data[3] = (status >> 3) & 1; /* software clear status */
- data[4] = (status >> 0) & 1; /* overflow status */
+ data[1] = (status & ADDI_TCW_STATUS_SOFT_TRIG) ? 1 : 0;
+ data[2] = (status & ADDI_TCW_STATUS_HARDWARE_TRIG) ? 1 : 0;
+ data[3] = (status & ADDI_TCW_STATUS_SOFT_CLR) ? 1 : 0;
+ data[4] = (status & ADDI_TCW_STATUS_OVERFLOW) ? 1 : 0;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 1f2f78186d58..375707497896 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -22,54 +22,50 @@ static int apci3501_config_insn_timer(struct comedi_device *dev,
unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
+ unsigned int ctrl;
+
+ if (data[0] != ADDIDATA_WATCHDOG &&
+ data[0] != ADDIDATA_TIMER)
+ return -EINVAL;
devpriv->tsk_Current = current;
- if (data[0] == ADDIDATA_WATCHDOG) {
-
- devpriv->b_TimerSelectMode = ADDIDATA_WATCHDOG;
- /* Disable the watchdog */
- outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
-
- if (data[1] == 1) {
- /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
- } else {
- /* disable Timer interrupt */
- outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
- }
- outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
- outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
+ devpriv->timer_mode = data[0];
- /* Set the mode (e2->e0) */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG) | 0xFFF819E0UL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ /* first, disable the watchdog or stop the timer */
+ if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
+ ctrl = 0;
+ } else {
+ ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_ENA);
}
-
- else if (data[0] == ADDIDATA_TIMER) {
- /* First Stop The Timer */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
- if (data[1] == 1) {
- /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
- } else {
- /* disable Timer interrupt */
- outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
- }
-
- outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
- outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
-
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
+
+ /* enable/disable the timer interrupt */
+ ctrl = (data[1] == 1) ? ADDI_TCW_CTRL_IRQ_ENA : 0;
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
+
+ outl(data[2], devpriv->tcw + ADDI_TCW_TIMEBASE_REG);
+ outl(data[3], devpriv->tcw + ADDI_TCW_RELOAD_REG);
+
+ ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+ if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
+ /* Set the mode (e2->e0) NOTE: this doesn't look correct */
+ ctrl |= ~(ADDI_TCW_CTRL_CNT_UP | ADDI_TCW_CTRL_EXT_CLK_MASK |
+ ADDI_TCW_CTRL_MODE_MASK | ADDI_TCW_CTRL_GATE |
+ ADDI_TCW_CTRL_TRIG | ADDI_TCW_CTRL_TIMER_ENA |
+ ADDI_TCW_CTRL_RESET_ENA | ADDI_TCW_CTRL_WARN_ENA |
+ ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_ENA);
+ } else {
/* mode 2 */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 =
- (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
+ ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+ ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
+ ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
}
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
return insn->n;
}
@@ -92,49 +88,27 @@ static int apci3501_write_insn_timer(struct comedi_device *dev,
unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
-
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
-
- if (data[1] == 1) {
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- /* Enable the Watchdog */
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- } else if (data[1] == 0) { /* Stop The Watchdog */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
- } else if (data[1] == 2) {
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- }
- }
-
- if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
- if (data[1] == 1) {
-
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- /* Enable the Timer */
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- } else if (data[1] == 0) {
- /* Stop The Timer */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- }
-
- else if (data[1] == 2) {
- /* Trigger the Timer */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ unsigned int ctrl;
+
+ if (devpriv->timer_mode == ADDIDATA_WATCHDOG ||
+ devpriv->timer_mode == ADDIDATA_TIMER) {
+ ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
+
+ if (data[1] == 1) { /* enable */
+ ctrl |= ADDI_TCW_CTRL_ENA;
+ } else if (data[1] == 0) { /* stop */
+ if (devpriv->timer_mode == ADDIDATA_WATCHDOG)
+ ctrl = 0;
+ else
+ ctrl &= ~ADDI_TCW_CTRL_ENA;
+ } else if (data[1] == 2) { /* trigger */
+ ctrl |= ADDI_TCW_CTRL_TRIG;
}
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
}
- inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
+ inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
return insn->n;
}
@@ -155,19 +129,13 @@ static int apci3501_read_insn_timer(struct comedi_device *dev,
{
struct apci3501_private *devpriv = dev->private;
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
- data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
- data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
- }
+ if (devpriv->timer_mode != ADDIDATA_TIMER &&
+ devpriv->timer_mode != ADDIDATA_WATCHDOG)
+ return -EINVAL;
- else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
- data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
- data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
- }
+ data[0] = inl(devpriv->tcw + ADDI_TCW_STATUS_REG) &
+ ADDI_TCW_STATUS_OVERFLOW;
+ data[1] = inl(devpriv->tcw + ADDI_TCW_VAL_REG);
- else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
- && (devpriv->b_TimerSelectMode != ADDIDATA_WATCHDOG)) {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
- }
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 33e58b9a21b2..f1ccfbd4c578 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -44,12 +44,12 @@
* 0x48 - 0x64 Timer 12-Bit
*/
#define APCI1564_EEPROM_REG 0x00
-#define APCI1564_EEPROM_VCC_STATUS (1 << 8)
+#define APCI1564_EEPROM_VCC_STATUS BIT(8)
#define APCI1564_EEPROM_TO_REV(x) (((x) >> 4) & 0xf)
-#define APCI1564_EEPROM_DI (1 << 3)
-#define APCI1564_EEPROM_DO (1 << 2)
-#define APCI1564_EEPROM_CS (1 << 1)
-#define APCI1564_EEPROM_CLK (1 << 0)
+#define APCI1564_EEPROM_DI BIT(3)
+#define APCI1564_EEPROM_DO BIT(2)
+#define APCI1564_EEPROM_CS BIT(1)
+#define APCI1564_EEPROM_CLK BIT(0)
#define APCI1564_REV1_TIMER_IOBASE 0x04
#define APCI1564_REV2_MAIN_IOBASE 0x04
#define APCI1564_REV2_TIMER_IOBASE 0x48
@@ -79,10 +79,17 @@
#define APCI1564_DI_INT_MODE2_REG 0x08
#define APCI1564_DI_INT_STATUS_REG 0x0c
#define APCI1564_DI_IRQ_REG 0x10
+#define APCI1564_DI_IRQ_ENA BIT(2)
+#define APCI1564_DI_IRQ_MODE BIT(1) /* 1=AND, 0=OR */
#define APCI1564_DO_REG 0x14
#define APCI1564_DO_INT_CTRL_REG 0x18
+#define APCI1564_DO_INT_CTRL_CC_INT_ENA BIT(1)
+#define APCI1564_DO_INT_CTRL_VCC_INT_ENA BIT(0)
#define APCI1564_DO_INT_STATUS_REG 0x1c
+#define APCI1564_DO_INT_STATUS_CC BIT(1)
+#define APCI1564_DO_INT_STATUS_VCC BIT(0)
#define APCI1564_DO_IRQ_REG 0x20
+#define APCI1564_DO_IRQ_INTR BIT(0)
#define APCI1564_WDOG_REG 0x24
#define APCI1564_WDOG_RELOAD_REG 0x28
#define APCI1564_WDOG_TIMEBASE_REG 0x2c
@@ -159,9 +166,9 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
unsigned int chan;
status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
- if (status & APCI1564_DI_INT_ENABLE) {
+ if (status & APCI1564_DI_IRQ_ENA) {
/* disable the interrupt */
- outl(status & APCI1564_DI_INT_DISABLE,
+ outl(status & ~APCI1564_DI_IRQ_ENA,
dev->iobase + APCI1564_DI_IRQ_REG);
s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
@@ -300,11 +307,9 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
break;
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
- if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
- APCI1564_DI_INT_OR)) {
+ if (devpriv->ctrl != APCI1564_DI_IRQ_ENA) {
/* switching to 'OR' mode */
- devpriv->ctrl = APCI1564_DI_INT_ENABLE |
- APCI1564_DI_INT_OR;
+ devpriv->ctrl = APCI1564_DI_IRQ_ENA;
/* wipe old channels */
devpriv->mode1 = 0;
devpriv->mode2 = 0;
@@ -318,11 +323,11 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 |= data[5] << shift;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
- if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
- APCI1564_DI_INT_AND)) {
+ if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
+ APCI1564_DI_IRQ_MODE)) {
/* switching to 'AND' mode */
- devpriv->ctrl = APCI1564_DI_INT_ENABLE |
- APCI1564_DI_INT_AND;
+ devpriv->ctrl = APCI1564_DI_IRQ_ENA |
+ APCI1564_DI_IRQ_MODE;
/* wipe old channels */
devpriv->mode1 = 0;
devpriv->mode2 = 0;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 73786a3f3df9..40ff91411139 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -27,27 +27,21 @@
#include <linux/sched.h>
#include "../comedi_pci.h"
+#include "addi_tcw.h"
#include "amcc_s5933.h"
/*
* PCI bar 1 register I/O map
*/
#define APCI3501_AO_CTRL_STATUS_REG 0x00
-#define APCI3501_AO_CTRL_BIPOLAR (1 << 0)
-#define APCI3501_AO_STATUS_READY (1 << 8)
+#define APCI3501_AO_CTRL_BIPOLAR BIT(0)
+#define APCI3501_AO_STATUS_READY BIT(8)
#define APCI3501_AO_DATA_REG 0x04
#define APCI3501_AO_DATA_CHAN(x) ((x) << 0)
#define APCI3501_AO_DATA_VAL(x) ((x) << 8)
-#define APCI3501_AO_DATA_BIPOLAR (1 << 31)
+#define APCI3501_AO_DATA_BIPOLAR BIT(31)
#define APCI3501_AO_TRIG_SCS_REG 0x08
-#define APCI3501_TIMER_SYNC_REG 0x20
-#define APCI3501_TIMER_RELOAD_REG 0x24
-#define APCI3501_TIMER_TIMEBASE_REG 0x28
-#define APCI3501_TIMER_CTRL_REG 0x2c
-#define APCI3501_TIMER_STATUS_REG 0x30
-#define APCI3501_TIMER_IRQ_REG 0x34
-#define APCI3501_TIMER_WARN_RELOAD_REG 0x38
-#define APCI3501_TIMER_WARN_TIMEBASE_REG 0x3c
+#define APCI3501_TIMER_BASE 0x20
#define APCI3501_DO_REG 0x40
#define APCI3501_DI_REG 0x50
@@ -72,9 +66,10 @@
#define EEPROM_TIMER_WATCHDOG_COUNTER 10
struct apci3501_private {
- int i_IobaseAmcc;
+ unsigned long amcc;
+ unsigned long tcw;
struct task_struct *tsk_Current;
- unsigned char b_TimerSelectMode;
+ unsigned char timer_mode;
};
static struct comedi_lrange apci3501_ao_range = {
@@ -222,11 +217,10 @@ static unsigned short apci3501_eeprom_readw(unsigned long iobase,
static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
{
struct apci3501_private *devpriv = dev->private;
- unsigned long iobase = devpriv->i_IobaseAmcc;
unsigned char nfuncs;
int i;
- nfuncs = apci3501_eeprom_readw(iobase, 10) & 0xff;
+ nfuncs = apci3501_eeprom_readw(devpriv->amcc, 10) & 0xff;
/* Read functionality details */
for (i = 0; i < nfuncs; i++) {
@@ -235,11 +229,11 @@ static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
unsigned char func;
unsigned short val;
- func = apci3501_eeprom_readw(iobase, 12 + offset) & 0x3f;
- addr = apci3501_eeprom_readw(iobase, 14 + offset);
+ func = apci3501_eeprom_readw(devpriv->amcc, 12 + offset) & 0x3f;
+ addr = apci3501_eeprom_readw(devpriv->amcc, 14 + offset);
if (func == EEPROM_ANALOGOUTPUT) {
- val = apci3501_eeprom_readw(iobase, addr + 10);
+ val = apci3501_eeprom_readw(devpriv->amcc, addr + 10);
return (val >> 4) & 0x3ff;
}
}
@@ -254,7 +248,7 @@ static int apci3501_eeprom_insn_read(struct comedi_device *dev,
struct apci3501_private *devpriv = dev->private;
unsigned short addr = CR_CHAN(insn->chanspec);
- data[0] = apci3501_eeprom_readw(devpriv->i_IobaseAmcc, 2 * addr);
+ data[0] = apci3501_eeprom_readw(devpriv->amcc, 2 * addr);
return insn->n;
}
@@ -263,26 +257,29 @@ static irqreturn_t apci3501_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct apci3501_private *devpriv = dev->private;
- unsigned int ui_Timer_AOWatchdog;
- unsigned long ul_Command1;
+ unsigned int status;
+ unsigned int ctrl;
/* Disable Interrupt */
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FDul;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+ ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_IRQ_ENA);
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
- ui_Timer_AOWatchdog = inl(dev->iobase + APCI3501_TIMER_IRQ_REG) & 0x1;
- if ((!ui_Timer_AOWatchdog)) {
+ status = inl(devpriv->tcw + ADDI_TCW_IRQ_REG);
+ if (!(status & ADDI_TCW_IRQ)) {
dev_err(dev->class_dev, "IRQ from unknown source\n");
return IRQ_NONE;
}
/* Send a signal from kernel to user space and re-enable the interrupt */
send_sig(SIGIO, devpriv->tsk_Current, 0);
- ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FDul) | 1 << 1;
- outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
- inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
+ ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+ ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+ ADDI_TCW_CTRL_IRQ_ENA);
+ ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
+ outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
+ inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
return IRQ_HANDLED;
}
@@ -334,8 +331,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ devpriv->amcc = pci_resource_start(pcidev, 0);
dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+ devpriv->tcw = dev->iobase + APCI3501_TIMER_BASE;
ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
index 8794d4cbbfb0..db6d5a4e8889 100644
--- a/drivers/staging/comedi/drivers/addi_tcw.h
+++ b/drivers/staging/comedi/drivers/addi_tcw.h
@@ -10,44 +10,51 @@
#define ADDI_TCW_VAL_REG 0x00
#define ADDI_TCW_SYNC_REG 0x00
-#define ADDI_TCW_SYNC_CTR_TRIG (1 << 8)
-#define ADDI_TCW_SYNC_CTR_DIS (1 << 7)
-#define ADDI_TCW_SYNC_CTR_ENA (1 << 6)
-#define ADDI_TCW_SYNC_TIMER_TRIG (1 << 5)
-#define ADDI_TCW_SYNC_TIMER_DIS (1 << 4)
-#define ADDI_TCW_SYNC_TIMER_ENA (1 << 3)
-#define ADDI_TCW_SYNC_WDOG_TRIG (1 << 2)
-#define ADDI_TCW_SYNC_WDOG_DIS (1 << 1)
-#define ADDI_TCW_SYNC_WDOG_ENA (1 << 0)
+#define ADDI_TCW_SYNC_CTR_TRIG BIT(8)
+#define ADDI_TCW_SYNC_CTR_DIS BIT(7)
+#define ADDI_TCW_SYNC_CTR_ENA BIT(6)
+#define ADDI_TCW_SYNC_TIMER_TRIG BIT(5)
+#define ADDI_TCW_SYNC_TIMER_DIS BIT(4)
+#define ADDI_TCW_SYNC_TIMER_ENA BIT(3)
+#define ADDI_TCW_SYNC_WDOG_TRIG BIT(2)
+#define ADDI_TCW_SYNC_WDOG_DIS BIT(1)
+#define ADDI_TCW_SYNC_WDOG_ENA BIT(0)
#define ADDI_TCW_RELOAD_REG 0x04
#define ADDI_TCW_TIMEBASE_REG 0x08
#define ADDI_TCW_CTRL_REG 0x0c
-#define ADDI_TCW_CTRL_EXT_CLK_STATUS (1 << 21)
-#define ADDI_TCW_CTRL_CASCADE (1 << 20)
-#define ADDI_TCW_CTRL_CNTR_ENA (1 << 19)
-#define ADDI_TCW_CTRL_CNT_UP (1 << 18)
-#define ADDI_TCW_CTRL_EXT_CLK(x) ((x) << 16)
-#define ADDI_TCW_CTRL_OUT(x) ((x) << 11)
-#define ADDI_TCW_CTRL_GATE (1 << 10)
-#define ADDI_TCW_CTRL_TRIG (1 << 9)
-#define ADDI_TCW_CTRL_EXT_GATE(x) ((x) << 7)
-#define ADDI_TCW_CTRL_EXT_TRIG(x) ((x) << 5)
-#define ADDI_TCW_CTRL_TIMER_ENA (1 << 4)
-#define ADDI_TCW_CTRL_RESET_ENA (1 << 3)
-#define ADDI_TCW_CTRL_WARN_ENA (1 << 2)
-#define ADDI_TCW_CTRL_IRQ_ENA (1 << 1)
-#define ADDI_TCW_CTRL_ENA (1 << 0)
+#define ADDI_TCW_CTRL_EXT_CLK_STATUS BIT(21)
+#define ADDI_TCW_CTRL_CASCADE BIT(20)
+#define ADDI_TCW_CTRL_CNTR_ENA BIT(19)
+#define ADDI_TCW_CTRL_CNT_UP BIT(18)
+#define ADDI_TCW_CTRL_EXT_CLK(x) (((x) & 3) << 16)
+#define ADDI_TCW_CTRL_EXT_CLK_MASK ADDI_TCW_CTRL_EXT_CLK(3)
+#define ADDI_TCW_CTRL_MODE(x) (((x) & 7) << 13)
+#define ADDI_TCW_CTRL_MODE_MASK ADDI_TCW_CTRL_MODE(7)
+#define ADDI_TCW_CTRL_OUT(x) (((x) & 3) << 11)
+#define ADDI_TCW_CTRL_OUT_MASK ADDI_TCW_CTRL_OUT(3)
+#define ADDI_TCW_CTRL_GATE BIT(10)
+#define ADDI_TCW_CTRL_TRIG BIT(9)
+#define ADDI_TCW_CTRL_EXT_GATE(x) (((x) & 3) << 7)
+#define ADDI_TCW_CTRL_EXT_GATE_MASK ADDI_TCW_CTRL_EXT_GATE(3)
+#define ADDI_TCW_CTRL_EXT_TRIG(x) (((x) & 3) << 5)
+#define ADDI_TCW_CTRL_EXT_TRIG_MASK ADDI_TCW_CTRL_EXT_TRIG(3)
+#define ADDI_TCW_CTRL_TIMER_ENA BIT(4)
+#define ADDI_TCW_CTRL_RESET_ENA BIT(3)
+#define ADDI_TCW_CTRL_WARN_ENA BIT(2)
+#define ADDI_TCW_CTRL_IRQ_ENA BIT(1)
+#define ADDI_TCW_CTRL_ENA BIT(0)
#define ADDI_TCW_STATUS_REG 0x10
-#define ADDI_TCW_STATUS_SOFT_CLR (1 << 3)
-#define ADDI_TCW_STATUS_SOFT_TRIG (1 << 1)
-#define ADDI_TCW_STATUS_OVERFLOW (1 << 0)
+#define ADDI_TCW_STATUS_SOFT_CLR BIT(3)
+#define ADDI_TCW_STATUS_HARDWARE_TRIG BIT(2)
+#define ADDI_TCW_STATUS_SOFT_TRIG BIT(1)
+#define ADDI_TCW_STATUS_OVERFLOW BIT(0)
#define ADDI_TCW_IRQ_REG 0x14
-#define ADDI_TCW_IRQ (1 << 0)
+#define ADDI_TCW_IRQ BIT(0)
#define ADDI_TCW_WARN_TIMEVAL_REG 0x18
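
The field macros above now carry their own masks (ADDI_TCW_CTRL_MODE_MASK is just ADDI_TCW_CTRL_MODE(7)), so callers can clear a multi-bit field before setting it without the hand-coded hex constants that the driver hunks above replace. A minimal standalone sketch of that read-modify-write idiom; the register value 0xa001 is made up for illustration:

	#include <stdio.h>

	#define ADDI_TCW_CTRL_MODE(x)		(((x) & 7) << 13)
	#define ADDI_TCW_CTRL_MODE_MASK		ADDI_TCW_CTRL_MODE(7)

	int main(void)
	{
		unsigned int ctrl = 0xa001;	/* hypothetical value, mode field = 5 */

		ctrl &= ~ADDI_TCW_CTRL_MODE_MASK;	/* clear the old mode */
		ctrl |= ADDI_TCW_CTRL_MODE(2);		/* select mode 2 */
		printf("ctrl = 0x%08x\n", ctrl);	/* prints ctrl = 0x00004001 */
		return 0;
	}
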
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 934af3ff7897..b0fc027cf485 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -120,8 +120,20 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
{
unsigned long reg = (unsigned long)s->private;
- if (comedi_dio_update_state(s, data))
- outl(s->state, dev->iobase + reg);
+ if (comedi_dio_update_state(s, data)) {
+ unsigned int val = s->state;
+
+ if (s->n_chan == 16) {
+ /*
+ * It seems the PCI-7230 needs the 16-bit DO state
+ * to be shifted left by 16 bits before being written
+ * to the 32-bit register. Set the value in both
+ * halves of the register to be sure.
+ */
+ val |= val << 16;
+ }
+ outl(val, dev->iobase + reg);
+ }
data[1] = s->state;
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 4ebf5aae5019..47e38398921e 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -141,11 +141,13 @@ static const struct comedi_lrange cb_pcimdas_ai_uni_range = {
* jumper-settable on the board. The settings are not software-readable.
*/
static const struct comedi_lrange cb_pcimdas_ao_range = {
- 4, {
+ 6, {
BIP_RANGE(10),
BIP_RANGE(5),
UNI_RANGE(10),
- UNI_RANGE(5)
+ UNI_RANGE(5),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
}
};
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index a6798ad8fa7f..a562df498b01 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -130,11 +130,7 @@ static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->range_table = &das02_ao_ranges;
s->insn_write = dac02_ao_insn_write;
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- return 0;
+ return comedi_alloc_subdev_readback(s);
}
static struct comedi_driver dac02_driver = {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 93fab6890161..9c02b17a2834 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -108,7 +108,7 @@ static struct pcmcia_driver das08_cs_driver = {
};
module_comedi_pcmcia_driver(driver_das08_cs, das08_cs_driver);
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>, "
- "Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
MODULE_DESCRIPTION("Comedi driver for ComputerBoards DAS-08 PCMCIA boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index d7cf4b153f7c..056bca9c67d5 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1032,8 +1032,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* check that clock setting is valid */
if (it->options[3]) {
- if (it->options[3] != 0 &&
- it->options[3] != 1 && it->options[3] != 10) {
+ if (it->options[3] != 1 && it->options[3] != 10) {
dev_err(dev->class_dev,
"Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
return -EINVAL;
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index a18a8878bdb8..3a37373fbb6f 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -69,18 +69,18 @@ irq can be omitted, although the cmd interface will not work without it.
"cio-das16/m1"
- 0 a/d bits 0-3, mux start 12 bit
- 1 a/d bits 4-11 unused
- 2 status control
- 3 di 4 bit do 4 bit
- 4 unused clear interrupt
- 5 interrupt, pacer
- 6 channel/gain queue address
- 7 channel/gain queue data
- 89ab 8254
- cdef 8254
- 400 8255
- 404-407 8254
+ 0 a/d bits 0-3, mux start 12 bit
+ 1 a/d bits 4-11 unused
+ 2 status control
+ 3 di 4 bit do 4 bit
+ 4 unused clear interrupt
+ 5 interrupt, pacer
+ 6 channel/gain queue address
+ 7 channel/gain queue data
+ 89ab 8254
+ cdef 8254
+ 400 8255
+ 404-407 8254
*/
@@ -411,15 +411,18 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
hw_counter = comedi_8254_read(devpriv->counter, 1);
/* make sure hardware counter reading is not bogus due to initial value
* not having been loaded yet */
- if (devpriv->adc_count == 0 && hw_counter == devpriv->initial_hw_count) {
+ if (devpriv->adc_count == 0 &&
+ hw_counter == devpriv->initial_hw_count) {
num_samples = 0;
} else {
- /* The calculation of num_samples looks odd, but it uses the following facts.
- * 16 bit hardware counter is initialized with value of zero (which really
- * means 0x1000). The counter decrements by one on each conversion
- * (when the counter decrements from zero it goes to 0xffff). num_samples
- * is a 16 bit variable, so it will roll over in a similar fashion to the
- * hardware counter. Work it out, and this is what you get. */
+ /* The calculation of num_samples looks odd, but it uses the
+ * following facts. 16 bit hardware counter is initialized with
+ * value of zero (which really means 0x10000). The counter
+ * decrements by one on each conversion (when the counter
+ * decrements from zero it goes to 0xffff). num_samples is a
+ * 16 bit variable, so it will roll over in a similar fashion
+ * to the hardware counter. Work it out, and this is what you
+ * get. */
num_samples = -hw_counter - devpriv->adc_count;
}
/* check if we only need some of the points */
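
A standalone sketch of that wrap-around arithmetic with made-up counts: 9 conversions have happened in total and 5 were already accounted for, so 4 new samples should be reported:

	#include <stdio.h>

	int main(void)
	{
		unsigned short adc_count = 5;	/* samples already counted */
		unsigned short hw_counter;	/* decrements from 0 (i.e. 0x10000) */
		unsigned short num_samples;

		hw_counter = (unsigned short)(0 - 9);	/* 0xfff7 after 9 conversions */
		num_samples = -hw_counter - adc_count;	/* truncated to 16 bits */
		printf("%u new samples\n", num_samples);	/* prints 4 */
		return 0;
	}
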
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
if (index == das1801hc || index == das1802hc)
return board;
index = das1801hc;
+ break;
default:
dev_err(dev->class_dev,
"Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index bb2883c83afa..958c0d4aae5c 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -607,11 +607,7 @@ static int dmm32at_attach(struct comedi_device *dev,
/* Digital I/O subdevice */
s = &dev->subdevices[2];
- ret = subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
- if (ret)
- return ret;
-
- return 0;
+ return subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
}
static struct comedi_driver dmm32at_driver = {
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index e1f493241cd6..55cae61458cb 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -136,11 +136,7 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->range_table = &range_fl512;
s->insn_write = fl512_ao_insn_write;
- ret = comedi_alloc_subdev_readback(s);
- if (ret)
- return ret;
-
- return 0;
+ return comedi_alloc_subdev_readback(s);
}
static struct comedi_driver fl512_driver = {
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 0768bc42a5db..14ef1f67dd42 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -28,6 +28,7 @@
*/
#include <linux/module.h>
+#include <linux/io.h>
#include "../comedidev.h"
/*
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index a8f3ca48784b..15a53204a36a 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1,43 +1,41 @@
/*
- comedi/drivers/me4000.c
- Source code for the Meilhaus ME-4000 board family.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * me4000.c
+ * Source code for the Meilhaus ME-4000 board family.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-/*
-Driver: me4000
-Description: Meilhaus ME-4000 series boards
-Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i, ME-4680is
-Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
-Updated: Mon, 18 Mar 2002 15:34:01 -0800
-Status: broken (no support for loading firmware)
-
-Supports:
-
- - Analog Input
- - Analog Output
- - Digital I/O
- - Counter
-
-Configuration Options: not applicable, uses PCI auto config
-
-The firmware required by these boards is available in the
-comedi_nonfree_firmware tarball available from
-http://www.comedi.org. However, the driver's support for
-loading the firmware through comedi_config is currently
-broken.
+/*
+ * Driver: me4000
+ * Description: Meilhaus ME-4000 series boards
+ * Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i,
+ * ME-4680is
+ * Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
+ * Updated: Mon, 18 Mar 2002 15:34:01 -0800
+ * Status: untested
+ *
+ * Supports:
+ * - Analog Input
+ * - Analog Output
+ * - Digital I/O
+ * - Counter
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ *
+ * The firmware required by these boards is available in the
+ * comedi_nonfree_firmware tarball available from
+ * http://www.comedi.org.
*/
#include <linux/module.h>
@@ -57,66 +55,61 @@ broken.
#define ME4000_AO_CHAN(x) ((x) * 0x18)
#define ME4000_AO_CTRL_REG(x) (0x00 + ME4000_AO_CHAN(x))
-#define ME4000_AO_CTRL_BIT_MODE_0 (1 << 0)
-#define ME4000_AO_CTRL_BIT_MODE_1 (1 << 1)
-#define ME4000_AO_CTRL_MASK_MODE (3 << 0)
-#define ME4000_AO_CTRL_BIT_STOP (1 << 2)
-#define ME4000_AO_CTRL_BIT_ENABLE_FIFO (1 << 3)
-#define ME4000_AO_CTRL_BIT_ENABLE_EX_TRIG (1 << 4)
-#define ME4000_AO_CTRL_BIT_EX_TRIG_EDGE (1 << 5)
-#define ME4000_AO_CTRL_BIT_IMMEDIATE_STOP (1 << 7)
-#define ME4000_AO_CTRL_BIT_ENABLE_DO (1 << 8)
-#define ME4000_AO_CTRL_BIT_ENABLE_IRQ (1 << 9)
-#define ME4000_AO_CTRL_BIT_RESET_IRQ (1 << 10)
+#define ME4000_AO_CTRL_MODE_0 BIT(0)
+#define ME4000_AO_CTRL_MODE_1 BIT(1)
+#define ME4000_AO_CTRL_STOP BIT(2)
+#define ME4000_AO_CTRL_ENABLE_FIFO BIT(3)
+#define ME4000_AO_CTRL_ENABLE_EX_TRIG BIT(4)
+#define ME4000_AO_CTRL_EX_TRIG_EDGE BIT(5)
+#define ME4000_AO_CTRL_IMMEDIATE_STOP BIT(7)
+#define ME4000_AO_CTRL_ENABLE_DO BIT(8)
+#define ME4000_AO_CTRL_ENABLE_IRQ BIT(9)
+#define ME4000_AO_CTRL_RESET_IRQ BIT(10)
#define ME4000_AO_STATUS_REG(x) (0x04 + ME4000_AO_CHAN(x))
-#define ME4000_AO_STATUS_BIT_FSM (1 << 0)
-#define ME4000_AO_STATUS_BIT_FF (1 << 1)
-#define ME4000_AO_STATUS_BIT_HF (1 << 2)
-#define ME4000_AO_STATUS_BIT_EF (1 << 3)
+#define ME4000_AO_STATUS_FSM BIT(0)
+#define ME4000_AO_STATUS_FF BIT(1)
+#define ME4000_AO_STATUS_HF BIT(2)
+#define ME4000_AO_STATUS_EF BIT(3)
#define ME4000_AO_FIFO_REG(x) (0x08 + ME4000_AO_CHAN(x))
#define ME4000_AO_SINGLE_REG(x) (0x0c + ME4000_AO_CHAN(x))
#define ME4000_AO_TIMER_REG(x) (0x10 + ME4000_AO_CHAN(x))
#define ME4000_AI_CTRL_REG 0x74
#define ME4000_AI_STATUS_REG 0x74
-#define ME4000_AI_CTRL_BIT_MODE_0 (1 << 0)
-#define ME4000_AI_CTRL_BIT_MODE_1 (1 << 1)
-#define ME4000_AI_CTRL_BIT_MODE_2 (1 << 2)
-#define ME4000_AI_CTRL_BIT_SAMPLE_HOLD (1 << 3)
-#define ME4000_AI_CTRL_BIT_IMMEDIATE_STOP (1 << 4)
-#define ME4000_AI_CTRL_BIT_STOP (1 << 5)
-#define ME4000_AI_CTRL_BIT_CHANNEL_FIFO (1 << 6)
-#define ME4000_AI_CTRL_BIT_DATA_FIFO (1 << 7)
-#define ME4000_AI_CTRL_BIT_FULLSCALE (1 << 8)
-#define ME4000_AI_CTRL_BIT_OFFSET (1 << 9)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_ANALOG (1 << 10)
-#define ME4000_AI_CTRL_BIT_EX_TRIG (1 << 11)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_FALLING (1 << 12)
-#define ME4000_AI_CTRL_BIT_EX_IRQ (1 << 13)
-#define ME4000_AI_CTRL_BIT_EX_IRQ_RESET (1 << 14)
-#define ME4000_AI_CTRL_BIT_LE_IRQ (1 << 15)
-#define ME4000_AI_CTRL_BIT_LE_IRQ_RESET (1 << 16)
-#define ME4000_AI_CTRL_BIT_HF_IRQ (1 << 17)
-#define ME4000_AI_CTRL_BIT_HF_IRQ_RESET (1 << 18)
-#define ME4000_AI_CTRL_BIT_SC_IRQ (1 << 19)
-#define ME4000_AI_CTRL_BIT_SC_IRQ_RESET (1 << 20)
-#define ME4000_AI_CTRL_BIT_SC_RELOAD (1 << 21)
-#define ME4000_AI_STATUS_BIT_EF_CHANNEL (1 << 22)
-#define ME4000_AI_STATUS_BIT_HF_CHANNEL (1 << 23)
-#define ME4000_AI_STATUS_BIT_FF_CHANNEL (1 << 24)
-#define ME4000_AI_STATUS_BIT_EF_DATA (1 << 25)
-#define ME4000_AI_STATUS_BIT_HF_DATA (1 << 26)
-#define ME4000_AI_STATUS_BIT_FF_DATA (1 << 27)
-#define ME4000_AI_STATUS_BIT_LE (1 << 28)
-#define ME4000_AI_STATUS_BIT_FSM (1 << 29)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_BOTH (1 << 31)
+#define ME4000_AI_CTRL_MODE_0 BIT(0)
+#define ME4000_AI_CTRL_MODE_1 BIT(1)
+#define ME4000_AI_CTRL_MODE_2 BIT(2)
+#define ME4000_AI_CTRL_SAMPLE_HOLD BIT(3)
+#define ME4000_AI_CTRL_IMMEDIATE_STOP BIT(4)
+#define ME4000_AI_CTRL_STOP BIT(5)
+#define ME4000_AI_CTRL_CHANNEL_FIFO BIT(6)
+#define ME4000_AI_CTRL_DATA_FIFO BIT(7)
+#define ME4000_AI_CTRL_FULLSCALE BIT(8)
+#define ME4000_AI_CTRL_OFFSET BIT(9)
+#define ME4000_AI_CTRL_EX_TRIG_ANALOG BIT(10)
+#define ME4000_AI_CTRL_EX_TRIG BIT(11)
+#define ME4000_AI_CTRL_EX_TRIG_FALLING BIT(12)
+#define ME4000_AI_CTRL_EX_IRQ BIT(13)
+#define ME4000_AI_CTRL_EX_IRQ_RESET BIT(14)
+#define ME4000_AI_CTRL_LE_IRQ BIT(15)
+#define ME4000_AI_CTRL_LE_IRQ_RESET BIT(16)
+#define ME4000_AI_CTRL_HF_IRQ BIT(17)
+#define ME4000_AI_CTRL_HF_IRQ_RESET BIT(18)
+#define ME4000_AI_CTRL_SC_IRQ BIT(19)
+#define ME4000_AI_CTRL_SC_IRQ_RESET BIT(20)
+#define ME4000_AI_CTRL_SC_RELOAD BIT(21)
+#define ME4000_AI_STATUS_EF_CHANNEL BIT(22)
+#define ME4000_AI_STATUS_HF_CHANNEL BIT(23)
+#define ME4000_AI_STATUS_FF_CHANNEL BIT(24)
+#define ME4000_AI_STATUS_EF_DATA BIT(25)
+#define ME4000_AI_STATUS_HF_DATA BIT(26)
+#define ME4000_AI_STATUS_FF_DATA BIT(27)
+#define ME4000_AI_STATUS_LE BIT(28)
+#define ME4000_AI_STATUS_FSM BIT(29)
+#define ME4000_AI_CTRL_EX_TRIG_BOTH BIT(31)
#define ME4000_AI_CHANNEL_LIST_REG 0x78
-#define ME4000_AI_LIST_INPUT_SINGLE_ENDED (0 << 5)
-#define ME4000_AI_LIST_INPUT_DIFFERENTIAL (1 << 5)
-#define ME4000_AI_LIST_RANGE_BIPOLAR_10 (0 << 6)
-#define ME4000_AI_LIST_RANGE_BIPOLAR_2_5 (1 << 6)
-#define ME4000_AI_LIST_RANGE_UNIPOLAR_10 (2 << 6)
-#define ME4000_AI_LIST_RANGE_UNIPOLAR_2_5 (3 << 6)
-#define ME4000_AI_LIST_LAST_ENTRY (1 << 8)
+#define ME4000_AI_LIST_INPUT_DIFFERENTIAL BIT(5)
+#define ME4000_AI_LIST_RANGE(x) ((3 - ((x) & 3)) << 6)
+#define ME4000_AI_LIST_LAST_ENTRY BIT(8)
#define ME4000_AI_DATA_REG 0x7c
#define ME4000_AI_CHAN_TIMER_REG 0x80
#define ME4000_AI_CHAN_PRE_TIMER_REG 0x84
@@ -126,14 +119,14 @@ broken.
#define ME4000_AI_SCAN_PRE_TIMER_HIGH_REG 0x94
#define ME4000_AI_START_REG 0x98
#define ME4000_IRQ_STATUS_REG 0x9c
-#define ME4000_IRQ_STATUS_BIT_EX (1 << 0)
-#define ME4000_IRQ_STATUS_BIT_LE (1 << 1)
-#define ME4000_IRQ_STATUS_BIT_AI_HF (1 << 2)
-#define ME4000_IRQ_STATUS_BIT_AO_0_HF (1 << 3)
-#define ME4000_IRQ_STATUS_BIT_AO_1_HF (1 << 4)
-#define ME4000_IRQ_STATUS_BIT_AO_2_HF (1 << 5)
-#define ME4000_IRQ_STATUS_BIT_AO_3_HF (1 << 6)
-#define ME4000_IRQ_STATUS_BIT_SC (1 << 7)
+#define ME4000_IRQ_STATUS_EX BIT(0)
+#define ME4000_IRQ_STATUS_LE BIT(1)
+#define ME4000_IRQ_STATUS_AI_HF BIT(2)
+#define ME4000_IRQ_STATUS_AO_0_HF BIT(3)
+#define ME4000_IRQ_STATUS_AO_1_HF BIT(4)
+#define ME4000_IRQ_STATUS_AO_2_HF BIT(5)
+#define ME4000_IRQ_STATUS_AO_3_HF BIT(6)
+#define ME4000_IRQ_STATUS_SC BIT(7)
#define ME4000_DIO_PORT_0_REG 0xa0
#define ME4000_DIO_PORT_1_REG 0xa4
#define ME4000_DIO_PORT_2_REG 0xa8
@@ -141,20 +134,20 @@ broken.
#define ME4000_DIO_DIR_REG 0xb0
#define ME4000_AO_LOADSETREG_XX 0xb4
#define ME4000_DIO_CTRL_REG 0xb8
-#define ME4000_DIO_CTRL_BIT_MODE_0 (1 << 0)
-#define ME4000_DIO_CTRL_BIT_MODE_1 (1 << 1)
-#define ME4000_DIO_CTRL_BIT_MODE_2 (1 << 2)
-#define ME4000_DIO_CTRL_BIT_MODE_3 (1 << 3)
-#define ME4000_DIO_CTRL_BIT_MODE_4 (1 << 4)
-#define ME4000_DIO_CTRL_BIT_MODE_5 (1 << 5)
-#define ME4000_DIO_CTRL_BIT_MODE_6 (1 << 6)
-#define ME4000_DIO_CTRL_BIT_MODE_7 (1 << 7)
-#define ME4000_DIO_CTRL_BIT_FUNCTION_0 (1 << 8)
-#define ME4000_DIO_CTRL_BIT_FUNCTION_1 (1 << 9)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_0 (1 << 10)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_1 (1 << 11)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_2 (1 << 12)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_3 (1 << 13)
+#define ME4000_DIO_CTRL_MODE_0 BIT(0)
+#define ME4000_DIO_CTRL_MODE_1 BIT(1)
+#define ME4000_DIO_CTRL_MODE_2 BIT(2)
+#define ME4000_DIO_CTRL_MODE_3 BIT(3)
+#define ME4000_DIO_CTRL_MODE_4 BIT(4)
+#define ME4000_DIO_CTRL_MODE_5 BIT(5)
+#define ME4000_DIO_CTRL_MODE_6 BIT(6)
+#define ME4000_DIO_CTRL_MODE_7 BIT(7)
+#define ME4000_DIO_CTRL_FUNCTION_0 BIT(8)
+#define ME4000_DIO_CTRL_FUNCTION_1 BIT(9)
+#define ME4000_DIO_CTRL_FIFO_HIGH_0 BIT(10)
+#define ME4000_DIO_CTRL_FIFO_HIGH_1 BIT(11)
+#define ME4000_DIO_CTRL_FIFO_HIGH_2 BIT(12)
+#define ME4000_DIO_CTRL_FIFO_HIGH_3 BIT(13)
#define ME4000_AO_DEMUX_ADJUST_REG 0xbc
#define ME4000_AO_DEMUX_ADJUST_VALUE 0x4c
#define ME4000_AI_SAMPLE_COUNTER_REG 0xc0
@@ -166,8 +159,12 @@ broken.
#define ME4000_AI_CHANNEL_LIST_COUNT 1024
-struct me4000_info {
+struct me4000_private {
unsigned long plx_regbase;
+ unsigned int ai_ctrl_mode;
+ unsigned int ai_init_ticks;
+ unsigned int ai_scan_ticks;
+ unsigned int ai_chan_ticks;
};
enum me4000_boardid {
@@ -188,134 +185,126 @@ enum me4000_boardid {
struct me4000_board {
const char *name;
- int ao_nchan;
- int ao_fifo;
int ai_nchan;
- int ai_diff_nchan;
- int ai_sh_nchan;
- int ex_trig_analog;
- int dio_nchan;
- int has_counter;
+ unsigned int can_do_diff_ai:1;
+ unsigned int can_do_sh_ai:1; /* sample & hold (8 channels) */
+ unsigned int ex_trig_analog:1;
+ unsigned int has_ao:1;
+ unsigned int has_ao_fifo:1;
+ unsigned int has_counter:1;
};
static const struct me4000_board me4000_boards[] = {
[BOARD_ME4650] = {
.name = "ME-4650",
.ai_nchan = 16,
- .dio_nchan = 32,
},
[BOARD_ME4660] = {
.name = "ME-4660",
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .dio_nchan = 32,
+ .can_do_diff_ai = 1,
.has_counter = 1,
},
[BOARD_ME4660I] = {
.name = "ME-4660i",
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .dio_nchan = 32,
+ .can_do_diff_ai = 1,
.has_counter = 1,
},
[BOARD_ME4660S] = {
.name = "ME-4660s",
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
- .dio_nchan = 32,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.has_counter = 1,
},
[BOARD_ME4660IS] = {
.name = "ME-4660is",
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
- .dio_nchan = 32,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.has_counter = 1,
},
[BOARD_ME4670] = {
.name = "ME-4670",
- .ao_nchan = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
+ .can_do_diff_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
.has_counter = 1,
},
[BOARD_ME4670I] = {
.name = "ME-4670i",
- .ao_nchan = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
+ .can_do_diff_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
.has_counter = 1,
},
[BOARD_ME4670S] = {
.name = "ME-4670s",
- .ao_nchan = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
.has_counter = 1,
},
[BOARD_ME4670IS] = {
.name = "ME-4670is",
- .ao_nchan = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
.has_counter = 1,
},
[BOARD_ME4680] = {
.name = "ME-4680",
- .ao_nchan = 4,
- .ao_fifo = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
+ .can_do_diff_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
+ .has_ao_fifo = 1,
.has_counter = 1,
},
[BOARD_ME4680I] = {
.name = "ME-4680i",
- .ao_nchan = 4,
- .ao_fifo = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
+ .can_do_diff_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
+ .has_ao_fifo = 1,
.has_counter = 1,
},
[BOARD_ME4680S] = {
.name = "ME-4680s",
- .ao_nchan = 4,
- .ao_fifo = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
+ .has_ao_fifo = 1,
.has_counter = 1,
},
[BOARD_ME4680IS] = {
.name = "ME-4680is",
- .ao_nchan = 4,
- .ao_fifo = 4,
.ai_nchan = 32,
- .ai_diff_nchan = 16,
- .ai_sh_nchan = 8,
+ .can_do_diff_ai = 1,
+ .can_do_sh_ai = 1,
.ex_trig_analog = 1,
- .dio_nchan = 32,
+ .has_ao = 1,
+ .has_ao_fifo = 1,
.has_counter = 1,
},
};
+/*
+ * NOTE: the ranges here are inverted compared to the values
+ * written to the ME4000_AI_CHANNEL_LIST_REG.
+ *
+ * The ME4000_AI_LIST_RANGE() macro handles the inversion.
+ */
static const struct comedi_lrange me4000_ai_range = {
4, {
UNI_RANGE(2.5),
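
To make the NOTE concrete: assuming the remaining table entries follow in the order unipolar 10 V, bipolar 2.5 V, bipolar 10 V (only the first entry is visible in this hunk), ME4000_AI_LIST_RANGE() maps each comedi range index back onto the hardware code that the removed *_RANGE_* constants spelled out. A standalone sketch:

	#include <stdio.h>

	#define ME4000_AI_LIST_RANGE(x)	((3 - ((x) & 3)) << 6)

	int main(void)
	{
		const char *name[] = { "UNI 2.5V", "UNI 10V", "BIP 2.5V", "BIP 10V" };
		int i;

		for (i = 0; i < 4; i++)
			printf("range %d (%s) -> 0x%03x\n",
			       i, name[i], ME4000_AI_LIST_RANGE(i));
		/*
		 * range 0 (UNI 2.5V) -> 0x0c0	(old UNIPOLAR_2_5, 3 << 6)
		 * range 1 (UNI 10V)  -> 0x080	(old UNIPOLAR_10,  2 << 6)
		 * range 2 (BIP 2.5V) -> 0x040	(old BIPOLAR_2_5,  1 << 6)
		 * range 3 (BIP 10V)  -> 0x000	(old BIPOLAR_10,   0 << 6)
		 */
		return 0;
	}
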
@@ -330,7 +319,7 @@ static int me4000_xilinx_download(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct me4000_info *info = dev->private;
+ struct me4000_private *devpriv = dev->private;
unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
unsigned int file_length;
unsigned int val;
@@ -343,42 +332,42 @@ static int me4000_xilinx_download(struct comedi_device *dev,
* Set PLX local interrupt 2 polarity to high.
* Interrupt is thrown by init pin of xilinx.
*/
- outl(PLX9052_INTCSR_LI2POL, info->plx_regbase + PLX9052_INTCSR);
+ outl(PLX9052_INTCSR_LI2POL, devpriv->plx_regbase + PLX9052_INTCSR);
/* Set /CS and /WRITE of the Xilinx */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_UIO2_DATA;
- outl(val, info->plx_regbase + PLX9052_CNTRL);
+ outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
/* Init Xilinx with CS1 */
inb(xilinx_iobase + 0xC8);
/* Wait until /INIT pin is set */
- udelay(20);
- val = inl(info->plx_regbase + PLX9052_INTCSR);
+ usleep_range(20, 1000);
+ val = inl(devpriv->plx_regbase + PLX9052_INTCSR);
if (!(val & PLX9052_INTCSR_LI2STAT)) {
dev_err(dev->class_dev, "Can't init Xilinx\n");
return -EIO;
}
/* Reset /CS and /WRITE of the Xilinx */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_UIO2_DATA;
- outl(val, info->plx_regbase + PLX9052_CNTRL);
+ outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
/* Download Xilinx firmware */
file_length = (((unsigned int)data[0] & 0xff) << 24) +
(((unsigned int)data[1] & 0xff) << 16) +
(((unsigned int)data[2] & 0xff) << 8) +
((unsigned int)data[3] & 0xff);
- udelay(10);
+ usleep_range(10, 1000);
for (i = 0; i < file_length; i++) {
outb(data[16 + i], xilinx_iobase);
- udelay(10);
+ usleep_range(10, 1000);
/* Check if BUSY flag is low */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
if (val & PLX9052_CNTRL_UIO1_DATA) {
dev_err(dev->class_dev,
"Xilinx is still busy (i = %d)\n", i);
@@ -387,7 +376,7 @@ static int me4000_xilinx_download(struct comedi_device *dev,
}
/* If done flag is high download was successful */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
dev_err(dev->class_dev, "DONE flag is not set\n");
dev_err(dev->class_dev, "Download not successful\n");
@@ -395,44 +384,53 @@ static int me4000_xilinx_download(struct comedi_device *dev,
}
/* Set /CS and /WRITE */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_UIO2_DATA;
- outl(val, info->plx_regbase + PLX9052_CNTRL);
+ outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
return 0;
}
+static void me4000_ai_reset(struct comedi_device *dev)
+{
+ unsigned int ctrl;
+
+ /* Stop any running conversion */
+ ctrl = inl(dev->iobase + ME4000_AI_CTRL_REG);
+ ctrl |= ME4000_AI_CTRL_STOP | ME4000_AI_CTRL_IMMEDIATE_STOP;
+ outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
+
+ /* Clear the control register */
+ outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
+}
+
static void me4000_reset(struct comedi_device *dev)
{
- struct me4000_info *info = dev->private;
+ struct me4000_private *devpriv = dev->private;
unsigned int val;
int chan;
- /* Make a hardware reset */
- val = inl(info->plx_regbase + PLX9052_CNTRL);
+ /* Disable interrupts on the PLX */
+ outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
+
+ /* Software reset the PLX */
+ val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_PCI_RESET;
- outl(val, info->plx_regbase + PLX9052_CNTRL);
+ outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_PCI_RESET;
- outl(val, info->plx_regbase + PLX9052_CNTRL);
+ outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
/* 0x8000 to the DACs means an output voltage of 0V */
for (chan = 0; chan < 4; chan++)
outl(0x8000, dev->iobase + ME4000_AO_SINGLE_REG(chan));
- /* Set both stop bits in the analog input control register */
- outl(ME4000_AI_CTRL_BIT_IMMEDIATE_STOP | ME4000_AI_CTRL_BIT_STOP,
- dev->iobase + ME4000_AI_CTRL_REG);
+ me4000_ai_reset(dev);
/* Set both stop bits in the analog output control register */
- val = ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP;
+ val = ME4000_AO_CTRL_IMMEDIATE_STOP | ME4000_AO_CTRL_STOP;
for (chan = 0; chan < 4; chan++)
outl(val, dev->iobase + ME4000_AO_CTRL_REG(chan));
- /* Enable interrupts on the PLX */
- outl(PLX9052_INTCSR_LI1ENAB |
- PLX9052_INTCSR_LI1POL |
- PLX9052_INTCSR_PCIENAB, info->plx_regbase + PLX9052_INTCSR);
-
/* Set the adjustment register for AO demux */
outl(ME4000_AO_DEMUX_ADJUST_VALUE,
dev->iobase + ME4000_AO_DEMUX_ADJUST_REG);
@@ -445,96 +443,68 @@ static void me4000_reset(struct comedi_device *dev)
outl(0x1, dev->iobase + ME4000_DIO_CTRL_REG);
}
-/*=============================================================================
- Analog input section
- ===========================================================================*/
-
-static int me4000_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *subdevice,
- struct comedi_insn *insn, unsigned int *data)
+static unsigned int me4000_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- const struct me4000_board *board = dev->board_ptr;
- int chan = CR_CHAN(insn->chanspec);
- int rang = CR_RANGE(insn->chanspec);
- int aref = CR_AREF(insn->chanspec);
+ unsigned int val;
- unsigned int entry = 0;
- unsigned int tmp;
- unsigned int lval;
+ /* read two's complement value and munge to offset binary */
+ val = inl(dev->iobase + ME4000_AI_DATA_REG);
+ return comedi_offset_munge(s, val);
+}
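/*
 * Illustrative sketch (assumes a 16-bit subdevice, s->maxdata == 0xffff):
 * comedi_offset_munge() reduces to val ^ 0x8000 here, flipping the sign
 * bit so that the two's complement range maps onto offset binary:
 *   0x8000 (-32768) -> 0x0000
 *   0x0000 (     0) -> 0x8000
 *   0x7fff (+32767) -> 0xffff
 */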
- if (insn->n == 0) {
+static int me4000_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inl(dev->iobase + ME4000_AI_STATUS_REG);
+ if (status & ME4000_AI_STATUS_EF_DATA)
return 0;
- } else if (insn->n > 1) {
- dev_err(dev->class_dev, "Invalid instruction length %d\n",
- insn->n);
- return -EINVAL;
- }
+ return -EBUSY;
+}
- switch (rang) {
- case 0:
- entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
- break;
- case 1:
- entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
- break;
- case 2:
- entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
- break;
- case 3:
- entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
- break;
- default:
- dev_err(dev->class_dev, "Invalid range specified\n");
- return -EINVAL;
- }
+static int me4000_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int aref = CR_AREF(insn->chanspec);
+ unsigned int entry;
+ int ret = 0;
+ int i;
- switch (aref) {
- case AREF_GROUND:
- case AREF_COMMON:
- if (chan >= board->ai_nchan) {
+ entry = chan | ME4000_AI_LIST_RANGE(range);
+ if (aref == AREF_DIFF) {
+ if (!(s->subdev_flags & SDF_DIFF)) {
dev_err(dev->class_dev,
- "Analog input is not available\n");
+ "Differential inputs are not available\n");
return -EINVAL;
}
- entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED | chan;
- break;
- case AREF_DIFF:
- if (rang == 0 || rang == 1) {
+ if (!comedi_range_is_bipolar(s, range)) {
dev_err(dev->class_dev,
"Range must be bipolar when aref = diff\n");
return -EINVAL;
}
- if (chan >= board->ai_diff_nchan) {
+ if (chan >= (s->n_chan / 2)) {
dev_err(dev->class_dev,
"Analog input is not available\n");
return -EINVAL;
}
- entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL | chan;
- break;
- default:
- dev_err(dev->class_dev, "Invalid aref specified\n");
- return -EINVAL;
+ entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
}
entry |= ME4000_AI_LIST_LAST_ENTRY;
- /* Clear channel list, data fifo and both stop bits */
- tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~(ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
- ME4000_AI_CTRL_BIT_DATA_FIFO |
- ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Set the acquisition mode to single */
- tmp &= ~(ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_MODE_1 |
- ME4000_AI_CTRL_BIT_MODE_2);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Enable channel list and data fifo */
- tmp |= ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO;
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+ /* Enable channel list and data fifo for single acquisition mode */
+ outl(ME4000_AI_CTRL_CHANNEL_FIFO | ME4000_AI_CTRL_DATA_FIFO,
+ dev->iobase + ME4000_AI_CTRL_REG);
/* Generate channel list entry */
outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
@@ -543,36 +513,29 @@ static int me4000_ai_insn_read(struct comedi_device *dev,
outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
- /* Start conversion by dummy read */
- inl(dev->iobase + ME4000_AI_START_REG);
+ for (i = 0; i < insn->n; i++) {
- /* Wait until ready */
- udelay(10);
- if (!(inl(dev->iobase + ME4000_AI_STATUS_REG) &
- ME4000_AI_STATUS_BIT_EF_DATA)) {
- dev_err(dev->class_dev, "Value not available after wait\n");
- return -EIO;
+ /* start conversion by dummy read */
+ inl(dev->iobase + ME4000_AI_START_REG);
+
+ ret = comedi_timeout(dev, s, insn, me4000_ai_eoc, 0);
+ if (ret)
+ break;
+
+		data[i] = me4000_ai_get_sample(dev, s);
}
- /* Read value from data fifo */
- lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
- data[0] = lval ^ 0x8000;
+ me4000_ai_reset(dev);
- return 1;
+ return ret ? ret : insn->n;
}
static int me4000_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- unsigned int tmp;
-
- /* Stop any running conversion */
- tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~(ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- /* Clear the control register */
- outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
+ me4000_ai_reset(dev);
return 0;
}
@@ -581,8 +544,6 @@ static int me4000_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- const struct me4000_board *board = dev->board_ptr;
- unsigned int max_diff_chan = board->ai_diff_nchan;
unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
int i;
@@ -598,7 +559,13 @@ static int me4000_ai_check_chanlist(struct comedi_device *dev,
}
if (aref == AREF_DIFF) {
- if (chan >= max_diff_chan) {
+ if (!(s->subdev_flags & SDF_DIFF)) {
+ dev_err(dev->class_dev,
+ "Differential inputs are not available\n");
+ return -EINVAL;
+ }
+
+ if (chan >= (s->n_chan / 2)) {
dev_dbg(dev->class_dev,
"Channel number to high\n");
return -EINVAL;
@@ -615,202 +582,127 @@ static int me4000_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-static int ai_round_cmd_args(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd,
- unsigned int *init_ticks,
- unsigned int *scan_ticks, unsigned int *chan_ticks)
+static void me4000_ai_round_cmd_args(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
+ struct me4000_private *devpriv = dev->private;
int rest;
- *init_ticks = 0;
- *scan_ticks = 0;
- *chan_ticks = 0;
+ devpriv->ai_init_ticks = 0;
+ devpriv->ai_scan_ticks = 0;
+ devpriv->ai_chan_ticks = 0;
if (cmd->start_arg) {
- *init_ticks = (cmd->start_arg * 33) / 1000;
+ devpriv->ai_init_ticks = (cmd->start_arg * 33) / 1000;
rest = (cmd->start_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
- (*init_ticks)++;
+ devpriv->ai_init_ticks++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
- (*init_ticks)++;
+ devpriv->ai_init_ticks++;
}
}
if (cmd->scan_begin_arg) {
- *scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
+ devpriv->ai_scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
rest = (cmd->scan_begin_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
- (*scan_ticks)++;
+ devpriv->ai_scan_ticks++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
- (*scan_ticks)++;
+ devpriv->ai_scan_ticks++;
}
}
if (cmd->convert_arg) {
- *chan_ticks = (cmd->convert_arg * 33) / 1000;
+ devpriv->ai_chan_ticks = (cmd->convert_arg * 33) / 1000;
rest = (cmd->convert_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
- (*chan_ticks)++;
+ devpriv->ai_chan_ticks++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
- (*chan_ticks)++;
+ devpriv->ai_chan_ticks++;
}
}
-
- return 0;
-}
-
-static void ai_write_timer(struct comedi_device *dev,
- unsigned int init_ticks,
- unsigned int scan_ticks, unsigned int chan_ticks)
-{
- outl(init_ticks - 1, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
- outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
-
- if (scan_ticks) {
- outl(scan_ticks - 1, dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
- outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
- }
-
- outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
- outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
}
-static int ai_write_chanlist(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
+static void me4000_ai_write_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- unsigned int entry;
- unsigned int chan;
- unsigned int rang;
- unsigned int aref;
int i;
for (i = 0; i < cmd->chanlist_len; i++) {
- chan = CR_CHAN(cmd->chanlist[i]);
- rang = CR_RANGE(cmd->chanlist[i]);
- aref = CR_AREF(cmd->chanlist[i]);
-
- entry = chan;
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
+ unsigned int aref = CR_AREF(cmd->chanlist[i]);
+ unsigned int entry;
- if (rang == 0)
- entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
- else if (rang == 1)
- entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
- else if (rang == 2)
- entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
- else
- entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
+ entry = chan | ME4000_AI_LIST_RANGE(range);
if (aref == AREF_DIFF)
entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
- else
- entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED;
+
+ if (i == (cmd->chanlist_len - 1))
+ entry |= ME4000_AI_LIST_LAST_ENTRY;
outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
}
-
- return 0;
}
-static int ai_prepare(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd,
- unsigned int init_ticks,
- unsigned int scan_ticks, unsigned int chan_ticks)
+static int me4000_ai_do_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- unsigned int tmp = 0;
+ struct me4000_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int ctrl;
/* Write timer arguments */
- ai_write_timer(dev, init_ticks, scan_ticks, chan_ticks);
+ outl(devpriv->ai_init_ticks - 1,
+ dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
+ outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
+
+ if (devpriv->ai_scan_ticks) {
+ outl(devpriv->ai_scan_ticks - 1,
+ dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
+ outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
+ }
- /* Reset control register */
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+ outl(devpriv->ai_chan_ticks - 1,
+ dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
+ outl(devpriv->ai_chan_ticks - 1,
+ dev->iobase + ME4000_AI_CHAN_TIMER_REG);
/* Start sources */
- if ((cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_TIMER &&
- cmd->convert_src == TRIG_TIMER) ||
- (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER)) {
- tmp = ME4000_AI_CTRL_BIT_MODE_1 |
- ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
- ME4000_AI_CTRL_BIT_DATA_FIFO;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_TIMER) {
- tmp = ME4000_AI_CTRL_BIT_MODE_2 |
- ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
- ME4000_AI_CTRL_BIT_DATA_FIFO;
- } else if (cmd->start_src == TRIG_EXT &&
- cmd->scan_begin_src == TRIG_EXT &&
- cmd->convert_src == TRIG_EXT) {
- tmp = ME4000_AI_CTRL_BIT_MODE_0 |
- ME4000_AI_CTRL_BIT_MODE_1 |
- ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
- ME4000_AI_CTRL_BIT_DATA_FIFO;
- } else {
- tmp = ME4000_AI_CTRL_BIT_MODE_0 |
- ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
- ME4000_AI_CTRL_BIT_DATA_FIFO;
- }
+ ctrl = devpriv->ai_ctrl_mode |
+ ME4000_AI_CTRL_CHANNEL_FIFO |
+ ME4000_AI_CTRL_DATA_FIFO;
/* Stop triggers */
if (cmd->stop_src == TRIG_COUNT) {
outl(cmd->chanlist_len * cmd->stop_arg,
dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
- tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
+ ctrl |= ME4000_AI_CTRL_SC_IRQ;
} else if (cmd->stop_src == TRIG_NONE &&
cmd->scan_end_src == TRIG_COUNT) {
outl(cmd->scan_end_arg,
dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
- tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
- } else {
- tmp |= ME4000_AI_CTRL_BIT_HF_IRQ;
+ ctrl |= ME4000_AI_CTRL_SC_IRQ;
}
+ ctrl |= ME4000_AI_CTRL_HF_IRQ;
/* Write the setup to the control register */
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+ outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
/* Write the channel list */
- ai_write_chanlist(dev, s, cmd);
-
- return 0;
-}
-
-static int me4000_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- int err;
- unsigned int init_ticks = 0;
- unsigned int scan_ticks = 0;
- unsigned int chan_ticks = 0;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- /* Reset the analog input */
- err = me4000_ai_cancel(dev, s);
- if (err)
- return err;
-
- /* Round the timer arguments */
- err = ai_round_cmd_args(dev,
- s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
- if (err)
- return err;
-
- /* Prepare the AI for acquisition */
- err = ai_prepare(dev, s, cmd, init_ticks, scan_ticks, chan_ticks);
- if (err)
- return err;
+ me4000_ai_write_chanlist(dev, s, cmd);
 	/* Start acquisition by dummy read */
inl(dev->iobase + ME4000_AI_START_REG);
@@ -822,14 +714,9 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- unsigned int init_ticks;
- unsigned int chan_ticks;
- unsigned int scan_ticks;
+ struct me4000_private *devpriv = dev->private;
int err = 0;
- /* Round the timer arguments */
- ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
-
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
@@ -857,21 +744,28 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
} else if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_TIMER) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_2;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_EXT) {
+ devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0 |
+ ME4000_AI_CTRL_MODE_1;
} else {
err |= -EINVAL;
}
@@ -887,15 +781,19 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->chanlist_len = 1;
err |= -EINVAL;
}
- if (init_ticks < 66) {
+
+ /* Round the timer arguments */
+ me4000_ai_round_cmd_args(dev, s, cmd);
+
+ if (devpriv->ai_init_ticks < 66) {
cmd->start_arg = 2000;
err |= -EINVAL;
}
- if (scan_ticks && scan_ticks < 67) {
+ if (devpriv->ai_scan_ticks && devpriv->ai_scan_ticks < 67) {
cmd->scan_begin_arg = 2031;
err |= -EINVAL;
}
- if (chan_ticks < 66) {
+ if (devpriv->ai_chan_ticks < 66) {
cmd->convert_arg = 2000;
err |= -EINVAL;
}
@@ -915,17 +813,18 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
- if (chan_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
- if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
+ if (devpriv->ai_scan_ticks <=
+ cmd->chanlist_len * devpriv->ai_chan_ticks) {
dev_err(dev->class_dev, "Invalid scan end arg\n");
/* At least one tick more */
@@ -936,12 +835,12 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
- if (chan_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
@@ -950,17 +849,18 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
- if (chan_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
- if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
+ if (devpriv->ai_scan_ticks <=
+ cmd->chanlist_len * devpriv->ai_chan_ticks) {
dev_err(dev->class_dev, "Invalid scan end arg\n");
/* At least one tick more */
@@ -971,12 +871,12 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
- if (chan_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
@@ -985,12 +885,12 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
- if (chan_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
@@ -999,7 +899,7 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_EXT) {
/* Check timer arguments */
- if (init_ticks < ME4000_AI_MIN_TICKS) {
+ if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
@@ -1039,103 +939,57 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
return IRQ_NONE;
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
- ME4000_IRQ_STATUS_BIT_AI_HF) {
+ ME4000_IRQ_STATUS_AI_HF) {
/* Read status register to find out what happened */
- tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
-
- if (!(tmp & ME4000_AI_STATUS_BIT_FF_DATA) &&
- !(tmp & ME4000_AI_STATUS_BIT_HF_DATA) &&
- (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
- c = ME4000_AI_FIFO_COUNT;
-
- /*
- * FIFO overflow, so stop conversion
- * and disable all interrupts
- */
- tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
- tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
- ME4000_AI_CTRL_BIT_SC_IRQ);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- s->async->events |= COMEDI_CB_ERROR;
+ tmp = inl(dev->iobase + ME4000_AI_STATUS_REG);
+ if (!(tmp & ME4000_AI_STATUS_FF_DATA) &&
+ !(tmp & ME4000_AI_STATUS_HF_DATA) &&
+ (tmp & ME4000_AI_STATUS_EF_DATA)) {
dev_err(dev->class_dev, "FIFO overflow\n");
- } else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
- && !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
- && (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
+ s->async->events |= COMEDI_CB_ERROR;
+ c = ME4000_AI_FIFO_COUNT;
+ } else if ((tmp & ME4000_AI_STATUS_FF_DATA) &&
+ !(tmp & ME4000_AI_STATUS_HF_DATA) &&
+ (tmp & ME4000_AI_STATUS_EF_DATA)) {
c = ME4000_AI_FIFO_COUNT / 2;
} else {
- dev_err(dev->class_dev,
- "Can't determine state of fifo\n");
- c = 0;
-
- /*
- * Undefined state, so stop conversion
- * and disable all interrupts
- */
- tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
- tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
- ME4000_AI_CTRL_BIT_SC_IRQ);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- s->async->events |= COMEDI_CB_ERROR;
-
dev_err(dev->class_dev, "Undefined FIFO state\n");
+ s->async->events |= COMEDI_CB_ERROR;
+ c = 0;
}
for (i = 0; i < c; i++) {
- /* Read value from data fifo */
- lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
- lval ^= 0x8000;
-
- if (!comedi_buf_write_samples(s, &lval, 1)) {
- /*
- * Buffer overflow, so stop conversion
- * and disable all interrupts
- */
- tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
- tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
- ME4000_AI_CTRL_BIT_SC_IRQ);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+ lval = me4000_ai_get_sample(dev, s);
+ if (!comedi_buf_write_samples(s, &lval, 1))
break;
- }
}
/* Work is done, so reset the interrupt */
- tmp |= ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
+ tmp |= ME4000_AI_CTRL_HF_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
+ tmp &= ~ME4000_AI_CTRL_HF_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
- ME4000_IRQ_STATUS_BIT_SC) {
+ ME4000_IRQ_STATUS_SC) {
+ /* Acquisition is complete */
s->async->events |= COMEDI_CB_EOA;
- /*
- * Acquisition is complete, so stop
- * conversion and disable all interrupts
- */
- tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
- tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
- tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ);
- outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
/* Poll data until fifo empty */
- while (inl(dev->iobase + ME4000_AI_CTRL_REG) &
- ME4000_AI_STATUS_BIT_EF_DATA) {
- /* Read value from data fifo */
- lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
- lval ^= 0x8000;
-
+ while (inl(dev->iobase + ME4000_AI_STATUS_REG) &
+ ME4000_AI_STATUS_EF_DATA) {
+ lval = me4000_ai_get_sample(dev, s);
if (!comedi_buf_write_samples(s, &lval, 1))
break;
}
/* Work is done, so reset the interrupt */
- tmp |= ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
+ tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
+ tmp |= ME4000_AI_CTRL_SC_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
- tmp &= ~ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
+ tmp &= ~ME4000_AI_CTRL_SC_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
@@ -1149,12 +1003,12 @@ static int me4000_ao_insn_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int tmp;
/* Stop any running conversion */
tmp = inl(dev->iobase + ME4000_AO_CTRL_REG(chan));
- tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
+ tmp |= ME4000_AO_CTRL_IMMEDIATE_STOP;
outl(tmp, dev->iobase + ME4000_AO_CTRL_REG(chan));
/* Clear control register and set to single mode */
@@ -1217,18 +1071,18 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
return ret;
tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1 |
- ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3 |
- ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5 |
- ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7);
+ tmp &= ~(ME4000_DIO_CTRL_MODE_0 | ME4000_DIO_CTRL_MODE_1 |
+ ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3 |
+ ME4000_DIO_CTRL_MODE_4 | ME4000_DIO_CTRL_MODE_5 |
+ ME4000_DIO_CTRL_MODE_6 | ME4000_DIO_CTRL_MODE_7);
if (s->io_bits & 0x000000ff)
- tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+ tmp |= ME4000_DIO_CTRL_MODE_0;
if (s->io_bits & 0x0000ff00)
- tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
+ tmp |= ME4000_DIO_CTRL_MODE_2;
if (s->io_bits & 0x00ff0000)
- tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
+ tmp |= ME4000_DIO_CTRL_MODE_4;
if (s->io_bits & 0xff000000)
- tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
+ tmp |= ME4000_DIO_CTRL_MODE_6;
/*
* Check for optoisolated ME-4000 version.
@@ -1238,9 +1092,8 @@ static int me4000_dio_insn_config(struct comedi_device *dev,
if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
s->io_bits |= 0x000000ff;
s->io_bits &= ~0x0000ff00;
- tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
- ME4000_DIO_CTRL_BIT_MODE_3);
+ tmp |= ME4000_DIO_CTRL_MODE_0;
+ tmp &= ~(ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3);
}
outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
@@ -1253,7 +1106,7 @@ static int me4000_auto_attach(struct comedi_device *dev,
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct me4000_board *board = NULL;
- struct me4000_info *info;
+ struct me4000_private *devpriv;
struct comedi_subdevice *s;
int result;
@@ -1264,17 +1117,17 @@ static int me4000_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- info = comedi_alloc_devpriv(dev, sizeof(*info));
- if (!info)
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
return -ENOMEM;
result = comedi_pci_enable(dev);
if (result)
return result;
- info->plx_regbase = pci_resource_start(pcidev, 1);
+ devpriv->plx_regbase = pci_resource_start(pcidev, 1);
dev->iobase = pci_resource_start(pcidev, 2);
- if (!info->plx_regbase || !dev->iobase)
+ if (!devpriv->plx_regbase || !dev->iobase)
return -ENODEV;
result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
@@ -1287,79 +1140,66 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (pcidev->irq > 0) {
result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
dev->board_name, dev);
- if (result == 0)
+ if (result == 0) {
dev->irq = pcidev->irq;
+
+ /* Enable interrupts on the PLX */
+ outl(PLX9052_INTCSR_LI1ENAB | PLX9052_INTCSR_LI1POL |
+ PLX9052_INTCSR_PCIENAB,
+ devpriv->plx_regbase + PLX9052_INTCSR);
+ }
}
result = comedi_alloc_subdevices(dev, 4);
if (result)
return result;
- /*=========================================================================
- Analog input subdevice
- ========================================================================*/
-
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
-
- if (board->ai_nchan) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags =
- SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
- s->n_chan = board->ai_nchan;
- s->maxdata = 0xFFFF; /* 16 bit ADC */
- s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT;
- s->range_table = &me4000_ai_range;
- s->insn_read = me4000_ai_insn_read;
-
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = me4000_ai_cancel;
- s->do_cmdtest = me4000_ai_do_cmd_test;
- s->do_cmd = me4000_ai_do_cmd;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND;
+ if (board->can_do_diff_ai)
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = board->ai_nchan;
+ s->maxdata = 0xffff;
+ s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT;
+ s->range_table = &me4000_ai_range;
+ s->insn_read = me4000_ai_insn_read;
+
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->cancel = me4000_ai_cancel;
+ s->do_cmdtest = me4000_ai_do_cmd_test;
+ s->do_cmd = me4000_ai_do_cmd;
}
- /*=========================================================================
- Analog output subdevice
- ========================================================================*/
-
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
-
- if (board->ao_nchan) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
- s->n_chan = board->ao_nchan;
- s->maxdata = 0xFFFF; /* 16 bit DAC */
- s->range_table = &range_bipolar10;
- s->insn_write = me4000_ao_insn_write;
+ if (board->has_ao) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
+ s->n_chan = 4;
+ s->maxdata = 0xffff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = me4000_ao_insn_write;
result = comedi_alloc_subdev_readback(s);
if (result)
return result;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /*=========================================================================
- Digital I/O subdevice
- ========================================================================*/
-
+ /* Digital I/O subdevice */
s = &dev->subdevices[2];
-
- if (board->dio_nchan) {
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = board->dio_nchan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = me4000_dio_insn_bits;
- s->insn_config = me4000_dio_insn_config;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 32;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = me4000_dio_insn_bits;
+ s->insn_config = me4000_dio_insn_config;
/*
 	 * Check for optoisolated ME-4000 version. If one, the first
@@ -1367,7 +1207,7 @@ static int me4000_auto_attach(struct comedi_device *dev,
*/
if (!inl(dev->iobase + ME4000_DIO_DIR_REG)) {
s->io_bits |= 0xFF;
- outl(ME4000_DIO_CTRL_BIT_MODE_0,
+ outl(ME4000_DIO_CTRL_MODE_0,
dev->iobase + ME4000_DIO_DIR_REG);
}
@@ -1393,8 +1233,12 @@ static int me4000_auto_attach(struct comedi_device *dev,
static void me4000_detach(struct comedi_device *dev)
{
- if (dev->iobase)
- me4000_reset(dev);
+ if (dev->irq) {
+ struct me4000_private *devpriv = dev->private;
+
+ /* Disable interrupts on the PLX */
+ outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
+ }
comedi_pci_detach(dev);
}
@@ -1438,6 +1282,6 @@ static struct pci_driver me4000_pci_driver = {
module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Meilhaus ME-4000 series boards");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(ME4000_FIRMWARE);
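
The me4000 rework above routes sample conversion through comedi_offset_munge(), which turns a two's complement ADC value into offset binary by flipping the sign bit, exactly what the old open-coded `lval ^= 0x8000` did for the 16-bit converter. A minimal stand-alone sketch of that munge (the helper name is illustrative, not a comedi API):

/*
 * Convert a two's complement sample to offset binary by flipping the
 * sign bit; for maxdata = 0xffff this reduces to val ^ 0x8000.
 */
static unsigned int munge_offset_binary(unsigned int val, unsigned int maxdata)
{
	return (val ^ ((maxdata + 1) >> 1)) & maxdata;
}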
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index a208cb348437..d9de83ab0267 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -55,11 +55,7 @@ static int dio24_auto_attach(struct comedi_device *dev,
/* 8255 dio */
s = &dev->subdevices[0];
- ret = subdev_8255_init(dev, s, NULL, 0x00);
- if (ret)
- return ret;
-
- return 0;
+ return subdev_8255_init(dev, s, NULL, 0x00);
}
static struct comedi_driver driver_dio24 = {
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 5f649f88d55c..88de8da3eff3 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -172,7 +172,7 @@ struct ni6501_private {
};
static int ni6501_port_command(struct comedi_device *dev, int command,
- const u8 *port, u8 *bitmap)
+ unsigned int val, u8 *bitmap)
{
struct usb_device *usb = comedi_to_usb_dev(dev);
struct ni6501_private *devpriv = dev->private;
@@ -190,22 +190,22 @@ static int ni6501_port_command(struct comedi_device *dev, int command,
request_size = sizeof(READ_PORT_REQUEST);
response_size = sizeof(READ_PORT_RESPONSE);
memcpy(tx, READ_PORT_REQUEST, request_size);
- tx[14] = port[0];
+ tx[14] = val & 0xff;
break;
case WRITE_PORT:
request_size = sizeof(WRITE_PORT_REQUEST);
response_size = sizeof(GENERIC_RESPONSE);
memcpy(tx, WRITE_PORT_REQUEST, request_size);
- tx[14] = port[0];
- tx[17] = bitmap[0];
+ tx[14] = val & 0xff;
+ tx[17] = *bitmap;
break;
case SET_PORT_DIR:
request_size = sizeof(SET_PORT_DIR_REQUEST);
response_size = sizeof(GENERIC_RESPONSE);
memcpy(tx, SET_PORT_DIR_REQUEST, request_size);
- tx[14] = port[0];
- tx[15] = port[1];
- tx[16] = port[2];
+ tx[14] = val & 0xff;
+ tx[15] = (val >> 8) & 0xff;
+ tx[16] = (val >> 16) & 0xff;
break;
default:
ret = -EINVAL;
@@ -235,7 +235,7 @@ static int ni6501_port_command(struct comedi_device *dev, int command,
/* Check if results are valid */
if (command == READ_PORT) {
- bitmap[0] = devpriv->usb_rx_buf[14];
+ *bitmap = devpriv->usb_rx_buf[14];
/* mask bitmap for comparing */
devpriv->usb_rx_buf[14] = 0x00;
@@ -349,17 +349,12 @@ static int ni6501_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
int ret;
- u8 port[3];
ret = comedi_dio_insn_config(dev, s, insn, data, 0);
if (ret)
return ret;
- port[0] = (s->io_bits) & 0xff;
- port[1] = (s->io_bits >> 8) & 0xff;
- port[2] = (s->io_bits >> 16) & 0xff;
-
- ret = ni6501_port_command(dev, SET_PORT_DIR, port, NULL);
+ ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL);
if (ret)
return ret;
@@ -382,7 +377,7 @@ static int ni6501_dio_insn_bits(struct comedi_device *dev,
if (mask & (0xFF << port * 8)) {
bitmap = (s->state >> port * 8) & 0xFF;
ret = ni6501_port_command(dev, WRITE_PORT,
- &port, &bitmap);
+ port, &bitmap);
if (ret)
return ret;
}
@@ -391,7 +386,7 @@ static int ni6501_dio_insn_bits(struct comedi_device *dev,
data[1] = 0;
for (port = 0; port < 3; port++) {
- ret = ni6501_port_command(dev, READ_PORT, &port, &bitmap);
+ ret = ni6501_port_command(dev, READ_PORT, port, &bitmap);
if (ret)
return ret;
data[1] |= bitmap << port * 8;
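
The reworked ni6501_port_command() takes one packed unsigned int instead of a u8 array, so callers can pass s->io_bits directly and the command splits it back into per-port bytes with shifts and masks. The same pack/unpack pattern in isolation, as a hedged sketch:

/* Pack three 8-bit port values into one word and split it again,
 * mirroring how SET_PORT_DIR consumes s->io_bits above.
 */
static unsigned int pack_ports(unsigned char p0, unsigned char p1,
			       unsigned char p2)
{
	return p0 | (p1 << 8) | (p2 << 16);
}

static void unpack_ports(unsigned int val, unsigned char port[3])
{
	port[0] = val & 0xff;
	port[1] = (val >> 8) & 0xff;
	port[2] = (val >> 16) & 0xff;
}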
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 781b321587dc..a353d1b155bb 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -305,7 +305,7 @@ static int check_channel_list(struct comedi_device *dev,
chansegment[0] = chanlist[0];
for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
 		/* if we detect a loop, we must finish */

- if (chanlist[0] == chanlist[i])
+ if (chanlist[0] == chanlist[i])
break;
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 781918d8d85f..35f0f676eb28 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -2852,11 +2852,7 @@ static int s626_auto_attach(struct comedi_device *dev,
s->insn_read = s626_enc_insn_read;
s->insn_write = s626_enc_insn_write;
- ret = s626_initialize(dev);
- if (ret)
- return ret;
-
- return 0;
+ return s626_initialize(dev);
}
static void s626_detach(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 83da162deb52..5f19374c460d 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -32,6 +32,7 @@ Status: in development
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/ktime.h>
#include <linux/termios.h>
#include <asm/ioctls.h>
@@ -121,9 +122,9 @@ static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
{
struct poll_wqueues table;
- struct timeval start, now;
+ ktime_t start, now;
- do_gettimeofday(&start);
+ start = ktime_get();
poll_initwait(&table);
while (1) {
long elapsed;
@@ -134,9 +135,8 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
POLLHUP | POLLERR)) {
break;
}
- do_gettimeofday(&now);
- elapsed = 1000000 * (now.tv_sec - start.tv_sec) +
- now.tv_usec - start.tv_usec;
+ now = ktime_get();
+ elapsed = ktime_us_delta(now, start);
if (elapsed > timeout)
break;
set_current_state(TASK_INTERRUPTIBLE);
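
The serial2002 change replaces do_gettimeofday() (wall-clock time, which can jump under clock adjustments) with ktime_get() (monotonic), so the elapsed-time comparison can never go backwards. The idiom in miniature, assuming kernel context with <linux/ktime.h>:

#include <linux/ktime.h>

/*
 * Returns true once at least timeout_us microseconds have elapsed
 * since start, measured on the monotonic clock.
 */
static bool elapsed_us(ktime_t start, long timeout_us)
{
	return ktime_us_delta(ktime_get(), start) > timeout_us;
}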
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index eaa9add491df..649cf47184a4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,6 +1,6 @@
/*
* usbduxsigma.c
- * Copyright (C) 2011-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2011-2015 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,7 +18,7 @@
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: [ITL] USB-DUX-SIGMA (usbduxsigma)
* Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 20 July 2015
* Status: stable
*/
@@ -39,6 +39,7 @@
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
* 0.6: corrected wrong input range
+ * 0.7: rewrite code so that urb->interval is always 1
*/
#include <linux/kernel.h>
@@ -122,7 +123,7 @@
#define RETRIES 10
/* bulk transfer commands to usbduxsigma */
-#define USBBUXSIGMA_AD_CMD 0
+#define USBBUXSIGMA_AD_CMD 9
#define USBDUXSIGMA_DA_CMD 1
#define USBDUXSIGMA_DIO_CFG_CMD 2
#define USBDUXSIGMA_DIO_BITS_CMD 3
@@ -217,24 +218,28 @@ static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
int ret;
int i;
- devpriv->ai_counter--;
- if (devpriv->ai_counter == 0) {
- devpriv->ai_counter = devpriv->ai_timer;
-
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- /* transfer data, note first byte is the DIO state */
- val = be32_to_cpu(devpriv->in_buf[i+1]);
- val &= 0x00ffffff; /* strip status byte */
- val ^= 0x00800000; /* convert to unsigned */
+ if ((urb->actual_length > 0) && (urb->status != -EXDEV)) {
+ devpriv->ai_counter--;
+ if (devpriv->ai_counter == 0) {
+ devpriv->ai_counter = devpriv->ai_timer;
+
+ /* get the data from the USB bus
+ and hand it over to comedi */
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ /* transfer data,
+ note first byte is the DIO state */
+ val = be32_to_cpu(devpriv->in_buf[i+1]);
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
+
+ if (!comedi_buf_write_samples(s, &val, 1))
+ return;
+ }
- if (!comedi_buf_write_samples(s, &val, 1))
- return;
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
}
-
- if (cmd->stop_src == TRIG_COUNT &&
- async->scans_done >= cmd->stop_arg)
- async->events |= COMEDI_CB_EOA;
}
/* if command is still running, resubmit urb */
@@ -374,10 +379,7 @@ static void usbduxsigma_ao_handle_urb(struct comedi_device *dev,
urb->transfer_buffer_length = SIZEOUTBUF;
urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
+ urb->interval = 1; /* (u)frames */
urb->number_of_packets = 1;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = SIZEOUTBUF;
@@ -441,7 +443,6 @@ static int usbduxsigma_submit_urbs(struct comedi_device *dev,
int input_urb)
{
struct usb_device *usb = comedi_to_usb_dev(dev);
- struct usbduxsigma_private *devpriv = dev->private;
struct urb *urb;
int ret;
int i;
@@ -452,7 +453,7 @@ static int usbduxsigma_submit_urbs(struct comedi_device *dev,
/* in case of a resubmission after an unlink... */
if (input_urb)
- urb->interval = devpriv->ai_interval;
+ urb->interval = 1;
urb->context = dev;
urb->dev = usb;
urb->status = 0;
@@ -481,6 +482,7 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
struct usbduxsigma_private *devpriv = dev->private;
int high_speed = devpriv->high_speed;
int interval = usbduxsigma_chans_to_interval(cmd->chanlist_len);
+ unsigned int tmp;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
@@ -508,35 +510,20 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- unsigned int tmp;
-
- if (high_speed) {
- /*
- * In high speed mode microframes are possible.
- * However, during one microframe we can roughly
- * sample two channels. Thus, the more channels
- * are in the channel list the more time we need.
- */
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- (1000000 / 8 *
- interval));
-
- tmp = (cmd->scan_begin_arg / 125000) * 125000;
- } else {
- /* full speed */
- /* 1kHz scans every USB frame */
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- 1000000);
-
- tmp = (cmd->scan_begin_arg / 1000000) * 1000000;
- }
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
+ if (high_speed) {
+ /*
+ * In high speed mode microframes are possible.
+ * However, during one microframe we can roughly
+ * sample two channels. Thus, the more channels
+ * are in the channel list the more time we need.
+ */
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
+ (125000 * interval));
+ } else {
+ /* full speed */
+ /* 1kHz scans every USB frame */
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
+ 1000000);
}
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
@@ -552,21 +539,8 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
/* Step 4: fix up any arguments */
- if (high_speed) {
- /*
- * every 2 channels get a time window of 125us. Thus, if we
- * sample all 16 channels we need 1ms. If we sample only one
- * channel we need only 125us
- */
- devpriv->ai_interval = interval;
- devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
- } else {
- /* interval always 1ms */
- devpriv->ai_interval = 1;
- devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
- }
- if (devpriv->ai_timer < 1)
- err |= -EINVAL;
+ tmp = rounddown(cmd->scan_begin_arg, high_speed ? 125000 : 1000000);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
if (err)
return 4;
@@ -668,19 +642,36 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
down(&devpriv->sem);
+ if (devpriv->high_speed) {
+ /*
+ * every 2 channels get a time window of 125us. Thus, if we
+ * sample all 16 channels we need 1ms. If we sample only one
+ * channel we need only 125us
+ */
+ unsigned int interval = usbduxsigma_chans_to_interval(len);
+
+ devpriv->ai_interval = interval;
+ devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+ } else {
+ /* interval always 1ms */
+ devpriv->ai_interval = 1;
+ devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+ }
+
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
create_adc_command(chan, &muxsg0, &muxsg1);
}
- devpriv->dux_commands[1] = len; /* num channels per time step */
- devpriv->dux_commands[2] = 0x12; /* CONFIG0 */
- devpriv->dux_commands[3] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
- devpriv->dux_commands[4] = 0x00; /* CONFIG3: diff. channels off */
- devpriv->dux_commands[5] = muxsg0;
- devpriv->dux_commands[6] = muxsg1;
- devpriv->dux_commands[7] = sysred;
+ devpriv->dux_commands[1] = devpriv->ai_interval;
+ devpriv->dux_commands[2] = len; /* num channels per time step */
+ devpriv->dux_commands[3] = 0x12; /* CONFIG0 */
+ devpriv->dux_commands[4] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
+ devpriv->dux_commands[5] = 0x00; /* CONFIG3: diff. channels off */
+ devpriv->dux_commands[6] = muxsg0;
+ devpriv->dux_commands[7] = muxsg1;
+ devpriv->dux_commands[8] = sysred;
ret = usbbuxsigma_send_cmd(dev, USBBUXSIGMA_AD_CMD);
if (ret < 0) {
@@ -848,29 +839,22 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int tmp;
int err = 0;
- int high_speed;
- unsigned int flags;
-
- /* high speed conversions are not used yet */
- high_speed = 0; /* (devpriv->high_speed) */
/* Step 1 : check if triggers are trivially valid */
err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- if (high_speed) {
- /*
- * start immediately a new scan
- * the sampling rate is set by the coversion rate
- */
- flags = TRIG_FOLLOW;
- } else {
- /* start a new scan (output at once) with a timer */
- flags = TRIG_TIMER;
- }
- err |= comedi_check_trigger_src(&cmd->scan_begin_src, flags);
-
+ /*
+ * For now, always use "scan" timing with all channels updated at once
+ * (cmd->scan_begin_src == TRIG_TIMER, cmd->convert_src == TRIG_NOW).
+ *
+ * In a future version, "convert" timing with channels updated
+	 * individually may be supported in high speed mode
+ * (cmd->scan_begin_src == TRIG_FOLLOW, cmd->convert_src == TRIG_TIMER).
+ */
+ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -894,17 +878,7 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
- if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- 1000000);
- }
-
- /* not used now, is for later use */
- if (cmd->convert_src == TRIG_TIMER)
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 125000);
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
@@ -919,19 +893,8 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
/* Step 4: fix up any arguments */
- /* we count in timer steps */
- if (high_speed) {
- /* timing of the conversion itself: every 125 us */
- devpriv->ao_timer = cmd->convert_arg / 125000;
- } else {
- /*
- * timing of the scan: every 1ms
- * we get all channels at once
- */
- devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
- }
- if (devpriv->ao_timer < 1)
- err |= -EINVAL;
+ tmp = rounddown(cmd->scan_begin_arg, 1000000);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
if (err)
return 4;
@@ -948,6 +911,14 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
down(&devpriv->sem);
+ /*
+ * For now, only "scan" timing is supported. A future version may
+ * support "convert" timing in high speed mode.
+ *
+ * Timing of the scan: every 1ms all channels updated at once.
+ */
+ devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+
devpriv->ao_counter = devpriv->ao_timer;
if (cmd->start_src == TRIG_NOW) {
@@ -1427,10 +1398,7 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
urb->transfer_buffer_length = SIZEOUTBUF;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = SIZEOUTBUF;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
+ urb->interval = 1; /* (u)frames */
}
if (devpriv->pwm_buf_sz) {
@@ -1653,7 +1621,7 @@ static struct usb_driver usbduxsigma_usb_driver = {
};
module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);
-MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
-MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com");
+MODULE_AUTHOR("Bernd Porr, mail@berndporr.me.uk");
+MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- mail@berndporr.me.uk");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE);
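
Both usbduxsigma cmdtest rewrites above drop the private-data timer bookkeeping from step 4 in favour of the standard pattern: round the requested period down to the hardware resolution and let comedi_check_trigger_arg_is() fix up (and flag) any mismatch. A sketch of that pattern on its own, assuming the full-speed 1 ms frame resolution:

/*
 * Step 4 idiom: clamp scan_begin_arg to a whole number of 1 ms USB
 * frames; comedi_check_trigger_arg_is() rewrites the argument and
 * returns -EINVAL if it had to change it, so userspace can retry.
 */
static int fixup_scan_begin(struct comedi_cmd *cmd)
{
	unsigned int tmp = rounddown(cmd->scan_begin_arg, 1000000);

	return comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
}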
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 6a393b24bdd9..ce3a58a7a171 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -102,7 +102,18 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
* @s: comedi_subdevice struct
* @n: number of elements in the chanlist
* @chanlist: the chanlist to validate
-*/
+ *
+ * Each element consists of a channel number, a range index, an analog
+ * reference type and some flags, all packed into an unsigned int.
+ *
+ * This checks that the channel number and range index are supported by
+ * the comedi subdevice. It does not check whether the analog reference
+ * type and the flags are supported. Drivers that care should check those
+ * themselves.
+ *
+ * Return: %0 if all @chanlist elements are valid (success),
+ * %-EINVAL if one or more elements are invalid.
+ */
int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int *chanlist)
{
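
The new kernel-doc above describes a chanlist element as a channel number, range index, analog reference and flags packed into one unsigned int; in comedi these are built and taken apart with the CR_* macros. A short illustration (bit layout as defined in comedi.h: aref in bits 25:24, range in bits 23:16, channel in bits 15:0):

/* chan 3, range index 1, differential reference */
unsigned int spec = CR_PACK(3, 1, AREF_DIFF);

unsigned int chan  = CR_CHAN(spec);	/* 3 */
unsigned int range = CR_RANGE(spec);	/* 1 */
unsigned int aref  = CR_AREF(spec);	/* AREF_DIFF */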
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
index 26b0446d943a..9112dd2bf4d7 100644
--- a/drivers/staging/dgap/dgap.c
+++ b/drivers/staging/dgap/dgap.c
@@ -4953,9 +4953,8 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = put_user(C_CLOCAL(tty) ? 1 : 0,
+ return put_user(C_CLOCAL(tty) ? 1 : 0,
(unsigned long __user *) arg);
- return rc;
case TIOCSSOFTCAR:
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
@@ -7004,25 +7003,29 @@ static void dgap_cleanup_board(struct board_t *brd)
kfree(brd);
}
-static void dgap_remove_one(struct pci_dev *dev)
+static void dgap_stop(bool removesys, struct pci_driver *drv)
{
- unsigned int i;
- ulong lock_flags;
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
+ unsigned long lock_flags;
spin_lock_irqsave(&dgap_poll_lock, lock_flags);
dgap_poll_stop = 1;
spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
- /* Turn off poller right away. */
del_timer_sync(&dgap_poll_timer);
-
- dgap_remove_driver_sysfiles(drv);
+ if (removesys)
+ dgap_remove_driver_sysfiles(drv);
device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
class_destroy(dgap_class);
unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+}
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ unsigned int i;
+ struct pci_driver *drv = to_pci_driver(dev->dev.driver);
+
+ dgap_stop(true, drv);
for (i = 0; i < dgap_numboards; ++i) {
dgap_remove_ports_sysfiles(dgap_board[i]);
dgap_cleanup_tty(dgap_board[i]);
@@ -7096,21 +7099,6 @@ failed_class:
return rc;
}
-static void dgap_stop(void)
-{
- unsigned long lock_flags;
-
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- del_timer_sync(&dgap_poll_timer);
-
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-}
-
/************************************************************************
*
* Driver load/unload functions
@@ -7133,8 +7121,10 @@ static int dgap_init_module(void)
return rc;
rc = pci_register_driver(&dgap_driver);
- if (rc)
- goto err_stop;
+ if (rc) {
+ dgap_stop(false, NULL);
+ return rc;
+ }
rc = dgap_create_driver_sysfiles(&dgap_driver);
if (rc)
@@ -7146,9 +7136,6 @@ static int dgap_init_module(void)
err_unregister:
pci_unregister_driver(&dgap_driver);
-err_stop:
- dgap_stop();
-
return rc;
}
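
The dgap change folds the duplicated teardown into a single dgap_stop() helper with a flag saying whether driver sysfs files exist yet, so the init error path and the remove path cannot drift apart. The resulting init flow, sketched with hypothetical names rather than the driver's exact ones:

/*
 * Sketch of the init flow after the refactor: one teardown helper,
 * called with removesys=false on paths where the sysfs files were
 * never created.
 */
static int example_init(void)
{
	int rc;

	rc = example_start();		/* poll timer, chrdev, class */
	if (rc)
		return rc;

	rc = pci_register_driver(&example_driver);
	if (rc) {
		example_stop(false, NULL);	/* no sysfs files yet */
		return rc;
	}

	rc = example_create_sysfiles(&example_driver);
	if (rc) {
		pci_unregister_driver(&example_driver);
		example_stop(false, NULL);
		return rc;
	}
	return 0;
}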
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index d04671fa4b75..06ece5151fe4 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -21,9 +21,9 @@
#ifndef __DGNC_DRIVER_H
#define __DGNC_DRIVER_H
-#include <linux/types.h> /* To pick up the varions Linux types */
-#include <linux/tty.h> /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h> /* For irqreturn_t type */
+#include <linux/types.h>
+#include <linux/tty.h>
+#include <linux/interrupt.h>
#include "digi.h" /* Digi specific ioctl header */
#include "dgnc_sysfs.h" /* Support for SYSFS */
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
index be0f90a6768b..7be7d55bc49e 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ b/drivers/staging/dgnc/dgnc_sysfs.h
@@ -25,16 +25,16 @@ struct un_t;
struct pci_driver;
struct class_device;
-extern void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-extern void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
+void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
+void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
-extern void dgnc_create_driver_sysfiles(struct pci_driver *);
-extern void dgnc_remove_driver_sysfiles(struct pci_driver *);
+void dgnc_create_driver_sysfiles(struct pci_driver *);
+void dgnc_remove_driver_sysfiles(struct pci_driver *);
-extern int dgnc_tty_class_init(void);
-extern int dgnc_tty_class_destroy(void);
+int dgnc_tty_class_init(void);
+int dgnc_tty_class_destroy(void);
-extern void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-extern void dgnc_remove_tty_sysfs(struct device *c);
+void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
+void dgnc_remove_tty_sysfs(struct device *c);
#endif
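
The dgnc header cleanup relies on `extern` being implicit on function declarations, so the two forms below declare exactly the same symbol and dropping the keyword is purely cosmetic:

extern void dgnc_remove_tty_sysfs(struct device *c);	/* old style */
void dgnc_remove_tty_sysfs(struct device *c);		/* same declaration */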
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 4178d96f94cf..b6b76ff09657 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -3153,36 +3153,46 @@ static const struct usb_gadget_ops nbu2ss_gadget_ops = {
.ioctl = nbu2ss_gad_ioctl,
};
-static const char g_ep0_name[] = "ep0";
-static const char g_ep1_name[] = "ep1-bulk";
-static const char g_ep2_name[] = "ep2-bulk";
-static const char g_ep3_name[] = "ep3in-int";
-static const char g_ep4_name[] = "ep4-iso";
-static const char g_ep5_name[] = "ep5-iso";
-static const char g_ep6_name[] = "ep6-bulk";
-static const char g_ep7_name[] = "ep7-bulk";
-static const char g_ep8_name[] = "ep8in-int";
-static const char g_ep9_name[] = "ep9-iso";
-static const char g_epa_name[] = "epa-iso";
-static const char g_epb_name[] = "epb-bulk";
-static const char g_epc_name[] = "epc-nulk";
-static const char g_epd_name[] = "epdin-int";
-
-static const char *gp_ep_name[NUM_ENDPOINTS] = {
- g_ep0_name,
- g_ep1_name,
- g_ep2_name,
- g_ep3_name,
- g_ep4_name,
- g_ep5_name,
- g_ep6_name,
- g_ep7_name,
- g_ep8_name,
- g_ep9_name,
- g_epa_name,
- g_epb_name,
- g_epc_name,
- g_epd_name,
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[NUM_ENDPOINTS] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO("ep0",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep2-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep3in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep5-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep6-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep7-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep8in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep9-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("epa-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("epb-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("epc-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("epdin-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+
+#undef EP_INFO
};
/*-------------------------------------------------------------------------*/
@@ -3200,10 +3210,12 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
ep->desc = NULL;
ep->ep.driver_data = NULL;
- ep->ep.name = gp_ep_name[i];
+ ep->ep.name = ep_info[i].name;
+ ep->ep.caps = ep_info[i].caps;
ep->ep.ops = &nbu2ss_ep_ops;
- ep->ep.maxpacket = (i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
+ usb_ep_set_maxpacket_limit(&ep->ep,
+ i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
INIT_LIST_HEAD(&ep->queue);
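
The emxx_udc change collapses fourteen separate name strings into one table built with a locally scoped helper macro, pairing each endpoint name with its usb_ep_caps. The pattern in miniature (endpoint names here are illustrative):

/*
 * Scoped table macro: defined just for the initializer and #undef'd
 * immediately after, so it cannot leak into other code.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} example_ep_info[] = {
#define EP_INFO(_name, _caps) { .name = _name, .caps = _caps }
	EP_INFO("ep0", USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
				   USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					USB_EP_CAPS_DIR_ALL)),
#undef EP_INFO
};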
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index d4018780ce58..d473010fa474 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,6 +1,7 @@
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI && GPIOLIB
+ depends on FB && SPI
+ depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -152,6 +153,12 @@ config FB_TFT_TLS8204
help
Generic Framebuffer support for TLS8204
+config FB_TFT_UC1611
+ tristate "FB driver for the UC1611 LCD controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for UC1611
+
config FB_TFT_UC1701
tristate "FB driver for the UC1701 LCD Controller"
depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 554b5260b0ee..b26efdc87775 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_FB_TFT_SSD1351) += fb_ssd1351.o
obj-$(CONFIG_FB_TFT_ST7735R) += fb_st7735r.o
obj-$(CONFIG_FB_TFT_TINYLCD) += fb_tinylcd.o
obj-$(CONFIG_FB_TFT_TLS8204) += fb_tls8204.o
+obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
new file mode 100644
index 000000000000..32f3a9d921d6
--- /dev/null
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -0,0 +1,350 @@
+/*
+ * FB driver for the UltraChip UC1611 LCD controller
+ *
+ * The display is 4-bit grayscale (16 shades) 240x160.
+ *
+ * Copyright (C) 2015 Henri Chain
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_uc1611"
+#define WIDTH 240
+#define HEIGHT 160
+#define BPP 8
+#define FPS 40
+
+/*
+ * LCD voltage is a combination of ratio, gain, pot and temp
+ *
+ * V_LCD = V_BIAS * ratio
+ * V_LCD = (C_V0 + C_PM * pot) * (1 + (T - 25) * temp)
+ * C_V0 and C_PM depend on ratio and gain
+ * T is ambient temperature
+ */
+
+/* BR -> actual ratio: 0-3 -> 5, 10, 11, 13 */
+static unsigned ratio = 2;
+module_param(ratio, uint, 0);
+MODULE_PARM_DESC(ratio, "BR[1:0] Bias voltage ratio: 0-3 (default: 2)");
+
+static unsigned gain = 3;
+module_param(gain, uint, 0);
+MODULE_PARM_DESC(gain, "GN[1:0] Bias voltage gain: 0-3 (default: 3)");
+
+static unsigned pot = 16;
+module_param(pot, uint, 0);
+MODULE_PARM_DESC(pot, "PM[6:0] Bias voltage pot.: 0-63 (default: 16)");
+
+/* TC -> % compensation per deg C: 0-3 -> -.05, -.10, -.15, -.20 */
+static unsigned temp;
+module_param(temp, uint, 0);
+MODULE_PARM_DESC(temp, "TC[1:0] Temperature compensation: 0-3 (default: 0)");
+
+/* PC[1:0] -> LCD capacitance: 0-3 -> <20nF, 20-28 nF, 29-40 nF, 40-56 nF */
+static unsigned load = 1;
+module_param(load, uint, 0);
+MODULE_PARM_DESC(load, "PC[1:0] Panel Loading: 0-3 (default: 1)");
+
+/* PC[3:2] -> V_LCD: 0, 1, 3 -> ext., int. with ratio = 5, int. standard */
+static unsigned pump = 3;
+module_param(pump, uint, 0);
+MODULE_PARM_DESC(pump, "PC[3:2] Pump control: 0,1,3 (default: 3)");
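+
+/*
+ * Illustrative check (derived from init_display() below, not from the
+ * UC1611 datasheet): with the default parameters ratio=2, gain=3 and
+ * pot=16, the driver emits 0xE8 | 2 = 0xEA for the bias ratio, then
+ * the two-byte sequence 0x81, (3 << 6) | 16 = 0xD0 for bias gain and
+ * potentiometer.
+ */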
+
+static int init_display(struct fbtft_par *par)
+{
+ int ret;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* Set CS active high */
+ par->spi->mode |= SPI_CS_HIGH;
+ ret = par->spi->master->setup(par->spi);
+ if (ret) {
+ dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+ return ret;
+ }
+
+ /* Reset controller */
+ write_reg(par, 0xE2);
+
+ /* Set bias ratio */
+ write_reg(par, 0xE8 | (ratio & 0x03));
+
+ /* Set bias gain and potentiometer */
+ write_reg(par, 0x81);
+ write_reg(par, (gain & 0x03) << 6 | (pot & 0x3F));
+
+ /* Set temperature compensation */
+ write_reg(par, 0x24 | (temp & 0x03));
+
+ /* Set panel loading */
+ write_reg(par, 0x28 | (load & 0x03));
+
+ /* Set pump control */
+ write_reg(par, 0x2C | (pump & 0x03));
+
+ /* Set inverse display */
+ write_reg(par, 0xA6 | (0x01 & 0x01));
+
+ /* Set 4-bit grayscale mode */
+ write_reg(par, 0xD0 | (0x02 & 0x03));
+
+ /* Set Display enable */
+ write_reg(par, 0xA8 | 0x07);
+
+ return 0;
+}
+
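+/*
+ * Worked example for set_addr_win() below (illustrative): in the
+ * default rotation with ys = 10, the page address is 10 >> 1 = 5,
+ * so the controller receives 0x60 | 5 = 0x65 followed by
+ * 0x70 | (10 >> 5) = 0x70.
+ */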
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
+ __func__, xs, ys, xe, ye);
+
+ switch (par->info->var.rotate) {
+ case 90:
+ case 270:
+ /* Set column address */
+ write_reg(par, ys & 0x0F);
+ write_reg(par, 0x10 | (ys >> 4));
+
+ /* Set page address (divide xs by 2) (not used by driver) */
+ write_reg(par, 0x60 | ((xs >> 1) & 0x0F));
+ write_reg(par, 0x70 | (xs >> 5));
+ break;
+ default:
+ /* Set column address (not used by driver) */
+ write_reg(par, xs & 0x0F);
+ write_reg(par, 0x10 | (xs >> 4));
+
+ /* Set page address (divide ys by 2) */
+ write_reg(par, 0x60 | ((ys >> 1) & 0x0F));
+ write_reg(par, 0x70 | (ys >> 5));
+ break;
+ }
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+
+ if (on)
+ write_reg(par, 0xA8 | 0x00);
+ else
+ write_reg(par, 0xA8 | 0x07);
+ return 0;
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ /* par->info->fix.visual = FB_VISUAL_PSEUDOCOLOR; */
+ par->info->var.grayscale = 1;
+ par->info->var.red.offset = 0;
+ par->info->var.red.length = 8;
+ par->info->var.green.offset = 0;
+ par->info->var.green.length = 8;
+ par->info->var.blue.offset = 0;
+ par->info->var.blue.length = 8;
+ par->info->var.transp.offset = 0;
+ par->info->var.transp.length = 0;
+
+ switch (par->info->var.rotate) {
+ case 90:
+ /* Set RAM address control */
+ write_reg(par, 0x88
+ | (0x0 & 0x1) << 2 /* Increment positively */
+ | (0x1 & 0x1) << 1 /* Increment page first */
+ | (0x1 & 0x1)); /* Wrap around (default) */
+
+ /* Set LCD mapping */
+ write_reg(par, 0xC0
+ | (0x0 & 0x1) << 2 /* Mirror Y OFF */
+ | (0x0 & 0x1) << 1 /* Mirror X OFF */
+ | (0x0 & 0x1)); /* MS nibble last (default) */
+ break;
+ case 180:
+ /* Set RAM address control */
+ write_reg(par, 0x88
+ | (0x0 & 0x1) << 2 /* Increment positively */
+ | (0x0 & 0x1) << 1 /* Increment column first */
+ | (0x1 & 0x1)); /* Wrap around (default) */
+
+ /* Set LCD mapping */
+ write_reg(par, 0xC0
+ | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x0 & 0x1) << 1 /* Mirror X OFF */
+ | (0x0 & 0x1)); /* MS nibble last (default) */
+ break;
+ case 270:
+ /* Set RAM address control */
+ write_reg(par, 0x88
+ | (0x0 & 0x1) << 2 /* Increment positively */
+ | (0x1 & 0x1) << 1 /* Increment page first */
+ | (0x1 & 0x1)); /* Wrap around (default) */
+
+ /* Set LCD mapping */
+ write_reg(par, 0xC0
+ | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x0 & 0x1)); /* MS nibble last (default) */
+ break;
+ default:
+ /* Set RAM address control */
+ write_reg(par, 0x88
+ | (0x0 & 0x1) << 2 /* Increment positively */
+ | (0x0 & 0x1) << 1 /* Increment column first */
+ | (0x1 & 0x1)); /* Wrap around (default) */
+
+ /* Set LCD mapping */
+ write_reg(par, 0xC0
+ | (0x0 & 0x1) << 2 /* Mirror Y OFF */
+ | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x0 & 0x1)); /* MS nibble last (default) */
+ break;
+ }
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u8 *vmem8 = (u8 *)(par->info->screen_base);
+ u8 *buf8 = (u8 *)(par->txbuf.buf);
+ u16 *buf16 = (u16 *)(par->txbuf.buf);
+ int line_length = par->info->fix.line_length;
+ int y_start = (offset / line_length);
+ int y_end = (offset + len - 1) / line_length;
+ int x, y, i;
+ int ret = 0;
+
+ fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+ switch (par->pdata->display.buswidth) {
+ case 8:
+ switch (par->info->var.rotate) {
+ case 90:
+ case 270:
+ i = y_start * line_length;
+ for (y = y_start; y <= y_end; y++) {
+ for (x = 0; x < line_length; x += 2) {
+ *buf8 = vmem8[i] >> 4;
+ *buf8 |= vmem8[i + 1] & 0xF0;
+ buf8++;
+ i += 2;
+ }
+ }
+ break;
+ default:
+ /* Must be even because pages are two lines */
+ y_start &= 0xFE;
+ i = y_start * line_length;
+ for (y = y_start; y <= y_end; y += 2) {
+ for (x = 0; x < line_length; x++) {
+ *buf8 = vmem8[i] >> 4;
+ *buf8 |= vmem8[i + line_length] & 0xF0;
+ buf8++;
+ i++;
+ }
+ i += line_length;
+ }
+ break;
+ }
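+
+		/*
+		 * At this point txbuf holds two 4-bit pixels per byte; for
+		 * example (illustrative), source bytes 0xAB and 0xCD pack to
+		 * (0xAB >> 4) | (0xCD & 0xF0) = 0xCA.
+		 */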
+ gpio_set_value(par->gpio.dc, 1);
+
+ /* Write data */
+ ret = par->fbtftops.write(par, par->txbuf.buf, len / 2);
+ break;
+ case 9:
+ switch (par->info->var.rotate) {
+ case 90:
+ case 270:
+ i = y_start * line_length;
+ for (y = y_start; y <= y_end; y++) {
+ for (x = 0; x < line_length; x += 2) {
+ *buf16 = 0x100;
+ *buf16 |= vmem8[i] >> 4;
+ *buf16 |= vmem8[i + 1] & 0xF0;
+ buf16++;
+ i += 2;
+ }
+ }
+ break;
+ default:
+ /* Must be even because pages are two lines */
+ y_start &= 0xFE;
+ i = y_start * line_length;
+ for (y = y_start; y <= y_end; y += 2) {
+ for (x = 0; x < line_length; x++) {
+ *buf16 = 0x100;
+ *buf16 |= vmem8[i] >> 4;
+ *buf16 |= vmem8[i + line_length] & 0xF0;
+ buf16++;
+ i++;
+ }
+ i += line_length;
+ }
+ break;
+ }
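+
+		/*
+		 * Each 16-bit word now carries the 9th data/command bit: e.g.
+		 * (illustrative) packed byte 0xCA is sent as 0x100 | 0xCA =
+		 * 0x1CA on the 9-bit bus.
+		 */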
+
+ /* Write data */
+ ret = par->fbtftops.write(par, par->txbuf.buf, len);
+ break;
+ default:
+ dev_err(par->info->device, "unsupported buswidth %d\n",
+ par->pdata->display.buswidth);
+ }
+
+ if (ret < 0)
+ dev_err(par->info->device, "write failed and returned: %d\n",
+ ret);
+
+ return ret;
+}
+
+static struct fbtft_display display = {
+ .txbuflen = -1,
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .bpp = BPP,
+ .fps = FPS,
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ .blank = blank,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "ultrachip,uc1611", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:uc1611");
+MODULE_ALIAS("platform:uc1611");
+
+MODULE_DESCRIPTION("FB driver for the UC1611 LCD controller");
+MODULE_AUTHOR("Henri Chain");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 9cc81412be69..23392eb6799e 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -677,13 +677,13 @@ static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
*
*/
struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
- struct device *dev)
+ struct device *dev,
+ struct fbtft_platform_data *pdata)
{
struct fb_info *info;
struct fbtft_par *par;
struct fb_ops *fbops = NULL;
struct fb_deferred_io *fbdefio = NULL;
- struct fbtft_platform_data *pdata = dev->platform_data;
u8 *vmem = NULL;
void *txbuf = NULL;
void *buf = NULL;
@@ -828,7 +828,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
par = info->par;
par->info = info;
- par->pdata = dev->platform_data;
+ par->pdata = pdata;
par->debug = display->debug;
par->buf = buf;
spin_lock_init(&par->dirty_lock);
@@ -1076,6 +1076,11 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
p = of_prop_next_u32(prop, NULL, &val);
if (!p)
return -EINVAL;
+
+ par->fbtftops.reset(par);
+ if (par->gpio.cs != -1)
+ gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+
while (p) {
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
@@ -1260,12 +1265,11 @@ EXPORT_SYMBOL(fbtft_init_display);
*/
static int fbtft_verify_gpios(struct fbtft_par *par)
{
- struct fbtft_platform_data *pdata;
+ struct fbtft_platform_data *pdata = par->pdata;
int i;
fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
- pdata = par->info->device->platform_data;
if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
par->gpio.dc < 0) {
dev_err(par->info->device,
@@ -1383,10 +1387,9 @@ int fbtft_probe_common(struct fbtft_display *display,
pdata = fbtft_probe_dt(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- dev->platform_data = pdata;
}
- info = fbtft_framebuffer_alloc(display, dev);
+ info = fbtft_framebuffer_alloc(display, dev, pdata);
if (!info)
return -ENOMEM;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 7d817eb26eab..7e9a506d65f9 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -262,39 +262,38 @@ struct fbtft_par {
par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__)
/* fbtft-core.c */
-extern void fbtft_dbg_hex(const struct device *dev,
- int groupsize, void *buf, size_t len, const char *fmt, ...);
-extern struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
- struct device *dev);
-extern void fbtft_framebuffer_release(struct fb_info *info);
-extern int fbtft_register_framebuffer(struct fb_info *fb_info);
-extern int fbtft_unregister_framebuffer(struct fb_info *fb_info);
-extern void fbtft_register_backlight(struct fbtft_par *par);
-extern void fbtft_unregister_backlight(struct fbtft_par *par);
-extern int fbtft_init_display(struct fbtft_par *par);
-extern int fbtft_probe_common(struct fbtft_display *display,
- struct spi_device *sdev, struct platform_device *pdev);
-extern int fbtft_remove_common(struct device *dev, struct fb_info *info);
+void fbtft_dbg_hex(const struct device *dev, int groupsize,
+ void *buf, size_t len, const char *fmt, ...);
+struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ struct device *dev,
+ struct fbtft_platform_data *pdata);
+void fbtft_framebuffer_release(struct fb_info *info);
+int fbtft_register_framebuffer(struct fb_info *fb_info);
+int fbtft_unregister_framebuffer(struct fb_info *fb_info);
+void fbtft_register_backlight(struct fbtft_par *par);
+void fbtft_unregister_backlight(struct fbtft_par *par);
+int fbtft_init_display(struct fbtft_par *par);
+int fbtft_probe_common(struct fbtft_display *display, struct spi_device *sdev,
+ struct platform_device *pdev);
+int fbtft_remove_common(struct device *dev, struct fb_info *info);
/* fbtft-io.c */
-extern int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_spi_emulate_9(struct fbtft_par *par,
- void *buf, size_t len);
-extern int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio16_wr_latched(struct fbtft_par *par,
- void *buf, size_t len);
+int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio16_wr_latched(struct fbtft_par *par, void *buf, size_t len);
/* fbtft-bus.c */
-extern int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len);
-extern void fbtft_write_reg8_bus8(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len);
+void fbtft_write_reg8_bus8(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
#define FBTFT_REGISTER_DRIVER(_name, _compatible, _display) \
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 211d504901f2..fa916e88d549 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -397,6 +397,37 @@ static struct fbtft_device_display displays[] = {
}
}
}, {
+ .name = "ew24ha0",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_uc1611",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 8,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ { "dc", 24 },
+ {},
+ },
+ }
+ }
+ }, {
+ .name = "ew24ha0_9bit",
+ .spi = &(struct spi_board_info) {
+ .modalias = "fb_uc1611",
+ .max_speed_hz = 32000000,
+ .mode = SPI_MODE_3,
+ .platform_data = &(struct fbtft_platform_data) {
+ .display = {
+ .buswidth = 9,
+ },
+ .gpios = (const struct fbtft_gpio []) {
+ {},
+ },
+ }
+ }
+ }, {
.name = "flexfb",
.spi = &(struct spi_board_info) {
.modalias = "flexfb",
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index 2c4ce07f5106..c763efc5de7d 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -30,7 +26,6 @@
#define DRVNAME "flexfb"
-
static char *chip;
module_param(chip, charp, 0);
MODULE_PARM_DESC(chip, "LCD controller");
@@ -68,7 +63,6 @@ static bool latched;
module_param(latched, bool, 0);
MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
static int *initp;
static int initp_num;
@@ -132,14 +126,115 @@ static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3
-1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
-1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
+/**
+ * struct flexfb_lcd_controller - Describes the LCD controller properties
+ * @name: Model name of the chip
+ * @width: Width of display in pixels
+ * @height: Height of display in pixels
+ * @setaddrwin: Which set_addr_win() implementation to use
+ * @regwidth: LCD Controller Register width in bits
+ * @init_seq: LCD initialization sequence
+ * @init_seq_sz: Size of LCD initialization sequence
+ */
+struct flexfb_lcd_controller {
+ const char *name;
+ unsigned int width;
+ unsigned int height;
+ unsigned int setaddrwin;
+ unsigned int regwidth;
+ int *init_seq;
+ int init_seq_sz;
+};
+
+static const struct flexfb_lcd_controller flexfb_chip_table[] = {
+ {
+ .name = "st7735r",
+ .width = 128,
+ .height = 160,
+ .init_seq = st7735r_init,
+ .init_seq_sz = ARRAY_SIZE(st7735r_init),
+ },
+ {
+ .name = "hx8340bn",
+ .width = 176,
+ .height = 220,
+ .init_seq = hx8340bn_init,
+ .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
+ },
+ {
+ .name = "ili9225",
+ .width = 176,
+ .height = 220,
+ .regwidth = 16,
+ .init_seq = ili9225_init,
+ .init_seq_sz = ARRAY_SIZE(ili9225_init),
+ },
+ {
+ .name = "ili9320",
+ .width = 240,
+ .height = 320,
+ .setaddrwin = 1,
+ .regwidth = 16,
+ .init_seq = ili9320_init,
+ .init_seq_sz = ARRAY_SIZE(ili9320_init),
+ },
+ {
+ .name = "ili9325",
+ .width = 240,
+ .height = 320,
+ .setaddrwin = 1,
+ .regwidth = 16,
+ .init_seq = ili9325_init,
+ .init_seq_sz = ARRAY_SIZE(ili9325_init),
+ },
+ {
+ .name = "ili9341",
+ .width = 240,
+ .height = 320,
+ .init_seq = ili9341_init,
+ .init_seq_sz = ARRAY_SIZE(ili9341_init),
+ },
+ {
+ .name = "ssd1289",
+ .width = 240,
+ .height = 320,
+ .setaddrwin = 2,
+ .regwidth = 16,
+ .init_seq = ssd1289_init,
+ .init_seq_sz = ARRAY_SIZE(ssd1289_init),
+ },
+ {
+ .name = "ssd1351",
+ .width = 128,
+ .height = 128,
+ .setaddrwin = 3,
+ .init_seq = ssd1351_init,
+ .init_seq_sz = ARRAY_SIZE(ssd1351_init),
+ },
+};
/* ili9320, ili9325 */
static void flexfb_set_addr_win_1(struct fbtft_par *par,
int xs, int ys, int xe, int ye)
{
- fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
- "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
- __func__, xs, ys, xe, ye);
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
+ __func__, xs, ys, xe, ye);
switch (par->info->var.rotate) {
/* R20h = Horizontal GRAM Start Address */
/* R21h = Vertical GRAM Start Address */
@@ -242,7 +337,7 @@ static int flexfb_verify_gpios_db(struct fbtft_par *par)
return -EINVAL;
}
if (latched)
- num_db = buswidth/2;
+ num_db = buswidth / 2;
for (i = 0; i < num_db; i++) {
if (par->gpio.db[i] < 0) {
dev_err(par->info->device,
@@ -255,8 +350,38 @@ static int flexfb_verify_gpios_db(struct fbtft_par *par)
return 0;
}
+static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
+{
+ if (!width)
+ width = chip->width;
+ if (!height)
+ height = chip->height;
+ setaddrwin = chip->setaddrwin;
+ if (chip->regwidth)
+ regwidth = chip->regwidth;
+ if (!init_num) {
+ initp = chip->init_seq;
+ initp_num = chip->init_seq_sz;
+ }
+}
+
static struct fbtft_display flex_display = { };
+static int flexfb_chip_init(const struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
+ if (!strcmp(chip, flexfb_chip_table[i].name)) {
+ flexfb_chip_load_param(&flexfb_chip_table[i]);
+ return 0;
+ }
+
+ dev_err(dev, "chip=%s is not supported\n", chip);
+
+ return -EINVAL;
+}
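+
+/*
+ * Example (illustrative): with the table above, loading the module as
+ * "modprobe flexfb chip=ssd1351" resolves width=128, height=128,
+ * setaddrwin=3 and the ssd1351 init sequence, unless width, height or
+ * init are overridden on the command line.
+ */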
+
static int flexfb_probe_common(struct spi_device *sdev,
struct platform_device *pdev)
{
@@ -277,110 +402,9 @@ static int flexfb_probe_common(struct spi_device *sdev,
sdev ? "'SPI device'" : "'Platform device'");
if (chip) {
-
- if (!strcmp(chip, "st7735r")) {
- if (!width)
- width = 128;
- if (!height)
- height = 160;
- if (init_num == 0) {
- initp = st7735r_init;
- initp_num = ARRAY_SIZE(st7735r_init);
- }
-
-
- } else if (!strcmp(chip, "hx8340bn")) {
- if (!width)
- width = 176;
- if (!height)
- height = 220;
- setaddrwin = 0;
- if (init_num == 0) {
- initp = hx8340bn_init;
- initp_num = ARRAY_SIZE(hx8340bn_init);
- }
-
-
- } else if (!strcmp(chip, "ili9225")) {
- if (!width)
- width = 176;
- if (!height)
- height = 220;
- setaddrwin = 0;
- regwidth = 16;
- if (init_num == 0) {
- initp = ili9225_init;
- initp_num = ARRAY_SIZE(ili9225_init);
- }
-
-
-
- } else if (!strcmp(chip, "ili9320")) {
- if (!width)
- width = 240;
- if (!height)
- height = 320;
- setaddrwin = 1;
- regwidth = 16;
- if (init_num == 0) {
- initp = ili9320_init;
- initp_num = ARRAY_SIZE(ili9320_init);
- }
-
-
- } else if (!strcmp(chip, "ili9325")) {
- if (!width)
- width = 240;
- if (!height)
- height = 320;
- setaddrwin = 1;
- regwidth = 16;
- if (init_num == 0) {
- initp = ili9325_init;
- initp_num = ARRAY_SIZE(ili9325_init);
- }
-
- } else if (!strcmp(chip, "ili9341")) {
- if (!width)
- width = 240;
- if (!height)
- height = 320;
- setaddrwin = 0;
- regwidth = 8;
- if (init_num == 0) {
- initp = ili9341_init;
- initp_num = ARRAY_SIZE(ili9341_init);
- }
-
-
- } else if (!strcmp(chip, "ssd1289")) {
- if (!width)
- width = 240;
- if (!height)
- height = 320;
- setaddrwin = 2;
- regwidth = 16;
- if (init_num == 0) {
- initp = ssd1289_init;
- initp_num = ARRAY_SIZE(ssd1289_init);
- }
-
-
-
- } else if (!strcmp(chip, "ssd1351")) {
- if (!width)
- width = 128;
- if (!height)
- height = 128;
- setaddrwin = 3;
- if (init_num == 0) {
- initp = ssd1351_init;
- initp_num = ARRAY_SIZE(ssd1351_init);
- }
- } else {
- dev_err(dev, "chip=%s is not supported\n", chip);
- return -EINVAL;
- }
+ ret = flexfb_chip_init(dev);
+ if (ret)
+ return ret;
}
if (width == 0 || height == 0) {
@@ -395,7 +419,7 @@ static int flexfb_probe_common(struct spi_device *sdev,
fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
- info = fbtft_framebuffer_alloc(&flex_display, dev);
+ info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
if (!info)
return -ENOMEM;
@@ -527,8 +551,8 @@ static int flexfb_remove_common(struct device *dev, struct fb_info *info)
return -EINVAL;
par = info->par;
if (par)
- fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
- "%s()\n", __func__);
+ fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
+ __func__);
fbtft_unregister_framebuffer(info);
fbtft_framebuffer_release(info);
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
new file mode 100644
index 000000000000..8214102f104b
--- /dev/null
+++ b/drivers/staging/fsl-mc/README.txt
@@ -0,0 +1,364 @@
+Copyright (C) 2015 Freescale Semiconductor Inc.
+
+DPAA2 (Data Path Acceleration Architecture Gen2)
+------------------------------------------------
+
+This document provides an overview of the Freescale DPAA2 architecture
+and how it is integrated into the Linux kernel.
+
+Contents summary
+ -DPAA2 overview
+ -Overview of DPAA2 objects
+ -DPAA2 Linux driver architecture overview
+ -bus driver
+ -dprc driver
+ -allocator
+ -dpio driver
+ -Ethernet
+ -mac
+
+DPAA2 Overview
+--------------
+
+DPAA2 is a hardware architecture designed for high-speed network
+packet processing. DPAA2 consists of sophisticated mechanisms for
+processing Ethernet packets, queue management, buffer management,
+autonomous L2 switching, virtual Ethernet bridging, and accelerator
+(e.g. crypto) sharing.
+
+A DPAA2 hardware component called the Management Complex (or MC) manages the
+DPAA2 hardware resources. The MC provides an object-based abstraction for
+software drivers to use the DPAA2 hardware.
+
+The MC uses DPAA2 hardware resources such as queues, buffer pools, and
+network ports to create functional objects/devices such as network
+interfaces, an L2 switch, or accelerator instances.
+
+The MC provides memory-mapped I/O command interfaces (MC portals)
+which DPAA2 software drivers use to operate on DPAA2 objects:
+
+ +--------------------------------------+
+ | OS |
+ | DPAA2 drivers |
+ | | |
+ +-----------------------------|--------+
+ |
+ | (create,discover,connect
+ | config,use,destroy)
+ |
+ DPAA2 |
+ +------------------------| mc portal |-+
+ | | |
+ | +- - - - - - - - - - - - -V- - -+ |
+ | | | |
+ | | Management Complex (MC) | |
+ | | | |
+ | +- - - - - - - - - - - - - - - -+ |
+ | |
+ | Hardware Hardware |
+ | Resources Objects |
+ | --------- ------- |
+ | -queues -DPRC |
+ | -buffer pools -DPMCP |
+ | -Eth MACs/ports -DPIO |
+ | -network interface -DPNI |
+ | profiles -DPMAC |
+ | -queue portals -DPBP |
+ | -MC portals ... |
+ | ... |
+ | |
+ +--------------------------------------+
+
+The MC mediates operations such as create, discover,
+connect, configure, and destroy. Fast-path operations
+on data, such as packet transmit/receive, are not mediated by
+the MC and are done directly using memory-mapped regions in
+DPIO objects.
+
+Overview of DPAA2 Objects
+-------------------------
+This section provides a brief overview of some key objects
+in the DPAA2 hardware. A simple scenario is described, illustrating
+the objects involved in creating a network interface.
+
+-DPRC (Datapath Resource Container)
+
+ A DPRC is a container object that holds all the other
+ types of DPAA2 objects. In the example diagram below there
+ are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
+ in the container.
+
+ +---------------------------------------------------------+
+ | DPRC |
+ | |
+ | +-------+ +-------+ +-------+ +-------+ +-------+ |
+ | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
+ | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
+ | | DPMCP | | DPIO | |
+ | +-------+ +-------+ |
+ | | DPMCP | |
+ | +-------+ |
+ | |
+ +---------------------------------------------------------+
+
+ From the point of view of an OS, a DPRC is bus-like. As on
+ a plug-and-play bus such as PCI, DPRC commands can be used to
+ enumerate the contents of the DPRC and discover the hardware
+ objects present (including their mappable regions and interrupts).
+
+ dprc.1 (bus)
+ |
+ +--+--------+-------+-------+-------+
+ | | | | |
+ dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
+ dpmcp.2 dpio.2
+ dpmcp.3
+
+ Hardware objects can be created and destroyed dynamically, providing
+ the ability to hot plug/unplug objects in and out of the DPRC.
+
+ A DPRC has a mappable mmio region (an MC portal) that can be used
+ to send MC commands. It has an interrupt for status events (like
+ hotplug).
+
+ All objects in a container share the same hardware "isolation context".
+ This means that with respect to an IOMMU the isolation granularity
+ is at the DPRC (container) level, not at the individual object
+ level.
+
+ DPRCs can be defined statically and populated with objects
+ via a config file passed to the MC when firmware starts
+ it. There is also a Linux user space tool called "restool"
+ that can be used to create/destroy containers and objects
+ dynamically.
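+
+ For example (illustrative; the exact invocation depends on the
+ restool version), listing the objects in the root container could
+ look like:
+
+ restool dprc show dprc.1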
+
+-DPAA2 Objects for an Ethernet Network Interface
+
+ A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
+ queuing mechanisms, configuration mechanisms, buffer management,
+ physical ports, and interrupts. DPAA2 uses a more granular approach
+ utilizing multiple hardware objects. Each object has specialized
+ functions, and the objects are used together by software to provide
+ Ethernet network interface functionality. This approach provides
+ efficient use of finite
+ hardware resources, flexibility, and performance advantages.
+
+ The diagram below shows the objects needed for a simple
+ network interface configuration on a system with 2 CPUs.
+
+ +---+---+ +---+---+
+ CPU0 CPU1
+ +---+---+ +---+---+
+ | |
+ +---+---+ +---+---+
+ DPIO DPIO
+ +---+---+ +---+---+
+ \ /
+ \ /
+ \ /
+ +---+---+
+ DPNI --- DPBP,DPMCP
+ +---+---+
+ |
+ |
+ +---+---+
+ DPMAC
+ +---+---+
+ |
+ port/PHY
+
+ The objects are described below. For each object a brief description
+ is provided along with a summary of the kinds of operations the object
+ supports and a summary of key resources of the object (mmio regions
+ and irqs).
+
+ -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
+ hardware device that connects to an Ethernet PHY and allows
+ physical transmission and reception of Ethernet frames.
+ -mmio regions: none
+ -irqs: dpmac link change
+ -commands: set link up/down, link config, get stats,
+ irq config, enable, reset
+
+ -DPNI (Datapath Network Interface): contains TX/RX queues,
+ network interface configuration, and rx buffer pool configuration
+ mechanisms.
+ -mmio regions: none
+ -irqs: link state
+ -commands: port config, offload config, queue config,
+ parse/classify config, irq config, enable, reset
+
+ -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
+ packets and do hardware buffer pool management operations. For
+ optimum performance there is typically one DPIO per CPU. This allows
+ each CPU to perform simultaneous enqueue/dequeue operations.
+ -mmio regions: queue operations, buffer mgmt
+ -irqs: data availability, congestion notification, buffer
+ pool depletion
+ -commands: irq config, enable, reset
+
+ -DPBP (Datapath Buffer Pool): represents a hardware buffer
+ pool.
+ -mmio regions: none
+ -irqs: none
+ -commands: enable, reset
+
+ -DPMCP (Datapath MC Portal): provides an MC command portal.
+ Used by drivers to send commands to the MC to manage
+ objects.
+ -mmio regions: MC command portal
+ -irqs: command completion
+ -commands: irq config, enable, reset
+
+ Object Connections
+ ------------------
+ Some objects have explicit relationships that must
+ be configured:
+
+ -DPNI <--> DPMAC
+ -DPNI <--> DPNI
+ -DPNI <--> L2-switch-port
+ A DPNI must be connected to something such as a DPMAC,
+ another DPNI, or L2 switch port. The DPNI connection
+ is made via a DPRC command.
+
+ +-------+ +-------+
+ | DPNI | | DPMAC |
+ +---+---+ +---+---+
+ | |
+ +==========+
+
+ -DPNI <--> DPBP
+ A network interface requires a 'buffer pool' (DPBP
+ object) which provides a list of pointers to memory
+ where received Ethernet data is to be copied. The
+ Ethernet driver configures the DPBPs associated with
+ the network interface.
+
+ Interrupts
+ ----------
+ All interrupts generated by DPAA2 objects are message
+ interrupts. At the hardware level message interrupts
+ generated by devices will normally have 3 components--
+ 1) a non-spoofable 'device-id' expressed on the hardware
+ bus, 2) an address, 3) a data value.
+
+ In the case of DPAA2 devices/objects, all objects in the
+ same container/DPRC share the same 'device-id'.
+ For ARM-based SoCs this is the same as the stream ID.
+
+
+DPAA2 Linux Driver Overview
+---------------------------
+
+This section provides an overview of the Linux kernel drivers for
+DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
+drivers and 2) functional object drivers (such as Ethernet).
+
+As described previously, a DPRC is a container that holds the other
+types of DPAA2 objects. It is functionally similar to a plug-and-play
+bus controller.
+
+Each object in the DPRC is a Linux "device" and is bound to a driver.
+The diagram below shows the Linux drivers involved in a networking
+scenario and the objects bound to each driver.
+
+ +------------+
+ | OS Network |
+ | Stack |
+ +------------+ +------------+
+ | Allocator |. . . . . . . | Ethernet |
+ |(dpmcp,dpbp)| | (dpni) |
+ +-.----------+ +---+---+----+
+ . . ^ |
+ . . <data avail, | |<enqueue,
+ . . tx confirm> | | dequeue>
+ +-------------+ . | |
+ | DPRC driver | . +---+---V----+ +---------+
+ | (dprc) | . . . . . .| DPIO driver| | MAC |
+ +----------+--+ | (dpio) | | (dpmac) |
+ | +------+-----+ +-----+---+
+ |<dev add/remove> | |
+ | | |
+ +----+--------------+ | +--+---+
+ | mc-bus driver | | | PHY |
+ | | | |driver|
+ | /fsl-mc@80c000000 | | +--+---+
+ +-------------------+ | |
+ | |
+ ================================ HARDWARE =========|=================|======
+ DPIO |
+ | |
+ DPNI---DPBP |
+ | |
+ DPMAC |
+ | |
+ PHY ---------------+
+ ===================================================|========================
+
+A brief description of each driver is provided below.
+
+ mc-bus driver
+ -------------
+ The mc-bus driver is a platform driver and is probed from an
+ "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
+ It is responsible for bootstrapping the DPAA2 kernel infrastructure.
+ Key functions include:
+ -registering a new bus type named "fsl-mc" with the kernel,
+ and implementing bus call-backs (e.g. match/uevent/dev_groups)
+ -implementing APIs for DPAA2 driver registration and for device
+ add/remove
+ -creating an MSI irq domain
+ -doing a device add of the 'root' DPRC device, which is needed
+ to bootstrap things
+
+ DPRC driver
+ -----------
+ The dprc-driver is bound to DPRC objects and does runtime management
+ of a bus instance. It performs the initial bus scan of the DPRC
+ and handles interrupts for container events such as hot plug.
+
+ Allocator
+ ----------
+ Certain objects such as DPMCP and DPBP are generic and fungible,
+ and are intended to be used by other drivers. For example,
+ the DPAA2 Ethernet driver needs:
+ -DPMCPs to send MC commands, to configure network interfaces
+ -DPBPs for network buffer pools
+
+ The allocator driver registers for these allocatable object types
+ and those objects are bound to the allocator when the bus is probed.
+ The allocator maintains a pool of objects that are available for
+ allocation by other DPAA2 drivers.
+
+ DPIO driver
+ -----------
+ The DPIO driver is bound to DPIO objects and provides services that allow
+ other drivers such as the Ethernet driver to receive and transmit data.
+ Key services include:
+ -data availability notifications
+ -hardware queuing operations (enqueue and dequeue of data)
+ -hardware buffer pool management
+
+ There is typically one DPIO object per physical CPU for optimum
+ performance, allowing each CPU to simultaneously enqueue
+ and dequeue data.
+
+ The DPIO driver operates on behalf of all DPAA2 drivers
+ active in the kernel-- Ethernet, crypto, compression,
+ etc.
+
+ Ethernet
+ --------
+ The Ethernet driver is bound to a DPNI and implements the kernel
+ interfaces needed to connect the DPAA2 network interface to
+ the network stack.
+
+ Each DPNI corresponds to a Linux network interface.
+
+ MAC driver
+ ----------
+ An Ethernet PHY is an off-chip, board-specific component and is
+ managed by the appropriate PHY driver via an mdio bus. The MAC
+ driver acts as a proxy between the PHY driver and the MC, using
+ MC commands to a DPMAC object.
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index d78288b4e721..389436891b93 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,13 +1,31 @@
-* Add README file (with ASCII art) describing relationships between
- DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc.
- Also, define all acronyms used.
-
* Decide if multiple root fsl-mc buses will be supported per Linux instance,
and if so add support for this.
* Add at least one device driver for a DPAA2 object (child device of the
- fsl-mc bus).
+ fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
+ driver support, which depends on drivers for several objects: DPNI,
+ DPIO, DPMAC. Other pre-requisites include:
+
+ * interrupt support. For meaningful driver support we need
+ interrupts, and thus need message interrupt support by the bus
+ driver.
+ -Note: this has dependencies on generic MSI support work
+ in progress upstream, see [1] and [2].
+
+ * Management Complex (MC) command serialization. Locking mechanisms
+ are needed by drivers to serialize commands sent to the MC, including
+ from atomic context.
+
+ * MC firmware uprev. The MC firmware upon which the fsl-mc
+ bus driver and DPAA2 object drivers are based is continuing
+ to evolve, so minor updates are needed to keep in sync with binary
+ interface changes to the MC.
+
+* Cleanup
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
german.rivera@freescale.com, devel@driverdev.osuosl.org,
linux-kernel@vger.kernel.org
+
+[1] https://lkml.org/lkml/2015/7/9/93
+[2] https://lkml.org/lkml/2015/7/7/712
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 5992670f7747..e1861cf5de73 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -42,17 +42,16 @@ struct ft1000_pcmcia {
struct pcmcia_device;
struct net_device;
-extern struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset);
-extern void stop_ft1000_card(struct net_device *dev);
-extern int card_download(struct net_device *dev, const u8 *pFileStart,
- size_t FileLength);
+struct net_device *init_ft1000_card(struct pcmcia_device *link,
+ void *ft1000_reset);
+void stop_ft1000_card(struct net_device *dev);
+int card_download(struct net_device *dev, const u8 *pFileStart,
+ size_t FileLength);
-extern u16 ft1000_read_dpram(struct net_device *dev, int offset);
-extern void card_bootload(struct net_device *dev);
-extern u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset,
- int Index);
-extern u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
+u16 ft1000_read_dpram(struct net_device *dev, int offset);
+void card_bootload(struct net_device *dev);
+u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
+u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value);
/* Read the value of a given ASIC register. */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 409266b1a886..f241a3a5a684 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -260,7 +260,8 @@ void ft1000_destroy_dev(struct net_device *netdev)
/* Make sure we free any memory reserve for slow Queue */
for (i = 0; i < MAX_NUM_APP; i++) {
while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next,
+ struct dpram_blk, list);
list_del(&pdpram_blk->list);
ft1000_free_buffer(pdpram_blk, &freercvpool);
@@ -415,12 +416,19 @@ static long ft1000_ioctl(struct file *file, unsigned int command,
struct timeval tv;
struct IOCTL_GET_VER get_ver_data;
struct IOCTL_GET_DSP_STAT get_stat_data;
- u8 ConnectionMsg[] = {0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
- 0x00, 0x01, 0x00, 0x00};
+ u8 ConnectionMsg[] = {
+ 0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x00, 0x00
+ };
unsigned short ledStat = 0;
unsigned short conStat = 0;
@@ -495,10 +503,12 @@ static long ft1000_ioctl(struct file *file, unsigned int command,
memcpy(get_stat_data.eui64, info->eui64, EUISZ);
if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED,
+ (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
get_stat_data.LedStat = ntohs(ledStat);
pr_debug("LedStat = 0x%x\n", get_stat_data.LedStat);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE,
+ (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
get_stat_data.ConStat = ntohs(conStat);
pr_debug("ConStat = 0x%x\n", get_stat_data.ConStat);
} else {
@@ -689,7 +699,8 @@ static long ft1000_ioctl(struct file *file, unsigned int command,
if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
/* pr_debug("Message detected in slow queue\n"); */
spin_lock_irqsave(&free_buff_lock, flags);
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next,
+ struct dpram_blk, list);
list_del(&pdpram_blk->list);
ft1000dev->app_info[i].NumOfMsg--;
/* pr_debug("NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 5def347beb08..297b7aece506 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -95,7 +95,6 @@ struct dsp_file_hdr {
long nDspImages; /* Number of DSP images in file. */
};
-#pragma pack(1)
struct dsp_image_info {
long coff_date; /* Date/time when DSP Coff image was built. */
long begin_offset; /* Offset in file where image begins. */
@@ -105,7 +104,7 @@ struct dsp_image_info {
long version; /* Embedded version # of DSP code. */
unsigned short checksum; /* DSP File checksum */
unsigned short pad1;
-};
+} __packed;
/* checks if the doorbell register is cleared */
@@ -180,7 +179,8 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
}
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
+ DWNLD_MAG1_HANDSHAKE_LOC,
+ (u8 *)&handshake, 1);
handshake = ntohs(handshake);
if (status)
@@ -281,12 +281,14 @@ static u16 get_request_type(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC,
+ (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_TYPE_LOC,
+ (u8 *)&tempword, 1);
tempx |= (tempword << 16);
tempx = ntohl(tempx);
}
@@ -304,7 +306,8 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC,
+ (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
if (ft1000dev->usbboot == 2) {
@@ -332,14 +335,17 @@ static long get_request_value(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
+ DWNLD_MAG1_SIZE_LOC,
+ (u8 *)&value);
value = ntohl(value);
} else {
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
+ DWNLD_MAG1_SIZE_LOC,
+ (u8 *)&tempword, 0);
value = tempword;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_SIZE_LOC,
+ (u8 *)&tempword, 1);
value |= (tempword << 16);
value = ntohl(value);
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index e6b5976a09e3..96209703ba25 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -842,7 +842,6 @@ static int ft1000_copy_up_pkt(struct urb *urb)
skb = dev_alloc_skb(len + 12 + 2);
if (skb == NULL) {
- pr_debug("No Network buffers available\n");
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
return -1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index fea60d5651a7..9b5050fcbb66 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -134,8 +134,8 @@ extern spinlock_t free_buff_lock;
int ft1000_create_dev(struct ft1000_usb *dev);
void ft1000_destroy_dev(struct net_device *dev);
-extern int card_send_command(struct ft1000_usb *ft1000dev,
- void *ptempbuffer, int size);
+int card_send_command(struct ft1000_usb *ft1000dev,
+ void *ptempbuffer, int size);
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h
index 8ce544de7342..7afb9ba5fdba 100644
--- a/drivers/staging/gdm72xx/usb_ids.h
+++ b/drivers/staging/gdm72xx/usb_ids.h
@@ -32,7 +32,9 @@
#define BL_PID_MASK 0xffc0
#define USB_DEVICE_BOOTLOADER(vid, pid) \
- {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}, \
+ {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}
+
+#define USB_DEVICE_BOOTLOADER_DRV(vid, pid) \
{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)}
#define USB_DEVICE_CDC_DATA(vid, pid) \
@@ -40,6 +42,7 @@
static const struct usb_device_id id_table[] = {
USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1),
+ USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2),
@@ -58,6 +61,7 @@ static const struct usb_device_id id_table[] = {
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf),
USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2),
+ USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1),
USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2),
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index d7c5223f1c3e..3f7715c9968b 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1,5 +1,5 @@
/*
- * Freescale i.MX28 LRADC driver
+ * Freescale MXS LRADC driver
*
* Copyright (c) 2012 DENX Software Engineering, GmbH.
* Marek Vasut <marex@denx.de>
@@ -15,34 +15,30 @@
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/device.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/stmp_device.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/clk.h>
+#include <linux/sysfs.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/sysfs.h>
#define DRIVER_NAME "mxs-lradc"
@@ -65,14 +61,14 @@
* Once the pen touches the touchscreen, the touchscreen switches from
* IRQ-driven mode to polling mode to prevent interrupt storm. The polling
* is realized by worker thread, which is called every 20 or so milliseconds.
- * This gives the touchscreen enough fluence and does not strain the system
+ * This gives the touchscreen enough fluency and does not strain the system
* too much.
*/
#define LRADC_TS_SAMPLE_DELAY_MS 5
/*
* The LRADC reads the following amount of samples from each touchscreen
- * channel and the driver then computes avarage of these.
+ * channel and the driver then computes average of these.
*/
#define LRADC_TS_SAMPLE_AMOUNT 4
@@ -238,7 +234,7 @@ struct mxs_lradc {
* CH5 -- Touch screen YNLR
* CH6 -- Touch screen WIPER (5-wire only)
*
- * The bitfields below represents which parts of the LRADC block are
+ * The bit fields below represent which parts of the LRADC block are
* switched into special mode of operation. These channels can not
* be sampled as regular LRADC channels. The driver will refuse any
* attempt to sample these channels.
@@ -252,7 +248,7 @@ struct mxs_lradc {
struct input_dev *ts_input;
enum mxs_lradc_id soc;
- enum lradc_ts_plate cur_plate; /* statemachine */
+ enum lradc_ts_plate cur_plate; /* state machine */
bool ts_valid;
unsigned ts_x_pos;
unsigned ts_y_pos;
@@ -812,7 +808,7 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
int ret;
/*
- * See if there is no buffered operation in progess. If there is, simply
+ * Check that no buffered operation is in progress. If there is, simply
* bail out. This can be improved to support both buffered and raw IO at
* the same time, yet the code becomes horribly complicated. Therefore I
* applied KISS principle here.
@@ -1369,7 +1365,7 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
* Driver initialization
*/
-#define MXS_ADC_CHAN(idx, chan_type) { \
+#define MXS_ADC_CHAN(idx, chan_type, name) { \
.type = (chan_type), \
.indexed = 1, \
.scan_index = (idx), \
@@ -1382,17 +1378,18 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
.realbits = LRADC_RESOLUTION, \
.storagebits = 32, \
}, \
+ .datasheet_name = (name), \
}
-static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
- MXS_ADC_CHAN(0, IIO_VOLTAGE),
- MXS_ADC_CHAN(1, IIO_VOLTAGE),
- MXS_ADC_CHAN(2, IIO_VOLTAGE),
- MXS_ADC_CHAN(3, IIO_VOLTAGE),
- MXS_ADC_CHAN(4, IIO_VOLTAGE),
- MXS_ADC_CHAN(5, IIO_VOLTAGE),
- MXS_ADC_CHAN(6, IIO_VOLTAGE),
- MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */
+static const struct iio_chan_spec mx23_lradc_chan_spec[] = {
+ MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
+ MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
+ MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
+ MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
+ MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
+ MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
+ MXS_ADC_CHAN(6, IIO_VOLTAGE, "VDDIO"),
+ MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
/* Combined Temperature sensors */
{
.type = IIO_TEMP,
@@ -1403,6 +1400,7 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
BIT(IIO_CHAN_INFO_SCALE),
.channel = 8,
.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ .datasheet_name = "TEMP_DIE",
},
/* Hidden channel to keep indexes */
{
@@ -1411,12 +1409,48 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
.scan_index = -1,
.channel = 9,
},
- MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */
- MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */
- MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */
- MXS_ADC_CHAN(13, IIO_VOLTAGE), /* VDDD */
- MXS_ADC_CHAN(14, IIO_VOLTAGE), /* VBG */
- MXS_ADC_CHAN(15, IIO_VOLTAGE), /* VDD5V */
+ MXS_ADC_CHAN(10, IIO_VOLTAGE, NULL),
+ MXS_ADC_CHAN(11, IIO_VOLTAGE, NULL),
+ MXS_ADC_CHAN(12, IIO_VOLTAGE, "USB_DP"),
+ MXS_ADC_CHAN(13, IIO_VOLTAGE, "USB_DN"),
+ MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
+ MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
+};
+
+static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
+ MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
+ MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
+ MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
+ MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
+ MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
+ MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
+ MXS_ADC_CHAN(6, IIO_VOLTAGE, "LRADC6"),
+ MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
+ /* Combined Temperature sensors */
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .scan_index = 8,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .channel = 8,
+ .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ .datasheet_name = "TEMP_DIE",
+ },
+ /* Hidden channel to keep indexes */
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .scan_index = -1,
+ .channel = 9,
+ },
+ MXS_ADC_CHAN(10, IIO_VOLTAGE, "VDDIO"),
+ MXS_ADC_CHAN(11, IIO_VOLTAGE, "VTH"),
+ MXS_ADC_CHAN(12, IIO_VOLTAGE, "VDDA"),
+ MXS_ADC_CHAN(13, IIO_VOLTAGE, "VDDD"),
+ MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
+ MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
};
static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
@@ -1612,10 +1646,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
iio->dev.parent = &pdev->dev;
iio->info = &mxs_lradc_iio_info;
iio->modes = INDIO_DIRECT_MODE;
- iio->channels = mxs_lradc_chan_spec;
- iio->num_channels = ARRAY_SIZE(mxs_lradc_chan_spec);
iio->masklength = LRADC_MAX_TOTAL_CHANS;
+ if (lradc->soc == IMX23_LRADC) {
+ iio->channels = mx23_lradc_chan_spec;
+ iio->num_channels = ARRAY_SIZE(mx23_lradc_chan_spec);
+ } else {
+ iio->channels = mx28_lradc_chan_spec;
+ iio->num_channels = ARRAY_SIZE(mx28_lradc_chan_spec);
+ }
+
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_trigger_handler,
&mxs_lradc_buffer_ops);
@@ -1707,6 +1747,6 @@ static struct platform_driver mxs_lradc_driver = {
module_platform_driver(mxs_lradc_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_DESCRIPTION("Freescale i.MX28 LRADC driver");
+MODULE_DESCRIPTION("Freescale MXS LRADC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 75ddd4f801a3..78fe0b557280 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -124,7 +124,6 @@ static struct i2c_driver adt7316_driver = {
.driver = {
.name = "adt7316",
.pm = ADT7316_PM_OPS,
- .owner = THIS_MODULE,
},
.probe = adt7316_i2c_probe,
.id_table = adt7316_i2c_id,
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index c54d5b5443a6..6d38854c38c8 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -214,6 +214,7 @@ static struct device iio_evgen_dev = {
.groups = iio_evgen_groups,
.release = &iio_evgen_release,
};
+
static __init int iio_dummy_evgen_init(void)
{
int ret = iio_dummy_evgen_create();
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index 1629a8a6bf26..381f90ff468a 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -611,7 +611,6 @@ static int iio_dummy_probe(int index)
*/
iio_dummy_devs[index] = indio_dev;
-
/*
* Set the device name.
*
@@ -675,7 +674,6 @@ static void iio_dummy_remove(int index)
*/
struct iio_dev *indio_dev = iio_dummy_devs[index];
-
/* Unregister the device */
iio_device_unregister(indio_dev);
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
index e877a99540ab..8d00224e6fad 100644
--- a/drivers/staging/iio/iio_simple_dummy.h
+++ b/drivers/staging/iio/iio_simple_dummy.h
@@ -119,6 +119,7 @@ static inline int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
return 0;
};
+
static inline
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{};
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index a651b8922d0a..00ed7745f3c5 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -32,6 +32,7 @@ static const s16 fakedata[] = {
[diffvoltage3m4] = -2,
[accelx] = 344,
};
+
/**
* iio_simple_dummy_trigger_h() - the trigger handler function
* @irq: the interrupt number
@@ -178,7 +179,6 @@ error_free_buffer:
iio_kfifo_free(indio_dev->buffer);
error_ret:
return ret;
-
}
/**
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index ecc563cb6cb9..73108baf80ad 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -120,7 +120,7 @@ int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
- enum iio_event_info info,
+ enum iio_event_info info,
int *val, int *val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
@@ -143,7 +143,7 @@ int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
- enum iio_event_info info,
+ enum iio_event_info info,
int val, int val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index e646c5d24004..019ba5245c23 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -838,7 +838,6 @@ static struct i2c_driver isl29018_driver = {
.name = "isl29018",
.acpi_match_table = ACPI_PTR(isl29018_acpi_match),
.pm = ISL29018_PM_OPS,
- .owner = THIS_MODULE,
.of_match_table = isl29018_of_match,
},
.probe = isl29018_probe,
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index e5b2fdc2334b..cd6f2727aa58 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -547,7 +547,6 @@ static struct i2c_driver isl29028_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29028",
- .owner = THIS_MODULE,
.of_match_table = isl29028_of_match,
},
.probe = isl29028_probe,
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index 52ca5412a18d..52f4195cf6f4 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -168,7 +168,7 @@ struct ade7854_state {
};
-extern int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
-extern int ade7854_remove(struct iio_dev *indio_dev);
+int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
+int ade7854_remove(struct iio_dev *indio_dev);
#endif
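
The ade7854.h hunk drops `extern` from the function prototypes. For function declarations, external linkage is already the default in C, so both spellings below declare the same symbol; the kernel simply prefers the shorter form. A compilable illustration:

/* Both declarations are identical to the compiler: external linkage. */
extern int probe_style_old(int x);      /* explicit extern */
int probe_style_new(int x);             /* implicit extern, same meaning */

int probe_style_old(int x) { return x + 1; }
int probe_style_new(int x) { return x + 1; }

int main(void)
{
        return probe_style_old(0) == probe_style_new(0) ? 0 : 1;
}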
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 3c1c8c6c4a6c..9fe48ef11473 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -79,7 +79,8 @@ static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
}
static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
@@ -116,8 +117,8 @@ static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
}
static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 0c1976ddee74..2db885750fb8 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -37,7 +37,7 @@ static int iio_trig_periodic_rtc_set_state(struct iio_trigger *trig, bool state)
if (trig_info->frequency == 0 && state)
return -EINVAL;
dev_dbg(&trig_info->rtc->dev, "trigger frequency is %u\n",
- trig_info->frequency);
+ trig_info->frequency);
ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state);
if (ret == 0)
trig_info->state = state;
@@ -74,8 +74,9 @@ static ssize_t iio_trig_periodic_write_freq(struct device *dev,
if (ret == 0 && trig_info->state && trig_info->frequency == 0)
ret = rtc_irq_set_state(trig_info->rtc,
&trig_info->task, 1);
- } else
+ } else {
ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
+ }
if (ret)
goto error_ret;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 5dd9cdfae30c..01961d9e6c36 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -134,7 +134,7 @@ void cfs_get_random_bytes(void *buf, int size);
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(void *ptr, unsigned long shift)
{
- if (unlikely(IS_ERR(ptr) || ptr == NULL))
+ if (IS_ERR_OR_NULL(ptr))
return ptr;
return (char *)ptr - shift;
}
@@ -148,4 +148,17 @@ void *libcfs_kvzalloc(size_t size, gfp_t flags);
void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
gfp_t flags);
+extern struct miscdevice libcfs_dev;
+/**
+ * The path of the debug log dump upcall script.
+ */
+extern char lnet_upcall[1024];
+extern char lnet_debug_log_upcall[1024];
+
+extern void libcfs_init_nidstrings(void);
+
+extern struct cfs_psdev_ops libcfs_psdev_ops;
+
+extern struct cfs_wi_sched *cfs_sched_rehash;
+
#endif /* _LIBCFS_H */
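
The __container_of() change above folds `IS_ERR(ptr) || ptr == NULL` into IS_ERR_OR_NULL(). The kernel encodes a negative errno in the top MAX_ERRNO bytes of the address space, so a single range check covers both "is this an error pointer" and the decode. A userspace re-implementation of the idiom (mirroring the semantics of include/linux/err.h, not the kernel headers themselves):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno as a pointer, as ERR_PTR() does. */
static inline void *err_ptr(long error) { return (void *)error; }

/* True for NULL or for pointers in the top MAX_ERRNO bytes. */
static inline int is_err_or_null(const void *ptr)
{
        return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr) { return (long)ptr; }

int main(void)
{
        void *p = err_ptr(-12); /* -ENOMEM */

        if (is_err_or_null(p))
                printf("error pointer, errno=%ld\n", -ptr_err(p));
        return 0;
}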
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 8251ac932e37..a3aa644154e2 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -50,7 +50,6 @@ extern unsigned int libcfs_stack;
extern unsigned int libcfs_debug;
extern unsigned int libcfs_printk;
extern unsigned int libcfs_console_ratelimit;
-extern unsigned int libcfs_watchdog_ratelimit;
extern unsigned int libcfs_console_max_delay;
extern unsigned int libcfs_console_min_delay;
extern unsigned int libcfs_console_backoff;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index eea55d94e6be..aa69c6a33d19 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -79,14 +79,16 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value,
{
int ret = 0;
- if (unlikely(CFS_FAIL_PRECHECK(id) &&
- (ret = __cfs_fail_check_set(id, value, set)))) {
- if (quiet) {
- CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
- } else {
- LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
- id, value);
+ if (unlikely(CFS_FAIL_PRECHECK(id))) {
+ ret = __cfs_fail_check_set(id, value, set);
+ if (ret) {
+ if (quiet) {
+ CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
+ id, value);
+ } else {
+ LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
+ id, value);
+ }
}
}
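
The libcfs_fail.h hunk above unpicks an assignment buried in `if (unlikely(A && (ret = B)))` into explicit statements. With A short-circuiting, the two shapes are behaviourally identical; a minimal sketch of the transformation, using stand-in precheck()/check_set() helpers:

#include <stdio.h>

static int precheck(int id) { return id == 42; }
static int check_set(int id) { return id * 2; }

/* Before: assignment hidden in the condition (legal C, hard to read). */
static int hidden(int id)
{
        int ret = 0;

        if (precheck(id) && (ret = check_set(id)))
                printf("fired: %d\n", ret);
        return ret;
}

/* After: same control flow, but the assignment is its own statement. */
static int explicit(int id)
{
        int ret = 0;

        if (precheck(id)) {
                ret = check_set(id);
                if (ret)
                        printf("fired: %d\n", ret);
        }
        return ret;
}

int main(void)
{
        return hidden(42) == explicit(42) ? 0 : 1;
}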
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index ed37d26eb20d..9544860e3292 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -87,24 +87,6 @@ do { \
lbug_with_loc(&msgdata); \
} while (0)
-extern atomic_t libcfs_kmemory;
-/*
- * Memory
- */
-
-# define libcfs_kmem_inc(ptr, size) \
-do { \
- atomic_add(size, &libcfs_kmemory); \
-} while (0)
-
-# define libcfs_kmem_dec(ptr, size) \
-do { \
- atomic_sub(size, &libcfs_kmemory); \
-} while (0)
-
-# define libcfs_kmem_read() \
- atomic_read(&libcfs_kmemory)
-
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif
@@ -121,14 +103,9 @@ do { \
if (unlikely((ptr) == NULL)) { \
CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
#ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
- CERROR("LNET: %d total bytes allocated by lnet\n", \
- libcfs_kmem_read()); \
} else { \
memset((ptr), 0, (size)); \
- libcfs_kmem_inc((ptr), (size)); \
- CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n", \
- (int)(size), (ptr), libcfs_kmem_read()); \
- } \
+ } \
} while (0)
/**
@@ -180,9 +157,6 @@ do { \
"%s:%d\n", s, __FILE__, __LINE__); \
break; \
} \
- libcfs_kmem_dec((ptr), s); \
- CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
- s, (ptr), libcfs_kmem_read()); \
if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
vfree(ptr); \
else \
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 509dc1e5c3b1..478e9582ff54 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -102,6 +102,4 @@ int cfs_ip_addr_parse(char *str, int len, struct list_head *list);
int cfs_ip_addr_match(__u32 addr, struct list_head *list);
void cfs_ip_addr_free(struct list_head *list);
-#define strtoul(str, endp, base) simple_strtoul(str, endp, base)
-
#endif
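
Removing the local `strtoul -> simple_strtoul` macro leaves callers to use the kernel's checked kstrto*() helpers instead. A userspace sketch of the equivalent checked parse, roughly what a single kstrtoul() call provides (parse_ulong() is an illustrative helper, not a kernel API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned long, rejecting junk and overflow like kstrtoul(). */
static int parse_ulong(const char *s, unsigned int base, unsigned long *res)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(s, &end, base);
        if (errno == ERANGE)
                return -ERANGE;         /* overflowed unsigned long */
        if (end == s || *end != '\0')
                return -EINVAL;         /* empty or trailing garbage */
        *res = val;
        return 0;
}

int main(void)
{
        unsigned long v;

        if (parse_ulong("1024", 10, &v) == 0)
                printf("parsed %lu\n", v);
        return 0;
}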
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 4eb24a11b02a..c29d2ced258c 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1390,7 +1390,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
.page_shift = PAGE_SHIFT,
.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
+ IB_ACCESS_REMOTE_WRITE),
.pool_size = fps->fps_pool_size,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
@@ -1789,140 +1789,6 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
goto again;
}
-void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
-{
- kib_pmr_pool_t *ppo = pmr->pmr_pool;
- struct ib_mr *mr = pmr->pmr_mr;
-
- pmr->pmr_mr = NULL;
- kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
- if (mr != NULL)
- ib_dereg_mr(mr);
-}
-
-int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
-{
- kib_phys_mr_t *pmr;
- struct list_head *node;
- int rc;
- int i;
-
- node = kiblnd_pool_alloc_node(&pps->pps_poolset);
- if (node == NULL) {
- CERROR("Failed to allocate PMR descriptor\n");
- return -ENOMEM;
- }
-
- pmr = container_of(node, kib_phys_mr_t, pmr_list);
- if (pmr->pmr_pool->ppo_hdev != hdev) {
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
- return -EAGAIN;
- }
-
- for (i = 0; i < rd->rd_nfrags; i++) {
- pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
- pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
- }
-
- pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
- pmr->pmr_ipb, rd->rd_nfrags,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE,
- iova);
- if (!IS_ERR(pmr->pmr_mr)) {
- pmr->pmr_iova = *iova;
- *pp_pmr = pmr;
- return 0;
- }
-
- rc = PTR_ERR(pmr->pmr_mr);
- CERROR("Failed ib_reg_phys_mr: %d\n", rc);
-
- pmr->pmr_mr = NULL;
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
-
- return rc;
-}
-
-static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
-{
- kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
- kib_phys_mr_t *pmr;
- kib_phys_mr_t *tmp;
-
- LASSERT(pool->po_allocated == 0);
-
- list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) {
- LASSERT(pmr->pmr_mr == NULL);
- list_del(&pmr->pmr_list);
-
- if (pmr->pmr_ipb != NULL) {
- LIBCFS_FREE(pmr->pmr_ipb,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(struct ib_phys_buf));
- }
-
- LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
- }
-
- kiblnd_fini_pool(pool);
- if (ppo->ppo_hdev != NULL)
- kiblnd_hdev_decref(ppo->ppo_hdev);
-
- LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
-}
-
-static inline int kiblnd_pmr_pool_size(int ncpts)
-{
- int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
-
- return max(IBLND_PMR_POOL, size);
-}
-
-static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
- kib_pool_t **pp_po)
-{
- struct kib_pmr_pool *ppo;
- struct kib_pool *pool;
- kib_phys_mr_t *pmr;
- int i;
-
- LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
- ps->ps_cpt, sizeof(kib_pmr_pool_t));
- if (ppo == NULL) {
- CERROR("Failed to allocate PMR pool\n");
- return -ENOMEM;
- }
-
- pool = &ppo->ppo_pool;
- kiblnd_init_pool(ps, pool, size);
-
- for (i = 0; i < size; i++) {
- LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
- ps->ps_cpt, sizeof(kib_phys_mr_t));
- if (pmr == NULL)
- break;
-
- pmr->pmr_pool = ppo;
- LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
- if (pmr->pmr_ipb == NULL)
- break;
-
- list_add(&pmr->pmr_list, &pool->po_free_list);
- }
-
- if (i < size) {
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
- }
-
- ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
- *pp_po = pool;
- return 0;
-}
-
static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
{
kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
@@ -2078,7 +1944,6 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
cfs_cpt_for_each(i, lnet_cpt_table()) {
kib_tx_poolset_t *tps;
kib_fmr_poolset_t *fps;
- kib_pmr_poolset_t *pps;
if (net->ibn_tx_ps != NULL) {
tps = net->ibn_tx_ps[i];
@@ -2089,11 +1954,6 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
fps = net->ibn_fmr_ps[i];
kiblnd_fini_fmr_poolset(fps);
}
-
- if (net->ibn_pmr_ps != NULL) {
- pps = net->ibn_pmr_ps[i];
- kiblnd_fini_poolset(&pps->pps_poolset);
- }
}
if (net->ibn_tx_ps != NULL) {
@@ -2105,18 +1965,13 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
cfs_percpt_free(net->ibn_fmr_ps);
net->ibn_fmr_ps = NULL;
}
-
- if (net->ibn_pmr_ps != NULL) {
- cfs_percpt_free(net->ibn_pmr_ps);
- net->ibn_pmr_ps = NULL;
- }
}
static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
{
unsigned long flags;
int cpt;
- int rc;
+ int rc = 0;
int i;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -2137,12 +1992,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
goto failed;
}
- /* TX pool must be created later than FMR/PMR, see LU-2268
- * for details */
+ /*
+ * The TX pool must be created after the FMR pool; see LU-2268
+ * for details.
+ */
LASSERT(net->ibn_tx_ps == NULL);
- /* premapping can fail if ibd_nmr > 1, so we always create
- * FMR/PMR pool and map-on-demand if premapping failed */
+ /*
+ * premapping can fail if ibd_nmr > 1, so we always create an
+ * FMR pool and fall back to map-on-demand if premapping fails
+ */
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
@@ -2158,7 +2017,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
kiblnd_fmr_pool_size(ncpts),
kiblnd_fmr_flush_trigger(ncpts));
if (rc == -ENOSYS && i == 0) /* no FMR */
- break; /* create PMR pool */
+ break;
if (rc != 0) { /* a real error */
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
@@ -2175,38 +2034,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
cfs_percpt_free(net->ibn_fmr_ps);
net->ibn_fmr_ps = NULL;
- CWARN("Device does not support FMR, failing back to PMR\n");
-
- if (*kiblnd_tunables.kib_pmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
- CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_pmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
- rc = -EINVAL;
- goto failed;
- }
-
- net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_pmr_poolset_t));
- if (net->ibn_pmr_ps == NULL) {
- CERROR("Failed to allocate PMR pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
- cpt, net, "PMR",
- kiblnd_pmr_pool_size(ncpts),
- kiblnd_create_pmr_pool,
- kiblnd_destroy_pmr_pool, NULL, NULL);
- if (rc != 0) {
- CERROR("Can't initialize PMR pool for CPT %d: %d\n",
- cpt, rc);
+ CWARN("Device does not support FMR\n");
goto failed;
- }
- }
create_tx_pool:
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
@@ -2318,17 +2147,13 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
struct ib_mr *mr;
- int i;
int rc;
- __u64 mm_size;
- __u64 mr_size;
int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
rc = kiblnd_hdev_get_attr(hdev);
if (rc != 0)
return rc;
- if (hdev->ibh_mr_shift == 64) {
LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
if (hdev->ibh_mrs == NULL) {
CERROR("Failed to allocate MRs table\n");
@@ -2347,53 +2172,6 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
hdev->ibh_mrs[0] = mr;
- goto out;
- }
-
- mr_size = 1ULL << hdev->ibh_mr_shift;
- mm_size = (unsigned long)high_memory - PAGE_OFFSET;
-
- hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
-
- if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
- /* it's 4T..., assume we will re-code at that time */
- CERROR("Can't support memory size: x%#llx with MR size: x%#llx\n",
- mm_size, mr_size);
- return -EINVAL;
- }
-
- /* create an array of MRs to cover all memory */
- LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs' table\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- struct ib_phys_buf ipb;
- __u64 iova;
-
- ipb.size = hdev->ibh_mr_size;
- ipb.addr = i * mr_size;
- iova = ipb.addr;
-
- mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
- ipb.addr, ipb.size, PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
-
- LASSERT(iova == ipb.addr);
-
- hdev->ibh_mrs[i] = mr;
- }
-
-out:
- if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
- LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
- hdev->ibh_mr_size, hdev->ibh_nmrs);
return 0;
}
@@ -2564,14 +2342,9 @@ int kiblnd_dev_failover(kib_dev_t *dev)
kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
&zombie_tpo);
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps)
kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
&zombie_fpo);
-
- } else if (net->ibn_pmr_ps != NULL) {
- kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
- pps_poolset, &zombie_ppo);
- }
}
}
@@ -2667,9 +2440,6 @@ static void kiblnd_base_shutdown(void)
LASSERT(list_empty(&kiblnd_data.kib_devs));
- CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
-
switch (kiblnd_data.kib_init) {
default:
LBUG();
@@ -2720,9 +2490,6 @@ static void kiblnd_base_shutdown(void)
if (kiblnd_data.kib_scheds != NULL)
cfs_percpt_free(kiblnd_data.kib_scheds);
- CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
-
kiblnd_data.kib_init = IBLND_INIT_NOTHING;
module_put(THIS_MODULE);
}
@@ -2739,9 +2506,6 @@ void kiblnd_shutdown(lnet_ni_t *ni)
if (net == NULL)
goto out;
- CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
-
write_lock_irqsave(g_lock, flags);
net->ibn_shutdown = 1;
write_unlock_irqrestore(g_lock, flags);
@@ -2786,9 +2550,6 @@ void kiblnd_shutdown(lnet_ni_t *ni)
break;
}
- CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
-
net->ibn_init = IBLND_INIT_NOTHING;
ni->ni_data = NULL;
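
The PMR machinery removed above sat on the generic kiblnd pool: descriptors are pulled from a per-pool free list and pushed back on release, with po_allocated tracking outstanding nodes. A toy pool in the same spirit (illustrative only; none of the locking, CPT affinity, or grow-on-empty logic of the real pool-set):

#include <stdio.h>
#include <stdlib.h>

/* Toy object pool: a stack of free descriptors, like po_free_list. */
struct pool {
        void **free;    /* free descriptors */
        int nfree;
        int allocated;  /* mirrors po_allocated */
};

static void *pool_alloc_node(struct pool *p)
{
        if (p->nfree == 0)
                return NULL;    /* the real code grows the pool here */
        p->allocated++;
        return p->free[--p->nfree];
}

static void pool_free_node(struct pool *p, void *node)
{
        p->free[p->nfree++] = node;
        p->allocated--;
}

int main(void)
{
        void *slots[2] = { malloc(16), malloc(16) };
        struct pool p = { .free = slots, .nfree = 2, .allocated = 0 };
        void *d = pool_alloc_node(&p);

        printf("allocated=%d\n", p.allocated);  /* 1 */
        pool_free_node(&p, d);
        free(slots[0]);
        free(slots[1]);
        return 0;
}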
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index f5d1d9f8f1ed..f4b6c33ac318 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -48,7 +48,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -104,7 +104,6 @@ typedef struct {
int *kib_map_on_demand; /* map-on-demand if RD has more
* fragments than this value, 0
* disable map-on-demand */
- int *kib_pmr_pool_size; /* # physical MR in pool */
int *kib_fmr_pool_size; /* # FMRs in pool */
int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
int *kib_fmr_cache; /* enable FMR pool cache? */
@@ -120,7 +119,7 @@ extern kib_tunables_t kiblnd_tunables;
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */
+#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
IBLND_MSG_QUEUE_SIZE_V1 : \
@@ -163,7 +162,6 @@ kiblnd_concurrent_sends_v1(void)
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so don't need give a very large value */
#define IBLND_TX_POOL 256
-#define IBLND_PMR_POOL 256
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192
@@ -232,17 +230,6 @@ typedef struct {
struct page *ibp_pages[0]; /* page array */
} kib_pages_t;
-struct kib_pmr_pool;
-
-typedef struct {
- struct list_head pmr_list; /* chain node */
- struct ib_phys_buf *pmr_ipb; /* physical buffer */
- struct ib_mr *pmr_mr; /* IB MR */
- struct kib_pmr_pool *pmr_pool; /* owner of this MR */
- __u64 pmr_iova; /* Virtual I/O address */
- int pmr_refcount; /* reference count */
-} kib_phys_mr_t;
-
struct kib_pool;
struct kib_poolset;
@@ -299,15 +286,6 @@ typedef struct {
} kib_tx_pool_t;
typedef struct {
- kib_poolset_t pps_poolset; /* pool-set */
-} kib_pmr_poolset_t;
-
-typedef struct kib_pmr_pool {
- struct kib_hca_dev *ppo_hdev; /* device for this pool */
- kib_pool_t ppo_pool; /* pool */
-} kib_pmr_pool_t;
-
-typedef struct {
spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
struct list_head fps_pool_list; /* FMR pool list */
@@ -347,7 +325,6 @@ typedef struct kib_net {
kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
- kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */
kib_dev_t *ibn_dev; /* underlying IB device */
} kib_net_t;
@@ -519,7 +496,7 @@ typedef struct kib_rx /* receive message */
enum ib_wc_status rx_status; /* completion status */
kib_msg_t *rx_msg; /* message buffer (host vaddr) */
__u64 rx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
struct ib_recv_wr rx_wrq; /* receive work item... */
struct ib_sge rx_sge; /* ...and its memory */
} kib_rx_t;
@@ -546,7 +523,7 @@ typedef struct kib_tx /* transmit message */
* completion */
kib_msg_t *tx_msg; /* message buffer (host vaddr) */
__u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+ DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
int tx_nwrq; /* # send work items */
struct ib_send_wr *tx_wrq; /* send work items... */
struct ib_sge *tx_sge; /* ...and their memory */
@@ -554,10 +531,7 @@ typedef struct kib_tx /* transmit message */
int tx_nfrags; /* # entries in... */
struct scatterlist *tx_frags; /* dma_map_sg descriptor */
__u64 *tx_pages; /* rdma phys page addrs */
- union {
- kib_phys_mr_t *pmr; /* MR for physical buffer */
- kib_fmr_t fmr; /* FMR */
- } tx_u;
+ kib_fmr_t fmr; /* FMR */
int tx_dmadir; /* dma direction */
} kib_tx_t;
@@ -642,19 +616,19 @@ typedef struct kib_peer {
extern kib_data_t kiblnd_data;
-extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
- LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
atomic_inc(&hdev->ibh_ref);
}
static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
- LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ LASSERT(atomic_read(&hdev->ibh_ref) > 0);
if (atomic_dec_and_test(&hdev->ibh_ref))
kiblnd_hdev_destroy(hdev);
}
@@ -701,7 +675,7 @@ do { \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
+ atomic_read(&(peer)->ibp_refcount)); \
atomic_inc(&(peer)->ibp_refcount); \
} while (0)
@@ -709,32 +683,32 @@ do { \
do { \
CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
(peer), libcfs_nid2str((peer)->ibp_nid), \
- atomic_read (&(peer)->ibp_refcount)); \
+ atomic_read(&(peer)->ibp_refcount)); \
LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
kiblnd_destroy_peer(peer); \
} while (0)
static inline struct list_head *
-kiblnd_nid2peerlist (lnet_nid_t nid)
+kiblnd_nid2peerlist(lnet_nid_t nid)
{
unsigned int hash =
((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
- return (&kiblnd_data.kib_peers [hash]);
+ return &kiblnd_data.kib_peers[hash];
}
static inline int
-kiblnd_peer_active (kib_peer_t *peer)
+kiblnd_peer_active(kib_peer_t *peer)
{
/* Am I in the peer hash table? */
- return (!list_empty(&peer->ibp_list));
+ return !list_empty(&peer->ibp_list);
}
static inline kib_conn_t *
-kiblnd_get_conn_locked (kib_peer_t *peer)
+kiblnd_get_conn_locked(kib_peer_t *peer)
{
- LASSERT (!list_empty(&peer->ibp_conns));
+ LASSERT(!list_empty(&peer->ibp_conns));
/* just return the first connection */
return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
@@ -751,7 +725,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
if (conn->ibc_outstanding_credits <
IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
@@ -788,7 +762,7 @@ kiblnd_abort_receives(kib_conn_t *conn)
}
static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
if (q == &conn->ibc_tx_queue)
return "tx_queue";
@@ -815,43 +789,43 @@ kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
#define IBLND_WID_MASK 3UL
static inline __u64
-kiblnd_ptr2wreqid (void *ptr, int type)
+kiblnd_ptr2wreqid(void *ptr, int type)
{
unsigned long lptr = (unsigned long)ptr;
- LASSERT ((lptr & IBLND_WID_MASK) == 0);
- LASSERT ((type & ~IBLND_WID_MASK) == 0);
+ LASSERT((lptr & IBLND_WID_MASK) == 0);
+ LASSERT((type & ~IBLND_WID_MASK) == 0);
return (__u64)(lptr | type);
}
static inline void *
-kiblnd_wreqid2ptr (__u64 wreqid)
+kiblnd_wreqid2ptr(__u64 wreqid)
{
return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}
static inline int
-kiblnd_wreqid2type (__u64 wreqid)
+kiblnd_wreqid2type(__u64 wreqid)
{
- return (wreqid & IBLND_WID_MASK);
+ return wreqid & IBLND_WID_MASK;
}
static inline void
-kiblnd_set_conn_state (kib_conn_t *conn, int state)
+kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
conn->ibc_state = state;
mb();
}
static inline void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
msg->ibm_type = type;
msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+kiblnd_rd_size(kib_rdma_desc_t *rd)
{
int i;
int size;
@@ -887,7 +861,7 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
rd->rd_frags[index].rf_addr += nob;
rd->rd_frags[index].rf_nob -= nob;
} else {
- index ++;
+ index++;
}
return index;
@@ -896,8 +870,8 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
- LASSERT (msgtype == IBLND_MSG_GET_REQ ||
- msgtype == IBLND_MSG_PUT_ACK);
+ LASSERT(msgtype == IBLND_MSG_GET_REQ ||
+ msgtype == IBLND_MSG_PUT_ACK);
return msgtype == IBLND_MSG_GET_REQ ?
offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
@@ -978,57 +952,53 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
-void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
-
-int kiblnd_startup (lnet_ni_t *ni);
-void kiblnd_shutdown (lnet_ni_t *ni);
-int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int kiblnd_startup(lnet_ni_t *ni);
+void kiblnd_shutdown(lnet_ni_t *ni);
+int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
-int kiblnd_connd (void *arg);
+int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int kiblnd_failover_thread (void *arg);
+int kiblnd_failover_thread(void *arg);
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages (kib_pages_t *p);
+void kiblnd_free_pages(kib_pages_t *p);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
int kiblnd_dev_failover(kib_dev_t *dev);
-int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer (kib_peer_t *peer);
-void kiblnd_destroy_dev (kib_dev_t *dev);
-void kiblnd_unlink_peer_locked (kib_peer_t *peer);
-void kiblnd_peer_alive (kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
-void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(kib_peer_t *peer);
+void kiblnd_destroy_dev(kib_dev_t *dev);
+void kiblnd_unlink_peer_locked(kib_peer_t *peer);
+void kiblnd_peer_alive(kib_peer_t *peer);
+kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
+void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
int version, __u64 incarnation);
-int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
+int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
+kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int state, int version);
-void kiblnd_destroy_conn (kib_conn_t *conn);
-void kiblnd_close_conn (kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
+void kiblnd_destroy_conn(kib_conn_t *conn);
+void kiblnd_close_conn(kib_conn_t *conn, int error);
+void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
-int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
+int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
int status);
void kiblnd_check_sends (kib_conn_t *conn);
@@ -1036,10 +1006,10 @@ void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int kiblnd_post_rx (kib_rx_t *rx, int credit);
+int kiblnd_post_rx(kib_rx_t *rx, int credit);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
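
Among the cleanups in this header, kiblnd_ptr2wreqid()/kiblnd_wreqid2ptr() pack a 2-bit type tag into the low bits of an aligned pointer (IBLND_WID_MASK is 3UL, so the pointer must be at least 4-byte aligned). A standalone sketch of that tagging trick:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WID_MASK 3UL    /* the low two bits carry the type */

static uint64_t ptr2wreqid(void *ptr, int type)
{
        unsigned long lptr = (unsigned long)ptr;

        assert((lptr & WID_MASK) == 0);         /* 4-byte aligned */
        assert((type & ~WID_MASK) == 0);        /* type fits in two bits */
        return (uint64_t)(lptr | (unsigned long)type);
}

static void *wreqid2ptr(uint64_t wreqid)
{
        return (void *)((unsigned long)wreqid & ~WID_MASK);
}

static int wreqid2type(uint64_t wreqid)
{
        return (int)(wreqid & WID_MASK);
}

int main(void)
{
        int obj = 7;    /* int is at least 4-byte aligned on common ABIs */
        uint64_t id = ptr2wreqid(&obj, 2);

        printf("type=%d value=%d\n", wreqid2type(id), *(int *)wreqid2ptr(id));
        return 0;
}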
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 477aa8b76f32..a23a6d956a4d 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -121,7 +121,6 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
LASSERT(tx->tx_conn == NULL);
LASSERT(tx->tx_lntmsg[0] == NULL);
LASSERT(tx->tx_lntmsg[1] == NULL);
- LASSERT(tx->tx_u.pmr == NULL);
LASSERT(tx->tx_nfrags == 0);
return tx;
@@ -575,7 +574,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
+ rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
if (rc != 0) {
CERROR("Can't map %d pages: %d\n", npages, rc);
return rc;
@@ -583,8 +582,8 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
/* If rd is not tx_rd, it's going to get sent to a peer, who will need
* the rkey */
- rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
- tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
+ rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
+ tx->fmr.fmr_pfmr->fmr->lkey;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
@@ -592,42 +591,6 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
return 0;
}
-static int
-kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
-{
- kib_hca_dev_t *hdev;
- kib_pmr_poolset_t *pps;
- __u64 iova;
- int cpt;
- int rc;
-
- LASSERT(tx->tx_pool != NULL);
- LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
-
- hdev = tx->tx_pool->tpo_hdev;
-
- iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
-
- cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
-
- pps = net->ibn_pmr_ps[cpt];
- rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr);
- if (rc != 0) {
- CERROR("Failed to create MR by phybuf: %d\n", rc);
- return rc;
- }
-
- /* If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey */
- rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
- tx->tx_u.pmr->pmr_mr->lkey;
- rd->rd_nfrags = 1;
- rd->rd_frags[0].rf_addr = iova;
- rd->rd_frags[0].rf_nob = nob;
-
- return 0;
-}
-
void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
@@ -635,13 +598,9 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
LASSERT(net != NULL);
- if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
- kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
- tx->tx_u.fmr.fmr_pfmr = NULL;
-
- } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) {
- kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
- tx->tx_u.pmr = NULL;
+ if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
+ kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
+ tx->fmr.fmr_pfmr = NULL;
}
if (tx->tx_nfrags != 0) {
@@ -687,8 +646,6 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
if (net->ibn_fmr_ps != NULL)
return kiblnd_fmr_map_tx(net, tx, rd, nob);
- else if (net->ibn_pmr_ps != NULL)
- return kiblnd_pmr_map_tx(net, tx, rd, nob);
return -EINVAL;
}
@@ -3133,8 +3090,7 @@ kiblnd_connd(void *arg)
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- conn = list_entry(kiblnd_data. \
- kib_connd_zombies.next,
+ conn = list_entry(kiblnd_data.kib_connd_zombies.next,
kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b0e00361cfce..b3d1b5d627cb 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -126,11 +126,6 @@ static int fmr_cache = 1;
module_param(fmr_cache, int, 0444);
MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int pmr_pool_size = 512;
-module_param(pmr_pool_size, int, 0444);
-MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
-
/*
* 0: disable failover
* 1: enable failover if necessary
@@ -170,7 +165,6 @@ kib_tunables_t kiblnd_tunables = {
.kib_fmr_pool_size = &fmr_pool_size,
.kib_fmr_flush_trigger = &fmr_flush_trigger,
.kib_fmr_cache = &fmr_cache,
- .kib_pmr_pool_size = &pmr_pool_size,
.kib_require_priv_port = &require_privileged_port,
.kib_use_priv_port = &use_privileged_port,
.kib_nscheds = &nscheds
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 4128a92218a9..d8bfcadd184a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2252,8 +2252,6 @@ ksocknal_base_shutdown(void)
int i;
int j;
- CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
LASSERT(ksocknal_data.ksnd_nnets == 0);
switch (ksocknal_data.ksnd_init) {
@@ -2331,9 +2329,6 @@ ksocknal_base_shutdown(void)
break;
}
- CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
-
module_put(THIS_MODULE);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 7125eb955ae5..a0fcbc39f86b 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -31,7 +31,6 @@
#define DEBUG_PORTAL_ALLOC
#define DEBUG_SUBSYSTEM S_LND
-#include <asm/irq.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/if.h>
@@ -47,6 +46,7 @@
#include <linux/sysctl.h>
#include <linux/uio.h>
#include <linux/unistd.h>
+#include <asm/irq.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -520,8 +520,8 @@ ksocknal_conn_addref(ksock_conn_t *conn)
atomic_inc(&conn->ksnc_conn_refcount);
}
-extern void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
+void ksocknal_finalize_zcreq(ksock_conn_t *conn);
static inline void
ksocknal_conn_decref(ksock_conn_t *conn)
@@ -566,8 +566,8 @@ ksocknal_tx_addref(ksock_tx_t *tx)
atomic_inc(&tx->tx_refcount);
}
-extern void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-extern void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
+void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
+void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
static inline void
ksocknal_tx_decref(ksock_tx_t *tx)
@@ -584,7 +584,7 @@ ksocknal_route_addref(ksock_route_t *route)
atomic_inc(&route->ksnr_refcount);
}
-extern void ksocknal_destroy_route(ksock_route_t *route);
+void ksocknal_destroy_route(ksock_route_t *route);
static inline void
ksocknal_route_decref(ksock_route_t *route)
@@ -601,7 +601,7 @@ ksocknal_peer_addref(ksock_peer_t *peer)
atomic_inc(&peer->ksnp_refcount);
}
-extern void ksocknal_destroy_peer(ksock_peer_t *peer);
+void ksocknal_destroy_peer(ksock_peer_t *peer);
static inline void
ksocknal_peer_decref(ksock_peer_t *peer)
@@ -621,70 +621,69 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
-extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-extern void ksocknal_peer_failed(ksock_peer_t *peer);
-extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
- struct socket *sock, int type);
-extern void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-extern void ksocknal_terminate_conn(ksock_conn_t *conn);
-extern void ksocknal_destroy_conn(ksock_conn_t *conn);
-extern int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
- __u32 ipaddr, int why);
-extern int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
-extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
- ksock_tx_t *tx, int nonblk);
-
-extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
- lnet_process_id_t id);
-extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-extern void ksocknal_free_tx(ksock_tx_t *tx);
-extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
-extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
- int error);
-extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
-extern void ksocknal_thread_fini(void);
-extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-extern int ksocknal_new_packet(ksock_conn_t *conn, int skip);
-extern int ksocknal_scheduler(void *arg);
-extern int ksocknal_connd(void *arg);
-extern int ksocknal_reaper(void *arg);
-extern int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-extern int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *id,
- __u64 *incarnation);
-extern void ksocknal_read_callback(ksock_conn_t *conn);
-extern void ksocknal_write_callback(ksock_conn_t *conn);
-
-extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
-extern int ksocknal_lib_setup_sock(struct socket *so);
-extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
- int *rxmem, int *nagle);
-
-extern int ksocknal_tunables_init(void);
-
-extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
-
-extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
-extern int ksocknal_lib_bind_thread_to_cpu(int id);
+int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
+ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+void ksocknal_peer_failed(ksock_peer_t *peer);
+int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+ struct socket *sock, int type);
+void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
+void ksocknal_terminate_conn(ksock_conn_t *conn);
+void ksocknal_destroy_conn(ksock_conn_t *conn);
+int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
+ __u32 ipaddr, int why);
+int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
+int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
+ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
+ ksock_tx_t *tx, int nonblk);
+
+int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+ lnet_process_id_t id);
+ksock_tx_t *ksocknal_alloc_tx(int type, int size);
+void ksocknal_free_tx(ksock_tx_t *tx);
+ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+void ksocknal_next_tx_carrier(ksock_conn_t *conn);
+void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
+void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
+void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
+void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
+void ksocknal_thread_fini(void);
+void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
+ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
+ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
+int ksocknal_new_packet(ksock_conn_t *conn, int skip);
+int ksocknal_scheduler(void *arg);
+int ksocknal_connd(void *arg);
+int ksocknal_reaper(void *arg);
+int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
+int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *id,
+ __u64 *incarnation);
+void ksocknal_read_callback(ksock_conn_t *conn);
+void ksocknal_write_callback(ksock_conn_t *conn);
+
+int ksocknal_lib_zc_capable(ksock_conn_t *conn);
+void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
+void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
+void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
+void ksocknal_lib_push_conn(ksock_conn_t *conn);
+int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+int ksocknal_lib_setup_sock(struct socket *so);
+int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
+int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
+void ksocknal_lib_eager_ack(ksock_conn_t *conn);
+int ksocknal_lib_recv_iov(ksock_conn_t *conn);
+int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
+int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+ int *rxmem, int *nagle);
+
+int ksocknal_tunables_init(void);
+
+void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+
+int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+int ksocknal_lib_bind_thread_to_cpu(int id);
#endif /* _SOCKLND_SOCKLND_H_ */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index fe2a83a540cd..0d5aac6a2bb3 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -526,8 +526,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p\n", counter, conn);
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
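
The rewritten CWARN above stays rate-limited by `(counter & (-counter)) == counter`, which holds exactly when counter is 0 or a power of two: `x & -x` isolates the lowest set bit, so it equals x only when at most one bit is set. The warning therefore fires at counts 1, 2, 4, 8, ... A quick demonstration:

#include <stdio.h>

/* True for 0 and for exact powers of two: x & -x keeps only the
 * lowest set bit, so it can equal x only if x has one bit set. */
static int warn_now(unsigned int counter)
{
        return (counter & -counter) == counter;
}

int main(void)
{
        for (unsigned int counter = 1; counter <= 32; counter++)
                if (warn_now(counter))
                        printf("warn at %u\n", counter); /* 1 2 4 8 16 32 */
        return 0;
}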
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index ee902dc43823..40f418b82960 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -32,17 +32,6 @@
static struct ctl_table_header *lnet_table_header;
-#define CTL_LNET (0x100)
-enum {
- PSDEV_LNET_STATS = 100,
- PSDEV_LNET_ROUTES,
- PSDEV_LNET_ROUTERS,
- PSDEV_LNET_PEERS,
- PSDEV_LNET_BUFFERS,
- PSDEV_LNET_NIS,
- PSDEV_LNET_PTL_ROTOR,
-};
-
#define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
/*
* NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index c4cf0aed80e1..cdce2dd6be7c 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -187,51 +187,49 @@ lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
int lstcon_console_init(void);
int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
int lstcon_console_fini(void);
-extern int lstcon_session_match(lst_sid_t sid);
-extern int lstcon_session_new(char *name, int key, unsigned version,
- int timeout, int flags, lst_sid_t *sid_up);
-extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
- lstcon_ndlist_ent_t *entp, char *name_up, int len);
-extern int lstcon_session_end(void);
-extern int lstcon_session_debug(int timeout, struct list_head *result_up);
-extern int lstcon_session_feats_check(unsigned feats);
-extern int lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up);
-extern int lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up);
-extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
-extern int lstcon_group_add(char *name);
-extern int lstcon_group_del(char *name);
-extern int lstcon_group_clean(char *name, int args);
-extern int lstcon_group_refresh(char *name, struct list_head *result_up);
-extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
- unsigned *featp, struct list_head *result_up);
-extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
-extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
- int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
-extern int lstcon_group_list(int idx, int len, char *name_up);
-extern int lstcon_batch_add(char *name);
-extern int lstcon_batch_run(char *name, int timeout,
+int lstcon_session_match(lst_sid_t sid);
+int lstcon_session_new(char *name, int key, unsigned version,
+ int timeout, int flags, lst_sid_t *sid_up);
+int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
+ lstcon_ndlist_ent_t *entp, char *name_up, int len);
+int lstcon_session_end(void);
+int lstcon_session_debug(int timeout, struct list_head *result_up);
+int lstcon_session_feats_check(unsigned feats);
+int lstcon_batch_debug(int timeout, char *name,
+ int client, struct list_head *result_up);
+int lstcon_group_debug(int timeout, char *name,
+ struct list_head *result_up);
+int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
+ struct list_head *result_up);
+int lstcon_group_add(char *name);
+int lstcon_group_del(char *name);
+int lstcon_group_clean(char *name, int args);
+int lstcon_group_refresh(char *name, struct list_head *result_up);
+int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
+ unsigned *featp, struct list_head *result_up);
+int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
+ struct list_head *result_up);
+int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
+ int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
+int lstcon_group_list(int idx, int len, char *name_up);
+int lstcon_batch_add(char *name);
+int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
+int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+int lstcon_test_batch_query(char *name, int testidx,
+ int client, int timeout,
struct list_head *result_up);
-extern int lstcon_batch_stop(char *name, int force,
- struct list_head *result_up);
-extern int lstcon_test_batch_query(char *name, int testidx,
- int client, int timeout,
- struct list_head *result_up);
-extern int lstcon_batch_del(char *name);
-extern int lstcon_batch_list(int idx, int namelen, char *name_up);
-extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
- int server, int testidx, int *index_p,
- int *ndent_p, lstcon_node_ent_t *dents_up);
-extern int lstcon_group_stat(char *grp_name, int timeout,
- struct list_head *result_up);
-extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up);
-extern int lstcon_test_add(char *batch_name, int type, int loop,
- int concur, int dist, int span,
- char *src_name, char *dst_name,
- void *param, int paramlen, int *retp,
- struct list_head *result_up);
+int lstcon_batch_del(char *name);
+int lstcon_batch_list(int idx, int namelen, char *name_up);
+int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
+ int server, int testidx, int *index_p,
+ int *ndent_p, lstcon_node_ent_t *dents_up);
+int lstcon_group_stat(char *grp_name, int timeout,
+ struct list_head *result_up);
+int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
+ int timeout, struct list_head *result_up);
+int lstcon_test_add(char *batch_name, int type, int loop,
+ int concur, int dist, int span,
+ char *src_name, char *dst_name,
+ void *param, int paramlen, int *retp,
+ struct list_head *result_up);
#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 7c5185a2a795..257de3537671 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -203,7 +203,8 @@ sfw_deactivate_session(void)
sfw_batch_t *tsb;
sfw_test_case_t *tsc;
- if (sn == NULL) return;
+ if (sn == NULL)
+ return;
LASSERT(!sn->sn_timer_active);
@@ -613,7 +614,8 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
srpc_client_rpc_t *rpc;
sfw_test_unit_t *tsu;
- if (!tsi->tsi_is_client) goto clean;
+ if (!tsi->tsi_is_client)
+ goto clean;
tsi->tsi_ops->tso_fini(tsi);
@@ -1700,7 +1702,8 @@ sfw_startup(void)
for (i = 0; ; i++) {
sv = &sfw_services[i];
- if (sv->sv_name == NULL) break;
+ if (sv->sv_name == NULL)
+ break;
sv->sv_bulk_ready = NULL;
sv->sv_handler = sfw_handle_server_rpc;
@@ -1717,7 +1720,8 @@ sfw_startup(void)
}
/* about to sfw_shutdown, no need to add buffer */
- if (error) continue;
+ if (error)
+ continue;
rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
if (rc != 0) {
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 1362783b7eab..a16d577c6cb1 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -498,11 +498,11 @@ int client_fid_init(struct obd_device *obd,
int rc;
cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS);
- if (cli->cl_seq == NULL)
+ if (!cli->cl_seq)
return -ENOMEM;
prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS);
- if (prefix == NULL) {
+ if (!prefix) {
rc = -ENOMEM;
goto out_free_seq;
}
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index ec2fc4339a2e..1b1066b2461c 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -70,7 +70,7 @@ struct fld_cache *fld_cache_init(const char *name,
LASSERT(cache_threshold < cache_size);
cache = kzalloc(sizeof(*cache), GFP_NOFS);
- if (cache == NULL)
+ if (!cache)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cache->fci_entries_head);
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index c3b47f2346df..1e450bf95383 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -222,7 +222,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
fld->lcf_name, name, tar->ft_idx);
target = kzalloc(sizeof(*target), GFP_NOFS);
- if (target == NULL)
+ if (!target)
return -ENOMEM;
spin_lock(&fld->lcf_lock);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 513c81f43d6e..6b14406b2920 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -64,98 +64,6 @@
#define LTIME_S(time) (time.tv_sec)
-/* inode_dio_wait(i) use as-is for write lock */
-# define inode_dio_write_done(i) do {} while (0) /* for write unlock */
-# define inode_dio_read(i) atomic_inc(&(i)->i_dio_count)
-/* inode_dio_done(i) use as-is for read unlock */
-
-
-#ifndef FS_HAS_FIEMAP
-#define FS_HAS_FIEMAP (0)
-#endif
-
-#define ll_vfs_rmdir(dir, entry, mnt) vfs_rmdir(dir, entry)
-#define ll_vfs_mkdir(inode, dir, mnt, mode) vfs_mkdir(inode, dir, mode)
-#define ll_vfs_link(old, mnt, dir, new, mnt1) vfs_link(old, dir, new)
-#define ll_vfs_unlink(inode, entry, mnt) vfs_unlink(inode, entry)
-#define ll_vfs_mknod(dir, entry, mnt, mode, dev) \
- vfs_mknod(dir, entry, mode, dev)
-#define ll_security_inode_unlink(dir, entry, mnt) \
- security_inode_unlink(dir, entry)
-#define ll_vfs_rename(old, old_dir, mnt, new, new_dir, mnt1) \
- vfs_rename(old, old_dir, new, new_dir, NULL, 0)
-
-#define cfs_bio_io_error(a, b) bio_io_error((a))
-#define cfs_bio_endio(a, b, c) bio_endio((a), (c))
-
-#define cfs_path_put(nd) path_put(&(nd)->path)
-
-
-#ifndef SLAB_DESTROY_BY_RCU
-#define SLAB_DESTROY_BY_RCU 0
-#endif
-
-
-
-static inline int
-ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
-{
- int rc;
-
- if (sb->s_qcop->quota_on) {
- struct path path;
-
- rc = kern_path(name, LOOKUP_FOLLOW, &path);
- if (!rc)
- return rc;
- rc = sb->s_qcop->quota_on(sb, off, ver, &path);
- path_put(&path);
- return rc;
- } else
- return -ENOSYS;
-}
-
-static inline int ll_quota_off(struct super_block *sb, int off, int remount)
-{
- if (sb->s_qcop->quota_off) {
- return sb->s_qcop->quota_off(sb, off);
- } else
- return -ENOSYS;
-}
-
-
-# define ll_vfs_dq_init dquot_initialize
-# define ll_vfs_dq_drop dquot_drop
-# define ll_vfs_dq_transfer dquot_transfer
-# define ll_vfs_dq_off(sb, remount) dquot_suspend(sb, -1)
-
-
-
-
-
-#define queue_max_phys_segments(rq) queue_max_segments(rq)
-#define queue_max_hw_segments(rq) queue_max_segments(rq)
-
-
-#define ll_d_hlist_node hlist_node
-#define ll_d_hlist_empty(list) hlist_empty(list)
-#define ll_d_hlist_entry(ptr, type, name) hlist_entry(ptr.first, type, name)
-#define ll_d_hlist_for_each(tmp, i_dentry) hlist_for_each(tmp, i_dentry)
-#define ll_d_hlist_for_each_entry(dentry, p, i_dentry, alias) \
- p = NULL; hlist_for_each_entry(dentry, i_dentry, alias)
-
-
-#define bio_hw_segments(q, bio) 0
-
-
-#define ll_pagevec_init(pv, cold) do {} while (0)
-#define ll_pagevec_add(pv, pg) (0)
-#define ll_pagevec_lru_add_file(pv) do {} while (0)
-
-
#ifndef QUOTA_OK
# define QUOTA_OK 0
#endif
@@ -163,17 +71,6 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
# define NO_QUOTA (-EDQUOT)
#endif
-#ifndef SEEK_DATA
-#define SEEK_DATA 3 /* seek to the next data */
-#endif
-#ifndef SEEK_HOLE
-#define SEEK_HOLE 4 /* seek to the next hole */
-#endif
-
-#ifndef FMODE_UNSIGNED_OFFSET
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
-#endif
-
#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
# define ext2_set_bit __test_and_set_bit_le
# define ext2_clear_bit __test_and_clear_bit_le
@@ -182,20 +79,4 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
# define ext2_find_next_zero_bit find_next_zero_bit_le
#endif
-#ifdef ATTR_TIMES_SET
-# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-#else
-# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
-#endif
-
-
-#include <linux/version.h>
-#include <linux/fs.h>
-
-# define ll_umode_t umode_t
-
-#include <linux/dcache.h>
-
-# define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode), flag)
-
#endif /* _COMPAT25_H */
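
The large block removed above is dead compatibility glue: wrapper macros and inline helpers (the ll_vfs_* family, ll_quota_on/off, the pagevec stubs) that once bridged API differences between kernel versions and no longer have any in-tree callers. That the removed ll_quota_on() returned early when kern_path() succeeded, never reaching quota_on at all, is a strong hint nothing still exercised these paths. The shim pattern they follow is roughly this (the do_thing names are hypothetical):

        /* Out-of-tree builds keyed helpers off the running kernel: */
        #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
        # define ll_do_thing(obj, arg)  do_thing_old(obj)
        #else
        # define ll_do_thing(obj, arg)  do_thing(obj, arg)
        #endif

In-tree staging code always builds against the current kernel, so the extra level of indirection carries no information and the hunks delete it wholesale.
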
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 14562788e4e0..ebe8d68ed813 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -60,18 +60,6 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
ll_delete_from_page_cache(page);
}
-#ifdef ATTR_OPEN
-# define ATTR_FROM_OPEN ATTR_OPEN
-#else
-# ifndef ATTR_FROM_OPEN
-# define ATTR_FROM_OPEN 0
-# endif
-#endif /* ATTR_OPEN */
-
-#ifndef ATTR_RAW
-#define ATTR_RAW 0
-#endif
-
#ifndef ATTR_CTIME_SET
/*
* set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 8ede2a00ca4f..fd3c4df319c2 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -375,12 +375,11 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_PROCNAME_UID "procname_uid"
#define JOBSTATS_NODELOCAL "nodelocal"
-extern int lprocfs_write_frac_helper(const char __user *buffer,
- unsigned long count, int *val, int mult);
-extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
- long val, int mult);
-extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
- unsigned int cpuid);
+int lprocfs_write_frac_helper(const char __user *buffer,
+ unsigned long count, int *val, int mult);
+int lprocfs_read_frac_helper(char *buffer, unsigned long count,
+ long val, int mult);
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
/*
* \return value
* < 0 : on error (only possible for opc as LPROCFS_GET_SMP_ID)
@@ -497,20 +496,18 @@ lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
* count itself to reside within a single cache line.
*/
-extern void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
- long amount);
-extern void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
- long amount);
+void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount);
+void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount);
#define lprocfs_counter_incr(stats, idx) \
lprocfs_counter_add(stats, idx, 1)
#define lprocfs_counter_decr(stats, idx) \
lprocfs_counter_sub(stats, idx, 1)
-extern __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
- struct lprocfs_counter_header *header,
- enum lprocfs_stats_flags flags,
- enum lprocfs_fields_flags field);
+__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
+ struct lprocfs_counter_header *header,
+ enum lprocfs_stats_flags flags,
+ enum lprocfs_fields_flags field);
static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
int idx,
enum lprocfs_fields_flags field)
@@ -537,107 +534,103 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
extern struct lprocfs_stats *
lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
-extern void lprocfs_clear_stats(struct lprocfs_stats *stats);
-extern void lprocfs_free_stats(struct lprocfs_stats **stats);
-extern void lprocfs_init_ops_stats(int num_private_stats,
- struct lprocfs_stats *stats);
-extern void lprocfs_init_mps_stats(int num_private_stats,
- struct lprocfs_stats *stats);
-extern void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats);
-extern int lprocfs_alloc_obd_stats(struct obd_device *obddev,
- unsigned int num_private_stats);
-extern int lprocfs_alloc_md_stats(struct obd_device *obddev,
- unsigned int num_private_stats);
-extern void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
- unsigned conf, const char *name,
- const char *units);
-extern void lprocfs_free_obd_stats(struct obd_device *obddev);
-extern void lprocfs_free_md_stats(struct obd_device *obddev);
+void lprocfs_clear_stats(struct lprocfs_stats *stats);
+void lprocfs_free_stats(struct lprocfs_stats **stats);
+void lprocfs_init_ops_stats(int num_private_stats, struct lprocfs_stats *stats);
+void lprocfs_init_mps_stats(int num_private_stats, struct lprocfs_stats *stats);
+void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats);
+int lprocfs_alloc_obd_stats(struct obd_device *obddev,
+ unsigned int num_private_stats);
+int lprocfs_alloc_md_stats(struct obd_device *obddev,
+ unsigned int num_private_stats);
+void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
+ unsigned conf, const char *name, const char *units);
+void lprocfs_free_obd_stats(struct obd_device *obddev);
+void lprocfs_free_md_stats(struct obd_device *obddev);
struct obd_export;
-extern int lprocfs_exp_cleanup(struct obd_export *exp);
-extern struct dentry *ldebugfs_add_simple(struct dentry *root,
- char *name,
- void *data,
- struct file_operations *fops);
-extern struct dentry *
+int lprocfs_exp_cleanup(struct obd_export *exp);
+struct dentry *ldebugfs_add_simple(struct dentry *root,
+ char *name,
+ void *data,
+ struct file_operations *fops);
+struct dentry *
ldebugfs_add_symlink(const char *name, struct dentry *parent,
- const char *format, ...);
+ const char *format, ...);
-extern int ldebugfs_register_stats(struct dentry *parent,
- const char *name,
- struct lprocfs_stats *stats);
+int ldebugfs_register_stats(struct dentry *parent,
+ const char *name,
+ struct lprocfs_stats *stats);
/* lprocfs_status.c */
-extern int ldebugfs_add_vars(struct dentry *parent,
- struct lprocfs_vars *var,
- void *data);
-
-extern struct dentry *ldebugfs_register(const char *name,
- struct dentry *parent,
- struct lprocfs_vars *list,
- void *data);
-
-extern void ldebugfs_remove(struct dentry **entryp);
-
-extern int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
- struct attribute_group *attrs);
-extern int lprocfs_obd_cleanup(struct obd_device *obd);
-
-extern int ldebugfs_seq_create(struct dentry *parent,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
-extern int ldebugfs_obd_seq_create(struct obd_device *dev,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data);
+int ldebugfs_add_vars(struct dentry *parent,
+ struct lprocfs_vars *var,
+ void *data);
+
+struct dentry *ldebugfs_register(const char *name,
+ struct dentry *parent,
+ struct lprocfs_vars *list,
+ void *data);
+
+void ldebugfs_remove(struct dentry **entryp);
+
+int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
+ struct attribute_group *attrs);
+int lprocfs_obd_cleanup(struct obd_device *obd);
+
+int ldebugfs_seq_create(struct dentry *parent,
+ const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
+int ldebugfs_obd_seq_create(struct obd_device *dev,
+ const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
/* Generic callbacks */
-extern int lprocfs_rd_u64(struct seq_file *m, void *data);
-extern int lprocfs_rd_atomic(struct seq_file *m, void *data);
-extern int lprocfs_wr_atomic(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-extern int lprocfs_rd_uint(struct seq_file *m, void *data);
-extern int lprocfs_wr_uint(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-extern int lprocfs_rd_name(struct seq_file *m, void *data);
-extern int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
-extern int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
-extern int lprocfs_rd_import(struct seq_file *m, void *data);
-extern int lprocfs_rd_state(struct seq_file *m, void *data);
-extern int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
+int lprocfs_rd_u64(struct seq_file *m, void *data);
+int lprocfs_rd_atomic(struct seq_file *m, void *data);
+int lprocfs_wr_atomic(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+int lprocfs_rd_uint(struct seq_file *m, void *data);
+int lprocfs_wr_uint(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+int lprocfs_rd_name(struct seq_file *m, void *data);
+int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
+int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
+int lprocfs_rd_import(struct seq_file *m, void *data);
+int lprocfs_rd_state(struct seq_file *m, void *data);
+int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
struct adaptive_timeout;
-extern int lprocfs_at_hist_helper(struct seq_file *m,
- struct adaptive_timeout *at);
-extern int lprocfs_rd_timeouts(struct seq_file *m, void *data);
-extern int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-extern int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
+int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at);
+int lprocfs_rd_timeouts(struct seq_file *m, void *data);
+int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
-extern int lprocfs_wr_ping(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
-extern int lprocfs_wr_import(struct file *file, const char __user *buffer,
+int lprocfs_wr_ping(struct file *file, const char __user *buffer,
+ size_t count, loff_t *off);
+int lprocfs_wr_import(struct file *file, const char __user *buffer,
size_t count, loff_t *off);
-extern int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
-extern int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off);
+int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
+int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
+ size_t count, loff_t *off);
/* Statfs helpers */
-extern int lprocfs_write_helper(const char __user *buffer, unsigned long count,
- int *val);
-extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
-extern int lprocfs_write_u64_helper(const char __user *buffer,
- unsigned long count, __u64 *val);
-extern int lprocfs_write_frac_u64_helper(const char *buffer,
- unsigned long count,
- __u64 *val, int mult);
-extern char *lprocfs_find_named_value(const char *buffer, const char *name,
- size_t *count);
+int lprocfs_write_helper(const char __user *buffer, unsigned long count,
+ int *val);
+int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
+int lprocfs_write_u64_helper(const char __user *buffer,
+ unsigned long count, __u64 *val);
+int lprocfs_write_frac_u64_helper(const char *buffer,
+ unsigned long count,
+ __u64 *val, int mult);
+char *lprocfs_find_named_value(const char *buffer, const char *name,
+ size_t *count);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_clear(struct obd_histogram *oh);
@@ -646,8 +639,8 @@ unsigned long lprocfs_oh_sum(struct obd_histogram *oh);
void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
struct lprocfs_counter *cnt);
-extern int lprocfs_single_release(struct inode *, struct file *);
-extern int lprocfs_seq_release(struct inode *, struct file *);
+int lprocfs_single_release(struct inode *, struct file *);
+int lprocfs_seq_release(struct inode *, struct file *);
/* You must use these macros when you want to refer to
* the import in a client obd_device for a lprocfs entry */
@@ -746,7 +739,7 @@ extern const struct sysfs_ops lustre_sysfs_ops;
/* lproc_ptlrpc.c */
struct ptlrpc_request;
-extern void target_print_req(void *seq_file, struct ptlrpc_request *req);
+void target_print_req(void *seq_file, struct ptlrpc_request *req);
/* lproc_status.c */
int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data);
@@ -754,62 +747,62 @@ int lprocfs_obd_wr_max_pages_per_rpc(struct file *file, const char *buffer,
size_t count, loff_t *off);
/* all quota proc functions */
-extern int lprocfs_quota_rd_bunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-extern int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_btune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-extern int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_iunit(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-extern int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_itune(char *page, char **start,
- loff_t off, int count,
- int *eof, void *data);
-extern int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
- int *eof, void *data);
-extern int lprocfs_quota_wr_type(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_switch_seconds(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
+int lprocfs_quota_rd_bunit(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_btune(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_iunit(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_itune(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
+ int *eof, void *data);
+int lprocfs_quota_wr_type(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_switch_seconds(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_switch_qs(struct file *file,
+ const char *buffer, unsigned long count,
+ void *data);
+int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
int count, int *eof, void *data);
-extern int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_switch_qs(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_boundary_factor(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_least_bunit(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_least_iunit(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
-extern int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
- int count, int *eof, void *data);
-extern int lprocfs_quota_wr_qs_factor(struct file *file,
- const char *buffer,
- unsigned long count, void *data);
+int lprocfs_quota_wr_boundary_factor(struct file *file,
+ const char *buffer, unsigned long count,
+ void *data);
+int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_least_bunit(struct file *file,
+ const char *buffer, unsigned long count,
+ void *data);
+int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_least_iunit(struct file *file,
+ const char *buffer, unsigned long count,
+ void *data);
+int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+int lprocfs_quota_wr_qs_factor(struct file *file,
+ const char *buffer, unsigned long count,
+ void *data);
#endif /* LPROCFS_SNMP_H */
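
Most of the churn in this header (and in the lustre_idl.h diff below) is removal of the extern keyword from function declarations. extern is the default linkage for functions, so it adds nothing on a prototype and checkpatch warns about it; the two forms declare exactly the same symbol:

        extern int lprocfs_exp_cleanup(struct obd_export *exp);        /* old */
        int lprocfs_exp_cleanup(struct obd_export *exp);        /* identical */

Note this applies only to functions: extern on variable declarations in these headers (the obd_memory counters, obd_timeout and friends) is still required and is left alone.
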
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 4d72d6ed26b0..ac78dbc38b9f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -747,7 +747,7 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
return fid_seq(fid);
}
-extern void lustre_swab_ost_id(struct ost_id *oid);
+void lustre_swab_ost_id(struct ost_id *oid);
/**
* Get inode generation from a igif.
@@ -814,8 +814,8 @@ static inline int fid_is_zero(const struct lu_fid *fid)
return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}
-extern void lustre_swab_lu_fid(struct lu_fid *fid);
-extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+void lustre_swab_lu_fid(struct lu_fid *fid);
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
@@ -1131,7 +1131,7 @@ struct ptlrpc_body_v2 {
__u64 pb_padding[4];
};
-extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
@@ -1395,7 +1395,7 @@ struct obd_connect_data {
* reserve the flag for future use. */
-extern void lustre_swab_connect(struct obd_connect_data *ocd);
+void lustre_swab_connect(struct obd_connect_data *ocd);
/*
* Supported checksum algorithms. Up to 32 checksum types are supported.
@@ -1737,10 +1737,10 @@ struct hsm_state_set {
__u64 hss_clearmask;
};
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-extern void lustre_swab_obd_statfs (struct obd_statfs *os);
+void lustre_swab_obd_statfs(struct obd_statfs *os);
/* ost_body.data values for OST_BRW */
@@ -1780,7 +1780,7 @@ struct obd_ioobj {
#define ioobj_max_brw_set(ioo, num) \
do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
-extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
/* multiple of 8 bytes => can array */
struct niobuf_remote {
@@ -1789,7 +1789,7 @@ struct niobuf_remote {
__u32 flags;
};
-extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
/* lock value block communicated between the filter and llite */
@@ -1811,7 +1811,7 @@ struct ost_lvb_v1 {
__u64 lvb_blocks;
};
-extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
struct ost_lvb {
__u64 lvb_size;
@@ -1825,7 +1825,7 @@ struct ost_lvb {
__u32 lvb_padding;
};
-extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+void lustre_swab_ost_lvb(struct ost_lvb *lvb);
/*
* lquota data structures
@@ -1864,7 +1864,7 @@ struct obd_quotactl {
struct obd_dqblk qc_dqblk;
};
-extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+void lustre_swab_obd_quotactl(struct obd_quotactl *q);
#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
@@ -1913,7 +1913,7 @@ struct quota_body {
#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
-extern void lustre_swab_quota_body(struct quota_body *b);
+void lustre_swab_quota_body(struct quota_body *b);
/* Quota types currently supported */
enum {
@@ -1993,7 +1993,7 @@ struct lquota_lvb {
__u64 lvb_pad1;
};
-extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
/* LVB used with global quota lock */
#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
@@ -2072,7 +2072,7 @@ typedef enum {
REINT_MAX
} mds_reint_t, mdt_reint_t;
-extern void lustre_swab_generic_32s (__u32 *val);
+void lustre_swab_generic_32s(__u32 *val);
/* the disposition of the intent outlines what was executed */
#define DISP_IT_EXECD 0x00000001
@@ -2112,7 +2112,7 @@ extern void lustre_swab_generic_32s (__u32 *val);
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
-extern void lustre_swab_ll_fid (struct ll_fid *fid);
+void lustre_swab_ll_fid(struct ll_fid *fid);
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
@@ -2231,7 +2231,7 @@ struct mdt_body {
__u64 padding_10;
}; /* 216 */
-extern void lustre_swab_mdt_body (struct mdt_body *b);
+void lustre_swab_mdt_body(struct mdt_body *b);
struct mdt_ioepoch {
struct lustre_handle handle;
@@ -2240,7 +2240,7 @@ struct mdt_ioepoch {
__u32 padding;
};
-extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
/* permissions for md_perm.mp_perm */
enum {
@@ -2264,7 +2264,7 @@ struct mdt_remote_perm {
__u32 rp_padding;
};
-extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
+void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
struct mdt_rec_setattr {
__u32 sa_opcode;
@@ -2294,7 +2294,7 @@ struct mdt_rec_setattr {
__u32 sa_padding_5;
};
-extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
+void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
/*
* Attribute flags used in mdt_rec_setattr::sa_valid.
@@ -2584,7 +2584,7 @@ struct mdt_rec_reint {
__u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};
-extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
@@ -2600,7 +2600,7 @@ struct lmv_desc {
struct obd_uuid ld_uuid;
};
-extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
+void lustre_swab_lmv_desc(struct lmv_desc *ld);
/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
struct lmv_stripe_md {
@@ -2612,7 +2612,7 @@ struct lmv_stripe_md {
struct lu_fid mea_ids[0];
};
-extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
+void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
/* lmv structures */
#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
@@ -2670,7 +2670,7 @@ struct lov_desc {
#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
-extern void lustre_swab_lov_desc (struct lov_desc *ld);
+void lustre_swab_lov_desc(struct lov_desc *ld);
/*
* LDLM requests:
@@ -2697,7 +2697,7 @@ struct ldlm_res_id {
#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
(res)->lr_name.name[2], (res)->lr_name.name[3]
-extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
+void lustre_swab_ldlm_res_id(struct ldlm_res_id *id);
static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
const struct ldlm_res_id *res1)
@@ -2774,19 +2774,19 @@ typedef union {
struct ldlm_inodebits l_inodebits;
} ldlm_wire_policy_data_t;
-extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
+void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d);
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
};
-extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
+void lustre_swab_gl_desc(union ldlm_gl_desc *);
struct ldlm_intent {
__u64 opc;
};
-extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
+void lustre_swab_ldlm_intent(struct ldlm_intent *i);
struct ldlm_resource_desc {
ldlm_type_t lr_type;
@@ -2794,7 +2794,7 @@ struct ldlm_resource_desc {
struct ldlm_res_id lr_name;
};
-extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
+void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r);
struct ldlm_lock_desc {
struct ldlm_resource_desc l_resource;
@@ -2803,7 +2803,7 @@ struct ldlm_lock_desc {
ldlm_wire_policy_data_t l_policy_data;
};
-extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
+void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1
@@ -2815,7 +2815,7 @@ struct ldlm_request {
struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};
-extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
+void lustre_swab_ldlm_request(struct ldlm_request *rq);
/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
* Otherwise, 2 are available. */
@@ -2837,7 +2837,7 @@ struct ldlm_reply {
__u64 lock_policy_res2;
};
-extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
+void lustre_swab_ldlm_reply(struct ldlm_reply *r);
#define ldlm_flags_to_wire(flags) ((__u32)(flags))
#define ldlm_flags_from_wire(flags) ((__u64)(flags))
@@ -2881,7 +2881,8 @@ struct mgs_target_info {
__u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
char mti_params[MTI_PARAM_MAXLEN];
};
-extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+
+void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
struct mgs_nidtbl_entry {
__u64 mne_version; /* table version of this entry */
@@ -2896,7 +2897,8 @@ struct mgs_nidtbl_entry {
lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
} u;
};
-extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
struct mgs_config_body {
char mcb_name[MTI_NAME_MAXLEN]; /* logname */
@@ -2906,13 +2908,15 @@ struct mgs_config_body {
__u8 mcb_bits; /* bits unit size of config log */
__u32 mcb_units; /* # of units for bulk transfer */
};
-extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+
+void lustre_swab_mgs_config_body(struct mgs_config_body *body);
struct mgs_config_res {
__u64 mcr_offset; /* index of last config log */
__u64 mcr_size; /* size of the log */
};
-extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+
+void lustre_swab_mgs_config_res(struct mgs_config_res *body);
/* Config marker flags (in config log) */
#define CM_START 0x01
@@ -2933,8 +2937,7 @@ struct cfg_marker {
char cm_comment[MTI_NAME_MAXLEN];
};
-extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
- int swab, int size);
+void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
/*
* Opcodes for multiple servers.
@@ -3334,7 +3337,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
}
}
-extern void lustre_swab_obdo (struct obdo *o);
+void lustre_swab_obdo(struct obdo *o);
/* request structure for OST's */
struct ost_body {
@@ -3348,25 +3351,25 @@ struct ll_fiemap_info_key {
struct ll_user_fiemap fiemap;
};
-extern void lustre_swab_ost_body (struct ost_body *b);
-extern void lustre_swab_ost_last_id(__u64 *id);
-extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
+void lustre_swab_ost_body(struct ost_body *b);
+void lustre_swab_ost_last_id(__u64 *id);
+void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
-extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
- int stripe_count);
-extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ int stripe_count);
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
/* llog_swab.c */
-extern void lustre_swab_llogd_body (struct llogd_body *d);
-extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
-extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
-extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-extern void lustre_swab_llog_id(struct llog_logid *lid);
+void lustre_swab_llogd_body(struct llogd_body *d);
+void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+void lustre_swab_llog_id(struct llog_logid *lid);
struct lustre_cfg;
-extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
@@ -3418,7 +3421,8 @@ struct idx_info {
__u64 ii_pad2;
__u64 ii_pad3;
};
-extern void lustre_swab_idx_info(struct idx_info *ii);
+
+void lustre_swab_idx_info(struct idx_info *ii);
#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
@@ -3450,7 +3454,8 @@ struct lu_idxpage {
* For the time being, we only support fixed-size key & record. */
char lip_entries[0];
};
-extern void lustre_swab_lip_header(struct lu_idxpage *lip);
+
+void lustre_swab_lip_header(struct lu_idxpage *lip);
#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
@@ -3490,7 +3495,7 @@ struct lustre_capa {
__u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
} __attribute__((packed));
-extern void lustre_swab_lustre_capa(struct lustre_capa *c);
+void lustre_swab_lustre_capa(struct lustre_capa *c);
/** lustre_capa::lc_opc */
enum {
@@ -3548,7 +3553,7 @@ struct lustre_capa_key {
__u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
} __attribute__((packed));
-extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
@@ -3620,12 +3625,12 @@ struct hsm_progress_kernel {
__u64 hpk_padding2;
} __attribute__((packed));
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-extern void lustre_swab_hsm_request(struct hsm_request *hr);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+void lustre_swab_hsm_request(struct hsm_request *hr);
/**
* These are object update opcode under UPDATE_OBJ, which is currently
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index e095ada40ed2..9b1bb23c4d3c 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -406,7 +406,7 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
stripes * sizeof(struct lmv_user_mds_data);
}
-extern void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
struct ll_recreate_obj {
__u64 lrc_id;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index f6f4c037fb30..355254689dee 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1017,7 +1017,7 @@ extern struct obd_ops ldlm_obd_ops;
extern char *ldlm_lockname[];
extern char *ldlm_typename[];
-extern char *ldlm_it2str(int it);
+char *ldlm_it2str(int it);
/**
* Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3b992b42fd91..5189fad0b73c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -368,8 +368,8 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp)
return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
}
-extern struct obd_export *class_conn2export(struct lustre_handle *conn);
-extern struct obd_device *class_conn2obd(struct lustre_handle *conn);
+struct obd_export *class_conn2export(struct lustre_handle *conn);
+struct obd_device *class_conn2obd(struct lustre_handle *conn);
/** @} export */
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dcc807676c49..5a38f3d5e011 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -375,8 +375,8 @@ extern unsigned int at_max;
/* genops.c */
struct obd_export;
-extern struct obd_import *class_exp2cliimp(struct obd_export *);
-extern struct obd_import *class_conn2cliimp(struct lustre_handle *);
+struct obd_import *class_exp2cliimp(struct obd_export *);
+struct obd_import *class_conn2cliimp(struct lustre_handle *);
/** @} import */
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 77a7de98fc8e..48ad60b22122 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -2183,7 +2183,7 @@ struct ptlrpcd_ctl {
*/
struct ptlrpc_request_set *pc_set;
/**
- * Thread name used in cfs_daemonize()
+ * Thread name used in kthread_run()
*/
char pc_name[16];
/**
@@ -2277,18 +2277,18 @@ static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
/* ptlrpc/events.c */
extern lnet_handle_eq_t ptlrpc_eq_h;
-extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self);
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
+ lnet_process_id_t *peer, lnet_nid_t *self);
/**
* These callbacks are invoked by LNet when something happened to
* underlying buffer
* @{
*/
-extern void request_out_callback(lnet_event_t *ev);
-extern void reply_in_callback(lnet_event_t *ev);
-extern void client_bulk_callback(lnet_event_t *ev);
-extern void request_in_callback(lnet_event_t *ev);
-extern void reply_out_callback(lnet_event_t *ev);
+void request_out_callback(lnet_event_t *ev);
+void reply_in_callback(lnet_event_t *ev);
+void client_bulk_callback(lnet_event_t *ev);
+void request_in_callback(lnet_event_t *ev);
+void reply_out_callback(lnet_event_t *ev);
/** @} */
/* ptlrpc/connection.c */
@@ -2299,7 +2299,7 @@ int ptlrpc_connection_put(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
int ptlrpc_connection_init(void);
void ptlrpc_connection_fini(void);
-extern lnet_pid_t ptl_get_pid(void);
+lnet_pid_t ptl_get_pid(void);
/* ptlrpc/niobuf.c */
/**
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 55452e562bd4..9ad8c268da10 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -1472,7 +1472,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
}
/* we have an idx, read it */
start = name + LUSTRE_VOLATILE_HDR_LEN + 1;
- *idx = strtoul(start, &end, 0);
+ *idx = simple_strtoul(start, &end, 0);
/* error cases:
* no digit, no trailing :, negative value
*/
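
strtoul() is a C-library symbol the kernel does not provide; the in-kernel spelling is simple_strtoul(). The checked kstrto*() helpers are preferred for new code, but here the caller needs the end pointer to verify that a ':' terminator follows the digits, which is exactly the case simple_strtoul() still covers. A sketch of the shape of that check (start/end as in the function above):

        unsigned long v;
        char *end;

        v = simple_strtoul(start, &end, 0);
        if (end == start || *end != ':')        /* no digits, or bad suffix */
                return false;
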
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 36ed78127830..87bb2cedca7d 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -64,10 +64,10 @@ extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
extern rwlock_t obd_dev_lock;
/* OBD Operations Declarations */
-extern struct obd_device *class_conn2obd(struct lustre_handle *);
-extern struct obd_device *class_exp2obd(struct obd_export *);
-extern int class_handle_ioctl(unsigned int cmd, unsigned long arg);
-extern int lustre_get_jobid(char *jobid);
+struct obd_device *class_conn2obd(struct lustre_handle *);
+struct obd_device *class_exp2obd(struct obd_export *);
+int class_handle_ioctl(unsigned int cmd, unsigned long arg);
+int lustre_get_jobid(char *jobid);
struct lu_device_type;
@@ -139,7 +139,7 @@ int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg);
int class_add_uuid(const char *uuid, __u64 nid);
/*obdecho*/
-extern void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
+void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
#define CFG_F_START 0x01 /* Set when we start updating from a log */
#define CFG_F_MARKER 0x02 /* We are within a marker */
@@ -1823,8 +1823,8 @@ static inline int md_revalidate_lock(struct obd_export *exp,
/* OBD Metadata Support */
-extern int obd_init_caches(void);
-extern void obd_cleanup_caches(void);
+int obd_init_caches(void);
+void obd_cleanup_caches(void);
/* support routines */
extern struct kmem_cache *obdo_cachep;
@@ -1869,8 +1869,7 @@ extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
/* obd_mount.c */
/* sysctl.c */
-extern void obd_sysctl_init (void);
-extern void obd_sysctl_clean (void);
+int obd_sysctl_init(void);
/* uuid.c */
typedef __u8 class_uuid_t[16];
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 73e2d4880b9b..18aec796a724 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -56,9 +56,7 @@ extern unsigned int obd_dump_on_eviction;
/* obd_timeout should only be used for recovery, not for
networking / disk / timings affected by load (use Adaptive Timeouts) */
extern unsigned int obd_timeout; /* seconds */
-extern unsigned int ldlm_timeout; /* seconds */
extern unsigned int obd_timeout_set;
-extern unsigned int ldlm_timeout_set;
extern unsigned int at_min;
extern unsigned int at_max;
extern unsigned int at_history;
@@ -105,8 +103,6 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
/* Timeout definitions */
#define OBD_TIMEOUT_DEFAULT 100
-#define LDLM_TIMEOUT_DEFAULT 20
-#define MDS_LDLM_TIMEOUT_DEFAULT 6
/* Time to wait for all clients to reconnect during recovery (hard limit) */
#define OBD_RECOVERY_TIME_HARD (obd_timeout * 9)
/* Time to wait for all clients to reconnect during recovery (soft limit) */
@@ -505,9 +501,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_ONCE CFS_FAIL_ONCE
#define OBD_FAILED CFS_FAILED
-extern atomic_t libcfs_kmemory;
-
-extern void obd_update_maxusage(void);
+void obd_update_maxusage(void);
#define obd_memory_add(size) \
lprocfs_counter_add(obd_memory, OBD_MEMORY_STAT, (long)(size))
@@ -526,8 +520,8 @@ extern void obd_update_maxusage(void);
lprocfs_stats_collector(obd_memory, OBD_MEMORY_PAGES_STAT, \
LPROCFS_FIELDS_FLAGS_SUM)
-extern __u64 obd_memory_max(void);
-extern __u64 obd_pages_max(void);
+__u64 obd_memory_max(void);
+__u64 obd_pages_max(void);
#define OBD_DEBUG_MEMUSAGE (1)
@@ -622,8 +616,8 @@ do { \
if (unlikely((ptr) == NULL)) { \
CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n", \
(int)(size)); \
- CERROR("%llu total bytes allocated by Lustre, %d by LNET\n", \
- obd_memory_sum(), atomic_read(&libcfs_kmemory)); \
+ CERROR("%llu total bytes allocated by Lustre\n", \
+ obd_memory_sum()); \
} else { \
OBD_ALLOC_POST(ptr, size, "vmalloced"); \
} \
@@ -769,12 +763,10 @@ do { \
"failed\n", (int)1, \
(__u64)(1 << PAGE_CACHE_SHIFT)); \
CERROR("%llu total bytes and %llu total pages " \
- "(%llu bytes) allocated by Lustre, " \
- "%d total bytes by LNET\n", \
+ "(%llu bytes) allocated by Lustre\n", \
obd_memory_sum(), \
obd_pages_sum() << PAGE_CACHE_SHIFT, \
- obd_pages_sum(), \
- atomic_read(&libcfs_kmemory)); \
+ obd_pages_sum()); \
} else { \
obd_pages_add(0); \
CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index e0c1ccafbd63..9053f8116298 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -203,7 +203,7 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
int rc;
vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (vdv == NULL)
+ if (!vdv)
return ERR_PTR(-ENOMEM);
lud = &vdv->cdv_cl.cd_lu_dev;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 6601e6b12c32..fa4b7c760d49 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -209,7 +209,7 @@ struct ldlm_state {
/* interval tree, for LDLM_EXTENT. */
extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
-extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
+void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
void ldlm_interval_free(struct ldlm_interval *node);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 764f98684d74..badd227e4f67 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -656,7 +656,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
}
EXPORT_SYMBOL(target_pack_pool_reply);
-int target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
+static int
+target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
{
if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) {
DEBUG_REQ(D_ERROR, req, "dropping reply");
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index bb2246d3b22b..cd340fc8ceab 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1528,7 +1528,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
if (lvb_len) {
lock->l_lvb_len = lvb_len;
lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (lock->l_lvb_data == NULL)
+ if (!lock->l_lvb_data)
goto out;
}
@@ -1813,7 +1813,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
return 0;
arg = kzalloc(sizeof(*arg), GFP_NOFS);
- if (arg == NULL)
+ if (!arg)
return -ENOMEM;
atomic_set(&arg->restart, 0);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index b7b6ca1196b7..ac79db952da7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -76,15 +76,6 @@ inline unsigned long round_timeout(unsigned long timeout)
return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
-/* timeout for initial callback (AST) reply (bz10399) */
-static inline unsigned int ldlm_get_rq_timeout(void)
-{
- /* Non-AT value */
- unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
-
- return timeout < 1 ? 1 : timeout;
-}
-
#define ELT_STOPPED 0
#define ELT_READY 1
#define ELT_TERMINATE 2
@@ -225,7 +216,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
void *lvb_data;
lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (lvb_data == NULL) {
+ if (!lvb_data) {
LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
rc = -ENOMEM;
goto out;
@@ -453,7 +444,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct ldlm_bl_work_item *blwi;
blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
- if (blwi == NULL)
+ if (!blwi)
return -ENOMEM;
init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
@@ -1053,7 +1044,7 @@ static int ldlm_setup(void)
return -EALREADY;
ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
- if (ldlm_state == NULL)
+ if (!ldlm_state)
return -ENOMEM;
ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
@@ -1123,7 +1114,7 @@ static int ldlm_setup(void)
blp = kzalloc(sizeof(*blp), GFP_NOFS);
- if (blp == NULL) {
+ if (!blp) {
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 1605b9c69271..c234acb85f10 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -1422,7 +1422,7 @@ static int ldlm_pools_thread_start(void)
return -EALREADY;
ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
- if (ldlm_pools_thread == NULL)
+ if (!ldlm_pools_thread)
return -ENOMEM;
init_completion(&ldlm_pools_comp);
@@ -1486,8 +1486,10 @@ EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- unregister_shrinker(&ldlm_pools_srv_shrinker);
- unregister_shrinker(&ldlm_pools_cli_shrinker);
+ if (ldlm_pools_thread) {
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
+ }
ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
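
The ldlm_pools_fini() change follows from an API contract: unregister_shrinker() must only be called for a shrinker that register_shrinker() actually accepted, and in this driver registration happens on the ldlm_pools_thread_start() success path. Guarding on ldlm_pools_thread keeps module unload safe when startup failed. The general shape of the pattern (my_shrinker and worker are hypothetical):

        static struct shrinker my_shrinker;     /* registered on init success */
        static struct task_struct *worker;      /* NULL until start succeeds */

        static void subsys_fini(void)
        {
                /* only undo what init actually did */
                if (worker)
                        unregister_shrinker(&my_shrinker);
        }
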
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index cdb63665a113..4bb3173bcd5f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -939,6 +939,7 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
ldlm_pool_fini(&ns->ns_pool);
ldlm_namespace_debugfs_unregister(ns);
+ ldlm_namespace_sysfs_unregister(ns);
cfs_hash_putref(ns->ns_rs_hash);
/* Namespace \a ns should not be on the list at this time, otherwise
* this will cause issues related to using a freed \a ns in poold
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 021c92fa0333..e93f556fac0d 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -57,8 +57,42 @@ module_param(libcfs_debug, int, 0644);
MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
EXPORT_SYMBOL(libcfs_debug);
+static int libcfs_param_debug_mb_set(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ unsigned num;
+
+ rc = kstrtouint(val, 0, &num);
+ if (rc < 0)
+ return rc;
+
+ if (!*((unsigned int *)kp->arg)) {
+ *((unsigned int *)kp->arg) = num;
+ return 0;
+ }
+
+ rc = cfs_trace_set_debug_mb(num);
+
+ if (!rc)
+ *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb();
+
+ return rc;
+}
+
+/* While the debug_mb setting looks like an unsigned int, it in fact
+ * needs quite a bit of extra processing, so we define a special
+ * debugmb parameter type with corresponding methods to handle this case */
+static struct kernel_param_ops param_ops_debugmb = {
+ .set = libcfs_param_debug_mb_set,
+ .get = param_get_uint,
+};
+
+#define param_check_debugmb(name, p) \
+ __param_check(name, p, unsigned int)
+
static unsigned int libcfs_debug_mb;
-module_param(libcfs_debug_mb, uint, 0644);
+module_param(libcfs_debug_mb, debugmb, 0644);
MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
EXPORT_SYMBOL(libcfs_debug_mb);
@@ -72,18 +106,106 @@ module_param(libcfs_console_ratelimit, uint, 0644);
MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
EXPORT_SYMBOL(libcfs_console_ratelimit);
+static int param_set_delay_minmax(const char *val,
+ const struct kernel_param *kp,
+ long min, long max)
+{
+ long d;
+ int sec;
+ int rc;
+
+ rc = kstrtoint(val, 0, &sec);
+ if (rc)
+ return -EINVAL;
+
+ d = cfs_time_seconds(sec) / 100;
+ if (d < min || d > max)
+ return -EINVAL;
+
+ *((unsigned int *)kp->arg) = d;
+
+ return 0;
+}
+
+static int param_get_delay(char *buffer, const struct kernel_param *kp)
+{
+ unsigned int d = *(unsigned int *)kp->arg;
+
+ return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100));
+}
+
unsigned int libcfs_console_max_delay;
-module_param(libcfs_console_max_delay, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_max_delay);
-
unsigned int libcfs_console_min_delay;
-module_param(libcfs_console_min_delay, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
EXPORT_SYMBOL(libcfs_console_min_delay);
+static int param_set_console_max_delay(const char *val,
+ const struct kernel_param *kp)
+{
+ return param_set_delay_minmax(val, kp,
+ libcfs_console_min_delay, INT_MAX);
+}
+
+static struct kernel_param_ops param_ops_console_max_delay = {
+ .set = param_set_console_max_delay,
+ .get = param_get_delay,
+};
+
+#define param_check_console_max_delay(name, p) \
+ __param_check(name, p, unsigned int)
+
+module_param(libcfs_console_max_delay, console_max_delay, 0644);
+MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
+
+static int param_set_console_min_delay(const char *val,
+ const struct kernel_param *kp)
+{
+ return param_set_delay_minmax(val, kp,
+ 1, libcfs_console_max_delay);
+}
+
+static struct kernel_param_ops param_ops_console_min_delay = {
+ .set = param_set_console_min_delay,
+ .get = param_get_delay,
+};
+
+#define param_check_console_min_delay(name, p) \
+ __param_check(name, p, unsigned int)
+
+module_param(libcfs_console_min_delay, console_min_delay, 0644);
+MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
+
+static int param_set_uint_minmax(const char *val,
+ const struct kernel_param *kp,
+ unsigned int min, unsigned int max)
+{
+ unsigned int num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = kstrtouint(val, 0, &num);
+ if (ret < 0 || num < min || num > max)
+ return -EINVAL;
+ *((unsigned int *)kp->arg) = num;
+ return 0;
+}
+
+static int param_set_uintpos(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, 1, -1);
+}
+
+static struct kernel_param_ops param_ops_uintpos = {
+ .set = param_set_uintpos,
+ .get = param_get_uint,
+};
+
+#define param_check_uintpos(name, p) \
+ __param_check(name, p, unsigned int)
+
unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-module_param(libcfs_console_backoff, uint, 0644);
+module_param(libcfs_console_backoff, uintpos, 0644);
MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
EXPORT_SYMBOL(libcfs_console_backoff);
@@ -93,23 +215,14 @@ EXPORT_SYMBOL(libcfs_debug_binary);
unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
EXPORT_SYMBOL(libcfs_stack);
-static unsigned int portal_enter_debugger;
-EXPORT_SYMBOL(portal_enter_debugger);
-
unsigned int libcfs_catastrophe;
EXPORT_SYMBOL(libcfs_catastrophe);
-unsigned int libcfs_watchdog_ratelimit = 300;
-EXPORT_SYMBOL(libcfs_watchdog_ratelimit);
-
unsigned int libcfs_panic_on_lbug = 1;
module_param(libcfs_panic_on_lbug, uint, 0644);
MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);
-atomic_t libcfs_kmemory = ATOMIC_INIT(0);
-EXPORT_SYMBOL(libcfs_kmemory);
-
static wait_queue_head_t debug_ctlwq;
char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
@@ -414,8 +527,10 @@ int libcfs_debug_init(unsigned long bufsize)
}
rc = cfs_tracefile_init(max);
- if (rc == 0)
+ if (rc == 0) {
libcfs_register_panic_notifier();
+ libcfs_debug_mb = cfs_trace_get_debug_mb();
+ }
return rc;
}
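
The debug.c rework above replaces plain 'uint' module parameters with parameters backed by custom kernel_param_ops, so writes via module options or /sys/module/.../parameters are validated and, for libcfs_debug_mb, immediately resize the trace buffer. module_param(name, type, perm) expands to uses of param_ops_<type> and param_check_<type>, which is why each new type defines both the ops structure and the matching param_check macro; note also that param_set_uintpos() passes -1 as the unsigned maximum, i.e. UINT_MAX. A minimal sketch of the mechanism with a hypothetical 'evenuint' type:

        static int param_set_evenuint(const char *val,
                                      const struct kernel_param *kp)
        {
                unsigned int n;
                int rc;

                rc = kstrtouint(val, 0, &n);
                if (rc)
                        return rc;
                if (n & 1)                      /* reject odd values */
                        return -EINVAL;
                *(unsigned int *)kp->arg = n;
                return 0;
        }

        static struct kernel_param_ops param_ops_evenuint = {
                .set = param_set_evenuint,
                .get = param_get_uint,
        };

        #define param_check_evenuint(name, p) \
                __param_check(name, p, unsigned int)

        static unsigned int my_even_param;
        module_param(my_even_param, evenuint, 0644);
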
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index 7b7fc215e633..42d615fbd664 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(__cfs_fail_check_set);
int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
- int ret = 0;
+ int ret;
ret = __cfs_fail_check_set(id, value, set);
if (ret) {
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index 31a558115a96..933525c73da1 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free);
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
- int rc = 0;
+ int rc;
rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
len -= rc;
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 76d4392bd282..efe5e667a2e5 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -231,7 +231,7 @@ cfs_str2num_check(char *str, int nob, unsigned *num,
char *endp;
str = cfs_trimwhite(str);
- *num = strtoul(str, &endp, 0);
+ *num = simple_strtoul(str, &endp, 0);
if (endp == str)
return 0;
@@ -400,7 +400,7 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list)
struct cfs_range_expr *expr;
expr = list_entry(expr_list->el_exprs.next,
- struct cfs_range_expr, re_link),
+ struct cfs_range_expr, re_link);
list_del(&expr->re_link);
LIBCFS_FREE(expr, sizeof(*expr));
}
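
Besides the simple_strtoul() conversion, the libcfs_string.c hunk above repairs a stray comma operator: the list_entry() assignment ended in ',' rather than ';', fusing it and the following list_del() into a single expression statement. The comma operator sequences left to right, so the loop happened to behave correctly, but the hidden statement boundary is a trap. For example:

        int a, b;

        a = 1,          /* comma operator: this is still...          */
        b = 2;          /* ...one statement, so an enclosing         */
                        /* braceless 'if' would govern both lines    */

Ending the first line with ';' makes them two statements and makes their control-flow scope explicit.
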
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
index aa3fffed1519..fbbc8a7e308d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -114,7 +114,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
crypto_free_hash(hdesc.tfm);
return -ENOSPC;
}
- sg_init_one(&sl, (void *)buf, buf_len);
+ sg_init_one(&sl, buf, buf_len);
hdesc.flags = 0;
err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
@@ -165,7 +165,7 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
{
struct scatterlist sl;
- sg_init_one(&sl, (void *)buf, buf_len);
+ sg_init_one(&sl, buf, buf_len);
return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index e962f89683a6..64a984b42845 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -49,7 +49,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
- if (copy_from_user(buf, (void *)arg, sizeof(*hdr)))
+ if (copy_from_user(buf, arg, sizeof(*hdr)))
return -EFAULT;
if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
@@ -69,7 +69,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
}
orig_len = hdr->ioc_len;
- if (copy_from_user(buf, (void *)arg, hdr->ioc_len))
+ if (copy_from_user(buf, arg, hdr->ioc_len))
return -EFAULT;
if (orig_len != data->ioc_len)
return -EINVAL;
@@ -96,8 +96,6 @@ int libcfs_ioctl_popdata(void *arg, void *data, int size)
return 0;
}
-extern struct cfs_psdev_ops libcfs_psdev_ops;
-
static int
libcfs_psdev_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index e60b2e9b9194..806f9747a3a2 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -50,6 +50,7 @@
#include <linux/list.h>
#include <linux/sysctl.h>
+#include <linux/debugfs.h>
# define DEBUG_SUBSYSTEM S_LNET
@@ -65,48 +66,12 @@ MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Portals v3.1");
MODULE_LICENSE("GPL");
-extern struct miscdevice libcfs_dev;
-extern struct cfs_wi_sched *cfs_sched_rehash;
-extern void libcfs_init_nidstrings(void);
+static void insert_debugfs(void);
+static void remove_debugfs(void);
-static int insert_proc(void);
-static void remove_proc(void);
+static struct dentry *lnet_debugfs_root;
-static struct ctl_table_header *lnet_table_header;
-extern char lnet_upcall[1024];
-/**
- * The path of debug log dump upcall script.
- */
-extern char lnet_debug_log_upcall[1024];
-
-#define CTL_LNET (0x100)
-
-enum {
- PSDEV_DEBUG = 1, /* control debugging */
- PSDEV_SUBSYSTEM_DEBUG, /* control debugging */
- PSDEV_PRINTK, /* force all messages to console */
- PSDEV_CONSOLE_RATELIMIT, /* ratelimit console messages */
- PSDEV_CONSOLE_MAX_DELAY_CS, /* maximum delay over which we skip messages */
- PSDEV_CONSOLE_MIN_DELAY_CS, /* initial delay over which we skip messages */
- PSDEV_CONSOLE_BACKOFF, /* delay increase factor */
- PSDEV_DEBUG_PATH, /* crashdump log location */
- PSDEV_DEBUG_DUMP_PATH, /* crashdump tracelog location */
- PSDEV_CPT_TABLE, /* information about cpu partitions */
- PSDEV_LNET_UPCALL, /* User mode upcall script */
- PSDEV_LNET_MEMUSED, /* bytes currently PORTAL_ALLOCated */
- PSDEV_LNET_CATASTROPHE, /* if we have LBUGged or panic'd */
- PSDEV_LNET_PANIC_ON_LBUG, /* flag to panic on LBUG */
- PSDEV_LNET_DUMP_KERNEL, /* snapshot kernel debug buffer to file */
- PSDEV_LNET_DAEMON_FILE, /* spool kernel debug buffer to file */
- PSDEV_LNET_DEBUG_MB, /* size of debug buffer */
- PSDEV_LNET_DEBUG_LOG_UPCALL, /* debug log upcall script */
- PSDEV_LNET_WATCHDOG_RATELIMIT, /* ratelimit watchdog messages */
- PSDEV_LNET_FORCE_LBUG, /* hook to force an LBUG */
- PSDEV_LNET_FAIL_LOC, /* control test failures instrumentation */
- PSDEV_LNET_FAIL_VAL, /* userdata for fail loc */
-};
-
-static void kportal_memhog_free (struct libcfs_device_userstate *ldu)
+static void kportal_memhog_free(struct libcfs_device_userstate *ldu)
{
struct page **level0p = &ldu->ldu_memhog_root_page;
struct page **level1p;
@@ -146,7 +111,7 @@ static void kportal_memhog_free (struct libcfs_device_userstate *ldu)
*level0p = NULL;
}
- LASSERT (ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_pages == 0);
}
static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
@@ -158,8 +123,8 @@ static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
int count1;
int count2;
- LASSERT (ldu->ldu_memhog_pages == 0);
- LASSERT (ldu->ldu_memhog_root_page == NULL);
+ LASSERT(ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_root_page == NULL);
if (npages < 0)
return -EINVAL;
@@ -338,7 +303,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
if (err != -EINVAL) {
if (err == 0)
err = libcfs_ioctl_popdata(arg,
- data, sizeof (*data));
+ data, sizeof(*data));
break;
}
}
@@ -361,7 +326,7 @@ static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *a
return -ENOMEM;
/* 'cmd' and permissions get checked in our arch-specific caller */
- if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
+ if (libcfs_ioctl_getdata(buf, buf + 800, arg)) {
CERROR("PORTALS ioctl: data error\n");
err = -EINVAL;
goto out;
@@ -428,17 +393,10 @@ static int init_libcfs_module(void)
goto cleanup_wi;
}
+ insert_debugfs();
- rc = insert_proc();
- if (rc) {
- CERROR("insert_proc: error %d\n", rc);
- goto cleanup_crypto;
- }
-
- CDEBUG (D_OTHER, "portals setup OK\n");
+ CDEBUG(D_OTHER, "portals setup OK\n");
return 0;
- cleanup_crypto:
- cfs_crypto_unregister();
cleanup_wi:
cfs_wi_shutdown();
cleanup_deregister:
@@ -454,10 +412,7 @@ static void exit_libcfs_module(void)
{
int rc;
- remove_proc();
-
- CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
- atomic_read(&libcfs_kmemory));
+ remove_debugfs();
if (cfs_sched_rehash != NULL) {
cfs_wi_sched_destroy(cfs_sched_rehash);
@@ -467,16 +422,10 @@ static void exit_libcfs_module(void)
cfs_crypto_unregister();
cfs_wi_shutdown();
- rc = misc_deregister(&libcfs_dev);
- if (rc)
- CERROR("misc_deregister error %d\n", rc);
+ misc_deregister(&libcfs_dev);
cfs_cpu_fini();
- if (atomic_read(&libcfs_kmemory) != 0)
- CERROR("Portals memory leaked: %d bytes\n",
- atomic_read(&libcfs_kmemory));
-
rc = libcfs_debug_cleanup();
if (rc)
pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc);
@@ -551,9 +500,6 @@ static int proc_dobitmasks(struct ctl_table *table, int write,
__proc_dobitmasks);
}
-static int min_watchdog_ratelimit; /* disable ratelimiting */
-static int max_watchdog_ratelimit = (24*60*60); /* limit to once per day */
-
static int __proc_dump_kernel(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
@@ -593,125 +539,6 @@ static int proc_daemon_file(struct ctl_table *table, int write,
__proc_daemon_file);
}
-static int __proc_debug_mb(void *data, int write,
- loff_t pos, void __user *buffer, int nob)
-{
- if (!write) {
- char tmpstr[32];
- int len = snprintf(tmpstr, sizeof(tmpstr), "%d",
- cfs_trace_get_debug_mb());
-
- if (pos >= len)
- return 0;
-
- return cfs_trace_copyout_string(buffer, nob, tmpstr + pos,
- "\n");
- }
-
- return cfs_trace_set_debug_mb_usrstr(buffer, nob);
-}
-
-static int proc_debug_mb(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_debug_mb);
-}
-
-static int proc_console_max_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int rc, max_delay_cs;
- struct ctl_table dummy = *table;
- long d;
-
- dummy.data = &max_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (!write) { /* read */
- max_delay_cs = cfs_duration_sec(libcfs_console_max_delay * 100);
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- return rc;
- }
-
- /* write */
- max_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- return rc;
- if (max_delay_cs <= 0)
- return -EINVAL;
-
- d = cfs_time_seconds(max_delay_cs) / 100;
- if (d == 0 || d < libcfs_console_min_delay)
- return -EINVAL;
- libcfs_console_max_delay = d;
-
- return rc;
-}
-
-static int proc_console_min_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int rc, min_delay_cs;
- struct ctl_table dummy = *table;
- long d;
-
- dummy.data = &min_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (!write) { /* read */
- min_delay_cs = cfs_duration_sec(libcfs_console_min_delay * 100);
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- return rc;
- }
-
- /* write */
- min_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- return rc;
- if (min_delay_cs <= 0)
- return -EINVAL;
-
- d = cfs_time_seconds(min_delay_cs) / 100;
- if (d == 0 || d > libcfs_console_max_delay)
- return -EINVAL;
- libcfs_console_min_delay = d;
-
- return rc;
-}
-
-static int proc_console_backoff(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, backoff;
- struct ctl_table dummy = *table;
-
- dummy.data = &backoff;
- dummy.proc_handler = &proc_dointvec;
-
- if (!write) { /* read */
- backoff = libcfs_console_backoff;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- return rc;
- }
-
- /* write */
- backoff = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- return rc;
- if (backoff <= 0)
- return -EINVAL;
-
- libcfs_console_backoff = backoff;
-
- return rc;
-}
-
static int libcfs_force_lbug(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -809,40 +636,6 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dobitmasks,
},
{
- .procname = "console_ratelimit",
- .data = &libcfs_console_ratelimit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "console_max_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_max_delay_cs
- },
- {
- .procname = "console_min_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_min_delay_cs
- },
- {
- .procname = "console_backoff",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_backoff
- },
-
- {
- .procname = "debug_path",
- .data = libcfs_debug_file_path_arr,
- .maxlen = sizeof(libcfs_debug_file_path_arr),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
-
- {
.procname = "cpu_partition_table",
.maxlen = 128,
.mode = 0444,
@@ -864,13 +657,6 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dostring,
},
{
- .procname = "lnet_memused",
- .data = (int *)&libcfs_kmemory.counter,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {
.procname = "catastrophe",
.data = &libcfs_catastrophe,
.maxlen = sizeof(int),
@@ -878,13 +664,6 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dointvec,
},
{
- .procname = "panic_on_lbug",
- .data = &libcfs_panic_on_lbug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
.procname = "dump_kernel",
.maxlen = 256,
.mode = 0200,
@@ -897,20 +676,6 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_daemon_file,
},
{
- .procname = "debug_mb",
- .mode = 0644,
- .proc_handler = &proc_debug_mb,
- },
- {
- .procname = "watchdog_ratelimit",
- .data = &libcfs_watchdog_ratelimit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .extra1 = &min_watchdog_ratelimit,
- .extra2 = &max_watchdog_ratelimit,
- },
- {
.procname = "force_lbug",
.data = NULL,
.maxlen = 0,
@@ -935,31 +700,93 @@ static struct ctl_table lnet_table[] = {
}
};
-static struct ctl_table top_table[] = {
- {
- .procname = "lnet",
- .mode = 0555,
- .data = NULL,
- .maxlen = 0,
- .child = lnet_table,
- },
- {
- }
+struct lnet_debugfs_symlink_def {
+ char *name;
+ char *target;
+};
+
+static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = {
+ { "console_ratelimit",
+ "/sys/module/libcfs/parameters/libcfs_console_ratelimit"},
+ { "debug_path",
+ "/sys/module/libcfs/parameters/libcfs_debug_file_path"},
+ { "panic_on_lbug",
+ "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"},
+ { "libcfs_console_backoff",
+ "/sys/module/libcfs/parameters/libcfs_console_backoff"},
+ { "debug_mb",
+ "/sys/module/libcfs/parameters/libcfs_debug_mb"},
+ { "console_min_delay_centisecs",
+ "/sys/module/libcfs/parameters/libcfs_console_min_delay"},
+ { "console_max_delay_centisecs",
+ "/sys/module/libcfs/parameters/libcfs_console_max_delay"},
+ {},
};
-static int insert_proc(void)
+static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
{
- if (lnet_table_header == NULL)
- lnet_table_header = register_sysctl_table(top_table);
- return 0;
+ struct ctl_table *table = filp->private_data;
+ int error;
+
+ error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos);
+ if (!error)
+ error = count;
+
+ return error;
+}
+
+static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ctl_table *table = filp->private_data;
+ int error;
+
+ error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos);
+ if (!error)
+ error = count;
+
+ return error;
+}
+
+static const struct file_operations lnet_debugfs_file_operations = {
+ .open = simple_open,
+ .read = lnet_debugfs_read,
+ .write = lnet_debugfs_write,
+ .llseek = default_llseek,
+};
+
+static void insert_debugfs(void)
+{
+ struct ctl_table *table;
+ struct dentry *entry;
+ const struct lnet_debugfs_symlink_def *symlinks;
+
+ if (lnet_debugfs_root == NULL)
+ lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
+
+ /* Even if we cannot create, just ignore it altogether */
+ if (IS_ERR_OR_NULL(lnet_debugfs_root))
+ return;
+
+ for (table = lnet_table; table->procname; table++)
+ entry = debugfs_create_file(table->procname, table->mode,
+ lnet_debugfs_root, table,
+ &lnet_debugfs_file_operations);
+
+ for (symlinks = lnet_debugfs_symlinks; symlinks->name; symlinks++)
+ entry = debugfs_create_symlink(symlinks->name,
+ lnet_debugfs_root,
+ symlinks->target);
}
-static void remove_proc(void)
+static void remove_debugfs(void)
{
- if (lnet_table_header != NULL)
- unregister_sysctl_table(lnet_table_header);
+ if (lnet_debugfs_root != NULL)
+ debugfs_remove_recursive(lnet_debugfs_root);
- lnet_table_header = NULL;
+ lnet_debugfs_root = NULL;
}
MODULE_VERSION("1.0.0");
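The conversion above keeps the existing ctl_table proc_handler callbacks and
simply re-exposes them through debugfs: debugfs_create_file() stashes the table
entry in the inode, simple_open() copies it into file->private_data, and the
lnet_debugfs_read/write wrappers call table->proc_handler directly. A
stripped-down sketch of the same bridging idiom (demo_* names are hypothetical;
the fops argument would be a read/write pair like the wrappers above):

	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/sysctl.h>

	static int demo_value;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_value",
			.data		= &demo_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static struct dentry *demo_root;

	static void demo_debugfs_init(const struct file_operations *fops)
	{
		struct ctl_table *table;

		demo_root = debugfs_create_dir("demo", NULL);
		if (IS_ERR_OR_NULL(demo_root))
			return;	/* debugfs is best-effort */

		/* one debugfs file per table entry, backed by its handler */
		for (table = demo_table; table->procname; table++)
			debugfs_create_file(table->procname, table->mode,
					    demo_root, table, fops);
	}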
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 6ee2adcf8890..effa2af58c13 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -937,18 +937,6 @@ int cfs_trace_set_debug_mb(int mb)
return 0;
}
-int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob)
-{
- char str[32];
- int rc;
-
- rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
- if (rc < 0)
- return rc;
-
- return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
-}
-
int cfs_trace_get_debug_mb(void)
{
int i;
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h
index 0601476e1dc3..e931f6d98de9 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.h
@@ -47,7 +47,7 @@
extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
extern long long cfs_tracefile_size;
-extern void libcfs_run_debug_log_upcall(char *file);
+void libcfs_run_debug_log_upcall(char *file);
int cfs_tracefile_init_arch(void);
void cfs_tracefile_fini_arch(void);
@@ -77,14 +77,13 @@ int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob);
int cfs_trace_daemon_command(char *str);
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob);
int cfs_trace_set_debug_mb(int mb);
-int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob);
int cfs_trace_get_debug_mb(void);
-extern void libcfs_debug_dumplog_internal(void *arg);
-extern void libcfs_register_panic_notifier(void);
-extern void libcfs_unregister_panic_notifier(void);
+void libcfs_debug_dumplog_internal(void *arg);
+void libcfs_register_panic_notifier(void);
+void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
-extern int cfs_trace_max_debug_mb(void);
+int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
@@ -253,15 +252,15 @@ struct cfs_trace_page {
unsigned short type;
};
-extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
- struct libcfs_debug_msg_data *m,
- unsigned long stack);
-extern void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
- const char *buf, int len, const char *file,
- const char *fn);
+void cfs_set_ptldebug_header(struct ptldebug_header *header,
+ struct libcfs_debug_msg_data *m,
+ unsigned long stack);
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn);
-extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
/**
* trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
@@ -271,7 +270,7 @@ extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
*/
extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
+cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
static inline char *
cfs_trace_get_console_buffer(void)
@@ -314,8 +313,8 @@ int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
struct cfs_trace_page *tage);
-extern void cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *m);
+void cfs_trace_assertion_failed(const char *str,
+ struct libcfs_debug_msg_data *m);
/* ASSERTION that is safe to use within the debug system */
#define __LASSERT(cond) \
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 7b008a64707d..b86685912d28 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -250,7 +250,6 @@ void ll_intent_release(struct lookup_intent *it)
void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
- struct ll_d_hlist_node *p;
LASSERT(inode != NULL);
@@ -258,7 +257,7 @@ void ll_invalidate_aliases(struct inode *inode)
inode->i_ino, inode->i_generation, inode);
ll_lock_dcache(inode);
- ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
d_inode(dentry), dentry->d_flags);
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 3d746a94f92e..769b61193d87 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -203,7 +203,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
- ll_pagevec_init(&lru_pvec, 0);
for (i = 1; i < npages; i++) {
unsigned long offset;
int ret;
@@ -228,15 +227,12 @@ static int ll_dir_filler(void *_hash, struct page *page0)
GFP_KERNEL);
if (ret == 0) {
unlock_page(page);
- if (ll_pagevec_add(&lru_pvec, page) == 0)
- ll_pagevec_lru_add_file(&lru_pvec);
} else {
CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
offset, ret);
}
page_cache_release(page);
}
- ll_pagevec_lru_add_file(&lru_pvec);
if (page_pool != &page0)
kfree(page_pool);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 3075db211106..dcd0c6d65efb 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -702,8 +702,7 @@ out_och_free:
out_openerr:
if (opendir_set != 0)
ll_stop_statahead(inode, lli->lli_opendir_key);
- if (fd != NULL)
- ll_file_data_put(fd);
+ ll_file_data_put(fd);
} else {
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
@@ -3005,7 +3004,7 @@ int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
struct inode *inode = d_inode(de);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
- int res = 0;
+ int res;
res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE |
MDS_INODELOCK_LOOKUP);
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
index a6268718b76e..24590ae36090 100644
--- a/drivers/staging/lustre/lustre/llite/llite_capa.c
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -70,7 +70,8 @@ static unsigned long long ll_capa_renewal_retries;
static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa);
-static inline void update_capa_timer(struct obd_capa *ocapa, unsigned long expiry)
+static inline void update_capa_timer(struct obd_capa *ocapa,
+ unsigned long expiry)
{
if (time_before(expiry, ll_capa_timer.expires) ||
!timer_pending(&ll_capa_timer)) {
@@ -102,13 +103,13 @@ static inline int have_expired_capa(void)
spin_lock(&capa_lock);
if (!list_empty(ll_capa_list)) {
ocapa = list_entry(ll_capa_list->next, struct obd_capa,
- c_list);
+ c_list);
expired = capa_is_to_expire(ocapa);
if (!expired)
update_capa_timer(ocapa, capa_renewal_time(ocapa));
} else if (!list_empty(&ll_idle_capas)) {
ocapa = list_entry(ll_idle_capas.next, struct obd_capa,
- c_list);
+ c_list);
expired = capa_is_expired(ocapa);
if (!expired)
update_capa_timer(ocapa, ocapa->c_expiry);
@@ -165,7 +166,8 @@ static void ll_delete_capa(struct obd_capa *ocapa)
/* three places where client capa is deleted:
* 1. capa_thread_main(), main place to delete expired capa.
* 2. ll_clear_inode_capas() in ll_clear_inode().
- * 3. ll_truncate_free_capa() delete truncate capa explicitly in ll_setattr_ost().
+ * 3. ll_truncate_free_capa() deletes truncate capa explicitly in
+ * ll_setattr_ost().
*/
static int capa_thread_main(void *unused)
{
@@ -206,7 +208,8 @@ static int capa_thread_main(void *unused)
* lock.
*/
/* ibits may be changed by ll_have_md_lock() so we have
- * to set it each time */
+ * to set it each time
+ */
ibits = MDS_INODELOCK_LOOKUP;
if (capa_for_mds(&ocapa->c_capa) &&
!S_ISDIR(ocapa->u.cli.inode->i_mode) &&
@@ -225,14 +228,15 @@ static int capa_thread_main(void *unused)
if (capa_for_oss(&ocapa->c_capa) &&
obd_capa_open_count(ocapa) == 0) {
/* oss capa with open count == 0 won't renew,
- * move to idle list */
+ * move to idle list
+ */
sort_add_capa(ocapa, &ll_idle_capas);
continue;
}
/* NB iput() is in ll_update_capa() */
inode = igrab(ocapa->u.cli.inode);
- if (inode == NULL) {
+ if (!inode) {
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
"igrab failed for");
continue;
@@ -255,7 +259,7 @@ static int capa_thread_main(void *unused)
update_capa_timer(next, capa_renewal_time(next));
list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
- c_list) {
+ c_list) {
if (!capa_is_expired(ocapa)) {
if (!next)
update_capa_timer(ocapa,
@@ -299,11 +303,11 @@ int ll_capa_thread_start(void)
task = kthread_run(capa_thread_main, NULL, "ll_capa");
if (IS_ERR(task)) {
CERROR("cannot start expired capa thread: rc %ld\n",
- PTR_ERR(task));
+ PTR_ERR(task));
return PTR_ERR(task);
}
wait_event(ll_capa_thread.t_ctl_waitq,
- thread_is_running(&ll_capa_thread));
+ thread_is_running(&ll_capa_thread));
return 0;
}
@@ -313,7 +317,7 @@ void ll_capa_thread_stop(void)
thread_set_flags(&ll_capa_thread, SVC_STOPPING);
wake_up(&ll_capa_thread.t_ctl_waitq);
wait_event(ll_capa_thread.t_ctl_waitq,
- thread_is_stopped(&ll_capa_thread));
+ thread_is_stopped(&ll_capa_thread));
}
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
@@ -360,7 +364,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
ocapa = NULL;
if (atomic_read(&ll_capa_debug)) {
- CERROR("no capability for "DFID" opc %#llx\n",
+ CERROR("no capability for " DFID " opc %#llx\n",
PFID(&lli->lli_fid), opc);
atomic_set(&ll_capa_debug, 0);
}
@@ -376,7 +380,7 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *ocapa;
- LASSERT(inode != NULL);
+ LASSERT(inode);
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
return NULL;
@@ -385,7 +389,7 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
ocapa = capa_get(lli->lli_mds_capa);
spin_unlock(&capa_lock);
if (!ocapa && atomic_read(&ll_capa_debug)) {
- CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
+ CERROR("no mds capability for " DFID "\n", PFID(&lli->lli_fid));
atomic_set(&ll_capa_debug, 0);
}
@@ -447,7 +451,8 @@ static inline void inode_add_oss_capa(struct inode *inode,
struct list_head *next = NULL;
/* capa is sorted in lli_oss_capas so lookup can always find the
- * latest one */
+ * latest one
+ */
list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
next = &tmp->u.cli.lli_list;
@@ -537,7 +542,8 @@ static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
ll_capa_renewal_failed++;
/* failed capa won't be renewed any longer, but if -EIO,
- * client might be doing recovery, retry in 2 min. */
+ * client might be doing recovery, retry in 2 min.
+ */
if (rc == -EIO && !capa_is_expired(ocapa)) {
delay_capa_renew(ocapa, 120);
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
@@ -638,7 +644,7 @@ void ll_clear_inode_capas(struct inode *inode)
ll_delete_capa(ocapa);
list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
- u.cli.lli_list)
+ u.cli.lli_list)
ll_delete_capa(ocapa);
spin_unlock(&capa_lock);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index f097d4d167d5..ec8fff463208 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -456,7 +456,6 @@ struct eacl_table {
};
struct ll_sb_info {
- struct list_head ll_list;
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts */
spinlock_t ll_lock;
@@ -711,11 +710,11 @@ extern struct file_operations ll_file_operations;
extern struct file_operations ll_file_operations_flock;
extern struct file_operations ll_file_operations_noflock;
extern struct inode_operations ll_file_inode_operations;
-extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
- ldlm_mode_t l_req_mode);
-extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode);
+int ll_have_md_lock(struct inode *inode, __u64 *bits,
+ ldlm_mode_t l_req_mode);
+ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags,
+ ldlm_mode_t mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
int ll_glimpse_ioctl(struct ll_sb_info *sbi,
@@ -1376,9 +1375,9 @@ static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
}
-extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
- int rw, struct inode *inode,
- struct ll_dio_pages *pv);
+ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+ int rw, struct inode *inode,
+ struct ll_dio_pages *pv);
static inline int ll_file_nolock(const struct file *file)
{
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 25139885b5a7..b4ed6c89af3d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -60,9 +60,6 @@ struct kmem_cache *ll_file_data_slab;
struct dentry *llite_root;
struct kset *llite_kset;
-static LIST_HEAD(ll_super_blocks);
-static DEFINE_SPINLOCK(ll_sb_lock);
-
#ifndef log2
#define log2(n) ffz(~(n))
#endif
@@ -112,10 +109,6 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
- spin_lock(&ll_sb_lock);
- list_add_tail(&sbi->ll_list, &ll_super_blocks);
- spin_unlock(&ll_sb_lock);
-
sbi->ll_flags |= LL_SBI_VERBOSE;
sbi->ll_flags |= LL_SBI_CHECKSUM;
@@ -144,12 +137,7 @@ static void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- if (sbi != NULL) {
- spin_lock(&ll_sb_lock);
- list_del(&sbi->ll_list);
- spin_unlock(&ll_sb_lock);
- kfree(sbi);
- }
+ kfree(sbi);
}
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
@@ -1114,7 +1102,7 @@ void ll_clear_inode(struct inode *inode)
if (lli->lli_mds_read_och)
ll_md_real_close(inode, FMODE_READ);
- if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
+ if (S_ISLNK(inode->i_mode)) {
kfree(lli->lli_symlink_name);
lli->lli_symlink_name = NULL;
}
@@ -1150,6 +1138,8 @@ void ll_clear_inode(struct inode *inode)
lli->lli_has_smd = false;
}
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+
static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct md_open_data **mod)
{
@@ -1354,11 +1344,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (!op_data)
return -ENOMEM;
- if (!S_ISDIR(inode->i_mode)) {
- if (attr->ia_valid & ATTR_SIZE)
- inode_dio_write_done(inode);
+ if (!S_ISDIR(inode->i_mode))
mutex_unlock(&inode->i_mutex);
- }
memcpy(&op_data->op_attr, attr, sizeof(*attr));
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index cc00fd10fbcf..5f0d80cc9718 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -162,7 +162,7 @@ static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic = NULL;
+static void *ll_iocontrol_magic;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
{
@@ -340,6 +340,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
int rw = bio_rw(old_bio);
int inactive;
+ blk_queue_split(q, &old_bio, q->bio_split);
+
if (!lo)
goto err;
@@ -365,7 +367,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
return;
err:
- cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
+ bio_io_error(old_bio);
}
@@ -376,7 +378,8 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
+ bio->bi_error = ret;
+ bio_endio(bio);
bio = tmp;
}
}
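Both lloop hunks above track the 4.3 block-layer API change: bio_endio() lost
its extra arguments (the status now lives in bio->bi_error), and drivers with
their own make_request handling call blk_queue_split() to honour the queue's
split limits. A hedged sketch of the new completion idiom
(demo_complete_chain is hypothetical):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void demo_complete_chain(struct bio *bio, int ret)
	{
		while (bio) {
			struct bio *next = bio->bi_next;

			bio->bi_next = NULL;
			bio->bi_error = ret;	/* was cfs_bio_endio(bio, size, ret) */
			bio_endio(bio);
			bio = next;
		}
	}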
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 72ce6e72845f..05e7dc85989e 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -144,10 +144,9 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
static void ll_invalidate_negative_children(struct inode *dir)
{
struct dentry *dentry, *tmp_subdir;
- struct ll_d_hlist_node *p;
ll_lock_dcache(dir);
- ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
+ hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
struct dentry *child;
@@ -334,15 +333,14 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
{
struct dentry *alias, *discon_alias, *invalid_alias;
- struct ll_d_hlist_node *p;
- if (ll_d_hlist_empty(&inode->i_dentry))
+ if (hlist_empty(&inode->i_dentry))
return NULL;
discon_alias = invalid_alias = NULL;
ll_lock_dcache(inode);
- ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
LASSERT(alias != dentry);
spin_lock(&alias->d_lock);
@@ -690,7 +688,7 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
goto out;
}
- LASSERT(ll_d_hlist_empty(&inode->i_dentry));
+ LASSERT(hlist_empty(&inode->i_dentry));
/* We asked for a lock on the directory, but were granted a
* lock on the inode. Since we finally have an inode pointer,
@@ -1008,7 +1006,7 @@ static int ll_unlink(struct inode *dir, struct dentry *dentry)
return rc;
}
-static int ll_mkdir(struct inode *dir, struct dentry *dentry, ll_umode_t mode)
+static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
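The dcache.c and namei.c hunks above drop the ll_d_hlist_* compatibility
wrappers in favour of the stock three-argument hlist_for_each_entry(), which
has embedded the node pointer since v3.9. A minimal sketch of the resulting
iteration (demo_walk_aliases is hypothetical; the caller is assumed to hold
the dcache lock, as ll_lock_dcache() does above):

	#include <linux/dcache.h>
	#include <linux/fs.h>
	#include <linux/printk.h>

	static void demo_walk_aliases(struct inode *inode)
	{
		struct dentry *dentry;

		/* walk every dentry aliasing this inode */
		hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias)
			pr_debug("alias %pd (%p)\n", dentry, dentry);
	}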
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index a58182600dae..39022ea88b5f 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -54,8 +54,8 @@
#include "../include/lustre_param.h"
#include "llite_internal.h"
-struct kmem_cache *ll_remote_perm_cachep = NULL;
-struct kmem_cache *ll_rmtperm_hash_cachep = NULL;
+struct kmem_cache *ll_remote_perm_cachep;
+struct kmem_cache *ll_rmtperm_hash_cachep;
static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
{
@@ -104,8 +104,7 @@ void free_rmtperm_hash(struct hlist_head *hash)
return;
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- hlist_for_each_entry_safe(lrp, next, hash + i,
- lrp_list)
+ hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
free_ll_remote_perm(lrp);
OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
REMOTE_PERM_HASHSIZE * sizeof(*hash));
@@ -117,7 +116,8 @@ static inline int remote_perm_hashfunc(uid_t uid)
}
/* NB: setxid permission is not checked here, instead it's done on
- * MDT when client get remote permission. */
+ * MDT when the client gets remote permission.
+ */
static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
{
struct hlist_head *head;
@@ -184,7 +184,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
if (!lli->lli_remote_perms) {
perm_hash = alloc_rmtperm_hash();
- if (perm_hash == NULL) {
+ if (!perm_hash) {
CERROR("alloc lli_remote_perms failed!\n");
return -ENOMEM;
}
@@ -287,7 +287,7 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
- if (unlikely(perm == NULL)) {
+ if (unlikely(!perm)) {
mutex_unlock(&lli->lli_rmtperm_mutex);
rc = -EPROTO;
break;
@@ -321,8 +321,7 @@ void ll_free_remote_perms(struct inode *inode)
spin_lock(&lli->lli_lock);
for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- hlist_for_each_entry_safe(lrp, node, next, hash + i,
- lrp_list)
+ hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
free_ll_remote_perm(lrp);
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 91bba79678cf..a659962e09c8 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -455,12 +455,11 @@ static void vvp_io_setattr_end(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct inode *inode = ccc_object_inode(io->ci_obj);
- if (cl_io_is_trunc(io)) {
+ if (cl_io_is_trunc(io))
/* Truncate in memory pages - they must be clean pages
* because osc has already notified to destroy osc_extents. */
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
- inode_dio_write_done(inode);
- }
+
mutex_unlock(&inode->i_mutex);
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 954ed08c6af2..a3cf5ad20c60 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -227,11 +227,16 @@ static int vvp_page_prep_write(const struct lu_env *env,
struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
+ struct cl_page *pg = slice->cpl_page;
LASSERT(PageLocked(vmpage));
LASSERT(!PageDirty(vmpage));
- set_page_writeback(vmpage);
+ /* ll_writepage path is not a sync write, so we need to set the page
+ * writeback flag */
+ if (!pg->cp_sync_io)
+ set_page_writeback(vmpage);
+
vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
return 0;
@@ -298,9 +303,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
struct cl_page *pg = slice->cpl_page;
struct page *vmpage = cp->cpg_page;
- LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
- LASSERT(PageWriteback(vmpage));
-
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
/*
@@ -316,14 +318,19 @@ static void vvp_page_completion_write(const struct lu_env *env,
cp->cpg_write_queued = 0;
vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
- /*
- * Only mark the page error only when it's an async write because
- * applications won't wait for IO to finish.
- */
- if (pg->cp_sync_io == NULL)
+ if (pg->cp_sync_io != NULL) {
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
+ } else {
+ LASSERT(PageWriteback(vmpage));
+ /*
+ * Only mark the page error when it's an async write
+ * because applications won't wait for IO to finish.
+ */
vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
- end_page_writeback(vmpage);
+ end_page_writeback(vmpage);
+ }
}
/**
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 6956dec53fcc..9e763ce244e3 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -357,7 +357,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
struct ll_inode_info *lli = ll_i2info(inode);
struct mdt_body *body;
__u32 *xsizes;
- int rc = 0, i;
+ int rc, i;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index cb35f6341fb2..eebe45bdceb6 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -100,7 +100,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
}
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (op_data == NULL) {
+ if (!op_data) {
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index ac5053cd5da5..c9e0536e9f2a 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -716,7 +716,7 @@ repeat_fid2path:
if (remote_gf == NULL) {
remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
- if (remote_gf == NULL) {
+ if (!remote_gf) {
rc = -ENOMEM;
goto out_fid2path;
}
@@ -1398,7 +1398,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
return rc;
temp = kzalloc(sizeof(*temp), GFP_NOFS);
- if (temp == NULL)
+ if (!temp)
return -ENOMEM;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
@@ -1730,7 +1730,7 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
}
rdata = kzalloc(sizeof(*rdata), GFP_NOFS);
- if (rdata == NULL) {
+ if (!rdata) {
rc = -ENOMEM;
goto out;
}
@@ -1993,7 +1993,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- int rc = 0;
+ int rc;
rc = lmv_check_connect(obd);
if (rc)
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 504b24a468fc..8c3bbe574723 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -478,7 +478,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
int rc;
ld = kzalloc(sizeof(*ld), GFP_NOFS);
- if (ld == NULL)
+ if (!ld)
return ERR_PTR(-ENOMEM);
cl_device_init(&ld->ld_cl, t);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 11c1081b1d3d..bf3629151d68 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -181,7 +181,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
} else {
sub->sub_io = kzalloc(sizeof(*sub->sub_io),
GFP_NOFS);
- if (sub->sub_io == NULL)
+ if (!sub->sub_io)
result = -ENOMEM;
}
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index b7e7bfabe382..dd1cf3d2d039 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -123,6 +123,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
if (shrink) {
for (; stripe < lsm->lsm_stripe_count; stripe++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
+
kms = lov_size_to_stripe(lsm, size, stripe);
CDEBUG(D_INODE,
"stripe %d KMS %sing %llu->%llu\n",
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 96c55acd52ae..c5c67d982ef2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -107,6 +107,10 @@ static void lov_putref(struct obd_device *obd)
/* Disconnect */
__lov_del_obd(obd, tgt);
}
+
+ if (lov->lov_tgts_kobj)
+ kobject_put(lov->lov_tgts_kobj);
+
} else {
mutex_unlock(&lov->lov_lock);
}
@@ -322,9 +326,6 @@ static int lov_disconnect(struct obd_export *exp)
}
}
- if (lov->lov_tgts_kobj)
- kobject_put(lov->lov_tgts_kobj);
-
obd_putref(obd);
out:
@@ -976,7 +977,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
src_oa->o_flags & OBD_FL_RECREATE_OBJS);
obj_mdp = kzalloc(sizeof(*obj_mdp), GFP_NOFS);
- if (obj_mdp == NULL)
+ if (!obj_mdp)
return -ENOMEM;
ost_idx = src_oa->o_nlink;
@@ -1439,7 +1440,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
__u32 *genp;
len = 0;
- if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
+ if (obd_ioctl_getdata(&buf, &len, uarg))
return -EINVAL;
data = (struct obd_ioctl_data *)buf;
@@ -1472,7 +1473,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
*genp = lov->lov_tgts[i]->ltd_gen;
}
- if (copy_to_user((void *)uarg, buf, len))
+ if (copy_to_user(uarg, buf, len))
rc = -EFAULT;
obd_ioctl_freedata(buf, len);
break;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 1e4d3fbee323..c59b1402616e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -431,7 +431,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
return -ENAMETOOLONG;
new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
- if (new_pool == NULL)
+ if (!new_pool)
return -ENOMEM;
strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index f4de8b84c5c2..416e42ed7792 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -275,7 +275,7 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
int rc = 0, i;
set = kzalloc(sizeof(*set), GFP_NOFS);
- if (set == NULL)
+ if (!set)
return -ENOMEM;
lov_init_set(set);
@@ -301,7 +301,7 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
}
req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out_set;
}
@@ -358,7 +358,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
int rc = 0, i;
set = kzalloc(sizeof(*set), GFP_NOFS);
- if (set == NULL)
+ if (!set)
return -ENOMEM;
lov_init_set(set);
@@ -384,7 +384,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
}
req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out_set;
}
@@ -477,7 +477,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
int rc = 0, i;
set = kzalloc(sizeof(*set), GFP_NOFS);
- if (set == NULL)
+ if (!set)
return -ENOMEM;
lov_init_set(set);
@@ -500,7 +500,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
}
req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out_set;
}
@@ -704,7 +704,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
int rc = 0, i;
set = kzalloc(sizeof(*set), GFP_NOFS);
- if (set == NULL)
+ if (!set)
return -ENOMEM;
lov_init_set(set);
@@ -730,14 +730,14 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
}
req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out_set;
}
req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs),
GFP_NOFS);
- if (req->rq_oi.oi_osfs == NULL) {
+ if (!req->rq_oi.oi_osfs) {
kfree(req);
rc = -ENOMEM;
goto out_set;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index d3234cb1ea22..1a850ea26849 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -286,7 +286,7 @@ static inline __u64 attr_pack(unsigned int ia_valid)
sa_valid |= MDS_ATTR_KILL_SGID;
if (ia_valid & ATTR_CTIME_SET)
sa_valid |= MDS_ATTR_CTIME_SET;
- if (ia_valid & ATTR_FROM_OPEN)
+ if (ia_valid & ATTR_OPEN)
sa_valid |= MDS_ATTR_FROM_OPEN;
if (ia_valid & ATTR_BLOCKS)
sa_valid |= MDS_ATTR_BLOCKS;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 7f208a6621e6..204d51262560 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1202,7 +1202,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
/* Key is KEY_FID2PATH + getinfo_fid2path description */
keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
key = kzalloc(keylen, GFP_NOFS);
- if (key == NULL)
+ if (!key)
return -ENOMEM;
memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
@@ -1605,7 +1605,7 @@ static int mdc_changelog_send_thread(void *csdata)
cs->cs_fp, cs->cs_startrec);
cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS);
- if (cs->cs_buf == NULL) {
+ if (!cs->cs_buf) {
rc = -ENOMEM;
goto out;
}
@@ -1934,7 +1934,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_quotactl *oqctl;
oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
- if (oqctl == NULL) {
+ if (!oqctl) {
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 174dfc32876b..019ee2f256aa 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1128,7 +1128,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
inst = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- if (inst == NULL)
+ if (!inst)
return -ENOMEM;
if (!IS_SERVER(lsi)) {
@@ -1232,7 +1232,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
pos += sprintf(obdname + pos, "-%s%04x",
is_ost ? "OST" : "MDT", entry->mne_index);
- cname = is_ost ? "osc" : "mdc",
+ cname = is_ost ? "osc" : "mdc";
pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
lustre_cfg_bufs_reset(&bufs, obdname);
@@ -1493,7 +1493,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
lsi = s2lsi(cld->cld_cfg.cfg_sb);
env = kzalloc(sizeof(*env), GFP_NOFS);
- if (env == NULL)
+ if (!env)
return -ENOMEM;
rc = lu_env_init(env, LCT_MG_THREAD);
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index bc3fc4780cb9..933456c502d1 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -104,11 +104,10 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
if (unlikely(old_count <= new_count))
return old_size;
- new = kzalloc(new_size, GFP_NOFS);
+ new = kmemdup(*header, new_size, GFP_NOFS);
if (unlikely(new == NULL))
return -ENOMEM;
- memcpy(new, *header, new_size);
kfree(*header);
*header = new;
return new_size;
@@ -125,11 +124,10 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
if (unlikely(old_count <= ext_count))
return 0;
- new = kzalloc(ext_size, GFP_NOFS);
+ new = kmemdup(*header, ext_size, GFP_NOFS);
if (unlikely(new == NULL))
return -ENOMEM;
- memcpy(new, *header, ext_size);
kfree(*header);
*header = new;
return 0;
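Both acl.c hunks above replace the open-coded kzalloc()+memcpy() pair with
kmemdup(), which allocates and copies in one step (the zeroing was redundant,
since every byte of the new buffer was overwritten anyway). A sketch of the
shape of the helper after the conversion (shrink_hdr and its contract are
hypothetical):

	#include <linux/compiler.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static void *shrink_hdr(void *old, size_t new_size)
	{
		void *new = kmemdup(old, new_size, GFP_NOFS);

		if (unlikely(!new))
			return NULL;	/* caller keeps 'old' on failure */
		kfree(old);
		return new;
	}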
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index a7f3032f34dd..d5fb81f84cd4 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -51,13 +51,13 @@
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
int radix);
-# define PASSERT(env, page, expr) \
- do { \
- if (unlikely(!(expr))) { \
- CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
- LASSERT(0); \
- } \
- } while (0)
+# define PASSERT(env, page, expr) \
+ do { \
+ if (unlikely(!(expr))) { \
+ CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
+ LASSERT(0); \
+ } \
+ } while (0)
# define PINVRNT(env, page, exp) \
((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
@@ -169,6 +169,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
idx, CLT_PVEC_SIZE)) > 0) {
int end_of_region = 0;
+
idx = pvec[nr - 1]->cp_index + 1;
for (i = 0, j = 0; i < nr; ++i) {
page = pvec[i];
@@ -286,6 +287,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
GFP_NOFS);
if (page != NULL) {
int result = 0;
+
atomic_set(&page->cp_ref, 1);
if (type == CPT_CACHEABLE) /* for radix tree */
atomic_inc(&page->cp_ref);
@@ -352,8 +354,10 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
/* fast path. */
if (type == CPT_CACHEABLE) {
- /* vmpage lock is used to protect the child/parent
- * relationship */
+ /*
+ * vmpage lock is used to protect the child/parent
+ * relationship
+ */
KLASSERT(PageLocked(vmpage));
/*
* cl_vmpage_page() can be called here without any locks as
@@ -372,9 +376,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
idx) == page));
}
- if (page != NULL) {
+ if (page != NULL)
return page;
- }
/* allocate and initialize cl_page */
page = cl_page_alloc(env, o, idx, vmpage, type);
@@ -1189,9 +1192,6 @@ int cl_page_prep(const struct lu_env *env, struct cl_io *io,
if (result == 0)
cl_page_io_start(env, pg, crt);
- KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
- equi(result == 0,
- PageWriteback(cl_page_vmpage(env, pg)))));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
return result;
}
@@ -1425,7 +1425,7 @@ void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
(const struct lu_env *,
- const struct cl_page_slice *,int, int),
+ const struct cl_page_slice *, int, int),
from, to);
}
EXPORT_SYMBOL(cl_page_clip);
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1bc37566b3a5..2c705d76211f 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -78,12 +78,8 @@ atomic_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
EXPORT_SYMBOL(obd_timeout);
-unsigned int ldlm_timeout = LDLM_TIMEOUT_DEFAULT; /* seconds */
-EXPORT_SYMBOL(ldlm_timeout);
unsigned int obd_timeout_set;
EXPORT_SYMBOL(obd_timeout_set);
-unsigned int ldlm_timeout_set;
-EXPORT_SYMBOL(ldlm_timeout_set);
/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
unsigned int at_min = 0;
EXPORT_SYMBOL(at_min);
@@ -144,11 +140,11 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
CERROR("%s%salloc of %s (%llu bytes) failed at %s:%d\n",
ptr ? "force " :"", type, name, (__u64)size, file,
line);
- CERROR("%llu total bytes and %llu total pages (%llu bytes) allocated by Lustre, %d total bytes by LNET\n",
+ CERROR("%llu total bytes and %llu total pages"
+ " (%llu bytes) allocated by Lustre\n",
obd_memory_sum(),
obd_pages_sum() << PAGE_CACHE_SHIFT,
- obd_pages_sum(),
- atomic_read(&libcfs_kmemory));
+ obd_pages_sum());
return 1;
}
return 0;
@@ -232,7 +228,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
lcfg = kzalloc(data->ioc_plen1, GFP_NOFS);
- if (lcfg == NULL) {
+ if (!lcfg) {
err = -ENOMEM;
goto out;
}
@@ -571,12 +567,14 @@ static int __init init_obdclass(void)
if (err)
return err;
- obd_sysctl_init();
-
err = class_procfs_init();
if (err)
return err;
+ err = obd_sysctl_init();
+ if (err)
+ return err;
+
err = lu_global_init();
if (err)
return err;
@@ -661,7 +659,6 @@ static void cleanup_obdclass(void)
lu_global_fini();
obd_cleanup_caches();
- obd_sysctl_clean();
class_procfs_clean();
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM D_OTHER
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include "../include/obd_support.h"
#include "../include/lustre_debug.h"
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 978c3c5c460a..0ca730948f7a 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -45,7 +45,7 @@
spinlock_t obd_types_lock;
-struct kmem_cache *obd_device_cachep;
+static struct kmem_cache *obd_device_cachep;
struct kmem_cache *obdo_cachep;
EXPORT_SYMBOL(obdo_cachep);
static struct kmem_cache *import_cachep;
@@ -71,9 +71,8 @@ static struct obd_device *obd_device_alloc(void)
struct obd_device *obd;
OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, GFP_NOFS);
- if (obd != NULL) {
+ if (obd != NULL)
obd->obd_magic = OBD_DEVICE_MAGIC;
- }
return obd;
}
@@ -172,7 +171,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
rc = -ENOMEM;
type = kzalloc(sizeof(*type), GFP_NOFS);
- if (type == NULL)
+ if (!type)
return rc;
type->typ_dt_ops = kzalloc(sizeof(*type->typ_dt_ops), GFP_NOFS);
@@ -294,7 +293,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
}
type = class_get_type(type_name);
- if (type == NULL){
+ if (type == NULL) {
CERROR("OBD: unknown type: %s\n", type_name);
return ERR_PTR(-ENODEV);
}
@@ -999,7 +998,8 @@ void class_import_put(struct obd_import *imp)
}
EXPORT_SYMBOL(class_import_put);
-static void init_imp_at(struct imp_at *at) {
+static void init_imp_at(struct imp_at *at)
+{
int i;
at_init(&at->iat_net_latency, 0, 0);
for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
@@ -1016,7 +1016,7 @@ struct obd_import *class_new_import(struct obd_device *obd)
struct obd_import *imp;
imp = kzalloc(sizeof(*imp), GFP_NOFS);
- if (imp == NULL)
+ if (!imp)
return NULL;
INIT_LIST_HEAD(&imp->imp_pinger_chain);
@@ -1642,7 +1642,8 @@ static int obd_zombie_impexp_check(void *arg)
/**
* Add export to the obd_zombie thread and notify it.
*/
-static void obd_zombie_export_add(struct obd_export *exp) {
+static void obd_zombie_export_add(struct obd_export *exp)
+{
spin_lock(&exp->exp_obd->obd_dev_lock);
LASSERT(!list_empty(&exp->exp_obd_chain));
list_del_init(&exp->exp_obd_chain);
@@ -1658,7 +1659,8 @@ static void obd_zombie_export_add(struct obd_export *exp) {
/**
* Add import to the obd_zombie thread and notify it.
*/
-static void obd_zombie_import_add(struct obd_import *imp) {
+static void obd_zombie_import_add(struct obd_import *imp)
+{
LASSERT(imp->imp_sec == NULL);
LASSERT(imp->imp_rq_pool == NULL);
spin_lock(&obd_zombie_impexp_lock);
@@ -1819,7 +1821,7 @@ void *kuc_alloc(int payload_len, int transport, int type)
int len = kuc_len(payload_len);
lh = kzalloc(len, GFP_NOFS);
- if (lh == NULL)
+ if (!lh)
return ERR_PTR(-ENOMEM);
lh->kuc_magic = KUC_MAGIC;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 84f75dce0d4c..6218ef34ee80 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -385,7 +385,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v)
return 0;
}
-struct seq_operations obd_device_list_sops = {
+static const struct seq_operations obd_device_list_sops = {
.start = obd_device_list_seq_start,
.stop = obd_device_list_seq_stop,
.next = obd_device_list_seq_next,
@@ -406,7 +406,7 @@ static int obd_device_list_open(struct inode *inode, struct file *file)
return 0;
}
-struct file_operations obd_device_list_fops = {
+static const struct file_operations obd_device_list_fops = {
.owner = THIS_MODULE,
.open = obd_device_list_open,
.read = seq_read,
@@ -423,7 +423,7 @@ static struct attribute_group lustre_attr_group = {
int class_procfs_init(void)
{
- int rc = 0;
+ int rc = -ENOMEM;
struct dentry *file;
lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 54f0a81f7b51..1515163a81a5 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -50,331 +50,119 @@
#include "../../include/obd_support.h"
#include "../../include/lprocfs_status.h"
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *obd_table_header;
-#endif
+struct static_lustre_uintvalue_attr {
+ struct {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len);
+ } u;
+ int *value;
+};
-#ifdef CONFIG_SYSCTL
-static int proc_set_timeout(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static ssize_t static_uintvalue_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
{
- int rc;
+ struct static_lustre_uintvalue_attr *lattr = (void *)attr;
- rc = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ldlm_timeout >= obd_timeout)
- ldlm_timeout = max(obd_timeout / 3, 1U);
- return rc;
+ return sprintf(buf, "%d\n", *lattr->value);
}
-static int proc_memory_alloc(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static ssize_t static_uintvalue_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
- char buf[22];
- int len;
-
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write)
- return -EINVAL;
+ struct static_lustre_uintvalue_attr *lattr = (void *)attr;
+ int rc;
+ unsigned int val;
- len = snprintf(buf, sizeof(buf), "%llu\n", obd_memory_sum());
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- *ppos += *lenp;
- return 0;
-}
+ rc = kstrtouint(buffer, 10, &val);
+ if (rc)
+ return rc;
-static int proc_pages_alloc(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- char buf[22];
- int len;
+ *lattr->value = val;
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write)
- return -EINVAL;
-
- len = snprintf(buf, sizeof(buf), "%llu\n", obd_pages_sum());
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- *ppos += *lenp;
- return 0;
+ return count;
}
-static int proc_mem_max(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- char buf[22];
- int len;
+#define LUSTRE_STATIC_UINT_ATTR(name, value) \
+static struct static_lustre_uintvalue_attr lustre_sattr_##name = \
+ {__ATTR(name, 0644, \
+ static_uintvalue_show, \
+ static_uintvalue_store),\
+ value }
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write)
- return -EINVAL;
+LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout);
- len = snprintf(buf, sizeof(buf), "%llu\n", obd_memory_max());
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- *ppos += *lenp;
- return 0;
+static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+	return sprintf(buf, "%lu\n",
+ obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
}
-static int proc_pages_max(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
{
- char buf[22];
- int len;
+ int rc;
+ unsigned long val;
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write)
- return -EINVAL;
+ rc = kstrtoul(buffer, 10, &val);
+ if (rc)
+ return rc;
- len = snprintf(buf, sizeof(buf), "%llu\n", obd_pages_max());
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- *ppos += *lenp;
- return 0;
-}
+ val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
-static int proc_max_dirty_pages_in_mb(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc = 0;
-
- if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
+ if (val > ((totalram_pages / 10) * 9)) {
+ /* Somebody wants to assign too much memory to dirty pages */
+ return -EINVAL;
}
- if (write) {
- rc = lprocfs_write_frac_helper(buffer, *lenp,
- (unsigned int *)table->data,
- 1 << (20 - PAGE_CACHE_SHIFT));
- /* Don't allow them to let dirty pages exceed 90% of system
- * memory and set a hard minimum of 4MB. */
- if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
- CERROR("Refusing to set max dirty pages to %u, which is more than 90%% of available RAM; setting to %lu\n",
- obd_max_dirty_pages,
- ((totalram_pages / 10) * 9));
- obd_max_dirty_pages = (totalram_pages / 10) * 9;
- } else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
- obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
- }
- } else {
- char buf[21];
- int len;
- len = lprocfs_read_frac_helper(buf, sizeof(buf),
- *(unsigned int *)table->data,
- 1 << (20 - PAGE_CACHE_SHIFT));
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
+ if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+ /* Less than 4 Mb for dirty cache is also bad */
+ return -EINVAL;
}
- *ppos += *lenp;
- return rc;
-}
-static int proc_alloc_fail_rate(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc = 0;
+ obd_max_dirty_pages = val;
- if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write) {
- rc = lprocfs_write_frac_helper(buffer, *lenp,
- (unsigned int *)table->data,
- OBD_ALLOC_FAIL_MULT);
- } else {
- char buf[21];
- int len;
-
- len = lprocfs_read_frac_helper(buf, 21,
- *(unsigned int *)table->data,
- OBD_ALLOC_FAIL_MULT);
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- }
- *ppos += *lenp;
- return rc;
+ return count;
}
-
-static struct ctl_table obd_table[] = {
- {
- .procname = "timeout",
- .data = &obd_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_set_timeout
- },
- {
- .procname = "debug_peer_on_timeout",
- .data = &obd_debug_peer_on_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "dump_on_timeout",
- .data = &obd_dump_on_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "dump_on_eviction",
- .data = &obd_dump_on_eviction,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "memused",
- .data = NULL,
- .maxlen = 0,
- .mode = 0444,
- .proc_handler = &proc_memory_alloc
- },
- {
- .procname = "pagesused",
- .data = NULL,
- .maxlen = 0,
- .mode = 0444,
- .proc_handler = &proc_pages_alloc
- },
- {
- .procname = "memused_max",
- .data = NULL,
- .maxlen = 0,
- .mode = 0444,
- .proc_handler = &proc_mem_max
- },
- {
- .procname = "pagesused_max",
- .data = NULL,
- .maxlen = 0,
- .mode = 0444,
- .proc_handler = &proc_pages_max
- },
- {
- .procname = "ldlm_timeout",
- .data = &ldlm_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_set_timeout
- },
- {
- .procname = "alloc_fail_rate",
- .data = &obd_alloc_fail_rate,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_alloc_fail_rate
- },
- {
- .procname = "max_dirty_mb",
- .data = &obd_max_dirty_pages,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_max_dirty_pages_in_mb
- },
- {
- .procname = "at_min",
- .data = &at_min,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "at_max",
- .data = &at_max,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "at_extra",
- .data = &at_extra,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "at_early_margin",
- .data = &at_early_margin,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "at_history",
- .data = &at_history,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {}
+LUSTRE_RW_ATTR(max_dirty_mb);
+
+LUSTRE_STATIC_UINT_ATTR(debug_peer_on_timeout, &obd_debug_peer_on_timeout);
+LUSTRE_STATIC_UINT_ATTR(dump_on_timeout, &obd_dump_on_timeout);
+LUSTRE_STATIC_UINT_ATTR(dump_on_eviction, &obd_dump_on_eviction);
+LUSTRE_STATIC_UINT_ATTR(at_min, &at_min);
+LUSTRE_STATIC_UINT_ATTR(at_max, &at_max);
+LUSTRE_STATIC_UINT_ATTR(at_extra, &at_extra);
+LUSTRE_STATIC_UINT_ATTR(at_early_margin, &at_early_margin);
+LUSTRE_STATIC_UINT_ATTR(at_history, &at_history);
+
+static struct attribute *lustre_attrs[] = {
+ &lustre_sattr_timeout.u.attr,
+ &lustre_attr_max_dirty_mb.attr,
+ &lustre_sattr_debug_peer_on_timeout.u.attr,
+ &lustre_sattr_dump_on_timeout.u.attr,
+ &lustre_sattr_dump_on_eviction.u.attr,
+ &lustre_sattr_at_min.u.attr,
+ &lustre_sattr_at_max.u.attr,
+ &lustre_sattr_at_extra.u.attr,
+ &lustre_sattr_at_early_margin.u.attr,
+ &lustre_sattr_at_history.u.attr,
+ NULL,
};
-static struct ctl_table parent_table[] = {
- {
- .procname = "lustre",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = obd_table
- },
- {}
+static struct attribute_group lustre_attr_group = {
+ .attrs = lustre_attrs,
};
-#endif
-void obd_sysctl_init(void)
+int obd_sysctl_init(void)
{
-#ifdef CONFIG_SYSCTL
- if (!obd_table_header)
- obd_table_header = register_sysctl_table(parent_table);
-#endif
+ return sysfs_create_group(lustre_kobj, &lustre_attr_group);
}
void obd_sysctl_clean(void)
{
-#ifdef CONFIG_SYSCTL
- if (obd_table_header)
- unregister_sysctl_table(obd_table_header);
- obd_table_header = NULL;
-#endif
}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 4fa52d1b79d1..facc8351fcea 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -61,7 +61,7 @@ static struct llog_handle *llog_alloc_handle(void)
struct llog_handle *loghandle;
loghandle = kzalloc(sizeof(*loghandle), GFP_NOFS);
- if (loghandle == NULL)
+ if (!loghandle)
return NULL;
init_rwsem(&loghandle->lgh_lock);
@@ -208,7 +208,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
LASSERT(handle->lgh_hdr == NULL);
llh = kzalloc(sizeof(*llh), GFP_NOFS);
- if (llh == NULL)
+ if (!llh)
return -ENOMEM;
handle->lgh_hdr = llh;
/* first assign flags to use llog_client_ops */
@@ -435,7 +435,7 @@ int llog_process_or_fork(const struct lu_env *env,
int rc;
lpi = kzalloc(sizeof(*lpi), GFP_NOFS);
- if (lpi == NULL) {
+ if (!lpi) {
CERROR("cannot alloc pointer\n");
return -ENOMEM;
}
@@ -907,7 +907,7 @@ int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
char *name)
{
struct llog_handle *llh;
- int rc = 0;
+ int rc;
rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
if (rc < 0) {
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 17e7c1807863..08d1f0edf98d 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -275,7 +275,7 @@ struct dentry *ldebugfs_add_symlink(const char *name, struct dentry *parent,
return NULL;
dest = kzalloc(MAX_STRING_SIZE + 1, GFP_KERNEL);
- if (dest == NULL)
+ if (!dest)
return NULL;
va_start(ap, format);
@@ -329,7 +329,7 @@ EXPORT_SYMBOL(ldebugfs_add_vars);
void ldebugfs_remove(struct dentry **entryp)
{
- debugfs_remove(*entryp);
+ debugfs_remove_recursive(*entryp);
*entryp = NULL;
}
EXPORT_SYMBOL(ldebugfs_remove);
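
The one-line change above matters for directories: debugfs_remove() removes only the dentry it is given, while debugfs_remove_recursive() also tears down everything beneath it. A hedged sketch (hypothetical names; counter stands in for any value the caller exposes):

	static u32 counter;
	struct dentry *dir;

	dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("counter", 0644, dir, &counter);

	/* later: removes "counter" and then "example" in one call */
	debugfs_remove_recursive(dir);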
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 4d9b6333eeae..8e472327c880 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -602,7 +602,7 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
struct lu_site_bkt_data *bkt;
o = lu_object_alloc(env, dev, f, conf);
- if (unlikely(IS_ERR(o)))
+ if (IS_ERR(o))
return o;
hs = dev->ld_site->ls_obj_hash;
@@ -666,7 +666,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
* operations, including fld queries, inode loading, etc.
*/
o = lu_object_alloc(env, dev, f, conf);
- if (unlikely(IS_ERR(o)))
+ if (IS_ERR(o))
return o;
LASSERT(lu_fid_eq(lu_object_fid(o), f));
@@ -674,7 +674,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
cfs_hash_bd_lock(hs, &bd, 1);
shadow = htable_lookup(s, &bd, f, waiter, &version);
- if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
+ if (likely(PTR_ERR(shadow) == -ENOENT)) {
struct lu_site_bkt_data *bkt;
bkt = cfs_hash_bd_extra_get(hs, &bd);
@@ -1558,7 +1558,7 @@ static int keys_fill(struct lu_context *ctx)
LINVRNT(key->lct_index == i);
value = key->lct_init(ctx, key);
- if (unlikely(IS_ERR(value)))
+ if (IS_ERR(value))
return PTR_ERR(value);
if (!(ctx->lc_tags & LCT_NOREF))
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5cc6435cc47a..d6184f821cd0 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -105,7 +105,7 @@ int class_add_uuid(const char *uuid, __u64 nid)
return -EOVERFLOW;
data = kzalloc(sizeof(*data), GFP_NOFS);
- if (data == NULL)
+ if (!data)
return -ENOMEM;
obd_str2uuid(&data->un_uuid, uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index fbdb748a36b9..93805ac93c5a 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -835,7 +835,7 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
CDEBUG(D_CONFIG, "Add profile %s\n", prof);
lprof = kzalloc(sizeof(*lprof), GFP_NOFS);
- if (lprof == NULL)
+ if (!lprof)
return -ENOMEM;
INIT_LIST_HEAD(&lprof->lp_list);
@@ -979,7 +979,7 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
new_len = LUSTRE_CFG_BUFLEN(cfg, 1) + strlen(new_name) - name_len;
new_param = kzalloc(new_len, GFP_NOFS);
- if (new_param == NULL)
+ if (!new_param)
return ERR_PTR(-ENOMEM);
strcpy(new_param, new_name);
@@ -987,7 +987,7 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
strcat(new_param, value);
bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
- if (bufs == NULL) {
+ if (!bufs) {
kfree(new_param);
return ERR_PTR(-ENOMEM);
}
@@ -1123,12 +1123,7 @@ int class_process_config(struct lustre_cfg *lcfg)
goto out;
}
case LCFG_SET_LDLM_TIMEOUT: {
- CDEBUG(D_IOCTL, "changing lustre ldlm_timeout from %d to %d\n",
- ldlm_timeout, lcfg->lcfg_num);
- ldlm_timeout = max(lcfg->lcfg_num, 1U);
- if (ldlm_timeout >= obd_timeout)
- ldlm_timeout = max(obd_timeout / 3, 1U);
- ldlm_timeout_set = 1;
+ /* ldlm_timeout is not used on the client */
err = 0;
goto out;
}
@@ -1461,7 +1456,7 @@ int class_config_llog_handler(const struct lu_env *env,
inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) +
sizeof(clli->cfg_instance) * 2 + 4;
inst_name = kzalloc(inst_len, GFP_NOFS);
- if (inst_name == NULL) {
+ if (!inst_name) {
rc = -ENOMEM;
goto out;
}
@@ -1639,7 +1634,7 @@ int class_config_dump_handler(const struct lu_env *env,
int rc = 0;
outstr = kzalloc(256, GFP_NOFS);
- if (outstr == NULL)
+ if (!outstr)
return -ENOMEM;
if (rec->lrh_type == OBD_CFG_REC) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index ce4a71f7171a..7c5bab377f5c 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -85,7 +85,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
LASSERT(cfg);
bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
- if (bufs == NULL)
+ if (!bufs)
return -ENOMEM;
/* mgc_process_config */
@@ -247,18 +247,18 @@ int lustre_start_mgc(struct super_block *sb)
mutex_lock(&mgc_start_lock);
len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
- mgcname = kzalloc(len, GFP_NOFS);
- niduuid = kzalloc(len + 2, GFP_NOFS);
+ mgcname = kasprintf(GFP_NOFS,
+ "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
+ niduuid = kasprintf(GFP_NOFS, "%s_%x", mgcname, i);
if (!mgcname || !niduuid) {
rc = -ENOMEM;
goto out_free;
}
- sprintf(mgcname, "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
data = kzalloc(sizeof(*data), GFP_NOFS);
- if (data == NULL) {
+ if (!data) {
rc = -ENOMEM;
goto out_free;
}
@@ -326,7 +326,6 @@ int lustre_start_mgc(struct super_block *sb)
/* Add the primary nids for the MGS */
i = 0;
- sprintf(niduuid, "%s_%x", mgcname, i);
if (IS_SERVER(lsi)) {
ptr = lsi->lsi_lmd->lmd_mgs;
if (IS_MGS(lsi)) {
@@ -885,7 +884,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
length = tail - ptr;
lmd->lmd_mgssec = kzalloc(length + 1, GFP_NOFS);
- if (lmd->lmd_mgssec == NULL)
+ if (!lmd->lmd_mgssec)
return -ENOMEM;
memcpy(lmd->lmd_mgssec, ptr, length);
@@ -911,7 +910,7 @@ static int lmd_parse_string(char **handle, char *ptr)
length = tail - ptr;
*handle = kzalloc(length + 1, GFP_NOFS);
- if (*handle == NULL)
+ if (!*handle)
return -ENOMEM;
memcpy(*handle, ptr, length);
@@ -941,7 +940,7 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
oldlen = strlen(lmd->lmd_mgs) + 1;
mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
- if (mgsnid == NULL)
+ if (!mgsnid)
return -ENOMEM;
if (lmd->lmd_mgs != NULL) {
@@ -983,7 +982,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
lmd->lmd_magic = LMD_MAGIC;
lmd->lmd_params = kzalloc(4096, GFP_NOFS);
- if (lmd->lmd_params == NULL)
+ if (!lmd->lmd_params)
return -ENOMEM;
lmd->lmd_params[0] = '\0';
@@ -1120,10 +1119,9 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
/* Remove leading /s from fsname */
while (*++s1 == '/') ;
/* Freed in lustre_free_lsi */
- lmd->lmd_profile = kzalloc(strlen(s1) + 8, GFP_NOFS);
+ lmd->lmd_profile = kasprintf(GFP_NOFS, "%s-client", s1);
if (!lmd->lmd_profile)
return -ENOMEM;
- sprintf(lmd->lmd_profile, "%s-client", s1);
}
/* Freed in lustre_free_lsi */
@@ -1281,7 +1279,7 @@ struct file_system_type lustre_fs_type = {
.mount = lustre_mount,
.kill_sb = lustre_kill_super,
.fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
- FS_HAS_FIEMAP | FS_RENAME_DOES_D_MOVE,
+ FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("lustre");
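
Two of the hunks above collapse a kzalloc()-plus-sprintf() pair into a single kasprintf() call, which computes the needed length itself and allocates with the same GFP flags. A hedged before/after sketch (base and suffix are hypothetical):

	/* before: size the buffer by hand, then format into it */
	name = kzalloc(strlen(base) + strlen(suffix) + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	sprintf(name, "%s%s", base, suffix);

	/* after: one call; the result is still kfree()d by the caller */
	name = kasprintf(GFP_NOFS, "%s%s", base, suffix);
	if (!name)
		return -ENOMEM;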
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index ff0a01bcf8da..b0b0157a6334 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -43,40 +43,8 @@
#include "../include/obd_support.h"
#include "../include/obd_class.h"
-
-static inline __u32 consume(int nob, __u8 **ptr)
-{
- __u32 value;
-
- LASSERT(nob <= sizeof(value));
-
- for (value = 0; nob > 0; --nob)
- value = (value << 8) | *((*ptr)++);
- return value;
-}
-
-#define CONSUME(val, ptr) (val) = consume(sizeof(val), (ptr))
-
-static void uuid_unpack(class_uuid_t in, __u16 *uu, int nr)
-{
- __u8 *ptr = in;
-
- LASSERT(nr * sizeof(*uu) == sizeof(class_uuid_t));
-
- while (nr-- > 0)
- CONSUME(uu[nr], &ptr);
-}
-
void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
{
- /* uu as an array of __u16's */
- __u16 uuid[sizeof(class_uuid_t) / sizeof(__u16)];
-
- CLASSERT(ARRAY_SIZE(uuid) == 8);
-
- uuid_unpack(uu, uuid, ARRAY_SIZE(uuid));
- sprintf(out->uuid, "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7]);
+ sprintf(out->uuid, "%pU", uu);
}
EXPORT_SYMBOL(class_uuid_unparse);
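
The rewrite above relies on the kernel's %pU printk extension, which formats a pointer to 16 raw UUID bytes in the standard 8-4-4-4-12 form, making the manual unpack-and-sprintf code unnecessary. A minimal sketch:

	u8 uuid[16];	/* 16 raw bytes, e.g. filled by generate_random_uuid() */

	pr_info("uuid: %pU\n", uuid);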
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 0222fd2e4757..27bd170c3a28 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -480,11 +480,11 @@ static int echo_alloc_memmd(struct echo_device *ed,
LASSERT(*lsmp == NULL);
*lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (*lsmp == NULL)
+ if (!*lsmp)
return -ENOMEM;
(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS);
- if ((*lsmp)->lsm_oinfo[0] == NULL) {
+ if (!(*lsmp)->lsm_oinfo[0]) {
kfree(*lsmp);
return -ENOMEM;
}
@@ -701,7 +701,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
int cleanup = 0;
ed = kzalloc(sizeof(*ed), GFP_NOFS);
- if (ed == NULL) {
+ if (!ed) {
rc = -ENOMEM;
goto out;
}
@@ -1878,7 +1878,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return rc;
env = kzalloc(sizeof(*env), GFP_NOFS);
- if (env == NULL)
+ if (!env)
return -ENOMEM;
rc = lu_env_init(env, LCT_DT_THREAD);
@@ -2049,7 +2049,7 @@ static int echo_client_setup(const struct lu_env *env,
ec->ec_nstripes = 0;
ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
- if (ocd == NULL) {
+ if (!ocd) {
CERROR("Can't alloc ocd connecting to %s\n",
lustre_cfg_string(lcfg, 1));
return -ENOMEM;
@@ -2139,7 +2139,7 @@ static struct obd_ops echo_client_obd_ops = {
.o_disconnect = echo_client_disconnect
};
-int echo_client_init(void)
+static int echo_client_init(void)
{
int rc;
@@ -2154,7 +2154,7 @@ int echo_client_init(void)
return rc;
}
-void echo_client_exit(void)
+static void echo_client_exit(void)
{
class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
lu_kmem_fini(echo_caches);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5592d32a1a95..c72035e048aa 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1837,12 +1837,6 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
oap_pending_item);
EASSERT(tmp->oe_owner == current, tmp);
-#if 0
- if (overlapped(tmp, ext)) {
- OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
- EASSERT(0, ext);
- }
-#endif
if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
CDEBUG(D_CACHE, "Do not permit different type of IO"
" for a same RPC\n");
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 9222c9f4faae..91fdec44792b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -218,7 +218,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
int rc;
od = kzalloc(sizeof(*od), GFP_NOFS);
- if (od == NULL)
+ if (!od)
return ERR_PTR(-ENOMEM);
cl_device_init(&od->od_cl, t);
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 43dfa73dd3a6..f9cf5cea643d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -471,7 +471,7 @@ static int osc_page_flush(const struct lu_env *env,
struct cl_io *io)
{
struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
+ int rc;
rc = osc_flush_async_page(env, io, opg);
return rc;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index f84b4c78a8a0..12113dfd87b8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -119,7 +119,7 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
if (*lmmp == NULL) {
*lmmp = kzalloc(lmm_size, GFP_NOFS);
- if (*lmmp == NULL)
+ if (!*lmmp)
return -ENOMEM;
}
@@ -1909,7 +1909,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
mpflag = cfs_memory_pressure_get_and_set();
crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
- if (crattr == NULL) {
+ if (!crattr) {
rc = -ENOMEM;
goto out;
}
@@ -2665,7 +2665,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
buf = NULL;
len = 0;
- if (obd_ioctl_getdata(&buf, &len, (void *)uarg)) {
+ if (obd_ioctl_getdata(&buf, &len, uarg)) {
err = -EINVAL;
goto out;
}
@@ -2695,7 +2695,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
- err = copy_to_user((void *)uarg, buf, len);
+ err = copy_to_user(uarg, buf, len);
if (err)
err = -EFAULT;
obd_ioctl_freedata(buf, len);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index a12cd66b2365..c83a34a01e65 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -971,7 +971,7 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
struct ptlrpc_set_cbdata *cbdata;
cbdata = kzalloc(sizeof(*cbdata), GFP_NOFS);
- if (cbdata == NULL)
+ if (!cbdata)
return -ENOMEM;
cbdata->psc_interpret = fn;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 8cb1929fd31d..c8ef9e578263 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -485,7 +485,7 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
return rc;
}
-void ptlrpc_ni_fini(void)
+static void ptlrpc_ni_fini(void)
{
wait_queue_head_t waitq;
struct l_wait_info lwi;
@@ -529,7 +529,7 @@ lnet_pid_t ptl_get_pid(void)
return pid;
}
-int ptlrpc_ni_init(void)
+static int ptlrpc_ni_init(void)
{
int rc;
lnet_pid_t pid;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index c9b8481dd384..1eae3896c037 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -555,14 +555,12 @@ static int import_select_connection(struct obd_import *imp)
imp_conn->oic_last_attempt = cfs_time_current_64();
/* switch connection, don't mind if it's same as the current one */
- if (imp->imp_connection)
- ptlrpc_connection_put(imp->imp_connection);
+ ptlrpc_connection_put(imp->imp_connection);
imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
dlmexp = class_conn2export(&imp->imp_dlm_handle);
LASSERT(dlmexp != NULL);
- if (dlmexp->exp_connection)
- ptlrpc_connection_put(dlmexp->exp_connection);
+ ptlrpc_connection_put(dlmexp->exp_connection);
dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
class_export_put(dlmexp);
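
The hunk above drops the if (imp->imp_connection)-style guards on the premise that ptlrpc_connection_put() already tolerates a NULL argument. A hedged sketch of that callee-checks idiom (hypothetical names):

	void example_put(struct example *e)
	{
		if (!e)
			return;	/* NULL-safe, like kfree(); callers need no guard */

		if (atomic_dec_and_test(&e->ref))
			example_free(e);
	}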
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index aaaabbf5f1b9..53f9af1f2f3e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -652,7 +652,7 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
return -EINVAL;
cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
- if (cmd == NULL)
+ if (!cmd)
return -ENOMEM;
/**
* strsep() modifies its argument, so keep a copy
@@ -819,7 +819,7 @@ ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
}
srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
- if (srhi == NULL)
+ if (!srhi)
return NULL;
srhi->srhi_seq = 0;
@@ -1219,7 +1219,7 @@ int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
char *tmpbuf;
kbuf = kzalloc(BUFLEN, GFP_NOFS);
- if (kbuf == NULL)
+ if (!kbuf)
return -ENOMEM;
/*
@@ -1303,7 +1303,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
return -EINVAL;
kbuf = kzalloc(count + 1, GFP_NOFS);
- if (kbuf == NULL)
+ if (!kbuf)
return -ENOMEM;
if (copy_from_user(kbuf, buffer, count)) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 9516acadb7a1..d37cdd5ac580 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1156,7 +1156,7 @@ int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
}
desc = kzalloc(sizeof(*desc), GFP_NOFS);
- if (desc == NULL) {
+ if (!desc) {
rc = -ENOMEM;
goto fail;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 2787bfd67165..84937ad90570 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -52,6 +52,8 @@
#include "../include/obd_cksum.h"
#include "../include/lustre/ll_fiemap.h"
+#include "ptlrpc_internal.h"
+
static inline int lustre_msg_hdr_size_v2(int count)
{
return cfs_size_round(offsetof(struct lustre_msg_v2,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index d05c37c1fd30..f8edb791a998 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -318,8 +318,6 @@ int ptlrpc_start_pinger(void)
strcpy(pinger_thread.t_name, "ll_ping");
- /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
- * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
"%s", pinger_thread.t_name));
if (IS_ERR_VALUE(rc)) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 5268887ca6b3..ae99180d6036 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -51,7 +51,7 @@ extern spinlock_t ptlrpc_rs_debug_lock;
extern struct mutex pinger_mutex;
extern struct mutex ptlrpcd_mutex;
-__init int ptlrpc_init(void)
+static int __init ptlrpc_init(void)
{
int rc, cleanup_phase = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index e591cff323ec..17cc81d5074f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -739,7 +739,7 @@ static int ptlrpcd_init(void)
size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
ptlrpcds = kzalloc(size, GFP_NOFS);
- if (ptlrpcds == NULL) {
+ if (!ptlrpcds) {
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 69d73c430696..2ee3e8b2e879 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -415,7 +415,7 @@ static int enc_pools_add_pages(int npages)
for (i = 0; i < npools; i++) {
pools[i] = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- if (pools[i] == NULL)
+ if (!pools[i])
goto out_pools;
for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 31da43e8b3c6..e7f2f333257d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -564,7 +564,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
return NULL;
conf = kzalloc(sizeof(*conf), GFP_NOFS);
- if (conf == NULL)
+ if (!conf)
return NULL;
strcpy(conf->sc_fsname, fsname);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 53ce0d14bd46..a243db60f697 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -444,7 +444,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
- if (plsec == NULL)
+ if (!plsec)
return NULL;
/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 9117f1c15a8e..003344ccfffc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -43,7 +43,7 @@
#include "ptlrpc_internal.h"
/* The following are visible and mutable through /sys/module/ptlrpc */
-int test_req_buffer_pressure = 0;
+int test_req_buffer_pressure;
module_param(test_req_buffer_pressure, int, 0444);
MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
module_param(at_min, int, 0644);
@@ -69,7 +69,7 @@ LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
-struct ptlrpc_request_buffer_desc *
+static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_service *svc = svcpt->scp_service;
@@ -101,7 +101,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
return rqbd;
}
-void
+static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
@@ -118,7 +118,7 @@ ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
kfree(rqbd);
}
-int
+static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
struct ptlrpc_service *svc = svcpt->scp_service;
@@ -732,7 +732,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
GFP_NOFS);
- if (service == NULL) {
+ if (!service) {
kfree(cpts);
return ERR_PTR(-ENOMEM);
}
@@ -2298,7 +2298,7 @@ static int ptlrpc_main(void *arg)
}
env = kzalloc(sizeof(*env), GFP_NOFS);
- if (env == NULL) {
+ if (!env) {
rc = -ENOMEM;
goto out_srv_fini;
}
@@ -2826,9 +2826,7 @@ void ptlrpc_hr_fini(void)
ptlrpc_stop_hr_threads();
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (hrp->hrp_thrs != NULL) {
- kfree(hrp->hrp_thrs);
- }
+ kfree(hrp->hrp_thrs);
}
cfs_percpt_free(ptlrpc_hr.hr_partitions);
@@ -3054,7 +3052,7 @@ EXPORT_SYMBOL(ptlrpc_unregister_service);
* Right now, it just checks to make sure that requests aren't languishing
* in the queue. We'll use this health check to govern whether a node needs
* to be shot, so it's intentionally non-aggressive. */
-int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
+static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_request *request = NULL;
struct timeval right_now;
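
Among the cleanups above, the ptlrpc_hr_fini() hunk removes a NULL check around kfree(). Since kfree(NULL) is defined to be a no-op, such guards are redundant:

	/* these are equivalent; the guard adds nothing */
	if (ptr)
		kfree(ptr);

	kfree(ptr);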
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 1e302e8516ce..873e2cf31217 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -40,6 +40,109 @@ Description:
e.g. dd.1253
nodelocal - use jobid_name value from above.
+What: /sys/fs/lustre/timeout
+Date: June 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+ Controls "lustre timeout" variable, also known as obd_timeout
+ in some old manual. In the past obd_timeout was of paramount
+ importance as the timeout value used everywhere and where
+ other timeouts were derived from. These days it's much less
+ important as network timeouts are mostly determined by
+ AT (adaptive timeouts).
+ Unit: seconds, default: 100
+
+What: /sys/fs/lustre/max_dirty_mb
+Date: June 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls the total amount of dirty cache (in megabytes) allowed
+	across all mounted lustre filesystems.
+	Since writeout of dirty pages in Lustre is somewhat expensive,
+	allowing too many dirty pages might lead to performance
+	degradation as the kernel desperately tries to find pages to
+	free or write out.
+	Default: 1/2 of RAM. Min value: 4 MB, max value: 9/10 of RAM.
+
+What: /sys/fs/lustre/debug_peer_on_timeout
+Date: June 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+ Control if lnet debug information should be printed when
+ an RPC timeout occurs.
+ 0 disabled (default)
+ 1 enabled
+
+What: /sys/fs/lustre/dump_on_timeout
+Date: June 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls whether the Lustre debug log should be dumped when an
+	RPC timeout occurs. This is useful if your debug buffer
+	typically rolls over by the time you notice RPC timeouts.
+
+What: /sys/fs/lustre/dump_on_eviction
+Date: June 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls whether the Lustre debug log should be dumped when
+	this client is evicted from one of the servers.
+	This is useful if your debug buffer typically rolls over
+	by the time you notice the eviction event.
+
+What: /sys/fs/lustre/at_min
+Date: July 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls the minimum adaptive timeout in seconds. If you
+	encounter a case where clients time out due to the
+	server-reported processing time being too short, you might
+	consider increasing this value.
+	One common cause of this is an underlying network with
+	unpredictably long delays.
+ Default: 0
+
+What: /sys/fs/lustre/at_max
+Date: July 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls the maximum adaptive timeout in seconds. If the at_max
+	timeout is reached for an RPC, the RPC will time out.
+	Some genuinely slow network hardware might warrant increasing
+	this value.
+	Setting this value to 0 disables the Adaptive Timeouts
+	functionality and the old-style obd_timeout value is used
+	instead.
+ Default: 600
+
+What: /sys/fs/lustre/at_extra
+Date: July 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls how much extra time, in seconds, to request for
+	requests that are still being processed. Normally a server-side
+	parameter, it is also used on the client for responses to
+	various LDLM ASTs that are handled with a special server thread
+	on the client.
+	This is a way for the servers to ask the clients not to time
+	out a request that has already reached the current service time
+	estimate, and to give it some more time.
+ Default: 30
+
+What: /sys/fs/lustre/at_early_margin
+Date: July 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls when to send the early reply for requests that are
+	about to time out, as an offset to the estimated service time,
+	in seconds.
+ Default: 5
+
+What: /sys/fs/lustre/at_history
+Date: July 2015
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Controls for how many seconds to remember the slowest events
+	encountered by the adaptive timeouts code.
+ Default: 600
+
What: /sys/fs/lustre/llite/<fsname>-<uuid>/blocksize
Date: May 2015
Contact: "Oleg Drokin" <oleg.drokin@intel.com>
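
The entries above are plain-text attributes under /sys/fs/lustre. A hedged userspace sketch that reads one of them (the path is taken from the documentation above; error handling is minimal):

	#include <stdio.h>

	int main(void)
	{
		unsigned int timeout;
		FILE *f = fopen("/sys/fs/lustre/timeout", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &timeout) == 1)
			printf("obd_timeout: %u seconds\n", timeout);
		fclose(f);
		return 0;
	}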
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 8bc68e2b4052..fb55e5941445 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -342,14 +342,6 @@ static struct region_info region_configs[] = {
.deemphasis = 50,
.region = 3,
},
- /* Japan wide band */
- {
- .channel_spacing = 10,
- .bottom_frequency = 76000,
- .top_frequency = 108000,
- .deemphasis = 50,
- .region = 4,
- },
};
/*
@@ -741,6 +733,18 @@ static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
mutex_lock(&bdev->mutex);
bdev->region_info = region_configs[region];
+
+ if (region_configs[region].bottom_frequency < 87500)
+ bdev->cache_fm_ctrl |= BCM2048_BAND_SELECT;
+ else
+ bdev->cache_fm_ctrl &= ~BCM2048_BAND_SELECT;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+ bdev->cache_fm_ctrl);
+ if (err) {
+ mutex_unlock(&bdev->mutex);
+ goto done;
+ }
mutex_unlock(&bdev->mutex);
if (bdev->frequency < region_configs[region].bottom_frequency ||
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 62ec9f70dae4..534b8103ae80 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -739,7 +739,7 @@ static int imon_probe(struct usb_interface *interface,
ep = &iface_desc->endpoint[i].desc;
ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep_type = usb_endpoint_type(ep);
if (!ir_ep_found &&
ep_dir == USB_DIR_IN &&
@@ -785,13 +785,13 @@ static int imon_probe(struct usb_interface *interface,
}
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver) {
+ if (!driver)
goto free_context;
- }
+
rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!rbuf) {
+ if (!rbuf)
goto free_driver;
- }
+
if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
dev_err(dev, "%s: lirc_buffer_init failed\n", __func__);
goto free_rbuf;
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 9e5674341abe..b247649a99eb 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -184,7 +184,7 @@ static void deregister_from_lirc(struct sasem_context *context)
__func__, retval);
else
dev_info(&context->dev->dev,
- "Deregistered Sasem driver (minor:%d)\n", minor);
+ "Deregistered Sasem driver (minor:%d)\n", minor);
}
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index dc7984455c3a..465796a686c4 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -327,9 +327,6 @@ static void safe_udelay(unsigned long usecs)
* time
*/
-/* So send_pulse can quickly convert microseconds to clocks */
-static unsigned long conv_us_to_clocks;
-
static int init_timing_params(unsigned int new_duty_cycle,
unsigned int new_freq)
{
@@ -344,7 +341,6 @@ static int init_timing_params(unsigned int new_duty_cycle,
/* How many clocks in a microsecond?, avoiding long long divide */
work = loops_per_sec;
work *= 4295; /* 4295 = 2^32 / 1e6 */
- conv_us_to_clocks = work >> 32;
/*
* Carrier period in clocks, approach good up to 32GHz clock,
@@ -357,10 +353,9 @@ static int init_timing_params(unsigned int new_duty_cycle,
pulse_width = period * duty_cycle / 100;
space_width = period - pulse_width;
dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
- "clk/jiffy=%ld, pulse=%ld, space=%ld, "
- "conv_us_to_clocks=%ld\n",
+ "clk/jiffy=%ld, pulse=%ld, space=%ld\n",
freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
- pulse_width, space_width, conv_us_to_clocks);
+ pulse_width, space_width);
return 0;
}
#else /* ! USE_RDTSC */
@@ -431,63 +426,14 @@ static long send_pulse_irdeo(unsigned long length)
return ret;
}
-#ifdef USE_RDTSC
-/* Version that uses Pentium rdtsc instruction to measure clocks */
-
-/*
- * This version does sub-microsecond timing using rdtsc instruction,
- * and does away with the fudged LIRC_SERIAL_TRANSMITTER_LATENCY
- * Implicitly i586 architecture... - Steve
- */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
- int flag;
- unsigned long target, start, now;
-
- /* Get going quick as we can */
- rdtscl(start);
- on();
- /* Convert length from microseconds to clocks */
- length *= conv_us_to_clocks;
- /* And loop till time is up - flipping at right intervals */
- now = start;
- target = pulse_width;
- flag = 1;
- /*
- * FIXME: This looks like a hard busy wait, without even an occasional,
- * polite, cpu_relax() call. There's got to be a better way?
- *
- * The i2c code has the result of a lot of bit-banging work, I wonder if
- * there's something there which could be helpful here.
- */
- while ((now - start) < length) {
- /* Delay till flip time */
- do {
- rdtscl(now);
- } while ((now - start) < target);
-
- /* flip */
- if (flag) {
- rdtscl(now);
- off();
- target += space_width;
- } else {
- rdtscl(now); on();
- target += pulse_width;
- }
- flag = !flag;
- }
- rdtscl(now);
- return ((now - start) - length) / conv_us_to_clocks;
-}
-#else /* ! USE_RDTSC */
/* Version using udelay() */
/*
* here we use fixed point arithmetic, with 8
* fractional bits. that gets us within 0.1% or so of the right average
* frequency, albeit with some jitter in pulse length - Steve
+ *
+ * This should use ndelay instead.
*/
/* To match 8 fractional bits used for pulse/space length */
@@ -520,7 +466,6 @@ static long send_pulse_homebrew_softcarrier(unsigned long length)
}
return (actual-length) >> 8;
}
-#endif /* USE_RDTSC */
static long send_pulse_homebrew(unsigned long length)
{
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 261e27d6b054..ce3b5f230e2e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1364,10 +1364,10 @@ static const struct i2c_device_id ir_transceiver_id[] = {
{ "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR },
{ }
};
+MODULE_DEVICE_TABLE(i2c, ir_transceiver_id);
static struct i2c_driver driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "Zilog/Hauppauge i2c IR",
},
.probe = ir_probe,
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index a8d45f44765c..cf2e96bcf395 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -561,7 +561,6 @@ MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
static struct i2c_driver mn88472_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "mn88472",
},
.probe = mn88472_probe,
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
index f9146a146d07..a222e99935d2 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -507,7 +507,6 @@ MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
static struct i2c_driver mn88473_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "mn88473",
},
.probe = mn88473_probe,
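
The mn88472, mn88473 and lirc_zilog hunks all drop .owner = THIS_MODULE from struct i2c_driver: the i2c core fills in the owner during registration (i2c_add_driver() is a macro that passes THIS_MODULE to i2c_register_driver()), so the explicit assignment is redundant. A hedged sketch with hypothetical names:

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example",	/* no .owner needed */
		},
		.probe = example_probe,
		.remove = example_remove,
	};
	module_i2c_driver(example_driver);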
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 072dac04a750..8d4e3bd1bfe1 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,5 +1,5 @@
config VIDEO_OMAP4
- bool "OMAP 4 Camera support"
+ tristate "OMAP 4 Camera support"
depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
depends on HAS_DMA
select MFD_SYSCON
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
index fcde88860a2c..4d220ef82653 100644
--- a/drivers/staging/media/omap4iss/TODO
+++ b/drivers/staging/media/omap4iss/TODO
@@ -1,4 +1,3 @@
-* Make the driver compile as a module
* Fix FIFO/buffer overflows and underflows
* Replace dummy resizer code with a real implementation
* Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 85c54fedddda..40405d8710a6 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -640,76 +640,6 @@ iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
}
static int
-iss_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_subdev *subdev;
- int ret;
-
- subdev = iss_video_remote_subdev(video, NULL);
- if (subdev == NULL)
- return -EINVAL;
-
- mutex_lock(&video->mutex);
- ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
- mutex_unlock(&video->mutex);
-
- return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
-iss_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_subdev_format format;
- struct v4l2_subdev *subdev;
- u32 pad;
- int ret;
-
- subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
- return -EINVAL;
-
- /* Try the get crop operation first and fallback to get format if not
- * implemented.
- */
- ret = v4l2_subdev_call(subdev, video, g_crop, crop);
- if (ret != -ENOIOCTLCMD)
- return ret;
-
- format.pad = pad;
- format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
- if (ret < 0)
- return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-
- crop->c.left = 0;
- crop->c.top = 0;
- crop->c.width = format.format.width;
- crop->c.height = format.format.height;
-
- return 0;
-}
-
-static int
-iss_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
-{
- struct iss_video *video = video_drvdata(file);
- struct v4l2_subdev *subdev;
- int ret;
-
- subdev = iss_video_remote_subdev(video, NULL);
- if (subdev == NULL)
- return -EINVAL;
-
- mutex_lock(&video->mutex);
- ret = v4l2_subdev_call(subdev, video, s_crop, crop);
- mutex_unlock(&video->mutex);
-
- return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct iss_video_fh *vfh = to_iss_video_fh(fh);
@@ -1018,9 +948,6 @@ static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
.vidioc_g_fmt_vid_out = iss_video_get_format,
.vidioc_s_fmt_vid_out = iss_video_set_format,
.vidioc_try_fmt_vid_out = iss_video_try_format,
- .vidioc_cropcap = iss_video_cropcap,
- .vidioc_g_crop = iss_video_get_crop,
- .vidioc_s_crop = iss_video_set_crop,
.vidioc_g_parm = iss_video_get_param,
.vidioc_s_parm = iss_video_set_param,
.vidioc_reqbufs = iss_video_reqbufs,
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
new file mode 100644
index 000000000000..380c137089d0
--- /dev/null
+++ b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
@@ -0,0 +1,181 @@
+What: /sys/class/most/mostcore/aims
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ List of AIMs that have been loaded.
+Users:
+
+What: /sys/class/most/mostcore/aims/<aim>/add_link
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is used to establish a connection between a channel and
+		the current AIM.
+Users:
+
+What: /sys/class/most/mostcore/aims/<aim>/remove_link
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to remove a connected channel from the
+ current AIM.
+Users:
+
+What: /sys/class/most/mostcore/devices
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ List of attached MOST interfaces.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/description
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Provides information about the interface type and the physical
+ location of the device. Hardware attached via USB, for instance,
+ might return <usb_device 1-1.1:1.0>
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/interface
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the type of peripheral interface the current device
+ uses.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ For every channel of the device a directory is created, whose
+ name is dictated by the HDM. This enables an application to
+ collect information about the channel's capabilities and
+ configure it.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/available_datatypes
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the data types the current channel can transport.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/available_directions
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the directions the current channel is capable of.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/number_of_packet_buffers
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the number of packet buffers the current channel can
+ handle.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/number_of_stream_buffers
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the number of streaming buffers the current channel can
+ handle.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/size_of_packet_buffer
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the size of a packet buffer the current channel can
+ handle.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/size_of_stream_buffer
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the size of a streaming buffer the current channel can
+ handle.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_number_of_buffers
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the number of buffers of the current channel.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_buffer_size
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the size of a buffer of the current channel.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_direction
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the direction of the current channel.
+ The following strings will be accepted:
+ 'dir_tx',
+ 'dir_rx'
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_datatype
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the data type of the current channel.
+ The following strings will be accepted:
+ 'control',
+ 'async',
+ 'sync',
+ 'isoc_avp'
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_subbuffer_size
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the subbuffer size of the current channel.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/set_packets_per_xact
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the number of packets per transaction of
+		the current channel. This is only needed if the network
+		interface controller is attached via USB.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/<channel>/channel_starving
+Date: June 2015
+KernelVersion: 4.3
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates whether the current channel has run out of buffers.
+Users:
diff --git a/drivers/staging/most/Documentation/driver_usage.txt b/drivers/staging/most/Documentation/driver_usage.txt
new file mode 100644
index 000000000000..a4dc0c348fbc
--- /dev/null
+++ b/drivers/staging/most/Documentation/driver_usage.txt
@@ -0,0 +1,180 @@
+
+ Section 1 Overview
+
+The Media Oriented Systems Transport (MOST) driver gives Linux applications
+access to a MOST network: the Automotive Information Backbone and the
+de-facto standard for high-bandwidth automotive multimedia networking.
+
+MOST defines the protocol, hardware and software layers necessary to allow
+for the efficient and low-cost transport of control, real-time and packet
+data using a single medium (physical layer). Media currently in use are
+fiber optics, unshielded twisted pair cables (UTP) and coax cables. MOST
+also supports various speed grades up to 150 Mbps.
+For more information on MOST, visit the MOST Cooperation website:
+www.mostcooperation.com.
+
+Cars continue to evolve into sophisticated consumer electronics platforms,
+increasing the demand for reliable and simple solutions to support audio,
+video and data communications. MOST can be used to connect multiple
+consumer devices via optical or electrical physical layers directly to one
+another or in a network configuration. As a synchronous network, MOST
+provides excellent Quality of Service and seamless connectivity for
+audio/video streaming. Therefore, the driver fits perfectly with the mission
+of Automotive Grade Linux to create open source software solutions for
+automotive applications.
+
+The driver basically consists of three layers: the hardware layer, the
+core layer and the application layer. The core layer consists of the core
+module only. This module handles the communication flow through all three
+layers, the configuration of the driver, the configuration interface
+representation in sysfs, and the buffer management.
+For each of the other two layers a selection of modules is provided. These
+modules can arbitrarily be combined to meet the needs of the desired
+system architecture. A module of the hardware layer is referred to as an
+HDM (hardware dependent module). Each module of this layer handles exactly
+one of the peripheral interfaces of a network interface controller (e.g.
+USB, MediaLB, I2C). A module of the application layer is referred to as an
+AIM (application interfacing module). The modules of this layer give access
+to MOST via one of the following ways: character devices, ALSA, Networking or
+V4L2.
+
+To physically access MOST, an Intelligent Network Interface Controller
+(INIC) is needed. For more information on available controllers visit:
+www.microchip.com
+
+
+
+ Section 1.1 Hardware Layer
+
+The hardware layer contains so-called hardware dependent modules (HDMs). For
+each peripheral interface the hardware supports, the driver provides a
+suitable module that handles the interface.
+
+The HDMs encapsulate the peripheral interface specific knowledge of the driver
+and provide an easy way of extending the number of supported interfaces.
+Currently the following HDMs are available:
+
+ 1) MediaLB (DIM2)
+	Host wants to communicate with the hardware via MediaLB.
+
+ 2) I2C
+ Host wants to communicate with the hardware via I2C.
+
+ 3) USB
+ Host wants to communicate with the hardware via USB.
+
+
+ Section 1.2 Core Layer
+
+The core layer contains the mostcore module only, which handles the driver
+configuration via sysfs, the buffer management and the data forwarding.
+
+
+
+	Section 1.3 Application Layer
+
+The application layer contains so-called application interfacing modules (AIMs).
+Depending on how the driver should interface with the application, one or more
+suitable modules can be selected.
+
+The AIMs encapsulate the application interface specific knowledge of the driver
+and provide access to user space or other kernel subsystems.
+Currently the following AIMs are available:
+
+ 1) Character Device
+ Applications can access the driver by means of character devices.
+
+ 2) Networking
+	Standard networking applications (e.g. iperf) can be used to access
+ the driver via the networking subsystem.
+
+ 3) Video4Linux (v4l2)
+	Standard video applications (e.g. VLC) can be used to access the
+ driver via the V4L subsystem.
+
+ 4) Advanced Linux Sound Architecture (ALSA)
+	Standard sound applications (e.g. aplay, arecord, audacity) can be
+ used to access the driver via the ALSA subsystem.
+
+
+
+ Section 2 Configuration
+
+See ABI/sysfs-class-most.txt
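+
+As an illustration, a synchronous channel might be configured with a
+sequence like the following (the device name, channel name and attribute
+values are examples only and depend on the actual system):
+
+	$ cd /sys/class/most/mostcore/devices/mdev0/ep_81
+	$ echo 4 >set_number_of_buffers
+	$ echo 512 >set_buffer_size
+	$ echo 4 >set_subbuffer_size
+	$ echo dir_rx >set_direction
+	$ echo sync >set_datatype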
+
+
+
+ Section 3 USB Padding
+
+When transmitting or receiving synchronous or isochronous data, the number of
+packets per USB transaction and the sub-buffer size need to be configured.
+The driver uses these values to apply the buffer padding that the hardware
+expects for performance optimization of the USB transmission.
+
+When transmitting synchronous data, the allocated channel width needs to be
+written to 'set_subbuffer_size'. Additionally, the number of MOST frames that
+should travel to the host within one USB transaction needs to be written to
+'set_packets_per_xact'.
+
+Internally the synchronous threshold is calculated as follows:
+
+	frame_size = set_subbuffer_size * set_packets_per_xact
+
+In case 'set_packets_per_xact' is set to 0xFF, the driver calculates the
+maximum number of MOST frames that fit into _one_ 512-byte USB full
+packet:
+
+ frame_size = floor(MTU_USB / bandwidth_sync) * bandwidth_sync
+
+This frame_size is the number of synchronous data bytes within a USB
+transaction, which renders MTU_USB - frame_size bytes for padding.
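+
+As a worked example (the numbers are purely illustrative): for a 24-bit
+stereo channel the synchronous bandwidth is 6 bytes per MOST frame, so
+with 'set_packets_per_xact' set to 0xFF the driver calculates
+
+	frame_size = floor(512 / 6) * 6 = 510
+
+which leaves 512 - 510 = 2 bytes of padding per USB transaction.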
+
+When transmitting isochronous AVP data, the desired packet size needs to be
+written to 'set_subbuffer_size' and the hardware will always expect two
+isochronous packets within one USB transaction. This renders
+
+ MTU_USB - (2 * set_subbuffer_size)
+
+bytes for padding.
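+
+For example, assuming a desired isochronous packet size of 188 bytes (a
+value chosen here purely for illustration), writing 188 to
+'set_subbuffer_size' renders 512 - (2 * 188) = 136 bytes for padding.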
+
+Note that at least 2 times set_subbuffer_size bytes for isochronous data or
+set_subbuffer_size times set_packets_per_xact bytes for synchronous data need to be
+put in the transmission buffer and passed to the driver.
+
+Since HDMs are allowed to change a chosen configuration to best fit their
+constraints, it is recommended to always double-check the configuration by
+reading back the previously written files.
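+
+For instance, a configuration write could be double-checked like this
+(the attribute value is an example only):
+
+	$ echo 4 >set_number_of_buffers
+	$ cat set_number_of_buffers
+	4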
+
+
+
+ Section 4 Routing Channels
+
+To connect a channel that has been configured as outlined above to an AIM and
+make it accessible to user space applications, the attribute file 'add_link' is
+used. To actually bind a channel to the AIM, a string that complies with the
+following syntax needs to be written to the file:
+
+ "most_device:channel_name:link_name[.param]"
+
+The example above links the channel "channel_name" of the device "most_device"
+to the AIM. In case the AIM interfaces with the VFS, this also creates a device
+node "link_name" in the /dev directory. The parameter "param" is an AIM-dependent
+string, which can be omitted if the AIM in use does not make any use of it.
+
+Cdev AIM example:
+ $ echo "mdev0:ep_81:my_rx_channel" >add_link
+ $ echo "mdev0:ep_81" >add_link
+
+
+Sound/ALSA AIM example:
+
+The sound/ALSA AIM needs an additional parameter to determine the audio resolution
+that is going to be used. The following strings can be used:
+
+ - "1x8" (Mono)
+ - "2x16" (16-bit stereo)
+ - "2x24" (24-bit stereo)
+ - "2x32" (32-bit stereo)
+
+ $ echo "mdev0:ep_81:audio_rx.2x16" >add_link
+ $ echo "mdev0:ep_81" >add_link
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
new file mode 100644
index 000000000000..d50de03de7b9
--- /dev/null
+++ b/drivers/staging/most/Kconfig
@@ -0,0 +1,30 @@
+menuconfig MOST
+ tristate "MOST driver"
+ select MOSTCORE
+ default n
+ ---help---
+ This option allows you to enable support for MOST Network transceivers.
+
+ If in doubt, say N here.
+
+
+
+if MOST
+
+source "drivers/staging/most/mostcore/Kconfig"
+
+source "drivers/staging/most/aim-cdev/Kconfig"
+
+source "drivers/staging/most/aim-network/Kconfig"
+
+source "drivers/staging/most/aim-sound/Kconfig"
+
+source "drivers/staging/most/aim-v4l2/Kconfig"
+
+source "drivers/staging/most/hdm-dim2/Kconfig"
+
+source "drivers/staging/most/hdm-i2c/Kconfig"
+
+source "drivers/staging/most/hdm-usb/Kconfig"
+
+endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
new file mode 100644
index 000000000000..9ee981c7786b
--- /dev/null
+++ b/drivers/staging/most/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_MOSTCORE) += mostcore/
+obj-$(CONFIG_AIM_CDEV) += aim-cdev/
+obj-$(CONFIG_AIM_NETWORK) += aim-network/
+obj-$(CONFIG_AIM_SOUND) += aim-sound/
+obj-$(CONFIG_AIM_V4L2) += aim-v4l2/
+obj-$(CONFIG_HDM_DIM2) += hdm-dim2/
+obj-$(CONFIG_HDM_I2C) += hdm-i2c/
+obj-$(CONFIG_HDM_USB) += hdm-usb/
diff --git a/drivers/staging/most/TODO b/drivers/staging/most/TODO
new file mode 100644
index 000000000000..4fa11a9d2cf7
--- /dev/null
+++ b/drivers/staging/most/TODO
@@ -0,0 +1,8 @@
+* Get through code review with Greg Kroah-Hartman
+
+Contact:
+To:
+Christian Gromm <christian.gromm@microchip.com>
+Cc:
+Michael Fabry <Michael.Fabry@microchip.com>
+Christian Gromm <chris@engineersdelight.de>
diff --git a/drivers/staging/most/aim-cdev/Kconfig b/drivers/staging/most/aim-cdev/Kconfig
new file mode 100644
index 000000000000..3c59f1bac127
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST Cdev configuration
+#
+
+config AIM_CDEV
+ tristate "Cdev AIM"
+
+ ---help---
+	Say Y here if you want to communicate via character devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aim_cdev. \ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/Makefile b/drivers/staging/most/aim-cdev/Makefile
new file mode 100644
index 000000000000..0bcc6c637b75
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_CDEV) += aim_cdev.o
+
+aim_cdev-objs := cdev.o
+ccflags-y += -Idrivers/staging/most/mostcore/ \ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
new file mode 100644
index 000000000000..0a13d8d0fa39
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -0,0 +1,528 @@
+/*
+ * cdev.c - Application interfacing module for character devices
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+static dev_t aim_devno;
+static struct class *aim_class;
+static struct ida minor_id;
+static unsigned int major;
+
+struct aim_channel {
+ wait_queue_head_t wq;
+ struct cdev cdev;
+ struct device *dev;
+ struct mutex io_mutex;
+ struct most_interface *iface;
+ struct most_channel_config *cfg;
+ unsigned int channel_id;
+ dev_t devno;
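+	/*
+	 * Partial-read bookkeeping: when a read() consumes less than one
+	 * MBO, the MBO is parked in stacked_mbo, keep_mbo is set and
+	 * mbo_offs records how much of it was handed to user space.
+	 */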
+ bool keep_mbo;
+ unsigned int mbo_offs;
+ struct mbo *stacked_mbo;
+ DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
+ atomic_t access_ref;
+ struct list_head list;
+};
+#define to_channel(d) container_of(d, struct aim_channel, cdev)
+static struct list_head channel_list;
+static spinlock_t ch_list_lock;
+
+
+static struct aim_channel *get_channel(struct most_interface *iface, int id)
+{
+ struct aim_channel *channel, *tmp;
+ unsigned long flags;
+ int found_channel = 0;
+
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_for_each_entry_safe(channel, tmp, &channel_list, list) {
+ if ((channel->iface == iface) && (channel->channel_id == id)) {
+ found_channel = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ if (!found_channel)
+ return NULL;
+ return channel;
+}
+
+/**
+ * aim_open - implements the syscall to open the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stores the channel pointer in the private data field of
+ * the file structure and activates the channel within the core.
+ */
+static int aim_open(struct inode *inode, struct file *filp)
+{
+ struct aim_channel *channel;
+ int ret;
+
+ channel = to_channel(inode->i_cdev);
+ filp->private_data = channel;
+
+ if (((channel->cfg->direction == MOST_CH_RX) &&
+ ((filp->f_flags & O_ACCMODE) != O_RDONLY))
+ || ((channel->cfg->direction == MOST_CH_TX) &&
+ ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
+ pr_info("WARN: Access flags mismatch\n");
+ return -EACCES;
+ }
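+	/*
+	 * access_ref is initialized to -1 in aim_probe(), so this increment
+	 * reaches zero only for the first opener; any concurrent open sees a
+	 * non-zero result and is rejected with -EBUSY.
+	 */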
+ if (!atomic_inc_and_test(&channel->access_ref)) {
+ pr_info("WARN: Device is busy\n");
+ atomic_dec(&channel->access_ref);
+ return -EBUSY;
+ }
+
+ ret = most_start_channel(channel->iface, channel->channel_id);
+ if (ret)
+ atomic_dec(&channel->access_ref);
+ return ret;
+}
+
+/**
+ * aim_close - implements the syscall to close the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stops the channel within the core.
+ */
+static int aim_close(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct mbo *mbo;
+ struct aim_channel *channel = to_channel(inode->i_cdev);
+
+ mutex_lock(&channel->io_mutex);
+ if (!channel->dev) {
+ mutex_unlock(&channel->io_mutex);
+ atomic_dec(&channel->access_ref);
+ device_destroy(aim_class, channel->devno);
+ cdev_del(&channel->cdev);
+ kfifo_free(&channel->fifo);
+ list_del(&channel->list);
+ ida_simple_remove(&minor_id, MINOR(channel->devno));
+ wake_up_interruptible(&channel->wq);
+ kfree(channel);
+ return 0;
+ }
+ mutex_unlock(&channel->io_mutex);
+
+	while (kfifo_out(&channel->fifo, &mbo, 1))
+		most_put_mbo(mbo);
+	if (channel->keep_mbo)
+ most_put_mbo(channel->stacked_mbo);
+ ret = most_stop_channel(channel->iface, channel->channel_id);
+ atomic_dec(&channel->access_ref);
+ wake_up_interruptible(&channel->wq);
+ return ret;
+}
+
+/**
+ * aim_write - implements the syscall to write to the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to write
+ * @offset: offset from where to start writing
+ */
+static ssize_t aim_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ int ret, err;
+ size_t actual_len = 0;
+ size_t max_len = 0;
+ ssize_t retval;
+ struct mbo *mbo;
+ struct aim_channel *channel = filp->private_data;
+
+ mutex_lock(&channel->io_mutex);
+ if (unlikely(!channel->dev)) {
+ mutex_unlock(&channel->io_mutex);
+ return -EPIPE;
+ }
+ mutex_unlock(&channel->io_mutex);
+
+ mbo = most_get_mbo(channel->iface, channel->channel_id);
+
+ if (!mbo && channel->dev) {
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+ if (wait_event_interruptible(
+ channel->wq,
+ (mbo = most_get_mbo(channel->iface,
+ channel->channel_id)) ||
+ (channel->dev == NULL)))
+ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&channel->io_mutex);
+ if (unlikely(!channel->dev)) {
+ mutex_unlock(&channel->io_mutex);
+ err = -EPIPE;
+ goto error;
+ }
+ mutex_unlock(&channel->io_mutex);
+
+ max_len = channel->cfg->buffer_size;
+ actual_len = min(count, max_len);
+ mbo->buffer_length = actual_len;
+
+ retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
+ if (retval) {
+ err = -EIO;
+ goto error;
+ }
+
+ ret = most_submit_mbo(mbo);
+ if (ret) {
+ pr_info("submitting MBO to core failed\n");
+ err = ret;
+ goto error;
+ }
+ return actual_len - retval;
+error:
+ if (mbo)
+ most_put_mbo(mbo);
+ return err;
+}
+
+/**
+ * aim_read - implements the syscall to read from the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to read
+ * @offset: offset from where to start reading
+ */
+static ssize_t
+aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+{
+ ssize_t retval;
+ size_t not_copied, proc_len;
+ struct mbo *mbo;
+ struct aim_channel *channel = filp->private_data;
+
+	if (channel->keep_mbo) {
+ mbo = channel->stacked_mbo;
+ channel->keep_mbo = false;
+ goto start_copy;
+ }
+ while ((0 == kfifo_out(&channel->fifo, &mbo, 1))
+ && (channel->dev != NULL)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ if (wait_event_interruptible(channel->wq,
+ (!kfifo_is_empty(&channel->fifo) ||
+ (channel->dev == NULL))))
+ return -ERESTARTSYS;
+ }
+
+start_copy:
+ /* make sure we don't submit to gone devices */
+ mutex_lock(&channel->io_mutex);
+ if (unlikely(!channel->dev)) {
+ mutex_unlock(&channel->io_mutex);
+ return -EIO;
+ }
+
+ if (count < mbo->processed_length)
+ channel->keep_mbo = true;
+
+ proc_len = min((int)count,
+ (int)(mbo->processed_length - channel->mbo_offs));
+
+ not_copied = copy_to_user(buf,
+ mbo->virt_address + channel->mbo_offs,
+ proc_len);
+
+ retval = not_copied ? proc_len - not_copied : proc_len;
+
+	if (channel->keep_mbo) {
+ channel->mbo_offs = retval;
+ channel->stacked_mbo = mbo;
+ } else {
+ most_put_mbo(mbo);
+ channel->mbo_offs = 0;
+ }
+ mutex_unlock(&channel->io_mutex);
+ return retval;
+}
+
+/**
+ * Initialization of struct file_operations
+ */
+static const struct file_operations channel_fops = {
+ .owner = THIS_MODULE,
+ .read = aim_read,
+ .write = aim_write,
+ .open = aim_open,
+ .release = aim_close,
+};
+
+/**
+ * aim_disconnect_channel - disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the cdev that represents this
+ * channel in user space.
+ */
+static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
+{
+ struct aim_channel *channel;
+ unsigned long flags;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+
+ channel = get_channel(iface, channel_id);
+ if (channel == NULL)
+ return -ENXIO;
+
+ mutex_lock(&channel->io_mutex);
+ channel->dev = NULL;
+ mutex_unlock(&channel->io_mutex);
+
+ if (atomic_read(&channel->access_ref)) {
+ device_destroy(aim_class, channel->devno);
+ cdev_del(&channel->cdev);
+ kfifo_free(&channel->fifo);
+ ida_simple_remove(&minor_id, MINOR(channel->devno));
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_del(&channel->list);
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ kfree(channel);
+ } else {
+ wake_up_interruptible(&channel->wq);
+ }
+ return 0;
+}
+
+/**
+ * aim_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel linked to this MBO and stores it in the local
+ * fifo buffer.
+ */
+static int aim_rx_completion(struct mbo *mbo)
+{
+ struct aim_channel *channel;
+
+ if (!mbo)
+ return -EINVAL;
+
+ channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (channel == NULL)
+ return -ENXIO;
+
+ kfifo_in(&channel->fifo, &mbo, 1);
+#ifdef DEBUG_MESG
+ if (kfifo_is_full(&channel->fifo))
+ pr_info("WARN: Fifo is full\n");
+#endif
+ wake_up_interruptible(&channel->wq);
+ return 0;
+}
+
+/**
+ * aim_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This wakes sleeping processes in the wait-queue.
+ */
+static int aim_tx_completion(struct most_interface *iface, int channel_id)
+{
+ struct aim_channel *channel;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+ if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
+ pr_info("Channel ID out of range\n");
+ return -EINVAL;
+ }
+
+ channel = get_channel(iface, channel_id);
+ if (channel == NULL)
+ return -ENXIO;
+ wake_up_interruptible(&channel->wq);
+ return 0;
+}
+
+static struct most_aim cdev_aim;
+
+/**
+ * aim_probe - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @name: name of the device to be created
+ *
+ * This allocates a channel object and creates the device node in /dev
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int aim_probe(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *name)
+{
+ struct aim_channel *channel;
+ unsigned long cl_flags;
+ int retval;
+ int current_minor;
+
+ if ((!iface) || (!cfg) || (!parent) || (!name)) {
+ pr_info("Probing AIM with bad arguments");
+ return -EINVAL;
+ }
+ channel = get_channel(iface, channel_id);
+ if (channel)
+ return -EEXIST;
+
+ current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
+ if (current_minor < 0)
+ return current_minor;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel) {
+ pr_info("failed to alloc channel object\n");
+ retval = -ENOMEM;
+ goto error_alloc_channel;
+ }
+
+ channel->devno = MKDEV(major, current_minor);
+ cdev_init(&channel->cdev, &channel_fops);
+ channel->cdev.owner = THIS_MODULE;
+ cdev_add(&channel->cdev, channel->devno, 1);
+ channel->iface = iface;
+ channel->cfg = cfg;
+ channel->channel_id = channel_id;
+ channel->mbo_offs = 0;
+ atomic_set(&channel->access_ref, -1);
+ INIT_KFIFO(channel->fifo);
+ retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+ if (retval) {
+ pr_info("failed to alloc channel kfifo");
+ goto error_alloc_kfifo;
+ }
+ init_waitqueue_head(&channel->wq);
+ mutex_init(&channel->io_mutex);
+ spin_lock_irqsave(&ch_list_lock, cl_flags);
+ list_add_tail(&channel->list, &channel_list);
+ spin_unlock_irqrestore(&ch_list_lock, cl_flags);
+ channel->dev = device_create(aim_class,
+ NULL,
+ channel->devno,
+ NULL,
+ "%s", name);
+
+	if (IS_ERR(channel->dev)) {
+		retval = PTR_ERR(channel->dev);
+		pr_info("failed to create new device node %s\n", name);
+		goto error_create_device;
+	}
+ kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+ return 0;
+
+error_create_device:
+ kfifo_free(&channel->fifo);
+ list_del(&channel->list);
+error_alloc_kfifo:
+ cdev_del(&channel->cdev);
+ kfree(channel);
+error_alloc_channel:
+ ida_simple_remove(&minor_id, current_minor);
+ return retval;
+}
+
+static struct most_aim cdev_aim = {
+ .name = "cdev",
+ .probe_channel = aim_probe,
+ .disconnect_channel = aim_disconnect_channel,
+ .rx_completion = aim_rx_completion,
+ .tx_completion = aim_tx_completion,
+};
+
+static int __init mod_init(void)
+{
+ pr_info("init()\n");
+
+ INIT_LIST_HEAD(&channel_list);
+ spin_lock_init(&ch_list_lock);
+ ida_init(&minor_id);
+
+ if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
+ return -EIO;
+ major = MAJOR(aim_devno);
+
+ aim_class = class_create(THIS_MODULE, "most_cdev_aim");
+ if (IS_ERR(aim_class)) {
+ pr_err("no udev support\n");
+ goto free_cdev;
+ }
+
+ if (most_register_aim(&cdev_aim))
+ goto dest_class;
+ return 0;
+
+dest_class:
+ class_destroy(aim_class);
+free_cdev:
+	unregister_chrdev_region(aim_devno, 50);
+ return -EIO;
+}
+
+static void __exit mod_exit(void)
+{
+ struct aim_channel *channel, *tmp;
+
+ pr_info("exit module\n");
+
+ most_deregister_aim(&cdev_aim);
+
+ list_for_each_entry_safe(channel, tmp, &channel_list, list) {
+ device_destroy(aim_class, channel->devno);
+ cdev_del(&channel->cdev);
+ kfifo_free(&channel->fifo);
+ list_del(&channel->list);
+ ida_simple_remove(&minor_id, MINOR(channel->devno));
+ kfree(channel);
+ }
+ class_destroy(aim_class);
+	unregister_chrdev_region(aim_devno, 50);
+ ida_destroy(&minor_id);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/drivers/staging/most/aim-network/Kconfig b/drivers/staging/most/aim-network/Kconfig
new file mode 100644
index 000000000000..4c66b24cf73c
--- /dev/null
+++ b/drivers/staging/most/aim-network/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST Networking configuration
+#
+
+config AIM_NETWORK
+ tristate "Networking AIM"
+ depends on NET
+
+ ---help---
+	Say Y here if you want to communicate via a networking device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aim_network.
diff --git a/drivers/staging/most/aim-network/Makefile b/drivers/staging/most/aim-network/Makefile
new file mode 100644
index 000000000000..840c1dd94873
--- /dev/null
+++ b/drivers/staging/most/aim-network/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_NETWORK) += aim_network.o
+
+aim_network-objs := networking.o
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
new file mode 100644
index 000000000000..c8ab2399faad
--- /dev/null
+++ b/drivers/staging/most/aim-network/networking.c
@@ -0,0 +1,567 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include "mostcore.h"
+#include "networking.h"
+
+
+#define MEP_HDR_LEN 8
+#define MDP_HDR_LEN 16
+#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
+
+#define PMHL 5
+
+#define PMS_TELID_UNSEGM_MAMAC 0x0A
+#define PMS_FIFONO_MDP 0x01
+#define PMS_FIFONO_MEP 0x04
+#define PMS_MSGTYPE_DATA 0x04
+#define PMS_DEF_PRIO 0
+#define MEP_DEF_RETRY 15
+
+#define PMS_FIFONO_MASK 0x07
+#define PMS_FIFONO_SHIFT 3
+#define PMS_RETRY_SHIFT 4
+#define PMS_TELID_MASK 0x0F
+#define PMS_TELID_SHIFT 4
+
+#define HB(value) ((u8)((u16)(value) >> 8))
+#define LB(value) ((u8)(value))
+
+
+
+#define EXTRACT_BIT_SET(bitset_name, value) \
+ (((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
+
+#define PMS_IS_MEP(buf, len) \
+ ((len) > MEP_HDR_LEN && \
+ EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
+
+#define PMS_IS_MAMAC(buf, len) \
+ ((len) > MDP_HDR_LEN && \
+ EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
+ EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
+
+struct net_dev_channel {
+ bool linked;
+ int ch_id;
+};
+
+struct net_dev_context {
+ struct most_interface *iface;
+ bool channels_opened;
+ bool is_mamac;
+ unsigned char link_stat;
+ struct net_device *dev;
+ struct net_dev_channel rx;
+ struct net_dev_channel tx;
+ struct list_head list;
+};
+
+static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
+static spinlock_t list_lock;
+static struct most_aim aim;
+
+
+static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ const u8 broadcast[] = { 0x03, 0xFF };
+ const u8 *dest_addr = skb->data + 4;
+ const u8 *eth_type = skb->data + 12;
+ unsigned int payload_len = skb->len - ETH_HLEN;
+ unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+
+ if (mbo->buffer_length < mdp_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mdp_len);
+ return -EINVAL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ pr_err("drop: too small packet! (%d)\n", skb->len);
+ return -EINVAL;
+ }
+
+ if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
+ dest_addr = broadcast;
+
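+	/*
+	 * Assemble the Port Message header (total length, PMHL, FIFO
+	 * number/message type, priority and target address), followed by the
+	 * MDP header (EtherType, TelID and payload length), before copying
+	 * the Ethernet payload itself.
+	 */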
+ *buff++ = HB(mdp_len - 2);
+ *buff++ = LB(mdp_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = PMS_DEF_PRIO;
+ *buff++ = dest_addr[0];
+ *buff++ = dest_addr[1];
+ *buff++ = 0x00;
+
+ *buff++ = HB(payload_len + 6);
+ *buff++ = LB(payload_len + 6);
+
+ /* end of FPH here */
+
+ *buff++ = eth_type[0];
+ *buff++ = eth_type[1];
+ *buff++ = 0;
+ *buff++ = 0;
+
+ *buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
+ *buff++ = LB(payload_len);
+
+ memcpy(buff, skb->data + ETH_HLEN, payload_len);
+ mbo->buffer_length = mdp_len;
+ return 0;
+}
+
+static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
+{
+ u8 *buff = mbo->virt_address;
+ unsigned int mep_len = skb->len + MEP_HDR_LEN;
+
+ if (mbo->buffer_length < mep_len) {
+ pr_err("drop: too small buffer! (%d for %d)\n",
+ mbo->buffer_length, mep_len);
+ return -EINVAL;
+ }
+
+ *buff++ = HB(mep_len - 2);
+ *buff++ = LB(mep_len - 2);
+
+ *buff++ = PMHL;
+ *buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+ *buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
+ *buff++ = 0;
+ *buff++ = 0;
+ *buff++ = 0;
+
+ memcpy(buff, skb->data, skb->len);
+ mbo->buffer_length = mep_len;
+ return 0;
+}
+
+static int most_nd_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+ int err = eth_mac_addr(dev, p);
+
+ if (err)
+ return err;
+
+ BUG_ON(nd->dev != dev);
+
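+	/*
+	 * A MAC address whose first four bytes are zero selects MAMAC (MDP)
+	 * framing; any other address selects MEP framing (see
+	 * most_nd_start_xmit()).
+	 */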
+ nd->is_mamac =
+ (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
+ dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
+
+ /*
+ * Set default MTU for the given packet type.
+ * It is still possible to change MTU using ip tools afterwards.
+ */
+ dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
+
+ return 0;
+}
+
+static int most_nd_open(struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+
+ pr_info("open net device %s\n", dev->name);
+
+ BUG_ON(nd->dev != dev);
+
+ if (nd->channels_opened)
+ return -EFAULT;
+
+ BUG_ON(!nd->tx.linked || !nd->rx.linked);
+
+ if (most_start_channel(nd->iface, nd->rx.ch_id)) {
+ pr_err("most_start_channel() failed\n");
+ return -EBUSY;
+ }
+
+ if (most_start_channel(nd->iface, nd->tx.ch_id)) {
+ pr_err("most_start_channel() failed\n");
+ most_stop_channel(nd->iface, nd->rx.ch_id);
+ return -EBUSY;
+ }
+
+ nd->channels_opened = true;
+
+ if (nd->is_mamac) {
+ nd->link_stat = 1;
+ netif_wake_queue(dev);
+ } else {
+ nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+ }
+
+ return 0;
+}
+
+static int most_nd_stop(struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+
+ pr_info("stop net device %s\n", dev->name);
+
+ BUG_ON(nd->dev != dev);
+ netif_stop_queue(dev);
+
+ if (nd->channels_opened) {
+ most_stop_channel(nd->iface, nd->rx.ch_id);
+ most_stop_channel(nd->iface, nd->tx.ch_id);
+ nd->channels_opened = false;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_dev_context *nd = dev->ml_priv;
+ struct mbo *mbo;
+ int ret;
+
+ BUG_ON(nd->dev != dev);
+
+ mbo = most_get_mbo(nd->iface, nd->tx.ch_id);
+
+ if (!mbo) {
+ netif_stop_queue(dev);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nd->is_mamac)
+ ret = skb_to_mamac(skb, mbo);
+ else
+ ret = skb_to_mep(skb, mbo);
+
+ if (ret) {
+ most_put_mbo(mbo);
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ most_submit_mbo(mbo);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops most_nd_ops = {
+ .ndo_open = most_nd_open,
+ .ndo_stop = most_nd_stop,
+ .ndo_start_xmit = most_nd_start_xmit,
+ .ndo_set_mac_address = most_nd_set_mac_address,
+};
+
+static void most_nd_setup(struct net_device *dev)
+{
+ pr_info("setup net device %s\n", dev->name);
+ ether_setup(dev);
+ dev->netdev_ops = &most_nd_ops;
+}
+
+static void most_net_rm_netdev_safe(struct net_dev_context *nd)
+{
+ if (!nd->dev)
+ return;
+
+ pr_info("remove net device %p\n", nd->dev);
+
+ unregister_netdev(nd->dev);
+ free_netdev(nd->dev);
+	nd->dev = NULL;
+}
+
+static struct net_dev_context *get_net_dev_context(
+ struct most_interface *iface)
+{
+ struct net_dev_context *nd, *tmp;
+
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+ if (nd->iface == iface) {
+ spin_unlock(&list_lock);
+ return nd;
+ }
+ }
+ spin_unlock(&list_lock);
+ return NULL;
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg,
+ struct kobject *parent, char *name)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+
+ if (!iface)
+ return -EINVAL;
+
+ if (ccfg->data_type != MOST_CH_ASYNC)
+ return -EINVAL;
+
+ nd = get_net_dev_context(iface);
+
+ if (!nd) {
+ nd = kzalloc(sizeof(*nd), GFP_KERNEL);
+ if (!nd)
+ return -ENOMEM;
+
+ nd->iface = iface;
+
+ spin_lock(&list_lock);
+ list_add(&nd->list, &net_devices);
+ spin_unlock(&list_lock);
+ }
+
+ ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+ if (ch->linked) {
+ pr_err("only one channel per instance & direction allowed\n");
+ return -EINVAL;
+ }
+
+ if (nd->tx.linked || nd->rx.linked) {
+ struct net_device *dev =
+ alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN, most_nd_setup);
+
+ if (!dev) {
+ pr_err("no memory for net_device\n");
+ return -ENOMEM;
+ }
+
+ nd->dev = dev;
+
+ dev->ml_priv = nd;
+ if (register_netdev(dev)) {
+ pr_err("registering net device failed\n");
+ free_netdev(dev);
+ return -EINVAL;
+ }
+ }
+
+ ch->ch_id = channel_idx;
+ ch->linked = true;
+
+ return 0;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+ struct net_dev_channel *ch;
+
+ nd = get_net_dev_context(iface);
+ if (!nd)
+ return -EINVAL;
+
+ if (nd->rx.linked && channel_idx == nd->rx.ch_id)
+ ch = &nd->rx;
+ else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
+ ch = &nd->tx;
+ else
+ return -EINVAL;
+
+ ch->linked = false;
+
+ /*
+ * do not call most_stop_channel() here, because channels are
+ * going to be closed in ndo_stop() after unregister_netdev()
+ */
+ most_net_rm_netdev_safe(nd);
+
+ if (!nd->rx.linked && !nd->tx.linked) {
+ spin_lock(&list_lock);
+ list_del(&nd->list);
+ spin_unlock(&list_lock);
+ kfree(nd);
+ }
+
+ return 0;
+}
+
+static int aim_resume_tx_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct net_dev_context *nd;
+
+ nd = get_net_dev_context(iface);
+ if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
+ return 0;
+
+ if (!nd->dev)
+ return 0;
+
+ netif_wake_queue(nd->dev);
+ return 0;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+ const u32 zero = 0;
+ struct net_dev_context *nd;
+ char *buf = mbo->virt_address;
+	u32 len = mbo->processed_length;
+ struct sk_buff *skb;
+ struct net_device *dev;
+
+ nd = get_net_dev_context(mbo->ifp);
+ if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
+ return -EIO;
+
+ dev = nd->dev;
+ if (!dev) {
+ pr_err_once("drop packet: missing net_device\n");
+ return -EIO;
+ }
+
+ if (nd->is_mamac) {
+ if (!PMS_IS_MAMAC(buf, len))
+ return -EIO;
+
+ skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
+ } else {
+ if (!PMS_IS_MEP(buf, len))
+ return -EIO;
+
+ skb = dev_alloc_skb(len - MEP_HDR_LEN);
+ }
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ pr_err_once("drop packet: no memory for skb\n");
+ goto out;
+ }
+
+ skb->dev = dev;
+
+ if (nd->is_mamac) {
+ /* dest */
+ memcpy(skb_put(skb, ETH_ALEN), dev->dev_addr, ETH_ALEN);
+
+ /* src */
+ memcpy(skb_put(skb, 4), &zero, 4);
+ memcpy(skb_put(skb, 2), buf + 5, 2);
+
+ /* eth type */
+ memcpy(skb_put(skb, 2), buf + 10, 2);
+
+ buf += MDP_HDR_LEN;
+ len -= MDP_HDR_LEN;
+ } else {
+ buf += MEP_HDR_LEN;
+ len -= MEP_HDR_LEN;
+ }
+
+ memcpy(skb_put(skb, len), buf, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+
+out:
+ most_put_mbo(mbo);
+ return 0;
+}
+
+static int __init most_net_init(void)
+{
+ pr_info("most_net_init()\n");
+ spin_lock_init(&list_lock);
+ aim.name = "networking";
+ aim.probe_channel = aim_probe_channel;
+ aim.disconnect_channel = aim_disconnect_channel;
+ aim.tx_completion = aim_resume_tx_channel;
+ aim.rx_completion = aim_rx_data;
+ return most_register_aim(&aim);
+}
+
+static void __exit most_net_exit(void)
+{
+ struct net_dev_context *nd, *tmp;
+
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+ list_del(&nd->list);
+ spin_unlock(&list_lock);
+ /*
+ * do not call most_stop_channel() here, because channels are
+ * going to be closed in ndo_stop() after unregister_netdev()
+ */
+ most_net_rm_netdev_safe(nd);
+ kfree(nd);
+ spin_lock(&list_lock);
+ }
+ spin_unlock(&list_lock);
+
+ most_deregister_aim(&aim);
+ pr_info("most_net_exit()\n");
+}
+
+/**
+ * most_deliver_netinfo - callback for HDM to be informed about HW's MAC
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
+ */
+void most_deliver_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr)
+{
+ struct net_dev_context *nd;
+ struct net_device *dev;
+
+ pr_info("Received netinfo from %s\n", iface->description);
+
+ nd = get_net_dev_context(iface);
+ if (!nd)
+ return;
+
+ dev = nd->dev;
+ if (!dev)
+ return;
+
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ if (nd->link_stat != link_stat) {
+ nd->link_stat = link_stat;
+ if (nd->link_stat)
+ netif_wake_queue(dev);
+ else
+ netif_stop_queue(dev);
+ }
+}
+EXPORT_SYMBOL(most_deliver_netinfo);
+
+module_init(most_net_init);
+module_exit(most_net_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-network/networking.h b/drivers/staging/most/aim-network/networking.h
new file mode 100644
index 000000000000..1b8b434fabb0
--- /dev/null
+++ b/drivers/staging/most/aim-network/networking.h
@@ -0,0 +1,23 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+#ifndef _NETWORKING_H_
+#define _NETWORKING_H_
+
+#include "mostcore.h"
+
+
+void most_deliver_netinfo(struct most_interface *iface,
+ unsigned char link_stat, unsigned char *mac_addr);
+
+
+#endif
diff --git a/drivers/staging/most/aim-sound/Kconfig b/drivers/staging/most/aim-sound/Kconfig
new file mode 100644
index 000000000000..3194c219ff14
--- /dev/null
+++ b/drivers/staging/most/aim-sound/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST ALSA configuration
+#
+
+config AIM_SOUND
+ tristate "ALSA AIM"
+ depends on SND
+ select SND_PCM
+ ---help---
+	Say Y here if you want to communicate via ALSA/sound devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aim_sound.
diff --git a/drivers/staging/most/aim-sound/Makefile b/drivers/staging/most/aim-sound/Makefile
new file mode 100644
index 000000000000..beba9586fd28
--- /dev/null
+++ b/drivers/staging/most/aim-sound/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_SOUND) += aim_sound.o
+
+aim_sound-objs := sound.o
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
new file mode 100644
index 000000000000..860302eebda7
--- /dev/null
+++ b/drivers/staging/most/aim-sound/sound.c
@@ -0,0 +1,758 @@
+/*
+ * sound.c - Audio Application Interface Module for Mostcore
+ *
+ * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <mostcore.h>
+
+#define DRIVER_NAME "sound"
+
+static struct list_head dev_list;
+
+/**
+ * struct channel - private structure to keep channel specific data
+ * @substream: stores the substream structure
+ * @iface: interface the channel belongs to
+ * @cfg: channel configuration
+ * @card: registered sound card
+ * @list: list for private use
+ * @id: channel index
+ * @period_pos: current period position (ring buffer)
+ * @buffer_pos: current buffer position (ring buffer)
+ * @is_stream_running: identifies whether a stream is running or not
+ * @playback_task: playback thread
+ * @playback_waitq: waitq used by playback thread
+ * @copy_fn: copy function used to move data between ALSA buffer and MBO
+ */
+struct channel {
+ struct snd_pcm_substream *substream;
+ struct most_interface *iface;
+ struct most_channel_config *cfg;
+ struct snd_card *card;
+ struct list_head list;
+ int id;
+ unsigned int period_pos;
+ unsigned int buffer_pos;
+ bool is_stream_running;
+
+ struct task_struct *playback_task;
+ wait_queue_head_t playback_waitq;
+
+ void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
+};
+
+#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+ SNDRV_PCM_INFO_MMAP_VALID | \
+ SNDRV_PCM_INFO_BATCH | \
+ SNDRV_PCM_INFO_INTERLEAVED | \
+ SNDRV_PCM_INFO_BLOCK_TRANSFER)
+
+/**
+ * Initialization of struct snd_pcm_hardware
+ */
+static struct snd_pcm_hardware pcm_hardware_template = {
+ .info = MOST_PCM_INFO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 8,
+};
+
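+/*
+ * Samples on the MOST bus are big-endian; the helpers below byte-swap
+ * 16-, 24- and 32-bit little-endian ALSA samples when copying between
+ * the ALSA ring buffer and an MBO. pcm_prepare() selects plain memcpy
+ * for big-endian or 8-bit formats.
+ */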
+#define swap16(val) ( \
+ (((u16)(val) << 8) & (u16)0xFF00) | \
+ (((u16)(val) >> 8) & (u16)0x00FF))
+
+#define swap32(val) ( \
+ (((u32)(val) << 24) & (u32)0xFF000000) | \
+ (((u32)(val) << 8) & (u32)0x00FF0000) | \
+ (((u32)(val) >> 8) & (u32)0x0000FF00) | \
+ (((u32)(val) >> 24) & (u32)0x000000FF))
+
+static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < (bytes / 2)) {
+ dest[i] = swap16(source[i]);
+ i++;
+ }
+}
+
+static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < bytes - 2) {
+ dest[i] = source[i + 2];
+ dest[i + 1] = source[i + 1];
+ dest[i + 2] = source[i];
+ i += 3;
+ }
+}
+
+static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
+{
+ unsigned int i = 0;
+
+ while (i < bytes / 4) {
+ dest[i] = swap32(source[i]);
+ i++;
+ }
+}
+
+static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+ memcpy(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy16(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy24(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy32(most, alsa, bytes);
+}
+
+static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+ memcpy(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy16(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy24(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
+{
+ swap_copy32(alsa, most, bytes);
+}
+
+/**
+ * get_channel - get pointer to channel
+ * @iface: interface structure
+ * @channel_id: channel ID
+ *
+ * This traverses the channel list and returns the channel matching the
+ * ID and interface.
+ *
+ * Returns pointer to channel on success or NULL otherwise.
+ */
+static struct channel *get_channel(struct most_interface *iface,
+ int channel_id)
+{
+ struct channel *channel, *tmp;
+
+ list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+ if ((channel->iface == iface) && (channel->id == channel_id))
+ return channel;
+ }
+
+ return NULL;
+}
+
+/**
+ * copy_data - implements data copying function
+ * @channel: channel
+ * @mbo: MBO from core
+ *
+ * Copy data from/to ring buffer to/from MBO and update the buffer position
+ */
+static bool copy_data(struct channel *channel, struct mbo *mbo)
+{
+ struct snd_pcm_runtime *const runtime = channel->substream->runtime;
+ unsigned int const frame_bytes = channel->cfg->subbuffer_size;
+ unsigned int const buffer_size = runtime->buffer_size;
+ unsigned int frames;
+ unsigned int fr0;
+
+ if (channel->cfg->direction & MOST_CH_RX)
+ frames = mbo->processed_length / frame_bytes;
+ else
+ frames = mbo->buffer_length / frame_bytes;
+ fr0 = min(buffer_size - channel->buffer_pos, frames);
+
+ channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
+ mbo->virt_address,
+ fr0 * frame_bytes);
+
+ if (frames > fr0) {
+ /* wrap around at end of ring buffer */
+ channel->copy_fn(runtime->dma_area,
+ mbo->virt_address + fr0 * frame_bytes,
+ (frames - fr0) * frame_bytes);
+ }
+
+ channel->buffer_pos += frames;
+ if (channel->buffer_pos >= buffer_size)
+ channel->buffer_pos -= buffer_size;
+ channel->period_pos += frames;
+ if (channel->period_pos >= runtime->period_size) {
+ channel->period_pos -= runtime->period_size;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * playback_thread - function implements the playback thread
+ * @data: private data
+ *
+ * Thread which does the playback functionality in a loop. It waits for a free
+ * MBO from mostcore for a particular channel and copy the data from ring buffer
+ * to MBO. Submit the MBO back to mostcore, after copying the data.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int playback_thread(void *data)
+{
+ struct channel *const channel = data;
+
+ pr_info("playback thread started\n");
+
+ while (!kthread_should_stop()) {
+ struct mbo *mbo = NULL;
+ bool period_elapsed = false;
+ int ret;
+
+ wait_event_interruptible(
+ channel->playback_waitq,
+ kthread_should_stop() ||
+ (mbo = most_get_mbo(channel->iface, channel->id)));
+
+ if (!mbo)
+ continue;
+
+ if (channel->is_stream_running)
+ period_elapsed = copy_data(channel, mbo);
+ else
+ memset(mbo->virt_address, 0, mbo->buffer_length);
+
+ ret = most_submit_mbo(mbo);
+ if (ret)
+ channel->is_stream_running = false;
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(channel->substream);
+ }
+
+ return 0;
+}
+
+/**
+ * pcm_open - implements open callback function for PCM middle layer
+ * @substream: pointer to ALSA PCM substream
+ *
+ * This is called when a PCM substream is opened. At least, the function should
+ * initialize the runtime->hw record.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct most_channel_config *cfg = channel->cfg;
+
+ pr_info("pcm_open(), %s\n", substream->name);
+
+ channel->substream = substream;
+
+ if (cfg->direction == MOST_CH_TX) {
+ init_waitqueue_head(&channel->playback_waitq);
+ channel->playback_task = kthread_run(&playback_thread, channel,
+ "most_audio_playback");
+ if (IS_ERR(channel->playback_task))
+ return PTR_ERR(channel->playback_task);
+ }
+
+ if (most_start_channel(channel->iface, channel->id)) {
+ pr_err("most_start_channel() failed!\n");
+ if (cfg->direction == MOST_CH_TX)
+ kthread_stop(channel->playback_task);
+ return -EBUSY;
+ }
+
+ runtime->hw = pcm_hardware_template;
+ runtime->hw.buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
+ runtime->hw.period_bytes_min = cfg->buffer_size;
+ runtime->hw.period_bytes_max = cfg->buffer_size;
+ runtime->hw.periods_min = 1;
+ runtime->hw.periods_max = cfg->num_buffers;
+
+ return 0;
+}
+
+/**
+ * pcm_close - implements close callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ *
+ * Obviously, this is called when a PCM substream is closed. Any private
+ * instance for a PCM substream allocated in the open callback will be
+ * released here.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+
+ pr_info("pcm_close(), %s\n", substream->name);
+
+ if (channel->cfg->direction == MOST_CH_TX)
+ kthread_stop(channel->playback_task);
+ most_stop_channel(channel->iface, channel->id);
+
+ return 0;
+}
+
+/**
+ * pcm_hw_params - implements hw_params callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ * @hw_params: contains the hardware parameters set by the application
+ *
+ * This is called when the hardware parameters are set by the application, that
+ * is, once when the buffer size, the period size, the format, etc. are defined
+ * for the PCM substream. Much of the hardware setup should be done in this
+ * callback, including the allocation of buffers.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ pr_info("pcm_hw_params()\n");
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+/**
+ * pcm_hw_free - implements hw_free callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This is called to release the resources allocated via hw_params.
+ * This function will be always called before the close callback is called.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ pr_info("pcm_hw_free()\n");
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+/**
+ * pcm_prepare - implements prepare callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM is "prepared". Format rate, sample rate,
+ * etc., can be set here. This callback can be called many times at each setup.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct most_channel_config *cfg = channel->cfg;
+ int width = snd_pcm_format_physical_width(runtime->format);
+
+ channel->copy_fn = NULL;
+
+ if (cfg->direction == MOST_CH_TX) {
+ if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+ channel->copy_fn = alsa_to_most_memcpy;
+ else if (width == 16)
+ channel->copy_fn = alsa_to_most_copy16;
+ else if (width == 24)
+ channel->copy_fn = alsa_to_most_copy24;
+ else if (width == 32)
+ channel->copy_fn = alsa_to_most_copy32;
+ } else {
+ if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+ channel->copy_fn = most_to_alsa_memcpy;
+ else if (width == 16)
+ channel->copy_fn = most_to_alsa_copy16;
+ else if (width == 24)
+ channel->copy_fn = most_to_alsa_copy24;
+ else if (width == 32)
+ channel->copy_fn = most_to_alsa_copy32;
+ }
+
+ if (!channel->copy_fn) {
+ pr_err("unsupported format\n");
+ return -EINVAL;
+ }
+
+ channel->period_pos = 0;
+ channel->buffer_pos = 0;
+
+ return 0;
+}
+
+/**
+ * pcm_trigger - implements trigger callback function for PCM middle layer
+ * @substream: substream pointer
+ * @cmd: action to perform
+ *
+ * This is called when the PCM is started, stopped or paused. The action will be
+ * specified in the second argument, SNDRV_PCM_TRIGGER_XXX.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct channel *channel = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ channel->is_stream_running = true;
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ channel->is_stream_running = false;
+ return 0;
+
+ default:
+ pr_info("pcm_trigger(), invalid\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * pcm_pointer - implements pointer callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM middle layer inquires the current
+ * hardware position on the buffer. The position must be returned in frames,
+ * ranging from 0 to buffer_size-1.
+ */
+static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct channel *channel = substream->private_data;
+
+ return channel->buffer_pos;
+}
+
+/**
+ * Initialization of struct snd_pcm_ops
+ */
+static struct snd_pcm_ops pcm_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_hw_params,
+ .hw_free = pcm_hw_free,
+ .prepare = pcm_prepare,
+ .trigger = pcm_trigger,
+ .pointer = pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+};
+
+
+static int split_arg_list(char *buf, char **card_name, char **pcm_format)
+{
+ *card_name = strsep(&buf, ".");
+ if (!*card_name)
+ return -EIO;
+ *pcm_format = strsep(&buf, ".\n");
+ if (!*pcm_format)
+ return -EIO;
+ return 0;
+}
+
+static int audio_set_pcm_format(char *pcm_format, struct most_channel_config *cfg)
+{
+ if (!strcmp(pcm_format, "1x8")) {
+ if (cfg->subbuffer_size != 1)
+ goto error;
+ pr_info("PCM format is 8-bit mono\n");
+ pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S8;
+ } else if (!strcmp(pcm_format, "2x16")) {
+ if (cfg->subbuffer_size != 4)
+ goto error;
+ pr_info("PCM format is 16-bit stereo\n");
+ pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE;
+ } else if (!strcmp(pcm_format, "2x24")) {
+ if (cfg->subbuffer_size != 6)
+ goto error;
+ pr_info("PCM format is 24-bit stereo\n");
+ pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_3BE;
+ } else if (!strcmp(pcm_format, "2x32")) {
+ if (cfg->subbuffer_size != 8)
+ goto error;
+ pr_info("PCM format is 32-bit stereo\n");
+ pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S32_BE;
+ } else {
+ pr_err("PCM format %s not supported\n", pcm_format);
+ return -EIO;
+ }
+ return 0;
+error:
+ pr_err("Audio resolution doesn't fit subbuffer size\n");
+ return -EINVAL;
+}
+
+/**
+ * audio_probe_channel - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @arg_list: string that provides the name of the device to be created in /dev
+ * plus the desired audio resolution
+ *
+ * Creates sound card, pcm device, sets pcm ops and registers sound card.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_probe_channel(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *arg_list)
+{
+ struct channel *channel;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int playback_count = 0;
+ int capture_count = 0;
+ int ret;
+ int direction;
+ char *card_name;
+ char *pcm_format;
+
+ pr_info("sound_probe_channel()\n");
+
+ if (!iface)
+ return -EINVAL;
+
+ if (cfg->data_type != MOST_CH_SYNC) {
+ pr_err("Incompatible channel type\n");
+ return -EINVAL;
+ }
+
+ if (get_channel(iface, channel_id)) {
+ pr_err("channel (%s:%d) is already linked\n",
+ iface->description, channel_id);
+ return -EINVAL;
+ }
+
+ if (cfg->direction == MOST_CH_TX) {
+ playback_count = 1;
+ direction = SNDRV_PCM_STREAM_PLAYBACK;
+ } else {
+ capture_count = 1;
+ direction = SNDRV_PCM_STREAM_CAPTURE;
+ }
+
+ ret = split_arg_list(arg_list, &card_name, &pcm_format);
+ if (ret < 0) {
+ pr_info("PCM format missing\n");
+ return ret;
+ }
+	ret = audio_set_pcm_format(pcm_format, cfg);
+	if (ret)
+		return ret;
+
+ ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
+ sizeof(*channel), &card);
+ if (ret < 0)
+ return ret;
+
+ channel = card->private_data;
+ channel->card = card;
+ channel->cfg = cfg;
+ channel->iface = iface;
+ channel->id = channel_id;
+
+ snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
+ snprintf(card->shortname, sizeof(card->shortname), "MOST:%d",
+ card->number);
+ snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
+ card->shortname, iface->description, channel_id);
+
+ ret = snd_pcm_new(card, card_name, 0, playback_count,
+ capture_count, &pcm);
+ if (ret < 0)
+ goto err_free_card;
+
+ pcm->private_data = channel;
+
+ snd_pcm_set_ops(pcm, direction, &pcm_ops);
+
+ ret = snd_card_register(card);
+ if (ret < 0)
+ goto err_free_card;
+
+ list_add_tail(&channel->list, &dev_list);
+
+ return 0;
+
+err_free_card:
+ snd_card_free(card);
+ return ret;
+}
+
+/**
+ * audio_disconnect_channel - function to disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the sound card from ALSA
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_disconnect_channel(struct most_interface *iface,
+ int channel_id)
+{
+ struct channel *channel;
+
+ pr_info("sound_disconnect_channel()\n");
+
+ channel = get_channel(iface, channel_id);
+ if (!channel) {
+ pr_err("sound_disconnect_channel(), invalid channel %d\n",
+ channel_id);
+ return -EINVAL;
+ }
+
+ list_del(&channel->list);
+ snd_card_free(channel->card);
+
+ return 0;
+}
+
+/**
+ * audio_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel this MBO belongs to and copy the data from MBO
+ * to ring buffer
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_rx_completion(struct mbo *mbo)
+{
+ struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ bool period_elapsed = false;
+
+ if (!channel) {
+		pr_err("audio_rx_completion(), invalid channel %d\n",
+ mbo->hdm_channel_id);
+ return -EINVAL;
+ }
+
+ if (channel->is_stream_running)
+ period_elapsed = copy_data(channel, mbo);
+
+ most_put_mbo(mbo);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(channel->substream);
+
+ return 0;
+}
+
+/**
+ * audio_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This searches for the channel that belongs to this combination of interface
+ * pointer and channel ID and wakes up any process waiting on the channel's
+ * wait queue.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_tx_completion(struct most_interface *iface, int channel_id)
+{
+ struct channel *channel = get_channel(iface, channel_id);
+
+ if (!channel) {
+		pr_err("audio_tx_completion(), invalid channel %d\n",
+ channel_id);
+ return -EINVAL;
+ }
+
+ wake_up_interruptible(&channel->playback_waitq);
+
+ return 0;
+}
+
+/* Initialization of struct most_aim */
+static struct most_aim audio_aim = {
+ .name = DRIVER_NAME,
+ .probe_channel = audio_probe_channel,
+ .disconnect_channel = audio_disconnect_channel,
+ .rx_completion = audio_rx_completion,
+ .tx_completion = audio_tx_completion,
+};
+
+static int __init audio_init(void)
+{
+ pr_info("init()\n");
+
+ INIT_LIST_HEAD(&dev_list);
+
+ return most_register_aim(&audio_aim);
+}
+
+static void __exit audio_exit(void)
+{
+ struct channel *channel, *tmp;
+
+ pr_info("exit()\n");
+
+ list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+ list_del(&channel->list);
+ snd_card_free(channel->card);
+ }
+
+ most_deregister_aim(&audio_aim);
+}
+
+module_init(audio_init);
+module_exit(audio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-v4l2/Kconfig b/drivers/staging/most/aim-v4l2/Kconfig
new file mode 100644
index 000000000000..d70eaaf0936c
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST V4L2 configuration
+#
+
+config AIM_V4L2
+ tristate "V4L2 AIM"
+ depends on VIDEO_V4L2
+ ---help---
+	  Say Y here if you want to communicate via Video4Linux.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called aim_v4l2.
\ No newline at end of file
diff --git a/drivers/staging/most/aim-v4l2/Makefile b/drivers/staging/most/aim-v4l2/Makefile
new file mode 100644
index 000000000000..28aa948d6609
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AIM_V4L2) += aim_v4l2.o
+
+aim_v4l2-objs := video.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/media/video
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
new file mode 100644
index 000000000000..d9687910e4a4
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -0,0 +1,635 @@
+/*
+ * V4L2 AIM - V4L2 Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+
+#include "mostcore.h"
+
+
+#define V4L2_AIM_MAX_INPUT 1
+
+
+struct most_video_dev {
+ struct most_interface *iface;
+ int ch_idx;
+ struct list_head list;
+ bool mute;
+
+ struct list_head pending_mbos;
+ spinlock_t list_lock;
+
+ struct v4l2_device v4l2_dev;
+ atomic_t access_ref;
+ struct video_device *vdev;
+ unsigned int ctrl_input;
+
+ struct mutex lock;
+
+ wait_queue_head_t wait_data;
+};
+
+struct aim_fh {
+ /* must be the first field of this struct! */
+ struct v4l2_fh fh;
+ struct most_video_dev *mdev;
+ u32 offs;
+};
+
+
+static LIST_HEAD(video_devices);
+static spinlock_t list_lock;
+static struct most_aim aim_info;
+
+
+static inline bool data_ready(struct most_video_dev *mdev)
+{
+ return !list_empty(&mdev->pending_mbos);
+}
+
+static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+{
+ return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+}
+
+
+static int aim_vdev_open(struct file *filp)
+{
+ int ret;
+ struct video_device *vdev = video_devdata(filp);
+ struct most_video_dev *mdev = video_drvdata(filp);
+ struct aim_fh *fh;
+
+ pr_info("aim_vdev_open()\n");
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
+ if (!atomic_inc_and_test(&mdev->access_ref)) {
+ pr_err("too many clients\n");
+ ret = -EBUSY;
+ goto err_dec;
+ }
+
+ fh->mdev = mdev;
+ v4l2_fh_init(&fh->fh, vdev);
+ filp->private_data = fh;
+
+ v4l2_fh_add(&fh->fh);
+
+ ret = most_start_channel(mdev->iface, mdev->ch_idx);
+ if (ret) {
+ pr_err("most_start_channel() failed\n");
+ goto err_rm;
+ }
+
+ return 0;
+
+err_rm:
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+err_dec:
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return ret;
+}
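
The open-counting above works because access_ref is initialized to -1 when the channel is probed (see aim_probe_channel() below): atomic_inc_and_test() succeeds only for the increment that lands on zero, i.e. the first opener, and every failure or release path decrements again. A user-space sketch of the same accounting (plain C, kernel atomics replaced by an int):

	#include <stdio.h>

	static int access_ref = -1;	/* mirrors atomic_set(&ref, -1) */

	static int try_open(void)
	{
		if (++access_ref != 0) {	/* !atomic_inc_and_test() */
			--access_ref;		/* the err_dec path */
			return -1;		/* -EBUSY */
		}
		return 0;
	}

	static void do_close(void)
	{
		--access_ref;			/* atomic_dec() on release */
	}

	int main(void)
	{
		int a = try_open();	/* 0: the first opener wins */
		int b = try_open();	/* -1: busy for the second */

		printf("%d %d\n", a, b);
		do_close();		/* only the successful opener closes */
		return 0;
	}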
+
+static int aim_vdev_close(struct file *filp)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ struct mbo *mbo, *tmp;
+
+ pr_info("aim_vdev_close()\n");
+
+	/*
+	 * We need to put MBOs back before we call most_stop_channel()
+	 * to deallocate MBOs.
+	 * On the other hand, mostcore keeps calling rx_completion()
+	 * to deliver MBOs until most_stop_channel() is called.
+	 * Use the mute flag to work around this issue.
+	 * This must be fixed in the core.
+	 */
+
+ spin_lock(&mdev->list_lock);
+ mdev->mute = true;
+ list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+ list_del(&mbo->list);
+ spin_unlock(&mdev->list_lock);
+ most_put_mbo(mbo);
+ spin_lock(&mdev->list_lock);
+ }
+ spin_unlock(&mdev->list_lock);
+ most_stop_channel(mdev->iface, mdev->ch_idx);
+ mdev->mute = false;
+
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return 0;
+}
+
+static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ int ret = 0;
+
+ if (*pos)
+ return -ESPIPE;
+
+ if (!mdev)
+ return -ENODEV;
+
+ /* wait for the first buffer */
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+ return -ERESTARTSYS;
+ }
+
+ if (!data_ready(mdev))
+ return -EAGAIN;
+
+ while (count > 0 && data_ready(mdev)) {
+ struct mbo *const mbo = get_top_mbo(mdev);
+ int const rem = mbo->processed_length - fh->offs;
+ int const cnt = rem < count ? rem : count;
+
+ if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+ pr_err("read: copy_to_user failed\n");
+ if (!ret)
+ ret = -EFAULT;
+ return ret;
+ }
+
+ fh->offs += cnt;
+ count -= cnt;
+ buf += cnt;
+ ret += cnt;
+
+ if (cnt >= rem) {
+ fh->offs = 0;
+ spin_lock(&mdev->list_lock);
+ list_del(&mbo->list);
+ spin_unlock(&mdev->list_lock);
+ most_put_mbo(mbo);
+ }
+ }
+ return ret;
+}
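
Because the read path above returns data MBO by MBO and keeps the intra-buffer offset in fh->offs, a user-space consumer can simply read() the device node in a loop. A minimal sketch (the node name /dev/video0 is an assumption; the actual name depends on which video device number gets registered):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDONLY);	/* hypothetical node */
		char buf[188 * 2];	/* two TS packets, cf. sizeimage above */
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* forward the MPEG-TS stream */
		close(fd);
		return 0;
	}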
+
+static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+{
+ struct aim_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ unsigned int mask = 0;
+
+ /* only wait if no data is available */
+ if (!data_ready(mdev))
+ poll_wait(filp, &mdev->wait_data, wait);
+ if (data_ready(mdev))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static void aim_set_format_struct(struct v4l2_format *f)
+{
+ f->fmt.pix.width = 8;
+ f->fmt.pix.height = 8;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = 188 * 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
+}
+
+static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
+ struct v4l2_format *format)
+{
+#if 0
+ u32 const pixfmt = format->fmt.pix.pixelformat;
+ const char *fmt;
+
+ if (pixfmt != V4L2_PIX_FMT_MPEG) {
+ if (cmd == VIDIOC_TRY_FMT)
+ fmt = KERN_ERR "try %c%c%c%c failed\n";
+ else
+ fmt = KERN_ERR "set %c%c%c%c failed\n";
+ } else {
+ if (cmd == VIDIOC_TRY_FMT)
+ fmt = KERN_ERR "try %c%c%c%c\n";
+ else
+ fmt = KERN_ERR "set %c%c%c%c\n";
+ }
+ printk(fmt,
+ (pixfmt) & 255,
+ (pixfmt >> 8) & 255,
+ (pixfmt >> 16) & 255,
+ (pixfmt >> 24) & 255);
+#endif
+
+ if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+ return -EINVAL;
+
+ if (cmd == VIDIOC_TRY_FMT)
+ return 0;
+
+ aim_set_format_struct(format);
+
+ return 0;
+}
+
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ pr_info("vidioc_querycap()\n");
+
+ strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
+ strlcpy(cap->card, "my_card", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "%s", mdev->iface->description);
+
+ cap->capabilities =
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_VIDEO_CAPTURE;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ pr_info("vidioc_enum_fmt_vid_cap() %d\n", f->index);
+
+ if (f->index)
+ return -EINVAL;
+
+ strcpy(f->description, "MPEG");
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ pr_info("vidioc_g_fmt_vid_cap()\n");
+
+ aim_set_format_struct(f);
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return aim_set_format(mdev, VIDIOC_S_FMT, f);
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+{
+ pr_info("vidioc_g_std()\n");
+
+ *norm = V4L2_STD_UNKNOWN;
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ if (input->index >= V4L2_AIM_MAX_INPUT)
+ return -EINVAL;
+
+ strcpy(input->name, "MOST Video");
+ input->type |= V4L2_INPUT_TYPE_CAMERA;
+ input->audioset = 0;
+
+ input->std = mdev->vdev->tvnorms;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+ *i = mdev->ctrl_input;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ pr_info("vidioc_s_input(%d)\n", index);
+
+ if (index >= V4L2_AIM_MAX_INPUT)
+ return -EINVAL;
+ mdev->ctrl_input = index;
+ return 0;
+}
+
+static struct v4l2_file_operations aim_fops = {
+ .owner = THIS_MODULE,
+ .open = aim_vdev_open,
+ .release = aim_vdev_close,
+ .read = aim_vdev_read,
+ .poll = aim_vdev_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+};
+
+static const struct video_device aim_videodev_template = {
+ .fops = &aim_fops,
+ .release = video_device_release,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = V4L2_STD_UNKNOWN,
+};
+
+/**************************************************************************/
+
+static struct most_video_dev *get_aim_dev(
+ struct most_interface *iface, int channel_idx)
+{
+ struct most_video_dev *mdev, *tmp;
+
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+ spin_unlock(&list_lock);
+ return mdev;
+ }
+ }
+ spin_unlock(&list_lock);
+	return NULL;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+ struct most_video_dev *mdev =
+ get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
+
+ if (!mdev)
+ return -EIO;
+
+ spin_lock(&mdev->list_lock);
+ if (unlikely(mdev->mute)) {
+ spin_unlock(&mdev->list_lock);
+ return -EIO;
+ }
+
+ list_add_tail(&mbo->list, &mdev->pending_mbos);
+ spin_unlock(&mdev->list_lock);
+ wake_up_interruptible(&mdev->wait_data);
+ return 0;
+}
+
+static int aim_register_videodev(struct most_video_dev *mdev)
+{
+	int ret;
+
+ pr_info("aim_register_videodev()\n");
+
+ init_waitqueue_head(&mdev->wait_data);
+
+ /* allocate and fill v4l2 video struct */
+ mdev->vdev = video_device_alloc();
+ if (!mdev->vdev)
+ return -ENOMEM;
+
+ /* Fill the video capture device struct */
+ *mdev->vdev = aim_videodev_template;
+ mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+ mdev->vdev->lock = &mdev->lock;
+ strcpy(mdev->vdev->name, "most v4l2 aim video");
+
+ /* Register the v4l2 device */
+ video_set_drvdata(mdev->vdev, mdev);
+	ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		pr_err("video_register_device failed (%d)\n", ret);
+		ret = -ENODEV;
+		goto err_vbi_dev;
+	}
+
+ return 0;
+
+err_vbi_dev:
+ video_device_release(mdev->vdev);
+ return ret;
+}
+
+static void aim_unregister_videodev(struct most_video_dev *mdev)
+{
+ pr_info("aim_unregister_videodev()\n");
+
+ video_unregister_device(mdev->vdev);
+}
+
+
+static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct most_video_dev *mdev =
+ container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ kfree(mdev);
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg,
+ struct kobject *parent, char *name)
+{
+ int ret;
+ struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+ pr_info("aim_probe_channel()\n");
+
+ if (mdev) {
+ pr_err("channel already linked\n");
+ return -EEXIST;
+ }
+
+ if (ccfg->direction != MOST_CH_RX) {
+ pr_err("wrong direction, expect rx\n");
+ return -EINVAL;
+ }
+
+ if (ccfg->data_type != MOST_CH_SYNC &&
+ ccfg->data_type != MOST_CH_ISOC_AVP) {
+ pr_err("wrong channel type, expect sync or isoc_avp\n");
+ return -EINVAL;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mutex_init(&mdev->lock);
+ atomic_set(&mdev->access_ref, -1);
+ spin_lock_init(&mdev->list_lock);
+ INIT_LIST_HEAD(&mdev->pending_mbos);
+ mdev->iface = iface;
+ mdev->ch_idx = channel_idx;
+ mdev->v4l2_dev.release = aim_v4l2_dev_release;
+
+ /* Create the v4l2_device */
+ strlcpy(mdev->v4l2_dev.name, "most_video_device",
+ sizeof(mdev->v4l2_dev.name));
+ ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+ if (ret) {
+ pr_err("v4l2_device_register() failed\n");
+ kfree(mdev);
+ return ret;
+ }
+
+ ret = aim_register_videodev(mdev);
+ if (ret)
+ goto err_unreg;
+
+ spin_lock(&list_lock);
+ list_add(&mdev->list, &video_devices);
+ spin_unlock(&list_lock);
+ return 0;
+
+err_unreg:
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return ret;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+{
+ struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+ pr_info("aim_disconnect_channel()\n");
+
+ if (!mdev) {
+ pr_err("no such channel is linked\n");
+ return -ENOENT;
+ }
+
+ spin_lock(&list_lock);
+ list_del(&mdev->list);
+ spin_unlock(&list_lock);
+
+ aim_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return 0;
+}
+
+static int __init aim_init(void)
+{
+ spin_lock_init(&list_lock);
+
+ aim_info.name = "v4l";
+ aim_info.probe_channel = aim_probe_channel;
+ aim_info.disconnect_channel = aim_disconnect_channel;
+ aim_info.rx_completion = aim_rx_data;
+ return most_register_aim(&aim_info);
+}
+
+static void __exit aim_exit(void)
+{
+ struct most_video_dev *mdev, *tmp;
+
+	/*
+	 * As mostcore currently doesn't call disconnect_channel()
+	 * for linked channels when we call most_deregister_aim(),
+	 * we simulate that call here.
+	 * This must be fixed in the core.
+	 */
+ spin_lock(&list_lock);
+ list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ list_del(&mdev->list);
+ spin_unlock(&list_lock);
+
+ aim_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ spin_lock(&list_lock);
+ }
+ spin_unlock(&list_lock);
+
+ most_deregister_aim(&aim_info);
+ BUG_ON(!list_empty(&video_devices));
+}
+
+module_init(aim_init);
+module_exit(aim_exit);
+
+MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
new file mode 100644
index 000000000000..1d4ad1d67758
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -0,0 +1,16 @@
+#
+# MediaLB configuration
+#
+
+config HDM_DIM2
+ tristate "DIM2 HDM"
+ depends on AIM_NETWORK
+
+ ---help---
+	  Say Y here if you want to connect via MediaLB to a network
+	  transceiver. This device driver is platform dependent and needs
+	  an additional platform driver to be installed. For more
+	  information, contact the maintainer of this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hdm_dim2.
diff --git a/drivers/staging/most/hdm-dim2/Makefile b/drivers/staging/most/hdm-dim2/Makefile
new file mode 100644
index 000000000000..6bbee879a8ea
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HDM_DIM2) += hdm_dim2.o
+
+hdm_dim2-objs := dim2_hdm.o dim2_hal.o dim2_sysfs.o
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
new file mode 100644
index 000000000000..314f7de2be73
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -0,0 +1,67 @@
+/*
+ * dim2_errors.h - Definitions of errors for DIM2 HAL API
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _MOST_DIM_ERRORS_H
+#define _MOST_DIM_ERRORS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * MOST DIM errors.
+ */
+enum dim_errors_t {
+ /** Not an error */
+ DIM_NO_ERROR = 0,
+
+ /** Bad base address for DIM2 IP */
+ DIM_INIT_ERR_DIM_ADDR = 0x10,
+
+	/** Bad MediaLB clock */
+ DIM_INIT_ERR_MLB_CLOCK,
+
+ /** Bad channel address */
+ DIM_INIT_ERR_CHANNEL_ADDRESS,
+
+ /** Out of DBR memory */
+ DIM_INIT_ERR_OUT_OF_MEMORY,
+
+ /** DIM API is called while DIM is not initialized successfully */
+ DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
+
+ /**
+ * Configuration does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_CONFIG,
+
+ /**
+ * Buffer size does not respect hardware limitations
+ * for isochronous or synchronous channels
+ */
+ DIM_ERR_BAD_BUFFER_SIZE,
+
+ DIM_ERR_UNDERFLOW,
+
+ DIM_ERR_OVERFLOW,
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
new file mode 100644
index 000000000000..a54cf2cedac3
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -0,0 +1,919 @@
+/*
+ * dim2_hal.c - DIM2 HAL implementation
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#include "dim2_hal.h"
+#include "dim2_errors.h"
+#include "dim2_reg.h"
+#include <linux/stddef.h>
+
+
+/*
+ * The number of frames per sub-buffer for synchronous channels.
+ * Allowed values: 1, 2, 4, 8, 16, 32, 64.
+ */
+#define FRAMES_PER_SUBBUFF 16
+
+/*
+ * Size factor for synchronous DBR buffer.
+ * Minimal value is 4*FRAMES_PER_SUBBUFF.
+ */
+#define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)
+
+/*
+ * Size factor for isochronous DBR buffer.
+ * Minimal value is 3.
+ */
+#define ISOC_DBR_FACTOR 3u
+
+/*
+ * Number of 32-bit units for DBR map.
+ *
+ * 1: block size is 512, max allocation is 16K
+ * 2: block size is 256, max allocation is 8K
+ * 4: block size is 128, max allocation is 4K
+ * 8: block size is 64, max allocation is 2K
+ *
+ * Min allocated space is block size.
+ * Max possible allocated space is 32 blocks.
+ */
+#define DBR_MAP_SIZE 2
+
+
+/* -------------------------------------------------------------------------- */
+/* not configurable area */
+
+#define CDT 0x00
+#define ADT 0x40
+#define MLB_CAT 0x80
+#define AHB_CAT 0x88
+
+#define DBR_SIZE (16*1024) /* specified by IP */
+#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
+
+
+/* -------------------------------------------------------------------------- */
+/* generic helper functions and macros */
+
+#define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
+#define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)
+
+static inline u32 bit_mask(u8 position)
+{
+ return (u32)1 << position;
+}
+
+static inline bool dim_on_error(u8 error_id, const char *error_message)
+{
+ DIMCB_OnError(error_id, error_message);
+ return false;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* types and local variables */
+
+struct lld_global_vars_t {
+ bool dim_is_initialized;
+ bool mcm_is_initialized;
+ struct dim2_regs *dim2; /* DIM2 core base address */
+ u32 dbr_map[DBR_MAP_SIZE];
+};
+
+static struct lld_global_vars_t g = { false };
+
+
+/* -------------------------------------------------------------------------- */
+
+static int dbr_get_mask_size(u16 size)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (size <= (DBR_BLOCK_SIZE << i))
+ return 1 << i;
+ return 0;
+}
+
+/**
+ * Allocates DBR memory.
+ * @param size Size of the memory to allocate.
+ * @return Offset in DBR memory on success or DBR_SIZE if out of memory.
+ */
+static int alloc_dbr(u16 size)
+{
+ int mask_size;
+ int i, block_idx = 0;
+
+ if (size <= 0)
+ return DBR_SIZE; /* out of memory */
+
+ mask_size = dbr_get_mask_size(size);
+ if (mask_size == 0)
+ return DBR_SIZE; /* out of memory */
+
+ for (i = 0; i < DBR_MAP_SIZE; i++) {
+ u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 mask = ~((~(u32)0) << blocks);
+
+ do {
+ if ((g.dbr_map[i] & mask) == 0) {
+ g.dbr_map[i] |= mask;
+ return block_idx * DBR_BLOCK_SIZE;
+ }
+ block_idx += mask_size;
+			/* shift left in two steps to handle mask_size == 32 */
+ mask <<= mask_size - 1;
+ } while ((mask <<= 1) != 0);
+ }
+
+ return DBR_SIZE; /* out of memory */
+}
+
+static void free_dbr(int offs, int size)
+{
+ int block_idx = offs / DBR_BLOCK_SIZE;
+ u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 mask = ~((~(u32)0) << blocks);
+
+ mask <<= block_idx % 32;
+ g.dbr_map[block_idx / 32] &= ~mask;
+}
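
With DBR_MAP_SIZE == 2, DBR_BLOCK_SIZE works out to 16384 / 32 / 2 = 256 bytes, so every allocation above is rounded up to whole 256-byte blocks tracked as bit runs in dbr_map[]. A stand-alone check of that arithmetic (constants copied from the defines above):

	#include <stdio.h>

	#define DBR_SIZE	(16 * 1024)
	#define DBR_MAP_SIZE	2
	#define DBR_BLOCK_SIZE	(DBR_SIZE / 32 / DBR_MAP_SIZE)	/* 256 */

	int main(void)
	{
		unsigned int sizes[] = { 100, 256, 300, 2048 };
		unsigned int i;

		for (i = 0; i < 4; i++) {
			/* same rounding as alloc_dbr() */
			unsigned int blocks =
				(sizes[i] + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;

			printf("size %4u -> %u block(s), %u bytes reserved\n",
			       sizes[i], blocks, blocks * DBR_BLOCK_SIZE);
		}
		return 0;
	}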
+
+/* -------------------------------------------------------------------------- */
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+ DIMCB_IoWrite(&g.dim2->MADR, ctr_addr);
+
+ /* wait till transfer is completed */
+ while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
+ continue;
+
+ DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
+
+ return DIMCB_IoRead((&g.dim2->MDAT0) + mdat_idx);
+}
+
+static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
+{
+ enum { MADR_WNR_BIT = 31 };
+
+ DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
+
+ if (mask[0] != 0)
+ DIMCB_IoWrite(&g.dim2->MDAT0, value[0]);
+ if (mask[1] != 0)
+ DIMCB_IoWrite(&g.dim2->MDAT1, value[1]);
+ if (mask[2] != 0)
+ DIMCB_IoWrite(&g.dim2->MDAT2, value[2]);
+ if (mask[3] != 0)
+ DIMCB_IoWrite(&g.dim2->MDAT3, value[3]);
+
+ DIMCB_IoWrite(&g.dim2->MDWE0, mask[0]);
+ DIMCB_IoWrite(&g.dim2->MDWE1, mask[1]);
+ DIMCB_IoWrite(&g.dim2->MDWE2, mask[2]);
+ DIMCB_IoWrite(&g.dim2->MDWE3, mask[3]);
+
+ DIMCB_IoWrite(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);
+
+ /* wait till transfer is completed */
+ while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
+ continue;
+
+ DIMCB_IoWrite(&g.dim2->MCTL, 0); /* clear transfer complete */
+}
+
+static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
+{
+ u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static inline void dim2_clear_ctr(u32 ctr_addr)
+{
+ u32 const value[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ctr_addr, value);
+}
+
+static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
+ bool read_not_write, bool sync_mfe)
+{
+ u16 const cat =
+ (read_not_write << CAT_RNW_BIT) |
+ (ch_type << CAT_CT_SHIFT) |
+ (ch_addr << CAT_CL_SHIFT) |
+ (sync_mfe << CAT_MFE_BIT) |
+ (false << CAT_MT_BIT) |
+ (true << CAT_CE_BIT);
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ value[idx] = cat << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
+{
+ u8 const ctr_addr = cat_base + ch_addr / 8;
+ u8 const idx = (ch_addr % 8) / 2;
+ u8 const shift = (ch_addr % 2) * 16;
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 value[4] = { 0, 0, 0, 0 };
+
+ mask[idx] = (u32)0xFFFF << shift;
+ dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
+ u16 packet_length)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ if (packet_length)
+ cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
+
+ cdt[3] =
+ ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
+ (dbr_address << CDT3_BA_SHIFT);
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_clear_cdt(u8 ch_addr)
+{
+ u32 cdt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_configure_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ adt[0] =
+ (true << ADT0_CE_BIT) |
+ (true << ADT0_LE_BIT) |
+ (0 << ADT0_PG_BIT);
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_clear_adt(u8 ch_addr)
+{
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_PS_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_PS_BIT + shift)) |
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
+ u16 buffer_size)
+{
+ u8 const shift = idx * 16;
+
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_RDY_BIT + shift) |
+ (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+ adt[1] =
+ (true << (ADT1_RDY_BIT + shift)) |
+ ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+ mask[idx + 2] = 0xFFFFFFFF;
+ adt[idx + 2] = buf_addr;
+
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+
+static void dim2_clear_ctram(void)
+{
+ u32 ctr_addr;
+
+ for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
+ dim2_clear_ctr(ctr_addr);
+}
+
+static void dim2_configure_channel(
+ u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
+ u16 packet_length, bool sync_mfe)
+{
+ dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
+ dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);
+
+ dim2_configure_adt(ch_addr);
+ dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);
+
+ /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
+ DIMCB_IoWrite(&g.dim2->ACMR0,
+ DIMCB_IoRead(&g.dim2->ACMR0) | bit_mask(ch_addr));
+}
+
+static void dim2_clear_channel(u8 ch_addr)
+{
+ /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
+ DIMCB_IoWrite(&g.dim2->ACMR0,
+ DIMCB_IoRead(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
+
+ dim2_clear_cat(AHB_CAT, ch_addr);
+ dim2_clear_adt(ch_addr);
+
+ dim2_clear_cat(MLB_CAT, ch_addr);
+ dim2_clear_cdt(ch_addr);
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel state helpers */
+
+static void state_init(struct int_ch_state *state)
+{
+ state->request_counter = 0;
+ state->service_counter = 0;
+
+ state->idx1 = 0;
+ state->idx2 = 0;
+ state->level = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* macro helper functions */
+
+static inline bool check_channel_address(u32 ch_address)
+{
+ return ch_address > 0 && (ch_address % 2) == 0 &&
+ (ch_address / 2) <= (u32)CAT_CL_MASK;
+}
+
+static inline bool check_packet_length(u32 packet_length)
+{
+ u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
+
+ if (packet_length <= 0)
+ return false; /* too small */
+
+ if (packet_length > max_size)
+ return false; /* too big */
+
+ if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
+ return false; /* too big */
+
+ return true;
+}
+
+static inline bool check_bytes_per_frame(u32 bytes_per_frame)
+{
+ u16 const max_size = ((u16)CDT3_BD_MASK + 1u) / SYNC_DBR_FACTOR;
+
+ if (bytes_per_frame <= 0)
+ return false; /* too small */
+
+ if (bytes_per_frame > max_size)
+ return false; /* too big */
+
+ return true;
+}
+
+static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
+{
+ u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ return max_size;
+
+ return buf_size;
+}
+
+static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / packet_length;
+
+ if (n < 2u)
+ return 0; /* too small buffer for given packet_length */
+
+ return packet_length * n;
+}
+
+static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+ u16 n;
+ u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+ u32 const unit = bytes_per_frame * (u16)FRAMES_PER_SUBBUFF;
+
+ if (buf_size > max_size)
+ buf_size = max_size;
+
+ n = buf_size / unit;
+
+ if (n < 1u)
+ return 0; /* too small buffer for given bytes_per_frame */
+
+ return unit * n;
+}
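
The two helpers above round a requested buffer size down to a whole number of packets (isochronous) or sub-buffers (synchronous). With FRAMES_PER_SUBBUFF == 16 and 4 bytes per frame the synchronous unit is 64 bytes, so 1000 requested bytes normalize to 960; 1000 bytes with 188-byte isochronous packets normalize to 940. A stand-alone check of that arithmetic (the hardware maximum clamp is omitted for brevity):

	#include <stdio.h>

	/* mirrors norm_sync_buffer_size() without the max_size clamp */
	static unsigned int norm_sync(unsigned int buf, unsigned int bpf,
				      unsigned int fpsb)
	{
		unsigned int unit = bpf * fpsb;
		unsigned int n = buf / unit;

		return n < 1 ? 0 : n * unit;
	}

	/* mirrors norm_isoc_buffer_size() without the max_size clamp */
	static unsigned int norm_isoc(unsigned int buf, unsigned int plen)
	{
		unsigned int n = buf / plen;

		return n < 2 ? 0 : n * plen;
	}

	int main(void)
	{
		printf("sync: %u\n", norm_sync(1000, 4, 16));	/* 960 */
		printf("isoc: %u\n", norm_isoc(1000, 188));	/* 940 */
		return 0;
	}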
+
+static void dim2_cleanup(void)
+{
+ /* disable MediaLB */
+ DIMCB_IoWrite(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
+
+ dim2_clear_ctram();
+
+ /* disable mlb_int interrupt */
+ DIMCB_IoWrite(&g.dim2->MIEN, 0);
+
+ /* clear status for all dma channels */
+ DIMCB_IoWrite(&g.dim2->ACSR0, 0xFFFFFFFF);
+ DIMCB_IoWrite(&g.dim2->ACSR1, 0xFFFFFFFF);
+
+ /* mask interrupts for all channels */
+ DIMCB_IoWrite(&g.dim2->ACMR0, 0);
+ DIMCB_IoWrite(&g.dim2->ACMR1, 0);
+}
+
+static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
+{
+ dim2_cleanup();
+
+ /* configure and enable MediaLB */
+ DIMCB_IoWrite(&g.dim2->MLBC0,
+ enable_6pin << MLBC0_MLBPEN_BIT |
+ mlb_clock << MLBC0_MLBCLK_SHIFT |
+ MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF) << MLBC0_FCNT_SHIFT |
+ true << MLBC0_MLBEN_BIT);
+
+ /* activate all HBI channels */
+ DIMCB_IoWrite(&g.dim2->HCMR0, 0xFFFFFFFF);
+ DIMCB_IoWrite(&g.dim2->HCMR1, 0xFFFFFFFF);
+
+ /* enable HBI */
+ DIMCB_IoWrite(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
+
+ /* configure DMA */
+ DIMCB_IoWrite(&g.dim2->ACTL,
+ ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+ true << ACTL_SCE_BIT);
+
+#if 0
+ DIMCB_IoWrite(&g.dim2->MIEN,
+ bit_mask(MIEN_CTX_BREAK_BIT) |
+ bit_mask(MIEN_CTX_PE_BIT) |
+ bit_mask(MIEN_CTX_DONE_BIT) |
+ bit_mask(MIEN_CRX_BREAK_BIT) |
+ bit_mask(MIEN_CRX_PE_BIT) |
+ bit_mask(MIEN_CRX_DONE_BIT) |
+ bit_mask(MIEN_ATX_BREAK_BIT) |
+ bit_mask(MIEN_ATX_PE_BIT) |
+ bit_mask(MIEN_ATX_DONE_BIT) |
+ bit_mask(MIEN_ARX_BREAK_BIT) |
+ bit_mask(MIEN_ARX_PE_BIT) |
+ bit_mask(MIEN_ARX_DONE_BIT));
+#endif
+}
+
+static bool dim2_is_mlb_locked(void)
+{
+ u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
+ u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
+ bit_mask(MLBC1_LOCKERR_BIT);
+ u32 const c1 = DIMCB_IoRead(&g.dim2->MLBC1);
+ u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
+
+ DIMCB_IoWrite(&g.dim2->MLBC1, c1 & nda_mask);
+ return (DIMCB_IoRead(&g.dim2->MLBC1) & mask1) == 0 &&
+ (DIMCB_IoRead(&g.dim2->MLBC0) & mask0) != 0;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* channel help routines */
+
+static inline bool service_channel(u8 ch_addr, u8 idx)
+{
+ u8 const shift = idx * 16;
+ u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+
+ if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
+ return false;
+
+ {
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt_w[4] = { 0, 0, 0, 0 };
+
+ mask[1] =
+ bit_mask(ADT1_DNE_BIT + shift) |
+ bit_mask(ADT1_ERR_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift);
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
+ }
+
+ /* clear channel status bit */
+ DIMCB_IoWrite(&g.dim2->ACSR0, bit_mask(ch_addr));
+
+ return true;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* channel init routines */
+
+static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = packet_length;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = bytes_per_frame;
+ ch->done_sw_buffers_number = 0;
+}
+
+static void channel_init(struct dim_channel *ch, u8 ch_addr)
+{
+ state_init(&ch->state);
+
+ ch->addr = ch_addr;
+
+ ch->packet_length = 0;
+ ch->bytes_per_frame = 0;
+ ch->done_sw_buffers_number = 0;
+}
+
+/* returns true if channel interrupt state is cleared */
+static bool channel_service_interrupt(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (!service_channel(ch->addr, state->idx2))
+ return false;
+
+ state->idx2 ^= 1;
+ state->request_counter++;
+ return true;
+}
+
+static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (buf_size <= 0)
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
+
+ if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
+ buf_size != norm_ctrl_async_buffer_size(buf_size))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad control/async buffer size");
+
+ if (ch->packet_length &&
+ buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad isochronous buffer size");
+
+ if (ch->bytes_per_frame &&
+ buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
+ return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+ "Bad synchronous buffer size");
+
+ if (state->level >= 2u)
+ return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
+
+ ++state->level;
+
+ if (ch->packet_length || ch->bytes_per_frame)
+ dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
+ else
+ dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr, buf_size);
+ state->idx1 ^= 1;
+
+ return true;
+}
+
+static u8 channel_service(struct dim_channel *ch)
+{
+ struct int_ch_state *const state = &ch->state;
+
+ if (state->service_counter != state->request_counter) {
+ state->service_counter++;
+ if (state->level == 0)
+ return DIM_ERR_UNDERFLOW;
+
+ --state->level;
+ ch->done_sw_buffers_number++;
+ }
+
+ return DIM_NO_ERROR;
+}
+
+static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (buffers_number > ch->done_sw_buffers_number)
+ return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
+
+ ch->done_sw_buffers_number -= buffers_number;
+ return true;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* API */
+
+u8 DIM_Startup(void *dim_base_address, u32 mlb_clock)
+{
+ g.dim_is_initialized = false;
+
+ if (!dim_base_address)
+ return DIM_INIT_ERR_DIM_ADDR;
+
+ /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
+ /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
+ if (mlb_clock >= 8)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
+ g.dim2 = dim_base_address;
+ g.dbr_map[0] = g.dbr_map[1] = 0;
+
+ dim2_initialize(mlb_clock >= 3, mlb_clock);
+
+ g.dim_is_initialized = true;
+
+ return DIM_NO_ERROR;
+}
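
DIM_Startup() above is the single entry point for HAL bring-up: the HDM maps the DIM2 registers and hands the base address in together with one of the mlb_clk_speed values from dim2_hal.h. A minimal sketch of the call sequence (io_base and its ioremap() are platform specific and only assumed here):

	u8 hal_ret = DIM_Startup(io_base, CLK_3072FS);

	if (hal_ret != DIM_NO_ERROR) {
		pr_err("DIM_Startup failed: %d\n", hal_ret);
		return -ENODEV;
	}
	/*
	 * ... then DIM_InitControl()/DIM_InitAsync()/DIM_InitIsoc()/
	 * DIM_InitSync() per channel, DIM_DestroyChannel() on teardown,
	 * and DIM_Shutdown() when the interface goes away ...
	 */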
+
+void DIM_Shutdown(void)
+{
+ g.dim_is_initialized = false;
+ dim2_cleanup();
+}
+
+bool DIM_GetLockState(void)
+{
+ return dim2_is_mlb_locked();
+}
+
+static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
+ u16 ch_address, u16 hw_buffer_size)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ ch->dbr_size = hw_buffer_size;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ channel_init(ch, ch_address / 2);
+
+ dim2_configure_channel(ch->addr, type, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0, false);
+
+ return DIM_NO_ERROR;
+}
+
+u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size)
+{
+ return norm_ctrl_async_buffer_size(buf_size);
+}
+
+/**
+ * Retrieves maximal possible correct buffer size for isochronous data type
+ * conform to given packet length and not bigger than given buffer size.
+ *
+ * Returns non-zero correct buffer size or zero by error.
+ */
+u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length)
+{
+ if (!check_packet_length(packet_length))
+ return 0;
+
+ return norm_isoc_buffer_size(buf_size, packet_length);
+}
+
+/**
+ * Retrieves the maximum valid buffer size for the synchronous data type that
+ * conforms to the given bytes per frame and does not exceed the given buffer
+ * size.
+ *
+ * Returns a non-zero buffer size on success or zero on error.
+ */
+u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame)
+{
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return 0;
+
+ return norm_sync_buffer_size(buf_size, bytes_per_frame);
+}
+
+u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
+ max_buffer_size * 2);
+}
+
+u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size)
+{
+ return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+ max_buffer_size * 2);
+}
+
+u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_packet_length(packet_length))
+ return DIM_ERR_BAD_CONFIG;
+
+ ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ isoc_init(ch, ch_address / 2, packet_length);
+
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
+ ch->dbr_size, packet_length, false);
+
+ return DIM_NO_ERROR;
+}
+
+u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ if (!check_channel_address(ch_address))
+ return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+ if (!check_bytes_per_frame(bytes_per_frame))
+ return DIM_ERR_BAD_CONFIG;
+
+ ch->dbr_size = bytes_per_frame * SYNC_DBR_FACTOR;
+ ch->dbr_addr = alloc_dbr(ch->dbr_size);
+ if (ch->dbr_addr >= DBR_SIZE)
+ return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+ sync_init(ch, ch_address / 2, bytes_per_frame);
+
+ dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
+ ch->dbr_addr, ch->dbr_size, 0, true);
+
+ return DIM_NO_ERROR;
+}
+
+u8 DIM_DestroyChannel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ dim2_clear_channel(ch->addr);
+ if (ch->dbr_addr < DBR_SIZE)
+ free_dbr(ch->dbr_addr, ch->dbr_size);
+ ch->dbr_addr = DBR_SIZE;
+
+ return DIM_NO_ERROR;
+}
+
+void DIM_ServiceIrq(struct dim_channel *const *channels)
+{
+ bool state_changed;
+
+ if (!g.dim_is_initialized) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+ "DIM is not initialized");
+ return;
+ }
+
+ if (!channels) {
+ dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
+ return;
+ }
+
+	/*
+	 * Use a while loop and a flag to make sure the age is changed back
+	 * at least once; otherwise the interrupt may never come again if the
+	 * CPU generates an interrupt only on a change of the age.
+	 *
+	 * This loop runs no more than the number of channels, because the
+	 * service routine does not start the channel again.
+	 */
+ do {
+ struct dim_channel *const *ch = channels;
+
+ state_changed = false;
+
+ while (*ch) {
+ state_changed |= channel_service_interrupt(*ch);
+ ++ch;
+ }
+ } while (state_changed);
+
+ /* clear pending Interrupts */
+ DIMCB_IoWrite(&g.dim2->MS0, 0);
+ DIMCB_IoWrite(&g.dim2->MS1, 0);
+}
+
+u8 DIM_ServiceChannel(struct dim_channel *ch)
+{
+ if (!g.dim_is_initialized || !ch)
+ return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+ return channel_service(ch);
+}
+
+struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
+ struct dim_ch_state_t *state_ptr)
+{
+ if (!ch || !state_ptr)
+ return NULL;
+
+ state_ptr->ready = ch->state.level < 2;
+ state_ptr->done_buffers = ch->done_sw_buffers_number;
+
+ return state_ptr;
+}
+
+bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr, u16 buffer_size)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
+
+ return channel_start(ch, buffer_addr, buffer_size);
+}
+
+bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number)
+{
+ if (!ch)
+ return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
+
+ return channel_detach_buffers(ch, buffers_number);
+}
+
+u32 DIM_ReadRegister(u8 register_index)
+{
+ return DIMCB_IoRead((u32 *)g.dim2 + register_index);
+}
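
Taken together, the service API above is driven in three stages by the HDM: the interrupt handler acknowledges hardware state with DIM_ServiceIrq(), deferred work calls DIM_ServiceChannel() per channel, and completed buffers are counted with DIM_GetChannelState() and released with DIM_DetachBuffers() before the next buffer is queued with DIM_EnqueueBuffer(). A condensed sketch of that flow (locking omitted; active_channels, ch, dma_addr and size are assumed to be provided by the caller):

	struct dim_ch_state_t st;

	DIM_ServiceIrq(active_channels);		/* from the ISR */

	DIM_ServiceChannel(ch);				/* from deferred work */
	if (DIM_GetChannelState(ch, &st)->done_buffers)
		DIM_DetachBuffers(ch, st.done_buffers);	/* complete old buffers */
	if (DIM_GetChannelState(ch, &st)->ready)
		DIM_EnqueueBuffer(ch, dma_addr, size);	/* start the next one */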
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
new file mode 100644
index 000000000000..8929af9712ef
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -0,0 +1,124 @@
+/*
+ * dim2_hal.h - DIM2 HAL interface
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _DIM2_HAL_H
+#define _DIM2_HAL_H
+
+#include <linux/types.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The values below are specified in the hardware specification.
+ * So, they should not be changed until the hardware specification changes.
+ */
+enum mlb_clk_speed {
+ CLK_256FS = 0,
+ CLK_512FS = 1,
+ CLK_1024FS = 2,
+ CLK_2048FS = 3,
+ CLK_3072FS = 4,
+ CLK_4096FS = 5,
+ CLK_6144FS = 6,
+ CLK_8192FS = 7,
+};
+
+struct dim_ch_state_t {
+ bool ready; /* Shows readiness to enqueue next buffer */
+ u16 done_buffers; /* Number of completed buffers */
+};
+
+typedef int atomic_counter_t;
+
+struct int_ch_state {
+ /* changed only in interrupt context */
+ volatile atomic_counter_t request_counter;
+
+ /* changed only in task context */
+ volatile atomic_counter_t service_counter;
+
+ u8 idx1;
+ u8 idx2;
+ u8 level; /* [0..2], buffering level */
+};
+
+struct dim_channel {
+ struct int_ch_state state;
+ u8 addr;
+ u16 dbr_addr;
+ u16 dbr_size;
+	u16 packet_length; /**< Isochronous packet length in bytes. */
+	u16 bytes_per_frame; /**< Synchronous bytes per frame. */
+	u16 done_sw_buffers_number; /**< Number of completed software buffers. */
+};
+
+
+u8 DIM_Startup(void *dim_base_address, u32 mlb_clock);
+
+void DIM_Shutdown(void);
+
+bool DIM_GetLockState(void);
+
+u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size);
+
+u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length);
+
+u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame);
+
+u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 max_buffer_size);
+
+u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 packet_length);
+
+u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+ u16 bytes_per_frame);
+
+u8 DIM_DestroyChannel(struct dim_channel *ch);
+
+void DIM_ServiceIrq(struct dim_channel *const *channels);
+
+u8 DIM_ServiceChannel(struct dim_channel *ch);
+
+struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
+ struct dim_ch_state_t *dim_ch_state_ptr);
+
+bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr,
+ u16 buffer_size);
+
+bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number);
+
+u32 DIM_ReadRegister(u8 register_index);
+
+
+u32 DIMCB_IoRead(u32 *ptr32);
+
+void DIMCB_IoWrite(u32 *ptr32, u32 value);
+
+void DIMCB_OnError(u8 error_id, const char *error_message);
+
+void DIMCB_OnFail(const char *filename, int linenum);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
new file mode 100644
index 000000000000..6a5a3a2775f3
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -0,0 +1,964 @@
+/*
+ * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include <mostcore.h>
+#include <networking.h>
+#include "dim2_hal.h"
+#include "dim2_hdm.h"
+#include "dim2_errors.h"
+#include "dim2_sysfs.h"
+
+#define DMA_CHANNELS (32 - 1) /* channel 0 is a system channel */
+
+#define MAX_BUFFERS_PACKET 32
+#define MAX_BUFFERS_STREAMING 32
+#define MAX_BUF_SIZE_PACKET 2048
+#define MAX_BUF_SIZE_STREAMING (8*1024)
+
+/* command line parameter to select clock speed */
+static char *clock_speed;
+module_param(clock_speed, charp, 0);
+MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
+
+/*
+ * #############################################################################
+ *
+ * The define below activates a utility function used by HAL-simu
+ * for calling the DIM interrupt handler.
+ * It is used for TEST PURPOSES only and shall be commented out before release.
+ *
+ * #############################################################################
+ */
+/* #define ENABLE_HDM_TEST */
+
+static DEFINE_SPINLOCK(dim_lock);
+
+static void dim2_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+
+/**
+ * struct hdm_channel - private structure to keep channel specific data
+ * @name: channel name
+ * @is_initialized: identifier to know whether the channel is initialized
+ * @ch: HAL specific channel data
+ * @pending_list: list to keep MBO's before starting transfer
+ * @started_list: list to keep MBO's after starting transfer
+ * @direction: channel direction (TX or RX)
+ * @data_type: channel data type
+ */
+struct hdm_channel {
+ char name[sizeof "caNNN"];
+ bool is_initialized;
+ struct dim_channel ch;
+ struct list_head pending_list; /* before DIM_EnqueueBuffer() */
+ struct list_head started_list; /* after DIM_EnqueueBuffer() */
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+};
+
+/**
+ * struct dim2_hdm - private structure to keep interface specific data
+ * @hch: an array of channel specific data
+ * @most_iface: most interface structure
+ * @capabilities: an array of channel capability data
+ * @name: device name
+ * @io_base: I/O register base address
+ * @irq_ahb0: dim2 AHB0 irq number
+ * @clk_speed: user selectable (through command line parameter) clock speed
+ * @netinfo_task: thread to deliver network status
+ * @netinfo_waitq: waitq for the thread to sleep
+ * @deliver_netinfo: to identify whether network status received
+ * @mac_addrs: INIC mac address
+ * @link_state: network link state
+ * @atx_idx: index of async tx channel
+ * @bus: MediaLB bus instance
+ */
+struct dim2_hdm {
+ struct hdm_channel hch[DMA_CHANNELS];
+ struct most_channel_capability capabilities[DMA_CHANNELS];
+ struct most_interface most_iface;
+ char name[16 + sizeof "dim2-"];
+ void *io_base;
+ unsigned int irq_ahb0;
+ int clk_speed;
+ struct task_struct *netinfo_task;
+ wait_queue_head_t netinfo_waitq;
+ int deliver_netinfo;
+ unsigned char mac_addrs[6];
+ unsigned char link_state;
+ int atx_idx;
+ struct medialb_bus bus;
+};
+
+#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
+
+/* Macro to identify a network status message */
+#define PACKET_IS_NET_INFO(p) \
+ (((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
+ ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
+
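
The macro above recognizes a network-status message purely by six fixed bytes at offsets 1-3 and 13-15 of the received packet. For illustration, a minimal buffer that satisfies the predicate:

	u8 p[32] = { 0 };

	p[1] = 0x18; p[2] = 0x05; p[3] = 0x0C;
	p[13] = 0x3C; p[14] = 0x00; p[15] = 0x0A;
	/* PACKET_IS_NET_INFO(p) now evaluates to true */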
+#if defined(ENABLE_HDM_TEST)
+static struct dim2_hdm *test_dev;
+#endif
+
+bool dim2_sysfs_get_state_cb(void)
+{
+ bool state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ state = DIM_GetLockState();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return state;
+}
+
+/**
+ * DIMCB_IoRead - callback from HAL to read an I/O register
+ * @ptr32: register address
+ */
+u32 DIMCB_IoRead(u32 *ptr32)
+{
+ return __raw_readl(ptr32);
+}
+
+/**
+ * DIMCB_IoWrite - callback from HAL to write value to an I/O register
+ * @ptr32: register address
+ * @value: value to write
+ */
+void DIMCB_IoWrite(u32 *ptr32, u32 value)
+{
+ __raw_writel(value, ptr32);
+}
+
+/**
+ * DIMCB_OnError - callback from HAL to report miscommunication between
+ * HDM and HAL
+ * @error_id: Error ID
+ * @error_message: Error message. Some text in a free format
+ */
+void DIMCB_OnError(u8 error_id, const char *error_message)
+{
+ pr_err("DIMCB_OnError: error_id - %d, error_message - %s\n", error_id,
+ error_message);
+}
+
+/**
+ * DIMCB_OnFail - callback from HAL to report unrecoverable errors
+ * @filename: Source file where the error happened
+ * @linenum: Line number of the file where the error happened
+ */
+void DIMCB_OnFail(const char *filename, int linenum)
+{
+ pr_err("DIMCB_OnFail: file - %s, line no. - %d\n", filename, linenum);
+}
+
+/**
+ * startup_dim - initialize the dim2 interface
+ * @pdev: platform device
+ *
+ * Get the value of command line parameter "clock_speed" if given or use the
+ * default value, enable the clock and PLL, and initialize the dim2 interface.
+ */
+static int startup_dim(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ struct dim2_platform_data *pdata = pdev->dev.platform_data;
+ u8 hal_ret;
+
+ dev->clk_speed = -1;
+
+ if (clock_speed) {
+ if (!strcmp(clock_speed, "256fs"))
+ dev->clk_speed = CLK_256FS;
+ else if (!strcmp(clock_speed, "512fs"))
+ dev->clk_speed = CLK_512FS;
+ else if (!strcmp(clock_speed, "1024fs"))
+ dev->clk_speed = CLK_1024FS;
+ else if (!strcmp(clock_speed, "2048fs"))
+ dev->clk_speed = CLK_2048FS;
+ else if (!strcmp(clock_speed, "3072fs"))
+ dev->clk_speed = CLK_3072FS;
+ else if (!strcmp(clock_speed, "4096fs"))
+ dev->clk_speed = CLK_4096FS;
+ else if (!strcmp(clock_speed, "6144fs"))
+ dev->clk_speed = CLK_6144FS;
+ else if (!strcmp(clock_speed, "8192fs"))
+ dev->clk_speed = CLK_8192FS;
+ }
+
+	if (dev->clk_speed == -1) {
+		pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
+		dev->clk_speed = CLK_3072FS;
+	} else {
+		pr_info("Selected clock speed: %s\n", clock_speed);
+	}
+
+ if (pdata && pdata->init) {
+ int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
+
+ if (ret)
+ return ret;
+ }
+
+ hal_ret = DIM_Startup(dev->io_base, dev->clk_speed);
+ if (hal_ret != DIM_NO_ERROR) {
+ pr_err("DIM_Startup failed: %d\n", hal_ret);
+ if (pdata && pdata->destroy)
+ pdata->destroy(pdata);
+ return -ENODEV;
+ }
+
+ return 0;
+}
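
Note that clock_speed is a plain charp module parameter, so the speed is chosen at load time, e.g. "modprobe hdm_dim2 clock_speed=1024fs"; any value outside the eight recognized strings falls back to the 3072fs default selected above.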
+
+/**
+ * try_start_dim_transfer - try to transfer a buffer on a channel
+ * @hdm_ch: channel specific data
+ *
+ * Transfer a buffer from pending_list if the channel is ready
+ */
+static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
+{
+ u16 buf_size;
+ struct list_head *head = &hdm_ch->pending_list;
+ struct mbo *mbo;
+ unsigned long flags;
+ struct dim_ch_state_t st;
+
+	BUG_ON(!hdm_ch);
+	BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!DIM_GetChannelState(&hdm_ch->ch, &st)->ready) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
+ mbo = list_entry(head->next, struct mbo, list);
+ buf_size = mbo->buffer_length;
+
+	BUG_ON(!mbo->bus_address);
+ if (!DIM_EnqueueBuffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ mbo->complete(mbo);
+ return -EFAULT;
+ }
+
+ list_move_tail(head->next, &hdm_ch->started_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * deliver_netinfo_thread - thread to deliver network status to mostcore
+ * @data: private data
+ *
+ * Wait for network status and deliver it to mostcore once it is received
+ */
+static int deliver_netinfo_thread(void *data)
+{
+ struct dim2_hdm *dev = (struct dim2_hdm *)data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(dev->netinfo_waitq,
+ dev->deliver_netinfo ||
+ kthread_should_stop());
+
+ if (dev->deliver_netinfo) {
+ dev->deliver_netinfo--;
+ most_deliver_netinfo(&dev->most_iface, dev->link_state,
+ dev->mac_addrs);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * retrieve_netinfo - retrieve network status from received buffer
+ * @dev: private data
+ * @mbo: received MBO
+ *
+ * Parse the message in buffer and get node address, link state, MAC address.
+ * Wake up a thread to deliver this status to mostcore
+ */
+static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
+{
+ u8 *data = mbo->virt_address;
+ u8 *mac = dev->mac_addrs;
+
+ pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
+ dev->link_state = data[18];
+ pr_info("NIState: %d\n", dev->link_state);
+ memcpy(mac, data + 19, 6);
+ pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ dev->deliver_netinfo++;
+ wake_up_interruptible(&dev->netinfo_waitq);
+}
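+
+/*
+ * Layout of the NetInfo message as assumed by retrieve_netinfo() above
+ * (offsets taken from the code, not from a specification):
+ *
+ *   data[16..17]  node address, big endian
+ *   data[18]      network interface state (NIState)
+ *   data[19..24]  MAC address
+ */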
+
+/**
+ * service_done_flag - handle completed buffers
+ * @dev: private data
+ * @ch_idx: channel index
+ *
+ * Return the completed buffers to mostcore using the completion callback.
+ */
+static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
+{
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ struct dim_ch_state_t st;
+ struct list_head *head;
+ struct mbo *mbo;
+ int done_buffers;
+ unsigned long flags;
+ u8 *data;
+
+	BUG_ON(!hdm_ch);
+ BUG_ON(!hdm_ch->is_initialized);
+
+ spin_lock_irqsave(&dim_lock, flags);
+
+ done_buffers = DIM_GetChannelState(&hdm_ch->ch, &st)->done_buffers;
+ if (!done_buffers) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+
+ if (!DIM_DetachBuffers(&hdm_ch->ch, done_buffers)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ head = &hdm_ch->started_list;
+
+ while (done_buffers) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_crit("hard error: started_mbo list is empty "
+ "whereas DIM2 has sent buffers\n");
+ break;
+ }
+
+ mbo = list_entry(head->next, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ data = mbo->virt_address;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_RX &&
+ PACKET_IS_NET_INFO(data)) {
+
+ retrieve_netinfo(dev, mbo);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+ } else {
+ if (hdm_ch->data_type == MOST_CH_CONTROL ||
+ hdm_ch->data_type == MOST_CH_ASYNC) {
+
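+				/*
+				 * The first two payload bytes are assumed to
+				 * carry the Port Message Length (PML); the
+				 * total message size is then PML + 2 bytes
+				 * for the PML field itself.
+				 */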
+ u32 const data_size =
+ (u32)data[0] * 256 + data[1] + 2;
+
+ mbo->processed_length =
+ min(data_size, (u32)mbo->buffer_length);
+ } else {
+ mbo->processed_length = mbo->buffer_length;
+ }
+ mbo->status = MBO_SUCCESS;
+ mbo->complete(mbo);
+ }
+
+ done_buffers--;
+ }
+}
+
+static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
+ struct dim_channel **buffer)
+{
+ int idx = 0;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (dev->hch[ch_idx].is_initialized)
+ buffer[idx++] = &dev->hch[ch_idx].ch;
+ }
+	buffer[idx++] = NULL;
+
+ return buffer;
+}
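+
+/*
+ * Note: the returned array is NULL-terminated; DIM_ServiceIrq() below is
+ * assumed to iterate until that terminating entry.
+ */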
+
+/**
+ * dim2_tasklet_fn - tasklet function
+ * @data: private data
+ *
+ * Service each initialized channel, if needed
+ */
+static void dim2_tasklet_fn(unsigned long data)
+{
+ struct dim2_hdm *dev = (struct dim2_hdm *)data;
+ unsigned long flags;
+ int ch_idx;
+
+ for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+ if (!dev->hch[ch_idx].is_initialized)
+ continue;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ DIM_ServiceChannel(&(dev->hch[ch_idx].ch));
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ service_done_flag(dev, ch_idx);
+ while (!try_start_dim_transfer(dev->hch + ch_idx))
+ continue;
+ }
+}
+
+/**
+ * dim2_ahb_isr - interrupt service routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Acknowledge the interrupt and schedule a tasklet to service channels.
+ * Return IRQ_HANDLED.
+ */
+static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = (struct dim2_hdm *)_dev;
+ struct dim_channel *buffer[DMA_CHANNELS + 1];
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ DIM_ServiceIrq(get_active_channels(dev, buffer));
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+#if !defined(ENABLE_HDM_TEST)
+ dim2_tasklet.data = (unsigned long)dev;
+ tasklet_schedule(&dim2_tasklet);
+#else
+ dim2_tasklet_fn((unsigned long)dev);
+#endif
+ return IRQ_HANDLED;
+}
+
+#if defined(ENABLE_HDM_TEST)
+
+/*
+ * Utility function used by HAL-simu for calling the DIM interrupt handler.
+ * It is used for test purposes only.
+ */
+void raise_dim_interrupt(void)
+{
+ (void)dim2_ahb_isr(0, test_dev);
+}
+#endif
+
+/**
+ * complete_all_mbos - complete MBOs in a list
+ * @head: list head
+ *
+ * Delete all entries from the list and return the MBOs to mostcore using
+ * the completion callback.
+ */
+static void complete_all_mbos(struct list_head *head)
+{
+ unsigned long flags;
+ struct mbo *mbo;
+
+ for (;;) {
+ spin_lock_irqsave(&dim_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ break;
+ }
+
+ mbo = list_entry(head->next, struct mbo, list);
+ list_del(head->next);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_CLOSE;
+ mbo->complete(mbo);
+ }
+}
+
+/**
+ * configure_channel - initialize a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @ccfg: structure that holds the configuration information
+ *
+ * Receives configuration information from mostcore and initializes the
+ * corresponding channel. Return 0 on success, negative on failure.
+ */
+static int configure_channel(struct most_interface *most_iface, int ch_idx,
+ struct most_channel_config *ccfg)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ bool const is_tx = ccfg->direction == MOST_CH_TX;
+ u16 const sub_size = ccfg->subbuffer_size;
+ u16 const buf_size = ccfg->buffer_size;
+ u16 new_size;
+ unsigned long flags;
+ u8 hal_ret;
+ int const ch_addr = ch_idx * 2 + 2;
+ struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (hdm_ch->is_initialized)
+ return -EPERM;
+
+ switch (ccfg->data_type) {
+ case MOST_CH_CONTROL:
+ new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = DIM_InitControl(&hdm_ch->ch, is_tx, ch_addr, buf_size);
+ break;
+ case MOST_CH_ASYNC:
+ new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
+ if (new_size == 0) {
+ pr_err("%s: too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = DIM_InitAsync(&hdm_ch->ch, is_tx, ch_addr, buf_size);
+ break;
+ case MOST_CH_ISOC_AVP:
+ new_size = DIM_NormIsocBufferSize(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or "
+ "too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = DIM_InitIsoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ case MOST_CH_SYNC:
+ new_size = DIM_NormSyncBufferSize(buf_size, sub_size);
+ if (new_size == 0) {
+ pr_err("%s: invalid sub-buffer size or "
+ "too small buffer size\n", hdm_ch->name);
+ return -EINVAL;
+ }
+ ccfg->buffer_size = new_size;
+ if (new_size != buf_size)
+ pr_warn("%s: fixed buffer size (%d -> %d)\n",
+ hdm_ch->name, buf_size, new_size);
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = DIM_InitSync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+ break;
+ default:
+ pr_err("%s: configure failed, bad channel type: %d\n",
+ hdm_ch->name, ccfg->data_type);
+ return -EINVAL;
+ }
+
+ if (hal_ret != DIM_NO_ERROR) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
+ hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
+ return -ENODEV;
+ }
+
+ hdm_ch->data_type = ccfg->data_type;
+ hdm_ch->direction = ccfg->direction;
+ hdm_ch->is_initialized = true;
+
+ if (hdm_ch->data_type == MOST_CH_ASYNC &&
+ hdm_ch->direction == MOST_CH_TX &&
+ dev->atx_idx < 0)
+ dev->atx_idx = ch_idx;
+
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * enqueue - enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Push the buffer into pending_list and try to transfer one buffer from
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int enqueue(struct most_interface *most_iface, int ch_idx,
+ struct mbo *mbo)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ if (mbo->bus_address == 0)
+ return -EFAULT;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ list_add_tail(&mbo->list, &hdm_ch->pending_list);
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ (void)try_start_dim_transfer(hdm_ch);
+
+ return 0;
+}
+
+/**
+ * request_netinfo - trigger retrieving of network info
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel index
+ *
+ * Send a command to the INIC that triggers retrieving of network info by
+ * means of "Message exchange over MDP/MEP".
+ */
+static void request_netinfo(struct most_interface *most_iface, int ch_idx)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct mbo *mbo;
+ u8 *data;
+
+ if (dev->atx_idx < 0) {
+ pr_err("Async Tx Not initialized\n");
+ return;
+ }
+
+ mbo = most_get_mbo(&dev->most_iface, dev->atx_idx);
+ if (!mbo)
+ return;
+
+ mbo->buffer_length = 5;
+
+ data = mbo->virt_address;
+
+ data[0] = 0x00; /* PML High byte */
+ data[1] = 0x03; /* PML Low byte */
+ data[2] = 0x02; /* PMHL */
+ data[3] = 0x08; /* FPH */
+ data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
+
+ most_submit_mbo(mbo);
+}
+
+/**
+ * poison_channel - poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Destroy a channel and complete all the buffers in both started_list and
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int poison_channel(struct most_interface *most_iface, int ch_idx)
+{
+ struct dim2_hdm *dev = iface_to_hdm(most_iface);
+ struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+ unsigned long flags;
+ u8 hal_ret;
+ int ret = 0;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+ if (!hdm_ch->is_initialized)
+ return -EPERM;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ hal_ret = DIM_DestroyChannel(&hdm_ch->ch);
+ hdm_ch->is_initialized = false;
+ if (ch_idx == dev->atx_idx)
+ dev->atx_idx = -1;
+ spin_unlock_irqrestore(&dim_lock, flags);
+ if (hal_ret != DIM_NO_ERROR) {
+ pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
+ ret = -EFAULT;
+ }
+
+ complete_all_mbos(&hdm_ch->started_list);
+ complete_all_mbos(&hdm_ch->pending_list);
+
+ return ret;
+}
+
+/**
+ * dim2_probe - dim2 probe handler
+ * @pdev: platform device structure
+ *
+ * Register the dim2 interface with mostcore and initialize it.
+ * Return 0 on success, negative on failure.
+ */
+static int dim2_probe(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev;
+ struct resource *res;
+ int ret, i;
+ struct kobject *kobj;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->atx_idx = -1;
+
+ platform_set_drvdata(pdev, dev);
+#if defined(ENABLE_HDM_TEST)
+ test_dev = dev;
+#else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("no memory region defined\n");
+ ret = -ENOENT;
+ goto err_free_dev;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ pr_err("failed to request mem region\n");
+ ret = -EBUSY;
+ goto err_free_dev;
+ }
+
+ dev->io_base = ioremap(res->start, resource_size(res));
+ if (!dev->io_base) {
+ pr_err("failed to ioremap\n");
+ ret = -ENOMEM;
+ goto err_release_mem;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ pr_err("failed to get irq\n");
+ goto err_unmap_io;
+ }
+ dev->irq_ahb0 = ret;
+
+ ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
+ if (ret) {
+ pr_err("failed to request IRQ: %d, err: %d\n", dev->irq_ahb0, ret);
+ goto err_unmap_io;
+ }
+#endif
+ init_waitqueue_head(&dev->netinfo_waitq);
+ dev->deliver_netinfo = 0;
+ dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
+ "dim2_netinfo");
+ if (IS_ERR(dev->netinfo_task)) {
+ ret = PTR_ERR(dev->netinfo_task);
+ goto err_free_irq;
+ }
+
+ for (i = 0; i < DMA_CHANNELS; i++) {
+ struct most_channel_capability *cap = dev->capabilities + i;
+ struct hdm_channel *hdm_ch = dev->hch + i;
+
+ INIT_LIST_HEAD(&hdm_ch->pending_list);
+ INIT_LIST_HEAD(&hdm_ch->started_list);
+ hdm_ch->is_initialized = false;
+ snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
+
+ cap->name_suffix = hdm_ch->name;
+ cap->direction = MOST_CH_RX | MOST_CH_TX;
+ cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+ MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ cap->num_buffers_packet = MAX_BUFFERS_PACKET;
+ cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
+ cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
+ cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
+ }
+
+	snprintf(dev->name, sizeof(dev->name), "dim2-%016llx",
+		 (unsigned long long)res->start);
+
+ dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
+ dev->most_iface.description = dev->name;
+ dev->most_iface.num_channels = DMA_CHANNELS;
+ dev->most_iface.channel_vector = dev->capabilities;
+ dev->most_iface.configure = configure_channel;
+ dev->most_iface.enqueue = enqueue;
+ dev->most_iface.poison_channel = poison_channel;
+ dev->most_iface.request_netinfo = request_netinfo;
+
+ kobj = most_register_interface(&dev->most_iface);
+ if (IS_ERR(kobj)) {
+ ret = PTR_ERR(kobj);
+ pr_err("failed to register MOST interface\n");
+ goto err_stop_thread;
+ }
+
+ ret = dim2_sysfs_probe(&dev->bus, kobj);
+ if (ret)
+ goto err_unreg_iface;
+
+ ret = startup_dim(pdev);
+ if (ret) {
+ pr_err("failed to initialize DIM2\n");
+ goto err_destroy_bus;
+ }
+
+ return 0;
+
+err_destroy_bus:
+ dim2_sysfs_destroy(&dev->bus);
+err_unreg_iface:
+ most_deregister_interface(&dev->most_iface);
+err_stop_thread:
+ kthread_stop(dev->netinfo_task);
+err_free_irq:
+#if !defined(ENABLE_HDM_TEST)
+ free_irq(dev->irq_ahb0, dev);
+err_unmap_io:
+ iounmap(dev->io_base);
+err_release_mem:
+ release_mem_region(res->start, resource_size(res));
+err_free_dev:
+#endif
+ kfree(dev);
+
+ return ret;
+}
+
+/**
+ * dim2_remove - dim2 remove handler
+ * @pdev: platform device structure
+ *
+ * Unregister the interface from mostcore
+ */
+static int dim2_remove(struct platform_device *pdev)
+{
+ struct dim2_hdm *dev = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct dim2_platform_data *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ DIM_Shutdown();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (pdata && pdata->destroy)
+ pdata->destroy(pdata);
+
+ dim2_sysfs_destroy(&dev->bus);
+ most_deregister_interface(&dev->most_iface);
+ kthread_stop(dev->netinfo_task);
+#if !defined(ENABLE_HDM_TEST)
+ free_irq(dev->irq_ahb0, dev);
+ iounmap(dev->io_base);
+ release_mem_region(res->start, resource_size(res));
+#endif
+ kfree(dev);
+ platform_set_drvdata(pdev, NULL);
+
+	/*
+	 * break the link to the local platform_device_id struct
+	 * to prevent a crash when the platform device module is unloaded
+	 */
+	pdev->id_entry = NULL;
+
+ return 0;
+}
+
+static const struct platform_device_id dim2_id[] = {
+ { "medialb_dim2" },
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(platform, dim2_id);
+
+static struct platform_driver dim2_driver = {
+ .probe = dim2_probe,
+ .remove = dim2_remove,
+ .id_table = dim2_id,
+ .driver = {
+ .name = "hdm_dim2",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**
+ * dim2_hdm_init - Driver Registration Routine
+ */
+static int __init dim2_hdm_init(void)
+{
+ pr_info("dim2_hdm_init()\n");
+ return platform_driver_register(&dim2_driver);
+}
+
+/**
+ * dim2_hdm_exit - Driver Cleanup Routine
+ */
+static void __exit dim2_hdm_exit(void)
+{
+ pr_info("dim2_hdm_exit()\n");
+ platform_driver_unregister(&dim2_driver);
+}
+
+module_init(dim2_hdm_init);
+module_exit(dim2_hdm_exit);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
new file mode 100644
index 000000000000..6e6883232809
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h
@@ -0,0 +1,26 @@
+/*
+ * dim2_hdm.h - MediaLB DIM2 HDM Header
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_HDM_H
+#define DIM2_HDM_H
+
+struct device;
+
+/* platform dependent data for dim2 interface */
+struct dim2_platform_data {
+ int (*init)(struct dim2_platform_data *pd, void *io_base, int clk_speed);
+ void (*destroy)(struct dim2_platform_data *pd);
+ void *priv;
+};
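+
+/*
+ * A minimal sketch (hypothetical board code, not part of this driver) of
+ * how platform code might supply this structure; the actual clock and pin
+ * setup is SoC specific:
+ *
+ *   static int board_dim2_init(struct dim2_platform_data *pd,
+ *                              void *io_base, int clk_speed)
+ *   {
+ *           ... enable MediaLB clock, configure pins ...
+ *           return 0;
+ *   }
+ *
+ *   static struct dim2_platform_data board_dim2_pdata = {
+ *           .init = board_dim2_init,
+ *   };
+ */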
+
+#endif /* DIM2_HDM_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
new file mode 100644
index 000000000000..476f66f4c566
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -0,0 +1,176 @@
+/*
+ * dim2_reg.h - Definitions for registers of DIM2
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_OS62420_H
+#define DIM2_OS62420_H
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct dim2_regs {
+ /* 0x00 */ u32 MLBC0;
+ /* 0x01 */ u32 rsvd0[1];
+ /* 0x02 */ u32 MLBPC0;
+ /* 0x03 */ u32 MS0;
+ /* 0x04 */ u32 rsvd1[1];
+ /* 0x05 */ u32 MS1;
+ /* 0x06 */ u32 rsvd2[2];
+ /* 0x08 */ u32 MSS;
+ /* 0x09 */ u32 MSD;
+ /* 0x0A */ u32 rsvd3[1];
+ /* 0x0B */ u32 MIEN;
+ /* 0x0C */ u32 rsvd4[1];
+ /* 0x0D */ u32 MLBPC2;
+ /* 0x0E */ u32 MLBPC1;
+ /* 0x0F */ u32 MLBC1;
+ /* 0x10 */ u32 rsvd5[0x10];
+ /* 0x20 */ u32 HCTL;
+ /* 0x21 */ u32 rsvd6[1];
+ /* 0x22 */ u32 HCMR0;
+ /* 0x23 */ u32 HCMR1;
+ /* 0x24 */ u32 HCER0;
+ /* 0x25 */ u32 HCER1;
+ /* 0x26 */ u32 HCBR0;
+ /* 0x27 */ u32 HCBR1;
+ /* 0x28 */ u32 rsvd7[8];
+ /* 0x30 */ u32 MDAT0;
+ /* 0x31 */ u32 MDAT1;
+ /* 0x32 */ u32 MDAT2;
+ /* 0x33 */ u32 MDAT3;
+ /* 0x34 */ u32 MDWE0;
+ /* 0x35 */ u32 MDWE1;
+ /* 0x36 */ u32 MDWE2;
+ /* 0x37 */ u32 MDWE3;
+ /* 0x38 */ u32 MCTL;
+ /* 0x39 */ u32 MADR;
+ /* 0x3A */ u32 rsvd8[0xB6];
+ /* 0xF0 */ u32 ACTL;
+ /* 0xF1 */ u32 rsvd9[3];
+ /* 0xF4 */ u32 ACSR0;
+ /* 0xF5 */ u32 ACSR1;
+ /* 0xF6 */ u32 ACMR0;
+ /* 0xF7 */ u32 ACMR1;
+};
+
+
+#define DIM2_MASK(n) (~((~(u32)0)<<(n)))
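+/* e.g. DIM2_MASK(6) == 0x3F, DIM2_MASK(9) == 0x1FF */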
+
+enum {
+ MLBC0_MLBLK_BIT = 7,
+
+ MLBC0_MLBPEN_BIT = 5,
+
+ MLBC0_MLBCLK_SHIFT = 2,
+ MLBC0_MLBCLK_VAL_256FS = 0,
+ MLBC0_MLBCLK_VAL_512FS = 1,
+ MLBC0_MLBCLK_VAL_1024FS = 2,
+ MLBC0_MLBCLK_VAL_2048FS = 3,
+
+ MLBC0_FCNT_SHIFT = 15,
+ MLBC0_FCNT_MASK = 7,
+ MLBC0_FCNT_VAL_1FPSB = 0,
+ MLBC0_FCNT_VAL_2FPSB = 1,
+ MLBC0_FCNT_VAL_4FPSB = 2,
+ MLBC0_FCNT_VAL_8FPSB = 3,
+ MLBC0_FCNT_VAL_16FPSB = 4,
+ MLBC0_FCNT_VAL_32FPSB = 5,
+ MLBC0_FCNT_VAL_64FPSB = 6,
+
+ MLBC0_MLBEN_BIT = 0,
+
+ MIEN_CTX_BREAK_BIT = 29,
+ MIEN_CTX_PE_BIT = 28,
+ MIEN_CTX_DONE_BIT = 27,
+
+ MIEN_CRX_BREAK_BIT = 26,
+ MIEN_CRX_PE_BIT = 25,
+ MIEN_CRX_DONE_BIT = 24,
+
+ MIEN_ATX_BREAK_BIT = 22,
+ MIEN_ATX_PE_BIT = 21,
+ MIEN_ATX_DONE_BIT = 20,
+
+ MIEN_ARX_BREAK_BIT = 19,
+ MIEN_ARX_PE_BIT = 18,
+ MIEN_ARX_DONE_BIT = 17,
+
+ MIEN_SYNC_PE_BIT = 16,
+
+ MIEN_ISOC_BUFO_BIT = 1,
+ MIEN_ISOC_PE_BIT = 0,
+
+ MLBC1_NDA_SHIFT = 8,
+ MLBC1_NDA_MASK = 0xFF,
+
+ MLBC1_CLKMERR_BIT = 7,
+ MLBC1_LOCKERR_BIT = 6,
+
+ ACTL_DMA_MODE_BIT = 2,
+ ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
+ ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
+ ACTL_SCE_BIT = 0,
+
+ HCTL_EN_BIT = 15
+};
+
+enum {
+ CDT1_BS_ISOC_SHIFT = 0,
+ CDT1_BS_ISOC_MASK = DIM2_MASK(9),
+
+ CDT3_BD_SHIFT = 0,
+ CDT3_BD_MASK = DIM2_MASK(12),
+ CDT3_BD_ISOC_MASK = DIM2_MASK(13),
+ CDT3_BA_SHIFT = 16,
+
+ ADT0_CE_BIT = 15,
+ ADT0_LE_BIT = 14,
+ ADT0_PG_BIT = 13,
+
+ ADT1_RDY_BIT = 15,
+ ADT1_DNE_BIT = 14,
+ ADT1_ERR_BIT = 13,
+ ADT1_PS_BIT = 12,
+ ADT1_MEP_BIT = 11,
+ ADT1_BD_SHIFT = 0,
+ ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
+ ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
+
+ CAT_MFE_BIT = 14,
+
+ CAT_MT_BIT = 13,
+
+ CAT_RNW_BIT = 12,
+
+ CAT_CE_BIT = 11,
+
+ CAT_CT_SHIFT = 8,
+ CAT_CT_VAL_SYNC = 0,
+ CAT_CT_VAL_CONTROL = 1,
+ CAT_CT_VAL_ASYNC = 2,
+ CAT_CT_VAL_ISOC = 3,
+
+ CAT_CL_SHIFT = 0,
+ CAT_CL_MASK = DIM2_MASK(6)
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
new file mode 100644
index 000000000000..8e331a286fc3
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -0,0 +1,116 @@
+/*
+ * dim2_sysfs.c - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include "dim2_sysfs.h"
+
+struct bus_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct medialb_bus *bus, char *buf);
+ ssize_t (*store)(struct medialb_bus *bus, const char *buf, size_t count);
+};
+
+static ssize_t state_show(struct medialb_bus *bus, char *buf)
+{
+ bool state = dim2_sysfs_get_state_cb();
+
+ return sprintf(buf, "%s\n", state ? "locked" : "");
+}
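+
+/*
+ * Userspace sketch (the exact path depends on where mostcore registers the
+ * parent kobject): reading the attribute created below yields "locked" or
+ * an empty string, e.g.
+ *
+ *   cat /sys/devices/.../bus/state
+ */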
+
+static struct bus_attr state_attr = __ATTR_RO(state);
+
+static struct attribute *bus_default_attrs[] = {
+ &state_attr.attr,
+ NULL,
+};
+
+static struct attribute_group bus_attr_group = {
+ .attrs = bus_default_attrs,
+};
+
+static void bus_kobj_release(struct kobject *kobj)
+{
+}
+
+static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct medialb_bus *bus =
+ container_of(kobj, struct medialb_bus, kobj_group);
+ struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+ if (!xattr->show)
+ return -EIO;
+
+ return xattr->show(bus, buf);
+}
+
+static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t ret;
+ struct medialb_bus *bus =
+ container_of(kobj, struct medialb_bus, kobj_group);
+ struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+ if (!xattr->store)
+ return -EIO;
+
+ ret = xattr->store(bus, buf, count);
+ return ret;
+}
+
+static struct sysfs_ops const bus_kobj_sysfs_ops = {
+ .show = bus_kobj_attr_show,
+ .store = bus_kobj_attr_store,
+};
+
+static struct kobj_type bus_ktype = {
+ .release = bus_kobj_release,
+ .sysfs_ops = &bus_kobj_sysfs_ops,
+};
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
+{
+ int err;
+
+ kobject_init(&bus->kobj_group, &bus_ktype);
+ err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
+ if (err) {
+ pr_err("kobject_add() failed: %d\n", err);
+ goto err_kobject_add;
+ }
+
+ err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group() failed: %d\n", err);
+ goto err_create_group;
+ }
+
+ return 0;
+
+err_create_group:
+ kobject_put(&bus->kobj_group);
+
+err_kobject_add:
+ return err;
+}
+
+void dim2_sysfs_destroy(struct medialb_bus *bus)
+{
+ kobject_put(&bus->kobj_group);
+}
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.h b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
new file mode 100644
index 000000000000..e719691035b0
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
@@ -0,0 +1,39 @@
+/*
+ * dim2_sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#ifndef DIM2_SYSFS_H
+#define DIM2_SYSFS_H
+
+
+#include <linux/kobject.h>
+
+
+struct medialb_bus {
+ struct kobject kobj_group;
+};
+
+struct dim2_hdm;
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
+void dim2_sysfs_destroy(struct medialb_bus *bus);
+
+/*
+ * callback,
+ * must deliver MediaLB state as true if locked or false if unlocked
+ */
+bool dim2_sysfs_get_state_cb(void);
+
+
+#endif /* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-i2c/Kconfig b/drivers/staging/most/hdm-i2c/Kconfig
new file mode 100644
index 000000000000..6fd7983668ad
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST I2C configuration
+#
+
+config HDM_I2C
+ tristate "I2C HDM"
+ depends on I2C
+ ---help---
+	  Say Y here if you want to connect via I2C to a network transceiver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hdm_i2c.
diff --git a/drivers/staging/most/hdm-i2c/Makefile b/drivers/staging/most/hdm-i2c/Makefile
new file mode 100644
index 000000000000..03a4a59b1f9f
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HDM_I2C) += hdm_i2c.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/hdm-i2c/hdm_i2c.c
new file mode 100644
index 000000000000..029ded3f250b
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/hdm_i2c.c
@@ -0,0 +1,451 @@
+/*
+ * hdm_i2c.c - Hardware Dependent Module for I2C Interface
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+
+#include <mostcore.h>
+
+enum { CH_RX, CH_TX, NUM_CHANNELS };
+
+#define MAX_BUFFERS_CONTROL 32
+#define MAX_BUF_SIZE_CONTROL 256
+
+/**
+ * list_first_mbo - get the first mbo from a list
+ * @ptr: the list head to take the mbo from.
+ */
+#define list_first_mbo(ptr) \
+ list_first_entry(ptr, struct mbo, list)
+
+
+/* IRQ / Polling option */
+static bool polling_req;
+module_param(polling_req, bool, S_IRUGO);
+MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
+
+/* Polling Rate */
+static int scan_rate = 100;
+module_param(scan_rate, int, 0644);
+MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
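+
+/*
+ * Usage sketch for the module parameters above: force polling at 50 reads
+ * per second instead of using the interrupt line, e.g.
+ *
+ *   modprobe hdm_i2c polling_req=1 scan_rate=50
+ */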
+
+struct hdm_i2c {
+ bool is_open[NUM_CHANNELS];
+ bool polling_mode;
+ struct most_interface most_iface;
+ struct most_channel_capability capabilities[NUM_CHANNELS];
+ struct i2c_client *client;
+ struct rx {
+ struct delayed_work dwork;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ struct mutex list_mutex;
+ } rx;
+ char name[64];
+};
+
+#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
+
+/**
+ * configure_channel - called from MOST core to configure a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @channel_config: structure that holds the configuration information
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Receives configuration information from MOST core and initializes the
+ * corresponding channel.
+ */
+static int configure_channel(struct most_interface *most_iface,
+ int ch_idx,
+ struct most_channel_config *channel_config)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(dev->is_open[ch_idx]);
+
+ if (channel_config->data_type != MOST_CH_CONTROL) {
+ pr_err("bad data type for channel %d\n", ch_idx);
+ return -EPERM;
+ }
+
+ if (channel_config->direction != dev->capabilities[ch_idx].direction) {
+ pr_err("bad direction for channel %d\n", ch_idx);
+ return -EPERM;
+ }
+
+ if (channel_config->direction == MOST_CH_RX) {
+ if (dev->polling_mode)
+ schedule_delayed_work(&dev->rx.dwork,
+ msecs_to_jiffies(MSEC_PER_SEC / 4));
+ }
+ dev->is_open[ch_idx] = true;
+
+ return 0;
+}
+
+/**
+ * enqueue - called from MOST core to enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Transmit the data over I2C if it is a "write" request or push the buffer
+ * into the list if it is a "read" request.
+ */
+static int enqueue(struct most_interface *most_iface,
+ int ch_idx, struct mbo *mbo)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+ int ret;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(!dev->is_open[ch_idx]);
+
+ if (ch_idx == CH_RX) {
+ /* RX */
+ mutex_lock(&dev->rx.list_mutex);
+ list_add_tail(&mbo->list, &dev->rx.list);
+ mutex_unlock(&dev->rx.list_mutex);
+ wake_up_interruptible(&dev->rx.waitq);
+ } else {
+ /* TX */
+ ret = i2c_master_send(dev->client, mbo->virt_address,
+ mbo->buffer_length);
+ if (ret <= 0) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ } else {
+ mbo->processed_length = mbo->buffer_length;
+ mbo->status = MBO_SUCCESS;
+ }
+ mbo->complete(mbo);
+ }
+
+ return 0;
+}
+
+/**
+ * poison_channel - called from MOST core to poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * If the channel direction is RX, complete the buffers in the list with
+ * status MBO_E_CLOSE.
+ */
+static int poison_channel(struct most_interface *most_iface,
+ int ch_idx)
+{
+ struct hdm_i2c *dev = to_hdm(most_iface);
+ struct mbo *mbo;
+
+ BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+ BUG_ON(!dev->is_open[ch_idx]);
+
+ dev->is_open[ch_idx] = false;
+
+ if (ch_idx == CH_RX) {
+ mutex_lock(&dev->rx.list_mutex);
+ while (!list_empty(&dev->rx.list)) {
+ mbo = list_first_mbo(&dev->rx.list);
+ list_del(&mbo->list);
+ mutex_unlock(&dev->rx.list_mutex);
+
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_CLOSE;
+ mbo->complete(mbo);
+
+ mutex_lock(&dev->rx.list_mutex);
+ }
+ mutex_unlock(&dev->rx.list_mutex);
+ wake_up_interruptible(&dev->rx.waitq);
+ }
+
+ return 0;
+}
+
+static void request_netinfo(struct most_interface *most_iface,
+ int ch_idx)
+{
+ pr_info("request_netinfo()\n");
+}
+
+static void do_rx_work(struct hdm_i2c *dev)
+{
+ struct mbo *mbo;
+ unsigned char msg[MAX_BUF_SIZE_CONTROL];
+ int ret, ch_idx = CH_RX;
+ uint16_t pml, data_size;
+
+ /* Read PML (2 bytes) */
+ ret = i2c_master_recv(dev->client, msg, 2);
+ if (ret <= 0) {
+ pr_err("Failed to receive PML\n");
+ return;
+ }
+
+ pml = (msg[0] << 8) | msg[1];
+ if (!pml)
+ return;
+
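+	/*
+	 * NOTE: pml comes from the wire; the code assumes it fits into the
+	 * MAX_BUF_SIZE_CONTROL-byte msg[] buffer, a larger value would
+	 * overflow msg[] in the read below.
+	 */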
+ data_size = pml + 2;
+
+ /* Read the whole message, including PML */
+ ret = i2c_master_recv(dev->client, msg, data_size);
+ if (ret <= 0) {
+ pr_err("Failed to receive a Port Message\n");
+ return;
+ }
+
+ for (;;) {
+		/*
+		 * Conditions to wait for: poisoned channel or free buffer
+		 * available for reading
+		 */
+ if (wait_event_interruptible(dev->rx.waitq,
+ !dev->is_open[ch_idx] ||
+ !list_empty(&dev->rx.list))) {
+ pr_err("wait_event_interruptible() failed\n");
+ return;
+ }
+
+ if (!dev->is_open[ch_idx])
+ return;
+
+ mutex_lock(&dev->rx.list_mutex);
+
+ /* list may be empty if poison or remove is called */
+ if (!list_empty(&dev->rx.list))
+ break;
+
+ mutex_unlock(&dev->rx.list_mutex);
+ }
+
+ mbo = list_first_mbo(&dev->rx.list);
+ list_del(&mbo->list);
+ mutex_unlock(&dev->rx.list_mutex);
+
+ mbo->processed_length = min(data_size, mbo->buffer_length);
+ memcpy(mbo->virt_address, msg, mbo->processed_length);
+ mbo->status = MBO_SUCCESS;
+ mbo->complete(mbo);
+}
+
+/**
+ * pending_rx_work - Read pending messages through I2C
+ * @work: definition of this work item
+ *
+ * Invoked by the Interrupt Service Routine, most_irq_handler()
+ */
+static void pending_rx_work(struct work_struct *work)
+{
+ struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
+
+ do_rx_work(dev);
+
+	if (dev->polling_mode) {
+		if (dev->is_open[CH_RX])
+			schedule_delayed_work(&dev->rx.dwork,
+					      msecs_to_jiffies(MSEC_PER_SEC
+							       / scan_rate));
+	} else {
+		enable_irq(dev->client->irq);
+	}
+}
+
+/*
+ * most_irq_handler - Interrupt Service Routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Schedules a delayed work
+ *
+ * By default the interrupt line behavior is active low. Once an interrupt is
+ * generated by the device, the device keeps the interrupt line low until the
+ * driver clears the interrupt (by reading the PMP message). Since the i2c
+ * read is done in a work queue, the interrupt line must be disabled
+ * temporarily to avoid the ISR being called repeatedly. The interrupt is
+ * re-enabled in the work queue after reading the message.
+ *
+ * Note: if the interrupt line were used in falling-edge mode, interrupts
+ * could be missed while the ISR is executing.
+ *
+ */
+static irqreturn_t most_irq_handler(int irq, void *_dev)
+{
+ struct hdm_i2c *dev = _dev;
+
+ disable_irq_nosync(irq);
+
+ schedule_delayed_work(&dev->rx.dwork, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * i2c_probe - i2c probe handler
+ * @client: i2c client device structure
+ * @id: i2c client device id
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Register the i2c client device as a MOST interface
+ */
+static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct hdm_i2c *dev;
+ int ret, i;
+ struct kobject *kobj;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* ID format: i2c-<bus>-<address> */
+ snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
+ client->adapter->nr, client->addr);
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ dev->is_open[i] = false;
+ dev->capabilities[i].data_type = MOST_CH_CONTROL;
+ dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
+ dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
+ }
+ dev->capabilities[CH_RX].direction = MOST_CH_RX;
+ dev->capabilities[CH_RX].name_suffix = "rx";
+ dev->capabilities[CH_TX].direction = MOST_CH_TX;
+ dev->capabilities[CH_TX].name_suffix = "tx";
+
+ dev->most_iface.interface = ITYPE_I2C;
+ dev->most_iface.description = dev->name;
+ dev->most_iface.num_channels = NUM_CHANNELS;
+ dev->most_iface.channel_vector = dev->capabilities;
+ dev->most_iface.configure = configure_channel;
+ dev->most_iface.enqueue = enqueue;
+ dev->most_iface.poison_channel = poison_channel;
+ dev->most_iface.request_netinfo = request_netinfo;
+
+ INIT_LIST_HEAD(&dev->rx.list);
+ mutex_init(&dev->rx.list_mutex);
+ init_waitqueue_head(&dev->rx.waitq);
+
+ INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
+
+ dev->client = client;
+ i2c_set_clientdata(client, dev);
+
+ kobj = most_register_interface(&dev->most_iface);
+ if (IS_ERR(kobj)) {
+ pr_err("Failed to register i2c as a MOST interface\n");
+ kfree(dev);
+ return PTR_ERR(kobj);
+ }
+
+ dev->polling_mode = polling_req || client->irq <= 0;
+ if (!dev->polling_mode) {
+ pr_info("Requesting IRQ: %d\n", client->irq);
+ ret = request_irq(client->irq, most_irq_handler, IRQF_SHARED,
+ client->name, dev);
+ if (ret) {
+ pr_info("IRQ request failed: %d, "
+ "falling back to polling\n", ret);
+ dev->polling_mode = true;
+ }
+ }
+
+ if (dev->polling_mode)
+ pr_info("Using polling at rate: %d times/sec\n", scan_rate);
+
+ return 0;
+}
+
+/*
+ * i2c_remove - i2c remove handler
+ * @client: i2c client device structure
+ *
+ * Return 0 on success.
+ *
+ * Unregister the i2c client device as a MOST interface
+ */
+static int i2c_remove(struct i2c_client *client)
+{
+ struct hdm_i2c *dev = i2c_get_clientdata(client);
+ int i;
+
+ if (!dev->polling_mode)
+ free_irq(client->irq, dev);
+
+ most_deregister_interface(&dev->most_iface);
+
+	for (i = 0; i < NUM_CHANNELS; i++)
+ if (dev->is_open[i])
+ poison_channel(&dev->most_iface, i);
+ cancel_delayed_work_sync(&dev->rx.dwork);
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+ { "most_i2c", 0 },
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(i2c, i2c_id);
+
+static struct i2c_driver i2c_driver = {
+ .driver = {
+ .name = "hdm_i2c",
+ .owner = THIS_MODULE,
+ },
+ .probe = i2c_probe,
+ .remove = i2c_remove,
+ .id_table = i2c_id,
+};
+
+/**
+ * hdm_i2c_init - Driver Registration Routine
+ */
+static int __init hdm_i2c_init(void)
+{
+ pr_info("hdm_i2c_init()\n");
+
+ return i2c_add_driver(&i2c_driver);
+}
+
+/**
+ * hdm_i2c_exit - Driver Cleanup Routine
+ **/
+static void __exit hdm_i2c_exit(void)
+{
+ i2c_del_driver(&i2c_driver);
+ pr_info("hdm_i2c_exit()\n");
+}
+
+module_init(hdm_i2c_init);
+module_exit(hdm_i2c_exit);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("I2C Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
new file mode 100644
index 000000000000..a482c3fdf34b
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -0,0 +1,14 @@
+#
+# MOST USB configuration
+#
+
+config HDM_USB
+ tristate "USB HDM"
+ depends on USB
+ select AIM_NETWORK
+ ---help---
+	  Say Y here if you want to connect via USB to a network transceiver.
+ This device driver depends on the networking AIM.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hdm_usb.
diff --git a/drivers/staging/most/hdm-usb/Makefile b/drivers/staging/most/hdm-usb/Makefile
new file mode 100644
index 000000000000..6bbacb41e94b
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_HDM_USB) += hdm_usb.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
new file mode 100644
index 000000000000..305303f2f17c
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -0,0 +1,1454 @@
+/*
+ * hdm_usb.c - Hardware dependent module for USB
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include "mostcore.h"
+#include "networking.h"
+
+#define USB_MTU 512
+#define NO_ISOCHRONOUS_URB 0
+#define AV_PACKETS_PER_XACT 2
+#define BUF_CHAIN_SIZE 0xFFFF
+#define MAX_NUM_ENDPOINTS 30
+#define MAX_SUFFIX_LEN 10
+#define MAX_STRING_LEN 80
+#define MAX_BUF_SIZE 0xFFFF
+#define CEILING(x, y) (((x) + (y) - 1) / (y))
+
+#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
+#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
+#define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
+#define HW_RESYNC 0x0000
+/* DRCI Addresses */
+#define DRCI_REG_NI_STATE 0x0100
+#define DRCI_REG_PACKET_BW 0x0101
+#define DRCI_REG_NODE_ADDR 0x0102
+#define DRCI_REG_NODE_POS 0x0103
+#define DRCI_REG_MEP_FILTER 0x0140
+#define DRCI_REG_HASH_TBL0 0x0141
+#define DRCI_REG_HASH_TBL1 0x0142
+#define DRCI_REG_HASH_TBL2 0x0143
+#define DRCI_REG_HASH_TBL3 0x0144
+#define DRCI_REG_HW_ADDR_HI 0x0145
+#define DRCI_REG_HW_ADDR_MI 0x0146
+#define DRCI_REG_HW_ADDR_LO 0x0147
+#define DRCI_READ_REQ 0xA0
+#define DRCI_WRITE_REQ 0xA1
+
+/**
+ * struct buf_anchor - used to create a list of pending URBs
+ * @urb: pointer to the USB request block
+ * @clear_work_obj: work object used to clear a halted endpoint
+ * @list: linked list
+ * @urb_compl: completion object signaled when the URB is unlinked
+ */
+struct buf_anchor {
+ struct urb *urb;
+ struct work_struct clear_work_obj;
+ struct list_head list;
+ struct completion urb_compl;
+};
+#define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
+
+/**
+ * struct most_dci_obj - Direct Communication Interface
+ * @kobj: position in sysfs
+ * @usb_device: pointer to the usb device
+ */
+struct most_dci_obj {
+ struct kobject kobj;
+ struct usb_device *usb_device;
+};
+#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+
+/**
+ * struct most_dev - holds all usb interface specific stuff
+ * @parent: parent object in sysfs
+ * @usb_device: pointer to usb device
+ * @iface: hardware interface
+ * @cap: channel capabilities
+ * @conf: channel configuration
+ * @dci: direct communication interface of hardware
+ * @hw_addr: MAC address of hardware
+ * @ep_address: endpoint address table
+ * @link_stat: link status of hardware
+ * @description: device description
+ * @suffix: suffix for channel name
+ * @anchor_list_lock: locks list access
+ * @padding_active: indicates channel uses padding
+ * @is_channel_healthy: health status table of each channel
+ * @anchor_list: list of anchored items
+ * @io_mutex: synchronize I/O with disconnect
+ * @link_stat_timer: timer for link status reports
+ * @poll_work_obj: work for polling link status
+ */
+struct most_dev {
+ struct kobject *parent;
+ struct usb_device *usb_device;
+ struct most_interface iface;
+ struct most_channel_capability *cap;
+ struct most_channel_config *conf;
+ struct most_dci_obj *dci;
+ u8 hw_addr[6];
+ u8 *ep_address;
+ u16 link_stat;
+ char description[MAX_STRING_LEN];
+ char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
+ spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
+ bool padding_active[MAX_NUM_ENDPOINTS];
+ bool is_channel_healthy[MAX_NUM_ENDPOINTS];
+ struct list_head *anchor_list;
+ struct mutex io_mutex;
+ struct timer_list link_stat_timer;
+ struct work_struct poll_work_obj;
+};
+#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
+
+static struct workqueue_struct *schedule_usb_work;
+static void wq_clear_halt(struct work_struct *wq_obj);
+static void wq_netinfo(struct work_struct *wq_obj);
+
+/**
+ * trigger_resync_vr - Vendor request to trigger HW re-sync mechanism
+ * @dev: usb device
+ */
+static void trigger_resync_vr(struct usb_device *dev)
+{
+ int retval;
+ u8 request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
+ int *data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ goto error;
+ *data = HW_RESYNC;
+ retval = usb_control_msg(dev,
+ usb_sndctrlpipe(dev, 0),
+ 0,
+ request_type,
+ 0,
+ 0,
+ data,
+ 0,
+ 5 * HZ);
+ kfree(data);
+ if (retval >= 0)
+ return;
+error:
+ dev_err(&dev->dev, "Vendor request \"stall\" failed\n");
+}
+
+/**
+ * drci_rd_reg - read a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @buf: buffer to store data
+ *
+ * This reads data from the INIC's direct register communication interface.
+ */
+static inline int drci_rd_reg(struct usb_device *dev, u16 reg, void *buf)
+{
+ return usb_control_msg(dev,
+ usb_rcvctrlpipe(dev, 0),
+ DRCI_READ_REQ,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000,
+ reg,
+ buf,
+ 2,
+ 5 * HZ);
+}
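+
+/*
+ * Usage sketch (hedged): DCI registers are 16 bits wide; the transfer
+ * buffer must be DMA-capable, i.e. kmalloc'ed rather than on the stack:
+ *
+ *   u8 *buf = kmalloc(2, GFP_KERNEL);
+ *
+ *   if (buf && drci_rd_reg(usb_dev, DRCI_REG_NI_STATE, buf) >= 0)
+ *           pr_info("NIState: 0x%02x%02x\n", buf[0], buf[1]);
+ *   kfree(buf);
+ */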
+
+/**
+ * drci_wr_reg - write a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @data: data to write
+ *
+ * This writes data to the INIC's direct register communication interface.
+ */
+static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
+{
+ return usb_control_msg(dev,
+ usb_sndctrlpipe(dev, 0),
+ DRCI_WRITE_REQ,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ data,
+ reg,
+ NULL,
+ 0,
+ 5 * HZ);
+}
+
+/**
+ * free_anchored_buffers - free device's anchored items
+ * @mdev: the device
+ * @channel: channel ID
+ */
+static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
+{
+ struct mbo *mbo;
+ struct buf_anchor *anchor, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel], list) {
+ struct urb *urb = anchor->urb;
+
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+ if (likely(urb)) {
+ mbo = urb->context;
+ if (!irqs_disabled()) {
+ usb_kill_urb(urb);
+ } else {
+ usb_unlink_urb(urb);
+ wait_for_completion(&anchor->urb_compl);
+ }
+ if ((mbo) && (mbo->complete)) {
+ mbo->status = MBO_E_CLOSE;
+ mbo->processed_length = 0;
+ mbo->complete(mbo);
+ }
+ usb_free_urb(urb);
+ }
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_del(&anchor->list);
+ kfree(anchor);
+ }
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+}
+
+/**
+ * get_stream_frame_size - calculate frame size of current configuration
+ * @cfg: channel configuration
+ */
+static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+{
+ unsigned int frame_size = 0;
+ unsigned int sub_size = cfg->subbuffer_size;
+
+ if (!sub_size) {
+ pr_warn("Misconfig: Subbuffer size zero.\n");
+ return frame_size;
+ }
+ switch (cfg->data_type) {
+ case MOST_CH_ISOC_AVP:
+ frame_size = AV_PACKETS_PER_XACT * sub_size;
+ break;
+ case MOST_CH_SYNC:
+		if (cfg->packets_per_xact == 0) {
+			pr_warn("Misconfig: Packets per XACT zero\n");
+			frame_size = 0;
+		} else if (cfg->packets_per_xact == 0xFF) {
+			frame_size = (USB_MTU / sub_size) * sub_size;
+		} else {
+			frame_size = cfg->packets_per_xact * sub_size;
+		}
+ break;
+ default:
+ pr_warn("Query frame size of non-streaming channel\n");
+ break;
+ }
+ return frame_size;
+}
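+
+/*
+ * Worked example: a synchronous channel with subbuffer_size = 4 and
+ * packets_per_xact = 0xFF is rounded down to the USB MTU:
+ * (512 / 4) * 4 = 512 bytes per transaction.
+ */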
+
+/**
+ * hdm_poison_channel - mark buffers of this channel as invalid
+ * @iface: pointer to the interface
+ * @channel: channel ID
+ *
+ * This unlinks all URBs submitted to the HCD,
+ * calls the associated completion function of the core and removes
+ * them from the list.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int hdm_poison_channel(struct most_interface *iface, int channel)
+{
+ struct most_dev *mdev;
+
+	if (unlikely(!iface)) {
+		pr_warn("Poison: Bad interface.\n");
+		return -EIO;
+	}
+	mdev = to_mdev(iface);
+ if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+ dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
+ return -ECHRNG;
+ }
+
+ mdev->is_channel_healthy[channel] = false;
+
+ mutex_lock(&mdev->io_mutex);
+ free_anchored_buffers(mdev, channel);
+	mdev->padding_active[channel] = false;
+
+ if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
+ del_timer_sync(&mdev->link_stat_timer);
+ cancel_work_sync(&mdev->poll_work_obj);
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+/**
+ * hdm_add_padding - add padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This inserts the INIC hardware specific padding bytes into a streaming
+ * channel's buffer
+ */
+static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+ struct most_channel_config *conf = &mdev->conf[channel];
+ unsigned int j, num_frames, frame_size;
+ u16 rd_addr, wr_addr;
+
+ frame_size = get_stream_frame_size(conf);
+ if (!frame_size)
+ return -EIO;
+ num_frames = mbo->buffer_length / frame_size;
+
+ if (num_frames < 1) {
+ dev_err(&mdev->usb_device->dev,
+ "Missed minimal transfer unit.\n");
+ return -EIO;
+ }
+
+ for (j = 1; j < num_frames; j++) {
+ wr_addr = (num_frames - j) * USB_MTU;
+ rd_addr = (num_frames - j) * frame_size;
+ memmove(mbo->virt_address + wr_addr,
+ mbo->virt_address + rd_addr,
+ frame_size);
+ }
+ mbo->buffer_length = num_frames * USB_MTU;
+ return 0;
+}
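+
+/*
+ * Padding sketch: with frame_size = 188 and USB_MTU = 512, a buffer holding
+ * three frames is spread in place from the back so that each frame starts
+ * on a USB_MTU boundary:
+ *
+ *   before: |f0|f1|f2|.........................|
+ *   after:  |f0|pad.....|f1|pad.....|f2|pad.....|
+ *
+ * buffer_length then becomes num_frames * USB_MTU = 1536.
+ */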
+
+/**
+ * hdm_remove_padding - remove padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This takes the INIC hardware specific padding bytes off a streaming
+ * channel's buffer.
+ */
+static int hdm_remove_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+ unsigned int j, num_frames, frame_size;
+ struct most_channel_config *const conf = &mdev->conf[channel];
+
+ frame_size = get_stream_frame_size(conf);
+ if (!frame_size)
+ return -EIO;
+ num_frames = mbo->processed_length / USB_MTU;
+
+ for (j = 1; j < num_frames; j++)
+ memmove(mbo->virt_address + frame_size * j,
+ mbo->virt_address + USB_MTU * j,
+ frame_size);
+
+ mbo->processed_length = frame_size * num_frames;
+ return 0;
+}
+
+/**
+ * hdm_write_completion - completion function for submitted Tx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO
+ * transfer flag is set. On success it frees allocated resources and calls
+ * the completion function.
+ *
+ * Context: interrupt!
+ */
+static void hdm_write_completion(struct urb *urb)
+{
+ struct mbo *mbo;
+ struct buf_anchor *anchor;
+ struct most_dev *mdev;
+ struct device *dev;
+ unsigned int channel;
+ unsigned long flags;
+
+ mbo = urb->context;
+ anchor = mbo->priv;
+ mdev = to_mdev(mbo->ifp);
+ channel = mbo->hdm_channel_id;
+ dev = &mdev->usb_device->dev;
+
+	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+	    !mdev->is_channel_healthy[channel]) {
+ complete(&anchor->urb_compl);
+ return;
+ }
+
+ if (unlikely(urb->status && !(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))) {
+ mbo->processed_length = 0;
+ switch (urb->status) {
+ case -EPIPE:
+ dev_warn(dev, "Broken OUT pipe detected\n");
+ most_stop_enqueue(&mdev->iface, channel);
+ mbo->status = MBO_E_INVAL;
+ usb_unlink_urb(urb);
+ INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
+ queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ return;
+ case -ENODEV:
+ case -EPROTO:
+ mbo->status = MBO_E_CLOSE;
+ break;
+ default:
+ mbo->status = MBO_E_INVAL;
+ break;
+ }
+ } else {
+ mbo->status = MBO_SUCCESS;
+ mbo->processed_length = urb->actual_length;
+ }
+
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+ kfree(anchor);
+
+ if (likely(mbo->complete))
+ mbo->complete(mbo);
+ usb_free_urb(urb);
+}
+
+/**
+ * hdm_read_completion - completion function for submitted Rx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO
+ * transfer flag is set. On success it frees allocated resources, removes
+ * padding bytes (if necessary) and calls the completion function.
+ *
+ * Context: interrupt!
+ *
+ * **************************************************************************
+ * Error codes returned by in urb->status
+ * or in iso_frame_desc[n].status (for ISO)
+ * *************************************************************************
+ *
+ * USB device drivers may only test urb status values in completion handlers.
+ * This is because otherwise there would be a race between HCDs updating
+ * these values on one CPU, and device drivers testing them on another CPU.
+ *
+ * A transfer's actual_length may be positive even when an error has been
+ * reported. That's because transfers often involve several packets, so that
+ * one or more packets could finish before an error stops further endpoint I/O.
+ *
+ * For isochronous URBs, the urb status value is non-zero only if the URB is
+ * unlinked, the device is removed, the host controller is disabled or the total
+ * transferred length is less than the requested length and the URB_SHORT_NOT_OK
+ * flag is set. Completion handlers for isochronous URBs should only see
+ * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+ * Individual frame descriptor status fields may report more status codes.
+ *
+ *
+ * 0 Transfer completed successfully
+ *
+ * -ENOENT URB was synchronously unlinked by usb_unlink_urb
+ *
+ * -EINPROGRESS URB still pending, no results yet
+ * (That is, if drivers see this it's a bug.)
+ *
+ * -EPROTO (*, **) a) bitstuff error
+ * b) no response packet received within the
+ * prescribed bus turn-around time
+ * c) unknown USB error
+ *
+ * -EILSEQ (*, **) a) CRC mismatch
+ * b) no response packet received within the
+ * prescribed bus turn-around time
+ * c) unknown USB error
+ *
+ * Note that often the controller hardware does not
+ * distinguish among cases a), b), and c), so a
+ * driver cannot tell whether there was a protocol
+ * error, a failure to respond (often caused by
+ * device disconnect), or some other fault.
+ *
+ * -ETIME (**) No response packet received within the prescribed
+ * bus turn-around time. This error may instead be
+ * reported as -EPROTO or -EILSEQ.
+ *
+ * -ETIMEDOUT Synchronous USB message functions use this code
+ * to indicate timeout expired before the transfer
+ * completed, and no other error was reported by HC.
+ *
+ * -EPIPE (**) Endpoint stalled. For non-control endpoints,
+ * reset this status with usb_clear_halt().
+ *
+ * -ECOMM During an IN transfer, the host controller
+ * received data from an endpoint faster than it
+ * could be written to system memory
+ *
+ * -ENOSR During an OUT transfer, the host controller
+ * could not retrieve data from system memory fast
+ * enough to keep up with the USB data rate
+ *
+ * -EOVERFLOW (*) The amount of data returned by the endpoint was
+ * greater than either the max packet size of the
+ * endpoint or the remaining buffer size. "Babble".
+ *
+ * -EREMOTEIO The data read from the endpoint did not fill the
+ * specified buffer, and URB_SHORT_NOT_OK was set in
+ * urb->transfer_flags.
+ *
+ * -ENODEV Device was removed. Often preceded by a burst of
+ * other errors, since the hub driver doesn't detect
+ * device removal events immediately.
+ *
+ * -EXDEV ISO transfer only partially completed
+ * (only set in iso_frame_desc[n].status, not urb->status)
+ *
+ * -EINVAL ISO madness, if this happens: Log off and go home
+ *
+ * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
+ *
+ * -ESHUTDOWN The device or host controller has been disabled due
+ * to some problem that could not be worked around,
+ * such as a physical disconnect.
+ *
+ *
+ * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
+ * hardware problems such as bad devices (including firmware) or cables.
+ *
+ * (**) This is also one of several codes that different kinds of host
+ * controller use to indicate a transfer has failed because of device
+ * disconnect. In the interval before the hub driver starts disconnect
+ * processing, devices may receive such fault reports for every request.
+ *
+ * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
+ */
+static void hdm_read_completion(struct urb *urb)
+{
+ struct mbo *mbo;
+ struct buf_anchor *anchor;
+ struct most_dev *mdev;
+ struct device *dev;
+ unsigned long flags;
+ unsigned int channel;
+ struct most_channel_config *conf;
+
+ mbo = urb->context;
+ anchor = mbo->priv;
+ mdev = to_mdev(mbo->ifp);
+ channel = mbo->hdm_channel_id;
+ dev = &mdev->usb_device->dev;
+
+	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+	    !mdev->is_channel_healthy[channel]) {
+ complete(&anchor->urb_compl);
+ return;
+ }
+
+ conf = &mdev->conf[channel];
+
+ if (unlikely(urb->status && !(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))) {
+ mbo->processed_length = 0;
+ switch (urb->status) {
+ case -EPIPE:
+ dev_warn(dev, "Broken IN pipe detected\n");
+ mbo->status = MBO_E_INVAL;
+ usb_unlink_urb(urb);
+ INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
+ queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ return;
+ case -ENODEV:
+ case -EPROTO:
+ mbo->status = MBO_E_CLOSE;
+ break;
+ case -EOVERFLOW:
+ dev_warn(dev, "Babble on IN pipe detected\n");
+ default:
+ mbo->status = MBO_E_INVAL;
+ break;
+ }
+ } else {
+ mbo->processed_length = urb->actual_length;
+ if (!mdev->padding_active[channel]) {
+ mbo->status = MBO_SUCCESS;
+ } else {
+ if (hdm_remove_padding(mdev, channel, mbo)) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
+ } else {
+ mbo->status = MBO_SUCCESS;
+ }
+ }
+ }
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+ kfree(anchor);
+
+ if (likely(mbo->complete))
+ mbo->complete(mbo);
+ usb_free_urb(urb);
+}
+
+/**
+ * hdm_enqueue - receive a buffer to be used for data transfer
+ * @iface: interface to enqueue to
+ * @channel: ID of the channel
+ * @mbo: pointer to the buffer object
+ *
+ * This allocates a new URB and fills it according to the channel
+ * that is being used for transmission of data. Before the URB is
+ * submitted it is stored in the private anchor list.
+ *
+ * Returns 0 on success. On any error the URB is freed and an error
+ * code is returned.
+ *
+ * Context: May be called in interrupt context in some cases.
+ */
+static int hdm_enqueue(struct most_interface *iface, int channel, struct mbo *mbo)
+{
+ struct most_dev *mdev;
+ struct buf_anchor *anchor;
+ struct most_channel_config *conf;
+ struct device *dev;
+ int retval = 0;
+ struct urb *urb;
+ unsigned long flags;
+ unsigned long length;
+ void *virt_address;
+
+ if (unlikely(!iface || !mbo))
+ return -EIO;
+ if (unlikely((channel < 0) || (channel >= iface->num_channels)))
+ return -ECHRNG;
+
+ mdev = to_mdev(iface);
+ if (!mdev->usb_device)
+ return -ENODEV;
+
+ conf = &mdev->conf[channel];
+ dev = &mdev->usb_device->dev;
+
+ urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(dev, "Failed to allocate URB\n");
+ return -ENOMEM;
+ }
+
+ anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
+ if (!anchor) {
+ retval = -ENOMEM;
+ goto _error;
+ }
+
+ anchor->urb = urb;
+ init_completion(&anchor->urb_compl);
+ mbo->priv = anchor;
+
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+
+ if (mdev->padding_active[channel] &&
+ (conf->direction & MOST_CH_TX) &&
+ hdm_add_padding(mdev, channel, mbo)) {
+ retval = -EIO;
+ goto _error_1;
+ }
+
+ urb->transfer_dma = mbo->bus_address;
+ virt_address = mbo->virt_address;
+ length = mbo->buffer_length;
+
+ if (conf->direction & MOST_CH_TX) {
+ usb_fill_bulk_urb(urb, mdev->usb_device,
+ usb_sndbulkpipe(mdev->usb_device,
+ mdev->ep_address[channel]),
+ virt_address,
+ length,
+ hdm_write_completion,
+ mbo);
+ if (conf->data_type != MOST_CH_ISOC_AVP)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ } else {
+ usb_fill_bulk_urb(urb, mdev->usb_device,
+ usb_rcvbulkpipe(mdev->usb_device,
+ mdev->ep_address[channel]),
+ virt_address,
+ length,
+ hdm_read_completion,
+ mbo);
+ }
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC); /* may run in atomic context */
+ if (retval) {
+ dev_err(dev, "URB submit failed with error %d.\n", retval);
+ goto _error_1;
+ }
+ return 0;
+
+_error_1:
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+ kfree(anchor);
+_error:
+ usb_free_urb(urb);
+ return retval;
+}
+
+/**
+ * hdm_configure_channel - receive channel configuration from core
+ * @iface: interface
+ * @channel: channel ID
+ * @conf: structure that holds the configuration information
+ */
+static int hdm_configure_channel(struct most_interface *iface, int channel,
+ struct most_channel_config *conf)
+{
+ unsigned int num_frames;
+ unsigned int frame_size;
+ unsigned int temp_size;
+ unsigned int tail_space;
+ struct most_dev *mdev;
+ struct device *dev;
+
+ if (unlikely(!iface || !conf)) {
+ pr_err("Bad interface or config pointer.\n");
+ return -EINVAL;
+ }
+
+ mdev = to_mdev(iface);
+ dev = &mdev->usb_device->dev;
+
+ if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+ dev_err(dev, "Channel ID out of range.\n");
+ return -EINVAL;
+ }
+ mdev->is_channel_healthy[channel] = true;
+
+ if ((!conf->num_buffers) || (!conf->buffer_size)) {
+ dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
+ return -EINVAL;
+ }
+
+ if (conf->data_type != MOST_CH_SYNC &&
+ !(conf->data_type == MOST_CH_ISOC_AVP &&
+ conf->packets_per_xact != 0xFF)) {
+ mdev->padding_active[channel] = false;
+ goto exit;
+ }
+
+ mdev->padding_active[channel] = true;
+ temp_size = conf->buffer_size;
+
+ if ((conf->data_type != MOST_CH_SYNC) &&
+ (conf->data_type != MOST_CH_ISOC_AVP)) {
+ dev_warn(dev, "Unsupported data type\n");
+ return -EINVAL;
+ }
+
+ frame_size = get_stream_frame_size(conf);
+ if ((frame_size == 0) || (frame_size > USB_MTU)) {
+ dev_warn(dev, "Misconfig: frame size wrong\n");
+ return -EINVAL;
+ }
+
+ if (conf->buffer_size % frame_size) {
+ u16 tmp_val;
+
+ tmp_val = conf->buffer_size / frame_size;
+ conf->buffer_size = tmp_val * frame_size;
+ dev_notice(dev, "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
+ channel, conf->buffer_size, temp_size);
+ }
+
+ num_frames = conf->buffer_size / frame_size;
+ tail_space = num_frames * (USB_MTU - frame_size);
+ temp_size += tail_space;
+
+ /* calculate extra length to comply w/ HW padding */
+ conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
+ - conf->buffer_size;
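+ /*
+ * Worked example with illustrative numbers, assuming USB_MTU is 512
+ * and CEILING(x, y) divides x by y rounding up: for buffer_size 1920
+ * and frame_size 480, num_frames = 4 and tail_space = 4 * 32 = 128,
+ * so temp_size = 2048. CEILING(2048, 512) * 512 = 2048, hence
+ * extra_len = 2048 - 1920 = 128 bytes of padding.
+ */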
+exit:
+ mdev->conf[channel] = *conf;
+ return 0;
+}
+
+/**
+ * hdm_update_netinfo - retrieve latest networking information
+ * @mdev: device interface
+ *
+ * This triggers the USB vendor requests to read the hardware address and
+ * the current link status of the attached device.
+ */
+static int hdm_update_netinfo(struct most_dev *mdev)
+{
+ struct device *dev = &mdev->usb_device->dev;
+ u16 link;
+ u8 addr[6];
+
+ if (!is_valid_ether_addr(mdev->hw_addr)) {
+ if (drci_rd_reg(mdev->usb_device,
+ DRCI_REG_HW_ADDR_HI, addr) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
+ return -1;
+ }
+ if (drci_rd_reg(mdev->usb_device,
+ DRCI_REG_HW_ADDR_MI, addr + 2) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
+ return -1;
+ }
+ if (drci_rd_reg(mdev->usb_device,
+ DRCI_REG_HW_ADDR_LO, addr + 4) < 0) {
+ dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
+ return -1;
+ }
+ mutex_lock(&mdev->io_mutex);
+ memcpy(mdev->hw_addr, addr, 6);
+ mutex_unlock(&mdev->io_mutex);
+ }
+ if (drci_rd_reg(mdev->usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ dev_err(dev, "Vendor request \"link status\" failed\n");
+ return -1;
+ }
+ le16_to_cpus(&link);
+ mutex_lock(&mdev->io_mutex);
+ mdev->link_stat = link;
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+/**
+ * hdm_request_netinfo - request network information
+ * @iface: pointer to interface
+ * @channel: channel ID
+ *
+ * This is used as a trigger to set up the link status timer that
+ * polls the NI state of the INIC. The first poll fires after one
+ * second; subsequent polls run every two seconds.
+ */
+static void hdm_request_netinfo(struct most_interface *iface, int channel)
+{
+ struct most_dev *mdev;
+
+ BUG_ON(!iface);
+ mdev = to_mdev(iface);
+ mdev->link_stat_timer.expires = jiffies + HZ;
+ mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
+}
+
+/**
+ * link_stat_timer_handler - add work to link_stat work queue
+ * @data: pointer to USB device instance
+ *
+ * The handler runs in interrupt context. That's why we need to defer the
+ * tasks to a work queue.
+ */
+static void link_stat_timer_handler(unsigned long data)
+{
+ struct most_dev *mdev = (struct most_dev *)data;
+
+ queue_work(schedule_usb_work, &mdev->poll_work_obj);
+ mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+ add_timer(&mdev->link_stat_timer);
+}
+
+/**
+ * wq_netinfo - work queue function
+ * @wq_obj: object that holds data for our deferred work to do
+ *
+ * This retrieves the network interface status of the USB INIC
+ * and compares it with the current status. If the status has
+ * changed, it updates the status of the core.
+ */
+static void wq_netinfo(struct work_struct *wq_obj)
+{
+ struct most_dev *mdev;
+ int prev_link_stat;
+ u8 prev_hw_addr[6];
+
+ mdev = to_mdev_from_work(wq_obj);
+ prev_link_stat = mdev->link_stat;
+
+ memcpy(prev_hw_addr, mdev->hw_addr, sizeof(prev_hw_addr));
+
+ if (hdm_update_netinfo(mdev) < 0)
+ return;
+ if (prev_link_stat != mdev->link_stat ||
+ memcmp(prev_hw_addr, mdev->hw_addr, sizeof(prev_hw_addr)))
+ most_deliver_netinfo(&mdev->iface, mdev->link_stat,
+ &mdev->hw_addr[0]);
+}
+
+/**
+ * wq_clear_halt - work queue function
+ * @wq_obj: work_struct object to execute
+ *
+ * This sends a clear_halt to the given USB pipe.
+ */
+static void wq_clear_halt(struct work_struct *wq_obj)
+{
+ struct buf_anchor *anchor;
+ struct most_dev *mdev;
+ struct mbo *mbo;
+ struct urb *urb;
+ unsigned int channel;
+ unsigned long flags;
+
+ anchor = to_buf_anchor(wq_obj);
+ urb = anchor->urb;
+ mbo = urb->context;
+ mdev = to_mdev(mbo->ifp);
+ channel = mbo->hdm_channel_id;
+
+ if (usb_clear_halt(urb->dev, urb->pipe))
+ dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
+
+ usb_free_urb(urb);
+ spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+ list_del(&anchor->list);
+ spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+
+ if (likely(mbo->complete))
+ mbo->complete(mbo);
+ if (mdev->conf[channel].direction & MOST_CH_TX)
+ most_resume_enqueue(&mdev->iface, channel);
+
+ kfree(anchor);
+}
+
+/**
+ * hdm_usb_fops - file operation table for USB driver
+ */
+static const struct file_operations hdm_usb_fops = {
+ .owner = THIS_MODULE,
+};
+
+/**
+ * usbid - ID table of the devices supported by this driver
+ */
+static const struct usb_device_id usbid[] = {
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, usbid);
+
+#define MOST_DCI_RO_ATTR(_name) \
+ struct most_dci_attribute most_dci_attr_##_name = \
+ __ATTR(_name, S_IRUGO, show_value, NULL)
+
+#define MOST_DCI_ATTR(_name) \
+ struct most_dci_attribute most_dci_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
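+
+/*
+ * For illustration, MOST_DCI_ATTR(mep_filter) expands to
+ *
+ * struct most_dci_attribute most_dci_attr_mep_filter =
+ * __ATTR(mep_filter, S_IRUGO | S_IWUSR, show_value, store_value);
+ *
+ * i.e. a read/write sysfs attribute routed through the generic
+ * show_value() and store_value() helpers defined below.
+ */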
+
+/**
+ * struct most_dci_attribute - to access the attributes of a dci object
+ * @attr: attributes of a dci object
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_dci_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_dci_obj *d,
+ struct most_dci_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_dci_obj *d,
+ struct most_dci_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
+
+
+/**
+ * dci_attr_show - show function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ if (!dci_attr->show)
+ return -EIO;
+
+ return dci_attr->show(dci_obj, dci_attr, buf);
+}
+
+/**
+ * dci_attr_store - store function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t dci_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ if (!dci_attr->store)
+ return -EIO;
+
+ return dci_attr->store(dci_obj, dci_attr, buf, len);
+}
+
+static const struct sysfs_ops most_dci_sysfs_ops = {
+ .show = dci_attr_show,
+ .store = dci_attr_store,
+};
+
+/**
+ * most_dci_release - release function for dci object
+ * @kobj: pointer to kobject
+ *
+ * This frees the memory allocated for the dci object
+ */
+static void most_dci_release(struct kobject *kobj)
+{
+ struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+ kfree(dci_obj);
+}
+
+static ssize_t show_value(struct most_dci_obj *dci_obj,
+ struct most_dci_attribute *attr, char *buf)
+{
+ u16 tmp_val;
+ u16 reg_addr;
+ int err;
+
+ if (!strcmp(attr->attr.name, "ni_state"))
+ reg_addr = DRCI_REG_NI_STATE;
+ else if (!strcmp(attr->attr.name, "packet_bandwidth"))
+ reg_addr = DRCI_REG_PACKET_BW;
+ else if (!strcmp(attr->attr.name, "node_address"))
+ reg_addr = DRCI_REG_NODE_ADDR;
+ else if (!strcmp(attr->attr.name, "node_position"))
+ reg_addr = DRCI_REG_NODE_POS;
+ else if (!strcmp(attr->attr.name, "mep_filter"))
+ reg_addr = DRCI_REG_MEP_FILTER;
+ else if (!strcmp(attr->attr.name, "mep_hash0"))
+ reg_addr = DRCI_REG_HASH_TBL0;
+ else if (!strcmp(attr->attr.name, "mep_hash1"))
+ reg_addr = DRCI_REG_HASH_TBL1;
+ else if (!strcmp(attr->attr.name, "mep_hash2"))
+ reg_addr = DRCI_REG_HASH_TBL2;
+ else if (!strcmp(attr->attr.name, "mep_hash3"))
+ reg_addr = DRCI_REG_HASH_TBL3;
+ else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+ reg_addr = DRCI_REG_HW_ADDR_HI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+ reg_addr = DRCI_REG_HW_ADDR_MI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+ reg_addr = DRCI_REG_HW_ADDR_LO;
+ else
+ return -EIO;
+
+ err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
+ if (err < 0)
+ return err;
+
+ return snprintf(buf, PAGE_SIZE, "%04x\n", le16_to_cpu(tmp_val));
+}
+
+static ssize_t store_value(struct most_dci_obj *dci_obj,
+ struct most_dci_attribute *attr,
+ const char *buf, size_t count)
+{
+ u16 v16;
+ u16 reg_addr;
+ int err;
+
+ if (!strcmp(attr->attr.name, "mep_filter"))
+ reg_addr = DRCI_REG_MEP_FILTER;
+ else if (!strcmp(attr->attr.name, "mep_hash0"))
+ reg_addr = DRCI_REG_HASH_TBL0;
+ else if (!strcmp(attr->attr.name, "mep_hash1"))
+ reg_addr = DRCI_REG_HASH_TBL1;
+ else if (!strcmp(attr->attr.name, "mep_hash2"))
+ reg_addr = DRCI_REG_HASH_TBL2;
+ else if (!strcmp(attr->attr.name, "mep_hash3"))
+ reg_addr = DRCI_REG_HASH_TBL3;
+ else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+ reg_addr = DRCI_REG_HW_ADDR_HI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+ reg_addr = DRCI_REG_HW_ADDR_MI;
+ else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+ reg_addr = DRCI_REG_HW_ADDR_LO;
+ else
+ return -EIO;
+
+ err = kstrtou16(buf, 16, &v16);
+ if (err)
+ return err;
+
+ err = drci_wr_reg(dci_obj->usb_device, reg_addr, cpu_to_le16(v16));
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static MOST_DCI_RO_ATTR(ni_state);
+static MOST_DCI_RO_ATTR(packet_bandwidth);
+static MOST_DCI_RO_ATTR(node_address);
+static MOST_DCI_RO_ATTR(node_position);
+static MOST_DCI_ATTR(mep_filter);
+static MOST_DCI_ATTR(mep_hash0);
+static MOST_DCI_ATTR(mep_hash1);
+static MOST_DCI_ATTR(mep_hash2);
+static MOST_DCI_ATTR(mep_hash3);
+static MOST_DCI_ATTR(mep_eui48_hi);
+static MOST_DCI_ATTR(mep_eui48_mi);
+static MOST_DCI_ATTR(mep_eui48_lo);
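+
+/*
+ * From user space these attributes appear as files below the "dci"
+ * kobject; the paths below are illustrative only. Values are read and
+ * written as hex, cf. show_value()/store_value():
+ *
+ * cat /sys/devices/.../dci/node_address
+ * echo 0100 > /sys/devices/.../dci/mep_filter
+ */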
+
+/**
+ * most_dci_def_attrs - array of default attribute files of the dci object
+ */
+static struct attribute *most_dci_def_attrs[] = {
+ &most_dci_attr_ni_state.attr,
+ &most_dci_attr_packet_bandwidth.attr,
+ &most_dci_attr_node_address.attr,
+ &most_dci_attr_node_position.attr,
+ &most_dci_attr_mep_filter.attr,
+ &most_dci_attr_mep_hash0.attr,
+ &most_dci_attr_mep_hash1.attr,
+ &most_dci_attr_mep_hash2.attr,
+ &most_dci_attr_mep_hash3.attr,
+ &most_dci_attr_mep_eui48_hi.attr,
+ &most_dci_attr_mep_eui48_mi.attr,
+ &most_dci_attr_mep_eui48_lo.attr,
+ NULL,
+};
+
+/**
+ * DCI ktype
+ */
+static struct kobj_type most_dci_ktype = {
+ .sysfs_ops = &most_dci_sysfs_ops,
+ .release = most_dci_release,
+ .default_attrs = most_dci_def_attrs,
+};
+
+/**
+ * create_most_dci_obj - allocates a dci object
+ * @parent: parent kobject
+ *
+ * This creates a dci object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_dci_obj *create_most_dci_obj(struct kobject *parent)
+{
+ struct most_dci_obj *most_dci;
+ int retval;
+
+ most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
+ if (!most_dci)
+ return NULL;
+
+ retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
+ "dci");
+ if (retval) {
+ kobject_put(&most_dci->kobj);
+ return NULL;
+ }
+ return most_dci;
+}
+
+/**
+ * destroy_most_dci_obj - DCI object release function
+ * @p: pointer to dci object
+ */
+static void destroy_most_dci_obj(struct most_dci_obj *p)
+{
+ kobject_put(&p->kobj);
+}
+
+/**
+ * hdm_probe - probe function of USB device driver
+ * @interface: Interface of the attached USB device
+ * @id: Pointer to the USB ID table.
+ *
+ * This allocates and initializes the device instance, adds the new
+ * entry to the internal list, scans the USB descriptors and registers
+ * the interface with the core.
+ * Additionally, the DCI object is created and the hardware is synchronized.
+ *
+ * Return 0 on success. In case of an error a negative number is returned.
+ */
+static int
+hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+ unsigned int i;
+ unsigned int num_endpoints;
+ struct most_channel_capability *tmp_cap;
+ struct most_dev *mdev;
+ struct usb_device *usb_dev;
+ struct device *dev;
+ struct usb_host_interface *usb_iface_desc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int ret = 0;
+
+ usb_iface_desc = interface->cur_altsetting;
+ usb_dev = interface_to_usbdev(interface);
+ dev = &usb_dev->dev;
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ goto exit_ENOMEM;
+
+ usb_set_intfdata(interface, mdev);
+ num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+ mutex_init(&mdev->io_mutex);
+ INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
+ init_timer(&mdev->link_stat_timer);
+
+ mdev->usb_device = usb_dev;
+ mdev->link_stat_timer.function = link_stat_timer_handler;
+ mdev->link_stat_timer.data = (unsigned long)mdev;
+ mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+
+ mdev->iface.mod = hdm_usb_fops.owner;
+ mdev->iface.interface = ITYPE_USB;
+ mdev->iface.configure = hdm_configure_channel;
+ mdev->iface.request_netinfo = hdm_request_netinfo;
+ mdev->iface.enqueue = hdm_enqueue;
+ mdev->iface.poison_channel = hdm_poison_channel;
+ mdev->iface.description = mdev->description;
+ mdev->iface.num_channels = num_endpoints;
+
+ snprintf(mdev->description, sizeof(mdev->description),
+ "usb_device %d-%s:%d.%d",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
+
+ mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
+ if (!mdev->conf)
+ goto exit_free;
+
+ mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
+ if (!mdev->cap)
+ goto exit_free1;
+
+ mdev->iface.channel_vector = mdev->cap;
+ mdev->iface.priv = NULL;
+
+ mdev->ep_address =
+ kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
+ if (!mdev->ep_address)
+ goto exit_free2;
+
+ mdev->anchor_list =
+ kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
+ if (!mdev->anchor_list)
+ goto exit_free3;
+
+ tmp_cap = mdev->cap;
+ for (i = 0; i < num_endpoints; i++) {
+ ep_desc = &usb_iface_desc->endpoint[i].desc;
+ mdev->ep_address[i] = ep_desc->bEndpointAddress;
+ mdev->padding_active[i] = false;
+ mdev->is_channel_healthy[i] = true;
+
+ snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
+ mdev->ep_address[i]);
+
+ tmp_cap->name_suffix = &mdev->suffix[i][0];
+ tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
+ tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
+ tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
+ tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
+ tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+ MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ if (ep_desc->bEndpointAddress & USB_DIR_IN)
+ tmp_cap->direction = MOST_CH_RX;
+ else
+ tmp_cap->direction = MOST_CH_TX;
+ tmp_cap++;
+ INIT_LIST_HEAD(&mdev->anchor_list[i]);
+ spin_lock_init(&mdev->anchor_list_lock[i]);
+ }
+ dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+ le16_to_cpu(usb_dev->descriptor.idVendor),
+ le16_to_cpu(usb_dev->descriptor.idProduct),
+ usb_dev->bus->busnum,
+ usb_dev->devnum);
+
+ dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
+
+ mdev->parent = most_register_interface(&mdev->iface);
+ if (IS_ERR(mdev->parent)) {
+ ret = PTR_ERR(mdev->parent);
+ goto exit_free4;
+ }
+
+ mutex_lock(&mdev->io_mutex);
+ if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
+ /* this increments the reference count of the instance
+ * object of the core
+ */
+ mdev->dci = create_most_dci_obj(mdev->parent);
+ if (!mdev->dci) {
+ mutex_unlock(&mdev->io_mutex);
+ most_deregister_interface(&mdev->iface);
+ ret = -ENOMEM;
+ goto exit_free4;
+ }
+
+ kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
+ mdev->dci->usb_device = mdev->usb_device;
+ trigger_resync_vr(usb_dev);
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+
+exit_free4:
+ kfree(mdev->anchor_list);
+exit_free3:
+ kfree(mdev->ep_address);
+exit_free2:
+ kfree(mdev->cap);
+exit_free1:
+ kfree(mdev->conf);
+exit_free:
+ kfree(mdev);
+exit_ENOMEM:
+ if (ret == 0 || ret == -ENOMEM) {
+ ret = -ENOMEM;
+ dev_err(dev, "out of memory\n");
+ }
+ return ret;
+}
+
+/**
+ * hdm_disconnect - disconnect function of USB device driver
+ * @interface: Interface of the attached USB device
+ *
+ * This deregisters the interface with the core, removes the kernel timer
+ * and frees resources.
+ *
+ * Context: hub kernel thread
+ */
+static void hdm_disconnect(struct usb_interface *interface)
+{
+ struct most_dev *mdev;
+
+ mdev = usb_get_intfdata(interface);
+ mutex_lock(&mdev->io_mutex);
+ usb_set_intfdata(interface, NULL);
+ mdev->usb_device = NULL;
+ mutex_unlock(&mdev->io_mutex);
+
+ del_timer_sync(&mdev->link_stat_timer);
+ cancel_work_sync(&mdev->poll_work_obj);
+
+ destroy_most_dci_obj(mdev->dci);
+ most_deregister_interface(&mdev->iface);
+
+ kfree(mdev->anchor_list);
+ kfree(mdev->cap);
+ kfree(mdev->conf);
+ kfree(mdev->ep_address);
+ kfree(mdev);
+}
+
+static struct usb_driver hdm_usb = {
+ .name = "hdm_usb",
+ .id_table = usbid,
+ .probe = hdm_probe,
+ .disconnect = hdm_disconnect,
+};
+
+static int __init hdm_usb_init(void)
+{
+ pr_info("hdm_usb_init()\n");
+ /* create the workqueue before registering: probe may queue work */
+ schedule_usb_work = create_workqueue("hdmu_work");
+ if (!schedule_usb_work) {
+ pr_err("could not create workqueue\n");
+ return -ENOMEM;
+ }
+ if (usb_register(&hdm_usb)) {
+ pr_err("could not register hdm_usb driver\n");
+ destroy_workqueue(schedule_usb_work);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void __exit hdm_usb_exit(void)
+{
+ pr_info("hdm_usb_exit()\n");
+ /* deregister first so no new work can be queued */
+ usb_deregister(&hdm_usb);
+ destroy_workqueue(schedule_usb_work);
+}
+
+module_init(hdm_usb_init);
+module_exit(hdm_usb_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
new file mode 100644
index 000000000000..38abf1b21b66
--- /dev/null
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOSTCore configuration
+#
+
+config MOSTCORE
+ tristate "MOST Core"
+
+ ---help---
+ Say Y here if you want to enable MOST support.
+ This device driver needs at least an additional AIM and HDM to work.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mostcore.
diff --git a/drivers/staging/most/mostcore/Makefile b/drivers/staging/most/mostcore/Makefile
new file mode 100644
index 000000000000..a078f01cf7c2
--- /dev/null
+++ b/drivers/staging/most/mostcore/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MOSTCORE) += mostcore.o
+
+mostcore-objs := core.o
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
new file mode 100644
index 000000000000..7bb16db42893
--- /dev/null
+++ b/drivers/staging/most/mostcore/core.c
@@ -0,0 +1,1932 @@
+/*
+ * core.c - Implementation of core module of MOST Linux driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+#define MAX_CHANNELS 64
+#define STRING_SIZE 80
+
+static struct class *most_class;
+static struct device *class_glue_dir;
+static struct ida mdev_id;
+static int modref;
+
+struct most_c_obj {
+ struct kobject kobj;
+ struct completion cleanup;
+ atomic_t mbo_ref;
+ atomic_t mbo_nq_level;
+ uint16_t channel_id;
+ bool is_poisoned;
+ bool is_started;
+ int is_starving;
+ struct most_interface *iface;
+ struct most_inst_obj *inst;
+ struct most_channel_config cfg;
+ bool keep_mbo;
+ bool enqueue_halt;
+ struct list_head fifo;
+ spinlock_t fifo_lock;
+ struct list_head halt_fifo;
+ struct list_head list;
+ struct most_aim *first_aim;
+ struct most_aim *second_aim;
+ struct list_head trash_fifo;
+ struct task_struct *hdm_enqueue_task;
+ struct mutex stop_task_mutex;
+ wait_queue_head_t hdm_fifo_wq;
+};
+#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
+
+struct most_inst_obj {
+ int dev_id;
+ atomic_t tainted;
+ struct most_interface *iface;
+ struct list_head channel_list;
+ struct most_c_obj *channel[MAX_CHANNELS];
+ struct kobject kobj;
+ struct list_head list;
+};
+#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
+
+/**
+ * list_pop_mbo - retrieves the first MBO of the list and removes it
+ * @ptr: the list head to grab the MBO from.
+ */
+#define list_pop_mbo(ptr) \
+({ \
+ struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
+ list_del(&_mbo->list); \
+ _mbo; \
+})
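+
+/*
+ * Typical usage, with the channel's fifo_lock held (see get_hdm_mbo()):
+ *
+ * struct mbo *mbo = list_pop_mbo(&c->halt_fifo);
+ */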
+
+static struct mutex deregister_mutex;
+
+/* ___ ___
+ * ___C H A N N E L___
+ */
+
+/**
+ * struct most_c_attr - to access the attributes of a channel object
+ * @attr: attributes of a channel
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_c_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct most_c_obj *d,
+ struct most_c_attr *attr,
+ char *buf);
+ ssize_t (*store)(struct most_c_obj *d,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count);
+};
+#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
+
+#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
+ struct most_c_attr most_chnl_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/**
+ * channel_attr_show - show function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ */
+static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct most_c_attr *channel_attr = to_channel_attr(attr);
+ struct most_c_obj *c_obj = to_c_obj(kobj);
+
+ if (!channel_attr->show)
+ return -EIO;
+
+ return channel_attr->show(c_obj, channel_attr, buf);
+}
+
+/**
+ * channel_attr_store - store function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t channel_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_attr *channel_attr = to_channel_attr(attr);
+ struct most_c_obj *c_obj = to_c_obj(kobj);
+
+ if (!channel_attr->store)
+ return -EIO;
+ return channel_attr->store(c_obj, channel_attr, buf, len);
+}
+
+static const struct sysfs_ops most_channel_sysfs_ops = {
+ .show = channel_attr_show,
+ .store = channel_attr_store,
+};
+
+/**
+ * most_free_mbo_coherent - free an MBO and its coherent buffer
+ * @mbo: buffer to be released
+ *
+ */
+static void most_free_mbo_coherent(struct mbo *mbo)
+{
+ struct most_c_obj *c = mbo->context;
+ /* u32 matches the allocation size computed in arm_mbo_chain() */
+ u32 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
+ mbo->bus_address);
+ kfree(mbo);
+ if (atomic_dec_and_test(&c->mbo_ref))
+ complete(&c->cleanup);
+}
+
+/**
+ * flush_channel_fifos - clear the channel fifos
+ * @c: pointer to channel object
+ */
+static void flush_channel_fifos(struct most_c_obj *c)
+{
+ unsigned long flags, hf_flags;
+ struct mbo *mbo, *tmp;
+
+ if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
+ return;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ if (likely(mbo))
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+ if (likely(mbo))
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+
+ if (unlikely(!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))
+ pr_warn("fifo or halt_fifo is not empty\n");
+}
+
+/**
+ * flush_trash_fifo - clear the trash fifo
+ * @c: pointer to channel object
+ */
+static int flush_trash_fifo(struct most_c_obj *c)
+{
+ struct mbo *mbo, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return 0;
+}
+
+/**
+ * most_channel_release - release function of channel object
+ * @kobj: pointer to channel's kobject
+ */
+static void most_channel_release(struct kobject *kobj)
+{
+ struct most_c_obj *c = to_c_obj(kobj);
+
+ kfree(c);
+}
+
+static ssize_t show_available_directions(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].direction & MOST_CH_RX)
+ strcat(buf, "dir_rx ");
+ if (c->iface->channel_vector[i].direction & MOST_CH_TX)
+ strcat(buf, "dir_tx ");
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t show_available_datatypes(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
+ strcat(buf, "control ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
+ strcat(buf, "async ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
+ strcat(buf, "sync ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
+ strcat(buf, "isoc_avp ");
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static
+ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_packet);
+}
+
+static
+ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_streaming);
+}
+
+static
+ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_packet);
+}
+
+static
+ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_streaming);
+}
+
+static ssize_t show_channel_starving(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
+}
+
+
+#define create_show_channel_attribute(val) \
+ static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+
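+/*
+ * For illustration, create_show_channel_attribute(channel_starving)
+ * expands to
+ *
+ * static struct most_c_attr most_chnl_attr_channel_starving =
+ * __ATTR(channel_starving, S_IRUGO, show_channel_starving, NULL);
+ */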
+create_show_channel_attribute(available_directions);
+create_show_channel_attribute(available_datatypes);
+create_show_channel_attribute(number_of_packet_buffers);
+create_show_channel_attribute(number_of_stream_buffers);
+create_show_channel_attribute(size_of_stream_buffer);
+create_show_channel_attribute(size_of_packet_buffer);
+create_show_channel_attribute(channel_starving);
+
+static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
+}
+
+static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_buffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
+}
+
+static ssize_t store_set_buffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_direction(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ if (c->cfg.direction & MOST_CH_TX)
+ return snprintf(buf, PAGE_SIZE, "dir_tx\n");
+ else if (c->cfg.direction & MOST_CH_RX)
+ return snprintf(buf, PAGE_SIZE, "dir_rx\n");
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_direction(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ if (!strcmp(buf, "dir_rx\n"))
+ c->cfg.direction = MOST_CH_RX;
+ else if (!strcmp(buf, "dir_tx\n"))
+ c->cfg.direction = MOST_CH_TX;
+ else {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t show_set_datatype(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ if (c->cfg.data_type & MOST_CH_CONTROL)
+ return snprintf(buf, PAGE_SIZE, "control\n");
+ else if (c->cfg.data_type & MOST_CH_ASYNC)
+ return snprintf(buf, PAGE_SIZE, "async\n");
+ else if (c->cfg.data_type & MOST_CH_SYNC)
+ return snprintf(buf, PAGE_SIZE, "sync\n");
+ else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
+ return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_datatype(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ if (!strcmp(buf, "control\n"))
+ c->cfg.data_type = MOST_CH_CONTROL;
+ else if (!strcmp(buf, "async\n"))
+ c->cfg.data_type = MOST_CH_ASYNC;
+ else if (!strcmp(buf, "sync\n"))
+ c->cfg.data_type = MOST_CH_SYNC;
+ else if (!strcmp(buf, "isoc_avp\n"))
+ c->cfg.data_type = MOST_CH_ISOC_AVP;
+ else {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
+}
+
+static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
+}
+
+static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+#define create_channel_attribute(value) \
+ static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
+ show_##value, \
+ store_##value)
+
+create_channel_attribute(set_buffer_size);
+create_channel_attribute(set_number_of_buffers);
+create_channel_attribute(set_direction);
+create_channel_attribute(set_datatype);
+create_channel_attribute(set_subbuffer_size);
+create_channel_attribute(set_packets_per_xact);
+
+
+/**
+ * most_channel_def_attrs - array of default attributes of channel object
+ */
+static struct attribute *most_channel_def_attrs[] = {
+ &most_chnl_attr_available_directions.attr,
+ &most_chnl_attr_available_datatypes.attr,
+ &most_chnl_attr_number_of_packet_buffers.attr,
+ &most_chnl_attr_number_of_stream_buffers.attr,
+ &most_chnl_attr_size_of_packet_buffer.attr,
+ &most_chnl_attr_size_of_stream_buffer.attr,
+ &most_chnl_attr_set_number_of_buffers.attr,
+ &most_chnl_attr_set_buffer_size.attr,
+ &most_chnl_attr_set_direction.attr,
+ &most_chnl_attr_set_datatype.attr,
+ &most_chnl_attr_set_subbuffer_size.attr,
+ &most_chnl_attr_set_packets_per_xact.attr,
+ &most_chnl_attr_channel_starving.attr,
+ NULL,
+};
+
+static struct kobj_type most_channel_ktype = {
+ .sysfs_ops = &most_channel_sysfs_ops,
+ .release = most_channel_release,
+ .default_attrs = most_channel_def_attrs,
+};
+
+static struct kset *most_channel_kset;
+
+/**
+ * create_most_c_obj - allocates a channel object
+ * @name: name of the channel object
+ * @parent: parent kobject
+ *
+ * This creates a channel object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_c_obj *
+create_most_c_obj(const char *name, struct kobject *parent)
+{
+ struct most_c_obj *c;
+ int retval;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return NULL;
+ c->kobj.kset = most_channel_kset;
+ retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
+ "%s", name);
+ if (retval) {
+ kobject_put(&c->kobj);
+ return NULL;
+ }
+ kobject_uevent(&c->kobj, KOBJ_ADD);
+ return c;
+}
+
+/**
+ * destroy_most_c_obj - channel release function
+ * @c: pointer to channel object
+ *
+ * This decrements the reference counter of the channel object.
+ * If the reference count turns zero, its release function is called.
+ */
+static void destroy_most_c_obj(struct most_c_obj *c)
+{
+ if (c->first_aim)
+ c->first_aim->disconnect_channel(c->iface, c->channel_id);
+ if (c->second_aim)
+ c->second_aim->disconnect_channel(c->iface, c->channel_id);
+ c->first_aim = NULL;
+ c->second_aim = NULL;
+
+ mutex_lock(&deregister_mutex);
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+ mutex_unlock(&deregister_mutex);
+ kobject_put(&c->kobj);
+}
+
+/* ___ ___
+ * ___I N S T A N C E___
+ */
+#define MOST_INST_ATTR(_name, _mode, _show, _store) \
+ struct most_inst_attribute most_inst_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static struct list_head instance_list;
+
+/**
+ * struct most_inst_attribute - to access the attributes of instance object
+ * @attr: attributes of an instance
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_inst_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_inst_obj *d,
+ struct most_inst_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_inst_obj *d,
+ struct most_inst_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+#define to_instance_attr(a) \
+ container_of(a, struct most_inst_attribute, attr)
+
+/**
+ * instance_attr_show - show function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct most_inst_attribute *instance_attr;
+ struct most_inst_obj *instance_obj;
+
+ instance_attr = to_instance_attr(attr);
+ instance_obj = to_inst_obj(kobj);
+
+ if (!instance_attr->show)
+ return -EIO;
+
+ return instance_attr->show(instance_obj, instance_attr, buf);
+}
+
+/**
+ * instance_attr_store - store function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t instance_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_inst_attribute *instance_attr;
+ struct most_inst_obj *instance_obj;
+
+ instance_attr = to_instance_attr(attr);
+ instance_obj = to_inst_obj(kobj);
+
+ if (!instance_attr->store)
+ return -EIO;
+
+ return instance_attr->store(instance_obj, instance_attr, buf, len);
+}
+
+static const struct sysfs_ops most_inst_sysfs_ops = {
+ .show = instance_attr_show,
+ .store = instance_attr_store,
+};
+
+/**
+ * most_inst_release - release function for instance object
+ * @kobj: pointer to instance's kobject
+ *
+ * This frees the allocated memory for the instance object
+ */
+static void most_inst_release(struct kobject *kobj)
+{
+ struct most_inst_obj *inst = to_inst_obj(kobj);
+
+ kfree(inst);
+}
+
+static ssize_t show_description(struct most_inst_obj *instance_obj,
+ struct most_inst_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ instance_obj->iface->description);
+}
+
+static ssize_t show_interface(struct most_inst_obj *instance_obj,
+ struct most_inst_attribute *attr,
+ char *buf)
+{
+ switch (instance_obj->iface->interface) {
+ case ITYPE_LOOPBACK:
+ return snprintf(buf, PAGE_SIZE, "loopback\n");
+ case ITYPE_I2C:
+ return snprintf(buf, PAGE_SIZE, "i2c\n");
+ case ITYPE_I2S:
+ return snprintf(buf, PAGE_SIZE, "i2s\n");
+ case ITYPE_TSI:
+ return snprintf(buf, PAGE_SIZE, "tsi\n");
+ case ITYPE_HBI:
+ return snprintf(buf, PAGE_SIZE, "hbi\n");
+ case ITYPE_MEDIALB_DIM:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
+ case ITYPE_MEDIALB_DIM2:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
+ case ITYPE_USB:
+ return snprintf(buf, PAGE_SIZE, "usb\n");
+ case ITYPE_PCIE:
+ return snprintf(buf, PAGE_SIZE, "pcie\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+#define create_inst_attribute(value) \
+ static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+
+create_inst_attribute(description);
+create_inst_attribute(interface);
+
+static struct attribute *most_inst_def_attrs[] = {
+ &most_inst_attr_description.attr,
+ &most_inst_attr_interface.attr,
+ NULL,
+};
+
+static struct kobj_type most_inst_ktype = {
+ .sysfs_ops = &most_inst_sysfs_ops,
+ .release = most_inst_release,
+ .default_attrs = most_inst_def_attrs,
+};
+
+static struct kset *most_inst_kset;
+
+
+/**
+ * create_most_inst_obj - creates an instance object
+ * @name: name of the object to be created
+ *
+ * This allocates memory for an instance structure, assigns the proper kset
+ * and registers it with sysfs.
+ *
+ * Returns a pointer to the instance object or NULL when something went wrong.
+ */
+static struct most_inst_obj *create_most_inst_obj(const char *name)
+{
+ struct most_inst_obj *inst;
+ int retval;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return NULL;
+ inst->kobj.kset = most_inst_kset;
+ retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
+ "%s", name);
+ if (retval) {
+ kobject_put(&inst->kobj);
+ return NULL;
+ }
+ kobject_uevent(&inst->kobj, KOBJ_ADD);
+ return inst;
+}
+
+/**
+ * destroy_most_inst_obj - MOST instance release function
+ * @inst: pointer to the instance object
+ *
+ * This decrements the reference counter of the instance object.
+ * If the reference count turns zero, its release function is called
+ */
+static void destroy_most_inst_obj(struct most_inst_obj *inst)
+{
+ struct most_c_obj *c, *tmp;
+
+ /* need to destroy channels first, since
+ * each channel incremented the
+ * reference count of the inst->kobj
+ */
+ list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
+ destroy_most_c_obj(c);
+ }
+ kobject_put(&inst->kobj);
+}
+
+/* ___ ___
+ * ___A I M___
+ */
+struct most_aim_obj {
+ struct kobject kobj;
+ struct list_head list;
+ struct most_aim *driver;
+ char add_link[STRING_SIZE];
+ char remove_link[STRING_SIZE];
+};
+#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
+
+static struct list_head aim_list;
+
+
+/**
+ * struct most_aim_attribute - to access the attributes of AIM object
+ * @attr: attributes of an AIM
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_aim_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct most_aim_obj *d,
+ struct most_aim_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct most_aim_obj *d,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t count);
+};
+#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
+
+/**
+ * aim_attr_show - show function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t aim_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct most_aim_attribute *aim_attr;
+ struct most_aim_obj *aim_obj;
+
+ aim_attr = to_aim_attr(attr);
+ aim_obj = to_aim_obj(kobj);
+
+ if (!aim_attr->show)
+ return -EIO;
+
+ return aim_attr->show(aim_obj, aim_attr, buf);
+}
+
+/**
+ * aim_attr_store - store function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t aim_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_aim_attribute *aim_attr;
+ struct most_aim_obj *aim_obj;
+
+ aim_attr = to_aim_attr(attr);
+ aim_obj = to_aim_obj(kobj);
+
+ if (!aim_attr->store)
+ return -EIO;
+ return aim_attr->store(aim_obj, aim_attr, buf, len);
+}
+
+static const struct sysfs_ops most_aim_sysfs_ops = {
+ .show = aim_attr_show,
+ .store = aim_attr_store,
+};
+
+/**
+ * most_aim_release - AIM release function
+ * @kobj: pointer to AIM's kobject
+ */
+static void most_aim_release(struct kobject *kobj)
+{
+ struct most_aim_obj *aim_obj = to_aim_obj(kobj);
+
+ kfree(aim_obj);
+}
+
+static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
+}
+
+/**
+ * split_string - parses the string in buf, modifying it in place, and
+ * splits it into two mandatory substrings and one optional substring.
+ *
+ * @buf: complete string from attribute 'add_channel'
+ * @a: address of pointer to 1st substring (=instance name)
+ * @b: address of pointer to 2nd substring (=channel name)
+ * @c: optional address of pointer to 3rd substring (=user defined name)
+ *
+ * Examples:
+ *
+ * Input: "mdev0:ch0@ep_81:my_channel\n" or
+ * "mdev0:ch0@ep_81:my_channel"
+ *
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ *
+ * Input: "mdev0:ch0@ep_81\n"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ *
+ * Input: "mdev0:ch0@ep_81"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ */
+static int split_string(char *buf, char **a, char **b, char **c)
+{
+ *a = strsep(&buf, ":");
+ if (!*a)
+ return -EIO;
+
+ *b = strsep(&buf, ":\n");
+ if (!*b)
+ return -EIO;
+
+ if (c)
+ *c = strsep(&buf, ":\n");
+
+ return 0;
+}
+
+/**
+ * get_channel_by_name - get pointer to channel object
+ * @mdev: name of the device instance
+ * @mdev_ch: name of the respective channel
+ *
+ * This retrieves the pointer to a channel object.
+ */
+static struct most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
+{
+ struct most_c_obj *c, *tmp;
+ struct most_inst_obj *i, *i_tmp;
+ int found = 0;
+
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ if (!strcmp(kobject_name(&i->kobj), mdev)) {
+ found++;
+ break;
+ }
+ }
+ if (unlikely(!found))
+ return ERR_PTR(-EIO);
+
+ list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+ if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
+ found++;
+ break;
+ }
+ }
+ if (unlikely(found < 2))
+ return ERR_PTR(-EIO);
+ return c;
+}
+
+/**
+ * store_add_link - store() function for add_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * This parses the string given by buf and splits it into
+ * three substrings. Note: the third substring is optional. In case a
+ * cdev AIM is loaded, the optional third substring makes up the name of
+ * the device node in the /dev directory. If omitted, the device node
+ * inherits the channel's name within sysfs.
+ *
+ * Searches for a pair of device and channel and probes the AIM
+ *
+ * Example:
+ * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
+ * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ *
+ * (1) would create the device node /dev/my_rxchannel
+ * (2) would create the device node /dev/mdev0-ch0@ep_81
+ */
+static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_obj *c;
+ struct most_aim **aim_ptr;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ char *mdev_devnod;
+ char devnod_buf[STRING_SIZE];
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ strlcpy(aim_obj->add_link, buf, max_len);
+
+ ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
+ if (ret)
+ return ret;
+
+ if (!mdev_devnod || *mdev_devnod == 0) {
+ snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev, mdev_ch);
+ mdev_devnod = devnod_buf;
+ }
+
+ c = get_channel_by_name(mdev, mdev_ch);
+ if (IS_ERR(c))
+ return -ENODEV;
+
+ if (!c->first_aim)
+ aim_ptr = &c->first_aim;
+ else if (!c->second_aim)
+ aim_ptr = &c->second_aim;
+ else
+ return -ENOSPC;
+
+ ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
+ &c->cfg, &c->kobj, mdev_devnod);
+ if (ret)
+ return ret;
+ *aim_ptr = aim_obj->driver;
+ return len;
+}
+
+static struct most_aim_attribute most_aim_attr_add_link =
+ __ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
+
+static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
+}
+
+/**
+ * store_remove_link - store function for remove_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * Example:
+ * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ */
+static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct most_c_obj *c;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ strlcpy(aim_obj->remove_link, buf, max_len);
+ ret = split_string(buffer, &mdev, &mdev_ch, NULL);
+ if (ret)
+ return ret;
+
+ c = get_channel_by_name(mdev, mdev_ch);
+ if (IS_ERR(c))
+ return -ENODEV;
+
+ if (c->first_aim == aim_obj->driver)
+ c->first_aim = NULL;
+ if (c->second_aim == aim_obj->driver)
+ c->second_aim = NULL;
+ if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
+ return len;
+}
+
+static struct most_aim_attribute most_aim_attr_remove_link =
+ __ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link, store_remove_link);
+
+static struct attribute *most_aim_def_attrs[] = {
+ &most_aim_attr_add_link.attr,
+ &most_aim_attr_remove_link.attr,
+ NULL,
+};
+
+static struct kobj_type most_aim_ktype = {
+ .sysfs_ops = &most_aim_sysfs_ops,
+ .release = most_aim_release,
+ .default_attrs = most_aim_def_attrs,
+};
+
+static struct kset *most_aim_kset;
+
+/**
+ * create_most_aim_obj - creates an AIM object
+ * @name: name of the AIM
+ *
+ * This creates an AIM object, assigns the proper kset and registers
+ * it with sysfs.
+ * Returns a pointer to the object or NULL if something went wrong.
+ */
+static struct most_aim_obj *create_most_aim_obj(const char *name)
+{
+ struct most_aim_obj *most_aim;
+ int retval;
+
+ most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
+ if (!most_aim)
+ return NULL;
+ most_aim->kobj.kset = most_aim_kset;
+ retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
+ NULL, "%s", name);
+ if (retval) {
+ kobject_put(&most_aim->kobj);
+ return NULL;
+ }
+ kobject_uevent(&most_aim->kobj, KOBJ_ADD);
+ return most_aim;
+}
+
+/**
+ * destroy_most_aim_obj - AIM release function
+ * @p: pointer to AIM object
+ *
+ * This decrements the reference counter of the AIM object. If the
+ * reference count turns zero, its release function will be called.
+ */
+static void destroy_most_aim_obj(struct most_aim_obj *p)
+{
+ kobject_put(&p->kobj);
+}
+
+
+/* ___ ___
+ * ___C O R E___
+ */
+
+/**
+ * Instantiation of the MOST bus
+ */
+static struct bus_type most_bus = {
+ .name = "most",
+};
+
+/**
+ * Instantiation of the core driver
+ */
+static struct device_driver mostcore = {
+ .name = "mostcore",
+ .bus = &most_bus,
+};
+
+static inline void trash_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add(&mbo->list, &c->trash_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+}
+
+static struct mbo *get_hdm_mbo(struct most_c_obj *c)
+{
+ unsigned long flags;
+ struct mbo *mbo;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ if (c->enqueue_halt || list_empty(&c->halt_fifo))
+ mbo = NULL;
+ else
+ mbo = list_pop_mbo(&c->halt_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return mbo;
+}
+
+static void nq_hdm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add_tail(&mbo->list, &c->halt_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+
+static int hdm_enqueue_thread(void *data)
+{
+ struct most_c_obj *c = data;
+ struct mbo *mbo;
+ typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
+
+ while (likely(!kthread_should_stop())) {
+ wait_event_interruptible(c->hdm_fifo_wq,
+ (mbo = get_hdm_mbo(c))
+ || kthread_should_stop());
+
+ if (unlikely(!mbo))
+ continue;
+
+ if (c->cfg.direction == MOST_CH_RX)
+ mbo->buffer_length = c->cfg.buffer_size;
+
+ if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
+ pr_err("hdm enqueue failed\n");
+ nq_hdm_mbo(mbo);
+ c->hdm_enqueue_task = NULL;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
+{
+ struct task_struct *task =
+ kthread_run(&hdm_enqueue_thread, c, "hdm_fifo_%d", channel_id);
+
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ c->hdm_enqueue_task = task;
+ return 0;
+}
+
+/**
+ * arm_mbo - recycle MBO for further usage
+ * @mbo: buffer object
+ *
+ * This puts an MBO back into the list to have it ready for upcoming
+ * tx transactions.
+ *
+ * In case the MBO belongs to a channel that recently has been
+ * poisoned, the MBO is scheduled to be trashed.
+ * Calls the completion handler of an attached AIM.
+ */
+static void arm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_c_obj *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+ c = mbo->context;
+
+ if (c->is_poisoned) {
+ trash_mbo(mbo);
+ return;
+ }
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add_tail(&mbo->list, &c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ if (c->second_aim && c->second_aim->tx_completion)
+ c->second_aim->tx_completion(c->iface, c->channel_id);
+ if (c->first_aim && c->first_aim->tx_completion)
+ c->first_aim->tx_completion(c->iface, c->channel_id);
+}
+
+/**
+ * arm_mbo_chain - helper function that arms an MBO chain for the HDM
+ * @c: pointer to interface channel
+ * @dir: direction of the channel
+ * @compl: pointer to completion function
+ *
+ * This allocates buffer objects including their DMA coherent buffers
+ * and puts them in the fifo.
+ * Buffers of Rx channels are put in the kthread fifo, hence immediately
+ * submitted to the HDM.
+ *
+ * Returns the number of allocated and enqueued MBOs.
+ */
+static int arm_mbo_chain(struct most_c_obj *c, int dir,
+ void (*compl)(struct mbo *))
+{
+ unsigned int i;
+ int retval;
+ struct mbo *mbo;
+ u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ atomic_set(&c->mbo_nq_level, 0);
+
+ for (i = 0; i < c->cfg.num_buffers; i++) {
+ mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
+ if (!mbo) {
+ pr_info("WARN: Allocation of MBO failed.\n");
+ retval = i;
+ goto _exit;
+ }
+ mbo->context = c;
+ mbo->ifp = c->iface;
+ mbo->hdm_channel_id = c->channel_id;
+ mbo->virt_address = dma_alloc_coherent(NULL,
+ coherent_buf_size,
+ &mbo->bus_address,
+ GFP_KERNEL);
+ if (!mbo->virt_address) {
+ pr_info("WARN: No DMA coherent buffer.\n");
+ retval = i;
+ goto _error1;
+ }
+ mbo->complete = compl;
+ if (dir == MOST_CH_RX) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ } else {
+ arm_mbo(mbo);
+ }
+ }
+ return i;
+
+_error1:
+ kfree(mbo);
+_exit:
+ return retval;
+}
+
+/**
+ * most_submit_mbo - submits an MBO to fifo
+ * @mbo: pointer to the MBO
+ *
+ */
+int most_submit_mbo(struct mbo *mbo)
+{
+ struct most_c_obj *c;
+ struct most_inst_obj *i;
+
+ if (unlikely((!mbo) || (!mbo->context))) {
+ pr_err("Bad MBO or missing channel reference\n");
+ return -EINVAL;
+ }
+ c = mbo->context;
+ i = c->inst;
+
+ if (unlikely(atomic_read(&i->tainted)))
+ return -ENODEV;
+
+ nq_hdm_mbo(mbo);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_submit_mbo);
+
+/**
+ * most_write_completion - write completion handler
+ * @mbo: pointer to MBO
+ *
+ * This recycles the MBO for further usage. In case the channel has been
+ * poisoned, the MBO is scheduled to be trashed.
+ */
+static void most_write_completion(struct mbo *mbo)
+{
+ struct most_c_obj *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+
+ c = mbo->context;
+ if (mbo->status == MBO_E_INVAL)
+ pr_info("WARN: Tx MBO status: invalid\n");
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
+ trash_mbo(mbo);
+ else
+ arm_mbo(mbo);
+}
+
+/**
+ * get_channel_by_iface - get pointer to channel object
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This retrieves a pointer to a channel of the given interface and channel ID.
+ */
+static struct most_c_obj *
+get_channel_by_iface(struct most_interface *iface, int id)
+{
+ struct most_inst_obj *i;
+
+ if (unlikely(!iface)) {
+ pr_err("Bad interface\n");
+ return NULL;
+ }
+ if (unlikely((id < 0) || (id >= iface->num_channels))) {
+ pr_err("Channel index (%d) out of range\n", id);
+ return NULL;
+ }
+ i = iface->priv;
+ if (unlikely(!i)) {
+ pr_err("interface is not registered\n");
+ return NULL;
+ }
+ return i->channel[id];
+}
+
+/**
+ * most_get_mbo - get pointer to an MBO of pool
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This attempts to get a free buffer out of the channel fifo.
+ * Returns a pointer to MBO on success or NULL otherwise.
+ */
+struct mbo *most_get_mbo(struct most_interface *iface, int id)
+{
+ struct mbo *mbo;
+ struct most_c_obj *c;
+ unsigned long flags;
+
+ c = get_channel_by_iface(iface, id);
+ if (unlikely(!c))
+ return NULL;
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ if (list_empty(&c->fifo)) {
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return NULL;
+ }
+ mbo = list_pop_mbo(&c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ mbo->buffer_length = c->cfg.buffer_size;
+ return mbo;
+}
+EXPORT_SYMBOL_GPL(most_get_mbo);
+
+
+/**
+ * most_put_mbo - return buffer to pool
+ * @mbo: buffer object
+ */
+void most_put_mbo(struct mbo *mbo)
+{
+ struct most_c_obj *c;
+ struct most_inst_obj *i;
+
+ c = mbo->context;
+ i = c->inst;
+
+ if (unlikely(atomic_read(&i->tainted))) {
+ mbo->status = MBO_E_CLOSE;
+ trash_mbo(mbo);
+ return;
+ }
+ if (c->cfg.direction == MOST_CH_TX) {
+ arm_mbo(mbo);
+ return;
+ }
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+}
+EXPORT_SYMBOL_GPL(most_put_mbo);
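+
+/*
+ * Illustrative sketch (not part of this driver): a possible AIM Tx path
+ * using the pool API above. The names tx_data and tx_len are hypothetical;
+ * error handling is reduced to the essentials. A free MBO is fetched,
+ * filled and handed to the HDM; if submission fails, it goes back to
+ * the pool.
+ *
+ * struct mbo *mbo = most_get_mbo(iface, channel_id);
+ *
+ * if (!mbo)
+ * return -EAGAIN;
+ * memcpy(mbo->virt_address, tx_data, tx_len);
+ * mbo->buffer_length = tx_len;
+ * if (most_submit_mbo(mbo))
+ * most_put_mbo(mbo);
+ */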
+
+/**
+ * most_read_completion - read completion handler
+ * @mbo: pointer to MBO
+ *
+ * This function is called by the HDM when data has been received from the
+ * hardware and copied to the buffer of the MBO.
+ *
+ * In case the channel has been poisoned it puts the buffer in the trash queue.
+ * Otherwise, it passes the buffer to an AIM for further processing.
+ */
+static void most_read_completion(struct mbo *mbo)
+{
+ struct most_c_obj *c;
+
+ c = mbo->context;
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
+ goto release_mbo;
+
+ if (mbo->status == MBO_E_INVAL) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ return;
+ }
+
+ if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
+ pr_info("WARN: rx device out of buffers\n");
+ c->is_starving = 1;
+ }
+
+ if (c->first_aim && c->first_aim->rx_completion &&
+ c->first_aim->rx_completion(mbo) == 0)
+ return;
+ if (c->second_aim && c->second_aim->rx_completion &&
+ c->second_aim->rx_completion(mbo) == 0)
+ return;
+ pr_info("WARN: no driver linked with this channel\n");
+ mbo->status = MBO_E_CLOSE;
+release_mbo:
+ trash_mbo(mbo);
+}
+
+/**
+ * most_start_channel - prepares a channel for communication
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This prepares the channel for usage. Cross-checks whether the
+ * channel's been properly configured.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+int most_start_channel(struct most_interface *iface, int id)
+{
+ int num_buffer;
+ int ret;
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (unlikely(!c))
+ return -EINVAL;
+
+ if (c->is_started)
+ return -EBUSY;
+
+ if (!try_module_get(iface->mod)) {
+ pr_info("failed to acquire HDM lock\n");
+ return -ENOLCK;
+ }
+ modref++;
+
+ c->cfg.extra_len = 0;
+ if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
+ pr_info("channel configuration failed. Go check settings...\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ init_waitqueue_head(&c->hdm_fifo_wq);
+
+ if (c->cfg.direction == MOST_CH_RX)
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_read_completion);
+ else
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_write_completion);
+ if (unlikely(num_buffer == 0)) {
+ pr_info("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = run_enqueue_thread(c, id);
+ if (ret)
+ goto error;
+
+ c->is_started = true;
+ c->is_starving = 0;
+ atomic_set(&c->mbo_ref, num_buffer);
+ return 0;
+error:
+ if (iface->mod)
+ module_put(iface->mod);
+ modref--;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(most_start_channel);
+
+/**
+ * most_stop_channel - stops a running channel
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ */
+int most_stop_channel(struct most_interface *iface, int id)
+{
+ struct most_c_obj *c;
+
+ if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
+ pr_err("Bad interface or index out of range\n");
+ return -EINVAL;
+ }
+ c = get_channel_by_iface(iface, id);
+ if (unlikely(!c))
+ return -EINVAL;
+
+ if (!c->is_started)
+ return 0;
+
+ /* FIXME: we need to know calling AIM to reset only one link */
+ c->first_aim = NULL;
+ c->second_aim = NULL;
+ /* do not go into recursion calling aim->disconnect_channel */
+
+ mutex_lock(&c->stop_task_mutex);
+ if (c->hdm_enqueue_task)
+ kthread_stop(c->hdm_enqueue_task);
+ c->hdm_enqueue_task = NULL;
+ mutex_unlock(&c->stop_task_mutex);
+
+ mutex_lock(&deregister_mutex);
+ if (atomic_read(&c->inst->tainted)) {
+ mutex_unlock(&deregister_mutex);
+ return -ENODEV;
+ }
+ mutex_unlock(&deregister_mutex);
+
+ if (iface->mod && modref) {
+ module_put(iface->mod);
+ modref--;
+ }
+
+ c->is_poisoned = true;
+ if (c->iface->poison_channel(c->iface, c->channel_id)) {
+ pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
+ c->iface->description);
+ return -EAGAIN;
+ }
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+
+#ifdef CMPL_INTERRUPTIBLE
+ if (wait_for_completion_interruptible(&c->cleanup)) {
+ pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+ return -EINTR;
+ }
+#else
+ wait_for_completion(&c->cleanup);
+#endif
+ c->is_poisoned = false;
+ c->is_started = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_stop_channel);
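+
+/*
+ * Illustrative sketch (hypothetical AIM open/release handlers): a channel
+ * is typically started when the AIM's device node is opened and stopped
+ * again on release.
+ *
+ * ret = most_start_channel(iface, channel_id);
+ * if (ret)
+ * return ret;
+ * ...
+ * most_stop_channel(iface, channel_id);
+ */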
+
+/**
+ * most_register_aim - registers an AIM (driver) with the core
+ * @aim: instance of AIM to be registered
+ */
+int most_register_aim(struct most_aim *aim)
+{
+ struct most_aim_obj *aim_obj;
+
+ if (!aim) {
+ pr_err("Bad driver\n");
+ return -EINVAL;
+ }
+ aim_obj = create_most_aim_obj(aim->name);
+ if (!aim_obj) {
+ pr_info("failed to alloc driver object\n");
+ return -ENOMEM;
+ }
+ aim_obj->driver = aim;
+ aim->context = aim_obj;
+ pr_info("registered new application interfacing module %s\n",
+ aim->name);
+ list_add_tail(&aim_obj->list, &aim_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_register_aim);
+
+/**
+ * most_deregister_aim - deregisters an AIM (driver) with the core
+ * @aim: AIM to be removed
+ */
+int most_deregister_aim(struct most_aim *aim)
+{
+ struct most_aim_obj *aim_obj;
+ struct most_c_obj *c, *tmp;
+ struct most_inst_obj *i, *i_tmp;
+
+ if (!aim) {
+ pr_err("Bad driver\n");
+ return -EINVAL;
+ }
+
+ aim_obj = aim->context;
+ if (!aim_obj) {
+ pr_info("driver not registered.\n");
+ return -EINVAL;
+ }
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+ if (c->first_aim == aim || c->second_aim == aim)
+ aim->disconnect_channel(
+ c->iface, c->channel_id);
+ if (c->first_aim == aim)
+ c->first_aim = NULL;
+ if (c->second_aim == aim)
+ c->second_aim = NULL;
+ }
+ }
+ list_del(&aim_obj->list);
+ destroy_most_aim_obj(aim_obj);
+ pr_info("deregistering application interfacing module %s\n", aim->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_deregister_aim);
+
+/**
+ * most_register_interface - registers an interface with core
+ * @iface: pointer to the instance of the interface description.
+ *
+ * Allocates and initializes a new interface instance and all of its channels.
+ * Returns a pointer to kobject or an error pointer.
+ */
+struct kobject *most_register_interface(struct most_interface *iface)
+{
+ unsigned int i;
+ int id;
+ char name[STRING_SIZE];
+ char channel_name[STRING_SIZE];
+ struct most_c_obj *c;
+ struct most_inst_obj *inst;
+
+ if (!iface || !iface->enqueue || !iface->configure ||
+ !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
+ pr_err("Bad interface or channel overflow\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ pr_info("Failed to alloc mdev ID\n");
+ return ERR_PTR(id);
+ }
+ snprintf(name, STRING_SIZE, "mdev%d", id);
+
+ inst = create_most_inst_obj(name);
+ if (!inst) {
+ pr_info("Failed to allocate interface instance\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iface->priv = inst;
+ INIT_LIST_HEAD(&inst->channel_list);
+ inst->iface = iface;
+ inst->dev_id = id;
+ atomic_set(&inst->tainted, 0);
+ list_add_tail(&inst->list, &instance_list);
+
+ for (i = 0; i < iface->num_channels; i++) {
+ const char *name_suffix = iface->channel_vector[i].name_suffix;
+
+ if (!name_suffix)
+ snprintf(channel_name, STRING_SIZE, "ch%d", i);
+ else if (name_suffix[0] == '@')
+ snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
+ name_suffix);
+ else
+ snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
+
+ /* this increments the reference count of this instance */
+ c = create_most_c_obj(channel_name, &inst->kobj);
+ if (!c)
+ goto free_instance;
+ inst->channel[i] = c;
+ c->is_starving = 0;
+ c->iface = iface;
+ c->inst = inst;
+ c->channel_id = i;
+ c->keep_mbo = false;
+ c->enqueue_halt = false;
+ c->is_poisoned = false;
+ c->is_started = false;
+ c->cfg.direction = 0;
+ c->cfg.data_type = 0;
+ c->cfg.num_buffers = 0;
+ c->cfg.buffer_size = 0;
+ c->cfg.subbuffer_size = 0;
+ c->cfg.packets_per_xact = 0;
+ spin_lock_init(&c->fifo_lock);
+ INIT_LIST_HEAD(&c->fifo);
+ INIT_LIST_HEAD(&c->trash_fifo);
+ INIT_LIST_HEAD(&c->halt_fifo);
+ init_completion(&c->cleanup);
+ atomic_set(&c->mbo_ref, 0);
+ mutex_init(&c->stop_task_mutex);
+ list_add_tail(&c->list, &inst->channel_list);
+ }
+ pr_info("registered new MOST device mdev%d (%s)\n",
+ inst->dev_id, iface->description);
+ return &inst->kobj;
+
+free_instance:
+ pr_info("Failed allocate channel(s)\n");
+ list_del(&inst->list);
+ destroy_most_inst_obj(inst);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(most_register_interface);
+
+/**
+ * most_deregister_interface - deregisters an interface with core
+ * @iface: pointer to the interface instance description.
+ *
+ * Before removing an interface instance from the list, all running
+ * channels are stopped and poisoned.
+ */
+void most_deregister_interface(struct most_interface *iface)
+{
+ struct most_inst_obj *i = iface->priv;
+ struct most_c_obj *c;
+
+ mutex_lock(&deregister_mutex);
+ if (unlikely(!i)) {
+ pr_info("Bad Interface\n");
+ mutex_unlock(&deregister_mutex);
+ return;
+ }
+ pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
+ iface->description);
+
+ atomic_set(&i->tainted, 1);
+ mutex_unlock(&deregister_mutex);
+
+ while (modref) {
+ if (iface->mod)
+ module_put(iface->mod);
+ modref--;
+ }
+
+ list_for_each_entry(c, &i->channel_list, list) {
+ if (!c->is_started)
+ continue;
+
+ mutex_lock(&c->stop_task_mutex);
+ if (c->hdm_enqueue_task)
+ kthread_stop(c->hdm_enqueue_task);
+ c->hdm_enqueue_task = NULL;
+ mutex_unlock(&c->stop_task_mutex);
+
+ if (iface->poison_channel(iface, c->channel_id))
+ pr_err("Can't poison channel %d\n", c->channel_id);
+ }
+ ida_simple_remove(&mdev_id, i->dev_id);
+ list_del(&i->list);
+ destroy_most_inst_obj(i);
+}
+EXPORT_SYMBOL_GPL(most_deregister_interface);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This is called by an HDM that _cannot_ attend to its duties and
+ * is about to be overrun by the core. The core is not going to
+ * enqueue any further packets unless the flagging HDM calls
+ * most_resume_enqueue().
+ */
+void most_stop_enqueue(struct most_interface *iface, int id)
+{
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (likely(c))
+ c->enqueue_halt = true;
+}
+EXPORT_SYMBOL_GPL(most_stop_enqueue);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * sitting in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int id)
+{
+ struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+ if (unlikely(!c))
+ return;
+ c->enqueue_halt = false;
+
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+EXPORT_SYMBOL_GPL(most_resume_enqueue);
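+
+/*
+ * Illustrative sketch (an assumption, not taken from an in-tree HDM): an
+ * HDM that runs out of hardware descriptors could throttle the core like
+ * this, where hw_fifo_full() is a hypothetical helper.
+ *
+ * if (hw_fifo_full(hdm))
+ * most_stop_enqueue(&hdm->iface, channel_idx);
+ *
+ * and, once the Tx-done interrupt frees descriptors again:
+ *
+ * most_resume_enqueue(&hdm->iface, channel_idx);
+ */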
+
+static int __init most_init(void)
+{
+ pr_info("init()\n");
+ INIT_LIST_HEAD(&instance_list);
+ INIT_LIST_HEAD(&aim_list);
+ mutex_init(&deregister_mutex);
+ ida_init(&mdev_id);
+
+ if (bus_register(&most_bus)) {
+ pr_info("Cannot register most bus\n");
+ goto exit;
+ }
+
+ most_class = class_create(THIS_MODULE, "most");
+ if (IS_ERR(most_class)) {
+ pr_info("No udev support.\n");
+ goto exit_bus;
+ }
+ if (driver_register(&mostcore)) {
+ pr_info("Cannot register core driver\n");
+ goto exit_class;
+ }
+
+ class_glue_dir =
+ device_create(most_class, NULL, 0, NULL, "mostcore");
+ if (IS_ERR(class_glue_dir))
+ goto exit_driver;
+
+ most_aim_kset =
+ kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
+ if (!most_aim_kset)
+ goto exit_class_container;
+
+ most_inst_kset =
+ kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
+ if (!most_inst_kset)
+ goto exit_driver_kset;
+
+ return 0;
+
+exit_driver_kset:
+ kset_unregister(most_aim_kset);
+exit_class_container:
+ device_destroy(most_class, 0);
+exit_driver:
+ driver_unregister(&mostcore);
+exit_class:
+ class_destroy(most_class);
+exit_bus:
+ bus_unregister(&most_bus);
+exit:
+ return -ENOMEM;
+}
+
+static void __exit most_exit(void)
+{
+ struct most_inst_obj *i, *i_tmp;
+ struct most_aim_obj *d, *d_tmp;
+
+ pr_info("exit core module\n");
+ list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
+ list_del(&d->list);
+ destroy_most_aim_obj(d);
+ }
+
+ list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+ list_del(&i->list);
+ destroy_most_inst_obj(i);
+ }
+ kset_unregister(most_inst_kset);
+ kset_unregister(most_aim_kset);
+ device_destroy(most_class, 0);
+ driver_unregister(&mostcore);
+ class_destroy(most_class);
+ bus_unregister(&most_bus);
+ ida_destroy(&mdev_id);
+}
+
+module_init(most_init);
+module_exit(most_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
new file mode 100644
index 000000000000..299c7d5aa73a
--- /dev/null
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -0,0 +1,316 @@
+/*
+ * mostcore.h - Interface between MostCore,
+ * Hardware Dependent Module (HDM) and Application Interface Module (AIM).
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/*
+ * Authors:
+ * Andrey Shvetsov <andrey.shvetsov@k2l.de>
+ * Christian Gromm <christian.gromm@microchip.com>
+ * Sebastian Graf
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+
+struct kobject;
+struct module;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+ ITYPE_LOOPBACK = 1,
+ ITYPE_I2C,
+ ITYPE_I2S,
+ ITYPE_TSI,
+ ITYPE_HBI,
+ ITYPE_MEDIALB_DIM,
+ ITYPE_MEDIALB_DIM2,
+ ITYPE_USB,
+ ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+ MOST_CH_RX = 1 << 0,
+ MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+ MOST_CH_CONTROL = 1 << 0,
+ MOST_CH_ASYNC = 1 << 1,
+ MOST_CH_ISOC_AVP = 1 << 2,
+ MOST_CH_SYNC = 1 << 5,
+};
+
+
+enum mbo_status_flags {
+ /* MBO was processed successfully (data was sent or received) */
+ MBO_SUCCESS = 0,
+ /* The MBO contains wrong or missing information. */
+ MBO_E_INVAL,
+ /* MBO was completed as the HDM channel will be closed */
+ MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is a bitwise OR-combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is a bitwise OR-combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is appended to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MostCore channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read-only sysfs attribute files in
+ * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
+ * following attributes:
+ * -available_directions
+ * -available_datatypes
+ * -number_of_packet_buffers
+ * -number_of_stream_buffers
+ * -size_of_packet_buffer
+ * -size_of_stream_buffer
+ * where the content of each file is a string with all supported properties of
+ * this very channel.
+ */
+struct most_channel_capability {
+ u16 direction;
+ u16 data_type;
+ u16 num_buffers_packet;
+ u16 buffer_size_packet;
+ u16 num_buffers_streaming;
+ u16 buffer_size_streaming;
+ char *name_suffix;
+};
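+
+/*
+ * A minimal sketch of how an HDM might describe two channels; the values
+ * are made up and depend entirely on the hardware.
+ *
+ * static struct most_channel_capability my_caps[] = {
+ * {
+ * .direction = MOST_CH_RX | MOST_CH_TX,
+ * .data_type = MOST_CH_CONTROL | MOST_CH_ASYNC,
+ * .num_buffers_packet = 32,
+ * .buffer_size_packet = 2048,
+ * },
+ * {
+ * .direction = MOST_CH_RX,
+ * .data_type = MOST_CH_SYNC,
+ * .num_buffers_streaming = 16,
+ * .buffer_size_streaming = 512,
+ * .name_suffix = "@sync",
+ * },
+ * };
+ */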
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * Buffer size may be cut down by the HDM in the configure callback
+ * to match a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ * packet. This is USB-specific.
+ *
+ * Describes the configuration for a MostCore channel. This information is
+ * provided by the MostCore to an HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+ u16 num_buffers;
+ u16 buffer_size;
+ u16 extra_len;
+ u16 subbuffer_size;
+ u16 packets_per_xact;
+};
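+
+/*
+ * Sketch of a configure callback, assuming hardware that pads each buffer
+ * to a 4-byte boundary; my_hdm_configure is a hypothetical name.
+ *
+ * static int my_hdm_configure(struct most_interface *iface, int channel_idx,
+ * struct most_channel_config *cfg)
+ * {
+ * cfg->extra_len = ALIGN(cfg->buffer_size, 4) - cfg->buffer_size;
+ * return 0;
+ * }
+ */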
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ * public: documented fields that are used for the communication
+ * between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The MostCore allocates and initializes the MBO.
+ *
+ * The HDM receives an MBO for transfer from the MostCore with the call to
+ * enqueue(). The HDM copies the data to or from the buffer depending on the
+ * configured channel direction, sets "processed_length" and "status" and
+ * completes the transfer procedure by calling the completion routine.
+ *
+ * At the end the MostCore deallocates the MBO or recycles it for further
+ * transfers for the same or different HDM.
+ *
+ * Directions of usage:
+ * I.
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ * II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this direction
+ * is violated memory leaks will occur, since the core driver does _not_ track
+ * MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+ void *context;
+ void *priv;
+ struct list_head list;
+ struct most_interface *ifp;
+ u16 hdm_channel_id;
+ void *virt_address;
+ dma_addr_t bus_address;
+ u16 buffer_length;
+ u16 processed_length;
+ enum mbo_status_flags status;
+ void (*complete)(struct mbo *);
+};
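+
+/*
+ * Illustrative completion sequence in an HDM, e.g. from its Rx interrupt
+ * handler; rx_len stands for a length reported by the hardware.
+ *
+ * mbo->processed_length = rx_len;
+ * mbo->status = MBO_SUCCESS;
+ * mbo->complete(mbo);
+ *
+ * The call to complete() returns ownership of the MBO to the MostCore.
+ */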
+
+/**
+ * Interface instance description.
+ *
+ * Describes one instance of an interface like Medusa PCIe or Vantage USB.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @interface Interface type. \sa most_interface_type.
+ * @description PRELIMINARY.
+ * Unique description of the device instance from the point of view of the
+ * interface in free text form (ASCII).
+ * It may be a hexadecimal representation of the memory address for the
+ * MediaLB IP, or the USB device ID with USB properties for a USB interface,
+ * etc.
+ * @num_channels Number of channels and size of the channel_vector.
+ * @channel_vector Properties of the channels.
+ * Array index represents the channel ID used by the driver.
+ * @configure Callback to change data type for the channel of the
+ * interface instance. May be zero if the instance of the interface is not
+ * configurable. Parameter channel_config describes direction and data
+ * type for the channel, configured by the higher level.
+ * @enqueue Delivers an MBO to the HDM for processing.
+ * After the HDM completes an Rx or Tx operation, the processed MBO shall
+ * be returned to the MostCore using the completion routine.
+ * An MBO may still be delivered by the MostCore after the channel has
+ * been poisoned, because the application may re-open the channel.
+ * In this case the HDM shall hold the MBOs and service the channel as usual.
+ * The HDM must be able to hold at least one MBO for each channel.
+ * The callback returns a negative value on error, otherwise 0.
+ * @poison_channel Informs the HDM about closing the channel. The HDM shall
+ * cancel all transfers and synchronously or asynchronously return
+ * all MBOs enqueued for this channel using the completion routine.
+ * The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieving of network info from the HDM by
+ * means of "Message exchange over MDP/MEP"
+ * @priv Private field used by mostcore to store context information.
+ */
+struct most_interface {
+ struct module *mod;
+ enum most_interface_type interface;
+ const char *description;
+ int num_channels;
+ struct most_channel_capability *channel_vector;
+ int (*configure)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *channel_config);
+ int (*enqueue)(struct most_interface *iface, int channel_idx,
+ struct mbo *mbo);
+ int (*poison_channel)(struct most_interface *iface, int channel_idx);
+ void (*request_netinfo)(struct most_interface *iface, int channel_idx);
+ void *priv;
+};
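+
+/*
+ * A reduced registration sketch for an HDM; my_configure, my_enqueue,
+ * my_poison and my_caps are hypothetical, the description string is an
+ * arbitrary example.
+ *
+ * static struct most_interface my_iface = {
+ * .mod = THIS_MODULE,
+ * .interface = ITYPE_MEDIALB_DIM2,
+ * .description = "mlb_dim2-deadbeef",
+ * .num_channels = ARRAY_SIZE(my_caps),
+ * .channel_vector = my_caps,
+ * .configure = my_configure,
+ * .enqueue = my_enqueue,
+ * .poison_channel = my_poison,
+ * };
+ *
+ * kobj = most_register_interface(&my_iface);
+ * if (IS_ERR(kobj))
+ * return PTR_ERR(kobj);
+ */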
+
+/**
+ * struct most_aim - identifies MOST device driver to mostcore
+ * @name: Driver name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: notification that a certain channel is no longer available
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ * @context: context pointer to be used by mostcore
+ */
+struct most_aim {
+ const char *name;
+ int (*probe_channel)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *cfg,
+ struct kobject *parent, char *name);
+ int (*disconnect_channel)(struct most_interface *iface,
+ int channel_idx);
+ int (*rx_completion)(struct mbo *mbo);
+ int (*tx_completion)(struct most_interface *iface, int channel_idx);
+ void *context;
+};
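+
+/*
+ * A reduced AIM skeleton; the handlers are hypothetical and only hint at
+ * the expected signatures. rx_completion shall return 0 once the MBO has
+ * been consumed.
+ *
+ * static struct most_aim my_aim = {
+ * .name = "my_aim",
+ * .probe_channel = my_probe_channel,
+ * .disconnect_channel = my_disconnect_channel,
+ * .rx_completion = my_rx_completion,
+ * .tx_completion = my_tx_completion,
+ * };
+ *
+ * ret = most_register_aim(&my_aim);
+ */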
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns a pointer to the kobject of the generated instance.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+struct kobject *most_register_interface(struct most_interface *iface);
+
+/**
+ * Deregisters an instance of the interface.
+ * @iface Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+int most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_aim(struct most_aim *aim);
+int most_deregister_aim(struct most_aim *aim);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx);
+void most_put_mbo(struct mbo *mbo);
+int most_start_channel(struct most_interface *iface, int channel_idx);
+int most_stop_channel(struct most_interface *iface, int channel_idx);
+
+
+#endif /* __MOST_CORE_H__ */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 7285c64bac24..ad30ce4206ef 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -948,7 +948,6 @@ static const struct of_device_id spinand_dt[] = {
static struct spi_driver spinand_driver = {
.driver = {
.name = "mt29f",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = spinand_dt,
},
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
index 7f2c24dc51b4..6c8e413b5b63 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -101,7 +101,7 @@ struct spinand_cmd {
u8 *rx_buf; /* Rx buf */
};
-extern int spinand_mtd(struct mtd_info *mtd);
-extern void spinand_mtd_release(struct mtd_info *mtd);
+int spinand_mtd(struct mtd_info *mtd);
+void spinand_mtd_release(struct mtd_info *mtd);
#endif /* __LINUX_MTD_SPI_NAND_H */
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
index 77c3c3522afa..e914147d7379 100644
--- a/drivers/staging/netlogic/platform_net.c
+++ b/drivers/staging/netlogic/platform_net.c
@@ -163,7 +163,7 @@ static void xls_gmac_init(void)
switch (nlm_prom_info.board_major_version) {
case 12:
/* first block RGMII or XAUI, use RGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII,
+ ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
ndata0.phy_addr[0] = 0;
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index 13e03f0a0a46..2f65ec5a615c 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -1102,4 +1102,4 @@ struct xlr_net_priv {
u64 *class_3_spill;
};
-extern void xlr_set_gmac_speed(struct xlr_net_priv *priv);
+void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index e271375053fa..2ec9de906ca3 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -165,19 +165,18 @@ struct nvec_chip {
int state;
};
-extern int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
- short size);
+int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
+ short size);
-extern struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
- const unsigned char *data, short size);
+struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size);
-extern int nvec_register_notifier(struct nvec_chip *nvec,
- struct notifier_block *nb,
- unsigned int events);
+int nvec_register_notifier(struct nvec_chip *nvec,
+ struct notifier_block *nb,
+ unsigned int events);
-extern int nvec_unregister_notifier(struct nvec_chip *dev,
- struct notifier_block *nb);
+int nvec_unregister_notifier(struct nvec_chip *dev, struct notifier_block *nb);
-extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
+void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
#endif
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index a530b55f27d8..5ed8483fc24d 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -25,7 +25,7 @@
extern const struct ethtool_ops cvm_oct_ethtool_ops;
-extern void octeon_mdiobus_force_mod_depencency(void);
+void octeon_mdiobus_force_mod_depencency(void);
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index beb7aac9c289..51dcb611702f 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -118,9 +118,10 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
}
/* Since the 10Mbps preamble workaround is allowed we need to enable
- preamble checking, FCS stripping, and clear error bits on
- every speed change. If errors occur during 10Mbps operation
- the above code will change this stuff */
+ * preamble checking, FCS stripping, and clear error bits on
+ * every speed change. If errors occur during 10Mbps operation
+ * the above code will change this stuff
+ */
cvm_oct_set_hw_preamble(priv, true);
if (priv->phydev == NULL) {
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 22853d33da05..d1a33a927f6d 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -70,7 +70,14 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
*/
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
- if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+ int port;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ port = work->word0.pip.cn68xx.pknd;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
/*
* Ignore length errors on min size packets. Some
* equipment incorrectly pads packets to 64+4FCS
@@ -87,8 +94,8 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
* packet to determine if we can remove a non spec
* preamble and generate a correct packet.
*/
- int interface = cvmx_helper_get_interface_num(work->ipprt);
- int index = cvmx_helper_get_interface_index_num(work->ipprt);
+ int interface = cvmx_helper_get_interface_num(port);
+ int index = cvmx_helper_get_interface_index_num(port);
union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
gmxx_rxx_frm_ctl.u64 =
@@ -99,7 +106,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
cvmx_phys_to_ptr(work->packet_ptr.s.addr);
int i = 0;
- while (i < work->len - 1) {
+ while (i < work->word1.len - 1) {
if (*ptr != 0x55)
break;
ptr++;
@@ -109,18 +116,18 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
if (*ptr == 0xd5) {
/*
printk_ratelimited("Port %d received 0xd5 preamble\n",
- work->ipprt);
+ port);
*/
work->packet_ptr.s.addr += i + 1;
- work->len -= i + 5;
+ work->word1.len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
/*
printk_ratelimited("Port %d received 0x?d preamble\n",
- work->ipprt);
+ port);
*/
work->packet_ptr.s.addr += i;
- work->len -= i + 4;
- for (i = 0; i < work->len; i++) {
+ work->word1.len -= i + 4;
+ for (i = 0; i < work->word1.len; i++) {
*ptr =
((*ptr & 0xf0) >> 4) |
((*(ptr + 1) & 0xf) << 4);
@@ -128,7 +135,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
} else {
printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- work->ipprt);
+ port);
/*
cvmx_helper_dump_packet(work);
*/
@@ -138,7 +145,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
} else {
printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- work->ipprt, work->word2.snoip.err_code);
+ port, work->word2.snoip.err_code);
cvm_oct_free_work(work);
return 1;
}
@@ -172,9 +179,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
/* Only allow work for our group (and preserve priorities) */
- old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
+ 1ull << pow_receive_group);
+ cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+ } else {
+ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+ (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ }
if (USE_ASYNC_IOBDMA) {
cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
@@ -186,6 +200,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
struct sk_buff **pskb = NULL;
int skb_in_hw;
cvmx_wqe_t *work;
+ int port;
if (USE_ASYNC_IOBDMA && did_work_request)
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
@@ -195,12 +210,19 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
prefetch(work);
did_work_request = 0;
if (work == NULL) {
- union cvmx_pow_wq_int wq_int;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
+ 1ull << pow_receive_group);
+ cvmx_write_csr(CVMX_SSO_WQ_INT,
+ 1ull << pow_receive_group);
+ } else {
+ union cvmx_pow_wq_int wq_int;
- wq_int.u64 = 0;
- wq_int.s.iq_dis = 1 << pow_receive_group;
- wq_int.s.wq_int = 1 << pow_receive_group;
- cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+ wq_int.u64 = 0;
+ wq_int.s.iq_dis = 1 << pow_receive_group;
+ wq_int.s.wq_int = 1 << pow_receive_group;
+ cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+ }
break;
}
pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
@@ -220,7 +242,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
prefetch(&skb->head);
prefetch(&skb->len);
}
- prefetch(cvm_oct_device[work->ipprt]);
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ port = work->word0.pip.cn68xx.pknd;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ prefetch(cvm_oct_device[port]);
/* Immediately throw away all packets with receive errors */
if (unlikely(work->word2.snoip.rcv_error)) {
@@ -237,7 +265,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->data = skb->head + work->packet_ptr.s.addr -
cvmx_ptr_to_phys(skb->head);
prefetch(skb->data);
- skb->len = work->len;
+ skb->len = work->word1.len;
skb_set_tail_pointer(skb, skb->len);
packet_not_copied = 1;
} else {
@@ -245,7 +273,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* We have to copy the packet. First allocate
* an skbuff for it.
*/
- skb = dev_alloc_skb(work->len);
+ skb = dev_alloc_skb(work->word1.len);
if (!skb) {
cvm_oct_free_work(work);
continue;
@@ -268,13 +296,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
else
ptr += 6;
}
- memcpy(skb_put(skb, work->len), ptr, work->len);
+ memcpy(skb_put(skb, work->word1.len), ptr,
+ work->word1.len);
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr =
work->packet_ptr;
- int len = work->len;
+ int len = work->word1.len;
while (segments--) {
union cvmx_buf_ptr next_ptr =
@@ -310,10 +339,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
packet_not_copied = 0;
}
-
- if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
- cvm_oct_device[work->ipprt])) {
- struct net_device *dev = cvm_oct_device[work->ipprt];
+ if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
+ cvm_oct_device[port])) {
+ struct net_device *dev = cvm_oct_device[port];
struct octeon_ethernet *priv = netdev_priv(dev);
/*
@@ -333,7 +361,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Increment RX stats for virtual ports */
- if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+ if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
atomic64_add(1,
(atomic64_t *)&priv->stats.rx_packets);
@@ -368,7 +396,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* doesn't exist.
*/
printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- work->ipprt);
+ port);
dev_kfree_skb_irq(skb);
}
/*
@@ -390,7 +418,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
}
/* Restore the original POW group mask */
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
+ cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+ } else {
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+ }
+
if (USE_ASYNC_IOBDMA) {
/* Restore the scratch area */
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
@@ -422,8 +456,6 @@ void cvm_oct_rx_initialize(void)
{
int i;
struct net_device *dev_for_napi = NULL;
- union cvmx_pow_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
if (cvm_oct_device[i]) {
@@ -449,15 +481,34 @@ void cvm_oct_rx_initialize(void)
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
/* Enable POW interrupt when our port has at least one packet */
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_sso_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
+ int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+ } else {
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
+ int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+ }
/* Schedule NAPI now. This will indirectly enable the interrupt. */
napi_schedule(&cvm_oct_napi);
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 7c1c1b052b7d..9e2116f4c915 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -396,10 +396,12 @@ dont_put_skbuff_in_hw:
/* Check if we can use the hardware checksumming */
if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
- ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == htons(1 << 14)))
- && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
- || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
+ (ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->ihl == 5) &&
+ ((ip_hdr(skb)->frag_off == 0) ||
+ (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
+ ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
}
@@ -589,13 +591,14 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
* Fill in some of the work queue fields. We may need to add
* more if the software at the other end needs them.
*/
- work->hw_chksum = skb->csum;
- work->len = skb->len;
- work->ipprt = priv->port;
- work->qos = priv->port & 0x7;
- work->grp = pow_send_group;
- work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- work->tag = pow_send_group; /* FIXME */
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ work->word0.pip.cn38xx.hw_chksum = skb->csum;
+ work->word1.len = skb->len;
+ cvmx_wqe_set_port(work, priv->port);
+ cvmx_wqe_set_qos(work, priv->port & 0x7);
+ cvmx_wqe_set_grp(work, pow_send_group);
+ work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ work->word1.tag = pow_send_group; /* FIXME */
/* Default to zero. Sets of zero later are commented out */
work->word2.u64 = 0;
work->word2.s.bufs = 1;
@@ -675,8 +678,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
}
/* Submit the packet to the POW */
- cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
- work->grp);
+ cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
+ cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 1ba789a7741b..45f024bc5e33 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -8,6 +8,10 @@
* published by the Free Software Foundation.
*/
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
/**
* cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
@@ -28,14 +32,12 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
*/
static inline int INTERFACE(int ipd_port)
{
- if (ipd_port < 32) /* Interface 0 or 1 for RGMII,GMII,SPI, etc */
- return ipd_port >> 4;
- else if (ipd_port < 36) /* Interface 2 for NPI */
- return 2;
- else if (ipd_port < 40) /* Interface 3 for loopback */
- return 3;
- else if (ipd_port == 40) /* Non existent interface for POW0 */
- return 4;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+
+ if (interface >= 0)
+ return interface;
+ else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+ return 10;
panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
}
@@ -47,7 +49,5 @@ static inline int INTERFACE(int ipd_port)
*/
static inline int INDEX(int ipd_port)
{
- if (ipd_port < 32)
- return ipd_port & 15;
- return ipd_port & 3;
+ return cvmx_helper_get_interface_index_num(ipd_port);
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f9dba23a3759..7274fda0b77b 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -152,11 +152,12 @@ static void cvm_oct_configure_common_hw(void)
num_packet_buffers);
if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
#ifdef __LITTLE_ENDIAN
{
union cvmx_ipd_ctl_status ipd_ctl_status;
+
ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
ipd_ctl_status.s.pkt_lend = 1;
ipd_ctl_status.s.wqe_lend = 1;
@@ -859,7 +860,10 @@ static int cvm_oct_remove(struct platform_device *pdev)
int port;
/* Disable POW interrupt */
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
+ else
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
cvmx_ipd_disable();
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index e9d3e9a7e8a7..a242c700bc53 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -53,20 +53,20 @@ struct octeon_ethernet {
int cvm_oct_free_work(void *work_queue_entry);
-extern int cvm_oct_rgmii_init(struct net_device *dev);
-extern void cvm_oct_rgmii_uninit(struct net_device *dev);
-extern int cvm_oct_rgmii_open(struct net_device *dev);
+int cvm_oct_rgmii_init(struct net_device *dev);
+void cvm_oct_rgmii_uninit(struct net_device *dev);
+int cvm_oct_rgmii_open(struct net_device *dev);
-extern int cvm_oct_sgmii_init(struct net_device *dev);
-extern int cvm_oct_sgmii_open(struct net_device *dev);
+int cvm_oct_sgmii_init(struct net_device *dev);
+int cvm_oct_sgmii_open(struct net_device *dev);
-extern int cvm_oct_spi_init(struct net_device *dev);
-extern void cvm_oct_spi_uninit(struct net_device *dev);
-extern int cvm_oct_xaui_init(struct net_device *dev);
-extern int cvm_oct_xaui_open(struct net_device *dev);
+int cvm_oct_spi_init(struct net_device *dev);
+void cvm_oct_spi_uninit(struct net_device *dev);
+int cvm_oct_xaui_init(struct net_device *dev);
+int cvm_oct_xaui_open(struct net_device *dev);
-extern int cvm_oct_common_init(struct net_device *dev);
-extern void cvm_oct_common_uninit(struct net_device *dev);
+int cvm_oct_common_init(struct net_device *dev);
+void cvm_oct_common_uninit(struct net_device *dev);
void cvm_oct_adjust_link(struct net_device *dev);
int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index aec98958f795..d06e19db1b80 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -98,7 +98,7 @@ struct dcon_platform_data {
#include <linux/interrupt.h>
-extern irqreturn_t dcon_interrupt(int irq, void *id);
+irqreturn_t dcon_interrupt(int irq, void *id);
#ifdef CONFIG_FB_OLPC_DCON_1
extern struct dcon_platform_data dcon_pdata_xo_1;
diff --git a/drivers/staging/ozwpan/Kconfig b/drivers/staging/ozwpan/Kconfig
deleted file mode 100644
index 7904caec546a..000000000000
--- a/drivers/staging/ozwpan/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config USB_WPAN_HCD
- tristate "USB over WiFi Host Controller"
- depends on USB && NET
- help
- A driver for USB Host Controllers that are compatible with
- Ozmo Devices USB over WiFi technology.
-
- To compile this driver a module, choose M here: the module
- will be called "ozwpan".
diff --git a/drivers/staging/ozwpan/Makefile b/drivers/staging/ozwpan/Makefile
deleted file mode 100644
index 29529c1a8e3c..000000000000
--- a/drivers/staging/ozwpan/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011 Ozmo Inc
-# Released under the GNU General Public License Version 2 (GPLv2).
-# -----------------------------------------------------------------------------
-
-obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
-ozwpan-y := \
- ozmain.o \
- ozpd.o \
- ozusbsvc.o \
- ozusbsvc1.o \
- ozhcd.o \
- ozeltbuf.o \
- ozproto.o \
- ozcdev.o \
- ozurbparanoia.o
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
deleted file mode 100644
index 7c055ec99544..000000000000
--- a/drivers/staging/ozwpan/README
+++ /dev/null
@@ -1,25 +0,0 @@
-OZWPAN USB Host Controller Driver
----------------------------------
-This driver is a USB HCD driver that does not have an associated a physical
-device but instead uses Wi-Fi to communicate with the wireless peripheral.
-The USB requests are converted into a layer 2 network protocol and transmitted
-on the network using an ethertype (0x892e) regestered to Ozmo Device Inc.
-This driver is compatible with existing wireless devices that use Ozmo Devices
-technology.
-
-To operate the driver must be bound to a suitable network interface. This can
-be done when the module is loaded (specifying the name of the network interface
-as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or can be bound after
-loading using an ioctl call. See the ozappif.h file and the ioctls
-OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
-
-The devices connect to the host use Wi-Fi Direct so a network card that supports
-Wi-Fi direct is required. A recent version (0.8.x or later) version of the
-wpa_supplicant can be used to setup the network interface to create a persistent
-autonomous group (for older pre-WFD peripherals) or put in a listen state to
-allow group negotiation to occur for more recent devices that support WFD.
-
-The protocol used over the network does not directly mimic the USB bus
-transactions as this would be rather busy and inefficient. Instead the chapter 9
-requests are converted into a request/response pair of messages. (See
-ozprotocol.h for data structures used in the protocol).
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
deleted file mode 100644
index f32c1c0bc875..000000000000
--- a/drivers/staging/ozwpan/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-TODO:
- - Convert event tracing code to in-kernel tracing infrastructure
- - Check for remaining ioctl & check if that can be converted into
- sysfs entries
- - Convert debug prints to appropriate dev_debug or something better
- - Modify Kconfig to add CONFIG option for enabling/disabling event
- tracing.
- - check USB HCD implementation is complete and correct.
- - code review by USB developer community.
- - testing with as many devices as possible.
-
-Please send any patches for this driver to
-Shigekatsu Tateno <shigekatsu.tateno@atmel.com>
-and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
deleted file mode 100644
index ea1b271fdcda..000000000000
--- a/drivers/staging/ozwpan/ozappif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZAPPIF_H
-#define _OZAPPIF_H
-
-#define OZ_IOCTL_MAGIC 0xf4
-
-struct oz_mac_addr {
- __u8 a[6];
-};
-
-#define OZ_MAX_PDS 8
-
-struct oz_pd_list {
- __u32 count;
- struct oz_mac_addr addr[OZ_MAX_PDS];
-};
-
-#define OZ_MAX_BINDING_LEN 32
-
-struct oz_binding_info {
- char name[OZ_MAX_BINDING_LEN];
-};
-
-#define OZ_IOCTL_GET_PD_LIST _IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list)
-#define OZ_IOCTL_SET_ACTIVE_PD _IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr)
-#define OZ_IOCTL_GET_ACTIVE_PD _IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr)
-#define OZ_IOCTL_ADD_BINDING _IOW(OZ_IOCTL_MAGIC, 3, struct oz_binding_info)
-#define OZ_IOCTL_REMOVE_BINDING _IOW(OZ_IOCTL_MAGIC, 4, struct oz_binding_info)
-#define OZ_IOCTL_MAX 5
-
-
-#endif /* _OZAPPIF_H */
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
deleted file mode 100644
index da0e1fd50f26..000000000000
--- a/drivers/staging/ozwpan/ozcdev.c
+++ /dev/null
@@ -1,554 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozappif.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-
-#define OZ_RD_BUF_SZ 256
-struct oz_cdev {
- dev_t devnum;
- struct cdev cdev;
- wait_queue_head_t rdq;
- spinlock_t lock;
- u8 active_addr[ETH_ALEN];
- struct oz_pd *active_pd;
-};
-
-/* Per PD context for the serial service stored in the PD. */
-struct oz_serial_ctx {
- atomic_t ref_count;
- u8 tx_seq_num;
- u8 rx_seq_num;
- u8 rd_buf[OZ_RD_BUF_SZ];
- int rd_in;
- int rd_out;
-};
-
-static struct oz_cdev g_cdev;
-static struct class *g_oz_class;
-
-/*
- * Context: process and softirq
- */
-static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
-{
- struct oz_serial_ctx *ctx;
-
- spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
- if (ctx)
- atomic_inc(&ctx->ref_count);
- spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- return ctx;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
-{
- if (atomic_dec_and_test(&ctx->ref_count)) {
- oz_dbg(ON, "Dealloc serial context\n");
- kfree(ctx);
- }
-}
-
-/*
- * Context: process
- */
-static int oz_cdev_open(struct inode *inode, struct file *filp)
-{
- struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
-
- oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
-
- filp->private_data = dev;
- return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_cdev_release(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/*
- * Context: process
- */
-static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
- loff_t *fpos)
-{
- int n;
- int ix;
-
- struct oz_pd *pd;
- struct oz_serial_ctx *ctx;
-
- spin_lock_bh(&g_cdev.lock);
- pd = g_cdev.active_pd;
- if (pd)
- oz_pd_get(pd);
- spin_unlock_bh(&g_cdev.lock);
- if (pd == NULL)
- return -1;
- ctx = oz_cdev_claim_ctx(pd);
- if (ctx == NULL)
- goto out2;
- n = ctx->rd_in - ctx->rd_out;
- if (n < 0)
- n += OZ_RD_BUF_SZ;
- if (count > n)
- count = n;
- ix = ctx->rd_out;
- n = OZ_RD_BUF_SZ - ix;
- if (n > count)
- n = count;
- if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
- count = 0;
- goto out1;
- }
- ix += n;
- if (ix == OZ_RD_BUF_SZ)
- ix = 0;
- if (n < count) {
- if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
- count = 0;
- goto out1;
- }
- ix = count-n;
- }
- ctx->rd_out = ix;
-out1:
- oz_cdev_release_ctx(ctx);
-out2:
- oz_pd_put(pd);
- return count;
-}
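The read path above treats rd_buf as a ring: one slot is always left unused so
that rd_in == rd_out can only mean "empty", and the two-step copy handles a
span that wraps past the end of the buffer. A minimal stand-alone sketch of
the same index arithmetic (the size and names are illustrative, not part of
the driver):

#include <stdio.h>

#define RING_SZ 256	/* mirrors OZ_RD_BUF_SZ */

/* Bytes available to read: in - out, wrapped into [0, RING_SZ). */
static int ring_used(int in, int out)
{
	int n = in - out;

	if (n < 0)
		n += RING_SZ;
	return n;
}

/* Bytes writable without overtaking the reader; one slot is sacrificed
 * so that a full ring never looks identical to an empty one. */
static int ring_free(int in, int out)
{
	int n = out - in - 1;

	if (n < 0)
		n += RING_SZ;
	return n;
}

int main(void)
{
	/* Writer at 10, reader at 250: 16 bytes buffered, 239 free. */
	printf("used=%d free=%d\n", ring_used(10, 250), ring_free(10, 250));
	return 0;
}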
-
-/*
- * Context: process
- */
-static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *fpos)
-{
- struct oz_pd *pd;
- struct oz_elt_buf *eb;
- struct oz_elt_info *ei;
- struct oz_elt *elt;
- struct oz_app_hdr *app_hdr;
- struct oz_serial_ctx *ctx;
-
- if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
- return -EINVAL;
-
- spin_lock_bh(&g_cdev.lock);
- pd = g_cdev.active_pd;
- if (pd)
- oz_pd_get(pd);
- spin_unlock_bh(&g_cdev.lock);
- if (pd == NULL)
- return -ENXIO;
-	if (!(pd->state & OZ_PD_S_CONNECTED)) {
-		oz_pd_put(pd);
-		return -EAGAIN;
-	}
- eb = &pd->elt_buff;
- ei = oz_elt_info_alloc(eb);
- if (ei == NULL) {
- count = 0;
- goto out;
- }
- elt = (struct oz_elt *)ei->data;
- app_hdr = (struct oz_app_hdr *)(elt+1);
- elt->length = sizeof(struct oz_app_hdr) + count;
- elt->type = OZ_ELT_APP_DATA;
- ei->app_id = OZ_APPID_SERIAL;
- ei->length = elt->length + sizeof(struct oz_elt);
- app_hdr->app_id = OZ_APPID_SERIAL;
- if (copy_from_user(app_hdr+1, buf, count))
- goto out;
-	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
- if (ctx) {
- app_hdr->elt_seq_num = ctx->tx_seq_num++;
- if (ctx->tx_seq_num == 0)
- ctx->tx_seq_num = 1;
- spin_lock(&eb->lock);
- if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
- ei = NULL;
- spin_unlock(&eb->lock);
- }
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-out:
- if (ei) {
- count = 0;
- spin_lock_bh(&eb->lock);
- oz_elt_info_free(eb, ei);
- spin_unlock_bh(&eb->lock);
- }
- oz_pd_put(pd);
- return count;
-}
-
-/*
- * Context: process
- */
-static int oz_set_active_pd(const u8 *addr)
-{
- int rc = 0;
- struct oz_pd *pd;
- struct oz_pd *old_pd;
-
- pd = oz_pd_find(addr);
- if (pd) {
- spin_lock_bh(&g_cdev.lock);
- ether_addr_copy(g_cdev.active_addr, addr);
- old_pd = g_cdev.active_pd;
- g_cdev.active_pd = pd;
- spin_unlock_bh(&g_cdev.lock);
- if (old_pd)
- oz_pd_put(old_pd);
- } else {
- if (is_zero_ether_addr(addr)) {
- spin_lock_bh(&g_cdev.lock);
- pd = g_cdev.active_pd;
- g_cdev.active_pd = NULL;
- memset(g_cdev.active_addr, 0,
- sizeof(g_cdev.active_addr));
- spin_unlock_bh(&g_cdev.lock);
- if (pd)
- oz_pd_put(pd);
- } else {
-			rc = -ENXIO;
- }
- }
- return rc;
-}
-
-/*
- * Context: process
- */
-static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- int rc = 0;
-
- if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
- return -ENOTTY;
- if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
- return -ENOTTY;
- if (_IOC_DIR(cmd) & _IOC_READ)
- rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
- _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) & _IOC_WRITE)
- rc = !access_ok(VERIFY_READ, (void __user *)arg,
- _IOC_SIZE(cmd));
- if (rc)
- return -EFAULT;
- switch (cmd) {
- case OZ_IOCTL_GET_PD_LIST: {
- struct oz_pd_list list;
-
- oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
- memset(&list, 0, sizeof(list));
- list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
- if (copy_to_user((void __user *)arg, &list,
- sizeof(list)))
- return -EFAULT;
- }
- break;
- case OZ_IOCTL_SET_ACTIVE_PD: {
- u8 addr[ETH_ALEN];
-
- oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
- if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
- return -EFAULT;
- rc = oz_set_active_pd(addr);
- }
- break;
- case OZ_IOCTL_GET_ACTIVE_PD: {
- u8 addr[ETH_ALEN];
-
- oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
- spin_lock_bh(&g_cdev.lock);
- ether_addr_copy(addr, g_cdev.active_addr);
- spin_unlock_bh(&g_cdev.lock);
- if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
- return -EFAULT;
- }
- break;
- case OZ_IOCTL_ADD_BINDING:
- case OZ_IOCTL_REMOVE_BINDING: {
- struct oz_binding_info b;
-
- if (copy_from_user(&b, (void __user *)arg,
- sizeof(struct oz_binding_info))) {
- return -EFAULT;
- }
- /* Make sure name is null terminated. */
- b.name[OZ_MAX_BINDING_LEN-1] = 0;
- if (cmd == OZ_IOCTL_ADD_BINDING)
- oz_binding_add(b.name);
- else
- oz_binding_remove(b.name);
- }
- break;
- }
- return rc;
-}
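For context, a hypothetical user-space caller of the ioctls above could look
like the sketch below. It assumes the definitions from ozappif.h and a
/dev/ozwpan node matching the name passed to device_create() in
oz_cdev_register(); the layout of struct oz_pd_list (a count plus an address
array) is inferred from its use in OZ_IOCTL_GET_PD_LIST, so treat the field
names as assumptions rather than a documented ABI.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ozappif.h"	/* OZ_IOCTL_* and struct oz_pd_list */

int main(void)
{
	struct oz_pd_list list;			/* field names inferred */
	int fd = open("/dev/ozwpan", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, OZ_IOCTL_GET_PD_LIST, &list) == 0)
		printf("%d PD(s) visible\n", list.count);
	close(fd);
	return 0;
}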
-
-/*
- * Context: process
- */
-static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
-{
-	unsigned int ret = 0;
-	struct oz_cdev *dev = filp->private_data;
-
-	oz_dbg(ON, "Poll called wait = %p\n", wait);
-	/* Register on the wait queue before sampling the buffer state so
-	 * that a wakeup between the two steps cannot be missed;
-	 * poll_wait() itself ignores a NULL table.
-	 */
-	poll_wait(filp, &dev->rdq, wait);
-	spin_lock_bh(&dev->lock);
-	if (dev->active_pd) {
-		struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
-
-		if (ctx) {
-			if (ctx->rd_in != ctx->rd_out)
-				ret |= POLLIN | POLLRDNORM;
-			oz_cdev_release_ctx(ctx);
-		}
-	}
-	spin_unlock_bh(&dev->lock);
-	return ret;
-}
-
-/*
- */
-static const struct file_operations oz_fops = {
- .owner = THIS_MODULE,
- .open = oz_cdev_open,
- .release = oz_cdev_release,
- .read = oz_cdev_read,
- .write = oz_cdev_write,
- .unlocked_ioctl = oz_cdev_ioctl,
- .poll = oz_cdev_poll
-};
-
-/*
- * Context: process
- */
-int oz_cdev_register(void)
-{
- int err;
- struct device *dev;
-
- memset(&g_cdev, 0, sizeof(g_cdev));
- err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
- if (err < 0)
- return err;
- oz_dbg(ON, "Alloc dev number %d:%d\n",
- MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
- cdev_init(&g_cdev.cdev, &oz_fops);
- g_cdev.cdev.owner = THIS_MODULE;
- spin_lock_init(&g_cdev.lock);
- init_waitqueue_head(&g_cdev.rdq);
- err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
- if (err < 0) {
- oz_dbg(ON, "Failed to add cdev\n");
- goto unregister;
- }
- g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
- if (IS_ERR(g_oz_class)) {
- oz_dbg(ON, "Failed to register ozmo_wpan class\n");
- err = PTR_ERR(g_oz_class);
- goto delete;
- }
- dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
- if (IS_ERR(dev)) {
- oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
- err = PTR_ERR(dev);
- goto delete;
- }
- return 0;
-
-delete:
- cdev_del(&g_cdev.cdev);
-unregister:
- unregister_chrdev_region(g_cdev.devnum, 1);
- return err;
-}
-
-/*
- * Context: process
- */
-int oz_cdev_deregister(void)
-{
-	if (g_oz_class) {
-		device_destroy(g_oz_class, g_cdev.devnum);
-		class_destroy(g_oz_class);
-	}
-	cdev_del(&g_cdev.cdev);
-	unregister_chrdev_region(g_cdev.devnum, 1);
- return 0;
-}
-
-/*
- * Context: process
- */
-int oz_cdev_init(void)
-{
- oz_app_enable(OZ_APPID_SERIAL, 1);
- return 0;
-}
-
-/*
- * Context: process
- */
-void oz_cdev_term(void)
-{
- oz_app_enable(OZ_APPID_SERIAL, 0);
-}
-
-/*
- * Context: softirq-serialized
- */
-int oz_cdev_start(struct oz_pd *pd, int resume)
-{
- struct oz_serial_ctx *ctx;
- struct oz_serial_ctx *old_ctx;
-
- if (resume) {
- oz_dbg(ON, "Serial service resumed\n");
- return 0;
- }
- ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
- if (ctx == NULL)
- return -ENOMEM;
- atomic_set(&ctx->ref_count, 1);
- ctx->tx_seq_num = 1;
- spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- old_ctx = pd->app_ctx[OZ_APPID_SERIAL];
- if (old_ctx) {
- spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- kfree(ctx);
- } else {
- pd->app_ctx[OZ_APPID_SERIAL] = ctx;
- spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- }
- spin_lock(&g_cdev.lock);
- if ((g_cdev.active_pd == NULL) &&
- ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) {
- oz_pd_get(pd);
- g_cdev.active_pd = pd;
- oz_dbg(ON, "Active PD arrived\n");
- }
- spin_unlock(&g_cdev.lock);
- oz_dbg(ON, "Serial service started\n");
- return 0;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_cdev_stop(struct oz_pd *pd, int pause)
-{
- struct oz_serial_ctx *ctx;
-
- if (pause) {
- oz_dbg(ON, "Serial service paused\n");
- return;
- }
- spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
- pd->app_ctx[OZ_APPID_SERIAL] = NULL;
- spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
- if (ctx)
- oz_cdev_release_ctx(ctx);
- spin_lock(&g_cdev.lock);
- if (pd == g_cdev.active_pd)
- g_cdev.active_pd = NULL;
- else
- pd = NULL;
- spin_unlock(&g_cdev.lock);
- if (pd) {
- oz_pd_put(pd);
- oz_dbg(ON, "Active PD departed\n");
- }
- oz_dbg(ON, "Serial service stopped\n");
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
-{
- struct oz_serial_ctx *ctx;
- struct oz_app_hdr *app_hdr;
- u8 *data;
- int len;
- int space;
- int copy_sz;
- int ix;
-
- ctx = oz_cdev_claim_ctx(pd);
- if (ctx == NULL) {
- oz_dbg(ON, "Cannot claim serial context\n");
- return;
- }
-
- app_hdr = (struct oz_app_hdr *)(elt+1);
- /* If sequence number is non-zero then check it is not a duplicate.
- */
- if (app_hdr->elt_seq_num != 0) {
- if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
- /* Reject duplicate element. */
- oz_dbg(ON, "Duplicate element:%02x %02x\n",
- app_hdr->elt_seq_num, ctx->rx_seq_num);
- goto out;
- }
- }
- ctx->rx_seq_num = app_hdr->elt_seq_num;
- len = elt->length - sizeof(struct oz_app_hdr);
- data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
- if (len <= 0)
- goto out;
- space = ctx->rd_out - ctx->rd_in - 1;
- if (space < 0)
- space += OZ_RD_BUF_SZ;
- if (len > space) {
- oz_dbg(ON, "Not enough space:%d %d\n", len, space);
- len = space;
- }
- ix = ctx->rd_in;
- copy_sz = OZ_RD_BUF_SZ - ix;
- if (copy_sz > len)
- copy_sz = len;
- memcpy(&ctx->rd_buf[ix], data, copy_sz);
- len -= copy_sz;
- ix += copy_sz;
- if (ix == OZ_RD_BUF_SZ)
- ix = 0;
- if (len) {
- memcpy(ctx->rd_buf, data+copy_sz, len);
- ix = len;
- }
- ctx->rd_in = ix;
- wake_up(&g_cdev.rdq);
-out:
- oz_cdev_release_ctx(ctx);
-}
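The duplicate test above is 8-bit serial-number arithmetic: if
(rx_seq_num - elt_seq_num) has bit 7 clear, the incoming element is at, or up
to 127 steps behind, the last accepted one and is dropped; anything "ahead"
in mod-256 order passes. The predicate in isolation, with illustrative names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when 'seq' does not lie strictly ahead of 'last' in mod-256
 * serial-number order, i.e. it is a duplicate or stale element. */
static bool seq_is_stale(uint8_t last, uint8_t seq)
{
	return ((uint8_t)(last - seq) & 0x80) == 0;
}

int main(void)
{
	assert(seq_is_stale(5, 5));	/* same element again */
	assert(seq_is_stale(5, 3));	/* already consumed */
	assert(!seq_is_stale(5, 6));	/* next in order */
	assert(!seq_is_stale(250, 2));	/* ahead, across the wrap */
	puts("ok");
	return 0;
}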
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
deleted file mode 100644
index dd11935a093f..000000000000
--- a/drivers/staging/ozwpan/ozcdev.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZCDEV_H
-#define _OZCDEV_H
-
-int oz_cdev_register(void);
-int oz_cdev_deregister(void);
-int oz_cdev_init(void);
-void oz_cdev_term(void);
-int oz_cdev_start(struct oz_pd *pd, int resume);
-void oz_cdev_stop(struct oz_pd *pd, int pause);
-void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
-
-#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozdbg.h b/drivers/staging/ozwpan/ozdbg.h
deleted file mode 100644
index b86a2b7e0178..000000000000
--- a/drivers/staging/ozwpan/ozdbg.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-
-#ifndef _OZDBG_H
-#define _OZDBG_H
-
-#define OZ_WANT_DBG 0
-#define OZ_WANT_VERBOSE_DBG 1
-
-#define OZ_DBG_ON 0x0
-#define OZ_DBG_STREAM 0x1
-#define OZ_DBG_URB 0x2
-#define OZ_DBG_CTRL_DETAIL 0x4
-#define OZ_DBG_HUB 0x8
-#define OZ_DBG_RX_FRAMES 0x10
-#define OZ_DBG_TX_FRAMES 0x20
-
-#define OZ_DEFAULT_DBG_MASK \
- ( \
- /* OZ_DBG_STREAM | */ \
- /* OZ_DBG_URB | */ \
- /* OZ_DBG_CTRL_DETAIL | */ \
- OZ_DBG_HUB | \
- /* OZ_DBG_RX_FRAMES | */ \
- /* OZ_DBG_TX_FRAMES | */ \
- 0)
-
-extern unsigned int oz_dbg_mask;
-
-#define oz_want_dbg(mask) \
- ((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) || \
- (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask)))
-
-#define oz_dbg(mask, fmt, ...) \
-do { \
- if (oz_want_dbg(mask)) \
- pr_debug(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define oz_cdev_dbg(cdev, mask, fmt, ...) \
-do { \
- if (oz_want_dbg(mask)) \
- netdev_dbg((cdev)->dev, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define oz_pd_dbg(pd, mask, fmt, ...) \
-do { \
- if (oz_want_dbg(mask)) \
- pr_debug(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#endif /* _OZDBG_H */
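The oz_want_dbg() predicate gates each message twice: the first clause,
enabled by OZ_WANT_DBG, admits only the ON pseudo-category (OZ_DBG_ON is 0, so
every other category fails the equality), while the second, enabled by
OZ_WANT_VERBOSE_DBG, tests the category bit against the runtime oz_dbg_mask.
With the values above (0 and 1), ON messages are compiled out and only
categories whose bit is set in oz_dbg_mask print. Typical call sites:

	/* The bare category token is pasted into OZ_DBG_##mask. */
	oz_dbg(ON, "gated by OZ_WANT_DBG, off with the defaults above\n");
	oz_dbg(STREAM, "printed when oz_dbg_mask has OZ_DBG_STREAM set\n");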
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
deleted file mode 100644
index 01b25da44241..000000000000
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-
-/*
- * Context: softirq-serialized
- */
-void oz_elt_buf_init(struct oz_elt_buf *buf)
-{
- memset(buf, 0, sizeof(struct oz_elt_buf));
- INIT_LIST_HEAD(&buf->stream_list);
- INIT_LIST_HEAD(&buf->order_list);
- INIT_LIST_HEAD(&buf->isoc_list);
- spin_lock_init(&buf->lock);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_elt_buf_term(struct oz_elt_buf *buf)
-{
- struct oz_elt_info *ei, *n;
-
- list_for_each_entry_safe(ei, n, &buf->isoc_list, link_order)
- kfree(ei);
- list_for_each_entry_safe(ei, n, &buf->order_list, link_order)
- kfree(ei);
-}
-
-/*
- * Context: softirq or process
- */
-struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
-{
- struct oz_elt_info *ei;
-
- ei = kmem_cache_zalloc(oz_elt_info_cache, GFP_ATOMIC);
- if (ei) {
- INIT_LIST_HEAD(&ei->link);
- INIT_LIST_HEAD(&ei->link_order);
- }
- return ei;
-}
-
-/*
- * Precondition: oz_elt_buf.lock must be held.
- * Context: softirq or process
- */
-void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
-{
- if (ei)
- kmem_cache_free(oz_elt_info_cache, ei);
-}
-
-/*
- * Context: softirq
- */
-void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
-{
- struct oz_elt_info *ei, *n;
-
- spin_lock_bh(&buf->lock);
-	list_for_each_entry_safe(ei, n, list, link)
- oz_elt_info_free(buf, ei);
- spin_unlock_bh(&buf->lock);
-}
-
-int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
-{
- struct oz_elt_stream *st;
-
- oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
-
- st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
- if (st == NULL)
- return -ENOMEM;
- atomic_set(&st->ref_count, 1);
- st->id = id;
- st->max_buf_count = max_buf_count;
- INIT_LIST_HEAD(&st->elt_list);
- spin_lock_bh(&buf->lock);
- list_add_tail(&st->link, &buf->stream_list);
- spin_unlock_bh(&buf->lock);
- return 0;
-}
-
-int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
-{
- struct list_head *e, *n;
- struct oz_elt_stream *st = NULL;
-
- oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
- spin_lock_bh(&buf->lock);
- list_for_each(e, &buf->stream_list) {
- st = list_entry(e, struct oz_elt_stream, link);
- if (st->id == id) {
- list_del(e);
- break;
- }
- st = NULL;
- }
- if (!st) {
- spin_unlock_bh(&buf->lock);
- return -1;
- }
- list_for_each_safe(e, n, &st->elt_list) {
- struct oz_elt_info *ei =
- list_entry(e, struct oz_elt_info, link);
- list_del_init(&ei->link);
- list_del_init(&ei->link_order);
- st->buf_count -= ei->length;
- oz_dbg(STREAM, "Stream down: %d %d %d\n",
- st->buf_count, ei->length, atomic_read(&st->ref_count));
- oz_elt_stream_put(st);
- oz_elt_info_free(buf, ei);
- }
- spin_unlock_bh(&buf->lock);
- oz_elt_stream_put(st);
- return 0;
-}
-
-void oz_elt_stream_get(struct oz_elt_stream *st)
-{
- atomic_inc(&st->ref_count);
-}
-
-void oz_elt_stream_put(struct oz_elt_stream *st)
-{
- if (atomic_dec_and_test(&st->ref_count)) {
- oz_dbg(ON, "Stream destroyed\n");
- kfree(st);
- }
-}
-
-/*
- * Precondition: Element buffer lock must be held.
- * If this function fails the caller is responsible for deallocating the elt
- * info structure.
- */
-int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
- struct oz_elt_info *ei)
-{
- struct oz_elt_stream *st = NULL;
- struct list_head *e;
-
- if (id) {
- list_for_each(e, &buf->stream_list) {
- st = list_entry(e, struct oz_elt_stream, link);
- if (st->id == id)
- break;
- }
- if (e == &buf->stream_list) {
-			/* Stream specified but not known, so fail.
-			 * The caller deallocates the element info. */
- return -1;
- }
- }
- if (st) {
- /* If this is an ISOC fixed element that needs a frame number
- * then insert that now. Earlier we stored the unit count in
- * this field.
- */
- struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
- &ei->data[sizeof(struct oz_elt)];
- if ((body->app_id == OZ_APPID_USB) && (body->type
- == OZ_USB_ENDPOINT_DATA) &&
- (body->format == OZ_DATA_F_ISOC_FIXED)) {
- u8 unit_count = body->frame_number;
-
- body->frame_number = st->frame_number;
- st->frame_number += unit_count;
- }
- /* Claim stream and update accounts */
- oz_elt_stream_get(st);
- ei->stream = st;
- st->buf_count += ei->length;
- /* Add to list in stream. */
- list_add_tail(&ei->link, &st->elt_list);
- oz_dbg(STREAM, "Stream up: %d %d\n", st->buf_count, ei->length);
- /* Check if we have too much buffered for this stream. If so
- * start dropping elements until we are back in bounds.
- */
- while ((st->buf_count > st->max_buf_count) &&
- !list_empty(&st->elt_list)) {
- struct oz_elt_info *ei2 =
- list_first_entry(&st->elt_list,
- struct oz_elt_info, link);
- list_del_init(&ei2->link);
- list_del_init(&ei2->link_order);
- st->buf_count -= ei2->length;
- oz_elt_info_free(buf, ei2);
- oz_elt_stream_put(st);
- }
- }
- list_add_tail(&ei->link_order, isoc ?
- &buf->isoc_list : &buf->order_list);
- return 0;
-}
-
-int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
- unsigned max_len, struct list_head *list)
-{
- int count = 0;
- struct list_head *el;
- struct oz_elt_info *ei, *n;
-
- spin_lock_bh(&buf->lock);
- if (isoc)
- el = &buf->isoc_list;
- else
- el = &buf->order_list;
-
- list_for_each_entry_safe(ei, n, el, link_order) {
- if ((*len + ei->length) <= max_len) {
- struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)
- &ei->data[sizeof(struct oz_elt)];
- app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
- if (buf->tx_seq_num[ei->app_id] == 0)
- buf->tx_seq_num[ei->app_id] = 1;
- *len += ei->length;
- list_del(&ei->link);
- list_del(&ei->link_order);
- if (ei->stream) {
- ei->stream->buf_count -= ei->length;
- oz_dbg(STREAM, "Stream down: %d %d\n",
- ei->stream->buf_count, ei->length);
- oz_elt_stream_put(ei->stream);
- ei->stream = NULL;
- }
- INIT_LIST_HEAD(&ei->link_order);
- list_add_tail(&ei->link, list);
- count++;
- } else {
- break;
- }
- }
- spin_unlock_bh(&buf->lock);
- return count;
-}
-
-int oz_are_elts_available(struct oz_elt_buf *buf)
-{
- return !list_empty(&buf->order_list);
-}
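A detail shared by oz_select_elts_for_tx() above and oz_cdev_write() earlier:
the per-app tx sequence counter is post-incremented and then bumped past zero,
because elt_seq_num == 0 is reserved to mean "skip the duplicate check" on the
receive side (see oz_cdev_rx()). The wrap on its own, as a small sketch:

#include <stdint.h>

/* Hand out the next element sequence number; 0 is reserved as the
 * "no sequence check" marker, so the counter wraps 255 -> 1. */
static uint8_t next_seq(uint8_t *counter)
{
	uint8_t seq = (*counter)++;

	if (*counter == 0)
		*counter = 1;
	return seq;
}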
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
deleted file mode 100644
index f09f5fe3ffbe..000000000000
--- a/drivers/staging/ozwpan/ozeltbuf.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZELTBUF_H
-#define _OZELTBUF_H
-
-#include "ozprotocol.h"
-
-struct oz_pd;
-typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
-
-struct oz_elt_stream {
- struct list_head link;
- struct list_head elt_list;
- atomic_t ref_count;
- unsigned buf_count;
- unsigned max_buf_count;
- u8 frame_number;
- u8 id;
-};
-
-#define OZ_MAX_ELT_PAYLOAD 255
-struct oz_elt_info {
- struct list_head link;
- struct list_head link_order;
- u8 flags;
- u8 app_id;
- oz_elt_callback_t callback;
- long context;
- struct oz_elt_stream *stream;
- u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
- int length;
-};
-/* Flags values */
-#define OZ_EI_F_MARKED 0x1
-
-struct oz_elt_buf {
- spinlock_t lock;
- struct list_head stream_list;
- struct list_head order_list;
- struct list_head isoc_list;
- u8 tx_seq_num[OZ_NB_APPS];
-};
-
-void oz_elt_buf_init(struct oz_elt_buf *buf);
-void oz_elt_buf_term(struct oz_elt_buf *buf);
-struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
-void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
-void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
-int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
-int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
-void oz_elt_stream_get(struct oz_elt_stream *st);
-void oz_elt_stream_put(struct oz_elt_stream *st);
-int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
- struct oz_elt_info *ei);
-int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
- unsigned max_len, struct list_head *list);
-int oz_are_elts_available(struct oz_elt_buf *buf);
-
-#endif /* _OZELTBUF_H */
-
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
deleted file mode 100644
index 784b5ecfa849..000000000000
--- a/drivers/staging/ozwpan/ozhcd.c
+++ /dev/null
@@ -1,2301 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file provides the implementation of a USB host controller device that
- * does not have any associated hardware. Instead the virtual device is
- * connected to the Wi-Fi network and emulates the operation of a USB HCD by
- * receiving and sending network frames.
- * Note:
- * We take great pains to reduce the amount of code where interrupts need to be
- * disabled, and in this respect we differ from standard HCDs. In particular we
- * don't want in_irq() code bleeding over to the protocol side of the driver.
- * The troublesome functions are the urb enqueue and dequeue functions, both of
- * which can be called in_irq(). So for these functions we put the urbs into a
- * queue and request a tasklet to process them. This means that a spinlock with
- * interrupts disabled must be held for insertion and removal, but most code
- * runs in tasklet or softirq context. The lock that protects this list is
- * called the tasklet lock and serves the purpose of the 'HCD lock', which must
- * be held when calling the following functions:
- * usb_hcd_link_urb_to_ep()
- * usb_hcd_unlink_urb_from_ep()
- * usb_hcd_flush_endpoint()
- * usb_hcd_check_unlink_urb()
- * -----------------------------------------------------------------------------
- */
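The scheme that note describes reduces to a familiar pattern: producers that
may run in_irq() only link a node into a list under an irq-safe lock and
schedule a tasklet, and all real work happens in softirq context. A schematic
sketch with hypothetical names (the driver's actual version, built on
g_tasklet_lock and g_urb_process_tasklet, follows below):

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);
static void drain_fn(unsigned long unused);
static DECLARE_TASKLET(drain_tasklet, drain_fn, 0);

/* May be called in_irq(): only list manipulation under the lock. */
static void enqueue_item(struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_add_tail(item, &pending);
	spin_unlock_irqrestore(&pending_lock, flags);
	tasklet_schedule(&drain_tasklet);
}

/* Runs in softirq context; drop the lock while processing each item. */
static void drain_fn(unsigned long unused)
{
	unsigned long flags;
	struct list_head *item;

	spin_lock_irqsave(&pending_lock, flags);
	while (!list_empty(&pending)) {
		item = pending.next;
		list_del_init(item);
		spin_unlock_irqrestore(&pending_lock, flags);
		/* ... process item ... */
		spin_lock_irqsave(&pending_lock, flags);
	}
	spin_unlock_irqrestore(&pending_lock, flags);
}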
-#include <linux/platform_device.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/usb/hcd.h>
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozusbif.h"
-#include "ozurbparanoia.h"
-#include "ozhcd.h"
-
-/*
- * Number of units of buffering to capture for an isochronous IN endpoint before
- * allowing data to be indicated up.
- */
-#define OZ_IN_BUFFERING_UNITS 100
-
-/* Name of our platform device.
- */
-#define OZ_PLAT_DEV_NAME "ozwpan"
-
-/* EP0 timeout before an ep0 request is again added to the TX queue
- * (13 * 8 = 104 ms).
- */
-#define EP0_TIMEOUT_COUNTER 13
-
-/* Debounce time HCD driver should wait before unregistering.
- */
-#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
-
-/*
- * Used to link urbs together and also store some status information for each
- * urb.
- * A cache of these are kept in a pool to reduce number of calls to kmalloc.
- */
-struct oz_urb_link {
- struct list_head link;
- struct urb *urb;
- struct oz_port *port;
- u8 req_id;
- u8 ep_num;
- unsigned submit_counter;
-};
-
-static struct kmem_cache *oz_urb_link_cache;
-
-/* Holds state information about a USB endpoint.
- */
-#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24)
-#define OZ_EP_BUFFER_SIZE_INT 512
-struct oz_endpoint {
- struct list_head urb_list; /* List of oz_urb_link items. */
-	struct list_head link;		/* For isoc ep, links into isoc
-					   lists of oz_port. */
- struct timespec timestamp;
- int credit;
- int credit_ceiling;
- u8 ep_num;
- u8 attrib;
- u8 *buffer;
- int buffer_size;
- int in_ix;
- int out_ix;
- int buffered_units;
- unsigned flags;
- int start_frame;
-};
-
-/* Bits in the flags field. */
-#define OZ_F_EP_BUFFERING 0x1
-#define OZ_F_EP_HAVE_STREAM 0x2
-
-/* Holds state information about a USB interface.
- */
-struct oz_interface {
- unsigned ep_mask;
- u8 alt;
-};
-
-/* Holds state information about an hcd port.
- */
-#define OZ_NB_ENDPOINTS 16
-struct oz_port {
- unsigned flags;
- unsigned status;
- void *hpd;
- struct oz_hcd *ozhcd;
- spinlock_t port_lock;
- u8 bus_addr;
- u8 next_req_id;
- u8 config_num;
- int num_iface;
- struct oz_interface *iface;
- struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
- struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
- struct list_head isoc_out_ep;
- struct list_head isoc_in_ep;
-};
-
-#define OZ_PORT_F_PRESENT 0x1
-#define OZ_PORT_F_CHANGED 0x2
-#define OZ_PORT_F_DYING 0x4
-
-/* Data structure in the private context area of struct usb_hcd.
- */
-#define OZ_NB_PORTS 8
-struct oz_hcd {
- spinlock_t hcd_lock;
- struct list_head urb_pending_list;
- struct list_head urb_cancel_list;
- struct list_head orphanage;
- int conn_port; /* Port that is currently connecting, -1 if none.*/
- struct oz_port ports[OZ_NB_PORTS];
- uint flags;
- struct usb_hcd *hcd;
-};
-
-/* Bits in flags field.
- */
-#define OZ_HDC_F_SUSPENDED 0x1
-
-/*
- * Static function prototypes.
- */
-static int oz_hcd_start(struct usb_hcd *hcd);
-static void oz_hcd_stop(struct usb_hcd *hcd);
-static void oz_hcd_shutdown(struct usb_hcd *hcd);
-static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags);
-static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
-static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
-static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
-static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
- u16 windex, char *buf, u16 wlength);
-static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
-static int oz_hcd_bus_resume(struct usb_hcd *hcd);
-static int oz_plat_probe(struct platform_device *dev);
-static int oz_plat_remove(struct platform_device *dev);
-static void oz_plat_shutdown(struct platform_device *dev);
-static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
-static int oz_plat_resume(struct platform_device *dev);
-static void oz_urb_process_tasklet(unsigned long unused);
-static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
- struct oz_port *port, struct usb_host_config *config,
- gfp_t mem_flags);
-static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
- struct oz_port *port);
-static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
- struct oz_port *port,
- struct usb_host_interface *intf, gfp_t mem_flags);
-static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
- struct oz_port *port, int if_ix);
-static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
- gfp_t mem_flags);
-static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
- struct urb *urb);
-static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
-
-/*
- * Static external variables.
- */
-static struct platform_device *g_plat_dev;
-static struct oz_hcd *g_ozhcd;
-static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
-static const char g_hcd_name[] = "Ozmo WPAN";
-static DEFINE_SPINLOCK(g_tasklet_lock);
-static struct tasklet_struct g_urb_process_tasklet;
-static struct tasklet_struct g_urb_cancel_tasklet;
-static atomic_t g_pending_urbs = ATOMIC_INIT(0);
-static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
-static const struct hc_driver g_oz_hc_drv = {
- .description = g_hcd_name,
- .product_desc = "Ozmo Devices WPAN",
- .hcd_priv_size = sizeof(struct oz_hcd),
- .flags = HCD_USB11,
- .start = oz_hcd_start,
- .stop = oz_hcd_stop,
- .shutdown = oz_hcd_shutdown,
- .urb_enqueue = oz_hcd_urb_enqueue,
- .urb_dequeue = oz_hcd_urb_dequeue,
- .endpoint_disable = oz_hcd_endpoint_disable,
- .endpoint_reset = oz_hcd_endpoint_reset,
- .get_frame_number = oz_hcd_get_frame_number,
- .hub_status_data = oz_hcd_hub_status_data,
- .hub_control = oz_hcd_hub_control,
- .bus_suspend = oz_hcd_bus_suspend,
- .bus_resume = oz_hcd_bus_resume,
-};
-
-static struct platform_driver g_oz_plat_drv = {
- .probe = oz_plat_probe,
- .remove = oz_plat_remove,
- .shutdown = oz_plat_shutdown,
- .suspend = oz_plat_suspend,
- .resume = oz_plat_resume,
- .driver = {
- .name = OZ_PLAT_DEV_NAME,
- },
-};
-
-/*
- * Gets our private context area (which is of type struct oz_hcd) from the
- * usb_hcd structure.
- * Context: any
- */
-static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
-{
- return (struct oz_hcd *)hcd->hcd_priv;
-}
-
-/*
- * Searches the list of ports to find the index of the one with the specified
- * USB bus address. If no port has that bus address, the connection port is
- * returned if there is one, or -1 otherwise.
- * Context: any
- */
-static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
-{
- int i;
-
- for (i = 0; i < OZ_NB_PORTS; i++) {
- if (ozhcd->ports[i].bus_addr == bus_addr)
- return i;
- }
- return ozhcd->conn_port;
-}
-
-/*
- * Context: any
- */
-static struct oz_urb_link *oz_alloc_urb_link(void)
-{
- return kmem_cache_alloc(oz_urb_link_cache, GFP_ATOMIC);
-}
-
-/*
- * Context: any
- */
-static void oz_free_urb_link(struct oz_urb_link *urbl)
-{
- if (!urbl)
- return;
-
- kmem_cache_free(oz_urb_link_cache, urbl);
-}
-
-/*
- * Allocates endpoint structure and optionally a buffer. If a buffer is
- * allocated it immediately follows the endpoint structure.
- * Context: softirq
- */
-static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
-{
- struct oz_endpoint *ep;
-
- ep = kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
- if (!ep)
- return NULL;
-
- INIT_LIST_HEAD(&ep->urb_list);
- INIT_LIST_HEAD(&ep->link);
- ep->credit = -1;
- if (buffer_size) {
- ep->buffer_size = buffer_size;
- ep->buffer = (u8 *)(ep+1);
- }
-
- return ep;
-}
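oz_ep_alloc() uses the common single-allocation idiom: one kzalloc() covers
both the struct and its data buffer, and the buffer pointer is simply
(ep + 1), the first byte past the struct. The same idiom rendered generically
in user space (hypothetical names):

#include <stdlib.h>

struct blob {
	size_t buf_size;
	unsigned char *buf;	/* points just past the struct itself */
};

static struct blob *blob_alloc(size_t buf_size)
{
	/* One allocation covers header and payload, so freeing the
	 * struct frees the buffer too, and locality is better than
	 * with two separate allocations. */
	struct blob *b = calloc(1, sizeof(*b) + buf_size);

	if (!b)
		return NULL;
	if (buf_size) {
		b->buf_size = buf_size;
		b->buf = (unsigned char *)(b + 1);
	}
	return b;
}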
-
-/*
- * Pre-condition: Must be called with g_tasklet_lock held and interrupts
- * disabled.
- * Context: softirq or process
- */
-static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
- struct urb *urb)
-{
- struct oz_urb_link *urbl;
-
- list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) {
- if (urb == urbl->urb) {
- list_del_init(&urbl->link);
- return urbl;
- }
- }
- return NULL;
-}
-
-/*
- * This is called when we have finished processing an urb. It unlinks it from
- * the ep and returns it to the core.
- * Context: softirq or process
- */
-static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
- int status)
-{
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- unsigned long irq_state;
- struct oz_urb_link *cancel_urbl;
-
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- /* Clear hcpriv which will prevent it being put in the cancel list
- * in the event that an attempt is made to cancel it.
- */
- urb->hcpriv = NULL;
- /* Walk the cancel list in case the urb is already sitting there.
- * Since we process the cancel list in a tasklet rather than in
- * the dequeue function this could happen.
- */
- cancel_urbl = oz_uncancel_urb(ozhcd, urb);
- /* Note: we release lock but do not enable local irqs.
- * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
- * or at least other host controllers disable interrupts at this point
- * so we do the same. We must, however, release the lock otherwise a
- * deadlock will occur if an urb is submitted to our driver in the urb
- * completion function. Because we disable interrupts it is possible
- * that the urb_enqueue function can be called with them disabled.
- */
- spin_unlock(&g_tasklet_lock);
- if (oz_forget_urb(urb)) {
- oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
- } else {
- atomic_dec(&g_pending_urbs);
- usb_hcd_giveback_urb(hcd, urb, status);
- }
- spin_lock(&g_tasklet_lock);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- oz_free_urb_link(cancel_urbl);
-}
-
-/*
- * Deallocates an endpoint including deallocating any associated stream and
- * returning any queued urbs to the core.
- * Context: softirq
- */
-static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
-{
- if (port) {
- LIST_HEAD(list);
- struct oz_hcd *ozhcd = port->ozhcd;
-
- if (ep->flags & OZ_F_EP_HAVE_STREAM)
- oz_usb_stream_delete(port->hpd, ep->ep_num);
- /* Transfer URBs to the orphanage while we hold the lock. */
- spin_lock_bh(&ozhcd->hcd_lock);
-		/* Note: this works even if ep->urb_list is empty. */
- list_replace_init(&ep->urb_list, &list);
- /* Put the URBs in the orphanage. */
- list_splice_tail(&list, &ozhcd->orphanage);
- spin_unlock_bh(&ozhcd->hcd_lock);
- }
- oz_dbg(ON, "Freeing endpoint memory\n");
- kfree(ep);
-}
-
-/*
- * Context: softirq
- */
-static void oz_complete_buffered_urb(struct oz_port *port,
- struct oz_endpoint *ep,
- struct urb *urb)
-{
- int data_len, available_space, copy_len;
-
- data_len = ep->buffer[ep->out_ix];
- if (data_len <= urb->transfer_buffer_length)
- available_space = data_len;
- else
- available_space = urb->transfer_buffer_length;
-
- if (++ep->out_ix == ep->buffer_size)
- ep->out_ix = 0;
- copy_len = ep->buffer_size - ep->out_ix;
- if (copy_len >= available_space)
- copy_len = available_space;
- memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);
-
- if (copy_len < available_space) {
- memcpy((urb->transfer_buffer + copy_len), ep->buffer,
- (available_space - copy_len));
- ep->out_ix = available_space - copy_len;
- } else {
- ep->out_ix += copy_len;
- }
- urb->actual_length = available_space;
- if (ep->out_ix == ep->buffer_size)
- ep->out_ix = 0;
-
- ep->buffered_units--;
- oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
- available_space);
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
-}
-
-/*
- * Context: softirq
- */
-static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
- struct urb *urb, u8 req_id)
-{
- struct oz_urb_link *urbl;
- struct oz_endpoint *ep = NULL;
- int err = 0;
-
- if (ep_addr >= OZ_NB_ENDPOINTS) {
- oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
- return -EINVAL;
- }
- urbl = oz_alloc_urb_link();
- if (!urbl)
- return -ENOMEM;
- urbl->submit_counter = 0;
- urbl->urb = urb;
- urbl->req_id = req_id;
- urbl->ep_num = ep_addr;
- /* Hold lock while we insert the URB into the list within the
- * endpoint structure.
- */
- spin_lock_bh(&port->ozhcd->hcd_lock);
- /* If the urb has been unlinked while out of any list then
- * complete it now.
- */
- if (urb->unlinked) {
- spin_unlock_bh(&port->ozhcd->hcd_lock);
- oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
- oz_free_urb_link(urbl);
- return 0;
- }
-
- if (in_dir)
- ep = port->in_ep[ep_addr];
- else
- ep = port->out_ep[ep_addr];
- if (!ep) {
- err = -ENOMEM;
- goto out;
- }
-
-	/* For an interrupt endpoint, check for buffered data and, if there
-	 * is any, complete the urb from it immediately.
-	 */
- if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
- && ep->buffered_units > 0) {
- oz_free_urb_link(urbl);
- spin_unlock_bh(&port->ozhcd->hcd_lock);
- oz_complete_buffered_urb(port, ep, urb);
- return 0;
- }
-
- if (port->hpd) {
- list_add_tail(&urbl->link, &ep->urb_list);
- if (!in_dir && ep_addr && (ep->credit < 0)) {
- getrawmonotonic(&ep->timestamp);
- ep->credit = 0;
- }
- } else {
- err = -EPIPE;
- }
-out:
- spin_unlock_bh(&port->ozhcd->hcd_lock);
- if (err)
- oz_free_urb_link(urbl);
- return err;
-}
-
-/*
- * Removes an urb from the queue in the endpoint.
- * Returns 0 if it is found and -EIDRM otherwise.
- * Context: softirq
- */
-static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
- struct urb *urb)
-{
- struct oz_urb_link *urbl = NULL;
- struct oz_endpoint *ep;
-
- spin_lock_bh(&port->ozhcd->hcd_lock);
- if (in_dir)
- ep = port->in_ep[ep_addr];
- else
- ep = port->out_ep[ep_addr];
- if (ep) {
- struct list_head *e;
-
- list_for_each(e, &ep->urb_list) {
- urbl = list_entry(e, struct oz_urb_link, link);
- if (urbl->urb == urb) {
- list_del_init(e);
- break;
- }
- urbl = NULL;
- }
- }
- spin_unlock_bh(&port->ozhcd->hcd_lock);
- oz_free_urb_link(urbl);
- return urbl ? 0 : -EIDRM;
-}
-
-/*
- * Finds an urb given its request id.
- * Context: softirq
- */
-static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
- u8 req_id)
-{
- struct oz_hcd *ozhcd = port->ozhcd;
- struct urb *urb = NULL;
- struct oz_urb_link *urbl;
- struct oz_endpoint *ep;
-
- spin_lock_bh(&ozhcd->hcd_lock);
- ep = port->out_ep[ep_ix];
- if (ep) {
- struct list_head *e;
-
- list_for_each(e, &ep->urb_list) {
- urbl = list_entry(e, struct oz_urb_link, link);
- if (urbl->req_id == req_id) {
- urb = urbl->urb;
- list_del_init(e);
- break;
- }
- }
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
-	/* If urb is non-NULL then we must have an urb link to delete.
-	 */
- if (urb)
- oz_free_urb_link(urbl);
- return urb;
-}
-
-/*
- * Pre-condition: Port lock must be held.
- * Context: softirq
- */
-static void oz_acquire_port(struct oz_port *port, void *hpd)
-{
- INIT_LIST_HEAD(&port->isoc_out_ep);
- INIT_LIST_HEAD(&port->isoc_in_ep);
- port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
- port->status |= USB_PORT_STAT_CONNECTION |
- (USB_PORT_STAT_C_CONNECTION << 16);
- oz_usb_get(hpd);
- port->hpd = hpd;
-}
-
-/*
- * Context: softirq
- */
-static struct oz_hcd *oz_hcd_claim(void)
-{
- struct oz_hcd *ozhcd;
-
- spin_lock_bh(&g_hcdlock);
- ozhcd = g_ozhcd;
- if (ozhcd)
- usb_get_hcd(ozhcd->hcd);
- spin_unlock_bh(&g_hcdlock);
- return ozhcd;
-}
-
-/*
- * Context: softirq
- */
-static inline void oz_hcd_put(struct oz_hcd *ozhcd)
-{
- if (ozhcd)
- usb_put_hcd(ozhcd->hcd);
-}
-
-/*
- * This is called by the protocol handler to notify that a PD has arrived.
- * We allocate a port to associate with the PD and create a structure for
- * endpoint 0. This port is made the connection port.
- * In the event that one of the other ports is already the connection port
- * then we fail.
- * TODO We should be able to do better than fail: we should remember that
- * this port needs configuring and make it the connection port once the
- * current connection port has been assigned an address. Collisions here are
- * probably very rare indeed.
- * Context: softirq
- */
-struct oz_port *oz_hcd_pd_arrived(void *hpd)
-{
- int i;
- struct oz_port *hport;
- struct oz_hcd *ozhcd;
- struct oz_endpoint *ep;
-
- ozhcd = oz_hcd_claim();
- if (!ozhcd)
- return NULL;
- /* Allocate an endpoint object in advance (before holding hcd lock) to
- * use for out endpoint 0.
- */
- ep = oz_ep_alloc(0, GFP_ATOMIC);
- if (!ep)
- goto err_put;
-
- spin_lock_bh(&ozhcd->hcd_lock);
- if (ozhcd->conn_port >= 0)
- goto err_unlock;
-
- for (i = 0; i < OZ_NB_PORTS; i++) {
- struct oz_port *port = &ozhcd->ports[i];
-
- spin_lock(&port->port_lock);
- if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
- oz_acquire_port(port, hpd);
- spin_unlock(&port->port_lock);
- break;
- }
- spin_unlock(&port->port_lock);
- }
- if (i == OZ_NB_PORTS)
- goto err_unlock;
-
- ozhcd->conn_port = i;
- hport = &ozhcd->ports[i];
- hport->out_ep[0] = ep;
- spin_unlock_bh(&ozhcd->hcd_lock);
- if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
- usb_hcd_resume_root_hub(ozhcd->hcd);
- usb_hcd_poll_rh_status(ozhcd->hcd);
- oz_hcd_put(ozhcd);
-
- return hport;
-
-err_unlock:
- spin_unlock_bh(&ozhcd->hcd_lock);
- oz_ep_free(NULL, ep);
-err_put:
- oz_hcd_put(ozhcd);
- return NULL;
-}
-
-/*
- * This is called by the protocol handler to notify that the PD has gone away.
- * We need to deallocate all resources and then request that the root hub is
- * polled. We release the reference we hold on the PD.
- * Context: softirq
- */
-void oz_hcd_pd_departed(struct oz_port *port)
-{
- struct oz_hcd *ozhcd;
- void *hpd;
- struct oz_endpoint *ep = NULL;
-
- if (port == NULL) {
- oz_dbg(ON, "%s: port = 0\n", __func__);
- return;
- }
- ozhcd = port->ozhcd;
- if (ozhcd == NULL)
- return;
- /* Check if this is the connection port - if so clear it.
- */
- spin_lock_bh(&ozhcd->hcd_lock);
- if ((ozhcd->conn_port >= 0) &&
- (port == &ozhcd->ports[ozhcd->conn_port])) {
- oz_dbg(ON, "Clearing conn_port\n");
- ozhcd->conn_port = -1;
- }
- spin_lock(&port->port_lock);
- port->flags |= OZ_PORT_F_DYING;
- spin_unlock(&port->port_lock);
- spin_unlock_bh(&ozhcd->hcd_lock);
-
- oz_clean_endpoints_for_config(ozhcd->hcd, port);
- spin_lock_bh(&port->port_lock);
- hpd = port->hpd;
- port->hpd = NULL;
- port->bus_addr = 0xff;
- port->config_num = 0;
- port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
- port->flags |= OZ_PORT_F_CHANGED;
- port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
- port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
-	/* If there is an endpoint 0 then clear the pointer while we hold
-	 * the spinlock, but deallocate it only after releasing the lock.
-	 */
- if (port->out_ep[0]) {
- ep = port->out_ep[0];
- port->out_ep[0] = NULL;
- }
- spin_unlock_bh(&port->port_lock);
- if (ep)
- oz_ep_free(port, ep);
- usb_hcd_poll_rh_status(ozhcd->hcd);
- oz_usb_put(hpd);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_pd_reset(void *hpd, void *hport)
-{
- /* Cleanup the current configuration and report reset to the core.
- */
- struct oz_port *port = hport;
- struct oz_hcd *ozhcd = port->ozhcd;
-
- oz_dbg(ON, "PD Reset\n");
- spin_lock_bh(&port->port_lock);
- port->flags |= OZ_PORT_F_CHANGED;
- port->status |= USB_PORT_STAT_RESET;
- port->status |= (USB_PORT_STAT_C_RESET << 16);
- spin_unlock_bh(&port->port_lock);
- oz_clean_endpoints_for_config(ozhcd->hcd, port);
- usb_hcd_poll_rh_status(ozhcd->hcd);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
- u8 length, u16 offset, u16 total_size)
-{
- struct oz_port *port = hport;
- struct urb *urb;
- int err = 0;
-
- oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
- length, offset, total_size);
- urb = oz_find_urb_by_id(port, 0, req_id);
- if (!urb)
- return;
- if (status == 0) {
- unsigned int copy_len;
- unsigned int required_size = urb->transfer_buffer_length;
-
- if (required_size > total_size)
- required_size = total_size;
- copy_len = required_size-offset;
- if (length <= copy_len)
- copy_len = length;
- memcpy(urb->transfer_buffer+offset, desc, copy_len);
- offset += copy_len;
- if (offset < required_size) {
- struct usb_ctrlrequest *setup =
- (struct usb_ctrlrequest *)urb->setup_packet;
- unsigned wvalue = le16_to_cpu(setup->wValue);
-
- if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
- err = -ENOMEM;
- else if (oz_usb_get_desc_req(port->hpd, req_id,
- setup->bRequestType, (u8)(wvalue>>8),
- (u8)wvalue, setup->wIndex, offset,
- required_size-offset)) {
- oz_dequeue_ep_urb(port, 0, 0, urb);
- err = -ENOMEM;
- }
- if (err == 0)
- return;
- }
- }
- urb->actual_length = total_size;
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
-}
-
-/*
- * Context: softirq
- */
-static void oz_display_conf_type(u8 t)
-{
- switch (t) {
- case USB_REQ_GET_STATUS:
- oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
- break;
- case USB_REQ_CLEAR_FEATURE:
- oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
- break;
- case USB_REQ_SET_FEATURE:
- oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
- break;
- case USB_REQ_SET_ADDRESS:
- oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
- break;
- case USB_REQ_GET_DESCRIPTOR:
- oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
- break;
- case USB_REQ_SET_DESCRIPTOR:
- oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
- break;
- case USB_REQ_GET_CONFIGURATION:
- oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
- break;
- case USB_REQ_SET_CONFIGURATION:
- oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
- break;
- case USB_REQ_GET_INTERFACE:
- oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
- break;
- case USB_REQ_SET_INTERFACE:
- oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
- break;
- case USB_REQ_SYNCH_FRAME:
- oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
- break;
- }
-}
-
-/*
- * Context: softirq
- */
-static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
- u8 rcode, u8 config_num)
-{
- int rc = 0;
- struct usb_hcd *hcd = port->ozhcd->hcd;
-
- if (rcode == 0) {
- port->config_num = config_num;
- oz_clean_endpoints_for_config(hcd, port);
- if (oz_build_endpoints_for_config(hcd, port,
- &urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
- rc = -ENOMEM;
- }
- } else {
- rc = -ENOMEM;
- }
- oz_complete_urb(hcd, urb, rc);
-}
-
-/*
- * Context: softirq
- */
-static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
- u8 rcode, u8 if_num, u8 alt)
-{
- struct usb_hcd *hcd = port->ozhcd->hcd;
- int rc = 0;
-
- if ((rcode == 0) && (port->config_num > 0)) {
- struct usb_host_config *config;
- struct usb_host_interface *intf;
-
- oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
- oz_clean_endpoints_for_interface(hcd, port, if_num);
- config = &urb->dev->config[port->config_num-1];
- intf = &config->intf_cache[if_num]->altsetting[alt];
- if (oz_build_endpoints_for_interface(hcd, port, intf,
- GFP_ATOMIC))
- rc = -ENOMEM;
- else
- port->iface[if_num].alt = alt;
- } else {
- rc = -ENOMEM;
- }
- oz_complete_urb(hcd, urb, rc);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
- int data_len)
-{
- struct oz_port *port = hport;
- struct urb *urb;
- struct usb_ctrlrequest *setup;
- struct usb_hcd *hcd = port->ozhcd->hcd;
- unsigned windex;
- unsigned wvalue;
-
- oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
- urb = oz_find_urb_by_id(port, 0, req_id);
- if (!urb) {
- oz_dbg(ON, "URB not found\n");
- return;
- }
- setup = (struct usb_ctrlrequest *)urb->setup_packet;
- windex = le16_to_cpu(setup->wIndex);
- wvalue = le16_to_cpu(setup->wValue);
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- /* Standard requests */
- oz_display_conf_type(setup->bRequest);
- switch (setup->bRequest) {
- case USB_REQ_SET_CONFIGURATION:
- oz_hcd_complete_set_config(port, urb, rcode,
- (u8)wvalue);
- break;
- case USB_REQ_SET_INTERFACE:
- oz_hcd_complete_set_interface(port, urb, rcode,
- (u8)windex, (u8)wvalue);
- break;
- default:
- oz_complete_urb(hcd, urb, 0);
- }
-
- } else {
- int copy_len;
-
- oz_dbg(ON, "VENDOR-CLASS - cnf\n");
- if (data_len) {
- if (data_len <= urb->transfer_buffer_length)
- copy_len = data_len;
- else
- copy_len = urb->transfer_buffer_length;
- memcpy(urb->transfer_buffer, data, copy_len);
- urb->actual_length = copy_len;
- }
- oz_complete_urb(hcd, urb, 0);
- }
-}
-
-/*
- * Context: softirq-serialized
- */
-static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
- int data_len)
-{
- int space;
- int copy_len;
-
- if (!ep->buffer)
- return -1;
- space = ep->out_ix-ep->in_ix-1;
- if (space < 0)
- space += ep->buffer_size;
- if (space < (data_len+1)) {
- oz_dbg(ON, "Buffer full\n");
- return -1;
- }
- ep->buffer[ep->in_ix] = (u8)data_len;
- if (++ep->in_ix == ep->buffer_size)
- ep->in_ix = 0;
- copy_len = ep->buffer_size - ep->in_ix;
- if (copy_len > data_len)
- copy_len = data_len;
- memcpy(&ep->buffer[ep->in_ix], data, copy_len);
-
- if (copy_len < data_len) {
- memcpy(ep->buffer, data+copy_len, data_len-copy_len);
- ep->in_ix = data_len-copy_len;
- } else {
- ep->in_ix += copy_len;
- }
- if (ep->in_ix == ep->buffer_size)
- ep->in_ix = 0;
- ep->buffered_units++;
- return 0;
-}
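oz_hcd_buffer_data() frames each buffered unit as a one-byte length followed
by the payload, with both allowed to wrap past the end of the ring;
oz_complete_buffered_urb() and the isoc drain in oz_hcd_heartbeat() parse the
identical framing back out. The producer side as a stand-alone sketch
(illustrative names; len must fit the u8 length prefix):

#include <stdint.h>
#include <string.h>

/* Push one [len][payload] record into ring[] of size sz. in/out are the
 * producer/consumer indices; one slot stays empty, as in the driver. */
static int ring_push_record(uint8_t *ring, int sz, int *in, int out,
			    const uint8_t *data, int len)
{
	int space = out - *in - 1;
	int ix = *in, chunk;

	if (space < 0)
		space += sz;
	if (space < len + 1)		/* length byte plus payload */
		return -1;
	ring[ix] = (uint8_t)len;
	if (++ix == sz)
		ix = 0;
	chunk = sz - ix;		/* contiguous room before the wrap */
	if (chunk > len)
		chunk = len;
	memcpy(&ring[ix], data, chunk);
	if (chunk < len) {		/* wrapped: finish at the start */
		memcpy(ring, data + chunk, len - chunk);
		ix = len - chunk;
	} else {
		ix += chunk;
		if (ix == sz)
			ix = 0;
	}
	*in = ix;
	return 0;
}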
-
-/*
- * Context: softirq-serialized
- */
-void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
-{
- struct oz_port *port = (struct oz_port *)hport;
- struct oz_endpoint *ep;
- struct oz_hcd *ozhcd = port->ozhcd;
-
- spin_lock_bh(&ozhcd->hcd_lock);
- ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
- if (ep == NULL)
- goto done;
- switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_INT:
- case USB_ENDPOINT_XFER_BULK:
- if (!list_empty(&ep->urb_list)) {
- struct oz_urb_link *urbl =
- list_first_entry(&ep->urb_list,
- struct oz_urb_link, link);
- struct urb *urb;
- int copy_len;
-
- list_del_init(&urbl->link);
- spin_unlock_bh(&ozhcd->hcd_lock);
- urb = urbl->urb;
- oz_free_urb_link(urbl);
- if (data_len <= urb->transfer_buffer_length)
- copy_len = data_len;
- else
- copy_len = urb->transfer_buffer_length;
- memcpy(urb->transfer_buffer, data, copy_len);
- urb->actual_length = copy_len;
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
- return;
- }
- oz_dbg(ON, "buffering frame as URB is not available\n");
- oz_hcd_buffer_data(ep, data, data_len);
- break;
- case USB_ENDPOINT_XFER_ISOC:
- oz_hcd_buffer_data(ep, data, data_len);
- break;
- }
-done:
- spin_unlock_bh(&ozhcd->hcd_lock);
-}
-
-/*
- * Context: unknown
- */
-static inline int oz_usb_get_frame_number(void)
-{
- return atomic_inc_return(&g_usb_frame_number);
-}
-
-/*
- * Context: softirq
- */
-int oz_hcd_heartbeat(void *hport)
-{
- int rc = 0;
- struct oz_port *port = hport;
- struct oz_hcd *ozhcd = port->ozhcd;
- struct oz_urb_link *urbl, *n;
- LIST_HEAD(xfr_list);
- struct urb *urb;
- struct oz_endpoint *ep;
- struct timespec ts, delta;
-
- getrawmonotonic(&ts);
- /* Check the OUT isoc endpoints to see if any URB data can be sent.
- */
- spin_lock_bh(&ozhcd->hcd_lock);
- list_for_each_entry(ep, &port->isoc_out_ep, link) {
- if (ep->credit < 0)
- continue;
- delta = timespec_sub(ts, ep->timestamp);
- ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
- if (ep->credit > ep->credit_ceiling)
- ep->credit = ep->credit_ceiling;
- ep->timestamp = ts;
- while (ep->credit && !list_empty(&ep->urb_list)) {
- urbl = list_first_entry(&ep->urb_list,
- struct oz_urb_link, link);
- urb = urbl->urb;
- if ((ep->credit + 1) < urb->number_of_packets)
- break;
- ep->credit -= urb->number_of_packets;
- if (ep->credit < 0)
- ep->credit = 0;
- list_move_tail(&urbl->link, &xfr_list);
- }
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
- /* Send to PD and complete URBs.
- */
- list_for_each_entry_safe(urbl, n, &xfr_list, link) {
- urb = urbl->urb;
- list_del_init(&urbl->link);
- urb->error_count = 0;
- urb->start_frame = oz_usb_get_frame_number();
- oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
- oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
- }
- /* Check the IN isoc endpoints to see if any URBs can be completed.
- */
- spin_lock_bh(&ozhcd->hcd_lock);
- list_for_each_entry(ep, &port->isoc_in_ep, link) {
- if (ep->flags & OZ_F_EP_BUFFERING) {
- if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
- ep->flags &= ~OZ_F_EP_BUFFERING;
- ep->credit = 0;
- ep->timestamp = ts;
- ep->start_frame = 0;
- }
- continue;
- }
- delta = timespec_sub(ts, ep->timestamp);
- ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
- ep->timestamp = ts;
- list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
- struct urb *urb = urbl->urb;
- int len = 0;
- int copy_len;
- int i;
-
- if (ep->credit < urb->number_of_packets)
- break;
- if (ep->buffered_units < urb->number_of_packets)
- break;
- urb->actual_length = 0;
- for (i = 0; i < urb->number_of_packets; i++) {
- len = ep->buffer[ep->out_ix];
- if (++ep->out_ix == ep->buffer_size)
- ep->out_ix = 0;
- copy_len = ep->buffer_size - ep->out_ix;
- if (copy_len > len)
- copy_len = len;
- memcpy(urb->transfer_buffer,
- &ep->buffer[ep->out_ix], copy_len);
- if (copy_len < len) {
- memcpy(urb->transfer_buffer+copy_len,
- ep->buffer, len-copy_len);
- ep->out_ix = len-copy_len;
- } else
- ep->out_ix += copy_len;
- if (ep->out_ix == ep->buffer_size)
- ep->out_ix = 0;
- urb->iso_frame_desc[i].offset =
- urb->actual_length;
- urb->actual_length += len;
- urb->iso_frame_desc[i].actual_length = len;
- urb->iso_frame_desc[i].status = 0;
- }
- ep->buffered_units -= urb->number_of_packets;
- urb->error_count = 0;
- urb->start_frame = ep->start_frame;
- ep->start_frame += urb->number_of_packets;
- list_move_tail(&urbl->link, &xfr_list);
- ep->credit -= urb->number_of_packets;
- }
- }
- if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
- rc = 1;
- spin_unlock_bh(&ozhcd->hcd_lock);
- /* Complete the filled URBs.
- */
- list_for_each_entry_safe(urbl, n, &xfr_list, link) {
- urb = urbl->urb;
- list_del_init(&urbl->link);
- oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0);
- }
-	/* Check if there are any ep0 requests that have timed out.
-	 * If so, resend them to the PD.
-	 */
- ep = port->out_ep[0];
- if (ep) {
- spin_lock_bh(&ozhcd->hcd_lock);
- list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
- if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
- oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
- list_move_tail(&urbl->link, &xfr_list);
- urbl->submit_counter = 0;
- } else {
- urbl->submit_counter++;
- }
- }
- if (!list_empty(&ep->urb_list))
- rc = 1;
- spin_unlock_bh(&ozhcd->hcd_lock);
- list_for_each_entry_safe(urbl, n, &xfr_list, link) {
- oz_dbg(ON, "Resending request to PD\n");
- oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
- oz_free_urb_link(urbl);
- }
- }
- return rc;
-}
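The isoc pacing above is effectively a token bucket denominated in
milliseconds: on every heartbeat an endpoint earns one credit per elapsed
millisecond, capped at credit_ceiling, and an URB is moved only once its
number_of_packets can be paid for. The accrual step in isolation, assuming a
monotonic nanosecond clock (names illustrative):

#include <stdint.h>

#define NSEC_PER_MSEC 1000000LL

/* Earn one credit per elapsed millisecond, clamped to the ceiling, and
 * remember when we last accrued. Returns the new balance. */
static int accrue_credit(int credit, int ceiling,
			 int64_t now_ns, int64_t *last_ns)
{
	credit += (int)((now_ns - *last_ns) / NSEC_PER_MSEC);
	if (credit > ceiling)
		credit = ceiling;
	*last_ns = now_ns;
	return credit;
}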
-
-/*
- * Context: softirq
- */
-static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
- struct oz_port *port,
- struct usb_host_interface *intf, gfp_t mem_flags)
-{
- struct oz_hcd *ozhcd = port->ozhcd;
- int i;
- int if_ix = intf->desc.bInterfaceNumber;
- int request_heartbeat = 0;
-
- oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
- if (if_ix >= port->num_iface || port->iface == NULL)
- return -ENOMEM;
- for (i = 0; i < intf->desc.bNumEndpoints; i++) {
- struct usb_host_endpoint *hep = &intf->endpoint[i];
- u8 ep_addr = hep->desc.bEndpointAddress;
- u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
- struct oz_endpoint *ep;
- int buffer_size = 0;
-
- oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
- if (ep_addr & USB_ENDPOINT_DIR_MASK) {
- switch (hep->desc.bmAttributes &
- USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_ISOC:
- buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
- break;
- case USB_ENDPOINT_XFER_INT:
- buffer_size = OZ_EP_BUFFER_SIZE_INT;
- break;
- }
- }
-
- ep = oz_ep_alloc(buffer_size, mem_flags);
- if (!ep) {
- oz_clean_endpoints_for_interface(hcd, port, if_ix);
- return -ENOMEM;
- }
- ep->attrib = hep->desc.bmAttributes;
- ep->ep_num = ep_num;
- if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_ISOC) {
- oz_dbg(ON, "wMaxPacketSize = %d\n",
- usb_endpoint_maxp(&hep->desc));
- ep->credit_ceiling = 200;
- if (ep_addr & USB_ENDPOINT_DIR_MASK) {
- ep->flags |= OZ_F_EP_BUFFERING;
- } else {
- ep->flags |= OZ_F_EP_HAVE_STREAM;
- if (oz_usb_stream_create(port->hpd, ep_num))
- ep->flags &= ~OZ_F_EP_HAVE_STREAM;
- }
- }
- spin_lock_bh(&ozhcd->hcd_lock);
- if (ep_addr & USB_ENDPOINT_DIR_MASK) {
- port->in_ep[ep_num] = ep;
- port->iface[if_ix].ep_mask |=
- (1<<(ep_num+OZ_NB_ENDPOINTS));
- if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_ISOC) {
- list_add_tail(&ep->link, &port->isoc_in_ep);
- request_heartbeat = 1;
- }
- } else {
- port->out_ep[ep_num] = ep;
- port->iface[if_ix].ep_mask |= (1<<ep_num);
- if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_ISOC) {
- list_add_tail(&ep->link, &port->isoc_out_ep);
- request_heartbeat = 1;
- }
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
- if (request_heartbeat && port->hpd)
- oz_usb_request_heartbeat(port->hpd);
- }
- return 0;
-}
-
-/*
- * Context: softirq
- */
-static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
- struct oz_port *port, int if_ix)
-{
- struct oz_hcd *ozhcd = port->ozhcd;
- unsigned mask;
- int i;
- LIST_HEAD(ep_list);
- struct oz_endpoint *ep, *n;
-
- oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
- if (if_ix >= port->num_iface)
- return;
- spin_lock_bh(&ozhcd->hcd_lock);
- mask = port->iface[if_ix].ep_mask;
- port->iface[if_ix].ep_mask = 0;
- for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
- struct list_head *e;
- /* Gather OUT endpoints.
- */
- if ((mask & (1<<i)) && port->out_ep[i]) {
- e = &port->out_ep[i]->link;
- port->out_ep[i] = NULL;
- /* Remove from isoc list if present.
- */
- list_move_tail(e, &ep_list);
- }
- /* Gather IN endpoints.
- */
- if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
- e = &port->in_ep[i]->link;
- port->in_ep[i] = NULL;
- list_move_tail(e, &ep_list);
- }
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
- list_for_each_entry_safe(ep, n, &ep_list, link) {
- list_del_init(&ep->link);
- oz_ep_free(port, ep);
- }
-}
-
-/*
- * Context: softirq
- */
-static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
- struct oz_port *port, struct usb_host_config *config,
- gfp_t mem_flags)
-{
- struct oz_hcd *ozhcd = port->ozhcd;
- int i;
- int num_iface = config->desc.bNumInterfaces;
-
- if (num_iface) {
- struct oz_interface *iface;
-
- iface = kmalloc_array(num_iface, sizeof(struct oz_interface),
- mem_flags | __GFP_ZERO);
- if (!iface)
- return -ENOMEM;
- spin_lock_bh(&ozhcd->hcd_lock);
- port->iface = iface;
- port->num_iface = num_iface;
- spin_unlock_bh(&ozhcd->hcd_lock);
- }
- for (i = 0; i < num_iface; i++) {
- struct usb_host_interface *intf =
- &config->intf_cache[i]->altsetting[0];
- if (oz_build_endpoints_for_interface(hcd, port, intf,
- mem_flags))
- goto fail;
- }
- return 0;
-fail:
- oz_clean_endpoints_for_config(hcd, port);
- return -1;
-}
-
-/*
- * Context: softirq
- */
-static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
- struct oz_port *port)
-{
- struct oz_hcd *ozhcd = port->ozhcd;
- int i;
-
- oz_dbg(ON, "Deleting endpoints for configuration\n");
- for (i = 0; i < port->num_iface; i++)
- oz_clean_endpoints_for_interface(hcd, port, i);
- spin_lock_bh(&ozhcd->hcd_lock);
- if (port->iface) {
- oz_dbg(ON, "Freeing interfaces object\n");
- kfree(port->iface);
- port->iface = NULL;
- }
- port->num_iface = 0;
- spin_unlock_bh(&ozhcd->hcd_lock);
-}
-
-/*
- * Context: tasklet
- */
-static void *oz_claim_hpd(struct oz_port *port)
-{
- void *hpd;
- struct oz_hcd *ozhcd = port->ozhcd;
-
- spin_lock_bh(&ozhcd->hcd_lock);
- hpd = port->hpd;
- if (hpd)
- oz_usb_get(hpd);
- spin_unlock_bh(&ozhcd->hcd_lock);
- return hpd;
-}
-
-/*
- * Context: tasklet
- */
-static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
- gfp_t mem_flags)
-{
- struct usb_ctrlrequest *setup;
- unsigned windex;
- unsigned wvalue;
- unsigned wlength;
- void *hpd;
- u8 req_id;
- int rc = 0;
- unsigned complete = 0;
-
- int port_ix = -1;
- struct oz_port *port = NULL;
-
- oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
- port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
- if (port_ix < 0) {
- rc = -EPIPE;
- goto out;
- }
- port = &ozhcd->ports[port_ix];
- if (((port->flags & OZ_PORT_F_PRESENT) == 0)
- || (port->flags & OZ_PORT_F_DYING)) {
- oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
- rc = -EPIPE;
- goto out;
- }
- /* Store port in private context data.
- */
- urb->hcpriv = port;
- setup = (struct usb_ctrlrequest *)urb->setup_packet;
- windex = le16_to_cpu(setup->wIndex);
- wvalue = le16_to_cpu(setup->wValue);
- wlength = le16_to_cpu(setup->wLength);
- oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
- oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
- oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
- oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
- oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
-
- req_id = port->next_req_id++;
- hpd = oz_claim_hpd(port);
- if (hpd == NULL) {
- oz_dbg(ON, "Cannot claim port\n");
- rc = -EPIPE;
- goto out;
- }
-
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- /* Standard requests
- */
- switch (setup->bRequest) {
- case USB_REQ_GET_DESCRIPTOR:
- oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
- break;
- case USB_REQ_SET_ADDRESS:
- oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
- oz_dbg(ON, "Port %d address is 0x%x\n",
- ozhcd->conn_port,
- (u8)le16_to_cpu(setup->wValue));
- spin_lock_bh(&ozhcd->hcd_lock);
- if (ozhcd->conn_port >= 0) {
- ozhcd->ports[ozhcd->conn_port].bus_addr =
- (u8)le16_to_cpu(setup->wValue);
- oz_dbg(ON, "Clearing conn_port\n");
- ozhcd->conn_port = -1;
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
- complete = 1;
- break;
- case USB_REQ_SET_CONFIGURATION:
- oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
- break;
- case USB_REQ_GET_CONFIGURATION:
-			/* We short-circuit this case and reply directly since
- * we have the selected configuration number cached.
- */
- oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
- if (urb->transfer_buffer_length >= 1) {
- urb->actual_length = 1;
- *((u8 *)urb->transfer_buffer) =
- port->config_num;
- complete = 1;
- } else {
- rc = -EPIPE;
- }
- break;
- case USB_REQ_GET_INTERFACE:
-			/* We short-circuit this case and reply directly since
- * we have the selected interface alternative cached.
- */
- oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
- if (urb->transfer_buffer_length >= 1) {
- urb->actual_length = 1;
- *((u8 *)urb->transfer_buffer) =
- port->iface[(u8)windex].alt;
- oz_dbg(ON, "interface = %d alt = %d\n",
- windex, port->iface[(u8)windex].alt);
- complete = 1;
- } else {
- rc = -EPIPE;
- }
- break;
- case USB_REQ_SET_INTERFACE:
- oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
- break;
- }
- }
- if (!rc && !complete) {
- int data_len = 0;
-
- if ((setup->bRequestType & USB_DIR_IN) == 0)
- data_len = wlength;
- urb->actual_length = data_len;
- if (oz_usb_control_req(port->hpd, req_id, setup,
- urb->transfer_buffer, data_len)) {
- rc = -ENOMEM;
- } else {
- /* Note: we are queuing the request after we have
- * submitted it to be transmitted. If the request were
- * to complete before we queued it then it would not
- * be found in the queue. It seems impossible for
- * this to happen but if it did the request would
- * be resubmitted so the problem would hopefully
- * resolve itself. Putting the request into the
- * queue before it has been sent is worse since the
- * urb could be cancelled while we are using it
- * to build the request.
- */
- if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
- rc = -ENOMEM;
- }
- }
- oz_usb_put(hpd);
-out:
- if (rc || complete) {
- oz_dbg(ON, "Completing request locally\n");
- oz_complete_urb(ozhcd->hcd, urb, rc);
- } else {
- oz_usb_request_heartbeat(port->hpd);
- }
-}
-
-/*
- * Context: tasklet
- */
-static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
-{
- int rc = 0;
- struct oz_port *port = urb->hcpriv;
- u8 ep_addr;
-
-	/* When we are paranoid, we keep a list of urbs that we check against
- * before handing one back. This is just for debugging during
- * development and should be turned off in the released driver.
- */
- oz_remember_urb(urb);
- /* Check buffer is valid.
- */
- if (!urb->transfer_buffer && urb->transfer_buffer_length)
- return -EINVAL;
- /* Check if there is a device at the port - refuse if not.
- */
- if ((port->flags & OZ_PORT_F_PRESENT) == 0)
- return -EPIPE;
- ep_addr = usb_pipeendpoint(urb->pipe);
- if (ep_addr) {
- /* If the request is not for EP0 then queue it.
- */
- if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
- urb, 0))
- rc = -EPIPE;
- } else {
- oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
- }
- return rc;
-}
-
-/*
- * Context: tasklet
- */
-static void oz_urb_process_tasklet(unsigned long unused)
-{
- unsigned long irq_state;
- struct urb *urb;
- struct oz_hcd *ozhcd = oz_hcd_claim();
- struct oz_urb_link *urbl, *n;
- int rc = 0;
-
- if (ozhcd == NULL)
- return;
- /* This is called from a tasklet so is in softirq context but the urb
- * list is filled from any context so we need to lock
- * appropriately while removing urbs.
- */
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- list_for_each_entry_safe(urbl, n, &ozhcd->urb_pending_list, link) {
- list_del_init(&urbl->link);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- urb = urbl->urb;
- oz_free_urb_link(urbl);
- rc = oz_urb_process(ozhcd, urb);
- if (rc)
- oz_complete_urb(ozhcd->hcd, urb, rc);
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- }
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- oz_hcd_put(ozhcd);
-}
-
-/*
- * This function searches for the urb in any of the lists it could be in.
 - * If it is found, it is removed from the list and completed. If the urb is
 - * being processed, it won't be in a list and so won't be found. However, the
- * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
- * to a non-zero value. When an attempt is made to put the urb back in a list
- * the unlinked field will be checked and the urb will then be completed.
- * Context: tasklet
- */
-static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
-{
- struct oz_urb_link *urbl = NULL;
- struct list_head *e;
- struct oz_hcd *ozhcd;
- unsigned long irq_state;
- u8 ix;
-
- if (port == NULL) {
- oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
- return;
- }
- ozhcd = port->ozhcd;
- if (ozhcd == NULL) {
- oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb);
- return;
- }
-
- /* Look in the tasklet queue.
- */
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- list_for_each(e, &ozhcd->urb_cancel_list) {
- urbl = list_entry(e, struct oz_urb_link, link);
- if (urb == urbl->urb) {
- list_del_init(e);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- goto out2;
- }
- }
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- urbl = NULL;
-
- /* Look in the orphanage.
- */
- spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
- list_for_each(e, &ozhcd->orphanage) {
- urbl = list_entry(e, struct oz_urb_link, link);
- if (urbl->urb == urb) {
- list_del(e);
- oz_dbg(ON, "Found urb in orphanage\n");
- goto out;
- }
- }
- ix = (ep_num & 0xf);
- urbl = NULL;
- if ((ep_num & USB_DIR_IN) && ix)
- urbl = oz_remove_urb(port->in_ep[ix], urb);
- else
- urbl = oz_remove_urb(port->out_ep[ix], urb);
-out:
- spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
-out2:
- if (urbl) {
- urb->actual_length = 0;
- oz_free_urb_link(urbl);
- oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
- }
-}
-
-/*
- * Context: tasklet
- */
-static void oz_urb_cancel_tasklet(unsigned long unused)
-{
- unsigned long irq_state;
- struct urb *urb;
- struct oz_urb_link *urbl, *n;
- struct oz_hcd *ozhcd = oz_hcd_claim();
-
- if (ozhcd == NULL)
- return;
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- list_for_each_entry_safe(urbl, n, &ozhcd->urb_cancel_list, link) {
- list_del_init(&urbl->link);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- urb = urbl->urb;
- if (urb->unlinked)
- oz_urb_cancel(urbl->port, urbl->ep_num, urb);
- oz_free_urb_link(urbl);
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- }
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- oz_hcd_put(ozhcd);
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
-{
- if (ozhcd) {
- struct oz_urb_link *urbl, *n;
-
- list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) {
- list_del(&urbl->link);
- oz_complete_urb(ozhcd->hcd, urbl->urb, status);
- oz_free_urb_link(urbl);
- }
- }
-}
-
-/*
- * Context: unknown
- */
-static int oz_hcd_start(struct usb_hcd *hcd)
-{
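-	/* The power budget is in mA, following the USB core's convention
-	 * for hcd->power_budget.
-	 */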
- hcd->power_budget = 200;
- hcd->state = HC_STATE_RUNNING;
- hcd->uses_new_polling = 1;
- return 0;
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_stop(struct usb_hcd *hcd)
-{
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_shutdown(struct usb_hcd *hcd)
-{
-}
-
-/*
- * Called to queue an urb for the device.
- * This function should return a non-zero error code if it fails the urb but
- * should not call usb_hcd_giveback_urb().
- * Context: any
- */
-static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- int rc;
- int port_ix;
- struct oz_port *port;
- unsigned long irq_state;
- struct oz_urb_link *urbl;
-
- oz_dbg(URB, "%s: (%p)\n", __func__, urb);
- if (unlikely(ozhcd == NULL)) {
- oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
- return -EPIPE;
- }
- if (unlikely(hcd->state != HC_STATE_RUNNING)) {
- oz_dbg(URB, "Refused urb(%p) not running\n", urb);
- return -EPIPE;
- }
- port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
- if (port_ix < 0)
- return -EPIPE;
- port = &ozhcd->ports[port_ix];
- if (port == NULL)
- return -EPIPE;
- if (!(port->flags & OZ_PORT_F_PRESENT) ||
- (port->flags & OZ_PORT_F_CHANGED)) {
- oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
- return -EPIPE;
- }
- urb->hcpriv = port;
- /* Put request in queue for processing by tasklet.
- */
- urbl = oz_alloc_urb_link();
- if (unlikely(urbl == NULL))
- return -ENOMEM;
- urbl->urb = urb;
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- rc = usb_hcd_link_urb_to_ep(hcd, urb);
- if (unlikely(rc)) {
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- oz_free_urb_link(urbl);
- return rc;
- }
- list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- tasklet_schedule(&g_urb_process_tasklet);
- atomic_inc(&g_pending_urbs);
- return 0;
-}
-
-/*
- * Context: tasklet
- */
-static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
- struct urb *urb)
-{
- struct oz_urb_link *urbl;
-
- if (unlikely(ep == NULL))
- return NULL;
-
- list_for_each_entry(urbl, &ep->urb_list, link) {
- if (urbl->urb == urb) {
- list_del_init(&urbl->link);
- if (usb_pipeisoc(urb->pipe)) {
- ep->credit -= urb->number_of_packets;
- if (ep->credit < 0)
- ep->credit = 0;
- }
- return urbl;
- }
- }
- return NULL;
-}
-
-/*
- * Called to dequeue a previously submitted urb for the device.
- * Context: any
- */
-static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
-{
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- struct oz_urb_link *urbl;
- int rc;
- unsigned long irq_state;
-
- oz_dbg(URB, "%s: (%p)\n", __func__, urb);
- urbl = oz_alloc_urb_link();
- if (unlikely(urbl == NULL))
- return -ENOMEM;
- spin_lock_irqsave(&g_tasklet_lock, irq_state);
- /* The following function checks the urb is still in the queue
- * maintained by the core and that the unlinked field is zero.
- * If both are true the function sets the unlinked field and returns
- * zero. Otherwise it returns an error.
- */
- rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- /* We have to check we haven't completed the urb or are about
-	 * to complete it. When we do, we set hcpriv to 0, so if this has
- * already happened we don't put the urb in the cancel queue.
- */
- if ((rc == 0) && urb->hcpriv) {
- urbl->urb = urb;
- urbl->port = (struct oz_port *)urb->hcpriv;
- urbl->ep_num = usb_pipeendpoint(urb->pipe);
- if (usb_pipein(urb->pipe))
- urbl->ep_num |= USB_DIR_IN;
- list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- tasklet_schedule(&g_urb_cancel_tasklet);
- } else {
- spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- oz_free_urb_link(urbl);
- }
- return rc;
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
-}
-
-/*
- * Context: unknown
- */
-static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
-{
- oz_dbg(ON, "oz_hcd_get_frame_number\n");
- return oz_usb_get_frame_number();
-}
-
-/*
- * Context: softirq
 - * This is called as a consequence of our calling usb_hcd_poll_rh_status(),
 - * which we always do in softirq context.
- */
-static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- int i;
-
- buf[0] = 0;
- buf[1] = 0;
-
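-	/* Build the hub status-change bitmap: bit 0 is the hub itself, so
-	 * port index i maps to bit i + 1.
-	 */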
- spin_lock_bh(&ozhcd->hcd_lock);
- for (i = 0; i < OZ_NB_PORTS; i++) {
- if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
- oz_dbg(HUB, "Port %d changed\n", i);
- ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
- if (i < 7)
- buf[0] |= 1 << (i + 1);
- else
- buf[1] |= 1 << (i - 7);
- }
- }
- spin_unlock_bh(&ozhcd->hcd_lock);
- if (buf[0] != 0 || buf[1] != 0)
- return 2;
- return 0;
-}
-
-/*
- * Context: process
- */
-static void oz_get_hub_descriptor(struct usb_hcd *hcd,
- struct usb_hub_descriptor *desc)
-{
- memset(desc, 0, sizeof(*desc));
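-	/* 0x29 is the USB hub descriptor type; characteristics 0x0001
-	 * selects per-port power switching.
-	 */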
- desc->bDescriptorType = 0x29;
- desc->bDescLength = 9;
- desc->wHubCharacteristics = cpu_to_le16(0x0001);
- desc->bNbrPorts = OZ_NB_PORTS;
-}
-
-/*
- * Context: process
- */
-static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
-{
- struct oz_port *port;
- u8 port_id = (u8)windex;
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- unsigned set_bits = 0;
- unsigned clear_bits = 0;
-
- if ((port_id < 1) || (port_id > OZ_NB_PORTS))
- return -EPIPE;
- port = &ozhcd->ports[port_id-1];
- switch (wvalue) {
- case USB_PORT_FEAT_CONNECTION:
- oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
- break;
- case USB_PORT_FEAT_ENABLE:
- oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
- break;
- case USB_PORT_FEAT_SUSPEND:
- oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
- break;
- case USB_PORT_FEAT_OVER_CURRENT:
- oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
- break;
- case USB_PORT_FEAT_RESET:
- oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
- set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
- clear_bits = USB_PORT_STAT_RESET;
- ozhcd->ports[port_id-1].bus_addr = 0;
- break;
- case USB_PORT_FEAT_POWER:
- oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
- set_bits |= USB_PORT_STAT_POWER;
- break;
- case USB_PORT_FEAT_LOWSPEED:
- oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
- break;
- case USB_PORT_FEAT_C_ENABLE:
- oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
- break;
- case USB_PORT_FEAT_C_RESET:
- oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
- break;
- case USB_PORT_FEAT_TEST:
- oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
- break;
- case USB_PORT_FEAT_INDICATOR:
- oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
- break;
- default:
- oz_dbg(HUB, "Other %d\n", wvalue);
- break;
- }
- if (set_bits || clear_bits) {
- spin_lock_bh(&port->port_lock);
- port->status &= ~clear_bits;
- port->status |= set_bits;
- spin_unlock_bh(&port->port_lock);
- }
- oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
- return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
-{
- struct oz_port *port;
- u8 port_id = (u8)windex;
- struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- unsigned clear_bits = 0;
-
- if ((port_id < 1) || (port_id > OZ_NB_PORTS))
- return -EPIPE;
- port = &ozhcd->ports[port_id-1];
- switch (wvalue) {
- case USB_PORT_FEAT_CONNECTION:
- oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
- break;
- case USB_PORT_FEAT_ENABLE:
- oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
- clear_bits = USB_PORT_STAT_ENABLE;
- break;
- case USB_PORT_FEAT_SUSPEND:
- oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
- break;
- case USB_PORT_FEAT_OVER_CURRENT:
- oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
- break;
- case USB_PORT_FEAT_RESET:
- oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
- break;
- case USB_PORT_FEAT_POWER:
- oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
- clear_bits |= USB_PORT_STAT_POWER;
- break;
- case USB_PORT_FEAT_LOWSPEED:
- oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
- clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
- break;
- case USB_PORT_FEAT_C_ENABLE:
- oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
- clear_bits = USB_PORT_STAT_C_ENABLE << 16;
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
- break;
- case USB_PORT_FEAT_C_RESET:
- oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
-		clear_bits = USB_PORT_STAT_C_RESET << 16;
- break;
- case USB_PORT_FEAT_TEST:
- oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
- break;
- case USB_PORT_FEAT_INDICATOR:
- oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
- break;
- default:
- oz_dbg(HUB, "Other %d\n", wvalue);
- break;
- }
- if (clear_bits) {
- spin_lock_bh(&port->port_lock);
- port->status &= ~clear_bits;
- spin_unlock_bh(&port->port_lock);
- }
- oz_dbg(HUB, "Port[%d] status = 0x%x\n",
- port_id, ozhcd->ports[port_id-1].status);
- return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
-{
- struct oz_hcd *ozhcd;
- u32 status;
-
- if ((windex < 1) || (windex > OZ_NB_PORTS))
- return -EPIPE;
- ozhcd = oz_hcd_private(hcd);
- oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
- status = ozhcd->ports[windex-1].status;
- put_unaligned(cpu_to_le32(status), (__le32 *)buf);
- oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
- return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
- u16 windex, char *buf, u16 wlength)
-{
- int err = 0;
-
- switch (req_type) {
- case ClearHubFeature:
- oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
- break;
- case ClearPortFeature:
- err = oz_clear_port_feature(hcd, wvalue, windex);
- break;
- case GetHubDescriptor:
- oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
- break;
- case GetHubStatus:
- oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
- put_unaligned(cpu_to_le32(0), (__le32 *)buf);
- break;
- case GetPortStatus:
- err = oz_get_port_status(hcd, windex, buf);
- break;
- case SetHubFeature:
- oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
- break;
- case SetPortFeature:
- err = oz_set_port_feature(hcd, wvalue, windex);
- break;
- default:
- oz_dbg(HUB, "Other: %d\n", req_type);
- break;
- }
- return err;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
-{
- struct oz_hcd *ozhcd;
-
- ozhcd = oz_hcd_private(hcd);
- spin_lock_bh(&ozhcd->hcd_lock);
- hcd->state = HC_STATE_SUSPENDED;
- ozhcd->flags |= OZ_HDC_F_SUSPENDED;
- spin_unlock_bh(&ozhcd->hcd_lock);
- return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_bus_resume(struct usb_hcd *hcd)
-{
- struct oz_hcd *ozhcd;
-
- ozhcd = oz_hcd_private(hcd);
- spin_lock_bh(&ozhcd->hcd_lock);
- ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
- hcd->state = HC_STATE_RUNNING;
- spin_unlock_bh(&ozhcd->hcd_lock);
- return 0;
-}
-
-static void oz_plat_shutdown(struct platform_device *dev)
-{
-}
-
-/*
- * Context: process
- */
-static int oz_plat_probe(struct platform_device *dev)
-{
- int i;
- int err;
- struct usb_hcd *hcd;
- struct oz_hcd *ozhcd;
-
- hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
- if (hcd == NULL) {
- oz_dbg(ON, "Failed to created hcd object OK\n");
- return -ENOMEM;
- }
- ozhcd = oz_hcd_private(hcd);
- memset(ozhcd, 0, sizeof(*ozhcd));
- INIT_LIST_HEAD(&ozhcd->urb_pending_list);
- INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
- INIT_LIST_HEAD(&ozhcd->orphanage);
- ozhcd->hcd = hcd;
- ozhcd->conn_port = -1;
- spin_lock_init(&ozhcd->hcd_lock);
- for (i = 0; i < OZ_NB_PORTS; i++) {
- struct oz_port *port = &ozhcd->ports[i];
-
- port->ozhcd = ozhcd;
- port->flags = 0;
- port->status = 0;
- port->bus_addr = 0xff;
- spin_lock_init(&port->port_lock);
- }
- err = usb_add_hcd(hcd, 0, 0);
- if (err) {
- oz_dbg(ON, "Failed to add hcd object OK\n");
- usb_put_hcd(hcd);
- return -1;
- }
- device_wakeup_enable(hcd->self.controller);
-
- spin_lock_bh(&g_hcdlock);
- g_ozhcd = ozhcd;
- spin_unlock_bh(&g_hcdlock);
- return 0;
-}
-
-/*
- * Context: unknown
- */
-static int oz_plat_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct oz_hcd *ozhcd;
-
- if (hcd == NULL)
- return -1;
- ozhcd = oz_hcd_private(hcd);
- spin_lock_bh(&g_hcdlock);
- if (ozhcd == g_ozhcd)
- g_ozhcd = NULL;
- spin_unlock_bh(&g_hcdlock);
- oz_dbg(ON, "Clearing orphanage\n");
- oz_hcd_clear_orphanage(ozhcd, -EPIPE);
- oz_dbg(ON, "Removing hcd\n");
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
- return 0;
-}
-
-/*
- * Context: unknown
- */
-static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return 0;
-}
-
-
-/*
- * Context: unknown
- */
-static int oz_plat_resume(struct platform_device *dev)
-{
- return 0;
-}
-
-/*
- * Context: process
- */
-int oz_hcd_init(void)
-{
- int err;
-
- if (usb_disabled())
- return -ENODEV;
-
- oz_urb_link_cache = KMEM_CACHE(oz_urb_link, 0);
- if (!oz_urb_link_cache)
- return -ENOMEM;
-
- tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
- tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
- err = platform_driver_register(&g_oz_plat_drv);
- oz_dbg(ON, "platform_driver_register() returned %d\n", err);
- if (err)
- goto error;
- g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
- if (g_plat_dev == NULL) {
- err = -ENOMEM;
- goto error1;
- }
- oz_dbg(ON, "platform_device_alloc() succeeded\n");
- err = platform_device_add(g_plat_dev);
- if (err)
- goto error2;
- oz_dbg(ON, "platform_device_add() succeeded\n");
- return 0;
-error2:
- platform_device_put(g_plat_dev);
-error1:
- platform_driver_unregister(&g_oz_plat_drv);
-error:
- tasklet_disable(&g_urb_process_tasklet);
- tasklet_disable(&g_urb_cancel_tasklet);
- oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
- return err;
-}
-
-/*
- * Context: process
- */
-void oz_hcd_term(void)
-{
- msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
- tasklet_kill(&g_urb_process_tasklet);
- tasklet_kill(&g_urb_cancel_tasklet);
- platform_device_unregister(g_plat_dev);
- platform_driver_unregister(&g_oz_plat_drv);
- oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
- kmem_cache_destroy(oz_urb_link_cache);
-}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
deleted file mode 100644
index 55e97b1c7079..000000000000
--- a/drivers/staging/ozwpan/ozhcd.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-#ifndef _OZHCD_H
-#define _OZHCD_H
-
-int oz_hcd_init(void);
-void oz_hcd_term(void);
-struct oz_port *oz_hcd_pd_arrived(void *ctx);
-void oz_hcd_pd_departed(struct oz_port *hport);
-void oz_hcd_pd_reset(void *hpd, void *hport);
-
-#endif /* _OZHCD_H */
-
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
deleted file mode 100644
index 74ef34815b98..000000000000
--- a/drivers/staging/ozwpan/ozmain.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/ieee80211.h>
-#include "ozdbg.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-
-unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK;
-
-/*
- * The name of the 802.11 MAC device. The empty string is the default, but a
- * value can be supplied as a module parameter. An empty string means bind to
- * nothing; '*' means bind to all netcards, including non-802.11 netcards.
- * Bindings can be added later using an IOCTL.
- */
-static char *g_net_dev = "";
-module_param(g_net_dev, charp, S_IRUGO);
-MODULE_PARM_DESC(g_net_dev, "The device(s) to bind to; "
- "'*' means all, '' (empty string; default) means none.");
-
-/*
- * Context: process
- */
-static int __init ozwpan_init(void)
-{
- int err;
-
- err = oz_cdev_register();
- if (err)
- return err;
- err = oz_protocol_init(g_net_dev);
- if (err)
- goto err_protocol;
- oz_app_enable(OZ_APPID_USB, 1);
- oz_apps_init();
- return 0;
-
-err_protocol:
- oz_cdev_deregister();
- return err;
-}
-
-/*
- * Context: process
- */
-static void __exit ozwpan_exit(void)
-{
- oz_protocol_term();
- oz_apps_term();
- oz_cdev_deregister();
-}
-
-module_init(ozwpan_init);
-module_exit(ozwpan_exit);
-
-MODULE_AUTHOR("Chris Kelly");
-MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
-MODULE_VERSION("1.0.13");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
deleted file mode 100644
index 021d74a132dd..000000000000
--- a/drivers/staging/ozwpan/ozpd.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/errno.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-#include "ozusbsvc.h"
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#include <net/psnap.h>
-
-static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
-static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
-static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
-static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
-static int oz_send_isoc_frame(struct oz_pd *pd);
-static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
-static void oz_isoc_stream_free(struct oz_isoc_stream *st);
-static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
-static void oz_isoc_destructor(struct sk_buff *skb);
-
-/*
- * Counts the uncompleted isoc frames submitted to netcard.
- */
-static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
-
-/* Application handler functions.
- */
-static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
- [OZ_APPID_USB] = {
- .init = oz_usb_init,
- .term = oz_usb_term,
- .start = oz_usb_start,
- .stop = oz_usb_stop,
- .rx = oz_usb_rx,
- .heartbeat = oz_usb_heartbeat,
- .farewell = oz_usb_farewell,
- },
- [OZ_APPID_SERIAL] = {
- .init = oz_cdev_init,
- .term = oz_cdev_term,
- .start = oz_cdev_start,
- .stop = oz_cdev_stop,
- .rx = oz_cdev_rx,
- },
-};
-
-
-/*
- * Context: softirq or process
- */
-void oz_pd_set_state(struct oz_pd *pd, unsigned state)
-{
- pd->state = state;
- switch (state) {
- case OZ_PD_S_IDLE:
- oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
- break;
- case OZ_PD_S_CONNECTED:
- oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
- break;
- case OZ_PD_S_STOPPED:
- oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
- break;
- case OZ_PD_S_SLEEP:
- oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
- break;
- }
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_get(struct oz_pd *pd)
-{
- atomic_inc(&pd->ref_count);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_put(struct oz_pd *pd)
-{
- if (atomic_dec_and_test(&pd->ref_count))
- oz_pd_destroy(pd);
-}
-
-/*
- * Context: softirq-serialized
- */
-struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
-{
- struct oz_pd *pd;
- int i;
-
- pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
- if (!pd)
- return NULL;
-
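-	/* Two initial references: one for the PD list the PD will be added
-	 * to and one for the caller, each dropped via oz_pd_put().
-	 */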
- atomic_set(&pd->ref_count, 2);
- for (i = 0; i < OZ_NB_APPS; i++)
- spin_lock_init(&pd->app_lock[i]);
- pd->last_rx_pkt_num = 0xffffffff;
- oz_pd_set_state(pd, OZ_PD_S_IDLE);
- pd->max_tx_size = OZ_MAX_TX_SIZE;
- ether_addr_copy(pd->mac_addr, mac_addr);
- oz_elt_buf_init(&pd->elt_buff);
- spin_lock_init(&pd->tx_frame_lock);
- INIT_LIST_HEAD(&pd->tx_queue);
- INIT_LIST_HEAD(&pd->farewell_list);
- pd->last_sent_frame = &pd->tx_queue;
- spin_lock_init(&pd->stream_lock);
- INIT_LIST_HEAD(&pd->stream_list);
- tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
- (unsigned long)pd);
- tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
- (unsigned long)pd);
- hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pd->heartbeat.function = oz_pd_heartbeat_event;
- pd->timeout.function = oz_pd_timeout_event;
-
- return pd;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_pd_free(struct work_struct *work)
-{
- struct list_head *e, *n;
- struct oz_pd *pd;
-
- oz_pd_dbg(pd, ON, "Destroying PD\n");
- pd = container_of(work, struct oz_pd, workitem);
- /*Disable timer tasklets*/
- tasklet_kill(&pd->heartbeat_tasklet);
- tasklet_kill(&pd->timeout_tasklet);
-
- /* Free streams, queued tx frames and farewells. */
-
- list_for_each_safe(e, n, &pd->stream_list)
- oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));
-
- list_for_each_safe(e, n, &pd->tx_queue) {
- struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);
-
- if (f->skb != NULL)
- kfree_skb(f->skb);
- oz_retire_frame(pd, f);
- }
-
- oz_elt_buf_term(&pd->elt_buff);
-
- list_for_each_safe(e, n, &pd->farewell_list)
- kfree(list_entry(e, struct oz_farewell, link));
-
- if (pd->net_dev)
- dev_put(pd->net_dev);
- kfree(pd);
-}
-
-/*
 - * Context: softirq or process
- */
-void oz_pd_destroy(struct oz_pd *pd)
-{
- if (hrtimer_active(&pd->timeout))
- hrtimer_cancel(&pd->timeout);
- if (hrtimer_active(&pd->heartbeat))
- hrtimer_cancel(&pd->heartbeat);
-
- INIT_WORK(&pd->workitem, oz_pd_free);
- if (!schedule_work(&pd->workitem))
- oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
-}
-
-/*
- * Context: softirq-serialized
- */
-int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
-{
- int i, rc = 0;
-
- oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
- for (i = 0; i < OZ_NB_APPS; i++) {
- if (g_app_if[i].start && (apps & (1 << i))) {
- if (g_app_if[i].start(pd, resume)) {
- rc = -1;
- oz_pd_dbg(pd, ON,
- "Unable to start service %d\n", i);
- break;
- }
- spin_lock_bh(&g_polling_lock);
- pd->total_apps |= (1 << i);
- if (resume)
- pd->paused_apps &= ~(1 << i);
- spin_unlock_bh(&g_polling_lock);
- }
- }
- return rc;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
-{
- int i;
-
- oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
- for (i = 0; i < OZ_NB_APPS; i++) {
- if (g_app_if[i].stop && (apps & (1 << i))) {
- spin_lock_bh(&g_polling_lock);
- if (pause) {
- pd->paused_apps |= (1 << i);
- } else {
- pd->total_apps &= ~(1 << i);
- pd->paused_apps &= ~(1 << i);
- }
- spin_unlock_bh(&g_polling_lock);
- g_app_if[i].stop(pd, pause);
- }
- }
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
-{
- int i, more = 0;
-
- for (i = 0; i < OZ_NB_APPS; i++) {
- if (g_app_if[i].heartbeat && (apps & (1 << i))) {
- if (g_app_if[i].heartbeat(pd))
- more = 1;
- }
- }
- if ((!more) && (hrtimer_active(&pd->heartbeat)))
- hrtimer_cancel(&pd->heartbeat);
- if (pd->mode & OZ_F_ISOC_ANYTIME) {
- int count = 8;
-
- while (count-- && (oz_send_isoc_frame(pd) >= 0))
- ;
- }
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_stop(struct oz_pd *pd)
-{
- u16 stop_apps;
-
- oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
- oz_pd_indicate_farewells(pd);
- spin_lock_bh(&g_polling_lock);
- stop_apps = pd->total_apps;
- pd->total_apps = 0;
- pd->paused_apps = 0;
- spin_unlock_bh(&g_polling_lock);
- oz_services_stop(pd, stop_apps, 0);
- spin_lock_bh(&g_polling_lock);
- oz_pd_set_state(pd, OZ_PD_S_STOPPED);
- /* Remove from PD list.*/
- list_del(&pd->link);
- spin_unlock_bh(&g_polling_lock);
- oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
- oz_pd_put(pd);
-}
-
-/*
- * Context: softirq
- */
-int oz_pd_sleep(struct oz_pd *pd)
-{
- int do_stop = 0;
- u16 stop_apps;
-
- spin_lock_bh(&g_polling_lock);
- if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
- spin_unlock_bh(&g_polling_lock);
- return 0;
- }
- if (pd->keep_alive && pd->session_id)
- oz_pd_set_state(pd, OZ_PD_S_SLEEP);
- else
- do_stop = 1;
-
- stop_apps = pd->total_apps;
- spin_unlock_bh(&g_polling_lock);
- if (do_stop) {
- oz_pd_stop(pd);
- } else {
- oz_services_stop(pd, stop_apps, 1);
- oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
- }
- return do_stop;
-}
-
-/*
- * Context: softirq
- */
-static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
-{
- struct oz_tx_frame *f;
-
- f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
- if (f) {
- f->total_size = sizeof(struct oz_hdr);
- INIT_LIST_HEAD(&f->link);
- INIT_LIST_HEAD(&f->elt_list);
- }
- return f;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
-{
- pd->nb_queued_isoc_frames--;
- list_del_init(&f->link);
-
- kmem_cache_free(oz_tx_frame_cache, f);
-
- oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
- pd->nb_queued_isoc_frames);
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
-{
- kmem_cache_free(oz_tx_frame_cache, f);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_set_more_bit(struct sk_buff *skb)
-{
- struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
-
- oz_hdr->control |= OZ_F_MORE_DATA;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
-{
- struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
-
- oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
-}
-
-/*
- * Context: softirq
- */
-int oz_prepare_frame(struct oz_pd *pd, int empty)
-{
- struct oz_tx_frame *f;
-
- if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
- return -1;
- if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
- return -1;
- if (!empty && !oz_are_elts_available(&pd->elt_buff))
- return -1;
- f = oz_tx_frame_alloc(pd);
- if (f == NULL)
- return -1;
- f->skb = NULL;
- f->hdr.control =
- (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
- ++pd->last_tx_pkt_num;
- put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
- if (empty == 0) {
- oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
- pd->max_tx_size, &f->elt_list);
- }
- spin_lock(&pd->tx_frame_lock);
- list_add_tail(&f->link, &pd->tx_queue);
- pd->nb_queued_frames++;
- spin_unlock(&pd->tx_frame_lock);
- return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
-{
- struct sk_buff *skb;
- struct net_device *dev = pd->net_dev;
- struct oz_hdr *oz_hdr;
- struct oz_elt *elt;
- struct oz_elt_info *ei;
-
- /* Allocate skb with enough space for the lower layers as well
- * as the space we need.
- */
- skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == NULL)
- return NULL;
- /* Reserve the head room for lower layers.
- */
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
- skb->dev = dev;
- skb->protocol = htons(OZ_ETHERTYPE);
- if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
- dev->dev_addr, skb->len) < 0)
- goto fail;
- /* Push the tail to the end of the area we are going to copy to.
- */
- oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
- f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
- memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
- /* Copy the elements into the frame body.
- */
- elt = (struct oz_elt *)(oz_hdr+1);
- list_for_each_entry(ei, &f->elt_list, link) {
- memcpy(elt, ei->data, ei->length);
- elt = oz_next_elt(elt);
- }
- return skb;
-fail:
- kfree_skb(skb);
- return NULL;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
-{
- struct oz_elt_info *ei, *n;
-
- list_for_each_entry_safe(ei, n, &f->elt_list, link) {
- list_del_init(&ei->link);
- if (ei->callback)
- ei->callback(pd, ei->context);
- spin_lock_bh(&pd->elt_buff.lock);
- oz_elt_info_free(&pd->elt_buff, ei);
- spin_unlock_bh(&pd->elt_buff.lock);
- }
- oz_tx_frame_free(pd, f);
-}
-
-/*
- * Context: softirq-serialized
- */
-static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
-{
- struct sk_buff *skb;
- struct oz_tx_frame *f;
- struct list_head *e;
-
- spin_lock(&pd->tx_frame_lock);
- e = pd->last_sent_frame->next;
- if (e == &pd->tx_queue) {
- spin_unlock(&pd->tx_frame_lock);
- return -1;
- }
- f = list_entry(e, struct oz_tx_frame, link);
-
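-	/* A non-NULL skb marks a pre-built isoc frame queued by
-	 * oz_send_isoc_unit(); ordinary frames are built here from their
-	 * element list.
-	 */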
- if (f->skb != NULL) {
- skb = f->skb;
- oz_tx_isoc_free(pd, f);
- spin_unlock(&pd->tx_frame_lock);
- if (more_data)
- oz_set_more_bit(skb);
- oz_set_last_pkt_nb(pd, skb);
- if ((int)atomic_read(&g_submitted_isoc) <
- OZ_MAX_SUBMITTED_ISOC) {
- if (dev_queue_xmit(skb) < 0) {
- oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
- return -1;
- }
- atomic_inc(&g_submitted_isoc);
- oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
- pd->nb_queued_isoc_frames);
- return 0;
- }
- kfree_skb(skb);
- oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
- return -1;
- }
-
- pd->last_sent_frame = e;
- skb = oz_build_frame(pd, f);
- spin_unlock(&pd->tx_frame_lock);
- if (!skb)
- return -1;
- if (more_data)
- oz_set_more_bit(skb);
- oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
- if (dev_queue_xmit(skb) < 0)
- return -1;
-
- return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_send_queued_frames(struct oz_pd *pd, int backlog)
-{
- while (oz_prepare_frame(pd, 0) >= 0)
- backlog++;
-
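-	/* How much may be sent now depends on the PD's isoc mode: queued
-	 * isoc frames add to the backlog when the PD signals isoc without
-	 * elements.
-	 */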
- switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
-
- case OZ_F_ISOC_NO_ELTS: {
- backlog += pd->nb_queued_isoc_frames;
- if (backlog <= 0)
- goto out;
- if (backlog > OZ_MAX_SUBMITTED_ISOC)
- backlog = OZ_MAX_SUBMITTED_ISOC;
- break;
- }
- case OZ_NO_ELTS_ANYTIME: {
- if ((backlog <= 0) && (pd->isoc_sent == 0))
- goto out;
- break;
- }
- default: {
- if (backlog <= 0)
- goto out;
- break;
- }
- }
- while (backlog--) {
- if (oz_send_next_queued_frame(pd, backlog) < 0)
- break;
- }
- return;
-
-out:
-	oz_prepare_frame(pd, 1);
- oz_send_next_queued_frame(pd, 0);
-}
-
-/*
- * Context: softirq
- */
-static int oz_send_isoc_frame(struct oz_pd *pd)
-{
- struct sk_buff *skb;
- struct net_device *dev = pd->net_dev;
- struct oz_hdr *oz_hdr;
- struct oz_elt *elt;
- struct oz_elt_info *ei;
- LIST_HEAD(list);
- int total_size = sizeof(struct oz_hdr);
-
- oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
- pd->max_tx_size, &list);
- if (list_empty(&list))
- return 0;
- skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == NULL) {
- oz_dbg(ON, "Cannot alloc skb\n");
- oz_elt_info_free_chain(&pd->elt_buff, &list);
- return -1;
- }
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
- skb->dev = dev;
- skb->protocol = htons(OZ_ETHERTYPE);
- if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
- dev->dev_addr, skb->len) < 0) {
- kfree_skb(skb);
- return -1;
- }
- oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
- oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
- oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
- elt = (struct oz_elt *)(oz_hdr+1);
-
- list_for_each_entry(ei, &list, link) {
- memcpy(elt, ei->data, ei->length);
- elt = oz_next_elt(elt);
- }
- dev_queue_xmit(skb);
- oz_elt_info_free_chain(&pd->elt_buff, &list);
- return 0;
-}
-
-/*
- * Context: softirq-serialized
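 - * lpn is the peer's last received packet number (modulo the packet-number
 - * cycle); frames no more than half a cycle behind it are acked and retired.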
- */
-void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
-{
- struct oz_tx_frame *f, *tmp = NULL;
- u8 diff;
- u32 pkt_num;
-
- LIST_HEAD(list);
-
- spin_lock(&pd->tx_frame_lock);
- list_for_each_entry(f, &pd->tx_queue, link) {
- pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
- diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
- if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
- break;
- oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
- pkt_num, pd->nb_queued_frames);
- tmp = f;
- pd->nb_queued_frames--;
- }
- if (tmp)
- list_cut_position(&list, &pd->tx_queue, &tmp->link);
- pd->last_sent_frame = &pd->tx_queue;
- spin_unlock(&pd->tx_frame_lock);
-
- list_for_each_entry_safe(f, tmp, &list, link)
- oz_retire_frame(pd, f);
-}
-
-/*
- * Precondition: stream_lock must be held.
- * Context: softirq
- */
-static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
-{
- struct oz_isoc_stream *st;
-
- list_for_each_entry(st, &pd->stream_list, link) {
- if (st->ep_num == ep_num)
- return st;
- }
- return NULL;
-}
-
-/*
- * Context: softirq
- */
-int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
-{
- struct oz_isoc_stream *st;
-
- st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
- if (!st)
- return -ENOMEM;
- st->ep_num = ep_num;
- spin_lock_bh(&pd->stream_lock);
- if (!pd_stream_find(pd, ep_num)) {
- list_add(&st->link, &pd->stream_list);
- st = NULL;
- }
- spin_unlock_bh(&pd->stream_lock);
- kfree(st);
- return 0;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_isoc_stream_free(struct oz_isoc_stream *st)
-{
- kfree_skb(st->skb);
- kfree(st);
-}
-
-/*
- * Context: softirq
- */
-int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
-{
- struct oz_isoc_stream *st;
-
- spin_lock_bh(&pd->stream_lock);
- st = pd_stream_find(pd, ep_num);
- if (st)
- list_del(&st->link);
- spin_unlock_bh(&pd->stream_lock);
- if (st)
- oz_isoc_stream_free(st);
- return 0;
-}
-
-/*
- * Context: any
- */
-static void oz_isoc_destructor(struct sk_buff *skb)
-{
- atomic_dec(&g_submitted_isoc);
-}
-
-/*
- * Context: softirq
- */
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
-{
- struct net_device *dev = pd->net_dev;
- struct oz_isoc_stream *st;
- u8 nb_units = 0;
- struct sk_buff *skb = NULL;
- struct oz_hdr *oz_hdr = NULL;
- int size = 0;
-
- spin_lock_bh(&pd->stream_lock);
- st = pd_stream_find(pd, ep_num);
- if (st) {
- skb = st->skb;
- st->skb = NULL;
- nb_units = st->nb_units;
- st->nb_units = 0;
- oz_hdr = st->oz_hdr;
- size = st->size;
- }
- spin_unlock_bh(&pd->stream_lock);
- if (!st)
- return 0;
- if (!skb) {
- /* Allocate enough space for max size frame. */
- skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
- GFP_ATOMIC);
- if (skb == NULL)
- return 0;
- /* Reserve the head room for lower layers. */
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
- skb->dev = dev;
- skb->protocol = htons(OZ_ETHERTYPE);
- /* For audio packet set priority to AC_VO */
- skb->priority = 0x7;
- size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
- oz_hdr = (struct oz_hdr *)skb_put(skb, size);
- }
- memcpy(skb_put(skb, len), data, len);
- size += len;
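-	/* One unit of isoc data is appended per call; the frame goes out
-	 * once ms_per_isoc units have accumulated.
-	 */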
- if (++nb_units < pd->ms_per_isoc) {
- spin_lock_bh(&pd->stream_lock);
- st->skb = skb;
- st->nb_units = nb_units;
- st->oz_hdr = oz_hdr;
- st->size = size;
- spin_unlock_bh(&pd->stream_lock);
- } else {
- struct oz_hdr oz;
- struct oz_isoc_large iso;
-
- spin_lock_bh(&pd->stream_lock);
- iso.frame_number = st->frame_num;
- st->frame_num += nb_units;
- spin_unlock_bh(&pd->stream_lock);
- oz.control =
- (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
- oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
- oz.pkt_num = 0;
- iso.endpoint = ep_num;
- iso.format = OZ_DATA_F_ISOC_LARGE;
- iso.ms_data = nb_units;
- memcpy(oz_hdr, &oz, sizeof(oz));
- memcpy(oz_hdr+1, &iso, sizeof(iso));
- if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
- dev->dev_addr, skb->len) < 0)
- goto out;
-
- skb->destructor = oz_isoc_destructor;
-		/* Queue for xmit if mode is not ANYTIME. */
- if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
- struct oz_tx_frame *isoc_unit = NULL;
- int nb = pd->nb_queued_isoc_frames;
-
- if (nb >= pd->isoc_latency) {
- struct oz_tx_frame *f;
-
- oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
- nb);
- spin_lock(&pd->tx_frame_lock);
- list_for_each_entry(f, &pd->tx_queue, link) {
- if (f->skb != NULL) {
- oz_tx_isoc_free(pd, f);
- break;
- }
- }
- spin_unlock(&pd->tx_frame_lock);
- }
- isoc_unit = oz_tx_frame_alloc(pd);
- if (isoc_unit == NULL)
- goto out;
- isoc_unit->hdr = oz;
- isoc_unit->skb = skb;
- spin_lock_bh(&pd->tx_frame_lock);
- list_add_tail(&isoc_unit->link, &pd->tx_queue);
- pd->nb_queued_isoc_frames++;
- spin_unlock_bh(&pd->tx_frame_lock);
- oz_dbg(TX_FRAMES,
- "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
- pd->nb_queued_isoc_frames, pd->nb_queued_frames);
- return 0;
- }
-
-		/* In ANYTIME mode, xmit the unit immediately. */
- if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
- atomic_inc(&g_submitted_isoc);
- if (dev_queue_xmit(skb) < 0)
- return -1;
- return 0;
- }
-
-out:
-	kfree_skb(skb);
- return -1;
-
- }
- return 0;
-}
-
-/*
- * Context: process
- */
-void oz_apps_init(void)
-{
- int i;
-
- for (i = 0; i < OZ_NB_APPS; i++) {
- if (g_app_if[i].init)
- g_app_if[i].init();
- }
-}
-
-/*
- * Context: process
- */
-void oz_apps_term(void)
-{
- int i;
-
- /* Terminate all the apps. */
- for (i = 0; i < OZ_NB_APPS; i++) {
- if (g_app_if[i].term)
- g_app_if[i].term();
- }
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
-{
- if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
- g_app_if[app_id].rx(pd, elt);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_indicate_farewells(struct oz_pd *pd)
-{
- struct oz_farewell *f;
- const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];
-
- while (1) {
- spin_lock_bh(&g_polling_lock);
- if (list_empty(&pd->farewell_list)) {
- spin_unlock_bh(&g_polling_lock);
- break;
- }
- f = list_first_entry(&pd->farewell_list,
- struct oz_farewell, link);
- list_del(&f->link);
- spin_unlock_bh(&g_polling_lock);
- if (ai->farewell)
- ai->farewell(pd, f->ep_num, f->report, f->len);
- kfree(f);
- }
-}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
deleted file mode 100644
index 212fab0d807a..000000000000
--- a/drivers/staging/ozwpan/ozpd.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPD_H_
-#define _OZPD_H_
-
-#include <linux/interrupt.h>
-#include "ozeltbuf.h"
-
-/* PD state
- */
-#define OZ_PD_S_IDLE 0x1
-#define OZ_PD_S_CONNECTED 0x2
-#define OZ_PD_S_SLEEP 0x4
-#define OZ_PD_S_STOPPED 0x8
-
-/* Timer event types.
- */
-#define OZ_TIMER_TOUT 1
-#define OZ_TIMER_HEARTBEAT 2
-#define OZ_TIMER_STOP 3
-
-/*
 - * External spinlock variable.
- */
-extern spinlock_t g_polling_lock;
-
-/* Data structure that holds information on a frame for transmission. This is
- * built when the frame is first transmitted and is used to rebuild the frame
- * if a re-transmission is required.
- */
-struct oz_tx_frame {
- struct list_head link;
- struct list_head elt_list;
- struct oz_hdr hdr;
- struct sk_buff *skb;
- int total_size;
-};
-
-struct oz_isoc_stream {
- struct list_head link;
- u8 ep_num;
- u8 frame_num;
- u8 nb_units;
- int size;
- struct sk_buff *skb;
- struct oz_hdr *oz_hdr;
-};
-
-struct oz_farewell {
- struct list_head link;
- u8 ep_num;
- u8 index;
- u8 len;
- u8 report[0];
-};
-
-/* Data structure that holds information on a specific peripheral device (PD).
- */
-struct oz_pd {
- struct list_head link;
- atomic_t ref_count;
- u8 mac_addr[ETH_ALEN];
- unsigned state;
- unsigned state_flags;
- unsigned send_flags;
- u16 total_apps;
- u16 paused_apps;
- u8 session_id;
- u8 param_rsp_status;
- u8 pd_info;
- u8 isoc_sent;
- u32 last_rx_pkt_num;
- u32 last_tx_pkt_num;
- struct timespec last_rx_timestamp;
- u32 trigger_pkt_num;
- unsigned long pulse_time;
- unsigned long pulse_period;
- unsigned long presleep;
- unsigned long keep_alive;
- struct oz_elt_buf elt_buff;
- void *app_ctx[OZ_NB_APPS];
- spinlock_t app_lock[OZ_NB_APPS];
- int max_tx_size;
- u8 mode;
- u8 ms_per_isoc;
- unsigned isoc_latency;
- unsigned max_stream_buffering;
- int nb_queued_frames;
- int nb_queued_isoc_frames;
- spinlock_t tx_frame_lock;
- struct list_head *last_sent_frame;
- struct list_head tx_queue;
- struct list_head farewell_list;
- spinlock_t stream_lock;
- struct list_head stream_list;
- struct net_device *net_dev;
- struct hrtimer heartbeat;
- struct hrtimer timeout;
- u8 timeout_type;
- struct tasklet_struct heartbeat_tasklet;
- struct tasklet_struct timeout_tasklet;
- struct work_struct workitem;
-};
-
-#define OZ_MAX_QUEUED_FRAMES 4
-
-struct oz_pd *oz_pd_alloc(const u8 *mac_addr);
-void oz_pd_destroy(struct oz_pd *pd);
-void oz_pd_get(struct oz_pd *pd);
-void oz_pd_put(struct oz_pd *pd);
-void oz_pd_set_state(struct oz_pd *pd, unsigned state);
-void oz_pd_indicate_farewells(struct oz_pd *pd);
-int oz_pd_sleep(struct oz_pd *pd);
-void oz_pd_stop(struct oz_pd *pd);
-void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
-int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
-void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
-int oz_prepare_frame(struct oz_pd *pd, int empty);
-void oz_send_queued_frames(struct oz_pd *pd, int backlog);
-void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
-int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
-int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len);
-void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
-void oz_apps_init(void);
-void oz_apps_term(void);
-
-extern struct kmem_cache *oz_elt_info_cache;
-extern struct kmem_cache *oz_tx_frame_cache;
-
-#endif /* _OZPD_H_ */
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
deleted file mode 100644
index 1ba24a2aef83..000000000000
--- a/drivers/staging/ozwpan/ozproto.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/errno.h>
-#include <linux/ieee80211.h>
-#include <linux/slab.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbsvc.h"
-
-#include "ozappif.h"
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#include <net/psnap.h>
-
-#define OZ_CF_CONN_SUCCESS 1
-#define OZ_CF_CONN_FAILURE 2
-
-#define OZ_DO_STOP 1
-#define OZ_DO_SLEEP 2
-
-struct oz_binding {
- struct packet_type ptype;
- char name[OZ_MAX_BINDING_LEN];
- struct list_head link;
-};
-
-/*
- * External variable
- */
-
-DEFINE_SPINLOCK(g_polling_lock);
-/*
 - * File-scope static variables.
- */
-static LIST_HEAD(g_pd_list);
-static LIST_HEAD(g_binding);
-static DEFINE_SPINLOCK(g_binding_lock);
-static struct sk_buff_head g_rx_queue;
-static u8 g_session_id;
-static u16 g_apps = 0x1;
-static int g_processing_rx;
-
-struct kmem_cache *oz_elt_info_cache;
-struct kmem_cache *oz_tx_frame_cache;
-
-/*
- * Context: softirq-serialized
- */
-static u8 oz_get_new_session_id(u8 exclude)
-{
- if (++g_session_id == 0)
- g_session_id = 1;
- if (g_session_id == exclude) {
- if (++g_session_id == 0)
- g_session_id = 1;
- }
- return g_session_id;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
-{
- struct sk_buff *skb;
- struct net_device *dev = pd->net_dev;
- struct oz_hdr *oz_hdr;
- struct oz_elt *elt;
- struct oz_elt_connect_rsp *body;
-
- int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
- sizeof(struct oz_elt_connect_rsp);
- skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
- if (skb == NULL)
- return;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
- oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
- elt = (struct oz_elt *)(oz_hdr+1);
- body = (struct oz_elt_connect_rsp *)(elt+1);
- skb->dev = dev;
- skb->protocol = htons(OZ_ETHERTYPE);
- /* Fill in device header */
- if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
- dev->dev_addr, skb->len) < 0) {
- kfree_skb(skb);
- return;
- }
- oz_hdr->control = OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT;
- oz_hdr->last_pkt_num = 0;
- put_unaligned(0, &oz_hdr->pkt_num);
- elt->type = OZ_ELT_CONNECT_RSP;
- elt->length = sizeof(struct oz_elt_connect_rsp);
- memset(body, 0, sizeof(struct oz_elt_connect_rsp));
- body->status = status;
- if (status == 0) {
- body->mode = pd->mode;
- body->session_id = pd->session_id;
- put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
- }
- oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status);
- dev_queue_xmit(skb);
-}
-
-/*
- * Context: softirq-serialized
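 - * For example, kalive = (OZ_KALIVE_MINS | 5) yields a keep-alive of
 - * 5 * 60 * 1000 = 300000 ms.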
- */
-static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
-{
- unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
-
- switch (kalive & OZ_KALIVE_TYPE_MASK) {
- case OZ_KALIVE_SPECIAL:
- pd->keep_alive = keep_alive * 1000*60*60*24*20;
- break;
- case OZ_KALIVE_SECS:
- pd->keep_alive = keep_alive*1000;
- break;
- case OZ_KALIVE_MINS:
- pd->keep_alive = keep_alive*1000*60;
- break;
- case OZ_KALIVE_HOURS:
- pd->keep_alive = keep_alive*1000*60*60;
- break;
- default:
- pd->keep_alive = 0;
- }
- oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
-}
-
-/*
- * Context: softirq-serialized
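 - * presleep is given in units of 100 ms; zero selects the default
 - * OZ_PRESLEEP_TOUT.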
- */
-static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
-{
- if (presleep)
- pd->presleep = presleep*100;
- else
- pd->presleep = OZ_PRESLEEP_TOUT;
- if (start_timer) {
- spin_unlock(&g_polling_lock);
- oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
- spin_lock(&g_polling_lock);
- }
- oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
-}
-
-/*
- * Context: softirq-serialized
- */
-static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
- const u8 *pd_addr, struct net_device *net_dev)
-{
- struct oz_pd *pd;
- struct oz_elt_connect_req *body =
- (struct oz_elt_connect_req *)(elt+1);
- u8 rsp_status = OZ_STATUS_SUCCESS;
- u8 stop_needed = 0;
- u16 new_apps = g_apps;
- struct net_device *old_net_dev = NULL;
- struct oz_pd *free_pd = NULL;
-
- if (cur_pd) {
- pd = cur_pd;
- spin_lock_bh(&g_polling_lock);
- } else {
- struct oz_pd *pd2 = NULL;
- struct list_head *e;
-
- pd = oz_pd_alloc(pd_addr);
- if (pd == NULL)
- return NULL;
- getnstimeofday(&pd->last_rx_timestamp);
- spin_lock_bh(&g_polling_lock);
- list_for_each(e, &g_pd_list) {
- pd2 = list_entry(e, struct oz_pd, link);
- if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
- free_pd = pd;
- pd = pd2;
- break;
- }
- }
- if (pd != pd2)
- list_add_tail(&pd->link, &g_pd_list);
- }
- if (pd == NULL) {
- spin_unlock_bh(&g_polling_lock);
- return NULL;
- }
- if (pd->net_dev != net_dev) {
- old_net_dev = pd->net_dev;
- dev_hold(net_dev);
- pd->net_dev = net_dev;
- }
- oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
- pd->max_tx_size = OZ_MAX_TX_SIZE;
- pd->mode = body->mode;
- pd->pd_info = body->pd_info;
- if (pd->mode & OZ_F_ISOC_NO_ELTS) {
- pd->ms_per_isoc = body->ms_per_isoc;
- if (!pd->ms_per_isoc)
- pd->ms_per_isoc = 4;
-
- switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
- case OZ_ONE_MS_LATENCY:
- pd->isoc_latency = (body->ms_isoc_latency &
- ~OZ_LATENCY_MASK) / pd->ms_per_isoc;
- break;
- case OZ_TEN_MS_LATENCY:
- pd->isoc_latency = ((body->ms_isoc_latency &
- ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
- break;
- default:
- pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
- }
- }
- if (body->max_len_div16)
- pd->max_tx_size = ((u16)body->max_len_div16)<<4;
- oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
- pd->max_tx_size, pd->ms_per_isoc);
- pd->max_stream_buffering = 3*1024;
- pd->pulse_period = OZ_QUANTUM;
- pd_set_presleep(pd, body->presleep, 0);
- pd_set_keepalive(pd, body->keep_alive);
-
- new_apps &= le16_to_cpu(get_unaligned(&body->apps));
- if ((new_apps & 0x1) && (body->session_id)) {
- if (pd->session_id) {
- if (pd->session_id != body->session_id) {
- rsp_status = OZ_STATUS_SESSION_MISMATCH;
- goto done;
- }
- } else {
- new_apps &= ~0x1; /* Resume not permitted */
- pd->session_id =
- oz_get_new_session_id(body->session_id);
- }
- } else {
- if (pd->session_id && !body->session_id) {
- rsp_status = OZ_STATUS_SESSION_TEARDOWN;
- stop_needed = 1;
- } else {
- new_apps &= ~0x1; /* Resume not permitted */
- pd->session_id =
- oz_get_new_session_id(body->session_id);
- }
- }
-done:
- if (rsp_status == OZ_STATUS_SUCCESS) {
- u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
- u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
- u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
-
- spin_unlock_bh(&g_polling_lock);
- oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
- oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
- new_apps, pd->total_apps, pd->paused_apps);
- if (start_apps) {
- if (oz_services_start(pd, start_apps, 0))
- rsp_status = OZ_STATUS_TOO_MANY_PDS;
- }
- if (resume_apps)
- if (oz_services_start(pd, resume_apps, 1))
- rsp_status = OZ_STATUS_TOO_MANY_PDS;
- if (stop_apps)
- oz_services_stop(pd, stop_apps, 0);
- oz_pd_request_heartbeat(pd);
- } else {
- spin_unlock_bh(&g_polling_lock);
- }
- oz_send_conn_rsp(pd, rsp_status);
- if (rsp_status != OZ_STATUS_SUCCESS) {
- if (stop_needed)
- oz_pd_stop(pd);
- oz_pd_put(pd);
- pd = NULL;
- }
- if (old_net_dev)
- dev_put(old_net_dev);
- if (free_pd)
- oz_pd_destroy(free_pd);
- return pd;
-}
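The bitmask arithmetic in the done: block is easier to follow with concrete values; the numbers below are illustrative only (bit 0 is the session bit, which ~0x1 masks off).

/* Sketch: new_apps = 0x16, pd->total_apps = 0x0e, pd->paused_apps = 0x04.
 * start_apps  = 0x16 & ~0x0e & ~0x1 = 0x10  (app 4 newly started)
 * stop_apps   = 0x0e & ~0x16 & ~0x1 = 0x08  (app 3 stopped)
 * resume_apps = 0x16 &  0x04 & ~0x1 = 0x04  (app 2 resumed)
 */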
-
-/*
- * Context: softirq-serialized
- */
-static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
- const u8 *report, u8 len)
-{
- struct oz_farewell *f;
- struct oz_farewell *f2;
- int found = 0;
-
- f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
- if (!f)
- return;
- f->ep_num = ep_num;
- f->index = index;
- f->len = len;
- memcpy(f->report, report, len);
- oz_dbg(ON, "RX: Adding farewell report\n");
- spin_lock(&g_polling_lock);
- list_for_each_entry(f2, &pd->farewell_list, link) {
- if ((f2->ep_num == ep_num) && (f2->index == index)) {
- found = 1;
- list_del(&f2->link);
- break;
- }
- }
- list_add_tail(&f->link, &pd->farewell_list);
- spin_unlock(&g_polling_lock);
- if (found)
- kfree(f2);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_rx_frame(struct sk_buff *skb)
-{
- u8 *mac_hdr;
- u8 *src_addr;
- struct oz_elt *elt;
- int length;
- struct oz_pd *pd = NULL;
- struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
- struct timespec current_time;
- int dup = 0;
- u32 pkt_num;
-
- oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
- oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
- mac_hdr = skb_mac_header(skb);
- src_addr = &mac_hdr[ETH_ALEN];
- length = skb->len;
-
- /* Check the version field */
- if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
- oz_dbg(ON, "Incorrect protocol version: %d\n",
- oz_get_prot_ver(oz_hdr->control));
- goto done;
- }
-
- pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
-
- pd = oz_pd_find(src_addr);
- if (pd) {
- if (!(pd->state & OZ_PD_S_CONNECTED))
- oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
- getnstimeofday(&current_time);
- if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
- (pd->presleep < MSEC_PER_SEC)) {
- oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
- pd->last_rx_timestamp = current_time;
- }
- if (pkt_num != pd->last_rx_pkt_num) {
- pd->last_rx_pkt_num = pkt_num;
- } else {
- dup = 1;
- oz_dbg(ON, "Duplicate frame\n");
- }
- }
-
- if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
- oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
- pd->last_sent_frame = &pd->tx_queue;
- if (oz_hdr->control & OZ_F_ACK) {
- /* Retire completed frames */
- oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
- }
- if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
- (pd->state == OZ_PD_S_CONNECTED)) {
- int backlog = pd->nb_queued_frames;
-
- pd->trigger_pkt_num = pkt_num;
- /* Send queued frames */
- oz_send_queued_frames(pd, backlog);
- }
- }
-
- length -= sizeof(struct oz_hdr);
- elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
-
- while (length >= sizeof(struct oz_elt)) {
- length -= sizeof(struct oz_elt) + elt->length;
- if (length < 0)
- break;
- switch (elt->type) {
- case OZ_ELT_CONNECT_REQ:
- oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
- pd = oz_connect_req(pd, elt, src_addr, skb->dev);
- break;
- case OZ_ELT_DISCONNECT:
- oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
- if (pd)
- oz_pd_sleep(pd);
- break;
- case OZ_ELT_UPDATE_PARAM_REQ: {
- struct oz_elt_update_param *body =
- (struct oz_elt_update_param *)(elt + 1);
- oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
- if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
- spin_lock(&g_polling_lock);
- pd_set_keepalive(pd, body->keepalive);
- pd_set_presleep(pd, body->presleep, 1);
- spin_unlock(&g_polling_lock);
- }
- }
- break;
- case OZ_ELT_FAREWELL_REQ: {
- struct oz_elt_farewell *body =
- (struct oz_elt_farewell *)(elt + 1);
- oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
- oz_add_farewell(pd, body->ep_num,
- body->index, body->report,
- elt->length + 1 - sizeof(*body));
- }
- break;
- case OZ_ELT_APP_DATA:
- if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
- struct oz_app_hdr *app_hdr =
- (struct oz_app_hdr *)(elt+1);
- if (dup)
- break;
- oz_handle_app_elt(pd, app_hdr->app_id, elt);
- }
- break;
- default:
- oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
- }
- elt = oz_next_elt(elt);
- }
-done:
- if (pd)
- oz_pd_put(pd);
- consume_skb(skb);
-}
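The element walk at the end of oz_rx_frame() assumes the frame layout sketched below (sizes taken from the packed structures in ozprotocol.h); this is an editorial illustration, not part of the removed source.

/* Sketch of the expected frame payload:
 *
 *   | oz_hdr (6) | oz_elt (2) | body (elt->length) | oz_elt (2) | body | ...
 *
 * Each iteration subtracts sizeof(struct oz_elt) + elt->length from the
 * remaining length and stops as soon as that would go negative, so a
 * truncated trailing element is silently dropped.
 */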
-
-/*
- * Context: process
- */
-void oz_protocol_term(void)
-{
- struct oz_binding *b, *t;
-
- /* Walk the list of bindings and remove each one.
- */
- spin_lock_bh(&g_binding_lock);
- list_for_each_entry_safe(b, t, &g_binding, link) {
- list_del(&b->link);
- spin_unlock_bh(&g_binding_lock);
- dev_remove_pack(&b->ptype);
- if (b->ptype.dev)
- dev_put(b->ptype.dev);
- kfree(b);
- spin_lock_bh(&g_binding_lock);
- }
- spin_unlock_bh(&g_binding_lock);
- /* Walk the list of PDs and stop each one. This causes the PD to be
- * removed from the list so we can just pull each one from the head
- * of the list.
- */
- spin_lock_bh(&g_polling_lock);
- while (!list_empty(&g_pd_list)) {
- struct oz_pd *pd =
- list_first_entry(&g_pd_list, struct oz_pd, link);
- oz_pd_get(pd);
- spin_unlock_bh(&g_polling_lock);
- oz_pd_stop(pd);
- oz_pd_put(pd);
- spin_lock_bh(&g_polling_lock);
- }
- spin_unlock_bh(&g_polling_lock);
- oz_dbg(ON, "Protocol stopped\n");
-
- kmem_cache_destroy(oz_tx_frame_cache);
- kmem_cache_destroy(oz_elt_info_cache);
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_heartbeat_handler(unsigned long data)
-{
- struct oz_pd *pd = (struct oz_pd *)data;
- u16 apps = 0;
-
- spin_lock_bh(&g_polling_lock);
- if (pd->state & OZ_PD_S_CONNECTED)
- apps = pd->total_apps;
- spin_unlock_bh(&g_polling_lock);
- if (apps)
- oz_pd_heartbeat(pd, apps);
- oz_pd_put(pd);
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_timeout_handler(unsigned long data)
-{
- int type;
- struct oz_pd *pd = (struct oz_pd *)data;
-
- spin_lock_bh(&g_polling_lock);
- type = pd->timeout_type;
- spin_unlock_bh(&g_polling_lock);
- switch (type) {
- case OZ_TIMER_TOUT:
- oz_pd_sleep(pd);
- break;
- case OZ_TIMER_STOP:
- oz_pd_stop(pd);
- break;
- }
- oz_pd_put(pd);
-}
-
-/*
- * Context: Interrupt
- */
-enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
-{
- struct oz_pd *pd;
-
- pd = container_of(timer, struct oz_pd, heartbeat);
- hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
- MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
- oz_pd_get(pd);
- tasklet_schedule(&pd->heartbeat_tasklet);
- return HRTIMER_RESTART;
-}
-
-/*
- * Context: Interrupt
- */
-enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
-{
- struct oz_pd *pd;
-
- pd = container_of(timer, struct oz_pd, timeout);
- oz_pd_get(pd);
- tasklet_schedule(&pd->timeout_tasklet);
- return HRTIMER_NORESTART;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
-{
- spin_lock_bh(&g_polling_lock);
- switch (type) {
- case OZ_TIMER_TOUT:
- case OZ_TIMER_STOP:
- if (hrtimer_active(&pd->timeout)) {
- hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
- MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
- NSEC_PER_MSEC));
- hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
- } else {
- hrtimer_start(&pd->timeout, ktime_set(due_time /
- MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
- NSEC_PER_MSEC), HRTIMER_MODE_REL);
- }
- pd->timeout_type = type;
- break;
- case OZ_TIMER_HEARTBEAT:
- if (!hrtimer_active(&pd->heartbeat))
- hrtimer_start(&pd->heartbeat, ktime_set(due_time /
- MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
- NSEC_PER_MSEC), HRTIMER_MODE_REL);
- break;
- }
- spin_unlock_bh(&g_polling_lock);
-}
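The millisecond due_time is split into the (seconds, nanoseconds) pair that ktime_set() expects; a quick worked example:

/* Sketch: due_time = 1500 ms.
 * seconds     = 1500 / MSEC_PER_SEC = 1
 * nanoseconds = (1500 % MSEC_PER_SEC) * NSEC_PER_MSEC = 500000000
 */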
-
-/*
- * Context: softirq or process
- */
-void oz_pd_request_heartbeat(struct oz_pd *pd)
-{
- oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
- pd->pulse_period : OZ_QUANTUM);
-}
-
-/*
- * Context: softirq or process
- */
-struct oz_pd *oz_pd_find(const u8 *mac_addr)
-{
- struct oz_pd *pd;
-
- spin_lock_bh(&g_polling_lock);
- list_for_each_entry(pd, &g_pd_list, link) {
- if (ether_addr_equal(pd->mac_addr, mac_addr)) {
- oz_pd_get(pd);
- spin_unlock_bh(&g_polling_lock);
- return pd;
- }
- }
- spin_unlock_bh(&g_polling_lock);
- return NULL;
-}
-
-/*
- * Context: process
- */
-void oz_app_enable(int app_id, int enable)
-{
- if (app_id < OZ_NB_APPS) {
- spin_lock_bh(&g_polling_lock);
- if (enable)
- g_apps |= (1<<app_id);
- else
- g_apps &= ~(1<<app_id);
- spin_unlock_bh(&g_polling_lock);
- }
-}
-
-/*
- * Context: softirq
- */
-static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
-{
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == NULL)
- return 0;
- spin_lock_bh(&g_rx_queue.lock);
- if (g_processing_rx) {
- /* We already hold the lock so use __ variant.
- */
- __skb_queue_head(&g_rx_queue, skb);
- spin_unlock_bh(&g_rx_queue.lock);
- } else {
- g_processing_rx = 1;
- do {
- spin_unlock_bh(&g_rx_queue.lock);
- oz_rx_frame(skb);
- spin_lock_bh(&g_rx_queue.lock);
- if (skb_queue_empty(&g_rx_queue)) {
- g_processing_rx = 0;
- spin_unlock_bh(&g_rx_queue.lock);
- break;
- }
- /* We already hold the lock so use __ variant.
- */
- skb = __skb_dequeue(&g_rx_queue);
- } while (1);
- }
- return 0;
-}
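The g_processing_rx flag implements a simple drain-or-enqueue scheme; the note below summarizes the intent as read from the code above.

/* Sketch: the first context to arrive sets g_processing_rx under
 * g_rx_queue.lock and drains frames with the lock dropped around each
 * oz_rx_frame() call; any context arriving meanwhile sees the flag set,
 * queues its skb and returns. Frame processing is thus serialized
 * without holding a spinlock across the whole of oz_rx_frame().
 */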
-
-/*
- * Context: process
- */
-void oz_binding_add(const char *net_dev)
-{
- struct oz_binding *binding;
-
- binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
- if (!binding)
- return;
-
- binding->ptype.type = htons(OZ_ETHERTYPE);
- binding->ptype.func = oz_pkt_recv;
- if (net_dev && *net_dev) {
- strlcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
- oz_dbg(ON, "Adding binding: %s\n", net_dev);
- binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
- if (binding->ptype.dev == NULL) {
- oz_dbg(ON, "Netdev %s not found\n", net_dev);
- kfree(binding);
- return;
- }
- }
- dev_add_pack(&binding->ptype);
- spin_lock_bh(&g_binding_lock);
- list_add_tail(&binding->link, &g_binding);
- spin_unlock_bh(&g_binding_lock);
-}
-
-/*
- * Context: process
- */
-static void pd_stop_all_for_device(struct net_device *net_dev)
-{
- LIST_HEAD(h);
- struct oz_pd *pd;
- struct oz_pd *n;
-
- spin_lock_bh(&g_polling_lock);
- list_for_each_entry_safe(pd, n, &g_pd_list, link) {
- if (pd->net_dev == net_dev) {
- list_move(&pd->link, &h);
- oz_pd_get(pd);
- }
- }
- spin_unlock_bh(&g_polling_lock);
- while (!list_empty(&h)) {
- pd = list_first_entry(&h, struct oz_pd, link);
- oz_pd_stop(pd);
- oz_pd_put(pd);
- }
-}
-
-/*
- * Context: process
- */
-void oz_binding_remove(const char *net_dev)
-{
- struct oz_binding *binding;
- int found = 0;
-
- oz_dbg(ON, "Removing binding: %s\n", net_dev);
- spin_lock_bh(&g_binding_lock);
- list_for_each_entry(binding, &g_binding, link) {
- if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
- oz_dbg(ON, "Binding '%s' found\n", net_dev);
- found = 1;
- break;
- }
- }
- spin_unlock_bh(&g_binding_lock);
- if (found) {
- dev_remove_pack(&binding->ptype);
- if (binding->ptype.dev) {
- dev_put(binding->ptype.dev);
- pd_stop_all_for_device(binding->ptype.dev);
- }
- list_del(&binding->link);
- kfree(binding);
- }
-}
-
-/*
- * Context: process
- */
-static char *oz_get_next_device_name(char *s, char *dname, int max_size)
-{
- while (*s == ',')
- s++;
- while (*s && (*s != ',') && max_size > 1) {
- *dname++ = *s++;
- max_size--;
- }
- *dname = 0;
- return s;
-}
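A short trace of the parser, assuming an illustrative device string of "eth0,wlan0" and a 32-byte buffer:

/* Sketch: devs = "eth0,wlan0".
 * 1st call: dname = "eth0",  return value points at ",wlan0"
 * 2nd call: dname = "wlan0", return value points at the trailing NUL,
 *           so the while (*devs) loop in oz_protocol_init() terminates.
 */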
-
-/*
- * Context: process
- */
-int oz_protocol_init(char *devs)
-{
- oz_elt_info_cache = KMEM_CACHE(oz_elt_info, 0);
- if (!oz_elt_info_cache)
- return -ENOMEM;
-
- oz_tx_frame_cache = KMEM_CACHE(oz_tx_frame, 0);
- if (!oz_tx_frame_cache) {
- kmem_cache_destroy(oz_elt_info_cache);
- return -ENOMEM;
- }
-
- skb_queue_head_init(&g_rx_queue);
- if (devs[0] == '*') {
- oz_binding_add(NULL);
- } else {
- char d[32];
-
- while (*devs) {
- devs = oz_get_next_device_name(devs, d, sizeof(d));
- if (d[0])
- oz_binding_add(d);
- }
- }
- return 0;
-}
-
-/*
- * Context: process
- */
-int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
-{
- struct oz_pd *pd;
- int count = 0;
-
- spin_lock_bh(&g_polling_lock);
- list_for_each_entry(pd, &g_pd_list, link) {
- if (count >= max_count)
- break;
- ether_addr_copy((u8 *)&addr[count++], pd->mac_addr);
- }
- spin_unlock_bh(&g_polling_lock);
- return count;
-}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
deleted file mode 100644
index 30c2db91c000..000000000000
--- a/drivers/staging/ozwpan/ozproto.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPROTO_H
-#define _OZPROTO_H
-
-#include <asm/byteorder.h>
-#include "ozdbg.h"
-#include "ozappif.h"
-
-#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
-
-/* Quantum in MS */
-#define OZ_QUANTUM 8
-/* Default timeouts.
- */
-#define OZ_PRESLEEP_TOUT 11
-
-/* Maximum size of a tx frame. */
-#define OZ_MAX_TX_SIZE 760
-
-/* Maximum number of uncompleted isoc frames that can be pending in network. */
-#define OZ_MAX_SUBMITTED_ISOC 16
-
-/* Maximum number of uncompleted isoc frames that can be pending in Tx Queue. */
-#define OZ_MAX_TX_QUEUE_ISOC 32
-
-/* Application handler functions.
- */
-struct oz_app_if {
- int (*init)(void);
- void (*term)(void);
- int (*start)(struct oz_pd *pd, int resume);
- void (*stop)(struct oz_pd *pd, int pause);
- void (*rx)(struct oz_pd *pd, struct oz_elt *elt);
- int (*heartbeat)(struct oz_pd *pd);
- void (*farewell)(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
-};
-
-int oz_protocol_init(char *devs);
-void oz_protocol_term(void);
-int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
-void oz_app_enable(int app_id, int enable);
-struct oz_pd *oz_pd_find(const u8 *mac_addr);
-void oz_binding_add(const char *net_dev);
-void oz_binding_remove(const char *net_dev);
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
-void oz_timer_delete(struct oz_pd *pd, int type);
-void oz_pd_request_heartbeat(struct oz_pd *pd);
-void oz_pd_heartbeat_handler(unsigned long data);
-void oz_pd_timeout_handler(unsigned long data);
-enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
-enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer);
-int oz_get_pd_status_list(char *pd_list, int max_count);
-int oz_get_binding_list(char *buf, int max_if);
-
-extern struct kmem_cache *oz_elt_info_cache;
-extern struct kmem_cache *oz_tx_frame_cache;
-
-#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
deleted file mode 100644
index 4642072596da..000000000000
--- a/drivers/staging/ozwpan/ozprotocol.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPROTOCOL_H
-#define _OZPROTOCOL_H
-
-#define PACKED __packed
-
-#define OZ_ETHERTYPE 0x892e
-
-/* Status codes
- */
-#define OZ_STATUS_SUCCESS 0
-#define OZ_STATUS_INVALID_PARAM 1
-#define OZ_STATUS_TOO_MANY_PDS 2
-#define OZ_STATUS_NOT_ALLOWED 4
-#define OZ_STATUS_SESSION_MISMATCH 5
-#define OZ_STATUS_SESSION_TEARDOWN 6
-
-/* This is the generic element header.
- Every element starts with this.
- */
-struct oz_elt {
- u8 type;
- u8 length;
-} PACKED;
-
-#define oz_next_elt(__elt) \
- (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
-
-/* Protocol element IDs.
- */
-#define OZ_ELT_CONNECT_REQ 0x06
-#define OZ_ELT_CONNECT_RSP 0x07
-#define OZ_ELT_DISCONNECT 0x08
-#define OZ_ELT_UPDATE_PARAM_REQ 0x11
-#define OZ_ELT_FAREWELL_REQ 0x12
-#define OZ_ELT_APP_DATA 0x31
-
-/* This is the Ozmo header which is the first Ozmo specific part
- * of a frame and comes after the MAC header.
- */
-struct oz_hdr {
- u8 control;
- u8 last_pkt_num;
- u32 pkt_num;
-} PACKED;
-
-#define OZ_PROTOCOL_VERSION 0x1
-/* Bits in the control field. */
-#define OZ_VERSION_MASK 0xc
-#define OZ_VERSION_SHIFT 2
-#define OZ_F_ACK 0x10
-#define OZ_F_ISOC 0x20
-#define OZ_F_MORE_DATA 0x40
-#define OZ_F_ACK_REQUESTED 0x80
-
-#define oz_get_prot_ver(__x) (((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT)
-
-/* Used to select the bits of packet number to put in the last_pkt_num.
- */
-#define OZ_LAST_PN_MASK 0x00ff
-
-#define OZ_LAST_PN_HALF_CYCLE 127
-
-#define OZ_LATENCY_MASK 0xc0
-#define OZ_ONE_MS_LATENCY 0x40
-#define OZ_TEN_MS_LATENCY 0x80
-
-/* Connect request data structure.
- */
-struct oz_elt_connect_req {
- u8 mode;
- u8 resv1[16];
- u8 pd_info;
- u8 session_id;
- u8 presleep;
- u8 ms_isoc_latency;
- u8 host_vendor;
- u8 keep_alive;
- u16 apps;
- u8 max_len_div16;
- u8 ms_per_isoc;
- u8 resv3[2];
-} PACKED;
-
-/* mode field bits.
- */
-#define OZ_MODE_POLLED 0x0
-#define OZ_MODE_TRIGGERED 0x1
-#define OZ_MODE_MASK 0xf
-#define OZ_F_ISOC_NO_ELTS 0x40
-#define OZ_F_ISOC_ANYTIME 0x80
-#define OZ_NO_ELTS_ANYTIME 0xc0
-
-/* Keep alive field.
- */
-#define OZ_KALIVE_TYPE_MASK 0xc0
-#define OZ_KALIVE_VALUE_MASK 0x3f
-#define OZ_KALIVE_SPECIAL 0x00
-#define OZ_KALIVE_SECS 0x40
-#define OZ_KALIVE_MINS 0x80
-#define OZ_KALIVE_HOURS 0xc0
-
-/* Connect response data structure.
- */
-struct oz_elt_connect_rsp {
- u8 mode;
- u8 status;
- u8 resv1[3];
- u8 session_id;
- u16 apps;
- u32 resv2;
-} PACKED;
-
-struct oz_elt_farewell {
- u8 ep_num;
- u8 index;
- u8 report[1];
-} PACKED;
-
-struct oz_elt_update_param {
- u8 resv1[16];
- u8 presleep;
- u8 resv2;
- u8 host_vendor;
- u8 keepalive;
-} PACKED;
-
-/* Header common to all application elements.
- */
-struct oz_app_hdr {
- u8 app_id;
- u8 elt_seq_num;
-} PACKED;
-
-/* Values for app_id.
- */
-#define OZ_APPID_USB 0x1
-#define OZ_APPID_SERIAL 0x4
-#define OZ_APPID_MAX OZ_APPID_SERIAL
-#define OZ_NB_APPS (OZ_APPID_MAX+1)
-
-/* USB header common to all elements for the USB application.
- * This header extends the oz_app_hdr and comes directly after
- * the element header in a USB application.
- */
-struct oz_usb_hdr {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
-} PACKED;
-
-/* USB request element subtypes (type field of oz_usb_hdr).
- */
-#define OZ_GET_DESC_REQ 1
-#define OZ_GET_DESC_RSP 2
-#define OZ_SET_CONFIG_REQ 3
-#define OZ_SET_CONFIG_RSP 4
-#define OZ_SET_INTERFACE_REQ 5
-#define OZ_SET_INTERFACE_RSP 6
-#define OZ_VENDOR_CLASS_REQ 7
-#define OZ_VENDOR_CLASS_RSP 8
-#define OZ_GET_STATUS_REQ 9
-#define OZ_GET_STATUS_RSP 10
-#define OZ_CLEAR_FEATURE_REQ 11
-#define OZ_CLEAR_FEATURE_RSP 12
-#define OZ_SET_FEATURE_REQ 13
-#define OZ_SET_FEATURE_RSP 14
-#define OZ_GET_CONFIGURATION_REQ 15
-#define OZ_GET_CONFIGURATION_RSP 16
-#define OZ_GET_INTERFACE_REQ 17
-#define OZ_GET_INTERFACE_RSP 18
-#define OZ_SYNCH_FRAME_REQ 19
-#define OZ_SYNCH_FRAME_RSP 20
-#define OZ_USB_ENDPOINT_DATA 23
-
-#define OZ_REQD_D2H 0x80
-
-struct oz_get_desc_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u16 offset;
- u16 size;
- u8 req_type;
- u8 desc_type;
- __le16 w_index;
- u8 index;
-} PACKED;
-
-/* Values for desc_type field.
-*/
-#define OZ_DESC_DEVICE 0x01
-#define OZ_DESC_CONFIG 0x02
-#define OZ_DESC_STRING 0x03
-
-/* Values for req_type field.
- */
-#define OZ_RECP_MASK 0x1F
-#define OZ_RECP_DEVICE 0x00
-#define OZ_RECP_INTERFACE 0x01
-#define OZ_RECP_ENDPOINT 0x02
-
-#define OZ_REQT_MASK 0x60
-#define OZ_REQT_STD 0x00
-#define OZ_REQT_CLASS 0x20
-#define OZ_REQT_VENDOR 0x40
-
-struct oz_get_desc_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- __le16 offset;
- __le16 total_size;
- u8 rcode;
- u8 data[1];
-} PACKED;
-
-struct oz_feature_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 recipient;
- u8 index;
- u16 feature;
-} PACKED;
-
-struct oz_feature_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 rcode;
-} PACKED;
-
-struct oz_set_config_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 index;
-} PACKED;
-
-struct oz_set_config_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 rcode;
-} PACKED;
-
-struct oz_set_interface_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 index;
- u8 alternative;
-} PACKED;
-
-struct oz_set_interface_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 rcode;
-} PACKED;
-
-struct oz_get_interface_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 index;
-} PACKED;
-
-struct oz_get_interface_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 rcode;
- u8 alternative;
-} PACKED;
-
-struct oz_vendor_class_req {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 req_type;
- u8 request;
- u16 value;
- u16 index;
- u8 data[1];
-} PACKED;
-
-struct oz_vendor_class_rsp {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 req_id;
- u8 rcode;
- u8 data[1];
-} PACKED;
-
-struct oz_data {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 endpoint;
- u8 format;
-} PACKED;
-
-struct oz_isoc_fixed {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 endpoint;
- u8 format;
- u8 unit_size;
- u8 frame_number;
- u8 data[1];
-} PACKED;
-
-struct oz_multiple_fixed {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 endpoint;
- u8 format;
- u8 unit_size;
- u8 data[1];
-} PACKED;
-
-struct oz_fragmented {
- u8 app_id;
- u8 elt_seq_num;
- u8 type;
- u8 endpoint;
- u8 format;
- u16 total_size;
- u16 offset;
- u8 data[1];
-} PACKED;
-
-/* Note: the following does not get packaged in an element in the same way
- * that other data formats are packaged. Instead the data is put in a frame
- * directly after the oz_hdr and is the only permitted data in such a
- * frame. The length of the data is directly determined from the frame size.
- */
-struct oz_isoc_large {
- u8 endpoint;
- u8 format;
- u8 ms_data;
- u8 frame_number;
-} PACKED;
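Given the note above and the packed structure sizes (oz_hdr is 6 bytes, oz_isoc_large is 4), the receive side can recover the data length as sketched here; this is an editorial illustration, not driver code.

/* Sketch: for a frame payload of len bytes carrying isoc-large data,
 * data_len = len - sizeof(struct oz_hdr) - sizeof(struct oz_isoc_large)
 *          = len - 6 - 4
 * since no element header is present in such frames.
 */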
-
-#define OZ_DATA_F_TYPE_MASK 0xF
-#define OZ_DATA_F_MULTIPLE_FIXED 0x1
-#define OZ_DATA_F_MULTIPLE_VAR 0x2
-#define OZ_DATA_F_ISOC_FIXED 0x3
-#define OZ_DATA_F_ISOC_VAR 0x4
-#define OZ_DATA_F_FRAGMENTED 0x5
-#define OZ_DATA_F_ISOC_LARGE 0x7
-
-#endif /* _OZPROTOCOL_H */
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
deleted file mode 100644
index cf6278a198a8..000000000000
--- a/drivers/staging/ozwpan/ozurbparanoia.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/usb.h>
-#include "ozdbg.h"
-
-#ifdef WANT_URB_PARANOIA
-
-#include "ozurbparanoia.h"
-
-#define OZ_MAX_URBS 1000
-struct urb *g_urb_memory[OZ_MAX_URBS];
-int g_nb_urbs;
-DEFINE_SPINLOCK(g_urb_mem_lock);
-
-void oz_remember_urb(struct urb *urb)
-{
- unsigned long irq_state;
-
- spin_lock_irqsave(&g_urb_mem_lock, irq_state);
- if (g_nb_urbs < OZ_MAX_URBS) {
- g_urb_memory[g_nb_urbs++] = urb;
- oz_dbg(ON, "urb up = %d %p\n", g_nb_urbs, urb);
- } else {
- oz_dbg(ON, "ERROR urb buffer full\n");
- }
- spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
-}
-
-/*
- * Forget a urb previously recorded by oz_remember_urb().
- * Returns 0 if the urb was found, -1 otherwise.
- */
-int oz_forget_urb(struct urb *urb)
-{
- unsigned long irq_state;
- int i;
- int rc = -1;
-
- spin_lock_irqsave(&g_urb_mem_lock, irq_state);
- for (i = 0; i < g_nb_urbs; i++) {
- if (g_urb_memory[i] == urb) {
- rc = 0;
- if (--g_nb_urbs > i)
- memcpy(&g_urb_memory[i], &g_urb_memory[i+1],
- (g_nb_urbs - i) * sizeof(struct urb *));
- oz_dbg(ON, "urb down = %d %p\n", g_nb_urbs, urb);
- }
- }
- spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
- return rc;
-}
-#endif /* #ifdef WANT_URB_PARANOIA */
-
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
deleted file mode 100644
index 5080ea76f507..000000000000
--- a/drivers/staging/ozwpan/ozurbparanoia.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _OZURBPARANOIA_H
-#define _OZURBPARANOIA_H
-/* -----------------------------------------------------------------------------
- * Released under the GNU General Public License Version 2 (GPLv2).
- * Copyright (c) 2011 Ozmo Inc
- * -----------------------------------------------------------------------------
- */
-
-#ifdef WANT_URB_PARANOIA
-void oz_remember_urb(struct urb *urb);
-int oz_forget_urb(struct urb *urb);
-#else
-static inline void oz_remember_urb(struct urb *urb) {}
-static inline int oz_forget_urb(struct urb *urb) { return 0; }
-#endif /* WANT_URB_PARANOIA */
-
-#endif /* _OZURBPARANOIA_H */
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
deleted file mode 100644
index d2a6085345be..000000000000
--- a/drivers/staging/ozwpan/ozusbif.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZUSBIF_H
-#define _OZUSBIF_H
-
-#include <linux/usb.h>
-
-/* Reference counting functions.
- */
-void oz_usb_get(void *hpd);
-void oz_usb_put(void *hpd);
-
-/* Stream functions.
- */
-int oz_usb_stream_create(void *hpd, u8 ep_num);
-int oz_usb_stream_delete(void *hpd, u8 ep_num);
-
-/* Request functions.
- */
-int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
- const u8 *data, int data_len);
-int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, __le16 windex, int offset, int len);
-int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
-void oz_usb_request_heartbeat(void *hpd);
-
-/* Confirmation functions.
- */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
- const u8 *desc, u8 length, u16 offset, u16 total_size);
-void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
- const u8 *data, int data_len);
-
-/* Indication functions.
- */
-void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len);
-
-int oz_hcd_heartbeat(void *hport);
-
-#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
deleted file mode 100644
index bf15dc301cb5..000000000000
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file provides the protocol-independent part of the implementation of
- * the USB service for a PD.
- * The implementation is split into two parts: the first is protocol
- * independent and the second contains the protocol-specific details.
- * The split allows alternative protocols to be defined.
- * The implementation of this service uses ozhcd.c to implement a USB HCD.
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbif.h"
-#include "ozhcd.h"
-#include "ozusbsvc.h"
-
-/*
- * This is called once when the driver is loaded to initialise the USB service.
- * Context: process
- */
-int oz_usb_init(void)
-{
- return oz_hcd_init();
-}
-
-/*
- * This is called once when the driver is unloaded to terminate the USB service.
- * Context: process
- */
-void oz_usb_term(void)
-{
- oz_hcd_term();
-}
-
-/*
- * This is called when the USB service is started or resumed for a PD.
- * Context: softirq
- */
-int oz_usb_start(struct oz_pd *pd, int resume)
-{
- int rc = 0;
- struct oz_usb_ctx *usb_ctx;
- struct oz_usb_ctx *old_ctx;
-
- if (resume) {
- oz_dbg(ON, "USB service resumed\n");
- return 0;
- }
- oz_dbg(ON, "USB service started\n");
- /* Create a USB context in case we need one. If we find the PD already
- * has a USB context then we will destroy it.
- */
- usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
- if (usb_ctx == NULL)
- return -ENOMEM;
- atomic_set(&usb_ctx->ref_count, 1);
- usb_ctx->pd = pd;
- usb_ctx->stopped = 0;
- /* Install the USB context if the PD doesn't already have one.
- * If it does already have one then destroy the one we have just
- * created.
- */
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- old_ctx = pd->app_ctx[OZ_APPID_USB];
- if (old_ctx == NULL)
- pd->app_ctx[OZ_APPID_USB] = usb_ctx;
- oz_usb_get(pd->app_ctx[OZ_APPID_USB]);
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- if (old_ctx) {
- oz_dbg(ON, "Already have USB context\n");
- kfree(usb_ctx);
- usb_ctx = old_ctx;
- } else if (usb_ctx) {
- /* Take a reference to the PD. This will be released when
- * the USB context is destroyed.
- */
- oz_pd_get(pd);
- }
- /* If we already had a USB context and had obtained a port from
- * the USB HCD then just reset the port. If we didn't have a port
- * then report the arrival to the USB HCD so we get one.
- */
- if (usb_ctx->hport) {
- oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
- } else {
- usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
- if (usb_ctx->hport == NULL) {
- oz_dbg(ON, "USB hub returned null port\n");
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- pd->app_ctx[OZ_APPID_USB] = NULL;
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- oz_usb_put(usb_ctx);
- rc = -1;
- }
- }
- oz_usb_put(usb_ctx);
- return rc;
-}
-
-/*
- * This is called when the USB service is stopped or paused for a PD.
- * Context: softirq or process
- */
-void oz_usb_stop(struct oz_pd *pd, int pause)
-{
- struct oz_usb_ctx *usb_ctx;
-
- if (pause) {
- oz_dbg(ON, "USB service paused\n");
- return;
- }
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB];
- pd->app_ctx[OZ_APPID_USB] = NULL;
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- if (usb_ctx) {
- struct timespec ts, now;
-
- getnstimeofday(&ts);
- oz_dbg(ON, "USB service stopping...\n");
- usb_ctx->stopped = 1;
- /* At this point the reference count on the usb context should
- * be 2: one from when we created it and one from the HCD, which
- * claims a reference. Since stopped = 1 no one else can get in,
- * but someone may already be in, so wait until they leave, timing
- * out after one second.
- */
- while ((atomic_read(&usb_ctx->ref_count) > 2)) {
- getnstimeofday(&now);
- /* Approximately one second; not a precise calculation. */
- if (now.tv_sec != ts.tv_sec)
- break;
- }
- oz_dbg(ON, "USB service stopped\n");
- oz_hcd_pd_departed(usb_ctx->hport);
- /* Release the reference taken in oz_usb_start.
- */
- oz_usb_put(usb_ctx);
- }
-}
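The wait loop above relies on the reference-count lifecycle described in the comment; laid out explicitly (values inferred from oz_usb_start() and the HCD comment, so treat as a sketch):

/* Sketch of the expected ref-count lifecycle:
 *   oz_usb_start() creates the context  -> count = 1
 *   the HCD claims a reference          -> count = 2
 *   transient users (oz_usb_rx() etc.)  -> count > 2 briefly
 * The loop spins until only the two long-lived references remain,
 * giving up after roughly one second.
 */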
-
-/*
- * This increments the reference count of the context area for a specific PD.
- * This ensures this context area does not disappear while still in use.
- * Context: softirq
- */
-void oz_usb_get(void *hpd)
-{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
- atomic_inc(&usb_ctx->ref_count);
-}
-
-/*
- * This decrements the reference count of the context area for a specific PD
- * and destroys the context area if the reference count becomes zero.
- * Context: irq or process
- */
-void oz_usb_put(void *hpd)
-{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
- if (atomic_dec_and_test(&usb_ctx->ref_count)) {
- oz_dbg(ON, "Dealloc USB context\n");
- oz_pd_put(usb_ctx->pd);
- kfree(usb_ctx);
- }
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_heartbeat(struct oz_pd *pd)
-{
- struct oz_usb_ctx *usb_ctx;
- int rc = 0;
-
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB];
- if (usb_ctx)
- oz_usb_get(usb_ctx);
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- if (usb_ctx == NULL)
- return rc;
- if (usb_ctx->stopped)
- goto done;
- if (usb_ctx->hport)
- if (oz_hcd_heartbeat(usb_ctx->hport))
- rc = 1;
-done:
- oz_usb_put(usb_ctx);
- return rc;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_stream_create(void *hpd, u8 ep_num)
-{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
- struct oz_pd *pd = usb_ctx->pd;
-
- oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
- if (pd->mode & OZ_F_ISOC_NO_ELTS) {
- oz_isoc_stream_create(pd, ep_num);
- } else {
- oz_pd_get(pd);
- if (oz_elt_stream_create(&pd->elt_buff, ep_num,
- 4*pd->max_tx_size)) {
- oz_pd_put(pd);
- return -1;
- }
- }
- return 0;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_stream_delete(void *hpd, u8 ep_num)
-{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
- if (usb_ctx) {
- struct oz_pd *pd = usb_ctx->pd;
-
- if (pd) {
- oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
- if (pd->mode & OZ_F_ISOC_NO_ELTS) {
- oz_isoc_stream_delete(pd, ep_num);
- } else {
- if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
- return -1;
- oz_pd_put(pd);
- }
- }
- }
- return 0;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_usb_request_heartbeat(void *hpd)
-{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
- if (usb_ctx && usb_ctx->pd)
- oz_pd_request_heartbeat(usb_ctx->pd);
-}
diff --git a/drivers/staging/ozwpan/ozusbsvc.h b/drivers/staging/ozwpan/ozusbsvc.h
deleted file mode 100644
index 58e05a59be31..000000000000
--- a/drivers/staging/ozwpan/ozusbsvc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZUSBSVC_H
-#define _OZUSBSVC_H
-
-/*------------------------------------------------------------------------------
- * Per-PD context info stored in the application context area of the PD.
- * This object is reference counted to ensure it doesn't disappear while
- * still in use.
- */
-struct oz_usb_ctx {
- atomic_t ref_count;
- u8 tx_seq_num;
- u8 rx_seq_num;
- struct oz_pd *pd;
- void *hport;
- int stopped;
-};
-
-int oz_usb_init(void);
-void oz_usb_term(void);
-int oz_usb_start(struct oz_pd *pd, int resume);
-void oz_usb_stop(struct oz_pd *pd, int pause);
-void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
-int oz_usb_heartbeat(struct oz_pd *pd);
-void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
-
-#endif /* _OZUSBSVC_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
deleted file mode 100644
index 301fee8625ed..000000000000
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file implements the protocol-specific parts of the USB service for a PD.
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbif.h"
-#include "ozhcd.h"
-#include "ozusbsvc.h"
-
-#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
-
-/*
- * Context: softirq
- */
-static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
- struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
-{
- int ret;
- struct oz_elt *elt = (struct oz_elt *)ei->data;
- struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
-
- elt->type = OZ_ELT_APP_DATA;
- ei->app_id = OZ_APPID_USB;
- ei->length = elt->length + sizeof(struct oz_elt);
- app_hdr->app_id = OZ_APPID_USB;
- spin_lock_bh(&eb->lock);
- if (isoc == 0) {
- app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
- if (usb_ctx->tx_seq_num == 0)
- usb_ctx->tx_seq_num = 1;
- }
- ret = oz_queue_elt_info(eb, isoc, strid, ei);
- if (ret)
- oz_elt_info_free(eb, ei);
- spin_unlock_bh(&eb->lock);
- return ret;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, __le16 windex, int offset, int len)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt *elt;
- struct oz_get_desc_req *body;
- struct oz_elt_buf *eb = &pd->elt_buff;
- struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-
- oz_dbg(ON, " req_type = 0x%x\n", req_type);
- oz_dbg(ON, " desc_type = 0x%x\n", desc_type);
- oz_dbg(ON, " index = 0x%x\n", index);
- oz_dbg(ON, " windex = 0x%x\n", windex);
- oz_dbg(ON, " offset = 0x%x\n", offset);
- oz_dbg(ON, " len = 0x%x\n", len);
- if (len > 200)
- len = 200;
- if (ei == NULL)
- return -1;
- elt = (struct oz_elt *)ei->data;
- elt->length = sizeof(struct oz_get_desc_req);
- body = (struct oz_get_desc_req *)(elt+1);
- body->type = OZ_GET_DESC_REQ;
- body->req_id = req_id;
- put_unaligned(cpu_to_le16(offset), &body->offset);
- put_unaligned(cpu_to_le16(len), &body->size);
- body->req_type = req_type;
- body->desc_type = desc_type;
- body->w_index = windex;
- body->index = index;
- return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt *elt;
- struct oz_elt_buf *eb = &pd->elt_buff;
- struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- struct oz_set_config_req *body;
-
- if (ei == NULL)
- return -1;
- elt = (struct oz_elt *)ei->data;
- elt->length = sizeof(struct oz_set_config_req);
- body = (struct oz_set_config_req *)(elt+1);
- body->type = OZ_SET_CONFIG_REQ;
- body->req_id = req_id;
- body->index = index;
- return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt *elt;
- struct oz_elt_buf *eb = &pd->elt_buff;
- struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- struct oz_set_interface_req *body;
-
- if (ei == NULL)
- return -1;
- elt = (struct oz_elt *)ei->data;
- elt->length = sizeof(struct oz_set_interface_req);
- body = (struct oz_set_interface_req *)(elt+1);
- body->type = OZ_SET_INTERFACE_REQ;
- body->req_id = req_id;
- body->index = index;
- body->alternative = alt;
- return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
- u8 recipient, u8 index, __le16 feature)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt *elt;
- struct oz_elt_buf *eb = &pd->elt_buff;
- struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- struct oz_feature_req *body;
-
- if (ei == NULL)
- return -1;
- elt = (struct oz_elt *)ei->data;
- elt->length = sizeof(struct oz_feature_req);
- body = (struct oz_feature_req *)(elt+1);
- body->type = type;
- body->req_id = req_id;
- body->recipient = recipient;
- body->index = index;
- put_unaligned(feature, &body->feature);
- return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
- u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt *elt;
- struct oz_elt_buf *eb = &pd->elt_buff;
- struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- struct oz_vendor_class_req *body;
-
- if (ei == NULL)
- return -1;
- elt = (struct oz_elt *)ei->data;
- elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
- body = (struct oz_vendor_class_req *)(elt+1);
- body->type = OZ_VENDOR_CLASS_REQ;
- body->req_id = req_id;
- body->req_type = req_type;
- body->request = request;
- put_unaligned(value, &body->value);
- put_unaligned(index, &body->index);
- if (data_len)
- memcpy(body->data, data, data_len);
- return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
- const u8 *data, int data_len)
-{
- unsigned wvalue = le16_to_cpu(setup->wValue);
- unsigned windex = le16_to_cpu(setup->wIndex);
- unsigned wlength = le16_to_cpu(setup->wLength);
- int rc = 0;
-
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (setup->bRequest) {
- case USB_REQ_GET_DESCRIPTOR:
- rc = oz_usb_get_desc_req(hpd, req_id,
- setup->bRequestType, (u8)(wvalue>>8),
- (u8)wvalue, setup->wIndex, 0, wlength);
- break;
- case USB_REQ_SET_CONFIGURATION:
- rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
- break;
- case USB_REQ_SET_INTERFACE: {
- u8 if_num = (u8)windex;
- u8 alt = (u8)wvalue;
-
- rc = oz_usb_set_interface_req(hpd, req_id,
- if_num, alt);
- }
- break;
- case USB_REQ_SET_FEATURE:
- rc = oz_usb_set_clear_feature_req(hpd, req_id,
- OZ_SET_FEATURE_REQ,
- setup->bRequestType & 0xf, (u8)windex,
- setup->wValue);
- break;
- case USB_REQ_CLEAR_FEATURE:
- rc = oz_usb_set_clear_feature_req(hpd, req_id,
- OZ_CLEAR_FEATURE_REQ,
- setup->bRequestType & 0xf,
- (u8)windex, setup->wValue);
- break;
- }
- } else {
- rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
- setup->bRequest, setup->wValue, setup->wIndex,
- data, data_len);
- }
- return rc;
-}
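For GET_DESCRIPTOR, the standard USB wValue packing (descriptor type in the high byte, index in the low byte) maps onto the handler arguments as follows; the numbers are illustrative only.

/* Sketch: wValue = 0x0302 (string descriptor, index 2).
 * desc_type = (u8)(wvalue >> 8) = 0x03 (OZ_DESC_STRING)
 * index     = (u8)wvalue        = 0x02
 * wlength caps the transfer and is clamped again to 200 bytes in
 * oz_usb_get_desc_req().
 */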
-
-/*
- * Context: softirq
- */
-int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
-{
- struct oz_usb_ctx *usb_ctx = hpd;
- struct oz_pd *pd = usb_ctx->pd;
- struct oz_elt_buf *eb;
- int i;
- int hdr_size;
- u8 *data;
- struct usb_iso_packet_descriptor *desc;
-
- if (pd->mode & OZ_F_ISOC_NO_ELTS) {
- for (i = 0; i < urb->number_of_packets; i++) {
- u8 *data;
-
- desc = &urb->iso_frame_desc[i];
- data = ((u8 *)urb->transfer_buffer)+desc->offset;
- oz_send_isoc_unit(pd, ep_num, data, desc->length);
- }
- return 0;
- }
-
- hdr_size = sizeof(struct oz_isoc_fixed) - 1;
- eb = &pd->elt_buff;
- i = 0;
- while (i < urb->number_of_packets) {
- struct oz_elt_info *ei = oz_elt_info_alloc(eb);
- struct oz_elt *elt;
- struct oz_isoc_fixed *body;
- int unit_count;
- int unit_size;
- int rem;
-
- if (ei == NULL)
- return -1;
- rem = MAX_ISOC_FIXED_DATA;
- elt = (struct oz_elt *)ei->data;
- body = (struct oz_isoc_fixed *)(elt + 1);
- body->type = OZ_USB_ENDPOINT_DATA;
- body->endpoint = ep_num;
- body->format = OZ_DATA_F_ISOC_FIXED;
- unit_size = urb->iso_frame_desc[i].length;
- body->unit_size = (u8)unit_size;
- data = ((u8 *)(elt+1)) + hdr_size;
- unit_count = 0;
- while (i < urb->number_of_packets) {
- desc = &urb->iso_frame_desc[i];
- if ((unit_size == desc->length) &&
- (desc->length <= rem)) {
- memcpy(data, ((u8 *)urb->transfer_buffer) +
- desc->offset, unit_size);
- data += unit_size;
- rem -= unit_size;
- unit_count++;
- desc->status = 0;
- desc->actual_length = desc->length;
- i++;
- } else {
- break;
- }
- }
- elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
- /* Store the number of units in body->frame_number for the
- * moment; the correct frame number is filled in before the
- * element is sent.
- */
- body->frame_number = (u8)unit_count;
- oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
- pd->mode & OZ_F_ISOC_ANYTIME);
- }
- return 0;
-}
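The packing arithmetic is worth a concrete pass. With the packed structure sizes above, sizeof(struct oz_isoc_fixed) is 8, so hdr_size = 7 and MAX_ISOC_FIXED_DATA = 253 - 8 = 245; an illustrative run:

/* Sketch: three 64-byte ISO packets packed into one element.
 * rem         = 245 - 3 * 64 = 53
 * elt->length = 7 + 245 - 53 = 199 bytes
 * A fourth 64-byte packet no longer fits (53 < 64), so it starts a
 * new element.
 */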
-
-/*
- * Context: softirq-serialized
- */
-static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
- struct oz_usb_hdr *usb_hdr, int len)
-{
- struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
-
- switch (data_hdr->format) {
- case OZ_DATA_F_MULTIPLE_FIXED: {
- struct oz_multiple_fixed *body =
- (struct oz_multiple_fixed *)data_hdr;
- u8 *data = body->data;
- unsigned int n;
- if (!body->unit_size ||
- len < sizeof(struct oz_multiple_fixed) - 1)
- break;
- n = (len - (sizeof(struct oz_multiple_fixed) - 1))
- / body->unit_size;
- while (n--) {
- oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
- data, body->unit_size);
- data += body->unit_size;
- }
- }
- break;
- case OZ_DATA_F_ISOC_FIXED: {
- struct oz_isoc_fixed *body =
- (struct oz_isoc_fixed *)data_hdr;
- int data_len;
- int unit_size = body->unit_size;
- u8 *data = body->data;
- int count;
- int i;
-
- if (len < sizeof(struct oz_isoc_fixed) - 1)
- break;
- data_len = len - (sizeof(struct oz_isoc_fixed) - 1);
-
- if (!unit_size)
- break;
- count = data_len/unit_size;
- for (i = 0; i < count; i++) {
- oz_hcd_data_ind(usb_ctx->hport,
- body->endpoint, data, unit_size);
- data += unit_size;
- }
- }
- break;
- }
-
-}
-
-/*
- * This is called when the PD has received a USB element. The type of element
- * is determined and is then passed to an appropriate handler function.
- * Context: softirq-serialized
- */
-void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
-{
- struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
- struct oz_usb_ctx *usb_ctx;
-
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
- if (usb_ctx)
- oz_usb_get(usb_ctx);
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- if (usb_ctx == NULL)
- return; /* Context has gone so nothing to do. */
- if (usb_ctx->stopped)
- goto done;
- /* If sequence number is non-zero then check it is not a duplicate.
- * Zero sequence numbers are always accepted.
- */
- if (usb_hdr->elt_seq_num != 0) {
- if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
- /* Reject duplicate element. */
- goto done;
- }
- usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
- switch (usb_hdr->type) {
- case OZ_GET_DESC_RSP: {
- struct oz_get_desc_rsp *body =
- (struct oz_get_desc_rsp *)usb_hdr;
- u16 offs, total_size;
- u8 data_len;
-
- if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
- break;
- data_len = elt->length -
- (sizeof(struct oz_get_desc_rsp) - 1);
- offs = le16_to_cpu(get_unaligned(&body->offset));
- total_size =
- le16_to_cpu(get_unaligned(&body->total_size));
- oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
- oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
- body->rcode, body->data,
- data_len, offs, total_size);
- }
- break;
- case OZ_SET_CONFIG_RSP: {
- struct oz_set_config_rsp *body =
- (struct oz_set_config_rsp *)usb_hdr;
- oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
- body->rcode, NULL, 0);
- }
- break;
- case OZ_SET_INTERFACE_RSP: {
- struct oz_set_interface_rsp *body =
- (struct oz_set_interface_rsp *)usb_hdr;
- oz_hcd_control_cnf(usb_ctx->hport,
- body->req_id, body->rcode, NULL, 0);
- }
- break;
- case OZ_VENDOR_CLASS_RSP: {
- struct oz_vendor_class_rsp *body =
- (struct oz_vendor_class_rsp *)usb_hdr;
-
- if (elt->length <
- sizeof(struct oz_vendor_class_rsp) - 1)
- break;
-
- oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
- body->rcode, body->data, elt->length-
- sizeof(struct oz_vendor_class_rsp)+1);
- }
- break;
- case OZ_USB_ENDPOINT_DATA:
- oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
- break;
- }
-done:
- oz_usb_put(usb_ctx);
-}
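The duplicate test above treats any sequence number in the half-window behind rx_seq_num as stale. A minimal userspace sketch of the same arithmetic (names are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when elt_seq_num lies in the 128-value window at or behind
 * rx_seq_num, mirroring the test in oz_usb_rx(). */
static int oz_seq_is_stale(uint8_t rx_seq_num, uint8_t elt_seq_num)
{
	return ((uint8_t)(rx_seq_num - elt_seq_num) & 0x80) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       oz_seq_is_stale(200, 199),	/* 1: old, rejected */
	       oz_seq_is_stale(200, 201),	/* 0: newer, accepted */
	       oz_seq_is_stale(250, 3));	/* 0: newer after wrap */
	return 0;
}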
-
-/*
- * Context: softirq, process
- */
-void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
-{
- struct oz_usb_ctx *usb_ctx;
-
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
- usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
- if (usb_ctx)
- oz_usb_get(usb_ctx);
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
- if (usb_ctx == NULL)
- return; /* Context has gone so nothing to do. */
- if (!usb_ctx->stopped) {
- oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
- oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
- }
- oz_usb_put(usb_ctx);
-}
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index bda208da514e..3e9ee7ee6be2 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -781,14 +781,18 @@ static void long_sleep(int ms)
schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
-/* send a serial byte to the LCD panel. The caller is responsible for locking
- if needed. */
+/*
+ * send a serial byte to the LCD panel. The caller is responsible for locking
+ * if needed.
+ */
static void lcd_send_serial(int byte)
{
int bit;
- /* the data bit is set on D0, and the clock on STROBE.
- * LCD reads D0 on STROBE's rising edge. */
+ /*
+ * the data bit is set on D0, and the clock on STROBE.
+ * LCD reads D0 on STROBE's rising edge.
+ */
for (bit = 0; bit < 8; bit++) {
bits.cl = BIT_CLR; /* CLK low */
panel_set_bits();
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
new file mode 100644
index 000000000000..cf5fe9bb87a1
--- /dev/null
+++ b/drivers/staging/rdma/Kconfig
@@ -0,0 +1,31 @@
+menuconfig STAGING_RDMA
+ bool "RDMA staging drivers"
+ depends on INFINIBAND
+ depends on PCI || BROKEN
+ depends on HAS_IOMEM
+ depends on NET
+ depends on INET
+ default n
+ ---help---
+ This option allows you to select a number of RDMA drivers that
+ fall into one of two categories: deprecated drivers being held
+ here before finally being removed or new drivers that still need
+ some work before being moved to the normal RDMA driver area.
+
+ If you wish to work on these drivers, to help improve them, or
+ to report problems you have with them, please use the
+ linux-rdma@vger.kernel.org mailing list.
+
+ If in doubt, say N here.
+
+# Please keep entries in alphabetic order
+if STAGING_RDMA
+
+source "drivers/staging/rdma/amso1100/Kconfig"
+
+source "drivers/staging/rdma/hfi1/Kconfig"
+
+source "drivers/staging/rdma/ipath/Kconfig"
+
+endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
new file mode 100644
index 000000000000..cbd915ac7f20
--- /dev/null
+++ b/drivers/staging/rdma/Makefile
@@ -0,0 +1,4 @@
+# Entries for RDMA_STAGING tree
+obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
+obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
index 950dfabcd89d..950dfabcd89d 100644
--- a/drivers/infiniband/hw/amso1100/Kbuild
+++ b/drivers/staging/rdma/amso1100/Kbuild
diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
index e6ce5f209e47..e6ce5f209e47 100644
--- a/drivers/infiniband/hw/amso1100/Kconfig
+++ b/drivers/staging/rdma/amso1100/Kconfig
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
new file mode 100644
index 000000000000..18b00a5cb549
--- /dev/null
+++ b/drivers/staging/rdma/amso1100/TODO
@@ -0,0 +1,4 @@
+7/2015
+
+The amso1100 driver has been deprecated and moved to drivers/staging.
+It will be removed in the 4.6 merge window.
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
index 766a71ccefed..766a71ccefed 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/staging/rdma/amso1100/c2.c
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
index d619d735838b..d619d735838b 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/staging/rdma/amso1100/c2.h
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
index cedda25232be..cedda25232be 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/staging/rdma/amso1100/c2_ae.c
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
index 3a065c33b83b..3a065c33b83b 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.h
+++ b/drivers/staging/rdma/amso1100/c2_ae.h
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
index 78d247ec6961..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/staging/rdma/amso1100/c2_alloc.c
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
index 23bfa94fbd4e..23bfa94fbd4e 100644
--- a/drivers/infiniband/hw/amso1100/c2_cm.c
+++ b/drivers/staging/rdma/amso1100/c2_cm.c
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
index 1b63185b4ad4..1b63185b4ad4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/staging/rdma/amso1100/c2_cq.c
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
index 3a17d9b36dba..3a17d9b36dba 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/staging/rdma/amso1100/c2_intr.c
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
index 119c4f3d9791..119c4f3d9791 100644
--- a/drivers/infiniband/hw/amso1100/c2_mm.c
+++ b/drivers/staging/rdma/amso1100/c2_mm.c
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
index 0cddc49beae1..0cddc49beae1 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.c
+++ b/drivers/staging/rdma/amso1100/c2_mq.c
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
index fc1b9a7cec4b..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/staging/rdma/amso1100/c2_mq.h
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
index f3e81dc357bb..f3e81dc357bb 100644
--- a/drivers/infiniband/hw/amso1100/c2_pd.c
+++ b/drivers/staging/rdma/amso1100/c2_pd.c
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
index 25c3f0085563..25c3f0085563 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/staging/rdma/amso1100/c2_provider.c
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
index bf189987711f..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/staging/rdma/amso1100/c2_provider.h
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
index 86708dee58b1..86708dee58b1 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/staging/rdma/amso1100/c2_qp.c
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
index d2a6d961344b..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/staging/rdma/amso1100/c2_rnic.c
diff --git a/drivers/infiniband/hw/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
index 6ee4aa92d875..6ee4aa92d875 100644
--- a/drivers/infiniband/hw/amso1100/c2_status.h
+++ b/drivers/staging/rdma/amso1100/c2_status.h
diff --git a/drivers/infiniband/hw/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
index 7e9e7ad65467..7e9e7ad65467 100644
--- a/drivers/infiniband/hw/amso1100/c2_user.h
+++ b/drivers/staging/rdma/amso1100/c2_user.h
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
index 2ec716fb2edb..2ec716fb2edb 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/staging/rdma/amso1100/c2_vq.c
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
index 33805627a607..33805627a607 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.h
+++ b/drivers/staging/rdma/amso1100/c2_vq.h
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
index 8d4b4ca463ca..8d4b4ca463ca 100644
--- a/drivers/infiniband/hw/amso1100/c2_wr.h
+++ b/drivers/staging/rdma/amso1100/c2_wr.h
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
new file mode 100644
index 000000000000..fd25078ee923
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -0,0 +1,37 @@
+config INFINIBAND_HFI1
+ tristate "Intel OPA Gen1 support"
+ depends on X86_64
+ default m
+ ---help---
+	 This is a low-level driver for the Intel OPA Gen1 adapter.
+
+config HFI1_DEBUG_SDMA_ORDER
+ bool "HFI1 SDMA Order debug"
+ depends on INFINIBAND_HFI1
+ default n
+ ---help---
+	 This is a debug flag to test for out-of-order
+	 SDMA completions during unit testing.
+
+config HFI1_VERBS_31BIT_PSN
+ bool "HFI1 enable 31 bit PSN"
+ depends on INFINIBAND_HFI1
+ default y
+ ---help---
+	 Setting this enables 31-bit PSNs
+	 for verbs RC/UC.
+
+config SDMA_VERBOSITY
+ bool "Config SDMA Verbosity"
+ depends on INFINIBAND_HFI1
+ default n
+ ---help---
+	 This is a configuration flag to enable verbose
+	 SDMA debug output.
+
+config PRESCAN_RXQ
+ bool "Enable prescanning of the RX queue for ECNs"
+ depends on INFINIBAND_HFI1
+ default n
+ ---help---
+	 This option toggles the prescanning of the receive queue for
+	 Explicit Congestion Notifications. If an ECN is detected, it is
+	 processed as quickly as possible and the ECN bit is toggled off.
+	 After the prescanning step, the receive queue is processed as
+	 usual.
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile
new file mode 100644
index 000000000000..2e5daa6cdcc2
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/Makefile
@@ -0,0 +1,19 @@
+#
+# HFI driver
+#
+# Called from the kernel module build system.
+#
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
+
+hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o eprom.o file_ops.o firmware.o \
+ init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \
+ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \
+ uc.o ud.o user_pages.o user_sdma.o verbs_mcast.o verbs.o
+hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
+
+CFLAGS_trace.o = -I$(src)
+ifdef MVERSION
+CFLAGS_driver.o = -DHFI_DRIVER_VERSION_BASE=\"$(MVERSION)\"
+endif
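+
+# Illustrative usage (an assumption, not part of this patch): building with
+# "make MVERSION=0.9" embeds that string in driver.o as
+# HFI_DRIVER_VERSION_BASE via the CFLAGS_driver.o override above.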
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
new file mode 100644
index 000000000000..05de0dad8762
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -0,0 +1,6 @@
+July, 2015
+
+- Remove unneeded file entries in sysfs
+- Remove software processing of IB protocol and place in library for use
+ by qib, ipath (if still present), hfi1, and eventually soft-roce
+
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
new file mode 100644
index 000000000000..654eafef1d30
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -0,0 +1,10798 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains all of the code that is specific to the HFI chip
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+#include "trace.h"
+#include "mad.h"
+#include "pio.h"
+#include "sdma.h"
+#include "eprom.h"
+
+#define NUM_IB_PORTS 1
+
+uint kdeth_qp;
+module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
+MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
+
+uint num_vls = HFI1_MAX_VLS_SUPPORTED;
+module_param(num_vls, uint, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
+
+/*
+ * Default time to aggregate two 10K packets from the idle state
+ * (timer not running). The timer starts at the end of the first packet,
+ * so only the time for one 10K packet and header plus a bit extra is needed.
+ * 10 * 1024 + 64 header bytes = 10304 bytes
+ * 10304 bytes / 12.5 GB/s = 824.32 ns
+ */
+uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
+module_param(rcv_intr_timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
+
+uint rcv_intr_count = 16; /* same as qib */
+module_param(rcv_intr_count, uint, S_IRUGO);
+MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
+
+ushort link_crc_mask = SUPPORTED_CRCS;
+module_param(link_crc_mask, ushort, S_IRUGO);
+MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
+
+uint loopback;
+module_param_named(loopback, loopback, uint, S_IRUGO);
+MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
+
+/* Other driver tunables */
+uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
+static ushort crc_14b_sideband = 1;
+static uint use_flr = 1;
+uint quick_linkup; /* skip LNI */
+
+struct flag_table {
+ u64 flag; /* the flag */
+ char *str; /* description string */
+ u16 extra; /* extra information */
+ u16 unused0;
+ u32 unused1;
+};
+
+/* str must be a string constant */
+#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
+#define FLAG_ENTRY0(str, flag) {flag, str, 0}
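+
+/*
+ * Illustrative expansion: FLAG_ENTRY0("CceCsrParityErr", mask) yields
+ * { .flag = mask, .str = "CceCsrParityErr", .extra = 0 }, so each table
+ * below maps a single status-register bit to a printable name.
+ */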
+
+/* Send Error Consequences */
+#define SEC_WRITE_DROPPED 0x1
+#define SEC_PACKET_DROPPED 0x2
+#define SEC_SC_HALTED 0x4 /* per-context only */
+#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
+
+#define VL15CTXT 1
+#define MIN_KERNEL_KCTXTS 2
+#define NUM_MAP_REGS 32
+
+/* Bit offset into the GUID which carries HFI id information */
+#define GUID_HFI_INDEX_SHIFT 39
+
+/* extract the emulation revision */
+#define emulator_rev(dd) ((dd)->irev >> 8)
+/* parallel and serial emulation versions are 3 and 4 respectively */
+#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
+#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
+
+/* RSM fields */
+
+/* packet type */
+#define IB_PACKET_TYPE 2ull
+#define QW_SHIFT 6ull
+/* QPN[7..1] */
+#define QPN_WIDTH 7ull
+
+/* LRH.BTH: QW 0, OFFSET 48 - for match */
+#define LRH_BTH_QW 0ull
+#define LRH_BTH_BIT_OFFSET 48ull
+#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
+#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
+#define LRH_BTH_SELECT
+#define LRH_BTH_MASK 3ull
+#define LRH_BTH_VALUE 2ull
+
+/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
+#define LRH_SC_QW 0ull
+#define LRH_SC_BIT_OFFSET 56ull
+#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
+#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
+#define LRH_SC_MASK 128ull
+#define LRH_SC_VALUE 0ull
+
+/* SC[n..0] QW 0, OFFSET 60 - for select */
+#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
+
+/* QPN[m+n:1] QW 1, OFFSET 1 */
+#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
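+
+/*
+ * Each offset above encodes (quadword << QW_SHIFT) | bit offset; e.g.
+ * LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48 and
+ * QPN_SELECT_OFFSET = (1 << 6) | 1 = 65.
+ */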
+
+/* defines to build power on SC2VL table */
+#define SC2VL_VAL( \
+ num, \
+ sc0, sc0val, \
+ sc1, sc1val, \
+ sc2, sc2val, \
+ sc3, sc3val, \
+ sc4, sc4val, \
+ sc5, sc5val, \
+ sc6, sc6val, \
+ sc7, sc7val) \
+( \
+ ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
+ ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
+ ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
+ ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
+ ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
+ ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
+ ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
+ ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
+)
+
+#define DC_SC_VL_VAL( \
+ range, \
+ e0, e0val, \
+ e1, e1val, \
+ e2, e2val, \
+ e3, e3val, \
+ e4, e4val, \
+ e5, e5val, \
+ e6, e6val, \
+ e7, e7val, \
+ e8, e8val, \
+ e9, e9val, \
+ e10, e10val, \
+ e11, e11val, \
+ e12, e12val, \
+ e13, e13val, \
+ e14, e14val, \
+ e15, e15val) \
+( \
+ ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
+ ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
+ ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
+ ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
+ ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
+ ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
+ ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
+ ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
+ ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
+ ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
+ ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
+ ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
+ ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
+ ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
+ ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
+ ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
+)
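+
+/*
+ * Illustrative use of the builders above: SC2VL_VAL(0, 0, vl0, 1, vl1, ...)
+ * shifts each SC's VL value into that SC's field of a single u64 suitable
+ * for a SEND_SC2VLT<num> CSR; DC_SC_VL_VAL does the same for the 16-entry
+ * DCC_CFG_SC_VL_TABLE ranges.
+ */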
+
+/* all CceStatus sub-block freeze bits */
+#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
+ | CCE_STATUS_RXE_FROZE_SMASK \
+ | CCE_STATUS_TXE_FROZE_SMASK \
+ | CCE_STATUS_TXE_PIO_FROZE_SMASK)
+/* all CceStatus sub-block TXE pause bits */
+#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
+ | CCE_STATUS_TXE_PAUSED_SMASK \
+ | CCE_STATUS_SDMA_PAUSED_SMASK)
+/* all CceStatus sub-block RXE pause bits */
+#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
+
+/*
+ * CCE Error flags.
+ */
+static struct flag_table cce_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
+ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
+/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
+ CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
+/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
+ CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
+/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
+ CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
+/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
+ CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
+/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
+ CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
+/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
+/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
+/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
+/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
+/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
+ CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
+/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
+ CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
+/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
+ CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
+/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
+ CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
+/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
+ CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
+/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
+ CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
+/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
+ CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
+/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
+ CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
+/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
+ CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
+/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
+ CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
+/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
+ CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
+/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
+ CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
+/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
+ CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
+/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
+ CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
+/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
+ CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
+/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
+ CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
+/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
+ CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
+/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
+ CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
+/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
+ CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
+/*31*/ FLAG_ENTRY0("LATriggered",
+ CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
+/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
+ CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
+/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
+ CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
+/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
+ CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
+/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
+ CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
+/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
+ CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
+/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
+ CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
+/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
+ CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
+/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
+ CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
+/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
+ CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
+/*41-63 reserved*/
+};
+
+/*
+ * Misc Error flags
+ */
+#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
+static struct flag_table misc_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
+/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
+/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
+/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
+/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
+/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
+/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
+/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
+/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
+/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
+/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
+/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
+/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
+};
+
+/*
+ * TXE PIO Error flags and consequences
+ */
+static struct flag_table pio_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
+ SEC_WRITE_DROPPED,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
+/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
+/* 2*/ FLAG_ENTRY("PioCsrParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
+/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
+/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
+/* 5*/ FLAG_ENTRY("PioPccFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
+/* 6*/ FLAG_ENTRY("PioPecFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
+/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
+/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
+/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
+/*10*/ FLAG_ENTRY("PioSmPktResetParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
+/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
+/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
+/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
+/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
+/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
+/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
+/*17*/ FLAG_ENTRY("PioInitSmIn",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
+/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
+/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
+/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
+ 0,
+ SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
+/*21*/ FLAG_ENTRY("PioWriteDataParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
+/*22*/ FLAG_ENTRY("PioStateMachine",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
+/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
+ SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
+/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
+ SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
+/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
+/*26*/ FLAG_ENTRY("PioVlfSopParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
+/*27*/ FLAG_ENTRY("PioVlFifoParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
+/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
+/*29*/ FLAG_ENTRY("PioPpmcSopLen",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
+/*30-31 reserved*/
+/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
+/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
+/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
+/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
+ SEC_SPC_FREEZE,
+ SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
+/*36-63 reserved*/
+};
+
+/* TXE PIO errors that cause an SPC freeze */
+#define ALL_PIO_FREEZE_ERR \
+ (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
+ | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
+
+/*
+ * TXE SDMA Error flags
+ */
+static struct flag_table sdma_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
+ SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
+/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
+ SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
+/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
+ SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
+/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
+ SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
+/*04-63 reserved*/
+};
+
+/* TXE SDMA errors that cause an SPC freeze */
+#define ALL_SDMA_FREEZE_ERR \
+ (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
+ | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
+ | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
+
+/*
+ * TXE Egress Error flags
+ */
+#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
+static struct flag_table egress_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
+/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
+/* 2 reserved */
+/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
+ SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
+/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
+/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
+/* 6 reserved */
+/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
+ SEES(TX_PIO_LAUNCH_INTF_PARITY)),
+/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
+ SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
+/* 9-10 reserved */
+/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
+ SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
+/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
+/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
+/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
+/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
+/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
+ SEES(TX_SDMA0_DISALLOWED_PACKET)),
+/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
+ SEES(TX_SDMA1_DISALLOWED_PACKET)),
+/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
+ SEES(TX_SDMA2_DISALLOWED_PACKET)),
+/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
+ SEES(TX_SDMA3_DISALLOWED_PACKET)),
+/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
+ SEES(TX_SDMA4_DISALLOWED_PACKET)),
+/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
+ SEES(TX_SDMA5_DISALLOWED_PACKET)),
+/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
+ SEES(TX_SDMA6_DISALLOWED_PACKET)),
+/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
+ SEES(TX_SDMA7_DISALLOWED_PACKET)),
+/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
+ SEES(TX_SDMA8_DISALLOWED_PACKET)),
+/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
+ SEES(TX_SDMA9_DISALLOWED_PACKET)),
+/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
+ SEES(TX_SDMA10_DISALLOWED_PACKET)),
+/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
+ SEES(TX_SDMA11_DISALLOWED_PACKET)),
+/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
+ SEES(TX_SDMA12_DISALLOWED_PACKET)),
+/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
+ SEES(TX_SDMA13_DISALLOWED_PACKET)),
+/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
+ SEES(TX_SDMA14_DISALLOWED_PACKET)),
+/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
+ SEES(TX_SDMA15_DISALLOWED_PACKET)),
+/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
+/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
+/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
+/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
+/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
+/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
+/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
+/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
+/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
+ SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
+/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
+/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
+/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
+/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
+/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
+/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
+/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
+/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
+/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
+/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
+/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
+/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
+/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
+/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
+/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
+/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
+/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
+/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
+/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
+/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
+/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
+/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
+ SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
+/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
+ SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
+};
+
+/*
+ * TXE Egress Error Info flags
+ */
+#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
+static struct flag_table egress_err_info_flags[] = {
+/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
+/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
+/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
+/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
+/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
+/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
+/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
+/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
+/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
+/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
+/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
+/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
+/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
+/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
+/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
+/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
+/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
+/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
+/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
+/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
+/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
+/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
+};
+
+/* TXE Egress errors that cause an SPC freeze */
+#define ALL_TXE_EGRESS_FREEZE_ERR \
+ (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
+ | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
+ | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
+ | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
+ | SEES(TX_LAUNCH_CSR_PARITY) \
+ | SEES(TX_SBRD_CTL_CSR_PARITY) \
+ | SEES(TX_CONFIG_PARITY) \
+ | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
+ | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
+ | SEES(TX_CREDIT_RETURN_PARITY))
+
+/*
+ * TXE Send error flags
+ */
+#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
+static struct flag_table send_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr", SES(CSR_PARITY)),
+/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
+/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
+};
+
+/*
+ * TXE Send Context Error flags and consequences
+ */
+static struct flag_table sc_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY("InconsistentSop",
+ SEC_PACKET_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
+/* 1*/ FLAG_ENTRY("DisallowedPacket",
+ SEC_PACKET_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
+/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
+/* 3*/ FLAG_ENTRY("WriteOverflow",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
+/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
+ SEC_WRITE_DROPPED | SEC_SC_HALTED,
+ SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
+/* 5-63 reserved*/
+};
+
+/*
+ * RXE Receive Error flags
+ */
+#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
+static struct flag_table rxe_err_status_flags[] = {
+/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
+/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
+/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
+/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
+/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
+/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
+/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
+/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
+/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
+/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
+/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
+/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
+/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
+/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
+/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
+/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
+/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
+ RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
+/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
+/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
+/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
+ RXES(RBUF_BLOCK_LIST_READ_UNC)),
+/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
+ RXES(RBUF_BLOCK_LIST_READ_COR)),
+/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
+ RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
+/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
+ RXES(RBUF_CSR_QENT_CNT_PARITY)),
+/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
+ RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
+/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
+ RXES(RBUF_CSR_QVLD_BIT_PARITY)),
+/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
+/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
+/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
+ RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
+/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
+/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
+/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
+/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
+/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
+/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
+/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
+/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
+ RXES(RBUF_FL_INITDONE_PARITY)),
+/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
+ RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
+/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
+/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
+/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
+/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
+ RXES(LOOKUP_DES_PART1_UNC_COR)),
+/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
+ RXES(LOOKUP_DES_PART2_PARITY)),
+/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
+/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
+/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
+/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
+/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
+/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
+/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
+/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
+/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
+/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
+/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
+/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
+/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
+/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
+/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
+/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
+/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
+/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
+/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
+/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
+/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
+/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
+};
+
+/* RXE errors that will trigger an SPC freeze */
+#define ALL_RXE_FREEZE_ERR \
+ (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
+ | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
+
+#define RXE_FREEZE_ABORT_MASK \
+ (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
+ RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
+ RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
+
+/*
+ * DCC Error Flags
+ */
+#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
+static struct flag_table dcc_err_flags[] = {
+ FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
+ FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
+ FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
+ FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
+ FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
+ FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
+ FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
+ FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
+ FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
+ FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
+ FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
+ FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
+ FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
+ FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
+ FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
+ FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
+ FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
+ FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
+ FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
+ FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
+ FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
+ FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
+ FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
+ FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
+ FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
+ FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
+ FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
+ FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
+ FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
+ FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
+ FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
+ FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
+ FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
+ FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
+ FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
+ FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
+ FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
+ FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
+ FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
+ FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
+ FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
+ FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
+ FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
+ FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
+ FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
+ FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
+};
+
+/*
+ * LCB error flags
+ */
+#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
+static struct flag_table lcb_err_flags[] = {
+/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
+/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
+/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
+/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
+ LCBE(ALL_LNS_FAILED_REINIT_TEST)),
+/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
+/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
+/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
+/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
+/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
+/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
+/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
+/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
+/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
+/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
+ LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
+/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
+/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
+/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
+/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
+/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
+/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
+ LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
+/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
+/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
+/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
+/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
+/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
+/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
+/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
+ LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
+/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
+/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
+ LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
+/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
+ LCBE(REDUNDANT_FLIT_PARITY_ERR))
+};
+
+/*
+ * DC8051 Error Flags
+ */
+#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
+static struct flag_table dc8051_err_flags[] = {
+ FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
+ FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
+ FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
+ FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
+ FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
+ FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
+ FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
+ FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
+ FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
+ D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
+ FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
+};
+
+/*
+ * DC8051 Information Error flags
+ *
+ * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
+ */
+static struct flag_table dc8051_info_err_flags[] = {
+ FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
+ FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
+ FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
+ FLAG_ENTRY0("Serdes internal loopback failure",
+ FAILED_SERDES_INTERNAL_LOOPBACK),
+ FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
+ FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
+ FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
+ FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
+ FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
+ FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
+ FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
+ FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
+};
+
+/*
+ * DC8051 Information Host Information flags
+ *
+ * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
+ */
+static struct flag_table dc8051_info_host_msg_flags[] = {
+ FLAG_ENTRY0("Host request done", 0x0001),
+ FLAG_ENTRY0("BC SMA message", 0x0002),
+ FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
+ FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
+ FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
+ FLAG_ENTRY0("External device config request", 0x0020),
+ FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
+ FLAG_ENTRY0("LinkUp achieved", 0x0080),
+ FLAG_ENTRY0("Link going down", 0x0100),
+};
+
+static u32 encoded_size(u32 size);
+static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
+static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
+static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
+ u8 *continuous);
+static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
+ u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
+static void read_vc_remote_link_width(struct hfi1_devdata *dd,
+ u8 *remote_tx_rate, u16 *link_widths);
+static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths);
+static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
+ u8 *device_rev);
+static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
+static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
+static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
+ u8 *tx_polarity_inversion,
+ u8 *rx_polarity_inversion, u8 *max_rate);
+static void handle_sdma_eng_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
+static void handle_dcc_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_lcb_err(struct hfi1_devdata *dd,
+ unsigned int context, u64 err_status);
+static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
+static void set_partition_keys(struct hfi1_pportdata *);
+static const char *link_state_name(u32 state);
+static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
+ u32 state);
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data);
+static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
+static int thermal_init(struct hfi1_devdata *dd);
+
+static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
+static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
+static void handle_temp_err(struct hfi1_devdata *);
+static void dc_shutdown(struct hfi1_devdata *);
+static void dc_start(struct hfi1_devdata *);
+
+/*
+ * Error interrupt table entry. This is used as input to the interrupt
+ * "clear down" routine used for all second tier error interrupt register.
+ * Second tier interrupt registers have a single bit representing them
+ * in the top-level CceIntStatus.
+ */
+struct err_reg_info {
+ u32 status; /* status CSR offset */
+ u32 clear; /* clear CSR offset */
+ u32 mask; /* mask CSR offset */
+ void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
+ const char *desc;
+};
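+
+/*
+ * Typical clear-down flow (illustrative sketch): read the status CSR,
+ * decode the set bits through the matching flag table, call ->handler,
+ * then write the same bits to the clear CSR to re-arm the interrupt.
+ */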
+
+#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
+#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
+#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
+
+/*
+ * Helpers for building HFI and DC error interrupt table entries. Different
+ * helpers are needed because of inconsistent register names.
+ */
+#define EE(reg, handler, desc) \
+ { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
+ handler, desc }
+#define DC_EE1(reg, handler, desc) \
+ { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
+#define DC_EE2(reg, handler, desc) \
+ { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
+
+/*
+ * Table of the "misc" grouping of error interrupts. Each entry refers to
+ * another register containing more information.
+ */
+static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
+/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
+/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
+/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
+/* 3*/ { 0, 0, 0, NULL }, /* reserved */
+/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
+/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
+/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
+/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
+ /* the rest are reserved */
+};
+
+/*
+ * Index into the Various section of the interrupt sources
+ * corresponding to the Critical Temperature interrupt.
+ */
+#define TCRIT_INT_SOURCE 4
+
+/*
+ * SDMA error interrupt entry - refers to another register containing more
+ * information.
+ */
+static const struct err_reg_info sdma_eng_err =
+ EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
+
+static const struct err_reg_info various_err[NUM_VARIOUS] = {
+/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
+/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
+/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
+/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
+/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
+ /* rest are reserved */
+};
+
+/*
+ * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
+ * register cannot be derived from the MTU value because 10K is not
+ * a power of 2. Therefore, we need a constant. Everything else can
+ * be calculated.
+ */
+#define DCC_CFG_PORT_MTU_CAP_10240 7
+
+/*
+ * Table of the DC grouping of error interrupts. Each entry refers to
+ * another register containing more information.
+ */
+static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
+/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
+/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
+/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
+/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
+ /* the rest are reserved */
+};
+
+struct cntr_entry {
+ /*
+ * counter name
+ */
+ char *name;
+
+ /*
+ * csr to read for name (if applicable)
+ */
+ u64 csr;
+
+ /*
+ * offset into dd or ppd to store the counter's value
+ */
+ int offset;
+
+ /*
+ * flags
+ */
+ u8 flags;
+
+ /*
+ * accessor for stat element, context either dd or ppd
+ */
+ u64 (*rw_cntr)(const struct cntr_entry *,
+ void *context,
+ int vl,
+ int mode,
+ u64 data);
+};
+
+#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
+#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
+
+#define CNTR_ELEM(name, csr, offset, flags, accessor) \
+{ \
+ name, \
+ csr, \
+ offset, \
+ flags, \
+ accessor \
+}
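+
+/*
+ * The macros below specialize CNTR_ELEM for each counter array; e.g.
+ * (illustrative) RXE32_DEV_CNTR_ELEM(Foo, 3, 0) names the counter "Foo",
+ * reads RCV_COUNTER_ARRAY32 + 3 * 8, and marks it CNTR_32BIT.
+ */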
+
+/* 32bit RXE */
+#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ port_access_u32_csr)
+
+#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
+
+/* 64bit RXE */
+#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY64), \
+ 0, flags, \
+ port_access_u64_csr)
+
+#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + RCV_COUNTER_ARRAY64), \
+ 0, flags, \
+ dev_access_u64_csr)
+
+#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
+#define OVR_ELM(ctx) \
+CNTR_ELEM("RcvHdrOvr" #ctx, \
+ (RCV_HDR_OVFL_CNT + ctx*0x100), \
+ 0, CNTR_NORMAL, port_access_u64_csr)
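+
+/*
+ * Per-context overflow counters sit at a 0x100-byte stride, so
+ * OVR_ELM(2), for example, reads RCV_HDR_OVFL_CNT + 0x200.
+ */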
+
+/* 32bit TXE */
+#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + SEND_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ port_access_u32_csr)
+
+/* 64bit TXE */
+#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + SEND_COUNTER_ARRAY64), \
+ 0, flags, \
+ port_access_u64_csr)
+
+#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ counter * 8 + SEND_COUNTER_ARRAY64, \
+ 0, \
+ flags, \
+ dev_access_u64_csr)
+
+/* CCE */
+#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + CCE_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
+
+#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
+CNTR_ELEM(#name, \
+ (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
+ 0, flags | CNTR_32BIT, \
+ dev_access_u32_csr)
+
+/* DC */
+#define DC_PERF_CNTR(name, counter, flags) \
+CNTR_ELEM(#name, \
+ counter, \
+ 0, \
+ flags, \
+ dev_access_u64_csr)
+
+#define DC_PERF_CNTR_LCB(name, counter, flags) \
+CNTR_ELEM(#name, \
+ counter, \
+ 0, \
+ flags, \
+ dc_access_lcb_cntr)
+
+/* ibp counters */
+#define SW_IBP_CNTR(name, cntr) \
+CNTR_ELEM(#name, \
+ 0, \
+ 0, \
+ CNTR_SYNTH, \
+ access_ibp_##cntr)
+
+u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
+{
+ u64 val;
+
+ if (dd->flags & HFI1_PRESENT) {
+ val = readq((void __iomem *)dd->kregbase + offset);
+ return val;
+ }
+ return -1;
+}
+
+void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
+{
+ if (dd->flags & HFI1_PRESENT)
+ writeq(value, (void __iomem *)dd->kregbase + offset);
+}
+
+void __iomem *get_csr_addr(
+ struct hfi1_devdata *dd,
+ u32 offset)
+{
+ return (void __iomem *)dd->kregbase + offset;
+}
+
+static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
+ int mode, u64 value)
+{
+ u64 ret;
+
+ if (mode == CNTR_MODE_R) {
+ ret = read_csr(dd, csr);
+ } else if (mode == CNTR_MODE_W) {
+ write_csr(dd, csr, value);
+ ret = value;
+ } else {
+ dd_dev_err(dd, "Invalid cntr register access mode");
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
+ return ret;
+}
+
+/* Dev Access */
+static u64 dev_access_u32_csr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_csr(dd, entry->csr, mode, data);
+}
+
+static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ u64 val = 0;
+ u64 csr = entry->csr;
+
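+ /* per-VL counters are consecutive 64-bit CSRs, hence the 8 * vl step */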
+ if (entry->flags & CNTR_VL) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 8 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+
+ val = read_write_csr(dd, csr, mode, data);
+ return val;
+}
+
+static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+ u32 csr = entry->csr;
+ int ret = 0;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ if (mode == CNTR_MODE_R)
+ ret = read_lcb_csr(dd, csr, &data);
+ else if (mode == CNTR_MODE_W)
+ ret = write_lcb_csr(dd, csr, data);
+
+ if (ret) {
+ dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
+ return data;
+}
+
+/* Port Access */
+static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_csr(ppd->dd, entry->csr, mode, data);
+}
+
+static u64 port_access_u64_csr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+ u64 val;
+ u64 csr = entry->csr;
+
+ if (entry->flags & CNTR_VL) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 8 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+ val = read_write_csr(ppd->dd, csr, mode, data);
+ return val;
+}
+
+/* Software defined */
+static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
+ u64 data)
+{
+ u64 ret;
+
+ if (mode == CNTR_MODE_R) {
+ ret = *cntr;
+ } else if (mode == CNTR_MODE_W) {
+ *cntr = data;
+ ret = data;
+ } else {
+ dd_dev_err(dd, "Invalid cntr sw access mode");
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
+
+ return ret;
+}
+
+static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
+}
+
+static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
+ int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
+}
+
+static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
+}
+
+static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
+ mode, data);
+}
+
+static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
+ mode, data);
+}
+
+u64 get_all_cpu_total(u64 __percpu *cntr)
+{
+ int cpu;
+ u64 counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += *per_cpu_ptr(cntr, cpu);
+ return counter;
+}
+
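+/*
+ * Per-CPU counters cannot be written with an arbitrary value; a write
+ * of zero records the current total in *z_val as a baseline, and reads
+ * report the running total minus that baseline.
+ */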
+static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
+ u64 __percpu *cntr,
+ int vl, int mode, u64 data)
+{
+ u64 ret = 0;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+
+ if (mode == CNTR_MODE_R) {
+ ret = get_all_cpu_total(cntr) - *z_val;
+ } else if (mode == CNTR_MODE_W) {
+ /* A write can only zero the counter */
+ if (data == 0)
+ *z_val = get_all_cpu_total(cntr);
+ else
+ dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
+ } else {
+ dd_dev_err(dd, "Invalid cntr sw cpu access mode");
+ return 0;
+ }
+
+ return ret;
+}
+
+static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
+ mode, data);
+}
+
+static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
+ mode, data);
+}
+
+static u64 access_sw_pio_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->verbs_dev.n_piowait;
+}
+
+static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->verbs_dev.n_txwait;
+}
+
+static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->verbs_dev.n_kmem_wait;
+}
+
+#define def_access_sw_cpu(cntr) \
+static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
+ void *context, int vl, int mode, u64 data) \
+{ \
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
+ return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr, \
+ ppd->ibport_data.cntr, vl, \
+ mode, data); \
+}
+
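+/*
+ * For example, def_access_sw_cpu(rc_acks) defines access_sw_cpu_rc_acks(),
+ * which reads or zeroes the per-CPU ppd->ibport_data.rc_acks counter
+ * against its z_rc_acks baseline.
+ */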
+def_access_sw_cpu(rc_acks);
+def_access_sw_cpu(rc_qacks);
+def_access_sw_cpu(rc_delayed_comp);
+
+#define def_access_ibp_counter(cntr) \
+static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
+ void *context, int vl, int mode, u64 data) \
+{ \
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
+ \
+ if (vl != CNTR_INVALID_VL) \
+ return 0; \
+ \
+ return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr, \
+ mode, data); \
+}
+
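+/*
+ * For example, def_access_ibp_counter(rc_resends) defines
+ * access_ibp_rc_resends(), backed by ppd->ibport_data.n_rc_resends.
+ */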
+def_access_ibp_counter(loop_pkts);
+def_access_ibp_counter(rc_resends);
+def_access_ibp_counter(rnr_naks);
+def_access_ibp_counter(other_naks);
+def_access_ibp_counter(rc_timeouts);
+def_access_ibp_counter(pkt_drops);
+def_access_ibp_counter(dmawait);
+def_access_ibp_counter(rc_seqnak);
+def_access_ibp_counter(rc_dupreq);
+def_access_ibp_counter(rdma_seq);
+def_access_ibp_counter(unaligned);
+def_access_ibp_counter(seq_naks);
+
+static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
+ CNTR_NORMAL),
+[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
+ CNTR_NORMAL),
+[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
+ RCV_TID_FLOW_GEN_MISMATCH_CNT,
+ CNTR_NORMAL),
+[C_RX_CTX_RHQS] = RXE32_DEV_CNTR_ELEM(RxCtxRHQS, RCV_CONTEXT_RHQ_STALL,
+ CNTR_NORMAL),
+[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
+ CNTR_NORMAL),
+[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
+ RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
+[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
+ CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
+[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
+ CNTR_NORMAL),
+[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
+ CNTR_NORMAL),
+[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
+ CNTR_NORMAL),
+[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
+ CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
+[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
+ CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
+[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
+[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
+ CNTR_SYNTH),
+[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
+ CNTR_SYNTH),
+[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
+ CNTR_SYNTH),
+[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
+ DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
+[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
+ DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
+ DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
+[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
+ CNTR_SYNTH),
+[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
+ CNTR_SYNTH),
+[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
+[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
+[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
+ CNTR_SYNTH),
+[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
+ CNTR_SYNTH),
+[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_DC_TOTAL_CRC] =
+ DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
+ CNTR_SYNTH),
+[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
+ CNTR_SYNTH),
+[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
+ CNTR_SYNTH),
+[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
+ CNTR_SYNTH),
+[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
+ CNTR_SYNTH),
+[C_DC_CRC_MULT_LN] =
+ DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
+ CNTR_SYNTH),
+[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
+ CNTR_SYNTH),
+[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
+ CNTR_SYNTH),
+[C_DC_SEQ_CRC_CNT] =
+ DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_ONLY_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_PLUS1_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
+ CNTR_SYNTH),
+[C_DC_ESC0_PLUS2_CNT] =
+ DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
+ CNTR_SYNTH),
+[C_DC_REINIT_FROM_PEER_CNT] =
+ DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
+ CNTR_SYNTH),
+[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
+ CNTR_SYNTH),
+[C_DC_MISC_FLG_CNT] =
+ DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
+ CNTR_SYNTH),
+[C_DC_PRF_GOOD_LTP_CNT] =
+ DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
+[C_DC_PRF_ACCEPTED_LTP_CNT] =
+ DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
+ CNTR_SYNTH),
+[C_DC_PRF_RX_FLIT_CNT] =
+ DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
+[C_DC_PRF_TX_FLIT_CNT] =
+ DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
+[C_DC_PRF_CLK_CNTR] =
+ DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
+[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
+ DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
+[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
+ DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
+ CNTR_SYNTH),
+[C_DC_PG_STS_TX_SBE_CNT] =
+ DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
+[C_DC_PG_STS_TX_MBE_CNT] =
+ DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
+ CNTR_SYNTH),
+[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_intr),
+[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rcv_limit),
+[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
+ access_sw_vtx_wait),
+[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
+ access_sw_pio_wait),
+[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
+ access_sw_kmem_wait),
+};
+
+static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
+[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
+ CNTR_NORMAL),
+[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
+ CNTR_NORMAL),
+[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
+ CNTR_NORMAL),
+[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
+ CNTR_NORMAL),
+[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
+[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
+[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
+[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
+ CNTR_SYNTH | CNTR_VL),
+[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
+[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
+[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_link_dn_cnt),
+[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_link_up_cnt),
+[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
+ access_sw_xmit_discards),
+[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
+ CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
+ access_sw_xmit_discards),
+[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
+ access_xmit_constraint_errs),
+[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
+ access_rcv_constraint_errs),
+[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
+[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
+[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
+[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
+[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
+[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
+[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
+[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
+[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupReq, rc_dupreq),
+[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
+[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
+[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
+[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_acks),
+[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_qacks),
+[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
+ access_sw_cpu_rc_delayed_comp),
+[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
+[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
+[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
+[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
+[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
+[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
+[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
+[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
+[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
+[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
+[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
+[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
+[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
+[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
+[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
+[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
+[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
+[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
+[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
+[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
+[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
+[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
+[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
+[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
+[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
+[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
+[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
+[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
+[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
+[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
+[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
+[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
+[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
+[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
+[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
+[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
+[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
+[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
+[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
+[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
+[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
+[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
+[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
+[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
+[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
+[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
+[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
+[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
+[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
+[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
+[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
+[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
+[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
+[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
+[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
+[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
+[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
+[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
+[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
+[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
+[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
+[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
+[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
+[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
+[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
+[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
+[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
+[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
+[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
+[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
+[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
+[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
+[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
+[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
+[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
+[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
+[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
+[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
+[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
+[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
+};
+
+/* ======================================================================== */
+
+/* return true if this is chip revision A0 */
+int is_a0(struct hfi1_devdata *dd)
+{
+ return ((dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
+ & CCE_REVISION_CHIP_REV_MINOR_MASK) == 0;
+}
+
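+/*
+ * The minor revision appears to encode the step letter in its high
+ * nibble (0 = A, 1 = B) and the step number in its low nibble; the
+ * masks in is_ax() and is_bx() below rely on that layout.
+ */
+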
+/* return true if this is chip revision A (any Ax step) */
+int is_ax(struct hfi1_devdata *dd)
+{
+ u8 chip_rev_minor =
+ dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+ return (chip_rev_minor & 0xf0) == 0;
+}
+
+/* return true if this is chip revision B (any Bx step) */
+int is_bx(struct hfi1_devdata *dd)
+{
+ u8 chip_rev_minor =
+ dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+ return !!(chip_rev_minor & 0x10);
+}
+
+/*
+ * Append string s to buffer buf. Arguments curp and len are the current
+ * position and remaining length, respectively.
+ *
+ * return 0 on success, 1 on out of room
+ */
+static int append_str(char *buf, char **curp, int *lenp, const char *s)
+{
+ char *p = *curp;
+ int len = *lenp;
+ int result = 0; /* success */
+ char c;
+
+ /* add a comma, if not first in the buffer */
+ if (p != buf) {
+ if (len == 0) {
+ result = 1; /* out of room */
+ goto done;
+ }
+ *p++ = ',';
+ len--;
+ }
+
+ /* copy the string */
+ while ((c = *s++) != 0) {
+ if (len == 0) {
+ result = 1; /* out of room */
+ goto done;
+ }
+ *p++ = c;
+ len--;
+ }
+
+done:
+ /* write return values */
+ *curp = p;
+ *lenp = len;
+
+ return result;
+}
+
+/*
+ * Using the given flag table, print a comma separated string into
+ * the buffer. End in '*' if the buffer is too short.
+ */
+static char *flag_string(char *buf, int buf_len, u64 flags,
+ struct flag_table *table, int table_size)
+{
+ char extra[32];
+ char *p = buf;
+ int len = buf_len;
+ int no_room = 0;
+ int i;
+
+ /* make sure there are at least 2 bytes so we can form "*" */
+ if (len < 2)
+ return "";
+
+ len--; /* leave room for a nul */
+ for (i = 0; i < table_size; i++) {
+ if (flags & table[i].flag) {
+ no_room = append_str(buf, &p, &len, table[i].str);
+ if (no_room)
+ break;
+ flags &= ~table[i].flag;
+ }
+ }
+
+ /* any undocumented bits left? */
+ if (!no_room && flags) {
+ snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
+ no_room = append_str(buf, &p, &len, extra);
+ }
+
+ /* add * if ran out of room */
+ if (no_room) {
+ /* may need to back up to add space for a '*' */
+ if (len == 0)
+ --p;
+ *p++ = '*';
+ }
+
+ /* add final nul - space already allocated above */
+ *p = 0;
+ return buf;
+}
+
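+/*
+ * Example: with (hypothetical) flag names "FooErr" and "BarErr" set plus
+ * an undocumented bit 6, the result is "FooErr,BarErr,bits 0x40"; a
+ * trailing '*' means the buffer was too small.
+ */
+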
+/* first 8 CCE error interrupt source names */
+static const char * const cce_misc_names[] = {
+ "CceErrInt", /* 0 */
+ "RxeErrInt", /* 1 */
+ "MiscErrInt", /* 2 */
+ "Reserved3", /* 3 */
+ "PioErrInt", /* 4 */
+ "SDmaErrInt", /* 5 */
+ "EgressErrInt", /* 6 */
+ "TxeErrInt" /* 7 */
+};
+
+/*
+ * Return the miscellaneous error interrupt name.
+ */
+static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ if (source < ARRAY_SIZE(cce_misc_names))
+ snprintf(buf, bsize, "%s", cce_misc_names[source]);
+ else
+ snprintf(buf, bsize, "Reserved%u",
+ source + IS_GENERAL_ERR_START);
+
+ return buf;
+}
+
+/*
+ * Return the SDMA engine error interrupt name.
+ */
+static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SDmaEngErrInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the send context error interrupt name.
+ */
+static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SendCtxtErrInt%u", source);
+ return buf;
+}
+
+static const char * const various_names[] = {
+ "PbcInt",
+ "GpioAssertInt",
+ "Qsfp1Int",
+ "Qsfp2Int",
+ "TCritInt"
+};
+
+/*
+ * Return the various interrupt name.
+ */
+static char *is_various_name(char *buf, size_t bsize, unsigned int source)
+{
+ if (source < ARRAY_SIZE(various_names))
+ snprintf(buf, bsize, "%s", various_names[source]);
+ else
+ snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
+ return buf;
+}
+
+/*
+ * Return the DC interrupt name.
+ */
+static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
+{
+ static const char * const dc_int_names[] = {
+ "common",
+ "lcb",
+ "8051",
+ "lbm" /* local block merge */
+ };
+
+ if (source < ARRAY_SIZE(dc_int_names))
+ snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
+ else
+ snprintf(buf, bsize, "DCInt%u", source);
+ return buf;
+}
+
+static const char * const sdma_int_names[] = {
+ "SDmaInt",
+ "SdmaIdleInt",
+ "SdmaProgressInt",
+};
+
+/*
+ * Return the SDMA engine interrupt name.
+ */
+static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
+{
+ /* what interrupt */
+ unsigned int what = source / TXE_NUM_SDMA_ENGINES;
+ /* which engine */
+ unsigned int which = source % TXE_NUM_SDMA_ENGINES;
+
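+ /* e.g. if TXE_NUM_SDMA_ENGINES is 16, source 17 is SdmaIdleInt1 */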
+ if (likely(what < ARRAY_SIZE(sdma_int_names)))
+ snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
+ else
+ snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
+ return buf;
+}
+
+/*
+ * Return the receive available interrupt name.
+ */
+static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "RcvAvailInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the receive urgent interrupt name.
+ */
+static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "RcvUrgentInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the send credit interrupt name.
+ */
+static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "SendCreditInt%u", source);
+ return buf;
+}
+
+/*
+ * Return the reserved interrupt name.
+ */
+static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
+{
+ snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
+ return buf;
+}
+
+static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
+}
+
+static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
+}
+
+static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, misc_err_status_flags,
+ ARRAY_SIZE(misc_err_status_flags));
+}
+
+static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
+}
+
+static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ sdma_err_status_flags,
+ ARRAY_SIZE(sdma_err_status_flags));
+}
+
+static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
+}
+
+static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
+}
+
+static char *send_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ send_err_status_flags,
+ ARRAY_SIZE(send_err_status_flags));
+}
+
+static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ /*
+ * For most of these errors, there is nothing that can be done except
+ * report or record it.
+ */
+ dd_dev_info(dd, "CCE Error: %s\n",
+ cce_err_status_string(buf, sizeof(buf), reg));
+
+ if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK)
+ && is_a0(dd)
+ && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
+ /*
+ * This error requires a manual drop into SPC freeze
+ * mode, followed by a fix up.
+ */
+ start_freeze_handling(dd->pport, FREEZE_SELF);
+ }
+}
+
+/*
+ * Check counters for receive errors that do not have an interrupt
+ * associated with them.
+ */
+#define RCVERR_CHECK_TIME 10 /* seconds */
+static void update_rcverr_timer(unsigned long opaque)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_pportdata *ppd = dd->pport;
+ u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
+
+ if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
+ ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
+ dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
+ set_link_down_reason(ppd,
+ OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
+ OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
+ queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
+ }
+ dd->rcv_ovfl_cnt = cur_ovfl_cnt;
+
+ mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
+}
+
+static int init_rcverr(struct hfi1_devdata *dd)
+{
+ init_timer(&dd->rcverr_timer);
+ dd->rcverr_timer.function = update_rcverr_timer;
+ dd->rcverr_timer.data = (unsigned long)dd;
+ /* Assume the hardware counter has been reset */
+ dd->rcv_ovfl_cnt = 0;
+ return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
+}
+
+static void free_rcverr(struct hfi1_devdata *dd)
+{
+ if (dd->rcverr_timer.data)
+ del_timer_sync(&dd->rcverr_timer);
+ dd->rcverr_timer.data = 0;
+}
+
+static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "Receive Error: %s\n",
+ rxe_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_RXE_FREEZE_ERR) {
+ int flags = 0;
+
+ /*
+ * Freeze mode recovery is disabled for the errors
+ * in RXE_FREEZE_ABORT_MASK
+ */
+ if (is_a0(dd) && (reg & RXE_FREEZE_ABORT_MASK))
+ flags = FREEZE_ABORT;
+
+ start_freeze_handling(dd->pport, flags);
+ }
+}
+
+static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "Misc Error: %s",
+ misc_err_status_string(buf, sizeof(buf), reg));
+}
+
+static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "PIO Error: %s\n",
+ pio_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_PIO_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+}
+
+static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "SDMA Error: %s\n",
+ sdma_err_status_string(buf, sizeof(buf), reg));
+
+ if (reg & ALL_SDMA_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+}
+
+static void count_port_inactive(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ if (ppd->port_xmit_discards < ~(u64)0)
+ ppd->port_xmit_discards++;
+}
+
+/*
+ * We have had a "disallowed packet" error during egress. Determine the
+ * integrity check which failed, and update the relevant error counter, etc.
+ *
+ * Note that the SEND_EGRESS_ERR_INFO register has only a single
+ * bit of state per integrity check, and so we can miss the reason for an
+ * egress error if more than one packet fails the same integrity check
+ * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
+ */
+static void handle_send_egress_err_info(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
+ u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
+ char buf[96];
+
+ /* clear down all observed info as quickly as possible after read */
+ write_csr(dd, SEND_EGRESS_ERR_INFO, info);
+
+ dd_dev_info(dd,
+ "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
+ info, egress_err_info_string(buf, sizeof(buf), info), src);
+
+ /* Eventually add other counters for each bit */
+
+ if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
+ if (ppd->port_xmit_discards < ~(u64)0)
+ ppd->port_xmit_discards++;
+ }
+}
+
+/*
+ * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
+ * register. Does it represent a 'port inactive' error?
+ */
+static inline int port_inactive_err(u64 posn)
+{
+ return (posn >= SEES(TX_LINKDOWN) &&
+ posn <= SEES(TX_INCORRECT_LINK_STATE));
+}
+
+/*
+ * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
+ * register. Does it represent a 'disallowed packet' error?
+ */
+static inline int disallowed_pkt_err(u64 posn)
+{
+ return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
+ posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
+}
+
+static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ u64 reg_copy = reg, handled = 0;
+ char buf[96];
+
+ if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
+ start_freeze_handling(dd->pport, 0);
+ if (is_a0(dd) && (reg &
+ SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
+ && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
+ start_freeze_handling(dd->pport, 0);
+
+ while (reg_copy) {
+ int posn = fls64(reg_copy);
+ /*
+ * fls64() returns a 1-based offset, but we generally
+ * want 0-based offsets.
+ */
+ int shift = posn - 1;
+
+ if (port_inactive_err(shift)) {
+ count_port_inactive(dd);
+ handled |= (1ULL << shift);
+ } else if (disallowed_pkt_err(shift)) {
+ handle_send_egress_err_info(dd);
+ handled |= (1ULL << shift);
+ }
+ clear_bit(shift, (unsigned long *)&reg_copy);
+ }
+
+ reg &= ~handled;
+
+ if (reg)
+ dd_dev_info(dd, "Egress Error: %s\n",
+ egress_err_status_string(buf, sizeof(buf), reg));
+}
+
+static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "Send Error: %s\n",
+ send_err_status_string(buf, sizeof(buf), reg));
+}
+
+/*
+ * The maximum number of times the error clear down will loop before
+ * blocking a repeating error. This value is arbitrary.
+ */
+#define MAX_CLEAR_COUNT 20
+
+/*
+ * Clear and handle an error register. All error interrupts are funneled
+ * through here to have a central location to correctly handle single-
+ * or multi-shot errors.
+ *
+ * For non per-context registers, call this routine with a context value
+ * of 0 so the per-context offset is zero.
+ *
+ * If the handler loops too many times, assume that something is wrong
+ * and can't be fixed, so mask the error bits.
+ */
+static void interrupt_clear_down(struct hfi1_devdata *dd,
+ u32 context,
+ const struct err_reg_info *eri)
+{
+ u64 reg;
+ u32 count;
+
+ /* read in a loop until no more errors are seen */
+ count = 0;
+ while (1) {
+ reg = read_kctxt_csr(dd, context, eri->status);
+ if (reg == 0)
+ break;
+ write_kctxt_csr(dd, context, eri->clear, reg);
+ if (likely(eri->handler))
+ eri->handler(dd, context, reg);
+ count++;
+ if (count > MAX_CLEAR_COUNT) {
+ u64 mask;
+
+ dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
+ eri->desc, reg);
+ /*
+ * Read-modify-write so any other masked bits
+ * remain masked.
+ */
+ mask = read_kctxt_csr(dd, context, eri->mask);
+ mask &= ~reg;
+ write_kctxt_csr(dd, context, eri->mask, mask);
+ break;
+ }
+ }
+}
+
+/*
+ * CCE block "misc" interrupt. Source is < 16.
+ */
+static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &misc_errs[source];
+
+ if (eri->handler) {
+ interrupt_clear_down(dd, 0, eri);
+ } else {
+ dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
+ source);
+ }
+}
+
+static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags,
+ sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
+}
+
+/*
+ * Send context error interrupt. Source (hw_context) is < 160.
+ *
+ * All send context errors cause the send context to halt. The normal
+ * clear-down mechanism cannot be used because we cannot clear the
+ * error bits until several other long-running items are done first.
+ * This is OK because with the context halted, nothing else is going
+ * to happen on it anyway.
+ */
+static void is_sendctxt_err_int(struct hfi1_devdata *dd,
+ unsigned int hw_context)
+{
+ struct send_context_info *sci;
+ struct send_context *sc;
+ char flags[96];
+ u64 status;
+ u32 sw_index;
+
+ sw_index = dd->hw_to_sw[hw_context];
+ if (sw_index >= dd->num_send_contexts) {
+ dd_dev_err(dd,
+ "out of range sw index %u for send context %u\n",
+ sw_index, hw_context);
+ return;
+ }
+ sci = &dd->send_contexts[sw_index];
+ sc = sci->sc;
+ if (!sc) {
+ dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
+ sw_index, hw_context);
+ return;
+ }
+
+ /* tell the software that a halt has begun */
+ sc_stop(sc, SCF_HALTED);
+
+ status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
+
+ dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
+ send_context_err_status_string(flags, sizeof(flags), status));
+
+ if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
+ handle_send_egress_err_info(dd);
+
+ /*
+ * Automatically restart halted kernel contexts out of interrupt
+ * context. User contexts must ask the driver to restart the context.
+ */
+ if (sc->type != SC_USER)
+ queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+}
+
+static void handle_sdma_eng_err(struct hfi1_devdata *dd,
+ unsigned int source, u64 status)
+{
+ struct sdma_engine *sde;
+
+ sde = &dd->per_sdma[source];
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
+ sde->this_idx, source, (unsigned long long)status);
+#endif
+ sdma_engine_error(sde, status);
+}
+
+/*
+ * CCE block SDMA error interrupt. Source is < 16.
+ */
+static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
+{
+#ifdef CONFIG_SDMA_VERBOSITY
+ struct sdma_engine *sde = &dd->per_sdma[source];
+
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
+ source);
+ sdma_dumpstate(sde);
+#endif
+ interrupt_clear_down(dd, source, &sdma_eng_err);
+}
+
+/*
+ * CCE block "various" interrupt. Source is < 8.
+ */
+static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &various_err[source];
+
+ /*
+ * TCritInt cannot go through interrupt_clear_down()
+ * because it is not a second tier interrupt. The handler
+ * should be called directly.
+ */
+ if (source == TCRIT_INT_SOURCE)
+ handle_temp_err(dd);
+ else if (eri->handler)
+ interrupt_clear_down(dd, 0, eri);
+ else
+ dd_dev_info(dd,
+ "%s: Unimplemented/reserved interrupt %d\n",
+ __func__, source);
+}
+
+static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
+{
+ /* source is always zero */
+ struct hfi1_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
+
+ if (reg & QSFP_HFI0_MODPRST_N) {
+ dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
+ __func__);
+
+ if (!qsfp_mod_present(ppd)) {
+ ppd->driver_link_ready = 0;
+ /*
+ * Cable removed, reset all our information about the
+ * cache and cable capabilities
+ */
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ /*
+ * We don't set cache_refresh_required here as we expect
+ * an interrupt when a cable is inserted
+ */
+ ppd->qsfp_info.cache_valid = 0;
+ ppd->qsfp_info.qsfp_interrupt_functional = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+ write_csr(dd,
+ dd->hfi1_id ?
+ ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT,
+ qsfp_int_mgmt);
+ if (ppd->host_link_state == HLS_DN_POLL) {
+ /*
+ * The link is still in POLL. This means
+ * that the normal link down processing
+ * will not happen. We have to do it here
+ * before turning the DC off.
+ */
+ queue_work(ppd->hfi1_wq, &ppd->link_down_work);
+ }
+ } else {
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 0;
+ ppd->qsfp_info.cache_refresh_required = 1;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+
+ qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
+ write_csr(dd,
+ dd->hfi1_id ?
+ ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT,
+ qsfp_int_mgmt);
+ }
+ }
+
+ if (reg & QSFP_HFI0_INT_N) {
+ dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
+ __func__);
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.check_interrupt_flags = 1;
+ ppd->qsfp_info.qsfp_interrupt_functional = 1;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+ }
+
+ /* Schedule the QSFP work only if there is a cable attached. */
+ if (qsfp_mod_present(ppd))
+ queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
+}
+
+static int request_host_lcb_access(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_MISC,
+ (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
+ NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "%s: command failed with error %d\n",
+ __func__, ret);
+ }
+ return ret == HCMD_SUCCESS ? 0 : -EBUSY;
+}
+
+static int request_8051_lcb_access(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_MISC,
+ (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
+ NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "%s: command failed with error %d\n",
+ __func__, ret);
+ }
+ return ret == HCMD_SUCCESS ? 0 : -EBUSY;
+}
+
+/*
+ * Set the LCB selector - allow host access. The DCC selector always
+ * points to the host.
+ */
+static inline void set_host_lcb_access(struct hfi1_devdata *dd)
+{
+ write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
+ | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
+}
+
+/*
+ * Clear the LCB selector - allow 8051 access. The DCC selector always
+ * points to the host.
+ */
+static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
+{
+ write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
+}
+
+/*
+ * Acquire LCB access from the 8051. If the host already has access,
+ * just increment a counter. Otherwise, inform the 8051 that the
+ * host is taking access.
+ *
+ * Returns:
+ * 0 on success
+ * -EBUSY if the 8051 has control and cannot be disturbed
+ * -errno if unable to acquire access from the 8051
+ */
+int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ int ret = 0;
+
+ /*
+ * Use the host link state lock so the operation of this routine
+ * { link state check, selector change, count increment } can occur
+ * as a unit against a link state change. Otherwise there is a
+ * race between the state change and the count increment.
+ */
+ if (sleep_ok) {
+ mutex_lock(&ppd->hls_lock);
+ } else {
+ while (mutex_trylock(&ppd->hls_lock) == EBUSY)
+ udelay(1);
+ }
+
+ /* this access is valid only when the link is up */
+ if ((ppd->host_link_state & HLS_UP) == 0) {
+ dd_dev_info(dd, "%s: link state %s not up\n",
+ __func__, link_state_name(ppd->host_link_state));
+ ret = -EBUSY;
+ goto done;
+ }
+
+ if (dd->lcb_access_count == 0) {
+ ret = request_host_lcb_access(dd);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire LCB access, err %d\n",
+ __func__, ret);
+ goto done;
+ }
+ set_host_lcb_access(dd);
+ }
+ dd->lcb_access_count++;
+done:
+ mutex_unlock(&ppd->hls_lock);
+ return ret;
+}
+
+/*
+ * Release LCB access by decrementing the use count. If the count is moving
+ * from 1 to 0, inform the 8051 that it has control back.
+ *
+ * Returns:
+ * 0 on success
+ * -errno if unable to release access to the 8051
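+ *
+ * Pairs with acquire_lcb_access(): every successful acquire must be
+ * balanced by exactly one release.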
+ */
+int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
+{
+ int ret = 0;
+
+ /*
+ * Use the host link state lock because the acquire needed it.
+ * Here, we only need to keep { selector change, count decrement }
+ * as a unit.
+ */
+ if (sleep_ok) {
+ mutex_lock(&dd->pport->hls_lock);
+ } else {
+ while (mutex_trylock(&dd->pport->hls_lock) == EBUSY)
+ udelay(1);
+ }
+
+ if (dd->lcb_access_count == 0) {
+ dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
+ __func__);
+ goto done;
+ }
+
+ if (dd->lcb_access_count == 1) {
+ set_8051_lcb_access(dd);
+ ret = request_8051_lcb_access(dd);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to release LCB access, err %d\n",
+ __func__, ret);
+ /* restore host access if the grant didn't work */
+ set_host_lcb_access(dd);
+ goto done;
+ }
+ }
+ dd->lcb_access_count--;
+done:
+ mutex_unlock(&dd->pport->hls_lock);
+ return ret;
+}
+
+/*
+ * Initialize LCB access variables and state. Called during driver load,
+ * after most of the initialization is finished.
+ *
+ * The DC default is LCB access on for the host. The driver defaults to
+ * leaving access to the 8051. Assign access now - this constrains the call
+ * to this routine to be after all LCB set-up is done. In particular, after
+ * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
+ */
+static void init_lcb_access(struct hfi1_devdata *dd)
+{
+ dd->lcb_access_count = 0;
+}
+
+/*
+ * Write a response back to an 8051 request.
+ */
+static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
+{
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
+ DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
+ | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
+ | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+}
+
+/*
+ * Handle requests from the 8051.
+ */
+static void handle_8051_request(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ u16 data;
+ u8 type;
+
+ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
+ if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
+ return; /* no request */
+
+ /* zero out COMPLETED so the response is seen */
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
+
+ /* extract request details */
+ type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
+ data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
+
+ switch (type) {
+ case HREQ_LOAD_CONFIG:
+ case HREQ_SAVE_CONFIG:
+ case HREQ_READ_CONFIG:
+ case HREQ_SET_TX_EQ_ABS:
+ case HREQ_SET_TX_EQ_REL:
+ case HREQ_ENABLE:
+ dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
+ type);
+ hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
+ break;
+
+ case HREQ_CONFIG_DONE:
+ hreq_response(dd, HREQ_SUCCESS, 0);
+ break;
+
+ case HREQ_INTERFACE_TEST:
+ hreq_response(dd, HREQ_SUCCESS, data);
+ break;
+
+ default:
+ dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
+ hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
+ break;
+ }
+}
+
+static void write_global_credit(struct hfi1_devdata *dd,
+ u8 vau, u16 total, u16 shared)
+{
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT,
+ ((u64)total
+ << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
+ | ((u64)shared
+ << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
+ | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+}
+
+/*
+ * Set up the initial VL15 credits of the remote. Assumes the rest of
+ * the CM credit registers are zero from a previous global or credit reset.
+ */
+void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+{
+ /* leave shared count at zero for both global and VL15 */
+ write_global_credit(dd, vau, vl15buf, 0);
+
+ /*
+ * We may need some credits for another VL when sending packets
+ * with the snoop interface. Dividing it down the middle for VL15
+ * and VL0 should suffice.
+ */
+ if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
+ write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
+ << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
+ } else {
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
+ }
+}
+
+/*
+ * Zero all credit details from the previous connection and
+ * reset the CM manager's internal counters.
+ */
+void reset_link_credits(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* remove all previous VL credit limits */
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL15, 0);
+ write_global_credit(dd, 0, 0, 0);
+ /* reset the CM block */
+ pio_send_control(dd, PSC_CM_RESET);
+}
+
+/* convert a vCU to a CU */
+static u32 vcu_to_cu(u8 vcu)
+{
+ return 1 << vcu;
+}
+
+/* convert a CU to a vCU */
+static u8 cu_to_vcu(u32 cu)
+{
+ return ilog2(cu);
+}
+
+/* convert a vAU to an AU */
+static u32 vau_to_au(u8 vau)
+{
+ return 8 * (1 << vau);
+}
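+
+/* e.g. vcu_to_cu(2) == 4 CUs and vau_to_au(2) == 32 bytes */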
+
+static void set_linkup_defaults(struct hfi1_pportdata *ppd)
+{
+ ppd->sm_trap_qp = 0x0;
+ ppd->sa_qp = 0x1;
+}
+
+/*
+ * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
+ */
+static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
+{
+ u64 reg;
+
+ /* clear lcb run: LCB_CFG_RUN.EN = 0 */
+ write_csr(dd, DC_LCB_CFG_RUN, 0);
+ /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
+ 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
+ /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
+ dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
+ reg = read_csr(dd, DCC_CFG_RESET);
+ write_csr(dd, DCC_CFG_RESET,
+ reg
+ | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
+ | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
+ (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
+ if (!abort) {
+ udelay(1); /* must hold for the longer of 16cclks or 20ns */
+ write_csr(dd, DCC_CFG_RESET, reg);
+ write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
+ }
+}
+
+/*
+ * This routine should be called after the link has been transitioned to
+ * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
+ * reset).
+ *
+ * The expectation is that the caller of this routine would have taken
+ * care of properly transitioning the link into the correct state.
+ */
+static void dc_shutdown(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dc8051_lock, flags);
+ if (dd->dc_shutdown) {
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ return;
+ }
+ dd->dc_shutdown = 1;
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ /* Shutdown the LCB */
+ lcb_shutdown(dd, 1);
+ /*
+ * Going to OFFLINE would have caused the 8051 to put the
+ * SerDes into reset already. Just need to shut down the
+ * 8051 itself.
+ */
+ write_csr(dd, DC_DC8051_CFG_RST, 0x1);
+}
+
+/*
+ * Calling this after the DC has been brought out of reset should not
+ * do any damage.
+ */
+static void dc_start(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dd->dc8051_lock, flags);
+ if (!dd->dc_shutdown)
+ goto done;
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) {
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
+ }
+ /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
+ write_csr(dd, DCC_CFG_RESET, 0x10);
+ /* lcb_shutdown() with abort=1 does not restore these */
+ write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
+ spin_lock_irqsave(&dd->dc8051_lock, flags);
+ dd->dc_shutdown = 0;
+done:
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+}
+
+/*
+ * These LCB adjustments are for the Aurora SerDes core in the FPGA.
+ */
+static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
+{
+ u64 rx_radr, tx_radr;
+ u32 version;
+
+ if (dd->icode != ICODE_FPGA_EMULATION)
+ return;
+
+ /*
+ * These LCB defaults on emulator _s are good, nothing to do here:
+ * LCB_CFG_TX_FIFOS_RADR
+ * LCB_CFG_RX_FIFOS_RADR
+ * LCB_CFG_LN_DCLK
+ * LCB_CFG_IGNORE_LOST_RCLK
+ */
+ if (is_emulator_s(dd))
+ return;
+ /* else this is _p */
+
+ version = emulator_rev(dd);
+ if (!is_a0(dd))
+ version = 0x2d; /* all B0 use 0x2d or higher settings */
+
+ if (version <= 0x12) {
+ /* release 0x12 and below */
+
+ /*
+ * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
+ * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
+ * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
+ */
+ rx_radr =
+ 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ /*
+ * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
+ * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
+ */
+ tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version <= 0x18) {
+ /* release 0x13 up to 0x18 */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
+ rx_radr =
+ 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version == 0x19) {
+ /* release 0x19 */
+ /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
+ rx_radr =
+ 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ } else if (version == 0x1a) {
+ /* release 0x1a */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
+ rx_radr =
+ 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
+ } else {
+ /* release 0x1b and higher */
+ /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
+ rx_radr =
+ 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
+ | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
+ | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
+ tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
+ }
+
+ write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
+ /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
+ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
+ DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
+}
+
+/*
+ * Handle a SMA idle message
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_sma_message(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ sma_message_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 msg;
+ int ret;
+
+ /*
+ * msg is bytes 1-4 of the 40-bit idle message - the command code
+ * is stripped off
+ */
+ ret = read_idle_sma(dd, &msg);
+ if (ret)
+ return;
+ dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
+ /*
+ * React to the SMA message. Byte[1] (0 for us) is the command.
+ */
+ switch (msg & 0xff) {
+ case SMA_IDLE_ARM:
+ /*
+ * See OPAv1 table 9-14 - HFI and External Switch Ports Key
+ * State Transitions
+ *
+ * Only expected in INIT or ARMED, discard otherwise.
+ */
+ if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
+ ppd->neighbor_normal = 1;
+ break;
+ case SMA_IDLE_ACTIVE:
+ /*
+ * See OPAv1 table 9-14 - HFI and External Switch Ports Key
+ * State Transitions
+ *
+ * Can activate the node. Discard otherwise.
+ */
+ if (ppd->host_link_state == HLS_UP_ARMED
+ && ppd->is_active_optimize_enabled) {
+ ppd->neighbor_normal = 1;
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (ret)
+ dd_dev_err(
+ dd,
+ "%s: received Active SMA idle message, couldn't set link to Active\n",
+ __func__);
+ }
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: received unexpected SMA idle message 0x%llx\n",
+ __func__, msg);
+ break;
+ }
+}
+
+static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
+{
+ u64 rcvctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->rcvctrl_lock, flags);
+ rcvctrl = read_csr(dd, RCV_CTRL);
+ rcvctrl |= add;
+ rcvctrl &= ~clear;
+ write_csr(dd, RCV_CTRL, rcvctrl);
+ spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
+}
+
+static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
+{
+ adjust_rcvctrl(dd, add, 0);
+}
+
+static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
+{
+ adjust_rcvctrl(dd, 0, clear);
+}
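+/*
+ * Usage note (illustrative): add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK)
+ * sets the port enable bit in RCV_CTRL under rcvctrl_lock, and
+ * clear_rcvctrl() with the same mask removes it again, as the freeze and
+ * link-down paths below do.
+ */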
+
+/*
+ * Called from all interrupt handlers to start handling an SPC freeze.
+ */
+void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct send_context *sc;
+ int i;
+
+ if (flags & FREEZE_SELF)
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
+
+ /* enter frozen mode */
+ dd->flags |= HFI1_FROZEN;
+
+ /* notify all SDMA engines that they are going into a freeze */
+ sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
+
+ /* do halt pre-handling on all enabled send contexts */
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (sc && (sc->flags & SCF_ENABLED))
+ sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+ }
+
+	/* Send contexts are frozen. Notify user space */
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
+
+ if (flags & FREEZE_ABORT) {
+ dd_dev_err(dd,
+ "Aborted freeze recovery. Please REBOOT system\n");
+ return;
+ }
+ /* queue non-interrupt handler */
+ queue_work(ppd->hfi1_wq, &ppd->freeze_work);
+}
+
+/*
+ * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
+ * depending on the "freeze" parameter.
+ *
+ * No need to return an error if it times out, our only option
+ * is to proceed anyway.
+ */
+static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, CCE_STATUS);
+ if (freeze) {
+ /* waiting until all indicators are set */
+ if ((reg & ALL_FROZE) == ALL_FROZE)
+ return; /* all done */
+ } else {
+ /* waiting until all indicators are clear */
+ if ((reg & ALL_FROZE) == 0)
+ return; /* all done */
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
+ freeze ? "" : "un",
+ reg & ALL_FROZE,
+ freeze ? ALL_FROZE : 0ull);
+ return;
+ }
+ usleep_range(80, 120);
+ }
+}
+
+/*
+ * Do all freeze handling for the RXE block.
+ */
+static void rxe_freeze(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* disable port */
+ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ /* disable all receive contexts */
+ for (i = 0; i < dd->num_rcv_contexts; i++)
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
+}
+
+/*
+ * Unfreeze handling for the RXE block - kernel contexts only.
+ * This will also enable the port. User contexts will do unfreeze
+ * handling on a per-context basis as they call into the driver.
+ */
+static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* enable all kernel contexts */
+ for (i = 0; i < dd->n_krcv_queues; i++)
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
+
+ /* enable port */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+}
+
+/*
+ * Non-interrupt SPC freeze handling.
+ *
+ * This is a work-queue function outside of the triggering interrupt.
+ */
+void handle_freeze(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ freeze_work);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* wait for freeze indicators on all affected blocks */
+ dd_dev_info(dd, "Entering SPC freeze\n");
+ wait_for_freeze_status(dd, 1);
+
+ /* SPC is now frozen */
+
+ /* do send PIO freeze steps */
+ pio_freeze(dd);
+
+ /* do send DMA freeze steps */
+ sdma_freeze(dd);
+
+ /* do send egress freeze steps - nothing to do */
+
+ /* do receive freeze steps */
+ rxe_freeze(dd);
+
+ /*
+ * Unfreeze the hardware - clear the freeze, wait for each
+ * block's frozen bit to clear, then clear the frozen flag.
+ */
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ wait_for_freeze_status(dd, 0);
+
+ if (is_a0(dd)) {
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
+ wait_for_freeze_status(dd, 1);
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ wait_for_freeze_status(dd, 0);
+ }
+
+ /* do send PIO unfreeze steps for kernel contexts */
+ pio_kernel_unfreeze(dd);
+
+ /* do send DMA unfreeze steps */
+ sdma_unfreeze(dd);
+
+ /* do send egress unfreeze steps - nothing to do */
+
+ /* do receive unfreeze steps for kernel contexts */
+ rxe_kernel_unfreeze(dd);
+
+ /*
+ * The unfreeze procedure touches global device registers when
+ * it disables and re-enables RXE. Mark the device unfrozen
+ * after all that is done so other parts of the driver waiting
+ * for the device to unfreeze don't do things out of order.
+ *
+ * The above implies that the meaning of HFI1_FROZEN flag is
+ * "Device has gone into freeze mode and freeze mode handling
+ * is still in progress."
+ *
+ * The flag will be removed when freeze mode processing has
+ * completed.
+ */
+ dd->flags &= ~HFI1_FROZEN;
+ wake_up(&dd->event_queue);
+
+ /* no longer frozen */
+	dd_dev_info(dd, "Exiting SPC freeze\n");
+}
+
+/*
+ * Handle a link up interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_up(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_up_work);
+ set_link_state(ppd, HLS_UP_INIT);
+
+ /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
+ read_ltp_rtt(ppd->dd);
+ /*
+ * OPA specifies that certain counters are cleared on a transition
+ * to link up, so do that.
+ */
+ clear_linkup_counters(ppd->dd);
+ /*
+ * And (re)set link up default values.
+ */
+ set_linkup_defaults(ppd);
+
+ /* enforce link speed enabled */
+ if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
+ /* oops - current speed is not enabled, bounce */
+ dd_dev_err(ppd->dd,
+ "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
+ ppd->link_speed_active, ppd->link_speed_enabled);
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
+ OPA_LINKDOWN_REASON_SPEED_POLICY);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ }
+}
+
+/*
+ * Several pieces of LNI information were cached for SMA in ppd.
+ * Reset these on link down.
+ */
+static void reset_neighbor_info(struct hfi1_pportdata *ppd)
+{
+ ppd->neighbor_guid = 0;
+ ppd->neighbor_port_number = 0;
+ ppd->neighbor_type = 0;
+ ppd->neighbor_fm_security = 0;
+}
+
+/*
+ * Handle a link down interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_down(struct work_struct *work)
+{
+ u8 lcl_reason, neigh_reason = 0;
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_down_work);
+
+ /* go offline first, then deal with reasons */
+ set_link_state(ppd, HLS_DN_OFFLINE);
+
+ lcl_reason = 0;
+ read_planned_down_reason_code(ppd->dd, &neigh_reason);
+
+ /*
+ * If no reason, assume peer-initiated but missed
+ * LinkGoingDown idle flits.
+ */
+ if (neigh_reason == 0)
+ lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
+
+ set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
+
+ reset_neighbor_info(ppd);
+
+ /* disable the port */
+ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+	/*
+	 * If there is no cable attached, turn the DC off. Otherwise,
+	 * start the link bring up.
+	 */
+ if (!qsfp_mod_present(ppd))
+ dc_shutdown(ppd->dd);
+ else
+ start_link(ppd);
+}
+
+void handle_link_bounce(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_bounce_work);
+
+ /*
+ * Only do something if the link is currently up.
+ */
+ if (ppd->host_link_state & HLS_UP) {
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ } else {
+ dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
+ __func__, link_state_name(ppd->host_link_state));
+ }
+}
+
+/*
+ * Mask conversion: Capability exchange to Port LTP. The capability
+ * exchange has an implicit 16b CRC that is mandatory.
+ */
+static int cap_to_port_ltp(int cap)
+{
+ int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
+
+ if (cap & CAP_CRC_14B)
+ port_ltp |= PORT_LTP_CRC_MODE_14;
+ if (cap & CAP_CRC_48B)
+ port_ltp |= PORT_LTP_CRC_MODE_48;
+ if (cap & CAP_CRC_12B_16B_PER_LANE)
+ port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
+
+ return port_ltp;
+}
+
+/*
+ * Convert an OPA Port LTP mask to capability mask
+ */
+int port_ltp_to_cap(int port_ltp)
+{
+ int cap_mask = 0;
+
+ if (port_ltp & PORT_LTP_CRC_MODE_14)
+ cap_mask |= CAP_CRC_14B;
+ if (port_ltp & PORT_LTP_CRC_MODE_48)
+ cap_mask |= CAP_CRC_48B;
+ if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
+ cap_mask |= CAP_CRC_12B_16B_PER_LANE;
+
+ return cap_mask;
+}
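+/*
+ * Note (illustrative): the two conversions above are not exact inverses.
+ * cap_to_port_ltp() always includes the mandatory PORT_LTP_CRC_MODE_16,
+ * while port_ltp_to_cap() never reports it, so a cap -> port_ltp -> cap
+ * round trip drops the implicit 16b mode.
+ */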
+
+/*
+ * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
+ */
+static int lcb_to_port_ltp(int lcb_crc)
+{
+ int port_ltp = 0;
+
+ if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
+ port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
+ else if (lcb_crc == LCB_CRC_48B)
+ port_ltp = PORT_LTP_CRC_MODE_48;
+ else if (lcb_crc == LCB_CRC_14B)
+ port_ltp = PORT_LTP_CRC_MODE_14;
+ else
+ port_ltp = PORT_LTP_CRC_MODE_16;
+
+ return port_ltp;
+}
+
+/*
+ * Our neighbor has indicated that we are allowed to act as a fabric
+ * manager, so place the full management partition key in pkey array
+ * position 2 (0-based) (see OPAv1, section 20.2.2.6.8). Note
+ * that we should already have the limited management partition key in
+ * array element 1, and also that the port is not yet up when
+ * add_full_mgmt_pkey() is invoked.
+ */
+static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* Sanity check - ppd->pkeys[2] should be 0 */
+ if (ppd->pkeys[2] != 0)
+ dd_dev_err(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
+ __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
+ ppd->pkeys[2] = FULL_MGMT_P_KEY;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+}
+
+/*
+ * Convert the given link width to the OPA link width bitmask.
+ */
+static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
+{
+ switch (width) {
+ case 0:
+ /*
+ * Simulator and quick linkup do not set the width.
+ * Just set it to 4x without complaint.
+ */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
+ return OPA_LINK_WIDTH_4X;
+ return 0; /* no lanes up */
+ case 1: return OPA_LINK_WIDTH_1X;
+ case 2: return OPA_LINK_WIDTH_2X;
+ case 3: return OPA_LINK_WIDTH_3X;
+ default:
+ dd_dev_info(dd, "%s: invalid width %d, using 4\n",
+ __func__, width);
+ /* fall through */
+ case 4: return OPA_LINK_WIDTH_4X;
+ }
+}
+
+/*
+ * Do a population count on the bottom nibble.
+ */
+static const u8 bit_counts[16] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
+};
+static inline u8 nibble_to_count(u8 nibble)
+{
+ return bit_counts[nibble & 0xf];
+}
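+/*
+ * e.g. nibble_to_count(0xb) == 3: 0xb is binary 1011, which has three
+ * bits set (bit_counts[11] == 3).
+ */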
+
+/*
+ * Read the active lane information from the 8051 registers and return
+ * their widths.
+ *
+ * Active lane information is found in these 8051 registers:
+ * enable_lane_tx
+ * enable_lane_rx
+ */
+static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
+ u16 *rx_width)
+{
+ u16 tx, rx;
+ u8 enable_lane_rx;
+ u8 enable_lane_tx;
+ u8 tx_polarity_inversion;
+ u8 rx_polarity_inversion;
+ u8 max_rate;
+
+ /* read the active lanes */
+ read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
+ &rx_polarity_inversion, &max_rate);
+ read_local_lni(dd, &enable_lane_rx);
+
+ /* convert to counts */
+ tx = nibble_to_count(enable_lane_tx);
+ rx = nibble_to_count(enable_lane_rx);
+
+ /*
+ * Set link_speed_active here, overriding what was set in
+ * handle_verify_cap(). The ASIC 8051 firmware does not correctly
+ * set the max_rate field in handle_verify_cap until v0.19.
+ */
+	if ((dd->icode == ICODE_RTL_SILICON) &&
+	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
+ /* max_rate: 0 = 12.5G, 1 = 25G */
+ switch (max_rate) {
+ case 0:
+ dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
+ break;
+ default:
+ dd_dev_err(dd,
+ "%s: unexpected max rate %d, using 25Gb\n",
+ __func__, (int)max_rate);
+ /* fall through */
+ case 1:
+ dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
+ break;
+ }
+ }
+
+ dd_dev_info(dd,
+ "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
+ enable_lane_tx, tx, enable_lane_rx, rx);
+ *tx_width = link_width_to_bits(dd, tx);
+ *rx_width = link_width_to_bits(dd, rx);
+}
+
+/*
+ * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
+ * Valid after the end of VerifyCap and during LinkUp. Does not change
+ * after link up. I.e. look elsewhere for downgrade information.
+ *
+ * Bits are:
+ * + bits [7:4] contain the number of active transmitters
+ * + bits [3:0] contain the number of active receivers
+ * These are numbers 1 through 4 and can be different values if the
+ * link is asymmetric.
+ *
+ * verify_cap_local_fm_link_width[0] retains its original value.
+ */
+static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
+ u16 *rx_width)
+{
+ u16 widths, tx, rx;
+ u8 misc_bits, local_flags;
+ u16 active_tx, active_rx;
+
+ read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
+ tx = widths >> 12;
+ rx = (widths >> 8) & 0xf;
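+	/*
+	 * Worked example (illustrative): widths == 0x4300 decodes to
+	 * tx = 4 and rx = 3, an asymmetric link.
+	 */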
+
+ *tx_width = link_width_to_bits(dd, tx);
+ *rx_width = link_width_to_bits(dd, rx);
+
+ /* print the active widths */
+ get_link_widths(dd, &active_tx, &active_rx);
+}
+
+/*
+ * Set ppd->link_width_active and ppd->link_width_downgrade_active using
+ * hardware information when the link first comes up.
+ *
+ * The link width is not available until after VerifyCap.AllFramesReceived
+ * (the trigger for handle_verify_cap), so this is outside that routine
+ * and should be called when the 8051 signals linkup.
+ */
+void get_linkup_link_widths(struct hfi1_pportdata *ppd)
+{
+ u16 tx_width, rx_width;
+
+ /* get end-of-LNI link widths */
+ get_linkup_widths(ppd->dd, &tx_width, &rx_width);
+
+ /* use tx_width as the link is supposed to be symmetric on link up */
+ ppd->link_width_active = tx_width;
+ /* link width downgrade active (LWD.A) starts out matching LW.A */
+ ppd->link_width_downgrade_tx_active = ppd->link_width_active;
+ ppd->link_width_downgrade_rx_active = ppd->link_width_active;
+ /* per OPA spec, on link up LWD.E resets to LWD.S */
+ ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
+	/* cache the active egress rate (units [10^6 bits/sec]) */
+ ppd->current_egress_rate = active_egress_rate(ppd);
+}
+
+/*
+ * Handle a verify capabilities interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_verify_cap(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_vc_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+ u8 power_management;
+	u8 continuous;
+ u8 vcu;
+ u8 vau;
+ u8 z;
+ u16 vl15buf;
+ u16 link_widths;
+ u16 crc_mask;
+ u16 crc_val;
+ u16 device_id;
+ u16 active_tx, active_rx;
+ u8 partner_supported_crc;
+ u8 remote_tx_rate;
+ u8 device_rev;
+
+ set_link_state(ppd, HLS_VERIFY_CAP);
+
+ lcb_shutdown(dd, 0);
+ adjust_lcb_for_fpga_serdes(dd);
+
+ /*
+ * These are now valid:
+ * remote VerifyCap fields in the general LNI config
+ * CSR DC8051_STS_REMOTE_GUID
+ * CSR DC8051_STS_REMOTE_NODE_TYPE
+ * CSR DC8051_STS_REMOTE_FM_SECURITY
+ * CSR DC8051_STS_REMOTE_PORT_NO
+ */
+
+	read_vc_remote_phy(dd, &power_management, &continuous);
+ read_vc_remote_fabric(
+ dd,
+ &vau,
+ &z,
+ &vcu,
+ &vl15buf,
+ &partner_supported_crc);
+ read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
+ read_remote_device_id(dd, &device_id, &device_rev);
+ /*
+ * And the 'MgmtAllowed' information, which is exchanged during
+	 * LNI, is also available at this point.
+ */
+ read_mgmt_allowed(dd, &ppd->mgmt_allowed);
+ /* print the active widths */
+ get_link_widths(dd, &active_tx, &active_rx);
+ dd_dev_info(dd,
+ "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
+		    (int)power_management, (int)continuous);
+ dd_dev_info(dd,
+ "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
+ (int)vau,
+ (int)z,
+ (int)vcu,
+ (int)vl15buf,
+ (int)partner_supported_crc);
+ dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
+ (u32)remote_tx_rate, (u32)link_widths);
+ dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
+ (u32)device_id, (u32)device_rev);
+ /*
+ * The peer vAU value just read is the peer receiver value. HFI does
+ * not support a transmit vAU of 0 (AU == 8). We advertised that
+ * with Z=1 in the fabric capabilities sent to the peer. The peer
+ * will see our Z=1, and, if it advertised a vAU of 0, will move its
+ * receive to vAU of 1 (AU == 16). Do the same here. We do not care
+ * about the peer Z value - our sent vAU is 3 (hardwired) and is not
+ * subject to the Z value exception.
+ */
+ if (vau == 0)
+ vau = 1;
+ set_up_vl15(dd, vau, vl15buf);
+
+ /* set up the LCB CRC mode */
+ crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
+
+ /* order is important: use the lowest bit in common */
+ if (crc_mask & CAP_CRC_14B)
+ crc_val = LCB_CRC_14B;
+ else if (crc_mask & CAP_CRC_48B)
+ crc_val = LCB_CRC_48B;
+ else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
+ crc_val = LCB_CRC_12B_16B_PER_LANE;
+ else
+ crc_val = LCB_CRC_16B;
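+	/*
+	 * Example (illustrative): if we enable 14B and 48B but the peer
+	 * supports only 48B, crc_mask has just CAP_CRC_48B set and the
+	 * chain above selects LCB_CRC_48B.
+	 */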
+
+ dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
+ write_csr(dd, DC_LCB_CFG_CRC_MODE,
+ (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
+
+ /* set (14b only) or clear sideband credit */
+ reg = read_csr(dd, SEND_CM_CTRL);
+ if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
+ write_csr(dd, SEND_CM_CTRL,
+ reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ } else {
+ write_csr(dd, SEND_CM_CTRL,
+ reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ }
+
+ ppd->link_speed_active = 0; /* invalid value */
+ if (dd->dc8051_ver < dc8051_ver(0, 20)) {
+ /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
+ switch (remote_tx_rate) {
+ case 0:
+ ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
+ break;
+ case 1:
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ break;
+ }
+ } else {
+ /* actual rate is highest bit of the ANDed rates */
+ u8 rate = remote_tx_rate & ppd->local_tx_rate;
+
+ if (rate & 2)
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ else if (rate & 1)
+ ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
+ }
+ if (ppd->link_speed_active == 0) {
+ dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
+ __func__, (int)remote_tx_rate);
+ ppd->link_speed_active = OPA_LINK_SPEED_25G;
+ }
+
+ /*
+ * Cache the values of the supported, enabled, and active
+ * LTP CRC modes to return in 'portinfo' queries. But the bit
+ * flags that are returned in the portinfo query differ from
+ * what's in the link_crc_mask, crc_sizes, and crc_val
+ * variables. Convert these here.
+ */
+	/* supported crc modes */
+	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
+	/* enabled crc modes */
+	ppd->port_ltp_crc_mode |=
+		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
+	/* active crc mode */
+	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
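+	/*
+	 * Resulting layout (from the shifts above): bits [11:8] hold the
+	 * supported modes, bits [7:4] the enabled modes, and bits [3:0]
+	 * the active mode, each as a Port LTP mask.
+	 */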
+
+ /* set up the remote credit return table */
+ assign_remote_cm_au_table(dd, vcu);
+
+ /*
+ * The LCB is reset on entry to handle_verify_cap(), so this must
+ * be applied on every link up.
+ *
+ * Adjust LCB error kill enable to kill the link if
+ * these RBUF errors are seen:
+ * REPLAY_BUF_MBE_SMASK
+ * FLIT_INPUT_BUF_MBE_SMASK
+ */
+ if (is_a0(dd)) { /* fixed in B0 */
+ reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
+ reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
+ | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
+ write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
+ }
+
+ /* pull LCB fifos out of reset - all fifo clocks must be stable */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
+
+ /* give 8051 access to the LCB CSRs */
+ write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
+ set_8051_lcb_access(dd);
+
+ ppd->neighbor_guid =
+ read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
+ ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
+ DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
+ ppd->neighbor_type =
+ read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
+ DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
+ ppd->neighbor_fm_security =
+ read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
+ DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
+ dd_dev_info(dd,
+ "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
+ ppd->neighbor_guid, ppd->neighbor_type,
+ ppd->mgmt_allowed, ppd->neighbor_fm_security);
+ if (ppd->mgmt_allowed)
+ add_full_mgmt_pkey(ppd);
+
+ /* tell the 8051 to go to LinkUp */
+ set_link_state(ppd, HLS_GOING_UP);
+}
+
+/*
+ * Apply the link width downgrade enabled policy against the current active
+ * link widths.
+ *
+ * Called when the enabled policy changes or the active link widths change.
+ */
+void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
+{
+ int skip = 1;
+ int do_bounce = 0;
+ u16 lwde = ppd->link_width_downgrade_enabled;
+ u16 tx, rx;
+
+ mutex_lock(&ppd->hls_lock);
+ /* only apply if the link is up */
+ if (ppd->host_link_state & HLS_UP)
+ skip = 0;
+ mutex_unlock(&ppd->hls_lock);
+ if (skip)
+ return;
+
+ if (refresh_widths) {
+ get_link_widths(ppd->dd, &tx, &rx);
+ ppd->link_width_downgrade_tx_active = tx;
+ ppd->link_width_downgrade_rx_active = rx;
+ }
+
+ if (lwde == 0) {
+ /* downgrade is disabled */
+
+ /* bounce if not at starting active width */
+		if ((ppd->link_width_active !=
+		     ppd->link_width_downgrade_tx_active) ||
+		    (ppd->link_width_active !=
+		     ppd->link_width_downgrade_rx_active)) {
+ dd_dev_err(ppd->dd,
+ "Link downgrade is disabled and link has downgraded, downing link\n");
+ dd_dev_err(ppd->dd,
+ " original 0x%x, tx active 0x%x, rx active 0x%x\n",
+ ppd->link_width_active,
+ ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
+ do_bounce = 1;
+ }
+	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
+		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
+ /* Tx or Rx is outside the enabled policy */
+ dd_dev_err(ppd->dd,
+ "Link is outside of downgrade allowed, downing link\n");
+ dd_dev_err(ppd->dd,
+ " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
+ lwde,
+ ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
+ do_bounce = 1;
+ }
+
+ if (do_bounce) {
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
+ OPA_LINKDOWN_REASON_WIDTH_POLICY);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ start_link(ppd);
+ }
+}
+
+/*
+ * Handle a link downgrade interrupt from the 8051.
+ *
+ * This is a work-queue function outside of the interrupt.
+ */
+void handle_link_downgrade(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ link_downgrade_work);
+
+ dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
+ apply_link_downgrade_policy(ppd, 1);
+}
+
+static char *dcc_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dcc_err_flags,
+ ARRAY_SIZE(dcc_err_flags));
+}
+
+static char *lcb_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, lcb_err_flags,
+ ARRAY_SIZE(lcb_err_flags));
+}
+
+static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_err_flags,
+ ARRAY_SIZE(dc8051_err_flags));
+}
+
+static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
+ ARRAY_SIZE(dc8051_info_err_flags));
+}
+
+static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
+{
+ return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
+ ARRAY_SIZE(dc8051_info_host_msg_flags));
+}
+
+static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 info, err, host_msg;
+ int queue_link_down = 0;
+ char buf[96];
+
+ /* look at the flags */
+ if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
+ /* 8051 information set by firmware */
+ /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
+ info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
+ err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
+ & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
+ host_msg = (info >>
+ DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
+ & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
+
+ /*
+ * Handle error flags.
+ */
+ if (err & FAILED_LNI) {
+ /*
+ * LNI error indications are cleared by the 8051
+ * only when starting polling. Only pay attention
+ * to them when in the states that occur during
+ * LNI.
+ */
+			if (ppd->host_link_state &
+			    (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
+ queue_link_down = 1;
+ dd_dev_info(dd, "Link error: %s\n",
+ dc8051_info_err_string(buf,
+ sizeof(buf),
+ err & FAILED_LNI));
+ }
+ err &= ~(u64)FAILED_LNI;
+ }
+ if (err) {
+ /* report remaining errors, but do not do anything */
+ dd_dev_err(dd, "8051 info error: %s\n",
+ dc8051_info_err_string(buf, sizeof(buf), err));
+ }
+
+ /*
+ * Handle host message flags.
+ */
+ if (host_msg & HOST_REQ_DONE) {
+ /*
+ * Presently, the driver does a busy wait for
+ * host requests to complete. This is only an
+ * informational message.
+ * NOTE: The 8051 clears the host message
+ * information *on the next 8051 command*.
+ * Therefore, when linkup is achieved,
+ * this flag will still be set.
+ */
+ host_msg &= ~(u64)HOST_REQ_DONE;
+ }
+ if (host_msg & BC_SMA_MSG) {
+ queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
+ host_msg &= ~(u64)BC_SMA_MSG;
+ }
+ if (host_msg & LINKUP_ACHIEVED) {
+ dd_dev_info(dd, "8051: Link up\n");
+ queue_work(ppd->hfi1_wq, &ppd->link_up_work);
+ host_msg &= ~(u64)LINKUP_ACHIEVED;
+ }
+ if (host_msg & EXT_DEVICE_CFG_REQ) {
+ handle_8051_request(dd);
+ host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
+ }
+ if (host_msg & VERIFY_CAP_FRAME) {
+ queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
+ host_msg &= ~(u64)VERIFY_CAP_FRAME;
+ }
+ if (host_msg & LINK_GOING_DOWN) {
+			const char *extra = "";
+
+			/* no downgrade action needed if going down */
+ if (host_msg & LINK_WIDTH_DOWNGRADED) {
+ host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
+ extra = " (ignoring downgrade)";
+ }
+ dd_dev_info(dd, "8051: Link down%s\n", extra);
+ queue_link_down = 1;
+ host_msg &= ~(u64)LINK_GOING_DOWN;
+ }
+ if (host_msg & LINK_WIDTH_DOWNGRADED) {
+ queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
+ host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
+ }
+ if (host_msg) {
+ /* report remaining messages, but do not do anything */
+ dd_dev_info(dd, "8051 info host message: %s\n",
+ dc8051_info_host_msg_string(buf, sizeof(buf),
+ host_msg));
+ }
+
+ reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
+ }
+ if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
+ /*
+ * Lost the 8051 heartbeat. If this happens, we
+ * receive constant interrupts about it. Disable
+ * the interrupt after the first.
+ */
+ dd_dev_err(dd, "Lost 8051 heartbeat\n");
+ write_csr(dd, DC_DC8051_ERR_EN,
+ read_csr(dd, DC_DC8051_ERR_EN)
+ & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
+
+ reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
+ }
+ if (reg) {
+ /* report the error, but do not do anything */
+ dd_dev_err(dd, "8051 error: %s\n",
+ dc8051_err_string(buf, sizeof(buf), reg));
+ }
+
+ if (queue_link_down) {
+		/*
+		 * If the link is already going down or disabled, do not
+		 * queue another.
+		 */
+		if ((ppd->host_link_state &
+		     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
+		    ppd->link_enabled == 0) {
+ dd_dev_info(dd, "%s: not queuing link down\n",
+ __func__);
+ } else {
+ queue_work(ppd->hfi1_wq, &ppd->link_down_work);
+ }
+ }
+}
+
+static const char * const fm_config_txt[] = {
+[0] =
+ "BadHeadDist: Distance violation between two head flits",
+[1] =
+ "BadTailDist: Distance violation between two tail flits",
+[2] =
+ "BadCtrlDist: Distance violation between two credit control flits",
+[3] =
+ "BadCrdAck: Credits return for unsupported VL",
+[4] =
+ "UnsupportedVLMarker: Received VL Marker",
+[5] =
+ "BadPreempt: Exceeded the preemption nesting level",
+[6] =
+ "BadControlFlit: Received unsupported control flit",
+/* no 7 */
+[8] =
+ "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
+};
+
+static const char * const port_rcv_txt[] = {
+[1] =
+ "BadPktLen: Illegal PktLen",
+[2] =
+ "PktLenTooLong: Packet longer than PktLen",
+[3] =
+ "PktLenTooShort: Packet shorter than PktLen",
+[4] =
+ "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
+[5] =
+ "BadDLID: Illegal DLID (0, doesn't match HFI)",
+[6] =
+ "BadL2: Illegal L2 opcode",
+[7] =
+ "BadSC: Unsupported SC",
+[9] =
+ "BadRC: Illegal RC",
+[11] =
+ "PreemptError: Preempting with same VL",
+[12] =
+ "PreemptVL15: Preempting a VL15 packet",
+};
+
+#define OPA_LDR_FMCONFIG_OFFSET 16
+#define OPA_LDR_PORTRCV_OFFSET 0
+static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ u64 info, hdr0, hdr1;
+ const char *extra;
+ char buf[96];
+ struct hfi1_pportdata *ppd = dd->pport;
+ u8 lcl_reason = 0;
+ int do_bounce = 0;
+
+ if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
+ if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
+ info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
+ dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
+ }
+ reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
+		/* this counter saturates at (2^32) - 1 */
+		if (ppd->link_downed < UINT_MAX)
+ ppd->link_downed++;
+ reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
+ u8 reason_valid = 1;
+
+ info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
+ if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
+ dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
+ }
+ switch (info) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ extra = fm_config_txt[info];
+ break;
+ case 8:
+ extra = fm_config_txt[info];
+ if (ppd->port_error_action &
+ OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
+ do_bounce = 1;
+ /*
+ * lcl_reason cannot be derived from info
+ * for this error
+ */
+ lcl_reason =
+ OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
+ }
+ break;
+ default:
+ reason_valid = 0;
+			snprintf(buf, sizeof(buf), "reserved%llu", info);
+ extra = buf;
+ break;
+ }
+
+ if (reason_valid && !do_bounce) {
+ do_bounce = ppd->port_error_action &
+ (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
+ lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
+ }
+
+ /* just report this */
+ dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
+ reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
+ u8 reason_valid = 1;
+
+ info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
+ hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
+ hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
+ if (!(dd->err_info_rcvport.status_and_code &
+ OPA_EI_STATUS_SMASK)) {
+ dd->err_info_rcvport.status_and_code =
+ info & OPA_EI_CODE_SMASK;
+ /* set status bit */
+ dd->err_info_rcvport.status_and_code |=
+ OPA_EI_STATUS_SMASK;
+			/*
+			 * save first 2 flits in the packet that caused
+			 * the error
+			 */
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
+ }
+ switch (info) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 9:
+ case 11:
+ case 12:
+ extra = port_rcv_txt[info];
+ break;
+ default:
+ reason_valid = 0;
+			snprintf(buf, sizeof(buf), "reserved%llu", info);
+ extra = buf;
+ break;
+ }
+
+ if (reason_valid && !do_bounce) {
+ do_bounce = ppd->port_error_action &
+ (1 << (OPA_LDR_PORTRCV_OFFSET + info));
+ lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
+ }
+
+ /* just report this */
+ dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
+ dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
+ hdr0, hdr1);
+
+ reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
+ }
+
+ if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
+ /* informative only */
+ dd_dev_info(dd, "8051 access to LCB blocked\n");
+ reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
+ }
+ if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
+ /* informative only */
+ dd_dev_info(dd, "host access to LCB blocked\n");
+ reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
+ }
+
+ /* report any remaining errors */
+ if (reg)
+ dd_dev_info(dd, "DCC Error: %s\n",
+ dcc_err_string(buf, sizeof(buf), reg));
+
+ if (lcl_reason == 0)
+ lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
+
+ if (do_bounce) {
+ dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
+ set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
+ queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
+ }
+}
+
+static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
+{
+ char buf[96];
+
+ dd_dev_info(dd, "LCB Error: %s\n",
+ lcb_err_string(buf, sizeof(buf), reg));
+}
+
+/*
+ * CCE block DC interrupt. Source is < 8.
+ */
+static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct err_reg_info *eri = &dc_errs[source];
+
+ if (eri->handler) {
+ interrupt_clear_down(dd, 0, eri);
+ } else if (source == 3 /* dc_lbm_int */) {
+ /*
+ * This indicates that a parity error has occurred on the
+ * address/control lines presented to the LBM. The error
+ * is a single pulse, there is no associated error flag,
+ * and it is non-maskable. This is because if a parity
+ * error occurs on the request the request is dropped.
+ * This should never occur, but it is nice to know if it
+ * ever does.
+ */
+ dd_dev_err(dd, "Parity error in DC LBM block\n");
+ } else {
+ dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
+ }
+}
+
+/*
+ * TX block send credit interrupt. Source is < 160.
+ */
+static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ sc_group_release_update(dd, source);
+}
+
+/*
+ * TX block SDMA interrupt. Source is < 48.
+ *
+ * SDMA interrupts are grouped by type:
+ *
+ * 0 - N-1 = SDma
+ * N - 2N-1 = SDmaProgress
+ * 2N - 3N-1 = SDmaIdle
+ */
+static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ /* what interrupt */
+ unsigned int what = source / TXE_NUM_SDMA_ENGINES;
+ /* which engine */
+ unsigned int which = source % TXE_NUM_SDMA_ENGINES;
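+	/*
+	 * Worked example (illustrative): with 16 engines, source 20 gives
+	 * what = 1 (SDmaProgress) and which = 4 (engine 4).
+	 */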
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
+ slashstrip(__FILE__), __LINE__, __func__);
+ sdma_dumpstate(&dd->per_sdma[which]);
+#endif
+
+ if (likely(what < 3 && which < dd->num_sdma)) {
+ sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
+ } else {
+ /* should not happen */
+ dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
+ }
+}
+
+/*
+ * RX block receive available interrupt. Source is < 160.
+ */
+static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ struct hfi1_ctxtdata *rcd;
+ char *err_detail;
+
+ if (likely(source < dd->num_rcv_contexts)) {
+ rcd = dd->rcd[source];
+ if (rcd) {
+ if (source < dd->first_user_ctxt)
+ rcd->do_interrupt(rcd);
+ else
+ handle_user_interrupt(rcd);
+ return; /* OK */
+ }
+ /* received an interrupt, but no rcd */
+ err_detail = "dataless";
+ } else {
+ /* received an interrupt, but are not using that context */
+ err_detail = "out of range";
+ }
+ dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
+ err_detail, source);
+}
+
+/*
+ * RX block receive urgent interrupt. Source is < 160.
+ */
+static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ struct hfi1_ctxtdata *rcd;
+ char *err_detail;
+
+ if (likely(source < dd->num_rcv_contexts)) {
+ rcd = dd->rcd[source];
+ if (rcd) {
+ /* only pay attention to user urgent interrupts */
+ if (source >= dd->first_user_ctxt)
+ handle_user_interrupt(rcd);
+ return; /* OK */
+ }
+ /* received an interrupt, but no rcd */
+ err_detail = "dataless";
+ } else {
+ /* received an interrupt, but are not using that context */
+ err_detail = "out of range";
+ }
+ dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
+ err_detail, source);
+}
+
+/*
+ * Reserved range interrupt. Should not be called in normal operation.
+ */
+static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
+{
+ char name[64];
+
+ dd_dev_err(dd, "unexpected %s interrupt\n",
+ is_reserved_name(name, sizeof(name), source));
+}
+
+static const struct is_table is_table[] = {
+/* start end
+ name func interrupt func */
+{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
+ is_misc_err_name, is_misc_err_int },
+{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
+ is_sdma_eng_err_name, is_sdma_eng_err_int },
+{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
+ is_sendctxt_err_name, is_sendctxt_err_int },
+{ IS_SDMA_START, IS_SDMA_END,
+ is_sdma_eng_name, is_sdma_eng_int },
+{ IS_VARIOUS_START, IS_VARIOUS_END,
+ is_various_name, is_various_int },
+{ IS_DC_START, IS_DC_END,
+ is_dc_name, is_dc_int },
+{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
+ is_rcv_avail_name, is_rcv_avail_int },
+{ IS_RCVURGENT_START, IS_RCVURGENT_END,
+ is_rcv_urgent_name, is_rcv_urgent_int },
+{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
+				is_send_credit_name, is_send_credit_int },
+{ IS_RESERVED_START, IS_RESERVED_END,
+				is_reserved_name, is_reserved_int },
+};
+
+/*
+ * Interrupt source interrupt - called when the given source has an interrupt.
+ * Source is a bit index into an array of 64-bit integers.
+ */
+static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
+{
+ const struct is_table *entry;
+
+ /* avoids a double compare by walking the table in-order */
+ for (entry = &is_table[0]; entry->is_name; entry++) {
+ if (source < entry->end) {
+ trace_hfi1_interrupt(dd, entry, source);
+ entry->is_int(dd, source - entry->start);
+ return;
+ }
+ }
+ /* fell off the end */
+ dd_dev_err(dd, "invalid interrupt source %u\n", source);
+}
+
+/*
+ * General interrupt handler. This is able to correctly handle
+ * all interrupts in case INTx is used.
+ */
+static irqreturn_t general_interrupt(int irq, void *data)
+{
+ struct hfi1_devdata *dd = data;
+ u64 regs[CCE_NUM_INT_CSRS];
+ u32 bit;
+ int i;
+
+ this_cpu_inc(*dd->int_counter);
+
+ /* phase 1: scan and clear all handled interrupts */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ if (dd->gi_mask[i] == 0) {
+ regs[i] = 0; /* used later */
+ continue;
+ }
+ regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
+ dd->gi_mask[i];
+ /* only clear if anything is set */
+ if (regs[i])
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
+ }
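+	/*
+	 * regs[] now forms one flat bitmap of all asserted sources, so a
+	 * set bit's index is exactly the interrupt source number handed
+	 * to is_interrupt() below.
+	 */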
+
+ /* phase 2: call the appropriate handler */
+	for_each_set_bit(bit, (unsigned long *)&regs[0],
+			 CCE_NUM_INT_CSRS * 64) {
+ is_interrupt(dd, bit);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sdma_interrupt(int irq, void *data)
+{
+ struct sdma_engine *sde = data;
+ struct hfi1_devdata *dd = sde->dd;
+ u64 status;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ sdma_dumpstate(sde);
+#endif
+
+ this_cpu_inc(*dd->int_counter);
+
+ /* This read_csr is really bad in the hot path */
+	status = read_csr(dd,
+			CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
+			& sde->imask;
+ if (likely(status)) {
+ /* clear the interrupt(s) */
+		write_csr(dd,
+			CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
+			status);
+
+ /* handle the interrupt(s) */
+ sdma_engine_interrupt(sde, status);
+	} else {
+		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
+			   sde->this_idx);
+	}
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * NOTE: this routine expects to be on its own MSI-X interrupt. If
+ * multiple receive contexts share the same MSI-X interrupt, then this
+ * routine must check for who received it.
+ */
+static irqreturn_t receive_context_interrupt(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+ struct hfi1_devdata *dd = rcd->dd;
+
+ trace_hfi1_receive_interrupt(dd, rcd->ctxt);
+ this_cpu_inc(*dd->int_counter);
+
+ /* clear the interrupt */
+	write_csr(rcd->dd, CCE_INT_CLEAR + (8 * rcd->ireg), rcd->imask);
+
+ /* handle the interrupt */
+ rcd->do_interrupt(rcd);
+
+ return IRQ_HANDLED;
+}
+
+/* ========================================================================= */
+
+u32 read_physical_state(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
+ return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
+ & DC_DC8051_STS_CUR_STATE_PORT_MASK;
+}
+
+static u32 read_logical_state(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
+ return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
+ & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
+}
+
+static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
+{
+ u64 reg;
+
+ reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
+ /* clear current state, set new state */
+ reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
+ reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
+ write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
+}
+
+/*
+ * Use the 8051 to read a LCB CSR.
+ */
+static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
+{
+ u32 regno;
+ int ret;
+
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ if (acquire_lcb_access(dd, 0) == 0) {
+ *data = read_csr(dd, addr);
+ release_lcb_access(dd, 0);
+ return 0;
+ }
+ return -EBUSY;
+ }
+
+ /* register is an index of LCB registers: (offset - base) / 8 */
+ regno = (addr - DC_LCB_CFG_RUN) >> 3;
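+	/*
+	 * e.g. an LCB CSR located 0x18 bytes past DC_LCB_CFG_RUN is
+	 * register index 3 (0x18 >> 3).
+	 */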
+ ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
+ if (ret != HCMD_SUCCESS)
+ return -EBUSY;
+ return 0;
+}
+
+/*
+ * Read an LCB CSR. Access may not be in host control, so check.
+ * Return 0 on success, -EBUSY on failure.
+ */
+int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ /* if up, go through the 8051 for the value */
+ if (ppd->host_link_state & HLS_UP)
+ return read_lcb_via_8051(dd, addr, data);
+ /* if going up or down, no access */
+ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
+ return -EBUSY;
+ /* otherwise, host has access */
+ *data = read_csr(dd, addr);
+ return 0;
+}
+
+/*
+ * Use the 8051 to write a LCB CSR.
+ */
+static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
+{
+ if (acquire_lcb_access(dd, 0) == 0) {
+ write_csr(dd, addr, data);
+ release_lcb_access(dd, 0);
+ return 0;
+ }
+ return -EBUSY;
+}
+
+/*
+ * Write an LCB CSR. Access may not be in host control, so check.
+ * Return 0 on success, -EBUSY on failure.
+ */
+int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ /* if up, go through the 8051 for the value */
+ if (ppd->host_link_state & HLS_UP)
+ return write_lcb_via_8051(dd, addr, data);
+ /* if going up or down, no access */
+ if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
+ return -EBUSY;
+ /* otherwise, host has access */
+ write_csr(dd, addr, data);
+ return 0;
+}
+
+/*
+ * Returns:
+ * < 0 = Linux error, not able to get access
+ * > 0 = 8051 command RETURN_CODE
+ */
+static int do_8051_command(
+ struct hfi1_devdata *dd,
+ u32 type,
+ u64 in_data,
+ u64 *out_data)
+{
+ u64 reg, completed;
+ int return_code;
+ unsigned long flags;
+ unsigned long timeout;
+
+ hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+
+ /*
+ * Alternative to holding the lock for a long time:
+ * - keep busy wait - have other users bounce off
+ */
+ spin_lock_irqsave(&dd->dc8051_lock, flags);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
+ /*
+ * If an 8051 host command timed out previously, then the 8051 is
+ * stuck.
+ *
+ * On first timeout, attempt to reset and restart the entire DC
+ * block (including 8051). (Is this too big of a hammer?)
+ *
+ * If the 8051 times out a second time, the reset did not bring it
+ * back to healthy life. In that case, fail any subsequent commands.
+ */
+ if (dd->dc8051_timed_out) {
+ if (dd->dc8051_timed_out > 1) {
+ dd_dev_err(dd,
+ "Previous 8051 host command timed out, skipping command %u\n",
+ type);
+ return_code = -ENXIO;
+ goto fail;
+ }
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+ dc_shutdown(dd);
+ dc_start(dd);
+ spin_lock_irqsave(&dd->dc8051_lock, flags);
+ }
+
+ /*
+ * If there is no timeout, then the 8051 command interface is
+ * waiting for a command.
+ */
+
+ /*
+ * Do two writes: the first to stabilize the type and req_data, the
+ * second to activate.
+ */
+ reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
+ << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
+ | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
+ << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
+ reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
+
+ /* wait for completion, alternate: interrupt */
+ timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
+ completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
+ if (completed)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd->dc8051_timed_out++;
+ dd_dev_err(dd, "8051 host command %u timeout\n", type);
+ if (out_data)
+ *out_data = 0;
+ return_code = -ETIMEDOUT;
+ goto fail;
+ }
+ udelay(2);
+ }
+
+ if (out_data) {
+ *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
+ & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
+ if (type == HCMD_READ_LCB_CSR) {
+ /* top 16 bits are in a different register */
+ *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
+ & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
+ << (48
+ - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
+ }
+ }
+ return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
+ & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
+ dd->dc8051_timed_out = 0;
+ /*
+ * Clear command for next user.
+ */
+ write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
+
+fail:
+ spin_unlock_irqrestore(&dd->dc8051_lock, flags);
+
+ return return_code;
+}
+
+static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
+{
+ return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
+}
+
+static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
+{
+ u64 data;
+ int ret;
+
+ data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
+ | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
+ | (u64)config_data << LOAD_DATA_DATA_SHIFT;
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "load 8051 config: field id %d, lane %d, err %d\n",
+ (int)field_id, (int)lane_id, ret);
+ }
+ return ret;
+}
+
+/*
+ * Read the 8051 firmware "registers". Use the RAM directly. Always
+ * set the result, even on error.
+ * Return 0 on success, -errno on failure
+ */
+static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
+ u32 *result)
+{
+ u64 big_data;
+ u32 addr;
+ int ret;
+
+ /* address start depends on the lane_id */
+ if (lane_id < 4)
+ addr = (4 * NUM_GENERAL_FIELDS)
+ + (lane_id * 4 * NUM_LANE_FIELDS);
+ else
+ addr = 0;
+ addr += field_id * 4;
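+	/*
+	 * Worked example (illustrative): lane 1, field 2 yields
+	 * addr = 4 * NUM_GENERAL_FIELDS + 4 * NUM_LANE_FIELDS + 8.
+	 */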
+
+ /* read is in 8-byte chunks, hardware will truncate the address down */
+ ret = read_8051_data(dd, addr, 8, &big_data);
+
+ if (ret == 0) {
+ /* extract the 4 bytes we want */
+ if (addr & 0x4)
+ *result = (u32)(big_data >> 32);
+ else
+ *result = (u32)big_data;
+ } else {
+ *result = 0;
+ dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
+ __func__, lane_id, field_id);
+ }
+
+ return ret;
+}
+
+static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
+ u8 continuous)
+{
+ u32 frame;
+
+ frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
+ | power_management << POWER_MANAGEMENT_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
+ GENERAL_CONFIG, frame);
+}
+
+static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
+ u16 vl15buf, u8 crc_sizes)
+{
+ u32 frame;
+
+ frame = (u32)vau << VAU_SHIFT
+ | (u32)z << Z_SHIFT
+ | (u32)vcu << VCU_SHIFT
+ | (u32)vl15buf << VL15BUF_SHIFT
+ | (u32)crc_sizes << CRC_SIZES_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
+ GENERAL_CONFIG, frame);
+}
+
+static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ &frame);
+ *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
+ *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
+ *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
+}
+
+static int write_vc_local_link_width(struct hfi1_devdata *dd,
+ u8 misc_bits,
+ u8 flag_bits,
+ u16 link_widths)
+{
+ u32 frame;
+
+ frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
+ | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
+ | (u32)link_widths << LINK_WIDTH_SHIFT;
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ frame);
+}
+
+static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
+ u8 device_rev)
+{
+ u32 frame;
+
+ frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
+ | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
+ return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
+}
+
+static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
+ u8 *device_rev)
+{
+ u32 frame;
+
+ read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
+ *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
+ *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
+ & REMOTE_DEVICE_REV_MASK;
+}
+
+void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
+{
+ u32 frame;
+
+ read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
+ *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
+ *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
+}
+
+static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
+ u8 *continuous)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
+ *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
+ & POWER_MANAGEMENT_MASK;
+ *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
+ & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
+}
+
+static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
+ u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
+ *vau = (frame >> VAU_SHIFT) & VAU_MASK;
+ *z = (frame >> Z_SHIFT) & Z_MASK;
+ *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
+ *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
+ *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
+}
+
+static void read_vc_remote_link_width(struct hfi1_devdata *dd,
+ u8 *remote_tx_rate,
+ u16 *link_widths)
+{
+ u32 frame;
+
+ read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
+ &frame);
+ *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
+ & REMOTE_TX_RATE_MASK;
+ *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
+}
+
+static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
+{
+ u32 frame;
+
+ read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
+ *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
+}
+
+static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
+{
+ u32 frame;
+
+ read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
+ *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
+}
+
+static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
+{
+ read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
+}
+
+static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
+{
+ read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
+}
+
+void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
+{
+ u32 frame;
+ int ret;
+
+ *link_quality = 0;
+ if (dd->pport->host_link_state & HLS_UP) {
+ ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
+ &frame);
+ if (ret == 0)
+ *link_quality = (frame >> LINK_QUALITY_SHIFT)
+ & LINK_QUALITY_MASK;
+ }
+}
+
+static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
+{
+ u32 frame;
+
+ read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
+ *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
+}
+
+static int read_tx_settings(struct hfi1_devdata *dd,
+ u8 *enable_lane_tx,
+ u8 *tx_polarity_inversion,
+ u8 *rx_polarity_inversion,
+ u8 *max_rate)
+{
+ u32 frame;
+ int ret;
+
+ ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
+ *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
+ & ENABLE_LANE_TX_MASK;
+ *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
+ & TX_POLARITY_INVERSION_MASK;
+ *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
+ & RX_POLARITY_INVERSION_MASK;
+ *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
+ return ret;
+}
+
+static int write_tx_settings(struct hfi1_devdata *dd,
+ u8 enable_lane_tx,
+ u8 tx_polarity_inversion,
+ u8 rx_polarity_inversion,
+ u8 max_rate)
+{
+ u32 frame;
+
+ /* no need to mask, all variable sizes match field widths */
+ frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
+ | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
+ | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
+ | max_rate << MAX_RATE_SHIFT;
+ return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
+}
+
+static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
+{
+ u32 frame, version, prod_id;
+ int ret, lane;
+
+ /* 4 lanes */
+ for (lane = 0; lane < 4; lane++) {
+ ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
+ if (ret) {
+ dd_dev_err(
+ dd,
+ "Unable to read lane %d firmware details\n",
+ lane);
+ continue;
+ }
+ version = (frame >> SPICO_ROM_VERSION_SHIFT)
+ & SPICO_ROM_VERSION_MASK;
+ prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
+ & SPICO_ROM_PROD_ID_MASK;
+ dd_dev_info(dd,
+ "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
+ lane, version, prod_id);
+ }
+}
+
+/*
+ * Read an idle LCB message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
+{
+ int ret;
+
+ ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
+ type, data_out);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "read idle message: type %d, err %d\n",
+ (u32)type, ret);
+ return -EINVAL;
+ }
+ dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
+ /* return only the payload as we already know the type */
+ *data_out >>= IDLE_PAYLOAD_SHIFT;
+ return 0;
+}
+
+/*
+ * Read an idle SMA message. To be done in response to a notification from
+ * the 8051.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
+{
+ return read_idle_message(dd,
+ (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
+}
+
+/*
+ * Send an idle LCB message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+static int send_idle_message(struct hfi1_devdata *dd, u64 data)
+{
+ int ret;
+
+ dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
+ ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
+ data, ret);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Send an idle SMA message.
+ *
+ * Returns 0 on success, -EINVAL on error
+ */
+int send_idle_sma(struct hfi1_devdata *dd, u64 message)
+{
+ u64 data;
+
+ data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
+ | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
+ return send_idle_message(dd, data);
+}
+
+/*
+ * Initialize the LCB then do a quick link up. This may or may not be
+ * in loopback.
+ *
+ * return 0 on success, -errno on error
+ */
+static int do_quick_linkup(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ unsigned long timeout;
+ int ret;
+
+ lcb_shutdown(dd, 0);
+
+ if (loopback) {
+ /* LCB_CFG_LOOPBACK.VAL = 2 */
+ /* LCB_CFG_LANE_WIDTH.VAL = 0 */
+ write_csr(dd, DC_LCB_CFG_LOOPBACK,
+ IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
+ write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
+ }
+
+ /* start the LCBs */
+ /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
+
+ /* simulator only loopback steps */
+ if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ /* LCB_CFG_RUN.EN = 1 */
+ write_csr(dd, DC_LCB_CFG_RUN,
+ 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
+
+ /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (1) {
+ reg = read_csr(dd,
+ DC_LCB_STS_LINK_TRANSFER_ACTIVE);
+ if (reg)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "timeout waiting for LINK_TRANSFER_ACTIVE\n");
+ return -ETIMEDOUT;
+ }
+ udelay(2);
+ }
+
+ write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
+ 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
+ }
+
+ if (!loopback) {
+ /*
+ * When doing quick linkup and not in loopback, both
+ * sides must be done with LCB set-up before either
+ * starts the quick linkup. Put a delay here so that
+ * both sides can be started and have a chance to be
+ * done with LCB set up before resuming.
+ */
+ dd_dev_err(dd,
+ "Pausing for peer to be finished with LCB set up\n");
+ msleep(5000);
+ dd_dev_err(dd,
+ "Continuing with quick linkup\n");
+ }
+
+ write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
+ set_8051_lcb_access(dd);
+
+ /*
+ * State "quick" LinkUp request sets the physical link state to
+ * LinkUp without a verify capability sequence.
+ * This state is in simulator v37 and later.
+ */
+ ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "%s: set physical link state to quick LinkUp failed with return %d\n",
+ __func__, ret);
+
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+ if (ret >= 0)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * Set the SerDes to internal loopback mode.
+ * Returns 0 on success, -errno on error.
+ */
+static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
+ if (ret == HCMD_SUCCESS)
+ return 0;
+ dd_dev_err(dd,
+ "Set physical link state to SerDes Loopback failed with return %d\n",
+ ret);
+ if (ret >= 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+/*
+ * Do all special steps to set up loopback.
+ */
+static int init_loopback(struct hfi1_devdata *dd)
+{
+ dd_dev_info(dd, "Entering loopback mode\n");
+
+ /* all loopbacks should disable self GUID check */
+ write_csr(dd, DC_DC8051_CFG_MODE,
+ (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
+
+ /*
+ * The simulator has only one loopback option - LCB. Switch
+ * to that option, which includes quick link up.
+ *
+ * Accept all valid loopback values.
+ */
+ if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ && (loopback == LOOPBACK_SERDES
+ || loopback == LOOPBACK_LCB
+ || loopback == LOOPBACK_CABLE)) {
+ loopback = LOOPBACK_LCB;
+ quick_linkup = 1;
+ return 0;
+ }
+
+ /* handle serdes loopback */
+ if (loopback == LOOPBACK_SERDES) {
+ /* internal serdes loopback needs quick linkup on RTL */
+ if (dd->icode == ICODE_RTL_SILICON)
+ quick_linkup = 1;
+ return set_serdes_loopback_mode(dd);
+ }
+
+ /* LCB loopback - handled at poll time */
+ if (loopback == LOOPBACK_LCB) {
+ quick_linkup = 1; /* LCB is always quick linkup */
+
+ /* not supported in emulation due to emulation RTL changes */
+ if (dd->icode == ICODE_FPGA_EMULATION) {
+ dd_dev_err(dd,
+ "LCB loopback not supported in emulation\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* external cable loopback requires no extra steps */
+ if (loopback == LOOPBACK_CABLE)
+ return 0;
+
+ dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
+ return -EINVAL;
+}
+
+/*
+ * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
+ * used in the Verify Capability link width attribute.
+ */
+static u16 opa_to_vc_link_widths(u16 opa_widths)
+{
+ int i;
+ u16 result = 0;
+
+ static const struct link_bits {
+ u16 from;
+ u16 to;
+ } opa_link_xlate[] = {
+ { OPA_LINK_WIDTH_1X, 1 << (1-1) },
+ { OPA_LINK_WIDTH_2X, 1 << (2-1) },
+ { OPA_LINK_WIDTH_3X, 1 << (3-1) },
+ { OPA_LINK_WIDTH_4X, 1 << (4-1) },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
+ if (opa_widths & opa_link_xlate[i].from)
+ result |= opa_link_xlate[i].to;
+ }
+ return result;
+}
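+
+/*
+ * Illustrative example of the translation above: an opa_widths value
+ * with the 1X and 4X bits set maps to (1 << 0) | (1 << 3) = 0x9 in
+ * the Verify Capability encoding.
+ */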
+
+/*
+ * Set link attributes before moving to polling.
+ */
+static int set_local_link_attributes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u8 enable_lane_tx;
+ u8 tx_polarity_inversion;
+ u8 rx_polarity_inversion;
+ int ret;
+
+ /* reset our fabric serdes to clear any lingering problems */
+ fabric_serdes_reset(dd);
+
+ /* set the local tx rate - need to read-modify-write */
+ ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
+ &rx_polarity_inversion, &ppd->local_tx_rate);
+ if (ret)
+ goto set_local_link_attributes_fail;
+
+ if (dd->dc8051_ver < dc8051_ver(0, 20)) {
+ /* set the tx rate to the fastest enabled */
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
+ ppd->local_tx_rate = 1;
+ else
+ ppd->local_tx_rate = 0;
+ } else {
+ /* set the tx rate to all enabled */
+ ppd->local_tx_rate = 0;
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
+ ppd->local_tx_rate |= 2;
+ if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
+ ppd->local_tx_rate |= 1;
+ }
+ ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
+ rx_polarity_inversion, ppd->local_tx_rate);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /*
+ * DC supports continuous updates.
+ */
+ ret = write_vc_local_phy(dd, 0 /* no power management */,
+ 1 /* continuous updates */);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /* z=1 in the next call: AU of 0 is not supported by the hardware */
+ ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
+ ppd->port_crc_mode_enabled);
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ ret = write_vc_local_link_width(dd, 0, 0,
+ opa_to_vc_link_widths(ppd->link_width_enabled));
+ if (ret != HCMD_SUCCESS)
+ goto set_local_link_attributes_fail;
+
+ /* let peer know who we are */
+ ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
+ if (ret == HCMD_SUCCESS)
+ return 0;
+
+set_local_link_attributes_fail:
+ dd_dev_err(dd,
+ "Failed to set local link attributes, return 0x%x\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Call this to start the link. Do not do anything if the link is
+ * disabled. Returns 0 if the link is disabled or moved to polling,
+ * or -EAGAIN if the cable is not present or polling cannot be
+ * started, so the caller may schedule a retry.
+ */
+int start_link(struct hfi1_pportdata *ppd)
+{
+ if (!ppd->link_enabled) {
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because link is disabled\n",
+ __func__);
+ return 0;
+ }
+ if (!ppd->driver_link_ready) {
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because driver is not ready\n",
+ __func__);
+ return 0;
+ }
+
+ if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
+ loopback == LOOPBACK_LCB ||
+ ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ return set_link_state(ppd, HLS_DN_POLL);
+
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because no cable is present\n",
+ __func__);
+ return -EAGAIN;
+}
+
+static void reset_qsfp(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask, qsfp_mask;
+
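+ /*
+ * RESET_N is active low: enable the pin as an output, drive it
+ * low for a moment, then release it high to reset the module.
+ */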
+ mask = (u64)QSFP_HFI0_RESET_N;
+ qsfp_mask = read_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
+ qsfp_mask |= mask;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
+ qsfp_mask);
+
+ qsfp_mask = read_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
+ qsfp_mask &= ~mask;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
+ qsfp_mask);
+
+ udelay(10);
+
+ qsfp_mask |= mask;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
+ qsfp_mask);
+}
+
+static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
+ u8 *qsfp_interrupt_status)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
+ (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
+ dd_dev_info(dd,
+ "%s: QSFP cable temperature too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
+ (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
+ dd_dev_info(dd,
+ "%s: QSFP cable temperature too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
+ (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
+ dd_dev_info(dd,
+ "%s: QSFP supply voltage too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
+ (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
+ dd_dev_info(dd,
+ "%s: QSFP supply voltage too low\n",
+ __func__);
+
+ /* Byte 2 is vendor specific */
+
+ if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable RX channel 1/2 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable RX channel 1/2 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable RX channel 3/4 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable RX channel 3/4 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
+ (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 1/2 bias too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
+ (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 1/2 bias too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
+ (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 3/4 bias too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
+ (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 3/4 bias too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 1/2 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 1/2 power too low\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
+ (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 3/4 power too high\n",
+ __func__);
+
+ if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
+ (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd,
+ "%s: Cable TX channel 3/4 power too low\n",
+ __func__);
+
+ /* Bytes 9-10 and 11-12 are reserved */
+ /* Bytes 13-15 are vendor specific */
+
+ return 0;
+}
+
+static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
+{
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+
+ return 0;
+}
+
+static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u8 qsfp_interrupt_status = 0;
+
+ if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
+ != 1) {
+ dd_dev_info(dd,
+ "%s: Failed to read status of QSFP module\n",
+ __func__);
+ return -EIO;
+ }
+
+ /* We don't care about alarms & warnings with a non-functional INT_N */
+ if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
+ do_pre_lni_host_behaviors(ppd);
+
+ return 0;
+}
+
+/* This routine will only be scheduled if the QSFP module is present */
+static void qsfp_event(struct work_struct *work)
+{
+ struct qsfp_data *qd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+
+ qd = container_of(work, struct qsfp_data, qsfp_work);
+ ppd = qd->ppd;
+ dd = ppd->dd;
+
+ /* Sanity check */
+ if (!qsfp_mod_present(ppd))
+ return;
+
+ /*
+ * Turn DC back on after the cable has been re-inserted. Up
+ * until now, the DC has been in reset to save power.
+ */
+ dc_start(dd);
+
+ if (qd->cache_refresh_required) {
+ msleep(3000);
+ reset_qsfp(ppd);
+
+ /*
+ * Check for QSFP interrupt after t_init (SFF 8679)
+ * + extra
+ */
+ msleep(3000);
+ if (!qd->qsfp_interrupt_functional) {
+ if (do_qsfp_intr_fallback(ppd) < 0)
+ dd_dev_info(dd, "%s: QSFP fallback failed\n",
+ __func__);
+ ppd->driver_link_ready = 1;
+ start_link(ppd);
+ }
+ }
+
+ if (qd->check_interrupt_flags) {
+ u8 qsfp_interrupt_status[16] = {0,};
+
+ if (qsfp_read(ppd, dd->hfi1_id, 6,
+ &qsfp_interrupt_status[0], 16) != 16) {
+ dd_dev_info(dd,
+ "%s: Failed to read status of QSFP module\n",
+ __func__);
+ } else {
+ unsigned long flags;
+ u8 data_status;
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.check_interrupt_flags = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
+ flags);
+
+ if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
+ != 1) {
+ dd_dev_info(dd,
+ "%s: Failed to read status of QSFP module\n",
+ __func__);
+ }
+ if (!(data_status & QSFP_DATA_NOT_READY)) {
+ do_pre_lni_host_behaviors(ppd);
+ start_link(ppd);
+ } else {
+ handle_qsfp_error_conditions(ppd,
+ qsfp_interrupt_status);
+ }
+ }
+ }
+}
+
+void init_qsfp(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 qsfp_mask;
+
+ if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
+ ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
+ !HFI1_CAP_IS_KSET(QSFP_ENABLED)) {
+ ppd->driver_link_ready = 1;
+ return;
+ }
+
+ ppd->qsfp_info.ppd = ppd;
+ INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
+
+ qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
+ /* Clear current status to avoid spurious interrupts */
+ write_csr(dd,
+ dd->hfi1_id ?
+ ASIC_QSFP2_CLEAR :
+ ASIC_QSFP1_CLEAR,
+ qsfp_mask);
+
+ /* Handle active low nature of INT_N and MODPRST_N pins */
+ if (qsfp_mod_present(ppd))
+ qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
+ qsfp_mask);
+
+ /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
+ qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
+ write_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
+ qsfp_mask);
+
+ if (qsfp_mod_present(ppd)) {
+ msleep(3000);
+ reset_qsfp(ppd);
+
+ /*
+ * Check for QSFP interrupt after t_init (SFF 8679)
+ * + extra
+ */
+ msleep(3000);
+ if (!ppd->qsfp_info.qsfp_interrupt_functional) {
+ if (do_qsfp_intr_fallback(ppd) < 0)
+ dd_dev_info(dd,
+ "%s: QSFP fallback failed\n",
+ __func__);
+ ppd->driver_link_ready = 1;
+ }
+ }
+}
+
+int bringup_serdes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 guid;
+ int ret;
+
+ if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
+ add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
+
+ guid = ppd->guid;
+ if (!guid) {
+ if (dd->base_guid)
+ guid = dd->base_guid + ppd->port - 1;
+ ppd->guid = guid;
+ }
+
+ /* the link defaults to enabled */
+ ppd->link_enabled = 1;
+ /* Set linkinit_reason on power up per OPA spec */
+ ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
+
+ if (loopback) {
+ ret = init_loopback(dd);
+ if (ret < 0)
+ return ret;
+ }
+
+ return start_link(ppd);
+}
+
+void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * Shut down the link and keep it down. First clear the flag
+ * that says the driver wants to allow the link to be up
+ * (driver_link_ready). Then make sure the link is not
+ * automatically restarted (link_enabled). Cancel any pending
+ * restart. And finally go offline.
+ */
+ ppd->driver_link_ready = 0;
+ ppd->link_enabled = 0;
+
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
+ OPA_LINKDOWN_REASON_SMA_DISABLED);
+ set_link_state(ppd, HLS_DN_OFFLINE);
+
+ /* disable the port */
+ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+}
+
+static inline int init_cpu_counters(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->ibport_data.rc_acks = alloc_percpu(u64);
+ ppd->ibport_data.rc_qacks = alloc_percpu(u64);
+ ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
+ if (!ppd->ibport_data.rc_acks ||
+ !ppd->ibport_data.rc_delayed_comp ||
+ !ppd->ibport_data.rc_qacks)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static const char * const pt_names[] = {
+ "expected",
+ "eager",
+ "invalid"
+};
+
+static const char *pt_name(u32 type)
+{
+ return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
+}
+
+/*
+ * index is the index into the receive array
+ */
+void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
+ u32 type, unsigned long pa, u16 order)
+{
+ u64 reg;
+ void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
+ (dd->kregbase + RCV_ARRAY));
+
+ if (!(dd->flags & HFI1_PRESENT))
+ goto done;
+
+ if (type == PT_INVALID) {
+ pa = 0;
+ } else if (type > PT_INVALID) {
+ dd_dev_err(dd,
+ "unexpected receive array type %u for index %u, not handled\n",
+ type, index);
+ goto done;
+ }
+
+ hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
+ pt_name(type), index, pa, (unsigned long)order);
+
+#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
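+ /* e.g. pa 0x12345000 >> RT_ADDR_SHIFT gives 0x12345 for the RT addr field */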
+ reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
+ | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
+ | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
+ << RCV_ARRAY_RT_ADDR_SHIFT;
+ writeq(reg, base + (index * 8));
+
+ if (type == PT_EAGER)
+ /*
+ * Eager entries are written one-by-one so we have to push them
+ * after we write the entry.
+ */
+ flush_wc();
+done:
+ return;
+}
+
+void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 i;
+
+ /* this could be optimized */
+ for (i = rcd->eager_base; i < rcd->eager_base +
+ rcd->egrbufs.alloced; i++)
+ hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
+
+ for (i = rcd->expected_base;
+ i < rcd->expected_base + rcd->expected_count; i++)
+ hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
+}
+
+int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
+ struct hfi1_ctxt_info *kinfo)
+{
+ kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
+ return 0;
+}
+
+struct hfi1_message_header *hfi1_get_msgheader(
+ struct hfi1_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
+
+ return (struct hfi1_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+static const char * const ib_cfg_name_strings[] = {
+ "HFI1_IB_CFG_LIDLMC",
+ "HFI1_IB_CFG_LWID_DG_ENB",
+ "HFI1_IB_CFG_LWID_ENB",
+ "HFI1_IB_CFG_LWID",
+ "HFI1_IB_CFG_SPD_ENB",
+ "HFI1_IB_CFG_SPD",
+ "HFI1_IB_CFG_RXPOL_ENB",
+ "HFI1_IB_CFG_LREV_ENB",
+ "HFI1_IB_CFG_LINKLATENCY",
+ "HFI1_IB_CFG_HRTBT",
+ "HFI1_IB_CFG_OP_VLS",
+ "HFI1_IB_CFG_VL_HIGH_CAP",
+ "HFI1_IB_CFG_VL_LOW_CAP",
+ "HFI1_IB_CFG_OVERRUN_THRESH",
+ "HFI1_IB_CFG_PHYERR_THRESH",
+ "HFI1_IB_CFG_LINKDEFAULT",
+ "HFI1_IB_CFG_PKEYS",
+ "HFI1_IB_CFG_MTU",
+ "HFI1_IB_CFG_LSTATE",
+ "HFI1_IB_CFG_VL_HIGH_LIMIT",
+ "HFI1_IB_CFG_PMA_TICKS",
+ "HFI1_IB_CFG_PORT"
+};
+
+static const char *ib_cfg_name(int which)
+{
+ if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
+ return "invalid";
+ return ib_cfg_name_strings[which];
+}
+
+int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int val = 0;
+
+ switch (which) {
+ case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
+ val = ppd->link_width_enabled;
+ break;
+ case HFI1_IB_CFG_LWID: /* currently active Link-width */
+ val = ppd->link_width_active;
+ break;
+ case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
+ val = ppd->link_speed_enabled;
+ break;
+ case HFI1_IB_CFG_SPD: /* current Link speed */
+ val = ppd->link_speed_active;
+ break;
+
+ case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
+ case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
+ case HFI1_IB_CFG_LINKLATENCY:
+ goto unimplemented;
+
+ case HFI1_IB_CFG_OP_VLS:
+ val = ppd->vls_operational;
+ break;
+ case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
+ val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
+ break;
+ case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
+ val = VL_ARB_LOW_PRIO_TABLE_SIZE;
+ break;
+ case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ val = ppd->overrun_threshold;
+ break;
+ case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ val = ppd->phy_error_threshold;
+ break;
+ case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ val = dd->link_default;
+ break;
+
+ case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
+ case HFI1_IB_CFG_PMA_TICKS:
+ default:
+unimplemented:
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(
+ dd,
+ "%s: which %s: not implemented\n",
+ __func__,
+ ib_cfg_name(which));
+ break;
+ }
+
+ return val;
+}
+
+/*
+ * The largest MAD packet size.
+ */
+#define MAX_MAD_PACKET 2048
+
+/*
+ * Return the maximum header bytes that can go on the _wire_
+ * for this device. This count includes the ICRC which is
+ * not part of the packet held in memory but is appended
+ * by the HW.
+ * This is dependent on the device's receive header entry size.
+ * HFI allows this to be set per-receive context, but the
+ * driver presently enforces a global value.
+ */
+u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
+{
+ /*
+ * The maximum non-payload (MTU) bytes in LRH.PktLen are
+ * the Receive Header Entry Size minus the PBC (or RHF) size
+ * plus one DW for the ICRC appended by HW.
+ *
+ * dd->rcd[0].rcvhdrqentsize is in DW.
+ * We use rcd[0] as all contexts will have the same value. Also,
+ * the first kernel context would have been allocated by now so
+ * we are guaranteed a valid value.
+ */
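+ /* e.g. a (hypothetical) 32 DW entry size gives (32 - 2 + 1) << 2 = 124 */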
+ return (dd->rcd[0]->rcvhdrqentsize - 2 /* PBC/RHF */ + 1 /* ICRC */) << 2;
+}
+
+/*
+ * Set Send Length
+ * @ppd - per port data
+ *
+ * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
+ * registers compare against LRH.PktLen, so use the max bytes included
+ * in the LRH.
+ *
+ * This routine changes all VL values except VL15, which it maintains at
+ * the same value.
+ */
+static void set_send_length(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 max_hb = lrh_max_header_bytes(dd), maxvlmtu = 0, dcmtu;
+ u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
+ SEND_LEN_CHECK1_LEN_VL15_SHIFT;
+ int i;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ if (dd->vld[i].mtu > maxvlmtu)
+ maxvlmtu = dd->vld[i].mtu;
+ if (i <= 3)
+ len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
+ ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
+ else
+ len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
+ & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
+ ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
+ }
+ write_csr(dd, SEND_LEN_CHECK0, len1);
+ write_csr(dd, SEND_LEN_CHECK1, len2);
+ /* adjust kernel credit return thresholds based on new MTUs */
+ /* all kernel receive contexts have the same hdrqentsize */
+ for (i = 0; i < ppd->vls_supported; i++) {
+ sc_set_cr_threshold(dd->vld[i].sc,
+ sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ }
+ sc_set_cr_threshold(dd->vld[15].sc,
+ sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+
+ /* Adjust maximum MTU for the port in DC */
+ dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
+ (ilog2(maxvlmtu >> 8) + 1);
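+ /* e.g. a maxvlmtu of 8192 encodes as ilog2(8192 >> 8) + 1 = 6 */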
+ len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
+ len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
+ len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
+ DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
+ write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
+}
+
+static void set_lidlmc(struct hfi1_pportdata *ppd)
+{
+ int i;
+ u64 sreg = 0;
+ struct hfi1_devdata *dd = ppd->dd;
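+ /* mask off the low LMC bits, e.g. lmc == 2 yields mask 0xfffffffc */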
+ u32 mask = ~((1U << ppd->lmc) - 1);
+ u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
+
+ if (dd->hfi1_snoop.mode_flag)
+ dd_dev_info(dd, "Set lid/lmc while snooping");
+
+ c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
+ | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
+ c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
+ << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
+ ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
+ << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
+ write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
+
+ /*
+ * Iterate over all the send contexts and set their SLID check
+ */
+ sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
+ SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
+ (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
+ SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
+
+ for (i = 0; i < dd->chip_send_contexts; i++) {
+ hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
+ i, (u32)sreg);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
+ }
+
+ /* Now we have to do the same thing for the sdma engines */
+ sdma_update_lmc(dd, mask, ppd->lid);
+}
+
+static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
+{
+ unsigned long timeout;
+ u32 curr_state;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ curr_state = read_physical_state(dd);
+ if (curr_state == state)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
+ state, curr_state);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ return 0;
+}
+
+/*
+ * Helper for set_link_state(). Do not call except from that routine.
+ * Expects ppd->hls_mutex to be held.
+ *
+ * @rem_reason value to be sent to the neighbor
+ *
+ * LinkDownReasons only set if transition succeeds.
+ */
+static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 pstate, previous_state;
+ u32 last_local_state;
+ u32 last_remote_state;
+ int ret;
+ int do_transition;
+ int do_wait;
+
+ previous_state = ppd->host_link_state;
+ ppd->host_link_state = HLS_GOING_OFFLINE;
+ pstate = read_physical_state(dd);
+ if (pstate == PLS_OFFLINE) {
+ do_transition = 0; /* in right state */
+ do_wait = 0; /* ...no need to wait */
+ } else if ((pstate & 0xff) == PLS_OFFLINE) {
+ do_transition = 0; /* in an offline transient state */
+ do_wait = 1; /* ...wait for it to settle */
+ } else {
+ do_transition = 1; /* need to move to offline */
+ do_wait = 1; /* ...will need to wait */
+ }
+
+ if (do_transition) {
+ ret = set_physical_link_state(dd,
+ PLS_OFFLINE | (rem_reason << 8));
+
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Offline link state, return %d\n",
+ ret);
+ return -EINVAL;
+ }
+ if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
+ ppd->offline_disabled_reason =
+ OPA_LINKDOWN_REASON_TRANSIENT;
+ }
+
+ if (do_wait) {
+ /* it can take a while for the link to go down */
+ ret = wait_phy_linkstate(dd, PLS_OFFLINE, 5000);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* make sure the logical state is also down */
+ wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+
+ /*
+ * Now in charge of LCB - must be after the physical state is
+ * offline.quiet and before host_link_state is changed.
+ */
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+ ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
+ /*
+ * The LNI has a mandatory wait time after the physical state
+ * moves to Offline.Quiet. The wait time may be different
+ * depending on how the link went down. The 8051 firmware
+ * will observe the needed wait time and only move to ready
+ * when that is completed. The largest of the quiet timeouts
+ * is 2.5s, so wait that long and then a bit more.
+ */
+ ret = wait_fm_ready(dd, 3000);
+ if (ret) {
+ dd_dev_err(dd,
+ "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
+ /* state is really offline, so make it so */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ return ret;
+ }
+
+ /*
+ * The state is now offline and the 8051 is ready to accept host
+ * requests.
+ * - change our state
+ * - notify others if we were previously in a linkup state
+ */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ if (previous_state & HLS_UP) {
+ /* went down while link was up */
+ handle_linkup_change(dd, 0);
+ } else if (previous_state
+ & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
+ /* went down while attempting link up */
+ /* byte 1 of last_*_state is the failure reason */
+ read_last_local_state(dd, &last_local_state);
+ read_last_remote_state(dd, &last_remote_state);
+ dd_dev_err(dd,
+ "LNI failure last states: local 0x%08x, remote 0x%08x\n",
+ last_local_state, last_remote_state);
+ }
+
+ /* the active link width (downgrade) is 0 on link down */
+ ppd->link_width_active = 0;
+ ppd->link_width_downgrade_tx_active = 0;
+ ppd->link_width_downgrade_rx_active = 0;
+ ppd->current_egress_rate = 0;
+ return 0;
+}
+
+/* return the link state name */
+static const char *link_state_name(u32 state)
+{
+ const char *name;
+ int n = ilog2(state);
+ static const char * const names[] = {
+ [__HLS_UP_INIT_BP] = "INIT",
+ [__HLS_UP_ARMED_BP] = "ARMED",
+ [__HLS_UP_ACTIVE_BP] = "ACTIVE",
+ [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
+ [__HLS_DN_POLL_BP] = "POLL",
+ [__HLS_DN_DISABLE_BP] = "DISABLE",
+ [__HLS_DN_OFFLINE_BP] = "OFFLINE",
+ [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
+ [__HLS_GOING_UP_BP] = "GOING_UP",
+ [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
+ [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
+ };
+
+ name = n < ARRAY_SIZE(names) ? names[n] : NULL;
+ return name ? name : "unknown";
+}
+
+/* return the link state reason name */
+static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
+{
+ if (state == HLS_UP_INIT) {
+ switch (ppd->linkinit_reason) {
+ case OPA_LINKINIT_REASON_LINKUP:
+ return "(LINKUP)";
+ case OPA_LINKINIT_REASON_FLAPPING:
+ return "(FLAPPING)";
+ case OPA_LINKINIT_OUTSIDE_POLICY:
+ return "(OUTSIDE_POLICY)";
+ case OPA_LINKINIT_QUARANTINED:
+ return "(QUARANTINED)";
+ case OPA_LINKINIT_INSUFIC_CAPABILITY:
+ return "(INSUFIC_CAPABILITY)";
+ default:
+ break;
+ }
+ }
+ return "";
+}
+
+/*
+ * driver_physical_state - convert the driver's notion of a port's
+ * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
+ * Return -1 (converted to a u32) to indicate error.
+ */
+u32 driver_physical_state(struct hfi1_pportdata *ppd)
+{
+ switch (ppd->host_link_state) {
+ case HLS_UP_INIT:
+ case HLS_UP_ARMED:
+ case HLS_UP_ACTIVE:
+ return IB_PORTPHYSSTATE_LINKUP;
+ case HLS_DN_POLL:
+ return IB_PORTPHYSSTATE_POLLING;
+ case HLS_DN_DISABLE:
+ return IB_PORTPHYSSTATE_DISABLED;
+ case HLS_DN_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_VERIFY_CAP:
+ return IB_PORTPHYSSTATE_POLLING;
+ case HLS_GOING_UP:
+ return IB_PORTPHYSSTATE_POLLING;
+ case HLS_GOING_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_LINK_COOLDOWN:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case HLS_DN_DOWNDEF:
+ default:
+ dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
+ ppd->host_link_state);
+ return -1;
+ }
+}
+
+/*
+ * driver_logical_state - convert the driver's notion of a port's
+ * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
+ * (converted to a u32) to indicate error.
+ */
+u32 driver_logical_state(struct hfi1_pportdata *ppd)
+{
+ if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
+ return IB_PORT_DOWN;
+
+ switch (ppd->host_link_state & HLS_UP) {
+ case HLS_UP_INIT:
+ return IB_PORT_INIT;
+ case HLS_UP_ARMED:
+ return IB_PORT_ARMED;
+ case HLS_UP_ACTIVE:
+ return IB_PORT_ACTIVE;
+ default:
+ dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
+ ppd->host_link_state);
+ return -1;
+ }
+}
+
+void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
+ u8 neigh_reason, u8 rem_reason)
+{
+ if (ppd->local_link_down_reason.latest == 0 &&
+ ppd->neigh_link_down_reason.latest == 0) {
+ ppd->local_link_down_reason.latest = lcl_reason;
+ ppd->neigh_link_down_reason.latest = neigh_reason;
+ ppd->remote_link_down_reason = rem_reason;
+ }
+}
+
+/*
+ * Change the physical and/or logical link state.
+ *
+ * Do not call this routine while inside an interrupt. It contains
+ * calls to routines that can take multiple seconds to finish.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int set_link_state(struct hfi1_pportdata *ppd, u32 state)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct ib_event event = {.device = NULL};
+ int ret1, ret = 0;
+ int was_up, is_down;
+ int orig_new_state, poll_bounce;
+
+ mutex_lock(&ppd->hls_lock);
+
+ orig_new_state = state;
+ if (state == HLS_DN_DOWNDEF)
+ state = dd->link_default;
+
+ /* interpret poll -> poll as a link bounce */
+ poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
+ state == HLS_DN_POLL;
+
+ dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
+ link_state_name(ppd->host_link_state),
+ link_state_name(orig_new_state),
+ poll_bounce ? "(bounce) " : "",
+ link_state_reason_name(ppd, state));
+
+ was_up = !!(ppd->host_link_state & HLS_UP);
+
+ /*
+ * If we're going to a (HLS_*) link state that implies the logical
+ * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
+ * reset is_sm_config_started to 0.
+ */
+ if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
+ ppd->is_sm_config_started = 0;
+
+ /*
+ * Do nothing if the states match. Let a poll-to-poll link
+ * bounce go through.
+ */
+ if (ppd->host_link_state == state && !poll_bounce)
+ goto done;
+
+ switch (state) {
+ case HLS_UP_INIT:
+ if (ppd->host_link_state == HLS_DN_POLL &&
+ (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
+ /*
+ * Quick link up jumps from polling to here.
+ *
+ * Whether in normal or loopback mode, the
+ * simulator jumps from polling to link up.
+ * Accept that here.
+ */
+ /* OK */;
+ } else if (ppd->host_link_state != HLS_GOING_UP) {
+ goto unexpected;
+ }
+
+ ppd->host_link_state = HLS_UP_INIT;
+ ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
+ if (ret) {
+ /* logical state didn't change, stay at going_up */
+ ppd->host_link_state = HLS_GOING_UP;
+ dd_dev_err(dd,
+ "%s: logical state did not change to INIT\n",
+ __func__);
+ } else {
+ /* clear old transient LINKINIT_REASON code */
+ if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
+ ppd->linkinit_reason =
+ OPA_LINKINIT_REASON_LINKUP;
+
+ /* enable the port */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ handle_linkup_change(dd, 1);
+ }
+ break;
+ case HLS_UP_ARMED:
+ if (ppd->host_link_state != HLS_UP_INIT)
+ goto unexpected;
+
+ ppd->host_link_state = HLS_UP_ARMED;
+ set_logical_state(dd, LSTATE_ARMED);
+ ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
+ if (ret) {
+ /* logical state didn't change, stay at init */
+ ppd->host_link_state = HLS_UP_INIT;
+ dd_dev_err(dd,
+ "%s: logical state did not change to ARMED\n",
+ __func__);
+ }
+ /*
+ * The simulator does not currently implement SMA messages,
+ * so neighbor_normal is not set. Set it here when we first
+ * move to Armed.
+ */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ ppd->neighbor_normal = 1;
+ break;
+ case HLS_UP_ACTIVE:
+ if (ppd->host_link_state != HLS_UP_ARMED)
+ goto unexpected;
+
+ ppd->host_link_state = HLS_UP_ACTIVE;
+ set_logical_state(dd, LSTATE_ACTIVE);
+ ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
+ if (ret) {
+ /* logical state didn't change, stay at armed */
+ ppd->host_link_state = HLS_UP_ARMED;
+ dd_dev_err(dd,
+ "%s: logical state did not change to ACTIVE\n",
+ __func__);
+ } else {
+ /* tell all engines to go running */
+ sdma_all_running(dd);
+
+ /* Signal the IB layer that the port has gone active */
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = IB_EVENT_PORT_ACTIVE;
+ }
+ break;
+ case HLS_DN_POLL:
+ if ((ppd->host_link_state == HLS_DN_DISABLE ||
+ ppd->host_link_state == HLS_DN_OFFLINE) &&
+ dd->dc_shutdown)
+ dc_start(dd);
+ /* Hand LED control to the DC */
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0);
+
+ if (ppd->host_link_state != HLS_DN_OFFLINE) {
+ u8 tmp = ppd->link_enabled;
+
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (ret) {
+ ppd->link_enabled = tmp;
+ break;
+ }
+ ppd->remote_link_down_reason = 0;
+
+ if (ppd->driver_link_ready)
+ ppd->link_enabled = 1;
+ }
+
+ ret = set_local_link_attributes(ppd);
+ if (ret)
+ break;
+
+ ppd->port_error_action = 0;
+ ppd->host_link_state = HLS_DN_POLL;
+
+ if (quick_linkup) {
+ /* quick linkup does not go into polling */
+ ret = do_quick_linkup(dd);
+ } else {
+ ret1 = set_physical_link_state(dd, PLS_POLLING);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Polling link state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ }
+ }
+ ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
+ /*
+ * If an error occurred above, go back to offline. The
+ * caller may reschedule another attempt.
+ */
+ if (ret)
+ goto_offline(ppd, 0);
+ break;
+ case HLS_DN_DISABLE:
+ /* link is disabled */
+ ppd->link_enabled = 0;
+
+ /* allow any state to transition to disabled */
+
+ /* must transition to offline first */
+ if (ppd->host_link_state != HLS_DN_OFFLINE) {
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (ret)
+ break;
+ ppd->remote_link_down_reason = 0;
+ }
+
+ ret1 = set_physical_link_state(dd, PLS_DISABLED);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Disabled link state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ break;
+ }
+ ppd->host_link_state = HLS_DN_DISABLE;
+ dc_shutdown(dd);
+ break;
+ case HLS_DN_OFFLINE:
+ if (ppd->host_link_state == HLS_DN_DISABLE)
+ dc_start(dd);
+
+ /* allow any state to transition to offline */
+ ret = goto_offline(ppd, ppd->remote_link_down_reason);
+ if (!ret)
+ ppd->remote_link_down_reason = 0;
+ break;
+ case HLS_VERIFY_CAP:
+ if (ppd->host_link_state != HLS_DN_POLL)
+ goto unexpected;
+ ppd->host_link_state = HLS_VERIFY_CAP;
+ break;
+ case HLS_GOING_UP:
+ if (ppd->host_link_state != HLS_VERIFY_CAP)
+ goto unexpected;
+
+ ret1 = set_physical_link_state(dd, PLS_LINKUP);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to link up state, return 0x%x\n",
+ ret1);
+ ret = -EINVAL;
+ break;
+ }
+ ppd->host_link_state = HLS_GOING_UP;
+ break;
+
+ case HLS_GOING_OFFLINE: /* transient within goto_offline() */
+ case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
+ default:
+ dd_dev_info(dd, "%s: state 0x%x: not supported\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
+ HLS_DN_DISABLE | HLS_DN_OFFLINE));
+
+ if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
+ ppd->neigh_link_down_reason.sma == 0) {
+ ppd->local_link_down_reason.sma =
+ ppd->local_link_down_reason.latest;
+ ppd->neigh_link_down_reason.sma =
+ ppd->neigh_link_down_reason.latest;
+ }
+
+ goto done;
+
+unexpected:
+ dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
+ __func__, link_state_name(ppd->host_link_state),
+ link_state_name(state));
+ ret = -EINVAL;
+
+done:
+ mutex_unlock(&ppd->hls_lock);
+
+ if (event.device)
+ ib_dispatch_event(&event);
+
+ return ret;
+}
+
+int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
+{
+ u64 reg;
+ int ret = 0;
+
+ switch (which) {
+ case HFI1_IB_CFG_LIDLMC:
+ set_lidlmc(ppd);
+ break;
+ case HFI1_IB_CFG_VL_HIGH_LIMIT:
+ /*
+ * The VL Arbitrator high limit is sent in units of 4k
+ * bytes, while HFI stores it in units of 64 bytes.
+ */
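+ /* e.g. a limit of 2 (8 KB) becomes 2 * 64 = 128 64-byte units */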
+ val *= 4096 / 64;
+ reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
+ << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
+ write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
+ break;
+ case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* HFI only supports POLL as the default link down state */
+ if (val != HLS_DN_POLL)
+ ret = -EINVAL;
+ break;
+ case HFI1_IB_CFG_OP_VLS:
+ if (ppd->vls_operational != val) {
+ ppd->vls_operational = val;
+ if (!ppd->port)
+ ret = -EINVAL;
+ else
+ ret = sdma_map_init(
+ ppd->dd,
+ ppd->port - 1,
+ val,
+ NULL);
+ }
+ break;
+ /*
+ * For link width, link width downgrade, and speed enable, always AND
+ * the setting with what is actually supported. This has two benefits.
+ * First, enabled can't have unsupported values, no matter what the
+ * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
+ * "fill in with your supported value" have all the bits in the
+ * field set, so simply ANDing with supported has the desired result.
+ */
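+ /*
+ * For example, a wildcard enable value of all ones ANDed with a
+ * supported mask of 0x3 leaves exactly those two widths enabled.
+ */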
+ case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ ppd->link_width_enabled = val & ppd->link_width_supported;
+ break;
+ case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
+ ppd->link_width_downgrade_enabled =
+ val & ppd->link_width_downgrade_supported;
+ break;
+ case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
+ ppd->link_speed_enabled = val & ppd->link_speed_supported;
+ break;
+ case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ /*
+ * HFI does not follow IB specs, save this value
+ * so we can report it, if asked.
+ */
+ ppd->overrun_threshold = val;
+ break;
+ case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ /*
+ * HFI does not follow IB specs, save this value
+ * so we can report it, if asked.
+ */
+ ppd->phy_error_threshold = val;
+ break;
+
+ case HFI1_IB_CFG_MTU:
+ set_send_length(ppd);
+ break;
+
+ case HFI1_IB_CFG_PKEYS:
+ if (HFI1_CAP_IS_KSET(PKEY_CHECK))
+ set_partition_keys(ppd);
+ break;
+
+ default:
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(ppd->dd,
+ "%s: which %s, val 0x%x: not implemented\n",
+ __func__, ib_cfg_name(which), val);
+ break;
+ }
+ return ret;
+}
+
+/* begin functions related to vl arbitration table caching */
+static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
+{
+ int i;
+
+ BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
+ VL_ARB_LOW_PRIO_TABLE_SIZE);
+ BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
+ VL_ARB_HIGH_PRIO_TABLE_SIZE);
+
+ /*
+ * Note that we always return values directly from the
+ * 'vl_arb_cache' (and do no CSR reads) in response to a
+ * 'Get(VLArbTable)'. This is obviously correct after a
+ * 'Set(VLArbTable)', since the cache will then be up to
+ * date. But it's also correct prior to any 'Set(VLArbTable)'
+ * since then both the cache, and the relevant h/w registers
+ * will be zeroed.
+ */
+
+ for (i = 0; i < MAX_PRIO_TABLE; i++)
+ spin_lock_init(&ppd->vl_arb_cache[i].lock);
+}
+
+/*
+ * vl_arb_lock_cache
+ *
+ * All other vl_arb_* functions should be called only after locking
+ * the cache.
+ */
+static inline struct vl_arb_cache *
+vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
+{
+ if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
+ return NULL;
+ spin_lock(&ppd->vl_arb_cache[idx].lock);
+ return &ppd->vl_arb_cache[idx];
+}
+
+static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
+{
+ spin_unlock(&ppd->vl_arb_cache[idx].lock);
+}
+
+static void vl_arb_get_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+
+static void vl_arb_set_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+
+static int vl_arb_match_cache(struct vl_arb_cache *cache,
+ struct ib_vl_weight_elem *vl)
+{
+ return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
+}
+/* end functions related to vl arbitration table caching */
+
+static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
+ u32 size, struct ib_vl_weight_elem *vl)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+ unsigned int i, is_up = 0;
+ int drain, ret = 0;
+
+ mutex_lock(&ppd->hls_lock);
+
+ if (ppd->host_link_state & HLS_UP)
+ is_up = 1;
+
+ drain = !is_ax(dd) && is_up;
+
+ if (drain)
+ /*
+ * Before adjusting VL arbitration weights, empty per-VL
+ * FIFOs, otherwise a packet whose VL weight is being
+ * set to 0 could get stuck in a FIFO with no chance to
+ * egress.
+ */
+ ret = stop_drain_data_vls(dd);
+
+ if (ret) {
+ dd_dev_err(
+ dd,
+ "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
+ __func__);
+ goto err;
+ }
+
+ for (i = 0; i < size; i++, vl++) {
+ /*
+ * NOTE: The low priority shift and mask are used here, but
+ * they are the same for both the low and high registers.
+ */
+ reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
+ << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
+ | (((u64)vl->weight
+ & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
+ << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
+ write_csr(dd, target + (i * 8), reg);
+ }
+ pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
+
+ if (drain)
+ open_fill_data_vls(dd); /* reopen all VLs */
+
+err:
+ mutex_unlock(&ppd->hls_lock);
+
+ return ret;
+}
+
+/*
+ * Read one credit merge VL register.
+ */
+static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
+ struct vl_limit *vll)
+{
+ u64 reg = read_csr(dd, csr);
+
+ vll->dedicated = cpu_to_be16(
+ (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
+ & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
+ vll->shared = cpu_to_be16(
+ (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
+ & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
+}
+
+/*
+ * Read the current credit merge limits.
+ */
+static int get_buffer_control(struct hfi1_devdata *dd,
+ struct buffer_control *bc, u16 *overall_limit)
+{
+ u64 reg;
+ int i;
+
+ /* not all entries are filled in */
+ memset(bc, 0, sizeof(*bc));
+
+ /* OPA and HFI have a 1-1 mapping */
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
+
+ /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
+ read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ bc->overall_shared_limit = cpu_to_be16(
+ (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
+ & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
+ if (overall_limit)
+ *overall_limit = (reg
+ >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
+ & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
+ return sizeof(struct buffer_control);
+}
+
+static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
+{
+ u64 reg;
+ int i;
+
+ /* each register contains 16 SC->VLnt mappings, 4 bits each */
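+ /* e.g. a register byte of 0x21 unpacks to VLnt entries 1 then 2 */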
+ reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
+ for (i = 0; i < sizeof(u64); i++) {
+ u8 byte = *(((u8 *)&reg) + i);
+
+ dp->vlnt[2 * i] = byte & 0xf;
+ dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
+ }
+
+ reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
+ for (i = 0; i < sizeof(u64); i++) {
+ u8 byte = *(((u8 *)&reg) + i);
+
+ dp->vlnt[16 + (2 * i)] = byte & 0xf;
+ dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
+ }
+ return sizeof(struct sc2vlnt);
+}
+
+static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned int i;
+
+ for (i = 0; i < nelems; i++, vl++) {
+ vl->vl = 0xf;
+ vl->weight = 0;
+ }
+}
+
+static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
+{
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
+ DC_SC_VL_VAL(15_0,
+ 0, dp->vlnt[0] & 0xf,
+ 1, dp->vlnt[1] & 0xf,
+ 2, dp->vlnt[2] & 0xf,
+ 3, dp->vlnt[3] & 0xf,
+ 4, dp->vlnt[4] & 0xf,
+ 5, dp->vlnt[5] & 0xf,
+ 6, dp->vlnt[6] & 0xf,
+ 7, dp->vlnt[7] & 0xf,
+ 8, dp->vlnt[8] & 0xf,
+ 9, dp->vlnt[9] & 0xf,
+ 10, dp->vlnt[10] & 0xf,
+ 11, dp->vlnt[11] & 0xf,
+ 12, dp->vlnt[12] & 0xf,
+ 13, dp->vlnt[13] & 0xf,
+ 14, dp->vlnt[14] & 0xf,
+ 15, dp->vlnt[15] & 0xf));
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
+ DC_SC_VL_VAL(31_16,
+ 16, dp->vlnt[16] & 0xf,
+ 17, dp->vlnt[17] & 0xf,
+ 18, dp->vlnt[18] & 0xf,
+ 19, dp->vlnt[19] & 0xf,
+ 20, dp->vlnt[20] & 0xf,
+ 21, dp->vlnt[21] & 0xf,
+ 22, dp->vlnt[22] & 0xf,
+ 23, dp->vlnt[23] & 0xf,
+ 24, dp->vlnt[24] & 0xf,
+ 25, dp->vlnt[25] & 0xf,
+ 26, dp->vlnt[26] & 0xf,
+ 27, dp->vlnt[27] & 0xf,
+ 28, dp->vlnt[28] & 0xf,
+ 29, dp->vlnt[29] & 0xf,
+ 30, dp->vlnt[30] & 0xf,
+ 31, dp->vlnt[31] & 0xf));
+}
+
+static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
+ u16 limit)
+{
+ if (limit != 0)
+ dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
+ what, (int)limit, idx);
+}
+
+/* change only the shared limit portion of SendCmGlobalCredit */
+static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
+{
+ u64 reg;
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
+ reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+}
+
+/* change only the total credit limit portion of SendCmGlobalCredit */
+static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
+{
+ u64 reg;
+
+ reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+ reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
+ reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
+}
+
+/* set the given per-VL shared limit */
+static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
+{
+ u64 reg;
+ u32 addr;
+
+ if (vl < TXE_NUM_DATA_VL)
+ addr = SEND_CM_CREDIT_VL + (8 * vl);
+ else
+ addr = SEND_CM_CREDIT_VL15;
+
+ reg = read_csr(dd, addr);
+ reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
+ reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
+ write_csr(dd, addr, reg);
+}
+
+/* set the given per-VL dedicated limit */
+static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
+{
+ u64 reg;
+ u32 addr;
+
+ if (vl < TXE_NUM_DATA_VL)
+ addr = SEND_CM_CREDIT_VL + (8 * vl);
+ else
+ addr = SEND_CM_CREDIT_VL15;
+
+ reg = read_csr(dd, addr);
+ reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
+ reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
+ write_csr(dd, addr, reg);
+}
+
+/* spin until the given per-VL status mask bits clear */
+static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
+ const char *which)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
+
+ if (reg == 0)
+ return; /* success */
+ if (time_after(jiffies, timeout))
+ break; /* timed out */
+ udelay(1);
+ }
+
+ dd_dev_err(dd,
+ "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
+ which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
+ /*
+ * If this occurs, it is likely there was a credit loss on the link.
+ * The only recovery from that is a link bounce.
+ */
+ dd_dev_err(dd,
+ "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
+}
+
+/*
+ * The number of credits on the VLs may be changed while everything
+ * is "live", but the following algorithm must be followed due to
+ * how the hardware is actually implemented. In particular,
+ * Return_Credit_Status[] is the only correct status check.
+ *
+ * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
+ * set Global_Shared_Credit_Limit = 0
+ * use_all_vl = 1
+ * mask0 = all VLs that are changing either dedicated or shared limits
+ * set Shared_Limit[mask0] = 0
+ * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
+ * if (changing any dedicated limit)
+ * mask1 = all VLs that are lowering dedicated limits
+ * lower Dedicated_Limit[mask1]
+ * spin until Return_Credit_Status[mask1] == 0
+ * raise Dedicated_Limits
+ * raise Shared_Limits
+ * raise Global_Shared_Credit_Limit
+ *
+ * lower = if the new limit is lower, set the limit to the new value
+ * raise = if the new limit is higher than the current value (which may
+ * have been changed earlier in the algorithm), set the limit to the
+ * new value
+ */
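+/*
+ * Worked example (illustrative values): lowering a VL's shared limit
+ * while raising its dedicated limit. Because a shared limit is
+ * changing, Global_Shared_Credit_Limit is first forced to 0 and the
+ * VL's Shared_Limit is zeroed; the code then spins until the credits
+ * are returned, applies the dedicated change (a raise, so no extra
+ * wait), and finally restores the shared and global shared limits to
+ * their new values.
+ */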
+static int set_buffer_control(struct hfi1_devdata *dd,
+ struct buffer_control *new_bc)
+{
+ u64 changing_mask, ld_mask, stat_mask;
+ int change_count;
+ int i, use_all_mask;
+ int this_shared_changing;
+ /*
+ * A0: the variable any_shared_limit_changing below is needed
+ * here and in the algorithm above only for A0 hardware; it can
+ * be removed along with A0 support.
+ */
+ int any_shared_limit_changing;
+ struct buffer_control cur_bc;
+ u8 changing[OPA_MAX_VLS];
+ u8 lowering_dedicated[OPA_MAX_VLS];
+ u16 cur_total;
+ u32 new_total = 0;
+ const u64 all_mask =
+ SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
+ | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
+
+#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
+#define NUM_USABLE_VLS 16 /* look at VL15 and less */
+
+ /* find the new total credits, do sanity check on unused VLs */
+ for (i = 0; i < OPA_MAX_VLS; i++) {
+ if (valid_vl(i)) {
+ new_total += be16_to_cpu(new_bc->vl[i].dedicated);
+ continue;
+ }
+ nonzero_msg(dd, i, "dedicated",
+ be16_to_cpu(new_bc->vl[i].dedicated));
+ nonzero_msg(dd, i, "shared",
+ be16_to_cpu(new_bc->vl[i].shared));
+ new_bc->vl[i].dedicated = 0;
+ new_bc->vl[i].shared = 0;
+ }
+ new_total += be16_to_cpu(new_bc->overall_shared_limit);
+ if (new_total > (u32)dd->link_credits)
+ return -EINVAL;
+ /* fetch the current values */
+ get_buffer_control(dd, &cur_bc, &cur_total);
+
+ /*
+ * Create the masks we will use.
+ */
+ memset(changing, 0, sizeof(changing));
+ memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
+ /*
+ * NOTE: Assumes that the individual VL bits are adjacent and in
+ * increasing order.
+ */
+ stat_mask =
+ SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
+ changing_mask = 0;
+ ld_mask = 0;
+ change_count = 0;
+ any_shared_limit_changing = 0;
+ for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
+ if (!valid_vl(i))
+ continue;
+ this_shared_changing = new_bc->vl[i].shared
+ != cur_bc.vl[i].shared;
+ if (this_shared_changing)
+ any_shared_limit_changing = 1;
+ if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
+ || this_shared_changing) {
+ changing[i] = 1;
+ changing_mask |= stat_mask;
+ change_count++;
+ }
+ if (be16_to_cpu(new_bc->vl[i].dedicated) <
+ be16_to_cpu(cur_bc.vl[i].dedicated)) {
+ lowering_dedicated[i] = 1;
+ ld_mask |= stat_mask;
+ }
+ }
+
+ /* bracket the credit change with a total adjustment */
+ if (new_total > cur_total)
+ set_global_limit(dd, new_total);
+
+ /*
+ * Start the credit change algorithm.
+ */
+ use_all_mask = 0;
+ if ((be16_to_cpu(new_bc->overall_shared_limit) <
+ be16_to_cpu(cur_bc.overall_shared_limit))
+ || (is_a0(dd) && any_shared_limit_changing)) {
+ set_global_shared(dd, 0);
+ cur_bc.overall_shared_limit = 0;
+ use_all_mask = 1;
+ }
+
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (changing[i]) {
+ set_vl_shared(dd, i, 0);
+ cur_bc.vl[i].shared = 0;
+ }
+ }
+
+ wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
+ "shared");
+
+ if (change_count > 0) {
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (lowering_dedicated[i]) {
+ set_vl_dedicated(dd, i,
+ be16_to_cpu(new_bc->vl[i].dedicated));
+ cur_bc.vl[i].dedicated =
+ new_bc->vl[i].dedicated;
+ }
+ }
+
+ wait_for_vl_status_clear(dd, ld_mask, "dedicated");
+
+ /* now raise all dedicated that are going up */
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (be16_to_cpu(new_bc->vl[i].dedicated) >
+ be16_to_cpu(cur_bc.vl[i].dedicated))
+ set_vl_dedicated(dd, i,
+ be16_to_cpu(new_bc->vl[i].dedicated));
+ }
+ }
+
+ /* next raise all shared that are going up */
+ for (i = 0; i < NUM_USABLE_VLS; i++) {
+ if (!valid_vl(i))
+ continue;
+
+ if (be16_to_cpu(new_bc->vl[i].shared) >
+ be16_to_cpu(cur_bc.vl[i].shared))
+ set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
+ }
+
+ /* finally raise the global shared */
+ if (be16_to_cpu(new_bc->overall_shared_limit) >
+ be16_to_cpu(cur_bc.overall_shared_limit))
+ set_global_shared(dd,
+ be16_to_cpu(new_bc->overall_shared_limit));
+
+ /* bracket the credit change with a total adjustment */
+ if (new_total < cur_total)
+ set_global_limit(dd, new_total);
+ return 0;
+}
+
+/*
+ * Read the given fabric manager table. Return the size of the
+ * table (in bytes) on success, and a negative error code on
+ * failure.
+ */
+int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
+{
+ int size;
+ struct vl_arb_cache *vlc;
+
+ switch (which) {
+ case FM_TBL_VL_HIGH_ARB:
+ size = 256;
+ /*
+ * OPA specifies 128 elements (of 2 bytes each), though
+ * HFI supports only 16 elements in h/w.
+ */
+ vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
+ vl_arb_get_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ break;
+ case FM_TBL_VL_LOW_ARB:
+ size = 256;
+ /*
+ * OPA specifies 128 elements (of 2 bytes each), though
+ * HFI supports only 16 elements in h/w.
+ */
+ vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
+ vl_arb_get_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ break;
+ case FM_TBL_BUFFER_CONTROL:
+ size = get_buffer_control(ppd->dd, t, NULL);
+ break;
+ case FM_TBL_SC2VLNT:
+ size = get_sc2vlnt(ppd->dd, t);
+ break;
+ case FM_TBL_VL_PREEMPT_ELEMS:
+ size = 256;
+ /* OPA specifies 128 elements, of 2 bytes each */
+ get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
+ break;
+ case FM_TBL_VL_PREEMPT_MATRIX:
+ size = 256;
+ /*
+ * OPA specifies that this is the same size as the VL
+ * arbitration tables (i.e., 256 bytes).
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return size;
+}
+
+/*
+ * Write the given fabric manager table.
+ */
+int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
+{
+ int ret = 0;
+ struct vl_arb_cache *vlc;
+
+ switch (which) {
+ case FM_TBL_VL_HIGH_ARB:
+ vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
+ if (vl_arb_match_cache(vlc, t)) {
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ break;
+ }
+ vl_arb_set_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
+ ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
+ VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
+ break;
+ case FM_TBL_VL_LOW_ARB:
+ vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
+ if (vl_arb_match_cache(vlc, t)) {
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ break;
+ }
+ vl_arb_set_cache(vlc, t);
+ vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
+ ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
+ VL_ARB_LOW_PRIO_TABLE_SIZE, t);
+ break;
+ case FM_TBL_BUFFER_CONTROL:
+ ret = set_buffer_control(ppd->dd, t);
+ break;
+ case FM_TBL_SC2VLNT:
+ set_sc2vlnt(ppd->dd, t);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Disable all data VLs.
+ *
+ * Return 0 if disabled, non-zero if the VLs cannot be disabled.
+ */
+static int disable_data_vls(struct hfi1_devdata *dd)
+{
+ if (is_a0(dd))
+ return 1;
+
+ pio_send_control(dd, PSC_DATA_VL_DISABLE);
+
+ return 0;
+}
+
+/*
+ * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
+ * Just re-enables all data VLs (the "fill" part happens
+ * automatically - the name was chosen for symmetry with
+ * stop_drain_data_vls()).
+ *
+ * Return 0 if successful, non-zero if the VLs cannot be enabled.
+ */
+int open_fill_data_vls(struct hfi1_devdata *dd)
+{
+ if (is_a0(dd))
+ return 1;
+
+ pio_send_control(dd, PSC_DATA_VL_ENABLE);
+
+ return 0;
+}
+
+/*
+ * drain_data_vls() - assumes that disable_data_vls() has been called;
+ * waits for the occupancy (of the per-VL FIFOs) of all contexts and
+ * SDMA engines to drop to 0.
+ */
+static void drain_data_vls(struct hfi1_devdata *dd)
+{
+ sc_wait(dd);
+ sdma_wait(dd);
+ pause_for_credit_return(dd);
+}
+
+/*
+ * stop_drain_data_vls() - disable, then drain all per-VL fifos.
+ *
+ * Use open_fill_data_vls() to resume using data VLs. This pair is
+ * meant to be used like this:
+ *
+ * stop_drain_data_vls(dd);
+ * // do things with per-VL resources
+ * open_fill_data_vls(dd);
+ */
+int stop_drain_data_vls(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ ret = disable_data_vls(dd);
+ if (ret == 0)
+ drain_data_vls(dd);
+
+ return ret;
+}
+
+/*
+ * Convert a nanosecond time to a cclock count. No matter how slow
+ * the cclock, a non-zero ns will always have a non-zero result.
+ */
+u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
+{
+ u32 cclocks;
+
+ if (dd->icode == ICODE_FPGA_EMULATION)
+ cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
+ else /* simulation pretends to be ASIC */
+ cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
+ if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
+ cclocks = 1;
+ return cclocks;
+}
+
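+/*
+ * Worked example of the conversion above (editorial note; the real
+ * picosecond constants live in the register headers, so the period
+ * used here is hypothetical): with a cclock period of 2000ps,
+ * ns_to_cclock(dd, 1) computes (1 * 1000) / 2000 == 0 and the final
+ * clamp returns 1, preserving the "non-zero in, non-zero out"
+ * guarantee documented above.
+ */
+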
+/*
+ * Convert a cclock count to nanoseconds. No matter how slow
+ * the cclock, a non-zero cclocks will always have a non-zero result.
+ */
+u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
+{
+ u32 ns;
+
+ if (dd->icode == ICODE_FPGA_EMULATION)
+ ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
+ else /* simulation pretends to be ASIC */
+ ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
+ if (cclocks && !ns)
+ ns = 1;
+ return ns;
+}
+
+/*
+ * Dynamically adjust the receive interrupt timeout for a context based on
+ * incoming packet rate.
+ *
+ * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
+ */
+static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 timeout = rcd->rcvavail_timeout;
+
+ /*
+	 * This algorithm doubles or halves the timeout depending on whether
+	 * the number of packets received in this interrupt was less than,
+	 * or greater than or equal to, the interrupt count.
+	 *
+	 * The calculations below do not allow a steady state to be achieved.
+	 * Only at the endpoints is it possible to have an unchanging
+	 * timeout.
+ */
+ if (npkts < rcv_intr_count) {
+ /*
+ * Not enough packets arrived before the timeout, adjust
+ * timeout downward.
+ */
+ if (timeout < 2) /* already at minimum? */
+ return;
+ timeout >>= 1;
+ } else {
+ /*
+ * More than enough packets arrived before the timeout, adjust
+ * timeout upward.
+ */
+ if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
+ return;
+ timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
+ }
+
+ rcd->rcvavail_timeout = timeout;
+	/*
+	 * The timeout cannot be larger than rcv_intr_timeout_csr, which
+	 * has already been verified to be in range.
+	 */
+ write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
+ (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
+}
+
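+/*
+ * Illustrative trace of the doubling/halving above (editorial sketch,
+ * hypothetical numbers): with rcv_intr_count == 16 and a current
+ * timeout of 8, an interrupt that saw 4 packets halves the timeout to
+ * 4, while one that saw 20 packets doubles it to 16, capped at
+ * dd->rcv_intr_timeout_csr.
+ */
+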
+void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
+ u32 intr_adjust, u32 npkts)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u64 reg;
+ u32 ctxt = rcd->ctxt;
+
+ /*
+ * Need to write timeout register before updating RcvHdrHead to ensure
+ * that a new value is used when the HW decides to restart counting.
+ */
+ if (intr_adjust)
+ adjust_rcv_timeout(rcd, npkts);
+ if (updegr) {
+ reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
+ << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
+ write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
+ }
+ mmiowb();
+ reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
+ (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
+ << RCV_HDR_HEAD_HEAD_SHIFT);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
+ mmiowb();
+}
+
+u32 hdrqempty(struct hfi1_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
+ & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
+
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = get_rcvhdrtail(rcd);
+ else
+ tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
+
+ return head == tail;
+}
+
+/*
+ * Context Control and Receive Array encoding for buffer size:
+ * 0x0 invalid
+ * 0x1 4 KB
+ * 0x2 8 KB
+ * 0x3 16 KB
+ * 0x4 32 KB
+ * 0x5 64 KB
+ * 0x6 128 KB
+ * 0x7 256 KB
+ * 0x8 512 KB (Receive Array only)
+ * 0x9 1 MB (Receive Array only)
+ * 0xa 2 MB (Receive Array only)
+ *
+ * 0xb-0xf reserved (Receive Array only)
+ *
+ * This routine assumes that the value has already been sanity checked.
+ */
+static u32 encoded_size(u32 size)
+{
+ switch (size) {
+ case 4*1024: return 0x1;
+ case 8*1024: return 0x2;
+ case 16*1024: return 0x3;
+ case 32*1024: return 0x4;
+ case 64*1024: return 0x5;
+ case 128*1024: return 0x6;
+ case 256*1024: return 0x7;
+ case 512*1024: return 0x8;
+ case 1*1024*1024: return 0x9;
+ case 2*1024*1024: return 0xa;
+ }
+ return 0x1; /* if invalid, go with the minimum size */
+}
+
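+/*
+ * Editorial note: for the valid power-of-two sizes above, the encoding
+ * is equivalent to ilog2(size / 4096) + 1 (e.g. 4KB -> 0x1,
+ * 2MB -> 0xa); the explicit switch is kept for the invalid-size
+ * fallback.
+ */
+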
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
+{
+ struct hfi1_ctxtdata *rcd;
+ u64 rcvctrl, reg;
+ int did_enable = 0;
+
+ rcd = dd->rcd[ctxt];
+ if (!rcd)
+ return;
+
+ hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
+
+ rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
+	/* if the context is already enabled, don't do the extra steps */
+ if ((op & HFI1_RCVCTRL_CTXT_ENB)
+ && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
+ /* reset the tail and hdr addresses, and sequence count */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
+ rcd->rcvhdrq_phys);
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ rcd->rcvhdrqtailaddr_phys);
+ rcd->seq_cnt = 1;
+
+ /* reset the cached receive header queue head value */
+ rcd->head = 0;
+
+ /*
+ * Zero the receive header queue so we don't get false
+ * positives when checking the sequence number. The
+ * sequence numbers could land exactly on the same spot.
+ * E.g. a rcd restart before the receive header wrapped.
+ */
+ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+
+ /* starting timeout */
+ rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
+
+ /* enable the context */
+ rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
+
+ /* clean the egr buffer size first */
+ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
+ rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
+ & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
+ << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
+
+ /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
+ did_enable = 1;
+
+ /* zero RcvEgrIndexHead */
+ write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
+
+ /* set eager count and base index */
+ reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
+ & RCV_EGR_CTRL_EGR_CNT_MASK)
+ << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
+ (((rcd->eager_base >> RCV_SHIFT)
+ & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
+ << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
+ write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
+
+ /*
+ * Set TID (expected) count and base index.
+ * rcd->expected_count is set to individual RcvArray entries,
+ * not pairs, and the CSR takes a pair-count in groups of
+ * four, so divide by 8.
+ */
+ reg = (((rcd->expected_count >> RCV_SHIFT)
+ & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
+ << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
+ (((rcd->expected_base >> RCV_SHIFT)
+ & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
+ << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
+ write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
+ if (ctxt == VL15CTXT)
+ write_csr(dd, RCV_VL15, VL15CTXT);
+ }
+ if (op & HFI1_RCVCTRL_CTXT_DIS) {
+ write_csr(dd, RCV_VL15, 0);
+ rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
+ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ if (op & HFI1_RCVCTRL_TAILUPD_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
+ if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
+ if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
+		/*
+		 * In one-packet-per-eager mode, the size comes from
+		 * the RcvArray entry.
+		 */
+ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
+ rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
+ }
+ if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
+ if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
+ rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ rcd->rcvctrl = rcvctrl;
+ hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
+ write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
+
+ /* work around sticky RcvCtxtStatus.BlockedRHQFull */
+ if (did_enable
+ && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
+ reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
+ if (reg != 0) {
+ dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
+ ctxt, reg);
+ read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
+ read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
+ reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
+ dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
+ ctxt, reg, reg == 0 ? "not" : "still");
+ }
+ }
+
+ if (did_enable) {
+ /*
+ * The interrupt timeout and count must be set after
+ * the context is enabled to take effect.
+ */
+ /* set interrupt timeout */
+ write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
+ (u64)rcd->rcvavail_timeout <<
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
+
+ /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
+ reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
+ write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
+ }
+
+ if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
+ /*
+ * If the context has been disabled and the Tail Update has
+ * been cleared, clear the RCV_HDR_TAIL_ADDR CSR so
+ * it doesn't contain an address that is invalid.
+ */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 0);
+}
+
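+/*
+ * Editorial usage sketch for hfi1_rcvctrl() (hypothetical call site):
+ * the op bits are independent, so a context and its interrupt can be
+ * enabled in one call, e.g.
+ *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
+ *		     rcd->ctxt);
+ */
+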
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ int ret;
+ u64 val = 0;
+
+ if (namep) {
+ ret = dd->cntrnameslen;
+ if (pos != 0) {
+ dd_dev_err(dd, "read_cntrs does not support indexing");
+ return 0;
+ }
+ *namep = dd->cntrnames;
+ } else {
+ const struct cntr_entry *entry;
+ int i, j;
+
+ ret = (dd->ndevcntrs) * sizeof(u64);
+ if (pos != 0) {
+ dd_dev_err(dd, "read_cntrs does not support indexing");
+ return 0;
+ }
+
+ /* Get the start of the block of counters */
+ *cntrp = dd->cntrs;
+
+ /*
+ * Now go and fill in each counter in the block.
+ */
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ entry = &dev_cntrs[i];
+ hfi1_cdbg(CNTR, "reading %s", entry->name);
+ if (entry->flags & CNTR_DISABLED) {
+ /* Nothing */
+ hfi1_cdbg(CNTR, "\tDisabled\n");
+ } else {
+ if (entry->flags & CNTR_VL) {
+ hfi1_cdbg(CNTR, "\tPer VL\n");
+ for (j = 0; j < C_VL_COUNT; j++) {
+ val = entry->rw_cntr(entry,
+ dd, j,
+ CNTR_MODE_R,
+ 0);
+ hfi1_cdbg(
+ CNTR,
+ "\t\tRead 0x%llx for %d\n",
+ val, j);
+ dd->cntrs[entry->offset + j] =
+ val;
+ }
+ } else {
+ val = entry->rw_cntr(entry, dd,
+ CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+ dd->cntrs[entry->offset] = val;
+ hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
+ }
+ }
+ }
+ }
+ return ret;
+}
+
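+/*
+ * Editorial note: callers are expected to make two passes over this
+ * interface - one with namep set to fetch the newline-separated name
+ * list (dd->cntrnameslen bytes), and one with cntrp set to fetch the
+ * freshly read block of dd->ndevcntrs u64 values; pos must be 0 in
+ * both cases.
+ */
+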
+/*
+ * Used by sysfs to create files for hfi stats to read
+ */
+u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ int ret;
+ u64 val = 0;
+
+ if (namep) {
+ ret = dd->portcntrnameslen;
+ if (pos != 0) {
+ dd_dev_err(dd, "index not supported");
+ return 0;
+ }
+ *namep = dd->portcntrnames;
+ } else {
+ const struct cntr_entry *entry;
+ struct hfi1_pportdata *ppd;
+ int i, j;
+
+ ret = (dd->nportcntrs) * sizeof(u64);
+ if (pos != 0) {
+ dd_dev_err(dd, "indexing not supported");
+ return 0;
+ }
+ ppd = (struct hfi1_pportdata *)(dd + 1 + port);
+ *cntrp = ppd->cntrs;
+
+ for (i = 0; i < PORT_CNTR_LAST; i++) {
+ entry = &port_cntrs[i];
+ hfi1_cdbg(CNTR, "reading %s", entry->name);
+ if (entry->flags & CNTR_DISABLED) {
+ /* Nothing */
+ hfi1_cdbg(CNTR, "\tDisabled\n");
+ continue;
+ }
+
+ if (entry->flags & CNTR_VL) {
+ hfi1_cdbg(CNTR, "\tPer VL");
+ for (j = 0; j < C_VL_COUNT; j++) {
+ val = entry->rw_cntr(entry, ppd, j,
+ CNTR_MODE_R,
+ 0);
+ hfi1_cdbg(
+ CNTR,
+ "\t\tRead 0x%llx for %d",
+ val, j);
+ ppd->cntrs[entry->offset + j] = val;
+ }
+ } else {
+ val = entry->rw_cntr(entry, ppd,
+ CNTR_INVALID_VL,
+ CNTR_MODE_R,
+ 0);
+ ppd->cntrs[entry->offset] = val;
+ hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
+ }
+ }
+ }
+ return ret;
+}
+
+static void free_cntrs(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ if (dd->synth_stats_timer.data)
+ del_timer_sync(&dd->synth_stats_timer);
+ dd->synth_stats_timer.data = 0;
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ kfree(ppd->cntrs);
+ kfree(ppd->scntrs);
+ free_percpu(ppd->ibport_data.rc_acks);
+ free_percpu(ppd->ibport_data.rc_qacks);
+ free_percpu(ppd->ibport_data.rc_delayed_comp);
+ ppd->cntrs = NULL;
+ ppd->scntrs = NULL;
+ ppd->ibport_data.rc_acks = NULL;
+ ppd->ibport_data.rc_qacks = NULL;
+ ppd->ibport_data.rc_delayed_comp = NULL;
+ }
+ kfree(dd->portcntrnames);
+ dd->portcntrnames = NULL;
+ kfree(dd->cntrs);
+ dd->cntrs = NULL;
+ kfree(dd->scntrs);
+ dd->scntrs = NULL;
+ kfree(dd->cntrnames);
+ dd->cntrnames = NULL;
+}
+
+#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
+#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
+
+static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
+ u64 *psval, void *context, int vl)
+{
+ u64 val;
+ u64 sval = *psval;
+
+ if (entry->flags & CNTR_DISABLED) {
+ dd_dev_err(dd, "Counter %s not enabled", entry->name);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
+
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
+
+	/* If it's a synthetic counter there is more work we need to do */
+ if (entry->flags & CNTR_SYNTH) {
+ if (sval == CNTR_MAX) {
+ /* No need to read already saturated */
+ return CNTR_MAX;
+ }
+
+ if (entry->flags & CNTR_32BIT) {
+ /* 32bit counters can wrap multiple times */
+ u64 upper = sval >> 32;
+ u64 lower = (sval << 32) >> 32;
+
+ if (lower > val) { /* hw wrapped */
+ if (upper == CNTR_32BIT_MAX)
+ val = CNTR_MAX;
+ else
+ upper++;
+ }
+
+ if (val != CNTR_MAX)
+ val = (upper << 32) | val;
+
+ } else {
+ /* If we rolled we are saturated */
+ if ((val < sval) || (val > CNTR_MAX))
+ val = CNTR_MAX;
+ }
+ }
+
+ *psval = val;
+
+ hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
+
+ return val;
+}
+
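+/*
+ * Worked example of the 32-bit wrap handling above (editorial note,
+ * hypothetical values): with a saved sval of 0x2fffffff0 (upper == 2,
+ * lower == 0xfffffff0) and a fresh hardware read of val == 0x10,
+ * lower > val flags a wrap, upper becomes 3, and the reconstructed
+ * value is 0x300000010.
+ */
+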
+static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
+ struct cntr_entry *entry,
+ u64 *psval, void *context, int vl, u64 data)
+{
+ u64 val;
+
+ if (entry->flags & CNTR_DISABLED) {
+ dd_dev_err(dd, "Counter %s not enabled", entry->name);
+ return 0;
+ }
+
+ hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
+
+ if (entry->flags & CNTR_SYNTH) {
+ *psval = data;
+ if (entry->flags & CNTR_32BIT) {
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
+ (data << 32) >> 32);
+ val = data; /* return the full 64bit value */
+ } else {
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
+ data);
+ }
+ } else {
+ val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
+ }
+
+ *psval = val;
+
+ hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
+
+ return val;
+}
+
+u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &dev_cntrs[index];
+ sval = dd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ return read_dev_port_cntr(dd, entry, sval, dd, vl);
+}
+
+u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &dev_cntrs[index];
+ sval = dd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
+}
+
+u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &port_cntrs[index];
+ sval = ppd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
+ (index <= C_RCV_HDR_OVF_LAST)) {
+ /* We do not want to bother for disabled contexts */
+ return 0;
+ }
+
+ return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
+}
+
+u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
+{
+ struct cntr_entry *entry;
+ u64 *sval;
+
+ entry = &port_cntrs[index];
+ sval = ppd->scntrs + entry->offset;
+
+ if (vl != CNTR_INVALID_VL)
+ sval += vl;
+
+ if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
+ (index <= C_RCV_HDR_OVF_LAST)) {
+ /* We do not want to bother for disabled contexts */
+ return 0;
+ }
+
+ return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
+}
+
+static void update_synth_timer(unsigned long opaque)
+{
+ u64 cur_tx;
+ u64 cur_rx;
+ u64 total_flits;
+ u8 update = 0;
+ int i, j, vl;
+ struct hfi1_pportdata *ppd;
+ struct cntr_entry *entry;
+
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+
+ /*
+	 * Rather than keep beating on the CSRs, pick a minimal set that we
+	 * can check to watch for potential rollover. We do this by looking
+	 * at the number of flits sent/received. If the total flits exceeds
+	 * 32 bits then we have to iterate all the counters and update.
+ */
+ entry = &dev_cntrs[C_DC_RCV_FLITS];
+ cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
+
+ entry = &dev_cntrs[C_DC_XMIT_FLITS];
+ cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
+
+ hfi1_cdbg(
+ CNTR,
+ "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
+ dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
+
+ if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
+ /*
+ * May not be strictly necessary to update but it won't hurt and
+ * simplifies the logic here.
+ */
+ update = 1;
+ hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
+ dd->unit);
+ } else {
+ total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
+ hfi1_cdbg(CNTR,
+ "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
+ total_flits, (u64)CNTR_32BIT_MAX);
+ if (total_flits >= CNTR_32BIT_MAX) {
+ hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
+ dd->unit);
+ update = 1;
+ }
+ }
+
+ if (update) {
+ hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ entry = &dev_cntrs[i];
+ if (entry->flags & CNTR_VL) {
+ for (vl = 0; vl < C_VL_COUNT; vl++)
+ read_dev_cntr(dd, i, vl);
+ } else {
+ read_dev_cntr(dd, i, CNTR_INVALID_VL);
+ }
+ }
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ for (j = 0; j < PORT_CNTR_LAST; j++) {
+ entry = &port_cntrs[j];
+ if (entry->flags & CNTR_VL) {
+ for (vl = 0; vl < C_VL_COUNT; vl++)
+ read_port_cntr(ppd, j, vl);
+ } else {
+ read_port_cntr(ppd, j, CNTR_INVALID_VL);
+ }
+ }
+ }
+
+ /*
+ * We want the value in the register. The goal is to keep track
+ * of the number of "ticks" not the counter value. In other
+ * words if the register rolls we want to notice it and go ahead
+ * and force an update.
+ */
+ entry = &dev_cntrs[C_DC_XMIT_FLITS];
+ dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+
+ entry = &dev_cntrs[C_DC_RCV_FLITS];
+ dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
+ CNTR_MODE_R, 0);
+
+ hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
+ dd->unit, dd->last_tx, dd->last_rx);
+
+ } else {
+ hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
+ }
+
+	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+}
+
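+/*
+ * Editorial note on the tripwire above (hypothetical numbers): if the
+ * last snapshot was tx == 0x80000000 flits and the current read is
+ * tx == 0x1000, cur_tx < last_tx flags a rollover and forces a full
+ * counter sweep even though the 32-bit delta test alone would not.
+ */
+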
+#define C_MAX_NAME 13 /* 12 chars + one for '\0' */
+static int init_cntrs(struct hfi1_devdata *dd)
+{
+ int i, rcv_ctxts, index, j;
+ size_t sz;
+ char *p;
+ char name[C_MAX_NAME];
+ struct hfi1_pportdata *ppd;
+
+ /* set up the stats timer; the add_timer is done at the end */
+ init_timer(&dd->synth_stats_timer);
+ dd->synth_stats_timer.function = update_synth_timer;
+ dd->synth_stats_timer.data = (unsigned long) dd;
+
+ /***********************/
+ /* per device counters */
+ /***********************/
+
+	/* size names and determine how many we have */
+ dd->ndevcntrs = 0;
+ sz = 0;
+ index = 0;
+
+ for (i = 0; i < DEV_CNTR_LAST; i++) {
+ hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
+ if (dev_cntrs[i].flags & CNTR_DISABLED) {
+ hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
+ continue;
+ }
+
+ if (dev_cntrs[i].flags & CNTR_VL) {
+ hfi1_dbg_early("\tProcessing VL cntr\n");
+ dev_cntrs[i].offset = index;
+ for (j = 0; j < C_VL_COUNT; j++) {
+ memset(name, '\0', C_MAX_NAME);
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name,
+ vl_from_idx(j));
+ sz += strlen(name);
+ sz++;
+ hfi1_dbg_early("\t\t%s\n", name);
+ dd->ndevcntrs++;
+ index++;
+ }
+ } else {
+ /* +1 for newline */
+ sz += strlen(dev_cntrs[i].name) + 1;
+ dd->ndevcntrs++;
+ dev_cntrs[i].offset = index;
+ index++;
+ hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
+ }
+ }
+
+ /* allocate space for the counter values */
+ dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
+ if (!dd->cntrs)
+ goto bail;
+
+ dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
+ if (!dd->scntrs)
+ goto bail;
+
+ /* allocate space for the counter names */
+ dd->cntrnameslen = sz;
+ dd->cntrnames = kmalloc(sz, GFP_KERNEL);
+ if (!dd->cntrnames)
+ goto bail;
+
+ /* fill in the names */
+ for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) {
+ if (dev_cntrs[i].flags & CNTR_DISABLED) {
+ /* Nothing */
+ } else {
+ if (dev_cntrs[i].flags & CNTR_VL) {
+ for (j = 0; j < C_VL_COUNT; j++) {
+ memset(name, '\0', C_MAX_NAME);
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name,
+ vl_from_idx(j));
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+ *p++ = '\n';
+ }
+ } else {
+ memcpy(p, dev_cntrs[i].name,
+ strlen(dev_cntrs[i].name));
+ p += strlen(dev_cntrs[i].name);
+ *p++ = '\n';
+ }
+ index++;
+ }
+ }
+
+ /*********************/
+ /* per port counters */
+ /*********************/
+
+ /*
+ * Go through the counters for the overflows and disable the ones we
+ * don't need. This varies based on platform so we need to do it
+ * dynamically here.
+ */
+ rcv_ctxts = dd->num_rcv_contexts;
+ for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
+ i <= C_RCV_HDR_OVF_LAST; i++) {
+ port_cntrs[i].flags |= CNTR_DISABLED;
+ }
+
+	/* size port counter names and determine how many we have */
+ sz = 0;
+ dd->nportcntrs = 0;
+ for (i = 0; i < PORT_CNTR_LAST; i++) {
+ hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
+ if (port_cntrs[i].flags & CNTR_DISABLED) {
+ hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
+ continue;
+ }
+
+ if (port_cntrs[i].flags & CNTR_VL) {
+ hfi1_dbg_early("\tProcessing VL cntr\n");
+ port_cntrs[i].offset = dd->nportcntrs;
+ for (j = 0; j < C_VL_COUNT; j++) {
+ memset(name, '\0', C_MAX_NAME);
+ snprintf(name, C_MAX_NAME, "%s%d",
+ port_cntrs[i].name,
+ vl_from_idx(j));
+ sz += strlen(name);
+ sz++;
+ hfi1_dbg_early("\t\t%s\n", name);
+ dd->nportcntrs++;
+ }
+ } else {
+ /* +1 for newline */
+ sz += strlen(port_cntrs[i].name) + 1;
+ port_cntrs[i].offset = dd->nportcntrs;
+ dd->nportcntrs++;
+ hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
+ }
+ }
+
+ /* allocate space for the counter names */
+ dd->portcntrnameslen = sz;
+ dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
+ if (!dd->portcntrnames)
+ goto bail;
+
+ /* fill in port cntr names */
+ for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
+ if (port_cntrs[i].flags & CNTR_DISABLED)
+ continue;
+
+ if (port_cntrs[i].flags & CNTR_VL) {
+ for (j = 0; j < C_VL_COUNT; j++) {
+ memset(name, '\0', C_MAX_NAME);
+ snprintf(name, C_MAX_NAME, "%s%d",
+ port_cntrs[i].name,
+ vl_from_idx(j));
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+ *p++ = '\n';
+ }
+ } else {
+ memcpy(p, port_cntrs[i].name,
+ strlen(port_cntrs[i].name));
+ p += strlen(port_cntrs[i].name);
+ *p++ = '\n';
+ }
+ }
+
+ /* allocate per port storage for counter values */
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
+ if (!ppd->cntrs)
+ goto bail;
+
+ ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
+ if (!ppd->scntrs)
+ goto bail;
+ }
+
+ /* CPU counters need to be allocated and zeroed */
+ if (init_cpu_counters(dd))
+ goto bail;
+
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ return 0;
+bail:
+ free_cntrs(dd);
+ return -ENOMEM;
+}
+
+static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
+{
+ switch (chip_lstate) {
+ default:
+ dd_dev_err(dd,
+ "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
+ chip_lstate);
+ /* fall through */
+ case LSTATE_DOWN:
+ return IB_PORT_DOWN;
+ case LSTATE_INIT:
+ return IB_PORT_INIT;
+ case LSTATE_ARMED:
+ return IB_PORT_ARMED;
+ case LSTATE_ACTIVE:
+ return IB_PORT_ACTIVE;
+ }
+}
+
+u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
+{
+ /* look at the HFI meta-states only */
+ switch (chip_pstate & 0xf0) {
+ default:
+ dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
+ chip_pstate);
+ /* fall through */
+ case PLS_DISABLED:
+ return IB_PORTPHYSSTATE_DISABLED;
+ case PLS_OFFLINE:
+ return OPA_PORTPHYSSTATE_OFFLINE;
+ case PLS_POLLING:
+ return IB_PORTPHYSSTATE_POLLING;
+ case PLS_CONFIGPHY:
+ return IB_PORTPHYSSTATE_TRAINING;
+ case PLS_LINKUP:
+ return IB_PORTPHYSSTATE_LINKUP;
+ case PLS_PHYTEST:
+ return IB_PORTPHYSSTATE_PHY_TEST;
+ }
+}
+
+/* return the OPA port logical state name */
+const char *opa_lstate_name(u32 lstate)
+{
+ static const char * const port_logical_names[] = {
+ "PORT_NOP",
+ "PORT_DOWN",
+ "PORT_INIT",
+ "PORT_ARMED",
+ "PORT_ACTIVE",
+ "PORT_ACTIVE_DEFER",
+ };
+ if (lstate < ARRAY_SIZE(port_logical_names))
+ return port_logical_names[lstate];
+ return "unknown";
+}
+
+/* return the OPA port physical state name */
+const char *opa_pstate_name(u32 pstate)
+{
+ static const char * const port_physical_names[] = {
+ "PHYS_NOP",
+ "reserved1",
+ "PHYS_POLL",
+ "PHYS_DISABLED",
+ "PHYS_TRAINING",
+ "PHYS_LINKUP",
+ "PHYS_LINK_ERR_RECOVER",
+ "PHYS_PHY_TEST",
+ "reserved8",
+ "PHYS_OFFLINE",
+ "PHYS_GANGED",
+ "PHYS_TEST",
+ };
+ if (pstate < ARRAY_SIZE(port_physical_names))
+ return port_physical_names[pstate];
+ return "unknown";
+}
+
+/*
+ * Read the hardware link state and set the driver's cached value of it.
+ * Return the (new) current value.
+ */
+u32 get_logical_state(struct hfi1_pportdata *ppd)
+{
+ u32 new_state;
+
+ new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
+ if (new_state != ppd->lstate) {
+ dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
+ opa_lstate_name(new_state), new_state);
+ ppd->lstate = new_state;
+ }
+ /*
+ * Set port status flags in the page mapped into userspace
+ * memory. Do it here to ensure a reliable state - this is
+ * the only function called by all state handling code.
+	 * Always set the flags because the cache value might have
+	 * been changed explicitly outside of this function.
+ */
+ if (ppd->statusp) {
+ switch (ppd->lstate) {
+ case IB_PORT_DOWN:
+ case IB_PORT_INIT:
+ *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
+ HFI1_STATUS_IB_READY);
+ break;
+ case IB_PORT_ARMED:
+ *ppd->statusp |= HFI1_STATUS_IB_CONF;
+ break;
+ case IB_PORT_ACTIVE:
+ *ppd->statusp |= HFI1_STATUS_IB_READY;
+ break;
+ }
+ }
+ return ppd->lstate;
+}
+
+/**
+ * wait_logical_linkstate - wait for an IB link state change to occur
+ * @ppd: port device
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for IB link state change to occur.
+ * For now, take the easy polling route.
+ * Returns 0 if state reached, otherwise -ETIMEDOUT.
+ */
+static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ if (get_logical_state(ppd) == state)
+ return 0;
+ if (time_after(jiffies, timeout))
+ break;
+ msleep(20);
+ }
+ dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
+
+ return -ETIMEDOUT;
+}
+
+u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
+{
+ static u32 remembered_state = 0xff;
+ u32 pstate;
+ u32 ib_pstate;
+
+ pstate = read_physical_state(ppd->dd);
+ ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
+ if (remembered_state != ib_pstate) {
+ dd_dev_info(ppd->dd,
+ "%s: physical state changed to %s (0x%x), phy 0x%x\n",
+ __func__, opa_pstate_name(ib_pstate), ib_pstate,
+ pstate);
+ remembered_state = ib_pstate;
+ }
+ return ib_pstate;
+}
+
+/*
+ * Read/modify/write ASIC_QSFP register bits as selected by mask
+ * data: 0 or 1 in the positions depending on what needs to be written
+ * dir: 0 for read, 1 for write
+ * mask: select by setting
+ * I2CCLK (bit 0)
+ * I2CDATA (bit 1)
+ */
+u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
+ u32 mask)
+{
+ u64 qsfp_oe, target_oe;
+
+ target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
+ if (mask) {
+ /* We are writing register bits, so lock access */
+ dir &= mask;
+ data &= mask;
+
+ qsfp_oe = read_csr(dd, target_oe);
+ qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
+ write_csr(dd, target_oe, qsfp_oe);
+ }
+	/*
+	 * We are exclusively reading bits here, but it is unlikely
+	 * we'll get valid data when we set the direction of the pin
+	 * in the same call, so the reader should call this function
+	 * again to get valid data.
+ */
+ return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
+}
+
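+/*
+ * Editorial usage sketch (hypothetical call): sampling both I2C pins
+ * on QSFP target 0 without touching the output enables would be
+ *	in = hfi1_gpio_mod(dd, 0, 0, 0, 0);
+ * a zero mask skips the read-modify-write and only the IN register is
+ * read.
+ */
+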
+#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
+(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+
+#define SET_STATIC_RATE_CONTROL_SMASK(r) \
+(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+
+int hfi1_init_ctxt(struct send_context *sc)
+{
+ if (sc != NULL) {
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg;
+ u8 set = (sc->type == SC_USER ?
+ HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
+ HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
+ reg = read_kctxt_csr(dd, sc->hw_context,
+ SEND_CTXT_CHECK_ENABLE);
+ if (set)
+ CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
+ else
+ SET_STATIC_RATE_CONTROL_SMASK(reg);
+ write_kctxt_csr(dd, sc->hw_context,
+ SEND_CTXT_CHECK_ENABLE, reg);
+ }
+ return 0;
+}
+
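+/*
+ * Editorial note on the inversion above: the capability requests
+ * static rate control while the CSR bit *disallows* it, so a set
+ * capability clears the bit and a clear capability sets it.
+ */
+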
+int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
+{
+ int ret = 0;
+ u64 reg;
+
+ if (dd->icode != ICODE_RTL_SILICON) {
+ if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
+ dd_dev_info(dd, "%s: tempsense not supported by HW\n",
+ __func__);
+ return -EINVAL;
+ }
+ reg = read_csr(dd, ASIC_STS_THERM);
+ temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
+ ASIC_STS_THERM_CURR_TEMP_MASK);
+ temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
+ ASIC_STS_THERM_LO_TEMP_MASK);
+ temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
+ ASIC_STS_THERM_HI_TEMP_MASK);
+ temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
+ ASIC_STS_THERM_CRIT_TEMP_MASK);
+ /* triggers is a 3-bit value - 1 bit per trigger. */
+ temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
+
+ return ret;
+}
+
+/* ========================================================================= */
+
+/*
+ * Enable/disable chip from delivering interrupts.
+ */
+void set_intr_state(struct hfi1_devdata *dd, u32 enable)
+{
+ int i;
+
+ /*
+ * In HFI, the mask needs to be 1 to allow interrupts.
+ */
+ if (enable) {
+ u64 cce_int_mask;
+ const int qsfp1_int_smask = QSFP1_INT % 64;
+ const int qsfp2_int_smask = QSFP2_INT % 64;
+
+ /* enable all interrupts */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
+
+ /*
+		 * Disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
+ * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
+ * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
+ * the index of the appropriate CSR in the CCEIntMask CSR array
+ */
+ cce_int_mask = read_csr(dd, CCE_INT_MASK +
+ (8*(QSFP1_INT/64)));
+ if (dd->hfi1_id) {
+ cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
+ write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
+ cce_int_mask);
+ } else {
+ cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
+ write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
+ cce_int_mask);
+ }
+ } else {
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
+ }
+}
+
+/*
+ * Clear all interrupt sources on the chip.
+ */
+static void clear_all_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
+
+ write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
+ write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
+ for (i = 0; i < dd->chip_send_contexts; i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
+ for (i = 0; i < dd->chip_sdma_engines; i++)
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
+
+ write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
+ write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
+ write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
+}
+
+/* Move to pcie.c? */
+static void disable_intx(struct pci_dev *pdev)
+{
+ pci_intx(pdev, 0);
+}
+
+static void clean_up_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* remove irqs - must happen before disabling/turning off */
+ if (dd->num_msix_entries) {
+ /* MSI-X */
+ struct hfi1_msix_entry *me = dd->msix_entries;
+
+ for (i = 0; i < dd->num_msix_entries; i++, me++) {
+ if (me->arg == NULL) /* => no irq, no affinity */
+ break;
+ irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
+ NULL);
+ free_irq(me->msix.vector, me->arg);
+ }
+ } else {
+ /* INTx */
+ if (dd->requested_intx_irq) {
+ free_irq(dd->pcidev->irq, dd);
+ dd->requested_intx_irq = 0;
+ }
+ }
+
+ /* turn off interrupts */
+ if (dd->num_msix_entries) {
+ /* MSI-X */
+ hfi1_nomsix(dd);
+ } else {
+ /* INTx */
+ disable_intx(dd->pcidev);
+ }
+
+ /* clean structures */
+ for (i = 0; i < dd->num_msix_entries; i++)
+ free_cpumask_var(dd->msix_entries[i].mask);
+ kfree(dd->msix_entries);
+ dd->msix_entries = NULL;
+ dd->num_msix_entries = 0;
+}
+
+/*
+ * Remap the interrupt source from the general handler to the given MSI-X
+ * interrupt.
+ */
+static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+{
+ u64 reg;
+ int m, n;
+
+ /* clear from the handled mask of the general interrupt */
+ m = isrc / 64;
+ n = isrc % 64;
+ dd->gi_mask[m] &= ~((u64)1 << n);
+
+ /* direct the chip source to the given MSI-X interrupt */
+ m = isrc / 8;
+ n = isrc % 8;
+ reg = read_csr(dd, CCE_INT_MAP + (8*m));
+ reg &= ~((u64)0xff << (8*n));
+ reg |= ((u64)msix_intr & 0xff) << (8*n);
+ write_csr(dd, CCE_INT_MAP + (8*m), reg);
+}
+
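+/*
+ * Worked example (editorial note): for chip interrupt source
+ * isrc == 70, the general-handler bit is gi_mask[1] bit 6
+ * (70 / 64, 70 % 64), while the 8-bit map field lives in the CSR at
+ * CCE_INT_MAP + 8 * 8, byte lane 6 (70 / 8, 70 % 8).
+ */
+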
+static void remap_sdma_interrupts(struct hfi1_devdata *dd,
+ int engine, int msix_intr)
+{
+ /*
+ * SDMA engine interrupt sources grouped by type, rather than
+ * engine. Per-engine interrupts are as follows:
+ * SDMA
+ * SDMAProgress
+ * SDMAIdle
+ */
+ remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
+ remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
+ remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
+}
+
+static void remap_receive_available_interrupt(struct hfi1_devdata *dd,
+ int rx, int msix_intr)
+{
+ remap_intr(dd, IS_RCVAVAIL_START + rx, msix_intr);
+}
+
+static int request_intx_irq(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME"_%d",
+ dd->unit);
+ ret = request_irq(dd->pcidev->irq, general_interrupt,
+ IRQF_SHARED, dd->intx_name, dd);
+ if (ret)
+ dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
+ ret);
+ else
+ dd->requested_intx_irq = 1;
+ return ret;
+}
+
+static int request_msix_irqs(struct hfi1_devdata *dd)
+{
+ const struct cpumask *local_mask;
+ cpumask_var_t def, rcv;
+ bool def_ret, rcv_ret;
+ int first_general, last_general;
+ int first_sdma, last_sdma;
+ int first_rx, last_rx;
+ int first_cpu, restart_cpu, curr_cpu;
+ int rcv_cpu, sdma_cpu;
+ int i, ret = 0, possible;
+ int ht;
+
+ /* calculate the ranges we are going to use */
+ first_general = 0;
+ first_sdma = last_general = first_general + 1;
+ first_rx = last_sdma = first_sdma + dd->num_sdma;
+ last_rx = first_rx + dd->n_krcv_queues;
+
+ /*
+ * Interrupt affinity.
+ *
+	 * Non-rcv-avail interrupts get a default mask that starts as
+	 * the possible CPUs with hyperthread siblings cleared and the
+	 * rcv-avail CPUs cleared.
+	 *
+	 * Rcv-avail interrupts get node-relative CPU 1, wrapping back
+	 * to node-relative CPU 1 as necessary.
+ */
+ local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+ /* if first cpu is invalid, use NUMA 0 */
+ if (cpumask_first(local_mask) >= nr_cpu_ids)
+ local_mask = topology_core_cpumask(0);
+
+ def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
+ rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
+ if (!def_ret || !rcv_ret)
+ goto bail;
+ /* use local mask as default */
+ cpumask_copy(def, local_mask);
+ possible = cpumask_weight(def);
+	/* clear hyperthread siblings from the default mask */
+ ht = cpumask_weight(
+ topology_sibling_cpumask(cpumask_first(local_mask)));
+ for (i = possible/ht; i < possible; i++)
+ cpumask_clear_cpu(i, def);
+ /* reset possible */
+ possible = cpumask_weight(def);
+	/* def now has full cores on chosen node */
+ first_cpu = cpumask_first(def);
+ if (nr_cpu_ids >= first_cpu)
+ first_cpu++;
+ restart_cpu = first_cpu;
+ curr_cpu = restart_cpu;
+
+ for (i = first_cpu; i < dd->n_krcv_queues + first_cpu; i++) {
+ cpumask_clear_cpu(curr_cpu, def);
+ cpumask_set_cpu(curr_cpu, rcv);
+ if (curr_cpu >= possible)
+ curr_cpu = restart_cpu;
+ else
+ curr_cpu++;
+ }
+	/* def now holds the non-rcv CPUs, rcv holds the rcv CPUs */
+ rcv_cpu = cpumask_first(rcv);
+ sdma_cpu = cpumask_first(def);
+
+ /*
+ * Sanity check - the code expects all SDMA chip source
+ * interrupts to be in the same CSR, starting at bit 0. Verify
+ * that this is true by checking the bit location of the start.
+ */
+ BUILD_BUG_ON(IS_SDMA_START % 64);
+
+ for (i = 0; i < dd->num_msix_entries; i++) {
+ struct hfi1_msix_entry *me = &dd->msix_entries[i];
+ const char *err_info;
+ irq_handler_t handler;
+ void *arg;
+ int idx;
+ struct hfi1_ctxtdata *rcd = NULL;
+ struct sdma_engine *sde = NULL;
+
+ /* obtain the arguments to request_irq */
+ if (first_general <= i && i < last_general) {
+ idx = i - first_general;
+ handler = general_interrupt;
+ arg = dd;
+ snprintf(me->name, sizeof(me->name),
+ DRIVER_NAME"_%d", dd->unit);
+ err_info = "general";
+ } else if (first_sdma <= i && i < last_sdma) {
+ idx = i - first_sdma;
+ sde = &dd->per_sdma[idx];
+ handler = sdma_interrupt;
+ arg = sde;
+ snprintf(me->name, sizeof(me->name),
+ DRIVER_NAME"_%d sdma%d", dd->unit, idx);
+ err_info = "sdma";
+ remap_sdma_interrupts(dd, idx, i);
+ } else if (first_rx <= i && i < last_rx) {
+ idx = i - first_rx;
+ rcd = dd->rcd[idx];
+ /* no interrupt if no rcd */
+ if (!rcd)
+ continue;
+ /*
+ * Set the interrupt register and mask for this
+ * context's interrupt.
+ */
+ rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
+ rcd->imask = ((u64)1) <<
+ ((IS_RCVAVAIL_START+idx) % 64);
+ handler = receive_context_interrupt;
+ arg = rcd;
+ snprintf(me->name, sizeof(me->name),
+ DRIVER_NAME"_%d kctxt%d", dd->unit, idx);
+ err_info = "receive context";
+ remap_receive_available_interrupt(dd, idx, i);
+ } else {
+			/*
+			 * not in our expected range - complain, then
+			 * ignore it
+			 */
+ dd_dev_err(dd,
+ "Unexpected extra MSI-X interrupt %d\n", i);
+ continue;
+ }
+ /* no argument, no interrupt */
+ if (arg == NULL)
+ continue;
+ /* make sure the name is terminated */
+ me->name[sizeof(me->name)-1] = 0;
+
+ ret = request_irq(me->msix.vector, handler, 0, me->name, arg);
+ if (ret) {
+ dd_dev_err(dd,
+ "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
+ err_info, me->msix.vector, idx, ret);
+ return ret;
+ }
+ /*
+ * assign arg after request_irq call, so it will be
+ * cleaned up
+ */
+ me->arg = arg;
+
+ if (!zalloc_cpumask_var(
+ &dd->msix_entries[i].mask,
+ GFP_KERNEL))
+ goto bail;
+ if (handler == sdma_interrupt) {
+ dd_dev_info(dd, "sdma engine %d cpu %d\n",
+ sde->this_idx, sdma_cpu);
+ cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
+ sdma_cpu = cpumask_next(sdma_cpu, def);
+ if (sdma_cpu >= nr_cpu_ids)
+ sdma_cpu = cpumask_first(def);
+ } else if (handler == receive_context_interrupt) {
+ dd_dev_info(dd, "rcv ctxt %d cpu %d\n",
+ rcd->ctxt, rcv_cpu);
+ cpumask_set_cpu(rcv_cpu, dd->msix_entries[i].mask);
+ rcv_cpu = cpumask_next(rcv_cpu, rcv);
+ if (rcv_cpu >= nr_cpu_ids)
+ rcv_cpu = cpumask_first(rcv);
+ } else {
+ /* otherwise first def */
+ dd_dev_info(dd, "%s cpu %d\n",
+ err_info, cpumask_first(def));
+ cpumask_set_cpu(
+ cpumask_first(def), dd->msix_entries[i].mask);
+ }
+ irq_set_affinity_hint(
+ dd->msix_entries[i].msix.vector,
+ dd->msix_entries[i].mask);
+ }
+
+out:
+ free_cpumask_var(def);
+ free_cpumask_var(rcv);
+ return ret;
+bail:
+ ret = -ENOMEM;
+ goto out;
+}
+
+/*
+ * Set the general handler to accept all interrupts, remap all
+ * chip interrupts back to MSI-X 0.
+ */
+static void reset_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* all interrupts handled by the general handler */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+ dd->gi_mask[i] = ~(u64)0;
+
+ /* all chip interrupts map to MSI-X 0 */
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP + (8*i), 0);
+}
+
+static int set_up_interrupts(struct hfi1_devdata *dd)
+{
+ struct hfi1_msix_entry *entries;
+ u32 total, request;
+ int i, ret;
+ int single_interrupt = 0; /* we expect to have all the interrupts */
+
+ /*
+ * Interrupt count:
+ * 1 general, "slow path" interrupt (includes the SDMA engines
+ * slow source, SDMACleanupDone)
+ * N interrupts - one per used SDMA engine
+ * M interrupt - one per kernel receive context
+ */
+ total = 1 + dd->num_sdma + dd->n_krcv_queues;
+
+ entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ dd_dev_err(dd, "cannot allocate msix table\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ /* 1-1 MSI-X entry assignment */
+ for (i = 0; i < total; i++)
+ entries[i].msix.entry = i;
+
+ /* ask for MSI-X interrupts */
+ request = total;
+ request_msix(dd, &request, entries);
+
+ if (request == 0) {
+ /* using INTx */
+ /* dd->num_msix_entries already zero */
+ kfree(entries);
+ single_interrupt = 1;
+ dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
+ } else {
+ /* using MSI-X */
+ dd->num_msix_entries = request;
+ dd->msix_entries = entries;
+
+ if (request != total) {
+ /* using MSI-X, with reduced interrupts */
+ dd_dev_err(
+ dd,
+ "cannot handle reduced interrupt case, want %u, got %u\n",
+ total, request);
+ ret = -EINVAL;
+ goto fail;
+ }
+ dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+ }
+
+ /* mask all interrupts */
+ set_intr_state(dd, 0);
+ /* clear all pending interrupts */
+ clear_all_interrupts(dd);
+
+ /* reset general handler mask, chip MSI-X mappings */
+ reset_interrupts(dd);
+
+ if (single_interrupt)
+ ret = request_intx_irq(dd);
+ else
+ ret = request_msix_irqs(dd);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ clean_up_interrupts(dd);
+ return ret;
+}
+
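+/*
+ * Editorial example of the interrupt budget above (hypothetical
+ * configuration): with 16 SDMA engines in use and 8 kernel receive
+ * contexts, total == 1 + 16 + 8 == 25 MSI-X vectors; a zero grant
+ * falls back to INTx, while a partial grant is rejected with -EINVAL.
+ */
+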
+/*
+ * Set up context values in dd. Sets:
+ *
+ * num_rcv_contexts - number of contexts being used
+ * n_krcv_queues - number of kernel contexts
+ * first_user_ctxt - first non-kernel context in array of contexts
+ * freectxts - number of free user contexts
+ * num_send_contexts - number of PIO send contexts being used
+ */
+static int set_up_context_variables(struct hfi1_devdata *dd)
+{
+ int num_kernel_contexts;
+ int num_user_contexts;
+ int total_contexts;
+ int ret;
+ unsigned ngroups;
+
+ /*
+ * Kernel contexts: (to be fixed later):
+	 * - min of 2, or 1 context per NUMA node
+ * - Context 0 - default/errors
+ * - Context 1 - VL15
+ */
+ if (n_krcvqs)
+ num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS;
+ else
+ num_kernel_contexts = num_online_nodes();
+ num_kernel_contexts =
+ max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
+ /*
+ * Every kernel receive context needs an ACK send context.
+	 * One send context is allocated for each VL{0-7} and VL15.
+ */
+ if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
+ dd_dev_err(dd,
+ "Reducing # kernel rcv contexts to: %d, from %d\n",
+ (int)(dd->chip_send_contexts - num_vls - 1),
+ (int)num_kernel_contexts);
+ num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
+ }
+ /*
+ * User contexts: (to be fixed later)
+ * - set to num_rcv_contexts if non-zero
+ * - default to 1 user context per CPU
+ */
+ if (num_rcv_contexts)
+ num_user_contexts = num_rcv_contexts;
+ else
+ num_user_contexts = num_online_cpus();
+
+ total_contexts = num_kernel_contexts + num_user_contexts;
+
+ /*
+ * Adjust the counts given a global max.
+ */
+ if (total_contexts > dd->chip_rcv_contexts) {
+ dd_dev_err(dd,
+ "Reducing # user receive contexts to: %d, from %d\n",
+ (int)(dd->chip_rcv_contexts - num_kernel_contexts),
+ (int)num_user_contexts);
+ num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
+ /* recalculate */
+ total_contexts = num_kernel_contexts + num_user_contexts;
+ }
+
+ /* the first N are kernel contexts, the rest are user contexts */
+ dd->num_rcv_contexts = total_contexts;
+ dd->n_krcv_queues = num_kernel_contexts;
+ dd->first_user_ctxt = num_kernel_contexts;
+ dd->freectxts = num_user_contexts;
+ dd_dev_info(dd,
+ "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
+ (int)dd->chip_rcv_contexts,
+ (int)dd->num_rcv_contexts,
+ (int)dd->n_krcv_queues,
+ (int)dd->num_rcv_contexts - dd->n_krcv_queues);
+
+ /*
+ * Receive array allocation:
+ * All RcvArray entries are divided into groups of 8. This
+ * is required by the hardware and will speed up writes to
+ * consecutive entries by using write-combining of the entire
+ * cacheline.
+ *
+	 * The number of groups is evenly divided among all contexts.
+	 * Any leftover groups will be given to the first N user
+	 * contexts.
+ */
+ dd->rcv_entries.group_size = RCV_INCREMENT;
+ ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
+ dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
+ dd->rcv_entries.nctxt_extra = ngroups -
+ (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
+ dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
+ dd->rcv_entries.ngroups,
+ dd->rcv_entries.nctxt_extra);
+ if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
+ MAX_EAGER_ENTRIES * 2) {
+ dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
+ dd->rcv_entries.group_size;
+ dd_dev_info(dd,
+ "RcvArray group count too high, change to %u\n",
+ dd->rcv_entries.ngroups);
+ dd->rcv_entries.nctxt_extra = 0;
+ }
+ /*
+ * PIO send contexts
+ */
+ ret = init_sc_pools_and_sizes(dd);
+ if (ret >= 0) { /* success */
+ dd->num_send_contexts = ret;
+ dd_dev_info(
+ dd,
+ "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
+ dd->chip_send_contexts,
+ dd->num_send_contexts,
+ dd->sc_sizes[SC_KERNEL].count,
+ dd->sc_sizes[SC_ACK].count,
+ dd->sc_sizes[SC_USER].count);
+ ret = 0; /* success */
+ }
+
+ return ret;
+}
+
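+/*
+ * Editorial example (hypothetical chip limits): with 160 chip receive
+ * contexts, n_krcvqs == 0 and two online NUMA nodes, the kernel gets
+ * max(MIN_KERNEL_KCTXTS, 2) contexts, users default to one context
+ * per online CPU, and the total is trimmed to 160. With 32768
+ * RcvArray entries and a group size of 8, the 4096 groups are divided
+ * evenly, the remainder going to the first user contexts.
+ */
+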
+/*
+ * Set the device/port partition key table. The MAD code
+ * will ensure that, at least, the partial management
+ * partition key is present in the table.
+ */
+static void set_partition_keys(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg = 0;
+ int i;
+
+ dd_dev_info(dd, "Setting partition keys\n");
+ for (i = 0; i < hfi1_get_npkeys(dd); i++) {
+ reg |= (ppd->pkeys[i] &
+ RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
+ ((i % 4) *
+ RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
+ /* Each register holds 4 PKey values. */
+ if ((i % 4) == 3) {
+ write_csr(dd, RCV_PARTITION_KEY +
+ ((i - 3) * 2), reg);
+ reg = 0;
+ }
+ }
+
+ /* Always enable HW pkeys check when pkeys table is set */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
+}
+
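+/*
+ * Editorial example of the packing above (hypothetical pkey values,
+ * assuming a 16-bit per-key shift): pkeys[0..3] of 0xffff, 0x8001,
+ * 0x0000, 0x0000 pack into the first RcvPartitionKey register as
+ * 0x8001ffff, written on the i == 3 iteration; each 64-bit register
+ * holds four 16-bit PKey fields.
+ */
+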
+/*
+ * These CSRs and memories are uninitialized on reset and must be
+ * written before reading to set the ECC/parity bits.
+ *
+ * NOTE: All user context CSRs that are not mmapped write-only
+ * (e.g. the TID flows) must be initialized even if the driver never
+ * reads them.
+ */
+static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
+{
+ int i, j;
+
+ /* CceIntMap */
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP+(8*i), 0);
+
+ /* SendCtxtCreditReturnAddr */
+ for (i = 0; i < dd->chip_send_contexts; i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
+
+ /* PIO Send buffers */
+ /* SDMA Send buffers */
+	/*
+	 * These are not normally read, and (presently) have no method
+	 * to be read, so are not pre-initialized.
+	 */
+
+ /* RcvHdrAddr */
+ /* RcvHdrTailAddr */
+ /* RcvTidFlowTable */
+ for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
+ for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
+ write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
+ }
+
+ /* RcvArray */
+ for (i = 0; i < dd->chip_rcv_array_count; i++)
+ write_csr(dd, RCV_ARRAY + (8*i),
+ RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
+
+ /* RcvQPMapTable */
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
+}
+
+/*
+ * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
+ */
+static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
+ u64 ctrl_bits)
+{
+ unsigned long timeout;
+ u64 reg;
+
+ /* is the condition present? */
+ reg = read_csr(dd, CCE_STATUS);
+ if ((reg & status_bits) == 0)
+ return;
+
+ /* clear the condition */
+ write_csr(dd, CCE_CTRL, ctrl_bits);
+
+ /* wait for the condition to clear */
+ timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
+ while (1) {
+ reg = read_csr(dd, CCE_STATUS);
+ if ((reg & status_bits) == 0)
+ return;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(dd,
+ "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
+ status_bits, reg & status_bits);
+ return;
+ }
+ udelay(1);
+ }
+}
+
+/* set CCE CSRs to chip reset defaults */
+static void reset_cce_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* CCE_REVISION read-only */
+ /* CCE_REVISION2 read-only */
+ /* CCE_CTRL - bits clear automatically */
+ /* CCE_STATUS read-only, use CceCtrl to clear */
+ clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
+ clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
+ clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
+ for (i = 0; i < CCE_NUM_SCRATCH; i++)
+ write_csr(dd, CCE_SCRATCH + (8 * i), 0);
+ /* CCE_ERR_STATUS read-only */
+ write_csr(dd, CCE_ERR_MASK, 0);
+ write_csr(dd, CCE_ERR_CLEAR, ~0ull);
+ /* CCE_ERR_FORCE leave alone */
+ for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
+ write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
+ /* CCE_PCIE_CTRL leave alone */
+ for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
+ write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
+ write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
+ CCE_MSIX_TABLE_UPPER_RESETCSR);
+ }
+ for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
+ /* CCE_MSIX_PBA read-only */
+ write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
+ write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
+ }
+ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
+ write_csr(dd, CCE_INT_MAP, 0);
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ /* CCE_INT_STATUS read-only */
+ write_csr(dd, CCE_INT_MASK + (8 * i), 0);
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
+ /* CCE_INT_FORCE leave alone */
+ /* CCE_INT_BLOCKED read-only */
+ }
+ for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
+ write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
+}
+
+/* set ASIC CSRs to chip reset defaults */
+static void reset_asic_csrs(struct hfi1_devdata *dd)
+{
+ static DEFINE_MUTEX(asic_mutex);
+ static int called;
+ int i;
+
+ /*
+ * If the HFIs are shared between separate nodes or VMs,
+ * then more will need to be done here. One idea is a module
+ * parameter that returns early, letting the first power-on or
+ * a known first load do the reset and blocking all others.
+ */
+
+ /*
+ * These CSRs should only be reset once - the first one here will
+ * do the work. Use a mutex so that a non-first caller waits until
+ * the first is finished before it can proceed.
+ */
+ mutex_lock(&asic_mutex);
+ if (called)
+ goto done;
+ called = 1;
+
+ if (dd->icode != ICODE_FPGA_EMULATION) {
+ /* emulation does not have an SBus - leave these alone */
+ /*
+ * All writes to ASIC_CFG_SBUS_REQUEST do something.
+ * Notes:
+ * o The reset is not zero if aimed at the core. See the
+ * SBus documentation for details.
+ * o If the SBus firmware has been updated (e.g. by the BIOS),
+ * will the reset revert that?
+ */
+ /* ASIC_CFG_SBUS_REQUEST leave alone */
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
+ }
+ /* ASIC_SBUS_RESULT read-only */
+ write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
+ for (i = 0; i < ASIC_NUM_SCRATCH; i++)
+ write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
+ write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
+ write_csr(dd, ASIC_CFG_DRV_STR, 0);
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0);
+ /* ASIC_STS_THERM read-only */
+ /* ASIC_CFG_RESET leave alone */
+
+ write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
+ /* ASIC_PCIE_SD_HOST_STATUS read-only */
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
+ /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
+ /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
+ /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
+ for (i = 0; i < 16; i++)
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
+
+ /* ASIC_GPIO_IN read-only */
+ write_csr(dd, ASIC_GPIO_OE, 0);
+ write_csr(dd, ASIC_GPIO_INVERT, 0);
+ write_csr(dd, ASIC_GPIO_OUT, 0);
+ write_csr(dd, ASIC_GPIO_MASK, 0);
+ /* ASIC_GPIO_STATUS read-only */
+ write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
+ /* ASIC_GPIO_FORCE leave alone */
+
+ /* ASIC_QSFP1_IN read-only */
+ write_csr(dd, ASIC_QSFP1_OE, 0);
+ write_csr(dd, ASIC_QSFP1_INVERT, 0);
+ write_csr(dd, ASIC_QSFP1_OUT, 0);
+ write_csr(dd, ASIC_QSFP1_MASK, 0);
+ /* ASIC_QSFP1_STATUS read-only */
+ write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
+ /* ASIC_QSFP1_FORCE leave alone */
+
+ /* ASIC_QSFP2_IN read-only */
+ write_csr(dd, ASIC_QSFP2_OE, 0);
+ write_csr(dd, ASIC_QSFP2_INVERT, 0);
+ write_csr(dd, ASIC_QSFP2_OUT, 0);
+ write_csr(dd, ASIC_QSFP2_MASK, 0);
+ /* ASIC_QSFP2_STATUS read-only */
+ write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
+ /* ASIC_QSFP2_FORCE leave alone */
+
+ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
+ /* this also writes a NOP command, clearing paging mode */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
+ write_csr(dd, ASIC_EEP_DATA, 0);
+
+done:
+ mutex_unlock(&asic_mutex);
+}
+
+/* set MISC CSRs to chip reset defaults */
+static void reset_misc_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
+ write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
+ write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
+ }
+	/* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
+	   only be written in 128-byte chunks */
+ /* init RSA engine to clear lingering errors */
+ write_csr(dd, MISC_CFG_RSA_CMD, 1);
+ write_csr(dd, MISC_CFG_RSA_MU, 0);
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+ /* MISC_STS_8051_DIGEST read-only */
+ /* MISC_STS_SBM_DIGEST read-only */
+ /* MISC_STS_PCIE_DIGEST read-only */
+ /* MISC_STS_FAB_DIGEST read-only */
+ /* MISC_ERR_STATUS read-only */
+ write_csr(dd, MISC_ERR_MASK, 0);
+ write_csr(dd, MISC_ERR_CLEAR, ~0ull);
+ /* MISC_ERR_FORCE leave alone */
+}
+
+/* set TXE CSRs to chip reset defaults */
+static void reset_txe_csrs(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /*
+ * TXE Kernel CSRs
+ */
+ write_csr(dd, SEND_CTRL, 0);
+ __cm_reset(dd, 0); /* reset CM internal state */
+ /* SEND_CONTEXTS read-only */
+ /* SEND_DMA_ENGINES read-only */
+ /* SEND_PIO_MEM_SIZE read-only */
+ /* SEND_DMA_MEM_SIZE read-only */
+ write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
+ pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
+ /* SEND_PIO_ERR_STATUS read-only */
+ write_csr(dd, SEND_PIO_ERR_MASK, 0);
+ write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
+ /* SEND_PIO_ERR_FORCE leave alone */
+ /* SEND_DMA_ERR_STATUS read-only */
+ write_csr(dd, SEND_DMA_ERR_MASK, 0);
+ write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
+ /* SEND_DMA_ERR_FORCE leave alone */
+ /* SEND_EGRESS_ERR_STATUS read-only */
+ write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
+ write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
+ /* SEND_EGRESS_ERR_FORCE leave alone */
+ write_csr(dd, SEND_BTH_QP, 0);
+ write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
+ write_csr(dd, SEND_SC2VLT0, 0);
+ write_csr(dd, SEND_SC2VLT1, 0);
+ write_csr(dd, SEND_SC2VLT2, 0);
+ write_csr(dd, SEND_SC2VLT3, 0);
+ write_csr(dd, SEND_LEN_CHECK0, 0);
+ write_csr(dd, SEND_LEN_CHECK1, 0);
+ /* SEND_ERR_STATUS read-only */
+ write_csr(dd, SEND_ERR_MASK, 0);
+ write_csr(dd, SEND_ERR_CLEAR, ~0ull);
+ /* SEND_ERR_FORCE read-only */
+	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
+		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
+	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
+		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
+	for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
+		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
+	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
+		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
+	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
+		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
+ write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT,
+ SEND_CM_GLOBAL_CREDIT_RESETCSR);
+ /* SEND_CM_CREDIT_USED_STATUS read-only */
+ write_csr(dd, SEND_CM_TIMER_CTRL, 0);
+ write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
+ write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
+ write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
+ write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
+	for (i = 0; i < TXE_NUM_DATA_VL; i++)
+		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL15, 0);
+ /* SEND_CM_CREDIT_USED_VL read-only */
+ /* SEND_CM_CREDIT_USED_VL15 read-only */
+ /* SEND_EGRESS_CTXT_STATUS read-only */
+ /* SEND_EGRESS_SEND_DMA_STATUS read-only */
+ write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
+ /* SEND_EGRESS_ERR_INFO read-only */
+ /* SEND_EGRESS_ERR_SOURCE read-only */
+
+ /*
+ * TXE Per-Context CSRs
+ */
+ for (i = 0; i < dd->chip_send_contexts; i++) {
+ write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
+ write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
+ }
+
+ /*
+ * TXE Per-SDMA CSRs
+ */
+ for (i = 0; i < dd->chip_sdma_engines; i++) {
+ write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
+ /* SEND_DMA_STATUS read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
+ /* SEND_DMA_HEAD read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
+ /* SEND_DMA_IDLE_CNT read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
+ /* SEND_DMA_DESC_FETCHED_CNT read-only */
+ /* SEND_DMA_ENG_ERR_STATUS read-only */
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
+ /* SEND_DMA_ENG_ERR_FORCE leave alone */
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
+ write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
+ }
+}
+
+/*
+ * Expect on entry:
+ * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
+ */
+static void init_rbufs(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ int count;
+
+ /*
+ * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
+ * clear.
+ */
+ count = 0;
+ while (1) {
+ reg = read_csr(dd, RCV_STATUS);
+ if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
+ | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
+ break;
+ /*
+ * Give up after 1ms - maximum wait time.
+ *
+		 * RBuf size is 148 KiB.  Slowest possible is PCIe Gen1 x1 at
+		 * 250 MB/s bandwidth.  Lower rate to 66% for overhead to get:
+		 * 148 KiB / (66% * 250 MB/s) = 920us
+ */
+ if (count++ > 500) {
+ dd_dev_err(dd,
+ "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
+ __func__, reg);
+ break;
+ }
+ udelay(2); /* do not busy-wait the CSR */
+ }
+
+ /* start the init - expect RcvCtrl to be 0 */
+ write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
+
+ /*
+	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
+	 * period after the write before RcvStatus.RxRbufInitDone is valid.
+	 * The delay in the first run through the loop below is sufficient and
+	 * required before the first read of RcvStatus.RxRbufInitDone.
+ */
+ read_csr(dd, RCV_CTRL);
+
+ /* wait for the init to finish */
+ count = 0;
+ while (1) {
+ /* delay is required first time through - see above */
+ udelay(2); /* do not busy-wait the CSR */
+ reg = read_csr(dd, RCV_STATUS);
+ if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
+ break;
+
+ /* give up after 100us - slowest possible at 33MHz is 73us */
+ if (count++ > 50) {
+ dd_dev_err(dd,
+				"%s: RcvStatus.RxRbufInitDone not set, continuing\n",
+ __func__);
+ break;
+ }
+ }
+}
+
+/* set RXE CSRs to chip reset defaults */
+static void reset_rxe_csrs(struct hfi1_devdata *dd)
+{
+ int i, j;
+
+ /*
+ * RXE Kernel CSRs
+ */
+ write_csr(dd, RCV_CTRL, 0);
+ init_rbufs(dd);
+ /* RCV_STATUS read-only */
+ /* RCV_CONTEXTS read-only */
+ /* RCV_ARRAY_CNT read-only */
+ /* RCV_BUF_SIZE read-only */
+ write_csr(dd, RCV_BTH_QP, 0);
+ write_csr(dd, RCV_MULTICAST, 0);
+ write_csr(dd, RCV_BYPASS, 0);
+ write_csr(dd, RCV_VL15, 0);
+ /* this is a clear-down */
+ write_csr(dd, RCV_ERR_INFO,
+ RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
+ /* RCV_ERR_STATUS read-only */
+ write_csr(dd, RCV_ERR_MASK, 0);
+ write_csr(dd, RCV_ERR_CLEAR, ~0ull);
+ /* RCV_ERR_FORCE leave alone */
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
+ for (i = 0; i < 4; i++)
+ write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
+ write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
+ write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
+ for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
+ write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
+ write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
+ write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
+ }
+ for (i = 0; i < 32; i++)
+ write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
+
+ /*
+ * RXE Kernel and User Per-Context CSRs
+ */
+ for (i = 0; i < dd->chip_rcv_contexts; i++) {
+ /* kernel */
+ write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
+ /* RCV_CTXT_STATUS read-only */
+ write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
+ write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
+ write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
+
+ /* user */
+ /* RCV_HDR_TAIL read-only */
+ write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
+ /* RCV_EGR_INDEX_TAIL read-only */
+ write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
+ /* RCV_EGR_OFFSET_TAIL read-only */
+ for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
+ write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
+ 0);
+ }
+ }
+}
+
+/*
+ * Set sc2vl tables.
+ *
+ * They power on to zeros, so to avoid send context errors
+ * they need to be set:
+ *
+ * SC 0-7 -> VL 0-7 (respectively)
+ * SC 15 -> VL 15
+ * otherwise
+ * -> VL 0
+ */
+static void init_sc2vl_tables(struct hfi1_devdata *dd)
+{
+ int i;
+ /* init per architecture spec, constrained by hardware capability */
+
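+	/*
+	 * Packing note (from the calls below): each SEND_SC2VLTn CSR holds
+	 * eight 8-bit SC-to-VL entries, while each DCC_CFG_SC_VL_TABLE CSR
+	 * holds sixteen 4-bit entries.
+	 */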
+ /* HFI maps sent packets */
+ write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
+ 0,
+ 0, 0, 1, 1,
+ 2, 2, 3, 3,
+ 4, 4, 5, 5,
+ 6, 6, 7, 7));
+ write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
+ 1,
+ 8, 0, 9, 0,
+ 10, 0, 11, 0,
+ 12, 0, 13, 0,
+ 14, 0, 15, 15));
+ write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
+ 2,
+ 16, 0, 17, 0,
+ 18, 0, 19, 0,
+ 20, 0, 21, 0,
+ 22, 0, 23, 0));
+ write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
+ 3,
+ 24, 0, 25, 0,
+ 26, 0, 27, 0,
+ 28, 0, 29, 0,
+ 30, 0, 31, 0));
+
+ /* DC maps received packets */
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
+ 15_0,
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
+ write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
+ 31_16,
+ 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
+ 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
+
+ /* initialize the cached sc2vl values consistently with h/w */
+ for (i = 0; i < 32; i++) {
+ if (i < 8 || i == 15)
+ *((u8 *)(dd->sc2vl) + i) = (u8)i;
+ else
+ *((u8 *)(dd->sc2vl) + i) = 0;
+ }
+}
+
+/*
+ * Read chip sizes and then reset parts to sane, disabled values.  We cannot
+ * depend on the chip going through a power-on reset - a driver may be loaded
+ * and unloaded many times.
+ *
+ * Do not write any CSR values to the chip in this routine - there may be
+ * a reset following the (possible) FLR in this routine.
+ */
+static void init_chip(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /*
+ * Put the HFI CSRs in a known state.
+ * Combine this with a DC reset.
+ *
+ * Stop the device from doing anything while we do a
+ * reset. We know there are no other active users of
+	 * the device since we are now in charge.  Turn off
+	 * all outbound and inbound traffic and make sure
+ * the device does not generate any interrupts.
+ */
+
+ /* disable send contexts and SDMA engines */
+ write_csr(dd, SEND_CTRL, 0);
+ for (i = 0; i < dd->chip_send_contexts; i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
+ for (i = 0; i < dd->chip_sdma_engines; i++)
+ write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
+ /* disable port (turn off RXE inbound traffic) and contexts */
+ write_csr(dd, RCV_CTRL, 0);
+ for (i = 0; i < dd->chip_rcv_contexts; i++)
+ write_csr(dd, RCV_CTXT_CTRL, 0);
+ /* mask all interrupt sources */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++)
+		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
+
+ /*
+ * DC Reset: do a full DC reset before the register clear.
+ * A recommended length of time to hold is one CSR read,
+ * so reread the CceDcCtrl. Then, hold the DC in reset
+ * across the clear.
+ */
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
+ (void) read_csr(dd, CCE_DC_CTRL);
+
+ if (use_flr) {
+ /*
+ * A FLR will reset the SPC core and part of the PCIe.
+ * The parts that need to be restored have already been
+ * saved.
+ */
+ dd_dev_info(dd, "Resetting CSRs with FLR\n");
+
+ /* do the FLR, the DC reset will remain */
+ hfi1_pcie_flr(dd);
+
+ /* restore command and BARs */
+ restore_pci_variables(dd);
+
+ if (is_a0(dd)) {
+ dd_dev_info(dd, "Resetting CSRs with FLR\n");
+ hfi1_pcie_flr(dd);
+ restore_pci_variables(dd);
+ }
+
+ } else {
+ dd_dev_info(dd, "Resetting CSRs with writes\n");
+ reset_cce_csrs(dd);
+ reset_txe_csrs(dd);
+ reset_rxe_csrs(dd);
+ reset_asic_csrs(dd);
+ reset_misc_csrs(dd);
+ }
+ /* clear the DC reset */
+ write_csr(dd, CCE_DC_CTRL, 0);
+ /* Set the LED off */
+ if (is_a0(dd))
+ setextled(dd, 0);
+ /*
+ * Clear the QSFP reset.
+ * A0 leaves the out lines floating on power on, then on an FLR
+ * enforces a 0 on all out pins. The driver does not touch
+	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
+	 * holds anything plugged in constantly in reset, if it pays
+	 * attention to RESET_N.
+ * A prime example of this is SiPh. For now, set all pins high.
+ * I2CCLK and I2CDAT will change per direction, and INT_N and
+ * MODPRS_N are input only and their value is ignored.
+ */
+ if (is_a0(dd)) {
+ write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
+ write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
+ }
+}
+
+static void init_early_variables(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* assign link credit variables */
+ dd->vau = CM_VAU;
+ dd->link_credits = CM_GLOBAL_CREDITS;
+ if (is_a0(dd))
+ dd->link_credits--;
+ dd->vcu = cu_to_vcu(hfi1_cu);
+ /* enough room for 8 MAD packets plus header - 17K */
+ dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
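+	/* with vAU = 3 (64-byte AUs): 8 * 2176 bytes = 17408 / 64 = 272 AUs */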
+ if (dd->vl15_init > dd->link_credits)
+ dd->vl15_init = dd->link_credits;
+
+ write_uninitialized_csrs_and_memories(dd);
+
+ if (HFI1_CAP_IS_KSET(PKEY_CHECK))
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_pportdata *ppd = &dd->pport[i];
+
+ set_partition_keys(ppd);
+ }
+ init_sc2vl_tables(dd);
+}
+
+static void init_kdeth_qp(struct hfi1_devdata *dd)
+{
+ /* user changed the KDETH_QP */
+ if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
+ /* out of range or illegal value */
+ dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
+ kdeth_qp = 0;
+ }
+ if (kdeth_qp == 0) /* not set, or failed range check */
+ kdeth_qp = DEFAULT_KDETH_QP;
+
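+	/*
+	 * The prefix is compared against the upper bits of the 24-bit BTH
+	 * QPN, so a prefix of e.g. 0x80 marks QPNs 0x800000-0x80ffff as
+	 * KDETH packets in both the send and receive CSRs below.
+	 */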
+ write_csr(dd, SEND_BTH_QP,
+ (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
+ << SEND_BTH_QP_KDETH_QP_SHIFT);
+
+ write_csr(dd, RCV_BTH_QP,
+ (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
+ << RCV_BTH_QP_KDETH_QP_SHIFT);
+}
+
+/**
+ * init_qpmap_table
+ * @dd - device data
+ * @first_ctxt - first context
+ * @last_ctxt - last context
+ *
+ * This routine sets the qpn mapping table that
+ * is indexed by qpn[8:1].
+ *
+ * The routine will round robin the 256 settings
+ * from first_ctxt to last_ctxt.
+ *
+ * The first/last looks ahead to having specialized
+ * receive contexts for mgmt and bypass.  Normal
+ * verbs traffic is assumed to be on a range
+ * of receive contexts.
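+ *
+ * For example (hypothetical values), first_ctxt = 4 and last_ctxt = 6
+ * fill the 256 entries as 4,5,6,4,5,6,... with eight 8-bit entries
+ * packed into each 64-bit RCV_QP_MAP_TABLE CSR (VL15CTXT is skipped).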
+ */
+static void init_qpmap_table(struct hfi1_devdata *dd,
+ u32 first_ctxt,
+ u32 last_ctxt)
+{
+ u64 reg = 0;
+ u64 regno = RCV_QP_MAP_TABLE;
+ int i;
+ u64 ctxt = first_ctxt;
+
+ for (i = 0; i < 256;) {
+ if (ctxt == VL15CTXT) {
+ ctxt++;
+ if (ctxt > last_ctxt)
+ ctxt = first_ctxt;
+ continue;
+ }
+ reg |= ctxt << (8 * (i % 8));
+ i++;
+ ctxt++;
+ if (ctxt > last_ctxt)
+ ctxt = first_ctxt;
+ if (i % 8 == 0) {
+ write_csr(dd, regno, reg);
+ reg = 0;
+ regno += 8;
+ }
+ }
+ if (i % 8)
+ write_csr(dd, regno, reg);
+
+ add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
+ | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
+}
+
+/**
+ * init_qos - init RX qos
+ * @dd - device data
+ * @first_ctxt - first context to use
+ *
+ * This routine initializes Rule 0 and the
+ * RSM map table to implement qos.
+ *
+ * If all of the limit tests succeed,
+ * qos is applied based on the array
+ * interpretation of krcvqs where
+ * entry 0 is VL0.
+ *
+ * The number of vl bits (n) and the number of qpn
+ * bits (m) are computed to feed both the RSM map table
+ * and the single rule.
+ *
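+ * A worked example (hypothetical values): num_vls = 4 and
+ * krcvqs = {2, 2, 2, 2} give max_by_vl = 2, qpns_per_vl = 2,
+ * n = ilog2(4) = 2 and m = ilog2(2) = 1; m + n = 3 <= 7, and
+ * num_vls * qpns_per_vl = 8 receive contexts are consumed
+ * starting at first_ctxt.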
+ */
+static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
+{
+ u8 max_by_vl = 0;
+ unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
+ u64 *rsmmap;
+ u64 reg;
+ u8 rxcontext = is_a0(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
+
+ /* validate */
+ if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
+ num_vls == 1 ||
+ krcvqsset <= 1)
+ goto bail;
+ for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
+ if (krcvqs[i] > max_by_vl)
+ max_by_vl = krcvqs[i];
+ if (max_by_vl > 32)
+ goto bail;
+ qpns_per_vl = __roundup_pow_of_two(max_by_vl);
+ /* determine bits vl */
+ n = ilog2(num_vls);
+ /* determine bits for qpn */
+ m = ilog2(qpns_per_vl);
+ if ((m + n) > 7)
+ goto bail;
+ if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
+ goto bail;
+	rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
+	if (!rsmmap)
+		goto bail;
+	memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
+ /* init the local copy of the table */
+ for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
+ unsigned tctxt;
+
+ for (qpn = 0, tctxt = ctxt;
+ krcvqs[i] && qpn < qpns_per_vl; qpn++) {
+ unsigned idx, regoff, regidx;
+
+			/* generate an index < 128 */
+ idx = (qpn << n) ^ i;
+ regoff = (idx % 8) * 8;
+ regidx = idx / 8;
+ reg = rsmmap[regidx];
+ /* replace 0xff with context number */
+ reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
+ << regoff);
+ reg |= (u64)(tctxt++) << regoff;
+ rsmmap[regidx] = reg;
+ if (tctxt == ctxt + krcvqs[i])
+ tctxt = ctxt;
+ }
+ ctxt += krcvqs[i];
+ }
+ /* flush cached copies to chip */
+ for (i = 0; i < NUM_MAP_REGS; i++)
+ write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
+ /* add rule0 */
+ write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
+ RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
+ << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
+ 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
+ write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
+ LRH_BTH_MATCH_OFFSET
+ << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
+ LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
+ LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
+ ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
+ QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
+ ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
+ write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
+ LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
+ LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
+ LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
+ LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
+ /* Enable RSM */
+ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+ kfree(rsmmap);
+ /* map everything else (non-VL15) to context 0 */
+	init_qpmap_table(dd, 0, 0);
+ dd->qos_shift = n + 1;
+ return;
+bail:
+ dd->qos_shift = 1;
+ init_qpmap_table(
+ dd,
+ dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0,
+ dd->n_krcv_queues - 1);
+}
+
+static void init_rxe(struct hfi1_devdata *dd)
+{
+ /* enable all receive errors */
+ write_csr(dd, RCV_ERR_MASK, ~0ull);
+ /* setup QPN map table - start where VL15 context leaves off */
+ init_qos(
+ dd,
+ dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
+ /*
+ * make sure RcvCtrl.RcvWcb <= PCIe Device Control
+ * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
+ * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
+	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
+	 * Max_Payload_Size set to its minimum of 128.
+ *
+ * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
+ * (64 bytes). Max_Payload_Size is possibly modified upward in
+ * tune_pcie_caps() which is called after this routine.
+ */
+}
+
+static void init_other(struct hfi1_devdata *dd)
+{
+ /* enable all CCE errors */
+ write_csr(dd, CCE_ERR_MASK, ~0ull);
+ /* enable *some* Misc errors */
+ write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
+ /* enable all DC errors, except LCB */
+ write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
+ write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
+}
+
+/*
+ * Fill out the given AU table using the given CU.  A CU is defined in terms
+ * of AUs.  The table is an encoding: given the index, how many AUs does that
+ * index represent?
+ *
+ * NOTE: Assumes that the register layout is the same for the
+ * local and remote tables.
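+ *
+ * For illustration, with cu = 1 the eight table entries encode
+ * 0, 1, 2, 4, 8, 16, 32 and 64 AUs for indices 0 through 7.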
+ */
+static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
+ u32 csr0to3, u32 csr4to7)
+{
+ write_csr(dd, csr0to3,
+ 0ull <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
+ | 1ull <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
+ | 2ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
+ | 4ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
+ write_csr(dd, csr4to7,
+ 8ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
+ | 16ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
+ | 32ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
+ | 64ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
+}
+
+static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
+{
+ assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
+ SEND_CM_LOCAL_AU_TABLE4_TO7);
+}
+
+void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
+{
+ assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
+ SEND_CM_REMOTE_AU_TABLE4_TO7);
+}
+
+static void init_txe(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* enable all PIO, SDMA, general, and Egress errors */
+ write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_ERR_MASK, ~0ull);
+ write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
+
+ /* enable all per-context and per-SDMA engine errors */
+ for (i = 0; i < dd->chip_send_contexts; i++)
+ write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
+ for (i = 0; i < dd->chip_sdma_engines; i++)
+ write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
+
+ /* set the local CU to AU mapping */
+ assign_local_cm_au_table(dd, dd->vcu);
+
+ /*
+ * Set reasonable default for Credit Return Timer
+ * Don't set on Simulator - causes it to choke.
+ */
+ if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
+ write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
+}
+
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
+{
+ struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
+ unsigned sctxt;
+ int ret = 0;
+ u64 reg;
+
+ if (!rcd || !rcd->sc) {
+ ret = -EINVAL;
+ goto done;
+ }
+ sctxt = rcd->sc->hw_context;
+ reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
+ ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
+ SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
+ /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
+ if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
+ reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
+ /*
+ * Enable send-side J_KEY integrity check, unless this is A0 h/w
+ * (due to A0 erratum).
+ */
+ if (!is_a0(dd)) {
+ reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ }
+
+ /* Enable J_KEY check on receive context. */
+ reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
+ ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
+ RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
+ write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
+done:
+ return ret;
+}
+
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
+{
+ struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
+ unsigned sctxt;
+ int ret = 0;
+ u64 reg;
+
+ if (!rcd || !rcd->sc) {
+ ret = -EINVAL;
+ goto done;
+ }
+ sctxt = rcd->sc->hw_context;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
+ /*
+ * Disable send-side J_KEY integrity check, unless this is A0 h/w.
+	 * This check would not have been enabled for A0 h/w, see
+	 * hfi1_set_ctxt_jkey().
+ */
+ if (!is_a0(dd)) {
+ reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ }
+ /* Turn off the J_KEY on the receive side */
+ write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
+done:
+ return ret;
+}
+
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned sctxt;
+ int ret = 0;
+ u64 reg;
+
+	if (ctxt < dd->num_rcv_contexts) {
+		rcd = dd->rcd[ctxt];
+	} else {
+ ret = -EINVAL;
+ goto done;
+ }
+ if (!rcd || !rcd->sc) {
+ ret = -EINVAL;
+ goto done;
+ }
+ sctxt = rcd->sc->hw_context;
+ reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
+ SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
+ reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+done:
+ return ret;
+}
+
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned sctxt;
+ int ret = 0;
+ u64 reg;
+
+	if (ctxt < dd->num_rcv_contexts) {
+		rcd = dd->rcd[ctxt];
+	} else {
+ ret = -EINVAL;
+ goto done;
+ }
+ if (!rcd || !rcd->sc) {
+ ret = -EINVAL;
+ goto done;
+ }
+ sctxt = rcd->sc->hw_context;
+ reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
+done:
+ return ret;
+}
+
+/*
+ * Start cleaning up the chip.  Our clean up happens in multiple
+ * stages and this is just the first.
+ */
+void hfi1_start_cleanup(struct hfi1_devdata *dd)
+{
+ free_cntrs(dd);
+ free_rcverr(dd);
+ clean_up_interrupts(dd);
+}
+
+#define HFI_BASE_GUID(dev) \
+ ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
+
+/*
+ * Certain chip functions need to be initialized only once per asic
+ * instead of per-device. This function finds the peer device and
+ * checks whether that chip initialization needs to be done by this
+ * device.
+ */
+static void asic_should_init(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+ struct hfi1_devdata *tmp, *peer = NULL;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ /* Find our peer device */
+ list_for_each_entry(tmp, &hfi1_dev_list, list) {
+ if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
+ dd->unit != tmp->unit) {
+ peer = tmp;
+ break;
+ }
+ }
+
+ /*
+ * "Claim" the ASIC for initialization if it hasn't been
+	 * "claimed" yet.
+ */
+ if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
+ dd->flags |= HFI1_DO_INIT_ASIC;
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+}
+
+/**
+ * Allocate and initialize the device structure for the hfi.
+ * @pdev: the pci_dev for hfi1_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, initializes, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ u64 reg;
+ int i, ret;
+ static const char * const inames[] = { /* implementation names */
+ "RTL silicon",
+ "RTL VCS simulation",
+ "RTL FPGA emulation",
+ "Functional simulator"
+ };
+
+ dd = hfi1_alloc_devdata(pdev,
+ NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
+ if (IS_ERR(dd))
+ goto bail;
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ int vl;
+ /* init common fields */
+ hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
+ /* DC supports 4 link widths */
+ ppd->link_width_supported =
+ OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
+ OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
+ ppd->link_width_downgrade_supported =
+ ppd->link_width_supported;
+ /* start out enabling only 4X */
+ ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
+ ppd->link_width_downgrade_enabled =
+ ppd->link_width_downgrade_supported;
+ /* link width active is 0 when link is down */
+ /* link width downgrade active is 0 when link is down */
+
+ if (num_vls < HFI1_MIN_VLS_SUPPORTED
+ || num_vls > HFI1_MAX_VLS_SUPPORTED) {
+ hfi1_early_err(&pdev->dev,
+ "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
+ num_vls = HFI1_MAX_VLS_SUPPORTED;
+ }
+ ppd->vls_supported = num_vls;
+ ppd->vls_operational = ppd->vls_supported;
+ /* Set the default MTU. */
+ for (vl = 0; vl < num_vls; vl++)
+ dd->vld[vl].mtu = hfi1_max_mtu;
+ dd->vld[15].mtu = MAX_MAD_PACKET;
+ /*
+		 * Set the initial values to reasonable defaults, will be set
+ * for real when link is up.
+ */
+ ppd->lstate = IB_PORT_DOWN;
+ ppd->overrun_threshold = 0x4;
+ ppd->phy_error_threshold = 0xf;
+ ppd->port_crc_mode_enabled = link_crc_mask;
+ /* initialize supported LTP CRC mode */
+ ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
+ /* initialize enabled LTP CRC mode */
+ ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
+ /* start in offline */
+ ppd->host_link_state = HLS_DN_OFFLINE;
+ init_vl_arb_caches(ppd);
+ }
+
+ dd->link_default = HLS_DN_POLL;
+
+ /*
+ * Do remaining PCIe setup and save PCIe values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped.
+ */
+ ret = hfi1_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* verify that reads actually work, save revision for reset check */
+ dd->revision = read_csr(dd, CCE_REVISION);
+ if (dd->revision == ~(u64)0) {
+ dd_dev_err(dd, "cannot read chip CSRs\n");
+ ret = -EINVAL;
+ goto bail_cleanup;
+ }
+ dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
+ & CCE_REVISION_CHIP_REV_MAJOR_MASK;
+ dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
+ & CCE_REVISION_CHIP_REV_MINOR_MASK;
+
+ /* obtain the hardware ID - NOT related to unit, which is a
+ software enumeration */
+ reg = read_csr(dd, CCE_REVISION2);
+ dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
+ & CCE_REVISION2_HFI_ID_MASK;
+ /* the variable size will remove unwanted bits */
+ dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
+ dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
+ dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
+ dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
+ (int)dd->irev);
+
+ /* speeds the hardware can support */
+ dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
+ /* speeds allowed to run at */
+ dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
+ /* give a reasonable active value, will be set on link up */
+ dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
+
+ dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
+ dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
+ dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
+ dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
+ dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
+ /* fix up link widths for emulation _p */
+ ppd = dd->pport;
+ if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
+ ppd->link_width_supported =
+ ppd->link_width_enabled =
+ ppd->link_width_downgrade_supported =
+ ppd->link_width_downgrade_enabled =
+ OPA_LINK_WIDTH_1X;
+ }
+	/* ensure num_vls isn't larger than number of sdma engines */
+ if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
+ dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
+ ppd->vls_supported = num_vls = HFI1_MAX_VLS_SUPPORTED;
+ ppd->vls_operational = ppd->vls_supported;
+ }
+
+ /*
+	 * Convert the ns parameter to the CSR's units of 64 cclocks.
+ * Limit the max if larger than the field holds. If timeout is
+ * non-zero, then the calculated field will be at least 1.
+ *
+ * Must be after icode is set up - the cclock rate depends
+ * on knowing the hardware being used.
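+	 *
+	 * For example (illustrative): at the 805 MHz cclock (1242 ps per
+	 * tick), a 1000 ns timeout is ~805 cclocks and is programmed as
+	 * 805 / 64 = 12.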
+ */
+ dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
+ if (dd->rcv_intr_timeout_csr >
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
+ dd->rcv_intr_timeout_csr =
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
+ else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
+ dd->rcv_intr_timeout_csr = 1;
+
+ /* obtain chip sizes, reset chip CSRs */
+ init_chip(dd);
+
+ /* read in the PCIe link speed information */
+ ret = pcie_speeds(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* needs to be done before we look for the peer device */
+ read_guid(dd);
+
+ asic_should_init(dd);
+
+ /* read in firmware */
+ ret = hfi1_firmware_init(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /*
+ * In general, the PCIe Gen3 transition must occur after the
+ * chip has been idled (so it won't initiate any PCIe transactions
+ * e.g. an interrupt) and before the driver changes any registers
+ * (the transition will reset the registers).
+ *
+ * In particular, place this call after:
+ * - init_chip() - the chip will not initiate any PCIe transactions
+ * - pcie_speeds() - reads the current link speed
+ * - hfi1_firmware_init() - the needed firmware is ready to be
+ * downloaded
+ */
+ ret = do_pcie_gen3_transition(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* start setting dd values and adjusting CSRs */
+ init_early_variables(dd);
+
+ parse_platform_config(dd);
+
+ /* add board names as they are defined */
+	dd->boardname = kmalloc(64, GFP_KERNEL);
+	if (!dd->boardname) {
+		ret = -ENOMEM;
+		goto bail_cleanup;
+	}
+ snprintf(dd->boardname, 64, "Board ID 0x%llx",
+ dd->revision >> CCE_REVISION_BOARD_ID_LOWER_NIBBLE_SHIFT
+ & CCE_REVISION_BOARD_ID_LOWER_NIBBLE_MASK);
+
+ snprintf(dd->boardversion, BOARD_VERS_MAX,
+ "ChipABI %u.%u, %s, ChipRev %u.%u, SW Compat %llu\n",
+ HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
+ dd->boardname,
+ (u32)dd->majrev,
+ (u32)dd->minrev,
+ (dd->revision >> CCE_REVISION_SW_SHIFT)
+ & CCE_REVISION_SW_MASK);
+
+ ret = set_up_context_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* set initial RXE CSRs */
+ init_rxe(dd);
+ /* set initial TXE CSRs */
+ init_txe(dd);
+ /* set initial non-RXE, non-TXE CSRs */
+ init_other(dd);
+ /* set up KDETH QP prefix in both RX and TX CSRs */
+ init_kdeth_qp(dd);
+
+ /* send contexts must be set up before receive contexts */
+ ret = init_send_contexts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ ret = hfi1_create_ctxts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
+ /*
+ * rcd[0] is guaranteed to be valid by this point. Also, all
+	 * contexts are using the same value, as per the module parameter.
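+	 *
+	 * In the line below, sizeof(u64) / sizeof(u32) == 2: the entry
+	 * size is counted in DWs and the RHF is one QW, i.e. two DWs.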
+ */
+ dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
+
+ ret = init_pervl_scs(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* sdma init */
+ for (i = 0; i < dd->num_pports; ++i) {
+ ret = sdma_init(dd, i);
+ if (ret)
+ goto bail_cleanup;
+ }
+
+ /* use contexts created by hfi1_create_ctxts */
+ ret = set_up_interrupts(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ /* set up LCB access - must be after set_up_interrupts() */
+ init_lcb_access(dd);
+
+ snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
+ dd->base_guid & 0xFFFFFF);
+
+ dd->oui1 = dd->base_guid >> 56 & 0xFF;
+ dd->oui2 = dd->base_guid >> 48 & 0xFF;
+ dd->oui3 = dd->base_guid >> 40 & 0xFF;
+
+ ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
+ if (ret)
+ goto bail_clear_intr;
+ check_fabric_firmware_versions(dd);
+
+ thermal_init(dd);
+
+ ret = init_cntrs(dd);
+ if (ret)
+ goto bail_clear_intr;
+
+ ret = init_rcverr(dd);
+ if (ret)
+ goto bail_free_cntrs;
+
+ ret = eprom_init(dd);
+ if (ret)
+ goto bail_free_rcverr;
+
+ goto bail;
+
+bail_free_rcverr:
+ free_rcverr(dd);
+bail_free_cntrs:
+ free_cntrs(dd);
+bail_clear_intr:
+ clean_up_interrupts(dd);
+bail_cleanup:
+ hfi1_pcie_ddcleanup(dd);
+bail_free:
+ hfi1_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
+
+static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
+ u32 dw_len)
+{
+ u32 delta_cycles;
+ u32 current_egress_rate = ppd->current_egress_rate;
+ /* rates here are in units of 10^6 bits/sec */
+
+ if (desired_egress_rate == -1)
+ return 0; /* shouldn't happen */
+
+ if (desired_egress_rate >= current_egress_rate)
+		return 0; /* we can't help it go faster, only slower */
+
+ delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
+ egress_cycles(dw_len * 4, current_egress_rate);
+
+ return (u16)delta_cycles;
+}
+
+/**
+ * create_pbc - build a pbc for transmission
+ * @ppd: the port device
+ * @flags: special case flags OR'd into the built pbc
+ * @srate_mbs: static rate, in Mb/s
+ * @vl: vl
+ * @dw_len: dword length (header words + data words + pbc words)
+ *
+ * Create a PBC with the given flags, rate, VL, and length.
+ *
+ * NOTE: The PBC created will not insert any HCRC - all callers but one are
+ * for verbs, which does not use this PSM feature. The lone other caller
+ * is for the diagnostic interface which calls this if the user does not
+ * supply their own PBC.
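+ *
+ * Worked example (illustrative): flags = 0, srate_mbs = 0, vl = 0 and
+ * dw_len = 0x10 yield pbc = PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT |
+ * 0x10 = 0x08000010.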
+ */
+u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
+ u32 dw_len)
+{
+ u64 pbc, delay = 0;
+
+ if (unlikely(srate_mbs))
+ delay = delay_cycles(ppd, srate_mbs, dw_len);
+
+ pbc = flags
+ | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
+ | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
+ | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
+ | (dw_len & PBC_LENGTH_DWS_MASK)
+ << PBC_LENGTH_DWS_SHIFT;
+
+ return pbc;
+}
+
+#define SBUS_THERMAL 0x4f
+#define SBUS_THERM_MONITOR_MODE 0x1
+
+#define THERM_FAILURE(dev, ret, reason) \
+ dd_dev_err((dd), \
+ "Thermal sensor initialization failed: %s (%d)\n", \
+ (reason), (ret))
+
+/*
+ * Initialize the Avago Thermal sensor.
+ *
+ * After initialization, enable polling of the thermal sensor through
+ * the SBus interface.  For this to work, the SBus Master firmware has
+ * to be loaded, because the HW polling logic uses SBus interrupts,
+ * which are not supported by the default firmware.  Otherwise, no
+ * data will be returned through the ASIC_STS_THERM CSR.
+ */
+static int thermal_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ if (dd->icode != ICODE_RTL_SILICON ||
+ !(dd->flags & HFI1_DO_INIT_ASIC))
+ return ret;
+
+ acquire_hw_mutex(dd);
+ dd_dev_info(dd, "Initializing thermal sensor\n");
+ /* Thermal Sensor Initialization */
+ /* Step 1: Reset the Thermal SBus Receiver */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ RESET_SBUS_RECEIVER, 0);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Bus Reset");
+ goto done;
+ }
+ /* Step 2: Set Reset bit in Thermal block */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ WRITE_SBUS_RECEIVER, 0x1);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Therm Block Reset");
+ goto done;
+ }
+ /* Step 3: Write clock divider value (100MHz -> 2MHz) */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
+ WRITE_SBUS_RECEIVER, 0x32);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Clock Div");
+ goto done;
+ }
+ /* Step 4: Select temperature mode */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
+ WRITE_SBUS_RECEIVER,
+ SBUS_THERM_MONITOR_MODE);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Mode Sel");
+ goto done;
+ }
+ /* Step 5: De-assert block reset and start conversion */
+ ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
+ WRITE_SBUS_RECEIVER, 0x2);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Write Reset Deassert");
+ goto done;
+ }
+ /* Step 5.1: Wait for first conversion (21.5ms per spec) */
+ msleep(22);
+
+ /* Enable polling of thermal readings */
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
+done:
+ release_hw_mutex(dd);
+ return ret;
+}
+
+static void handle_temp_err(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = &dd->pport[0];
+ /*
+ * Thermal Critical Interrupt
+ * Put the device into forced freeze mode, take link down to
+ * offline, and put DC into reset.
+ */
+ dd_dev_emerg(dd,
+ "Critical temperature reached! Forcing device into freeze mode!\n");
+ dd->flags |= HFI1_FORCED_FREEZE;
+ start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
+ /*
+ * Shut DC down as much and as quickly as possible.
+ *
+ * Step 1: Take the link down to OFFLINE. This will cause the
+ * 8051 to put the Serdes in reset. However, we don't want to
+ * go through the entire link state machine since we want to
+	 * shut down ASAP.  Furthermore, this is not a graceful shutdown
+ * but rather an attempt to save the chip.
+ * Code below is almost the same as quiet_serdes() but avoids
+ * all the extra work and the sleeps.
+ */
+ ppd->driver_link_ready = 0;
+ ppd->link_enabled = 0;
+ set_physical_link_state(dd, PLS_OFFLINE |
+ (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
+ /*
+ * Step 2: Shutdown LCB and 8051
+ * After shutdown, do not restore DC_CFG_RESET value.
+ */
+ dc_shutdown(dd);
+}
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h
new file mode 100644
index 000000000000..f89a432c7334
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/chip.h
@@ -0,0 +1,1035 @@
+#ifndef _CHIP_H
+#define _CHIP_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains all of the defines that are specific to the HFI chip
+ */
+
+/* sizes */
+#define CCE_NUM_MSIX_VECTORS 256
+#define CCE_NUM_INT_CSRS 12
+#define CCE_NUM_INT_MAP_CSRS 96
+#define NUM_INTERRUPT_SOURCES 768
+#define RXE_NUM_CONTEXTS 160
+#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
+#define RXE_NUM_TID_FLOWS 32
+#define RXE_NUM_DATA_VL 8
+#define TXE_NUM_CONTEXTS 160
+#define TXE_NUM_SDMA_ENGINES 16
+#define NUM_CONTEXTS_PER_SET 8
+#define VL_ARB_HIGH_PRIO_TABLE_SIZE 16
+#define VL_ARB_LOW_PRIO_TABLE_SIZE 16
+#define VL_ARB_TABLE_SIZE 16
+#define TXE_NUM_32_BIT_COUNTER 7
+#define TXE_NUM_64_BIT_COUNTER 30
+#define TXE_NUM_DATA_VL 8
+#define TXE_PIO_SIZE (32 * 0x100000) /* 32 MB */
+#define PIO_BLOCK_SIZE 64 /* bytes */
+#define SDMA_BLOCK_SIZE 64 /* bytes */
+#define RCV_BUF_BLOCK_SIZE 64 /* bytes */
+#define PIO_CMASK 0x7ff /* counter mask for free and fill counters */
+#define MAX_EAGER_ENTRIES 2048 /* max receive eager entries */
+#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
+/* Virtual Allocation Unit: AU = 8 * 2^vAU bytes.  With vAU = 3 the AU is
+   fixed at 64 bytes for all generation one devices */
+#define CM_VAU 3
+/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
+#define CM_GLOBAL_CREDITS 0x940
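+/* 0x940 = 2368 credits; at 64 bytes per AU this matches a 148 KiB RBuf */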
+/* Number of PKey entries in the HW */
+#define MAX_PKEY_VALUES 16
+
+#include "chip_registers.h"
+
+#define RXE_PER_CONTEXT_USER (RXE + RXE_PER_CONTEXT_OFFSET)
+#define TXE_PIO_SEND (TXE + TXE_PIO_SEND_OFFSET)
+
+/* PBC flags */
+#define PBC_INTR (1ull << 31)
+#define PBC_DC_INFO_SHIFT (30)
+#define PBC_DC_INFO (1ull << PBC_DC_INFO_SHIFT)
+#define PBC_TEST_EBP (1ull << 29)
+#define PBC_PACKET_BYPASS (1ull << 28)
+#define PBC_CREDIT_RETURN (1ull << 25)
+#define PBC_INSERT_BYPASS_ICRC (1ull << 24)
+#define PBC_TEST_BAD_ICRC (1ull << 23)
+#define PBC_FECN (1ull << 22)
+
+/* PbcInsertHcrc field settings */
+#define PBC_IHCRC_LKDETH 0x0 /* insert @ local KDETH offset */
+#define PBC_IHCRC_GKDETH 0x1 /* insert @ global KDETH offset */
+#define PBC_IHCRC_NONE 0x2 /* no HCRC inserted */
+
+/* PBC fields */
+#define PBC_STATIC_RATE_CONTROL_COUNT_SHIFT 32
+#define PBC_STATIC_RATE_CONTROL_COUNT_MASK 0xffffull
+#define PBC_STATIC_RATE_CONTROL_COUNT_SMASK \
+ (PBC_STATIC_RATE_CONTROL_COUNT_MASK << \
+ PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
+
+#define PBC_INSERT_HCRC_SHIFT 26
+#define PBC_INSERT_HCRC_MASK 0x3ull
+#define PBC_INSERT_HCRC_SMASK \
+ (PBC_INSERT_HCRC_MASK << PBC_INSERT_HCRC_SHIFT)
+
+#define PBC_VL_SHIFT 12
+#define PBC_VL_MASK 0xfull
+#define PBC_VL_SMASK (PBC_VL_MASK << PBC_VL_SHIFT)
+
+#define PBC_LENGTH_DWS_SHIFT 0
+#define PBC_LENGTH_DWS_MASK 0xfffull
+#define PBC_LENGTH_DWS_SMASK \
+ (PBC_LENGTH_DWS_MASK << PBC_LENGTH_DWS_SHIFT)
+
+/* Credit Return Fields */
+#define CR_COUNTER_SHIFT 0
+#define CR_COUNTER_MASK 0x7ffull
+#define CR_COUNTER_SMASK (CR_COUNTER_MASK << CR_COUNTER_SHIFT)
+
+#define CR_STATUS_SHIFT 11
+#define CR_STATUS_MASK 0x1ull
+#define CR_STATUS_SMASK (CR_STATUS_MASK << CR_STATUS_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT 12
+#define CR_CREDIT_RETURN_DUE_TO_PBC_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_PBC_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_PBC_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT 13
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT 14
+#define CR_CREDIT_RETURN_DUE_TO_ERR_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_ERR_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_ERR_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT)
+
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT 15
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_MASK 0x1ull
+#define CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK \
+ (CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
+ CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
+
+/* interrupt source numbers */
+#define IS_GENERAL_ERR_START 0
+#define IS_SDMAENG_ERR_START 16
+#define IS_SENDCTXT_ERR_START 32
+#define IS_SDMA_START 192 /* includes SDmaProgress,SDmaIdle */
+#define IS_VARIOUS_START 240
+#define IS_DC_START 248
+#define IS_RCVAVAIL_START 256
+#define IS_RCVURGENT_START 416
+#define IS_SENDCREDIT_START 576
+#define IS_RESERVED_START 736
+#define IS_MAX_SOURCES 768
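+/*
+ * e.g. the "receive available" interrupt source for receive context N
+ * is IS_RCVAVAIL_START + N
+ */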
+
+/* derived interrupt source values */
+#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
+#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
+#define IS_SENDCTXT_ERR_END IS_SDMA_START
+#define IS_SDMA_END IS_VARIOUS_START
+#define IS_VARIOUS_END IS_DC_START
+#define IS_DC_END IS_RCVAVAIL_START
+#define IS_RCVAVAIL_END IS_RCVURGENT_START
+#define IS_RCVURGENT_END IS_SENDCREDIT_START
+#define IS_SENDCREDIT_END IS_RESERVED_START
+#define IS_RESERVED_END IS_MAX_SOURCES
+
+/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
+#define QSFP1_INT 242
+#define QSFP2_INT 243
+
+/* DCC_CFG_PORT_CONFIG logical link states */
+#define LSTATE_DOWN 0x1
+#define LSTATE_INIT 0x2
+#define LSTATE_ARMED 0x3
+#define LSTATE_ACTIVE 0x4
+
+/* DC8051_STS_CUR_STATE port values (physical link states) */
+#define PLS_DISABLED 0x30
+#define PLS_OFFLINE 0x90
+#define PLS_OFFLINE_QUIET 0x90
+#define PLS_OFFLINE_PLANNED_DOWN_INFORM 0x91
+#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
+#define PLS_OFFLINE_REPORT_FAILURE 0x93
+#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
+#define PLS_POLLING 0x20
+#define PLS_POLLING_QUIET 0x20
+#define PLS_POLLING_ACTIVE 0x21
+#define PLS_CONFIGPHY 0x40
+#define PLS_CONFIGPHY_DEBOUCE 0x40
+#define PLS_CONFIGPHY_ESTCOMM 0x41
+#define PLS_CONFIGPHY_ESTCOMM_TXRX_HUNT 0x42
+#define PLS_CONFIGPHY_ESTcOMM_LOCAL_COMPLETE 0x43
+#define PLS_CONFIGPHY_OPTEQ 0x44
+#define PLS_CONFIGPHY_OPTEQ_OPTIMIZING 0x44
+#define PLS_CONFIGPHY_OPTEQ_LOCAL_COMPLETE 0x45
+#define PLS_CONFIGPHY_VERIFYCAP 0x46
+#define PLS_CONFIGPHY_VERIFYCAP_EXCHANGE 0x46
+#define PLS_CONFIGPHY_VERIFYCAP_LOCAL_COMPLETE 0x47
+#define PLS_CONFIGLT 0x48
+#define PLS_CONFIGLT_CONFIGURE 0x48
+#define PLS_CONFIGLT_LINK_TRANSFER_ACTIVE 0x49
+#define PLS_LINKUP 0x50
+#define PLS_PHYTEST 0xB0
+#define PLS_INTERNAL_SERDES_LOOPBACK 0xe1
+#define PLS_QUICK_LINKUP 0xe2
+
+/* DC_DC8051_CFG_HOST_CMD_0.REQ_TYPE - 8051 host commands */
+#define HCMD_LOAD_CONFIG_DATA 0x01
+#define HCMD_READ_CONFIG_DATA 0x02
+#define HCMD_CHANGE_PHY_STATE 0x03
+#define HCMD_SEND_LCB_IDLE_MSG 0x04
+#define HCMD_MISC 0x05
+#define HCMD_READ_LCB_IDLE_MSG 0x06
+#define HCMD_READ_LCB_CSR 0x07
+#define HCMD_INTERFACE_TEST 0xff
+
+/* DC_DC8051_CFG_HOST_CMD_1.RETURN_CODE - 8051 host command return */
+#define HCMD_SUCCESS 2
+
+/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR - error flags */
+#define SPICO_ROM_FAILED (1 << 0)
+#define UNKNOWN_FRAME (1 << 1)
+#define TARGET_BER_NOT_MET (1 << 2)
+#define FAILED_SERDES_INTERNAL_LOOPBACK (1 << 3)
+#define FAILED_SERDES_INIT (1 << 4)
+#define FAILED_LNI_POLLING (1 << 5)
+#define FAILED_LNI_DEBOUNCE (1 << 6)
+#define FAILED_LNI_ESTBCOMM (1 << 7)
+#define FAILED_LNI_OPTEQ (1 << 8)
+#define FAILED_LNI_VERIFY_CAP1 (1 << 9)
+#define FAILED_LNI_VERIFY_CAP2 (1 << 10)
+#define FAILED_LNI_CONFIGLT (1 << 11)
+
+#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
+ | FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
+ | FAILED_LNI_VERIFY_CAP1 \
+ | FAILED_LNI_VERIFY_CAP2 \
+ | FAILED_LNI_CONFIGLT)
+
+/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
+#define HOST_REQ_DONE (1 << 0)
+#define BC_PWR_MGM_MSG (1 << 1)
+#define BC_SMA_MSG (1 << 2)
+#define BC_BCC_UNKOWN_MSG (1 << 3)
+#define BC_IDLE_UNKNOWN_MSG (1 << 4)
+#define EXT_DEVICE_CFG_REQ (1 << 5)
+#define VERIFY_CAP_FRAME (1 << 6)
+#define LINKUP_ACHIEVED (1 << 7)
+#define LINK_GOING_DOWN (1 << 8)
+#define LINK_WIDTH_DOWNGRADED (1 << 9)
+
+/* DC_DC8051_CFG_EXT_DEV_1.REQ_TYPE - 8051 host requests */
+#define HREQ_LOAD_CONFIG 0x01
+#define HREQ_SAVE_CONFIG 0x02
+#define HREQ_READ_CONFIG 0x03
+#define HREQ_SET_TX_EQ_ABS 0x04
+#define HREQ_SET_TX_EQ_REL 0x05
+#define HREQ_ENABLE 0x06
+#define HREQ_CONFIG_DONE 0xfe
+#define HREQ_INTERFACE_TEST 0xff
+
+/* DC_DC8051_CFG_EXT_DEV_0.RETURN_CODE - 8051 host request return codes */
+#define HREQ_INVALID 0x01
+#define HREQ_SUCCESS 0x02
+#define HREQ_NOT_SUPPORTED 0x03
+#define HREQ_FEATURE_NOT_SUPPORTED 0x04 /* request specific feature */
+#define HREQ_REQUEST_REJECTED 0xfe
+#define HREQ_EXECUTION_ONGOING 0xff
+
+/* MISC host command functions */
+#define HCMD_MISC_REQUEST_LCB_ACCESS 0x1
+#define HCMD_MISC_GRANT_LCB_ACCESS 0x2
+
+/* idle flit message types */
+#define IDLE_PHYSICAL_LINK_MGMT 0x1
+#define IDLE_CRU 0x2
+#define IDLE_SMA 0x3
+#define IDLE_POWER_MGMT 0x4
+
+/* idle flit message send fields (both send and read) */
+#define IDLE_PAYLOAD_MASK 0xffffffffffull /* 40 bits */
+#define IDLE_PAYLOAD_SHIFT 8
+#define IDLE_MSG_TYPE_MASK 0xf
+#define IDLE_MSG_TYPE_SHIFT 0
+
+/* idle flit message read fields */
+#define READ_IDLE_MSG_TYPE_MASK 0xf
+#define READ_IDLE_MSG_TYPE_SHIFT 0
+
+/* SMA idle flit payload commands */
+#define SMA_IDLE_ARM 1
+#define SMA_IDLE_ACTIVE 2
+
+/* DC_DC8051_CFG_MODE.GENERAL bits */
+#define DISABLE_SELF_GUID_CHECK 0x2
+
+/*
+ * Eager buffer minimum and maximum sizes supported by the hardware.
+ * All power-of-two sizes in between are supported as well.
+ * MAX_EAGER_BUFFER_TOTAL is the maximum size of memory
+ * allocatable for Eager buffers in a single context.  All others
+ * are limits for the RcvArray entries.
+ */
+#define MIN_EAGER_BUFFER (4 * 1024)
+#define MAX_EAGER_BUFFER (256 * 1024)
+#define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */
+#define MAX_EXPECTED_BUFFER (2048 * 1024)
+
+/*
+ * Receive expected base and count and eager base and count increment -
+ * the CSR fields hold multiples of this value.
+ */
+#define RCV_SHIFT 3
+#define RCV_INCREMENT (1 << RCV_SHIFT)
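+/* e.g. an eager count of 64 entries is programmed as 64 >> RCV_SHIFT = 8 */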
+
+/*
+ * Receive header queue entry increment - the CSR holds multiples of
+ * this value.
+ */
+#define HDRQ_SIZE_SHIFT 5
+#define HDRQ_INCREMENT (1 << HDRQ_SIZE_SHIFT)
+
+/*
+ * Freeze handling flags
+ */
+#define FREEZE_ABORT 0x01 /* do not do recovery */
+#define FREEZE_SELF 0x02 /* initiate the freeze */
+#define FREEZE_LINK_DOWN 0x04 /* link is down */
+
+/*
+ * Chip implementation codes.
+ */
+#define ICODE_RTL_SILICON 0x00
+#define ICODE_RTL_VCS_SIMULATION 0x01
+#define ICODE_FPGA_EMULATION 0x02
+#define ICODE_FUNCTIONAL_SIMULATOR 0x03
+
+/*
+ * 8051 data memory size.
+ */
+#define DC8051_DATA_MEM_SIZE 0x1000
+
+/*
+ * 8051 firmware registers
+ */
+#define NUM_GENERAL_FIELDS 0x17
+#define NUM_LANE_FIELDS 0x8
+
+/* 8051 general register Field IDs */
+#define TX_SETTINGS 0x06
+#define VERIFY_CAP_LOCAL_PHY 0x07
+#define VERIFY_CAP_LOCAL_FABRIC 0x08
+#define VERIFY_CAP_LOCAL_LINK_WIDTH 0x09
+#define LOCAL_DEVICE_ID 0x0a
+#define LOCAL_LNI_INFO 0x0c
+#define REMOTE_LNI_INFO 0x0d
+#define MISC_STATUS 0x0e
+#define VERIFY_CAP_REMOTE_PHY 0x0f
+#define VERIFY_CAP_REMOTE_FABRIC 0x10
+#define VERIFY_CAP_REMOTE_LINK_WIDTH 0x11
+#define LAST_LOCAL_STATE_COMPLETE 0x12
+#define LAST_REMOTE_STATE_COMPLETE 0x13
+#define LINK_QUALITY_INFO 0x14
+#define REMOTE_DEVICE_ID 0x15
+
+/* Lane ID for general configuration registers */
+#define GENERAL_CONFIG 4
+
+/* LOAD_DATA 8051 command shifts and fields */
+#define LOAD_DATA_FIELD_ID_SHIFT 40
+#define LOAD_DATA_FIELD_ID_MASK 0xfull
+#define LOAD_DATA_LANE_ID_SHIFT 32
+#define LOAD_DATA_LANE_ID_MASK 0xfull
+#define LOAD_DATA_DATA_SHIFT 0x0
+#define LOAD_DATA_DATA_MASK 0xffffffffull
+
+/* READ_DATA 8051 command shifts and fields */
+#define READ_DATA_FIELD_ID_SHIFT 40
+#define READ_DATA_FIELD_ID_MASK 0xffull
+#define READ_DATA_LANE_ID_SHIFT 32
+#define READ_DATA_LANE_ID_MASK 0xffull
+#define READ_DATA_DATA_SHIFT 0x0
+#define READ_DATA_DATA_MASK 0xffffffffull
+
+/* TX settings fields */
+#define ENABLE_LANE_TX_SHIFT 0
+#define ENABLE_LANE_TX_MASK 0xff
+#define TX_POLARITY_INVERSION_SHIFT 8
+#define TX_POLARITY_INVERSION_MASK 0xff
+#define RX_POLARITY_INVERSION_SHIFT 16
+#define RX_POLARITY_INVERSION_MASK 0xff
+#define MAX_RATE_SHIFT 24
+#define MAX_RATE_MASK 0xff
+
+/* verify capability PHY fields */
+#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT 0x4
+#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK 0x1
+#define POWER_MANAGEMENT_SHIFT 0x0
+#define POWER_MANAGEMENT_MASK 0xf
+
+/* 8051 lane register Field IDs */
+#define SPICO_FW_VERSION 0x7 /* SPICO firmware version */
+
+/* SPICO firmware version fields */
+#define SPICO_ROM_VERSION_SHIFT 0
+#define SPICO_ROM_VERSION_MASK 0xffff
+#define SPICO_ROM_PROD_ID_SHIFT 16
+#define SPICO_ROM_PROD_ID_MASK 0xffff
+
+/* verify capability fabric fields */
+#define VAU_SHIFT 0
+#define VAU_MASK 0x0007
+#define Z_SHIFT 3
+#define Z_MASK 0x0001
+#define VCU_SHIFT 4
+#define VCU_MASK 0x0007
+#define VL15BUF_SHIFT 8
+#define VL15BUF_MASK 0x0fff
+#define CRC_SIZES_SHIFT 20
+#define CRC_SIZES_MASK 0x7
+
+/* verify capability local link width fields */
+#define LINK_WIDTH_SHIFT 0 /* also for remote link width */
+#define LINK_WIDTH_MASK 0xffff /* also for remote link width */
+#define LOCAL_FLAG_BITS_SHIFT 16
+#define LOCAL_FLAG_BITS_MASK 0xff
+#define MISC_CONFIG_BITS_SHIFT 24
+#define MISC_CONFIG_BITS_MASK 0xff
+
+/* verify capability remote link width fields */
+#define REMOTE_TX_RATE_SHIFT 16
+#define REMOTE_TX_RATE_MASK 0xff
+
+/* LOCAL_DEVICE_ID fields */
+#define LOCAL_DEVICE_REV_SHIFT 0
+#define LOCAL_DEVICE_REV_MASK 0xff
+#define LOCAL_DEVICE_ID_SHIFT 8
+#define LOCAL_DEVICE_ID_MASK 0xffff
+
+/* REMOTE_DEVICE_ID fields */
+#define REMOTE_DEVICE_REV_SHIFT 0
+#define REMOTE_DEVICE_REV_MASK 0xff
+#define REMOTE_DEVICE_ID_SHIFT 8
+#define REMOTE_DEVICE_ID_MASK 0xffff
+
+/* local LNI link width fields */
+#define ENABLE_LANE_RX_SHIFT 16
+#define ENABLE_LANE_RX_MASK 0xff
+
+/* mask, shift for reading 'mgmt_enabled' value from REMOTE_LNI_INFO field */
+#define MGMT_ALLOWED_SHIFT 23
+#define MGMT_ALLOWED_MASK 0x1
+
+/* mask, shift for 'link_quality' within LINK_QUALITY_INFO field */
+#define LINK_QUALITY_SHIFT 24
+#define LINK_QUALITY_MASK 0x7
+
+/*
+ * mask, shift for reading 'planned_down_remote_reason_code'
+ * from LINK_QUALITY_INFO field
+ */
+#define DOWN_REMOTE_REASON_SHIFT 16
+#define DOWN_REMOTE_REASON_MASK 0xff
+
+/* verify capability PHY power management bits */
+#define PWRM_BER_CONTROL 0x1
+#define PWRM_BANDWIDTH_CONTROL 0x2
+
+/* verify capability fabric CRC size bits */
+enum {
+ CAP_CRC_14B = (1 << 0), /* 14b CRC */
+ CAP_CRC_48B = (1 << 1), /* 48b CRC */
+ CAP_CRC_12B_16B_PER_LANE = (1 << 2) /* 12b-16b per lane CRC */
+};
+
+#define SUPPORTED_CRCS (CAP_CRC_14B | CAP_CRC_48B)
+
+/* misc status version fields */
+#define STS_FM_VERSION_A_SHIFT 16
+#define STS_FM_VERSION_A_MASK 0xff
+#define STS_FM_VERSION_B_SHIFT 24
+#define STS_FM_VERSION_B_MASK 0xff
+
+/* LCB_CFG_CRC_MODE TX_VAL and RX_VAL CRC mode values */
+#define LCB_CRC_16B 0x0 /* 16b CRC */
+#define LCB_CRC_14B 0x1 /* 14b CRC */
+#define LCB_CRC_48B 0x2 /* 48b CRC */
+#define LCB_CRC_12B_16B_PER_LANE 0x3 /* 12b-16b per lane CRC */
+
+/*
+ * The following enum is (almost) a copy/paste of the definition
+ * in the OPA spec, section 20.2.2.6.8 (PortInfo).
+ */
+enum {
+ PORT_LTP_CRC_MODE_NONE = 0,
+ PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */
+ PORT_LTP_CRC_MODE_16 = 2, /* 16-bit LTP CRC mode */
+ PORT_LTP_CRC_MODE_48 = 4,
+ /* 48-bit overlapping LTP CRC mode (optional) */
+ PORT_LTP_CRC_MODE_PER_LANE = 8
+ /* 12 to 16 bit per lane LTP CRC mode (optional) */
+};
+
+/* timeouts */
+#define LINK_RESTART_DELAY 1000 /* link restart delay, in ms */
+#define TIMEOUT_8051_START 5000 /* 8051 start timeout, in ms */
+#define DC8051_COMMAND_TIMEOUT 20000 /* DC8051 command timeout, in ms */
+#define FREEZE_STATUS_TIMEOUT 20 /* wait for freeze indicators, in ms */
+#define VL_STATUS_CLEAR_TIMEOUT 5000 /* per-VL status clear, in ms */
+#define CCE_STATUS_TIMEOUT 10 /* time to clear CCE Status, in ms */
+
+/* cclock tick time, in picoseconds per tick: 1/speed * 10^12 */
+#define ASIC_CCLOCK_PS 1242 /* 805 MHz */
+#define FPGA_CCLOCK_PS 30300 /* 33 MHz */
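Given the tick period in picoseconds, nanoseconds convert to cclocks as ns * 1000 / period; the driver declares ns_to_cclock()/cclock_to_ns() for this further below. A rough standalone sketch of that arithmetic, assuming the ASIC period (function names here are illustrative):

#include <stdint.h>

#define ASIC_CCLOCK_PS 1242	/* 805 MHz -> ~1242 ps per tick */

/* ns -> cclocks: widen to ps, then divide by the tick period */
static uint32_t ns_to_cclock_sketch(uint32_t ns)
{
	return (uint32_t)(((uint64_t)ns * 1000) / ASIC_CCLOCK_PS);
}

/* cclocks -> ns, the inverse conversion */
static uint32_t cclock_to_ns_sketch(uint32_t cclock)
{
	return (uint32_t)(((uint64_t)cclock * ASIC_CCLOCK_PS) / 1000);
}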
+
+/*
+ * Mask of enabled MISC errors. Do not enable the two RSA engine errors -
+ * see firmware.c:run_rsa() for details.
+ */
+#define DRIVER_MISC_MASK \
+ (~(MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK \
+ | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK))
+
+/* valid values for the loopback module parameter */
+#define LOOPBACK_NONE 0 /* no loopback - default */
+#define LOOPBACK_SERDES 1
+#define LOOPBACK_LCB 2
+#define LOOPBACK_CABLE 3 /* external cable */
+
+/* read and write hardware registers */
+u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
+void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
+
+/*
+ * The *_kctxt_* flavor of the CSR read/write functions is for
+ * per-context or per-SDMA CSRs that are not mappable to user space.
+ * Their spacing is not a PAGE_SIZE multiple.
+ */
+static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt,
+ u32 offset0)
+{
+ /* kernel per-context CSRs are separated by 0x100 */
+ return read_csr(dd, offset0 + (0x100 * ctxt));
+}
+
+static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt,
+ u32 offset0, u64 value)
+{
+ /* kernel per-context CSRs are separated by 0x100 */
+ write_csr(dd, offset0 + (0x100 * ctxt), value);
+}
+
+int read_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 *data);
+int write_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 data);
+
+void __iomem *get_csr_addr(struct hfi1_devdata *dd, u32 offset);
+
+static inline void __iomem *get_kctxt_csr_addr(struct hfi1_devdata *dd,
+					       int ctxt, u32 offset0)
+{
+ return get_csr_addr(dd, offset0 + (0x100 * ctxt));
+}
+
+/*
+ * The *_uctxt_* flavor of the CSR read/write functions is for
+ * per-context CSRs that are mappable to user space. All these CSRs
+ * are spaced by a PAGE_SIZE multiple in order to be mappable to
+ * different processes without exposing other contexts' CSRs.
+ */
+static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt,
+ u32 offset0)
+{
+ /* user per-context CSRs are separated by 0x1000 */
+ return read_csr(dd, offset0 + (0x1000 * ctxt));
+}
+
+static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
+ u32 offset0, u64 value)
+{
+ /* user per-context CSRs are separated by 0x1000 */
+ write_csr(dd, offset0 + (0x1000 * ctxt), value);
+}
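To make the two strides concrete: a kernel per-context CSR at a given base offset steps by 0x100 per context, while the user-mappable flavor steps by a full 0x1000 page. A small sketch of the offset arithmetic only (the base offset is hypothetical; real accesses go through read_csr()/write_csr() as above):

#include <stdio.h>

#define KCTXT_STRIDE 0x100	/* kernel per-context CSR spacing */
#define UCTXT_STRIDE 0x1000	/* user per-context spacing: one 4K page */

int main(void)
{
	unsigned int base = 0x100028;	/* hypothetical per-context offset */
	int ctxt = 3;

	printf("kctxt 3: 0x%x\n", base + KCTXT_STRIDE * ctxt); /* 0x100328 */
	printf("uctxt 3: 0x%x\n", base + UCTXT_STRIDE * ctxt); /* 0x103028 */
	return 0;
}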
+
+u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32);
+
+/* firmware.c */
+#define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */
+extern const u8 pcie_serdes_broadcast[];
+extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES];
+/* SBus commands */
+#define RESET_SBUS_RECEIVER 0x20
+#define WRITE_SBUS_RECEIVER 0x21
+void sbus_request(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
+int sbus_request_slow(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
+void set_sbus_fast_mode(struct hfi1_devdata *dd);
+void clear_sbus_fast_mode(struct hfi1_devdata *dd);
+int hfi1_firmware_init(struct hfi1_devdata *dd);
+int load_pcie_firmware(struct hfi1_devdata *dd);
+int load_firmware(struct hfi1_devdata *dd);
+void dispose_firmware(void);
+int acquire_hw_mutex(struct hfi1_devdata *dd);
+void release_hw_mutex(struct hfi1_devdata *dd);
+void fabric_serdes_reset(struct hfi1_devdata *dd);
+int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
+
+/* chip.c */
+void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b);
+void read_guid(struct hfi1_devdata *dd);
+int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
+void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
+ u8 neigh_reason, u8 rem_reason);
+int set_link_state(struct hfi1_pportdata *, u32 state);
+int port_ltp_to_cap(int port_ltp);
+void handle_verify_cap(struct work_struct *work);
+void handle_freeze(struct work_struct *work);
+void handle_link_up(struct work_struct *work);
+void handle_link_down(struct work_struct *work);
+void handle_link_downgrade(struct work_struct *work);
+void handle_link_bounce(struct work_struct *work);
+void handle_sma_message(struct work_struct *work);
+void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
+int send_idle_sma(struct hfi1_devdata *dd, u64 message);
+int start_link(struct hfi1_pportdata *ppd);
+void init_qsfp(struct hfi1_pportdata *ppd);
+int bringup_serdes(struct hfi1_pportdata *ppd);
+void set_intr_state(struct hfi1_devdata *dd, u32 enable);
+void apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
+ int refresh_widths);
+void update_usrhead(struct hfi1_ctxtdata *, u32, u32, u32, u32, u32);
+int stop_drain_data_vls(struct hfi1_devdata *dd);
+int open_fill_data_vls(struct hfi1_devdata *dd);
+u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
+u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclock);
+void get_linkup_link_widths(struct hfi1_pportdata *ppd);
+void read_ltp_rtt(struct hfi1_devdata *dd);
+void clear_linkup_counters(struct hfi1_devdata *dd);
+u32 hdrqempty(struct hfi1_ctxtdata *rcd);
+int is_a0(struct hfi1_devdata *dd);
+int is_ax(struct hfi1_devdata *dd);
+int is_bx(struct hfi1_devdata *dd);
+u32 read_physical_state(struct hfi1_devdata *dd);
+u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
+u32 get_logical_state(struct hfi1_pportdata *ppd);
+const char *opa_lstate_name(u32 lstate);
+const char *opa_pstate_name(u32 pstate);
+u32 driver_physical_state(struct hfi1_pportdata *ppd);
+u32 driver_logical_state(struct hfi1_pportdata *ppd);
+
+int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
+int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
+#define LCB_START DC_LCB_CSRS
+#define LCB_END DC_8051_CSRS /* next block is 8051 */
+static inline int is_lcb_offset(u32 offset)
+{
+ return (offset >= LCB_START && offset < LCB_END);
+}
+
+extern uint num_vls;
+
+extern uint disable_integrity;
+u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl);
+u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data);
+u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl);
+u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data);
+
+/* Per VL indexes */
+enum {
+ C_VL_0 = 0,
+ C_VL_1,
+ C_VL_2,
+ C_VL_3,
+ C_VL_4,
+ C_VL_5,
+ C_VL_6,
+ C_VL_7,
+ C_VL_15,
+ C_VL_COUNT
+};
+
+static inline int vl_from_idx(int idx)
+{
+ return (idx == C_VL_15 ? 15 : idx);
+}
+
+static inline int idx_from_vl(int vl)
+{
+ return (vl == 15 ? C_VL_15 : vl);
+}
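The two helpers fold VL 15 into the ninth array slot so a dense array of C_VL_COUNT counters covers VLs 0-7 plus 15. A quick self-contained round-trip check of that mapping (enum and helpers copied from above):

#include <assert.h>

enum { C_VL_0 = 0, C_VL_1, C_VL_2, C_VL_3, C_VL_4,
       C_VL_5, C_VL_6, C_VL_7, C_VL_15, C_VL_COUNT };

static int vl_from_idx(int idx) { return idx == C_VL_15 ? 15 : idx; }
static int idx_from_vl(int vl)  { return vl == 15 ? C_VL_15 : vl; }

int main(void)
{
	int vl;

	for (vl = 0; vl <= 7; vl++)		/* data VLs map 1:1 */
		assert(vl_from_idx(idx_from_vl(vl)) == vl);
	assert(idx_from_vl(15) == C_VL_15);	/* VL 15 folds to slot 8 */
	assert(vl_from_idx(C_VL_15) == 15);
	return 0;
}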
+
+/* Per device counter indexes */
+enum {
+ C_RCV_OVF = 0,
+ C_RX_TID_FULL,
+ C_RX_TID_INVALID,
+ C_RX_TID_FLGMS,
+ C_RX_CTX_RHQS,
+ C_RX_CTX_EGRS,
+ C_RCV_TID_FLSMS,
+ C_CCE_PCI_CR_ST,
+ C_CCE_PCI_TR_ST,
+ C_CCE_PIO_WR_ST,
+ C_CCE_ERR_INT,
+ C_CCE_SDMA_INT,
+ C_CCE_MISC_INT,
+ C_CCE_RCV_AV_INT,
+ C_CCE_RCV_URG_INT,
+ C_CCE_SEND_CR_INT,
+ C_DC_UNC_ERR,
+ C_DC_RCV_ERR,
+ C_DC_FM_CFG_ERR,
+ C_DC_RMT_PHY_ERR,
+ C_DC_DROPPED_PKT,
+ C_DC_MC_XMIT_PKTS,
+ C_DC_MC_RCV_PKTS,
+ C_DC_XMIT_CERR,
+ C_DC_RCV_CERR,
+ C_DC_RCV_FCC,
+ C_DC_XMIT_FCC,
+ C_DC_XMIT_FLITS,
+ C_DC_RCV_FLITS,
+ C_DC_XMIT_PKTS,
+ C_DC_RCV_PKTS,
+ C_DC_RX_FLIT_VL,
+ C_DC_RX_PKT_VL,
+ C_DC_RCV_FCN,
+ C_DC_RCV_FCN_VL,
+ C_DC_RCV_BCN,
+ C_DC_RCV_BCN_VL,
+ C_DC_RCV_BBL,
+ C_DC_RCV_BBL_VL,
+ C_DC_MARK_FECN,
+ C_DC_MARK_FECN_VL,
+ C_DC_TOTAL_CRC,
+ C_DC_CRC_LN0,
+ C_DC_CRC_LN1,
+ C_DC_CRC_LN2,
+ C_DC_CRC_LN3,
+ C_DC_CRC_MULT_LN,
+ C_DC_TX_REPLAY,
+ C_DC_RX_REPLAY,
+ C_DC_SEQ_CRC_CNT,
+ C_DC_ESC0_ONLY_CNT,
+ C_DC_ESC0_PLUS1_CNT,
+ C_DC_ESC0_PLUS2_CNT,
+ C_DC_REINIT_FROM_PEER_CNT,
+ C_DC_SBE_CNT,
+ C_DC_MISC_FLG_CNT,
+ C_DC_PRF_GOOD_LTP_CNT,
+ C_DC_PRF_ACCEPTED_LTP_CNT,
+ C_DC_PRF_RX_FLIT_CNT,
+ C_DC_PRF_TX_FLIT_CNT,
+ C_DC_PRF_CLK_CNTR,
+ C_DC_PG_DBG_FLIT_CRDTS_CNT,
+ C_DC_PG_STS_PAUSE_COMPLETE_CNT,
+ C_DC_PG_STS_TX_SBE_CNT,
+ C_DC_PG_STS_TX_MBE_CNT,
+ C_SW_CPU_INTR,
+ C_SW_CPU_RCV_LIM,
+ C_SW_VTX_WAIT,
+ C_SW_PIO_WAIT,
+ C_SW_KMEM_WAIT,
+ DEV_CNTR_LAST /* Must be kept last */
+};
+
+/* Per port counter indexes */
+enum {
+ C_TX_UNSUP_VL = 0,
+ C_TX_INVAL_LEN,
+ C_TX_MM_LEN_ERR,
+ C_TX_UNDERRUN,
+ C_TX_FLOW_STALL,
+ C_TX_DROPPED,
+ C_TX_HDR_ERR,
+ C_TX_PKT,
+ C_TX_WORDS,
+ C_TX_WAIT,
+ C_TX_FLIT_VL,
+ C_TX_PKT_VL,
+ C_TX_WAIT_VL,
+ C_RX_PKT,
+ C_RX_WORDS,
+ C_SW_LINK_DOWN,
+ C_SW_LINK_UP,
+ C_SW_XMIT_DSCD,
+ C_SW_XMIT_DSCD_VL,
+ C_SW_XMIT_CSTR_ERR,
+ C_SW_RCV_CSTR_ERR,
+ C_SW_IBP_LOOP_PKTS,
+ C_SW_IBP_RC_RESENDS,
+ C_SW_IBP_RNR_NAKS,
+ C_SW_IBP_OTHER_NAKS,
+ C_SW_IBP_RC_TIMEOUTS,
+ C_SW_IBP_PKT_DROPS,
+ C_SW_IBP_DMA_WAIT,
+ C_SW_IBP_RC_SEQNAK,
+ C_SW_IBP_RC_DUPREQ,
+ C_SW_IBP_RDMA_SEQ,
+ C_SW_IBP_UNALIGNED,
+ C_SW_IBP_SEQ_NAK,
+ C_SW_CPU_RC_ACKS,
+ C_SW_CPU_RC_QACKS,
+ C_SW_CPU_RC_DELAYED_COMP,
+ C_RCV_HDR_OVF_0,
+ C_RCV_HDR_OVF_1,
+ C_RCV_HDR_OVF_2,
+ C_RCV_HDR_OVF_3,
+ C_RCV_HDR_OVF_4,
+ C_RCV_HDR_OVF_5,
+ C_RCV_HDR_OVF_6,
+ C_RCV_HDR_OVF_7,
+ C_RCV_HDR_OVF_8,
+ C_RCV_HDR_OVF_9,
+ C_RCV_HDR_OVF_10,
+ C_RCV_HDR_OVF_11,
+ C_RCV_HDR_OVF_12,
+ C_RCV_HDR_OVF_13,
+ C_RCV_HDR_OVF_14,
+ C_RCV_HDR_OVF_15,
+ C_RCV_HDR_OVF_16,
+ C_RCV_HDR_OVF_17,
+ C_RCV_HDR_OVF_18,
+ C_RCV_HDR_OVF_19,
+ C_RCV_HDR_OVF_20,
+ C_RCV_HDR_OVF_21,
+ C_RCV_HDR_OVF_22,
+ C_RCV_HDR_OVF_23,
+ C_RCV_HDR_OVF_24,
+ C_RCV_HDR_OVF_25,
+ C_RCV_HDR_OVF_26,
+ C_RCV_HDR_OVF_27,
+ C_RCV_HDR_OVF_28,
+ C_RCV_HDR_OVF_29,
+ C_RCV_HDR_OVF_30,
+ C_RCV_HDR_OVF_31,
+ C_RCV_HDR_OVF_32,
+ C_RCV_HDR_OVF_33,
+ C_RCV_HDR_OVF_34,
+ C_RCV_HDR_OVF_35,
+ C_RCV_HDR_OVF_36,
+ C_RCV_HDR_OVF_37,
+ C_RCV_HDR_OVF_38,
+ C_RCV_HDR_OVF_39,
+ C_RCV_HDR_OVF_40,
+ C_RCV_HDR_OVF_41,
+ C_RCV_HDR_OVF_42,
+ C_RCV_HDR_OVF_43,
+ C_RCV_HDR_OVF_44,
+ C_RCV_HDR_OVF_45,
+ C_RCV_HDR_OVF_46,
+ C_RCV_HDR_OVF_47,
+ C_RCV_HDR_OVF_48,
+ C_RCV_HDR_OVF_49,
+ C_RCV_HDR_OVF_50,
+ C_RCV_HDR_OVF_51,
+ C_RCV_HDR_OVF_52,
+ C_RCV_HDR_OVF_53,
+ C_RCV_HDR_OVF_54,
+ C_RCV_HDR_OVF_55,
+ C_RCV_HDR_OVF_56,
+ C_RCV_HDR_OVF_57,
+ C_RCV_HDR_OVF_58,
+ C_RCV_HDR_OVF_59,
+ C_RCV_HDR_OVF_60,
+ C_RCV_HDR_OVF_61,
+ C_RCV_HDR_OVF_62,
+ C_RCV_HDR_OVF_63,
+ C_RCV_HDR_OVF_64,
+ C_RCV_HDR_OVF_65,
+ C_RCV_HDR_OVF_66,
+ C_RCV_HDR_OVF_67,
+ C_RCV_HDR_OVF_68,
+ C_RCV_HDR_OVF_69,
+ C_RCV_HDR_OVF_70,
+ C_RCV_HDR_OVF_71,
+ C_RCV_HDR_OVF_72,
+ C_RCV_HDR_OVF_73,
+ C_RCV_HDR_OVF_74,
+ C_RCV_HDR_OVF_75,
+ C_RCV_HDR_OVF_76,
+ C_RCV_HDR_OVF_77,
+ C_RCV_HDR_OVF_78,
+ C_RCV_HDR_OVF_79,
+ C_RCV_HDR_OVF_80,
+ C_RCV_HDR_OVF_81,
+ C_RCV_HDR_OVF_82,
+ C_RCV_HDR_OVF_83,
+ C_RCV_HDR_OVF_84,
+ C_RCV_HDR_OVF_85,
+ C_RCV_HDR_OVF_86,
+ C_RCV_HDR_OVF_87,
+ C_RCV_HDR_OVF_88,
+ C_RCV_HDR_OVF_89,
+ C_RCV_HDR_OVF_90,
+ C_RCV_HDR_OVF_91,
+ C_RCV_HDR_OVF_92,
+ C_RCV_HDR_OVF_93,
+ C_RCV_HDR_OVF_94,
+ C_RCV_HDR_OVF_95,
+ C_RCV_HDR_OVF_96,
+ C_RCV_HDR_OVF_97,
+ C_RCV_HDR_OVF_98,
+ C_RCV_HDR_OVF_99,
+ C_RCV_HDR_OVF_100,
+ C_RCV_HDR_OVF_101,
+ C_RCV_HDR_OVF_102,
+ C_RCV_HDR_OVF_103,
+ C_RCV_HDR_OVF_104,
+ C_RCV_HDR_OVF_105,
+ C_RCV_HDR_OVF_106,
+ C_RCV_HDR_OVF_107,
+ C_RCV_HDR_OVF_108,
+ C_RCV_HDR_OVF_109,
+ C_RCV_HDR_OVF_110,
+ C_RCV_HDR_OVF_111,
+ C_RCV_HDR_OVF_112,
+ C_RCV_HDR_OVF_113,
+ C_RCV_HDR_OVF_114,
+ C_RCV_HDR_OVF_115,
+ C_RCV_HDR_OVF_116,
+ C_RCV_HDR_OVF_117,
+ C_RCV_HDR_OVF_118,
+ C_RCV_HDR_OVF_119,
+ C_RCV_HDR_OVF_120,
+ C_RCV_HDR_OVF_121,
+ C_RCV_HDR_OVF_122,
+ C_RCV_HDR_OVF_123,
+ C_RCV_HDR_OVF_124,
+ C_RCV_HDR_OVF_125,
+ C_RCV_HDR_OVF_126,
+ C_RCV_HDR_OVF_127,
+ C_RCV_HDR_OVF_128,
+ C_RCV_HDR_OVF_129,
+ C_RCV_HDR_OVF_130,
+ C_RCV_HDR_OVF_131,
+ C_RCV_HDR_OVF_132,
+ C_RCV_HDR_OVF_133,
+ C_RCV_HDR_OVF_134,
+ C_RCV_HDR_OVF_135,
+ C_RCV_HDR_OVF_136,
+ C_RCV_HDR_OVF_137,
+ C_RCV_HDR_OVF_138,
+ C_RCV_HDR_OVF_139,
+ C_RCV_HDR_OVF_140,
+ C_RCV_HDR_OVF_141,
+ C_RCV_HDR_OVF_142,
+ C_RCV_HDR_OVF_143,
+ C_RCV_HDR_OVF_144,
+ C_RCV_HDR_OVF_145,
+ C_RCV_HDR_OVF_146,
+ C_RCV_HDR_OVF_147,
+ C_RCV_HDR_OVF_148,
+ C_RCV_HDR_OVF_149,
+ C_RCV_HDR_OVF_150,
+ C_RCV_HDR_OVF_151,
+ C_RCV_HDR_OVF_152,
+ C_RCV_HDR_OVF_153,
+ C_RCV_HDR_OVF_154,
+ C_RCV_HDR_OVF_155,
+ C_RCV_HDR_OVF_156,
+ C_RCV_HDR_OVF_157,
+ C_RCV_HDR_OVF_158,
+ C_RCV_HDR_OVF_159,
+ PORT_CNTR_LAST /* Must be kept last */
+};
+
+u64 get_all_cpu_total(u64 __percpu *cntr);
+void hfi1_start_cleanup(struct hfi1_devdata *dd);
+void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
+struct hfi1_message_header *hfi1_get_msgheader(
+ struct hfi1_devdata *dd, __le32 *rhf_addr);
+int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
+ struct hfi1_ctxt_info *kinfo);
+u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
+ u32 mask);
+int hfi1_init_ctxt(struct send_context *sc);
+void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
+ u32 type, unsigned long pa, u16 order);
+void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt);
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp);
+u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp);
+u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd);
+int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
+int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt);
+void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
+
+/*
+ * Interrupt source table.
+ *
+ * Each entry is an interrupt source "type". Entries are ordered by
+ * increasing source number.
+ */
+struct is_table {
+ int start; /* interrupt source type start */
+ int end; /* interrupt source type end */
+ /* routine that returns the name of the interrupt source */
+ char *(*is_name)(char *name, size_t size, unsigned int source);
+ /* routine to call when receiving an interrupt */
+ void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
+};
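Because the entries are ordered and cover disjoint source ranges, dispatching an interrupt source reduces to a linear range scan. A hedged sketch of such a lookup (dispatch_source() and its fall-through handling are illustrative; the driver's real walker lives elsewhere in the patch):

#include <stddef.h>

struct hfi1_devdata;			/* opaque for this sketch */

struct is_table {
	int start;			/* interrupt source type start */
	int end;			/* interrupt source type end */
	char *(*is_name)(char *name, size_t size, unsigned int source);
	void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
};

/* scan the ordered table and route one source to its handler */
static void dispatch_source(const struct is_table *table, size_t n,
			    struct hfi1_devdata *dd, unsigned int source)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (source >= (unsigned int)table[i].start &&
		    source < (unsigned int)table[i].end) {
			table[i].is_int(dd, source);
			return;
		}
	}
	/* fell off the table: a real driver would log the stray source */
}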
+
+#endif /* _CHIP_H */
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h
new file mode 100644
index 000000000000..bf45de29d8bd
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/chip_registers.h
@@ -0,0 +1,1292 @@
+#ifndef DEF_CHIP_REG
+#define DEF_CHIP_REG
+
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define CORE 0x000000000000
+#define CCE (CORE + 0x000000000000)
+#define ASIC (CORE + 0x000000400000)
+#define MISC (CORE + 0x000000500000)
+#define DC_TOP_CSRS (CORE + 0x000000600000)
+#define CHIP_DEBUG (CORE + 0x000000700000)
+#define RXE (CORE + 0x000001000000)
+#define TXE (CORE + 0x000001800000)
+#define DCC_CSRS (DC_TOP_CSRS + 0x000000000000)
+#define DC_LCB_CSRS (DC_TOP_CSRS + 0x000000001000)
+#define DC_8051_CSRS (DC_TOP_CSRS + 0x000000002000)
+#define PCIE 0
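The region bases compose by plain addition, so every CSR below is an absolute offset from the start of the chip's register space. For example, from the defines above, DC_8051_CSRS works out to 0x602000; a trivial check of that arithmetic:

#include <assert.h>

#define CORE 0x000000000000ull
#define DC_TOP_CSRS (CORE + 0x000000600000)
#define DC_8051_CSRS (DC_TOP_CSRS + 0x000000002000)

int main(void)
{
	/* 0x0 + 0x600000 + 0x2000: the 8051 block's absolute offset */
	assert(DC_8051_CSRS == 0x602000);
	return 0;
}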
+
+#define ASIC_NUM_SCRATCH 4
+#define CCE_ERR_INT_CNT 0
+#define CCE_MISC_INT_CNT 2
+#define CCE_NUM_32_BIT_COUNTERS 3
+#define CCE_NUM_32_BIT_INT_COUNTERS 6
+#define CCE_NUM_INT_CSRS 12
+#define CCE_NUM_INT_MAP_CSRS 96
+#define CCE_NUM_MSIX_PBAS 4
+#define CCE_NUM_MSIX_VECTORS 256
+#define CCE_NUM_SCRATCH 4
+#define CCE_PCIE_POSTED_CRDT_STALL_CNT 2
+#define CCE_PCIE_TRGT_STALL_CNT 0
+#define CCE_PIO_WR_STALL_CNT 1
+#define CCE_RCV_AVAIL_INT_CNT 3
+#define CCE_RCV_URGENT_INT_CNT 4
+#define CCE_SDMA_INT_CNT 1
+#define CCE_SEND_CREDIT_INT_CNT 5
+#define DCC_CFG_LED_CNTRL (DCC_CSRS + 0x000000000040)
+#define DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK 0x10ull
+#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SHIFT 0
+#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK 0xFull
+#define DCC_CFG_PORT_CONFIG (DCC_CSRS + 0x000000000008)
+#define DCC_CFG_PORT_CONFIG1 (DCC_CSRS + 0x000000000010)
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT 16
+#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK 0xFFFF0000ull
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT 0
+#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 0xFFFFull
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_MASK 0x7ull
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT 48
+#define DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK 0x7000000000000ull
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_MASK 0x7ull
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT 32
+#define DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK 0x700000000ull
+#define DCC_CFG_RESET (DCC_CSRS + 0x000000000000)
+#define DCC_CFG_RESET_RESET_LCB_SHIFT 0
+#define DCC_CFG_RESET_RESET_RX_FPE_SHIFT 2
+#define DCC_CFG_SC_VL_TABLE_15_0 (DCC_CSRS + 0x000000000028)
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT 0
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY10_SHIFT 40
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY11_SHIFT 44
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY12_SHIFT 48
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY13_SHIFT 52
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY14_SHIFT 56
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY15_SHIFT 60
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY1_SHIFT 4
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY2_SHIFT 8
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY3_SHIFT 12
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY4_SHIFT 16
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY5_SHIFT 20
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY6_SHIFT 24
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY7_SHIFT 28
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY8_SHIFT 32
+#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY9_SHIFT 36
+#define DCC_CFG_SC_VL_TABLE_31_16 (DCC_CSRS + 0x000000000030)
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY16_SHIFT 0
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY17_SHIFT 4
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY18_SHIFT 8
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY19_SHIFT 12
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY20_SHIFT 16
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY21_SHIFT 20
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY22_SHIFT 24
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY23_SHIFT 28
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY24_SHIFT 32
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY25_SHIFT 36
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY26_SHIFT 40
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY27_SHIFT 44
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY28_SHIFT 48
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY29_SHIFT 52
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY30_SHIFT 56
+#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY31_SHIFT 60
+#define DCC_ERR_DROPPED_PKT_CNT (DCC_CSRS + 0x000000000120)
+#define DCC_ERR_FLG (DCC_CSRS + 0x000000000050)
+#define DCC_ERR_FLG_BAD_CRDT_ACK_ERR_SMASK 0x4000ull
+#define DCC_ERR_FLG_BAD_CTRL_DIST_ERR_SMASK 0x200000ull
+#define DCC_ERR_FLG_BAD_CTRL_FLIT_ERR_SMASK 0x10000ull
+#define DCC_ERR_FLG_BAD_DLID_TARGET_ERR_SMASK 0x200ull
+#define DCC_ERR_FLG_BAD_HEAD_DIST_ERR_SMASK 0x800000ull
+#define DCC_ERR_FLG_BAD_L2_ERR_SMASK 0x2ull
+#define DCC_ERR_FLG_BAD_LVER_ERR_SMASK 0x400ull
+#define DCC_ERR_FLG_BAD_MID_TAIL_ERR_SMASK 0x8ull
+#define DCC_ERR_FLG_BAD_PKT_LENGTH_ERR_SMASK 0x4000000ull
+#define DCC_ERR_FLG_BAD_PREEMPTION_ERR_SMASK 0x10ull
+#define DCC_ERR_FLG_BAD_SC_ERR_SMASK 0x4ull
+#define DCC_ERR_FLG_BAD_TAIL_DIST_ERR_SMASK 0x400000ull
+#define DCC_ERR_FLG_BAD_VL_MARKER_ERR_SMASK 0x80ull
+#define DCC_ERR_FLG_CLR (DCC_CSRS + 0x000000000060)
+#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
+#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
+#define DCC_ERR_FLG_CSR_INVAL_ADDR_SMASK 0x400000000000ull
+#define DCC_ERR_FLG_CSR_PARITY_ERR_SMASK 0x200000000000ull
+#define DCC_ERR_FLG_DLID_ZERO_ERR_SMASK 0x40000000ull
+#define DCC_ERR_FLG_EN (DCC_CSRS + 0x000000000058)
+#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
+#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
+#define DCC_ERR_FLG_EVENT_CNTR_PARITY_ERR_SMASK 0x20000ull
+#define DCC_ERR_FLG_EVENT_CNTR_ROLLOVER_ERR_SMASK 0x40000ull
+#define DCC_ERR_FLG_FMCONFIG_ERR_SMASK 0x40000000000000ull
+#define DCC_ERR_FLG_FPE_TX_FIFO_OVFLW_ERR_SMASK 0x2000000000ull
+#define DCC_ERR_FLG_FPE_TX_FIFO_UNFLW_ERR_SMASK 0x4000000000ull
+#define DCC_ERR_FLG_LATE_EBP_ERR_SMASK 0x1000000000ull
+#define DCC_ERR_FLG_LATE_LONG_ERR_SMASK 0x800000000ull
+#define DCC_ERR_FLG_LATE_SHORT_ERR_SMASK 0x400000000ull
+#define DCC_ERR_FLG_LENGTH_MTU_ERR_SMASK 0x80000000ull
+#define DCC_ERR_FLG_LINK_ERR_SMASK 0x80000ull
+#define DCC_ERR_FLG_MISC_CNTR_ROLLOVER_ERR_SMASK 0x100000ull
+#define DCC_ERR_FLG_NONVL15_STATE_ERR_SMASK 0x1000000ull
+#define DCC_ERR_FLG_PERM_NVL15_ERR_SMASK 0x10000000ull
+#define DCC_ERR_FLG_PREEMPTION_ERR_SMASK 0x20ull
+#define DCC_ERR_FLG_PREEMPTIONVL15_ERR_SMASK 0x40ull
+#define DCC_ERR_FLG_RCVPORT_ERR_SMASK 0x80000000000000ull
+#define DCC_ERR_FLG_RX_BYTE_SHFT_PARITY_ERR_SMASK 0x1000000000000ull
+#define DCC_ERR_FLG_RX_CTRL_PARITY_MBE_ERR_SMASK 0x100000000000ull
+#define DCC_ERR_FLG_RX_EARLY_DROP_ERR_SMASK 0x200000000ull
+#define DCC_ERR_FLG_SLID_ZERO_ERR_SMASK 0x20000000ull
+#define DCC_ERR_FLG_TX_BYTE_SHFT_PARITY_ERR_SMASK 0x800000000000ull
+#define DCC_ERR_FLG_TX_CTRL_PARITY_ERR_SMASK 0x20000000000ull
+#define DCC_ERR_FLG_TX_CTRL_PARITY_MBE_ERR_SMASK 0x40000000000ull
+#define DCC_ERR_FLG_TX_SC_PARITY_ERR_SMASK 0x80000000000ull
+#define DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK 0x2000ull
+#define DCC_ERR_FLG_UNSUP_PKT_TYPE_SMASK 0x8000ull
+#define DCC_ERR_FLG_UNSUP_VL_ERR_SMASK 0x8000000ull
+#define DCC_ERR_FLG_VL15_MULTI_ERR_SMASK 0x2000000ull
+#define DCC_ERR_FMCONFIG_ERR_CNT (DCC_CSRS + 0x000000000110)
+#define DCC_ERR_INFO_FMCONFIG (DCC_CSRS + 0x000000000090)
+#define DCC_ERR_INFO_PORTRCV (DCC_CSRS + 0x000000000078)
+#define DCC_ERR_INFO_PORTRCV_HDR0 (DCC_CSRS + 0x000000000080)
+#define DCC_ERR_INFO_PORTRCV_HDR1 (DCC_CSRS + 0x000000000088)
+#define DCC_ERR_INFO_UNCORRECTABLE (DCC_CSRS + 0x000000000098)
+#define DCC_ERR_PORTRCV_ERR_CNT (DCC_CSRS + 0x000000000108)
+#define DCC_ERR_RCVREMOTE_PHY_ERR_CNT (DCC_CSRS + 0x000000000118)
+#define DCC_ERR_UNCORRECTABLE_CNT (DCC_CSRS + 0x000000000100)
+#define DCC_PRF_PORT_MARK_FECN_CNT (DCC_CSRS + 0x000000000330)
+#define DCC_PRF_PORT_RCV_BECN_CNT (DCC_CSRS + 0x000000000290)
+#define DCC_PRF_PORT_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E0)
+#define DCC_PRF_PORT_RCV_CORRECTABLE_CNT (DCC_CSRS + 0x000000000140)
+#define DCC_PRF_PORT_RCV_DATA_CNT (DCC_CSRS + 0x000000000198)
+#define DCC_PRF_PORT_RCV_FECN_CNT (DCC_CSRS + 0x000000000240)
+#define DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT (DCC_CSRS + 0x000000000130)
+#define DCC_PRF_PORT_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001A8)
+#define DCC_PRF_PORT_VL_MARK_FECN_CNT (DCC_CSRS + 0x000000000338)
+#define DCC_PRF_PORT_VL_RCV_BECN_CNT (DCC_CSRS + 0x000000000298)
+#define DCC_PRF_PORT_VL_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E8)
+#define DCC_PRF_PORT_VL_RCV_DATA_CNT (DCC_CSRS + 0x0000000001B0)
+#define DCC_PRF_PORT_VL_RCV_FECN_CNT (DCC_CSRS + 0x000000000248)
+#define DCC_PRF_PORT_VL_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001F8)
+#define DCC_PRF_PORT_XMIT_CORRECTABLE_CNT (DCC_CSRS + 0x000000000138)
+#define DCC_PRF_PORT_XMIT_DATA_CNT (DCC_CSRS + 0x000000000190)
+#define DCC_PRF_PORT_XMIT_MULTICAST_CNT (DCC_CSRS + 0x000000000128)
+#define DCC_PRF_PORT_XMIT_PKTS_CNT (DCC_CSRS + 0x0000000001A0)
+#define DCC_PRF_RX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000180)
+#define DCC_PRF_TX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000188)
+#define DC_DC8051_CFG_CSR_ACCESS_SEL (DC_8051_CSRS + 0x000000000110)
+#define DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK 0x2ull
+#define DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_0 (DC_8051_CSRS + 0x000000000118)
+#define DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT 8
+#define DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT 16
+#define DC_DC8051_CFG_EXT_DEV_1 (DC_8051_CSRS + 0x000000000120)
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK 0xFFFFull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT 16
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK 0xFFFF0000ull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK 0x1ull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK 0xFFull
+#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_0 (DC_8051_CSRS + 0x000000000028)
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK 0xFFFFFFFFFFFFull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT 16
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK 0x1ull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK 0xFFull
+#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_1 (DC_8051_CSRS + 0x000000000030)
+#define DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK 0x1ull
+#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK 0xFFull
+#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT 8
+#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK 0xFFFFFFFFFFFFull
+#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT 16
+#define DC_DC8051_CFG_LOCAL_GUID (DC_8051_CSRS + 0x000000000038)
+#define DC_DC8051_CFG_MODE (DC_8051_CSRS + 0x000000000070)
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL (DC_8051_CSRS + 0x000000000008)
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK 0x7FFFull
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT 0
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK 0x1000000ull
+#define DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK 0x10000ull
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP (DC_8051_CSRS + 0x000000000000)
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK 0x100ull
+#define DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK 0x1ull
+#define DC_DC8051_CFG_RAM_ACCESS_STATUS (DC_8051_CSRS + 0x000000000018)
+#define DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK 0x10000ull
+#define DC_DC8051_CFG_RAM_ACCESS_WR_DATA (DC_8051_CSRS + 0x000000000010)
+#define DC_DC8051_CFG_RAM_ACCESS_RD_DATA (DC_8051_CSRS + 0x000000000020)
+#define DC_DC8051_CFG_RST (DC_8051_CSRS + 0x000000000068)
+#define DC_DC8051_CFG_RST_CRAM_SMASK 0x2ull
+#define DC_DC8051_CFG_RST_DRAM_SMASK 0x4ull
+#define DC_DC8051_CFG_RST_IRAM_SMASK 0x8ull
+#define DC_DC8051_CFG_RST_M8051W_SMASK 0x1ull
+#define DC_DC8051_CFG_RST_SFR_SMASK 0x10ull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051 (DC_8051_CSRS + 0x0000000000D8)
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK 0xFFFFFFFFull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT 16
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK 0xFFFFull
+#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT 0
+#define DC_DC8051_ERR_CLR (DC_8051_CSRS + 0x0000000000E8)
+#define DC_DC8051_ERR_EN (DC_8051_CSRS + 0x0000000000F0)
+#define DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK 0x2ull
+#define DC_DC8051_ERR_FLG (DC_8051_CSRS + 0x0000000000E0)
+#define DC_DC8051_ERR_FLG_CRAM_MBE_SMASK 0x4ull
+#define DC_DC8051_ERR_FLG_CRAM_SBE_SMASK 0x8ull
+#define DC_DC8051_ERR_FLG_DRAM_MBE_SMASK 0x10ull
+#define DC_DC8051_ERR_FLG_DRAM_SBE_SMASK 0x20ull
+#define DC_DC8051_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x400ull
+#define DC_DC8051_ERR_FLG_IRAM_MBE_SMASK 0x40ull
+#define DC_DC8051_ERR_FLG_IRAM_SBE_SMASK 0x80ull
+#define DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK 0x2ull
+#define DC_DC8051_ERR_FLG_SET_BY_8051_SMASK 0x1ull
+#define DC_DC8051_ERR_FLG_UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES_SMASK 0x100ull
+#define DC_DC8051_STS_CUR_STATE (DC_8051_CSRS + 0x000000000060)
+#define DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK 0xFFull
+#define DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT 16
+#define DC_DC8051_STS_CUR_STATE_PORT_MASK 0xFFull
+#define DC_DC8051_STS_CUR_STATE_PORT_SHIFT 0
+#define DC_DC8051_STS_LOCAL_FM_SECURITY (DC_8051_CSRS + 0x000000000050)
+#define DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK 0x1ull
+#define DC_DC8051_STS_REMOTE_FM_SECURITY (DC_8051_CSRS + 0x000000000058)
+#define DC_DC8051_STS_REMOTE_GUID (DC_8051_CSRS + 0x000000000040)
+#define DC_DC8051_STS_REMOTE_NODE_TYPE (DC_8051_CSRS + 0x000000000048)
+#define DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK 0x3ull
+#define DC_DC8051_STS_REMOTE_PORT_NO (DC_8051_CSRS + 0x000000000130)
+#define DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK 0xFFull
+#define DC_LCB_CFG_ALLOW_LINK_UP (DC_LCB_CSRS + 0x000000000128)
+#define DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT 0
+#define DC_LCB_CFG_CRC_MODE (DC_LCB_CSRS + 0x000000000058)
+#define DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT 0
+#define DC_LCB_CFG_IGNORE_LOST_RCLK (DC_LCB_CSRS + 0x000000000020)
+#define DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK 0x1ull
+#define DC_LCB_CFG_LANE_WIDTH (DC_LCB_CSRS + 0x000000000100)
+#define DC_LCB_CFG_LINK_KILL_EN (DC_LCB_CSRS + 0x000000000120)
+#define DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
+#define DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK 0x400000ull
+#define DC_LCB_CFG_LN_DCLK (DC_LCB_CSRS + 0x000000000060)
+#define DC_LCB_CFG_LOOPBACK (DC_LCB_CSRS + 0x0000000000F8)
+#define DC_LCB_CFG_LOOPBACK_VAL_SHIFT 0
+#define DC_LCB_CFG_RUN (DC_LCB_CSRS + 0x000000000000)
+#define DC_LCB_CFG_RUN_EN_SHIFT 0
+#define DC_LCB_CFG_RX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000018)
+#define DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 8
+#define DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 4
+#define DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT 0
+#define DC_LCB_CFG_TX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000010)
+#define DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT 0
+#define DC_LCB_CFG_TX_FIFOS_RESET (DC_LCB_CSRS + 0x000000000008)
+#define DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT 0
+#define DC_LCB_ERR_CLR (DC_LCB_CSRS + 0x000000000308)
+#define DC_LCB_ERR_EN (DC_LCB_CSRS + 0x000000000310)
+#define DC_LCB_ERR_FLG (DC_LCB_CSRS + 0x000000000300)
+#define DC_LCB_ERR_FLG_REDUNDANT_FLIT_PARITY_ERR_SMASK 0x20000000ull
+#define DC_LCB_ERR_FLG_NEG_EDGE_LINK_TRANSFER_ACTIVE_SMASK 0x10000000ull
+#define DC_LCB_ERR_FLG_HOLD_REINIT_SMASK 0x8000000ull
+#define DC_LCB_ERR_FLG_RST_FOR_INCOMPLT_RND_TRIP_SMASK 0x4000000ull
+#define DC_LCB_ERR_FLG_RST_FOR_LINK_TIMEOUT_SMASK 0x2000000ull
+#define DC_LCB_ERR_FLG_CREDIT_RETURN_FLIT_MBE_SMASK 0x1000000ull
+#define DC_LCB_ERR_FLG_REPLAY_BUF_SBE_SMASK 0x800000ull
+#define DC_LCB_ERR_FLG_REPLAY_BUF_MBE_SMASK 0x400000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_SBE_SMASK 0x200000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_WRONG_CRC_MODE_SMASK 0x80000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_PARITY_ERR_SMASK 0x40000ull
+#define DC_LCB_ERR_FLG_VL_ACK_INPUT_BUF_OFLW_SMASK 0x20000ull
+#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_OFLW_SMASK 0x10000ull
+#define DC_LCB_ERR_FLG_ILLEGAL_FLIT_ENCODING_SMASK 0x8000ull
+#define DC_LCB_ERR_FLG_ILLEGAL_NULL_LTP_SMASK 0x4000ull
+#define DC_LCB_ERR_FLG_UNEXPECTED_ROUND_TRIP_MARKER_SMASK 0x2000ull
+#define DC_LCB_ERR_FLG_UNEXPECTED_REPLAY_MARKER_SMASK 0x1000ull
+#define DC_LCB_ERR_FLG_RCLK_STOPPED_SMASK 0x800ull
+#define DC_LCB_ERR_FLG_CRC_ERR_CNT_HIT_LIMIT_SMASK 0x400ull
+#define DC_LCB_ERR_FLG_REINIT_FOR_LN_DEGRADE_SMASK 0x200ull
+#define DC_LCB_ERR_FLG_REINIT_FROM_PEER_SMASK 0x100ull
+#define DC_LCB_ERR_FLG_SEQ_CRC_ERR_SMASK 0x80ull
+#define DC_LCB_ERR_FLG_RX_LESS_THAN_FOUR_LNS_SMASK 0x40ull
+#define DC_LCB_ERR_FLG_TX_LESS_THAN_FOUR_LNS_SMASK 0x20ull
+#define DC_LCB_ERR_FLG_LOST_REINIT_STALL_OR_TOS_SMASK 0x10ull
+#define DC_LCB_ERR_FLG_ALL_LNS_FAILED_REINIT_TEST_SMASK 0x8ull
+#define DC_LCB_ERR_FLG_RST_FOR_FAILED_DESKEW_SMASK 0x4ull
+#define DC_LCB_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x2ull
+#define DC_LCB_ERR_FLG_CSR_PARITY_ERR_SMASK 0x1ull
+#define DC_LCB_ERR_INFO_CRC_ERR_LN0 (DC_LCB_CSRS + 0x000000000328)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN1 (DC_LCB_CSRS + 0x000000000330)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN2 (DC_LCB_CSRS + 0x000000000338)
+#define DC_LCB_ERR_INFO_CRC_ERR_LN3 (DC_LCB_CSRS + 0x000000000340)
+#define DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN (DC_LCB_CSRS + 0x000000000348)
+#define DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT (DC_LCB_CSRS + 0x000000000368)
+#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT (DC_LCB_CSRS + 0x000000000370)
+#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT (DC_LCB_CSRS + 0x000000000378)
+#define DC_LCB_ERR_INFO_MISC_FLG_CNT (DC_LCB_CSRS + 0x000000000390)
+#define DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT (DC_LCB_CSRS + 0x000000000380)
+#define DC_LCB_ERR_INFO_RX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000358)
+#define DC_LCB_ERR_INFO_SBE_CNT (DC_LCB_CSRS + 0x000000000388)
+#define DC_LCB_ERR_INFO_SEQ_CRC_CNT (DC_LCB_CSRS + 0x000000000360)
+#define DC_LCB_ERR_INFO_TOTAL_CRC_ERR (DC_LCB_CSRS + 0x000000000320)
+#define DC_LCB_ERR_INFO_TX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000350)
+#define DC_LCB_PG_DBG_FLIT_CRDTS_CNT (DC_LCB_CSRS + 0x000000000580)
+#define DC_LCB_PG_STS_PAUSE_COMPLETE_CNT (DC_LCB_CSRS + 0x0000000005F8)
+#define DC_LCB_PG_STS_TX_MBE_CNT (DC_LCB_CSRS + 0x000000000608)
+#define DC_LCB_PG_STS_TX_SBE_CNT (DC_LCB_CSRS + 0x000000000600)
+#define DC_LCB_PRF_ACCEPTED_LTP_CNT (DC_LCB_CSRS + 0x000000000408)
+#define DC_LCB_PRF_CLK_CNTR (DC_LCB_CSRS + 0x000000000420)
+#define DC_LCB_PRF_GOOD_LTP_CNT (DC_LCB_CSRS + 0x000000000400)
+#define DC_LCB_PRF_RX_FLIT_CNT (DC_LCB_CSRS + 0x000000000410)
+#define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
+#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
+#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
+#define RCV_BUF_OVFL_CNT 10
+#define RCV_CONTEXT_EGR_STALL 22
+#define RCV_CONTEXT_RHQ_STALL 21
+#define RCV_DATA_PKT_CNT 0
+#define RCV_DWORD_CNT 1
+#define RCV_TID_FLOW_GEN_MISMATCH_CNT 20
+#define RCV_TID_FLOW_SEQ_MISMATCH_CNT 23
+#define RCV_TID_FULL_ERR_CNT 18
+#define RCV_TID_VALID_ERR_CNT 19
+#define RXE_NUM_32_BIT_COUNTERS 24
+#define RXE_NUM_64_BIT_COUNTERS 2
+#define RXE_NUM_RSM_INSTANCES 4
+#define RXE_NUM_TID_FLOWS 32
+#define RXE_PER_CONTEXT_OFFSET 0x0300000
+#define SEND_DATA_PKT_CNT 0
+#define SEND_DATA_PKT_VL0_CNT 12
+#define SEND_DATA_VL0_CNT 3
+#define SEND_DROPPED_PKT_CNT 5
+#define SEND_DWORD_CNT 1
+#define SEND_FLOW_STALL_CNT 4
+#define SEND_HEADERS_ERR_CNT 6
+#define SEND_LEN_ERR_CNT 1
+#define SEND_MAX_MIN_LEN_ERR_CNT 2
+#define SEND_UNDERRUN_CNT 3
+#define SEND_UNSUP_VL_ERR_CNT 0
+#define SEND_WAIT_CNT 2
+#define SEND_WAIT_VL0_CNT 21
+#define TXE_PIO_SEND_OFFSET 0x0800000
+#define ASIC_CFG_DRV_STR (ASIC + 0x000000000048)
+#define ASIC_CFG_MUTEX (ASIC + 0x000000000040)
+#define ASIC_CFG_SBUS_EXECUTE (ASIC + 0x000000000008)
+#define ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK 0x1ull
+#define ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK 0x2ull
+#define ASIC_CFG_SBUS_REQUEST (ASIC + 0x000000000000)
+#define ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT 16
+#define ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT 8
+#define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
+#define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
+#define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
+#define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
+#define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
+#define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
+#define ASIC_EEP_CTL_STAT (ASIC + 0x000000000300)
+#define ASIC_EEP_CTL_STAT_EP_RESET_SMASK 0x4ull
+#define ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT 8
+#define ASIC_EEP_CTL_STAT_RESETCSR 0x0000000083818000ull
+#define ASIC_EEP_DATA (ASIC + 0x000000000310)
+#define ASIC_GPIO_CLEAR (ASIC + 0x000000000230)
+#define ASIC_GPIO_FORCE (ASIC + 0x000000000238)
+#define ASIC_GPIO_IN (ASIC + 0x000000000200)
+#define ASIC_GPIO_INVERT (ASIC + 0x000000000210)
+#define ASIC_GPIO_MASK (ASIC + 0x000000000220)
+#define ASIC_GPIO_OE (ASIC + 0x000000000208)
+#define ASIC_GPIO_OUT (ASIC + 0x000000000218)
+#define ASIC_PCIE_SD_HOST_CMD (ASIC + 0x000000000100)
+#define ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT 0
+#define ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK 0x400ull
+#define ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT 2
+#define ASIC_PCIE_SD_HOST_CMD_TIMER_MASK 0xFFFFFull
+#define ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT 12
+#define ASIC_PCIE_SD_HOST_STATUS (ASIC + 0x000000000108)
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK 0x7ull
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT 2
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK 0x3ull
+#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT 0
+#define ASIC_PCIE_SD_INTRPT_DATA_CODE (ASIC + 0x000000000110)
+#define ASIC_PCIE_SD_INTRPT_ENABLE (ASIC + 0x000000000118)
+#define ASIC_PCIE_SD_INTRPT_LIST (ASIC + 0x000000000180)
+#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT 16
+#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT 0
+#define ASIC_PCIE_SD_INTRPT_STATUS (ASIC + 0x000000000128)
+#define ASIC_QSFP1_CLEAR (ASIC + 0x000000000270)
+#define ASIC_QSFP1_FORCE (ASIC + 0x000000000278)
+#define ASIC_QSFP1_IN (ASIC + 0x000000000240)
+#define ASIC_QSFP1_INVERT (ASIC + 0x000000000250)
+#define ASIC_QSFP1_MASK (ASIC + 0x000000000260)
+#define ASIC_QSFP1_OE (ASIC + 0x000000000248)
+#define ASIC_QSFP1_OUT (ASIC + 0x000000000258)
+#define ASIC_QSFP1_STATUS (ASIC + 0x000000000268)
+#define ASIC_QSFP2_CLEAR (ASIC + 0x0000000002B0)
+#define ASIC_QSFP2_FORCE (ASIC + 0x0000000002B8)
+#define ASIC_QSFP2_IN (ASIC + 0x000000000280)
+#define ASIC_QSFP2_INVERT (ASIC + 0x000000000290)
+#define ASIC_QSFP2_MASK (ASIC + 0x0000000002A0)
+#define ASIC_QSFP2_OE (ASIC + 0x000000000288)
+#define ASIC_QSFP2_OUT (ASIC + 0x000000000298)
+#define ASIC_QSFP2_STATUS (ASIC + 0x0000000002A8)
+#define ASIC_STS_SBUS_COUNTERS (ASIC + 0x000000000018)
+#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_MASK 0xFFFFull
+#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_SHIFT 0
+#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_MASK 0xFFFFull
+#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_SHIFT 16
+#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
+#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
+#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
+#define ASIC_STS_THERM (ASIC + 0x000000000058)
+#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
+#define ASIC_STS_THERM_CURR_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_CURR_TEMP_SHIFT 2
+#define ASIC_STS_THERM_HI_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_HI_TEMP_SHIFT 50
+#define ASIC_STS_THERM_LO_TEMP_MASK 0x7FFull
+#define ASIC_STS_THERM_LO_TEMP_SHIFT 34
+#define ASIC_STS_THERM_LOW_SHIFT 13
+#define CCE_COUNTER_ARRAY32 (CCE + 0x000000000060)
+#define CCE_CTRL (CCE + 0x000000000010)
+#define CCE_CTRL_RXE_RESUME_SMASK 0x800ull
+#define CCE_CTRL_SPC_FREEZE_SMASK 0x100ull
+#define CCE_CTRL_SPC_UNFREEZE_SMASK 0x200ull
+#define CCE_CTRL_TXE_RESUME_SMASK 0x2000ull
+#define CCE_DC_CTRL (CCE + 0x0000000000B8)
+#define CCE_DC_CTRL_DC_RESET_SMASK 0x1ull
+#define CCE_DC_CTRL_RESETCSR 0x0000000000000001ull
+#define CCE_ERR_CLEAR (CCE + 0x000000000050)
+#define CCE_ERR_MASK (CCE + 0x000000000048)
+#define CCE_ERR_STATUS (CCE + 0x000000000040)
+#define CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK 0x40ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK 0x1000ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK \
+ 0x200ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK \
+ 0x800ull
+#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK \
+ 0x400ull
+#define CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK 0x100ull
+#define CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK 0x80ull
+#define CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK 0x1ull
+#define CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK 0x4000000000ull
+#define CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK 0x8000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK 0x10000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK 0x1000000000ull
+#define CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK 0x2000000000ull
+#define CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK 0x400000000ull
+#define CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK 0x20ull
+#define CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK 0x800000000ull
+#define CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK 0x100000000ull
+#define CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK 0x200000000ull
+#define CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK 0x10ull
+#define CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK 0x8ull
+#define CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK 0x40000000ull
+#define CCE_ERR_STATUS_LA_TRIGGERED_SMASK 0x80000000ull
+#define CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK 0x40000ull
+#define CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK 0x4000000ull
+#define CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK 0x20000ull
+#define CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK 0x2000000ull
+#define CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK 0x100000ull
+#define CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK 0x80000ull
+#define CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK 0x10000ull
+#define CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK 0x1000000ull
+#define CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK 0x8000ull
+#define CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK 0x800000ull
+#define CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK 0x20000000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK 0x2000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK 0x200000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK 0x4000ull
+#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK 0x400000ull
+#define CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK 0x10000000ull
+#define CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK 0x8000000ull
+#define CCE_INT_CLEAR (CCE + 0x000000110A00)
+#define CCE_INT_COUNTER_ARRAY32 (CCE + 0x000000110D00)
+#define CCE_INT_FORCE (CCE + 0x000000110B00)
+#define CCE_INT_MAP (CCE + 0x000000110500)
+#define CCE_INT_MASK (CCE + 0x000000110900)
+#define CCE_INT_STATUS (CCE + 0x000000110800)
+#define CCE_MSIX_INT_GRANTED (CCE + 0x000000110200)
+#define CCE_MSIX_TABLE_LOWER (CCE + 0x000000100000)
+#define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008)
+#define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull
+#define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400)
+#define CCE_REVISION (CCE + 0x000000000000)
+#define CCE_REVISION2 (CCE + 0x000000000008)
+#define CCE_REVISION2_HFI_ID_MASK 0x1ull
+#define CCE_REVISION2_HFI_ID_SHIFT 0
+#define CCE_REVISION2_IMPL_CODE_SHIFT 8
+#define CCE_REVISION2_IMPL_REVISION_SHIFT 16
+#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_MASK 0xFull
+#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_SHIFT 32
+#define CCE_REVISION_CHIP_REV_MAJOR_MASK 0xFFull
+#define CCE_REVISION_CHIP_REV_MAJOR_SHIFT 8
+#define CCE_REVISION_CHIP_REV_MINOR_MASK 0xFFull
+#define CCE_REVISION_CHIP_REV_MINOR_SHIFT 0
+#define CCE_REVISION_SW_MASK 0xFFull
+#define CCE_REVISION_SW_SHIFT 24
+#define CCE_SCRATCH (CCE + 0x000000000020)
+#define CCE_STATUS (CCE + 0x000000000018)
+#define CCE_STATUS_RXE_FROZE_SMASK 0x2ull
+#define CCE_STATUS_RXE_PAUSED_SMASK 0x20ull
+#define CCE_STATUS_SDMA_FROZE_SMASK 0x1ull
+#define CCE_STATUS_SDMA_PAUSED_SMASK 0x10ull
+#define CCE_STATUS_TXE_FROZE_SMASK 0x4ull
+#define CCE_STATUS_TXE_PAUSED_SMASK 0x40ull
+#define CCE_STATUS_TXE_PIO_FROZE_SMASK 0x8ull
+#define CCE_STATUS_TXE_PIO_PAUSED_SMASK 0x80ull
+#define MISC_CFG_FW_CTRL (MISC + 0x000000001000)
+#define MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK 0x2ull
+#define MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT 2
+#define MISC_CFG_FW_CTRL_RSA_STATUS_SMASK 0xCull
+#define MISC_CFG_RSA_CMD (MISC + 0x000000000A08)
+#define MISC_CFG_RSA_MODULUS (MISC + 0x000000000400)
+#define MISC_CFG_RSA_MU (MISC + 0x000000000A10)
+#define MISC_CFG_RSA_R2 (MISC + 0x000000000000)
+#define MISC_CFG_RSA_SIGNATURE (MISC + 0x000000000200)
+#define MISC_CFG_SHA_PRELOAD (MISC + 0x000000000A00)
+#define MISC_ERR_CLEAR (MISC + 0x000000002010)
+#define MISC_ERR_MASK (MISC + 0x000000002008)
+#define MISC_ERR_STATUS (MISC + 0x000000002000)
+#define MISC_ERR_STATUS_MISC_PLL_LOCK_FAIL_ERR_SMASK 0x1000ull
+#define MISC_ERR_STATUS_MISC_MBIST_FAIL_ERR_SMASK 0x800ull
+#define MISC_ERR_STATUS_MISC_INVALID_EEP_CMD_ERR_SMASK 0x400ull
+#define MISC_ERR_STATUS_MISC_EFUSE_DONE_PARITY_ERR_SMASK 0x200ull
+#define MISC_ERR_STATUS_MISC_EFUSE_WRITE_ERR_SMASK 0x100ull
+#define MISC_ERR_STATUS_MISC_EFUSE_READ_BAD_ADDR_ERR_SMASK 0x80ull
+#define MISC_ERR_STATUS_MISC_EFUSE_CSR_PARITY_ERR_SMASK 0x40ull
+#define MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK 0x20ull
+#define MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK 0x10ull
+#define MISC_ERR_STATUS_MISC_SBUS_WRITE_FAILED_ERR_SMASK 0x8ull
+#define MISC_ERR_STATUS_MISC_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define MISC_ERR_STATUS_MISC_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK 0x1ull
+#define PCI_CFG_MSIX0 (PCIE + 0x0000000000B0)
+#define PCI_CFG_REG1 (PCIE + 0x000000000004)
+#define PCI_CFG_REG11 (PCIE + 0x00000000002C)
+#define PCIE_CFG_SPCIE1 (PCIE + 0x00000000014C)
+#define PCIE_CFG_SPCIE2 (PCIE + 0x000000000150)
+#define PCIE_CFG_TPH2 (PCIE + 0x000000000180)
+#define RCV_ARRAY (RXE + 0x000000200000)
+#define RCV_ARRAY_CNT (RXE + 0x000000000018)
+#define RCV_ARRAY_RT_ADDR_MASK 0xFFFFFFFFFull
+#define RCV_ARRAY_RT_ADDR_SHIFT 0
+#define RCV_ARRAY_RT_BUF_SIZE_SHIFT 36
+#define RCV_ARRAY_RT_WRITE_ENABLE_SMASK 0x8000000000000000ull
+#define RCV_AVAIL_TIME_OUT (RXE + 0x000000100050)
+#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK 0xFFull
+#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT 0
+#define RCV_BTH_QP (RXE + 0x000000000028)
+#define RCV_BTH_QP_KDETH_QP_MASK 0xFFull
+#define RCV_BTH_QP_KDETH_QP_SHIFT 16
+#define RCV_BYPASS (RXE + 0x000000000038)
+#define RCV_CONTEXTS (RXE + 0x000000000010)
+#define RCV_COUNTER_ARRAY32 (RXE + 0x000000000400)
+#define RCV_COUNTER_ARRAY64 (RXE + 0x000000000500)
+#define RCV_CTRL (RXE + 0x000000000000)
+#define RCV_CTRL_RCV_BYPASS_ENABLE_SMASK 0x10ull
+#define RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK 0x40ull
+#define RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK 0x4ull
+#define RCV_CTRL_RCV_PORT_ENABLE_SMASK 0x1ull
+#define RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK 0x2ull
+#define RCV_CTRL_RCV_RSM_ENABLE_SMASK 0x20ull
+#define RCV_CTRL_RX_RBUF_INIT_SMASK 0x200ull
+#define RCV_CTXT_CTRL (RXE + 0x000000100000)
+#define RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK 0x4ull
+#define RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK 0x8ull
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK 0x7ull
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT 8
+#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK 0x700ull
+#define RCV_CTXT_CTRL_ENABLE_SMASK 0x1ull
+#define RCV_CTXT_CTRL_INTR_AVAIL_SMASK 0x20ull
+#define RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK 0x2ull
+#define RCV_CTXT_CTRL_TAIL_UPD_SMASK 0x40ull
+#define RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK 0x10ull
+#define RCV_CTXT_STATUS (RXE + 0x000000100008)
+#define RCV_EGR_CTRL (RXE + 0x000000100010)
+#define RCV_EGR_CTRL_EGR_BASE_INDEX_MASK 0x1FFFull
+#define RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT 0
+#define RCV_EGR_CTRL_EGR_CNT_MASK 0x1FFull
+#define RCV_EGR_CTRL_EGR_CNT_SHIFT 32
+#define RCV_EGR_INDEX_HEAD (RXE + 0x000000300018)
+#define RCV_EGR_INDEX_HEAD_HEAD_MASK 0x7FFull
+#define RCV_EGR_INDEX_HEAD_HEAD_SHIFT 0
+#define RCV_ERR_CLEAR (RXE + 0x000000000070)
+#define RCV_ERR_INFO (RXE + 0x000000000050)
+#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK 0x1Full
+#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK 0x20ull
+#define RCV_ERR_MASK (RXE + 0x000000000068)
+#define RCV_ERR_STATUS (RXE + 0x000000000060)
+#define RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK 0x8000000000000000ull
+#define RCV_ERR_STATUS_RX_CSR_READ_BAD_ADDR_ERR_SMASK 0x2000000000000000ull
+#define RCV_ERR_STATUS_RX_CSR_WRITE_BAD_ADDR_ERR_SMASK \
+ 0x4000000000000000ull
+#define RCV_ERR_STATUS_RX_DC_INTF_PARITY_ERR_SMASK 0x2ull
+#define RCV_ERR_STATUS_RX_DC_SOP_EOP_PARITY_ERR_SMASK 0x200ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_COR_ERR_SMASK 0x1ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK 0x200000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK 0x1000000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_COR_ERR_SMASK \
+ 0x40000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
+ 0x20000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
+ 0x800000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
+ 0x400000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_FLAG_COR_ERR_SMASK 0x800ull
+#define RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK 0x400ull
+#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_COR_ERR_SMASK 0x10000000000000ull
+#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK 0x8000000000000ull
+#define RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK 0x200000000000ull
+#define RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK 0x400000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK 0x100000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
+ 0x10000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK 0x8000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
+ 0x20000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_COR_ERR_SMASK 0x80000000000ull
+#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK 0x40000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK 0x40000000ull
+#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_COR_ERR_SMASK 0x100000ull
+#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK 0x80000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK 0x400000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK 0x10000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK 0x2000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
+ 0x200000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK 0x800000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
+ 0x8000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK 0x4000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK 0x1000000ull
+#define RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK 0x20000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DATA_COR_ERR_SMASK 0x100000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK 0x80000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK 0x1000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK 0x800000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_COR_ERR_SMASK 0x4000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK 0x2000000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK 0x100000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK 0x800000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
+ 0x1000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK 0x200000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK 0x400000000ull
+#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_COR_ERR_SMASK 0x4000ull
+#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK 0x2000ull
+#define RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK 0x80000000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_COR_ERR_SMASK 0x40000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK 0x10000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK 0x8000ull
+#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK 0x20000ull
+#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_COR_ERR_SMASK 0x4000000000ull
+#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK 0x2000000000ull
+#define RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK 0x100ull
+#define RCV_ERR_STATUS_RX_RCV_DATA_COR_ERR_SMASK 0x20ull
+#define RCV_ERR_STATUS_RX_RCV_DATA_UNC_ERR_SMASK 0x10ull
+#define RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK 0x1000ull
+#define RCV_ERR_STATUS_RX_RCV_HDR_COR_ERR_SMASK 0x8ull
+#define RCV_ERR_STATUS_RX_RCV_HDR_UNC_ERR_SMASK 0x4ull
+#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_COR_ERR_SMASK 0x80ull
+#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK 0x40ull
+#define RCV_HDR_ADDR (RXE + 0x000000100028)
+#define RCV_HDR_CNT (RXE + 0x000000100030)
+#define RCV_HDR_CNT_CNT_MASK 0x1FFull
+#define RCV_HDR_CNT_CNT_SHIFT 0
+#define RCV_HDR_ENT_SIZE (RXE + 0x000000100038)
+#define RCV_HDR_ENT_SIZE_ENT_SIZE_MASK 0x7ull
+#define RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT 0
+#define RCV_HDR_HEAD (RXE + 0x000000300008)
+#define RCV_HDR_HEAD_COUNTER_MASK 0xFFull
+#define RCV_HDR_HEAD_COUNTER_SHIFT 32
+#define RCV_HDR_HEAD_HEAD_MASK 0x7FFFFull
+#define RCV_HDR_HEAD_HEAD_SHIFT 0
+#define RCV_HDR_HEAD_HEAD_SMASK 0x7FFFFull
+#define RCV_HDR_OVFL_CNT (RXE + 0x000000100058)
+#define RCV_HDR_SIZE (RXE + 0x000000100040)
+#define RCV_HDR_SIZE_HDR_SIZE_MASK 0x1Full
+#define RCV_HDR_SIZE_HDR_SIZE_SHIFT 0
+#define RCV_HDR_TAIL (RXE + 0x000000300000)
+#define RCV_HDR_TAIL_ADDR (RXE + 0x000000100048)
+#define RCV_KEY_CTRL (RXE + 0x000000100020)
+#define RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK 0x200000000ull
+#define RCV_KEY_CTRL_JOB_KEY_VALUE_MASK 0xFFFFull
+#define RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT 0
+#define RCV_MULTICAST (RXE + 0x000000000030)
+#define RCV_PARTITION_KEY (RXE + 0x000000000200)
+#define RCV_PARTITION_KEY_PARTITION_KEY_A_MASK 0xFFFFull
+#define RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT 16
+#define RCV_QP_MAP_TABLE (RXE + 0x000000000100)
+#define RCV_RSM_CFG (RXE + 0x000000000600)
+#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK 0x1ull
+#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT 0
+#define RCV_RSM_CFG_PACKET_TYPE_SHIFT 60
+#define RCV_RSM_MAP_TABLE (RXE + 0x000000000900)
+#define RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK 0xFFull
+#define RCV_RSM_MATCH (RXE + 0x000000000800)
+#define RCV_RSM_MATCH_MASK1_SHIFT 0
+#define RCV_RSM_MATCH_MASK2_SHIFT 16
+#define RCV_RSM_MATCH_VALUE1_SHIFT 8
+#define RCV_RSM_MATCH_VALUE2_SHIFT 24
+#define RCV_RSM_SELECT (RXE + 0x000000000700)
+#define RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT 0
+#define RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT 16
+#define RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT 32
+#define RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT 44
+#define RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT 48
+#define RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT 60
+#define RCV_STATUS (RXE + 0x000000000008)
+#define RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK 0x1ull
+#define RCV_STATUS_RX_RBUF_INIT_DONE_SMASK 0x200ull
+#define RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK 0x40ull
+#define RCV_TID_CTRL (RXE + 0x000000100018)
+#define RCV_TID_CTRL_TID_BASE_INDEX_MASK 0x1FFFull
+#define RCV_TID_CTRL_TID_BASE_INDEX_SHIFT 0
+#define RCV_TID_CTRL_TID_PAIR_CNT_MASK 0x1FFull
+#define RCV_TID_CTRL_TID_PAIR_CNT_SHIFT 32
+#define RCV_TID_FLOW_TABLE (RXE + 0x000000300800)
+#define RCV_VL15 (RXE + 0x000000000048)
+#define SEND_BTH_QP (TXE + 0x0000000000A0)
+#define SEND_BTH_QP_KDETH_QP_MASK 0xFFull
+#define SEND_BTH_QP_KDETH_QP_SHIFT 16
+#define SEND_CM_CREDIT_USED_STATUS (TXE + 0x000000000510)
+#define SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK \
+ 0x1000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK \
+ 0x8000000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK \
+ 0x2000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK \
+ 0x4000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK \
+ 0x8000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK \
+ 0x10000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK \
+ 0x20000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK \
+ 0x40000000000000ull
+#define SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK \
+ 0x80000000000000ull
+#define SEND_CM_CREDIT_VL (TXE + 0x000000000600)
+#define SEND_CM_CREDIT_VL15 (TXE + 0x000000000678)
+#define SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT 0
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT 0
+#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK 0xFFFFull
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT 16
+#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK 0xFFFF0000ull
+#define SEND_CM_CTRL (TXE + 0x000000000500)
+#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
+#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
+#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
+#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK 0xFFFFull
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT 32
+#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK 0xFFFF00000000ull
+#define SEND_CM_LOCAL_AU_TABLE0_TO3 (TXE + 0x000000000520)
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT 0
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT 16
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT 32
+#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT 48
+#define SEND_CM_LOCAL_AU_TABLE4_TO7 (TXE + 0x000000000528)
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT 0
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT 16
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT 32
+#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT 48
+#define SEND_CM_REMOTE_AU_TABLE0_TO3 (TXE + 0x000000000530)
+#define SEND_CM_REMOTE_AU_TABLE4_TO7 (TXE + 0x000000000538)
+#define SEND_CM_TIMER_CTRL (TXE + 0x000000000518)
+#define SEND_CONTEXTS (TXE + 0x000000000010)
+#define SEND_CONTEXT_SET_CTRL (TXE + 0x000000000200)
+#define SEND_COUNTER_ARRAY32 (TXE + 0x000000000300)
+#define SEND_COUNTER_ARRAY64 (TXE + 0x000000000400)
+#define SEND_CTRL (TXE + 0x000000000000)
+#define SEND_CTRL_CM_RESET_SMASK 0x4ull
+#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
+#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
+#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
+#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
+#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK \
+ 0x200000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK 0x800ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK 0x400ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK 0x1000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK 0x2000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
+ 0x100000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK 0x10000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
+ 0x80000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK \
+ 0x40000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
+ 0x8000ull
+#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK \
+ 0x4000ull
+#define SEND_CTXT_CHECK_JOB_KEY (TXE + 0x000000100090)
+#define SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK 0x100000000ull
+#define SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK 0xFFFF0000ull
+#define SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_OPCODE (TXE + 0x0000001000A8)
+#define SEND_CTXT_CHECK_OPCODE_MASK_SHIFT 8
+#define SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_PARTITION_KEY (TXE + 0x000000100098)
+#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_SLID (TXE + 0x0000001000A0)
+#define SEND_CTXT_CHECK_SLID_MASK_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_SLID_MASK_SHIFT 16
+#define SEND_CTXT_CHECK_SLID_VALUE_MASK 0xFFFFull
+#define SEND_CTXT_CHECK_SLID_VALUE_SHIFT 0
+#define SEND_CTXT_CHECK_VL (TXE + 0x000000100088)
+#define SEND_CTXT_CREDIT_CTRL (TXE + 0x000000100010)
+#define SEND_CTXT_CREDIT_CTRL_CREDIT_INTR_SMASK 0x20000ull
+#define SEND_CTXT_CREDIT_CTRL_EARLY_RETURN_SMASK 0x10000ull
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
+#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
+#define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
+#define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
+#define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
+#define SEND_CTXT_CREDIT_RETURN_ADDR_ADDRESS_SMASK 0xFFFFFFFFFFC0ull
+#define SEND_CTXT_CTRL (TXE + 0x000000100000)
+#define SEND_CTXT_CTRL_CTXT_BASE_MASK 0x3FFFull
+#define SEND_CTXT_CTRL_CTXT_BASE_SHIFT 32
+#define SEND_CTXT_CTRL_CTXT_DEPTH_MASK 0x7FFull
+#define SEND_CTXT_CTRL_CTXT_DEPTH_SHIFT 48
+#define SEND_CTXT_CTRL_CTXT_ENABLE_SMASK 0x1ull
+#define SEND_CTXT_ERR_CLEAR (TXE + 0x000000100050)
+#define SEND_CTXT_ERR_MASK (TXE + 0x000000100048)
+#define SEND_CTXT_ERR_STATUS (TXE + 0x000000100040)
+#define SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK 0x2ull
+#define SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK 0x1ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK 0x4ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK 0x10ull
+#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK 0x8ull
+#define SEND_CTXT_STATUS (TXE + 0x000000100008)
+#define SEND_CTXT_STATUS_CTXT_HALTED_SMASK 0x1ull
+#define SEND_DMA_BASE_ADDR (TXE + 0x000000200010)
+#define SEND_DMA_CHECK_ENABLE (TXE + 0x000000200080)
+#define SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
+#define SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 0x200000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
+ 0x100000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
+ 0x80000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 0x40000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
+ 0x8000ull
+#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK 0x4000ull
+#define SEND_DMA_CHECK_JOB_KEY (TXE + 0x000000200090)
+#define SEND_DMA_CHECK_OPCODE (TXE + 0x0000002000A8)
+#define SEND_DMA_CHECK_PARTITION_KEY (TXE + 0x000000200098)
+#define SEND_DMA_CHECK_SLID (TXE + 0x0000002000A0)
+#define SEND_DMA_CHECK_SLID_MASK_MASK 0xFFFFull
+#define SEND_DMA_CHECK_SLID_MASK_SHIFT 16
+#define SEND_DMA_CHECK_SLID_VALUE_MASK 0xFFFFull
+#define SEND_DMA_CHECK_SLID_VALUE_SHIFT 0
+#define SEND_DMA_CHECK_VL (TXE + 0x000000200088)
+#define SEND_DMA_CTRL (TXE + 0x000000200000)
+#define SEND_DMA_CTRL_SDMA_CLEANUP_SMASK 0x4ull
+#define SEND_DMA_CTRL_SDMA_ENABLE_SMASK 0x1ull
+#define SEND_DMA_CTRL_SDMA_HALT_SMASK 0x2ull
+#define SEND_DMA_CTRL_SDMA_INT_ENABLE_SMASK 0x8ull
+#define SEND_DMA_DESC_CNT (TXE + 0x000000200050)
+#define SEND_DMA_DESC_CNT_CNT_MASK 0xFFFFull
+#define SEND_DMA_DESC_CNT_CNT_SHIFT 0
+#define SEND_DMA_ENG_ERR_CLEAR (TXE + 0x000000200070)
+#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK 0x1ull
+#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT 18
+#define SEND_DMA_ENG_ERR_MASK (TXE + 0x000000200068)
+#define SEND_DMA_ENG_ERR_STATUS (TXE + 0x000000200060)
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK 0x8000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK 0x4000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK 0x10ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK 0x2ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK 0x40ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK 0x800ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK 0x1000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK \
+ 0x40000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK 0x400ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK \
+ 0x20000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK 0x80ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK 0x20ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK \
+ 0x100ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK \
+ 0x10000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK 0x8ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK 0x2000ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK 0x4ull
+#define SEND_DMA_ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK 0x1ull
+#define SEND_DMA_ENGINES (TXE + 0x000000000018)
+#define SEND_DMA_ERR_CLEAR (TXE + 0x000000000070)
+#define SEND_DMA_ERR_MASK (TXE + 0x000000000068)
+#define SEND_DMA_ERR_STATUS (TXE + 0x000000000060)
+#define SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK 0x2ull
+#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK 0x8ull
+#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK 0x4ull
+#define SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK 0x1ull
+#define SEND_DMA_HEAD (TXE + 0x000000200028)
+#define SEND_DMA_HEAD_ADDR (TXE + 0x000000200030)
+#define SEND_DMA_LEN_GEN (TXE + 0x000000200018)
+#define SEND_DMA_LEN_GEN_GENERATION_SHIFT 16
+#define SEND_DMA_LEN_GEN_LENGTH_SHIFT 6
+#define SEND_DMA_MEMORY (TXE + 0x0000002000B0)
+#define SEND_DMA_MEMORY_SDMA_MEMORY_CNT_SHIFT 16
+#define SEND_DMA_MEMORY_SDMA_MEMORY_INDEX_SHIFT 0
+#define SEND_DMA_MEM_SIZE (TXE + 0x000000000028)
+#define SEND_DMA_PRIORITY_THLD (TXE + 0x000000200038)
+#define SEND_DMA_RELOAD_CNT (TXE + 0x000000200048)
+#define SEND_DMA_STATUS (TXE + 0x000000200008)
+#define SEND_DMA_STATUS_ENG_CLEANED_UP_SMASK 0x200000000000000ull
+#define SEND_DMA_STATUS_ENG_HALTED_SMASK 0x100000000000000ull
+#define SEND_DMA_TAIL (TXE + 0x000000200020)
+#define SEND_EGRESS_CTXT_STATUS (TXE + 0x000000000800)
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK 0x10000ull
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT 0
+#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK \
+ 0x3FFFull
+#define SEND_EGRESS_ERR_CLEAR (TXE + 0x000000000090)
+#define SEND_EGRESS_ERR_INFO (TXE + 0x000000000F00)
+#define SEND_EGRESS_ERR_INFO_BAD_PKT_LEN_ERR_SMASK 0x20000ull
+#define SEND_EGRESS_ERR_INFO_BYPASS_ERR_SMASK 0x800ull
+#define SEND_EGRESS_ERR_INFO_GRH_ERR_SMASK 0x400ull
+#define SEND_EGRESS_ERR_INFO_JOB_KEY_ERR_SMASK 0x4ull
+#define SEND_EGRESS_ERR_INFO_KDETH_PACKETS_ERR_SMASK 0x1000ull
+#define SEND_EGRESS_ERR_INFO_NON_KDETH_PACKETS_ERR_SMASK 0x2000ull
+#define SEND_EGRESS_ERR_INFO_OPCODE_ERR_SMASK 0x20ull
+#define SEND_EGRESS_ERR_INFO_PARTITION_KEY_ERR_SMASK 0x8ull
+#define SEND_EGRESS_ERR_INFO_PBC_STATIC_RATE_CONTROL_ERR_SMASK 0x100000ull
+#define SEND_EGRESS_ERR_INFO_PBC_TEST_ERR_SMASK 0x10000ull
+#define SEND_EGRESS_ERR_INFO_RAW_ERR_SMASK 0x100ull
+#define SEND_EGRESS_ERR_INFO_RAW_IPV6_ERR_SMASK 0x200ull
+#define SEND_EGRESS_ERR_INFO_SLID_ERR_SMASK 0x10ull
+#define SEND_EGRESS_ERR_INFO_TOO_LONG_BYPASS_PACKETS_ERR_SMASK 0x80000ull
+#define SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK 0x40000ull
+#define SEND_EGRESS_ERR_INFO_TOO_SMALL_BYPASS_PACKETS_ERR_SMASK 0x8000ull
+#define SEND_EGRESS_ERR_INFO_TOO_SMALL_IB_PACKETS_ERR_SMASK 0x4000ull
+#define SEND_EGRESS_ERR_INFO_VL_ERR_SMASK 0x2ull
+#define SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK 0x40ull
+#define SEND_EGRESS_ERR_MASK (TXE + 0x000000000088)
+#define SEND_EGRESS_ERR_SOURCE (TXE + 0x000000000F08)
+#define SEND_EGRESS_ERR_STATUS (TXE + 0x000000000080)
+#define SEND_EGRESS_ERR_STATUS_TX_CONFIG_PARITY_ERR_SMASK 0x8000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_OVERRUN_ERR_SMASK \
+ 0x200000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_PARITY_ERR_SMASK \
+ 0x20000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK \
+ 0x800000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_COR_ERR_SMASK \
+ 0x2000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNC_ERR_SMASK \
+ 0x200000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR_SMASK \
+ 0x8ull
+#define SEND_EGRESS_ERR_STATUS_TX_HCRC_INSERTION_ERR_SMASK \
+ 0x400000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_ILLEGAL_VL_ERR_SMASK 0x1000ull
+#define SEND_EGRESS_ERR_STATUS_TX_INCORRECT_LINK_STATE_ERR_SMASK 0x20ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_CSR_PARITY_ERR_SMASK 0x2000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_COR_ERR_SMASK \
+ 0x1000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR_SMASK \
+ 0x100000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_COR_ERR_SMASK \
+ 0x2000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR_SMASK \
+ 0x200000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_COR_ERR_SMASK \
+ 0x4000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR_SMASK \
+ 0x400000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_COR_ERR_SMASK \
+ 0x8000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR_SMASK \
+ 0x800000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_COR_ERR_SMASK \
+ 0x10000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR_SMASK \
+ 0x1000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_COR_ERR_SMASK \
+ 0x20000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR_SMASK \
+ 0x2000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_COR_ERR_SMASK \
+ 0x40000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR_SMASK \
+ 0x4000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_COR_ERR_SMASK \
+ 0x80000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR_SMASK \
+ 0x8000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_COR_ERR_SMASK \
+ 0x100000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR_SMASK \
+ 0x10000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_LINKDOWN_ERR_SMASK 0x10ull
+#define SEND_EGRESS_ERR_STATUS_TX_PIO_LAUNCH_INTF_PARITY_ERR_SMASK 0x80ull
+#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_COR_ERR_SMASK 0x1ull
+#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_UNC_ERR_SMASK 0x2ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_COR_ERR_SMASK \
+ 0x1000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_CSR_UNC_ERR_SMASK \
+ 0x8000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_UNC_ERR_SMASK \
+ 0x100000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_COR_ERR_SMASK \
+ 0x800000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_CSR_UNC_ERR_SMASK \
+ 0x4000000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_UNC_ERR_SMASK \
+ 0x80000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_COR_ERR_SMASK 0x400000000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_UNC_ERR_SMASK 0x40000000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_CSR_PARITY_ERR_SMASK 0x4000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR_SMASK \
+ 0x800ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA0_DISALLOWED_PACKET_ERR_SMASK \
+ 0x10000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA10_DISALLOWED_PACKET_ERR_SMASK \
+ 0x4000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA11_DISALLOWED_PACKET_ERR_SMASK \
+ 0x8000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA12_DISALLOWED_PACKET_ERR_SMASK \
+ 0x10000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA13_DISALLOWED_PACKET_ERR_SMASK \
+ 0x20000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA14_DISALLOWED_PACKET_ERR_SMASK \
+ 0x40000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA15_DISALLOWED_PACKET_ERR_SMASK \
+ 0x80000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA1_DISALLOWED_PACKET_ERR_SMASK \
+ 0x20000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA2_DISALLOWED_PACKET_ERR_SMASK \
+ 0x40000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA3_DISALLOWED_PACKET_ERR_SMASK \
+ 0x80000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA4_DISALLOWED_PACKET_ERR_SMASK \
+ 0x100000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA5_DISALLOWED_PACKET_ERR_SMASK \
+ 0x200000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA6_DISALLOWED_PACKET_ERR_SMASK \
+ 0x400000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA7_DISALLOWED_PACKET_ERR_SMASK \
+ 0x800000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA8_DISALLOWED_PACKET_ERR_SMASK \
+ 0x1000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA9_DISALLOWED_PACKET_ERR_SMASK \
+ 0x2000000ull
+#define SEND_EGRESS_ERR_STATUS_TX_SDMA_LAUNCH_INTF_PARITY_ERR_SMASK \
+ 0x100ull
+#define SEND_EGRESS_SEND_DMA_STATUS (TXE + 0x000000000E00)
+#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT 0
+#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
+ 0x3FFFull
+#define SEND_ERR_CLEAR (TXE + 0x0000000000F0)
+#define SEND_ERR_MASK (TXE + 0x0000000000E8)
+#define SEND_ERR_STATUS (TXE + 0x0000000000E0)
+#define SEND_ERR_STATUS_SEND_CSR_PARITY_ERR_SMASK 0x1ull
+#define SEND_ERR_STATUS_SEND_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
+#define SEND_ERR_STATUS_SEND_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
+#define SEND_HIGH_PRIORITY_LIMIT (TXE + 0x000000000030)
+#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK 0x3FFFull
+#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT 0
+#define SEND_HIGH_PRIORITY_LIST (TXE + 0x000000000180)
+#define SEND_LEN_CHECK0 (TXE + 0x0000000000D0)
+#define SEND_LEN_CHECK0_LEN_VL0_MASK 0xFFFull
+#define SEND_LEN_CHECK0_LEN_VL1_SHIFT 12
+#define SEND_LEN_CHECK1 (TXE + 0x0000000000D8)
+#define SEND_LEN_CHECK1_LEN_VL15_MASK 0xFFFull
+#define SEND_LEN_CHECK1_LEN_VL15_SHIFT 48
+#define SEND_LEN_CHECK1_LEN_VL4_MASK 0xFFFull
+#define SEND_LEN_CHECK1_LEN_VL5_SHIFT 12
+#define SEND_LOW_PRIORITY_LIST (TXE + 0x000000000100)
+#define SEND_LOW_PRIORITY_LIST_VL_MASK 0x7ull
+#define SEND_LOW_PRIORITY_LIST_VL_SHIFT 16
+#define SEND_LOW_PRIORITY_LIST_WEIGHT_MASK 0xFFull
+#define SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT 0
+#define SEND_PIO_ERR_CLEAR (TXE + 0x000000000050)
+#define SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
+#define SEND_PIO_ERR_MASK (TXE + 0x000000000048)
+#define SEND_PIO_ERR_STATUS (TXE + 0x000000000040)
+#define SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
+ 0x1000000ull
+#define SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK 0x8000ull
+#define SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK 0x4ull
+#define SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
+ 0x100000000ull
+#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK 0x100000ull
+#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK 0x80000ull
+#define SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
+#define SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
+ 0x200000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK 0x20ull
+#define SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
+ 0x400000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK 0x40ull
+#define SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK \
+ 0x800000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK 0x200ull
+#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK 0x40000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK 0x10000000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK 0x10000ull
+#define SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK 0x20000000ull
+#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK 0x8ull
+#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK 0x10ull
+#define SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK 0x80ull
+#define SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
+ 0x100ull
+#define SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK 0x400ull
+#define SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK 0x400000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK 0x8000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK 0x4000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK 0x2000000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK 0x2000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK 0x800ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK 0x4000ull
+#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK 0x1000ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK 0x2ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK 0x1ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK 0x200000ull
+#define SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK 0x800000ull
+#define SEND_PIO_INIT_CTXT (TXE + 0x000000000038)
+#define SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK 0x1ull
+#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK 0xFFull
+#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT 8
+#define SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK 0x8ull
+#define SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK 0x4ull
+#define SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK 0x2ull
+#define SEND_PIO_MEM_SIZE (TXE + 0x000000000020)
+#define SEND_SC2VLT0 (TXE + 0x0000000000B0)
+#define SEND_SC2VLT0_SC0_SHIFT 0
+#define SEND_SC2VLT0_SC1_SHIFT 8
+#define SEND_SC2VLT0_SC2_SHIFT 16
+#define SEND_SC2VLT0_SC3_SHIFT 24
+#define SEND_SC2VLT0_SC4_SHIFT 32
+#define SEND_SC2VLT0_SC5_SHIFT 40
+#define SEND_SC2VLT0_SC6_SHIFT 48
+#define SEND_SC2VLT0_SC7_SHIFT 56
+#define SEND_SC2VLT1 (TXE + 0x0000000000B8)
+#define SEND_SC2VLT1_SC10_SHIFT 16
+#define SEND_SC2VLT1_SC11_SHIFT 24
+#define SEND_SC2VLT1_SC12_SHIFT 32
+#define SEND_SC2VLT1_SC13_SHIFT 40
+#define SEND_SC2VLT1_SC14_SHIFT 48
+#define SEND_SC2VLT1_SC15_SHIFT 56
+#define SEND_SC2VLT1_SC8_SHIFT 0
+#define SEND_SC2VLT1_SC9_SHIFT 8
+#define SEND_SC2VLT2 (TXE + 0x0000000000C0)
+#define SEND_SC2VLT2_SC16_SHIFT 0
+#define SEND_SC2VLT2_SC17_SHIFT 8
+#define SEND_SC2VLT2_SC18_SHIFT 16
+#define SEND_SC2VLT2_SC19_SHIFT 24
+#define SEND_SC2VLT2_SC20_SHIFT 32
+#define SEND_SC2VLT2_SC21_SHIFT 40
+#define SEND_SC2VLT2_SC22_SHIFT 48
+#define SEND_SC2VLT2_SC23_SHIFT 56
+#define SEND_SC2VLT3 (TXE + 0x0000000000C8)
+#define SEND_SC2VLT3_SC24_SHIFT 0
+#define SEND_SC2VLT3_SC25_SHIFT 8
+#define SEND_SC2VLT3_SC26_SHIFT 16
+#define SEND_SC2VLT3_SC27_SHIFT 24
+#define SEND_SC2VLT3_SC28_SHIFT 32
+#define SEND_SC2VLT3_SC29_SHIFT 40
+#define SEND_SC2VLT3_SC30_SHIFT 48
+#define SEND_SC2VLT3_SC31_SHIFT 56
+#define SEND_STATIC_RATE_CONTROL (TXE + 0x0000000000A8)
+#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT 0
+#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK 0xFFFFull
+#define PCIE_CFG_REG_PL2 (PCIE + 0x000000000708)
+#define PCIE_CFG_REG_PL102 (PCIE + 0x000000000898)
+#define PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT 12
+#define PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT 6
+#define PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT 0
+#define PCIE_CFG_REG_PL103 (PCIE + 0x00000000089C)
+#define PCIE_CFG_REG_PL105 (PCIE + 0x0000000008A4)
+#define PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK 0x1ull
+#define PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT 24
+#define PCIE_CFG_REG_PL100 (PCIE + 0x000000000890)
+#define PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK 0x400ull
+#define PCIE_CFG_REG_PL101 (PCIE + 0x000000000894)
+#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT 6
+#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT 0
+#define PCIE_CFG_REG_PL106 (PCIE + 0x0000000008A8)
+#define PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT 8
+#define PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK 0x20ull
+#define PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK 0x10ull
+#define CCE_INT_BLOCKED (CCE + 0x000000110C00)
+#define SEND_DMA_IDLE_CNT (TXE + 0x000000200040)
+#define SEND_DMA_DESC_FETCHED_CNT (TXE + 0x000000200058)
+
+#endif /* DEF_CHIP_REG */
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h
new file mode 100644
index 000000000000..5f2293729cf9
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/common.h
@@ -0,0 +1,415 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _COMMON_H
+#define _COMMON_H
+
+#include <rdma/hfi/hfi1_user.h>
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* version of the protocol header (also known to the chip). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * HFI1_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fast path code
+ * HFI1_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in fast path code
+ * _HFI1_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ */
+
+/*
+ * If a packet's QP[23:16] bits match this value, then it is
+ * a PSM packet and the hardware will expect a KDETH header
+ * following the BTH.
+ */
+#define DEFAULT_KDETH_QP 0x80
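
A minimal sketch of the check the comment above describes, assuming the packet's 24-bit destination QPN has already been extracted from the BTH (the helper name is hypothetical; only DEFAULT_KDETH_QP and the QP[23:16] comparison come from this header):

/* Hypothetical helper: true when QP[23:16] matches DEFAULT_KDETH_QP,
 * i.e. the hardware will expect a KDETH header after the BTH.
 */
static inline int qpn_is_kdeth(u32 qpn)
{
	return ((qpn >> 16) & 0xff) == DEFAULT_KDETH_QP;
}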
+
+/* driver/hw feature set bitmask */
+#define HFI1_CAP_USER_SHIFT 24
+#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
+/* locked flag - if set, only HFI1_CAP_WRITABLE_MASK bits can be set */
+#define HFI1_CAP_LOCKED_SHIFT 63
+#define HFI1_CAP_LOCKED_MASK 0x1ULL
+#define HFI1_CAP_LOCKED_SMASK (HFI1_CAP_LOCKED_MASK << HFI1_CAP_LOCKED_SHIFT)
+/* extra bits used between kernel and user processes */
+#define HFI1_CAP_MISC_SHIFT (HFI1_CAP_USER_SHIFT * 2)
+#define HFI1_CAP_MISC_MASK ((1ULL << (HFI1_CAP_LOCKED_SHIFT - \
+ HFI1_CAP_MISC_SHIFT)) - 1)
+
+#define HFI1_CAP_KSET(cap) ({ hfi1_cap_mask |= HFI1_CAP_##cap; hfi1_cap_mask; })
+#define HFI1_CAP_KCLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~HFI1_CAP_##cap; \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_USET(cap) \
+ ({ \
+ hfi1_cap_mask |= (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_UCLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_SET(cap) \
+ ({ \
+ hfi1_cap_mask |= (HFI1_CAP_##cap | (HFI1_CAP_##cap << \
+ HFI1_CAP_USER_SHIFT)); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_CLEAR(cap) \
+ ({ \
+ hfi1_cap_mask &= ~(HFI1_CAP_##cap | \
+ (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT)); \
+ hfi1_cap_mask; \
+ })
+#define HFI1_CAP_LOCK() \
+ ({ hfi1_cap_mask |= HFI1_CAP_LOCKED_SMASK; hfi1_cap_mask; })
+#define HFI1_CAP_LOCKED() (!!(hfi1_cap_mask & HFI1_CAP_LOCKED_SMASK))
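
A sketch of how the macros above compose, assuming the global hfi1_cap_mask (u64) that they operate on is in scope and HFI1_CAP_SDMA is one of the capability bits from the included hfi1_user.h:

/* Example only: enable SDMA for both kernel and user contexts, then
 * lock the mask; after HFI1_CAP_LOCK() only HFI1_CAP_WRITABLE_MASK
 * bits (defined below) may still change.
 */
HFI1_CAP_SET(SDMA);
HFI1_CAP_LOCK();
if (HFI1_CAP_LOCKED())
	;	/* subsequent updates are restricted */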
+/*
+ * The set of capability bits that can be changed after initial load
+ * This set is the same for kernel and user contexts. However, for
+ * user contexts, the set can be further filtered by using the
+ * HFI1_CAP_RESERVED_MASK bits.
+ */
+#define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \
+ HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_NODROP_RHQ_FULL | \
+ HFI1_CAP_NODROP_EGR_FULL | \
+ HFI1_CAP_ALLOW_PERM_JKEY | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PRINT_UNIMPL)
+/*
+ * A set of capability bits that are "global" and are not allowed to be
+ * set in the user bitmask.
+ */
+#define HFI1_CAP_RESERVED_MASK ((HFI1_CAP_SDMA | \
+ HFI1_CAP_USE_SDMA_HEAD | \
+ HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_QSFP_ENABLED | \
+ HFI1_CAP_NO_INTEGRITY | \
+ HFI1_CAP_PKEY_CHECK) << \
+ HFI1_CAP_USER_SHIFT)
+/*
+ * Set of capabilities that need to be enabled for kernel context in
+ * order to be allowed for user contexts, as well.
+ */
+#define HFI1_CAP_MUST_HAVE_KERN (HFI1_CAP_STATIC_RATE_CTRL)
+/* Default enabled capabilities (both kernel and user) */
+#define HFI1_CAP_MASK_DEFAULT (HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_NODROP_RHQ_FULL | \
+ HFI1_CAP_NODROP_EGR_FULL | \
+ HFI1_CAP_SDMA | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_QSFP_ENABLED | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_EXTENDED_PSN | \
+ ((HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_EARLY_CREDIT_RETURN) << \
+ HFI1_CAP_USER_SHIFT))
+/*
+ * A bitmask of kernel/global capabilities that should be communicated
+ * to user level processes.
+ */
+#define HFI1_CAP_K2U (HFI1_CAP_SDMA | \
+ HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_PKEY_CHECK | \
+ HFI1_CAP_NO_INTEGRITY)
+
+#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
+
+#ifndef HFI1_KERN_TYPE
+#define HFI1_KERN_TYPE 0
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of an Intel release or comes from openfabrics.org, kernel.org,
+ * or a standard distribution, for support reasons.
+ * The high bit is 0 for non-Intel and 1 for Intel-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of hfi1_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+ */
+#define HFI1_KERN_SWVERSION ((HFI1_KERN_TYPE << 31) | HFI1_USER_SWVERSION)
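
For illustration, a user process can unpack the value it reads back in spi_sw_version like this (a sketch; the field widths follow directly from the shifts above, with HFI1_USER_SWMAJOR/HFI1_USER_SWMINOR coming from rdma/hfi/hfi1_user.h):

/* Decode HFI1_KERN_SWVERSION (example only). */
static inline void decode_sw_version(u32 v, int *intel_built,
				     u16 *major, u16 *minor)
{
	*intel_built = (v >> 31) & 1;	/* HFI1_KERN_TYPE bit */
	*major = (v >> 16) & 0x7fff;	/* HFI1_USER_SWMAJOR field */
	*minor = v & 0xffff;		/* HFI1_USER_SWMINOR field */
}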
+
+/*
+ * Define the driver version number. This is something that refers only
+ * to the driver itself, not the software interfaces it supports.
+ */
+#ifndef HFI1_DRIVER_VERSION_BASE
+#define HFI1_DRIVER_VERSION_BASE "0.9-248"
+#endif
+
+/* create the final driver version string */
+#ifdef HFI1_IDSTR
+#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE " " HFI1_IDSTR
+#else
+#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE
+#endif
+
+/*
+ * Diagnostics can send a packet by writing the following
+ * struct to the diag packet special file.
+ *
+ * This allows a custom PBC qword, so that special modes and deliberate
+ * changes to CRCs can be used.
+ */
+#define _DIAG_PKT_VERS 1
+struct diag_pkt {
+ __u16 version; /* structure version */
+ __u16 unit; /* which device */
+ __u16 sw_index; /* send sw index to use */
+ __u16 len; /* data length, in bytes */
+ __u16 port; /* port number */
+ __u16 unused;
+ __u32 flags; /* call flags */
+ __u64 data; /* user data pointer */
+ __u64 pbc; /* PBC for the packet */
+};
+
+/* diag_pkt flags */
+#define F_DIAGPKT_WAIT 0x1 /* wait until packet is sent */
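
A sketch of driving the diag interface from user space. The file descriptor and buffer names are assumptions (the special file itself is created elsewhere in the driver); the struct layout, version, and flag come from this header:

/* Send one diagnostic packet and wait until it is sent (example only;
 * diag_fd, buf, pkt_len and pbc_qword are hypothetical).
 */
struct diag_pkt dp = {
	.version  = _DIAG_PKT_VERS,
	.unit     = 0,
	.sw_index = 0,
	.len      = pkt_len,		/* bytes of payload at 'data' */
	.port     = 1,
	.flags    = F_DIAGPKT_WAIT,
	.data     = (__u64)(uintptr_t)buf,
	.pbc      = pbc_qword,		/* custom PBC qword, per the comment */
};
write(diag_fd, &dp, sizeof(dp));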
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/*
+ * Receive Header Flags
+ */
+#define RHF_PKT_LEN_SHIFT 0
+#define RHF_PKT_LEN_MASK 0xfffull
+#define RHF_PKT_LEN_SMASK (RHF_PKT_LEN_MASK << RHF_PKT_LEN_SHIFT)
+
+#define RHF_RCV_TYPE_SHIFT 12
+#define RHF_RCV_TYPE_MASK 0x7ull
+#define RHF_RCV_TYPE_SMASK (RHF_RCV_TYPE_MASK << RHF_RCV_TYPE_SHIFT)
+
+#define RHF_USE_EGR_BFR_SHIFT 15
+#define RHF_USE_EGR_BFR_MASK 0x1ull
+#define RHF_USE_EGR_BFR_SMASK (RHF_USE_EGR_BFR_MASK << RHF_USE_EGR_BFR_SHIFT)
+
+#define RHF_EGR_INDEX_SHIFT 16
+#define RHF_EGR_INDEX_MASK 0x7ffull
+#define RHF_EGR_INDEX_SMASK (RHF_EGR_INDEX_MASK << RHF_EGR_INDEX_SHIFT)
+
+#define RHF_DC_INFO_SHIFT 27
+#define RHF_DC_INFO_MASK 0x1ull
+#define RHF_DC_INFO_SMASK (RHF_DC_INFO_MASK << RHF_DC_INFO_SHIFT)
+
+#define RHF_RCV_SEQ_SHIFT 28
+#define RHF_RCV_SEQ_MASK 0xfull
+#define RHF_RCV_SEQ_SMASK (RHF_RCV_SEQ_MASK << RHF_RCV_SEQ_SHIFT)
+
+#define RHF_EGR_OFFSET_SHIFT 32
+#define RHF_EGR_OFFSET_MASK 0xfffull
+#define RHF_EGR_OFFSET_SMASK (RHF_EGR_OFFSET_MASK << RHF_EGR_OFFSET_SHIFT)
+#define RHF_HDRQ_OFFSET_SHIFT 44
+#define RHF_HDRQ_OFFSET_MASK 0x1ffull
+#define RHF_HDRQ_OFFSET_SMASK (RHF_HDRQ_OFFSET_MASK << RHF_HDRQ_OFFSET_SHIFT)
+#define RHF_K_HDR_LEN_ERR (0x1ull << 53)
+#define RHF_DC_UNC_ERR (0x1ull << 54)
+#define RHF_DC_ERR (0x1ull << 55)
+#define RHF_RCV_TYPE_ERR_SHIFT 56
+#define RHF_RCV_TYPE_ERR_MASK 0x7ul
+#define RHF_RCV_TYPE_ERR_SMASK (RHF_RCV_TYPE_ERR_MASK << RHF_RCV_TYPE_ERR_SHIFT)
+#define RHF_TID_ERR (0x1ull << 59)
+#define RHF_LEN_ERR (0x1ull << 60)
+#define RHF_ECC_ERR (0x1ull << 61)
+#define RHF_VCRC_ERR (0x1ull << 62)
+#define RHF_ICRC_ERR (0x1ull << 63)
+
+#define RHF_ERROR_SMASK 0xffe0000000000000ull /* bits 63:53 */
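
The SHIFT/MASK/SMASK triples above follow one convention: MASK covers the field width at bit 0 and SMASK is that mask shifted into place, so every field is extracted as (rhf >> SHIFT) & MASK. A sketch (the macro name is only for illustration):

/* Generic RHF field extraction following the convention above, e.g.
 * RHF_FIELD(rhf, PKT_LEN) == ((rhf) >> 0) & 0xfff.
 */
#define RHF_FIELD(rhf, name) \
	(((rhf) >> RHF_##name##_SHIFT) & RHF_##name##_MASK)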
+
+/* RHF receive types */
+#define RHF_RCV_TYPE_EXPECTED 0
+#define RHF_RCV_TYPE_EAGER 1
+#define RHF_RCV_TYPE_IB 2 /* normal IB, IB Raw, or IPv6 */
+#define RHF_RCV_TYPE_ERROR 3
+#define RHF_RCV_TYPE_BYPASS 4
+#define RHF_RCV_TYPE_INVALID5 5
+#define RHF_RCV_TYPE_INVALID6 6
+#define RHF_RCV_TYPE_INVALID7 7
+
+/* RHF receive type error - expected packet errors */
+#define RHF_RTE_EXPECTED_FLOW_SEQ_ERR 0x2
+#define RHF_RTE_EXPECTED_FLOW_GEN_ERR 0x4
+
+/* RHF receive type error - eager packet errors */
+#define RHF_RTE_EAGER_NO_ERR 0x0
+
+/* RHF receive type error - IB packet errors */
+#define RHF_RTE_IB_NO_ERR 0x0
+
+/* RHF receive type error - error packet errors */
+#define RHF_RTE_ERROR_NO_ERR 0x0
+#define RHF_RTE_ERROR_OP_CODE_ERR 0x1
+#define RHF_RTE_ERROR_KHDR_MIN_LEN_ERR 0x2
+#define RHF_RTE_ERROR_KHDR_HCRC_ERR 0x3
+#define RHF_RTE_ERROR_KHDR_KVER_ERR 0x4
+#define RHF_RTE_ERROR_CONTEXT_ERR 0x5
+#define RHF_RTE_ERROR_KHDR_TID_ERR 0x6
+
+/* RHF receive type error - bypass packet errors */
+#define RHF_RTE_BYPASS_NO_ERR 0x0
+
+/*
+ * This structure contains the first field common to all protocols
+ * that employ this chip.
+ */
+struct hfi1_message_header {
+ __be16 lrh[4];
+};
+
+/* IB - LRH header constants */
+#define HFI1_LRH_GRH 0x0003 /* first word of IB LRH - next header: GRH */
+#define HFI1_LRH_BTH 0x0002 /* first word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define LIM_MGMT_P_KEY 0x7FFF
+#define FULL_MGMT_P_KEY 0xFFFF
+
+#define DEFAULT_P_KEY LIM_MGMT_P_KEY
+#define HFI1_PERMISSIVE_LID 0xFFFF
+#define HFI1_AETH_CREDIT_SHIFT 24
+#define HFI1_AETH_CREDIT_MASK 0x1F
+#define HFI1_AETH_CREDIT_INVAL 0x1F
+#define HFI1_MSN_MASK 0xFFFFFF
+#define HFI1_QPN_MASK 0xFFFFFF
+#define HFI1_FECN_SHIFT 31
+#define HFI1_FECN_MASK 1
+#define HFI1_FECN_SMASK (1 << HFI1_FECN_SHIFT)
+#define HFI1_BECN_SHIFT 30
+#define HFI1_BECN_MASK 1
+#define HFI1_BECN_SMASK (1 << HFI1_BECN_SHIFT)
+#define HFI1_MULTICAST_LID_BASE 0xC000
+
+static inline __u64 rhf_to_cpu(const __le32 *rbuf)
+{
+ return __le64_to_cpu(*((__le64 *)rbuf));
+}
+
+static inline u64 rhf_err_flags(u64 rhf)
+{
+ return rhf & RHF_ERROR_SMASK;
+}
+
+static inline u32 rhf_rcv_type(u64 rhf)
+{
+ return (rhf >> RHF_RCV_TYPE_SHIFT) & RHF_RCV_TYPE_MASK;
+}
+
+static inline u32 rhf_rcv_type_err(u64 rhf)
+{
+ return (rhf >> RHF_RCV_TYPE_ERR_SHIFT) & RHF_RCV_TYPE_ERR_MASK;
+}
+
+/* returned size is in bytes, not DWORDs */
+static inline u32 rhf_pkt_len(u64 rhf)
+{
+ return ((rhf & RHF_PKT_LEN_SMASK) >> RHF_PKT_LEN_SHIFT) << 2;
+}
+
+static inline u32 rhf_egr_index(u64 rhf)
+{
+ return (rhf >> RHF_EGR_INDEX_SHIFT) & RHF_EGR_INDEX_MASK;
+}
+
+static inline u32 rhf_rcv_seq(u64 rhf)
+{
+ return (rhf >> RHF_RCV_SEQ_SHIFT) & RHF_RCV_SEQ_MASK;
+}
+
+/* returned offset is in DWORDs */
+static inline u32 rhf_hdrq_offset(u64 rhf)
+{
+ return (rhf >> RHF_HDRQ_OFFSET_SHIFT) & RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline u64 rhf_use_egr_bfr(u64 rhf)
+{
+ return rhf & RHF_USE_EGR_BFR_SMASK;
+}
+
+static inline u64 rhf_dc_info(u64 rhf)
+{
+ return rhf & RHF_DC_INFO_SMASK;
+}
+
+static inline u32 rhf_egr_buf_offset(u64 rhf)
+{
+ return (rhf >> RHF_EGR_OFFSET_SHIFT) & RHF_EGR_OFFSET_MASK;
+}
+#endif /* _COMMON_H */
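
Putting the accessors together, a receive path might classify one header queue entry roughly as follows (a sketch only: rhf_addr and the two handler functions are hypothetical; the accessors are the inline helpers defined above):

u64 rhf = rhf_to_cpu(rhf_addr);	/* rhf_addr: __le32 * into the hdrq */

if (rhf_err_flags(rhf)) {
	handle_rcv_error(rhf);		/* hypothetical error path */
} else {
	switch (rhf_rcv_type(rhf)) {
	case RHF_RCV_TYPE_EXPECTED:
	case RHF_RCV_TYPE_EAGER:
	case RHF_RCV_TYPE_IB:
		process_packet(rhf_pkt_len(rhf),	/* bytes */
			       rhf_egr_index(rhf));
		break;
	default:
		break;			/* bypass/invalid types */
	}
}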
diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/staging/rdma/hfi1/cq.c
new file mode 100644
index 000000000000..4f046ffe7e60
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/cq.c
@@ -0,0 +1,558 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/kthread.h>
+
+#include "verbs.h"
+#include "hfi.h"
+
+/**
+ * hfi1_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */
+void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
+{
+ struct hfi1_cq_wc *wc;
+ unsigned long flags;
+ u32 head;
+ u32 next;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ /*
+ * Note that the head pointer might be writable by user processes.
+ * Take care to verify it is a sane value.
+ */
+ wc = cq->queue;
+ head = wc->head;
+ if (head >= (unsigned) cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
+ next = 0;
+ } else
+ next = head + 1;
+ if (unlikely(next == wc->tail)) {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return;
+ }
+ if (cq->ip) {
+ wc->uqueue[head].wr_id = entry->wr_id;
+ wc->uqueue[head].status = entry->status;
+ wc->uqueue[head].opcode = entry->opcode;
+ wc->uqueue[head].vendor_err = entry->vendor_err;
+ wc->uqueue[head].byte_len = entry->byte_len;
+ wc->uqueue[head].ex.imm_data =
+ (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].qp_num = entry->qp->qp_num;
+ wc->uqueue[head].src_qp = entry->src_qp;
+ wc->uqueue[head].wc_flags = entry->wc_flags;
+ wc->uqueue[head].pkey_index = entry->pkey_index;
+ wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].sl = entry->sl;
+ wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ wc->uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ smp_wmb();
+ } else
+ wc->kqueue[head] = *entry;
+ wc->head = next;
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED &&
+ (solicited || entry->status != IB_WC_SUCCESS))) {
+ struct kthread_worker *worker;
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ smp_read_barrier_depends(); /* see hfi1_cq_exit */
+ worker = cq->dd->worker;
+ if (likely(worker)) {
+ cq->notify = IB_CQ_NONE;
+ cq->triggered++;
+ queue_kthread_work(worker, &cq->comptask);
+ }
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
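
Note the ring sizing that makes the full test above work: the queue is allocated with cqe + 1 slots (see hfi1_create_cq() below), so head == tail means empty and advancing head onto tail means full, with no separate element count. In miniature:

/* With cqe == 3 the ring has 4 slots; indices run 0..3 and wrap:
 *   empty: head == tail
 *   full:  ((head >= cqe) ? 0 : head + 1) == tail
 * so at most cqe completions are ever stored.
 */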
+
+/**
+ * hfi1_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct hfi1_cq *cq = to_icq(ibcq);
+ struct hfi1_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip) {
+ npolled = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need an RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+bail:
+ return npolled;
+}
+
+static void send_complete(struct kthread_work *work)
+{
+ struct hfi1_cq *cq = container_of(work, struct hfi1_cq, comptask);
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, queue_kthread_work()
+ * won't call us again until we return so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ /*
+ * IPoIB connected mode assumes the callback is from a
+ * soft IRQ. We simulate this by blocking "bottom halves".
+ * See the implementation for ipoib_cm_handle_tx_wc(),
+ * netif_tx_lock_bh() and netif_tx_lock().
+ */
+ local_bh_disable();
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ local_bh_enable();
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * hfi1_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @attr: creation attributes
+ * @context: unused by the driver
+ * @udata: user data for libibverbs.so
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *hfi1_create_cq(
+ struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hfi1_ibdev *dev = to_idev(ibdev);
+ struct hfi1_cq *cq;
+ struct hfi1_cq_wc *wc;
+ struct ib_cq *ret;
+ u32 sz;
+ unsigned int entries = attr->cqe;
+
+ if (attr->flags)
+ return ERR_PTR(-EINVAL);
+
+ if (entries < 1 || entries > hfi1_max_cqes)
+ return ERR_PTR(-EINVAL);
+
+ /* Allocate the completion queue structure. */
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ else
+ sz += sizeof(struct ib_wc) * (entries + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See hfi1_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+
+ cq->ip = hfi1_create_mmap_info(dev, sz, context, wc);
+ if (!cq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wc;
+ }
+
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ cq->ip = NULL;
+
+ spin_lock(&dev->n_cqs_lock);
+ if (dev->n_cqs_allocated == hfi1_max_cqs) {
+ spin_unlock(&dev->n_cqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_cqs_allocated++;
+ spin_unlock(&dev->n_cqs_lock);
+
+ if (cq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+ * The number of entries must be >= the number requested, or an
+ * error is returned.
+ */
+ cq->dd = dd_from_dev(dev);
+ cq->ibcq.cqe = entries;
+ cq->notify = IB_CQ_NONE;
+ cq->triggered = 0;
+ spin_lock_init(&cq->lock);
+ init_kthread_work(&cq->comptask, send_complete);
+ wc->head = 0;
+ wc->tail = 0;
+ cq->queue = wc;
+
+ ret = &cq->ibcq;
+
+ goto done;
+
+bail_ip:
+ kfree(cq->ip);
+bail_wc:
+ vfree(wc);
+bail_cq:
+ kfree(cq);
+done:
+ return ret;
+}
+
+/**
+ * hfi1_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int hfi1_destroy_cq(struct ib_cq *ibcq)
+{
+ struct hfi1_ibdev *dev = to_idev(ibcq->device);
+ struct hfi1_cq *cq = to_icq(ibcq);
+
+ flush_kthread_work(&cq->comptask);
+ spin_lock(&dev->n_cqs_lock);
+ dev->n_cqs_allocated--;
+ spin_unlock(&dev->n_cqs_lock);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, hfi1_release_mmap_info);
+ else
+ vfree(cq->queue);
+ kfree(cq);
+
+ return 0;
+}
+
+/**
+ * hfi1_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct hfi1_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->queue->head != cq->queue->tail)
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * hfi1_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new size of the completion queue
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 for success.
+ */
+int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct hfi1_cq *cq = to_icq(ibcq);
+ struct hfi1_cq_wc *old_wc;
+ struct hfi1_cq_wc *wc;
+ u32 head, tail, n;
+ int ret;
+ u32 sz;
+
+ if (cqe < 1 || cqe > hfi1_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large numbers of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ else
+ sz += sizeof(struct ib_wc) * (cqe + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&cq->lock);
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ old_wc = cq->queue;
+ head = old_wc->head;
+ if (head > (u32) cq->ibcq.cqe)
+ head = (u32) cq->ibcq.cqe;
+ tail = old_wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
+ else
+ n = head - tail;
+ if (unlikely((u32)cqe < n)) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ for (n = 0; tail != head; n++) {
+ if (cq->ip)
+ wc->uqueue[n] = old_wc->uqueue[tail];
+ else
+ wc->kqueue[n] = old_wc->kqueue[tail];
+ if (tail == (u32) cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ wc->head = n;
+ wc->tail = 0;
+ cq->queue = wc;
+ spin_unlock_irq(&cq->lock);
+
+ vfree(old_wc);
+
+ if (cq->ip) {
+ struct hfi1_ibdev *dev = to_idev(ibcq->device);
+ struct hfi1_mmap_info *ip = cq->ip;
+
+ hfi1_update_mmap_info(dev, ip, sz, wc);
+
+ /*
+ * Return the offset to mmap.
+ * See hfi1_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = 0;
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
+bail:
+ return ret;
+}
+
+int hfi1_cq_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+ int cpu;
+ struct task_struct *task;
+
+ if (dd->worker)
+ return 0;
+ dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
+ if (!dd->worker)
+ return -ENOMEM;
+ init_kthread_worker(dd->worker);
+ task = kthread_create_on_node(
+ kthread_worker_fn,
+ dd->worker,
+ dd->assigned_node_id,
+ "hfi1_cq%d", dd->unit);
+ if (IS_ERR(task))
+ goto task_fail;
+ cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+out:
+ return ret;
+task_fail:
+ ret = PTR_ERR(task);
+ kfree(dd->worker);
+ dd->worker = NULL;
+ goto out;
+}
+
+void hfi1_cq_exit(struct hfi1_devdata *dd)
+{
+ struct kthread_worker *worker;
+
+ worker = dd->worker;
+ if (!worker)
+ return;
+ /* blocks future queuing from send_complete() */
+ dd->worker = NULL;
+ smp_wmb(); /* See hfi1_cq_enter */
+ flush_kthread_worker(worker);
+ kthread_stop(worker->task);
+ kfree(worker);
+}
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c
new file mode 100644
index 000000000000..acd2269e9f14
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/debugfs.c
@@ -0,0 +1,899 @@
+#ifdef CONFIG_DEBUG_FS
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+
+#include "hfi.h"
+#include "debugfs.h"
+#include "device.h"
+#include "qp.h"
+#include "sdma.h"
+
+static struct dentry *hfi1_dbg_root;
+
+#define private2dd(file) (file_inode(file)->i_private)
+#define private2ppd(file) (file_inode(file)->i_private)
+
+#define DEBUGFS_SEQ_FILE_OPS(name) \
+static const struct seq_operations _##name##_seq_ops = { \
+ .start = _##name##_seq_start, \
+ .next = _##name##_seq_next, \
+ .stop = _##name##_seq_stop, \
+ .show = _##name##_seq_show \
+}
+#define DEBUGFS_SEQ_FILE_OPEN(name) \
+static int _##name##_open(struct inode *inode, struct file *s) \
+{ \
+ struct seq_file *seq; \
+ int ret; \
+ ret = seq_open(s, &_##name##_seq_ops); \
+ if (ret) \
+ return ret; \
+ seq = s->private_data; \
+ seq->private = inode->i_private; \
+ return 0; \
+}
+
+#define DEBUGFS_FILE_OPS(name) \
+static const struct file_operations _##name##_file_ops = { \
+ .owner = THIS_MODULE, \
+ .open = _##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release \
+}
+
+#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
+do { \
+ struct dentry *ent; \
+ ent = debugfs_create_file(name, mode, parent, \
+ data, ops); \
+ if (!ent) \
+ pr_warn("create of %s failed\n", name); \
+} while (0)
+
+#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
+ DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
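+
+/*
+ * Illustrative expansion (a sketch): for a name like "foo",
+ * DEBUGFS_SEQ_FILE_OPS(foo), DEBUGFS_SEQ_FILE_OPEN(foo) and
+ * DEBUGFS_FILE_OPS(foo) together generate roughly:
+ *
+ *     static const struct seq_operations _foo_seq_ops = { ... };
+ *     static int _foo_open(struct inode *inode, struct file *s);
+ *     static const struct file_operations _foo_file_ops = { ... };
+ *
+ * so a caller only has to supply the four _foo_seq_* callbacks and
+ * then call DEBUGFS_SEQ_FILE_CREATE(foo, parent, data).
+ */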
+
+static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+__acquires(RCU)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ rcu_read_lock();
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int _opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos, j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ for (j = 0; j < dd->first_user_ctxt; j++) {
+ if (!dd->rcd[j])
+ continue;
+ n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
+ n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
+ }
+ if (!n_packets && !n_bytes)
+ return SEQ_SKIP;
+ seq_printf(s, "%02llx %llu/%llu\n", i,
+ (unsigned long long) n_packets,
+ (unsigned long long) n_bytes);
+
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(opcode_stats);
+DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
+DEBUGFS_FILE_OPS(opcode_stats);
+
+static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ if (!*pos)
+ return SEQ_START_TOKEN;
+ if (*pos >= dd->first_user_ctxt)
+ return NULL;
+ return pos;
+}
+
+static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ if (v == SEQ_START_TOKEN)
+ return pos;
+
+ ++*pos;
+ if (*pos >= dd->first_user_ctxt)
+ return NULL;
+ return pos;
+}
+
+static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
+{
+ /* nothing allocated */
+}
+
+static int _ctx_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos;
+ loff_t i, j;
+ u64 n_packets = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(s, "Ctx:npkts\n");
+ return 0;
+ }
+
+ spos = v;
+ i = *spos;
+
+ if (!dd->rcd[i])
+ return SEQ_SKIP;
+
+ for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
+ n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
+
+ if (!n_packets)
+ return SEQ_SKIP;
+
+ seq_printf(s, " %llu:%llu\n", i, n_packets);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(ctx_stats);
+DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
+DEBUGFS_FILE_OPS(ctx_stats);
+
+static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+__acquires(RCU)
+{
+ struct qp_iter *iter;
+ loff_t n = *pos;
+
+ rcu_read_lock();
+ iter = qp_iter_init(s->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ loff_t *pos)
+{
+ struct qp_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+__releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
+{
+ struct qp_iter *iter = iter_ptr;
+
+ if (!iter)
+ return 0;
+
+ qp_iter_print(s, iter);
+
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(qp_stats);
+DEBUGFS_SEQ_FILE_OPEN(qp_stats)
+DEBUGFS_FILE_OPS(qp_stats);
+
+static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
+__acquires(RCU)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ rcu_read_lock();
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->per_sdma || *pos >= dd->num_sdma)
+ return NULL;
+ return pos;
+}
+
+static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->per_sdma || *pos >= dd->num_sdma)
+ return NULL;
+ return pos;
+}
+
+static void _sdes_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int _sdes_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ sdma_seqfile_dump_sde(s, &dd->per_sdma[i]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(sdes);
+DEBUGFS_SEQ_FILE_OPEN(sdes)
+DEBUGFS_FILE_OPS(sdes);
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ rcu_read_lock();
+ dd = private2dd(file);
+ avail = hfi1_read_cntrs(dd, *ppos, NULL, &counters);
+ rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
+ rcu_read_unlock();
+ return rval;
+}
+
+/* read the per-device counter names */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ rcu_read_lock();
+ dd = private2dd(file);
+ avail = hfi1_read_cntrs(dd, *ppos, &names, NULL);
+ rval = simple_read_from_buffer(buf, count, ppos, names, avail);
+ rcu_read_unlock();
+ return rval;
+}
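+
+/*
+ * Illustrative user-space pairing (a sketch, not part of the driver):
+ * "counters" is served as a raw buffer of 64-bit counter values while
+ * "counter_names" is the matching text list, so a reader might do:
+ *
+ *     n = pread(cfd, vals, sizeof(vals), 0) / sizeof(__u64);
+ *     // split the text from "counter_names" on '\n' and
+ *     // print name[i] next to vals[i] for i < n
+ */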
+
+struct counter_info {
+ char *name;
+ const struct file_operations ops;
+};
+
+/*
+ * Could use file_inode(file)->i_ino to figure out which file,
+ * instead of separate routine for each, but for now, this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ ssize_t rval;
+
+ rcu_read_lock();
+ dd = private2dd(file);
+ /* port number n/a here since names are constant */
+ avail = hfi1_read_portcntrs(dd, *ppos, 0, &names, NULL);
+ rval = simple_read_from_buffer(buf, count, ppos, names, avail);
+ rcu_read_unlock();
+ return rval;
+}
+
+/* read the per-port counters */
+static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ ssize_t rval;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+ dd = ppd->dd;
+ avail = hfi1_read_portcntrs(dd, *ppos, ppd->port - 1, NULL, &counters);
+ rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
+ rcu_read_unlock();
+ return rval;
+}
+
+/*
+ * read the per-port QSFP data for ppd
+ */
+static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ char *tmp;
+ int ret;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+
+ ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ rcu_read_unlock();
+ kfree(tmp);
+ return ret;
+}
+
+/* Do an i2c write operation on the chain for the given HFI. */
+static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int i2c_addr;
+ int offset;
+ int total_written;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto _return;
+ }
+
+ ret = copy_from_user(buff, buf, count);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ i2c_addr = (*ppos >> 16) & 0xff;
+ offset = *ppos & 0xffff;
+
+ total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count);
+ if (total_written < 0) {
+ ret = total_written;
+ goto _free;
+ }
+
+ *ppos += total_written;
+
+ ret = total_written;
+
+ _free:
+ kfree(buff);
+ _return:
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Do an i2c write operation on chain for HFI 0. */
+static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_write(file, buf, count, ppos, 0);
+}
+
+/* Do an i2c write operation on chain for HFI 1. */
+static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_write(file, buf, count, ppos, 1);
+}
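+
+/*
+ * Illustrative addressing (a sketch): the target i2c address and the
+ * offset within it travel in the file position, so user space seeks
+ * before reading or writing:
+ *
+ *     off_t pos = ((off_t)i2c_addr << 16) | (offset & 0xffff);
+ *     lseek(fd, pos, SEEK_SET);
+ *
+ * matching the (*ppos >> 16) & 0xff and *ppos & 0xffff split above.
+ */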
+
+/* Do an i2c read operation on the chain for the given HFI. */
+static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int i2c_addr;
+ int offset;
+ int total_read;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto _return;
+ }
+
+ i2c_addr = (*ppos >> 16) & 0xff;
+ offset = *ppos & 0xffff;
+
+ total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
+ if (total_read < 0) {
+ ret = total_read;
+ goto _free;
+ }
+
+ *ppos += total_read;
+
+ ret = copy_to_user(buf, buff, total_read);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ ret = total_read;
+
+ _free:
+ kfree(buff);
+ _return:
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Do an i2c read operation on chain for HFI 0. */
+static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_read(file, buf, count, ppos, 0);
+}
+
+/* Do an i2c read operation on chain for HFI 1. */
+static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __i2c_debugfs_read(file, buf, count, ppos, 1);
+}
+
+/* Do a QSFP write operation on the i2c chain for the given HFI. */
+static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int total_written;
+
+ rcu_read_lock();
+ if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
+ ret = -EINVAL;
+ goto _return;
+ }
+
+ ppd = private2ppd(file);
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto _return;
+ }
+
+ ret = copy_from_user(buff, buf, count);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ total_written = qsfp_write(ppd, target, *ppos, buff, count);
+ if (total_written < 0) {
+ ret = total_written;
+ goto _free;
+ }
+
+ *ppos += total_written;
+
+ ret = total_written;
+
+ _free:
+ kfree(buff);
+ _return:
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Do a QSFP write operation on i2c chain for HFI 0. */
+static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_write(file, buf, count, ppos, 0);
+}
+
+/* Do a QSFP write operation on i2c chain for HFI 1. */
+static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_write(file, buf, count, ppos, 1);
+}
+
+/* Do a QSFP read operation on the i2c chain for the given HFI. */
+static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ char *buff;
+ int ret;
+ int total_read;
+
+ rcu_read_lock();
+ if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
+ ret = -EINVAL;
+ goto _return;
+ }
+
+ ppd = private2ppd(file);
+
+ buff = kmalloc(count, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto _return;
+ }
+
+ total_read = qsfp_read(ppd, target, *ppos, buff, count);
+ if (total_read < 0) {
+ ret = total_read;
+ goto _free;
+ }
+
+ *ppos += total_read;
+
+ ret = copy_to_user(buf, buff, total_read);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto _free;
+ }
+
+ ret = total_read;
+
+ _free:
+ kfree(buff);
+ _return:
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Do a QSFP read operation on i2c chain for HFI 0. */
+static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_read(file, buf, count, ppos, 0);
+}
+
+/* Do a QSFP read operation on i2c chain for HFI 1. */
+static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __qsfp_debugfs_read(file, buf, count, ppos, 1);
+}
+
+#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
+{ \
+ .name = nm, \
+ .ops = { \
+ .read = readroutine, \
+ .write = writeroutine, \
+ .llseek = generic_file_llseek, \
+ }, \
+}
+
+static const struct counter_info cntr_ops[] = {
+ DEBUGFS_OPS("counter_names", dev_names_read, NULL),
+ DEBUGFS_OPS("counters", dev_counters_read, NULL),
+ DEBUGFS_OPS("portcounter_names", portnames_read, NULL),
+};
+
+static const struct counter_info port_cntr_ops[] = {
+ DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL),
+ DEBUGFS_OPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write),
+ DEBUGFS_OPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write),
+ DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL),
+ DEBUGFS_OPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write),
+ DEBUGFS_OPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write),
+};
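+
+/*
+ * Entries above containing a %d are per-port name templates; they are
+ * expanded with the 1-based port number in hfi1_dbg_ibdev_init() below,
+ * e.g. snprintf(name, sizeof(name), "port%dcounters", 1) yields
+ * "port1counters" for the first port.
+ */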
+
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
+{
+ char name[sizeof("port0counters") + 1];
+ char link[10];
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_pportdata *ppd;
+ int unit = dd->unit;
+ int i, j;
+
+ if (!hfi1_dbg_root)
+ return;
+ snprintf(name, sizeof(name), "%s_%d", class_name(), unit);
+ snprintf(link, sizeof(link), "%d", unit);
+ ibd->hfi1_ibdev_dbg = debugfs_create_dir(name, hfi1_dbg_root);
+ if (!ibd->hfi1_ibdev_dbg) {
+ pr_warn("create of %s failed\n", name);
+ return;
+ }
+ ibd->hfi1_ibdev_link =
+ debugfs_create_symlink(link, hfi1_dbg_root, name);
+ if (!ibd->hfi1_ibdev_link) {
+ pr_warn("create of %s symlink failed\n", name);
+ return;
+ }
+ DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
+ /* dev counter files */
+ for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
+ DEBUGFS_FILE_CREATE(cntr_ops[i].name,
+ ibd->hfi1_ibdev_dbg,
+ dd,
+ &cntr_ops[i].ops, S_IRUGO);
+ /* per port files */
+ for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
+ for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) {
+ snprintf(name,
+ sizeof(name),
+ port_cntr_ops[i].name,
+ j + 1);
+ DEBUGFS_FILE_CREATE(name,
+ ibd->hfi1_ibdev_dbg,
+ ppd,
+ &port_cntr_ops[i].ops,
+ port_cntr_ops[i].ops.write == NULL ?
+ S_IRUGO : S_IRUGO|S_IWUSR);
+ }
+}
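+
+/*
+ * The resulting per-device tree (illustrative, for unit 0 with one
+ * port; the root name assumes DRIVER_NAME is "hfi1"):
+ *
+ *     /sys/kernel/debug/hfi1/
+ *         0 -> hfi1_0                (symlink)
+ *         hfi1_0/
+ *             opcode_stats ctx_stats qp_stats sdes
+ *             counter_names counters portcounter_names
+ *             port1counters i2c1 i2c2 qsfp_dump1 qsfp1 qsfp2
+ */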
+
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
+{
+ if (!hfi1_dbg_root)
+ goto out;
+ debugfs_remove(ibd->hfi1_ibdev_link);
+ debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
+out:
+ ibd->hfi1_ibdev_dbg = NULL;
+ synchronize_rcu();
+}
+
+/*
+ * Driver stats field names, one line per stat, as a single string. Used
+ * by programs like hfistats to print the stats in a way which works for
+ * different versions of drivers, without changing program source.
+ * If hfi1_stats changes, this needs to change. Names need to be
+ * 12 chars or less (w/o newline), for proper display by the hfistats
+ * utility.
+ */
+static const char * const hfi1_statnames[] = {
+ /* must be element 0 */
+ "KernIntr",
+ "ErrorIntr",
+ "Tx_Errs",
+ "Rcv_Errs",
+ "H/W_Errs",
+ "NoPIOBufs",
+ "CtxtsOpen",
+ "RcvLen_Errs",
+ "EgrBufFull",
+ "EgrHdrFull"
+};
+
+static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
+__acquires(RCU)
+{
+ rcu_read_lock();
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void *_driver_stats_names_seq_next(
+ struct seq_file *s,
+ void *v,
+ loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+
+ seq_printf(s, "%s\n", hfi1_statnames[*spos]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(driver_stats_names);
+DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
+DEBUGFS_FILE_OPS(driver_stats_names);
+
+static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
+__acquires(RCU)
+{
+ rcu_read_lock();
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(hfi1_statnames))
+ return NULL;
+ return pos;
+}
+
+static void _driver_stats_seq_stop(struct seq_file *s, void *v)
+__releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static u64 hfi1_sps_ints(void)
+{
+ unsigned long flags;
+ struct hfi1_devdata *dd;
+ u64 sps_ints = 0;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ list_for_each_entry(dd, &hfi1_dev_list, list) {
+ sps_ints += get_all_cpu_total(dd->int_counter);
+ }
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ return sps_ints;
+}
+
+static int _driver_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ char *buffer;
+ u64 *stats = (u64 *)&hfi1_stats;
+ size_t sz = seq_get_buf(s, &buffer);
+
+ if (sz < sizeof(u64))
+ return SEQ_SKIP;
+ /* special case for interrupts */
+ if (*spos == 0)
+ *(u64 *)buffer = hfi1_sps_ints();
+ else
+ *(u64 *)buffer = stats[*spos];
+ seq_commit(s, sizeof(u64));
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(driver_stats);
+DEBUGFS_SEQ_FILE_OPEN(driver_stats)
+DEBUGFS_FILE_OPS(driver_stats);
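+
+/*
+ * Illustrative read side (a sketch, not part of the driver): the
+ * "driver_stats" file yields one raw u64 per line of
+ * "driver_stats_names", element 0 being the interrupt total from
+ * hfi1_sps_ints(), so a user could pair them with:
+ *
+ *     __u64 v;
+ *     for (i = 0; read(fd, &v, sizeof(v)) == sizeof(v); i++)
+ *             printf("%-12s %llu\n", name[i], (unsigned long long)v);
+ */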
+
+void hfi1_dbg_init(void)
+{
+ hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL);
+ if (!hfi1_dbg_root)
+ pr_warn("init of debugfs failed\n");
+ DEBUGFS_SEQ_FILE_CREATE(driver_stats_names, hfi1_dbg_root, NULL);
+ DEBUGFS_SEQ_FILE_CREATE(driver_stats, hfi1_dbg_root, NULL);
+}
+
+void hfi1_dbg_exit(void)
+{
+ debugfs_remove_recursive(hfi1_dbg_root);
+ hfi1_dbg_root = NULL;
+}
+
+#endif
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/staging/rdma/hfi1/debugfs.h
new file mode 100644
index 000000000000..92d6fe146714
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/debugfs.h
@@ -0,0 +1,78 @@
+#ifndef _HFI1_DEBUGFS_H
+#define _HFI1_DEBUGFS_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+struct hfi1_ibdev;
+#ifdef CONFIG_DEBUG_FS
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
+void hfi1_dbg_init(void);
+void hfi1_dbg_exit(void);
+#else
+static inline void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
+{
+}
+
+static inline void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
+{
+}
+
+static inline void hfi1_dbg_init(void)
+{
+}
+
+static inline void hfi1_dbg_exit(void)
+{
+}
+
+#endif
+
+#endif /* _HFI1_DEBUGFS_H */
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
new file mode 100644
index 000000000000..07c87a87775f
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/device.c
@@ -0,0 +1,142 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+
+#include "hfi.h"
+#include "device.h"
+
+static struct class *class;
+static dev_t hfi1_dev;
+
+int hfi1_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev *cdev, struct device **devp)
+{
+ const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
+ struct device *device = NULL;
+ int ret;
+
+ cdev_init(cdev, fops);
+ cdev->owner = THIS_MODULE;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ pr_err("Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto done;
+ }
+
+ device = device_create(class, NULL, dev, NULL, "%s", name);
+ if (!IS_ERR(device))
+ goto done;
+ ret = PTR_ERR(device);
+ device = NULL;
+ pr_err("Could not create device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ cdev_del(cdev);
+done:
+ *devp = device;
+ return ret;
+}
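+
+/*
+ * Illustrative call (a sketch; the minor, name and fields here are
+ * assumptions, not actual driver usage):
+ *
+ *     ret = hfi1_cdev_init(0, "hfi1_0", &some_file_ops,
+ *                          &dd->some_cdev, &dd->some_device);
+ *
+ * which creates /dev/hfi1_0 backed by some_file_ops; see
+ * hfi1_cdev_cleanup() below for the matching teardown.
+ */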
+
+void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp)
+{
+ struct device *device = *devp;
+
+ if (device) {
+ device_unregister(device);
+ *devp = NULL;
+
+ cdev_del(cdev);
+ }
+}
+
+static const char *hfi1_class_name = "hfi1";
+
+const char *class_name(void)
+{
+ return hfi1_class_name;
+}
+
+int __init dev_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&hfi1_dev, 0, HFI1_NMINORS, DRIVER_NAME);
+ if (ret < 0) {
+ pr_err("Could not allocate chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ class = class_create(THIS_MODULE, class_name());
+ if (IS_ERR(class)) {
+ ret = PTR_ERR(class);
+ pr_err("Could not create device class (err %d)\n", -ret);
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+ }
+
+done:
+ return ret;
+}
+
+void dev_cleanup(void)
+{
+ if (class) {
+ class_destroy(class);
+ class = NULL;
+ }
+
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+}
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
new file mode 100644
index 000000000000..98caecd3d807
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/device.h
@@ -0,0 +1,61 @@
+#ifndef _HFI1_DEVICE_H
+#define _HFI1_DEVICE_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+int hfi1_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev *cdev, struct device **devp);
+void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
+const char *class_name(void);
+int __init dev_init(void);
+void dev_cleanup(void);
+
+#endif /* _HFI1_DEVICE_H */
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
new file mode 100644
index 000000000000..6777d6b659cf
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -0,0 +1,1873 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains support for diagnostic functions. It is accessed by
+ * opening the hfi1_diag device, normally minor number 129. Diagnostic use
+ * of the chip may render the chip or board unusable until the driver
+ * is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <rdma/ib_smi.h>
+#include "hfi.h"
+#include "device.h"
+#include "common.h"
+#include "trace.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+#define snoop_dbg(fmt, ...) \
+ hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
+
+/* Snoop option mask */
+#define SNOOP_DROP_SEND (1 << 0)
+#define SNOOP_USE_METADATA (1 << 1)
+
+static u8 snoop_flags;
+
+/*
+ * Extract the packet length from the LRH header.
+ * Why & 0x7FF? Because the length field is only 11 bits; in case the upper
+ * bits weren't zeroed we throw the bogus bits away. The value is in dwords,
+ * so multiply by 4 to get the size in bytes.
+ */
+#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0x7FF)) << 2)
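+
+/*
+ * Worked example: if be16_to_cpu((x)->lrh[2]) is 0x0010, the 11-bit
+ * length field is 16 dwords and HFI1_GET_PKT_LEN() returns
+ * 16 << 2 = 64 bytes.
+ */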
+
+enum hfi1_filter_status {
+ HFI1_FILTER_HIT,
+ HFI1_FILTER_ERR,
+ HFI1_FILTER_MISS
+};
+
+/* snoop processing functions */
+rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
+ [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
+ [RHF_RCV_TYPE_EAGER] = snoop_recv_handler,
+ [RHF_RCV_TYPE_IB] = snoop_recv_handler,
+ [RHF_RCV_TYPE_ERROR] = snoop_recv_handler,
+ [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid
+};
+
+/* Snoop packet structure */
+struct snoop_packet {
+ struct list_head list;
+ u32 total_len;
+ u8 data[];
+};
+
+/* Do not make these an enum or it will blow up struct capture_md */
+#define PKT_DIR_EGRESS 0x0
+#define PKT_DIR_INGRESS 0x1
+
+/* Packet capture metadata returned to the user with the packet. */
+struct capture_md {
+ u8 port;
+ u8 dir;
+ u8 reserved[6];
+ union {
+ u64 pbc;
+ u64 rhf;
+ } u;
+};
+
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+static struct cdev diagpkt_cdev;
+static struct device *diagpkt_device;
+
+static ssize_t diagpkt_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diagpkt_file_ops = {
+ .owner = THIS_MODULE,
+ .write = diagpkt_write,
+ .llseek = noop_llseek,
+};
+
+/*
+ * This is used for communication with user space for snoop extended IOCTLs
+ */
+struct hfi1_link_info {
+ __be64 node_guid;
+ u8 port_mode;
+ u8 port_state;
+ u16 link_speed_active;
+ u16 link_width_active;
+ u16 vl15_init;
+ u8 port_number;
+ /*
+ * Add padding to make this a full IB SMP payload. Note: changing the
+ * size of this structure will make the IOCTLs created with _IOWR
+ * change.
+ * Be sure to run tests on all IOCTLs when making changes to this
+ * structure.
+ */
+ u8 res[47];
+};
+
+/*
+ * This starts our ioctl sequence numbers *way* off from the ones
+ * defined in ib_core.
+ */
+#define SNOOP_CAPTURE_VERSION 0x1
+
+#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */
+#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
+#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
+
+#define HFI1_SNOOP_IOCGETLINKSTATE \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
+#define HFI1_SNOOP_IOCSETLINKSTATE \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+1)
+#define HFI1_SNOOP_IOCCLEARQUEUE \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+2)
+#define HFI1_SNOOP_IOCCLEARFILTER \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+3)
+#define HFI1_SNOOP_IOCSETFILTER \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+4)
+#define HFI1_SNOOP_IOCGETVERSION \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+5)
+#define HFI1_SNOOP_IOCSET_OPTS \
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+6)
+
+/*
+ * These offsets +6/+7 could change, but these are already known and used
+ * IOCTL numbers so don't change them without a good reason.
+ */
+#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
+ _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+6, \
+ struct hfi1_link_info)
+#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
+ _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+7, \
+ struct hfi1_link_info)
+
+static int hfi1_snoop_open(struct inode *in, struct file *fp);
+static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
+ size_t pkt_len, loff_t *off);
+static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+static unsigned int hfi1_snoop_poll(struct file *fp,
+ struct poll_table_struct *wait);
+static int hfi1_snoop_release(struct inode *in, struct file *fp);
+
+struct hfi1_packet_filter_command {
+ int opcode;
+ int length;
+ void *value_ptr;
+};
+
+/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
+#define HFI1_SNOOP_INGRESS 0x1
+#define HFI1_SNOOP_EGRESS 0x2
+
+enum hfi1_packet_filter_opcodes {
+ FILTER_BY_LID,
+ FILTER_BY_DLID,
+ FILTER_BY_MAD_MGMT_CLASS,
+ FILTER_BY_QP_NUMBER,
+ FILTER_BY_PKT_TYPE,
+ FILTER_BY_SERVICE_LEVEL,
+ FILTER_BY_PKEY,
+ FILTER_BY_DIRECTION,
+};
+
+static const struct file_operations snoop_file_ops = {
+ .owner = THIS_MODULE,
+ .open = hfi1_snoop_open,
+ .read = hfi1_snoop_read,
+ .unlocked_ioctl = hfi1_ioctl,
+ .poll = hfi1_snoop_poll,
+ .write = hfi1_snoop_write,
+ .release = hfi1_snoop_release
+};
+
+struct hfi1_filter_array {
+ int (*filter)(void *, void *, void *);
+};
+
+static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
+static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
+static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
+ void *value);
+static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
+static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
+ void *value);
+static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
+ void *value);
+static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
+static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
+
+static struct hfi1_filter_array hfi1_filters[] = {
+ { hfi1_filter_lid },
+ { hfi1_filter_dlid },
+ { hfi1_filter_mad_mgmt_class },
+ { hfi1_filter_qp_number },
+ { hfi1_filter_ibpacket_type },
+ { hfi1_filter_ib_service_level },
+ { hfi1_filter_ib_pkey },
+ { hfi1_filter_direction },
+};
+
+#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters)
+#define HFI1_DIAG_MINOR_BASE 129
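+
+/*
+ * hfi1_filters[] above is indexed by enum hfi1_packet_filter_opcodes,
+ * so a filter set via HFI1_SNOOP_IOCSETFILTER is dispatched roughly as:
+ *
+ *     ret = hfi1_filters[filter_cmd.opcode].filter(hdr, packet_data,
+ *                                                  filter_cmd.value_ptr);
+ *
+ * (a sketch; hdr/packet_data stand in for the snooped packet), which is
+ * why the enum and the array must stay in the same order.
+ */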
+
+static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
+
+int hfi1_diag_add(struct hfi1_devdata *dd)
+{
+ char name[16];
+ int ret = 0;
+
+ snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
+ dd->unit);
+ /*
+ * Do this for each device as opposed to the normal diagpkt
+ * interface which is one per host
+ */
+ ret = hfi1_snoop_add(dd, name);
+ if (ret)
+ dd_dev_err(dd, "Unable to init snoop/capture device");
+
+ snprintf(name, sizeof(name), "%s_diagpkt", class_name());
+ if (atomic_inc_return(&diagpkt_count) == 1) {
+ ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
+ &diagpkt_file_ops, &diagpkt_cdev,
+ &diagpkt_device);
+ }
+
+ return ret;
+}
+
+/* this must be called with dd->hfi1_snoop.snoop_lock held */
+static void drain_snoop_list(struct list_head *queue)
+{
+ struct list_head *pos, *q;
+ struct snoop_packet *packet;
+
+ list_for_each_safe(pos, q, queue) {
+ packet = list_entry(pos, struct snoop_packet, list);
+ list_del(pos);
+ kfree(packet);
+ }
+}
+
+static void hfi1_snoop_remove(struct hfi1_devdata *dd)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+}
+
+void hfi1_diag_remove(struct hfi1_devdata *dd)
+{
+ hfi1_snoop_remove(dd);
+ if (atomic_dec_and_test(&diagpkt_count))
+ hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
+ hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
+}
+
+/*
+ * Allocated structure shared between the credit return mechanism and
+ * diagpkt_send().
+ */
+struct diagpkt_wait {
+ struct completion credits_returned;
+ int code;
+ atomic_t count;
+};
+
+/*
+ * When each side is finished with the structure, they call this.
+ * The last user frees the structure.
+ */
+static void put_diagpkt_wait(struct diagpkt_wait *wait)
+{
+ if (atomic_dec_and_test(&wait->count))
+ kfree(wait);
+}
+
+/*
+ * Callback from the credit return code. Signal the completion, which
+ * will let diagpkt_send() continue.
+ */
+static void diagpkt_complete(void *arg, int code)
+{
+ struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
+
+ wait->code = code;
+ complete(&wait->credits_returned);
+ put_diagpkt_wait(wait); /* finished with the structure */
+}
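+
+/*
+ * Lifecycle note (illustrative): diagpkt_send() initializes wait->count
+ * to 2, one reference for itself and one for this credit return
+ * callback; whichever side calls put_diagpkt_wait() last frees the
+ * structure, so neither side can use it after the other has freed it.
+ */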
+
+/**
+ * diagpkt_send - send a packet
+ * @dp: diag packet descriptor
+ */
+static ssize_t diagpkt_send(struct diag_pkt *dp)
+{
+ struct hfi1_devdata *dd;
+ struct send_context *sc;
+ struct pio_buf *pbuf;
+ u32 *tmpbuf = NULL;
+ ssize_t ret = 0;
+ u32 pkt_len, total_len;
+ pio_release_cb credit_cb = NULL;
+ void *credit_arg = NULL;
+ struct diagpkt_wait *wait = NULL;
+
+ dd = hfi1_lookup(dp->unit);
+ if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+ if (!(dd->flags & HFI1_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (dp->version != _DIAG_PKT_VERS) {
+ dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
+ dp->version);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* send count must be an exact number of dwords */
+ if (dp->len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* there is only port 1 */
+ if (dp->port != 1) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* need a valid context */
+ if (dp->sw_index >= dd->num_send_contexts) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* can only use kernel contexts */
+ if (dd->send_contexts[dp->sw_index].type != SC_KERNEL) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* must be allocated */
+ sc = dd->send_contexts[dp->sw_index].sc;
+ if (!sc) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* must be enabled */
+ if (!(sc->flags & SCF_ENABLED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* allocate a buffer and copy the data in */
+ tmpbuf = vmalloc(dp->len);
+ if (!tmpbuf) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmpbuf,
+ (const void __user *) (unsigned long) dp->data,
+ dp->len)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ /*
+ * pkt_len is how much data we have to write, in dwords; it includes
+ * the header and data. total_len is the length of the packet in
+ * dwords plus the PBC, and should not include the CRC.
+ */
+ pkt_len = dp->len >> 2;
+ total_len = pkt_len + 2; /* PBC + packet */
+
+ /* if 0, fill in a default */
+ if (dp->pbc == 0) {
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ hfi1_cdbg(PKT, "Generating PBC");
+ dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
+ } else {
+ hfi1_cdbg(PKT, "Using passed in PBC");
+ }
+
+ hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
+
+ /*
+ * The caller wants to wait until the packet is sent and to
+ * check for errors. The best we can do is wait until
+ * the buffer credits are returned and check if any packet
+ * error has occurred. If there are any late errors, this
+ * could miss it. If there are other senders who generate
+ * an error, this may find it. However, in general, it
+ * should catch most.
+ */
+ if (dp->flags & F_DIAGPKT_WAIT) {
+ /* always force a credit return */
+ dp->pbc |= PBC_CREDIT_RETURN;
+ /* turn on credit return interrupts */
+ sc_add_credit_return_intr(sc);
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ init_completion(&wait->credits_returned);
+ atomic_set(&wait->count, 2);
+ wait->code = PRC_OK;
+
+ credit_cb = diagpkt_complete;
+ credit_arg = wait;
+ }
+
+ pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
+ if (!pbuf) {
+ /*
+ * No send buffer means no credit callback. Undo
+ * the wait set-up that was done above. We free wait
+ * because the callback will never be called.
+ */
+ if (dp->flags & F_DIAGPKT_WAIT) {
+ sc_del_credit_return_intr(sc);
+ kfree(wait);
+ wait = NULL;
+ }
+ ret = -ENOSPC;
+ goto bail;
+ }
+
+ pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
+ /* no flush needed as the HW knows the packet size */
+
+ ret = sizeof(*dp);
+
+ if (dp->flags & F_DIAGPKT_WAIT) {
+ /* wait for credit return */
+ ret = wait_for_completion_interruptible(
+ &wait->credits_returned);
+ /*
+ * If the wait returns an error, the wait was interrupted,
+ * e.g. with a ^C in the user program. The callback is
+ * still pending. This is OK as the wait structure is
+ * kmalloc'ed and the structure will free itself when
+ * all users are done with it.
+ *
+ * A context disable occurs on a send context restart, so
+ * include that in the list of errors below to check for.
+ * NOTE: PRC_FILL_ERR is at best informational and cannot
+ * be depended on.
+ */
+ if (!ret && (((wait->code & PRC_STATUS_ERR)
+ || (wait->code & PRC_FILL_ERR)
+ || (wait->code & PRC_SC_DISABLE))))
+ ret = -EIO;
+
+ put_diagpkt_wait(wait); /* finished with the structure */
+ sc_del_credit_return_intr(sc);
+ }
+
+bail:
+ vfree(tmpbuf);
+ return ret;
+}
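+
+/*
+ * Worked example for the length math above: a 64-byte payload gives
+ * dp->len = 64, so pkt_len = 64 >> 2 = 16 dwords and total_len =
+ * 16 + 2 = 18 dwords once the 2-dword PBC is added.
+ */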
+
+static ssize_t diagpkt_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ struct hfi1_devdata *dd;
+ struct send_context *sc;
+ u8 vl;
+
+ struct diag_pkt dp;
+
+ if (count != sizeof(dp))
+ return -EINVAL;
+
+ if (copy_from_user(&dp, data, sizeof(dp)))
+ return -EFAULT;
+
+ /*
+ * The Send Context is derived from the PbcVL value
+ * if the PBC is populated.
+ */
+ if (dp.pbc) {
+ dd = hfi1_lookup(dp.unit);
+ if (dd == NULL)
+ return -ENODEV;
+ vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
+ sc = dd->vld[vl].sc;
+ if (sc) {
+ dp.sw_index = sc->sw_index;
+ hfi1_cdbg(
+ PKT,
+ "Packet sent over VL %d via Send Context %u(%u)",
+ vl, sc->sw_index, sc->hw_context);
+ }
+ }
+
+ return diagpkt_send(&dp);
+}
+
+static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
+{
+ int ret = 0;
+
+ dd->hfi1_snoop.mode_flag = 0;
+ spin_lock_init(&dd->hfi1_snoop.snoop_lock);
+ INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
+ init_waitqueue_head(&dd->hfi1_snoop.waitq);
+
+ ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
+ &snoop_file_ops,
+ &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
+
+ if (ret) {
+ dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
+ hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
+ &dd->hfi1_snoop.class_dev);
+ }
+
+ return ret;
+}
+
+static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
+{
+ int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
+
+ return hfi1_lookup(unit);
+}
+
+/* clear or restore send context integrity checks */
+static void adjust_integrity_checks(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ unsigned long sc_flags;
+ int i;
+
+ spin_lock_irqsave(&dd->sc_lock, sc_flags);
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ int enable;
+
+ sc = dd->send_contexts[i].sc;
+
+ if (!sc)
+ continue; /* not allocated */
+
+ enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
+ dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
+
+ set_pio_integrity(sc);
+
+ if (enable) /* take HFI_CAP_* flags into account */
+ hfi1_init_ctxt(sc);
+ }
+ spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
+}
+
+static int hfi1_snoop_open(struct inode *in, struct file *fp)
+{
+ int ret;
+ int mode_flag = 0;
+ unsigned long flags = 0;
+ struct hfi1_devdata *dd;
+ struct list_head *queue;
+
+ mutex_lock(&hfi1_mutex);
+
+ dd = hfi1_dd_from_sc_inode(in);
+ if (dd == NULL) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ /*
+ * File mode determines snoop or capture. Some existing user
+ * applications expect to be able to open the capture device RDWR
+ * because they expect a dedicated capture device. For this reason we
+ * support a module param to force capture mode even if the file open
+ * mode matches snoop.
+ */
+ if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
+ snoop_dbg("Capture Enabled");
+ mode_flag = HFI1_PORT_CAPTURE_MODE;
+ } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
+ snoop_dbg("Snoop Enabled");
+ mode_flag = HFI1_PORT_SNOOP_MODE;
+ } else {
+ snoop_dbg("Invalid");
+ ret = -EINVAL;
+ goto bail;
+ }
+ queue = &dd->hfi1_snoop.queue;
+
+ /*
+ * We are not supporting snoop and capture at the same time.
+ */
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ if (dd->hfi1_snoop.mode_flag) {
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ goto bail;
+ }
+
+ dd->hfi1_snoop.mode_flag = mode_flag;
+ drain_snoop_list(queue);
+
+ dd->hfi1_snoop.filter_callback = NULL;
+ dd->hfi1_snoop.filter_value = NULL;
+
+ /*
+ * Send side packet integrity checks are not helpful when snooping so
+ * disable and re-enable when we stop snooping.
+ */
+ if (mode_flag == HFI1_PORT_SNOOP_MODE) {
+ /* clear after snoop mode is on */
+ adjust_integrity_checks(dd); /* clear */
+
+ /*
+ * We also do not want to be doing the DLID LMC check for
+ * ingressed packets.
+ */
+ dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
+ write_csr(dd, DCC_CFG_PORT_CONFIG1,
+ (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
+ }
+
+ /*
+ * As soon as we set these function pointers the recv and send handlers
+ * are active. This is a race, so we must make sure the queue is drained
+ * and the filter values are initialized above. Technically we should add
+ * locking here, but the worst that happens is that on receive a packet
+ * gets allocated and blocks on the snoop_lock before being added to the
+ * queue. The same goes for send.
+ */
+ dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
+ dd->process_pio_send = snoop_send_pio_handler;
+ dd->process_dma_send = snoop_send_pio_handler;
+ dd->pio_inline_send = snoop_inline_pio_send;
+
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ ret = 0;
+
+bail:
+ mutex_unlock(&hfi1_mutex);
+
+ return ret;
+}
+
+static int hfi1_snoop_release(struct inode *in, struct file *fp)
+{
+ unsigned long flags = 0;
+ struct hfi1_devdata *dd;
+ int mode_flag;
+
+ dd = hfi1_dd_from_sc_inode(in);
+ if (dd == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+
+ /* clear the snoop mode before re-adjusting send context CSRs */
+ mode_flag = dd->hfi1_snoop.mode_flag;
+ dd->hfi1_snoop.mode_flag = 0;
+
+ /*
+ * Drain the queue and clear the filters; we are done with them. Don't
+ * forget to restore the packet integrity checks.
+ */
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ if (mode_flag == HFI1_PORT_SNOOP_MODE) {
+ /* restore after snoop mode is clear */
+ adjust_integrity_checks(dd); /* restore */
+
+ /*
+ * Also reset the DCC_CFG_PORT_CONFIG1 register for DLID checking on
+ * incoming packets again, using the value saved when
+ * opening the snoop device.
+ */
+ write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
+ }
+
+ dd->hfi1_snoop.filter_callback = NULL;
+ kfree(dd->hfi1_snoop.filter_value);
+ dd->hfi1_snoop.filter_value = NULL;
+
+ /*
+ * User is done snooping and capturing, return control to the normal
+ * handler. Re-enable SDMA handling.
+ */
+ dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
+ dd->process_pio_send = hfi1_verbs_send_pio;
+ dd->process_dma_send = hfi1_verbs_send_dma;
+ dd->pio_inline_send = pio_copy;
+
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+
+ snoop_dbg("snoop/capture device released");
+
+ return 0;
+}
+
+static unsigned int hfi1_snoop_poll(struct file *fp,
+ struct poll_table_struct *wait)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+
+ struct hfi1_devdata *dd;
+
+ dd = hfi1_dd_from_sc_inode(fp->f_inode);
+ if (dd == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+
+ poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
+ if (!list_empty(&dd->hfi1_snoop.queue))
+ ret |= POLLIN | POLLRDNORM;
+
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ return ret;
+}
+
+static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ struct diag_pkt dpkt;
+ struct hfi1_devdata *dd;
+ size_t ret;
+ u8 byte_two, sl, sc5, sc4, vl, byte_one;
+ struct send_context *sc;
+ u32 len;
+ u64 pbc;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+
+ dd = hfi1_dd_from_sc_inode(fp->f_inode);
+ if (dd == NULL)
+ return -ENODEV;
+
+ ppd = dd->pport;
+ snoop_dbg("received %lu bytes from user", count);
+
+ memset(&dpkt, 0, sizeof(dpkt));
+ dpkt.version = _DIAG_PKT_VERS;
+ dpkt.unit = dd->unit;
+ dpkt.port = 1;
+
+ if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
+ /*
+ * We need to generate the PBC and not let diagpkt_send do it,
+ * to do this we need the VL and the length in dwords.
+ * The VL can be determined by using the SL and looking up the
+ * SC. Then the SC can be converted into VL. The exception to
+ * this is those packets which are from an SMI queue pair.
+ * Since we can't detect anything about the QP here we have to
+ * rely on the SC. If its 0xF then we assume its SMI and
+ * do not look at the SL.
+ */
+ if (copy_from_user(&byte_one, data, 1))
+ return -EINVAL;
+
+ if (copy_from_user(&byte_two, data+1, 1))
+ return -EINVAL;
+
+ sc4 = (byte_one >> 4) & 0xf;
+ if (sc4 == 0xF) {
+ snoop_dbg("Detected VL15 packet ignoring SL in packet");
+ vl = sc4;
+ } else {
+ sl = (byte_two >> 4) & 0xf;
+ ibp = to_iport(&dd->verbs_dev.ibdev, 1);
+ sc5 = ibp->sl_to_sc[sl];
+ vl = sc_to_vlt(dd, sc5);
+ if (vl != sc4) {
+ snoop_dbg("VL %d does not match SC %d of packet",
+ vl, sc4);
+ return -EINVAL;
+ }
+ }
+
+ sc = dd->vld[vl].sc; /* Look up the context based on VL */
+ if (sc) {
+ dpkt.sw_index = sc->sw_index;
+ snoop_dbg("Sending on context %u(%u)", sc->sw_index,
+ sc->hw_context);
+ } else {
+ snoop_dbg("Could not find context for vl %d", vl);
+ return -EINVAL;
+ }
+
+ len = (count >> 2) + 2; /* Add in PBC */
+ pbc = create_pbc(ppd, 0, 0, vl, len);
+ } else {
+ if (copy_from_user(&pbc, data, sizeof(pbc)))
+ return -EINVAL;
+ vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
+ sc = dd->vld[vl].sc; /* Look up the context based on VL */
+ if (sc) {
+ dpkt.sw_index = sc->sw_index;
+ } else {
+ snoop_dbg("Could not find context for vl %d", vl);
+ return -EINVAL;
+ }
+ data += sizeof(pbc);
+ count -= sizeof(pbc);
+ }
+ dpkt.len = count;
+ dpkt.data = (unsigned long)data;
+
+ snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
+ (pbc >> 12) & 0xf,
+ (pbc & 0xfff));
+
+ dpkt.pbc = pbc;
+ ret = diagpkt_send(&dpkt);
+ /*
+ * diagpkt_send() only returns the number of bytes in the diag_pkt
+ * structure on success, so patch that up here to report the
+ * number of payload bytes written.
+ */
+ if (ret == sizeof(dpkt))
+ return count;
+
+ return ret;
+}
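+
+/*
+ * For reference, a sketch of the two write formats accepted above, as
+ * inferred from hfi1_snoop_write(); "fd", "buf", "pkt" and "pkt_len" are
+ * illustrative names only:
+ *
+ * Default: write(fd, pkt, pkt_len);
+ * pkt starts with the IB header and the PBC is generated in this
+ * function from the VL/SL fields of the header.
+ *
+ * Metadata mode (SNOOP_USE_METADATA set via HFI1_SNOOP_IOCSET_OPTS),
+ * where the caller supplies the PBC itself in the first 8 bytes:
+ * u64 pbc = ...;
+ * memcpy(buf, &pbc, sizeof(pbc));
+ * memcpy(buf + sizeof(pbc), pkt, pkt_len);
+ * write(fd, buf, sizeof(pbc) + pkt_len);
+ */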
+
+static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
+ size_t pkt_len, loff_t *off)
+{
+ ssize_t ret = 0;
+ unsigned long flags = 0;
+ struct snoop_packet *packet = NULL;
+ struct hfi1_devdata *dd;
+
+ dd = hfi1_dd_from_sc_inode(fp->f_inode);
+ if (dd == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+
+ while (list_empty(&dd->hfi1_snoop.queue)) {
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+
+ if (fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(
+ dd->hfi1_snoop.waitq,
+ !list_empty(&dd->hfi1_snoop.queue)))
+ return -EINTR;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ }
+
+ if (!list_empty(&dd->hfi1_snoop.queue)) {
+ packet = list_entry(dd->hfi1_snoop.queue.next,
+ struct snoop_packet, list);
+ list_del(&packet->list);
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ if (pkt_len >= packet->total_len) {
+ if (copy_to_user(data, packet->data,
+ packet->total_len))
+ ret = -EFAULT;
+ else
+ ret = packet->total_len;
+ } else
+ ret = -EINVAL;
+
+ kfree(packet);
+ } else
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+
+ return ret;
+}
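+
+/*
+ * Illustrative sketch (not shipped code) of how a userspace capture tool
+ * would consume this interface: block with poll() until POLLIN, then pull
+ * one whole snoop packet per read(). "fd" and "buf" are assumed names.
+ *
+ * struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ * while (poll(&pfd, 1, -1) > 0) {
+ * ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * if (n > 0)
+ * consume(buf, n); // one whole packet per read
+ * }
+ *
+ * A read() with a buffer smaller than the queued packet fails with
+ * -EINVAL, per the pkt_len check above.
+ */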
+
+static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ struct hfi1_devdata *dd;
+ void *filter_value = NULL;
+ long ret = 0;
+ int value = 0;
+ u8 physState = 0;
+ u8 linkState = 0;
+ u16 devState = 0;
+ unsigned long flags = 0;
+ unsigned long *argp = NULL;
+ struct hfi1_packet_filter_command filter_cmd = {0};
+ int mode_flag = 0;
+ struct hfi1_pportdata *ppd = NULL;
+ unsigned int index;
+ struct hfi1_link_info link_info;
+
+ dd = hfi1_dd_from_sc_inode(fp->f_inode);
+ if (dd == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+
+ mode_flag = dd->hfi1_snoop.mode_flag;
+
+ if (((_IOC_DIR(cmd) & _IOC_READ)
+ && !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)))
+ || ((_IOC_DIR(cmd) & _IOC_WRITE)
+ && !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)))) {
+ ret = -EFAULT;
+ } else if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ } else if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
+ (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
+ (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
+ (cmd != HFI1_SNOOP_IOCSETFILTER)) {
+ /*
+ * Capture devices are allowed only 3 operations:
+ * 1. Clear the capture queue
+ * 2. Clear the capture filter
+ * 3. Set the capture filter
+ * All others are invalid.
+ */
+ ret = -EINVAL;
+ } else {
+ switch (cmd) {
+ case HFI1_SNOOP_IOCSETLINKSTATE:
+ snoop_dbg("HFI1_SNOOP_IOCSETLINKSTATE is not valid");
+ ret = -EINVAL;
+ break;
+
+ case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
+ memset(&link_info, 0, sizeof(link_info));
+
+ ret = copy_from_user(&link_info,
+ (struct hfi1_link_info __user *)arg,
+ sizeof(link_info));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ value = link_info.port_state;
+ index = link_info.port_number;
+ if (index > dd->num_pports - 1) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ppd = &dd->pport[index];
+ if (!ppd) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* What we want to transition to */
+ physState = (value >> 4) & 0xF;
+ linkState = value & 0xF;
+ snoop_dbg("Setting link state 0x%x", value);
+
+ switch (linkState) {
+ case IB_PORT_NOP:
+ if (physState == 0)
+ break;
+ /* fall through */
+ case IB_PORT_DOWN:
+ switch (physState) {
+ case 0:
+ devState = HLS_DN_DOWNDEF;
+ break;
+ case 2:
+ devState = HLS_DN_POLL;
+ break;
+ case 3:
+ devState = HLS_DN_DISABLE;
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = set_link_state(ppd, devState);
+ break;
+ case IB_PORT_ARMED:
+ ret = set_link_state(ppd, HLS_UP_ARMED);
+ if (!ret)
+ send_idle_sma(dd, SMA_IDLE_ARM);
+ break;
+ case IB_PORT_ACTIVE:
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (!ret)
+ send_idle_sma(dd, SMA_IDLE_ACTIVE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ break;
+ /* fall through */
+ case HFI1_SNOOP_IOCGETLINKSTATE:
+ case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
+ if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
+ memset(&link_info, 0, sizeof(link_info));
+ ret = copy_from_user(&link_info,
+ (struct hfi1_link_info __user *)arg,
+ sizeof(link_info));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+ index = link_info.port_number;
+ } else {
+ ret = __get_user(index, (int __user *) arg);
+ if (ret != 0)
+ break;
+ }
+
+ if (index > dd->num_pports - 1) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ppd = &dd->pport[index];
+ if (!ppd) {
+ ret = -EINVAL;
+ break;
+ }
+ value = hfi1_ibphys_portstate(ppd);
+ value <<= 4;
+ value |= driver_lstate(ppd);
+
+ snoop_dbg("Link port | Link State: %d", value);
+
+ if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
+ (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
+ link_info.port_state = value;
+ link_info.node_guid = cpu_to_be64(ppd->guid);
+ link_info.link_speed_active =
+ ppd->link_speed_active;
+ link_info.link_width_active =
+ ppd->link_width_active;
+ if (copy_to_user(
+ (struct hfi1_link_info __user *)arg,
+ &link_info, sizeof(link_info)))
+ ret = -EFAULT;
+ } else {
+ ret = __put_user(value, (int __user *)arg);
+ }
+ break;
+
+ case HFI1_SNOOP_IOCCLEARQUEUE:
+ snoop_dbg("Clearing snoop queue");
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ break;
+
+ case HFI1_SNOOP_IOCCLEARFILTER:
+ snoop_dbg("Clearing filter");
+ if (dd->hfi1_snoop.filter_callback) {
+ /* Drain packets first */
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ dd->hfi1_snoop.filter_callback = NULL;
+ }
+ kfree(dd->hfi1_snoop.filter_value);
+ dd->hfi1_snoop.filter_value = NULL;
+ break;
+
+ case HFI1_SNOOP_IOCSETFILTER:
+ snoop_dbg("Setting filter");
+ /* just copy command structure */
+ argp = (unsigned long *)arg;
+ ret = copy_from_user(&filter_cmd, (void __user *)argp,
+ sizeof(filter_cmd));
+ /* copy_from_user() returns bytes not copied, never < 0 */
+ if (ret) {
+ pr_alert("Error copying filter command\n");
+ ret = -EFAULT;
+ break;
+ }
+ if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
+ pr_alert("Invalid opcode in request\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ snoop_dbg("Opcode %d Len %d Ptr %p",
+ filter_cmd.opcode, filter_cmd.length,
+ filter_cmd.value_ptr);
+
+ filter_value = kzalloc(
+ filter_cmd.length * sizeof(u8),
+ GFP_KERNEL);
+ if (!filter_value) {
+ pr_alert("Not enough memory\n");
+ ret = -ENOMEM;
+ break;
+ }
+ /* copy remaining data from userspace */
+ ret = copy_from_user((u8 *)filter_value,
+ (void __user *)filter_cmd.value_ptr,
+ filter_cmd.length);
+ if (ret) {
+ kfree(filter_value);
+ pr_alert("Error copying filter data\n");
+ ret = -EFAULT;
+ break;
+ }
+ /* Drain packets first */
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ dd->hfi1_snoop.filter_callback =
+ hfi1_filters[filter_cmd.opcode].filter;
+ /* just in case we see back to back sets */
+ kfree(dd->hfi1_snoop.filter_value);
+ dd->hfi1_snoop.filter_value = filter_value;
+
+ break;
+ case HFI1_SNOOP_IOCGETVERSION:
+ value = SNOOP_CAPTURE_VERSION;
+ snoop_dbg("Getting version: %d", value);
+ ret = __put_user(value, (int __user *)arg);
+ break;
+ case HFI1_SNOOP_IOCSET_OPTS:
+ snoop_flags = 0;
+ ret = __get_user(value, (int __user *) arg);
+ if (ret != 0)
+ break;
+
+ snoop_dbg("Setting snoop option %d", value);
+ if (value & SNOOP_DROP_SEND)
+ snoop_flags |= SNOOP_DROP_SEND;
+ if (value & SNOOP_USE_METADATA)
+ snoop_flags |= SNOOP_USE_METADATA;
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ }
+done:
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ return ret;
+}
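+
+/*
+ * Illustrative sketch (an assumption, not shipped code) of driving the
+ * filter ioctl above from userspace; the FILTER_BY_LID opcode name is
+ * hypothetical, real opcodes index the hfi1_filters[] table:
+ *
+ * u16 lid = 0x10;
+ * struct hfi1_packet_filter_command cmd = {
+ * .opcode = FILTER_BY_LID,
+ * .length = sizeof(lid),
+ * .value_ptr = &lid,
+ * };
+ *
+ * if (ioctl(fd, HFI1_SNOOP_IOCSETFILTER, &cmd) < 0)
+ * perror("HFI1_SNOOP_IOCSETFILTER");
+ */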
+
+static void snoop_list_add_tail(struct snoop_packet *packet,
+ struct hfi1_devdata *dd)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
+ (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
+ list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
+ snoop_dbg("Added packet to list");
+ }
+
+ /*
+ * Technically we could have closed the snoop device while waiting
+ * on the above lock and it is gone now. The snoop mode_flag will
+ * prevent us from adding the packet to the queue though.
+ */
+
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ wake_up_interruptible(&dd->hfi1_snoop.waitq);
+}
+
+static inline int hfi1_filter_check(void *val, const char *msg)
+{
+ if (!val) {
+ snoop_dbg("Error invalid %s value for filter", msg);
+ return HFI1_FILTER_ERR;
+ }
+ return 0;
+}
+
+static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
+{
+ struct hfi1_ib_header *hdr;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
+ return HFI1_FILTER_HIT; /* matched */
+
+ return HFI1_FILTER_MISS; /* Not matched */
+}
+
+static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
+{
+ struct hfi1_ib_header *hdr;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
+ return HFI1_FILTER_HIT;
+
+ return HFI1_FILTER_MISS;
+}
+
+/* Not valid for outgoing packets; the send handler passes NULL for data */
+static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
+ void *value)
+{
+ struct hfi1_ib_header *hdr;
+ struct hfi1_other_headers *ohdr = NULL;
+ struct ib_smp *smp = NULL;
+ u32 qpn = 0;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(packet_data, "packet_data");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ /* Check for GRH */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
+ else
+ ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
+
+ qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
+ if (qpn <= 1) {
+ smp = (struct ib_smp *)packet_data;
+ if (*((u8 *)value) == smp->mgmt_class)
+ return HFI1_FILTER_HIT;
+ else
+ return HFI1_FILTER_MISS;
+ }
+ return HFI1_FILTER_ERR;
+}
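+
+/*
+ * QPN 0 and QPN 1 are the management queue pairs (SMI and GSI), which is
+ * why only packets with qpn <= 1 are inspected for a MAD management class
+ * above; anything else is reported as a filter error.
+ */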
+
+static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
+{
+ struct hfi1_ib_header *hdr;
+ struct hfi1_other_headers *ohdr = NULL;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ /* Check for GRH */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
+ else
+ ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
+ if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
+ return HFI1_FILTER_HIT;
+
+ return HFI1_FILTER_MISS;
+}
+
+static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
+ void *value)
+{
+ u32 lnh = 0;
+ u8 opcode = 0;
+ struct hfi1_ib_header *hdr;
+ struct hfi1_other_headers *ohdr = NULL;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
+
+ if (lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ return HFI1_FILTER_ERR;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+
+ if (*((u8 *)value) == ((opcode >> 5) & 0x7))
+ return HFI1_FILTER_HIT;
+
+ return HFI1_FILTER_MISS;
+}
+
+static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
+ void *value)
+{
+ struct hfi1_ib_header *hdr;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
+ return HFI1_FILTER_HIT;
+
+ return HFI1_FILTER_MISS;
+}
+
+static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
+{
+ u32 lnh = 0;
+ struct hfi1_ib_header *hdr;
+ struct hfi1_other_headers *ohdr = NULL;
+ int ret;
+
+ ret = hfi1_filter_check(ibhdr, "header");
+ if (ret)
+ return ret;
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ hdr = (struct hfi1_ib_header *)ibhdr;
+
+ lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
+ if (lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ return HFI1_FILTER_ERR;
+
+ /*
+ * The P_Key is a 16-bit entity; however, the topmost bit indicates
+ * the type of membership: 0 for limited and 1 for full.
+ * Limited members cannot accept information from other
+ * limited members, but communication is allowed between
+ * every other combination of membership.
+ * Hence we omit the topmost bit when comparing while filtering.
+ */
+
+ if ((*(u16 *)value & 0x7FFF) ==
+ ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
+ return HFI1_FILTER_HIT;
+
+ return HFI1_FILTER_MISS;
+}
+
+/*
+ * If packet_data is NULL then this is coming from one of the send functions.
+ * Thus we know whether it is an ingress or an egress packet.
+ */
+static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
+{
+ u8 user_dir;
+ int ret;
+
+ ret = hfi1_filter_check(value, "user");
+ if (ret)
+ return ret;
+
+ /* only dereference value once we know it is non-NULL */
+ user_dir = *(u8 *)value;
+
+ if (packet_data) {
+ /* Incoming packet */
+ if (user_dir & HFI1_SNOOP_INGRESS)
+ return HFI1_FILTER_HIT;
+ } else {
+ /* Outgoing packet */
+ if (user_dir & HFI1_SNOOP_EGRESS)
+ return HFI1_FILTER_HIT;
+ }
+
+ return HFI1_FILTER_MISS;
+}
+
+/*
+ * Allocate a snoop packet, i.e. the structure that is stored in the ring
+ * buffer, not to be confused with an hfi1 packet type.
+ */
+static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
+ u32 data_len,
+ u32 md_len)
+{
+ struct snoop_packet *packet = NULL;
+
+ packet = kzalloc(sizeof(struct snoop_packet) + hdr_len + data_len
+ + md_len,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (likely(packet))
+ INIT_LIST_HEAD(&packet->list);
+
+ return packet;
+}
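+
+/*
+ * The returned buffer is laid out as [ metadata | header | data ]; the
+ * handlers below fill it in that order, using md_len and hdr_len as
+ * offsets into packet->data.
+ */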
+
+/*
+ * Instead of having snoop and capture code intermixed with the recv functions,
+ * both the interrupt handler and hfi1_ib_rcv(), we hijack the call and land in
+ * here for snoop/capture, but if not enabled the call goes through as before.
+ * This gives us a single point to constrain all of the snoop recv logic. There
+ * is nothing special that needs to happen for bypass packets. This routine
+ * should not try to look into the packet, it just copies it. There is no
+ * guarantee for filters when it comes to bypass packets as there is no
+ * specific support. Bottom line: this routine does not even know what a
+ * bypass packet is.
+ */
+int snoop_recv_handler(struct hfi1_packet *packet)
+{
+ struct hfi1_pportdata *ppd = packet->rcd->ppd;
+ struct hfi1_ib_header *hdr = packet->hdr;
+ int header_size = packet->hlen;
+ void *data = packet->ebuf;
+ u32 tlen = packet->tlen;
+ struct snoop_packet *s_packet = NULL;
+ int ret;
+ int snoop_mode = 0;
+ u32 md_len = 0;
+ struct capture_md md;
+
+ snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
+ data);
+
+ trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
+ data);
+
+ if (!ppd->dd->hfi1_snoop.filter_callback) {
+ snoop_dbg("filter not set");
+ ret = HFI1_FILTER_HIT;
+ } else {
+ ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
+ ppd->dd->hfi1_snoop.filter_value);
+ }
+
+ switch (ret) {
+ case HFI1_FILTER_ERR:
+ snoop_dbg("Error in filter call");
+ break;
+ case HFI1_FILTER_MISS:
+ snoop_dbg("Filter Miss");
+ break;
+ case HFI1_FILTER_HIT:
+
+ if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
+ snoop_mode = 1;
+ if ((snoop_mode == 0) ||
+ unlikely(snoop_flags & SNOOP_USE_METADATA))
+ md_len = sizeof(struct capture_md);
+
+ s_packet = allocate_snoop_packet(header_size,
+ tlen - header_size,
+ md_len);
+
+ if (unlikely(s_packet == NULL)) {
+ dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
+ break;
+ }
+
+ if (md_len > 0) {
+ memset(&md, 0, sizeof(struct capture_md));
+ md.port = 1;
+ md.dir = PKT_DIR_INGRESS;
+ md.u.rhf = packet->rhf;
+ memcpy(s_packet->data, &md, md_len);
+ }
+
+ /* We should always have a header */
+ if (hdr) {
+ memcpy(s_packet->data + md_len, hdr, header_size);
+ } else {
+ dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
+ kfree(s_packet);
+ break;
+ }
+
+ /*
+ * Packets with no data are possible. If there is no data we still
+ * need to take care of the last 4 bytes, which are normally
+ * included with data buffers and are counted in tlen. Since we
+ * kzalloc the buffer we do not need to set any values, but if we
+ * decide not to use kzalloc we should zero them.
+ */
+ if (data)
+ memcpy(s_packet->data + header_size + md_len, data,
+ tlen - header_size);
+
+ s_packet->total_len = tlen + md_len;
+ snoop_list_add_tail(s_packet, ppd->dd);
+
+ /*
+ * If we are snooping the packet, not capturing, then throw it
+ * away after adding it to the list.
+ */
+ snoop_dbg("Capturing packet");
+ if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
+ snoop_dbg("Throwing packet away");
+ /*
+ * If we are dropping the packet we still may need to
+ * handle the case where error flags are set, this is
+ * normally done by the type specific handler but that
+ * won't be called in this case.
+ */
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ handle_eflags(packet);
+
+ /* throw the packet on the floor */
+ return RHF_RCV_CONTINUE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We do not care what type of packet came in here - just pass it off
+ * to the normal handler.
+ */
+ return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
+ (packet);
+}
+
+/*
+ * Handle snooping and capturing packets when sdma is being used.
+ */
+int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc)
+{
+ pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
+ snoop_dbg("Unsupported Operation");
+ return hfi1_verbs_send_dma(qp, ibhdr, hdrwords, ss, len, plen, dwords,
+ 0);
+}
+
+/*
+ * Handle snooping and capturing packets when pio is being used. Does not handle
+ * bypass packets. The only way to send a bypass packet currently is to use the
+ * diagpkt interface. When that interface is enabled, snoop/capture is not.
+ */
+int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct snoop_packet *s_packet = NULL;
+ u32 *hdr = (u32 *)&ahdr->ibh;
+ u32 length = 0;
+ struct hfi1_sge_state temp_ss;
+ void *data = NULL;
+ void *data_start = NULL;
+ int ret;
+ int snoop_mode = 0;
+ int md_len = 0;
+ struct capture_md md;
+ u32 vl;
+ u32 hdr_len = hdrwords << 2;
+ u32 tlen = HFI1_GET_PKT_LEN(&ahdr->ibh);
+
+ md.u.pbc = 0;
+
+ snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
+ hdrwords, len, plen, dwords, tlen);
+ if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
+ snoop_mode = 1;
+ if ((snoop_mode == 0) ||
+ unlikely(snoop_flags & SNOOP_USE_METADATA))
+ md_len = sizeof(struct capture_md);
+
+ /* not using ss->total_len as arg 2 b/c that does not count CRC */
+ s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
+
+ if (unlikely(s_packet == NULL)) {
+ dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
+ goto out;
+ }
+
+ s_packet->total_len = tlen + md_len;
+
+ if (md_len > 0) {
+ memset(&md, 0, sizeof(struct capture_md));
+ md.port = 1;
+ md.dir = PKT_DIR_EGRESS;
+ if (likely(pbc == 0)) {
+ vl = be16_to_cpu(ahdr->ibh.lrh[0]) >> 12;
+ md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
+ } else {
+ md.u.pbc = 0;
+ }
+ memcpy(s_packet->data, &md, md_len);
+ } else {
+ md.u.pbc = pbc;
+ }
+
+ /* Copy header */
+ if (likely(hdr)) {
+ memcpy(s_packet->data + md_len, hdr, hdr_len);
+ } else {
+ dd_dev_err(ppd->dd,
+ "Unable to copy header to snoop/capture packet\n");
+ kfree(s_packet);
+ goto out;
+ }
+
+ if (ss) {
+ data = s_packet->data + hdr_len + md_len;
+ data_start = data;
+
+ /*
+ * Copy SGE State
+ * The update_sge() function below will not modify the
+ * individual SGEs in the array. It will make a copy each time
+ * and operate on that. So we only need to copy this instance
+ * and it won't impact PIO.
+ */
+ temp_ss = *ss;
+ length = len;
+
+ snoop_dbg("Need to copy %d bytes", length);
+ while (length) {
+ void *addr = temp_ss.sge.vaddr;
+ u32 slen = temp_ss.sge.length;
+
+ if (slen > length) {
+ slen = length;
+ snoop_dbg("slen %d > len %d", slen, length);
+ }
+ snoop_dbg("copy %d to %p", slen, addr);
+ memcpy(data, addr, slen);
+ update_sge(&temp_ss, slen);
+ length -= slen;
+ data += slen;
+ snoop_dbg("data is now %p bytes left %d", data, length);
+ }
+ snoop_dbg("Completed SGE copy");
+ }
+
+ /*
+ * Why do the filter check down here? Because the event tracing has its
+ * own filtering and we need to have walked the SGE list first.
+ */
+ if (!ppd->dd->hfi1_snoop.filter_callback) {
+ snoop_dbg("filter not set\n");
+ ret = HFI1_FILTER_HIT;
+ } else {
+ ret = ppd->dd->hfi1_snoop.filter_callback(
+ &ahdr->ibh,
+ NULL,
+ ppd->dd->hfi1_snoop.filter_value);
+ }
+
+ switch (ret) {
+ case HFI1_FILTER_ERR:
+ snoop_dbg("Error in filter call");
+ /* fall through */
+ case HFI1_FILTER_MISS:
+ snoop_dbg("Filter Miss");
+ kfree(s_packet);
+ break;
+ case HFI1_FILTER_HIT:
+ snoop_dbg("Capturing packet");
+ snoop_list_add_tail(s_packet, ppd->dd);
+
+ if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
+ (ppd->dd->hfi1_snoop.mode_flag &
+ HFI1_PORT_SNOOP_MODE))) {
+ unsigned long flags;
+
+ snoop_dbg("Dropping packet");
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_send_complete(
+ qp,
+ qp->s_wqe,
+ IB_WC_SUCCESS);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_rc_send_complete(qp, &ahdr->ibh);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return 0;
+ }
+ break;
+ default:
+ kfree(s_packet);
+ break;
+ }
+out:
+ return hfi1_verbs_send_pio(qp, ahdr, hdrwords, ss, len, plen, dwords,
+ md.u.pbc);
+}
+
+/*
+ * Callers of this must pass an hfi1_ib_header type for the from ptr. Currently
+ * this can be used anywhere, but the intention is for inline ACKs for RC and
+ * CCA packets. We don't restrict this usage though.
+ */
+void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
+ u64 pbc, const void *from, size_t count)
+{
+ int snoop_mode = 0;
+ int md_len = 0;
+ struct capture_md md;
+ struct snoop_packet *s_packet = NULL;
+
+ /*
+ * count is in dwords so we need to convert to bytes.
+ * We also need to account for CRC which would be tacked on by hardware.
+ */
+ int packet_len = (count << 2) + 4;
+ int ret;
+
+ snoop_dbg("ACK OUT: len %d", packet_len);
+
+ if (!dd->hfi1_snoop.filter_callback) {
+ snoop_dbg("filter not set");
+ ret = HFI1_FILTER_HIT;
+ } else {
+ ret = dd->hfi1_snoop.filter_callback(
+ (struct hfi1_ib_header *)from,
+ NULL,
+ dd->hfi1_snoop.filter_value);
+ }
+
+ switch (ret) {
+ case HFI1_FILTER_ERR:
+ snoop_dbg("Error in filter call");
+ /* fall through */
+ case HFI1_FILTER_MISS:
+ snoop_dbg("Filter Miss");
+ break;
+ case HFI1_FILTER_HIT:
+ snoop_dbg("Capturing packet");
+ if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
+ snoop_mode = 1;
+ if ((snoop_mode == 0) ||
+ unlikely(snoop_flags & SNOOP_USE_METADATA))
+ md_len = sizeof(struct capture_md);
+
+ s_packet = allocate_snoop_packet(packet_len, 0, md_len);
+
+ if (unlikely(s_packet == NULL)) {
+ dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
+ goto inline_pio_out;
+ }
+
+ s_packet->total_len = packet_len + md_len;
+
+ /* Fill in the metadata for the packet */
+ if (md_len > 0) {
+ memset(&md, 0, sizeof(struct capture_md));
+ md.port = 1;
+ md.dir = PKT_DIR_EGRESS;
+ md.u.pbc = pbc;
+ memcpy(s_packet->data, &md, md_len);
+ }
+
+ /* Add the packet data which is a single buffer */
+ memcpy(s_packet->data + md_len, from, packet_len);
+
+ snoop_list_add_tail(s_packet, dd);
+
+ if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
+ snoop_dbg("Dropping packet");
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+inline_pio_out:
+ pio_copy(dd, pbuf, pbc, from, count);
+}
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/staging/rdma/hfi1/dma.c
new file mode 100644
index 000000000000..e03bd735173c
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/dma.c
@@ -0,0 +1,186 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ return (u64) cpu_addr;
+}
+
+static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ if (offset + size > PAGE_SIZE)
+ return BAD_DMA_ADDRESS;
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+
+ return addr;
+}
+
+static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64) page_address(sg_page(sg));
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+ return ret;
+}
+
+static void hfi1_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops hfi1_dma_mapping_ops = {
+ .mapping_error = hfi1_mapping_error,
+ .map_single = hfi1_dma_map_single,
+ .unmap_single = hfi1_dma_unmap_single,
+ .map_page = hfi1_dma_map_page,
+ .unmap_page = hfi1_dma_unmap_page,
+ .map_sg = hfi1_map_sg,
+ .unmap_sg = hfi1_unmap_sg,
+ .sync_single_for_cpu = hfi1_sync_single_for_cpu,
+ .sync_single_for_device = hfi1_sync_single_for_device,
+ .alloc_coherent = hfi1_dma_alloc_coherent,
+ .free_coherent = hfi1_dma_free_coherent
+};
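+
+/*
+ * With these stub ops, "mapping" is an identity transform. A minimal
+ * sketch of what a verbs consumer observes (illustrative only):
+ *
+ * void *cpu_addr = kmalloc(len, GFP_KERNEL);
+ * u64 dma_addr = ib_dma_map_single(ibdev, cpu_addr, len, DMA_TO_DEVICE);
+ *
+ * Here dma_addr == (u64)cpu_addr, because the driver moves data with the
+ * CPU rather than with hardware DMA.
+ */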
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c
new file mode 100644
index 000000000000..c0a59001e5cd
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/driver.c
@@ -0,0 +1,1241 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/prefetch.h>
+
+#include "hfi.h"
+#include "trace.h"
+#include "qp.h"
+#include "sdma.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the initialization code.
+ */
+const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
+
+DEFINE_SPINLOCK(hfi1_devs_lock);
+LIST_HEAD(hfi1_dev_list);
+DEFINE_MUTEX(hfi1_mutex); /* general driver use */
+
+unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
+module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");
+
+unsigned int hfi1_cu = 1;
+module_param_named(cu, hfi1_cu, uint, S_IRUGO);
+MODULE_PARM_DESC(cu, "Credit return units");
+
+unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
+static int hfi1_caps_set(const char *, const struct kernel_param *);
+static int hfi1_caps_get(char *, const struct kernel_param *);
+static const struct kernel_param_ops cap_ops = {
+ .set = hfi1_caps_set,
+ .get = hfi1_caps_get
+};
+module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
+MODULE_VERSION(HFI1_DRIVER_VERSION);
+
+/*
+ * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define MAX_PKT_RECV 64
+#define EGR_HEAD_UPDATE_THRESHOLD 16
+
+struct hfi1_ib_stats hfi1_stats;
+
+static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
+ cap_mask = *cap_mask_ptr, value, diff,
+ write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_WRITABLE_MASK);
+
+ ret = kstrtoul(val, 0, &value);
+ if (ret) {
+ pr_warn("Invalid module parameter value for 'cap_mask'\n");
+ goto done;
+ }
+ /* Get the changed bits (except the locked bit) */
+ diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);
+
+ /* Remove any bits that are not allowed to change after driver load */
+ if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
+ pr_warn("Ignoring non-writable capability bits %#lx\n",
+ diff & ~write_mask);
+ diff &= write_mask;
+ }
+
+ /* Mask off any reserved bits */
+ diff &= ~HFI1_CAP_RESERVED_MASK;
+ /* Clear any previously set and changing bits */
+ cap_mask &= ~diff;
+ /* Update the bits with the new capability */
+ cap_mask |= (value & diff);
+ /* Check for any kernel/user restrictions */
+ diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
+ ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
+ cap_mask &= ~diff;
+ /* Set the bitmask to the final set */
+ *cap_mask_ptr = cap_mask;
+done:
+ return ret;
+}
+
+static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
+{
+ unsigned long cap_mask = *(unsigned long *)kp->arg;
+
+ cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
+ cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
+}
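+
+/*
+ * Because cap_mask is registered above with module_param_cb(), these two
+ * callbacks run whenever the parameter is read or written at runtime
+ * (e.g. through /sys/module/.../parameters/cap_mask), not just at module
+ * load; hfi1_caps_set() refuses to change locked or reserved bits.
+ */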
+
+const char *get_unit_name(int unit)
+{
+ static char iname[16];
+
+ snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
+ return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int hfi1_count_active_units(void)
+{
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ unsigned long flags;
+ int pidx, nunits_active = 0;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ list_for_each_entry(dd, &hfi1_dev_list, list) {
+ if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && ppd->linkup) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int hfi1_count_units(int *npresentp, int *nupp)
+{
+ int nunits = 0, npresent = 0, nup = 0;
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+
+ list_for_each_entry(dd, &hfi1_dev_list, list) {
+ nunits++;
+ if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
+ npresent++;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && ppd->linkup)
+ nup++;
+ }
+ }
+
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+
+ return nunits;
+}
+
+/*
+ * Get address of eager buffer from its index (allocated in chunks, not
+ * contiguous).
+ */
+static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
+ u8 *update)
+{
+ u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);
+
+ *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
+ return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
+ (offset * RCV_BUF_BLOCK_SIZE));
+}
+
+/*
+ * Validate and encode a given RcvArray buffer size.
+ * The function will check whether the given size falls within
+ * allowed size ranges for the respective type and, optionally,
+ * return the proper encoding.
+ */
+inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
+{
+ if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
+ return 0;
+ if (unlikely(size < MIN_EAGER_BUFFER))
+ return 0;
+ if (size >
+ (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
+ return 0;
+ if (encoded)
+ *encoded = ilog2(size / PAGE_SIZE) + 1;
+ return 1;
+}
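+
+/*
+ * Worked example (assuming a 4 KiB PAGE_SIZE): an 8 KiB eager buffer is
+ * encoded as ilog2(8192 / 4096) + 1 = 2, while a single-page buffer
+ * encodes as 1; unaligned or out-of-range sizes make the function
+ * return 0.
+ */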
+
+static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet)
+{
+ struct hfi1_message_header *rhdr = packet->hdr;
+ u32 rte = rhf_rcv_type_err(packet->rhf);
+ int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+
+ if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
+ return;
+
+ if (packet->rhf & RHF_TID_ERR) {
+ /* For TIDERR and RC QPs preemptively schedule a NAK */
+ struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
+ struct hfi1_other_headers *ohdr = NULL;
+ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ u16 lid = be16_to_cpu(hdr->lrh[1]);
+ u32 qp_num;
+ u32 rcv_flags = 0;
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto drop;
+
+ /* Check for GRH */
+ if (lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ rcv_flags |= HFI1_HAS_GRH;
+ } else
+ goto drop;
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ if (lid < HFI1_MULTICAST_LID_BASE) {
+ struct hfi1_qp *qp;
+
+ rcu_read_lock();
+ qp = hfi1_lookup_qpn(ibp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ /*
+ * Handle only RC QPs - for other QP types drop error
+ * packet.
+ */
+ spin_lock(&qp->r_lock);
+
+ /* Check for valid receive state. */
+ if (!(ib_hfi1_state_ops[qp->state] &
+ HFI1_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ hfi1_rc_hdrerr(
+ rcd,
+ hdr,
+ rcv_flags,
+ qp);
+ break;
+ default:
+ /* For now don't handle any other QP types */
+ break;
+ }
+
+ spin_unlock(&qp->r_lock);
+ rcu_read_unlock();
+ } /* Unicast QP */
+ } /* Valid packet with TIDErr */
+
+ /* handle "RcvTypeErr" flags */
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ {
+ u32 opcode;
+ void *ebuf = NULL;
+ __be32 *bth = NULL;
+
+ if (rhf_use_egr_bfr(packet->rhf))
+ ebuf = packet->ebuf;
+
+ if (ebuf == NULL)
+ goto drop; /* this should never happen */
+
+ if (lnh == HFI1_LRH_BTH)
+ bth = (__be32 *)ebuf;
+ else if (lnh == HFI1_LRH_GRH)
+ bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
+ else
+ goto drop;
+
+ opcode = be32_to_cpu(bth[0]) >> 24;
+ opcode &= 0xff;
+
+ if (opcode == IB_OPCODE_CNP) {
+ /*
+ * Only in pre-B0 h/w is the CNP_OPCODE handled
+ * via this code path (errata 291394).
+ */
+ struct hfi1_qp *qp = NULL;
+ u32 lqpn, rqpn;
+ u16 rlid;
+ u8 svc_type, sl, sc5;
+
+ sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
+ if (rhf_dc_info(packet->rhf))
+ sc5 |= 0x10;
+ sl = ibp->sc_to_sl[sc5];
+
+ lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
+ rcu_read_lock();
+ qp = hfi1_lookup_qpn(ibp, lqpn);
+ if (qp == NULL) {
+ rcu_read_unlock();
+ goto drop;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ rlid = 0;
+ rqpn = 0;
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ case IB_QPT_UC:
+ rlid = be16_to_cpu(rhdr->lrh[3]);
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ default:
+ goto drop;
+ }
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ rcu_read_unlock();
+ }
+
+ packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
+ break;
+ }
+ default:
+ break;
+ }
+
+drop:
+ return;
+}
+
+static inline void init_packet(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet)
+{
+ packet->rsize = rcd->rcvhdrqentsize; /* words */
+ packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
+ packet->rcd = rcd;
+ packet->updegr = 0;
+ packet->etail = -1;
+ packet->rhf_addr = (__le32 *) rcd->rcvhdrq + rcd->head +
+ rcd->dd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+ packet->rhqoff = rcd->head;
+ packet->numpkt = 0;
+ packet->rcv_flags = 0;
+}
+
+#ifndef CONFIG_PRESCAN_RXQ
+static void prescan_rxq(struct hfi1_packet *packet) {}
+#else /* CONFIG_PRESCAN_RXQ */
+static int prescan_receive_queue;
+
+static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
+ struct hfi1_other_headers *ohdr,
+ u64 rhf, struct ib_grh *grh)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u32 bth1;
+ u8 sc5, svc_type;
+ int is_fecn, is_becn;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ case IB_QPT_UC: /* LATER */
+ case IB_QPT_RC: /* LATER */
+ default:
+ return;
+ }
+
+ is_fecn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
+ HFI1_FECN_MASK;
+ is_becn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
+ HFI1_BECN_MASK;
+
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ if (rhf_dc_info(rhf))
+ sc5 |= 0x10;
+
+ if (is_fecn) {
+ u32 src_qpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
+ u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+ u16 dlid = be16_to_cpu(hdr->lrh[1]);
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+
+ return_cnp(ibp, qp, src_qpn, pkey, dlid, slid, sc5, grh);
+ }
+
+ if (is_becn) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ u8 sl = ibp->sc_to_sl[sc5];
+
+ process_becn(ppd, sl, 0, lqpn, 0, svc_type);
+ }
+
+ /* turn off BECN, or FECN */
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ bth1 &= ~(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
+ bth1 &= ~(HFI1_BECN_MASK << HFI1_BECN_SHIFT);
+ ohdr->bth[1] = cpu_to_be32(bth1);
+}
+
+struct ps_mdata {
+ struct hfi1_ctxtdata *rcd;
+ u32 rsize;
+ u32 maxcnt;
+ u32 ps_head;
+ u32 ps_tail;
+ u32 ps_seq;
+};
+
+static inline void init_ps_mdata(struct ps_mdata *mdata,
+ struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ mdata->rcd = rcd;
+ mdata->rsize = packet->rsize;
+ mdata->maxcnt = packet->maxcnt;
+
+ if (rcd->ps_state.initialized == 0) {
+ mdata->ps_head = packet->rhqoff;
+ rcd->ps_state.initialized++;
+ } else
+ mdata->ps_head = rcd->ps_state.ps_head;
+
+ if (HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+ mdata->ps_tail = packet->hdrqtail;
+ mdata->ps_seq = 0; /* not used with DMA_RTAIL */
+ } else {
+ mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
+ mdata->ps_seq = rcd->seq_cnt;
+ }
+}
+
+static inline int ps_done(struct ps_mdata *mdata, u64 rhf)
+{
+ if (HFI1_CAP_IS_KSET(DMA_RTAIL))
+ return mdata->ps_head == mdata->ps_tail;
+ return mdata->ps_seq != rhf_rcv_seq(rhf);
+}
+
+static inline void update_ps_mdata(struct ps_mdata *mdata)
+{
+ struct hfi1_ctxtdata *rcd = mdata->rcd;
+
+ mdata->ps_head += mdata->rsize;
+ if (mdata->ps_head > mdata->maxcnt)
+ mdata->ps_head = 0;
+ rcd->ps_state.ps_head = mdata->ps_head;
+ if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
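+ /*
+ * The sequence number wraps in the range 1..13, matching the
+ * rcd->seq_cnt handling in the receive interrupt handlers.
+ */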
+ if (++mdata->ps_seq > 13)
+ mdata->ps_seq = 1;
+ }
+}
+
+/*
+ * prescan_rxq - search through the receive queue looking for packets
+ * containing Explicit Congestion Notifications (FECNs or BECNs).
+ * When an ECN is found, process the Congestion Notification and toggle
+ * it off.
+ */
+static void prescan_rxq(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ps_mdata mdata;
+
+ if (!prescan_receive_queue)
+ return;
+
+ init_ps_mdata(&mdata, packet);
+
+ while (1) {
+ struct hfi1_devdata *dd = rcd->dd;
+ struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
+ __le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
+ dd->rhf_offset;
+ struct hfi1_qp *qp;
+ struct hfi1_ib_header *hdr;
+ struct hfi1_other_headers *ohdr;
+ struct ib_grh *grh = NULL;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn;
+ int is_ecn = 0;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf))
+ break;
+
+ if (etype != RHF_RCV_TYPE_IB)
+ goto next;
+
+ hdr = (struct hfi1_ib_header *)
+ hfi1_get_msgheader(dd, rhf_addr);
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+
+ if (lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH) {
+ ohdr = &hdr->u.l.oth;
+ grh = &hdr->u.l.grh;
+ } else
+ goto next; /* just in case */
+
+ is_ecn |= be32_to_cpu(ohdr->bth[1]) &
+ (HFI1_FECN_MASK << HFI1_FECN_SHIFT);
+ is_ecn |= be32_to_cpu(ohdr->bth[1]) &
+ (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
+
+ if (!is_ecn)
+ goto next;
+
+ qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ rcu_read_lock();
+ qp = hfi1_lookup_qpn(ibp, qpn);
+
+ if (qp == NULL) {
+ rcu_read_unlock();
+ goto next;
+ }
+
+ process_ecn(qp, hdr, ohdr, rhf, grh);
+ rcu_read_unlock();
+next:
+ update_ps_mdata(&mdata);
+ }
+}
+#endif /* CONFIG_PRESCAN_RXQ */
+
+#define RCV_PKT_OK 0x0
+#define RCV_PKT_MAX 0x1
+
+static inline int process_rcv_packet(struct hfi1_packet *packet)
+{
+ int ret = RCV_PKT_OK;
+
+ packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
+ packet->etype = rhf_rcv_type(packet->rhf);
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->ebuf = NULL;
+ if (rhf_use_egr_bfr(packet->rhf)) {
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)+2)) * 4));
+ }
+
+ /*
+ * Call a type specific handler for the packet. We
+ * should be able to trust that etype won't be beyond
+ * the range of valid indexes. If so something is really
+ * wrong and we can probably just let things come
+ * crashing down. There is no need to eat another
+ * comparison in this performance critical code.
+ */
+ packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ if (packet->numpkt == MAX_PKT_RECV) {
+ ret = RCV_PKT_MAX;
+ this_cpu_inc(*packet->rcd->dd->rcv_limit);
+ }
+
+ packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->dd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+
+ return ret;
+}
+
+static inline void process_rcv_update(int last, struct hfi1_packet *packet)
+{
+ /*
+ * Update head regs etc. every 16 packets, if not the last pkt,
+ * to help prevent rcvhdrq overflows when many packets
+ * are processed and the queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ if (!last && !(packet->numpkt & 0xf)) {
+ update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
+ packet->etail, 0, 0);
+ packet->updegr = 0;
+ }
+ packet->rcv_flags = 0;
+}
+
+static inline void finish_packet(struct hfi1_packet *packet)
+{
+ /*
+ * Nothing we need to free for the packet.
+ *
+ * The only thing we need to do is a final update and call for an
+ * interrupt.
+ */
+ update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
+ packet->etail, rcv_intr_dynamic, packet->numpkt);
+}
+
+static inline void process_rcv_qp_work(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd;
+ struct hfi1_qp *qp, *nqp;
+
+ rcd = packet->rcd;
+ rcd->head = packet->rhqoff;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & HFI1_R_RSP_NAK) {
+ qp->r_flags &= ~HFI1_R_RSP_NAK;
+ hfi1_send_rc_ack(rcd, qp, 0);
+ }
+ if (qp->r_flags & HFI1_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~HFI1_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_hfi1_state_ops[qp->state] &
+ HFI1_PROCESS_OR_FLUSH_SEND)
+ hfi1_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/*
+ * Handle receive interrupts when using the no dma rtail option.
+ */
+void handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd)
+{
+ u32 seq;
+ int last = 0;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ seq = rhf_rcv_seq(packet.rhf);
+ if (seq != rcd->seq_cnt)
+ goto bail;
+
+ prescan_rxq(&packet);
+
+ while (!last) {
+ last = process_rcv_packet(&packet);
+ seq = rhf_rcv_seq(packet.rhf);
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ process_rcv_update(last, &packet);
+ }
+ process_rcv_qp_work(&packet);
+bail:
+ finish_packet(&packet);
+}
+
+void handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd)
+{
+ u32 hdrqtail;
+ int last = 0;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ hdrqtail = get_rcvhdrtail(rcd);
+ if (packet.rhqoff == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+
+ prescan_rxq(&packet);
+
+ while (!last) {
+ last = process_rcv_packet(&packet);
+ if (packet.rhqoff == hdrqtail)
+ last = 1;
+ process_rcv_update(last, &packet);
+ }
+ process_rcv_qp_work(&packet);
+bail:
+ finish_packet(&packet);
+}
+
+static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ dd->rcd[i]->do_interrupt =
+ &handle_receive_interrupt_nodma_rtail;
+}
+
+static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ dd->rcd[i]->do_interrupt =
+ &handle_receive_interrupt_dma_rtail;
+}
+
+/*
+ * handle_receive_interrupt - receive a packet
+ * @rcd: the context
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * This is the slow path interrupt handler.
+ */
+void handle_receive_interrupt(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 hdrqtail;
+ int last = 0, needset = 1;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+
+ if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+ u32 seq = rhf_rcv_seq(packet.rhf);
+
+ if (seq != rcd->seq_cnt)
+ goto bail;
+ hdrqtail = 0;
+ } else {
+ hdrqtail = get_rcvhdrtail(rcd);
+ if (packet.rhqoff == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+ }
+
+ prescan_rxq(&packet);
+
+ while (!last) {
+ if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
+ DROP_PACKET_OFF) == DROP_PACKET_ON)) {
+ dd->do_drop = 0;
+
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
+ packet.rhqoff +
+ dd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else {
+ last = process_rcv_packet(&packet);
+ }
+
+ if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+ u32 seq = rhf_rcv_seq(packet.rhf);
+
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ if (needset) {
+ dd_dev_info(dd,
+ "Switching to NO_DMA_RTAIL\n");
+ set_all_nodma_rtail(dd);
+ needset = 0;
+ }
+ } else {
+ if (packet.rhqoff == hdrqtail)
+ last = 1;
+ if (needset) {
+ dd_dev_info(dd,
+ "Switching to DMA_RTAIL\n");
+ set_all_dma_rtail(dd);
+ needset = 0;
+ }
+ }
+
+ process_rcv_update(last, &packet);
+ }
+
+ process_rcv_qp_work(&packet);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+}
+
+/*
+ * Convert a given MTU size to the on-wire MAD packet enumeration.
+ * Return default_if_bad if the size is invalid.
+ */
+int mtu_to_enum(u32 mtu, int default_if_bad)
+{
+ switch (mtu) {
+ case 0: return OPA_MTU_0;
+ case 256: return OPA_MTU_256;
+ case 512: return OPA_MTU_512;
+ case 1024: return OPA_MTU_1024;
+ case 2048: return OPA_MTU_2048;
+ case 4096: return OPA_MTU_4096;
+ case 8192: return OPA_MTU_8192;
+ case 10240: return OPA_MTU_10240;
+ }
+ return default_if_bad;
+}
+
+u16 enum_to_mtu(int mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_0: return 0;
+ case OPA_MTU_256: return 256;
+ case OPA_MTU_512: return 512;
+ case OPA_MTU_1024: return 1024;
+ case OPA_MTU_2048: return 2048;
+ case OPA_MTU_4096: return 4096;
+ case OPA_MTU_8192: return 8192;
+ case OPA_MTU_10240: return 10240;
+ default: return 0xffff;
+ }
+}
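+
+/*
+ * On valid sizes the two helpers above are inverses of each other:
+ * enum_to_mtu(mtu_to_enum(mtu, x)) == mtu for every entry in the table,
+ * while invalid inputs map to default_if_bad and 0xffff respectively.
+ */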
+
+/*
+ * set_mtu - set the MTU
+ * @ppd: the per port data
+ *
+ * We can handle "any" incoming size; the issue here is whether we
+ * need to restrict our outgoing size. We do not deal with what happens
+ * to programs that are already running when the size changes.
+ */
+int set_mtu(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int i, drain, ret = 0, is_up = 0;
+
+ ppd->ibmtu = 0;
+ for (i = 0; i < ppd->vls_supported; i++)
+ if (ppd->ibmtu < dd->vld[i].mtu)
+ ppd->ibmtu = dd->vld[i].mtu;
+ ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
+
+ mutex_lock(&ppd->hls_lock);
+ if (ppd->host_link_state == HLS_UP_INIT
+ || ppd->host_link_state == HLS_UP_ARMED
+ || ppd->host_link_state == HLS_UP_ACTIVE)
+ is_up = 1;
+
+ drain = !is_ax(dd) && is_up;
+
+ if (drain)
+ /*
+ * MTU is specified per-VL. To ensure that no packet gets
+ * stuck (due, e.g., to the MTU for the packet's VL being
+ * reduced), empty the per-VL FIFOs before adjusting MTU.
+ */
+ ret = stop_drain_data_vls(dd);
+
+ if (ret) {
+ dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
+ __func__);
+ goto err;
+ }
+
+ hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);
+
+ if (drain)
+ open_fill_data_vls(dd); /* reopen all VLs */
+
+err:
+ mutex_unlock(&ppd->hls_lock);
+
+ return ret;
+}
+
+int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+ hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
+
+ dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
+
+ return 0;
+}
+
+/*
+ * The following deals with the "obviously simple" task of overriding the state
+ * of the LEDs, which normally indicate link physical and logical status.
+ * The complications arise in dealing with different hardware mappings,
+ * the board-dependent routine being called from interrupts,
+ * and then there's the requirement to _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF << LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
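+
+/*
+ * Illustrative encoding for hfi1_set_led_override() below:
+ * val = (16 << LED_OVER_FREQ_SHIFT) | 0x0F requests phase 0 = 0xF and
+ * phase 1 = 0x0, with each phase lasting (HZ << 4) / 16 == HZ jiffies,
+ * i.e. about one second. A val of 0 clears both phases, so the override
+ * timer fires once more and then stops re-arming.
+ */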
+
+static void run_led_override(unsigned long opaque)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
+ struct hfi1_devdata *dd = ppd->dd;
+ int timeoff;
+ int ph_idx;
+
+ if (!(dd->flags & HFI1_INITTED))
+ return;
+
+ ph_idx = ppd->led_override_phase++ & 1;
+ ppd->led_override = ppd->led_override_vals[ph_idx];
+ timeoff = ppd->led_override_timeoff;
+
+ /*
+	 * Don't re-fire the timer if the user asked for it to be off; we let
+	 * it fire one more time after they turn it off to keep the logic simple.
+ */
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+}
+
+void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int timeoff, freq;
+
+ if (!(dd->flags & HFI1_INITTED))
+ return;
+
+	/* First check whether we are blinking. If not, use 1 Hz polling. */
+ timeoff = HZ;
+ freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+ if (freq) {
+ /* For blink, set each phase from one nybble of val */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = (val >> 4) & 0xF;
+ timeoff = (HZ << 4)/freq;
+ } else {
+		/* Non-blink: set both phases the same. */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = val & 0xF;
+ }
+ ppd->led_override_timeoff = timeoff;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the function will be called soon, to look at our request.
+ */
+ if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
+ /* Need to start timer */
+ init_timer(&ppd->led_override_timer);
+ ppd->led_override_timer.function = run_led_override;
+ ppd->led_override_timer.data = (unsigned long) ppd;
+ ppd->led_override_timer.expires = jiffies + 1;
+ add_timer(&ppd->led_override_timer);
+ } else {
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + 1);
+ atomic_dec(&ppd->led_override_timer_active);
+ }
+}
+
+/**
+ * hfi1_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user contexts that use chip resources are open.
+ */
+int hfi1_reset_device(int unit)
+{
+ int ret, i;
+ struct hfi1_devdata *dd = hfi1_lookup(unit);
+ struct hfi1_pportdata *ppd;
+ unsigned long flags;
+ int pidx;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dd_dev_info(dd, "Reset on unit %u requested\n", unit);
+
+ if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
+ dd_dev_info(dd,
+ "Invalid unit number %u or not initialized or not present\n",
+ unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd)
+ for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
+ if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+ continue;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ /* Need to stop LED timer, _then_ shut off LEDs */
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+
+ /* Shut off LEDs after we are sure timer is not running */
+ ppd->led_override = LED_OVER_BOTH_OFF;
+ }
+ if (dd->flags & HFI1_HAS_SEND_DMA)
+ sdma_exit(dd);
+
+ hfi1_reset_cpu_counters(dd);
+
+ ret = hfi1_init(dd, 1);
+
+ if (ret)
+ dd_dev_err(dd,
+ "Reinitialize unit %u after reset failed with %d\n",
+ unit, ret);
+ else
+ dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
+ unit);
+
+bail:
+ return ret;
+}
+
+void handle_eflags(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ u32 rte = rhf_rcv_type_err(packet->rhf);
+
+ dd_dev_err(rcd->dd,
+ "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
+ rcd->ctxt, packet->rhf,
+ packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
+ packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
+ packet->rhf & RHF_DC_ERR ? "dc " : "",
+ packet->rhf & RHF_TID_ERR ? "tid " : "",
+ packet->rhf & RHF_LEN_ERR ? "len " : "",
+ packet->rhf & RHF_ECC_ERR ? "ecc " : "",
+ packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
+ packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
+ rte);
+
+ rcv_hdrerr(rcd, rcd->ppd, packet);
+}
+
+/*
+ * The following functions are called by the interrupt handler. They are type
+ * specific handlers for each packet type.
+ */
+int process_receive_ib(struct hfi1_packet *packet)
+{
+ trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
+ packet->rcd->ctxt,
+ rhf_err_flags(packet->rhf),
+ RHF_RCV_TYPE_IB,
+ packet->hlen,
+ packet->tlen,
+ packet->updegr,
+ rhf_egr_index(packet->rhf));
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return RHF_RCV_CONTINUE;
+ }
+
+ hfi1_ib_rcv(packet);
+ return RHF_RCV_CONTINUE;
+}
+
+int process_receive_bypass(struct hfi1_packet *packet)
+{
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ handle_eflags(packet);
+
+ dd_dev_err(packet->rcd->dd,
+ "Bypass packets are not supported in normal operation. Dropping\n");
+ return RHF_RCV_CONTINUE;
+}
+
+int process_receive_error(struct hfi1_packet *packet)
+{
+ handle_eflags(packet);
+
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ dd_dev_err(packet->rcd->dd,
+ "Unhandled error packet received. Dropping.\n");
+
+ return RHF_RCV_CONTINUE;
+}
+
+int kdeth_process_expected(struct hfi1_packet *packet)
+{
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ handle_eflags(packet);
+
+ dd_dev_err(packet->rcd->dd,
+ "Unhandled expected packet received. Dropping.\n");
+ return RHF_RCV_CONTINUE;
+}
+
+int kdeth_process_eager(struct hfi1_packet *packet)
+{
+ if (unlikely(rhf_err_flags(packet->rhf)))
+ handle_eflags(packet);
+
+ dd_dev_err(packet->rcd->dd,
+ "Unhandled eager packet received. Dropping.\n");
+ return RHF_RCV_CONTINUE;
+}
+
+int process_receive_invalid(struct hfi1_packet *packet)
+{
+ dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
+ rhf_rcv_type(packet->rhf));
+ return RHF_RCV_CONTINUE;
+}
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
new file mode 100644
index 000000000000..b61d3ae93ed1
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/eprom.c
@@ -0,0 +1,475 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/delay.h>
+#include "hfi.h"
+#include "common.h"
+#include "eprom.h"
+
+/*
+ * The EPROM is logically divided into two partitions:
+ * partition 0: the first 128K, visible from PCI ROM BAR
+ * partition 1: the rest
+ */
+#define P0_SIZE (128 * 1024)
+#define P1_START P0_SIZE
+
+/* largest erase size supported by the controller */
+#define SIZE_32KB (32 * 1024)
+#define MASK_32KB (SIZE_32KB - 1)
+
+/* controller page size, in bytes */
+#define EP_PAGE_SIZE 256
+#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
+
+/* controller commands */
+#define CMD_SHIFT 24
+#define CMD_NOP (0)
+#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
+#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
+#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
+#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
+#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
+#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
+#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
+#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
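+
+/*
+ * Each command word carries the opcode in bits 31:24 and, where used, a
+ * 24-bit flash address in bits 23:0; e.g. CMD_READ_DATA(0x20000) == 0x03020000.
+ */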
+
+/* controller interface speeds */
+#define EP_SPEED_FULL 0x2 /* full speed */
+
+/* controller status register 1 bits */
+#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
+
+/* sleep length while waiting for controller */
+#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
+#define COUNT_DELAY_SEC(n) ((n) * (1000000/WAIT_SLEEP_US))
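+/* e.g. with WAIT_SLEEP_US == 100, COUNT_DELAY_SEC(200) is 2,000,000 polls */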
+
+/* GPIO pins */
+#define EPROM_WP_N (1ull << 14) /* EPROM write line */
+
+/*
+ * Use the EPROM mutex to guard against concurrent callers within the driver.
+ * Also covers usage of eprom_available.
+ */
+static DEFINE_MUTEX(eprom_mutex);
+static int eprom_available; /* default: not available */
+
+/*
+ * Turn on the external enable line that allows writing to the flash.
+ */
+static void write_enable(struct hfi1_devdata *dd)
+{
+ /* raise signal */
+ write_csr(dd, ASIC_GPIO_OUT,
+ read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
+ /* raise enable */
+ write_csr(dd, ASIC_GPIO_OE,
+ read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
+}
+
+/*
+ * Turn off the external enable line that allows writing to the flash.
+ */
+static void write_disable(struct hfi1_devdata *dd)
+{
+ /* lower signal */
+ write_csr(dd, ASIC_GPIO_OUT,
+ read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
+ /* lower enable */
+ write_csr(dd, ASIC_GPIO_OE,
+ read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
+}
+
+/*
+ * Wait for the device to become idle. Must be called after all
+ * write or erase operations.
+ */
+static int wait_for_not_busy(struct hfi1_devdata *dd)
+{
+ unsigned long count = 0;
+ u64 reg;
+ int ret = 0;
+
+ /* starts page mode */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
+ while (1) {
+		usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
+ count++;
+ reg = read_csr(dd, ASIC_EEP_DATA);
+ if ((reg & SR1_BUSY) == 0)
+ break;
+		/* 200s is the longest expected time for a 128Mb device */
+ if (count > COUNT_DELAY_SEC(200)) {
+ dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
+ ret = -ETIMEDOUT;
+ break; /* break, not goto - must stop page mode */
+ }
+ }
+
+ /* stop page mode with a NOP */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
+
+ return ret;
+}
+
+/*
+ * Read the device ID from the SPI controller.
+ */
+static u32 read_device_id(struct hfi1_devdata *dd)
+{
+	/* read the Manufacturer and Device ID */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
+ return (u32)read_csr(dd, ASIC_EEP_DATA);
+}
+
+/*
+ * Erase the whole flash.
+ */
+static int erase_chip(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ write_enable(dd);
+
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
+ ret = wait_for_not_busy(dd);
+
+ write_disable(dd);
+
+ return ret;
+}
+
+/*
+ * Erase a range using the 32KB erase command.
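+ * For example, erase_32kb_range(dd, 0, P0_SIZE) erases partition 0 as
+ * four 32KB sectors.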
+ */
+static int erase_32kb_range(struct hfi1_devdata *dd, u32 start, u32 end)
+{
+ int ret = 0;
+
+ if (end < start)
+ return -EINVAL;
+
+ if ((start & MASK_32KB) || (end & MASK_32KB)) {
+ dd_dev_err(dd,
+ "%s: non-aligned range (0x%x,0x%x) for a 32KB erase\n",
+ __func__, start, end);
+ return -EINVAL;
+ }
+
+ write_enable(dd);
+
+ for (; start < end; start += SIZE_32KB) {
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
+ write_csr(dd, ASIC_EEP_ADDR_CMD,
+ CMD_SECTOR_ERASE_32KB(start));
+ ret = wait_for_not_busy(dd);
+ if (ret)
+ goto done;
+ }
+
+done:
+ write_disable(dd);
+
+ return ret;
+}
+
+/*
+ * Read a 256 byte (64 dword) EPROM page.
+ * All callers have verified the offset is at a page boundary.
+ */
+static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
+{
+ int i;
+
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
+ for (i = 0; i < EP_PAGE_SIZE/sizeof(u32); i++)
+ result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
+}
+
+/*
+ * Read len bytes starting at start; copy them to user address addr.
+ */
+static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
+{
+ u32 offset;
+ u32 buffer[EP_PAGE_SIZE/sizeof(u32)];
+ int ret = 0;
+
+ /* reject anything not on an EPROM page boundary */
+ if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
+ return -EINVAL;
+
+ for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
+ read_page(dd, start + offset, buffer);
+ if (copy_to_user((void __user *)(addr + offset),
+ buffer, EP_PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+/*
+ * Write a 256 byte (64 dword) EPROM page.
+ * All callers have verified the offset is at a page boundary.
+ */
+static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
+{
+ int i;
+
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
+ write_csr(dd, ASIC_EEP_DATA, data[0]);
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
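+	/* the first data word was staged above; stream the remaining 63 */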
+ for (i = 1; i < EP_PAGE_SIZE/sizeof(u32); i++)
+ write_csr(dd, ASIC_EEP_DATA, data[i]);
+ /* will close the open page */
+ return wait_for_not_busy(dd);
+}
+
+/*
+ * Write len bytes starting at start; read them from user address addr.
+ */
+static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
+{
+ u32 offset;
+ u32 buffer[EP_PAGE_SIZE/sizeof(u32)];
+ int ret = 0;
+
+ /* reject anything not on an EPROM page boundary */
+ if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
+ return -EINVAL;
+
+ write_enable(dd);
+
+ for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
+ if (copy_from_user(buffer, (void __user *)(addr + offset),
+ EP_PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = write_page(dd, start + offset, buffer);
+ if (ret)
+ goto done;
+ }
+
+done:
+ write_disable(dd);
+ return ret;
+}
+
+/*
+ * Perform the given operation on the EPROM. Called from user space. The
+ * user credentials have already been checked.
+ *
+ * Return 0 on success, -ERRNO on error
+ */
+int handle_eprom_command(const struct hfi1_cmd *cmd)
+{
+ struct hfi1_devdata *dd;
+ u32 dev_id;
+ int ret = 0;
+
+ /*
+ * The EPROM is per-device, so use unit 0 as that will always
+ * exist.
+ */
+ dd = hfi1_lookup(0);
+ if (!dd) {
+ pr_err("%s: cannot find unit 0!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* lock against other callers touching the ASIC block */
+ mutex_lock(&eprom_mutex);
+
+ /* some platforms do not have an EPROM */
+ if (!eprom_available) {
+ ret = -ENOSYS;
+ goto done_asic;
+ }
+
+ /* lock against the other HFI on another OS */
+ ret = acquire_hw_mutex(dd);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire hw mutex, no EPROM support\n",
+ __func__);
+ goto done_asic;
+ }
+
+ dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
+ __func__, cmd->type, cmd->len, cmd->addr);
+
+ switch (cmd->type) {
+ case HFI1_CMD_EP_INFO:
+ if (cmd->len != sizeof(u32)) {
+ ret = -ERANGE;
+ break;
+ }
+ dev_id = read_device_id(dd);
+ /* addr points to a u32 user buffer */
+ if (copy_to_user((void __user *)cmd->addr, &dev_id,
+ sizeof(u32)))
+ ret = -EFAULT;
+ break;
+ case HFI1_CMD_EP_ERASE_CHIP:
+ ret = erase_chip(dd);
+ break;
+ case HFI1_CMD_EP_ERASE_P0:
+ if (cmd->len != P0_SIZE) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = erase_32kb_range(dd, 0, cmd->len);
+ break;
+ case HFI1_CMD_EP_ERASE_P1:
+ /* check for overflow */
+ if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = erase_32kb_range(dd, P1_START, P1_START + cmd->len);
+ break;
+ case HFI1_CMD_EP_READ_P0:
+ if (cmd->len != P0_SIZE) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = read_length(dd, 0, cmd->len, cmd->addr);
+ break;
+ case HFI1_CMD_EP_READ_P1:
+ /* check for overflow */
+ if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = read_length(dd, P1_START, cmd->len, cmd->addr);
+ break;
+ case HFI1_CMD_EP_WRITE_P0:
+ if (cmd->len > P0_SIZE) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = write_length(dd, 0, cmd->len, cmd->addr);
+ break;
+ case HFI1_CMD_EP_WRITE_P1:
+ /* check for overflow */
+ if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = write_length(dd, P1_START, cmd->len, cmd->addr);
+ break;
+ default:
+ dd_dev_err(dd, "%s: unexpected command %d\n",
+ __func__, cmd->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ release_hw_mutex(dd);
+done_asic:
+ mutex_unlock(&eprom_mutex);
+ return ret;
+}
+
+/*
+ * Initialize the EPROM handler.
+ */
+int eprom_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* only the discrete chip has an EPROM, nothing to do */
+ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
+ return 0;
+
+ /* lock against other callers */
+ mutex_lock(&eprom_mutex);
+ if (eprom_available) /* already initialized */
+ goto done_asic;
+
+ /*
+ * Lock against the other HFI on another OS - the mutex above
+ * would have caught anything in this driver. It is OK if
+ * both OSes reset the EPROM - as long as they don't do it at
+ * the same time.
+ */
+ ret = acquire_hw_mutex(dd);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire hw mutex, no EPROM support\n",
+ __func__);
+ goto done_asic;
+ }
+
+ /* reset EPROM to be sure it is in a good state */
+
+ /* set reset */
+ write_csr(dd, ASIC_EEP_CTL_STAT,
+ ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+ /* clear reset, set speed */
+ write_csr(dd, ASIC_EEP_CTL_STAT,
+ EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+
+ /* wake the device with command "release powerdown NoID" */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
+
+ eprom_available = 1;
+ release_hw_mutex(dd);
+done_asic:
+ mutex_unlock(&eprom_mutex);
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/staging/rdma/hfi1/eprom.h
new file mode 100644
index 000000000000..64a64276be81
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/eprom.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+struct hfi1_cmd;
+struct hfi1_devdata;
+
+int eprom_init(struct hfi1_devdata *dd);
+int handle_eprom_command(const struct hfi1_cmd *cmd);
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
new file mode 100644
index 000000000000..469861750b76
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -0,0 +1,2140 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <asm/pgtable.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/uio.h>
+
+#include "hfi.h"
+#include "pio.h"
+#include "device.h"
+#include "common.h"
+#include "trace.h"
+#include "user_sdma.h"
+#include "eprom.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
+
+/*
+ * File operation functions
+ */
+static int hfi1_file_open(struct inode *, struct file *);
+static int hfi1_file_close(struct inode *, struct file *);
+static ssize_t hfi1_file_write(struct file *, const char __user *,
+ size_t, loff_t *);
+static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
+static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
+static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
+
+static u64 kvirt_to_phys(void *);
+static int assign_ctxt(struct file *, struct hfi1_user_info *);
+static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
+static int user_init(struct file *);
+static int get_ctxt_info(struct file *, void __user *, __u32);
+static int get_base_info(struct file *, void __user *, __u32);
+static int setup_ctxt(struct file *);
+static int setup_subctxt(struct hfi1_ctxtdata *);
+static int get_user_context(struct file *, struct hfi1_user_info *,
+ int, unsigned);
+static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
+static int allocate_ctxt(struct file *, struct hfi1_devdata *,
+ struct hfi1_user_info *);
+static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
+static unsigned int poll_next(struct file *, struct poll_table_struct *);
+static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
+static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
+static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
+static int exp_tid_free(struct file *, struct hfi1_tid_info *);
+static void unlock_exp_tids(struct hfi1_ctxtdata *);
+
+static const struct file_operations hfi1_file_ops = {
+ .owner = THIS_MODULE,
+ .write = hfi1_file_write,
+ .write_iter = hfi1_write_iter,
+ .open = hfi1_file_open,
+ .release = hfi1_file_close,
+ .poll = hfi1_poll,
+ .mmap = hfi1_file_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct vm_operations_struct vm_ops = {
+ .fault = vma_fault,
+};
+
+/*
+ * Types of memory mapped into a user process's address space
+ */
+enum mmap_types {
+ PIO_BUFS = 1,
+ PIO_BUFS_SOP,
+ PIO_CRED,
+ RCV_HDRQ,
+ RCV_EGRBUF,
+ UREGS,
+ EVENTS,
+ STATUS,
+ RTAIL,
+ SUBCTXT_UREGS,
+ SUBCTXT_RCV_HDRQ,
+ SUBCTXT_EGRBUF,
+ SDMA_COMP
+};
+
+/*
+ * Masks and offsets defining the mmap tokens
+ */
+#define HFI1_MMAP_OFFSET_MASK 0xfffULL
+#define HFI1_MMAP_OFFSET_SHIFT 0
+#define HFI1_MMAP_SUBCTXT_MASK 0xfULL
+#define HFI1_MMAP_SUBCTXT_SHIFT 12
+#define HFI1_MMAP_CTXT_MASK 0xffULL
+#define HFI1_MMAP_CTXT_SHIFT 16
+#define HFI1_MMAP_TYPE_MASK 0xfULL
+#define HFI1_MMAP_TYPE_SHIFT 24
+#define HFI1_MMAP_MAGIC_MASK 0xffffffffULL
+#define HFI1_MMAP_MAGIC_SHIFT 32
+
+#define HFI1_MMAP_MAGIC 0xdabbad00
+
+#define HFI1_MMAP_TOKEN_SET(field, val) \
+ (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
+#define HFI1_MMAP_TOKEN_GET(field, token) \
+ (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
+#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
+ (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
+ HFI1_MMAP_TOKEN_SET(TYPE, type) | \
+ HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
+ HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
+ HFI1_MMAP_TOKEN_SET(OFFSET, ((unsigned long)addr & ~PAGE_MASK)))
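+
+/*
+ * Illustrative layout: HFI1_MMAP_TOKEN(UREGS, 3, 1, addr) packs the magic
+ * in bits 63:32, the type (UREGS) in 27:24, the context (3) in 23:16, the
+ * subcontext (1) in 15:12, and the page offset of addr in 11:0.
+ * is_valid_mmap() below checks only the magic.
+ */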
+
+#define EXP_TID_SET(field, value) \
+ (((value) & EXP_TID_TID##field##_MASK) << \
+ EXP_TID_TID##field##_SHIFT)
+#define EXP_TID_CLEAR(tid, field) { \
+ (tid) &= ~(EXP_TID_TID##field##_MASK << \
+ EXP_TID_TID##field##_SHIFT); \
+ }
+#define EXP_TID_RESET(tid, field, value) do { \
+ EXP_TID_CLEAR(tid, field); \
+ (tid) |= EXP_TID_SET(field, value); \
+ } while (0)
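+
+/*
+ * Usage sketch, assuming a field name such as LEN with
+ * EXP_TID_TIDLEN_{MASK,SHIFT} defined elsewhere (e.g. common.h):
+ * EXP_TID_RESET(tid, LEN, npages) clears the old LEN field of tid
+ * and ORs in the new value.
+ */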
+
+#define dbg(fmt, ...) \
+ pr_info(fmt, ##__VA_ARGS__)
+
+
+static inline int is_valid_mmap(u64 token)
+{
+ return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
+}
+
+static int hfi1_file_open(struct inode *inode, struct file *fp)
+{
+ /* The real work is performed later in assign_ctxt() */
+ fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
+ if (fp->private_data) /* no cpu affinity by default */
+ ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
+ return fp->private_data ? 0 : -ENOMEM;
+}
+
+static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *offset)
+{
+ const struct hfi1_cmd __user *ucmd;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_cmd cmd;
+ struct hfi1_user_info uinfo;
+ struct hfi1_tid_info tinfo;
+ ssize_t consumed = 0, copy = 0, ret = 0;
+ void *dest = NULL;
+ __u64 user_val = 0;
+ int uctxt_required = 1;
+ int must_be_root = 0;
+
+ if (count < sizeof(cmd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ucmd = (const struct hfi1_cmd __user *)data;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed = sizeof(cmd);
+
+ switch (cmd.type) {
+ case HFI1_CMD_ASSIGN_CTXT:
+ uctxt_required = 0; /* assigned user context not required */
+ copy = sizeof(uinfo);
+ dest = &uinfo;
+ break;
+ case HFI1_CMD_SDMA_STATUS_UPD:
+ case HFI1_CMD_CREDIT_UPD:
+ copy = 0;
+ break;
+ case HFI1_CMD_TID_UPDATE:
+ case HFI1_CMD_TID_FREE:
+ copy = sizeof(tinfo);
+ dest = &tinfo;
+ break;
+ case HFI1_CMD_USER_INFO:
+ case HFI1_CMD_RECV_CTRL:
+ case HFI1_CMD_POLL_TYPE:
+ case HFI1_CMD_ACK_EVENT:
+ case HFI1_CMD_CTXT_INFO:
+ case HFI1_CMD_SET_PKEY:
+ case HFI1_CMD_CTXT_RESET:
+ copy = 0;
+ user_val = cmd.addr;
+ break;
+ case HFI1_CMD_EP_INFO:
+ case HFI1_CMD_EP_ERASE_CHIP:
+ case HFI1_CMD_EP_ERASE_P0:
+ case HFI1_CMD_EP_ERASE_P1:
+ case HFI1_CMD_EP_READ_P0:
+ case HFI1_CMD_EP_READ_P1:
+ case HFI1_CMD_EP_WRITE_P0:
+ case HFI1_CMD_EP_WRITE_P1:
+ uctxt_required = 0; /* assigned user context not required */
+ must_be_root = 1; /* validate user */
+ copy = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* If the command comes with user data, copy it. */
+ if (copy) {
+ if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ consumed += copy;
+ }
+
+ /*
+ * Make sure there is a uctxt when needed.
+ */
+ if (uctxt_required && !uctxt) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* only root can do these operations */
+ if (must_be_root && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ switch (cmd.type) {
+ case HFI1_CMD_ASSIGN_CTXT:
+ ret = assign_ctxt(fp, &uinfo);
+ if (ret < 0)
+ goto bail;
+ ret = setup_ctxt(fp);
+ if (ret)
+ goto bail;
+ ret = user_init(fp);
+ break;
+ case HFI1_CMD_CTXT_INFO:
+ ret = get_ctxt_info(fp, (void __user *)(unsigned long)
+ user_val, cmd.len);
+ break;
+ case HFI1_CMD_USER_INFO:
+ ret = get_base_info(fp, (void __user *)(unsigned long)
+ user_val, cmd.len);
+ break;
+ case HFI1_CMD_SDMA_STATUS_UPD:
+ break;
+ case HFI1_CMD_CREDIT_UPD:
+ if (uctxt && uctxt->sc)
+ sc_return_credits(uctxt->sc);
+ break;
+ case HFI1_CMD_TID_UPDATE:
+ ret = exp_tid_setup(fp, &tinfo);
+ if (!ret) {
+ unsigned long addr;
+ /*
+ * Copy the number of tidlist entries we used
+ * and the length of the buffer we registered.
+ * These fields are adjacent in the structure so
+ * we can copy them at the same time.
+ */
+ addr = (unsigned long)cmd.addr +
+ offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt) +
+ sizeof(tinfo.length)))
+ ret = -EFAULT;
+ }
+ break;
+ case HFI1_CMD_TID_FREE:
+ ret = exp_tid_free(fp, &tinfo);
+ break;
+ case HFI1_CMD_RECV_CTRL:
+ ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
+ break;
+ case HFI1_CMD_POLL_TYPE:
+ uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
+ break;
+ case HFI1_CMD_ACK_EVENT:
+ ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
+ break;
+ case HFI1_CMD_SET_PKEY:
+ if (HFI1_CAP_IS_USET(PKEY_CHECK))
+ ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
+ else
+ ret = -EPERM;
+ break;
+ case HFI1_CMD_CTXT_RESET: {
+ struct send_context *sc;
+ struct hfi1_devdata *dd;
+
+ if (!uctxt || !uctxt->dd || !uctxt->sc) {
+ ret = -EINVAL;
+ break;
+ }
+ /*
+ * There is no protection here. User level has to
+ * guarantee that no one will be writing to the send
+ * context while it is being re-initialized.
+ * If user level breaks that guarantee, it will break
+		 * its own context and no one else's.
+ */
+ dd = uctxt->dd;
+ sc = uctxt->sc;
+ /*
+ * Wait until the interrupt handler has marked the
+ * context as halted or frozen. Report error if we time
+ * out.
+ */
+ wait_event_interruptible_timeout(
+ sc->halt_wait, (sc->flags & SCF_HALTED),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (!(sc->flags & SCF_HALTED)) {
+ ret = -ENOLCK;
+ break;
+ }
+ /*
+ * If the send context was halted due to a Freeze,
+ * wait until the device has been "unfrozen" before
+ * resetting the context.
+ */
+ if (sc->flags & SCF_FROZEN) {
+ wait_event_interruptible_timeout(
+ dd->event_queue,
+ !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (dd->flags & HFI1_FROZEN) {
+ ret = -ENOLCK;
+ break;
+ }
+ if (dd->flags & HFI1_FORCED_FREEZE) {
+				/*
+				 * Don't allow context reset if we are in a
+				 * forced freeze.
+				 */
+ ret = -ENODEV;
+ break;
+ }
+ sc_disable(sc);
+ ret = sc_enable(sc);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
+ uctxt->ctxt);
+ } else
+ ret = sc_restart(sc);
+ if (!ret)
+ sc_return_credits(sc);
+ break;
+ }
+ case HFI1_CMD_EP_INFO:
+ case HFI1_CMD_EP_ERASE_CHIP:
+ case HFI1_CMD_EP_ERASE_P0:
+ case HFI1_CMD_EP_ERASE_P1:
+ case HFI1_CMD_EP_READ_P0:
+ case HFI1_CMD_EP_READ_P1:
+ case HFI1_CMD_EP_WRITE_P0:
+ case HFI1_CMD_EP_WRITE_P1:
+ ret = handle_eprom_command(&cmd);
+ break;
+ }
+
+ if (ret >= 0)
+ ret = consumed;
+bail:
+ return ret;
+}
+
+static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+{
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+ int ret = 0, done = 0, reqs = 0;
+ unsigned long dim = from->nr_segs;
+
+ if (!user_sdma_comp_fp(kiocb->ki_filp) ||
+ !user_sdma_pkt_fp(kiocb->ki_filp)) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (!iter_is_iovec(from) || !dim) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
+ ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
+ dim);
+ pq = user_sdma_pkt_fp(kiocb->ki_filp);
+ cq = user_sdma_comp_fp(kiocb->ki_filp);
+
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ while (dim) {
+ unsigned long count = 0;
+
+ ret = hfi1_user_sdma_process_request(
+ kiocb->ki_filp, (struct iovec *)(from->iov + done),
+ dim, &count);
+ if (ret)
+ goto done;
+ dim -= count;
+ done += count;
+ reqs++;
+ }
+done:
+ return ret ? ret : reqs;
+}
+
+static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_devdata *dd;
+ unsigned long flags, pfn;
+ u64 token = vma->vm_pgoff << PAGE_SHIFT,
+ memaddr = 0;
+ u8 subctxt, mapio = 0, vmf = 0, type;
+ ssize_t memlen = 0;
+ int ret = 0;
+ u16 ctxt;
+
+ uctxt = ctxt_fp(fp);
+ if (!is_valid_mmap(token) || !uctxt ||
+ !(vma->vm_flags & VM_SHARED)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ dd = uctxt->dd;
+ ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
+ subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
+ type = HFI1_MMAP_TOKEN_GET(TYPE, token);
+ if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ flags = vma->vm_flags;
+
+ switch (type) {
+ case PIO_BUFS:
+ case PIO_BUFS_SOP:
+ memaddr = ((dd->physaddr + TXE_PIO_SEND) +
+ /* chip pio base */
+ (uctxt->sc->hw_context * (1 << 16))) +
+ /* 64K PIO space / ctxt */
+ (type == PIO_BUFS_SOP ?
+ (TXE_PIO_SIZE / 2) : 0); /* sop? */
+ /*
+ * Map only the amount allocated to the context, not the
+ * entire available context's PIO space.
+ */
+ memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
+ PAGE_SIZE);
+ flags &= ~VM_MAYREAD;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ mapio = 1;
+ break;
+ case PIO_CRED:
+ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ /*
+ * The credit return location for this context could be on the
+		 * second or third page allocated for credit returns (if the
+		 * number of enabled contexts is > 64 or > 128, respectively).
+ */
+ memaddr = dd->cr_base[uctxt->numa_id].pa +
+ (((u64)uctxt->sc->hw_free -
+ (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
+ memlen = PAGE_SIZE;
+ flags &= ~VM_MAYWRITE;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ /*
+ * The driver has already allocated memory for credit
+ * returns and programmed it into the chip. Has that
+ * memory been flagged as non-cached?
+ */
+ /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
+ mapio = 1;
+ break;
+ case RCV_HDRQ:
+ memaddr = uctxt->rcvhdrq_phys;
+ memlen = uctxt->rcvhdrq_size;
+ break;
+ case RCV_EGRBUF: {
+ unsigned long addr;
+ int i;
+ /*
+		 * The RcvEgr buffer needs to be handled differently,
+ * as multiple non-contiguous pages need to be mapped
+ * into the user process.
+ */
+ memlen = uctxt->egrbufs.size;
+ if ((vma->vm_end - vma->vm_start) != memlen) {
+ dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
+ (vma->vm_end - vma->vm_start), memlen);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (vma->vm_flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ vma->vm_flags &= ~VM_MAYWRITE;
+ addr = vma->vm_start;
+ for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
+ ret = remap_pfn_range(
+ vma, addr,
+ uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
+ uctxt->egrbufs.buffers[i].len,
+ vma->vm_page_prot);
+ if (ret < 0)
+ goto done;
+ addr += uctxt->egrbufs.buffers[i].len;
+ }
+ ret = 0;
+ goto done;
+ }
+ case UREGS:
+ /*
+ * Map only the page that contains this context's user
+ * registers.
+ */
+ memaddr = (unsigned long)
+ (dd->physaddr + RXE_PER_CONTEXT_USER)
+ + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
+ /*
+ * TidFlow table is on the same page as the rest of the
+ * user registers.
+ */
+ memlen = PAGE_SIZE;
+ flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ mapio = 1;
+ break;
+ case EVENTS:
+ /*
+ * Use the page where this context's flags are. User level
+		 * knows where its own bitmap is within the page.
+ */
+ memaddr = ((unsigned long)dd->events +
+ ((uctxt->ctxt - dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
+ memlen = PAGE_SIZE;
+ /*
+ * v3.7 removes VM_RESERVED but the effect is kept by
+ * using VM_IO.
+ */
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case STATUS:
+ memaddr = kvirt_to_phys((void *)dd->status);
+ memlen = PAGE_SIZE;
+ flags |= VM_IO | VM_DONTEXPAND;
+ break;
+ case RTAIL:
+ if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
+ /*
+ * If the memory allocation failed, the context alloc
+			 * would also have failed, so we would never get here.
+ */
+ ret = -EINVAL;
+ goto done;
+ }
+ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
+ memaddr = uctxt->rcvhdrqtailaddr_phys;
+ memlen = PAGE_SIZE;
+ flags &= ~VM_MAYWRITE;
+ break;
+ case SUBCTXT_UREGS:
+ memaddr = (u64)uctxt->subctxt_uregbase;
+ memlen = PAGE_SIZE;
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case SUBCTXT_RCV_HDRQ:
+ memaddr = (u64)uctxt->subctxt_rcvhdr_base;
+ memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ case SUBCTXT_EGRBUF:
+ memaddr = (u64)uctxt->subctxt_rcvegrbuf;
+ memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
+ flags |= VM_IO | VM_DONTEXPAND;
+ flags &= ~VM_MAYWRITE;
+ vmf = 1;
+ break;
+ case SDMA_COMP: {
+ struct hfi1_user_sdma_comp_q *cq;
+
+ if (!user_sdma_comp_fp(fp)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ cq = user_sdma_comp_fp(fp);
+ memaddr = (u64)cq->comps;
+ memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
+ flags |= VM_IO | VM_DONTEXPAND;
+ vmf = 1;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((vma->vm_end - vma->vm_start) != memlen) {
+ hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
+ uctxt->ctxt, subctxt_fp(fp),
+ (vma->vm_end - vma->vm_start), memlen);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ vma->vm_flags = flags;
+ dd_dev_info(dd,
+ "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
+ __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
+ vma->vm_end - vma->vm_start, vma->vm_flags);
+ pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
+ if (vmf) {
+ vma->vm_pgoff = pfn;
+ vma->vm_ops = &vm_ops;
+ ret = 0;
+ } else if (mapio) {
+ ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ vma->vm_page_prot);
+ } else {
+ ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ vma->vm_page_prot);
+ }
+done:
+ return ret;
+}
+
+/*
+ * Local (non-chip) user memory is not mapped up front; pages are
+ * faulted in as the user-level code accesses them.
+ */
+static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+
+ page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ unsigned pollflag;
+
+ uctxt = ctxt_fp(fp);
+ if (!uctxt)
+ pollflag = POLLERR;
+ else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
+ pollflag = poll_urgent(fp, pt);
+ else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
+ pollflag = poll_next(fp, pt);
+ else /* invalid */
+ pollflag = POLLERR;
+
+ return pollflag;
+}
+
+static int hfi1_file_close(struct inode *inode, struct file *fp)
+{
+ struct hfi1_filedata *fdata = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
+ struct hfi1_devdata *dd;
+ unsigned long flags, *ev;
+
+ fp->private_data = NULL;
+
+ if (!uctxt)
+ goto done;
+
+ hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
+ dd = uctxt->dd;
+ mutex_lock(&hfi1_mutex);
+
+ flush_wc();
+ /* drain user sdma queue */
+ if (fdata->pq)
+ hfi1_user_sdma_free_queues(fdata);
+
+ /*
+ * Clear any left over, unhandled events so the next process that
+ * gets this context doesn't get confused.
+ */
+ ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
+ *ev = 0;
+
+ if (--uctxt->cnt) {
+ uctxt->active_slaves &= ~(1 << fdata->subctxt);
+ uctxt->subpid[fdata->subctxt] = 0;
+ mutex_unlock(&hfi1_mutex);
+ goto done;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /*
+ * Disable receive context and interrupt available, reset all
+ * RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
+ /* Clear the context's J_KEY */
+ hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
+ /*
+ * Reset context integrity checks to default.
+ * (writes to CSRs probably belong in chip.c)
+ */
+ write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
+ hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
+ sc_disable(uctxt->sc);
+ uctxt->pid = 0;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ dd->rcd[uctxt->ctxt] = NULL;
+ uctxt->rcvwait_to = 0;
+ uctxt->piowait_to = 0;
+ uctxt->rcvnowait = 0;
+ uctxt->pionowait = 0;
+ uctxt->event_flags = 0;
+
+ hfi1_clear_tids(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
+ if (uctxt->tid_pg_list)
+ unlock_exp_tids(uctxt);
+
+ hfi1_stats.sps_ctxts--;
+ dd->freectxts++;
+ mutex_unlock(&hfi1_mutex);
+ hfi1_free_ctxtdata(dd, uctxt);
+done:
+ kfree(fdata);
+ return 0;
+}
+
+/*
+ * Convert kernel *virtual* addresses to physical addresses.
+ * This is used for vmalloc'ed addresses.
+ */
+static u64 kvirt_to_phys(void *addr)
+{
+ struct page *page;
+ u64 paddr = 0;
+
+ page = vmalloc_to_page(addr);
+ if (page)
+ paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
+{
+ int i_minor, ret = 0;
+ unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
+
+ swmajor = uinfo->userversion >> 16;
+ if (swmajor != HFI1_USER_SWMAJOR) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ swminor = uinfo->userversion & 0xffff;
+
+ if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
+ alg = uinfo->hfi1_alg;
+
+ mutex_lock(&hfi1_mutex);
+	/* First, check whether we need to set up a shared context. */
+ if (uinfo->subctxt_cnt)
+ ret = find_shared_ctxt(fp, uinfo);
+
+ /*
+ * We execute the following block if we couldn't find a
+ * shared context or if context sharing is not required.
+ */
+ if (!ret) {
+ i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
+ ret = get_user_context(fp, uinfo, i_minor - 1, alg);
+ }
+ mutex_unlock(&hfi1_mutex);
+done:
+ return ret;
+}
+
+static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
+ int devno, unsigned alg)
+{
+ struct hfi1_devdata *dd = NULL;
+ int ret = 0, devmax, npresent, nup, dev;
+
+ devmax = hfi1_count_units(&npresent, &nup);
+ if (!npresent) {
+ ret = -ENXIO;
+ goto done;
+ }
+ if (!nup) {
+ ret = -ENETDOWN;
+ goto done;
+ }
+ if (devno >= 0) {
+ dd = hfi1_lookup(devno);
+ if (!dd)
+ ret = -ENODEV;
+ else if (!dd->freectxts)
+ ret = -EBUSY;
+ } else {
+ struct hfi1_devdata *pdd;
+
+ if (alg == HFI1_ALG_ACROSS) {
+ unsigned free = 0U;
+
+ for (dev = 0; dev < devmax; dev++) {
+ pdd = hfi1_lookup(dev);
+ if (pdd && pdd->freectxts &&
+ pdd->freectxts > free) {
+ dd = pdd;
+ free = pdd->freectxts;
+ }
+ }
+ } else {
+ for (dev = 0; dev < devmax; dev++) {
+ pdd = hfi1_lookup(dev);
+ if (pdd && pdd->freectxts) {
+ dd = pdd;
+ break;
+ }
+ }
+ }
+ if (!dd)
+ ret = -EBUSY;
+ }
+done:
+ return ret ? ret : allocate_ctxt(fp, dd, uinfo);
+}
+
+static int find_shared_ctxt(struct file *fp,
+ const struct hfi1_user_info *uinfo)
+{
+ int devmax, ndev, i;
+ int ret = 0;
+
+ devmax = hfi1_count_units(NULL, NULL);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct hfi1_devdata *dd = hfi1_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
+ continue;
+ for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
+ struct hfi1_ctxtdata *uctxt = dd->rcd[i];
+
+ /* Skip ctxts which are not yet open */
+ if (!uctxt || !uctxt->cnt)
+ continue;
+ /* Skip ctxt if it doesn't match the requested one */
+ if (memcmp(uctxt->uuid, uinfo->uuid,
+ sizeof(uctxt->uuid)) ||
+ uctxt->subctxt_id != uinfo->subctxt_id ||
+ uctxt->subctxt_cnt != uinfo->subctxt_cnt)
+ continue;
+
+ /* Verify the sharing process matches the master */
+ if (uctxt->userversion != uinfo->userversion ||
+ uctxt->cnt >= uctxt->subctxt_cnt) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ctxt_fp(fp) = uctxt;
+ subctxt_fp(fp) = uctxt->cnt++;
+ uctxt->subpid[subctxt_fp(fp)] = current->pid;
+ uctxt->active_slaves |= 1 << subctxt_fp(fp);
+ ret = 1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
+ struct hfi1_user_info *uinfo)
+{
+ struct hfi1_ctxtdata *uctxt;
+ unsigned ctxt;
+ int ret;
+
+ if (dd->flags & HFI1_FROZEN) {
+ /*
+		 * Pick an error that is distinct from all other errors
+		 * that are returned so the user process knows that it
+		 * tried to allocate while the SPC was frozen. It should
+		 * be able to retry with success in a short while.
+ */
+ return -EIO;
+ }
+
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
+ if (!dd->rcd[ctxt])
+ break;
+
+ if (ctxt == dd->num_rcv_contexts)
+ return -EBUSY;
+
+ uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
+ if (!uctxt) {
+ dd_dev_err(dd,
+ "Unable to allocate ctxtdata memory, failing open\n");
+ return -ENOMEM;
+ }
+ /*
+ * Allocate and enable a PIO send context.
+ */
+ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
+ uctxt->numa_id);
+ if (!uctxt->sc)
+ return -ENOMEM;
+
+ dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
+ uctxt->sc->hw_context);
+ ret = sc_enable(uctxt->sc);
+ if (ret)
+ return ret;
+ /*
+	 * Set up shared context resources if user level has requested
+ * shared contexts and this is the 'master' process.
+ * This has to be done here so the rest of the sub-contexts find the
+ * proper master.
+ */
+ if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
+ ret = init_subctxts(uctxt, uinfo);
+ /*
+ * On error, we don't need to disable and de-allocate the
+ * send context because it will be done during file close
+ */
+ if (ret)
+ return ret;
+ }
+ uctxt->userversion = uinfo->userversion;
+ uctxt->pid = current->pid;
+ uctxt->flags = HFI1_CAP_UGET(MASK);
+ init_waitqueue_head(&uctxt->wait);
+ strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+ memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
+ uctxt->jkey = generate_jkey(current_uid());
+ INIT_LIST_HEAD(&uctxt->sdma_queues);
+ spin_lock_init(&uctxt->sdma_qlock);
+ hfi1_stats.sps_ctxts++;
+ dd->freectxts--;
+ ctxt_fp(fp) = uctxt;
+
+ return 0;
+}
+
+static int init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo)
+{
+ int ret = 0;
+ unsigned num_subctxts;
+
+ num_subctxts = uinfo->subctxt_cnt;
+ if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ uctxt->subctxt_cnt = uinfo->subctxt_cnt;
+ uctxt->subctxt_id = uinfo->subctxt_id;
+ uctxt->active_slaves = 1;
+ uctxt->redirect_seq_cnt = 1;
+ set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
+bail:
+ return ret;
+}
+
+static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
+{
+ int ret = 0;
+ unsigned num_subctxts = uctxt->subctxt_cnt;
+
+ uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
+ if (!uctxt->subctxt_uregbase) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ /* We can take the size of the RcvHdr Queue from the master */
+ uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
+ num_subctxts);
+ if (!uctxt->subctxt_rcvhdr_base) {
+ ret = -ENOMEM;
+ goto bail_ureg;
+ }
+
+ uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
+ num_subctxts);
+ if (!uctxt->subctxt_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail_rhdr;
+ }
+ goto bail;
+bail_rhdr:
+ vfree(uctxt->subctxt_rcvhdr_base);
+bail_ureg:
+ vfree(uctxt->subctxt_uregbase);
+ uctxt->subctxt_uregbase = NULL;
+bail:
+ return ret;
+}
+
+static int user_init(struct file *fp)
+{
+ int ret;
+ unsigned int rcvctrl_ops = 0;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+
+ /* make sure that the context has already been setup */
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ /*
+ * Subctxts don't need to initialize anything since master
+ * has done it.
+ */
+ if (subctxt_fp(fp)) {
+ ret = wait_event_interruptible(uctxt->wait,
+ !test_bit(HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ goto done;
+ }
+
+ /* initialize poll variables... */
+ uctxt->urgent = 0;
+ uctxt->urgent_poll = 0;
+
+ /*
+ * Now enable the ctxt for receive.
+	 * For chips that are set to DMA the tail register to memory
+	 * when it changes (and when the update bit transitions from
+	 * 0 to 1), we turn it off and then back on.
+ * This will (very briefly) affect any other open ctxts, but the
+ * duration is very short, and therefore isn't an issue. We
+ * explicitly set the in-memory tail copy to 0 beforehand, so we
+ * don't have to wait to be sure the DMA update has happened
+ * (chip resets head/tail to 0 on transition to enable).
+ */
+ if (uctxt->rcvhdrtail_kvaddr)
+ clear_rcvhdrtail(uctxt);
+
+ /* Setup J_KEY before enabling the context */
+ hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
+ rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
+ /*
+ * Ignore the bit in the flags for now until proper
+	 * support for multiple packets per rcv array entry is
+ * added.
+ */
+ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
+
+ /* Notify any waiting slaves */
+ if (uctxt->subctxt_cnt) {
+ clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
+ wake_up(&uctxt->wait);
+ }
+ ret = 0;
+
+done:
+ return ret;
+}
+
+static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
+{
+ struct hfi1_ctxt_info cinfo;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ int ret = 0;
+
+ ret = hfi1_get_base_kinfo(uctxt, &cinfo);
+ if (ret < 0)
+ goto done;
+ cinfo.num_active = hfi1_count_active_units();
+ cinfo.unit = uctxt->dd->unit;
+ cinfo.ctxt = uctxt->ctxt;
+ cinfo.subctxt = subctxt_fp(fp);
+ cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
+ uctxt->dd->rcv_entries.group_size) +
+ uctxt->expected_count;
+ cinfo.credits = uctxt->sc->credits;
+ cinfo.numa_node = uctxt->numa_id;
+ cinfo.rec_cpu = fd->rec_cpu_num;
+ cinfo.send_ctxt = uctxt->sc->hw_context;
+
+ cinfo.egrtids = uctxt->egrbufs.alloced;
+ cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
+ cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
+ cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
+
+ trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
+ if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
+ ret = -EFAULT;
+done:
+ return ret;
+}
+
+static int setup_ctxt(struct file *fp)
+{
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ int ret = 0;
+
+ /*
+	 * Context should be set up only once, including allocation and
+	 * programming of eager buffers. This is done when context sharing
+	 * is not requested, or by the master process when it is.
+ */
+ if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
+ ret = hfi1_init_ctxt(uctxt->sc);
+ if (ret)
+ goto done;
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+ if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
+ ret = setup_subctxt(uctxt);
+ if (ret)
+ goto done;
+ }
+ /* Setup Expected Rcv memories */
+ uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
+ sizeof(struct page **));
+ if (!uctxt->tid_pg_list) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ uctxt->physshadow = vzalloc(uctxt->expected_count *
+ sizeof(*uctxt->physshadow));
+ if (!uctxt->physshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ /* allocate expected TID map and initialize the cursor */
+ atomic_set(&uctxt->tidcursor, 0);
+ uctxt->numtidgroups = uctxt->expected_count /
+ dd->rcv_entries.group_size;
+ uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
+ !!(uctxt->numtidgroups % BITS_PER_LONG);
+ uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
+ sizeof(*uctxt->tidusemap),
+ GFP_KERNEL, uctxt->numa_id);
+ if (!uctxt->tidusemap) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ /*
+		 * If the number of groups is not a multiple of 64 (the
+		 * number of groups covered by one tidusemap element), mark
+ * the extra ones as used. This will effectively make them
+ * permanently used and should never be assigned. Otherwise,
+ * the code which checks how many free groups we have will
+ * get completely confused about the state of the bits.
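+		 * For example, with 100 groups and BITS_PER_LONG == 64,
+		 * tidmapcnt is 2 and the final word gets bits 36..63
+		 * pre-set.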
+ */
+ if (uctxt->numtidgroups % BITS_PER_LONG)
+ uctxt->tidusemap[uctxt->tidmapcnt - 1] =
+ ~((1ULL << (uctxt->numtidgroups %
+ BITS_PER_LONG)) - 1);
+ trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
+ uctxt->tidusemap, uctxt->tidmapcnt);
+ }
+ ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
+ if (ret)
+ goto done;
+
+ set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
+done:
+ return ret;
+}
+
+static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
+{
+ struct hfi1_base_info binfo;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ ssize_t sz;
+ unsigned offset;
+ int ret = 0;
+
+ trace_hfi1_uctxtdata(uctxt->dd, uctxt);
+
+ memset(&binfo, 0, sizeof(binfo));
+ binfo.hw_version = dd->revision;
+ binfo.sw_version = HFI1_KERN_SWVERSION;
+ binfo.bthqp = kdeth_qp;
+ binfo.jkey = uctxt->jkey;
+ /*
+ * If more than 64 contexts are enabled the allocated credit
+ * return will span two or three contiguous pages. Since we only
+ * map the page containing the context's credit return address,
+ * we need to calculate the offset in the proper page.
+ */
+ offset = ((u64)uctxt->sc->hw_free -
+ (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
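+ /*
+ * E.g. (hypothetical layout): if hw_free sits at va + 0x1040
+ * with 4K pages, offset == 0x40 into the mapped page.
+ */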
+ binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
+ subctxt_fp(fp), offset);
+ binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
+ subctxt_fp(fp),
+ uctxt->sc->base_addr);
+ binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
+ uctxt->ctxt,
+ subctxt_fp(fp),
+ uctxt->sc->base_addr);
+ binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
+ subctxt_fp(fp),
+ uctxt->rcvhdrq);
+ binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
+ subctxt_fp(fp),
+ uctxt->egrbufs.rcvtids[0].phys);
+ binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ /*
+ * user regs are at
+ * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
+ */
+ binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ offset = ((((uctxt->ctxt - dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
+ sizeof(*dd->events)) & ~PAGE_MASK;
+ binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
+ subctxt_fp(fp),
+ offset);
+ binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
+ subctxt_fp(fp),
+ dd->status);
+ if (HFI1_CAP_IS_USET(DMA_RTAIL))
+ binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ if (uctxt->subctxt_cnt) {
+ binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
+ uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
+ uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
+ uctxt->ctxt,
+ subctxt_fp(fp), 0);
+ }
+ sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
+ if (copy_to_user(ubase, &binfo, sz))
+ ret = -EFAULT;
+ return ret;
+}
+
+static unsigned int poll_urgent(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &uctxt->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (uctxt->urgent != uctxt->urgent_poll) {
+ pollflag = POLLIN | POLLRDNORM;
+ uctxt->urgent_poll = uctxt->urgent;
+ } else {
+ pollflag = 0;
+ set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int poll_next(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &uctxt->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (hdrqempty(uctxt)) {
+ set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
+ pollflag = 0;
+ } else
+ pollflag = POLLIN | POLLRDNORM;
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use, that is specific to send buffers.
+ */
+int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
+{
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned ctxt;
+ int ret = 0;
+ unsigned long flags;
+
+ if (!dd->events) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
+ ctxt++) {
+ uctxt = dd->rcd[ctxt];
+ if (uctxt) {
+ unsigned long *evs = dd->events +
+ (uctxt->ctxt - dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS;
+ int i;
+ /*
+ * subctxt_cnt is 0 if the context is not shared, so always
+ * set the base bit first, then any remaining subctxt bits
+ */
+ set_bit(evtbit, evs);
+ for (i = 1; i < uctxt->subctxt_cnt; i++)
+ set_bit(evtbit, evs + i);
+ }
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+done:
+ return ret;
+}
+
+/**
+ * manage_rcvq - manage a context's receive queue
+ * @uctxt: the context
+ * @subctxt: the sub-context
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions. start_stop == 1 re-enables receive and
+ * re-initializes the software copy of the head register.
+ */
+static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
+ int start_stop)
+{
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned int rcvctrl_op;
+
+ if (subctxt)
+ goto bail;
+ /* atomically enable or disable receive on the ctxt */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy or not on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled.
+ */
+ if (uctxt->rcvhdrtail_kvaddr)
+ clear_rcvhdrtail(uctxt);
+ rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
+ } else
+ rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
+ hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
+ /* always; new head should be equal to new tail; see above */
+bail:
+ return 0;
+}
+
+/*
+ * clear the event notifier events for this context.
+ * User process then performs actions appropriate to bit having been
+ * set, if desired, and checks again in future.
+ */
+static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
+ unsigned long events)
+{
+ int i;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned long *evs;
+
+ if (!dd->events)
+ return 0;
+
+ evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS) + subctxt;
+
+ for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
+ if (!test_bit(i, &events))
+ continue;
+ clear_bit(i, evs);
+ }
+ return 0;
+}
+
+#define num_user_pages(vaddr, len) \
+ (1 + (((((unsigned long)(vaddr) + \
+ (unsigned long)(len) - 1) & PAGE_MASK) - \
+ ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
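+/*
+ * Worked example (4K pages): vaddr == 0x1008, len == 0x2000 covers
+ * bytes 0x1008..0x3007, touching pages 0x1000, 0x2000 and 0x3000:
+ * 1 + ((0x3000 - 0x1000) >> PAGE_SHIFT) == 3 pages.
+ */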
+
+/**
+ * tzcnt - count the number of trailing zeros in a 64bit value
+ * @value: the value to be examined
+ *
+ * Returns the number of trailing (least significant) zeros in the
+ * input value. If the value is zero, returns the number of bits
+ * in the value.
+ */
+static inline u8 tzcnt(u64 value)
+{
+ return value ? __builtin_ctzl(value) : sizeof(value) * 8;
+}
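+/* examples: tzcnt(0x8) == 3, tzcnt(1) == 0, tzcnt(0) == 64 */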
+
+static inline unsigned num_free_groups(unsigned long map, u16 *start)
+{
+ unsigned free;
+ u16 bitidx = *start;
+
+ if (bitidx >= BITS_PER_LONG)
+ return 0;
+ /* "Turn off" any bits set before our bit index */
+ map &= ~((1ULL << bitidx) - 1);
+ free = tzcnt(map) - bitidx;
+ while (!free && bitidx < BITS_PER_LONG) {
+ /* Zero out the last set bit so we look at the rest */
+ map &= ~(1ULL << bitidx);
+ /*
+ * Account for the previously checked bits and advance
+ * the bit index. We don't have to check for bitidx
+ * getting bigger than BITS_PER_LONG here as it would
+ * mean extra instructions that we don't need. If it
+ * did happen, it would push free to a negative value
+ * which will break the loop.
+ */
+ free = tzcnt(map) - ++bitidx;
+ }
+ *start = bitidx;
+ return free;
+}
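+/*
+ * Worked example: map == 0x3, *start == 0. Bits 0 and 1 are in use,
+ * so the loop clears each in turn and returns 62 free groups with
+ * *start updated to 2.
+ */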
+
+static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
+{
+ int ret = 0;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned tid, mapped = 0, npages, ngroups, exp_groups,
+ tidpairs = uctxt->expected_count / 2;
+ struct page **pages;
+ unsigned long vaddr, tidmap[uctxt->tidmapcnt];
+ dma_addr_t *phys;
+ u32 tidlist[tidpairs], pairidx = 0, tidcursor;
+ u16 useidx, idx, bitidx, tidcnt = 0;
+
+ vaddr = tinfo->vaddr;
+
+ if (vaddr & ~PAGE_MASK) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ npages = num_user_pages(vaddr, tinfo->length);
+ if (!npages) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
+ npages * PAGE_SIZE)) {
+ dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
+ (void *)vaddr, npages);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
+ memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
+
+ exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
+ /* which group set do we look at first? */
+ tidcursor = atomic_read(&uctxt->tidcursor);
+ useidx = (tidcursor >> 16) & 0xffff;
+ bitidx = tidcursor & 0xffff;
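+ /* e.g. a cursor of 0x00020005 resumes at map word 2, bit 5 */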
+
+ /*
+ * Keep going until we've mapped all pages or we've exhausted all
+ * RcvArray entries.
+ * This iterates over the number of tidmaps + 1
+ * (idx <= uctxt->tidmapcnt) so the bitmap we started from is
+ * checked one more time, catching any free bits before the
+ * starting point bit.
+ */
+ for (mapped = 0, idx = 0;
+ mapped < npages && idx <= uctxt->tidmapcnt;) {
+ u64 i, offset = 0;
+ unsigned free, pinned, pmapped = 0, bits_used;
+ u16 grp;
+
+ /*
+ * "Reserve" the needed group bits under lock so other
+ * processes can't step in the middle of it. Once
+ * reserved, we don't need the lock anymore since we
+ * are guaranteed the groups.
+ */
+ spin_lock(&uctxt->exp_lock);
+ if (uctxt->tidusemap[useidx] == -1ULL ||
+ bitidx >= BITS_PER_LONG) {
+ /* no free groups in the set, use the next */
+ useidx = (useidx + 1) % uctxt->tidmapcnt;
+ idx++;
+ bitidx = 0;
+ spin_unlock(&uctxt->exp_lock);
+ continue;
+ }
+ ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
+ !!((npages - mapped) % dd->rcv_entries.group_size);
+
+ /*
+ * If we've gotten here, the current set of groups does have
+ * one or more free groups.
+ */
+ free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
+ if (!free) {
+ /*
+ * Despite the check above, free could still come back
+ * as 0 because we don't check the entire bitmap but
+ * we start from bitidx.
+ */
+ spin_unlock(&uctxt->exp_lock);
+ continue;
+ }
+ bits_used = min(free, ngroups);
+ tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
+ uctxt->tidusemap[useidx] |= tidmap[useidx];
+ spin_unlock(&uctxt->exp_lock);
+
+ /*
+ * At this point, we know where in the map we have free bits.
+ * Properly offset into the various "shadow" arrays and compute
+ * the RcvArray entry index.
+ */
+ offset = ((useidx * BITS_PER_LONG) + bitidx) *
+ dd->rcv_entries.group_size;
+ pages = uctxt->tid_pg_list + offset;
+ phys = uctxt->physshadow + offset;
+ tid = uctxt->expected_base + offset;
+
+ /* Calculate how many pages we can pin based on free bits */
+ pinned = min((bits_used * dd->rcv_entries.group_size),
+ (npages - mapped));
+ /*
+ * Now that we know how many free RcvArray entries we have,
+ * we can pin that many user pages.
+ */
+ ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
+ pinned, pages);
+ if (ret) {
+ /*
+ * We can't continue because the pages array won't be
+ * initialized. This should never happen,
+ * unless perhaps the user has mpin'ed the pages
+ * themselves.
+ */
+ dd_dev_info(dd,
+ "Failed to lock addr %p, %u pages: errno %d\n",
+ (void *) vaddr, pinned, -ret);
+ /*
+ * Let go of the bits that we reserved since we are not
+ * going to use them.
+ */
+ spin_lock(&uctxt->exp_lock);
+ uctxt->tidusemap[useidx] &=
+ ~(((1ULL << bits_used) - 1) << bitidx);
+ spin_unlock(&uctxt->exp_lock);
+ goto done;
+ }
+ /*
+ * How many groups do we need based on how many pages we have
+ * pinned?
+ */
+ ngroups = (pinned / dd->rcv_entries.group_size) +
+ !!(pinned % dd->rcv_entries.group_size);
+ /*
+ * Keep programming RcvArray entries for all the <ngroups> free
+ * groups.
+ */
+ for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
+ unsigned j;
+ u32 pair_size = 0, tidsize;
+ /*
+ * This inner loop will program an entire group or the
+ * array of pinned pages (whichever limit is hit
+ * first).
+ */
+ for (j = 0; j < dd->rcv_entries.group_size &&
+ pmapped < pinned; j++, pmapped++, tid++) {
+ tidsize = PAGE_SIZE;
+ phys[pmapped] = hfi1_map_page(dd->pcidev,
+ pages[pmapped], 0,
+ tidsize, PCI_DMA_FROMDEVICE);
+ trace_hfi1_exp_rcv_set(uctxt->ctxt,
+ subctxt_fp(fp),
+ tid, vaddr,
+ phys[pmapped],
+ pages[pmapped]);
+ /*
+ * Each RcvArray entry is programmed with one
+ * page's worth of memory. This will handle
+ * the 8K MTU as well as anything smaller
+ * due to the fact that both entries in the
+ * RcvTidPair are programmed with a page.
+ * PSM currently does not handle anything
+ * bigger than 8K MTU, so should we even worry
+ * about 10K here?
+ */
+ hfi1_put_tid(dd, tid, PT_EXPECTED,
+ phys[pmapped],
+ ilog2(tidsize >> PAGE_SHIFT) + 1);
+ pair_size += tidsize >> PAGE_SHIFT;
+ EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
+ if (!(tid % 2)) {
+ tidlist[pairidx] |=
+ EXP_TID_SET(IDX,
+ (tid - uctxt->expected_base)
+ / 2);
+ tidlist[pairidx] |=
+ EXP_TID_SET(CTRL, 1);
+ tidcnt++;
+ } else {
+ tidlist[pairidx] |=
+ EXP_TID_SET(CTRL, 2);
+ pair_size = 0;
+ pairidx++;
+ }
+ }
+ /*
+ * We've programmed the entire group (or as much of the
+ * group as we'll use). Now, it's time to push it out...
+ */
+ flush_wc();
+ }
+ mapped += pinned;
+ atomic_set(&uctxt->tidcursor,
+ (((useidx & 0xffff) << 16) |
+ ((bitidx + bits_used) & 0xffff)));
+ }
+ trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
+ uctxt->tidmapcnt);
+
+done:
+ /* If we've mapped anything, copy relevant info to user */
+ if (mapped) {
+ if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
+ tidlist, sizeof(tidlist[0]) * tidcnt)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ /* copy TID info to user */
+ if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
+ tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
+ ret = -EFAULT;
+ }
+bail:
+ /*
+ * Calculate mapped length. New Exp TID protocol does not "unwind" and
+ * report an error if it can't map the entire buffer. It just reports
+ * the length that was mapped.
+ */
+ tinfo->length = mapped * PAGE_SIZE;
+ tinfo->tidcnt = tidcnt;
+ return ret;
+}
+
+static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
+{
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned long tidmap[uctxt->tidmapcnt];
+ struct page **pages;
+ dma_addr_t *phys;
+ u16 idx, bitidx, tid;
+ int ret = 0;
+
+ if (copy_from_user(&tidmap, (void __user *)(unsigned long)
+ tinfo->tidmap,
+ sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
+ unsigned long map;
+
+ bitidx = 0;
+ if (!tidmap[idx])
+ continue;
+ map = tidmap[idx];
+ while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
+ int i, pcount = 0;
+ struct page *pshadow[dd->rcv_entries.group_size];
+ unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
+ dd->rcv_entries.group_size;
+
+ pages = uctxt->tid_pg_list + offset;
+ phys = uctxt->physshadow + offset;
+ tid = uctxt->expected_base + offset;
+ for (i = 0; i < dd->rcv_entries.group_size;
+ i++, tid++) {
+ if (pages[i]) {
+ hfi1_put_tid(dd, tid, PT_INVALID,
+ 0, 0);
+ trace_hfi1_exp_rcv_free(uctxt->ctxt,
+ subctxt_fp(fp),
+ tid, phys[i],
+ pages[i]);
+ pci_unmap_page(dd->pcidev, phys[i],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ pshadow[pcount] = pages[i];
+ pages[i] = NULL;
+ pcount++;
+ phys[i] = 0;
+ }
+ }
+ flush_wc();
+ hfi1_release_user_pages(pshadow, pcount);
+ clear_bit(bitidx, &uctxt->tidusemap[idx]);
+ map &= ~(1ULL<<bitidx);
+ }
+ }
+ trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
+ uctxt->tidmapcnt);
+done:
+ return ret;
+}
+
+static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
+{
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned tid;
+
+ dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
+ uctxt->ctxt);
+ for (tid = 0; tid < uctxt->expected_count; tid++) {
+ struct page *p = uctxt->tid_pg_list[tid];
+ dma_addr_t phys;
+
+ if (!p)
+ continue;
+
+ phys = uctxt->physshadow[tid];
+ uctxt->physshadow[tid] = 0;
+ uctxt->tid_pg_list[tid] = NULL;
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ hfi1_release_user_pages(&p, 1);
+ }
+}
+
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
+ u16 pkey)
+{
+ int ret = -ENOENT, i, intable = 0;
+ struct hfi1_pportdata *ppd = uctxt->ppd;
+ struct hfi1_devdata *dd = uctxt->dd;
+
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
+ if (pkey == ppd->pkeys[i]) {
+ intable = 1;
+ break;
+ }
+
+ if (intable)
+ ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
+done:
+ return ret;
+}
+
+static int ui_open(struct inode *inode, struct file *filp)
+{
+ struct hfi1_devdata *dd;
+
+ dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
+ filp->private_data = dd; /* for other methods */
+ return 0;
+}
+
+static int ui_release(struct inode *inode, struct file *filp)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
+{
+ struct hfi1_devdata *dd = filp->private_data;
+
+ switch (whence) {
+ case SEEK_SET:
+ break;
+ case SEEK_CUR:
+ offset += filp->f_pos;
+ break;
+ case SEEK_END:
+ offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
+ offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (offset < 0)
+ return -EINVAL;
+
+ if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
+ return -EINVAL;
+
+ filp->f_pos = offset;
+
+ return filp->f_pos;
+}
+
+/* NOTE: assumes unsigned long is 8 bytes */
+static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct hfi1_devdata *dd = filp->private_data;
+ void __iomem *base = dd->kregbase;
+ unsigned long total, csr_off,
+ barlen = (dd->kregend - dd->kregbase);
+ u64 data;
+
+ /* only read 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*f_pos % 8) != 0)
+ return -EINVAL;
+ /* destination buffer must be 8-byte aligned */
+ if ((unsigned long)buf % 8 != 0)
+ return -EINVAL;
+ /* must be in range */
+ if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
+ return -EINVAL;
+ /* only set the base if we are not starting past the BAR */
+ if (*f_pos < barlen)
+ base += *f_pos;
+ csr_off = *f_pos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ /* accessing LCB CSRs requires more checks */
+ if (is_lcb_offset(csr_off)) {
+ if (read_lcb_csr(dd, csr_off, (u64 *)&data))
+ break; /* failed */
+ }
+ /*
+ * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
+ * false parity error. Avoid the whole issue by not reading
+ * them. These registers are defined as having a read value
+ * of 0.
+ */
+ else if (csr_off == ASIC_GPIO_CLEAR
+ || csr_off == ASIC_GPIO_FORCE
+ || csr_off == ASIC_QSFP1_CLEAR
+ || csr_off == ASIC_QSFP1_FORCE
+ || csr_off == ASIC_QSFP2_CLEAR
+ || csr_off == ASIC_QSFP2_FORCE)
+ data = 0;
+ else if (csr_off >= barlen) {
+ /*
+ * read_8051_data can read more than just 8 bytes at
+ * a time. However, folding this into the loop and
+ * handling the reads in 8 byte increments allows us
+ * to smoothly transition from chip memory to 8051
+ * memory.
+ */
+ if (read_8051_data(dd,
+ (u32)(csr_off - barlen),
+ sizeof(data), &data))
+ break; /* failed */
+ } else
+ data = readq(base + total);
+ if (put_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ }
+ *f_pos += total;
+ return total;
+}
+
+/* NOTE: assumes unsigned long is 8 bytes */
+static ssize_t ui_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct hfi1_devdata *dd = filp->private_data;
+ void __iomem *base;
+ unsigned long total, data, csr_off;
+ int in_lcb;
+
+ /* only write 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*f_pos % 8) != 0)
+ return -EINVAL;
+ /* source buffer must be 8-byte aligned */
+ if ((unsigned long)buf % 8 != 0)
+ return -EINVAL;
+ /* must be in range */
+ if (*f_pos + count > dd->kregend - dd->kregbase)
+ return -EINVAL;
+
+ base = (void __iomem *)dd->kregbase + *f_pos;
+ csr_off = *f_pos;
+ in_lcb = 0;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (get_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ /* accessing LCB CSRs requires a special procedure */
+ if (is_lcb_offset(csr_off)) {
+ if (!in_lcb) {
+ int ret = acquire_lcb_access(dd, 1);
+
+ if (ret)
+ break;
+ in_lcb = 1;
+ }
+ } else {
+ if (in_lcb) {
+ release_lcb_access(dd, 1);
+ in_lcb = 0;
+ }
+ }
+ writeq(data, base + total);
+ }
+ if (in_lcb)
+ release_lcb_access(dd, 1);
+ *f_pos += total;
+ return total;
+}
+
+static const struct file_operations ui_file_ops = {
+ .owner = THIS_MODULE,
+ .llseek = ui_lseek,
+ .read = ui_read,
+ .write = ui_write,
+ .open = ui_open,
+ .release = ui_release,
+};
+#define UI_OFFSET 192 /* device minor offset for UI devices */
+static int create_ui = 1;
+
+static struct cdev wildcard_cdev;
+static struct device *wildcard_device;
+
+static atomic_t user_count = ATOMIC_INIT(0);
+
+static void user_remove(struct hfi1_devdata *dd)
+{
+ if (atomic_dec_return(&user_count) == 0)
+ hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
+
+ hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+ hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
+}
+
+static int user_add(struct hfi1_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ if (atomic_inc_return(&user_count) == 1) {
+ ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
+ &wildcard_cdev, &wildcard_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
+ ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
+ &dd->user_cdev, &dd->user_device);
+ if (ret)
+ goto done;
+
+ if (create_ui) {
+ snprintf(name, sizeof(name),
+ "%s_ui%d", class_name(), dd->unit);
+ ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
+ &dd->ui_cdev, &dd->ui_device);
+ if (ret)
+ goto done;
+ }
+
+ return 0;
+done:
+ user_remove(dd);
+ return ret;
+}
+
+/*
+ * Create per-unit files in /dev
+ */
+int hfi1_device_create(struct hfi1_devdata *dd)
+{
+ int r, ret;
+
+ r = user_add(dd);
+ ret = hfi1_diag_add(dd);
+ if (r && !ret)
+ ret = r;
+ return ret;
+}
+
+/*
+ * Remove per-unit files in /dev
+ * void, core kernel returns no errors for this stuff
+ */
+void hfi1_device_remove(struct hfi1_devdata *dd)
+{
+ user_remove(dd);
+ hfi1_diag_remove(dd);
+}
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c
new file mode 100644
index 000000000000..5c2f2ed8f224
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/firmware.c
@@ -0,0 +1,1620 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+
+#include "hfi.h"
+#include "trace.h"
+
+/*
+ * Make it easy to toggle the firmware file names and whether they get
+ * loaded by editing the following. This may be something we do while in
+ * development but not necessarily something a user would ever need to use.
+ */
+#define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
+#define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
+#define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
+#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
+#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
+#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
+
+static uint fw_8051_load = 1;
+static uint fw_fabric_serdes_load = 1;
+static uint fw_pcie_serdes_load = 1;
+static uint fw_sbus_load = 1;
+static uint platform_config_load = 1;
+
+/* Firmware file names get set in hfi1_firmware_init() based on the above */
+static char *fw_8051_name;
+static char *fw_fabric_serdes_name;
+static char *fw_sbus_name;
+static char *fw_pcie_serdes_name;
+static char *platform_config_name;
+
+#define SBUS_MAX_POLL_COUNT 100
+#define SBUS_COUNTER(reg, name) \
+ (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
+ ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
+
+/*
+ * Firmware security header.
+ */
+struct css_header {
+ u32 module_type;
+ u32 header_len;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date; /* BCD yyyymmdd */
+ u32 size; /* in DWORDs */
+ u32 key_size; /* in DWORDs */
+ u32 modulus_size; /* in DWORDs */
+ u32 exponent_size; /* in DWORDs */
+ u32 reserved[22];
+};
+/* expected field values */
+#define CSS_MODULE_TYPE 0x00000006
+#define CSS_HEADER_LEN 0x000000a1
+#define CSS_HEADER_VERSION 0x00010000
+#define CSS_MODULE_VENDOR 0x00008086
+
+#define KEY_SIZE 256
+#define MU_SIZE 8
+#define EXPONENT_SIZE 4
+
+/* the file itself */
+struct firmware_file {
+ struct css_header css_header;
+ u8 modulus[KEY_SIZE];
+ u8 exponent[EXPONENT_SIZE];
+ u8 signature[KEY_SIZE];
+ u8 firmware[];
+};
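+/*
+ * Size check: 128 (css_header) + 256 + 4 + 256 == 644 bytes ==
+ * 0xa1 DWORDs, matching the expected CSS_HEADER_LEN above.
+ */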
+
+struct augmented_firmware_file {
+ struct css_header css_header;
+ u8 modulus[KEY_SIZE];
+ u8 exponent[EXPONENT_SIZE];
+ u8 signature[KEY_SIZE];
+ u8 r2[KEY_SIZE];
+ u8 mu[MU_SIZE];
+ u8 firmware[];
+};
+
+/* augmented file size difference */
+#define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
+ sizeof(struct firmware_file))
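+/* i.e. the extra r2[KEY_SIZE] and mu[MU_SIZE]: 256 + 8 == 264 bytes */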
+
+struct firmware_details {
+ /* Linux core piece */
+ const struct firmware *fw;
+
+ struct css_header *css_header;
+ u8 *firmware_ptr; /* pointer to binary data */
+ u32 firmware_len; /* length in bytes */
+ u8 *modulus; /* pointer to the modulus */
+ u8 *exponent; /* pointer to the exponent */
+ u8 *signature; /* pointer to the signature */
+ u8 *r2; /* pointer to r2 */
+ u8 *mu; /* pointer to mu */
+ struct augmented_firmware_file dummy_header;
+};
+
+/*
+ * The mutex protects fw_state, fw_err, and all of the firmware_details
+ * variables.
+ */
+static DEFINE_MUTEX(fw_mutex);
+enum fw_state {
+ FW_EMPTY,
+ FW_ACQUIRED,
+ FW_ERR
+};
+static enum fw_state fw_state = FW_EMPTY;
+static int fw_err;
+static struct firmware_details fw_8051;
+static struct firmware_details fw_fabric;
+static struct firmware_details fw_pcie;
+static struct firmware_details fw_sbus;
+static const struct firmware *platform_config;
+
+/* flags for turn_off_spicos() */
+#define SPICO_SBUS 0x1
+#define SPICO_FABRIC 0x2
+#define ENABLE_SPICO_SMASK 0x1
+
+/* security block commands */
+#define RSA_CMD_INIT 0x1
+#define RSA_CMD_START 0x2
+
+/* security block status */
+#define RSA_STATUS_IDLE 0x0
+#define RSA_STATUS_ACTIVE 0x1
+#define RSA_STATUS_DONE 0x2
+#define RSA_STATUS_FAILED 0x3
+
+/* RSA engine timeout, in ms */
+#define RSA_ENGINE_TIMEOUT 100 /* ms */
+
+/* hardware mutex timeout, in ms */
+#define HM_TIMEOUT 4000 /* 4 s */
+
+/* 8051 memory access timeout, in us */
+#define DC8051_ACCESS_TIMEOUT 100 /* us */
+
+/* the number of fabric SerDes on the SBus */
+#define NUM_FABRIC_SERDES 4
+
+/* SBus fabric SerDes addresses, one set per HFI */
+static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
+ { 0x01, 0x02, 0x03, 0x04 },
+ { 0x28, 0x29, 0x2a, 0x2b }
+};
+
+/* SBus PCIe SerDes addresses, one set per HFI */
+static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
+ { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
+ 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
+ { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
+ 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
+};
+
+/* SBus PCIe PCS addresses, one set per HFI */
+const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
+ { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
+ 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
+ { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
+ 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
+};
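+/* note: each PCS address above is the corresponding SerDes address + 1 */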
+
+/* SBus fabric SerDes broadcast addresses, one per HFI */
+static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
+static const u8 all_fabric_serdes_broadcast = 0xe1;
+
+/* SBus PCIe SerDes broadcast addresses, one per HFI */
+const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
+static const u8 all_pcie_serdes_broadcast = 0xe0;
+
+/* forwards */
+static void dispose_one_firmware(struct firmware_details *fdet);
+
+/*
+ * Read a single 64-bit value from 8051 data memory.
+ *
+ * Expects:
+ * o caller to have already set up data read, no auto increment
+ * o caller to turn off read enable when finished
+ *
+ * The address argument is a byte offset. Bits 0:2 in the address are
+ * ignored - i.e. the hardware will always do aligned 8-byte reads as if
+ * the lower bits are zero.
+ *
+ * Return 0 on success, -ENXIO on a read error (timeout).
+ */
+static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
+{
+ u64 reg;
+ int count;
+
+ /* start the read at the given address */
+ reg = ((addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
+ | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+
+ /* wait until ACCESS_COMPLETED is set */
+ count = 0;
+ while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
+ & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
+ == 0) {
+ count++;
+ if (count > DC8051_ACCESS_TIMEOUT) {
+ dd_dev_err(dd, "timeout reading 8051 data\n");
+ return -ENXIO;
+ }
+ ndelay(10);
+ }
+
+ /* gather the data */
+ *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
+
+ return 0;
+}
+
+/*
+ * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
+ * Return 0 on success, -errno on error.
+ */
+int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
+{
+ unsigned long flags;
+ u32 done;
+ int ret = 0;
+
+ spin_lock_irqsave(&dd->dc8051_memlock, flags);
+
+ /* data read set-up, no auto-increment */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
+
+ for (done = 0; done < len; addr += 8, done += 8, result++) {
+ ret = __read_8051_data(dd, addr, result);
+ if (ret)
+ break;
+ }
+
+ /* turn off read enable */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
+
+ spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
+
+ return ret;
+}
+
+/*
+ * Write data or code to the 8051 code or data RAM.
+ */
+static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
+ const u8 *data, u32 len)
+{
+ u64 reg;
+ u32 offset;
+ int aligned, count;
+
+ /* check alignment */
+ aligned = ((unsigned long)data & 0x7) == 0;
+
+ /* write set-up */
+ reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
+ | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
+
+ reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
+ | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+
+ /* write */
+ for (offset = 0; offset < len; offset += 8) {
+ int bytes = len - offset;
+
+ if (bytes < 8) {
+ reg = 0;
+ memcpy(&reg, &data[offset], bytes);
+ } else if (aligned) {
+ reg = *(u64 *)&data[offset];
+ } else {
+ memcpy(&reg, &data[offset], 8);
+ }
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
+
+ /* wait until ACCESS_COMPLETED is set */
+ count = 0;
+ while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
+ & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
+ == 0) {
+ count++;
+ if (count > DC8051_ACCESS_TIMEOUT) {
+ dd_dev_err(dd, "timeout writing 8051 data\n");
+ return -ENXIO;
+ }
+ udelay(1);
+ }
+ }
+
+ /* turn off write access, auto increment (also sets to data access) */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
+
+ return 0;
+}
+
+/* return 0 if values match, non-zero and complain otherwise */
+static int invalid_header(struct hfi1_devdata *dd, const char *what,
+ u32 actual, u32 expected)
+{
+ if (actual == expected)
+ return 0;
+
+ dd_dev_err(dd,
+ "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
+ what, expected, actual);
+ return 1;
+}
+
+/*
+ * Verify that the static fields in the CSS header match.
+ */
+static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
+{
+ /* verify CSS header fields (most sizes are in DW, so add /4) */
+ if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE)
+ || invalid_header(dd, "header_len", css->header_len,
+ (sizeof(struct firmware_file)/4))
+ || invalid_header(dd, "header_version",
+ css->header_version, CSS_HEADER_VERSION)
+ || invalid_header(dd, "module_vendor",
+ css->module_vendor, CSS_MODULE_VENDOR)
+ || invalid_header(dd, "key_size",
+ css->key_size, KEY_SIZE/4)
+ || invalid_header(dd, "modulus_size",
+ css->modulus_size, KEY_SIZE/4)
+ || invalid_header(dd, "exponent_size",
+ css->exponent_size, EXPONENT_SIZE/4)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Make sure there are at least some bytes after the prefix.
+ */
+static int payload_check(struct hfi1_devdata *dd, const char *name,
+ long file_size, long prefix_size)
+{
+ /* make sure we have some payload */
+ if (prefix_size >= file_size) {
+ dd_dev_err(dd,
+ "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
+ name, file_size, prefix_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Request the firmware from the system. Extract the pieces and fill in
+ * fdet. If successful, the caller will need to call dispose_one_firmware().
+ * Returns 0 on success, -ERRNO on error.
+ */
+static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
+ struct firmware_details *fdet)
+{
+ struct css_header *css;
+ int ret;
+
+ memset(fdet, 0, sizeof(*fdet));
+
+ ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
+ if (ret) {
+ dd_dev_err(dd, "cannot load firmware \"%s\", err %d\n",
+ name, ret);
+ return ret;
+ }
+
+ /* verify the firmware */
+ if (fdet->fw->size < sizeof(struct css_header)) {
+ dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
+ ret = -EINVAL;
+ goto done;
+ }
+ css = (struct css_header *)fdet->fw->data;
+
+ hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
+ hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
+ hfi1_cdbg(FIRMWARE, "CSS structure:");
+ hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
+ hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
+ css->header_len, 4 * css->header_len);
+ hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
+ hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
+ hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
+ hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
+ hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
+ css->size, 4 * css->size);
+ hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
+ css->key_size, 4 * css->key_size);
+ hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
+ css->modulus_size, 4 * css->modulus_size);
+ hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
+ css->exponent_size, 4 * css->exponent_size);
+ hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
+ fdet->fw->size - sizeof(struct firmware_file));
+
+ /*
+ * If the file does not have a valid CSS header, fail.
+ * Otherwise, check the CSS size field for an expected size.
+ * The augmented file has r2 and mu inserted after the header
+ * was generated, so there will be a known difference between
+ * the CSS header size and the actual file size. Use this
+ * difference to identify an augmented file.
+ *
+ * Note: css->size is in DWORDs, multiply by 4 to get bytes.
+ */
+ ret = verify_css_header(dd, css);
+ if (ret) {
+ dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
+ } else if ((css->size*4) == fdet->fw->size) {
+ /* non-augmented firmware file */
+ struct firmware_file *ff = (struct firmware_file *)
+ fdet->fw->data;
+
+ /* make sure there are bytes in the payload */
+ ret = payload_check(dd, name, fdet->fw->size,
+ sizeof(struct firmware_file));
+ if (ret == 0) {
+ fdet->css_header = css;
+ fdet->modulus = ff->modulus;
+ fdet->exponent = ff->exponent;
+ fdet->signature = ff->signature;
+ fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
+ fdet->mu = fdet->dummy_header.mu; /* use dummy space */
+ fdet->firmware_ptr = ff->firmware;
+ fdet->firmware_len = fdet->fw->size -
+ sizeof(struct firmware_file);
+ /*
+ * Header does not include r2 and mu - generate here.
+ * For now, fail.
+ */
+ dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
+ ret = -EINVAL;
+ }
+ } else if ((css->size*4) + AUGMENT_SIZE == fdet->fw->size) {
+ /* augmented firmware file */
+ struct augmented_firmware_file *aff =
+ (struct augmented_firmware_file *)fdet->fw->data;
+
+ /* make sure there are bytes in the payload */
+ ret = payload_check(dd, name, fdet->fw->size,
+ sizeof(struct augmented_firmware_file));
+ if (ret == 0) {
+ fdet->css_header = css;
+ fdet->modulus = aff->modulus;
+ fdet->exponent = aff->exponent;
+ fdet->signature = aff->signature;
+ fdet->r2 = aff->r2;
+ fdet->mu = aff->mu;
+ fdet->firmware_ptr = aff->firmware;
+ fdet->firmware_len = fdet->fw->size -
+ sizeof(struct augmented_firmware_file);
+ }
+ } else {
+ /* css->size check failed */
+ dd_dev_err(dd,
+ "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
+ fdet->fw->size/4, (fdet->fw->size - AUGMENT_SIZE)/4,
+ css->size);
+
+ ret = -EINVAL;
+ }
+
+done:
+ /* if returning an error, clean up after ourselves */
+ if (ret)
+ dispose_one_firmware(fdet);
+ return ret;
+}
+
+static void dispose_one_firmware(struct firmware_details *fdet)
+{
+ release_firmware(fdet->fw);
+ fdet->fw = NULL;
+}
+
+/*
+ * Called by all HFIs when loading their firmware - i.e. device probe time.
+ * The first one will do the actual firmware load. Use a mutex to resolve
+ * any possible race condition.
+ *
+ * The call to this routine cannot be moved to driver load because the kernel
+ * call request_firmware() requires a device which is only available after
+ * the first device probe.
+ */
+static int obtain_firmware(struct hfi1_devdata *dd)
+{
+ int err = 0;
+
+ mutex_lock(&fw_mutex);
+ if (fw_state == FW_ACQUIRED) {
+ goto done; /* already acquired */
+ } else if (fw_state == FW_ERR) {
+ err = fw_err;
+ goto done; /* already tried and failed */
+ }
+
+ if (fw_8051_load) {
+ err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
+ if (err)
+ goto done;
+ }
+
+ if (fw_fabric_serdes_load) {
+ err = obtain_one_firmware(dd, fw_fabric_serdes_name,
+ &fw_fabric);
+ if (err)
+ goto done;
+ }
+
+ if (fw_sbus_load) {
+ err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
+ if (err)
+ goto done;
+ }
+
+ if (fw_pcie_serdes_load) {
+ err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
+ if (err)
+ goto done;
+ }
+
+ if (platform_config_load) {
+ platform_config = NULL;
+ err = request_firmware(&platform_config, platform_config_name,
+ &dd->pcidev->dev);
+ if (err) {
+ err = 0;
+ platform_config = NULL;
+ }
+ }
+
+ /* success */
+ fw_state = FW_ACQUIRED;
+
+done:
+ if (err) {
+ fw_err = err;
+ fw_state = FW_ERR;
+ }
+ mutex_unlock(&fw_mutex);
+
+ return err;
+}
+
+/*
+ * Called when the driver unloads. The timing is asymmetric with its
+ * counterpart, obtain_firmware(). If called at device remove time,
+ * then it is conceivable that another device could probe while the
+ * firmware is being disposed. The mutex use could be extended to
+ * make that safe, but then the firmware would be requested from the
+ * OS multiple times.
+ *
+ * No mutex is needed as the driver is unloading and there cannot be any
+ * other callers.
+ */
+void dispose_firmware(void)
+{
+ dispose_one_firmware(&fw_8051);
+ dispose_one_firmware(&fw_fabric);
+ dispose_one_firmware(&fw_pcie);
+ dispose_one_firmware(&fw_sbus);
+
+ release_firmware(platform_config);
+ platform_config = NULL;
+
+ /* retain the error state, otherwise revert to empty */
+ if (fw_state != FW_ERR)
+ fw_state = FW_EMPTY;
+}
+
+/*
+ * Write a block of data to a given array CSR. All calls will be in
+ * multiples of 8 bytes.
+ */
+static void write_rsa_data(struct hfi1_devdata *dd, int what,
+ const u8 *data, int nbytes)
+{
+ int qw_size = nbytes/8;
+ int i;
+
+ if (((unsigned long)data & 0x7) == 0) {
+ /* aligned */
+ u64 *ptr = (u64 *)data;
+
+ for (i = 0; i < qw_size; i++, ptr++)
+ write_csr(dd, what + (8*i), *ptr);
+ } else {
+ /* not aligned */
+ for (i = 0; i < qw_size; i++, data += 8) {
+ u64 value;
+
+ memcpy(&value, data, 8);
+ write_csr(dd, what + (8*i), value);
+ }
+ }
+}
+
+/*
+ * Write a block of data to a given CSR as a stream of writes. All calls will
+ * be in multiples of 8 bytes.
+ */
+static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
+ const u8 *data, int nbytes)
+{
+ u64 *ptr = (u64 *)data;
+ int qw_size = nbytes/8;
+
+ for (; qw_size > 0; qw_size--, ptr++)
+ write_csr(dd, what, *ptr);
+}
+
+/*
+ * Download the signature and start the RSA mechanism. Wait for
+ * RSA_ENGINE_TIMEOUT before giving up.
+ */
+static int run_rsa(struct hfi1_devdata *dd, const char *who,
+ const u8 *signature)
+{
+ unsigned long timeout;
+ u64 reg;
+ u32 status;
+ int ret = 0;
+
+ /* write the signature */
+ write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
+
+ /* initialize RSA */
+ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
+
+ /*
+ * Make sure the engine is idle and insert a delay between the two
+ * writes to MISC_CFG_RSA_CMD.
+ */
+ status = (read_csr(dd, MISC_CFG_FW_CTRL)
+ & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
+ >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
+ if (status != RSA_STATUS_IDLE) {
+ dd_dev_err(dd, "%s security engine not idle - giving up\n",
+ who);
+ return -EBUSY;
+ }
+
+ /* start RSA */
+ write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
+
+ /*
+ * Look for the result.
+ *
+ * The RSA engine is hooked up to two MISC errors. The driver
+ * masks these errors as they do not respond to the standard
+ * error "clear down" mechanism. Look for these errors here and
+ * clear them when possible. This routine will exit with the
+ * errors of the current run still set.
+ *
+ * MISC_FW_AUTH_FAILED_ERR
+ * Firmware authorization failed. This can be cleared by
+ * re-initializing the RSA engine, then clearing the status bit.
+ * Do not re-init the RSA engine immediately after a successful
+ * run - this will reset the current authorization.
+ *
+ * MISC_KEY_MISMATCH_ERR
+ * Key does not match. The only way to clear this is to load
+ * a matching key then clear the status bit. If this error
+ * is raised, it will persist outside of this routine until a
+ * matching key is loaded.
+ */
+ timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
+ while (1) {
+ status = (read_csr(dd, MISC_CFG_FW_CTRL)
+ & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
+ >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
+
+ if (status == RSA_STATUS_IDLE) {
+ /* should not happen */
+ dd_dev_err(dd, "%s firmware security bad idle state\n",
+ who);
+ ret = -EINVAL;
+ break;
+ } else if (status == RSA_STATUS_DONE) {
+ /* finished successfully */
+ break;
+ } else if (status == RSA_STATUS_FAILED) {
+ /* finished unsuccessfully */
+ ret = -EINVAL;
+ break;
+ }
+ /* else still active */
+
+ if (time_after(jiffies, timeout)) {
+ /*
+ * Timed out while active. We can't reset the engine
+ * if it is stuck active, but fall through to the error
+ * reporting below to see which error bits are set.
+ */
+ dd_dev_err(dd, "%s firmware security time out\n", who);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(20);
+ }
+
+ /*
+ * Arrive here on success or failure. Clear all RSA engine
+ * errors. Errors from the current run will stick - the RSA logic
+ * is still driving them high. Errors from previous runs will
+ * clear - the RSA logic is no longer driving them.
+ */
+ write_csr(dd, MISC_ERR_CLEAR,
+ MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK
+ | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
+ /*
+ * All that is left are the current errors. Print failure details,
+ * if any.
+ */
+ reg = read_csr(dd, MISC_ERR_STATUS);
+ if (ret) {
+ if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
+ dd_dev_err(dd, "%s firmware authorization failed\n",
+ who);
+ if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
+ dd_dev_err(dd, "%s firmware key mismatch\n", who);
+ }
+
+ return ret;
+}
+
+static void load_security_variables(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ /* Security variables a. Write the modulus */
+ write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
+ /* Security variables b. Write the r2 */
+ write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
+ /* Security variables c. Write the mu */
+ write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
+ /* Security variables d. Write the header */
+ write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
+ (u8 *)fdet->css_header, sizeof(struct css_header));
+}
+
+/* return the 8051 firmware state */
+static inline u32 get_firmware_state(struct hfi1_devdata *dd)
+{
+ u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
+
+ return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
+ & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
+}
+
+/*
+ * Wait until the firmware is up and ready to take host requests.
+ * Return 0 on success, -ETIMEDOUT on timeout.
+ */
+int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
+{
+ unsigned long timeout;
+
+ /* in the simulator, the fake 8051 is always ready */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ return 0;
+
+ timeout = msecs_to_jiffies(mstimeout) + jiffies;
+ while (1) {
+ if (get_firmware_state(dd) == 0xa0) /* ready */
+ return 0;
+ if (time_after(jiffies, timeout)) /* timed out */
+ return -ETIMEDOUT;
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+}
+
+/*
+ * Load the 8051 firmware.
+ */
+static int load_8051_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ u64 reg;
+ int ret;
+ u8 ver_a, ver_b;
+
+ /*
+ * DC Reset sequence
+ * Load DC 8051 firmware
+ */
+ /*
+ * DC reset step 1: Reset DC8051
+ */
+ reg = DC_DC8051_CFG_RST_M8051W_SMASK
+ | DC_DC8051_CFG_RST_CRAM_SMASK
+ | DC_DC8051_CFG_RST_DRAM_SMASK
+ | DC_DC8051_CFG_RST_IRAM_SMASK
+ | DC_DC8051_CFG_RST_SFR_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RST, reg);
+
+ /*
+ * DC reset step 2 (optional): Load 8051 data memory with link
+ * configuration
+ */
+
+ /*
+ * DC reset step 3: Load DC8051 firmware
+ */
+ /* release all but the core reset */
+ reg = DC_DC8051_CFG_RST_M8051W_SMASK;
+ write_csr(dd, DC_DC8051_CFG_RST, reg);
+
+ /* Firmware load step 1 */
+ load_security_variables(dd, fdet);
+
+ /*
+ * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
+ */
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+
+ /* Firmware load steps 3-5 */
+ ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
+ fdet->firmware_len);
+ if (ret)
+ return ret;
+
+ /*
+ * DC reset step 4. Host starts the DC8051 firmware
+ */
+ /*
+ * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
+ */
+ write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
+
+ /* Firmware load steps 7-10 */
+ ret = run_rsa(dd, "8051", fdet->signature);
+ if (ret)
+ return ret;
+
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
+ /*
+ * DC reset step 5. Wait for firmware to be ready to accept host
+ * requests.
+ */
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
+
+ read_misc_status(dd, &ver_a, &ver_b);
+ dd_dev_info(dd, "8051 firmware version %d.%d\n",
+ (int)ver_b, (int)ver_a);
+ dd->dc8051_ver = dc8051_ver(ver_b, ver_a);
+
+ return 0;
+}
+
+/* SBus Master broadcast address */
+#define SBUS_MASTER_BROADCAST 0xfd
+
+/*
+ * Write the SBus request register
+ *
+ * No need for masking - the arguments are sized exactly.
+ */
+void sbus_request(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
+{
+ write_csr(dd, ASIC_CFG_SBUS_REQUEST,
+ ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT)
+ | ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT)
+ | ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT)
+ | ((u64)receiver_addr
+ << ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
+}
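+/*
+ * Example (values as used elsewhere in this file): sbus_request(dd,
+ * 0xe4, 0x07, WRITE_SBUS_RECEIVER, 0x11) packs receiver 0xe4,
+ * data address 0x07, the command, and data 0x11 into one
+ * ASIC_CFG_SBUS_REQUEST write at their respective _SHIFT positions.
+ */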
+
+/*
+ * Turn off the SBus and fabric serdes spicos.
+ *
+ * + Must be called with SBus fast mode turned on.
+ * + Must be called after fabric serdes broadcast is set up.
+ * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
+ * when using MISC_CFG_FW_CTRL.
+ */
+static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
+{
+ /* only needed on A0 */
+ if (!is_a0(dd))
+ return;
+
+ dd_dev_info(dd, "Turning off spicos:%s%s\n",
+ flags & SPICO_SBUS ? " SBus" : "",
+ flags & SPICO_FABRIC ? " fabric" : "");
+
+ write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
+ /* disable SBus spico */
+ if (flags & SPICO_SBUS)
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
+ WRITE_SBUS_RECEIVER, 0x00000040);
+
+ /* disable the fabric serdes spicos */
+ if (flags & SPICO_FABRIC)
+ sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
+ 0x07, WRITE_SBUS_RECEIVER, 0x00000000);
+ write_csr(dd, MISC_CFG_FW_CTRL, 0);
+}
+
+/*
+ * Reset all of the fabric serdes for our HFI.
+ */
+void fabric_serdes_reset(struct hfi1_devdata *dd)
+{
+ u8 ra;
+
+ if (dd->icode != ICODE_RTL_SILICON) /* only for RTL */
+ return;
+
+ ra = fabric_serdes_broadcast[dd->hfi1_id];
+
+ acquire_hw_mutex(dd);
+ set_sbus_fast_mode(dd);
+ /* place SerDes in reset and disable SPICO */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
+ /* wait 100 refclk cycles @ 156.25MHz => 640ns */
+ udelay(1);
+ /* remove SerDes reset */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
+ /* turn SPICO enable on */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+ clear_sbus_fast_mode(dd);
+ release_hw_mutex(dd);
+}
+
+/* Access to the SBus in this routine should probably be serialized */
+int sbus_request_slow(struct hfi1_devdata *dd,
+ u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
+{
+ u64 reg, count = 0;
+
+ sbus_request(dd, receiver_addr, data_addr, command, data_in);
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
+ ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
+ /* Wait for both DONE and RCV_DATA_VALID to go high */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
+ (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
+ if (count++ >= SBUS_MAX_POLL_COUNT) {
+ u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ /*
+ * If the loop has timed out, we are OK if DONE bit
+ * is set and RCV_DATA_VALID and EXECUTE counters
+ * are the same. If not, we cannot proceed.
+ */
+ if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
+ (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
+ SBUS_COUNTER(counts, EXECUTE)))
+ break;
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ }
+ count = 0;
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
+ /* Wait for DONE to clear after EXECUTE is cleared */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
+ if (count++ >= SBUS_MAX_POLL_COUNT)
+ return -ETIME;
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ }
+ return 0;
+}
+
+static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i, err;
+ const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
+
+ dd_dev_info(dd, "Downloading fabric firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: place SerDes in reset and disable SPICO */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
+ /* wait 100 refclk cycles @ 156.25MHz => 640ns */
+ udelay(1);
+ /* step 3: remove SerDes reset */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
+ /* step 4: assert IMEM override */
+ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
+ /* step 5: download SerDes machine code */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 6: IMEM override off */
+ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
+ /* step 7: turn ECC on */
+ sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
+
+ /* steps 8-11: run the RSA engine */
+ err = run_rsa(dd, "fabric serdes", fdet->signature);
+ if (err)
+ return err;
+
+ /* step 12: turn SPICO enable on */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+ /* step 13: enable core hardware interrupts */
+ sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
+
+ return 0;
+}
+
+static int load_sbus_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i, err;
+ const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
+
+ dd_dev_info(dd, "Downloading SBus firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: place SPICO into reset and enable off */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
+ /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
+ /* step 4: set starting IMEM address for burst download */
+ sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
+ /* step 5: download the SBus Master machine code */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 6: set IMEM_CNTL_EN off */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
+ /* step 7: turn ECC on */
+ sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
+
+ /* steps 8-11: run the RSA engine */
+ err = run_rsa(dd, "SBus", fdet->signature);
+ if (err)
+ return err;
+
+ /* step 12: set SPICO_ENABLE on */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
+
+ return 0;
+}
+
+static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet)
+{
+ int i;
+ const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
+
+ dd_dev_info(dd, "Downloading PCIe firmware\n");
+
+ /* step 1: load security variables */
+ load_security_variables(dd, fdet);
+ /* step 2: assert single step (halts the SBus Master spico) */
+ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
+ /* step 3: enable XDMEM access */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
+ /* step 4: load firmware into SBus Master XDMEM */
+ /*
+ * NOTE: the dmem address, write_en, and wdata are all pre-packed;
+ * we only need to pick up the bytes and write them.
+ */
+ for (i = 0; i < fdet->firmware_len; i += 4) {
+ sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
+ *(u32 *)&fdet->firmware_ptr[i]);
+ }
+ /* step 5: disable XDMEM access */
+ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
+ /* step 6: allow SBus Spico to run */
+ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
+
+ /* steps 7-11: run RSA; if it succeeds, the firmware is available to
+ be swapped */
+ return run_rsa(dd, "PCIe serdes", fdet->signature);
+}
+
+/*
+ * Set the given broadcast values on the given list of devices.
+ */
+static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
+ const u8 *addrs, int count)
+{
+ while (--count >= 0) {
+ /*
+ * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
+ * defaults for everything else. Do not read-modify-write,
+ * per instruction from the manufacturer.
+ *
+ * Register 0xfd:
+ * bits what
+ * ----- ---------------------------------
+ * 0 IGNORE_BROADCAST (default 0)
+ * 11:4 BROADCAST_GROUP_1 (default 0xff)
+ * 23:16 BROADCAST_GROUP_2 (default 0xff)
+ */
+ sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
+ (u32)bg1 << 4 | (u32)bg2 << 16);
+ }
+}
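+
+/*
+ * Example (illustrative, not part of the original patch): for bg1 = 0x1
+ * and bg2 = 0x2, the value written to register 0xfd above is
+ * (0x1 << 4) | (0x2 << 16) = 0x00020010, i.e. BROADCAST_GROUP_1 lands
+ * in bits 11:4, BROADCAST_GROUP_2 in bits 23:16, and IGNORE_BROADCAST
+ * (bit 0) is left at its default of 0.
+ */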
+
+int acquire_hw_mutex(struct hfi1_devdata *dd)
+{
+ unsigned long timeout;
+ int try = 0;
+ u8 mask = 1 << dd->hfi1_id;
+ u8 user;
+
+retry:
+ timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
+ while (1) {
+ write_csr(dd, ASIC_CFG_MUTEX, mask);
+ user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+ if (user == mask)
+ return 0; /* success */
+ if (time_after(jiffies, timeout))
+ break; /* timed out */
+ msleep(20);
+ }
+
+ /* timed out */
+ dd_dev_err(dd,
+ "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
+ (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
+
+ if (try == 0) {
+ /* break mutex and retry */
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
+ try++;
+ goto retry;
+ }
+
+ return -EBUSY;
+}
+
+void release_hw_mutex(struct hfi1_devdata *dd)
+{
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
+}
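+
+/*
+ * Usage sketch (illustrative only): any access to shared ASIC resources
+ * such as the SBus should be bracketed by the hardware mutex so the two
+ * HFIs on an ASIC do not collide, as load_firmware() below does:
+ *
+ * ret = acquire_hw_mutex(dd);
+ * if (ret)
+ * return ret;
+ * ... access shared ASIC resources (SBus, etc.) ...
+ * release_hw_mutex(dd);
+ */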
+
+void set_sbus_fast_mode(struct hfi1_devdata *dd)
+{
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
+ ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
+}
+
+void clear_sbus_fast_mode(struct hfi1_devdata *dd)
+{
+ u64 reg, count = 0;
+
+ reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ while (SBUS_COUNTER(reg, EXECUTE) !=
+ SBUS_COUNTER(reg, RCV_DATA_VALID)) {
+ if (count++ >= SBUS_MAX_POLL_COUNT)
+ break;
+ udelay(1);
+ reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
+ }
+ write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
+}
+
+int load_firmware(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ if (fw_sbus_load || fw_fabric_serdes_load) {
+ ret = acquire_hw_mutex(dd);
+ if (ret)
+ return ret;
+
+ set_sbus_fast_mode(dd);
+
+ /*
+ * The SBus contains part of the fabric firmware and so must
+ * also be downloaded.
+ */
+ if (fw_sbus_load) {
+ turn_off_spicos(dd, SPICO_SBUS);
+ ret = load_sbus_firmware(dd, &fw_sbus);
+ if (ret)
+ goto clear;
+ }
+
+ if (fw_fabric_serdes_load) {
+ set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
+ fabric_serdes_broadcast[dd->hfi1_id],
+ fabric_serdes_addrs[dd->hfi1_id],
+ NUM_FABRIC_SERDES);
+ turn_off_spicos(dd, SPICO_FABRIC);
+ ret = load_fabric_serdes_firmware(dd, &fw_fabric);
+ }
+
+clear:
+ clear_sbus_fast_mode(dd);
+ release_hw_mutex(dd);
+ if (ret)
+ return ret;
+ }
+
+ if (fw_8051_load) {
+ ret = load_8051_firmware(dd, &fw_8051);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int hfi1_firmware_init(struct hfi1_devdata *dd)
+{
+ /* only RTL can use these */
+ if (dd->icode != ICODE_RTL_SILICON) {
+ fw_fabric_serdes_load = 0;
+ fw_pcie_serdes_load = 0;
+ fw_sbus_load = 0;
+ }
+
+ /* no 8051 or QSFP on simulator */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ fw_8051_load = 0;
+ platform_config_load = 0;
+ }
+
+ if (!fw_8051_name) {
+ if (dd->icode == ICODE_RTL_SILICON)
+ fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
+ else
+ fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
+ }
+ if (!fw_fabric_serdes_name)
+ fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
+ if (!fw_sbus_name)
+ fw_sbus_name = DEFAULT_FW_SBUS_NAME;
+ if (!fw_pcie_serdes_name)
+ fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
+ if (!platform_config_name)
+ platform_config_name = DEFAULT_PLATFORM_CONFIG_NAME;
+
+ return obtain_firmware(dd);
+}
+
+int parse_platform_config(struct hfi1_devdata *dd)
+{
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ u32 *ptr = NULL;
+ u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0;
+ u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
+
+ if (platform_config == NULL) {
+ dd_dev_info(dd, "%s: Missing config file\n", __func__);
+ goto bail;
+ }
+ ptr = (u32 *)platform_config->data;
+
+ magic_num = *ptr;
+ ptr++;
+ if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
+ dd_dev_info(dd, "%s: Bad config file\n", __func__);
+ goto bail;
+ }
+
+ while (ptr < (u32 *)(platform_config->data + platform_config->size)) {
+ header1 = *ptr;
+ header2 = *(ptr + 1);
+ if (header1 != ~header2) {
+ dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)platform_config->data));
+ goto bail;
+ }
+
+ record_idx = *ptr &
+ ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);
+
+ table_length_dwords = (*ptr >>
+ PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
+ ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);
+
+ table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
+ ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);
+
+ /* Done with this set of headers */
+ ptr += 2;
+
+ if (record_idx) {
+ /* data table */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ 1;
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ 2;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ pcfgcache->config_tables[table_type].num_table =
+ table_length_dwords;
+ break;
+ default:
+ dd_dev_info(dd,
+ "%s: Unknown data table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)platform_config->data));
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table = ptr;
+ } else {
+ /* metadata table */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_PORT_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ break;
+ default:
+ dd_dev_info(dd,
+ "%s: Unknown metadata table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)platform_config->data));
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table_metadata =
+ ptr;
+ }
+
+ /* Calculate and check table crc */
+ crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
+ (table_length_dwords * 4));
+ crc ^= ~(u32)0;
+
+ /* Jump the table */
+ ptr += table_length_dwords;
+ if (crc != *ptr) {
+ dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
+ __func__, (ptr - (u32 *)platform_config->data));
+ goto bail;
+ }
+ /* Jump the CRC DWORD */
+ ptr++;
+ }
+
+ pcfgcache->cache_valid = 1;
+ return 0;
+bail:
+ memset(pcfgcache, 0, sizeof(struct platform_config_cache));
+ return -EINVAL;
+}
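+
+/*
+ * Worked example of the header self-check above (illustrative): every
+ * table is preceded by two dwords where the second is the bitwise
+ * complement of the first, e.g. header1 = 0x10020001 pairs with
+ * header2 = 0xeffdfffe. Corruption of either dword breaks the
+ * header1 == ~header2 identity and the whole file is rejected.
+ */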
+
+static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
+ int field, u32 *field_len_bits, u32 *field_start_bits)
+{
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ u32 *src_ptr = NULL;
+
+ if (!pcfgcache->cache_valid)
+ return -EINVAL;
+
+ switch (table) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_PORT_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ if (field && field < platform_config_table_limits[table])
+ src_ptr =
+ pcfgcache->config_tables[table].table_metadata + field;
+ break;
+ default:
+ dd_dev_info(dd, "%s: Unknown table\n", __func__);
+ break;
+ }
+
+ if (!src_ptr)
+ return -EINVAL;
+
+ if (field_start_bits)
+ *field_start_bits = *src_ptr &
+ ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
+
+ if (field_len_bits)
+ *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
+ & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
+
+ return 0;
+}
+
+/* This is the central interface to getting data out of the platform config
+ * file. It depends on parse_platform_config() having populated the
+ * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
+ * validate the sanity of the cache.
+ *
+ * The non-obvious parameters:
+ * @table_index: Acts as a lookup key selecting which instance of the tables
+ * the relevant field is fetched from.
+ *
+ * This applies to the data tables that have multiple instances. The port table
+ * is an exception to this rule as each HFI only has one port and thus the
+ * relevant table can be distinguished by hfi_id.
+ *
+ * @data: pointer to memory that will be populated with the field requested.
+ * @len: length of memory pointed by @data in bytes.
+ */
+int get_platform_config_field(struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding table_type,
+ int table_index, int field_index, u32 *data, u32 len)
+{
+ int ret = 0, wlen = 0, seek = 0;
+ u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+
+ if (data)
+ memset(data, 0, len);
+ else
+ return -EINVAL;
+
+ ret = get_platform_fw_field_metadata(dd, table_type, field_index,
+ &field_len_bits, &field_start_bits);
+ if (ret)
+ return -EINVAL;
+
+ /* Convert length to bits */
+ len *= 8;
+
+ /* Our metadata function checked cache_valid and field_index for us */
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ src_ptr = pcfgcache->config_tables[table_type].table;
+
+ if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
+ if (len < field_len_bits)
+ return -EINVAL;
+
+ seek = field_start_bits/8;
+ wlen = field_len_bits/8;
+
+ src_ptr = (u32 *)((u8 *)src_ptr + seek);
+
+ /* We expect the field to be byte-aligned with a whole-byte
+ * length if we are here */
+ memcpy(data, src_ptr, wlen);
+ return 0;
+ }
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ /* Port table is 4 DWORDS in META_VERSION 0 */
+ src_ptr = dd->hfi1_id ?
+ pcfgcache->config_tables[table_type].table + 4 :
+ pcfgcache->config_tables[table_type].table;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ /* fall through */
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ src_ptr = pcfgcache->config_tables[table_type].table;
+
+ if (table_index <
+ pcfgcache->config_tables[table_type].num_table)
+ src_ptr += table_index;
+ else
+ src_ptr = NULL;
+ break;
+ default:
+ dd_dev_info(dd, "%s: Unknown table\n", __func__);
+ break;
+ }
+
+ if (!src_ptr || len < field_len_bits)
+ return -EINVAL;
+
+ src_ptr += (field_start_bits/32);
+ *data = (*src_ptr >> (field_start_bits % 32)) &
+ ((1 << field_len_bits) - 1);
+
+ return 0;
+}
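+
+/*
+ * Usage sketch (illustrative; the exact call site is hypothetical):
+ * fetch the maximum QSFP power class from the system table.
+ *
+ * u32 power_class;
+ *
+ * if (!get_platform_config_field(dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
+ * SYSTEM_TABLE_QSFP_POWER_CLASS_MAX,
+ * &power_class, sizeof(power_class)))
+ * ... power_class now holds the field value ...
+ */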
+
+/*
+ * Download the firmware needed for the Gen3 PCIe SerDes. An update
+ * to the SBus firmware is needed before updating the PCIe firmware.
+ *
+ * Note: caller must be holding the HW mutex.
+ */
+int load_pcie_firmware(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* both firmware loads below use the SBus */
+ set_sbus_fast_mode(dd);
+
+ if (fw_sbus_load) {
+ turn_off_spicos(dd, SPICO_SBUS);
+ ret = load_sbus_firmware(dd, &fw_sbus);
+ if (ret)
+ goto done;
+ }
+
+ if (fw_pcie_serdes_load) {
+ dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
+ set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
+ pcie_serdes_broadcast[dd->hfi1_id],
+ pcie_serdes_addrs[dd->hfi1_id],
+ NUM_PCIE_SERDES);
+ ret = load_pcie_serdes_firmware(dd, &fw_pcie);
+ if (ret)
+ goto done;
+ }
+
+done:
+ clear_sbus_fast_mode(dd);
+
+ return ret;
+}
+
+/*
+ * Read the GUID from the hardware, store it in dd.
+ */
+void read_guid(struct hfi1_devdata *dd)
+{
+ dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
+ dd_dev_info(dd, "GUID %llx",
+ (unsigned long long)dd->base_guid);
+}
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
new file mode 100644
index 000000000000..8ca171bf3e36
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -0,0 +1,1821 @@
+#ifndef _HFI1_KERNEL_H
+#define _HFI1_KERNEL_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include "chip_registers.h"
+#include "common.h"
+#include "verbs.h"
+#include "pio.h"
+#include "chip.h"
+#include "mad.h"
+#include "qsfp.h"
+#include "platform_config.h"
+
+/* bumped 1 from s/w major version of TrueScale */
+#define HFI1_CHIP_VERS_MAJ 3U
+
+/* don't care about this except printing */
+#define HFI1_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define HFI1_OUI 0x001175
+#define HFI1_OUI_LSB 40
+
+#define DROP_PACKET_OFF 0
+#define DROP_PACKET_ON 1
+
+extern unsigned long hfi1_cap_mask;
+#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
+#define HFI1_CAP_UGET_MASK(mask, cap) \
+ (((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
+#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
+#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
+#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
+#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
+#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
+ HFI1_CAP_MISC_MASK)
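+
+/*
+ * Usage sketch (illustrative; SDMA is an assumed capability name): the
+ * macros above reduce capability tests to simple mask checks, e.g.
+ *
+ * if (HFI1_CAP_IS_KSET(SDMA))
+ * ... kernel-side SDMA is enabled in hfi1_cap_mask ...
+ */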
+
+/*
+ * per-driver stats that are either neither device- nor port-specific,
+ * or are summed over all of the devices and ports.
+ * They are described by name via the ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted, hfi1_statnames[] in debugfs.c must
+ * change to match.
+ */
+struct hfi1_ib_stats {
+ __u64 sps_ints; /* number of interrupts handled */
+ __u64 sps_errints; /* number of error interrupts */
+ __u64 sps_txerrs; /* tx-related packet errors */
+ __u64 sps_rcverrs; /* non-crc rcv packet errors */
+ __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+ __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+ __u64 sps_ctxts; /* number of contexts currently open */
+ __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+ __u64 sps_buffull;
+ __u64 sps_hdrfull;
+};
+
+extern struct hfi1_ib_stats hfi1_stats;
+extern const struct pci_error_handlers hfi1_pci_err_handler;
+
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+
+#ifdef CONFIG_DEBUG_FS
+struct hfi1_opcode_stats_perctx;
+#endif
+
+/*
+ * struct ps_state keeps state associated with RX queue "prescanning"
+ * (prescanning for FECNs, and BECNs), if prescanning is in use.
+ */
+struct ps_state {
+ u32 ps_head;
+ int initialized;
+};
+
+struct ctxt_eager_bufs {
+ ssize_t size; /* total size of eager buffers */
+ u32 count; /* size of buffers array */
+ u32 numbufs; /* number of buffers allocated */
+ u32 alloced; /* number of rcvarray entries used */
+ u32 rcvtid_size; /* size of each eager rcv tid */
+ u32 threshold; /* head update threshold */
+ struct eager_buffer {
+ void *addr;
+ dma_addr_t phys;
+ ssize_t len;
+ } *buffers;
+ struct {
+ void *addr;
+ dma_addr_t phys;
+ } *rcvtids;
+};
+
+struct hfi1_ctxtdata {
+ /* shadow the ctxt's RcvCtrl register */
+ u64 rcvctrl;
+ /* rcvhdrq base, needs mmap before useful */
+ void *rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ volatile __le64 *rcvhdrtail_kvaddr;
+ /*
+ * Shared page for kernel to signal user processes that send buffers
+ * need disarming. The process should call HFI1_CMD_DISARM_BUFS
+ * or HFI1_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
+ */
+ unsigned long *user_event_mask;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /* rcvhdrq size (for freeing) */
+ size_t rcvhdrq_size;
+ /* number of rcvhdrq entries */
+ u16 rcvhdrq_cnt;
+ /* size of each of the rcvhdrq entries */
+ u16 rcvhdrqentsize;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t rcvhdrq_phys;
+ dma_addr_t rcvhdrqtailaddr_phys;
+ struct ctxt_eager_bufs egrbufs;
+ /* this receive context's assigned PIO ACK send context */
+ struct send_context *sc;
+
+ /* dynamic receive available interrupt timeout */
+ u32 rcvavail_timeout;
+ /*
+ * number of opens (including slave sub-contexts) on this instance
+ * (ignoring forks, dup, etc. for now)
+ */
+ int cnt;
+ /*
+ * how much space to leave at start of eager TID entries for
+ * protocol use, on each TID
+ */
+ /* instead of calculating it */
+ unsigned ctxt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_cnt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ u8 uuid[16];
+ /* job key */
+ u16 jkey;
+ /* number of RcvArray groups for this context. */
+ u32 rcv_array_groups;
+ /* index of first eager TID entry. */
+ u32 eager_base;
+ /* number of expected TID entries */
+ u32 expected_count;
+ /* index of first expected TID entry. */
+ u32 expected_base;
+ /* cursor into the exp group sets */
+ atomic_t tidcursor;
+ /* number of exp TID groups assigned to the ctxt */
+ u16 numtidgroups;
+ /* size of exp TID group fields in tidusemap */
+ u16 tidmapcnt;
+ /* exp TID group usage bitfield array */
+ unsigned long *tidusemap;
+ /* pinned pages for exp sends, allocated at open */
+ struct page **tid_pg_list;
+ /* dma handles for exp tid pages */
+ dma_addr_t *physshadow;
+ /* lock protecting all Expected TID data */
+ spinlock_t exp_lock;
+ /* number of pio bufs for this ctxt (all procs, if shared) */
+ u32 piocnt;
+ /* first pio buffer for this ctxt */
+ u32 pio_base;
+ /* chip offset of PIO buffers for this ctxt */
+ u32 piobufs;
+ /* per-context configuration flags */
+ u16 flags;
+ /* per-context event flags for fileops/intr communication */
+ unsigned long event_flags;
+ /* WAIT_RCV that timed out, no interrupt */
+ u32 rcvwait_to;
+ /* WAIT_PIO that timed out, no interrupt */
+ u32 piowait_to;
+ /* WAIT_RCV already happened, no wait */
+ u32 rcvnowait;
+ /* WAIT_PIO already happened, no wait */
+ u32 pionowait;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
+ /* pid of process using this ctxt */
+ pid_t pid;
+ pid_t subpid[HFI1_MAX_SHARED_CTXTS];
+ /* same size as task_struct .comm[], command that opened context */
+ char comm[16];
+ /* so file ops can get at unit */
+ struct hfi1_devdata *dd;
+ /* so functions that need physical port can get it easily */
+ struct hfi1_pportdata *ppd;
+ /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+ void *subctxt_uregbase;
+ /* An array of pages for the eager receive buffers * N */
+ void *subctxt_rcvegrbuf;
+ /* An array of pages for the eager header queue entries * N */
+ void *subctxt_rcvhdr_base;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
+ /* Bitmask of active slaves */
+ u32 active_slaves;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
+ /* receive packet sequence counter */
+ u8 seq_cnt;
+ u8 redirect_seq_cnt;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
+ u32 pkt_count;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+ /* interrupt handling */
+ u64 imask; /* clear interrupt mask */
+ int ireg; /* clear interrupt register */
+ unsigned numa_id; /* numa node of this context */
+ /* verbs stats per CTX */
+ struct hfi1_opcode_stats_perctx *opstats;
+ /*
+ * This is the kernel thread that will keep making
+ * progress on the user sdma requests behind the scenes.
+ * There is one per context (shared contexts use the master's).
+ */
+ struct task_struct *progress;
+ struct list_head sdma_queues;
+ spinlock_t sdma_qlock;
+
+#ifdef CONFIG_PRESCAN_RXQ
+ struct ps_state ps_state;
+#endif /* CONFIG_PRESCAN_RXQ */
+
+ /*
+ * The interrupt handler for a particular receive context can vary
+ * throughout its lifetime. This is not a lock-protected data member, so
+ * it must be updated atomically and the previous and new values must
+ * always be valid. Worst case is we process an extra interrupt and up
+ * to 64 packets with the wrong interrupt handler.
+ */
+ void (*do_interrupt)(struct hfi1_ctxtdata *rcd);
+};
+
+/*
+ * Represents a single packet at a high level. Put commonly computed things in
+ * here so we do not have to keep doing them over and over. The rule of thumb is
+ * if something is used one time to derive some value, store that something in
+ * here. If it is used multiple times, then store the result of that derivation
+ * in here.
+ */
+struct hfi1_packet {
+ void *ebuf;
+ void *hdr;
+ struct hfi1_ctxtdata *rcd;
+ __le32 *rhf_addr;
+ struct hfi1_qp *qp;
+ struct hfi1_other_headers *ohdr;
+ u64 rhf;
+ u32 maxcnt;
+ u32 rhqoff;
+ u32 hdrqtail;
+ int numpkt;
+ u16 tlen;
+ u16 hlen;
+ s16 etail;
+ u16 rsize;
+ u8 updegr;
+ u8 rcv_flags;
+ u8 etype;
+};
+
+static inline bool has_sc4_bit(struct hfi1_packet *p)
+{
+ return !!rhf_dc_info(p->rhf);
+}
+
+/*
+ * Private data for snoop/capture support.
+ */
+struct hfi1_snoop_data {
+ int mode_flag;
+ struct cdev cdev;
+ struct device *class_dev;
+ spinlock_t snoop_lock;
+ struct list_head queue;
+ wait_queue_head_t waitq;
+ void *filter_value;
+ int (*filter_callback)(void *hdr, void *data, void *value);
+ u64 dcc_cfg; /* saved value of DCC Cfg register */
+};
+
+/* snoop mode_flag values */
+#define HFI1_PORT_SNOOP_MODE 1U
+#define HFI1_PORT_CAPTURE_MODE 2U
+
+struct hfi1_sge_state;
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
+#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
+#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define HFI1_IB_CFG_SPD 5 /* current Link spd */
+#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
+#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
+#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
+#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
+#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * HFI or Host Link States
+ *
+ * These describe the states the driver thinks the logical and physical
+ * states are in. Used as an argument to set_link_state(). Implemented
+ * as bits for easy multi-state checking. The actual state can only be
+ * one.
+ */
+#define __HLS_UP_INIT_BP 0
+#define __HLS_UP_ARMED_BP 1
+#define __HLS_UP_ACTIVE_BP 2
+#define __HLS_DN_DOWNDEF_BP 3 /* link down default */
+#define __HLS_DN_POLL_BP 4
+#define __HLS_DN_DISABLE_BP 5
+#define __HLS_DN_OFFLINE_BP 6
+#define __HLS_VERIFY_CAP_BP 7
+#define __HLS_GOING_UP_BP 8
+#define __HLS_GOING_OFFLINE_BP 9
+#define __HLS_LINK_COOLDOWN_BP 10
+
+#define HLS_UP_INIT (1 << __HLS_UP_INIT_BP)
+#define HLS_UP_ARMED (1 << __HLS_UP_ARMED_BP)
+#define HLS_UP_ACTIVE (1 << __HLS_UP_ACTIVE_BP)
+#define HLS_DN_DOWNDEF (1 << __HLS_DN_DOWNDEF_BP) /* link down default */
+#define HLS_DN_POLL (1 << __HLS_DN_POLL_BP)
+#define HLS_DN_DISABLE (1 << __HLS_DN_DISABLE_BP)
+#define HLS_DN_OFFLINE (1 << __HLS_DN_OFFLINE_BP)
+#define HLS_VERIFY_CAP (1 << __HLS_VERIFY_CAP_BP)
+#define HLS_GOING_UP (1 << __HLS_GOING_UP_BP)
+#define HLS_GOING_OFFLINE (1 << __HLS_GOING_OFFLINE_BP)
+#define HLS_LINK_COOLDOWN (1 << __HLS_LINK_COOLDOWN_BP)
+
+#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
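+
+/*
+ * Because the states are bit positions, a multi-state check is a single
+ * mask test (illustrative):
+ *
+ * if (ppd->host_link_state & HLS_UP)
+ * ... the link is in INIT, ARMED or ACTIVE ...
+ */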
+
+/* use this MTU size if none other is given */
+#define HFI1_DEFAULT_ACTIVE_MTU 8192
+/* use this MTU size as the default maximum */
+#define HFI1_DEFAULT_MAX_MTU 8192
+/* default partition key */
+#define DEFAULT_PKEY 0xffff
+
+/*
+ * Possible fabric manager config parameters for fm_{get,set}_table()
+ */
+#define FM_TBL_VL_HIGH_ARB 1 /* Get/set VL high prio weights */
+#define FM_TBL_VL_LOW_ARB 2 /* Get/set VL low prio weights */
+#define FM_TBL_BUFFER_CONTROL 3 /* Get/set Buffer Control */
+#define FM_TBL_SC2VLNT 4 /* Get/set SC->VLnt */
+#define FM_TBL_VL_PREEMPT_ELEMS 5 /* Get (no set) VL preempt elems */
+#define FM_TBL_VL_PREEMPT_MATRIX 6 /* Get (no set) VL preempt matrix */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
+ */
+#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
+#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
+#define HFI1_RCVCTRL_CTXT_ENB 0x04
+#define HFI1_RCVCTRL_CTXT_DIS 0x08
+#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
+#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
+#define HFI1_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
+#define HFI1_RCVCTRL_PKEY_DIS 0x80
+#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
+#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
+#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
+#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
+#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
+#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
+#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
+#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+
+/* partition enforcement flags */
+#define HFI1_PART_ENFORCE_IN 0x1
+#define HFI1_PART_ENFORCE_OUT 0x2
+
+/* how often we check for synthetic counter wrap around */
+#define SYNTH_CNT_TIME 2
+
+/* Counter flags */
+#define CNTR_NORMAL 0x0 /* Normal counters, just read register */
+#define CNTR_SYNTH 0x1 /* Synthetic counters, saturate at all 1s */
+#define CNTR_DISABLED 0x2 /* Disable this counter */
+#define CNTR_32BIT 0x4 /* Simulate 64 bits for this counter */
+#define CNTR_VL 0x8 /* Per VL counter */
+#define CNTR_INVALID_VL -1 /* Specifies invalid VL */
+#define CNTR_MODE_W 0x0
+#define CNTR_MODE_R 0x1
+
+/* VLs Supported/Operational */
+#define HFI1_MIN_VLS_SUPPORTED 1
+#define HFI1_MAX_VLS_SUPPORTED 8
+
+static inline void incr_cntr64(u64 *cntr)
+{
+ if (*cntr < (u64)-1LL)
+ (*cntr)++;
+}
+
+static inline void incr_cntr32(u32 *cntr)
+{
+ if (*cntr < (u32)-1LL)
+ (*cntr)++;
+}
+
+#define MAX_NAME_SIZE 64
+struct hfi1_msix_entry {
+ struct msix_entry msix;
+ void *arg;
+ char name[MAX_NAME_SIZE];
+ cpumask_var_t mask;
+};
+
+/* per-SL CCA information */
+struct cca_timer {
+ struct hrtimer hrtimer;
+ struct hfi1_pportdata *ppd; /* read-only */
+ int sl; /* read-only */
+ u16 ccti; /* read/write - current value of CCTI */
+};
+
+struct link_down_reason {
+ /*
+ * SMA-facing value. Should be set from .latest when
+ * HLS_UP_* -> HLS_DN_* transition actually occurs.
+ */
+ u8 sma;
+ u8 latest;
+};
+
+enum {
+ LO_PRIO_TABLE,
+ HI_PRIO_TABLE,
+ MAX_PRIO_TABLE
+};
+
+struct vl_arb_cache {
+ spinlock_t lock;
+ struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct hfi1_pportdata {
+ struct hfi1_ibport ibport_data;
+
+ struct hfi1_devdata *dd;
+ struct kobject pport_cc_kobj;
+ struct kobject sc2vl_kobj;
+ struct kobject sl2sc_kobj;
+ struct kobject vl2mtu_kobj;
+
+ /* QSFP support */
+ struct qsfp_data qsfp_info;
+
+ /* GUID for this interface, in host order */
+ u64 guid;
+ /* GUID for peer interface, in host order */
+ u64 neighbor_guid;
+
+ /* up or down physical link state */
+ u32 linkup;
+
+ /*
+ * this address is mapped read-only into user processes so they can
+ * get status cheaply, whenever they want. One qword of status per port
+ */
+ u64 *statusp;
+
+ /* SendDMA related entries */
+
+ struct workqueue_struct *hfi1_wq;
+
+ /* move out of interrupt context */
+ struct work_struct link_vc_work;
+ struct work_struct link_up_work;
+ struct work_struct link_down_work;
+ struct work_struct sma_message_work;
+ struct work_struct freeze_work;
+ struct work_struct link_downgrade_work;
+ struct work_struct link_bounce_work;
+ /* host link state variables */
+ struct mutex hls_lock;
+ u32 host_link_state;
+
+ spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
+
+ u32 lstate; /* logical link state */
+
+ /* these are the "32 bit" regs */
+
+ u32 ibmtu; /* The MTU programmed for this unit */
+ /*
+ * Current max size IB packet (in bytes) including IB headers, that
+ * we can send. Changes when ibmtu changes.
+ */
+ u32 ibmaxlen;
+ u32 current_egress_rate; /* units [10^6 bits/sec] */
+ /* LID programmed for this instance */
+ u16 lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 pkeys[MAX_PKEY_VALUES];
+ u16 link_width_supported;
+ u16 link_width_downgrade_supported;
+ u16 link_speed_supported;
+ u16 link_width_enabled;
+ u16 link_width_downgrade_enabled;
+ u16 link_speed_enabled;
+ u16 link_width_active;
+ u16 link_width_downgrade_tx_active;
+ u16 link_width_downgrade_rx_active;
+ u16 link_speed_active;
+ u8 vls_supported;
+ u8 vls_operational;
+ /* LID mask control */
+ u8 lmc;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 rx_pol_inv;
+
+ u8 hw_pidx; /* physical port index */
+ u8 port; /* IB port number and index into dd->pports - 1 */
+ /* type of neighbor node */
+ u8 neighbor_type;
+ u8 neighbor_normal;
+ u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
+ u8 neighbor_port_number;
+ u8 is_sm_config_started;
+ u8 offline_disabled_reason;
+ u8 is_active_optimize_enabled;
+ u8 driver_link_ready; /* driver ready for active link */
+ u8 link_enabled; /* link enabled? */
+ u8 linkinit_reason;
+ u8 local_tx_rate; /* rate given to 8051 firmware */
+
+ /* placeholders for IB MAD packet settings */
+ u8 overrun_threshold;
+ u8 phy_error_threshold;
+
+ /* used to override LED behavior */
+ u8 led_override; /* Substituted for normal value, if non-zero */
+ u16 led_override_timeoff; /* delta to next timer event */
+ u8 led_override_vals[2]; /* Alternates per blink-frame */
+ u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+ atomic_t led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list led_override_timer;
+ u32 sm_trap_qp;
+ u32 sa_qp;
+
+ /*
+ * cca_timer_lock protects access to the per-SL cca_timer
+ * structures (specifically the ccti member).
+ */
+ spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
+ struct cca_timer cca_timer[OPA_MAX_SLS];
+
+ /* List of congestion control table entries */
+ struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];
+
+ /* congestion entries, each entry corresponding to a SL */
+ struct opa_congestion_setting_entry_shadow
+ congestion_entries[OPA_MAX_SLS];
+
+ /*
+ * cc_state_lock protects (write) access to the per-port
+ * struct cc_state.
+ */
+ spinlock_t cc_state_lock ____cacheline_aligned_in_smp;
+
+ struct cc_state __rcu *cc_state;
+
+ /* Total number of congestion control table entries */
+ u16 total_cct_entry;
+
+ /* Bit map identifying service level */
+ u32 cc_sl_control_map;
+
+ /* CA's max number of 64 entry units in the congestion control table */
+ u8 cc_max_table_entries;
+
+ /* begin congestion log related entries
+ * cc_log_lock protects all congestion log related data */
+ spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
+ u8 threshold_cong_event_map[OPA_MAX_SLS/8];
+ u16 threshold_event_counter;
+ struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
+ int cc_log_idx; /* index for logging events */
+ int cc_mad_idx; /* index for reporting events */
+ /* end congestion log related entries */
+
+ struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];
+
+ /* port relative counter buffer */
+ u64 *cntrs;
+ /* port relative synthetic counter buffer */
+ u64 *scntrs;
+ /* we synthesize port_xmit_discards from several egress errors */
+ u64 port_xmit_discards;
+ u64 port_xmit_constraint_errors;
+ u64 port_rcv_constraint_errors;
+ /* count of 'link_err' interrupts from DC */
+ u64 link_downed;
+ /* number of times link retrained successfully */
+ u64 link_up;
+ /* port_ltp_crc_mode is returned in 'portinfo' MADs */
+ u16 port_ltp_crc_mode;
+ /* port_crc_mode_enabled is the crc we support */
+ u8 port_crc_mode_enabled;
+ /* mgmt_allowed is also returned in 'portinfo' MADs */
+ u8 mgmt_allowed;
+ u8 part_enforce; /* partition enforcement flags */
+ struct link_down_reason local_link_down_reason;
+ struct link_down_reason neigh_link_down_reason;
+ /* Value to be sent to link peer on LinkDown .*/
+ u8 remote_link_down_reason;
+ /* Error events that will cause a port bounce. */
+ u32 port_error_action;
+};
+
+typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+
+typedef void (*opcode_handler)(struct hfi1_packet *packet);
+
+/* return values for the RHF receive functions */
+#define RHF_RCV_CONTINUE 0 /* keep going */
+#define RHF_RCV_DONE 1 /* stop, this packet processed */
+#define RHF_RCV_REPROCESS 2 /* stop. retain this packet */
+
+struct rcv_array_data {
+ u8 group_size;
+ u16 ngroups;
+ u16 nctxt_extra;
+};
+
+struct per_vl_data {
+ u16 mtu;
+ struct send_context *sc;
+};
+
+/* 16 to directly index */
+#define PER_VL_SEND_CONTEXTS 16
+
+struct err_info_rcvport {
+ u8 status_and_code;
+ u64 packet_flit1;
+ u64 packet_flit2;
+};
+
+struct err_info_constraint {
+ u8 status;
+ u16 pkey;
+ u32 slid;
+};
+
+struct hfi1_temp {
+ unsigned int curr; /* current temperature */
+ unsigned int lo_lim; /* low temperature limit */
+ unsigned int hi_lim; /* high temperature limit */
+ unsigned int crit_lim; /* critical temperature limit */
+ u8 triggers; /* temperature triggers */
+};
+
+/* device data struct now contains only "general per-device" info.
+ * fields related to a physical IB port are in a hfi1_pportdata struct.
+ */
+struct sdma_engine;
+struct sdma_vl_map;
+
+#define BOARD_VERS_MAX 96 /* how long the version string can be */
+#define SERIAL_MAX 16 /* length of the serial number */
+
+struct hfi1_devdata {
+ struct hfi1_ibdev verbs_dev; /* must be first */
+ struct list_head list;
+ /* pointers to related structs for this device */
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev user_cdev;
+ struct cdev diag_cdev;
+ struct cdev ui_cdev;
+ struct device *user_device;
+ struct device *diag_device;
+ struct device *ui_device;
+
+ /* mem-mapped pointer to base of chip regs */
+ u8 __iomem *kregbase;
+ /* end of mem-mapped chip space excluding sendbuf and user regs */
+ u8 __iomem *kregend;
+ /* physical address of chip for io_remap, etc. */
+ resource_size_t physaddr;
+ /* receive context data */
+ struct hfi1_ctxtdata **rcd;
+ /* send context data */
+ struct send_context_info *send_contexts;
+ /* map hardware send contexts to software index */
+ u8 *hw_to_sw;
+ /* spinlock for allocating and releasing send context resources */
+ spinlock_t sc_lock;
+ /* Per VL data. Enough for all VLs but not all elements are set/used. */
+ struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
+ /* seqlock for sc2vl */
+ seqlock_t sc2vl_lock;
+ u64 sc2vl[4];
+ /* Send Context initialization lock. */
+ spinlock_t sc_init_lock;
+
+ /* fields common to all SDMA engines */
+
+ /* default flags to last descriptor */
+ u64 default_desc1;
+ volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_heads_phys;
+ void *sdma_pad_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_pad_phys;
+ /* for deallocation */
+ size_t sdma_heads_size;
+ /* number from the chip */
+ u32 chip_sdma_engines;
+ /* num used */
+ u32 num_sdma;
+ /* lock for sdma_map */
+ spinlock_t sde_map_lock;
+ /* array of engines sized by num_sdma */
+ struct sdma_engine *per_sdma;
+ /* array of vl maps */
+ struct sdma_vl_map __rcu *sdma_map;
+ /* SPC freeze waitqueue and variable */
+ wait_queue_head_t sdma_unfreeze_wq;
+ atomic_t sdma_unfreeze_count;
+
+
+ /* hfi1_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct hfi1_pportdata *pport;
+
+ /* mem-mapped pointer to base of PIO buffers */
+ void __iomem *piobase;
+ /*
+ * write-combining mem-mapped pointer to base of RcvArray
+ * memory.
+ */
+ void __iomem *rcvarray_wc;
+ /*
+ * credit return base - a per-NUMA range of DMA address that
+ * the chip will use to update the per-context free counter
+ */
+ struct credit_return_base *cr_base;
+
+ /* send context numbers and sizes for each type */
+ struct sc_config_sizes sc_sizes[SC_MAX];
+
+ u32 lcb_access_count; /* count of LCB users */
+
+ char *boardname; /* human readable board info */
+
+ /* device (not port) flags, basically device capabilities */
+ u32 flags;
+
+ /* reset value */
+ u64 z_int_counter;
+ u64 z_rcv_limit;
+ /* percpu int_counter */
+ u64 __percpu *int_counter;
+ u64 __percpu *rcv_limit;
+
+ /* number of receive contexts in use by the driver */
+ u32 num_rcv_contexts;
+ /* number of pio send contexts in use by the driver */
+ u32 num_send_contexts;
+ /*
+ * number of ctxts available for PSM open
+ */
+ u32 freectxts;
+ /* base receive interrupt timeout, in CSR units */
+ u32 rcv_intr_timeout_csr;
+
+ u64 __iomem *egrtidbase;
+ spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
+ spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
+ /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
+ spinlock_t uctxt_lock; /* rcd and user context changes */
+ /* exclusive access to 8051 */
+ spinlock_t dc8051_lock;
+ /* exclusive access to 8051 memory */
+ spinlock_t dc8051_memlock;
+ int dc8051_timed_out; /* remember if the 8051 timed out */
+ /*
+ * A page that will hold event notification bitmaps for all
+ * contexts. This page will be mapped into all processes.
+ */
+ unsigned long *events;
+ /*
+ * per unit status, see also portdata statusp
+ * mapped read-only into user processes so they can get unit and
+ * IB link status cheaply
+ */
+ struct hfi1_status *status;
+ u32 freezelen; /* max length of freezemsg */
+
+ /* revision register shadow */
+ u64 revision;
+ /* Base GUID for device (network order) */
+ u64 base_guid;
+
+ /* these are the "32 bit" regs */
+
+ /* value we put in kr_rcvhdrsize */
+ u32 rcvhdrsize;
+ /* number of receive contexts the chip supports */
+ u32 chip_rcv_contexts;
+ /* number of receive array entries */
+ u32 chip_rcv_array_count;
+ /* number of PIO send contexts the chip supports */
+ u32 chip_send_contexts;
+ /* number of bytes in the PIO memory buffer */
+ u32 chip_pio_mem_size;
+ /* number of bytes in the SDMA memory buffer */
+ u32 chip_sdma_mem_size;
+
+ /* size of each rcvegrbuffer */
+ u32 rcvegrbufsize;
+ /* log2 of above */
+ u16 rcvegrbufsize_shift;
+ /* both sides of the PCIe link are gen3 capable */
+ u8 link_gen3_capable;
+ /* localbus width (1, 2,4,8,16,32) from config space */
+ u32 lbus_width;
+ /* localbus speed in MHz */
+ u32 lbus_speed;
+ int unit; /* unit # of this chip */
+ int node; /* home node of this chip */
+
+ /* save these PCI fields to restore after a reset */
+ u32 pcibar0;
+ u32 pcibar1;
+ u32 pci_rom;
+ u16 pci_command;
+ u16 pcie_devctl;
+ u16 pcie_lnkctl;
+ u16 pcie_devctl2;
+ u32 pci_msix0;
+ u32 pci_lnkctl3;
+ u32 pci_tph2;
+
+ /*
+ * ASCII serial number, from flash, large enough for original
+ * all digit strings, and longer serial number format
+ */
+ u8 serial[SERIAL_MAX];
+ /* human readable board version */
+ u8 boardversion[BOARD_VERS_MAX];
+ u8 lbus_info[32]; /* human readable localbus info */
+ /* chip major rev, from CceRevision */
+ u8 majrev;
+ /* chip minor rev, from CceRevision */
+ u8 minrev;
+ /* hardware ID */
+ u8 hfi1_id;
+ /* implementation code */
+ u8 icode;
+ /* default link down value (poll/sleep) */
+ u8 link_default;
+ /* vAU of this device */
+ u8 vau;
+ /* vCU of this device */
+ u8 vcu;
+ /* link credits of this device */
+ u16 link_credits;
+ /* initial vl15 credits to use */
+ u16 vl15_init;
+
+ /* Misc small ints */
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ u8 n_krcv_queues;
+ u8 qos_shift;
+ u8 qpn_mask;
+
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+ u16 irev; /* implementation revision */
+ u16 dc8051_ver; /* 8051 firmware version */
+
+ struct platform_config_cache pcfg_cache;
+ /* control high-level access to qsfp */
+ struct mutex qsfp_i2c_mutex;
+
+ struct diag_client *diag_client;
+ spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
+
+ u8 psxmitwait_supported;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+ /* high volume overflow errors deferred to tasklet */
+ struct tasklet_struct error_tasklet;
+ /* per device cq worker */
+ struct kthread_worker *worker;
+
+ /* MSI-X information */
+ struct hfi1_msix_entry *msix_entries;
+ u32 num_msix_entries;
+
+ /* INTx information */
+ u32 requested_intx_irq; /* did we request one? */
+ char intx_name[MAX_NAME_SIZE]; /* INTx name */
+
+ /* general interrupt: mask of handled interrupts */
+ u64 gi_mask[CCE_NUM_INT_CSRS];
+
+ struct rcv_array_data rcv_entries;
+
+ /*
+ * 64 bit synthetic counters
+ */
+ struct timer_list synth_stats_timer;
+
+ /*
+ * device counters
+ */
+ char *cntrnames;
+ size_t cntrnameslen;
+ size_t ndevcntrs;
+ u64 *cntrs;
+ u64 *scntrs;
+
+ /*
+ * remembered values for synthetic counters
+ */
+ u64 last_tx;
+ u64 last_rx;
+
+ /*
+ * per-port counters
+ */
+ size_t nportcntrs;
+ char *portcntrnames;
+ size_t portcntrnameslen;
+
+ struct hfi1_snoop_data hfi1_snoop;
+
+ struct err_info_rcvport err_info_rcvport;
+ struct err_info_constraint err_info_rcv_constraint;
+ struct err_info_constraint err_info_xmit_constraint;
+ u8 err_info_uncorrectable;
+ u8 err_info_fmconfig;
+
+ atomic_t drop_packet;
+ u8 do_drop;
+
+ /* receive interrupt functions */
+ rhf_rcv_function_ptr *rhf_rcv_function_map;
+ rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
+
+ /*
+ * Handlers for outgoing data so that snoop/capture does not
+ * have to have its hooks in the send path
+ */
+ int (*process_pio_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
+ u32 hdrwords, struct hfi1_sge_state *ss,
+ u32 len, u32 plen, u32 dwords, u64 pbc);
+ int (*process_dma_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
+ u32 hdrwords, struct hfi1_sge_state *ss,
+ u32 len, u32 plen, u32 dwords, u64 pbc);
+ void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
+ u64 pbc, const void *from, size_t count);
+
+ /* OUI comes from the HW. Used everywhere as 3 separate bytes. */
+ u8 oui1;
+ u8 oui2;
+ u8 oui3;
+ /* Timer and counter used to detect RcvBufOvflCnt changes */
+ struct timer_list rcverr_timer;
+ u32 rcv_ovfl_cnt;
+
+ int assigned_node_id;
+ wait_queue_head_t event_queue;
+
+ /* Save the enabled LCB error bits */
+ u64 lcb_err_en;
+ u8 dc_shutdown;
+};
+
+/* 8051 firmware version helper */
+#define dc8051_ver(a, b) ((a) << 8 | (b))
+
+/* f_put_tid types */
+#define PT_EXPECTED 0
+#define PT_EAGER 1
+#define PT_INVALID 2
+
+/* Private data for file operations */
+struct hfi1_filedata {
+ struct hfi1_ctxtdata *uctxt;
+ unsigned subctxt;
+ struct hfi1_user_sdma_comp_q *cq;
+ struct hfi1_user_sdma_pkt_q *pq;
+ /* for cpu affinity; -1 if none */
+ int rec_cpu_num;
+};
+
+extern struct list_head hfi1_dev_list;
+extern spinlock_t hfi1_devs_lock;
+struct hfi1_devdata *hfi1_lookup(int unit);
+extern u32 hfi1_cpulist_count;
+extern unsigned long *hfi1_cpulist;
+
+extern unsigned int snoop_drop_send;
+extern unsigned int snoop_force_capture;
+int hfi1_init(struct hfi1_devdata *, int);
+int hfi1_count_units(int *npresentp, int *nupp);
+int hfi1_count_active_units(void);
+
+int hfi1_diag_add(struct hfi1_devdata *);
+void hfi1_diag_remove(struct hfi1_devdata *);
+void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
+
+void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
+
+int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *);
+int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *);
+int hfi1_create_ctxts(struct hfi1_devdata *dd);
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32);
+void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *,
+ struct hfi1_devdata *, u8, u8);
+void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *);
+
+void handle_receive_interrupt(struct hfi1_ctxtdata *);
+void handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd);
+void handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd);
+int hfi1_reset_device(int);
+
+/* return the driver's idea of the logical OPA port state */
+static inline u32 driver_lstate(struct hfi1_pportdata *ppd)
+{
+ return ppd->lstate; /* use the cached value */
+}
+
+static inline u16 generate_jkey(kuid_t uid)
+{
+ return from_kuid(current_user_ns(), uid) & 0xffff;
+}
+
+/*
+ * active_egress_rate
+ *
+ * returns the active egress rate in units of [10^6 bits/sec]
+ */
+static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
+{
+ u16 link_speed = ppd->link_speed_active;
+ u16 link_width = ppd->link_width_active;
+ u32 egress_rate;
+
+ if (link_speed == OPA_LINK_SPEED_25G)
+ egress_rate = 25000;
+ else /* assume OPA_LINK_SPEED_12_5G */
+ egress_rate = 12500;
+
+ switch (link_width) {
+ case OPA_LINK_WIDTH_4X:
+ egress_rate *= 4;
+ break;
+ case OPA_LINK_WIDTH_3X:
+ egress_rate *= 3;
+ break;
+ case OPA_LINK_WIDTH_2X:
+ egress_rate *= 2;
+ break;
+ default:
+ /* assume IB_WIDTH_1X */
+ break;
+ }
+
+ return egress_rate;
+}
+
+/*
+ * egress_cycles
+ *
+ * Returns the number of 'fabric clock cycles' to egress a packet
+ * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
+ * rate is (approximately) 805 MHz, the units of the returned value
+ * are (1/805 MHz).
+ */
+static inline u32 egress_cycles(u32 len, u32 rate)
+{
+ u32 cycles;
+
+ /*
+ * cycles is:
+ *
+ * (length) [bits] / (rate) [bits/sec]
+ * ---------------------------------------------------
+ * fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
+ */
+
+ cycles = len * 8; /* bits */
+ cycles *= 805;
+ cycles /= rate;
+
+ return cycles;
+}
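+
+/*
+ * Worked example (illustrative): a 1024 byte packet on a 4X/25G link
+ * (active_egress_rate() == 100000) costs 1024 * 8 * 805 / 100000 = 65
+ * cycles with integer math, roughly 81 ns at 805 MHz. The intermediate
+ * product (6594560) comfortably fits in the u32 used above.
+ */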
+
+void set_link_ipg(struct hfi1_pportdata *ppd);
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
+ u32 rqpn, u8 svc_type);
+void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
+ u32 pkey, u32 slid, u32 dlid, u8 sc5,
+ const struct ib_grh *old_grh);
+
+#define PACKET_EGRESS_TIMEOUT 350
+static inline void pause_for_credit_return(struct hfi1_devdata *dd)
+{
+ /* Pause at least 1us, to ensure chip returns all credits */
+ u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
+
+ udelay(usec ? usec : 1);
+}
+
+/**
+ * sc_to_vlt() - reverse lookup sc to vl
+ * @dd: devdata
+ * @sc5: 5 bit sc
+ */
+static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
+{
+ unsigned seq;
+ u8 rval;
+
+ if (sc5 >= OPA_MAX_SCS)
+ return (u8)(0xff);
+
+ do {
+ seq = read_seqbegin(&dd->sc2vl_lock);
+ rval = *(((u8 *)dd->sc2vl) + sc5);
+ } while (read_seqretry(&dd->sc2vl_lock, seq));
+
+ return rval;
+}
+
+#define PKEY_MEMBER_MASK 0x8000
+#define PKEY_LOW_15_MASK 0x7fff
+
+/*
+ * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
+ * being an entry from the ingress partition key table), return 0
+ * otherwise. Use the matching criteria for ingress partition keys
+ * specified in the OPAv1 spec., section 9.10.14.
+ */
+static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
+{
+ u16 mkey = pkey & PKEY_LOW_15_MASK;
+ u16 ment = ent & PKEY_LOW_15_MASK;
+
+ if (mkey == ment) {
+ /*
+ * If pkey[15] is clear (limited partition member),
+ * is bit 15 in the corresponding table element
+ * clear (limited member)?
+ */
+ if (!(pkey & PKEY_MEMBER_MASK))
+ return !!(ent & PKEY_MEMBER_MASK);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * ingress_pkey_table_search - search the entire pkey table for
+ * an entry which matches 'pkey'. Return 0 if a match is found,
+ * and 1 otherwise.
+ */
+static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
+{
+ int i;
+
+ for (i = 0; i < MAX_PKEY_VALUES; i++) {
+ if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * ingress_pkey_table_fail - record a failure of ingress pkey validation,
+ * i.e., increment port_rcv_constraint_errors for the port, and record
+ * the 'error info' for this failure.
+ */
+static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
+ u16 slid)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ incr_cntr64(&ppd->port_rcv_constraint_errors);
+ if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
+ dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
+ dd->err_info_rcv_constraint.slid = slid;
+ dd->err_info_rcv_constraint.pkey = pkey;
+ }
+}
+
+/*
+ * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
+ * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
+ * is a hint as to the best place in the partition key table to begin
+ * searching. This function should not be called on the data path for
+ * performance reasons. On the data path the pkey check is expected to be
+ * done by HW, and rcv_pkey_check() should be called instead.
+ */
+static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
+ u8 sc5, u8 idx, u16 slid)
+{
+ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
+ return 0;
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ /* Is the pkey = 0x0, or 0x8000? */
+ if ((pkey & PKEY_LOW_15_MASK) == 0)
+ goto bad;
+
+ /* The most likely matching pkey has index 'idx' */
+ if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
+ return 0;
+
+ /* no match - try the whole table */
+ if (!ingress_pkey_table_search(ppd, pkey))
+ return 0;
+
+bad:
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+}
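+
+/*
+ * Usage sketch (illustrative; the call site is hypothetical): validate
+ * an incoming management packet's pkey, passing the pkey table slot the
+ * sender hinted at as idx; on failure simply drop the packet, since the
+ * constraint error has already been recorded by ingress_pkey_table_fail().
+ *
+ * if (ingress_pkey_check(ppd, pkey, sc5, idx, slid))
+ * ... drop the packet ...
+ */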
+
+/*
+ * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
+ * otherwise. It only ensures the pkey is valid for QP0. This function
+ * should be called on the data path instead of ingress_pkey_check
+ * since on the data path the pkey check is done by HW (except for QP0).
+ */
+static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
+ u8 sc5, u16 slid)
+{
+ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
+ return 0;
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ return 0;
+bad:
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+}
+
+/* MTU handling */
+
+/* MTU enumeration, 256-4k match IB */
+#define OPA_MTU_0 0
+#define OPA_MTU_256 1
+#define OPA_MTU_512 2
+#define OPA_MTU_1024 3
+#define OPA_MTU_2048 4
+#define OPA_MTU_4096 5
+
+u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
+int mtu_to_enum(u32 mtu, int default_if_bad);
+u16 enum_to_mtu(int);
+static inline int valid_ib_mtu(unsigned int mtu)
+{
+ return mtu == 256 || mtu == 512 ||
+ mtu == 1024 || mtu == 2048 ||
+ mtu == 4096;
+}
+static inline int valid_opa_max_mtu(unsigned int mtu)
+{
+ return mtu >= 2048 &&
+ (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
+}
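+
+/*
+ * Examples (from the checks above): valid_opa_max_mtu(10240) and
+ * valid_opa_max_mtu(8192) are true since OPA adds 8K and 10K to the IB
+ * MTU set, while valid_opa_max_mtu(1024) is false because an OPA max
+ * MTU must be at least 2048.
+ */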
+
+int set_mtu(struct hfi1_pportdata *);
+
+int hfi1_set_lid(struct hfi1_pportdata *, u32, u8);
+void hfi1_disable_after_error(struct hfi1_devdata *);
+int hfi1_set_uevent_bits(struct hfi1_pportdata *, const int);
+int hfi1_rcvbuf_validate(u32, u8, u16 *);
+
+int fm_get_table(struct hfi1_pportdata *, int, void *);
+int fm_set_table(struct hfi1_pportdata *, int, void *);
+
+void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void reset_link_credits(struct hfi1_devdata *dd);
+void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
+
+int snoop_recv_handler(struct hfi1_packet *packet);
+int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc);
+int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc);
+void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
+ u64 pbc, const void *from, size_t count);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define ctxt_fp(fp) \
+ (((struct hfi1_filedata *)(fp)->private_data)->uctxt)
+#define subctxt_fp(fp) \
+ (((struct hfi1_filedata *)(fp)->private_data)->subctxt)
+#define tidcursor_fp(fp) \
+ (((struct hfi1_filedata *)(fp)->private_data)->tidcursor)
+#define user_sdma_pkt_fp(fp) \
+ (((struct hfi1_filedata *)(fp)->private_data)->pq)
+#define user_sdma_comp_fp(fp) \
+ (((struct hfi1_filedata *)(fp)->private_data)->cq)
+
+static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
+{
+ return ppd->dd;
+}
+
+static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
+{
+ return container_of(dev, struct hfi1_devdata, verbs_dev);
+}
+
+static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+ return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
+{
+ return container_of(ibp, struct hfi1_pportdata, ibport_data);
+}
+
+static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ WARN_ON(pidx >= dd->num_pports);
+ return &dd->pport[pidx].ibport_data;
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ */
+static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 ret;
+
+ if (index >= ARRAY_SIZE(ppd->pkeys))
+ ret = 0;
+ else
+ ret = ppd->pkeys[index];
+
+ return ret;
+}
+
+/*
+ * Readers of cc_state must call get_cc_state() under rcu_read_lock().
+ * Writers of cc_state must call get_cc_state() under cc_state_lock.
+ */
+static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
+{
+ return rcu_dereference(ppd->cc_state);
+}
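+/*
+ * Illustrative reader-side sketch (cca_timer_fn() in init.c is a real
+ * caller following this pattern):
+ *
+ * rcu_read_lock();
+ * cc_state = get_cc_state(ppd);
+ * if (cc_state)
+ * ... use *cc_state ...
+ * rcu_read_unlock();
+ */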
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define HFI1_INITTED 0x1 /* chip and driver up and initted */
+#define HFI1_PRESENT 0x2 /* chip accesses can be done */
+#define HFI1_FROZEN 0x4 /* chip in SPC freeze */
+#define HFI1_HAS_SDMA_TIMEOUT 0x8
+#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
+#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
+#define HFI1_DO_INIT_ASIC 0x100 /* This device will init the ASIC */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
+
+/* ctxt_flag bit offsets */
+ /* context has been setup */
+#define HFI1_CTXT_SETUP_DONE 1
+ /* waiting for a packet to arrive */
+#define HFI1_CTXT_WAITING_RCV 2
+ /* master has not finished initializing */
+#define HFI1_CTXT_MASTER_UNINIT 4
+ /* waiting for an urgent packet to arrive */
+#define HFI1_CTXT_WAITING_URG 5
+
+/* free up any allocated data at close */
+struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
+ const struct pci_device_id *);
+void hfi1_free_devdata(struct hfi1_devdata *);
+void cc_state_reclaim(struct rcu_head *rcu);
+struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
+
+/*
+ * Set LED override. Only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define HFI1_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define HFI1_LED_LOG 2 /* Logical (link) YELLOW LED */
+void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val);
+
+#define HFI1_CREDIT_RETURN_RATE (100)
+
+/*
+ * The number of words for the KDETH protocol field. If this is
+ * larger than the actual field used, then part of the payload
+ * will be in the header.
+ *
+ * Optimally, we want this sized so that a typical case will
+ * use full cache lines. The typical local KDETH header would
+ * be:
+ *
+ * Bytes Field
+ * 8 LRH
+ * 12 BTH
+ * ?? KDETH
+ * 8 RHF
+ * ---
+ * 28 + KDETH
+ *
+ * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
+ */
+#define DEFAULT_RCVHDRSIZE 9
+
+/*
+ * Maximal header byte count:
+ *
+ * Bytes Field
+ * 8 LRH
+ * 40 GRH (optional)
+ * 12 BTH
+ * ?? KDETH
+ * 8 RHF
+ * ---
+ * 68 + KDETH
+ *
+ * We also want to maintain a cache line alignment to assist DMA'ing
+ * of the header bytes. Round up to a good size.
+ */
+#define DEFAULT_RCVHDR_ENTSIZE 32
+
+int hfi1_get_user_pages(unsigned long, size_t, struct page **);
+void hfi1_release_user_pages(struct page **, size_t);
+
+static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
+{
+ *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
+{
+ /*
+ * volatile because it's a DMA target from the chip, the routine is
+ * inlined, and we don't want register caching or reordering.
+ */
+ return (u32) le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
+}
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_hfi1_version[];
+
+int hfi1_device_create(struct hfi1_devdata *);
+void hfi1_device_remove(struct hfi1_devdata *);
+
+int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj);
+int hfi1_verbs_register_sysfs(struct hfi1_devdata *);
+void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *);
+/* Hook for sysfs read of QSFP */
+int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
+
+int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
+void hfi1_pcie_cleanup(struct pci_dev *);
+int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
+ const struct pci_device_id *);
+void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
+void hfi1_pcie_flr(struct hfi1_devdata *);
+int pcie_speeds(struct hfi1_devdata *);
+void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
+void hfi1_enable_intx(struct pci_dev *);
+void hfi1_nomsix(struct hfi1_devdata *);
+void restore_pci_variables(struct hfi1_devdata *dd);
+int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+int parse_platform_config(struct hfi1_devdata *dd);
+int get_platform_config_field(struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding table_type,
+ int table_index, int field_index, u32 *data, u32 len);
+
+dma_addr_t hfi1_map_page(struct pci_dev *, struct page *, unsigned long,
+ size_t, int);
+const char *get_unit_name(int unit);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+static inline void flush_wc(void)
+{
+ asm volatile("sfence" : : : "memory");
+}
+
+void handle_eflags(struct hfi1_packet *packet);
+int process_receive_ib(struct hfi1_packet *packet);
+int process_receive_bypass(struct hfi1_packet *packet);
+int process_receive_error(struct hfi1_packet *packet);
+int kdeth_process_expected(struct hfi1_packet *packet);
+int kdeth_process_eager(struct hfi1_packet *packet);
+int process_receive_invalid(struct hfi1_packet *packet);
+
+extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
+
+void update_sge(struct hfi1_sge_state *ss, u32 length);
+
+/* global module parameter variables */
+extern unsigned int hfi1_max_mtu;
+extern unsigned int hfi1_cu;
+extern unsigned int user_credit_return_threshold;
+extern uint num_rcv_contexts;
+extern unsigned n_krcvqs;
+extern u8 krcvqs[];
+extern int krcvqsset;
+extern uint kdeth_qp;
+extern uint loopback;
+extern uint quick_linkup;
+extern uint rcv_intr_timeout;
+extern uint rcv_intr_count;
+extern uint rcv_intr_dynamic;
+extern ushort link_crc_mask;
+
+extern struct mutex hfi1_mutex;
+
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
+#define DRIVER_NAME "hfi1"
+#define HFI1_USER_MINOR_BASE 0
+#define HFI1_TRACE_MINOR 127
+#define HFI1_DIAGPKT_MINOR 128
+#define HFI1_DIAG_MINOR_BASE 129
+#define HFI1_SNOOP_CAPTURE_BASE 200
+#define HFI1_NMINORS 255
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL0 0x24f0
+#define PCI_DEVICE_ID_INTEL1 0x24f1
+
+#define HFI1_PKT_USER_SC_INTEGRITY \
+ (SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
+
+#define HFI1_PKT_KERNEL_SC_INTEGRITY \
+ (SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
+
+static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
+ u16 ctxt_type)
+{
+ u64 base_sc_integrity =
+ SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
+ | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
+
+ if (ctxt_type == SC_USER)
+ base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
+ else
+ base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
+
+ if (is_a0(dd))
+ /* turn off send-side job key checks - A0 erratum */
+ return base_sc_integrity &
+ ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ return base_sc_integrity;
+}
+
+static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
+{
+ u64 base_sdma_integrity =
+ SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
+ | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
+ | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
+
+ if (is_a0(dd))
+ /* turn off send-side job key checks - A0 erratum */
+ return base_sdma_integrity &
+ ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ return base_sdma_integrity;
+}
+
+/*
+ * hfi1_early_err is used (only!) to print early errors before devdata is
+ * allocated, or when dd->pcidev may not be valid, and at the tail end of
+ * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
+ * the same as dd_dev_err, but is used when the message really needs
+ * the IB port# to be definitive as to what's happening.
+ */
+#define hfi1_early_err(dev, fmt, ...) \
+ dev_err(dev, fmt, ##__VA_ARGS__)
+
+#define hfi1_early_info(dev, fmt, ...) \
+ dev_info(dev, fmt, ##__VA_ARGS__)
+
+#define dd_dev_emerg(dd, fmt, ...) \
+ dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+#define dd_dev_err(dd, fmt, ...) \
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+#define dd_dev_warn(dd, fmt, ...) \
+ dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+
+#define dd_dev_warn_ratelimited(dd, fmt, ...) \
+ dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+
+#define dd_dev_info(dd, fmt, ...) \
+ dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+
+#define hfi1_dev_porterr(dd, port, fmt, ...) \
+ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+ get_unit_name((dd)->unit), (dd)->unit, (port), \
+ ##__VA_ARGS__)
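+
+/*
+ * Illustrative use: the dd_dev_* wrappers prefix each message with the
+ * unit name from get_unit_name(), so (assuming unit 0 is named
+ * "hfi1_0")
+ *
+ * dd_dev_err(dd, "ctxt%u: out of memory\n", ctxt);
+ *
+ * would log "hfi1_0: ctxt3: out of memory" for ctxt == 3.
+ */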
+
+/*
+ * this is used for formatting hw error messages...
+ */
+struct hfi1_hwerror_msgs {
+ u64 mask;
+ const char *msg;
+ size_t sz;
+};
+
+/* in intr.c... */
+void hfi1_format_hwerrors(u64 hwerrs,
+ const struct hfi1_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t lmsg);
+
+#define USER_OPCODE_CHECK_VAL 0xC0
+#define USER_OPCODE_CHECK_MASK 0xC0
+#define OPCODE_CHECK_VAL_DISABLED 0x0
+#define OPCODE_CHECK_MASK_DISABLED 0x0
+
+static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ dd->z_int_counter = get_all_cpu_total(dd->int_counter);
+ dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
+
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ppd->ibport_data.z_rc_acks =
+ get_all_cpu_total(ppd->ibport_data.rc_acks);
+ ppd->ibport_data.z_rc_qacks =
+ get_all_cpu_total(ppd->ibport_data.rc_qacks);
+ }
+}
+
+/* Control LED state */
+static inline void setextled(struct hfi1_devdata *dd, u32 on)
+{
+ if (on)
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
+ else
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
+}
+
+int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
+
+#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
new file mode 100644
index 000000000000..a877eda8c13c
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/init.c
@@ -0,0 +1,1722 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+
+#include "hfi.h"
+#include "device.h"
+#include "common.h"
+#include "mad.h"
+#include "sdma.h"
+#include "debugfs.h"
+#include "verbs.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+/*
+ * min buffers we want to have per context, after driver
+ */
+#define HFI1_MIN_USER_CTXT_BUFCNT 7
+
+#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
+#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
+#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
+
+/*
+ * Number of user receive contexts we are configured to use (to allow for more
+ * pio buffers per ctxt, etc.). Zero means use one user context per CPU.
+ */
+uint num_rcv_contexts;
+module_param_named(num_rcv_contexts, num_rcv_contexts, uint, S_IRUGO);
+MODULE_PARM_DESC(
+ num_rcv_contexts, "Set max number of user receive contexts to use");
+
+u8 krcvqs[RXE_NUM_DATA_VL];
+int krcvqsset;
+module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "Array of the number of kernel receive queues by VL");
+
+/* computed based on above array */
+unsigned n_krcvqs;
+
+static unsigned hfi1_rcvarr_split = 25;
+module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
+
+static uint eager_buffer_size = (2 << 20); /* 2MB */
+module_param(eager_buffer_size, uint, S_IRUGO);
+MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");
+
+static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
+module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
+
+static uint hfi1_hdrq_entsize = 32;
+module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
+MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
+
+unsigned int user_credit_return_threshold = 33; /* default is 33% */
+module_param(user_credit_return_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");
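+
+/*
+ * Illustrative load-time example (hypothetical values): passing
+ * "krcvqs=2,2,2 rcvhdrcnt=4096" on the module command line requests
+ * two kernel receive queues on each of the first three data VLs and a
+ * 4096-entry receive header queue.
+ */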
+
+static inline u64 encode_rcv_header_entry_size(u16);
+
+static struct idr hfi1_unit_table;
+u32 hfi1_cpulist_count;
+unsigned long *hfi1_cpulist;
+
+/*
+ * Common code for creating the receive context array.
+ */
+int hfi1_create_ctxts(struct hfi1_devdata *dd)
+{
+ unsigned i;
+ int ret;
+ int local_node_id = pcibus_to_node(dd->pcidev->bus);
+
+ if (local_node_id < 0)
+ local_node_id = numa_node_id();
+ dd->assigned_node_id = local_node_id;
+
+ dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL);
+ if (!dd->rcd) {
+ dd_dev_err(dd,
+ "Unable to allocate receive context array, failing\n");
+ goto nomem;
+ }
+
+ /* create one or more kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; ++i) {
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ctxtdata *rcd;
+
+ ppd = dd->pport + (i % dd->num_pports);
+ rcd = hfi1_create_ctxtdata(ppd, i);
+ if (!rcd) {
+ dd_dev_err(dd,
+ "Unable to allocate kernel receive context, failing\n");
+ goto nomem;
+ }
+ /*
+ * Set up the kernel context flags here and now because they
+ * use default values for all receive side memories. User
+ * contexts will be handled as they are created.
+ */
+ rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+ rcd->seq_cnt = 1;
+
+ rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
+ if (!rcd->sc) {
+ dd_dev_err(dd,
+ "Unable to allocate kernel send context, failing\n");
+ dd->rcd[rcd->ctxt] = NULL;
+ hfi1_free_ctxtdata(dd, rcd);
+ goto nomem;
+ }
+
+ ret = hfi1_init_ctxt(rcd->sc);
+ if (ret < 0) {
+ dd_dev_err(dd,
+ "Failed to setup kernel receive context, failing\n");
+ sc_free(rcd->sc);
+ dd->rcd[rcd->ctxt] = NULL;
+ hfi1_free_ctxtdata(dd, rcd);
+ ret = -EFAULT;
+ goto bail;
+ }
+ }
+
+ return 0;
+nomem:
+ ret = -ENOMEM;
+bail:
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+ return ret;
+}
+
+/*
+ * Common code for user and kernel context setup.
+ */
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ctxtdata *rcd;
+ unsigned kctxt_ngroups = 0;
+ u32 base;
+
+ if (dd->rcv_entries.nctxt_extra >
+ dd->num_rcv_contexts - dd->first_user_ctxt)
+ kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
+ (dd->num_rcv_contexts - dd->first_user_ctxt));
+ rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ if (rcd) {
+ u32 rcvtids, max_entries;
+
+ dd_dev_info(dd, "%s: setting up context %u\n", __func__, ctxt);
+
+ INIT_LIST_HEAD(&rcd->qp_wait_list);
+ rcd->ppd = ppd;
+ rcd->dd = dd;
+ rcd->cnt = 1;
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+ rcd->numa_id = numa_node_id();
+ rcd->rcv_array_groups = dd->rcv_entries.ngroups;
+
+ spin_lock_init(&rcd->exp_lock);
+
+ /*
+ * Calculate the context's RcvArray entry starting point.
+ * We do this here because we have to take into account all
+ * the RcvArray entries that previous contexts would have
+ * taken and we have to account for any extra groups
+ * assigned to the kernel or user contexts.
+ */
+ if (ctxt < dd->first_user_ctxt) {
+ if (ctxt < kctxt_ngroups) {
+ base = ctxt * (dd->rcv_entries.ngroups + 1);
+ rcd->rcv_array_groups++;
+ } else
+ base = kctxt_ngroups +
+ (ctxt * dd->rcv_entries.ngroups);
+ } else {
+ u16 ct = ctxt - dd->first_user_ctxt;
+
+ base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
+ kctxt_ngroups);
+ if (ct < dd->rcv_entries.nctxt_extra) {
+ base += ct * (dd->rcv_entries.ngroups + 1);
+ rcd->rcv_array_groups++;
+ } else
+ base += dd->rcv_entries.nctxt_extra +
+ (ct * dd->rcv_entries.ngroups);
+ }
+ rcd->eager_base = base * dd->rcv_entries.group_size;
+
+ /* Validate and initialize Rcv Hdr Q variables */
+ if (rcvhdrcnt % HDRQ_INCREMENT) {
+ dd_dev_err(dd,
+ "ctxt%u: header queue count %d must be divisible by %d\n",
+ rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
+ goto bail;
+ }
+ rcd->rcvhdrq_cnt = rcvhdrcnt;
+ rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
+ /*
+ * Simple Eager buffer allocation: we have already pre-allocated
+ * the number of RcvArray entry groups. Each ctxtdata structure
+ * holds the number of groups for that context.
+ *
+ * To follow CSR requirements and maintain cacheline alignment,
+ * make sure all sizes and bases are multiples of group_size.
+ *
+ * The expected entry count is what is left after assigning
+ * eager.
+ */
+ max_entries = rcd->rcv_array_groups *
+ dd->rcv_entries.group_size;
+ rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
+ rcd->egrbufs.count = round_down(rcvtids,
+ dd->rcv_entries.group_size);
+ if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
+ dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
+ rcd->ctxt);
+ rcd->egrbufs.count = MAX_EAGER_ENTRIES;
+ }
+ dd_dev_info(dd, "ctxt%u: max Eager buffer RcvArray entries: %u\n",
+ rcd->ctxt, rcd->egrbufs.count);
+
+ /*
+ * Allocate array that will hold the eager buffer accounting
+ * data.
+ * This will allocate the maximum possible buffer count based
+ * on the value of the RcvArray split parameter.
+ * The resulting value will be rounded down to the closest
+ * multiple of dd->rcv_entries.group_size.
+ */
+ rcd->egrbufs.buffers = kzalloc(sizeof(*rcd->egrbufs.buffers) *
+ rcd->egrbufs.count, GFP_KERNEL);
+ if (!rcd->egrbufs.buffers)
+ goto bail;
+ rcd->egrbufs.rcvtids = kzalloc(sizeof(*rcd->egrbufs.rcvtids) *
+ rcd->egrbufs.count, GFP_KERNEL);
+ if (!rcd->egrbufs.rcvtids)
+ goto bail;
+ rcd->egrbufs.size = eager_buffer_size;
+ /*
+ * The size of the buffers programmed into the RcvArray
+ * entries needs to be big enough to handle the highest
+ * MTU supported.
+ */
+ if (rcd->egrbufs.size < hfi1_max_mtu) {
+ rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
+ dd_dev_info(dd,
+ "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
+ rcd->ctxt, rcd->egrbufs.size);
+ }
+ rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
+
+ if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
+ rcd->opstats = kzalloc(sizeof(*rcd->opstats),
+ GFP_KERNEL);
+ if (!rcd->opstats) {
+ dd_dev_err(dd,
+ "ctxt%u: Unable to allocate per ctxt stats buffer\n",
+ rcd->ctxt);
+ goto bail;
+ }
+ }
+ }
+ return rcd;
+bail:
+ kfree(rcd->opstats);
+ kfree(rcd->egrbufs.rcvtids);
+ kfree(rcd->egrbufs.buffers);
+ kfree(rcd);
+ return NULL;
+}
+
+/*
+ * Convert a receive header entry size to the encoding used in the CSR.
+ *
+ * Return zero if the given size is invalid.
+ */
+static inline u64 encode_rcv_header_entry_size(u16 size)
+{
+ /* there are only 3 valid receive header entry sizes */
+ if (size == 2)
+ return 1;
+ if (size == 16)
+ return 2;
+ if (size == 32)
+ return 4;
+ return 0; /* invalid */
+}
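+
+/*
+ * Note: the size is in DWORDs, so the valid encodings above map the
+ * hdrq_entsize module parameter values 2, 16 and 32 to entry sizes of
+ * 8, 64 and 128 bytes respectively.
+ */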
+
+/*
+ * Select the largest ccti value over all SLs to determine the intra-
+ * packet gap for the link.
+ *
+ * called with cca_timer_lock held (to protect access to cca_timer
+ * array), and rcu_read_lock() (to protect access to cc_state).
+ */
+void set_link_ipg(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct cc_state *cc_state;
+ int i;
+ u16 cce, ccti_limit, max_ccti = 0;
+ u16 shift, mult;
+ u64 src;
+ u32 current_egress_rate; /* Mbits /sec */
+ u32 max_pkt_time;
+ /*
+ * max_pkt_time is the maximum packet egress time in units
+ * of the fabric clock period 1/(805 MHz).
+ */
+
+ cc_state = get_cc_state(ppd);
+
+ if (cc_state == NULL)
+ /*
+ * This should _never_ happen - rcu_read_lock() is held,
+ * and set_link_ipg() should not be called if cc_state
+ * is NULL.
+ */
+ return;
+
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ u16 ccti = ppd->cca_timer[i].ccti;
+
+ if (ccti > max_ccti)
+ max_ccti = ccti;
+ }
+
+ ccti_limit = cc_state->cct.ccti_limit;
+ if (max_ccti > ccti_limit)
+ max_ccti = ccti_limit;
+
+ cce = cc_state->cct.entries[max_ccti].entry;
+ shift = (cce & 0xc000) >> 14;
+ mult = (cce & 0x3fff);
+
+ current_egress_rate = active_egress_rate(ppd);
+
+ max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
+
+ src = (max_pkt_time >> shift) * mult;
+
+ src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
+ src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
+
+ write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
+}
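+
+/*
+ * Worked example (illustrative numbers only): a CCT entry of 0x4005
+ * decodes to shift = (0x4005 & 0xc000) >> 14 = 1 and
+ * mult = 0x4005 & 0x3fff = 5, so a max_pkt_time of 1000 fabric clocks
+ * programs a static rate control reload value of (1000 >> 1) * 5 = 2500.
+ */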
+
+static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
+{
+ struct cca_timer *cca_timer;
+ struct hfi1_pportdata *ppd;
+ int sl;
+ u16 ccti, ccti_timer, ccti_min;
+ struct cc_state *cc_state;
+
+ cca_timer = container_of(t, struct cca_timer, hrtimer);
+ ppd = cca_timer->ppd;
+ sl = cca_timer->sl;
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (cc_state == NULL) {
+ rcu_read_unlock();
+ return HRTIMER_NORESTART;
+ }
+
+ /*
+ * 1) decrement ccti for SL
+ * 2) calculate IPG for link (set_link_ipg())
+ * 3) restart timer, unless ccti is at min value
+ */
+
+ ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
+ ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
+
+ spin_lock(&ppd->cca_timer_lock);
+
+ ccti = cca_timer->ccti;
+
+ if (ccti > ccti_min) {
+ cca_timer->ccti--;
+ set_link_ipg(ppd);
+ }
+
+ spin_unlock(&ppd->cca_timer_lock);
+
+ rcu_read_unlock();
+
+ if (ccti > ccti_min) {
+ unsigned long nsec = 1024 * ccti_timer;
+ /* ccti_timer is in units of 1.024 usec */
+ hrtimer_forward_now(t, ns_to_ktime(nsec));
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
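+
+/*
+ * Illustrative: with ccti_timer == 100 (units of 1.024 usec) the timer
+ * above is re-armed 100 * 1024 ns = 102.4 usec in the future.
+ */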
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
+ struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
+{
+ int i, size;
+ uint default_pkey_idx;
+
+ ppd->dd = dd;
+ ppd->hw_pidx = hw_pidx;
+ ppd->port = port; /* IB port number, not index */
+
+ default_pkey_idx = 1;
+
+ ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
+ if (loopback) {
+ hfi1_early_err(&pdev->dev,
+ "Faking data partition 0x8001 in idx %u\n",
+ !default_pkey_idx);
+ ppd->pkeys[!default_pkey_idx] = 0x8001;
+ }
+
+ INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
+ INIT_WORK(&ppd->link_up_work, handle_link_up);
+ INIT_WORK(&ppd->link_down_work, handle_link_down);
+ INIT_WORK(&ppd->freeze_work, handle_freeze);
+ INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
+ INIT_WORK(&ppd->sma_message_work, handle_sma_message);
+ INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+ mutex_init(&ppd->hls_lock);
+ spin_lock_init(&ppd->sdma_alllock);
+ spin_lock_init(&ppd->qsfp_info.qsfp_lock);
+
+ ppd->sm_trap_qp = 0x0;
+ ppd->sa_qp = 0x1;
+
+ ppd->hfi1_wq = NULL;
+
+ spin_lock_init(&ppd->cca_timer_lock);
+
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ppd->cca_timer[i].ppd = ppd;
+ ppd->cca_timer[i].sl = i;
+ ppd->cca_timer[i].ccti = 0;
+ ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
+ }
+
+ ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
+
+ spin_lock_init(&ppd->cc_state_lock);
+ spin_lock_init(&ppd->cc_log_lock);
+ size = sizeof(struct cc_state);
+ RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
+ if (!rcu_dereference(ppd->cc_state))
+ goto bail;
+ return;
+
+bail:
+
+ hfi1_early_err(&pdev->dev,
+ "Congestion Control Agent disabled for port %d\n", port);
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct hfi1_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the hfi1_ib device
+ *
+ * Sanity check at least some of the values after reset, and
+ * ensure no receive or transmit is possible (explicitly, in case
+ * the reset failed).
+ */
+static int init_after_reset(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_rcv_contexts; i++)
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_TAILUPD_DIS, i);
+ pio_send_control(dd, PSC_GLOBAL_DISABLE);
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_disable(dd->send_contexts[i].sc);
+
+ return 0;
+}
+
+static void enable_chip(struct hfi1_devdata *dd)
+{
+ u32 rcvmask;
+ u32 i;
+
+ /* enable PIO send */
+ pio_send_control(dd, PSC_GLOBAL_ENABLE);
+
+ /*
+ * Enable kernel ctxts' receive and receive interrupt.
+ * Other ctxts done as user opens and initializes them.
+ */
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
+ for (i = 0; i < dd->first_user_ctxt; ++i) {
+ rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
+ if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
+ rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
+ rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
+ rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ hfi1_rcvctrl(dd, rcvmask, i);
+ sc_enable(dd->rcd[i]->sc);
+ }
+}
+
+/**
+ * create_workqueues - create per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static int create_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (!ppd->hfi1_wq) {
+ char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
+ snprintf(wq_name, sizeof(wq_name), "hfi%d_%d",
+ dd->unit, pidx);
+ ppd->hfi1_wq =
+ create_singlethread_workqueue(wq_name);
+ if (!ppd->hfi1_wq)
+ goto wq_error;
+ }
+ }
+ return 0;
+wq_error:
+ pr_err("create_singlethread_workqueue failed for port %d\n",
+ pidx + 1);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ }
+ return -ENOMEM;
+}
+
+/**
+ * hfi1_init - do the actual initialization sequence on the chip
+ * @dd: the hfi1_ib device
+ * @reinit: re-initializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int hfi1_init(struct hfi1_devdata *dd, int reinit)
+{
+ int ret = 0, pidx, lastfail = 0;
+ unsigned i, len;
+ struct hfi1_ctxtdata *rcd;
+ struct hfi1_pportdata *ppd;
+
+ /* Set up recv low level handlers */
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
+ kdeth_process_expected;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
+ kdeth_process_eager;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
+ process_receive_error;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
+ process_receive_bypass;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
+ process_receive_invalid;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
+ process_receive_invalid;
+ dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
+ process_receive_invalid;
+ dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
+
+ /* Set up send low level handlers */
+ dd->process_pio_send = hfi1_verbs_send_pio;
+ dd->process_dma_send = hfi1_verbs_send_dma;
+ dd->pio_inline_send = pio_copy;
+
+ if (is_a0(dd)) {
+ atomic_set(&dd->drop_packet, DROP_PACKET_ON);
+ dd->do_drop = 1;
+ } else {
+ atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
+ dd->do_drop = 0;
+ }
+
+ /* make sure the link is not "up" */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ ppd->linkup = 0;
+ }
+
+ if (reinit)
+ ret = init_after_reset(dd);
+ else
+ ret = loadtime_init(dd);
+ if (ret)
+ goto done;
+
+ /* dd->rcd can be NULL if early initialization failed */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ /*
+ * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ * Need to re-create rest of ctxt 0 ctxtdata as well.
+ */
+ rcd = dd->rcd[i];
+ if (!rcd)
+ continue;
+
+ rcd->do_interrupt = &handle_receive_interrupt;
+
+ lastfail = hfi1_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = hfi1_setup_eagerbufs(rcd);
+ if (lastfail)
+ dd_dev_err(dd,
+ "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+ }
+ if (lastfail)
+ ret = lastfail;
+
+ /* Allocate enough memory for user event notification. */
+ len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
+ sizeof(*dd->events), PAGE_SIZE);
+ dd->events = vmalloc_user(len);
+ if (!dd->events)
+ dd_dev_err(dd, "Failed to allocate user events page\n");
+ /*
+ * Allocate a page for device and port status.
+ * Page will be shared amongst all user processes.
+ */
+ dd->status = vmalloc_user(PAGE_SIZE);
+ if (!dd->status)
+ dd_dev_err(dd, "Failed to allocate dev status page\n");
+ else
+ dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
+ sizeof(dd->status->freezemsg));
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (dd->status)
+ /* Currently, we only have one port */
+ ppd->statusp = &dd->status->port;
+
+ set_mtu(ppd);
+ }
+
+ /* enable chip even if we have an error, so we can debug cause */
+ enable_chip(dd);
+
+ ret = hfi1_cq_init(dd);
+done:
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ if (dd->status)
+ dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
+ HFI1_STATUS_INITTED;
+ if (!ret) {
+ /* enable all interrupts from the chip */
+ set_intr_state(dd, 1);
+
+ /* chip is OK for user apps; mark it as initialized */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /* initialize the qsfp if it exists
+ * Requires interrupts to be enabled so we are notified
+ * when the QSFP completes reset, and has
+ * to be done before bringing up the SERDES
+ */
+ init_qsfp(ppd);
+
+ /*
+ * start the serdes - must be after interrupts are
+ * enabled so we are notified when the link goes up
+ */
+ lastfail = bringup_serdes(ppd);
+ if (lastfail)
+ dd_dev_info(dd,
+ "Failed to bring up port %u\n",
+ ppd->port);
+
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ if (ppd->statusp)
+ *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
+ HFI1_STATUS_INITTED;
+ if (!ppd->link_speed_enabled)
+ continue;
+ }
+ }
+
+ /* if ret is non-zero, we probably should do some cleanup here... */
+ return ret;
+}
+
+static inline struct hfi1_devdata *__hfi1_lookup(int unit)
+{
+ return idr_find(&hfi1_unit_table, unit);
+}
+
+struct hfi1_devdata *hfi1_lookup(int unit)
+{
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ dd = __hfi1_lookup(unit);
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+
+ return dd;
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void stop_timers(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->led_override_timer.data) {
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+ }
+}
+
+/**
+ * shutdown_device - shut down a device
+ * @dd: the hfi1_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be setup again by hfi1_init(dd, 1)
+ */
+static void shutdown_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ unsigned pidx;
+ int i;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ ppd->linkup = 0;
+ if (ppd->statusp)
+ *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
+ HFI1_STATUS_IB_READY);
+ }
+ dd->flags &= ~HFI1_INITTED;
+
+ /* mask interrupts, but not errors */
+ set_intr_state(dd, 0);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ for (i = 0; i < dd->num_rcv_contexts; i++)
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
+ HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_PKEY_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
+ /*
+ * Gracefully stop all sends allowing any in progress to
+ * trickle out first.
+ */
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_flush(dd->send_contexts[i].sc);
+ }
+
+ /*
+ * Enough for anything that's going to trickle out to have actually
+ * done so.
+ */
+ udelay(20);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /* disable all contexts */
+ for (i = 0; i < dd->num_send_contexts; i++)
+ sc_disable(dd->send_contexts[i].sc);
+ /* disable the send device */
+ pio_send_control(dd, PSC_GLOBAL_DISABLE);
+
+ /*
+ * Clear SerdesEnable.
+ * We can't count on interrupts since we are stopping.
+ */
+ hfi1_quiet_serdes(ppd);
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ }
+ sdma_exit(dd);
+}
+
+/**
+ * hfi1_free_ctxtdata - free a context's allocated data
+ * @dd: the hfi1_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of context data, because it is called after hfi1_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ */
+void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ unsigned e;
+
+ if (!rcd)
+ return;
+
+ if (rcd->rcvhdrq) {
+ dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+ if (rcd->rcvhdrtail_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)rcd->rcvhdrtail_kvaddr,
+ rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrtail_kvaddr = NULL;
+ }
+ }
+
+ /* all the RcvArray entries should have been cleared by now */
+ kfree(rcd->egrbufs.rcvtids);
+
+ for (e = 0; e < rcd->egrbufs.alloced; e++) {
+ if (rcd->egrbufs.buffers[e].phys)
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[e].len,
+ rcd->egrbufs.buffers[e].addr,
+ rcd->egrbufs.buffers[e].phys);
+ }
+ kfree(rcd->egrbufs.buffers);
+
+ sc_free(rcd->sc);
+ vfree(rcd->physshadow);
+ vfree(rcd->tid_pg_list);
+ vfree(rcd->user_event_mask);
+ vfree(rcd->subctxt_uregbase);
+ vfree(rcd->subctxt_rcvegrbuf);
+ vfree(rcd->subctxt_rcvhdr_base);
+ kfree(rcd->tidusemap);
+ kfree(rcd->opstats);
+ kfree(rcd);
+}
+
+void hfi1_free_devdata(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ idr_remove(&hfi1_unit_table, dd->unit);
+ list_del(&dd->list);
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+ rcu_barrier(); /* wait for rcu callbacks to complete */
+ free_percpu(dd->int_counter);
+ free_percpu(dd->rcv_limit);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+}
+
+/*
+ * Allocate our primary per-unit data structure. Must be done via verbs
+ * allocator, because the verbs cleanup process both does cleanup and
+ * free of the data structure.
+ * "extra" is for chip-specific data.
+ *
+ * Use the idr mechanism to get a unit number for this unit.
+ */
+struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+{
+ unsigned long flags;
+ struct hfi1_devdata *dd;
+ int ret;
+
+ dd = (struct hfi1_devdata *)ib_alloc_device(sizeof(*dd) + extra);
+ if (!dd)
+ return ERR_PTR(-ENOMEM);
+ /* "extra" is sizeof(struct hfi1_pportdata) * number of ports */
+ dd->num_pports = extra / sizeof(struct hfi1_pportdata);
+ dd->pport = (struct hfi1_pportdata *)(dd + 1);
+
+ INIT_LIST_HEAD(&dd->list);
+ dd->node = dev_to_node(&pdev->dev);
+ if (dd->node < 0)
+ dd->node = 0;
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+
+ ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ dd->unit = ret;
+ list_add(&dd->list, &hfi1_dev_list);
+ }
+
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ idr_preload_end();
+
+ if (ret < 0) {
+ hfi1_early_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+ goto bail;
+ }
+ /*
+ * Initialize all locks for the device. This needs to be as early as
+ * possible so locks are usable.
+ */
+ spin_lock_init(&dd->sc_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->rcvctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->hfi1_diag_trans_lock);
+ spin_lock_init(&dd->sc_init_lock);
+ spin_lock_init(&dd->dc8051_lock);
+ spin_lock_init(&dd->dc8051_memlock);
+ mutex_init(&dd->qsfp_i2c_mutex);
+ seqlock_init(&dd->sc2vl_lock);
+ spin_lock_init(&dd->sde_map_lock);
+ init_waitqueue_head(&dd->event_queue);
+
+ dd->int_counter = alloc_percpu(u64);
+ if (!dd->int_counter) {
+ ret = -ENOMEM;
+ hfi1_early_err(&pdev->dev,
+ "Could not allocate per-cpu int_counter\n");
+ goto bail;
+ }
+
+ dd->rcv_limit = alloc_percpu(u64);
+ if (!dd->rcv_limit) {
+ ret = -ENOMEM;
+ hfi1_early_err(&pdev->dev,
+ "Could not allocate per-cpu rcv_limit\n");
+ goto bail;
+ }
+
+ if (!hfi1_cpulist_count) {
+ u32 count = num_online_cpus();
+
+ hfi1_cpulist = kzalloc(BITS_TO_LONGS(count) *
+ sizeof(long), GFP_KERNEL);
+ if (hfi1_cpulist)
+ hfi1_cpulist_count = count;
+ else
+ hfi1_early_err(
+ &pdev->dev,
+ "Could not alloc cpulist info, cpu affinity might be wrong\n");
+ }
+ hfi1_dbg_ibdev_init(&dd->verbs_dev);
+ return dd;
+
+bail:
+ if (!list_empty(&dd->list))
+ list_del_init(&dd->list);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code. Should be paranoid about state of
+ * system and data structures.
+ */
+void hfi1_disable_after_error(struct hfi1_devdata *dd)
+{
+ if (dd->flags & HFI1_INITTED) {
+ u32 pidx;
+
+ dd->flags &= ~HFI1_INITTED;
+ if (dd->pport)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct hfi1_pportdata *ppd;
+
+ ppd = dd->pport + pidx;
+ if (dd->flags & HFI1_PRESENT)
+ set_link_state(ppd, HLS_DN_DISABLE);
+
+ if (ppd->statusp)
+ *ppd->statusp &= ~HFI1_STATUS_IB_READY;
+ }
+ }
+
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset.
+ */
+ if (dd->status)
+ dd->status->dev |= HFI1_STATUS_HWERROR;
+}
+
+static void remove_one(struct pci_dev *);
+static int init_one(struct pci_dev *, const struct pci_device_id *);
+
+#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
+#define PFX DRIVER_NAME ": "
+
+static const struct pci_device_id hfi1_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
+
+static struct pci_driver hfi1_pci_driver = {
+ .name = DRIVER_NAME,
+ .probe = init_one,
+ .remove = remove_one,
+ .id_table = hfi1_pci_tbl,
+ .err_handler = &hfi1_pci_err_handler,
+};
+
+static void __init compute_krcvqs(void)
+{
+ int i;
+
+ for (i = 0; i < krcvqsset; i++)
+ n_krcvqs += krcvqs[i];
+}
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init hfi1_mod_init(void)
+{
+ int ret;
+
+ ret = dev_init();
+ if (ret)
+ goto bail;
+
+ /* validate max MTU before any devices start */
+ if (!valid_opa_max_mtu(hfi1_max_mtu)) {
+ pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
+ hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
+ hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
+ }
+ /* valid CUs run from 1-128 in powers of 2 */
+ if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
+ hfi1_cu = 1;
+ /* valid credit return threshold is 0-100, variable is unsigned */
+ if (user_credit_return_threshold > 100)
+ user_credit_return_threshold = 100;
+
+ compute_krcvqs();
+ /*
+ * Sanitize the receive interrupt count; sanitizing the timeout
+ * must wait until after the hardware type is known.
+ */
+ if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
+ rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
+ /* reject invalid combinations */
+ if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
+ pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
+ rcv_intr_count = 1;
+ }
+ if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
+ /*
+ * Avoid indefinite packet delivery by requiring a timeout
+ * if count is > 1.
+ */
+ pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
+ rcv_intr_timeout = 1;
+ }
+ if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
+ /*
+ * The dynamic algorithm expects a non-zero timeout
+ * and a count > 1.
+ */
+ pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
+ rcv_intr_dynamic = 0;
+ }
+
+ /* sanitize link CRC options */
+ link_crc_mask &= SUPPORTED_CRCS;
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ idr_init(&hfi1_unit_table);
+
+ hfi1_dbg_init();
+ ret = pci_register_driver(&hfi1_pci_driver);
+ if (ret < 0) {
+ pr_err("Unable to register driver: error %d\n", -ret);
+ goto bail_dev;
+ }
+ goto bail; /* all OK */
+
+bail_dev:
+ hfi1_dbg_exit();
+ idr_destroy(&hfi1_unit_table);
+ dev_cleanup();
+bail:
+ return ret;
+}
+
+module_init(hfi1_mod_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit hfi1_mod_cleanup(void)
+{
+ pci_unregister_driver(&hfi1_pci_driver);
+ hfi1_dbg_exit();
+ hfi1_cpulist_count = 0;
+ kfree(hfi1_cpulist);
+
+ idr_destroy(&hfi1_unit_table);
+ dispose_firmware(); /* asymmetric with obtain_firmware() */
+ dev_cleanup();
+}
+
+module_exit(hfi1_mod_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct hfi1_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+ struct hfi1_ctxtdata **tmp;
+ unsigned long flags;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct hfi1_pportdata *ppd = &dd->pport[pidx];
+ struct cc_state *cc_state;
+ int i;
+
+ if (ppd->statusp)
+ *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
+
+ for (i = 0; i < OPA_MAX_SLS; i++)
+ hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
+
+ spin_lock(&ppd->cc_state_lock);
+ cc_state = get_cc_state(ppd);
+ rcu_assign_pointer(ppd->cc_state, NULL);
+ spin_unlock(&ppd->cc_state_lock);
+
+ if (cc_state)
+ call_rcu(&cc_state->rcu, cc_state_reclaim);
+ }
+
+ free_credit_return(dd);
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we iterate over ctxtcnt, because that's what we allocate.
+ * We acquire lock to be really paranoid that rcd isn't being
+ * accessed from some interrupt-related code (that should not happen,
+ * but best to be sure).
+ */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ tmp = dd->rcd;
+ dd->rcd = NULL;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
+ struct hfi1_ctxtdata *rcd = tmp[ctxt];
+
+ tmp[ctxt] = NULL; /* debugging paranoia */
+ if (rcd) {
+ hfi1_clear_tids(rcd);
+ hfi1_free_ctxtdata(dd, rcd);
+ }
+ }
+ kfree(tmp);
+ /* must follow rcv context free - need to remove rcv's hooks */
+ for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
+ sc_free(dd->send_contexts[ctxt].sc);
+ dd->num_send_contexts = 0;
+ kfree(dd->send_contexts);
+ dd->send_contexts = NULL;
+ kfree(dd->boardname);
+ vfree(dd->events);
+ vfree(dd->status);
+ hfi1_cq_exit(dd);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void postinit_cleanup(struct hfi1_devdata *dd)
+{
+ hfi1_start_cleanup(dd);
+
+ hfi1_pcie_ddcleanup(dd);
+ hfi1_pcie_cleanup(dd->pcidev);
+
+ cleanup_device_data(dd);
+
+ hfi1_free_devdata(dd);
+}
+
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret = 0, j, pidx, initfail;
+ struct hfi1_devdata *dd = NULL;
+
+ /* First, lock the non-writable module parameters */
+ HFI1_CAP_LOCK();
+
+ /* Validate some global module parameters */
+ if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(&pdev->dev, "Header queue count too small\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* use the encoding function as a sanitization check */
+ if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
+ hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
+ hfi1_hdrq_entsize);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* The receive eager buffer size must be set before the receive
+ * contexts are created.
+ *
+ * Set the eager buffer size. Validate that it falls in a range
+ * allowed by the hardware - all powers of 2 between the min and
+ * max. The maximum valid MTU is within the eager buffer range
+ * so we do not need to cap the max_mtu by an eager buffer size
+ * setting.
+ */
+ if (eager_buffer_size) {
+ if (!is_power_of_2(eager_buffer_size))
+ eager_buffer_size =
+ roundup_pow_of_two(eager_buffer_size);
+ eager_buffer_size =
+ clamp_val(eager_buffer_size,
+ MIN_EAGER_BUFFER * 8,
+ MAX_EAGER_BUFFER_TOTAL);
+ hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
+ eager_buffer_size);
+ } else {
+ hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* restrict value of hfi1_rcvarr_split */
+ hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
+
+ ret = hfi1_pcie_init(pdev, ent);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL0:
+ case PCI_DEVICE_ID_INTEL1:
+ dd = hfi1_init_dd(pdev, ent);
+ break;
+ default:
+ hfi1_early_err(&pdev->dev,
+ "Failing on unknown Intel deviceid 0x%x\n",
+ ent->device);
+ ret = -ENODEV;
+ }
+
+ if (IS_ERR(dd))
+ ret = PTR_ERR(dd);
+ if (ret)
+ goto clean_bail; /* error already printed */
+
+ ret = create_workqueues(dd);
+ if (ret)
+ goto clean_bail;
+
+ /* do the generic initialization */
+ initfail = hfi1_init(dd, 0);
+
+ ret = hfi1_register_ib_device(dd);
+
+ /*
+ * Now ready for use. This should be cleared whenever we
+ * detect a reset, or initiate one. If earlier failure,
+ * we still create devices, so diags, etc. can be used
+ * to determine cause of problem.
+ */
+ if (!initfail && !ret)
+ dd->flags |= HFI1_INITTED;
+
+ j = hfi1_device_create(dd);
+ if (j)
+ dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+
+ if (initfail || ret) {
+ stop_timers(dd);
+ flush_workqueue(ib_wq);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ hfi1_quiet_serdes(dd->pport + pidx);
+ if (!j)
+ hfi1_device_remove(dd);
+ if (!ret)
+ hfi1_unregister_ib_device(dd);
+ postinit_cleanup(dd);
+ if (initfail)
+ ret = initfail;
+ goto bail; /* everything already cleaned */
+ }
+
+ sdma_start(dd);
+
+ return 0;
+
+clean_bail:
+ hfi1_pcie_cleanup(pdev);
+bail:
+ return ret;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ /* unregister from IB core */
+ hfi1_unregister_ib_device(dd);
+
+ /*
+ * Disable the IB link, disable interrupts on the device,
+ * clear dma engines, etc.
+ */
+ shutdown_device(dd);
+
+ stop_timers(dd);
+
+ /* wait until all of our (qsfp) queue_work() calls complete */
+ flush_workqueue(ib_wq);
+
+ hfi1_device_remove(dd);
+
+ postinit_cleanup(dd);
+}
+
+/**
+ * hfi1_create_rcvhdrq - create a receive header queue
+ * @dd: the hfi1_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+{
+ unsigned amt;
+ u64 reg;
+
+ if (!rcd->rcvhdrq) {
+ dma_addr_t phys_hdrqtail;
+ gfp_t gfp_flags;
+
+ /*
+ * rcvhdrqentsize is in DWs, so we have to convert to bytes
+ * (* sizeof(u32)).
+ */
+ amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
+ sizeof(u32), PAGE_SIZE);
+
+ gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
+ GFP_USER : GFP_KERNEL;
+ rcd->rcvhdrq = dma_zalloc_coherent(
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ gfp_flags | __GFP_COMP);
+
+ if (!rcd->rcvhdrq) {
+ dd_dev_err(dd,
+ "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ /* Event mask is per device now and is in hfi1_devdata */
+ /*if (rcd->ctxt >= dd->first_user_ctxt) {
+ rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+ if (!rcd->user_event_mask)
+ goto bail_free_hdrq;
+ }*/
+
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+ }
+ /*
+ * These values are per-context:
+ * RcvHdrCnt
+ * RcvHdrEntSize
+ * RcvHdrSize
+ */
+ reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
+ & RCV_HDR_CNT_CNT_MASK)
+ << RCV_HDR_CNT_CNT_SHIFT;
+ write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
+ reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
+ & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
+ << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
+ write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
+ reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
+ << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
+ write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
+ return 0;
+
+bail_free:
+ dd_dev_err(dd,
+ "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
+ rcd->ctxt);
+ vfree(rcd->user_event_mask);
+ rcd->user_event_mask = NULL;
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
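+
+/*
+ * Sizing example (using the module defaults): rcvhdrcnt = 2048 entries
+ * of hdrq_entsize = 32 DWORDs each gives
+ * ALIGN(2048 * 32 * sizeof(u32), PAGE_SIZE) = 256 KB per receive
+ * header queue.
+ */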
+
+/**
+ * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous; we do multiple
+ * allocation calls. Otherwise we would get the OOM code involved,
+ * by asking for too much per call, with disastrous results on
+ * some kernels.
+ */
+int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
+ gfp_t gfp_flags;
+ u16 order;
+ int ret = 0;
+ u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail, and we can
+ * use compound pages.
+ */
+ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+ /*
+ * The minimum size of the eager buffers is a group of MTU-sized
+ * buffers.
+ * The global eager_buffer_size parameter is checked against the
+ * theoretical lower limit of the value. Here, we check against the
+ * MTU.
+ */
+ if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
+ rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
+ /*
+ * If using one-pkt-per-egr-buffer, lower the eager buffer
+ * size to the max MTU (page-aligned).
+ */
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
+ rcd->egrbufs.rcvtid_size = round_mtu;
+
+ /*
+ * Eager buffers sizes of 1MB or less require smaller TID sizes
+ * to satisfy the "multiple of 8 RcvArray entries" requirement.
+ */
+ if (rcd->egrbufs.size <= (1 << 20))
+ rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
+ rounddown_pow_of_two(rcd->egrbufs.size / 8));
+
+ while (alloced_bytes < rcd->egrbufs.size &&
+ rcd->egrbufs.alloced < rcd->egrbufs.count) {
+ rcd->egrbufs.buffers[idx].addr =
+ dma_zalloc_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.rcvtid_size,
+ &rcd->egrbufs.buffers[idx].phys,
+ gfp_flags);
+ if (rcd->egrbufs.buffers[idx].addr) {
+ rcd->egrbufs.buffers[idx].len =
+ rcd->egrbufs.rcvtid_size;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
+ rcd->egrbufs.buffers[idx].addr;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
+ rcd->egrbufs.buffers[idx].phys;
+ rcd->egrbufs.alloced++;
+ alloced_bytes += rcd->egrbufs.rcvtid_size;
+ idx++;
+ } else {
+ u32 new_size, i, j;
+ u64 offset = 0;
+
+ /*
+ * Fail the eager buffer allocation if:
+ * - we are already using the lowest acceptable size
+ * - we are using one-pkt-per-egr-buffer (this implies
+ * that we are accepting only one size)
+ */
+ if (rcd->egrbufs.rcvtid_size == round_mtu ||
+ !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
+ dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
+ rcd->ctxt);
+ goto bail_rcvegrbuf_phys;
+ }
+
+ new_size = rcd->egrbufs.rcvtid_size / 2;
+
+ /*
+ * If the first attempt to allocate memory failed, don't
+ * fail everything but continue with the next lower
+ * size.
+ */
+ if (idx == 0) {
+ rcd->egrbufs.rcvtid_size = new_size;
+ continue;
+ }
+
+ /*
+ * Re-partition already allocated buffers to a smaller
+ * size.
+ */
+ rcd->egrbufs.alloced = 0;
+ for (i = 0, j = 0, offset = 0; j < idx; i++) {
+ if (i >= rcd->egrbufs.count)
+ break;
+ rcd->egrbufs.rcvtids[i].phys =
+ rcd->egrbufs.buffers[j].phys + offset;
+ rcd->egrbufs.rcvtids[i].addr =
+ rcd->egrbufs.buffers[j].addr + offset;
+ rcd->egrbufs.alloced++;
+ if ((rcd->egrbufs.buffers[j].phys + offset +
+ new_size) ==
+ (rcd->egrbufs.buffers[j].phys +
+ rcd->egrbufs.buffers[j].len)) {
+ j++;
+ offset = 0;
+ } else
+ offset += new_size;
+ }
+ rcd->egrbufs.rcvtid_size = new_size;
+ }
+ }
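+	/*
+	 * Example of the repartition above: if two 64 KiB buffers were
+	 * already allocated when a third 64 KiB allocation failed, they
+	 * are re-described as four 32 KiB rcvtid entries before the
+	 * loop retries at the smaller size.
+	 */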
+ rcd->egrbufs.numbufs = idx;
+ rcd->egrbufs.size = alloced_bytes;
+
+ dd_dev_info(dd, "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
+ rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
+ rcd->egrbufs.size);
+
+	/*
+	 * Set the context's rcv array head update threshold to the closest
+	 * power of 2 (so we can use a mask instead of modulo) below half
+	 * the allocated entries.
+	 */
+ rcd->egrbufs.threshold =
+ rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
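+	/* e.g. 180 allocated entries: rounddown_pow_of_two(180 / 2) == 64 */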
+ /*
+ * Compute the expected RcvArray entry base. This is done after
+ * allocating the eager buffers in order to maximize the
+ * expected RcvArray entries for the context.
+ */
+ max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
+ egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
+ rcd->expected_count = max_entries - egrtop;
+ if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
+ rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
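+	/*
+	 * Example with made-up sizes: 2048 max entries and an egrtop of
+	 * 512 would leave expected_count at 1536, subject to the
+	 * MAX_TID_PAIR_ENTRIES * 2 clamp above.
+	 */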
+
+ rcd->expected_base = rcd->eager_base + egrtop;
+ dd_dev_info(dd, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
+ rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
+ rcd->eager_base, rcd->expected_base);
+
+ if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
+ dd_dev_err(dd, "ctxt%u: current Eager buffer size is invalid %u\n",
+ rcd->ctxt, rcd->egrbufs.rcvtid_size);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
+ hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
+ rcd->egrbufs.rcvtids[idx].phys, order);
+ cond_resched();
+ }
+ goto bail;
+
+bail_rcvegrbuf_phys:
+ for (idx = 0; idx < rcd->egrbufs.alloced &&
+ rcd->egrbufs.buffers[idx].addr;
+ idx++) {
+ dma_free_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.buffers[idx].len,
+ rcd->egrbufs.buffers[idx].addr,
+ rcd->egrbufs.buffers[idx].phys);
+ rcd->egrbufs.buffers[idx].addr = NULL;
+ rcd->egrbufs.buffers[idx].phys = 0;
+ rcd->egrbufs.buffers[idx].len = 0;
+ }
+bail:
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c
new file mode 100644
index 000000000000..426582b9ab65
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/intr.c
@@ -0,0 +1,207 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "sdma.h"
+
+/**
+ * format_hwmsg - format a single hwerror message
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
+ */
+static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
+{
+ strlcat(msg, "[", msgl);
+ strlcat(msg, hwmsg, msgl);
+ strlcat(msg, "]", msgl);
+}
+
+/**
+ * hfi1_format_hwerrors - format hardware error messages for display
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
+ */
+void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t msgl)
+{
+ int i;
+
+ for (i = 0; i < nhwerrmsgs; i++)
+ if (hwerrs & hwerrmsgs[i].mask)
+ format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
+}
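+
+/*
+ * Example with a hypothetical two-entry table whose masks are 0x1 and
+ * 0x2: for hwerrs == 0x3, msg accumulates to "[first msg][second msg]".
+ */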
+
+static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
+{
+ struct ib_event event;
+ struct hfi1_devdata *dd = ppd->dd;
+
+	/*
+	 * Only call ib_dispatch_event() if the IB device has been
+	 * registered.  HFI1_INITTED is set iff the driver has successfully
+	 * registered with the IB core.
+	 */
+ if (!(dd->flags & HFI1_INITTED))
+ return;
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = ev;
+ ib_dispatch_event(&event);
+}
+
+/*
+ * Handle a linkup or link down notification.
+ * This is called outside an interrupt.
+ */
+void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
+{
+ struct hfi1_pportdata *ppd = &dd->pport[0];
+ enum ib_event_type ev;
+
+ if (!(ppd->linkup ^ !!linkup))
+ return; /* no change, nothing to do */
+
+ if (linkup) {
+ /*
+ * Quick linkup and all link up on the simulator does not
+ * trigger or implement:
+ * - VerifyCap interrupt
+ * - VerifyCap frames
+ * But rather moves directly to LinkUp.
+ *
+ * Do the work of the VerifyCap interrupt handler,
+ * handle_verify_cap(), but do not try moving the state to
+ * LinkUp as we are already there.
+ *
+ * NOTE: This uses this device's vAU, vCU, and vl15_init for
+		 * the remote values.  Both sides must be using the same values.
+ */
+		if (quick_linkup ||
+		    dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ set_up_vl15(dd, dd->vau, dd->vl15_init);
+ assign_remote_cm_au_table(dd, dd->vcu);
+ ppd->neighbor_guid =
+ read_csr(dd,
+ DC_DC8051_STS_REMOTE_GUID);
+ ppd->neighbor_type =
+ read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
+ DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
+ ppd->neighbor_port_number =
+ read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
+ DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
+ dd_dev_info(dd,
+ "Neighbor GUID: %llx Neighbor type %d\n",
+ ppd->neighbor_guid,
+ ppd->neighbor_type);
+ }
+
+ /* physical link went up */
+ ppd->linkup = 1;
+ ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
+
+ /* link widths are not available until the link is fully up */
+ get_linkup_link_widths(ppd);
+
+ } else {
+ /* physical link went down */
+ ppd->linkup = 0;
+
+ /* clear HW details of the previous connection */
+ reset_link_credits(dd);
+
+ /* freeze after a link down to guarantee a clean egress */
+ start_freeze_handling(ppd, FREEZE_SELF|FREEZE_LINK_DOWN);
+
+ ev = IB_EVENT_PORT_ERR;
+
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT);
+
+ /* if we are down, the neighbor is down */
+ ppd->neighbor_normal = 0;
+
+ /* notify IB of the link change */
+ signal_ib_event(ppd, ev);
+ }
+}
+
+/*
+ * Handle receive or urgent interrupts for user contexts. This means a user
+ * process was waiting for a packet to arrive, and didn't want to poll.
+ */
+void handle_user_interrupt(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (!rcd->cnt)
+ goto done;
+
+ if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
+ wake_up_interruptible(&rcd->wait);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd->ctxt);
+ } else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG,
+ &rcd->event_flags)) {
+ rcd->urgent++;
+ wake_up_interruptible(&rcd->wait);
+ }
+done:
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h
new file mode 100644
index 000000000000..fa361b405851
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/iowait.h
@@ -0,0 +1,186 @@
+#ifndef _HFI1_IOWAIT_H
+#define _HFI1_IOWAIT_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+
+/*
+ * typedef (*restart_t)() - restart callback
+ * @work: pointer to work structure
+ */
+typedef void (*restart_t)(struct work_struct *work);
+
+struct sdma_txreq;
+struct sdma_engine;
+/**
+ * struct iowait - linkage for delayed progress/waiting
+ * @list: used to add/insert into QP/PQ wait lists
+ * @tx_head: overflow list of sdma_txreq's
+ * @sleep: no space callback
+ * @wakeup: space callback
+ * @iowork: workqueue overhead
+ * @wait_dma: wait for sdma_busy == 0
+ * @sdma_busy: # of packets in flight
+ * @count: total number of descriptors in the tx_head'ed list
+ * @tx_limit: limit for overflow queuing
+ * @tx_count: number of tx entries in the tx_head'ed list
+ *
+ * This is to be embedded in the user's state structure
+ * (QP or PQ).
+ *
+ * The sleep and wakeup members are a bit misnamed.  They do not,
+ * strictly speaking, sleep or wake up; they are callbacks for the
+ * ULP to implement whatever queuing/dequeuing of the embedded
+ * iowait and its containing struct is required when a resource
+ * shortage, like SDMA ring space, is seen.
+ *
+ * Both are potentially called with locks held,
+ * so sleeping is not allowed.
+ *
+ * The wait_dma member, together with the sdma_busy count, lets
+ * iowait_sdma_drain() wait for all in-flight SDMA work to complete.
+ */
+
+struct iowait {
+ struct list_head list;
+ struct list_head tx_head;
+ int (*sleep)(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *tx,
+ unsigned seq);
+ void (*wakeup)(struct iowait *wait, int reason);
+ struct work_struct iowork;
+ wait_queue_head_t wait_dma;
+ atomic_t sdma_busy;
+ u32 count;
+ u32 tx_limit;
+ u32 tx_count;
+};
+
+#define SDMA_AVAIL_REASON 0
+
+/**
+ * iowait_init() - initialize wait structure
+ * @wait: wait struct to initialize
+ * @tx_limit: limit for overflow queuing
+ * @func: restart function for workqueue
+ * @sleep: sleep function called when there is no space
+ * @wakeup: wakeup function called when space becomes available
+ *
+ * This function initializes the iowait
+ * structure embedded in the QP or PQ.
+ */
+
+static inline void iowait_init(
+ struct iowait *wait,
+ u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ int (*sleep)(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *tx,
+ unsigned seq),
+ void (*wakeup)(struct iowait *wait, int reason))
+{
+ wait->count = 0;
+ INIT_LIST_HEAD(&wait->list);
+ INIT_LIST_HEAD(&wait->tx_head);
+ INIT_WORK(&wait->iowork, func);
+ init_waitqueue_head(&wait->wait_dma);
+ atomic_set(&wait->sdma_busy, 0);
+ wait->tx_limit = tx_limit;
+ wait->sleep = sleep;
+ wait->wakeup = wakeup;
+}
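+
+/*
+ * Usage sketch, assuming hypothetical ULP callbacks qp_restart(),
+ * qp_sleep(), and qp_wakeup() with the signatures above and an iowait
+ * embedded in the QP as s_iowait:
+ *
+ *	iowait_init(&qp->s_iowait, 0, qp_restart, qp_sleep, qp_wakeup);
+ */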
+
+/**
+ * iowait_schedule() - schedule the iowait's work
+ * @wait: wait struct whose work is scheduled
+ * @wq: workqueue to queue the work on
+ */
+static inline void iowait_schedule(
+ struct iowait *wait,
+ struct workqueue_struct *wq)
+{
+ queue_work(wq, &wait->iowork);
+}
+
+/**
+ * iowait_sdma_drain() - wait for DMAs to drain
+ *
+ * @wait: iowait structure
+ *
+ * This will delay until the iowait sdmas have
+ * completed.
+ */
+static inline void iowait_sdma_drain(struct iowait *wait)
+{
+ wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
+}
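+
+/*
+ * Note: whatever decrements sdma_busy is expected to follow up with
+ * iowait_drain_wakeup() below; otherwise a waiter in
+ * iowait_sdma_drain() may never be woken.
+ */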
+
+/**
+ * iowait_drain_wakeup() - trigger iowait_drain() waiter
+ *
+ * @wait: iowait structure
+ *
+ * This will trigger any waiters.
+ */
+static inline void iowait_drain_wakeup(struct iowait *wait)
+{
+ wake_up(&wait->wait_dma);
+}
+
+#endif
diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c
new file mode 100644
index 000000000000..f6eff177ace1
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/keys.c
@@ -0,0 +1,411 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+
+/**
+ * hfi1_alloc_lkey - allocate an lkey
+ * @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Returns 0 if successful, otherwise returns -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field of mr for non-dma regions.
+ *
+ */
+
+int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret = 0;
+ struct hfi1_ibdev *dev = to_idev(mr->pd->device);
+ struct hfi1_lkey_table *rkt = &dev->lk_table;
+
+ hfi1_get_mr(mr);
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* special case for dma_mr lkey == 0 */
+ if (dma_region) {
+ struct hfi1_mregion *tmr;
+
+ tmr = rcu_access_pointer(dev->dma_mr);
+ if (!tmr) {
+ rcu_assign_pointer(dev->dma_mr, mr);
+ mr->lkey_published = 1;
+ } else {
+ hfi1_put_mr(mr);
+ }
+ goto success;
+ }
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (!rcu_access_pointer(rkt->table[r]))
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n)
+ goto bail;
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+	/*
+	 * Make sure lkey is never zero, which is reserved to indicate an
+	 * unrestricted LKEY.
+	 */
+ rkt->gen++;
+ /*
+ * bits are capped in verbs.c to ensure enough bits for
+ * generation number
+ */
+ mr->lkey = (r << (32 - hfi1_lkey_table_size)) |
+ ((((1 << (24 - hfi1_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rcu_assign_pointer(rkt->table[r], mr);
+ mr->lkey_published = 1;
+success:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+out:
+ return ret;
+bail:
+ hfi1_put_mr(mr);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = -ENOMEM;
+ goto out;
+}
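+
+/*
+ * Resulting lkey layout, e.g. with hfi1_lkey_table_size == 16:
+ * bits 31..16 hold the table index r, bits 15..8 hold the low bits of
+ * the generation counter, and bits 7..0 are zero, so reuse of a table
+ * slot yields a different lkey value.
+ */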
+
+/**
+ * hfi1_free_lkey - free an lkey
+ * @mr: mr to free from tables
+ */
+void hfi1_free_lkey(struct hfi1_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ struct hfi1_ibdev *dev = to_idev(mr->pd->device);
+ struct hfi1_lkey_table *rkt = &dev->lk_table;
+ int freed = 0;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (!mr->lkey_published)
+ goto out;
+ if (lkey == 0)
+ RCU_INIT_POINTER(dev->dma_mr, NULL);
+ else {
+ r = lkey >> (32 - hfi1_lkey_table_size);
+ RCU_INIT_POINTER(rkt->table[r], NULL);
+ }
+ mr->lkey_published = 0;
+ freed++;
+out:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ if (freed) {
+ synchronize_rcu();
+ hfi1_put_mr(mr);
+ }
+}
+
+/**
+ * hfi1_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Returns 1 if valid and successful, otherwise returns 0.
+ *
+ * Increments the reference count upon success.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd,
+ struct hfi1_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct hfi1_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see hfi1_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (sge->lkey == 0) {
+ struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ isge->mr = mr;
+ isge->vaddr = (void *) sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rcu_dereference(
+ rkt->table[(sge->lkey >> (32 - hfi1_lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary.  entries_spanned_by_off is the number of
+		 * times the loop below would have executed.
+		 */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / HFI1_SEGSZ;
+ n = entries_spanned_by_off % HFI1_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
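+
+/*
+ * Worked example for the page_shift path above: with 4 KiB pages
+ * (page_shift == 12) and off == 0x5800, entries_spanned_by_off is 5,
+ * the residual off becomes 0x800, and the target segment is
+ * map[5 / HFI1_SEGSZ]->segs[5 % HFI1_SEGSZ].
+ */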
+
+/**
+ * hfi1_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: qp for validation
+ * @sge: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ *
+ * Increments the reference count upon success.
+ */
+int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct hfi1_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see hfi1_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (rkey == 0) {
+ struct hfi1_pd *pd = to_ipd(qp->ibqp.pd);
+ struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ sge->mr = mr;
+ sge->vaddr = (void *) vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - hfi1_lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary.  entries_spanned_by_off is the number of
+		 * times the loop below would have executed.
+		 */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / HFI1_SEGSZ;
+ n = entries_spanned_by_off % HFI1_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+
+/*
+ * Initialize the memory region specified by the work request.
+ */
+int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr)
+{
+ struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct hfi1_pd *pd = to_ipd(qp->ibqp.pd);
+ struct hfi1_mregion *mr;
+ u32 rkey = wr->wr.fast_reg.rkey;
+ unsigned i, n, m;
+ int ret = -EINVAL;
+ unsigned long flags;
+ u64 *page_list;
+ size_t ps;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (pd->user || rkey == 0)
+ goto bail;
+
+ mr = rcu_dereference_protected(
+ rkt->table[(rkey >> (32 - hfi1_lkey_table_size))],
+ lockdep_is_held(&rkt->lock));
+ if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ if (wr->wr.fast_reg.page_list_len > mr->max_segs)
+ goto bail;
+
+ ps = 1UL << wr->wr.fast_reg.page_shift;
+ if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
+ goto bail;
+
+ mr->user_base = wr->wr.fast_reg.iova_start;
+ mr->iova = wr->wr.fast_reg.iova_start;
+ mr->lkey = rkey;
+ mr->length = wr->wr.fast_reg.length;
+ mr->access_flags = wr->wr.fast_reg.access_flags;
+ page_list = wr->wr.fast_reg.page_list->page_list;
+ m = 0;
+ n = 0;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ mr->map[m]->segs[n].vaddr = (void *) page_list[i];
+ mr->map[m]->segs[n].length = ps;
+ if (++n == HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = 0;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
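+
+/*
+ * Example: a fast-register WR with page_shift == 12 and
+ * page_list_len == 4 programs four 4 KiB segments, so its length can
+ * be at most 16 KiB per the ps * page_list_len check above.
+ */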
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
new file mode 100644
index 000000000000..37269eb90c34
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -0,0 +1,4257 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/net.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "trace.h"
+
+#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
+			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
+
+/* the reset value from the FM is supposed to be 0xffff, handle both */
+#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
+#define OPA_LINK_WIDTH_RESET 0xffff
+
+static int reply(struct ib_mad_hdr *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static inline void clear_opa_smp_data(struct opa_smp *smp)
+{
+ void *data = opa_get_smp_data(smp);
+ size_t size = opa_get_smp_data_size(smp);
+
+ memset(data, 0, size);
+}
+
+static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
+{
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ int ret;
+ unsigned long flags;
+ unsigned long timeout;
+ int pkey_idx;
+ u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ return;
+
+ /* o14-3.2.1 */
+ if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
+ return;
+
+ /* o14-2 */
+ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ return;
+
+ pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
+ if (pkey_idx < 0) {
+ pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
+ __func__, hfi1_get_pkey(ibp, 1));
+ pkey_idx = 1;
+ }
+
+ send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
+ IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, IB_MGMT_BASE_VERSION);
+ if (IS_ERR(send_buf))
+ return;
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_TRAP;
+ ibp->tid++;
+ smp->tid = cpu_to_be64(ibp->tid);
+ smp->attr_id = IB_SMP_ATTR_NOTICE;
+ /* o14-1: smp->mkey = 0; */
+ memcpy(smp->data, data, len);
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (!ibp->sm_ah) {
+ if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ struct ib_ah *ah;
+
+ ah = hfi1_create_qp0_ah(ibp, ibp->sm_lid);
+ if (IS_ERR(ah))
+ ret = PTR_ERR(ah);
+ else {
+ send_buf->ah = ah;
+ ibp->sm_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else
+ ret = -EINVAL;
+ } else {
+ send_buf->ah = &ibp->sm_ah->ibah;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (!ret) {
+		/* 4.096 usec * 2^subnet_timeout, converted to usecs */
+ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
+ ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ } else {
+ ib_free_send_mad(send_buf);
+ ibp->trap_timeout = 0;
+ }
+}
+
+/*
+ * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ */
+void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+{
+ struct ib_mad_notice_attr data;
+
+ if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+ ibp->pkey_violations++;
+ else
+ ibp->qkey_violations++;
+ ibp->n_pkt_drops++;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = trap_num;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof(data.details));
+ data.details.ntc_257_258.lid1 = lid1;
+ data.details.ntc_257_258.lid2 = lid2;
+ data.details.ntc_257_258.key = cpu_to_be32(key);
+ data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
+ data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ send_trap(ibp, &data, sizeof(data));
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
+ __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
+{
+ struct ib_mad_notice_attr data;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof(data.details));
+ data.details.ntc_256.lid = data.issuer_lid;
+ data.details.ntc_256.method = mad->method;
+ data.details.ntc_256.attr_id = mad->attr_id;
+ data.details.ntc_256.attr_mod = mad->attr_mod;
+ data.details.ntc_256.mkey = mkey;
+	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ data.details.ntc_256.dr_slid = (__force __be16)dr_slid;
+ data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
+ data.details.ntc_256.dr_trunc_hop |=
+ IB_NOTICE_TRAP_DR_TRUNC;
+ hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+ }
+ data.details.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(data.details.ntc_256.dr_rtn_path, return_path,
+ hop_cnt);
+ }
+
+ send_trap(ibp, &data, sizeof(data));
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof(data.details));
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+
+ send_trap(ibp, &data, sizeof(data));
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof(data.details));
+ data.details.ntc_145.lid = data.issuer_lid;
+ data.details.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
+
+ send_trap(ibp, &data, sizeof(data));
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof(data.details));
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.local_changes = 1;
+ data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+
+ send_trap(ibp, &data, sizeof(data));
+}
+
+static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
+{
+ struct opa_node_description *nd;
+
+ if (am) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ nd = (struct opa_node_description *)data;
+
+ memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
+
+ if (resp_len)
+ *resp_len += sizeof(*nd);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct opa_node_info *ni;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+
+ ni = (struct opa_node_info *)data;
+
+ /* GUID 0 is illegal */
+ if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+ ni->base_version = OPA_MGMT_BASE_VERSION;
+ ni->class_version = OPA_SMI_CLASS_VERSION;
+ ni->node_type = 1; /* channel adapter */
+ ni->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ ni->system_image_guid = ib_hfi1_sys_image_guid;
+	/* Use the first port's GUID as the node GUID */
+ ni->node_guid = cpu_to_be64(dd->pport->guid);
+ ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
+ ni->device_id = cpu_to_be16(dd->pcidev->device);
+ ni->revision = cpu_to_be32(dd->minrev);
+ ni->local_port_num = port;
+ ni->vendor_id[0] = dd->oui1;
+ ni->vendor_id[1] = dd->oui2;
+ ni->vendor_id[2] = dd->oui3;
+
+ if (resp_len)
+ *resp_len += sizeof(*ni);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+
+ /* GUID 0 is illegal */
+ if (smp->attr_mod || pidx >= dd->num_pports ||
+ dd->pport[pidx].guid == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+
+ nip->base_version = OPA_MGMT_BASE_VERSION;
+ nip->class_version = OPA_SMI_CLASS_VERSION;
+ nip->node_type = 1; /* channel adapter */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = ib_hfi1_sys_image_guid;
+	/* Use the first port's GUID as the node GUID */
+ nip->node_guid = cpu_to_be64(dd->pport->guid);
+ nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->pcidev->device);
+ nip->revision = cpu_to_be32(dd->minrev);
+ nip->local_port_num = port;
+ nip->vendor_id[0] = dd->oui1;
+ nip->vendor_id[1] = dd->oui2;
+ nip->vendor_id[2] = dd->oui3;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
+}
+
+static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
+{
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
+}
+
+static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
+ int mad_flags, __be64 mkey, __be32 dr_slid,
+ u8 return_path[], u8 hop_cnt)
+{
+ int valid_mkey = 0;
+ int ret = 0;
+
+ /* Is the mkey in the process of expiring? */
+ if (ibp->mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ /* Clear timeout and mkey protection field. */
+ ibp->mkey_lease_timeout = 0;
+ ibp->mkeyprot = 0;
+ }
+
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
+ ibp->mkey == mkey)
+ valid_mkey = 1;
+
+ /* Unset lease timeout on any valid Get/Set/TrapRepress */
+ if (valid_mkey && ibp->mkey_lease_timeout &&
+ (mad->method == IB_MGMT_METHOD_GET ||
+ mad->method == IB_MGMT_METHOD_SET ||
+ mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
+ ibp->mkey_lease_timeout = 0;
+
+ if (!valid_mkey) {
+ switch (mad->method) {
+ case IB_MGMT_METHOD_GET:
+ /* Bad mkey not a violation below level 2 */
+ if (ibp->mkeyprot < 2)
+ break;
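+			/* FALLTHROUGH */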
+ case IB_MGMT_METHOD_SET:
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (ibp->mkey_violations != 0xFFFF)
+ ++ibp->mkey_violations;
+ if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+ ibp->mkey_lease_timeout = jiffies +
+ ibp->mkey_lease_period * HZ;
+ /* Generate a trap notice. */
+ bad_mkey(ibp, mad, mkey, dr_slid, return_path,
+ hop_cnt);
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * The SMA caches reads from LCB registers in case the LCB is unavailable.
+ * (The LCB is unavailable in certain link states, for example.)
+ */
+struct lcb_datum {
+ u32 off;
+ u64 val;
+};
+
+static struct lcb_datum lcb_cache[] = {
+ { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
+};
+
+static int write_lcb_cache(u32 off, u64 val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ if (lcb_cache[i].off == off) {
+ lcb_cache[i].val = val;
+ return 0;
+ }
+ }
+
+ pr_warn("%s bad offset 0x%x\n", __func__, off);
+ return -1;
+}
+
+static int read_lcb_cache(u32 off, u64 *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
+ if (lcb_cache[i].off == off) {
+ *val = lcb_cache[i].val;
+ return 0;
+ }
+ }
+
+ pr_warn("%s bad offset 0x%x\n", __func__, off);
+ return -1;
+}
+
+void read_ltp_rtt(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
+ if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
+ dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
+ else
+ write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
+}
+
+static u8 __opa_porttype(struct hfi1_pportdata *ppd)
+{
+ if (qsfp_mod_present(ppd)) {
+ if (ppd->qsfp_info.cache_valid)
+ return OPA_PORT_TYPE_STANDARD;
+ return OPA_PORT_TYPE_DISCONNECTED;
+ }
+ return OPA_PORT_TYPE_UNKNOWN;
+}
+
+static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ int i;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct opa_port_info *pi = (struct opa_port_info *)data;
+ u8 mtu;
+ u8 credit_rate;
+ u32 state;
+ u32 num_ports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 buffer_units;
+ u64 tmp = 0;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ ibp = &ppd->ibport_data;
+
+ if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ pi->lid = cpu_to_be32(ppd->lid);
+
+ /* Only return the mkey if the protection field allows it. */
+ if (!(smp->method == IB_MGMT_METHOD_GET &&
+ ibp->mkey != smp->mkey &&
+ ibp->mkeyprot == 1))
+ pi->mkey = ibp->mkey;
+
+ pi->subnet_prefix = ibp->gid_prefix;
+ pi->sm_lid = cpu_to_be32(ibp->sm_lid);
+ pi->ib_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ pi->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
+ pi->sa_qp = cpu_to_be32(ppd->sa_qp);
+
+ pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
+ pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
+ pi->link_width.active = cpu_to_be16(ppd->link_width_active);
+
+ pi->link_width_downgrade.supported =
+ cpu_to_be16(ppd->link_width_downgrade_supported);
+ pi->link_width_downgrade.enabled =
+ cpu_to_be16(ppd->link_width_downgrade_enabled);
+ pi->link_width_downgrade.tx_active =
+ cpu_to_be16(ppd->link_width_downgrade_tx_active);
+ pi->link_width_downgrade.rx_active =
+ cpu_to_be16(ppd->link_width_downgrade_rx_active);
+
+ pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
+ pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
+ pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
+
+ state = driver_lstate(ppd);
+
+ if (start_of_sm_config && (state == IB_PORT_INIT))
+ ppd->is_sm_config_started = 1;
+
+ pi->port_phys_conf = __opa_porttype(ppd) & 0xf;
+
+#if PI_LED_ENABLE_SUP
+ pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
+ pi->port_states.ledenable_offlinereason |=
+ ppd->is_sm_config_started << 5;
+ pi->port_states.ledenable_offlinereason |=
+ ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
+#else
+ pi->port_states.offline_reason = ppd->neighbor_normal << 4;
+ pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
+ pi->port_states.offline_reason |= ppd->offline_disabled_reason &
+ OPA_PI_MASK_OFFLINE_REASON;
+#endif /* PI_LED_ENABLE_SUP */
+
+ pi->port_states.portphysstate_portstate =
+ (hfi1_ibphys_portstate(ppd) << 4) | state;
+
+ pi->mkeyprotect_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+
+ memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
+ for (i = 0; i < ppd->vls_supported; i++) {
+ mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
+ if ((i % 2) == 0)
+ pi->neigh_mtu.pvlx_to_mtu[i/2] |= (mtu << 4);
+ else
+ pi->neigh_mtu.pvlx_to_mtu[i/2] |= mtu;
+ }
+ /* don't forget VL 15 */
+ mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
+ pi->neigh_mtu.pvlx_to_mtu[15/2] |= mtu;
+ pi->smsl = ibp->sm_sl & OPA_PI_MASK_SMSL;
+ pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
+ pi->partenforce_filterraw |=
+ (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
+ if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
+ pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
+ if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
+ pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
+ pi->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pi->pkey_violations = cpu_to_be16(ibp->pkey_violations);
+ pi->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+
+ pi->vl.cap = ppd->vls_supported;
+ pi->vl.high_limit = cpu_to_be16(ibp->vl_high_limit);
+ pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
+ pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
+
+ pi->clientrereg_subnettimeout = ibp->subnet_timeout;
+
+ pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
+ OPA_PORT_LINK_MODE_OPA << 5 |
+ OPA_PORT_LINK_MODE_OPA);
+
+ pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
+
+ pi->port_mode = cpu_to_be16(
+ ppd->is_active_optimize_enabled ?
+ OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
+
+ pi->port_packet_format.supported =
+ cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
+ pi->port_packet_format.enabled =
+ cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
+
+ /* flit_control.interleave is (OPA V1, version .76):
+ * bits use
+ * ---- ---
+ * 2 res
+ * 2 DistanceSupported
+ * 2 DistanceEnabled
+	 * 5		MaxNestLevelTxEnabled
+ * 5 MaxNestLevelRxSupported
+ *
+ * HFI supports only "distance mode 1" (see OPA V1, version .76,
+ * section 9.6.2), so set DistanceSupported, DistanceEnabled
+ * to 0x1.
+ */
+ pi->flit_control.interleave = cpu_to_be16(0x1400);
+
+ pi->link_down_reason = ppd->local_link_down_reason.sma;
+ pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
+ pi->port_error_action = cpu_to_be32(ppd->port_error_action);
+ pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
+
+ /* 32.768 usec. response time (guessing) */
+ pi->resptimevalue = 3;
+
+ pi->local_port_num = port;
+
+ /* buffer info for FM */
+ pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
+
+ pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
+ pi->neigh_port_num = ppd->neighbor_port_number;
+ pi->port_neigh_mode =
+ (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
+ (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
+ (ppd->neighbor_fm_security ?
+ OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
+
+ /* HFIs shall always return VL15 credits to their
+ * neighbor in a timely manner, without any credit return pacing.
+ */
+ credit_rate = 0;
+ buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
+ buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
+ buffer_units |= (credit_rate << 6) &
+ OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
+ buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
+ pi->buffer_units = cpu_to_be32(buffer_units);
+
+ pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);
+
+ /* HFI supports a replay buffer 128 LTPs in size */
+ pi->replay_depth.buffer = 0x80;
+ /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
+ read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
+
+	/*
+	 * This counter is 16 bits wide, but the replay_depth.wire
+	 * variable is only 8 bits.
+	 */
+ if (tmp > 0xff)
+ tmp = 0xff;
+ pi->replay_depth.wire = tmp;
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_port_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the hfi1_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct hfi1_pportdata *ppd = dd->pport + port - 1;
+
+ memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
+
+ return 0;
+}
+
+static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 n_blocks_req = OPA_AM_NBLK(am);
+ u32 start_block = am & 0x7ff;
+ __be16 *p;
+ u16 *q;
+ int i;
+ u16 n_blocks_avail;
+ unsigned npkeys = hfi1_get_npkeys(dd);
+ size_t size;
+
+ if (n_blocks_req == 0) {
+ pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+ port, start_block, n_blocks_req);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ n_blocks_avail = (u16) (npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+
+ size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
+
+ if (start_block + n_blocks_req > n_blocks_avail ||
+ n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
+		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
+ start_block, n_blocks_req, n_blocks_avail,
+ OPA_NUM_PKEY_BLOCKS_PER_SMP);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ p = (__be16 *) data;
+ q = (u16 *)data;
+ /* get the real pkeys if we are requesting the first block */
+ if (start_block == 0) {
+ get_pkeys(dd, port, q);
+ for (i = 0; i < npkeys; i++)
+ p[i] = cpu_to_be16(q[i]);
+ if (resp_len)
+ *resp_len += size;
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+enum {
+ HFI_TRANSITION_DISALLOWED,
+ HFI_TRANSITION_IGNORED,
+ HFI_TRANSITION_ALLOWED,
+ HFI_TRANSITION_UNDEFINED,
+};
+
+/*
+ * Use shortened names to improve readability of
+ * {logical,physical}_state_transitions
+ */
+enum {
+ __D = HFI_TRANSITION_DISALLOWED,
+ __I = HFI_TRANSITION_IGNORED,
+ __A = HFI_TRANSITION_ALLOWED,
+ __U = HFI_TRANSITION_UNDEFINED,
+};
+
+/*
+ * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
+ * represented in physical_state_transitions.
+ */
+#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
+
+/*
+ * Within physical_state_transitions, rows represent "old" states,
+ * columns "new" states, and physical_state_transitions.allowed[old][new]
+ * indicates if the transition from old state to new state is legal (see
+ * OPAg1v1, Table 6-4).
+ */
+static const struct {
+ u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
+} physical_state_transitions = {
+ {
+ /* 2 3 4 5 6 7 8 9 10 11 */
+ /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
+ /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
+ /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
+ /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
+ /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
+ /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
+ /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
+ }
+};
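+
+/*
+ * Reading the table: allowed[0][1] is the transition from old state 2
+ * (Polling) to new state 3 (Disabled), which is __A (allowed) above.
+ */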
+
+/*
+ * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
+ * in logical_state_transitions.
+ */
+
+#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
+
+/*
+ * Within logical_state_transitions rows represent "old" states,
+ * columns "new" states, and logical_state_transitions.allowed[old][new]
+ * indicates if the transition from old state to new state is legal (see
+ * OPAg1v1, Table 9-12).
+ */
+static const struct {
+ u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
+} logical_state_transitions = {
+ {
+ /* 1 2 3 4 5 */
+ /* 1 */ { __I, __D, __D, __D, __U},
+ /* 2 */ { __D, __I, __A, __D, __U},
+ /* 3 */ { __D, __D, __I, __A, __U},
+ /* 4 */ { __D, __D, __I, __I, __U},
+ /* 5 */ { __U, __U, __U, __U, __U},
+ }
+};
+
+static int logical_transition_allowed(int old, int new)
+{
+ if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
+ new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
+ pr_warn("invalid logical state(s) (old %d new %d)\n",
+ old, new);
+ return HFI_TRANSITION_UNDEFINED;
+ }
+
+ if (new == IB_PORT_NOP)
+ return HFI_TRANSITION_ALLOWED; /* always allowed */
+
+ /* adjust states for indexing into logical_state_transitions */
+ old -= IB_PORT_DOWN;
+ new -= IB_PORT_DOWN;
+
+ if (old < 0 || new < 0)
+ return HFI_TRANSITION_UNDEFINED;
+ return logical_state_transitions.allowed[old][new];
+}
+
+static int physical_transition_allowed(int old, int new)
+{
+ if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
+ new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
+ pr_warn("invalid physical state(s) (old %d new %d)\n",
+ old, new);
+ return HFI_TRANSITION_UNDEFINED;
+ }
+
+ if (new == IB_PORTPHYSSTATE_NOP)
+ return HFI_TRANSITION_ALLOWED; /* always allowed */
+
+ /* adjust states for indexing into physical_state_transitions */
+ old -= IB_PORTPHYSSTATE_POLLING;
+ new -= IB_PORTPHYSSTATE_POLLING;
+
+ if (old < 0 || new < 0)
+ return HFI_TRANSITION_UNDEFINED;
+ return physical_state_transitions.allowed[old][new];
+}
+
+static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
+ u32 logical_new, u32 physical_new)
+{
+ u32 physical_old = driver_physical_state(ppd);
+ u32 logical_old = driver_logical_state(ppd);
+ int ret, logical_allowed, physical_allowed;
+
+ logical_allowed = ret =
+ logical_transition_allowed(logical_old, logical_new);
+
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ pr_warn("invalid logical state transition %s -> %s\n",
+ opa_lstate_name(logical_old),
+ opa_lstate_name(logical_new));
+ return ret;
+ }
+
+ physical_allowed = ret =
+ physical_transition_allowed(physical_old, physical_new);
+
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ pr_warn("invalid physical state transition %s -> %s\n",
+ opa_pstate_name(physical_old),
+ opa_pstate_name(physical_new));
+ return ret;
+ }
+
+ if (logical_allowed == HFI_TRANSITION_IGNORED &&
+ physical_allowed == HFI_TRANSITION_IGNORED)
+ return HFI_TRANSITION_IGNORED;
+
+ /*
+ * Either physical_allowed or logical_allowed is
+ * HFI_TRANSITION_ALLOWED.
+ */
+ return HFI_TRANSITION_ALLOWED;
+}
+
+static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
+ u32 logical_state, u32 phys_state,
+ int suppress_idle_sma)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 link_state;
+ int ret;
+
+ ret = port_states_transition_allowed(ppd, logical_state, phys_state);
+ if (ret == HFI_TRANSITION_DISALLOWED ||
+ ret == HFI_TRANSITION_UNDEFINED) {
+ /* error message emitted above */
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return 0;
+ }
+
+ if (ret == HFI_TRANSITION_IGNORED)
+ return 0;
+
+ if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
+ !(logical_state == IB_PORT_DOWN ||
+	      logical_state == IB_PORT_NOP)) {
+ pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
+ logical_state, phys_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ /*
+ * Logical state changes are summarized in OPAv1g1 spec.,
+ * Table 9-12; physical state changes are summarized in
+	 * OPAv1g1 spec., Table 6-4.
+ */
+ switch (logical_state) {
+ case IB_PORT_NOP:
+ if (phys_state == IB_PORTPHYSSTATE_NOP)
+ break;
+ /* FALLTHROUGH */
+ case IB_PORT_DOWN:
+ if (phys_state == IB_PORTPHYSSTATE_NOP)
+ link_state = HLS_DN_DOWNDEF;
+ else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
+ link_state = HLS_DN_POLL;
+ set_link_down_reason(ppd,
+ OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
+ OPA_LINKDOWN_REASON_FM_BOUNCE);
+ } else if (phys_state == IB_PORTPHYSSTATE_DISABLED)
+ link_state = HLS_DN_DISABLE;
+ else {
+ pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
+ phys_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
+
+ set_link_state(ppd, link_state);
+ if (link_state == HLS_DN_DISABLE &&
+ (ppd->offline_disabled_reason >
+ OPA_LINKDOWN_REASON_SMA_DISABLED ||
+ ppd->offline_disabled_reason ==
+ OPA_LINKDOWN_REASON_NONE))
+ ppd->offline_disabled_reason =
+ OPA_LINKDOWN_REASON_SMA_DISABLED;
+ /*
+ * Don't send a reply if the response would be sent
+ * through the disabled port.
+ */
+ if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ break;
+ case IB_PORT_ARMED:
+ ret = set_link_state(ppd, HLS_UP_ARMED);
+ if ((ret == 0) && (suppress_idle_sma == 0))
+ send_idle_sma(dd, SMA_IDLE_ARM);
+ break;
+ case IB_PORT_ACTIVE:
+ if (ppd->neighbor_normal) {
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (ret == 0)
+ send_idle_sma(dd, SMA_IDLE_ACTIVE);
+ } else {
+ pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ break;
+ default:
+ pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
+ logical_state);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ return 0;
+}
+
+/**
+ * __subn_set_opa_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @am: the attribute modifier
+ * @data: the port info to apply
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ * @resp_len: the response length to update
+ */
+static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct opa_port_info *pi = (struct opa_port_info *)data;
+ struct ib_event event;
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u8 clientrereg;
+ unsigned long flags;
+ u32 smlid, opa_lid; /* tmp vars to hold LID values */
+ u16 lid;
+ u8 ls_old, ls_new, ps_new;
+ u8 vls;
+ u8 msl;
+ u8 crc_enabled;
+ u16 lse, lwe, mtu;
+ u32 num_ports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ int ret, i, invalid = 0, call_set_mtu = 0;
+ int call_link_downgrade_policy = 0;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ opa_lid = be32_to_cpu(pi->lid);
+ if (opa_lid & 0xFFFF0000) {
+ pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ goto get_only;
+ }
+
+ lid = (u16)(opa_lid & 0x0000FFFF);
+
+ smlid = be32_to_cpu(pi->sm_lid);
+ if (smlid & 0xFFFF0000) {
+ pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ goto get_only;
+ }
+ smlid &= 0x0000FFFF;
+
+ clientrereg = (pi->clientrereg_subnettimeout &
+ OPA_PI_MASK_CLIENT_REREGISTER);
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ ibp = &ppd->ibport_data;
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ ls_old = driver_lstate(ppd);
+
+ ibp->mkey = pi->mkey;
+ ibp->gid_prefix = pi->subnet_prefix;
+ ibp->mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
+
+ /* Must be a valid unicast LID address. */
+ if ((lid == 0 && ls_old > IB_PORT_INIT) ||
+ lid >= HFI1_MULTICAST_LID_BASE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
+ lid);
+ } else if (ppd->lid != lid ||
+ ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
+ if (ppd->lid != lid)
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
+ if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
+ hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
+ hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ msl = pi->smsl & OPA_PI_MASK_SMSL;
+ if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
+ ppd->linkinit_reason =
+ (pi->partenforce_filterraw &
+ OPA_PI_MASK_LINKINIT_REASON);
+ /* enable/disable SW pkey checking as per FM control */
+ if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
+ ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
+ else
+ ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;
+
+ if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
+ ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
+ else
+ ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;
+
+ /* Must be a valid unicast LID address. */
+ if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
+ smlid >= HFI1_MULTICAST_LID_BASE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
+ } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (ibp->sm_ah) {
+ if (smlid != ibp->sm_lid)
+ ibp->sm_ah->attr.dlid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_ah->attr.sl = msl;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ if (smlid != ibp->sm_lid)
+ ibp->sm_lid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_sl = msl;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ if (pi->link_down_reason == 0) {
+ ppd->local_link_down_reason.sma = 0;
+ ppd->local_link_down_reason.latest = 0;
+ }
+
+ if (pi->neigh_link_down_reason == 0) {
+ ppd->neigh_link_down_reason.sma = 0;
+ ppd->neigh_link_down_reason.latest = 0;
+ }
+
+ ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
+ ppd->sa_qp = be32_to_cpu(pi->sa_qp);
+
+ ppd->port_error_action = be32_to_cpu(pi->port_error_action);
+ lwe = be16_to_cpu(pi->link_width.enabled);
+ if (lwe) {
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD)
+ set_link_width_enabled(ppd, ppd->link_width_supported);
+ else if ((lwe & ~ppd->link_width_supported) == 0)
+ set_link_width_enabled(ppd, lwe);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
+ /* LWD.E is always applied - 0 means "disabled" */
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD) {
+ set_link_width_downgrade_enabled(ppd,
+ ppd->link_width_downgrade_supported);
+ } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
+ /* only set and apply if something changed */
+ if (lwe != ppd->link_width_downgrade_enabled) {
+ set_link_width_downgrade_enabled(ppd, lwe);
+ call_link_downgrade_policy = 1;
+ }
+ } else {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ lse = be16_to_cpu(pi->link_speed.enabled);
+ if (lse) {
+ if (lse & be16_to_cpu(pi->link_speed.supported))
+ set_link_speed_enabled(ppd, lse);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+
+ ibp->mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
+ ibp->vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
+ ibp->vl_high_limit);
+
+ if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ for (i = 0; i < ppd->vls_supported; i++) {
+ if ((i % 2) == 0)
+ mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i/2] >> 4)
+ & 0xF);
+ else
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i/2] & 0xF);
+ if (mtu == 0xffff) {
+ pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
+ mtu,
+ (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ mtu = hfi1_max_mtu; /* use a valid MTU */
+ }
+ if (dd->vld[i].mtu != mtu) {
+ dd_dev_info(dd,
+ "MTU change on vl %d from %d to %d\n",
+ i, dd->vld[i].mtu, mtu);
+ dd->vld[i].mtu = mtu;
+ call_set_mtu++;
+ }
+ }
+ /*
+ * As per the OPAv1 spec, VL15 must be supported and configured to
+ * operate with an MTU of 2048 bytes or larger.
+ */
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15/2] & 0xF);
+ if (mtu < 2048 || mtu == 0xffff)
+ mtu = 2048;
+ if (dd->vld[15].mtu != mtu) {
+ dd_dev_info(dd,
+ "MTU change on vl 15 from %d to %d\n",
+ dd->vld[15].mtu, mtu);
+ dd->vld[15].mtu = mtu;
+ call_set_mtu++;
+ }
+ if (call_set_mtu)
+ set_mtu(ppd);
+
+ /* Set operational VLs */
+ vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
+ if (vls) {
+ if (vls > ppd->vls_supported) {
+ pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
+ pi->operational_vls);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ } else {
+ if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
+ vls) == -EINVAL)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
+ }
+
+ if (pi->mkey_violations == 0)
+ ibp->mkey_violations = 0;
+
+ if (pi->pkey_violations == 0)
+ ibp->pkey_violations = 0;
+
+ if (pi->qkey_violations == 0)
+ ibp->qkey_violations = 0;
+
+ ibp->subnet_timeout =
+ pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
+
+ crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
+ crc_enabled >>= 4;
+ crc_enabled &= 0xf;
+
+ if (crc_enabled != 0)
+ ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
+
+ ppd->is_active_optimize_enabled =
+ !!(be16_to_cpu(pi->port_mode)
+ & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
+
+ ls_new = pi->port_states.portphysstate_portstate &
+ OPA_PI_MASK_PORT_STATE;
+ ps_new = (pi->port_states.portphysstate_portstate &
+ OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
+
+ if (ls_old == IB_PORT_INIT) {
+ if (start_of_sm_config) {
+ if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
+ ppd->is_sm_config_started = 1;
+ } else if (ls_new == IB_PORT_ARMED) {
+ if (ppd->is_sm_config_started == 0)
+ invalid = 1;
+ }
+ }
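+ /*
+ * i.e. once the FM has signaled StartOfSMConfig while the port is
+ * in INIT, later transitions to Armed are accepted; an Armed
+ * request that arrives before any StartOfSMConfig is flagged
+ * invalid.
+ */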
+
+ /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
+ if (clientrereg) {
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+
+ ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
+ if (ret)
+ return ret;
+
+ ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
+
+ /* restore re-reg bit per o14-12.2.1 */
+ pi->clientrereg_subnettimeout |= clientrereg;
+
+ /*
+ * Apply the new link downgrade policy. This may result in a link
+ * bounce. Do this after everything else so things are settled.
+ * Possible problem: if setting the port state above fails, then
+ * the policy change is not applied.
+ */
+ if (call_link_downgrade_policy)
+ apply_link_downgrade_policy(ppd, 0);
+
+ return ret;
+
+get_only:
+ return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
+}
+
+/**
+ * set_pkeys - set the PKEY table for ctxt 0
+ * @dd: the hfi1_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ *
+ * Return: 0 on success; 1 if the update was rejected because it does
+ * not include the management pkey.
+ */
+static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+ int changed = 0;
+ int update_includes_mgmt_partition = 0;
+
+ /*
+ * IB port one/two always maps to context zero/one,
+ * always a kernel context, no locking needed
+ * If we get here with ppd setup, no need to check
+ * that rcd is valid.
+ */
+ ppd = dd->pport + (port - 1);
+ /*
+ * If the update does not include the management pkey, don't do it.
+ */
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pkeys[i] == LIM_MGMT_P_KEY) {
+ update_includes_mgmt_partition = 1;
+ break;
+ }
+ }
+
+ if (!update_includes_mgmt_partition)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = ppd->pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The SM gives us the complete PKey table. We have
+ * to ensure that we put the PKeys in the matching
+ * slots.
+ */
+ ppd->pkeys[i] = key;
+ changed = 1;
+ }
+
+ if (changed) {
+ struct ib_event event;
+
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
+ }
+ return 0;
+}
+
+static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 n_blocks_sent = OPA_AM_NBLK(am);
+ u32 start_block = am & 0x7ff;
+ u16 *p = (u16 *) data;
+ __be16 *q = (__be16 *)data;
+ int i;
+ u16 n_blocks_avail;
+ unsigned npkeys = hfi1_get_npkeys(dd);
+
+ if (n_blocks_sent == 0) {
+ pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+ port, start_block, n_blocks_sent);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ n_blocks_avail = (u16)(npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+
+ if (start_block + n_blocks_sent > n_blocks_avail ||
+ n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
+ pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
+ start_block, n_blocks_sent, n_blocks_avail,
+ OPA_NUM_PKEY_BLOCKS_PER_SMP);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
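+ /*
+ * p and q alias the same buffer; each 16-bit entry is byte-swapped
+ * exactly once, so converting in place is safe.
+ */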
+ for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
+ p[i] = be16_to_cpu(q[i]);
+
+ if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
+}
+
+static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
+{
+ u64 *val = (u64 *)data;
+
+ *val++ = read_csr(dd, SEND_SC2VLT0);
+ *val++ = read_csr(dd, SEND_SC2VLT1);
+ *val++ = read_csr(dd, SEND_SC2VLT2);
+ *val++ = read_csr(dd, SEND_SC2VLT3);
+ return 0;
+}
+
+#define ILLEGAL_VL 12
+/*
+ * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
+ * for SC15, which must map to VL15). If we don't remap things this
+ * way it is possible for VL15 counters to increment when we try to
+ * send on a SC which is mapped to an invalid VL.
+ */
+static void filter_sc2vlt(void *data)
+{
+ int i;
+ u8 *pd = (u8 *)data;
+
+ for (i = 0; i < OPA_MAX_SCS; i++) {
+ if (i == 15)
+ continue;
+ if ((pd[i] & 0x1f) == 0xf)
+ pd[i] = ILLEGAL_VL;
+ }
+}
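+
+/*
+ * For illustration: an entry of 0x0f (VL15) at, say, pd[3] is rewritten
+ * to ILLEGAL_VL (12); pd[15] (SC15) is skipped, since SC15 is the one
+ * SC that must keep its VL15 mapping.
+ */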
+
+static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
+{
+ u64 *val = (u64 *)data;
+
+ filter_sc2vlt(data);
+
+ write_csr(dd, SEND_SC2VLT0, *val++);
+ write_csr(dd, SEND_SC2VLT1, *val++);
+ write_csr(dd, SEND_SC2VLT2, *val++);
+ write_csr(dd, SEND_SC2VLT3, *val++);
+ write_seqlock_irq(&dd->sc2vl_lock);
+ memcpy(dd->sc2vl, (u64 *)data, sizeof(dd->sc2vl));
+ write_sequnlock_irq(&dd->sc2vl_lock);
+ return 0;
+}
+
+static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *)data;
+ size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
+ unsigned i;
+
+ if (am) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
+ *p++ = ibp->sl_to_sc[i];
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *)data;
+ int i;
+
+ if (am) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
+ ibp->sl_to_sc[i] = *p++;
+
+ return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
+}
+
+static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *)data;
+ size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
+ unsigned i;
+
+ if (am) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
+ *p++ = ibp->sc_to_sl[i];
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *)data;
+ int i;
+
+ if (am) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
+ ibp->sc_to_sl[i] = *p++;
+
+ return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
+}
+
+static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ void *vp = (void *) data;
+ size_t size = 4 * sizeof(u64);
+
+ if (n_blocks != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ get_sc2vlt_tables(dd, vp);
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 n_blocks = OPA_AM_NBLK(am);
+ int async_update = OPA_AM_ASYNC(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ void *vp = (void *) data;
+ struct hfi1_pportdata *ppd;
+ int lstate;
+
+ if (n_blocks != 1 || async_update) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ lstate = driver_lstate(ppd);
+ /*
+ * It's known that async_update is 0 by this point, but include
+ * the explicit check for clarity.
+ */
+ if (!async_update &&
+ (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ set_sc2vlt_tables(dd, vp);
+
+ return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
+}
+
+static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 n_blocks = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ void *vp = (void *) data;
+ int size;
+
+ if (n_blocks != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ppd = dd->pport + (port - 1);
+
+ size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
+
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 n_blocks = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ void *vp = (void *) data;
+ int lstate;
+
+ if (n_blocks != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* IB numbers ports from 1, hw from 0 */
+ ppd = dd->pport + (port - 1);
+ lstate = driver_lstate(ppd);
+ if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
+
+ return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len);
+}
+
+static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 nports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 lstate;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
+
+ if (nports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ibp = to_iport(ibdev, port);
+ ppd = ppd_from_ibp(ibp);
+
+ lstate = driver_lstate(ppd);
+
+ if (start_of_sm_config && (lstate == IB_PORT_INIT))
+ ppd->is_sm_config_started = 1;
+
+#if PI_LED_ENABLE_SUP
+ psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
+ psi->port_states.ledenable_offlinereason |=
+ ppd->is_sm_config_started << 5;
+ psi->port_states.ledenable_offlinereason |=
+ ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
+#else
+ psi->port_states.offline_reason = ppd->neighbor_normal << 4;
+ psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
+ psi->port_states.offline_reason |= ppd->offline_disabled_reason &
+ OPA_PI_MASK_OFFLINE_REASON;
+#endif /* PI_LED_ENABLE_SUP */
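+
+ /*
+ * In both variants: bit 4 carries NeighborNormal, bit 5 carries
+ * IsSMConfigStarted, and the remaining bits hold the
+ * offline/disabled reason selected by OPA_PI_MASK_OFFLINE_REASON.
+ */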
+
+ psi->port_states.portphysstate_portstate =
+ (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
+ psi->link_width_downgrade_tx_active =
+ ppd->link_width_downgrade_tx_active;
+ psi->link_width_downgrade_rx_active =
+ ppd->link_width_downgrade_rx_active;
+ if (resp_len)
+ *resp_len += sizeof(struct opa_port_state_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ u32 nports = OPA_AM_NPORT(am);
+ u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
+ u32 ls_old;
+ u8 ls_new, ps_new;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
+ int ret, invalid = 0;
+
+ if (nports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ibp = to_iport(ibdev, port);
+ ppd = ppd_from_ibp(ibp);
+
+ ls_old = driver_lstate(ppd);
+
+ ls_new = port_states_to_logical_state(&psi->port_states);
+ ps_new = port_states_to_phys_state(&psi->port_states);
+
+ if (ls_old == IB_PORT_INIT) {
+ if (start_of_sm_config) {
+ if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
+ ppd->is_sm_config_started = 1;
+ } else if (ls_new == IB_PORT_ARMED) {
+ if (ppd->is_sm_config_started == 0)
+ invalid = 1;
+ }
+ }
+
+ ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
+ if (ret)
+ return ret;
+
+ if (invalid)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
+}
+
+static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 addr = OPA_AM_CI_ADDR(am);
+ u32 len = OPA_AM_CI_LEN(am) + 1;
+ int ret;
+
+#define __CI_PAGE_SIZE (1 << 7) /* 128 bytes */
+#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
+#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
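+
+/*
+ * For illustration, with the 128-byte page defined above: addr = 130
+ * and len = 10 touch bytes 130..139, all on page 1 (__CI_PAGE_NUM ==
+ * 128), so the request passes; addr = 120 with len = 16 would span
+ * pages 0 and 1 and is rejected by the check below.
+ */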
+
+ /*
+ * Check that addr is within spec, and that addr and
+ * (addr + len - 1) are on the same "page".
+ */
+ if (addr >= 4096 ||
+ (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ret = get_cable_info(dd, port, addr, len, data);
+
+ if (ret == -ENODEV) {
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /*
+ * The address range for the CableInfo SMA query is wider than the
+ * memory available on the QSFP cable. We want to return a valid
+ * response, albeit zeroed out, for address ranges beyond available
+ * memory but that are within the CableInfo query spec.
+ */
+ if (ret < 0 && ret != -ERANGE) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ if (resp_len)
+ *resp_len += len;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ u32 num_ports = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ struct buffer_control *p = (struct buffer_control *) data;
+ int size;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ ppd = dd->pport + (port - 1);
+ size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
+ trace_bct_get(dd, p);
+ if (resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ u32 num_ports = OPA_AM_NPORT(am);
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_pportdata *ppd;
+ struct buffer_control *p = (struct buffer_control *) data;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ ppd = dd->pport + (port - 1);
+ trace_bct_set(dd, p);
+ if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
+}
+
+static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+ u32 num_ports = OPA_AM_NPORT(am);
+ u8 section = (am & 0x00ff0000) >> 16;
+ u8 *p = data;
+ int size = 0;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ switch (section) {
+ case OPA_VLARB_LOW_ELEMENTS:
+ size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ break;
+ case OPA_VLARB_HIGH_ELEMENTS:
+ size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ break;
+ case OPA_VLARB_PREEMPT_ELEMENTS:
+ size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
+ break;
+ case OPA_VLARB_PREEMPT_MATRIX:
+ size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
+ break;
+ default:
+ pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
+ be32_to_cpu(smp->attr_mod));
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
+
+ if (size > 0 && resp_len)
+ *resp_len += size;
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+ u32 num_ports = OPA_AM_NPORT(am);
+ u8 section = (am & 0x00ff0000) >> 16;
+ u8 *p = data;
+
+ if (num_ports != 1) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ switch (section) {
+ case OPA_VLARB_LOW_ELEMENTS:
+ (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ break;
+ case OPA_VLARB_HIGH_ELEMENTS:
+ (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ break;
+ /*
+ * Neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
+ * can be changed from the default values.
+ */
+ case OPA_VLARB_PREEMPT_ELEMENTS:
+ /* FALLTHROUGH */
+ case OPA_VLARB_PREEMPT_MATRIX:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ break;
+ default:
+ pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
+ be32_to_cpu(smp->attr_mod));
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
+
+ return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
+}
+
+struct opa_pma_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 data[2024];
+} __packed;
+
+struct opa_class_port_info {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ __be32 cap_mask2_resp_time;
+
+ u8 redirect_gid[16];
+ __be32 redirect_tc_fl;
+ __be32 redirect_lid;
+ __be32 redirect_sl_qp;
+ __be32 redirect_qkey;
+
+ u8 trap_gid[16];
+ __be32 trap_tc_fl;
+ __be32 trap_lid;
+ __be32 trap_hl_qp;
+ __be32 trap_qkey;
+
+ __be16 trap_pkey;
+ __be16 redirect_pkey;
+
+ u8 trap_sl_rsvd;
+ u8 reserved[3];
+} __packed;
+
+struct opa_port_status_req {
+ __u8 port_num;
+ __u8 reserved[3];
+ __be32 vl_select_mask;
+};
+
+#define VL_MASK_ALL 0x000080ff
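+/* 0x000080ff selects data VLs 0-7 (bits 0-7) plus VL15 (bit 15) */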
+
+struct opa_port_status_rsp {
+ __u8 port_num;
+ __u8 reserved[3];
+ __be32 vl_select_mask;
+
+ /* Data counters */
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_pkts;
+ __be64 port_rcv_pkts;
+ __be64 port_multicast_xmit_pkts;
+ __be64 port_multicast_rcv_pkts;
+ __be64 port_xmit_wait;
+ __be64 sw_port_congestion;
+ __be64 port_rcv_fecn;
+ __be64 port_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_xmit_wasted_bw;
+ __be64 port_xmit_wait_data;
+ __be64 port_rcv_bubble;
+ __be64 port_mark_fecn;
+ /* Error counters */
+ __be64 port_rcv_constraint_errors;
+ __be64 port_rcv_switch_relay_errors;
+ __be64 port_xmit_discards;
+ __be64 port_xmit_constraint_errors;
+ __be64 port_rcv_remote_physical_errors;
+ __be64 local_link_integrity_errors;
+ __be64 port_rcv_errors;
+ __be64 excessive_buffer_overruns;
+ __be64 fm_config_errors;
+ __be32 link_error_recovery;
+ __be32 link_downed;
+ u8 uncorrectable_errors;
+
+ u8 link_quality_indicator; /* 5res, 3bit */
+ u8 res2[6];
+ struct _vls_pctrs {
+ /* per-VL Data counters */
+ __be64 port_vl_xmit_data;
+ __be64 port_vl_rcv_data;
+ __be64 port_vl_xmit_pkts;
+ __be64 port_vl_rcv_pkts;
+ __be64 port_vl_xmit_wait;
+ __be64 sw_port_vl_congestion;
+ __be64 port_vl_rcv_fecn;
+ __be64 port_vl_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_vl_xmit_wasted_bw;
+ __be64 port_vl_xmit_wait_data;
+ __be64 port_vl_rcv_bubble;
+ __be64 port_vl_mark_fecn;
+ __be64 port_vl_xmit_discards;
+ } vls[0]; /* real array size defined by # bits set in vl_select_mask */
+};
+
+enum counter_selects {
+ CS_PORT_XMIT_DATA = (1 << 31),
+ CS_PORT_RCV_DATA = (1 << 30),
+ CS_PORT_XMIT_PKTS = (1 << 29),
+ CS_PORT_RCV_PKTS = (1 << 28),
+ CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
+ CS_PORT_MCAST_RCV_PKTS = (1 << 26),
+ CS_PORT_XMIT_WAIT = (1 << 25),
+ CS_SW_PORT_CONGESTION = (1 << 24),
+ CS_PORT_RCV_FECN = (1 << 23),
+ CS_PORT_RCV_BECN = (1 << 22),
+ CS_PORT_XMIT_TIME_CONG = (1 << 21),
+ CS_PORT_XMIT_WASTED_BW = (1 << 20),
+ CS_PORT_XMIT_WAIT_DATA = (1 << 19),
+ CS_PORT_RCV_BUBBLE = (1 << 18),
+ CS_PORT_MARK_FECN = (1 << 17),
+ CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
+ CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
+ CS_PORT_XMIT_DISCARDS = (1 << 14),
+ CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
+ CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
+ CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
+ CS_PORT_RCV_ERRORS = (1 << 10),
+ CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
+ CS_FM_CONFIG_ERRORS = (1 << 8),
+ CS_LINK_ERROR_RECOVERY = (1 << 7),
+ CS_LINK_DOWNED = (1 << 6),
+ CS_UNCORRECTABLE_ERRORS = (1 << 5),
+};
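+
+/*
+ * The counter select mask is MSB-first; e.g. clearing only PortXmitData
+ * and PortRcvData corresponds to a counter_select_mask of
+ * CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA == 0xC0000000.
+ */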
+
+struct opa_clear_port_status {
+ __be64 port_select_mask[4];
+ __be32 counter_select_mask;
+};
+
+struct opa_aggregate {
+ __be16 attr_id;
+ __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
+ __be32 attr_mod;
+ u8 data[0];
+};
+
+/* Request contains first two fields, response contains those plus the rest */
+struct opa_port_data_counters_msg {
+ __be64 port_select_mask[4];
+ __be32 vl_select_mask;
+
+ /* Response fields follow */
+ __be32 reserved1;
+ struct _port_dctrs {
+ u8 port_number;
+ u8 reserved2[3];
+ __be32 link_quality_indicator; /* 29res, 3bit */
+
+ /* Data counters */
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_pkts;
+ __be64 port_rcv_pkts;
+ __be64 port_multicast_xmit_pkts;
+ __be64 port_multicast_rcv_pkts;
+ __be64 port_xmit_wait;
+ __be64 sw_port_congestion;
+ __be64 port_rcv_fecn;
+ __be64 port_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_xmit_wasted_bw;
+ __be64 port_xmit_wait_data;
+ __be64 port_rcv_bubble;
+ __be64 port_mark_fecn;
+
+ __be64 port_error_counter_summary;
+ /* Sum of error counts/port */
+
+ struct _vls_dctrs {
+ /* per-VL Data counters */
+ __be64 port_vl_xmit_data;
+ __be64 port_vl_rcv_data;
+ __be64 port_vl_xmit_pkts;
+ __be64 port_vl_rcv_pkts;
+ __be64 port_vl_xmit_wait;
+ __be64 sw_port_vl_congestion;
+ __be64 port_vl_rcv_fecn;
+ __be64 port_vl_rcv_becn;
+ __be64 port_xmit_time_cong;
+ __be64 port_vl_xmit_wasted_bw;
+ __be64 port_vl_xmit_wait_data;
+ __be64 port_vl_rcv_bubble;
+ __be64 port_vl_mark_fecn;
+ } vls[0];
+ /* array size defined by #bits set in vl_select_mask*/
+ } port[1]; /* array size defined by #ports in attribute modifier */
+};
+
+struct opa_port_error_counters64_msg {
+ /*
+ * Request contains the first two fields; the response contains
+ * the entire structure.
+ */
+ __be64 port_select_mask[4];
+ __be32 vl_select_mask;
+
+ /* Response-only fields follow */
+ __be32 reserved1;
+ struct _port_ectrs {
+ u8 port_number;
+ u8 reserved2[7];
+ __be64 port_rcv_constraint_errors;
+ __be64 port_rcv_switch_relay_errors;
+ __be64 port_xmit_discards;
+ __be64 port_xmit_constraint_errors;
+ __be64 port_rcv_remote_physical_errors;
+ __be64 local_link_integrity_errors;
+ __be64 port_rcv_errors;
+ __be64 excessive_buffer_overruns;
+ __be64 fm_config_errors;
+ __be32 link_error_recovery;
+ __be32 link_downed;
+ u8 uncorrectable_errors;
+ u8 reserved3[7];
+ struct _vls_ectrs {
+ __be64 port_vl_xmit_discards;
+ } vls[0];
+ /* array size defined by #bits set in vl_select_mask */
+ } port[1]; /* array size defined by #ports in attribute modifier */
+};
+
+struct opa_port_error_info_msg {
+ __be64 port_select_mask[4];
+ __be32 error_info_select_mask;
+ __be32 reserved1;
+ struct _port_ei {
+
+ u8 port_number;
+ u8 reserved2[7];
+
+ /* PortRcvErrorInfo */
+ struct {
+ u8 status_and_code;
+ union {
+ u8 raw[17];
+ struct {
+ /* EI1to12 format */
+ u8 packet_flit1[8];
+ u8 packet_flit2[8];
+ u8 remaining_flit_bits12;
+ } ei1to12;
+ struct {
+ u8 packet_bytes[8];
+ u8 remaining_flit_bits;
+ } ei13;
+ } ei;
+ u8 reserved3[6];
+ } __packed port_rcv_ei;
+
+ /* ExcessiveBufferOverrunInfo */
+ struct {
+ u8 status_and_sc;
+ u8 reserved4[7];
+ } __packed excessive_buffer_overrun_ei;
+
+ /* PortXmitConstraintErrorInfo */
+ struct {
+ u8 status;
+ u8 reserved5;
+ __be16 pkey;
+ __be32 slid;
+ } __packed port_xmit_constraint_ei;
+
+ /* PortRcvConstraintErrorInfo */
+ struct {
+ u8 status;
+ u8 reserved6;
+ __be16 pkey;
+ __be32 slid;
+ } __packed port_rcv_constraint_ei;
+
+ /* PortRcvSwitchRelayErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 reserved7[3];
+ __u32 error_info;
+ } __packed port_rcv_switch_relay_ei;
+
+ /* UncorrectableErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 reserved8;
+ } __packed uncorrectable_ei;
+
+ /* FMConfigErrorInfo */
+ struct {
+ u8 status_and_code;
+ u8 error_info;
+ } __packed fm_config_ei;
+ __u32 reserved9;
+ } port[1]; /* actual array size defined by #ports in attr modifier */
+};
+
+/* opa_port_error_info_msg error_info_select_mask bit definitions */
+enum error_info_selects {
+ ES_PORT_RCV_ERROR_INFO = (1 << 31),
+ ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
+ ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
+ ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
+ ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
+ ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
+ ES_FM_CONFIG_ERROR_INFO = (1 << 25)
+};
+
+static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u32 *resp_len)
+{
+ struct opa_class_port_info *p =
+ (struct opa_class_port_info *)pmp->data;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+
+ p->base_version = OPA_MGMT_BASE_VERSION;
+ p->class_version = OPA_SMI_CLASS_VERSION;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->cap_mask2_resp_time = cpu_to_be32(18);
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static void a0_portstatus(struct hfi1_pportdata *ppd,
+ struct opa_port_status_rsp *rsp, u32 vl_select_mask)
+{
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ int vfi = 0;
+ u64 max_vl_xmit_wait = 0, tmp;
+ u32 vl_all_mask = VL_MASK_ALL;
+ u64 rcv_data, rcv_bubble;
+
+ rcv_data = be64_to_cpu(rsp->port_rcv_data);
+ rcv_bubble = be64_to_cpu(rsp->port_rcv_bubble);
+ /* In the measured time period, calculate the total number
+ * of flits that were received. Subtract out one false
+ * rcv_bubble increment for every 32 received flits but
+ * don't let the number go negative.
+ */
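+ /*
+ * E.g. rcv_data = 3200 flits implies 100 false bubble increments;
+ * a reported rcv_bubble of 150 is adjusted down to 50, while a
+ * count below 100 is left alone rather than going negative.
+ */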
+ if (rcv_bubble >= (rcv_data>>5)) {
+ rcv_bubble -= (rcv_data>>5);
+ rsp->port_rcv_bubble = cpu_to_be64(rcv_bubble);
+ }
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(vl_select_mask)) {
+ rcv_data = be64_to_cpu(rsp->vls[vfi].port_vl_rcv_data);
+ rcv_bubble =
+ be64_to_cpu(rsp->vls[vfi].port_vl_rcv_bubble);
+ if (rcv_bubble >= (rcv_data>>5)) {
+ rcv_bubble -= (rcv_data>>5);
+ rsp->vls[vfi].port_vl_rcv_bubble =
+ cpu_to_be64(rcv_bubble);
+ }
+ vfi++;
+ }
+
+ for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+ 8 * sizeof(vl_all_mask)) {
+ tmp = read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+ if (tmp > max_vl_xmit_wait)
+ max_vl_xmit_wait = tmp;
+ }
+ rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait);
+ }
+}
+
+static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ struct opa_port_status_req *req =
+ (struct opa_port_status_req *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_port_status_rsp *rsp;
+ u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ unsigned long vl;
+ size_t response_data_size;
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u8 port_num = req->port_num;
+ u8 num_vls = hweight32(vl_select_mask);
+ struct _vls_pctrs *vlinfo;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ int vfi;
+ u64 tmp, tmp2;
+
+ response_data_size = sizeof(struct opa_port_status_rsp) +
+ num_vls * sizeof(struct _vls_pctrs);
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ if (nports != 1 || (port_num && port_num != port) ||
+ num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ rsp = (struct opa_port_status_rsp *)pmp->data;
+ if (port_num)
+ rsp->port_num = port_num;
+ else
+ rsp->port_num = port;
+
+ rsp->port_rcv_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL));
+
+ hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
+
+ rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_bubble =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL));
+ rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_xmit_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_wait =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
+ rsp->port_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
+ rsp->port_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
+ rsp->port_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
+ if (tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->local_link_integrity_errors = cpu_to_be64(~0);
+ } else {
+ rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
+ }
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->link_error_recovery = cpu_to_be32(~0);
+ } else {
+ rsp->link_error_recovery = cpu_to_be32(tmp2);
+ }
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
+ rsp->excessive_buffer_overruns =
+ cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+ rsp->fm_config_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL));
+ rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL));
+
+ /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+ rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
+
+ vlinfo = &(rsp->vls[0]);
+ vfi = 0;
+ /* The vl_select_mask has been checked above, and we know
+ * that it contains only entries which represent valid VLs.
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(vl_select_mask)) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
+ rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
+ rsp->vls[vfi].port_vl_rcv_bubble =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_pkts =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_wait =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
+ idx_from_vl(vl)));
+
+ vlinfo++;
+ vfi++;
+ }
+
+ a0_portstatus(ppd, rsp, vl_select_mask);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 error_counter_summary = 0, tmp;
+
+ error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL);
+ /* port_rcv_switch_relay_errors is 0 for HFIs */
+ error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_TX_REPLAY,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_SEQ_CRC_CNT,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
+ CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
+ error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL);
+ /* ppd->link_downed is a 32-bit value */
+ error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL);
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+ /* this is an 8-bit quantity */
+ error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
+
+ return error_counter_summary;
+}
+
+static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
+ u32 vl_select_mask)
+{
+ if (!is_bx(dd)) {
+ unsigned long vl;
+ int vfi = 0;
+ u64 rcv_data, rcv_bubble, sum_vl_xmit_wait = 0;
+
+ rcv_data = be64_to_cpu(rsp->port_rcv_data);
+ rcv_bubble = be64_to_cpu(rsp->port_rcv_bubble);
+ /* In the measured time period, calculate the total number
+ * of flits that were received. Subtract out one false
+ * rcv_bubble increment for every 32 received flits but
+ * don't let the number go negative.
+ */
+ if (rcv_bubble >= (rcv_data>>5)) {
+ rcv_bubble -= (rcv_data>>5);
+ rsp->port_rcv_bubble = cpu_to_be64(rcv_bubble);
+ }
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(vl_select_mask)) {
+ rcv_data = be64_to_cpu(rsp->vls[vfi].port_vl_rcv_data);
+ rcv_bubble =
+ be64_to_cpu(rsp->vls[vfi].port_vl_rcv_bubble);
+ if (rcv_bubble >= (rcv_data>>5)) {
+ rcv_bubble -= (rcv_data>>5);
+ rsp->vls[vfi].port_vl_rcv_bubble =
+ cpu_to_be64(rcv_bubble);
+ }
+ vfi++;
+ }
+ vfi = 0;
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(vl_select_mask)) {
+ u64 tmp = sum_vl_xmit_wait +
+ be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait);
+ if (tmp < sum_vl_xmit_wait) {
+ /* we wrapped */
+ sum_vl_xmit_wait = (u64) ~0;
+ break;
+ }
+ sum_vl_xmit_wait = tmp;
+ }
+ if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+ rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
+ }
+}
+
+static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ struct opa_port_data_counters_msg *req =
+ (struct opa_port_data_counters_msg *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct _port_dctrs *rsp;
+ struct _vls_dctrs *vlinfo;
+ size_t response_data_size;
+ u32 num_ports;
+ u8 num_pslm;
+ u8 lq, num_vls;
+ u64 port_mask;
+ unsigned long port_num;
+ unsigned long vl;
+ u32 vl_select_mask;
+ int vfi;
+
+ num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+ num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+
+ if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /* Sanity check */
+ response_data_size = sizeof(struct opa_port_data_counters_msg) +
+ num_vls * sizeof(struct _vls_dctrs);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the
+ * port the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if ((u8)port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ rsp = (struct _port_dctrs *)&(req->port[0]);
+ memset(rsp, 0, sizeof(*rsp));
+
+ rsp->port_number = port;
+ /*
+ * Note that link_quality_indicator is a 32 bit quantity in
+ * 'datacounters' queries (as opposed to 'portinfo' queries,
+ * where it's a byte).
+ */
+ hfi1_read_link_quality(dd, &lq);
+ rsp->link_quality_indicator = cpu_to_be32((u32)lq);
+
+ /* rsp->sw_port_congestion is 0 for HFIs */
+ /* rsp->port_xmit_time_cong is 0 for HFIs */
+ /* rsp->port_xmit_wasted_bw ??? */
+ /* rsp->port_xmit_wait_data ??? */
+ /* rsp->port_mark_fecn is 0 for HFIs */
+
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_bubble =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL));
+ rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_xmit_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_wait =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
+ rsp->port_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
+ rsp->port_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
+
+ rsp->port_error_counter_summary =
+ cpu_to_be64(get_error_counter_summary(ibdev, port));
+
+ vlinfo = &(rsp->vls[0]);
+ vfi = 0;
+ /* The vl_select_mask has been checked above, and we know
+ * that it contains only entries which represent valid VLs.
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(req->vl_select_mask)) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_data =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
+ idx_from_vl(vl)));
+ rsp->vls[vfi].port_vl_rcv_bubble =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_pkts =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_xmit_wait =
+ cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl)));
+
+ rsp->vls[vfi].port_vl_rcv_fecn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
+ idx_from_vl(vl)));
+ rsp->vls[vfi].port_vl_rcv_becn =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
+ idx_from_vl(vl)));
+
+ /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
+ /* rsp->port_vl_xmit_wasted_bw ??? */
+ /*
+ * port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
+ * Does this differ from rsp->vls[vfi].port_vl_xmit_wait?
+ */
+ /*
+ * rsp->vls[vfi].port_vl_mark_fecn =
+ *	cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
+ *		+ offset));
+ */
+ vlinfo++;
+ vfi++;
+ }
+
+ a0_datacounters(dd, rsp, vl_select_mask);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ size_t response_data_size;
+ struct _port_ectrs *rsp;
+ unsigned long port_num;
+ struct opa_port_error_counters64_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u32 num_ports;
+ u8 num_pslm;
+ u8 num_vls;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct _vls_ectrs *vlinfo;
+ unsigned long vl;
+ u64 port_mask, tmp, tmp2;
+ u32 vl_select_mask;
+ int vfi;
+
+ req = (struct opa_port_error_counters64_msg *)pmp->data;
+
+ num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+ num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ response_data_size = sizeof(struct opa_port_error_counters64_msg) +
+ num_vls * sizeof(struct _vls_ectrs);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+ /*
+ * The bit set in the mask needs to be consistent with the
+ * port the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if ((u8)port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ rsp = (struct _port_ectrs *)&(req->port[0]);
+
+ ibp = to_iport(ibdev, port_num);
+ ppd = ppd_from_ibp(ibp);
+
+ memset(rsp, 0, sizeof(*rsp));
+ rsp->port_number = (u8)port_num;
+
+ rsp->port_rcv_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL));
+ /* port_rcv_switch_relay_errors is 0 for HFIs */
+ rsp->port_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
+ if (tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->local_link_integrity_errors = cpu_to_be64(~0);
+ } else {
+ rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
+ }
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->link_error_recovery = cpu_to_be32(~0);
+ } else {
+ rsp->link_error_recovery = cpu_to_be32(tmp2);
+ }
+ rsp->port_xmit_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->excessive_buffer_overruns =
+ cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+ rsp->fm_config_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
+ CNTR_INVALID_VL));
+ rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+ rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
+
+ vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]);
+ vfi = 0;
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(req->vl_select_mask)) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+ /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
+ vlinfo += 1;
+ vfi++;
+ }
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ size_t response_data_size;
+ struct _port_ei *rsp;
+ struct opa_port_error_info_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u64 port_mask;
+ u32 num_ports;
+ unsigned long port_num;
+ u8 num_pslm;
+ u64 reg;
+
+ req = (struct opa_port_error_info_msg *)pmp->data;
+ rsp = (struct _port_ei *)&(req->port[0]);
+
+ num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+
+ memset(rsp, 0, sizeof(*rsp));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /* Sanity check */
+ response_data_size = sizeof(struct opa_port_error_info_msg);
+
+ if (response_data_size > sizeof(pmp->data)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the port
+ * the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+ sizeof(port_mask) * 8);
+
+ if ((u8)port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /* PortRcvErrorInfo */
+ rsp->port_rcv_ei.status_and_code =
+ dd->err_info_rcvport.status_and_code;
+ memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
+ &dd->err_info_rcvport.packet_flit1, sizeof(u64));
+ memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
+ &dd->err_info_rcvport.packet_flit2, sizeof(u64));
+
+ /* ExcessiveBufferOverrunInfo */
+ reg = read_csr(dd, RCV_ERR_INFO);
+ if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
+ /*
+ * If the RcvExcessBufferOverrun bit is set, save the SC of the
+ * first packet that encountered an excess buffer overrun.
+ */
+ u8 tmp = (u8)reg;
+
+ tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
+ tmp <<= 2;
+ rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
+ /* set the status bit */
+ rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
+ }
+
+ rsp->port_xmit_constraint_ei.status =
+ dd->err_info_xmit_constraint.status;
+ rsp->port_xmit_constraint_ei.pkey =
+ cpu_to_be16(dd->err_info_xmit_constraint.pkey);
+ rsp->port_xmit_constraint_ei.slid =
+ cpu_to_be32(dd->err_info_xmit_constraint.slid);
+
+ rsp->port_rcv_constraint_ei.status =
+ dd->err_info_rcv_constraint.status;
+ rsp->port_rcv_constraint_ei.pkey =
+ cpu_to_be16(dd->err_info_rcv_constraint.pkey);
+ rsp->port_rcv_constraint_ei.slid =
+ cpu_to_be32(dd->err_info_rcv_constraint.slid);
+
+ /* UncorrectableErrorInfo */
+ rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
+
+ /* FMConfigErrorInfo */
+ rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
+
+ if (resp_len)
+ *resp_len += response_data_size;
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ struct opa_clear_port_status *req =
+ (struct opa_clear_port_status *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u64 portn = be64_to_cpu(req->port_select_mask[3]);
+ u32 counter_select = be32_to_cpu(req->counter_select_mask);
+ u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+ unsigned long vl;
+
+ if ((nports != 1) || (portn != 1 << port)) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+ /*
+ * only counters returned by pma_get_opa_portstatus() are
+ * handled, so when pma_get_opa_portstatus() gets a fix,
+ * the corresponding change should be made here as well.
+ */
+
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_DATA)
+ write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_PKTS)
+ write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
+ write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_MCAST_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_WAIT)
+ write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
+
+ /* ignore cs_sw_portCongestion for HFIs */
+
+ if (counter_select & CS_PORT_RCV_FECN)
+ write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_BECN)
+ write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
+
+ /* ignore cs_port_xmit_time_cong for HFIs */
+ /* ignore cs_port_xmit_wasted_bw for now */
+ /* ignore cs_port_xmit_wait_data for now */
+ if (counter_select & CS_PORT_RCV_BUBBLE)
+ write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
+
+ /* Only applicable for switch */
+ /*if (counter_select & CS_PORT_MARK_FECN)
+ write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);*/
+
+ if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
+ write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
+
+ /* ignore cs_port_rcv_switch_relay_errors for HFIs */
+ if (counter_select & CS_PORT_XMIT_DISCARDS)
+ write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
+ write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
+ write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
+ write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
+ }
+
+ if (counter_select & CS_LINK_ERROR_RECOVERY) {
+ write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL, 0);
+ }
+
+ if (counter_select & CS_PORT_RCV_ERRORS)
+ write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
+ write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
+ dd->rcv_ovfl_cnt = 0;
+ }
+
+ if (counter_select & CS_FM_CONFIG_ERRORS)
+ write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_LINK_DOWNED)
+ write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
+
+ if (counter_select & CS_UNCORRECTABLE_ERRORS)
+ write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
+
+ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+ 8 * sizeof(vl_select_mask)) {
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_DATA)
+ write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_XMIT_PKTS)
+ write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_PKTS)
+ write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_XMIT_WAIT)
+ write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
+
+ /* sw_port_vl_congestion is 0 for HFIs */
+ if (counter_select & CS_PORT_RCV_FECN)
+ write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
+
+ if (counter_select & CS_PORT_RCV_BECN)
+ write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
+
+ /* port_vl_xmit_time_cong is 0 for HFIs */
+ /* port_vl_xmit_wasted_bw ??? */
+ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
+ if (counter_select & CS_PORT_RCV_BUBBLE)
+ write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
+
+ /*if (counter_select & CS_PORT_MARK_FECN)
+ write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
+ */
+ /* port_vl_xmit_discards ??? */
+ }
+
+ if (resp_len)
+ *resp_len += sizeof(*req);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port, u32 *resp_len)
+{
+ struct _port_ei *rsp;
+ struct opa_port_error_info_msg *req;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ u64 port_mask;
+ u32 num_ports;
+ unsigned long port_num;
+ u8 num_pslm;
+ u32 error_info_select;
+
+ req = (struct opa_port_error_info_msg *)pmp->data;
+ rsp = (struct _port_ei *)&(req->port[0]);
+
+ num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
+ num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
+
+ memset(rsp, 0, sizeof(*rsp));
+
+ if (num_ports != 1 || num_ports != num_pslm) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ /*
+ * The bit set in the mask needs to be consistent with the port
+ * the request came in on.
+ */
+ port_mask = be64_to_cpu(req->port_select_mask[3]);
+ port_num = find_first_bit((unsigned long *)&port_mask,
+				  sizeof(port_mask) * 8);
+
+ if ((u8)port_num != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ error_info_select = be32_to_cpu(req->error_info_select_mask);
+
+ /* PortRcvErrorInfo */
+ if (error_info_select & ES_PORT_RCV_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
+
+	/* ExcessiveBufferOverrunInfo */
+ if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
+		/*
+		 * status bit is essentially kept in the h/w - bit 5 of
+		 * RCV_ERR_INFO
+		 */
+ write_csr(dd, RCV_ERR_INFO,
+ RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
+
+ if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
+ dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
+
+ if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
+ dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
+
+ /* UncorrectableErrorInfo */
+ if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
+
+ /* FMConfigErrorInfo */
+ if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
+ /* turn off status bit */
+ dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
+
+ if (resp_len)
+ *resp_len += sizeof(*req);
+
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+struct opa_congestion_info_attr {
+ __be16 congestion_info;
+ u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
+ u8 congestion_log_length;
+} __packed;
+
+static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct opa_congestion_info_attr *p =
+ (struct opa_congestion_info_attr *)data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ p->congestion_info = 0;
+ p->control_table_cap = ppd->cc_max_table_entries;
+ p->congestion_log_length = OPA_CONG_LOG_ELEMS;
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
+ u8 *data,
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
+{
+ int i;
+ struct opa_congestion_setting_attr *p =
+ (struct opa_congestion_setting_attr *) data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_congestion_setting_entry_shadow *entries;
+ struct cc_state *cc_state;
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (cc_state == NULL) {
+ rcu_read_unlock();
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ entries = cc_state->cong_setting.entries;
+ p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
+ p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ p->entries[i].ccti_increase = entries[i].ccti_increase;
+ p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
+ p->entries[i].trigger_threshold =
+ entries[i].trigger_threshold;
+ p->entries[i].ccti_min = entries[i].ccti_min;
+ }
+
+ rcu_read_unlock();
+
+ if (resp_len)
+ *resp_len += sizeof(*p);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct opa_congestion_setting_attr *p =
+ (struct opa_congestion_setting_attr *) data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_congestion_setting_entry_shadow *entries;
+ int i;
+
+ ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
+
+ entries = ppd->congestion_entries;
+ for (i = 0; i < OPA_MAX_SLS; i++) {
+ entries[i].ccti_increase = p->entries[i].ccti_increase;
+ entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
+ entries[i].trigger_threshold =
+ p->entries[i].trigger_threshold;
+ entries[i].ccti_min = p->entries[i].ccti_min;
+ }
+
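+	/* reply with the values just stored by reusing the "get" handler */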
+ return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
+ resp_len);
+}
+
+static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
+ s64 ts;
+ int i;
+
+ if (am != 0) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ spin_lock(&ppd->cc_log_lock);
+
+ cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
+ cong_log->congestion_flags = 0;
+ cong_log->threshold_event_counter =
+ cpu_to_be16(ppd->threshold_event_counter);
+ memcpy(cong_log->threshold_cong_event_map,
+ ppd->threshold_cong_event_map,
+ sizeof(cong_log->threshold_cong_event_map));
+ /* keep timestamp in units of 1.024 usec */
+ ts = ktime_to_ns(ktime_get()) / 1024;
+ cong_log->current_time_stamp = cpu_to_be32(ts);
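+	/*
+	 * cc_events is a circular buffer; cc_mad_idx persists across
+	 * requests, so each read of the log picks up where the previous
+	 * one stopped.
+	 */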
+ for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
+ struct opa_hfi1_cong_log_event_internal *cce =
+ &ppd->cc_events[ppd->cc_mad_idx++];
+ if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
+ ppd->cc_mad_idx = 0;
+ /*
+ * Entries which are older than twice the time
+ * required to wrap the counter are supposed to
+ * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
+ */
+ if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
+ continue;
+ memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
+ memcpy(cong_log->events[i].remote_qp_number_cn_entry,
+ &cce->rqpn, 3);
+ cong_log->events[i].sl_svc_type_cn_entry =
+ ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
+ cong_log->events[i].remote_lid_cn_entry =
+ cpu_to_be32(cce->rlid);
+ cong_log->events[i].timestamp_cn_entry =
+ cpu_to_be32(cce->timestamp);
+ }
+
+ /*
+ * Reset threshold_cong_event_map, and threshold_event_counter
+ * to 0 when log is read.
+ */
+ memset(ppd->threshold_cong_event_map, 0x0,
+ sizeof(ppd->threshold_cong_event_map));
+ ppd->threshold_event_counter = 0;
+
+ spin_unlock(&ppd->cc_log_lock);
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_hfi1_cong_log);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct ib_cc_table_attr *cc_table_attr =
+ (struct ib_cc_table_attr *) data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 start_block = OPA_AM_START_BLK(am);
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct ib_cc_table_entry_shadow *entries;
+ int i, j;
+ u32 sentry, eentry;
+ struct cc_state *cc_state;
+
+ /* sanity check n_blocks, start_block */
+ if (n_blocks == 0 ||
+ start_block + n_blocks > ppd->cc_max_table_entries) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ rcu_read_lock();
+
+ cc_state = get_cc_state(ppd);
+
+ if (cc_state == NULL) {
+ rcu_read_unlock();
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ sentry = start_block * IB_CCT_ENTRIES;
+ eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
+
+ cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
+
+ entries = cc_state->cct.entries;
+
+ /* return n_blocks, though the last block may not be full */
+ for (j = 0, i = sentry; i < eentry; j++, i++)
+ cc_table_attr->ccti_entries[j].entry =
+ cpu_to_be16(entries[i].entry);
+
+ rcu_read_unlock();
+
+ if (resp_len)
+		*resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+void cc_state_reclaim(struct rcu_head *rcu)
+{
+ struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
+
+ kfree(cc_state);
+}
+
+static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 start_block = OPA_AM_START_BLK(am);
+ u32 n_blocks = OPA_AM_NBLK(am);
+ struct ib_cc_table_entry_shadow *entries;
+ int i, j;
+ u32 sentry, eentry;
+ u16 ccti_limit;
+ struct cc_state *old_cc_state, *new_cc_state;
+
+ /* sanity check n_blocks, start_block */
+ if (n_blocks == 0 ||
+ start_block + n_blocks > ppd->cc_max_table_entries) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ sentry = start_block * IB_CCT_ENTRIES;
+ eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
+ (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
+
+ /* sanity check ccti_limit */
+ ccti_limit = be16_to_cpu(p->ccti_limit);
+ if (ccti_limit + 1 > eentry) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
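+	/*
+	 * If the allocation fails, skip the update and simply report the
+	 * current table back to the requester.
+	 */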
+ new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
+ if (new_cc_state == NULL)
+ goto getit;
+
+ spin_lock(&ppd->cc_state_lock);
+
+ old_cc_state = get_cc_state(ppd);
+
+ if (old_cc_state == NULL) {
+ spin_unlock(&ppd->cc_state_lock);
+ kfree(new_cc_state);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ *new_cc_state = *old_cc_state;
+
+ new_cc_state->cct.ccti_limit = ccti_limit;
+
+ entries = ppd->ccti_entries;
+ ppd->total_cct_entry = ccti_limit + 1;
+
+ for (j = 0, i = sentry; i < eentry; j++, i++)
+ entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
+
+ memcpy(new_cc_state->cct.entries, entries,
+ eentry * sizeof(struct ib_cc_table_entry));
+
+ new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
+ new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
+ memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
+ OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
+
+ rcu_assign_pointer(ppd->cc_state, new_cc_state);
+
+ spin_unlock(&ppd->cc_state_lock);
+
+ call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+
+getit:
+ return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
+}
+
+struct opa_led_info {
+ __be32 rsvd_led_mask;
+ __be32 rsvd;
+};
+
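+/* the LED enable flag occupies the most significant bit of rsvd_led_mask */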
+#define OPA_LED_SHIFT 31
+#define OPA_LED_MASK	(1U << OPA_LED_SHIFT)
+
+static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_led_info *p = (struct opa_led_info *) data;
+ u32 nport = OPA_AM_NPORT(am);
+ u64 reg;
+
+ if (nport != 1 || OPA_AM_PORTNUM(am)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ reg = read_csr(dd, DCC_CFG_LED_CNTRL);
+ if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) &&
+ ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf))
+ p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
+
+ if (resp_len)
+ *resp_len += sizeof(struct opa_led_info);
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_led_info *p = (struct opa_led_info *) data;
+ u32 nport = OPA_AM_NPORT(am);
+ int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
+
+ if (nport != 1 || OPA_AM_PORTNUM(am)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ setextled(dd, on);
+
+ return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
+}
+
+static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ int ret;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ switch (attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SL_TO_SC_MAP:
+ ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_SL_MAP:
+ ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
+ ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
+ ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_PORT_STATE_INFO:
+ ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
+ ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_CABLE_INFO:
+ ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_INFO:
+ ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
+ ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
+ port, resp_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
+ ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
+ port, resp_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
+ ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_LED_INFO:
+ ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ if (ibp->port_cap_flags & IB_PORT_SM)
+ return IB_MAD_RESULT_SUCCESS;
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+ return ret;
+}
+
+static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
+ u8 *data, struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ int ret;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ switch (attr_id) {
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SL_TO_SC_MAP:
+ ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_SL_MAP:
+ ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
+ ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
+ ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_PORT_STATE_INFO:
+ ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
+ ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
+ ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
+ port, resp_len);
+ break;
+ case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
+ ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_LED_INFO:
+ ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
+ resp_len);
+ break;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ if (ibp->port_cap_flags & IB_PORT_SM)
+ return IB_MAD_RESULT_SUCCESS;
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ break;
+ }
+ return ret;
+}
+
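+/* flag an error in an aggregate segment (top bit of err_reqlength) */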
+static inline void set_aggr_error(struct opa_aggregate *ag)
+{
+ ag->err_reqlength |= cpu_to_be16(0x8000);
+}
+
+static int subn_get_opa_aggregate(struct opa_smp *smp,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ int i;
+ u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
+ u8 *next_smp = opa_get_smp_data(smp);
+
+ if (num_attr < 1 || num_attr > 117) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < num_attr; i++) {
+ struct opa_aggregate *agg;
+ size_t agg_data_len;
+ size_t agg_size;
+ u32 am;
+
+ agg = (struct opa_aggregate *)next_smp;
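+		/* low 7 bits of err_reqlength: request length in 8-byte units */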
+ agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
+ agg_size = sizeof(*agg) + agg_data_len;
+ am = be32_to_cpu(agg->attr_mod);
+
+ *resp_len += agg_size;
+
+ if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ /* zero the payload for this segment */
+ memset(next_smp + sizeof(*agg), 0, agg_data_len);
+
+ (void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
+ ibdev, port, NULL);
+ if (smp->status & ~IB_SMP_DIRECTION) {
+ set_aggr_error(agg);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ next_smp += agg_size;
+ }
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+static int subn_set_opa_aggregate(struct opa_smp *smp,
+ struct ib_device *ibdev, u8 port,
+ u32 *resp_len)
+{
+ int i;
+ u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
+ u8 *next_smp = opa_get_smp_data(smp);
+
+ if (num_attr < 1 || num_attr > 117) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ for (i = 0; i < num_attr; i++) {
+ struct opa_aggregate *agg;
+ size_t agg_data_len;
+ size_t agg_size;
+ u32 am;
+
+ agg = (struct opa_aggregate *)next_smp;
+ agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
+ agg_size = sizeof(*agg) + agg_data_len;
+ am = be32_to_cpu(agg->attr_mod);
+
+ *resp_len += agg_size;
+
+ if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
+ (void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
+ ibdev, port, NULL);
+ if (smp->status & ~IB_SMP_DIRECTION) {
+ set_aggr_error(agg);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ next_smp += agg_size;
+ }
+
+ return reply((struct ib_mad_hdr *)smp);
+}
+
+/*
+ * OPAv1 specifies that, on the transition to link up, these counters
+ * are cleared:
+ * PortRcvErrors [*]
+ * LinkErrorRecovery
+ * LocalLinkIntegrityErrors
+ * ExcessiveBufferOverruns [*]
+ *
+ * [*] Error info associated with these counters is retained, but the
+ * error info status is reset to 0.
+ */
+void clear_linkup_counters(struct hfi1_devdata *dd)
+{
+ /* PortRcvErrors */
+ write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
+ dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
+ /* LinkErrorRecovery */
+ write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
+ /* LocalLinkIntegrityErrors */
+ write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
+ write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
+ /* ExcessiveBufferOverruns */
+ write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
+ dd->rcv_ovfl_cnt = 0;
+ dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
+}
+
+/*
+ * is_local_mad() returns 1 if 'mad' was both sent from and destined to
+ * the local node, and 0 otherwise.
+ */
+static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
+ const struct ib_wc *in_wc)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ const struct opa_smp *smp = (const struct opa_smp *)mad;
+
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ return (smp->hop_cnt == 0 &&
+ smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
+ smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
+ }
+
+ return (in_wc->slid == ppd->lid);
+}
+
+/*
+ * opa_local_smp_check() should only be called on MADs for which
+ * is_local_mad() returns true. It applies the SMP checks that are
+ * specific to SMPs which are sent from, and destined to this node.
+ * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
+ * otherwise.
+ *
+ * SMPs which arrive from other nodes are instead checked by
+ * opa_smp_check().
+ */
+static int opa_local_smp_check(struct hfi1_ibport *ibp,
+ const struct ib_wc *in_wc)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 slid = in_wc->slid;
+ u16 pkey;
+
+ if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
+ return 1;
+
+ pkey = ppd->pkeys[in_wc->pkey_index];
+ /*
+ * We need to do the "node-local" checks specified in OPAv1,
+ * rev 0.90, section 9.10.26, which are:
+ * - pkey is 0x7fff, or 0xffff
+ * - Source QPN == 0 || Destination QPN == 0
+ * - the MAD header's management class is either
+ * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
+ * IB_MGMT_CLASS_SUBN_LID_ROUTED
+ * - SLID != 0
+ *
+ * However, we know (and so don't need to check again) that,
+ * for local SMPs, the MAD stack passes MADs with:
+ * - Source QPN of 0
+ * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
+ * our own port's lid
+ *
+ */
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+ return 0;
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+}
+
+static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
+ u8 port, const struct opa_mad *in_mad,
+ struct opa_mad *out_mad,
+ u32 *resp_len)
+{
+ struct opa_smp *smp = (struct opa_smp *)out_mad;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ u8 *data;
+ u32 am;
+ __be16 attr_id;
+ int ret;
+
+ *out_mad = *in_mad;
+ data = opa_get_smp_data(smp);
+
+ am = be32_to_cpu(smp->attr_mod);
+ attr_id = smp->attr_id;
+ if (smp->class_version != OPA_SMI_CLASS_VERSION) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)smp);
+ goto bail;
+ }
+ ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
+ smp->route.dr.dr_slid, smp->route.dr.return_path,
+ smp->hop_cnt);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, we already check the
+ * M_Key if the MAD is for another port and the M_Key
+ * is OK on the receiving port. This check is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void) check_mkey(to_iport(ibdev, port_num),
+ (struct ib_mad_hdr *)smp, 0,
+ smp->mkey, smp->route.dr.dr_slid,
+ smp->route.dr.return_path,
+ smp->hop_cnt);
+ ret = IB_MAD_RESULT_FAILURE;
+ goto bail;
+ }
+
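+	/*
+	 * Start with the size of the SMP header; each attribute handler
+	 * adds the size of the data it returns.
+	 */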
+ *resp_len = opa_get_smp_header_size(smp);
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (attr_id) {
+ default:
+ clear_opa_smp_data(smp);
+ ret = subn_get_opa_sma(attr_id, smp, am, data,
+ ibdev, port, resp_len);
+ goto bail;
+ case OPA_ATTRIB_ID_AGGREGATE:
+ ret = subn_get_opa_aggregate(smp, ibdev, port,
+ resp_len);
+ goto bail;
+ }
+ case IB_MGMT_METHOD_SET:
+ switch (attr_id) {
+ default:
+ ret = subn_set_opa_sma(attr_id, smp, am, data,
+ ibdev, port, resp_len);
+ goto bail;
+ case OPA_ATTRIB_ID_AGGREGATE:
+ ret = subn_set_opa_aggregate(smp, ibdev, port,
+ resp_len);
+ goto bail;
+ }
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)smp);
+ }
+
+bail:
+ return ret;
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u8 port, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)smp);
+ goto bail;
+ }
+
+ ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
+ smp->mkey, (__force __be32)smp->dr_slid,
+ smp->return_path, smp->hop_cnt);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, we already check the
+ * M_Key if the MAD is for another port and the M_Key
+ * is OK on the receiving port. This check is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void) check_mkey(to_iport(ibdev, port_num),
+ (struct ib_mad_hdr *)smp, 0,
+ smp->mkey,
+ (__force __be32)smp->dr_slid,
+ smp->return_path, smp->hop_cnt);
+ ret = IB_MAD_RESULT_FAILURE;
+ goto bail;
+ }
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = subn_get_nodeinfo(smp, ibdev, port);
+ goto bail;
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)smp);
+ goto bail;
+ }
+ }
+
+bail:
+ return ret;
+}
+
+static int process_perf_opa(struct ib_device *ibdev, u8 port,
+ const struct opa_mad *in_mad,
+ struct opa_mad *out_mad, u32 *resp_len)
+{
+ struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+
+ if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
+ return reply((struct ib_mad_hdr *)pmp);
+ }
+
+ *resp_len = sizeof(pmp->mad_hdr);
+
+ switch (pmp->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->mad_hdr.attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
+ goto bail;
+ case OPA_PM_ATTRIB_ID_PORT_STATUS:
+ ret = pma_get_opa_portstatus(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
+ ret = pma_get_opa_datacounters(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
+ ret = pma_get_opa_porterrors(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ case OPA_PM_ATTRIB_ID_ERROR_INFO:
+ ret = pma_get_opa_errorinfo(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->mad_hdr.attr_id) {
+ case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
+ ret = pma_set_opa_portstatus(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ case OPA_PM_ATTRIB_ID_ERROR_INFO:
+ ret = pma_set_opa_errorinfo(pmp, ibdev, port,
+ resp_len);
+ goto bail;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ }
+
+bail:
+ return ret;
+}
+
+static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct opa_mad *in_mad,
+ struct opa_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ int ret;
+ int pkey_idx;
+ u32 resp_len = 0;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
+ if (pkey_idx < 0) {
+ pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
+ hfi1_get_pkey(ibp, 1));
+ pkey_idx = 1;
+ }
+ *out_mad_pkey_index = (u16)pkey_idx;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ if (is_local_mad(ibp, in_mad, in_wc)) {
+ ret = opa_local_smp_check(ibp, in_wc);
+ if (ret)
+ return IB_MAD_RESULT_FAILURE;
+ }
+ ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
+ out_mad, &resp_len);
+ goto bail;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf_opa(ibdev, port, in_mad, out_mad,
+ &resp_len);
+ goto bail;
+
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
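+	/*
+	 * A reply is sized from the accumulated response length, rounded
+	 * up to a multiple of 8 bytes; any other successful result echoes
+	 * the size of the incoming MAD.
+	 */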
+bail:
+ if (ret & IB_MAD_RESULT_REPLY)
+ *out_mad_size = round_up(resp_len, 8);
+ else if (ret & IB_MAD_RESULT_SUCCESS)
+ *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
+
+ return ret;
+}
+
+static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ int ret;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ goto bail;
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @in_mad_size: size of the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: P_Key index to use for the outgoing MAD
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad, size_t in_mad_size,
+ struct ib_mad_hdr *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ switch (in_mad->base_version) {
+ case OPA_MGMT_BASE_VERSION:
+ if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
+ dev_err(ibdev->dma_device, "invalid in_mad_size\n");
+ return IB_MAD_RESULT_FAILURE;
+ }
+ return hfi1_process_opa_mad(ibdev, mad_flags, port,
+ in_wc, in_grh,
+ (struct opa_mad *)in_mad,
+ (struct opa_mad *)out_mad,
+ out_mad_size,
+ out_mad_pkey_index);
+ case IB_MGMT_BASE_VERSION:
+ return hfi1_process_ib_mad(ibdev, mad_flags, port,
+ in_wc, in_grh,
+ (const struct ib_mad *)in_mad,
+ (struct ib_mad *)out_mad);
+ default:
+ break;
+ }
+
+ return IB_MAD_RESULT_FAILURE;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+int hfi1_create_agents(struct hfi1_ibdev *dev)
+{
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct hfi1_ibport *ibp;
+ int p;
+ int ret;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL, 0);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ ibp->send_agent = agent;
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ }
+
+ return ret;
+}
+
+void hfi1_free_agents(struct hfi1_ibdev *dev)
+{
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct hfi1_ibport *ibp;
+ int p;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (ibp->sm_ah) {
+ ib_destroy_ah(&ibp->sm_ah->ibah);
+ ibp->sm_ah = NULL;
+ }
+ }
+}
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h
new file mode 100644
index 000000000000..47457501c044
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mad.h
@@ -0,0 +1,325 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _HFI1_MAD_H
+#define _HFI1_MAD_H
+
+#include <rdma/ib_pma.h>
+#define USE_PI_LED_ENABLE 1 /* use led enabled bit in struct
+ * opa_port_states, if available */
+#include <rdma/opa_smi.h>
+#include <rdma/opa_port_info.h>
+#ifndef PI_LED_ENABLE_SUP
+#define PI_LED_ENABLE_SUP 0
+#endif
+#include "opa_compat.h"
+
+#define IB_VLARB_LOWPRI_0_31 1
+#define IB_VLARB_LOWPRI_32_63 2
+#define IB_VLARB_HIGHPRI_0_31 3
+#define IB_VLARB_HIGHPRI_32_63 4
+
+#define OPA_MAX_PREEMPT_CAP 32
+#define OPA_VLARB_LOW_ELEMENTS 0
+#define OPA_VLARB_HIGH_ELEMENTS 1
+#define OPA_VLARB_PREEMPT_ELEMENTS 2
+#define OPA_VLARB_PREEMPT_MATRIX 3
+
+#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
+
+struct ib_pma_portcounters_cong {
+ u8 reserved;
+ u8 reserved1;
+ __be16 port_check_rate;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved2;
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
+ __be16 reserved3;
+ __be16 vl15_dropped;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_xmit_wait;
+ __be64 port_adr_events;
+} __packed;
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+#define HFI1_XMIT_RATE_UNSUPPORTED 0x0
+#define HFI1_XMIT_RATE_PICO 0x7
+/* number of 4nsec cycles equaling 2secs */
+#define HFI1_CONG_TIMER_PSINTERVAL 0x1DCD64EC
+
+#define IB_CC_SVCTYPE_RC 0x0
+#define IB_CC_SVCTYPE_UC 0x1
+#define IB_CC_SVCTYPE_RD 0x2
+#define IB_CC_SVCTYPE_UD 0x3
+
+/*
+ * There should be an equivalent IB #define for the following, but
+ * I cannot find it.
+ */
+#define OPA_CC_LOG_TYPE_HFI 2
+
+struct opa_hfi1_cong_log_event_internal {
+ u32 lqpn;
+ u32 rqpn;
+ u8 sl;
+ u8 svc_type;
+ u32 rlid;
+ s64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
+};
+
+struct opa_hfi1_cong_log_event {
+ u8 local_qp_cn_entry[3];
+ u8 remote_qp_number_cn_entry[3];
+ u8 sl_svc_type_cn_entry; /* 5 bits SL, 3 bits svc type */
+ u8 reserved;
+ __be32 remote_lid_cn_entry;
+ __be32 timestamp_cn_entry;
+} __packed;
+
+#define OPA_CONG_LOG_ELEMS 96
+
+struct opa_hfi1_cong_log {
+ u8 log_type;
+ u8 congestion_flags;
+ __be16 threshold_event_counter;
+ __be32 current_time_stamp;
+ u8 threshold_cong_event_map[OPA_MAX_SLS/8];
+ struct opa_hfi1_cong_log_event events[OPA_CONG_LOG_ELEMS];
+} __packed;
+
+#define IB_CC_TABLE_CAP_DEFAULT 31
+
+/* Port control flags */
+#define IB_CC_CCS_PC_SL_BASED 0x01
+
+struct opa_congestion_setting_entry {
+ u8 ccti_increase;
+ u8 reserved;
+ __be16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct opa_congestion_setting_entry_shadow {
+ u8 ccti_increase;
+ u8 reserved;
+ u16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct opa_congestion_setting_attr {
+ __be32 control_map;
+ __be16 port_control;
+ struct opa_congestion_setting_entry entries[OPA_MAX_SLS];
+} __packed;
+
+struct opa_congestion_setting_attr_shadow {
+ u32 control_map;
+ u16 port_control;
+ struct opa_congestion_setting_entry_shadow entries[OPA_MAX_SLS];
+} __packed;
+
+#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
+#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
+
+/* 64 Congestion Control table entries in a single MAD */
+#define IB_CCT_ENTRIES 64
+#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
+
+struct ib_cc_table_entry {
+ __be16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_entry_shadow {
+ u16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_attr {
+ __be16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+struct ib_cc_table_attr_shadow {
+ u16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+#define CC_TABLE_SHADOW_MAX \
+ (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
+
+struct cc_table_shadow {
+ u16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
+} __packed;
+
+/*
+ * struct cc_state combines the (active) per-port congestion control
+ * table, and the (active) per-SL congestion settings. cc_state data
+ * may need to be read in code paths that we want to be fast, so it
+ * is an RCU protected structure.
+ */
+struct cc_state {
+ struct rcu_head rcu;
+ struct cc_table_shadow cct;
+ struct opa_congestion_setting_attr_shadow cong_setting;
+};
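+/*
+ * Readers pick up the active cc_state under rcu_read_lock() via
+ * get_cc_state(); updaters publish a new copy with rcu_assign_pointer()
+ * and free the old one from an RCU callback (see __subn_set_opa_cc_table()).
+ */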
+
+/*
+ * OPA BufferControl MAD
+ */
+
+/* attribute modifier macros */
+#define OPA_AM_NPORT_SHIFT 24
+#define OPA_AM_NPORT_MASK 0xff
+#define OPA_AM_NPORT_SMASK (OPA_AM_NPORT_MASK << OPA_AM_NPORT_SHIFT)
+#define OPA_AM_NPORT(am) (((am) >> OPA_AM_NPORT_SHIFT) & \
+ OPA_AM_NPORT_MASK)
+
+#define OPA_AM_NBLK_SHIFT 24
+#define OPA_AM_NBLK_MASK 0xff
+#define OPA_AM_NBLK_SMASK (OPA_AM_NBLK_MASK << OPA_AM_NBLK_SHIFT)
+#define OPA_AM_NBLK(am) (((am) >> OPA_AM_NBLK_SHIFT) & \
+ OPA_AM_NBLK_MASK)
+
+#define OPA_AM_START_BLK_SHIFT 0
+#define OPA_AM_START_BLK_MASK 0xff
+#define OPA_AM_START_BLK_SMASK (OPA_AM_START_BLK_MASK << \
+ OPA_AM_START_BLK_SHIFT)
+#define OPA_AM_START_BLK(am) (((am) >> OPA_AM_START_BLK_SHIFT) & \
+ OPA_AM_START_BLK_MASK)
+
+#define OPA_AM_PORTNUM_SHIFT 0
+#define OPA_AM_PORTNUM_MASK 0xff
+#define OPA_AM_PORTNUM_SMASK (OPA_AM_PORTNUM_MASK << OPA_AM_PORTNUM_SHIFT)
+#define OPA_AM_PORTNUM(am) (((am) >> OPA_AM_PORTNUM_SHIFT) & \
+ OPA_AM_PORTNUM_MASK)
+
+#define OPA_AM_ASYNC_SHIFT 12
+#define OPA_AM_ASYNC_MASK 0x1
+#define OPA_AM_ASYNC_SMASK (OPA_AM_ASYNC_MASK << OPA_AM_ASYNC_SHIFT)
+#define OPA_AM_ASYNC(am) (((am) >> OPA_AM_ASYNC_SHIFT) & \
+ OPA_AM_ASYNC_MASK)
+
+#define OPA_AM_START_SM_CFG_SHIFT 9
+#define OPA_AM_START_SM_CFG_MASK 0x1
+#define OPA_AM_START_SM_CFG_SMASK (OPA_AM_START_SM_CFG_MASK << \
+ OPA_AM_START_SM_CFG_SHIFT)
+#define OPA_AM_START_SM_CFG(am) (((am) >> OPA_AM_START_SM_CFG_SHIFT) \
+ & OPA_AM_START_SM_CFG_MASK)
+
+#define OPA_AM_CI_ADDR_SHIFT 19
+#define OPA_AM_CI_ADDR_MASK 0xfff
+#define OPA_AM_CI_ADDR_SMASK (OPA_AM_CI_ADDR_MASK << OPA_AM_CI_ADDR_SHIFT)
+#define OPA_AM_CI_ADDR(am) (((am) >> OPA_AM_CI_ADDR_SHIFT) & \
+ OPA_AM_CI_ADDR_MASK)
+
+#define OPA_AM_CI_LEN_SHIFT 13
+#define OPA_AM_CI_LEN_MASK 0x3f
+#define OPA_AM_CI_LEN_SMASK (OPA_AM_CI_LEN_MASK << OPA_AM_CI_LEN_SHIFT)
+#define OPA_AM_CI_LEN(am) (((am) >> OPA_AM_CI_LEN_SHIFT) & \
+ OPA_AM_CI_LEN_MASK)
+
+/* error info macros */
+#define OPA_EI_STATUS_SMASK 0x80
+#define OPA_EI_CODE_SMASK 0x0f
+
+struct vl_limit {
+ __be16 dedicated;
+ __be16 shared;
+};
+
+struct buffer_control {
+ __be16 reserved;
+ __be16 overall_shared_limit;
+ struct vl_limit vl[OPA_MAX_VLS];
+};
+
+struct sc2vlnt {
+ u8 vlnt[32]; /* 5 bit VL, 3 bits reserved */
+};
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
+#define COUNTER_MASK0_9 \
+ cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
+
+#endif /* _HFI1_MAD_H */
diff --git a/drivers/staging/rdma/hfi1/mmap.c b/drivers/staging/rdma/hfi1/mmap.c
new file mode 100644
index 000000000000..5173b1c60b3d
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mmap.c
@@ -0,0 +1,192 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "verbs.h"
+
+/**
+ * hfi1_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct hfi1_mmap_info
+ */
+void hfi1_release_mmap_info(struct kref *ref)
+{
+ struct hfi1_mmap_info *ip =
+ container_of(ref, struct hfi1_mmap_info, ref);
+ struct hfi1_ibdev *dev = to_idev(ip->context->device);
+
+ spin_lock_irq(&dev->pending_lock);
+ list_del(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the object is mapped,
+ * to avoid releasing it while a mapping is still in use.
+ */
+static void hfi1_vma_open(struct vm_area_struct *vma)
+{
+ struct hfi1_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void hfi1_vma_close(struct vm_area_struct *vma)
+{
+ struct hfi1_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, hfi1_release_mmap_info);
+}
+
+static const struct vm_operations_struct hfi1_vm_ops = {
+ .open = hfi1_vma_open,
+ .close = hfi1_vma_close,
+};
+
+/**
+ * hfi1_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct hfi1_ibdev *dev = to_idev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct hfi1_mmap_info *ip, *pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&dev->pending_lock);
+ list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ pending_mmaps) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (__u64) offset != ip->offset)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &hfi1_vm_ops;
+ vma->vm_private_data = ip;
+ hfi1_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&dev->pending_lock);
+done:
+ return ret;
+}
+
+/*
+ * Allocate information for hfi1_mmap
+ */
+struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
+					     u32 size,
+					     struct ib_ucontext *context,
+					     void *obj)
+{
+ struct hfi1_mmap_info *ip;
+
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ goto bail;
+
+ size = PAGE_ALIGN(size);
+
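+	/*
+	 * Hand out the next chunk of the device-wide mmap offset space;
+	 * user space passes this offset back through mmap(), where it is
+	 * matched against ip->offset.
+	 */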
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+bail:
+ return ip;
+}
+
+void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
+ u32 size, void *obj)
+{
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ ip->size = size;
+ ip->obj = obj;
+}
diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c
new file mode 100644
index 000000000000..bd64e4f986f9
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mr.c
@@ -0,0 +1,551 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+
+#include "hfi.h"
+
+/* Fast memory region */
+struct hfi1_fmr {
+ struct ib_fmr ibfmr;
+ struct hfi1_mregion mr; /* must be last */
+};
+
+static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct hfi1_fmr, ibfmr);
+}
+
+static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
+ int count)
+{
+ int m, i = 0;
+ int rval = 0;
+
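+	/* each map entry covers HFI1_SEGSZ segments; round up */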
+ m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ for (; i < m; i++) {
+ mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+ if (!mr->map[i])
+ goto bail;
+ }
+ mr->mapsz = m;
+ init_completion(&mr->comp);
+	/* hold a reference for the pointer returned to the caller */
+ atomic_set(&mr->refcount, 1);
+ mr->pd = pd;
+ mr->max_segs = count;
+out:
+ return rval;
+bail:
+ while (i)
+ kfree(mr->map[--i]);
+ rval = -ENOMEM;
+ goto out;
+}
+
+static void deinit_mregion(struct hfi1_mregion *mr)
+{
+ int i = mr->mapsz;
+
+ mr->mapsz = 0;
+ while (i)
+ kfree(mr->map[--i]);
+}
+
+/**
+ * hfi1_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see dma.c).
+ */
+struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct hfi1_mr *mr = NULL;
+ struct ib_mr *ret;
+ int rval;
+
+ if (to_ipd(pd)->user) {
+ ret = ERR_PTR(-EPERM);
+ goto bail;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
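+	/* a DMA MR covers all of memory; no segment maps are needed */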
+ rval = init_mregion(&mr->mr, pd, 0);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail;
+ }
+
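+	/* the second argument selects the reserved DMA lkey */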
+ rval = hfi1_alloc_lkey(&mr->mr, 1);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail_mregion;
+ }
+
+ mr->mr.access_flags = acc;
+ ret = &mr->ibmr;
+done:
+ return ret;
+
+bail_mregion:
+ deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ goto done;
+}
+
+static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
+{
+ struct hfi1_mr *mr;
+ int rval = -ENOMEM;
+ int m;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+ if (!mr)
+ goto bail;
+
+ rval = init_mregion(&mr->mr, pd, count);
+ if (rval)
+ goto bail;
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ rval = hfi1_alloc_lkey(&mr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+done:
+ return mr;
+
+bail_mregion:
+ deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ mr = ERR_PTR(rval);
+ goto done;
+}
+
+/**
+ * hfi1_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @acc: access flags for this memory region
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ struct hfi1_mr *mr;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ mr = alloc_mr(num_phys_buf, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
+ goto bail;
+ }
+
+ mr->mr.user_base = *iova_start;
+ mr->mr.iova = *iova_start;
+ mr->mr.access_flags = acc;
+
+ m = 0;
+ n = 0;
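+	/* copy the buffer list into the segment map, HFI1_SEGSZ per chunk */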
+ for (i = 0; i < num_phys_buf; i++) {
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
+ mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+ mr->mr.length += buffer_list[i].size;
+ n++;
+ if (n == HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: the virtual address passed over IB which maps to this MR
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct hfi1_mr *mr;
+ struct ib_umem *umem;
+ struct scatterlist *sg;
+ int n, m, entry;
+ struct ib_mr *ret;
+
+ if (length == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *) umem;
+
+ n = umem->nmap;
+
+ mr = alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
+ ib_umem_release(umem);
+ goto bail;
+ }
+
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = ib_umem_offset(umem);
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ if (is_power_of_2(umem->page_size))
+ mr->mr.page_shift = ilog2(umem->page_size);
+ m = 0;
+ n = 0;
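+	/* record a kernel virtual address for each pinned page */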
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by hfi1_get_dma_mr()
+ * or hfi1_reg_user_mr().
+ */
+int hfi1_dereg_mr(struct ib_mr *ibmr)
+{
+ struct hfi1_mr *mr = to_imr(ibmr);
+ int ret = 0;
+ unsigned long timeout;
+
+ hfi1_free_lkey(&mr->mr);
+
+ hfi1_put_mr(&mr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&mr->mr.comp,
+ 5 * HZ);
+ if (!timeout) {
+ dd_dev_err(
+ dd_from_ibdev(mr->mr.pd->device),
+ "hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
+ mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
+ hfi1_get_mr(&mr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ deinit_mregion(&mr->mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+out:
+ return ret;
+}
+
+/*
+ * Allocate a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ *
+ * Return the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct hfi1_mr *mr;
+
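+	/* only regular memory registration is supported */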
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = alloc_mr(max_num_sg, pd);
+ if (IS_ERR(mr))
+ return (struct ib_mr *)mr;
+
+ return &mr->ibmr;
+}
+
+struct ib_fast_reg_page_list *
+hfi1_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+{
+ unsigned size = page_list_len * sizeof(u64);
+ struct ib_fast_reg_page_list *pl;
+
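+	/* the whole page list must fit in one page */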
+ if (size > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+ if (!pl)
+ return ERR_PTR(-ENOMEM);
+
+ pl->page_list = kzalloc(size, GFP_KERNEL);
+ if (!pl->page_list)
+ goto err_free;
+
+ return pl;
+
+err_free:
+ kfree(pl);
+ return ERR_PTR(-ENOMEM);
+}
+
+void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+{
+ kfree(pl->page_list);
+ kfree(pl);
+}
+
+/**
+ * hfi1_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct hfi1_fmr *fmr;
+ int m;
+ struct ib_fmr *ret;
+ int rval = -ENOMEM;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
+ fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
+ if (rval)
+ goto bail;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ rval = hfi1_alloc_lkey(&fmr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->mr.page_shift = fmr_attr->page_shift;
+
+ ret = &fmr->ibfmr;
+done:
+ return ret;
+
+bail_mregion:
+ deinit_mregion(&fmr->mr);
+bail:
+ kfree(fmr);
+ ret = ERR_PTR(rval);
+ goto done;
+}
+
+/**
+ * hfi1_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct hfi1_fmr *fmr = to_ifmr(ibfmr);
+ struct hfi1_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ int ret;
+
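+	/* refuse to remap while outstanding references may be using it */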
+ i = atomic_read(&fmr->mr.refcount);
+ if (i > 2)
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ rkt = &to_idev(ibfmr->device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->mr.page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == HFI1_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int hfi1_unmap_fmr(struct list_head *fmr_list)
+{
+ struct hfi1_fmr *fmr;
+ struct hfi1_lkey_table *rkt;
+ unsigned long flags;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * hfi1_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct hfi1_fmr *fmr = to_ifmr(ibfmr);
+ int ret = 0;
+ unsigned long timeout;
+
+ hfi1_free_lkey(&fmr->mr);
+ hfi1_put_mr(&fmr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&fmr->mr.comp,
+ 5 * HZ);
+ if (!timeout) {
+ hfi1_get_mr(&fmr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ deinit_mregion(&fmr->mr);
+ kfree(fmr);
+out:
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/staging/rdma/hfi1/opa_compat.h
new file mode 100644
index 000000000000..f64eec1c2951
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/opa_compat.h
@@ -0,0 +1,129 @@
+#ifndef _OPA_COMPAT_H
+#define _OPA_COMPAT_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This header file is for OPA-specific definitions which are
+ * required by the HFI driver, and which aren't yet in the Linux
+ * IB core. We'll collect these all here, then merge them into
+ * the kernel when that's convenient.
+ */
+
+/* OPA SMA attribute IDs */
+#define OPA_ATTRIB_ID_CONGESTION_INFO cpu_to_be16(0x008b)
+#define OPA_ATTRIB_ID_HFI_CONGESTION_LOG cpu_to_be16(0x008f)
+#define OPA_ATTRIB_ID_HFI_CONGESTION_SETTING cpu_to_be16(0x0090)
+#define OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0091)
+
+/* OPA PMA attribute IDs */
+#define OPA_PM_ATTRIB_ID_PORT_STATUS cpu_to_be16(0x0040)
+#define OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS cpu_to_be16(0x0041)
+#define OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS cpu_to_be16(0x0042)
+#define OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS cpu_to_be16(0x0043)
+#define OPA_PM_ATTRIB_ID_ERROR_INFO cpu_to_be16(0x0044)
+
+/* OPA status codes */
+#define OPA_PM_STATUS_REQUEST_TOO_LARGE cpu_to_be16(0x100)
+
+static inline u8 port_states_to_logical_state(struct opa_port_states *ps)
+{
+ return ps->portphysstate_portstate & OPA_PI_MASK_PORT_STATE;
+}
+
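+/* the physical port state is kept in the upper nibble */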
+static inline u8 port_states_to_phys_state(struct opa_port_states *ps)
+{
+ return ((ps->portphysstate_portstate &
+ OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4) & 0xf;
+}
+
+/*
+ * OPA port physical states
+ * IB Volume 1, Table 146 PortInfo/IB Volume 2 Section 5.4.2(1) PortPhysState
+ * values.
+ *
+ * When writing, only values 0-3 are valid, other values are ignored.
+ * When reading, 0 is reserved.
+ *
+ * Returned by the ibphys_portstate() routine.
+ */
+enum opa_port_phys_state {
+ IB_PORTPHYSSTATE_NOP = 0,
+ /* 1 is reserved */
+ IB_PORTPHYSSTATE_POLLING = 2,
+ IB_PORTPHYSSTATE_DISABLED = 3,
+ IB_PORTPHYSSTATE_TRAINING = 4,
+ IB_PORTPHYSSTATE_LINKUP = 5,
+ IB_PORTPHYSSTATE_LINK_ERROR_RECOVERY = 6,
+ IB_PORTPHYSSTATE_PHY_TEST = 7,
+ /* 8 is reserved */
+ OPA_PORTPHYSSTATE_OFFLINE = 9,
+ OPA_PORTPHYSSTATE_GANGED = 10,
+ OPA_PORTPHYSSTATE_TEST = 11,
+ OPA_PORTPHYSSTATE_MAX = 11,
+ /* values 12-15 are reserved/ignored */
+};
+
+/* OPA_PORT_TYPE_* definitions - these belong in opa_port_info.h */
+#define OPA_PORT_TYPE_UNKNOWN 0
+#define OPA_PORT_TYPE_DISCONNECTED 1
+/* port is not currently usable, CableInfo not available */
+#define OPA_PORT_TYPE_FIXED 2
+/* A fixed backplane port in a director class switch. All OPA ASICS */
+#define OPA_PORT_TYPE_VARIABLE 3
+/* A backplane port in a blade system, possibly mixed configuration */
+#define OPA_PORT_TYPE_STANDARD 4
+/* implies a SFF-8636 defined format for CableInfo (QSFP) */
+#define OPA_PORT_TYPE_SI_PHOTONICS 5
+/*
+ * A silicon photonics module implies a TBD defined format for CableInfo,
+ * as defined by the Intel SFO group.
+ */
+/* 6 - 15 are reserved */
+
+#endif /* _OPA_COMPAT_H */
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c
new file mode 100644
index 000000000000..ac5653c0f65e
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/pcie.c
@@ -0,0 +1,1253 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+#include "chip_registers.h"
+
+/* link speed vectors for each PCIe generation - not in Linux headers */
+#define GEN1_SPEED_VECTOR 0x1
+#define GEN2_SPEED_VECTOR 0x2
+#define GEN3_SPEED_VECTOR 0x3
+
+/*
+ * This file contains PCIe utility routines.
+ */
+
+/*
+ * Code to adjust PCIe capabilities.
+ */
+static void tune_pcie_caps(struct hfi1_devdata *);
+
+/*
+ * Do all the common PCIe setup and initialization.
+ * devdata is not yet allocated, and is not allocated until after this
+ * routine returns success. Therefore dd_dev_err() can't be used for error
+ * printing.
+ */
+int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /*
+ * This can happen (in theory) iff:
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
+ -ret);
+ goto done;
+ }
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret) {
+ hfi1_early_err(&pdev->dev,
+ "pci_request_regions fails: err %d\n", -ret);
+ goto bail;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ /*
+		 * If the 64 bit setup fails, try 32 bit.  Some systems
+		 * do not set up 64 bit maps when 2GB or less memory is
+		 * installed.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ hfi1_early_err(&pdev->dev,
+ "Unable to set DMA mask: %d\n", ret);
+ goto bail;
+ }
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ hfi1_early_err(&pdev->dev,
+ "Unable to set DMA consistent mask: %d\n", ret);
+ goto bail;
+ }
+
+ pci_set_master(pdev);
+ ret = pci_enable_pcie_error_reporting(pdev);
+ if (ret) {
+ hfi1_early_err(&pdev->dev,
+ "Unable to enable pcie error reporting: %d\n",
+ ret);
+ ret = 0;
+ }
+ goto done;
+
+bail:
+ hfi1_pcie_cleanup(pdev);
+done:
+ return ret;
+}
+
+/*
+ * Clean what was done in hfi1_pcie_init()
+ */
+void hfi1_pcie_cleanup(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+ /*
+ * Release regions should be called after the disable. OK to
+ * call if request regions has not been called or failed.
+ */
+ pci_release_regions(pdev);
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned long len;
+ resource_size_t addr;
+
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+ /*
+ * The TXE PIO buffers are at the tail end of the chip space.
+ * Cut them off and map them separately.
+ */
+
+ /* sanity check vs expectations */
+ if (len != TXE_PIO_SEND + TXE_PIO_SIZE) {
+ dd_dev_err(dd, "chip PIO range does not match\n");
+ return -EINVAL;
+ }
+
+ dd->kregbase = ioremap_nocache(addr, TXE_PIO_SEND);
+ if (!dd->kregbase)
+ return -ENOMEM;
+
+ dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
+ if (!dd->piobase) {
+ iounmap(dd->kregbase);
+ return -ENOMEM;
+ }
+
+ dd->flags |= HFI1_PRESENT; /* now register routines work */
+
+ dd->kregend = dd->kregbase + TXE_PIO_SEND;
+ dd->physaddr = addr; /* used for io_remap, etc. */
+
+ /*
+ * Re-map the chip's RcvArray as write-combining to allow us
+ * to write an entire cacheline worth of entries in one shot.
+ * If this re-map fails, just continue - the RcvArray programming
+ * function will handle both cases.
+ */
+ dd->chip_rcv_array_count = read_csr(dd, RCV_ARRAY_CNT);
+ dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
+ dd->chip_rcv_array_count * 8);
+ dd_dev_info(dd, "WC Remapped RcvArray: %p\n", dd->rcvarray_wc);
+ /*
+ * Save BARs and command to rewrite after device reset.
+ */
+ dd->pcibar0 = addr;
+ dd->pcibar1 = addr >> 32;
+ pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
+ pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ &dd->pcie_devctl2);
+ pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
+ &dd->pci_lnkctl3);
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
+
+ return 0;
+}
+
+/*
+ * Do PCIe cleanup related to dd, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * Void because all of the core pcie cleanup functions are void.
+ */
+void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
+{
+ u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+ dd->flags &= ~HFI1_PRESENT;
+ dd->kregbase = NULL;
+ iounmap(base);
+ if (dd->rcvarray_wc)
+ iounmap(dd->rcvarray_wc);
+ if (dd->piobase)
+ iounmap(dd->piobase);
+
+ pci_set_drvdata(dd->pcidev, NULL);
+}
+
+/*
+ * Do a Function Level Reset (FLR) on the device.
+ * Based on static function drivers/pci/pci.c:pcie_flr().
+ */
+void hfi1_pcie_flr(struct hfi1_devdata *dd)
+{
+ int i;
+ u16 status;
+
+ /* no need to check for the capability - we know the device has it */
+
+	/* wait for Transaction Pending bit to clear, up to several hundred ms */
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
+
+ dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n");
+
+clear:
+ pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ /* PCIe spec requires the function to be back within 100ms */
+ msleep(100);
+}
+
+static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
+ struct hfi1_msix_entry *hfi1_msix_entry)
+{
+ int ret;
+ int nvec = *msixcnt;
+ struct msix_entry *msix_entry;
+ int i;
+
+	/*
+	 * We can't pass the hfi1_msix_entry array to pci_enable_msix_range(),
+	 * so use a dummy msix_entry array and copy the allocated
+	 * irq back to the hfi1_msix_entry array.
+	 */
+ msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
+ if (!msix_entry) {
+ ret = -ENOMEM;
+ goto do_intx;
+ }
+
+ for (i = 0; i < nvec; i++)
+ msix_entry[i] = hfi1_msix_entry[i].msix;
+
+ ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
+ if (ret < 0)
+ goto free_msix_entry;
+ nvec = ret;
+
+ for (i = 0; i < nvec; i++)
+ hfi1_msix_entry[i].msix = msix_entry[i];
+
+ kfree(msix_entry);
+ *msixcnt = nvec;
+ return;
+
+free_msix_entry:
+ kfree(msix_entry);
+
+do_intx:
+ dd_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+ nvec, ret);
+ *msixcnt = 0;
+ hfi1_enable_intx(dd->pcidev);
+
+}
+
+/* return the PCIe link speed from the given link status */
+static u32 extract_speed(u16 linkstat)
+{
+ u32 speed;
+
+ switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+ default: /* not defined, assume Gen1 */
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ speed = 2500; /* Gen 1, 2.5GHz */
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ speed = 5000; /* Gen 2, 5GHz */
+ break;
+ case GEN3_SPEED_VECTOR:
+ speed = 8000; /* Gen 3, 8GHz */
+ break;
+ }
+ return speed;
+}
+
+/* return the PCIe link width from the given link status */
+static u32 extract_width(u16 linkstat)
+{
+ return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+}
+
+/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+static void update_lbus_info(struct hfi1_devdata *dd)
+{
+ u16 linkstat;
+
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
+ dd->lbus_width = extract_width(linkstat);
+ dd->lbus_speed = extract_speed(linkstat);
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
+}
+
+/*
+ * Read in the current PCIe link width and speed. Determine whether the
+ * link is Gen3 capable.
+ */
+int pcie_speeds(struct hfi1_devdata *dd)
+{
+ u32 linkcap;
+
+ if (!pci_is_pcie(dd->pcidev)) {
+ dd_dev_err(dd, "Can't find PCI Express capability!\n");
+ return -EINVAL;
+ }
+
+ /* find if our max speed is Gen3 and parent supports Gen3 speeds */
+ dd->link_gen3_capable = 1;
+
+ pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
+ dd_dev_info(dd,
+ "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
+ linkcap & PCI_EXP_LNKCAP_SLS);
+ dd->link_gen3_capable = 0;
+ }
+
+ /*
+ * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
+ */
+ if (dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
+ dd->link_gen3_capable = 0;
+ }
+
+ /* obtain the link width and current speed */
+ update_lbus_info(dd);
+
+ /* check against expected pcie width and complain if "wrong" */
+ if (dd->lbus_width < 16)
+ dd_dev_err(dd, "PCIe width %u (x16 HFI)\n", dd->lbus_width);
+
+ return 0;
+}
+
+/*
+ * Returns in *nent:
+ * - actual number of interrupts allocated
+ * - 0 if fell back to INTx.
+ */
+void request_msix(struct hfi1_devdata *dd, u32 *nent,
+ struct hfi1_msix_entry *entry)
+{
+ int pos;
+
+ pos = dd->pcidev->msix_cap;
+ if (*nent && pos) {
+ msix_setup(dd, pos, nent, entry);
+ /* did it, either MSI-X or INTx */
+ } else {
+ *nent = 0;
+ hfi1_enable_intx(dd->pcidev);
+ }
+
+ tune_pcie_caps(dd);
+}
+
+/*
+ * Disable MSI-X.
+ */
+void hfi1_nomsix(struct hfi1_devdata *dd)
+{
+ pci_disable_msix(dd->pcidev);
+}
+
+void hfi1_enable_intx(struct pci_dev *pdev)
+{
+ /* first, turn on INTx */
+ pci_intx(pdev, 1);
+ /* then turn off MSI-X */
+ pci_disable_msix(pdev);
+}
+
+/* restore command and BARs after a reset has wiped them out */
+void restore_pci_variables(struct hfi1_devdata *dd)
+{
+ pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
+ pci_write_config_dword(dd->pcidev,
+ PCI_BASE_ADDRESS_0, dd->pcibar0);
+ pci_write_config_dword(dd->pcidev,
+ PCI_BASE_ADDRESS_1, dd->pcibar1);
+ pci_write_config_dword(dd->pcidev,
+ PCI_ROM_ADDRESS, dd->pci_rom);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ dd->pcie_devctl2);
+ pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
+ dd->pci_lnkctl3);
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int hfi1_pcie_caps;
+module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
+
+static void tune_pcie_caps(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent;
+ u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
+ u16 rc_mrrs, ep_mrrs, max_mrrs;
+
+ /* Find out supported and configured values for parent (root) */
+	parent = dd->pcidev->bus->self;
+	if (!parent) {
+		/* a device on the root bus has no parent to tune */
+		dd_dev_info(dd, "Parent not found\n");
+		return;
+	}
+	if (!pci_is_root_bus(parent->bus)) {
+		dd_dev_info(dd, "Parent not root\n");
+		return;
+	}
+
+ if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
+ return;
+ rc_mpss = parent->pcie_mpss;
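+	/* MPS is encoded as 128 << code, so ffs(mps) - 8 recovers the code */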
+ rc_mps = ffs(pcie_get_mps(parent)) - 8;
+ /* Find out supported and configured values for endpoint (us) */
+ ep_mpss = dd->pcidev->pcie_mpss;
+ ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
+
+ /* Find max payload supported by root, endpoint */
+ if (rc_mpss > ep_mpss)
+ rc_mpss = ep_mpss;
+
+ /* If Supported greater than limit in module param, limit it */
+ if (rc_mpss > (hfi1_pcie_caps & 7))
+ rc_mpss = hfi1_pcie_caps & 7;
+ /* If less than (allowed, supported), bump root payload */
+ if (rc_mpss > rc_mps) {
+ rc_mps = rc_mpss;
+ pcie_set_mps(parent, 128 << rc_mps);
+ }
+ /* If less than (allowed, supported), bump endpoint payload */
+ if (rc_mpss > ep_mps) {
+ ep_mps = rc_mpss;
+ pcie_set_mps(dd->pcidev, 128 << ep_mps);
+ }
+
+ /*
+ * Now the Read Request size.
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+ max_mrrs = 5;
+ if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7))
+ max_mrrs = (hfi1_pcie_caps >> 4) & 7;
+
+ max_mrrs = 128 << max_mrrs;
+ rc_mrrs = pcie_get_readrq(parent);
+ ep_mrrs = pcie_get_readrq(dd->pcidev);
+
+ if (max_mrrs > rc_mrrs) {
+ rc_mrrs = max_mrrs;
+ pcie_set_readrq(parent, rc_mrrs);
+ }
+ if (max_mrrs > ep_mrrs) {
+ ep_mrrs = max_mrrs;
+ pcie_set_readrq(dd->pcidev, ep_mrrs);
+ }
+}
+/* End of PCIe capability tuning */
+
+/*
+ * Everything from here through the hfi1_pci_err_handler definition is
+ * invoked by the PCI error recovery infrastructure, registered via the
+ * pci_driver's err_handler field.
+ */
+static pci_ers_result_t
+pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dd_dev_info(dd, "State Normal, ignoring\n");
+ break;
+
+ case pci_channel_io_frozen:
+ dd_dev_info(dd, "State Frozen, requesting reset\n");
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ if (dd) {
+ dd_dev_info(dd, "State Permanent Failure, disabling\n");
+ /* no more register accesses! */
+ dd->flags &= ~HFI1_PRESENT;
+ hfi1_disable_after_error(dd);
+ }
+ /* else early, or other problem */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default: /* shouldn't happen */
+ dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
+ state);
+ break;
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+pci_mmio_enabled(struct pci_dev *pdev)
+{
+ u64 words = 0U;
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ if (dd && dd->pport) {
+ words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
+ if (words == ~0ULL)
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ dd_dev_info(dd,
+			"HFI1 mmio_enabled function called, read wordscntr %llx, returning %d\n",
+ words, ret);
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+pci_slot_reset(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+pci_link_reset(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ dd_dev_info(dd, "HFI1 link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+pci_resume(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ dd_dev_info(dd, "HFI1 resume function called\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ /*
+	 * Running jobs will fail, since the reset is asynchronous,
+	 * unlike a sysfs-requested reset.  Still better than
+	 * doing nothing.
+ */
+ hfi1_init(dd, 1); /* same as re-init after reset */
+}
+
+const struct pci_error_handlers hfi1_pci_err_handler = {
+ .error_detected = pci_error_detected,
+ .mmio_enabled = pci_mmio_enabled,
+ .link_reset = pci_link_reset,
+ .slot_reset = pci_slot_reset,
+ .resume = pci_resume,
+};
+
+/*============================================================================*/
+/* PCIe Gen3 support */
+
+/*
+ * This code is separated out because it is expected to be removed in the
+ * final shipping product. If not, then it will be revisited and items
+ * will be moved to more standard locations.
+ */
+
+/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_STS field values */
+#define DL_STATUS_HFI0 0x1 /* hfi0 firmware download complete */
+#define DL_STATUS_HFI1 0x2 /* hfi1 firmware download complete */
+#define DL_STATUS_BOTH 0x3 /* hfi0 and hfi1 firmware download complete */
+
+/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_ERR field values */
+#define DL_ERR_NONE 0x0 /* no error */
+#define DL_ERR_SWAP_PARITY 0x1 /* parity error in SerDes interrupt */
+ /* or response data */
+#define DL_ERR_DISABLED 0x2 /* hfi disabled */
+#define DL_ERR_SECURITY 0x3 /* security check failed */
+#define DL_ERR_SBUS 0x4 /* SBus status error */
+#define DL_ERR_XFR_PARITY	0x5	/* parity error during ROM transfer */
+
+/* gasket block secondary bus reset delay */
+#define SBR_DELAY_US 200000 /* 200ms */
+
+/* mask for PCIe capability register lnkctl2 target link speed */
+#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
+
+static uint pcie_target = 3;
+module_param(pcie_target, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
+
+static uint pcie_force;
+module_param(pcie_force, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed");
+
+static uint pcie_retry = 5;
+module_param(pcie_retry, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed");
+
+#define UNSET_PSET 255
+#define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
+#define DEFAULT_MCP_PSET 4 /* MCP HFI */
+static uint pcie_pset = UNSET_PSET;
+module_param(pcie_pset, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
+
+/* equalization columns */
+#define PREC 0
+#define ATTN 1
+#define POST 2
+
+/* discrete silicon preliminary equalization values */
+static const u8 discrete_preliminary_eq[11][3] = {
+ /* prec attn post */
+ { 0x00, 0x00, 0x12 }, /* p0 */
+ { 0x00, 0x00, 0x0c }, /* p1 */
+ { 0x00, 0x00, 0x0f }, /* p2 */
+ { 0x00, 0x00, 0x09 }, /* p3 */
+ { 0x00, 0x00, 0x00 }, /* p4 */
+ { 0x06, 0x00, 0x00 }, /* p5 */
+ { 0x09, 0x00, 0x00 }, /* p6 */
+ { 0x06, 0x00, 0x0f }, /* p7 */
+ { 0x09, 0x00, 0x09 }, /* p8 */
+ { 0x0c, 0x00, 0x00 }, /* p9 */
+ { 0x00, 0x00, 0x18 }, /* p10 */
+};
+
+/* integrated silicon preliminary equalization values */
+static const u8 integrated_preliminary_eq[11][3] = {
+ /* prec attn post */
+ { 0x00, 0x1e, 0x07 }, /* p0 */
+ { 0x00, 0x1e, 0x05 }, /* p1 */
+ { 0x00, 0x1e, 0x06 }, /* p2 */
+ { 0x00, 0x1e, 0x04 }, /* p3 */
+ { 0x00, 0x1e, 0x00 }, /* p4 */
+ { 0x03, 0x1e, 0x00 }, /* p5 */
+ { 0x04, 0x1e, 0x00 }, /* p6 */
+ { 0x03, 0x1e, 0x06 }, /* p7 */
+ { 0x03, 0x1e, 0x04 }, /* p8 */
+ { 0x05, 0x1e, 0x00 }, /* p9 */
+ { 0x00, 0x1e, 0x0a }, /* p10 */
+};
+
+/* helper to format the value to write to hardware */
+#define eq_value(pre, curr, post) \
+ ((((u32)(pre)) << \
+ PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \
+ | (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
+ | (((u32)(post)) << \
+ PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT))
+
+/*
+ * Load the given EQ preset table into the PCIe hardware.
+ */
+static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
+ u8 div)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ u32 hit_error = 0;
+ u32 violation;
+ u32 i;
+ u8 c_minus1, c0, c_plus1;
+
+ for (i = 0; i < 11; i++) {
+ /* set index */
+ pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i);
+ /* write the value */
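+		/* the three cursor coefficients must sum to FS (full swing) */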
+ c_minus1 = eq[i][PREC] / div;
+ c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
+ c_plus1 = eq[i][POST] / div;
+ pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
+ eq_value(c_minus1, c0, c_plus1));
+ /* check if these coefficients violate EQ rules */
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105,
+ &violation);
+		if (violation
+		    & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK) {
+ if (hit_error == 0) {
+ dd_dev_err(dd,
+ "Gen3 EQ Table Coefficient rule violations\n");
+ dd_dev_err(dd, " prec attn post\n");
+ }
+ dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
+ i, (u32)eq[i][0], (u32)eq[i][1], (u32)eq[i][2]);
+ dd_dev_err(dd, " %02x %02x %02x\n",
+ (u32)c_minus1, (u32)c0, (u32)c_plus1);
+ hit_error = 1;
+ }
+ }
+ if (hit_error)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Steps to be done after the PCIe firmware is downloaded and
+ * before the SBR for PCIe Gen3.
+ * The hardware mutex is already held.
+ */
+static void pcie_post_steps(struct hfi1_devdata *dd)
+{
+ int i;
+
+ set_sbus_fast_mode(dd);
+ /*
+ * Write to the PCIe PCSes to set the G3_LOCKED_NEXT bits to 1.
+ * This avoids a spurious framing error that can otherwise be
+ * generated by the MAC layer.
+ *
+ * Use individual addresses since no broadcast is set up.
+ */
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
+ 0x03, WRITE_SBUS_RECEIVER, 0x00022132);
+ }
+
+ clear_sbus_fast_mode(dd);
+}
+
+/*
+ * Trigger a secondary bus reset (SBR) on ourselves using our parent.
+ *
+ * Based on pci_parent_bus_reset() which is not exported by the
+ * kernel core.
+ */
+static int trigger_sbr(struct hfi1_devdata *dd)
+{
+ struct pci_dev *dev = dd->pcidev;
+ struct pci_dev *pdev;
+
+ /* need a parent */
+ if (!dev->bus->self) {
+ dd_dev_err(dd, "%s: no parent device\n", __func__);
+ return -ENOTTY;
+ }
+
+ /* should not be anyone else on the bus */
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev) {
+ dd_dev_err(dd,
+ "%s: another device is on the same bus\n",
+ __func__);
+ return -ENOTTY;
+ }
+
+ /*
+ * A secondary bus reset (SBR) issues a hot reset to our device.
+ * The following routine does a 1s wait after the reset is dropped
+ * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 -
+ * Conventional Reset, paragraph 3, line 35 also says that a 1s
+ * delay after a reset is required. Per spec requirements,
+ * the link is either working or not after that point.
+ */
+ pci_reset_bridge_secondary_bus(dev->bus->self);
+
+ return 0;
+}
+
+/*
+ * Write the given gasket interrupt register.
+ */
+static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
+ u16 code, u16 data)
+{
+ write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
+ (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT)
+ |((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
+}
+
+/*
+ * Tell the gasket logic how to react to the reset.
+ */
+static void arm_gasket_logic(struct hfi1_devdata *dd)
+{
+ u64 reg;
+
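+	/* select this HFI, its SerDes broadcast address, SBR mode, and timer */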
+ reg = (((u64)1 << dd->hfi1_id)
+ << ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT)
+ | ((u64)pcie_serdes_broadcast[dd->hfi1_id]
+ << ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT
+ | ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK
+ | ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK)
+ << ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT
+ );
+ write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
+ /* read back to push the write */
+ read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
+}
+
+/*
+ * Do all the steps needed to transition the PCIe link to Gen3 speed.
+ */
+int do_pcie_gen3_transition(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent;
+ u64 fw_ctrl;
+ u64 reg, therm;
+ u32 reg32, fs, lf;
+ u32 status, err;
+ int ret;
+ int do_retry, retry_count = 0;
+ uint default_pset;
+ u16 target_vector, target_speed;
+ u16 lnkctl, lnkctl2, vendor;
+ u8 nsbr = 1;
+ u8 div;
+ const u8 (*eq)[3];
+ int return_error = 0;
+
+ /* PCIe Gen3 is for the ASIC only */
+ if (dd->icode != ICODE_RTL_SILICON)
+ return 0;
+
+ if (pcie_target == 1) { /* target Gen1 */
+ target_vector = GEN1_SPEED_VECTOR;
+ target_speed = 2500;
+ } else if (pcie_target == 2) { /* target Gen2 */
+ target_vector = GEN2_SPEED_VECTOR;
+ target_speed = 5000;
+ } else if (pcie_target == 3) { /* target Gen3 */
+ target_vector = GEN3_SPEED_VECTOR;
+ target_speed = 8000;
+ } else {
+ /* off or invalid target - skip */
+ dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
+ return 0;
+ }
+
+ /* if already at target speed, done (unless forced) */
+ if (dd->lbus_speed == target_speed) {
+ dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
+ pcie_target,
+ pcie_force ? "re-doing anyway" : "skipping");
+ if (!pcie_force)
+ return 0;
+ }
+
+ /*
+ * A0 needs an additional SBR
+ */
+ if (is_a0(dd))
+ nsbr++;
+
+ /*
+ * Do the Gen3 transition. Steps are those of the PCIe Gen3
+ * recipe.
+ */
+
+ /* step 1: pcie link working in gen1/gen2 */
+
+ /* step 2: if either side is not capable of Gen3, done */
+ if (pcie_target == 3 && !dd->link_gen3_capable) {
+ dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
+ ret = -ENOSYS;
+ goto done_no_mutex;
+ }
+
+ /* hold the HW mutex across the firmware download and SBR */
+ ret = acquire_hw_mutex(dd);
+ if (ret)
+ return ret;
+
+ /* make sure thermal polling is not causing interrupts */
+ therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
+ if (therm) {
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
+ msleep(100);
+ dd_dev_info(dd, "%s: Disabled therm polling\n",
+ __func__);
+ }
+
+ /* step 3: download SBus Master firmware */
+ /* step 4: download PCIe Gen3 SerDes firmware */
+retry:
+ dd_dev_info(dd, "%s: downloading firmware\n", __func__);
+ ret = load_pcie_firmware(dd);
+ if (ret)
+ goto done;
+
+ /* step 5: set up device parameter settings */
+ dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
+
+ /*
+ * PcieCfgSpcie1 - Link Control 3
+ * Leave at reset value. No need to set PerfEq - link equalization
+ * will be performed automatically after the SBR when the target
+ * speed is 8GT/s.
+ */
+
+ /* clear all 16 per-lane error bits (PCIe: Lane Error Status) */
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
+
+ /* step 5a: Set Synopsys Port Logic registers */
+
+ /*
+ * PcieCfgRegPl2 - Port Force Link
+ *
+ * Set the low power field to 0x10 to avoid unnecessary power
+ * management messages. All other fields are zero.
+ */
+ reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
+
+ /*
+ * PcieCfgRegPl100 - Gen3 Control
+ *
+ * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
+ * turn on PcieCfgRegPl100.EqEieosCnt (erratum)
+ * Everything else zero.
+ */
+ reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
+
+ /*
+ * PcieCfgRegPl101 - Gen3 EQ FS and LF
+ * PcieCfgRegPl102 - Gen3 EQ Presets to Coefficients Mapping
+ * PcieCfgRegPl103 - Gen3 EQ Preset Index
+ * PcieCfgRegPl105 - Gen3 EQ Status
+ *
+ * Give initial EQ settings.
+ */
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */
+ /* 1000mV, FS=24, LF = 8 */
+ fs = 24;
+ lf = 8;
+ div = 3;
+ eq = discrete_preliminary_eq;
+ default_pset = DEFAULT_DISCRETE_PSET;
+ } else {
+ /* 400mV, FS=29, LF = 9 */
+ fs = 29;
+ lf = 9;
+ div = 1;
+ eq = integrated_preliminary_eq;
+ default_pset = DEFAULT_MCP_PSET;
+ }
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
+ (fs << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT)
+ | (lf << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
+ ret = load_eq_table(dd, eq, fs, div);
+ if (ret)
+ goto done;
+
+ /*
+ * PcieCfgRegPl106 - Gen3 EQ Control
+ *
+ * Set Gen3EqPsetReqVec, leave other fields 0.
+ */
+ if (pcie_pset == UNSET_PSET)
+ pcie_pset = default_pset;
+ if (pcie_pset > 10) { /* valid range is 0-10, inclusive */
+ dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
+ __func__, pcie_pset, default_pset);
+ pcie_pset = default_pset;
+ }
+ dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset);
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
+ ((1 << pcie_pset)
+ << PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT)
+ | PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK
+ | PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
+
+ /*
+ * step 5b: Do post firmware download steps via SBus
+ */
+ dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
+ pcie_post_steps(dd);
+
+ /*
+ * step 5c: Program gasket interrupts
+ */
+ /* set the Rx Bit Rate to REFCLK ratio */
+ write_gasket_interrupt(dd, 0, 0x0006, 0x0050);
+ /* disable pCal for PCIe Gen3 RX equalization */
+ write_gasket_interrupt(dd, 1, 0x0026, 0x5b01);
+ /*
+ * Enable iCal for PCIe Gen3 RX equalization, and set which
+ * evaluation of RX_EQ_EVAL will launch the iCal procedure.
+ */
+ write_gasket_interrupt(dd, 2, 0x0026, 0x5202);
+ /* terminate list */
+ write_gasket_interrupt(dd, 3, 0x0000, 0x0000);
+
+ /*
+ * step 5d: program XMT margin
+ * Right now, leave the default alone. To change, do a
+ * read-modify-write of:
+ * CcePcieCtrl.XmtMargin
+ * CcePcieCtrl.XmitMarginOverwriteEnable
+ */
+
+ /* step 5e: disable active state power management (ASPM) */
+ dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &lnkctl);
+ lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, lnkctl);
+
+ /*
+ * step 5f: clear DirectSpeedChange
+ * PcieCfgRegPl67.DirectSpeedChange must be zero to prevent the
+ * change in the speed target from starting before we are ready.
+ * This field defaults to 0 and we are not changing it, so nothing
+ * needs to be done.
+ */
+
+ /* step 5g: Set target link speed */
+ /*
+ * Set target link speed to be target on both device and parent.
+ * On setting the parent: Some system BIOSs "helpfully" set the
+ * parent target speed to Gen2 to match the ASIC's initial speed.
+ * We can set the target Gen3 because we have already checked
+ * that it is Gen3 capable earlier.
+ */
+ dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
+ parent = dd->pcidev->bus->self;
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
+ dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
+ (u32)lnkctl2);
+ /* only write to parent if target is not as high as ours */
+ if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
+ lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ lnkctl2 |= target_vector;
+ dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
+ (u32)lnkctl2);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2);
+ } else {
+ dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
+ }
+
+ dd_dev_info(dd, "%s: setting target link speed\n", __func__);
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
+ dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
+ (u32)lnkctl2);
+ lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ lnkctl2 |= target_vector;
+ dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
+ (u32)lnkctl2);
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
+
+ /* step 5h: arm gasket logic */
+ /* hold DC in reset across the SBR */
+ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
+ (void) read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
+ /* save firmware control across the SBR */
+ fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
+
+ dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
+ arm_gasket_logic(dd);
+
+ /*
+ * step 6: quiesce PCIe link
+ * The chip has already been reset, so there will be no traffic
+ * from the chip. Linux has no easy way to enforce that it will
+ * not try to access the device, so we just need to hope it doesn't
+ * do it while we are doing the reset.
+ */
+
+ /*
+ * step 7: initiate the secondary bus reset (SBR)
+ * step 8: hardware brings the links back up
+ * step 9: wait for link speed transition to be complete
+ */
+ dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
+ ret = trigger_sbr(dd);
+ if (ret)
+ goto done;
+
+ /* step 10: decide what to do next */
+
+ /* check if we can read PCI space */
+ ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
+ if (ret) {
+ dd_dev_info(dd,
+ "%s: read of VendorID failed after SBR, err %d\n",
+ __func__, ret);
+ return_error = 1;
+ goto done;
+ }
+ if (vendor == 0xffff) {
+ dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
+ return_error = 1;
+ ret = -EIO;
+ goto done;
+ }
+
+ /* restore PCI space registers we know were reset */
+ dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
+ restore_pci_variables(dd);
+ /* restore firmware control */
+ write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
+
+ /*
+ * Check the gasket block status.
+ *
+ * This is the first CSR read after the SBR. If the read returns
+ * all 1s (fails), the link did not make it back.
+ *
+ * Once we're sure we can read and write, clear the DC reset after
+ * the SBR. Then check for any per-lane errors. Then look over
+ * the status.
+ */
+ reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
+ dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
+ if (reg == ~0ull) { /* PCIe read failed/timeout */
+ dd_dev_err(dd, "SBR failed - unable to read from device\n");
+ return_error = 1;
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ /* clear the DC reset */
+ write_csr(dd, CCE_DC_CTRL, 0);
+ /* Set the LED off */
+ if (is_a0(dd))
+ setextled(dd, 0);
+
+ /* check for any per-lane errors */
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
+ dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
+
+ /* extract status, look for our HFI */
+ status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT)
+ & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
+ if ((status & (1 << dd->hfi1_id)) == 0) {
+ dd_dev_err(dd,
+ "%s: gasket status 0x%x, expecting 0x%x\n",
+ __func__, status, 1 << dd->hfi1_id);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* extract error */
+ err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT)
+ & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK;
+ if (err) {
+ dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
+ ret = -EIO;
+ goto done;
+ }
+
+ /* update our link information cache */
+ update_lbus_info(dd);
+ dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
+ dd->lbus_info);
+
+ if (dd->lbus_speed != target_speed) { /* not target */
+ /* maybe retry */
+ do_retry = retry_count < pcie_retry;
+ dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
+ pcie_target, do_retry ? ", retrying" : "");
+ retry_count++;
+ if (do_retry) {
+ msleep(100); /* allow time to settle */
+ goto retry;
+ }
+ ret = -EIO;
+ }
+
+done:
+ if (therm) {
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
+ msleep(100);
+ dd_dev_info(dd, "%s: Re-enable therm polling\n",
+ __func__);
+ }
+ release_hw_mutex(dd);
+done_no_mutex:
+ /* return no error if it is OK to be at current speed */
+ if (ret && !return_error) {
+		dd_dev_err(dd, "Proceeding at current PCIe speed\n");
+ ret = 0;
+ }
+
+ dd_dev_info(dd, "%s: done\n", __func__);
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c
new file mode 100644
index 000000000000..9991814a8f05
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/pio.c
@@ -0,0 +1,1771 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/delay.h>
+#include "hfi.h"
+#include "qp.h"
+#include "trace.h"
+
+#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
+
+#define SC(name) SEND_CTXT_##name
+/*
+ * Send Context functions
+ */
+static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
+
+/*
+ * Set the CM reset bit and wait for it to clear. Use the provided
+ * sendctrl register. This routine has no locking.
+ */
+void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
+{
+ write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
+ while (1) {
+ udelay(1);
+ sendctrl = read_csr(dd, SEND_CTRL);
+ if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
+ break;
+ }
+}
+
+/* defined in header release 48 and higher */
+#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
+#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
+#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
+#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
+ << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
+#endif
+
+/* global control of PIO send */
+void pio_send_control(struct hfi1_devdata *dd, int op)
+{
+ u64 reg, mask;
+ unsigned long flags;
+ int write = 1; /* write sendctrl back */
+ int flush = 0; /* re-read sendctrl to make sure it is flushed */
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ reg = read_csr(dd, SEND_CTRL);
+ switch (op) {
+ case PSC_GLOBAL_ENABLE:
+ reg |= SEND_CTRL_SEND_ENABLE_SMASK;
+ /* Fall through */
+ case PSC_DATA_VL_ENABLE:
+ /* Disallow sending on VLs not enabled */
+		mask = (((~0ull) << num_vls) &
+			SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+ reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
+ break;
+ case PSC_GLOBAL_DISABLE:
+ reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
+ break;
+ case PSC_GLOBAL_VLARB_ENABLE:
+ reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
+ break;
+ case PSC_GLOBAL_VLARB_DISABLE:
+ reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
+ break;
+ case PSC_CM_RESET:
+ __cm_reset(dd, reg);
+ write = 0; /* CSR already written (and flushed) */
+ break;
+ case PSC_DATA_VL_DISABLE:
+ reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
+ flush = 1;
+ break;
+ default:
+ dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
+ break;
+ }
+
+ if (write) {
+ write_csr(dd, SEND_CTRL, reg);
+ if (flush)
+ (void) read_csr(dd, SEND_CTRL); /* flush write */
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/* number of send context memory pools */
+#define NUM_SC_POOLS 2
+
+/* Send Context Size (SCS) wildcards */
+#define SCS_POOL_0 -1
+#define SCS_POOL_1 -2
+/* Send Context Count (SCC) wildcards */
+#define SCC_PER_VL -1
+#define SCC_PER_CPU -2
+
+#define SCC_PER_KRCVQ -3
+#define SCC_ACK_CREDITS 32
+
+#define PIO_WAIT_BATCH_SIZE 5
+
+/* default send context sizes */
+static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
+ [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
+ .count = SCC_PER_VL }, /* one per VL */
+ [SC_ACK] = { .size = SCC_ACK_CREDITS,
+ .count = SCC_PER_KRCVQ },
+ [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
+ .count = SCC_PER_CPU }, /* one per CPU */
+};
+
+/* send context memory pool configuration */
+struct mem_pool_config {
+ int centipercent; /* % of memory, in 100ths of 1% */
+ int absolute_blocks; /* absolute block count */
+};
+
+/* default memory pool configuration: 100% in pool 0 */
+static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
+ /* centi%, abs blocks */
+ { 10000, -1 }, /* pool 0 */
+ { 0, -1 }, /* pool 1 */
+};
+
+/* memory pool information, used when calculating final sizes */
+struct mem_pool_info {
+ int centipercent; /* 100th of 1% of memory to use; -1 if blocks already set */
+ int count; /* count of contexts in the pool */
+ int blocks; /* block size of the pool */
+ int size; /* context size, in blocks */
+};
+
+/*
+ * Convert a pool wildcard to a valid pool index. The wildcards
+ * start at -1 and increase negatively. Map them as:
+ * -1 => 0
+ * -2 => 1
+ * etc.
+ *
+ * Return -1 on non-wildcard input, otherwise convert to a pool number.
+ */
+static int wildcard_to_pool(int wc)
+{
+ if (wc >= 0)
+ return -1; /* non-wildcard */
+ return -wc - 1;
+}
+
+static const char *sc_type_names[SC_MAX] = {
+ "kernel",
+ "ack",
+ "user"
+};
+
+static const char *sc_type_name(int index)
+{
+ if (index < 0 || index >= SC_MAX)
+ return "unknown";
+ return sc_type_names[index];
+}
+
+/*
+ * Read the send context memory pool configuration and send context
+ * size configuration. Replace any wildcards and come up with final
+ * counts and sizes for the send context types.
+ */
+int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
+{
+ struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
+ int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
+ int total_contexts = 0;
+ int fixed_blocks;
+ int pool_blocks;
+ int used_blocks;
+ int cp_total; /* centipercent total */
+ int ab_total; /* absolute block total */
+ int extra;
+ int i;
+
+ /*
+ * Step 1:
+ * - copy the centipercents/absolute sizes from the pool config
+ * - sanity check these values
+ * - add up centipercents, then later check for full value
+ * - add up absolute blocks, then later check for over-commit
+ */
+ cp_total = 0;
+ ab_total = 0;
+ for (i = 0; i < NUM_SC_POOLS; i++) {
+ int cp = sc_mem_pool_config[i].centipercent;
+ int ab = sc_mem_pool_config[i].absolute_blocks;
+
+ /*
+ * A negative value is "unused" or "invalid". Both *can*
+ * be valid, but centipercent wins, so check that first
+ */
+ if (cp >= 0) { /* centipercent valid */
+ cp_total += cp;
+ } else if (ab >= 0) { /* absolute blocks valid */
+ ab_total += ab;
+ } else { /* neither valid */
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d: both the block count and centipercent are invalid\n",
+ i);
+ return -EINVAL;
+ }
+
+ mem_pool_info[i].centipercent = cp;
+ mem_pool_info[i].blocks = ab;
+ }
+
+ /* do not use both % and absolute blocks for different pools */
+ if (cp_total != 0 && ab_total != 0) {
+ dd_dev_err(
+ dd,
+ "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
+ return -EINVAL;
+ }
+
+ /* if any percentages are present, they must add up to 100% x 100 */
+ if (cp_total != 0 && cp_total != 10000) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool centipercent is %d, expecting 10000\n",
+ cp_total);
+ return -EINVAL;
+ }
+
+ /* the absolute pool total cannot be more than the mem total */
+ if (ab_total > total_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool absolute block count %d is larger than the memory size %d\n",
+ ab_total, total_blocks);
+ return -EINVAL;
+ }
+
+ /*
+ * Step 2:
+ * - copy from the context size config
+ * - replace context type wildcard counts with real values
+ * - add up non-memory pool block sizes
+ * - add up memory pool user counts
+ */
+ fixed_blocks = 0;
+ for (i = 0; i < SC_MAX; i++) {
+ int count = sc_config_sizes[i].count;
+ int size = sc_config_sizes[i].size;
+ int pool;
+
+ /*
+ * Sanity check count: Either a positive value or
+ * one of the expected wildcards is valid. The positive
+ * value is checked later when we compare against total
+ * memory available.
+ */
+ if (i == SC_ACK) {
+ count = dd->n_krcv_queues;
+ } else if (i == SC_KERNEL) {
+ count = num_vls + 1 /* VL15 */;
+ } else if (count == SCC_PER_CPU) {
+ count = dd->num_rcv_contexts - dd->n_krcv_queues;
+ } else if (count < 0) {
+ dd_dev_err(
+ dd,
+ "%s send context invalid count wildcard %d\n",
+ sc_type_name(i), count);
+ return -EINVAL;
+ }
+ if (total_contexts + count > dd->chip_send_contexts)
+ count = dd->chip_send_contexts - total_contexts;
+
+ total_contexts += count;
+
+ /*
+ * Sanity check pool: The conversion will return a pool
+ * number or -1 if a fixed (non-negative) value. The fixed
+ * value is checked later when we compare against
+ * total memory available.
+ */
+ pool = wildcard_to_pool(size);
+ if (pool == -1) { /* non-wildcard */
+ fixed_blocks += size * count;
+ } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
+ mem_pool_info[pool].count += count;
+ } else { /* invalid wildcard */
+ dd_dev_err(
+ dd,
+ "%s send context invalid pool wildcard %d\n",
+ sc_type_name(i), size);
+ return -EINVAL;
+ }
+
+ dd->sc_sizes[i].count = count;
+ dd->sc_sizes[i].size = size;
+ }
+ if (fixed_blocks > total_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context fixed block count, %u, larger than total block count %u\n",
+ fixed_blocks, total_blocks);
+ return -EINVAL;
+ }
+
+ /* step 3: calculate the blocks in the pools, and pool context sizes */
+ pool_blocks = total_blocks - fixed_blocks;
+ if (ab_total > pool_blocks) {
+ dd_dev_err(
+ dd,
+ "Send context fixed pool sizes, %u, larger than pool block count %u\n",
+ ab_total, pool_blocks);
+ return -EINVAL;
+ }
+ /* subtract off the fixed pool blocks */
+ pool_blocks -= ab_total;
+
+ for (i = 0; i < NUM_SC_POOLS; i++) {
+ struct mem_pool_info *pi = &mem_pool_info[i];
+
+ /* % beats absolute blocks */
+ if (pi->centipercent >= 0)
+ pi->blocks = (pool_blocks * pi->centipercent) / 10000;
+
+ if (pi->blocks == 0 && pi->count != 0) {
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d has %u contexts, but no blocks\n",
+ i, pi->count);
+ return -EINVAL;
+ }
+ if (pi->count == 0) {
+ /* warn about wasted blocks */
+ if (pi->blocks != 0)
+ dd_dev_err(
+ dd,
+ "Send context memory pool %d has %u blocks, but zero contexts\n",
+ i, pi->blocks);
+ pi->size = 0;
+ } else {
+ pi->size = pi->blocks / pi->count;
+ }
+ }
+
+ /* step 4: fill in the context type sizes from the pool sizes */
+ used_blocks = 0;
+ for (i = 0; i < SC_MAX; i++) {
+ if (dd->sc_sizes[i].size < 0) {
+ unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
+
+ WARN_ON_ONCE(pool >= NUM_SC_POOLS);
+ dd->sc_sizes[i].size = mem_pool_info[pool].size;
+ }
+ /* make sure we are not larger than what is allowed by the HW */
+#define PIO_MAX_BLOCKS 1024
+ if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
+ dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
+
+ /* calculate our total usage */
+ used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
+ }
+ extra = total_blocks - used_blocks;
+ if (extra != 0)
+ dd_dev_info(dd, "unused send context blocks: %d\n", extra);
+
+ return total_contexts;
+}
+
+int init_send_contexts(struct hfi1_devdata *dd)
+{
+ u16 base;
+ int ret, i, j, context;
+
+ ret = init_credit_return(dd);
+ if (ret)
+ return ret;
+
+ dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
+ GFP_KERNEL);
+ dd->send_contexts = kcalloc(dd->num_send_contexts,
+ sizeof(struct send_context_info),
+ GFP_KERNEL);
+ if (!dd->send_contexts || !dd->hw_to_sw) {
+ dd_dev_err(dd, "Unable to allocate send context arrays\n");
+ kfree(dd->hw_to_sw);
+ kfree(dd->send_contexts);
+ free_credit_return(dd);
+ return -ENOMEM;
+ }
+
+ /* hardware context map starts with invalid send context indices */
+ for (i = 0; i < TXE_NUM_CONTEXTS; i++)
+ dd->hw_to_sw[i] = INVALID_SCI;
+
+ /*
+ * Each send context type now has its credit size. Carve the credits
+ * for each context, one after another, out of the global PIO space.
+ */
+ context = 0;
+ base = 1;
+ for (i = 0; i < SC_MAX; i++) {
+ struct sc_config_sizes *scs = &dd->sc_sizes[i];
+
+ for (j = 0; j < scs->count; j++) {
+ struct send_context_info *sci =
+ &dd->send_contexts[context];
+ sci->type = i;
+ sci->base = base;
+ sci->credits = scs->size;
+
+ context++;
+ base += scs->size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate a software index and hardware context of the given type.
+ *
+ * Must be called with dd->sc_lock held.
+ */
+static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
+ u32 *hw_context)
+{
+ struct send_context_info *sci;
+ u32 index;
+ u32 context;
+
+ for (index = 0, sci = &dd->send_contexts[0];
+ index < dd->num_send_contexts; index++, sci++) {
+ if (sci->type == type && sci->allocated == 0) {
+ sci->allocated = 1;
+ /* reversed 1:1 mapping: hw context numbers count down from the top */
+ context = dd->chip_send_contexts - index - 1;
+ dd->hw_to_sw[context] = index;
+ *sw_index = index;
+ *hw_context = context;
+ return 0; /* success */
+ }
+ }
+ dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
+ return -ENOSPC;
+}
+
+/*
+ * Free the send context given by its software index.
+ *
+ * Must be called with dd->sc_lock held.
+ */
+static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
+{
+ struct send_context_info *sci;
+
+ sci = &dd->send_contexts[sw_index];
+ if (!sci->allocated) {
+ dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
+ __func__, sw_index, hw_context);
+ }
+ sci->allocated = 0;
+ dd->hw_to_sw[hw_context] = INVALID_SCI;
+}
+
+/* return the base context of a context in a group */
+static inline u32 group_context(u32 context, u32 group)
+{
+ return (context >> group) << group;
+}
+
+/* return the size of a group */
+static inline u32 group_size(u32 group)
+{
+ return 1 << group;
+}
+
+/*
+ * Obtain the credit return addresses, kernel virtual and DMA, for the
+ * given sc.
+ *
+ * To understand this routine:
+ * o va and pa are arrays of struct credit_return. One for each physical
+ * send context, per NUMA.
+ * o Each send context always looks in its relative location in a struct
+ * credit_return for its credit return.
+ * o Each send context in a group must have its return address CSR programmed
+ * with the same value. Use the address of the first send context in the
+ * group.
+ */
+static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
+{
+ u32 gc = group_context(sc->hw_context, sc->group);
+ u32 index = sc->hw_context & 0x7;
+
+ sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
+ *pa = (unsigned long)
+ &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
+}
+
+/*
+ * Work queue function triggered in error interrupt routine for
+ * kernel contexts.
+ */
+static void sc_halted(struct work_struct *work)
+{
+ struct send_context *sc;
+
+ sc = container_of(work, struct send_context, halt_work);
+ sc_restart(sc);
+}
+
+/*
+ * Calculate the PIO block threshold for this send context using the given MTU.
+ * Trigger a credit return when only enough credits for one MTU plus an
+ * optional header remain.
+ *
+ * Parameter mtu is in bytes.
+ * Parameter hdrqentsize is in DWORDs.
+ *
+ * Return value is what to write into the CSR: trigger return when
+ * unreturned credits pass this count.
+ */
+u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
+{
+ u32 release_credits;
+ u32 threshold;
+
+ /* add in the header size, then divide by the PIO block size */
+ mtu += hdrqentsize << 2;
+ release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
+
+ /* check against this context's credits */
+ if (sc->credits <= release_credits)
+ threshold = 1;
+ else
+ threshold = sc->credits - release_credits;
+
+ return threshold;
+}
+
+/*
+ * Calculate credit threshold in terms of percent of the allocated credits.
+ * Trigger when unreturned credits equal or exceed the percentage of the whole.
+ *
+ * Return value is what to write into the CSR: trigger return when
+ * unreturned credits pass this count.
+ */
+static u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
+{
+ return (sc->credits * percent) / 100;
+}
+
+/*
+ * Set the credit return threshold.
+ */
+void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
+{
+ unsigned long flags;
+ u32 old_threshold;
+ int force_return = 0;
+
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+
+ old_threshold = (sc->credit_ctrl >>
+ SC(CREDIT_CTRL_THRESHOLD_SHIFT))
+ & SC(CREDIT_CTRL_THRESHOLD_MASK);
+
+ if (new_threshold != old_threshold) {
+ sc->credit_ctrl =
+ (sc->credit_ctrl
+ & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
+ | ((new_threshold
+ & SC(CREDIT_CTRL_THRESHOLD_MASK))
+ << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+
+ /* force a credit return on change to avoid a possible stall */
+ force_return = 1;
+ }
+
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+
+ if (force_return)
+ sc_return_credits(sc);
+}
+
+/*
+ * set_pio_integrity
+ *
+ * Set the CHECK_ENABLE register for the send context 'sc'.
+ */
+void set_pio_integrity(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg = 0;
+ u32 hw_context = sc->hw_context;
+ int type = sc->type;
+
+ /*
+ * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
+ * we're snooping.
+ */
+ if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
+ dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
+ reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
+
+ write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
+}
+
+/*
+ * Allocate a NUMA relative send context structure of the given type along
+ * with a HW context.
+ */
+struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
+ uint hdrqentsize, int numa)
+{
+ struct send_context_info *sci;
+ struct send_context *sc;
+ dma_addr_t pa;
+ unsigned long flags;
+ u64 reg;
+ u32 thresh;
+ u32 sw_index;
+ u32 hw_context;
+ int ret;
+ u8 opval, opmask;
+
+ /* do not allocate while frozen */
+ if (dd->flags & HFI1_FROZEN)
+ return NULL;
+
+ sc = kzalloc_node(sizeof(struct send_context), GFP_KERNEL, numa);
+ if (!sc) {
+ dd_dev_err(dd, "Cannot allocate send context structure\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
+ if (ret) {
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+ kfree(sc);
+ return NULL;
+ }
+
+ sci = &dd->send_contexts[sw_index];
+ sci->sc = sc;
+
+ sc->dd = dd;
+ sc->node = numa;
+ sc->type = type;
+ spin_lock_init(&sc->alloc_lock);
+ spin_lock_init(&sc->release_lock);
+ spin_lock_init(&sc->credit_ctrl_lock);
+ INIT_LIST_HEAD(&sc->piowait);
+ INIT_WORK(&sc->halt_work, sc_halted);
+ atomic_set(&sc->buffers_allocated, 0);
+ init_waitqueue_head(&sc->halt_wait);
+
+ /* grouping is always single context for now */
+ sc->group = 0;
+
+ sc->sw_index = sw_index;
+ sc->hw_context = hw_context;
+ cr_group_addresses(sc, &pa);
+ sc->credits = sci->credits;
+
+/* PIO Send Memory Address details */
+#define PIO_ADDR_CONTEXT_MASK 0xfful
+#define PIO_ADDR_CONTEXT_SHIFT 16
+ sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
+ << PIO_ADDR_CONTEXT_SHIFT);
+
+ /* set base and credits */
+ reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
+ << SC(CTRL_CTXT_DEPTH_SHIFT))
+ | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
+ << SC(CTRL_CTXT_BASE_SHIFT));
+ write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
+
+ set_pio_integrity(sc);
+
+ /* unmask all errors */
+ write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
+
+ /* set the default partition key */
+ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
+ (DEFAULT_PKEY &
+ SC(CHECK_PARTITION_KEY_VALUE_MASK))
+ << SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
+
+ /* per context type checks */
+ if (type == SC_USER) {
+ opval = USER_OPCODE_CHECK_VAL;
+ opmask = USER_OPCODE_CHECK_MASK;
+ } else {
+ opval = OPCODE_CHECK_VAL_DISABLED;
+ opmask = OPCODE_CHECK_MASK_DISABLED;
+ }
+
+ /* set the send context check opcode mask and value */
+ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
+ ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
+ ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
+
+ /* set up credit return */
+ reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
+
+ /*
+ * Calculate the initial credit return threshold.
+ *
+ * For Ack contexts, set a threshold for half the credits.
+ * For User contexts use the given percentage. This has been
+ * sanitized on driver start-up.
+ * For Kernel contexts, use the default MTU plus a header.
+ */
+ if (type == SC_ACK) {
+ thresh = sc_percent_to_threshold(sc, 50);
+ } else if (type == SC_USER) {
+ thresh = sc_percent_to_threshold(sc,
+ user_credit_return_threshold);
+ } else { /* kernel */
+ thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
+ }
+ reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
+ /* add in early return */
+ if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
+ reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
+ else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
+ reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
+
+ /* set up write-through credit_ctrl */
+ sc->credit_ctrl = reg;
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
+
+ /* User send contexts should not allow sending on VL15 */
+ if (type == SC_USER) {
+ reg = 1ULL << 15;
+ write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
+ }
+
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+
+ /*
+ * Allocate shadow ring to track outstanding PIO buffers _after_
+ * unlocking. We don't know the size until the lock is held and
+ * we can't allocate while the lock is held. No one is using
+ * the context yet, so allocate it now.
+ *
+ * User contexts do not get a shadow ring.
+ */
+ if (type != SC_USER) {
+ /*
+ * Size the shadow ring 1 larger than the number of credits
+ * so head == tail can mean empty.
+ */
+ sc->sr_size = sci->credits + 1;
+ sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
+ sc->sr_size, GFP_KERNEL, numa);
+ if (!sc->sr) {
+ dd_dev_err(dd,
+ "Cannot allocate send context shadow ring structure\n");
+ sc_free(sc);
+ return NULL;
+ }
+ }
+
+ dd_dev_info(dd,
+ "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
+ sw_index,
+ hw_context,
+ sc_type_name(type),
+ sc->group,
+ sc->credits,
+ sc->credit_ctrl,
+ thresh);
+
+ return sc;
+}
+
+/* free a per-NUMA send context structure */
+void sc_free(struct send_context *sc)
+{
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ u32 sw_index;
+ u32 hw_context;
+
+ if (!sc)
+ return;
+
+ sc->flags |= SCF_IN_FREE; /* ensure no restarts */
+ dd = sc->dd;
+ if (!list_empty(&sc->piowait))
+ dd_dev_err(dd, "piowait list not empty!\n");
+ sw_index = sc->sw_index;
+ hw_context = sc->hw_context;
+ sc_disable(sc); /* make sure the HW is disabled */
+ flush_work(&sc->halt_work);
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ dd->send_contexts[sw_index].sc = NULL;
+
+ /* clear/disable all registers set in sc_alloc */
+ write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
+ write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
+ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
+ write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
+
+ /* release the index and context for re-use */
+ sc_hw_free(dd, sw_index, hw_context);
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+
+ kfree(sc->sr);
+ kfree(sc);
+}
+
+/* disable the context */
+void sc_disable(struct send_context *sc)
+{
+ u64 reg;
+ unsigned long flags;
+ struct pio_buf *pbuf;
+
+ if (!sc)
+ return;
+
+ /* do all steps, even if already disabled */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
+ reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
+ sc->flags &= ~SCF_ENABLED;
+ sc_wait_for_packet_egress(sc, 1);
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ /*
+ * Flush any waiters. Once the context is disabled,
+ * credit return interrupts are stopped (although there
+ * could be one in-process when the context is disabled).
+ * Wait one microsecond for any lingering interrupts, then
+ * proceed with the flush.
+ */
+ udelay(1);
+ spin_lock_irqsave(&sc->release_lock, flags);
+ if (sc->sr) { /* this context has a shadow ring */
+ while (sc->sr_tail != sc->sr_head) {
+ pbuf = &sc->sr[sc->sr_tail].pbuf;
+ if (pbuf->cb)
+ (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
+ sc->sr_tail++;
+ if (sc->sr_tail >= sc->sr_size)
+ sc->sr_tail = 0;
+ }
+ }
+ spin_unlock_irqrestore(&sc->release_lock, flags);
+}
+
+/* return SendEgressCtxtStatus.PacketOccupancy */
+#define packet_occupancy(r) \
+ (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
+ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
+
+/* is egress halted on the context? */
+#define egress_halted(r) \
+ ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
+
+/* wait for packet egress, optionally pause for credit return */
+static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg;
+ u32 loop = 0;
+
+ while (1) {
+ reg = read_csr(dd, sc->hw_context * 8 +
+ SEND_EGRESS_CTXT_STATUS);
+ /* done if egress is stopped */
+ if (egress_halted(reg))
+ break;
+ reg = packet_occupancy(reg);
+ if (reg == 0)
+ break;
+ if (loop > 100) {
+ dd_dev_err(dd,
+ "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u\n",
+ __func__, sc->sw_index,
+ sc->hw_context, (u32)reg);
+ break;
+ }
+ loop++;
+ udelay(1);
+ }
+
+ if (pause)
+ /* Add additional delay to ensure chip returns all credits */
+ pause_for_credit_return(dd);
+}
+
+void sc_wait(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ struct send_context *sc = dd->send_contexts[i].sc;
+
+ if (!sc)
+ continue;
+ sc_wait_for_packet_egress(sc, 0);
+ }
+}
+
+/*
+ * Restart a context after it has been halted due to error.
+ *
+ * If the first step (waiting for the context to halt) fails, return
+ * early. Otherwise complain about timeouts but keep going.
+ *
+ * It is expected that allocations (enabled flag bit) have been shut off
+ * already (only applies to kernel contexts).
+ */
+int sc_restart(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ u64 reg;
+ u32 loop;
+ int count;
+
+ /* bounce off if not halted, or being freed */
+ if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
+ return -EINVAL;
+
+ dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
+ sc->hw_context);
+
+ /*
+ * Step 1: Wait for the context to actually halt.
+ *
+ * The error interrupt is asynchronous to actually setting halt
+ * on the context.
+ */
+ loop = 0;
+ while (1) {
+ reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
+ if (reg & SC(STATUS_CTXT_HALTED_SMASK))
+ break;
+ if (loop > 100) {
+ dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
+ __func__, sc->sw_index, sc->hw_context);
+ return -ETIME;
+ }
+ loop++;
+ udelay(1);
+ }
+
+ /*
+ * Step 2: Ensure no users are still trying to write to PIO.
+ *
+ * For kernel contexts, we have already turned off buffer allocation.
+ * Now wait for the buffer count to go to zero.
+ *
+ * For user contexts, the user handling code has cut off write access
+ * to the context's PIO pages before calling this routine and will
+ * restore write access after this routine returns.
+ */
+ if (sc->type != SC_USER) {
+ /* kernel context */
+ loop = 0;
+ while (1) {
+ count = atomic_read(&sc->buffers_allocated);
+ if (count == 0)
+ break;
+ if (loop > 100) {
+ dd_dev_err(dd,
+ "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
+ __func__, sc->sw_index,
+ sc->hw_context, count);
+ /* give up rather than spin and log forever */
+ break;
+ }
+ loop++;
+ udelay(1);
+ }
+ }
+
+ /*
+ * Step 3: Wait for all packets to egress.
+ * This is done while disabling the send context.
+ *
+ * Step 4: Disable the context
+ *
+ * This is a superset of the halt. After the disable, the
+ * errors can be cleared.
+ */
+ sc_disable(sc);
+
+ /*
+ * Step 5: Enable the context
+ *
+ * This enable will clear the halted flag and per-send context
+ * error flags.
+ */
+ return sc_enable(sc);
+}
+
+/*
+ * PIO freeze processing. To be called after the TXE block is fully frozen.
+ * Go through all frozen send contexts and disable them. The contexts are
+ * already stopped by the freeze.
+ */
+void pio_freeze(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ /*
+ * Don't disable unallocated, unfrozen, or user send contexts.
+ * User send contexts will be disabled when the process
+ * calls into the driver to reset its context.
+ */
+ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
+ continue;
+
+ /* only need to disable, the context is already stopped */
+ sc_disable(sc);
+ }
+}
+
+/*
+ * Unfreeze PIO for kernel send contexts. The precondition for calling this
+ * is that all PIO send contexts have been disabled and the SPC freeze has
+ * been cleared. Now perform the last step and re-enable each kernel context.
+ * User (PSM) processing will occur when PSM calls into the kernel to
+ * acknowledge the freeze.
+ */
+void pio_kernel_unfreeze(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
+ continue;
+
+ sc_enable(sc); /* will clear the sc frozen flag */
+ }
+}
+
+/*
+ * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
+ * Returns:
+ * 0 - success
+ * -ETIMEDOUT - if we wait too long
+ * -EIO - if there was an error
+ */
+static int pio_init_wait_progress(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ int max, count = 0;
+
+ /* max is the longest possible HW init time / delay */
+ max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
+ while (1) {
+ reg = read_csr(dd, SEND_PIO_INIT_CTXT);
+ if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
+ break;
+ if (count >= max)
+ return -ETIMEDOUT;
+ udelay(5);
+ count++;
+ }
+
+ return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
+}
+
+/*
+ * Reset all of the send contexts to their power-on state. Used
+ * only during manual init - no lock against sc_enable needed.
+ */
+void pio_reset_all(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ /* make sure the init engine is not busy */
+ ret = pio_init_wait_progress(dd);
+ /* ignore any timeout */
+ if (ret == -EIO) {
+ /* clear the error */
+ write_csr(dd, SEND_PIO_ERR_CLEAR,
+ SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
+ }
+
+ /* reset init all */
+ write_csr(dd, SEND_PIO_INIT_CTXT,
+ SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
+ udelay(2);
+ ret = pio_init_wait_progress(dd);
+ if (ret < 0) {
+ dd_dev_err(dd,
+ "PIO send context init %s while initializing all PIO blocks\n",
+ ret == -ETIMEDOUT ? "is stuck" : "had an error");
+ }
+}
+
+/* enable the context */
+int sc_enable(struct send_context *sc)
+{
+ u64 sc_ctrl, reg, pio;
+ struct hfi1_devdata *dd;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!sc)
+ return -EINVAL;
+ dd = sc->dd;
+
+ /*
+ * Obtain the allocator lock to guard against any allocation
+ * attempts (which should not happen prior to context being
+ * enabled). On the release/disable side we don't need to
+ * worry about locking since the releaser will not do anything
+ * if the context accounting values have not changed.
+ */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
+ if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
+ goto unlock; /* already enabled */
+
+ /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
+
+ *sc->hw_free = 0;
+ sc->free = 0;
+ sc->alloc_free = 0;
+ sc->fill = 0;
+ sc->sr_head = 0;
+ sc->sr_tail = 0;
+ sc->flags = 0;
+ atomic_set(&sc->buffers_allocated, 0);
+
+ /*
+ * Clear all per-context errors. Some of these will be set when
+ * we are re-enabling after a context halt. Now that the context
+ * is disabled, the halt will not clear until after the PIO init
+ * engine runs below.
+ */
+ reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
+ if (reg)
+ write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR),
+ reg);
+
+ /*
+ * The HW PIO initialization engine can handle only one init
+ * request at a time. Serialize access to each device's engine.
+ */
+ spin_lock(&dd->sc_init_lock);
+ /*
+ * Since access to this code block is serialized and
+ * each access waits for the initialization to complete
+ * before releasing the lock, the PIO initialization engine
+ * should not be in use, so we don't have to wait for the
+ * InProgress bit to go down.
+ */
+ pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
+ SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
+ SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
+ write_csr(dd, SEND_PIO_INIT_CTXT, pio);
+ /*
+ * Wait until the engine is done. Give the chip the required time
+ * so, hopefully, we read the register just once.
+ */
+ udelay(2);
+ ret = pio_init_wait_progress(dd);
+ spin_unlock(&dd->sc_init_lock);
+ if (ret) {
+ dd_dev_err(dd,
+ "sctxt%u(%u): Context not enabled due to init failure %d\n",
+ sc->sw_index, sc->hw_context, ret);
+ goto unlock;
+ }
+
+ /*
+ * All is well. Enable the context.
+ */
+ sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
+ write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
+ /*
+ * Read SendCtxtCtrl to force the write out and prevent a timing
+ * hazard where a PIO write may reach the context before the enable.
+ */
+ read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
+ sc->flags |= SCF_ENABLED;
+
+unlock:
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ return ret;
+}
+
+/* force a credit return on the context */
+void sc_return_credits(struct send_context *sc)
+{
+ if (!sc)
+ return;
+
+ /* a 0->1 transition schedules a credit return */
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
+ SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
+ /*
+ * Ensure that the write is flushed and the credit return is
+ * scheduled. We care more about the 0 -> 1 transition.
+ */
+ read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
+ /* set back to 0 for next time */
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
+}
+
+/* allow all in-flight packets to drain on the context */
+void sc_flush(struct send_context *sc)
+{
+ if (!sc)
+ return;
+
+ sc_wait_for_packet_egress(sc, 1);
+}
+
+/* drop all packets on the context, no waiting until they are sent */
+void sc_drop(struct send_context *sc)
+{
+ if (!sc)
+ return;
+
+ dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
+ __func__, sc->sw_index, sc->hw_context);
+}
+
+/*
+ * Start the software reaction to a context halt or SPC freeze:
+ * - mark the context as halted or frozen
+ * - stop buffer allocations
+ *
+ * Called from the error interrupt. Other work is deferred until
+ * out of the interrupt.
+ */
+void sc_stop(struct send_context *sc, int flag)
+{
+ unsigned long flags;
+
+ /* mark the context */
+ sc->flags |= flag;
+
+ /* stop buffer allocations */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ sc->flags &= ~SCF_ENABLED;
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ wake_up(&sc->halt_wait);
+}
+
+#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
+#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
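+
+/*
+ * Example (assuming a 64-byte PIO block): BLOCK_DWORDS is 16, so a
+ * 20-DWORD packet needs dwords_to_blocks(20) == 2 blocks.
+ */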
+
+/*
+ * The send context buffer "allocator".
+ *
+ * @sc: the PIO send context we are allocating from
+ * @len: length of whole packet - including PBC - in dwords
+ * @cb: optional callback to call when the buffer is finished sending
+ * @arg: argument for cb
+ *
+ * Return a pointer to a PIO buffer if successful, NULL if not enough room.
+ */
+struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ pio_release_cb cb, void *arg)
+{
+ struct pio_buf *pbuf = NULL;
+ unsigned long flags;
+ unsigned long avail;
+ unsigned long blocks = dwords_to_blocks(dw_len);
+ unsigned long start_fill;
+ int trycount = 0;
+ u32 head, next;
+
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ if (!(sc->flags & SCF_ENABLED)) {
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ goto done;
+ }
+
+retry:
+ avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
+ if (blocks > avail) {
+ /* not enough room */
+ if (unlikely(trycount)) { /* already tried to get more room */
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ goto done;
+ }
+ /* copy from the releaser's cache line and recalculate */
+ sc->alloc_free = ACCESS_ONCE(sc->free);
+ avail =
+ (unsigned long)sc->credits -
+ (sc->fill - sc->alloc_free);
+ if (blocks > avail) {
+ /* still no room, actively update */
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ sc_release_update(sc);
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ sc->alloc_free = ACCESS_ONCE(sc->free);
+ trycount++;
+ goto retry;
+ }
+ }
+
+ /* there is enough room */
+
+ atomic_inc(&sc->buffers_allocated);
+
+ /* read this once */
+ head = sc->sr_head;
+
+ /* "allocate" the buffer */
+ start_fill = sc->fill;
+ sc->fill += blocks;
+
+ /*
+ * Fill the parts that the releaser looks at before moving the head.
+ * The only necessary piece is the sent_at field. The credits
+ * we have just allocated cannot have been returned yet, so the
+ * cb and arg will not be looked at for a "while". Put them
+ * on this side of the memory barrier anyway.
+ */
+ pbuf = &sc->sr[head].pbuf;
+ pbuf->sent_at = sc->fill;
+ pbuf->cb = cb;
+ pbuf->arg = arg;
+ pbuf->sc = sc; /* could be filled in at sc->sr init time */
+ /* make sure this is in memory before updating the head */
+
+ /* calculate next head index, do not store */
+ next = head + 1;
+ if (next >= sc->sr_size)
+ next = 0;
+ /*
+ * Update the head - must be last! - the releaser can look at
+ * fields in pbuf once we move the head.
+ */
+ smp_wmb();
+ sc->sr_head = next;
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ /* finish filling in the buffer outside the lock */
+ pbuf->start = sc->base_addr + ((start_fill % sc->credits)
+ * PIO_BLOCK_SIZE);
+ pbuf->size = sc->credits * PIO_BLOCK_SIZE;
+ pbuf->end = sc->base_addr + pbuf->size;
+ pbuf->block_count = blocks;
+ pbuf->qw_written = 0;
+ pbuf->carry_bytes = 0;
+ pbuf->carry.val64 = 0;
+done:
+ return pbuf;
+}
+
+/*
+ * There are at least two entities that can turn on credit return
+ * interrupts and they can overlap. Avoid problems by implementing
+ * a count scheme that is enforced by a lock. The lock is needed because
+ * the count and CSR write must be paired.
+ */
+
+/*
+ * Start credit return interrupts. This is managed by a count. If already
+ * on, just increment the count.
+ */
+void sc_add_credit_return_intr(struct send_context *sc)
+{
+ unsigned long flags;
+
+ /* lock must surround both the count change and the CSR update */
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+ if (sc->credit_intr_count == 0) {
+ sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+ }
+ sc->credit_intr_count++;
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+}
+
+/*
+ * Stop credit return interrupts. This is managed by a count. Decrement the
+ * count, if the last user, then turn the credit interrupts off.
+ */
+void sc_del_credit_return_intr(struct send_context *sc)
+{
+ unsigned long flags;
+
+ WARN_ON(sc->credit_intr_count == 0);
+
+ /* lock must surround both the count change and the CSR update */
+ spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
+ sc->credit_intr_count--;
+ if (sc->credit_intr_count == 0) {
+ sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
+ write_kctxt_csr(sc->dd, sc->hw_context,
+ SC(CREDIT_CTRL), sc->credit_ctrl);
+ }
+ spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
+}
+
+/*
+ * The caller must be careful when calling this: every call with
+ * needint set must be paired with a later call with needint clear,
+ * or the credit-return interrupt reference count becomes unbalanced.
+ */
+void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
+{
+ if (needint)
+ sc_add_credit_return_intr(sc);
+ else
+ sc_del_credit_return_intr(sc);
+ trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
+ if (needint) {
+ mmiowb();
+ sc_return_credits(sc);
+ }
+}
+
+/**
+ * sc_piobufavail - callback when a PIO buffer is available
+ * @sc: the send context
+ *
+ * This is called from the interrupt handler when a PIO buffer is
+ * available after hfi1_verbs_send() returned an error that no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+static void sc_piobufavail(struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE];
+ struct hfi1_qp *qp;
+ unsigned long flags;
+ unsigned i, n = 0;
+
+ if (dd->send_contexts[sc->sw_index].type != SC_KERNEL)
+ return;
+ list = &sc->piowait;
+ /*
+ * Note: checking that the piowait list is empty and clearing
+ * the buffer available interrupt needs to be atomic or we
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ while (!list_empty(list)) {
+ struct iowait *wait;
+
+ if (n == ARRAY_SIZE(qps))
+ goto full;
+ wait = list_first_entry(list, struct iowait, list);
+ qp = container_of(wait, struct hfi1_qp, s_iowait);
+ list_del_init(&qp->s_iowait.list);
+ /* refcount held until actual wake up */
+ qps[n++] = qp;
+ }
+ /*
+ * Counting: only call wantpiobuf_intr() if there were waiters and they
+ * are now all gone.
+ */
+ if (n)
+ hfi1_sc_wantpiobuf_intr(sc, 0);
+full:
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+
+ for (i = 0; i < n; i++)
+ hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO);
+}
+
+/* translate a send credit update to a bit code of reasons */
+static inline int fill_code(u64 hw_free)
+{
+ int code = 0;
+
+ if (hw_free & CR_STATUS_SMASK)
+ code |= PRC_STATUS_ERR;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
+ code |= PRC_PBC;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
+ code |= PRC_THRESHOLD;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
+ code |= PRC_FILL_ERR;
+ if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
+ code |= PRC_SC_DISABLE;
+ return code;
+}
+
+/* use the jiffies compare to get the wrap right */
+#define sent_before(a, b) time_before(a, b) /* a < b */
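+
+/*
+ * Like the jiffies helpers, this comparison is wrap-safe: it tests the
+ * signed difference, so e.g. sent_before(ULONG_MAX - 5, 5) is true
+ * even though the free counter has wrapped.
+ */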
+
+/*
+ * The send context buffer "releaser".
+ */
+void sc_release_update(struct send_context *sc)
+{
+ struct pio_buf *pbuf;
+ u64 hw_free;
+ u32 head, tail;
+ unsigned long old_free;
+ unsigned long extra;
+ unsigned long flags;
+ int code;
+
+ if (!sc)
+ return;
+
+ spin_lock_irqsave(&sc->release_lock, flags);
+ /* update free */
+ hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
+ old_free = sc->free;
+ extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
+ - (old_free & CR_COUNTER_MASK))
+ & CR_COUNTER_MASK;
+ sc->free = old_free + extra;
+ trace_hfi1_piofree(sc, extra);
+
+ /* call sent buffer callbacks */
+ code = -1; /* code not yet set */
+ head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ tail = sc->sr_tail;
+ while (head != tail) {
+ pbuf = &sc->sr[tail].pbuf;
+
+ if (sent_before(sc->free, pbuf->sent_at)) {
+ /* not sent yet */
+ break;
+ }
+ if (pbuf->cb) {
+ if (code < 0) /* fill in code on first user */
+ code = fill_code(hw_free);
+ (*pbuf->cb)(pbuf->arg, code);
+ }
+
+ tail++;
+ if (tail >= sc->sr_size)
+ tail = 0;
+ }
+ /* update tail, in case we moved it */
+ sc->sr_tail = tail;
+ spin_unlock_irqrestore(&sc->release_lock, flags);
+ sc_piobufavail(sc);
+}
+
+/*
+ * Send context group releaser. Argument is the send context that caused
+ * the interrupt. Called from the send context interrupt handler.
+ *
+ * Call release on all contexts in the group.
+ *
+ * This routine takes the sc_lock without an irqsave because it is only
+ * called from an interrupt handler. Adjust if that changes.
+ */
+void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
+{
+ struct send_context *sc;
+ u32 sw_index;
+ u32 gc, gc_end;
+
+ spin_lock(&dd->sc_lock);
+ sw_index = dd->hw_to_sw[hw_context];
+ if (unlikely(sw_index >= dd->num_send_contexts)) {
+ dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
+ __func__, hw_context, sw_index);
+ goto done;
+ }
+ sc = dd->send_contexts[sw_index].sc;
+ if (unlikely(!sc))
+ goto done;
+
+ gc = group_context(hw_context, sc->group);
+ gc_end = gc + group_size(sc->group);
+ for (; gc < gc_end; gc++) {
+ sw_index = dd->hw_to_sw[gc];
+ if (unlikely(sw_index >= dd->num_send_contexts)) {
+ dd_dev_err(dd,
+ "%s: invalid hw (%u) to sw (%u) mapping\n",
+ __func__, hw_context, sw_index);
+ continue;
+ }
+ sc_release_update(dd->send_contexts[sw_index].sc);
+ }
+done:
+ spin_unlock(&dd->sc_lock);
+}
+
+int init_pervl_scs(struct hfi1_devdata *dd)
+{
+ int i;
+ u64 mask, all_vl_mask = (u64) 0x80ff; /* VLs 0-7, 15 */
+ u32 ctxt;
+
+ dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
+ dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->vld[15].sc)
+ goto nomem;
+ hfi1_init_ctxt(dd->vld[15].sc);
+ dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
+ for (i = 0; i < num_vls; i++) {
+ /*
+ * Since this function does not deal with a specific
+ * receive context but we need the RcvHdrQ entry size,
+ * use the size from rcd[0]. It is guaranteed to be
+ * valid at this point and will remain the same for all
+ * receive contexts.
+ */
+ dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
+ dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->vld[i].sc)
+ goto nomem;
+
+ hfi1_init_ctxt(dd->vld[i].sc);
+
+ /* non VL15 start with the max MTU */
+ dd->vld[i].mtu = hfi1_max_mtu;
+ }
+ sc_enable(dd->vld[15].sc);
+ ctxt = dd->vld[15].sc->hw_context;
+ mask = all_vl_mask & ~(1LL << 15);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ dd_dev_info(dd,
+ "Using send context %u(%u) for VL15\n",
+ dd->vld[15].sc->sw_index, ctxt);
+ for (i = 0; i < num_vls; i++) {
+ sc_enable(dd->vld[i].sc);
+ ctxt = dd->vld[i].sc->hw_context;
+ mask = all_vl_mask & ~(1LL << i);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ }
+ return 0;
+nomem:
+ sc_free(dd->vld[15].sc);
+ for (i = 0; i < num_vls; i++)
+ sc_free(dd->vld[i].sc);
+ return -ENOMEM;
+}
+
+int init_credit_return(struct hfi1_devdata *dd)
+{
+ int ret;
+ int num_numa;
+ int i;
+
+ num_numa = num_online_nodes();
+ /* enforce the expectation that the NUMA nodes are compact */
+ for (i = 0; i < num_numa; i++) {
+ if (!node_online(i)) {
+ dd_dev_err(dd, "NUMA nodes are not compact\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+ dd->cr_base = kcalloc(
+ num_numa,
+ sizeof(struct credit_return_base),
+ GFP_KERNEL);
+ if (!dd->cr_base) {
+ dd_dev_err(dd, "Unable to allocate credit return base\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ for (i = 0; i < num_numa; i++) {
+ int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
+
+ set_dev_node(&dd->pcidev->dev, i);
+ dd->cr_base[i].va = dma_zalloc_coherent(
+ &dd->pcidev->dev,
+ bytes,
+ &dd->cr_base[i].pa,
+ GFP_KERNEL);
+ if (dd->cr_base[i].va == NULL) {
+ set_dev_node(&dd->pcidev->dev, dd->node);
+ dd_dev_err(dd,
+ "Unable to allocate credit return DMA range for NUMA %d\n",
+ i);
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+ set_dev_node(&dd->pcidev->dev, dd->node);
+
+ ret = 0;
+done:
+ return ret;
+}
+
+void free_credit_return(struct hfi1_devdata *dd)
+{
+ int num_numa;
+ int i;
+
+ if (!dd->cr_base)
+ return;
+
+ num_numa = num_online_nodes();
+ for (i = 0; i < num_numa; i++) {
+ if (dd->cr_base[i].va) {
+ dma_free_coherent(&dd->pcidev->dev,
+ TXE_NUM_CONTEXTS
+ * sizeof(struct credit_return),
+ dd->cr_base[i].va,
+ dd->cr_base[i].pa);
+ }
+ }
+ kfree(dd->cr_base);
+ dd->cr_base = NULL;
+}
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h
new file mode 100644
index 000000000000..0bb885ca3cfb
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/pio.h
@@ -0,0 +1,224 @@
+#ifndef _PIO_H
+#define _PIO_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* send context types */
+#define SC_KERNEL 0
+#define SC_ACK 1
+#define SC_USER 2
+#define SC_MAX 3
+
+/* invalid send context index */
+#define INVALID_SCI 0xff
+
+/* PIO buffer release callback function */
+typedef void (*pio_release_cb)(void *arg, int code);
+
+/* PIO release codes - in bits, as there could be more than one that apply */
+#define PRC_OK 0 /* no known error */
+#define PRC_STATUS_ERR 0x01 /* credit return due to status error */
+#define PRC_PBC 0x02 /* credit return due to PBC */
+#define PRC_THRESHOLD 0x04 /* credit return due to threshold */
+#define PRC_FILL_ERR 0x08 /* credit return due to fill error */
+#define PRC_FORCE 0x10 /* credit return due to credit force */
+#define PRC_SC_DISABLE 0x20 /* clean-up after a context disable */
+
+/* byte helper */
+union mix {
+ u64 val64;
+ u32 val32[2];
+ u8 val8[8];
+};
+
+/* an allocated PIO buffer */
+struct pio_buf {
+ struct send_context *sc;/* back pointer to owning send context */
+ pio_release_cb cb; /* called when the buffer is released */
+ void *arg; /* argument for cb */
+ void __iomem *start; /* buffer start address */
+ void __iomem *end; /* context end address */
+ unsigned long size; /* context size, in bytes */
+ unsigned long sent_at; /* buffer is sent when <= free */
+ u32 block_count; /* size of buffer, in blocks */
+ u32 qw_written; /* QW written so far */
+ u32 carry_bytes; /* number of valid bytes in carry */
+ union mix carry; /* pending unwritten bytes */
+};
+
+/* cache line aligned pio buffer array */
+union pio_shadow_ring {
+ struct pio_buf pbuf;
+ u64 unused[16]; /* cache line spacer */
+} ____cacheline_aligned;
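+
+/*
+ * The u64[16] spacer pads each ring entry to 128 bytes, a whole number
+ * of cache lines (assuming 64-byte lines), so that neighboring entries
+ * never share a line.
+ */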
+
+/* per-NUMA send context */
+struct send_context {
+ /* read-only after init */
+ struct hfi1_devdata *dd; /* device */
+ void __iomem *base_addr; /* start of PIO memory */
+ union pio_shadow_ring *sr; /* shadow ring */
+ volatile __le64 *hw_free; /* HW free counter */
+ struct work_struct halt_work; /* halted context work queue entry */
+ unsigned long flags; /* flags */
+ int node; /* context home node */
+ int type; /* context type */
+ u32 sw_index; /* software index number */
+ u32 hw_context; /* hardware context number */
+ u32 credits; /* number of blocks in context */
+ u32 sr_size; /* size of the shadow ring */
+ u32 group; /* credit return group */
+ /* allocator fields */
+ spinlock_t alloc_lock ____cacheline_aligned_in_smp;
+ unsigned long fill; /* official alloc count */
+ unsigned long alloc_free; /* copy of free (less cache thrash) */
+ u32 sr_head; /* shadow ring head */
+ /* releaser fields */
+ spinlock_t release_lock ____cacheline_aligned_in_smp;
+ unsigned long free; /* official free count */
+ u32 sr_tail; /* shadow ring tail */
+ /* list for PIO waiters */
+ struct list_head piowait ____cacheline_aligned_in_smp;
+ spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
+ u64 credit_ctrl; /* cache for credit control */
+ u32 credit_intr_count; /* count of credit intr users */
+ atomic_t buffers_allocated; /* count of buffers allocated */
+ wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
+};
+
+/* send context flags */
+#define SCF_ENABLED 0x01
+#define SCF_IN_FREE 0x02
+#define SCF_HALTED 0x04
+#define SCF_FROZEN 0x08
+
+struct send_context_info {
+ struct send_context *sc; /* allocated working context */
+ u16 allocated; /* has this been allocated? */
+ u16 type; /* context type */
+ u16 base; /* base in PIO array */
+ u16 credits; /* size in PIO array */
+};
+
+/* DMA credit return, index is always (context & 0x7) */
+struct credit_return {
+ volatile __le64 cr[8];
+};
+
+/* NUMA indexed credit return array */
+struct credit_return_base {
+ struct credit_return *va;
+ dma_addr_t pa;
+};
+
+/* send context configuration sizes (one per type) */
+struct sc_config_sizes {
+ short int size;
+ short int count;
+};
+
+/* send context functions */
+int init_credit_return(struct hfi1_devdata *dd);
+void free_credit_return(struct hfi1_devdata *dd);
+int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
+int init_send_contexts(struct hfi1_devdata *dd);
+int init_pervl_scs(struct hfi1_devdata *dd);
+struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
+ uint hdrqentsize, int numa);
+void sc_free(struct send_context *sc);
+int sc_enable(struct send_context *sc);
+void sc_disable(struct send_context *sc);
+int sc_restart(struct send_context *sc);
+void sc_return_credits(struct send_context *sc);
+void sc_flush(struct send_context *sc);
+void sc_drop(struct send_context *sc);
+void sc_stop(struct send_context *sc, int bit);
+struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ pio_release_cb cb, void *arg);
+void sc_release_update(struct send_context *sc);
+void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
+void sc_add_credit_return_intr(struct send_context *sc);
+void sc_del_credit_return_intr(struct send_context *sc);
+void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold);
+u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize);
+void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint);
+void sc_wait(struct hfi1_devdata *dd);
+void set_pio_integrity(struct send_context *sc);
+
+/* support functions */
+void pio_reset_all(struct hfi1_devdata *dd);
+void pio_freeze(struct hfi1_devdata *dd);
+void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+
+/* global PIO send control operations */
+#define PSC_GLOBAL_ENABLE 0
+#define PSC_GLOBAL_DISABLE 1
+#define PSC_GLOBAL_VLARB_ENABLE 2
+#define PSC_GLOBAL_VLARB_DISABLE 3
+#define PSC_CM_RESET 4
+#define PSC_DATA_VL_ENABLE 5
+#define PSC_DATA_VL_DISABLE 6
+
+void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
+void pio_send_control(struct hfi1_devdata *dd, int op);
+
+/* PIO copy routines */
+void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t count);
+void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t nbytes);
+void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
+void seg_pio_copy_end(struct pio_buf *pbuf);
+
+#endif /* _PIO_H */
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c
new file mode 100644
index 000000000000..8972bbc02038
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/pio_copy.c
@@ -0,0 +1,858 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+
+/* additive distance between non-SOP and SOP space */
+#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
+#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
+/* number of QUADWORDs in a block */
+#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))
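+
+/*
+ * Each send buffer is mapped twice: writes at the normal address carry
+ * ordinary data, while writes at address + SOP_DISTANCE land in the
+ * SOP=1 alias used for the first block of a packet (see pio_copy()).
+ */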
+
+/**
+ * pio_copy - copy data block to MMIO space
+ * @dd: the device
+ * @pbuf: a number of blocks allocated within a PIO send context
+ * @pbc: PBC to send
+ * @from: source, must be 8 byte aligned
+ * @count: number of DWORD (32-bit) quantities to copy from source
+ *
+ * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
+ * Must always write full PIO_BLOCK_SIZE-byte blocks. The first block must
+ * be written to the corresponding SOP=1 address.
+ *
+ * Known:
+ * o pbuf->start always starts on a block boundary
+ * o pbuf can wrap only at a block boundary
+ */
+void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t count)
+{
+ void __iomem *dest = pbuf->start + SOP_DISTANCE;
+ void __iomem *send = dest + PIO_BLOCK_SIZE;
+ void __iomem *dend; /* 8-byte data end */
+
+ /* write the PBC */
+ writeq(pbc, dest);
+ dest += sizeof(u64);
+
+ /* calculate where the QWORD data ends - in SOP=1 space */
+ dend = dest + ((count>>1) * sizeof(u64));
+
+ if (dend < send) {
+ /* all QWORD data is within the SOP block, does *not*
+ reach the end of the SOP block */
+
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /*
+ * No boundary checks are needed here:
+ * 0. We're not on the SOP block boundary
+ * 1. The possible DWORD dangle will still be within
+ * the SOP block
+ * 2. We cannot wrap except on a block boundary.
+ */
+ } else {
+ /* QWORD data extends _to_ or beyond the SOP block */
+
+ /* write 8-byte SOP chunk data */
+ while (dest < send) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /* drop out of the SOP range */
+ dest -= SOP_DISTANCE;
+ dend -= SOP_DISTANCE;
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written, but we will wrap in
+ * case there is a dangling DWORD.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->size;
+ dend -= pbuf->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ }
+ /* at this point we have wrapped if we are going to wrap */
+
+ /* write dangling u32, if any */
+ if (count & 1) {
+ union mix val;
+
+ val.val64 = 0;
+ val.val32[0] = *(u32 *)from;
+ writeq(val.val64, dest);
+ dest += sizeof(u64);
+ }
+ /* fill in rest of block, no need to check pbuf->end
+ as we only wrap on a block boundary */
+ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
+ writeq(0, dest);
+ dest += sizeof(u64);
+ }
+
+ /* finished with this buffer */
+ atomic_dec(&pbuf->sc->buffers_allocated);
+}
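+
+/*
+ * Usage sketch (illustrative; the buffer is assumed to have been
+ * allocated with sc_buffer_alloc() from pio.c, and the local names
+ * are assumptions, not part of this file):
+ *
+ *	struct pio_buf *buf = sc_buffer_alloc(sc, plen, NULL, NULL);
+ *
+ *	if (buf)
+ *		pio_copy(dd, buf, pbc, payload, dwords);
+ *
+ * count is in 32-bit DWORDs; a trailing odd DWORD is zero-padded to a
+ * full quad word before the rest of the block is zero-filled.
+ */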
+
+/* USE_SHIFTS is faster in user-space tests on a Xeon X5570 @ 2.93GHz */
+#define USE_SHIFTS 1
+#ifdef USE_SHIFTS
+/*
+ * Handle carry bytes using shifts and masks.
+ *
+ * NOTE: the value of the unused portion of carry is expected to always be zero.
+ */
+
+/*
+ * "zero" shift - bit shift used to zero out upper bytes. Input is
+ * the count of LSB bytes to preserve.
+ */
+#define zshift(x) (8 * (8-(x)))
+
+/*
+ * "merge" shift - bit shift used to merge with carry bytes. Input is
+ * the LSB byte count to move beyond.
+ */
+#define mshift(x) (8 * (x))
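+
+/*
+ * Worked example (for clarity; not used by the code): to keep only the
+ * 3 LSB bytes of a u64 x, compute (x << zshift(3)) >> zshift(3), where
+ * zshift(3) = 8 * (8 - 3) = 40 bits. To merge new data above 3 carry
+ * bytes, shift it up by mshift(3) = 24 bits.
+ */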
+
+/*
+ * Read nbytes bytes from "from" and return them in the LSB bytes
+ * of pbuf->carry. Other bytes are zeroed. Any previous value in
+ * pbuf->carry is lost.
+ *
+ * NOTES:
+ * o do not read from "from" if nbytes is zero
+ * o from may _not_ be u64 aligned
+ * o nbytes must not span a QW boundary
+ */
+static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
+ unsigned int nbytes)
+{
+ unsigned long off;
+
+ if (nbytes == 0) {
+ pbuf->carry.val64 = 0;
+ } else {
+ /* align our pointer */
+ off = (unsigned long)from & 0x7;
+ from = (void *)((unsigned long)from & ~0x7l);
+ pbuf->carry.val64 = ((*(u64 *)from)
+ << zshift(nbytes + off))/* zero upper bytes */
+ >> zshift(nbytes); /* place at bottom */
+ }
+ pbuf->carry_bytes = nbytes;
+}
+
+/*
+ * Read nbytes bytes from "from" and put them at the next significant bytes
+ * of pbuf->carry. Unused bytes are zeroed. It is expected that the extra
+ * read does not overfill carry.
+ *
+ * NOTES:
+ * o from may _not_ be u64 aligned
+ * o nbytes may span a QW boundary
+ */
+static inline void read_extra_bytes(struct pio_buf *pbuf,
+ const void *from, unsigned int nbytes)
+{
+ unsigned long off = (unsigned long)from & 0x7;
+ unsigned int room, xbytes;
+
+ /* align our pointer */
+ from = (void *)((unsigned long)from & ~0x7l);
+
+ /* check count first - don't read anything if count is zero */
+ while (nbytes) {
+ /* find the number of bytes in this u64 */
+ room = 8 - off; /* this u64 has room for this many bytes */
+ xbytes = nbytes > room ? room : nbytes;
+
+ /*
+ * shift down to zero lower bytes, shift up to zero upper
+ * bytes, shift back down to move into place
+ */
+ pbuf->carry.val64 |= (((*(u64 *)from)
+ >> mshift(off))
+ << zshift(xbytes))
+ >> zshift(xbytes+pbuf->carry_bytes);
+ off = 0;
+ pbuf->carry_bytes += xbytes;
+ nbytes -= xbytes;
+ from += sizeof(u64);
+ }
+}
+
+/*
+ * Zero extra bytes from the end of pbuf->carry.
+ *
+ * NOTES:
+ * o zbytes <= old_bytes
+ */
+static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
+{
+ unsigned int remaining;
+
+ if (zbytes == 0) /* nothing to do */
+ return;
+
+ remaining = pbuf->carry_bytes - zbytes; /* remaining bytes */
+
+ /* NOTE: zshift only guaranteed to work if remaining != 0 */
+ if (remaining)
+ pbuf->carry.val64 = (pbuf->carry.val64 << zshift(remaining))
+ >> zshift(remaining);
+ else
+ pbuf->carry.val64 = 0;
+ pbuf->carry_bytes = remaining;
+}
+
+/*
+ * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
+ * Put the unused part of the next 8 bytes of src into the LSB bytes of
+ * pbuf->carry with the upper bytes zeroed.
+ *
+ * NOTES:
+ * o result must keep unused bytes zeroed
+ * o src must be u64 aligned
+ */
+static inline void merge_write8(
+ struct pio_buf *pbuf,
+ void __iomem *dest,
+ const void *src)
+{
+ u64 new, temp;
+
+ new = *(u64 *)src;
+ temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
+ writeq(temp, dest);
+ pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
+}
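+
+/*
+ * Worked example (illustrative values): with carry_bytes == 3 and
+ * carry.val64 == 0x0000000000cccccc, a source QW of 0x1122334455667788
+ * yields writeq(0x4455667788cccccc, dest) and a new carry of
+ * 0x0000000000112233 (still 3 valid bytes, upper bytes zero).
+ */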
+
+/*
+ * Write a quad word using all bytes of carry.
+ */
+static inline void carry8_write8(union mix carry, void __iomem *dest)
+{
+ writeq(carry.val64, dest);
+}
+
+/*
+ * Write a quad word using all the valid bytes of carry. If carry
+ * has zero valid bytes, nothing is written.
+ * Returns 0 on nothing written, non-zero on quad word written.
+ */
+static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
+{
+ if (pbuf->carry_bytes) {
+ /* unused bytes are always kept zeroed, so just write */
+ writeq(pbuf->carry.val64, dest);
+ return 1;
+ }
+
+ return 0;
+}
+
+#else /* USE_SHIFTS */
+/*
+ * Handle carry bytes using byte copies.
+ *
+ * NOTE: the value of the unused portion of carry is left uninitialized.
+ */
+
+/*
+ * Jump copy - no-loop copy for < 8 bytes. Each switch case deliberately
+ * falls through to the next so that exactly n bytes are copied.
+ */
+static inline void jcopy(u8 *dest, const u8 *src, u32 n)
+{
+ switch (n) {
+ case 7:
+ *dest++ = *src++;
+ case 6:
+ *dest++ = *src++;
+ case 5:
+ *dest++ = *src++;
+ case 4:
+ *dest++ = *src++;
+ case 3:
+ *dest++ = *src++;
+ case 2:
+ *dest++ = *src++;
+ case 1:
+ *dest++ = *src++;
+ }
+}
+
+/*
+ * Read nbytes from "from" and place them in the low bytes
+ * of pbuf->carry. Other bytes are left as-is. Any previous
+ * value in pbuf->carry is lost.
+ *
+ * NOTES:
+ * o do not read from "from" if nbytes is zero
+ * o from may _not_ be u64 aligned.
+ */
+static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
+ unsigned int nbytes)
+{
+ jcopy(&pbuf->carry.val8[0], from, nbytes);
+ pbuf->carry_bytes = nbytes;
+}
+
+/*
+ * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
+ * It is expected that the extra read does not overfill carry.
+ *
+ * NOTES:
+ * o from may _not_ be u64 aligned
+ * o nbytes may span a QW boundary
+ */
+static inline void read_extra_bytes(struct pio_buf *pbuf,
+ const void *from, unsigned int nbytes)
+{
+ jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
+ pbuf->carry_bytes += nbytes;
+}
+
+/*
+ * Zero extra bytes from the end of pbuf->carry.
+ *
+ * We do not care about the value of unused bytes in carry, so just
+ * reduce the byte count.
+ *
+ * NOTES:
+ * o zbytes <= old_bytes
+ */
+static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
+{
+ pbuf->carry_bytes -= zbytes;
+}
+
+/*
+ * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
+ * Put the unused part of the next 8 bytes of src into the low bytes of
+ * pbuf->carry.
+ */
+static inline void merge_write8(
+ struct pio_buf *pbuf,
+ void *dest,
+ const void *src)
+{
+ u32 remainder = 8 - pbuf->carry_bytes;
+
+ jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder);
+ writeq(pbuf->carry.val64, dest);
+ jcopy(&pbuf->carry.val8[0], src+remainder, pbuf->carry_bytes);
+}
+
+/*
+ * Write a quad word using all bytes of carry.
+ */
+static inline void carry8_write8(union mix carry, void *dest)
+{
+ writeq(carry.val64, dest);
+}
+
+/*
+ * Write a quad word using all the valid bytes of carry. If carry
+ * has zero valid bytes, nothing is written.
+ * Returns 0 on nothing written, non-zero on quad word written.
+ */
+static inline int carry_write8(struct pio_buf *pbuf, void *dest)
+{
+ if (pbuf->carry_bytes) {
+ u64 zero = 0;
+
+ jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
+ 8 - pbuf->carry_bytes);
+ writeq(pbuf->carry.val64, dest);
+ return 1;
+ }
+
+ return 0;
+}
+#endif /* USE_SHIFTS */
+
+/*
+ * Segmented PIO Copy - start
+ *
+ * Start a PIO copy.
+ *
+ * @pbuf: destination buffer
+ * @pbc: the PBC for the PIO buffer
+ * @from: data source, QWORD aligned
+ * @nbytes: bytes to copy
+ */
+void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
+ const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + SOP_DISTANCE;
+ void __iomem *send = dest + PIO_BLOCK_SIZE;
+ void __iomem *dend; /* 8-byte data end */
+
+ writeq(pbc, dest);
+ dest += sizeof(u64);
+
+ /* calculate where the QWORD data ends - in SOP=1 space */
+ dend = dest + ((nbytes>>3) * sizeof(u64));
+
+ if (dend < send) {
+ /* all QWORD data is within the SOP block, does *not*
+ reach the end of the SOP block */
+
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /*
+ * No boundary checks are needed here:
+ * 0. We're not on the SOP block boundary
+ * 1. The possible DWORD dangle will still be within
+ * the SOP block
+ * 2. We cannot wrap except on a block boundary.
+ */
+ } else {
+ /* QWORD data extends _to_ or beyond the SOP block */
+
+ /* write 8-byte SOP chunk data */
+ while (dest < send) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ /* drop out of the SOP range */
+ dest -= SOP_DISTANCE;
+ dend -= SOP_DISTANCE;
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written, but we will wrap in
+ * case there is a dangling DWORD.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->size;
+ dend -= pbuf->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+ }
+ /* at this point we have wrapped if we are going to wrap */
+
+ /* ...but it doesn't matter as we're done writing */
+
+ /* save dangling bytes, if any */
+ read_low_bytes(pbuf, from, nbytes & 0x7);
+
+ pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
+}
+
+/*
+ * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
+ * bytes are non-zero.
+ *
+ * Whole u64s must be written to the chip, so bytes must be manually merged.
+ *
+ * @pbuf: destination buffer
+ * @from: data source, is QWORD aligned.
+ * @nbytes: bytes to copy
+ *
+ * Must handle nbytes < 8.
+ */
+static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+ void __iomem *dend; /* 8-byte data end */
+ unsigned long qw_to_write = (pbuf->carry_bytes + nbytes) >> 3;
+ unsigned long bytes_left = (pbuf->carry_bytes + nbytes) & 0x7;
+
+ /* calculate 8-byte data end */
+ dend = dest + (qw_to_write * sizeof(u64));
+
+ if (pbuf->qw_written < PIO_BLOCK_QWS) {
+ /*
+ * Still within SOP block. We don't need to check for
+ * wrap because we are still in the first block and
+ * can only wrap on block boundaries.
+ */
+ void __iomem *send; /* SOP end */
+ void __iomem *xend;
+
+ /* calculate the end of data or end of block, whichever
+ comes first */
+ send = pbuf->start + PIO_BLOCK_SIZE;
+ xend = send < dend ? send : dend;
+
+ /* shift up to SOP=1 space */
+ dest += SOP_DISTANCE;
+ xend += SOP_DISTANCE;
+
+ /* write 8-byte chunk data */
+ while (dest < xend) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* shift down to SOP=0 space */
+ dest -= SOP_DISTANCE;
+ }
+ /*
+ * At this point dest could be (either, both, or neither):
+ * - at dend
+ * - at the wrap
+ */
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If dest is already at the wrap, we will fall into the if,
+ * skip the loop, and wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->size;
+ dend -= pbuf->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ merge_write8(pbuf, dest, from);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* adjust carry */
+ if (pbuf->carry_bytes < bytes_left) {
+ /* need to read more */
+ read_extra_bytes(pbuf, from, bytes_left - pbuf->carry_bytes);
+ } else {
+ /* remove invalid bytes */
+ zero_extra_bytes(pbuf, pbuf->carry_bytes - bytes_left);
+ }
+
+ pbuf->qw_written += qw_to_write;
+}
+
+/*
+ * Mid copy helper, "straight case" - source pointer is 64-bit aligned
+ * with no carry bytes.
+ *
+ * @pbuf: destination buffer
+ * @from: data source, is QWORD aligned
+ * @nbytes: bytes to copy
+ *
+ * Must handle nbytes < 8.
+ */
+static void mid_copy_straight(struct pio_buf *pbuf,
+ const void *from, size_t nbytes)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+ void __iomem *dend; /* 8-byte data end */
+
+ /* calculate 8-byte data end */
+ dend = dest + ((nbytes>>3) * sizeof(u64));
+
+ if (pbuf->qw_written < PIO_BLOCK_QWS) {
+ /*
+ * Still within SOP block. We don't need to check for
+ * wrap because we are still in the first block and
+ * can only wrap on block boundaries.
+ */
+ void __iomem *send; /* SOP end */
+ void __iomem *xend;
+
+ /* calculate the end of data or end of block, whichever
+ comes first */
+ send = pbuf->start + PIO_BLOCK_SIZE;
+ xend = send < dend ? send : dend;
+
+ /* shift up to SOP=1 space */
+ dest += SOP_DISTANCE;
+ xend += SOP_DISTANCE;
+
+ /* write 8-byte chunk data */
+ while (dest < xend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* shift down to SOP=0 space */
+ dest -= SOP_DISTANCE;
+ }
+ /*
+ * At this point dest could be (either, both, or neither):
+ * - at dend
+ * - at the wrap
+ */
+
+ /*
+ * If the wrap comes before or matches the data end,
+ * copy until the wrap, then wrap.
+ *
+ * If dest is already at the wrap, we will fall into the if,
+ * skip the loop, and wrap.
+ *
+ * If the data ends at the end of the SOP above and
+ * the buffer wraps, then pbuf->end == dend == dest
+ * and nothing will get written.
+ */
+ if (pbuf->end <= dend) {
+ while (dest < pbuf->end) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ dest -= pbuf->size;
+ dend -= pbuf->size;
+ }
+
+ /* write 8-byte non-SOP, non-wrap chunk data */
+ while (dest < dend) {
+ writeq(*(u64 *)from, dest);
+ from += sizeof(u64);
+ dest += sizeof(u64);
+ }
+
+ /* we know carry_bytes was zero on entry to this routine */
+ read_low_bytes(pbuf, from, nbytes & 0x7);
+
+ pbuf->qw_written += nbytes>>3;
+}
+
+/*
+ * Segmented PIO Copy - middle
+ *
+ * Must handle a carry (tail bytes) left by a previous call and a
+ * source of any alignment, with any byte count.
+ *
+ * @pbuf: a number of blocks allocated within a PIO send context
+ * @from: data source
+ * @nbytes: number of bytes to copy
+ */
+void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
+{
+ unsigned long from_align = (unsigned long)from & 0x7;
+
+ if (pbuf->carry_bytes + nbytes < 8) {
+ /* not enough bytes to fill a QW */
+ read_extra_bytes(pbuf, from, nbytes);
+ return;
+ }
+
+ if (from_align) {
+ /* misaligned source pointer - align it */
+ unsigned long to_align;
+
+ /* bytes to read to align "from" */
+ to_align = 8 - from_align;
+
+ /*
+ * In the advance-to-alignment logic below, we do not need
+ * to check if we are using more than nbytes. This is because
+ * if we are here, we already know that carry+nbytes will
+ * fill at least one QW.
+ */
+ if (pbuf->carry_bytes + to_align < 8) {
+ /* not enough align bytes to fill a QW */
+ read_extra_bytes(pbuf, from, to_align);
+ from += to_align;
+ nbytes -= to_align;
+ } else {
+ /* bytes to fill carry */
+ unsigned long to_fill = 8 - pbuf->carry_bytes;
+ /* bytes left over to be read */
+ unsigned long extra = to_align - to_fill;
+ void __iomem *dest;
+
+ /* fill carry... */
+ read_extra_bytes(pbuf, from, to_fill);
+ from += to_fill;
+ nbytes -= to_fill;
+
+ /* ...now write carry */
+ dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+
+ /*
+ * The two checks immediately below cannot both be
+ * true, hence the else. If we have wrapped, we
+ * cannot still be within the first block.
+ * Conversely, if we are still in the first block, we
+ * cannot have wrapped. We do the wrap check first
+ * as that is more likely.
+ */
+ /* adjust if we've wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->size;
+ /* jump to SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ carry8_write8(pbuf->carry, dest);
+ pbuf->qw_written++;
+
+ /* read any extra bytes to do final alignment */
+ /* this will overwrite anything in pbuf->carry */
+ read_low_bytes(pbuf, from, extra);
+ from += extra;
+ nbytes -= extra;
+ }
+
+ /* at this point, from is QW aligned */
+ }
+
+ if (pbuf->carry_bytes)
+ mid_copy_mix(pbuf, from, nbytes);
+ else
+ mid_copy_straight(pbuf, from, nbytes);
+}
+
+/*
+ * Segmented PIO Copy - end
+ *
+ * Write any remainder (in pbuf->carry) and finish writing the whole block.
+ *
+ * @pbuf: a number of blocks allocated within a PIO send context
+ */
+void seg_pio_copy_end(struct pio_buf *pbuf)
+{
+ void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
+
+ /*
+ * The two checks immediately below cannot both be true, hence the
+ * else. If we have wrapped, we cannot still be within the first
+ * block. Conversely, if we are still in the first block, we
+ * cannot have wrapped. We do the wrap check first as that is
+ * more likely.
+ */
+ /* adjust if we have wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->size;
+ /* jump to the SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ /* write final bytes, if any */
+ if (carry_write8(pbuf, dest)) {
+ dest += sizeof(u64);
+ /*
+ * NOTE: We do not need to recalculate whether dest needs
+ * SOP_DISTANCE or not.
+ *
+ * If we are in the first block and the dangle write
+ * keeps us in the same block, dest will need
+ * to retain SOP_DISTANCE in the loop below.
+ *
+ * If we are in the first block and the dangle write pushes
+ * us to the next block, then loop below will not run
+ * and dest is not used. Hence we do not need to update
+ * it.
+ *
+ * If we are past the first block, then SOP_DISTANCE
+ * was never added, so there is nothing to do.
+ */
+ }
+
+ /* fill in rest of block */
+ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
+ writeq(0, dest);
+ dest += sizeof(u64);
+ }
+
+ /* finished with this buffer */
+ atomic_dec(&pbuf->sc->buffers_allocated);
+}
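+
+/*
+ * Usage sketch for the segmented copy (illustrative; the header and
+ * payload names are assumptions): a caller streams a QW-aligned header
+ * and a possibly unaligned payload into one PIO buffer:
+ *
+ *	seg_pio_copy_start(pbuf, pbc, hdr, hdr_bytes);
+ *	seg_pio_copy_mid(pbuf, payload, payload_bytes);
+ *	seg_pio_copy_end(pbuf);
+ *
+ * start requires a QW-aligned source; mid accepts any alignment and
+ * byte count and may be called repeatedly; end flushes the carry and
+ * zero-fills the remainder of the final block.
+ */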
diff --git a/drivers/staging/rdma/hfi1/platform_config.h b/drivers/staging/rdma/hfi1/platform_config.h
new file mode 100644
index 000000000000..8a94a8342052
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/platform_config.h
@@ -0,0 +1,286 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef __PLATFORM_CONFIG_H
+#define __PLATFORM_CONFIG_H
+
+#define METADATA_TABLE_FIELD_START_SHIFT 0
+#define METADATA_TABLE_FIELD_START_LEN_BITS 15
+#define METADATA_TABLE_FIELD_LEN_SHIFT 16
+#define METADATA_TABLE_FIELD_LEN_LEN_BITS 16
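+
+/*
+ * Each metadata word packs a field's start bit and bit length. A
+ * minimal extraction sketch (illustrative; the driver's table parser
+ * does the equivalent):
+ *
+ *	start = (meta >> METADATA_TABLE_FIELD_START_SHIFT) &
+ *		((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
+ *	len = (meta >> METADATA_TABLE_FIELD_LEN_SHIFT) &
+ *	      ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
+ */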
+
+/* Header structure */
+#define PLATFORM_CONFIG_HEADER_RECORD_IDX_SHIFT 0
+#define PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS 6
+#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT 16
+#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS 12
+#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT 28
+#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS 4
+
+enum platform_config_table_type_encoding {
+ PLATFORM_CONFIG_TABLE_RESERVED,
+ PLATFORM_CONFIG_SYSTEM_TABLE,
+ PLATFORM_CONFIG_PORT_TABLE,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ PLATFORM_CONFIG_TX_PRESET_TABLE,
+ PLATFORM_CONFIG_QSFP_ATTEN_TABLE,
+ PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE,
+ PLATFORM_CONFIG_TABLE_MAX
+};
+
+enum platform_config_system_table_fields {
+ SYSTEM_TABLE_RESERVED,
+ SYSTEM_TABLE_NODE_STRING,
+ SYSTEM_TABLE_SYSTEM_IMAGE_GUID,
+ SYSTEM_TABLE_NODE_GUID,
+ SYSTEM_TABLE_REVISION,
+ SYSTEM_TABLE_VENDOR_OUI,
+ SYSTEM_TABLE_META_VERSION,
+ SYSTEM_TABLE_DEVICE_ID,
+ SYSTEM_TABLE_PARTITION_ENFORCEMENT_CAP,
+ SYSTEM_TABLE_QSFP_POWER_CLASS_MAX,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_12G,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
+ SYSTEM_TABLE_VARIABLE_TABLE_ENTRIES_PER_PORT,
+ SYSTEM_TABLE_MAX
+};
+
+enum platform_config_port_table_fields {
+ PORT_TABLE_RESERVED,
+ PORT_TABLE_PORT_TYPE,
+ PORT_TABLE_ATTENUATION_12G,
+ PORT_TABLE_ATTENUATION_25G,
+ PORT_TABLE_LINK_SPEED_SUPPORTED,
+ PORT_TABLE_LINK_WIDTH_SUPPORTED,
+ PORT_TABLE_VL_CAP,
+ PORT_TABLE_MTU_CAP,
+ PORT_TABLE_TX_LANE_ENABLE_MASK,
+ PORT_TABLE_LOCAL_MAX_TIMEOUT,
+ PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED,
+ PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED,
+ PORT_TABLE_TX_PRESET_IDX_PASSIVE_CU,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
+ PORT_TABLE_RX_PRESET_IDX,
+ PORT_TABLE_CABLE_REACH_CLASS,
+ PORT_TABLE_MAX
+};
+
+enum platform_config_rx_preset_table_fields {
+ RX_PRESET_TABLE_RESERVED,
+ RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_EQ_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_CDR,
+ RX_PRESET_TABLE_QSFP_RX_EQ,
+ RX_PRESET_TABLE_QSFP_RX_AMP,
+ RX_PRESET_TABLE_MAX
+};
+
+enum platform_config_tx_preset_table_fields {
+ TX_PRESET_TABLE_RESERVED,
+ TX_PRESET_TABLE_PRECUR,
+ TX_PRESET_TABLE_ATTN,
+ TX_PRESET_TABLE_POSTCUR,
+ TX_PRESET_TABLE_QSFP_TX_CDR_APPLY,
+ TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
+ TX_PRESET_TABLE_QSFP_TX_CDR,
+ TX_PRESET_TABLE_QSFP_TX_EQ,
+ TX_PRESET_TABLE_MAX
+};
+
+enum platform_config_qsfp_attn_table_fields {
+ QSFP_ATTEN_TABLE_RESERVED,
+ QSFP_ATTEN_TABLE_TX_PRESET_IDX,
+ QSFP_ATTEN_TABLE_RX_PRESET_IDX,
+ QSFP_ATTEN_TABLE_MAX
+};
+
+enum platform_config_variable_settings_table_fields {
+ VARIABLE_SETTINGS_TABLE_RESERVED,
+ VARIABLE_SETTINGS_TABLE_TX_PRESET_IDX,
+ VARIABLE_SETTINGS_TABLE_RX_PRESET_IDX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
+struct platform_config_data {
+ u32 *table;
+ u32 *table_metadata;
+ u32 num_table;
+};
+
+/*
+ * This struct acts as a quick reference into the platform_data binary image
+ * and is populated by parse_platform_config(...) depending on the specific
+ * META_VERSION
+ */
+struct platform_config_cache {
+ u8 cache_valid;
+ struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
+};
+
+static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
+ 0,
+ SYSTEM_TABLE_MAX,
+ PORT_TABLE_MAX,
+ RX_PRESET_TABLE_MAX,
+ TX_PRESET_TABLE_MAX,
+ QSFP_ATTEN_TABLE_MAX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
+/* This section defines default values and encodings for the
+ * fields defined for each table above
+ */
+
+/*=====================================================
+ * System table encodings
+ *====================================================*/
+#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041
+#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4
+
+/*
+ * These power classes are the same as defined in SFF 8636 spec rev 2.4
+ * describing byte 129 in table 6-16, except enumerated in a different order
+ */
+enum platform_config_qsfp_power_class_encoding {
+ QSFP_POWER_CLASS_1 = 1,
+ QSFP_POWER_CLASS_2,
+ QSFP_POWER_CLASS_3,
+ QSFP_POWER_CLASS_4,
+ QSFP_POWER_CLASS_5,
+ QSFP_POWER_CLASS_6,
+ QSFP_POWER_CLASS_7
+};
+
+/*=====================================================
+ * Port table encodings
+ *====================================================*/
+enum platform_config_port_type_encoding {
+ PORT_TYPE_RESERVED,
+ PORT_TYPE_DISCONNECTED,
+ PORT_TYPE_FIXED,
+ PORT_TYPE_VARIABLE,
+ PORT_TYPE_QSFP,
+ PORT_TYPE_MAX
+};
+
+enum platform_config_link_speed_supported_encoding {
+ LINK_SPEED_SUPP_12G = 1,
+ LINK_SPEED_SUPP_25G,
+ LINK_SPEED_SUPP_12G_25G,
+ LINK_SPEED_SUPP_MAX
+};
+
+/*
+ * This is a subset (not strict) of the link downgrades
+ * supported. The link downgrades supported are expected
+ * to be supplied to the driver by another entity such as
+ * the fabric manager
+ */
+enum platform_config_link_width_supported_encoding {
+ LINK_WIDTH_SUPP_1X = 1,
+ LINK_WIDTH_SUPP_2X,
+ LINK_WIDTH_SUPP_2X_1X,
+ LINK_WIDTH_SUPP_3X,
+ LINK_WIDTH_SUPP_3X_1X,
+ LINK_WIDTH_SUPP_3X_2X,
+ LINK_WIDTH_SUPP_3X_2X_1X,
+ LINK_WIDTH_SUPP_4X,
+ LINK_WIDTH_SUPP_4X_1X,
+ LINK_WIDTH_SUPP_4X_2X,
+ LINK_WIDTH_SUPP_4X_2X_1X,
+ LINK_WIDTH_SUPP_4X_3X,
+ LINK_WIDTH_SUPP_4X_3X_1X,
+ LINK_WIDTH_SUPP_4X_3X_2X,
+ LINK_WIDTH_SUPP_4X_3X_2X_1X,
+ LINK_WIDTH_SUPP_MAX
+};
+
+enum platform_config_virtual_lane_capability_encoding {
+ VL_CAP_VL0 = 1,
+ VL_CAP_VL0_1,
+ VL_CAP_VL0_2,
+ VL_CAP_VL0_3,
+ VL_CAP_VL0_4,
+ VL_CAP_VL0_5,
+ VL_CAP_VL0_6,
+ VL_CAP_VL0_7,
+ VL_CAP_VL0_8,
+ VL_CAP_VL0_9,
+ VL_CAP_VL0_10,
+ VL_CAP_VL0_11,
+ VL_CAP_VL0_12,
+ VL_CAP_VL0_13,
+ VL_CAP_VL0_14,
+ VL_CAP_MAX
+};
+
+/* Max MTU */
+enum platform_config_mtu_capability_encoding {
+ MTU_CAP_256 = 1,
+ MTU_CAP_512 = 2,
+ MTU_CAP_1024 = 3,
+ MTU_CAP_2048 = 4,
+ MTU_CAP_4096 = 5,
+ MTU_CAP_8192 = 6,
+ MTU_CAP_10240 = 7
+};
+
+enum platform_config_local_max_timeout_encoding {
+ LOCAL_MAX_TIMEOUT_10_MS = 1,
+ LOCAL_MAX_TIMEOUT_100_MS,
+ LOCAL_MAX_TIMEOUT_1_S,
+ LOCAL_MAX_TIMEOUT_10_S,
+ LOCAL_MAX_TIMEOUT_100_S,
+ LOCAL_MAX_TIMEOUT_1000_S
+};
+
+#endif /*__PLATFORM_CONFIG_H*/
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
new file mode 100644
index 000000000000..df1fa56eaf85
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -0,0 +1,1687 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+#include "hfi.h"
+#include "qp.h"
+#include "trace.h"
+#include "sdma.h"
+
+#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+static unsigned int hfi1_qp_table_size = 256;
+module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+static void flush_tx_list(struct hfi1_qp *qp);
+static int iowait_sleep(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *stx,
+ unsigned seq);
+static void iowait_wakeup(struct iowait *wait, int reason);
+
+static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt,
+ struct qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * BITS_PER_PAGE + off;
+}
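+
+/*
+ * Example (assuming 4 KiB pages, so BITS_PER_PAGE == 32768): bit 5 of
+ * map index 2 corresponds to QPN 2 * 32768 + 5 == 65541.
+ */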
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static const u16 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
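+
+/*
+ * For example, credit code 0x14 encodes 1024 RWQEs; the codes grow
+ * roughly geometrically so that 5 bits span 0..32768 credits.
+ * hfi1_compute_aeth() binary searches this table for the largest code
+ * whose credit count does not exceed the credits available.
+ */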
+
+static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/*
+ * Allocate the next available QPN or
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt,
+ enum ib_qp_type type, u8 port)
+{
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+ u32 ret;
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ qpn = qpt->last + qpt->incr;
+ if (qpn >= QPN_MAX)
+ qpn = qpt->incr | ((qpt->last & 1) ^ 1);
+ /* offset carries bit 0 */
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset += qpt->incr;
+ /*
+ * This qpn might be bogus if offset >= BITS_PER_PAGE.
+ * That is OK. It gets re-assigned below
+ */
+ qpn = mk_qpn(qpt, map, offset);
+ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else {
+ map = &qpt->map[0];
+ /* wrap to first map page, invert bit 0 */
+ offset = qpt->incr | ((offset & 1) ^ 1);
+ }
+ /* there can be no bits at shift and below */
+ WARN_ON(offset & (dd->qos_shift - 1));
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
+
+static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn)
+{
+ struct qpn_map *map;
+
+ map = qpt->map + qpn / BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ unsigned long flags;
+
+ atomic_inc(&qp->refcount);
+ spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num <= 1) {
+ rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
+ } else {
+ u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
+
+ qp->next = dev->qp_dev->qp_table[n];
+ rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
+ trace_hfi1_qpinsert(qp, n);
+ }
+
+ spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
+ unsigned long flags;
+ int removed = 1;
+
+ spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
+
+ if (rcu_dereference_protected(ibp->qp[0],
+ lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(ibp->qp[0], NULL);
+ } else if (rcu_dereference_protected(ibp->qp[1],
+ lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(ibp->qp[1], NULL);
+ } else {
+ struct hfi1_qp *q;
+ struct hfi1_qp __rcu **qpp;
+
+ removed = 0;
+ qpp = &dev->qp_dev->qp_table[n];
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qp_dev->qpt_lock)))
+ != NULL;
+ qpp = &q->next)
+ if (q == qp) {
+ RCU_INIT_POINTER(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&dev->qp_dev->qpt_lock)));
+ removed = 1;
+ trace_hfi1_qpremove(qp, n);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+ if (removed) {
+ synchronize_rcu();
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/**
+ * free_all_qps - check for QPs still in use
+ * @dd: the hfi1 device data
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs still in use.
+ */
+static unsigned free_all_qps(struct hfi1_devdata *dd)
+{
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ struct hfi1_qp *qp;
+ unsigned n, qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
+
+ if (!hfi1_mcast_tree_empty(ibp))
+ qp_inuse++;
+ rcu_read_lock();
+ if (rcu_dereference(ibp->qp[0]))
+ qp_inuse++;
+ if (rcu_dereference(ibp->qp[1]))
+ qp_inuse++;
+ rcu_read_unlock();
+ }
+
+ if (!dev->qp_dev)
+ goto bail;
+ spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
+ for (n = 0; n < dev->qp_dev->qp_table_size; n++) {
+ qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],
+ lockdep_is_held(&dev->qp_dev->qpt_lock));
+ RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);
+
+ for (; qp; qp = rcu_dereference_protected(qp->next,
+ lockdep_is_held(&dev->qp_dev->qpt_lock)))
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+ synchronize_rcu();
+bail:
+ return qp_inuse;
+}
+
+/**
+ * reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ */
+static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ iowait_init(
+ &qp->s_iowait,
+ 1,
+ hfi1_do_send,
+ iowait_sleep,
+ iowait_wakeup);
+ qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ clear_ahg(qp);
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ hfi1_put_ss(&qp->s_rdma_read_sge);
+
+ hfi1_put_ss(&qp->r_sge);
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct hfi1_sge *sge = &wqe->sg_list[i];
+
+ hfi1_put_mr(sge->mr);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ if (qp->s_rdma_mr) {
+ hfi1_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct hfi1_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ hfi1_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * hfi1_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
+{
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+ int ret = 0;
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
+ qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;
+
+ write_seqlock(&dev->iowait_lock);
+ if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ list_del_init(&qp->s_iowait.list);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ write_sequnlock(&dev->iowait_lock);
+
+ if (!(qp->s_flags & HFI1_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ hfi1_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ flush_tx_list(qp);
+ }
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (qp->s_last != qp->s_head)
+ hfi1_schedule_send(qp);
+
+ clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.wq) {
+ struct hfi1_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler)
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+static void flush_tx_list(struct hfi1_qp *qp)
+{
+ while (!list_empty(&qp->s_iowait.tx_head)) {
+ struct sdma_txreq *tx;
+
+ tx = list_first_entry(
+ &qp->s_iowait.tx_head,
+ struct sdma_txreq,
+ list);
+ list_del_init(&tx->list);
+ hfi1_put_txreq(
+ container_of(tx, struct verbs_txreq, txreq));
+ }
+}
+
+static void flush_iowait(struct hfi1_qp *qp)
+{
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ if (!list_empty(&qp->s_iowait.list)) {
+ list_del_init(&qp->s_iowait.list);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+}
+
+static inline int opa_mtu_enum_to_int(int mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_8192: return 8192;
+ case OPA_MTU_10240: return 10240;
+ default: return -1;
+ }
+}
+
+/*
+ * This function is what we would push to the core layer if we wanted to be a
+ * "first class citizen". Instead we hide this here and rely on Verbs ULPs
+ * to blindly pass the MTU enum value from the PathRecord to us.
+ *
+ * The actual flag used to determine "8k MTU" will change and is currently
+ * unknown.
+ */
+static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
+{
+ int val = opa_mtu_enum_to_int((int)mtu);
+
+ if (val > 0)
+ return val;
+ return ib_mtu_enum_to_int(mtu);
+}
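+
+/*
+ * For example, OPA_MTU_10240 maps to 10240 bytes here, while a
+ * standard value such as IB_MTU_4096 falls through to
+ * ib_mtu_enum_to_int() and yields 4096.
+ */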
+
+/**
+ * hfi1_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct hfi1_ibdev *dev = to_idev(ibqp->device);
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int ret;
+ u32 pmtu = 0; /* for gcc warning only */
+ struct hfi1_devdata *dd;
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
+ goto inval;
+ if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
+ goto inval;
+ if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > HFI1_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+ * Don't allow invalid path_mtu values. OK to set greater
+ * than the active mtu (or even the max_cap, if we have tuned
+ * that to a small mtu). We'll set qp->path_mtu
+ * to the lesser of requested attribute mtu and active,
+ * for packetizing messages.
+ * Note that the QP port has to be set in INIT and MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ int mtu, pidx = qp->port_num - 1;
+
+ dd = dd_from_dev(dev);
+ mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
+ if (mtu == -1)
+ goto inval;
+
+ if (mtu > dd->pport[pidx].ibmtu)
+ pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
+ else
+ pmtu = attr->path_mtu;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ flush_iowait(qp);
+ qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ /* Stop the sending work queue and retry timer */
+ cancel_work_sync(&qp->s_iowait.iowork);
+ del_timer_sync(&qp->s_timer);
+ iowait_sdma_drain(&qp->s_iowait);
+ flush_tx_list(qp);
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ clear_mr_refs(qp, 1);
+ clear_ahg(qp);
+ reset_qp(qp, ibqp->qp_type);
+ }
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to re-trigger if QP set to RTR more than once */
+ qp->r_flags &= ~HFI1_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ struct hfi1_ibport *ibp;
+ u8 sc, vl;
+ u32 mtu;
+
+ dd = dd_from_dev(dev);
+ ibp = &dd->pport[qp->port_num - 1].ibport_data;
+
+ sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ vl = sc_to_vlt(dd, sc);
+
+ mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
+ if (vl < PER_VL_SEND_CONTEXTS)
+ mtu = min_t(u32, mtu, dd->vld[vl].mtu);
+ pmtu = mtu_to_enum(mtu, OPA_MTU_8192);
+
+ qp->path_mtu = pmtu;
+ qp->pmtu = mtu;
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT) {
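+ /*
+ * The IBTA local ACK timeout is 4.096 us * 2^timeout; e.g. a
+ * timeout of 14 (an illustrative value) gives
+ * 4096 * 2^14 / 1000 us, roughly 67 ms, before conversion
+ * to jiffies.
+ */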
+ qp->timeout = attr->timeout;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ insert_qp(dev, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ ret = 0;
+ goto bail;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = mask_psn(qp->r_psn);
+ attr->sq_psn = mask_psn(qp->s_next_psn);
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 hfi1_compute_aeth(struct hfi1_qp *qp)
+{
+ u32 aeth = qp->r_msn & HFI1_MSN_MASK;
+
+ if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+ struct hfi1_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check pointers before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ /*
+ * Compute the number of credits available (RWQEs).
+ * There is a small chance that the pair of reads are
+ * not atomic, which is OK, since the fuzziness is
+ * resolved as further ACKs go out.
+ */
+ credits = head - tail;
+ if ((int)credits < 0)
+ credits += qp->r_rq.size;
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits)
+ max = x;
+ else if (min == x)
+ break;
+ else
+ min = x;
+ }
+ aeth |= x << HFI1_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
+
+/**
+ * hfi1_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hfi1_qp *qp;
+ int err;
+ struct hfi1_swqe *swq = NULL;
+ struct hfi1_ibdev *dev;
+ struct hfi1_devdata *dd;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
+
+ if (init_attr->cap.max_send_sge > hfi1_max_sges ||
+ init_attr->cap.max_send_wr > hfi1_max_qp_wrs ||
+ init_attr->create_flags) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > hfi1_max_sges ||
+ init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
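+ /* FALLTHROUGH: SMI/GSI share the allocation path below */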
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct hfi1_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct hfi1_swqe);
+ swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct hfi1_srq *srq = to_isrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ RCU_INIT_POINTER(qp->next, NULL);
+ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
+ if (!qp->s_hdr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ if (init_attr->srq)
+ sz = 0;
+ else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct hfi1_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
+ qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
+ init_attr->port_num);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
+ goto bail_qp;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ reset_qp(qp, init_attr->qp_type);
+
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ ret = ERR_PTR(-ENOSYS);
+ goto bail;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See hfi1_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else {
+ u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = hfi1_create_mmap_info(dev, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ err = ib_copy_to_udata(udata, &(qp->ip->offset),
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ }
+
+ spin_lock(&dev->n_qps_lock);
+ if (dev->n_qps_allocated == hfi1_max_qps) {
+ spin_unlock(&dev->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_qps_allocated++;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+
+	/*
+	 * We have our QP and it's good; now keep track of which types of
+	 * opcodes can be processed on this QP, by way of the 3 high-order
+	 * bits of the opcode.
+	 */
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK;
+ break;
+ case IB_QPT_RC:
+ qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK;
+ break;
+ case IB_QPT_UC:
+ qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK;
+ break;
+ default:
+ ret = ERR_PTR(-EINVAL);
+ goto bail_ip;
+ }
+
+ goto bail;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, hfi1_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
+bail_qp:
+ kfree(qp->s_hdr);
+ kfree(qp);
+bail_swq:
+ vfree(swq);
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int hfi1_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ struct hfi1_ibdev *dev = to_idev(ibqp->device);
+
+ /* Make sure HW and driver activity is stopped. */
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ flush_iowait(qp);
+ qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ cancel_work_sync(&qp->s_iowait.iowork);
+ del_timer_sync(&qp->s_timer);
+ iowait_sdma_drain(&qp->s_iowait);
+ flush_tx_list(qp);
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ clear_mr_refs(qp, 1);
+ clear_ahg(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+
+	/* all users cleaned up; mark the QPN available */
+ free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, hfi1_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ kfree(qp->s_hdr);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * init_qpn_table - initialize the QP number table for a device
+ * @qpt: the QPN table
+ */
+static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
+{
+ u32 offset, qpn, i;
+ struct qpn_map *map;
+ int ret = 0;
+
+ spin_lock_init(&qpt->lock);
+
+ qpt->last = 0;
+ qpt->incr = 1 << dd->qos_shift;
+
+	/* ensure we don't assign QPs from KDETH 64K window */
+ qpn = kdeth_qp << 16;
+ qpt->nmaps = qpn / BITS_PER_PAGE;
+ /* This should always be zero */
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpt->nmaps];
+ dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
+ qpn, qpn + 65535);
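+	/*
+	 * Mark every QPN in the 64K KDETH window as in-use, one bitmap
+	 * page at a time, so the allocator never hands one out.
+	 */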
+ for (i = 0; i < 65536; i++) {
+ if (!map->page) {
+ get_map_page(qpt, map);
+ if (!map->page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ set_bit(offset, map->page);
+ offset++;
+ if (offset == BITS_PER_PAGE) {
+ /* next page */
+ qpt->nmaps++;
+ map++;
+ offset = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+static void free_qpn_table(struct hfi1_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ free_page((unsigned long) qpt->map[i].page);
+}
+
+/**
+ * hfi1_get_credit - handle a credit update contained in an AETH
+ * @qp: the qp whose send credits are being updated
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
+{
+ u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
+
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == HFI1_AETH_CREDIT_INVAL) {
+ if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ hfi1_schedule_send(qp);
+ }
+ }
+ } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
+ if (cmp_msn(credit, qp->s_lsn) > 0) {
+ qp->s_lsn = credit;
+ if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ hfi1_schedule_send(qp);
+ }
+ }
+ }
+}
+
+void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & flag) {
+ qp->s_flags &= ~flag;
+ trace_hfi1_qpwakeup(qp, flag);
+ hfi1_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ /* Notify hfi1_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+static int iowait_sleep(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *stx,
+ unsigned seq)
+{
+ struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
+ struct hfi1_qp *qp;
+ unsigned long flags;
+ int ret = 0;
+ struct hfi1_ibdev *dev;
+
+ qp = tx->qp;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
+
+ /*
+ * If we couldn't queue the DMA request, save the info
+ * and try again later rather than destroying the
+ * buffer and undoing the side effects of the copy.
+ */
+ /* Make a common routine? */
+ dev = &sde->dd->verbs_dev;
+ list_add_tail(&stx->list, &wait->tx_head);
+ write_seqlock(&dev->iowait_lock);
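+		/*
+		 * Recheck for engine progress under the iowait lock; if the
+		 * SDMA ring advanced past @seq, back out and return -EAGAIN
+		 * so the caller retries the submission instead of sleeping.
+		 */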
+ if (sdma_progress(sde, seq, stx))
+ goto eagain;
+ if (list_empty(&qp->s_iowait.list)) {
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+
+ ibp->n_dmawait++;
+ qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
+ list_add_tail(&qp->s_iowait.list, &sde->dmawait);
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
+ atomic_inc(&qp->refcount);
+ }
+ write_sequnlock(&dev->iowait_lock);
+ qp->s_flags &= ~HFI1_S_BUSY;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EBUSY;
+ } else {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ hfi1_put_txreq(tx);
+ }
+ return ret;
+eagain:
+ write_sequnlock(&dev->iowait_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ list_del_init(&stx->list);
+ return -EAGAIN;
+}
+
+static void iowait_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
+
+ WARN_ON(reason != SDMA_AVAIL_REASON);
+ hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
+}
+
+int hfi1_qp_init(struct hfi1_ibdev *dev)
+{
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ int i;
+ int ret = -ENOMEM;
+
+ /* allocate parent object */
+ dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL);
+ if (!dev->qp_dev)
+ goto nomem;
+ /* allocate hash table */
+ dev->qp_dev->qp_table_size = hfi1_qp_table_size;
+ dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
+ dev->qp_dev->qp_table =
+ kmalloc(dev->qp_dev->qp_table_size *
+ sizeof(*dev->qp_dev->qp_table),
+ GFP_KERNEL);
+ if (!dev->qp_dev->qp_table)
+ goto nomem;
+ for (i = 0; i < dev->qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);
+ spin_lock_init(&dev->qp_dev->qpt_lock);
+ /* initialize qpn map */
+ ret = init_qpn_table(dd, &dev->qp_dev->qpn_table);
+ if (ret)
+ goto nomem;
+ return ret;
+nomem:
+ if (dev->qp_dev) {
+ kfree(dev->qp_dev->qp_table);
+ free_qpn_table(&dev->qp_dev->qpn_table);
+ kfree(dev->qp_dev);
+ }
+ return ret;
+}
+
+void hfi1_qp_exit(struct hfi1_ibdev *dev)
+{
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ u32 qps_inuse;
+
+ qps_inuse = free_all_qps(dd);
+ if (qps_inuse)
+ dd_dev_err(dd, "QP memory leak! %u still in use\n",
+ qps_inuse);
+ if (dev->qp_dev) {
+ kfree(dev->qp_dev->qp_table);
+ free_qpn_table(&dev->qp_dev->qpn_table);
+ kfree(dev->qp_dev);
+ }
+}
+
+/**
+ * qp_to_sdma_engine - map a qp to a send engine
+ * @qp: the QP
+ * @sc5: the 5 bit sc
+ *
+ * Return:
+ * A send engine for the qp or NULL for SMI type qp.
+ */
+struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct sdma_engine *sde;
+
+ if (!(dd->flags & HFI1_HAS_SEND_DMA))
+ return NULL;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ break;
+ case IB_QPT_SMI:
+ return NULL;
+ default:
+ break;
+ }
+ sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
+ return sde;
+}
+
+struct qp_iter {
+ struct hfi1_ibdev *dev;
+ struct hfi1_qp *qp;
+ int specials;
+ int n;
+};
+
+struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
+{
+ struct qp_iter *iter;
+
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ iter->specials = dev->ibdev.phys_port_cnt * 2;
+ if (qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int qp_iter_next(struct qp_iter *iter)
+{
+ struct hfi1_ibdev *dev = iter->dev;
+ int n = iter->n;
+ int ret = 1;
+ struct hfi1_qp *pqp = iter->qp;
+ struct hfi1_qp *qp;
+
+	/*
+	 * The approach is to treat the special qps as additional
+	 * table entries placed before the real hash table. Since
+	 * the qp code sets the qp->next hash link to NULL, this
+	 * works just fine.
+	 *
+	 * iter->specials is 2 * # ports
+	 *
+	 * n = 0..iter->specials-1 are the special qp indices
+	 *
+	 * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials-1
+	 * are the potential hash bucket entries
+	 */
+ for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) {
+ if (pqp) {
+ qp = rcu_dereference(pqp->next);
+ } else {
+ if (n < iter->specials) {
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ int pidx;
+
+ pidx = n % dev->ibdev.phys_port_cnt;
+ ppd = &dd_from_dev(dev)->pport[pidx];
+ ibp = &ppd->ibport_data;
+
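+				/* even n selects the port's QP0, odd n QP1 */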
+ if (!(n & 1))
+ qp = rcu_dereference(ibp->qp[0]);
+ else
+ qp = rcu_dereference(ibp->qp[1]);
+ } else {
+ qp = rcu_dereference(
+ dev->qp_dev->qp_table[
+ (n - iter->specials)]);
+ }
+ }
+ pqp = qp;
+ if (qp) {
+ iter->qp = qp;
+ iter->n = n;
+ return 0;
+ }
+ }
+ return ret;
+}
+
+static const char * const qp_type_str[] = {
+ "SMI", "GSI", "RC", "UC", "UD",
+};
+
+static int qp_idle(struct hfi1_qp *qp)
+{
+ return
+ qp->s_last == qp->s_acked &&
+ qp->s_acked == qp->s_cur &&
+ qp->s_cur == qp->s_tail &&
+ qp->s_tail == qp->s_head;
+}
+
+void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
+{
+ struct hfi1_swqe *wqe;
+ struct hfi1_qp *qp = iter->qp;
+ struct sdma_engine *sde;
+
+ sde = qp_to_sdma_engine(qp, qp->s_sc);
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ seq_printf(s,
+ "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
+ iter->n,
+ qp_idle(qp) ? "I" : "B",
+ qp->ibqp.qp_num,
+ atomic_read(&qp->refcount),
+ qp_type_str[qp->ibqp.qp_type],
+ qp->state,
+ wqe ? wqe->wr.opcode : 0,
+ qp->s_hdrwords,
+ qp->s_flags,
+ atomic_read(&qp->s_iowait.sdma_busy),
+ !list_empty(&qp->s_iowait.list),
+ qp->timeout,
+ wqe ? wqe->ssn : 0,
+ qp->s_lsn,
+ qp->s_last_psn,
+ qp->s_psn, qp->s_next_psn,
+ qp->s_sending_psn, qp->s_sending_hpsn,
+ qp->s_last, qp->s_acked, qp->s_cur,
+ qp->s_tail, qp->s_head, qp->s_size,
+ qp->remote_qpn,
+ qp->remote_ah_attr.dlid,
+ qp->remote_ah_attr.sl,
+ qp->pmtu,
+ qp->s_retry_cnt,
+ qp->timeout,
+ qp->s_rnr_retry_cnt,
+ sde,
+ sde ? sde->this_idx : 0);
+}
+
+void qp_comm_est(struct hfi1_qp *qp)
+{
+ qp->r_flags |= HFI1_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
new file mode 100644
index 000000000000..6b505859b59c
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/qp.h
@@ -0,0 +1,235 @@
+#ifndef _QP_H
+#define _QP_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/hash.h>
+#include "verbs.h"
+
+#define QPN_MAX (1 << 24)
+#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+
+/*
+ * QPN-map pages start out as NULL, they get allocated upon
+ * first use and are never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+ void *page;
+};
+
+struct hfi1_qpn_table {
+ spinlock_t lock; /* protect changes in this struct */
+ unsigned flags; /* flags for QP0/1 allocated for each port */
+ u32 last; /* last QP number allocated */
+ u32 nmaps; /* size of the map table */
+ u16 limit;
+ u8 incr;
+ /* bit map of free QP numbers other than 0/1 */
+ struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct hfi1_qp_ibdev {
+ u32 qp_table_size;
+ u32 qp_table_bits;
+ struct hfi1_qp __rcu **qp_table;
+ spinlock_t qpt_lock;
+ struct hfi1_qpn_table qpn_table;
+};
+
+static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn)
+{
+ return hash_32(qpn, dev->qp_table_bits);
+}
+
+/**
+ * hfi1_lookup_qpn - return the QP with the given QPN
+ * @ibp: the ibport
+ * @qpn: the QP number to look up
+ *
+ * The caller must hold the rcu_read_lock(), and keep the lock until
+ * the returned qp is no longer in use.
+ */
+static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
+ u32 qpn) __must_hold(RCU)
+{
+ struct hfi1_qp *qp = NULL;
+
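+	/* QPNs 0 and 1 are the per-port special QPs, kept outside the hash */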
+ if (unlikely(qpn <= 1)) {
+ qp = rcu_dereference(ibp->qp[qpn]);
+ } else {
+ struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ u32 n = qpn_hash(dev->qp_dev, qpn);
+
+ for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;
+ qp = rcu_dereference(qp->next))
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ }
+ return qp;
+}
+
+/**
+ * hfi1_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err);
+
+/**
+ * hfi1_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+
+/**
+ * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 hfi1_compute_aeth(struct hfi1_qp *qp);
+
+/**
+ * hfi1_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+/**
+ * hfi1_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int hfi1_destroy_qp(struct ib_qp *ibqp);
+
+/**
+ * hfi1_get_credit - handle a credit update contained in an AETH
+ * @qp: the qp whose send credits are being updated
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth);
+
+/**
+ * hfi1_qp_init - allocate QP tables
+ * @dev: a pointer to the hfi1_ibdev
+ */
+int hfi1_qp_init(struct hfi1_ibdev *dev);
+
+/**
+ * hfi1_qp_exit - free the QP related structures
+ * @dev: a pointer to the hfi1_ibdev
+ */
+void hfi1_qp_exit(struct hfi1_ibdev *dev);
+
+/**
+ * hfi1_qp_wakeup - wake up a QP stalled on the indicated event
+ * @qp: the QP
+ * @flag: the flag the QP is stalled on
+ */
+void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag);
+
+struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);
+
+struct qp_iter;
+
+/**
+ * qp_iter_init - allocate an iterator over the device's QPs
+ * @dev: the hfi1_ibdev
+ */
+struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev);
+
+/**
+ * qp_iter_next - advance the iterator to the next qp
+ * @iter: the iterator for the qp hash list
+ */
+int qp_iter_next(struct qp_iter *iter);
+
+/**
+ * qp_iter_print - print the qp information on the seq_file
+ * @s: the seq_file to emit the qp information on
+ * @iter: the iterator for the qp hash list
+ */
+void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
+
+/**
+ * qp_comm_est - handle the communication-established event for a QP
+ * @qp: the QP
+ */
+void qp_comm_est(struct hfi1_qp *qp);
+
+#endif /* _QP_H */
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c
new file mode 100644
index 000000000000..3138936157db
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/qsfp.c
@@ -0,0 +1,546 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+#include "twsi.h"
+
+/*
+ * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
+ * in twsi.c
+ */
+#define I2C_MAX_RETRY 4
+
+/*
+ * Unlocked i2c write. Must hold dd->qsfp_i2c_mutex.
+ */
+static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int ret, cnt;
+ u8 *buff = bp;
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = hfi1_twsi_reset(dd, target);
+ if (ret) {
+ hfi1_dev_porterr(dd, ppd->port,
+ "I2C interface Reset for write failed\n");
+ return -EIO;
+ }
+
+ cnt = 0;
+ while (cnt < len) {
+ int wlen = len - cnt;
+
+ ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
+ buff + cnt, wlen);
+ if (ret) {
+			/* hfi1_twsi_blk_wr() returns 1 on error, else 0 */
+ return -EIO;
+ }
+ offset += wlen;
+ cnt += wlen;
+ }
+
+ /* Must wait min 20us between qsfp i2c transactions */
+ udelay(20);
+
+ return cnt;
+}
+
+int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
+ void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
+ if (!ret) {
+ ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ mutex_unlock(&dd->qsfp_i2c_mutex);
+ }
+
+ return ret;
+}
+
+/*
+ * Unlocked i2c read. Must hold dd->qsfp_i2c_mutex.
+ */
+static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int ret, cnt, pass = 0;
+ int stuck = 0;
+ u8 *buff = bp;
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = hfi1_twsi_reset(dd, target);
+ if (ret) {
+ hfi1_dev_porterr(dd, ppd->port,
+ "I2C interface Reset for read failed\n");
+ ret = -EIO;
+ stuck = 1;
+ goto exit;
+ }
+
+ cnt = 0;
+ while (cnt < len) {
+ int rlen = len - cnt;
+
+ ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
+ buff + cnt, rlen);
+		/* Some QSFPs fail the first try; retry up to I2C_MAX_RETRY times */
+ if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
+ continue;
+ if (ret) {
+			/* hfi1_twsi_blk_rd() returns 1 on error, else 0 */
+ ret = -EIO;
+ goto exit;
+ }
+ offset += rlen;
+ cnt += rlen;
+ }
+
+ ret = cnt;
+
+exit:
+ if (stuck)
+ dd_dev_err(dd, "I2C interface bus stuck non-idle\n");
+
+ if (pass >= I2C_MAX_RETRY && ret)
+ hfi1_dev_porterr(dd, ppd->port,
+ "I2C failed even retrying\n");
+ else if (pass)
+ hfi1_dev_porterr(dd, ppd->port, "I2C retries: %d\n", pass);
+
+ /* Must wait min 20us between qsfp i2c transactions */
+ udelay(20);
+
+ return ret;
+}
+
+int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
+ void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
+ if (!ret) {
+ ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ mutex_unlock(&dd->qsfp_i2c_mutex);
+ }
+
+ return ret;
+}
+
+int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ int count = 0;
+ int offset;
+ int nwrite;
+ int ret;
+ u8 page;
+
+ ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
+ if (ret)
+ return ret;
+
+ while (count < len) {
+ /*
+		 * Set the qsfp page based on a zero-based address
+ * and a page size of QSFP_PAGESIZE bytes.
+ */
+ page = (u8)(addr / QSFP_PAGESIZE);
+
+ ret = __i2c_write(ppd, target, QSFP_DEV,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ if (ret != 1) {
+ hfi1_dev_porterr(
+ ppd->dd,
+ ppd->port,
+ "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
+ ret = -EIO;
+ break;
+ }
+
+ /* truncate write to end of page if crossing page boundary */
+ offset = addr % QSFP_PAGESIZE;
+ nwrite = len - count;
+ if ((offset + nwrite) > QSFP_PAGESIZE)
+ nwrite = QSFP_PAGESIZE - offset;
+
+ ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count,
+ nwrite);
+		if (ret <= 0) /* stop on error or nothing written */
+ break;
+
+ count += ret;
+ addr += ret;
+ }
+
+ mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ int count = 0;
+ int offset;
+ int nread;
+ int ret;
+ u8 page;
+
+ ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
+ if (ret)
+ return ret;
+
+ while (count < len) {
+ /*
+ * Set the qsfp page based on a zero-based address
+ * and a page size of QSFP_PAGESIZE bytes.
+ */
+ page = (u8)(addr / QSFP_PAGESIZE);
+ ret = __i2c_write(ppd, target, QSFP_DEV,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ if (ret != 1) {
+ hfi1_dev_porterr(
+ ppd->dd,
+ ppd->port,
+ "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
+ ret = -EIO;
+ break;
+ }
+
+ /* truncate read to end of page if crossing page boundary */
+ offset = addr % QSFP_PAGESIZE;
+ nread = len - count;
+ if ((offset + nread) > QSFP_PAGESIZE)
+ nread = QSFP_PAGESIZE - offset;
+
+ ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count,
+ nread);
+ if (ret <= 0) /* stop on error or nothing read */
+ break;
+
+ count += ret;
+ addr += ret;
+ }
+
+ mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/*
+ * This function caches the QSFP memory range in 128 byte chunks.
+ * As an example, the next byte after address 255 is byte 128 from
+ * upper page 01H (if present) rather than byte 0 from lower page 00H.
+ */
+int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
+{
+ u32 target = ppd->dd->hfi1_id;
+ int ret;
+ unsigned long flags;
+ u8 *cache = &cp->cache[0];
+
+ /* ensure sane contents on invalid reads, for cable swaps */
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
+ dd_dev_info(ppd->dd, "%s: called\n", __func__);
+ if (!qsfp_mod_present(ppd)) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ ret = qsfp_read(ppd, target, 0, cache, 256);
+ if (ret != 256) {
+ dd_dev_info(ppd->dd,
+ "%s: Read of pages 00H failed, expected 256, got %d\n",
+ __func__, ret);
+ goto bail;
+ }
+
+ if (cache[0] != 0x0C && cache[0] != 0x0D)
+ goto bail;
+
+	/* Paging is supported when the "Flat mem" bit (byte 2, bit 2) is clear */
+	if (!(cache[2] & 4)) {
+		/* Paging enabled; upper page 03h is always required */
+ if ((cache[195] & 0xC0) == 0xC0) {
+ /* all */
+ ret = qsfp_read(ppd, target, 384, cache + 256, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 640, cache + 384, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ } else if ((cache[195] & 0x80) == 0x80) {
+ /* only page 2 and 3 */
+ ret = qsfp_read(ppd, target, 640, cache + 384, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ } else if ((cache[195] & 0x40) == 0x40) {
+ /* only page 1 and 3 */
+ ret = qsfp_read(ppd, target, 384, cache + 256, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ } else {
+ /* only page 3 */
+ ret = qsfp_read(ppd, target, 896, cache + 512, 128);
+ if (ret <= 0 || ret != 128) {
+ dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ goto bail;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 1;
+ ppd->qsfp_info.cache_refresh_required = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+
+ return 0;
+
+bail:
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
+ return ret;
+}
+
+const char * const hfi1_qsfp_devtech[16] = {
+ "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+ "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+ "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+ "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
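+/*
+ * Power-class strings packed as four 4-byte entries; QSFP_PWR()
+ * (0..3) selects an entry and "%.3s" in the dump below prints its
+ * first three characters, with the trailing 'W' supplied by the
+ * format string.
+ */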
+static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+
+int qsfp_mod_present(struct hfi1_pportdata *ppd)
+{
+ if (HFI1_CAP_IS_KSET(QSFP_ENABLED)) {
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 reg;
+
+ reg = read_csr(dd,
+ dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
+ return !(reg & QSFP_HFI0_MODPRST_N);
+ }
+ /* always return cable present */
+ return 1;
+}
+
+/*
+ * This function maps QSFP memory addresses in 128 byte chunks in the following
+ * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1
+ * spec
+ * For addr 000-127, lower page 00h
+ * For addr 128-255, upper page 00h
+ * For addr 256-383, upper page 01h
+ * For addr 384-511, upper page 02h
+ * For addr 512-639, upper page 03h
+ *
+ * For addresses beyond this range, it returns the invalid range of data buffer
+ * set to 0.
+ * For upper pages that are optional, if they are not valid, returns the
+ * particular range of bytes in the data buffer set to 0.
+ */
+int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
+ u8 *data)
+{
+ struct hfi1_pportdata *ppd;
+ u32 excess_len = 0;
+ int ret = 0;
+
+ if (port_num > dd->num_pports || port_num < 1) {
+ dd_dev_info(dd, "%s: Invalid port number %d\n",
+ __func__, port_num);
+ ret = -EINVAL;
+ goto set_zeroes;
+ }
+
+ ppd = dd->pport + (port_num - 1);
+ if (!qsfp_mod_present(ppd)) {
+ ret = -ENODEV;
+ goto set_zeroes;
+ }
+
+ if (!ppd->qsfp_info.cache_valid) {
+ ret = -EINVAL;
+ goto set_zeroes;
+ }
+
+ if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
+ ret = -ERANGE;
+ goto set_zeroes;
+ }
+
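+	/* Clip reads that run past the cache: copy what fits, zero the rest */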
+ if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
+ excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
+ memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
+ data += (len - excess_len);
+ goto set_zeroes;
+ }
+
+ memcpy(data, &ppd->qsfp_info.cache[addr], len);
+ return 0;
+
+set_zeroes:
+ memset(data, 0, excess_len);
+ return ret;
+}
+
+int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
+{
+ u8 *cache = &ppd->qsfp_info.cache[0];
+ u8 bin_buff[QSFP_DUMP_CHUNK];
+ char lenstr[6];
+ int sofar, ret;
+ int bidx = 0;
+ u8 *atten = &cache[QSFP_ATTEN_OFFS];
+ u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
+
+ sofar = 0;
+ lenstr[0] = ' ';
+ lenstr[1] = '\0';
+
+ if (ppd->qsfp_info.cache_valid) {
+
+ if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
+ sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
+ pwr_codes +
+ (QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
+ lenstr,
+ hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+ QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+ QSFP_OUI(vendor_oui));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+ QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+ QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);
+
+ if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
+ sofar += scnprintf(buf + sofar, len - sofar,
+ "Atten:%d, %d\n",
+ QSFP_ATTEN_SDR(atten),
+ QSFP_ATTEN_DDR(atten));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+ QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+ QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+ QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);
+
+ while (bidx < QSFP_DEFAULT_HDR_CNT) {
+ int iidx;
+
+ memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
+ for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
+ sofar += scnprintf(buf + sofar, len-sofar,
+ " %02X", bin_buff[iidx]);
+ }
+ sofar += scnprintf(buf + sofar, len - sofar, "\n");
+ bidx += QSFP_DUMP_CHUNK;
+ }
+ }
+ ret = sofar;
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h
new file mode 100644
index 000000000000..d30c2a6baa0b
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/qsfp.h
@@ -0,0 +1,222 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* QSFP support common definitions, for hfi driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+#define QSFP_MODPRS_LAG_MSEC 20
+/* 128 byte pages, per SFF 8636 rev 2.4 */
+#define QSFP_MAX_NUM_PAGES 5
+
+/*
+ * Below are masks for QSFP pins. Pins are the same for HFI0 and HFI1.
+ * _N means asserted low
+ */
+#define QSFP_HFI0_I2CCLK (1 << 0)
+#define QSFP_HFI0_I2CDAT (1 << 1)
+#define QSFP_HFI0_RESET_N (1 << 2)
+#define QSFP_HFI0_INT_N (1 << 3)
+#define QSFP_HFI0_MODPRST_N (1 << 4)
+
+/* QSFP is paged at 256 bytes */
+#define QSFP_PAGESIZE 256
+
+/* Defined fields that Intel requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
+/*
+ * Rest of first 128 not used, although 127 is reserved for page select
+ * if module is not "Flat memory".
+ */
+#define QSFP_PAGE_SELECT_BYTE_OFFS 127
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
+ * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not Intel req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not Intel req'd */
+/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not Intel req'd */
+/* Byte 141 is Extended Rate Select. Not Intel req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not Intel req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not Intel req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const hfi1_qsfp_devtech[16];
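+/*
+ * Each hex constant below is a 16-entry truth table: shifting it
+ * right by the 4-bit tech code (upper nibble of byte 147) and
+ * masking bit 0 tests whether that code is in the set.
+ */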
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
+#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+ oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not Intel req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not Intel req'd */
+/* Byte 190 is Max Case Temp. Not Intel req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not Intel req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not Intel req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not Intel req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * Interrupt flag masks
+ */
+#define QSFP_DATA_NOT_READY 0x01
+
+#define QSFP_HIGH_TEMP_ALARM 0x80
+#define QSFP_LOW_TEMP_ALARM 0x40
+#define QSFP_HIGH_TEMP_WARNING 0x20
+#define QSFP_LOW_TEMP_WARNING 0x10
+
+#define QSFP_HIGH_VCC_ALARM 0x80
+#define QSFP_LOW_VCC_ALARM 0x40
+#define QSFP_HIGH_VCC_WARNING 0x20
+#define QSFP_LOW_VCC_WARNING 0x10
+
+#define QSFP_HIGH_POWER_ALARM 0x88
+#define QSFP_LOW_POWER_ALARM 0x44
+#define QSFP_HIGH_POWER_WARNING 0x22
+#define QSFP_LOW_POWER_WARNING 0x11
+
+#define QSFP_HIGH_BIAS_ALARM 0x88
+#define QSFP_LOW_BIAS_ALARM 0x44
+#define QSFP_HIGH_BIAS_WARNING 0x22
+#define QSFP_LOW_BIAS_WARNING 0x11
+
+/*
+ * struct qsfp_data encapsulates state of QSFP device for one port.
+ * it will be part of port-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the qsfp_lock arbitrate access to common resources.
+ *
+ */
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qsfp_data {
+ /* Helps to find our way */
+ struct hfi1_pportdata *ppd;
+ struct work_struct qsfp_work;
+ u8 cache[QSFP_MAX_NUM_PAGES*128];
+ spinlock_t qsfp_lock;
+ u8 check_interrupt_flags;
+ u8 qsfp_interrupt_functional;
+ u8 cache_valid;
+ u8 cache_refresh_required;
+};
+
+int refresh_qsfp_cache(struct hfi1_pportdata *ppd,
+ struct qsfp_data *cp);
+int qsfp_mod_present(struct hfi1_pportdata *ppd);
+int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr,
+ u32 len, u8 *data);
+
+int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len);
+int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len);
+int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
+int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
new file mode 100644
index 000000000000..632dd5ba7dfd
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -0,0 +1,2426 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/io.h>
+
+#include "hfi.h"
+#include "qp.h"
+#include "sdma.h"
+#include "trace.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
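+/*
+ * restart_sge - reposition the send sge state for a retransmit
+ *
+ * Advance @ss to the byte offset within @wqe implied by @psn (each
+ * PSN past wqe->psn covers one @pmtu of payload) and return the
+ * number of bytes remaining to be (re)sent.
+ */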
+static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = delta_psn(psn, wqe->psn) * pmtu;
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ hfi1_skip_sge(ss, len, 0);
+ return wqe->length - len;
+}
+
+static void start_timer(struct hfi1_qp *qp)
+{
+ qp->s_flags |= HFI1_S_TIMER;
+ qp->s_timer.function = rc_timeout;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies + qp->timeout_jiffies;
+ add_timer(&qp->s_timer);
+}
+
+/**
+ * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are in the responder's side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
+ struct hfi1_other_headers *ohdr, u32 pmtu)
+{
+ struct hfi1_ack_entry *e;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+ int middle = 0;
+
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->rdma_sge.mr) {
+ hfi1_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ /* FALLTHROUGH */
+ case OP(ATOMIC_ACKNOWLEDGE):
+ /*
+ * We can increment the tail pointer now that the last
+ * response has been sent instead of only being
+ * constructed.
+ */
+ if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
+ qp->s_tail_ack_queue = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_ONLY):
+ case OP(ACKNOWLEDGE):
+ /* Check for no next entry in the queue. */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+ if (qp->s_flags & HFI1_S_ACK_PENDING)
+ goto normal;
+ goto bail;
+ }
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST)) {
+ /*
+ * If a RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester re-sends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ qp->s_rdma_mr = e->rdma_sge.mr;
+ if (qp->s_rdma_mr)
+ hfi1_get_mr(qp->s_rdma_mr);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ } else {
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
+ ohdr->u.aeth = hfi1_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_rdma_psn = e->psn;
+ bth2 = mask_psn(qp->s_ack_rdma_psn++);
+ } else {
+ /* COMPARE_SWAP or FETCH_ADD */
+ qp->s_cur_sge = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ ohdr->u.at.aeth = hfi1_compute_aeth(qp);
+ ohdr->u.at.atomic_ack_eth[0] =
+ cpu_to_be32(e->atomic_data >> 32);
+ ohdr->u.at.atomic_ack_eth[1] =
+ cpu_to_be32(e->atomic_data);
+ hwords += sizeof(ohdr->u.at) / sizeof(u32);
+ bth2 = mask_psn(e->psn);
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+ if (qp->s_rdma_mr)
+ hfi1_get_mr(qp->s_rdma_mr);
+ len = qp->s_ack_rdma_sge.sge.sge_length;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ } else {
+ ohdr->u.aeth = hfi1_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ bth2 = mask_psn(qp->s_ack_rdma_psn++);
+ break;
+
+ default:
+normal:
+ /*
+ * Send a regular ACK.
+ * Set the s_ack_state so we wait until after sending
+ * the ACK before setting s_ack_state to ACKNOWLEDGE
+ * (see above).
+ */
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_flags &= ~HFI1_S_ACK_PENDING;
+ qp->s_cur_sge = NULL;
+ if (qp->s_nak_state)
+ ohdr->u.aeth =
+ cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
+ (qp->s_nak_state <<
+ HFI1_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = hfi1_compute_aeth(qp);
+ hwords++;
+ len = 0;
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ bth2 = mask_psn(qp->s_ack_psn);
+ }
+ qp->s_rdma_ack_cnt++;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_size = len;
+ hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle);
+ return 1;
+
+bail:
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ /*
+ * Ensure s_rdma_ack_cnt changes are committed prior to resetting
+ * HFI1_S_RESP_PENDING
+ */
+ smp_wmb();
+ qp->s_flags &= ~(HFI1_S_RESP_PENDING
+ | HFI1_S_ACK_PENDING
+ | HFI1_S_AHG_VALID);
+ return 0;
+}
+
+/**
+ * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int hfi1_make_rc_req(struct hfi1_qp *qp)
+{
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_sge_state *ss;
+ struct hfi1_swqe *wqe;
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ u32 hwords = 5;
+ u32 len;
+ u32 bth0 = 0;
+ u32 bth2;
+ u32 pmtu = qp->pmtu;
+ char newreq;
+ unsigned long flags;
+ int ret = 0;
+ int middle = 0;
+ int delta;
+
+ ohdr = &qp->s_hdr->ibh.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr->ibh.u.l.oth;
+
+ /*
+ * The lock is needed to synchronize between the sending tasklet,
+ * the receive interrupt handler, and timeout re-sends.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Sending responses has higher priority over sending requests. */
+ if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
+ make_rc_ack(dev, qp, ohdr, pmtu))
+ goto done;
+
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_iowait.sdma_busy)) {
+ qp->s_flags |= HFI1_S_WAIT_DMA;
+ goto bail;
+ }
+ clear_ahg(qp);
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+ /* will get called again */
+ goto done;
+ }
+
+ if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK))
+ goto bail;
+
+ if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+ qp->s_flags |= HFI1_S_WAIT_PSN;
+ goto bail;
+ }
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ }
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ switch (qp->s_state) {
+ default:
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head) {
+ clear_ahg(qp);
+ goto bail;
+ }
+ /*
+ * If a fence is requested, wait for previous
+ * RDMA read and atomic operations to finish.
+ */
+ if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ qp->s_num_rd_atomic) {
+ qp->s_flags |= HFI1_S_WAIT_FENCE;
+ goto bail;
+ }
+ wqe->psn = qp->s_next_psn;
+ newreq = 1;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = mask_psn(qp->s_psn);
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / sizeof(u32);
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /*
+ * Adjust s_next_psn to count the
+ * expected number of responses.
+ */
+ if (len > pmtu)
+ qp->s_next_psn += (len - 1) / pmtu;
+ wqe->lpsn = qp->s_next_psn++;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_state = OP(COMPARE_SWAP);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ ohdr->u.atomic_eth.compare_data = 0;
+ }
+ ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr >> 32);
+ ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_psn = wqe->lpsn + 1;
+ else {
+ qp->s_psn++;
+ if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ }
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = mask_psn(qp->s_psn++);
+ if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
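+ /*
+ * Middle packets of a multi-packet message keep the same header
+ * apart from the PSN, which is what lets hfi1_make_ruc_header()
+ * use AHG (automatic header generation) when the SDMA_AHG
+ * capability is set.
+ */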
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate an RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = mask_psn(qp->s_psn++);
+ if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate an RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See restart_rc().
+ */
+ len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_sending_hpsn = bth2;
+ delta = delta_psn(bth2, wqe->psn);
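+ /*
+ * Request an ACK periodically on long transfers; e.g. if
+ * HFI1_PSN_CREDIT were 16 (value illustrative, see its
+ * definition), every 16th packet would carry the ACK-request
+ * bit so the acked state advances before the last packet.
+ */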
+ if (delta && delta % HFI1_PSN_CREDIT == 0)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & HFI1_S_SEND_ONE) {
+ qp->s_flags &= ~HFI1_S_SEND_ONE;
+ qp->s_flags |= HFI1_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ hfi1_make_ruc_header(
+ qp,
+ ohdr,
+ bth0 | (qp->s_state << 24),
+ bth2,
+ middle);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~HFI1_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * hfi1_send_rc_ack - Construct an ACK packet and send it
+ * @rcd: the receive context the packet arrived on
+ * @qp: a pointer to the QP
+ * @is_fecn: non-zero if the packet that prompted this ACK carried
+ * a FECN, in which case a BECN is echoed back
+ *
+ * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
+ int is_fecn)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 pbc, pbc_flags = 0;
+ u16 lrh0;
+ u16 sc5;
+ u32 bth0;
+ u32 hwords;
+ u32 vl, plen;
+ struct send_context *sc;
+ struct pio_buf *pbuf;
+ struct hfi1_ib_header hdr;
+ struct hfi1_other_headers *ohdr;
+
+ /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
+ if (qp->s_flags & HFI1_S_RESP_PENDING)
+ goto queue_ack;
+
+ /* Ensure s_rdma_ack_cnt changes are committed */
+ smp_read_barrier_depends();
+ if (qp->s_rdma_ack_cnt)
+ goto queue_ack;
+
+ /* Construct the header */
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
+ hwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh, hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ } else {
+ ohdr = &hdr.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+ /* read pkey_index without the lock (it's atomic) */
+ bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
+ (qp->r_nak_state <<
+ HFI1_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = hfi1_compute_aeth(qp);
+ sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
+ lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
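+ /* Echo a received FECN back to the remote sender as a BECN. */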
+ ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (driver_lstate(ppd) != IB_PORT_ACTIVE)
+ return;
+
+ sc = rcd->sc;
+ plen = 2 /* PBC */ + hwords;
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+
+ pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
+ if (!pbuf) {
+ /*
+ * We have no room to send at the moment. Pass
+ * responsibility for sending the ACK to the send tasklet
+ * so that when enough buffer space becomes available,
+ * the ACK is sent ahead of other outgoing packets.
+ */
+ goto queue_ack;
+ }
+
+ trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
+
+ /* write the pbc and data */
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
+
+ return;
+
+queue_ack:
+ this_cpu_inc(*ibp->rc_qacks);
+ spin_lock(&qp->s_lock);
+ qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ if (is_fecn)
+ qp->s_flags |= HFI1_S_ECN;
+
+ /* Schedule the send tasklet. */
+ hfi1_schedule_send(qp);
+ spin_unlock(&qp->s_lock);
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from do_rc_ack() and restart_rc() to set the QP
+ * up to resend starting from the given PSN.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct hfi1_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct hfi1_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
+
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (cmp_psn(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = cmp_psn(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
+ }
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See hfi1_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since there is only
+ * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ qp->s_psn = psn;
+ /*
+ * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer
+ * asynchronously before the send tasklet can get scheduled.
+ * Doing it in hfi1_make_rc_req() is too late.
+ */
+ if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= HFI1_S_WAIT_PSN;
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ */
+static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait)
+{
+ struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct hfi1_ibport *ibp;
+
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ hfi1_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ return;
+ } else /* need to handle delayed completion */
+ return;
+ } else
+ qp->s_retry--;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ ibp->n_rc_resends++;
+ else
+ ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
+
+ qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR |
+ HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN |
+ HFI1_S_WAIT_ACK);
+ if (wait)
+ qp->s_flags |= HFI1_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+ struct hfi1_qp *qp = (struct hfi1_qp *)arg;
+ struct hfi1_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ if (qp->s_flags & HFI1_S_TIMER) {
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ibp->n_rc_timeouts++;
+ qp->s_flags &= ~HFI1_S_TIMER;
+ del_timer(&qp->s_timer);
+ restart_rc(qp, qp->s_last_psn + 1, 1);
+ hfi1_schedule_send(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void hfi1_rc_rnr_retry(unsigned long arg)
+{
+ struct hfi1_qp *qp = (struct hfi1_qp *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & HFI1_S_WAIT_RNR) {
+ qp->s_flags &= ~HFI1_S_WAIT_RNR;
+ del_timer(&qp->s_timer);
+ hfi1_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
+ */
+static void reset_sending_psn(struct hfi1_qp *qp, u32 psn)
+{
+ struct hfi1_swqe *wqe;
+ u32 n = qp->s_last;
+
+ /* Find the work request corresponding to the given PSN. */
+ for (;;) {
+ wqe = get_swqe_ptr(qp, n);
+ if (cmp_psn(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_sending_psn = wqe->lpsn + 1;
+ else
+ qp->s_sending_psn = psn + 1;
+ break;
+ }
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ }
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr)
+{
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_swqe *wqe;
+ struct ib_wc wc;
+ unsigned i;
+ u32 opcode;
+ u32 psn;
+
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ /* Find out where the BTH is */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ WARN_ON(!qp->s_rdma_ack_cnt);
+ qp->s_rdma_ack_cnt--;
+ return;
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ reset_sending_psn(qp, psn);
+
+ /*
+ * Start timer after a packet requesting an ACK has been sent and
+ * there are still requests that haven't been acked.
+ */
+ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+ !(qp->s_flags &
+ (HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) &&
+ (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
+ start_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct hfi1_sge *sge = &wqe->sg_list[i];
+
+ hfi1_put_mr(sge->mr);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ /*
+ * If we were waiting for sends to complete before re-sending,
+ * and they are now complete, restart sending.
+ */
+ trace_hfi1_rc_sendcomplete(qp, psn);
+ if (qp->s_flags & HFI1_S_WAIT_PSN &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ qp->s_flags &= ~HFI1_S_WAIT_PSN;
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ hfi1_schedule_send(qp);
+ }
+}
+
+static inline void update_last_psn(struct hfi1_qp *qp, u32 psn)
+{
+ qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to hfi1_send_complete but has to check to be sure
+ * that the SGEs are not being referenced if the SWQE is being resent.
+ */
+static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
+ struct hfi1_swqe *wqe,
+ struct hfi1_ibport *ibp)
+{
+ struct ib_wc wc;
+ unsigned i;
+
+ /*
+ * Don't decrement refcount and don't generate a
+ * completion if the SWQE is being resent until the send
+ * is finished.
+ */
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct hfi1_sge *sge = &wqe->sg_list[i];
+
+ hfi1_put_mr(sge->mr);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ } else {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ this_cpu_inc(*ibp->rc_delayed_comp);
+ /*
+ * If send progress is not running, attempt to
+ * progress the SDMA queue.
+ */
+ if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
+ struct sdma_engine *engine;
+ u8 sc5;
+
+ /* For now use sc to find engine */
+ sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ engine = qp_to_sdma_engine(qp, sc5);
+ sdma_engine_progress_schedule(engine);
+ }
+ }
+
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, wqe->lpsn);
+
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop re-sending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_acked == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_acked = qp->s_cur;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (qp->s_acked != qp->s_tail) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ } else {
+ if (++qp->s_acked >= qp->s_size)
+ qp->s_acked = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ qp->s_draining = 0;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ }
+ return wqe;
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_ibport *ibp;
+ enum ib_wc_status status;
+ struct hfi1_swqe *wqe;
+ int ret = 0;
+ u32 ack_psn;
+ int diff;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
+ qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include any ACK'ed requests.
+ */
+ ack_psn = psn;
+ if (aeth >> 29)
+ ack_psn--;
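+ /*
+ * The top three AETH bits select the ACK type: 0 = ACK,
+ * 1 = RNR NAK, 3 = NAK (2 is reserved); see the switch below.
+ * For the NAK types (aeth >> 29 != 0), psn names the first
+ * un-ACKed packet, so ack_psn was backed up by one above to
+ * avoid completing that request.
+ */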
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * If this request is an RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only an RDMA_READ_LAST or ONLY
+ * can ACK an RDMA read, and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
+ /* Retry this request. */
+ if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) {
+ qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ u64 *vaddr = wqe->sg_list[0].vaddr;
+ *vaddr = val;
+ }
+ if (qp->s_num_rd_atomic &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+ qp->s_num_rd_atomic--;
+ /* Restart sending task if fence is complete */
+ if ((qp->s_flags & HFI1_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(HFI1_S_WAIT_FENCE |
+ HFI1_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(HFI1_S_WAIT_RDMAR |
+ HFI1_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+ }
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ this_cpu_inc(*ibp->rc_acks);
+ if (qp->s_acked != qp->s_tail) {
+ /*
+ * We are expecting more ACKs so
+ * reset the re-transmit timer.
+ */
+ start_timer(qp);
+ /*
+ * We can stop re-sending the earlier packets and
+ * continue with the next packet the receiver wants.
+ */
+ if (cmp_psn(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ } else if (cmp_psn(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
+ if (qp->s_flags & HFI1_S_WAIT_ACK) {
+ qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ hfi1_schedule_send(qp);
+ }
+ hfi1_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, psn);
+ ret = 1;
+ goto bail;
+
+ case 1: /* RNR NAK */
+ ibp->n_rnr_naks++;
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ if (qp->s_flags & HFI1_S_WAIT_RNR)
+ goto bail;
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
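+ /*
+ * An RNR retry count of 7 means "retry forever" per the IB
+ * spec, so it is only decremented when it is less than 7.
+ */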
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+
+ ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
+
+ reset_psn(qp, psn);
+
+ qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK);
+ qp->s_flags |= HFI1_S_WAIT_RNR;
+ qp->s_timer.function = hfi1_rc_rnr_retry;
+ qp->s_timer.expires = jiffies + usecs_to_jiffies(
+ ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
+ HFI1_AETH_CREDIT_MASK]);
+ add_timer(&qp->s_timer);
+ goto bail;
+
+ case 3: /* NAK */
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+ switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
+ HFI1_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ ibp->n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN.
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ restart_rc(qp, psn, 0);
+ hfi1_schedule_send(qp);
+ break;
+
+ case 1: /* Invalid Request */
+ status = IB_WC_REM_INV_REQ_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ status = IB_WC_REM_ACCESS_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ status = IB_WC_REM_OP_ERR;
+ ibp->n_other_naks++;
+class_b:
+ if (qp->s_last == qp->s_acked) {
+ hfi1_send_complete(qp, wqe, status);
+ hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail;
+
+ default: /* 2: reserved */
+reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
+
+/*
+ * We have seen an out-of-sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn,
+ struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_swqe *wqe;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
+ qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+
+ while (cmp_psn(psn, wqe->lpsn) > 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ break;
+ wqe = do_rc_completion(qp, wqe, ibp);
+ }
+
+ ibp->n_rdma_seq++;
+ qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/**
+ * rc_rcv_resp - process an incoming RC response packet
+ * @ibp: the port this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ * @rcd: the receive context data
+ *
+ * This is called from hfi1_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void rc_rcv_resp(struct hfi1_ibport *ibp,
+ struct hfi1_other_headers *ohdr,
+ void *data, u32 tlen, struct hfi1_qp *qp,
+ u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
+ struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_swqe *wqe;
+ enum ib_wc_status status;
+ unsigned long flags;
+ int diff;
+ u32 pad;
+ u32 aeth;
+ u64 val;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Ignore invalid responses. */
+ if (cmp_psn(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = cmp_psn(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if ((aeth >> 29) == 0)
+ hfi1_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ /*
+ * Skip everything other than the PSN we expect, if we are waiting
+ * for a reply to a restarted RDMA read or atomic op.
+ */
+ if (qp->r_flags & HFI1_R_RDMAR_SEQ) {
+ if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~HFI1_R_RDMAR_SEQ;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ status = IB_WC_SUCCESS;
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
+ __be32 *p = ohdr->u.at.atomic_ack_eth;
+
+ val = ((u64) be32_to_cpu(p[0]) << 32) |
+ be32_to_cpu(p[1]);
+ } else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_middle;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+read_middle:
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_len_err;
+ if (unlikely(pmtu >= qp->s_rdma_read_len))
+ goto ack_len_err;
+
+ /*
+ * We got a response so update the timeout.
+ * 4.096 usec. * (1 << qp->timeout)
+ */
+ qp->s_flags |= HFI1_S_TIMER;
+ mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
+ if (qp->s_flags & HFI1_S_WAIT_ACK) {
+ qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ hfi1_schedule_send(qp);
+ }
+
+ if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+ qp->s_retry = qp->s_retry_cnt;
+
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ */
+ qp->s_rdma_read_len -= pmtu;
+ update_last_psn(qp, psn);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 0 && <= pmtu.
+ * Remember to account for the ICRC (4 bytes).
+ */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto ack_len_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_last;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for the ICRC (4 bytes).
+ */
+ if (unlikely(tlen <= (hdrsize + pad + 4)))
+ goto ack_len_err;
+read_last:
+ tlen -= hdrsize + pad + 4;
+ if (unlikely(tlen != qp->s_rdma_read_len))
+ goto ack_len_err;
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ WARN_ON(qp->s_rdma_read_sge.num_sge);
+ (void) do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+ goto ack_done;
+ }
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ goto ack_err;
+
+ack_seq_err:
+ rdma_seq_err(qp, ibp, psn, rcd);
+ goto ack_done;
+
+ack_len_err:
+ status = IB_WC_LOC_LEN_ERR;
+ack_err:
+ if (qp->s_last == qp->s_acked) {
+ hfi1_send_complete(qp, wqe, status);
+ hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
+
+/**
+ * rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ * @rcd: the receive context data
+ *
+ * This is called from hfi1_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
+ struct hfi1_qp *qp, u32 opcode, u32 psn, int diff,
+ struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_ack_entry *e;
+ unsigned long flags;
+ u8 i, prev;
+ int old_req;
+
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if we already sent one.
+ */
+ if (!qp->r_nak_state) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+ goto done;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ *
+ * First, find where this duplicate PSN falls within the
+ * ACKs previously sent.
+ * old_req is true if there is an older response that is scheduled
+ * to be sent before sending this one.
+ */
+ e = NULL;
+ old_req = 1;
+ ibp->n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ for (i = qp->r_head_ack_queue; ; i = prev) {
+ if (i == qp->s_tail_ack_queue)
+ old_req = 0;
+ if (i)
+ prev = i - 1;
+ else
+ prev = HFI1_MAX_RDMA_ATOMIC;
+ if (prev == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[prev];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (cmp_psn(psn, e->psn) >= 0) {
+ if (prev == qp->s_tail_ack_queue &&
+ cmp_psn(psn, e->lpsn) <= 0)
+ old_req = 0;
+ break;
+ }
+ }
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST): {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+
+ /*
+ * If we didn't find the RDMA read request in the ack queue,
+ * we can ignore this request.
+ */
+ if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+ goto unlock_done;
+ /* RETH comes after BTH */
+ reth = &ohdr->u.rc.reth;
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ * We reuse the old ack_queue slot since the requester
+ * should not back up and request an earlier PSN for the
+ * same request.
+ */
+ offset = delta_psn(psn, e->psn) * qp->pmtu;
+ len = be32_to_cpu(reth->length);
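+ /*
+ * Worked example (values illustrative): if the duplicate read
+ * starts 2 PSNs into the original request with pmtu = 4096,
+ * the restart offset is 2 * 4096 = 8192 bytes; offset + len
+ * must still land exactly on the original end of the range,
+ * which the check below enforces.
+ */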
+ if (unlikely(offset + len != e->rdma_sge.sge_length))
+ goto unlock_done;
+ if (e->rdma_sge.mr) {
+ hfi1_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ if (len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock_done;
+ } else {
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->psn = psn;
+ if (old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ /*
+ * If we didn't find the atomic request in the ack queue
+ * or the send tasklet is already backed up to send an
+ * earlier entry, we can ignore this request.
+ */
+ if (!e || e->opcode != (u8) opcode || old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ default:
+ /*
+ * Ignore this operation if it doesn't request an ACK
+ * or an earlier RDMA read or atomic is going to be resent.
+ */
+ if (!(psn & IB_BTH_REQ_ACK) || old_req)
+ goto unlock_done;
+ /*
+ * Resend the most recent ACK if this request is
+ * after all the previous RDMA reads and atomics.
+ */
+ if (i == qp->r_head_ack_queue) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->r_psn - 1;
+ goto send_ack;
+ }
+
+ /*
+ * Resend the RDMA read or atomic op which
+ * ACKs this duplicate request.
+ */
+ qp->s_tail_ack_queue = i;
+ break;
+ }
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->r_nak_state = 0;
+ hfi1_schedule_send(qp);
+
+unlock_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+
+send_ack:
+ return 0;
+}
+
+void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err)
+{
+ unsigned long flags;
+ int lastwqe;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ lastwqe = hfi1_error_qp(qp, err);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+static inline void update_ack_queue(struct hfi1_qp *qp, unsigned n)
+{
+ unsigned next;
+
+ next = n + 1;
+ if (next > HFI1_MAX_RDMA_ATOMIC)
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
+static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
+ u32 lqpn, u32 rqpn, u8 svc_type)
+{
+ struct opa_hfi1_cong_log_event_internal *cc_event;
+
+ if (sl >= OPA_MAX_SLS)
+ return;
+
+ spin_lock(&ppd->cc_log_lock);
+
+ ppd->threshold_cong_event_map[sl/8] |= 1 << (sl % 8);
+ ppd->threshold_event_counter++;
+
+ cc_event = &ppd->cc_events[ppd->cc_log_idx++];
+ if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
+ ppd->cc_log_idx = 0;
+ cc_event->lqpn = lqpn & HFI1_QPN_MASK;
+ cc_event->rqpn = rqpn & HFI1_QPN_MASK;
+ cc_event->sl = sl;
+ cc_event->svc_type = svc_type;
+ cc_event->rlid = rlid;
+ /* keep timestamp in units of 1.024 usec */
+ cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
+
+ spin_unlock(&ppd->cc_log_lock);
+}
+
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
+ u32 rqpn, u8 svc_type)
+{
+ struct cca_timer *cca_timer;
+ u16 ccti, ccti_incr, ccti_timer, ccti_limit;
+ u8 trigger_threshold;
+ struct cc_state *cc_state;
+
+ if (sl >= OPA_MAX_SLS)
+ return;
+
+ cca_timer = &ppd->cca_timer[sl];
+
+ cc_state = get_cc_state(ppd);
+
+ if (cc_state == NULL)
+ return;
+
+ /*
+ * 1) increase CCTI (for this SL)
+ * 2) select IPG (i.e., call set_link_ipg())
+ * 3) start timer
+ */
+ ccti_limit = cc_state->cct.ccti_limit;
+ ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
+ ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
+ trigger_threshold =
+ cc_state->cong_setting.entries[sl].trigger_threshold;
+
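+ /*
+ * Worked example of the CCTI update below (values illustrative):
+ * with cca_timer->ccti = 10, ccti_incr = 4 and ccti_limit = 12,
+ * 10 + 4 exceeds the limit, so the CCTI saturates at 12 and
+ * set_link_ipg() re-selects the inter-packet gap accordingly.
+ */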
+ spin_lock(&ppd->cca_timer_lock);
+
+ if (cca_timer->ccti < ccti_limit) {
+ if (cca_timer->ccti + ccti_incr <= ccti_limit)
+ cca_timer->ccti += ccti_incr;
+ else
+ cca_timer->ccti = ccti_limit;
+ set_link_ipg(ppd);
+ }
+
+ spin_unlock(&ppd->cca_timer_lock);
+
+ ccti = cca_timer->ccti;
+
+ if (!hrtimer_active(&cca_timer->hrtimer)) {
+ /* ccti_timer is in units of 1.024 usec */
+ unsigned long nsec = 1024 * ccti_timer;
+
+ hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
+ HRTIMER_MODE_REL);
+ }
+
+ if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
+ log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
+}
+
+/**
+ * hfi1_rc_rcv - process an incoming RC packet
+ * @packet: the receive packet context (carries the receive
+ * context, header, receive flags, payload, length, and QP)
+ *
+ * This is called from qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void hfi1_rc_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_ib_header *hdr = packet->hdr;
+ u32 rcv_flags = packet->rcv_flags;
+ void *data = packet->ebuf;
+ u32 tlen = packet->tlen;
+ struct hfi1_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_other_headers *ohdr = packet->ohdr;
+ u32 bth0, opcode;
+ u32 hdrsize = packet->hlen;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = qp->pmtu;
+ int diff;
+ struct ib_reth *reth;
+ unsigned long flags;
+ u32 bth1;
+ int ret, is_fecn = 0;
+
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
+ return;
+
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
+ if (bth1 & HFI1_BECN_SMASK) {
+ u16 rlid = qp->remote_ah_attr.dlid;
+ u32 lqpn, rqpn;
+
+ lqpn = qp->ibqp.qp_num;
+ rqpn = qp->remote_qpn;
+ process_becn(
+ ppd,
+ qp->remote_ah_attr.sl,
+ rlid, lqpn, rqpn,
+ IB_CC_SVCTYPE_RC);
+ }
+ is_fecn = bth1 & HFI1_FECN_SMASK;
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode = bth0 >> 24;
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
+ hdrsize, pmtu, rcd);
+ if (is_fecn)
+ goto send_ack;
+ return;
+ }
+
+ /* Compute 24 bits worth of difference. */
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+ return;
+ goto send_ack;
+ }
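+ /*
+ * Worked example (illustrative): PSNs are 24 bits wide, so with
+ * r_psn = 0xFFFFFE and psn = 0x000001 the sign-extended 24-bit
+ * difference is +3, a small step forward rather than a huge
+ * artifact of the wraparound.
+ */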
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
+ break;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ qp_comm_est(qp);
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ ret = hfi1_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto no_immediate_data;
+ /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
+ case OP(SEND_LAST):
+ case OP(RDMA_WRITE_LAST):
+no_immediate_data:
+ wc.wc_flags = 0;
+ wc.ex.imm_data = 0;
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (bth0 >> 20) & 3;
+ /* Check for invalid length. */
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto nack_inv;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
+ hfi1_put_ss(&qp->r_sge);
+ qp->r_msn++;
+ if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ /*
+ * It seems that IB mandates the presence of an SL in a
+ * work completion only for the UD transport (see section
+ * 11.4.2 of IBTA Vol. 1).
+ *
+ * However, the way the SL is chosen below is consistent
+ * with the way that IB/qib works and tries to avoid
+ * introducing incompatibilities.
+ *
+ * See also OPA Vol. 1, section 9.7.6, and table 9-17.
+ */
+ wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal completion event if the solicited bit is set. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (bth0 & IB_BTH_SOLICITED) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+ /* consume RWQE */
+ reth = &ohdr->u.rc.reth;
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto nack_acc;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto no_immediate_data;
+ ret = hfi1_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
+
+ case OP(RDMA_READ_REQUEST): {
+ struct hfi1_ack_entry *e;
+ u32 len;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
+ if (next > HFI1_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ hfi1_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ reth = &ohdr->u.rc.reth;
+ len = be32_to_cpu(reth->length);
+ if (len) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto nack_acc_unlck;
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ if (len > pmtu)
+ qp->r_psn += (len - 1) / pmtu;
+ } else {
+ e->rdma_sge.mr = NULL;
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= HFI1_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ goto send_ack;
+ return;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth;
+ struct hfi1_ack_entry *e;
+ u64 vaddr;
+ atomic64_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ if (next > HFI1_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ hfi1_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ ateth = &ohdr->u.atomic_eth;
+ vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+ be32_to_cpu(ateth->vaddr[1]);
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv_unlck;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
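+ /*
+ * Both arms store the *prior* memory contents, as IB atomics
+ * require: atomic64_add_return() yields the post-add value, so
+ * subtracting sdata recovers the old one, and cmpxchg() returns
+ * the old value whether or not the compare succeeded.
+ */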
+ hfi1_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = psn;
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= HFI1_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ goto send_ack;
+ return;
+ }
+
+ default:
+ /* NAK unknown opcodes. */
+ goto nack_inv;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+ if (psn & (1 << 31))
+ goto send_ack;
+ return;
+
+rnr_nak:
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue RNR NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ return;
+
+nack_op_err:
+ hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ return;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ return;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+ hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ hfi1_send_rc_ack(rcd, qp, is_fecn);
+}
+
+void hfi1_rc_hdrerr(
+ struct hfi1_ctxtdata *rcd,
+ struct hfi1_ib_header *hdr,
+ u32 rcv_flags,
+ struct hfi1_qp *qp)
+{
+ int has_grh = rcv_flags & HFI1_HAS_GRH;
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ int diff;
+ u8 opcode;
+ u32 psn;
+
+ /* Check for GRH */
+ ohdr = &hdr->u.oth;
+ if (has_grh)
+ ohdr = &hdr->u.l.oth;
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ return;
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+
+ /* Only deal with RDMA Writes for now */
+ if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+ diff = delta_psn(psn, qp->r_psn);
+ if (!qp->r_nak_state && diff >= 0) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ } /* Out of sequence NAK */
+ } /* QP Request NAKs */
+}
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
new file mode 100644
index 000000000000..a4115288db66
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -0,0 +1,948 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/spinlock.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "qp.h"
+#include "sdma.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
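+ * The 5-bit RNR NAK timer code indexes this table; the value is
+ * converted with usecs_to_jiffies() when the RNR retry timer is
+ * armed in ruc_loopback() below.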
+ */
+const u32 ib_hfi1_rnr_table[32] = {
+ 655360, /* 00: 655.36 */
+ 10, /* 01: .01 */
+	20,	/* 02: .02 */
+ 30, /* 03: .03 */
+ 40, /* 04: .04 */
+ 60, /* 05: .06 */
+ 80, /* 06: .08 */
+ 120, /* 07: .12 */
+ 160, /* 08: .16 */
+ 240, /* 09: .24 */
+ 320, /* 0A: .32 */
+ 480, /* 0B: .48 */
+ 640, /* 0C: .64 */
+ 960, /* 0D: .96 */
+ 1280, /* 0E: 1.28 */
+ 1920, /* 0F: 1.92 */
+ 2560, /* 10: 2.56 */
+ 3840, /* 11: 3.84 */
+ 5120, /* 12: 5.12 */
+ 7680, /* 13: 7.68 */
+ 10240, /* 14: 10.24 */
+ 15360, /* 15: 15.36 */
+ 20480, /* 16: 20.48 */
+ 30720, /* 17: 30.72 */
+ 40960, /* 18: 40.96 */
+ 61440, /* 19: 61.44 */
+ 81920, /* 1A: 81.92 */
+ 122880, /* 1B: 122.88 */
+ 163840, /* 1C: 163.84 */
+ 245760, /* 1D: 245.76 */
+ 327680, /* 1E: 327.68 */
+ 491520 /* 1F: 491.52 */
+};
+
+/*
+ * Validate an RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct hfi1_lkey_table *rkt;
+ struct hfi1_pd *pd;
+ struct hfi1_sge_state *ss;
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
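+	/* release the MR references taken so far, newest first */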
+ while (j) {
+ struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ hfi1_put_mr(sge->mr);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
+{
+ unsigned long flags;
+ struct hfi1_rq *rq;
+ struct hfi1_rwq *wq;
+ struct hfi1_srq *srq;
+ struct hfi1_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(HFI1_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void hfi1_migrate_qp(struct hfi1_qp *qp)
+{
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
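+/*
+ * Return the indexed source GUID: index 0 selects the port GUID,
+ * higher indices come from the port's GUID table.
+ */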
+static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
+{
+ if (!index) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ return cpu_to_be64(ppd->guid);
+ }
+ return ibp->guids[index - 1];
+}
+
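+/*
+ * A GID matches when the interface ID matches and the subnet prefix
+ * is either our own or the default prefix.
+ */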
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+ return (gid->global.interface_id == id &&
+ (gid->global.subnet_prefix == gid_prefix ||
+ gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ * hfi1_ruc_check_hdr - validate the incoming header against the QP state
+ *
+ * This should be called with the QP r_lock held.
+ *
+ * The s_lock will be acquired around the hfi1_migrate_qp() call.
+ *
+ * Return 0 if the header is OK, 1 otherwise.
+ */
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
+ int has_grh, struct hfi1_qp *qp, u32 bth0)
+{
+ __be64 guid;
+ unsigned long flags;
+ u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+
+ if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
+ if (!has_grh) {
+ if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->alt_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
+ sc5, be16_to_cpu(hdr->lrh[3])))) {
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ goto err;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_migrate_qp(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else {
+ if (!has_grh) {
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp,
+ qp->remote_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->remote_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
+ sc5, be16_to_cpu(hdr->lrh[3])))) {
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->port_num)
+ goto err;
+ if (qp->s_mig_state == IB_MIG_REARM &&
+ !(bth0 & IB_BTH_MIG_REQ))
+ qp->s_mig_state = IB_MIG_ARMED;
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+/**
+ * ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from hfi1_do_send() to
+ * forward a WQE addressed to the same HFI.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+static void ruc_loopback(struct hfi1_qp *sqp)
+{
+ struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct hfi1_qp *qp;
+ struct hfi1_swqe *wqe;
+ struct hfi1_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ int release;
+ int ret;
+
+ rcu_read_lock();
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+ qp = hfi1_lookup_qpn(ibp, sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT)) ||
+ !(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= HFI1_S_BUSY;
+
+again:
+ if (sqp->s_last == sqp->s_head)
+ goto clr_busy;
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_hfi1_state_ops[sqp->state] & HFI1_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ ibp->n_pkt_drops++;
+ /*
+		 * For RC, the requester would time out and retry, so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof(wc));
+ send_status = IB_WC_SUCCESS;
+
+ release = 1;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = hfi1_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = 0;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->wr.wr.atomic.remote_addr,
+ wqe->wr.wr.atomic.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
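+		/*
+		 * FETCH_AND_ADD stores the pre-add value (atomic64_add_return()
+		 * minus the addend); CMP_AND_SWP stores the prior value returned
+		 * by cmpxchg(). Either way the requester gets the old contents.
+		 */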
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ hfi1_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ hfi1_put_mr(sge->mr);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ hfi1_put_ss(&qp->r_sge);
+
+ if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ ibp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ hfi1_send_complete(sqp, wqe, send_status);
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ ibp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK))
+ goto clr_busy;
+ sqp->s_flags |= HFI1_S_WAIT_RNR;
+ sqp->s_timer.function = hfi1_rc_rnr_retry;
+ sqp->s_timer.expires = jiffies +
+ usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]);
+ add_timer(&sqp->s_timer);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ hfi1_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ hfi1_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~HFI1_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~HFI1_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ rcu_read_unlock();
+}
+
+/**
+ * hfi1_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+ hdr->version_tclass_flow =
+ cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+ (grh->flow_label << IB_GRH_FLOW_SHIFT));
+ hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ hdr->next_hdr = IB_GRH_NEXT_HDR;
+ hdr->hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.interface_id =
+ grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
+ ibp->guids[grh->sgid_index - 1] :
+ cpu_to_be64(ppd_from_ibp(ibp)->guid);
+ hdr->dgid = grh->dgid;
+
+ /* GRH header size in 32-bit words. */
+ return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+/*
+ * clear_ahg - clear the AHG state of a QP
+ */
+void clear_ahg(struct hfi1_qp *qp)
+{
+ qp->s_hdr->ahgcount = 0;
+ qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
+ if (qp->s_sde)
+ sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
+ qp->s_ahgidx = -1;
+ qp->s_sde = NULL;
+}
+
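+/* 32-bit word offset of BTH2 within the PIO header; the AHG edit target */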
+#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)
+
+/**
+ * build_ahg - create ahg in s_hdr
+ * @qp: a pointer to QP
+ * @npsn: the next PSN for the request/response
+ *
+ * This routine handles the AHG by allocating an AHG entry and triggering
+ * the header copy for the first middle packet.
+ *
+ * Subsequent middle packets reuse the copied entry, editing only the
+ * PSN with 1 or 2 edits.
+ */
+static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
+{
+ if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
+ clear_ahg(qp);
+ if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
+ /* first middle that needs copy */
+ if (qp->s_ahgidx < 0) {
+ if (!qp->s_sde)
+ qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde);
+ }
+ if (qp->s_ahgidx >= 0) {
+ qp->s_ahgpsn = npsn;
+ qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
+			/* save the sde to protect against a change in another thread */
+ qp->s_hdr->sde = qp->s_sde;
+ qp->s_hdr->ahgidx = qp->s_ahgidx;
+ qp->s_flags |= HFI1_S_AHG_VALID;
+ }
+ } else {
+ /* subsequent middle after valid */
+ if (qp->s_ahgidx >= 0) {
+ qp->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
+ qp->s_hdr->ahgidx = qp->s_ahgidx;
+ qp->s_hdr->ahgcount++;
+ qp->s_hdr->ahgdesc[0] =
+ sdma_build_ahg_descriptor(
+ (__force u16)cpu_to_be16((u16)npsn),
+ BTH2_OFFSET,
+ 16,
+ 16);
+ if ((npsn & 0xffff0000) !=
+ (qp->s_ahgpsn & 0xffff0000)) {
+ qp->s_hdr->ahgcount++;
+ qp->s_hdr->ahgdesc[1] =
+ sdma_build_ahg_descriptor(
+ (__force u16)cpu_to_be16(
+ (u16)(npsn >> 16)),
+ BTH2_OFFSET,
+ 0,
+ 16);
+ }
+ }
+ }
+}
+
+void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u16 lrh0;
+ u32 nwords;
+ u32 extra_bytes;
+ u8 sc5;
+ u32 bth1;
+
+ /* Construct the header. */
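+	/* -size & 3 rounds the payload up to the next 4-byte boundary */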
+ extra_bytes = -qp->s_cur_size & 3;
+ nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ lrh0 = HFI1_LRH_BTH;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = HFI1_LRH_GRH;
+ middle = 0;
+ }
+ sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
+ qp->s_sc = sc5;
+ /*
+ * reset s_hdr/AHG fields
+ *
+	 * This ensures that the ahgentry/ahgcount
+	 * are at a non-AHG default to protect
+	 * build_verbs_tx_desc() from using
+	 * a stale ahgidx.
+ *
+ * build_ahg() will modify as appropriate
+ * to use the AHG feature.
+ */
+ qp->s_hdr->tx_flags = 0;
+ qp->s_hdr->ahgcount = 0;
+ qp->s_hdr->ahgidx = 0;
+ qp->s_hdr->sde = NULL;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ else
+ middle = 0;
+ if (middle)
+ build_ahg(qp, bth2);
+ else
+ qp->s_flags &= ~HFI1_S_AHG_VALID;
+ qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr->ibh.lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ qp->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ qp->remote_ah_attr.src_path_bits);
+ bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
+ bth0 |= extra_bytes << 20;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ bth1 = qp->remote_qpn;
+ if (qp->s_flags & HFI1_S_ECN) {
+ qp->s_flags &= ~HFI1_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
+ }
+ ohdr->bth[1] = cpu_to_be32(bth1);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
+/**
+ * hfi1_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, two threads could send packets out of order.
+ */
+void hfi1_do_send(struct work_struct *work)
+{
+ struct iowait *wait = container_of(work, struct iowait, iowork);
+ struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ int (*make_req)(struct hfi1_qp *qp);
+ unsigned long flags;
+
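+	/*
+	 * A packet addressed to our own base LID (low LMC bits masked off)
+	 * never touches the wire; hand it to the loopback path instead.
+	 */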
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC) &&
+ !loopback &&
+ (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ ruc_loopback(qp);
+ return;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ make_req = hfi1_make_rc_req;
+ else if (qp->ibqp.qp_type == IB_QPT_UC)
+ make_req = hfi1_make_uc_req;
+ else
+ make_req = hfi1_make_ud_req;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!hfi1_send_ok(qp)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ qp->s_flags |= HFI1_S_BUSY;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (hfi1_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
+ qp->s_cur_sge, qp->s_cur_size))
+ break;
+ /* Record that s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+ } while (make_req(qp));
+}
+
+/*
+ * This should be called with s_lock held.
+ */
+void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ unsigned i;
+
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct hfi1_sge *sge = &wqe->sg_list[i];
+
+ hfi1_put_mr(sge->mr);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+
+ /* See ch. 11.2.4.1 and 10.7.3.1 */
+ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = status;
+ wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
+ wc.qp = &qp->ibqp;
+ if (status == IB_WC_SUCCESS)
+ wc.byte_len = wqe->length;
+ hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
+ }
+
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
new file mode 100644
index 000000000000..a8c903caecce
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -0,0 +1,2962 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "qp.h"
+#include "sdma.h"
+#include "iowait.h"
+#include "trace.h"
+
+/* must be a power of 2, >= 64 and <= 32768 */
+#define SDMA_DESCQ_CNT 1024
+#define INVALID_TAIL 0xffff
+
+static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
+module_param(sdma_descq_cnt, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+static uint sdma_idle_cnt = 250;
+module_param(sdma_idle_cnt, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
+
+uint mod_num_sdma;
+module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
+MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
+
+#define SDMA_WAIT_BATCH_SIZE 20
+/* max wait time for a SDMA engine to indicate it has halted */
+#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
+/* all SDMA engine errors that cause a halt */
+
+#define SD(name) SEND_DMA_##name
+#define ALL_SDMA_ENG_HALT_ERRS \
+ (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
+ | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
+
+/* sdma_sendctrl operations */
+#define SDMA_SENDCTRL_OP_ENABLE (1U << 0)
+#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
+#define SDMA_SENDCTRL_OP_HALT (1U << 2)
+#define SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
+
+/* handle long defines */
+#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
+SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
+#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
+SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
+
+static const char * const sdma_state_names[] = {
+ [sdma_state_s00_hw_down] = "s00_HwDown",
+ [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
+ [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
+ [sdma_state_s20_idle] = "s20_Idle",
+ [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+ [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+ [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
+ [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
+ [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
+ [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
+ [sdma_state_s99_running] = "s99_Running",
+};
+
+static const char * const sdma_event_names[] = {
+ [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
+ [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
+ [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
+ [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
+ [sdma_event_e30_go_running] = "e30_GoRunning",
+ [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
+ [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
+ [sdma_event_e60_hw_halted] = "e60_HwHalted",
+ [sdma_event_e70_go_idle] = "e70_GoIdle",
+ [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
+ [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
+ [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
+ [sdma_event_e85_link_down] = "e85_LinkDown",
+ [sdma_event_e90_sw_halted] = "e90_SwHalted",
+};
+
+static const struct sdma_set_state_action sdma_action_table[] = {
+ [sdma_state_s00_hw_down] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s10_hw_start_up_halt_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 1,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s15_hw_start_up_clean_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 1,
+ },
+ [sdma_state_s20_idle] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 1,
+ },
+ [sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s60_idle_halt_wait] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 1,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s80_hw_freeze] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s82_freeze_sw_clean] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ },
+ [sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_cleanup = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+#define SDMA_TAIL_UPDATE_THRESH 0x1F
+
+/* declare all statics here rather than keep sorting */
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct sdma_state *);
+static void sdma_get(struct sdma_state *);
+static void sdma_hw_clean_up_task(unsigned long);
+static void sdma_put(struct sdma_state *);
+static void sdma_set_state(struct sdma_engine *, enum sdma_states);
+static void sdma_start_hw_clean_up(struct sdma_engine *);
+static void sdma_start_sw_clean_up(struct sdma_engine *);
+static void sdma_sw_clean_up_task(unsigned long);
+static void sdma_sendctrl(struct sdma_engine *, unsigned);
+static void init_sdma_regs(struct sdma_engine *, u32, uint);
+static void sdma_process_event(
+ struct sdma_engine *sde,
+ enum sdma_events event);
+static void __sdma_process_event(
+ struct sdma_engine *sde,
+ enum sdma_events event);
+static void dump_sdma_state(struct sdma_engine *sde);
+static void sdma_make_progress(struct sdma_engine *sde, u64 status);
+static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
+static void sdma_flush_descq(struct sdma_engine *sde);
+
+/**
+ * sdma_state_name() - return state string from enum
+ * @state: state
+ */
+static const char *sdma_state_name(enum sdma_states state)
+{
+ return sdma_state_names[state];
+}
+
+static void sdma_get(struct sdma_state *ss)
+{
+ kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+ struct sdma_state *ss =
+ container_of(kref, struct sdma_state, kref);
+
+ complete(&ss->comp);
+}
+
+static void sdma_put(struct sdma_state *ss)
+{
+ kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct sdma_state *ss)
+{
+ sdma_put(ss);
+ wait_for_completion(&ss->comp);
+}
+
+static inline void write_sde_csr(
+ struct sdma_engine *sde,
+ u32 offset0,
+ u64 value)
+{
+ write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
+}
+
+static inline u64 read_sde_csr(
+ struct sdma_engine *sde,
+ u32 offset0)
+{
+ return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
+}
+
+/*
+ * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
+ * sdma engine 'sde' to drop to 0.
+ */
+static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
+ int pause)
+{
+ u64 off = 8 * sde->this_idx;
+ struct hfi1_devdata *dd = sde->dd;
+ int lcnt = 0;
+
+ while (1) {
+ u64 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
+
+ reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
+ reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
+ if (reg == 0)
+ break;
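+		/* with the udelay(1) below this polls for roughly 100us */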
+ if (lcnt++ > 100) {
+ dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u\n",
+ __func__, sde->this_idx, (u32)reg);
+ break;
+ }
+ udelay(1);
+ }
+}
+
+/*
+ * sdma_wait() - wait for packet egress to complete for all SDMA engines,
+ * and pause for credit return.
+ */
+void sdma_wait(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ sdma_wait_for_packet_egress(sde, 0);
+ }
+}
+
+static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
+{
+ u64 reg;
+
+ if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
+ return;
+ reg = cnt;
+ reg &= SD(DESC_CNT_CNT_MASK);
+ reg <<= SD(DESC_CNT_CNT_SHIFT);
+ write_sde_csr(sde, SD(DESC_CNT), reg);
+}
+
+/*
+ * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
+ *
+ * Depending on timing there can be txreqs in two places:
+ * - in the descq ring
+ * - in the flush list
+ *
+ * To avoid ordering issues the descq ring needs to be flushed
+ * first followed by the flush list.
+ *
+ * This routine is called from two places
+ * - From a work queue item
+ * - Directly from the state machine just before setting the
+ * state to running
+ *
+ * Must be called with head_lock held
+ *
+ */
+static void sdma_flush(struct sdma_engine *sde)
+{
+ struct sdma_txreq *txp, *txp_next;
+ LIST_HEAD(flushlist);
+
+ /* flush from head to tail */
+ sdma_flush_descq(sde);
+ spin_lock(&sde->flushlist_lock);
+ /* copy flush list */
+ list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
+ list_del_init(&txp->list);
+ list_add_tail(&txp->list, &flushlist);
+ }
+ spin_unlock(&sde->flushlist_lock);
+ /* flush from flush list */
+ list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
+ int drained = 0;
+ /* protect against complete modifying */
+ struct iowait *wait = txp->wait;
+
+ list_del_init(&txp->list);
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ trace_hfi1_sdma_out_sn(sde, txp->sn);
+ if (WARN_ON_ONCE(sde->head_sn != txp->sn))
+ dd_dev_err(sde->dd, "expected %llu got %llu\n",
+ sde->head_sn, txp->sn);
+ sde->head_sn++;
+#endif
+ sdma_txclean(sde->dd, txp);
+ if (wait)
+ drained = atomic_dec_and_test(&wait->sdma_busy);
+ if (txp->complete)
+ (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
+ if (wait && drained)
+ iowait_drain_wakeup(wait);
+ }
+}
+
+/*
+ * Fields a work request for flushing the descq ring
+ * and the flush list
+ *
+ * If the engine has been brought to running during
+ * the scheduling delay, the flush is ignored, on the
+ * assumption that the transition to running already
+ * performed this flush.
+ *
+ */
+static void sdma_field_flush(struct work_struct *work)
+{
+ unsigned long flags;
+ struct sdma_engine *sde =
+ container_of(work, struct sdma_engine, flush_worker);
+
+ write_seqlock_irqsave(&sde->head_lock, flags);
+ if (!__sdma_running(sde))
+ sdma_flush(sde);
+ write_sequnlock_irqrestore(&sde->head_lock, flags);
+}
+
+static void sdma_err_halt_wait(struct work_struct *work)
+{
+ struct sdma_engine *sde = container_of(work, struct sdma_engine,
+ err_halt_worker);
+ u64 statuscsr;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
+ while (1) {
+ statuscsr = read_sde_csr(sde, SD(STATUS));
+ statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
+ if (statuscsr)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(sde->dd,
+ "SDMA engine %d - timeout waiting for engine to halt\n",
+ sde->this_idx);
+ /*
+ * Continue anyway. This could happen if there was
+ * an uncorrectable error in the wrong spot.
+ */
+ break;
+ }
+ usleep_range(80, 120);
+ }
+
+ sdma_process_event(sde, sdma_event_e15_hw_halt_done);
+}
+
+static void sdma_start_err_halt_wait(struct sdma_engine *sde)
+{
+ schedule_work(&sde->err_halt_worker);
+}
+
+static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
+{
+ if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
+ unsigned index;
+ struct hfi1_devdata *dd = sde->dd;
+
+ for (index = 0; index < dd->num_sdma; index++) {
+ struct sdma_engine *curr_sdma = &dd->per_sdma[index];
+
+ if (curr_sdma != sde)
+ curr_sdma->progress_check_head =
+ curr_sdma->descq_head;
+ }
+ dd_dev_err(sde->dd,
+ "SDMA engine %d - check scheduled\n",
+ sde->this_idx);
+ mod_timer(&sde->err_progress_check_timer, jiffies + 10);
+ }
+}
+
+static void sdma_err_progress_check(unsigned long data)
+{
+ unsigned index;
+ struct sdma_engine *sde = (struct sdma_engine *)data;
+
+ dd_dev_err(sde->dd, "SDE progress check event\n");
+ for (index = 0; index < sde->dd->num_sdma; index++) {
+ struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
+ unsigned long flags;
+
+ /* check progress on each engine except the current one */
+ if (curr_sde == sde)
+ continue;
+ /*
+		 * We must disable interrupts when acquiring the engine's
+		 * locks, to avoid a deadlock if an interrupt fires and
+		 * spins on the same lock on the same CPU.
+ */
+ spin_lock_irqsave(&curr_sde->tail_lock, flags);
+ write_seqlock(&curr_sde->head_lock);
+
+ /* skip non-running queues */
+ if (curr_sde->state.current_state != sdma_state_s99_running) {
+ write_sequnlock(&curr_sde->head_lock);
+ spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
+ continue;
+ }
+
+ if ((curr_sde->descq_head != curr_sde->descq_tail) &&
+ (curr_sde->descq_head ==
+ curr_sde->progress_check_head))
+ __sdma_process_event(curr_sde,
+ sdma_event_e90_sw_halted);
+ write_sequnlock(&curr_sde->head_lock);
+ spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
+ }
+ schedule_work(&sde->err_halt_worker);
+}
+
+static void sdma_hw_clean_up_task(unsigned long opaque)
+{
+ struct sdma_engine *sde = (struct sdma_engine *) opaque;
+ u64 statuscsr;
+
+ while (1) {
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__,
+ __func__);
+#endif
+ statuscsr = read_sde_csr(sde, SD(STATUS));
+ statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
+ if (statuscsr)
+ break;
+ udelay(10);
+ }
+
+ sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
+}
+
+static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
+{
+ smp_read_barrier_depends(); /* see sdma_update_tail() */
+ return sde->tx_ring[sde->tx_head & sde->sdma_mask];
+}
+
+/*
+ * flush ring for recovery
+ */
+static void sdma_flush_descq(struct sdma_engine *sde)
+{
+ u16 head, tail;
+ int progress = 0;
+ struct sdma_txreq *txp = get_txhead(sde);
+
+ /* The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+ head = sde->descq_head & sde->sdma_mask;
+ tail = sde->descq_tail & sde->sdma_mask;
+ while (head != tail) {
+ /* advance head, wrap if needed */
+ head = ++sde->descq_head & sde->sdma_mask;
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == head) {
+ int drained = 0;
+ /* protect against complete modifying */
+ struct iowait *wait = txp->wait;
+
+ /* remove from list */
+ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
+ if (wait)
+ drained = atomic_dec_and_test(&wait->sdma_busy);
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ trace_hfi1_sdma_out_sn(sde, txp->sn);
+ if (WARN_ON_ONCE(sde->head_sn != txp->sn))
+ dd_dev_err(sde->dd, "expected %llu got %llu\n",
+ sde->head_sn, txp->sn);
+ sde->head_sn++;
+#endif
+ sdma_txclean(sde->dd, txp);
+ trace_hfi1_sdma_progress(sde, head, tail, txp);
+ if (txp->complete)
+ (*txp->complete)(
+ txp,
+ SDMA_TXREQ_S_ABORTED,
+ drained);
+ if (wait && drained)
+ iowait_drain_wakeup(wait);
+ /* see if there is another txp */
+ txp = get_txhead(sde);
+ }
+ progress++;
+ }
+ if (progress)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+}
+
+static void sdma_sw_clean_up_task(unsigned long opaque)
+{
+ struct sdma_engine *sde = (struct sdma_engine *) opaque;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+
+ /*
+ * At this point, the following should always be true:
+ * - We are halted, so no more descriptors are getting retired.
+ * - We are not running, so no one is submitting new work.
+ * - Only we can send the e40_sw_cleaned, so we can't start
+ * running again until we say so. So, the active list and
+ * descq are ours to play with.
+ */
+
+ /*
+ * In the error clean up sequence, software clean must be called
+ * before the hardware clean so we can use the hardware head in
+ * the progress routine. A hardware clean or SPC unfreeze will
+ * reset the hardware head.
+ *
+ * Process all retired requests. The progress routine will use the
+ * latest physical hardware head - we are not running so speed does
+ * not matter.
+ */
+ sdma_make_progress(sde, 0);
+
+ sdma_flush(sde);
+
+ /*
+ * Reset our notion of head and tail.
+ * Note that the HW registers have been reset via an earlier
+ * clean up.
+ */
+ sde->descq_tail = 0;
+ sde->descq_head = 0;
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ *sde->head_dma = 0;
+
+ __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
+
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void sdma_sw_tear_down(struct sdma_engine *sde)
+{
+ struct sdma_state *ss = &sde->state;
+
+ /* Releasing this reference means the state machine has stopped. */
+ sdma_put(ss);
+
+ /* stop waiting for all unfreeze events to complete */
+ atomic_set(&sde->dd->sdma_unfreeze_count, -1);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+}
+
+static void sdma_start_hw_clean_up(struct sdma_engine *sde)
+{
+ tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
+}
+
+static void sdma_start_sw_clean_up(struct sdma_engine *sde)
+{
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
+}
+
+static void sdma_set_state(struct sdma_engine *sde,
+ enum sdma_states next_state)
+{
+ struct sdma_state *ss = &sde->state;
+ const struct sdma_set_state_action *action = sdma_action_table;
+ unsigned op = 0;
+
+ trace_hfi1_sdma_state(
+ sde,
+ sdma_state_names[ss->current_state],
+ sdma_state_names[next_state]);
+
+ /* debugging bookkeeping */
+ ss->previous_state = ss->current_state;
+ ss->previous_op = ss->current_op;
+ ss->current_state = next_state;
+
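+	/* flush any queued work before (re)entering the running state */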
+	if (ss->previous_state != sdma_state_s99_running &&
+	    next_state == sdma_state_s99_running)
+ sdma_flush(sde);
+
+ if (action[next_state].op_enable)
+ op |= SDMA_SENDCTRL_OP_ENABLE;
+
+ if (action[next_state].op_intenable)
+ op |= SDMA_SENDCTRL_OP_INTENABLE;
+
+ if (action[next_state].op_halt)
+ op |= SDMA_SENDCTRL_OP_HALT;
+
+ if (action[next_state].op_cleanup)
+ op |= SDMA_SENDCTRL_OP_CLEANUP;
+
+ if (action[next_state].go_s99_running_tofalse)
+ ss->go_s99_running = 0;
+
+ if (action[next_state].go_s99_running_totrue)
+ ss->go_s99_running = 1;
+
+ ss->current_op = op;
+ sdma_sendctrl(sde, ss->current_op);
+}
+
+/**
+ * sdma_get_descq_cnt() - called when device probed
+ *
+ * Return a validated descq count.
+ *
+ * This is currently only used in the verbs initialization to build the tx
+ * list.
+ *
+ * This will probably be deleted in favor of a more scalable approach to
+ * alloc tx's.
+ *
+ */
+u16 sdma_get_descq_cnt(void)
+{
+ u16 count = sdma_descq_cnt;
+
+ if (!count)
+ return SDMA_DESCQ_CNT;
+	/* count must be a power of 2 between 64 and 32768, inclusive.
+	 * Otherwise return the default.
+	 */
+	if (!is_power_of_2(count))
+		return SDMA_DESCQ_CNT;
+	if (count < 64 || count > 32768)
+ return SDMA_DESCQ_CNT;
+ return count;
+}
+
+/**
+ * sdma_select_engine_vl() - select sdma engine
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ * This function returns an engine based on the selector and a vl. The
+ * mapping fields are protected by RCU.
+ */
+struct sdma_engine *sdma_select_engine_vl(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 vl)
+{
+ struct sdma_vl_map *m;
+ struct sdma_map_elem *e;
+ struct sdma_engine *rval;
+
+ if (WARN_ON(vl > 8))
+ return NULL;
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ e = m->map[vl & m->mask];
+ rval = e->sde[selector & e->mask];
+ rcu_read_unlock();
+
+ trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
+ return rval;
+}
+
+/**
+ * sdma_select_engine_sc() - select sdma engine
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @sc5: the 5 bit sc
+ *
+ * This function returns an engine based on the selector and an sc.
+ */
+struct sdma_engine *sdma_select_engine_sc(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 sc5)
+{
+ u8 vl = sc_to_vlt(dd, sc5);
+
+ return sdma_select_engine_vl(dd, selector, vl);
+}
+
+/*
+ * Free the indicated map struct
+ */
+static void sdma_map_free(struct sdma_vl_map *m)
+{
+ int i;
+
+ for (i = 0; m && i < m->actual_vls; i++)
+ kfree(m->map[i]);
+ kfree(m);
+}
+
+/*
+ * Handle RCU callback
+ */
+static void sdma_map_rcu_callback(struct rcu_head *list)
+{
+ struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
+
+ sdma_map_free(m);
+}
+
+/**
+ * sdma_map_init - called when # vls change
+ * @dd: hfi1_devdata
+ * @port: port number
+ * @num_vls: number of vls
+ * @vl_engines: per vl engine mapping (optional)
+ *
+ * This routine changes the mapping based on the number of vls.
+ *
+ * vl_engines is used to specify a non-uniform vl/engine loading. NULL
+ * implies auto computing the loading and giving each VL a uniform
+ * distribution of engines per VL.
+ *
+ * The auto algorithm computes the sde_per_vl and the number of extra
+ * engines. Any extra engines are added from the last VL on down.
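+ * For example, 8 engines across 3 VLs gives sde_per_vl = 2 with 2
+ * extras, so VL2 and VL1 each get 3 engines and VL0 gets 2.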
+ *
+ * rcu locking is used here to control access to the mapping fields.
+ *
+ * If either num_vls or num_sdma is not a power of 2, the array sizes
+ * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
+ * up to the next highest power of 2 and the first entry is reused
+ * in a round robin fashion.
+ *
+ * If an error occurs the existing mapping is left unchanged.
+ *
+ */
+int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
+{
+ int i, j;
+ int extra, sde_per_vl;
+ int engine = 0;
+ u8 lvl_engines[OPA_MAX_VLS];
+ struct sdma_vl_map *oldmap, *newmap;
+
+ if (!(dd->flags & HFI1_HAS_SEND_DMA))
+ return 0;
+
+ if (!vl_engines) {
+ /* truncate divide */
+ sde_per_vl = dd->num_sdma / num_vls;
+ /* extras */
+ extra = dd->num_sdma % num_vls;
+ vl_engines = lvl_engines;
+ /* add extras from last vl down */
+ for (i = num_vls - 1; i >= 0; i--, extra--)
+ vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
+ }
+ /* build new map */
+ newmap = kzalloc(
+ sizeof(struct sdma_vl_map) +
+ roundup_pow_of_two(num_vls) *
+ sizeof(struct sdma_map_elem *),
+ GFP_KERNEL);
+ if (!newmap)
+ goto bail;
+ newmap->actual_vls = num_vls;
+ newmap->vls = roundup_pow_of_two(num_vls);
+ newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+ for (i = 0; i < newmap->vls; i++) {
+ /* save for wrap around */
+ int first_engine = engine;
+
+ if (i < newmap->actual_vls) {
+ int sz = roundup_pow_of_two(vl_engines[i]);
+
+ /* only allocate once */
+ newmap->map[i] = kzalloc(
+ sizeof(struct sdma_map_elem) +
+ sz * sizeof(struct sdma_engine *),
+ GFP_KERNEL);
+ if (!newmap->map[i])
+ goto bail;
+ newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
+ /* assign engines */
+ for (j = 0; j < sz; j++) {
+ newmap->map[i]->sde[j] =
+ &dd->per_sdma[engine];
+ if (++engine >= first_engine + vl_engines[i])
+ /* wrap back to first engine */
+ engine = first_engine;
+ }
+ } else {
+ /* just re-use entry without allocating */
+ newmap->map[i] = newmap->map[i % num_vls];
+ }
+ engine = first_engine + vl_engines[i];
+ }
+ /* newmap in hand, save old map */
+ spin_lock_irq(&dd->sde_map_lock);
+ oldmap = rcu_dereference_protected(dd->sdma_map,
+ lockdep_is_held(&dd->sde_map_lock));
+
+ /* publish newmap */
+ rcu_assign_pointer(dd->sdma_map, newmap);
+
+ spin_unlock_irq(&dd->sde_map_lock);
+ /* success, free any old map after grace period */
+ if (oldmap)
+ call_rcu(&oldmap->list, sdma_map_rcu_callback);
+ return 0;
+bail:
+ /* free any partial allocation */
+ sdma_map_free(newmap);
+ return -ENOMEM;
+}
+
+/*
+ * Clean up allocated memory.
+ *
+ * This routine can be called regardless of the success of sdma_init().
+ *
+ */
+static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
+{
+ size_t i;
+ struct sdma_engine *sde;
+
+ if (dd->sdma_pad_dma) {
+ dma_free_coherent(&dd->pcidev->dev, 4,
+ (void *)dd->sdma_pad_dma,
+ dd->sdma_pad_phys);
+ dd->sdma_pad_dma = NULL;
+ dd->sdma_pad_phys = 0;
+ }
+ if (dd->sdma_heads_dma) {
+ dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
+ (void *)dd->sdma_heads_dma,
+ dd->sdma_heads_phys);
+ dd->sdma_heads_dma = NULL;
+ dd->sdma_heads_phys = 0;
+ }
+ for (i = 0; dd->per_sdma && i < num_engines; ++i) {
+ sde = &dd->per_sdma[i];
+
+ sde->head_dma = NULL;
+ sde->head_phys = 0;
+
+ if (sde->descq) {
+ dma_free_coherent(
+ &dd->pcidev->dev,
+ sde->descq_cnt * sizeof(u64[2]),
+ sde->descq,
+ sde->descq_phys
+ );
+ sde->descq = NULL;
+ sde->descq_phys = 0;
+ }
+ if (is_vmalloc_addr(sde->tx_ring))
+ vfree(sde->tx_ring);
+ else
+ kfree(sde->tx_ring);
+ sde->tx_ring = NULL;
+ }
+ spin_lock_irq(&dd->sde_map_lock);
+ kfree(rcu_access_pointer(dd->sdma_map));
+ RCU_INIT_POINTER(dd->sdma_map, NULL);
+ spin_unlock_irq(&dd->sde_map_lock);
+ synchronize_rcu();
+ kfree(dd->per_sdma);
+ dd->per_sdma = NULL;
+}
+
+/**
+ * sdma_init() - called when device probed
+ * @dd: hfi1_devdata
+ * @port: port number (currently only zero)
+ *
+ * sdma_init initializes the specified number of engines.
+ *
+ * The code initializes each sde and its csrs. Interrupts
+ * are not required to be enabled.
+ *
+ * Returns:
+ * 0 - success, -errno on failure
+ */
+int sdma_init(struct hfi1_devdata *dd, u8 port)
+{
+ unsigned this_idx;
+ struct sdma_engine *sde;
+ u16 descq_cnt;
+ void *curr_head;
+ struct hfi1_pportdata *ppd = dd->pport + port;
+ u32 per_sdma_credits;
+ uint idle_cnt = sdma_idle_cnt;
+ size_t num_engines = dd->chip_sdma_engines;
+
+ if (!HFI1_CAP_IS_KSET(SDMA)) {
+ HFI1_CAP_CLEAR(SDMA_AHG);
+ return 0;
+ }
+ if (mod_num_sdma &&
+ /* can't exceed chip support */
+ mod_num_sdma <= dd->chip_sdma_engines &&
+ /* count must be >= vls */
+ mod_num_sdma >= num_vls)
+ num_engines = mod_num_sdma;
+
+ dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
+ dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
+ dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
+ dd->chip_sdma_mem_size);
+
+ per_sdma_credits =
+ dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
+
+ /* set up freeze waitqueue */
+ init_waitqueue_head(&dd->sdma_unfreeze_wq);
+ atomic_set(&dd->sdma_unfreeze_count, 0);
+
+ descq_cnt = sdma_get_descq_cnt();
+ dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
+ num_engines, descq_cnt);
+
+ /* alloc memory for array of send engines */
+ dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
+ if (!dd->per_sdma)
+ return -ENOMEM;
+
+ idle_cnt = ns_to_cclock(dd, idle_cnt);
+ /* Allocate memory for SendDMA descriptor FIFOs */
+ for (this_idx = 0; this_idx < num_engines; ++this_idx) {
+ sde = &dd->per_sdma[this_idx];
+ sde->dd = dd;
+ sde->ppd = ppd;
+ sde->this_idx = this_idx;
+ sde->descq_cnt = descq_cnt;
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ sde->sdma_shift = ilog2(descq_cnt);
+ sde->sdma_mask = (1 << sde->sdma_shift) - 1;
+ sde->descq_full_count = 0;
+
+ /* Create a mask for all 3 chip interrupt sources */
+ sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
+ | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
+ | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
+ /* Create a mask specifically for sdma_idle */
+ sde->idle_mask =
+ (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
+ /* Create a mask specifically for sdma_progress */
+ sde->progress_mask =
+ (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
+ spin_lock_init(&sde->tail_lock);
+ seqlock_init(&sde->head_lock);
+ spin_lock_init(&sde->senddmactrl_lock);
+ spin_lock_init(&sde->flushlist_lock);
+		/* ensure there is always a zero bit */
+ sde->ahg_bits = 0xfffffffe00000000ULL;
+
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+
+ /* set up reference counting */
+ kref_init(&sde->state.kref);
+ init_completion(&sde->state.comp);
+
+ INIT_LIST_HEAD(&sde->flushlist);
+ INIT_LIST_HEAD(&sde->dmawait);
+
+ sde->tail_csr =
+ get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
+
+ if (idle_cnt)
+ dd->default_desc1 =
+ SDMA_DESC1_HEAD_TO_HOST_FLAG;
+ else
+ dd->default_desc1 =
+ SDMA_DESC1_INT_REQ_FLAG;
+
+ tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
+ (unsigned long)sde);
+
+ tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
+ (unsigned long)sde);
+ INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
+ INIT_WORK(&sde->flush_worker, sdma_field_flush);
+
+ sde->progress_check_head = 0;
+
+ init_timer(&sde->err_progress_check_timer);
+ sde->err_progress_check_timer.function =
+ sdma_err_progress_check;
+ sde->err_progress_check_timer.data = (unsigned long)sde;
+
+ sde->descq = dma_zalloc_coherent(
+ &dd->pcidev->dev,
+ descq_cnt * sizeof(u64[2]),
+ &sde->descq_phys,
+ GFP_KERNEL
+ );
+ if (!sde->descq)
+ goto bail;
+ sde->tx_ring =
+ kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
+ GFP_KERNEL);
+ if (!sde->tx_ring)
+ sde->tx_ring =
+ vzalloc(
+ sizeof(struct sdma_txreq *) *
+ descq_cnt);
+ if (!sde->tx_ring)
+ goto bail;
+ }
+
+ dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
+ /* Allocate memory for DMA of head registers to memory */
+ dd->sdma_heads_dma = dma_zalloc_coherent(
+ &dd->pcidev->dev,
+ dd->sdma_heads_size,
+ &dd->sdma_heads_phys,
+ GFP_KERNEL
+ );
+ if (!dd->sdma_heads_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
+ goto bail;
+ }
+
+ /* Allocate memory for pad */
+ dd->sdma_pad_dma = dma_zalloc_coherent(
+ &dd->pcidev->dev,
+ sizeof(u32),
+ &dd->sdma_pad_phys,
+ GFP_KERNEL
+ );
+ if (!dd->sdma_pad_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
+ goto bail;
+ }
+
+ /* assign each engine to different cacheline and init registers */
+ curr_head = (void *)dd->sdma_heads_dma;
+ for (this_idx = 0; this_idx < num_engines; ++this_idx) {
+ unsigned long phys_offset;
+
+ sde = &dd->per_sdma[this_idx];
+
+ sde->head_dma = curr_head;
+ curr_head += L1_CACHE_BYTES;
+ phys_offset = (unsigned long)sde->head_dma -
+ (unsigned long)dd->sdma_heads_dma;
+ sde->head_phys = dd->sdma_heads_phys + phys_offset;
+ init_sdma_regs(sde, per_sdma_credits, idle_cnt);
+ }
+ dd->flags |= HFI1_HAS_SEND_DMA;
+ dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
+ dd->num_sdma = num_engines;
+ if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
+ goto bail;
+ dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
+ return 0;
+
+bail:
+ sdma_clean(dd, num_engines);
+ return -ENOMEM;
+}
+
+/**
+ * sdma_all_running() - called when the link goes up
+ * @dd: hfi1_devdata
+ *
+ * This routine moves all engines to the running state.
+ */
+void sdma_all_running(struct hfi1_devdata *dd)
+{
+ struct sdma_engine *sde;
+ unsigned int i;
+
+ /* move all engines to running */
+ for (i = 0; i < dd->num_sdma; ++i) {
+ sde = &dd->per_sdma[i];
+ sdma_process_event(sde, sdma_event_e30_go_running);
+ }
+}
+
+/**
+ * sdma_all_idle() - called when the link goes down
+ * @dd: hfi1_devdata
+ *
+ * This routine moves all engines to the idle state.
+ */
+void sdma_all_idle(struct hfi1_devdata *dd)
+{
+ struct sdma_engine *sde;
+ unsigned int i;
+
+ /* idle all engines */
+ for (i = 0; i < dd->num_sdma; ++i) {
+ sde = &dd->per_sdma[i];
+ sdma_process_event(sde, sdma_event_e70_go_idle);
+ }
+}
+
+/**
+ * sdma_start() - called to kick off state processing for all engines
+ * @dd: hfi1_devdata
+ *
+ * This routine is for kicking off the state processing for all required
+ * sdma engines. Interrupts need to be working at this point.
+ *
+ */
+void sdma_start(struct hfi1_devdata *dd)
+{
+ unsigned i;
+ struct sdma_engine *sde;
+
+ /* kick off the engines state processing */
+ for (i = 0; i < dd->num_sdma; ++i) {
+ sde = &dd->per_sdma[i];
+ sdma_process_event(sde, sdma_event_e10_go_hw_start);
+ }
+}
+
+/**
+ * sdma_exit() - used when module is removed
+ * @dd: hfi1_devdata
+ */
+void sdma_exit(struct hfi1_devdata *dd)
+{
+ unsigned this_idx;
+ struct sdma_engine *sde;
+
+ for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
+ ++this_idx) {
+ sde = &dd->per_sdma[this_idx];
+ if (!list_empty(&sde->dmawait))
+ dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
+ sde->this_idx);
+ sdma_process_event(sde, sdma_event_e00_go_hw_down);
+
+ del_timer_sync(&sde->err_progress_check_timer);
+
+ /*
+ * This waits for the state machine to exit so it is not
+ * necessary to kill the sdma_sw_clean_up_task to make sure
+ * it is not running.
+ */
+ sdma_finalput(&sde->state);
+ }
+ sdma_clean(dd, dd->num_sdma);
+}
+
+/*
+ * unmap the indicated descriptor
+ */
+static inline void sdma_unmap_desc(
+ struct hfi1_devdata *dd,
+ struct sdma_desc *descp)
+{
+ switch (sdma_mapping_type(descp)) {
+ case SDMA_MAP_SINGLE:
+ dma_unmap_single(
+ &dd->pcidev->dev,
+ sdma_mapping_addr(descp),
+ sdma_mapping_len(descp),
+ DMA_TO_DEVICE);
+ break;
+ case SDMA_MAP_PAGE:
+ dma_unmap_page(
+ &dd->pcidev->dev,
+ sdma_mapping_addr(descp),
+ sdma_mapping_len(descp),
+ DMA_TO_DEVICE);
+ break;
+ }
+}
+
+/*
+ * return the mode as indicated by the first
+ * descriptor in the tx.
+ */
+static inline u8 ahg_mode(struct sdma_txreq *tx)
+{
+ return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
+ >> SDMA_DESC1_HEADER_MODE_SHIFT;
+}
+
+/**
+ * sdma_txclean() - clean tx of mappings and any kmalloc'ed descp
+ * @dd: hfi1_devdata for unmapping
+ * @tx: tx request to clean
+ *
+ * This is used in the progress routine to clean the tx or
+ * by the ULP to toss an in-process tx build.
+ *
+ * The code can be called multiple times without issue.
+ *
+ */
+void sdma_txclean(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx)
+{
+ u16 i;
+
+ if (tx->num_desc) {
+ u8 skip = 0, mode = ahg_mode(tx);
+
+ /* unmap first */
+ sdma_unmap_desc(dd, &tx->descp[0]);
+ /* determine number of AHG descriptors to skip */
+ if (mode > SDMA_AHG_APPLY_UPDATE1)
+ skip = mode >> 1;
+ for (i = 1 + skip; i < tx->num_desc; i++)
+ sdma_unmap_desc(dd, &tx->descp[i]);
+ tx->num_desc = 0;
+ }
+ kfree(tx->coalesce_buf);
+ tx->coalesce_buf = NULL;
+ /* kmalloc'ed descp */
+ if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
+ tx->desc_limit = ARRAY_SIZE(tx->descs);
+ kfree(tx->descp);
+ }
+}
+
+static inline u16 sdma_gethead(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ int use_dmahead;
+ u16 hwhead;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+retry:
+ use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
+ (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
+ hwhead = use_dmahead ?
+ (u16) le64_to_cpu(*sde->head_dma) :
+ (u16) read_sde_csr(sde, SD(HEAD));
+
+ if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
+ u16 cnt;
+ u16 swtail;
+ u16 swhead;
+ int sane;
+
+ swhead = sde->descq_head & sde->sdma_mask;
+ /* this code is really bad for cache line trading */
+ swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ cnt = sde->descq_cnt;
+
+ if (swhead < swtail)
+ /* not wrapped */
+			sane = (hwhead >= swhead) && (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
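+		/*
+		 * e.g., with cnt == 256, swhead == 200 and swtail == 10
+		 * (wrapped), hwhead is sane anywhere in 200..255 or 0..10.
+		 */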
+
+ if (unlikely(!sane)) {
+ dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
+ sde->this_idx,
+ use_dmahead ? "dma" : "kreg",
+ hwhead, swhead, swtail, cnt);
+ if (use_dmahead) {
+ /* try one more time, using csr */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+ }
+ return hwhead;
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with head_lock held.
+ */
+static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
+{
+ struct iowait *wait, *nw;
+ struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
+ unsigned i, n = 0, seq;
+ struct sdma_txreq *stx;
+ struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(sde->dd, "avail: %u\n", avail);
+#endif
+
+ do {
+ seq = read_seqbegin(&dev->iowait_lock);
+ if (!list_empty(&sde->dmawait)) {
+ /* at least one item */
+ write_seqlock(&dev->iowait_lock);
+ /* Harvest waiters wanting DMA descriptors */
+ list_for_each_entry_safe(
+ wait,
+ nw,
+ &sde->dmawait,
+ list) {
+ u16 num_desc = 0;
+
+ if (!wait->wakeup)
+ continue;
+ if (n == ARRAY_SIZE(waits))
+ break;
+ if (!list_empty(&wait->tx_head)) {
+ stx = list_first_entry(
+ &wait->tx_head,
+ struct sdma_txreq,
+ list);
+ num_desc = stx->num_desc;
+ }
+ if (num_desc > avail)
+ break;
+ avail -= num_desc;
+ list_del_init(&wait->list);
+ waits[n++] = wait;
+ }
+ write_sequnlock(&dev->iowait_lock);
+ break;
+ }
+ } while (read_seqretry(&dev->iowait_lock, seq));
+
+ for (i = 0; i < n; i++)
+ waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
+}
+
+/* head_lock must be held */
+static void sdma_make_progress(struct sdma_engine *sde, u64 status)
+{
+ struct sdma_txreq *txp = NULL;
+ int progress = 0;
+ u16 hwhead, swhead, swtail;
+ int idle_check_done = 0;
+
+ hwhead = sdma_gethead(sde);
+
+	/*
+	 * The reason for some of the complexity of this code is that
+	 * not all descriptors have corresponding txps. So, we have to
+	 * be able to skip over descs until we wander into the range of
+	 * the next txp on the list.
+	 */
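+	/*
+	 * For example, a txp whose three descriptors occupy ring slots
+	 * 5..7 has next_descq_idx == 8; the loop below advances the head
+	 * through those slots and runs the completion callback only when
+	 * the head reaches 8.
+	 */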
+
+retry:
+ txp = get_txhead(sde);
+ swhead = sde->descq_head & sde->sdma_mask;
+ trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
+ while (swhead != hwhead) {
+ /* advance head, wrap if needed */
+ swhead = ++sde->descq_head & sde->sdma_mask;
+
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == swhead) {
+ int drained = 0;
+ /* protect against complete modifying */
+ struct iowait *wait = txp->wait;
+
+ /* remove from list */
+ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
+ if (wait)
+ drained = atomic_dec_and_test(&wait->sdma_busy);
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ trace_hfi1_sdma_out_sn(sde, txp->sn);
+ if (WARN_ON_ONCE(sde->head_sn != txp->sn))
+ dd_dev_err(sde->dd, "expected %llu got %llu\n",
+ sde->head_sn, txp->sn);
+ sde->head_sn++;
+#endif
+ sdma_txclean(sde->dd, txp);
+ if (txp->complete)
+ (*txp->complete)(
+ txp,
+ SDMA_TXREQ_S_OK,
+ drained);
+ if (wait && drained)
+ iowait_drain_wakeup(wait);
+ /* see if there is another txp */
+ txp = get_txhead(sde);
+ }
+ trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
+ progress++;
+ }
+
+ /*
+ * The SDMA idle interrupt is not guaranteed to be ordered with respect
+	 * to updates to the dma_head location in host memory. The head
+ * value read might not be fully up to date. If there are pending
+ * descriptors and the SDMA idle interrupt fired then read from the
+ * CSR SDMA head instead to get the latest value from the hardware.
+ * The hardware SDMA head should be read at most once in this invocation
+	 * of sdma_make_progress(), which is ensured by the idle_check_done flag.
+ */
+ if ((status & sde->idle_mask) && !idle_check_done) {
+ swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ if (swtail != hwhead) {
+ hwhead = (u16)read_sde_csr(sde, SD(HEAD));
+ idle_check_done = 1;
+ goto retry;
+ }
+ }
+
+ sde->last_status = status;
+ if (progress)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+}
+
+/**
+ * sdma_engine_interrupt() - interrupt handler for engine
+ * @sde: sdma engine
+ * @status: sdma interrupt reason
+ *
+ * Status is a mask of the 3 possible interrupts for this engine. It will
+ * contain bits _only_ for this SDMA engine. It will contain at least one
+ * bit; it may contain more.
+ */
+void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
+{
+ trace_hfi1_sdma_engine_interrupt(sde, status);
+ write_seqlock(&sde->head_lock);
+ sdma_set_desc_cnt(sde, sde->descq_cnt / 2);
+ sdma_make_progress(sde, status);
+ write_sequnlock(&sde->head_lock);
+}
+
+/**
+ * sdma_engine_error() - error handler for engine
+ * @sde: sdma engine
+ * @status: sdma interrupt reason
+ */
+void sdma_engine_error(struct sdma_engine *sde, u64 status)
+{
+ unsigned long flags;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
+ sde->this_idx,
+ (unsigned long long)status,
+ sdma_state_names[sde->state.current_state]);
+#endif
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+ if (status & ALL_SDMA_ENG_HALT_ERRS)
+ __sdma_process_event(sde, sdma_event_e60_hw_halted);
+ if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
+ dd_dev_err(sde->dd,
+ "SDMA (%u) engine error: 0x%llx state %s\n",
+ sde->this_idx,
+ (unsigned long long)status,
+ sdma_state_names[sde->state.current_state]);
+ dump_sdma_state(sde);
+ }
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
+{
+ u64 set_senddmactrl = 0;
+ u64 clr_senddmactrl = 0;
+ unsigned long flags;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
+ sde->this_idx,
+ (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
+ (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
+#endif
+
+ if (op & SDMA_SENDCTRL_OP_ENABLE)
+ set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
+
+ if (op & SDMA_SENDCTRL_OP_INTENABLE)
+ set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
+
+ if (op & SDMA_SENDCTRL_OP_HALT)
+ set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
+ else
+ clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
+
+ spin_lock_irqsave(&sde->senddmactrl_lock, flags);
+
+ sde->p_senddmactrl |= set_senddmactrl;
+ sde->p_senddmactrl &= ~clr_senddmactrl;
+
+ if (op & SDMA_SENDCTRL_OP_CLEANUP)
+ write_sde_csr(sde, SD(CTRL),
+ sde->p_senddmactrl |
+ SD(CTRL_SDMA_CLEANUP_SMASK));
+ else
+ write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
+
+ spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ sdma_dumpstate(sde);
+#endif
+}
+
+static void sdma_setlengen(struct sdma_engine *sde)
+{
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ /*
+ * Set SendDmaLenGen and clear-then-set the MSB of the generation
+ * count to enable generation checking and load the internal
+ * generation counter.
+ */
+ write_sde_csr(sde, SD(LEN_GEN),
+ (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
+ );
+ write_sde_csr(sde, SD(LEN_GEN),
+ ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
+ | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
+ );
+}
+
+static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ smp_wmb(); /* see get_txhead() */
+ writeq(tail, sde->tail_csr);
+}
+
+/*
+ * This is called when changing to state s10_hw_start_up_halt_wait as
+ * a result of send buffer errors or send DMA descriptor errors.
+ */
+static void sdma_hw_start_up(struct sdma_engine *sde)
+{
+ u64 reg;
+
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ sdma_setlengen(sde);
+ sdma_update_tail(sde, 0); /* Set SendDmaTail */
+ *sde->head_dma = 0;
+
+ reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
+ SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
+ write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
+}
+
+#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
+(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+
+#define SET_STATIC_RATE_CONTROL_SMASK(r) \
+(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
+/*
+ * set_sdma_integrity
+ *
+ * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
+ */
+static void set_sdma_integrity(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ u64 reg;
+
+ if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
+ return;
+
+ reg = hfi1_pkt_base_sdma_integrity(dd);
+
+ if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+ CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
+ else
+ SET_STATIC_RATE_CONTROL_SMASK(reg);
+
+ write_sde_csr(sde, SD(CHECK_ENABLE), reg);
+}
+
+static void init_sdma_regs(
+ struct sdma_engine *sde,
+ u32 credits,
+ uint idle_cnt)
+{
+ u8 opval, opmask;
+#ifdef CONFIG_SDMA_VERBOSITY
+ struct hfi1_devdata *dd = sde->dd;
+
+ dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
+ sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
+#endif
+
+ write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
+ sdma_setlengen(sde);
+ sdma_update_tail(sde, 0); /* Set SendDmaTail */
+ write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
+ write_sde_csr(sde, SD(DESC_CNT), 0);
+ write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
+ write_sde_csr(sde, SD(MEMORY),
+ ((u64)credits <<
+ SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
+ ((u64)(credits * sde->this_idx) <<
+ SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
+ write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
+ set_sdma_integrity(sde);
+ opmask = OPCODE_CHECK_MASK_DISABLED;
+ opval = OPCODE_CHECK_VAL_DISABLED;
+ write_sde_csr(sde, SD(CHECK_OPCODE),
+ (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
+ (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
+}
+
+#ifdef CONFIG_SDMA_VERBOSITY
+
+#define sdma_dumpstate_helper0(reg) do { \
+ csr = read_csr(sde->dd, reg); \
+ dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
+ } while (0)
+
+#define sdma_dumpstate_helper(reg) do { \
+ csr = read_sde_csr(sde, reg); \
+ dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
+ #reg, sde->this_idx, csr); \
+ } while (0)
+
+#define sdma_dumpstate_helper2(reg) do { \
+ csr = read_csr(sde->dd, reg + (8 * i)); \
+ dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
+ #reg, i, csr); \
+ } while (0)
+
+void sdma_dumpstate(struct sdma_engine *sde)
+{
+ u64 csr;
+ unsigned i;
+
+ sdma_dumpstate_helper(SD(CTRL));
+ sdma_dumpstate_helper(SD(STATUS));
+ sdma_dumpstate_helper0(SD(ERR_STATUS));
+ sdma_dumpstate_helper0(SD(ERR_MASK));
+ sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
+ sdma_dumpstate_helper(SD(ENG_ERR_MASK));
+
+ for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
+ sdma_dumpstate_helper2(CCE_INT_STATUS);
+ sdma_dumpstate_helper2(CCE_INT_MASK);
+ sdma_dumpstate_helper2(CCE_INT_BLOCKED);
+ }
+
+ sdma_dumpstate_helper(SD(TAIL));
+ sdma_dumpstate_helper(SD(HEAD));
+ sdma_dumpstate_helper(SD(PRIORITY_THLD));
+ sdma_dumpstate_helper(SD(IDLE_CNT));
+ sdma_dumpstate_helper(SD(RELOAD_CNT));
+ sdma_dumpstate_helper(SD(DESC_CNT));
+ sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
+ sdma_dumpstate_helper(SD(MEMORY));
+ sdma_dumpstate_helper0(SD(ENGINES));
+ sdma_dumpstate_helper0(SD(MEM_SIZE));
+ /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
+ sdma_dumpstate_helper(SD(BASE_ADDR));
+ sdma_dumpstate_helper(SD(LEN_GEN));
+ sdma_dumpstate_helper(SD(HEAD_ADDR));
+ sdma_dumpstate_helper(SD(CHECK_ENABLE));
+ sdma_dumpstate_helper(SD(CHECK_VL));
+ sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
+ sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
+ sdma_dumpstate_helper(SD(CHECK_SLID));
+ sdma_dumpstate_helper(SD(CHECK_OPCODE));
+}
+#endif
+
+static void dump_sdma_state(struct sdma_engine *sde)
+{
+ struct hw_sdma_desc *descq;
+ struct hw_sdma_desc *descqp;
+ u64 desc[2];
+ u64 addr;
+ u8 gen;
+ u16 len;
+ u16 head, tail, cnt;
+
+ head = sde->descq_head & sde->sdma_mask;
+ tail = sde->descq_tail & sde->sdma_mask;
+ cnt = sdma_descq_freecnt(sde);
+ descq = sde->descq;
+
+ dd_dev_err(sde->dd,
+ "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
+ sde->this_idx,
+ head,
+ tail,
+ cnt,
+ !list_empty(&sde->flushlist));
+
+ /* print info for each entry in the descriptor queue */
+ while (head != tail) {
+ char flags[6] = { 'x', 'x', 'x', 'x', 0 };
+
+ descqp = &sde->descq[head];
+ desc[0] = le64_to_cpu(descqp->qw[0]);
+ desc[1] = le64_to_cpu(descqp->qw[1]);
+ flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
+ 'H' : '-';
+ flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
+ & SDMA_DESC0_PHY_ADDR_MASK;
+ gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
+ & SDMA_DESC1_GENERATION_MASK;
+ len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
+ & SDMA_DESC0_BYTE_COUNT_MASK;
+ dd_dev_err(sde->dd,
+ "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
+ dd_dev_err(sde->dd,
+ "\tdesc0:0x%016llx desc1 0x%016llx\n",
+ desc[0], desc[1]);
+ if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
+ dd_dev_err(sde->dd,
+ "\taidx: %u amode: %u alen: %u\n",
+ (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
+				>> SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
+ >> SDMA_DESC1_HEADER_MODE_SHIFT),
+ (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
+ >> SDMA_DESC1_HEADER_DWS_SHIFT));
+ head++;
+ head &= sde->sdma_mask;
+ }
+}
+
+#define SDE_FMT \
+ "SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
+/**
+ * sdma_seqfile_dump_sde() - debugfs dump of sde
+ * @s: seq file
+ * @sde: send dma engine to dump
+ *
+ * This routine dumps the sde to the indicated seq file.
+ */
+void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
+{
+ u16 head, tail;
+ struct hw_sdma_desc *descqp;
+ u64 desc[2];
+ u64 addr;
+ u8 gen;
+ u16 len;
+
+ head = sde->descq_head & sde->sdma_mask;
+ tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ seq_printf(s, SDE_FMT, sde->this_idx,
+ sdma_state_name(sde->state.current_state),
+ (unsigned long long)read_sde_csr(sde, SD(CTRL)),
+ (unsigned long long)read_sde_csr(sde, SD(STATUS)),
+ (unsigned long long)read_sde_csr(sde,
+ SD(ENG_ERR_STATUS)),
+ (unsigned long long)read_sde_csr(sde, SD(TAIL)),
+ tail,
+ (unsigned long long)read_sde_csr(sde, SD(HEAD)),
+ head,
+ (unsigned long long)le64_to_cpu(*sde->head_dma),
+ (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
+ (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
+ (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
+ (unsigned long long)sde->last_status,
+ (unsigned long long)sde->ahg_bits,
+ sde->tx_tail,
+ sde->tx_head,
+ sde->descq_tail,
+ sde->descq_head,
+ !list_empty(&sde->flushlist),
+ sde->descq_full_count,
+ (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
+
+ /* print info for each entry in the descriptor queue */
+ while (head != tail) {
+ char flags[6] = { 'x', 'x', 'x', 'x', 0 };
+
+ descqp = &sde->descq[head];
+ desc[0] = le64_to_cpu(descqp->qw[0]);
+ desc[1] = le64_to_cpu(descqp->qw[1]);
+ flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
+ 'H' : '-';
+ flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
+ & SDMA_DESC0_PHY_ADDR_MASK;
+ gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
+ & SDMA_DESC1_GENERATION_MASK;
+ len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
+ & SDMA_DESC0_BYTE_COUNT_MASK;
+ seq_printf(s,
+ "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
+ if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
+ seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
+ (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
+				>> SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
+ >> SDMA_DESC1_HEADER_MODE_SHIFT));
+ head = (head + 1) & sde->sdma_mask;
+ }
+}
+
+/*
+ * add the generation number into
+ * the qw1 and return
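+ * (e.g., with descq_cnt == 2048 and sdma_shift == 11, tail values
+ * 0..2047 carry generation 0 and 2048..4095 carry generation 1,
+ * wrapping modulo 4)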
+ */
+static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
+{
+ u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
+
+ qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
+ qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ return qw1;
+}
+
+/*
+ * This routine submits the indicated tx
+ *
+ * Space has already been guaranteed and
+ * tail side of ring is locked.
+ *
+ * The hardware tail update is done
+ * in the caller and that is facilitated
+ * by returning the new tail.
+ *
+ * There is special case logic for ahg
+ * to not add the generation number for
+ * up to 2 descriptors that follow the
+ * first descriptor.
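+ *
+ * For example, mode SDMA_AHG_APPLY_UPDATE3 yields skip == 2, so
+ * the two descriptors after the first are submitted with their
+ * qw[1] unmodified (no generation inserted).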
+ *
+ */
+static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
+{
+ int i;
+ u16 tail;
+ struct sdma_desc *descp = tx->descp;
+ u8 skip = 0, mode = ahg_mode(tx);
+
+ tail = sde->descq_tail & sde->sdma_mask;
+ sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
+ sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
+ trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
+ tail, &sde->descq[tail]);
+ tail = ++sde->descq_tail & sde->sdma_mask;
+ descp++;
+ if (mode > SDMA_AHG_APPLY_UPDATE1)
+ skip = mode >> 1;
+ for (i = 1; i < tx->num_desc; i++, descp++) {
+ u64 qw1;
+
+ sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
+ if (skip) {
+ /* edits don't have generation */
+ qw1 = descp->qw[1];
+ skip--;
+ } else {
+ /* replace generation with real one for non-edits */
+ qw1 = add_gen(sde, descp->qw[1]);
+ }
+ sde->descq[tail].qw[1] = cpu_to_le64(qw1);
+ trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
+ tail, &sde->descq[tail]);
+ tail = ++sde->descq_tail & sde->sdma_mask;
+ }
+ tx->next_descq_idx = tail;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+ WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
+#endif
+ sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
+ sde->desc_avail -= tx->num_desc;
+ return tail;
+}
+
+/*
+ * Check for progress
+ */
+static int sdma_check_progress(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *tx)
+{
+ int ret;
+
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ if (tx->num_desc <= sde->desc_avail)
+ return -EAGAIN;
+ /* pulse the head_lock */
+ if (wait && wait->sleep) {
+ unsigned seq;
+
+ seq = raw_seqcount_begin(
+ (const seqcount_t *)&sde->head_lock.seqcount);
+ ret = wait->sleep(sde, wait, tx, seq);
+ if (ret == -EAGAIN)
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ } else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * sdma_send_txreq() - submit a tx req to ring
+ * @sde: sdma engine to use
+ * @wait: wait structure to use when full (may be NULL)
+ * @tx: sdma_txreq to submit
+ *
+ * The call submits the tx into the ring. If an iowait structure is non-NULL
+ * the packet will be queued to the list in wait.
+ *
+ * Return:
+ * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
+ * ring (wait == NULL)
+ * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
+ */
+int sdma_send_txreq(struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *tx)
+{
+ int ret = 0;
+ u16 tail;
+ unsigned long flags;
+
+ /* user should have supplied entire packet */
+ if (unlikely(tx->tlen))
+ return -EINVAL;
+ tx->wait = wait;
+ spin_lock_irqsave(&sde->tail_lock, flags);
+retry:
+ if (unlikely(!__sdma_running(sde)))
+ goto unlock_noconn;
+ if (unlikely(tx->num_desc > sde->desc_avail))
+ goto nodesc;
+ tail = submit_tx(sde, tx);
+ if (wait)
+ atomic_inc(&wait->sdma_busy);
+ sdma_update_tail(sde, tail);
+unlock:
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+ return ret;
+unlock_noconn:
+ if (wait)
+ atomic_inc(&wait->sdma_busy);
+ tx->next_descq_idx = 0;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+#endif
+ spin_lock(&sde->flushlist_lock);
+ list_add_tail(&tx->list, &sde->flushlist);
+ spin_unlock(&sde->flushlist_lock);
+ if (wait) {
+ wait->tx_count++;
+ wait->count += tx->num_desc;
+ }
+ schedule_work(&sde->flush_worker);
+ ret = -ECOMM;
+ goto unlock;
+nodesc:
+ ret = sdma_check_progress(sde, wait, tx);
+ if (ret == -EAGAIN) {
+ ret = 0;
+ goto retry;
+ }
+ sde->descq_full_count++;
+ goto unlock;
+}
+
+/**
+ * sdma_send_txlist() - submit a list of tx req to ring
+ * @sde: sdma engine to use
+ * @wait: wait structure to use when full (may be NULL)
+ * @tx_list: list of sdma_txreqs to submit
+ *
+ * The call submits the list into the ring.
+ *
+ * If the iowait structure is non-NULL and not equal to the iowait list,
+ * the unprocessed part of the list will be appended to the list in wait.
+ *
+ * In all cases, the tx_list will be updated so the head of the tx_list is
+ * the list of descriptors that have yet to be transmitted.
+ *
+ * The intent of this call is to provide a more efficient
+ * way of submitting multiple packets to SDMA while holding the tail
+ * side locking.
+ *
+ * Return:
+ * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
+ * (wait == NULL)
+ * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
+ */
+int sdma_send_txlist(struct sdma_engine *sde,
+ struct iowait *wait,
+ struct list_head *tx_list)
+{
+ struct sdma_txreq *tx, *tx_next;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail = INVALID_TAIL;
+ int count = 0;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+retry:
+ list_for_each_entry_safe(tx, tx_next, tx_list, list) {
+ tx->wait = wait;
+ if (unlikely(!__sdma_running(sde)))
+ goto unlock_noconn;
+ if (unlikely(tx->num_desc > sde->desc_avail))
+ goto nodesc;
+ if (unlikely(tx->tlen)) {
+ ret = -EINVAL;
+ goto update_tail;
+ }
+ list_del_init(&tx->list);
+ tail = submit_tx(sde, tx);
+ count++;
+ if (tail != INVALID_TAIL &&
+ (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
+ sdma_update_tail(sde, tail);
+ tail = INVALID_TAIL;
+ }
+ }
+update_tail:
+ if (wait)
+ atomic_add(count, &wait->sdma_busy);
+ if (tail != INVALID_TAIL)
+ sdma_update_tail(sde, tail);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+ return ret;
+unlock_noconn:
+ spin_lock(&sde->flushlist_lock);
+ list_for_each_entry_safe(tx, tx_next, tx_list, list) {
+ tx->wait = wait;
+ list_del_init(&tx->list);
+ if (wait)
+ atomic_inc(&wait->sdma_busy);
+ tx->next_descq_idx = 0;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ tx->sn = sde->tail_sn++;
+ trace_hfi1_sdma_in_sn(sde, tx->sn);
+#endif
+ list_add_tail(&tx->list, &sde->flushlist);
+ if (wait) {
+ wait->tx_count++;
+ wait->count += tx->num_desc;
+ }
+ }
+ spin_unlock(&sde->flushlist_lock);
+ schedule_work(&sde->flush_worker);
+ ret = -ECOMM;
+ goto update_tail;
+nodesc:
+ ret = sdma_check_progress(sde, wait, tx);
+ if (ret == -EAGAIN) {
+ ret = 0;
+ goto retry;
+ }
+ sde->descq_full_count++;
+ goto update_tail;
+}
+
+static void sdma_process_event(struct sdma_engine *sde,
+ enum sdma_events event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde->tail_lock, flags);
+ write_seqlock(&sde->head_lock);
+
+ __sdma_process_event(sde, event);
+
+ if (sde->state.current_state == sdma_state_s99_running)
+ sdma_desc_avail(sde, sdma_descq_freecnt(sde));
+
+ write_sequnlock(&sde->head_lock);
+ spin_unlock_irqrestore(&sde->tail_lock, flags);
+}
+
+static void __sdma_process_event(struct sdma_engine *sde,
+ enum sdma_events event)
+{
+ struct sdma_state *ss = &sde->state;
+ int need_progress = 0;
+
+ /* CONFIG SDMA temporary */
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
+ sdma_state_names[ss->current_state],
+ sdma_event_names[event]);
+#endif
+
+ switch (ss->current_state) {
+ case sdma_state_s00_hw_down:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ break;
+ case sdma_event_e30_go_running:
+ /*
+			 * If down, but running is requested (usually the
+			 * result of a link up), then we need to start up.
+			 * This can happen when hw down is requested while
+			 * bringing the link up with traffic active on,
+			 * e.g., 7220.
+			 */
+ ss->go_s99_running = 1;
+ /* fall through and start dma engine */
+ case sdma_event_e10_go_hw_start:
+ /* This reference means the state machine is started */
+ sdma_get(&sde->state);
+ sdma_set_state(sde,
+ sdma_state_s10_hw_start_up_halt_wait);
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e40_sw_cleaned:
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s10_hw_start_up_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde,
+ sdma_state_s15_hw_start_up_clean_wait);
+ sdma_start_hw_clean_up(sde);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ sdma_start_err_halt_wait(sde);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s15_hw_start_up_clean_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s20_idle:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_sw_tear_down(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ sdma_set_state(sde, sdma_state_s99_running);
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
+ sdma_start_err_halt_wait(sde);
+ break;
+ case sdma_event_e70_go_idle:
+ break;
+ case sdma_event_e85_link_down:
+ /* fall through */
+ case sdma_event_e80_hw_freeze:
+ sdma_set_state(sde, sdma_state_s80_hw_freeze);
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s30_sw_clean_up_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
+ sdma_start_hw_clean_up(sde);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s40_hw_clean_up_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s50_hw_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ sdma_start_err_halt_wait(sde);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s60_idle_halt_wait:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ sdma_start_err_halt_wait(sde);
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s80_hw_freeze:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s82_freeze_sw_clean:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case sdma_event_e40_sw_cleaned:
+ /* notify caller this engine is done cleaning */
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ break;
+ case sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case sdma_event_e80_hw_freeze:
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ sdma_hw_start_up(sde);
+ sdma_set_state(sde, ss->go_s99_running ?
+ sdma_state_s99_running :
+ sdma_state_s20_idle);
+ break;
+ case sdma_event_e85_link_down:
+ break;
+ case sdma_event_e90_sw_halted:
+ break;
+ }
+ break;
+
+ case sdma_state_s99_running:
+ switch (event) {
+ case sdma_event_e00_go_hw_down:
+ sdma_set_state(sde, sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(sde);
+ break;
+ case sdma_event_e10_go_hw_start:
+ break;
+ case sdma_event_e15_hw_halt_done:
+ break;
+ case sdma_event_e25_hw_clean_up_done:
+ break;
+ case sdma_event_e30_go_running:
+ break;
+ case sdma_event_e40_sw_cleaned:
+ break;
+ case sdma_event_e50_hw_cleaned:
+ break;
+ case sdma_event_e60_hw_halted:
+ need_progress = 1;
+ sdma_err_progress_check_schedule(sde);
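+			/* fall through */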
+ case sdma_event_e90_sw_halted:
+ /*
+ * SW initiated halt does not perform engines
+ * progress check
+ */
+ sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
+ sdma_start_err_halt_wait(sde);
+ break;
+ case sdma_event_e70_go_idle:
+ sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
+ break;
+ case sdma_event_e85_link_down:
+ ss->go_s99_running = 0;
+ /* fall through */
+ case sdma_event_e80_hw_freeze:
+ sdma_set_state(sde, sdma_state_s80_hw_freeze);
+ atomic_dec(&sde->dd->sdma_unfreeze_count);
+ wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
+ break;
+ case sdma_event_e81_hw_frozen:
+ break;
+ case sdma_event_e82_hw_unfreeze:
+ break;
+ }
+ break;
+ }
+
+ ss->last_event = event;
+ if (need_progress)
+ sdma_make_progress(sde, 0);
+}
+
+/*
+ * _extend_sdma_tx_descs() - helper to extend txreq
+ *
+ * This is called once the initial nominal allocation
+ * of descriptors in the sdma_txreq is exhausted.
+ *
+ * The code will bump the allocation up to the max
+ * of MAX_DESC (64) descriptors. There doesn't seem to be
+ * much point in an interim step.
+ *
+ */
+int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ int i;
+
+ tx->descp = kmalloc_array(
+ MAX_DESC,
+ sizeof(struct sdma_desc),
+ GFP_ATOMIC);
+ if (!tx->descp)
+ return -ENOMEM;
+ tx->desc_limit = MAX_DESC;
+ /* copy ones already built */
+ for (i = 0; i < tx->num_desc; i++)
+ tx->descp[i] = tx->descs[i];
+ return 0;
+}
+
+/* Update sdes when the lmc changes */
+void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
+{
+ struct sdma_engine *sde;
+ int i;
+ u64 sreg;
+
+ sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
+ SD(CHECK_SLID_MASK_SHIFT)) |
+ (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
+ SD(CHECK_SLID_VALUE_SHIFT));
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
+ i, (u32)sreg);
+ sde = &dd->per_sdma[i];
+ write_sde_csr(sde, SD(CHECK_SLID), sreg);
+ }
+}
+
+/* tx not dword sized - pad */
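+/*
+ * e.g., packet_len == 7 ends 3 bytes into a dword, so a 1-byte pad
+ * descriptor from sdma_pad_phys completes it
+ */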
+int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ int rval = 0;
+
+	if (unlikely(tx->num_desc == tx->desc_limit)) {
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval)
+ return rval;
+ }
+ /* finish the one just added */
+ tx->num_desc++;
+ make_tx_sdma_desc(
+ tx,
+ SDMA_MAP_NONE,
+ dd->sdma_pad_phys,
+ sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+ _sdma_close_tx(dd, tx);
+ return rval;
+}
+
+/*
+ * Add ahg to the sdma_txreq
+ *
+ * The logic will consume up to 3
+ * descriptors at the beginning of
+ * sdma_txreq.
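+ *
+ * For example, num_ahg == 7 selects mode SDMA_AHG_APPLY_UPDATE3
+ * and consumes descriptors 0 through 2.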
+ */
+void _sdma_txreq_ahgadd(
+ struct sdma_txreq *tx,
+ u8 num_ahg,
+ u8 ahg_entry,
+ u32 *ahg,
+ u8 ahg_hlen)
+{
+ u32 i, shift = 0, desc = 0;
+ u8 mode;
+
+ WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
+ /* compute mode */
+ if (num_ahg == 1)
+ mode = SDMA_AHG_APPLY_UPDATE1;
+ else if (num_ahg <= 5)
+ mode = SDMA_AHG_APPLY_UPDATE2;
+ else
+ mode = SDMA_AHG_APPLY_UPDATE3;
+ tx->num_desc++;
+	/* initialize the consumed descriptors to zero */
+ switch (mode) {
+ case SDMA_AHG_APPLY_UPDATE3:
+ tx->num_desc++;
+ tx->descs[2].qw[0] = 0;
+ tx->descs[2].qw[1] = 0;
+ /* FALLTHROUGH */
+ case SDMA_AHG_APPLY_UPDATE2:
+ tx->num_desc++;
+ tx->descs[1].qw[0] = 0;
+ tx->descs[1].qw[1] = 0;
+ break;
+ }
+ ahg_hlen >>= 2;
+ tx->descs[0].qw[1] |=
+ (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
+ << SDMA_DESC1_HEADER_INDEX_SHIFT) |
+ (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
+ << SDMA_DESC1_HEADER_DWS_SHIFT) |
+ (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
+ << SDMA_DESC1_HEADER_MODE_SHIFT) |
+ (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
+ << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
+ for (i = 0; i < (num_ahg - 1); i++) {
+ if (!shift && !(i & 2))
+ desc++;
+ tx->descs[desc].qw[!!(i & 2)] |=
+ (((u64)ahg[i + 1])
+ << shift);
+ shift = (shift + 32) & 63;
+ }
+}
+
+/**
+ * sdma_ahg_alloc - allocate an AHG entry
+ * @sde: engine to allocate from
+ *
+ * Return:
+ * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
+ * -ENOSPC if an entry is not available
+ */
+int sdma_ahg_alloc(struct sdma_engine *sde)
+{
+ int nr;
+ int oldbit;
+
+ if (!sde) {
+ trace_hfi1_ahg_allocate(sde, -EINVAL);
+ return -EINVAL;
+ }
+ while (1) {
+ nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ if (nr > 31) {
+ trace_hfi1_ahg_allocate(sde, -ENOSPC);
+ return -ENOSPC;
+ }
+ oldbit = test_and_set_bit(nr, &sde->ahg_bits);
+ if (!oldbit)
+ break;
+ cpu_relax();
+ }
+ trace_hfi1_ahg_allocate(sde, nr);
+ return nr;
+}
+
+/**
+ * sdma_ahg_free - free an AHG entry
+ * @sde: engine to return AHG entry
+ * @ahg_index: index to free
+ *
+ * This routine frees the indicated AHG entry.
+ */
+void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
+{
+ if (!sde)
+ return;
+ trace_hfi1_ahg_deallocate(sde, ahg_index);
+ if (ahg_index < 0 || ahg_index > 31)
+ return;
+ clear_bit(ahg_index, &sde->ahg_bits);
+}
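+
+/*
+ * A minimal sketch of pairing the two calls above (everything between
+ * them is hypothetical):
+ *
+ *	int ahg_index = sdma_ahg_alloc(sde);
+ *
+ *	if (ahg_index >= 0) {
+ *		... build txreqs that use SDMA_TXREQ_F_USE_AHG and
+ *		    this index ...
+ *		sdma_ahg_free(sde, ahg_index);
+ *	}
+ */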
+
+/*
+ * SPC freeze handling for SDMA engines. Called when the driver knows
+ * the SPC is going into a freeze but before the freeze is fully
+ * settled. Generally an error interrupt.
+ *
+ * This event will pull the engine out of running so no more entries can be
+ * added to the engine's queue.
+ */
+void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
+{
+ int i;
+ enum sdma_events event = link_down ? sdma_event_e85_link_down :
+ sdma_event_e80_hw_freeze;
+
+ /* set up the wait but do not wait here */
+ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
+
+ /* tell all engines to stop running and wait */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i], event);
+
+ /* sdma_freeze() will wait for all engines to have stopped */
+}
+
+/*
+ * SPC freeze handling for SDMA engines. Called when the driver knows
+ * the SPC is fully frozen.
+ */
+void sdma_freeze(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+
+ /*
+ * Make sure all engines have moved out of the running state before
+ * continuing.
+ */
+ ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
+ atomic_read(&dd->sdma_unfreeze_count) <= 0);
+ /* interrupted or count is negative, then unloading - just exit */
+ if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
+ return;
+
+ /* set up the count for the next wait */
+ atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
+
+ /* tell all engines that the SPC is frozen, they can start cleaning */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
+
+ /*
+ * Wait for everyone to finish software clean before exiting. The
+ * software clean will read engine CSRs, so must be completed before
+ * the next step, which will clear the engine CSRs.
+ */
+ (void) wait_event_interruptible(dd->sdma_unfreeze_wq,
+ atomic_read(&dd->sdma_unfreeze_count) <= 0);
+ /* no need to check results - done no matter what */
+}
+
+/*
+ * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
+ *
+ * The SPC freeze acts like a SDMA halt and a hardware clean combined. All
+ * that is left is a software clean. We could do it after the SPC is fully
+ * frozen, but then we'd have to add another state to wait for the unfreeze.
+ * Instead, just defer the software clean until the unfreeze step.
+ */
+void sdma_unfreeze(struct hfi1_devdata *dd)
+{
+ int i;
+
+	/* tell all engines to start freeze clean up */
+ for (i = 0; i < dd->num_sdma; i++)
+ sdma_process_event(&dd->per_sdma[i],
+ sdma_event_e82_hw_unfreeze);
+}
+
+/**
+ * _sdma_engine_progress_schedule() - schedule progress on engine
+ * @sde: sdma_engine to schedule progress
+ *
+ */
+void _sdma_engine_progress_schedule(
+ struct sdma_engine *sde)
+{
+ trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
+ /* assume we have selected a good cpu */
+ write_csr(sde->dd,
+ CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask);
+}
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
new file mode 100644
index 000000000000..1e613fcd8f4c
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -0,0 +1,1123 @@
+#ifndef _HFI1_SDMA_H
+#define _HFI1_SDMA_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/byteorder.h>
+#include <linux/workqueue.h>
+#include <linux/rculist.h>
+
+#include "hfi.h"
+#include "verbs.h"
+
+/* increased for AHG */
+#define NUM_DESC 6
+/* Hardware limit */
+#define MAX_DESC 64
+/* Hardware limit for SDMA packet size */
+#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
+
+
+#define SDMA_TXREQ_S_OK 0
+#define SDMA_TXREQ_S_SENDERROR 1
+#define SDMA_TXREQ_S_ABORTED 2
+#define SDMA_TXREQ_S_SHUTDOWN 3
+
+/* flags bits */
+#define SDMA_TXREQ_F_URGENT 0x0001
+#define SDMA_TXREQ_F_AHG_COPY 0x0002
+#define SDMA_TXREQ_F_USE_AHG 0x0004
+
+#define SDMA_MAP_NONE 0
+#define SDMA_MAP_SINGLE 1
+#define SDMA_MAP_PAGE 2
+
+#define SDMA_AHG_VALUE_MASK 0xffff
+#define SDMA_AHG_VALUE_SHIFT 0
+#define SDMA_AHG_INDEX_MASK 0xf
+#define SDMA_AHG_INDEX_SHIFT 16
+#define SDMA_AHG_FIELD_LEN_MASK 0xf
+#define SDMA_AHG_FIELD_LEN_SHIFT 20
+#define SDMA_AHG_FIELD_START_MASK 0x1f
+#define SDMA_AHG_FIELD_START_SHIFT 24
+#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
+#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
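+
+/*
+ * A sketch of packing one AHG update word from the fields above
+ * (hypothetical helper, not part of this interface):
+ *
+ *	static inline u32 sdma_ahg_update(u32 start, u32 len, u32 idx,
+ *					  u32 val)
+ *	{
+ *		return ((start & SDMA_AHG_FIELD_START_MASK) <<
+ *			SDMA_AHG_FIELD_START_SHIFT) |
+ *		       ((len & SDMA_AHG_FIELD_LEN_MASK) <<
+ *			SDMA_AHG_FIELD_LEN_SHIFT) |
+ *		       ((idx & SDMA_AHG_INDEX_MASK) <<
+ *			SDMA_AHG_INDEX_SHIFT) |
+ *		       ((val & SDMA_AHG_VALUE_MASK) <<
+ *			SDMA_AHG_VALUE_SHIFT) |
+ *		       (1U << SDMA_AHG_UPDATE_ENABLE_SHIFT);
+ *	}
+ */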
+
+/* AHG modes */
+
+/*
+ * Be aware the ordering and values
+ * for SDMA_AHG_APPLY_UPDATE[123]
+ * are assumed in generating a skip
+ * count in submit_tx() in sdma.c
+ */
+#define SDMA_AHG_NO_AHG 0
+#define SDMA_AHG_COPY 1
+#define SDMA_AHG_APPLY_UPDATE1 2
+#define SDMA_AHG_APPLY_UPDATE2 3
+#define SDMA_AHG_APPLY_UPDATE3 4
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL<<63)
+#define SDMA_DESC0_LAST_DESC_FLAG (1ULL<<62)
+#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
+#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
+#define SDMA_DESC0_BYTE_COUNT_MASK \
+ ((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL)
+#define SDMA_DESC0_BYTE_COUNT_SMASK \
+ (SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT)
+#define SDMA_DESC0_PHY_ADDR_SHIFT 0
+#define SDMA_DESC0_PHY_ADDR_WIDTH 48
+#define SDMA_DESC0_PHY_ADDR_MASK \
+ ((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL)
+#define SDMA_DESC0_PHY_ADDR_SMASK \
+ (SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT)
+
+#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
+#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
+#define SDMA_DESC1_HEADER_UPDATE1_MASK \
+ ((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL)
+#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
+ (SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT)
+#define SDMA_DESC1_HEADER_MODE_SHIFT 13
+#define SDMA_DESC1_HEADER_MODE_WIDTH 3
+#define SDMA_DESC1_HEADER_MODE_MASK \
+ ((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL)
+#define SDMA_DESC1_HEADER_MODE_SMASK \
+ (SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT)
+#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
+#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
+#define SDMA_DESC1_HEADER_INDEX_MASK \
+ ((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL)
+#define SDMA_DESC1_HEADER_INDEX_SMASK \
+ (SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT)
+#define SDMA_DESC1_HEADER_DWS_SHIFT 4
+#define SDMA_DESC1_HEADER_DWS_WIDTH 4
+#define SDMA_DESC1_HEADER_DWS_MASK \
+ ((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL)
+#define SDMA_DESC1_HEADER_DWS_SMASK \
+ (SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT)
+#define SDMA_DESC1_GENERATION_SHIFT 2
+#define SDMA_DESC1_GENERATION_WIDTH 2
+#define SDMA_DESC1_GENERATION_MASK \
+ ((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL)
+#define SDMA_DESC1_GENERATION_SMASK \
+ (SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT)
+#define SDMA_DESC1_INT_REQ_FLAG (1ULL<<1)
+#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL<<0)
+
+enum sdma_states {
+ sdma_state_s00_hw_down,
+ sdma_state_s10_hw_start_up_halt_wait,
+ sdma_state_s15_hw_start_up_clean_wait,
+ sdma_state_s20_idle,
+ sdma_state_s30_sw_clean_up_wait,
+ sdma_state_s40_hw_clean_up_wait,
+ sdma_state_s50_hw_halt_wait,
+ sdma_state_s60_idle_halt_wait,
+ sdma_state_s80_hw_freeze,
+ sdma_state_s82_freeze_sw_clean,
+ sdma_state_s99_running,
+};
+
+enum sdma_events {
+ sdma_event_e00_go_hw_down,
+ sdma_event_e10_go_hw_start,
+ sdma_event_e15_hw_halt_done,
+ sdma_event_e25_hw_clean_up_done,
+ sdma_event_e30_go_running,
+ sdma_event_e40_sw_cleaned,
+ sdma_event_e50_hw_cleaned,
+ sdma_event_e60_hw_halted,
+ sdma_event_e70_go_idle,
+ sdma_event_e80_hw_freeze,
+ sdma_event_e81_hw_frozen,
+ sdma_event_e82_hw_unfreeze,
+ sdma_event_e85_link_down,
+ sdma_event_e90_sw_halted,
+};
+
+struct sdma_set_state_action {
+ unsigned op_enable:1;
+ unsigned op_intenable:1;
+ unsigned op_halt:1;
+ unsigned op_cleanup:1;
+ unsigned go_s99_running_tofalse:1;
+ unsigned go_s99_running_totrue:1;
+};
+
+struct sdma_state {
+ struct kref kref;
+ struct completion comp;
+ enum sdma_states current_state;
+ unsigned current_op;
+ unsigned go_s99_running;
+ /* debugging/development */
+ enum sdma_states previous_state;
+ unsigned previous_op;
+ enum sdma_events last_event;
+};
+
+/**
+ * DOC: sdma exported routines
+ *
+ * These sdma routines fit into three categories:
+ * - The SDMA API for building and submitting packets
+ * to the ring
+ *
+ * - Initialization and tear down routines to build up
+ * and tear down SDMA
+ *
+ * - ISR entry points to handle interrupts, state changes
+ * and errors
+ */
+
+/**
+ * DOC: sdma PSM/verbs API
+ *
+ * The sdma API is designed to be used by both PSM
+ * and verbs to supply packets to the SDMA ring.
+ *
+ * The usage of the API is as follows:
+ *
+ * Embed a struct iowait in the QP or
+ * PQ. The iowait should be initialized with a
+ * call to iowait_init().
+ *
+ * The user of the API should create an allocation method
+ * for their version of the txreq. slabs, pre-allocated lists,
+ * and dma pools can be used. Once the user's overload of
+ * the sdma_txreq has been allocated, the sdma_txreq member
+ * must be initialized with sdma_txinit() or sdma_txinit_ahg().
+ *
+ * The txreq must be declared with the sdma_txreq first.
+ *
+ * The tx request, once initialized, is manipulated with calls to
+ * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
+ * for each disjoint memory location. It is the user's responsibility
+ * to understand the packet boundaries and page boundaries to do the
+ * appropriate number of sdma_txadd_* calls. The user
+ * must be prepared to deal with failures from these routines due to
+ * either memory allocation or dma_mapping failures.
+ *
+ * The mapping specifics for each memory location are recorded
+ * in the tx. Memory locations added with sdma_txadd_page()
+ * and sdma_txadd_kvaddr() are automatically mapped when added
+ * to the tx and unmapped as part of the progress processing in the
+ * SDMA interrupt handling.
+ *
+ * sdma_txadd_daddr() is used to add a dma_addr_t memory location to the
+ * tx. An example of a use case would be a pre-allocated
+ * set of headers allocated via dma_pool_alloc() or
+ * dma_alloc_coherent(). For these memory locations, it
+ * is the responsibility of the user to handle the unmapping.
+ * (This would usually be at an unload or job termination.)
+ *
+ * The routine sdma_send_txreq() is used to submit
+ * a tx to the ring after the appropriate number of
+ * sdma_txadd_* calls have been made.
+ *
+ * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
+ * can be used to submit a list of packets.
+ *
+ * The user is free to use the link overhead in the struct sdma_txreq as
+ * long as the tx isn't in flight.
+ *
+ * The extreme degenerate case of the number of descriptors
+ * exceeding the ring size is automatically handled as
+ * memory locations are added. An overflow of the descriptor
+ * array that is part of the sdma_txreq is also automatically
+ * handled.
+ *
+ */
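+
+/*
+ * Illustrative sketch of the flow described above (not part of the
+ * driver; my_txreq, my_complete, and the iowait field name are
+ * hypothetical):
+ *
+ *	struct my_txreq {
+ *		struct sdma_txreq txreq;	<- must be first
+ *		...user state...
+ *	};
+ *
+ *	static void my_complete(struct sdma_txreq *tx, int status, int idle)
+ *	{
+ *		...runs in ISR/tasklet/thread context; must not sleep...
+ *	}
+ *
+ *	ret = sdma_txinit(&tx->txreq, 0, tlen, my_complete);
+ *	ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdrlen);
+ *	ret = sdma_txadd_page(dd, &tx->txreq, page, offset, len);
+ *	ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
+ */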
+
+/**
+ * DOC: Infrastructure calls
+ *
+ * sdma_init() is used to initialize data structures and
+ * CSRs for the desired number of SDMA engines.
+ *
+ * sdma_start() is used to kick the SDMA engines initialized
+ * with sdma_init(). Interrupts must be enabled at this
+ * point since aspects of the state machine are interrupt
+ * driven.
+ *
+ * sdma_engine_error() and sdma_engine_interrupt() are
+ * the entry points for interrupt handling.
+ *
+ * sdma_map_init() manages the mapping
+ * table when the number of vls is changed.
+ *
+ */
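+
+/*
+ * Illustrative bring-up ordering (a sketch assumed from the notes
+ * above, not mandated by this header):
+ *
+ *	ret = sdma_init(dd, port);	<- allocate engines and rings
+ *	...request and enable IRQs...
+ *	sdma_start(dd);			<- kick the state machines
+ *	...
+ *	sdma_exit(dd);			<- tear down at unload
+ */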
+
+/*
+ * struct hw_sdma_desc - raw 128 bit SDMA descriptor
+ *
+ * This is the raw descriptor in the SDMA ring
+ */
+struct hw_sdma_desc {
+ /* private: don't use directly */
+ __le64 qw[2];
+};
+
+/*
+ * struct sdma_desc - canonical fragment descriptor
+ *
+ * This is the descriptor carried in the tx request
+ * corresponding to each fragment.
+ *
+ */
+struct sdma_desc {
+ /* private: don't use directly */
+ u64 qw[2];
+};
+
+struct sdma_txreq;
+typedef void (*callback_t)(struct sdma_txreq *, int, int);
+
+/**
+ * struct sdma_txreq - the sdma_txreq structure (one per packet)
+ * @list: for use by user and by queuing for wait
+ *
+ * This is the representation of a packet which consists of some
+ * number of fragments. Storage for all fragments is provided
+ * within the structure.
+ *
+ * The storage for the descriptors is automatically extended as needed
+ * when the current allocation is exceeded.
+ *
+ * The user (Verbs or PSM) may overload this structure with fields
+ * specific to their use by putting this struct first in their struct.
+ * The method of allocation of the overloaded structure is user dependent.
+ *
+ * The list is the only public field in the structure.
+ *
+ */
+
+struct sdma_txreq {
+ struct list_head list;
+ /* private: */
+ struct sdma_desc *descp;
+ /* private: */
+ void *coalesce_buf;
+ /* private: */
+ struct iowait *wait;
+ /* private: */
+ callback_t complete;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ u64 sn;
+#endif
+ /* private: - used in coalesce/pad processing */
+ u16 packet_len;
+ /* private: - down-counted to trigger last */
+ u16 tlen;
+ /* private: flags */
+ u16 flags;
+ /* private: */
+ u16 num_desc;
+ /* private: */
+ u16 desc_limit;
+ /* private: */
+ u16 next_descq_idx;
+ /* private: */
+ struct sdma_desc descs[NUM_DESC];
+};
+
+struct verbs_txreq {
+ struct hfi1_pio_header phdr;
+ struct sdma_txreq txreq;
+ struct hfi1_qp *qp;
+ struct hfi1_swqe *wqe;
+ struct hfi1_mregion *mr;
+ struct hfi1_sge_state *ss;
+ struct sdma_engine *sde;
+ u16 hdr_dwords;
+ u16 hdr_inx;
+};
+
+/**
+ * struct sdma_engine - Data pertaining to each SDMA engine.
+ * @dd: a back-pointer to the device data
+ * @ppd: per port back-pointer
+ * @imask: mask for irq manipulation
+ * @idle_mask: mask for determining if an interrupt is due to sdma_idle
+ *
+ * This structure has the state for each sdma_engine.
+ *
+ * Access to non-public fields is not supported
+ * since the private members are subject to change.
+ */
+struct sdma_engine {
+ /* read mostly */
+ struct hfi1_devdata *dd;
+ struct hfi1_pportdata *ppd;
+ /* private: */
+ void __iomem *tail_csr;
+ u64 imask; /* clear interrupt mask */
+ u64 idle_mask;
+ u64 progress_mask;
+ /* private: */
+ struct workqueue_struct *wq;
+ /* private: */
+ volatile __le64 *head_dma; /* DMA'ed by chip */
+ /* private: */
+ dma_addr_t head_phys;
+ /* private: */
+ struct hw_sdma_desc *descq;
+ /* private: */
+ unsigned descq_full_count;
+ struct sdma_txreq **tx_ring;
+ /* private: */
+ dma_addr_t descq_phys;
+ /* private: */
+ u32 sdma_mask;
+ /* private: */
+ struct sdma_state state;
+ /* private: */
+ u8 sdma_shift;
+ /* private: */
+ u8 this_idx; /* zero relative engine */
+ /* protect changes to senddmactrl shadow */
+ spinlock_t senddmactrl_lock;
+ /* private: */
+ u64 p_senddmactrl; /* shadow per-engine SendDmaCtrl */
+
+ /* read/write using tail_lock */
+ spinlock_t tail_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ /* private: */
+ u64 tail_sn;
+#endif
+ /* private: */
+ u32 descq_tail;
+ /* private: */
+ unsigned long ahg_bits;
+ /* private: */
+ u16 desc_avail;
+ /* private: */
+ u16 tx_tail;
+ /* private: */
+ u16 descq_cnt;
+
+ /* read/write using head_lock */
+ /* private: */
+ seqlock_t head_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ /* private: */
+ u64 head_sn;
+#endif
+ /* private: */
+ u32 descq_head;
+ /* private: */
+ u16 tx_head;
+ /* private: */
+ u64 last_status;
+
+ /* private: */
+ struct list_head dmawait;
+
+ /* CONFIG SDMA for now, just blindly duplicate */
+ /* private: */
+ struct tasklet_struct sdma_hw_clean_up_task
+ ____cacheline_aligned_in_smp;
+
+ /* private: */
+ struct tasklet_struct sdma_sw_clean_up_task
+ ____cacheline_aligned_in_smp;
+ /* private: */
+ struct work_struct err_halt_worker;
+ /* private: */
+ struct timer_list err_progress_check_timer;
+ u32 progress_check_head;
+ /* private: */
+ struct work_struct flush_worker;
+ spinlock_t flushlist_lock;
+ /* private: */
+ struct list_head flushlist;
+};
+
+
+int sdma_init(struct hfi1_devdata *dd, u8 port);
+void sdma_start(struct hfi1_devdata *dd);
+void sdma_exit(struct hfi1_devdata *dd);
+void sdma_all_running(struct hfi1_devdata *dd);
+void sdma_all_idle(struct hfi1_devdata *dd);
+void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
+void sdma_freeze(struct hfi1_devdata *dd);
+void sdma_unfreeze(struct hfi1_devdata *dd);
+void sdma_wait(struct hfi1_devdata *dd);
+
+/**
+ * sdma_empty() - idle engine test
+ * @sde: sdma engine
+ *
+ * Currently used by verbs as a latency optimization.
+ *
+ * Return:
+ * 1 - empty, 0 - non-empty
+ */
+static inline int sdma_empty(struct sdma_engine *sde)
+{
+ return sde->descq_tail == sde->descq_head;
+}
+
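+/*
+ * Free slot count for the descriptor ring. One slot is deliberately
+ * kept unused so that head == tail unambiguously means "empty" rather
+ * than "full". E.g. (hypothetical values) descq_cnt = 1024, tail = 10,
+ * head = 8: 1024 - (10 - 8) - 1 = 1021 free descriptors.
+ */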
+static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
+{
+ return sde->descq_cnt -
+ (sde->descq_tail -
+ ACCESS_ONCE(sde->descq_head)) - 1;
+}
+
+static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
+{
+ return sde->descq_cnt - sdma_descq_freecnt(sde);
+}
+
+/*
+ * Either head_lock or tail_lock is required to see
+ * a steady state.
+ */
+static inline int __sdma_running(struct sdma_engine *engine)
+{
+ return engine->state.current_state == sdma_state_s99_running;
+}
+
+
+/**
+ * sdma_running() - state suitability test
+ * @engine: sdma engine
+ *
+ * sdma_running probes the internal state to determine if it is suitable
+ * for submitting packets.
+ *
+ * Return:
+ * 1 - ok to submit, 0 - not ok to submit
+ *
+ */
+static inline int sdma_running(struct sdma_engine *engine)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&engine->tail_lock, flags);
+ ret = __sdma_running(engine);
+ spin_unlock_irqrestore(&engine->tail_lock, flags);
+ return ret;
+}
+
+void _sdma_txreq_ahgadd(
+ struct sdma_txreq *tx,
+ u8 num_ahg,
+ u8 ahg_entry,
+ u32 *ahg,
+ u8 ahg_hlen);
+
+
+/**
+ * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
+ * @tx: tx request to initialize
+ * @flags: flags to key last descriptor additions
+ * @tlen: total packet length (pbc + headers + data)
+ * @ahg_entry: ahg entry to use (0 - 31)
+ * @num_ahg: number of AHG descriptors to use (0 - 9)
+ * @ahg: array of AHG descriptors (up to 9 entries)
+ * @ahg_hlen: number of bytes from ASIC entry to use
+ * @cb: callback
+ *
+ * The allocation of the sdma_txreq and its enclosing structure is user
+ * dependent. This routine must be called to initialize the user independent
+ * fields.
+ *
+ * The currently supported flags are SDMA_TXREQ_F_URGENT,
+ * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
+ *
+ * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
+ * completion is desired as soon as possible.
+ *
+ * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
+ * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add
+ * the AHG descriptors into the first 1 to 3 descriptors.
+ *
+ * Completions of submitted requests can be obtained for selected
+ * txreqs by supplying a completion callback to sdma_txinit() or
+ * sdma_txinit_ahg(). The environment in which the callback runs
+ * can be from an ISR, a tasklet, or a thread, so no sleeping
+ * kernel routines can be used. Aspects of the sdma ring may
+ * be locked so care should be taken with locking.
+ *
+ * The callback pointer can be NULL to avoid any callback for the packet
+ * being submitted. The callback will be provided this tx, a status, and a flag.
+ *
+ * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
+ * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
+ *
+ * The flag, if an iowait had been used, indicates that the iowait
+ * sdma_busy count has reached zero.
+ *
+ * The user data portion of tlen should be precise. The sdma_txadd_*
+ * entry points will pad with a descriptor referencing 1 - 3 bytes when
+ * the number of bytes specified in tlen has been supplied to the sdma_txreq.
+ *
+ * ahg_hlen is used to determine the number of on-chip entry bytes to
+ * use as the header. This is for cases where the stored header is
+ * larger than the header to be used in a packet. This is typical
+ * for verbs, where the stored RDMA_WRITE_FIRST header is larger than
+ * the header in an RDMA_WRITE_MIDDLE packet.
+ *
+ */
+static inline int sdma_txinit_ahg(
+ struct sdma_txreq *tx,
+ u16 flags,
+ u16 tlen,
+ u8 ahg_entry,
+ u8 num_ahg,
+ u32 *ahg,
+ u8 ahg_hlen,
+ void (*cb)(struct sdma_txreq *, int, int))
+{
+ if (tlen == 0)
+ return -ENODATA;
+ if (tlen > MAX_SDMA_PKT_SIZE)
+ return -EMSGSIZE;
+ tx->desc_limit = ARRAY_SIZE(tx->descs);
+ tx->descp = &tx->descs[0];
+ INIT_LIST_HEAD(&tx->list);
+ tx->num_desc = 0;
+ tx->flags = flags;
+ tx->complete = cb;
+ tx->coalesce_buf = NULL;
+ tx->wait = NULL;
+ tx->tlen = tx->packet_len = tlen;
+ tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
+ tx->descs[0].qw[1] = 0;
+ if (flags & SDMA_TXREQ_F_AHG_COPY)
+ tx->descs[0].qw[1] |=
+ (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
+ << SDMA_DESC1_HEADER_INDEX_SHIFT) |
+ (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
+ << SDMA_DESC1_HEADER_MODE_SHIFT);
+ else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
+ _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
+ return 0;
+}
+
+/**
+ * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
+ * @tx: tx request to initialize
+ * @flags: flags to key last descriptor additions
+ * @tlen: total packet length (pbc + headers + data)
+ * @cb: callback pointer
+ *
+ * The allocation of the sdma_txreq and its enclosing structure is user
+ * dependent. This routine must be called to initialize the user
+ * independent fields.
+ *
+ * The only currently supported flag is SDMA_TXREQ_F_URGENT.
+ *
+ * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
+ * completion is desired as soon as possible.
+ *
+ * Completions of submitted requests can be obtained for selected
+ * txreqs by supplying a completion callback to sdma_txinit() or
+ * sdma_txinit_ahg(). The environment in which the callback runs
+ * can be from an ISR, a tasklet, or a thread, so no sleeping
+ * kernel routines can be used. Aspects of the sdma ring may
+ * be locked so care should be taken with locking.
+ *
+ * The callback pointer can be NULL to avoid any callback for the packet
+ * being submitted.
+ *
+ * The callback, if non-NULL, will be provided this tx and a status. The
+ * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
+ * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
+ *
+ */
+static inline int sdma_txinit(
+ struct sdma_txreq *tx,
+ u16 flags,
+ u16 tlen,
+ void (*cb)(struct sdma_txreq *, int, int))
+{
+ return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
+}
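+
+/*
+ * Illustrative completion callback sketch (the name my_tx_complete
+ * and the bookkeeping are hypothetical):
+ *
+ *	static void my_tx_complete(struct sdma_txreq *tx, int status,
+ *				   int drained)
+ *	{
+ *		...no sleeping here: ISR/tasklet/thread context...
+ *		if (status != SDMA_TXREQ_S_OK)
+ *			...account for SENDERROR/ABORTED/SHUTDOWN...
+ *		...free or recycle the structure enclosing tx...
+ *	}
+ */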
+
+/* helpers - don't use */
+static inline int sdma_mapping_type(struct sdma_desc *d)
+{
+ return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
+ >> SDMA_DESC1_GENERATION_SHIFT;
+}
+
+static inline size_t sdma_mapping_len(struct sdma_desc *d)
+{
+ return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
+ >> SDMA_DESC0_BYTE_COUNT_SHIFT;
+}
+
+static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+{
+ return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
+ >> SDMA_DESC0_PHY_ADDR_SHIFT;
+}
+
+static inline void make_tx_sdma_desc(
+ struct sdma_txreq *tx,
+ int type,
+ dma_addr_t addr,
+ size_t len)
+{
+ struct sdma_desc *desc = &tx->descp[tx->num_desc];
+
+ if (!tx->num_desc) {
+ /* first descriptor: qw[0]/qw[1] already set at init; any ahg mode is in qw[1] */
+ desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ } else {
+ desc->qw[0] = 0;
+ desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
+ << SDMA_DESC1_GENERATION_SHIFT;
+ }
+ desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
+ << SDMA_DESC0_PHY_ADDR_SHIFT) |
+ (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ << SDMA_DESC0_BYTE_COUNT_SHIFT);
+}
+
+/* helper to extend txreq */
+int _extend_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
+int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
+void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+
+/* helpers used by public routines */
+static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ struct sdma_txreq *tx)
+{
+ tx->descp[tx->num_desc].qw[0] |=
+ SDMA_DESC0_LAST_DESC_FLAG;
+ tx->descp[tx->num_desc].qw[1] |=
+ dd->default_desc1;
+ if (tx->flags & SDMA_TXREQ_F_URGENT)
+ tx->descp[tx->num_desc].qw[1] |=
+ (SDMA_DESC1_HEAD_TO_HOST_FLAG |
+ SDMA_DESC1_INT_REQ_FLAG);
+}
+
+static inline int _sdma_txadd_daddr(
+ struct hfi1_devdata *dd,
+ int type,
+ struct sdma_txreq *tx,
+ dma_addr_t addr,
+ u16 len)
+{
+ int rval = 0;
+
+ if (unlikely(tx->num_desc == tx->desc_limit)) {
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval)
+ return rval;
+ }
+ make_tx_sdma_desc(tx, type, addr, len);
+ WARN_ON(len > tx->tlen);
+ tx->tlen -= len;
+ /* special cases for last */
+ if (!tx->tlen) {
+ if (tx->packet_len & (sizeof(u32) - 1))
+ rval = _pad_sdma_tx_descs(dd, tx);
+ else
+ _sdma_close_tx(dd, tx);
+ }
+ tx->num_desc++;
+ return rval;
+}
+
+/**
+ * sdma_txadd_page() - add a page to the sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: tx request to which the page is added
+ * @page: page to map
+ * @offset: offset within the page
+ * @len: length in bytes
+ *
+ * This is used to add a page/offset/length descriptor.
+ *
+ * The mapping/unmapping of the page/offset/len is automatically handled.
+ *
+ * Return:
+ * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
+ * extend descriptor array or couldn't allocate coalesce
+ * buffer.
+ *
+ */
+static inline int sdma_txadd_page(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ struct page *page,
+ unsigned long offset,
+ u16 len)
+{
+ dma_addr_t addr =
+ dma_map_page(
+ &dd->pcidev->dev,
+ page,
+ offset,
+ len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
+ sdma_txclean(dd, tx);
+ return -ENOSPC;
+ }
+ return _sdma_txadd_daddr(
+ dd, SDMA_MAP_PAGE, tx, addr, len);
+}
+
+/**
+ * sdma_txadd_daddr() - add a dma address to the sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: sdma_txreq to which the page is added
+ * @addr: dma address mapped by caller
+ * @len: length in bytes
+ *
+ * This is used to add a descriptor for memory that is already dma mapped.
+ *
+ * In this case, there is no unmapping as part of the progress processing for
+ * this memory location.
+ *
+ * Return:
+ * 0 - success, -ENOMEM - couldn't extend descriptor array
+ */
+
+static inline int sdma_txadd_daddr(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ dma_addr_t addr,
+ u16 len)
+{
+ return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
+}
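+
+/*
+ * Illustrative sketch of the pre-mapped use case (pool, hdr, and
+ * hdr_dma are hypothetical): the caller owns the mapping and unmaps
+ * it at unload or job termination, not per packet.
+ *
+ *	hdr = dma_pool_alloc(pool, GFP_ATOMIC, &hdr_dma);
+ *	...fill in the header...
+ *	ret = sdma_txadd_daddr(dd, &tx->txreq, hdr_dma, hdrlen);
+ */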
+
+/**
+ * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
+ * @dd: the device to use for mapping
+ * @tx: sdma_txreq to which the page is added
+ * @kvaddr: the kernel virtual address
+ * @len: length in bytes
+ *
+ * This is used to add a descriptor referenced by the indicated kvaddr and
+ * len.
+ *
+ * The mapping/unmapping of the kvaddr and len is automatically handled.
+ *
+ * Return:
+ * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend
+ * descriptor array
+ */
+static inline int sdma_txadd_kvaddr(
+ struct hfi1_devdata *dd,
+ struct sdma_txreq *tx,
+ void *kvaddr,
+ u16 len)
+{
+ dma_addr_t addr =
+ dma_map_single(
+ &dd->pcidev->dev,
+ kvaddr,
+ len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
+ sdma_txclean(dd, tx);
+ return -ENOSPC;
+ }
+ return _sdma_txadd_daddr(
+ dd, SDMA_MAP_SINGLE, tx, addr, len);
+}
+
+struct iowait;
+
+int sdma_send_txreq(struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *tx);
+int sdma_send_txlist(struct sdma_engine *sde,
+ struct iowait *wait,
+ struct list_head *tx_list);
+
+int sdma_ahg_alloc(struct sdma_engine *sde);
+void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
+
+/**
+ * sdma_build_ahg_descriptor() - build ahg descriptor
+ * @data: 16 bit value to write into the header
+ * @dwindex: index of the header dword to update
+ * @startbit: first bit of the field to update
+ * @bits: width of the field in bits
+ *
+ * Build and return a 32 bit AHG descriptor.
+ */
+static inline u32 sdma_build_ahg_descriptor(
+ u16 data,
+ u8 dwindex,
+ u8 startbit,
+ u8 bits)
+{
+ return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
+ ((startbit & SDMA_AHG_FIELD_START_MASK) <<
+ SDMA_AHG_FIELD_START_SHIFT) |
+ ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
+ SDMA_AHG_FIELD_LEN_SHIFT) |
+ ((dwindex & SDMA_AHG_INDEX_MASK) <<
+ SDMA_AHG_INDEX_SHIFT) |
+ ((data & SDMA_AHG_VALUE_MASK) <<
+ SDMA_AHG_VALUE_SHIFT));
+}
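+
+/*
+ * Illustrative sketch (values are hypothetical): update 16 bits
+ * starting at bit 16 of header dword 4, applied from on-chip AHG
+ * entry 3:
+ *
+ *	u32 ahg[1];
+ *
+ *	ahg[0] = sdma_build_ahg_descriptor(new_val, 4, 16, 16);
+ *	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_USE_AHG, tlen,
+ *			      3, 1, ahg, ahg_hlen, my_tx_complete);
+ */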
+
+/**
+ * sdma_progress() - use seq number to detect head progress
+ * @sde: sdma_engine to check
+ * @seq: base seq count
+ * @tx: txreq for which we need to check descriptor availability
+ *
+ * This is used in the appropriate spot in the sleep routine
+ * to check for potential ring progress. The caller obtains the
+ * seqcount before queuing the iowait structure for progress.
+ *
+ * If the seqcount indicates that progress has been made,
+ * re-submission is indicated when the descriptor
+ * queue has enough descriptors for the txreq.
+ */
+static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
+ struct sdma_txreq *tx)
+{
+ if (read_seqretry(&sde->head_lock, seq)) {
+ sde->desc_avail = sdma_descq_freecnt(sde);
+ if (tx->num_desc > sde->desc_avail)
+ return 0;
+ return 1;
+ }
+ return 0;
+}
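+
+/*
+ * Illustrative sketch of the sleep-path pattern described above
+ * (assumed usage):
+ *
+ *	seq = read_seqbegin(&sde->head_lock);
+ *	...queue the iowait for progress...
+ *	if (sdma_progress(sde, seq, tx))
+ *		...undo the queuing and retry the submit instead of sleeping...
+ */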
+
+/**
+ * sdma_iowait_schedule() - schedule an iowait for progress
+ * @sde: sdma_engine to schedule on
+ * @wait: wait struct to schedule
+ *
+ * This function schedules the iowait structure embedded
+ * in the QP or PQ on the engine's workqueue.
+ *
+ */
+static inline void sdma_iowait_schedule(
+ struct sdma_engine *sde,
+ struct iowait *wait)
+{
+ iowait_schedule(wait, sde->wq);
+}
+
+/* for use by interrupt handling */
+void sdma_engine_error(struct sdma_engine *sde, u64 status);
+void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
+
+/*
+ *
+ * The diagram below details the relationship of the mapping structures
+ *
+ * Since the mapping now allows for non-uniform engines per vl, the
+ * number of engines for a vl is either the vl_engines[vl] or
+ * a computation based on num_sdma/num_vls:
+ *
+ * For example:
+ * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
+ *
+ * n = roundup to next highest power of 2 using nactual
+ * (e.g. nactual = 3 rounds up to n = 4)
+ *
+ * In the case where num_sdma/num_vls doesn't divide
+ * evenly, the extras are added from the last vl downward.
+ *
+ * For the case where n > nactual, the engines are assigned
+ * in a round robin fashion wrapping back to the first engine
+ * for a particular vl.
+ *
+ * dd->sdma_map
+ * | sdma_map_elem[0]
+ * | +--------------------+
+ * v | mask |
+ * sdma_vl_map |--------------------|
+ * +--------------------------+ | sde[0] -> eng 1 |
+ * | list (RCU) | |--------------------|
+ * |--------------------------| ->| sde[1] -> eng 2 |
+ * | mask | --/ |--------------------|
+ * |--------------------------| -/ | * |
+ * | actual_vls (max 8) | -/ |--------------------|
+ * |--------------------------| --/ | sde[n] -> eng n |
+ * | vls (max 8) | -/ +--------------------+
+ * |--------------------------| --/
+ * | map[0] |-/
+ * |--------------------------| +--------------------+
+ * | map[1] |--- | mask |
+ * |--------------------------| \---- |--------------------|
+ * | * | \-- | sde[0] -> eng 1+n |
+ * | * | \---- |--------------------|
+ * | * | \->| sde[1] -> eng 2+n |
+ * |--------------------------| |--------------------|
+ * | map[vls - 1] |- | * |
+ * +--------------------------+ \- |--------------------|
+ * \- | sde[m] -> eng m+n |
+ * \ +--------------------+
+ * \-
+ * \
+ * \- +--------------------+
+ * \- | mask |
+ * \ |--------------------|
+ * \- | sde[0] -> eng 1+m+n|
+ * \- |--------------------|
+ * >| sde[1] -> eng 2+m+n|
+ * |--------------------|
+ * | * |
+ * |--------------------|
+ * | sde[o] -> eng o+m+n|
+ * +--------------------+
+ *
+ */
+
+/**
+ * struct sdma_map_elem - mapping for a vl
+ * @mask - selector mask
+ * @sde - array of engines for this vl
+ *
+ * The mask is used to "mod" the selector
+ * to produce an index into the trailing
+ * array of sdes.
+ */
+struct sdma_map_elem {
+ u32 mask;
+ struct sdma_engine *sde[0];
+};
+
+/**
+ * struct sdma_vl_map - mapping of vls to engines
+ * @list - rcu head for free callback
+ * @mask - vl mask to "mod" the vl to produce an index to map array
+ * @actual_vls - number of vls
+ * @vls - number of vls rounded to next power of 2
+ * @map - array of sdma_map_elem entries
+ *
+ * This is the parent mapping structure. The trailing
+ * members of the struct point to sdma_map_elem entries, which
+ * in turn point to an array of sde's for that vl.
+ */
+struct sdma_vl_map {
+ struct rcu_head list;
+ u32 mask;
+ u8 actual_vls;
+ u8 vls;
+ struct sdma_map_elem *map[0];
+};
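+
+/*
+ * Illustrative lookup sketch mirroring the diagram above (an
+ * assumption about usage; the exported selectors are
+ * sdma_select_engine_vl()/sdma_select_engine_sc() below):
+ *
+ *	rcu_read_lock();
+ *	m = rcu_dereference(dd->sdma_map);
+ *	e = m->map[vl & m->mask];
+ *	sde = e->sde[selector & e->mask];
+ *	rcu_read_unlock();
+ */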
+
+int sdma_map_init(
+ struct hfi1_devdata *dd,
+ u8 port,
+ u8 num_vls,
+ u8 *vl_engines);
+
+/* slow path */
+void _sdma_engine_progress_schedule(struct sdma_engine *sde);
+
+/**
+ * sdma_engine_progress_schedule() - schedule progress on engine
+ * @sde: sdma_engine to schedule progress
+ *
+ * This is the fast path.
+ *
+ */
+static inline void sdma_engine_progress_schedule(
+ struct sdma_engine *sde)
+{
+ if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
+ return;
+ _sdma_engine_progress_schedule(sde);
+}
+
+struct sdma_engine *sdma_select_engine_sc(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 sc5);
+
+struct sdma_engine *sdma_select_engine_vl(
+ struct hfi1_devdata *dd,
+ u32 selector,
+ u8 vl);
+
+void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
+
+#ifdef CONFIG_SDMA_VERBOSITY
+void sdma_dumpstate(struct sdma_engine *);
+#endif
+static inline char *slashstrip(char *s)
+{
+ char *r = s;
+
+ while (*s)
+ if (*s++ == '/')
+ r = s;
+ return r;
+}
+
+u16 sdma_get_descq_cnt(void);
+
+extern uint mod_num_sdma;
+
+void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+
+#endif
diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c
new file mode 100644
index 000000000000..67786d417493
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/srq.c
@@ -0,0 +1,397 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "verbs.h"
+
+/**
+ * hfi1_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: a pointer to the first WR that caused a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct hfi1_srq *srq = to_isrq(ibsrq);
+ struct hfi1_rwq *wq;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct hfi1_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ */
+struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct hfi1_ibdev *dev = to_idev(ibpd->device);
+ struct hfi1_srq *srq;
+ u32 sz;
+ struct ib_srq *ret;
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ ret = ERR_PTR(-ENOSYS);
+ goto done;
+ }
+
+ if (srq_init_attr->attr.max_sge == 0 ||
+ srq_init_attr->attr.max_sge > hfi1_max_srq_sges ||
+ srq_init_attr->attr.max_wr == 0 ||
+ srq_init_attr->attr.max_wr > hfi1_max_srq_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ sizeof(struct hfi1_rwqe);
+ srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz);
+ if (!srq->rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_srq;
+ }
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See hfi1_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+ u32 s = sizeof(struct hfi1_rwq) + srq->rq.size * sz;
+
+ srq->ip =
+ hfi1_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
+ if (!srq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wq;
+ }
+
+ err = ib_copy_to_udata(udata, &srq->ip->offset,
+ sizeof(srq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ srq->ip = NULL;
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->rq.wq->head = 0;
+ srq->rq.wq->tail = 0;
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ spin_lock(&dev->n_srqs_lock);
+ if (dev->n_srqs_allocated == hfi1_max_srqs) {
+ spin_unlock(&dev->n_srqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_srqs_allocated++;
+ spin_unlock(&dev->n_srqs_lock);
+
+ if (srq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &srq->ibsrq;
+ goto done;
+
+bail_ip:
+ kfree(srq->ip);
+bail_wq:
+ vfree(srq->rq.wq);
+bail_srq:
+ kfree(srq);
+done:
+ return ret;
+}
+
+/**
+ * hfi1_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ */
+int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct hfi1_srq *srq = to_isrq(ibsrq);
+ struct hfi1_rwq *wq;
+ int ret = 0;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct hfi1_rwq *owq;
+ struct hfi1_rwqe *p;
+ u32 sz, size, n, head, tail;
+
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > hfi1_max_srq_wrs) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ sz = sizeof(struct hfi1_rwqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+ size = attr->max_wr + 1;
+ wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz);
+ if (!wq) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = 0;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret)
+ goto bail_free;
+ udata->outbuf =
+ (void __user *) (unsigned long) offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&srq->rq.lock);
+ /*
+ * Validate head and tail pointer values and compute
+ * the number of remaining WQEs.
+ */
+ owq = srq->rq.wq;
+ head = owq->head;
+ tail = owq->tail;
+ if (head >= srq->rq.size || tail >= srq->rq.size) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
+ else
+ n -= tail;
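+ /*
+ * e.g. (hypothetical) rq.size = 8, head = 2, tail = 6:
+ * n = 2 + (8 - 6) = 4 WQEs remain queued in the old ring.
+ */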
+ if (size <= n) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = 0;
+ p = wq->wq;
+ while (tail != head) {
+ struct hfi1_rwqe *wqe;
+ int i;
+
+ wqe = get_rwqe_ptr(&srq->rq, tail);
+ p->wr_id = wqe->wr_id;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct hfi1_rwqe *)((char *)p + sz);
+ if (++tail >= srq->rq.size)
+ tail = 0;
+ }
+ srq->rq.wq = wq;
+ srq->rq.size = size;
+ wq->head = n;
+ wq->tail = 0;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+
+ vfree(owq);
+
+ if (srq->ip) {
+ struct hfi1_mmap_info *ip = srq->ip;
+ struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device);
+ u32 s = sizeof(struct hfi1_rwq) + size * sz;
+
+ hfi1_update_mmap_info(dev, ip, s, wq);
+
+ /*
+ * Return the offset to mmap.
+ * See hfi1_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ /*
+ * Put user mapping info onto the pending list
+ * unless it already is on the list.
+ */
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps,
+ &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+ }
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&srq->rq.lock);
+bail_free:
+ vfree(wq);
+bail:
+ return ret;
+}
+
+int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct hfi1_srq *srq = to_isrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * hfi1_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int hfi1_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct hfi1_srq *srq = to_isrq(ibsrq);
+ struct hfi1_ibdev *dev = to_idev(ibsrq->device);
+
+ spin_lock(&dev->n_srqs_lock);
+ dev->n_srqs_allocated--;
+ spin_unlock(&dev->n_srqs_lock);
+ if (srq->ip)
+ kref_put(&srq->ip->ref, hfi1_release_mmap_info);
+ else
+ vfree(srq->rq.wq);
+ kfree(srq);
+
+ return 0;
+}
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c
new file mode 100644
index 000000000000..b78c72861ef9
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/sysfs.c
@@ -0,0 +1,739 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/ctype.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "trace.h"
+
+
+/*
+ * Start of per-port congestion control structures and support code
+ */
+
+/*
+ * Congestion control table size followed by table entries
+ */
+static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int ret;
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
+ struct cc_state *cc_state;
+
+ ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
+ + sizeof(__be16);
+
+ if (pos > ret)
+ return -EINVAL;
+
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ rcu_read_lock();
+ cc_state = get_cc_state(ppd);
+ if (cc_state == NULL) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ memcpy(buf, &cc_state->cct, count);
+ rcu_read_unlock();
+
+ return count;
+}
+
+static void port_release(struct kobject *kobj)
+{
+ /* nothing to do since memory is freed by hfi1_free_devdata() */
+}
+
+static struct kobj_type port_cc_ktype = {
+ .release = port_release,
+};
+
+static struct bin_attribute cc_table_bin_attr = {
+ .attr = {.name = "cc_table_bin", .mode = 0444},
+ .read = read_cc_table_bin,
+ .size = PAGE_SIZE,
+};
+
+/*
+ * Congestion settings: port control, control map and an array of 16
+ * entries for the congestion entries - increase, timer, event log
+ * trigger threshold and the minimum injection rate delay.
+ */
+static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int ret;
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
+ struct cc_state *cc_state;
+
+ ret = sizeof(struct opa_congestion_setting_attr_shadow);
+
+ if (pos > ret)
+ return -EINVAL;
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ rcu_read_lock();
+ cc_state = get_cc_state(ppd);
+ if (cc_state == NULL) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ memcpy(buf, &cc_state->cong_setting, count);
+ rcu_read_unlock();
+
+ return count;
+}
+
+static struct bin_attribute cc_setting_bin_attr = {
+ .attr = {.name = "cc_settings_bin", .mode = 0444},
+ .read = read_cc_setting_bin,
+ .size = PAGE_SIZE,
+};
+
+/* Start sc2vl */
+#define HFI1_SC2VL_ATTR(N) \
+ static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .sc = N \
+ }
+
+struct hfi1_sc2vl_attr {
+ struct attribute attr;
+ int sc;
+};
+
+HFI1_SC2VL_ATTR(0);
+HFI1_SC2VL_ATTR(1);
+HFI1_SC2VL_ATTR(2);
+HFI1_SC2VL_ATTR(3);
+HFI1_SC2VL_ATTR(4);
+HFI1_SC2VL_ATTR(5);
+HFI1_SC2VL_ATTR(6);
+HFI1_SC2VL_ATTR(7);
+HFI1_SC2VL_ATTR(8);
+HFI1_SC2VL_ATTR(9);
+HFI1_SC2VL_ATTR(10);
+HFI1_SC2VL_ATTR(11);
+HFI1_SC2VL_ATTR(12);
+HFI1_SC2VL_ATTR(13);
+HFI1_SC2VL_ATTR(14);
+HFI1_SC2VL_ATTR(15);
+HFI1_SC2VL_ATTR(16);
+HFI1_SC2VL_ATTR(17);
+HFI1_SC2VL_ATTR(18);
+HFI1_SC2VL_ATTR(19);
+HFI1_SC2VL_ATTR(20);
+HFI1_SC2VL_ATTR(21);
+HFI1_SC2VL_ATTR(22);
+HFI1_SC2VL_ATTR(23);
+HFI1_SC2VL_ATTR(24);
+HFI1_SC2VL_ATTR(25);
+HFI1_SC2VL_ATTR(26);
+HFI1_SC2VL_ATTR(27);
+HFI1_SC2VL_ATTR(28);
+HFI1_SC2VL_ATTR(29);
+HFI1_SC2VL_ATTR(30);
+HFI1_SC2VL_ATTR(31);
+
+
+static struct attribute *sc2vl_default_attributes[] = {
+ &hfi1_sc2vl_attr_0.attr,
+ &hfi1_sc2vl_attr_1.attr,
+ &hfi1_sc2vl_attr_2.attr,
+ &hfi1_sc2vl_attr_3.attr,
+ &hfi1_sc2vl_attr_4.attr,
+ &hfi1_sc2vl_attr_5.attr,
+ &hfi1_sc2vl_attr_6.attr,
+ &hfi1_sc2vl_attr_7.attr,
+ &hfi1_sc2vl_attr_8.attr,
+ &hfi1_sc2vl_attr_9.attr,
+ &hfi1_sc2vl_attr_10.attr,
+ &hfi1_sc2vl_attr_11.attr,
+ &hfi1_sc2vl_attr_12.attr,
+ &hfi1_sc2vl_attr_13.attr,
+ &hfi1_sc2vl_attr_14.attr,
+ &hfi1_sc2vl_attr_15.attr,
+ &hfi1_sc2vl_attr_16.attr,
+ &hfi1_sc2vl_attr_17.attr,
+ &hfi1_sc2vl_attr_18.attr,
+ &hfi1_sc2vl_attr_19.attr,
+ &hfi1_sc2vl_attr_20.attr,
+ &hfi1_sc2vl_attr_21.attr,
+ &hfi1_sc2vl_attr_22.attr,
+ &hfi1_sc2vl_attr_23.attr,
+ &hfi1_sc2vl_attr_24.attr,
+ &hfi1_sc2vl_attr_25.attr,
+ &hfi1_sc2vl_attr_26.attr,
+ &hfi1_sc2vl_attr_27.attr,
+ &hfi1_sc2vl_attr_28.attr,
+ &hfi1_sc2vl_attr_29.attr,
+ &hfi1_sc2vl_attr_30.attr,
+ &hfi1_sc2vl_attr_31.attr,
+ NULL
+};
+
+static ssize_t sc2vl_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct hfi1_sc2vl_attr *sattr =
+ container_of(attr, struct hfi1_sc2vl_attr, attr);
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, sc2vl_kobj);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ return sprintf(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
+}
+
+static const struct sysfs_ops hfi1_sc2vl_ops = {
+ .show = sc2vl_attr_show,
+};
+
+static struct kobj_type hfi1_sc2vl_ktype = {
+ .release = port_release,
+ .sysfs_ops = &hfi1_sc2vl_ops,
+ .default_attrs = sc2vl_default_attributes
+};
+
+/* End sc2vl */
+
+/* Start sl2sc */
+#define HFI1_SL2SC_ATTR(N) \
+ static struct hfi1_sl2sc_attr hfi1_sl2sc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .sl = N \
+ }
+
+struct hfi1_sl2sc_attr {
+ struct attribute attr;
+ int sl;
+};
+
+HFI1_SL2SC_ATTR(0);
+HFI1_SL2SC_ATTR(1);
+HFI1_SL2SC_ATTR(2);
+HFI1_SL2SC_ATTR(3);
+HFI1_SL2SC_ATTR(4);
+HFI1_SL2SC_ATTR(5);
+HFI1_SL2SC_ATTR(6);
+HFI1_SL2SC_ATTR(7);
+HFI1_SL2SC_ATTR(8);
+HFI1_SL2SC_ATTR(9);
+HFI1_SL2SC_ATTR(10);
+HFI1_SL2SC_ATTR(11);
+HFI1_SL2SC_ATTR(12);
+HFI1_SL2SC_ATTR(13);
+HFI1_SL2SC_ATTR(14);
+HFI1_SL2SC_ATTR(15);
+HFI1_SL2SC_ATTR(16);
+HFI1_SL2SC_ATTR(17);
+HFI1_SL2SC_ATTR(18);
+HFI1_SL2SC_ATTR(19);
+HFI1_SL2SC_ATTR(20);
+HFI1_SL2SC_ATTR(21);
+HFI1_SL2SC_ATTR(22);
+HFI1_SL2SC_ATTR(23);
+HFI1_SL2SC_ATTR(24);
+HFI1_SL2SC_ATTR(25);
+HFI1_SL2SC_ATTR(26);
+HFI1_SL2SC_ATTR(27);
+HFI1_SL2SC_ATTR(28);
+HFI1_SL2SC_ATTR(29);
+HFI1_SL2SC_ATTR(30);
+HFI1_SL2SC_ATTR(31);
+
+
+static struct attribute *sl2sc_default_attributes[] = {
+ &hfi1_sl2sc_attr_0.attr,
+ &hfi1_sl2sc_attr_1.attr,
+ &hfi1_sl2sc_attr_2.attr,
+ &hfi1_sl2sc_attr_3.attr,
+ &hfi1_sl2sc_attr_4.attr,
+ &hfi1_sl2sc_attr_5.attr,
+ &hfi1_sl2sc_attr_6.attr,
+ &hfi1_sl2sc_attr_7.attr,
+ &hfi1_sl2sc_attr_8.attr,
+ &hfi1_sl2sc_attr_9.attr,
+ &hfi1_sl2sc_attr_10.attr,
+ &hfi1_sl2sc_attr_11.attr,
+ &hfi1_sl2sc_attr_12.attr,
+ &hfi1_sl2sc_attr_13.attr,
+ &hfi1_sl2sc_attr_14.attr,
+ &hfi1_sl2sc_attr_15.attr,
+ &hfi1_sl2sc_attr_16.attr,
+ &hfi1_sl2sc_attr_17.attr,
+ &hfi1_sl2sc_attr_18.attr,
+ &hfi1_sl2sc_attr_19.attr,
+ &hfi1_sl2sc_attr_20.attr,
+ &hfi1_sl2sc_attr_21.attr,
+ &hfi1_sl2sc_attr_22.attr,
+ &hfi1_sl2sc_attr_23.attr,
+ &hfi1_sl2sc_attr_24.attr,
+ &hfi1_sl2sc_attr_25.attr,
+ &hfi1_sl2sc_attr_26.attr,
+ &hfi1_sl2sc_attr_27.attr,
+ &hfi1_sl2sc_attr_28.attr,
+ &hfi1_sl2sc_attr_29.attr,
+ &hfi1_sl2sc_attr_30.attr,
+ &hfi1_sl2sc_attr_31.attr,
+ NULL
+};
+
+static ssize_t sl2sc_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct hfi1_sl2sc_attr *sattr =
+ container_of(attr, struct hfi1_sl2sc_attr, attr);
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, sl2sc_kobj);
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
+}
+
+static const struct sysfs_ops hfi1_sl2sc_ops = {
+ .show = sl2sc_attr_show,
+};
+
+static struct kobj_type hfi1_sl2sc_ktype = {
+ .release = port_release,
+ .sysfs_ops = &hfi1_sl2sc_ops,
+ .default_attrs = sl2sc_default_attributes
+};
+
+/* End sl2sc */
+
+/* Start vl2mtu */
+
+#define HFI1_VL2MTU_ATTR(N) \
+ static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .vl = N \
+ }
+
+struct hfi1_vl2mtu_attr {
+ struct attribute attr;
+ int vl;
+};
+
+HFI1_VL2MTU_ATTR(0);
+HFI1_VL2MTU_ATTR(1);
+HFI1_VL2MTU_ATTR(2);
+HFI1_VL2MTU_ATTR(3);
+HFI1_VL2MTU_ATTR(4);
+HFI1_VL2MTU_ATTR(5);
+HFI1_VL2MTU_ATTR(6);
+HFI1_VL2MTU_ATTR(7);
+HFI1_VL2MTU_ATTR(8);
+HFI1_VL2MTU_ATTR(9);
+HFI1_VL2MTU_ATTR(10);
+HFI1_VL2MTU_ATTR(11);
+HFI1_VL2MTU_ATTR(12);
+HFI1_VL2MTU_ATTR(13);
+HFI1_VL2MTU_ATTR(14);
+HFI1_VL2MTU_ATTR(15);
+
+static struct attribute *vl2mtu_default_attributes[] = {
+ &hfi1_vl2mtu_attr_0.attr,
+ &hfi1_vl2mtu_attr_1.attr,
+ &hfi1_vl2mtu_attr_2.attr,
+ &hfi1_vl2mtu_attr_3.attr,
+ &hfi1_vl2mtu_attr_4.attr,
+ &hfi1_vl2mtu_attr_5.attr,
+ &hfi1_vl2mtu_attr_6.attr,
+ &hfi1_vl2mtu_attr_7.attr,
+ &hfi1_vl2mtu_attr_8.attr,
+ &hfi1_vl2mtu_attr_9.attr,
+ &hfi1_vl2mtu_attr_10.attr,
+ &hfi1_vl2mtu_attr_11.attr,
+ &hfi1_vl2mtu_attr_12.attr,
+ &hfi1_vl2mtu_attr_13.attr,
+ &hfi1_vl2mtu_attr_14.attr,
+ &hfi1_vl2mtu_attr_15.attr,
+ NULL
+};
+
+static ssize_t vl2mtu_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct hfi1_vl2mtu_attr *vlattr =
+ container_of(attr, struct hfi1_vl2mtu_attr, attr);
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, vl2mtu_kobj);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ return sprintf(buf, "%u\n", dd->vld[vlattr->vl].mtu);
+}
+
+static const struct sysfs_ops hfi1_vl2mtu_ops = {
+ .show = vl2mtu_attr_show,
+};
+
+static struct kobj_type hfi1_vl2mtu_ktype = {
+ .release = port_release,
+ .sysfs_ops = &hfi1_vl2mtu_ops,
+ .default_attrs = vl2mtu_default_attributes
+};
+
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+
+ return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+
+static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (!dd->boardname)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
+ return ret;
+}
+
+static ssize_t show_boardversion(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+}
+
+
+static ssize_t show_nctxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /*
+ * Return the smaller of send and receive contexts.
+ * Normally, user level applications would require both a send
+ * and a receive context, so returning the smaller of the two counts
+ * gives a more accurate picture of total contexts available.
+ */
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ min(dd->num_rcv_contexts - dd->first_user_ctxt,
+ (u32)dd->sc_sizes[SC_USER].count));
+}
+
+static ssize_t show_nfreectxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of free user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
+}
+
+static ssize_t show_serial(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
+
+}
+
+static ssize_t store_chip_reset(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = hfi1_reset_device(dd->unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+
+/*
+ * Convert the reported temperature from an integer (reported in
+ * units of 0.25C) to a decimal string in degrees C.
+ */
+#define temp2str(temp, buf, size, idx) \
+ scnprintf((buf) + (idx), (size) - (idx), "%u.%02u ", \
+ ((temp) >> 2), ((temp) & 0x3) * 25)
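+
+/*
+ * E.g. a raw reading of 101 gives 101 >> 2 = 25 whole degrees and
+ * (101 & 0x3) * 25 = 25 hundredths, so temp2str() emits "25.25 ".
+ */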
+
+/*
+ * Dump tempsense values, in decimal, to ease shell scripting.
+ */
+static ssize_t show_tempsense(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+ struct hfi1_temp temp;
+ int ret = -ENXIO;
+
+ ret = hfi1_tempsense_rd(dd, &temp);
+ if (!ret) {
+ int idx = 0;
+
+ idx += temp2str(temp.curr, buf, PAGE_SIZE, idx);
+ idx += temp2str(temp.lo_lim, buf, PAGE_SIZE, idx);
+ idx += temp2str(temp.hi_lim, buf, PAGE_SIZE, idx);
+ idx += temp2str(temp.crit_lim, buf, PAGE_SIZE, idx);
+ idx += scnprintf(buf + idx, PAGE_SIZE - idx,
+ "%u %u %u\n", temp.triggers & 0x1,
+ temp.triggers & 0x2, temp.triggers & 0x4);
+ ret = idx;
+ }
+ return ret;
+}
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_hfi, NULL);
+static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+
+static struct device_attribute *hfi1_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_board_id,
+ &dev_attr_nctxts,
+ &dev_attr_nfreectxts,
+ &dev_attr_serial,
+ &dev_attr_boardversion,
+ &dev_attr_tempsense,
+ &dev_attr_chip_reset,
+};
+
+int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (!port_num || port_num > dd->num_pports) {
+ dd_dev_err(dd,
+ "Skipping infiniband class with invalid port %u\n",
+ port_num);
+ return -ENODEV;
+ }
+ ppd = &dd->pport[port_num - 1];
+
+ ret = kobject_init_and_add(&ppd->sc2vl_kobj, &hfi1_sc2vl_ktype, kobj,
+ "sc2vl");
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping sc2vl sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail;
+ }
+ kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->sl2sc_kobj, &hfi1_sl2sc_ktype, kobj,
+ "sl2sc");
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping sl2sc sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_sc2vl;
+ }
+ kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->vl2mtu_kobj, &hfi1_vl2mtu_ktype, kobj,
+ "vl2mtu");
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping vl2mtu sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_sl2sc;
+ }
+ kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
+
+
+ ret = kobject_init_and_add(&ppd->pport_cc_kobj, &port_cc_ktype,
+ kobj, "CCMgtA");
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_vl2mtu;
+ }
+
+ kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
+ &cc_setting_bin_attr);
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_cc;
+ }
+
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
+ &cc_table_bin_attr);
+ if (ret) {
+ dd_dev_err(dd,
+ "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_cc_entry_bin;
+ }
+
+ dd_dev_info(dd,
+ "IB%u: Congestion Control Agent enabled for port %d\n",
+ dd->unit, port_num);
+
+ return 0;
+
+bail_cc_entry_bin:
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj,
+ &cc_setting_bin_attr);
+bail_cc:
+ kobject_put(&ppd->pport_cc_kobj);
+bail_vl2mtu:
+ kobject_put(&ppd->vl2mtu_kobj);
+bail_sl2sc:
+ kobject_put(&ppd->sl2sc_kobj);
+bail_sc2vl:
+ kobject_put(&ppd->sc2vl_kobj);
+bail:
+ return ret;
+}
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
+{
+ struct ib_device *dev = &dd->verbs_dev.ibdev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
+ ret = device_create_file(&dev->dev, hfi1_attributes[i]);
+ if (ret)
+ goto bail;
+ }
+
+ return 0;
+bail:
+ for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
+ device_remove_file(&dev->dev, hfi1_attributes[i]);
+ return ret;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd;
+ int i;
+
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj,
+ &cc_setting_bin_attr);
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj,
+ &cc_table_bin_attr);
+ kobject_put(&ppd->pport_cc_kobj);
+ kobject_put(&ppd->vl2mtu_kobj);
+ kobject_put(&ppd->sl2sc_kobj);
+ kobject_put(&ppd->sc2vl_kobj);
+ }
+}
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c
new file mode 100644
index 000000000000..70ad7b9fc1ce
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/trace.c
@@ -0,0 +1,221 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
+{
+ struct hfi1_other_headers *ohdr;
+ u8 opcode;
+ u8 lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+
+ if (lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ return hdr_len_by_opcode[opcode] == 0 ?
+ 0 : hdr_len_by_opcode[opcode] - (12 + 8);
+}
+
+#define IMM_PRN "imm %d"
+#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
+#define AETH_PRN "aeth syn 0x%.2x msn 0x%.8x"
+#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
+#define ATOMICACKETH_PRN "origdata %lld"
+#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
+
+#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
+
+static u64 ib_u64_get(__be32 *p)
+{
+ return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
+}
+
+const char *parse_everbs_hdrs(
+ struct trace_seq *p,
+ u8 opcode,
+ void *ehdrs)
+{
+ union ib_ehdrs *eh = ehdrs;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ switch (opcode) {
+ /* imm */
+ case OP(RC, SEND_LAST_WITH_IMMEDIATE):
+ case OP(UC, SEND_LAST_WITH_IMMEDIATE):
+ case OP(RC, SEND_ONLY_WITH_IMMEDIATE):
+ case OP(UC, SEND_ONLY_WITH_IMMEDIATE):
+ case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ trace_seq_printf(p, IMM_PRN,
+ be32_to_cpu(eh->imm_data));
+ break;
+ /* reth + imm */
+ case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ trace_seq_printf(p, RETH_PRN " " IMM_PRN,
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->rc.reth.vaddr),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length),
+ be32_to_cpu(eh->rc.imm_data));
+ break;
+ /* reth */
+ case OP(RC, RDMA_READ_REQUEST):
+ case OP(RC, RDMA_WRITE_FIRST):
+ case OP(UC, RDMA_WRITE_FIRST):
+ case OP(RC, RDMA_WRITE_ONLY):
+ case OP(UC, RDMA_WRITE_ONLY):
+ trace_seq_printf(p, RETH_PRN,
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->rc.reth.vaddr),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length));
+ break;
+ case OP(RC, RDMA_READ_RESPONSE_FIRST):
+ case OP(RC, RDMA_READ_RESPONSE_LAST):
+ case OP(RC, RDMA_READ_RESPONSE_ONLY):
+ case OP(RC, ACKNOWLEDGE):
+ trace_seq_printf(p, AETH_PRN,
+ be32_to_cpu(eh->aeth) >> 24,
+ be32_to_cpu(eh->aeth) & HFI1_QPN_MASK);
+ break;
+ /* aeth + atomicacketh */
+ case OP(RC, ATOMIC_ACKNOWLEDGE):
+ trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
+ (be32_to_cpu(eh->at.aeth) >> 24) & 0xff,
+ be32_to_cpu(eh->at.aeth) & HFI1_QPN_MASK,
+ (unsigned long long)ib_u64_get(eh->at.atomic_ack_eth));
+ break;
+ /* atomiceth */
+ case OP(RC, COMPARE_SWAP):
+ case OP(RC, FETCH_ADD):
+ trace_seq_printf(p, ATOMICETH_PRN,
+ (unsigned long long)ib_u64_get(eh->atomic_eth.vaddr),
+		be32_to_cpu(eh->atomic_eth.rkey),
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->atomic_eth.swap_data),
+		(unsigned long long)ib_u64_get(
+ (__be32 *)&eh->atomic_eth.compare_data));
+ break;
+ /* deth */
+ case OP(UD, SEND_ONLY):
+ case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
+ trace_seq_printf(p, DETH_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & HFI1_QPN_MASK);
+ break;
+ }
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *parse_sdma_flags(
+ struct trace_seq *p,
+ u64 desc0, u64 desc1)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ char flags[5] = { 'x', 'x', 'x', 'x', 0 };
+
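+	/* I = interrupt request, H = head-to-host, F = first, L = last descriptor */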
+ flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
+ flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-';
+ flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
+ flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
+ trace_seq_printf(p, "%s", flags);
+ if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
+ trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
+ (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT)
+ & SDMA_DESC1_HEADER_MODE_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT)
+ & SDMA_DESC1_HEADER_INDEX_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT)
+ & SDMA_DESC1_HEADER_DWS_MASK));
+ return ret;
+}
+
+const char *print_u32_array(
+ struct trace_seq *p,
+ u32 *arr, int len)
+{
+ int i;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+	for (i = 0; i < len; i++)
+ trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *print_u64_array(
+ struct trace_seq *p,
+ u64 *arr, int len)
+{
+ int i;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ for (i = 0; i < len; i++)
+ trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
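+/*
+ * Instantiate the debug trace wrappers declared via __hfi1_trace_def()
+ * in trace.h; each expands to a printf-style function that fires the
+ * corresponding hfi1_<level> tracepoint.
+ */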
+__hfi1_trace_fn(PKT);
+__hfi1_trace_fn(PROC);
+__hfi1_trace_fn(SDMA);
+__hfi1_trace_fn(LINKVERB);
+__hfi1_trace_fn(DEBUG);
+__hfi1_trace_fn(SNOOP);
+__hfi1_trace_fn(CNTR);
+__hfi1_trace_fn(PIO);
+__hfi1_trace_fn(DC8051);
+__hfi1_trace_fn(FIRMWARE);
+__hfi1_trace_fn(RCVCTRL);
+__hfi1_trace_fn(TID);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h
new file mode 100644
index 000000000000..d7851c0a0171
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/trace.h
@@ -0,0 +1,1409 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR hfi1
+
+#if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "sdma.h"
+
+#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
+#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
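+/*
+ * DD_DEV_ENTRY/DD_DEV_ASSIGN record the PCI device name
+ * (e.g. "0000:05:00.0") in each trace entry.
+ */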
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
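+/*
+ * __print_symbolic() maps the numeric receive type to its name when the
+ * trace is decoded, e.g. RHF_RCV_TYPE_IB is rendered as "IB".
+ */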
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rx
+
+TRACE_EVENT(hfi1_rcvhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u64 eflags,
+ u32 ctxt,
+ u32 etype,
+ u32 hlen,
+ u32 tlen,
+ u32 updegr,
+ u32 etail),
+	    TP_ARGS(dd, eflags, ctxt, etype, hlen, tlen, updegr, etail),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, etype)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->eflags = eflags;
+ __entry->ctxt = ctxt;
+ __entry->etype = etype;
+ __entry->hlen = hlen;
+ __entry->tlen = tlen;
+ __entry->updegr = updegr;
+ __entry->etail = etail;
+ ),
+ TP_printk(
+"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->etype, show_packettype(__entry->etype),
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
+);
+
+TRACE_EVENT(hfi1_receive_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
+ TP_ARGS(dd, ctxt),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u32, ctxt)
+ __field(u8, slow_path)
+ __field(u8, dma_rtail)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt) {
+ __entry->slow_path = 1;
+ __entry->dma_rtail = 0xFF;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+		    &handle_receive_interrupt_dma_rtail) {
+ __entry->dma_rtail = 1;
+ __entry->slow_path = 0;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_nodma_rtail) {
+ __entry->dma_rtail = 0;
+ __entry->slow_path = 0;
+ }
+ ),
+ TP_printk(
+ "[%s] ctxt %d SlowPath: %d DmaRtail: %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->slow_path,
+ __entry->dma_rtail
+ )
+);
+
+const char *print_u64_array(struct trace_seq *, u64 *, int);
+
+TRACE_EVENT(hfi1_exp_tid_map,
+ TP_PROTO(unsigned ctxt, u16 subctxt, int dir,
+ unsigned long *maps, u16 count),
+ TP_ARGS(ctxt, subctxt, dir, maps, count),
+ TP_STRUCT__entry(
+ __field(unsigned, ctxt)
+ __field(u16, subctxt)
+ __field(int, dir)
+ __field(u16, count)
+ __dynamic_array(unsigned long, maps, sizeof(*maps) * count)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->dir = dir;
+ __entry->count = count;
+ memcpy(__get_dynamic_array(maps), maps,
+ sizeof(*maps) * count);
+ ),
+ TP_printk("[%3u:%02u] %s tidmaps %s",
+ __entry->ctxt,
+ __entry->subctxt,
+ (__entry->dir ? ">" : "<"),
+ print_u64_array(p, __get_dynamic_array(maps),
+ __entry->count)
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_rcv_set,
+ TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
+ unsigned long vaddr, u64 phys_addr, void *page),
+ TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page),
+ TP_STRUCT__entry(
+ __field(unsigned, ctxt)
+ __field(u16, subctxt)
+ __field(u32, tid)
+ __field(unsigned long, vaddr)
+ __field(u64, phys_addr)
+ __field(void *, page)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->tid = tid;
+ __entry->vaddr = vaddr;
+ __entry->phys_addr = phys_addr;
+ __entry->page = page;
+ ),
+ TP_printk("[%u:%u] TID %u, vaddrs 0x%lx, physaddr 0x%llx, pgp %p",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->tid,
+ __entry->vaddr,
+ __entry->phys_addr,
+ __entry->page
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_rcv_free,
+ TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
+ unsigned long phys, void *page),
+ TP_ARGS(ctxt, subctxt, tid, phys, page),
+ TP_STRUCT__entry(
+ __field(unsigned, ctxt)
+ __field(u16, subctxt)
+ __field(u32, tid)
+ __field(unsigned long, phys)
+ __field(void *, page)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->tid = tid;
+ __entry->phys = phys;
+ __entry->page = page;
+ ),
+ TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->tid,
+ __entry->phys,
+ __entry->page
+ )
+ );
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tx
+
+TRACE_EVENT(hfi1_piofree,
+ TP_PROTO(struct send_context *sc, int extra),
+ TP_ARGS(sc, extra),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(int, extra)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->extra = extra;
+ ),
+ TP_printk(
+ "[%s] ctxt %u(%u) extra %d",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->extra
+ )
+);
+
+TRACE_EVENT(hfi1_wantpiointr,
+ TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
+ TP_ARGS(sc, needint, credit_ctrl),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(u32, needint)
+ __field(u64, credit_ctrl)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->needint = needint;
+ __entry->credit_ctrl = credit_ctrl;
+ ),
+ TP_printk(
+ "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->needint,
+ (unsigned long long)__entry->credit_ctrl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
+ TP_PROTO(struct hfi1_qp *qp, u32 flags),
+ TP_ARGS(qp, flags),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, s_flags)
+ ),
+ TP_fast_assign(
+		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->flags = flags;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->s_flags
+ )
+);
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
+ TP_PROTO(struct hfi1_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
+ TP_PROTO(struct hfi1_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_qphash
+DECLARE_EVENT_CLASS(hfi1_qphash_template,
+ TP_PROTO(struct hfi1_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
+ TP_PROTO(struct hfi1_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
+ TP_PROTO(struct hfi1_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ibhdrs
+
+u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
+const char *parse_everbs_hdrs(
+ struct trace_seq *p,
+ u8 opcode,
+ void *ehdrs);
+
+#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
+
+const char *parse_sdma_flags(
+ struct trace_seq *p,
+ u64 desc0, u64 desc1);
+
+#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
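+/*
+ * 'p' in the helpers above is the struct trace_seq * that the tracing
+ * core makes implicitly available inside TP_printk().
+ */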
+
+#define lrh_name(lrh) { HFI1_##lrh, #lrh }
+#define show_lnh(lrh) \
+__print_symbolic(lrh, \
+ lrh_name(LRH_BTH), \
+ lrh_name(LRH_GRH))
+
+#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
+#define show_ib_opcode(opcode) \
+__print_symbolic(opcode, \
+ ib_opcode_name(RC_SEND_FIRST), \
+ ib_opcode_name(RC_SEND_MIDDLE), \
+ ib_opcode_name(RC_SEND_LAST), \
+ ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_SEND_ONLY), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_READ_REQUEST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
+ ib_opcode_name(RC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_COMPARE_SWAP), \
+ ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(UC_SEND_FIRST), \
+ ib_opcode_name(UC_SEND_MIDDLE), \
+ ib_opcode_name(UC_SEND_LAST), \
+ ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_SEND_ONLY), \
+ ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UD_SEND_ONLY), \
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE))
+
+#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
+#define BTH_PRN \
+ "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
+ "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
+#define EHDR_PRN "%s"
+
+DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ /* LRH */
+ __field(u8, vl)
+ __field(u8, lver)
+ __field(u8, sl)
+ __field(u8, lnh)
+ __field(u16, dlid)
+ __field(u16, len)
+ __field(u16, slid)
+ /* BTH */
+ __field(u8, opcode)
+ __field(u8, se)
+ __field(u8, m)
+ __field(u8, pad)
+ __field(u8, tver)
+ __field(u16, pkey)
+ __field(u8, f)
+ __field(u8, b)
+ __field(u32, qpn)
+ __field(u8, a)
+ __field(u32, psn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ DD_DEV_ASSIGN(dd);
+ /* LRH */
+ __entry->vl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
+ __entry->lver =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
+ __entry->sl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->lnh =
+ (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ __entry->dlid =
+ be16_to_cpu(hdr->lrh[1]);
+ /* allow for larger len */
+ __entry->len =
+ be16_to_cpu(hdr->lrh[2]);
+ __entry->slid =
+ be16_to_cpu(hdr->lrh[3]);
+ /* BTH */
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ __entry->opcode =
+ (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->se =
+ (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
+ __entry->m =
+ (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
+ __entry->pad =
+ (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ __entry->tver =
+ (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
+ __entry->pkey =
+ be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->f =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
+ & HFI1_FECN_MASK;
+ __entry->b =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
+ & HFI1_BECN_MASK;
+ __entry->qpn =
+ be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ __entry->a =
+ (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
+ /* allow for larger PSN */
+ __entry->psn =
+ be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
+ /* extended headers */
+ memcpy(
+ __get_dynamic_array(ehdrs),
+ &ohdr->u,
+ ibhdr_exhdr_len(hdr));
+ ),
+ TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
+ __get_str(dev),
+ /* LRH */
+ __entry->vl,
+ __entry->lver,
+ __entry->sl,
+ __entry->lnh, show_lnh(__entry->lnh),
+ __entry->dlid,
+ __entry->len,
+ __entry->slid,
+ /* BTH */
+ __entry->opcode, show_ib_opcode(__entry->opcode),
+ __entry->se,
+ __entry->m,
+ __entry->pad,
+ __entry->tver,
+ __entry->pkey,
+ __entry->f,
+ __entry->b,
+ __entry->qpn,
+ __entry->a,
+ __entry->psn,
+ /* extended headers */
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+#define SNOOP_PRN \
+ "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
+ "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_snoop
+
+TRACE_EVENT(snoop_capture,
+ TP_PROTO(struct hfi1_devdata *dd,
+ int hdr_len,
+ struct hfi1_ib_header *hdr,
+ int data_len,
+ void *data),
+ TP_ARGS(dd, hdr_len, hdr, data_len, data),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, slid)
+ __field(u16, dlid)
+ __field(u32, qpn)
+ __field(u8, opcode)
+ __field(u8, sl)
+ __field(u16, pkey)
+ __field(u32, hdr_len)
+ __field(u32, data_len)
+ __field(u8, lnh)
+ __dynamic_array(u8, raw_hdr, hdr_len)
+ __dynamic_array(u8, raw_pkt, data_len)
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ DD_DEV_ASSIGN(dd);
+ __entry->slid = be16_to_cpu(hdr->lrh[3]);
+ __entry->dlid = be16_to_cpu(hdr->lrh[1]);
+ __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->hdr_len = hdr_len;
+ __entry->data_len = data_len;
+ memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
+ memcpy(__get_dynamic_array(raw_pkt), data, data_len);
+ ),
+ TP_printk("[%s] " SNOOP_PRN,
+ __get_str(dev),
+ __entry->slid,
+ __entry->dlid,
+ __entry->qpn,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->sl,
+ __entry->pkey,
+ __entry->hdr_len,
+ __entry->data_len
+ )
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ctxts
+
+#define UCTXT_FMT \
+ "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
+ "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
+TRACE_EVENT(hfi1_uctxtdata,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
+ TP_ARGS(dd, uctxt),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(unsigned, ctxt)
+ __field(u32, credits)
+ __field(u64, hw_free)
+ __field(u64, piobase)
+ __field(u16, rcvhdrq_cnt)
+ __field(u64, rcvhdrq_phys)
+ __field(u32, eager_cnt)
+ __field(u64, rcvegr_phys)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = uctxt->ctxt;
+ __entry->credits = uctxt->sc->credits;
+ __entry->hw_free = (u64)uctxt->sc->hw_free;
+ __entry->piobase = (u64)uctxt->sc->base_addr;
+ __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->eager_cnt = uctxt->egrbufs.alloced;
+ __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
+ ),
+ TP_printk(
+ "[%s] ctxt %u " UCTXT_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->credits,
+ __entry->hw_free,
+ __entry->piobase,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_phys,
+ __entry->eager_cnt,
+ __entry->rcvegr_phys
+ )
+ );
+
+#define CINFO_FMT \
+ "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
+TRACE_EVENT(hfi1_ctxt_info,
+ TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
+ struct hfi1_ctxt_info cinfo),
+ TP_ARGS(dd, ctxt, subctxt, cinfo),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(unsigned, ctxt)
+ __field(unsigned, subctxt)
+ __field(u16, egrtids)
+ __field(u16, rcvhdrq_cnt)
+ __field(u16, rcvhdrq_size)
+ __field(u16, sdma_ring_size)
+ __field(u32, rcvegr_size)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->egrtids = cinfo.egrtids;
+ __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
+ __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
+ __entry->sdma_ring_size = cinfo.sdma_ring_size;
+ __entry->rcvegr_size = cinfo.rcvegr_size;
+ ),
+ TP_printk(
+ "[%s] ctxt %u:%u " CINFO_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->egrtids,
+ __entry->rcvegr_size,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_size,
+ __entry->sdma_ring_size
+ )
+ );
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_sma
+
+#define BCT_FORMAT \
+ "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
+
+#define BCT(field) \
+ be16_to_cpu( \
+ ((struct buffer_control *)__get_dynamic_array(bct))->field \
+ )
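+/* BCT(field): fetch a big-endian field out of the copied buffer_control */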
+
+DECLARE_EVENT_CLASS(hfi1_bct_template,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __dynamic_array(u8, bct, sizeof(*bc))
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ memcpy(
+ __get_dynamic_array(bct),
+ bc,
+ sizeof(*bc));
+ ),
+ TP_printk(BCT_FORMAT,
+ BCT(overall_shared_limit),
+
+ BCT(vl[0].dedicated),
+ BCT(vl[0].shared),
+
+ BCT(vl[1].dedicated),
+ BCT(vl[1].shared),
+
+ BCT(vl[2].dedicated),
+ BCT(vl[2].shared),
+
+ BCT(vl[3].dedicated),
+ BCT(vl[3].shared),
+
+ BCT(vl[4].dedicated),
+ BCT(vl[4].shared),
+
+ BCT(vl[5].dedicated),
+ BCT(vl[5].shared),
+
+ BCT(vl[6].dedicated),
+ BCT(vl[6].shared),
+
+ BCT(vl[7].dedicated),
+ BCT(vl[7].shared),
+
+ BCT(vl[15].dedicated),
+ BCT(vl[15].shared)
+ )
+);
+
+DEFINE_EVENT(hfi1_bct_template, bct_set,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+DEFINE_EVENT(hfi1_bct_template, bct_get,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_sdma
+
+TRACE_EVENT(hfi1_sdma_descriptor,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 desc0,
+ u64 desc1,
+ u16 e,
+ void *descp),
+ TP_ARGS(sde, desc0, desc1, e, descp),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(void *, descp)
+ __field(u64, desc0)
+ __field(u64, desc1)
+ __field(u16, e)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->desc0 = desc0;
+ __entry->desc1 = desc1;
+ __entry->idx = sde->this_idx;
+ __entry->descp = descp;
+ __entry->e = e;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
+ __get_str(dev),
+ __entry->idx,
+ __parse_sdma_flags(__entry->desc0, __entry->desc1),
+ (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
+ & SDMA_DESC0_PHY_ADDR_MASK,
+ (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
+ & SDMA_DESC1_GENERATION_MASK),
+ (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
+ & SDMA_DESC0_BYTE_COUNT_MASK),
+ __entry->desc0,
+ __entry->desc1,
+ __entry->descp,
+ __entry->e
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_engine_select,
+ TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
+ TP_ARGS(dd, sel, vl, idx),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u32, sel)
+ __field(u8, vl)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->sel = sel;
+ __entry->vl = vl;
+ __entry->idx = idx;
+ ),
+ TP_printk(
+ "[%s] selecting SDE %u sel 0x%x vl %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sel,
+ __entry->vl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 status
+ ),
+ TP_ARGS(sde, status),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(u64, status)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->status = status;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) status %llx",
+ __get_str(dev),
+ __entry->idx,
+ (unsigned long long)__entry->status
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 status
+ ),
+ TP_ARGS(sde, status)
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 status
+ ),
+ TP_ARGS(sde, status)
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ int aidx
+ ),
+ TP_ARGS(sde, aidx),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(int, aidx)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->idx = sde->this_idx;
+ __entry->aidx = aidx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) aidx %d",
+ __get_str(dev),
+ __entry->idx,
+ __entry->aidx
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ int aidx
+ ),
+ TP_ARGS(sde, aidx));
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ int aidx
+ ),
+ TP_ARGS(sde, aidx));
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ __entry->sn = txp ? txp->sn : ~0;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#else
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#endif
+
+DECLARE_EVENT_CLASS(hfi1_sdma_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __entry->sn = sn;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn)
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn)
+);
+
+#define USDMA_HDR_FORMAT \
+ "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
+
+TRACE_EVENT(hfi1_sdma_user_header,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ struct hfi1_pkt_header *hdr, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(__le32, pbc0)
+ __field(__le32, pbc1)
+ __field(__be32, lrh0)
+ __field(__be32, lrh1)
+ __field(__be32, bth0)
+ __field(__be32, bth1)
+ __field(__be32, bth2)
+ __field(__le32, kdeth0)
+ __field(__le32, kdeth1)
+ __field(__le32, kdeth2)
+ __field(__le32, kdeth3)
+ __field(__le32, kdeth4)
+ __field(__le32, kdeth5)
+ __field(__le32, kdeth6)
+ __field(__le32, kdeth7)
+ __field(__le32, kdeth8)
+ __field(u32, tidval)
+ ),
+ TP_fast_assign(
+ __le32 *pbc = (__le32 *)hdr->pbc;
+ __be32 *lrh = (__be32 *)hdr->lrh;
+ __be32 *bth = (__be32 *)hdr->bth;
+ __le32 *kdeth = (__le32 *)&hdr->kdeth;
+
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->pbc0 = pbc[0];
+ __entry->pbc1 = pbc[1];
+ __entry->lrh0 = be32_to_cpu(lrh[0]);
+ __entry->lrh1 = be32_to_cpu(lrh[1]);
+ __entry->bth0 = be32_to_cpu(bth[0]);
+ __entry->bth1 = be32_to_cpu(bth[1]);
+ __entry->bth2 = be32_to_cpu(bth[2]);
+ __entry->kdeth0 = kdeth[0];
+ __entry->kdeth1 = kdeth[1];
+ __entry->kdeth2 = kdeth[2];
+ __entry->kdeth3 = kdeth[3];
+ __entry->kdeth4 = kdeth[4];
+ __entry->kdeth5 = kdeth[5];
+ __entry->kdeth6 = kdeth[6];
+ __entry->kdeth7 = kdeth[7];
+ __entry->kdeth8 = kdeth[8];
+ __entry->tidval = tidval;
+ ),
+ TP_printk(USDMA_HDR_FORMAT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->pbc1,
+ __entry->pbc0,
+ __entry->lrh0,
+ __entry->lrh1,
+ __entry->bth0,
+ __entry->bth1,
+ __entry->bth2,
+ __entry->kdeth0,
+ __entry->kdeth1,
+ __entry->kdeth2,
+ __entry->kdeth3,
+ __entry->kdeth4,
+ __entry->kdeth5,
+ __entry->kdeth6,
+ __entry->kdeth7,
+ __entry->kdeth8,
+ __entry->tidval
+ )
+ );
+
+#define SDMA_UREQ_FMT \
+ "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
+TRACE_EVENT(hfi1_sdma_user_reqinfo,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
+ TP_ARGS(dd, ctxt, subctxt, i),
+ TP_STRUCT__entry(
+	    DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u8, ver_opcode)
+ __field(u8, iovcnt)
+ __field(u16, npkts)
+ __field(u16, fragsize)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->ver_opcode = i[0] & 0xff;
+ __entry->iovcnt = (i[0] >> 8) & 0xff;
+ __entry->npkts = i[1];
+ __entry->fragsize = i[2];
+ __entry->comp_idx = i[3];
+ ),
+ TP_printk(SDMA_UREQ_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->ver_opcode,
+ __entry->iovcnt,
+ __entry->npkts,
+ __entry->fragsize,
+ __entry->comp_idx
+ )
+ );
+
+#define usdma_complete_name(st) { st, #st }
+#define show_usdma_complete_state(st) \
+ __print_symbolic(st, \
+ usdma_complete_name(FREE), \
+ usdma_complete_name(QUEUED), \
+ usdma_complete_name(COMPLETE), \
+ usdma_complete_name(ERROR))
+
+TRACE_EVENT(hfi1_sdma_user_completion,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
+ u8 state, int code),
+ TP_ARGS(dd, ctxt, subctxt, idx, state, code),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, idx)
+ __field(u8, state)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->idx = idx;
+ __entry->state = state;
+ __entry->code = code;
+ ),
+ TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
+ __get_str(dev), __entry->ctxt, __entry->subctxt,
+ __entry->idx, show_usdma_complete_state(__entry->state),
+ __entry->code)
+ );
+
+const char *print_u32_array(struct trace_seq *, u32 *, int);
+#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
+
+TRACE_EVENT(hfi1_sdma_user_header_ahg,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u8, sde)
+ __field(u8, idx)
+ __field(int, len)
+ __field(u32, tidval)
+ __array(u32, ahg, 10)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->sde = sde;
+ __entry->idx = ahgidx;
+ __entry->len = len;
+ __entry->tidval = tidval;
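+	    /* __entry->ahg holds at most 10 words; callers must pass len <= 10 */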
+ memcpy(__entry->ahg, ahg, len * sizeof(u32));
+ ),
+ TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->sde,
+ __entry->idx,
+ __entry->len - 1,
+ __print_u32_hex(__entry->ahg, __entry->len),
+ __entry->tidval
+ )
+ );
+
+TRACE_EVENT(hfi1_sdma_state,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ const char *cstate,
+ const char *nstate
+ ),
+ TP_ARGS(sde, cstate, nstate),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(sde->dd)
+ __string(curstate, cstate)
+ __string(newstate, nstate)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(sde->dd);
+ __assign_str(curstate, cstate);
+ __assign_str(newstate, nstate);
+ ),
+ TP_printk("[%s] current state %s new state %s",
+ __get_str(dev),
+ __get_str(curstate),
+ __get_str(newstate)
+ )
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rc
+
+DECLARE_EVENT_CLASS(hfi1_sdma_rc,
+ TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, psn)
+ __field(u32, sending_psn)
+ __field(u32, sending_hpsn)
+ ),
+ TP_fast_assign(
+		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->sending_psn = qp->s_sending_psn;
+ __entry->sending_hpsn = qp->s_sending_hpsn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x psn 0x%x sending_psn 0x%x sending_hpsn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->psn,
+ __entry->sending_psn,
+		__entry->sending_hpsn
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_rc, hfi1_rc_sendcomplete,
+ TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_misc
+
+TRACE_EVENT(hfi1_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
+ int src),
+ TP_ARGS(dd, is_entry, src),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __array(char, buf, 64)
+ __field(int, src)
+ ),
+ TP_fast_assign(
+		DD_DEV_ASSIGN(dd);
+ is_entry->is_name(__entry->buf, 64, src - is_entry->start);
+ __entry->src = src;
+ ),
+ TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
+ __entry->src)
+);
+
+/*
+ * Note:
+ * This produces a REALLY ugly trace in the console output when the string is
+ * too long.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_trace
+
+#define MAX_MSG_LEN 512
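+
+/*
+ * Messages longer than MAX_MSG_LEN are truncated; the WARN_ON_ONCE in
+ * TP_fast_assign below fires on the first truncation.
+ */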
+
+DECLARE_EVENT_CLASS(hfi1_trace_template,
+ TP_PROTO(const char *function, struct va_format *vaf),
+ TP_ARGS(function, vaf),
+ TP_STRUCT__entry(
+ __string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("(%s) %s",
+ __get_str(function),
+ __get_str(msg))
+);
+
+/*
+ * It may be nice to macroize the __hfi1_trace functions, but the va_*
+ * stuff requires an actual function to work and cannot be in a macro.
+ */
+#define __hfi1_trace_def(lvl) \
+void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+#define __hfi1_trace_fn(lvl) \
+void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ trace_hfi1_ ##lvl(func, &vaf); \
+ va_end(args); \
+}
+
+/*
+ * To create a new trace level, simply define it below and as a __hfi1_trace_fn
+ * in trace.c. This will create all the hooks for calling
+ * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
+ * the debugfs stuff.
+ */
+__hfi1_trace_def(PKT);
+__hfi1_trace_def(PROC);
+__hfi1_trace_def(SDMA);
+__hfi1_trace_def(LINKVERB);
+__hfi1_trace_def(DEBUG);
+__hfi1_trace_def(SNOOP);
+__hfi1_trace_def(CNTR);
+__hfi1_trace_def(PIO);
+__hfi1_trace_def(DC8051);
+__hfi1_trace_def(FIRMWARE);
+__hfi1_trace_def(RCVCTRL);
+__hfi1_trace_def(TID);
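+
+/*
+ * e.g. a hypothetical new level would add __hfi1_trace_def(QSFP) here
+ * and a matching __hfi1_trace_fn(QSFP) in trace.c, making
+ * hfi1_cdbg(QSFP, fmt, ...) available.
+ */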
+
+#define hfi1_cdbg(which, fmt, ...) \
+ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
+
+#define hfi1_dbg(fmt, ...) \
+ hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
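+
+/*
+ * Example (illustrative): hfi1_cdbg(SDMA, "engine %u stalled", idx)
+ * expands to __hfi1_trace_SDMA(__func__, ...), which fires the
+ * hfi1_SDMA tracepoint defined above.
+ */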
+
+/*
+ * Define HFI1_EARLY_DBG at compile time or here to enable early trace
+ * messages. Do not check in an enablement for this.
+ */
+
+#ifdef HFI1_EARLY_DBG
+#define hfi1_dbg_early(fmt, ...) \
+ trace_printk(fmt, ##__VA_ARGS__)
+#else
+#define hfi1_dbg_early(fmt, ...)
+#endif
+
+#endif /* __HFI1_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c
new file mode 100644
index 000000000000..ea54fd2700ad
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/twsi.c
@@ -0,0 +1,518 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+#include "twsi.h"
+
+/*
+ * "Two Wire Serial Interface" support.
+ *
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards. Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic interface
+ * (e.g. pretending we don't know whether '1' is the higher voltage),
+ * as the restrictions of the generic i2c interface (e.g. no access
+ * from the driver itself) make it unsuitable for this use.
+ */
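+
+/*
+ * Typical use (sketch; per twsi.h, callers must hold qsfp_lock):
+ *
+ *	hfi1_twsi_reset(dd, target);
+ *	ret = hfi1_twsi_blk_rd(dd, target, dev, offset, buf, len);
+ */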
+
+#define READ_CMD 1
+#define WRITE_CMD 0
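+/* R/W bit ORed into the low bit of the 8-bit i2c device address byte */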
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the hfi1_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip. Since we are delaying anyway, the cost doesn't
+ * hurt, and makes the bit twiddling more regular
+ */
+static void i2c_wait_for_writes(struct hfi1_devdata *dd, u32 target)
+{
+ /*
+ * implicit read of EXTStatus is as good as explicit
+ * read of scratch, if all we want to do is flush
+ * writes.
+ */
+ hfi1_gpio_mod(dd, target, 0, 0, 0);
+ rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
+ * for "almost compliant" modules
+ */
+#define SCL_WAIT_USEC 1000
+
+/* BUF_WAIT is the time the bus must be free between a STOP or ACK and
+ * the next START. Should be 20 usec, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
+static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
+{
+ u32 mask;
+
+ udelay(1);
+
+ mask = QSFP_HFI0_I2CCLK;
+
+	/* SCL is meant to be open-drain, so never set "OUT", just DIR */
+ hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
+
+ /*
+	 * Allow for slow slaves with a simple delay on the falling
+	 * edge, sampling on the rising edge.
+ */
+ if (!bit)
+ udelay(2);
+ else {
+ int rise_usec;
+
+ for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+ if (mask & hfi1_gpio_mod(dd, target, 0, 0, 0))
+ break;
+ udelay(2);
+ }
+ if (rise_usec <= 0)
+ dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+ SCL_WAIT_USEC);
+ }
+ i2c_wait_for_writes(dd, target);
+}
+
+static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit)
+{
+ u32 mask;
+
+ mask = QSFP_HFI0_I2CDAT;
+
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
+
+ i2c_wait_for_writes(dd, target);
+ udelay(2);
+}
+
+static u8 sda_in(struct hfi1_devdata *dd, u32 target, int wait)
+{
+ u32 read_val, mask;
+
+ mask = QSFP_HFI0_I2CDAT;
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ hfi1_gpio_mod(dd, target, 0, 0, mask);
+ read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
+ if (wait)
+ i2c_wait_for_writes(dd, target);
+ return (read_val & mask) >> GPIO_SDA_NUM;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the hfi1_ib device
+ */
+static int i2c_ackrcv(struct hfi1_devdata *dd, u32 target)
+{
+ u8 ack_received;
+
+ /* AT ENTRY SCL = LOW */
+ /* change direction, ignore data */
+ ack_received = sda_in(dd, target, 1);
+ scl_out(dd, target, 1);
+ ack_received = sda_in(dd, target, 1) == 0;
+ scl_out(dd, target, 0);
+ return ack_received;
+}
+
+static void stop_cmd(struct hfi1_devdata *dd, u32 target);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the hfi1_ib device
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct hfi1_devdata *dd, u32 target, int last)
+{
+ int bit_cntr, data;
+
+ data = 0;
+
+ for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+ data <<= 1;
+ scl_out(dd, target, 1);
+ data |= sda_in(dd, target, 0);
+ scl_out(dd, target, 0);
+ }
+ if (last) {
+ scl_out(dd, target, 1);
+ stop_cmd(dd, target);
+ } else {
+ sda_out(dd, target, 0);
+ scl_out(dd, target, 1);
+ scl_out(dd, target, 0);
+ sda_out(dd, target, 1);
+ }
+ return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the hfi1_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct hfi1_devdata *dd, u32 target, u8 data)
+{
+ int bit_cntr;
+ u8 bit;
+
+ for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+ bit = (data >> bit_cntr) & 1;
+ sda_out(dd, target, bit);
+ scl_out(dd, target, 1);
+ scl_out(dd, target, 0);
+ }
+ return (!i2c_ackrcv(dd, target)) ? 1 : 0;
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct hfi1_devdata *dd, u32 target)
+{
+ sda_out(dd, target, 1);
+ scl_out(dd, target, 1);
+ sda_out(dd, target, 0);
+ udelay(1);
+ scl_out(dd, target, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the hfi1_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct hfi1_devdata *dd, u32 target)
+{
+ scl_out(dd, target, 0);
+ sda_out(dd, target, 0);
+ scl_out(dd, target, 1);
+ sda_out(dd, target, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the hfi1_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct hfi1_devdata *dd, u32 target)
+{
+ stop_seq(dd, target);
+ udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * hfi1_twsi_reset - reset I2C communication
+ * @dd: the hfi1_ib device
+ */
+int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target)
+{
+ int clock_cycles_left = 9;
+ int was_high = 0;
+ u32 pins, mask;
+
+	/*
+	 * Both SCL and SDA should be high. If not, there
+	 * is something wrong.
+	 */
+ mask = QSFP_HFI0_I2CCLK | QSFP_HFI0_I2CDAT;
+
+ /*
+ * Force pins to desired innocuous state.
+ * This is the default power-on state with out=0 and dir=0,
+	 * so tri-stated and should be floating high (barring HW problems).
+ */
+ hfi1_gpio_mod(dd, target, 0, 0, mask);
+
+ /*
+ * Clock nine times to get all listeners into a sane state.
+ * If SDA does not go high at any point, we are wedged.
+ * One vendor recommends then issuing START followed by STOP.
+	 * We cannot use our "normal" functions to do that, because
+ * if SCL drops between them, another vendor's part will
+ * wedge, dropping SDA and keeping it low forever, at the end of
+ * the next transaction (even if it was not the device addressed).
+ * So our START and STOP take place with SCL held high.
+ */
+ while (clock_cycles_left--) {
+ scl_out(dd, target, 0);
+ scl_out(dd, target, 1);
+ /* Note if SDA is high, but keep clocking to sync slave */
+ was_high |= sda_in(dd, target, 0);
+ }
+
+ if (was_high) {
+ /*
+ * We saw a high, which we hope means the slave is sync'd.
+ * Issue START, STOP, pause for T_BUF.
+ */
+
+ pins = hfi1_gpio_mod(dd, target, 0, 0, 0);
+ if ((pins & mask) != mask)
+ dd_dev_err(dd, "GPIO pins not at rest: %d\n",
+ pins & mask);
+ /* Drop SDA to issue START */
+ udelay(1); /* Guarantee .6 uSec setup */
+ sda_out(dd, target, 0);
+ udelay(1); /* Guarantee .6 uSec hold */
+ /* At this point, SCL is high, SDA low. Raise SDA for STOP */
+ sda_out(dd, target, 1);
+ udelay(TWSI_BUF_WAIT_USEC);
+ }
+
+ return !was_high;
+}
+
+#define HFI1_TWSI_START 0x100
+#define HFI1_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * Returns 0 if OK (ACK received), else != 0.
+ */
+static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags)
+{
+ int ret = 1;
+
+ if (flags & HFI1_TWSI_START)
+ start_seq(dd, target);
+
+ /* Leaves SCL low (from i2c_ackrcv()) */
+ ret = wr_byte(dd, target, data);
+
+ if (flags & HFI1_TWSI_STOP)
+ stop_cmd(dd, target);
+ return ret;
+}
+
+/* Added functionality for IBA7220-based cards */
+#define HFI1_TEMP_DEV 0x98
+
+/*
+ * hfi1_twsi_blk_rd
+ * General interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device. On all other devices found on board handled by
+ * this driver, the device is followed by a one-byte "address" which selects
+ * the "register" or "offset" within the device from which data should
+ * be read.
+ */
+int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
+ void *buffer, int len)
+{
+ int ret;
+ u8 *bp = buffer;
+
+ ret = 1;
+
+ if (dev == HFI1_TWSI_NO_DEV) {
+ /* legacy not-really-I2C */
+ addr = (addr << 1) | READ_CMD;
+ ret = twsi_wr(dd, target, addr, HFI1_TWSI_START);
+ } else {
+ /* Actual I2C */
+ ret = twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START);
+ if (ret) {
+ stop_cmd(dd, target);
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * SFF spec claims we do _not_ stop after the addr
+ * but simply issue a start with the "read" dev-addr.
+ * Since we are implicitly waiting for ACK here,
+ * we need t_buf (nominally 20uSec) before that start,
+ * and cannot rely on the delay built in to the STOP
+ */
+ ret = twsi_wr(dd, target, addr, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+
+ if (ret) {
+ dd_dev_err(dd,
+ "Failed to write interface read addr %02X\n",
+ addr);
+ ret = 1;
+ goto bail;
+ }
+ ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START);
+ }
+ if (ret) {
+ stop_cmd(dd, target);
+ ret = 1;
+ goto bail;
+ }
+
+ /*
+	 * block devices keep clocking data out as long as we ack,
+ * automatically incrementing the address. Some have "pages"
+ * whose boundaries will not be crossed, but the handling
+ * of these is left to the caller, who is in a better
+ * position to know.
+ */
+ while (len-- > 0) {
+ /*
+ * Get and store data, sending ACK if length remaining,
+ * else STOP
+ */
+ *bp++ = rd_byte(dd, target, !len);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * hfi1_twsi_blk_wr
+ * General interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as an
+ * address within the device. On all other devices found on boards
+ * handled by this driver, the device code is followed by a one-byte
+ * "address" which selects the "register" or "offset" within the
+ * device to which data should be written.
+ */
+int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
+ const void *buffer, int len)
+{
+ int sub_len;
+ const u8 *bp = buffer;
+ int max_wait_time, i;
+ int ret = 1;
+
+ while (len > 0) {
+ if (dev == HFI1_TWSI_NO_DEV) {
+ if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD,
+ HFI1_TWSI_START)) {
+ goto failed_write;
+ }
+ } else {
+ /* Real I2C */
+ if (twsi_wr(dd, target,
+ dev | WRITE_CMD, HFI1_TWSI_START))
+ goto failed_write;
+ ret = twsi_wr(dd, target, addr, 0);
+ if (ret) {
+ dd_dev_err(dd,
+ "Failed to write interface write addr %02X\n",
+ addr);
+ goto failed_write;
+ }
+ }
+
+ sub_len = min(len, 4);
+ addr += sub_len;
+ len -= sub_len;
+
+ for (i = 0; i < sub_len; i++)
+ if (twsi_wr(dd, target, *bp++, 0))
+ goto failed_write;
+
+ stop_cmd(dd, target);
+
+ /*
+		 * Wait for the write to complete by waiting for a successful
+		 * read (the chip replies with a zero after the write
+		 * cmd completes, and before it writes to the eeprom).
+ * The startcmd for the read will fail the ack until
+ * the writes have completed. We do this inline to avoid
+ * the debug prints that are in the real read routine
+ * if the startcmd fails.
+ * We also use the proper device address, so it doesn't matter
+ * whether we have real eeprom_dev. Legacy likes any address.
+ */
+ max_wait_time = 100;
+ while (twsi_wr(dd, target,
+ dev | READ_CMD, HFI1_TWSI_START)) {
+ stop_cmd(dd, target);
+ if (!--max_wait_time)
+ goto failed_write;
+ }
+ /* now read (and ignore) the resulting byte */
+ rd_byte(dd, target, 1);
+ }
+
+ ret = 0;
+ goto bail;
+
+failed_write:
+ stop_cmd(dd, target);
+ ret = 1;
+
+bail:
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h
new file mode 100644
index 000000000000..5907e029613d
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/twsi.h
@@ -0,0 +1,68 @@
+#ifndef _TWSI_H
+#define _TWSI_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define HFI1_TWSI_NO_DEV 0xFF
+
+struct hfi1_devdata;
+
+/* Bit position of SDA pin in ASIC_QSFP* registers */
+#define GPIO_SDA_NUM 1
+
+/* these functions must be called with qsfp_lock held */
+int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target);
+int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
+ void *buffer, int len);
+int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
+ const void *buffer, int len);
+
+
+#endif /* _TWSI_H */
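
The header states the locking rule in a comment but shows no caller. A hedged
usage sketch follows; the lock is passed in explicitly because its exact home
in the driver's structures is not visible here, and the 0-on-success return
convention is assumed from the write path earlier in this patch:

#include <linux/mutex.h>
#include "hfi.h"	/* struct hfi1_devdata */
#include "twsi.h"

/* Hypothetical caller; qsfp_lock placement is an assumption, not driver code. */
static int read_qsfp_byte(struct hfi1_devdata *dd, struct mutex *qsfp_lock,
			  u32 target, int dev, int addr, u8 *val)
{
	int ret;

	mutex_lock(qsfp_lock);		/* the header's locking contract */
	ret = hfi1_twsi_blk_rd(dd, target, dev, addr, val, 1);
	mutex_unlock(qsfp_lock);
	return ret;			/* assumed 0 on success */
}
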
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
new file mode 100644
index 000000000000..b536f397737c
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/uc.c
@@ -0,0 +1,585 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "sdma.h"
+#include "qp.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
+
+/**
+ * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int hfi1_make_uc_req(struct hfi1_qp *qp)
+{
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_swqe *wqe;
+ unsigned long flags;
+ u32 hwords = 5;
+ u32 bth0 = 0;
+ u32 len;
+ u32 pmtu = qp->pmtu;
+ int ret = 0;
+ int middle = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_iowait.sdma_busy)) {
+ qp->s_flags |= HFI1_S_WAIT_DMA;
+ goto bail;
+ }
+ clear_ahg(qp);
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ ohdr = &qp->s_hdr->ibh.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr->ibh.u.l.oth;
+
+ /* Get the next send request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ qp->s_wqe = NULL;
+ switch (qp->s_state) {
+ default:
+ if (!(ib_hfi1_state_ops[qp->state] &
+ HFI1_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /* Check if send work queue is empty. */
+ if (qp->s_cur == qp->s_head) {
+ clear_ahg(qp);
+ goto bail;
+ }
+ /*
+ * Start a new request.
+ */
+ wqe->psn = qp->s_next_psn;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ len = wqe->length;
+ qp->s_len = len;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state =
+ OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ break;
+
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ middle = HFI1_CAP_IS_KSET(SDMA_AHG);
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_cur_size = len;
+ hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+ mask_psn(qp->s_next_psn++), middle);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~HFI1_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
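
The FIRST/MIDDLE/LAST progression above reduces to simple PMTU arithmetic:
every packet but the last carries exactly one PMTU of payload. A stand-alone
illustration (user-space sketch, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int len = 10000, pmtu = 4096, pkts = 0;

	while (len > pmtu) {	/* SEND_FIRST, then SEND_MIDDLE */
		len -= pmtu;
		pkts++;
	}
	pkts++;			/* SEND_LAST (or SEND_ONLY if it all fit) */
	printf("%u packets, last one carries %u bytes\n", pkts, len);
	return 0;
}
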
+
+/**
+ * hfi1_uc_rcv - handle an incoming UC packet
+ * @packet: the incoming packet, carrying the header, payload,
+ *          receive flags, and the QP the packet is for
+ *
+ * This is called from qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void hfi1_uc_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_ib_header *hdr = packet->hdr;
+ u32 rcv_flags = packet->rcv_flags;
+ void *data = packet->ebuf;
+ u32 tlen = packet->tlen;
+ struct hfi1_qp *qp = packet->qp;
+ struct hfi1_other_headers *ohdr = packet->ohdr;
+ u32 opcode;
+ u32 hdrsize = packet->hlen;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = qp->pmtu;
+ struct ib_reth *reth;
+ int has_grh = rcv_flags & HFI1_HAS_GRH;
+ int ret;
+ u32 bth1;
+ struct ib_grh *grh = NULL;
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ return;
+
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
+ if (bth1 & HFI1_BECN_SMASK) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 rqpn, lqpn;
+ u16 rlid = be16_to_cpu(hdr->lrh[3]);
+ u8 sl, sc5;
+
+ lqpn = bth1 & HFI1_QPN_MASK;
+ rqpn = qp->remote_qpn;
+
+ sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ sl = ibp->sc_to_sl[sc5];
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn,
+ IB_CC_SVCTYPE_UC);
+ }
+
+ if (bth1 & HFI1_FECN_SMASK) {
+ u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+ u16 dlid = be16_to_cpu(hdr->lrh[1]);
+ u32 src_qp = qp->remote_qpn;
+ u8 sc5;
+
+ sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+
+ return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
+ }
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+
+ /* Compare the PSN against the expected PSN. */
+ if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
+ /*
+ * Handle a sequence error.
+ * Silently drop any current message.
+ */
+ qp->r_psn = psn;
+inv:
+ if (qp->r_state == OP(SEND_FIRST) ||
+ qp->r_state == OP(SEND_MIDDLE)) {
+ set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+ } else
+ hfi1_put_ss(&qp->r_sge);
+ qp->r_state = OP(SEND_LAST);
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ goto send_first;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ goto rdma_first;
+
+ default:
+ goto drop;
+ }
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ default:
+ if (opcode == OP(SEND_FIRST) ||
+ opcode == OP(SEND_ONLY) ||
+ opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_FIRST) ||
+ opcode == OP(RDMA_WRITE_ONLY) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ break;
+ goto inv;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ qp_comm_est(qp);
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+send_first:
+ if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ qp->r_sge = qp->s_rdma_read_sge;
+ else {
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ /*
+ * qp->s_rdma_read_sge will be the owner
+ * of the mr references.
+ */
+ qp->s_rdma_read_sge = qp->r_sge;
+ }
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto no_immediate_data;
+ else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+ goto send_last_imm;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ /* Check the length against the PMTU and the posted rwqe length. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto rewind;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto rewind;
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 0);
+ break;
+
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
+ case OP(SEND_LAST):
+no_immediate_data:
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto rewind;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto rewind;
+ wc.opcode = IB_WC_RECV;
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 0);
+ hfi1_put_ss(&qp->s_rdma_read_sge);
+last_imm:
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ /*
+ * It seems that IB mandates the presence of an SL in a
+ * work completion only for the UD transport (see section
+ * 11.4.2 of IBTA Vol. 1).
+ *
+ * However, the way the SL is chosen below is consistent
+ * with the way that IB/qib works and tries to avoid
+ * introducing incompatibilities.
+ *
+ * See also OPA Vol. 1, section 9.7.6, and table 9-17.
+ */
+ wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal completion event if the solicited bit is set. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+rdma_first:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ goto drop;
+ }
+ reth = &ohdr->u.rc.reth;
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey */
+ ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto drop;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_ONLY))
+ goto rdma_last;
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
+ goto rdma_last_imm;
+ }
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ /* Check the length against the PMTU and the posted rwqe length. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto drop;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto drop;
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ wc.ex.imm_data = ohdr->u.imm_data;
+rdma_last_imm:
+ wc.wc_flags = IB_WC_WITH_IMM;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ hfi1_put_ss(&qp->s_rdma_read_sge);
+ else {
+ ret = hfi1_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ }
+ wc.byte_len = qp->r_len;
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
+ hfi1_put_ss(&qp->r_sge);
+ goto last_imm;
+
+ case OP(RDMA_WRITE_LAST):
+rdma_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
+ hfi1_put_ss(&qp->r_sge);
+ break;
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ goto drop;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ return;
+
+rewind:
+ set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+drop:
+ ibp->n_pkt_drops++;
+ return;
+
+op_err:
+ hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ return;
+
+}
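
The receive path above orders 24-bit PSNs across wraparound via cmp_psn().
An illustrative reimplementation of that comparison (the same top-bits trick,
but not the driver's helper):

#include <stdio.h>
#include <stdint.h>

/* Compare 24-bit PSNs with wraparound by shifting them to the top bits. */
static int cmp_psn24(uint32_t a, uint32_t b)
{
	return (int32_t)((a << 8) - (b << 8));
}

int main(void)
{
	/* 0x000001 sorts after 0xffffff once the counter wraps */
	printf("%d\n", cmp_psn24(0x000001, 0xffffff) > 0);	/* 1 */
	printf("%d\n", cmp_psn24(0x000005, 0x000005) == 0);	/* 1 */
	return 0;
}
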
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
new file mode 100644
index 000000000000..d40d1a1e10aa
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -0,0 +1,885 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/net.h>
+#include <rdma/ib_smi.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "qp.h"
+
+/**
+ * ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from hfi1_make_ud_req() to forward a WQE addressed
+ * to the same HFI.
+ * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
+ * while this is being called.
+ */
+static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
+{
+ struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct hfi1_pportdata *ppd;
+ struct hfi1_qp *qp;
+ struct ib_ah_attr *ah_attr;
+ unsigned long flags;
+ struct hfi1_sge_state ssge;
+ struct hfi1_sge *sge;
+ struct ib_wc wc;
+ u32 length;
+ enum ib_qp_type sqptype, dqptype;
+
+ rcu_read_lock();
+
+ qp = hfi1_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ rcu_read_unlock();
+ return;
+ }
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
+ !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+ }
+
+ ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+ ppd = ppd_from_ibp(ibp);
+
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey;
+ u16 slid;
+ u8 sc5 = ibp->sl_to_sc[ah_attr->sl];
+
+ pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
+ slid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
+ qp->s_pkey_index, slid))) {
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(slid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (qp->ibqp.qp_num) {
+ u32 qkey;
+
+ qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+ sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+ if (unlikely(qkey != qp->qkey)) {
+ u16 lid;
+
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ length = swqe->length;
+ memset(&wc, 0, sizeof(wc));
+ wc.byte_len = length + sizeof(struct ib_grh);
+
+ if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = swqe->wr.ex.imm_data;
+ }
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & HFI1_R_REUSE_SGE)
+ qp->r_flags &= ~HFI1_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0) {
+ hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= HFI1_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ ssge.sg_list = swqe->sg_list + 1;
+ ssge.sge = *swqe->sg_list;
+ ssge.num_sge = swqe->wr.num_sge;
+ sge = &ssge.sge;
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ssge.num_sge)
+ *sge = *ssge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+ hfi1_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = sqp->ibqp.qp_num;
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
+ if (sqp->ibqp.qp_type == IB_QPT_GSI ||
+ sqp->ibqp.qp_type == IB_QPT_SMI)
+ wc.pkey_index = swqe->wr.wr.ud.pkey_index;
+ else
+ wc.pkey_index = sqp->s_pkey_index;
+ } else {
+ wc.pkey_index = 0;
+ }
+ wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+ /* Check for loopback when the port lid is not set */
+ if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
+ wc.slid = HFI1_PERMISSIVE_LID;
+ wc.sl = ah_attr->sl;
+ wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->n_loop_pkts++;
+bail_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+ rcu_read_unlock();
+}
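
The qkey selection quoted in the comment above (IBTA 10.2.5) comes down to a
sign test on the WR's qkey, as the (int)... < 0 expression implements it. A
quick stand-alone check:

#include <stdio.h>
#include <stdint.h>

/* High bit set in the WR's qkey means "use the QP's qkey". */
static uint32_t effective_qkey(uint32_t qp_qkey, uint32_t wr_qkey)
{
	return (int32_t)wr_qkey < 0 ? qp_qkey : wr_qkey;
}

int main(void)
{
	printf("0x%08x\n", effective_qkey(0x12345678, 0x80000000)); /* QP's */
	printf("0x%08x\n", effective_qkey(0x12345678, 0x00000042)); /* WR's */
	return 0;
}
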
+
+/**
+ * hfi1_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int hfi1_make_ud_req(struct hfi1_qp *qp)
+{
+ struct hfi1_other_headers *ohdr;
+ struct ib_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct hfi1_swqe *wqe;
+ unsigned long flags;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u16 lrh0;
+ u16 lid;
+ int ret = 0;
+ int next_cur;
+ u8 sc5;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_iowait.sdma_busy)) {
+ qp->s_flags |= HFI1_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
+
+ /* Construct the header. */
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+ if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE ||
+ ah_attr->dlid == HFI1_PERMISSIVE_LID) {
+ lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ if (unlikely(!loopback && (lid == ppd->lid ||
+ (lid == HFI1_PERMISSIVE_LID &&
+ qp->ibqp.qp_type == IB_QPT_GSI)))) {
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (atomic_read(&qp->s_iowait.sdma_busy)) {
+ qp->s_flags |= HFI1_S_WAIT_DMA;
+ goto bail;
+ }
+ qp->s_cur = next_cur;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
+ goto done;
+ }
+ }
+
+ qp->s_cur = next_cur;
+ extra_bytes = -wqe->length & 3;
+ nwords = (wqe->length + extra_bytes) >> 2;
+
+ /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+ qp->s_hdrwords = 7;
+ qp->s_cur_size = wqe->length;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_srate = ah_attr->static_rate;
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ qp->s_wqe = wqe;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ /* Header size in 32-bit words. */
+ qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
+ &ah_attr->grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &qp->s_hdr->ibh.u.l.oth;
+ /*
+ * Don't worry about sending to locally attached multicast
+ * QPs; the spec leaves the behavior unspecified.
+ */
+ } else {
+ /* Header size in 32-bit words. */
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &qp->s_hdr->ibh.u.oth;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_hdrwords++;
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ sc5 = ibp->sl_to_sc[ah_attr->sl];
+ lrh0 |= (ah_attr->sl & 0xf) << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI) {
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ qp->s_sc = 0xf;
+ } else {
+ lrh0 |= (sc5 & 0xf) << 12;
+ qp->s_sc = sc5;
+ }
+ qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ qp->s_hdr->ibh.lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE))
+ qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
+ else {
+ lid = ppd->lid;
+ if (lid) {
+ lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid);
+ } else
+ qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
+ bth0 |= hfi1_get_pkey(ibp, wqe->wr.wr.ud.pkey_index);
+ else
+ bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+ qp->qkey : wqe->wr.wr.ud.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+ /* disarm any ahg */
+ qp->s_hdr->ahgcount = 0;
+ qp->s_hdr->ahgidx = 0;
+ qp->s_hdr->tx_flags = 0;
+ qp->s_hdr->sde = NULL;
+
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~HFI1_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
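
The "extra_bytes = -wqe->length & 3" line above derives the pad to the next
4-byte boundary from the length alone. A quick stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int len;

	for (len = 0; len < 8; len++)
		printf("len=%u pad=%u\n", len, -len & 3u);
	/* pad cycles 0, 3, 2, 1: length + pad is always a multiple of 4 */
	return 0;
}
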
+
+/*
+ * Hardware can't check this so we do it here.
+ *
+ * This is a slightly different algorithm from the standard pkey check. It
+ * special-cases the management keys and allows 0x7fff and 0xffff to be in
+ * the table at the same time.
+ *
+ * @returns the index found or -1 if not found
+ */
+int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned i;
+
+ if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
+ unsigned lim_idx = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
+ /* here we look for an exact match */
+ if (ppd->pkeys[i] == pkey)
+ return i;
+ if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
+ lim_idx = i;
+ }
+
+ /* did not find 0xffff; return the 0x7fff index if we saw one */
+ if (pkey == FULL_MGMT_P_KEY)
+ return lim_idx;
+
+ /* no match... */
+ return -1;
+ }
+
+ pkey &= 0x7fff; /* remove limited/full membership bit */
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
+ if ((ppd->pkeys[i] & 0x7fff) == pkey)
+ return i;
+
+ /*
+ * Should not get here: it means the hardware failed to validate the pkeys.
+ */
+ return -1;
+}
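
A stand-alone rendering of the management-key branch above, showing how a
lookup for 0xffff falls back to 0x7fff's slot when only the limited key is
present (illustrative, with a made-up table):

#include <stdio.h>
#include <stdint.h>

#define FULL_MGMT_P_KEY 0xffff
#define LIM_MGMT_P_KEY  0x7fff

static int lookup_mgmt_pkey(const uint16_t *tbl, int n, uint16_t pkey)
{
	int i, lim_idx = -1;

	for (i = 0; i < n; i++) {
		if (tbl[i] == pkey)		/* exact match wins */
			return i;
		if (tbl[i] == LIM_MGMT_P_KEY)
			lim_idx = i;
	}
	/* 0xffff absent: fall back to 0x7fff's slot (or -1) */
	return pkey == FULL_MGMT_P_KEY ? lim_idx : -1;
}

int main(void)
{
	uint16_t tbl[] = { 0x8001, LIM_MGMT_P_KEY, 0 };

	printf("%d\n", lookup_mgmt_pkey(tbl, 3, FULL_MGMT_P_KEY)); /* 1 */
	printf("%d\n", lookup_mgmt_pkey(tbl, 3, LIM_MGMT_P_KEY));  /* 1 */
	return 0;
}
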
+
+void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
+ u32 pkey, u32 slid, u32 dlid, u8 sc5,
+ const struct ib_grh *old_grh)
+{
+ u64 pbc, pbc_flags = 0;
+ u32 bth0, plen, vl, hwords = 5;
+ u16 lrh0;
+ u8 sl = ibp->sc_to_sl[sc5];
+ struct hfi1_ib_header hdr;
+ struct hfi1_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (old_grh) {
+ struct ib_grh *grh = &hdr.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+ ohdr = &hdr.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+ ohdr = &hdr.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+
+ lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
+
+ bth0 = pkey | (IB_OPCODE_CNP << 24);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+
+ ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(slid);
+
+ plen = 2 /* PBC */ + hwords;
+ pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+ if (pbuf)
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
+ }
+}
+
+/*
+ * opa_smp_check() - Do the regular pkey checking, and the additional
+ * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
+ * ("SMA Packet Checks").
+ *
+ * Note that:
+ * - Checks are done using the pkey directly from the packet's BTH,
+ * and specifically _not_ the pkey that we attach to the completion,
+ * which may be different.
+ * - These checks are specifically for "non-local" SMPs (i.e., SMPs
+ * which originated on another node). SMPs which are sent from, and
+ * destined to, this node are checked in opa_local_smp_check().
+ *
+ * At the point where opa_smp_check() is called, we know:
+ * - destination QP is QP0
+ *
+ * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
+ */
+static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
+ struct hfi1_qp *qp, u16 slid, struct opa_smp *smp)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ /*
+ * I don't think it's possible for us to get here with sc != 0xf,
+ * but check it to be certain.
+ */
+ if (sc5 != 0xf)
+ return 1;
+
+ if (rcv_pkey_check(ppd, pkey, sc5, slid))
+ return 1;
+
+ /*
+ * At this point we know (and so don't need to check again) that
+ * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
+ * (see ingress_pkey_check).
+ */
+ if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+
+ /*
+ * SMPs fall into one of four (disjoint) categories:
+ * SMA request, SMA response, trap, or trap repress.
+ * Our response depends, in part, on which type of
+ * SMP we're processing.
+ *
+ * If this is not an SMA request, or trap repress:
+ * - accept MAD if the port is running an SM
+ * - pkey == FULL_MGMT_P_KEY =>
+ * reply with unsupported method (i.e., just mark
+ * the smp's status field here, and let it be
+ * processed normally)
+ * - pkey != LIM_MGMT_P_KEY =>
+ * increment port recv constraint errors, drop MAD
+ * If this is an SMA request or trap repress:
+ * - pkey != FULL_MGMT_P_KEY =>
+ * increment port recv constraint errors, drop MAD
+ */
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ case IB_MGMT_METHOD_SET:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (pkey != FULL_MGMT_P_KEY) {
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+ break;
+ case IB_MGMT_METHOD_SEND:
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ if (ibp->port_cap_flags & IB_PORT_SM)
+ return 0;
+ if (pkey == FULL_MGMT_P_KEY) {
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ return 0;
+ }
+ if (pkey != LIM_MGMT_P_KEY) {
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * hfi1_ud_rcv - receive an incoming UD packet
+ * @packet: the incoming packet, carrying the header, payload,
+ *          receive flags, and the QP the packet is for
+ *
+ * This is called from qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void hfi1_ud_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_other_headers *ohdr = packet->ohdr;
+ int opcode;
+ u32 hdrsize = packet->hlen;
+ u32 pad;
+ struct ib_wc wc;
+ u32 qkey;
+ u32 src_qp;
+ u16 dlid, pkey;
+ int mgmt_pkey_idx = -1;
+ struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_ib_header *hdr = packet->hdr;
+ u32 rcv_flags = packet->rcv_flags;
+ void *data = packet->ebuf;
+ u32 tlen = packet->tlen;
+ struct hfi1_qp *qp = packet->qp;
+ bool has_grh = rcv_flags & HFI1_HAS_GRH;
+ bool sc4_bit = has_sc4_bit(packet);
+ u8 sc;
+ u32 bth1;
+ int is_mcast;
+ struct ib_grh *grh = NULL;
+
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
+ dlid = be16_to_cpu(hdr->lrh[1]);
+ is_mcast = (dlid > HFI1_MULTICAST_LID_BASE) &&
+ (dlid != HFI1_PERMISSIVE_LID);
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (unlikely(bth1 & HFI1_BECN_SMASK)) {
+ /*
+ * In pre-B0 h/w the CNP_OPCODE is handled via an
+ * error path (errata 291394).
+ */
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ u8 sl, sc5;
+
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc5 |= sc4_bit;
+ sl = ibp->sc_to_sl[sc5];
+
+ process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
+ }
+
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ opcode &= 0xff;
+
+ pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+
+ if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+ u8 sc5;
+
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc5 |= sc4_bit;
+
+ return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
+ }
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
+ */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+
+ tlen -= hdrsize + pad + 4;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE))
+ goto drop;
+ if (qp->ibqp.qp_num > 1) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 slid;
+ u8 sc5;
+
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc5 |= sc4_bit;
+
+ slid = be16_to_cpu(hdr->lrh[3]);
+ if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
+ /*
+ * Traps will not be sent for packets dropped
+ * by the HW. This is fine, as sending trap
+ * for invalid pkeys is optional according to
+ * IB spec (release 1.3, section 10.9.4)
+ */
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ pkey,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) &
+ 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ return;
+ }
+ } else {
+ /* GSI packet */
+ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
+ if (mgmt_pkey_idx < 0)
+ goto drop;
+ }
+ if (unlikely(qkey != qp->qkey)) {
+ hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ return;
+ }
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely(qp->ibqp.qp_num == 1 &&
+ (tlen > 2048 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+ goto drop;
+ } else {
+ /* Received on QP0, and so by definition, this is an SMP */
+ struct opa_smp *smp = (struct opa_smp *)data;
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+ u8 sc5;
+
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc5 |= sc4_bit;
+
+ if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
+ goto drop;
+
+ if (tlen > 2048)
+ goto drop;
+ if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ goto drop;
+
+ /* look up SMI pkey */
+ mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
+ if (mgmt_pkey_idx < 0)
+ goto drop;
+ }
+
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ tlen -= sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+ } else
+ goto drop;
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & HFI1_R_REUSE_SGE)
+ qp->r_flags &= ~HFI1_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = hfi1_get_rwqe(qp, 0);
+ if (ret < 0) {
+ hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ return;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ return;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= HFI1_R_REUSE_SGE;
+ goto drop;
+ }
+ if (has_grh) {
+ hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ hfi1_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ return;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = src_qp;
+
+ if (qp->ibqp.qp_type == IB_QPT_GSI ||
+ qp->ibqp.qp_type == IB_QPT_SMI) {
+ if (mgmt_pkey_idx < 0) {
+ if (net_ratelimit()) {
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
+ qp->ibqp.qp_type);
+ mgmt_pkey_idx = 0;
+ }
+ }
+ wc.pkey_index = (unsigned)mgmt_pkey_idx;
+ } else
+ wc.pkey_index = 0;
+
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+ sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc |= sc4_bit;
+ wc.sl = ibp->sc_to_sl[sc];
+
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = dlid >= HFI1_MULTICAST_LID_BASE ? 0 :
+ dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
+}
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
new file mode 100644
index 000000000000..9071afbd7bf4
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -0,0 +1,156 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "hfi.h"
+
+static void __hfi1_release_user_pages(struct page **p, size_t num_pages,
+ int dirty)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
+}
+
+/*
+ * Call with current->mm->mmap_sem held.
+ */
+static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ unsigned long lock_limit;
+ size_t got;
+ int ret;
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+ num_pages - got, 1, 1,
+ p + got, NULL);
+ if (ret < 0)
+ goto bail_release;
+ }
+
+ current->mm->pinned_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+
+bail_release:
+ __hfi1_release_user_pages(p, got, 0);
+bail:
+ return ret;
+}
+
+/**
+ * hfi1_map_page - a safety wrapper around pci_map_page()
+ *
+ */
+dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ dma_addr_t phys;
+
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+
+ return phys;
+}
+
+/**
+ * hfi1_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages. For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because the caller is doing expected sends on a single virtually
+ * contiguous buffer, so we can do all pages at once).
+ */
+int hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __hfi1_get_user_pages(start_page, num_pages, p);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void hfi1_release_user_pages(struct page **p, size_t num_pages)
+{
+ if (current->mm) /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+
+ __hfi1_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+ current->mm->pinned_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+}
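
A hedged usage sketch of the pinning API above; the caller and the
single-page flow are illustrative, not taken from the driver:

#include <linux/mm.h>
#include "hfi.h"

/* Hypothetical caller of the API above; error handling kept minimal. */
static int pin_and_release_one(unsigned long uaddr, struct page **pagep)
{
	int ret;

	/* the address must be page aligned per hfi1_get_user_pages() */
	ret = hfi1_get_user_pages(uaddr & PAGE_MASK, 1, pagep);
	if (ret < 0)
		return ret;
	/* ... hfi1_map_page() and DMA would happen here ... */
	hfi1_release_user_pages(pagep, 1);	/* unpins and dirties the page */
	return 0;
}
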
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
new file mode 100644
index 000000000000..55526613a522
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -0,0 +1,1444 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mmu_context.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+#include "sdma.h"
+#include "user_sdma.h"
+#include "sdma.h"
+#include "verbs.h" /* for the headers */
+#include "common.h" /* for struct hfi1_tid_info */
+#include "trace.h"
+
+static uint hfi1_sdma_comp_ring_size = 128;
+module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
+MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
+
+/* The maximum number of Data io vectors per message/request */
+#define MAX_VECTORS_PER_REQ 8
+/*
+ * Maximum number of packets to send from each message/request
+ * before moving to the next one.
+ */
+#define MAX_PKTS_PER_QUEUE 16
+
+#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
+
+#define req_opcode(x) \
+ (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_version(x) \
+ (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_iovcnt(x) \
+ (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
+
+/* Number of BTH.PSN bits used for sequence number in expected rcvs */
+#define BTH_SEQ_MASK 0x7ffull
+
+/*
+ * Define fields in the KDETH header so we can update the header
+ * template.
+ */
+#define KDETH_OFFSET_SHIFT 0
+#define KDETH_OFFSET_MASK 0x7fff
+#define KDETH_OM_SHIFT 15
+#define KDETH_OM_MASK 0x1
+#define KDETH_TID_SHIFT 16
+#define KDETH_TID_MASK 0x3ff
+#define KDETH_TIDCTRL_SHIFT 26
+#define KDETH_TIDCTRL_MASK 0x3
+#define KDETH_INTR_SHIFT 28
+#define KDETH_INTR_MASK 0x1
+#define KDETH_SH_SHIFT 29
+#define KDETH_SH_MASK 0x1
+#define KDETH_HCRC_UPPER_SHIFT 16
+#define KDETH_HCRC_UPPER_MASK 0xff
+#define KDETH_HCRC_LOWER_SHIFT 24
+#define KDETH_HCRC_LOWER_MASK 0xff
+
+#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
+#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
+
+#define KDETH_GET(val, field) \
+ (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
+#define KDETH_SET(dw, field, val) do { \
+ u32 dwval = le32_to_cpu(dw); \
+ dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
+ dwval |= (((val) & KDETH_##field##_MASK) << \
+ KDETH_##field##_SHIFT); \
+ dw = cpu_to_le32(dwval); \
+ } while (0)
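
Once the le32 conversions are peeled off, the GET/SET pair above is a plain
mask-and-shift round trip. A stand-alone check using the TID field's shift
and mask:

#include <stdio.h>
#include <stdint.h>

#define TID_SHIFT 16
#define TID_MASK  0x3ff

int main(void)
{
	uint32_t dw = 0;

	/* SET: clear the field, then OR in the masked value */
	dw &= ~((uint32_t)TID_MASK << TID_SHIFT);
	dw |= (0x155u & TID_MASK) << TID_SHIFT;

	/* GET: shift down and mask */
	printf("tid=0x%x\n", (dw >> TID_SHIFT) & TID_MASK);	/* 0x155 */
	return 0;
}
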
+
+#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
+ do { \
+ if ((idx) < ARRAY_SIZE((arr))) \
+ (arr)[(idx++)] = sdma_build_ahg_descriptor( \
+ (__force u16)(value), (dw), (bit), \
+ (width)); \
+ else \
+ return -ERANGE; \
+ } while (0)
+
+/* KDETH OM multipliers and switch over point */
+#define KDETH_OM_SMALL 4
+#define KDETH_OM_LARGE 64
+#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
+
+/* Last packet in the request */
+#define USER_SDMA_TXREQ_FLAGS_LAST_PKT (1 << 0)
+
+#define SDMA_REQ_IN_USE 0
+#define SDMA_REQ_FOR_THREAD 1
+#define SDMA_REQ_SEND_DONE 2
+#define SDMA_REQ_HAVE_AHG 3
+#define SDMA_REQ_HAS_ERROR 4
+#define SDMA_REQ_DONE_ERROR 5
+
+#define SDMA_PKT_Q_INACTIVE (1 << 0)
+#define SDMA_PKT_Q_ACTIVE (1 << 1)
+#define SDMA_PKT_Q_DEFERRED (1 << 2)
+
+/*
+ * Maximum retry attempts to submit a TX request
+ * before putting the process to sleep.
+ */
+#define MAX_DEFER_RETRY_COUNT 1
+
+static unsigned initial_pkt_count = 8;
+
+#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
+
+struct user_sdma_iovec {
+ struct iovec iov;
+ /* number of pages in this vector */
+ unsigned npages;
+ /* array of pinned pages for this vector */
+ struct page **pages;
+ /* offset into the virtual address space of the vector at
+ * which we last left off. */
+ u64 offset;
+};
+
+struct user_sdma_request {
+ struct sdma_req_info info;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+ /* This is the original header from user space */
+ struct hfi1_pkt_header hdr;
+ /*
+ * Pointer to the SDMA engine for this request.
+ * Since different requests could be on different VLs,
+ * each request will need its own engine pointer.
+ */
+ struct sdma_engine *sde;
+ u8 ahg_idx;
+ u32 ahg[9];
+ /*
+ * KDETH.Offset (Eager) field
+ * We need to remember the initial value so the headers
+ * can be updated properly.
+ */
+ u32 koffset;
+ /*
+ * KDETH.OFFSET (TID) field
+ * The offset can cover multiple packets, depending on the
+ * size of the TID entry.
+ */
+ u32 tidoffset;
+ /*
+ * KDETH.OM
+ * Remember this because the header template always sets it
+ * to 0.
+ */
+ u8 omfactor;
+ /*
+ * pointer to the user's task_struct. We are going to
+ * get a reference to it so we can process io vectors
+ * at a later time.
+ */
+ struct task_struct *user_proc;
+ /*
+ * pointer to the user's mm_struct. We are going to
+ * get a reference to it so it doesn't get freed
+ * since we might not be in process context when we
+ * are processing the iov's.
+ * Using this mm_struct, we can get vma based on the
+ * iov's address (find_vma()).
+ */
+ struct mm_struct *user_mm;
+ /*
+ * We copy the iovs for this request (based on
+ * info.iovcnt). These are only the data vectors
+ */
+ unsigned data_iovs;
+ /* total length of the data in the request */
+ u32 data_len;
+ /* progress index moving along the iovs array */
+ unsigned iov_idx;
+ struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
+ /* number of elements copied to the tids array */
+ u16 n_tids;
+ /* TID array values copied from the tid_iov vector */
+ u32 *tids;
+ u16 tididx;
+ u32 sent;
+ u64 seqnum;
+ spinlock_t list_lock;
+ struct list_head txps;
+ unsigned long flags;
+};
+
+struct user_sdma_txreq {
+ /* Packet header for the txreq */
+ struct hfi1_pkt_header hdr;
+ struct sdma_txreq txreq;
+ struct user_sdma_request *req;
+ struct user_sdma_iovec *iovec1;
+ struct user_sdma_iovec *iovec2;
+ u16 flags;
+ unsigned busycount;
+ u64 seqnum;
+};
+
+#define SDMA_DBG(req, fmt, ...) \
+ hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
+ (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
+ ##__VA_ARGS__)
+#define SDMA_Q_DBG(pq, fmt, ...) \
+ hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
+ (pq)->subctxt, ##__VA_ARGS__)
+
+static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
+static int num_user_pages(const struct iovec *);
+static void user_sdma_txreq_cb(struct sdma_txreq *, int, int);
+static void user_sdma_free_request(struct user_sdma_request *);
+static int pin_vector_pages(struct user_sdma_request *,
+ struct user_sdma_iovec *);
+static void unpin_vector_pages(struct user_sdma_iovec *);
+static int check_header_template(struct user_sdma_request *,
+ struct hfi1_pkt_header *, u32, u32);
+static int set_txreq_header(struct user_sdma_request *,
+ struct user_sdma_txreq *, u32);
+static int set_txreq_header_ahg(struct user_sdma_request *,
+ struct user_sdma_txreq *, u32);
+static inline void set_comp_state(struct user_sdma_request *,
+ enum hfi1_sdma_comp_state, int);
+static inline u32 set_pkt_bth_psn(__be32, u8, u32);
+static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
+
+static int defer_packet_queue(
+ struct sdma_engine *,
+ struct iowait *,
+ struct sdma_txreq *,
+ unsigned seq);
+static void activate_packet_queue(struct iowait *, int);
+
+static inline int iovec_may_free(struct user_sdma_iovec *iovec,
+ void (*free)(struct user_sdma_iovec *))
+{
+ if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ free(iovec);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void iovec_set_complete(struct user_sdma_iovec *iovec)
+{
+ iovec->offset = iovec->iov.iov_len;
+}
+
+static int defer_packet_queue(
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *txreq,
+ unsigned seq)
+{
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+ struct user_sdma_txreq *tx =
+ container_of(txreq, struct user_sdma_txreq, txreq);
+
+ if (sdma_progress(sde, seq, txreq)) {
+ if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
+ goto eagain;
+ }
+ /*
+ * We are assuming that if the list is enqueued somewhere, it
+ * is to the dmawait list since that is the only place where
+ * it is supposed to be enqueued.
+ */
+ xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+ write_seqlock(&dev->iowait_lock);
+ if (list_empty(&pq->busy.list))
+ list_add_tail(&pq->busy.list, &sde->dmawait);
+ write_sequnlock(&dev->iowait_lock);
+ return -EBUSY;
+eagain:
+ return -EAGAIN;
+}
+
+static void activate_packet_queue(struct iowait *wait, int reason)
+{
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
+ wake_up(&wait->wait_dma);
+}
+
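+/*
+ * Note that a kmem_cache constructor runs only when a slab object is
+ * first created, not on every allocation, so per-use fields are
+ * re-initialized explicitly after each kmem_cache_alloc() in
+ * user_sdma_send_pkts().
+ */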
+static void sdma_kmem_cache_ctor(void *obj)
+{
+ struct user_sdma_txreq *tx = (struct user_sdma_txreq *)obj;
+
+ memset(tx, 0, sizeof(*tx));
+}
+
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
+{
+ int ret = 0;
+ unsigned memsize;
+ char buf[64];
+ struct hfi1_devdata *dd;
+ struct hfi1_user_sdma_comp_q *cq;
+ struct hfi1_user_sdma_pkt_q *pq;
+ unsigned long flags;
+
+ if (!uctxt || !fp) {
+ ret = -EBADF;
+ goto done;
+ }
+
+ if (!hfi1_sdma_comp_ring_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ dd = uctxt->dd;
+
+ pq = kzalloc(sizeof(*pq), GFP_KERNEL);
+ if (!pq) {
+ dd_dev_err(dd,
+ "[%u:%u] Failed to allocate SDMA request struct\n",
+ uctxt->ctxt, subctxt_fp(fp));
+ goto pq_nomem;
+ }
+ memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
+ pq->reqs = kmalloc(memsize, GFP_KERNEL);
+ if (!pq->reqs) {
+ dd_dev_err(dd,
+ "[%u:%u] Failed to allocate SDMA request queue (%u)\n",
+ uctxt->ctxt, subctxt_fp(fp), memsize);
+ goto pq_reqs_nomem;
+ }
+ INIT_LIST_HEAD(&pq->list);
+ pq->dd = dd;
+ pq->ctxt = uctxt->ctxt;
+ pq->subctxt = subctxt_fp(fp);
+ pq->n_max_reqs = hfi1_sdma_comp_ring_size;
+ pq->state = SDMA_PKT_Q_INACTIVE;
+ atomic_set(&pq->n_reqs, 0);
+
+ iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
+ activate_packet_queue);
+ pq->reqidx = 0;
+	snprintf(buf, sizeof(buf), "txreq-kmem-cache-%u-%u-%u", dd->unit,
+		 uctxt->ctxt, subctxt_fp(fp));
+ pq->txreq_cache = kmem_cache_create(buf,
+ sizeof(struct user_sdma_txreq),
+ L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN,
+ sdma_kmem_cache_ctor);
+ if (!pq->txreq_cache) {
+ dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
+ uctxt->ctxt);
+ goto pq_txreq_nomem;
+ }
+ user_sdma_pkt_fp(fp) = pq;
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ dd_dev_err(dd,
+ "[%u:%u] Failed to allocate SDMA completion queue\n",
+ uctxt->ctxt, subctxt_fp(fp));
+ goto cq_nomem;
+ }
+
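+	/*
+	 * vmalloc_user() returns page-aligned, zeroed memory suitable for
+	 * mapping into user space, which is why the allocation is rounded
+	 * up to a whole number of pages.
+	 */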
+ memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size,
+ PAGE_SIZE);
+ cq->comps = vmalloc_user(memsize);
+ if (!cq->comps) {
+ dd_dev_err(dd,
+ "[%u:%u] Failed to allocate SDMA completion queue entries\n",
+ uctxt->ctxt, subctxt_fp(fp));
+ goto cq_comps_nomem;
+ }
+ cq->nentries = hfi1_sdma_comp_ring_size;
+ user_sdma_comp_fp(fp) = cq;
+
+ spin_lock_irqsave(&uctxt->sdma_qlock, flags);
+ list_add(&pq->list, &uctxt->sdma_queues);
+ spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
+ goto done;
+
+cq_comps_nomem:
+ kfree(cq);
+cq_nomem:
+ kmem_cache_destroy(pq->txreq_cache);
+pq_txreq_nomem:
+ kfree(pq->reqs);
+pq_reqs_nomem:
+ kfree(pq);
+ user_sdma_pkt_fp(fp) = NULL;
+pq_nomem:
+ ret = -ENOMEM;
+done:
+ return ret;
+}
+
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_user_sdma_pkt_q *pq;
+ unsigned long flags;
+
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
+ uctxt->ctxt, fd->subctxt);
+ pq = fd->pq;
+ if (pq) {
+ u16 i, j;
+
+ spin_lock_irqsave(&uctxt->sdma_qlock, flags);
+ if (!list_empty(&pq->list))
+ list_del_init(&pq->list);
+ spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
+ iowait_sdma_drain(&pq->busy);
+ if (pq->reqs) {
+ for (i = 0, j = 0; i < atomic_read(&pq->n_reqs) &&
+ j < pq->n_max_reqs; j++) {
+ struct user_sdma_request *req = &pq->reqs[j];
+
+ if (test_bit(SDMA_REQ_IN_USE, &req->flags)) {
+ set_comp_state(req, ERROR, -ECOMM);
+ user_sdma_free_request(req);
+ i++;
+ }
+ }
+ kfree(pq->reqs);
+ }
+ if (pq->txreq_cache)
+ kmem_cache_destroy(pq->txreq_cache);
+ kfree(pq);
+ fd->pq = NULL;
+ }
+ if (fd->cq) {
+		vfree(fd->cq->comps);
+ kfree(fd->cq);
+ fd->cq = NULL;
+ }
+ return 0;
+}
+
+int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
+ unsigned long dim, unsigned long *count)
+{
+ int ret = 0, i = 0, sent;
+ struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_user_sdma_pkt_q *pq = user_sdma_pkt_fp(fp);
+ struct hfi1_user_sdma_comp_q *cq = user_sdma_comp_fp(fp);
+ struct hfi1_devdata *dd = pq->dd;
+ unsigned long idx = 0;
+ u8 pcount = initial_pkt_count;
+ struct sdma_req_info info;
+ struct user_sdma_request *req;
+ u8 opcode, sc, vl;
+
+ if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
+ hfi1_cdbg(
+ SDMA,
+ "[%u:%u:%u] First vector not big enough for header %lu/%lu",
+ dd->unit, uctxt->ctxt, subctxt_fp(fp),
+ iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
+ if (ret) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
+ dd->unit, uctxt->ctxt, subctxt_fp(fp), ret);
+ ret = -EFAULT;
+ goto done;
+ }
+ trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, subctxt_fp(fp),
+ (u16 *)&info);
+ if (cq->comps[info.comp_idx].status == QUEUED) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
+ dd->unit, uctxt->ctxt, subctxt_fp(fp),
+ info.comp_idx);
+ ret = -EBADSLT;
+ goto done;
+ }
+ if (!info.fragsize) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Request does not specify fragsize",
+ dd->unit, uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
+ ret = -EINVAL;
+ goto done;
+ }
+ /*
+	 * We've done all the safety checks that we can up to this point,
+	 * so now "allocate" the request entry.
+ */
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
+ uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
+ req = pq->reqs + info.comp_idx;
+ memset(req, 0, sizeof(*req));
+ /* Mark the request as IN_USE before we start filling it in. */
+ set_bit(SDMA_REQ_IN_USE, &req->flags);
+ req->data_iovs = req_iovcnt(info.ctrl) - 1;
+ req->pq = pq;
+ req->cq = cq;
+ INIT_LIST_HEAD(&req->txps);
+ spin_lock_init(&req->list_lock);
+ memcpy(&req->info, &info, sizeof(info));
+
+ if (req_opcode(info.ctrl) == EXPECTED)
+ req->data_iovs--;
+
+ if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
+ SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
+ MAX_VECTORS_PER_REQ);
+ ret = -EINVAL;
+ goto done;
+ }
+ /* Copy the header from the user buffer */
+ ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
+ sizeof(req->hdr));
+ if (ret) {
+ SDMA_DBG(req, "Failed to copy header template (%d)", ret);
+ ret = -EFAULT;
+ goto free_req;
+ }
+
+	/* If static rate control is not enabled, sanitize the header. */
+ if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
+ req->hdr.pbc[2] = 0;
+
+ /* Validate the opcode. Do not trust packets from user space blindly. */
+ opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
+ if ((opcode & USER_OPCODE_CHECK_MASK) !=
+ USER_OPCODE_CHECK_VAL) {
+ SDMA_DBG(req, "Invalid opcode (%d)", opcode);
+ ret = -EINVAL;
+ goto free_req;
+ }
+ /*
+ * Validate the vl. Do not trust packets from user space blindly.
+ * VL comes from PBC, SC comes from LRH, and the VL needs to
+ * match the SC look up.
+ */
+ vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
+ sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
+ (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
+ if (vl >= dd->pport->vls_operational ||
+ vl != sc_to_vlt(dd, sc)) {
+ SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ /*
+	 * We should also check BTH.lnh. If it says the next header is a GRH,
+	 * then the RXE parsing will be off and will land in the middle of the
+	 * KDETH header or miss it entirely.
+ */
+ if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
+ SDMA_DBG(req, "User tried to pass in a GRH");
+ ret = -EINVAL;
+ goto free_req;
+ }
+
+ req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
+	/*
+	 * Calculate the initial TID offset based on the values of
+	 * KDETH.OFFSET and KDETH.OM that are passed in.
+	 */
+ req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
+ (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
+ KDETH_OM_LARGE : KDETH_OM_SMALL);
+ SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
+ idx++;
+
+ /* Save all the IO vector structures */
+ while (i < req->data_iovs) {
+ memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
+ req->iovs[i].offset = 0;
+ req->data_len += req->iovs[i++].iov.iov_len;
+ }
+ SDMA_DBG(req, "total data length %u", req->data_len);
+
+ if (pcount > req->info.npkts)
+ pcount = req->info.npkts;
+ /*
+ * Copy any TID info
+ * User space will provide the TID info only when the
+ * request type is EXPECTED. This is true even if there is
+	 * only one packet in the request and the header is already
+	 * set up. The reason for the singular TID case is that the
+ * driver needs to perform safety checks.
+ */
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
+
+ if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
+ ret = -EINVAL;
+ goto free_req;
+ }
+ req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
+ if (!req->tids) {
+ ret = -ENOMEM;
+ goto free_req;
+ }
+ /*
+		 * We have to copy all of the TIDs because they may vary
+ * in size and, therefore, the TID count might not be
+ * equal to the pkt count. However, there is no way to
+ * tell at this point.
+ */
+ ret = copy_from_user(req->tids, iovec[idx].iov_base,
+ ntids * sizeof(*req->tids));
+ if (ret) {
+ SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
+ ntids, ret);
+ ret = -EFAULT;
+ goto free_req;
+ }
+ req->n_tids = ntids;
+ idx++;
+ }
+
+ /* Have to select the engine */
+ req->sde = sdma_select_engine_vl(dd,
+ (u32)(uctxt->ctxt + subctxt_fp(fp)),
+ vl);
+ if (!req->sde || !sdma_running(req->sde)) {
+ ret = -ECOMM;
+ goto free_req;
+ }
+
+ /* We don't need an AHG entry if the request contains only one packet */
+ if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
+ int ahg = sdma_ahg_alloc(req->sde);
+
+ if (likely(ahg >= 0)) {
+ req->ahg_idx = (u8)ahg;
+ set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
+ }
+ }
+
+ set_comp_state(req, QUEUED, 0);
+ /* Send the first N packets in the request to buy us some time */
+ sent = user_sdma_send_pkts(req, pcount);
+ if (unlikely(sent < 0)) {
+ if (sent != -EBUSY) {
+ ret = sent;
+ goto send_err;
+		} else {
+			sent = 0;
+		}
+ }
+ atomic_inc(&pq->n_reqs);
+
+ if (sent < req->info.npkts) {
+ /* Take the references to the user's task and mm_struct */
+ get_task_struct(current);
+ req->user_proc = current;
+
+ /*
+ * This is a somewhat blocking send implementation.
+ * The driver will block the caller until all packets of the
+ * request have been submitted to the SDMA engine. However, it
+ * will not wait for send completions.
+ */
+ while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
+ ret = user_sdma_send_pkts(req, pcount);
+ if (ret < 0) {
+ if (ret != -EBUSY)
+ goto send_err;
+ wait_event_interruptible_timeout(
+ pq->busy.wait_dma,
+ (pq->state == SDMA_PKT_Q_ACTIVE),
+ msecs_to_jiffies(
+ SDMA_IOWAIT_TIMEOUT));
+ }
+ }
+
+ }
+ ret = 0;
+ *count += idx;
+ goto done;
+send_err:
+ set_comp_state(req, ERROR, ret);
+free_req:
+ user_sdma_free_request(req);
+done:
+ return ret;
+}
+
+static inline u32 compute_data_length(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx)
+{
+ /*
+ * Determine the proper size of the packet data.
+ * The size of the data of the first packet is in the header
+ * template. However, it includes the header and ICRC, which need
+ * to be subtracted.
+ * The size of the remaining packets is the minimum of the frag
+ * size (MTU) or remaining data in the request.
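+	 *
+	 * Worked example of the first-packet math (assuming an 8-byte
+	 * PBC): LRH[2] holds the packet length in dwords, so
+	 * (lrh[2] << 2) is the byte count of everything after the PBC,
+	 * including the 4-byte ICRC. Subtracting (sizeof(tx->hdr) - 4),
+	 * i.e. the header minus the PBC plus the ICRC, leaves just the
+	 * payload bytes.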
+ */
+ u32 len;
+
+ if (!req->seqnum) {
+ len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
+ (sizeof(tx->hdr) - 4));
+ } else if (req_opcode(req->info.ctrl) == EXPECTED) {
+ u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
+ PAGE_SIZE;
+		/*
+		 * Get the data length based on the remaining space in the
+		 * TID pair.
+		 */
+ len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
+ /* If we've filled up the TID pair, move to the next one. */
+ if (unlikely(!len) && ++req->tididx < req->n_tids &&
+ req->tids[req->tididx]) {
+ tidlen = EXP_TID_GET(req->tids[req->tididx],
+ LEN) * PAGE_SIZE;
+ req->tidoffset = 0;
+ len = min_t(u32, tidlen, req->info.fragsize);
+ }
+		/*
+		 * Since the TID pairs map entire pages, make sure that we
+		 * are not going to try to send more data than we have
+		 * remaining.
+		 */
+		len = min(len, req->data_len - req->sent);
+	} else {
+		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
+	}
+ SDMA_DBG(req, "Data Length = %u", len);
+ return len;
+}
+
+static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
+{
+ /* (Size of complete header - size of PBC) + 4B ICRC + data length */
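+	/*
+	 * E.g. (illustrative sizes): a 64-byte header with an 8-byte PBC
+	 * and a 256-byte payload yields (64 - 8) + 4 + 256 = 316 bytes.
+	 */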
+ return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
+}
+
+static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+{
+ int ret = 0;
+ unsigned npkts = 0;
+ struct user_sdma_txreq *tx = NULL;
+ struct hfi1_user_sdma_pkt_q *pq = NULL;
+ struct user_sdma_iovec *iovec = NULL;
+
+ if (!req->pq) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pq = req->pq;
+
+ /*
+ * Check if we might have sent the entire request already
+ */
+ if (unlikely(req->seqnum == req->info.npkts)) {
+ if (!list_empty(&req->txps))
+ goto dosend;
+ goto done;
+ }
+
+ if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
+ maxpkts = req->info.npkts - req->seqnum;
+
+ while (npkts < maxpkts) {
+ u32 datalen = 0, queued = 0, data_sent = 0;
+ u64 iov_offset = 0;
+
+ /*
+ * Check whether any of the completions have come back
+ * with errors. If so, we are not going to process any
+ * more packets from this request.
+ */
+ if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
+ set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
+ if (!tx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ tx->flags = 0;
+ tx->req = req;
+ tx->busycount = 0;
+ tx->iovec1 = NULL;
+ tx->iovec2 = NULL;
+
+ if (req->seqnum == req->info.npkts - 1)
+ tx->flags |= USER_SDMA_TXREQ_FLAGS_LAST_PKT;
+
+ /*
+ * Calculate the payload size - this is min of the fragment
+ * (MTU) size or the remaining bytes in the request but only
+ * if we have payload data.
+ */
+ if (req->data_len) {
+ iovec = &req->iovs[req->iov_idx];
+ if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (++req->iov_idx == req->data_iovs) {
+ ret = -EFAULT;
+ goto free_txreq;
+ }
+ iovec = &req->iovs[req->iov_idx];
+ WARN_ON(iovec->offset);
+ }
+
+ /*
+ * This request might include only a header and no user
+			 * data, so pin pages only if there is data and the
+ * pages have not been pinned already.
+ */
+ if (unlikely(!iovec->pages && iovec->iov.iov_len)) {
+ ret = pin_vector_pages(req, iovec);
+ if (ret)
+ goto free_tx;
+ }
+
+ tx->iovec1 = iovec;
+ datalen = compute_data_length(req, tx);
+ if (!datalen) {
+ SDMA_DBG(req,
+ "Request has data but pkt len is 0");
+ ret = -EFAULT;
+ goto free_tx;
+ }
+ }
+
+ if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
+ if (!req->seqnum) {
+ u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
+ u32 lrhlen = get_lrh_len(req->hdr, datalen);
+ /*
+ * Copy the request header into the tx header
+ * because the HW needs a cacheline-aligned
+ * address.
+ * This copy can be optimized out if the hdr
+ * member of user_sdma_request were also
+ * cacheline aligned.
+ */
+ memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
+ if (PBC2LRH(pbclen) != lrhlen) {
+ pbclen = (pbclen & 0xf000) |
+ LRH2PBC(lrhlen);
+ tx->hdr.pbc[0] = cpu_to_le16(pbclen);
+ }
+ ret = sdma_txinit_ahg(&tx->txreq,
+ SDMA_TXREQ_F_AHG_COPY,
+ sizeof(tx->hdr) + datalen,
+ req->ahg_idx, 0, NULL, 0,
+ user_sdma_txreq_cb);
+ if (ret)
+ goto free_tx;
+ ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
+ &tx->hdr,
+ sizeof(tx->hdr));
+ if (ret)
+ goto free_txreq;
+ } else {
+ int changes;
+
+ changes = set_txreq_header_ahg(req, tx,
+ datalen);
+ if (changes < 0)
+ goto free_tx;
+ sdma_txinit_ahg(&tx->txreq,
+ SDMA_TXREQ_F_USE_AHG,
+ datalen, req->ahg_idx, changes,
+ req->ahg, sizeof(req->hdr),
+ user_sdma_txreq_cb);
+ }
+ } else {
+ ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
+ datalen, user_sdma_txreq_cb);
+ if (ret)
+ goto free_tx;
+ /*
+ * Modify the header for this packet. This only needs
+ * to be done if we are not going to use AHG. Otherwise,
+ * the HW will do it based on the changes we gave it
+ * during sdma_txinit_ahg().
+ */
+ ret = set_txreq_header(req, tx, datalen);
+ if (ret)
+ goto free_txreq;
+ }
+
+ /*
+ * If the request contains any data vectors, add up to
+ * fragsize bytes to the descriptor.
+ */
+ while (queued < datalen &&
+ (req->sent + data_sent) < req->data_len) {
+ unsigned long base, offset;
+ unsigned pageidx, len;
+
+ base = (unsigned long)iovec->iov.iov_base;
+ offset = ((base + iovec->offset + iov_offset) &
+ ~PAGE_MASK);
+ pageidx = (((iovec->offset + iov_offset +
+ base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
+ len = offset + req->info.fragsize > PAGE_SIZE ?
+ PAGE_SIZE - offset : req->info.fragsize;
+ len = min((datalen - queued), len);
+ ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ iovec->pages[pageidx],
+ offset, len);
+ if (ret) {
+ dd_dev_err(pq->dd,
+ "SDMA txreq add page failed %d\n",
+ ret);
+ iovec_set_complete(iovec);
+ goto free_txreq;
+ }
+ iov_offset += len;
+ queued += len;
+ data_sent += len;
+ if (unlikely(queued < datalen &&
+ pageidx == iovec->npages &&
+ req->iov_idx < req->data_iovs - 1)) {
+ iovec->offset += iov_offset;
+ iovec = &req->iovs[++req->iov_idx];
+ if (!iovec->pages) {
+ ret = pin_vector_pages(req, iovec);
+ if (ret)
+ goto free_txreq;
+ }
+ iov_offset = 0;
+ tx->iovec2 = iovec;
+			}
+ }
+ /*
+ * The txreq was submitted successfully so we can update
+ * the counters.
+ */
+ req->koffset += datalen;
+ if (req_opcode(req->info.ctrl) == EXPECTED)
+ req->tidoffset += datalen;
+ req->sent += data_sent;
+ if (req->data_len) {
+ if (tx->iovec1 && !tx->iovec2)
+ tx->iovec1->offset += iov_offset;
+ else if (tx->iovec2)
+ tx->iovec2->offset += iov_offset;
+ }
+ /*
+ * It is important to increment this here as it is used to
+ * generate the BTH.PSN and, therefore, can't be bulk-updated
+ * outside of the loop.
+ */
+ tx->seqnum = req->seqnum++;
+ list_add_tail(&tx->txreq.list, &req->txps);
+ npkts++;
+ }
+dosend:
+ ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
+	if (list_empty(&req->txps) && req->seqnum == req->info.npkts) {
+		set_bit(SDMA_REQ_SEND_DONE, &req->flags);
+		/*
+		 * The txreq has already been submitted to the HW queue
+		 * so we can free the AHG entry now. Corruption will not
+		 * happen due to the sequential manner in which
+		 * descriptors are processed.
+		 */
+		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
+			sdma_ahg_free(req->sde, req->ahg_idx);
+	}
+ goto done;
+free_txreq:
+ sdma_txclean(pq->dd, &tx->txreq);
+free_tx:
+ kmem_cache_free(pq->txreq_cache, tx);
+done:
+ return ret;
+}
+
+/*
+ * How many pages in this iovec element?
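+ *
+ * Example: with 4 KiB pages, iov_base == (void *)0x1ffc and iov_len == 8
+ * give spage == 0x1000 and epage == 0x2000; the buffer straddles a page
+ * boundary, so 1 + ((0x2000 - 0x1000) >> PAGE_SHIFT) == 2 pages.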
+ */
+static inline int num_user_pages(const struct iovec *iov)
+{
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+static int pin_vector_pages(struct user_sdma_request *req,
+			    struct user_sdma_iovec *iovec)
+{
+ int ret = 0;
+ unsigned pinned;
+
+ iovec->npages = num_user_pages(&iovec->iov);
+	iovec->pages = kcalloc(iovec->npages, sizeof(*iovec->pages),
+			       GFP_KERNEL);
+ if (!iovec->pages) {
+ SDMA_DBG(req, "Failed page array alloc");
+ ret = -ENOMEM;
+ goto done;
+ }
+ /* If called by the kernel thread, use the user's mm */
+ if (current->flags & PF_KTHREAD)
+ use_mm(req->user_proc->mm);
+ pinned = get_user_pages_fast(
+ (unsigned long)iovec->iov.iov_base,
+ iovec->npages, 0, iovec->pages);
+ /* If called by the kernel thread, unuse the user's mm */
+ if (current->flags & PF_KTHREAD)
+ unuse_mm(req->user_proc->mm);
+ if (pinned != iovec->npages) {
+ SDMA_DBG(req, "Failed to pin pages (%u/%u)", pinned,
+ iovec->npages);
+ ret = -EFAULT;
+ goto pfree;
+ }
+ goto done;
+pfree:
+ unpin_vector_pages(iovec);
+done:
+ return ret;
+}
+
+static void unpin_vector_pages(struct user_sdma_iovec *iovec)
+{
+ unsigned i;
+
+ if (ACCESS_ONCE(iovec->offset) != iovec->iov.iov_len) {
+ hfi1_cdbg(SDMA,
+ "the complete vector has not been sent yet %llu %zu",
+ iovec->offset, iovec->iov.iov_len);
+ return;
+ }
+ for (i = 0; i < iovec->npages; i++)
+ if (iovec->pages[i])
+ put_page(iovec->pages[i]);
+ kfree(iovec->pages);
+ iovec->pages = NULL;
+ iovec->npages = 0;
+ iovec->offset = 0;
+}
+
+static int check_header_template(struct user_sdma_request *req,
+ struct hfi1_pkt_header *hdr, u32 lrhlen,
+ u32 datalen)
+{
+ /*
+ * Perform safety checks for any type of packet:
+	 * - transfer size is a multiple of 64 bytes
+	 * - packet length is a multiple of 4 bytes
+	 * - entire request length is a multiple of 4 bytes
+ * - packet length is not larger than MTU size
+ *
+ * These checks are only done for the first packet of the
+ * transfer since the header is "given" to us by user space.
+ * For the remainder of the packets we compute the values.
+ */
+ if (req->info.fragsize % PIO_BLOCK_SIZE ||
+ lrhlen & 0x3 || req->data_len & 0x3 ||
+ lrhlen > get_lrh_len(*hdr, req->info.fragsize))
+ return -EINVAL;
+
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ /*
+ * The header is checked only on the first packet. Furthermore,
+ * we ensure that at least one TID entry is copied when the
+ * request is submitted. Therefore, we don't have to verify that
+ * tididx points to something sane.
+ */
+ u32 tidval = req->tids[req->tididx],
+ tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
+ tididx = EXP_TID_GET(tidval, IDX),
+ tidctrl = EXP_TID_GET(tidval, CTRL),
+ tidoff;
+ __le32 kval = hdr->kdeth.ver_tid_offset;
+
+ tidoff = KDETH_GET(kval, OFFSET) *
+ (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
+ KDETH_OM_LARGE : KDETH_OM_SMALL);
+ /*
+ * Expected receive packets have the following
+ * additional checks:
+ * - offset is not larger than the TID size
+ * - TIDCtrl values match between header and TID array
+ * - TID indexes match between header and TID array
+ */
+ if ((tidoff + datalen > tidlen) ||
+ KDETH_GET(kval, TIDCTRL) != tidctrl ||
+ KDETH_GET(kval, TID) != tididx)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Correctly set the BTH.PSN field based on type of
+ * transfer - eager packets can just increment the PSN but
+ * expected packets encode generation and sequence in the
+ * BTH.PSN field so just incrementing will result in errors.
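+ *
+ * For expected packets only the low BTH_SEQ_MASK bits advance with the
+ * fragment count; the generation bits above the sequence field pass
+ * through unchanged. For eager packets the whole masked PSN advances.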
+ */
+static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
+{
+ u32 val = be32_to_cpu(bthpsn),
+ mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
+ 0xffffffull),
+ psn = val & mask;
+ if (expct)
+ psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
+ else
+ psn = psn + frags;
+ return psn & mask;
+}
+
+static int set_txreq_header(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 datalen)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct hfi1_pkt_header *hdr = &tx->hdr;
+ u16 pbclen;
+ int ret;
+ u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);
+
+ /* Copy the header template to the request before modification */
+ memcpy(hdr, &req->hdr, sizeof(*hdr));
+
+ /*
+ * Check if the PBC and LRH length are mismatched. If so
+ * adjust both in the header.
+ */
+ pbclen = le16_to_cpu(hdr->pbc[0]);
+ if (PBC2LRH(pbclen) != lrhlen) {
+ pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
+ hdr->pbc[0] = cpu_to_le16(pbclen);
+ hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
+ /*
+ * Third packet
+ * This is the first packet in the sequence that has
+ * a "static" size that can be used for the rest of
+ * the packets (besides the last one).
+ */
+ if (unlikely(req->seqnum == 2)) {
+ /*
+ * From this point on the lengths in both the
+ * PBC and LRH are the same until the last
+ * packet.
+ * Adjust the template so we don't have to update
+ * every packet
+ */
+ req->hdr.pbc[0] = hdr->pbc[0];
+ req->hdr.lrh[2] = hdr->lrh[2];
+ }
+ }
+ /*
+ * We only have to modify the header if this is not the
+ * first packet in the request. Otherwise, we use the
+ * header given to us.
+ */
+ if (unlikely(!req->seqnum)) {
+ ret = check_header_template(req, hdr, lrhlen, datalen);
+ if (ret)
+ return ret;
+		goto done;
+	}
+
+ hdr->bth[2] = cpu_to_be32(
+ set_pkt_bth_psn(hdr->bth[2],
+ (req_opcode(req->info.ctrl) == EXPECTED),
+ req->seqnum));
+
+ /* Set ACK request on last packet */
+ if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+		hdr->bth[2] |= cpu_to_be32(1UL << 31);
+
+ /* Set the new offset */
+ hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
+ /* Expected packets have to fill in the new TID information */
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ tidval = req->tids[req->tididx];
+ /*
+ * If the offset puts us at the end of the current TID,
+ * advance everything.
+ */
+ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE)) {
+ req->tidoffset = 0;
+			/*
+			 * Since we don't copy all the TIDs all at once,
+			 * we have to check again.
+			 */
+ if (++req->tididx > req->n_tids - 1 ||
+ !req->tids[req->tididx]) {
+ return -EINVAL;
+ }
+ tidval = req->tids[req->tididx];
+ }
+ req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
+ KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
+ /* Set KDETH.TIDCtrl based on value for this TID. */
+ KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
+ EXP_TID_GET(tidval, CTRL));
+ /* Set KDETH.TID based on value for this TID */
+ KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
+ EXP_TID_GET(tidval, IDX));
+ /* Clear KDETH.SH only on the last packet */
+ if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+ KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
+ /*
+ * Set the KDETH.OFFSET and KDETH.OM based on size of
+ * transfer.
+ */
+ SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
+ req->tidoffset, req->tidoffset / req->omfactor,
+ !!(req->omfactor - KDETH_OM_SMALL));
+ KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
+ req->tidoffset / req->omfactor);
+ KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
+ !!(req->omfactor - KDETH_OM_SMALL));
+ }
+done:
+ trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
+ req->info.comp_idx, hdr, tidval);
+ return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
+}
+
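+/*
+ * AHG variant of set_txreq_header(): rather than rewriting the whole
+ * header, record (header dword index, bit offset, field width, value)
+ * update tuples in req->ahg via AHG_HEADER_SET() and return the number
+ * of updates; the hardware applies them to its cached header copy. For
+ * example, dword 3 carries LRH.PktLen and dword 6 carries BTH.PSN/A.
+ */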
+static int set_txreq_header_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 len)
+{
+ int diff = 0;
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct hfi1_pkt_header *hdr = &req->hdr;
+ u16 pbclen = le16_to_cpu(hdr->pbc[0]);
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);
+
+ if (PBC2LRH(pbclen) != lrhlen) {
+ /* PBC.PbcLengthDWs */
+ AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
+ cpu_to_le16(LRH2PBC(lrhlen)));
+ /* LRH.PktLen (we need the full 16 bits due to byte swap) */
+ AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
+ cpu_to_be16(lrhlen >> 2));
+ }
+
+ /*
+ * Do the common updates
+ */
+ /* BTH.PSN and BTH.A */
+ val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
+ (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
+ if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+ val32 |= 1UL << 31;
+ AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
+ AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+ /* KDETH.Offset */
+ AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
+ cpu_to_le16(req->koffset & 0xffff));
+ AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
+ cpu_to_le16(req->koffset >> 16));
+ if (req_opcode(req->info.ctrl) == EXPECTED) {
+ __le16 val;
+
+ tidval = req->tids[req->tididx];
+
+ /*
+ * If the offset puts us at the end of the current TID,
+ * advance everything.
+ */
+ if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE)) {
+ req->tidoffset = 0;
+			/*
+			 * Since we don't copy all the TIDs all at once,
+			 * we have to check again.
+			 */
+ if (++req->tididx > req->n_tids - 1 ||
+ !req->tids[req->tididx]) {
+ return -EINVAL;
+ }
+ tidval = req->tids[req->tididx];
+ }
+ req->omfactor = ((EXP_TID_GET(tidval, LEN) *
+ PAGE_SIZE) >=
+ KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
+ KDETH_OM_SMALL;
+ /* KDETH.OM and KDETH.OFFSET (TID) */
+ AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
+ ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
+ ((req->tidoffset / req->omfactor) & 0x7fff)));
+ /* KDETH.TIDCtrl, KDETH.TID */
+ val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
+ (EXP_TID_GET(tidval, IDX) & 0x3ff));
+ /* Clear KDETH.SH on last packet */
+ if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT)) {
+ val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) >> 16);
+ val &= cpu_to_le16(~(1U << 13));
+ AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+		} else {
+			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
+		}
+ }
+
+ trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
+ req->info.comp_idx, req->sde->this_idx,
+ req->ahg_idx, req->ahg, diff, tidval);
+ return diff;
+}
+
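+/*
+ * SDMA completion callback, invoked from the engine's progress path
+ * once the descriptor chain for a single txreq has completed. Any
+ * status other than SDMA_TXREQ_S_OK marks the whole request as errored;
+ * the last completion seen for the request finishes the teardown.
+ */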
+static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status,
+ int drain)
+{
+ struct user_sdma_txreq *tx =
+ container_of(txreq, struct user_sdma_txreq, txreq);
+ struct user_sdma_request *req = tx->req;
+ struct hfi1_user_sdma_pkt_q *pq = req ? req->pq : NULL;
+ u64 tx_seqnum;
+
+ if (unlikely(!req || !pq))
+ return;
+
+ if (tx->iovec1)
+ iovec_may_free(tx->iovec1, unpin_vector_pages);
+ if (tx->iovec2)
+ iovec_may_free(tx->iovec2, unpin_vector_pages);
+
+ tx_seqnum = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+
+ if (status != SDMA_TXREQ_S_OK) {
+ dd_dev_err(pq->dd, "SDMA completion with error %d", status);
+ set_comp_state(req, ERROR, status);
+ set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
+		/*
+		 * Do not free the request until the sender loop has acked
+		 * the error and we've seen all txreqs.
+		 */
+ if (tx_seqnum == ACCESS_ONCE(req->seqnum) &&
+ test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
+ atomic_dec(&pq->n_reqs);
+ user_sdma_free_request(req);
+ }
+	} else if (tx_seqnum == req->info.npkts - 1) {
+		/*
+		 * We've sent and completed all packets in this request.
+		 * Signal completion to the user.
+		 */
+		atomic_dec(&pq->n_reqs);
+		set_comp_state(req, COMPLETE, 0);
+		user_sdma_free_request(req);
+	}
+ if (!atomic_read(&pq->n_reqs))
+ xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+}
+
+static void user_sdma_free_request(struct user_sdma_request *req)
+{
+ if (!list_empty(&req->txps)) {
+ struct sdma_txreq *t, *p;
+
+ list_for_each_entry_safe(t, p, &req->txps, list) {
+ struct user_sdma_txreq *tx =
+ container_of(t, struct user_sdma_txreq, txreq);
+ list_del_init(&t->list);
+ sdma_txclean(req->pq->dd, t);
+ kmem_cache_free(req->pq->txreq_cache, tx);
+ }
+ }
+ if (req->data_iovs) {
+ int i;
+
+ for (i = 0; i < req->data_iovs; i++)
+ if (req->iovs[i].npages && req->iovs[i].pages)
+ unpin_vector_pages(&req->iovs[i]);
+ }
+ if (req->user_proc)
+ put_task_struct(req->user_proc);
+ kfree(req->tids);
+ clear_bit(SDMA_REQ_IN_USE, &req->flags);
+}
+
+static inline void set_comp_state(struct user_sdma_request *req,
+ enum hfi1_sdma_comp_state state,
+ int ret)
+{
+ SDMA_DBG(req, "Setting completion status %u %d", state, ret);
+ req->cq->comps[req->info.comp_idx].status = state;
+ if (state == ERROR)
+ req->cq->comps[req->info.comp_idx].errcode = -ret;
+ trace_hfi1_sdma_user_completion(req->pq->dd, req->pq->ctxt,
+ req->pq->subctxt, req->info.comp_idx,
+ state, ret);
+}
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h
new file mode 100644
index 000000000000..fa4422553e23
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/user_sdma.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/wait.h>
+
+#include "common.h"
+#include "iowait.h"
+
+#define EXP_TID_TIDLEN_MASK 0x7FFULL
+#define EXP_TID_TIDLEN_SHIFT 0
+#define EXP_TID_TIDCTRL_MASK 0x3ULL
+#define EXP_TID_TIDCTRL_SHIFT 20
+#define EXP_TID_TIDIDX_MASK 0x7FFULL
+#define EXP_TID_TIDIDX_SHIFT 22
+#define EXP_TID_GET(tid, field) \
+ (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+
+extern uint extended_psn;
+
+struct hfi1_user_sdma_pkt_q {
+ struct list_head list;
+ unsigned ctxt;
+ unsigned subctxt;
+ u16 n_max_reqs;
+ atomic_t n_reqs;
+ u16 reqidx;
+ struct hfi1_devdata *dd;
+ struct kmem_cache *txreq_cache;
+ struct user_sdma_request *reqs;
+ struct iowait busy;
+ unsigned state;
+};
+
+struct hfi1_user_sdma_comp_q {
+ u16 nentries;
+ struct hfi1_sdma_comp_entry *comps;
+};
+
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *, struct file *);
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *);
+int hfi1_user_sdma_process_request(struct file *, struct iovec *, unsigned long,
+ unsigned long *);
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
new file mode 100644
index 000000000000..53ac21431542
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -0,0 +1,2143 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "device.h"
+#include "trace.h"
+#include "qp.h"
+#include "sdma.h"
+
+unsigned int hfi1_lkey_table_size = 16;
+module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int hfi1_max_pds = 0xFFFF;
+module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+ "Maximum number of protection domains to support");
+
+static unsigned int hfi1_max_ahs = 0xFFFF;
+module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int hfi1_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+ "Maximum number of completion queue entries to support");
+
+unsigned int hfi1_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int hfi1_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int hfi1_max_qps = 16384;
+module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int hfi1_max_sges = 0x60;
+module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int hfi1_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+ "Maximum number of multicast groups to support");
+
+unsigned int hfi1_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
+ uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+ "Maximum number of attached QPs to support");
+
+unsigned int hfi1_max_srqs = 1024;
+module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int hfi1_max_srq_sges = 128;
+module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int hfi1_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
+static void verbs_sdma_complete(
+ struct sdma_txreq *cookie,
+ int status,
+ int drained);
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; hfi1_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = HFI1_POST_RECV_OK,
+ [IB_QPS_RTR] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
+ HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK |
+ HFI1_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
+ HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
+ HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
+ [IB_QPS_ERR] = HFI1_POST_RECV_OK | HFI1_FLUSH_RECV |
+ HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
+};
+
+struct hfi1_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct hfi1_ucontext, ibucontext);
+}
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * Length of header by opcode, 0 --> not supported
+ */
+const u8 hdr_len_by_opcode[256] = {
+ /* RC */
+ [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
+ [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
+ [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
+ [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
+ [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
+ [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
+};
+
+static const opcode_handler opcode_handler_tbl[256] = {
+ /* RC */
+ [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
+ /* CNP */
+ [IB_OPCODE_CNP] = &hfi1_cnp_rcv
+};
+
+/*
+ * System image GUID.
+ */
+__be64 ib_hfi1_sys_image_guid;
+
+/**
+ * hfi1_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ */
+void hfi1_copy_sge(
+ struct hfi1_sge_state *ss,
+ void *data, u32 length,
+ int release)
+{
+ struct hfi1_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ memcpy(sge->vaddr, data, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ hfi1_put_mr(sge->mr);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * hfi1_skip_sge - skip over SGE memory
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ */
+void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
+{
+ struct hfi1_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ hfi1_put_mr(sge->mr);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+}
+
+/**
+ * post_one_send - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
+{
+ struct hfi1_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ struct hfi1_lkey_table *rkt;
+ struct hfi1_pd *pd;
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (unlikely(wr->num_sge > qp->s_max_sge))
+ return -EINVAL;
+
+ ppd = &dd->pport[qp->port_num - 1];
+ ibp = &ppd->ibport_data;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC or
+ * undefined operations.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (wr->opcode == IB_WR_FAST_REG_MR) {
+ return -EINVAL;
+ } else if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
+ return -EINVAL;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+ return -EINVAL;
+ } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
+ return -EINVAL;
+ else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ return -EINVAL;
+ else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
+ return -EINVAL;
+
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+ if (next == qp->s_last)
+ return -ENOMEM;
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.pd);
+ wqe = get_swqe_ptr(qp, qp->s_head);
+ wqe->wr = *wr;
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok)
+ goto bail_inval_free;
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ if (wqe->length > 0x80000000U)
+ goto bail_inval_free;
+ } else {
+ struct hfi1_ah *ah = to_iah(wr->wr.ud.ah);
+
+ atomic_inc(&ah->refcount);
+ }
+ wqe->ssn = qp->s_ssn++;
+ qp->s_head = next;
+
+ return 0;
+
+bail_inval_free:
+ /* release mr holds */
+ while (j) {
+ struct hfi1_sge *sge = &wqe->sg_list[--j];
+
+ hfi1_put_mr(sge->mr);
+ }
+ return -EINVAL;
+}
+
+/**
+ * post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ int err = 0;
+ int call_send;
+ unsigned long flags;
+ unsigned nreq = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Check that state is OK to post send. */
+ if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return -EINVAL;
+ }
+
+	/* If the SQ is empty and there is only one WR, call send directly. */
+ call_send = qp->s_head == qp->s_last && !wr->next;
+
+ for (; wr; wr = wr->next) {
+ err = post_one_send(qp, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ nreq++;
+ }
+bail:
+ if (nreq && !call_send)
+ hfi1_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (nreq && call_send)
+ hfi1_do_send(&qp->s_iowait.iowork);
+ return err;
+}
+
+/**
+ * post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ struct hfi1_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
+ int ret;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct hfi1_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * Make sure the QP is ready and able to accept the given opcode.
+ */
+static inline int qp_ok(int opcode, struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp;
+
+ if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK))
+ goto dropit;
+ if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
+ (opcode == IB_OPCODE_CNP))
+ return 1;
+dropit:
+ ibp = &packet->rcd->ppd->ibport_data;
+ ibp->n_pkt_drops++;
+ return 0;
+}
+
+/**
+ * hfi1_ib_rcv - process an incoming packet
+ * @packet: data packet information
+ *
+ * This is called to process an incoming packet at interrupt level.
+ *
+ * Tlen is the length of the header + data + CRC in bytes.
+ */
+void hfi1_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_ib_header *hdr = packet->hdr;
+ u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+ u16 lid;
+
+ /* Check for GRH */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == HFI1_LRH_BTH)
+ packet->ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH) {
+ u32 vtf;
+
+ packet->ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ packet->rcv_flags |= HFI1_HAS_GRH;
+ } else
+ goto drop;
+
+ trace_input_ibhdr(rcd->dd, hdr);
+
+ opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
+ inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK;
+ lid = be16_to_cpu(hdr->lrh[1]);
+ if (unlikely((lid >= HFI1_MULTICAST_LID_BASE) &&
+ (lid != HFI1_PERMISSIVE_LID))) {
+ struct hfi1_mcast *mcast;
+ struct hfi1_mcast_qp *p;
+
+ if (lnh != HFI1_LRH_GRH)
+ goto drop;
+ mcast = hfi1_mcast_find(ibp, &hdr->u.l.grh.dgid);
+		if (!mcast)
+ goto drop;
+ list_for_each_entry_rcu(p, &mcast->qp_list, list) {
+ packet->qp = p->qp;
+ spin_lock(&packet->qp->r_lock);
+ if (likely((qp_ok(opcode, packet))))
+ opcode_handler_tbl[opcode](packet);
+ spin_unlock(&packet->qp->r_lock);
+ }
+ /*
+ * Notify hfi1_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ rcu_read_lock();
+ packet->qp = hfi1_lookup_qpn(ibp, qp_num);
+ if (!packet->qp) {
+ rcu_read_unlock();
+ goto drop;
+ }
+ spin_lock(&packet->qp->r_lock);
+ if (likely((qp_ok(opcode, packet))))
+ opcode_handler_tbl[opcode](packet);
+ spin_unlock(&packet->qp->r_lock);
+ rcu_read_unlock();
+ }
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(unsigned long data)
+{
+ struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
+ struct list_head *list = &dev->memwait;
+ struct hfi1_qp *qp = NULL;
+ struct iowait *wait;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ if (!list_empty(list)) {
+ wait = list_first_entry(list, struct iowait, list);
+ qp = container_of(wait, struct hfi1_qp, s_iowait);
+ list_del_init(&qp->s_iowait.list);
+ /* refcount held until actual wake up */
+ if (!list_empty(list))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ }
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+
+ if (qp)
+ hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM);
+}
+
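+/**
+ * update_sge - advance an SGE state by @length bytes
+ * @ss: the SGE state
+ * @length: number of bytes to consume from the current SGE
+ *
+ * Steps the cursor within the current SGE; when an SGE is exhausted it
+ * moves to the next entry in the list, and when a registered-memory
+ * segment is exhausted it moves to the next segment of the MR map.
+ */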
+void update_sge(struct hfi1_sge_state *ss, u32 length)
+{
+ struct hfi1_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
+static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct hfi1_qp *qp)
+{
+ struct verbs_txreq *tx;
+ unsigned long flags;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ if (!tx) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ write_seqlock(&dev->iowait_lock);
+ if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
+ list_empty(&qp->s_iowait.list)) {
+ dev->n_txwait++;
+ qp->s_flags |= HFI1_S_WAIT_TX;
+ list_add_tail(&qp->s_iowait.list, &dev->txwait);
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX);
+ atomic_inc(&qp->refcount);
+ }
+ qp->s_flags &= ~HFI1_S_BUSY;
+ write_sequnlock(&dev->iowait_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ tx = ERR_PTR(-EBUSY);
+ }
+ return tx;
+}
+
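+/*
+ * Fast-path allocation of a verbs txreq; on failure the slow path
+ * queues the QP on the txwait list and returns ERR_PTR(-EBUSY).
+ */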
+static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
+ struct hfi1_qp *qp)
+{
+ struct verbs_txreq *tx;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ if (!tx)
+ /* call slow path to get the lock */
+ tx = __get_txreq(dev, qp);
+ if (tx)
+ tx->qp = qp;
+ return tx;
+}
+
+void hfi1_put_txreq(struct verbs_txreq *tx)
+{
+ struct hfi1_ibdev *dev;
+ struct hfi1_qp *qp;
+ unsigned long flags;
+ unsigned int seq;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (tx->mr) {
+ hfi1_put_mr(tx->mr);
+ tx->mr = NULL;
+ }
+ sdma_txclean(dd_from_dev(dev), &tx->txreq);
+
+ /* Free verbs_txreq and return to slab cache */
+ kmem_cache_free(dev->verbs_txreq_cache, tx);
+
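+	/*
+	 * Check the txwait list under a seqlock read; only take the
+	 * write lock when there is a waiter to dequeue.
+	 */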
+ do {
+ seq = read_seqbegin(&dev->iowait_lock);
+ if (!list_empty(&dev->txwait)) {
+ struct iowait *wait;
+
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ /* Wake up first QP wanting a free struct */
+ wait = list_first_entry(&dev->txwait, struct iowait,
+ list);
+ qp = container_of(wait, struct hfi1_qp, s_iowait);
+ list_del_init(&qp->s_iowait.list);
+ /* refcount held until actual wake up */
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX);
+ break;
+ }
+ } while (read_seqretry(&dev->iowait_lock, seq));
+}
+
+/*
+ * This is called with progress side lock held.
+ */
+/* New API */
+static void verbs_sdma_complete(
+ struct sdma_txreq *cookie,
+ int status,
+ int drained)
+{
+ struct verbs_txreq *tx =
+ container_of(cookie, struct verbs_txreq, txreq);
+ struct hfi1_qp *qp = tx->qp;
+
+ spin_lock(&qp->s_lock);
+ if (tx->wqe)
+ hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ struct hfi1_ib_header *hdr;
+
+ hdr = &tx->phdr.hdr;
+ hfi1_rc_send_complete(qp, hdr);
+ }
+ if (drained) {
+ /*
+ * This happens when the send engine notes
+ * a QP in the error state and cannot
+ * do the flush work until that QP's
+ * sdma work has finished.
+ */
+ if (qp->s_flags & HFI1_S_WAIT_DMA) {
+ qp->s_flags &= ~HFI1_S_WAIT_DMA;
+ hfi1_schedule_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+
+ hfi1_put_txreq(tx);
+}
+
+static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
+ write_seqlock(&dev->iowait_lock);
+ if (list_empty(&qp->s_iowait.list)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= HFI1_S_WAIT_KMEM;
+ list_add_tail(&qp->s_iowait.list, &dev->memwait);
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM);
+ atomic_inc(&qp->refcount);
+ }
+ write_sequnlock(&dev->iowait_lock);
+ qp->s_flags &= ~HFI1_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return ret;
+}
+
+/*
+ * This routine calls sdma_txadd_kvaddr() for each SG entry.
+ *
+ * On an add failure, the SGE cursor is reverted.
+ */
+static int build_verbs_ulp_payload(
+ struct sdma_engine *sde,
+ struct hfi1_sge_state *ss,
+ u32 length,
+ struct verbs_txreq *tx)
+{
+ struct hfi1_sge *sg_list = ss->sg_list;
+ struct hfi1_sge sge = ss->sge;
+ u8 num_sge = ss->num_sge;
+ u32 len;
+ int ret = 0;
+
+ while (length) {
+ len = ss->sge.length;
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ WARN_ON_ONCE(len == 0);
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ ss->sge.vaddr,
+ len);
+ if (ret)
+ goto bail_txadd;
+ update_sge(ss, len);
+ length -= len;
+ }
+ return ret;
+bail_txadd:
+ /* unwind cursor */
+ ss->sge = sge;
+ ss->num_sge = num_sge;
+ ss->sg_list = sg_list;
+ return ret;
+}
+
+/*
+ * Build the DMA descriptors needed to send length bytes of data.
+ *
+ * NOTE: DMA mapping is held in the tx until completed in the ring or
+ * the tx desc is freed without having been submitted to the ring
+ *
+ * This routine ensures that all of the helper routine calls
+ * succeed.
+ */
+/* New API */
+static int build_verbs_tx_desc(
+ struct sdma_engine *sde,
+ struct hfi1_sge_state *ss,
+ u32 length,
+ struct verbs_txreq *tx,
+ struct ahg_ib_header *ahdr,
+ u64 pbc)
+{
+ int ret = 0;
+ struct hfi1_pio_header *phdr;
+ u16 hdrbytes = tx->hdr_dwords << 2;
+
+ phdr = &tx->phdr;
+ if (!ahdr->ahgcount) {
+ ret = sdma_txinit_ahg(
+ &tx->txreq,
+ ahdr->tx_flags,
+ hdrbytes + length,
+ ahdr->ahgidx,
+ 0,
+ NULL,
+ 0,
+ verbs_sdma_complete);
+ if (ret)
+ goto bail_txadd;
+ phdr->pbc = cpu_to_le64(pbc);
+ memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));
+ /* add the header */
+ ret = sdma_txadd_kvaddr(
+ sde->dd,
+ &tx->txreq,
+ &tx->phdr,
+ tx->hdr_dwords << 2);
+ if (ret)
+ goto bail_txadd;
+ } else {
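+		/*
+		 * AHG path: the bulk of the header is generated from the
+		 * AHG descriptors passed to sdma_txinit_ahg() below.
+		 */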
+ struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth;
+ struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth;
+
+ /* needed in rc_send_complete() */
+ phdr->hdr.lrh[0] = ahdr->ibh.lrh[0];
+ if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) {
+ sohdr = &ahdr->ibh.u.l.oth;
+ dohdr = &phdr->hdr.u.l.oth;
+ }
+ /* opcode */
+ dohdr->bth[0] = sohdr->bth[0];
+ /* PSN/ACK */
+ dohdr->bth[2] = sohdr->bth[2];
+ ret = sdma_txinit_ahg(
+ &tx->txreq,
+ ahdr->tx_flags,
+ length,
+ ahdr->ahgidx,
+ ahdr->ahgcount,
+ ahdr->ahgdesc,
+ hdrbytes,
+ verbs_sdma_complete);
+ if (ret)
+ goto bail_txadd;
+ }
+
+	/* add the ULP payload, if any; ss can be NULL for ACKs */
+ if (ss)
+ ret = build_verbs_ulp_payload(sde, ss, length, tx);
+bail_txadd:
+ return ret;
+}
+
+int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc)
+{
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct verbs_txreq *tx;
+ struct sdma_txreq *stx;
+ u64 pbc_flags = 0;
+ struct sdma_engine *sde;
+ u8 sc5 = qp->s_sc;
+ int ret;
+
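+	/* Send the oldest deferred txreq first, if any are queued */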
+ if (!list_empty(&qp->s_iowait.tx_head)) {
+ stx = list_first_entry(
+ &qp->s_iowait.tx_head,
+ struct sdma_txreq,
+ list);
+ list_del_init(&stx->list);
+ tx = container_of(stx, struct verbs_txreq, txreq);
+ ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx);
+ if (unlikely(ret == -ECOMM))
+ goto bail_ecomm;
+ return ret;
+ }
+
+ tx = get_txreq(dev, qp);
+ if (IS_ERR(tx))
+ goto bail_tx;
+
+ if (!qp->s_hdr->sde) {
+ tx->sde = sde = qp_to_sdma_engine(qp, sc5);
+ if (!sde)
+ goto bail_no_sde;
+	} else {
+		tx->sde = sde = qp->s_hdr->sde;
+	}
+
+ if (likely(pbc == 0)) {
+ u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+ /* No vl15 here */
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ }
+ tx->wqe = qp->s_wqe;
+ tx->mr = qp->s_rdma_mr;
+ if (qp->s_rdma_mr)
+ qp->s_rdma_mr = NULL;
+ tx->hdr_dwords = hdrwords + 2;
+ ret = build_verbs_tx_desc(sde, ss, len, tx, ahdr, pbc);
+ if (unlikely(ret))
+ goto bail_build;
+ trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
+ ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
+ if (unlikely(ret == -ECOMM))
+ goto bail_ecomm;
+ return ret;
+
+bail_no_sde:
+ hfi1_put_txreq(tx);
+bail_ecomm:
+ /* The current one got "sent" */
+ return 0;
+bail_build:
+ /* kmalloc or mapping fail */
+ hfi1_put_txreq(tx);
+ return wait_kmem(dev, qp);
+bail_tx:
+ return PTR_ERR(tx);
+}
+
+/*
+ * If we are now in the error state, return zero to flush the
+ * send work request.
+ */
+static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc)
+{
+ struct hfi1_devdata *dd = sc->dd;
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+	 * Note that as soon as hfi1_sc_wantpiobuf_intr() is called,
+	 * and possibly before it returns, sc_piobufavail()
+ * could be called. Therefore, put QP on the I/O wait list before
+ * enabling the PIO avail interrupt.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
+ write_seqlock(&dev->iowait_lock);
+ if (list_empty(&qp->s_iowait.list)) {
+			int was_empty;
+
+ dev->n_piowait++;
+ qp->s_flags |= HFI1_S_WAIT_PIO;
+ was_empty = list_empty(&sc->piowait);
+ list_add_tail(&qp->s_iowait.list, &sc->piowait);
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO);
+ atomic_inc(&qp->refcount);
+ /* counting: only call wantpiobuf_intr if first user */
+ if (was_empty)
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+ write_sequnlock(&dev->iowait_lock);
+ qp->s_flags &= ~HFI1_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
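+/*
+ * qp_to_send_context - map the QP's SC to its per-VL send context
+ *
+ * Returns NULL when the SC maps to an unsupported VL (VL15 excepted).
+ */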
+struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1);
+ u8 vl;
+
+ vl = sc_to_vlt(dd, sc5);
+ if (vl >= ppd->vls_supported && vl != 15)
+ return NULL;
+ return dd->vld[vl].sc;
+}
+
+int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 *hdr = (u32 *)&ahdr->ibh;
+ u64 pbc_flags = 0;
+ u32 sc5;
+ unsigned long flags = 0;
+ struct send_context *sc;
+ struct pio_buf *pbuf;
+ int wc_status = IB_WC_SUCCESS;
+
+ /* vl15 special case taken care of in ud.c */
+ sc5 = qp->s_sc;
+ sc = qp_to_send_context(qp, sc5);
+
+ if (!sc)
+ return -EINVAL;
+ if (likely(pbc == 0)) {
+ u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ }
+ pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
+ if (unlikely(pbuf == NULL)) {
+ if (ppd->host_link_state != HLS_UP_ACTIVE) {
+ /*
+ * If we have filled the PIO buffers to capacity and are
+			 * not in an active state, this request is not going to
+			 * go out, so just complete it with an error or else a
+ * ULP or the core may be stuck waiting.
+ */
+ hfi1_cdbg(
+ PIO,
+ "alloc failed. state not active, completing");
+ wc_status = IB_WC_GENERAL_ERR;
+ goto pio_bail;
+ } else {
+ /*
+			 * This is a normal occurrence. The PIO buffers are
+			 * full, but we are still happily sending, so
+			 * continue to queue the request.
+ */
+ hfi1_cdbg(PIO, "alloc failed. state active, queuing");
+ return no_bufs_available(qp, sc);
+ }
+ }
+
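+	/* Header-only packets take the single pio_copy() path */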
+ if (len == 0) {
+ pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
+ } else {
+ if (ss) {
+			seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
+ while (len) {
+ void *addr = ss->sge.vaddr;
+ u32 slen = ss->sge.length;
+
+ if (slen > len)
+ slen = len;
+ update_sge(ss, slen);
+ seg_pio_copy_mid(pbuf, addr, slen);
+ len -= slen;
+ }
+ seg_pio_copy_end(pbuf);
+ }
+ }
+
+ trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
+
+ if (qp->s_rdma_mr) {
+ hfi1_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+
+pio_bail:
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_send_complete(qp, qp->s_wqe, wc_status);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_rc_send_complete(qp, &ahdr->ibh);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
+ * being an entry from the ingress partition key table), return 0
+ * otherwise. Use the matching criteria for egress partition keys
+ * specified in the OPAv1 spec., section 9.11.7.
+ */
+static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
+{
+ u16 mkey = pkey & PKEY_LOW_15_MASK;
+ u16 ment = ent & PKEY_LOW_15_MASK;
+
+ if (mkey == ment) {
+ /*
+ * If pkey[15] is set (full partition member),
+ * is bit 15 in the corresponding table element
+ * clear (limited member)?
+ */
+ if (pkey & PKEY_MEMBER_MASK)
+ return !!(ent & PKEY_MEMBER_MASK);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * egress_pkey_check - return 0 if hdr's pkey matches according to the
+ * criteria in the OPAv1 spec., section 9.11.7.
+ */
+static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
+ struct hfi1_ib_header *hdr,
+ struct hfi1_qp *qp)
+{
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_devdata *dd;
+ int i = 0;
+ u16 pkey;
+ u8 lnh, sc5 = qp->s_sc;
+
+ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
+ return 0;
+
+ /* locate the pkey within the headers */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ ohdr = &hdr->u.oth;
+
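+	/* The pkey occupies the low 16 bits of BTH dword 0 */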
+ pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+
+ /* If SC15, pkey[0:14] must be 0x7fff */
+ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
+ goto bad;
+
+ /* Is the pkey = 0x0, or 0x8000? */
+ if ((pkey & PKEY_LOW_15_MASK) == 0)
+ goto bad;
+
+ /* The most likely matching pkey has index qp->s_pkey_index */
+ if (unlikely(!egress_pkey_matches_entry(pkey,
+ ppd->pkeys[qp->s_pkey_index]))) {
+ /* no match - try the entire table */
+ for (; i < MAX_PKEY_VALUES; i++) {
+ if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
+ break;
+ }
+ }
+
+ if (i < MAX_PKEY_VALUES)
+ return 0;
+bad:
+ incr_cntr64(&ppd->port_xmit_constraint_errors);
+ dd = ppd->dd;
+ if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
+ u16 slid = be16_to_cpu(hdr->lrh[3]);
+
+ dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
+ dd->err_info_xmit_constraint.slid = slid;
+ dd->err_info_xmit_constraint.pkey = pkey;
+ }
+ return 1;
+}
+
+/**
+ * hfi1_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @ahdr: the packet header
+ * @hdrwords: the number of 32-bit words in the header
+ * @ss: the SGE to send
+ * @len: the length of the packet in bytes
+ *
+ * Return zero if packet is sent or queued OK.
+ * Return non-zero and clear HFI1_S_BUSY in qp->s_flags otherwise.
+ */
+int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ u32 plen;
+ int ret;
+ int pio = 0;
+ unsigned long flags = 0;
+ u32 dwords = (len + 3) >> 2;
+
+ /*
+ * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+ * can defer SDMA restart until link goes ACTIVE without
+ * worrying about just how we got there.
+ */
+ if ((qp->ibqp.qp_type == IB_QPT_SMI) ||
+ !(dd->flags & HFI1_HAS_SEND_DMA))
+ pio = 1;
+
+ ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp);
+ if (unlikely(ret)) {
+ /*
+ * The value we are returning here does not get propagated to
+		 * the verbs caller. Thus we need to complete the request
+		 * with an error, otherwise the caller could be sitting
+		 * waiting on the completion event. Only do this for PIO;
+		 * SDMA has its own mechanism for handling errors, so for
+		 * SDMA we can just return.
+ */
+ if (pio) {
+ hfi1_cdbg(PIO, "%s() Failed. Completing with err",
+ __func__);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the send buffer trigger address.
+	 * The +2 accounts for the PBC control qword.
+ */
+ plen = hdrwords + dwords + 2;
+
+ if (pio) {
+ ret = dd->process_pio_send(
+ qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
+ } else {
+#ifdef CONFIG_SDMA_VERBOSITY
+ dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
+ slashstrip(__FILE__), __LINE__, __func__);
+ dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", hdrwords, len);
+#endif
+ ret = dd->process_dma_send(
+ qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
+ }
+
+ return ret;
+}
+
+static int query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibdev *dev = to_idev(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+ memset(props, 0, sizeof(*props));
+
+ props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+
+ props->page_size_cap = PAGE_SIZE;
+ props->vendor_id =
+ dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
+ props->vendor_part_id = dd->pcidev->device;
+ props->hw_ver = dd->minrev;
+ props->sys_image_guid = ib_hfi1_sys_image_guid;
+ props->max_mr_size = ~0ULL;
+ props->max_qp = hfi1_max_qps;
+ props->max_qp_wr = hfi1_max_qp_wrs;
+ props->max_sge = hfi1_max_sges;
+ props->max_sge_rd = hfi1_max_sges;
+ props->max_cq = hfi1_max_cqs;
+ props->max_ah = hfi1_max_ahs;
+ props->max_cqe = hfi1_max_cqes;
+ props->max_mr = dev->lk_table.max;
+ props->max_fmr = dev->lk_table.max;
+ props->max_map_per_fmr = 32767;
+ props->max_pd = hfi1_max_pds;
+ props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
+ props->max_qp_init_rd_atom = 255;
+ /* props->max_res_rd_atom */
+ props->max_srq = hfi1_max_srqs;
+ props->max_srq_wr = hfi1_max_srq_wrs;
+ props->max_srq_sge = hfi1_max_srq_sges;
+ /* props->local_ca_ack_delay */
+ props->atomic_cap = IB_ATOMIC_GLOB;
+ props->max_pkeys = hfi1_get_npkeys(dd);
+ props->max_mcast_grp = hfi1_max_mcast_grps;
+ props->max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+
+ return 0;
+}
+
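+/*
+ * Map OPA link speed flags to the closest IB encoding:
+ * 25G -> EDR, 12.5G -> FDR.
+ */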
+static inline u16 opa_speed_to_ib(u16 in)
+{
+ u16 out = 0;
+
+ if (in & OPA_LINK_SPEED_25G)
+ out |= IB_SPEED_EDR;
+ if (in & OPA_LINK_SPEED_12_5G)
+ out |= IB_SPEED_FDR;
+
+ return out;
+}
+
+/*
+ * Convert a single OPA link width (no multiple flags) to an IB value.
+ * A zero OPA link width means link down, which means the IB width value
+ * is a don't care.
+ */
+static inline u16 opa_width_to_ib(u16 in)
+{
+ switch (in) {
+ case OPA_LINK_WIDTH_1X:
+ /* map 2x and 3x to 1x as they don't exist in IB */
+ case OPA_LINK_WIDTH_2X:
+ case OPA_LINK_WIDTH_3X:
+ return IB_WIDTH_1X;
+ default: /* link down or unknown, return our largest width */
+ case OPA_LINK_WIDTH_4X:
+ return IB_WIDTH_4X;
+ }
+}
+
+static int query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u16 lid = ppd->lid;
+
+ memset(props, 0, sizeof(*props));
+ props->lid = lid ? lid : 0;
+ props->lmc = ppd->lmc;
+ props->sm_lid = ibp->sm_lid;
+ props->sm_sl = ibp->sm_sl;
+ /* OPA logical states match IB logical states */
+ props->state = driver_lstate(ppd);
+ props->phys_state = hfi1_ibphys_portstate(ppd);
+ props->port_cap_flags = ibp->port_cap_flags;
+ props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = hfi1_get_npkeys(dd);
+ props->bad_pkey_cntr = ibp->pkey_violations;
+ props->qkey_viol_cntr = ibp->qkey_violations;
+ props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
+ /* see rate_show() in ib core/sysfs.c */
+ props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
+ props->max_vl_num = ppd->vls_supported;
+ props->init_type_reply = 0;
+
+	/* Once we are a "first class" citizen and have added the OPA MTUs to
+	 * the core, we can advertise the larger MTU enum to the ULPs; for now,
+	 * advertise only 4K.
+ *
+ * Those applications which are either OPA aware or pass the MTU enum
+ * from the Path Records to us will get the new 8k MTU. Those that
+ * attempt to process the MTU enum may fail in various ways.
+ */
+ props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
+ 4096 : hfi1_max_mtu), IB_MTU_4096);
+ props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
+ mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ props->subnet_timeout = ibp->subnet_timeout;
+
+ return 0;
+}
+
+static int port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ memset(immutable, 0, sizeof(*immutable));
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
+ immutable->max_mad_size = OPA_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static int modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ unsigned i;
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(device->node_desc, device_modify->node_desc, 64);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
+
+ hfi1_node_desc_chg(ibp);
+ }
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ ib_hfi1_sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
+
+ hfi1_sys_guid_chg(ibp);
+ }
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ int ret = 0;
+
+ ibp->port_cap_flags |= props->set_port_cap_mask;
+ ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ hfi1_cap_mask_chg(ibp);
+ if (port_modify_mask & IB_PORT_SHUTDOWN) {
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
+ OPA_LINKDOWN_REASON_UNKNOWN);
+ ret = set_link_state(ppd, HLS_DN_DOWNDEF);
+ }
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ ibp->qkey_violations = 0;
+ return ret;
+}
+
+static int query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int ret = 0;
+
+	if (!port || port > dd->num_pports) {
+		ret = -EINVAL;
+	} else {
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ gid->global.subnet_prefix = ibp->gid_prefix;
+ if (index == 0)
+ gid->global.interface_id = cpu_to_be64(ppd->guid);
+ else if (index < HFI1_GUIDS_PER_PORT)
+ gid->global.interface_id = ibp->guids[index - 1];
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ib_pd *alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hfi1_ibdev *dev = to_idev(ibdev);
+ struct hfi1_pd *pd;
+ struct ib_pd *ret;
+
+ /*
+ * This is actually totally arbitrary. Some correctness tests
+ * assume there's a maximum number of PDs that can be allocated.
+ * We don't actually have this limit, but we fail the test if
+ * we allow allocations of more than we report for this value.
+ */
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == hfi1_max_pds) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata != NULL;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+static int dealloc_pd(struct ib_pd *ibpd)
+{
+ struct hfi1_pd *pd = to_ipd(ibpd);
+ struct hfi1_ibdev *dev = to_idev(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
+
+/*
+ * Convert an AH's port and SL to an SC.
+ */
+u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
+{
+ struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);
+
+ return ibp->sl_to_sc[ah->sl];
+}
+
+int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+{
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u8 sc5;
+
+ /* A multicast address requires a GRH (see ch. 8.4.1). */
+ if (ah_attr->dlid >= HFI1_MULTICAST_LID_BASE &&
+ ah_attr->dlid != HFI1_PERMISSIVE_LID &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ goto bail;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= HFI1_GUIDS_PER_PORT)
+ goto bail;
+ if (ah_attr->dlid == 0)
+ goto bail;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ goto bail;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mbps(ah_attr->static_rate) < 0)
+ goto bail;
+ if (ah_attr->sl >= OPA_MAX_SLS)
+ goto bail;
+ /* test the mapping for validity */
+ ibp = to_iport(ibdev, ah_attr->port_num);
+ ppd = ppd_from_ibp(ibp);
+ sc5 = ibp->sl_to_sc[ah_attr->sl];
+ dd = dd_from_ppd(ppd);
+ if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
+ goto bail;
+ return 0;
+bail:
+ return -EINVAL;
+}
+
+/**
+ * create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct hfi1_ah *ah;
+ struct ib_ah *ret;
+ struct hfi1_ibdev *dev = to_idev(pd->device);
+ unsigned long flags;
+
+ if (hfi1_check_ah(pd->device, ah_attr)) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == hfi1_max_ahs) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ kfree(ah);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ /* ib_create_ah() will initialize ah->ibah. */
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ ret = &ah->ibah;
+
+bail:
+ return ret;
+}
+
+struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
+{
+ struct ib_ah_attr attr;
+ struct ib_ah *ah = ERR_PTR(-EINVAL);
+ struct hfi1_qp *qp0;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.dlid = dlid;
+ attr.port_num = ppd_from_ibp(ibp)->port;
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->qp[0]);
+ if (qp0)
+ ah = ib_create_ah(qp0->ibqp.pd, &attr);
+ rcu_read_unlock();
+ return ah;
+}
+
+/**
+ * destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int destroy_ah(struct ib_ah *ibah)
+{
+ struct hfi1_ibdev *dev = to_idev(ibah->device);
+ struct hfi1_ah *ah = to_iah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+static int modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct hfi1_ah *ah = to_iah(ibah);
+
+ if (hfi1_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+static int query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct hfi1_ah *ah = to_iah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
+
+/**
+ * hfi1_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the hfi1_ib device
+ */
+unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
+{
+ return ARRAY_SIZE(dd->pport[0].pkeys);
+}
+
+static int query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (index >= hfi1_get_npkeys(dd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *pkey = hfi1_get_pkey(to_iport(ibdev, port), index);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the driver
+ */
+static struct ib_ucontext *alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct hfi1_ucontext *context;
+ struct ib_ucontext *ret;
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ ret = &context->ibucontext;
+
+bail:
+ return ret;
+}
+
+static int dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static void init_ibport(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ ibp->sl_to_sc[i] = i;
+ ibp->sc_to_sl[i] = i;
+ }
+
+ spin_lock_init(&ibp->lock);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->sm_lid = 0;
+ /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
+ ibp->port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
+ IB_PORT_CAP_MASK_NOTICE_SUP;
+ ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ RCU_INIT_POINTER(ibp->qp[0], NULL);
+ RCU_INIT_POINTER(ibp->qp[1], NULL);
+}
+
+static void verbs_txreq_kmem_cache_ctor(void *obj)
+{
+ struct verbs_txreq *tx = (struct verbs_txreq *)obj;
+
+ memset(tx, 0, sizeof(*tx));
+}
+
+/**
+ * hfi1_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 if successful, errno if unsuccessful.
+ */
+int hfi1_register_ib_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct hfi1_pportdata *ppd = dd->pport;
+ unsigned i, lk_tab_size;
+ int ret;
+ size_t lcpysz = IB_DEVICE_NAME_MAX;
+ u16 descq_cnt;
+
+ ret = hfi1_qp_init(dev);
+ if (ret)
+ goto err_qp_init;
+
+ for (i = 0; i < dd->num_pports; i++)
+ init_ibport(ppd + i);
+
+ /* Only need to initialize non-zero fields. */
+ spin_lock_init(&dev->n_pds_lock);
+ spin_lock_init(&dev->n_ahs_lock);
+ spin_lock_init(&dev->n_cqs_lock);
+ spin_lock_init(&dev->n_qps_lock);
+ spin_lock_init(&dev->n_srqs_lock);
+ spin_lock_init(&dev->n_mcast_grps_lock);
+ init_timer(&dev->mem_timer);
+ dev->mem_timer.function = mem_timer;
+ dev->mem_timer.data = (unsigned long) dev;
+
+ /*
+ * The top hfi1_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ spin_lock_init(&dev->lk_table.lock);
+ dev->lk_table.max = 1 << hfi1_lkey_table_size;
+ /* ensure generation is at least 4 bits (keys.c) */
+ if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ dev->lk_table.table = (struct hfi1_mregion __rcu **)
+ vmalloc(lk_tab_size);
+ if (dev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+ }
+ RCU_INIT_POINTER(dev->dma_mr, NULL);
+ for (i = 0; i < dev->lk_table.max; i++)
+ RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
+ INIT_LIST_HEAD(&dev->pending_mmaps);
+ spin_lock_init(&dev->pending_lock);
+ seqlock_init(&dev->iowait_lock);
+ dev->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&dev->mmap_offset_lock);
+ INIT_LIST_HEAD(&dev->txwait);
+ INIT_LIST_HEAD(&dev->memwait);
+
+ descq_cnt = sdma_get_descq_cnt();
+
+ /* SLAB_HWCACHE_ALIGN for AHG */
+ dev->verbs_txreq_cache = kmem_cache_create("hfi1_vtxreq_cache",
+ sizeof(struct verbs_txreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ verbs_txreq_kmem_cache_ctor);
+ if (!dev->verbs_txreq_cache) {
+ ret = -ENOMEM;
+ goto err_verbs_txreq;
+ }
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * HFIs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!ib_hfi1_sys_image_guid)
+ ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
+ lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
+ strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
+ ibdev->owner = THIS_MODULE;
+ ibdev->node_guid = cpu_to_be64(ppd->guid);
+ ibdev->uverbs_abi_ver = HFI1_UVERBS_ABI_VERSION;
+ ibdev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = dd->num_pports;
+ ibdev->num_comp_vectors = 1;
+ ibdev->dma_device = &dd->pcidev->dev;
+ ibdev->query_device = query_device;
+ ibdev->modify_device = modify_device;
+ ibdev->query_port = query_port;
+ ibdev->modify_port = modify_port;
+ ibdev->query_pkey = query_pkey;
+ ibdev->query_gid = query_gid;
+ ibdev->alloc_ucontext = alloc_ucontext;
+ ibdev->dealloc_ucontext = dealloc_ucontext;
+ ibdev->alloc_pd = alloc_pd;
+ ibdev->dealloc_pd = dealloc_pd;
+ ibdev->create_ah = create_ah;
+ ibdev->destroy_ah = destroy_ah;
+ ibdev->modify_ah = modify_ah;
+ ibdev->query_ah = query_ah;
+ ibdev->create_srq = hfi1_create_srq;
+ ibdev->modify_srq = hfi1_modify_srq;
+ ibdev->query_srq = hfi1_query_srq;
+ ibdev->destroy_srq = hfi1_destroy_srq;
+ ibdev->create_qp = hfi1_create_qp;
+ ibdev->modify_qp = hfi1_modify_qp;
+ ibdev->query_qp = hfi1_query_qp;
+ ibdev->destroy_qp = hfi1_destroy_qp;
+ ibdev->post_send = post_send;
+ ibdev->post_recv = post_receive;
+ ibdev->post_srq_recv = hfi1_post_srq_receive;
+ ibdev->create_cq = hfi1_create_cq;
+ ibdev->destroy_cq = hfi1_destroy_cq;
+ ibdev->resize_cq = hfi1_resize_cq;
+ ibdev->poll_cq = hfi1_poll_cq;
+ ibdev->req_notify_cq = hfi1_req_notify_cq;
+ ibdev->get_dma_mr = hfi1_get_dma_mr;
+ ibdev->reg_phys_mr = hfi1_reg_phys_mr;
+ ibdev->reg_user_mr = hfi1_reg_user_mr;
+ ibdev->dereg_mr = hfi1_dereg_mr;
+ ibdev->alloc_mr = hfi1_alloc_mr;
+ ibdev->alloc_fast_reg_page_list = hfi1_alloc_fast_reg_page_list;
+ ibdev->free_fast_reg_page_list = hfi1_free_fast_reg_page_list;
+ ibdev->alloc_fmr = hfi1_alloc_fmr;
+ ibdev->map_phys_fmr = hfi1_map_phys_fmr;
+ ibdev->unmap_fmr = hfi1_unmap_fmr;
+ ibdev->dealloc_fmr = hfi1_dealloc_fmr;
+ ibdev->attach_mcast = hfi1_multicast_attach;
+ ibdev->detach_mcast = hfi1_multicast_detach;
+ ibdev->process_mad = hfi1_process_mad;
+ ibdev->mmap = hfi1_mmap;
+ ibdev->dma_ops = &hfi1_dma_mapping_ops;
+ ibdev->get_port_immutable = port_immutable;
+
+ strncpy(ibdev->node_desc, init_utsname()->nodename,
+ sizeof(ibdev->node_desc));
+
+ ret = ib_register_device(ibdev, hfi1_create_port_files);
+ if (ret)
+ goto err_reg;
+
+ ret = hfi1_create_agents(dev);
+ if (ret)
+ goto err_agents;
+
+ ret = hfi1_verbs_register_sysfs(dd);
+ if (ret)
+ goto err_class;
+
+ goto bail;
+
+err_class:
+ hfi1_free_agents(dev);
+err_agents:
+ ib_unregister_device(ibdev);
+err_reg:
+err_verbs_txreq:
+ kmem_cache_destroy(dev->verbs_txreq_cache);
+ vfree(dev->lk_table.table);
+err_lk:
+ hfi1_qp_exit(dev);
+err_qp_init:
+ dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+bail:
+ return ret;
+}
+
+void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+
+ hfi1_verbs_unregister_sysfs(dd);
+
+ hfi1_free_agents(dev);
+
+ ib_unregister_device(ibdev);
+
+ if (!list_empty(&dev->txwait))
+ dd_dev_err(dd, "txwait list not empty!\n");
+ if (!list_empty(&dev->memwait))
+ dd_dev_err(dd, "memwait list not empty!\n");
+ if (dev->dma_mr)
+ dd_dev_err(dd, "DMA MR not NULL!\n");
+
+ hfi1_qp_exit(dev);
+ del_timer_sync(&dev->mem_timer);
+ kmem_cache_destroy(dev->verbs_txreq_cache);
+ vfree(dev->lk_table.table);
+}
+
+/*
+ * This must be called with s_lock held.
+ */
+void hfi1_schedule_send(struct hfi1_qp *qp)
+{
+ if (hfi1_send_ok(qp)) {
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ iowait_schedule(&qp->s_iowait, ppd->hfi1_wq);
+ }
+}
+
+void hfi1_cnp_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+
+ if (packet->qp->ibqp.qp_type == IB_QPT_UC)
+ hfi1_uc_rcv(packet);
+ else if (packet->qp->ibqp.qp_type == IB_QPT_UD)
+ hfi1_ud_rcv(packet);
+ else
+ ibp->n_pkt_drops++;
+}
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
new file mode 100644
index 000000000000..ed903a93baf7
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -0,0 +1,1151 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef HFI1_VERBS_H
+#define HFI1_VERBS_H
+
+#include <linux/types.h>
+#include <linux/seqlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_mad.h>
+
+struct hfi1_ctxtdata;
+struct hfi1_pportdata;
+struct hfi1_devdata;
+struct hfi1_packet;
+
+#include "iowait.h"
+
+#define HFI1_MAX_RDMA_ATOMIC 16
+#define HFI1_GUIDS_PER_PORT 5
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define HFI1_UVERBS_ABI_VERSION 2
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+/* Flags for checking QP state (see ib_hfi1_state_ops[]) */
+#define HFI1_POST_SEND_OK 0x01
+#define HFI1_POST_RECV_OK 0x02
+#define HFI1_PROCESS_RECV_OK 0x04
+#define HFI1_PROCESS_SEND_OK 0x08
+#define HFI1_PROCESS_NEXT_SEND_OK 0x10
+#define HFI1_FLUSH_SEND 0x20
+#define HFI1_FLUSH_RECV 0x40
+#define HFI1_PROCESS_OR_FLUSH_SEND \
+ (HFI1_PROCESS_SEND_OK | HFI1_FLUSH_SEND)
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
+
+#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
+
+#define IB_BTH_REQ_ACK (1 << 31)
+#define IB_BTH_SOLICITED (1 << 23)
+#define IB_BTH_MIG_REQ (1 << 22)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
+
+#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+
+/* flags passed by hfi1_ib_rcv() */
+enum {
+ HFI1_HAS_GRH = (1 << 0),
+};
+
+struct ib_reth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be32 length;
+} __packed;
+
+struct ib_atomic_eth {
+ __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
+ __be32 rkey;
+ __be64 swap_data;
+ __be64 compare_data;
+} __packed;
+
+union ib_ehdrs {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be32 atomic_ack_eth[2];
+ } at;
+ __be32 imm_data;
+ __be32 aeth;
+ struct ib_atomic_eth atomic_eth;
+} __packed;
+
+struct hfi1_other_headers {
+ __be32 bth[3];
+ union ib_ehdrs u;
+} __packed;
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data). Only the first 56 bytes of the IB header
+ * will be in the eager header buffer. The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
+struct hfi1_ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct hfi1_other_headers oth;
+ } l;
+ struct hfi1_other_headers oth;
+ } u;
+} __packed;
+
+struct ahg_ib_header {
+ struct sdma_engine *sde;
+ u32 ahgdesc[2];
+ u16 tx_flags;
+ u8 ahgcount;
+ u8 ahgidx;
+ struct hfi1_ib_header ibh;
+};
+
+struct hfi1_pio_header {
+ __le64 pbc;
+ struct hfi1_ib_header hdr;
+} __packed;
+
+/*
+ * Used to force cacheline alignment for AHG.
+ */
+struct tx_pio_header {
+ struct hfi1_pio_header phdr;
+} ____cacheline_aligned;
+
+/*
+ * There is one struct hfi1_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct hfi1_mcast_qp.
+ */
+struct hfi1_mcast_qp {
+ struct list_head list;
+ struct hfi1_qp *qp;
+};
+
+struct hfi1_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+ int n_attached;
+};
+
+/* Protection domain */
+struct hfi1_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct hfi1_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+ atomic_t refcount;
+};
+
+/*
+ * This structure is used by hfi1_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct hfi1_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ unsigned size;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct hfi1_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ union {
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[0];
+ struct ib_wc kqueue[0];
+ };
+};
+
+/*
+ * The completion queue structure.
+ */
+struct hfi1_cq {
+ struct ib_cq ibcq;
+ struct kthread_work comptask;
+ struct hfi1_devdata *dd;
+ spinlock_t lock; /* protect changes in this struct */
+ u8 notify;
+ u8 triggered;
+ struct hfi1_cq_wc *queue;
+ struct hfi1_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * Used by the verbs layer.
+ */
+struct hfi1_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of hfi1_segs that fit in a page. */
+#define HFI1_SEGSZ (PAGE_SIZE / sizeof(struct hfi1_seg))
+
+struct hfi1_segarray {
+ struct hfi1_seg segs[HFI1_SEGSZ];
+};
+
+struct hfi1_mregion {
+ struct ib_pd *pd; /* shares refcnt of ibmr.pd */
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of hfi1_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+	u8 page_shift;		/* 0 - non-uniform or non power-of-2 sizes */
+ u8 lkey_published; /* in global table */
+ struct completion comp; /* complete when refcount goes to zero */
+ atomic_t refcount;
+ struct hfi1_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct hfi1_sge {
+ struct hfi1_mregion *mr;
+ void *vaddr; /* kernel virtual address of segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct hfi1_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct hfi1_mregion mr; /* must be last */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct hfi1_swqe {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct hfi1_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct hfi1_rwqe {
+ u64 wr_id;
+ u8 num_sge;
+ struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct hfi1_rwq {
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ struct hfi1_rwqe wq[0];
+};
+
+struct hfi1_rq {
+ struct hfi1_rwq *wq;
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+ /* protect changes in this struct */
+ spinlock_t lock ____cacheline_aligned_in_smp;
+};
+
+struct hfi1_srq {
+ struct ib_srq ibsrq;
+ struct hfi1_rq rq;
+ struct hfi1_mmap_info *ip;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+struct hfi1_sge_state {
+ struct hfi1_sge *sg_list; /* next SGE to be used if any */
+ struct hfi1_sge sge; /* progress state for the current SGE */
+ u32 total_len;
+ u8 num_sge;
+};
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send a RDMA read response or atomic operation.
+ */
+struct hfi1_ack_entry {
+ u8 opcode;
+ u8 sent;
+ u32 psn;
+ u32 lpsn;
+ union {
+ struct hfi1_sge rdma_sge;
+ u64 atomic_data;
+ };
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock in that
+ * order, which only happens in modify_qp() or when changing the QP 'state'.
+ */
+struct hfi1_qp {
+ struct ib_qp ibqp;
+ /* read mostly fields above and below */
+ struct ib_ah_attr remote_ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ struct hfi1_qp __rcu *next; /* link list for QPN hash table */
+ struct hfi1_swqe *s_wq; /* send work queue */
+ struct hfi1_mmap_info *ip;
+ struct ahg_ib_header *s_hdr; /* next packet header to send */
+ u8 s_sc; /* SC[0..4] for next packet */
+ unsigned long timeout_jiffies; /* computed from timeout */
+
+ enum ib_mtu path_mtu;
+ int srate_mbps; /* s_srate (below) converted to Mbit/s */
+ u32 remote_qpn;
+ u32 pmtu; /* decoded from path_mtu */
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
+ u32 s_ahgpsn; /* set to the psn in the copy of the header */
+
+ u8 state; /* QP state */
+ u8 allowed_ops; /* high order bits of allowed opcodes */
+ u8 qp_access_flags;
+ u8 alt_timeout; /* Alternate path timeout for this QP */
+ u8 timeout; /* Timeout for this QP */
+ u8 s_srate;
+ u8 s_mig_state;
+ u8 port_num;
+ u8 s_pkey_index; /* PKEY index to use */
+ u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
+ u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
+ u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 s_draining;
+
+ /* start of read/write fields */
+ atomic_t refcount ____cacheline_aligned_in_smp;
+ wait_queue_head_t wait;
+
+ struct hfi1_ack_entry s_ack_queue[HFI1_MAX_RDMA_ATOMIC + 1]
+ ____cacheline_aligned_in_smp;
+ struct hfi1_sge_state s_rdma_read_sge;
+
+ spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
+ unsigned long r_aflags;
+ u64 r_wr_id; /* ID for current receive WQE */
+ u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_psn; /* expected rcv packet sequence number */
+ u32 r_msn; /* message sequence number */
+
+ u8 r_state; /* opcode of last packet received */
+ u8 r_flags;
+ u8 r_head_ack_queue; /* index into s_ack_queue[] */
+
+ struct list_head rspwait; /* link for waiting to respond */
+
+ struct hfi1_sge_state r_sge; /* current receive data */
+ struct hfi1_rq r_rq; /* receive work queue */
+
+ spinlock_t s_lock ____cacheline_aligned_in_smp;
+ struct hfi1_sge_state *s_cur_sge;
+ u32 s_flags;
+ struct hfi1_swqe *s_wqe;
+ struct hfi1_sge_state s_sge; /* current send request data */
+ struct hfi1_mregion *s_rdma_mr;
+ struct sdma_engine *s_sde; /* current sde */
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_sending_psn; /* lowest PSN that is being sent */
+ u32 s_sending_hpsn; /* highest PSN that is being sent */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
+ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
+ u32 s_head; /* new entries added here */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_acked; /* last un-ACK'ed entry */
+ u32 s_last; /* last completed entry */
+ u32 s_ssn; /* SSN of tail entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u16 s_rdma_ack_cnt;
+ s8 s_ahgidx;
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_nak_state; /* non-zero if NAK is pending */
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
+ u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+
+ struct hfi1_sge_state s_ack_rdma_sge;
+ struct timer_list s_timer;
+
+ struct iowait s_iowait;
+
+ struct hfi1_sge r_sg_list[0] /* verified SGEs */
+ ____cacheline_aligned_in_smp;
+};
+
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define HFI1_R_WRID_VALID 0
+#define HFI1_R_REWIND_SGE 1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define HFI1_R_REUSE_SGE 0x01
+#define HFI1_R_RDMAR_SEQ 0x02
+#define HFI1_R_RSP_NAK 0x04
+#define HFI1_R_RSP_SEND 0x08
+#define HFI1_R_COMM_EST 0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
+ * HFI1_S_BUSY - send tasklet is processing the QP
+ * HFI1_S_TIMER - the RC retry timer is active
+ * HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * HFI1_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
+ * before processing the next SWQE
+ * HFI1_S_WAIT_RNR - waiting for RNR timeout
+ * HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * HFI1_S_WAIT_PIO - waiting for a send buffer to be available
+ * HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available
+ * HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * HFI1_S_WAIT_KMEM - waiting for kernel memory to be available
+ * HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ * HFI1_S_ECN - a BECN was queued to the send engine
+ */
+#define HFI1_S_SIGNAL_REQ_WR 0x0001
+#define HFI1_S_BUSY 0x0002
+#define HFI1_S_TIMER 0x0004
+#define HFI1_S_RESP_PENDING 0x0008
+#define HFI1_S_ACK_PENDING 0x0010
+#define HFI1_S_WAIT_FENCE 0x0020
+#define HFI1_S_WAIT_RDMAR 0x0040
+#define HFI1_S_WAIT_RNR 0x0080
+#define HFI1_S_WAIT_SSN_CREDIT 0x0100
+#define HFI1_S_WAIT_DMA 0x0200
+#define HFI1_S_WAIT_PIO 0x0400
+#define HFI1_S_WAIT_TX 0x0800
+#define HFI1_S_WAIT_DMA_DESC 0x1000
+#define HFI1_S_WAIT_KMEM 0x2000
+#define HFI1_S_WAIT_PSN 0x4000
+#define HFI1_S_WAIT_ACK 0x8000
+#define HFI1_S_SEND_ONE 0x10000
+#define HFI1_S_UNLIMITED_CREDIT 0x20000
+#define HFI1_S_AHG_VALID 0x40000
+#define HFI1_S_AHG_CLEAR 0x80000
+#define HFI1_S_ECN 0x100000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \
+ HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \
+ HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \
+ HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK)
+
+#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND)
+
+#define HFI1_PSN_CREDIT 16
+
+/*
+ * Since struct hfi1_swqe is not a fixed size, we can't simply index into
+ * struct hfi1_qp.s_wq. This function does the array index computation.
+ */
+static inline struct hfi1_swqe *get_swqe_ptr(struct hfi1_qp *qp,
+ unsigned n)
+{
+ return (struct hfi1_swqe *)((char *)qp->s_wq +
+ (sizeof(struct hfi1_swqe) +
+ qp->s_max_sge *
+ sizeof(struct hfi1_sge)) * n);
+}
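+
+/*
+ * Worked example (illustrative only): with s_max_sge == 2, each slot
+ * in s_wq spans sizeof(struct hfi1_swqe) + 2 * sizeof(struct hfi1_sge)
+ * bytes, so entry n starts n such strides past s_wq; the cast through
+ * (char *) makes the pointer arithmetic byte-based. get_rwqe_ptr()
+ * below applies the same arithmetic to the receive queue.
+ */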
+
+/*
+ * Since struct hfi1_rwqe is not a fixed size, we can't simply index into
+ * struct hfi1_rwq.wq. This function does the array index computation.
+ */
+static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq, unsigned n)
+{
+ return (struct hfi1_rwqe *)
+ ((char *) rq->wq->wq +
+ (sizeof(struct hfi1_rwqe) +
+ rq->max_sge * sizeof(struct ib_sge)) * n);
+}
+
+#define MAX_LKEY_TABLE_BITS 23
+
+struct hfi1_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct hfi1_mregion __rcu **table;
+};
+
+struct hfi1_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct hfi1_opcode_stats_perctx {
+ struct hfi1_opcode_stats stats[256];
+};
+
+static inline void inc_opstats(
+ u32 tlen,
+ struct hfi1_opcode_stats *stats)
+{
+#ifdef CONFIG_DEBUG_FS
+ stats->n_bytes += tlen;
+ stats->n_packets++;
+#endif
+}
+
+struct hfi1_ibport {
+ struct hfi1_qp __rcu *qp[2];
+ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
+ struct hfi1_ah *sm_ah;
+ struct hfi1_ah *smi_ah;
+ struct rb_root mcast_tree;
+ spinlock_t lock; /* protect changes in this struct */
+
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+ unsigned long trap_timeout;
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */
+ u64 tid; /* TID for traps */
+ u64 n_rc_resends;
+ u64 n_seq_naks;
+ u64 n_rdma_seq;
+ u64 n_rnr_naks;
+ u64 n_other_naks;
+ u64 n_loop_pkts;
+ u64 n_pkt_drops;
+ u64 n_vl15_dropped;
+ u64 n_rc_timeouts;
+ u64 n_dmawait;
+ u64 n_unaligned;
+ u64 n_rc_dupreq;
+ u64 n_rc_seqnak;
+
+	/* Hot-path per-CPU counters to avoid cacheline contention on updates */
+ u64 z_rc_acks;
+ u64 z_rc_qacks;
+ u64 z_rc_delayed_comp;
+ u64 __percpu *rc_acks;
+ u64 __percpu *rc_qacks;
+ u64 __percpu *rc_delayed_comp;
+
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 pkey_violations;
+ u16 qkey_violations;
+ u16 mkey_violations;
+ u16 mkey_lease_period;
+ u16 sm_lid;
+ u16 repress_traps;
+ u8 sm_sl;
+ u8 mkeyprot;
+ u8 subnet_timeout;
+ u8 vl_high_limit;
+ /* the first 16 entries are sl_to_vl for !OPA */
+ u8 sl_to_sc[32];
+ u8 sc_to_sl[32];
+};
+
+
+struct hfi1_qp_ibdev;
+struct hfi1_ibdev {
+ struct ib_device ibdev;
+ struct list_head pending_mmaps;
+ spinlock_t mmap_offset_lock; /* protect mmap_offset */
+ u32 mmap_offset;
+ struct hfi1_mregion __rcu *dma_mr;
+
+ struct hfi1_qp_ibdev *qp_dev;
+
+ /* QP numbers are shared by all IB ports */
+ struct hfi1_lkey_table lk_table;
+ /* protect wait lists */
+ seqlock_t iowait_lock;
+	struct list_head txwait;        /* list of QPs waiting for a verbs_txreq */
+	struct list_head memwait;       /* list of QPs waiting for kernel memory */
+ struct list_head txreq_free;
+ struct kmem_cache *verbs_txreq_cache;
+ struct timer_list mem_timer;
+
+ /* other waiters */
+ spinlock_t pending_lock;
+
+ u64 n_piowait;
+ u64 n_txwait;
+ u64 n_kmem_wait;
+
+ u32 n_pds_allocated; /* number of PDs allocated for device */
+ spinlock_t n_pds_lock;
+ u32 n_ahs_allocated; /* number of AHs allocated for device */
+ spinlock_t n_ahs_lock;
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ spinlock_t n_qps_lock;
+ u32 n_srqs_allocated; /* number of SRQs allocated for device */
+ spinlock_t n_srqs_lock;
+ u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+#ifdef CONFIG_DEBUG_FS
+ /* per HFI debugfs */
+ struct dentry *hfi1_ibdev_dbg;
+ /* per HFI symlinks to above */
+ struct dentry *hfi1_ibdev_link;
+#endif
+};
+
+struct hfi1_verbs_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
+ u32 vl15_dropped;
+};
+
+static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct hfi1_mr, ibmr);
+}
+
+static inline struct hfi1_pd *to_ipd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct hfi1_pd, ibpd);
+}
+
+static inline struct hfi1_ah *to_iah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct hfi1_ah, ibah);
+}
+
+static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct hfi1_cq, ibcq);
+}
+
+static inline struct hfi1_srq *to_isrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct hfi1_srq, ibsrq);
+}
+
+static inline struct hfi1_qp *to_iqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct hfi1_qp, ibqp);
+}
+
+static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct hfi1_ibdev, ibdev);
+}
+
+/*
+ * Send if not busy or waiting for I/O and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int hfi1_send_ok(struct hfi1_qp *qp)
+{
+ return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) ||
+ !(qp->s_flags & HFI1_S_ANY_WAIT_SEND));
+}
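+
+/*
+ * Example (illustrative): a QP with HFI1_S_WAIT_RNR set can still pass
+ * this test while HFI1_S_RESP_PENDING is set, because responder traffic
+ * is gated only by HFI1_S_BUSY and the I/O wait flags, not by the SWQE
+ * wait flags collected in HFI1_S_ANY_WAIT_SEND.
+ */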
+
+/*
+ * This must be called with s_lock held.
+ */
+void hfi1_schedule_send(struct hfi1_qp *qp);
+void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void hfi1_cap_mask_chg(struct hfi1_ibport *ibp);
+void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
+void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad, size_t in_mad_size,
+ struct ib_mad_hdr *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+int hfi1_create_agents(struct hfi1_ibdev *dev);
+void hfi1_free_agents(struct hfi1_ibdev *dev);
+
+/*
+ * The PSN_MASK and PSN_SHIFT allow for
+ * 1) comparing two PSNs
+ * 2) returning the PSN with any upper bits masked
+ * 3) returning the difference between two PSNs
+ *
+ * The number of significant bits in the PSN must
+ * be at least one less than the width of the
+ * container holding the PSN.
+ */
+#ifndef CONFIG_HFI1_VERBS_31BIT_PSN
+#define PSN_MASK 0xFFFFFF
+#define PSN_SHIFT 8
+#else
+#define PSN_MASK 0x7FFFFFFF
+#define PSN_SHIFT 1
+#endif
+#define PSN_MODIFY_MASK 0xFFFFFF
+
+/* Number of bits to pay attention to in the opcode for checking qp type */
+#define OPCODE_QP_MASK 0xE0
+
+/*
+ * Compare the lower 24 bits of the msn values.
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int cmp_msn(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << 8;
+}
+
+/*
+ * Compare two PSNs
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int cmp_psn(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << PSN_SHIFT;
+}
+
+/*
+ * Return masked PSN
+ */
+static inline u32 mask_psn(u32 a)
+{
+ return a & PSN_MASK;
+}
+
+/*
+ * Return delta between two PSNs
+ */
+static inline u32 delta_psn(u32 a, u32 b)
+{
+ return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
+}
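+
+/*
+ * Worked example (illustrative only): with the default 24-bit PSN,
+ * PSN_SHIFT is 8, so cmp_psn(0x000001, 0xFFFFFF) computes
+ * (1 - 0xFFFFFF) << 8, which wraps in a 32-bit int to +512; a PSN just
+ * past the wrap point correctly compares as newer. For the same inputs
+ * delta_psn() yields 512 >> 8 == 2, the forward distance modulo 2^24.
+ * cmp_msn() above uses the same trick with a fixed shift of 8.
+ */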
+
+struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid);
+
+int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp);
+
+struct verbs_txreq;
+void hfi1_put_txreq(struct verbs_txreq *tx);
+
+int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len);
+
+void hfi1_copy_sge(struct hfi1_sge_state *ss, void *data, u32 length,
+ int release);
+
+void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release);
+
+void hfi1_cnp_rcv(struct hfi1_packet *packet);
+
+void hfi1_uc_rcv(struct hfi1_packet *packet);
+
+void hfi1_rc_rcv(struct hfi1_packet *packet);
+
+void hfi1_rc_hdrerr(
+ struct hfi1_ctxtdata *rcd,
+ struct hfi1_ib_header *hdr,
+ u32 rcv_flags,
+ struct hfi1_qp *qp);
+
+u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);
+
+void hfi1_rc_rnr_retry(unsigned long arg);
+
+void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr);
+
+void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err);
+
+void hfi1_ud_rcv(struct hfi1_packet *packet);
+
+int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
+
+int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);
+
+void hfi1_free_lkey(struct hfi1_mregion *mr);
+
+int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd,
+ struct hfi1_sge *isge, struct ib_sge *sge, int acc);
+
+int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+
+int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+
+int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int hfi1_destroy_srq(struct ib_srq *ibsrq);
+
+int hfi1_cq_init(struct hfi1_devdata *dd);
+
+void hfi1_cq_exit(struct hfi1_devdata *dd);
+
+void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int sig);
+
+int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *hfi1_create_cq(
+ struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int hfi1_destroy_cq(struct ib_cq *ibcq);
+
+int hfi1_req_notify_cq(
+ struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags);
+
+int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+
+int hfi1_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_entries);
+
+struct ib_fast_reg_page_list *hfi1_alloc_fast_reg_page_list(
+ struct ib_device *ibdev, int page_list_len);
+
+void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
+
+int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr);
+
+struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+
+int hfi1_unmap_fmr(struct list_head *fmr_list);
+
+int hfi1_dealloc_fmr(struct ib_fmr *ibfmr);
+
+static inline void hfi1_get_mr(struct hfi1_mregion *mr)
+{
+ atomic_inc(&mr->refcount);
+}
+
+static inline void hfi1_put_mr(struct hfi1_mregion *mr)
+{
+ if (unlikely(atomic_dec_and_test(&mr->refcount)))
+ complete(&mr->comp);
+}
+
+static inline void hfi1_put_ss(struct hfi1_sge_state *ss)
+{
+ while (ss->num_sge) {
+ hfi1_put_mr(ss->sge.mr);
+ if (--ss->num_sge)
+ ss->sge = *ss->sg_list++;
+ }
+}
+
+void hfi1_release_mmap_info(struct kref *ref);
+
+struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
+ u32 size, void *obj);
+
+int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only);
+
+void hfi1_migrate_qp(struct hfi1_qp *qp);
+
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
+ int has_grh, struct hfi1_qp *qp, u32 bth0);
+
+u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void clear_ahg(struct hfi1_qp *qp);
+
+void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle);
+
+void hfi1_do_send(struct work_struct *work);
+
+void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
+ enum ib_wc_status status);
+
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct hfi1_qp *qp, int is_fecn);
+
+int hfi1_make_rc_req(struct hfi1_qp *qp);
+
+int hfi1_make_uc_req(struct hfi1_qp *qp);
+
+int hfi1_make_ud_req(struct hfi1_qp *qp);
+
+int hfi1_register_ib_device(struct hfi1_devdata *);
+
+void hfi1_unregister_ib_device(struct hfi1_devdata *);
+
+void hfi1_ib_rcv(struct hfi1_packet *packet);
+
+unsigned hfi1_get_npkeys(struct hfi1_devdata *);
+
+int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc);
+
+int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
+ u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
+ u32 plen, u32 dwords, u64 pbc);
+
+struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5);
+
+extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
+
+extern const u8 hdr_len_by_opcode[];
+
+extern const int ib_hfi1_state_ops[];
+
+extern __be64 ib_hfi1_sys_image_guid; /* in network order */
+
+extern unsigned int hfi1_lkey_table_size;
+
+extern unsigned int hfi1_max_cqes;
+
+extern unsigned int hfi1_max_cqs;
+
+extern unsigned int hfi1_max_qp_wrs;
+
+extern unsigned int hfi1_max_qps;
+
+extern unsigned int hfi1_max_sges;
+
+extern unsigned int hfi1_max_mcast_grps;
+
+extern unsigned int hfi1_max_mcast_qp_attached;
+
+extern unsigned int hfi1_max_srqs;
+
+extern unsigned int hfi1_max_srq_sges;
+
+extern unsigned int hfi1_max_srq_wrs;
+
+extern const u32 ib_hfi1_rnr_table[];
+
+extern struct ib_dma_mapping_ops hfi1_dma_mapping_ops;
+
+#endif /* HFI1_VERBS_H */
diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/staging/rdma/hfi1/verbs_mcast.c
new file mode 100644
index 000000000000..afc6b4c61a1d
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/verbs_mcast.c
@@ -0,0 +1,385 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/rculist.h>
+
+#include "hfi.h"
+
+/**
+ * mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp)
+{
+ struct hfi1_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ atomic_inc(&qp->refcount);
+
+bail:
+ return mqp;
+}
+
+static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
+{
+ struct hfi1_qp *qp = mqp->qp;
+
+ /* Notify hfi1_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+
+ kfree(mqp);
+}
+
+/**
+ * mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid)
+{
+ struct hfi1_mcast *mcast;
+
+ mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mgid = *mgid;
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+ mcast->n_attached = 0;
+
+bail:
+ return mcast;
+}
+
+static void mcast_free(struct hfi1_mcast *mcast)
+{
+ struct hfi1_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * hfi1_mcast_find - search the global table for the given multicast GID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct hfi1_mcast *mcast;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ n = ibp->mcast_tree.rb_node;
+ while (n) {
+ int ret;
+
+ mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else {
+ atomic_inc(&mcast->refcount);
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ goto bail;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ mcast = NULL;
+
+bail:
+ return mcast;
+}
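+
+/*
+ * Usage sketch (illustrative; names such as dgid are hypothetical):
+ * a receive path pairs the lookup with a reference drop, waking any
+ * waiter in hfi1_multicast_detach() when the count falls back:
+ *
+ *	mcast = hfi1_mcast_find(ibp, &dgid);
+ *	if (mcast) {
+ *		...deliver to each QP on mcast->qp_list under RCU...
+ *		if (atomic_dec_return(&mcast->refcount) <= 1)
+ *			wake_up(&mcast->wait);
+ *	}
+ */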
+
+/**
+ * mcast_add - insert mcast GID into table and attach QP struct
+ * @dev: the device's IB data
+ * @ibp: the IB port structure
+ * @mcast: the mcast GID struct to insert
+ * @mqp: the QP struct to attach
+ *
+ * Return zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added.
+ */
+static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
+ struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp)
+{
+ struct rb_node **n = &ibp->mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ int ret;
+
+ spin_lock_irq(&ibp->lock);
+
+ while (*n) {
+ struct hfi1_mcast *tmcast;
+ struct hfi1_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct hfi1_mcast, rb_node);
+
+ ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ if (tmcast->n_attached == hfi1_max_mcast_qp_attached) {
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ tmcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ spin_lock(&dev->n_mcast_grps_lock);
+ if (dev->n_mcast_grps_allocated == hfi1_max_mcast_grps) {
+ spin_unlock(&dev->n_mcast_grps_lock);
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ dev->n_mcast_grps_allocated++;
+ spin_unlock(&dev->n_mcast_grps_lock);
+
+ mcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+ ret = 0;
+
+bail:
+ spin_unlock_irq(&ibp->lock);
+
+ return ret;
+}
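+
+/*
+ * Note (from the behavior above): mcast_add() returns *positive* errno
+ * values on the soft outcomes so the caller can tell them apart from
+ * hard failures; hfi1_multicast_attach() below frees whichever
+ * allocation went unused on EEXIST/ESRCH and only converts ENOMEM into
+ * a real -ENOMEM for the core.
+ */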
+
+int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ struct hfi1_ibdev *dev = to_idev(ibqp->device);
+ struct hfi1_ibport *ibp;
+ struct hfi1_mcast *mcast;
+ struct hfi1_mcast_qp *mqp;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+	 * Allocate the data structures now, since it's better to do this
+	 * outside of spin locks and they will most likely be needed.
+ */
+ mcast = mcast_alloc(gid);
+ if (mcast == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ mqp = mcast_qp_alloc(qp);
+ if (mqp == NULL) {
+ mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ ibp = to_iport(ibqp->device, qp->port_num);
+ switch (mcast_add(dev, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ mcast_qp_free(mqp);
+ mcast_free(mcast);
+ break;
+
+ case EEXIST: /* The mcast wasn't used */
+ mcast_free(mcast);
+ break;
+
+ case ENOMEM:
+		/* Exceeded the maximum number of mcast groups or attached QPs. */
+ mcast_qp_free(mqp);
+ mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct hfi1_qp *qp = to_iqp(ibqp);
+ struct hfi1_ibdev *dev = to_idev(ibqp->device);
+ struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct hfi1_mcast *mcast = NULL;
+	struct hfi1_mcast_qp *p, *tmp, *delp = NULL;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irq(&ibp->lock);
+
+ /* Find the GID in the mcast table. */
+ n = ibp->mcast_tree.rb_node;
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+ last = 1;
+ }
+		delp = p;
+		break;
+ }
+
+ spin_unlock_irq(&ibp->lock);
+
+	/* The QP was never attached to this mcast group. */
+	if (!delp) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Wait for any list walkers to finish before freeing the
+	 * list element.
+	 */
+	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+	mcast_qp_free(delp);
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ mcast_free(mcast);
+ spin_lock_irq(&dev->n_mcast_grps_lock);
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp)
+{
+ return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
index 8fe54ff00580..041ce0634968 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/staging/rdma/ipath/Kconfig
@@ -2,7 +2,7 @@ config INFINIBAND_IPATH
tristate "QLogic HTX HCA support"
depends on 64BIT && NET && HT_IRQ
---help---
- This is a driver for the obsolete QLogic Hyper-Transport
+ This is a driver for the deprecated QLogic Hyper-Transport
IB host channel adapter (model QHT7140),
including InfiniBand verbs support. This driver allows these
devices to be used with both kernel upper level protocols such
@@ -12,3 +12,5 @@ config INFINIBAND_IPATH
If you have this hardware you will need to boot with PAT disabled
on your x86-64 systems, use the nopat kernel parameter.
+
+ Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
index 4496f2820c92..4496f2820c92 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/staging/rdma/ipath/Makefile
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
new file mode 100644
index 000000000000..cb00158d64c8
--- /dev/null
+++ b/drivers/staging/rdma/ipath/TODO
@@ -0,0 +1,5 @@
+The ipath driver has been moved to staging in preparation for its removal in a
+few releases. The driver will be deleted during the 4.6 merge window.
+
+Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
+Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
index 28cfe97cf1e9..28cfe97cf1e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/staging/rdma/ipath/ipath_common.h
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
index e9dd9112e718..e9dd9112e718 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/staging/rdma/ipath/ipath_cq.c
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
index 65926cd35759..65926cd35759 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/staging/rdma/ipath/ipath_debug.h
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
index 45802e97332e..45802e97332e 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/staging/rdma/ipath/ipath_diag.c
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
index 123a8c053539..123a8c053539 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/staging/rdma/ipath/ipath_dma.c
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/staging/rdma/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 bar0 = 0, bar1 = 0;
#ifdef CONFIG_X86_64
- if (WARN(pat_enabled(),
- "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+ if (pat_enabled()) {
+ pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
ret = -ENODEV;
goto bail;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
index fc7181985e8e..fc7181985e8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/staging/rdma/ipath/ipath_eeprom.c
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 450d15965005..450d15965005 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/staging/rdma/ipath/ipath_file_ops.c
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
index 25422a3a7238..25422a3a7238 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/staging/rdma/ipath/ipath_fs.c
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
index 7cc305488a3d..7cc305488a3d 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/staging/rdma/ipath/ipath_iba6110.c
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
index be2a60e142b0..be2a60e142b0 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/staging/rdma/ipath/ipath_init_chip.c
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
index 01ba792791a0..01ba792791a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/staging/rdma/ipath/ipath_intr.c
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
index f0f947122779..f0f947122779 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/staging/rdma/ipath/ipath_kernel.h
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
index c0e933fec218..c0e933fec218 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/staging/rdma/ipath/ipath_keys.c
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
index 948188e37f95..ad3a926ab3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/staging/rdma/ipath/ipath_mad.c
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
index e73274229404..e73274229404 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/staging/rdma/ipath/ipath_mmap.c
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
index c7278f6a8217..c7278f6a8217 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/staging/rdma/ipath/ipath_mr.c
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
index face87602dc1..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/staging/rdma/ipath/ipath_qp.c
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
index 79b3dbc97179..79b3dbc97179 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/staging/rdma/ipath/ipath_rc.c
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
index 8f44d0cf3833..8f44d0cf3833 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/staging/rdma/ipath/ipath_registers.h
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
index 1f95bbaf7602..1f95bbaf7602 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/staging/rdma/ipath/ipath_ruc.c
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
index 17a517766ad2..17a517766ad2 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/staging/rdma/ipath/ipath_sdma.c
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
index 26271984b717..26271984b717 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/staging/rdma/ipath/ipath_srq.c
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
index f63e143e3292..f63e143e3292 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/staging/rdma/ipath/ipath_stats.c
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
index 75558f33f1cb..75558f33f1cb 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/staging/rdma/ipath/ipath_sysfs.c
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
index 22e60998f1a7..22e60998f1a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/staging/rdma/ipath/ipath_uc.c
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
index e8a2a915251e..e8a2a915251e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/staging/rdma/ipath/ipath_ud.c
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
index 1da1252dcdb3..1da1252dcdb3 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/staging/rdma/ipath/ipath_user_pages.c
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
index cc04b7ba3488..cc04b7ba3488 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/staging/rdma/ipath/ipath_user_sdma.c
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
index fc76316c4a58..fc76316c4a58 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.h
+++ b/drivers/staging/rdma/ipath/ipath_user_sdma.h
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
index 48253b839a6f..ed2bbc2f7eae 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/staging/rdma/ipath/ipath_verbs.c
@@ -1521,6 +1521,7 @@ static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_qp = ib_ipath_max_qps;
props->max_qp_wr = ib_ipath_max_qp_wrs;
props->max_sge = ib_ipath_max_sges;
+ props->max_sge_rd = ib_ipath_max_sges;
props->max_cq = ib_ipath_max_cqs;
props->max_ah = ib_ipath_max_ahs;
props->max_cqe = ib_ipath_max_cqes;
@@ -2044,9 +2045,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock);
- idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+ idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
/* Set the prefix to the default value (see ch. 4.1.1) */
- idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+ idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
if (ret)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
index ec167e545e15..ec167e545e15 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/staging/rdma/ipath/ipath_verbs.h
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
index 6216ea923853..6216ea923853 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
index 1a7e20a75149..1a7e20a75149 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
index 7b6e4c843e19..7b6e4c843e19 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 581af88e3024..5c45f8a8c2cf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -379,7 +379,8 @@ void expire_timeout_chk(struct adapter *padapter)
if (pmlmeext->active_keep_alive_check) {
int stainfo_offset;
- stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ stainfo_offset =
+ rtw_stainfo_offset(pstapriv, psta);
if (stainfo_offset_valid(stainfo_offset))
chk_alive_list[chk_alive_num++] = stainfo_offset;
continue;
@@ -1584,7 +1585,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
}
}
- if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
if (!psta->no_short_slot_time_set) {
psta->no_short_slot_time_set = 1;
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index bc3fe10ff247..993c7db87a1a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -219,6 +219,7 @@ int proc_get_ht_option(char *page, char **start,
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
int len = 0;
+
len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
*eof = 1;
return len;
@@ -588,6 +589,7 @@ int proc_set_rx_signal(struct file *file, const char __user *buffer,
if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
+
is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
if (is_signal_dbg && num != 2)
return count;
@@ -917,7 +919,7 @@ int proc_get_best_channel(char *page, char **start,
/* 5G */
if (pmlmeext->channel_set[i].ChannelNum >= 36 &&
pmlmeext->channel_set[i].ChannelNum < 140) {
- /* Find primary channel */
+ /* Find primary channel */
if (((pmlmeext->channel_set[i].ChannelNum - 36) % 8 == 0) &&
(pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
index_5G = i;
@@ -927,7 +929,7 @@ int proc_get_best_channel(char *page, char **start,
if (pmlmeext->channel_set[i].ChannelNum >= 149 &&
pmlmeext->channel_set[i].ChannelNum < 165) {
- /* find primary channel */
+ /* find primary channel */
if (((pmlmeext->channel_set[i].ChannelNum - 149) % 8 == 0) &&
(pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
index_5G = i;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index dbaba2c6cce5..7b99ea91a9e6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -551,7 +551,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
bContinual = false;
}
} else if (ReadState & PG_STATE_DATA) {
- /* Data section Read ------------- */
+ /* Data section Read ------------- */
efuse_WordEnableDataRead(hworden, tmpdata, data);
efuse_addr = efuse_addr + (word_cnts*2)+1;
ReadState = PG_STATE_HEADER;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 11b780d6c4ab..c3c582881a09 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -19,6 +19,8 @@
******************************************************************************/
#define _IEEE80211_C
+#include <linux/ieee80211.h>
+
#include <drv_types.h>
#include <osdep_intf.h>
#include <ieee80211.h>
@@ -1042,7 +1044,7 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
elems->timeout_int = pos;
elems->timeout_int_len = elen;
break;
- case WLAN_EID_HT_CAP:
+ case WLAN_EID_HT_CAPABILITY:
elems->ht_capabilities = pos;
elems->ht_capabilities_len = elen;
break;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 8c05cb021c46..22f5b45f5f7f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -183,7 +183,7 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -271,7 +271,7 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -293,7 +293,7 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -366,7 +366,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
@@ -415,7 +415,7 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter)
rtw_disassoc_cmd(padapter, 0, true);
rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
rtw_pwr_wakeup(padapter);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 05584515c5b4..2b917a18e228 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -19,6 +19,7 @@
******************************************************************************/
#define _RTW_MLME_C_
+#include <linux/ieee80211.h>
#include <osdep_service.h>
#include <drv_types.h>
@@ -160,7 +161,7 @@ exit:
return pnetwork;
}
-static void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwork, u8 isfreeall)
+static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
{
u32 curr_time, delta_time;
u32 lifetime = SCANQUEUE_LIFETIME;
@@ -352,8 +353,8 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
(d_cap & WLAN_CAPABILITY_IBSS)) &&
- ((s_cap & WLAN_CAPABILITY_BSS) ==
- (d_cap & WLAN_CAPABILITY_BSS)));
+ ((s_cap & WLAN_CAPABILITY_ESS) ==
+ (d_cap & WLAN_CAPABILITY_ESS)));
}
struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
@@ -581,7 +582,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
}
/* TODO: Perry: For Power Management */
-void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf)
+void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
{
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
return;
@@ -614,7 +615,7 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
if (ibss_wlan) {
- memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
+ memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto exit;
}
@@ -692,8 +693,8 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
if (_SUCCESS == s_ret) {
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
} else if (s_ret == 2) { /* there is no need to wait for join */
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
rtw_indicate_connect(adapter);
@@ -703,7 +704,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (--pmlmepriv->to_roaming == 0 ||
_SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
pmlmepriv->to_roaming = 0;
- rtw_free_assoc_resources(adapter, 1);
+ rtw_free_assoc_resources(adapter);
rtw_indicate_disconnect(adapter);
} else {
pmlmepriv->to_join = true;
@@ -757,7 +758,19 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
/*
*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
*/
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
+void rtw_free_assoc_resources(struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ rtw_free_assoc_resources_locked(adapter);
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+}
+
+/*
+*rtw_free_assoc_resources_locked: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources_locked(struct adapter *adapter)
{
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -792,8 +805,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
rtw_init_bcmc_stainfo(adapter);
}
- if (lock_scanned_queue)
- spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
if (pwlan)
@@ -804,8 +815,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
rtw_free_network_nolock(pmlmepriv, pwlan);
- if (lock_scanned_queue)
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
pmlmepriv->key_mask = 0;
}
@@ -1301,7 +1310,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
rtw_free_uc_swdec_pending_queue(adapter);
- rtw_free_assoc_resources(adapter, 1);
+ rtw_free_assoc_resources(adapter);
rtw_indicate_disconnect(adapter);
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
/* remove the network entry in scanned_queue */
@@ -1382,7 +1391,7 @@ void _rtw_join_timeout_handler (unsigned long data)
DBG_88E("%s try another roaming\n", __func__);
do_join_r = rtw_do_join(adapter);
if (_SUCCESS != do_join_r) {
- DBG_88E("%s roaming do_join return %d\n", __func__ , do_join_r);
+ DBG_88E("%s roaming do_join return %d\n", __func__, do_join_r);
continue;
}
break;
@@ -1556,7 +1565,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
rtw_disassoc_cmd(adapter, 0, true);
rtw_indicate_disconnect(adapter);
- rtw_free_assoc_resources(adapter, 0);
+ rtw_free_assoc_resources_locked(adapter);
}
rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div));
@@ -1997,7 +2006,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
- rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2 , pout_len);
+ rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
}
}
return phtpriv->ht_option;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index a0b8f665fa2f..935b48eef8b1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1096,7 +1096,7 @@ static void issue_assocreq(struct adapter *padapter)
/* Check if the AP's supported rates are also supported by STA. */
for (j = 0; j < sta_bssrate_len; j++) {
- /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
+ /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
== (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
break;
@@ -2932,7 +2932,7 @@ static unsigned int OnAuthClient(struct adapter *padapter,
if (seq == 2) {
if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
- /* legendary shared system */
+ /* legendary shared system */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
@@ -3367,7 +3367,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+ if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -4155,8 +4155,8 @@ static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptabl
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->rx_data;
- if (ptable->func) {
- /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+ if (ptable->func) {
+ /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
return;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index ec0a8a4cdc6e..9765946466ab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -70,7 +70,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
}
}
/* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
/* s2-4. */
rtw_free_network_queue(padapter, true);
@@ -549,12 +549,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
(unsigned long)padapter);
}
-inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
-}
-
/*
* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
* @adapter: pointer to struct adapter structure
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 8501eb898824..44eeb03213e6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -74,10 +74,8 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
- if (precvpriv->pallocated_frame_buf == NULL) {
- res = _FAIL;
- goto exit;
- }
+ if (!precvpriv->pallocated_frame_buf)
+ return _FAIL;
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
@@ -89,7 +87,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
list_add_tail(&(precvframe->list),
&(precvpriv->free_recv_queue.queue));
- res = rtw_os_recv_resource_alloc(padapter, precvframe);
+ rtw_os_recv_resource_alloc(precvframe);
precvframe->len = 0;
@@ -107,8 +105,6 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvpriv->signal_stat_sampling_interval = 1000; /* ms */
rtw_set_signal_stat_timer(precvpriv);
-exit:
-
return res;
}
@@ -117,7 +113,6 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
-
rtw_free_uc_swdec_pending_queue(padapter);
if (precvpriv->pallocated_frame_buf) {
@@ -153,7 +148,6 @@ struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
}
}
-
return (struct recv_frame *)hdr;
}
@@ -170,14 +164,6 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
return precvframe;
}
-void rtw_init_recvframe(struct recv_frame *precvframe, struct recv_priv *precvpriv)
-{
- /* Perry: This can be removed */
- INIT_LIST_HEAD(&precvframe->list);
-
- precvframe->len = 0;
-}
-
int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue)
{
@@ -208,7 +194,6 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
spin_unlock_bh(&pfree_recv_queue->lock);
-
return _SUCCESS;
}
@@ -217,7 +202,6 @@ int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
struct adapter *padapter = precvframe->adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
-
list_del_init(&(precvframe->list));
list_add_tail(&(precvframe->list), get_list_head(queue));
@@ -226,7 +210,6 @@ int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
precvpriv->free_recvframe_cnt++;
}
-
return _SUCCESS;
}
@@ -421,7 +404,6 @@ static int recvframe_chkmic(struct adapter *adapter,
exit:
-
return res;
}
@@ -483,7 +465,6 @@ static struct recv_frame *decryptor(struct adapter *padapter,
return_packet = NULL;
}
-
return return_packet;
}
@@ -502,7 +483,6 @@ static struct recv_frame *portctrl(struct adapter *adapter,
struct rx_pkt_attrib *pattrib;
__be16 be_tmp;
-
pstapriv = &adapter->stapriv;
auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
@@ -561,7 +541,6 @@ static struct recv_frame *portctrl(struct adapter *adapter,
prtnframe = precv_frame;
}
-
return prtnframe;
}
@@ -573,7 +552,6 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry,
u16 seq_ctrl = ((precv_frame->attrib.seq_num&0xffff) << 4) |
(precv_frame->attrib.frag_num & 0xf);
-
if (tid > 15) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
@@ -590,7 +568,6 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry,
prxcache->tid_rxseq[tid] = seq_ctrl;
-
return _SUCCESS;
}
@@ -727,7 +704,6 @@ int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
u8 *sta_addr = NULL;
int bmcast = IS_MCAST(pattrib->dst);
-
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
/* filter packets that SA is myself or multicast or broadcast */
@@ -815,7 +791,6 @@ static int ap2sta_data_frame(
u8 *myhwaddr = myid(&adapter->eeprompriv);
int bmcast = IS_MCAST(pattrib->dst);
-
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
(check_fwstate(pmlmepriv, _FW_LINKED) == true ||
check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
@@ -907,7 +882,6 @@ static int ap2sta_data_frame(
exit:
-
return ret;
}
@@ -922,7 +896,6 @@ static int sta2ap_data_frame(struct adapter *adapter,
unsigned char *mybssid = get_bssid(pmlmepriv);
int ret = _SUCCESS;
-
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
@@ -967,7 +940,6 @@ static int sta2ap_data_frame(struct adapter *adapter,
exit:
-
return ret;
}
@@ -1149,7 +1121,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
struct security_priv *psecuritypriv = &adapter->securitypriv;
int ret = _SUCCESS;
-
bretry = GetRetry(ptr);
pda = get_da(ptr);
psa = get_sa(ptr);
@@ -1253,7 +1224,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
exit:
-
return ret;
}
@@ -1273,7 +1243,6 @@ static int validate_recv_frame(struct adapter *adapter,
u8 ver = (unsigned char)(*ptr)&0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
if (ch_set_idx >= 0)
@@ -1362,7 +1331,6 @@ static int validate_recv_frame(struct adapter *adapter,
exit:
-
return retval;
}
@@ -1445,7 +1413,6 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
struct recv_frame *prframe, *pnextrframe;
struct __queue *pfree_recv_queue;
-
curfragnum = 0;
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
@@ -1510,7 +1477,6 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
-
return prframe;
}
@@ -1528,7 +1494,6 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
struct recv_frame *prtnframe = NULL;
struct __queue *pfree_recv_queue, *pdefrag_q;
-
pstapriv = &padapter->stapriv;
pfhdr = precv_frame;
@@ -1612,7 +1577,6 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
}
}
-
return prtnframe;
}
@@ -2116,7 +2080,6 @@ s32 rtw_recv_entry(struct recv_frame *precvframe)
struct recv_priv *precvpriv;
s32 ret = _SUCCESS;
-
padapter = precvframe->adapter;
precvpriv = &padapter->recvpriv;
@@ -2129,7 +2092,6 @@ s32 rtw_recv_entry(struct recv_frame *precvframe)
precvpriv->rx_pkts++;
-
return ret;
_recv_entry_drop:
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index d870a5ce8585..22839d57dc8c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1330,7 +1330,7 @@ static int aes_decipher(u8 *key, uint hdrlen,
bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
+ pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
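
The chaining loop above XORs each 16-byte AES output block into the next ciphertext block before copying it back into the frame; the hunk itself only re-indents the copy. A minimal userspace sketch of that XOR step, mirroring the shape of the driver's bitwise_xor() helper (the main() harness is illustrative only):

#include <stdio.h>

/* 16-byte XOR, same shape as the driver's bitwise_xor(a, b, out). */
static void bitwise_xor(const unsigned char *a, const unsigned char *b,
			unsigned char *out)
{
	int i;

	for (i = 0; i < 16; i++)
		out[i] = a[i] ^ b[i];
}

int main(void)
{
	unsigned char aes_out[16] = { 0xaa }, block[16] = { 0x55 };
	unsigned char chain_buffer[16];

	bitwise_xor(aes_out, block, chain_buffer);
	printf("chain_buffer[0] = 0x%02x\n", chain_buffer[0]); /* 0xaa ^ 0x55 = 0xff */
	return 0;
}
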
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 32300df7b996..077b39a41c60 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -19,6 +19,8 @@
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
+#include <linux/ieee80211.h>
+
#include <osdep_service.h>
#include <drv_types.h>
#include <wifi.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index fda169d37771..5dc0b90e8ab5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -868,7 +868,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
/* check if enable ampdu */
if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
- pattrib->ampdu_en = true;
+ pattrib->ampdu_en = true;
}
/* re-check if enable ampdu by BA_starting_seqctrl */
@@ -1026,22 +1026,22 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
/* adding icv, if necessary... */
if (pattrib->iv_len) {
switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- break;
- case _TKIP_:
- if (bmcst)
- TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
- case _AES_:
- if (bmcst)
- AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- AES_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
+ case _WEP40_:
+ case _WEP104_:
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+ case _TKIP_:
+ if (bmcst)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ case _AES_:
+ if (bmcst)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
}
memcpy(pframe, pattrib->iv, pattrib->iv_len);
@@ -1769,7 +1769,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
int bmcst = IS_MCAST(pattrib->ra);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
- return ret;
+ return ret;
if (pattrib->psta)
psta = pattrib->psta;
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 15a176596305..2633a13b4e58 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -659,11 +659,11 @@ void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rate
{
struct odm_ra_info *pRaInfo = NULL;
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return;
ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
("macid =%d RateID = 0x%x RateMask = 0x%x SGIEnable =%d\n",
macid, RateID, RateMask, SGIEnable));
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
- return;
pRaInfo = &(dm_odm->RAInfo[macid]);
pRaInfo->RateID = RateID;
@@ -676,10 +676,10 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
{
struct odm_ra_info *pRaInfo = NULL;
- ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
- (" macid =%d Rssi =%d\n", macid, Rssi));
if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
return;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" macid =%d Rssi =%d\n", macid, Rssi));
pRaInfo = &(dm_odm->RAInfo[macid]);
pRaInfo->RssiStaRA = Rssi;
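
Both hunks above move the dm_odm NULL check and the macid bounds check ahead of the ODM_RT_TRACE call, so nothing dereferences the ODM state before it has been validated. A standalone sketch of that check-before-use reordering (ASSOCIATE_ENTRY_NUM's value and the struct layout here are stand-ins, not the driver's real definitions):

#include <stddef.h>
#include <stdio.h>

#define ASSOCIATE_ENTRY_NUM 32	/* assumed table size */

struct ra_info { int rssi; };
struct dm_struct { struct ra_info ra[ASSOCIATE_ENTRY_NUM]; };

static void set_rssi(struct dm_struct *dm, unsigned int macid, int rssi)
{
	/* Validate before any dereference -- the reordering the hunks apply. */
	if (dm == NULL || macid >= ASSOCIATE_ENTRY_NUM)
		return;

	printf("macid=%u rssi=%d\n", macid, rssi);	/* trace only after the checks */
	dm->ra[macid].rssi = rssi;
}

int main(void)
{
	struct dm_struct dm = { 0 };

	set_rssi(&dm, 3, -42);		/* valid entry */
	set_rssi(NULL, 3, -42);		/* ignored: no state to touch */
	set_rssi(&dm, 99, -42);		/* ignored: out of range */
	return 0;
}
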
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 8eb2b39a0b67..9c7e626aa703 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -24,9 +24,9 @@
#define read_next_pair(array, v1, v2, i) \
do { \
- i += 2; \
- v1 = array[i]; \
- v2 = array[i+1]; \
+ i += 2; \
+ v1 = array[i]; \
+ v2 = array[i+1]; \
} while (0)
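
read_next_pair() is only re-indented here; it advances the cursor by one {address, value} pair and loads the next pair, which is how the BB config tables are walked. A runnable sketch of that traversal with a hypothetical terminator (the 0xffff sentinel below is an assumption, not the driver's actual table format):

#include <stdio.h>

#define read_next_pair(array, v1, v2, i) \
	do { \
		i += 2; \
		v1 = array[i]; \
		v2 = array[i + 1]; \
	} while (0)

int main(void)
{
	/* Hypothetical {register, value} table. */
	unsigned int table[] = {
		0x800, 0x80040000,
		0x804, 0x00000003,
		0x808, 0x0000fc00,
		0xffff, 0x0,
	};
	unsigned int i = 0;
	unsigned int reg = table[0], val = table[1];

	while (reg != 0xffff) {
		printf("write 0x%03x = 0x%08x\n", reg, val);
		read_next_pair(table, reg, val, i);
	}
	return 0;
}
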
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 170e3de5eab4..38e9fdc312d3 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -31,18 +31,7 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
uint cnt = 0;
char buf[128];
- if (IS_81XXC(chip_vers)) {
- cnt += sprintf((buf+cnt), "Chip Version Info: %s_",
- IS_92C_SERIAL(chip_vers) ?
- "CHIP_8192C" : "CHIP_8188C");
- } else if (IS_92D(chip_vers)) {
- cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8192D_");
- } else if (IS_8723_SERIES(chip_vers)) {
- cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8723A_");
- } else if (IS_8188E(chip_vers)) {
- cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
- }
-
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
cnt += sprintf((buf+cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
"Normal_Chip" : "Test_Chip");
cnt += sprintf((buf+cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
@@ -60,18 +49,8 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
else
cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_",
chip_vers.CUTVersion);
-
- if (IS_1T1R(chip_vers))
- cnt += sprintf((buf+cnt), "1T1R_");
- else if (IS_1T2R(chip_vers))
- cnt += sprintf((buf+cnt), "1T2R_");
- else if (IS_2T2R(chip_vers))
- cnt += sprintf((buf+cnt), "2T2R_");
- else
- cnt += sprintf((buf+cnt), "UNKNOWN_RFTYPE(%d)_",
- chip_vers.RFType);
-
- cnt += sprintf((buf+cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+ cnt += sprintf((buf+cnt), "1T1R_");
+ cnt += sprintf((buf+cnt), "RomVer(0)\n");
pr_info("%s", buf);
}
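
dump_chip_info() keeps the existing cnt += sprintf(...) accumulation into a 128-byte stack buffer, which has no bounds checking. A bounded variant of the same accumulation idiom, sketched in userspace (this is not what the driver does today, only a safer shape for the pattern):

#include <stdio.h>

/* Append s to buf, never writing past size; once full, drop quietly. */
static size_t append(char *buf, size_t size, size_t cnt, const char *s)
{
	int n;

	if (cnt >= size)
		return cnt;
	n = snprintf(buf + cnt, size - cnt, "%s", s);
	return n < 0 ? cnt : cnt + (size_t)n;
}

int main(void)
{
	char buf[128];
	size_t cnt = 0;

	cnt = append(buf, sizeof(buf), cnt, "Chip Version Info: CHIP_8188E_");
	cnt = append(buf, sizeof(buf), cnt, "Normal_Chip_");
	cnt = append(buf, sizeof(buf), cnt, "1T1R_RomVer(0)\n");
	printf("%s", buf);
	return 0;
}
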
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 5edb5c41c8e7..85c17ef942f3 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -131,14 +131,6 @@ void rtw_hal_get_hwreg(struct adapter *adapt, u8 variable, u8 *val)
adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
}
-u8 rtw_hal_set_def_var(struct adapter *adapt, enum hal_def_variable var,
- void *val)
-{
- if (adapt->HalFunc.SetHalDefVarHandler)
- return adapt->HalFunc.SetHalDefVarHandler(adapt, var, val);
- return _FAIL;
-}
-
u8 rtw_hal_get_def_var(struct adapter *adapt,
enum hal_def_variable var, void *val)
{
@@ -156,22 +148,6 @@ void rtw_hal_set_odm_var(struct adapter *adapt,
val1, set);
}
-void rtw_hal_enable_interrupt(struct adapter *adapt)
-{
- if (adapt->HalFunc.enable_interrupt)
- adapt->HalFunc.enable_interrupt(adapt);
- else
- DBG_88E("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
-}
-
-void rtw_hal_disable_interrupt(struct adapter *adapt)
-{
- if (adapt->HalFunc.disable_interrupt)
- adapt->HalFunc.disable_interrupt(adapt);
- else
- DBG_88E("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
-}
-
u32 rtw_hal_inirp_init(struct adapter *adapt)
{
u32 rst = _FAIL;
@@ -269,14 +245,6 @@ u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
return data;
}
-void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data)
-{
- if (adapt->HalFunc.write_rfreg)
- adapt->HalFunc.write_rfreg(adapt, rfpath, regaddr,
- bitmask, data);
-}
-
void rtw_hal_set_bwmode(struct adapter *adapt,
enum ht_channel_width bandwidth, u8 offset)
{
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 28b5e7bd4fc0..710fdc3449f8 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -1170,13 +1170,10 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
}
for (i = 0; i < sta_cnt; i++) {
- if (PWDB_rssi[i] != (0)) {
- if (pHalData->fw_ractrl) {
- /* Report every sta's RSSI to FW */
- } else {
- ODM_RA_SetRSSI_8188E(
- &(pHalData->odmpriv), (PWDB_rssi[i]&0xFF), (u8)((PWDB_rssi[i]>>16) & 0xFF));
- }
+ if (PWDB_rssi[i] != 0) {
+ ODM_RA_SetRSSI_8188E(&pHalData->odmpriv,
+ PWDB_rssi[i] & 0xFF,
+ (PWDB_rssi[i] >> 16) & 0xFF);
}
}
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 097092772a86..38845d17d593 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -102,7 +102,7 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
}
}
rtl88eu_dm_txpower_track_adjust(&hal_data->odmpriv, 1, &direction,
- &pwrtrac_value);
+ &pwrtrac_value);
if (direction == 1) {
/* Increase TX power */
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 455ecdc8d9fa..954cade478db 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -295,7 +295,7 @@ static bool rf6052_conf_para(struct adapter *adapt)
break;
}
- if (rtstatus != true)
+ if (!rtstatus)
return false;
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 86347f2ccdfd..0a62bfa210fe 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -127,27 +127,6 @@ exit:
return ret;
}
-u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
-{
- u8 buf[3];
- u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (haldata->fw_ractrl) {
-
- memset(buf, 0, 3);
- put_unaligned_le32(mask, buf);
-
- FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
- } else {
- DBG_88E("==>%s fw dont support RA\n", __func__);
- res = _FAIL;
- }
-
-
- return res;
-}
-
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
/* arg[0:4] = macid */
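
The deleted rtl8188e_set_raid_cmd() packed a 32-bit rate mask with put_unaligned_le32() into a 3-byte buffer, which appears to write one byte past the array; with fw_ractrl hard-wired to false elsewhere in this series, the function was unreachable anyway. A userspace sketch of the little-endian packing, with the buffer sized correctly:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's put_unaligned_le32(): store a
 * 32-bit value lowest byte first, at any alignment. */
static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[4] = { 0 };	/* 4 bytes: the deleted code used u8 buf[3] */

	put_le32(0x0fffffff, buf);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
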
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 7904d2260f2c..a6295ca6a73e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -128,7 +128,7 @@ static void rtl8188e_free_hal_data(struct adapter *padapter)
padapter->HalData = NULL;
}
-static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
+static void ReadChipVersion8188E(struct adapter *padapter)
{
u32 value32;
struct HAL_VERSION ChipVersion;
@@ -137,41 +137,17 @@ static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
pHalData = GET_HAL_DATA(padapter);
value32 = usb_read32(padapter, REG_SYS_CFG);
- ChipVersion.ICType = CHIP_8188E;
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
-
- ChipVersion.RFType = RF_TYPE_1T1R;
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
- /* For regulator mode. by tynli. 2011.01.14 */
- pHalData->RegulatorMode = ((value32 & TRP_BT_EN) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
-
- ChipVersion.ROMVer = 0; /* ROM code version. */
-
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
-
- if (IS_1T2R(ChipVersion)) {
- pHalData->rf_type = RF_1T2R;
- pHalData->NumTotalRFPath = 2;
- } else if (IS_2T2R(ChipVersion)) {
- pHalData->rf_type = RF_2T2R;
- pHalData->NumTotalRFPath = 2;
- } else{
- pHalData->rf_type = RF_1T1R;
- pHalData->NumTotalRFPath = 1;
- }
+ pHalData->rf_type = RF_1T1R;
+ pHalData->NumTotalRFPath = 1;
MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
-
- return ChipVersion;
-}
-
-static void rtl8188e_read_chip_version(struct adapter *padapter)
-{
- ReadChipVersion8188E(padapter);
}
static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -220,7 +196,7 @@ void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
pHalFunc->dm_init = &rtl8188e_init_dm_priv;
- pHalFunc->read_chip_version = &rtl8188e_read_chip_version;
+ pHalFunc->read_chip_version = &ReadChipVersion8188E;
pHalFunc->set_bwmode_handler = &phy_set_bw_mode;
pHalFunc->set_channel_handler = &phy_sw_chnl;
@@ -232,7 +208,6 @@ void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
pHalFunc->read_rfreg = &phy_query_rf_reg;
- pHalFunc->write_rfreg = &phy_set_rf_reg;
pHalFunc->sreset_init_value = &sreset_init_value;
pHalFunc->sreset_get_wifi_status = &sreset_get_wifi_status;
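
Dropping write_rfreg from hal_ops means deleting the table assignment here, the rtw_hal_write_rfreg() wrapper, and the struct member in the same patch; otherwise a caller would dispatch through a NULL function pointer. A tiny sketch of why the rtw_hal_* wrappers null-check their hook before calling it (names and the return convention are illustrative):

#include <stdio.h>

struct hal_ops_sketch {
	unsigned int (*read_rfreg)(unsigned int addr);
	/* write_rfreg removed: no assignment, no wrapper, no callers */
};

static unsigned int read_rf_stub(unsigned int addr)
{
	return addr ^ 0xdeadbeef;	/* pretend hardware read */
}

/* Wrapper tolerates a missing handler, like the rtw_hal_* helpers. */
static unsigned int hal_read_rfreg(struct hal_ops_sketch *ops, unsigned int addr)
{
	if (ops->read_rfreg)
		return ops->read_rfreg(addr);
	return 0;	/* _FAIL-style default when the hook is absent */
}

int main(void)
{
	struct hal_ops_sketch ops = { .read_rfreg = read_rf_stub };

	printf("0x%08x\n", hal_read_rfreg(&ops, 0x10));
	return 0;
}
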
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 872622214264..1ef878fd997b 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -743,19 +743,16 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
if (Adapter->registrypriv.mp_mode == 1) {
_InitRxSetting(Adapter);
Adapter->bFWReady = false;
- haldata->fw_ractrl = false;
} else {
status = rtl88eu_download_fw(Adapter);
if (status) {
DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
- haldata->fw_ractrl = false;
return status;
} else {
RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
Adapter->bFWReady = true;
- haldata->fw_ractrl = false;
}
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -1703,7 +1700,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* Forcibly leave RF low power mode for 1T1R to prevent conflicting setting in Fw power */
/* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
- if ((psmode != PS_MODE_ACTIVE) && (!IS_92C_SERIAL(haldata->VersionID)))
+ if (psmode != PS_MODE_ACTIVE)
ODM_RF_Saving(podmpriv, true);
rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
}
@@ -1961,75 +1958,6 @@ GetHalDefVar8188EUsb(
return bResult;
}
-/* */
-/* Description: */
-/* Change default setting of specified variable. */
-/* */
-static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
- u8 bResult = _SUCCESS;
-
- switch (eVariable) {
- case HAL_DEF_DBG_DM_FUNC:
- {
- u8 dm_func = *((u8 *)pValue);
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
- if (dm_func == 0) { /* disable all dynamic func */
- podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
- DBG_88E("==> Disable all dynamic function...\n");
- } else if (dm_func == 1) {/* disable DIG */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
- DBG_88E("==> Disable DIG...\n");
- } else if (dm_func == 2) {/* disable High power */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
- } else if (dm_func == 3) {/* disable tx power tracking */
- podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
- DBG_88E("==> Disable tx power tracking...\n");
- } else if (dm_func == 5) {/* disable antenna diversity */
- podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
- } else if (dm_func == 6) {/* turn on all dynamic func */
- if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
- struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
- pDigTable->CurIGValue = usb_read8(Adapter, 0xc50);
- }
- podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
- DBG_88E("==> Turn on all dynamic function...\n");
- }
- }
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- haldata->bDumpRxPkt = *((u8 *)pValue);
- break;
- case HAL_DEF_DBG_DUMP_TXPKT:
- haldata->bDumpTxPkt = *((u8 *)pValue);
- break;
- case HW_DEF_FA_CNT_DUMP:
- {
- u8 bRSSIDump = *((u8 *)pValue);
- struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
- if (bRSSIDump)
- dm_ocm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT;
- else
- dm_ocm->DebugComponents = 0;
- }
- break;
- case HW_DEF_ODM_DBG_FLAG:
- {
- u64 DebugComponents = *((u64 *)pValue);
- struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
- dm_ocm->DebugComponents = DebugComponents;
- }
- break;
- default:
- bResult = _FAIL;
- break;
- }
-
- return bResult;
-}
-
static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
u8 init_rate = 0;
@@ -2085,28 +2013,9 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
init_rate = get_highest_rate_idx(mask)&0x3f;
- if (haldata->fw_ractrl) {
- u8 arg;
-
- arg = mac_id & 0x1f;/* MACID */
- arg |= BIT(7);
- if (shortGIrate)
- arg |= BIT(5);
- mask |= ((raid << 28) & 0xf0000000);
- DBG_88E("update raid entry, mask=0x%x, arg=0x%x\n", mask, arg);
- psta->ra_mask = mask;
- mask |= ((raid << 28) & 0xf0000000);
+ ODM_RA_UpdateRateInfo_8188E(&haldata->odmpriv, mac_id,
+ raid, mask, shortGIrate);
- /* to do ,for 8188E-SMIC */
- rtl8188e_set_raid_cmd(adapt, mask);
- } else {
- ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv),
- mac_id,
- raid,
- mask,
- shortGIrate
- );
- }
/* set ra_id */
psta->raid = raid;
psta->init_rate = init_rate;
@@ -2156,7 +2065,6 @@ static void rtl8188eu_init_default_value(struct adapter *adapt)
pwrctrlpriv = &adapt->pwrctrlpriv;
/* init default value */
- haldata->fw_ractrl = false;
if (!pwrctrlpriv->bkeepfwalive)
haldata->LastHMEBoxNum = 0;
@@ -2200,7 +2108,6 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
halfunc->SetHwRegHandler = &SetHwReg8188EU;
halfunc->GetHwRegHandler = &GetHwReg8188EU;
halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
- halfunc->SetHalDefVarHandler = &SetHalDefVar8188EUsb;
halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 97047cf06780..56b4ff08e509 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -20,20 +20,6 @@
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
-enum HAL_IC_TYPE {
- CHIP_8192S = 0,
- CHIP_8188C = 1,
- CHIP_8192C = 2,
- CHIP_8192D = 3,
- CHIP_8723A = 4,
- CHIP_8188E = 5,
- CHIP_8881A = 6,
- CHIP_8812A = 7,
- CHIP_8821A = 8,
- CHIP_8723B = 9,
- CHIP_8192E = 10,
-};
-
enum HAL_CHIP_TYPE {
TEST_CHIP = 0,
NORMAL_CHIP = 1,
@@ -55,48 +41,20 @@ enum HAL_VENDOR {
CHIP_VENDOR_UMC = 1,
};
-enum HAL_RF_TYPE {
- RF_TYPE_1T1R = 0,
- RF_TYPE_1T2R = 1,
- RF_TYPE_2T2R = 2,
- RF_TYPE_2T3R = 3,
- RF_TYPE_2T4R = 4,
- RF_TYPE_3T3R = 5,
- RF_TYPE_3T4R = 6,
- RF_TYPE_4T4R = 7,
-};
-
struct HAL_VERSION {
- enum HAL_IC_TYPE ICType;
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
- enum HAL_RF_TYPE RFType;
- u8 ROMVer;
};
/* Get element */
-#define GET_CVID_IC_TYPE(version) (((version).ICType))
#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
-#define GET_CVID_RF_TYPE(version) (((version).RFType))
#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
-#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
/* Common Macro. -- */
/* HAL_VERSION VersionID */
-/* HAL_IC_TYPE_E */
-#define IS_81XXC(version) \
- (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
- (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
-#define IS_8723_SERIES(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
-#define IS_92D(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8192D) ? true : false)
-#define IS_8188E(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8188E) ? true : false)
-
/* HAL_CHIP_TYPE_E */
#define IS_TEST_CHIP(version) \
((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
@@ -122,46 +80,4 @@ struct HAL_VERSION {
#define IS_CHIP_VENDOR_UMC(version) \
((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
-/* HAL_RF_TYPE_E */
-#define IS_1T1R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
-#define IS_1T2R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
-#define IS_2T2R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
-
-/* Chip version Macro. -- */
-#define IS_81XXC_TEST_CHIP(version) \
- ((IS_81XXC(version) && (!IS_NORMAL_CHIP(version))) ? true : false)
-
-#define IS_92C_SERIAL(version) \
- ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
-#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_A_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_B_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_C_CUT(version) ? true : false) : false) : false)
-
-#define IS_NORMAL_CHIP92D(version) \
- ((IS_92D(version)) ? \
- ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) : false)
-
-#define IS_92D_SINGLEPHY(version) \
- ((IS_92D(version)) ? (IS_2T2R(version) ? true : false) : false)
-#define IS_92D_C_CUT(version) \
- ((IS_92D(version)) ? (IS_C_CUT(version) ? true : false) : false)
-#define IS_92D_D_CUT(version) \
- ((IS_92D(version)) ? (IS_D_CUT(version) ? true : false) : false)
-#define IS_92D_E_CUT(version) \
- ((IS_92D(version)) ? (IS_E_CUT(version) ? true : false) : false)
-
-#define IS_8723A_A_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
-#define IS_8723A_B_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
-
#endif
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 3b476d80f64d..e73c6341248e 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -171,8 +171,6 @@ struct hal_ops {
void (*read_adapter_info)(struct adapter *padapter);
- void (*enable_interrupt)(struct adapter *padapter);
- void (*disable_interrupt)(struct adapter *padapter);
s32 (*interrupt_handler)(struct adapter *padapter);
void (*set_bwmode_handler)(struct adapter *padapter,
@@ -190,9 +188,6 @@ struct hal_ops {
u8 (*GetHalDefVarHandler)(struct adapter *padapter,
enum hal_def_variable eVariable,
void *pValue);
- u8 (*SetHalDefVarHandler)(struct adapter *padapter,
- enum hal_def_variable eVariable,
- void *pValue);
void (*SetHalODMVarHandler)(struct adapter *padapter,
enum hal_odm_variable eVariable,
@@ -216,9 +211,6 @@ struct hal_ops {
u32 (*read_rfreg)(struct adapter *padapter,
enum rf_radio_path eRFPath, u32 RegAddr,
u32 BitMask);
- void (*write_rfreg)(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
void (*sreset_init_value)(struct adapter *padapter);
u8 (*sreset_get_wifi_status)(struct adapter *padapter);
@@ -267,8 +259,6 @@ void rtw_hal_chip_configure(struct adapter *padapter);
void rtw_hal_read_chip_info(struct adapter *padapter);
void rtw_hal_read_chip_version(struct adapter *padapter);
-u8 rtw_hal_set_def_var(struct adapter *padapter,
- enum hal_def_variable eVariable, void *pValue);
u8 rtw_hal_get_def_var(struct adapter *padapter,
enum hal_def_variable eVariable, void *pValue);
@@ -276,9 +266,6 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
enum hal_odm_variable eVariable, void *pValue1,
bool bSet);
-void rtw_hal_enable_interrupt(struct adapter *padapter);
-void rtw_hal_disable_interrupt(struct adapter *padapter);
-
u32 rtw_hal_inirp_init(struct adapter *padapter);
u32 rtw_hal_inirp_deinit(struct adapter *padapter);
@@ -300,9 +287,6 @@ void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
u32 rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
u32 RegAddr, u32 BitMask);
-void rtw_hal_write_rfreg(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
void rtw_hal_set_bwmode(struct adapter *padapter,
enum ht_channel_width Bandwidth, u8 Offset);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index b129ad148b47..6400f75707bd 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -477,63 +477,9 @@ struct ieee80211_snap_hdr {
#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-
-#define WLAN_AUTH_CHALLENGE_LEN 128
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-
/* Non standard? Not in <linux/ieee80211.h> */
#define WLAN_REASON_EXPIRATION_CHK 65535
-/* Information Element IDs */
-#define WLAN_EID_SSID 0
-#define WLAN_EID_SUPP_RATES 1
-#define WLAN_EID_FH_PARAMS 2
-#define WLAN_EID_DS_PARAMS 3
-#define WLAN_EID_CF_PARAMS 4
-#define WLAN_EID_TIM 5
-#define WLAN_EID_IBSS_PARAMS 6
-#define WLAN_EID_CHALLENGE 16
-/* EIDs defined by IEEE 802.11h - START */
-#define WLAN_EID_PWR_CONSTRAINT 32
-#define WLAN_EID_PWR_CAPABILITY 33
-#define WLAN_EID_TPC_REQUEST 34
-#define WLAN_EID_TPC_REPORT 35
-#define WLAN_EID_SUPPORTED_CHANNELS 36
-#define WLAN_EID_CHANNEL_SWITCH 37
-#define WLAN_EID_MEASURE_REQUEST 38
-#define WLAN_EID_MEASURE_REPORT 39
-#define WLAN_EID_QUITE 40
-#define WLAN_EID_IBSS_DFS 41
-/* EIDs defined by IEEE 802.11h - END */
-#define WLAN_EID_ERP_INFO 42
-#define WLAN_EID_HT_CAP 45
-#define WLAN_EID_RSN 48
-#define WLAN_EID_EXT_SUPP_RATES 50
-#define WLAN_EID_MOBILITY_DOMAIN 54
-#define WLAN_EID_FAST_BSS_TRANSITION 55
-#define WLAN_EID_TIMEOUT_INTERVAL 56
-#define WLAN_EID_RIC_DATA 57
-#define WLAN_EID_HT_OPERATION 61
-#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
-#define WLAN_EID_20_40_BSS_COEXISTENCE 72
-#define WLAN_EID_20_40_BSS_INTOLERANT 73
-#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
-#define WLAN_EID_MMIE 76
-#define WLAN_EID_VENDOR_SPECIFIC 221
-#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
-
#define IEEE80211_MGMT_HDR_LEN 24
#define IEEE80211_DATA_HDR3_LEN 24
#define IEEE80211_DATA_HDR4_LEN 30
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 00472e0c00a0..cf9ca685eb77 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -123,7 +123,7 @@ static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
#define BIT35 0x0800000000
#define BIT36 0x1000000000
-extern int RTW_STATUS_CODE(int error_code);
+int RTW_STATUS_CODE(int error_code);
#define rtw_update_mem_stat(flag, sz) do {} while (0)
u8 *_rtw_malloc(u32 sz);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 0809963ce6aa..fdeb603b6cc1 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -38,8 +38,7 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
void rtw_free_recv_priv(struct recv_priv *precvpriv);
-int rtw_os_recv_resource_alloc(struct adapter *adapt,
- struct recv_frame *recvfr);
+void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index 42b1f22424eb..f813ce0563f8 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -107,7 +107,6 @@ struct P2P_PS_CTWPeriod_t {
/* host message to firmware cmd */
void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
u8 rssi_level);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7d8e022925e0..cbad364f189c 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -188,15 +188,8 @@ struct txpowerinfo24g {
#define EFUSE_PROTECT_BYTES_BANK 16
-/* For RTL8723 regulator mode. */
-enum rt_regulator_mode {
- RT_SWITCHING_REGULATOR = 0,
- RT_LDO_REGULATOR = 1,
-};
-
struct hal_data_8188e {
struct HAL_VERSION VersionID;
- enum rt_regulator_mode RegulatorMode; /* switching regulator or LDO */
u16 CustomerID;
u8 *pfirmware;
u32 fwsize;
@@ -301,7 +294,6 @@ struct hal_data_8188e {
/* for host message to fw */
u8 LastHMEBoxNum;
- u8 fw_ractrl;
u8 RegTxPause;
/* Beacon function related global variable. */
u32 RegBcnCtrlVal;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 8c7e8a36aa13..4c992573e3ca 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -535,7 +535,8 @@ void rtw_generate_random_ibss(u8 *pibss);
struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr);
struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
+void rtw_free_assoc_resources(struct adapter *adapter);
+void rtw_free_assoc_resources_locked(struct adapter *adapter);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 2417809f3aef..9093a5f94d32 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -457,9 +457,9 @@ struct mlme_ext_priv {
int init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
-extern void init_mlme_ext_timer(struct adapter *padapter);
-extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
-extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
+void init_mlme_ext_timer(struct adapter *padapter);
+void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
+struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
unsigned char networktype_to_raid(unsigned char network_type);
u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
@@ -554,7 +554,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
int cam_idx);
void beacon_timing_control(struct adapter *padapter);
-extern u8 set_tx_beacon_cmd(struct adapter *padapter);
+u8 set_tx_beacon_cmd(struct adapter *padapter);
unsigned int setup_beacon_frame(struct adapter *padapter,
unsigned char *beacon_frame);
void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index aa1fd87c47fb..a493d4c37ef1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -257,7 +257,6 @@ s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
void LPS_Enter(struct adapter *adapter);
void LPS_Leave(struct adapter *adapter);
-void rtw_set_ips_deny(struct adapter *adapter, u32 ms);
int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
const char *caller);
#define rtw_pwr_wakeup(adapter) \
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 9612490539b3..d4e78326fc8d 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -113,7 +113,6 @@ struct sta_info {
u8 raid;
u8 init_rate;
- u32 ra_mask;
u8 wireless_mode; /* NETWORK_TYPE */
struct stainfo_stats sta_stats;
@@ -351,19 +350,19 @@ static inline u32 wifi_mac_hash(u8 *mac)
return x;
}
-extern u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
-extern u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
#define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off);
-extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-extern u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
-extern void rtw_free_all_stainfo(struct adapter *adapt);
-extern struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-extern u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
-extern struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
+void rtw_free_all_stainfo(struct adapter *adapt);
+struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
+struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
+u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index a08a2e045e59..dba8af1ec41e 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -649,13 +649,6 @@ enum ht_cap_ampdu_factor {
#define IEEE80211_MAX_AMPDU_BUF 0x40
-/* Spatial Multiplexing Power Save Modes */
-#define WLAN_HT_CAP_SM_PS_STATIC 0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
-#define WLAN_HT_CAP_SM_PS_INVALID 2
-#define WLAN_HT_CAP_SM_PS_DISABLED 3
-
-
#define OP_MODE_PURE 0
#define OP_MODE_MAY_BE_LEGACY_STAS 1
#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 38dba1435c1e..969574926e21 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -179,8 +179,8 @@ static char *translate_scan(struct adapter *padapter,
cap = le16_to_cpu(le_tmp);
- if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
- if (cap & WLAN_CAPABILITY_BSS)
+ if (!WLAN_CAPABILITY_IS_STA_BSS(cap)) {
+ if (cap & WLAN_CAPABILITY_ESS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
@@ -1871,7 +1871,7 @@ static int rtw_wx_set_auth(struct net_device *dev,
rtw_disassoc_cmd(padapter, 500, false);
DBG_88E("%s...call rtw_indicate_disconnect\n ", __func__);
rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
}
ret = wpa_set_auth_algs(dev, (u32)param->value);
break;
@@ -2485,16 +2485,13 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
static int rtw_hostapd_sta_flush(struct net_device *dev)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
DBG_88E("%s\n", __func__);
flush_all_cam_entry(padapter); /* clear CAM */
- ret = rtw_sta_flush(padapter);
-
- return ret;
+ return rtw_sta_flush(padapter);
}
static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
@@ -2666,7 +2663,8 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (psta) {
- if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC)) {
+ if (psta->wpa_ie[0] == WLAN_EID_RSN ||
+ psta->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
int wpa_ie_len;
int copy_len;
@@ -2809,7 +2807,6 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -2820,13 +2817,11 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
return -EINVAL;
- ret = rtw_acl_remove_sta(padapter, param->sta_addr);
- return ret;
+ return rtw_acl_remove_sta(padapter, param->sta_addr);
}
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -2837,8 +2832,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
return -EINVAL;
- ret = rtw_acl_add_sta(padapter, param->sta_addr);
- return ret;
+ return rtw_acl_add_sta(padapter, param->sta_addr);
}
static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index a14e79f31abf..2361bce480c3 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -1175,7 +1175,7 @@ static int netdev_close(struct net_device *pnetdev)
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
/* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
/* s2-4. */
rtw_free_network_queue(padapter, true);
/* Close LED */
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 05701328dce4..3ebb8b206e46 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -17,8 +17,6 @@
*
*
******************************************************************************/
-#define _RECV_OSDEP_C_
-
#include <osdep_service.h>
#include <drv_types.h>
@@ -29,26 +27,22 @@
#include <usb_ops_linux.h>
/* alloc os related resource in struct recv_frame */
-int rtw_os_recv_resource_alloc(struct adapter *padapter,
- struct recv_frame *precvframe)
+void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
{
precvframe->pkt_newalloc = NULL;
precvframe->pkt = NULL;
- return _SUCCESS;
}
/* alloc os related resource in struct recv_buf */
int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
struct recv_buf *precvbuf)
{
- int res = _SUCCESS;
-
- precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (precvbuf->purb == NULL)
- res = _FAIL;
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- return res;
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!precvbuf->purb)
+ return _FAIL;
+ return _SUCCESS;
}
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
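
The rework above initializes pskb and reuse before the single fallible call, so the failure path returns without any unwinding. The same shape as a standalone sketch (malloc() stands in for usb_alloc_urb(), and the return codes only loosely mirror _FAIL/_SUCCESS):

#include <stdlib.h>

struct recv_buf_sketch {
	void *purb;	/* stands in for the URB pointer */
	void *pskb;
	int reuse;
};

/* Known state first, single fallible step last: the error path
 * needs no cleanup, mirroring the reworked allocator above. */
static int recvbuf_alloc(struct recv_buf_sketch *b)
{
	b->pskb = NULL;
	b->reuse = 0;
	b->purb = malloc(64);	/* stand-in for usb_alloc_urb(0, GFP_KERNEL) */
	if (!b->purb)
		return -1;	/* _FAIL */
	return 0;		/* _SUCCESS */
}

int main(void)
{
	struct recv_buf_sketch b;

	if (recvbuf_alloc(&b) == 0)
		free(b.purb);
	return 0;
}
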
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d0d4335b444c..33bfe054f867 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -17,8 +17,8 @@
*
*
******************************************************************************/
-#define _HCI_INTF_C_
+#define pr_fmt(fmt) "R8188EU: " fmt
#include <osdep_service.h>
#include <drv_types.h>
#include <recv_osdep.h>
@@ -55,7 +55,6 @@ MODULE_DEVICE_TABLE(usb, rtw_usb_id_tbl);
static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
{
int i;
- int status = _FAIL;
struct dvobj_priv *pdvobjpriv;
struct usb_host_config *phost_conf;
struct usb_config_descriptor *pconf_desc;
@@ -64,10 +63,9 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_endpoint_descriptor *pendp_desc;
struct usb_device *pusbd;
-
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
if (pdvobjpriv == NULL)
- goto exit;
+ return NULL;
pdvobjpriv->pusbintf = usb_intf;
pusbd = interface_to_usbdev(usb_intf);
@@ -115,20 +113,13 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
mutex_init(&pdvobjpriv->usb_vendor_req_mutex);
pdvobjpriv->usb_vendor_req_buf = kzalloc(MAX_USB_IO_CTL_SIZE, GFP_KERNEL);
- if (!pdvobjpriv->usb_vendor_req_buf)
- goto free_dvobj;
-
- usb_get_dev(pusbd);
-
- status = _SUCCESS;
-
-free_dvobj:
- if (status != _SUCCESS && pdvobjpriv) {
+ if (!pdvobjpriv->usb_vendor_req_buf) {
usb_set_intfdata(usb_intf, NULL);
kfree(pdvobjpriv);
- pdvobjpriv = NULL;
+ return NULL;
}
-exit:
+ usb_get_dev(pusbd);
+
return pdvobjpriv;
}
@@ -136,7 +127,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
{
struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
-
usb_set_intfdata(usb_intf, NULL);
if (dvobj) {
/* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
@@ -150,7 +140,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
* on sitesurvey for the first time when
* device is up . Reset usb port for sitesurvey
* fail issue. */
- DBG_88E("usb attached..., try to reset usb device\n");
+ pr_debug("usb attached..., try to reset usb device\n");
usb_reset_device(interface_to_usbdev(usb_intf));
}
}
@@ -201,7 +191,7 @@ static void rtw_dev_unload(struct adapter *padapter)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
if (padapter->bup) {
- DBG_88E("===> rtw_dev_unload\n");
+ pr_debug("===> rtw_dev_unload\n");
padapter->bDriverStopped = true;
if (padapter->xmitpriv.ack_tx)
rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
@@ -224,7 +214,7 @@ static void rtw_dev_unload(struct adapter *padapter)
("r871x_dev_unload():padapter->bup == false\n"));
}
- DBG_88E("<=== rtw_dev_unload\n");
+ pr_debug("<=== rtw_dev_unload\n");
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
}
@@ -236,16 +226,13 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
struct net_device *pnetdev = padapter->pnetdev;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- int ret = 0;
u32 start_time = jiffies;
-
- DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+ pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved)) {
- DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+ pr_debug("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
padapter->bup, padapter->bDriverStopped,
padapter->bSurpriseRemoved);
goto exit;
@@ -267,7 +254,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
- DBG_88E("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
+ pr_debug("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
__func__, __LINE__,
pmlmepriv->cur_network.network.Ssid.Ssid,
pmlmepriv->cur_network.network.MacAddress,
@@ -279,7 +266,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
/* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
+ rtw_free_assoc_resources(padapter);
/* s2-4. */
rtw_free_network_queue(padapter, true);
@@ -293,10 +280,10 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_indicate_disconnect(padapter);
exit:
- DBG_88E("<=== %s return %d.............. in %dms\n", __func__
- , ret, rtw_get_passing_time_ms(start_time));
+ pr_debug("<=== %s .............. in %dms\n", __func__,
+ rtw_get_passing_time_ms(start_time));
- return ret;
+ return 0;
}
static int rtw_resume_process(struct adapter *padapter)
@@ -306,7 +293,7 @@ static int rtw_resume_process(struct adapter *padapter)
int ret = -1;
u32 start_time = jiffies;
- DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+ pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
if (padapter) {
pnetdev = padapter->pnetdev;
@@ -319,7 +306,7 @@ static int rtw_resume_process(struct adapter *padapter)
rtw_reset_drv_sw(padapter);
pwrpriv->bkeepfwalive = false;
- DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+ pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
if (pm_netdev_open(pnetdev, true) != 0)
goto exit;
@@ -334,10 +321,9 @@ static int rtw_resume_process(struct adapter *padapter)
exit:
if (pwrpriv)
pwrpriv->bInSuspend = false;
- DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
+ pr_debug("<=== %s return %d.............. in %dms\n", __func__,
ret, rtw_get_passing_time_ms(start_time));
-
return ret;
}
@@ -407,8 +393,8 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
dvobj->pusbdev->do_remote_wakeup = 1;
pusb_intf->needs_remote_wakeup = 1;
device_init_wakeup(&pusb_intf->dev, 1);
- DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
- DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
+ pr_debug("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
+ pr_debug("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
device_may_wakeup(&pusb_intf->dev));
}
#endif
@@ -416,13 +402,13 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* 2012-07-11 Move here to prevent the 8723AS-VAU BT auto
* suspend influence */
if (usb_autopm_get_interface(pusb_intf) < 0)
- DBG_88E("can't get autopm:\n");
+ pr_debug("can't get autopm:\n");
/* alloc dev name after read efuse. */
rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
- DBG_88E("MAC Address from pnetdev->dev_addr = %pM\n",
+ pr_debug("MAC Address from pnetdev->dev_addr = %pM\n",
pnetdev->dev_addr);
/* step 6. Tell the network stack we exist */
@@ -431,7 +417,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
goto free_hal_data;
}
- DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
+ pr_debug("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
, padapter->bDriverStopped
, padapter->bSurpriseRemoved
, padapter->bup
@@ -475,7 +461,7 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
rtw_cancel_all_timer(if1);
rtw_dev_unload(if1);
- DBG_88E("+r871xu_dev_remove, hw_init_completed=%d\n",
+ pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n",
if1->hw_init_completed);
rtw_free_drv_sw(if1);
if (pnetdev)
@@ -485,7 +471,6 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
{
struct adapter *if1 = NULL;
- int status = _FAIL;
struct dvobj_priv *dvobj;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
@@ -500,19 +485,18 @@ static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device
if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
if (if1 == NULL) {
- DBG_88E("rtw_init_primarystruct adapter Failed!\n");
+ pr_debug("rtw_init_primarystruct adapter Failed!\n");
goto free_dvobj;
}
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
- status = _SUCCESS;
+ return 0;
free_dvobj:
- if (status != _SUCCESS)
- usb_dvobj_deinit(pusb_intf);
+ usb_dvobj_deinit(pusb_intf);
exit:
- return status == _SUCCESS ? 0 : -ENODEV;
+ return -ENODEV;
}
/*
@@ -524,8 +508,7 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
struct adapter *padapter = dvobj->if1;
-
- DBG_88E("+rtw_dev_remove\n");
+ pr_debug("+rtw_dev_remove\n");
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
if (!pusb_intf->unregistering)
@@ -541,7 +524,7 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
usb_dvobj_deinit(pusb_intf);
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
- DBG_88E("-r871xu_dev_remove, done\n");
+ pr_debug("-r871xu_dev_remove, done\n");
}
static struct usb_driver rtl8188e_usb_drv = {
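
rtw_drv_init() now returns 0 directly on success and reaches the cleanup labels only on failure, instead of threading a status flag through both paths. A compact sketch of that early-return-plus-unwind shape (the stub allocators and the -19 value standing in for -ENODEV are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void *usb_dvobj_init_stub(void) { return malloc(32); }
static void *usb_if1_init_stub(void)   { return malloc(32); }

/* Success returns before the labels; each label unwinds one step. */
static int drv_init_sketch(void **dvobj_out, void **if1_out)
{
	void *dvobj, *if1;

	dvobj = usb_dvobj_init_stub();
	if (!dvobj)
		goto exit;

	if1 = usb_if1_init_stub();
	if (!if1)
		goto free_dvobj;

	*dvobj_out = dvobj;
	*if1_out = if1;
	return 0;

free_dvobj:
	free(dvobj);	/* usb_dvobj_deinit() stand-in */
exit:
	return -19;	/* stands in for -ENODEV */
}

int main(void)
{
	void *dvobj = NULL, *if1 = NULL;

	printf("init: %d\n", drv_init_sketch(&dvobj, &if1));
	free(if1);
	free(dvobj);
	return 0;
}
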
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index ef9da863c335..fcf9b3bcf76a 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -160,21 +160,6 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
-u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel)
-{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 MaxTxPwrInDbm = 255;
-
- if (MAX_CHANNEL_NUMBER < Channel) {
- netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
- return MaxTxPwrInDbm;
- }
- if (pDot11dInfo->channel_map[Channel])
- MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
-
- return MaxTxPwrInDbm;
-}
-
void DOT11D_ScanComplete(struct rtllib_device *dev)
{
struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
@@ -190,27 +175,3 @@ void DOT11D_ScanComplete(struct rtllib_device *dev)
break;
}
}
-
-int ToLegalChannel(struct rtllib_device *dev, u8 channel)
-{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 default_chn = 0;
- u32 i;
-
- for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
- if (pDot11dInfo->channel_map[i] > 0) {
- default_chn = i;
- break;
- }
- }
-
- if (MAX_CHANNEL_NUMBER < channel) {
- netdev_err(dev->dev, "%s(): Invalid Channel\n", __func__);
- return default_chn;
- }
-
- if (pDot11dInfo->channel_map[channel] > 0)
- return channel;
-
- return default_chn;
-}
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 69e0f8f7e3f8..129ebed2e3cc 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -79,7 +79,6 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) \
cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-#define CIE_WATCHDOG_TH 1
#define GET_CIE_WATCHDOG(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
@@ -88,16 +87,11 @@ static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
}
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
-#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
void dot11d_init(struct rtllib_device *dev);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
void Dot11d_Reset(struct rtllib_device *dev);
void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
u16 CoutryIeLen, u8 *pCoutryIe);
-u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel);
void DOT11D_ScanComplete(struct rtllib_device *dev);
-int ToLegalChannel(struct rtllib_device *dev, u8 channel);
#endif
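
ToLegalChannel() and DOT11D_GetMaxTxPwrInDbm() are removed because nothing calls them any more. For reference, the fallback the former implemented -- return the requested channel if the dot11d map allows it, else the first mapped channel -- as a standalone sketch (MAX_CHANNEL_NUMBER's value here is an assumption):

#include <stdio.h>

#define MAX_CHANNEL_NUMBER 165	/* assumed channel table upper bound */

/* Return channel if the dot11d map allows it, else the first legal one. */
static int to_legal_channel(const unsigned char *channel_map, unsigned int channel)
{
	unsigned int i, default_chn = 0;

	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
		if (channel_map[i] > 0) {
			default_chn = i;
			break;
		}
	}

	if (channel > MAX_CHANNEL_NUMBER || channel_map[channel] == 0)
		return default_chn;

	return channel;
}

int main(void)
{
	unsigned char map[MAX_CHANNEL_NUMBER + 1] = { 0 };

	map[1] = map[6] = map[11] = 1;
	printf("%d %d\n", to_legal_channel(map, 6), to_legal_channel(map, 13));
	return 0;
}
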
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index d0b08301b88f..dba4584c7006 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -26,8 +26,6 @@
#define MAX_SILENT_RESET_RX_SLOT_NUM 10
#define RX_MPDU_QUEUE 0
-#define RX_CMD_QUEUE 1
-
enum rtl819x_loopback {
RTL819X_NO_LOOPBACK = 0,
@@ -36,11 +34,6 @@ enum rtl819x_loopback {
RTL819X_CCK_LOOPBACK = 3,
};
-
-#define RESET_DELAY_8185 20
-
-#define RT_IBSS_INT_MASKS (IMR_BcnInt | IMR_BcnInt | IMR_TBDOK | IMR_TBDER)
-
#define DESC90_RATE1M 0x00
#define DESC90_RATE2M 0x01
#define DESC90_RATE5_5M 0x02
@@ -74,17 +67,6 @@ enum rtl819x_loopback {
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-
-#define MAX_LINES_HWCONFIG_TXT 1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT 128
-
-#define SW_THREE_WIRE 0
-#define HW_THREE_WIRE 2
-
-#define BT_DEMO_BOARD 0
-#define BT_QA_BOARD 1
-#define BT_FPGA 2
-
#define RX_SMOOTH 20
#define QSLT_BK 0x1
@@ -96,25 +78,14 @@ enum rtl819x_loopback {
#define QSLT_MGNT 0x12
#define QSLT_CMD 0x13
-#define NUM_OF_FIRMWARE_QUEUE 10
-#define NUM_OF_PAGES_IN_FW 0x100
#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x007
#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x0aa
#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x024
#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x007
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x2
#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x10
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0
#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x4
#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xd
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
-
#define APPLIED_RESERVED_QUEUE_IN_FW 0x80000000
#define RSVD_FW_QUEUE_PAGE_BK_SHIFT 0x00
#define RSVD_FW_QUEUE_PAGE_BE_SHIFT 0x08
@@ -197,23 +168,6 @@ struct tx_fwinfo_8190pci {
};
-
-#define TX_DESC_SIZE 32
-
-#define TX_DESC_CMD_SIZE 32
-
-
-#define TX_STATUS_DESC_SIZE 32
-
-#define TX_FWINFO_SIZE 8
-
-
-#define RX_DESC_SIZE 16
-
-#define RX_STATUS_DESC_SIZE 16
-
-#define RX_DRIVER_INFO_SIZE 8
-
struct log_int_8190 {
u32 nIMR_COMDOK;
u32 nIMR_MGNTDOK;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index facc6f1f302b..c8f25ade2535 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -22,55 +22,38 @@
#include "r8192E_phy.h"
#include "r8190P_rtl8256.h"
-void PHY_SetRF8256Bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth)
+void rtl92e_set_bandwidth(struct net_device *dev,
+ enum ht_channel_width Bandwidth)
{
u8 eRFPath;
struct r8192_priv *priv = rtllib_priv(dev);
+ if (priv->card_8192_version != VERSION_8190_BD &&
+ priv->card_8192_version != VERSION_8190_BE) {
+ netdev_warn(dev, "%s(): Unknown HW version.\n", __func__);
+ return;
+ }
+
for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
- if (priv->card_8192_version == VERSION_8190_BD ||
- priv->card_8192_version == VERSION_8190_BE) {
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x0b, bMask12Bits, 0x100);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x2c, bMask12Bits, 0x3d7);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x0e, bMask12Bits, 0x021);
-
- } else {
- netdev_warn(dev, "%s(): Unknown HW version.\n",
- __func__);
- }
-
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x0b, bMask12Bits, 0x100);
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x2c, bMask12Bits, 0x3d7);
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x0e, bMask12Bits, 0x021);
break;
case HT_CHANNEL_WIDTH_20_40:
- if (priv->card_8192_version == VERSION_8190_BD ||
- priv->card_8192_version == VERSION_8190_BE) {
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x0b, bMask12Bits, 0x300);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x2c, bMask12Bits, 0x3ff);
- rtl8192_phy_SetRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- 0x0e, bMask12Bits, 0x0e1);
-
- } else {
- netdev_warn(dev, "%s(): Unknown HW version.\n",
- __func__);
- }
-
-
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x0b, bMask12Bits, 0x300);
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x2c, bMask12Bits, 0x3ff);
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+ 0x0e, bMask12Bits, 0x0e1);
break;
default:
netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
@@ -81,15 +64,7 @@ void PHY_SetRF8256Bandwidth(struct net_device *dev,
}
}
-bool PHY_RF8256_Config(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
- return phy_RF8256_Config_ParaFile(dev);
-}
-
-bool phy_RF8256_Config_ParaFile(struct net_device *dev)
+bool rtl92e_config_rf(struct net_device *dev)
{
u32 u4RegValue = 0;
u8 eRFPath;
@@ -102,9 +77,11 @@ bool phy_RF8256_Config_ParaFile(struct net_device *dev)
u8 ConstRetryTimes = 5, RetryTimes = 5;
u8 ret = 0;
+ priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
+
for (eRFPath = (enum rf90_radio_path)RF90_PATH_A;
eRFPath < priv->NumTotalRFPath; eRFPath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
pPhyReg = &priv->PHYRegDef[eRFPath];
@@ -113,114 +90,63 @@ bool phy_RF8256_Config_ParaFile(struct net_device *dev)
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
- bRFSI_RFENV);
+ u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
+ bRFSI_RFENV);
break;
case RF90_PATH_B:
case RF90_PATH_D:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
- bRFSI_RFENV<<16);
+ u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
+ bRFSI_RFENV<<16);
break;
}
- rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
- rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
- b3WireAddressLength, 0x0);
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
- b3WireDataLength, 0x0);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
+ b3WireAddressLength, 0x0);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
+ b3WireDataLength, 0x0);
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path) eRFPath, 0x0,
- bMask12Bits, 0xbf);
+ rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath, 0x0,
+ bMask12Bits, 0xbf);
- rtStatus = rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF,
- (enum rf90_radio_path)eRFPath);
+ rtStatus = rtl92e_check_bb_and_rf(dev, HW90_BLOCK_RF,
+ (enum rf90_radio_path)eRFPath);
if (!rtStatus) {
netdev_err(dev, "%s(): Failed to check RF Path %d.\n",
__func__, eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
+ goto fail;
}
RetryTimes = ConstRetryTimes;
RF3_Final_Value = 0;
- switch (eRFPath) {
- case RF90_PATH_A:
- while (RF3_Final_Value != RegValueToBeCheck &&
- RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
- (enum rf90_radio_path)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- RegOffSetToBeCheck,
- bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_B:
- while (RF3_Final_Value != RegValueToBeCheck &&
- RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
- (enum rf90_radio_path)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- RegOffSetToBeCheck,
- bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_C:
- while (RF3_Final_Value != RegValueToBeCheck &&
- RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+ while (RF3_Final_Value != RegValueToBeCheck &&
+ RetryTimes != 0) {
+ ret = rtl92e_config_rf_path(dev,
(enum rf90_radio_path)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+ RF3_Final_Value = rtl92e_get_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
RegOffSetToBeCheck,
bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
- RetryTimes--;
- }
- break;
- case RF90_PATH_D:
- while (RF3_Final_Value != RegValueToBeCheck &&
- RetryTimes != 0) {
- ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
- (enum rf90_radio_path)eRFPath);
- RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
- (enum rf90_radio_path)eRFPath,
- RegOffSetToBeCheck, bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
- RetryTimes--;
- }
- break;
+ RT_TRACE(COMP_RF,
+ "RF %d %d register final value: %x\n",
+ eRFPath, RegOffSetToBeCheck,
+ RF3_Final_Value);
+ RetryTimes--;
}
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
- u4RegValue);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
+ u4RegValue);
break;
case RF90_PATH_B:
case RF90_PATH_D:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16,
- u4RegValue);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfintfs,
+ bRFSI_RFENV<<16, u4RegValue);
break;
}
@@ -228,7 +154,7 @@ bool phy_RF8256_Config_ParaFile(struct net_device *dev)
netdev_err(dev,
"%s(): Failed to initialize RF Path %d.\n",
__func__, eRFPath);
- goto phy_RF8256_Config_ParaFile_Fail;
+ goto fail;
}
}
@@ -236,11 +162,11 @@ bool phy_RF8256_Config_ParaFile(struct net_device *dev)
RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
return true;
-phy_RF8256_Config_ParaFile_Fail:
+fail:
return false;
}
-void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel)
+void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel)
{
u32 TxAGC = 0;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -254,11 +180,11 @@ void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel)
}
if (TxAGC > 0x24)
TxAGC = 0x24;
- rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
+ rtl92e_set_bb_reg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
}
-void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
+void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
{
struct r8192_priv *priv = rtllib_priv(dev);
u32 writeVal, powerBase0, powerBase1, writeVal_tmp;
@@ -300,7 +226,7 @@ void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
else
writeVal = (byte3 << 24) | (byte2 << 16) |
(byte1 << 8) | byte0;
- rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
+ rtl92e_set_bb_reg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
}
}
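The r8190P_rtl8256.c changes above are a behavior-preserving cleanup: the HW-version test that was duplicated inside every bandwidth case is hoisted to the top of rtl92e_set_bandwidth(), and the four per-path retry loops in the config routine, identical except for the case label, collapse into one. A minimal stand-alone C sketch of the hoisting half of that pattern (hw_version_ok, set_bandwidth and the register prints are illustrative stand-ins, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

enum hw_version { VER_BD, VER_BE, VER_OTHER };

static bool hw_version_ok(enum hw_version v)
{
        return v == VER_BD || v == VER_BE;
}

static void set_bandwidth(enum hw_version v, int npaths, bool wide)
{
        if (!hw_version_ok(v)) {        /* checked once, up front */
                fprintf(stderr, "unknown HW version\n");
                return;
        }
        for (int path = 0; path < npaths; path++)
                printf("path %d: reg 0x0b <- %#x\n", path,
                       wide ? 0x300 : 0x100);
}

int main(void)
{
        set_bandwidth(VER_BD, 2, false);
        return 0;
}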
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 64e831d2f4e5..3e4363f41728 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -21,11 +21,10 @@
#define RTL8225H
#define RTL819X_TOTAL_RF_PATH 2
-extern void PHY_SetRF8256Bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth);
-extern bool PHY_RF8256_Config(struct net_device *dev);
-extern bool phy_RF8256_Config_ParaFile(struct net_device *dev);
-extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
-extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
+void rtl92e_set_bandwidth(struct net_device *dev,
+ enum ht_channel_width Bandwidth);
+bool rtl92e_config_rf(struct net_device *dev);
+void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel);
+void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel);
#endif
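One detail of this header cleanup generalizes to the other headers in the series: `extern` is dropped from the function prototypes. That is safe because function declarations carry external linkage by default in C, so the keyword was noise. A tiny self-contained demonstration (f() is a placeholder, not driver code):

#include <stdio.h>

int f(int x);          /* these two declarations are equivalent:      */
extern int f(int x);   /* extern is implied for function declarations */

int f(int x)
{
        return x + 1;
}

int main(void)
{
        printf("%d\n", f(41));
        return 0;
}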
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index ebd08a16685e..9ddabf59784c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -21,11 +21,8 @@
#include "r8192E_hw.h"
#include "r8192E_cmdpkt.h"
-bool cmpk_message_handle_tx(
- struct net_device *dev,
- u8 *code_virtual_address,
- u32 packettype,
- u32 buffer_len)
+bool rtl92e_send_cmd_pkt(struct net_device *dev, u8 *code_virtual_address,
+ u32 packettype, u32 buffer_len)
{
bool rt_status = true;
@@ -41,7 +38,7 @@ bool cmpk_message_handle_tx(
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, buffer_len);
- firmware_init_param(dev);
+ rtl92e_init_fw_param(dev);
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
@@ -84,7 +81,7 @@ bool cmpk_message_handle_tx(
} while (frag_offset < buffer_len);
- write_nic_byte(dev, TPPoll, TPPoll_CQ);
+ rtl92e_writeb(dev, TPPoll, TPPoll_CQ);
Failed:
return rt_status;
}
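rtl92e_send_cmd_pkt() keeps the original fragmentation logic: the command buffer is cut into frag_threshold-sized pieces and pushed out one fragment at a time until the offset reaches buffer_len. A stand-alone sketch of that do/while pattern, assuming a hypothetical send_frag() in place of the driver's descriptor fill:

#include <stdbool.h>
#include <stdio.h>

static void send_frag(const unsigned char *p, unsigned len, bool last)
{
        printf("frag at %p, len=%u, last=%d\n", (const void *)p, len, last);
}

static void send_cmd_pkt(const unsigned char *buf, unsigned buffer_len,
                         unsigned frag_threshold)
{
        unsigned frag_offset = 0;

        do {
                unsigned frag_len = buffer_len - frag_offset;
                bool last = frag_len <= frag_threshold;

                if (!last)                      /* clamp all but the tail */
                        frag_len = frag_threshold;
                send_frag(buf + frag_offset, frag_len, last);
                frag_offset += frag_len;
        } while (frag_offset < buffer_len);
}

int main(void)
{
        unsigned char buf[10] = { 0 };

        send_cmd_pkt(buf, sizeof(buf), 4);
        return 0;
}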
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index f714d5100059..2a8b165cb8e1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
@@ -19,7 +19,6 @@
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
-extern bool cmpk_message_handle_tx(struct net_device *dev,
- u8 *codevirtualaddress, u32 packettype,
- u32 buffer_len);
+bool rtl92e_send_cmd_pkt(struct net_device *dev, u8 *codevirtualaddress,
+ u32 packettype, u32 buffer_len);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index f6661bbae7a8..c28cabc23fc0 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -33,7 +33,7 @@
static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI,
EDCAPARA_VO};
-void rtl8192e_start_beacon(struct net_device *dev)
+void rtl92e_start_beacon(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
struct rtllib_network *net = &priv->rtllib->current_network;
@@ -41,21 +41,20 @@ void rtl8192e_start_beacon(struct net_device *dev)
u16 BcnCW = 6;
u16 BcnIFS = 0xf;
- DMESG("Enabling beacon TX");
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
- write_nic_word(dev, ATIMWND, 2);
+ rtl92e_writew(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
- write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
- write_nic_word(dev, BCN_DMATIME, 256);
+ rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
+ rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
+ rtl92e_writew(dev, BCN_DMATIME, 256);
- write_nic_byte(dev, BCN_ERR_THRESH, 100);
+ rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
- write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
- rtl8192_irq_enable(dev);
+ rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
+ rtl92e_irq_enable(dev);
}
static void rtl8192e_update_msr(struct net_device *dev)
@@ -64,7 +63,7 @@ static void rtl8192e_update_msr(struct net_device *dev)
u8 msr;
enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
- msr = read_nic_byte(dev, MSR);
+ msr = rtl92e_readb(dev, MSR);
msr &= ~MSR_LINK_MASK;
switch (priv->rtllib->iw_mode) {
@@ -91,26 +90,26 @@ static void rtl8192e_update_msr(struct net_device *dev)
break;
}
- write_nic_byte(dev, MSR, msr);
+ rtl92e_writeb(dev, MSR, msr);
if (priv->rtllib->LedControlHandler)
priv->rtllib->LedControlHandler(dev, LedAction);
}
-void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
+void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
{
struct r8192_priv *priv = rtllib_priv(dev);
switch (variable) {
case HW_VAR_BSSID:
- write_nic_dword(dev, BSSIDR, ((u32 *)(val))[0]);
- write_nic_word(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+ rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
+ rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
break;
case HW_VAR_MEDIA_STATUS:
{
enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
- u8 btMsr = read_nic_byte(dev, MSR);
+ u8 btMsr = rtl92e_readb(dev, MSR);
btMsr &= 0xfc;
@@ -134,7 +133,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
break;
}
- write_nic_byte(dev, MSR, btMsr);
+ rtl92e_writeb(dev, MSR, btMsr);
}
break;
@@ -144,7 +143,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
u32 RegRCR, Type;
Type = ((u8 *)(val))[0];
- RegRCR = read_nic_dword(dev, RCR);
+ RegRCR = rtl92e_readl(dev, RCR);
priv->ReceiveConfig = RegRCR;
if (Type == true)
@@ -152,7 +151,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
else if (Type == false)
RegRCR &= (~RCR_CBSSID);
- write_nic_dword(dev, RCR, RegRCR);
+ rtl92e_writel(dev, RCR, RegRCR);
priv->ReceiveConfig = RegRCR;
}
@@ -161,7 +160,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:
priv->slot_time = val[0];
- write_nic_byte(dev, SLOT_TIME, val[0]);
+ rtl92e_writeb(dev, SLOT_TIME, val[0]);
break;
@@ -173,12 +172,12 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
regTmp = priv->basic_rate;
if (priv->short_preamble)
regTmp |= BRSR_AckShortPmb;
- write_nic_dword(dev, RRSR, regTmp);
+ rtl92e_writel(dev, RRSR, regTmp);
break;
}
case HW_VAR_CPU_RST:
- write_nic_dword(dev, CPU_GEN, ((u32 *)(val))[0]);
+ rtl92e_writel(dev, CPU_GEN, ((u32 *)(val))[0]);
break;
case HW_VAR_AC_PARAM:
@@ -194,7 +193,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
u1bAIFS = qop->aifs[pAcParam] *
((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
- dm_init_edca_turbo(dev);
+ rtl92e_dm_init_edca_turbo(dev);
u4bAcParam = (le16_to_cpu(qop->tx_op_limit[pAcParam]) <<
AC_PARAM_TXOP_LIMIT_OFFSET) |
@@ -208,19 +207,19 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
__func__, eACI, u4bAcParam);
switch (eACI) {
case AC1_BK:
- write_nic_dword(dev, EDCAPARA_BK, u4bAcParam);
+ rtl92e_writel(dev, EDCAPARA_BK, u4bAcParam);
break;
case AC0_BE:
- write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
+ rtl92e_writel(dev, EDCAPARA_BE, u4bAcParam);
break;
case AC2_VI:
- write_nic_dword(dev, EDCAPARA_VI, u4bAcParam);
+ rtl92e_writel(dev, EDCAPARA_VI, u4bAcParam);
break;
case AC3_VO:
- write_nic_dword(dev, EDCAPARA_VO, u4bAcParam);
+ rtl92e_writel(dev, EDCAPARA_VO, u4bAcParam);
break;
default:
@@ -242,7 +241,7 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
(qos_parameters->aifs[0]);
u8 acm = pAciAifsn->f.acm;
- u8 AcmCtrl = read_nic_byte(dev, AcmHwCtrl);
+ u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
RT_TRACE(COMP_DBG, "===========>%s():HW_VAR_ACM_CTRL:%x\n",
__func__, eACI);
@@ -290,20 +289,20 @@ void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
RT_TRACE(COMP_QOS,
"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
AcmCtrl);
- write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
+ rtl92e_writeb(dev, AcmHwCtrl, AcmCtrl);
break;
}
case HW_VAR_SIFS:
- write_nic_byte(dev, SIFS, val[0]);
- write_nic_byte(dev, SIFS+1, val[0]);
+ rtl92e_writeb(dev, SIFS, val[0]);
+ rtl92e_writeb(dev, SIFS+1, val[0]);
break;
case HW_VAR_RF_TIMING:
{
u8 Rf_Timing = *((u8 *)val);
- write_nic_byte(dev, rFPGA0_RFTiming1, Rf_Timing);
+ rtl92e_writeb(dev, rFPGA0_RFTiming1, Rf_Timing);
break;
}
@@ -324,7 +323,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
RT_TRACE(COMP_INIT, "====> rtl8192_read_eeprom_info\n");
- EEPROMId = eprom_read(dev, 0);
+ EEPROMId = rtl92e_eeprom_read(dev, 0);
if (EEPROMId != RTL8190_EEPROM_ID) {
netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
EEPROMId);
@@ -334,12 +333,14 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
}
if (!priv->AutoloadFailFlag) {
- priv->eeprom_vid = eprom_read(dev, EEPROM_VID >> 1);
- priv->eeprom_did = eprom_read(dev, EEPROM_DID >> 1);
+ priv->eeprom_vid = rtl92e_eeprom_read(dev, EEPROM_VID >> 1);
+ priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
- usValue = eprom_read(dev, (u16)(EEPROM_Customer_ID>>1)) >> 8;
+ usValue = rtl92e_eeprom_read(dev,
+ (u16)(EEPROM_Customer_ID>>1)) >> 8;
priv->eeprom_CustomerID = (u8)(usValue & 0xff);
- usValue = eprom_read(dev, EEPROM_ICVersion_ChannelPlan>>1);
+ usValue = rtl92e_eeprom_read(dev,
+ EEPROM_ICVersion_ChannelPlan>>1);
priv->eeprom_ChannelPlan = usValue&0xff;
IC_Version = (usValue & 0xff00)>>8;
@@ -377,7 +378,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (!priv->AutoloadFailFlag) {
for (i = 0; i < 6; i += 2) {
- usValue = eprom_read(dev,
+ usValue = rtl92e_eeprom_read(dev,
(u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
*(u16 *)(&dev->dev_addr[i]) = usValue;
}
@@ -397,8 +398,8 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (priv->card_8192_version > VERSION_8190_BD) {
if (!priv->AutoloadFailFlag) {
- tempval = (eprom_read(dev, (EEPROM_RFInd_PowerDiff >>
- 1))) & 0xff;
+ tempval = (rtl92e_eeprom_read(dev,
+ (EEPROM_RFInd_PowerDiff >> 1))) & 0xff;
priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf;
if (tempval&0x80)
@@ -412,7 +413,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
priv->EEPROMLegacyHTTxPowerDiff);
if (!priv->AutoloadFailFlag)
- priv->EEPROMThermalMeter = (u8)(((eprom_read(dev,
+ priv->EEPROMThermalMeter = (u8)(((rtl92e_eeprom_read(dev,
(EEPROM_ThermalMeter>>1))) &
0xff00)>>8);
else
@@ -423,7 +424,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (priv->epromtype == EEPROM_93C46) {
if (!priv->AutoloadFailFlag) {
- usValue = eprom_read(dev,
+ usValue = rtl92e_eeprom_read(dev,
EEPROM_TxPwDiff_CrystalCap >> 1);
priv->EEPROMAntPwDiff = (usValue&0x0fff);
priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
@@ -441,7 +442,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
- usValue = eprom_read(dev,
+ usValue = rtl92e_eeprom_read(dev,
(u16)((EEPROM_TxPwIndex_CCK +
i) >> 1));
else
@@ -457,7 +458,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
}
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
- usValue = eprom_read(dev,
+ usValue = rtl92e_eeprom_read(dev,
(u16)((EEPROM_TxPwIndex_OFDM_24G
+ i) >> 1));
else
@@ -561,7 +562,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
RT_TRACE(COMP_INIT, "\n2T4R config\n");
}
- init_rate_adaptive(dev);
+ rtl92e_init_adaptive_rate(dev);
priv->rf_chip = RF_8256;
@@ -626,13 +627,13 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
}
-void rtl8192_get_eeprom_size(struct net_device *dev)
+void rtl92e_get_eeprom_size(struct net_device *dev)
{
u16 curCR;
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
- curCR = read_nic_dword(dev, EPROM_CMD);
+ curCR = rtl92e_readl(dev, EPROM_CMD);
RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
curCR);
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -684,26 +685,26 @@ static void rtl8192_hwconfig(struct net_device *dev)
break;
}
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
{
u32 ratr_value = 0;
ratr_value = regRATR;
if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
+ rtl92e_writel(dev, RATR0, ratr_value);
+ rtl92e_writeb(dev, UFWP, 1);
}
- regTmp = read_nic_byte(dev, 0x313);
+ regTmp = rtl92e_readb(dev, 0x313);
regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
- write_nic_dword(dev, RRSR, regRRSR);
+ rtl92e_writel(dev, RRSR, regRRSR);
- write_nic_word(dev, RETRY_LIMIT,
- priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
- priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ rtl92e_writew(dev, RETRY_LIMIT,
+ priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
+ priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
}
-bool rtl8192_adapter_start(struct net_device *dev)
+bool rtl92e_start_adapter(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u32 ulRegRead;
@@ -719,10 +720,10 @@ bool rtl8192_adapter_start(struct net_device *dev)
priv->being_init_adapter = true;
start:
- rtl8192_pci_resetdescring(dev);
+ rtl92e_reset_desc_ring(dev);
priv->Rf_Mode = RF_OP_By_SW_3wire;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
- write_nic_byte(dev, ANAPAR, 0x37);
+ rtl92e_writeb(dev, ANAPAR, 0x37);
mdelay(500);
}
priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
@@ -730,7 +731,7 @@ start:
if (priv->RegRfOff)
priv->rtllib->eRFPowerState = eRfOff;
- ulRegRead = read_nic_dword(dev, CPU_GEN);
+ ulRegRead = rtl92e_readl(dev, CPU_GEN);
if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
ulRegRead |= CPU_GEN_SYSTEM_RESET;
else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
@@ -739,19 +740,19 @@ start:
netdev_err(dev, "%s(): undefined firmware state: %d.\n",
__func__, priv->pFirmware->firmware_status);
- write_nic_dword(dev, CPU_GEN, ulRegRead);
+ rtl92e_writel(dev, CPU_GEN, ulRegRead);
- ICVersion = read_nic_byte(dev, IC_VERRSION);
+ ICVersion = rtl92e_readb(dev, IC_VERRSION);
if (ICVersion >= 0x4) {
- SwitchingRegulatorOutput = read_nic_byte(dev, SWREGULATOR);
+ SwitchingRegulatorOutput = rtl92e_readb(dev, SWREGULATOR);
if (SwitchingRegulatorOutput != 0xb8) {
- write_nic_byte(dev, SWREGULATOR, 0xa8);
+ rtl92e_writeb(dev, SWREGULATOR, 0xa8);
mdelay(1);
- write_nic_byte(dev, SWREGULATOR, 0xb8);
+ rtl92e_writeb(dev, SWREGULATOR, 0xb8);
}
}
RT_TRACE(COMP_INIT, "BB Config Start!\n");
- rtStatus = rtl8192_BBConfig(dev);
+ rtStatus = rtl92e_config_bb(dev);
if (!rtStatus) {
netdev_warn(dev, "%s(): Failed to configure BB\n", __func__);
return rtStatus;
@@ -760,7 +761,7 @@ start:
priv->LoopbackMode = RTL819X_NO_LOOPBACK;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
- ulRegRead = read_nic_dword(dev, CPU_GEN);
+ ulRegRead = rtl92e_readl(dev, CPU_GEN);
if (priv->LoopbackMode == RTL819X_NO_LOOPBACK)
ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
CPU_GEN_NO_LOOPBACK_SET);
@@ -770,73 +771,73 @@ start:
netdev_err(dev, "%s: Invalid loopback mode setting.\n",
__func__);
- write_nic_dword(dev, CPU_GEN, ulRegRead);
+ rtl92e_writel(dev, CPU_GEN, ulRegRead);
udelay(500);
}
rtl8192_hwconfig(dev);
- write_nic_byte(dev, CMDR, CR_RE | CR_TE);
-
- write_nic_byte(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
- (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
- write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
- write_nic_word(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
- write_nic_dword(dev, RCR, priv->ReceiveConfig);
-
- write_nic_dword(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK <<
- RSVD_FW_QUEUE_PAGE_BK_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_BE <<
- RSVD_FW_QUEUE_PAGE_BE_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VI <<
- RSVD_FW_QUEUE_PAGE_VI_SHIFT |
- NUM_OF_PAGE_IN_FW_QUEUE_VO <<
- RSVD_FW_QUEUE_PAGE_VO_SHIFT);
- write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
- RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
- write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
- NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
- RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
- NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
- RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
-
- rtl8192_tx_enable(dev);
- rtl8192_rx_enable(dev);
- ulRegRead = (0xFFF00000 & read_nic_dword(dev, RRSR)) |
+ rtl92e_writeb(dev, CMDR, CR_RE | CR_TE);
+
+ rtl92e_writeb(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
+ (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
+ rtl92e_writel(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
+ rtl92e_writew(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
+ rtl92e_writel(dev, RCR, priv->ReceiveConfig);
+
+ rtl92e_writel(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK <<
+ RSVD_FW_QUEUE_PAGE_BK_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_BE <<
+ RSVD_FW_QUEUE_PAGE_BE_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_VI <<
+ RSVD_FW_QUEUE_PAGE_VI_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_VO <<
+ RSVD_FW_QUEUE_PAGE_VO_SHIFT);
+ rtl92e_writel(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
+ RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
+ rtl92e_writel(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
+ NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
+ RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
+ NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
+ RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
+
+ rtl92e_tx_enable(dev);
+ rtl92e_rx_enable(dev);
+ ulRegRead = (0xFFF00000 & rtl92e_readl(dev, RRSR)) |
RATE_ALL_OFDM_AG | RATE_ALL_CCK;
- write_nic_dword(dev, RRSR, ulRegRead);
- write_nic_dword(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
+ rtl92e_writel(dev, RRSR, ulRegRead);
+ rtl92e_writel(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
- write_nic_byte(dev, ACK_TIMEOUT, 0x30);
+ rtl92e_writeb(dev, ACK_TIMEOUT, 0x30);
if (priv->ResetProgress == RESET_TYPE_NORESET)
- rtl8192_SetWirelessMode(dev, priv->rtllib->mode);
- CamResetAllEntry(dev);
+ rtl92e_set_wireless_mode(dev, priv->rtllib->mode);
+ rtl92e_cam_reset(dev);
{
u8 SECR_value = 0x0;
SECR_value |= SCR_TxEncEnable;
SECR_value |= SCR_RxDecEnable;
SECR_value |= SCR_NoSKMC;
- write_nic_byte(dev, SECR, SECR_value);
+ rtl92e_writeb(dev, SECR, SECR_value);
}
- write_nic_word(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_INTERVAL, 100);
+ rtl92e_writew(dev, ATIMWND, 2);
+ rtl92e_writew(dev, BCN_INTERVAL, 100);
{
int i;
for (i = 0; i < QOS_QUEUE_NUM; i++)
- write_nic_dword(dev, WDCAPARA_ADD[i], 0x005e4332);
+ rtl92e_writel(dev, WDCAPARA_ADD[i], 0x005e4332);
}
- write_nic_byte(dev, 0xbe, 0xc0);
+ rtl92e_writeb(dev, 0xbe, 0xc0);
- rtl8192_phy_configmac(dev);
+ rtl92e_config_mac(dev);
if (priv->card_8192_version > (u8) VERSION_8190_BD) {
- rtl8192_phy_getTxPower(dev);
- rtl8192_phy_setTxPower(dev, priv->chan);
+ rtl92e_get_tx_power(dev);
+ rtl92e_set_tx_power(dev, priv->chan);
}
- tmpvalue = read_nic_byte(dev, IC_VERRSION);
+ tmpvalue = rtl92e_readb(dev, IC_VERRSION);
priv->IC_Cut = tmpvalue;
RT_TRACE(COMP_INIT, "priv->IC_Cut= 0x%x\n", priv->IC_Cut);
if (priv->IC_Cut >= IC_VersionCut_D) {
@@ -851,7 +852,7 @@ start:
}
RT_TRACE(COMP_INIT, "Load Firmware!\n");
- bfirmwareok = init_firmware(dev);
+ bfirmwareok = rtl92e_init_fw(dev);
if (!bfirmwareok) {
if (retry_times < 10) {
retry_times++;
@@ -864,37 +865,34 @@ start:
RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
if (priv->ResetProgress == RESET_TYPE_NORESET) {
RT_TRACE(COMP_INIT, "RF Config Started!\n");
- rtStatus = rtl8192_phy_RFConfig(dev);
+ rtStatus = rtl92e_config_phy(dev);
if (!rtStatus) {
netdev_info(dev, "RF Config failed\n");
return rtStatus;
}
RT_TRACE(COMP_INIT, "RF Config Finished!\n");
}
- rtl8192_phy_updateInitGain(dev);
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
- write_nic_byte(dev, 0x87, 0x0);
+ rtl92e_writeb(dev, 0x87, 0x0);
if (priv->RegRfOff) {
RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
"%s(): Turn off RF for RegRfOff ----------\n",
__func__);
- MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_SW, true);
+ rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_SW);
} else if (priv->rtllib->RfOffReason > RF_CHANGE_BY_PS) {
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
"%s(): Turn off RF for RfOffReason(%d) ----------\n",
__func__, priv->rtllib->RfOffReason);
- MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
- true);
+ rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
} else if (priv->rtllib->RfOffReason >= RF_CHANGE_BY_IPS) {
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
"%s(): Turn off RF for RfOffReason(%d) ----------\n",
__func__, priv->rtllib->RfOffReason);
- MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
- true);
+ rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
} else {
RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON\n",
__func__);
@@ -908,13 +906,13 @@ start:
priv->Rf_Mode = RF_OP_By_SW_3wire;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
- dm_initialize_txpower_tracking(dev);
+ rtl92e_dm_init_txpower_tracking(dev);
if (priv->IC_Cut >= IC_VersionCut_D) {
- tmpRegA = rtl8192_QueryBBReg(dev,
- rOFDM0_XATxIQImbalance, bMaskDWord);
- tmpRegC = rtl8192_QueryBBReg(dev,
- rOFDM0_XCTxIQImbalance, bMaskDWord);
+ tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord);
+ tmpRegC = rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+ bMaskDWord);
for (i = 0; i < TxBBGainTableLength; i++) {
if (tmpRegA == dm_tx_bb_gain[i]) {
priv->rfa_txpowertrackingindex = (u8)i;
@@ -926,8 +924,8 @@ start:
}
}
- TempCCk = rtl8192_QueryBBReg(dev,
- rCCK0_TxFilter1, bMaskByte2);
+ TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
+ bMaskByte2);
for (i = 0; i < CCKTxBBGainTableLength; i++) {
if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
@@ -954,7 +952,7 @@ start:
priv->btxpower_tracking = false;
}
}
- rtl8192_irq_enable(dev);
+ rtl92e_irq_enable(dev);
end:
priv->being_init_adapter = false;
return rtStatus;
@@ -969,27 +967,27 @@ static void rtl8192_net_update(struct net_device *dev)
u16 rate_config = 0;
net = &priv->rtllib->current_network;
- rtl8192_config_rate(dev, &rate_config);
+ rtl92e_config_rate(dev, &rate_config);
priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
priv->basic_rate = rate_config &= 0x15f;
- write_nic_dword(dev, BSSIDR, ((u32 *)net->bssid)[0]);
- write_nic_word(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+ rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
+ rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- write_nic_word(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_DMATIME, 256);
- write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
- write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
- write_nic_byte(dev, BCN_ERR_THRESH, 100);
+ rtl92e_writew(dev, ATIMWND, 2);
+ rtl92e_writew(dev, BCN_DMATIME, 256);
+ rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
+ rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
+ rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
- write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
+ rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
}
}
-void rtl8192_link_change(struct net_device *dev)
+void rtl92e_link_change(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -1002,16 +1000,16 @@ void rtl8192_link_change(struct net_device *dev)
priv->ops->update_ratr_table(dev);
if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) ||
(KEY_TYPE_WEP104 == ieee->pairwise_key_type))
- EnableHWSecurityConfig8192(dev);
+ rtl92e_enable_hw_security_config(dev);
} else {
- write_nic_byte(dev, 0x173, 0);
+ rtl92e_writeb(dev, 0x173, 0);
}
rtl8192e_update_msr(dev);
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
u32 reg = 0;
- reg = read_nic_dword(dev, RCR);
+ reg = rtl92e_readl(dev, RCR);
if (priv->rtllib->state == RTLLIB_LINKED) {
if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn)
;
@@ -1020,12 +1018,12 @@ void rtl8192_link_change(struct net_device *dev)
} else
priv->ReceiveConfig = reg &= ~RCR_CBSSID;
- write_nic_dword(dev, RCR, reg);
+ rtl92e_writel(dev, RCR, reg);
}
}
-void rtl8192_AllowAllDestAddr(struct net_device *dev,
- bool bAllowAllDA, bool WriteIntoReg)
+void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
+ bool WriteIntoReg)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1035,7 +1033,7 @@ void rtl8192_AllowAllDestAddr(struct net_device *dev,
priv->ReceiveConfig &= ~RCR_AAP;
if (WriteIntoReg)
- write_nic_dword(dev, RCR, priv->ReceiveConfig);
+ rtl92e_writel(dev, RCR, priv->ReceiveConfig);
}
static u8 MRateToHwRate8190Pci(u8 rate)
@@ -1177,8 +1175,20 @@ static u8 rtl8192_MapHwQueueToFirmwareQueue(struct net_device *dev, u8 QueueID,
return QueueSelect;
}
-void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
- struct cb_desc *cb_desc, struct sk_buff *skb)
+static u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
+{
+ u8 tmp_Short;
+
+ tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) :
+ ((tcb_desc->bUseShortPreamble) ? 1 : 0);
+ if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
+ tmp_Short = 0;
+
+ return tmp_Short;
+}
+
+void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
+ struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1286,9 +1296,8 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
pdesc->TxBuffAddr = mapping;
}
-void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
- struct tx_desc_cmd *entry,
- struct cb_desc *cb_desc, struct sk_buff *skb)
+void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
+ struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1506,8 +1515,9 @@ static void rtl8192_query_rxphystatus(
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
if (check_reg824 == 0) {
- reg824_bit9 = rtl8192_QueryBBReg(priv->rtllib->dev,
- rFPGA0_XA_HSSIParameter2, 0x200);
+ reg824_bit9 = rtl92e_get_bb_reg(priv->rtllib->dev,
+ rFPGA0_XA_HSSIParameter2,
+ 0x200);
check_reg824 = 1;
}
@@ -1575,7 +1585,7 @@ static void rtl8192_query_rxphystatus(
}
}
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RecvSignalPower = rx_pwr_all;
@@ -1615,7 +1625,7 @@ static void rtl8192_query_rxphystatus(
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
- RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
+ RSSI = rtl92e_rx_db_to_percent(rx_pwr[i]);
if (priv->brfpath_rxenable[i])
total_rssi += RSSI;
@@ -1628,7 +1638,7 @@ static void rtl8192_query_rxphystatus(
rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
- pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
@@ -1645,7 +1655,7 @@ static void rtl8192_query_rxphystatus(
rx_evmX /= 2;
- evm = rtl819x_evm_dbtopercentage(rx_evmX);
+ evm = rtl92e_evm_db_to_percent(rx_evmX);
if (bpacket_match_bssid) {
if (i == 0) {
pstats->SignalQuality = (u8)(evm &
@@ -1721,8 +1731,8 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
slide_rssi_index = 0;
tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
- priv->stats.signal_strength = rtl819x_translate_todbm(priv,
- (u8)tmp_val);
+ priv->stats.signal_strength = rtl92e_translate_to_dbm(priv,
+ (u8)tmp_val);
curr_st->rssi = priv->stats.signal_strength;
if (!prev_st->bPacketMatchBSSID) {
if (!prev_st->bToSelfBA)
@@ -1732,13 +1742,10 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
- rtl819x_process_cck_rxpathsel(priv, prev_st);
-
priv->stats.num_process_phyinfo++;
if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv->rtllib->dev,
- rfpath))
+ if (!rtl92e_is_legal_rf_path(priv->rtllib->dev, rfpath))
continue;
RT_TRACE(COMP_DBG,
"Jacken -> pPreviousstats->RxMIMOSignalStrength[rfpath] = %d\n",
@@ -1813,7 +1820,7 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(RX_SMOOTH-1)) +
(prev_st->RxPWDBAll)) / (RX_SMOOTH);
}
- rtl819x_update_rxsignalstatistics8190pci(priv, prev_st);
+ rtl92e_update_rx_statistics(priv, prev_st);
}
if (prev_st->SignalQuality != 0) {
@@ -1900,7 +1907,7 @@ static void rtl8192_TranslateRxSignalStuff(struct net_device *dev,
rtl8192_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
&previous_stats, bpacket_match_bssid,
bpacket_toself, bPacketBeacon, bToSelfBA);
- rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
+ rtl92e_copy_mpdu_stats(pstats, &previous_stats);
}
static void rtl8192_UpdateReceivedRateHistogramStatistics(
@@ -2016,10 +2023,8 @@ static void rtl8192_UpdateReceivedRateHistogramStatistics(
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
-bool rtl8192_rx_query_status_desc(struct net_device *dev,
- struct rtllib_rx_stats *stats,
- struct rx_desc *pdesc,
- struct sk_buff *skb)
+bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
+ struct rx_desc *pdesc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rx_fwinfo *pDrvInfo = NULL;
@@ -2063,9 +2068,9 @@ bool rtl8192_rx_query_status_desc(struct net_device *dev,
(pDrvInfo->FirstAGGR == 1);
stats->TimeStampLow = pDrvInfo->TSFL;
- stats->TimeStampHigh = read_nic_dword(dev, TSFR+4);
+ stats->TimeStampHigh = rtl92e_readl(dev, TSFR+4);
- rtl819x_UpdateRxPktTimeStamp(dev, stats);
+ rtl92e_update_rx_pkt_timestamp(dev, stats);
if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
stats->bShift = 1;
@@ -2089,7 +2094,7 @@ bool rtl8192_rx_query_status_desc(struct net_device *dev,
return true;
}
-void rtl8192_halt_adapter(struct net_device *dev, bool reset)
+void rtl92e_stop_adapter(struct net_device *dev, bool reset)
{
struct r8192_priv *priv = rtllib_priv(dev);
int i;
@@ -2102,7 +2107,7 @@ void rtl8192_halt_adapter(struct net_device *dev, bool reset)
if (!priv->rtllib->bSupportRemoteWakeUp) {
u1bTmp = 0x0;
- write_nic_byte(dev, CMDR, u1bTmp);
+ rtl92e_writeb(dev, CMDR, u1bTmp);
}
mdelay(20);
@@ -2113,18 +2118,18 @@ void rtl8192_halt_adapter(struct net_device *dev, bool reset)
priv->bHwRfOffAction = 2;
if (!priv->rtllib->bSupportRemoteWakeUp) {
- PHY_SetRtl8192eRfOff(dev);
- ulRegRead = read_nic_dword(dev, CPU_GEN);
+ rtl92e_set_rf_off(dev);
+ ulRegRead = rtl92e_readl(dev, CPU_GEN);
ulRegRead |= CPU_GEN_SYSTEM_RESET;
- write_nic_dword(dev, CPU_GEN, ulRegRead);
+ rtl92e_writel(dev, CPU_GEN, ulRegRead);
} else {
- write_nic_dword(dev, WFCRC0, 0xffffffff);
- write_nic_dword(dev, WFCRC1, 0xffffffff);
- write_nic_dword(dev, WFCRC2, 0xffffffff);
+ rtl92e_writel(dev, WFCRC0, 0xffffffff);
+ rtl92e_writel(dev, WFCRC1, 0xffffffff);
+ rtl92e_writel(dev, WFCRC2, 0xffffffff);
- write_nic_byte(dev, PMR, 0x5);
- write_nic_byte(dev, MacBlkCtrl, 0xa);
+ rtl92e_writeb(dev, PMR, 0x5);
+ rtl92e_writeb(dev, MacBlkCtrl, 0xa);
}
}
@@ -2136,7 +2141,7 @@ void rtl8192_halt_adapter(struct net_device *dev, bool reset)
skb_queue_purge(&priv->skb_queue);
}
-void rtl8192_update_ratr_table(struct net_device *dev)
+void rtl92e_update_ratr_table(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -2145,7 +2150,7 @@ void rtl8192_update_ratr_table(struct net_device *dev)
u16 rate_config = 0;
u8 rate_index = 0;
- rtl8192_config_rate(dev, &rate_config);
+ rtl92e_config_rate(dev, &rate_config);
ratr_value = rate_config | *pMcsRate << 12;
switch (ieee->mode) {
case IEEE_A:
@@ -2179,12 +2184,12 @@ void rtl8192_update_ratr_table(struct net_device *dev)
else if (!ieee->pHTInfo->bCurTxBW40MHz &&
ieee->pHTInfo->bCurShortGI20MHz)
ratr_value |= 0x80000000;
- write_nic_dword(dev, RATR0+rate_index*4, ratr_value);
- write_nic_byte(dev, UFWP, 1);
+ rtl92e_writel(dev, RATR0+rate_index*4, ratr_value);
+ rtl92e_writeb(dev, UFWP, 1);
}
void
-rtl8192_InitializeVariables(struct net_device *dev)
+rtl92e_init_variables(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2218,66 +2223,65 @@ rtl8192_InitializeVariables(struct net_device *dev)
priv->bfirst_after_down = false;
}
-void rtl8192_EnableInterrupt(struct net_device *dev)
+void rtl92e_enable_irq(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
priv->irq_enabled = 1;
- write_nic_dword(dev, INTA_MASK, priv->irq_mask[0]);
+ rtl92e_writel(dev, INTA_MASK, priv->irq_mask[0]);
}
-void rtl8192_DisableInterrupt(struct net_device *dev)
+void rtl92e_disable_irq(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- write_nic_dword(dev, INTA_MASK, 0);
+ rtl92e_writel(dev, INTA_MASK, 0);
priv->irq_enabled = 0;
}
-void rtl8192_ClearInterrupt(struct net_device *dev)
+void rtl92e_clear_irq(struct net_device *dev)
{
u32 tmp = 0;
- tmp = read_nic_dword(dev, ISR);
- write_nic_dword(dev, ISR, tmp);
+ tmp = rtl92e_readl(dev, ISR);
+ rtl92e_writel(dev, ISR, tmp);
}
-void rtl8192_enable_rx(struct net_device *dev)
+void rtl92e_enable_rx(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- write_nic_dword(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
+ rtl92e_writel(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
}
static const u32 TX_DESC_BASE[] = {
BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA
};
-void rtl8192_enable_tx(struct net_device *dev)
+void rtl92e_enable_tx(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 i;
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
- write_nic_dword(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
+ rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
}
-void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
- u32 *p_intb)
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb)
{
- *p_inta = read_nic_dword(dev, ISR);
- write_nic_dword(dev, ISR, *p_inta);
+ *p_inta = rtl92e_readl(dev, ISR);
+ rtl92e_writel(dev, ISR, *p_inta);
}
-bool rtl8192_HalRxCheckStuck(struct net_device *dev)
+bool rtl92e_is_rx_stuck(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- u16 RegRxCounter = read_nic_word(dev, 0x130);
+ u16 RegRxCounter = rtl92e_readw(dev, 0x130);
bool bStuck = false;
static u8 rx_chk_cnt;
u32 SlotIndex = 0, TotalRxStuckCount = 0;
@@ -2338,11 +2342,11 @@ bool rtl8192_HalRxCheckStuck(struct net_device *dev)
return bStuck;
}
-bool rtl8192_HalTxCheckStuck(struct net_device *dev)
+bool rtl92e_is_tx_stuck(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool bStuck = false;
- u16 RegTxCounter = read_nic_word(dev, 0x128);
+ u16 RegTxCounter = rtl92e_readw(dev, 0x128);
RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n",
__func__, RegTxCounter, priv->TxCounter);
@@ -2355,7 +2359,7 @@ bool rtl8192_HalTxCheckStuck(struct net_device *dev)
return bStuck;
}
-bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev)
+bool rtl92e_get_nmode_support_by_sec(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -2369,34 +2373,10 @@ bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev)
}
}
-bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev)
+bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev)
{
- bool Reval;
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- if (ieee->bHalfWirelessN24GMode == true)
- Reval = true;
- else
- Reval = false;
-
- return Reval;
-}
-
-u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
-{
- u8 tmp_Short;
-
- tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) :
- ((tcb_desc->bUseShortPreamble) ? 1 : 0);
- if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
- tmp_Short = 0;
-
- return tmp_Short;
-}
-
-void ActUpdateChannelAccessSetting(struct net_device *dev,
- enum wireless_mode WirelessMode,
- struct channel_access_setting *ChnlAccessSetting)
-{
+ return ieee->bHalfWirelessN24GMode;
}
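Almost every hunk in r8192E_dev.c is the same mechanical substitution: read_nic_*/write_nic_* become rtl92e_read*/rtl92e_write*, with the surrounding read-modify-write structure untouched, as in rtl8192e_update_msr(). A compact sketch of that idiom against a fake register file (the MSR offset and mask values here are illustrative, not the chip's):

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[256];               /* fake register file */

static uint8_t readb(unsigned off) { return regs[off]; }
static void writeb(unsigned off, uint8_t v) { regs[off] = v; }

#define MSR            0x03
#define MSR_LINK_MASK  0x03
#define MSR_LINK_ADHOC 0x01

static void update_msr(void)
{
        uint8_t msr = readb(MSR);       /* read                          */

        msr &= ~MSR_LINK_MASK;          /* modify: clear the link field  */
        msr |= MSR_LINK_ADHOC;          /* modify: set the new link mode */
        writeb(MSR, msr);               /* write                         */
}

int main(void)
{
        regs[MSR] = 0xf2;
        update_msr();
        printf("MSR = %#x\n", (unsigned)readb(MSR));
        return 0;
}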
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index dbe0e1c87056..6bd6b3a4fcea 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -27,36 +27,30 @@
#include "r8190P_def.h"
-u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc);
-bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev);
-bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev);
-bool rtl8192_HalTxCheckStuck(struct net_device *dev);
-bool rtl8192_HalRxCheckStuck(struct net_device *dev);
-void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
- u32 *p_intb);
-void rtl8192_enable_rx(struct net_device *dev);
-void rtl8192_enable_tx(struct net_device *dev);
-void rtl8192_EnableInterrupt(struct net_device *dev);
-void rtl8192_DisableInterrupt(struct net_device *dev);
-void rtl8192_ClearInterrupt(struct net_device *dev);
-void rtl8192_InitializeVariables(struct net_device *dev);
-void rtl8192e_start_beacon(struct net_device *dev);
-void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val);
-void rtl8192_get_eeprom_size(struct net_device *dev);
-bool rtl8192_adapter_start(struct net_device *dev);
-void rtl8192_link_change(struct net_device *dev);
-void rtl8192_AllowAllDestAddr(struct net_device *dev, bool bAllowAllDA,
- bool WriteIntoReg);
-void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
- struct cb_desc *cb_desc,
- struct sk_buff *skb);
-void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
- struct tx_desc_cmd *entry,
- struct cb_desc *cb_desc, struct sk_buff *skb);
-bool rtl8192_rx_query_status_desc(struct net_device *dev,
- struct rtllib_rx_stats *stats,
- struct rx_desc *pdesc,
- struct sk_buff *skb);
-void rtl8192_halt_adapter(struct net_device *dev, bool reset);
-void rtl8192_update_ratr_table(struct net_device *dev);
+bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev);
+bool rtl92e_get_nmode_support_by_sec(struct net_device *dev);
+bool rtl92e_is_tx_stuck(struct net_device *dev);
+bool rtl92e_is_rx_stuck(struct net_device *dev);
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb);
+void rtl92e_enable_rx(struct net_device *dev);
+void rtl92e_enable_tx(struct net_device *dev);
+void rtl92e_enable_irq(struct net_device *dev);
+void rtl92e_disable_irq(struct net_device *dev);
+void rtl92e_clear_irq(struct net_device *dev);
+void rtl92e_init_variables(struct net_device *dev);
+void rtl92e_start_beacon(struct net_device *dev);
+void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val);
+void rtl92e_get_eeprom_size(struct net_device *dev);
+bool rtl92e_start_adapter(struct net_device *dev);
+void rtl92e_link_change(struct net_device *dev);
+void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
+ bool WriteIntoReg);
+void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
+ struct cb_desc *cb_desc, struct sk_buff *skb);
+void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
+ struct cb_desc *cb_desc, struct sk_buff *skb);
+bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
+ struct rx_desc *pdesc, struct sk_buff *skb);
+void rtl92e_stop_adapter(struct net_device *dev, bool reset);
+void rtl92e_update_ratr_table(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 17d2a1540cc8..5c527c419bc9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -23,7 +23,7 @@
#include "r8192E_firmware.h"
#include <linux/firmware.h>
-void firmware_init_param(struct net_device *dev)
+void rtl92e_init_fw_param(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_firmware *pfirmware = priv->pFirmware;
@@ -46,7 +46,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
struct cb_desc *tcb_desc;
u8 bLastIniPkt;
- firmware_init_param(dev);
+ rtl92e_init_fw_param(dev);
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
if ((buffer_len - frag_offset) > frag_threshold) {
@@ -96,7 +96,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
} while (frag_offset < buffer_len);
- write_nic_byte(dev, TPPoll, TPPoll_CQ);
+ rtl92e_writeb(dev, TPPoll, TPPoll_CQ);
return true;
}
@@ -109,7 +109,7 @@ static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
timeout = jiffies + msecs_to_jiffies(200);
while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ CPU_status = rtl92e_readl(dev, CPU_GEN);
if (CPU_status & CPU_GEN_PUT_CODE_OK)
break;
mdelay(2);
@@ -122,14 +122,14 @@ static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
}
- CPU_status = read_nic_dword(dev, CPU_GEN);
- write_nic_byte(dev, CPU_GEN,
- (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+ CPU_status = rtl92e_readl(dev, CPU_GEN);
+ rtl92e_writeb(dev, CPU_GEN,
+ (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
mdelay(1);
timeout = jiffies + msecs_to_jiffies(200);
while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ CPU_status = rtl92e_readl(dev, CPU_GEN);
if (CPU_status&CPU_GEN_BOOT_RDY)
break;
mdelay(2);
@@ -158,7 +158,7 @@ static bool CPUcheck_firmware_ready(struct net_device *dev)
timeout = jiffies + msecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ CPU_status = rtl92e_readl(dev, CPU_GEN);
if (CPU_status&CPU_GEN_FIRM_RDY)
break;
mdelay(2);
@@ -223,7 +223,7 @@ static bool firmware_check_ready(struct net_device *dev,
return rt_status;
}
-bool init_firmware(struct net_device *dev)
+bool rtl92e_init_fw(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool rt_status = true;
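The firmware bring-up helpers above poll CPU_GEN under a jiffies-based deadline: read the status, break as soon as the ready bit appears, give up after roughly 200 ms. A user-space approximation of that bounded-poll pattern, with clock_gettime() standing in for time_before()/jiffies and cpu_ready() standing in for the register read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool cpu_ready(void)
{
        static int calls;

        return ++calls > 3;             /* pretend the ready bit comes up */
}

static bool wait_cpu_ready(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (cpu_ready())
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        return false;   /* deadline passed, like the 200 ms cap */
        }
}

int main(void)
{
        printf("ready: %d\n", wait_cpu_ready(200));
        return 0;
}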
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index d79e54203199..fa760f7ac145 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
@@ -19,8 +19,6 @@
#ifndef __INC_FIRMWARE_H
#define __INC_FIRMWARE_H
-#define RTL8190_CPU_START_OFFSET 0x80
-
#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8)
#define RTL8192E_BOOT_IMG_FW "RTL8192E/boot.img"
@@ -61,7 +59,7 @@ struct rt_firmware {
u16 firmware_buf_size[MAX_FW_INIT_STEP];
};
-bool init_firmware(struct net_device *dev);
-extern void firmware_init_param(struct net_device *dev);
+bool rtl92e_init_fw(struct net_device *dev);
+void rtl92e_init_fw_param(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 43c3fb859d10..c81832dcf181 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -119,10 +119,10 @@ enum _RTL8192Pci_HW {
#define EPROM_CMD_NORMAL 0
#define EPROM_CMD_LOAD 1
#define EPROM_CMD_PROGRAM 2
-#define EPROM_CS_SHIFT 3
-#define EPROM_CK_SHIFT 2
-#define EPROM_W_SHIFT 1
-#define EPROM_R_SHIFT 0
+#define EPROM_CS_BIT 3
+#define EPROM_CK_BIT 2
+#define EPROM_W_BIT 1
+#define EPROM_R_BIT 0
AFR = 0x010,
#define AFR_CardBEn (1<<0)
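The EPROM_*_SHIFT to EPROM_*_BIT rename is purely cosmetic but reads better at the call sites, since the values are bit positions within the EPROM_CMD register. A short sketch of how such constants are typically used (reg is a stand-in variable; the bit positions are copied from the header):

#include <stdint.h>
#include <stdio.h>

#define EPROM_CS_BIT 3
#define EPROM_CK_BIT 2

int main(void)
{
        uint8_t reg = 0;

        reg |= 1 << EPROM_CS_BIT;       /* raise chip select */
        reg |= 1 << EPROM_CK_BIT;       /* raise clock       */
        printf("EPROM_CMD = %#x\n", (unsigned)reg);
        return 0;
}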
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index fba7654160e8..3a15a0f5b479 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -64,7 +64,7 @@ static u32 rtl8192_CalculateBitShift(u32 dwBitMask)
return i;
}
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
+u8 rtl92e_is_legal_rf_path(struct net_device *dev, u32 eRFPath)
{
u8 ret = 1;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -80,27 +80,27 @@ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
return ret;
}
-void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
- u32 dwData)
+void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
+ u32 dwData)
{
u32 OriginalValue, BitShift, NewValue;
if (dwBitMask != bMaskDWord) {
- OriginalValue = read_nic_dword(dev, dwRegAddr);
+ OriginalValue = rtl92e_readl(dev, dwRegAddr);
BitShift = rtl8192_CalculateBitShift(dwBitMask);
NewValue = (((OriginalValue) & (~dwBitMask)) |
(dwData << BitShift));
- write_nic_dword(dev, dwRegAddr, NewValue);
+ rtl92e_writel(dev, dwRegAddr, NewValue);
} else
- write_nic_dword(dev, dwRegAddr, dwData);
+ rtl92e_writel(dev, dwRegAddr, dwData);
}
-u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
+u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
{
u32 Ret = 0, OriginalValue, BitShift;
- OriginalValue = read_nic_dword(dev, dwRegAddr);
+ OriginalValue = rtl92e_readl(dev, dwRegAddr);
BitShift = rtl8192_CalculateBitShift(dwBitMask);
Ret = (OriginalValue & dwBitMask) >> BitShift;
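rtl92e_set_bb_reg() and rtl92e_get_bb_reg() above share one idiom: the field's shift is derived from the lowest set bit of the mask, so callers pass unshifted field values. A minimal stand-alone sketch of that idiom against an in-memory register (set_field/get_field are illustrative names, and the mask must be non-zero, as it always is at the driver's call sites):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;                    /* fake register */

static unsigned bit_shift(uint32_t mask)
{
        unsigned i;

        for (i = 0; i <= 31; i++)       /* mask must be non-zero */
                if (mask & (1u << i))
                        break;
        return i;
}

static void set_field(uint32_t mask, uint32_t data)
{
        if (mask != 0xffffffffu)
                reg = (reg & ~mask) | (data << bit_shift(mask));
        else
                reg = data;             /* full-word write, no shift */
}

static uint32_t get_field(uint32_t mask)
{
        return (reg & mask) >> bit_shift(mask);
}

int main(void)
{
        set_field(0x00000f00, 0x5);     /* write 5 into bits 11:8 */
        printf("field = %#x, reg = %#x\n",
               (unsigned)get_field(0x00000f00), (unsigned)reg);
        return 0;
}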
@@ -117,19 +117,19 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
Offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
if (Offset >= 31) {
priv->RfReg0Value[eRFPath] |= 0x140;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath]<<16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath]<<16));
NewOffset = Offset - 30;
} else if (Offset >= 16) {
priv->RfReg0Value[eRFPath] |= 0x100;
priv->RfReg0Value[eRFPath] &= (~0x40);
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath]<<16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath]<<16));
NewOffset = Offset - 15;
} else
@@ -139,23 +139,23 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
"check RF type here, need to be 8256\n");
NewOffset = Offset;
}
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
- NewOffset);
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
+ NewOffset);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
+ rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
mdelay(1);
- ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
- bLSSIReadBackData);
+ ret = rtl92e_get_bb_reg(dev, pPhyReg->rfLSSIReadBack,
+ bLSSIReadBackData);
if (priv->rf_chip == RF_8256) {
priv->RfReg0Value[eRFPath] &= 0xebf;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
}
@@ -173,20 +173,20 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
Offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
if (Offset >= 31) {
priv->RfReg0Value[eRFPath] |= 0x140;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
NewOffset = Offset - 30;
} else if (Offset >= 16) {
priv->RfReg0Value[eRFPath] |= 0x100;
priv->RfReg0Value[eRFPath] &= (~0x40);
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
NewOffset = Offset - 15;
} else
NewOffset = Offset;
@@ -198,7 +198,7 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
DataAndAddr = (Data<<16) | (NewOffset&0x3f);
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
if (Offset == 0x0)
priv->RfReg0Value[eRFPath] = Data;
@@ -206,23 +206,21 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
if (priv->rf_chip == RF_8256) {
if (Offset != 0) {
priv->RfReg0Value[eRFPath] &= 0xebf;
- rtl8192_setBBreg(
- dev,
- pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ (priv->RfReg0Value[eRFPath] << 16));
}
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
}
}
-void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data)
+void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data)
{
struct r8192_priv *priv = rtllib_priv(dev);
u32 Original_Value, BitShift, New_Value;
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return;
if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
return;
@@ -256,13 +254,13 @@ void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
}
}
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
- u32 RegAddr, u32 BitMask)
+u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask)
{
u32 Original_Value, Readback_Value, BitShift;
struct r8192_priv *priv = rtllib_priv(dev);
- if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+ if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return 0;
if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
return 0;
@@ -289,20 +287,20 @@ static u32 phy_FwRFSerialRead(struct net_device *dev,
Data |= ((Offset & 0xFF) << 12);
Data |= ((eRFPath & 0x3) << 20);
Data |= 0x80000000;
- while (read_nic_dword(dev, QPNR)&0x80000000) {
+ while (rtl92e_readl(dev, QPNR) & 0x80000000) {
if (time++ < 100)
udelay(10);
else
break;
}
- write_nic_dword(dev, QPNR, Data);
- while (read_nic_dword(dev, QPNR) & 0x80000000) {
+ rtl92e_writel(dev, QPNR, Data);
+ while (rtl92e_readl(dev, QPNR) & 0x80000000) {
if (time++ < 100)
udelay(10);
else
return 0;
}
- return read_nic_dword(dev, RF_DATA);
+ return rtl92e_readl(dev, RF_DATA);
}
@@ -317,18 +315,18 @@ static void phy_FwRFSerialWrite(struct net_device *dev,
Data |= 0x400000;
Data |= 0x80000000;
- while (read_nic_dword(dev, QPNR) & 0x80000000) {
+ while (rtl92e_readl(dev, QPNR) & 0x80000000) {
if (time++ < 100)
udelay(10);
else
break;
}
- write_nic_dword(dev, QPNR, Data);
+ rtl92e_writel(dev, QPNR, Data);
}
-void rtl8192_phy_configmac(struct net_device *dev)
+void rtl92e_config_mac(struct net_device *dev)
{
u32 dwArrayLen = 0, i = 0;
u32 *pdwArray = NULL;
@@ -350,14 +348,14 @@ void rtl8192_phy_configmac(struct net_device *dev)
pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
if (pdwArray[i] == 0x318)
pdwArray[i+2] = 0x00000800;
- rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1],
- pdwArray[i+2]);
+ rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i+1],
+ pdwArray[i+2]);
}
return;
}
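rtl92e_config_mac() walks a flat u32 array in (address, bitmask, value) triples, patching one quirk in-line (register 0x318 forced to 0x00000800) before handing each triple to rtl92e_set_bb_reg(). A sketch of that table-driven shape; the table contents below are invented:

#include <stdint.h>
#include <stdio.h>

/* Editorial sketch of the (addr, mask, value) triple walk above. */
static const uint32_t mac_init_tbl[] = {
        /* addr, mask,       value */
        0x300, 0xffffffff, 0x00000004,
        0x318, 0xffffffff, 0x00000800,
};

static void write_masked(uint32_t addr, uint32_t mask, uint32_t val)
{
        printf("reg 0x%03x mask 0x%08x <- 0x%08x\n",
               (unsigned)addr, (unsigned)mask, (unsigned)val);
}

int main(void)
{
        size_t i, n = sizeof(mac_init_tbl) / sizeof(mac_init_tbl[0]);

        for (i = 0; i + 2 < n; i += 3)
                write_masked(mac_init_tbl[i], mac_init_tbl[i + 1],
                             mac_init_tbl[i + 2]);
        return 0;
}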
-void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
+static void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
{
int i;
u32 *Rtl819XPHY_REGArray_Table = NULL;
@@ -377,9 +375,9 @@ void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
if (ConfigType == BaseBand_Config_PHY_REG) {
for (i = 0; i < PHY_REGArrayLen; i += 2) {
- rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i],
- bMaskDWord,
- Rtl819XPHY_REGArray_Table[i+1]);
+ rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
+ bMaskDWord,
+ Rtl819XPHY_REGArray_Table[i+1]);
RT_TRACE(COMP_DBG,
"i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",
i, Rtl819XPHY_REGArray_Table[i],
@@ -387,9 +385,9 @@ void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
}
} else if (ConfigType == BaseBand_Config_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
- rtl8192_setBBreg(dev, Rtl819XAGCTAB_Array_Table[i],
- bMaskDWord,
- Rtl819XAGCTAB_Array_Table[i+1]);
+ rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
+ bMaskDWord,
+ Rtl819XAGCTAB_Array_Table[i+1]);
RT_TRACE(COMP_DBG,
"i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x\n",
i, Rtl819XAGCTAB_Array_Table[i],
@@ -489,9 +487,8 @@ static void rtl8192_InitBBRFRegDef(struct net_device *dev)
}
-bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
- enum hw90_block CheckBlock,
- enum rf90_radio_path eRFPath)
+bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
+ enum rf90_radio_path eRFPath)
{
bool ret = true;
u32 i, CheckTimes = 4, dwRegRead = 0;
@@ -515,20 +512,20 @@ bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
switch (CheckBlock) {
case HW90_BLOCK_PHY0:
case HW90_BLOCK_PHY1:
- write_nic_dword(dev, WriteAddr[CheckBlock],
- WriteData[i]);
- dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]);
+ rtl92e_writel(dev, WriteAddr[CheckBlock],
+ WriteData[i]);
+ dwRegRead = rtl92e_readl(dev, WriteAddr[CheckBlock]);
break;
case HW90_BLOCK_RF:
WriteData[i] &= 0xfff;
- rtl8192_phy_SetRFReg(dev, eRFPath,
- WriteAddr[HW90_BLOCK_RF],
- bMask12Bits, WriteData[i]);
+ rtl92e_set_rf_reg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMask12Bits, WriteData[i]);
mdelay(10);
- dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath,
- WriteAddr[HW90_BLOCK_RF],
- bMaskDWord);
+ dwRegRead = rtl92e_get_rf_reg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMaskDWord);
mdelay(10);
break;
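rtl92e_check_bb_and_rf() is a plain write/readback self-test: write a known pattern (masked to 12 bits on the RF path), read it back over the same path, and fail the block on mismatch. The loop shape, with stubbed register access and invented patterns:

#include <stdbool.h>
#include <stdint.h>

/* Editorial sketch of the write/readback self-test; the real test
 * patterns live in WriteData[] above. */
static uint32_t fake_reg;

static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void) { return fake_reg; }

static bool check_block_rw(void)
{
        static const uint32_t patterns[] = { 0x500, 0xabc, 0x5ab, 0x03c };
        unsigned i;

        for (i = 0; i < 4; i++) {
                reg_write(patterns[i] & 0xfff);
                if (reg_read() != (patterns[i] & 0xfff))
                        return false;   /* block failed the self-test */
        }
        return true;
}

int main(void)
{
        return check_block_rw() ? 0 : 1;
}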
@@ -555,29 +552,29 @@ static bool rtl8192_BB_Config_ParaFile(struct net_device *dev)
u8 bRegValue = 0, eCheckItem = 0;
u32 dwRegValue = 0;
- bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET);
- write_nic_byte(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
+ bRegValue = rtl92e_readb(dev, BB_GLOBAL_RESET);
+ rtl92e_writeb(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
- dwRegValue = read_nic_dword(dev, CPU_GEN);
- write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
+ dwRegValue = rtl92e_readl(dev, CPU_GEN);
+ rtl92e_writel(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0;
eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
- rtStatus = rtl8192_phy_checkBBAndRF(dev,
- (enum hw90_block)eCheckItem,
- (enum rf90_radio_path)0);
+ rtStatus = rtl92e_check_bb_and_rf(dev,
+ (enum hw90_block)eCheckItem,
+ (enum rf90_radio_path)0);
if (!rtStatus) {
RT_TRACE((COMP_ERR | COMP_PHY),
- "PHY_RF8256_Config():Check PHY%d Fail!!\n",
+ "rtl92e_config_rf():Check PHY%d Fail!!\n",
eCheckItem-1);
return rtStatus;
}
}
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG);
- dwRegValue = read_nic_dword(dev, CPU_GEN);
- write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
+ dwRegValue = rtl92e_readl(dev, CPU_GEN);
+ rtl92e_writel(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB);
@@ -588,57 +585,57 @@ static bool rtl8192_BB_Config_ParaFile(struct net_device *dev)
priv->AntennaTxPwDiff[0]);
else
dwRegValue = 0x0;
- rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
+ rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
+ (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
dwRegValue = priv->CrystalCap;
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
- dwRegValue);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
+ dwRegValue);
}
return rtStatus;
}
-bool rtl8192_BBConfig(struct net_device *dev)
+bool rtl92e_config_bb(struct net_device *dev)
{
rtl8192_InitBBRFRegDef(dev);
return rtl8192_BB_Config_ParaFile(dev);
}
-void rtl8192_phy_getTxPower(struct net_device *dev)
+void rtl92e_get_tx_power(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
priv->MCSTxPowerLevelOriginalOffset[0] =
- read_nic_dword(dev, rTxAGC_Rate18_06);
+ rtl92e_readl(dev, rTxAGC_Rate18_06);
priv->MCSTxPowerLevelOriginalOffset[1] =
- read_nic_dword(dev, rTxAGC_Rate54_24);
+ rtl92e_readl(dev, rTxAGC_Rate54_24);
priv->MCSTxPowerLevelOriginalOffset[2] =
- read_nic_dword(dev, rTxAGC_Mcs03_Mcs00);
+ rtl92e_readl(dev, rTxAGC_Mcs03_Mcs00);
priv->MCSTxPowerLevelOriginalOffset[3] =
- read_nic_dword(dev, rTxAGC_Mcs07_Mcs04);
+ rtl92e_readl(dev, rTxAGC_Mcs07_Mcs04);
priv->MCSTxPowerLevelOriginalOffset[4] =
- read_nic_dword(dev, rTxAGC_Mcs11_Mcs08);
+ rtl92e_readl(dev, rTxAGC_Mcs11_Mcs08);
priv->MCSTxPowerLevelOriginalOffset[5] =
- read_nic_dword(dev, rTxAGC_Mcs15_Mcs12);
+ rtl92e_readl(dev, rTxAGC_Mcs15_Mcs12);
- priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);
- priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1);
- priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1);
- priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1);
+ priv->DefaultInitialGain[0] = rtl92e_readb(dev, rOFDM0_XAAGCCore1);
+ priv->DefaultInitialGain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
+ priv->DefaultInitialGain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
+ priv->DefaultInitialGain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
RT_TRACE(COMP_INIT,
"Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
- priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3);
- priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2);
+ priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
+ priv->framesyncC34 = rtl92e_readl(dev, rOFDM0_RxDetector2);
RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
rOFDM0_RxDetector3, priv->framesync);
- priv->SifsTime = read_nic_word(dev, SIFS);
+ priv->SifsTime = rtl92e_readw(dev, SIFS);
}
-void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
+void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 powerlevel = 0, powerlevelOFDM24G = 0;
@@ -671,16 +668,17 @@ void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
priv->AntennaTxPwDiff[1]<<4 |
priv->AntennaTxPwDiff[0]);
- rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue);
+ rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
+ (bXBTxAGC|bXCTxAGC|bXDTxAGC),
+ u4RegValue);
}
}
switch (priv->rf_chip) {
case RF_8225:
break;
case RF_8256:
- PHY_SetRF8256CCKTxPower(dev, powerlevel);
- PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+ rtl92e_set_cck_tx_power(dev, powerlevel);
+ rtl92e_set_ofdm_tx_power(dev, powerlevelOFDM24G);
break;
case RF_8258:
break;
@@ -690,7 +688,7 @@ void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
}
}
-bool rtl8192_phy_RFConfig(struct net_device *dev)
+bool rtl92e_config_phy(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool rtStatus = true;
@@ -699,7 +697,7 @@ bool rtl8192_phy_RFConfig(struct net_device *dev)
case RF_8225:
break;
case RF_8256:
- rtStatus = PHY_RF8256_Config(dev);
+ rtStatus = rtl92e_config_rf(dev);
break;
case RF_8258:
@@ -714,12 +712,7 @@ bool rtl8192_phy_RFConfig(struct net_device *dev)
return rtStatus;
}
-void rtl8192_phy_updateInitGain(struct net_device *dev)
-{
-}
-
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- enum rf90_radio_path eRFPath)
+u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
{
int i;
@@ -731,10 +724,9 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
msleep(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- Rtl819XRadioA_Array[i],
- bMask12Bits,
- Rtl819XRadioA_Array[i+1]);
+ rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioA_Array[i],
+ bMask12Bits,
+ Rtl819XRadioA_Array[i+1]);
}
break;
@@ -744,10 +736,9 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
msleep(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- Rtl819XRadioB_Array[i],
- bMask12Bits,
- Rtl819XRadioB_Array[i+1]);
+ rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioB_Array[i],
+ bMask12Bits,
+ Rtl819XRadioB_Array[i+1]);
}
break;
@@ -757,10 +748,9 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
msleep(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- Rtl819XRadioC_Array[i],
- bMask12Bits,
- Rtl819XRadioC_Array[i+1]);
+ rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioC_Array[i],
+ bMask12Bits,
+ Rtl819XRadioC_Array[i+1]);
}
break;
@@ -770,9 +760,9 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
msleep(100);
continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath,
- Rtl819XRadioD_Array[i], bMask12Bits,
- Rtl819XRadioD_Array[i+1]);
+ rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioD_Array[i],
+ bMask12Bits,
+ Rtl819XRadioD_Array[i+1]);
}
break;
@@ -794,8 +784,8 @@ static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
break;
case RF_8256:
- PHY_SetRF8256CCKTxPower(dev, powerlevel);
- PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+ rtl92e_set_cck_tx_power(dev, powerlevel);
+ rtl92e_set_ofdm_tx_power(dev, powerlevelOFDM24G);
break;
case RF_8258:
@@ -941,21 +931,21 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
rtl8192_SetTxPowerLevel(dev, channel);
break;
case CmdID_WritePortUlong:
- write_nic_dword(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ rtl92e_writel(dev, CurrentCmd->Para1,
+ CurrentCmd->Para2);
break;
case CmdID_WritePortUshort:
- write_nic_word(dev, CurrentCmd->Para1,
- (u16)CurrentCmd->Para2);
+ rtl92e_writew(dev, CurrentCmd->Para1,
+ (u16)CurrentCmd->Para2);
break;
case CmdID_WritePortUchar:
- write_nic_byte(dev, CurrentCmd->Para1,
- (u8)CurrentCmd->Para2);
+ rtl92e_writeb(dev, CurrentCmd->Para1,
+ (u8)CurrentCmd->Para2);
break;
case CmdID_RF_WriteReg:
for (eRFPath = 0; eRFPath <
priv->NumTotalRFPath; eRFPath++)
- rtl8192_phy_SetRFReg(dev,
+ rtl92e_set_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
CurrentCmd->Para1, bMask12Bits,
CurrentCmd->Para2<<7);
@@ -986,7 +976,7 @@ static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
break;
}
}
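The channel-switch path above is a small command interpreter: each precomputed step carries a CmdID plus Para1/Para2, and the executor maps the ID onto a byte/word/dword MMIO write or an RF register write (note the Para2 << 7 in the RF case). A sketch of the dispatch shape; the IDs and sample table are illustrative, not the driver's real sequences:

#include <stdint.h>
#include <stdio.h>

/* Editorial sketch of the CmdID dispatch above. */
enum cmd_id { CMD_WRITE_BYTE, CMD_WRITE_WORD, CMD_WRITE_DWORD, CMD_END };

struct sw_chnl_cmd {
        enum cmd_id id;
        uint32_t para1;         /* register address */
        uint32_t para2;         /* value */
};

static void run_cmds(const struct sw_chnl_cmd *cmd)
{
        for (; cmd->id != CMD_END; cmd++) {
                switch (cmd->id) {
                case CMD_WRITE_BYTE:
                        printf("writeb 0x%x <- 0x%02x\n",
                               (unsigned)cmd->para1,
                               (unsigned)(cmd->para2 & 0xff));
                        break;
                case CMD_WRITE_WORD:
                        printf("writew 0x%x <- 0x%04x\n",
                               (unsigned)cmd->para1,
                               (unsigned)(cmd->para2 & 0xffff));
                        break;
                case CMD_WRITE_DWORD:
                        printf("writel 0x%x <- 0x%08x\n",
                               (unsigned)cmd->para1, (unsigned)cmd->para2);
                        break;
                default:
                        break;
                }
        }
}

int main(void)
{
        static const struct sw_chnl_cmd seq[] = {
                { CMD_WRITE_BYTE,  0x44, 0x07 },
                { CMD_WRITE_DWORD, 0x90, 0x00000204 },
                { CMD_END, 0, 0 },
        };

        run_cmds(seq);
        return 0;
}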
-void rtl8192_SwChnl_WorkItem(struct net_device *dev)
+static void rtl8192_SwChnl_WorkItem(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1001,7 +991,7 @@ void rtl8192_SwChnl_WorkItem(struct net_device *dev)
RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
}
-u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
+u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1082,13 +1072,13 @@ static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev)
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else if (priv->rtllib->current_network.channel !=
14 && priv->bcck_in_ch14) {
priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else {
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
break;
@@ -1110,13 +1100,13 @@ static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev)
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else if (priv->rtllib->current_network.channel != 14
&& priv->bcck_in_ch14) {
priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else {
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
break;
}
@@ -1150,7 +1140,7 @@ static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct net_device *dev)
priv->CCK_index);
break;
}
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev)
@@ -1163,7 +1153,7 @@ static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev)
CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev);
}
-void rtl8192_SetBWModeWorkItem(struct net_device *dev)
+static void rtl8192_SetBWModeWorkItem(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1183,17 +1173,17 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
return;
}
- regBwOpMode = read_nic_byte(dev, BW_OPMODE);
+ regBwOpMode = rtl92e_readb(dev, BW_OPMODE);
switch (priv->CurrentChannelBW) {
case HT_CHANNEL_WIDTH_20:
regBwOpMode |= BW_OPMODE_20MHZ;
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
break;
case HT_CHANNEL_WIDTH_20_40:
regBwOpMode &= ~BW_OPMODE_20MHZ;
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
break;
default:
@@ -1204,38 +1194,38 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
switch (priv->CurrentChannelBW) {
case HT_CHANNEL_WIDTH_20:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
if (!priv->btxpower_tracking) {
- write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000);
- write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317);
- write_nic_dword(dev, rCCK0_DebugPort, 0x00000204);
+ rtl92e_writel(dev, rCCK0_TxFilter1, 0x1a1b0000);
+ rtl92e_writel(dev, rCCK0_TxFilter2, 0x090e1317);
+ rtl92e_writel(dev, rCCK0_DebugPort, 0x00000204);
} else {
CCK_Tx_Power_Track_BW_Switch(dev);
}
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
break;
case HT_CHANNEL_WIDTH_20_40:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
+ rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
if (!priv->btxpower_tracking) {
- write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000);
- write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e);
- write_nic_dword(dev, rCCK0_DebugPort, 0x00000409);
+ rtl92e_writel(dev, rCCK0_TxFilter1, 0x35360000);
+ rtl92e_writel(dev, rCCK0_TxFilter2, 0x121c252e);
+ rtl92e_writel(dev, rCCK0_DebugPort, 0x00000409);
} else {
CCK_Tx_Power_Track_BW_Switch(dev);
}
- rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
- (priv->nCur40MhzPrimeSC>>1));
- rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
- priv->nCur40MhzPrimeSC);
+ rtl92e_set_bb_reg(dev, rCCK0_System, bCCKSideBand,
+ (priv->nCur40MhzPrimeSC>>1));
+ rtl92e_set_bb_reg(dev, rOFDM1_LSTF, 0xC00,
+ priv->nCur40MhzPrimeSC);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
break;
default:
netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
@@ -1249,7 +1239,7 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
break;
case RF_8256:
- PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
+ rtl92e_set_bandwidth(dev, priv->CurrentChannelBW);
break;
case RF_8258:
@@ -1270,8 +1260,8 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()");
}
-void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth,
- enum ht_extchnl_offset Offset)
+void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1295,7 +1285,7 @@ void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth,
}
-void InitialGain819xPci(struct net_device *dev, u8 Operation)
+void rtl92e_init_gain(struct net_device *dev, u8 Operation)
{
#define SCAN_RX_INITIAL_GAIN 0x17
#define POWER_DETECTION_TH 0x08
@@ -1312,21 +1302,21 @@ void InitialGain819xPci(struct net_device *dev, u8 Operation)
BitMask = bMaskByte0;
if (dm_digtable.dig_algorithm ==
DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
priv->initgain_backup.xaagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1,
- BitMask);
+ (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
+ BitMask);
priv->initgain_backup.xbagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1,
- BitMask);
+ (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
+ BitMask);
priv->initgain_backup.xcagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1,
- BitMask);
+ (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
+ BitMask);
priv->initgain_backup.xdagccore1 =
- (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1,
- BitMask);
+ (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
+ BitMask);
BitMask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev,
+ priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
rCCK0_CCA, BitMask);
RT_TRACE(COMP_SCAN,
@@ -1347,13 +1337,13 @@ void InitialGain819xPci(struct net_device *dev, u8 Operation)
RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
initial_gain);
- write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
POWER_DETECTION_TH);
- write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
+ rtl92e_writeb(dev, 0xa0a, POWER_DETECTION_TH);
break;
case IG_Restore:
RT_TRACE(COMP_SCAN,
@@ -1361,18 +1351,18 @@ void InitialGain819xPci(struct net_device *dev, u8 Operation)
BitMask = 0x7f;
if (dm_digtable.dig_algorithm ==
DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask,
+ rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
(u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask,
+ rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
(u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask,
+ rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask,
(u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask,
+ rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask,
(u32)priv->initgain_backup.xdagccore1);
BitMask = bMaskByte2;
- rtl8192_setBBreg(dev, rCCK0_CCA, BitMask,
+ rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
(u32)priv->initgain_backup.cca);
RT_TRACE(COMP_SCAN,
@@ -1391,12 +1381,12 @@ void InitialGain819xPci(struct net_device *dev, u8 Operation)
"Scan BBInitialGainRestore 0xa0a is %x\n",
priv->initgain_backup.cca);
- rtl8192_phy_setTxPower(dev,
+ rtl92e_set_tx_power(dev,
priv->rtllib->current_network.channel);
if (dm_digtable.dig_algorithm ==
DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
break;
default:
RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
@@ -1405,17 +1395,17 @@ void InitialGain819xPci(struct net_device *dev, u8 Operation)
}
}
-void PHY_SetRtl8192eRfOff(struct net_device *dev)
+void rtl92e_set_rf_off(struct net_device *dev)
{
- rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
- write_nic_byte(dev, ANAPAR_FOR_8192PciE, 0x07);
+ rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
+ rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
+ rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
+ rtl92e_writeb(dev, ANAPAR_FOR_8192PciE, 0x07);
}
@@ -1447,7 +1437,7 @@ static bool SetRFPowerState8190(struct net_device *dev,
do {
InitilizeCount--;
priv->RegRfOff = false;
- rtstatus = NicIFEnableNIC(dev);
+ rtstatus = rtl92e_enable_nic(dev);
} while (!rtstatus && (InitilizeCount > 0));
if (!rtstatus) {
@@ -1461,24 +1451,24 @@ static bool SetRFPowerState8190(struct net_device *dev,
RT_CLEAR_PS_LEVEL(pPSC,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- write_nic_byte(dev, ANAPAR, 0x37);
+ rtl92e_writeb(dev, ANAPAR, 0x37);
mdelay(1);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
0x4, 0x1);
priv->bHwRfOffAction = 0;
- rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE,
- BIT4, 0x1);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4,
- 0x300, 0x3);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
- 0x18, 0x3);
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3,
- 0x3);
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3,
- 0x3);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
- 0x60, 0x3);
+ rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE,
+ BIT4, 0x1);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4,
+ 0x300, 0x3);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
+ 0x18, 0x3);
+ rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
+ 0x3, 0x3);
+ rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
+ 0x3, 0x3);
+ rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
+ 0x60, 0x3);
}
@@ -1511,7 +1501,7 @@ static bool SetRFPowerState8190(struct net_device *dev,
break;
}
}
- PHY_SetRtl8192eRfOff(dev);
+ rtl92e_set_rf_off(dev);
break;
case eRfOff:
@@ -1543,11 +1533,11 @@ static bool SetRFPowerState8190(struct net_device *dev,
if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC &&
!RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
- NicIFDisableNIC(dev);
+ rtl92e_disable_nic(dev);
RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
} else if (!(pPSC->RegRfPsLevel &
RT_RF_OFF_LEVL_HALT_NIC)) {
- PHY_SetRtl8192eRfOff(dev);
+ rtl92e_set_rf_off(dev);
}
break;
@@ -1586,32 +1576,34 @@ static bool SetRFPowerState8190(struct net_device *dev,
return bResult;
}
-bool SetRFPowerState(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState)
+bool rtl92e_set_rf_power_state(struct net_device *dev,
+ enum rt_rf_power_state eRFPowerState)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool bResult = false;
- RT_TRACE(COMP_PS, "---------> SetRFPowerState(): eRFPowerState(%d)\n",
+ RT_TRACE(COMP_PS,
+ "---------> rtl92e_set_rf_power_state(): eRFPowerState(%d)\n",
eRFPowerState);
if (eRFPowerState == priv->rtllib->eRFPowerState &&
priv->bHwRfOffAction == 0) {
RT_TRACE(COMP_PS,
- "<--------- SetRFPowerState(): discard the request for eRFPowerState(%d) is the same.\n",
+ "<--------- rtl92e_set_rf_power_state(): discard the request for eRFPowerState(%d) is the same.\n",
eRFPowerState);
return bResult;
}
bResult = SetRFPowerState8190(dev, eRFPowerState);
- RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): bResult(%d)\n",
+ RT_TRACE(COMP_PS,
+ "<--------- rtl92e_set_rf_power_state(): bResult(%d)\n",
bResult);
return bResult;
}
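Note the guard at the top of rtl92e_set_rf_power_state(): when the requested state already matches and no hardware RF-off action is pending (bHwRfOffAction == 0), the request is discarded, so repeated callers are cheap. The guard in isolation, as a sketch:

#include <stdbool.h>

/* Editorial sketch: the early-return guard above, reduced to its shape. */
enum rf_state { RF_ON, RF_SLEEP, RF_OFF };

struct radio {
        enum rf_state state;
        bool hw_off_pending;    /* mirrors bHwRfOffAction != 0 */
};

static bool set_rf_state(struct radio *r, enum rf_state want)
{
        if (want == r->state && !r->hw_off_pending)
                return false;   /* nothing to do, request discarded */
        r->state = want;        /* real driver defers to SetRFPowerState8190() */
        return true;
}

int main(void)
{
        struct radio r = { RF_ON, false };

        return set_rf_state(&r, RF_ON) ? 1 : 0; /* duplicate request: 0 */
}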
-void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation)
+void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
{
struct r8192_priv *priv = rtllib_priv(dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 18bc58240fbe..96015d342009 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -67,48 +67,36 @@ enum rf90_radio_path {
#define bMaskLWord 0x0000ffff
#define bMaskDWord 0xffffffff
-extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev,
- u32 eRFPath);
-extern void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr,
- u32 dwBitMask, u32 dwData);
-extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr,
- u32 dwBitMask);
-extern void rtl8192_phy_SetRFReg(struct net_device *dev,
- enum rf90_radio_path eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
- enum rf90_radio_path eRFPath,
- u32 RegAddr, u32 BitMask);
-extern void rtl8192_phy_configmac(struct net_device *dev);
-extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
-extern bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
- enum hw90_block CheckBlock,
- enum rf90_radio_path eRFPath);
-extern bool rtl8192_BBConfig(struct net_device *dev);
-extern void rtl8192_phy_getTxPower(struct net_device *dev);
-extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
-extern bool rtl8192_phy_RFConfig(struct net_device *dev);
-extern void rtl8192_phy_updateInitGain(struct net_device *dev);
-extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- enum rf90_radio_path eRFPath);
+u8 rtl92e_is_legal_rf_path(struct net_device *dev, u32 eRFPath);
+void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr,
+ u32 dwBitMask, u32 dwData);
+u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask);
+void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask);
+void rtl92e_config_mac(struct net_device *dev);
+bool rtl92e_check_bb_and_rf(struct net_device *dev,
+ enum hw90_block CheckBlock,
+ enum rf90_radio_path eRFPath);
+bool rtl92e_config_bb(struct net_device *dev);
+void rtl92e_get_tx_power(struct net_device *dev);
+void rtl92e_set_tx_power(struct net_device *dev, u8 channel);
+bool rtl92e_config_phy(struct net_device *dev);
+u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
-extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-extern void rtl8192_SetBWMode(struct net_device *dev,
- enum ht_channel_width Bandwidth,
- enum ht_extchnl_offset Offset);
-extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
-extern void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-extern void InitialGain819xPci(struct net_device *dev, u8 Operation);
+u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
+void rtl92e_set_bw_mode(struct net_device *dev,
+ enum ht_channel_width Bandwidth,
+ enum ht_extchnl_offset Offset);
+void rtl92e_init_gain(struct net_device *dev, u8 Operation);
-extern void PHY_SetRtl8192eRfOff(struct net_device *dev);
+void rtl92e_set_rf_off(struct net_device *dev);
-bool
-SetRFPowerState(
- struct net_device *dev,
- enum rt_rf_power_state eRFPowerState
- );
-#define PHY_SetRFPowerState SetRFPowerState
+bool rtl92e_set_rf_power_state(struct net_device *dev,
+ enum rt_rf_power_state eRFPowerState);
+#define PHY_SetRFPowerState rtl92e_set_rf_power_state
-extern void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation);
+void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
#endif
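With the declarations de-externed and renamed, a caller's bring-up now reads in one rtl92e_* vocabulary. A hypothetical sketch of the call order only; the stubs below stand in for the real functions declared above and carry none of their behavior:

#include <stdbool.h>
#include <stdint.h>

/* Editorial sketch: illustrative bring-up order against the renamed API. */
struct net_device;

static bool rtl92e_config_bb_stub(struct net_device *dev) { (void)dev; return true; }
static bool rtl92e_config_phy_stub(struct net_device *dev) { (void)dev; return true; }
static void rtl92e_get_tx_power_stub(struct net_device *dev) { (void)dev; }
static uint8_t rtl92e_set_channel_stub(struct net_device *dev, uint8_t ch)
{ (void)dev; return ch; }

static bool bring_up(struct net_device *dev)
{
        if (!rtl92e_config_bb_stub(dev) || !rtl92e_config_phy_stub(dev))
                return false;
        rtl92e_get_tx_power_stub(dev);  /* cache power-on defaults */
        rtl92e_set_channel_stub(dev, 6);
        return true;
}

int main(void)
{
        return bring_up(0) ? 0 : 1;
}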
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index f246222e5fc9..29dd93ac5e93 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -28,15 +28,15 @@
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-void CamResetAllEntry(struct net_device *dev)
+void rtl92e_cam_reset(struct net_device *dev)
{
u32 ulcommand = 0;
ulcommand |= BIT31|BIT30;
- write_nic_dword(dev, RWCAM, ulcommand);
+ rtl92e_writel(dev, RWCAM, ulcommand);
}
-void EnableHWSecurityConfig8192(struct net_device *dev)
+void rtl92e_enable_hw_security_config(struct net_device *dev)
{
u8 SECR_value = 0x0;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -65,11 +65,12 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
__func__, ieee->hwsec_active, ieee->pairwise_key_type,
SECR_value);
- write_nic_byte(dev, SECR, SECR_value);
+ rtl92e_writeb(dev, SECR, SECR_value);
}
-void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh)
+void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+ u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+ u32 *KeyContent, u8 is_mesh)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -77,6 +78,10 @@ void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
RT_TRACE(COMP_DBG,
"===========>%s():EntryNo is %d,KeyIndex is %d,KeyType is %d,is_mesh is %d\n",
__func__, EntryNo, KeyIndex, KeyType, is_mesh);
+
+ if (EntryNo >= TOTAL_CAM_ENTRY)
+ return;
+
if (!is_mesh) {
ieee->swcamtable[EntryNo].bused = true;
ieee->swcamtable[EntryNo].key_index = KeyIndex;
@@ -87,8 +92,9 @@ void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
}
}
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
+void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+ u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+ u32 *KeyContent)
{
u32 TargetCommand = 0;
u32 TargetContent = 0;
@@ -106,16 +112,18 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
return;
}
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
}
}
priv->rtllib->is_set_key = true;
- if (EntryNo >= TOTAL_CAM_ENTRY)
+ if (EntryNo >= TOTAL_CAM_ENTRY) {
netdev_info(dev, "%s(): Invalid CAM entry\n", __func__);
+ return;
+ }
RT_TRACE(COMP_SEC,
- "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
+ "====>to rtl92e_set_key(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
dev, EntryNo, KeyIndex, KeyType, MacAddr);
if (DefaultKey)
@@ -133,20 +141,20 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
(u32)(*(MacAddr+1)) << 24 |
(u32)usConfig;
- write_nic_dword(dev, WCAMI, TargetContent);
- write_nic_dword(dev, RWCAM, TargetCommand);
+ rtl92e_writel(dev, WCAMI, TargetContent);
+ rtl92e_writel(dev, RWCAM, TargetCommand);
} else if (i == 1) {
TargetContent = (u32)(*(MacAddr+2)) |
(u32)(*(MacAddr+3)) << 8 |
(u32)(*(MacAddr+4)) << 16 |
(u32)(*(MacAddr+5)) << 24;
- write_nic_dword(dev, WCAMI, TargetContent);
- write_nic_dword(dev, RWCAM, TargetCommand);
+ rtl92e_writel(dev, WCAMI, TargetContent);
+ rtl92e_writel(dev, RWCAM, TargetCommand);
} else {
if (KeyContent != NULL) {
- write_nic_dword(dev, WCAMI,
- (u32)(*(KeyContent+i-2)));
- write_nic_dword(dev, RWCAM, TargetCommand);
+ rtl92e_writel(dev, WCAMI,
+ (u32)(*(KeyContent+i-2)));
+ rtl92e_writel(dev, RWCAM, TargetCommand);
udelay(100);
}
}
@@ -154,7 +162,7 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
}
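rtl92e_set_key() programs a CAM entry dword-by-dword through a two-register handshake: the payload goes to WCAMI, then the command register RWCAM is kicked; the first two dwords carry the config bits and MAC address, the remainder the key material (with a udelay between key dwords). Reduced to a sketch with invented payloads:

#include <stdint.h>
#include <stdio.h>

/* Editorial sketch of the WCAMI/RWCAM handshake above; the command
 * encoding and payloads here are invented for illustration. */
static void cam_write(uint32_t data_reg_val, uint32_t cmd_reg_val)
{
        printf("WCAMI <- 0x%08x, RWCAM <- 0x%08x\n",
               (unsigned)data_reg_val, (unsigned)cmd_reg_val);
}

int main(void)
{
        uint32_t cmd = 0x80010000;      /* hypothetical write command base */
        uint32_t payload[4] = {
                0x11223344, 0x55660000, 0xdeadbeef, 0xfeedface
        };
        unsigned i;

        for (i = 0; i < 4; i++)
                cam_write(payload[i], cmd + i); /* one dword per slot */
        return 0;
}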
-void CamRestoreAllEntry(struct net_device *dev)
+void rtl92e_cam_restore(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -170,7 +178,7 @@ void CamRestoreAllEntry(struct net_device *dev)
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- RT_TRACE(COMP_SEC, "CamRestoreAllEntry:\n");
+ RT_TRACE(COMP_SEC, "rtl92e_cam_restore:\n");
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
@@ -179,36 +187,41 @@ void CamRestoreAllEntry(struct net_device *dev)
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
if (priv->rtllib->swcamtable[EntryId].bused) {
- setKey(dev, EntryId, EntryId,
- priv->rtllib->pairwise_key_type, MacAddr,
- 0, (u32 *)(&priv->rtllib->swcamtable
- [EntryId].key_buf[0]));
+ rtl92e_set_key(dev, EntryId, EntryId,
+ priv->rtllib->pairwise_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable
+ [EntryId].key_buf[0]));
}
}
} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_TKIP) {
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
- (u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+ rtl92e_set_key(dev, 4, 0,
+ priv->rtllib->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
} else {
- setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+ rtl92e_set_key(dev, 4, 0,
+ priv->rtllib->pairwise_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
}
} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
- setKey(dev, 4, 0,
- priv->rtllib->pairwise_key_type,
- (u8 *)dev->dev_addr, 0,
- (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ rtl92e_set_key(dev, 4, 0,
+ priv->rtllib->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
} else {
- setKey(dev, 4, 0,
- priv->rtllib->pairwise_key_type, MacAddr,
- 0, (u32 *)(&priv->rtllib->swcamtable[4].
- key_buf[0]));
+ rtl92e_set_key(dev, 4, 0,
+ priv->rtllib->pairwise_key_type, MacAddr,
+ 0, (u32 *)(&priv->rtllib->swcamtable[4].
+ key_buf[0]));
}
}
@@ -216,20 +229,18 @@ void CamRestoreAllEntry(struct net_device *dev)
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
if (priv->rtllib->swcamtable[EntryId].bused) {
- setKey(dev, EntryId, EntryId,
- priv->rtllib->group_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0])
- );
+ rtl92e_set_key(dev, EntryId, EntryId,
+ priv->rtllib->group_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
}
}
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
if (priv->rtllib->swcamtable[0].bused) {
- setKey(dev, 0, 0,
- priv->rtllib->group_key_type,
- CAM_CONST_ADDR[0], 0,
- (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0])
- );
+ rtl92e_set_key(dev, 0, 0,
+ priv->rtllib->group_key_type,
+ CAM_CONST_ADDR[0], 0,
+ (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
} else {
netdev_warn(dev,
"%s(): ADHOC TKIP: missing key entry.\n",
@@ -241,19 +252,19 @@ void CamRestoreAllEntry(struct net_device *dev)
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
if (priv->rtllib->swcamtable[EntryId].bused) {
- setKey(dev, EntryId, EntryId,
- priv->rtllib->group_key_type,
- MacAddr, 0,
- (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
+ rtl92e_set_key(dev, EntryId, EntryId,
+ priv->rtllib->group_key_type,
+ MacAddr, 0,
+ (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
}
}
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
if (priv->rtllib->swcamtable[0].bused) {
- setKey(dev, 0, 0,
- priv->rtllib->group_key_type,
- CAM_CONST_ADDR[0], 0,
- (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
+ rtl92e_set_key(dev, 0, 0,
+ priv->rtllib->group_key_type,
+ CAM_CONST_ADDR[0], 0,
+ (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
} else {
netdev_warn(dev,
"%s(): ADHOC CCMP: missing key entry.\n",
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index f23ab46c77e7..9ef8b36fc6b5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -28,12 +28,14 @@
#include <linux/types.h>
struct net_device;
-void CamResetAllEntry(struct net_device *dev);
-void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
-void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh);
-void CamRestoreAllEntry(struct net_device *dev);
+void rtl92e_cam_reset(struct net_device *dev);
+void rtl92e_enable_hw_security_config(struct net_device *dev);
+void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+ u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+ u32 *KeyContent);
+void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+ u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+ u32 *KeyContent, u8 is_mesh);
+void rtl92e_cam_restore(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index c6cdb43b864c..d6b46dfd01e1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -22,25 +22,6 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#undef RX_DONT_PASS_UL
-#undef DEBUG_EPROM
-#undef DEBUG_RX_VERBOSE
-#undef DUMMY_RX
-#undef DEBUG_ZERO_RX
-#undef DEBUG_RX_SKB
-#undef DEBUG_TX_FRAG
-#undef DEBUG_RX_FRAG
-#undef DEBUG_TX_FILLDESC
-#undef DEBUG_TX
-#undef DEBUG_IRQ
-#undef DEBUG_RX
-#undef DEBUG_RXALLOC
-#undef DEBUG_REGISTERS
-#undef DEBUG_RING
-#undef DEBUG_IRQ_TASKLET
-#undef DEBUG_TX_ALLOC
-#undef DEBUG_TX_DESC
-
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
@@ -63,24 +44,24 @@ static char *ifname = "wlan%d";
static struct rtl819x_ops rtl819xp_ops = {
.nic_type = NIC_8192E,
- .get_eeprom_size = rtl8192_get_eeprom_size,
- .init_adapter_variable = rtl8192_InitializeVariables,
- .initialize_adapter = rtl8192_adapter_start,
- .link_change = rtl8192_link_change,
- .tx_fill_descriptor = rtl8192_tx_fill_desc,
- .tx_fill_cmd_descriptor = rtl8192_tx_fill_cmd_desc,
- .rx_query_status_descriptor = rtl8192_rx_query_status_desc,
+ .get_eeprom_size = rtl92e_get_eeprom_size,
+ .init_adapter_variable = rtl92e_init_variables,
+ .initialize_adapter = rtl92e_start_adapter,
+ .link_change = rtl92e_link_change,
+ .tx_fill_descriptor = rtl92e_fill_tx_desc,
+ .tx_fill_cmd_descriptor = rtl92e_fill_tx_cmd_desc,
+ .rx_query_status_descriptor = rtl92e_get_rx_stats,
.rx_command_packet_handler = NULL,
- .stop_adapter = rtl8192_halt_adapter,
- .update_ratr_table = rtl8192_update_ratr_table,
- .irq_enable = rtl8192_EnableInterrupt,
- .irq_disable = rtl8192_DisableInterrupt,
- .irq_clear = rtl8192_ClearInterrupt,
- .rx_enable = rtl8192_enable_rx,
- .tx_enable = rtl8192_enable_tx,
- .interrupt_recognized = rtl8192_interrupt_recognized,
- .TxCheckStuckHandler = rtl8192_HalTxCheckStuck,
- .RxCheckStuckHandler = rtl8192_HalRxCheckStuck,
+ .stop_adapter = rtl92e_stop_adapter,
+ .update_ratr_table = rtl92e_update_ratr_table,
+ .irq_enable = rtl92e_enable_irq,
+ .irq_disable = rtl92e_disable_irq,
+ .irq_clear = rtl92e_clear_irq,
+ .rx_enable = rtl92e_enable_rx,
+ .tx_enable = rtl92e_enable_tx,
+ .interrupt_recognized = rtl92e_ack_irq,
+ .TxCheckStuckHandler = rtl92e_is_tx_stuck,
+ .RxCheckStuckHandler = rtl92e_is_rx_stuck,
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
@@ -102,202 +83,61 @@ static struct pci_driver rtl8192_pci_driver = {
.id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
.probe = rtl8192_pci_probe, /* probe fn */
.remove = rtl8192_pci_disconnect, /* remove fn */
- .suspend = rtl8192E_suspend, /* PM suspend fn */
- .resume = rtl8192E_resume, /* PM resume fn */
+ .suspend = rtl92e_suspend, /* PM suspend fn */
+ .resume = rtl92e_resume, /* PM resume fn */
};
+static short rtl8192_is_tx_queue_empty(struct net_device *dev);
+static void rtl819x_watchdog_wqcallback(void *data);
+static void watch_dog_timer_callback(unsigned long data);
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+ int rate);
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb);
+static short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
+static short rtl8192_pci_initdescring(struct net_device *dev);
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
+static int _rtl8192_up(struct net_device *dev, bool is_silent_reset);
+static int rtl8192_up(struct net_device *dev);
+static int rtl8192_down(struct net_device *dev, bool shutdownrf);
+static void rtl8192_restart(void *data);
+
/****************************************************************************
-----------------------------IO STUFF-------------------------
*****************************************************************************/
-static bool PlatformIOCheckPageLegalAndGetRegMask(u32 u4bPage, u8 *pu1bPageMask)
-{
- bool bReturn = false;
-
- *pu1bPageMask = 0xfe;
-
- switch (u4bPage) {
- case 1: case 2: case 3: case 4:
- case 8: case 9: case 10: case 12: case 13:
- bReturn = true;
- *pu1bPageMask = 0xf0;
- break;
-
- default:
- bReturn = false;
- break;
- }
-
- return bReturn;
-}
-
-void write_nic_io_byte(struct net_device *dev, int x, u8 y)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
-
- if (u4bPage == 0) {
- outb(y&0xff, dev->base_addr + x);
-
- } else {
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- write_nic_io_byte(dev, (x & 0xff), y);
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
- }
- }
-}
-
-void write_nic_io_word(struct net_device *dev, int x, u16 y)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
-
- if (u4bPage == 0) {
- outw(y, dev->base_addr + x);
- } else {
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- write_nic_io_word(dev, (x & 0xff), y);
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-
- }
- }
-}
-
-void write_nic_io_dword(struct net_device *dev, int x, u32 y)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
-
- if (u4bPage == 0) {
- outl(y, dev->base_addr + x);
- } else {
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- write_nic_io_dword(dev, (x & 0xff), y);
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
- }
- }
-}
-
-u8 read_nic_io_byte(struct net_device *dev, int x)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
- u8 Data = 0;
-
- if (u4bPage == 0)
- return 0xff&inb(dev->base_addr + x);
-
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- Data = read_nic_io_byte(dev, (x & 0xff));
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
- }
-
- return Data;
-}
-
-u16 read_nic_io_word(struct net_device *dev, int x)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
- u16 Data = 0;
-
- if (u4bPage == 0)
- return inw(dev->base_addr + x);
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- Data = read_nic_io_word(dev, (x & 0xff));
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
- }
-
- return Data;
-}
-u32 read_nic_io_dword(struct net_device *dev, int x)
-{
- u32 u4bPage = x >> 8;
- u8 u1PageMask = 0;
- bool bIsLegalPage = false;
- u32 Data = 0;
-
- if (u4bPage == 0)
- return inl(dev->base_addr + x);
- bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
- &u1PageMask);
- if (bIsLegalPage) {
- u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
- write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
- (u8)u4bPage));
- Data = read_nic_io_dword(dev, (x & 0xff));
- write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
- }
-
- return Data;
-}
-
-u8 read_nic_byte(struct net_device *dev, int x)
+u8 rtl92e_readb(struct net_device *dev, int x)
{
return 0xff & readb((u8 __iomem *)dev->mem_start + x);
}
-u32 read_nic_dword(struct net_device *dev, int x)
+u32 rtl92e_readl(struct net_device *dev, int x)
{
return readl((u8 __iomem *)dev->mem_start + x);
}
-u16 read_nic_word(struct net_device *dev, int x)
+u16 rtl92e_readw(struct net_device *dev, int x)
{
return readw((u8 __iomem *)dev->mem_start + x);
}
-void write_nic_byte(struct net_device *dev, int x, u8 y)
+void rtl92e_writeb(struct net_device *dev, int x, u8 y)
{
writeb(y, (u8 __iomem *)dev->mem_start + x);
udelay(20);
}
-void write_nic_dword(struct net_device *dev, int x, u32 y)
+void rtl92e_writel(struct net_device *dev, int x, u32 y)
{
writel(y, (u8 __iomem *)dev->mem_start + x);
udelay(20);
}
-void write_nic_word(struct net_device *dev, int x, u16 y)
+void rtl92e_writew(struct net_device *dev, int x, u16 y)
{
writew(y, (u8 __iomem *)dev->mem_start + x);
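These renamed accessors keep the read-modify-write shape used throughout the rest of this patch; the BW_OPMODE update in r8192E_phy.c earlier in this diff is the canonical caller:

        regBwOpMode = rtl92e_readb(dev, BW_OPMODE);
        regBwOpMode |= BW_OPMODE_20MHZ;
        rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);

The 20 us settle (udelay(20)) after each write is baked into the helpers themselves, so callers need no explicit pause.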
@@ -307,10 +147,9 @@ void write_nic_word(struct net_device *dev, int x, u16 y)
/****************************************************************************
-----------------------------GENERAL FUNCTION-------------------------
*****************************************************************************/
-bool MgntActSet_RF_State(struct net_device *dev,
+bool rtl92e_set_rf_state(struct net_device *dev,
enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource,
- bool ProtectOrNot)
+ RT_RF_CHANGE_SOURCE ChangeSource)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -321,39 +160,34 @@ bool MgntActSet_RF_State(struct net_device *dev,
unsigned long flag;
RT_TRACE((COMP_PS | COMP_RF),
- "===>MgntActSet_RF_State(): StateToSet(%d)\n", StateToSet);
-
- ProtectOrNot = false;
+ "===>rtl92e_set_rf_state(): StateToSet(%d)\n", StateToSet);
+ while (true) {
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ if (priv->RFChangeInProgress) {
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ RT_TRACE((COMP_PS | COMP_RF),
+ "rtl92e_set_rf_state(): RF Change in progress! Wait to set..StateToSet(%d).\n",
+ StateToSet);
- if (!ProtectOrNot) {
- while (true) {
- spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->RFChangeInProgress) {
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ while (priv->RFChangeInProgress) {
+ RFWaitCounter++;
RT_TRACE((COMP_PS | COMP_RF),
- "MgntActSet_RF_State(): RF Change in progress! Wait to set..StateToSet(%d).\n",
- StateToSet);
-
- while (priv->RFChangeInProgress) {
- RFWaitCounter++;
- RT_TRACE((COMP_PS | COMP_RF),
- "MgntActSet_RF_State(): Wait 1 ms (%d times)...\n",
- RFWaitCounter);
- mdelay(1);
-
- if (RFWaitCounter > 100) {
- netdev_warn(dev,
- "%s(): Timeout waiting for RF change.\n",
- __func__);
- return false;
- }
+ "rtl92e_set_rf_state(): Wait 1 ms (%d times)...\n",
+ RFWaitCounter);
+ mdelay(1);
+
+ if (RFWaitCounter > 100) {
+ netdev_warn(dev,
+ "%s(): Timeout waiting for RF change.\n",
+ __func__);
+ return false;
}
- } else {
- priv->RFChangeInProgress = true;
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- break;
}
+ } else {
+ priv->RFChangeInProgress = true;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+ break;
}
}
@@ -376,7 +210,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
bConnectBySSID = true;
} else {
RT_TRACE((COMP_PS | COMP_RF),
- "MgntActSet_RF_State - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
+ "rtl92e_set_rf_state - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
priv->rtllib->RfOffReason, ChangeSource);
}
@@ -413,7 +247,7 @@ bool MgntActSet_RF_State(struct net_device *dev,
if (bActionAllowed) {
RT_TRACE((COMP_PS | COMP_RF),
- "MgntActSet_RF_State(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
+ "rtl92e_set_rf_state(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
StateToSet, priv->rtllib->RfOffReason);
PHY_SetRFPowerState(dev, StateToSet);
if (StateToSet == eRfOn) {
@@ -426,17 +260,15 @@ bool MgntActSet_RF_State(struct net_device *dev,
}
} else {
RT_TRACE((COMP_PS | COMP_RF),
- "MgntActSet_RF_State(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
+ "rtl92e_set_rf_state(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
StateToSet, ChangeSource, priv->rtllib->RfOffReason);
}
- if (!ProtectOrNot) {
- spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->RFChangeInProgress = false;
- spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- }
+ spin_lock_irqsave(&priv->rf_ps_lock, flag);
+ priv->RFChangeInProgress = false;
+ spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- RT_TRACE((COMP_PS | COMP_RF), "<===MgntActSet_RF_State()\n");
+ RT_TRACE((COMP_PS | COMP_RF), "<===rtl92e_set_rf_state()\n");
return bActionAllowed;
}
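rtl92e_set_rf_state() now always serializes through the spinlock-protected RFChangeInProgress flag (the old ProtectOrNot bypass is gone): take the lock, claim the flag if free, otherwise drop the lock and retry at 1 ms intervals, giving up after 100 tries. A userspace sketch of that gate, with a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

/* Editorial sketch of the RFChangeInProgress gate; pthread_mutex stands
 * in for the driver's spinlock, usleep(1000) for mdelay(1). */
static pthread_mutex_t rf_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rf_change_in_progress;

static bool claim_rf_change(void)
{
        int waited = 0;

        for (;;) {
                pthread_mutex_lock(&rf_lock);
                if (!rf_change_in_progress) {
                        rf_change_in_progress = true;   /* gate claimed */
                        pthread_mutex_unlock(&rf_lock);
                        return true;
                }
                pthread_mutex_unlock(&rf_lock);
                if (++waited > 100)
                        return false;   /* timeout, as in the driver */
                usleep(1000);           /* ~mdelay(1) */
        }
}

static void release_rf_change(void)
{
        pthread_mutex_lock(&rf_lock);
        rf_change_in_progress = false;
        pthread_mutex_unlock(&rf_lock);
}

int main(void)
{
        if (!claim_rf_change())
                return 1;
        release_rf_change();
        return 0;
}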
@@ -450,7 +282,7 @@ static short rtl8192_check_nic_enough_desc(struct net_device *dev, int prio)
return 0;
}
-void rtl8192_tx_timeout(struct net_device *dev)
+static void rtl8192_tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -458,7 +290,7 @@ void rtl8192_tx_timeout(struct net_device *dev)
netdev_info(dev, "TXTIMEOUT");
}
-void rtl8192_irq_enable(struct net_device *dev)
+void rtl92e_irq_enable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -467,7 +299,7 @@ void rtl8192_irq_enable(struct net_device *dev)
priv->ops->irq_enable(dev);
}
-void rtl8192_irq_disable(struct net_device *dev)
+void rtl92e_irq_disable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -476,7 +308,7 @@ void rtl8192_irq_disable(struct net_device *dev)
priv->irq_enabled = 0;
}
-void rtl8192_set_chan(struct net_device *dev, short ch)
+static void rtl8192_set_chan(struct net_device *dev, short ch)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -490,7 +322,7 @@ void rtl8192_set_chan(struct net_device *dev, short ch)
priv->rf_set_chan(dev, priv->chan);
}
-void rtl8192_update_cap(struct net_device *dev, u16 cap)
+static void rtl8192_update_cap(struct net_device *dev, u16 cap)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_network *net = &priv->rtllib->current_network;
@@ -683,7 +515,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
network->flags, priv->rtllib->current_network.qos_data.active);
if (set_qos_param == 1) {
- dm_init_edca_turbo(priv->rtllib->dev);
+ rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
queue_work_rsl(priv->priv_wq, &priv->qos_activate);
}
return 0;
@@ -733,7 +565,7 @@ static void rtl8192_stop_beacon(struct net_device *dev)
{
}
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_network *net;
@@ -864,7 +696,7 @@ static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
return ret;
}
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
+void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
@@ -895,9 +727,6 @@ void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
priv->rtllib->mode = wireless_mode;
- ActUpdateChannelAccessSetting(dev, wireless_mode,
- &priv->ChannelAccessSetting);
-
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
@@ -941,7 +770,7 @@ static int _rtl8192_sta_up(struct net_device *dev, bool is_silent_reset)
priv->bfirst_init = false;
if (priv->polling_timer_on == 0)
- check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
if (priv->rtllib->state != RTLLIB_LINKED)
rtllib_softmac_start_protocol(priv->rtllib, 0);
@@ -969,7 +798,7 @@ static int rtl8192_sta_down(struct net_device *dev, bool shutdownrf)
priv->rtllib->rtllib_ips_leave(dev);
if (priv->rtllib->state == RTLLIB_LINKED)
- LeisurePSLeave(dev);
+ rtl92e_leisure_ps_leave(dev);
priv->bDriverIsGoingToUnload = true;
priv->up = 0;
@@ -982,9 +811,9 @@ static int rtl8192_sta_down(struct net_device *dev, bool shutdownrf)
priv->rtllib->wpa_ie_len = 0;
kfree(priv->rtllib->wpa_ie);
priv->rtllib->wpa_ie = NULL;
- CamResetAllEntry(dev);
+ rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
del_timer_sync(&priv->watch_dog_timer);
rtl8192_cancel_deferred_work(priv);
@@ -1027,38 +856,36 @@ static void rtl8192_init_priv_handler(struct net_device *dev)
priv->rtllib->set_chan = rtl8192_set_chan;
priv->rtllib->link_change = priv->ops->link_change;
priv->rtllib->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
- priv->rtllib->data_hard_stop = rtl8192_data_hard_stop;
- priv->rtllib->data_hard_resume = rtl8192_data_hard_resume;
priv->rtllib->check_nic_enough_desc = rtl8192_check_nic_enough_desc;
priv->rtllib->handle_assoc_response = rtl8192_handle_assoc_response;
priv->rtllib->handle_beacon = rtl8192_handle_beacon;
- priv->rtllib->SetWirelessMode = rtl8192_SetWirelessMode;
- priv->rtllib->LeisurePSLeave = LeisurePSLeave;
- priv->rtllib->SetBWModeHandler = rtl8192_SetBWMode;
- priv->rf_set_chan = rtl8192_phy_SwChnl;
+ priv->rtllib->SetWirelessMode = rtl92e_set_wireless_mode;
+ priv->rtllib->LeisurePSLeave = rtl92e_leisure_ps_leave;
+ priv->rtllib->SetBWModeHandler = rtl92e_set_bw_mode;
+ priv->rf_set_chan = rtl92e_set_channel;
- priv->rtllib->start_send_beacons = rtl8192e_start_beacon;
+ priv->rtllib->start_send_beacons = rtl92e_start_beacon;
priv->rtllib->stop_send_beacons = rtl8192_stop_beacon;
- priv->rtllib->sta_wake_up = rtl8192_hw_wakeup;
- priv->rtllib->enter_sleep_state = rtl8192_hw_to_sleep;
+ priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
+ priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
priv->rtllib->ps_is_queue_empty = rtl8192_is_tx_queue_empty;
- priv->rtllib->GetNmodeSupportBySecCfg = rtl8192_GetNmodeSupportBySecCfg;
+ priv->rtllib->GetNmodeSupportBySecCfg = rtl92e_get_nmode_support_by_sec;
priv->rtllib->GetHalfNmodeSupportByAPsHandler =
- rtl8192_GetHalfNmodeSupportByAPs;
+ rtl92e_is_halfn_supported_by_ap;
- priv->rtllib->SetHwRegHandler = rtl8192e_SetHwReg;
- priv->rtllib->AllowAllDestAddrHandler = rtl8192_AllowAllDestAddr;
+ priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
+ priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
priv->rtllib->SetFwCmdHandler = NULL;
- priv->rtllib->InitialGainHandler = InitialGain819xPci;
- priv->rtllib->rtllib_ips_leave_wq = rtllib_ips_leave_wq;
- priv->rtllib->rtllib_ips_leave = rtllib_ips_leave;
+ priv->rtllib->InitialGainHandler = rtl92e_init_gain;
+ priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
+ priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
priv->rtllib->LedControlHandler = NULL;
priv->rtllib->UpdateBeaconInterruptHandler = NULL;
- priv->rtllib->ScanOperationBackupHandler = PHY_ScanOperationBackup8192;
+ priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
}
static void rtl8192_init_priv_constant(struct net_device *dev)
@@ -1181,20 +1008,21 @@ static void rtl8192_init_priv_task(struct net_device *dev)
priv->priv_wq = create_workqueue(DRV_NAME);
INIT_WORK_RSL(&priv->reset_wq, (void *)rtl8192_restart, dev);
- INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)IPSLeave_wq, dev);
+ INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq,
+ dev);
INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq,
(void *)rtl819x_watchdog_wqcallback, dev);
INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq,
- (void *)dm_txpower_trackingcallback, dev);
+ (void *)rtl92e_dm_txpower_tracking_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq,
- (void *)dm_rf_pathcheck_workitemcallback, dev);
+ (void *)rtl92e_dm_rf_pathcheck_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq,
(void *)rtl8192_update_beacon, dev);
INIT_WORK_RSL(&priv->qos_activate, (void *)rtl8192_qos_activate, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
- (void *) rtl8192_hw_wakeup_wq, dev);
+ (void *) rtl92e_hw_wakeup_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
- (void *) rtl8192_hw_sleep_wq, dev);
+ (void *) rtl92e_hw_sleep_wq, dev);
tasklet_init(&priv->irq_rx_tasklet,
(void(*)(unsigned long))rtl8192_irq_rx_tasklet,
(unsigned long)priv);
@@ -1250,17 +1078,17 @@ static short rtl8192_init(struct net_device *dev)
priv->ops->init_adapter_variable(dev);
rtl8192_get_channel_map(dev);
- init_hal_dm(dev);
+ rtl92e_dm_init(dev);
setup_timer(&priv->watch_dog_timer,
watch_dog_timer_callback,
(unsigned long) dev);
setup_timer(&priv->gpio_polling_timer,
- check_rfctrl_gpio_timer,
+ rtl92e_check_rfctrl_gpio_timer,
(unsigned long)dev);
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED,
dev->name, dev)) {
netdev_err(dev, "Error allocating IRQ %d", dev->irq);
@@ -1282,7 +1110,7 @@ static short rtl8192_init(struct net_device *dev)
/***************************************************************************
-------------------------------WATCHDOG STUFF---------------------------
***************************************************************************/
-short rtl8192_is_tx_queue_empty(struct net_device *dev)
+static short rtl8192_is_tx_queue_empty(struct net_device *dev)
{
int i = 0;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1439,7 +1267,7 @@ RESET_START:
down(&priv->wx_sem);
if (priv->rtllib->state == RTLLIB_LINKED)
- LeisurePSLeave(dev);
+ rtl92e_leisure_ps_leave(dev);
if (priv->up) {
netdev_info(dev, "%s():the driver is not up.\n",
@@ -1459,10 +1287,10 @@ RESET_START:
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
del_timer_sync(&priv->watch_dog_timer);
rtl8192_cancel_deferred_work(priv);
- deinit_hal_dm(dev);
+ rtl92e_dm_deinit(dev);
rtllib_stop_scan_syncro(ieee);
if (ieee->state == RTLLIB_LINKED) {
@@ -1479,7 +1307,7 @@ RESET_START:
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
}
- dm_backup_dynamic_mechanism_state(dev);
+ rtl92e_dm_backup_state(dev);
up(&priv->wx_sem);
RT_TRACE(COMP_RESET,
@@ -1508,7 +1336,7 @@ RESET_START:
priv->RFChangeInProgress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- EnableHWSecurityConfig8192(dev);
+ rtl92e_enable_hw_security_config(dev);
if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
IW_MODE_INFRA) {
@@ -1527,15 +1355,13 @@ RESET_START:
rtllib_start_send_beacons(ieee);
- if (ieee->data_hard_resume)
- ieee->data_hard_resume(ieee->dev);
netif_carrier_on(ieee->dev);
} else if (ieee->iw_mode == IW_MODE_MESH) {
rtl819x_silentreset_mesh_bk(dev, IsPortal);
}
- CamRestoreAllEntry(dev);
- dm_restore_dynamic_mechanism_state(dev);
+ rtl92e_cam_restore(dev);
+ rtl92e_dm_restore_state(dev);
END:
priv->ResetProgress = RESET_TYPE_NORESET;
priv->reset_count++;
@@ -1543,7 +1369,7 @@ END:
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
- write_nic_byte(dev, UFWP, 1);
+ rtl92e_writeb(dev, UFWP, 1);
RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n",
priv->reset_count);
}
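
[Note: the silent-reset hunks above reduce to a fixed ordering of the newly prefixed helpers. A condensed reading aid, not a drop-in implementation: locking, the mesh branch, and the adapter stop/start are omitted, and the demo_* wrapper name is illustrative; all called functions are the ones this patch introduces.]

	static void demo_silent_reset_order(struct net_device *dev)
	{
		rtl92e_leisure_ps_leave(dev);	/* drop out of leisure PS first */
		rtl92e_irq_disable(dev);	/* quiesce the NIC */
		rtl92e_dm_deinit(dev);		/* stop dynamic-mechanism timers */
		rtl92e_dm_backup_state(dev);	/* save DIG/BB gain state */
		/* ... adapter stop/start elided ... */
		rtl92e_enable_hw_security_config(dev);
		rtl92e_cam_restore(dev);	/* reprogram hardware CAM keys */
		rtl92e_dm_restore_state(dev);	/* restore RATR0 and BB gains */
		rtl92e_writeb(dev, UFWP, 1);	/* let firmware resume polling */
	}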
@@ -1570,8 +1396,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
}
-
-void rtl819x_watchdog_wqcallback(void *data)
+static void rtl819x_watchdog_wqcallback(void *data)
{
struct r8192_priv *priv = container_of_dwork_rsl(data,
struct r8192_priv, watch_dog_wq);
@@ -1597,7 +1422,7 @@ void rtl819x_watchdog_wqcallback(void *data)
priv->rtllib->CntAfterLink = 0;
}
- hal_dm_watchdog(dev);
+ rtl92e_dm_watchdog(dev);
if (rtllib_act_scanning(priv->rtllib, false) == false) {
if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
@@ -1608,8 +1433,8 @@ void rtl819x_watchdog_wqcallback(void *data)
IPS_CALLBACK_NONE) &&
(!ieee->bNetPromiscuousMode)) {
RT_TRACE(COMP_PS,
- "====================>haha: IPSEnter()\n");
- IPSEnter(dev);
+ "====================>haha: rtl92e_ips_enter()\n");
+ rtl92e_ips_enter(dev);
}
}
}
@@ -1640,13 +1465,13 @@ void rtl819x_watchdog_wqcallback(void *data)
bEnterPS = false;
if (bEnterPS)
- LeisurePSEnter(dev);
+ rtl92e_leisure_ps_enter(dev);
else
- LeisurePSLeave(dev);
+ rtl92e_leisure_ps_leave(dev);
} else {
RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
- LeisurePSLeave(dev);
+ rtl92e_leisure_ps_leave(dev);
}
ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
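
[Note: the three renamed call sites above encode one policy: leave leisure power save whenever there is traffic or no link, and enter it only when associated and idle. A minimal sketch, assuming bEnterPS is computed from the per-period TX/RX counters reset just above; demo_lps_policy is an illustrative name.]

	static void demo_lps_policy(struct net_device *dev, bool linked,
				    bool bEnterPS)
	{
		if (linked && bEnterPS)
			rtl92e_leisure_ps_enter(dev);	/* associated and idle */
		else
			rtl92e_leisure_ps_leave(dev);	/* traffic, or no link */
	}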
@@ -1725,7 +1550,7 @@ void rtl819x_watchdog_wqcallback(void *data)
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
-void watch_dog_timer_callback(unsigned long data)
+static void watch_dog_timer_callback(unsigned long data)
{
struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
@@ -1737,14 +1562,14 @@ void watch_dog_timer_callback(unsigned long data)
/****************************************************************************
---------------------------- NIC TX/RX STUFF---------------------------
*****************************************************************************/
-void rtl8192_rx_enable(struct net_device *dev)
+void rtl92e_rx_enable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
priv->ops->rx_enable(dev);
}
-void rtl8192_tx_enable(struct net_device *dev)
+void rtl92e_tx_enable(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -1802,16 +1627,7 @@ static void rtl8192_free_tx_ring(struct net_device *dev, unsigned int prio)
ring->desc = NULL;
}
-void rtl8192_data_hard_stop(struct net_device *dev)
-{
-}
-
-
-void rtl8192_data_hard_resume(struct net_device *dev)
-{
-}
-
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
int rate)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -1826,8 +1642,8 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
return;
}
- if (queue_index != TXCMD_QUEUE)
- netdev_warn(dev, "%s(): queue index != TXCMD_QUEUE\n",
+ if (queue_index == TXCMD_QUEUE)
+ netdev_warn(dev, "%s(): queue index == TXCMD_QUEUE\n",
__func__);
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
@@ -1843,7 +1659,7 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
int ret;
@@ -1902,7 +1718,7 @@ static void rtl8192_tx_isr(struct net_device *dev, int prio)
tasklet_schedule(&priv->irq_tx_tasklet);
}
-void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
+static void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtl8192_tx_ring *ring;
@@ -1925,7 +1741,7 @@ void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
}
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+static short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtl8192_tx_ring *ring;
@@ -1997,7 +1813,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
dev->trans_start = jiffies;
- write_nic_word(dev, TPPoll, 0x01 << tcb_desc->queue_index);
+ rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
return 0;
}
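
[Note: after a descriptor is queued, the transmit path kicks the DMA engine with a per-queue doorbell write; TPPoll carries one bit per hardware queue. A sketch of that final step using the accessor name introduced here; the demo_* wrapper is illustrative.]

	static void demo_kick_tx(struct net_device *dev, u8 queue_index)
	{
		/* tell the NIC to poll the descriptor ring for this queue */
		rtl92e_writew(dev, TPPoll, 0x01 << queue_index);
	}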
@@ -2077,8 +1893,7 @@ static int rtl8192_alloc_tx_desc_ring(struct net_device *dev,
return 0;
}
-
-short rtl8192_pci_initdescring(struct net_device *dev)
+static short rtl8192_pci_initdescring(struct net_device *dev)
{
u32 ret;
int i;
@@ -2104,7 +1919,7 @@ err_free_rings:
return 1;
}
-void rtl8192_pci_resetdescring(struct net_device *dev)
+void rtl92e_reset_desc_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
int i, rx_queue_idx;
@@ -2144,8 +1959,8 @@ void rtl8192_pci_resetdescring(struct net_device *dev)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
}
-void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
- struct rtllib_rx_stats *stats)
+void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
+ struct rtllib_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -2155,7 +1970,7 @@ void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
priv->LastRxDescTSF = stats->mac_time;
}
-long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index)
+long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
{
long signal_power;
@@ -2166,11 +1981,8 @@ long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index)
}
-void
-rtl819x_update_rxsignalstatistics8190pci(
- struct r8192_priv *priv,
- struct rtllib_rx_stats *pprevious_stats
- )
+void rtl92e_update_rx_statistics(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats)
{
int weighting = 0;
@@ -2189,13 +2001,7 @@ rtl819x_update_rxsignalstatistics8190pci(
weighting) / 6;
}
-void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
- struct rtllib_rx_stats *pprevious_stats)
-{
-}
-
-
-u8 rtl819x_query_rxpwrpercentage(char antpower)
+u8 rtl92e_rx_db_to_percent(char antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -2206,10 +2012,7 @@ u8 rtl819x_query_rxpwrpercentage(char antpower)
} /* QueryRxPwrPercentage */
-u8
-rtl819x_evm_dbtopercentage(
- char value
- )
+u8 rtl92e_evm_db_to_percent(char value)
{
char ret_val;
@@ -2226,11 +2029,8 @@ rtl819x_evm_dbtopercentage(
return ret_val;
}
-void
-rtl8192_record_rxdesc_forlateruse(
- struct rtllib_rx_stats *psrc_stats,
- struct rtllib_rx_stats *ptarget_stats
-)
+void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
+ struct rtllib_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
@@ -2344,11 +2144,6 @@ done:
}
-static void rtl8192_rx_cmd(struct net_device *dev)
-{
-}
-
-
static void rtl8192_tx_resume(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -2366,26 +2161,23 @@ static void rtl8192_tx_resume(struct net_device *dev)
}
}
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
{
rtl8192_tx_resume(priv->rtllib->dev);
}
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
{
rtl8192_rx_normal(priv->rtllib->dev);
- if (MAX_RX_QUEUE > 1)
- rtl8192_rx_cmd(priv->rtllib->dev);
-
- write_nic_dword(priv->rtllib->dev, INTA_MASK,
- read_nic_dword(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
+ rtl92e_writel(priv->rtllib->dev, INTA_MASK,
+ rtl92e_readl(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
}
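
[Note: this tasklet is one half of the rx-descriptor-unavailable (RDU) flow control: the interrupt handler (further down in this patch) clears IMR_RDU in INTA_MASK when the ring runs dry, and the tasklet sets it again once rtl8192_rx_normal() has drained and refilled the ring. The read-modify-write pair, sketched with the renamed accessors; demo_rdu_update is an illustrative name.]

	static void demo_rdu_update(struct net_device *dev, bool ring_ready)
	{
		u32 mask = rtl92e_readl(dev, INTA_MASK);

		if (ring_ready)
			mask |= IMR_RDU;	/* tasklet: ring refilled, re-arm */
		else
			mask &= ~IMR_RDU;	/* ISR: ring dry, silence RDU */
		rtl92e_writel(dev, INTA_MASK, mask);
	}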
/****************************************************************************
---------------------------- NIC START/CLOSE STUFF---------------------------
*****************************************************************************/
-void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
+static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
@@ -2394,14 +2186,13 @@ void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
cancel_work_sync(&priv->qos_activate);
}
-int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
+static int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
{
if (_rtl8192_sta_up(dev, is_silent_reset) == -1)
return -1;
return 0;
}
-
static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2414,8 +2205,7 @@ static int rtl8192_open(struct net_device *dev)
}
-
-int rtl8192_up(struct net_device *dev)
+static int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2445,7 +2235,7 @@ static int rtl8192_close(struct net_device *dev)
}
-int rtl8192_down(struct net_device *dev, bool shutdownrf)
+static int rtl8192_down(struct net_device *dev, bool shutdownrf)
{
if (rtl8192_sta_down(dev, shutdownrf) == -1)
return -1;
@@ -2453,19 +2243,19 @@ int rtl8192_down(struct net_device *dev, bool shutdownrf)
return 0;
}
-void rtl8192_commit(struct net_device *dev)
+void rtl92e_commit(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
if (priv->up == 0)
return;
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
priv->ops->stop_adapter(dev, true);
_rtl8192_up(dev, false);
}
-void rtl8192_restart(void *data)
+static void rtl8192_restart(void *data)
{
struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
reset_wq);
@@ -2473,7 +2263,7 @@ void rtl8192_restart(void *data)
down(&priv->wx_sem);
- rtl8192_commit(dev);
+ rtl92e_commit(dev);
up(&priv->wx_sem);
}
@@ -2552,30 +2342,34 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (is_zero_ether_addr(ieee->ap_mac_addr))
ieee->iw_mode = IW_MODE_ADHOC;
memcpy((u8 *)key, ipw->u.crypt.key, 16);
- EnableHWSecurityConfig8192(dev);
- set_swcam(dev, 4, ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8 *)ieee->ap_mac_addr,
- 0, key, 0);
- setKey(dev, 4, ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8 *)ieee->ap_mac_addr, 0, key);
- if (ieee->iw_mode == IW_MODE_ADHOC) {
- set_swcam(dev, ipw->u.crypt.idx,
- ipw->u.crypt.idx,
- ieee->pairwise_key_type,
- (u8 *)ieee->ap_mac_addr,
- 0, key, 0);
- setKey(dev, ipw->u.crypt.idx,
- ipw->u.crypt.idx,
+ rtl92e_enable_hw_security_config(dev);
+ rtl92e_set_swcam(dev, 4,
+ ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key, 0);
+ rtl92e_set_key(dev, 4, ipw->u.crypt.idx,
ieee->pairwise_key_type,
(u8 *)ieee->ap_mac_addr,
0, key);
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+ rtl92e_set_swcam(dev,
+ ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key, 0);
+ rtl92e_set_key(dev,
+ ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->pairwise_key_type,
+ (u8 *)ieee->ap_mac_addr,
+ 0, key);
}
}
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP)
&& ieee->pHTInfo->bCurrentHTSupport) {
- write_nic_byte(dev, 0x173, 1);
+ rtl92e_writeb(dev, 0x173, 1);
}
} else {
@@ -2595,14 +2389,15 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ieee->group_key_type = KEY_TYPE_NA;
if (ieee->group_key_type) {
- set_swcam(dev, ipw->u.crypt.idx,
- ipw->u.crypt.idx,
- ieee->group_key_type,
- broadcast_addr, 0, key, 0);
- setKey(dev, ipw->u.crypt.idx,
- ipw->u.crypt.idx,
- ieee->group_key_type,
- broadcast_addr, 0, key);
+ rtl92e_set_swcam(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->group_key_type,
+ broadcast_addr, 0, key,
+ 0);
+ rtl92e_set_key(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx,
+ ieee->group_key_type,
+ broadcast_addr, 0, key);
}
}
}
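
[Note: both branches above follow the same two-step key install: mirror the key into the driver's software CAM shadow, then program the hardware CAM, with entry 4 used for the pairwise key against the AP and crypt.idx for the ad-hoc/group cases. A hedged sketch of the pairwise case; the parameter meanings are inferred from the call sites and the key pointer type is an assumption, as is the demo_* name.]

	static void demo_install_pairwise(struct net_device *dev, u8 key_idx,
					  u8 key_type, u8 *ap_mac, u32 *key)
	{
		rtl92e_set_swcam(dev, 4, key_idx, key_type, ap_mac, 0, key, 0);
		rtl92e_set_key(dev, 4, key_idx, key_type, ap_mac, 0, key);
	}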
@@ -2707,8 +2502,8 @@ static irqreturn_t rtl8192_interrupt(int irq, void *netdev)
if (inta & IMR_RDU) {
RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
priv->stats.rxrdu++;
- write_nic_dword(dev, INTA_MASK,
- read_nic_dword(dev, INTA_MASK) & ~IMR_RDU);
+ rtl92e_writel(dev, INTA_MASK,
+ rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
tasklet_schedule(&priv->irq_rx_tasklet);
}
@@ -2782,7 +2577,6 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
struct rtl819x_ops *ops = (struct rtl819x_ops *)(id->driver_data);
unsigned long pmem_start, pmem_len, pmem_flags;
int err = -ENOMEM;
- bool bdma64 = false;
u8 revision_id;
RT_TRACE(COMP_INIT, "Configuring chip resources");
@@ -2806,8 +2600,6 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
goto err_pci_disable;
err = -ENODEV;
- if (bdma64)
- dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2850,12 +2642,12 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
pci_read_config_byte(pdev, 0x08, &revision_id);
/* If the revisionid is 0x10, the device uses rtl8192se. */
if (pdev->device == 0x8192 && revision_id == 0x10)
- goto err_rel_mem;
+ goto err_unmap;
priv->ops = ops;
- if (rtl8192_pci_findadapter(pdev, dev) == false)
- goto err_rel_mem;
+ if (rtl92e_check_adapter(pdev, dev) == false)
+ goto err_unmap;
dev->irq = pdev->irq;
priv->irq = 0;
@@ -2888,7 +2680,7 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
if (priv->polling_timer_on == 0)
- check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -2896,12 +2688,12 @@ static int rtl8192_pci_probe(struct pci_dev *pdev,
err_free_irq:
free_irq(dev->irq, dev);
priv->irq = 0;
+err_unmap:
+ iounmap((void __iomem *)ioaddr);
err_rel_mem:
release_mem_region(pmem_start, pmem_len);
err_rel_rtllib:
free_rtllib(dev);
-
- DMESG("wlan driver load failed\n");
err_pci_disable:
pci_disable_device(pdev);
return err;
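
[Note: besides the rename, this hunk fixes the probe error path: a failed revision check or adapter check used to jump straight to err_rel_mem, leaking the ioremap()ed register window; the new err_unmap label restores the usual reverse-order unwind. A stripped-down sketch of the pattern, with hypothetical demo_* helper names and error codes chosen for illustration.]

	static int demo_probe(struct pci_dev *pdev)
	{
		void __iomem *ioaddr;

		ioaddr = ioremap(pci_resource_start(pdev, 1),
				 pci_resource_len(pdev, 1));
		if (!ioaddr)
			return -ENOMEM;
		if (!demo_check_adapter(pdev))	/* hypothetical check */
			goto err_unmap;
		return 0;

	err_unmap:
		iounmap(ioaddr);		/* previously skipped */
		return -ENODEV;
	}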
@@ -2922,7 +2714,7 @@ static void rtl8192_pci_disconnect(struct pci_dev *pdev)
cancel_delayed_work(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
rtl8192_down(dev, true);
- deinit_hal_dm(dev);
+ rtl92e_dm_deinit(dev);
if (priv->pFirmware) {
vfree(priv->pFirmware);
priv->pFirmware = NULL;
@@ -2952,7 +2744,7 @@ static void rtl8192_pci_disconnect(struct pci_dev *pdev)
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
-bool NicIFEnableNIC(struct net_device *dev)
+bool rtl92e_enable_nic(struct net_device *dev)
{
bool init_status = true;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2977,12 +2769,12 @@ bool NicIFEnableNIC(struct net_device *dev)
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
priv->bfirst_init = false;
- rtl8192_irq_enable(dev);
+ rtl92e_irq_enable(dev);
priv->bdisable_nic = false;
RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
return init_status;
}
-bool NicIFDisableNIC(struct net_device *dev)
+bool rtl92e_disable_nic(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 tmp_state = 0;
@@ -2993,7 +2785,7 @@ bool NicIFDisableNIC(struct net_device *dev)
rtllib_softmac_stop_protocol(priv->rtllib, 0, false);
priv->rtllib->state = tmp_state;
rtl8192_cancel_deferred_work(priv);
- rtl8192_irq_disable(dev);
+ rtl92e_irq_disable(dev);
priv->ops->stop_adapter(dev, false);
RT_TRACE(COMP_PS, "<=========%s()\n", __func__);
@@ -3007,7 +2799,6 @@ static int __init rtl8192_pci_module_init(void)
pr_info("Copyright (c) 2007-2008, Realsil Wlan Driver\n");
if (0 != pci_register_driver(&rtl8192_pci_driver)) {
- DMESG("No device found");
/*pci_unregister_driver (&rtl8192_pci_driver);*/
return -ENODEV;
}
@@ -3021,7 +2812,7 @@ static void __exit rtl8192_pci_module_exit(void)
RT_TRACE(COMP_DOWN, "Exiting");
}
-void check_rfctrl_gpio_timer(unsigned long data)
+void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
{
struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 776d950655cb..cd948bb13840 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -578,84 +578,44 @@ struct r8192_priv {
extern const struct ethtool_ops rtl819x_ethtool_ops;
-void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb);
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
-
-u8 read_nic_io_byte(struct net_device *dev, int x);
-u32 read_nic_io_dword(struct net_device *dev, int x);
-u16 read_nic_io_word(struct net_device *dev, int x);
-void write_nic_io_byte(struct net_device *dev, int x, u8 y);
-void write_nic_io_word(struct net_device *dev, int x, u16 y);
-void write_nic_io_dword(struct net_device *dev, int x, u32 y);
-
-u8 read_nic_byte(struct net_device *dev, int x);
-u32 read_nic_dword(struct net_device *dev, int x);
-u16 read_nic_word(struct net_device *dev, int x);
-void write_nic_byte(struct net_device *dev, int x, u8 y);
-void write_nic_word(struct net_device *dev, int x, u16 y);
-void write_nic_dword(struct net_device *dev, int x, u32 y);
+u8 rtl92e_readb(struct net_device *dev, int x);
+u32 rtl92e_readl(struct net_device *dev, int x);
+u16 rtl92e_readw(struct net_device *dev, int x);
+void rtl92e_writeb(struct net_device *dev, int x, u8 y);
+void rtl92e_writew(struct net_device *dev, int x, u16 y);
+void rtl92e_writel(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
-void rtl8192_rx_enable(struct net_device *);
-void rtl8192_tx_enable(struct net_device *);
-
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
- int rate);
-void rtl8192_data_hard_stop(struct net_device *dev);
-void rtl8192_data_hard_resume(struct net_device *dev);
-void rtl8192_restart(void *data);
-void rtl819x_watchdog_wqcallback(void *data);
-void rtl8192_hw_sleep_wq(void *data);
-void watch_dog_timer_callback(unsigned long data);
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
-int rtl8192_down(struct net_device *dev, bool shutdownrf);
-int rtl8192_up(struct net_device *dev);
-void rtl8192_commit(struct net_device *dev);
-void rtl8192_set_chan(struct net_device *dev, short ch);
-
-void check_rfctrl_gpio_timer(unsigned long data);
-
-void rtl8192_hw_wakeup_wq(void *data);
-short rtl8192_pci_initdescring(struct net_device *dev);
-
-void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
-
-int _rtl8192_up(struct net_device *dev, bool is_silent_reset);
-
-short rtl8192_is_tx_queue_empty(struct net_device *dev);
-void rtl8192_irq_disable(struct net_device *dev);
-
-void rtl8192_tx_timeout(struct net_device *dev);
-void rtl8192_pci_resetdescring(struct net_device *dev);
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode);
-void rtl8192_irq_enable(struct net_device *dev);
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config);
-void rtl8192_update_cap(struct net_device *dev, u16 cap);
-void rtl8192_irq_disable(struct net_device *dev);
-
-void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
- struct rtllib_rx_stats *stats);
-long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index);
-void rtl819x_update_rxsignalstatistics8190pci(struct r8192_priv *priv,
- struct rtllib_rx_stats *pprevious_stats);
-u8 rtl819x_evm_dbtopercentage(char value);
-void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
- struct rtllib_rx_stats *pprevious_stats);
-u8 rtl819x_query_rxpwrpercentage(char antpower);
-void rtl8192_record_rxdesc_forlateruse(struct rtllib_rx_stats *psrc_stats,
- struct rtllib_rx_stats *ptarget_stats);
-bool NicIFEnableNIC(struct net_device *dev);
-bool NicIFDisableNIC(struct net_device *dev);
-
-bool MgntActSet_RF_State(struct net_device *dev,
- enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource,
- bool ProtectOrNot);
-void ActUpdateChannelAccessSetting(struct net_device *dev,
- enum wireless_mode WirelessMode,
- struct channel_access_setting *ChnlAccessSetting);
+void rtl92e_rx_enable(struct net_device *);
+void rtl92e_tx_enable(struct net_device *);
+
+void rtl92e_hw_sleep_wq(void *data);
+void rtl92e_commit(struct net_device *dev);
+
+void rtl92e_check_rfctrl_gpio_timer(unsigned long data);
+
+void rtl92e_hw_wakeup_wq(void *data);
+void rtl92e_reset_desc_ring(struct net_device *dev);
+void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode);
+void rtl92e_irq_enable(struct net_device *dev);
+void rtl92e_config_rate(struct net_device *dev, u16 *rate_config);
+void rtl92e_irq_disable(struct net_device *dev);
+
+void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
+ struct rtllib_rx_stats *stats);
+long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index);
+void rtl92e_update_rx_statistics(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pprevious_stats);
+u8 rtl92e_evm_db_to_percent(char value);
+u8 rtl92e_rx_db_to_percent(char antpower);
+void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
+ struct rtllib_rx_stats *ptarget_stats);
+bool rtl92e_enable_nic(struct net_device *dev);
+bool rtl92e_disable_nic(struct net_device *dev);
+
+bool rtl92e_set_rf_state(struct net_device *dev,
+ enum rt_rf_power_state StateToSet,
+ RT_RF_CHANGE_SOURCE ChangeSource);
#endif
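
[Note: the header now exports only what other rtl8192e files actually call: the renamed register accessors, the workqueue entry points, and rtl92e_set_rf_state(), which also loses the unused ProtectOrNot argument; everything made static in rtl_core.c simply drops out of it. Assuming the accessor semantics are unchanged by the rename, a typical caller pairs a write with a posting flush; the demo_* wrapper is illustrative.]

	static void demo_resume_fw_polling(struct net_device *dev)
	{
		rtl92e_writeb(dev, UFWP, 1);	/* resume firmware polling */
		force_pci_posting(dev);		/* flush the posted PCI write */
	}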
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 3de7cc549794..1a0c690bfa07 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -58,9 +58,6 @@ static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
0x5e4332
};
-#define RTK_UL_EDCA 0xa44f
-#define RTK_DL_EDCA 0x5e4322
-
const u32 dm_tx_bb_gain[TxBBGainTableLength] = {
0x7f8001fe, /* 12 dB */
0x788001e2, /* 11 dB */
@@ -213,6 +210,9 @@ static void dm_deInit_fsync(struct net_device *dev);
static void dm_check_txrateandretrycount(struct net_device *dev);
static void dm_check_ac_dc_power(struct net_device *dev);
+static void dm_check_fsync(struct net_device *dev);
+static void dm_CheckRfCtrlGPIO(void *data);
+static void dm_fsync_timer_callback(unsigned long data);
/*---------------------Define local function prototype-----------------------*/
@@ -224,7 +224,7 @@ static void dm_send_rssi_tofw(struct net_device *dev);
static void dm_ctstoself(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/
-void init_hal_dm(struct net_device *dev)
+void rtl92e_dm_init(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -234,10 +234,10 @@ void init_hal_dm(struct net_device *dev)
dm_init_dynamic_txpower(dev);
- init_rate_adaptive(dev);
+ rtl92e_init_adaptive_rate(dev);
dm_dig_init(dev);
- dm_init_edca_turbo(dev);
+ rtl92e_dm_init_edca_turbo(dev);
dm_init_bandwidth_autoswitch(dev);
dm_init_fsync(dev);
dm_init_rxpath_selection(dev);
@@ -249,14 +249,14 @@ void init_hal_dm(struct net_device *dev)
(void *)dm_CheckRfCtrlGPIO, dev);
}
-void deinit_hal_dm(struct net_device *dev)
+void rtl92e_dm_deinit(struct net_device *dev)
{
dm_deInit_fsync(dev);
}
-void hal_dm_watchdog(struct net_device *dev)
+void rtl92e_dm_watchdog(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -307,7 +307,7 @@ static void dm_check_ac_dc_power(struct net_device *dev)
};
-void init_rate_adaptive(struct net_device *dev)
+void rtl92e_init_adaptive_rate(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -444,7 +444,7 @@ static void dm_check_rate_adaptive(struct net_device *dev)
if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
targetRATR &= 0xf00fffff;
- currentRATR = read_nic_dword(dev, RATR0);
+ currentRATR = rtl92e_readl(dev, RATR0);
if (targetRATR != currentRATR) {
u32 ratr_value;
@@ -454,8 +454,8 @@ static void dm_check_rate_adaptive(struct net_device *dev)
currentRATR, targetRATR);
if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
+ rtl92e_writel(dev, RATR0, ratr_value);
+ rtl92e_writeb(dev, UFWP, 1);
pra->last_ratr = targetRATR;
}
@@ -561,40 +561,40 @@ static void dm_tx_update_tssi_weak_signal(struct net_device *dev, u8 RF_Type)
p->rfa_txpowertrackingindex--;
if (p->rfa_txpowertrackingindex_real > 4) {
p->rfa_txpowertrackingindex_real--;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
}
p->rfc_txpowertrackingindex--;
if (p->rfc_txpowertrackingindex_real > 4) {
p->rfc_txpowertrackingindex_real--;
- rtl8192_setBBreg(dev,
- rOFDM0_XCTxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
}
} else {
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[4]);
- rtl8192_setBBreg(dev,
- rOFDM0_XCTxIQImbalance,
- bMaskDWord, dm_tx_bb_gain[4]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[4]);
+ rtl92e_set_bb_reg(dev,
+ rOFDM0_XCTxIQImbalance,
+ bMaskDWord, dm_tx_bb_gain[4]);
}
} else {
if (p->rfa_txpowertrackingindex > 0) {
p->rfa_txpowertrackingindex--;
if (p->rfa_txpowertrackingindex_real > 4) {
p->rfa_txpowertrackingindex_real--;
- rtl8192_setBBreg(dev,
- rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev,
+ rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
}
} else {
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord, dm_tx_bb_gain[4]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord, dm_tx_bb_gain[4]);
}
}
}
@@ -608,36 +608,33 @@ static void dm_tx_update_tssi_strong_signal(struct net_device *dev, u8 RF_Type)
(p->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) {
p->rfa_txpowertrackingindex++;
p->rfa_txpowertrackingindex_real++;
- rtl8192_setBBreg(dev,
- rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
p->rfc_txpowertrackingindex++;
p->rfc_txpowertrackingindex_real++;
- rtl8192_setBBreg(dev,
- rOFDM0_XCTxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
} else {
- rtl8192_setBBreg(dev,
- rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
- rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[TxBBGainTableLength - 1]);
}
} else {
if (p->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
p->rfa_txpowertrackingindex++;
p->rfa_txpowertrackingindex_real++;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
} else {
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord,
+ dm_tx_bb_gain[TxBBGainTableLength - 1]);
}
}
}
@@ -656,8 +653,8 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
u32 delta = 0;
RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
bHighpowerstate = priv->bDynamicTxHighPower;
@@ -674,12 +671,11 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
tx_cmd.Length = 4;
tx_cmd.Value = Value;
- cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
- DESC_PACKET_TYPE_INIT,
- sizeof(struct dcmd_txcmd));
+ rtl92e_send_cmd_pkt(dev, (u8 *)&tx_cmd, DESC_PACKET_TYPE_INIT,
+ sizeof(struct dcmd_txcmd));
mdelay(1);
for (i = 0; i <= 30; i++) {
- Pwr_Flag = read_nic_byte(dev, Pw_Track_Flag);
+ Pwr_Flag = rtl92e_readb(dev, Pw_Track_Flag);
if (Pwr_Flag == 0) {
mdelay(1);
@@ -687,35 +683,35 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
if (priv->bResetInProgress) {
RT_TRACE(COMP_POWER_TRACKING,
"we are in silent reset progress, so return\n");
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
if (priv->rtllib->eRFPowerState != eRfOn) {
RT_TRACE(COMP_POWER_TRACKING,
"we are in power save, so return\n");
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
continue;
}
- Avg_TSSI_Meas = read_nic_word(dev, Tssi_Mea_Value);
+ Avg_TSSI_Meas = rtl92e_readw(dev, Tssi_Mea_Value);
if (Avg_TSSI_Meas == 0) {
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
for (k = 0; k < 5; k++) {
if (k != 4)
- tmp_report[k] = read_nic_byte(dev,
+ tmp_report[k] = rtl92e_readb(dev,
Tssi_Report_Value1+k);
else
- tmp_report[k] = read_nic_byte(dev,
+ tmp_report[k] = rtl92e_readb(dev,
Tssi_Report_Value2);
RT_TRACE(COMP_POWER_TRACKING,
@@ -729,7 +725,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
}
if (viviflag) {
- write_nic_byte(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
viviflag = false;
RT_TRACE(COMP_POWER_TRACKING,
"we filted this data\n");
@@ -756,8 +752,8 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
if (delta <= E_FOR_TX_POWER_TRACK) {
priv->rtllib->bdynamic_txpower_enable = true;
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
RT_TRACE(COMP_POWER_TRACKING,
"tx power track is done\n");
RT_TRACE(COMP_POWER_TRACKING,
@@ -806,12 +802,12 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) {
priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
} else
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
RT_TRACE(COMP_POWER_TRACKING,
"priv->rfa_txpowertrackingindex = %d\n",
@@ -829,23 +825,23 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
if (priv->CCKPresentAttentuation_difference <= -12 ||
priv->CCKPresentAttentuation_difference >= 24) {
priv->rtllib->bdynamic_txpower_enable = true;
- write_nic_byte(dev, Pw_Track_Flag, 0);
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
RT_TRACE(COMP_POWER_TRACKING,
"tx power track--->limited\n");
return;
}
- write_nic_byte(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
Avg_TSSI_Meas_from_driver = 0;
for (k = 0; k < 5; k++)
tmp_report[k] = 0;
break;
}
- write_nic_byte(dev, FW_Busy_Flag, 0);
+ rtl92e_writeb(dev, FW_Busy_Flag, 0);
}
priv->rtllib->bdynamic_txpower_enable = true;
- write_nic_byte(dev, Pw_Track_Flag, 0);
+ rtl92e_writeb(dev, Pw_Track_Flag, 0);
}
static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
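
[Note: the TSSI callback above is a firmware handshake: the driver sends a TXCMD_SET_TX_PWR_TRACKING command packet via rtl92e_send_cmd_pkt(), then polls Pw_Track_Flag for the measurement report, clearing Pw_Track_Flag and FW_Busy_Flag on every exit path. The polling skeleton, compressed; the 30-iteration bound is copied from the hunk and the demo_* name is illustrative.]

	static bool demo_wait_tssi_report(struct net_device *dev)
	{
		int i;

		for (i = 0; i <= 30; i++) {
			if (rtl92e_readb(dev, Pw_Track_Flag))
				return true;		/* report is ready */
			mdelay(1);			/* firmware still busy */
		}
		rtl92e_writeb(dev, Pw_Track_Flag, 0);	/* give up cleanly */
		rtl92e_writeb(dev, FW_Busy_Flag, 0);
		return false;
	}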
@@ -857,8 +853,8 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
int i = 0, CCKSwingNeedUpdate = 0;
if (!priv->btxpower_trackingInit) {
- tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance,
- bMaskDWord);
+ tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+ bMaskDWord);
for (i = 0; i < OFDM_Table_Length; i++) {
if (tmpRegA == OFDMSwingTable[i]) {
priv->OFDM_index[0] = (u8)i;
@@ -869,7 +865,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
}
}
- TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
+ TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
for (i = 0; i < CCK_Table_length; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
priv->CCK_index = (u8) i;
@@ -884,7 +880,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
return;
}
- tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078);
+ tmpRegA = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
if (tmpRegA < 3 || tmpRegA > 13)
return;
@@ -939,11 +935,11 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
}
if (CCKSwingNeedUpdate)
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
if (priv->OFDM_index[0] != tmpOFDMindex) {
priv->OFDM_index[0] = tmpOFDMindex;
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
- OFDMSwingTable[priv->OFDM_index[0]]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+ OFDMSwingTable[priv->OFDM_index[0]]);
RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
priv->OFDM_index[0],
OFDMSwingTable[priv->OFDM_index[0]]);
@@ -951,7 +947,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
priv->txpower_count = 0;
}
-void dm_txpower_trackingcallback(void *data)
+void rtl92e_dm_txpower_tracking_wq(void *data)
{
struct r8192_priv *priv = container_of_dwork_rsl(data,
struct r8192_priv, txpower_tracking_wq);
@@ -989,7 +985,7 @@ static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
priv->btxpower_tracking);
}
-void dm_initialize_txpower_tracking(struct net_device *dev)
+void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1005,7 +1001,7 @@ static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
static u32 tx_power_track_counter;
RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
- if (read_nic_byte(dev, 0x11e) == 1)
+ if (rtl92e_readb(dev, 0x11e) == 1)
return;
if (!priv->btxpower_tracking)
return;
@@ -1039,10 +1035,10 @@ static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
if (!TM_Trigger) {
{
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
- rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+ rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+ rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+ rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+ rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
}
TM_Trigger = 1;
return;
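
[Note: the first pass through the thermal-meter check only arms the measurement: RF register 0x02 on path A is toggled 0x4d/0x4f twice, and the actual readback happens on a later watchdog tick once TM_Trigger is set. A sketch with the values copied from the hunk; demo_arm_thermal_meter is an illustrative name.]

	static void demo_arm_thermal_meter(struct net_device *dev)
	{
		int i;

		for (i = 0; i < 2; i++) {	/* two 0x4d/0x4f toggles */
			rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02,
					  bMask12Bits, 0x4d);
			rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02,
					  bMask12Bits, 0x4f);
		}
	}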
@@ -1074,30 +1070,30 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
TempVal = (u32)(dm_cck_tx_bb_gain[attenuation][0] +
(dm_cck_tx_bb_gain[attenuation][1] << 8));
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain[attenuation][2]) +
(dm_cck_tx_bb_gain[attenuation][3] << 8) +
(dm_cck_tx_bb_gain[attenuation][4] << 16)+
(dm_cck_tx_bb_gain[attenuation][5] << 24));
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = (u32)(dm_cck_tx_bb_gain[attenuation][6] +
(dm_cck_tx_bb_gain[attenuation][7] << 8));
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
} else {
TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][0]) +
(dm_cck_tx_bb_gain_ch14[attenuation][1] << 8));
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][2]) +
(dm_cck_tx_bb_gain_ch14[attenuation][3] << 8) +
(dm_cck_tx_bb_gain_ch14[attenuation][4] << 16)+
(dm_cck_tx_bb_gain_ch14[attenuation][5] << 24));
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][6]) +
(dm_cck_tx_bb_gain_ch14[attenuation][7] << 8));
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
}
}
@@ -1111,7 +1107,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev,
if (!bInCH14) {
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING,
"CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1,
TempVal);
@@ -1119,14 +1115,14 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev,
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING,
"CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2,
TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING,
"CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort,
TempVal);
@@ -1134,26 +1130,26 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev,
TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
(CCKSwingTable_Ch14[priv->CCK_index][1] << 8);
- rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch14[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch14[priv->CCK_index][5] << 24);
- rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
- rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+ rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_DebugPort, TempVal);
}
}
-void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
+void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1168,8 +1164,8 @@ static void dm_txpower_reset_recovery(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
- rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
- dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+ dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",
dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
RT_TRACE(COMP_POWER_TRACKING,
@@ -1181,10 +1177,10 @@ static void dm_txpower_reset_recovery(struct net_device *dev)
RT_TRACE(COMP_POWER_TRACKING,
"Reset Recovery: CCK Attenuation is %d dB\n",
priv->CCKPresentAttentuation);
- dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+	rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);

-	rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
- dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
+ rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
+ dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",
dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
RT_TRACE(COMP_POWER_TRACKING,
@@ -1195,7 +1191,7 @@ static void dm_txpower_reset_recovery(struct net_device *dev)
dm_tx_bb_gain_idx_to_amplify(priv->rfc_txpowertrackingindex));
}
-void dm_restore_dynamic_mechanism_state(struct net_device *dev)
+void rtl92e_dm_restore_state(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u32 reg_ratr = priv->rate_adaptive.last_ratr;
@@ -1203,7 +1199,7 @@ void dm_restore_dynamic_mechanism_state(struct net_device *dev)
if (!priv->up) {
RT_TRACE(COMP_RATE,
- "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
+ "<---- rtl92e_dm_restore_state(): driver is going to unload\n");
return;
}
@@ -1215,8 +1211,8 @@ void dm_restore_dynamic_mechanism_state(struct net_device *dev)
ratr_value = reg_ratr;
if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
+ rtl92e_writel(dev, RATR0, ratr_value);
+ rtl92e_writeb(dev, UFWP, 1);
if (priv->btxpower_trackingInit && priv->btxpower_tracking)
dm_txpower_reset_recovery(dev);
@@ -1232,18 +1228,18 @@ static void dm_bb_initialgain_restore(struct net_device *dev)
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask,
- (u32)priv->initgain_backup.xdagccore1);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
+ rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask,
+ (u32)priv->initgain_backup.xaagccore1);
+ rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask,
+ (u32)priv->initgain_backup.xbagccore1);
+ rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask,
+ (u32)priv->initgain_backup.xcagccore1);
+ rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask,
+ (u32)priv->initgain_backup.xdagccore1);
bit_mask = bMaskByte2;
- rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask,
- (u32)priv->initgain_backup.cca);
+ rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
+ (u32)priv->initgain_backup.cca);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",
priv->initgain_backup.xaagccore1);
@@ -1255,12 +1251,12 @@ static void dm_bb_initialgain_restore(struct net_device *dev)
priv->initgain_backup.xdagccore1);
RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",
priv->initgain_backup.cca);
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
}
-void dm_backup_dynamic_mechanism_state(struct net_device *dev)
+void rtl92e_dm_backup_state(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1279,13 +1275,13 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
return;
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
- priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
- priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
- priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
- priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
+ priv->initgain_backup.xaagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
+ priv->initgain_backup.xbagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
+ priv->initgain_backup.xcagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
+ priv->initgain_backup.xdagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
+ priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",
priv->initgain_backup.xaagccore1);
@@ -1376,7 +1372,7 @@ static void dm_ctrl_initgain_byrssi_by_driverrssi(
fw_dig = 0;
if (fw_dig <= 3) {
for (i = 0; i < 3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
fw_dig++;
dm_digtable.dig_state = DM_STA_DIG_OFF;
}
@@ -1410,7 +1406,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
if (dm_digtable.dig_algorithm_switch) {
dm_digtable.dig_state = DM_STA_DIG_MAX;
for (i = 0; i < 3; i++)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
dm_digtable.dig_algorithm_switch = 0;
}
@@ -1429,19 +1425,19 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
dm_digtable.dig_state = DM_STA_DIG_OFF;
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+		rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);

-		write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
+ rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x17);
+ rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x17);
+ rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x17);
+ rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);

-		write_nic_byte(dev, 0xa0a, 0x08);
+ rtl92e_writeb(dev, 0xa0a, 0x08);
return;
}
@@ -1462,25 +1458,25 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
dm_digtable.dig_state = DM_STA_DIG_ON;
if (reset_flag == 1) {
- write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
+ rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x2c);
+ rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x2c);
+ rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x2c);
+ rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x2c);
} else {
- write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
+ rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x20);
+ rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x20);
+ rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x20);
+ rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x20);
}
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);

-		write_nic_byte(dev, 0xa0a, 0xcd);
+		rtl92e_writeb(dev, 0xa0a, 0xcd);

-		rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
}
dm_ctrl_initgain_byrssi_highpwr(dev);
}
@@ -1505,9 +1501,9 @@ static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+ rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
} else {
if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
(priv->reset_count == reset_cnt_highpwr))
@@ -1519,9 +1515,9 @@ static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
(priv->undecorated_smoothed_pwdb >=
dm_digtable.rssi_high_thresh)) {
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+ rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
}
}
reset_cnt_highpwr = priv->reset_count;
@@ -1568,16 +1564,16 @@ static void dm_initial_gain(struct net_device *dev)
reset_cnt = priv->reset_count;
}
- if (dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
+ if (dm_digtable.pre_ig_value != rtl92e_readb(dev, rOFDM0_XAAGCCore1))
force_write = 1;
if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
|| !initialized || force_write) {
initial_gain = (u8)dm_digtable.cur_ig_value;
- write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
+ rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
initialized = 1;
force_write = 0;
@@ -1630,20 +1626,20 @@ static void dm_pd_th(struct net_device *dev)
(initialized <= 3) || force_write) {
if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+ rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
} else if (dm_digtable.curpd_thstate ==
DIG_PD_AT_NORMAL_POWER) {
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+ rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
} else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
- write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+ rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
else
- write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+ rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
}
dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
if (initialized <= 3)
@@ -1687,16 +1683,16 @@ static void dm_cs_ratio(struct net_device *dev)
if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
!initialized || force_write) {
if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
- write_nic_byte(dev, 0xa0a, 0x08);
+ rtl92e_writeb(dev, 0xa0a, 0x08);
else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
- write_nic_byte(dev, 0xa0a, 0xcd);
+ rtl92e_writeb(dev, 0xa0a, 0xcd);
dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
initialized = 1;
force_write = 0;
}
}
-void dm_init_edca_turbo(struct net_device *dev)
+void rtl92e_dm_init_edca_turbo(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1745,21 +1741,19 @@ static void dm_check_edca_turbo(struct net_device *dev)
if (curTxOkCnt > 4*curRxOkCnt) {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
- write_nic_dword(dev, EDCAPARA_BE,
- edca_setting_UL[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_UL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
} else {
if (!priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
if (priv->rtllib->mode == WIRELESS_MODE_G)
- write_nic_dword(dev,
- EDCAPARA_BE,
- edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_DL_GMode[pHTInfo->IOTPeer]);
else
- write_nic_dword(dev,
- EDCAPARA_BE,
- edca_setting_DL[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_DL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = true;
}
}
@@ -1769,20 +1763,18 @@ static void dm_check_edca_turbo(struct net_device *dev)
if (!priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
if (priv->rtllib->mode == WIRELESS_MODE_G)
- write_nic_dword(dev,
- EDCAPARA_BE,
- edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_DL_GMode[pHTInfo->IOTPeer]);
else
- write_nic_dword(dev,
- EDCAPARA_BE,
- edca_setting_DL[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_DL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = true;
}
} else {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
- write_nic_dword(dev, EDCAPARA_BE,
- edca_setting_UL[pHTInfo->IOTPeer]);
+ rtl92e_writel(dev, EDCAPARA_BE,
+ edca_setting_UL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
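
[Note: both hunks above implement the same EDCA-turbo heuristic: compare the period's TX-OK and RX-OK byte counts and load the BE queue's EDCA parameters for the dominant traffic direction, with a G-mode variant of the downlink table. Reduced to its core, under the assumption that the state-tracking flags (bis_cur_rdlstate, bcurrent_turbo_EDCA) only gate redundant writes; the demo_* name and the u64 counter type are illustrative.]

	static void demo_edca_turbo(struct net_device *dev, u64 tx_ok,
				    u64 rx_ok, u8 peer, bool g_mode)
	{
		if (tx_ok > 4 * rx_ok)		/* uplink dominates */
			rtl92e_writel(dev, EDCAPARA_BE, edca_setting_UL[peer]);
		else if (g_mode)		/* downlink, 802.11g peer */
			rtl92e_writel(dev, EDCAPARA_BE,
				      edca_setting_DL_GMode[peer]);
		else				/* downlink dominates */
			rtl92e_writel(dev, EDCAPARA_BE, edca_setting_DL[peer]);
	}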
@@ -1854,7 +1846,7 @@ static void dm_check_pbc_gpio(struct net_device *dev)
{
}
-void dm_CheckRfCtrlGPIO(void *data)
+static void dm_CheckRfCtrlGPIO(void *data)
{
struct r8192_priv *priv = container_of_dwork_rsl(data,
struct r8192_priv, gpio_change_rf_wq);
@@ -1877,7 +1869,7 @@ void dm_CheckRfCtrlGPIO(void *data)
return;
}
- tmp1byte = read_nic_byte(dev, GPI);
+ tmp1byte = rtl92e_readb(dev, GPI);
eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
@@ -1896,8 +1888,7 @@ void dm_CheckRfCtrlGPIO(void *data)
if (bActuallySet) {
mdelay(1000);
priv->bHwRfOffAction = 1;
- MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW,
- true);
+ rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
if (priv->bHwRadioOff)
argv[1] = "RFOFF";
else
@@ -1909,7 +1900,7 @@ void dm_CheckRfCtrlGPIO(void *data)
}
}
-void dm_rf_pathcheck_workitemcallback(void *data)
+void rtl92e_dm_rf_pathcheck_wq(void *data)
{
struct r8192_priv *priv = container_of_dwork_rsl(data,
struct r8192_priv,
@@ -1917,7 +1908,7 @@ void dm_rf_pathcheck_workitemcallback(void *data)
struct net_device *dev = priv->rtllib->dev;
u8 rfpath = 0, i;
- rfpath = read_nic_byte(dev, 0xc04);
+ rfpath = rtl92e_readb(dev, 0xc04);
for (i = 0; i < RF90_PATH_MAX; i++) {
if (rfpath & (0x01<<i))
@@ -1974,12 +1965,12 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
return;
if (!cck_Rx_Path_initialized) {
- DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf);
+ DM_RxPathSelTable.cck_Rx_path = (rtl92e_readb(dev, 0xa07)&0xf);
cck_Rx_Path_initialized = 1;
}
DM_RxPathSelTable.disabledRF = 0xf;
- DM_RxPathSelTable.disabledRF &= ~(read_nic_byte(dev, 0xc04));
+ DM_RxPathSelTable.disabledRF &= ~(rtl92e_readb(dev, 0xc04));
if (priv->rtllib->mode == WIRELESS_MODE_B)
DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
@@ -2116,10 +2107,10 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
DM_RxPathSelTable.diff_TH) {
DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] =
tmp_max_rssi+5;
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable,
- 0x1<<min_rssi_index, 0x0);
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable,
- 0x1<<min_rssi_index, 0x0);
+ rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
+ 0x1<<min_rssi_index, 0x0);
+ rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
+ 0x1<<min_rssi_index, 0x0);
disabled_rf_cnt++;
}
if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
@@ -2133,8 +2124,8 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
if (update_cck_rx_path) {
DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2) |
(cck_optional_Rx);
- rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000,
- DM_RxPathSelTable.cck_Rx_path);
+ rtl92e_set_bb_reg(dev, rCCK0_AFESetting, 0x0f000000,
+ DM_RxPathSelTable.cck_Rx_path);
}
if (DM_RxPathSelTable.disabledRF) {
@@ -2142,12 +2133,12 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) {
if (tmp_max_rssi >=
DM_RxPathSelTable.rf_enable_rssi_th[i]) {
- rtl8192_setBBreg(dev,
- rOFDM0_TRxPathEnable, 0x1 << i,
- 0x1);
- rtl8192_setBBreg(dev,
- rOFDM1_TRxPathEnable,
- 0x1 << i, 0x1);
+ rtl92e_set_bb_reg(dev,
+ rOFDM0_TRxPathEnable,
+ 0x1 << i, 0x1);
+ rtl92e_set_bb_reg(dev,
+ rOFDM1_TRxPathEnable,
+ 0x1 << i, 0x1);
DM_RxPathSelTable.rf_enable_rssi_th[i]
= 100;
disabled_rf_cnt--;
@@ -2191,7 +2182,7 @@ static void dm_deInit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-void dm_fsync_timer_callback(unsigned long data)
+static void dm_fsync_timer_callback(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
@@ -2252,18 +2243,18 @@ void dm_fsync_timer_callback(unsigned long data)
bDoubleTimeInterval = true;
priv->bswitch_fsync = !priv->bswitch_fsync;
if (priv->bswitch_fsync) {
- write_nic_byte(dev, 0xC36, 0x1c);
- write_nic_byte(dev, 0xC3e, 0x90);
+ rtl92e_writeb(dev, 0xC36, 0x1c);
+ rtl92e_writeb(dev, 0xC3e, 0x90);
} else {
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
+ rtl92e_writeb(dev, 0xC36, 0x5c);
+ rtl92e_writeb(dev, 0xC3e, 0x96);
}
} else if (priv->undecorated_smoothed_pwdb <=
priv->rtllib->fsync_rssi_threshold) {
if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
+ rtl92e_writeb(dev, 0xC36, 0x5c);
+ rtl92e_writeb(dev, 0xC3e, 0x96);
}
}
if (bDoubleTimeInterval) {
@@ -2283,11 +2274,11 @@ void dm_fsync_timer_callback(unsigned long data)
} else {
if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
+ rtl92e_writeb(dev, 0xC36, 0x5c);
+ rtl92e_writeb(dev, 0xC3e, 0x96);
}
priv->ContinueDiffCount = 0;
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+ rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
RT_TRACE(COMP_HALDM,
@@ -2302,10 +2293,10 @@ static void dm_StartHWFsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf);
+ rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
(u8 *)(&rf_timing));
- write_nic_byte(dev, 0xc3b, 0x41);
+ rtl92e_writeb(dev, 0xc3b, 0x41);
}
static void dm_EndHWFsync(struct net_device *dev)
@@ -2314,10 +2305,10 @@ static void dm_EndHWFsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+ rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
(&rf_timing));
- write_nic_byte(dev, 0xc3b, 0x49);
+ rtl92e_writeb(dev, 0xc3b, 0x49);
}
static void dm_EndSWFsync(struct net_device *dev)
@@ -2330,13 +2321,13 @@ static void dm_EndSWFsync(struct net_device *dev)
if (priv->bswitch_fsync) {
priv->bswitch_fsync = false;
- write_nic_byte(dev, 0xC36, 0x5c);
- write_nic_byte(dev, 0xC3e, 0x96);
+ rtl92e_writeb(dev, 0xC36, 0x5c);
+ rtl92e_writeb(dev, 0xC3e, 0x96);
}
priv->ContinueDiffCount = 0;
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+ rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
static void dm_StartSWFsync(struct net_device *dev)
@@ -2371,11 +2362,11 @@ static void dm_StartSWFsync(struct net_device *dev)
msecs_to_jiffies(priv->rtllib->fsync_time_interval);
add_timer(&priv->fsync_timer);
- write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
+ rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cd);
}
-void dm_check_fsync(struct net_device *dev)
+static void dm_check_fsync(struct net_device *dev)
{
#define RegC38_Default 0
#define RegC38_NonFsync_Other_AP 1
@@ -2431,7 +2422,7 @@ void dm_check_fsync(struct net_device *dev)
}
if (priv->framesyncMonitor) {
if (reg_c38_State != RegC38_Fsync_AP_BCM) {
- write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
+ rtl92e_writeb(dev, rOFDM0_RxDetector3, 0x95);
reg_c38_State = RegC38_Fsync_AP_BCM;
}
@@ -2457,7 +2448,7 @@ void dm_check_fsync(struct net_device *dev)
RegC38_TH) {
if (reg_c38_State !=
RegC38_NonFsync_Other_AP) {
- write_nic_byte(dev,
+ rtl92e_writeb(dev,
rOFDM0_RxDetector3,
0x90);
@@ -2467,7 +2458,7 @@ void dm_check_fsync(struct net_device *dev)
} else if (priv->undecorated_smoothed_pwdb >=
(RegC38_TH+5)) {
if (reg_c38_State) {
- write_nic_byte(dev,
+ rtl92e_writeb(dev,
rOFDM0_RxDetector3,
priv->framesync);
reg_c38_State = RegC38_Default;
@@ -2475,8 +2466,8 @@ void dm_check_fsync(struct net_device *dev)
}
} else {
if (reg_c38_State) {
- write_nic_byte(dev, rOFDM0_RxDetector3,
- priv->framesync);
+ rtl92e_writeb(dev, rOFDM0_RxDetector3,
+ priv->framesync);
reg_c38_State = RegC38_Default;
}
}
@@ -2484,14 +2475,14 @@ void dm_check_fsync(struct net_device *dev)
}
if (priv->framesyncMonitor) {
if (priv->reset_count != reset_cnt) {
- write_nic_byte(dev, rOFDM0_RxDetector3,
+ rtl92e_writeb(dev, rOFDM0_RxDetector3,
priv->framesync);
reg_c38_State = RegC38_Default;
reset_cnt = priv->reset_count;
}
} else {
if (reg_c38_State) {
- write_nic_byte(dev, rOFDM0_RxDetector3,
+ rtl92e_writeb(dev, rOFDM0_RxDetector3,
priv->framesync);
reg_c38_State = RegC38_Default;
}
@@ -2556,8 +2547,7 @@ static void dm_dynamic_txpower(struct net_device *dev)
RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n",
priv->rtllib->current_network.channel);
- rtl8192_phy_setTxPower(dev,
- priv->rtllib->current_network.channel);
+ rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
@@ -2569,13 +2559,13 @@ static void dm_check_txrateandretrycount(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev,
+ ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev,
Current_Tx_Rate_Reg);
- ieee->softmac_stats.last_packet_rate = read_nic_byte(dev,
+ ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev,
Initial_Tx_Rate_Reg);
- ieee->softmac_stats.txretrycount = read_nic_dword(dev,
+ ieee->softmac_stats.txretrycount = rtl92e_readl(dev,
Tx_Retry_Count_Reg);
}
@@ -2583,5 +2573,5 @@ static void dm_send_rssi_tofw(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+ rtl92e_writeb(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
}
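
Everything in rtl_dm.c above is mechanical: the old write_nic_byte()/read_nic_byte()/write_nic_dword() accessors become rtl92e_writeb()/rtl92e_readb()/rtl92e_writel(), matching the rtl92e_ prefix used across the rest of this series. The diff only renames call sites and never shows the accessor bodies; as rough orientation, accessors of this kind are typically thin MMIO wrappers along the following lines (a sketch only -- the mem_start field is an assumption made for illustration, not the driver's actual layout):

/*
 * Sketch, not the driver's implementation: register accessors like
 * these are usually thin wrappers around the generic MMIO helpers.
 */
#include <linux/io.h>
#include <linux/netdevice.h>

struct r8192_priv_sketch {
	void __iomem *mem_start;	/* mapped PCI BAR (assumed) */
};

static u8 rtl92e_readb_sketch(struct net_device *dev, int x)
{
	struct r8192_priv_sketch *priv = netdev_priv(dev);

	return ioread8(priv->mem_start + x);
}

static void rtl92e_writeb_sketch(struct net_device *dev, int x, u8 y)
{
	struct r8192_priv_sketch *priv = netdev_priv(dev);

	iowrite8(y, priv->mem_start + x);
}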
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index b037451c3ada..097f0dc2056d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -27,26 +27,17 @@
#define DM_DIG_THRESH_HIGH 40
#define DM_DIG_THRESH_LOW 35
-#define DM_FALSEALARM_THRESH_LOW 40
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
#define DM_DIG_HIGH_PWR_THRESH_LOW 70
#define BW_AUTO_SWITCH_HIGH_LOW 25
#define BW_AUTO_SWITCH_LOW_HIGH 30
-#define DM_check_fsync_time_interval 500
-
-
#define DM_DIG_BACKOFF 12
#define DM_DIG_MAX 0x36
#define DM_DIG_MIN 0x1c
#define DM_DIG_MIN_Netcore 0x12
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-
#define RxPathSelection_SS_TH_low 30
#define RxPathSelection_diff_TH 18
@@ -55,8 +46,6 @@
#define RateAdaptiveTH_Low_40M 10
#define VeryLowRSSI 15
-#define CTSToSelfTHVal 35
-
#define WAIotTHVal 25
#define E_FOR_TX_POWER_TRACK 300
@@ -70,14 +59,6 @@
#define Tx_Retry_Count_Reg 0x1ac
#define RegC38_TH 20
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-
-#define TxHighPwrLevel_Normal 0
-#define TxHighPwrLevel_Level1 1
-#define TxHighPwrLevel_Level2 2
-
-#define DM_Type_ByFW 0
#define DM_Type_ByDriver 1
/*--------------------------Define Parameters-------------------------------*/
@@ -207,23 +188,20 @@ extern const u8 dm_cck_tx_bb_gain_ch14[CCKTxBBGainTableLength][8];
/*--------------------------Exported Function prototype---------------------*/
/*--------------------------Exported Function prototype---------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
+void rtl92e_dm_init(struct net_device *dev);
+void rtl92e_dm_deinit(struct net_device *dev);
-extern void hal_dm_watchdog(struct net_device *dev);
+void rtl92e_dm_watchdog(struct net_device *dev);
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(void *data);
+void rtl92e_init_adaptive_rate(struct net_device *dev);
+void rtl92e_dm_txpower_tracking_wq(void *data);
-extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_pathcheck_workitemcallback(void *data);
-extern void dm_fsync_timer_callback(unsigned long data);
-extern void dm_check_fsync(struct net_device *dev);
-extern void dm_initialize_txpower_tracking(struct net_device *dev);
-extern void dm_CheckRfCtrlGPIO(void *data);
+void rtl92e_dm_restore_state(struct net_device *dev);
+void rtl92e_dm_backup_state(struct net_device *dev);
+void rtl92e_dm_init_edca_turbo(struct net_device *dev);
+void rtl92e_dm_rf_pathcheck_wq(void *data);
+void rtl92e_dm_init_txpower_tracking(struct net_device *dev);
#endif /*__R8192UDM_H__ */
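
Besides the renames, the header hunk drops the extern storage-class specifier from the surviving prototypes. That change is purely stylistic: file-scope function declarations have external linkage by default, so both spellings below declare the same thing, and kernel style prefers the second:

struct net_device;

extern void rtl92e_dm_init(struct net_device *dev);	/* old style, redundant keyword */
void rtl92e_dm_init(struct net_device *dev);		/* new style, identical meaning */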
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index a6778e0853c7..039ccfd41230 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -25,115 +25,75 @@
#include "rtl_core.h"
#include "rtl_eeprom.h"
-static void eprom_cs(struct net_device *dev, short bit)
+static void _rtl92e_gpio_write_bit(struct net_device *dev, int no, bool val)
{
- if (bit)
- write_nic_byte(dev, EPROM_CMD,
- (1 << EPROM_CS_SHIFT) |
- read_nic_byte(dev, EPROM_CMD));
+ u8 reg = rtl92e_readb(dev, EPROM_CMD);
+
+ if (val)
+ reg |= 1 << no;
else
- write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
- & ~(1<<EPROM_CS_SHIFT));
+ reg &= ~(1 << no);
+ rtl92e_writeb(dev, EPROM_CMD, reg);
udelay(EPROM_DELAY);
}
-
-static void eprom_ck_cycle(struct net_device *dev)
+static bool _rtl92e_gpio_get_bit(struct net_device *dev, int no)
{
- write_nic_byte(dev, EPROM_CMD,
- (1<<EPROM_CK_SHIFT) | read_nic_byte(dev, EPROM_CMD));
- udelay(EPROM_DELAY);
- write_nic_byte(dev, EPROM_CMD,
- read_nic_byte(dev, EPROM_CMD) & ~(1<<EPROM_CK_SHIFT));
- udelay(EPROM_DELAY);
-}
+ u8 reg = rtl92e_readb(dev, EPROM_CMD);
+ return (reg >> no) & 0x1;
+}
-static void eprom_w(struct net_device *dev, short bit)
+static void _rtl92e_eeprom_ck_cycle(struct net_device *dev)
{
- if (bit)
- write_nic_byte(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) |
- read_nic_byte(dev, EPROM_CMD));
- else
- write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
- & ~(1<<EPROM_W_SHIFT));
-
- udelay(EPROM_DELAY);
+ _rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 1);
+ _rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 0);
}
-
-static short eprom_r(struct net_device *dev)
+static u16 _rtl92e_eeprom_xfer(struct net_device *dev, u16 data, int tx_len)
{
- short bit;
+ u16 ret = 0;
+ int rx_len = 16;
- bit = (read_nic_byte(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT));
- udelay(EPROM_DELAY);
+ _rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 1);
+ _rtl92e_eeprom_ck_cycle(dev);
- if (bit)
- return 1;
- return 0;
-}
+ while (tx_len--) {
+ _rtl92e_gpio_write_bit(dev, EPROM_W_BIT,
+ (data >> tx_len) & 0x1);
+ _rtl92e_eeprom_ck_cycle(dev);
+ }
-static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
-{
- int i;
+ _rtl92e_gpio_write_bit(dev, EPROM_W_BIT, 0);
- for (i = 0; i < len; i++) {
- eprom_w(dev, b[i]);
- eprom_ck_cycle(dev);
+ while (rx_len--) {
+ _rtl92e_eeprom_ck_cycle(dev);
+ ret |= _rtl92e_gpio_get_bit(dev, EPROM_R_BIT) << rx_len;
}
+
+ _rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 0);
+ _rtl92e_eeprom_ck_cycle(dev);
+
+ return ret;
}
-u32 eprom_read(struct net_device *dev, u32 addr)
+u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr)
{
struct r8192_priv *priv = rtllib_priv(dev);
- short read_cmd[] = {1, 1, 0};
- short addr_str[8];
- int i;
- int addr_len;
- u32 ret;
-
- ret = 0;
- write_nic_byte(dev, EPROM_CMD,
- (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
- udelay(EPROM_DELAY);
+ u32 ret = 0;
- if (priv->epromtype == EEPROM_93C56) {
- addr_str[7] = addr & 1;
- addr_str[6] = addr & (1<<1);
- addr_str[5] = addr & (1<<2);
- addr_str[4] = addr & (1<<3);
- addr_str[3] = addr & (1<<4);
- addr_str[2] = addr & (1<<5);
- addr_str[1] = addr & (1<<6);
- addr_str[0] = addr & (1<<7);
- addr_len = 8;
- } else {
- addr_str[5] = addr & 1;
- addr_str[4] = addr & (1<<1);
- addr_str[3] = addr & (1<<2);
- addr_str[2] = addr & (1<<3);
- addr_str[1] = addr & (1<<4);
- addr_str[0] = addr & (1<<5);
- addr_len = 6;
- }
- eprom_cs(dev, 1);
- eprom_ck_cycle(dev);
- eprom_send_bits_string(dev, read_cmd, 3);
- eprom_send_bits_string(dev, addr_str, addr_len);
-
- eprom_w(dev, 0);
-
- for (i = 0; i < 16; i++) {
- eprom_ck_cycle(dev);
- ret |= (eprom_r(dev)<<(15-i));
- }
+ rtl92e_writeb(dev, EPROM_CMD,
+ (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
+ udelay(EPROM_DELAY);
- eprom_cs(dev, 0);
- eprom_ck_cycle(dev);
+ /* EEPROM is configured as x16 */
+ if (priv->epromtype == EEPROM_93C56)
+ ret = _rtl92e_eeprom_xfer(dev, (addr & 0xFF) | (0x6 << 8), 11);
+ else
+ ret = _rtl92e_eeprom_xfer(dev, (addr & 0x3F) | (0x6 << 6), 9);
- write_nic_byte(dev, EPROM_CMD,
- (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ rtl92e_writeb(dev, EPROM_CMD,
+ (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
return ret;
}
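
The rtl_eeprom.c rewrite replaces four single-purpose bit-banging helpers (eprom_cs, eprom_ck_cycle, eprom_w, eprom_r) with two generic GPIO bit helpers plus one _rtl92e_eeprom_xfer() that clocks a command word out MSB first and clocks 16 data bits back in. The command words built in rtl92e_eeprom_read() put opcode 0x6 (start bit 1, then opcode 10b = READ) ahead of a 6-bit address for the default part or an 8-bit address for the 93C56. A small userspace model of just that packing, using the exact expressions from the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x2A;				/* arbitrary word address */
	uint16_t cmd46 = (addr & 0x3F) | (0x6 << 6);	/* 9 bits clocked out */
	uint16_t cmd56 = (addr & 0xFF) | (0x6 << 8);	/* 11 bits clocked out */

	printf("93C46-style read command: 0x%03x (tx_len 9)\n", cmd46);
	printf("93C56 read command:       0x%03x (tx_len 11)\n", cmd56);
	return 0;
}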
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index adea2b4c7a44..8d23aea5fb4f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -26,4 +26,4 @@
#define EPROM_DELAY 10
-u32 eprom_read(struct net_device *dev, u32 addr);
+u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 6bbd1c626e24..9fcb099e6edd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -45,7 +45,7 @@ static void rtl8192_parse_pci_configuration(struct pci_dev *pdev,
pci_write_config_byte(pdev, 0x70f, tmp);
}
-bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev)
+bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u16 VenderID;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index e8d5527a5f04..6246841bde15 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -29,6 +29,6 @@
#include <linux/pci.h>
struct net_device;
-bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev);
+bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index e4908672421c..b0268fdc100f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -23,7 +23,7 @@
#include "rtl_pm.h"
-int rtl8192E_suspend(struct pci_dev *pdev, pm_message_t state)
+int rtl92e_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r8192_priv *priv = rtllib_priv(dev);
@@ -45,16 +45,16 @@ int rtl8192E_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
if (!priv->rtllib->bSupportRemoteWakeUp) {
- MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_INIT, true);
- ulRegRead = read_nic_dword(dev, CPU_GEN);
+ rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_INIT);
+ ulRegRead = rtl92e_readl(dev, CPU_GEN);
ulRegRead |= CPU_GEN_SYSTEM_RESET;
- write_nic_dword(dev, CPU_GEN, ulRegRead);
+ rtl92e_writel(dev, CPU_GEN, ulRegRead);
} else {
- write_nic_dword(dev, WFCRC0, 0xffffffff);
- write_nic_dword(dev, WFCRC1, 0xffffffff);
- write_nic_dword(dev, WFCRC2, 0xffffffff);
- write_nic_byte(dev, PMR, 0x5);
- write_nic_byte(dev, MacBlkCtrl, 0xa);
+ rtl92e_writel(dev, WFCRC0, 0xffffffff);
+ rtl92e_writel(dev, WFCRC1, 0xffffffff);
+ rtl92e_writel(dev, WFCRC2, 0xffffffff);
+ rtl92e_writeb(dev, PMR, 0x5);
+ rtl92e_writeb(dev, MacBlkCtrl, 0xa);
}
out_pci_suspend:
netdev_info(dev, "WOL is %s\n", priv->rtllib->bSupportRemoteWakeUp ?
@@ -70,7 +70,7 @@ out_pci_suspend:
return 0;
}
-int rtl8192E_resume(struct pci_dev *pdev)
+int rtl92e_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r8192_priv *priv = rtllib_priv(dev);
@@ -95,7 +95,7 @@ int rtl8192E_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
if (priv->polling_timer_on == 0)
- check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
if (!netif_running(dev)) {
netdev_info(dev,
@@ -108,7 +108,7 @@ int rtl8192E_resume(struct pci_dev *pdev)
dev->netdev_ops->ndo_open(dev);
if (!priv->rtllib->bSupportRemoteWakeUp)
- MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_INIT, true);
+ rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_INIT);
out:
RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 7bfe44817f23..cdc45f7fb339 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include <linux/pci.h>
-int rtl8192E_suspend(struct pci_dev *dev, pm_message_t state);
-int rtl8192E_resume(struct pci_dev *dev);
+int rtl92e_suspend(struct pci_dev *dev, pm_message_t state);
+int rtl92e_resume(struct pci_dev *dev);
#endif
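
rtl_pm.c/rtl_pm.h rename the suspend/resume entry points to the rtl92e_ prefix without touching their signatures, which are the legacy PCI power-management hook types. For orientation, a sketch of how such callbacks are wired up -- hedged: the driver's real pci_driver table is outside this diff, and the .name and the omitted .probe/.remove/.id_table fields here are placeholders:

#include <linux/pci.h>
#include "rtl_pm.h"

static struct pci_driver rtl8192e_driver_sketch = {
	.name		= "rtl8192e",
	.suspend	= rtl92e_suspend,	/* int (*)(struct pci_dev *, pm_message_t) */
	.resume		= rtl92e_resume,	/* int (*)(struct pci_dev *) */
};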
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 404cb83153d9..f09560d60dc4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -45,10 +45,10 @@ static void rtl8192_hw_sleep_down(struct net_device *dev)
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);
- MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS, false);
+ rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS);
}
-void rtl8192_hw_sleep_wq(void *data)
+void rtl92e_hw_sleep_wq(void *data)
{
struct rtllib_device *ieee = container_of_dwork_rsl(data,
struct rtllib_device, hw_sleep_wq);
@@ -57,7 +57,7 @@ void rtl8192_hw_sleep_wq(void *data)
rtl8192_hw_sleep_down(dev);
}
-void rtl8192_hw_wakeup(struct net_device *dev)
+void rtl92e_hw_wakeup(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
unsigned long flags = 0;
@@ -66,7 +66,7 @@ void rtl8192_hw_wakeup(struct net_device *dev)
if (priv->RFChangeInProgress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_DBG,
- "rtl8192_hw_wakeup(): RF Change in progress!\n");
+ "rtl92e_hw_wakeup(): RF Change in progress!\n");
queue_delayed_work_rsl(priv->rtllib->wq,
&priv->rtllib->hw_wakeup_wq,
msecs_to_jiffies(10));
@@ -74,21 +74,21 @@ void rtl8192_hw_wakeup(struct net_device *dev)
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
- MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS, false);
+ rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS);
}
-void rtl8192_hw_wakeup_wq(void *data)
+void rtl92e_hw_wakeup_wq(void *data)
{
struct rtllib_device *ieee = container_of_dwork_rsl(data,
struct rtllib_device, hw_wakeup_wq);
struct net_device *dev = ieee->dev;
- rtl8192_hw_wakeup(dev);
+ rtl92e_hw_wakeup(dev);
}
#define MIN_SLEEP_TIME 50
#define MAX_SLEEP_TIME 10000
-void rtl8192_hw_to_sleep(struct net_device *dev, u64 time)
+void rtl92e_enter_sleep(struct net_device *dev, u64 time)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -133,14 +133,13 @@ static void InactivePsWorkItemCallback(struct net_device *dev)
RT_TRACE(COMP_PS, "InactivePsWorkItemCallback(): Set RF to %s.\n",
pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
- MgntActSet_RF_State(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS,
- false);
+ rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
pPSC->bSwRfProcessing = false;
RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() <---------\n");
}
-void IPSEnter(struct net_device *dev)
+void rtl92e_ips_enter(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -152,7 +151,7 @@ void IPSEnter(struct net_device *dev)
if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
(priv->rtllib->state != RTLLIB_LINKED) &&
(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
- RT_TRACE(COMP_PS, "IPSEnter(): Turn off RF.\n");
+ RT_TRACE(COMP_PS, "rtl92e_ips_enter(): Turn off RF.\n");
pPSC->eInactivePowerState = eRfOff;
priv->isRFOff = true;
priv->bInPowerSaveMode = true;
@@ -161,7 +160,7 @@ void IPSEnter(struct net_device *dev)
}
}
-void IPSLeave(struct net_device *dev)
+void rtl92e_ips_leave(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -172,7 +171,7 @@ void IPSLeave(struct net_device *dev)
rtState = priv->rtllib->eRFPowerState;
if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
- RT_TRACE(COMP_PS, "IPSLeave(): Turn on RF.\n");
+ RT_TRACE(COMP_PS, "rtl92e_ips_leave(): Turn on RF.\n");
pPSC->eInactivePowerState = eRfOn;
priv->bInPowerSaveMode = false;
InactivePsWorkItemCallback(dev);
@@ -180,7 +179,7 @@ void IPSLeave(struct net_device *dev)
}
}
-void IPSLeave_wq(void *data)
+void rtl92e_ips_leave_wq(void *data)
{
struct rtllib_device *ieee = container_of_work_rsl(data,
struct rtllib_device, ips_leave_wq);
@@ -188,11 +187,11 @@ void IPSLeave_wq(void *data)
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
}
-void rtllib_ips_leave_wq(struct net_device *dev)
+void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
enum rt_rf_power_state rtState;
@@ -206,7 +205,7 @@ void rtllib_ips_leave_wq(struct net_device *dev)
__func__);
return;
}
- netdev_info(dev, "=========>%s(): IPSLeave\n",
+ netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n",
__func__);
queue_work_rsl(priv->rtllib->wq,
&priv->rtllib->ips_leave_wq);
@@ -214,12 +213,12 @@ void rtllib_ips_leave_wq(struct net_device *dev)
}
}
-void rtllib_ips_leave(struct net_device *dev)
+void rtl92e_rtllib_ips_leave(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
}
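
A pattern worth noting in rtl_ps.c: every direct call to rtl92e_ips_leave() is bracketed by down()/up() on the rtllib ips semaphore, and rtl92e_rtllib_ips_leave() exists precisely to package that locking for callers. Restated as a minimal sketch, reusing the names and the r8192_priv/rtllib layout from the hunks above:

#include <linux/semaphore.h>

static void ips_leave_locked_sketch(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	down(&priv->rtllib->ips_sem);	/* serialize RF on/off transitions */
	rtl92e_ips_leave(dev);
	up(&priv->rtllib->ips_sem);
}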
@@ -238,7 +237,7 @@ static bool MgntActSet_802_11_PowerSaveMode(struct net_device *dev,
rtPsMode == RTLLIB_PS_DISABLED) {
unsigned long flags;
- rtl8192_hw_wakeup(dev);
+ rtl92e_hw_wakeup(dev);
priv->rtllib->sta_sleep = LPS_IS_WAKE;
spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
@@ -251,13 +250,13 @@ static bool MgntActSet_802_11_PowerSaveMode(struct net_device *dev,
return true;
}
-void LeisurePSEnter(struct net_device *dev)
+void rtl92e_leisure_ps_enter(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "LeisurePSEnter()...\n");
+ RT_TRACE(COMP_PS, "rtl92e_leisure_ps_enter()...\n");
RT_TRACE(COMP_PS,
"pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdleCount is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
@@ -275,7 +274,7 @@ void LeisurePSEnter(struct net_device *dev)
if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
RT_TRACE(COMP_LPS,
- "LeisurePSEnter(): Enter 802.11 power save mode...\n");
+ "rtl92e_leisure_ps_enter(): Enter 802.11 power save mode...\n");
if (!pPSC->bFwCtrlLPS) {
if (priv->rtllib->SetFwCmdHandler)
@@ -291,21 +290,21 @@ void LeisurePSEnter(struct net_device *dev)
}
}
-void LeisurePSLeave(struct net_device *dev)
+void rtl92e_leisure_ps_leave(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "LeisurePSLeave()...\n");
+ RT_TRACE(COMP_PS, "rtl92e_leisure_ps_leave()...\n");
RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
pPSC->bLeisurePs, priv->rtllib->ps);
if (pPSC->bLeisurePs) {
if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
RT_TRACE(COMP_LPS,
- "LeisurePSLeave(): Busy Traffic , Leave 802.11 power save..\n");
+ "rtl92e_leisure_ps_leave(): Busy Traffic , Leave 802.11 power save..\n");
MgntActSet_802_11_PowerSaveMode(dev,
RTLLIB_PS_DISABLED);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index 962f2e5b8bf8..35fc9e2a3365 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -30,18 +30,17 @@
struct net_device;
#define RT_CHECK_FOR_HANG_PERIOD 2
-#define INIT_DEFAULT_CHAN 1
-void rtl8192_hw_wakeup(struct net_device *dev);
-void rtl8192_hw_to_sleep(struct net_device *dev, u64 time);
-void rtllib_ips_leave_wq(struct net_device *dev);
-void rtllib_ips_leave(struct net_device *dev);
-void IPSLeave_wq(void *data);
+void rtl92e_hw_wakeup(struct net_device *dev);
+void rtl92e_enter_sleep(struct net_device *dev, u64 time);
+void rtl92e_rtllib_ips_leave_wq(struct net_device *dev);
+void rtl92e_rtllib_ips_leave(struct net_device *dev);
+void rtl92e_ips_leave_wq(void *data);
-void IPSEnter(struct net_device *dev);
-void IPSLeave(struct net_device *dev);
+void rtl92e_ips_enter(struct net_device *dev);
+void rtl92e_ips_leave(struct net_device *dev);
-void LeisurePSEnter(struct net_device *dev);
-void LeisurePSLeave(struct net_device *dev);
+void rtl92e_leisure_ps_enter(struct net_device *dev);
+void rtl92e_leisure_ps_leave(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index f5e4961677d2..7e3ca7ef997b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -192,7 +192,7 @@ static int r8192_wx_adapter_power_status(struct net_device *dev,
pPSC->bLeisurePs = true;
} else {
if (priv->rtllib->state == RTLLIB_LINKED)
- LeisurePSLeave(dev);
+ rtl92e_leisure_ps_leave(dev);
priv->ps_force = true;
pPSC->bLeisurePs = false;
@@ -282,10 +282,11 @@ static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
up(&priv->wx_sem);
return -1;
}
- netdev_info(dev, "=========>%s(): IPSLeave\n",
+ netdev_info(dev,
+ "=========>%s(): rtl92e_ips_leave\n",
__func__);
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
}
}
@@ -442,10 +443,11 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
up(&priv->wx_sem);
return -1;
}
- RT_TRACE(COMP_PS, "=========>%s(): IPSLeave\n",
+ RT_TRACE(COMP_PS,
+ "=========>%s(): rtl92e_ips_leave\n",
__func__);
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
}
}
@@ -700,7 +702,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
priv->rtllib->wx_set_enc = 1;
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
down(&priv->wx_sem);
@@ -711,7 +713,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
- CamResetAllEntry(dev);
+ rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
goto end_hw_sec;
@@ -729,9 +731,6 @@ static int r8192_wx_set_enc(struct net_device *dev,
hwkey[i] |= (key[4 * i + 3] & mask) << 24;
}
- #define CONF_WEP40 0x4
- #define CONF_WEP104 0x14
-
switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
case 0:
key_idx = ieee->crypt_info.tx_keyidx;
@@ -753,16 +752,16 @@ static int r8192_wx_set_enc(struct net_device *dev,
}
if (wrqu->encoding.length == 0x5) {
ieee->pairwise_key_type = KEY_TYPE_WEP40;
- EnableHWSecurityConfig8192(dev);
+ rtl92e_enable_hw_security_config(dev);
}
else if (wrqu->encoding.length == 0xd) {
ieee->pairwise_key_type = KEY_TYPE_WEP104;
- EnableHWSecurityConfig8192(dev);
- setKey(dev, key_idx, key_idx, KEY_TYPE_WEP104,
- zero_addr[key_idx], 0, hwkey);
- set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
- zero_addr[key_idx], 0, hwkey, 0);
+ rtl92e_enable_hw_security_config(dev);
+ rtl92e_set_key(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+ zero_addr[key_idx], 0, hwkey);
+ rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+ zero_addr[key_idx], 0, hwkey, 0);
} else {
netdev_info(dev,
"wrong type in WEP, not WEP40 and WEP104\n");
@@ -821,17 +820,13 @@ static int r8192_wx_set_retry(struct net_device *dev,
}
if (wrqu->retry.flags & IW_RETRY_MAX) {
priv->retry_rts = wrqu->retry.value;
- DMESG("Setting retry for RTS/CTS data to %d",
- wrqu->retry.value);
} else {
priv->retry_data = wrqu->retry.value;
- DMESG("Setting retry for non RTS/CTS data to %d",
- wrqu->retry.value);
}
- rtl8192_commit(dev);
+ rtl92e_commit(dev);
exit:
up(&priv->wx_sem);
@@ -917,7 +912,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
priv->rtllib->wx_set_enc = 1;
down(&priv->rtllib->ips_sem);
- IPSLeave(dev);
+ rtl92e_ips_leave(dev);
up(&priv->rtllib->ips_sem);
ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
@@ -933,7 +928,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
ext->alg == IW_ENCODE_ALG_NONE) {
ieee->pairwise_key_type = ieee->group_key_type
= KEY_TYPE_NA;
- CamResetAllEntry(dev);
+ rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
goto end_hw_sec;
@@ -950,28 +945,29 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
alg = KEY_TYPE_WEP104;
ieee->pairwise_key_type = alg;
- EnableHWSecurityConfig8192(dev);
+ rtl92e_enable_hw_security_config(dev);
}
memcpy((u8 *)key, ext->key, 16);
if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
if (ext->key_len == 13)
ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
- setKey(dev, idx, idx, alg, zero, 0, key);
- set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
+ rtl92e_set_key(dev, idx, idx, alg, zero, 0, key);
+ rtl92e_set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
} else if (group) {
ieee->group_key_type = alg;
- setKey(dev, idx, idx, alg, broadcast_addr, 0, key);
- set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
- key, 0);
+ rtl92e_set_key(dev, idx, idx, alg, broadcast_addr, 0,
+ key);
+ rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
+ key, 0);
} else {
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
ieee->pHTInfo->bCurrentHTSupport)
- write_nic_byte(dev, 0x173, 1);
- setKey(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
- 0, key);
- set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
- 0, key, 0);
+ rtl92e_writeb(dev, 0x173, 1);
+ rtl92e_set_key(dev, 4, idx, alg,
+ (u8 *)ieee->ap_mac_addr, 0, key);
+ rtl92e_set_swcam(dev, 4, idx, alg,
+ (u8 *)ieee->ap_mac_addr, 0, key, 0);
}
@@ -1119,41 +1115,41 @@ static int r8192_wx_get_PromiscuousMode(struct net_device *dev,
}
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
+#define IW_IOCTL(x) ((x) - SIOCSIWCOMMIT)
static iw_handler r8192_wx_handlers[] = {
- IW_IOCTL(SIOCGIWNAME) = r8192_wx_get_name,
- IW_IOCTL(SIOCSIWFREQ) = r8192_wx_set_freq,
- IW_IOCTL(SIOCGIWFREQ) = r8192_wx_get_freq,
- IW_IOCTL(SIOCSIWMODE) = r8192_wx_set_mode,
- IW_IOCTL(SIOCGIWMODE) = r8192_wx_get_mode,
- IW_IOCTL(SIOCSIWSENS) = r8192_wx_set_sens,
- IW_IOCTL(SIOCGIWSENS) = r8192_wx_get_sens,
- IW_IOCTL(SIOCGIWRANGE) = rtl8192_wx_get_range,
- IW_IOCTL(SIOCSIWAP) = r8192_wx_set_wap,
- IW_IOCTL(SIOCGIWAP) = r8192_wx_get_wap,
- IW_IOCTL(SIOCSIWSCAN) = r8192_wx_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = r8192_wx_get_scan,
- IW_IOCTL(SIOCSIWESSID) = r8192_wx_set_essid,
- IW_IOCTL(SIOCGIWESSID) = r8192_wx_get_essid,
- IW_IOCTL(SIOCSIWNICKN) = r8192_wx_set_nick,
- IW_IOCTL(SIOCGIWNICKN) = r8192_wx_get_nick,
- IW_IOCTL(SIOCSIWRATE) = r8192_wx_set_rate,
- IW_IOCTL(SIOCGIWRATE) = r8192_wx_get_rate,
- IW_IOCTL(SIOCSIWRTS) = r8192_wx_set_rts,
- IW_IOCTL(SIOCGIWRTS) = r8192_wx_get_rts,
- IW_IOCTL(SIOCSIWFRAG) = r8192_wx_set_frag,
- IW_IOCTL(SIOCGIWFRAG) = r8192_wx_get_frag,
- IW_IOCTL(SIOCSIWRETRY) = r8192_wx_set_retry,
- IW_IOCTL(SIOCGIWRETRY) = r8192_wx_get_retry,
- IW_IOCTL(SIOCSIWENCODE) = r8192_wx_set_enc,
- IW_IOCTL(SIOCGIWENCODE) = r8192_wx_get_enc,
- IW_IOCTL(SIOCSIWPOWER) = r8192_wx_set_power,
- IW_IOCTL(SIOCGIWPOWER) = r8192_wx_get_power,
- IW_IOCTL(SIOCSIWGENIE) = r8192_wx_set_gen_ie,
- IW_IOCTL(SIOCGIWGENIE) = r8192_wx_get_gen_ie,
- IW_IOCTL(SIOCSIWMLME) = r8192_wx_set_mlme,
- IW_IOCTL(SIOCSIWAUTH) = r8192_wx_set_auth,
- IW_IOCTL(SIOCSIWENCODEEXT) = r8192_wx_set_enc_ext,
+ [IW_IOCTL(SIOCGIWNAME)] = r8192_wx_get_name,
+ [IW_IOCTL(SIOCSIWFREQ)] = r8192_wx_set_freq,
+ [IW_IOCTL(SIOCGIWFREQ)] = r8192_wx_get_freq,
+ [IW_IOCTL(SIOCSIWMODE)] = r8192_wx_set_mode,
+ [IW_IOCTL(SIOCGIWMODE)] = r8192_wx_get_mode,
+ [IW_IOCTL(SIOCSIWSENS)] = r8192_wx_set_sens,
+ [IW_IOCTL(SIOCGIWSENS)] = r8192_wx_get_sens,
+ [IW_IOCTL(SIOCGIWRANGE)] = rtl8192_wx_get_range,
+ [IW_IOCTL(SIOCSIWAP)] = r8192_wx_set_wap,
+ [IW_IOCTL(SIOCGIWAP)] = r8192_wx_get_wap,
+ [IW_IOCTL(SIOCSIWSCAN)] = r8192_wx_set_scan,
+ [IW_IOCTL(SIOCGIWSCAN)] = r8192_wx_get_scan,
+ [IW_IOCTL(SIOCSIWESSID)] = r8192_wx_set_essid,
+ [IW_IOCTL(SIOCGIWESSID)] = r8192_wx_get_essid,
+ [IW_IOCTL(SIOCSIWNICKN)] = r8192_wx_set_nick,
+ [IW_IOCTL(SIOCGIWNICKN)] = r8192_wx_get_nick,
+ [IW_IOCTL(SIOCSIWRATE)] = r8192_wx_set_rate,
+ [IW_IOCTL(SIOCGIWRATE)] = r8192_wx_get_rate,
+ [IW_IOCTL(SIOCSIWRTS)] = r8192_wx_set_rts,
+ [IW_IOCTL(SIOCGIWRTS)] = r8192_wx_get_rts,
+ [IW_IOCTL(SIOCSIWFRAG)] = r8192_wx_set_frag,
+ [IW_IOCTL(SIOCGIWFRAG)] = r8192_wx_get_frag,
+ [IW_IOCTL(SIOCSIWRETRY)] = r8192_wx_set_retry,
+ [IW_IOCTL(SIOCGIWRETRY)] = r8192_wx_get_retry,
+ [IW_IOCTL(SIOCSIWENCODE)] = r8192_wx_set_enc,
+ [IW_IOCTL(SIOCGIWENCODE)] = r8192_wx_get_enc,
+ [IW_IOCTL(SIOCSIWPOWER)] = r8192_wx_set_power,
+ [IW_IOCTL(SIOCGIWPOWER)] = r8192_wx_get_power,
+ [IW_IOCTL(SIOCSIWGENIE)] = r8192_wx_set_gen_ie,
+ [IW_IOCTL(SIOCGIWGENIE)] = r8192_wx_get_gen_ie,
+ [IW_IOCTL(SIOCSIWMLME)] = r8192_wx_set_mlme,
+ [IW_IOCTL(SIOCSIWAUTH)] = r8192_wx_set_auth,
+ [IW_IOCTL(SIOCSIWENCODEEXT)] = r8192_wx_set_enc_ext,
};
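
The IW_IOCTL change is subtler than it looks. The old macro expanded to [(x)-SIOCSIWCOMMIT], so the array designator was hidden inside the macro and "IW_IOCTL(SIOCGIWNAME) = handler" only parsed after expansion. The new macro is a plain offset, and the C99 designated initializers are now written out at each use site, where a reader can see them. A self-contained illustration -- the two ioctl values are the standard wireless-extensions numbers, repeated here only for the demo:

#include <stdio.h>

#define SIOCSIWCOMMIT	0x8B00
#define SIOCGIWNAME	0x8B01
#define IW_IOCTL(x)	((x) - SIOCSIWCOMMIT)

typedef int (*iw_handler_sketch)(void);

static int get_name(void) { return 0; }

/* C99 designated initializer: the [index] is visible at the use site
 * instead of being hidden inside the macro expansion. */
static iw_handler_sketch handlers[] = {
	[IW_IOCTL(SIOCGIWNAME)] = get_name,
};

int main(void)
{
	return handlers[IW_IOCTL(SIOCGIWNAME)]();
}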
/* the following rule needs to be followed,
diff --git a/drivers/staging/rtl8192e/rtl819x_BA.h b/drivers/staging/rtl8192e/rtl819x_BA.h
index 613e14c12df3..894666465152 100644
--- a/drivers/staging/rtl8192e/rtl819x_BA.h
+++ b/drivers/staging/rtl8192e/rtl819x_BA.h
@@ -19,11 +19,7 @@
#ifndef _BATYPE_H_
#define _BATYPE_H_
-#define TOTAL_TXBA_NUM 16
-#define TOTAL_RXBA_NUM 16
-
#define BA_SETUP_TIMEOUT 200
-#define BA_INACT_TIMEOUT 60000
#define BA_POLICY_DELAYED 0
#define BA_POLICY_IMMEDIATE 1
@@ -32,7 +28,6 @@
#define ADDBA_STATUS_REFUSED 37
#define ADDBA_STATUS_INVALID_PARAM 38
-#define DELBA_REASON_QSTA_LEAVING 36
#define DELBA_REASON_END_BA 37
#define DELBA_REASON_UNKNOWN_BA 38
#define DELBA_REASON_TIMEOUT 39
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 60f536c295ab..78ede4a817fc 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -428,7 +428,6 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct rtllib_hdr_3addr *delba = NULL;
union delba_param_set *pDelBaParamSet = NULL;
- u16 *pReasonCode = NULL;
u8 *dst = NULL;
if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
@@ -453,9 +452,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
#endif
delba = (struct rtllib_hdr_3addr *)skb->data;
dst = (u8 *)(&delba->addr2[0]);
- delba += sizeof(struct rtllib_hdr_3addr);
- pDelBaParamSet = (union delba_param_set *)(delba+2);
- pReasonCode = (u16 *)(delba+4);
+ pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
if (pDelBaParamSet->field.Initiator == 1) {
struct rx_ts_record *pRxTs;
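
The rtl819x_BAProc.c hunk above is more than a rename: the deleted lines tried to step past the 802.11 header with "delba += sizeof(struct rtllib_hdr_3addr)", but C pointer arithmetic on a struct pointer advances by whole elements, so that expression moves sizeof() squared bytes, and the later (delba+2)/(delba+4) casts scale the same way. The replacement indexes the header's payload member directly instead. A minimal illustration of the scaling rule:

#include <stdio.h>
#include <stdint.h>

struct hdr { uint8_t bytes[24]; };	/* stand-in sized like a 3-addr header */

int main(void)
{
	struct hdr buf[32];
	struct hdr *p = buf;

	/* Pointer arithmetic counts elements, not bytes: */
	printf("p + 1 advances %zu bytes\n",
	       (size_t)((uint8_t *)(p + 1) - (uint8_t *)p));		/* 24 */
	printf("p + sizeof(struct hdr) advances %zu bytes\n",
	       (size_t)((uint8_t *)(p + sizeof(struct hdr)) - (uint8_t *)p)); /* 576 */
	return 0;
}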
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 0c263d9f7246..51711dcdc8ef 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -20,8 +20,6 @@
#define _RTL819XU_HTTYPE_H_
#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
#define sHTCLng 4
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index b5c3647b0f80..555745b2a75e 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -117,7 +117,7 @@ void HTUpdateDefaultSetting(struct rtllib_device *ieee)
pHTInfo->RxReorderPendingTime = 30;
}
-u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
@@ -502,7 +502,8 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
return mcsRate | 0x80;
}
-u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
+static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
+ u8 *pOperateMCS)
{
u8 i;
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 3aa35ced2b8b..fcc8fabbebb7 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -96,11 +96,6 @@ struct octet_string {
u16 Length;
};
-enum ack_policy {
- eAckPlc0_ACK = 0x00,
- eAckPlc1_NoACK = 0x01,
-};
-
#define AC0_BE 0
#define AC1_BK 1
#define AC2_VI 2
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index b8fed556928c..a93348c37f17 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -19,8 +19,6 @@
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
-#define TS_SETUP_TIMEOUT 60
-#define TS_INACT_TIMEOUT 60
#define TS_ADDBA_DELAY 60
#define TOTAL_TS_NUM 16
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 05aea4321b9d..7087959443cb 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -113,7 +113,7 @@ static void TsAddBaProcess(unsigned long data)
static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
- memset(pTsCommonInfo->Addr, 0, 6);
+ eth_zero_addr(pTsCommonInfo->Addr);
memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM);
pTsCommonInfo->TClasProc = 0;
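
ResetTsCommonInfo() now uses eth_zero_addr() from <linux/etherdevice.h> rather than an open-coded memset with a magic 6: the helper is equivalent to memset(addr, 0x00, ETH_ALEN) and states the intent. In isolation:

#include <linux/etherdevice.h>

static void reset_addr_sketch(u8 *addr)
{
	eth_zero_addr(addr);	/* same as memset(addr, 0x00, ETH_ALEN) */
}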
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index fd38c6dd146b..563ac12f0b2c 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -84,9 +84,6 @@
#define iwe_stream_add_point_rsl(info, start, stop, iwe, p) \
iwe_stream_add_point(info, start, stop, iwe, p)
-#define usb_alloc_urb_rsl(x, y) usb_alloc_urb(x, y)
-#define usb_submit_urb_rsl(x, y) usb_submit_urb(x, y)
-
static inline void *netdev_priv_rsl(struct net_device *dev)
{
return netdev_priv(dev);
@@ -110,27 +107,14 @@ static inline void *netdev_priv_rsl(struct net_device *dev)
#define HIGH_QUEUE 7
#define BEACON_QUEUE 8
-#define LOW_QUEUE BE_QUEUE
-#define NORMAL_QUEUE MGNT_QUEUE
-
#ifndef IW_MODE_MESH
#define IW_MODE_MESH 7
#endif
-#define AMSDU_SUBHEADER_LEN 14
-#define SWRF_TIMEOUT 50
#define IE_CISCO_FLAG_POSITION 0x08
#define SUPPORT_CKIP_MIC 0x08
#define SUPPORT_CKIP_PK 0x10
-#define RT_RF_OFF_LEVL_ASPM BIT0
-#define RT_RF_OFF_LEVL_CLK_REQ BIT1
-#define RT_RF_OFF_LEVL_PCI_D3 BIT2
#define RT_RF_OFF_LEVL_HALT_NIC BIT3
-#define RT_RF_OFF_LEVL_FREE_FW BIT4
-#define RT_RF_OFF_LEVL_FW_32K BIT5
-#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT6
-#define RT_RF_LPS_DISALBE_2R BIT30
-#define RT_RF_LPS_LEVEL_ASPM BIT31
#define RT_IN_PS_LEVEL(pPSC, _PS_FLAG) \
((pPSC->CurPsLevel & _PS_FLAG) ? true : false)
#define RT_CLEAR_PS_LEVEL(pPSC, _PS_FLAG) \
@@ -244,22 +228,6 @@ struct sw_chnl_cmd {
#define MGN_MCS13 0x8d
#define MGN_MCS14 0x8e
#define MGN_MCS15 0x8f
-#define MGN_MCS0_SG 0x90
-#define MGN_MCS1_SG 0x91
-#define MGN_MCS2_SG 0x92
-#define MGN_MCS3_SG 0x93
-#define MGN_MCS4_SG 0x94
-#define MGN_MCS5_SG 0x95
-#define MGN_MCS6_SG 0x96
-#define MGN_MCS7_SG 0x97
-#define MGN_MCS8_SG 0x98
-#define MGN_MCS9_SG 0x99
-#define MGN_MCS10_SG 0x9a
-#define MGN_MCS11_SG 0x9b
-#define MGN_MCS12_SG 0x9c
-#define MGN_MCS13_SG 0x9d
-#define MGN_MCS14_SG 0x9e
-#define MGN_MCS15_SG 0x9f
enum hw_variables {
HW_VAR_ETHER_ADDR,
@@ -722,42 +690,13 @@ struct rtllib_frag_entry {
u8 dst_addr[ETH_ALEN];
};
-struct rtllib_stats {
- unsigned int tx_unicast_frames;
- unsigned int tx_multicast_frames;
- unsigned int tx_fragments;
- unsigned int tx_unicast_octets;
- unsigned int tx_multicast_octets;
- unsigned int tx_deferred_transmissions;
- unsigned int tx_single_retry_frames;
- unsigned int tx_multiple_retry_frames;
- unsigned int tx_retry_limit_exceeded;
- unsigned int tx_discards;
- unsigned int rx_unicast_frames;
- unsigned int rx_multicast_frames;
- unsigned int rx_fragments;
- unsigned int rx_unicast_octets;
- unsigned int rx_multicast_octets;
- unsigned int rx_fcs_errors;
- unsigned int rx_discards_no_buffer;
- unsigned int tx_discards_wrong_sa;
- unsigned int rx_discards_undecryptable;
- unsigned int rx_message_in_msg_fragments;
- unsigned int rx_message_in_bad_msg_fragments;
-};
-
struct rtllib_device;
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
#define SEC_ACTIVE_KEY (1<<4)
#define SEC_AUTH_MODE (1<<5)
#define SEC_UNICAST_GROUP (1<<6)
#define SEC_LEVEL (1<<7)
#define SEC_ENABLED (1<<8)
-#define SEC_ENCRYPT (1<<9)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -772,7 +711,6 @@ struct rtllib_device;
#define WEP_KEY_LEN 13
#define SCM_KEY_LEN 32
-#define SCM_TEMPORAL_KEY_LENGTH 16
struct rtllib_security {
u16 active_key:2,
@@ -1187,8 +1125,6 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define WME_AC_BE 0x01
#define WME_AC_VI 0x02
#define WME_AC_VO 0x03
-#define WME_ACI_MASK 0x03
-#define WME_AIFSN_MASK 0x03
#define WME_AC_PRAM_LEN 16
#define MAX_RECEIVE_BUFFER_SIZE 9100
@@ -1204,12 +1140,6 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define ETHERNET_HEADER_SIZE 14 /* length of two Ethernet addresses
* plus ether type */
-struct ether_header {
- u8 ether_dhost[ETHER_ADDR_LEN];
- u8 ether_shost[ETHER_ADDR_LEN];
- u16 ether_type;
-} __packed;
-
enum erp_t {
ERP_NonERPpresent = 0x01,
ERP_UseProtection = 0x02,
@@ -1591,7 +1521,6 @@ struct rtllib_device {
/* Bookkeeping structures */
struct net_device_stats stats;
- struct rtllib_stats ieee_stats;
struct rtllib_softmac_stats softmac_stats;
/* Probe / Beacon management */
@@ -1673,7 +1602,6 @@ struct rtllib_device {
int short_slot;
int mode; /* A, B, G */
int modulation; /* CCK, OFDM */
- int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
/* used for forcing the ibss workqueue to terminate
* without waiting for the syncro scan to terminate
@@ -2056,244 +1984,199 @@ static inline int rtllib_is_cck_rate(u8 rate)
/* rtllib.c */
-extern void free_rtllib(struct net_device *dev);
-extern struct net_device *alloc_rtllib(int sizeof_priv);
+void free_rtllib(struct net_device *dev);
+struct net_device *alloc_rtllib(int sizeof_priv);
/* rtllib_tx.c */
-extern int rtllib_encrypt_fragment(
+int rtllib_encrypt_fragment(
struct rtllib_device *ieee,
struct sk_buff *frag,
int hdr_len);
-extern int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev);
-extern void rtllib_txb_free(struct rtllib_txb *);
+int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
+void rtllib_txb_free(struct rtllib_txb *);
/* rtllib_rx.c */
-extern int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats);
-extern void rtllib_rx_mgt(struct rtllib_device *ieee,
- struct sk_buff *skb,
- struct rtllib_rx_stats *stats);
-extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
- struct sk_buff *skb);
-extern int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
+int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats);
+void rtllib_rx_probe_rq(struct rtllib_device *ieee,
+ struct sk_buff *skb);
+int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
/* rtllib_wx.c */
-extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_set_encode(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_get_encode(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_auth(struct rtllib_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-extern int rtllib_wx_set_mlme(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
+int rtllib_wx_get_scan(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int rtllib_wx_set_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int rtllib_wx_get_encode(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_auth(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra);
+int rtllib_wx_set_mlme(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
/* rtllib_softmac.c */
-extern short rtllib_is_54g(struct rtllib_network *net);
-extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
- struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats, u16 type,
- u16 stype);
-extern void rtllib_softmac_new_net(struct rtllib_device *ieee,
- struct rtllib_network *net);
+int rtllib_rx_frame_softmac(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats, u16 type,
+ u16 stype);
+void rtllib_softmac_new_net(struct rtllib_device *ieee,
+ struct rtllib_network *net);
void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
-extern void rtllib_softmac_xmit(struct rtllib_txb *txb,
- struct rtllib_device *ieee);
-
-extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-extern void notify_wx_assoc_event(struct rtllib_device *ieee);
-extern void rtllib_softmac_check_all_nets(struct rtllib_device *ieee);
-extern void rtllib_start_bss(struct rtllib_device *ieee);
-extern void rtllib_start_master_bss(struct rtllib_device *ieee);
-extern void rtllib_start_ibss(struct rtllib_device *ieee);
-extern void rtllib_softmac_init(struct rtllib_device *ieee);
-extern void rtllib_softmac_free(struct rtllib_device *ieee);
-extern void rtllib_associate_abort(struct rtllib_device *ieee);
-extern void rtllib_disassociate(struct rtllib_device *ieee);
-extern void rtllib_stop_scan(struct rtllib_device *ieee);
-extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
-extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
-extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
-extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
- short pwr);
-extern void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
-extern void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
-extern void rtllib_start_protocol(struct rtllib_device *ieee);
-extern void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
-
-extern void rtllib_EnableNetMonitorMode(struct net_device *dev,
+void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
+
+void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+void notify_wx_assoc_event(struct rtllib_device *ieee);
+void rtllib_start_ibss(struct rtllib_device *ieee);
+void rtllib_softmac_init(struct rtllib_device *ieee);
+void rtllib_softmac_free(struct rtllib_device *ieee);
+void rtllib_disassociate(struct rtllib_device *ieee);
+void rtllib_stop_scan(struct rtllib_device *ieee);
+bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
+void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
+void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr);
+void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
+void rtllib_start_protocol(struct rtllib_device *ieee);
+void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
+
+void rtllib_EnableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_DisableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_EnableIntelPromiscuousMode(struct net_device *dev, bool bInitState);
+void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
bool bInitState);
-extern void rtllib_DisableNetMonitorMode(struct net_device *dev,
- bool bInitState);
-extern void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
- bool bInitState);
-extern void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
- bool bInitState);
-extern void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh);
-
-extern void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
- u8 mesh_flag, u8 shutdown);
-extern void rtllib_softmac_start_protocol(struct rtllib_device *ieee,
- u8 mesh_flag);
-
-extern void rtllib_reset_queue(struct rtllib_device *ieee);
-extern void rtllib_wake_all_queues(struct rtllib_device *ieee);
-extern void rtllib_stop_all_queues(struct rtllib_device *ieee);
-extern struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
-extern void rtllib_start_send_beacons(struct rtllib_device *ieee);
-extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-extern int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
- struct iw_point *p, u8 is_mesh);
-
-extern void notify_wx_assoc_event(struct rtllib_device *ieee);
-extern void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
-
-extern void softmac_mgmt_xmit(struct sk_buff *skb,
- struct rtllib_device *ieee);
-extern u16 rtllib_query_seqnum(struct rtllib_device *ieee,
- struct sk_buff *skb, u8 *dst);
-extern u8 rtllib_ap_sec_type(struct rtllib_device *ieee);
+void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
+ u8 mesh_flag, u8 shutdown);
+void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag);
+
+void rtllib_reset_queue(struct rtllib_device *ieee);
+void rtllib_wake_all_queues(struct rtllib_device *ieee);
+void rtllib_stop_all_queues(struct rtllib_device *ieee);
+struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
+void rtllib_start_send_beacons(struct rtllib_device *ieee);
+void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
+ struct iw_point *p, u8 is_mesh);
+
+void notify_wx_assoc_event(struct rtllib_device *ieee);
+void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
+
+void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee);
+u8 rtllib_ap_sec_type(struct rtllib_device *ieee);
/* rtllib_softmac_wx.c */
-extern int rtllib_wx_get_wap(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
+int rtllib_wx_get_wap(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext);
-extern int rtllib_wx_set_wap(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra);
+int rtllib_wx_set_wap(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra);
-extern int rtllib_wx_get_essid(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int rtllib_wx_set_rate(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rate(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_get_rate(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_rate(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_mode(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int rtllib_wx_set_scan(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int rtllib_wx_set_essid(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_essid(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_get_mode(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int rtllib_wx_set_freq(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int rtllib_wx_get_freq(struct rtllib_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
-extern void rtllib_wx_sync_scan_wq(void *data);
+int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
+void rtllib_wx_sync_scan_wq(void *data);
-extern int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_get_name(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_power(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_get_power(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_power(struct rtllib_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_rts(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rts(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_get_rts(struct rtllib_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
#define MAX_RECEIVE_BUFFER_SIZE 9100
void HTSetConnectBwMode(struct rtllib_device *ieee,
enum ht_channel_width Bandwidth,
enum ht_extchnl_offset Offset);
-extern void HTUpdateDefaultSetting(struct rtllib_device *ieee);
-extern void HTConstructCapabilityElement(struct rtllib_device *ieee,
- u8 *posHTCap, u8 *len,
- u8 isEncrypt, bool bAssoc);
-extern void HTConstructInfoElement(struct rtllib_device *ieee,
- u8 *posHTInfo, u8 *len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
- u8 *posRT2RTAgg, u8 *len);
-extern void HTOnAssocRsp(struct rtllib_device *ieee);
-extern void HTInitializeHTInfo(struct rtllib_device *ieee);
-extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
-extern void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
-extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
- u8 *pMCSFilter);
+void HTUpdateDefaultSetting(struct rtllib_device *ieee);
+void HTConstructCapabilityElement(struct rtllib_device *ieee,
+ u8 *posHTCap, u8 *len,
+ u8 isEncrypt, bool bAssoc);
+void HTConstructInfoElement(struct rtllib_device *ieee,
+ u8 *posHTInfo, u8 *len, u8 isEncrypt);
+void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
+ u8 *posRT2RTAgg, u8 *len);
+void HTOnAssocRsp(struct rtllib_device *ieee);
+void HTInitializeHTInfo(struct rtllib_device *ieee);
+void HTInitializeBssDesc(struct bss_ht *pBssHT);
+void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
+void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
+u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+ u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77];
-extern u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
-extern void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
-extern u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate);
-extern u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
-extern int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
-extern int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
-extern int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
-extern void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo,
- enum tr_select TxRxSelect);
-extern void BaSetupTimeOut(unsigned long data);
-extern void TxBaInactTimeout(unsigned long data);
-extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry(struct ba_record *pBA);
-extern bool GetTs(
- struct rtllib_device *ieee,
- struct ts_common_info **ppTS,
- u8 *Addr,
- u8 TID,
- enum tr_select TxRxSelect,
- bool bAddNewTs
-);
-extern void TSInitialize(struct rtllib_device *ieee);
-extern void TsStartAddBaProcess(struct rtllib_device *ieee,
- struct tx_ts_record *pTxTS);
-extern void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
-extern void RemoveAllTS(struct rtllib_device *ieee);
-void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
+void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo);
+bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
+u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
+int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
+void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+ u8 Policy, u8 bOverwritePending);
+void TsInitDelBA(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo,
+ enum tr_select TxRxSelect);
+void BaSetupTimeOut(unsigned long data);
+void TxBaInactTimeout(unsigned long data);
+void RxBaInactTimeout(unsigned long data);
+void ResetBaEntry(struct ba_record *pBA);
+bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
+ u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
+void TSInitialize(struct rtllib_device *ieee);
+void TsStartAddBaProcess(struct rtllib_device *ieee,
+ struct tx_ts_record *pTxTS);
+void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
+void RemoveAllTS(struct rtllib_device *ieee);
extern const long rtllib_wlan_frequencies[];
@@ -2317,23 +2200,19 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
/* For the function is more related to hardware setting, it's better to use the
* ieee handler to refer to it.
*/
-extern void rtllib_update_active_chan_map(struct rtllib_device *ieee);
-extern void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *pTS);
-extern int rtllib_parse_info_param(struct rtllib_device *ieee,
- struct rtllib_info_element *info_element,
- u16 length,
- struct rtllib_network *network,
- struct rtllib_rx_stats *stats);
+void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
+ struct rx_ts_record *pTS);
+int rtllib_parse_info_param(struct rtllib_device *ieee,
+ struct rtllib_info_element *info_element,
+ u16 length,
+ struct rtllib_network *network,
+ struct rtllib_rx_stats *stats);
void rtllib_indicate_packets(struct rtllib_device *ieee,
struct rtllib_rxb **prxbIndicateArray, u8 index);
-extern u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
- u8 *pOperateMCS);
-extern void HTUseDefaultSetting(struct rtllib_device *ieee);
+void HTUseDefaultSetting(struct rtllib_device *ieee);
#define RT_ASOC_RETRY_LIMIT 5
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
-extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
#define SEM_DOWN_IEEE_WX(psem) down(psem)
#define SEM_UP_IEEE_WX(psem) up(psem)
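The bulk of the rtllib.h hunk above strips the storage-class specifier from function prototypes. In C, a function declaration at file scope has external linkage by default, so `extern` adds nothing there, and checkpatch.pl warns about it in headers. A minimal sketch of the equivalence (hypothetical function name):

/* demo.h: both lines declare the same external function; 'extern' is
 * implicit for file-scope function prototypes, so the second form is
 * preferred in kernel headers.
 */
extern int demo_query_rate(int id);
int demo_query_rate(int id);

Note that `extern` stays on the *data* declarations left untouched above (MCS_FILTER_ALL, MCS_DATA_RATE, rtllib_wlan_frequencies): without it, each .c file including the header would emit its own tentative definition of the array, which fails to link under -fno-common.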
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index 42e88d69ae63..17c276d71058 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -30,8 +30,6 @@
#define DRV_NAME "rtllib_92e"
#endif
-#define DMESG(x, a...)
-
extern u32 rt_global_debug_component;
/* These are the defines for rt_global_debug_component */
@@ -40,10 +38,7 @@ enum RTL_DEBUG {
COMP_DBG = (1 << 1),
COMP_INIT = (1 << 2),
COMP_RECV = (1 << 3),
- COMP_SEND = (1 << 4),
- COMP_CMD = (1 << 5),
COMP_POWER = (1 << 6),
- COMP_EPROM = (1 << 7),
COMP_SWBW = (1 << 8),
COMP_SEC = (1 << 9),
COMP_LPS = (1 << 10),
@@ -58,15 +53,12 @@ enum RTL_DEBUG {
COMP_CH = (1 << 19),
COMP_RF = (1 << 20),
COMP_FIRMWARE = (1 << 21),
- COMP_HT = (1 << 22),
COMP_RESET = (1 << 23),
COMP_CMDPKT = (1 << 24),
COMP_SCAN = (1 << 25),
COMP_PS = (1 << 26),
COMP_DOWN = (1 << 27),
COMP_INTR = (1 << 28),
- COMP_LED = (1 << 29),
- COMP_MLME = (1 << 30),
COMP_ERR = (1 << 31)
};
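The enum edit above deletes debug-component bits (COMP_SEND, COMP_CMD, COMP_EPROM, COMP_HT, COMP_LED, COMP_MLME) that the driver apparently no longer tests. `rt_global_debug_component` is a bitmask, so dead bits cost nothing at runtime but mislead anyone grepping for live debug categories. A simplified userspace sketch of how such a mask gates trace output — the driver's RT_TRACE macro works along these lines, with printf standing in for printk:

#include <stdio.h>

static unsigned int rt_global_debug_component = (1 << 3);       /* COMP_RECV */

/* ##__VA_ARGS__ is GNU C, as used throughout the kernel */
#define RT_TRACE(component, fmt, ...)                           \
        do {                                                    \
                if (rt_global_debug_component & (component))    \
                        printf(fmt, ##__VA_ARGS__);             \
        } while (0)

int main(void)
{
        RT_TRACE(1 << 3, "rx: %d frames\n", 42);        /* printed */
        RT_TRACE(1 << 4, "tx: dropped\n");              /* suppressed: bit 4, the old COMP_SEND */
        return 0;
}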
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index da862c3da4ce..09f0820fb340 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -44,6 +44,9 @@
#include "rtllib.h"
#include "dot11d.h"
+static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *stats);
+
static inline void rtllib_monitor_rx(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *rx_status,
@@ -317,7 +320,6 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
netdev_dbg(ieee->dev,
"Decryption failed ICV mismatch (key %d)\n",
skb->data[hdrlen + 3] >> 6);
- ieee->ieee_stats.rx_discards_undecryptable++;
return -1;
}
@@ -1077,7 +1079,6 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
netdev_dbg(ieee->dev,
"Decryption failed (not set) (SA= %pM)\n",
hdr->addr2);
- ieee->ieee_stats.rx_discards_undecryptable++;
return -1;
}
}
@@ -1743,37 +1744,61 @@ static int rtllib_parse_qos_info_param_IE(struct rtllib_device *ieee,
return rc;
}
-#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
-
static const char *get_info_element_string(u16 id)
{
switch (id) {
- MFIE_STRING(SSID);
- MFIE_STRING(RATES);
- MFIE_STRING(FH_SET);
- MFIE_STRING(DS_SET);
- MFIE_STRING(CF_SET);
- MFIE_STRING(TIM);
- MFIE_STRING(IBSS_SET);
- MFIE_STRING(COUNTRY);
- MFIE_STRING(HOP_PARAMS);
- MFIE_STRING(HOP_TABLE);
- MFIE_STRING(REQUEST);
- MFIE_STRING(CHALLENGE);
- MFIE_STRING(POWER_CONSTRAINT);
- MFIE_STRING(POWER_CAPABILITY);
- MFIE_STRING(TPC_REQUEST);
- MFIE_STRING(TPC_REPORT);
- MFIE_STRING(SUPP_CHANNELS);
- MFIE_STRING(CSA);
- MFIE_STRING(MEASURE_REQUEST);
- MFIE_STRING(MEASURE_REPORT);
- MFIE_STRING(QUIET);
- MFIE_STRING(IBSS_DFS);
- MFIE_STRING(RSN);
- MFIE_STRING(RATES_EX);
- MFIE_STRING(GENERIC);
- MFIE_STRING(QOS_PARAMETER);
+ case MFIE_TYPE_SSID:
+ return "SSID";
+ case MFIE_TYPE_RATES:
+ return "RATES";
+ case MFIE_TYPE_FH_SET:
+ return "FH_SET";
+ case MFIE_TYPE_DS_SET:
+ return "DS_SET";
+ case MFIE_TYPE_CF_SET:
+ return "CF_SET";
+ case MFIE_TYPE_TIM:
+ return "TIM";
+ case MFIE_TYPE_IBSS_SET:
+ return "IBSS_SET";
+ case MFIE_TYPE_COUNTRY:
+ return "COUNTRY";
+ case MFIE_TYPE_HOP_PARAMS:
+ return "HOP_PARAMS";
+ case MFIE_TYPE_HOP_TABLE:
+ return "HOP_TABLE";
+ case MFIE_TYPE_REQUEST:
+ return "REQUEST";
+ case MFIE_TYPE_CHALLENGE:
+ return "CHALLENGE";
+ case MFIE_TYPE_POWER_CONSTRAINT:
+ return "POWER_CONSTRAINT";
+ case MFIE_TYPE_POWER_CAPABILITY:
+ return "POWER_CAPABILITY";
+ case MFIE_TYPE_TPC_REQUEST:
+ return "TPC_REQUEST";
+ case MFIE_TYPE_TPC_REPORT:
+ return "TPC_REPORT";
+ case MFIE_TYPE_SUPP_CHANNELS:
+ return "SUPP_CHANNELS";
+ case MFIE_TYPE_CSA:
+ return "CSA";
+ case MFIE_TYPE_MEASURE_REQUEST:
+ return "MEASURE_REQUEST";
+ case MFIE_TYPE_MEASURE_REPORT:
+ return "MEASURE_REPORT";
+ case MFIE_TYPE_QUIET:
+ return "QUIET";
+ case MFIE_TYPE_IBSS_DFS:
+ return "IBSS_DFS";
+ case MFIE_TYPE_RSN:
+ return "RSN";
+ case MFIE_TYPE_RATES_EX:
+ return "RATES_EX";
+ case MFIE_TYPE_GENERIC:
+ return "GENERIC";
+ case MFIE_TYPE_QOS_PARAMETER:
+ return "QOS_PARAMETER";
default:
return "UNKNOWN";
}
@@ -2717,9 +2742,9 @@ free_network:
kfree(network);
}
-void rtllib_rx_mgt(struct rtllib_device *ieee,
- struct sk_buff *skb,
- struct rtllib_rx_stats *stats)
+static void rtllib_rx_mgt(struct rtllib_device *ieee,
+ struct sk_buff *skb,
+ struct rtllib_rx_stats *stats)
{
struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
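Two things happen in the rtllib_rx.c hunks: `rtllib_rx_mgt()` becomes static, with a forward declaration added at the top of the file because it is called before it is defined, and the `MFIE_STRING` generator macro is expanded into explicit `case` labels. The removed macro relied on token pasting (`##`) and stringification (`#`); spelling the cases out is longer but keeps every `MFIE_TYPE_*` identifier greppable. A self-contained sketch of the technique that was removed (three-value enum for brevity):

#include <stdio.h>

enum mfie_type { MFIE_TYPE_SSID, MFIE_TYPE_RATES, MFIE_TYPE_TIM };

/* '##' pastes tokens to form the enumerator; '#' stringifies the argument */
#define MFIE_STRING(x) case MFIE_TYPE_ ## x: return #x

static const char *mfie_name(enum mfie_type id)
{
        switch (id) {
        MFIE_STRING(SSID);      /* expands to: case MFIE_TYPE_SSID: return "SSID"; */
        MFIE_STRING(RATES);
        MFIE_STRING(TIM);
        default:
                return "UNKNOWN";
        }
}

int main(void)
{
        printf("%s\n", mfie_name(MFIE_TYPE_RATES));     /* prints RATES */
        return 0;
}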
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d320c31732f2..1503cbb3574e 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -23,7 +23,10 @@
#include <linux/ieee80211.h>
#include "dot11d.h"
-short rtllib_is_54g(struct rtllib_network *net)
+static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
+
+
+static short rtllib_is_54g(struct rtllib_network *net)
{
return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
@@ -107,7 +110,7 @@ static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -369,7 +372,7 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
return skb;
}
-struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
+static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
static void rtllib_send_beacon(struct rtllib_device *ieee)
{
@@ -483,7 +486,7 @@ static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
}
-void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
{
if (ieee->active_scan && (ieee->softmac_features &
IEEE_SOFTMAC_PROBERQ)) {
@@ -492,7 +495,7 @@ void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
}
}
-void rtllib_update_active_chan_map(struct rtllib_device *ieee)
+static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
{
memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
MAX_CHANNEL_NUMBER+1);
@@ -501,7 +504,7 @@ void rtllib_update_active_chan_map(struct rtllib_device *ieee)
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
-void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
{
union iwreq_data wrqu;
short ch = 0;
@@ -1401,7 +1404,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
return skb;
}
-void rtllib_associate_abort(struct rtllib_device *ieee)
+static void rtllib_associate_abort(struct rtllib_device *ieee)
{
unsigned long flags;
@@ -1511,7 +1514,6 @@ static void rtllib_associate_step2(struct rtllib_device *ieee)
}
}
-#define CANCELLED 2
static void rtllib_associate_complete_wq(void *data)
{
struct rtllib_device *ieee = (struct rtllib_device *)
@@ -1753,7 +1755,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
}
}
-void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
+static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
{
unsigned long flags;
struct rtllib_network *target;
@@ -2109,7 +2111,7 @@ out:
}
-void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
+static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
{
if (ieee->sta_sleep == LPS_IS_WAKE) {
if (nl) {
@@ -2545,7 +2547,7 @@ inline void rtllib_randomize_cell(struct rtllib_device *ieee)
}
/* called in user context only */
-void rtllib_start_master_bss(struct rtllib_device *ieee)
+static void rtllib_start_master_bss(struct rtllib_device *ieee)
{
ieee->assoc_id = 1;
@@ -2720,7 +2722,7 @@ inline void rtllib_start_ibss(struct rtllib_device *ieee)
}
/* this is called only in user context, with wx_sem held */
-void rtllib_start_bss(struct rtllib_device *ieee)
+static void rtllib_start_bss(struct rtllib_device *ieee)
{
unsigned long flags;
@@ -2817,7 +2819,7 @@ exit:
up(&ieee->wx_sem);
}
-struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
+static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
{
const u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -3084,7 +3086,7 @@ static int rtllib_wpa_enable(struct rtllib_device *ieee, int value)
*/
netdev_info(ieee->dev, "%s WPA\n", value ? "enabling" : "disabling");
ieee->wpa_enabled = value;
- memset(ieee->ap_mac_addr, 0, 6);
+ eth_zero_addr(ieee->ap_mac_addr);
return 0;
}
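The `memset(ieee->ap_mac_addr, 0, 6)` → `eth_zero_addr()` conversion swaps a magic length for a named helper. `eth_zero_addr()` comes from <linux/etherdevice.h> and is essentially this, paraphrased from the kernel header:

static inline void eth_zero_addr(u8 *addr)
{
        memset(addr, 0x00, ETH_ALEN);   /* ETH_ALEN == 6 */
}

Same generated code, but the call site now says the buffer is an Ethernet address rather than an arbitrary 6-byte region.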
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index e99ea5e67ef9..b992e4612fd8 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -151,7 +151,7 @@
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-inline int rtllib_put_snap(u8 *data, u16 h_proto)
+static int rtllib_put_snap(u8 *data, u16 h_proto)
{
struct rtllib_snap_hdr *snap;
u8 *oui;
@@ -205,7 +205,6 @@ int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
if (res < 0) {
netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
ieee->dev->name, frag->len);
- ieee->ieee_stats.tx_discards++;
return -1;
}
@@ -515,8 +514,8 @@ static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
}
}
-u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
- u8 *dst)
+static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
+ u8 *dst)
{
u16 seqnum = 0;
@@ -566,7 +565,7 @@ static u8 rtllib_current_rate(struct rtllib_device *ieee)
return ieee->rate & 0x7F;
}
-int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
+static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
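As in rtllib_rx.c and rtllib_softmac.c above, rtllib_tx.c marks its file-local helpers (`rtllib_put_snap`, `rtllib_query_seqnum`, `rtllib_xmit_inter`) static; `rtllib_query_seqnum`'s extern prototype disappeared from rtllib.h in the first hunk of this section. sparse (`make C=1`) flags exactly this pattern with "symbol ... was not declared. Should it be static?". When a newly static function is called before its definition, a static forward declaration keeps the file compiling without reordering it, as the added `rtllib_sta_wakeup()` and `rtllib_rx_mgt()` declarations show. Minimal shape, with hypothetical names:

/* forward declaration: used below, defined last in this file */
static int scale_rate(int rate);

int rtl_demo_entry(int rate)
{
        return scale_rate(rate) + 1;
}

static int scale_rate(int rate)
{
        return 2 * rate;
}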
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 23af2aad458e..d481a26c25ae 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2169,98 +2169,99 @@ static inline int ieee80211_is_cck_rate(u8 rate)
/* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev);
-extern struct net_device *alloc_ieee80211(int sizeof_priv);
+void free_ieee80211(struct net_device *dev);
+struct net_device *alloc_ieee80211(int sizeof_priv);
-extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
+int ieee80211_set_encryption(struct ieee80211_device *ieee);
/* ieee80211_tx.c */
-extern int ieee80211_encrypt_fragment(
- struct ieee80211_device *ieee,
- struct sk_buff *frag,
- int hdr_len);
+int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+ struct sk_buff *frag, int hdr_len);
-extern int ieee80211_xmit(struct sk_buff *skb,
- struct net_device *dev);
-extern void ieee80211_txb_free(struct ieee80211_txb *);
+int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
+void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats);
-extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct rtl_80211_hdr_4addr *header,
- struct ieee80211_rx_stats *stats);
+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats);
+void ieee80211_rx_mgt(struct ieee80211_device *ieee,
+ struct rtl_80211_hdr_4addr *header,
+ struct ieee80211_rx_stats *stats);
/* ieee80211_wx.c */
-extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
+int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
+ union iwreq_data *wrqu, char *key);
+int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- struct iw_param *data, char *extra);
-extern int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
+ union iwreq_data *wrqu, char *key);
+int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra);
+int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
/* ieee80211_softmac.c */
-extern short ieee80211_is_54g(const struct ieee80211_network *net);
-extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
-extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats, u16 type,
- u16 stype);
-extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
+short ieee80211_is_54g(const struct ieee80211_network *net);
+short ieee80211_is_shortslot(const struct ieee80211_network *net);
+int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ u16 type, u16 stype);
+void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+ struct ieee80211_network *net);
void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn);
-extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
-
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_ibss(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_init(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_free(struct ieee80211_device *ieee);
-extern void ieee80211_associate_abort(struct ieee80211_device *ieee);
-extern void ieee80211_disassociate(struct ieee80211_device *ieee);
-extern void ieee80211_stop_scan(struct ieee80211_device *ieee);
-extern void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
-extern void ieee80211_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
-extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
-extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
-
-extern void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
+void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+ struct ieee80211_device *ieee);
+
+void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
+void notify_wx_assoc_event(struct ieee80211_device *ieee);
+void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
+void ieee80211_start_bss(struct ieee80211_device *ieee);
+void ieee80211_start_master_bss(struct ieee80211_device *ieee);
+void ieee80211_start_ibss(struct ieee80211_device *ieee);
+void ieee80211_softmac_init(struct ieee80211_device *ieee);
+void ieee80211_softmac_free(struct ieee80211_device *ieee);
+void ieee80211_associate_abort(struct ieee80211_device *ieee);
+void ieee80211_disassociate(struct ieee80211_device *ieee);
+void ieee80211_stop_scan(struct ieee80211_device *ieee);
+void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
+void ieee80211_check_all_nets(struct ieee80211_device *ieee);
+void ieee80211_start_protocol(struct ieee80211_device *ieee);
+void ieee80211_stop_protocol(struct ieee80211_device *ieee);
+void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
+void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
+void ieee80211_reset_queue(struct ieee80211_device *ieee);
+void ieee80211_wake_queue(struct ieee80211_device *ieee);
+void ieee80211_stop_queue(struct ieee80211_device *ieee);
+struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
+void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
+void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
+int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+ struct iw_point *p);
+void notify_wx_assoc_event(struct ieee80211_device *ieee);
+void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
+
+void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
/* ieee80211_crypt_ccmp&tkip&wep.c */
-extern void ieee80211_tkip_null(void);
-extern void ieee80211_wep_null(void);
-extern void ieee80211_ccmp_null(void);
+void ieee80211_tkip_null(void);
+void ieee80211_wep_null(void);
+void ieee80211_ccmp_null(void);
int ieee80211_crypto_init(void);
void ieee80211_crypto_deinit(void);
@@ -2273,116 +2274,128 @@ void ieee80211_crypto_wep_exit(void);
/* ieee80211_softmac_wx.c */
-extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ext);
+int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *ext);
-extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
+int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra);
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
+int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
-extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b);
+int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
/* ieee80211_module.c */
-extern int ieee80211_debug_init(void);
-extern void ieee80211_debug_exit(void);
+int ieee80211_debug_init(void);
+void ieee80211_debug_exit(void);
//extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
-extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
+void ieee80211_wx_sync_scan_wq(struct work_struct *work);
-extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
- struct iw_request_info *info,
+int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_name(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_power(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_power(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
//HT
#define MAX_RECEIVE_BUFFER_SIZE 9100 //
-extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString );
-extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-extern void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
-extern void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
-extern void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
-extern void HTOnAssocRsp(struct ieee80211_device *ieee);
-extern void HTInitializeHTInfo(struct ieee80211_device *ieee);
-extern void HTInitializeBssDesc(PBSS_HT pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-extern u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
+void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
+void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
+
+void HTSetConnectBwMode(struct ieee80211_device *ieee,
+ HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
+void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap,
+ u8 *len, u8 isEncrypt);
+void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo,
+ u8 *len, u8 isEncrypt);
+void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
+ u8 *len);
+void HTOnAssocRsp(struct ieee80211_device *ieee);
+void HTInitializeHTInfo(struct ieee80211_device *ieee);
+void HTInitializeBssDesc(PBSS_HT pBssHT);
+void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee,
+ struct ieee80211_network *pNetwork);
+void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,
+ struct ieee80211_network *pNetwork);
+u8 HTGetHighestMCSRate(struct ieee80211_device *ieee,
+ u8 *pMCSRateSet, u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77] ;
-extern u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
+u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
//extern void HTSetConnectBwModeCallback(unsigned long data);
-extern void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
-extern u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-extern u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-extern u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
+void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
+bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
+u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
//function in BAPROC.c
-extern int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee,
- struct sk_buff *skb);
-extern int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee,
- struct sk_buff *skb);
-extern int ieee80211_rx_DELBA(struct ieee80211_device *ieee,struct sk_buff *skb);
-extern void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
- u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA(struct ieee80211_device *ieee,
- PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-extern void BaSetupTimeOut(unsigned long data);
-extern void TxBaInactTimeout(unsigned long data);
-extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry(PBA_RECORD pBA);
+int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
+int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
+int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
+void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
+ u8 Policy, u8 bOverwritePending);
+void TsInitDelBA(struct ieee80211_device *ieee,
+ PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
+void BaSetupTimeOut(unsigned long data);
+void TxBaInactTimeout(unsigned long data);
+void RxBaInactTimeout(unsigned long data);
+void ResetBaEntry(PBA_RECORD pBA);
//function in TS.c
-extern bool GetTs(
+bool GetTs(
struct ieee80211_device *ieee,
PTS_COMMON_INFO *ppTS,
u8 *Addr,
@@ -2390,10 +2403,10 @@ extern bool GetTs(
TR_SELECT TxRxSelect, //Rx:1, Tx:0
bool bAddNewTs
);
-extern void TSInitialize(struct ieee80211_device *ieee);
-extern void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
-extern void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
-extern void RemoveAllTS(struct ieee80211_device *ieee);
+void TSInitialize(struct ieee80211_device *ieee);
+void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
+void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
+void RemoveAllTS(struct ieee80211_device *ieee);
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
extern const long ieee80211_wlan_frequencies[];
@@ -2423,14 +2436,16 @@ static inline const char *escape_essid(const char *essid, u8 essid_len) {
/* For the function is more related to hardware setting, it's better to use the
* ieee handler to refer to it.
*/
-extern short check_nic_enough_desc(struct net_device *dev, int queue_index);
-extern int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ieee80211_parse_info_param(struct ieee80211_device *ieee,
- struct ieee80211_info_element *info_element,
- u16 length,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats);
-
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8 index);
+short check_nic_enough_desc(struct net_device *dev, int queue_index);
+int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
+int ieee80211_parse_info_param(struct ieee80211_device *ieee,
+ struct ieee80211_info_element *info_element,
+ u16 length,
+ struct ieee80211_network *network,
+ struct ieee80211_rx_stats *stats);
+
+void ieee80211_indicate_packets(struct ieee80211_device *ieee,
+ struct ieee80211_rxb **prxbIndicateArray,
+ u8 index);
#define RT_ASOC_RETRY_LIMIT 5
#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index b374088c5ff8..0aa9021cb95e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1014,7 +1014,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_dropped;
// if QoS enabled, should check the sequence for each of the AC
- if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
+ if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
if (is_duplicate_packet(ieee, hdr))
goto rx_dropped;
@@ -1307,7 +1307,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
}
//added by amy for reorder
- if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
+ if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL){
//added by amy for reorder
for(i = 0; i<rxb->nr_subframes; i++) {
struct sk_buff *sub_skb = rxb->subframes[i];
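This hunk and several below rewrite `x == false` as `!x` and drop `== true`. Beyond matching kernel style, avoiding comparisons against `true` sidesteps a classic trap: for integer flags used as booleans, any nonzero value is logically true, but only the value 1 compares equal to `true`. Worked example:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int flags = 0x04;               /* logically "true" */

        if (flags)                      /* taken */
                printf("truthy\n");
        if (flags == true)              /* NOT taken: 0x04 != 1 */
                printf("compares equal to true\n");
        return 0;
}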
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1b11acb96233..39e9892c3fa6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1177,7 +1177,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_HT_CAP;
*tag++ = ht_cap_len - 2;
- memcpy(tag, ht_cap_buf,ht_cap_len -2);
+ memcpy(tag, ht_cap_buf, ht_cap_len - 2);
tag += ht_cap_len -2;
}
}
@@ -1214,7 +1214,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
tag = skb_put(skb, realtek_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
*tag++ = realtek_ie_len - 2;
- memcpy(tag, realtek_ie_buf,realtek_ie_len -2 );
+ memcpy(tag, realtek_ie_buf, realtek_ie_len - 2);
}
}
// printk("<=====%s(), %p, %p\n", __func__, ieee->dev, ieee->dev->dev_addr);
@@ -1964,7 +1964,7 @@ static void ieee80211_check_auth_response(struct ieee80211_device *ieee,
}
if (ieee->current_network.mode == IEEE_N_24G &&
- bHalfSupportNmode == true) {
+ bHalfSupportNmode) {
netdev_dbg(ieee->dev, "enter half N mode\n");
ieee->bHalfWirelessN24GMode = true;
} else
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 714fbcace72b..3e502520648e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -391,7 +391,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
union iwreq_data *wrqu, char *extra)
{
- int ret=0,len;
+ int ret = 0, len;
short proto_started;
unsigned long flags;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 5353a45ffdff..fff8d583c62f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -336,12 +336,12 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
printk("===>can't get TS\n");
return;
}
- if (pTxTs->TxAdmittedBARecord.bValid == false)
+ if (!pTxTs->TxAdmittedBARecord.bValid)
{
TsStartAddBaProcess(ieee, pTxTs);
goto FORCED_AGG_SETTING;
}
- else if (pTxTs->bUsingBa == false)
+ else if (!pTxTs->bUsingBa)
{
if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
pTxTs->bUsingBa = true;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 9ff8e056ab7f..3bde744604c2 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -364,8 +364,8 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
printk("====================>rx ADDBAREQ from :%pM\n", dst);
//some other capability is not ready now.
if ((ieee->current_network.qos_data.active == 0) ||
- (ieee->pHTInfo->bCurrentHTSupport == false)) //||
- // (ieee->pStaQos->bEnableRxImmBA == false) )
+ (!ieee->pHTInfo->bCurrentHTSupport)) //||
+ // (!ieee->pStaQos->bEnableRxImmBA) )
{
rc = ADDBA_STATUS_REFUSED;
IEEE80211_DEBUG(IEEE80211_DL_ERR, "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
@@ -462,8 +462,8 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
// Check the capability
// Since we can always receive A-MPDU, we just check if it is under HT mode.
if (ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false ||
- ieee->pHTInfo->bCurrentAMPDUEnable == false) {
+ !ieee->pHTInfo->bCurrentHTSupport ||
+ !ieee->pHTInfo->bCurrentAMPDUEnable) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
@@ -502,7 +502,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. Drop because already admit it! \n");
return -1;
}
- else if((pPendingBA->bValid == false) ||(*pDialogToken != pPendingBA->DialogToken))
+ else if((!pPendingBA->bValid) ||(*pDialogToken != pPendingBA->DialogToken))
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "OnADDBARsp(): Recv ADDBA Rsp. BA invalid, DELBA! \n");
ReasonCode = DELBA_REASON_UNKNOWN_BA;
@@ -571,7 +571,6 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
{
struct rtl_80211_hdr_3addr *delba = NULL;
PDELBA_PARAM_SET pDelBaParamSet = NULL;
- u16 *pReasonCode = NULL;
u8 *dst = NULL;
if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 6) {
@@ -583,7 +582,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
}
if (ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false )
+ !ieee->pHTInfo->bCurrentHTSupport)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
return -1;
@@ -592,9 +591,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
delba = (struct rtl_80211_hdr_3addr *)skb->data;
dst = (u8 *)(&delba->addr2[0]);
- delba += sizeof(struct rtl_80211_hdr_3addr);
- pDelBaParamSet = (PDELBA_PARAM_SET)(delba+2);
- pReasonCode = (u16 *)(delba+4);
+ pDelBaParamSet = (PDELBA_PARAM_SET)&delba->payload[2];
if(pDelBaParamSet->field.Initiator == 1)
{
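The ieee80211_rx_DELBA() change above is a behavioral fix, not cosmetics. The removed code did `delba += sizeof(struct rtl_80211_hdr_3addr)` on a *struct pointer*, so the compiler scaled the offset by the struct size a second time and the subsequent casts read far past the intended header; the never-used `pReasonCode` went away with it. Indexing `&delba->payload[2]` addresses actual bytes. A sketch of the scaling pitfall:

#include <stdio.h>

struct hdr { char bytes[24]; };         /* stand-in: a 3-address 802.11 header is 24 bytes */

int main(void)
{
        struct hdr frame[32];
        struct hdr *p = frame;

        p += 1;                 /* moves sizeof(struct hdr) == 24 bytes: intended use */
        p = frame;
        p += sizeof(*p);        /* moves 24 * 24 == 576 bytes: the removed code's mistake */
        printf("%td bytes past frame[0]\n", (char *)p - (char *)frame);        /* 576 */
        return 0;
}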
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index c2588f80625b..c27397b14adb 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -224,9 +224,9 @@ static bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode
+ if(!pHTInfo->bCurrentHTSupport) // wireless is n mode
retValue = false;
- else if(pHTInfo->bRegBW40MHz == false) // station supports 40 bw
+ else if(!pHTInfo->bRegBW40MHz) // station supports 40 bw
retValue = false;
else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
@@ -243,7 +243,7 @@ static bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode
+ if(!pHTInfo->bCurrentHTSupport) // wireless is n mode
retValue = false;
else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode
retValue = false;
@@ -675,7 +675,7 @@ void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *le
if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
{
pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
+ pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz)?HT_EXTCHNL_OFFSET_NO_EXT:
(ieee->current_network.channel<=6)?
HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
@@ -945,7 +945,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
- if (pHTInfo->bCurrentHTSupport == false) {
+ if (!pHTInfo->bCurrentHTSupport) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
return;
}
@@ -956,7 +956,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
// HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
//
- if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
+ if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
else
pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);
@@ -976,7 +976,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
//
HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
-// if(pHTInfo->bCurBW40MHz == true)
+// if (pHTInfo->bCurBW40MHz)
pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
//
@@ -1341,7 +1341,7 @@ void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidt
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u32 flags = 0;
- if(pHTInfo->bRegBW40MHz == false)
+ if(!pHTInfo->bRegBW40MHz)
return;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index ea92fdebe5a7..f33c74342cf3 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -112,7 +112,7 @@ static void TsAddBaProcess(unsigned long data)
static void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
{
- memset(pTsCommonInfo->Addr, 0, 6);
+ eth_zero_addr(pTsCommonInfo->Addr);
memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
memset(&pTsCommonInfo->TClass, 0, sizeof(QOS_TCLAS)*TCLAS_NUM);
pTsCommonInfo->TClasProc = 0;
@@ -584,7 +584,7 @@ void RemoveAllTS(struct ieee80211_device *ieee)
void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS)
{
- if(pTxTS->bAddBaReqInProgress == false)
+ if(!pTxTS->bAddBaReqInProgress)
{
pTxTS->bAddBaReqInProgress = true;
if(pTxTS->bAddBaReqDelayed)
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index 6e5662f7951c..1ba4f83b520e 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -14,11 +14,10 @@
#define RTL8225H
#define RTL819X_TOTAL_RF_PATH 2 /* for 8192U */
-extern void PHY_SetRF8256Bandwidth(struct net_device *dev,
- HT_CHANNEL_WIDTH Bandwidth);
-extern void PHY_RF8256_Config(struct net_device *dev);
-extern void phy_RF8256_Config_ParaFile(struct net_device *dev);
-extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
-extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
+void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth);
+void PHY_RF8256_Config(struct net_device *dev);
+void phy_RF8256_Config_ParaFile(struct net_device *dev);
+void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
+void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
#endif
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 6c2e438c9af4..785fd027a00d 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1187,7 +1187,7 @@ void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
void rtl8192_set_rxconf(struct net_device *dev);
-extern void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
+void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
void EnableHWSecurityConfig8192(struct net_device *dev);
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index b852396d21e6..6f6fe38081bc 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2043,16 +2043,9 @@ static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
static bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
{
- bool Reval;
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
-
- if (ieee->bHalfWirelessN24GMode == true)
- Reval = true;
- else
- Reval = false;
- return Reval;
+ return priv->ieee80211->bHalfWirelessN24GMode;
}
static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
@@ -2762,7 +2755,7 @@ static bool rtl8192_adapter_start(struct net_device *dev)
//
#ifdef TO_DO_LIST
if (Adapter->ResetProgress == RESET_TYPE_NORESET) {
- if (pMgntInfo->RegRfOff == true) { /* User disable RF via registry. */
+ if (pMgntInfo->RegRfOff) { /* User disable RF via registry. */
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
// Those actions will be discard in MgntActSet_RF_State because of the same state
@@ -4406,7 +4399,8 @@ static void query_rxdesc_status(struct sk_buff *skb,
/* RTL8190 set this bit to indicate that Hw does not decrypt packet */
stats->Decrypted = !desc->SWDec;
- if ((priv->ieee80211->pHTInfo->bCurrentHTSupport == true) && (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
+ if ((priv->ieee80211->pHTInfo->bCurrentHTSupport) &&
+ (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
stats->bHwError = false;
else
stats->bHwError = stats->bCRC|stats->bICV;
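GetHalfNmodeSupportByAPs819xUsb() above shrinks from an if/else that copied a boolean through a temporary into a single `return`. The pattern, with hypothetical names:

#include <stdbool.h>

struct dev_state { bool half_n_mode; };

/* before: five lines to copy a bool */
bool is_half_n_verbose(const struct dev_state *s)
{
        bool ret;

        if (s->half_n_mode == true)
                ret = true;
        else
                ret = false;
        return ret;
}

/* after: the flag already is the answer */
bool is_half_n(const struct dev_state *s)
{
        return s->half_n_mode;
}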
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 7ca5d8fbc57f..5277f2eec033 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -438,7 +438,7 @@ static void dm_bandwidth_autoswitch(struct net_device *dev)
if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 || !priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable)
return;
- if (priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false) { /* If send packets in 40 Mhz in 20/40 */
+ if (!priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz) { /* If send packets in 40 Mhz in 20/40 */
if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
} else { /* in force send packets in 20 Mhz in 20/40 */
@@ -1731,7 +1731,7 @@ static void dm_dig_init(struct net_device *dev)
*---------------------------------------------------------------------------*/
static void dm_ctrl_initgain_byrssi(struct net_device *dev)
{
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
@@ -1750,7 +1750,7 @@ static void dm_ctrl_initgain_byrssi_by_driverrssi(
u8 i;
static u8 fw_dig;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
/*DbgPrint("Dig by Sw Rssi\n");*/
@@ -1792,7 +1792,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
static u32 reset_cnt;
u8 i;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm_switch) {
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 6cd32eb44085..2d0232fb3f9b 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -212,24 +212,24 @@ extern struct dynamic_rx_path_sel DM_RxPathSelTable;
/*--------------------------Exported Function prototype---------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-extern void hal_dm_watchdog(struct net_device *dev);
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type, u32 dm_value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type, u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
-extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-extern void dm_shadow_init(struct net_device *dev);
-extern void dm_initialize_txpower_tracking(struct net_device *dev);
+void init_hal_dm(struct net_device *dev);
+void deinit_hal_dm(struct net_device *dev);
+void hal_dm_watchdog(struct net_device *dev);
+void init_rate_adaptive(struct net_device *dev);
+void dm_txpower_trackingcallback(struct work_struct *work);
+void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type, u32 dm_value);
+void dm_force_tx_fw_info(struct net_device *dev,
+ u32 force_type, u32 force_value);
+void dm_init_edca_turbo(struct net_device *dev);
+void dm_rf_operation_test_callback(unsigned long data);
+void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
+void dm_fsync_timer_callback(unsigned long data);
+void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+void dm_shadow_init(struct net_device *dev);
+void dm_initialize_txpower_tracking(struct net_device *dev);
/*--------------------------Exported Function prototype---------------------*/
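The extern keyword is the default storage class for function declarations, so stripping it from these prototypes changes nothing semantically. A compilable sketch, with hypothetical names:

#include <stdio.h>

/* The two prototypes are equivalent: function declarations have external
 * linkage by default, so the explicit 'extern' is pure noise. */
extern int add_with_extern(int a, int b);
int add_plain(int a, int b);

int add_with_extern(int a, int b) { return a + b; }
int add_plain(int a, int b) { return a + b; }

int main(void)
{
	printf("%d %d\n", add_with_extern(1, 2), add_plain(1, 2));
	return 0;
}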
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index d6a2d9756531..fb5f808433d1 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -19,6 +19,6 @@
extern struct iw_handler_def r8192_wx_handlers_def;
/* Enable the rtl819x_core.c to share this function, david 2008.9.22 */
-extern struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
+struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index 52cd437ef7bb..cc8029a15df4 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -182,10 +182,10 @@ typedef enum _rt_status {
RT_STATUS_RESOURCE
} rt_status, *prt_status;
-extern u32 cmpk_message_handle_rx(struct net_device *dev,
- struct ieee80211_rx_stats *pstats);
-extern rt_status SendTxCommandPacket(struct net_device *dev,
- void *pData, u32 DataLen);
+u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats);
+rt_status SendTxCommandPacket(struct net_device *dev,
+ void *pData, u32 DataLen);
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index d27b1e24ca4a..08302dfb0d90 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -66,7 +66,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
if (!skb)
return false;
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
@@ -91,7 +91,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
if (!priv->ieee80211->check_nic_enough_desc(dev, index) ||
(!skb_queue_empty(&priv->ieee80211->skb_waitQ[index])) ||
(priv->ieee80211->queue_stop)) {
- RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
+ RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
@@ -144,7 +144,8 @@ static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
/* Turn On CPU */
read_nic_dword(dev, CPU_GEN, &CPU_status);
- write_nic_byte(dev, CPU_GEN, (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+ write_nic_byte(dev, CPU_GEN,
+ (u8)((CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff));
mdelay(1000);
/* Check whether CPU boot OK */
@@ -242,7 +243,7 @@ bool init_firmware(struct net_device *dev)
* or read image file from array. Default load from IMG file
*/
if (rst_opt == OPT_SYSTEM_RESET) {
- rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev);
+ rc = request_firmware(&fw_entry, fw_name[init_step], &priv->udev->dev);
if (rc < 0) {
RT_TRACE(COMP_ERR, "request firmware fail!\n");
goto download_firmware_fail;
@@ -254,12 +255,12 @@ bool init_firmware(struct net_device *dev)
}
if (init_step != FW_INIT_STEP1_MAIN) {
- memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size);
+ memcpy(pfirmware->firmware_buf, fw_entry->data, fw_entry->size);
mapped_file = pfirmware->firmware_buf;
file_length = fw_entry->size;
} else {
memset(pfirmware->firmware_buf, 0, 128);
- memcpy(&pfirmware->firmware_buf[128],fw_entry->data,fw_entry->size);
+ memcpy(&pfirmware->firmware_buf[128], fw_entry->data, fw_entry->size);
mapped_file = pfirmware->firmware_buf;
file_length = fw_entry->size + 128;
}
@@ -319,7 +320,7 @@ bool init_firmware(struct net_device *dev)
rt_status = CPUcheck_firmware_ready(dev);
if (!rt_status) {
- RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status);
+ RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n", rt_status);
goto download_firmware_fail;
}
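The hunks above are comma-spacing fixes inside the usual request_firmware() flow: ask the firmware loader for a named blob, copy it out, release it. A minimal kernel-style sketch of that pattern, with hypothetical names and buffer handling:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>

/* Hypothetical helper: load "example/fw.bin" into a caller-provided buffer. */
static int example_load_fw(struct device *dev, u8 *buf, size_t buflen)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, "example/fw.bin", dev);
	if (rc < 0)
		return rc;		/* blob missing or loader timeout */

	if (fw->size > buflen) {
		release_firmware(fw);
		return -EFBIG;
	}

	memcpy(buf, fw->data, fw->size);
	release_firmware(fw);		/* always balance request_firmware() */
	return 0;
}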
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index 66cbe3f9cafd..e672126330f3 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -57,36 +57,35 @@ typedef enum _RF90_RADIO_PATH {
#define bMaskLWord 0x0000ffff
#define bMaskDWord 0xffffffff
-extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
-extern void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
- u32 bitmask, u32 data);
-extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr,
- u32 bitmask);
-extern void rtl8192_phy_SetRFReg(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask, u32 data);
-extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask);
-extern void rtl8192_phy_configmac(struct net_device *dev);
-extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
-extern u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
- HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
-extern void rtl8192_BBConfig(struct net_device *dev);
-extern void rtl8192_phy_getTxPower(struct net_device *dev);
-extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
-extern void rtl8192_phy_RFConfig(struct net_device *dev);
-extern void rtl8192_phy_updateInitGain(struct net_device *dev);
-extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath);
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
+void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
+ u32 bitmask, u32 data);
+u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask);
+void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 reg_addr, u32 bitmask, u32 data);
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 reg_addr, u32 bitmask);
+void rtl8192_phy_configmac(struct net_device *dev);
+void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
+u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
+ HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
+void rtl8192_BBConfig(struct net_device *dev);
+void rtl8192_phy_getTxPower(struct net_device *dev);
+void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
+void rtl8192_phy_RFConfig(struct net_device *dev);
+void rtl8192_phy_updateInitGain(struct net_device *dev);
+u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath);
-extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-extern void rtl8192_SetBWMode(struct net_device *dev,
- HT_CHANNEL_WIDTH bandwidth, HT_EXTCHNL_OFFSET offset);
-extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
+u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
+void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH bandwidth,
+ HT_EXTCHNL_OFFSET offset);
+void rtl8192_SwChnl_WorkItem(struct net_device *dev);
void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-extern bool rtl8192_SetRFPowerState(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState);
-extern void InitialGain819xUsb(struct net_device *dev, u8 Operation);
+bool rtl8192_SetRFPowerState(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState);
+void InitialGain819xUsb(struct net_device *dev, u8 Operation);
-extern void InitialGainOperateWorkItemCallBack(struct work_struct *work);
+void InitialGainOperateWorkItemCallBack(struct work_struct *work);
#endif
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 57868085ce58..c5527c1ccaa6 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -134,22 +134,20 @@ u8 *r8712_get_ie(u8 *pbuf, sint index, sint *len, sint limit)
return NULL;
}
-static void set_supported_rate(u8 *SupportedRates, uint mode)
+static void set_supported_rate(u8 *rates, uint mode)
{
- memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+ memset(rates, 0, NDIS_802_11_LENGTH_RATES_EX);
switch (mode) {
case WIRELESS_11B:
- memcpy(SupportedRates, WIFI_CCKRATES,
- IEEE80211_CCK_RATE_LEN);
+ memcpy(rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
break;
case WIRELESS_11G:
case WIRELESS_11A:
- memcpy(SupportedRates, WIFI_OFDMRATES,
- IEEE80211_NUM_OFDM_RATESLEN);
+ memcpy(rates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
break;
case WIRELESS_11BG:
- memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
- memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
+ memcpy(rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ memcpy(rates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
IEEE80211_NUM_OFDM_RATESLEN);
break;
}
@@ -195,17 +193,16 @@ int r8712_generate_ie(struct registry_priv *pregistrypriv)
ie = r8712_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength,
pdev_network->Ssid.Ssid, &sz);
/*supported rates*/
- set_supported_rate(pdev_network->SupportedRates,
- pregistrypriv->wireless_mode);
- rateLen = r8712_get_rateset_len(pdev_network->SupportedRates);
+ set_supported_rate(pdev_network->rates, pregistrypriv->wireless_mode);
+ rateLen = r8712_get_rateset_len(pdev_network->rates);
if (rateLen > 8) {
ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8,
- pdev_network->SupportedRates, &sz);
+ pdev_network->rates, &sz);
ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8),
- (pdev_network->SupportedRates + 8), &sz);
+ (pdev_network->rates + 8), &sz);
} else
ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_,
- rateLen, pdev_network->SupportedRates, &sz);
+ rateLen, pdev_network->rates, &sz);
/*DS parameter set*/
ie = r8712_set_ie(ie, _DSSET_IE_, 1,
(u8 *)&(pdev_network->Configuration.DSConfig), &sz);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index fcb8c61b2884..4fa2540a6c34 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -58,8 +58,8 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
/*init recv_buf*/
_init_queue(&precvpriv->free_recv_buf_queue);
- precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
- GFP_ATOMIC);
+ precvpriv->pallocated_recv_buf =
+ kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC);
if (precvpriv->pallocated_recv_buf == NULL)
return _FAIL;
precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index e35854d28f90..ef7182961002 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -456,9 +456,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
INIT_LIST_HEAD(&pcmd->list);
pcmd->cmdcode = _CreateBss_CMD_;
pcmd->parmbuf = (unsigned char *)pdev_network;
- pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz((
- struct ndis_wlan_bssid_ex *)
- pdev_network);
+ pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(pdev_network);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
/* notes: translate IELength & Length after assign to cmdsz; */
@@ -471,8 +469,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
{
- uint t_len = 0;
- struct ndis_wlan_bssid_ex *psecnetwork;
+ struct wlan_bssid_ex *psecnetwork;
struct cmd_obj *pcmd;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -486,14 +483,6 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
if (pcmd == NULL)
return _FAIL;
- t_len = sizeof(u32) + 6 * sizeof(unsigned char) + 2 +
- sizeof(struct ndis_802_11_ssid) + sizeof(u32) +
- sizeof(s32) +
- sizeof(enum NDIS_802_11_NETWORK_TYPE) +
- sizeof(struct NDIS_802_11_CONFIGURATION) +
- sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) +
- sizeof(NDIS_802_11_RATES_EX) +
- sizeof(u32) + MAX_IE_SZ;
/* for hidden ap to set fw_state here */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) !=
@@ -511,12 +500,12 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
break;
}
}
- psecnetwork = (struct ndis_wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ psecnetwork = &psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
kfree(pcmd);
return _FAIL;
}
- memcpy(psecnetwork, &pnetwork->network, t_len);
+ memcpy(psecnetwork, &pnetwork->network, sizeof(*psecnetwork));
psecuritypriv->authenticator_ie[0] = (unsigned char)
psecnetwork->IELength;
if ((psecnetwork->IELength-12) < (256 - 1))
@@ -575,7 +564,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
255);
/* get cmdsz before endian conversion */
- pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz(psecnetwork);
+ pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(psecnetwork);
#ifdef __BIG_ENDIAN
/* wlan_network endian conversion */
psecnetwork->Length = cpu_to_le32(psecnetwork->Length);
@@ -903,8 +892,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
struct sta_info *psta = NULL;
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_wlan_bssid_ex *pnetwork = (struct ndis_wlan_bssid_ex *)
- pcmd->parmbuf;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
if (pcmd->res != H2C_SUCCESS)
@@ -958,11 +946,11 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
} else
list_add_tail(&(pwlan->list),
&pmlmepriv->scanned_queue.queue);
- pnetwork->Length = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
+ pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
pwlan->fixed = true;
memcpy(&tgt_network->network, pnetwork,
- (r8712_get_ndis_wlan_bssid_ex_sz(pnetwork)));
+ (r8712_get_wlan_bssid_ex_sz(pnetwork)));
if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
/* we will set _FW_LINKED when there is one more sta to
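In r8712_joinbss_cmd() above, the hand-summed t_len is replaced by sizeof(*psecnetwork), so the compiler now tracks the copy size and the field-by-field sum can never drift out of date. The pattern, in a small userspace sketch:

#include <string.h>

struct bss {
	unsigned int length;
	unsigned char addr[6];
	unsigned char ies[768];
};

static void copy_bss(struct bss *dst, const struct bss *src)
{
	/* sizeof(*dst) follows the struct definition automatically; a
	 * manual sum of member sizes would silently go stale. */
	memcpy(dst, src, sizeof(*dst));
}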
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index cb8225b94cf1..818cd8807a38 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -123,15 +123,6 @@ struct usb_suspend_parm {
};
/*
- * Caller Mode: Infra, Ad-Hoc
- * Notes: To join the specified bss
- * Command Event Mode
- */
-struct joinbss_parm {
- struct ndis_wlan_bssid_ex network;
-};
-
-/*
* Caller Mode: Infra, Ad-HoC(C)
* Notes: To disconnect the current associated BSS
* Command Mode
@@ -141,15 +132,6 @@ struct disconnect_parm {
};
/*
- * Caller Mode: AP, Ad-HoC(M)
- * Notes: To create a BSS
- * Command Mode
- */
-struct createbss_parm {
- struct ndis_wlan_bssid_ex network;
-};
-
-/*
* Caller Mode: AP, Ad-HoC, Infra
* Notes: To set the NIC mode of RTL8711
* Command Mode
diff --git a/drivers/staging/rtl8712/rtl871x_event.h b/drivers/staging/rtl8712/rtl871x_event.h
index e03ee90d2870..697c8d735150 100644
--- a/drivers/staging/rtl8712/rtl871x_event.h
+++ b/drivers/staging/rtl8712/rtl871x_event.h
@@ -36,7 +36,7 @@
* Used to report a bss has been scanned
*/
struct survey_event {
- struct ndis_wlan_bssid_ex bss;
+ struct wlan_bssid_ex bss;
};
/*
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index 8e6ef5d49fbf..c9218be5bb4f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -76,22 +76,18 @@ uint oid_null_function(struct oid_par_priv *poid_par_priv);
extern struct iw_handler_def r871x_handlers_def;
-extern uint drv_query_info(
- struct net_device *MiniportAdapterContext,
- uint Oid,
- void *InformationBuffer,
- u32 InformationBufferLength,
- u32 *BytesWritten,
- u32 *BytesNeeded
-);
+uint drv_query_info(struct net_device *MiniportAdapterContext,
+ uint Oid,
+ void *InformationBuffer,
+ u32 InformationBufferLength,
+ u32 *BytesWritten,
+ u32 *BytesNeeded);
-extern uint drv_set_info(
- struct net_device *MiniportAdapterContext,
- uint Oid,
- void *InformationBuffer,
- u32 InformationBufferLength,
- u32 *BytesRead,
- u32 *BytesNeeded
-);
+uint drv_set_info(struct net_device *MiniportAdapterContext,
+ uint Oid,
+ void *InformationBuffer,
+ u32 InformationBufferLength,
+ u32 *BytesRead,
+ u32 *BytesNeeded);
#endif
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 3388f971fb48..143be0fdc578 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -203,14 +203,12 @@ static inline char *translate_scan(struct _adapter *padapter,
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- if ((r8712_is_cckratesonly_included((u8 *)&pnetwork->network.
- SupportedRates)) == true) {
+ if (r8712_is_cckratesonly_included(pnetwork->network.rates)) {
if (ht_cap == true)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
- } else if ((r8712_is_cckrates_included((u8 *)&pnetwork->network.
- SupportedRates)) == true) {
+ } else if (r8712_is_cckrates_included(pnetwork->network.rates)) {
if (ht_cap == true)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
else
@@ -270,9 +268,9 @@ static inline char *translate_scan(struct _adapter *padapter,
iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = 0;
i = 0;
- while (pnetwork->network.SupportedRates[i] != 0) {
+ while (pnetwork->network.rates[i] != 0) {
/* Bit rate given in 500 kb/s units */
- iwe.u.bitrate.value = (pnetwork->network.SupportedRates[i++] &
+ iwe.u.bitrate.value = (pnetwork->network.rates[i++] &
0x7F) * 500000;
current_val = iwe_stream_add_value(info, start, current_val,
stop, &iwe, IW_EV_PARAM_LEN);
@@ -634,8 +632,8 @@ static int r8711_wx_get_name(struct net_device *dev,
char *p;
u8 ht_cap = false;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- NDIS_802_11_RATES_EX *prates = NULL;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ u8 *prates;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) ==
true) {
@@ -644,15 +642,15 @@ static int r8711_wx_get_name(struct net_device *dev,
&ht_ielen, pcur_bss->IELength - 12);
if (p && ht_ielen > 0)
ht_cap = true;
- prates = &pcur_bss->SupportedRates;
- if (r8712_is_cckratesonly_included((u8 *)prates) == true) {
+ prates = pcur_bss->rates;
+ if (r8712_is_cckratesonly_included(prates) == true) {
if (ht_cap == true)
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11bn");
else
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11b");
- } else if ((r8712_is_cckrates_included((u8 *)prates)) == true) {
+ } else if ((r8712_is_cckrates_included(prates)) == true) {
if (ht_cap == true)
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11bgn");
@@ -723,7 +721,7 @@ static int r8711_wx_get_freq(struct net_device *dev,
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
wrqu->freq.m = ieee80211_wlan_frequencies[
@@ -1111,7 +1109,7 @@ static int r8711_wx_get_wap(struct net_device *dev,
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
@@ -1327,7 +1325,7 @@ static int r8711_wx_get_essid(struct net_device *dev,
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u32 len, ret = 0;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
@@ -1419,7 +1417,7 @@ static int r8711_wx_get_rate(struct net_device *dev,
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
struct ieee80211_ht_cap *pht_capie;
unsigned char rf_type = padapter->registrypriv.rf_config;
int i;
@@ -1444,9 +1442,9 @@ static int r8711_wx_get_rate(struct net_device *dev,
(IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
- while ((pcur_bss->SupportedRates[i] != 0) &&
- (pcur_bss->SupportedRates[i] != 0xFF)) {
- rate = pcur_bss->SupportedRates[i] & 0x7F;
+ while ((pcur_bss->rates[i] != 0) &&
+ (pcur_bss->rates[i] != 0xFF)) {
+ rate = pcur_bss->rates[i] & 0x7F;
if (rate > max_rate)
max_rate = rate;
wrqu->bitrate.fixed = 0; /* no auto select */
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c044b0e55ba9..fc5dbea08cb4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -208,19 +208,9 @@ void r8712_generate_random_ibss(u8 *pibss)
pibss[5] = (u8)((curtime>>16) & 0xff);
}
-uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss)
+uint r8712_get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
{
- uint t_len;
-
- t_len = sizeof(u32) + 6 * sizeof(unsigned long) + 2 +
- sizeof(struct ndis_802_11_ssid) + sizeof(u32) +
- sizeof(s32) +
- sizeof(enum NDIS_802_11_NETWORK_TYPE) +
- sizeof(struct NDIS_802_11_CONFIGURATION) +
- sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) +
- sizeof(NDIS_802_11_RATES_EX) +
- sizeof(u32) + bss->IELength;
- return t_len;
+ return sizeof(*bss) + bss->IELength - MAX_IE_SZ;
}
u8 *r8712_get_capability_from_ie(u8 *ie)
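The rewritten r8712_get_wlan_bssid_ex_sz() states the record size directly: the whole struct, minus the unused part of the fixed MAX_IE_SZ tail, i.e. the fixed part plus the IELength bytes actually in use. A sketch with hypothetical sizes:

#include <stddef.h>

#define MAX_IE_SZ 768

struct bssid_ex {
	unsigned int Length;
	unsigned int IELength;		/* bytes of IEs[] actually in use */
	unsigned char IEs[MAX_IE_SZ];	/* fixed-size tail, partly used */
};

static size_t bssid_ex_sz(const struct bssid_ex *bss)
{
	/* fixed part + used part of the tail */
	return sizeof(*bss) - MAX_IE_SZ + bss->IELength;
}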
@@ -286,8 +276,8 @@ int r8712_is_same_ibss(struct _adapter *adapter, struct wlan_network *pnetwork)
}
-static int is_same_network(struct ndis_wlan_bssid_ex *src,
- struct ndis_wlan_bssid_ex *dst)
+static int is_same_network(struct wlan_bssid_ex *src,
+ struct wlan_bssid_ex *dst)
{
u16 s_cap, d_cap;
@@ -332,8 +322,8 @@ struct wlan_network *r8712_get_oldest_wlan_network(
return oldest;
}
-static void update_network(struct ndis_wlan_bssid_ex *dst,
- struct ndis_wlan_bssid_ex *src,
+static void update_network(struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src,
struct _adapter *padapter)
{
u32 last_evm = 0, tmpVal;
@@ -366,11 +356,11 @@ static void update_network(struct ndis_wlan_bssid_ex *dst,
src->Rssi = padapter->recvpriv.signal;
} else
src->Rssi = (src->Rssi + dst->Rssi) / 2;
- memcpy((u8 *)dst, (u8 *)src, r8712_get_ndis_wlan_bssid_ex_sz(src));
+ memcpy((u8 *)dst, (u8 *)src, r8712_get_wlan_bssid_ex_sz(src));
}
static void update_current_network(struct _adapter *adapter,
- struct ndis_wlan_bssid_ex *pnetwork)
+ struct wlan_bssid_ex *pnetwork)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -388,7 +378,7 @@ static void update_current_network(struct _adapter *adapter,
Caller must hold pmlmepriv->lock first.
*/
static void update_scanned_network(struct _adapter *adapter,
- struct ndis_wlan_bssid_ex *target)
+ struct wlan_bssid_ex *target)
{
struct list_head *plist, *phead;
@@ -426,7 +416,7 @@ static void update_scanned_network(struct _adapter *adapter,
target->Rssi = (pnetwork->network.Rssi +
target->Rssi) / 2;
memcpy(&pnetwork->network, target,
- r8712_get_ndis_wlan_bssid_ex_sz(target));
+ r8712_get_wlan_bssid_ex_sz(target));
pnetwork->last_scanned = jiffies;
} else {
/* Otherwise just pull from the free list */
@@ -434,7 +424,7 @@ static void update_scanned_network(struct _adapter *adapter,
pnetwork = alloc_network(pmlmepriv);
if (pnetwork == NULL)
return;
- bssid_ex_sz = r8712_get_ndis_wlan_bssid_ex_sz(target);
+ bssid_ex_sz = r8712_get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
memcpy(&pnetwork->network, target, bssid_ex_sz);
list_add_tail(&pnetwork->list, &queue->queue);
@@ -451,7 +441,7 @@ static void update_scanned_network(struct _adapter *adapter,
}
static void rtl8711_add_network(struct _adapter *adapter,
- struct ndis_wlan_bssid_ex *pnetwork)
+ struct wlan_bssid_ex *pnetwork)
{
unsigned long irqL;
struct mlme_priv *pmlmepriv = &(((struct _adapter *)adapter)->mlmepriv);
@@ -507,10 +497,10 @@ void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
{
unsigned long flags;
u32 len;
- struct ndis_wlan_bssid_ex *pnetwork;
+ struct wlan_bssid_ex *pnetwork;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- pnetwork = (struct ndis_wlan_bssid_ex *)pbuf;
+ pnetwork = (struct wlan_bssid_ex *)pbuf;
#ifdef __BIG_ENDIAN
/* endian_convert */
pnetwork->Length = le32_to_cpu(pnetwork->Length);
@@ -538,7 +528,7 @@ void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
le32_to_cpu(pnetwork->InfrastructureMode);
pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
- len = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
+ len = r8712_get_wlan_bssid_ex_sz(pnetwork);
if (len > sizeof(struct wlan_bssid_ex))
return;
spin_lock_irqsave(&pmlmepriv->lock2, flags);
@@ -769,7 +759,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
the_same_macaddr = !memcmp(pnetwork->network.MacAddress,
cur_network->network.MacAddress, ETH_ALEN);
pnetwork->network.Length =
- r8712_get_ndis_wlan_bssid_ex_sz(&pnetwork->network);
+ r8712_get_wlan_bssid_ex_sz(&pnetwork->network);
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex))
goto ignore_joinbss_callback;
@@ -1001,7 +991,7 @@ void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf)
pdev_network = &(adapter->registrypriv.dev_network);
pibss = adapter->registrypriv.dev_network.MacAddress;
memcpy(pdev_network, &tgt_network->network,
- r8712_get_ndis_wlan_bssid_ex_sz(&tgt_network->
+ r8712_get_wlan_bssid_ex_sz(&tgt_network->
network));
memcpy(&pdev_network->Ssid,
&pmlmepriv->assoc_ssid,
@@ -1668,8 +1658,7 @@ void r8712_update_registrypriv_dev_network(struct _adapter *adapter)
*/
sz = r8712_generate_ie(pregistrypriv);
pdev_network->IELength = sz;
- pdev_network->Length = r8712_get_ndis_wlan_bssid_ex_sz(
- (struct ndis_wlan_bssid_ex *)pdev_network);
+ pdev_network->Length = r8712_get_wlan_bssid_ex_sz(pdev_network);
}
/*the function is at passive_level*/
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index 42bd0bf8a816..08d6c986c11e 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -202,7 +202,7 @@ sint r8712_set_key(struct _adapter *adapter,
struct security_priv *psecuritypriv, sint keyid);
sint r8712_set_auth(struct _adapter *adapter,
struct security_priv *psecuritypriv);
-uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss);
+uint r8712_get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss);
void r8712_generate_random_ibss(u8 *pibss);
u8 *r8712_get_capability_from_ie(u8 *ie);
struct wlan_network *r8712_get_oldest_wlan_network(
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 0b5461208eb9..77f01bf1ca3c 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -160,13 +160,13 @@ static int mp_start_test(struct _adapter *padapter)
struct mp_priv *pmppriv = &padapter->mppriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct ndis_wlan_bssid_ex bssid;
+ struct wlan_bssid_ex bssid;
struct sta_info *psta;
unsigned long length;
unsigned long irqL;
int res = _SUCCESS;
- /* 3 1. initialize a new struct ndis_wlan_bssid_ex */
+ /* 3 1. initialize a new struct wlan_bssid_ex */
memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
bssid.Ssid.SsidLength = 16;
memcpy(bssid.Ssid.Ssid, (unsigned char *)"mp_pseudo_adhoc",
@@ -174,7 +174,7 @@ static int mp_start_test(struct _adapter *padapter)
bssid.InfrastructureMode = Ndis802_11IBSS;
bssid.NetworkTypeInUse = Ndis802_11DS;
bssid.IELength = 0;
- length = r8712_get_ndis_wlan_bssid_ex_sz(&bssid);
+ length = r8712_get_wlan_bssid_ex_sz(&bssid);
if (length % 4) {
/*round up to multiple of 4 bytes.*/
bssid.Length = ((length >> 2) + 1) << 2;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index bcd1a5128868..862792826dc5 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -124,28 +124,25 @@ static u8 crc32_reverseBit(u8 data)
static void crc32_init(void)
{
+ sint i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
if (bcrc32initialized == 1)
return;
- else {
- sint i, j;
- u32 c;
- u8 *p = (u8 *)&c, *p1;
- u8 k;
-
- c = 0x12340000;
- for (i = 0; i < 256; ++i) {
- k = crc32_reverseBit((u8)i);
- for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY :
- (c << 1);
- p1 = (u8 *)&crc32_table[i];
- p1[0] = crc32_reverseBit(p[3]);
- p1[1] = crc32_reverseBit(p[2]);
- p1[2] = crc32_reverseBit(p[1]);
- p1[3] = crc32_reverseBit(p[0]);
- }
- bcrc32initialized = 1;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ p1 = (u8 *)&crc32_table[i];
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
}
+ bcrc32initialized = 1;
}
static u32 getcrc32(u8 *buf, u32 len)
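crc32_init() is restructured around a guard clause: return as soon as the table is known to be built, so the real work no longer sits in an else branch one level deep. The shape, as a compilable one-time-init sketch:

#include <stdbool.h>

static bool table_ready;
static unsigned int table[256];

static void table_init(void)
{
	unsigned int i;

	if (table_ready)	/* guard clause: nothing left to do */
		return;

	for (i = 0; i < 256; i++)
		table[i] = i * 0x01010101u;	/* placeholder fill */

	table_ready = true;
}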
diff --git a/drivers/staging/rtl8712/wlan_bssdef.h b/drivers/staging/rtl8712/wlan_bssdef.h
index 2ea8a3d6b705..fda5707c4acd 100644
--- a/drivers/staging/rtl8712/wlan_bssdef.h
+++ b/drivers/staging/rtl8712/wlan_bssdef.h
@@ -32,11 +32,6 @@
#define NDIS_802_11_LENGTH_RATES 8
#define NDIS_802_11_LENGTH_RATES_EX 16
-/* Set of 8 data rates*/
-typedef unsigned char NDIS_802_11_RATES[NDIS_802_11_LENGTH_RATES];
-/* Set of 16 data rates */
-typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
-
struct ndis_802_11_ssid {
u32 SsidLength;
u8 Ssid[32];
@@ -83,18 +78,7 @@ struct NDIS_802_11_FIXED_IEs {
u16 Capabilities;
};
-/*
- * Length is the 4 bytes multiples of the sume of
- * 6 * sizeof (unsigned char) + 2 + sizeof (ndis_802_11_ssid) + sizeof (u32)
- * + sizeof (s32) + sizeof (NDIS_802_11_NETWORK_TYPE)
- * + sizeof (struct NDIS_802_11_CONFIGURATION)
- * + sizeof (NDIS_802_11_RATES_EX) + IELength
-
- * Except the IELength, all other fields are fixed length. Therefore, we can
- * define a macro to present the partial sum.
- */
-
-struct ndis_wlan_bssid_ex {
+struct wlan_bssid_ex {
u32 Length;
unsigned char MacAddress[6];
u8 Reserved[2];
@@ -104,7 +88,8 @@ struct ndis_wlan_bssid_ex {
enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
struct NDIS_802_11_CONFIGURATION Configuration;
enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode;
- NDIS_802_11_RATES_EX SupportedRates;
+ u8 rates[NDIS_802_11_LENGTH_RATES_EX];
+	/* number of content bytes in IEs, which varies */
u32 IELength;
/*(timestamp, beacon interval, and capability information) */
u8 IEs[MAX_IE_SZ];
@@ -213,7 +198,7 @@ struct wlan_network {
unsigned int last_scanned; /*timestamp for the network */
int aid; /*will only be valid when a BSS is joined. */
int join_res;
- struct ndis_wlan_bssid_ex network; /*must be the last item */
+ struct wlan_bssid_ex network; /*must be the last item */
};
enum VRTL_CARRIER_SENSE {
@@ -244,24 +229,5 @@ enum UAPSD_MAX_SP {
#define NUM_PRE_AUTH_KEY 16
#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
-/*
- * WPA2
- */
-struct wlan_bssid_ex {
- u32 Length;
- unsigned char MacAddress[6];
- u8 Reserved[2];
- struct ndis_802_11_ssid Ssid;
- u32 Privacy;
- s32 Rssi;
- enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
- struct NDIS_802_11_CONFIGURATION Configuration;
- enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode;
- NDIS_802_11_RATES_EX SupportedRates;
- u32 IELength;
- u8 IEs[MAX_IE_SZ]; /* (timestamp, beacon interval, and capability
- * information) */
-};
-
#endif /* #ifndef WLAN_BSSDEF_H_ */
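wlan_bssdef.h previously carried two byte-identical layouts (ndis_wlan_bssid_ex and the "WPA2" wlan_bssid_ex); keeping one, with the typedef'd NDIS_802_11_RATES_EX spelled as a plain u8 array, removes every cast between the two. How callers such as r8711_wx_get_rate() now walk the array, as a sketch:

#define NDIS_802_11_LENGTH_RATES_EX 16

struct bss_sketch {
	unsigned char rates[NDIS_802_11_LENGTH_RATES_EX];	/* 0-terminated */
};

static int max_rate_500kbps(const struct bss_sketch *bss)
{
	int i, best = 0;

	for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX && bss->rates[i]; i++) {
		int r = bss->rates[i] & 0x7F;	/* strip the basic-rate bit */

		if (r > best)
			best = r;
	}
	return best;	/* in 500 kb/s units, as in the ioctl code */
}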
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 274a4b65c022..ad0549c66529 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -1554,8 +1554,7 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
ether_addr_copy(ptr + ETH_ALEN, pattrib->src);
if (!bsnaphdr) {
- len = htons(len);
- memcpy(ptr + 12, &len, 2);
+ put_unaligned_be16(len, ptr + 12);
}
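put_unaligned_be16() folds the old htons()-into-memcpy() pair into one helper that is also safe on CPUs without unaligned access. What it stores, as a userspace equivalent:

#include <stdint.h>

/* Store a 16-bit value big-endian at a possibly unaligned address. */
static void store_be16(uint8_t *ptr, uint16_t val)
{
	ptr[0] = val >> 8;	/* most significant byte first */
	ptr[1] = val & 0xff;
}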
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
index af53c92fc3a2..3d40bab1d9c8 100644
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -148,7 +148,7 @@ void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
struct xmit_frame *pxmitframe)
{
/* exclude ICV */
- unsigned char crc[4];
+ __le32 crc;
struct arc4context mycontext;
int curfragnum, length, index;
u32 keylength;
@@ -186,18 +186,20 @@ void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
length = pattrib->last_txcmdsz - pattrib->hdrlen -
pattrib->iv_len - pattrib->icv_len;
- *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ crc = cpu_to_le32(getcrc32(payload, length));
arcfour_init(&mycontext, wepkey, 3 + keylength);
arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc, 4);
+ arcfour_encrypt(&mycontext, payload + length,
+ (char *)&crc, 4);
} else {
length = pxmitpriv->frag_len - pattrib->hdrlen -
pattrib->iv_len - pattrib->icv_len;
- *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ crc = cpu_to_le32(getcrc32(payload, length));
arcfour_init(&mycontext, wepkey, 3 + keylength);
arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc, 4);
+ arcfour_encrypt(&mycontext, payload + length,
+ (char *)&crc, 4);
pframe += pxmitpriv->frag_len;
pframe = PTR_ALIGN(pframe, 4);
@@ -602,7 +604,7 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
u32 pnh;
u8 rc4key[16];
u8 ttkey[16];
- u8 crc[4];
+ __le32 crc;
u8 hw_hdr_offset = 0;
struct arc4context mycontext;
int curfragnum, length;
@@ -679,11 +681,12 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
"pattrib->iv_len =%x, pattrib->icv_len =%x\n",
pattrib->iv_len,
pattrib->icv_len);
- *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ crc = cpu_to_le32(getcrc32(payload, length));
arcfour_init(&mycontext, rc4key, 16);
arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc, 4);
+ arcfour_encrypt(&mycontext, payload + length,
+ (char *)&crc, 4);
} else {
length = (pxmitpriv->frag_len -
@@ -691,10 +694,11 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
pattrib->iv_len -
pattrib->icv_len);
- *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+ crc = cpu_to_le32(getcrc32(payload, length));
arcfour_init(&mycontext, rc4key, 16);
arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc, 4);
+ arcfour_encrypt(&mycontext, payload + length,
+ (char *)&crc, 4);
pframe += pxmitpriv->frag_len;
pframe = PTR_ALIGN(pframe, 4);
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
index f354f5e11a30..6b9dbeffafcb 100644
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -985,7 +985,7 @@ void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal)
val32 = rtl8723au_read32(adapter, 0x874);
val32 |= pDM_PSTable->Reg874;
rtl8723au_write32(adapter, 0x874, val32);
-
+
val32 = rtl8723au_read32(adapter, 0xc70);
val32 |= pDM_PSTable->RegC70;
rtl8723au_write32(adapter, 0xc70, val32);
diff --git a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
index 342dec3e939f..a63c6cb88bc9 100644
--- a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
+++ b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
@@ -21,7 +21,7 @@ odm_ConfigRFReg_8723A(
struct dm_odm_t *pDM_Odm,
u32 Addr,
u32 Data,
- enum RF_RADIO_PATH RF_PATH,
+ enum RF_RADIO_PATH RF_PATH,
u32 RegAddr
)
{
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index cb5076abda8b..cf2388f4f6e7 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1838,7 +1838,7 @@ Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
- u16 *usPtr = (u16 *) ptxdesc;
+ __le16 *usPtr = (__le16 *)ptxdesc;
u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
u32 index;
u16 checksum = 0;
@@ -1847,7 +1847,7 @@ static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
for (index = 0; index < count; index++)
- checksum ^= le16_to_cpu(*(usPtr + index));
+ checksum ^= le16_to_cpu(usPtr[index]);
ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
}
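Declaring usPtr as __le16 * records that the TX descriptor is little-endian regardless of host byte order and lets sparse type-check every access; the computation itself is a 16-bit XOR fold over the 32-byte descriptor. A userspace sketch of the same checksum:

#include <stdint.h>

/* XOR-fold a 32-byte descriptor as sixteen little-endian 16-bit words. */
static uint16_t desc_checksum(const uint8_t *desc)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 32; i += 2)
		sum ^= (uint16_t)(desc[i] | (desc[i + 1] << 8));	/* LE load */
	return sum;
}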
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index ee818b0dc401..cdaa1aba50ed 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -1121,11 +1121,10 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#ifdef SUPPORT_MSXC
if ((buf[cur_addr_off + 8] == 0x10) ||
- (buf[cur_addr_off + 8] == 0x13))
+ (buf[cur_addr_off + 8] == 0x13)) {
#else
- if (buf[cur_addr_off + 8] == 0x10)
+ if (buf[cur_addr_off + 8] == 0x10) {
#endif
- {
sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
((u32)buf[cur_addr_off + 1] << 16) |
((u32)buf[cur_addr_off + 2] << 8) |
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index a8d657bb5c1b..d6c498209b2c 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -246,11 +246,10 @@ RTY_SEND_CMD:
}
}
#ifdef SUPPORT_SD_LOCK
- if (ptr[1] & 0x7D)
+ if (ptr[1] & 0x7D) {
#else
- if (ptr[1] & 0x7F)
+ if (ptr[1] & 0x7F) {
#endif
- {
dev_dbg(rtsx_dev(chip), "ptr[1]: 0x%02x\n",
ptr[1]);
rtsx_trace(chip);
@@ -3520,12 +3519,11 @@ int reset_sd_card(struct rtsx_chip *chip)
if (chip->sd_io) {
rtsx_trace(chip);
return STATUS_FAIL;
- } else {
- retval = reset_mmc(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ }
+ retval = reset_mmc(chip);
+ if (retval != STATUS_SUCCESS) {
+ rtsx_trace(chip);
+ return STATUS_FAIL;
}
}
}
@@ -4149,11 +4147,10 @@ RTY_SEND_CMD:
}
}
#ifdef SUPPORT_SD_LOCK
- if (ptr[1] & 0x7D)
+ if (ptr[1] & 0x7D) {
#else
- if (ptr[1] & 0x7F)
+ if (ptr[1] & 0x7F) {
#endif
- {
rtsx_trace(chip);
return STATUS_FAIL;
}
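The rts5208 hunks move a lone opening brace up onto the #ifdef'd condition lines, so each preprocessed variant is a complete if statement rather than a condition with a dangling brace two lines later. The shape, in a compilable sketch:

#include <stdio.h>

#define SUPPORT_SD_LOCK

static void check_status(unsigned char status)
{
#ifdef SUPPORT_SD_LOCK
	if (status & 0x7D) {	/* brace stays with its own condition */
#else
	if (status & 0x7F) {
#endif
		printf("error bits set: 0x%02x\n", status);
	}
}

int main(void)
{
	check_status(0x40);
	return 0;
}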
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index a609f3e67256..858597087ba7 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -2329,6 +2329,7 @@ static int slic_if_init(struct adapter *adapter, unsigned long *flags)
if (!adapter->isp_initialized) {
unsigned long flags;
+
pshmem = (struct slic_shmem *)(unsigned long)
adapter->phys_shmem;
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index f4975d2d97ad..5e6798ea9468 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -268,7 +268,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
#endif
- if (pInitParam->powerMode != 0 )
+ if (pInitParam->powerMode != 0)
pInitParam->powerMode = 0;
setPowerMode(pInitParam->powerMode);
@@ -464,17 +464,18 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
RN = N * request;
quo = RN / input;
rem = RN % input; /* rem is always smaller than 14318181 */
- fl_quo = (rem * 10000 /input);
+ fl_quo = (rem * 10000 / input);
for (d = xcnt - 1; d >= 0; d--) {
X = xparm[d].value;
M = quo*X;
M += fl_quo * X / 10000;
/* round step */
- M += (fl_quo*X % 10000)>5000?1:0;
+ M += (fl_quo*X % 10000) > 5000?1:0;
if (M < 256 && M > 0) {
unsigned int diff;
- tmpClock = pll->inputFreq *M / N / X;
+
+ tmpClock = pll->inputFreq * M / N / X;
diff = absDiff(tmpClock, request_orig);
if (diff < miniDiff) {
pll->M = M;
@@ -599,9 +600,9 @@ unsigned int formatPllReg(pll_value_t *pPLL)
On returning a 32 bit number, the value can be applied to any PLL in the calling function.
*/
ulPllReg =
- FIELD_SET( 0, PANEL_PLL_CTRL, BYPASS, OFF)
- | FIELD_SET( 0, PANEL_PLL_CTRL, POWER, ON)
- | FIELD_SET( 0, PANEL_PLL_CTRL, INPUT, OSC)
+ FIELD_SET(0, PANEL_PLL_CTRL, BYPASS, OFF)
+ | FIELD_SET(0, PANEL_PLL_CTRL, POWER, ON)
+ | FIELD_SET(0, PANEL_PLL_CTRL, INPUT, OSC)
#ifndef VALIDATION_CHIP
| FIELD_VALUE(0, PANEL_PLL_CTRL, POD, pPLL->POD)
#endif
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 4e030e820cf3..6ff043608fe9 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -8,8 +8,7 @@
#include <linux/io.h>
/* This is all the chips recognized by this library */
-typedef enum _logical_chip_type_t
-{
+typedef enum _logical_chip_type_t {
SM_UNKNOWN,
SM718,
SM750,
@@ -18,8 +17,7 @@ typedef enum _logical_chip_type_t
logical_chip_type_t;
-typedef enum _clock_type_t
-{
+typedef enum _clock_type_t {
MXCLK_PLL,
PRIMARY_PLL,
SECONDARY_PLL,
@@ -28,8 +26,7 @@ typedef enum _clock_type_t
}
clock_type_t;
-typedef struct _pll_value_t
-{
+typedef struct _pll_value_t {
clock_type_t clockType;
unsigned long inputFreq; /* Input clock frequency to the PLL */
@@ -42,8 +39,7 @@ typedef struct _pll_value_t
pll_value_t;
/* input struct to initChipParam() function */
-typedef struct _initchip_param_t
-{
+typedef struct _initchip_param_t {
unsigned short powerMode; /* Use power mode 0 or 1 */
unsigned short chipClock; /**
* Speed of main chip clock in MHz unit
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index a3e672056ef8..8348113482d9 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -15,16 +15,14 @@ static void setDisplayControl(int ctrl, int dispState)
cnt = 0;
/* Set the primary display control */
- if (!ctrl)
- {
+ if (!ctrl) {
ulDisplayCtrlReg = PEEK32(PANEL_DISPLAY_CTRL);
/* Turn on/off the Panel display control */
- if (dispState)
- {
+ if (dispState) {
/* Timing should be enabled first before enabling the plane
* because changing at the same time does not guarantee that
* the plane will also be enabled or disabled.
- */
+ */
ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
PANEL_DISPLAY_CTRL, TIMING, ENABLE);
POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
@@ -45,16 +43,13 @@ static void setDisplayControl(int ctrl, int dispState)
* until a few delay. Need to write
* and read it a couple times
*/
- do
- {
+ do {
cnt++;
POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
- } while((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
+ } while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
(ulDisplayCtrlReg & ~ulReservedBits));
printk("Set Panel Plane enbit:after tried %d times\n", cnt);
- }
- else
- {
+ } else {
/* When turning off, there is no rule on the programming
* sequence since whenever the clock is off, then it does not
* matter whether the plane is enabled or disabled.
@@ -71,14 +66,11 @@ static void setDisplayControl(int ctrl, int dispState)
POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
}
- }
- /* Set the secondary display control */
- else
- {
+ } else {
+ /* Set the secondary display control */
ulDisplayCtrlReg = PEEK32(CRT_DISPLAY_CTRL);
- if (dispState)
- {
+ if (dispState) {
/* Timing should be enabled first before enabling the plane because changing at the
same time does not guarantee that the plane will also be enabled or disabled.
*/
@@ -100,16 +92,13 @@ static void setDisplayControl(int ctrl, int dispState)
FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE) |
FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_4_MASK, ENABLE);
- do
- {
+ do {
cnt++;
POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
- } while((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
+ } while ((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
(ulDisplayCtrlReg & ~ulReservedBits));
printk("Set Crt Plane enbit:after tried %d times\n", cnt);
- }
- else
- {
+ } else {
/* When turning off, there is no rule on the programming
* sequence since whenever the clock is off, then it does not
* matter whether the plane is enabled or disabled.
@@ -132,71 +121,60 @@ static void setDisplayControl(int ctrl, int dispState)
static void waitNextVerticalSync(int ctrl, int delay)
{
unsigned int status;
- if(!ctrl){
+
+ if (!ctrl) {
/* primary controller */
- /* Do not wait when the Primary PLL is off or display control is already off.
- This will prevent the software to wait forever. */
+	/* Do not wait when the Primary PLL is off or display control is already off.
+	   This will prevent the software from waiting forever. */
if ((FIELD_GET(PEEK32(PANEL_PLL_CTRL), PANEL_PLL_CTRL, POWER) ==
PANEL_PLL_CTRL_POWER_OFF) ||
(FIELD_GET(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, TIMING) ==
- PANEL_DISPLAY_CTRL_TIMING_DISABLE))
- {
+ PANEL_DISPLAY_CTRL_TIMING_DISABLE)) {
return;
}
- while (delay-- > 0)
- {
- /* Wait for end of vsync. */
- do
- {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- PANEL_VSYNC);
- }
- while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
- /* Wait for start of vsync. */
- do
- {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- PANEL_VSYNC);
- }
- while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
- }
-
- }else{
+ while (delay-- > 0) {
+ /* Wait for end of vsync. */
+ do {
+ status = FIELD_GET(PEEK32(SYSTEM_CTRL),
+ SYSTEM_CTRL,
+ PANEL_VSYNC);
+ } while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
+
+ /* Wait for start of vsync. */
+ do {
+ status = FIELD_GET(PEEK32(SYSTEM_CTRL),
+ SYSTEM_CTRL,
+ PANEL_VSYNC);
+ } while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
+ }
+
+ } else {
/* Do not wait when the Primary PLL is off or display control is already off.
This will prevent the software from waiting forever. */
if ((FIELD_GET(PEEK32(CRT_PLL_CTRL), CRT_PLL_CTRL, POWER) ==
CRT_PLL_CTRL_POWER_OFF) ||
(FIELD_GET(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, TIMING) ==
- CRT_DISPLAY_CTRL_TIMING_DISABLE))
- {
+ CRT_DISPLAY_CTRL_TIMING_DISABLE)) {
return;
}
- while (delay-- > 0)
- {
+ while (delay-- > 0) {
/* Wait for end of vsync. */
- do
- {
+ do {
status = FIELD_GET(PEEK32(SYSTEM_CTRL),
SYSTEM_CTRL,
CRT_VSYNC);
- }
- while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
+ } while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
/* Wait for start of vsync. */
- do
- {
+ do {
status = FIELD_GET(PEEK32(SYSTEM_CTRL),
SYSTEM_CTRL,
CRT_VSYNC);
- }
- while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
+ } while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
}
}
}
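waitNextVerticalSync() is only reflowed into kernel do/while style; the logic is untouched: for each requested period, spin until the current vsync pulse ends, then until the next one begins, so the caller always sees a full inactive-to-active edge. A self-contained sketch with a stubbed status read:

#include <stdbool.h>

static int ticks;

static bool vsync_active(void)	/* stand-in for the status register read */
{
	return (++ticks / 3) & 1;	/* toggles every few polls */
}

static void wait_vsyncs(int count)
{
	while (count-- > 0) {
		while (vsync_active())	/* wait for the current pulse to end */
			;
		while (!vsync_active())	/* then for the next one to start */
			;
	}
}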
@@ -233,14 +211,15 @@ static void swPanelPowerSequence(int disp, int delay)
void ddk750_setLogicalDispOut(disp_output_t output)
{
unsigned int reg;
- if(output & PNL_2_USAGE){
+
+ if (output & PNL_2_USAGE) {
/* set panel path controller select */
reg = PEEK32(PANEL_DISPLAY_CTRL);
reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, SELECT, (output & PNL_2_MASK)>>PNL_2_OFFSET);
POKE32(PANEL_DISPLAY_CTRL, reg);
}
- if(output & CRT_2_USAGE){
+ if (output & CRT_2_USAGE) {
/* set crt path controller select */
reg = PEEK32(CRT_DISPLAY_CTRL);
reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, SELECT, (output & CRT_2_MASK)>>CRT_2_OFFSET);
@@ -250,58 +229,57 @@ void ddk750_setLogicalDispOut(disp_output_t output)
}
- if(output & PRI_TP_USAGE){
+ if (output & PRI_TP_USAGE) {
/* set primary timing and plane en_bit */
setDisplayControl(0, (output&PRI_TP_MASK)>>PRI_TP_OFFSET);
}
- if(output & SEC_TP_USAGE){
+ if (output & SEC_TP_USAGE) {
/* set secondary timing and plane en_bit*/
setDisplayControl(1, (output&SEC_TP_MASK)>>SEC_TP_OFFSET);
}
- if(output & PNL_SEQ_USAGE){
+ if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
swPanelPowerSequence((output&PNL_SEQ_MASK)>>PNL_SEQ_OFFSET, 4);
}
- if(output & DAC_USAGE)
+ if (output & DAC_USAGE)
setDAC((output & DAC_MASK)>>DAC_OFFSET);
- if(output & DPMS_USAGE)
+ if (output & DPMS_USAGE)
ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET);
}
int ddk750_initDVIDisp(void)
{
- /* Initialize DVI. If the dviInit fail and the VendorID or the DeviceID are
- not zeroed, then set the failure flag. If it is zeroe, it might mean
- that the system is in Dual CRT Monitor configuration. */
-
- /* De-skew enabled with default 111b value.
- This will fix some artifacts problem in some mode on board 2.2.
- Somehow this fix does not affect board 2.1.
- */
- if ((dviInit(1, /* Select Rising Edge */
- 1, /* Select 24-bit bus */
- 0, /* Select Single Edge clock */
- 1, /* Enable HSync as is */
- 1, /* Enable VSync as is */
- 1, /* Enable De-skew */
- 7, /* Set the de-skew setting to maximum setup */
- 1, /* Enable continuous Sync */
- 1, /* Enable PLL Filter */
- 4 /* Use the recommended value for PLL Filter value */
- ) != 0) && (dviGetVendorID() != 0x0000) && (dviGetDeviceID() != 0x0000))
- {
- return (-1);
- }
-
- /* TODO: Initialize other display component */
-
- /* Success */
- return 0;
+	/* Initialize DVI. If dviInit fails and the VendorID or the DeviceID are
+	   not zeroed, then set the failure flag. If they are zero, it might mean
+ that the system is in Dual CRT Monitor configuration. */
+
+ /* De-skew enabled with default 111b value.
+ This will fix some artifacts problem in some mode on board 2.2.
+ Somehow this fix does not affect board 2.1.
+ */
+ if ((dviInit(1, /* Select Rising Edge */
+ 1, /* Select 24-bit bus */
+ 0, /* Select Single Edge clock */
+ 1, /* Enable HSync as is */
+ 1, /* Enable VSync as is */
+ 1, /* Enable De-skew */
+ 7, /* Set the de-skew setting to maximum setup */
+ 1, /* Enable continuous Sync */
+ 1, /* Enable PLL Filter */
+ 4 /* Use the recommended value for PLL Filter value */
+ ) != 0) && (dviGetVendorID() != 0x0000) && (dviGetDeviceID() != 0x0000)) {
+ return (-1);
+ }
+
+ /* TODO: Initialize other display component */
+
+ /* Success */
+ return 0;
}
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index ae0f84c68de5..abccf84a8c9a 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -8,7 +8,7 @@
#define PNL_2_OFFSET 0
#define PNL_2_MASK (3 << PNL_2_OFFSET)
#define PNL_2_USAGE (PNL_2_MASK << 16)
-#define PNL_2_PRI ((0 << PNL_2_OFFSET)|PNL_2_USAGE)
+#define PNL_2_PRI ((0 << PNL_2_OFFSET)|PNL_2_USAGE)
#define PNL_2_SEC ((2 << PNL_2_OFFSET)|PNL_2_USAGE)
@@ -46,7 +46,7 @@
0: both off
*/
#define SEC_TP_OFFSET 5
-#define SEC_TP_MASK (1<< SEC_TP_OFFSET)
+#define SEC_TP_MASK (1 << SEC_TP_OFFSET)
#define SEC_TP_USAGE (SEC_TP_MASK << 16)
#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET)|SEC_TP_USAGE)
#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET)|SEC_TP_USAGE)
@@ -67,7 +67,7 @@
#define DAC_OFFSET 7
#define DAC_MASK (1 << DAC_OFFSET)
#define DAC_USAGE (DAC_MASK << 16)
-#define DAC_ON ((0x0<< DAC_OFFSET)|DAC_USAGE)
+#define DAC_ON ((0x0 << DAC_OFFSET)|DAC_USAGE)
#define DAC_OFF ((0x1 << DAC_OFFSET)|DAC_USAGE)
/* DPMS only affect D-SUB head
@@ -86,8 +86,7 @@
CRT means crt path DSUB
*/
#if 0
-typedef enum _disp_output_t
-{
+typedef enum _disp_output_t {
NO_DISPLAY = DPMS_OFF,
LCD1_PRI = PNL_2_PRI|PRI_TP_ON|PNL_SEQ_ON|DPMS_OFF|DAC_ON,
@@ -129,7 +128,7 @@ typedef enum _disp_output_t
}
disp_output_t;
#else
-typedef enum _disp_output_t{
+typedef enum _disp_output_t {
do_LCD1_PRI = PNL_2_PRI|PRI_TP_ON|PNL_SEQ_ON|DAC_ON,
do_LCD1_SEC = PNL_2_SEC|SEC_TP_ON|PNL_SEQ_ON|DAC_ON,
#if 0
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index b2bf7e66d5cb..a7a23514ac39 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,4 +1,4 @@
-#define USE_DVICHIP
+#define USE_DVICHIP
#ifdef USE_DVICHIP
#include "ddk750_help.h"
#include "ddk750_reg.h"
@@ -9,47 +9,46 @@
/* This global variable contains all the supported drivers and their corresponding
   function APIs. Please set the function pointer to NULL whenever the function
is not supported. */
-static dvi_ctrl_device_t g_dcftSupportedDviController[] =
-{
+static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
#ifdef DVI_CTRL_SII164
- {
- .pfnInit = sii164InitChip,
- .pfnGetVendorId = sii164GetVendorID,
- .pfnGetDeviceId = sii164GetDeviceID,
+ {
+ .pfnInit = sii164InitChip,
+ .pfnGetVendorId = sii164GetVendorID,
+ .pfnGetDeviceId = sii164GetDeviceID,
#ifdef SII164_FULL_FUNCTIONS
- .pfnResetChip = sii164ResetChip,
- .pfnGetChipString = sii164GetChipString,
- .pfnSetPower = sii164SetPower,
- .pfnEnableHotPlugDetection = sii164EnableHotPlugDetection,
- .pfnIsConnected = sii164IsConnected,
- .pfnCheckInterrupt = sii164CheckInterrupt,
- .pfnClearInterrupt = sii164ClearInterrupt,
+ .pfnResetChip = sii164ResetChip,
+ .pfnGetChipString = sii164GetChipString,
+ .pfnSetPower = sii164SetPower,
+ .pfnEnableHotPlugDetection = sii164EnableHotPlugDetection,
+ .pfnIsConnected = sii164IsConnected,
+ .pfnCheckInterrupt = sii164CheckInterrupt,
+ .pfnClearInterrupt = sii164ClearInterrupt,
#endif
- },
+ },
#endif
};
int dviInit(
- unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue
+ unsigned char edgeSelect,
+ unsigned char busSelect,
+ unsigned char dualEdgeClkSelect,
+ unsigned char hsyncEnable,
+ unsigned char vsyncEnable,
+ unsigned char deskewEnable,
+ unsigned char deskewSetting,
+ unsigned char continuousSyncEnable,
+ unsigned char pllFilterEnable,
+ unsigned char pllFilterValue
)
{
dvi_ctrl_device_t *pCurrentDviCtrl;
+
pCurrentDviCtrl = g_dcftSupportedDviController;
- if(pCurrentDviCtrl->pfnInit != NULL)
- {
+ if (pCurrentDviCtrl->pfnInit != NULL) {
return pCurrentDviCtrl->pfnInit(edgeSelect, busSelect, dualEdgeClkSelect, hsyncEnable,
- vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
- pllFilterEnable, pllFilterValue);
+ vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
+ pllFilterEnable, pllFilterValue);
}
return -1; /* error */
}
@@ -64,13 +63,13 @@ int dviInit(
*/
unsigned short dviGetVendorID(void)
{
- dvi_ctrl_device_t *pCurrentDviCtrl;
+ dvi_ctrl_device_t *pCurrentDviCtrl;
- pCurrentDviCtrl = g_dcftSupportedDviController;
- if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
- return pCurrentDviCtrl->pfnGetVendorId();
+ pCurrentDviCtrl = g_dcftSupportedDviController;
+ if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
+ return pCurrentDviCtrl->pfnGetVendorId();
- return 0x0000;
+ return 0x0000;
}
@@ -83,13 +82,13 @@ unsigned short dviGetVendorID(void)
*/
unsigned short dviGetDeviceID(void)
{
- dvi_ctrl_device_t *pCurrentDviCtrl;
+ dvi_ctrl_device_t *pCurrentDviCtrl;
pCurrentDviCtrl = g_dcftSupportedDviController;
- if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
- return pCurrentDviCtrl->pfnGetDeviceId();
+ if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
+ return pCurrentDviCtrl->pfnGetDeviceId();
- return 0x0000;
+ return 0x0000;
}
#endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index 83bbd6d62061..e1d4c9a2d50a 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -26,8 +26,7 @@ typedef unsigned char (*PFN_DVICTRL_CHECKINTERRUPT)(void);
typedef void (*PFN_DVICTRL_CLEARINTERRUPT)(void);
/* Structure to hold all the function pointer to the DVI Controller. */
-typedef struct _dvi_ctrl_device_t
-{
+typedef struct _dvi_ctrl_device_t {
PFN_DVICTRL_INIT pfnInit;
PFN_DVICTRL_RESETCHIP pfnResetChip;
PFN_DVICTRL_GETCHIPSTRING pfnGetChipString;
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
index 1adcafcc5133..9637dd30d037 100644
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ b/drivers/staging/sm750fb/ddk750_help.c
@@ -1,8 +1,8 @@
#include "ddk750_help.h"
-void __iomem *mmio750 = NULL;
-char revId750 = 0;
-unsigned short devId750 = 0;
+void __iomem *mmio750;
+char revId750;
+unsigned short devId750;
/* after driver mapped io registers, use this function first */
void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
@@ -10,7 +10,7 @@ void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
mmio750 = addr;
devId750 = devId;
revId750 = revId;
- if(revId == 0xfe)
+ if (revId == 0xfe)
printk("found sm750le\n");
}
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
index 4285b056585a..3b06aed431bd 100644
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ b/drivers/staging/sm750fb/ddk750_help.h
@@ -12,8 +12,8 @@
#if 0
/* if 718 big endian is turned on, be aware: don't use this driver for general use, only for ppc big-endian */
#warning "big endian on target cpu and enable nature big endian support of 718 capability !"
-#define PEEK32(addr) __raw_readl(mmio750 + addr)
-#define POKE32(addr, data) __raw_writel(data, mmio750 + addr)
+#define PEEK32(addr) __raw_readl(mmio750 + addr)
+#define POKE32(addr, data) __raw_writel(data, mmio750 + addr)
#else /* software control endianness */
#define PEEK32(addr) readl(addr + mmio750)
#define POKE32(addr, data) writel(data, addr + mmio750)
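(Illustrative sketch, not part of the patch: PEEK32/POKE32 are thin 32-bit accessors over the single global mapping mmio750 that ddk750_set_mmio() records. A user-space stand-in modelling the same read/write pattern against a fake register file — all names here are assumptions for the demo:)

#include <stdio.h>

static unsigned int fake_regs[0x100];	/* stands in for the mapped BAR */

#define PEEK32(addr)		(fake_regs[(addr) / 4])
#define POKE32(addr, data)	(fake_regs[(addr) / 4] = (data))

int main(void)
{
	unsigned int value;

	POKE32(0x10, 0x5a5a5a5a);	/* write a register */
	value = PEEK32(0x10);		/* read it back */
	printf("reg 0x10 = 0x%08x\n", value);
	return 0;
}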
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 7826376ed705..5ddac430aea2 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -10,70 +10,70 @@
int hwI2CInit(
- unsigned char busSpeedMode
+unsigned char busSpeedMode
)
{
- unsigned int value;
+ unsigned int value;
- /* Enable GPIO 30 & 31 as IIC clock & data */
+ /* Enable GPIO 30 & 31 as IIC clock & data */
value = PEEK32(GPIO_MUX);
- value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
- FIELD_SET(0, GPIO_MUX, 31, I2C);
+ value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
+ FIELD_SET(0, GPIO_MUX, 31, I2C);
POKE32(GPIO_MUX, value);
- /* Enable Hardware I2C power.
- TODO: Check if we need to enable GPIO power?
- */
- enableI2C(1);
-
- /* Enable the I2C Controller and set the bus speed mode */
- value = PEEK32(I2C_CTRL);
- if (busSpeedMode == 0)
- value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
- else
- value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
- value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
- POKE32(I2C_CTRL, value);
-
- return 0;
+ /* Enable Hardware I2C power.
+ TODO: Check if we need to enable GPIO power?
+ */
+ enableI2C(1);
+
+ /* Enable the I2C Controller and set the bus speed mode */
+ value = PEEK32(I2C_CTRL);
+ if (busSpeedMode == 0)
+ value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
+ else
+ value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
+ value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
+ POKE32(I2C_CTRL, value);
+
+ return 0;
}
void hwI2CClose(void)
{
- unsigned int value;
+ unsigned int value;
- /* Disable I2C controller */
- value = PEEK32(I2C_CTRL);
- value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
- POKE32(I2C_CTRL, value);
+ /* Disable I2C controller */
+ value = PEEK32(I2C_CTRL);
+ value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
+ POKE32(I2C_CTRL, value);
- /* Disable I2C Power */
- enableI2C(0);
+ /* Disable I2C Power */
+ enableI2C(0);
- /* Set GPIO 30 & 31 back as GPIO pins */
- value = PEEK32(GPIO_MUX);
- value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
- value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
- POKE32(GPIO_MUX, value);
+ /* Set GPIO 30 & 31 back as GPIO pins */
+ value = PEEK32(GPIO_MUX);
+ value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
+ value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
+ POKE32(GPIO_MUX, value);
}
static long hwI2CWaitTXDone(void)
{
- unsigned int timeout;
+ unsigned int timeout;
- /* Wait until the transfer is completed. */
- timeout = HWI2C_WAIT_TIMEOUT;
+ /* Wait until the transfer is completed. */
+ timeout = HWI2C_WAIT_TIMEOUT;
while ((FIELD_GET(PEEK32(I2C_STATUS), I2C_STATUS, TX) != I2C_STATUS_TX_COMPLETED) &&
- (timeout != 0))
+ (timeout != 0))
timeout--;
if (timeout == 0)
- return (-1);
+ return (-1);
- return 0;
+ return 0;
}
@@ -91,53 +91,52 @@ static long hwI2CWaitTXDone(void)
 * Total number of bytes that were actually written.
*/
static unsigned int hwI2CWriteData(
- unsigned char deviceAddress,
- unsigned int length,
- unsigned char *pBuffer
+ unsigned char deviceAddress,
+ unsigned int length,
+ unsigned char *pBuffer
)
{
- unsigned char count, i;
- unsigned int totalBytes = 0;
+ unsigned char count, i;
+ unsigned int totalBytes = 0;
- /* Set the Device Address */
- POKE32(I2C_SLAVE_ADDRESS, deviceAddress & ~0x01);
+ /* Set the Device Address */
+ POKE32(I2C_SLAVE_ADDRESS, deviceAddress & ~0x01);
- /* Write data.
- * Note:
- * Only 16 byte can be accessed per i2c start instruction.
- */
- do
- {
- /* Reset I2C by writing 0 to I2C_RESET register to clear the previous status. */
- POKE32(I2C_RESET, 0);
+ /* Write data.
+ * Note:
+	 * Only 16 bytes can be accessed per i2c start instruction.
+ */
+ do {
+ /* Reset I2C by writing 0 to I2C_RESET register to clear the previous status. */
+ POKE32(I2C_RESET, 0);
- /* Set the number of bytes to be written */
- if (length < MAX_HWI2C_FIFO)
- count = length - 1;
- else
- count = MAX_HWI2C_FIFO - 1;
- POKE32(I2C_BYTE_COUNT, count);
+ /* Set the number of bytes to be written */
+ if (length < MAX_HWI2C_FIFO)
+ count = length - 1;
+ else
+ count = MAX_HWI2C_FIFO - 1;
+ POKE32(I2C_BYTE_COUNT, count);
- /* Move the data to the I2C data register */
- for (i = 0; i <= count; i++)
- POKE32(I2C_DATA0 + i, *pBuffer++);
+ /* Move the data to the I2C data register */
+ for (i = 0; i <= count; i++)
+ POKE32(I2C_DATA0 + i, *pBuffer++);
- /* Start the I2C */
- POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+ /* Start the I2C */
+ POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
- /* Wait until the transfer is completed. */
- if (hwI2CWaitTXDone() != 0)
- break;
+ /* Wait until the transfer is completed. */
+ if (hwI2CWaitTXDone() != 0)
+ break;
- /* Substract length */
- length -= (count + 1);
+		/* Subtract length */
+ length -= (count + 1);
- /* Total byte written */
- totalBytes += (count + 1);
+ /* Total byte written */
+ totalBytes += (count + 1);
- } while (length > 0);
+ } while (length > 0);
- return totalBytes;
+ return totalBytes;
}
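(Illustrative sketch, not part of the patch: the write loop above transfers at most MAX_HWI2C_FIFO (16) bytes per I2C START, with the byte-count register holding length-1 each round. The chunking arithmetic, isolated as a runnable demo — like the driver, it assumes length > 0 on entry:)

#include <stdio.h>

#define MAX_HWI2C_FIFO 16

static unsigned int chunked_write(unsigned int length)
{
	unsigned int total = 0;
	unsigned char count;

	do {
		/* byte-count register holds length-1, capped at FIFO size */
		if (length < MAX_HWI2C_FIFO)
			count = length - 1;
		else
			count = MAX_HWI2C_FIFO - 1;

		/* ... the driver loads count+1 bytes and issues START here ... */

		length -= count + 1;
		total += count + 1;
	} while (length > 0);

	return total;
}

int main(void)
{
	printf("%u bytes sent\n", chunked_write(40));	/* 16 + 16 + 8 */
	return 0;
}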
@@ -158,53 +157,52 @@ static unsigned int hwI2CWriteData(
* Total number of actual bytes read from the slave device
*/
static unsigned int hwI2CReadData(
- unsigned char deviceAddress,
- unsigned int length,
- unsigned char *pBuffer
+ unsigned char deviceAddress,
+ unsigned int length,
+ unsigned char *pBuffer
)
{
- unsigned char count, i;
- unsigned int totalBytes = 0;
+ unsigned char count, i;
+ unsigned int totalBytes = 0;
- /* Set the Device Address */
- POKE32(I2C_SLAVE_ADDRESS, deviceAddress | 0x01);
+ /* Set the Device Address */
+ POKE32(I2C_SLAVE_ADDRESS, deviceAddress | 0x01);
- /* Read data and save them to the buffer.
- * Note:
- * Only 16 byte can be accessed per i2c start instruction.
- */
- do
- {
- /* Reset I2C by writing 0 to I2C_RESET register to clear all the status. */
- POKE32(I2C_RESET, 0);
+ /* Read data and save them to the buffer.
+ * Note:
+	 * Only 16 bytes can be accessed per i2c start instruction.
+ */
+ do {
+ /* Reset I2C by writing 0 to I2C_RESET register to clear all the status. */
+ POKE32(I2C_RESET, 0);
- /* Set the number of bytes to be read */
- if (length <= MAX_HWI2C_FIFO)
- count = length - 1;
- else
- count = MAX_HWI2C_FIFO - 1;
- POKE32(I2C_BYTE_COUNT, count);
+ /* Set the number of bytes to be read */
+ if (length <= MAX_HWI2C_FIFO)
+ count = length - 1;
+ else
+ count = MAX_HWI2C_FIFO - 1;
+ POKE32(I2C_BYTE_COUNT, count);
- /* Start the I2C */
- POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+ /* Start the I2C */
+ POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
- /* Wait until transaction done. */
- if (hwI2CWaitTXDone() != 0)
- break;
+ /* Wait until transaction done. */
+ if (hwI2CWaitTXDone() != 0)
+ break;
- /* Save the data to the given buffer */
- for (i = 0; i <= count; i++)
- *pBuffer++ = PEEK32(I2C_DATA0 + i);
+ /* Save the data to the given buffer */
+ for (i = 0; i <= count; i++)
+ *pBuffer++ = PEEK32(I2C_DATA0 + i);
- /* Substract length by 16 */
- length -= (count + 1);
+		/* Subtract length by 16 */
+ length -= (count + 1);
- /* Number of bytes read. */
- totalBytes += (count + 1);
+ /* Number of bytes read. */
+ totalBytes += (count + 1);
- } while (length > 0);
+ } while (length > 0);
- return totalBytes;
+ return totalBytes;
}
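(Illustrative sketch, not part of the patch: note the address handling in the two helpers — hwI2CWriteData clears the low bit of the slave address (write) while hwI2CReadData sets it (read), per standard 7-bit I2C addressing. A tiny demo; the address value is made up:)

#include <stdio.h>

int main(void)
{
	unsigned char addr = 0x70;	/* hypothetical 8-bit slave address */

	printf("write address byte: 0x%02x\n", (unsigned)(addr & ~0x01));
	printf("read  address byte: 0x%02x\n", (unsigned)(addr | 0x01));
	return 0;
}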
@@ -222,16 +220,16 @@ static unsigned int hwI2CReadData(
* Register value
*/
unsigned char hwI2CReadReg(
- unsigned char deviceAddress,
- unsigned char registerIndex
+ unsigned char deviceAddress,
+ unsigned char registerIndex
)
{
- unsigned char value = (0xFF);
+ unsigned char value = (0xFF);
- if (hwI2CWriteData(deviceAddress, 1, &registerIndex) == 1)
- hwI2CReadData(deviceAddress, 1, &value);
+ if (hwI2CWriteData(deviceAddress, 1, &registerIndex) == 1)
+ hwI2CReadData(deviceAddress, 1, &value);
- return value;
+ return value;
}
@@ -252,19 +250,19 @@ unsigned char hwI2CReadReg(
* -1 - Fail
*/
int hwI2CWriteReg(
- unsigned char deviceAddress,
- unsigned char registerIndex,
- unsigned char data
+ unsigned char deviceAddress,
+ unsigned char registerIndex,
+ unsigned char data
)
{
- unsigned char value[2];
+ unsigned char value[2];
- value[0] = registerIndex;
- value[1] = data;
- if (hwI2CWriteData(deviceAddress, 2, value) == 2)
- return 0;
+ value[0] = registerIndex;
+ value[1] = data;
+ if (hwI2CWriteData(deviceAddress, 2, value) == 2)
+ return 0;
- return (-1);
+ return (-1);
}
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 74313ff84e45..2399b175ade0 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -20,54 +20,54 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
x = pModeParam->horizontal_display_end;
y = pModeParam->vertical_display_end;
- /* SM750LE has to set up the top-left and bottom-right
- registers as well.
- Note that normal SM750/SM718 only use those two register for
- auto-centering mode.
- */
- POKE32(CRT_AUTO_CENTERING_TL,
- FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
- | FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
-
- POKE32(CRT_AUTO_CENTERING_BR,
- FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
- | FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
-
- /* Assume common fields in dispControl have been properly set before
- calling this function.
- This function only sets the extra fields in dispControl.
- */
+ /* SM750LE has to set up the top-left and bottom-right
+ registers as well.
+	   Note that normal SM750/SM718 only use those two registers for
+ auto-centering mode.
+ */
+ POKE32(CRT_AUTO_CENTERING_TL,
+ FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
+ | FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
+
+ POKE32(CRT_AUTO_CENTERING_BR,
+ FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
+ | FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
+
+ /* Assume common fields in dispControl have been properly set before
+ calling this function.
+ This function only sets the extra fields in dispControl.
+ */
/* Clear bit 29:27 of display control register */
- dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
+ dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
/* Set bit 29:27 of display control register for the right clock */
/* Note that SM750LE only needs to support 7 resolutions. */
- if ( x == 800 && y == 600 )
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
+ if (x == 800 && y == 600)
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
else if (x == 1024 && y == 768)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
else if (x == 1152 && y == 864)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
else if (x == 1280 && y == 768)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
else if (x == 1280 && y == 720)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
else if (x == 1280 && y == 960)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
else if (x == 1280 && y == 1024)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
else /* default to VGA clock */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
/* Set bit 25:24 of display controller */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
- /* Set bit 14 of display controller */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
+ /* Set bit 14 of display controller */
+ dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
- POKE32(CRT_DISPLAY_CTRL, dispControl);
+ POKE32(CRT_DISPLAY_CTRL, dispControl);
return dispControl;
}
@@ -80,25 +80,25 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
int ret = 0;
int cnt = 0;
unsigned int ulTmpValue, ulReg;
- if(pll->clockType == SECONDARY_PLL)
- {
+
+ if (pll->clockType == SECONDARY_PLL) {
/* program the secondary pixel clock */
POKE32(CRT_PLL_CTRL, formatPllReg(pll));
- POKE32(CRT_HORIZONTAL_TOTAL,
- FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
- | FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+ POKE32(CRT_HORIZONTAL_TOTAL,
+ FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
+ | FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
- POKE32(CRT_HORIZONTAL_SYNC,
- FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
- | FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+ POKE32(CRT_HORIZONTAL_SYNC,
+ FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
+ | FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
- POKE32(CRT_VERTICAL_TOTAL,
- FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
- | FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+ POKE32(CRT_VERTICAL_TOTAL,
+ FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
+ | FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
- POKE32(CRT_VERTICAL_SYNC,
- FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
- | FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+ POKE32(CRT_VERTICAL_SYNC,
+ FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
+ | FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
ulTmpValue = FIELD_VALUE(0, CRT_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
@@ -107,9 +107,9 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
FIELD_SET(0, CRT_DISPLAY_CTRL, PLANE, ENABLE);
- if(getChipType() == SM750LE){
+ if (getChipType() == SM750LE) {
displayControlAdjust_SM750LE(pModeParam, ulTmpValue);
- }else{
+ } else {
ulReg = PEEK32(CRT_DISPLAY_CTRL)
& FIELD_CLEAR(CRT_DISPLAY_CTRL, VSYNC_PHASE)
& FIELD_CLEAR(CRT_DISPLAY_CTRL, HSYNC_PHASE)
@@ -119,45 +119,44 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
POKE32(CRT_DISPLAY_CTRL, ulTmpValue|ulReg);
}
- }
- else if(pll->clockType == PRIMARY_PLL)
- {
+ } else if (pll->clockType == PRIMARY_PLL) {
unsigned int ulReservedBits;
+
POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
- POKE32(PANEL_HORIZONTAL_TOTAL,
- FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
- | FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+ POKE32(PANEL_HORIZONTAL_TOTAL,
+ FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
+ | FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
- POKE32(PANEL_HORIZONTAL_SYNC,
- FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
- | FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+ POKE32(PANEL_HORIZONTAL_SYNC,
+ FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
+ | FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
- POKE32(PANEL_VERTICAL_TOTAL,
- FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
- | FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+ POKE32(PANEL_VERTICAL_TOTAL,
+ FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
+ | FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
- POKE32(PANEL_VERTICAL_SYNC,
- FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
- | FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+ POKE32(PANEL_VERTICAL_SYNC,
+ FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
+ | FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
ulTmpValue = FIELD_VALUE(0, PANEL_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
- FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
- FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
+ FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
+ FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
+ FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
+ FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
- ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
+ ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
+ FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
+ FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
+ FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
- ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
+ ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
+ & FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
+ & FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
+ & FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
+ & FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
+ & FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
/* May be a hardware bug or just my test chip (not confirmed).
@@ -170,16 +169,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
#if 1
- while((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg))
- {
+ while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg)) {
cnt++;
- if(cnt > 1000)
+ if (cnt > 1000)
break;
POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
}
#endif
- }
- else{
+ } else {
ret = -1;
}
return ret;
@@ -189,11 +186,12 @@ int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
{
pll_value_t pll;
unsigned int uiActualPixelClk;
+
pll.inputFreq = DEFAULT_INPUT_CLOCK;
pll.clockType = clock;
uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
- if(getChipType() == SM750LE){
+ if (getChipType() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
outb_p(0x06, 0x3d5);
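(Illustrative sketch, not part of the patch: both branches of programModeRegisters() lean on FIELD_VALUE/FIELD_SET/FIELD_CLEAR to pack timing values into register bitfields via read-modify-write. A simplified, self-contained version of that idea — the macros below are stand-ins taking explicit hi/lo bit positions, not the driver's symbolic REG,FIELD forms:)

#include <stdio.h>

#define MASK(hi, lo)	((~0u >> (31 - (hi))) & ~((1u << (lo)) - 1u))
#define FIELD_VALUE(reg, hi, lo, val) \
	(((reg) & ~MASK(hi, lo)) | (((val) << (lo)) & MASK(hi, lo)))
#define FIELD_GET(reg, hi, lo)	(((reg) & MASK(hi, lo)) >> (lo))

int main(void)
{
	unsigned int reg = 0;

	reg = FIELD_VALUE(reg, 29, 27, 5);	/* e.g. a 3-bit CLK field */
	printf("field = %u, reg = 0x%08x\n", FIELD_GET(reg, 29, 27), reg);
	return 0;
}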
diff --git a/drivers/staging/sm750fb/ddk750_mode.h b/drivers/staging/sm750fb/ddk750_mode.h
index 4e8fab3f17e4..e846dc2c3d5c 100644
--- a/drivers/staging/sm750fb/ddk750_mode.h
+++ b/drivers/staging/sm750fb/ddk750_mode.h
@@ -3,37 +3,35 @@
#include "ddk750_chip.h"
-typedef enum _spolarity_t
-{
- POS = 0, /* positive */
- NEG, /* negative */
+typedef enum _spolarity_t {
+ POS = 0, /* positive */
+ NEG, /* negative */
}
spolarity_t;
-typedef struct _mode_parameter_t
-{
- /* Horizontal timing. */
- unsigned long horizontal_total;
- unsigned long horizontal_display_end;
- unsigned long horizontal_sync_start;
- unsigned long horizontal_sync_width;
- spolarity_t horizontal_sync_polarity;
-
- /* Vertical timing. */
- unsigned long vertical_total;
- unsigned long vertical_display_end;
- unsigned long vertical_sync_start;
- unsigned long vertical_sync_height;
- spolarity_t vertical_sync_polarity;
-
- /* Refresh timing. */
- unsigned long pixel_clock;
- unsigned long horizontal_frequency;
- unsigned long vertical_frequency;
-
- /* Clock Phase. This clock phase only applies to Panel. */
- spolarity_t clock_phase_polarity;
+typedef struct _mode_parameter_t {
+ /* Horizontal timing. */
+ unsigned long horizontal_total;
+ unsigned long horizontal_display_end;
+ unsigned long horizontal_sync_start;
+ unsigned long horizontal_sync_width;
+ spolarity_t horizontal_sync_polarity;
+
+ /* Vertical timing. */
+ unsigned long vertical_total;
+ unsigned long vertical_display_end;
+ unsigned long vertical_sync_start;
+ unsigned long vertical_sync_height;
+ spolarity_t vertical_sync_polarity;
+
+ /* Refresh timing. */
+ unsigned long pixel_clock;
+ unsigned long horizontal_frequency;
+ unsigned long vertical_frequency;
+
+ /* Clock Phase. This clock phase only applies to Panel. */
+ spolarity_t clock_phase_polarity;
}
mode_parameter_t;
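(Illustrative sketch, not part of the patch: an example of what callers feed into ddk750_setModeTiming(). This is a trimmed local mirror of the structure — the frequency members are omitted — filled with the standard VESA 800x600@60 timing, 40 MHz pixel clock; the numbers are the usual DMT values, shown for illustration only:)

#include <stdio.h>

typedef enum { POS = 0, NEG } spolarity_t;

typedef struct {
	unsigned long horizontal_total, horizontal_display_end,
		      horizontal_sync_start, horizontal_sync_width;
	spolarity_t horizontal_sync_polarity;
	unsigned long vertical_total, vertical_display_end,
		      vertical_sync_start, vertical_sync_height;
	spolarity_t vertical_sync_polarity;
	unsigned long pixel_clock;
	spolarity_t clock_phase_polarity;
} mode_parameter_t;

static const mode_parameter_t mode_800x600_60 = {
	1056, 800, 840, 128, POS,	/* horizontal timing */
	 628, 600, 601,   4, POS,	/* vertical timing   */
	40000000,			/* pixel clock in Hz */
	POS,				/* clock phase       */
};

int main(void)
{
	printf("htotal=%lu vtotal=%lu pclk=%lu\n",
	       mode_800x600_60.horizontal_total,
	       mode_800x600_60.vertical_total,
	       mode_800x600_60.pixel_clock);
	return 0;
}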
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 1e5f398aed10..e580dab2b625 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -5,21 +5,23 @@
void ddk750_setDPMS(DPMS_t state)
{
unsigned int value;
- if(getChipType() == SM750LE){
+
+ if (getChipType() == SM750LE) {
value = PEEK32(CRT_DISPLAY_CTRL);
- POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL, DPMS, state));
- }else{
+ POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL,
+ DPMS, state));
+ } else {
value = PEEK32(SYSTEM_CTRL);
- value= FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
+ value = FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
POKE32(SYSTEM_CTRL, value);
}
}
unsigned int getPowerMode(void)
{
- if(getChipType() == SM750LE)
+ if (getChipType() == SM750LE)
return 0;
- return (FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE));
+ return FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE);
}
@@ -29,76 +31,74 @@ unsigned int getPowerMode(void)
*/
void setPowerMode(unsigned int powerMode)
{
- unsigned int control_value = 0;
+ unsigned int control_value = 0;
- control_value = PEEK32(POWER_MODE_CTRL);
+ control_value = PEEK32(POWER_MODE_CTRL);
- if(getChipType() == SM750LE)
+ if (getChipType() == SM750LE)
return;
- switch (powerMode)
- {
- case POWER_MODE_CTRL_MODE_MODE0:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, MODE0);
- break;
+ switch (powerMode) {
+ case POWER_MODE_CTRL_MODE_MODE0:
+ control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+ MODE0);
+ break;
- case POWER_MODE_CTRL_MODE_MODE1:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, MODE1);
- break;
+ case POWER_MODE_CTRL_MODE_MODE1:
+ control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+ MODE1);
+ break;
- case POWER_MODE_CTRL_MODE_SLEEP:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, SLEEP);
- break;
+ case POWER_MODE_CTRL_MODE_SLEEP:
+ control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+ SLEEP);
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
- /* Set up other fields in Power Control Register */
- if (powerMode == POWER_MODE_CTRL_MODE_SLEEP)
- {
- control_value =
+ /* Set up other fields in Power Control Register */
+ if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
+ control_value =
#ifdef VALIDATION_CHIP
- FIELD_SET( control_value, POWER_MODE_CTRL, 336CLK, OFF) |
+ FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, OFF) |
#endif
- FIELD_SET( control_value, POWER_MODE_CTRL, OSC_INPUT, OFF);
- }
- else
- {
- control_value =
+ FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, OFF);
+ } else {
+ control_value =
#ifdef VALIDATION_CHIP
- FIELD_SET( control_value, POWER_MODE_CTRL, 336CLK, ON) |
+ FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, ON) |
#endif
- FIELD_SET( control_value, POWER_MODE_CTRL, OSC_INPUT, ON);
- }
+ FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, ON);
+ }
- /* Program new power mode. */
- POKE32(POWER_MODE_CTRL, control_value);
+ /* Program new power mode. */
+ POKE32(POWER_MODE_CTRL, control_value);
}
void setCurrentGate(unsigned int gate)
{
- unsigned int gate_reg;
- unsigned int mode;
-
- /* Get current power mode. */
- mode = getPowerMode();
-
- switch (mode)
- {
- case POWER_MODE_CTRL_MODE_MODE0:
- gate_reg = MODE0_GATE;
- break;
-
- case POWER_MODE_CTRL_MODE_MODE1:
- gate_reg = MODE1_GATE;
- break;
-
- default:
- gate_reg = MODE0_GATE;
- break;
- }
- POKE32(gate_reg, gate);
+ unsigned int gate_reg;
+ unsigned int mode;
+
+ /* Get current power mode. */
+ mode = getPowerMode();
+
+ switch (mode) {
+ case POWER_MODE_CTRL_MODE_MODE0:
+ gate_reg = MODE0_GATE;
+ break;
+
+ case POWER_MODE_CTRL_MODE_MODE1:
+ gate_reg = MODE1_GATE;
+ break;
+
+ default:
+ gate_reg = MODE0_GATE;
+ break;
+ }
+ POKE32(gate_reg, gate);
}
@@ -108,21 +108,18 @@ void setCurrentGate(unsigned int gate)
*/
void enable2DEngine(unsigned int enable)
{
- uint32_t gate;
-
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- {
- gate = FIELD_SET(gate, CURRENT_GATE, DE, ON);
- gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
- }
- else
- {
- gate = FIELD_SET(gate, CURRENT_GATE, DE, OFF);
- gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
- }
-
- setCurrentGate(gate);
+ uint32_t gate;
+
+ gate = PEEK32(CURRENT_GATE);
+ if (enable) {
+ gate = FIELD_SET(gate, CURRENT_GATE, DE, ON);
+ gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
+ } else {
+ gate = FIELD_SET(gate, CURRENT_GATE, DE, OFF);
+ gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
+ }
+
+ setCurrentGate(gate);
}
@@ -131,58 +128,56 @@ void enable2DEngine(unsigned int enable)
*/
void enableZVPort(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable ZV Port Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- {
- gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, ON);
+ /* Enable ZV Port Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable) {
+ gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, ON);
#if 1
- /* Using Software I2C */
- gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+ /* Using Software I2C */
+ gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
#else
- /* Using Hardware I2C */
- gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
+ /* Using Hardware I2C */
+ gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
#endif
- }
- else
- {
- /* Disable ZV Port Gate. There is no way to know whether the GPIO pins are being used
- or not. Therefore, do not disable the GPIO gate. */
- gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, OFF);
- }
-
- setCurrentGate(gate);
+ } else {
+ /* Disable ZV Port Gate. There is no way to know whether the
+ GPIO pins are being used or not. Therefore, do not disable the
+ GPIO gate. */
+ gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, OFF);
+ }
+
+ setCurrentGate(gate);
}
void enableSSP(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable SSP Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, SSP, ON);
- else
- gate = FIELD_SET(gate, CURRENT_GATE, SSP, OFF);
+ /* Enable SSP Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable)
+ gate = FIELD_SET(gate, CURRENT_GATE, SSP, ON);
+ else
+ gate = FIELD_SET(gate, CURRENT_GATE, SSP, OFF);
- setCurrentGate(gate);
+ setCurrentGate(gate);
}
void enableDMA(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable DMA Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
- else
- gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
+ /* Enable DMA Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable)
+ gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
+ else
+ gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
- setCurrentGate(gate);
+ setCurrentGate(gate);
}
/*
@@ -190,16 +185,16 @@ void enableDMA(unsigned int enable)
*/
void enableGPIO(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable GPIO Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
- else
- gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
+ /* Enable GPIO Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable)
+ gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+ else
+ gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
- setCurrentGate(gate);
+ setCurrentGate(gate);
}
/*
@@ -207,16 +202,16 @@ void enableGPIO(unsigned int enable)
*/
void enablePWM(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable PWM Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, PWM, ON);
- else
- gate = FIELD_SET(gate, CURRENT_GATE, PWM, OFF);
+ /* Enable PWM Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable)
+ gate = FIELD_SET(gate, CURRENT_GATE, PWM, ON);
+ else
+ gate = FIELD_SET(gate, CURRENT_GATE, PWM, OFF);
- setCurrentGate(gate);
+ setCurrentGate(gate);
}
/*
@@ -224,16 +219,16 @@ void enablePWM(unsigned int enable)
*/
void enableI2C(unsigned int enable)
{
- uint32_t gate;
+ uint32_t gate;
- /* Enable I2C Gate */
- gate = PEEK32(CURRENT_GATE);
- if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
- else
- gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
+ /* Enable I2C Gate */
+ gate = PEEK32(CURRENT_GATE);
+ if (enable)
+ gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
+ else
+ gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
- setCurrentGate(gate);
+ setCurrentGate(gate);
}
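(Illustrative sketch, not part of the patch: every enable*() helper in this file shares one shape — read CURRENT_GATE, flip one field, write it back through setCurrentGate(). A generic user-space rendering of that gate toggle; the bit position and names are hypothetical:)

#include <stdio.h>

static unsigned int current_gate;	/* models the CURRENT_GATE register */

static void set_gate_bit(unsigned int bit, int enable)
{
	unsigned int gate = current_gate;	/* PEEK32(CURRENT_GATE) */

	if (enable)
		gate |= bit;		/* FIELD_SET(..., ON)  */
	else
		gate &= ~bit;		/* FIELD_SET(..., OFF) */

	current_gate = gate;		/* setCurrentGate(gate) */
}

int main(void)
{
	set_gate_bit(1u << 3, 1);	/* hypothetical I2C gate bit */
	printf("gate = 0x%08x\n", current_gate);
	set_gate_bit(1u << 3, 0);
	printf("gate = 0x%08x\n", current_gate);
	return 0;
}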
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 4e00955a07dd..b7cf6b281fb6 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -1,12 +1,11 @@
#ifndef DDK750_POWER_H__
#define DDK750_POWER_H__
-typedef enum _DPMS_t
-{
- crtDPMS_ON = 0x0,
- crtDPMS_STANDBY = 0x1,
- crtDPMS_SUSPEND = 0x2,
- crtDPMS_OFF = 0x3,
+typedef enum _DPMS_t {
+ crtDPMS_ON = 0x0,
+ crtDPMS_STANDBY = 0x1,
+ crtDPMS_SUSPEND = 0x2,
+ crtDPMS_OFF = 0x3,
}
DPMS_t;
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 1a40dc2a2f75..2995625c3d62 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -1640,9 +1640,9 @@
/* CRT Graphics Control */
#define CRT_DISPLAY_CTRL 0x080200
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK 31:27
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 0x1F
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK 31:27
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 0x1F
/* SM750LE definition */
#define CRT_DISPLAY_CTRL_DPMS 31:30
@@ -1664,9 +1664,9 @@
#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE 0
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK 25:24
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 3
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK 25:24
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 3
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0
/* SM750LE definition */
#define CRT_DISPLAY_CTRL_CRTSELECT 25:25
@@ -1677,11 +1677,11 @@
#define CRT_DISPLAY_CTRL_RGBBIT_12BIT 1
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK 15:15
+#define CRT_DISPLAY_CTRL_RESERVED_3_MASK 15:15
#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE 0
#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE 1
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK 9:9
+#define CRT_DISPLAY_CTRL_RESERVED_4_MASK 9:9
#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_DISABLE 0
#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_ENABLE 1
@@ -1882,7 +1882,7 @@
#endif
/* sm750le new register to control panel output */
-#define DISPLAY_CONTROL_750LE 0x80288
+#define DISPLAY_CONTROL_750LE 0x80288
/* Palette RAM */
/* Panel Palette register starts at 0x080400 ~ 0x0807FC */
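(Illustrative sketch, not part of the patch: the bare 31:27-style tokens in these defines are not arithmetic — they only make sense inside the driver's FIELD_* macros, which split them with the classic conditional-expression trick: "1 ? 31:27" evaluates to 31 and "0 ? 31:27" to 27. A standalone demonstration; the FIELD_HI/FIELD_LO names are illustrative, not the driver's:)

#include <stdio.h>

#define CRT_DISPLAY_CTRL_DPMS	31:30	/* copied from the header above */

#define FIELD_HI(f)	(1 ? f)
#define FIELD_LO(f)	(0 ? f)

int main(void)
{
	printf("hi = %d, lo = %d\n",
	       FIELD_HI(CRT_DISPLAY_CTRL_DPMS),
	       FIELD_LO(CRT_DISPLAY_CTRL_DPMS));	/* hi = 31, lo = 30 */
	return 0;
}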
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index b6395b87fc21..0bdf3db11df0 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -36,12 +36,12 @@ static char *gDviCtrlChipName = "Silicon Image SiI 164";
*/
unsigned short sii164GetVendorID(void)
{
- unsigned short vendorID;
+ unsigned short vendorID;
- vendorID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
- (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
+ vendorID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
+ (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
- return vendorID;
+ return vendorID;
}
/*
@@ -53,12 +53,12 @@ unsigned short sii164GetVendorID(void)
*/
unsigned short sii164GetDeviceID(void)
{
- unsigned short deviceID;
+ unsigned short deviceID;
- deviceID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
- (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
+ deviceID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
+ (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
- return deviceID;
+ return deviceID;
}
@@ -113,132 +113,130 @@ unsigned short sii164GetDeviceID(void)
* -1 - Fail.
*/
long sii164InitChip(
- unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue
+ unsigned char edgeSelect,
+ unsigned char busSelect,
+ unsigned char dualEdgeClkSelect,
+ unsigned char hsyncEnable,
+ unsigned char vsyncEnable,
+ unsigned char deskewEnable,
+ unsigned char deskewSetting,
+ unsigned char continuousSyncEnable,
+ unsigned char pllFilterEnable,
+ unsigned char pllFilterValue
)
{
unsigned char config;
- /* Initialize the i2c bus */
+ /* Initialize the i2c bus */
#ifdef USE_HW_I2C
- /* Use fast mode. */
- hwI2CInit(1);
+ /* Use fast mode. */
+ hwI2CInit(1);
#else
- swI2CInit(DEFAULT_I2C_SCL, DEFAULT_I2C_SDA);
+ swI2CInit(DEFAULT_I2C_SCL, DEFAULT_I2C_SDA);
#endif
- /* Check if SII164 Chip exists */
- if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID))
- {
- /*
- * Initialize SII164 controller chip.
- */
-
- /* Select the edge */
- if (edgeSelect == 0)
- config = SII164_CONFIGURATION_LATCH_FALLING;
- else
- config = SII164_CONFIGURATION_LATCH_RISING;
-
- /* Select bus wide */
- if (busSelect == 0)
- config |= SII164_CONFIGURATION_BUS_12BITS;
- else
- config |= SII164_CONFIGURATION_BUS_24BITS;
-
- /* Select Dual/Single Edge Clock */
- if (dualEdgeClkSelect == 0)
- config |= SII164_CONFIGURATION_CLOCK_SINGLE;
- else
- config |= SII164_CONFIGURATION_CLOCK_DUAL;
-
- /* Select HSync Enable */
- if (hsyncEnable == 0)
- config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
- else
- config |= SII164_CONFIGURATION_HSYNC_AS_IS;
-
- /* Select VSync Enable */
- if (vsyncEnable == 0)
- config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
- else
- config |= SII164_CONFIGURATION_VSYNC_AS_IS;
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-
- /* De-skew enabled with default 111b value.
- This will fix some artifacts problem in some mode on board 2.2.
- Somehow this fix does not affect board 2.1.
- */
- if (deskewEnable == 0)
- config = SII164_DESKEW_DISABLE;
- else
- config = SII164_DESKEW_ENABLE;
-
- switch (deskewSetting)
- {
- case 0:
- config |= SII164_DESKEW_1_STEP;
- break;
- case 1:
- config |= SII164_DESKEW_2_STEP;
- break;
- case 2:
- config |= SII164_DESKEW_3_STEP;
- break;
- case 3:
- config |= SII164_DESKEW_4_STEP;
- break;
- case 4:
- config |= SII164_DESKEW_5_STEP;
- break;
- case 5:
- config |= SII164_DESKEW_6_STEP;
- break;
- case 6:
- config |= SII164_DESKEW_7_STEP;
- break;
- case 7:
- config |= SII164_DESKEW_8_STEP;
- break;
- }
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
-
- /* Enable/Disable Continuous Sync. */
- if (continuousSyncEnable == 0)
- config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
- else
- config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
-
- /* Enable/Disable PLL Filter */
- if (pllFilterEnable == 0)
- config |= SII164_PLL_FILTER_DISABLE;
- else
- config |= SII164_PLL_FILTER_ENABLE;
-
- /* Set the PLL Filter value */
- config |= ((pllFilterValue & 0x07) << 1);
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
-
- /* Recover from Power Down and enable output. */
- config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
- config |= SII164_CONFIGURATION_POWER_NORMAL;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-
- return 0;
- }
-
- /* Return -1 if initialization fails. */
- return (-1);
+ /* Check if SII164 Chip exists */
+ if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) {
+ /*
+ * Initialize SII164 controller chip.
+ */
+
+ /* Select the edge */
+ if (edgeSelect == 0)
+ config = SII164_CONFIGURATION_LATCH_FALLING;
+ else
+ config = SII164_CONFIGURATION_LATCH_RISING;
+
+		/* Select bus width */
+ if (busSelect == 0)
+ config |= SII164_CONFIGURATION_BUS_12BITS;
+ else
+ config |= SII164_CONFIGURATION_BUS_24BITS;
+
+ /* Select Dual/Single Edge Clock */
+ if (dualEdgeClkSelect == 0)
+ config |= SII164_CONFIGURATION_CLOCK_SINGLE;
+ else
+ config |= SII164_CONFIGURATION_CLOCK_DUAL;
+
+ /* Select HSync Enable */
+ if (hsyncEnable == 0)
+ config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
+ else
+ config |= SII164_CONFIGURATION_HSYNC_AS_IS;
+
+ /* Select VSync Enable */
+ if (vsyncEnable == 0)
+ config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
+ else
+ config |= SII164_CONFIGURATION_VSYNC_AS_IS;
+
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+
+ /* De-skew enabled with default 111b value.
+		   This fixes some artifact problems in some modes on board 2.2.
+ Somehow this fix does not affect board 2.1.
+ */
+ if (deskewEnable == 0)
+ config = SII164_DESKEW_DISABLE;
+ else
+ config = SII164_DESKEW_ENABLE;
+
+ switch (deskewSetting) {
+ case 0:
+ config |= SII164_DESKEW_1_STEP;
+ break;
+ case 1:
+ config |= SII164_DESKEW_2_STEP;
+ break;
+ case 2:
+ config |= SII164_DESKEW_3_STEP;
+ break;
+ case 3:
+ config |= SII164_DESKEW_4_STEP;
+ break;
+ case 4:
+ config |= SII164_DESKEW_5_STEP;
+ break;
+ case 5:
+ config |= SII164_DESKEW_6_STEP;
+ break;
+ case 6:
+ config |= SII164_DESKEW_7_STEP;
+ break;
+ case 7:
+ config |= SII164_DESKEW_8_STEP;
+ break;
+ }
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
+
+ /* Enable/Disable Continuous Sync. */
+ if (continuousSyncEnable == 0)
+ config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
+ else
+ config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
+
+ /* Enable/Disable PLL Filter */
+ if (pllFilterEnable == 0)
+ config |= SII164_PLL_FILTER_DISABLE;
+ else
+ config |= SII164_PLL_FILTER_ENABLE;
+
+ /* Set the PLL Filter value */
+ config |= ((pllFilterValue & 0x07) << 1);
+
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
+
+ /* Recover from Power Down and enable output. */
+ config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
+ config |= SII164_CONFIGURATION_POWER_NORMAL;
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+
+ return 0;
+ }
+
+ /* Return -1 if initialization fails. */
+ return (-1);
}
@@ -255,9 +253,9 @@ long sii164InitChip(
*/
void sii164ResetChip(void)
{
- /* Power down */
- sii164SetPower(0);
- sii164SetPower(1);
+ /* Power down */
+ sii164SetPower(0);
+ sii164SetPower(1);
}
@@ -268,7 +266,7 @@ void sii164ResetChip(void)
*/
char *sii164GetChipString(void)
{
- return gDviCtrlChipName;
+ return gDviCtrlChipName;
}
@@ -280,26 +278,23 @@ char *sii164GetChipString(void)
* powerUp - Flag to set the power down or up
*/
void sii164SetPower(
- unsigned char powerUp
+ unsigned char powerUp
)
{
- unsigned char config;
-
- config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
- if (powerUp == 1)
- {
- /* Power up the chip */
- config &= ~SII164_CONFIGURATION_POWER_MASK;
- config |= SII164_CONFIGURATION_POWER_NORMAL;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- }
- else
- {
- /* Power down the chip */
- config &= ~SII164_CONFIGURATION_POWER_MASK;
- config |= SII164_CONFIGURATION_POWER_DOWN;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- }
+ unsigned char config;
+
+ config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
+ if (powerUp == 1) {
+ /* Power up the chip */
+ config &= ~SII164_CONFIGURATION_POWER_MASK;
+ config |= SII164_CONFIGURATION_POWER_NORMAL;
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+ } else {
+ /* Power down the chip */
+ config &= ~SII164_CONFIGURATION_POWER_MASK;
+ config |= SII164_CONFIGURATION_POWER_DOWN;
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+ }
}
@@ -308,31 +303,30 @@ void sii164SetPower(
* This function selects the mode of the hot plug detection.
*/
static void sii164SelectHotPlugDetectionMode(
- sii164_hot_plug_mode_t hotPlugMode
+ sii164_hot_plug_mode_t hotPlugMode
)
{
- unsigned char detectReg;
-
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
- switch (hotPlugMode)
- {
- case SII164_HOTPLUG_DISABLE:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
- break;
- case SII164_HOTPLUG_USE_MDI:
- detectReg &= ~SII164_DETECT_INTERRUPT_MASK;
- detectReg |= SII164_DETECT_INTERRUPT_BY_HTPLG_PIN;
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI;
- break;
- case SII164_HOTPLUG_USE_RSEN:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN;
- break;
- case SII164_HOTPLUG_USE_HTPLG:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG;
- break;
- }
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg);
+ unsigned char detectReg;
+
+ detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
+ switch (hotPlugMode) {
+ case SII164_HOTPLUG_DISABLE:
+ detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
+ break;
+ case SII164_HOTPLUG_USE_MDI:
+ detectReg &= ~SII164_DETECT_INTERRUPT_MASK;
+ detectReg |= SII164_DETECT_INTERRUPT_BY_HTPLG_PIN;
+ detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI;
+ break;
+ case SII164_HOTPLUG_USE_RSEN:
+ detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN;
+ break;
+ case SII164_HOTPLUG_USE_HTPLG:
+ detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG;
+ break;
+ }
+
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg);
}
/*
@@ -342,18 +336,19 @@ static void sii164SelectHotPlugDetectionMode(
* enableHotPlug - Enable (=1) / disable (=0) Hot Plug detection
*/
void sii164EnableHotPlugDetection(
- unsigned char enableHotPlug
+ unsigned char enableHotPlug
)
{
- unsigned char detectReg;
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
-
- /* Depending on each DVI controller, need to enable the hot plug based on each
- individual chip design. */
- if (enableHotPlug != 0)
- sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
- else
- sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_DISABLE);
+ unsigned char detectReg;
+
+ detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
+
+	/* Depending on the DVI controller, hot plug detection needs to be
+	   enabled according to each individual chip design. */
+ if (enableHotPlug != 0)
+ sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
+ else
+ sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_DISABLE);
}
/*
@@ -366,13 +361,13 @@ void sii164EnableHotPlugDetection(
*/
unsigned char sii164IsConnected(void)
{
- unsigned char hotPlugValue;
+ unsigned char hotPlugValue;
- hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_HOT_PLUG_STATUS_MASK;
- if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
- return 1;
- else
- return 0;
+ hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_HOT_PLUG_STATUS_MASK;
+ if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
+ return 1;
+ else
+ return 0;
}
/*
@@ -385,13 +380,13 @@ unsigned char sii164IsConnected(void)
*/
unsigned char sii164CheckInterrupt(void)
{
- unsigned char detectReg;
+ unsigned char detectReg;
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_MONITOR_STATE_MASK;
- if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
- return 1;
- else
- return 0;
+ detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_MONITOR_STATE_MASK;
+ if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
+ return 1;
+ else
+ return 0;
}
/*
@@ -400,11 +395,11 @@ unsigned char sii164CheckInterrupt(void)
*/
void sii164ClearInterrupt(void)
{
- unsigned char detectReg;
+ unsigned char detectReg;
- /* Clear the MDI interrupt */
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
+ /* Clear the MDI interrupt */
+ detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
+ i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
}
#endif
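(Illustrative sketch, not part of the patch: how a caller might combine the three detection helpers above — check for a monitor-state interrupt, acknowledge it, then sample the plug state. The stubs stand in for the real I2C-backed functions in ddk750_sii164.c:)

#include <stdio.h>

static unsigned char sii164CheckInterrupt(void)	{ return 1; }
static void sii164ClearInterrupt(void)		{ }
static unsigned char sii164IsConnected(void)	{ return 1; }

int main(void)
{
	/* On a monitor-state change, acknowledge it, then sample the state. */
	if (sii164CheckInterrupt()) {
		sii164ClearInterrupt();
		printf("monitor %s\n",
		       sii164IsConnected() ? "connected" : "removed");
	}
	return 0;
}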
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 2b4c7d3381df..f2610c90eeb4 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -4,27 +4,26 @@
#define USE_DVICHIP
/* Hot Plug detection mode structure */
-typedef enum _sii164_hot_plug_mode_t
-{
- SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
- SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
- SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
- SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
+typedef enum _sii164_hot_plug_mode_t {
+ SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
+ SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
+ SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
+ SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
} sii164_hot_plug_mode_t;
/* Silicon Image SiI164 chip prototype */
long sii164InitChip(
- unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue
+ unsigned char edgeSelect,
+ unsigned char busSelect,
+ unsigned char dualEdgeClkSelect,
+ unsigned char hsyncEnable,
+ unsigned char vsyncEnable,
+ unsigned char deskewEnable,
+ unsigned char deskewSetting,
+ unsigned char continuousSyncEnable,
+ unsigned char pllFilterEnable,
+ unsigned char pllFilterValue
);
unsigned short sii164GetVendorID(void);
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 8e201f19cc0d..07f8afd2defe 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -24,8 +24,7 @@
#include "modedb.h"
-int smi_indent = 0;
-
+int smi_indent;
/*
* #ifdef __BIG_ENDIAN
@@ -40,17 +39,15 @@ typedef void (*PROC_SPEC_SETUP)(struct lynx_share*, char *);
typedef int (*PROC_SPEC_MAP)(struct lynx_share*, struct pci_dev*);
typedef int (*PROC_SPEC_INITHW)(struct lynx_share*, struct pci_dev*);
-
/* common var for all device */
static int g_hwcursor = 1;
static int g_noaccel;
static int g_nomtrr;
static const char *g_fbmode[] = {NULL, NULL};
static const char *g_def_fbmode = "800x600-16@60";
-static char *g_settings = NULL;
+static char *g_settings;
static int g_dualview;
-static char *g_option = NULL;
-
+static char *g_option;
static const struct fb_videomode lynx750_ext[] = {
/* 1024x600-60 VESA [1.71:1] */
@@ -115,8 +112,6 @@ static const struct fb_videomode lynx750_ext[] = {
};
-
-
/* no hardware cursor supported under version 2.6.10, kernel bug */
static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
{
@@ -149,18 +144,17 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
/* get the 16bit color of kernel means */
u16 fg, bg;
- fg = ((info->cmap.red[fbcursor->image.fg_color] & 0xf800))|
- ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5)|
+ fg = ((info->cmap.red[fbcursor->image.fg_color] & 0xf800)) |
+ ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5) |
((info->cmap.blue[fbcursor->image.fg_color] & 0xf800) >> 11);
- bg = ((info->cmap.red[fbcursor->image.bg_color] & 0xf800))|
- ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5)|
+ bg = ((info->cmap.red[fbcursor->image.bg_color] & 0xf800)) |
+ ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
cursor->setColor(cursor, fg, bg);
}
-
if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
cursor->setData(cursor,
fbcursor->rop,
@@ -188,14 +182,17 @@ static void lynxfb_ops_fillrect(struct fb_info *info,
par = info->par;
share = par->share;
- /* each time 2d function begin to work,below three variable always need
- * be set, seems we can put them together in some place */
+ /*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems we could put them together somewhere
+ */
base = par->crtc.oScreen;
pitch = info->fix.line_length;
Bpp = info->var.bits_per_pixel >> 3;
- color = (Bpp == 1)?region->color:((u32 *)info->pseudo_palette)[region->color];
- rop = (region->rop != ROP_COPY) ? HW_ROP2_XOR:HW_ROP2_COPY;
+ color = (Bpp == 1) ? region->color :
+ ((u32 *)info->pseudo_palette)[region->color];
+ rop = (region->rop != ROP_COPY) ? HW_ROP2_XOR : HW_ROP2_COPY;
/*
* If not use spin_lock,system will die if user load driver
@@ -223,8 +220,10 @@ static void lynxfb_ops_copyarea(struct fb_info *info,
par = info->par;
share = par->share;
- /* each time 2d function begin to work,below three variable always need
- * be set, seems we can put them together in some place */
+ /*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems we could put them together somewhere
+ */
base = par->crtc.oScreen;
pitch = info->fix.line_length;
Bpp = info->var.bits_per_pixel >> 3;
@@ -254,28 +253,29 @@ static void lynxfb_ops_imageblit(struct fb_info *info,
par = info->par;
share = par->share;
- /* each time 2d function begin to work,below three variable always need
- * be set, seems we can put them together in some place */
+ /*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems we could put them together somewhere
+ */
base = par->crtc.oScreen;
pitch = info->fix.line_length;
Bpp = info->var.bits_per_pixel >> 3;
- if (image->depth == 1) {
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- fgcol = ((u32 *)info->pseudo_palette)[image->fg_color];
- bgcol = ((u32 *)info->pseudo_palette)[image->bg_color];
- } else {
- fgcol = image->fg_color;
- bgcol = image->bg_color;
- }
- goto _do_work;
- }
/* TODO: Implement hardware acceleration for image->depth > 1 */
- cfb_imageblit(info, image);
- return;
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fgcol = ((u32 *)info->pseudo_palette)[image->fg_color];
+ bgcol = ((u32 *)info->pseudo_palette)[image->bg_color];
+ } else {
+ fgcol = image->fg_color;
+ bgcol = image->bg_color;
+ }
-_do_work:
/*
* If not use spin_lock, system will die if user load driver
* and immediately unload driver frequently (dual)
@@ -284,7 +284,7 @@ _do_work:
spin_lock(&share->slock);
share->accel.de_imageblit(&share->accel,
- image->data, image->width>>3, 0,
+ image->data, image->width >> 3, 0,
base, pitch, Bpp,
image->dx, image->dy,
image->width, image->height,
@@ -298,18 +298,13 @@ static int lynxfb_ops_pan_display(struct fb_var_screeninfo *var,
{
struct lynxfb_par *par;
struct lynxfb_crtc *crtc;
- int ret;
-
if (!info)
return -EINVAL;
- ret = 0;
par = info->par;
crtc = &par->crtc;
- ret = crtc->proc_panDisplay(crtc, var, info);
-
- return ret;
+ return crtc->proc_panDisplay(crtc, var, info);
}
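(Illustrative sketch, not part of the patch: the two rewrites above trade control-flow plumbing for directness — pan_display returns the callee's result instead of threading a ret variable, and imageblit replaces its _do_work goto with an early return for the unaccelerated case. The early-return shape, reduced to a runnable demo:)

#include <stdio.h>

static int blit(int depth)
{
	/* Handle the unsupported case first and bail out ... */
	if (depth != 1) {
		/* (the driver falls back to cfb_imageblit() here) */
		return 1;
	}

	/* ... so the accelerated path below needs no goto label. */
	return 0;
}

int main(void)
{
	printf("depth 1 -> %d, depth 8 -> %d\n", blit(1), blit(8));
	return 0;
}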
static int lynxfb_ops_set_par(struct fb_info *info)
@@ -340,9 +335,10 @@ static int lynxfb_ops_set_par(struct fb_info *info)
fix->line_length = line_length;
pr_info("fix->line_length = %d\n", fix->line_length);
- /* var->red,green,blue,transp are need to be set by driver
+ /*
+	 * var->red,green,blue,transp need to be set by the driver
* and these data should be set before setcolreg routine
- * */
+ */
switch (var->bits_per_pixel) {
case 8:
@@ -466,7 +462,6 @@ static int lynxfb_resume(struct pci_dev *pdev)
int ret;
-
ret = 0;
share = pci_get_drvdata(pdev);
@@ -478,7 +473,6 @@ static int lynxfb_resume(struct pci_dev *pdev)
return ret;
}
-
if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
@@ -493,7 +487,6 @@ static int lynxfb_resume(struct pci_dev *pdev)
hw_sm750_inithw(share, pdev);
-
info = share->fbinfo[0];
if (info) {
@@ -518,7 +511,6 @@ static int lynxfb_resume(struct pci_dev *pdev)
fb_set_suspend(info, 0);
}
-
console_unlock();
return ret;
}
@@ -534,7 +526,6 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
int ret;
resource_size_t request;
-
par = info->par;
crtc = &par->crtc;
output = &par->output;
@@ -546,7 +537,6 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
var->yres,
var->bits_per_pixel);
-
switch (var->bits_per_pixel) {
case 8:
case 16:
@@ -617,7 +607,6 @@ exit:
return ret;
}
-
static int lynxfb_ops_setcolreg(unsigned regno,
unsigned red,
unsigned green,
@@ -652,7 +641,6 @@ static int lynxfb_ops_setcolreg(unsigned regno,
goto exit;
}
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 256) {
u32 val;
@@ -699,7 +687,8 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
output = &par->output;
crtc = &par->crtc;
- crtc->vidmem_size = (share->dual)?share->vidmem_size>>1:share->vidmem_size;
+ crtc->vidmem_size = (share->dual) ? share->vidmem_size >> 1 :
+ share->vidmem_size;
/* setup crtc and output member */
spec_share->hwCursor = g_hwcursor;
@@ -716,10 +705,12 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
output->proc_setMode = hw_sm750_output_setMode;
output->proc_checkMode = hw_sm750_output_checkMode;
- output->proc_setBLANK = (share->revid == SM750LE_REVISION_ID)?hw_sm750le_setBLANK:hw_sm750_setBLANK;
+ output->proc_setBLANK = (share->revid == SM750LE_REVISION_ID) ?
+ hw_sm750le_setBLANK : hw_sm750_setBLANK;
output->clear = hw_sm750_output_clear;
/* chip specific phase */
- share->accel.de_wait = (share->revid == SM750LE_REVISION_ID)?hw_sm750le_deWait : hw_sm750_deWait;
+ share->accel.de_wait = (share->revid == SM750LE_REVISION_ID) ?
+ hw_sm750le_deWait : hw_sm750_deWait;
switch (spec_share->state.dataflow) {
case sm750_simul_pri:
output->paths = sm750_pnc;
@@ -782,7 +773,6 @@ static struct fb_ops lynxfb_ops = {
.fb_cursor = lynxfb_ops_cursor,
};
-
static int lynxfb_set_fbinfo(struct fb_info *info, int index)
{
int i;
@@ -803,7 +793,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
"kernel HELPERS prepared vesa_modes",
};
-
static const char *fixId[2] = {
"sm750_fb1", "sm750_fb2",
};
@@ -824,15 +813,16 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
sm750fb_set_drv(par);
lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
-
- /* set current cursor variable and proc pointer,
- * must be set after crtc member initialized */
+ /*
+ * set current cursor variable and proc pointer,
+ * must be set after crtc member initialized
+ */
crtc->cursor.offset = crtc->oScreen + crtc->vidmem_size - 1024;
crtc->cursor.mmio = share->pvReg + 0x800f0 + (int)crtc->channel * 0x140;
pr_info("crtc->cursor.mmio = %p\n", crtc->cursor.mmio);
crtc->cursor.maxH = crtc->cursor.maxW = 64;
- crtc->cursor.size = crtc->cursor.maxH*crtc->cursor.maxW*2/8;
+ crtc->cursor.size = crtc->cursor.maxH * crtc->cursor.maxW * 2 / 8;
crtc->cursor.disable = hw_cursor_disable;
crtc->cursor.enable = hw_cursor_enable;
crtc->cursor.setColor = hw_cursor_setColor;
@@ -841,7 +831,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
crtc->cursor.setData = hw_cursor_setData;
crtc->cursor.vstart = share->pvMem + crtc->cursor.offset;
-
crtc->cursor.share = share;
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
if (!g_hwcursor) {
@@ -849,7 +838,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
crtc->cursor.disable(&crtc->cursor);
}
-
/* set info->fbops, must be set before fb_find_mode */
if (!share->accel_off) {
/* use 2d acceleration */
@@ -865,7 +853,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
g_fbmode[index] = g_fbmode[0];
}
-
for (i = 0; i < 3; i++) {
ret = fb_find_mode(var, info, g_fbmode[index],
@@ -917,13 +904,13 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
/* set info */
line_length = PADDING(crtc->line_pad,
- (var->xres_virtual * var->bits_per_pixel/8));
+ (var->xres_virtual * var->bits_per_pixel / 8));
info->pseudo_palette = &par->pseudo_palette[0];
info->screen_base = crtc->vScreen;
pr_debug("screen_base vaddr = %p\n", info->screen_base);
info->screen_size = line_length * var->yres_virtual;
- info->flags = FBINFO_FLAG_DEFAULT|0;
+ info->flags = FBINFO_FLAG_DEFAULT | 0;
/* set info->fix */
fix->type = FB_TYPE_PACKED_PIXELS;
@@ -935,15 +922,15 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
strlcpy(fix->id, fixId[index], sizeof(fix->id));
-
fix->smem_start = crtc->oScreen + share->vidmem_start;
pr_info("fix->smem_start = %lx\n", fix->smem_start);
- /* according to mmap experiment from user space application,
+ /*
+ * according to mmap experiment from user space application,
* fix->mmio_len should not larger than virtual size
* (xres_virtual x yres_virtual x ByPP)
* Below line maybe buggy when user mmap fb dev node and write
* data into the bound over virtual size
- * */
+ */
fix->smem_len = crtc->vidmem_size;
pr_info("fix->smem_len = %x\n", fix->smem_len);
info->screen_size = fix->smem_len;
@@ -967,7 +954,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
var->accel_flags = 0;
var->vmode = FB_VMODE_NONINTERLACED;
- pr_debug("#1 show info->cmap : \nstart=%d,len=%d,red=%p,green=%p,blue=%p,transp=%p\n",
+ pr_debug("#1 show info->cmap :\nstart=%d,len=%d,red=%p,green=%p,blue=%p,transp=%p\n",
info->cmap.start, info->cmap.len,
info->cmap.red, info->cmap.green, info->cmap.blue,
info->cmap.transp);
@@ -998,7 +985,6 @@ static void sm750fb_setup(struct lynx_share *share, char *src)
#endif
int swap;
-
spec_share = container_of(share, struct sm750_share, share);
#ifdef CAP_EXPENSIION
exp_res = NULL;
@@ -1096,15 +1082,16 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
size_t spec_offset = 0;
int fbidx;
-
/* enable device */
if (pci_enable_device(pdev)) {
pr_err("can not enable device.\n");
goto err_enable;
}
- /* though offset of share in sm750_share is 0,
- * we use this marcro as the same */
+ /*
+	 * though the offset of share in sm750_share is 0,
+	 * we use this macro all the same
+ */
spec_offset = offsetof(struct sm750_share, share);
spec_share = kzalloc(sizeof(*spec_share), GFP_KERNEL);
@@ -1128,10 +1115,12 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
spin_lock_init(&share->slock);
if (!share->accel_off) {
- /* hook deInit and 2d routines, notes that below hw_xxx
+ /*
+ * hook deInit and 2d routines, notes that below hw_xxx
* routine can work on most of lynx chips
* if some chip need specific function,
- * please hook it in smXXX_set_drv routine */
+ * please hook it in smXXX_set_drv routine
+ */
share->accel.de_init = hw_de_init;
share->accel.de_fillrect = hw_fillrect;
share->accel.de_copyarea = hw_copyarea;
@@ -1268,7 +1257,6 @@ static int __init lynxfb_setup(char *options)
int len;
char *opt, *tmp;
-
if (!options || !*options) {
pr_warn("no options.\n");
return 0;
@@ -1283,14 +1271,15 @@ static int __init lynxfb_setup(char *options)
tmp = g_settings;
- /* Notes:
- char * strsep(char **s,const char * ct);
- @s: the string to be searched
- @ct :the characters to search for
-
- strsep() updates @options to pointer after the first found token
- it also returns the pointer ahead the token.
- */
+ /*
+ * Notes:
+ * char * strsep(char **s,const char * ct);
+ * @s: the string to be searched
+ * @ct :the characters to search for
+ *
+ * strsep() updates @options to pointer after the first found token
+ * it also returns the pointer ahead the token.
+ */
while ((opt = strsep(&options, ":")) != NULL) {
/* options that mean for any lynx chips are configured here */
if (!strncmp(opt, "noaccel", strlen("noaccel")))
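A minimal sketch of the strsep() pattern documented in the note above; parse_opts() and the option name are illustrative only, not driver code:

	#include <linux/kernel.h>
	#include <linux/string.h>

	static void parse_opts(char *opts)
	{
		char *opt;

		/* strsep() writes '\0' over each ':' and advances the cursor */
		while ((opt = strsep(&opts, ":")) != NULL) {
			if (!*opt)
				continue; /* skip empty tokens such as "a::b" */
			if (!strncmp(opt, "noaccel", strlen("noaccel")))
				pr_info("2D acceleration disabled\n");
		}
	}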
@@ -1332,7 +1321,6 @@ static struct pci_driver lynxfb_driver = {
#endif
};
-
static int __init lynxfb_init(void)
{
char *option;
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index cc80580bc823..5bc445571b48 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -5,20 +5,20 @@
#define FB_ACCEL_SMI 0xab
/* please use revision id to distinguish sm750le and sm750*/
-#define SPC_SM750 0
+#define SPC_SM750 0
#define MB(x) ((x)<<20)
#define MHZ(x) ((x) * 1000000)
/* align should be 2,4,8,16 */
-#define PADDING(align, data) (((data)+(align)-1)&(~((align) -1)))
+#define PADDING(align, data) (((data)+(align)-1)&(~((align) - 1)))
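A worked example of the align-up idiom in PADDING() (values are illustrative):

	/* PADDING(16, 100) = (100 + 15) & ~15 = 112, i.e. 100 rounded up
	 * to the next multiple of 16; already-aligned data is unchanged:
	 * PADDING(16, 112) = 112.
	 */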
extern int smi_indent;
-struct lynx_accel{
+struct lynx_accel {
/* base virtual address of DPR registers */
- volatile unsigned char __iomem * dprBase;
+ volatile unsigned char __iomem *dprBase;
/* base virtual address of de data port */
- volatile unsigned char __iomem * dpPortBase;
+ volatile unsigned char __iomem *dpPortBase;
	/* function pointers */
void (*de_init)(struct lynx_accel *);
@@ -38,10 +38,10 @@ struct lynx_accel{
};
-/* lynx_share stands for a presentation of two frame buffer
- that use one smi adaptor , it is similar to a basic class of C++
+/* lynx_share stands for a presentation of two frame buffers
+   that use one smi adaptor; it is similar to a base class in C++
*/
-struct lynx_share{
+struct lynx_share {
/* common members */
u16 devid;
u8 revid;
@@ -53,7 +53,7 @@ struct lynx_share{
int mtrr_off;
struct{
int vram;
- }mtrr;
+ } mtrr;
/* all smi graphic adaptor got below attributes */
unsigned long vidmem_start;
unsigned long vidreg_start;
@@ -64,11 +64,11 @@ struct lynx_share{
/* locks*/
spinlock_t slock;
/* function pointers */
- void (*suspend)(struct lynx_share*);
- void (*resume)(struct lynx_share*);
+ void (*suspend)(struct lynx_share *);
+ void (*resume)(struct lynx_share *);
};
-struct lynx_cursor{
+struct lynx_cursor {
/* cursor width ,height and size */
int w;
int h;
@@ -80,7 +80,7 @@ struct lynx_cursor{
char __iomem *vstart;
int offset;
/* mmio addr of hw cursor */
- volatile char __iomem * mmio;
+ volatile char __iomem *mmio;
/* the lynx_share of this adaptor */
struct lynx_share *share;
/* proc_routines */
@@ -92,7 +92,7 @@ struct lynx_cursor{
void (*setData)(struct lynx_cursor *, u16, const u8*, const u8*);
};
-struct lynxfb_crtc{
+struct lynxfb_crtc {
unsigned char __iomem *vCursor; /* virtual address of cursor */
unsigned char __iomem *vScreen; /* virtual address of on_screen */
int oCursor; /* cursor address offset in vidmem */
@@ -108,14 +108,14 @@ struct lynxfb_crtc{
void *priv;
- int(*proc_setMode)(struct lynxfb_crtc*,
+ int (*proc_setMode)(struct lynxfb_crtc*,
struct fb_var_screeninfo*,
struct fb_fix_screeninfo*);
- int(*proc_checkMode)(struct lynxfb_crtc*, struct fb_var_screeninfo*);
- int(*proc_setColReg)(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
- void (*clear)(struct lynxfb_crtc*);
- /* pan display */
+ int (*proc_checkMode)(struct lynxfb_crtc*, struct fb_var_screeninfo*);
+ int (*proc_setColReg)(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
+ void (*clear)(struct lynxfb_crtc *);
+ /* pan display */
int (*proc_panDisplay)(struct lynxfb_crtc *,
const struct fb_var_screeninfo *,
const struct fb_info *);
@@ -123,33 +123,33 @@ struct lynxfb_crtc{
struct lynx_cursor cursor;
};
-struct lynxfb_output{
+struct lynxfb_output {
int dpms;
int paths;
- /* which paths(s) this output stands for,for sm750:
- paths=1:means output for panel paths
- paths=2:means output for crt paths
- paths=3:means output for both panel and crt paths
+	/* which path(s) this output stands for; for sm750:
+	   paths=1 means output for the panel path
+	   paths=2 means output for the crt path
+	   paths=3 means output for both panel and crt paths
*/
int *channel;
- /* which channel these outputs linked with,for sm750:
- *channel=0 means primary channel
- *channel=1 means secondary channel
- output->channel ==> &crtc->channel
+	/* which channel this output is linked with; for sm750:
+ *channel=0 means primary channel
+ *channel=1 means secondary channel
+ output->channel ==> &crtc->channel
*/
void *priv;
- int(*proc_setMode)(struct lynxfb_output*,
+ int (*proc_setMode)(struct lynxfb_output*,
struct fb_var_screeninfo*,
struct fb_fix_screeninfo*);
- int(*proc_checkMode)(struct lynxfb_output*, struct fb_var_screeninfo*);
- int(*proc_setBLANK)(struct lynxfb_output*, int);
- void (*clear)(struct lynxfb_output*);
+ int (*proc_checkMode)(struct lynxfb_output*, struct fb_var_screeninfo*);
+ int (*proc_setBLANK)(struct lynxfb_output*, int);
+ void (*clear)(struct lynxfb_output *);
};
-struct lynxfb_par{
+struct lynxfb_par {
	/* either 0 or 1 for a dual head adaptor; 0 is the one registered first */
int index;
unsigned int pseudo_palette[256];
@@ -165,14 +165,14 @@ struct lynxfb_par{
#define PS_TO_HZ(ps) \
- ({ \
+ ({ \
unsigned long long hz = 1000*1000*1000*1000ULL; \
do_div(hz, ps); \
- (unsigned long)hz;})
+ (unsigned long)hz; })
static inline unsigned long ps_to_hz(unsigned int psvalue)
{
- unsigned long long numerator=1000*1000*1000*1000ULL;
+ unsigned long long numerator = 1000*1000*1000*1000ULL;
/* 10^12 / picosecond period gives frequency in Hz */
do_div(numerator, psvalue);
return (unsigned long)numerator;
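A worked example of the conversion (standard VGA timing, not taken from this driver): the 640x480@60 mode has var->pixclock = 39722 ps, so

	/* ps_to_hz(39722) = 10^12 / 39722 ≈ 25175000 Hz,
	 * i.e. the usual 25.175 MHz VGA dot clock.
	 */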
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 6eee4cd582d1..1dd06a2e4ede 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -37,7 +37,7 @@ void hw_de_init(struct lynx_accel *accel)
{
/* setup 2d engine registers */
u32 reg, clr;
-
+
write_dpr(accel, DE_MASKS, 0xFFFFFFFF);
/* dpr1c */
@@ -82,7 +82,7 @@ void hw_de_init(struct lynx_accel *accel)
void hw_set2dformat(struct lynx_accel *accel, int fmt)
{
u32 reg;
-
+
	/* fmt=0,1,2 for 8,16,32 bpp on sm718/750/502 */
reg = read_dpr(accel, DE_STRETCH_FORMAT);
reg = FIELD_VALUE(reg, DE_STRETCH_FORMAT, PIXEL_FORMAT, fmt);
@@ -96,11 +96,10 @@ int hw_fillrect(struct lynx_accel *accel,
{
u32 deCtrl;
- if(accel->de_wait() != 0)
- {
+ if (accel->de_wait() != 0) {
		/* long time wait and still busy; the hardware
		 * seems to have got an error */
- pr_debug("%s:De engine always bussy\n", __func__);
+ pr_debug("De engine always busy\n");
return -1;
}
@@ -151,112 +150,102 @@ unsigned int width,
unsigned int height, /* width and height of rectangle in pixel value */
unsigned int rop2) /* ROP value */
{
- unsigned int nDirection, de_ctrl;
- int opSign;
- nDirection = LEFT_TO_RIGHT;
+ unsigned int nDirection, de_ctrl;
+ int opSign;
+
+ nDirection = LEFT_TO_RIGHT;
/* Direction of ROP2 operation: 1 = Left to Right, (-1) = Right to Left */
- opSign = 1;
- de_ctrl = 0;
-
- /* If source and destination are the same surface, need to check for overlay cases */
- if (sBase == dBase && sPitch == dPitch)
- {
- /* Determine direction of operation */
- if (sy < dy)
- {
- /* +----------+
- |S |
- | +----------+
- | | | |
- | | | |
- +---|------+ |
- | D|
- +----------+ */
-
- nDirection = BOTTOM_TO_TOP;
- }
- else if (sy > dy)
- {
- /* +----------+
- |D |
- | +----------+
- | | | |
- | | | |
- +---|------+ |
- | S|
- +----------+ */
-
- nDirection = TOP_TO_BOTTOM;
- }
- else
- {
- /* sy == dy */
-
- if (sx <= dx)
- {
- /* +------+---+------+
- |S | | D|
- | | | |
- | | | |
- | | | |
- +------+---+------+ */
-
- nDirection = RIGHT_TO_LEFT;
- }
- else
- {
- /* sx > dx */
-
- /* +------+---+------+
- |D | | S|
- | | | |
- | | | |
- | | | |
- +------+---+------+ */
-
- nDirection = LEFT_TO_RIGHT;
- }
- }
- }
-
- if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT))
- {
- sx += width - 1;
- sy += height - 1;
- dx += width - 1;
- dy += height - 1;
- opSign = (-1);
- }
-
- /* Note:
- DE_FOREGROUND are DE_BACKGROUND are don't care.
- DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS are set by set deSetTransparency().
- */
+ opSign = 1;
+ de_ctrl = 0;
+
+	/* If source and destination are the same surface, need to check for overlap cases */
+ if (sBase == dBase && sPitch == dPitch) {
+ /* Determine direction of operation */
+ if (sy < dy) {
+ /* +----------+
+ |S |
+ | +----------+
+ | | | |
+ | | | |
+ +---|------+ |
+ | D|
+ +----------+ */
+
+ nDirection = BOTTOM_TO_TOP;
+ } else if (sy > dy) {
+ /* +----------+
+ |D |
+ | +----------+
+ | | | |
+ | | | |
+ +---|------+ |
+ | S|
+ +----------+ */
+
+ nDirection = TOP_TO_BOTTOM;
+ } else {
+ /* sy == dy */
+
+ if (sx <= dx) {
+ /* +------+---+------+
+ |S | | D|
+ | | | |
+ | | | |
+ | | | |
+ +------+---+------+ */
+
+ nDirection = RIGHT_TO_LEFT;
+ } else {
+ /* sx > dx */
+
+ /* +------+---+------+
+ |D | | S|
+ | | | |
+ | | | |
+ | | | |
+ +------+---+------+ */
+
+ nDirection = LEFT_TO_RIGHT;
+ }
+ }
+ }
- /* 2D Source Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
- */
- write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
+ if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT)) {
+ sx += width - 1;
+ sy += height - 1;
+ dx += width - 1;
+ dy += height - 1;
+ opSign = (-1);
+ }
- /* 2D Destination Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
- */
- write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
+ /* Note:
+	   DE_FOREGROUND and DE_BACKGROUND are don't-care.
+	   DE_COLOR_COMPARE and DE_COLOR_COMPARE_MASK are set by deSetTransparency().
+ */
+
+ /* 2D Source Base.
+ It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ */
+ write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
+
+ /* 2D Destination Base.
+ It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ */
+ write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
#if 0
/* Program pitch (distance between the 1st points of two adjacent lines).
Note that input pitch is BYTE value, but the 2D Pitch register uses
pixel values. Need Byte to pixel conversion.
*/
- if(Bpp == 3){
+ if (Bpp == 3) {
sx *= 3;
dx *= 3;
width *= 3;
write_dpr(accel, DE_PITCH,
FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch) |
FIELD_VALUE(0, DE_PITCH, SOURCE, sPitch)); /* dpr10 */
- }
- else
+ } else
#endif
{
write_dpr(accel, DE_PITCH,
@@ -267,54 +256,53 @@ unsigned int rop2) /* ROP value */
/* Screen Window width in Pixels.
2D engine uses this value to calculate the linear address in frame buffer for a given point.
*/
- write_dpr(accel, DE_WINDOW_WIDTH,
- FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
- FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (sPitch/Bpp))); /* dpr3c */
+ write_dpr(accel, DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (sPitch/Bpp))); /* dpr3c */
- if (accel->de_wait() != 0){
+ if (accel->de_wait() != 0)
return -1;
+
+ {
+
+ write_dpr(accel, DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1, sx) |
+ FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
+ write_dpr(accel, DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, dx) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
+ write_dpr(accel, DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, width) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+
+ de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
+ ((nDirection == RIGHT_TO_LEFT) ?
+ FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
+ : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
+ FIELD_SET(0, DE_CONTROL, STATUS, START);
+ write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
+
}
- {
-
- write_dpr(accel, DE_SOURCE,
- FIELD_SET (0, DE_SOURCE, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_SOURCE, X_K1, sx) |
- FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
- write_dpr(accel, DE_DESTINATION,
- FIELD_SET (0, DE_DESTINATION, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_DESTINATION, X, dx) |
- FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
- write_dpr(accel, DE_DIMENSION,
- FIELD_VALUE(0, DE_DIMENSION, X, width) |
- FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
-
- de_ctrl =
- FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
- FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
- FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
- ((nDirection == RIGHT_TO_LEFT) ?
- FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
- : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
- FIELD_SET(0, DE_CONTROL, STATUS, START);
- write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
- }
-
- return 0;
+ return 0;
}
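The direction test above only matters because source and destination may overlap on the same surface; a CPU-side analogue of the same rule, as a sketch under the assumption of per-row copies (helper and names are hypothetical, not driver code):

	#include <linux/string.h>
	#include <linux/types.h>

	static void copy_rows_safely(u8 *base, unsigned int pitch,
				     unsigned int sy, unsigned int dy,
				     unsigned int rows, unsigned int bytes)
	{
		unsigned int i;

		if (sy < dy) {
			/* destination below source: copy bottom-to-top */
			for (i = rows; i-- > 0;)
				memmove(base + (dy + i) * pitch,
					base + (sy + i) * pitch, bytes);
		} else {
			/* otherwise top-to-bottom is safe */
			for (i = 0; i < rows; i++)
				memmove(base + (dy + i) * pitch,
					base + (sy + i) * pitch, bytes);
		}
	}

memmove() already handles overlap within a single row, so only the vertical order needs choosing, mirroring the BOTTOM_TO_TOP case above.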
static unsigned int deGetTransparency(struct lynx_accel *accel)
{
- unsigned int de_ctrl;
+ unsigned int de_ctrl;
- de_ctrl = read_dpr(accel, DE_CONTROL);
+ de_ctrl = read_dpr(accel, DE_CONTROL);
- de_ctrl &=
- FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
- FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
- FIELD_MASK(DE_CONTROL_TRANSPARENCY);
+ de_ctrl &=
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY);
- return de_ctrl;
+ return de_ctrl;
}
int hw_imageblit(struct lynx_accel *accel,
@@ -332,38 +320,36 @@ int hw_imageblit(struct lynx_accel *accel,
u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
u32 rop2) /* ROP value */
{
- unsigned int ulBytesPerScan;
- unsigned int ul4BytesPerScan;
- unsigned int ulBytesRemain;
- unsigned int de_ctrl = 0;
- unsigned char ajRemain[4];
- int i, j;
-
- startBit &= 7; /* Just make sure the start bit is within legal range */
- ulBytesPerScan = (width + startBit + 7) / 8;
- ul4BytesPerScan = ulBytesPerScan & ~3;
- ulBytesRemain = ulBytesPerScan & 3;
-
- if(accel->de_wait() != 0)
- {
- return -1;
- }
-
- /* 2D Source Base.
- Use 0 for HOST Blt.
- */
- write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
+ unsigned int ulBytesPerScan;
+ unsigned int ul4BytesPerScan;
+ unsigned int ulBytesRemain;
+ unsigned int de_ctrl = 0;
+ unsigned char ajRemain[4];
+ int i, j;
+
+ startBit &= 7; /* Just make sure the start bit is within legal range */
+ ulBytesPerScan = (width + startBit + 7) / 8;
+ ul4BytesPerScan = ulBytesPerScan & ~3;
+ ulBytesRemain = ulBytesPerScan & 3;
+
+ if (accel->de_wait() != 0)
+ return -1;
- /* 2D Destination Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
- */
- write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
+ /* 2D Source Base.
+ Use 0 for HOST Blt.
+ */
+ write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
+
+ /* 2D Destination Base.
+ It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ */
+ write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
#if 0
/* Program pitch (distance between the 1st points of two adjacent lines).
Note that input pitch is BYTE value, but the 2D Pitch register uses
pixel values. Need Byte to pixel conversion.
*/
- if(bytePerPixel == 3 ){
+ if (bytePerPixel == 3) {
dx *= 3;
width *= 3;
startBit *= 3;
@@ -371,8 +357,7 @@ int hw_imageblit(struct lynx_accel *accel,
FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch) |
FIELD_VALUE(0, DE_PITCH, SOURCE, dPitch)); /* dpr10 */
- }
- else
+ } else
#endif
{
write_dpr(accel, DE_PITCH,
@@ -380,30 +365,30 @@ int hw_imageblit(struct lynx_accel *accel,
FIELD_VALUE(0, DE_PITCH, SOURCE, dPitch/bytePerPixel)); /* dpr10 */
}
- /* Screen Window width in Pixels.
- 2D engine uses this value to calculate the linear address in frame buffer for a given point.
- */
- write_dpr(accel, DE_WINDOW_WIDTH,
- FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
- FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (dPitch/bytePerPixel)));
+ /* Screen Window width in Pixels.
+ 2D engine uses this value to calculate the linear address in frame buffer for a given point.
+ */
+ write_dpr(accel, DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (dPitch/bytePerPixel)));
- /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
- For mono bitmap, use startBit for X_K1. */
- write_dpr(accel, DE_SOURCE,
- FIELD_SET (0, DE_SOURCE, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
+ /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
+ For mono bitmap, use startBit for X_K1. */
+ write_dpr(accel, DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
- write_dpr(accel, DE_DESTINATION,
- FIELD_SET (0, DE_DESTINATION, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_DESTINATION, X, dx) |
- FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
+ write_dpr(accel, DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, dx) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
- write_dpr(accel, DE_DIMENSION,
- FIELD_VALUE(0, DE_DIMENSION, X, width) |
- FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+ write_dpr(accel, DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, width) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
- write_dpr(accel, DE_FOREGROUND, fColor);
- write_dpr(accel, DE_BACKGROUND, bColor);
+ write_dpr(accel, DE_FOREGROUND, fColor);
+ write_dpr(accel, DE_BACKGROUND, bColor);
de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
@@ -413,24 +398,20 @@ int hw_imageblit(struct lynx_accel *accel,
write_dpr(accel, DE_CONTROL, de_ctrl | deGetTransparency(accel));
- /* Write MONO data (line by line) to 2D Engine data port */
- for (i=0; i<height; i++)
- {
- /* For each line, send the data in chunks of 4 bytes */
- for (j=0; j<(ul4BytesPerScan/4); j++)
- {
- write_dpPort(accel, *(unsigned int *)(pSrcbuf + (j * 4)));
- }
-
- if (ulBytesRemain)
- {
- memcpy(ajRemain, pSrcbuf+ul4BytesPerScan, ulBytesRemain);
- write_dpPort(accel, *(unsigned int *)ajRemain);
- }
-
- pSrcbuf += srcDelta;
- }
-
- return 0;
+ /* Write MONO data (line by line) to 2D Engine data port */
+ for (i = 0; i < height; i++) {
+ /* For each line, send the data in chunks of 4 bytes */
+ for (j = 0; j < (ul4BytesPerScan/4); j++)
+ write_dpPort(accel, *(unsigned int *)(pSrcbuf + (j * 4)));
+
+ if (ulBytesRemain) {
+ memcpy(ajRemain, pSrcbuf+ul4BytesPerScan, ulBytesRemain);
+ write_dpPort(accel, *(unsigned int *)ajRemain);
+ }
+
+ pSrcbuf += srcDelta;
+ }
+
+ return 0;
}
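A worked example of the scanline math at the top of hw_imageblit(), with hypothetical values width = 70 and startBit = 3:

	/* ulBytesPerScan  = (70 + 3 + 7) / 8 = 10
	 * ul4BytesPerScan = 10 & ~3         = 8  (two 4-byte port writes)
	 * ulBytesRemain   = 10 & 3          = 2  (sent via the ajRemain copy)
	 */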
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d3d256c21206..f252e47d5ee9 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -7,7 +7,7 @@
/* notes: the addresses below are offsets from de_base_address (0x100000) */
/* for sm718/750/502 de_base is at mmreg_1mb*/
-#define DE_BASE_ADDR_TYPE1 0x100000
+#define DE_BASE_ADDR_TYPE1 0x100000
/* for sm712,de_base is at mmreg_32kb */
#define DE_BASE_ADDR_TYPE2 0x8000
/* for sm722,de_base is at mmreg_0 */
@@ -26,7 +26,7 @@
#define DE_SOURCE_WRAP_ENABLE 1
#define DE_SOURCE_X_K1 29:16
#define DE_SOURCE_Y_K2 15:0
-#define DE_SOURCE_X_K1_MONO 20:16
+#define DE_SOURCE_X_K1_MONO 20:16
#define DE_DESTINATION 0x4
#define DE_DESTINATION_WRAP 31:31
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index 405e24b6768f..a94a4bbff450 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -61,6 +61,7 @@ writel((data), cursor->mmio + (addr))
void hw_cursor_enable(struct lynx_cursor *cursor)
{
u32 reg;
+
reg = FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, cursor->offset)|
FIELD_SET(0, HWC_ADDRESS, EXT, LOCAL)|
FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE);
@@ -81,6 +82,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y)
{
u32 reg;
+
reg = FIELD_VALUE(0, HWC_LOCATION, Y, y)|
FIELD_VALUE(0, HWC_LOCATION, X, x);
POKE32(HWC_LOCATION, reg);
@@ -93,7 +95,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor,
}
void hw_cursor_setData(struct lynx_cursor *cursor,
- u16 rop, const u8* pcol, const u8* pmsk)
+ u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
u8 color, mask, opr;
@@ -122,8 +124,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
odd=0;
*/
- for(i=0;i<count;i++)
- {
+ for (i = 0; i < count; i++) {
color = *pcol++;
mask = *pmsk++;
data = 0;
@@ -132,26 +133,25 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
* but method 2 shows no lag
* and method 1 seems a bit wrong*/
#if 0
- if(rop == ROP_XOR)
+ if (rop == ROP_XOR)
opr = mask ^ color;
else
opr = mask & color;
- for(j=0;j<8;j++)
- {
+ for (j = 0; j < 8; j++) {
- if(opr & (0x80 >> j))
- { /* use fg color,id = 2 */
+ if (opr & (0x80 >> j)) {
+ /* use fg color,id = 2 */
data |= 2 << (j*2);
- }else{
+ } else {
/* use bg color,id = 1 */
data |= 1 << (j*2);
}
}
#else
- for(j=0;j<8;j++){
- if(mask & (0x80>>j)){
- if(rop == ROP_XOR)
+ for (j = 0; j < 8; j++) {
+ if (mask & (0x80>>j)) {
+ if (rop == ROP_XOR)
opr = mask ^ color;
else
opr = mask & color;
@@ -165,15 +165,15 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
/* assume pitch is 1,2,4,8,...*/
#if 0
- if(!((i+1)&(pitch-1))) /* below line equal to is line */
+		if (!((i+1)&(pitch-1))) /* equivalent to the line below */
#else
- if((i+1) % pitch == 0)
+ if ((i+1) % pitch == 0)
#endif
{
/* need a return */
pstart += offset;
pbuffer = pstart;
- }else{
+ } else {
pbuffer += sizeof(u16);
}
@@ -184,7 +184,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
void hw_cursor_setData2(struct lynx_cursor *cursor,
- u16 rop, const u8* pcol, const u8* pmsk)
+ u16 rop, const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
u8 color, mask;
@@ -204,45 +204,42 @@ void hw_cursor_setData2(struct lynx_cursor *cursor,
pstart = cursor->vstart;
pbuffer = pstart;
- for(i=0;i<count;i++)
- {
+ for (i = 0; i < count; i++) {
color = *pcol++;
mask = *pmsk++;
data = 0;
/* either method below works well, but method 2 shows no lag */
#if 0
- if(rop == ROP_XOR)
+ if (rop == ROP_XOR)
opr = mask ^ color;
else
opr = mask & color;
- for(j=0;j<8;j++)
- {
+ for (j = 0; j < 8; j++) {
- if(opr & (0x80 >> j))
- { /* use fg color,id = 2 */
+ if (opr & (0x80 >> j)) {
+ /* use fg color,id = 2 */
data |= 2 << (j*2);
- }else{
+ } else {
/* use bg color,id = 1 */
data |= 1 << (j*2);
}
}
#else
- for(j=0;j<8;j++){
- if(mask & (1<<j))
+ for (j = 0; j < 8; j++) {
+ if (mask & (1<<j))
data |= ((color & (1<<j))?1:2)<<(j*2);
}
#endif
iowrite16(data, pbuffer);
/* assume pitch is 1,2,4,8,...*/
- if(!(i&(pitch-1)))
- {
+ if (!(i&(pitch-1))) {
/* need a return */
pstart += offset;
pbuffer = pstart;
- }else{
+ } else {
pbuffer += sizeof(u16);
}
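A worked example of the 2bpp packing in hw_cursor_setData2() above, with hypothetical input bytes mask = 0x03 and color = 0x01:

	/* j = 0: mask bit set, color bit set   -> data |= 1 << 0  (id 1)
	 * j = 1: mask bit set, color bit clear -> data |= 2 << 2  (id 2)
	 * j = 2..7: mask bits clear            -> pixels stay 0 (transparent)
	 * so data = 0x0009 is written for these eight cursor pixels.
	 */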
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index e1716a600239..6c4fc9b73489 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -11,7 +11,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
void hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg);
void hw_cursor_setData(struct lynx_cursor *cursor,
- u16 rop, const u8* data, const u8* mask);
+ u16 rop, const u8 *data, const u8 *mask);
void hw_cursor_setData2(struct lynx_cursor *cursor,
- u16 rop, const u8* data, const u8* mask);
+ u16 rop, const u8 *data, const u8 *mask);
#endif
diff --git a/drivers/staging/sm750fb/sm750_help.h b/drivers/staging/sm750fb/sm750_help.h
index 05777f72c166..8dc6bd22f5db 100644
--- a/drivers/staging/sm750fb/sm750_help.h
+++ b/drivers/staging/sm750fb/sm750_help.h
@@ -11,9 +11,9 @@
#define GET_FIELD(d, f) (((d) >> _LSB(f)) & RAW_MASK(f))
#define TEST_FIELD(d, f, v) (GET_FIELD(d, f) == f ## _ ## v)
#define SET_FIELD(d, f, v) (((d) & ~GET_MASK(f)) | \
- (((f ## _ ## v) & RAW_MASK(f)) << _LSB(f)))
+ (((f ## _ ## v) & RAW_MASK(f)) << _LSB(f)))
#define SET_FIELDV(d, f, v) (((d) & ~GET_MASK(f)) | \
- (((v) & RAW_MASK(f)) << _LSB(f)))
+ (((v) & RAW_MASK(f)) << _LSB(f)))
/* Internal macros */
#define _F_START(f) (0 ? f)
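The field tokens used with these macros are "msb:lsb" pairs, so the conditional in _F_START() picks one end of the range; e.g. for a field defined as 29:16 (like DE_SOURCE_X_K1 in sm750_accel.h):

	/* _F_START(29:16) expands to (0 ? 29 : 16) = 16, the field's LSB;
	 * the matching end-of-field macro (presumably (1 ? f)) yields 29.
	 */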
@@ -26,24 +26,24 @@
/* Global macros */
#define FIELD_GET(x, reg, field) \
( \
- _F_NORMALIZE((x), reg ## _ ## field) \
+ _F_NORMALIZE((x), reg ## _ ## field) \
)
#define FIELD_SET(x, reg, field, value) \
( \
- (x & ~_F_MASK(reg ## _ ## field)) \
- | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
+ (x & ~_F_MASK(reg ## _ ## field)) \
+ | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
)
#define FIELD_VALUE(x, reg, field, value) \
( \
- (x & ~_F_MASK(reg ## _ ## field)) \
- | _F_DENORMALIZE(value, reg ## _ ## field) \
+ (x & ~_F_MASK(reg ## _ ## field)) \
+ | _F_DENORMALIZE(value, reg ## _ ## field) \
)
#define FIELD_CLEAR(reg, field) \
( \
- ~ _F_MASK(reg ## _ ## field) \
+ ~ _F_MASK(reg ## _ ## field) \
)
/* Field Macros */
@@ -55,25 +55,25 @@
#define FIELD_DENORMALIZE(field, value) (((value) << FIELD_START(field)) & FIELD_MASK(field))
#define FIELD_INIT(reg, field, value) FIELD_DENORMALIZE(reg ## _ ## field, \
- reg ## _ ## field ## _ ## value)
+ reg ## _ ## field ## _ ## value)
#define FIELD_INIT_VAL(reg, field, value) \
- (FIELD_DENORMALIZE(reg ## _ ## field, value))
+ (FIELD_DENORMALIZE(reg ## _ ## field, value))
#define FIELD_VAL_SET(x, r, f, v) x = x & ~FIELD_MASK(r ## _ ## f) \
- | FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v)
+ | FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v)
#define RGB(r, g, b) \
( \
- (unsigned long) (((r) << 16) | ((g) << 8) | (b)) \
+ (unsigned long) (((r) << 16) | ((g) << 8) | (b)) \
)
#define RGB16(r, g, b) \
( \
- (unsigned short) ((((r) & 0xF8) << 8) | (((g) & 0xFC) << 3) | (((b) & 0xF8) >> 3)) \
+ (unsigned short) ((((r) & 0xF8) << 8) | (((g) & 0xFC) << 3) | (((b) & 0xF8) >> 3)) \
)
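For example (values illustrative):

	/* RGB(0xff, 0x00, 0x00)   = 0x00ff0000             (XRGB8888 red)
	 * RGB16(0xff, 0x00, 0x00) = (0xff & 0xF8) << 8 = 0xF800 (RGB565 red)
	 */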
static inline unsigned int absDiff(unsigned int a, unsigned int b)
{
- if(a<b)
+ if (a < b)
return b-a;
else
return a-b;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 84381bc414e7..7317ba9b7fe5 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -24,11 +24,11 @@
#include "ddk750.h"
#include "sm750_accel.h"
-int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev)
+int hw_sm750_map(struct lynx_share *share, struct pci_dev *pdev)
{
int ret;
struct sm750_share *spec_share;
-
+
spec_share = container_of(share, struct sm750_share, share);
ret = 0;
@@ -43,24 +43,23 @@ int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev)
* in lynxfb_remove, or memory will not be mapped again
* successfully
* */
-
- if((ret = pci_request_region(pdev, 1, "sm750fb")))
- {
+ ret = pci_request_region(pdev, 1, "sm750fb");
+ if (ret) {
pr_err("Can not request PCI regions.\n");
goto exit;
}
/* now map mmio and vidmem*/
share->pvReg = ioremap_nocache(share->vidreg_start, share->vidreg_size);
- if(!share->pvReg){
+ if (!share->pvReg) {
pr_err("mmio failed\n");
ret = -EFAULT;
goto exit;
- }else{
+ } else {
pr_info("mmio virtual addr = %p\n", share->pvReg);
}
-
+
share->accel.dprBase = share->pvReg + DE_BASE_ADDR_TYPE1;
share->accel.dpPortBase = share->pvReg + DE_PORT_ADDR_TYPE1;
@@ -78,8 +77,8 @@ int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev)
/* reserve the vidmem space of smi adaptor */
#if 0
- if((ret = pci_request_region(pdev, 0, _moduleName_)))
- {
+ ret = pci_request_region(pdev, 0, _moduleName_);
+ if (ret) {
pr_err("Can not request PCI regions.\n");
goto exit;
}
@@ -87,11 +86,11 @@ int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev)
share->pvMem = ioremap_wc(share->vidmem_start, share->vidmem_size);
- if(!share->pvMem){
+ if (!share->pvMem) {
pr_err("Map video memory failed\n");
ret = -EFAULT;
goto exit;
- }else{
+ } else {
pr_info("video memory vaddr = %p\n", share->pvMem);
}
exit:
@@ -104,22 +103,22 @@ int hw_sm750_inithw(struct lynx_share *share, struct pci_dev *pdev)
{
struct sm750_share *spec_share;
struct init_status *parm;
-
+
spec_share = container_of(share, struct sm750_share, share);
parm = &spec_share->state.initParm;
- if(parm->chip_clk == 0)
- parm->chip_clk = (getChipType() == SM750LE)?
+ if (parm->chip_clk == 0)
+ parm->chip_clk = (getChipType() == SM750LE) ?
DEFAULT_SM750LE_CHIP_CLOCK :
DEFAULT_SM750_CHIP_CLOCK;
- if(parm->mem_clk == 0)
+ if (parm->mem_clk == 0)
parm->mem_clk = parm->chip_clk;
- if(parm->master_clk == 0)
+ if (parm->master_clk == 0)
parm->master_clk = parm->chip_clk/3;
ddk750_initHw((initchip_param_t *)&spec_share->state.initParm);
/* for sm718,open pci burst */
- if(share->devid == 0x718){
+ if (share->devid == 0x718) {
POKE32(SYSTEM_CTRL,
FIELD_SET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PCI_BURST, ON));
}
@@ -130,10 +129,9 @@ int hw_sm750_inithw(struct lynx_share *share, struct pci_dev *pdev)
ddk750_initDVIDisp();
#endif
- if(getChipType() != SM750LE)
- {
+ if (getChipType() != SM750LE) {
/* does user need CRT ?*/
- if(spec_share->state.nocrt){
+ if (spec_share->state.nocrt) {
POKE32(MISC_CTRL,
FIELD_SET(PEEK32(MISC_CTRL),
MISC_CTRL,
@@ -143,7 +141,7 @@ int hw_sm750_inithw(struct lynx_share *share, struct pci_dev *pdev)
FIELD_SET(PEEK32(SYSTEM_CTRL),
SYSTEM_CTRL,
DPMS, VNHN));
- }else{
+ } else {
POKE32(MISC_CTRL,
FIELD_SET(PEEK32(MISC_CTRL),
MISC_CTRL,
@@ -155,45 +153,43 @@ int hw_sm750_inithw(struct lynx_share *share, struct pci_dev *pdev)
DPMS, VPHP));
}
- switch (spec_share->state.pnltype){
- case sm750_doubleTFT:
- case sm750_24TFT:
- case sm750_dualTFT:
- POKE32(PANEL_DISPLAY_CTRL,
- FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
- PANEL_DISPLAY_CTRL,
- TFT_DISP,
- spec_share->state.pnltype));
- break;
+ switch (spec_share->state.pnltype) {
+ case sm750_doubleTFT:
+ case sm750_24TFT:
+ case sm750_dualTFT:
+ POKE32(PANEL_DISPLAY_CTRL,
+ FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
+ PANEL_DISPLAY_CTRL,
+ TFT_DISP,
+ spec_share->state.pnltype));
+ break;
}
- }else{
+ } else {
		/* for 750LE, without DVI chip initialization the monitor gets no signal */
/* Set up GPIO for software I2C to program DVI chip in the
Xilinx SP605 board, in order to have video signal.
*/
- swI2CInit(0, 1);
+ swI2CInit(0, 1);
- /* Customer may NOT use CH7301 DVI chip, which has to be
- initialized differently.
- */
- if (swI2CReadReg(0xec, 0x4a) == 0x95)
- {
- /* The following register values for CH7301 are from
- Chrontel app note and our experiment.
- */
+ /* Customer may NOT use CH7301 DVI chip, which has to be
+ initialized differently.
+ */
+ if (swI2CReadReg(0xec, 0x4a) == 0x95) {
+ /* The following register values for CH7301 are from
+ Chrontel app note and our experiment.
+ */
pr_info("yes,CH7301 DVI chip found\n");
- swI2CWriteReg(0xec, 0x1d, 0x16);
- swI2CWriteReg(0xec, 0x21, 0x9);
- swI2CWriteReg(0xec, 0x49, 0xC0);
+ swI2CWriteReg(0xec, 0x1d, 0x16);
+ swI2CWriteReg(0xec, 0x21, 0x9);
+ swI2CWriteReg(0xec, 0x49, 0xC0);
pr_info("okay,CH7301 DVI chip setup done\n");
- }
+ }
}
/* init 2d engine */
- if(!share->accel_off){
+ if (!share->accel_off)
hw_sm750_initAccel(share);
- }
return 0;
}
@@ -202,86 +198,87 @@ int hw_sm750_inithw(struct lynx_share *share, struct pci_dev *pdev)
resource_size_t hw_sm750_getVMSize(struct lynx_share *share)
{
resource_size_t ret;
-
+
ret = ddk750_getVMSize();
return ret;
}
-int hw_sm750_output_checkMode(struct lynxfb_output* output, struct fb_var_screeninfo* var)
+int hw_sm750_output_checkMode(struct lynxfb_output *output, struct fb_var_screeninfo *var)
{
-
+
return 0;
}
-int hw_sm750_output_setMode(struct lynxfb_output* output,
- struct fb_var_screeninfo* var, struct fb_fix_screeninfo* fix)
+int hw_sm750_output_setMode(struct lynxfb_output *output,
+ struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix)
{
int ret;
disp_output_t dispSet;
int channel;
-
+
ret = 0;
dispSet = 0;
channel = *output->channel;
- if(getChipType() != SM750LE){
- if(channel == sm750_primary){
+ if (getChipType() != SM750LE) {
+ if (channel == sm750_primary) {
pr_info("primary channel\n");
- if(output->paths & sm750_panel)
+ if (output->paths & sm750_panel)
dispSet |= do_LCD1_PRI;
- if(output->paths & sm750_crt)
+ if (output->paths & sm750_crt)
dispSet |= do_CRT_PRI;
- }else{
+ } else {
pr_info("secondary channel\n");
- if(output->paths & sm750_panel)
+ if (output->paths & sm750_panel)
dispSet |= do_LCD1_SEC;
- if(output->paths & sm750_crt)
+ if (output->paths & sm750_crt)
dispSet |= do_CRT_SEC;
}
ddk750_setLogicalDispOut(dispSet);
- }else{
+ } else {
/* just open DISPLAY_CONTROL_750LE register bit 3:0*/
u32 reg;
+
reg = PEEK32(DISPLAY_CONTROL_750LE);
reg |= 0xf;
POKE32(DISPLAY_CONTROL_750LE, reg);
}
- pr_info("ddk setlogicdispout done \n");
+ pr_info("ddk setlogicdispout done\n");
return ret;
}
-void hw_sm750_output_clear(struct lynxfb_output* output)
+void hw_sm750_output_clear(struct lynxfb_output *output)
{
-
+
return;
}
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc* crtc, struct fb_var_screeninfo* var)
+int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc, struct fb_var_screeninfo *var)
{
struct lynx_share *share;
-
+
share = container_of(crtc, struct lynxfb_par, crtc)->share;
- switch (var->bits_per_pixel){
- case 8:
- case 16:
- break;
- case 32:
- if (share->revid == SM750LE_REVISION_ID) {
- pr_debug("750le do not support 32bpp\n");
- return -EINVAL;
- }
- break;
- default:
+ switch (var->bits_per_pixel) {
+ case 8:
+ case 16:
+ break;
+ case 32:
+ if (share->revid == SM750LE_REVISION_ID) {
+ pr_debug("750le do not support 32bpp\n");
return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
}
@@ -292,9 +289,9 @@ int hw_sm750_crtc_checkMode(struct lynxfb_crtc* crtc, struct fb_var_screeninfo*
/*
 set the controller's mode for @crtc according to the @var and @fix parameters
*/
-int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
- struct fb_var_screeninfo* var,
- struct fb_fix_screeninfo* fix)
+int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix)
{
int ret, fmt;
u32 reg;
@@ -303,24 +300,24 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
struct lynx_share *share;
struct lynxfb_par *par;
-
+
ret = 0;
par = container_of(crtc, struct lynxfb_par, crtc);
share = par->share;
#if 1
- if(!share->accel_off){
+ if (!share->accel_off) {
/* set 2d engine pixel format according to mode bpp */
- switch(var->bits_per_pixel){
- case 8:
- fmt = 0;
- break;
- case 16:
- fmt = 1;
- break;
- case 32:
- default:
- fmt = 2;
- break;
+ switch (var->bits_per_pixel) {
+ case 8:
+ fmt = 0;
+ break;
+ case 16:
+ fmt = 1;
+ break;
+ case 32:
+ default:
+ fmt = 2;
+ break;
}
hw_set2dformat(&share->accel, fmt);
}
@@ -330,7 +327,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
modparm.pixel_clock = ps_to_hz(var->pixclock);
modparm.vertical_sync_polarity = (var->sync & FB_SYNC_HOR_HIGH_ACT) ? POS:NEG;
modparm.horizontal_sync_polarity = (var->sync & FB_SYNC_VERT_HIGH_ACT) ? POS:NEG;
- modparm.clock_phase_polarity = (var->sync& FB_SYNC_COMP_HIGH_ACT) ? POS:NEG;
+ modparm.clock_phase_polarity = (var->sync & FB_SYNC_COMP_HIGH_ACT) ? POS:NEG;
modparm.horizontal_display_end = var->xres;
modparm.horizontal_sync_width = var->hsync_len;
modparm.horizontal_sync_start = var->xres + var->right_margin;
@@ -341,19 +338,19 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
modparm.vertical_total = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
/* choose pll */
- if(crtc->channel != sm750_secondary)
+ if (crtc->channel != sm750_secondary)
clock = PRIMARY_PLL;
else
clock = SECONDARY_PLL;
pr_debug("Request pixel clock = %lu\n", modparm.pixel_clock);
ret = ddk750_setModeTiming(&modparm, clock);
- if(ret){
+ if (ret) {
pr_err("Set mode timing failed\n");
goto exit;
}
- if(crtc->channel != sm750_secondary){
+ if (crtc->channel != sm750_secondary) {
/* set pitch, offset ,width,start address ,etc... */
POKE32(PANEL_FB_ADDRESS,
FIELD_SET(0, PANEL_FB_ADDRESS, STATUS, CURRENT)|
@@ -369,7 +366,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
FIELD_VALUE(0, PANEL_FB_WIDTH, OFFSET, fix->line_length));
POKE32(PANEL_WINDOW_WIDTH,
- FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres -1)|
+ FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres - 1)|
FIELD_VALUE(0, PANEL_WINDOW_WIDTH, X, var->xoffset));
POKE32(PANEL_WINDOW_HEIGHT,
@@ -389,7 +386,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
PANEL_DISPLAY_CTRL, FORMAT,
(var->bits_per_pixel >> 4)
));
- }else{
+ } else {
/* not implemented now */
POKE32(CRT_FB_ADDRESS, crtc->oScreen);
reg = var->xres * (var->bits_per_pixel >> 3);
@@ -412,138 +409,137 @@ exit:
return ret;
}
-void hw_sm750_crtc_clear(struct lynxfb_crtc* crtc)
+void hw_sm750_crtc_clear(struct lynxfb_crtc *crtc)
{
-
+
return;
}
-int hw_sm750_setColReg(struct lynxfb_crtc* crtc, ushort index,
+int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index,
ushort red, ushort green, ushort blue)
{
- static unsigned int add[]={PANEL_PALETTE_RAM, CRT_PALETTE_RAM};
+ static unsigned int add[] = {PANEL_PALETTE_RAM, CRT_PALETTE_RAM};
+
POKE32(add[crtc->channel] + index*4, (red<<16)|(green<<8)|blue);
return 0;
}
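For example, a hypothetical call on the primary channel:

	/* hw_sm750_setColReg(crtc, 5, 0xff, 0x00, 0x00) stores
	 * (0xff << 16) = 0x00ff0000 at PANEL_PALETTE_RAM + 5 * 4.
	 */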
-int hw_sm750le_setBLANK(struct lynxfb_output * output, int blank){
+int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank)
+{
int dpms, crtdb;
-
- switch(blank)
- {
+
+ switch (blank) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_UNBLANK:
+ case FB_BLANK_UNBLANK:
#else
- case VESA_NO_BLANKING:
+ case VESA_NO_BLANKING:
#endif
- dpms = CRT_DISPLAY_CTRL_DPMS_0;
- crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
- break;
+ dpms = CRT_DISPLAY_CTRL_DPMS_0;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_NORMAL:
- dpms = CRT_DISPLAY_CTRL_DPMS_0;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ case FB_BLANK_NORMAL:
+ dpms = CRT_DISPLAY_CTRL_DPMS_0;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
#else
- case VESA_VSYNC_SUSPEND:
+ case VESA_VSYNC_SUSPEND:
#endif
- dpms = CRT_DISPLAY_CTRL_DPMS_2;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ dpms = CRT_DISPLAY_CTRL_DPMS_2;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
#else
- case VESA_HSYNC_SUSPEND:
+ case VESA_HSYNC_SUSPEND:
#endif
- dpms = CRT_DISPLAY_CTRL_DPMS_1;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ dpms = CRT_DISPLAY_CTRL_DPMS_1;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_POWERDOWN:
+ case FB_BLANK_POWERDOWN:
#else
- case VESA_POWERDOWN:
+ case VESA_POWERDOWN:
#endif
- dpms = CRT_DISPLAY_CTRL_DPMS_3;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
- default:
- return -EINVAL;
+ dpms = CRT_DISPLAY_CTRL_DPMS_3;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
+ default:
+ return -EINVAL;
}
- if(output->paths & sm750_crt){
+ if (output->paths & sm750_crt) {
POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, DPMS, dpms));
POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
}
return 0;
}
-int hw_sm750_setBLANK(struct lynxfb_output* output, int blank)
+int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
{
unsigned int dpms, pps, crtdb;
-
+
dpms = pps = crtdb = 0;
- switch (blank)
- {
+ switch (blank) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_UNBLANK:
+ case FB_BLANK_UNBLANK:
#else
- case VESA_NO_BLANKING:
+ case VESA_NO_BLANKING:
#endif
- pr_info("flag = FB_BLANK_UNBLANK \n");
- dpms = SYSTEM_CTRL_DPMS_VPHP;
- pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
- break;
+ pr_info("flag = FB_BLANK_UNBLANK\n");
+ dpms = SYSTEM_CTRL_DPMS_VPHP;
+ pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_NORMAL:
- pr_info("flag = FB_BLANK_NORMAL \n");
- dpms = SYSTEM_CTRL_DPMS_VPHP;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ case FB_BLANK_NORMAL:
+ pr_info("flag = FB_BLANK_NORMAL\n");
+ dpms = SYSTEM_CTRL_DPMS_VPHP;
+ pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
#else
- case VESA_VSYNC_SUSPEND:
+ case VESA_VSYNC_SUSPEND:
#endif
- dpms = SYSTEM_CTRL_DPMS_VNHP;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ dpms = SYSTEM_CTRL_DPMS_VNHP;
+ pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
#else
- case VESA_HSYNC_SUSPEND:
+ case VESA_HSYNC_SUSPEND:
#endif
- dpms = SYSTEM_CTRL_DPMS_VPHN;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ dpms = SYSTEM_CTRL_DPMS_VPHN;
+ pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
- case FB_BLANK_POWERDOWN:
+ case FB_BLANK_POWERDOWN:
#else
- case VESA_POWERDOWN:
+ case VESA_POWERDOWN:
#endif
- dpms = SYSTEM_CTRL_DPMS_VNHN;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
- break;
+ dpms = SYSTEM_CTRL_DPMS_VNHN;
+ pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+ crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ break;
}
- if(output->paths & sm750_crt){
+ if (output->paths & sm750_crt) {
POKE32(SYSTEM_CTRL, FIELD_VALUE(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, DPMS, dpms));
POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
}
- if(output->paths & sm750_panel){
+ if (output->paths & sm750_panel)
POKE32(PANEL_DISPLAY_CTRL, FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, DATA, pps));
- }
return 0;
}
@@ -552,9 +548,10 @@ int hw_sm750_setBLANK(struct lynxfb_output* output, int blank)
void hw_sm750_initAccel(struct lynx_share *share)
{
u32 reg;
+
enable2DEngine(1);
- if(getChipType() == SM750LE){
+ if (getChipType() == SM750LE) {
reg = PEEK32(DE_STATE1);
reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, ON);
POKE32(DE_STATE1, reg);
@@ -563,7 +560,7 @@ void hw_sm750_initAccel(struct lynx_share *share)
reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, OFF);
POKE32(DE_STATE1, reg);
- }else{
+ } else {
/* engine reset */
reg = PEEK32(SYSTEM_CTRL);
reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, ON);
@@ -580,13 +577,14 @@ void hw_sm750_initAccel(struct lynx_share *share)
int hw_sm750le_deWait(void)
{
- int i=0x10000000;
- while(i--){
+ int i = 0x10000000;
+
+ while (i--) {
unsigned int dwVal = PEEK32(DE_STATE2);
- if((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
+
+ if ((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
(FIELD_GET(dwVal, DE_STATE2, DE_FIFO) == DE_STATE2_DE_FIFO_EMPTY) &&
- (FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY))
- {
+ (FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY)) {
return 0;
}
}
@@ -597,13 +595,14 @@ int hw_sm750le_deWait(void)
int hw_sm750_deWait(void)
{
- int i=0x10000000;
- while(i--){
+ int i = 0x10000000;
+
+ while (i--) {
unsigned int dwVal = PEEK32(SYSTEM_CTRL);
- if((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
+
+ if ((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
(FIELD_GET(dwVal, SYSTEM_CTRL, DE_FIFO) == SYSTEM_CTRL_DE_FIFO_EMPTY) &&
- (FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY))
- {
+ (FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) {
return 0;
}
}
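Both deWait() helpers share the same bounded busy-wait shape; a minimal sketch of the pattern (the idle predicate is passed in, since the real status checks differ per chip):

	static int de_poll_idle(int (*is_idle)(void))
	{
		int i = 0x10000000;

		while (i--) {
			if (is_idle())
				return 0;	/* engine idle, FIFOs empty */
		}
		return -1;		/* gave up: engine still busy */
	}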
@@ -612,28 +611,27 @@ int hw_sm750_deWait(void)
}
int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
- const struct fb_var_screeninfo *var,
- const struct fb_info *info)
+ const struct fb_var_screeninfo *var,
+ const struct fb_info *info)
{
- uint32_t total;
- /* check params */
- if ((var->xoffset + var->xres > var->xres_virtual) ||
- (var->yoffset + var->yres > var->yres_virtual)) {
- return -EINVAL;
- }
-
- total = var->yoffset * info->fix.line_length +
- ((var->xoffset * var->bits_per_pixel) >> 3);
- total += crtc->oScreen;
- if (crtc->channel == sm750_primary) {
- POKE32(PANEL_FB_ADDRESS,
- FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
- PANEL_FB_ADDRESS, ADDRESS, total));
- } else {
- POKE32(CRT_FB_ADDRESS,
- FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
- CRT_FB_ADDRESS, ADDRESS, total));
- }
- return 0;
-}
+ uint32_t total;
+ /* check params */
+ if ((var->xoffset + var->xres > var->xres_virtual) ||
+ (var->yoffset + var->yres > var->yres_virtual)) {
+ return -EINVAL;
+ }
+ total = var->yoffset * info->fix.line_length +
+ ((var->xoffset * var->bits_per_pixel) >> 3);
+ total += crtc->oScreen;
+ if (crtc->channel == sm750_primary) {
+ POKE32(PANEL_FB_ADDRESS,
+ FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
+ PANEL_FB_ADDRESS, ADDRESS, total));
+ } else {
+ POKE32(CRT_FB_ADDRESS,
+ FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
+ CRT_FB_ADDRESS, ADDRESS, total));
+ }
+ return 0;
+}
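A worked example of the panning offset above, with hypothetical values xoffset = 8, yoffset = 10, bits_per_pixel = 32 and fix.line_length = 3200:

	/* total = 10 * 3200 + ((8 * 32) >> 3) = 32000 + 32 = 32032,
	 * plus crtc->oScreen, then written to the FB address register.
	 */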
diff --git a/drivers/staging/sm750fb/sm750_hw.h b/drivers/staging/sm750fb/sm750_hw.h
index 93288b3a99d8..3781a1a11c68 100644
--- a/drivers/staging/sm750fb/sm750_hw.h
+++ b/drivers/staging/sm750fb/sm750_hw.h
@@ -2,14 +2,14 @@
#define LYNX_HW750_H__
-#define DEFAULT_SM750_CHIP_CLOCK 290
-#define DEFAULT_SM750LE_CHIP_CLOCK 333
+#define DEFAULT_SM750_CHIP_CLOCK 290
+#define DEFAULT_SM750LE_CHIP_CLOCK 333
#ifndef SM750LE_REVISION_ID
#define SM750LE_REVISION_ID (unsigned char)0xfe
#endif
-enum sm750_pnltype{
+enum sm750_pnltype {
sm750_24TFT = 0,/* 24bit tft */
@@ -19,30 +19,30 @@ enum sm750_pnltype{
};
/* vga channel is not concerned */
-enum sm750_dataflow{
+enum sm750_dataflow {
sm750_simul_pri,/* primary => all head */
sm750_simul_sec,/* secondary => all head */
- sm750_dual_normal,/* primary => panel head and secondary => crt */
+ sm750_dual_normal,/* primary => panel head and secondary => crt */
- sm750_dual_swap,/* primary => crt head and secondary => panel */
+ sm750_dual_swap,/* primary => crt head and secondary => panel */
};
-enum sm750_channel{
+enum sm750_channel {
sm750_primary = 0,
/* enum value equal to the register filed data */
sm750_secondary = 1,
};
-enum sm750_path{
+enum sm750_path {
sm750_panel = 1,
sm750_crt = 2,
sm750_pnc = 3,/* panel and crt */
};
-struct init_status{
+struct init_status {
ushort powerMode;
/* below three clocks are in unit of MHZ*/
ushort chip_clk;
@@ -52,7 +52,7 @@ struct init_status{
ushort resetMemory;
};
-struct sm750_state{
+struct sm750_state {
struct init_status initParm;
enum sm750_pnltype pnltype;
enum sm750_dataflow dataflow;
@@ -61,24 +61,24 @@ struct sm750_state{
int yLCD;
};
-/* sm750_share stands for a presentation of two frame buffer
- that use one sm750 adaptor, it is similar to the super class of lynx_share
- in C++
-*/
+/* sm750_share stands for a presentation of two frame buffers
+   that use one sm750 adaptor; it embeds lynx_share, similar to a
+   derived class in C++
+ */
-struct sm750_share{
+struct sm750_share {
/* it's better to put lynx_share struct to the first place of sm750_share */
struct lynx_share share;
struct sm750_state state;
int hwCursor;
- /* 0: no hardware cursor
- 1: primary crtc hw cursor enabled,
- 2: secondary crtc hw cursor enabled
- 3: both ctrc hw cursor enabled
+ /* 0: no hardware cursor
+ 1: primary crtc hw cursor enabled,
+ 2: secondary crtc hw cursor enabled
+	   3: both crtc hw cursors enabled
*/
};
-int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev);
+int hw_sm750_map(struct lynx_share *share, struct pci_dev *pdev);
int hw_sm750_inithw(struct lynx_share*, struct pci_dev *);
void hw_sm750_initAccel(struct lynx_share *);
int hw_sm750_deWait(void);
@@ -92,10 +92,10 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc*, struct fb_var_screeninfo*, struct
int hw_sm750_setColReg(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
int hw_sm750_setBLANK(struct lynxfb_output*, int);
int hw_sm750le_setBLANK(struct lynxfb_output*, int);
-void hw_sm750_crtc_clear(struct lynxfb_crtc*);
-void hw_sm750_output_clear(struct lynxfb_output*);
+void hw_sm750_crtc_clear(struct lynxfb_crtc *);
+void hw_sm750_output_clear(struct lynxfb_output *);
int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
- const struct fb_var_screeninfo *var,
- const struct fb_info *info);
+ const struct fb_var_screeninfo *var,
+ const struct fb_info *info);
#endif
diff --git a/drivers/staging/sm7xxfb/Kconfig b/drivers/staging/sm7xxfb/Kconfig
deleted file mode 100644
index e2922ae3a3ee..000000000000
--- a/drivers/staging/sm7xxfb/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config FB_SM7XX
- tristate "Silicon Motion SM7XX framebuffer support"
- depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the Silicon Motion SM710, SM712, SM721
- and SM722 chips.
-
- This driver is also available as a module. The module will be
- called sm7xxfb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/staging/sm7xxfb/Makefile b/drivers/staging/sm7xxfb/Makefile
deleted file mode 100644
index 48f471cf9f36..000000000000
--- a/drivers/staging/sm7xxfb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb.o
diff --git a/drivers/staging/sm7xxfb/TODO b/drivers/staging/sm7xxfb/TODO
deleted file mode 100644
index 7cb0b242f204..000000000000
--- a/drivers/staging/sm7xxfb/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-TODO:
-- Dual head support
-- 2D acceleration support
-- use kernel coding style
-- refine the code and remove unused code
-- move it to drivers/video/fbdev/sm7xxfb.c
-
-Please send any patches to
- Greg Kroah-Hartman <greg@kroah.com>
- Sudip Mukherjee <sudipm.mukherjee@gmail.com>
- Teddy Wang <teddy.wang@siliconmotion.com>
- Sudip Mukherjee <sudip@vectorindia.org>
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index d45c8afb041d..d4d45989b605 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -63,7 +63,8 @@ void synth_buffer_add(char ch)
{
if (!synth->alive) {
/* This makes sure that we won't stop TTYs if there is no synth
- * to restart them */
+ * to restart them
+ */
return;
}
if (synth_buffer_free() <= 100) {
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 9ea16c5b4d6c..f061747546a6 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -1,5 +1,6 @@
/* Internationalization implementation. Includes definitions of English
- * string arrays, and the i18n pointer. */
+ * string arrays, and the i18n pointer.
+ */
#include <linux/slab.h> /* For kmalloc. */
#include <linux/ctype.h>
diff --git a/drivers/staging/speakup/i18n.h b/drivers/staging/speakup/i18n.h
index 326d086f9d5a..8fcce566653f 100644
--- a/drivers/staging/speakup/i18n.h
+++ b/drivers/staging/speakup/i18n.h
@@ -224,11 +224,11 @@ struct msg_group_t {
enum msg_index_t end;
};
-extern char *spk_msg_get(enum msg_index_t index);
-extern ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length);
-extern struct msg_group_t *spk_find_msg_group(const char *group_name);
-extern void spk_reset_msg_group(struct msg_group_t *group);
-extern void spk_initialize_msgs(void);
-extern void spk_free_user_msgs(void);
+char *spk_msg_get(enum msg_index_t index);
+ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length);
+struct msg_group_t *spk_find_msg_group(const char *group_name);
+void spk_reset_msg_group(struct msg_group_t *group);
+void spk_initialize_msgs(void);
+void spk_free_user_msgs(void);
#endif
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 94756742136f..02d5c706aee7 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -165,7 +165,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
synth_printf("\n");
return 1;
}
- cur_item = letter_offsets[ch-'a'];
+ cur_item = letter_offsets[ch-'a'];
} else if (type == KT_CUR) {
if (ch == 0
&& (MSG_FUNCNAMES_START + cur_item + 1) <=
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 0211df60004a..958add4839bc 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -240,7 +240,8 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
cp1 += 2; /* now pointing at shift states */
/* dump num_keys+1 as first row is shift states + flags,
- * each subsequent row is key + states */
+ * each subsequent row is key + states
+ */
for (n = 0; n <= num_keys; n++) {
for (i = 0; i <= nstates; i++) {
ch = *cp1++;
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 6c4f9a1ed07f..63c59bc89b04 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -128,7 +128,8 @@ static char *phonetic[] = {
/* array of 256 char pointers (one for each character description)
* initialized to default_chars and user selectable via
- * /proc/speakup/characters */
+ * /proc/speakup/characters
+ */
char *spk_characters[256];
char *spk_default_chars[256] = {
@@ -194,7 +195,8 @@ char *spk_default_chars[256] = {
/* array of 256 u_short (one for each character)
* initialized to default_chartab and user selectable via
- * /sys/module/speakup/parameters/chartab */
+ * /sys/module/speakup/parameters/chartab
+ */
u_short spk_chartab[256];
static u_short default_chartab[256] = {
@@ -540,7 +542,8 @@ static void say_next_char(struct vc_data *vc)
* see if there is a word starting on the next position to the right
* and return that word if it exists. If it does not exist it will
* move left to the beginning of any previous word on the line or the
- * beginning off the line whichever comes first.. */
+ * beginning of the line, whichever comes first.
+ */
static u_long get_word(struct vc_data *vc)
{
@@ -1113,7 +1116,8 @@ static void spkup_write(const char *in_buf, int count)
* suppress multiple to get rid of long pauses and
* clear repeat count
* so if someone has
- * repeats on you don't get nothing repeated count */
+ * repeats on, you don't get a spurious repeat count
+ */
if (ch != old_ch)
synth_printf("%c", ch);
else
@@ -1509,7 +1513,8 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
if (spk_no_intr)
spk_do_flush();
/* the key press flushes if !no_inter but we want to flush on cursor
- * moves regardless of no_inter state */
+ * moves regardless of no_inter state
+ */
is_cursor = value + 1;
old_cursor_pos = vc->vc_pos;
old_cursor_x = vc->vc_x;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index a0315701c7d9..98af3b1f2d2a 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -114,7 +114,8 @@ int speakup_set_selection(struct tty_struct *tty)
obp = bp;
if (!((i + 2) % vc->vc_size_row)) {
/* strip trailing blanks from line and add newline,
- unless non-space at end of line. */
+ * unless non-space at end of line.
+ */
if (obp != bp) {
bp = obp;
*bp++ = '\r';
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 1d9d51bdf517..66ac999a0323 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -51,7 +51,8 @@ const struct old_serial_port *spk_serial_init(int index)
}
/* Disable UART interrupts, set DTR and RTS high
- * and set speed. */
+ * and set speed.
+ */
outb(cval | UART_LCR_DLAB, ser->port + UART_LCR); /* set DLAB */
outb(quot & 0xff, ser->port + UART_DLL); /* LS of divisor */
outb(quot >> 8, ser->port + UART_DLM); /* MS of divisor */
@@ -145,7 +146,8 @@ int spk_wait_for_xmitr(void)
synth->alive = 0;
/* No synth any more, so nobody will restart TTYs, and we thus
* need to do it ourselves. Now that there is no synth we can
- * let application flood anyway */
+ * let application flood anyway
+ */
speakup_start_ttys();
timeouts = 0;
return 0;
@@ -163,7 +165,8 @@ int spk_wait_for_xmitr(void)
/* CTS */
if (--tmout == 0) {
/* pr_warn("%s: timed out (cts)\n",
- * synth->long_name); */
+ * synth->long_name);
+ */
timeouts++;
return 0;
}
@@ -217,4 +220,3 @@ void spk_serial_release(void)
speakup_info.port_tts = 0;
}
EXPORT_SYMBOL_GPL(spk_serial_release);
-
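Note: the spk_serial_init() hunk above programs a classic 8250-style UART divisor. A minimal sketch of that sequence follows — a hypothetical helper, not part of this patch, assuming the standard 1.8432 MHz base clock and the register offsets from <linux/serial_reg.h>:

#include <linux/io.h>
#include <linux/serial_reg.h>

/* Hypothetical helper: set an 8250-style UART to 8N1 at the requested
 * baud rate, using the same DLAB/DLL/DLM sequence as spk_serial_init().
 */
static void example_set_baud(unsigned int port, unsigned int baud)
{
	unsigned int quot = 115200 / baud;	/* assumes 1.8432 MHz clock */
	unsigned char cval = UART_LCR_WLEN8;	/* 8 data bits, no parity, 1 stop */

	outb(cval | UART_LCR_DLAB, port + UART_LCR);	/* unlock divisor latch */
	outb(quot & 0xff, port + UART_DLL);		/* LS byte of divisor */
	outb(quot >> 8, port + UART_DLM);		/* MS byte of divisor */
	outb(cval, port + UART_LCR);			/* relock, keep 8N1 */
}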
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7f4962427f3..df74c912da72 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -42,46 +42,44 @@
#define IS_CHAR(x, type) (spk_chartab[((u_char)x)]&type)
#define IS_TYPE(x, type) ((spk_chartab[((u_char)x)]&type) == type)
-extern int speakup_thread(void *data);
-extern void spk_reset_default_chars(void);
-extern void spk_reset_default_chartab(void);
-extern void synth_start(void);
+int speakup_thread(void *data);
+void spk_reset_default_chars(void);
+void spk_reset_default_chartab(void);
+void synth_start(void);
void synth_insert_next_index(int sent_num);
void spk_reset_index_count(int sc);
void spk_get_index_count(int *linecount, int *sentcount);
-extern int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
-extern char *spk_strlwr(char *s);
-extern char *spk_s2uchar(char *start, char *dest);
-extern int speakup_kobj_init(void);
-extern void speakup_kobj_exit(void);
-extern int spk_chartab_get_value(char *keyword);
-extern void speakup_register_var(struct var_t *var);
-extern void speakup_unregister_var(enum var_id_t var_id);
-extern struct st_var_header *spk_get_var_header(enum var_id_t var_id);
-extern struct st_var_header *spk_var_header_by_name(const char *name);
-extern struct punc_var_t *spk_get_punc_var(enum var_id_t var_id);
-extern int spk_set_num_var(int val, struct st_var_header *var, int how);
-extern int spk_set_string_var(const char *page, struct st_var_header *var,
- int len);
-extern int spk_set_mask_bits(const char *input, const int which, const int how);
+int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
+char *spk_strlwr(char *s);
+char *spk_s2uchar(char *start, char *dest);
+int speakup_kobj_init(void);
+void speakup_kobj_exit(void);
+int spk_chartab_get_value(char *keyword);
+void speakup_register_var(struct var_t *var);
+void speakup_unregister_var(enum var_id_t var_id);
+struct st_var_header *spk_get_var_header(enum var_id_t var_id);
+struct st_var_header *spk_var_header_by_name(const char *name);
+struct punc_var_t *spk_get_punc_var(enum var_id_t var_id);
+int spk_set_num_var(int val, struct st_var_header *var, int how);
+int spk_set_string_var(const char *page, struct st_var_header *var, int len);
+int spk_set_mask_bits(const char *input, const int which, const int how);
extern special_func spk_special_handler;
-extern int spk_handle_help(struct vc_data *vc, u_char type, u_char ch,
- u_short key);
-extern int synth_init(char *name);
-extern void synth_release(void);
+int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key);
+int synth_init(char *name);
+void synth_release(void);
-extern void spk_do_flush(void);
-extern void speakup_start_ttys(void);
-extern void synth_buffer_add(char ch);
-extern void synth_buffer_clear(void);
-extern void speakup_clear_selection(void);
-extern int speakup_set_selection(struct tty_struct *tty);
-extern int speakup_paste_selection(struct tty_struct *tty);
-extern void speakup_cancel_paste(void);
-extern void speakup_register_devsynth(void);
-extern void speakup_unregister_devsynth(void);
-extern void synth_write(const char *buf, size_t count);
-extern int synth_supports_indexing(void);
+void spk_do_flush(void);
+void speakup_start_ttys(void);
+void synth_buffer_add(char ch);
+void synth_buffer_clear(void);
+void speakup_clear_selection(void);
+int speakup_set_selection(struct tty_struct *tty);
+int speakup_paste_selection(struct tty_struct *tty);
+void speakup_cancel_paste(void);
+void speakup_register_devsynth(void);
+void speakup_unregister_devsynth(void);
+void synth_write(const char *buf, size_t count);
+int synth_supports_indexing(void);
extern struct vc_data *spk_sel_cons;
extern unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
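Note: the conversions above rely on the fact that function declarations have external linkage by default in C, so `extern` is pure noise on prototypes — unlike on the object declarations (spk_special_handler, spk_sel_cons), which keep it. A two-line illustration:

/* Illustration only: both declarations below are equivalent; checkpatch
 * flags the explicit `extern` on function prototypes as redundant.
 */
extern int synth_init(char *name);	/* old style */
int synth_init(char *name);		/* same linkage, preferred */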
diff --git a/drivers/staging/speakup/speakup_acnt.h b/drivers/staging/speakup/speakup_acnt.h
index 6376fca9e0e1..107ec1155f51 100644
--- a/drivers/staging/speakup/speakup_acnt.h
+++ b/drivers/staging/speakup/speakup_acnt.h
@@ -6,10 +6,12 @@
/* Port Status Flags */
#define SYNTH_READABLE 0x01 /* mask for bit which is nonzero if a
- byte can be read from the data port */
+ * byte can be read from the data port
+ */
#define SYNTH_WRITABLE 0x02 /* mask for RDY bit, which when set to
- 1, indicates the data port is ready
- to accept a byte of data. */
+ * 1, indicates the data port is ready
+ * to accept a byte of data.
+ */
#define SYNTH_QUIET 'S' /* synth is not speaking */
#define SYNTH_FULL 'F' /* synth is full. */
#define SYNTH_ALMOST_EMPTY 'M' /* synth has less than 2 seconds of text left */
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 437e13a85943..4893fef3f894 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -88,8 +88,9 @@
#define CTRL_last_index 0x0b00 /* get last index spoken */
#define CTRL_io_priority 0x0c00 /* change i/o priority */
#define CTRL_free_mem 0x0d00 /* get free paragraphs on module */
-#define CTRL_get_lang 0x0e00 /* return bit mask of loaded
- * languages */
+#define CTRL_get_lang 0x0e00 /* return bit mask of loaded
+ * languages
+ */
#define CMD_test 0x2000 /* self-test request */
#define TEST_mask 0x0F00 /* isolate test field */
#define TEST_null 0x0000 /* no test requested */
@@ -500,4 +501,3 @@ MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/staging/speakup/speakup_dtlk.h b/drivers/staging/speakup/speakup_dtlk.h
index d951d18c5792..46d885fcfb20 100644
--- a/drivers/staging/speakup/speakup_dtlk.h
+++ b/drivers/staging/speakup/speakup_dtlk.h
@@ -4,31 +4,37 @@
#define SYNTH_CLEAR 0x18 /* stops speech */
/* TTS Port Status Flags */
#define TTS_READABLE 0x80 /* mask for bit which is nonzero if a
- byte can be read from the TTS port */
+ * byte can be read from the TTS port
+ */
#define TTS_SPEAKING 0x40 /* mask for SYNC bit, which is nonzero
- while DoubleTalk is producing
- output with TTS, PCM or CVSD
- synthesizers or tone generators
- (that is, all but LPC) */
+ * while DoubleTalk is producing
+ * output with TTS, PCM or CVSD
+ * synthesizers or tone generators
+ * (that is, all but LPC)
+ */
#define TTS_SPEAKING2 0x20 /* mask for SYNC2 bit,
- which falls to zero up to 0.4 sec
- before speech stops */
+ * which falls to zero up to 0.4 sec
+ * before speech stops
+ */
#define TTS_WRITABLE 0x10 /* mask for RDY bit, which when set to
- 1, indicates the TTS port is ready
- to accept a byte of data. The RDY
- bit goes zero 2-3 usec after
- writing, and goes 1 again 180-190
- usec later. */
+ * 1, indicates the TTS port is ready
+ * to accept a byte of data. The RDY
+ * bit goes zero 2-3 usec after
+ * writing, and goes 1 again 180-190
+ * usec later.
+ */
#define TTS_ALMOST_FULL 0x08 /* mask for AF bit: When set to 1,
- indicates that less than 300 bytes
- are available in the TTS input
- buffer. AF is always 0 in the PCM,
- TGN and CVSD modes. */
+ * indicates that less than 300 bytes
+ * are available in the TTS input
+ * buffer. AF is always 0 in the PCM,
+ * TGN and CVSD modes.
+ */
#define TTS_ALMOST_EMPTY 0x04 /* mask for AE bit: When set to 1,
- indicates that less than 300 bytes
- are remaining in DoubleTalk's input
- (TTS or PCM) buffer. AE is always 1
- in the TGN and CVSD modes. */
+ * indicates that less than 300 bytes
+ * are remaining in DoubleTalk's input
+ * (TTS or PCM) buffer. AE is always 1
+ * in the TGN and CVSD modes.
+ */
/* data returned by Interrogate command */
struct synth_settings {
@@ -45,10 +51,12 @@ struct synth_settings {
u_char ext_dict_loaded; /* 1=exception dictionary loaded */
u_char ext_dict_status; /* 1=exception dictionary enabled */
u_char free_ram; /* # pages (truncated) remaining for
- * text buffer */
+ * text buffer
+ */
u_char articulation; /* nA; 0-9 */
u_char reverb; /* nR; 0-9 */
u_char eob; /* 7Fh value indicating end of
- * parameter block */
+ * parameter block
+ */
u_char has_indexing; /* nonzero if indexing is implemented */
};
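Note: every speakup hunk in this series applies the same mechanical restyle — each continuation line of a multi-line comment gains a leading asterisk and the closing delimiter moves to its own line. In miniature, on a hypothetical flag:

#define EXAMPLE_READY 0x01 /* mask for a hypothetical ready bit; in the
			    * preferred kernel style every continuation
			    * line starts with an asterisk and the
			    * terminator sits alone on its own line
			    */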
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index fb31bb95d83a..366358b600a1 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -356,4 +356,3 @@ MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_DESCRIPTION("Speakup userspace software synthesizer support");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index d95efb702fe4..90c383ee7c3f 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -48,7 +48,8 @@ int speakup_thread(void *data)
kd_mksound(our_sound.freq, our_sound.jiffies);
if (synth && synth->catch_up && synth->alive) {
/* It is up to the callee to take the lock, so that it
- * can sleep whenever it likes */
+ * can sleep whenever it likes
+ */
synth->catch_up(synth);
}
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 1b0d1c08741f..75bf40c14c79 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -269,7 +269,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
* if input is null uses the defaults.
* values for how: 0 clears bits of chars supplied,
- * 1 clears allk, 2 sets bits for chars */
+ * 1 clears all, 2 sets bits for chars
+ */
int spk_set_mask_bits(const char *input, const int which, const int how)
{
u_char *cp;
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 0f524bb7b41d..1f9ba8beb061 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1126,7 +1126,6 @@ MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
static struct i2c_driver synaptics_rmi4_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &synaptics_rmi4_dev_pm_ops,
},
.probe = synaptics_rmi4_probe,
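Note: dropping `.owner = THIS_MODULE` is safe because the i2c core fills it in at registration time; `i2c_add_driver()` is a macro that expands to `i2c_register_driver(THIS_MODULE, driver)`. A sketch of the registration this assumes (whether this driver uses the helper macro is not shown in the hunk):

/* Sketch: module_i2c_driver() generates the module init/exit pair and
 * routes through i2c_add_driver(), which supplies THIS_MODULE itself.
 */
module_i2c_driver(synaptics_rmi4_driver);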
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index 778f9d05f98a..624abe66c20c 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig UNISYSSPAR
bool "Unisys SPAR driver support"
- depends on X86_64
+ depends on X86_64 && !UML
select PCI
select ACPI
---help---
diff --git a/drivers/staging/unisys/include/channel_guid.h b/drivers/staging/unisys/include/channel_guid.h
index 706363fc3e9a..17cb499cb53c 100644
--- a/drivers/staging/unisys/include/channel_guid.h
+++ b/drivers/staging/unisys/include/channel_guid.h
@@ -17,35 +17,31 @@
* CHANNEL Guids
*/
-/* Used in IOChannel
- * {414815ed-c58c-11da-95a9-00e08161165f}
- */
+/* {414815ed-c58c-11da-95a9-00e08161165f} */
#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x414815ed, 0xc58c, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
static const uuid_le spar_vhba_channel_protocol_uuid =
SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
+ "414815ed-c58c-11da-95a9-00e08161165f"
-/* Used in IOChannel
- * {8cd5994d-c58e-11da-95a9-00e08161165f}
- */
+/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
static const uuid_le spar_vnic_channel_protocol_uuid =
SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
+ "8cd5994d-c58e-11da-95a9-00e08161165f"
-/* Used in IOChannel
- * {72120008-4AAB-11DC-8530-444553544200}
- */
+/* {72120008-4AAB-11DC-8530-444553544200} */
#define SPAR_SIOVM_UUID \
UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
-/* Used in visornoop/visornoop_main.c
- * {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
- */
+/* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} */
#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
@@ -53,9 +49,7 @@ static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
static const uuid_le spar_controldirector_channel_protocol_uuid =
SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
-/* Used in visorchipset/visorchipset_main.c
- * {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
- */
+/* {b4e79625-aede-4eaa-9e11-d3eddcd4504c} */
#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index e4a21e42e868..9235536fa75f 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -113,7 +113,8 @@ struct visor_driver {
struct driver_attribute version_attr;
};
-#define to_visor_driver(x) container_of(x, struct visor_driver, driver)
+#define to_visor_driver(x) ((x) ? \
+ (container_of(x, struct visor_driver, driver)) : (NULL))
/** A device type for things "plugged" into the visorbus bus */
@@ -200,6 +201,8 @@ bool visorchannel_signalremove(struct visorchannel *channel, u32 queue,
void *msg);
bool visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
void *msg);
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
+
int visorchannel_signalqueue_slots_avail(struct visorchannel *channel,
u32 queue);
int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
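Note: visorchannel_signalempty() is new in this header. A hypothetical caller, using only the accessors declared above, might drain a queue and then assert emptiness:

/* Hypothetical usage sketch, not from this patch. */
static void example_drain_queue(struct visorchannel *channel, u32 queue,
				void *msg)
{
	while (visorchannel_signalremove(channel, queue, msg))
		;	/* process each dequeued msg here */

	WARN_ON(!visorchannel_signalempty(channel, queue));
}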
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index a50d9cf4bed7..ec25366b127c 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
index f74f5d8c2820..3c97ebac4f32 100644
--- a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
+++ b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
@@ -1,12 +1,11 @@
/* controlvmcompletionstatus.c
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
index 57dd93e0cc83..b08b6ecc8d31 100644
--- a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
+++ b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/periodic_work.c b/drivers/staging/unisys/visorbus/periodic_work.c
index 5e56088cf855..a3631c3591f6 100644
--- a/drivers/staging/unisys/visorbus/periodic_work.c
+++ b/drivers/staging/unisys/visorbus/periodic_work.c
@@ -1,12 +1,11 @@
/* periodic_work.c
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 5ed83a3f1428..80e64477e547 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
index 9b6d3e69355c..f59fd8a523c4 100644
--- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 6db47196c189..2309f5f2b238 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -1,12 +1,11 @@
/* visorbus_main.c
*
- * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * Copyright © 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -70,6 +69,38 @@ static const struct attribute_group *visorbus_bus_groups[] = {
NULL,
};
+/*
+ * DEVICE type attributes
+ *
+ * The modalias file will contain the guid of the device.
+ */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct visor_device *vdev;
+ uuid_le guid;
+
+ vdev = to_visor_device(dev);
+ guid = visorchannel_get_uuid(vdev->visorchannel);
+ return snprintf(buf, PAGE_SIZE, "visorbus:%pUl\n", &guid);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *visorbus_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+
+/* sysfs example for bridge-only sysfs files using device_types */
+static const struct attribute_group visorbus_dev_group = {
+ .attrs = visorbus_dev_attrs,
+};
+
+static const struct attribute_group *visorbus_dev_groups[] = {
+ &visorbus_dev_group,
+ NULL,
+};
+
/** This describes the TYPE of bus.
* (Don't confuse this with an INSTANCE of the bus.)
*/
@@ -77,6 +108,7 @@ struct bus_type visorbus_type = {
.name = "visorbus",
.match = visorbus_match,
.uevent = visorbus_uevent,
+ .dev_groups = visorbus_dev_groups,
.bus_groups = visorbus_bus_groups,
};
@@ -129,7 +161,13 @@ static LIST_HEAD(list_all_device_instances);
static int
visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "VERSION=%s", VERSION))
+ struct visor_device *dev;
+ uuid_le guid;
+
+ dev = to_visor_device(xdev);
+ guid = visorchannel_get_uuid(dev->visorchannel);
+
+ if (add_uevent_var(env, "MODALIAS=visorbus:%pUl", &guid))
return -ENOMEM;
return 0;
}
@@ -218,9 +256,9 @@ visorbus_release_device(struct device *xdev)
struct devmajorminor_attribute {
struct attribute attr;
int slot;
- ssize_t (*show)(struct visor_device *, int slot, char *buf);
- ssize_t (*store)(struct visor_device *, int slot, const char *buf,
- size_t count);
+ ssize_t (*show)(struct visor_device *, int slot, char *buf);
+ ssize_t (*store)(struct visor_device *, int slot, const char *buf,
+ size_t count);
};
static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
@@ -281,12 +319,11 @@ devmajorminor_create_file(struct visor_device *dev, const char *name,
rc = -ENOMEM;
goto away;
}
- myattr = kmalloc(sizeof(*myattr), GFP_KERNEL);
+ myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
if (!myattr) {
rc = -ENOMEM;
goto away;
}
- memset(myattr, 0, sizeof(struct devmajorminor_attribute));
myattr->show = DEVMAJORMINOR_ATTR;
myattr->store = NULL;
myattr->slot = slot;
@@ -471,6 +508,7 @@ static struct attribute *channel_attrs[] = {
&dev_attr_typeguid.attr,
&dev_attr_zoneguid.attr,
&dev_attr_typename.attr,
+ NULL
};
static struct attribute_group channel_attr_grp = {
@@ -478,7 +516,7 @@ static struct attribute_group channel_attr_grp = {
.attrs = channel_attrs,
};
-static const struct attribute_group *visorbus_dev_groups[] = {
+static const struct attribute_group *visorbus_channel_groups[] = {
&channel_attr_grp,
NULL
};
@@ -678,7 +716,7 @@ unregister_driver_attributes(struct visor_driver *drv)
static void
dev_periodic_work(void *xdev)
{
- struct visor_device *dev = (struct visor_device *)xdev;
+ struct visor_device *dev = xdev;
struct visor_driver *drv = to_visor_driver(dev->device.driver);
down(&dev->visordriver_callback_lock);
@@ -937,7 +975,7 @@ create_visor_device(struct visor_device *dev)
sema_init(&dev->visordriver_callback_lock, 1); /* unlocked */
dev->device.bus = &visorbus_type;
- dev->device.groups = visorbus_dev_groups;
+ dev->device.groups = visorbus_channel_groups;
device_initialize(&dev->device);
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
@@ -1043,10 +1081,10 @@ write_vbus_chp_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
if (hdr_info->chp_info_offset == 0)
- return -1;
+ return -1;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -1;
return 0;
}
@@ -1061,10 +1099,10 @@ write_vbus_bus_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
if (hdr_info->bus_info_offset == 0)
- return -1;
+ return -1;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -1;
return 0;
}
@@ -1081,10 +1119,10 @@ write_vbus_dev_info(struct visorchannel *chan,
(hdr_info->device_info_struct_bytes * devix);
if (hdr_info->dev_info_offset == 0)
- return -1;
+ return -1;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -1;
return 0;
}
@@ -1106,7 +1144,7 @@ fix_vbus_dev_info(struct visor_device *visordev)
struct spar_vbus_headerinfo *hdr_info;
if (!visordev->device.driver)
- return;
+ return;
hdr_info = (struct spar_vbus_headerinfo *)visordev->vbus_hdr_info;
if (!hdr_info)
@@ -1319,11 +1357,11 @@ static void
pause_state_change_complete(struct visor_device *dev, int status)
{
if (!dev->pausing)
- return;
+ return;
dev->pausing = false;
if (!chipset_responders.device_pause) /* this can never happen! */
- return;
+ return;
/* Notify the chipset driver that the pause is complete, which
* will presumably want to send some sort of response to the
@@ -1339,11 +1377,11 @@ static void
resume_state_change_complete(struct visor_device *dev, int status)
{
if (!dev->resuming)
- return;
+ return;
dev->resuming = false;
if (!chipset_responders.device_resume) /* this can never happen! */
- return;
+ return;
/* Notify the chipset driver that the resume is complete,
* which will presumably want to send some sort of response to
@@ -1367,14 +1405,14 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
else
notify_func = chipset_responders.device_resume;
if (!notify_func)
- goto away;
+ goto away;
drv = to_visor_driver(dev->device.driver);
if (!drv)
- goto away;
+ goto away;
if (dev->pausing || dev->resuming)
- goto away;
+ goto away;
/* Note that even though both drv->pause() and drv->resume
* specify a callback function, it is NOT necessary for us to
@@ -1385,7 +1423,7 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
*/
if (is_pause) {
if (!drv->pause)
- goto away;
+ goto away;
dev->pausing = true;
x = drv->pause(dev, pause_state_change_complete);
@@ -1397,7 +1435,7 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
* would never even get here in that case. */
fix_vbus_dev_info(dev);
if (!drv->resume)
- goto away;
+ goto away;
dev->resuming = true;
x = drv->resume(dev, resume_state_change_complete);
@@ -1413,7 +1451,7 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
away:
if (rc < 0) {
if (notify_func)
- (*notify_func)(dev, rc);
+ (*notify_func)(dev, rc);
}
}
@@ -1469,8 +1507,8 @@ visorbus_init(void)
away:
if (rc)
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+ POSTCODE_SEVERITY_ERR);
return rc;
}
@@ -1495,9 +1533,8 @@ visorbus_exit(void)
list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
struct visor_device *dev = list_entry(listentry,
- struct
- visor_device,
- list_all);
+ struct visor_device,
+ list_all);
remove_bus_instance(dev);
}
remove_bus_type();
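Note: the modalias attribute and the MODALIAS uevent variable added above are the two halves of module autoloading — the bus emits `visorbus:<guid>` and a client driver advertises the matching alias so udev can modprobe it. The visornic hunk later in this series does exactly that, using the _STR macro added to channel_guid.h:

MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);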
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 2f12483e38ab..39edd2018453 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -1,12 +1,11 @@
/* visorchipset.h
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 20b63496e9f2..2693c46afdc0 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -1,12 +1,11 @@
/* visorchannel_funcs.c
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,6 +20,7 @@
*/
#include <linux/uuid.h>
+#include <linux/io.h>
#include "version.h"
#include "visorbus.h"
@@ -36,7 +36,7 @@ static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID;
struct visorchannel {
u64 physaddr;
ulong nbytes;
- void __iomem *mapped;
+ void *mapped;
bool requested;
struct channel_header chan_hdr;
uuid_le guid;
@@ -93,7 +93,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
}
}
- channel->mapped = ioremap_cache(physaddr, size);
+ channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(physaddr, size);
goto cleanup;
@@ -113,7 +113,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
guid = channel->chan_hdr.chtype;
- iounmap(channel->mapped);
+ memunmap(channel->mapped);
if (channel->requested)
release_mem_region(channel->physaddr, channel->nbytes);
channel->mapped = NULL;
@@ -126,7 +126,8 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
}
}
- channel->mapped = ioremap_cache(channel->physaddr, channel_bytes);
+ channel->mapped = memremap(channel->physaddr, channel_bytes,
+ MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(channel->physaddr, channel_bytes);
goto cleanup;
@@ -167,7 +168,7 @@ visorchannel_destroy(struct visorchannel *channel)
if (!channel)
return;
if (channel->mapped) {
- iounmap(channel->mapped);
+ memunmap(channel->mapped);
if (channel->requested)
release_mem_region(channel->physaddr, channel->nbytes);
}
@@ -241,7 +242,7 @@ visorchannel_read(struct visorchannel *channel, ulong offset,
if (offset + nbytes > channel->nbytes)
return -EIO;
- memcpy_fromio(local, channel->mapped + offset, nbytes);
+ memcpy(local, channel->mapped + offset, nbytes);
return 0;
}
@@ -259,10 +260,11 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
if (offset < chdr_size) {
copy_size = min(chdr_size - offset, nbytes);
- memcpy(&channel->chan_hdr + offset, local, copy_size);
+ memcpy(((char *)(&channel->chan_hdr)) + offset,
+ local, copy_size);
}
- memcpy_toio(channel->mapped + offset, local, nbytes);
+ memcpy(channel->mapped + offset, local, nbytes);
return 0;
}
@@ -416,11 +418,12 @@ bool
visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
{
bool rc;
+ unsigned long flags;
if (channel->needs_lock) {
- spin_lock(&channel->remove_lock);
+ spin_lock_irqsave(&channel->remove_lock, flags);
rc = signalremove_inner(channel, queue, msg);
- spin_unlock(&channel->remove_lock);
+ spin_unlock_irqrestore(&channel->remove_lock, flags);
} else {
rc = signalremove_inner(channel, queue, msg);
}
@@ -429,6 +432,27 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);
+bool
+visorchannel_signalempty(struct visorchannel *channel, u32 queue)
+{
+ unsigned long flags = 0;
+ struct signal_queue_header sig_hdr;
+ bool rc = false;
+
+ if (channel->needs_lock)
+ spin_lock_irqsave(&channel->remove_lock, flags);
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ rc = true;
+ if (sig_hdr.head == sig_hdr.tail)
+ rc = true;
+ if (channel->needs_lock)
+ spin_unlock_irqrestore(&channel->remove_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalempty);
+
static bool
signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
{
@@ -470,11 +494,12 @@ bool
visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
{
bool rc;
+ unsigned long flags;
if (channel->needs_lock) {
- spin_lock(&channel->insert_lock);
+ spin_lock_irqsave(&channel->insert_lock, flags);
rc = signalinsert_inner(channel, queue, msg);
- spin_unlock(&channel->insert_lock);
+ spin_unlock_irqrestore(&channel->insert_lock, flags);
} else {
rc = signalinsert_inner(channel, queue, msg);
}
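Note: the conversions above replace ioremap_cache()/memcpy_fromio() with memremap()/memcpy() because the channel lives in ordinary write-back RAM rather than MMIO, so the __iomem annotation and the *_io copy helpers no longer apply. The pattern, reduced to a hypothetical helper:

#include <linux/io.h>
#include <linux/string.h>

/* Sketch only: map ordinary RAM write-back and access it directly. */
static int example_read_channel(u64 physaddr, void *buf, size_t nbytes)
{
	void *map = memremap(physaddr, nbytes, MEMREMAP_WB);

	if (!map)
		return -ENOMEM;
	memcpy(buf, map, nbytes);	/* plain access: no memcpy_fromio() */
	memunmap(map);
	return 0;
}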
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index bb8087e70127..94419c36d2af 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1,12 +1,11 @@
/* visorchipset_main.c
*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -119,7 +118,7 @@ static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
- u8 __iomem *ptr; /* pointer to base address of payload pool */
+ u8 *ptr; /* pointer to base address of payload pool */
u64 offset; /* offset from beginning of controlvm
* channel to beginning of payload * pool */
u32 bytes; /* number of bytes in payload pool */
@@ -401,21 +400,22 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
p = __va((unsigned long) (addr));
memcpy(ctx->data, p, bytes);
} else {
- void __iomem *mapping;
+ void *mapping;
if (!request_mem_region(addr, bytes, "visorchipset")) {
rc = NULL;
goto cleanup;
}
- mapping = ioremap_cache(addr, bytes);
+ mapping = memremap(addr, bytes, MEMREMAP_WB);
if (!mapping) {
release_mem_region(addr, bytes);
rc = NULL;
goto cleanup;
}
- memcpy_fromio(ctx->data, mapping, bytes);
+ memcpy(ctx->data, mapping, bytes);
release_mem_region(addr, bytes);
+ memunmap(mapping);
}
ctx->byte_stream = true;
@@ -1247,10 +1247,11 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
- visorchannel = visorchannel_create(cmd->create_device.channel_addr,
- cmd->create_device.channel_bytes,
- GFP_KERNEL,
- cmd->create_device.data_type_uuid);
+ visorchannel =
+ visorchannel_create_with_lock(cmd->create_device.channel_addr,
+ cmd->create_device.channel_bytes,
+ GFP_KERNEL,
+ cmd->create_device.data_type_uuid);
if (!visorchannel) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
@@ -1327,7 +1328,7 @@ static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
struct visor_controlvm_payload_info *info)
{
- u8 __iomem *payload = NULL;
+ u8 *payload = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
if (!info) {
@@ -1339,7 +1340,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
goto cleanup;
}
- payload = ioremap_cache(phys_addr + offset, bytes);
+ payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
if (!payload) {
rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
goto cleanup;
@@ -1352,7 +1353,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
cleanup:
if (rc < 0) {
if (payload) {
- iounmap(payload);
+ memunmap(payload);
payload = NULL;
}
}
@@ -1363,7 +1364,7 @@ static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
if (info->ptr) {
- iounmap(info->ptr);
+ memunmap(info->ptr);
info->ptr = NULL;
}
memset(info, 0, sizeof(struct visor_controlvm_payload_info));
@@ -2047,6 +2048,7 @@ device_create_response(struct visor_device *dev_info, int response)
response);
kfree(dev_info->pending_msg_hdr);
+ dev_info->pending_msg_hdr = NULL;
}
static void
@@ -2381,6 +2383,9 @@ static struct acpi_driver unisys_acpi_driver = {
.remove = visorchipset_exit,
},
};
+
+MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
+
static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
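Note: MODULE_DEVICE_TABLE(acpi, unisys_device_ids) exports the driver's ACPI id table into the module alias database so the module can autoload when the firmware exposes a matching device. The table itself is outside this hunk; its shape would be as below (the "PNP0A07" entry is an illustration, not taken from the patch):

/* Assumed shape of the table referenced above. */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);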
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 7a53df00726a..7abd27a618f8 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 710074437737..8c9da7ea7845 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1,10 +1,9 @@
/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,15 +19,16 @@
*/
#include <linux/debugfs.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
+#include <linux/netdevice.h>
#include <linux/kthread.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
#include "visorbus.h"
#include "iochannel.h"
-#define VISORNIC_INFINITE_RESPONSE_WAIT 0
+#define VISORNIC_INFINITE_RSP_WAIT 0
#define VISORNICSOPENMAX 32
#define MAXDEVICES 16384
@@ -61,7 +61,6 @@ static const struct file_operations debugfs_enable_ints_fops = {
.write = enable_ints_write,
};
-static struct workqueue_struct *visornic_serverdown_workqueue;
static struct workqueue_struct *visornic_timeout_reset_workqueue;
/* GUIDS for director channel type supported by this driver. */
@@ -72,6 +71,15 @@ static struct visor_channeltype_descriptor visornic_channel_types[] = {
{ SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
{ NULL_UUID_LE, NULL }
};
+MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
+/*
+ * FIXME XXX: This next line of code must be fixed and removed before
+ * acceptance into the 'normal' part of the kernel. It is only here as a
+ * placeholder to get module autoloading functionality working for visorbus.
+ * Code must be added to scripts/mod/file2alias.c, etc., to get this working
+ * properly.
+ */
+MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
/* This is used to tell the visor bus driver which types of visor devices
* we support, and what functions to call when a visor device that we support
@@ -90,12 +98,6 @@ static struct visor_driver visornic_driver = {
.channel_interrupt = NULL,
};
-struct visor_thread_info {
- struct task_struct *task;
- struct completion has_stopped;
- int id;
-};
-
struct chanstat {
unsigned long got_rcv;
unsigned long got_enbdisack;
@@ -104,6 +106,7 @@ struct chanstat {
unsigned long sent_enbdis;
unsigned long sent_promisc;
unsigned long sent_post;
+ unsigned long sent_post_failed;
unsigned long sent_xmit;
unsigned long reject_count;
unsigned long extra_rcvbufs_sent;
@@ -111,7 +114,6 @@ struct chanstat {
struct visornic_devdata {
int devnum;
- int thread_wait_ms;
unsigned short enabled; /* 0 disabled 1 enabled to receive */
unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
* IOPART
@@ -119,7 +121,6 @@ struct visornic_devdata {
struct visor_device *dev;
char name[99];
struct list_head list_all; /* < link within list_all_devices list */
- struct kref kref;
struct net_device *netdev;
struct net_device_stats net_stats;
atomic_t interrupt_rcvd;
@@ -137,20 +138,21 @@ struct visornic_devdata {
atomic_t num_rcvbuf_in_iovm;
unsigned long alloc_failed_in_if_needed_cnt;
unsigned long alloc_failed_in_repost_rtn_cnt;
- int max_outstanding_net_xmits; /* absolute max number of outstanding
- * xmits - should never hit this
- */
- int upper_threshold_net_xmits; /* high water mark for calling
- * netif_stop_queue()
- */
- int lower_threshold_net_xmits; /* high water mark for calling
- * netif_wake_queue()
- */
+ unsigned long max_outstanding_net_xmits; /* absolute max number of
+ * outstanding xmits - should
+ * never hit this
+ */
+ unsigned long upper_threshold_net_xmits; /* high water mark for
+ * calling netif_stop_queue()
+ */
+	unsigned long lower_threshold_net_xmits; /* low water mark for calling
+ * netif_wake_queue()
+ */
struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
* xmit buffer list that have been
* sent to the IOPART end
*/
- struct work_struct serverdown_completion;
+ visorbus_state_complete_func server_down_complete_func;
struct work_struct timeout_reset;
struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
* posting/unposting rcv buffers
@@ -161,8 +163,8 @@ struct visornic_devdata {
*/
bool server_down; /* IOPART is down */
bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
+ bool going_away; /* device is being torn down */
struct dentry *eth_debugfs_dir;
- struct visor_thread_info threadinfo;
u64 interrupts_rcvd;
u64 interrupts_notme;
u64 interrupts_disabled;
@@ -194,16 +196,19 @@ struct visornic_devdata {
int queuefullmsg_logged;
struct chanstat chstat;
+ struct timer_list irq_poll_timer;
+ struct napi_struct napi;
+ struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};
-/* array of open devices maintained by open() and close() */
-static struct net_device *num_visornic_open[VISORNICSOPENMAX];
/* List of all visornic_devdata structs,
* linked via the list_all member
*/
static LIST_HEAD(list_all_devices);
static DEFINE_SPINLOCK(lock_all_devices);
+static int visornic_poll(struct napi_struct *napi, int budget);
+static void poll_for_irq(unsigned long v);
/**
* visor_copy_fragsinfo_from_skb(
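Note: the new irq_poll_timer/napi fields (plus the visornic_poll()/poll_for_irq() forward declarations above) replace the kthread-based service loop with timer-kicked NAPI. A sketch of how the timer side might be wired — only the names come from the hunk, the body is an assumption:

/* Hypothetical sketch: a periodic timer hands the real work to NAPI. */
static void poll_for_irq(unsigned long v)
{
	struct visornic_devdata *devdata = (struct visornic_devdata *)v;

	napi_schedule(&devdata->napi);	/* visornic_poll() drains the channel */
	mod_timer(&devdata->irq_poll_timer, jiffies + 2);
}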
@@ -223,9 +228,25 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
struct phys_info frags[])
{
unsigned int count = 0, ii, size, offset = 0, numfrags;
+ unsigned int total_count;
numfrags = skb_shinfo(skb)->nr_frags;
+ /*
+	 * Compute the number of fragments this skb has, and if it's more than
+	 * the frag array can hold, linearize the skb
+ */
+ total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
+ if (firstfraglen % PI_PAGE_SIZE)
+ total_count++;
+
+ if (total_count > frags_max) {
+ if (skb_linearize(skb))
+ return -EINVAL;
+ numfrags = skb_shinfo(skb)->nr_frags;
+ firstfraglen = 0;
+ }
+
while (firstfraglen) {
if (count == frags_max)
return -EINVAL;
@@ -256,8 +277,16 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
page_offset,
skb_shinfo(skb)->frags[ii].
size, count, frags_max, frags);
- if (!count)
- return -EIO;
+ /*
+ * add_physinfo_entries only returns
+					 * zero if the frags array is out of room.
+					 * That should never happen because we
+					 * fail above, if count + numfrags > frags_max.
+					 * Given that there's no recovery mechanism from putting
+ * half a packet in the I/O channel, panic here as this
+ * should never happen
+ */
+ BUG_ON(!count);
}
}
if (skb_shinfo(skb)->frag_list) {
@@ -279,222 +308,15 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
return count;
}
-/**
- * visort_thread_start - starts thread for the device
- * @thrinfo: The thread to start
- * @threadfn: Function the thread starts
- * @thrcontext: Context to pass to the thread, i.e. devdata
- * @name: string describing name of thread
- *
- * Starts a thread for the device, currently only thread is
- * process_incoming_rsps
- * Returns 0 on success;
- */
-static int visor_thread_start(struct visor_thread_info *thrinfo,
- int (*threadfn)(void *),
- void *thrcontext, char *name)
-{
- /* used to stop the thread */
- init_completion(&thrinfo->has_stopped);
- thrinfo->task = kthread_run(threadfn, thrcontext, name);
- if (IS_ERR(thrinfo->task)) {
- thrinfo->id = 0;
- return -EINVAL;
- }
- thrinfo->id = thrinfo->task->pid;
- return 0;
-}
-
-/**
- * visor_thread_stop - stop a thread for the device
- * @thrinfo: The thread to stop
- *
- * Stop the thread and wait for completion for a minute
- * Returns void.
- */
-static void visor_thread_stop(struct visor_thread_info *thrinfo)
-{
- if (!thrinfo->id)
- return; /* thread not running */
-
- kthread_stop(thrinfo->task);
- /* give up if the thread has NOT died in 1 minute */
- if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
- thrinfo->id = 0;
-}
-
-/* DebugFS code */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
-{
- int i;
- ssize_t bytes_read = 0;
- int str_pos = 0;
- struct visornic_devdata *devdata;
- char *vbuf;
-
- if (len > MAX_BUF)
- len = MAX_BUF;
- vbuf = kzalloc(len, GFP_KERNEL);
- if (!vbuf)
- return -ENOMEM;
-
- /* for each vnic channel
- * dump out channel specific data
- */
- for (i = 0; i < VISORNICSOPENMAX; i++) {
- if (!num_visornic_open[i])
- continue;
-
- devdata = netdev_priv(num_visornic_open[i]);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "Vnic i = %d\n", i);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "netdev = %s (0x%p), MAC Addr %pM\n",
- num_visornic_open[i]->name,
- num_visornic_open[i],
- num_visornic_open[i]->dev_addr);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "VisorNic Dev Info = 0x%p\n", devdata);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcv_bufs = %d\n",
- devdata->num_rcv_bufs);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " max_oustanding_next_xmits = %d\n",
- devdata->max_outstanding_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " upper_threshold_net_xmits = %d\n",
- devdata->upper_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " lower_threshold_net_xmits = %d\n",
- devdata->lower_threshold_net_xmits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " queuefullmsg_logged = %d\n",
- devdata->queuefullmsg_logged);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_rcv = %lu\n",
- devdata->chstat.got_rcv);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_enbdisack = %lu\n",
- devdata->chstat.got_enbdisack);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.got_xmit_done = %lu\n",
- devdata->chstat.got_xmit_done);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.xmit_fail = %lu\n",
- devdata->chstat.xmit_fail);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_enbdis = %lu\n",
- devdata->chstat.sent_enbdis);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_promisc = %lu\n",
- devdata->chstat.sent_promisc);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_post = %lu\n",
- devdata->chstat.sent_post);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.sent_xmit = %lu\n",
- devdata->chstat.sent_xmit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.reject_count = %lu\n",
- devdata->chstat.reject_count);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " chstat.extra_rcvbufs_sent = %lu\n",
- devdata->chstat.extra_rcvbufs_sent);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv0 = %lu\n", devdata->n_rcv0);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv1 = %lu\n", devdata->n_rcv1);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv2 = %lu\n", devdata->n_rcv2);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcvx = %lu\n", devdata->n_rcvx);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " num_rcvbuf_in_iovm = %d\n",
- atomic_read(&devdata->num_rcvbuf_in_iovm));
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_if_needed_cnt = %lu\n",
- devdata->alloc_failed_in_if_needed_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " alloc_failed_in_repost_rtn_cnt = %lu\n",
- devdata->alloc_failed_in_repost_rtn_cnt);
- /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- * " inner_loop_limit_reached_cnt = %lu\n",
- * devdata->inner_loop_limit_reached_cnt);
- */
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " found_repost_rcvbuf_cnt = %lu\n",
- devdata->found_repost_rcvbuf_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " repost_found_skb_cnt = %lu\n",
- devdata->repost_found_skb_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_repost_deficit = %lu\n",
- devdata->n_repost_deficit);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " bad_rcv_buf = %lu\n",
- devdata->bad_rcv_buf);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " n_rcv_packets_not_accepted = %lu\n",
- devdata->n_rcv_packets_not_accepted);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_rcvd = %llu\n",
- devdata->interrupts_rcvd);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_notme = %llu\n",
- devdata->interrupts_notme);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " interrupts_disabled = %llu\n",
- devdata->interrupts_disabled);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " busy_cnt = %llu\n",
- devdata->busy_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_upper_hits = %llu\n",
- devdata->flow_control_upper_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " flow_control_lower_hits = %llu\n",
- devdata->flow_control_lower_hits);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " thread_wait_ms = %d\n",
- devdata->thread_wait_ms);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- " netif_queue = %s\n",
- netif_queue_stopped(devdata->netdev) ?
- "stopped" : "running");
- }
- bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
- kfree(vbuf);
- return bytes_read;
-}
-
static ssize_t enable_ints_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
- char buf[4];
- int i, new_value;
- struct visornic_devdata *devdata;
-
- if (count >= ARRAY_SIZE(buf))
- return -EINVAL;
-
- buf[count] = '\0';
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
-
- i = kstrtoint(buf, 10, &new_value);
- if (i != 0)
- return -EFAULT;
-
- /* set all counts to new_value usually 0 */
- for (i = 0; i < VISORNICSOPENMAX; i++) {
- if (num_visornic_open[i]) {
- devdata = netdev_priv(num_visornic_open[i]);
- /* TODO update features bit in channel */
- }
- }
-
+ /*
+	 * We don't want to break the ABI here by removing this debugfs
+	 * file or making it non-writable, so let's just keep it as a
+	 * vestigial, no-op write
+ */
return count;
}
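
For reference, the pattern the removed handler implemented — bounce the user buffer into the kernel, NUL-terminate, and parse with kstrtoint() — looks like this in isolation (a minimal sketch; demo_value and the handler name are hypothetical, not part of this driver):

static int demo_value;	/* hypothetical, not driver state */

static ssize_t demo_debugfs_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	char buf[16];
	int err, new_value;

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';

	err = kstrtoint(buf, 10, &new_value);
	if (err)
		return err;	/* -EINVAL or -ERANGE from kstrtoint() */

	demo_value = new_value;
	return count;
}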
@@ -509,44 +331,29 @@ static ssize_t enable_ints_write(struct file *file,
* Returns void.
*/
static void
-visornic_serverdown_complete(struct work_struct *work)
+visornic_serverdown_complete(struct visornic_devdata *devdata)
{
- struct visornic_devdata *devdata;
struct net_device *netdev;
- unsigned long flags;
- int i = 0, count = 0;
- devdata = container_of(work, struct visornic_devdata,
- serverdown_completion);
netdev = devdata->netdev;
- /* Stop using datachan */
- visor_thread_stop(&devdata->threadinfo);
-
- /* Inform Linux that the link is down */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ /* Stop polling for interrupts */
+ del_timer_sync(&devdata->irq_poll_timer);
- /* Free the skb for XMITs that haven't been serviced by the server
- * We shouldn't have to inform Linux about these IOs because they
- * are "lost in the ethernet"
- */
- skb_queue_purge(&devdata->xmitbufhead);
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
- spin_lock_irqsave(&devdata->priv_lock, flags);
- /* free rcv buffers */
- for (i = 0; i < devdata->num_rcv_bufs; i++) {
- if (devdata->rcvbuf[i]) {
- kfree_skb(devdata->rcvbuf[i]);
- devdata->rcvbuf[i] = NULL;
- count++;
- }
- }
atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ devdata->chstat.sent_xmit = 0;
+ devdata->chstat.got_xmit_done = 0;
+
+ if (devdata->server_down_complete_func)
+ (*devdata->server_down_complete_func)(devdata->dev, 0);
devdata->server_down = true;
devdata->server_change_state = false;
+ devdata->server_down_complete_func = NULL;
}
/**
@@ -558,15 +365,31 @@ visornic_serverdown_complete(struct work_struct *work)
 * Returns 0 on success, or -EINVAL/-ENODEV on error.
*/
static int
-visornic_serverdown(struct visornic_devdata *devdata)
+visornic_serverdown(struct visornic_devdata *devdata,
+ visorbus_state_complete_func complete_func)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&devdata->priv_lock, flags);
if (!devdata->server_down && !devdata->server_change_state) {
+ if (devdata->going_away) {
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_dbg(&devdata->dev->device,
+ "%s aborting because device removal pending\n",
+ __func__);
+ return -ENODEV;
+ }
devdata->server_change_state = true;
- queue_work(visornic_serverdown_workqueue,
- &devdata->serverdown_completion);
+ devdata->server_down_complete_func = complete_func;
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ visornic_serverdown_complete(devdata);
} else if (devdata->server_change_state) {
+ dev_dbg(&devdata->dev->device, "%s changing state\n",
+ __func__);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
return -EINVAL;
- }
+ } else
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
return 0;
}
@@ -625,11 +448,14 @@ post_skb(struct uiscmdrsp *cmdrsp,
if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
cmdrsp->net.type = NET_RCV_POST;
cmdrsp->cmdtype = CMD_NET_TYPE;
- visorchannel_signalinsert(devdata->dev->visorchannel,
+ if (visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART,
- cmdrsp);
- atomic_inc(&devdata->num_rcvbuf_in_iovm);
- devdata->chstat.sent_post++;
+ cmdrsp)) {
+ atomic_inc(&devdata->num_rcvbuf_in_iovm);
+ devdata->chstat.sent_post++;
+ } else {
+ devdata->chstat.sent_post_failed++;
+ }
}
}
@@ -651,10 +477,10 @@ send_enbdis(struct net_device *netdev, int state,
devdata->cmdrsp_rcv->net.enbdis.context = netdev;
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
- visorchannel_signalinsert(devdata->dev->visorchannel,
+ if (visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv);
- devdata->chstat.sent_enbdis++;
+ devdata->cmdrsp_rcv))
+ devdata->chstat.sent_enbdis++;
}
/**
@@ -676,9 +502,6 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
unsigned long flags;
int wait = 0;
- /* stop the transmit queue so nothing more can be transmitted */
- netif_stop_queue(netdev);
-
/* send a msg telling the other end we are stopping incoming pkts */
spin_lock_irqsave(&devdata->priv_lock, flags);
devdata->enabled = 0;
@@ -695,12 +518,14 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
* when it gets a disable.
*/
spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
+ while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
(wait < timeout)) {
if (devdata->enab_dis_acked)
break;
if (devdata->server_down || devdata->server_change_state) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_dbg(&netdev->dev, "%s server went away\n",
+ __func__);
return -EIO;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -722,10 +547,16 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
break;
}
}
-
/* we've set enabled to 0, so we can give up the lock. */
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ /* stop the transmit queue so nothing more can be transmitted */
+ netif_stop_queue(netdev);
+
+ napi_disable(&devdata->napi);
+
+ skb_queue_purge(&devdata->xmitbufhead);
+
	/* Free rcv buffers - other end has automatically unposted them on
* disable
*/
@@ -736,13 +567,6 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
}
}
- /* remove references from array */
- for (i = 0; i < VISORNICSOPENMAX; i++)
- if (num_visornic_open[i] == netdev) {
- num_visornic_open[i] = NULL;
- break;
- }
-
return 0;
}
@@ -814,11 +638,15 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
* gets a disable.
*/
i = init_rcv_bufs(netdev, devdata);
- if (i < 0)
+ if (i < 0) {
+ dev_err(&netdev->dev,
+ "%s failed to init rcv bufs (%d)\n", __func__, i);
return i;
+ }
spin_lock_irqsave(&devdata->priv_lock, flags);
devdata->enabled = 1;
+ devdata->enab_dis_acked = 0;
/* now we're ready, let's send an ENB to uisnic but until we get
* an ACK back from uisnic, we'll drop the packets
@@ -829,15 +657,18 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
/* send enable and wait for ack -- don't hold lock when sending enable
* because if the queue is full, insert might sleep.
*/
+ napi_enable(&devdata->napi);
send_enbdis(netdev, 1, devdata);
spin_lock_irqsave(&devdata->priv_lock, flags);
- while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
+ while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
(wait < timeout)) {
if (devdata->enab_dis_acked)
break;
if (devdata->server_down || devdata->server_change_state) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_dbg(&netdev->dev, "%s server went away\n",
+ __func__);
return -EIO;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -848,19 +679,13 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
spin_unlock_irqrestore(&devdata->priv_lock, flags);
- if (!devdata->enab_dis_acked)
+ if (!devdata->enab_dis_acked) {
+ dev_err(&netdev->dev, "%s missing ACK\n", __func__);
return -EIO;
-
- /* find an open slot in the array to save off VisorNic references
- * for debug
- */
- for (i = 0; i < VISORNICSOPENMAX; i++) {
- if (!num_visornic_open[i]) {
- num_visornic_open[i] = netdev;
- break;
- }
}
+ netif_start_queue(netdev);
+
return 0;
}
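
Both the disable and enable paths above use the same open-coded wait: take the lock, re-check the ack flag, and sleep in TASK_INTERRUPTIBLE with the lock dropped. A minimal sketch of that pattern with hypothetical names (wait_event_interruptible_timeout() would be the usual higher-level form):

static int demo_wait_for_ack(spinlock_t *lock, bool *acked, int timeout_ms)
{
	unsigned long flags;
	int waited = 0, ret;

	spin_lock_irqsave(lock, flags);
	while (waited < timeout_ms && !*acked) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(lock, flags);
		schedule_timeout(msecs_to_jiffies(10));	/* sleep ~10 ms */
		waited += 10;
		spin_lock_irqsave(lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	ret = *acked ? 0 : -EIO;	/* read the flag under the lock */
	spin_unlock_irqrestore(lock, flags);
	return ret;
}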
@@ -882,20 +707,29 @@ visornic_timeout_reset(struct work_struct *work)
devdata = container_of(work, struct visornic_devdata, timeout_reset);
netdev = devdata->netdev;
- netif_stop_queue(netdev);
- response = visornic_disable_with_timeout(netdev, 100);
+ rtnl_lock();
+ if (!netif_running(netdev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ response = visornic_disable_with_timeout(netdev,
+ VISORNIC_INFINITE_RSP_WAIT);
if (response)
goto call_serverdown;
- response = visornic_enable_with_timeout(netdev, 100);
+ response = visornic_enable_with_timeout(netdev,
+ VISORNIC_INFINITE_RSP_WAIT);
if (response)
goto call_serverdown;
- netif_wake_queue(netdev);
+
+ rtnl_unlock();
return;
call_serverdown:
- visornic_serverdown(devdata);
+ visornic_serverdown(devdata, NULL);
+ rtnl_unlock();
}
/**
@@ -908,12 +742,7 @@ call_serverdown:
static int
visornic_open(struct net_device *netdev)
{
- visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
-
- /* start the interface's transmit queue, allowing it to accept
- * packets for transmission
- */
- netif_start_queue(netdev);
+ visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
return 0;
}
@@ -928,13 +757,59 @@ visornic_open(struct net_device *netdev)
static int
visornic_close(struct net_device *netdev)
{
- netif_stop_queue(netdev);
- visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
+ visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
return 0;
}
/**
+ * devdata_xmits_outstanding - compute outstanding xmits
+ * @devdata: visornic_devdata for device
+ *
+ * Return value is the number of outstanding xmits.
+ */
+static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
+{
+ if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
+ return devdata->chstat.sent_xmit -
+ devdata->chstat.got_xmit_done;
+ else
+ return (ULONG_MAX - devdata->chstat.got_xmit_done
+ + devdata->chstat.sent_xmit + 1);
+}
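
The second branch handles sent_xmit wrapping past got_xmit_done; for unsigned long counters it is equivalent to plain modular subtraction. A hypothetical demonstration, independent of driver state:

/*
 * For unsigned long counters, (sent - done) computed modulo
 * 2^BITS_PER_LONG equals the explicit two-branch form above.
 */
static unsigned long demo_outstanding(unsigned long sent, unsigned long done)
{
	if (sent >= done)
		return sent - done;
	return ULONG_MAX - done + sent + 1;	/* == sent - done (mod 2^BITS_PER_LONG) */
}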
+
+/**
+ * vnic_hit_high_watermark
+ * @devdata: indicates visornic device we are checking
+ * @high_watermark: max num of unacked xmits we will tolerate,
+ * before we will start throttling
+ *
+ * Returns true iff the number of unacked xmits sent to
+ * the IO partition is >= high_watermark.
+ */
+static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
+ ulong high_watermark)
+{
+ return (devdata_xmits_outstanding(devdata) >= high_watermark);
+}
+
+/**
+ * vnic_hit_low_watermark
+ * @devdata: indicates visornic device we are checking
+ * @low_watermark: we will wait until the num of unacked xmits
+ * drops to this value or lower before we start
+ * transmitting again
+ *
+ * Returns true iff the number of unacked xmits sent to
+ * the IO partition is <= low_watermark.
+ */
+static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
+ ulong low_watermark)
+{
+ return (devdata_xmits_outstanding(devdata) <= low_watermark);
+}
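
Together the two helpers implement hysteresis: the tx queue is stopped once outstanding xmits reach the upper threshold and only restarted when completions drain them to the lower one. A simplified sketch of how the xmit and completion paths use them (condensed from visornic_xmit and the NET_XMIT_DONE handling, not a literal copy):

static void demo_flow_control(struct visornic_devdata *devdata,
			      struct net_device *netdev)
{
	/* xmit side: throttle when too many sends are unacked */
	if (vnic_hit_high_watermark(devdata,
				    devdata->upper_threshold_net_xmits))
		netif_stop_queue(netdev);

	/* completion side: restart only after the backlog drains */
	if (netif_queue_stopped(netdev) &&
	    vnic_hit_low_watermark(devdata,
				   devdata->lower_threshold_net_xmits))
		netif_wake_queue(netdev);
}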
+
+/**
* visornic_xmit - send a packet to the IO Partition
* @skb: Packet to be sent
* @netdev: net device the packet is being sent from
@@ -944,7 +819,7 @@ visornic_close(struct net_device *netdev)
* function is protected from concurrent calls by a spinlock xmit_lock
* in the net_device struct, but as soon as the function returns it
* can be called again.
- * Returns NETDEV_TX_OK for success, NETDEV_TX_BUSY for error.
+ * Returns NETDEV_TX_OK.
*/
static int
visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -961,7 +836,10 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
devdata->server_change_state) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
- return NETDEV_TX_BUSY;
+ dev_dbg(&netdev->dev,
+ "%s busy - queue stopped\n", __func__);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
/* sk_buff struct is used to host network data throughout all the
@@ -979,7 +857,11 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (firstfraglen < ETH_HEADER_SIZE) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
- return NETDEV_TX_BUSY;
+ dev_err(&netdev->dev,
+ "%s busy - first frag too small (%d)\n",
+ __func__, firstfraglen);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
if ((len < ETH_MIN_PACKET_SIZE) &&
@@ -1002,13 +884,8 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* save the pointer to skb -- we'll need it for completion */
cmdrsp->net.buf = skb;
- if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
- (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
- devdata->max_outstanding_net_xmits)) ||
- ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
- (ULONG_MAX - devdata->chstat.got_xmit_done +
- devdata->chstat.sent_xmit >=
- devdata->max_outstanding_net_xmits))) {
+ if (vnic_hit_high_watermark(devdata,
+ devdata->max_outstanding_net_xmits)) {
/* too many NET_XMITs queued over to IOVM - need to wait
*/
devdata->chstat.reject_count++;
@@ -1018,7 +895,11 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
- return NETDEV_TX_BUSY;
+ dev_dbg(&netdev->dev,
+ "%s busy - waiting for iovm to catch up\n",
+ __func__);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
if (devdata->queuefullmsg_logged)
devdata->queuefullmsg_logged = 0;
@@ -1055,10 +936,13 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
visor_copy_fragsinfo_from_skb(skb, firstfraglen,
MAX_PHYS_INFO,
cmdrsp->net.xmt.frags);
- if (cmdrsp->net.xmt.num_frags == -1) {
+ if (cmdrsp->net.xmt.num_frags < 0) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
- return NETDEV_TX_BUSY;
+ dev_err(&netdev->dev,
+ "%s busy - copy frags failed\n", __func__);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
if (!visorchannel_signalinsert(devdata->dev->visorchannel,
@@ -1066,18 +950,15 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
- return NETDEV_TX_BUSY;
+ dev_dbg(&netdev->dev,
+ "%s busy - signalinsert failed\n", __func__);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
/* Track the skbs that have been sent to the IOVM for XMIT */
skb_queue_head(&devdata->xmitbufhead, skb);
- /* set the last transmission start time
- * linux doc says: Do not forget to update netdev->trans_start to
- * jiffies after each new tx packet is given to the hardware.
- */
- netdev->trans_start = jiffies;
-
/* update xmt stats */
devdata->net_stats.tx_packets++;
devdata->net_stats.tx_bytes += skb->len;
@@ -1086,18 +967,16 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* check to see if we have hit the high watermark for
* netif_stop_queue()
*/
- if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
- (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
- devdata->upper_threshold_net_xmits)) ||
- ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
- (ULONG_MAX - devdata->chstat.got_xmit_done +
- devdata->chstat.sent_xmit >=
- devdata->upper_threshold_net_xmits))) {
+ if (vnic_hit_high_watermark(devdata,
+ devdata->upper_threshold_net_xmits)) {
/* too many NET_XMITs queued over to IOVM - need to wait */
netif_stop_queue(netdev); /* calling stop queue - call
* netif_wake_queue() after lower
* threshold
*/
+ dev_dbg(&netdev->dev,
+ "%s busy - invoking iovm flow control\n",
+ __func__);
devdata->flow_control_upper_hits++;
}
spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1121,21 +1000,6 @@ visornic_get_stats(struct net_device *netdev)
}
/**
- * visornic_ioctl - ioctl function for netdevice.
- * @netdev: netdevice
- * @ifr: ignored
- * @cmd: ignored
- *
- * Currently not supported.
- * Returns EOPNOTSUPP
- */
-static int
-visornic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- return -EOPNOTSUPP;
-}
-
-/**
* visornic_change_mtu - changes mtu of device.
* @netdev: netdevice
* @new_mtu: value of new mtu
@@ -1201,15 +1065,24 @@ visornic_xmit_timeout(struct net_device *netdev)
unsigned long flags;
spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (devdata->going_away) {
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_dbg(&devdata->dev->device,
+ "%s aborting because device removal pending\n",
+ __func__);
+ return;
+ }
+
/* Ensure that a ServerDown message hasn't been received */
if (!devdata->enabled ||
(devdata->server_down && !devdata->server_change_state)) {
+ dev_dbg(&netdev->dev, "%s no processing\n",
+ __func__);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
return;
}
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
/**
@@ -1281,7 +1154,6 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
devdata->bad_rcv_buf++;
}
}
- atomic_dec(&devdata->usage);
return status;
}
@@ -1293,18 +1165,16 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
* it up the stack.
* Returns void
*/
-static void
+static int
visornic_rx(struct uiscmdrsp *cmdrsp)
{
struct visornic_devdata *devdata;
struct sk_buff *skb, *prev, *curr;
struct net_device *netdev;
- int cc, currsize, off, status;
+ int cc, currsize, off;
struct ethhdr *eth;
unsigned long flags;
-#ifdef DEBUG
- struct phys_info testfrags[MAX_PHYS_INFO];
-#endif
+ int rx_count = 0;
/* post new rcv buf to the other end using the cmdrsp we have at hand
* post it without holding lock - but we'll use the signal lock to
@@ -1314,18 +1184,6 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
skb = cmdrsp->net.buf;
netdev = skb->dev;
- if (!netdev) {
- /* We must have previously downed this network device and
- * this skb and device is no longer valid. This also means
- * the skb reference was removed from devdata->rcvbuf so no
- * need to search for it.
- * All we can do is free the skb and return.
- * Note: We crash if we try to log this here.
- */
- kfree_skb(skb);
- return;
- }
-
devdata = netdev_priv(netdev);
spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -1335,10 +1193,6 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
devdata->net_stats.rx_packets++;
devdata->net_stats.rx_bytes = skb->len;
- atomic_inc(&devdata->usage); /* don't want a close to happen before
- * we're done here
- */
-
/* set length to how much was ACTUALLY received -
* NOTE: rcv_done_len includes actual length of data rcvd
* including ethhdr
@@ -1352,7 +1206,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
*/
spin_unlock_irqrestore(&devdata->priv_lock, flags);
repost_return(cmdrsp, devdata, skb, netdev);
- return;
+ return rx_count;
}
spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1371,7 +1225,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return;
+ return rx_count;
}
/* length rcvd is greater than firstfrag in this skb rcv buf */
skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
@@ -1386,7 +1240,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return;
+ return rx_count;
}
skb->tail += skb->len;
skb->data_len = 0; /* nothing rcvd in frag_list */
@@ -1405,7 +1259,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev, "repost_return failed");
- return;
+ return rx_count;
}
if (cmdrsp->net.rcv.numrcvbufs > 1) {
@@ -1431,29 +1285,12 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
curr->data_len = 0;
off += currsize;
}
-#ifdef DEBUG
/* assert skb->len == off */
if (skb->len != off) {
- dev_err(&devdata->netdev->dev,
- "%s something wrong; skb->len:%d != off:%d\n",
- netdev->name, skb->len, off);
- }
- /* test code */
- cc = util_copy_fragsinfo_from_skb("rcvchaintest", skb,
- RCVPOST_BUF_SIZE,
- MAX_PHYS_INFO, testfrags);
- if (cc != cmdrsp->net.rcv.numrcvbufs) {
- dev_err(&devdata->netdev->dev,
- "**** %s Something wrong; rcvd chain length %d different from one we calculated %d\n",
- netdev->name, cmdrsp->net.rcv.numrcvbufs, cc);
- }
- for (i = 0; i < cc; i++) {
- dev_inf(&devdata->netdev->dev,
- "test:RCVPOST_BUF_SIZE:%d[%d] pfn:%llu off:0x%x len:%d\n",
- RCVPOST_BUF_SIZE, i, testfrags[i].pi_pfn,
- testfrags[i].pi_off, testfrags[i].pi_len);
+ netdev_err(devdata->netdev,
+ "something wrong; skb->len:%d != off:%d\n",
+ skb->len, off);
}
-#endif
}
	/* set up packet's protocol type using ethernet header - this
@@ -1505,10 +1342,11 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
/* drop packet - don't forward it up to OS */
devdata->n_rcv_packets_not_accepted++;
repost_return(cmdrsp, devdata, skb, netdev);
- return;
+ return rx_count;
} while (0);
- status = netif_rx(skb);
+ rx_count++;
+ netif_receive_skb(skb);
	/* netif_receive_skb returns a value, but in practice most drivers
* ignore the return value
*/
@@ -1520,6 +1358,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* new rcv buffer.
*/
repost_return(cmdrsp, devdata, skb, netdev);
+ return rx_count;
}
/**
@@ -1545,14 +1384,11 @@ devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
spin_unlock(&dev_num_pool_lock);
if (devnum == MAXDEVICES)
devnum = -1;
- if (devnum < 0) {
- kfree(devdata);
+ if (devnum < 0)
return NULL;
- }
devdata->devnum = devnum;
devdata->dev = dev;
strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
- kref_init(&devdata->kref);
spin_lock(&lock_all_devices);
list_add_tail(&devdata->list_all, &list_all_devices);
spin_unlock(&lock_all_devices);
@@ -1560,24 +1396,23 @@ devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
}
/**
- * devdata_release - Frees up a devdata
- * @mykref: kref to the devdata
+ * devdata_release - Frees up references in devdata
+ * @devdata: struct to clean up
*
- * Frees up a devdata.
+ * Frees up references in devdata.
* Returns void
*/
-static void devdata_release(struct kref *mykref)
+static void devdata_release(struct visornic_devdata *devdata)
{
- struct visornic_devdata *devdata =
- container_of(mykref, struct visornic_devdata, kref);
-
spin_lock(&dev_num_pool_lock);
clear_bit(devdata->devnum, dev_num_pool);
spin_unlock(&dev_num_pool_lock);
spin_lock(&lock_all_devices);
list_del(&devdata->list_all);
spin_unlock(&lock_all_devices);
- kfree(devdata);
+ kfree(devdata->rcvbuf);
+ kfree(devdata->cmdrsp_rcv);
+ kfree(devdata->xmit_cmdrsp);
}
static const struct net_device_ops visornic_dev_ops = {
@@ -1585,12 +1420,163 @@ static const struct net_device_ops visornic_dev_ops = {
.ndo_stop = visornic_close,
.ndo_start_xmit = visornic_xmit,
.ndo_get_stats = visornic_get_stats,
- .ndo_do_ioctl = visornic_ioctl,
.ndo_change_mtu = visornic_change_mtu,
.ndo_tx_timeout = visornic_xmit_timeout,
.ndo_set_rx_mode = visornic_set_multi,
};
+/* DebugFS code */
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ ssize_t bytes_read = 0;
+ int str_pos = 0;
+ struct visornic_devdata *devdata;
+ struct net_device *dev;
+ char *vbuf;
+
+ if (len > MAX_BUF)
+ len = MAX_BUF;
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+	/* for each vnic channel, dump out channel-specific data */
+ rcu_read_lock();
+ for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
+ /*
+ * Only consider netdevs that are visornic, and are open
+ */
+ if ((dev->netdev_ops != &visornic_dev_ops) ||
+ (!netif_queue_stopped(dev)))
+ continue;
+
+ devdata = netdev_priv(dev);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ "netdev = %s (0x%p), MAC Addr %pM\n",
+ dev->name,
+ dev,
+ dev->dev_addr);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ "VisorNic Dev Info = 0x%p\n", devdata);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " num_rcv_bufs = %d\n",
+ devdata->num_rcv_bufs);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " max_oustanding_next_xmits = %lu\n",
+ devdata->max_outstanding_net_xmits);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " upper_threshold_net_xmits = %lu\n",
+ devdata->upper_threshold_net_xmits);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " lower_threshold_net_xmits = %lu\n",
+ devdata->lower_threshold_net_xmits);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " queuefullmsg_logged = %d\n",
+ devdata->queuefullmsg_logged);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.got_rcv = %lu\n",
+ devdata->chstat.got_rcv);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.got_enbdisack = %lu\n",
+ devdata->chstat.got_enbdisack);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.got_xmit_done = %lu\n",
+ devdata->chstat.got_xmit_done);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.xmit_fail = %lu\n",
+ devdata->chstat.xmit_fail);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.sent_enbdis = %lu\n",
+ devdata->chstat.sent_enbdis);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.sent_promisc = %lu\n",
+ devdata->chstat.sent_promisc);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.sent_post = %lu\n",
+ devdata->chstat.sent_post);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.sent_post_failed = %lu\n",
+ devdata->chstat.sent_post_failed);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.sent_xmit = %lu\n",
+ devdata->chstat.sent_xmit);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.reject_count = %lu\n",
+ devdata->chstat.reject_count);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " chstat.extra_rcvbufs_sent = %lu\n",
+ devdata->chstat.extra_rcvbufs_sent);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_rcv0 = %lu\n", devdata->n_rcv0);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_rcv1 = %lu\n", devdata->n_rcv1);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_rcv2 = %lu\n", devdata->n_rcv2);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_rcvx = %lu\n", devdata->n_rcvx);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " num_rcvbuf_in_iovm = %d\n",
+ atomic_read(&devdata->num_rcvbuf_in_iovm));
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " alloc_failed_in_if_needed_cnt = %lu\n",
+ devdata->alloc_failed_in_if_needed_cnt);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " alloc_failed_in_repost_rtn_cnt = %lu\n",
+ devdata->alloc_failed_in_repost_rtn_cnt);
+ /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ * " inner_loop_limit_reached_cnt = %lu\n",
+ * devdata->inner_loop_limit_reached_cnt);
+ */
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " found_repost_rcvbuf_cnt = %lu\n",
+ devdata->found_repost_rcvbuf_cnt);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " repost_found_skb_cnt = %lu\n",
+ devdata->repost_found_skb_cnt);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_repost_deficit = %lu\n",
+ devdata->n_repost_deficit);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " bad_rcv_buf = %lu\n",
+ devdata->bad_rcv_buf);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " n_rcv_packets_not_accepted = %lu\n",
+ devdata->n_rcv_packets_not_accepted);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " interrupts_rcvd = %llu\n",
+ devdata->interrupts_rcvd);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " interrupts_notme = %llu\n",
+ devdata->interrupts_notme);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " interrupts_disabled = %llu\n",
+ devdata->interrupts_disabled);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " busy_cnt = %llu\n",
+ devdata->busy_cnt);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " flow_control_upper_hits = %llu\n",
+ devdata->flow_control_upper_hits);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " flow_control_lower_hits = %llu\n",
+ devdata->flow_control_lower_hits);
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " netif_queue = %s\n",
+ netif_queue_stopped(devdata->netdev) ?
+ "stopped" : "running");
+ str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+ " xmits_outstanding = %lu\n",
+ devdata_xmits_outstanding(devdata));
+ }
+ rcu_read_unlock();
+ bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
+ kfree(vbuf);
+ return bytes_read;
+}
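
This follows the stock debugfs read idiom: format into a kernel buffer with scnprintf(), then let simple_read_from_buffer() handle the file offset and partial reads. A minimal sketch with a hypothetical counter:

static u64 demo_counter;	/* hypothetical, not driver state */

static ssize_t demo_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	char vbuf[64];
	int str_pos;

	/* scnprintf() returns the bytes actually written, never more
	 * than the buffer size
	 */
	str_pos = scnprintf(vbuf, sizeof(vbuf), "demo_counter = %llu\n",
			    demo_counter);
	return simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
}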
+
/**
* send_rcv_posts_if_needed
* @devdata: visornic device
@@ -1644,15 +1630,15 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
 * Returns when the response queue is empty or when the thread stops.
*/
static void
-drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
+service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
+ int *rx_work_done)
{
unsigned long flags;
struct net_device *netdev;
- /* drain queue */
- while (1) {
- /* TODO: CLIENT ACQUIRE -- Don't really need this at the
- * moment */
+ /* TODO: CLIENT ACQUIRE -- Don't really need this at the
+ * moment */
+ for (;;) {
if (!visorchannel_signalremove(devdata->dev->visorchannel,
IOCHAN_FROM_IOPART,
cmdrsp))
@@ -1662,7 +1648,7 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
case NET_RCV:
devdata->chstat.got_rcv++;
/* process incoming packet */
- visornic_rx(cmdrsp);
+ *rx_work_done += visornic_rx(cmdrsp);
break;
case NET_XMIT_DONE:
spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -1678,16 +1664,8 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
* the lower watermark for
* netif_wake_queue()
*/
- if (((devdata->chstat.sent_xmit >=
- devdata->chstat.got_xmit_done) &&
- (devdata->chstat.sent_xmit -
- devdata->chstat.got_xmit_done <=
- devdata->lower_threshold_net_xmits)) ||
- ((devdata->chstat.sent_xmit <
- devdata->chstat.got_xmit_done) &&
- (ULONG_MAX - devdata->chstat.got_xmit_done
- + devdata->chstat.sent_xmit <=
- devdata->lower_threshold_net_xmits))) {
+ if (vnic_hit_low_watermark(devdata,
+ devdata->lower_threshold_net_xmits)) {
/* enough NET_XMITs completed
* so can restart netif queue
*/
@@ -1738,50 +1716,51 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
break;
}
/* cmdrsp is now available for reuse */
-
- if (kthread_should_stop())
- break;
}
}
+static int visornic_poll(struct napi_struct *napi, int budget)
+{
+ struct visornic_devdata *devdata = container_of(napi,
+ struct visornic_devdata,
+ napi);
+ int rx_count = 0;
+
+ send_rcv_posts_if_needed(devdata);
+ service_resp_queue(devdata->cmdrsp, devdata, &rx_count);
+
+ /*
+	 * If there aren't any more packets to receive,
+	 * stop the poll
+ */
+ if (rx_count < budget)
+ napi_complete(napi);
+
+ return rx_count;
+}
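
visornic_poll() honors the NAPI contract: consume at most budget rx packets and call napi_complete() only when fewer than budget were consumed, so NAPI keeps rescheduling the poll while work remains. A sketch of the wiring this relies on (hypothetical "demo" helpers; the driver's real calls live in visornic_probe() and poll_for_irq()):

static void demo_napi_setup(struct net_device *netdev,
			    struct visornic_devdata *devdata)
{
	/* 64 is the conventional NAPI weight/budget */
	netif_napi_add(netdev, &devdata->napi, visornic_poll, 64);
	napi_enable(&devdata->napi);
}

static void demo_rx_event(struct visornic_devdata *devdata)
{
	/* from timer/interrupt context: defer the rx work to softirq */
	napi_schedule(&devdata->napi);
}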
+
/**
- * process_incoming_rsps - Checks the status of the response queue.
+ * poll_for_irq - Checks the status of the response queue.
 * @v: void pointer to the visornic devdata
 *
 * Main function of the vnic_incoming thread. Periodically check the
* response queue and drain it if needed.
* Returns when thread has stopped.
*/
-static int
-process_incoming_rsps(void *v)
+static void
+poll_for_irq(unsigned long v)
{
- struct visornic_devdata *devdata = v;
- struct uiscmdrsp *cmdrsp = NULL;
- const int SZ = SIZEOF_CMDRSP;
+ struct visornic_devdata *devdata = (struct visornic_devdata *)v;
- cmdrsp = kmalloc(SZ, GFP_ATOMIC);
- if (!cmdrsp)
- complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+ if (!visorchannel_signalempty(
+ devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART))
+ napi_schedule(&devdata->napi);
- while (1) {
- wait_event_interruptible_timeout(
- devdata->rsp_queue, (atomic_read(
- &devdata->interrupt_rcvd) == 1),
- msecs_to_jiffies(devdata->thread_wait_ms));
+ atomic_set(&devdata->interrupt_rcvd, 0);
- /* periodically check to see if there are any rcf bufs which
- * need to get sent to the IOSP. This can only happen if
- * we run out of memory when trying to allocate skbs.
- */
- atomic_set(&devdata->interrupt_rcvd, 0);
- send_rcv_posts_if_needed(devdata);
- drain_queue(cmdrsp, devdata);
- if (kthread_should_stop())
- break;
- }
+	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
- kfree(cmdrsp);
- complete_and_exit(&devdata->threadinfo.has_stopped, 0);
}
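
With no real interrupt available, a 2 ms kernel timer stands in: it schedules NAPI whenever the channel has pending signals and then re-arms itself. Note that mod_timer() takes an absolute expiry in jiffies. A sketch of the pattern with hypothetical names:

static void demo_poll_fn(unsigned long data)
{
	struct visornic_devdata *devdata = (struct visornic_devdata *)data;

	/* hand any pending channel work to the NAPI softirq */
	if (!visorchannel_signalempty(devdata->dev->visorchannel,
				      IOCHAN_FROM_IOPART))
		napi_schedule(&devdata->napi);

	/* re-arm: absolute expiry, so offset from the current jiffies */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
}

static void demo_poll_start(struct visornic_devdata *devdata)
{
	setup_timer(&devdata->irq_poll_timer, demo_poll_fn,
		    (unsigned long)devdata);
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
}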
/**
@@ -1801,12 +1780,15 @@ static int visornic_probe(struct visor_device *dev)
u64 features;
netdev = alloc_etherdev(sizeof(struct visornic_devdata));
- if (!netdev)
+ if (!netdev) {
+ dev_err(&dev->device,
+ "%s alloc_etherdev failed\n", __func__);
return -ENOMEM;
+ }
netdev->netdev_ops = &visornic_dev_ops;
netdev->watchdog_timeo = (5 * HZ);
- netdev->dev.parent = &dev->device;
+ SET_NETDEV_DEV(netdev, &dev->device);
/* Get MAC adddress from channel and read it into the device. */
netdev->addr_len = ETH_ALEN;
@@ -1814,16 +1796,23 @@ static int visornic_probe(struct visor_device *dev)
vnic.macaddr);
err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
ETH_ALEN);
- if (err < 0)
+ if (err < 0) {
+ dev_err(&dev->device,
+ "%s failed to get mac addr from chan (%d)\n",
+ __func__, err);
goto cleanup_netdev;
+ }
devdata = devdata_initialize(netdev_priv(netdev), dev);
if (!devdata) {
+ dev_err(&dev->device,
+ "%s devdata_initialize failed\n", __func__);
err = -ENOMEM;
goto cleanup_netdev;
}
devdata->netdev = netdev;
+ dev_set_drvdata(&dev->device, devdata);
init_waitqueue_head(&devdata->rsp_queue);
spin_lock_init(&devdata->priv_lock);
devdata->enabled = 0; /* not yet */
@@ -1834,10 +1823,14 @@ static int visornic_probe(struct visor_device *dev)
vnic.num_rcv_bufs);
err = visorbus_read_channel(dev, channel_offset,
&devdata->num_rcv_bufs, 4);
- if (err)
+ if (err) {
+ dev_err(&dev->device,
+ "%s failed to get #rcv bufs from chan (%d)\n",
+ __func__, err);
goto cleanup_netdev;
+ }
- devdata->rcvbuf = kmalloc(sizeof(struct sk_buff *) *
+ devdata->rcvbuf = kzalloc(sizeof(struct sk_buff *) *
devdata->num_rcv_bufs, GFP_KERNEL);
if (!devdata->rcvbuf) {
err = -ENOMEM;
@@ -1846,12 +1839,15 @@ static int visornic_probe(struct visor_device *dev)
/* set the net_xmit outstanding threshold */
/* always leave two slots open but you should have 3 at a minimum */
+ /* note that max_outstanding_net_xmits must be > 0 */
devdata->max_outstanding_net_xmits =
- max(3, ((devdata->num_rcv_bufs / 3) - 2));
+ max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
devdata->upper_threshold_net_xmits =
- max(2, devdata->max_outstanding_net_xmits - 1);
+ max_t(unsigned long,
+ 2, (devdata->max_outstanding_net_xmits - 1));
devdata->lower_threshold_net_xmits =
- max(1, devdata->max_outstanding_net_xmits / 2);
+ max_t(unsigned long,
+ 1, (devdata->max_outstanding_net_xmits / 2));
skb_queue_head_init(&devdata->xmitbufhead);
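
As a worked example of the thresholds above: if the channel reports num_rcv_bufs = 60, then max_outstanding_net_xmits = max(3, 60/3 - 2) = 18, upper_threshold_net_xmits = max(2, 18 - 1) = 17, and lower_threshold_net_xmits = max(1, 18/2) = 9 — so the tx queue stops once 17 xmits are unacked and restarts when completions bring the count back down to 9.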
@@ -1866,8 +1862,6 @@ static int visornic_probe(struct visor_device *dev)
err = -ENOMEM;
goto cleanup_xmit_cmdrsp;
}
- INIT_WORK(&devdata->serverdown_completion,
- visornic_serverdown_complete);
INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
devdata->server_down = false;
devdata->server_change_state = false;
@@ -1876,42 +1870,70 @@ static int visornic_probe(struct visor_device *dev)
channel_offset = offsetof(struct spar_io_channel_protocol,
vnic.mtu);
err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
- if (err)
+ if (err) {
+ dev_err(&dev->device,
+ "%s failed to get mtu from chan (%d)\n",
+ __func__, err);
goto cleanup_xmit_cmdrsp;
+ }
/* TODO: Setup Interrupt information */
/* Let's start our threads to get responses */
+ netif_napi_add(netdev, &devdata->napi, visornic_poll, 64);
+
+ setup_timer(&devdata->irq_poll_timer, poll_for_irq,
+ (unsigned long)devdata);
+ /*
+	 * Note: This timer has to start running before the wait loop
+	 * in visornic_enable_with_timeout() because the napi poll
+	 * routine is responsible for setting enab_dis_acked
+ */
+	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
+
channel_offset = offsetof(struct spar_io_channel_protocol,
channel_header.features);
err = visorbus_read_channel(dev, channel_offset, &features, 8);
- if (err)
- goto cleanup_xmit_cmdrsp;
+ if (err) {
+ dev_err(&dev->device,
+ "%s failed to get features from chan (%d)\n",
+ __func__, err);
+ goto cleanup_napi_add;
+ }
features |= ULTRA_IO_CHANNEL_IS_POLLING;
err = visorbus_write_channel(dev, channel_offset, &features, 8);
- if (err)
- goto cleanup_xmit_cmdrsp;
-
- devdata->thread_wait_ms = 2;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vnic_incoming");
+ if (err) {
+ dev_err(&dev->device,
+ "%s failed to set features in chan (%d)\n",
+ __func__, err);
+ goto cleanup_napi_add;
+ }
err = register_netdev(netdev);
- if (err)
- goto cleanup_thread_stop;
+ if (err) {
+ dev_err(&dev->device,
+ "%s register_netdev failed (%d)\n", __func__, err);
+ goto cleanup_napi_add;
+ }
	/* create debugfs directories */
devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
visornic_debugfs_dir);
if (!devdata->eth_debugfs_dir) {
+ dev_err(&dev->device,
+ "%s debugfs_create_dir %s failed\n",
+ __func__, netdev->name);
err = -ENOMEM;
- goto cleanup_thread_stop;
+ goto cleanup_xmit_cmdrsp;
}
+ dev_info(&dev->device, "%s success netdev=%s\n",
+ __func__, netdev->name);
return 0;
-cleanup_thread_stop:
- visor_thread_stop(&devdata->threadinfo);
+cleanup_napi_add:
+ del_timer_sync(&devdata->irq_poll_timer);
+ netif_napi_del(&devdata->napi);
cleanup_xmit_cmdrsp:
kfree(devdata->xmit_cmdrsp);
@@ -1954,12 +1976,41 @@ static void host_side_disappeared(struct visornic_devdata *devdata)
static void visornic_remove(struct visor_device *dev)
{
struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
+ struct net_device *netdev;
+ unsigned long flags;
- if (!devdata)
+ if (!devdata) {
+ dev_err(&dev->device, "%s no devdata\n", __func__);
+ return;
+ }
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (devdata->going_away) {
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_err(&dev->device, "%s already being removed\n", __func__);
return;
+ }
+ devdata->going_away = true;
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ netdev = devdata->netdev;
+ if (!netdev) {
+ dev_err(&dev->device, "%s not net device\n", __func__);
+ return;
+ }
+
+ /* going_away prevents new items being added to the workqueues */
+ flush_workqueue(visornic_timeout_reset_workqueue);
+
+ debugfs_remove_recursive(devdata->eth_debugfs_dir);
+
+ unregister_netdev(netdev); /* this will call visornic_close() */
+
+ del_timer_sync(&devdata->irq_poll_timer);
+ netif_napi_del(&devdata->napi);
+
dev_set_drvdata(&dev->device, NULL);
host_side_disappeared(devdata);
- kref_put(&devdata->kref, devdata_release);
+ devdata_release(devdata);
+ free_netdev(netdev);
}
/**
@@ -1980,8 +2031,7 @@ static int visornic_pause(struct visor_device *dev,
{
struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
- visornic_serverdown(devdata);
- complete_func(dev, 0);
+ visornic_serverdown(devdata, complete_func);
return 0;
}
@@ -2003,37 +2053,40 @@ static int visornic_resume(struct visor_device *dev,
unsigned long flags;
devdata = dev_get_drvdata(&dev->device);
- if (!devdata)
+ if (!devdata) {
+ dev_err(&dev->device, "%s no devdata\n", __func__);
return -EINVAL;
+ }
netdev = devdata->netdev;
- if (devdata->server_down && !devdata->server_change_state) {
- devdata->server_change_state = true;
- /* Must transition channel to ATTACHED state BEFORE
- * we can start using the device again.
- * TODO: State transitions
- */
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vnic_incoming");
- init_rcv_bufs(netdev, devdata);
- spin_lock_irqsave(&devdata->priv_lock, flags);
- devdata->enabled = 1;
-
- /* Now we're ready, let's send an ENB to uisnic but until
- * we get an ACK back from uisnic, we'll drop the packets
- */
- devdata->enab_dis_acked = 0;
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (devdata->server_change_state) {
spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
- /* send enable and wait for ack - don't hold lock when
- * sending enable because if the queue if sull, insert
- * might sleep.
- */
- send_enbdis(netdev, 1, devdata);
- } else if (devdata->server_change_state) {
- return -EIO;
+ dev_err(&dev->device, "%s server already changing state\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (!devdata->server_down) {
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ dev_err(&dev->device, "%s server not down\n", __func__);
+ complete_func(dev, 0);
+ return 0;
}
+ devdata->server_change_state = true;
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+
+ /* Must transition channel to ATTACHED state BEFORE
+ * we can start using the device again.
+ * TODO: State transitions
+ */
+	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
+
+ init_rcv_bufs(netdev, devdata);
+
+ rtnl_lock();
+ dev_open(netdev);
+ rtnl_unlock();
complete_func(dev, 0);
return 0;
@@ -2051,18 +2104,6 @@ static int visornic_init(void)
struct dentry *ret;
int err = -ENOMEM;
- /* create workqueue for serverdown completion */
- visornic_serverdown_workqueue =
- create_singlethread_workqueue("visornic_serverdown");
- if (!visornic_serverdown_workqueue)
- return -ENOMEM;
-
- /* create workqueue for tx timeout reset */
- visornic_timeout_reset_workqueue =
- create_singlethread_workqueue("visornic_timeout_reset");
- if (!visornic_timeout_reset_workqueue)
- return -ENOMEM;
-
visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
if (!visornic_debugfs_dir)
return err;
@@ -2076,12 +2117,6 @@ static int visornic_init(void)
if (!ret)
goto cleanup_debugfs;
- /* create workqueue for serverdown completion */
- visornic_serverdown_workqueue =
- create_singlethread_workqueue("visornic_serverdown");
- if (!visornic_serverdown_workqueue)
- goto cleanup_debugfs;
-
/* create workqueue for tx timeout reset */
visornic_timeout_reset_workqueue =
create_singlethread_workqueue("visornic_timeout_reset");
@@ -2097,8 +2132,6 @@ static int visornic_init(void)
return 0;
cleanup_workqueue:
- flush_workqueue(visornic_serverdown_workqueue);
- destroy_workqueue(visornic_serverdown_workqueue);
if (visornic_timeout_reset_workqueue) {
flush_workqueue(visornic_timeout_reset_workqueue);
destroy_workqueue(visornic_timeout_reset_workqueue);
@@ -2116,17 +2149,14 @@ cleanup_debugfs:
*/
static void visornic_cleanup(void)
{
- if (visornic_serverdown_workqueue) {
- flush_workqueue(visornic_serverdown_workqueue);
- destroy_workqueue(visornic_serverdown_workqueue);
- }
+ visorbus_unregister_visor_driver(&visornic_driver);
+
if (visornic_timeout_reset_workqueue) {
flush_workqueue(visornic_timeout_reset_workqueue);
destroy_workqueue(visornic_timeout_reset_workqueue);
}
debugfs_remove_recursive(visornic_debugfs_dir);
- visorbus_unregister_visor_driver(&visornic_driver);
kfree(dev_num_pool);
dev_num_pool = NULL;
}
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index eabbcc710a20..35c6ce5047de 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -24,7 +24,6 @@
#include "vme_pio2.h"
-
static const char driver_name[] = "pio2";
static int bus[PIO2_CARDS_MAX];
@@ -118,7 +117,6 @@ static void pio2_int(int level, int vector, void *ptr)
}
}
-
/*
* We return whether this has been successful - this is used in the probe to
* ensure we have a valid card.
@@ -158,7 +156,6 @@ static struct vme_driver pio2_driver = {
.remove = pio2_remove,
};
-
static int __init pio2_init(void)
{
if (bus_num == 0) {
@@ -178,7 +175,6 @@ static int __init pio2_init(void)
static int pio2_match(struct vme_dev *vdev)
{
-
if (vdev->num >= bus_num) {
dev_err(&vdev->dev,
"The enumeration of the VMEbus to which the board is connected must be specified\n");
@@ -220,7 +216,7 @@ static int pio2_probe(struct vme_dev *vdev)
int vec;
card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
retval = -ENOMEM;
goto err_struct;
}
@@ -234,7 +230,6 @@ static int pio2_probe(struct vme_dev *vdev)
card->vdev = vdev;
for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
-
if (isdigit(card->variant[i]) == 0) {
dev_err(&card->vdev->dev, "Variant invalid\n");
retval = -EINVAL;
@@ -264,29 +259,29 @@ static int pio2_probe(struct vme_dev *vdev)
for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
switch (card->variant[i]) {
case '0':
- card->bank[i-1].config = NOFIT;
+ card->bank[i - 1].config = NOFIT;
break;
case '1':
case '2':
case '3':
case '4':
- card->bank[i-1].config = INPUT;
+ card->bank[i - 1].config = INPUT;
break;
case '5':
- card->bank[i-1].config = OUTPUT;
+ card->bank[i - 1].config = OUTPUT;
break;
case '6':
case '7':
case '8':
case '9':
- card->bank[i-1].config = BOTH;
+ card->bank[i - 1].config = BOTH;
break;
}
}
/* Get a master window and position over regs */
card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
- if (card->window == NULL) {
+ if (!card->window) {
dev_err(&card->vdev->dev,
"Unable to assign VME master resource\n");
retval = -EIO;
@@ -481,7 +476,6 @@ static void __exit pio2_exit(void)
vme_unregister_driver(&pio2_driver);
}
-
/* These are required for each board */
MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
module_param_array(bus, int, &bus_num, S_IRUGO);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 9cca97af3044..8e61a3b3e7e4 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -101,13 +101,13 @@ struct image_desc {
struct vme_resource *resource; /* VME resource */
int mmap_count; /* Number of current mmap's */
};
+
static struct image_desc image[VME_DEVS];
static struct cdev *vme_user_cdev; /* Character device */
static struct class *vme_user_sysfs_class; /* Sysfs class */
static struct vme_dev *vme_user_bridge; /* Pointer to user device */
-
static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
MASTER_MINOR, MASTER_MINOR,
SLAVE_MINOR, SLAVE_MINOR,
@@ -120,125 +120,68 @@ struct vme_user_vma_priv {
atomic_t refcnt;
};
-
-/*
- * We are going ot alloc a page during init per window for small transfers.
- * Small transfers will go VME -> buffer -> user space. Larger (more than a
- * page) transfers will lock the user space buffer into memory and then
- * transfer the data directly into the user space buffers.
- */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
- loff_t *ppos)
+ loff_t *ppos)
{
- ssize_t retval;
ssize_t copied = 0;
- if (count <= image[minor].size_buf) {
- /* We copy to kernel buffer */
- copied = vme_master_read(image[minor].resource,
- image[minor].kern_buf, count, *ppos);
- if (copied < 0)
- return (int)copied;
-
- retval = __copy_to_user(buf, image[minor].kern_buf,
- (unsigned long)copied);
- if (retval != 0) {
- copied = (copied - retval);
- pr_info("User copy failed\n");
- return -EINVAL;
- }
+ if (count > image[minor].size_buf)
+ count = image[minor].size_buf;
- } else {
- /* XXX Need to write this */
- pr_info("Currently don't support large transfers\n");
- /* Map in pages from userspace */
+ copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
+ count, *ppos);
+ if (copied < 0)
+ return (int)copied;
- /* Call vme_master_read to do the transfer */
- return -EINVAL;
- }
+ if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+ return -EFAULT;
return copied;
}
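
The simplified helper clamps count to the bounce buffer and turns any partial __copy_to_user() into -EFAULT rather than a silent short read. A hypothetical caller illustrating the resulting contract:

static ssize_t demo_read_caller(unsigned int minor, char __user *buf,
				size_t count, loff_t *ppos)
{
	/* returns bytes copied, or a negative errno - never a
	 * silent short count
	 */
	ssize_t n = resource_to_user(minor, buf, count, ppos);

	if (n < 0)
		return n;	/* -EFAULT, or an error from vme_master_read() */
	*ppos += n;
	return n;
}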
-/*
- * We are going to alloc a page during init per window for small transfers.
- * Small transfers will go user space -> buffer -> VME. Larger (more than a
- * page) transfers will lock the user space buffer into memory and then
- * transfer the data directly from the user space buffers out to VME.
- */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
- ssize_t retval;
- ssize_t copied = 0;
+ if (count > image[minor].size_buf)
+ count = image[minor].size_buf;
- if (count <= image[minor].size_buf) {
- retval = __copy_from_user(image[minor].kern_buf, buf,
- (unsigned long)count);
- if (retval != 0)
- copied = (copied - retval);
- else
- copied = count;
-
- copied = vme_master_write(image[minor].resource,
- image[minor].kern_buf, copied, *ppos);
- } else {
- /* XXX Need to write this */
- pr_info("Currently don't support large transfers\n");
- /* Map in pages from userspace */
-
- /* Call vme_master_write to do the transfer */
- return -EINVAL;
- }
+ if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+ return -EFAULT;
- return copied;
+ return vme_master_write(image[minor].resource, image[minor].kern_buf,
+ count, *ppos);
}
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
void *image_ptr;
- ssize_t retval;
image_ptr = image[minor].kern_buf + *ppos;
+ if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+ return -EFAULT;
- retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
- if (retval != 0) {
- retval = (count - retval);
- pr_warn("Partial copy to userspace\n");
- } else
- retval = count;
-
- /* Return number of bytes successfully read */
- return retval;
+ return count;
}
static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
void *image_ptr;
- size_t retval;
image_ptr = image[minor].kern_buf + *ppos;
+ if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+ return -EFAULT;
- retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
- if (retval != 0) {
- retval = (count - retval);
- pr_warn("Partial copy to userspace\n");
- } else
- retval = count;
-
- /* Return number of bytes successfully read */
- return retval;
+ return count;
}
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+ loff_t *ppos)
{
unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
- size_t okcount;
if (minor == CONTROL_MINOR)
return 0;
@@ -256,16 +199,14 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
/* Ensure not reading past end of the image */
if (*ppos + count > image_size)
- okcount = image_size - *ppos;
- else
- okcount = count;
+ count = image_size - *ppos;
switch (type[minor]) {
case MASTER_MINOR:
- retval = resource_to_user(minor, buf, okcount, ppos);
+ retval = resource_to_user(minor, buf, count, ppos);
break;
case SLAVE_MINOR:
- retval = buffer_to_user(minor, buf, okcount, ppos);
+ retval = buffer_to_user(minor, buf, count, ppos);
break;
default:
retval = -EINVAL;
@@ -279,12 +220,11 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
}
static ssize_t vme_user_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
unsigned int minor = MINOR(file_inode(file)->i_rdev);
ssize_t retval;
size_t image_size;
- size_t okcount;
if (minor == CONTROL_MINOR)
return 0;
@@ -301,16 +241,14 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
	/* Ensure not writing past end of the image */
if (*ppos + count > image_size)
- okcount = image_size - *ppos;
- else
- okcount = count;
+ count = image_size - *ppos;
switch (type[minor]) {
case MASTER_MINOR:
- retval = resource_from_user(minor, buf, okcount, ppos);
+ retval = resource_from_user(minor, buf, count, ppos);
break;
case SLAVE_MINOR:
- retval = buffer_from_user(minor, buf, okcount, ppos);
+ retval = buffer_from_user(minor, buf, count, ppos);
break;
default:
retval = -EINVAL;
@@ -354,7 +292,7 @@ static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
* already been defined.
*/
static int vme_user_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
struct vme_master master;
struct vme_slave slave;
@@ -390,12 +328,13 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
* to userspace as they are
*/
retval = vme_master_get(image[minor].resource,
- &master.enable, &master.vme_addr,
- &master.size, &master.aspace,
- &master.cycle, &master.dwidth);
+ &master.enable,
+ &master.vme_addr,
+ &master.size, &master.aspace,
+ &master.cycle, &master.dwidth);
copied = copy_to_user(argp, &master,
- sizeof(struct vme_master));
+ sizeof(struct vme_master));
if (copied != 0) {
pr_warn("Partial copy to userspace\n");
return -EFAULT;
@@ -435,12 +374,12 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
* to userspace as they are
*/
retval = vme_slave_get(image[minor].resource,
- &slave.enable, &slave.vme_addr,
- &slave.size, &pci_addr, &slave.aspace,
- &slave.cycle);
+ &slave.enable, &slave.vme_addr,
+ &slave.size, &pci_addr,
+ &slave.aspace, &slave.cycle);
copied = copy_to_user(argp, &slave,
- sizeof(struct vme_slave));
+ sizeof(struct vme_slave));
if (copied != 0) {
pr_warn("Partial copy to userspace\n");
return -EFAULT;
@@ -526,8 +465,8 @@ static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
return err;
}
- vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL);
- if (vma_priv == NULL) {
+ vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
+ if (!vma_priv) {
mutex_unlock(&image[minor].mutex);
return -ENOMEM;
}
@@ -588,7 +527,7 @@ static int vme_user_probe(struct vme_dev *vdev)
char *name;
/* Save pointer to the bridge device */
- if (vme_user_bridge != NULL) {
+ if (vme_user_bridge) {
dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
err = -EINVAL;
goto err_dev;
@@ -606,7 +545,7 @@ static int vme_user_probe(struct vme_dev *vdev)
/* Assign major and minor numbers for the driver */
err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
- driver_name);
+ driver_name);
if (err) {
dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
VME_MAJOR);
@@ -622,10 +561,8 @@ static int vme_user_probe(struct vme_dev *vdev)
vme_user_cdev->ops = &vme_user_fops;
vme_user_cdev->owner = THIS_MODULE;
err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
- if (err) {
- dev_warn(&vdev->dev, "cdev_all failed\n");
+ if (err)
goto err_char;
- }
/* Request slave resources and allocate buffers (128kB wide) */
for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
@@ -636,7 +573,7 @@ static int vme_user_probe(struct vme_dev *vdev)
*/
image[i].resource = vme_slave_request(vme_user_bridge,
VME_A24, VME_SCT);
- if (image[i].resource == NULL) {
+ if (!image[i].resource) {
dev_warn(&vdev->dev,
"Unable to allocate slave resource\n");
err = -ENOMEM;
@@ -645,7 +582,7 @@ static int vme_user_probe(struct vme_dev *vdev)
image[i].size_buf = PCI_BUF_SIZE;
image[i].kern_buf = vme_alloc_consistent(image[i].resource,
image[i].size_buf, &image[i].pci_buf);
- if (image[i].kern_buf == NULL) {
+ if (!image[i].kern_buf) {
dev_warn(&vdev->dev,
"Unable to allocate memory for buffer\n");
image[i].pci_buf = 0;
@@ -663,7 +600,7 @@ static int vme_user_probe(struct vme_dev *vdev)
/* XXX Need to properly request attributes */
image[i].resource = vme_master_request(vme_user_bridge,
VME_A32, VME_SCT, VME_D32);
- if (image[i].resource == NULL) {
+ if (!image[i].resource) {
dev_warn(&vdev->dev,
"Unable to allocate master resource\n");
err = -ENOMEM;
@@ -671,7 +608,7 @@ static int vme_user_probe(struct vme_dev *vdev)
}
image[i].size_buf = PCI_BUF_SIZE;
image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
- if (image[i].kern_buf == NULL) {
+ if (!image[i].kern_buf) {
err = -ENOMEM;
vme_master_free(image[i].resource);
goto err_master;
@@ -835,7 +772,6 @@ static void __exit vme_user_exit(void)
vme_unregister_driver(&vme_user_driver);
}
-
MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index b0ea38f1911c..9e61f2df3a00 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1728,10 +1728,8 @@ BBuGetFrameTime(
unsigned int uRateIdx = (unsigned int) wRate;
unsigned int uRate = 0;
- if (uRateIdx > RATE_54M) {
- ASSERT(0);
+ if (uRateIdx > RATE_54M)
return 0;
- }
uRate = (unsigned int)awcFrameTime[uRateIdx];
@@ -1945,7 +1943,6 @@ bool BBbReadEmbedded(struct vnt_private *priv,
VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData);
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x30);
pr_debug(" DBG_PORT80(0x30)\n");
return false;
}
@@ -1988,7 +1985,6 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x31);
pr_debug(" DBG_PORT80(0x31)\n");
return false;
}
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index e00c0605d154..c7b75dfc2d5f 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -514,7 +514,7 @@ CARDvSafeResetTx(
)
{
unsigned int uu;
- PSTxDesc pCurrTD;
+ struct vnt_tx_desc *pCurrTD;
/* initialize TD index */
pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
@@ -525,12 +525,12 @@ CARDvSafeResetTx(
for (uu = 0; uu < pDevice->sOpts.nTxDescs[0]; uu++) {
pCurrTD = &(pDevice->apTD0Rings[uu]);
- pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
+ pCurrTD->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < pDevice->sOpts.nTxDescs[1]; uu++) {
pCurrTD = &(pDevice->apTD1Rings[uu]);
- pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
+ pCurrTD->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
@@ -573,17 +573,17 @@ CARDvSafeResetRx(
/* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
pDesc = &(pDevice->aRD0Ring[uu]);
- pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
+ pDesc->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
pDesc->m_rd0RD0.f1Owner = OWNED_BY_NIC;
- pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
+ pDesc->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
}
/* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
pDesc = &(pDevice->aRD1Ring[uu]);
- pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
+ pDesc->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
pDesc->m_rd0RD0.f1Owner = OWNED_BY_NIC;
- pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
+ pDesc->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
}
/* set perPkt mode */
@@ -847,7 +847,6 @@ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode
case CARD_LB_PHY:
break;
default:
- ASSERT(false);
break;
}
/* set MAC loopback */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 758eeb2afd51..3c9007e34c0f 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -170,13 +170,12 @@
typedef struct tagDEVICE_RD_INFO {
struct sk_buff *skb;
dma_addr_t skb_dma;
- dma_addr_t curr_desc;
} DEVICE_RD_INFO, *PDEVICE_RD_INFO;
#ifdef __BIG_ENDIAN
typedef struct tagRDES0 {
- volatile unsigned short wResCount;
+ volatile __le16 wResCount;
union {
volatile u16 f15Reserved;
struct {
@@ -191,7 +190,7 @@ SRDES0, *PSRDES0;
#else
typedef struct tagRDES0 {
- unsigned short wResCount;
+ __le16 wResCount;
unsigned short f15Reserved:15;
unsigned short f1Owner:1;
} __attribute__ ((__packed__))
@@ -200,7 +199,7 @@ SRDES0;
#endif
typedef struct tagRDES1 {
- unsigned short wReqCount;
+ __le16 wReqCount;
unsigned short wReserved;
} __attribute__ ((__packed__))
SRDES1;
@@ -209,93 +208,56 @@ SRDES1;
typedef struct tagSRxDesc {
volatile SRDES0 m_rd0RD0;
volatile SRDES1 m_rd1RD1;
- volatile u32 buff_addr;
- volatile u32 next_desc;
+ volatile __le32 buff_addr;
+ volatile __le32 next_desc;
struct tagSRxDesc *next __aligned(8);
volatile PDEVICE_RD_INFO pRDInfo __aligned(8);
} __attribute__ ((__packed__))
SRxDesc, *PSRxDesc;
typedef const SRxDesc *PCSRxDesc;
+struct vnt_tdes0 {
+ volatile u8 tsr0;
+ volatile u8 tsr1;
#ifdef __BIG_ENDIAN
-
-typedef struct tagTDES0 {
- volatile unsigned char byTSR0;
- volatile unsigned char byTSR1;
union {
- volatile u16 f15Txtime;
+ volatile u16 f15_txtime;
struct {
- volatile u8 f8Reserved1;
- volatile u8 f1Owner:1;
- volatile u8 f7Reserved:7;
- } __attribute__ ((__packed__));
- } __attribute__ ((__packed__));
-} __attribute__ ((__packed__))
-STDES0, PSTDES0;
-
+ volatile u8 f8_reserved;
+ volatile u8 owner:1;
+ volatile u8 f7_reserved:7;
+ } __packed;
+ } __packed;
#else
-
-typedef struct tagTDES0 {
- volatile unsigned char byTSR0;
- volatile unsigned char byTSR1;
- volatile unsigned short f15Txtime:15;
- volatile unsigned short f1Owner:1;
-} __attribute__ ((__packed__))
-STDES0;
-
+ volatile u16 f15_txtime:15;
+ volatile u16 owner:1;
#endif
+} __packed;
-typedef struct tagTDES1 {
- volatile unsigned short wReqCount;
- volatile unsigned char byTCR;
- volatile unsigned char byReserved;
-} __attribute__ ((__packed__))
-STDES1;
+struct vnt_tdes1 {
+ volatile __le16 req_count;
+ volatile u8 tcr;
+ volatile u8 reserved;
+} __packed;
-typedef struct tagDEVICE_TD_INFO {
+struct vnt_td_info {
void *mic_hdr;
struct sk_buff *skb;
unsigned char *buf;
- dma_addr_t skb_dma;
- dma_addr_t buf_dma;
- dma_addr_t curr_desc;
- unsigned long dwReqCount;
- unsigned long dwHeaderLength;
- unsigned char byFlags;
-} DEVICE_TD_INFO, *PDEVICE_TD_INFO;
+ dma_addr_t buf_dma;
+ u16 req_count;
+ u8 flags;
+};
/* transmit descriptor */
-typedef struct tagSTxDesc {
- volatile STDES0 m_td0TD0;
- volatile STDES1 m_td1TD1;
- volatile u32 buff_addr;
- volatile u32 next_desc;
- struct tagSTxDesc *next __aligned(8);
- volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
-} __attribute__ ((__packed__))
-STxDesc, *PSTxDesc;
-typedef const STxDesc *PCSTxDesc;
-
-typedef struct tagSTxSyncDesc {
- volatile STDES0 m_td0TD0;
- volatile STDES1 m_td1TD1;
- volatile u32 buff_addr; /* pointer to logical buffer */
- volatile u32 next_desc; /* pointer to next logical descriptor */
- volatile unsigned short m_wFIFOCtl;
- volatile unsigned short m_wTimeStamp;
- struct tagSTxSyncDesc *next __aligned(8);
- volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
-} __attribute__ ((__packed__))
-STxSyncDesc, *PSTxSyncDesc;
-typedef const STxSyncDesc *PCSTxSyncDesc;
-
-/* RsvTime buffer header */
-typedef struct tagSRrvTime_atim {
- unsigned short wCTSTxRrvTime_ba;
- unsigned short wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_atim, *PSRrvTime_atim;
-typedef const SRrvTime_atim *PCSRrvTime_atim;
+struct vnt_tx_desc {
+ volatile struct vnt_tdes0 td0;
+ volatile struct vnt_tdes1 td1;
+ volatile __le32 buff_addr;
+ volatile __le32 next_desc;
+ struct vnt_tx_desc *next __aligned(8);
+ struct vnt_td_info *td_info __aligned(8);
+} __packed;
/* Length, Service, and Signal fields of Phy for Tx */
struct vnt_phy_field {
@@ -310,42 +272,4 @@ union vnt_phy_field_swap {
u32 field_write;
};
-/* Tx FIFO header */
-typedef struct tagSTxBufHead {
- u32 adwTxKey[4];
- unsigned short wFIFOCtl;
- unsigned short wTimeStamp;
- unsigned short wFragCtl;
- unsigned char byTxPower;
- unsigned char wReserved;
-} __attribute__ ((__packed__))
-STxBufHead, *PSTxBufHead;
-typedef const STxBufHead *PCSTxBufHead;
-
-typedef struct tagSBEACONCtl {
- u32 BufReady:1;
- u32 TSF:15;
- u32 BufLen:11;
- u32 Reserved:5;
-} __attribute__ ((__packed__))
-SBEACONCtl;
-
-typedef struct tagSSecretKey {
- u32 dwLowDword;
- unsigned char byHighByte;
-} __attribute__ ((__packed__))
-SSecretKey;
-
-typedef struct tagSKeyEntry {
- unsigned char abyAddrHi[2];
- unsigned short wKCTL;
- unsigned char abyAddrLo[4];
- u32 dwKey0[4];
- u32 dwKey1[4];
- u32 dwKey2[4];
- u32 dwKey3[4];
- u32 dwKey4[4];
-} __attribute__ ((__packed__))
-SKeyEntry;
-
#endif /* __DESC_H__ */
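The desc.h rework above replaces the old camelCase typedefs with kernel-style structs and marks every field the hardware reads as __le16/__le32, which lets sparse flag missing byte-order conversions. Since these descriptors are DMA-shared, the packed layout itself is load-bearing; a minimal standalone sketch (toy_* names are illustrative, not driver code) of pinning such a layout down at compile time:

#include <stdint.h>

/* Stand-in for struct vnt_tdes1: req_count is little-endian on the wire. */
struct toy_tdes1 {
	uint16_t req_count;	/* __le16 in the driver proper */
	uint8_t  tcr;
	uint8_t  reserved;
} __attribute__((packed));

/* The device parses this byte-for-byte; padding would shift later fields. */
_Static_assert(sizeof(struct toy_tdes1) == 4, "tdes1 must stay 4 bytes");

int main(void) { return 0; }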
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 5cf1b337cba7..c9fa6ef42d34 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -252,11 +252,11 @@ struct vnt_private {
int nTxQueues;
volatile int iTDUsed[TYPE_MAXTD];
- volatile PSTxDesc apCurrTD[TYPE_MAXTD];
- volatile PSTxDesc apTailTD[TYPE_MAXTD];
+ struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
+ struct vnt_tx_desc *apTailTD[TYPE_MAXTD];
- volatile PSTxDesc apTD0Rings;
- volatile PSTxDesc apTD1Rings;
+ struct vnt_tx_desc *apTD0Rings;
+ struct vnt_tx_desc *apTD1Rings;
volatile PSRxDesc aRD0Ring;
volatile PSRxDesc aRD1Ring;
@@ -403,6 +403,7 @@ struct vnt_private {
unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
unsigned short wBeaconInterval;
+ u16 wake_up_count;
struct work_struct interrupt_work;
@@ -414,8 +415,8 @@ static inline PDEVICE_RD_INFO alloc_rd_info(void)
return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
}
-static inline PDEVICE_TD_INFO alloc_td_info(void)
+static inline struct vnt_td_info *alloc_td_info(void)
{
- return kzalloc(sizeof(DEVICE_TD_INFO), GFP_ATOMIC);
+ return kzalloc(sizeof(struct vnt_td_info), GFP_ATOMIC);
}
#endif
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index a4a8a8489e0b..b4c9547d3138 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -69,19 +69,4 @@ typedef enum _chip_type {
VT3253 = 1
} CHIP_TYPE, *PCHIP_TYPE;
-#ifdef VIAWET_DEBUG
-#define ASSERT(x) \
-do { \
- if (!(x)) { \
- pr_err("assertion %s failed: file %s line %d\n", \
- #x, __func__, __LINE__); \
- *(int *)0 = 0; \
- } \
-} while (0)
-#define DBG_PORT80(value) outb(value, 0x80)
-#else
-#define ASSERT(x)
-#define DBG_PORT80(value)
-#endif
-
#endif
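The ASSERT() macro deleted here crashed the machine on failure by writing through a NULL pointer, and DBG_PORT80() poked POST codes out I/O port 0x80; the call sites above now simply return early or log through pr_debug(). Where a recoverable condition still deserves a loud one-time diagnostic, the usual mainline idiom is WARN_ON_ONCE(), sketched below (not part of this patch):

/* WARN_ON_ONCE() evaluates to its condition, so it can gate a bail-out
 * while emitting a single backtrace instead of taking the box down. */
if (WARN_ON_ONCE(uRateIdx > RATE_54M))
	return 0;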
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ed040fbb7df8..0d8f123c57fe 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -157,7 +157,7 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pDesc);
static void device_init_registers(struct vnt_private *pDevice);
-static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc);
+static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
static void device_free_td0_ring(struct vnt_private *pDevice);
static void device_free_td1_ring(struct vnt_private *pDevice);
static void device_free_rd0_ring(struct vnt_private *pDevice);
@@ -522,8 +522,8 @@ static bool device_init_rings(struct vnt_private *pDevice)
vir_pool = dma_zalloc_coherent(&pDevice->pcid->dev,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
- pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
+ pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+ pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
&pDevice->pool_dma, GFP_ATOMIC);
if (vir_pool == NULL) {
dev_err(&pDevice->pcid->dev, "allocate desc dma memory failed\n");
@@ -551,8 +551,8 @@ static bool device_init_rings(struct vnt_private *pDevice)
dma_free_coherent(&pDevice->pcid->dev,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
- pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
+ pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+ pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
vir_pool, pDevice->pool_dma
);
return false;
@@ -562,7 +562,7 @@ static bool device_init_rings(struct vnt_private *pDevice)
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
pDevice->td1_pool_dma = pDevice->td0_pool_dma +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
+ pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
/* vir_pool: pvoid type */
pDevice->apTD0Rings = vir_pool
@@ -572,7 +572,7 @@ static bool device_init_rings(struct vnt_private *pDevice)
pDevice->apTD1Rings = vir_pool
+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc)
- + pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
+ + pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
pDevice->tx1_bufs = pDevice->tx0_bufs +
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
@@ -597,8 +597,8 @@ static void device_free_rings(struct vnt_private *pDevice)
dma_free_coherent(&pDevice->pcid->dev,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
- pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc)
+ pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+ pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc)
,
pDevice->aRD0Ring, pDevice->pool_dma
);
@@ -623,12 +623,11 @@ static void device_init_rd0_ring(struct vnt_private *pDevice)
for (i = 0; i < pDevice->sOpts.nRxDescs0; i ++, curr += sizeof(SRxDesc)) {
pDesc = &(pDevice->aRD0Ring[i]);
pDesc->pRDInfo = alloc_rd_info();
- ASSERT(pDesc->pRDInfo);
+
if (!device_alloc_rx_buf(pDevice, pDesc))
dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
pDesc->next = &(pDevice->aRD0Ring[(i+1) % pDevice->sOpts.nRxDescs0]);
- pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
}
@@ -647,12 +646,11 @@ static void device_init_rd1_ring(struct vnt_private *pDevice)
for (i = 0; i < pDevice->sOpts.nRxDescs1; i ++, curr += sizeof(SRxDesc)) {
pDesc = &(pDevice->aRD1Ring[i]);
pDesc->pRDInfo = alloc_rd_info();
- ASSERT(pDesc->pRDInfo);
+
if (!device_alloc_rx_buf(pDevice, pDesc))
dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
pDesc->next = &(pDevice->aRD1Ring[(i+1) % pDevice->sOpts.nRxDescs1]);
- pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
}
@@ -699,20 +697,20 @@ static void device_init_td0_ring(struct vnt_private *pDevice)
{
int i;
dma_addr_t curr;
- PSTxDesc pDesc;
+ struct vnt_tx_desc *pDesc;
curr = pDevice->td0_pool_dma;
- for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++, curr += sizeof(STxDesc)) {
+ for (i = 0; i < pDevice->sOpts.nTxDescs[0];
+ i++, curr += sizeof(struct vnt_tx_desc)) {
pDesc = &(pDevice->apTD0Rings[i]);
- pDesc->pTDInfo = alloc_td_info();
- ASSERT(pDesc->pTDInfo);
+ pDesc->td_info = alloc_td_info();
+
if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
- pDesc->pTDInfo->buf = pDevice->tx0_bufs + (i)*PKT_BUF_SZ;
- pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma0 + (i)*PKT_BUF_SZ;
+ pDesc->td_info->buf = pDevice->tx0_bufs + (i)*PKT_BUF_SZ;
+ pDesc->td_info->buf_dma = pDevice->tx_bufs_dma0 + (i)*PKT_BUF_SZ;
}
pDesc->next = &(pDevice->apTD0Rings[(i+1) % pDevice->sOpts.nTxDescs[0]]);
- pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
- pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
+ pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}
if (i > 0)
@@ -724,21 +722,21 @@ static void device_init_td1_ring(struct vnt_private *pDevice)
{
int i;
dma_addr_t curr;
- PSTxDesc pDesc;
+ struct vnt_tx_desc *pDesc;
/* Init the TD ring entries */
curr = pDevice->td1_pool_dma;
- for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++, curr += sizeof(STxDesc)) {
+ for (i = 0; i < pDevice->sOpts.nTxDescs[1];
+ i++, curr += sizeof(struct vnt_tx_desc)) {
pDesc = &(pDevice->apTD1Rings[i]);
- pDesc->pTDInfo = alloc_td_info();
- ASSERT(pDesc->pTDInfo);
+ pDesc->td_info = alloc_td_info();
+
if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
- pDesc->pTDInfo->buf = pDevice->tx1_bufs + (i) * PKT_BUF_SZ;
- pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma1 + (i) * PKT_BUF_SZ;
+ pDesc->td_info->buf = pDevice->tx1_bufs + (i) * PKT_BUF_SZ;
+ pDesc->td_info->buf_dma = pDevice->tx_bufs_dma1 + (i) * PKT_BUF_SZ;
}
pDesc->next = &(pDevice->apTD1Rings[(i + 1) % pDevice->sOpts.nTxDescs[1]]);
- pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
- pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
+ pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}
if (i > 0)
@@ -751,17 +749,11 @@ static void device_free_td0_ring(struct vnt_private *pDevice)
int i;
for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++) {
- PSTxDesc pDesc = &(pDevice->apTD0Rings[i]);
- PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
+ struct vnt_tx_desc *pDesc = &pDevice->apTD0Rings[i];
+ struct vnt_td_info *pTDInfo = pDesc->td_info;
- if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
- dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
- pTDInfo->skb->len, DMA_TO_DEVICE);
-
- if (pTDInfo->skb)
- dev_kfree_skb(pTDInfo->skb);
-
- kfree(pDesc->pTDInfo);
+ dev_kfree_skb(pTDInfo->skb);
+ kfree(pDesc->td_info);
}
}
@@ -770,17 +762,11 @@ static void device_free_td1_ring(struct vnt_private *pDevice)
int i;
for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++) {
- PSTxDesc pDesc = &(pDevice->apTD1Rings[i]);
- PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
-
- if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
- dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
- pTDInfo->skb->len, DMA_TO_DEVICE);
-
- if (pTDInfo->skb)
- dev_kfree_skb(pTDInfo->skb);
+ struct vnt_tx_desc *pDesc = &pDevice->apTD1Rings[i];
+ struct vnt_td_info *pTDInfo = pDesc->td_info;
- kfree(pDesc->pTDInfo);
+ dev_kfree_skb(pTDInfo->skb);
+ kfree(pDesc->td_info);
}
}
@@ -822,7 +808,6 @@ static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pRD)
pRDInfo->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pRDInfo->skb == NULL)
return false;
- ASSERT(pRDInfo->skb);
pRDInfo->skb_dma =
dma_map_single(&pDevice->pcid->dev,
@@ -856,7 +841,7 @@ static const u8 fallback_rate1[5][5] = {
};
static int vnt_int_report_rate(struct vnt_private *priv,
- PDEVICE_TD_INFO context, u8 tsr0, u8 tsr1)
+ struct vnt_td_info *context, u8 tsr0, u8 tsr1)
{
struct vnt_tx_fifo_head *fifo_head;
struct ieee80211_tx_info *info;
@@ -917,23 +902,23 @@ static int vnt_int_report_rate(struct vnt_private *priv,
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
{
- PSTxDesc pTD;
+ struct vnt_tx_desc *pTD;
int works = 0;
unsigned char byTsr0;
unsigned char byTsr1;
for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
- if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
+ if (pTD->td0.owner == OWNED_BY_NIC)
break;
if (works++ > 15)
break;
- byTsr0 = pTD->m_td0TD0.byTSR0;
- byTsr1 = pTD->m_td0TD0.byTSR1;
+ byTsr0 = pTD->td0.tsr0;
+ byTsr1 = pTD->td0.tsr1;
/* Only the status of first TD in the chain is correct */
- if (pTD->m_td1TD1.byTCR & TCR_STP) {
- if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
+ if (pTD->td1.tcr & TCR_STP) {
+ if ((pTD->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
if (!(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
@@ -947,13 +932,13 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
}
if (byTsr1 & TSR1_TERR) {
- if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
+ if ((pTD->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
(int)uIdx, byTsr1, byTsr0);
}
}
- vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
+ vnt_int_report_rate(pDevice, pTD->td_info, byTsr0, byTsr1);
device_free_tx_buf(pDevice, pTD);
pDevice->iTDUsed[uIdx]--;
@@ -975,23 +960,17 @@ static void device_error(struct vnt_private *pDevice, unsigned short status)
}
}
-static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
+static void device_free_tx_buf(struct vnt_private *pDevice,
+ struct vnt_tx_desc *pDesc)
{
- PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
+ struct vnt_td_info *pTDInfo = pDesc->td_info;
struct sk_buff *skb = pTDInfo->skb;
- /* pre-allocated buf_dma can't be unmapped. */
- if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) {
- dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
- skb->len, DMA_TO_DEVICE);
- }
-
if (skb)
ieee80211_tx_status_irqsafe(pDevice->hw, skb);
- pTDInfo->skb_dma = 0;
pTDInfo->skb = NULL;
- pTDInfo->byFlags = 0;
+ pTDInfo->flags = 0;
}
static void vnt_check_bb_vga(struct vnt_private *priv)
@@ -1180,7 +1159,7 @@ static irqreturn_t vnt_interrupt(int irq, void *arg)
static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- PSTxDesc head_td;
+ struct vnt_tx_desc *head_td;
u32 dma_idx;
unsigned long flags;
@@ -1198,12 +1177,12 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td = priv->apCurrTD[dma_idx];
- head_td->m_td1TD1.byTCR = 0;
+ head_td->td1.tcr = 0;
- head_td->pTDInfo->skb = skb;
+ head_td->td_info->skb = skb;
if (dma_idx == TYPE_AC0DMA)
- head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+ head_td->td_info->flags = TD_FLAGS_NETIF_SKB;
priv->apCurrTD[dma_idx] = head_td->next;
@@ -1211,26 +1190,22 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
- if (MACbIsRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- MACbPSWakeup(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
priv->bPWBitOn = false;
/* Set TSR1 & ReqCount in TxDescHead */
- head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- head_td->m_td1TD1.wReqCount =
- cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+ head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
+ head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);
- head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
+ head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);
/* Poll Transmit the adapter */
wmb();
- head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+ head_td->td0.owner = OWNED_BY_NIC;
wmb(); /* second memory barrier */
- if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+ if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
MACvTransmitAC0(priv->PortOffset);
else
MACvTransmit0(priv->PortOffset);
@@ -1418,7 +1393,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->current_aid = conf->aid;
- if (changed & BSS_CHANGED_BSSID) {
+ if (changed & BSS_CHANGED_BSSID && conf->bssid) {
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -1483,8 +1458,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
- if (conf->assoc) {
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+ priv->op_mode != NL80211_IFTYPE_AP) {
+ if (conf->assoc && conf->beacon_rate) {
CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
@@ -1774,6 +1750,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
return -ENODEV;
}
+ if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
+
INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
/* do reset */
@@ -1811,6 +1793,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
ieee80211_hw_set(priv->hw, SIGNAL_DBM);
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(priv->hw, SUPPORTS_PS);
priv->hw->max_signal = 100;
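The probe() additions above do two things: dma_set_mask() refuses to bind when 32-bit DMA addressing cannot be provided (the chip drives only 32 address bits), and SUPPORTS_PS advertises the power-save capability exercised by the power.c change further down. A compressed kernel-style sketch of the DMA-mask guard as it commonly appears in a PCI probe (names abbreviated, later unwinding omitted):

static int toy_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
	int err = pci_enable_device(pcid);

	if (err)
		return err;

	/* Descriptor rings must live below 4 GB; bail out cleanly if not. */
	err = dma_set_mask(&pcid->dev, DMA_BIT_MASK(32));
	if (err) {
		pci_disable_device(pcid);
		return err;
	}

	return 0;
}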
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index b25ee962558d..e14eed160a19 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -144,7 +144,7 @@ bool vnt_receive_frame(struct vnt_private *priv, PSRxDesc curr_rd)
priv->rx_buf_sz, DMA_FROM_DEVICE);
frame_size = le16_to_cpu(curr_rd->m_rd1RD1.wReqCount)
- - cpu_to_le16(curr_rd->m_rd0RD0.wResCount);
+ - le16_to_cpu(curr_rd->m_rd0RD0.wResCount);
if ((frame_size > 2364) || (frame_size < 33)) {
/* Frame Size error drop this packet.*/
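The one-line dpc.c fix above is a typing fix more than a behavioural one: wReqCount and wResCount are both __le16, so the logically correct direction for both operands is le16_to_cpu(). The old cpu_to_le16() on the second operand computed the same bytes (both helpers are the identity on little-endian and the same byte swap on big-endian) but mixed annotated types, which sparse rightly warns about. A runnable userspace sketch of the corrected arithmetic, using glibc's htole16()/le16toh() in place of the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two descriptor fields as the device leaves them: little-endian. */
	uint16_t req_count = htole16(2048);	/* buffer size given to the NIC */
	uint16_t res_count = htole16(548);	/* bytes the NIC did not use */

	/* Convert both to host order before doing arithmetic on them. */
	unsigned int frame_size = le16toh(req_count) - le16toh(res_count);

	printf("frame_size = %u\n", frame_size);	/* prints 1500 */
	return 0;
}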
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index aed530f022b8..3dfd333475c0 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -186,7 +186,6 @@ void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
{
unsigned char byOrgValue;
- ASSERT(byLoopbackMode < 3);
byLoopbackMode <<= 6;
/* set TCR */
VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
@@ -374,7 +373,6 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x10);
pr_debug(" DBG_PORT80(0x10)\n");
return false;
}
@@ -384,7 +382,6 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x11);
pr_debug(" DBG_PORT80(0x11)\n");
return false;
}
@@ -398,7 +395,6 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x12);
pr_debug(" DBG_PORT80(0x12)\n");
return false;
}
@@ -436,7 +432,6 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x20);
pr_debug(" DBG_PORT80(0x20)\n");
return false;
}
@@ -446,7 +441,6 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x21);
pr_debug(" DBG_PORT80(0x21)\n");
return false;
}
@@ -461,7 +455,6 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x24);
pr_debug(" DBG_PORT80(0x24)\n");
return false;
}
@@ -486,13 +479,11 @@ bool MACbSafeStop(void __iomem *dwIoBase)
MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
if (!MACbSafeRxOff(dwIoBase)) {
- DBG_PORT80(0xA1);
pr_debug(" MACbSafeRxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
}
if (!MACbSafeTxOff(dwIoBase)) {
- DBG_PORT80(0xA2);
pr_debug(" MACbSafeTxOff == false)\n");
MACbSafeSoftwareReset(dwIoBase);
return false;
@@ -590,9 +581,6 @@ void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr
break;
}
- if (ww == W_MAX_TIMEOUT)
- DBG_PORT80(0x13);
-
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN)
VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN);
@@ -627,8 +615,6 @@ void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr
if (!(byData & DMACTL_RUN))
break;
}
- if (ww == W_MAX_TIMEOUT)
- DBG_PORT80(0x14);
VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN)
@@ -666,8 +652,6 @@ void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
if (!(byData & DMACTL_RUN))
break;
}
- if (ww == W_MAX_TIMEOUT)
- DBG_PORT80(0x25);
VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, dwCurrDescAddr);
if (byOrgDMACtl & DMACTL_RUN)
@@ -706,7 +690,6 @@ void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x26);
pr_debug(" DBG_PORT80(0x26)\n");
}
VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, dwCurrDescAddr);
@@ -807,7 +790,6 @@ bool MACbPSWakeup(void __iomem *dwIoBase)
break;
}
if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x36);
pr_debug(" DBG_PORT80(0x33)\n");
return false;
}
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index be3c4e949b6a..06e6b9d871c4 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -157,10 +157,18 @@ PSbIsNextTBTTWakeUp(
struct ieee80211_conf *conf = &hw->conf;
bool bWakeUp = false;
- if (conf->listen_interval == 1) {
- /* Turn on wake up to listen next beacon */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- bWakeUp = true;
+ if (conf->listen_interval > 1) {
+ if (!pDevice->wake_up_count)
+ pDevice->wake_up_count = conf->listen_interval;
+
+ --pDevice->wake_up_count;
+
+ if (pDevice->wake_up_count == 1) {
+ /* Turn on wake up to listen next beacon */
+ MACvRegBitsOn(pDevice->PortOffset,
+ MAC_REG_PSCTL, PSCTL_LNBCN);
+ bWakeUp = true;
+ }
}
return bWakeUp;
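The power.c change above stops forcing a wake-up on every beacon and instead counts TBTTs down from listen_interval, arming the listen-next-beacon bit one interval before the countdown expires, with the counter kept in the new vnt_private::wake_up_count field added in device.h. A standalone sketch of that countdown (plain C with hypothetical names, modelling the logic only):

#include <stdbool.h>
#include <stdio.h>

static unsigned int wake_up_count;

/* Called once per TBTT; returns true when the NIC should catch a beacon. */
static bool next_tbtt_wakes_up(unsigned int listen_interval)
{
	if (listen_interval <= 1)
		return false;	/* the patched driver leaves this case alone */

	if (!wake_up_count)
		wake_up_count = listen_interval;	/* reload the countdown */

	--wake_up_count;

	return wake_up_count == 1;	/* wake for the next beacon */
}

int main(void)
{
	for (int tbtt = 0; tbtt < 9; tbtt++)
		printf("TBTT %d: wake=%d\n", tbtt, next_tbtt_wakes_up(3));
	return 0;
}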
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 7626f635f160..c537321444be 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -39,66 +39,66 @@
#include "rf.h"
#include "baseband.h"
-#define BY_AL2230_REG_LEN 23 //24bit
+#define BY_AL2230_REG_LEN 23 /* 24bit */
#define CB_AL2230_INIT_SEQ 15
-#define SWITCH_CHANNEL_DELAY_AL2230 200 //us
+#define SWITCH_CHANNEL_DELAY_AL2230 200 /* us */
#define AL2230_PWR_IDX_LEN 64
-#define BY_AL7230_REG_LEN 23 //24bit
+#define BY_AL7230_REG_LEN 23 /* 24bit */
#define CB_AL7230_INIT_SEQ 16
-#define SWITCH_CHANNEL_DELAY_AL7230 200 //us
+#define SWITCH_CHANNEL_DELAY_AL7230 200 /* us */
#define AL7230_PWR_IDX_LEN 64
static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
- 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
+ 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+ 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
- 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
- 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
- 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
- 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
- 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
- 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
- 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
- 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
+ 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
- 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
- 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
- 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW // channel = 14, Tf = 2412M
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
@@ -168,240 +168,240 @@ static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
-// 40MHz reference frequency
-// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
+/* 40MHz reference frequency
+ * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.*/
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
- 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2
- 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 5FDFA3
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11b/g // Need modify for 11a
- // RoberYu:20050113, Rev0.47 Regsiter Setting Guide
- 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B55
+ 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+ 0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
+ 0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g // Need modify for 11a */
+ /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
+ 0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 860207
+ 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */
0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: E0600A
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
- // RoberYu:20050113, Rev0.47 Regsiter Setting Guide
- 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C
+ 0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */
+ 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ /* RoberYu:20050113, Rev0.47 Regsiter Setting Guide */
+ 0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */
0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11a: 12BACF
+ 0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11a: 12BACF */
};
static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
- 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
- 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
- 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11a // Need modify for 11b/g
- 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g, RoberYu:20050113
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+ 0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a // Need modify for 11b/g */
+ 0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
+ 0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
- 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
- 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
+ 0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+ 0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+ 0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
- 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // Need modify for 11b/g
+ 0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* Need modify for 11b/g */
};
static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
- 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
- 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49
- 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
-
- // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
- 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
- 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
-
- // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
-
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
- 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
- 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
- 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
- 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
- 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
- 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
-
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
- 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
- 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
- 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
- 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
- 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
- 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
+ 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+
+ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+
+ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
+
+ 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+
+ 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
- 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
- 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
- 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
- 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
- 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
-
- // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
-
- // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
- 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
- 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
- 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
- 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
- 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
- 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
- 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
- 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
- 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
- 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
- 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
+ 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+
+ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
+ 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+
+ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
+ 0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
- 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
-
- // 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
-
- // 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- // 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 5035MHz (23)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 5040MHz (24)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 5045MHz (25)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 5055MHz (26)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 5060MHz (27)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 16, Tf = 5080MHz (28)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 34, Tf = 5170MHz (29)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 36, Tf = 5180MHz (30)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 38, Tf = 5190MHz (31)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 40, Tf = 5200MHz (32)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 42, Tf = 5210MHz (33)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 44, Tf = 5220MHz (34)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 46, Tf = 5230MHz (35)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 48, Tf = 5240MHz (36)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 52, Tf = 5260MHz (37)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 56, Tf = 5280MHz (38)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 60, Tf = 5300MHz (39)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 64, Tf = 5320MHz (40)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
- 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
- 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+ 0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+
+ /* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+
+ /* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+ * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 5035MHz (23) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 5040MHz (24) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 5045MHz (25) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 5055MHz (26) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 5060MHz (27) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 16, Tf = 5080MHz (28) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 34, Tf = 5170MHz (29) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 36, Tf = 5180MHz (30) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 38, Tf = 5190MHz (31) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 40, Tf = 5200MHz (32) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 42, Tf = 5210MHz (33) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 44, Tf = 5220MHz (34) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 46, Tf = 5230MHz (35) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 48, Tf = 5240MHz (36) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 52, Tf = 5260MHz (37) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 56, Tf = 5280MHz (38) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 60, Tf = 5300MHz (39) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 64, Tf = 5320MHz (40) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+ 0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+ 0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
/*
@@ -438,13 +438,13 @@ static bool s_bAL7230Init(struct vnt_private *priv)
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Calibration */
- MACvTimer0MicroSDelay(dwIoBase, 150);//150us
+ MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
/* TXDCOC:active, RCK:disable */
bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+ MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
/* TXDCOC:disable, RCK:active */
bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+ MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
@@ -457,7 +457,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
+ VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return bResult;
}
@@ -557,16 +557,16 @@ static bool RFbAL2230Init(struct vnt_private *priv)
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
- MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us
+ MACvTimer0MicroSDelay(dwIoBase, 30); /* delay 30 us */
/* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- MACvTimer0MicroSDelay(dwIoBase, 150);//150us
+ MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+ MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+ MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
@@ -575,7 +575,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
SOFTPWRCTL_TXPEINV));
/* 3-wire control for power saving mode */
- VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
+ VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
return bResult;
}
@@ -661,11 +661,11 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
case RF_AL2230S:
bResult = RFbAL2230SelectChannel(priv, byChannel);
break;
- //{{ RobertYu: 20050104
+ /*{{ RobertYu: 20050104 */
case RF_AIROHA7230:
bResult = s_bAL7230SelectChannel(priv, byChannel);
break;
- //}} RobertYu
+ /*}} RobertYu */
case RF_NOTHING:
bResult = true;
break;
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 2ea21e2b00f2..b5fc3eed06fb 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -33,18 +33,18 @@
#include "device.h"
/*--------------------- Export Definitions -------------------------*/
-//
-// Baseband RF pair definition in eeprom (Bits 6..0)
-//
+/*
+ * Baseband RF pair definition in eeprom (Bits 6..0)
+ */
#define RF_RFMD2959 0x01
#define RF_MAXIMAG 0x02
#define RF_AIROHA 0x03
#define RF_UW2451 0x05
#define RF_MAXIMG 0x06
-#define RF_MAXIM2829 0x07 // RobertYu: 20041118
-#define RF_UW2452 0x08 // RobertYu: 20041210
-#define RF_AIROHA7230 0x0a // RobertYu: 20050104
+#define RF_MAXIM2829 0x07 /* RobertYu: 20041118 */
+#define RF_UW2452 0x08 /* RobertYu: 20041210 */
+#define RF_AIROHA7230 0x0a /* RobertYu: 20050104 */
#define RF_UW2453 0x0b
#define RF_VT3226 0x09
@@ -63,9 +63,9 @@
#define ZONE_MKK 6
#define ZONE_ISRAEL 7
-//[20050104] CB_MAXIM2829_CHANNEL_5G_HIGH, CB_UW2452_CHANNEL_5G_HIGH: 40==>41
-#define CB_MAXIM2829_CHANNEL_5G_HIGH 41 //Index41: channel = 100, Tf = 5500MHz, set the (A3:A0=0101) D6=1
-#define CB_UW2452_CHANNEL_5G_HIGH 41 //[20041210] Index41: channel = 100, Tf = 5500MHz, change VCO2->VCO3
+/* [20050104] CB_MAXIM2829_CHANNEL_5G_HIGH, CB_UW2452_CHANNEL_5G_HIGH: 40==>41 */
+#define CB_MAXIM2829_CHANNEL_5G_HIGH 41 /* Index41: channel = 100, Tf = 5500MHz, set the (A3:A0=0101) D6=1 */
+#define CB_UW2452_CHANNEL_5G_HIGH 41 /* [20041210] Index41: channel = 100, Tf = 5500MHz, change VCO2->VCO3 */
/*--------------------- Export Classes ----------------------------*/
@@ -93,8 +93,8 @@ RFvRSSITodBm(
long *pldBm
);
-//{{ RobertYu: 20050104
+/* {{ RobertYu: 20050104 */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, u16, u16);
-//}} RobertYu
+/* }} RobertYu */
-#endif // __RF_H__
+#endif /* __RF_H__ */
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 534338c46619..5875d655dd55 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -130,7 +130,7 @@ s_vGenerateTxParameter(
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
unsigned char *pbyTxBufferAddr,
- unsigned int uDMAIdx, PSTxDesc pHeadTD,
+ unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
unsigned int uNodeIndex);
static
@@ -387,7 +387,6 @@ s_uGetDataDuration(
break;
}
- ASSERT(false);
return 0;
}
@@ -1028,10 +1027,10 @@ s_vGenerateTxParameter(
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
unsigned char *pbyTxBufferAddr,
- unsigned int uDMAIdx, PSTxDesc pHeadTD,
+ unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
unsigned int is_pspoll)
{
- PDEVICE_TD_INFO td_info = pHeadTD->pTDInfo;
+ struct vnt_td_info *td_info = pHeadTD->td_info;
struct sk_buff *skb = td_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1048,7 +1047,7 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
unsigned int cbReqCount = 0;
bool bNeedACK = (bool)(fifo_ctl & FIFOCTL_NEEDACK);
bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
- PSTxDesc ptdCurr;
+ struct vnt_tx_desc *ptdCurr;
unsigned int cbHeaderLength = 0;
void *pvRrvTime;
struct vnt_mic_hdr *pMICHDR;
@@ -1089,7 +1088,7 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
/* Set RrvTime/RTS/CTS Buffer */
- wTxBufSize = sizeof(STxBufHead);
+ wTxBufSize = sizeof(struct vnt_tx_fifo_head);
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {/* 802.11g packet */
if (byFBOption == AUTO_FB_NONE) {
@@ -1193,17 +1192,15 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
hdr->duration_id = uDuration;
cbReqCount = cbHeaderLength + uPadding + skb->len;
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ pbyBuffer = (unsigned char *)pHeadTD->td_info->buf;
uLength = cbHeaderLength + uPadding;
/* Copy the Packet into a tx Buffer */
memcpy((pbyBuffer + uLength), skb->data, skb->len);
- ptdCurr = (PSTxDesc)pHeadTD;
+ ptdCurr = pHeadTD;
- ptdCurr->pTDInfo->dwReqCount = cbReqCount;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
+ ptdCurr->td_info->req_count = (u16)cbReqCount;
return cbHeaderLength;
}
@@ -1276,9 +1273,9 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
}
int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
- PSTxDesc head_td, struct sk_buff *skb)
+ struct vnt_tx_desc *head_td, struct sk_buff *skb)
{
- PDEVICE_TD_INFO td_info = head_td->pTDInfo;
+ struct vnt_td_info *td_info = head_td->td_info;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
struct ieee80211_rate *rate;
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index b9bd1639b13e..1e30ecb5c63c 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -192,9 +192,9 @@ struct vnt_tx_short_buf_head {
} __packed;
int vnt_generate_fifo_header(struct vnt_private *, u32,
- PSTxDesc head_td, struct sk_buff *);
+ struct vnt_tx_desc *head_td, struct sk_buff *);
int vnt_beacon_make(struct vnt_private *, struct ieee80211_vif *);
int vnt_beacon_enable(struct vnt_private *, struct ieee80211_vif *,
struct ieee80211_bss_conf *);
-#endif // __RXTX_H__
+#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index cc63dc8d47f7..85fe0464cfb3 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -37,35 +37,23 @@
/* For memory mapped IO */
-#define VNSvInPortB(dwIOAddress, pbyData) \
-do { \
- *(pbyData) = ioread8(dwIOAddress); \
-} while (0)
+#define VNSvInPortB(dwIOAddress, pbyData) \
+ (*(pbyData) = ioread8(dwIOAddress))
-#define VNSvInPortW(dwIOAddress, pwData) \
-do { \
- *(pwData) = ioread16(dwIOAddress); \
-} while (0)
+#define VNSvInPortW(dwIOAddress, pwData) \
+ (*(pwData) = ioread16(dwIOAddress))
-#define VNSvInPortD(dwIOAddress, pdwData) \
-do { \
- *(pdwData) = ioread32(dwIOAddress); \
-} while (0)
+#define VNSvInPortD(dwIOAddress, pdwData) \
+ (*(pdwData) = ioread32(dwIOAddress))
-#define VNSvOutPortB(dwIOAddress, byData) \
-do { \
- iowrite8((u8)byData, dwIOAddress); \
-} while (0)
+#define VNSvOutPortB(dwIOAddress, byData) \
+ iowrite8((u8)(byData), dwIOAddress)
-#define VNSvOutPortW(dwIOAddress, wData) \
-do { \
- iowrite16((u16)wData, dwIOAddress); \
-} while (0)
+#define VNSvOutPortW(dwIOAddress, wData) \
+ iowrite16((u16)(wData), dwIOAddress)
-#define VNSvOutPortD(dwIOAddress, dwData) \
-do { \
- iowrite32((u32)dwData, dwIOAddress); \
-} while (0)
+#define VNSvOutPortD(dwIOAddress, dwData) \
+ iowrite32((u32)(dwData), dwIOAddress)
#define PCAvDelayByIO(uDelayUnit) \
do { \
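The upc.h rework above drops the do { } while (0) wrappers because each
macro body is now a single expression; the wrapper only matters when a
macro expands to more than one statement. A minimal userspace sketch of
the difference (hypothetical names, not part of this patch):

#include <stdio.h>

/* Two statements: must be wrapped so the macro acts as one statement
 * under an unbraced if/else. */
#define LOG_TWICE(msg) do { puts(msg); puts(msg); } while (0)

/* Single expression: parenthesizing is enough, as with the reworked
 * VNSvInPortB()/VNSvOutPortD() macros; note the argument itself is
 * also parenthesized before the cast. */
#define STORE_BYTE(p, v) (*(p) = (unsigned char)(v))

int main(void)
{
	unsigned char b = 0;

	if (b)
		LOG_TWICE("never");		/* both puts() calls stay guarded */
	else
		STORE_BYTE(&b, 0x40 + 2);	/* cast applies to the whole sum */

	printf("%u\n", b);			/* prints 66 */
	return 0;
}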
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f97323f19acf..af572d718135 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->current_aid = conf->aid;
- if (changed & BSS_CHANGED_BSSID)
+ if (changed & BSS_CHANGED_BSSID && conf->bssid)
vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 8116791f4f06..da075f485298 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -45,8 +45,11 @@
#include "usbpipe.h"
static const u16 vnt_time_stampoff[2][MAX_RATE] = {
- {384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23},/* Long Preamble */
- {384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},/* Short Preamble */
+ /* Long Preamble */
+ {384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23},
+
+ /* Short Preamble */
+ {384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},
};
static const u16 vnt_fb_opt0[2][5] = {
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index 062d9c5ca4bd..51bbf468fe45 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -37,17 +37,26 @@ choice
bool "SDIO support"
depends on MMC
---help---
- This module adds support for the SDIO interface
- of adapters using WILC chipset. Select this if
- your platform is using the SDIO bus.
+ This module adds support for the SDIO interface of adapters using
+ the WILC1000 chipset. The Atmel WILC1000 SDIO is a full-speed
+ interface that meets the SDIO card specification version 2.0 and
+ supports the 1-bit/4-bit SD transfer mode at clock rates of 0-50
+ MHz. The host can use this interface to read from and write to any
+ register within the chip, as well as to configure the WILC1000 for
+ data DMA. To use this interface, pin 9 (SDIO_SPI_CFG) must be
+ grounded. Select this if your platform is using the SDIO bus.
config WILC1000_SPI
depends on SPI
bool "SPI support"
---help---
- This module adds support for the SPI interface
- of adapters using WILC chipset. Select this if
- your platform is using the SPI bus.
+ This module adds support for the SPI interface of adapters using
+ the WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral
+ Interface (SPI) that operates as an SPI slave. This SPI interface
+ can be used for control and for serial I/O of 802.11 data. The SPI
+ is a full-duplex slave synchronous serial interface that is
+ available immediately following reset when pin 9 (SDIO_SPI_CFG) is
+ tied to VDDIO. Select this if your platform is using the SPI bus.
endchoice
config WILC1000_HW_OOB_INTR
@@ -55,5 +64,8 @@ config WILC1000_HW_OOB_INTR
depends on WILC1000 && WILC1000_SDIO
default n
---help---
- If your platform don't recognize SDIO IRQ, connect chipset external IRQ pin
- and check this option. Or, Use this to get all interrupts including SDIO interrupts.
+ This option enables out-of-band interrupt support for the WILC1000
+ chipset. This OOB interrupt is intended to provide a faster
+ interrupt mechanism for SDIO host controllers that don't support
+ SDIO interrupts. Select this option if the SDIO host controller in
+ your platform doesn't support the SDIO time-division interrupt.
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index a78c4d529a58..6be8a920706a 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -25,10 +25,10 @@ ccflags-$(CONFIG_WILC1000_PREALLOCATE_AT_LOADING_DRIVER) += -DMEMORY_STATIC \
ccflags-$(CONFIG_WILC1000_DYNAMICALLY_ALLOCATE_MEMROY) += -DWILC_NORMAL_ALLOC
-wilc1000-objs := wilc_wfi_netdevice.o wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
- wilc_memory.o wilc_msgqueue.o wilc_sleep.o wilc_strutils.o \
- wilc_timer.o coreconfigurator.o host_interface.o \
- fifo_buffer.o wilc_sdio.o wilc_spi.o wilc_wlan_cfg.o wilc_debugfs.o
+wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
+ wilc_memory.o wilc_msgqueue.o \
+ coreconfigurator.o host_interface.o \
+ wilc_sdio.o wilc_spi.o wilc_wlan_cfg.o wilc_debugfs.o
wilc1000-$(CONFIG_WILC1000_SDIO) += linux_wlan_sdio.o
wilc1000-$(CONFIG_WILC1000_SPI) += linux_wlan_spi.o
diff --git a/drivers/staging/wilc1000/coreconfigsimulator.h b/drivers/staging/wilc1000/coreconfigsimulator.h
deleted file mode 100644
index 5e01f8e4a41d..000000000000
--- a/drivers/staging/wilc1000/coreconfigsimulator.h
+++ /dev/null
@@ -1,17 +0,0 @@
-
-/*!
- * @file coreconfigsimulator.h
- * @brief
- * @author
- * @sa coreconfigsimulator.c
- * @date 1 Mar 2012
- * @version 1.0
- */
-
-#ifndef CORECONFIGSIMULATOR_H
-#define CORECONFIGSIMULATOR_H
-
-extern s32 CoreConfigSimulatorInit(void);
-extern s32 CoreConfigSimulatorDeInit(void);
-
-#endif
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index ed6ac45c0385..16a0abc970c0 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -167,7 +167,6 @@ extern void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length);
static struct semaphore SemHandleSendPkt;
static struct semaphore SemHandlePktResp;
-static s8 *gps8ConfigPacket;
static tstrConfigPktInfo gstrConfigPktInfo;
@@ -544,21 +543,21 @@ INLINE u8 get_from_ds(u8 *header)
/* header and updates the MAC Address in the allocated 'addr' variable. */
INLINE void get_address1(u8 *pu8msa, u8 *addr)
{
- WILC_memcpy(addr, pu8msa + 4, 6);
+ memcpy(addr, pu8msa + 4, 6);
}
/* This function extracts the MAC Address in 'address2' field of the MAC */
/* header and updates the MAC Address in the allocated 'addr' variable. */
INLINE void get_address2(u8 *pu8msa, u8 *addr)
{
- WILC_memcpy(addr, pu8msa + 10, 6);
+ memcpy(addr, pu8msa + 10, 6);
}
/* This function extracts the MAC Address in 'address3' field of the MAC */
/* header and updates the MAC Address in the allocated 'addr' variable. */
INLINE void get_address3(u8 *pu8msa, u8 *addr)
{
- WILC_memcpy(addr, pu8msa + 16, 6);
+ memcpy(addr, pu8msa + 16, 6);
}
/* This function extracts the BSSID from the incoming WLAN packet based on */
@@ -605,7 +604,7 @@ INLINE u16 get_cap_info(u8 *data)
{
u16 cap_info = 0;
u16 index = MAC_HDR_LEN;
- tenuFrmSubtype st = BEACON;
+ tenuFrmSubtype st;
st = get_sub_type(data);
@@ -674,17 +673,8 @@ s32 CoreConfiguratorInit(void)
sema_init(&SemHandleSendPkt, 1);
sema_init(&SemHandlePktResp, 0);
- gps8ConfigPacket = (s8 *)WILC_MALLOC(MAX_PACKET_BUFF_SIZE);
- if (gps8ConfigPacket == NULL) {
- PRINT_ER("failed in gps8ConfigPacket allocation\n");
- s32Error = WILC_NO_MEM;
- goto _fail_;
- }
-
- WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
- WILC_memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
-_fail_:
+ memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
return s32Error;
}
@@ -706,11 +696,10 @@ u8 *get_tim_elm(u8 *pu8msa, u16 u16RxLen, u16 u16TagParamOffset)
/* Search for the TIM Element Field and return if the element is found */
while (u16index < (u16RxLen - FCS_LEN)) {
- if (pu8msa[u16index] == ITIM) {
+ if (pu8msa[u16index] == ITIM)
return &pu8msa[u16index];
- } else {
+ else
u16index += (IE_HDR_LEN + pu8msa[u16index + 1]);
- }
}
return 0;
@@ -811,8 +800,11 @@ s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo)
u32 u32Tsf_Lo;
u32 u32Tsf_Hi;
- pstrNetworkInfo = (tstrNetworkInfo *)WILC_MALLOC(sizeof(tstrNetworkInfo));
- WILC_memset((void *)(pstrNetworkInfo), 0, sizeof(tstrNetworkInfo));
+ pstrNetworkInfo = kmalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
+ if (!pstrNetworkInfo)
+ return -ENOMEM;
+
+ memset((void *)(pstrNetworkInfo), 0, sizeof(tstrNetworkInfo));
pstrNetworkInfo->s8rssi = pu8WidVal[0];
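The kmalloc() + memset() pair introduced in this hunk is commonly
collapsed into kzalloc(), which allocates zeroed memory in one call; a
sketch of that alternative (an assumption, not what this patch does):

	pstrNetworkInfo = kzalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
	if (!pstrNetworkInfo)
		return -ENOMEM;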
@@ -855,17 +847,19 @@ s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo)
/* Get DTIM Period */
pu8TimElm = get_tim_elm(pu8msa, (u16RxLen + FCS_LEN), u8index);
- if (pu8TimElm != 0) {
+ if (pu8TimElm != 0)
pstrNetworkInfo->u8DtimPeriod = pu8TimElm[3];
- }
pu8IEs = &pu8msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN];
u16IEsLen = u16RxLen - (MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN);
if (u16IEsLen > 0) {
- pstrNetworkInfo->pu8IEs = (u8 *)WILC_MALLOC(u16IEsLen);
- WILC_memset((void *)(pstrNetworkInfo->pu8IEs), 0, u16IEsLen);
+ pstrNetworkInfo->pu8IEs = kmalloc(u16IEsLen, GFP_KERNEL);
+ if (!pstrNetworkInfo->pu8IEs)
+ return -ENOMEM;
- WILC_memcpy(pstrNetworkInfo->pu8IEs, pu8IEs, u16IEsLen);
+ memset((void *)(pstrNetworkInfo->pu8IEs), 0, u16IEsLen);
+
+ memcpy(pstrNetworkInfo->pu8IEs, pu8IEs, u16IEsLen);
}
pstrNetworkInfo->u16IEsLen = u16IEsLen;
@@ -893,13 +887,13 @@ s32 DeallocateNetworkInfo(tstrNetworkInfo *pstrNetworkInfo)
if (pstrNetworkInfo != NULL) {
if (pstrNetworkInfo->pu8IEs != NULL) {
- WILC_FREE(pstrNetworkInfo->pu8IEs);
+ kfree(pstrNetworkInfo->pu8IEs);
pstrNetworkInfo->pu8IEs = NULL;
} else {
s32Error = WILC_FAIL;
}
- WILC_FREE(pstrNetworkInfo);
+ kfree(pstrNetworkInfo);
pstrNetworkInfo = NULL;
} else {
@@ -929,8 +923,11 @@ s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
u8 *pu8IEs = 0;
u16 u16IEsLen = 0;
- pstrConnectRespInfo = (tstrConnectRespInfo *)WILC_MALLOC(sizeof(tstrConnectRespInfo));
- WILC_memset((void *)(pstrConnectRespInfo), 0, sizeof(tstrConnectRespInfo));
+ pstrConnectRespInfo = kmalloc(sizeof(tstrConnectRespInfo), GFP_KERNEL);
+ if (!pstrConnectRespInfo)
+ return -ENOMEM;
+
+ memset((void *)(pstrConnectRespInfo), 0, sizeof(tstrConnectRespInfo));
/* u16AssocRespLen = pu8Buffer[0]; */
u16AssocRespLen = (u16)u32BufferLen;
@@ -949,10 +946,13 @@ s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
pu8IEs = &pu8Buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
u16IEsLen = u16AssocRespLen - (CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN);
- pstrConnectRespInfo->pu8RespIEs = (u8 *)WILC_MALLOC(u16IEsLen);
- WILC_memset((void *)(pstrConnectRespInfo->pu8RespIEs), 0, u16IEsLen);
+ pstrConnectRespInfo->pu8RespIEs = kmalloc(u16IEsLen, GFP_KERNEL);
+ if (!pstrConnectRespInfo->pu8RespIEs)
+ return -ENOMEM;
- WILC_memcpy(pstrConnectRespInfo->pu8RespIEs, pu8IEs, u16IEsLen);
+ memset((void *)(pstrConnectRespInfo->pu8RespIEs), 0, u16IEsLen);
+
+ memcpy(pstrConnectRespInfo->pu8RespIEs, pu8IEs, u16IEsLen);
pstrConnectRespInfo->u16RespIEsLen = u16IEsLen;
}
@@ -978,13 +978,13 @@ s32 DeallocateAssocRespInfo(tstrConnectRespInfo *pstrConnectRespInfo)
if (pstrConnectRespInfo != NULL) {
if (pstrConnectRespInfo->pu8RespIEs != NULL) {
- WILC_FREE(pstrConnectRespInfo->pu8RespIEs);
+ kfree(pstrConnectRespInfo->pu8RespIEs);
pstrConnectRespInfo->pu8RespIEs = NULL;
} else {
s32Error = WILC_FAIL;
}
- WILC_FREE(pstrConnectRespInfo);
+ kfree(pstrConnectRespInfo);
pstrConnectRespInfo = NULL;
} else {
@@ -1018,13 +1018,12 @@ s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZ
}
}
- pstrSurveyResults = (wid_site_survey_reslts_s *)WILC_MALLOC(u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
- if (pstrSurveyResults == NULL) {
- u32SurveyResultsCount = 0;
- WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
+ pstrSurveyResults = kmalloc_array(u32SurveyResultsCount,
+ sizeof(wid_site_survey_reslts_s), GFP_KERNEL);
+ if (!pstrSurveyResults)
+ return -ENOMEM;
- WILC_memset((void *)(pstrSurveyResults), 0, u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
+ memset((void *)(pstrSurveyResults), 0, u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
u32SurveyResultsCount = 0;
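kmalloc_array() checks the count * size multiplication for overflow
before allocating; since the result is immediately zeroed with memset(),
a kcalloc() call would combine both steps. A sketch of that variant (an
assumption, not part of this patch):

	pstrSurveyResults = kcalloc(u32SurveyResultsCount,
				    sizeof(wid_site_survey_reslts_s),
				    GFP_KERNEL);
	if (!pstrSurveyResults)
		return -ENOMEM;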
@@ -1039,7 +1038,7 @@ s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZ
pu8BufferPtr += 2;
for (j = 0; j < u32SurveyBytesLength; j += SURVEY_RESULT_LENGTH) {
- WILC_memcpy(&pstrSurveyResults[u32SurveyResultsCount], pu8BufferPtr, SURVEY_RESULT_LENGTH);
+ memcpy(&pstrSurveyResults[u32SurveyResultsCount], pu8BufferPtr, SURVEY_RESULT_LENGTH);
pu8BufferPtr += SURVEY_RESULT_LENGTH;
u32SurveyResultsCount++;
}
@@ -1058,7 +1057,7 @@ s32 DeallocateSurveyResults(wid_site_survey_reslts_s *pstrSurveyResults)
s32 s32Error = WILC_SUCCESS;
if (pstrSurveyResults != NULL) {
- WILC_FREE(pstrSurveyResults);
+ kfree(pstrSurveyResults);
}
return s32Error;
@@ -1334,7 +1333,6 @@ void ProcessStrWid(char *pcPacket, s32 *ps32PktLen,
if (g_oper_mode == SET_CFG) {
/* Message Length */
- /* u16MsgLen = WILC_strlen(pu8val); */
u16MsgLen = (u16)s32ValueSize;
/* Length */
@@ -1441,8 +1439,6 @@ void ProcessAdrWid(char *pcPacket, s32 *ps32PktLen,
void ProcessBinWid(char *pcPacket, s32 *ps32PktLen,
tstrWID *pstrWID, u8 *pu8val, s32 s32ValueSize)
{
- /* WILC_ERROR("processing Binary WIDs is not supported\n"); */
-
u16 u16MsgLen = 0;
u16 idx = 0;
s32 s32PktLen = *ps32PktLen;
@@ -1528,11 +1524,10 @@ s32 further_process_response(u8 *resp,
u8 cfg_str[256] = {0};
tenuWIDtype enuWIDtype = WID_UNDEF;
- if (process_wid_num) {
+ if (process_wid_num)
enuWIDtype = get_wid_type(g_wid_num);
- } else {
+ else
enuWIDtype = gastrWIDs[cnt].enuWIDtype;
- }
switch (enuWIDtype) {
@@ -1566,18 +1561,10 @@ s32 further_process_response(u8 *resp,
}
case WID_STR:
- WILC_memcpy(cfg_str, resp + idx, cfg_len);
+ memcpy(cfg_str, resp + idx, cfg_len);
/* cfg_str[cfg_len] = '\0'; //mostafa: no need currently for NULL termination */
- if (process_wid_num) {
- /*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
- * cfg_str);*/
- } else {
- /*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
- * cfg_str);*/
- }
-
if (pstrWIDresult->s32ValueSize >= cfg_len) {
- WILC_memcpy(pstrWIDresult->ps8WidVal, cfg_str, cfg_len); /* mostafa: no need currently for the extra NULL byte */
+ memcpy(pstrWIDresult->ps8WidVal, cfg_str, cfg_len); /* mostafa: no need currently for the extra NULL byte */
pstrWIDresult->s32ValueSize = cfg_len;
} else {
PRINT_ER("allocated WID buffer length is smaller than the received WID Length\n");
@@ -1589,15 +1576,8 @@ s32 further_process_response(u8 *resp,
case WID_ADR:
create_mac_addr(cfg_str, resp + idx);
- WILC_strncpy(pstrWIDresult->ps8WidVal, cfg_str, WILC_strlen(cfg_str));
- pstrWIDresult->ps8WidVal[WILC_strlen(cfg_str)] = '\0';
- if (process_wid_num) {
- /*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
- * cfg_str);*/
- } else {
- /*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
- * cfg_str);*/
- }
+ strncpy(pstrWIDresult->ps8WidVal, cfg_str, strlen(cfg_str));
+ pstrWIDresult->ps8WidVal[strlen(cfg_str)] = '\0';
break;
case WID_IP:
@@ -1606,18 +1586,11 @@ s32 further_process_response(u8 *resp,
MAKE_WORD16(resp[idx + 2], resp[idx + 3])
);
conv_int_to_ip(cfg_str, cfg_int);
- if (process_wid_num) {
- /*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
- * cfg_str);*/
- } else {
- /*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
- * cfg_str);*/
- }
break;
case WID_BIN_DATA:
if (pstrWIDresult->s32ValueSize >= cfg_len) {
- WILC_memcpy(pstrWIDresult->ps8WidVal, resp + idx, cfg_len);
+ memcpy(pstrWIDresult->ps8WidVal, resp + idx, cfg_len);
pstrWIDresult->s32ValueSize = cfg_len;
} else {
PRINT_ER("Allocated WID buffer length is smaller than the received WID Length Err(%d)\n", retval);
@@ -1739,7 +1712,6 @@ s32 ParseResponse(u8 *resp, tstrWID *pstrWIDcfgResult)
s32 ParseWriteResponse(u8 *pu8RespBuffer)
{
s32 s32Error = WILC_FAIL;
- u16 u16RespLen = 0;
u16 u16WIDtype = (u16)WID_NIL;
/* Check whether the received frame is a valid response */
@@ -1748,9 +1720,6 @@ s32 ParseWriteResponse(u8 *pu8RespBuffer)
return WILC_FAIL;
}
- /* Extract Response Length */
- u16RespLen = MAKE_WORD16(pu8RespBuffer[2], pu8RespBuffer[3]);
-
u16WIDtype = MAKE_WORD16(pu8RespBuffer[4], pu8RespBuffer[5]);
/* Check for WID_STATUS ID and then check the length and status value */
@@ -1898,104 +1867,21 @@ s32 ConfigWaitResponse(char *pcRespBuffer, s32 s32MaxRespBuffLen, s32 *ps32Bytes
*ps32BytesRead = gstrConfigPktInfo.s32BytesRead;
}
- WILC_memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
+ memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
return s32Error;
}
-/**
- * @brief sends certain Configuration Packet based on the input WIDs pstrWIDs
- * and retrieves the packet response pu8RxResp
- * @details
- * @param[in] pstrWIDs WIDs to be sent in the configuration packet
- * @param[in] u32WIDsCount number of WIDs to be sent in the configuration packet
- * @param[out] pu8RxResp The received Packet Response
- * @param[out] ps32RxRespLen Length of the received Packet Response
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 1 Mar 2012
- * @version 1.0
- */
-#ifdef SIMULATION
-s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
- u32 u32WIDsCount, bool bRespRequired, u32 drvHandler)
-{
- s32 s32Error = WILC_SUCCESS;
- s32 err = WILC_SUCCESS;
- s32 s32ConfigPacketLen = 0;
- s32 s32RcvdRespLen = 0;
-
- down(&SemHandleSendPkt);
-
- /*set the packet mode*/
- g_oper_mode = u8Mode;
-
- WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
-
- if (CreateConfigPacket(gps8ConfigPacket, &s32ConfigPacketLen, pstrWIDs, u32WIDsCount) != WILC_SUCCESS) {
- s32Error = WILC_FAIL;
- goto End_ConfigPkt;
- }
- /*bug 3878*/
- gstrConfigPktInfo.pcRespBuffer = gps8ConfigPacket;
- gstrConfigPktInfo.s32MaxRespBuffLen = MAX_PACKET_BUFF_SIZE;
- PRINT_INFO(CORECONFIG_DBG, "GLOBAL =bRespRequired =%d\n", bRespRequired);
- gstrConfigPktInfo.bRespRequired = bRespRequired;
-
- s32Error = SendRawPacket(gps8ConfigPacket, s32ConfigPacketLen);
- if (s32Error != WILC_SUCCESS) {
- goto End_ConfigPkt;
- }
-
- WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
-
- ConfigWaitResponse(gps8ConfigPacket, MAX_PACKET_BUFF_SIZE, &s32RcvdRespLen, bRespRequired);
-
-
- if (bRespRequired) {
- /* If the operating Mode is GET, then we expect a response frame from */
- /* the driver. Hence start listening to the port for response */
- if (g_oper_mode == GET_CFG) {
- #if 1
- err = ParseResponse(gps8ConfigPacket, pstrWIDs);
- if (err != 0) {
- s32Error = WILC_FAIL;
- goto End_ConfigPkt;
- } else {
- s32Error = WILC_SUCCESS;
- }
- #endif
- } else {
- err = ParseWriteResponse(gps8ConfigPacket);
- if (err != WRITE_RESP_SUCCESS) {
- s32Error = WILC_FAIL;
- goto End_ConfigPkt;
- } else {
- s32Error = WILC_SUCCESS;
- }
- }
-
-
- }
-
-
-End_ConfigPkt:
- up(&SemHandleSendPkt);
-
- return s32Error;
-}
-#endif
s32 ConfigProvideResponse(char *pcRespBuffer, s32 s32RespLen)
{
s32 s32Error = WILC_SUCCESS;
if (gstrConfigPktInfo.bRespRequired) {
if (s32RespLen <= gstrConfigPktInfo.s32MaxRespBuffLen) {
- WILC_memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, s32RespLen);
+ memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, s32RespLen);
gstrConfigPktInfo.s32BytesRead = s32RespLen;
} else {
- WILC_memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, gstrConfigPktInfo.s32MaxRespBuffLen);
+ memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, gstrConfigPktInfo.s32MaxRespBuffLen);
gstrConfigPktInfo.s32BytesRead = gstrConfigPktInfo.s32MaxRespBuffLen;
PRINT_ER("BusProvideResponse() Response greater than the prepared Buffer Size\n");
}
@@ -2069,17 +1955,10 @@ s32 CoreConfiguratorDeInit(void)
PRINT_D(CORECONFIG_DBG, "CoreConfiguratorDeInit()\n");
- if (gps8ConfigPacket != NULL) {
-
- WILC_FREE(gps8ConfigPacket);
- gps8ConfigPacket = NULL;
- }
return s32Error;
}
-
-#ifndef SIMULATION
/*Using the global handle of the driver*/
extern wilc_wlan_oup_t *gpstrWlanOps;
/**
@@ -2129,7 +2008,6 @@ s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
/**
* get the value
**/
- /* WILC_Sleep(1000); */
counter = 0;
for (counter = 0; counter < u32WIDsCount; counter++) {
pstrWIDs[counter].s32ValueSize = gpstrWlanOps->wlan_cfg_get_value(
@@ -2153,4 +2031,3 @@ s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
return ret;
}
-#endif
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 9059c8df7ce5..3af193543cdb 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -8,7 +8,6 @@
* @version 1.0
*/
-
#ifndef CORECONFIGURATOR_H
#define CORECONFIGURATOR_H
@@ -42,7 +41,6 @@ extern u16 g_num_total_switches;
#define AID_LEN 2
#define IE_HDR_LEN 2
-
/* Operating Mode: SET */
#define SET_CFG 0
/* Operating Mode: GET */
@@ -59,15 +57,12 @@ extern u16 g_num_total_switches;
#define MAC_CONNECTED 1
#define MAC_DISCONNECTED 0
-
-
/*****************************************************************************/
/* Function Macros */
/*****************************************************************************/
#define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
#define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))
-
/*****************************************************************************/
/* Type Definitions */
/*****************************************************************************/
@@ -140,7 +135,6 @@ typedef struct {
u16 u16RespIEsLen;
} tstrConnectRespInfo;
-
typedef struct {
u8 au8bssid[6];
u8 *pu8ReqIEs;
@@ -150,8 +144,6 @@ typedef struct {
u16 u16ConnectStatus;
} tstrConnectInfo;
-
-
typedef struct {
u16 u16reason;
u8 *ie;
@@ -171,26 +163,27 @@ typedef struct wid_site_survey_reslts {
} wid_site_survey_reslts_s;
#endif
-extern s32 CoreConfiguratorInit(void);
-extern s32 CoreConfiguratorDeInit(void);
+s32 CoreConfiguratorInit(void);
+s32 CoreConfiguratorDeInit(void);
-extern s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
- u32 u32WIDsCount, bool bRespRequired, u32 drvHandler);
-extern s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
-extern s32 DeallocateNetworkInfo(tstrNetworkInfo *pstrNetworkInfo);
+s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
+ u32 u32WIDsCount, bool bRespRequired, u32 drvHandler);
+s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
+s32 DeallocateNetworkInfo(tstrNetworkInfo *pstrNetworkInfo);
-extern s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
- tstrConnectRespInfo **ppstrConnectRespInfo);
-extern s32 DeallocateAssocRespInfo(tstrConnectRespInfo *pstrConnectRespInfo);
+s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
+ tstrConnectRespInfo **ppstrConnectRespInfo);
+s32 DeallocateAssocRespInfo(tstrConnectRespInfo *pstrConnectRespInfo);
#ifndef CONNECT_DIRECT
-extern s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
- wid_site_survey_reslts_s **ppstrSurveyResults, u32 *pu32SurveyResultsCount);
-extern s32 DeallocateSurveyResults(wid_site_survey_reslts_s *pstrSurveyResults);
+s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
+ wid_site_survey_reslts_s **ppstrSurveyResults,
+ u32 *pu32SurveyResultsCount);
+s32 DeallocateSurveyResults(wid_site_survey_reslts_s *pstrSurveyResults);
#endif
-extern s32 SendRawPacket(s8 *pspacket, s32 s32PacketLen);
-extern void NetworkInfoReceived(u8 *pu8Buffer, u32 u32Length);
+s32 SendRawPacket(s8 *pspacket, s32 s32PacketLen);
+void NetworkInfoReceived(u8 *pu8Buffer, u32 u32Length);
void GnrlAsyncInfoReceived(u8 *pu8Buffer, u32 u32Length);
void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length);
diff --git a/drivers/staging/wilc1000/fifo_buffer.c b/drivers/staging/wilc1000/fifo_buffer.c
deleted file mode 100644
index b6c07cfc43d2..000000000000
--- a/drivers/staging/wilc1000/fifo_buffer.c
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
-#include "fifo_buffer.h"
-
-
-
-u32 FIFO_InitBuffer(tHANDLE *hBuffer, u32 u32BufferLength)
-{
- u32 u32Error = 0;
- tstrFifoHandler *pstrFifoHandler = WILC_MALLOC (sizeof (tstrFifoHandler));
- if (pstrFifoHandler) {
- WILC_memset (pstrFifoHandler, 0, sizeof (tstrFifoHandler));
- pstrFifoHandler->pu8Buffer = WILC_MALLOC (u32BufferLength);
- if (pstrFifoHandler->pu8Buffer) {
- pstrFifoHandler->u32BufferLength = u32BufferLength;
- WILC_memset (pstrFifoHandler->pu8Buffer, 0, u32BufferLength);
- /* create semaphore */
- sema_init(&pstrFifoHandler->SemBuffer, 1);
- *hBuffer = pstrFifoHandler;
- } else {
- *hBuffer = NULL;
- u32Error = 1;
- }
- } else {
- u32Error = 1;
- }
- return u32Error;
-}
-u32 FIFO_DeInit(tHANDLE hFifo)
-{
- u32 u32Error = 0;
- tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
- if (pstrFifoHandler) {
- if (pstrFifoHandler->pu8Buffer) {
- WILC_FREE (pstrFifoHandler->pu8Buffer);
- } else {
- u32Error = 1;
- }
-
- WILC_FREE (pstrFifoHandler);
- } else {
- u32Error = 1;
- }
- return u32Error;
-}
-
-u32 FIFO_ReadBytes(tHANDLE hFifo, u8 *pu8Buffer, u32 u32BytesToRead, u32 *pu32BytesRead)
-{
- u32 u32Error = 0;
- tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
- if (pstrFifoHandler && pu32BytesRead) {
- if (pstrFifoHandler->u32TotalBytes) {
- down(&pstrFifoHandler->SemBuffer);
-
- if (u32BytesToRead > pstrFifoHandler->u32TotalBytes) {
- *pu32BytesRead = pstrFifoHandler->u32TotalBytes;
- } else {
- *pu32BytesRead = u32BytesToRead;
- }
- if ((pstrFifoHandler->u32ReadOffset + u32BytesToRead) <= pstrFifoHandler->u32BufferLength) {
- WILC_memcpy(pu8Buffer, pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32ReadOffset,
- *pu32BytesRead);
- /* update read offset and total bytes */
- pstrFifoHandler->u32ReadOffset += u32BytesToRead;
- pstrFifoHandler->u32TotalBytes -= u32BytesToRead;
-
- } else {
- u32 u32FirstPart =
- pstrFifoHandler->u32BufferLength - pstrFifoHandler->u32ReadOffset;
- WILC_memcpy(pu8Buffer, pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32ReadOffset,
- u32FirstPart);
- WILC_memcpy(pu8Buffer + u32FirstPart, pstrFifoHandler->pu8Buffer,
- u32BytesToRead - u32FirstPart);
- /* update read offset and total bytes */
- pstrFifoHandler->u32ReadOffset = u32BytesToRead - u32FirstPart;
- pstrFifoHandler->u32TotalBytes -= u32BytesToRead;
- }
- up(&pstrFifoHandler->SemBuffer);
- } else {
- u32Error = 1;
- }
- } else {
- u32Error = 1;
- }
- return u32Error;
-}
-
-u32 FIFO_WriteBytes(tHANDLE hFifo, u8 *pu8Buffer, u32 u32BytesToWrite, bool bForceOverWrite)
-{
- u32 u32Error = 0;
- tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
- if (pstrFifoHandler) {
- if (u32BytesToWrite < pstrFifoHandler->u32BufferLength) {
- if ((pstrFifoHandler->u32TotalBytes + u32BytesToWrite) <= pstrFifoHandler->u32BufferLength ||
- bForceOverWrite) {
- down(&pstrFifoHandler->SemBuffer);
- if ((pstrFifoHandler->u32WriteOffset + u32BytesToWrite) <= pstrFifoHandler->u32BufferLength) {
- WILC_memcpy(pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32WriteOffset, pu8Buffer,
- u32BytesToWrite);
- /* update read offset and total bytes */
- pstrFifoHandler->u32WriteOffset += u32BytesToWrite;
- pstrFifoHandler->u32TotalBytes += u32BytesToWrite;
-
- } else {
- u32 u32FirstPart =
- pstrFifoHandler->u32BufferLength - pstrFifoHandler->u32WriteOffset;
- WILC_memcpy(pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32WriteOffset, pu8Buffer,
- u32FirstPart);
- WILC_memcpy(pstrFifoHandler->pu8Buffer, pu8Buffer + u32FirstPart,
- u32BytesToWrite - u32FirstPart);
- /* update read offset and total bytes */
- pstrFifoHandler->u32WriteOffset = u32BytesToWrite - u32FirstPart;
- pstrFifoHandler->u32TotalBytes += u32BytesToWrite;
- }
- /* if data overwriten */
- if (pstrFifoHandler->u32TotalBytes > pstrFifoHandler->u32BufferLength) {
- /* adjust read offset to the oldest data available */
- pstrFifoHandler->u32ReadOffset = pstrFifoHandler->u32WriteOffset;
- /* data availabe is the buffer length */
- pstrFifoHandler->u32TotalBytes = pstrFifoHandler->u32BufferLength;
- }
- up(&pstrFifoHandler->SemBuffer);
- } else {
- u32Error = 1;
- }
- } else {
- u32Error = 1;
- }
- } else {
- u32Error = 1;
- }
- return u32Error;
-}
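The deleted fifo_buffer.c implemented a ring buffer whose writes wrap
around the end of the storage with two memcpy() calls. A compact,
self-contained userspace sketch of that wrap-around copy (hypothetical
struct and names; single writer, no locking):

#include <stdio.h>
#include <string.h>

struct ring {
	unsigned char buf[8];
	unsigned int wr;	/* write offset */
	unsigned int used;	/* bytes stored */
};

static void ring_write(struct ring *r, const unsigned char *src, unsigned int n)
{
	unsigned int first = sizeof(r->buf) - r->wr;

	if (n <= first) {
		memcpy(r->buf + r->wr, src, n);
		r->wr = (r->wr + n) % sizeof(r->buf);
	} else {
		memcpy(r->buf + r->wr, src, first);	/* tail of the buffer */
		memcpy(r->buf, src + first, n - first);	/* wrap to the front */
		r->wr = n - first;
	}
	r->used += n;
}

int main(void)
{
	struct ring r = { .wr = 6 };

	ring_write(&r, (const unsigned char *)"abcd", 4); /* wraps after 2 bytes */
	printf("wr=%u used=%u\n", r.wr, r.used);	  /* prints wr=2 used=4 */
	return 0;
}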
diff --git a/drivers/staging/wilc1000/fifo_buffer.h b/drivers/staging/wilc1000/fifo_buffer.h
deleted file mode 100644
index 7b76998e4238..000000000000
--- a/drivers/staging/wilc1000/fifo_buffer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include "wilc_memory.h"
-#include "wilc_strutils.h"
-
-
-#define tHANDLE void *
-
-typedef struct {
- u8 *pu8Buffer;
- u32 u32BufferLength;
- u32 u32WriteOffset;
- u32 u32ReadOffset;
- u32 u32TotalBytes;
- struct semaphore SemBuffer;
-} tstrFifoHandler;
-
-
-extern u32 FIFO_InitBuffer(tHANDLE *hBuffer,
- u32 u32BufferLength);
-extern u32 FIFO_DeInit(tHANDLE hFifo);
-extern u32 FIFO_ReadBytes(tHANDLE hFifo, u8 *pu8Buffer,
- u32 u32BytesToRead, u32 *pu32BytesRead);
-extern u32 FIFO_WriteBytes(tHANDLE hFifo, u8 *pu8Buffer,
- u32 u32BytesToWrite, bool bForceOverWrite);
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6b10bbbe6ab2..66fa677015db 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -6,10 +6,9 @@ extern s32 TransportDeInit(void);
extern u8 connecting;
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-extern WILC_TimerHandle hDuringIpTimer;
+extern struct timer_list hDuringIpTimer;
#endif
-extern bool bEnablePS;
/*BugID_5137*/
extern u8 g_wilc_initialized;
/*****************************************************************************/
@@ -467,7 +466,7 @@ typedef union _tuniHostIFmsgBody {
typedef struct _tstrHostIFmsg {
u16 u16MsgId; /*!< Message ID */
tuniHostIFmsgBody uniHostIFmsgBody; /*!< Message body */
- void *drvHandler;
+ tstrWILC_WFIDrv *drvHandler;
} tstrHostIFmsg;
#ifdef CONNECT_DIRECT
@@ -534,8 +533,8 @@ typedef enum {
/*****************************************************************************/
-tstrWILC_WFIDrv *terminated_handle = NULL;
-tstrWILC_WFIDrv *gWFiDrvHandle = NULL;
+tstrWILC_WFIDrv *terminated_handle;
+tstrWILC_WFIDrv *gWFiDrvHandle;
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
bool g_obtainingIP = false;
#endif
@@ -547,7 +546,7 @@ static struct semaphore hSemHostIFthrdEnd;
struct semaphore hSemDeinitDrvHandle;
static struct semaphore hWaitResponse;
struct semaphore hSemHostIntDeinit;
-WILC_TimerHandle g_hPeriodicRSSI;
+struct timer_list g_hPeriodicRSSI;
@@ -570,9 +569,7 @@ static u8 gs8GetIP[2][4];
static u32 gu32InactiveTime;
static u8 gu8DelBcn;
#endif
-#ifndef SIMULATION
static u32 gu32WidConnRstHack;
-#endif
/*BugID_5137*/
u8 *gu8FlushedJoinReq;
@@ -604,7 +601,7 @@ extern int linux_wlan_get_num_conn_ifcs(void);
* @date
* @version 1.0
*/
-static s32 Handle_SetChannel(void *drvHandler, tstrHostIFSetChan *pstrHostIFSetChan)
+static s32 Handle_SetChannel(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetChan *pstrHostIFSetChan)
{
s32 s32Error = WILC_SUCCESS;
@@ -659,9 +656,8 @@ static s32 Handle_SetWfiDrvHandler(tstrHostIfSetDrvHandler *pstrHostIfSetDrvHand
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- if ((pstrHostIfSetDrvHandler->u32Address) == (u32)NULL) {
+ if ((pstrHostIfSetDrvHandler->u32Address) == (u32)NULL)
up(&hSemDeinitDrvHandle);
- }
if (s32Error) {
@@ -685,7 +681,7 @@ static s32 Handle_SetWfiDrvHandler(tstrHostIfSetDrvHandler *pstrHostIfSetDrvHand
* @date
* @version 1.0
*/
-static s32 Handle_SetOperationMode(void *drvHandler, tstrHostIfSetOperationMode *pstrHostIfSetOperationMode)
+static s32 Handle_SetOperationMode(tstrWILC_WFIDrv *drvHandler, tstrHostIfSetOperationMode *pstrHostIfSetOperationMode)
{
s32 s32Error = WILC_SUCCESS;
@@ -700,14 +696,13 @@ static s32 Handle_SetOperationMode(void *drvHandler, tstrHostIfSetOperationMode
strWID.s32ValueSize = sizeof(u32);
/*Sending Cfg*/
- PRINT_INFO(HOSTINF_DBG, "pstrWFIDrv= %p \n", pstrWFIDrv);
+ PRINT_INFO(HOSTINF_DBG, "pstrWFIDrv= %p\n", pstrWFIDrv);
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- if ((pstrHostIfSetOperationMode->u32Mode) == (u32)NULL) {
+ if ((pstrHostIfSetOperationMode->u32Mode) == (u32)NULL)
up(&hSemDeinitDrvHandle);
- }
if (s32Error) {
@@ -731,7 +726,7 @@ static s32 Handle_SetOperationMode(void *drvHandler, tstrHostIfSetOperationMode
* @date
* @version 1.0
*/
-s32 Handle_set_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
+s32 Handle_set_IPAddress(tstrWILC_WFIDrv *drvHandler, u8 *pu8IPAddr, u8 idx)
{
s32 s32Error = WILC_SUCCESS;
@@ -742,9 +737,9 @@ s32 Handle_set_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
if (pu8IPAddr[0] < 192)
pu8IPAddr[0] = 0;
- PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set IP = %d.%d.%d.%d \n", idx, pu8IPAddr[0], pu8IPAddr[1], pu8IPAddr[2], pu8IPAddr[3]);
+ PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set IP = %pI4\n", idx, pu8IPAddr);
- WILC_memcpy(gs8SetIP[idx], pu8IPAddr, IP_ALEN);
+ memcpy(gs8SetIP[idx], pu8IPAddr, IP_ALEN);
/*prepare configuration packet*/
strWID.u16WIDid = (u16)WID_IP_ADDRESS;
@@ -756,7 +751,7 @@ s32 Handle_set_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
- host_int_get_ipaddress((WILC_WFIDrvHandle)drvHandler, firmwareIPAddress, idx);
+ host_int_get_ipaddress(drvHandler, firmwareIPAddress, idx);
if (s32Error) {
PRINT_D(HOSTINF_DBG, "Failed to set IP address\n");
@@ -783,7 +778,7 @@ s32 Handle_set_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
* @date
* @version 1.0
*/
-s32 Handle_get_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
+s32 Handle_get_IPAddress(tstrWILC_WFIDrv *drvHandler, u8 *pu8IPAddr, u8 idx)
{
s32 s32Error = WILC_SUCCESS;
@@ -793,27 +788,27 @@ s32 Handle_get_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
/*prepare configuration packet*/
strWID.u16WIDid = (u16)WID_IP_ADDRESS;
strWID.enuWIDtype = WID_STR;
- strWID.ps8WidVal = (u8 *)WILC_MALLOC(IP_ALEN);
+ strWID.ps8WidVal = WILC_MALLOC(IP_ALEN);
strWID.s32ValueSize = IP_ALEN;
s32Error = SendConfigPkt(GET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- PRINT_INFO(HOSTINF_DBG, "%d.%d.%d.%d\n", (u8)(strWID.ps8WidVal[0]), (u8)(strWID.ps8WidVal[1]), (u8)(strWID.ps8WidVal[2]), (u8)(strWID.ps8WidVal[3]));
+ PRINT_INFO(HOSTINF_DBG, "%pI4\n", strWID.ps8WidVal);
- WILC_memcpy(gs8GetIP[idx], strWID.ps8WidVal, IP_ALEN);
+ memcpy(gs8GetIP[idx], strWID.ps8WidVal, IP_ALEN);
/*get the value by searching the local copy*/
- WILC_FREE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
- if (WILC_memcmp(gs8GetIP[idx], gs8SetIP[idx], IP_ALEN) != 0)
- host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, gs8SetIP[idx], idx);
+ if (memcmp(gs8GetIP[idx], gs8SetIP[idx], IP_ALEN) != 0)
+ host_int_setup_ipaddress(pstrWFIDrv, gs8SetIP[idx], idx);
if (s32Error != WILC_SUCCESS) {
PRINT_ER("Failed to get IP address\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_STATE);
} else {
- PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d \n", idx);
- PRINT_INFO(HOSTINF_DBG, "%d.%d.%d.%d\n", gs8GetIP[idx][0], gs8GetIP[idx][1], gs8GetIP[idx][2], gs8GetIP[idx][3]);
+ PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d\n", idx);
+ PRINT_INFO(HOSTINF_DBG, "%pI4\n", gs8GetIP[idx]);
PRINT_INFO(HOSTINF_DBG, "\n");
}
@@ -836,18 +831,19 @@ s32 Handle_get_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
* @date November 2013
* @version 7.0
*/
-static s32 Handle_SetMacAddress(void *drvHandler, tstrHostIfSetMacAddress *pstrHostIfSetMacAddress)
+static s32 Handle_SetMacAddress(tstrWILC_WFIDrv *drvHandler, tstrHostIfSetMacAddress *pstrHostIfSetMacAddress)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
- u8 *mac_buf = (u8 *)WILC_MALLOC(ETH_ALEN);
+ u8 *mac_buf = WILC_MALLOC(ETH_ALEN);
+
if (mac_buf == NULL) {
PRINT_ER("No buffer to send mac address\n");
return WILC_FAIL;
}
- WILC_memcpy(mac_buf, pstrHostIfSetMacAddress->u8MacAddress, ETH_ALEN);
+ memcpy(mac_buf, pstrHostIfSetMacAddress->u8MacAddress, ETH_ALEN);
/*prepare configuration packet*/
strWID.u16WIDid = (u16)WID_MAC_ADDR;
@@ -866,7 +862,7 @@ static s32 Handle_SetMacAddress(void *drvHandler, tstrHostIfSetMacAddress *pstrH
{
}
- WILC_FREE(mac_buf);
+ kfree(mac_buf);
return s32Error;
}
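
Besides swapping WILC_FREE() for kfree(), the hunk above drops the (u8 *) cast on WILC_MALLOC(): the allocator returns void *, which C converts implicitly. Pairing the buffer with kfree() assumes WILC_MALLOC() is a thin kmalloc() wrapper, as it is in this driver. A sketch of the resulting allocate/copy/free shape (function name and error value are illustrative):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/if_ether.h>

	static int example_send_mac(const u8 *mac)
	{
		u8 *buf = kmalloc(ETH_ALEN, GFP_KERNEL);	/* no cast needed */

		if (!buf)
			return -ENOMEM;
		memcpy(buf, mac, ETH_ALEN);
		/* ... hand buf to the config-packet path here ... */
		kfree(buf);
		return 0;
	}
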
@@ -881,7 +877,7 @@ static s32 Handle_SetMacAddress(void *drvHandler, tstrHostIfSetMacAddress *pstrH
* @date JAN 2013
* @version 8.0
*/
-static s32 Handle_GetMacAddress(void *drvHandler, tstrHostIfGetMacAddress *pstrHostIfGetMacAddress)
+static s32 Handle_GetMacAddress(tstrWILC_WFIDrv *drvHandler, tstrHostIfGetMacAddress *pstrHostIfGetMacAddress)
{
s32 s32Error = WILC_SUCCESS;
@@ -918,7 +914,7 @@ static s32 Handle_GetMacAddress(void *drvHandler, tstrHostIfGetMacAddress *pstrH
* @date
* @version 1.0
*/
-static s32 Handle_CfgParam(void *drvHandler, tstrHostIFCfgParamAttr *strHostIFCfgParamAttr)
+static s32 Handle_CfgParam(tstrWILC_WFIDrv *drvHandler, tstrHostIFCfgParamAttr *strHostIFCfgParamAttr)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWIDList[32];
@@ -1204,10 +1200,9 @@ static s32 Handle_CfgParam(void *drvHandler, tstrHostIFCfgParamAttr *strHostIFCf
}
s32Error = SendConfigPkt(SET_CFG, strWIDList, u8WidCnt, false, (u32)pstrWFIDrv);
- if (s32Error) {
+ if (s32Error)
PRINT_ER("Error in setting CFG params\n");
- }
WILC_CATCH(s32Error)
{
}
@@ -1228,6 +1223,7 @@ static s32 Handle_CfgParam(void *drvHandler, tstrHostIFCfgParamAttr *strHostIFCf
static s32 Handle_wait_msg_q_empty(void)
{
s32 s32Error = WILC_SUCCESS;
+
g_wilc_initialized = 0;
up(&hWaitResponse);
return s32Error;
@@ -1242,7 +1238,7 @@ static s32 Handle_wait_msg_q_empty(void)
* @date
* @version 1.0
*/
-static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
+static s32 Handle_Scan(tstrWILC_WFIDrv *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWIDList[5];
@@ -1254,7 +1250,7 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
- PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state \n", pstrWFIDrv->enuHostIFstate);
+ PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state\n", pstrWFIDrv->enuHostIFstate);
pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult = pstrHostIFscanAttr->pfScanResult;
pstrWFIDrv->strWILC_UsrScanReq.u32UserScanPvoid = pstrHostIFscanAttr->pvUserArg;
@@ -1284,9 +1280,8 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
strWIDList[u32WidsCount].u16WIDid = (u16)WID_SSID_PROBE_REQ;
strWIDList[u32WidsCount].enuWIDtype = WID_STR;
- for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++) {
+ for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++)
valuesize += ((pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen) + 1);
- }
pu8HdnNtwrksWidVal = WILC_MALLOC(valuesize + 1);
strWIDList[u32WidsCount].ps8WidVal = pu8HdnNtwrksWidVal;
if (strWIDList[u32WidsCount].ps8WidVal != NULL) {
@@ -1298,7 +1293,7 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++) {
*pu8Buffer++ = pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen;
- WILC_memcpy(pu8Buffer, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen);
+ memcpy(pu8Buffer, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen);
pu8Buffer += pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen;
}
@@ -1336,9 +1331,8 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
int i;
for (i = 0; i < pstrHostIFscanAttr->u8ChnlListLen; i++) {
- if (pstrHostIFscanAttr->pu8ChnlFreqList[i] > 0) {
+ if (pstrHostIFscanAttr->pu8ChnlFreqList[i] > 0)
pstrHostIFscanAttr->pu8ChnlFreqList[i] = pstrHostIFscanAttr->pu8ChnlFreqList[i] - 1;
- }
}
}
@@ -1356,11 +1350,10 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
/*keep the state as is , no need to change it*/
/* gWFiDrvHandle->enuHostIFstate = HOST_IF_SCANNING; */
- if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED) {
+ if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED)
gbScanWhileConnected = true;
- } else if (pstrWFIDrv->enuHostIFstate == HOST_IF_IDLE) {
+ else if (pstrWFIDrv->enuHostIFstate == HOST_IF_IDLE)
gbScanWhileConnected = false;
- }
s32Error = SendConfigPkt(SET_CFG, strWIDList, u32WidsCount, false, (u32)pstrWFIDrv);
@@ -1373,36 +1366,35 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
WILC_CATCH(s32Error)
{
- WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+ del_timer(&pstrWFIDrv->hScanTimer);
/*if there is an ongoing scan request*/
Handle_ScanDone(drvHandler, SCAN_EVENT_ABORTED);
}
/* Deallocate pstrHostIFscanAttr->u8ChnlListLen which was previously allocated by the sending thread */
if (pstrHostIFscanAttr->pu8ChnlFreqList != NULL) {
- WILC_FREE(pstrHostIFscanAttr->pu8ChnlFreqList);
+ kfree(pstrHostIFscanAttr->pu8ChnlFreqList);
pstrHostIFscanAttr->pu8ChnlFreqList = NULL;
}
/* Deallocate pstrHostIFscanAttr->pu8IEs which was previously allocated by the sending thread */
if (pstrHostIFscanAttr->pu8IEs != NULL) {
- WILC_FREE(pstrHostIFscanAttr->pu8IEs);
+ kfree(pstrHostIFscanAttr->pu8IEs);
pstrHostIFscanAttr->pu8IEs = NULL;
}
if (pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo != NULL) {
- WILC_FREE(pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo);
+ kfree(pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo);
pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo = NULL;
}
/* Deallocate pstrHostIFscanAttr->u8ChnlListLen which was previously allocated by the sending thread */
if (pstrHostIFscanAttr->pu8ChnlFreqList != NULL) {
- WILC_FREE(pstrHostIFscanAttr->pu8ChnlFreqList);
+ kfree(pstrHostIFscanAttr->pu8ChnlFreqList);
pstrHostIFscanAttr->pu8ChnlFreqList = NULL;
}
- if (pu8HdnNtwrksWidVal != NULL) {
- WILC_FREE(pu8HdnNtwrksWidVal);
- }
+ if (pu8HdnNtwrksWidVal != NULL)
+ kfree(pu8HdnNtwrksWidVal);
return s32Error;
}
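
Handle_Scan() is also where WILC_TimerStop() first gives way to the plain kernel del_timer(). Note that del_timer() only deactivates the timer; it does not wait for a handler that is already running (del_timer_sync() would). A minimal sketch under the pre-4.15 timer API this kernel uses, with illustrative names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void example_scan_timeout(unsigned long arg)
	{
		/* timer callback; arg carries the driver handle */
	}

	static void example_arm_and_disarm(struct timer_list *t, unsigned long handle)
	{
		setup_timer(t, example_scan_timeout, handle);
		mod_timer(t, jiffies + msecs_to_jiffies(5000));	/* fire in 5 s */
		del_timer(t);	/* disarm; does not wait for a running handler */
	}
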
@@ -1416,7 +1408,7 @@ static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
* @date
* @version 1.0
*/
-static s32 Handle_ScanDone(void *drvHandler, tenuScanEvent enuEvent)
+static s32 Handle_ScanDone(tstrWILC_WFIDrv *drvHandler, tenuScanEvent enuEvent)
{
s32 s32Error = WILC_SUCCESS;
@@ -1476,7 +1468,7 @@ static s32 Handle_ScanDone(void *drvHandler, tenuScanEvent enuEvent)
* @version 1.0
*/
u8 u8ConnectedSSID[6] = {0};
-static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFconnectAttr)
+static s32 Handle_Connect(tstrWILC_WFIDrv *drvHandler, tstrHostIFconnectAttr *pstrHostIFconnectAttr)
{
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
s32 s32Error = WILC_SUCCESS;
@@ -1500,12 +1492,12 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
PRINT_D(GENERIC_DBG, "Handling connect request\n");
#ifndef CONNECT_DIRECT
- WILC_memset(gapu8RcvdSurveyResults[0], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
- WILC_memset(gapu8RcvdSurveyResults[1], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
+ memset(gapu8RcvdSurveyResults[0], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
+ memset(gapu8RcvdSurveyResults[1], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
PRINT_D(HOSTINF_DBG, "Getting site survey results\n");
- s32Err = host_int_get_site_survey_results((WILC_WFIDrvHandle)pstrWFIDrv,
+ s32Err = host_int_get_site_survey_results(pstrWFIDrv,
gapu8RcvdSurveyResults,
MAX_SURVEY_RESULT_FRAG_SIZE);
if (s32Err) {
@@ -1521,19 +1513,19 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
/* use the parsed info in pstrSurveyResults, then deallocate it */
PRINT_D(HOSTINF_DBG, "Copying site survey results in global structure, then deallocate\n");
for (i = 0; i < pstrWFIDrv->u32SurveyResultsCount; i++) {
- WILC_memcpy(&pstrWFIDrv->astrSurveyResults[i], &pstrSurveyResults[i],
+ memcpy(&pstrWFIDrv->astrSurveyResults[i], &pstrSurveyResults[i],
sizeof(wid_site_survey_reslts_s));
}
DeallocateSurveyResults(pstrSurveyResults);
} else {
WILC_ERRORREPORT(s32Error, WILC_FAIL);
- PRINT_ER("ParseSurveyResults() Error(%d) \n", s32Err);
+ PRINT_ER("ParseSurveyResults() Error(%d)\n", s32Err);
}
for (i = 0; i < pstrWFIDrv->u32SurveyResultsCount; i++) {
- if (WILC_memcmp(pstrWFIDrv->astrSurveyResults[i].SSID,
+ if (memcmp(pstrWFIDrv->astrSurveyResults[i].SSID,
pstrHostIFconnectAttr->pu8ssid,
pstrHostIFconnectAttr->ssidLen) == 0) {
PRINT_INFO(HOSTINF_DBG, "Network with required SSID is found %s\n", pstrHostIFconnectAttr->pu8ssid);
@@ -1546,7 +1538,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
/* BSSID is also passed from the user, so decision of matching
* should consider also this passed BSSID */
- if (WILC_memcmp(pstrWFIDrv->astrSurveyResults[i].BSSID,
+ if (memcmp(pstrWFIDrv->astrSurveyResults[i].BSSID,
pstrHostIFconnectAttr->pu8bssid,
6) == 0) {
PRINT_INFO(HOSTINF_DBG, "BSSID is passed from the user and matched\n");
@@ -1559,29 +1551,29 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
if (i < pstrWFIDrv->u32SurveyResultsCount) {
u8bssDscListIndex = i;
- PRINT_INFO(HOSTINF_DBG, "Connecting to network of Bss Idx %d and SSID %s and channel %d \n",
+ PRINT_INFO(HOSTINF_DBG, "Connecting to network of Bss Idx%d and SSID %s and channel%d\n",
u8bssDscListIndex, pstrWFIDrv->astrSurveyResults[u8bssDscListIndex].SSID,
pstrWFIDrv->astrSurveyResults[u8bssDscListIndex].Channel);
PRINT_INFO(HOSTINF_DBG, "Saving connection parameters in global structure\n");
if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = (u8 *)WILC_MALLOC(6);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
+ pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = WILC_MALLOC(6);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
}
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = pstrHostIFconnectAttr->ssidLen;
if (pstrHostIFconnectAttr->pu8ssid != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
+ pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
pstrHostIFconnectAttr->ssidLen);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid[pstrHostIFconnectAttr->ssidLen] = '\0';
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = pstrHostIFconnectAttr->IEsLen;
if (pstrHostIFconnectAttr->pu8IEs != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
+ pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
pstrHostIFconnectAttr->IEsLen);
}
@@ -1630,14 +1622,12 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
strWIDList[u32WidsCount].ps8WidVal = (s8 *)&u8bssDscListIndex;
u32WidsCount++;
- #ifndef SIMULATION
/* A temporary workaround to avoid handling the misleading MAC_DISCONNECTED raised from the
* firmware at chip reset when processing the WIDs of the Connect Request.
* (This workaround should be removed in the future when the Chip reset of the Connect WIDs is disabled) */
/* ////////////////////// */
gu32WidConnRstHack = 0;
/* ////////////////////// */
- #endif
s32Error = SendConfigPkt(SET_CFG, strWIDList, u32WidsCount, false, (u32)pstrWFIDrv);
if (s32Error) {
@@ -1656,7 +1646,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
/* if we try to connect to an already connected AP then discard the request */
- if (WILC_memcmp(pstrHostIFconnectAttr->pu8bssid, u8ConnectedSSID, ETH_ALEN) == 0) {
+ if (memcmp(pstrHostIFconnectAttr->pu8bssid, u8ConnectedSSID, ETH_ALEN) == 0) {
s32Error = WILC_SUCCESS;
PRINT_ER("Trying to connect to an already connected AP, Discard connect request\n");
@@ -1675,22 +1665,22 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
#endif /*WILC_PARSE_SCAN_IN_HOST*/
if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = (u8 *)WILC_MALLOC(6);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
+ pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = WILC_MALLOC(6);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
}
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = pstrHostIFconnectAttr->ssidLen;
if (pstrHostIFconnectAttr->pu8ssid != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
+ pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
pstrHostIFconnectAttr->ssidLen);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid[pstrHostIFconnectAttr->ssidLen] = '\0';
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = pstrHostIFconnectAttr->IEsLen;
if (pstrHostIFconnectAttr->pu8IEs != NULL) {
- pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
- WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
+ pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+ memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
pstrHostIFconnectAttr->IEsLen);
}
@@ -1728,7 +1718,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
u32WidsCount++;
/*BugID_5137*/
- if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+ if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
gu32FlushedInfoElemAsocSize = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
gu8FlushedInfoElemAsoc = WILC_MALLOC(gu32FlushedInfoElemAsocSize);
@@ -1743,7 +1733,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
u32WidsCount++;
/*BugID_5137*/
- if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
+ if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
gu8Flushed11iMode = pstrWFIDrv->strWILC_UsrConnReq.u8security;
PRINT_INFO(HOSTINF_DBG, "Encrypt Mode = %x\n", pstrWFIDrv->strWILC_UsrConnReq.u8security);
@@ -1756,7 +1746,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
u32WidsCount++;
/*BugID_5137*/
- if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
+ if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
gu8FlushedAuthType = (u8)pstrWFIDrv->strWILC_UsrConnReq.tenuAuth_type;
PRINT_INFO(HOSTINF_DBG, "Authentication Type = %x\n", pstrWFIDrv->strWILC_UsrConnReq.tenuAuth_type);
@@ -1778,14 +1768,13 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
strWIDList[u32WidsCount].s32ValueSize = MAX_SSID_LEN + 7;
strWIDList[u32WidsCount].ps8WidVal = WILC_MALLOC(strWIDList[u32WidsCount].s32ValueSize);
- if (strWIDList[u32WidsCount].ps8WidVal == NULL) {
+ if (strWIDList[u32WidsCount].ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWIDList[u32WidsCount].ps8WidVal;
if (pstrHostIFconnectAttr->pu8ssid != NULL) {
- WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
+ memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
pu8CurrByte[pstrHostIFconnectAttr->ssidLen] = '\0';
}
pu8CurrByte += MAX_SSID_LEN;
@@ -1795,9 +1784,8 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
PRINT_ER("Channel out of range\n");
*(pu8CurrByte++) = 0xFF;
}
- if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
- }
+ if (pstrHostIFconnectAttr->pu8bssid != NULL)
+ memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
pu8CurrByte += 6;
/* keep the buffer at the start of the allocated pointer to use it with the free*/
@@ -1813,19 +1801,18 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
strWIDList[u32WidsCount].ps8WidVal = WILC_MALLOC(strWIDList[u32WidsCount].s32ValueSize);
/*BugID_5137*/
- if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+ if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
gu32FlushedJoinReqSize = strWIDList[u32WidsCount].s32ValueSize;
gu8FlushedJoinReq = WILC_MALLOC(gu32FlushedJoinReqSize);
}
- if (strWIDList[u32WidsCount].ps8WidVal == NULL) {
+ if (strWIDList[u32WidsCount].ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWIDList[u32WidsCount].ps8WidVal;
if (pstrHostIFconnectAttr->pu8ssid != NULL) {
- WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
+ memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
pu8CurrByte[pstrHostIFconnectAttr->ssidLen] = '\0';
}
pu8CurrByte += MAX_SSID_LEN;
@@ -1845,15 +1832,13 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
PRINT_D(HOSTINF_DBG, "* Cap Info %0x*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
/* sa*/
- if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
- }
+ if (pstrHostIFconnectAttr->pu8bssid != NULL)
+ memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
pu8CurrByte += 6;
/* bssid*/
- if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
- }
+ if (pstrHostIFconnectAttr->pu8bssid != NULL)
+ memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
pu8CurrByte += 6;
/* Beacon Period*/
@@ -1864,7 +1849,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
*(pu8CurrByte++) = ptstrJoinBssParam->dtim_period;
PRINT_D(HOSTINF_DBG, "* DTIM Period %d*\n", (*(pu8CurrByte - 1)));
/* Supported rates*/
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
+ memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
pu8CurrByte += (MAX_RATES_SUPPORTED + 1);
/* wmm cap*/
@@ -1888,15 +1873,15 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
*(pu8CurrByte++) = ptstrJoinBssParam->mode_802_11i;
PRINT_D(HOSTINF_DBG, "* mode_802_11i %d*\n", (*(pu8CurrByte - 1)));
/* rsn pcip policy*/
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_pcip_policy);
/* rsn auth policy*/
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_auth_policy, sizeof(ptstrJoinBssParam->rsn_auth_policy));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_auth_policy, sizeof(ptstrJoinBssParam->rsn_auth_policy));
pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_auth_policy);
/* rsn auth policy*/
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_cap, sizeof(ptstrJoinBssParam->rsn_cap));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_cap, sizeof(ptstrJoinBssParam->rsn_cap));
pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_cap);
/*BugID_5137*/
@@ -1921,15 +1906,15 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
*(pu8CurrByte++) = ptstrJoinBssParam->u8Count;
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8Duration, sizeof(ptstrJoinBssParam->au8Duration));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->au8Duration, sizeof(ptstrJoinBssParam->au8Duration));
pu8CurrByte += sizeof(ptstrJoinBssParam->au8Duration);
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8Interval, sizeof(ptstrJoinBssParam->au8Interval));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->au8Interval, sizeof(ptstrJoinBssParam->au8Interval));
pu8CurrByte += sizeof(ptstrJoinBssParam->au8Interval);
- WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8StartTime, sizeof(ptstrJoinBssParam->au8StartTime));
+ memcpy(pu8CurrByte, ptstrJoinBssParam->au8StartTime, sizeof(ptstrJoinBssParam->au8StartTime));
pu8CurrByte += sizeof(ptstrJoinBssParam->au8StartTime);
@@ -1945,17 +1930,15 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
#endif /* #ifdef WILC_PARSE_SCAN_IN_HOST*/
u32WidsCount++;
- #ifndef SIMULATION
/* A temporary workaround to avoid handling the misleading MAC_DISCONNECTED raised from the
* firmware at chip reset when processing the WIDs of the Connect Request.
* (This workaround should be removed in the future when the Chip reset of the Connect WIDs is disabled) */
/* ////////////////////// */
gu32WidConnRstHack = 0;
/* ////////////////////// */
- #endif
/*BugID_5137*/
- if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+ if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
memcpy(gu8FlushedJoinReq, pu8CurrByte, gu32FlushedJoinReqSize);
gu8FlushedJoinReqDrvHandler = (u32)pstrWFIDrv;
}
@@ -1963,7 +1946,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
PRINT_D(GENERIC_DBG, "send HOST_IF_WAITING_CONN_RESP\n");
if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_memcpy(u8ConnectedSSID, pstrHostIFconnectAttr->pu8bssid, ETH_ALEN);
+ memcpy(u8ConnectedSSID, pstrHostIFconnectAttr->pu8bssid, ETH_ALEN);
PRINT_D(GENERIC_DBG, "save Bssid = %x:%x:%x:%x:%x:%x\n", (pstrHostIFconnectAttr->pu8bssid[0]), (pstrHostIFconnectAttr->pu8bssid[1]), (pstrHostIFconnectAttr->pu8bssid[2]), (pstrHostIFconnectAttr->pu8bssid[3]), (pstrHostIFconnectAttr->pu8bssid[4]), (pstrHostIFconnectAttr->pu8bssid[5]));
PRINT_D(GENERIC_DBG, "save bssid = %x:%x:%x:%x:%x:%x\n", (u8ConnectedSSID[0]), (u8ConnectedSSID[1]), (u8ConnectedSSID[2]), (u8ConnectedSSID[3]), (u8ConnectedSSID[4]), (u8ConnectedSSID[5]));
@@ -1983,21 +1966,20 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
{
tstrConnectInfo strConnectInfo;
- WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+ del_timer(&pstrWFIDrv->hConnectTimer);
PRINT_D(HOSTINF_DBG, "could not start connecting to the required network\n");
- WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
if (pstrHostIFconnectAttr->pfConnectResult != NULL) {
- if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
- }
+ if (pstrHostIFconnectAttr->pu8bssid != NULL)
+ memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
if (pstrHostIFconnectAttr->pu8IEs != NULL) {
strConnectInfo.ReqIEsLen = pstrHostIFconnectAttr->IEsLen;
- strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
- WILC_memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+ memcpy(strConnectInfo.pu8ReqIEs,
pstrHostIFconnectAttr->pu8IEs,
pstrHostIFconnectAttr->IEsLen);
}
@@ -2011,37 +1993,36 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
/* Deallocation */
if (strConnectInfo.pu8ReqIEs != NULL) {
- WILC_FREE(strConnectInfo.pu8ReqIEs);
+ kfree(strConnectInfo.pu8ReqIEs);
strConnectInfo.pu8ReqIEs = NULL;
}
} else {
- PRINT_ER("Connect callback function pointer is NULL \n");
+ PRINT_ER("Connect callback function pointer is NULL\n");
}
}
PRINT_D(HOSTINF_DBG, "Deallocating connection parameters\n");
/* Deallocate pstrHostIFconnectAttr->pu8bssid which was previously allocated by the sending thread */
if (pstrHostIFconnectAttr->pu8bssid != NULL) {
- WILC_FREE(pstrHostIFconnectAttr->pu8bssid);
+ kfree(pstrHostIFconnectAttr->pu8bssid);
pstrHostIFconnectAttr->pu8bssid = NULL;
}
/* Deallocate pstrHostIFconnectAttr->pu8ssid which was previously allocated by the sending thread */
if (pstrHostIFconnectAttr->pu8ssid != NULL) {
- WILC_FREE(pstrHostIFconnectAttr->pu8ssid);
+ kfree(pstrHostIFconnectAttr->pu8ssid);
pstrHostIFconnectAttr->pu8ssid = NULL;
}
/* Deallocate pstrHostIFconnectAttr->pu8IEs which was previously allocated by the sending thread */
if (pstrHostIFconnectAttr->pu8IEs != NULL) {
- WILC_FREE(pstrHostIFconnectAttr->pu8IEs);
+ kfree(pstrHostIFconnectAttr->pu8IEs);
pstrHostIFconnectAttr->pu8IEs = NULL;
}
- if (pu8CurrByte != NULL) {
- WILC_FREE(pu8CurrByte);
- }
+ if (pu8CurrByte != NULL)
+ kfree(pu8CurrByte);
return s32Error;
}
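
The repeated memcmp("DIRECT-", ..., 7) guards in Handle_Connect() read inverted at first glance: memcmp() returns 0 on a match, so the flush bookkeeping above runs only for SSIDs that do not begin with the P2P "DIRECT-" prefix. A short sketch of the test (helper name is illustrative):

	#include <linux/string.h>
	#include <linux/types.h>

	static bool example_is_p2p_ssid(const u8 *ssid)
	{
		/* memcmp() == 0 iff the first 7 bytes equal "DIRECT-" */
		return memcmp("DIRECT-", ssid, 7) == 0;
	}
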
@@ -2056,7 +2037,7 @@ static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFcon
* @version 8.0
*/
-static s32 Handle_FlushConnect(void *drvHandler)
+static s32 Handle_FlushConnect(tstrWILC_WFIDrv *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWIDList[5];
@@ -2123,7 +2104,7 @@ static s32 Handle_FlushConnect(void *drvHandler)
* @date
* @version 1.0
*/
-static s32 Handle_ConnectTimeout(void *drvHandler)
+static s32 Handle_ConnectTimeout(tstrWILC_WFIDrv *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
tstrConnectInfo strConnectInfo;
@@ -2141,7 +2122,7 @@ static s32 Handle_ConnectTimeout(void *drvHandler)
gbScanWhileConnected = false;
- WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
/* First, we will notify the upper layer with the Connection failure {through the Connect Callback function},
@@ -2149,14 +2130,14 @@ static s32 Handle_ConnectTimeout(void *drvHandler)
* WID_DISCONNECT} */
if (pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult != NULL) {
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
- WILC_memcpy(strConnectInfo.au8bssid,
+ memcpy(strConnectInfo.au8bssid,
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
}
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
strConnectInfo.ReqIEsLen = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
- strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
- WILC_memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
+ memcpy(strConnectInfo.pu8ReqIEs,
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs,
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
}
@@ -2169,11 +2150,11 @@ static s32 Handle_ConnectTimeout(void *drvHandler)
/* Deallocation of strConnectInfo.pu8ReqIEs */
if (strConnectInfo.pu8ReqIEs != NULL) {
- WILC_FREE(strConnectInfo.pu8ReqIEs);
+ kfree(strConnectInfo.pu8ReqIEs);
strConnectInfo.pu8ReqIEs = NULL;
}
} else {
- PRINT_ER("Connect callback function pointer is NULL \n");
+ PRINT_ER("Connect callback function pointer is NULL\n");
}
/* Here we will notify our firmware also with the Connection failure {through sending to it Cfg packet carrying
@@ -2186,37 +2167,36 @@ static s32 Handle_ConnectTimeout(void *drvHandler)
PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
- if (s32Error) {
+ if (s32Error)
PRINT_ER("Failed to send dissconect config packet\n");
- }
/* Deallocation of the Saved Connect Request in the global Handle */
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
}
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
}
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
/*BugID_5213*/
/*Freeing flushed join request params on connect timeout*/
if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedJoinReq);
+ kfree(gu8FlushedJoinReq);
gu8FlushedJoinReq = NULL;
}
if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedInfoElemAsoc);
+ kfree(gu8FlushedInfoElemAsoc);
gu8FlushedInfoElemAsoc = NULL;
}
@@ -2232,7 +2212,7 @@ static s32 Handle_ConnectTimeout(void *drvHandler)
* @date
* @version 1.0
*/
-static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdNetworkInfo)
+static s32 Handle_RcvdNtwrkInfo(tstrWILC_WFIDrv *drvHandler, tstrRcvdNetworkInfo *pstrRcvdNetworkInfo)
{
u32 i;
bool bNewNtwrkFound;
@@ -2264,7 +2244,7 @@ static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdN
if ((pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid != NULL) &&
(pstrNetworkInfo->au8bssid != NULL)) {
- if (WILC_memcmp(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid,
+ if (memcmp(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid,
pstrNetworkInfo->au8bssid, 6) == 0) {
if (pstrNetworkInfo->s8rssi <= pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].s8rssi) {
/*we have already found this network with better rssi, so keep the old cached one and don't
@@ -2294,7 +2274,7 @@ static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdN
if ((pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid != NULL)
&& (pstrNetworkInfo->au8bssid != NULL)) {
- WILC_memcpy(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid,
+ memcpy(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid,
pstrNetworkInfo->au8bssid, 6);
pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount++;
@@ -2313,7 +2293,7 @@ static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdN
}
} else {
- PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit \n");
+ PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit\n");
}
} else {
pstrNetworkInfo->bNewNetwork = false;
@@ -2332,7 +2312,7 @@ static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdN
done:
/* Deallocate pstrRcvdNetworkInfo->pu8Buffer which was previously allocated by the sending thread */
if (pstrRcvdNetworkInfo->pu8Buffer != NULL) {
- WILC_FREE(pstrRcvdNetworkInfo->pu8Buffer);
+ kfree(pstrRcvdNetworkInfo->pu8Buffer);
pstrRcvdNetworkInfo->pu8Buffer = NULL;
}
@@ -2354,7 +2334,7 @@ done:
* @date
* @version 1.0
*/
-static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pstrRcvdGnrlAsyncInfo)
+static s32 Handle_RcvdGnrlAsyncInfo(tstrWILC_WFIDrv *drvHandler, tstrRcvdGnrlAsyncInfo *pstrRcvdGnrlAsyncInfo)
{
/* TODO: mostafa: till now, this function just handles only the received mac status msg, */
/* which carries only 1 WID which have WID ID = WID_STATUS */
@@ -2371,9 +2351,9 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
tstrDisconnectNotifInfo strDisconnectNotifInfo;
s32 s32Err = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
- if (pstrWFIDrv == NULL) {
+
+ if (pstrWFIDrv == NULL)
PRINT_ER("Driver handler is NULL\n");
- }
PRINT_D(GENERIC_DBG, "Current State = %d,Received state = %d\n", pstrWFIDrv->enuHostIFstate,
pstrRcvdGnrlAsyncInfo->pu8Buffer[7]);
@@ -2417,12 +2397,12 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
PRINT_D(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Code = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
- WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
if (u8MacStatus == MAC_CONNECTED) {
- WILC_memset(gapu8RcvdAssocResp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
+ memset(gapu8RcvdAssocResp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
- host_int_get_assoc_res_info((WILC_WFIDrvHandle)pstrWFIDrv,
+ host_int_get_assoc_res_info(pstrWFIDrv,
gapu8RcvdAssocResp,
MAX_ASSOC_RESP_FRAME_SIZE,
&u32RcvdAssocRespInfoLen);
@@ -2435,7 +2415,7 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
s32Err = ParseAssocRespInfo(gapu8RcvdAssocResp, u32RcvdAssocRespInfoLen,
&pstrConnectRespInfo);
if (s32Err) {
- PRINT_ER("ParseAssocRespInfo() returned error %d \n", s32Err);
+ PRINT_ER("ParseAssocRespInfo() returned error %d\n", s32Err);
} else {
/* use the necessary parsed Info from the Received Association Response */
strConnectInfo.u16ConnectStatus = pstrConnectRespInfo->u16ConnectStatus;
@@ -2446,8 +2426,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
strConnectInfo.u16RespIEsLen = pstrConnectRespInfo->u16RespIEsLen;
- strConnectInfo.pu8RespIEs = (u8 *)WILC_MALLOC(pstrConnectRespInfo->u16RespIEsLen);
- WILC_memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
+ strConnectInfo.pu8RespIEs = WILC_MALLOC(pstrConnectRespInfo->u16RespIEsLen);
+ memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
pstrConnectRespInfo->u16RespIEsLen);
}
}
@@ -2466,23 +2446,23 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
* So check first the matching between the received mac status and the received status code in Asoc Resp */
if ((u8MacStatus == MAC_CONNECTED) &&
(strConnectInfo.u16ConnectStatus != SUCCESSFUL_STATUSCODE)) {
- PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE \n");
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
} else if (u8MacStatus == MAC_DISCONNECTED) {
PRINT_ER("Received MAC status is MAC_DISCONNECTED\n");
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
}
/* TODO: mostafa: correct BSSID should be retrieved from actual BSSID received from AP */
/* through a structure of type tstrConnectRespInfo */
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
PRINT_D(HOSTINF_DBG, "Retrieving actual BSSID from AP\n");
- WILC_memcpy(strConnectInfo.au8bssid, pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
+ memcpy(strConnectInfo.au8bssid, pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
if ((u8MacStatus == MAC_CONNECTED) &&
(strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
- WILC_memcpy(pstrWFIDrv->au8AssociatedBSSID,
+ memcpy(pstrWFIDrv->au8AssociatedBSSID,
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, ETH_ALEN);
}
}
@@ -2490,14 +2470,14 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
strConnectInfo.ReqIEsLen = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
- strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
- WILC_memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
+ memcpy(strConnectInfo.pu8ReqIEs,
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs,
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
}
- WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+ del_timer(&pstrWFIDrv->hConnectTimer);
pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_CONN_RESP,
&strConnectInfo,
u8MacStatus,
@@ -2512,7 +2492,7 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
(strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
- host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+ host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
#endif
PRINT_D(HOSTINF_DBG, "MAC status : CONNECTED and Connect Status : Successful\n");
@@ -2521,7 +2501,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
PRINT_D(GENERIC_DBG, "Obtaining an IP, Disable Scan\n");
g_obtainingIP = true;
- WILC_TimerStart(&hDuringIpTimer, 10000, NULL, NULL);
+ mod_timer(&hDuringIpTimer,
+ jiffies + msecs_to_jiffies(10000));
#endif
#ifdef WILC_PARSE_SCAN_IN_HOST
@@ -2540,30 +2521,30 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
/* Deallocation */
if (strConnectInfo.pu8RespIEs != NULL) {
- WILC_FREE(strConnectInfo.pu8RespIEs);
+ kfree(strConnectInfo.pu8RespIEs);
strConnectInfo.pu8RespIEs = NULL;
}
if (strConnectInfo.pu8ReqIEs != NULL) {
- WILC_FREE(strConnectInfo.pu8ReqIEs);
+ kfree(strConnectInfo.pu8ReqIEs);
strConnectInfo.pu8ReqIEs = NULL;
}
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
}
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
}
@@ -2572,11 +2553,11 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
/* Disassociation or Deauthentication frame has been received */
PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW\n");
- WILC_memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+ memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
- PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >> \n\n");
- WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+ PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >>\n\n");
+ del_timer(&pstrWFIDrv->hScanTimer);
Handle_ScanDone((void *)pstrWFIDrv, SCAN_EVENT_ABORTED);
}
@@ -2588,7 +2569,7 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
g_obtainingIP = false;
- host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+ host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
#endif
pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_DISCONN_NOTIF,
@@ -2598,10 +2579,10 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
pstrWFIDrv->strWILC_UsrConnReq.u32UserConnectPvoid);
} else {
- PRINT_ER("Connect result callback function is NULL \n");
+ PRINT_ER("Connect result callback function is NULL\n");
}
- WILC_memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
+ memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
/* Deallocation */
@@ -2611,25 +2592,25 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
/*
* if(strDisconnectNotifInfo.ie != NULL)
* {
- * WILC_FREE(strDisconnectNotifInfo.ie);
+ * kfree(strDisconnectNotifInfo.ie);
* strDisconnectNotifInfo.ie = NULL;
* }
*/
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
}
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
}
@@ -2637,11 +2618,11 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
/*Freeing flushed join request params on receiving*/
/*MAC_DISCONNECTED while connected*/
if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedJoinReq);
+ kfree(gu8FlushedJoinReq);
gu8FlushedJoinReq = NULL;
}
if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedInfoElemAsoc);
+ kfree(gu8FlushedInfoElemAsoc);
gu8FlushedInfoElemAsoc = NULL;
}
@@ -2651,13 +2632,12 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
} else if ((u8MacStatus == MAC_DISCONNECTED) &&
(pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult != NULL)) {
PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW while scanning\n");
- PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >> \n\n");
+ PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >>\n\n");
/*Abort the running scan*/
- WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
- if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
- Handle_ScanDone((void *)pstrWFIDrv, SCAN_EVENT_ABORTED);
+ del_timer(&pstrWFIDrv->hScanTimer);
+ if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult)
+ Handle_ScanDone(pstrWFIDrv, SCAN_EVENT_ABORTED);
- }
}
}
@@ -2669,7 +2649,7 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
/* Deallocate pstrRcvdGnrlAsyncInfo->pu8Buffer which was previously allocated by the sending thread */
if (pstrRcvdGnrlAsyncInfo->pu8Buffer != NULL) {
- WILC_FREE(pstrRcvdGnrlAsyncInfo->pu8Buffer);
+ kfree(pstrRcvdGnrlAsyncInfo->pu8Buffer);
pstrRcvdGnrlAsyncInfo->pu8Buffer = NULL;
}
@@ -2685,7 +2665,7 @@ static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pst
* @date
* @version 1.0
*/
-static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
+static int Handle_Key(tstrWILC_WFIDrv *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -2726,7 +2706,7 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
strWIDList[2].s32ValueSize = sizeof(char);
- pu8keybuf = (u8 *)WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
+ pu8keybuf = WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
if (pu8keybuf == NULL) {
@@ -2734,11 +2714,11 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
return -1;
}
- WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+ memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
- WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
+ kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
strWIDList[3].u16WIDid = (u16)WID_WEP_KEY_VALUE;
strWIDList[3].enuWIDtype = WID_STR;
@@ -2747,7 +2727,7 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
s32Error = SendConfigPkt(SET_CFG, strWIDList, 4, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
}
@@ -2755,19 +2735,19 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
- pu8keybuf = (u8 *)WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2);
+ pu8keybuf = WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2);
if (pu8keybuf == NULL) {
PRINT_ER("No buffer to send Key\n");
return -1;
}
pu8keybuf[0] = pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx;
- WILC_memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen, 1);
+ memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen, 1);
- WILC_memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+ memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
- WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
+ kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
strWID.u16WIDid = (u16)WID_ADD_WEP_KEY;
strWID.enuWIDtype = WID_STR;
@@ -2775,7 +2755,7 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
strWID.s32ValueSize = pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2;
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
} else if (pstrHostIFkeyAttr->u8KeyAction & REMOVEKEY) {
PRINT_D(HOSTINF_DBG, "Removing key\n");
@@ -2803,14 +2783,14 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
case WPARxGtk:
#ifdef WILC_AP_EXTERNAL_MLME
if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY_AP) {
- pu8keybuf = (u8 *)WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
+ pu8keybuf = WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
if (pu8keybuf == NULL) {
PRINT_ER("No buffer to send RxGTK Key\n");
ret = -1;
goto _WPARxGtk_end_case_;
}
- WILC_memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
+ memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
/*|----------------------------------------------------------------------------|
@@ -2821,14 +2801,14 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
if (pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq != NULL)
- WILC_memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
+ memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
- WILC_memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+ memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
- WILC_memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+ memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
- WILC_memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
/* pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Ciphermode = 0X51; */
strWIDList[0].u16WIDid = (u16)WID_11I_MODE;
@@ -2843,7 +2823,7 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
s32Error = SendConfigPkt(SET_CFG, strWIDList, 2, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
/* ////////////////////////// */
up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2854,14 +2834,14 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
PRINT_D(HOSTINF_DBG, "Handling group key(Rx) function\n");
- pu8keybuf = (u8 *)WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
+ pu8keybuf = WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
if (pu8keybuf == NULL) {
PRINT_ER("No buffer to send RxGTK Key\n");
ret = -1;
goto _WPARxGtk_end_case_;
}
- WILC_memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
+ memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
/*|----------------------------------------------------------------------------|
@@ -2869,18 +2849,17 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
* |------------|---------|-------|------------|---------------|----------------|
| 6 bytes | 8 byte |1 byte | 1 byte | 16 bytes | 8 bytes |*/
- if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED) {
- WILC_memcpy(pu8keybuf, pstrWFIDrv->au8AssociatedBSSID, ETH_ALEN);
- } else {
- PRINT_ER("Couldn't handle WPARxGtk while enuHostIFstate is not HOST_IF_CONNECTED \n");
- }
+ if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED)
+ memcpy(pu8keybuf, pstrWFIDrv->au8AssociatedBSSID, ETH_ALEN);
+ else
+ PRINT_ER("Couldn't handle WPARxGtk while enuHostIFstate is not HOST_IF_CONNECTED\n");
- WILC_memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
+ memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
- WILC_memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+ memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
- WILC_memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
- WILC_memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+ memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
strWID.u16WIDid = (u16)WID_ADD_RX_GTK;
@@ -2890,15 +2869,15 @@ static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
/* ////////////////////////// */
up(&(pstrWFIDrv->hSemTestKeyBlock));
/* ///////////////////////// */
}
_WPARxGtk_end_case_:
- WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
- WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq);
+ kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
+ kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq);
if (ret == -1)
return ret;
@@ -2909,7 +2888,7 @@ _WPARxGtk_end_case_:
if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY_AP) {
- pu8keybuf = (u8 *)WILC_MALLOC(PTK_KEY_MSG_LEN + 1);
+ pu8keybuf = WILC_MALLOC(PTK_KEY_MSG_LEN + 1);
@@ -2926,12 +2905,12 @@ _WPARxGtk_end_case_:
| 6 bytes | 1 byte | 1byte | 16 bytes | 8 bytes | 8 bytes |
|-----------------------------------------------------------------------------|*/
- WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6); /*1 bytes Key Length */
+ memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6); /* 6 bytes MAC address */
- WILC_memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
- WILC_memcpy(pu8keybuf + 7, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+ memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+ memcpy(pu8keybuf + 7, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
/*16 byte TK*/
- WILC_memcpy(pu8keybuf + 8, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(pu8keybuf + 8, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
@@ -2946,7 +2925,7 @@ _WPARxGtk_end_case_:
strWIDList[1].s32ValueSize = PTK_KEY_MSG_LEN + 1;
s32Error = SendConfigPkt(SET_CFG, strWIDList, 2, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
/* ////////////////////////// */
up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2956,7 +2935,7 @@ _WPARxGtk_end_case_:
if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
- pu8keybuf = (u8 *)WILC_MALLOC(PTK_KEY_MSG_LEN);
+ pu8keybuf = WILC_MALLOC(PTK_KEY_MSG_LEN);
@@ -2973,11 +2952,11 @@ _WPARxGtk_end_case_:
| 6 bytes | 1byte | 16 bytes | 8 bytes | 8 bytes |
|-----------------------------------------------------------------------------|*/
- WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6); /*1 bytes Key Length */
+ memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6); /* 6 bytes MAC address */
- WILC_memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+ memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
/*16 byte TK*/
- WILC_memcpy(pu8keybuf + 7, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(pu8keybuf + 7, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
@@ -2987,7 +2966,7 @@ _WPARxGtk_end_case_:
strWID.s32ValueSize = PTK_KEY_MSG_LEN;
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
/* ////////////////////////// */
up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2995,7 +2974,7 @@ _WPARxGtk_end_case_:
}
_WPAPtk_end_case_:
- WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
+ kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
if (ret == -1)
return ret;
@@ -3006,7 +2985,7 @@ _WPAPtk_end_case_:
PRINT_D(HOSTINF_DBG, "Handling PMKSA key\n");
- pu8keybuf = (u8 *)WILC_MALLOC((pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid * PMKSA_KEY_LEN) + 1);
+ pu8keybuf = WILC_MALLOC((pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid * PMKSA_KEY_LEN) + 1);
if (pu8keybuf == NULL) {
PRINT_ER("No buffer to send PMKSA Key\n");
return -1;
@@ -3016,8 +2995,8 @@ _WPAPtk_end_case_:
for (i = 0; i < pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid; i++) {
- WILC_memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, ETH_ALEN);
- WILC_memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, PMKID_LEN);
+ memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, ETH_ALEN);
+ memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, PMKID_LEN);
}
strWID.u16WIDid = (u16)WID_PMKID_INFO;
@@ -3027,7 +3006,7 @@ _WPAPtk_end_case_:
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- WILC_FREE(pu8keybuf);
+ kfree(pu8keybuf);
break;
}
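
Many of the converted frees in Handle_Key() and elsewhere keep an explicit NULL guard, but kfree(NULL) is defined to be a no-op, so a follow-up patch could drop those checks. A sketch of the simpler idiom (helper name is illustrative):

	#include <linux/slab.h>

	static void example_free_buf(u8 **bufp)
	{
		kfree(*bufp);	/* kfree(NULL) is a harmless no-op */
		*bufp = NULL;	/* avoid leaving a dangling pointer */
	}
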
@@ -3048,7 +3027,7 @@ _WPAPtk_end_case_:
* @date
* @version 1.0
*/
-static void Handle_Disconnect(void *drvHandler)
+static void Handle_Disconnect(tstrWILC_WFIDrv *drvHandler)
{
tstrWID strWID;
@@ -3069,10 +3048,10 @@ static void Handle_Disconnect(void *drvHandler)
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
g_obtainingIP = false;
- host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+ host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
#endif
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
@@ -3082,14 +3061,14 @@ static void Handle_Disconnect(void *drvHandler)
} else {
tstrDisconnectNotifInfo strDisconnectNotifInfo;
- WILC_memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+ memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
strDisconnectNotifInfo.u16reason = 0;
strDisconnectNotifInfo.ie = NULL;
strDisconnectNotifInfo.ie_len = 0;
if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
- WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+ del_timer(&pstrWFIDrv->hScanTimer);
pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult(SCAN_EVENT_ABORTED, NULL,
pstrWFIDrv->strWILC_UsrScanReq.u32UserScanPvoid, NULL);
@@ -3102,48 +3081,48 @@ static void Handle_Disconnect(void *drvHandler)
/*Stop connect timer, if connection in progress*/
if (pstrWFIDrv->enuHostIFstate == HOST_IF_WAITING_CONN_RESP) {
PRINT_D(HOSTINF_DBG, "Upper layer requested termination of connection\n");
- WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+ del_timer(&pstrWFIDrv->hConnectTimer);
}
pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL,
0, &strDisconnectNotifInfo, pstrWFIDrv->strWILC_UsrConnReq.u32UserConnectPvoid);
} else {
- PRINT_ER("strWILC_UsrConnReq.pfUserConnectResult = NULL \n");
+ PRINT_ER("strWILC_UsrConnReq.pfUserConnectResult = NULL\n");
}
gbScanWhileConnected = false;
pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
- WILC_memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
+ memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
/* Deallocation */
pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
}
if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
}
pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
- WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+ kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
}
/*BugID_5137*/
if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedJoinReq);
+ kfree(gu8FlushedJoinReq);
gu8FlushedJoinReq = NULL;
}
if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
- WILC_FREE(gu8FlushedInfoElemAsoc);
+ kfree(gu8FlushedInfoElemAsoc);
gu8FlushedInfoElemAsoc = NULL;
}
@@ -3161,7 +3140,7 @@ static void Handle_Disconnect(void *drvHandler)
}
-void resolve_disconnect_aberration(void *drvHandler)
+void resolve_disconnect_aberration(tstrWILC_WFIDrv *drvHandler)
{
tstrWILC_WFIDrv *pstrWFIDrv;
@@ -3170,10 +3149,10 @@ void resolve_disconnect_aberration(void *drvHandler)
return;
if ((pstrWFIDrv->enuHostIFstate == HOST_IF_WAITING_CONN_RESP) || (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTING)) {
PRINT_D(HOSTINF_DBG, "\n\n<< correcting Supplicant state machine >>\n\n");
- host_int_disconnect((WILC_WFIDrvHandle)pstrWFIDrv, 1);
+ host_int_disconnect(pstrWFIDrv, 1);
}
}
-static s32 Switch_Log_Terminal(void *drvHandler)
+static s32 Switch_Log_Terminal(tstrWILC_WFIDrv *drvHandler)
{
@@ -3194,7 +3173,7 @@ static s32 Switch_Log_Terminal(void *drvHandler)
PRINT_D(HOSTINF_DBG, "Failed to switch log terminal\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_STATE);
} else {
- PRINT_INFO(HOSTINF_DBG, "MAC address set :: \n");
+ PRINT_INFO(HOSTINF_DBG, "MAC address set ::\n");
}
@@ -3217,13 +3196,14 @@ static s32 Switch_Log_Terminal(void *drvHandler)
* @date
* @version 1.0
*/
-static s32 Handle_GetChnl(void *drvHandler)
+static s32 Handle_GetChnl(tstrWILC_WFIDrv *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
/* tstrWILC_WFIDrv * pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv; */
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
strWID.u16WIDid = (u16)WID_CURRENT_CHANNEL;
strWID.enuWIDtype = WID_CHAR;
strWID.ps8WidVal = (s8 *)&gu8Chnl;
@@ -3261,7 +3241,7 @@ static s32 Handle_GetChnl(void *drvHandler)
* @date
* @version 1.0
*/
-static void Handle_GetRssi(void *drvHandler)
+static void Handle_GetRssi(tstrWILC_WFIDrv *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -3291,7 +3271,7 @@ static void Handle_GetRssi(void *drvHandler)
}
-static void Handle_GetLinkspeed(void *drvHandler)
+static void Handle_GetLinkspeed(tstrWILC_WFIDrv *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -3321,7 +3301,7 @@ static void Handle_GetLinkspeed(void *drvHandler)
}
-s32 Handle_GetStatistics(void *drvHandler, tstrStatistics *pstrStatistics)
+s32 Handle_GetStatistics(tstrWILC_WFIDrv *drvHandler, tstrStatistics *pstrStatistics)
{
tstrWID strWIDList[5];
uint32_t u32WidsCount = 0, s32Error = 0;
@@ -3382,7 +3362,7 @@ s32 Handle_GetStatistics(void *drvHandler, tstrStatistics *pstrStatistics)
* @date
* @version 1.0
*/
-static s32 Handle_Get_InActiveTime(void *drvHandler, tstrHostIfStaInactiveT *strHostIfStaInactiveT)
+static s32 Handle_Get_InActiveTime(tstrWILC_WFIDrv *drvHandler, tstrHostIfStaInactiveT *strHostIfStaInactiveT)
{
s32 s32Error = WILC_SUCCESS;
@@ -3394,11 +3374,11 @@ static s32 Handle_Get_InActiveTime(void *drvHandler, tstrHostIfStaInactiveT *str
strWID.u16WIDid = (u16)WID_SET_STA_MAC_INACTIVE_TIME;
strWID.enuWIDtype = WID_STR;
strWID.s32ValueSize = ETH_ALEN;
- strWID.ps8WidVal = (u8 *)WILC_MALLOC(strWID.s32ValueSize);
+ strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
stamac = strWID.ps8WidVal;
- WILC_memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
+ memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
PRINT_D(CFG80211_DBG, "SETING STA inactive time\n");
@@ -3451,21 +3431,21 @@ static s32 Handle_Get_InActiveTime(void *drvHandler, tstrHostIfStaInactiveT *str
* @date
* @version 1.0
*/
-static void Handle_AddBeacon(void *drvHandler, tstrHostIFSetBeacon *pstrSetBeaconParam)
+static void Handle_AddBeacon(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetBeacon *pstrSetBeaconParam)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
u8 *pu8CurrByte;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
PRINT_D(HOSTINF_DBG, "Adding BEACON\n");
strWID.u16WIDid = (u16)WID_ADD_BEACON;
strWID.enuWIDtype = WID_BIN;
strWID.s32ValueSize = pstrSetBeaconParam->u32HeadLen + pstrSetBeaconParam->u32TailLen + 16;
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
*pu8CurrByte++ = (pstrSetBeaconParam->u32Interval & 0xFF);
@@ -3508,9 +3488,9 @@ static void Handle_AddBeacon(void *drvHandler, tstrHostIFSetBeacon *pstrSetBeaco
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
- WILC_FREE_IF_TRUE(pstrSetBeaconParam->pu8Head);
- WILC_FREE_IF_TRUE(pstrSetBeaconParam->pu8Tail);
+ kfree(strWID.ps8WidVal);
+ kfree(pstrSetBeaconParam->pu8Head);
+ kfree(pstrSetBeaconParam->pu8Tail);
}
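
The size arithmetic above (u32HeadLen + u32TailLen + 16) suggests a 16-byte fixed header ahead of the head and tail blobs; only the interval byte is visible in this hunk, so the remaining field names below are assumptions:

/*
 * Inferred WID_ADD_BEACON framing (little-endian u32 fields):
 *   [0..3]   u32Interval            (visible above)
 *   [4..7]   DTIM period            (assumed)
 *   [8..11]  u32HeadLen, followed by u32HeadLen bytes of pu8Head
 *   [..]     u32TailLen, followed by u32TailLen bytes of pu8Tail
 * Total: 16 fixed bytes + u32HeadLen + u32TailLen, which matches
 * strWID.s32ValueSize.
 */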
@@ -3523,20 +3503,20 @@ static void Handle_AddBeacon(void *drvHandler, tstrHostIFSetBeacon *pstrSetBeaco
* @date
* @version 1.0
*/
-static void Handle_DelBeacon(void *drvHandler, tstrHostIFDelBeacon *pstrDelBeacon)
+static void Handle_DelBeacon(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelBeacon *pstrDelBeacon)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
u8 *pu8CurrByte;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
strWID.u16WIDid = (u16)WID_DEL_BEACON;
strWID.enuWIDtype = WID_CHAR;
strWID.s32ValueSize = sizeof(char);
strWID.ps8WidVal = &gu8DelBcn;
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
@@ -3573,16 +3553,15 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer, tstrWILC_AddStaParam *pstrSta
pu8CurrByte = pu8Buffer;
PRINT_D(HOSTINF_DBG, "Packing STA params\n");
- WILC_memcpy(pu8CurrByte, pstrStationParam->au8BSSID, ETH_ALEN);
+ memcpy(pu8CurrByte, pstrStationParam->au8BSSID, ETH_ALEN);
pu8CurrByte += ETH_ALEN;
*pu8CurrByte++ = pstrStationParam->u16AssocID & 0xFF;
*pu8CurrByte++ = (pstrStationParam->u16AssocID >> 8) & 0xFF;
*pu8CurrByte++ = pstrStationParam->u8NumRates;
- if (pstrStationParam->u8NumRates > 0) {
- WILC_memcpy(pu8CurrByte, pstrStationParam->pu8Rates, pstrStationParam->u8NumRates);
- }
+ if (pstrStationParam->u8NumRates > 0)
+ memcpy(pu8CurrByte, pstrStationParam->pu8Rates, pstrStationParam->u8NumRates);
pu8CurrByte += pstrStationParam->u8NumRates;
*pu8CurrByte++ = pstrStationParam->bIsHTSupported;
@@ -3590,7 +3569,7 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer, tstrWILC_AddStaParam *pstrSta
*pu8CurrByte++ = (pstrStationParam->u16HTCapInfo >> 8) & 0xFF;
*pu8CurrByte++ = pstrStationParam->u8AmpduParams;
- WILC_memcpy(pu8CurrByte, pstrStationParam->au8SuppMCsSet, WILC_SUPP_MCS_SET_SIZE);
+ memcpy(pu8CurrByte, pstrStationParam->au8SuppMCsSet, WILC_SUPP_MCS_SET_SIZE);
pu8CurrByte += WILC_SUPP_MCS_SET_SIZE;
*pu8CurrByte++ = pstrStationParam->u16HTExtParams & 0xFF;
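
The paired stores above emit each 16-bit field little-endian, low byte first; the same idiom with the kernel's unaligned helper, shown only as an equivalent sketch:

#include <asm/unaligned.h>

/* equivalent to: *p++ = v & 0xFF; *p++ = (v >> 8) & 0xFF; */
static inline u8 *pack_le16(u8 *p, u16 v)
{
	put_unaligned_le16(v, p);
	return p + 2;
}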
@@ -3621,21 +3600,21 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer, tstrWILC_AddStaParam *pstrSta
* @date
* @version 1.0
*/
-static void Handle_AddStation(void *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
+static void Handle_AddStation(tstrWILC_WFIDrv *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
u8 *pu8CurrByte;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
PRINT_D(HOSTINF_DBG, "Handling add station\n");
strWID.u16WIDid = (u16)WID_ADD_STA;
strWID.enuWIDtype = WID_BIN;
strWID.s32ValueSize = WILC_ADD_STA_LENGTH + pstrStationParam->u8NumRates;
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
@@ -3651,8 +3630,8 @@ static void Handle_AddStation(void *drvHandler, tstrWILC_AddStaParam *pstrStatio
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(pstrStationParam->pu8Rates);
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+ kfree(pstrStationParam->pu8Rates);
+ kfree(strWID.ps8WidVal);
}
/**
@@ -3664,24 +3643,25 @@ static void Handle_AddStation(void *drvHandler, tstrWILC_AddStaParam *pstrStatio
* @date
* @version 1.0
*/
-static void Handle_DelAllSta(void *drvHandler, tstrHostIFDelAllSta *pstrDelAllStaParam)
+static void Handle_DelAllSta(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelAllSta *pstrDelAllStaParam)
{
s32 s32Error = WILC_SUCCESS;
+
tstrWID strWID;
u8 *pu8CurrByte;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
u8 i;
u8 au8Zero_Buff[6] = {0};
+
strWID.u16WIDid = (u16)WID_DEL_ALL_STA;
strWID.enuWIDtype = WID_STR;
strWID.s32ValueSize = (pstrDelAllStaParam->u8Num_AssocSta * ETH_ALEN) + 1;
- PRINT_D(HOSTINF_DBG, "Handling delete station \n");
+ PRINT_D(HOSTINF_DBG, "Handling delete station\n");
strWID.ps8WidVal = WILC_MALLOC((pstrDelAllStaParam->u8Num_AssocSta * ETH_ALEN) + 1);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
@@ -3689,7 +3669,7 @@ static void Handle_DelAllSta(void *drvHandler, tstrHostIFDelAllSta *pstrDelAllSt
for (i = 0; i < MAX_NUM_STA; i++) {
if (memcmp(pstrDelAllStaParam->au8Sta_DelAllSta[i], au8Zero_Buff, ETH_ALEN))
- WILC_memcpy(pu8CurrByte, pstrDelAllStaParam->au8Sta_DelAllSta[i], ETH_ALEN);
+ memcpy(pu8CurrByte, pstrDelAllStaParam->au8Sta_DelAllSta[i], ETH_ALEN);
else
continue;
@@ -3700,14 +3680,14 @@ static void Handle_DelAllSta(void *drvHandler, tstrHostIFDelAllSta *pstrDelAllSt
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
if (s32Error) {
- PRINT_ER("Failed to send add station config packe\n");
+ PRINT_ER("Failed to send add station config packet\n");
WILC_ERRORREPORT(s32Error, WILC_FAIL);
}
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
up(&hWaitResponse);
}
@@ -3722,7 +3702,7 @@ static void Handle_DelAllSta(void *drvHandler, tstrHostIFDelAllSta *pstrDelAllSt
* @date
* @version 1.0
*/
-static void Handle_DelStation(void *drvHandler, tstrHostIFDelSta *pstrDelStaParam)
+static void Handle_DelStation(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelSta *pstrDelStaParam)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -3733,29 +3713,28 @@ static void Handle_DelStation(void *drvHandler, tstrHostIFDelSta *pstrDelStaPara
strWID.enuWIDtype = WID_BIN;
strWID.s32ValueSize = ETH_ALEN;
- PRINT_D(HOSTINF_DBG, "Handling delete station \n");
+ PRINT_D(HOSTINF_DBG, "Handling delete station\n");
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
- WILC_memcpy(pu8CurrByte, pstrDelStaParam->au8MacAddr, ETH_ALEN);
+ memcpy(pu8CurrByte, pstrDelStaParam->au8MacAddr, ETH_ALEN);
/*Sending Cfg*/
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
if (s32Error) {
- PRINT_ER("Failed to send add station config packe\n");
+ PRINT_ER("Failed to send add station config packet\n");
WILC_ERRORREPORT(s32Error, WILC_FAIL);
}
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
}
@@ -3768,7 +3747,7 @@ static void Handle_DelStation(void *drvHandler, tstrHostIFDelSta *pstrDelStaPara
* @date
* @version 1.0
*/
-static void Handle_EditStation(void *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
+static void Handle_EditStation(tstrWILC_WFIDrv *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -3781,9 +3760,8 @@ static void Handle_EditStation(void *drvHandler, tstrWILC_AddStaParam *pstrStati
PRINT_D(HOSTINF_DBG, "Handling edit station\n");
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
@@ -3799,8 +3777,8 @@ static void Handle_EditStation(void *drvHandler, tstrWILC_AddStaParam *pstrStati
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(pstrStationParam->pu8Rates);
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+ kfree(pstrStationParam->pu8Rates);
+ kfree(strWID.ps8WidVal);
}
#endif /*WILC_AP_EXTERNAL_MLME*/
@@ -3814,7 +3792,7 @@ static void Handle_EditStation(void *drvHandler, tstrWILC_AddStaParam *pstrStati
* @date
* @version 1.0
*/
-static int Handle_RemainOnChan(void *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
+static int Handle_RemainOnChan(tstrWILC_WFIDrv *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
{
s32 s32Error = WILC_SUCCESS;
u8 u8remain_on_chan_flag;
@@ -3856,30 +3834,30 @@ static int Handle_RemainOnChan(void *drvHandler, tstrHostIfRemainOnChan *pstrHos
strWID.u16WIDid = (u16)WID_REMAIN_ON_CHAN;
strWID.enuWIDtype = WID_STR;
strWID.s32ValueSize = 2;
- strWID.ps8WidVal = (s8 *)WILC_MALLOC(strWID.s32ValueSize);
+ strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
strWID.ps8WidVal[0] = u8remain_on_chan_flag;
strWID.ps8WidVal[1] = (s8)pstrHostIfRemainOnChan->u16Channel;
/*Sending Cfg*/
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
- if (s32Error != WILC_SUCCESS) {
+ if (s32Error != WILC_SUCCESS)
PRINT_ER("Failed to set remain on channel\n");
- }
WILC_CATCH(-1)
{
P2P_LISTEN_STATE = 1;
- WILC_TimerStart(&(pstrWFIDrv->hRemainOnChannel), pstrHostIfRemainOnChan->u32duration, (void *)pstrWFIDrv, NULL);
+ pstrWFIDrv->hRemainOnChannel.data = (unsigned long)pstrWFIDrv;
+ mod_timer(&pstrWFIDrv->hRemainOnChannel,
+ jiffies +
+ msecs_to_jiffies(pstrHostIfRemainOnChan->u32duration));
/*Calling CFG ready_on_channel*/
- if (pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady) {
+ if (pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady)
pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady(pstrWFIDrv->strHostIfRemainOnChan.pVoid);
- }
if (pstrWFIDrv->u8RemainOnChan_pendingreq)
pstrWFIDrv->u8RemainOnChan_pendingreq = 0;
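
The replacement arms the one-shot timer with the stock pre-4.15 idiom: the callback argument rides in timer_list.data and the deadline is an absolute jiffies value. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* hypothetical helper; mirrors the two-line pattern in the hunk above */
static void arm_oneshot(struct timer_list *t, void *arg, unsigned int ms)
{
	t->data = (unsigned long)arg;                 /* callback argument */
	mod_timer(t, jiffies + msecs_to_jiffies(ms)); /* (re)arm one shot */
}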
@@ -3896,7 +3874,7 @@ static int Handle_RemainOnChan(void *drvHandler, tstrHostIfRemainOnChan *pstrHos
* @date
* @version 1.0
*/
-static int Handle_RegisterFrame(void *drvHandler, tstrHostIfRegisterFrame *pstrHostIfRegisterFrame)
+static int Handle_RegisterFrame(tstrWILC_WFIDrv *drvHandler, tstrHostIfRegisterFrame *pstrHostIfRegisterFrame)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -3909,15 +3887,14 @@ static int Handle_RegisterFrame(void *drvHandler, tstrHostIfRegisterFrame *pstrH
strWID.u16WIDid = (u16)WID_REGISTER_FRAME;
strWID.enuWIDtype = WID_STR;
strWID.ps8WidVal = WILC_MALLOC(sizeof(u16) + 2);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
*pu8CurrByte++ = pstrHostIfRegisterFrame->bReg;
*pu8CurrByte++ = pstrHostIfRegisterFrame->u8Regid;
- WILC_memcpy(pu8CurrByte, &(pstrHostIfRegisterFrame->u16FrameType), sizeof(u16));
+ memcpy(pu8CurrByte, &(pstrHostIfRegisterFrame->u16FrameType), sizeof(u16));
strWID.s32ValueSize = sizeof(u16) + 2;
@@ -3949,7 +3926,7 @@ static int Handle_RegisterFrame(void *drvHandler, tstrHostIfRegisterFrame *pstrH
* @version 1.0
*/
#define FALSE_FRMWR_CHANNEL 100
-static u32 Handle_ListenStateExpired(void *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
+static u32 Handle_ListenStateExpired(tstrWILC_WFIDrv *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
{
u8 u8remain_on_chan_flag;
tstrWID strWID;
@@ -3968,9 +3945,8 @@ static u32 Handle_ListenStateExpired(void *drvHandler, tstrHostIfRemainOnChan *p
strWID.s32ValueSize = 2;
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
PRINT_ER("Failed to allocate memory\n");
- }
strWID.ps8WidVal[0] = u8remain_on_chan_flag;
strWID.ps8WidVal[1] = FALSE_FRMWR_CHANNEL;
@@ -4006,25 +3982,24 @@ _done_:
* @date
* @version 1.0
*/
-static void ListenTimerCB(void *pvArg)
+static void ListenTimerCB(unsigned long arg)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
- tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)pvArg;
+ tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)arg;
/*Stopping remain-on-channel timer*/
- WILC_TimerStop(&(pstrWFIDrv->hRemainOnChannel), NULL);
+ del_timer(&pstrWFIDrv->hRemainOnChannel);
/* prepare the Timer Callback message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_LISTEN_TIMER_FIRED;
strHostIFmsg.drvHandler = pstrWFIDrv;
strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = pstrWFIDrv->strHostIfRemainOnChan.u32ListenSessionID;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -4042,19 +4017,19 @@ static void ListenTimerCB(void *pvArg)
* @date
* @version 1.0
*/
-static void Handle_PowerManagement(void *drvHandler, tstrHostIfPowerMgmtParam *strPowerMgmtParam)
+static void Handle_PowerManagement(tstrWILC_WFIDrv *drvHandler, tstrHostIfPowerMgmtParam *strPowerMgmtParam)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
s8 s8PowerMode;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
strWID.u16WIDid = (u16)WID_POWER_MANAGEMENT;
- if (strPowerMgmtParam->bIsEnabled == true) {
+ if (strPowerMgmtParam->bIsEnabled == true)
s8PowerMode = MIN_FAST_PS;
- } else {
+ else
s8PowerMode = NO_POWERSAVE;
- }
PRINT_D(HOSTINF_DBG, "Handling power mgmt to %d\n", s8PowerMode);
strWID.ps8WidVal = &s8PowerMode;
strWID.s32ValueSize = sizeof(char);
@@ -4083,7 +4058,7 @@ static void Handle_PowerManagement(void *drvHandler, tstrHostIfPowerMgmtParam *s
* @date
* @version 1.0
*/
-static void Handle_SetMulticastFilter(void *drvHandler, tstrHostIFSetMulti *strHostIfSetMulti)
+static void Handle_SetMulticastFilter(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetMulti *strHostIfSetMulti)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -4095,9 +4070,8 @@ static void Handle_SetMulticastFilter(void *drvHandler, tstrHostIFSetMulti *strH
strWID.enuWIDtype = WID_BIN;
strWID.s32ValueSize = sizeof(tstrHostIFSetMulti) + ((strHostIfSetMulti->u32count) * ETH_ALEN);
strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
- if (strWID.ps8WidVal == NULL) {
+ if (strWID.ps8WidVal == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
pu8CurrByte = strWID.ps8WidVal;
*pu8CurrByte++ = (strHostIfSetMulti->bIsEnabled & 0xFF);
@@ -4123,7 +4097,7 @@ static void Handle_SetMulticastFilter(void *drvHandler, tstrHostIFSetMulti *strH
WILC_CATCH(s32Error)
{
}
- WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
}
@@ -4138,7 +4112,7 @@ static void Handle_SetMulticastFilter(void *drvHandler, tstrHostIFSetMulti *strH
* @date Feb. 2014
* @version 9.0
*/
-static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_AddBASession(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -4146,7 +4120,7 @@ static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
char *ptr = NULL;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
- PRINT_D(HOSTINF_DBG, "Opening Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d \nBufferSize == %d \nSessionTimeOut = %d\n",
+ PRINT_D(HOSTINF_DBG, "Opening Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\nBufferSize == %d\nSessionTimeOut = %d\n",
strHostIfBASessionInfo->au8Bssid[0],
strHostIfBASessionInfo->au8Bssid[1],
strHostIfBASessionInfo->au8Bssid[2],
@@ -4156,14 +4130,14 @@ static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
strWID.u16WIDid = (u16)WID_11E_P_ACTION_REQ;
strWID.enuWIDtype = WID_STR;
- strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+ strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
ptr = strWID.ps8WidVal;
/* *ptr++ = 0x14; */
*ptr++ = 0x14;
*ptr++ = 0x3;
*ptr++ = 0x0;
- WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+ memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
ptr += ETH_ALEN;
*ptr++ = strHostIfBASessionInfo->u8Ted;
/* BA Policy*/
@@ -4195,7 +4169,7 @@ static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
*ptr++ = 15;
*ptr++ = 7;
*ptr++ = 0x2;
- WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+ memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
ptr += ETH_ALEN;
/* TID*/
*ptr++ = strHostIfBASessionInfo->u8Ted;
@@ -4209,7 +4183,7 @@ static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
if (strWID.ps8WidVal != NULL)
- WILC_FREE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
return s32Error;
@@ -4226,14 +4200,14 @@ static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
* @date Feb. 2013
* @version 9.0
*/
-static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_DelBASession(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
char *ptr = NULL;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
- PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d\n",
+ PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
strHostIfBASessionInfo->au8Bssid[0],
strHostIfBASessionInfo->au8Bssid[1],
strHostIfBASessionInfo->au8Bssid[2],
@@ -4241,14 +4215,14 @@ static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
strWID.u16WIDid = (u16)WID_11E_P_ACTION_REQ;
strWID.enuWIDtype = WID_STR;
- strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+ strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
ptr = strWID.ps8WidVal;
/* *ptr++ = 0x14; */
*ptr++ = 0x14;
*ptr++ = 0x3;
*ptr++ = 0x2;
- WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+ memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
ptr += ETH_ALEN;
*ptr++ = strHostIfBASessionInfo->u8Ted;
/* BA direction = recipient */
@@ -4269,7 +4243,7 @@ static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
*ptr++ = 15;
*ptr++ = 7;
*ptr++ = 0x3;
- WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+ memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
ptr += ETH_ALEN;
/* TID*/
*ptr++ = strHostIfBASessionInfo->u8Ted;
@@ -4277,7 +4251,7 @@ static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
if (strWID.ps8WidVal != NULL)
- WILC_FREE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
/*BugID_5222*/
up(&hWaitResponse);
@@ -4296,14 +4270,14 @@ static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHos
* @date Feb. 2013
* @version 9.0
*/
-static s32 Handle_DelAllRxBASessions(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_DelAllRxBASessions(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
char *ptr = NULL;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
- PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d\n",
+ PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
strHostIfBASessionInfo->au8Bssid[0],
strHostIfBASessionInfo->au8Bssid[1],
strHostIfBASessionInfo->au8Bssid[2],
@@ -4311,13 +4285,13 @@ static s32 Handle_DelAllRxBASessions(void *drvHandler, tstrHostIfBASessionInfo *
strWID.u16WIDid = (u16)WID_DEL_ALL_RX_BA;
strWID.enuWIDtype = WID_STR;
- strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+ strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
ptr = strWID.ps8WidVal;
*ptr++ = 0x14;
*ptr++ = 0x3;
*ptr++ = 0x2;
- WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+ memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
ptr += ETH_ALEN;
*ptr++ = strHostIfBASessionInfo->u8Ted;
/* BA direction = recipient */
@@ -4331,7 +4305,7 @@ static s32 Handle_DelAllRxBASessions(void *drvHandler, tstrHostIfBASessionInfo *
if (strWID.ps8WidVal != NULL)
- WILC_FREE(strWID.ps8WidVal);
+ kfree(strWID.ps8WidVal);
/*BugID_5222*/
up(&hWaitResponse);
@@ -4355,10 +4329,10 @@ static int hostIFthread(void *pvArg)
tstrHostIFmsg strHostIFmsg;
tstrWILC_WFIDrv *pstrWFIDrv;
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
while (1) {
- WILC_MsgQueueRecv(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), &u32Ret, NULL);
+ WILC_MsgQueueRecv(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), &u32Ret);
pstrWFIDrv = (tstrWILC_WFIDrv *)strHostIFmsg.drvHandler;
if (strHostIFmsg.u16MsgId == HOST_IF_MSG_EXIT) {
PRINT_D(GENERIC_DBG, "THREAD: Exiting HostIfThread\n");
@@ -4369,15 +4343,15 @@ static int hostIFthread(void *pvArg)
/*Re-Queue HIF message*/
if ((!g_wilc_initialized)) {
PRINT_D(GENERIC_DBG, "--WAIT--");
- WILC_Sleep(200);
- WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ usleep_range(200 * 1000, 200 * 1000);
+ WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
continue;
}
if (strHostIFmsg.u16MsgId == HOST_IF_MSG_CONNECT && pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult != NULL) {
PRINT_D(HOSTINF_DBG, "Requeue connect request till scan done received\n");
- WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- WILC_Sleep(2);
+ WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ usleep_range(2 * 1000, 2 * 1000);
continue;
}
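
Every WILC_MsgQueueSend()/WILC_MsgQueueRecv() call in this patch drops the trailing attribute argument; the prototypes these call sites assume (inferred from usage here, not quoted from wilc_msgqueue.h):

int WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
		      const void *pvSendBuffer, u32 u32SendBufferSize);
int WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
		      void *pvRecvBuffer, u32 u32RecvBufferSize,
		      u32 *pu32ReceivedLength);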
@@ -4425,14 +4399,13 @@ static int hostIFthread(void *pvArg)
break;
case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
- WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+ del_timer(&pstrWFIDrv->hScanTimer);
PRINT_D(HOSTINF_DBG, "scan completed successfully\n");
/*BugID_5213*/
/*Allow chip sleep, only if both interfaces are not connected*/
- if (!linux_wlan_get_num_conn_ifcs()) {
+ if (!linux_wlan_get_num_conn_ifcs())
chip_sleep_manually(INFINITE_SLEEP_TIME);
- }
Handle_ScanDone(strHostIFmsg.drvHandler, SCAN_EVENT_DONE);
@@ -4492,7 +4465,7 @@ static int hostIFthread(void *pvArg)
break;
case HOST_IF_MSG_CONNECT_TIMER_FIRED:
- PRINT_D(HOSTINF_DBG, "Connect Timeout \n");
+ PRINT_D(HOSTINF_DBG, "Connect Timeout\n");
Handle_ConnectTimeout(strHostIFmsg.drvHandler);
break;
@@ -4563,7 +4536,7 @@ static int hostIFthread(void *pvArg)
break;
default:
- PRINT_ER("[Host Interface] undefined Received Msg ID \n");
+ PRINT_ER("[Host Interface] undefined Received Msg ID\n");
break;
}
}
@@ -4573,30 +4546,32 @@ static int hostIFthread(void *pvArg)
return 0;
}
-static void TimerCB_Scan(void *pvArg)
+static void TimerCB_Scan(unsigned long arg)
{
+ void *pvArg = (void *)arg;
tstrHostIFmsg strHostIFmsg;
/* prepare the Timer Callback message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.drvHandler = pvArg;
strHostIFmsg.u16MsgId = HOST_IF_MSG_SCAN_TIMER_FIRED;
/* send the message */
- WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
}
-static void TimerCB_Connect(void *pvArg)
+static void TimerCB_Connect(unsigned long arg)
{
+ void *pvArg = (void *)arg;
tstrHostIFmsg strHostIFmsg;
/* prepare the Timer Callback message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.drvHandler = pvArg;
strHostIFmsg.u16MsgId = HOST_IF_MSG_CONNECT_TIMER_FIRED;
/* send the message */
- WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
}
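
With the callbacks retyped to take an unsigned long, their registration elsewhere in the driver would use the (pre-4.15) setup_timer() form; a sketch, assuming the timers are the hScanTimer/hConnectTimer fields seen above:

#include <linux/timer.h>

/* done once at driver init, before any mod_timer() on these timers */
setup_timer(&pstrWFIDrv->hScanTimer, TimerCB_Scan,
	    (unsigned long)pstrWFIDrv);
setup_timer(&pstrWFIDrv->hConnectTimer, TimerCB_Connect,
	    (unsigned long)pstrWFIDrv);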
@@ -4613,7 +4588,7 @@ static void TimerCB_Connect(void *pvArg)
* @version 1.0
*/
/* Check implementation in core adding 9 bytes to the input! */
-s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress)
+s32 host_int_remove_key(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8StaAddress)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -4642,19 +4617,18 @@ s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8keyIdx)
+s32 host_int_remove_wep_key(tstrWILC_WFIDrv *hWFIDrv, u8 u8keyIdx)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Remove Wep Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4668,9 +4642,9 @@ s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8keyIdx)
uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8keyIdx;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
- PRINT_ER("Error in sending message queue : Request to remove WEP key \n");
+ PRINT_ER("Error in sending message queue : Request to remove WEP key\n");
down(&(pstrWFIDrv->hSemTestKeyBlock));
WILC_CATCH(s32Error)
@@ -4692,19 +4666,18 @@ s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8keyIdx)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index)
+s32 host_int_set_WEPDefaultKeyID(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4717,7 +4690,7 @@ s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index)
uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8Index;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Error in sending message queue : Default key index\n");
down(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -4749,20 +4722,19 @@ s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx)
+s32 host_int_add_wep_key_bss_sta(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4772,9 +4744,9 @@ s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
- uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = (u8 *)WILC_MALLOC(u8WepKeylen);
+ uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = WILC_MALLOC(u8WepKeylen);
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
pu8WepKey, u8WepKeylen);
@@ -4785,7 +4757,7 @@ s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8Keyidx;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Error in sending message queue :WEP Key\n");
down(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -4815,7 +4787,7 @@ s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
* @date 28 FEB 2013
* @version 1.0
*/
-s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type)
+s32 host_int_add_wep_key_bss_ap(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type)
{
s32 s32Error = WILC_SUCCESS;
@@ -4823,13 +4795,12 @@ s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
tstrHostIFmsg strHostIFmsg;
u8 i;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
if (INFO) {
for (i = 0; i < u8WepKeylen; i++)
@@ -4842,10 +4813,10 @@ s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
- uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = (u8 *)WILC_MALLOC((u8WepKeylen));
+ uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = WILC_MALLOC((u8WepKeylen));
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
pu8WepKey, (u8WepKeylen));
@@ -4861,7 +4832,7 @@ s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
uniHostIFkeyAttr.strHostIFwepAttr.tenuAuth_type = tenuAuth_type;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Error in sending message queue :WEP Key\n");
@@ -4891,7 +4862,7 @@ s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
+s32 host_int_add_ptk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode, u8 u8Idx)
{
s32 s32Error = WILC_SUCCESS;
@@ -4899,18 +4870,16 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
tstrHostIFmsg strHostIFmsg;
u8 u8KeyLen = u8PtkKeylen;
u32 i;
- if (pstrWFIDrv == NULL) {
+
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- if (pu8RxMic != NULL) {
+ if (pu8RxMic != NULL)
u8KeyLen += RX_MIC_KEY_LEN;
- }
- if (pu8TxMic != NULL) {
+ if (pu8TxMic != NULL)
u8KeyLen += TX_MIC_KEY_LEN;
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4927,15 +4896,15 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
- uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = (u8 *)WILC_MALLOC(u8PtkKeylen);
+ uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = WILC_MALLOC(u8PtkKeylen);
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pu8Ptk, u8PtkKeylen);
if (pu8RxMic != NULL) {
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
pu8RxMic, RX_MIC_KEY_LEN);
if (INFO) {
for (i = 0; i < RX_MIC_KEY_LEN; i++)
@@ -4944,7 +4913,7 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
}
if (pu8TxMic != NULL) {
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
pu8TxMic, TX_MIC_KEY_LEN);
if (INFO) {
for (i = 0; i < TX_MIC_KEY_LEN; i++)
@@ -4962,14 +4931,13 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Error in sending message queue: PTK Key\n");
/* ////////////// */
down(&(pstrWFIDrv->hSemTestKeyBlock));
- /* WILC_Sleep(100); */
/* /////// */
WILC_CATCH(s32Error)
@@ -4993,7 +4961,7 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
+s32 host_int_add_rx_gtk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode)
{
@@ -5002,24 +4970,21 @@ s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkK
tstrHostIFmsg strHostIFmsg;
u8 u8KeyLen = u8GtkKeylen;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
- if (pu8RxMic != NULL) {
+ if (pu8RxMic != NULL)
u8KeyLen += RX_MIC_KEY_LEN;
- }
- if (pu8TxMic != NULL) {
+ if (pu8TxMic != NULL)
u8KeyLen += TX_MIC_KEY_LEN;
- }
if (KeyRSC != NULL) {
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
- uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq = (u8 *)WILC_MALLOC(u32KeyRSClen);
+ uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq = WILC_MALLOC(u32KeyRSClen);
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq,
KeyRSC, u32KeyRSClen);
}
@@ -5039,20 +5004,20 @@ s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkK
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
- uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = (u8 *)WILC_MALLOC(u8KeyLen);
+ uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = WILC_MALLOC(u8KeyLen);
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
pu8RxGtk, u8GtkKeylen);
if (pu8RxMic != NULL) {
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
pu8RxMic, RX_MIC_KEY_LEN);
}
if (pu8TxMic != NULL) {
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
pu8TxMic, TX_MIC_KEY_LEN);
}
@@ -5068,12 +5033,11 @@ s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkK
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Error in sending message queue: RX GTK\n");
/* ////////////// */
down(&(pstrWFIDrv->hSemTestKeyBlock));
- /* WILC_Sleep(100); */
/* /////// */
WILC_CATCH(s32Error)
@@ -5103,7 +5067,7 @@ s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkK
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray)
+s32 host_int_set_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -5111,12 +5075,11 @@ s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8P
u32 i;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Key Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.enuKeyType = PMKSA;
@@ -5125,15 +5088,15 @@ s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8P
for (i = 0; i < pu8PmkidInfoArray->numpmkid; i++) {
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, &pu8PmkidInfoArray->pmkidlist[i].bssid,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, &pu8PmkidInfoArray->pmkidlist[i].bssid,
ETH_ALEN);
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, &pu8PmkidInfoArray->pmkidlist[i].pmkid,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, &pu8PmkidInfoArray->pmkidlist[i].pmkid,
PMKID_LEN);
}
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER(" Error in sending messagequeue: PMKID Info\n");
@@ -5166,7 +5129,7 @@ s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8P
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
+s32 host_int_get_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PmkidInfoArray,
u32 u32PmkidInfoLen)
{
s32 s32Error = WILC_SUCCESS;
@@ -5195,14 +5158,12 @@ s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassPhrase,
+s32 host_int_set_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PassPhrase,
u8 u8Psklength)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
- /* tstrWILC_WFIDrv * pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv; */
- /* u8 u8Psklength = WILC_strlen(pu8PassPhrase); */
/*validating psk length*/
if ((u8Psklength > 7) && (u8Psklength < 65)) {
strWID.u16WIDid = (u16)WID_11I_PSK;
@@ -5224,20 +5185,20 @@ s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassP
* @date 19 April 2012
* @version 1.0
*/
-s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
+s32 host_int_get_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
/* prepare the Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_MAC_ADDRESS;
strHostIFmsg.uniHostIFmsgBody.strHostIfGetMacAddress.u8MacAddress = pu8MacAddress;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send get mac address\n");
return WILC_FAIL;
@@ -5258,7 +5219,7 @@ s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
* @date 16 July 2012
* @version 1.0
*/
-s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
+s32 host_int_set_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
@@ -5266,12 +5227,12 @@ s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
PRINT_D(GENERIC_DBG, "mac addr = %x:%x:%x\n", pu8MacAddress[0], pu8MacAddress[1], pu8MacAddress[2]);
/* prepare setting mac address message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_MAC_ADDRESS;
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfSetMacAddress.u8MacAddress, pu8MacAddress, ETH_ALEN);
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfSetMacAddress.u8MacAddress, pu8MacAddress, ETH_ALEN);
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send message queue: Set mac address\n");
WILC_ERRORREPORT(s32Error, s32Error);
@@ -5299,7 +5260,7 @@ s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv,
u8 *pu8PassPhrase, u8 u8Psklength)
{
s32 s32Error = WILC_SUCCESS;
@@ -5346,7 +5307,7 @@ s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
* @version 1.0
*/
#ifndef CONNECT_DIRECT
-s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_site_survey_results(tstrWILC_WFIDrv *hWFIDrv,
u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
u32 u32MaxSiteSrvyFragLen)
{
@@ -5396,7 +5357,7 @@ s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource)
+s32 host_int_set_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 scanSource)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -5426,7 +5387,7 @@ s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource)
* @version 1.0
*/
-s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource)
+s32 host_int_get_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ScanSource)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -5451,7 +5412,7 @@ s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
+s32 host_int_set_join_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8bssid,
const u8 *pu8ssid, size_t ssidLen,
const u8 *pu8IEs, size_t IEsLen,
tWILCpfConnectResult pfConnectResult, void *pvUserArg,
@@ -5464,9 +5425,8 @@ s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
tstrHostIFmsg strHostIFmsg;
tenuScanConnTimer enuScanConnTimer;
- if (pstrWFIDrv == NULL || pfConnectResult == NULL) {
+ if (pstrWFIDrv == NULL || pfConnectResult == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
if (hWFIDrv == NULL) {
PRINT_ER("Driver not initialized: gWFiDrvHandle = NULL\n");
@@ -5486,7 +5446,7 @@ s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
* }
*/
/* prepare the Connect Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_CONNECT;
@@ -5499,39 +5459,41 @@ s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
strHostIFmsg.drvHandler = hWFIDrv;
if (pu8bssid != NULL) {
- strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid = (u8 *)WILC_MALLOC(6); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid,
+ strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid = WILC_MALLOC(6); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid,
pu8bssid, 6);
}
if (pu8ssid != NULL) {
strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.ssidLen = ssidLen;
- strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid = (u8 *)WILC_MALLOC(ssidLen); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid,
+ strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid = WILC_MALLOC(ssidLen); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid,
pu8ssid, ssidLen);
}
if (pu8IEs != NULL) {
strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.IEsLen = IEsLen;
- strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs = (u8 *)WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs,
+ strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs = WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs,
pu8IEs, IEsLen);
}
- if (pstrWFIDrv->enuHostIFstate < HOST_IF_CONNECTING) {
+ if (pstrWFIDrv->enuHostIFstate < HOST_IF_CONNECTING)
pstrWFIDrv->enuHostIFstate = HOST_IF_CONNECTING;
- } else
+ else
PRINT_D(GENERIC_DBG, "Don't set state to 'connecting' as state is %d\n", pstrWFIDrv->enuHostIFstate);
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send message queue: Set join request\n");
WILC_ERRORREPORT(s32Error, WILC_FAIL);
}
enuScanConnTimer = CONNECT_TIMER;
- WILC_TimerStart(&(pstrWFIDrv->hConnectTimer), HOST_IF_CONNECT_TIMEOUT, (void *) hWFIDrv, NULL);
+ pstrWFIDrv->hConnectTimer.data = (unsigned long)hWFIDrv;
+ mod_timer(&pstrWFIDrv->hConnectTimer,
+ jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
WILC_CATCH(s32Error)
{
@@ -5553,7 +5515,7 @@ s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
* @version 8.0
*/
-s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_flush_join_req(tstrWILC_WFIDrv *hWFIDrv)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
@@ -5564,16 +5526,15 @@ s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv)
}
- if (hWFIDrv == NULL) {
+ if (hWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
strHostIFmsg.u16MsgId = HOST_IF_MSG_FLUSH_CONNECT;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send message queue: Flush join request\n");
WILC_ERRORREPORT(s32Error, WILC_FAIL);
@@ -5597,14 +5558,14 @@ s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode)
+s32 host_int_disconnect(tstrWILC_WFIDrv *hWFIDrv, u16 u16ReasonCode)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
@@ -5614,13 +5575,13 @@ s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode)
}
/* prepare the Disconnect Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_DISCONNECT;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Failed to send message queue: disconnect\n");
/* ////////////// */
@@ -5646,7 +5607,7 @@ s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id)
+s32 host_int_disconnect_station(tstrWILC_WFIDrv *hWFIDrv, u8 assoc_id)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -5686,7 +5647,7 @@ s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id)
* @version 1.0
*/
-s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
+s32 host_int_get_assoc_req_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocReqInfo,
u32 u32AssocReqInfoLen)
{
s32 s32Error = WILC_SUCCESS;
@@ -5713,7 +5674,7 @@ s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
+s32 host_int_get_assoc_res_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocRespInfo,
u32 u32MaxAssocRespInfoLen, u32 *pu32RcvdAssocRespInfoLen)
{
s32 s32Error = WILC_SUCCESS;
@@ -5721,7 +5682,7 @@ s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
@@ -5763,7 +5724,7 @@ s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
+s32 host_int_get_rx_power_level(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8RxPowerLevel,
u32 u32RxPowerLevelLen)
{
s32 s32Error = WILC_SUCCESS;
@@ -5794,26 +5755,24 @@ s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_mac_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 u8ChNum)
+s32 host_int_set_mac_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 u8ChNum)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the set channel message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_CHANNEL;
strHostIFmsg.uniHostIFmsgBody.strHostIFSetChan.u8SetChan = u8ChNum;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -5831,12 +5790,11 @@ s32 host_int_wait_msg_queue_idle(void)
/* prepare the set driver handler message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_Q_IDLE;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -5849,7 +5807,7 @@ s32 host_int_wait_msg_queue_idle(void)
}
-s32 host_int_set_wfi_drv_handler(u32 u32address)
+s32 host_int_set_wfi_drv_handler(tstrWILC_WFIDrv *u32address)
{
s32 s32Error = WILC_SUCCESS;
@@ -5858,15 +5816,14 @@ s32 host_int_set_wfi_drv_handler(u32 u32address)
/* prepare the set driver handler message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_WFIDRV_HANDLER;
strHostIFmsg.uniHostIFmsgBody.strHostIfSetDrvHandler.u32Address = u32address;
/* strHostIFmsg.drvHandler=hWFIDrv; */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
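host_int_set_wfi_drv_handler() previously took the driver context as a raw u32, which only round-trips a pointer where addresses fit in 32 bits; on a 64-bit kernel the cast truncates. Passing tstrWILC_WFIDrv * directly is type-safe and portable. An illustrative fragment (not driver code) of the failure mode:

	#include <linux/types.h>

	struct ctx { int id; };

	/* broken on 64-bit: the upper half of the address is lost */
	static u32 ctx_to_u32(struct ctx *c)
	{
		return (u32)(unsigned long)c;
	}

	/* fine everywhere: keep the pointer type end to end */
	static struct ctx *ctx_passthrough(struct ctx *c)
	{
		return c;
	}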
@@ -5877,7 +5834,7 @@ s32 host_int_set_wfi_drv_handler(u32 u32address)
-s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode)
+s32 host_int_set_operation_mode(tstrWILC_WFIDrv *hWFIDrv, u32 u32mode)
{
s32 s32Error = WILC_SUCCESS;
@@ -5886,15 +5843,14 @@ s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode)
/* prepare the set driver handler message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_OPERATION_MODE;
strHostIFmsg.uniHostIFmsgBody.strHostIfSetOperationMode.u32Mode = u32mode;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -5918,25 +5874,25 @@ s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo)
+s32 host_int_get_host_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ChNo)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
/* prepare the Get Channel Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_CHNL;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Failed to send get host channel param's message queue ");
down(&(pstrWFIDrv->hSemGetCHNL));
@@ -5964,7 +5920,7 @@ s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_test_set_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 u32TestMemAddr)
+s32 host_int_test_set_int_wid(tstrWILC_WFIDrv *hWFIDrv, u32 u32TestMemAddr)
{
s32 s32Error = WILC_SUCCESS;
tstrWID strWID;
@@ -5972,7 +5928,7 @@ s32 host_int_test_set_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 u32TestMemAddr)
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
@@ -6011,28 +5967,28 @@ s32 host_int_test_set_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 u32TestMemAddr)
* @date
* @version 1.0
*/
-s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu32InactiveTime)
+s32 host_int_get_inactive_time(tstrWILC_WFIDrv *hWFIDrv, const u8 *mac, u32 *pu32InactiveTime)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfStaInactiveT.mac,
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfStaInactiveT.mac,
mac, ETH_ALEN);
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_INACTIVETIME;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error)
PRINT_ER("Failed to send get host channel param's message queue ");
@@ -6057,7 +6013,7 @@ s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_test_get_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 *pu32TestMemAddr)
+s32 host_int_test_get_int_wid(tstrWILC_WFIDrv *hWFIDrv, u32 *pu32TestMemAddr)
{
s32 s32Error = WILC_SUCCESS;
@@ -6066,7 +6022,7 @@ s32 host_int_test_get_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 *pu32TestMemAddr)
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
@@ -6106,7 +6062,7 @@ s32 host_int_test_get_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 *pu32TestMemAddr)
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi)
+s32 host_int_get_rssi(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8Rssi)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
@@ -6114,13 +6070,13 @@ s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi)
/* prepare the Get RSSI Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_RSSI;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send get host channel param's message queue ");
return WILC_FAIL;
@@ -6141,7 +6097,7 @@ s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi)
return s32Error;
}
-s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd)
+s32 host_int_get_link_speed(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8lnkspd)
{
tstrHostIFmsg strHostIFmsg;
s32 s32Error = WILC_SUCCESS;
@@ -6151,13 +6107,13 @@ s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd)
/* prepare the Get LINKSPEED Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_LINKSPEED;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send GET_LINKSPEED to message queue ");
return WILC_FAIL;
@@ -6178,20 +6134,20 @@ s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd)
return s32Error;
}
-s32 host_int_get_statistics(WILC_WFIDrvHandle hWFIDrv, tstrStatistics *pstrStatistics)
+s32 host_int_get_statistics(tstrWILC_WFIDrv *hWFIDrv, tstrStatistics *pstrStatistics)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
/* prepare the Get Statistics Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_STATISTICS;
strHostIFmsg.uniHostIFmsgBody.pUserData = (char *)pstrStatistics;
strHostIFmsg.drvHandler = hWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send get host channel param's message queue ");
return WILC_FAIL;
@@ -6218,7 +6174,7 @@ s32 host_int_get_statistics(WILC_WFIDrvHandle hWFIDrv, tstrStatistics *pstrStati
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
+s32 host_int_scan(tstrWILC_WFIDrv *hWFIDrv, u8 u8ScanSource,
u8 u8ScanType, u8 *pu8ChnlFreqList,
u8 u8ChnlListLen, const u8 *pu8IEs,
size_t IEsLen, tWILCpfScanResult ScanResult,
@@ -6229,13 +6185,12 @@ s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
tstrHostIFmsg strHostIFmsg;
tenuScanConnTimer enuScanConnTimer;
- if (pstrWFIDrv == NULL || ScanResult == NULL) {
+ if (pstrWFIDrv == NULL || ScanResult == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the Scan Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_SCAN;
@@ -6253,17 +6208,17 @@ s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pvUserArg = pvUserArg;
strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.u8ChnlListLen = u8ChnlListLen;
- strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList = (u8 *)WILC_MALLOC(u8ChnlListLen); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList,
+ strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList = WILC_MALLOC(u8ChnlListLen); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList,
pu8ChnlFreqList, u8ChnlListLen);
strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.IEsLen = IEsLen;
- strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs = (u8 *)WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs,
+ strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs = WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs,
pu8IEs, IEsLen);
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Error in sending message queue scanning parameters: Error(%d)\n", s32Error);
WILC_ERRORREPORT(s32Error, WILC_FAIL);
@@ -6271,8 +6226,9 @@ s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
enuScanConnTimer = SCAN_TIMER;
PRINT_D(HOSTINF_DBG, ">> Starting the SCAN timer\n");
- WILC_TimerStart(&(pstrWFIDrv->hScanTimer), HOST_IF_SCAN_TIMEOUT, (void *) hWFIDrv, NULL);
-
+ pstrWFIDrv->hScanTimer.data = (unsigned long)hWFIDrv;
+ mod_timer(&pstrWFIDrv->hScanTimer,
+ jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
WILC_CATCH(s32Error)
{
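WILC_TimerStart() is replaced with the timer API of this kernel generation (pre-4.15): the callback takes an unsigned long, its context travels in timer_list.data, and mod_timer() (re)arms the timer at an absolute jiffies deadline. A self-contained sketch of the pattern, with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct scan_ctx { int pending; };	/* stand-in for tstrWILC_WFIDrv */

	static struct timer_list scan_timer;

	static void scan_timeout(unsigned long data)
	{
		struct scan_ctx *ctx = (struct scan_ctx *)data;

		ctx->pending = 0;	/* softirq context: no sleeping here */
	}

	static void scan_timer_init(void)
	{
		setup_timer(&scan_timer, scan_timeout, 0);
	}

	static void scan_timer_arm(struct scan_ctx *ctx, unsigned int ms)
	{
		scan_timer.data = (unsigned long)ctx;
		mod_timer(&scan_timer, jiffies + msecs_to_jiffies(ms));
	}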
@@ -6292,7 +6248,7 @@ s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
* @date 8 March 2012
* @version 1.0
*/
-s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
+s32 hif_set_cfg(tstrWILC_WFIDrv *hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
{
s32 s32Error = WILC_SUCCESS;
@@ -6301,16 +6257,15 @@ s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the WiphyParams Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_CFG_PARAMS;
strHostIFmsg.uniHostIFmsgBody.strHostIFCfgParamAttr.pstrCfgParamVal = *pstrCfgParamVal;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
WILC_CATCH(s32Error)
{
@@ -6334,7 +6289,7 @@ s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
* @date 8 March 2012
* @version 1.0
*/
-s32 hif_get_cfg(WILC_WFIDrvHandle hWFIDrv, u16 u16WID, u16 *pu16WID_Value)
+s32 hif_get_cfg(tstrWILC_WFIDrv *hWFIDrv, u16 u16WID, u16 *pu16WID_Value)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -6342,7 +6297,7 @@ s32 hif_get_cfg(WILC_WFIDrvHandle hWFIDrv, u16 u16WID, u16 *pu16WID_Value)
down(&(pstrWFIDrv->gtOsCfgValuesSem));
if (pstrWFIDrv == NULL) {
- PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+ PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
}
PRINT_D(HOSTINF_DBG, "Getting configuration parameters\n");
@@ -6469,9 +6424,10 @@ void host_int_send_join_leave_info_to_host
* @version 1.0
*/
-void GetPeriodicRSSI(void *pvArg)
+static void GetPeriodicRSSI(unsigned long arg)
{
- tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)pvArg;
+ tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)arg;
+
if (pstrWFIDrv == NULL) {
PRINT_ER("Driver handler is NULL\n");
return;
@@ -6482,19 +6438,20 @@ void GetPeriodicRSSI(void *pvArg)
tstrHostIFmsg strHostIFmsg;
/* prepare the Get RSSI Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_RSSI;
strHostIFmsg.drvHandler = pstrWFIDrv;
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
if (s32Error) {
PRINT_ER("Failed to send get host channel param's message queue ");
return;
}
}
- WILC_TimerStart(&(g_hPeriodicRSSI), 5000, (void *)pstrWFIDrv, NULL);
+ g_hPeriodicRSSI.data = (unsigned long)pstrWFIDrv;
+ mod_timer(&g_hPeriodicRSSI, jiffies + msecs_to_jiffies(5000));
}
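GetPeriodicRSSI() shows the standard way to get periodic behaviour out of the one-shot timer_list: the handler does its non-sleeping work, then re-arms itself. Reduced to a skeleton:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	#define RSSI_PERIOD_MS 5000	/* matches the 5000 ms interval above */

	static struct timer_list rssi_timer;

	static void rssi_cb(unsigned long data)
	{
		/* hand real work to a thread; softirq context cannot sleep */

		/* a one-shot timer must be re-armed by its own handler
		 * to keep ticking */
		rssi_timer.data = data;
		mod_timer(&rssi_timer,
			  jiffies + msecs_to_jiffies(RSSI_PERIOD_MS));
	}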
@@ -6515,7 +6472,7 @@ static u32 u32Intialized;
static u32 msgQ_created;
static u32 clients_count;
-s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
+s32 host_int_init(tstrWILC_WFIDrv **phWFIDrv)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv;
@@ -6535,16 +6492,16 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
/*Allocate host interface private structure*/
- pstrWFIDrv = (tstrWILC_WFIDrv *)WILC_MALLOC(sizeof(tstrWILC_WFIDrv));
+ pstrWFIDrv = WILC_MALLOC(sizeof(tstrWILC_WFIDrv));
if (pstrWFIDrv == NULL) {
/* WILC_ERRORREPORT(s32Error,WILC_NO_MEM); */
s32Error = WILC_NO_MEM;
PRINT_ER("Failed to allocate memory\n");
goto _fail_timer_2;
}
- WILC_memset(pstrWFIDrv, 0, sizeof(tstrWILC_WFIDrv));
+ memset(pstrWFIDrv, 0, sizeof(tstrWILC_WFIDrv));
/*return driver handle to user*/
- *phWFIDrv = (WILC_WFIDrvHandle)pstrWFIDrv;
+ *phWFIDrv = pstrWFIDrv;
/*save into global handle*/
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
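The cast on WILC_MALLOC()'s result is dropped: void * converts implicitly in C, and kernel style treats the cast as noise. Assuming WILC_MALLOC() wraps kmalloc(..., GFP_KERNEL), the remaining allocate-then-memset() pair could later collapse into a single kzalloc(); a sketch of that further step, which this patch does not take:

	#include <linux/slab.h>

	static tstrWILC_WFIDrv *alloc_drv(void)
	{
		/* kzalloc = kmalloc + zeroing, one call instead of two */
		return kzalloc(sizeof(tstrWILC_WFIDrv), GFP_KERNEL);
	}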
@@ -6575,9 +6532,7 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
PRINT_D(HOSTINF_DBG, "INIT: CLIENT COUNT %d\n", clients_count);
if (clients_count == 0) {
-
- s32Error = WILC_MsgQueueCreate(&gMsgQHostIF, NULL);
-
+ s32Error = WILC_MsgQueueCreate(&gMsgQHostIF);
if (s32Error < 0) {
PRINT_ER("Failed to creat MQ\n");
@@ -6590,47 +6545,24 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
s32Error = WILC_FAIL;
goto _fail_mq_;
}
- s32Error = WILC_TimerCreate(&(g_hPeriodicRSSI), GetPeriodicRSSI, NULL);
- if (s32Error < 0) {
- PRINT_ER("Failed to creat Timer\n");
- goto _fail_timer_1;
- }
- WILC_TimerStart(&(g_hPeriodicRSSI), 5000, (void *)pstrWFIDrv, NULL);
-
+ setup_timer(&g_hPeriodicRSSI, GetPeriodicRSSI,
+ (unsigned long)pstrWFIDrv);
+ mod_timer(&g_hPeriodicRSSI, jiffies + msecs_to_jiffies(5000));
}
- s32Error = WILC_TimerCreate(&(pstrWFIDrv->hScanTimer), TimerCB_Scan, NULL);
- if (s32Error < 0) {
- PRINT_ER("Failed to creat Timer\n");
- goto _fail_thread_;
- }
-
- s32Error = WILC_TimerCreate(&(pstrWFIDrv->hConnectTimer), TimerCB_Connect, NULL);
- if (s32Error < 0) {
- PRINT_ER("Failed to creat Timer\n");
- goto _fail_timer_1;
- }
+ setup_timer(&pstrWFIDrv->hScanTimer, TimerCB_Scan, 0);
+ setup_timer(&pstrWFIDrv->hConnectTimer, TimerCB_Connect, 0);
#ifdef WILC_P2P
/*Remain on channel timer*/
- s32Error = WILC_TimerCreate(&(pstrWFIDrv->hRemainOnChannel), ListenTimerCB, NULL);
- if (s32Error < 0) {
- PRINT_ER("Failed to creat Remain-on-channel Timer\n");
- goto _fail_timer_3;
- }
+ setup_timer(&pstrWFIDrv->hRemainOnChannel, ListenTimerCB, 0);
#endif
sema_init(&(pstrWFIDrv->gtOsCfgValuesSem), 1);
down(&(pstrWFIDrv->gtOsCfgValuesSem));
-
-
-#ifdef SIMULATION
- TransportInit();
-#endif
-
pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
/* gWFiDrvHandle->bPendingConnRequest = false; */
@@ -6666,11 +6598,6 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
goto _fail_mem_;
}
-#ifdef SIMULATION
- /*Initialize Simulaor*/
- CoreConfigSimulatorInit();
-#endif
-
u32Intialized = 1;
clients_count++; /* increase number of created entities */
@@ -6679,20 +6606,17 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
_fail_mem_:
if (pstrWFIDrv != NULL)
- WILC_FREE(pstrWFIDrv);
+ kfree(pstrWFIDrv);
#ifdef WILC_P2P
-_fail_timer_3:
- WILC_TimerDestroy(&(pstrWFIDrv->hRemainOnChannel), NULL);
+ del_timer_sync(&pstrWFIDrv->hRemainOnChannel);
#endif
_fail_timer_2:
up(&(pstrWFIDrv->gtOsCfgValuesSem));
- WILC_TimerDestroy(&(pstrWFIDrv->hConnectTimer), NULL);
-_fail_timer_1:
- WILC_TimerDestroy(&(pstrWFIDrv->hScanTimer), NULL);
-_fail_thread_:
+ del_timer_sync(&pstrWFIDrv->hConnectTimer);
+ del_timer_sync(&pstrWFIDrv->hScanTimer);
kthread_stop(HostIFthreadHandler);
_fail_mq_:
- WILC_MsgQueueDestroy(&gMsgQHostIF, NULL);
+ WILC_MsgQueueDestroy(&gMsgQHostIF);
_fail_:
return s32Error;
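The reshuffled _fail_* labels follow the usual kernel unwind idiom: release in reverse order of acquisition, with each failure site jumping to the label that frees exactly what has been set up so far. In the abstract, with hypothetical helpers:

	extern int get_a(void), get_b(void), get_c(void);
	extern void put_a_res(void), put_b_res(void);

	static int init_chain(void)
	{
		int err;

		err = get_a();
		if (err)
			goto out;
		err = get_b();
		if (err)
			goto put_a;
		err = get_c();
		if (err)
			goto put_b;
		return 0;

	put_b:
		put_b_res();
	put_a:
		put_a_res();
	out:
		return err;
	}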
@@ -6708,7 +6632,7 @@ _fail_:
* @version 1.0
*/
-s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_deinit(tstrWILC_WFIDrv *hWFIDrv)
{
s32 s32Error = WILC_SUCCESS;
tstrHostIFmsg strHostIFmsg;
@@ -6737,28 +6661,28 @@ s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv)
/*BugID_5348*/
/*Destroy all timers before acquiring hSemDeinitDrvHandle*/
/*to guarantee handling all messages before proceeding*/
- if (WILC_TimerDestroy(&(pstrWFIDrv->hScanTimer), NULL)) {
- PRINT_D(HOSTINF_DBG, ">> Scan timer is active \n");
+ if (del_timer_sync(&pstrWFIDrv->hScanTimer)) {
+ PRINT_D(HOSTINF_DBG, ">> Scan timer is active\n");
/* msleep(HOST_IF_SCAN_TIMEOUT+1000); */
}
- if (WILC_TimerDestroy(&(pstrWFIDrv->hConnectTimer), NULL)) {
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+ if (del_timer_sync(&pstrWFIDrv->hConnectTimer)) {
+ PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
}
- if (WILC_TimerDestroy(&(g_hPeriodicRSSI), NULL)) {
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+ if (del_timer_sync(&g_hPeriodicRSSI)) {
+ PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
}
#ifdef WILC_P2P
/*Destroy Remain-onchannel Timer*/
- WILC_TimerDestroy(&(pstrWFIDrv->hRemainOnChannel), NULL);
+ del_timer_sync(&pstrWFIDrv->hRemainOnChannel);
#endif
- host_int_set_wfi_drv_handler((u32)NULL);
+ host_int_set_wfi_drv_handler(NULL);
down(&hSemDeinitDrvHandle);
@@ -6770,39 +6694,30 @@ s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv)
pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult = NULL;
}
/*deinit configurator and simulator*/
-#ifdef SIMULATION
- CoreConfigSimulatorDeInit();
-#endif
CoreConfiguratorDeInit();
-#ifdef SIMULATION
- TransportDeInit();
-#endif
pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
gbScanWhileConnected = false;
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
if (clients_count == 1) {
- if (WILC_TimerDestroy(&g_hPeriodicRSSI, NULL)) {
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+ if (del_timer_sync(&g_hPeriodicRSSI)) {
+ PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
}
strHostIFmsg.u16MsgId = HOST_IF_MSG_EXIT;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error != WILC_SUCCESS) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error != WILC_SUCCESS)
PRINT_ER("Error in sending deinit's message queue message function: Error(%d)\n", s32Error);
- }
down(&hSemHostIFthrdEnd);
-
-
- WILC_MsgQueueDestroy(&gMsgQHostIF, NULL);
+ WILC_MsgQueueDestroy(&gMsgQHostIF);
msgQ_created = 0;
}
@@ -6812,7 +6727,7 @@ s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv)
u32Intialized = 0;
/* gWFiDrvHandle = NULL; */
if (pstrWFIDrv != NULL) {
- WILC_FREE(pstrWFIDrv);
+ kfree(pstrWFIDrv);
/* pstrWFIDrv=NULL; */
}
@@ -6854,24 +6769,20 @@ void NetworkInfoReceived(u8 *pu8Buffer, u32 u32Length)
}
/* prepare the Asynchronous Network Info message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_NTWRK_INFO;
strHostIFmsg.drvHandler = pstrWFIDrv;
strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.u32Length = u32Length;
- strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer = (u8 *)WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer,
+ strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer = WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer,
pu8Buffer, u32Length);
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
PRINT_ER("Error in sending network info message queue message parameters: Error(%d)\n", s32Error);
- }
-
-
- return;
}
/**
@@ -6897,7 +6808,7 @@ void GnrlAsyncInfoReceived(u8 *pu8Buffer, u32 u32Length)
drvHandler = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
- PRINT_D(HOSTINF_DBG, "General asynchronous info packet received \n");
+ PRINT_D(HOSTINF_DBG, "General asynchronous info packet received\n");
if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle) {
@@ -6916,7 +6827,7 @@ void GnrlAsyncInfoReceived(u8 *pu8Buffer, u32 u32Length)
}
/* prepare the General Asynchronous Info message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO;
@@ -6924,19 +6835,17 @@ void GnrlAsyncInfoReceived(u8 *pu8Buffer, u32 u32Length)
strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.u32Length = u32Length;
- strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer = (u8 *)WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
- WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer,
+ strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer = WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
+ memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer,
pu8Buffer, u32Length);
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
PRINT_ER("Error in sending message queue asynchronous message info: Error(%d)\n", s32Error);
- }
/*BugID_5348*/
up(&hSemHostIntDeinit);
- return;
}
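The open-coded shift/OR chain above rebuilds a 32-bit handle from the last four bytes of the buffer, least significant byte first. The kernel's unaligned helpers express the same little-endian decode in one call; a sketch, valid only where the handle genuinely fits in 32 bits (the same limitation noted for host_int_set_wfi_drv_handler earlier):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u32 tail_handle(const u8 *buf, u32 len)
	{
		/* caller must guarantee len >= 4 */
		return get_unaligned_le32(buf + len - 4);
	}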
/**
@@ -6954,20 +6863,20 @@ void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length)
tstrHostIFmsg strHostIFmsg;
u32 drvHandler;
tstrWILC_WFIDrv *pstrWFIDrv = NULL;
+
drvHandler = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
PRINT_D(GENERIC_DBG, "Scan notification received %p\n", pstrWFIDrv);
- if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle) {
+ if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle)
return;
- }
/*if there is an ongoing scan request*/
if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
/* prepare the Scan Done message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
strHostIFmsg.drvHandler = pstrWFIDrv;
@@ -6978,14 +6887,13 @@ void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length)
/*strHostIFmsg.uniHostIFmsgBody.strScanComplete.u32Length = u32Length;
* strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer = (u8*)WILC_MALLOC(u32Length);
- * WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer,
+ * memcpy(strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer,
* pu8Buffer, u32Length); */
/* send the message */
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
PRINT_ER("Error in sending message queue scan complete parameters: Error(%d)\n", s32Error);
- }
}
@@ -7008,18 +6916,17 @@ void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length)
* @date
* @version 1.0
*/
-s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg)
+s32 host_int_remain_on_channel(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the remainonchan Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_REMAIN_ON_CHAN;
@@ -7031,10 +6938,9 @@ s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32
strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = u32SessionID;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7057,29 +6963,27 @@ s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32
* @date
* @version 1.0
*/
-s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID)
+s32 host_int_ListenStateExpired(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/*Stopping remain-on-channel timer*/
- WILC_TimerStop(&(pstrWFIDrv->hRemainOnChannel), NULL);
+ del_timer(&pstrWFIDrv->hRemainOnChannel);
/* prepare the timer fire Message */
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
strHostIFmsg.u16MsgId = HOST_IF_MSG_LISTEN_TIMER_FIRED;
strHostIFmsg.drvHandler = hWFIDrv;
strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = u32SessionID;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
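Note the asymmetry with the teardown path above: here plain del_timer() is used, which only deactivates a pending timer and returns immediately, without waiting for a handler that may already be executing. That keeps it safe in atomic context, at the price that the caller must tolerate one last callback racing through. Side by side, with the driver's own timer:

	/* hot path: just stop the countdown, never block */
	del_timer(&pstrWFIDrv->hRemainOnChannel);

	/* teardown path (as in host_int_deinit): also wait for a
	 * concurrently running handler to finish */
	del_timer_sync(&pstrWFIDrv->hRemainOnChannel);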
@@ -7095,17 +6999,16 @@ s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID)
* @author
* @date
* @version 1.0*/
-s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bReg)
+s32 host_int_frame_register(tstrWILC_WFIDrv *hWFIDrv, u16 u16FrameType, bool bReg)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_REGISTER_FRAME;
@@ -7128,10 +7031,9 @@ s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bR
strHostIFmsg.uniHostIFmsgBody.strHostIfRegisterFrame.bReg = bReg;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7155,7 +7057,7 @@ s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bR
* @date
* @version 1.0
*/
-s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
+s32 host_int_add_beacon(tstrWILC_WFIDrv *hWFIDrv, u32 u32Interval,
u32 u32DTIMPeriod,
u32 u32HeadLen, u8 *pu8Head,
u32 u32TailLen, u8 *pu8Tail)
@@ -7165,11 +7067,10 @@ s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
tstrHostIFmsg strHostIFmsg;
tstrHostIFSetBeacon *pstrSetBeaconParam = &strHostIFmsg.uniHostIFmsgBody.strHostIFSetBeacon;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
PRINT_D(HOSTINF_DBG, "Setting adding beacon message queue params\n");
@@ -7180,38 +7081,33 @@ s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
pstrSetBeaconParam->u32Interval = u32Interval;
pstrSetBeaconParam->u32DTIMPeriod = u32DTIMPeriod;
pstrSetBeaconParam->u32HeadLen = u32HeadLen;
- pstrSetBeaconParam->pu8Head = (u8 *)WILC_MALLOC(u32HeadLen);
- if (pstrSetBeaconParam->pu8Head == NULL) {
+ pstrSetBeaconParam->pu8Head = WILC_MALLOC(u32HeadLen);
+ if (pstrSetBeaconParam->pu8Head == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
- WILC_memcpy(pstrSetBeaconParam->pu8Head, pu8Head, u32HeadLen);
+ memcpy(pstrSetBeaconParam->pu8Head, pu8Head, u32HeadLen);
pstrSetBeaconParam->u32TailLen = u32TailLen;
/* Bug 4599 : if tail length = 0 skip allocating & copying */
if (u32TailLen > 0) {
- pstrSetBeaconParam->pu8Tail = (u8 *)WILC_MALLOC(u32TailLen);
- if (pstrSetBeaconParam->pu8Tail == NULL) {
+ pstrSetBeaconParam->pu8Tail = WILC_MALLOC(u32TailLen);
+ if (pstrSetBeaconParam->pu8Tail == NULL)
WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
- }
- WILC_memcpy(pstrSetBeaconParam->pu8Tail, pu8Tail, u32TailLen);
+ memcpy(pstrSetBeaconParam->pu8Tail, pu8Tail, u32TailLen);
} else {
pstrSetBeaconParam->pu8Tail = NULL;
}
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
- if (pstrSetBeaconParam->pu8Head != NULL) {
- WILC_FREE(pstrSetBeaconParam->pu8Head);
- }
+ if (pstrSetBeaconParam->pu8Head != NULL)
+ kfree(pstrSetBeaconParam->pu8Head);
- if (pstrSetBeaconParam->pu8Tail != NULL) {
- WILC_FREE(pstrSetBeaconParam->pu8Tail);
- }
+ if (pstrSetBeaconParam->pu8Tail != NULL)
+ kfree(pstrSetBeaconParam->pu8Tail);
}
return s32Error;
@@ -7228,22 +7124,21 @@ s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
* @date
* @version 1.0
*/
-s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_del_beacon(tstrWILC_WFIDrv *hWFIDrv)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_BEACON;
strHostIFmsg.drvHandler = hWFIDrv;
PRINT_D(HOSTINF_DBG, "Setting deleting beacon message queue params\n");
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
WILC_ERRORCHECK(s32Error);
WILC_CATCH(s32Error)
@@ -7262,7 +7157,7 @@ s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv)
* @date
* @version 1.0
*/
-s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
+s32 host_int_add_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7270,11 +7165,10 @@ s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrSt
tstrWILC_AddStaParam *pstrAddStationMsg = &strHostIFmsg.uniHostIFmsgBody.strAddStaParam;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
PRINT_D(HOSTINF_DBG, "Setting adding station message queue params\n");
@@ -7283,20 +7177,20 @@ s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrSt
strHostIFmsg.u16MsgId = HOST_IF_MSG_ADD_STATION;
strHostIFmsg.drvHandler = hWFIDrv;
- WILC_memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
+ memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
if (pstrAddStationMsg->u8NumRates > 0) {
u8 *rates = WILC_MALLOC(pstrAddStationMsg->u8NumRates);
+
WILC_NULLCHECK(s32Error, rates);
- WILC_memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
+ memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
pstrAddStationMsg->pu8Rates = rates;
}
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7313,18 +7207,17 @@ s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrSt
* @date
* @version 1.0
*/
-s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr)
+s32 host_int_del_station(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8MacAddr)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
tstrHostIFDelSta *pstrDelStationMsg = &strHostIFmsg.uniHostIFmsgBody.strDelStaParam;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
PRINT_D(HOSTINF_DBG, "Setting deleting station message queue params\n");
@@ -7336,14 +7229,13 @@ s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr)
/*BugID_4795: Handling situation of deleting all stations*/
if (pu8MacAddr == NULL)
- WILC_memset(pstrDelStationMsg->au8MacAddr, 255, ETH_ALEN);
+ memset(pstrDelStationMsg->au8MacAddr, 255, ETH_ALEN);
else
- WILC_memcpy(pstrDelStationMsg->au8MacAddr, pu8MacAddr, ETH_ALEN);
+ memcpy(pstrDelStationMsg->au8MacAddr, pu8MacAddr, ETH_ALEN);
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7359,7 +7251,7 @@ s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr)
* @date
* @version 1.0
*/
-s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN])
+s32 host_int_del_allstation(tstrWILC_WFIDrv *hWFIDrv, u8 pu8MacAddr[][ETH_ALEN])
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7370,11 +7262,10 @@ s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]
u8 u8AssocNumb = 0;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
PRINT_D(HOSTINF_DBG, "Setting deauthenticating station message queue params\n");
@@ -7385,7 +7276,7 @@ s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]
/* Handling situation of deauthenticating all associated stations*/
for (i = 0; i < MAX_NUM_STA; i++) {
if (memcmp(pu8MacAddr[i], au8Zero_Buff, ETH_ALEN)) {
- WILC_memcpy(pstrDelAllStationMsg->au8Sta_DelAllSta[i], pu8MacAddr[i], ETH_ALEN);
+ memcpy(pstrDelAllStationMsg->au8Sta_DelAllSta[i], pu8MacAddr[i], ETH_ALEN);
PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", pstrDelAllStationMsg->au8Sta_DelAllSta[i][0], pstrDelAllStationMsg->au8Sta_DelAllSta[i][1], pstrDelAllStationMsg->au8Sta_DelAllSta[i][2], pstrDelAllStationMsg->au8Sta_DelAllSta[i][3], pstrDelAllStationMsg->au8Sta_DelAllSta[i][4],
pstrDelAllStationMsg->au8Sta_DelAllSta[i][5]);
u8AssocNumb++;
@@ -7397,13 +7288,12 @@ s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]
}
pstrDelAllStationMsg->u8Num_AssocSta = u8AssocNumb;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
- if (s32Error) {
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7423,38 +7313,37 @@ s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]
* @date
* @version 1.0
*/
-s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
+s32 host_int_edit_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
tstrWILC_AddStaParam *pstrAddStationMsg = &strHostIFmsg.uniHostIFmsgBody.strAddStaParam;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
PRINT_D(HOSTINF_DBG, "Setting editing station message queue params\n");
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_EDIT_STATION;
strHostIFmsg.drvHandler = hWFIDrv;
- WILC_memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
+ memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
if (pstrAddStationMsg->u8NumRates > 0) {
u8 *rates = WILC_MALLOC(pstrAddStationMsg->u8NumRates);
+
WILC_NULLCHECK(s32Error, rates);
- WILC_memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
+ memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
pstrAddStationMsg->pu8Rates = rates;
}
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
}
@@ -7463,22 +7352,21 @@ s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrS
#endif /*WILC_AP_EXTERNAL_MLME*/
uint32_t wilc_get_chipid(uint8_t);
-s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout)
+s32 host_int_set_power_mgmt(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32Timeout)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
tstrHostIfPowerMgmtParam *pstrPowerMgmtParam = &strHostIFmsg.uniHostIFmsgBody.strPowerMgmtparam;
- PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d << \n\n", bIsEnabled);
+ PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d <<\n\n", bIsEnabled);
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
PRINT_D(HOSTINF_DBG, "Setting Power management message queue params\n");
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
@@ -7489,17 +7377,16 @@ s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32T
pstrPowerMgmtParam->u32Timeout = u32Timeout;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
}
return s32Error;
}
-s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32count)
+s32 host_int_setup_multicast_filter(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32count)
{
s32 s32Error = WILC_SUCCESS;
@@ -7508,13 +7395,12 @@ s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled,
tstrHostIFSetMulti *pstrMulticastFilterParam = &strHostIFmsg.uniHostIFmsgBody.strHostIfSetMulti;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
PRINT_D(HOSTINF_DBG, "Setting Multicast Filter params\n");
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
@@ -7524,10 +7410,9 @@ s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled,
pstrMulticastFilterParam->bIsEnabled = bIsEnabled;
pstrMulticastFilterParam->u32count = u32count;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
}
@@ -7568,17 +7453,17 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
pNewJoinBssParam = WILC_MALLOC(sizeof(tstrJoinBssParam));
if (pNewJoinBssParam != NULL) {
- WILC_memset(pNewJoinBssParam, 0, sizeof(tstrJoinBssParam));
+ memset(pNewJoinBssParam, 0, sizeof(tstrJoinBssParam));
pNewJoinBssParam->dtim_period = ptstrNetworkInfo->u8DtimPeriod;
pNewJoinBssParam->beacon_period = ptstrNetworkInfo->u16BeaconPeriod;
pNewJoinBssParam->cap_info = ptstrNetworkInfo->u16CapInfo;
- WILC_memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
+ memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
/*for(i=0; i<6;i++)
* PRINT_D(HOSTINF_DBG,"%c",pNewJoinBssParam->au8bssid[i]);*/
- WILC_memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
+ memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
pNewJoinBssParam->ssidLen = ptstrNetworkInfo->u8SsidLen;
- WILC_memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
- WILC_memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
+ memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
+ memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
/*for(i=0; i<pNewJoinBssParam->ssidLen;i++)
* PRINT_D(HOSTINF_DBG,"%c",pNewJoinBssParam->ssid[i]);*/
@@ -7633,9 +7518,8 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
pNewJoinBssParam->wmm_cap = true;
/* Check if Bit 7 is set indicating U-APSD capability */
- if (pu8IEs[index + 8] & (1 << 7)) {
+ if (pu8IEs[index + 8] & (1 << 7))
pNewJoinBssParam->uapsd_cap = true;
- }
index += pu8IEs[index + 1] + 2;
continue;
}
@@ -7645,6 +7529,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
(pu8IEs[index + 4] == 0x9a) && /* OUI */
(pu8IEs[index + 5] == 0x09) && (pu8IEs[index + 6] == 0x0c)) { /* OUI Type */
u16 u16P2P_count;
+
pNewJoinBssParam->tsf = ptstrNetworkInfo->u32Tsf;
pNewJoinBssParam->u8NoaEnbaled = 1;
pNewJoinBssParam->u8Index = pu8IEs[index + 9];
@@ -7656,20 +7541,20 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
} else
pNewJoinBssParam->u8OppEnable = 0;
/* HOSTINF_DBG */
- PRINT_D(GENERIC_DBG, "P2P Dump \n");
+ PRINT_D(GENERIC_DBG, "P2P Dump\n");
for (i = 0; i < pu8IEs[index + 7]; i++)
- PRINT_D(GENERIC_DBG, " %x \n", pu8IEs[index + 9 + i]);
+ PRINT_D(GENERIC_DBG, " %x\n", pu8IEs[index + 9 + i]);
pNewJoinBssParam->u8Count = pu8IEs[index + 11];
u16P2P_count = index + 12;
- WILC_memcpy(pNewJoinBssParam->au8Duration, pu8IEs + u16P2P_count, 4);
+ memcpy(pNewJoinBssParam->au8Duration, pu8IEs + u16P2P_count, 4);
u16P2P_count += 4;
- WILC_memcpy(pNewJoinBssParam->au8Interval, pu8IEs + u16P2P_count, 4);
+ memcpy(pNewJoinBssParam->au8Interval, pu8IEs + u16P2P_count, 4);
u16P2P_count += 4;
- WILC_memcpy(pNewJoinBssParam->au8StartTime, pu8IEs + u16P2P_count, 4);
+ memcpy(pNewJoinBssParam->au8StartTime, pu8IEs + u16P2P_count, 4);
index += pu8IEs[index + 1] + 2;
continue;
@@ -7698,7 +7583,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
rsnIndex += 7; /* skipping id, length, version(2B) and first 3 bytes of gcipher */
pNewJoinBssParam->rsn_grp_policy = pu8IEs[rsnIndex];
rsnIndex++;
- /* PRINT_D(HOSTINF_DBG,"Group Policy: %0x \n",pNewJoinBssParam->rsn_grp_policy); */
+ /* PRINT_D(HOSTINF_DBG,"Group Policy: %0x\n",pNewJoinBssParam->rsn_grp_policy); */
/* initialize policies with invalid values */
jumpOffset = pu8IEs[rsnIndex] * 4; /* total no.of bytes of pcipher field (count*4) */
@@ -7709,7 +7594,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
pcipherCount = (pu8IEs[rsnIndex] > 3) ? 3 : pu8IEs[rsnIndex];
rsnIndex += 2; /* jump 2 bytes of pcipher count */
- /* PRINT_D(HOSTINF_DBG,"\npcipher:%d \n",pcipherCount); */
+ /* PRINT_D(HOSTINF_DBG,"\npcipher:%d\n",pcipherCount); */
for (i = pcipherTotalCount, j = 0; i < pcipherCount + pcipherTotalCount && i < 3; i++, j++) {
/* each count corresponds to 4 bytes, only last byte is saved */
pNewJoinBssParam->rsn_pcip_policy[i] = pu8IEs[rsnIndex + ((j + 1) * 4) - 1];
@@ -7755,7 +7640,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
void host_int_freeJoinParams(void *pJoinParams)
{
if ((tstrJoinBssParam *)pJoinParams != NULL)
- WILC_FREE((tstrJoinBssParam *)pJoinParams);
+ kfree((tstrJoinBssParam *)pJoinParams);
else
PRINT_ER("Unable to FREE null pointer\n");
}
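kfree(NULL) is defined to be a no-op, so the NULL test here survives only to emit the diagnostic. Were the message dropped, the whole function would reduce to a single call; a sketch:

	#include <linux/slab.h>

	static void free_join_params_sketch(void *pJoinParams)
	{
		kfree(pJoinParams);	/* kfree(NULL) is a safe no-op */
	}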
@@ -7771,7 +7656,7 @@ void host_int_freeJoinParams(void *pJoinParams)
* @date
* @version 1.0**/
-static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID, short int BufferSize,
+static int host_int_addBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID, short int BufferSize,
short int SessionTimeout, void *drvHandler)
{
s32 s32Error = WILC_SUCCESS;
@@ -7779,11 +7664,10 @@ static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char T
tstrHostIFmsg strHostIFmsg;
tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_ADD_BA_SESSION;
@@ -7794,10 +7678,9 @@ static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char T
pBASessionInfo->u16SessionTimeout = SessionTimeout;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7807,18 +7690,17 @@ static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char T
}
-s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
+s32 host_int_delBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_BA_SESSION;
@@ -7827,10 +7709,9 @@ s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
pBASessionInfo->u8Ted = TID;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7842,18 +7723,17 @@ s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
return s32Error;
}
-s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
+s32 host_int_del_All_Rx_BASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS;
@@ -7862,10 +7742,9 @@ s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char
pBASessionInfo->u8Ted = TID;
strHostIFmsg.drvHandler = hWFIDrv;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7885,7 +7764,7 @@ s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char
* @author Abdelrahman Sobhy
* @date
* @version 1.0*/
-s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
+s32 host_int_setup_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *u16ipadd, u8 idx)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7894,11 +7773,10 @@ s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
/* TODO: Enable This feature on softap firmware */
return 0;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_IPADDRESS;
@@ -7907,10 +7785,9 @@ s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
strHostIFmsg.drvHandler = hWFIDrv;
strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx = idx;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
@@ -7929,29 +7806,27 @@ s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
* @author Abdelrahman Sobhy
* @date
* @version 1.0*/
-s32 host_int_get_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
+s32 host_int_get_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *u16ipadd, u8 idx)
{
s32 s32Error = WILC_SUCCESS;
tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
tstrHostIFmsg strHostIFmsg;
- if (pstrWFIDrv == NULL) {
+ if (pstrWFIDrv == NULL)
WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
- }
- WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+ memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
/* prepare the WiphyParams Message */
strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_IPADDRESS;
strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.au8IPAddr = u16ipadd;
- strHostIFmsg.drvHandler=hWFIDrv;
- strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx= idx;
+ strHostIFmsg.drvHandler = hWFIDrv;
+ strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx = idx;
- s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
- if (s32Error) {
+ s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+ if (s32Error)
WILC_ERRORREPORT(s32Error, s32Error);
- }
WILC_CATCH(s32Error)
{
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 38db740745cd..e66dee9af5da 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -11,7 +11,6 @@
#define HOST_INT_H
#include "coreconfigurator.h"
-#include "coreconfigsimulator.h"
/*****************************************************************************/
/* Macros */
/*****************************************************************************/
@@ -368,10 +367,10 @@ typedef struct {
struct semaphore hSemGetCHNL;
struct semaphore hSemInactiveTime;
/* timer handlers */
- WILC_TimerHandle hScanTimer;
- WILC_TimerHandle hConnectTimer;
+ struct timer_list hScanTimer;
+ struct timer_list hConnectTimer;
#ifdef WILC_P2P
- WILC_TimerHandle hRemainOnChannel;
+ struct timer_list hRemainOnChannel;
#endif
bool IFC_UP;
@@ -433,7 +432,7 @@ typedef struct {
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress);
+s32 host_int_remove_key(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8StaAddress);
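The header half of the series: every prototype swaps the opaque WILC_WFIDrvHandle for an explicit tstrWILC_WFIDrv *, so callers are type-checked and the (tstrWILC_WFIDrv *)hWFIDrv casts inside each function become removable. The shape of the change; the old typedef's exact definition is not shown in this diff, so the "before" line is assumed:

	/*
	 * before (assumed shape): an opaque handle, cast back to the
	 * struct pointer inside every entry point
	 *
	 *	typedef void *WILC_WFIDrvHandle;
	 *	s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv,
	 *				const u8 *pu8StaAddress);
	 */

	/* after: the real type, no cast needed */
	s32 host_int_remove_key(tstrWILC_WFIDrv *hWFIDrv,
				const u8 *pu8StaAddress);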
/**
* @brief removes WEP key
* @details valid only in BSS STA mode if External Supplicant support is enabled.
@@ -448,7 +447,7 @@ s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
+s32 host_int_remove_wep_key(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index);
/**
* @brief sets WEP default key
* @details Sets the index of the WEP encryption key in use,
@@ -461,7 +460,7 @@ s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
+s32 host_int_set_WEPDefaultKeyID(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index);
/**
* @brief sets WEP default key
@@ -482,7 +481,7 @@ s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx);
+s32 host_int_add_wep_key_bss_sta(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx);
/**
* @brief host_int_add_wep_key_bss_ap
* @details valid only in AP mode if External Supplicant support is enabled.
@@ -497,7 +496,7 @@ s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
* @date 28 Feb 2013
* @version 1.0
*/
-s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type);
+s32 host_int_add_wep_key_bss_ap(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type);
/**
* @brief adds ptk Key
@@ -515,7 +514,7 @@ s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
+s32 host_int_add_ptk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode, u8 u8Idx);
/**
@@ -530,7 +529,7 @@ s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen
* @date 15 April 2013
* @version 1.0
*/
-s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu32InactiveTime);
+s32 host_int_get_inactive_time(tstrWILC_WFIDrv *hWFIDrv, const u8 *mac, u32 *pu32InactiveTime);
/**
* @brief adds Rx GTk Key
@@ -548,7 +547,7 @@ s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
+s32 host_int_add_rx_gtk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode);
@@ -569,7 +568,7 @@ s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkK
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_add_tx_gtk(WILC_WFIDrvHandle hWFIDrv, u8 u8KeyLen, u8 *pu8TxGtk, u8 u8KeyIdx);
+s32 host_int_add_tx_gtk(tstrWILC_WFIDrv *hWFIDrv, u8 u8KeyLen, u8 *pu8TxGtk, u8 u8KeyIdx);
/**
* @brief caches the pmkid
@@ -592,7 +591,7 @@ s32 host_int_add_tx_gtk(WILC_WFIDrvHandle hWFIDrv, u8 u8KeyLen, u8 *pu8TxGtk, u8
* @version 1.0
*/
-s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray);
+s32 host_int_set_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray);
/**
* @brief gets the cached the pmkid info
* @details valid only in BSS STA mode if External Supplicant
@@ -616,7 +615,7 @@ s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8P
* @version 1.0
*/
-s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
+s32 host_int_get_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PmkidInfoArray,
u32 u32PmkidInfoLen);
/**
@@ -633,7 +632,7 @@ s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassPhrase,
+s32 host_int_set_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PassPhrase,
u8 u8Psklength);
/**
* @brief gets the pass phrase
@@ -649,7 +648,7 @@ s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassP
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv,
u8 *pu8PassPhrase, u8 u8Psklength);
/**
@@ -663,7 +662,7 @@ s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
* @date 19 April 2012
* @version 1.0
*/
-s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress);
+s32 host_int_get_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress);
/**
* @brief sets mac address
@@ -676,7 +675,7 @@ s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress);
* @date 16 July 2012
* @version 1.0
*/
-s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress);
+s32 host_int_set_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress);
/**
* @brief wait until msg q is empty
@@ -721,7 +720,7 @@ s32 host_int_wait_msg_queue_idle(void);
* @version 1.0
*/
#ifndef CONNECT_DIRECT
-s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_site_survey_results(tstrWILC_WFIDrv *hWFIDrv,
u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
u32 u32MaxSiteSrvyFragLen);
#endif
@@ -742,7 +741,7 @@ s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
* @version 1.0
*/
-s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource);
+s32 host_int_set_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 scanSource);
/**
* @brief gets scan source of the last scan
* @details
@@ -758,7 +757,7 @@ s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource);
+s32 host_int_get_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ScanSource);
/**
* @brief sets a join request
@@ -772,7 +771,7 @@ s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource);
* @version 1.0
*/
-s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
+s32 host_int_set_join_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8bssid,
const u8 *pu8ssid, size_t ssidLen,
const u8 *pu8IEs, size_t IEsLen,
tWILCpfConnectResult pfConnectResult, void *pvUserArg,
@@ -792,7 +791,7 @@ s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
* @version 8.0
*/
-s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_flush_join_req(tstrWILC_WFIDrv *hWFIDrv);
/**
@@ -806,7 +805,7 @@ s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode);
+s32 host_int_disconnect(tstrWILC_WFIDrv *hWFIDrv, u16 u16ReasonCode);
/**
* @brief disconnects a sta
@@ -819,7 +818,7 @@ s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id);
+s32 host_int_disconnect_station(tstrWILC_WFIDrv *hWFIDrv, u8 assoc_id);
/**
* @brief gets an Association request info
* @details
@@ -846,7 +845,7 @@ s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id);
* @version 1.0
*/
-s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
+s32 host_int_get_assoc_req_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocReqInfo,
u32 u32AssocReqInfoLen);
/**
* @brief gets an Association Response info
@@ -860,7 +859,7 @@ s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
* @version 1.0
*/
-s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
+s32 host_int_get_assoc_res_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocRespInfo,
u32 u32MaxAssocRespInfoLen, u32 *pu32RcvdAssocRespInfoLen);
/**
* @brief gets an Association Response info
@@ -877,7 +876,7 @@ s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
+s32 host_int_get_rx_power_level(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8RxPowerLevel,
u32 u32RxPowerLevelLen);
/**
@@ -895,7 +894,7 @@ s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_set_mac_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 u8ChNum);
+s32 host_int_set_mac_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 u8ChNum);
/**
* @brief gets the current channel index
@@ -912,7 +911,7 @@ s32 host_int_set_mac_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 u8ChNum);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo);
+s32 host_int_get_host_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ChNo);
/**
* @brief gets the sta rssi
* @details gets the currently maintained RSSI value for the station.
@@ -926,8 +925,8 @@ s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi);
-s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd);
+s32 host_int_get_rssi(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8Rssi);
+s32 host_int_get_link_speed(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8lnkspd);
/**
* @brief scans a set of channels
* @details
@@ -945,7 +944,7 @@ s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
+s32 host_int_scan(tstrWILC_WFIDrv *hWFIDrv, u8 u8ScanSource,
u8 u8ScanType, u8 *pu8ChnlFreqList,
u8 u8ChnlListLen, const u8 *pu8IEs,
size_t IEsLen, tWILCpfScanResult ScanResult,
@@ -961,7 +960,7 @@ s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
* @date 8 March 2012
* @version 1.0
*/
-s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal);
+s32 hif_set_cfg(tstrWILC_WFIDrv *hWFIDrv, tstrCfgParamVal *pstrCfgParamVal);
/**
* @brief gets configuration wids values
@@ -975,7 +974,7 @@ s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal);
* @date 8 March 2012
* @version 1.0
*/
-s32 hif_get_cfg(WILC_WFIDrvHandle hWFIDrv, u16 u16WID, u16 *pu16WID_Value);
+s32 hif_get_cfg(tstrWILC_WFIDrv *hWFIDrv, u16 u16WID, u16 *pu16WID_Value);
/*****************************************************************************/
/* Notification Functions */
/*****************************************************************************/
@@ -1022,7 +1021,7 @@ void host_int_send_network_info_to_host
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv);
+s32 host_int_init(tstrWILC_WFIDrv **phWFIDrv);
/**
* @brief host interface initialization function
@@ -1033,7 +1032,7 @@ s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv);
* @date 8 March 2012
* @version 1.0
*/
-s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_deinit(tstrWILC_WFIDrv *hWFIDrv);
/*!
@@ -1058,7 +1057,7 @@ s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv);
* @version 1.0 Description
*
*/
-s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
+s32 host_int_add_beacon(tstrWILC_WFIDrv *hWFIDrv, u32 u32Interval,
u32 u32DTIMPeriod,
u32 u32HeadLen, u8 *pu8Head,
u32 u32TailLen, u8 *pu8tail);
@@ -1076,7 +1075,7 @@ s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
* @date 10 July 2012
* @version 1.0 Description
*/
-s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_del_beacon(tstrWILC_WFIDrv *hWFIDrv);
/*!
* @fn s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam strStaParams)
@@ -1091,7 +1090,7 @@ s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv);
* @date 12 July 2012
* @version 1.0 Description
*/
-s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
+s32 host_int_add_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
/*!
* @fn s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, const u8* pu8MacAddr)
@@ -1106,7 +1105,7 @@ s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrSt
* @date 09 April 2014
* @version 1.0 Description
*/
-s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]);
+s32 host_int_del_allstation(tstrWILC_WFIDrv *hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]);
/*!
* @fn s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, u8* pu8MacAddr)
@@ -1121,7 +1120,7 @@ s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]
* @date 15 July 2012
* @version 1.0 Description
*/
-s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr);
+s32 host_int_del_station(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8MacAddr);
/*!
* @fn s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam strStaParams)
@@ -1136,7 +1135,7 @@ s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr);
* @date 15 July 2012
* @version 1.0 Description
*/
-s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
+s32 host_int_edit_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
/*!
* @fn s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout)
@@ -1153,7 +1152,7 @@ s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrS
* @date 24 November 2012
* @version 1.0 Description
*/
-s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout);
+s32 host_int_set_power_mgmt(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32Timeout);
/* @param[in,out] hWFIDrv handle to the wifi driver
* @param[in] bIsEnabled TRUE if enabled, FALSE otherwise
* @param[in] u8count count of mac address entries in the filter table
@@ -1165,7 +1164,7 @@ s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32T
* @date 24 November 2012
* @version 1.0 Description
*/
-s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32count);
+s32 host_int_setup_multicast_filter(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32count);
/**
* @brief host_int_setup_ipaddress
* @details set IP address on firmware
@@ -1175,7 +1174,7 @@ s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled,
* @date
* @version 1.0
*/
-s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
+s32 host_int_setup_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8IPAddr, u8 idx);
/**
@@ -1187,7 +1186,7 @@ s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
* @date
* @version 1.0
*/
-s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID);
+s32 host_int_delBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID);
/**
* @brief host_int_delBASession
@@ -1198,7 +1197,7 @@ s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID);
* @date
* @version 1.0
*/
-s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID);
+s32 host_int_del_All_Rx_BASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID);
/**
@@ -1210,7 +1209,7 @@ s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char
* @date
* @version 1.0
*/
-s32 host_int_get_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
+s32 host_int_get_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8IPAddr, u8 idx);
#ifdef WILC_P2P
/**
@@ -1222,7 +1221,7 @@ s32 host_int_get_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
* @date
* @version 1.0
*/
-s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg);
+s32 host_int_remain_on_channel(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg);
/**
* @brief host_int_ListenStateExpired
@@ -1238,7 +1237,7 @@ s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32
* @date
* @version 1.0
*/
-s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID);
+s32 host_int_ListenStateExpired(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID);
/**
* @brief host_int_frame_register
@@ -1249,7 +1248,7 @@ s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID);
* @date
* @version 1.0
*/
-s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bReg);
+s32 host_int_frame_register(tstrWILC_WFIDrv *hWFIDrv, u16 u16FrameType, bool bReg);
#endif
/**
* @brief host_int_set_wfi_drv_handler
@@ -1260,18 +1259,18 @@ s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bR
* @date
* @version 1.0
*/
-s32 host_int_set_wfi_drv_handler(u32 u32address);
-s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode);
+s32 host_int_set_wfi_drv_handler(tstrWILC_WFIDrv *u32address);
+s32 host_int_set_operation_mode(tstrWILC_WFIDrv *hWFIDrv, u32 u32mode);
-static s32 Handle_ScanDone(void *drvHandler, tenuScanEvent enuEvent);
+static s32 Handle_ScanDone(tstrWILC_WFIDrv *drvHandler, tenuScanEvent enuEvent);
-static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID, short int BufferSize,
+static int host_int_addBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID, short int BufferSize,
short int SessionTimeout, void *drvHandler);
void host_int_freeJoinParams(void *pJoinParams);
-s32 host_int_get_statistics(WILC_WFIDrvHandle hWFIDrv, tstrStatistics *pstrStatistics);
+s32 host_int_get_statistics(tstrWILC_WFIDrv *hWFIDrv, tstrStatistics *pstrStatistics);
/*****************************************************************************/
/* */
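Every prototype above makes the same change: the opaque WILC_WFIDrvHandle is replaced by an explicit tstrWILC_WFIDrv pointer. Assuming the old handle was a void-pointer-style typedef (which is how the casts at the call sites read), the gain is compile-time type checking; a hedged before/after sketch:

    /* before (assumed): any pointer silently converts to the handle */
    typedef void *WILC_WFIDrvHandle;
    s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode);

    /* after: the compiler rejects anything that is not a tstrWILC_WFIDrv * */
    s32 host_int_disconnect(tstrWILC_WFIDrv *hWFIDrv, u16 u16ReasonCode);

This is also why casts such as (WILC_WFIDrvHandle)pstrWFIDrv disappear from the .c files later in this series.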
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index f5296f53a3d2..b8d7d048439b 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -6,20 +6,15 @@
* @date 01 MAR 2012
* @version 1.0
*/
-
-#ifndef SIMULATION
#include "wilc_wfi_cfgoperations.h"
#include "linux_wlan_common.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
-#endif
+
#ifdef WILC_FULLY_HOSTING_AP
#include "wilc_host_ap.h"
#endif
#ifdef WILC_AP_EXTERNAL_MLME
-#ifdef SIMULATION
-#include "wilc_wfi_cfgoperations.h"
-#endif
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -39,9 +34,7 @@ extern linux_wlan_t *g_linux_wlan;
static struct net_device *wilc_wfi_mon; /* global monitor netdev */
-#ifdef SIMULATION
-extern int WILC_WFI_Tx(struct sk_buff *skb, struct net_device *dev);
-#elif USE_WIRELESS
+#if USE_WIRELESS
extern int mac_xmit(struct sk_buff *skb, struct net_device *dev);
#endif
@@ -237,14 +230,12 @@ static void mgmt_tx_complete(void *priv, int status)
}
static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
{
- linux_wlan_t *nic;
struct tx_complete_mon_data *mgmt_tx = NULL;
if (dev == NULL) {
PRINT_D(HOSTAPD_DBG, "ERROR: dev == NULL\n");
return WILC_FAIL;
}
- nic = netdev_priv(dev);
netif_stop_queue(dev);
mgmt_tx = kmalloc(sizeof(struct tx_complete_mon_data), GFP_ATOMIC);
@@ -298,7 +289,6 @@ static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct ieee80211_radiotap_header *rtap_hdr;
u32 rtap_len, i, ret = 0;
struct WILC_WFI_mon_priv *mon_priv;
@@ -318,7 +308,6 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
return WILC_FAIL;
}
- rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
rtap_len = ieee80211_get_radiotap_len(skb->data);
if (skb->len < rtap_len) {
@@ -378,9 +367,7 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
PRINT_INFO(HOSTAPD_DBG, "SKB netdevice name = %s\n", skb->dev->name);
PRINT_INFO(HOSTAPD_DBG, "MONITOR real dev name = %s\n", mon_priv->real_ndev->name);
- #ifdef SIMULATION
- ret = WILC_WFI_Tx(skb, mon_priv->real_ndev);
- #elif USE_WIRELESS
+ #if USE_WIRELESS
/* Identify if Ethernet or MAC header (data or mgmt) */
memcpy(srcAdd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
@@ -493,9 +480,9 @@ static void WILC_WFI_mon_setup(struct net_device *dev)
/* dev->destructor = free_netdev; */
PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
- memset(dev->dev_addr, 0, ETH_ALEN);
+ eth_zero_addr(dev->dev_addr);
#ifdef USE_WIRELESS
{
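Two modernisations in this hunk: IFF_NO_QUEUE replaces the deprecated tx_queue_len = 0 idiom for queueless virtual devices, and eth_zero_addr() replaces an open-coded memset of the MAC address. A minimal sketch of the same setup for a hypothetical monitor device:

    #include <linux/etherdevice.h>
    #include <linux/if_arp.h>
    #include <linux/netdevice.h>

    static void my_mon_setup(struct net_device *dev)
    {
            ether_setup(dev);
            dev->priv_flags |= IFF_NO_QUEUE;       /* no qdisc, instead of tx_queue_len = 0 */
            dev->type = ARPHRD_IEEE80211_RADIOTAP; /* frames carry a radiotap header */
            eth_zero_addr(dev->dev_addr);          /* all-zero MAC, replaces memset() */
    }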
@@ -571,7 +558,7 @@ struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_devi
* @date 12 JUL 2012
* @version 1.0
*/
-int WILC_WFI_deinit_mon_interface()
+int WILC_WFI_deinit_mon_interface(void)
{
bool rollback_lock = false;
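WILC_WFI_deinit_mon_interface() gains an explicit (void) parameter list. In C, an empty list declares a function with unspecified parameters, so only the (void) form lets the compiler reject stray arguments:

    int f();      /* old style: parameters unspecified, calls are unchecked */
    int f(void);  /* prototype: takes no arguments, enforced at compile time */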
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index b352c504a77d..b3cc9f5c7937 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,4 +1,3 @@
-#ifndef SIMULATION
#include "wilc_wfi_cfgoperations.h"
#include "linux_wlan_common.h"
#include "wilc_wlan_if.h"
@@ -72,7 +71,7 @@ extern void resolve_disconnect_aberration(void *drvHandler);
extern u8 gau8MulticastMacAddrList[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
void wilc1000_wlan_deinit(linux_wlan_t *nic);
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-extern WILC_TimerHandle hDuringIpTimer;
+extern struct timer_list hDuringIpTimer;
#endif
static int linux_wlan_device_power(int on_off)
@@ -103,7 +102,6 @@ static int linux_wlan_device_detection(int on_off)
return 0;
}
-
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr);
@@ -116,7 +114,6 @@ static struct notifier_block g_dev_notifier = {
if (g_linux_wlan->oup.wlan_cleanup != NULL) \
g_linux_wlan->oup.wlan_cleanup(); }
-
#ifndef STA_FIRMWARE
#define STA_FIRMWARE "wifi_firmware.bin"
#endif
@@ -129,15 +126,12 @@ static struct notifier_block g_dev_notifier = {
#define P2P_CONCURRENCY_FIRMWARE "wifi_firmware_p2p_concurrency.bin"
#endif
-
-
typedef struct android_wifi_priv_cmd {
char *buf;
int used_len;
int total_len;
} android_wifi_priv_cmd;
-
#define IRQ_WAIT 1
#define IRQ_NO_WAIT 0
/*
@@ -158,7 +152,6 @@ void linux_wlan_unlock(void *vp);
extern void WILC_WFI_monitor_rx(uint8_t *buff, uint32_t size);
extern void WILC_WFI_p2p_rx(struct net_device *dev, uint8_t *buff, uint32_t size);
-
static void *internal_alloc(uint32_t size, uint32_t flag);
static void linux_wlan_tx_complete(void *priv, int status);
void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset);
@@ -170,8 +163,6 @@ static struct net_device_stats *mac_stats(struct net_device *dev);
static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
static void wilc_set_multicast_list(struct net_device *dev);
-
-
/*
* for now - in frmw_to_linux there should be private data to be passed to it
* and this data should be a pointer to the net device
@@ -200,22 +191,18 @@ volatile int WatchDogdebuggerCounter;
char DebugBuffer[DEGUG_BUFFER_LENGTH + 20] = {0};
static char *ps8current = DebugBuffer;
-
-
void printk_later(const char *format, ...)
{
va_list args;
- va_start (args, format);
- ps8current += vsprintf (ps8current, format, args);
- va_end (args);
- if ((ps8current - DebugBuffer) > DEGUG_BUFFER_LENGTH) {
+ va_start(args, format);
+ ps8current += vsprintf(ps8current, format, args);
+ va_end(args);
+ if ((ps8current - DebugBuffer) > DEGUG_BUFFER_LENGTH)
ps8current = DebugBuffer;
- }
}
-
-void dump_logs()
+void dump_logs(void)
{
if (DebugBuffer[0]) {
DebugBuffer[DEGUG_BUFFER_LENGTH] = 0;
@@ -229,7 +216,7 @@ void dump_logs()
}
}
-void Reset_WatchDogdebugger()
+void Reset_WatchDogdebugger(void)
{
WatchDogdebuggerCounter = 0;
}
@@ -246,11 +233,8 @@ static int DebuggingThreadTask(void *vp)
WatchDogdebuggerCounter = 0;
}
}
-
-
#endif
-
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr)
{
@@ -298,25 +282,22 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event
PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Obtained ===============\n\n");
-
/*If we are in station mode or client mode*/
if (nic->iftype == STATION_MODE || nic->iftype == CLIENT_MODE) {
pstrWFIDrv->IFC_UP = 1;
g_obtainingIP = false;
- WILC_TimerStop(&hDuringIpTimer, NULL);
+ del_timer(&hDuringIpTimer);
PRINT_D(GENERIC_DBG, "IP obtained , enable scan\n");
}
-
-
if (bEnablePS)
- host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 1, 0);
+ host_int_set_power_mgmt(pstrWFIDrv, 1, 0);
PRINT_D(GENERIC_DBG, "[%s] Up IP\n", dev_iface->ifa_label);
pIP_Add_buff = (char *) (&(dev_iface->ifa_address));
- PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d \n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
- host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
+ PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
+ host_int_setup_ipaddress(pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
break;
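dev_state_ev_handler() above runs as an inetaddr notifier, firing on NETDEV_UP/NETDEV_DOWN address events; with the handle typedef gone it can pass pstrWFIDrv to the host-interface calls directly. A minimal sketch of the notifier pattern it follows (my_* names are illustrative):

    #include <linux/inetdevice.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int my_inet_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
    {
            struct in_ifaddr *ifa = ptr;   /* the address that changed */

            if (event == NETDEV_UP)
                    pr_info("IP configured on %s\n", ifa->ifa_label);
            return NOTIFY_DONE;
    }

    static struct notifier_block my_notifier = {
            .notifier_call = my_inet_event,
    };

    /* register_inetaddr_notifier(&my_notifier) at init time,
     * unregister_inetaddr_notifier(&my_notifier) at exit.      */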
@@ -330,17 +311,16 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event
}
if (memcmp(dev_iface->ifa_label, wlan_dev_name, 5) == 0)
- host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+ host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
resolve_disconnect_aberration(pstrWFIDrv);
-
PRINT_D(GENERIC_DBG, "[%s] Down IP\n", dev_iface->ifa_label);
pIP_Add_buff = null_ip;
- PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d \n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
+ PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
- host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
+ host_int_setup_ipaddress(pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
break;
@@ -387,8 +367,6 @@ void linux_wlan_disable_irq(int wait)
#if (defined WILC_SPI) || (defined WILC_SDIO_IRQ_GPIO)
static irqreturn_t isr_uh_routine(int irq, void *user_data)
{
-
-
int_rcvdU++;
#if (RX_BH_TYPE != RX_BH_THREADED_IRQ)
linux_wlan_disable_irq(IRQ_NO_WAIT);
@@ -440,19 +418,14 @@ static void isr_bh_routine(struct work_struct *work)
#else
return;
#endif
-
-
-
}
int_rcvdB++;
PRINT_D(INT_DBG, "Interrupt received BH\n");
- if (g_linux_wlan->oup.wlan_handle_rx_isr != 0) {
+ if (g_linux_wlan->oup.wlan_handle_rx_isr != 0)
g_linux_wlan->oup.wlan_handle_rx_isr();
- } else {
+ else
PRINT_ER("wlan_handle_rx_isr() hasn't been initialized\n");
- }
-
#if (RX_BH_TYPE == RX_BH_THREADED_IRQ)
return IRQ_HANDLED;
@@ -476,18 +449,16 @@ static int isr_bh_routine(void *vp)
}
int_rcvdB++;
PRINT_D(INT_DBG, "Interrupt received BH\n");
- if (g_linux_wlan->oup.wlan_handle_rx_isr != 0) {
+ if (g_linux_wlan->oup.wlan_handle_rx_isr != 0)
g_linux_wlan->oup.wlan_handle_rx_isr();
- } else {
+ else
PRINT_ER("wlan_handle_rx_isr() hasn't been initialized\n");
- }
}
return 0;
}
#endif
-
#if (defined WILC_SPI) || (defined WILC_SDIO_IRQ_GPIO)
static int init_irq(linux_wlan_t *p_nic)
{
@@ -504,9 +475,9 @@ static int init_irq(linux_wlan_t *p_nic)
*
* ex) nic->dev_irq_num = gpio_to_irq(GPIO_NUM);
*/
-#elif defined (NM73131_0_BOARD)
+#elif defined(NM73131_0_BOARD)
nic->dev_irq_num = IRQ_WILC1000;
-#elif defined (PANDA_BOARD)
+#elif defined(PANDA_BOARD)
gpio_export(GPIO_NUM, 1);
nic->dev_irq_num = OMAP_GPIO_IRQ(GPIO_NUM);
irq_set_irq_type(nic->dev_irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -518,7 +489,6 @@ static int init_irq(linux_wlan_t *p_nic)
PRINT_ER("could not obtain gpio for WILC_INTR\n");
}
-
#if (RX_BH_TYPE == RX_BH_THREADED_IRQ)
if ((ret != -1) && (request_threaded_irq(nic->dev_irq_num, isr_uh_routine, isr_bh_routine,
IRQF_TRIGGER_LOW | IRQF_ONESHOT, /* without IRQF_ONESHOT the UH would keep firing and never give the BH a chance */
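The comment above is the whole point of IRQF_ONESHOT: the line stays masked until the threaded handler returns, which matters for a level-triggered interrupt serviced over a sleeping bus such as SDIO or SPI. A minimal sketch of the request_threaded_irq() split (my_* names are illustrative):

    #include <linux/interrupt.h>

    static irqreturn_t my_hard_isr(int irq, void *dev)
    {
            return IRQ_WAKE_THREAD;   /* defer the real work to the thread */
    }

    static irqreturn_t my_thread_fn(int irq, void *dev)
    {
            /* sleepable bottom-half work: bus transactions, wakeups, ... */
            return IRQ_HANDLED;
    }

    static int my_init_irq(int irq, void *dev)
    {
            return request_threaded_irq(irq, my_hard_isr, my_thread_fn,
                                        IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                        "my-dev", dev);
    }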
@@ -554,7 +524,6 @@ static void deinit_irq(linux_wlan_t *nic)
#endif
}
-
/*
* OS functions
*/
@@ -601,7 +570,6 @@ void linux_wlan_free(void *vp)
}
}
-
static void *internal_alloc(uint32_t size, uint32_t flag)
{
char *pntr = NULL;
@@ -610,7 +578,6 @@ static void *internal_alloc(uint32_t size, uint32_t flag)
return (void *)pntr;
}
-
static void linux_wlan_init_lock(char *lockName, void *plock, int count)
{
sema_init((struct semaphore *)plock, count);
@@ -638,25 +605,22 @@ static int linux_wlan_lock_timeout(void *vp, u32 timeout)
{
int error = -1;
PRINT_D(LOCK_DBG, "Locking %p\n", vp);
- if (vp != NULL) {
+ if (vp != NULL)
error = down_timeout((struct semaphore *)vp, msecs_to_jiffies(timeout));
- } else {
+ else
PRINT_ER("Failed, mutex is NULL\n");
- }
return error;
}
void linux_wlan_unlock(void *vp)
{
PRINT_D(LOCK_DBG, "Unlocking %p\n", vp);
- if (vp != NULL) {
+ if (vp != NULL)
up((struct semaphore *)vp);
- } else {
+ else
PRINT_ER("Failed, mutex is NULL\n");
- }
}
-
static void linux_wlan_init_mutex(char *lockName, void *plock, int count)
{
mutex_init((struct mutex *)plock);
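linux_wlan_lock_timeout() above is a thin wrapper over down_timeout(), which returns 0 on success and -ETIME if the semaphore could not be taken within the timeout. A minimal sketch:

    #include <linux/semaphore.h>
    #include <linux/jiffies.h>

    static struct semaphore my_sem;   /* sema_init(&my_sem, 1) before first use */

    static int my_lock_timeout(unsigned int timeout_ms)
    {
            /* 0 on success, -ETIME if timeout_ms elapsed first */
            return down_timeout(&my_sem, msecs_to_jiffies(timeout_ms));
    }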
@@ -702,7 +666,6 @@ static void linux_wlan_unlock_mutex(void *vp)
}
}
-
/*Added by Amr - BugID_4720*/
static void linux_wlan_init_spin_lock(char *lockName, void *plock, int count)
{
@@ -780,9 +743,8 @@ struct net_device *GetIfHandler(uint8_t *pMacHeader)
}
}
PRINT_INFO(INIT_DBG, "Invalide handle\n");
- for (i = 0; i < 25; i++) {
+ for (i = 0; i < 25; i++)
PRINT_D(INIT_DBG, "%02x ", pMacHeader[i]);
- }
Bssid = pMacHeader + 18;
Bssid1 = pMacHeader + 12;
for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
@@ -822,9 +784,8 @@ int linux_wlan_get_num_conn_ifcs(void)
uint8_t ret_val = 0;
for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
- if (memcmp(g_linux_wlan->strInterfaceInfo[i].aBSSID, null_bssid, 6)) {
+ if (memcmp(g_linux_wlan->strInterfaceInfo[i].aBSSID, null_bssid, 6))
ret_val++;
- }
}
return ret_val;
}
@@ -868,7 +829,6 @@ static int linux_wlan_txq_task(void *vp)
#define TX_BACKOFF_WEIGHT_MIN (0)
#define TX_BACKOFF_WEIGHT_UNIT_MS (10)
int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
- signed long timeout;
#endif
/* inform wilc1000_wlan_init that TXQ task is started. */
@@ -906,7 +866,6 @@ static int linux_wlan_txq_task(void *vp)
}
if (ret == WILC_TX_ERR_NO_BUF) { /* failed to allocate buffers in chip. */
- timeout = msecs_to_jiffies(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
do {
/* Back off from sending packets for some time. */
/* schedule_timeout will allow RX task to run and free buffers.*/
@@ -915,15 +874,13 @@ static int linux_wlan_txq_task(void *vp)
msleep(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
} while (/*timeout*/ 0);
backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
- if (backoff_weight > TX_BACKOFF_WEIGHT_MAX) {
+ if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
backoff_weight = TX_BACKOFF_WEIGHT_MAX;
- }
} else {
if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
- if (backoff_weight < TX_BACKOFF_WEIGHT_MIN) {
+ if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
backoff_weight = TX_BACKOFF_WEIGHT_MIN;
- }
}
}
/*TODO: drop packets after a certain time/number of retry count. */
@@ -946,7 +903,6 @@ int linux_wlan_get_firmware(perInterface_wlan_t *p_nic)
const struct firmware *wilc_firmware;
char *firmware;
-
if (nic->iftype == AP_MODE)
firmware = AP_FIRMWARE;
else if (nic->iftype == STATION_MODE)
@@ -958,8 +914,6 @@ int linux_wlan_get_firmware(perInterface_wlan_t *p_nic)
firmware = P2P_CONCURRENCY_FIRMWARE;
}
-
-
if (nic == NULL) {
PRINT_ER("NIC is NULL\n");
goto _fail_;
@@ -970,7 +924,6 @@ int linux_wlan_get_firmware(perInterface_wlan_t *p_nic)
goto _fail_;
}
-
/* the firmware should be located in /lib/firmware in
* root file system with the name specified above */
@@ -1054,9 +1007,8 @@ static int linux_wlan_firmware_download(linux_wlan_t *p_nic)
**/
PRINT_D(INIT_DBG, "Downloading Firmware ...\n");
ret = g_linux_wlan->oup.wlan_firmware_download(g_linux_wlan->wilc_firmware->data, g_linux_wlan->wilc_firmware->size);
- if (ret < 0) {
+ if (ret < 0)
goto _FAIL_;
- }
/* Freeing FW buffer */
PRINT_D(INIT_DBG, "Freeing FW buffer ...\n");
@@ -1064,13 +1016,12 @@ static int linux_wlan_firmware_download(linux_wlan_t *p_nic)
release_firmware(g_linux_wlan->wilc_firmware);
g_linux_wlan->wilc_firmware = NULL;
- PRINT_D(INIT_DBG, "Download Succeeded \n");
+ PRINT_D(INIT_DBG, "Download Succeeded\n");
_FAIL_:
return ret;
}
-
/* startup configuration - could be changed later using iconfig*/
static int linux_wlan_init_test_config(struct net_device *dev, linux_wlan_t *p_nic)
{
@@ -1096,7 +1047,6 @@ static int linux_wlan_init_test_config(struct net_device *dev, linux_wlan_t *p_n
PRINT_D(INIT_DBG, "MAC address is : %02x-%02x-%02x-%02x-%02x-%02x\n", mac_add[0], mac_add[1], mac_add[2], mac_add[3], mac_add[4], mac_add[5]);
wilc_get_chipid(0);
-
if (g_linux_wlan->oup.wlan_cfg_set == NULL) {
PRINT_D(INIT_DBG, "Null p[ointer\n");
goto _fail_;
@@ -1116,7 +1066,6 @@ static int linux_wlan_init_test_config(struct net_device *dev, linux_wlan_t *p_n
if (!g_linux_wlan->oup.wlan_cfg_set(0, WID_BSS_TYPE, c_val, 1, 0, 0))
goto _fail_;
-
/* c_val[0] = RATE_AUTO; / * bug 4275: Enable autorate and limit it to 24Mbps * / */
c_val[0] = RATE_AUTO;
if (!g_linux_wlan->oup.wlan_cfg_set(0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
@@ -1351,7 +1300,6 @@ _fail_:
return -1;
}
-
/**************************/
void wilc1000_wlan_deinit(linux_wlan_t *nic)
{
@@ -1385,16 +1333,12 @@ void wilc1000_wlan_deinit(linux_wlan_t *nic)
#endif
#endif
-
/* not sure if the following unlocks are needed or not*/
- if (&g_linux_wlan->rxq_event != NULL) {
+ if (&g_linux_wlan->rxq_event != NULL)
linux_wlan_unlock(&g_linux_wlan->rxq_event);
- }
- if (&g_linux_wlan->txq_event != NULL) {
+ if (&g_linux_wlan->txq_event != NULL)
linux_wlan_unlock(&g_linux_wlan->txq_event);
- }
-
#if (RX_BH_TYPE == RX_BH_WORK_QUEUE)
/*Removing the work struct from the linux kernel workqueue*/
@@ -1412,7 +1356,6 @@ void wilc1000_wlan_deinit(linux_wlan_t *nic)
PRINT_D(INIT_DBG, "Deinitializing IRQ\n");
deinit_irq(g_linux_wlan);
-
if (&g_linux_wlan->oup != NULL) {
if (g_linux_wlan->oup.wlan_stop != NULL)
g_linux_wlan->oup.wlan_stop();
@@ -1442,7 +1385,6 @@ void wilc1000_wlan_deinit(linux_wlan_t *nic)
} else {
PRINT_D(INIT_DBG, "wilc1000 is not initialized\n");
}
- return;
}
int wlan_init_locks(linux_wlan_t *p_nic)
@@ -1536,7 +1478,7 @@ void linux_to_wlan(wilc_wlan_inp_t *nwi, linux_wlan_t *nic)
nwi->os_context.txq_wait_event = (void *)&g_linux_wlan->txq_event;
-#if defined (MEMORY_STATIC)
+#if defined(MEMORY_STATIC)
nwi->os_context.rx_buffer_size = LINUX_RX_SIZE;
#endif
nwi->os_context.rxq_critical_section = (void *)&g_linux_wlan->rxq_cs;
@@ -1673,17 +1615,14 @@ static void wlan_deinitialize_threads(linux_wlan_t *nic)
if (&g_linux_wlan->rxq_event != NULL)
linux_wlan_unlock(&g_linux_wlan->rxq_event);
-
if (g_linux_wlan->rxq_thread != NULL) {
kthread_stop(g_linux_wlan->rxq_thread);
g_linux_wlan->rxq_thread = NULL;
}
-
if (&g_linux_wlan->txq_event != NULL)
linux_wlan_unlock(&g_linux_wlan->txq_event);
-
if (g_linux_wlan->txq_thread != NULL) {
kthread_stop(g_linux_wlan->txq_thread);
g_linux_wlan->txq_thread = NULL;
@@ -1747,14 +1686,12 @@ static int linux_wlan_read_mac_addr(void *vp)
}
}
- if (index == array_size) {
+ if (index == array_size)
PRINT_ER("random MAC\n");
- }
exit:
- if (fp && !IS_ERR(fp)) {
+ if (fp && !IS_ERR(fp))
filp_close(fp, NULL);
- }
set_fs(old_fs);
@@ -1786,9 +1723,8 @@ uint8_t wilc1000_prepare_11b_core(wilc_wlan_inp_t *nwi, wilc_wlan_oup_t *nwo, li
sdio_register_driver(&wilc_bus);
- while (!probe) {
+ while (!probe)
msleep(100);
- }
probe = 0;
g_linux_wlan->wilc_sdio_func = local_sdio_func;
linux_to_wlan(nwi, nic);
@@ -1820,9 +1756,8 @@ int repeat_power_cycle(perInterface_wlan_t *nic)
sdio_register_driver(&wilc_bus);
/* msleep(1000); */
- while (!probe) {
+ while (!probe)
msleep(100);
- }
probe = 0;
g_linux_wlan->wilc_sdio_func = local_sdio_func;
linux_to_wlan(&nwi, g_linux_wlan);
@@ -1834,7 +1769,7 @@ int repeat_power_cycle(perInterface_wlan_t *nic)
#endif
if (linux_wlan_get_firmware(nic)) {
- PRINT_ER("Can't get firmware \n");
+ PRINT_ER("Can't get firmware\n");
ret = -1;
goto __fail__;
}
@@ -1847,9 +1782,8 @@ int repeat_power_cycle(perInterface_wlan_t *nic)
}
/* Start firmware*/
ret = linux_wlan_start_firmware(nic);
- if (ret < 0) {
+ if (ret < 0)
PRINT_ER("Failed to start firmware\n");
- }
__fail__:
return ret;
}
@@ -1871,9 +1805,8 @@ int wilc1000_wlan_init(struct net_device *dev, perInterface_wlan_t *p_nic)
#ifdef STATIC_MACADDRESS
wilc_mac_thread = kthread_run(linux_wlan_read_mac_addr, NULL, "wilc_mac_thread");
- if (wilc_mac_thread < 0) {
+ if (wilc_mac_thread < 0)
PRINT_ER("couldn't create Mac addr thread\n");
- }
#endif
linux_to_wlan(&nwi, g_linux_wlan);
@@ -1889,7 +1822,6 @@ int wilc1000_wlan_init(struct net_device *dev, perInterface_wlan_t *p_nic)
/*Save the oup structre into global pointer*/
gpstrWlanOps = &g_linux_wlan->oup;
-
ret = wlan_initialize_threads(nic);
if (ret < 0) {
PRINT_ER("Initializing Threads FAILED\n");
@@ -1922,12 +1854,11 @@ int wilc1000_wlan_init(struct net_device *dev, perInterface_wlan_t *p_nic)
#endif
if (linux_wlan_get_firmware(nic)) {
- PRINT_ER("Can't get firmware \n");
+ PRINT_ER("Can't get firmware\n");
ret = -EIO;
goto _fail_irq_enable_;
}
-
/*Download firmware*/
ret = linux_wlan_firmware_download(g_linux_wlan);
if (ret < 0) {
@@ -1967,7 +1898,6 @@ int wilc1000_wlan_init(struct net_device *dev, perInterface_wlan_t *p_nic)
g_linux_wlan->wilc1000_initialized = 1;
return 0; /*success*/
-
_fail_fw_start_:
if (&g_linux_wlan->oup != NULL) {
if (g_linux_wlan->oup.wlan_stop != NULL)
@@ -1996,12 +1926,11 @@ _fail_locks_:
return ret;
}
-
/*
* - this function will be called automatically by OS when module inserted.
*/
-#if !defined (NM73131_0_BOARD)
+#if !defined(NM73131_0_BOARD)
int mac_init_fn(struct net_device *ndev)
{
@@ -2028,12 +1957,11 @@ int mac_init_fn(struct net_device *ndev)
}
#endif
-
void WILC_WFI_frame_register(struct wiphy *wiphy, struct net_device *dev,
u16 frame_type, bool reg);
/* This fn is called when this device is set up using ifconfig */
-#if !defined (NM73131_0_BOARD)
+#if !defined(NM73131_0_BOARD)
int mac_open(struct net_device *ndev)
{
perInterface_wlan_t *nic;
@@ -2078,7 +2006,7 @@ int mac_open(struct net_device *ndev)
for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
if (ndev == g_linux_wlan->strInterfaceInfo[i].wilc_netdev) {
memcpy(g_linux_wlan->strInterfaceInfo[i].aSrcAddress, mac_add, ETH_ALEN);
- g_linux_wlan->strInterfaceInfo[i].drvHandler = (u32)priv->hWILCWFIDrv;
+ g_linux_wlan->strInterfaceInfo[i].drvHandler = priv->hWILCWFIDrv;
break;
}
}
@@ -2092,7 +2020,6 @@ int mac_open(struct net_device *ndev)
goto _err_;
}
-
WILC_WFI_frame_register(nic->wilc_netdev->ieee80211_ptr->wiphy, nic->wilc_netdev,
nic->g_struct_frame_reg[0].frame_type, nic->g_struct_frame_reg[0].reg);
WILC_WFI_frame_register(nic->wilc_netdev->ieee80211_ptr->wiphy, nic->wilc_netdev,
@@ -2131,7 +2058,6 @@ struct net_device_stats *mac_stats(struct net_device *dev)
{
perInterface_wlan_t *nic = netdev_priv(dev);
-
return &nic->netstats;
}
@@ -2146,17 +2072,16 @@ static void wilc_set_multicast_list(struct net_device *dev)
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
-
if (!dev)
return;
- PRINT_D(INIT_DBG, "Setting Multicast List with count = %d. \n", dev->mc.count);
+ PRINT_D(INIT_DBG, "Setting Multicast List with count = %d.\n", dev->mc.count);
if (dev->flags & IFF_PROMISC) {
/* Normally, we should configure the chip to retrieve all packets
* but we don't want to support this right now */
/* TODO: add promiscuous mode support */
- PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets \n");
+ PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets\n");
return;
}
@@ -2165,27 +2090,27 @@ static void wilc_set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) {
PRINT_D(INIT_DBG, "Disable multicast filter, retrive all multicast packets\n");
/* get all multicast packets */
- host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, false, 0);
+ host_int_setup_multicast_filter(pstrWFIDrv, false, 0);
return;
}
/* No multicast? Just get our own stuff */
if ((dev->mc.count) == 0) {
PRINT_D(INIT_DBG, "Enable multicast filter, retrive directed packets only.\n");
- host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, true, 0);
+ host_int_setup_multicast_filter(pstrWFIDrv, true, 0);
return;
}
/* Store all of the multicast addresses in the hardware filter */
netdev_for_each_mc_addr(ha, dev)
{
- WILC_memcpy(gau8MulticastMacAddrList[i], ha->addr, ETH_ALEN);
+ memcpy(gau8MulticastMacAddrList[i], ha->addr, ETH_ALEN);
PRINT_D(INIT_DBG, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
gau8MulticastMacAddrList[i][0], gau8MulticastMacAddrList[i][1], gau8MulticastMacAddrList[i][2], gau8MulticastMacAddrList[i][3], gau8MulticastMacAddrList[i][4], gau8MulticastMacAddrList[i][5]);
i++;
}
- host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, true, (dev->mc.count));
+ host_int_setup_multicast_filter(pstrWFIDrv, true, (dev->mc.count));
return;
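The loop above uses netdev_for_each_mc_addr() to copy every subscribed multicast address into the driver's filter table before handing the count to the firmware. A minimal sketch of that walk (table size and locking left to the caller, as in the driver):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static int my_collect_mc(struct net_device *dev, u8 table[][ETH_ALEN])
    {
            struct netdev_hw_addr *ha;
            int i = 0;

            netdev_for_each_mc_addr(ha, dev)   /* iterate dev->mc list */
                    memcpy(table[i++], ha->addr, ETH_ALEN);
            return i;                          /* number of entries filled */
    }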
@@ -2195,11 +2120,10 @@ static void linux_wlan_tx_complete(void *priv, int status)
{
struct tx_complete_data *pv_data = (struct tx_complete_data *)priv;
- if (status == 1) {
+ if (status == 1)
PRINT_D(TX_DBG, "Packet sent successfully - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
- } else {
+ else
PRINT_D(TX_DBG, "Couldn't send packet - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
- }
/* Free the SK Buffer, its work is done */
dev_kfree_skb(pv_data->skb);
linux_wlan_free(pv_data);
@@ -2215,7 +2139,7 @@ int mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ethhdr *eth_h;
nic = netdev_priv(ndev);
- PRINT_D(INT_DBG, "\n========\n IntUH: %d - IntBH: %d - IntCld: %d \n========\n", int_rcvdU, int_rcvdB, int_clrd);
+ PRINT_D(INT_DBG, "\n========\n IntUH: %d - IntBH: %d - IntCld: %d\n========\n", int_rcvdU, int_rcvdB, int_clrd);
PRINT_D(TX_DBG, "Sending packet just received from TCP/IP\n");
/* Stop the network interface queue */
@@ -2237,18 +2161,16 @@ int mac_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_data->skb = skb;
eth_h = (struct ethhdr *)(skb->data);
- if (eth_h->h_proto == 0x8e88) {
+ if (eth_h->h_proto == 0x8e88)
PRINT_D(INIT_DBG, "EAPOL transmitted\n");
- }
/*get source and dest ip addresses*/
ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
pu8UdpBuffer = (char *)ih + sizeof(struct iphdr);
- if ((pu8UdpBuffer[1] == 68 && pu8UdpBuffer[3] == 67) || (pu8UdpBuffer[1] == 67 && pu8UdpBuffer[3] == 68)) {
+ if ((pu8UdpBuffer[1] == 68 && pu8UdpBuffer[3] == 67) || (pu8UdpBuffer[1] == 67 && pu8UdpBuffer[3] == 68))
PRINT_D(GENERIC_DBG, "DHCP Message transmitted, type:%x %x %x\n", pu8UdpBuffer[248], pu8UdpBuffer[249], pu8UdpBuffer[250]);
- }
PRINT_D(TX_DBG, "Sending packet - Size = %d - Address = %p - SKB = %p\n", tx_data->size, tx_data->buff, tx_data->skb);
/* Send packet to MAC HW - for now the tx_complete function will be just status
@@ -2269,7 +2191,6 @@ int mac_xmit(struct sk_buff *skb, struct net_device *ndev)
QueueCount = WILC_Xmit_data((void *)tx_data, HOST_TO_WLAN);
#endif /* WILC_FULLY_HOSTING_AP */
-
if (QueueCount > FLOW_CONTROL_UPPER_THRESHOLD) {
netif_stop_queue(g_linux_wlan->strInterfaceInfo[0].wilc_netdev);
netif_stop_queue(g_linux_wlan->strInterfaceInfo[1].wilc_netdev);
@@ -2278,7 +2199,6 @@ int mac_xmit(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-
int mac_close(struct net_device *ndev)
{
struct WILC_WFI_priv *priv;
@@ -2301,8 +2221,6 @@ int mac_close(struct net_device *ndev)
pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
-
-
PRINT_D(GENERIC_DBG, "Mac close\n");
if (g_linux_wlan == NULL) {
@@ -2348,7 +2266,6 @@ int mac_close(struct net_device *ndev)
return 0;
}
-
int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
@@ -2359,8 +2276,6 @@ int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
struct WILC_WFI_priv *priv;
s32 s32Error = WILC_SUCCESS;
-
-
/* struct iwreq *wrq = (struct iwreq *) req; // tony moved to case SIOCSIWPRIV */
#ifdef USE_WIRELESS
nic = netdev_priv(ndev);
@@ -2422,9 +2337,7 @@ int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
done:
- if (buff != NULL) {
- kfree(buff);
- }
+ kfree(buff);
return s32Error;
}
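The simplification above relies on kfree(NULL) being a documented no-op, so the NULL guard around it was dead weight:

    char *buff = NULL;
    /* ... buff may or may not have been allocated ... */
    kfree(buff);   /* safe either way: kfree(NULL) does nothing */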
@@ -2455,7 +2368,6 @@ void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset)
frame_len = size;
buff_to_send = buff;
-
/* Need to send the packet up to the host, allocate a skb buffer */
skb = dev_alloc_skb(frame_len);
if (skb == NULL) {
@@ -2465,14 +2377,12 @@ void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset)
skb_reserve(skb, (unsigned int)skb->data & 0x3);
- if (g_linux_wlan == NULL || wilc_netdev == NULL) {
+ if (g_linux_wlan == NULL || wilc_netdev == NULL)
PRINT_ER("wilc_netdev in g_linux_wlan is NULL");
- }
skb->dev = wilc_netdev;
- if (skb->dev == NULL) {
+ if (skb->dev == NULL)
PRINT_ER("skb->dev is NULL\n");
- }
/*
* for(i=0;i<40;i++)
@@ -2502,9 +2412,8 @@ void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset)
ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
pu8UdpBuffer = (char *)ih + sizeof(struct iphdr);
- if (buff_to_send[35] == 67 && buff_to_send[37] == 68) {
+ if (buff_to_send[35] == 67 && buff_to_send[37] == 68)
PRINT_D(RX_DBG, "DHCP Message received\n");
- }
if (buff_to_send[12] == 0x88 && buff_to_send[13] == 0x8e)
PRINT_D(GENERIC_DBG, "eapol received\n");
#endif
@@ -2516,9 +2425,8 @@ void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset)
PRINT_D(RX_DBG, "netif_rx ret value is: %d\n", stats);
}
#ifndef TCP_ENHANCEMENTS
- else {
+ else
PRINT_ER("Discard sending packet with len = %d\n", size);
- }
#endif
}
@@ -2541,9 +2449,8 @@ void WILC_WFI_mgmt_rx(uint8_t *buff, uint32_t size)
#ifdef WILC_P2P
nic = netdev_priv(g_linux_wlan->strInterfaceInfo[1].wilc_netdev); /* p2p0 */
if ((buff[0] == nic->g_struct_frame_reg[0].frame_type && nic->g_struct_frame_reg[0].reg) ||
- (buff[0] == nic->g_struct_frame_reg[1].frame_type && nic->g_struct_frame_reg[1].reg)) {
+ (buff[0] == nic->g_struct_frame_reg[1].frame_type && nic->g_struct_frame_reg[1].reg))
WILC_WFI_p2p_rx(g_linux_wlan->strInterfaceInfo[1].wilc_netdev, buff, size);
- }
#endif
}
@@ -2557,7 +2464,7 @@ int wilc_netdev_init(void)
linux_wlan_init_lock("close_exit_sync", &close_exit_sync, 0);
/*create the common structure*/
- g_linux_wlan = (linux_wlan_t *)WILC_MALLOC(sizeof(linux_wlan_t));
+ g_linux_wlan = WILC_MALLOC(sizeof(linux_wlan_t));
memset(g_linux_wlan, 0, sizeof(linux_wlan_t));
/*Reset interrupt count debug*/
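Dropping the (linux_wlan_t *) cast is the usual kernel style: WILC_MALLOC() ultimately returns void *, which converts implicitly to any object pointer in C, so the cast adds nothing and can only mask a missing prototype:

    g_linux_wlan = WILC_MALLOC(sizeof(linux_wlan_t));                  /* preferred */
    g_linux_wlan = (linux_wlan_t *)WILC_MALLOC(sizeof(linux_wlan_t));  /* redundant cast */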
@@ -2625,7 +2532,6 @@ int wilc_netdev_init(void)
}
#endif
-
if (register_netdev(ndev)) {
PRINT_ER("Device couldn't be registered - %s\n", ndev->name);
return -1; /* ERROR */
@@ -2638,7 +2544,7 @@ int wilc_netdev_init(void)
#ifndef WILC_SDIO
if (!linux_spi_init(&g_linux_wlan->wilc_spidev)) {
- PRINT_ER("Can't initialize SPI \n");
+ PRINT_ER("Can't initialize SPI\n");
return -1; /* ERROR */
}
g_linux_wlan->wilc_spidev = wilc_spi_dev;
@@ -2649,13 +2555,10 @@ int wilc_netdev_init(void)
return 0;
}
-
/*The 1st function called after module inserted*/
static int __init init_wilc_driver(void)
{
-
-
-#if defined (WILC_DEBUGFS)
+#if defined(WILC_DEBUGFS)
if (wilc_debugfs_init() < 0) {
PRINT_D(GENERIC_DBG, "fail to create debugfs for wilc driver\n");
return -1;
@@ -2674,17 +2577,15 @@ static int __init init_wilc_driver(void)
int ret;
ret = sdio_register_driver(&wilc_bus);
- if (ret < 0) {
+ if (ret < 0)
PRINT_D(INIT_DBG, "init_wilc_driver: Failed register sdio driver\n");
- }
return ret;
}
#else
PRINT_D(INIT_DBG, "Initializing netdev\n");
- if (wilc_netdev_init()) {
+ if (wilc_netdev_init())
PRINT_ER("Couldn't initialize netdev\n");
- }
return 0;
#endif
}
@@ -2702,18 +2603,15 @@ static void __exit exit_wilc_driver(void)
unregister_inetaddr_notifier(&g_dev_notifier);
#endif
- for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
+ for (i = 0; i < NUM_CONCURRENT_IFC; i++)
nic[i] = netdev_priv(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
- }
}
-
if ((g_linux_wlan != NULL) && g_linux_wlan->wilc_firmware != NULL) {
release_firmware(g_linux_wlan->wilc_firmware);
g_linux_wlan->wilc_firmware = NULL;
}
-
if ((g_linux_wlan != NULL) && (((g_linux_wlan->strInterfaceInfo[0].wilc_netdev) != NULL)
|| ((g_linux_wlan->strInterfaceInfo[1].wilc_netdev) != NULL))) {
PRINT_D(INIT_DBG, "Waiting for mac_close ....\n");
@@ -2723,17 +2621,15 @@ static void __exit exit_wilc_driver(void)
else
PRINT_D(INIT_DBG, "mac_closed\n");
-
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
/* close all opened interfaces */
if (g_linux_wlan->strInterfaceInfo[i].wilc_netdev != NULL) {
- if (nic[i]->mac_opened) {
+ if (nic[i]->mac_opened)
mac_close(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
- }
}
}
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
- PRINT_D(INIT_DBG, "Unregistering netdev %p \n", g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
+ PRINT_D(INIT_DBG, "Unregistering netdev %p\n", g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
unregister_netdev(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
#ifdef USE_WIRELESS
PRINT_D(INIT_DBG, "Freeing Wiphy...\n");
@@ -2744,7 +2640,6 @@ static void __exit exit_wilc_driver(void)
}
}
-
#ifdef USE_WIRELESS
#ifdef WILC_AP_EXTERNAL_MLME
/* Bug 4600 : WILC_WFI_deinit_mon_interface was already called at mac_close */
@@ -2764,12 +2659,12 @@ static void __exit exit_wilc_driver(void)
linux_wlan_deinit_lock(&close_exit_sync);
if (g_linux_wlan != NULL) {
- WILC_FREE(g_linux_wlan);
+ kfree(g_linux_wlan);
g_linux_wlan = NULL;
}
printk("Module_exit Done.\n");
-#if defined (WILC_DEBUGFS)
+#if defined(WILC_DEBUGFS)
wilc_debugfs_remove();
#endif
@@ -2780,4 +2675,3 @@ static void __exit exit_wilc_driver(void)
module_exit(exit_wilc_driver);
MODULE_LICENSE("GPL");
-#endif
diff --git a/drivers/staging/wilc1000/linux_wlan_common.h b/drivers/staging/wilc1000/linux_wlan_common.h
index 2476bfda1b46..e6ebf3e89129 100644
--- a/drivers/staging/wilc1000/linux_wlan_common.h
+++ b/drivers/staging/wilc1000/linux_wlan_common.h
@@ -39,8 +39,8 @@ enum debug_region {
#define FIRM_DBG (1 << Firmware_debug)
#if defined (WILC_DEBUGFS)
-extern int wilc_debugfs_init(void);
-extern void wilc_debugfs_remove(void);
+int wilc_debugfs_init(void);
+void wilc_debugfs_remove(void);
extern atomic_t REGION;
extern atomic_t DEBUG_LEVEL;
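Dropping extern from the two declarations above changes nothing semantically; extern is implicit on function declarations, so the shorter form is pure style:

    extern int wilc_debugfs_init(void);  /* identical meaning...          */
    int wilc_debugfs_init(void);         /* ...preferred kernel style     */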
diff --git a/drivers/staging/wilc1000/linux_wlan_sdio.c b/drivers/staging/wilc1000/linux_wlan_sdio.c
index 858e3a191bce..37f31f4558b5 100644
--- a/drivers/staging/wilc1000/linux_wlan_sdio.c
+++ b/drivers/staging/wilc1000/linux_wlan_sdio.c
@@ -31,7 +31,6 @@
struct sdio_func *local_sdio_func;
extern linux_wlan_t *g_linux_wlan;
extern int wilc_netdev_init(void);
-extern int sdio_clear_int(void);
extern void wilc_handle_isr(void);
static unsigned int sdio_default_speed;
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index c328208cda29..ae111862e7a9 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -48,38 +48,28 @@ static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, si
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
-static ssize_t wilc_debug_level_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
+static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- char buffer[128] = {};
int flag = 0;
+ int ret;
- if (count > sizeof(buffer))
- return -EINVAL;
-
- if (copy_from_user(buffer, buf, count)) {
- return -EFAULT;
- }
-
- flag = buffer[0] - '0';
-
- if (flag > 0) {
- flag = DEBUG | ERR;
- } else if (flag < 0) {
- flag = 100;
- }
+ ret = kstrtouint_from_user(buf, count, 16, &flag);
+ if (ret)
+ return ret;
if (flag > DBG_LEVEL_ALL) {
printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&DEBUG_LEVEL));
- return -EFAULT;
+ return -EINVAL;
}
atomic_set(&DEBUG_LEVEL, (int)flag);
- if (flag == 0) {
+ if (flag == 0)
printk("Debug-level disabled\n");
- } else {
+ else
printk("Debug-level enabled\n");
- }
+
return count;
}
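The rewrite above replaces a hand-rolled copy_from_user()-plus-parse sequence with kstrtouint_from_user(), which copies and converts in one step and reports -EINVAL/-ERANGE/-EFAULT itself. A minimal sketch of a debugfs write handler built on it (note the result variable should be unsigned int to match the helper's signature):

    #include <linux/kernel.h>
    #include <linux/fs.h>

    static ssize_t my_debug_write(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
    {
            unsigned int flag;
            int ret;

            ret = kstrtouint_from_user(buf, count, 16, &flag);  /* base-16 input */
            if (ret)
                    return ret;

            /* ... validate and apply 'flag' here ... */
            return count;   /* consumed the whole write */
    }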
diff --git a/drivers/staging/wilc1000/wilc_exported_buf.c b/drivers/staging/wilc1000/wilc_exported_buf.c
index 529457816f65..c3f6a0a1c9ad 100644
--- a/drivers/staging/wilc1000/wilc_exported_buf.c
+++ b/drivers/staging/wilc1000/wilc_exported_buf.c
@@ -8,9 +8,6 @@
#define LINUX_TX_SIZE (64 * 1024)
#define WILC1000_FW_SIZE (4 * 1024)
-#define DECLARE_WILC_BUFFER(name) \
- void *exported_ ## name = NULL;
-
#define MALLOC_WILC_BUFFER(name, size) \
exported_ ## name = kmalloc(size, GFP_KERNEL); \
if (!exported_ ## name) { \
@@ -24,9 +21,9 @@
/*
* Add necessary buffer pointers
*/
-DECLARE_WILC_BUFFER(g_tx_buf)
-DECLARE_WILC_BUFFER(g_rx_buf)
-DECLARE_WILC_BUFFER(g_fw_buf)
+void *exported_g_tx_buf;
+void *exported_g_rx_buf;
+void *exported_g_fw_buf;
void *get_tx_buffer(void)
{
@@ -65,12 +62,10 @@ static void __exit wilc_module_deinit(void)
FREE_WILC_BUFFER(g_tx_buf)
FREE_WILC_BUFFER(g_rx_buf)
FREE_WILC_BUFFER(g_fw_buf)
-
- return;
}
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Tony Cho");
MODULE_DESCRIPTION("WILC1xxx Memory Manager");
pure_initcall(wilc_module_init);
-module_exit(wilc_module_deinit);
\ No newline at end of file
+module_exit(wilc_module_deinit);
diff --git a/drivers/staging/wilc1000/wilc_log.h b/drivers/staging/wilc1000/wilc_log.h
deleted file mode 100644
index 2269ebdec129..000000000000
--- a/drivers/staging/wilc1000/wilc_log.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __WILC_LOG_H__
-#define __WILC_LOG_H__
-
-/* Errors will always get printed */
-#define WILC_ERROR(...) do { WILC_PRINTF("(ERR)(%s:%d) ", __WILC_FUNCTION__, __WILC_LINE__); \
- WILC_PRINTF(__VA_ARGS__); \
- } while (0)
-
-/* Wraning only printed if verbosity is 1 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 0)
-#define WILC_WARN(...) do { WILC_PRINTF("(WRN)"); \
- WILC_PRINTF(__VA_ARGS__); \
- } while (0)
-#else
-#define WILC_WARN(...) (0)
-#endif
-
-/* Info only printed if verbosity is 2 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 1)
-#define WILC_INFO(...) do { WILC_PRINTF("(INF)"); \
- WILC_PRINTF(__VA_ARGS__); \
- } while (0)
-#else
-#define WILC_INFO(...) (0)
-#endif
-
-/* Debug is only printed if verbosity is 3 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 2)
-#define WILC_DBG(...) do { WILC_PRINTF("(DBG)(%s:%d) ", __WILC_FUNCTION__, __WILC_LINE__); \
- WILC_PRINTF(__VA_ARGS__); \
- } while (0)
-
-#else
-#define WILC_DBG(...) (0)
-#endif
-
-/* Function In/Out is only printed if verbosity is 4 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 3)
-#define WILC_FN_IN do { WILC_PRINTF("(FIN) (%s:%d) \n", __WILC_FUNCTION__, __WILC_LINE__); } while (0)
-#define WILC_FN_OUT(ret) do { WILC_PRINTF("(FOUT) (%s:%d) %d.\n", __WILC_FUNCTION__, __WILC_LINE__, (ret)); } while (0)
-#else
-#define WILC_FN_IN (0)
-#define WILC_FN_OUT(ret) (0)
-#endif
-
-
-#endif
\ No newline at end of file
diff --git a/drivers/staging/wilc1000/wilc_memory.c b/drivers/staging/wilc1000/wilc_memory.c
index c70707fefb66..e90a95705a7d 100644
--- a/drivers/staging/wilc1000/wilc_memory.c
+++ b/drivers/staging/wilc1000/wilc_memory.c
@@ -9,50 +9,8 @@
void *WILC_MemoryAlloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
char *pcFileName, u32 u32LineNo)
{
- if (u32Size > 0) {
+ if (u32Size > 0)
return kmalloc(u32Size, GFP_ATOMIC);
- } else {
+ else
return NULL;
- }
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void *WILC_MemoryCalloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
- char *pcFileName, u32 u32LineNo)
-{
- return kcalloc(u32Size, 1, GFP_KERNEL);
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void *WILC_MemoryRealloc(void *pvOldBlock, u32 u32NewSize,
- tstrWILC_MemoryAttrs *strAttrs, char *pcFileName, u32 u32LineNo)
-{
- if (u32NewSize == 0) {
- kfree(pvOldBlock);
- return NULL;
- } else if (pvOldBlock == NULL) {
- return kmalloc(u32NewSize, GFP_KERNEL);
- } else {
- return krealloc(pvOldBlock, u32NewSize, GFP_KERNEL);
- }
-
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void WILC_MemoryFree(const void *pvBlock, tstrWILC_MemoryAttrs *strAttrs,
- char *pcFileName, u32 u32LineNo)
-{
- kfree(pvBlock);
}
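What survives of wilc_memory.c is a single kmalloc(GFP_ATOMIC) wrapper. The flag choice is the interesting part: GFP_ATOMIC never sleeps, so the wrapper stays safe to call from interrupt paths, at the cost of a higher failure rate than GFP_KERNEL:

    ptr = kmalloc(len, GFP_KERNEL);  /* may sleep: process context only    */
    ptr = kmalloc(len, GFP_ATOMIC);  /* never sleeps: usable in IRQ paths,
                                      * but more likely to return NULL     */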
diff --git a/drivers/staging/wilc1000/wilc_memory.h b/drivers/staging/wilc1000/wilc_memory.h
index 372d7053e873..f19cec11a69a 100644
--- a/drivers/staging/wilc1000/wilc_memory.h
+++ b/drivers/staging/wilc1000/wilc_memory.h
@@ -38,8 +38,6 @@ typedef struct {
* @sa tstrWILC_MemoryAttrs
* @sa WILC_MALLOC
* @sa WILC_MALLOC_EX
- * @sa WILC_NEW
- * @sa WILC_NEW_EX
* @author syounan
* @date 16 Aug 2010
* @version 1.0
@@ -48,140 +46,12 @@ void *WILC_MemoryAlloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
char *pcFileName, u32 u32LineNo);
/*!
- * @brief Allocates a given size of bytes and zero filling it
- * @param[in] u32Size size of memory in bytes to be allocated
- * @param[in] strAttrs Optional attributes, NULL for default
- * if not NULL, pAllocationPool should point to the pool to use for
- * this allocation. if NULL memory will be allocated directly from
- * the system
- * @param[in] pcFileName file name of the calling code for debugging
- * @param[in] u32LineNo line number of the calling code for debugging
- * @return The new allocated block, NULL if allocation fails
- * @note It is recommended to use of of the wrapper macros instead of
- * calling this function directly
- * @sa sttrWILC_MemoryAttrs
- * @sa WILC_CALLOC
- * @sa WILC_CALLOC_EX
- * @sa WILC_NEW_0
- * @sa WILC_NEW_0_EX
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-void *WILC_MemoryCalloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
- char *pcFileName, u32 u32LineNo);
-
-/*!
- * @brief Reallocates a given block to a new size
- * @param[in] pvOldBlock the old memory block, if NULL then this function
- * behaves as a new allocation function
- * @param[in] u32NewSize size of the new memory block in bytes, if zero then
- * this function behaves as a free function
- * @param[in] strAttrs Optional attributes, NULL for default
- * if pAllocationPool!=NULL and pvOldBlock==NULL, pAllocationPool
- * should point to the pool to use for this allocation.
- * if pAllocationPool==NULL and pvOldBlock==NULL memory will be
- * allocated directly from the system
- * if and pvOldBlock!=NULL, pAllocationPool will not be inspected
- * and reallocation is done from the same pool as the original block
- * @param[in] pcFileName file name of the calling code for debugging
- * @param[in] u32LineNo line number of the calling code for debugging
- * @return The new allocated block, possibly same as pvOldBlock
- * @note It is recommended to use of of the wrapper macros instead of
- * calling this function directly
- * @sa sttrWILC_MemoryAttrs
- * @sa WILC_REALLOC
- * @sa WILC_REALLOC_EX
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-void *WILC_MemoryRealloc(void *pvOldBlock, u32 u32NewSize,
- tstrWILC_MemoryAttrs *strAttrs, char *pcFileName, u32 u32LineNo);
-
-/*!
- * @brief Frees given block
- * @param[in] pvBlock the memory block to be freed
- * @param[in] strAttrs Optional attributes, NULL for default
- * @param[in] pcFileName file name of the calling code for debugging
- * @param[in] u32LineNo line number of the calling code for debugging
- * @note It is recommended to use of of the wrapper macros instead of
- * calling this function directly
- * @sa sttrWILC_MemoryAttrs
- * @sa WILC_FREE
- * @sa WILC_FREE_EX
- * @sa WILC_FREE_SET_NULL
- * @sa WILC_FREE_IF_TRUE
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-void WILC_MemoryFree(const void *pvBlock, tstrWILC_MemoryAttrs *strAttrs,
- char *pcFileName, u32 u32LineNo);
-
-/*!
* @brief standard malloc wrapper with custom attributes
*/
#define WILC_MALLOC_EX(__size__, __attrs__) \
(WILC_MemoryAlloc( \
(__size__), __attrs__, NULL, 0))
-/*!
- * @brief standard calloc wrapper with custom attributes
- */
- #define WILC_CALLOC_EX(__size__, __attrs__) \
- (WILC_MemoryCalloc( \
- (__size__), __attrs__, NULL, 0))
-
-/*!
- * @brief standard realloc wrapper with custom attributes
- */
- #define WILC_REALLOC_EX(__ptr__, __new_size__, __attrs__) \
- (WILC_MemoryRealloc( \
- (__ptr__), (__new_size__), __attrs__, NULL, 0))
-/*!
- * @brief standard free wrapper with custom attributes
- */
- #define WILC_FREE_EX(__ptr__, __attrs__) \
- (WILC_MemoryFree( \
- (__ptr__), __attrs__, NULL, 0))
-
-/*!
- * @brief Allocates a block (with custom attributes) of given type and number of
- * elements
- */
-#define WILC_NEW_EX(__struct_type__, __n_structs__, __attrs__) \
- ((__struct_type__ *)WILC_MALLOC_EX( \
- sizeof(__struct_type__) * (u32)(__n_structs__), __attrs__))
-
-/*!
- * @brief Allocates a block (with custom attributes) of given type and number of
- * elements and Zero-fills it
- */
-#define WILC_NEW_0_EX(__struct_type__, __n_structs__, __attrs__) \
- ((__struct_type__ *)WILC_CALLOC_EX( \
- sizeof(__struct_type__) * (u32)(__n_structs__), __attrs__))
-
-/*!
- * @brief Frees a block (with custom attributes), also setting the original pointer
- * to NULL
- */
-#define WILC_FREE_SET_NULL_EX(__ptr__, __attrs__) do { \
- if (__ptr__ != NULL) { \
- WILC_FREE_EX(__ptr__, __attrs__); \
- __ptr__ = NULL; \
- } \
-} while (0)
-
-/*!
- * @brief Frees a block (with custom attributes) if the pointer expression evaluates
- * to true
- */
-#define WILC_FREE_IF_TRUE_EX(__ptr__, __attrs__) do { \
- if (__ptr__ != NULL) { \
- WILC_FREE_EX(__ptr__, __attrs__); \
- } \
-} while (0)
/*!
* @brief standard malloc wrapper with default attributes
@@ -189,51 +59,8 @@ void WILC_MemoryFree(const void *pvBlock, tstrWILC_MemoryAttrs *strAttrs,
#define WILC_MALLOC(__size__) \
WILC_MALLOC_EX(__size__, NULL)
-/*!
- * @brief standard calloc wrapper with default attributes
- */
-#define WILC_CALLOC(__size__) \
- WILC_CALLOC_EX(__size__, NULL)
-
-/*!
- * @brief standard realloc wrapper with default attributes
- */
-#define WILC_REALLOC(__ptr__, __new_size__) \
- WILC_REALLOC_EX(__ptr__, __new_size__, NULL)
-/*!
- * @brief standard free wrapper with default attributes
- */
-#define WILC_FREE(__ptr__) \
- WILC_FREE_EX(__ptr__, NULL)
-/*!
- * @brief Allocates a block (with default attributes) of given type and number of
- * elements
- */
-#define WILC_NEW(__struct_type__, __n_structs__) \
- WILC_NEW_EX(__struct_type__, __n_structs__, NULL)
-
-/*!
- * @brief Allocates a block (with default attributes) of given type and number of
- * elements and Zero-fills it
- */
-#define WILC_NEW_0(__struct_type__, __n_structs__) \
- WILC_NEW_O_EX(__struct_type__, __n_structs__, NULL)
-
-/*!
- * @brief Frees a block (with default attributes), also setting the original pointer
- * to NULL
- */
-#define WILC_FREE_SET_NULL(__ptr__) \
- WILC_FREE_SET_NULL_EX(__ptr__, NULL)
-
-/*!
- * @brief Frees a block (with default attributes) if the pointer expression evaluates
- * to true
- */
-#define WILC_FREE_IF_TRUE(__ptr__) \
- WILC_FREE_IF_TRUE_EX(__ptr__, NULL)
#endif
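
The wrappers removed above threaded unused pool/file/line arguments around
what are, underneath, plain slab calls. A minimal sketch of the mapping,
assuming the calloc-style wrapper zeroed its block the way kzalloc() does
(alloc_demo is an illustrative name, not driver code):

#include <linux/slab.h>

/* Sketch: the slab calls the removed WILC_* wrappers reduce to. */
static void *alloc_demo(size_t len, size_t new_len)
{
        void *buf = kmalloc(len, GFP_KERNEL);     /* was WILC_MALLOC(len) */
        void *tmp;

        if (!buf)
                return NULL;
        tmp = krealloc(buf, new_len, GFP_KERNEL); /* was WILC_REALLOC() */
        if (!tmp) {
                kfree(buf);                       /* was WILC_FREE(buf) */
                return NULL;
        }
        kfree(tmp);
        return kzalloc(len, GFP_KERNEL);          /* was WILC_CALLOC(len) */
}

Since kfree(NULL) is a no-op, the NULL checks baked into WILC_FREE_SET_NULL()
and WILC_FREE_IF_TRUE() were already redundant.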
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
index 16bcef4b5c00..70e4fa6a07a6 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.c
+++ b/drivers/staging/wilc1000/wilc_msgqueue.c
@@ -8,8 +8,7 @@
* @note copied from FLO glue implementation
* @version 1.0
*/
-WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
- tstrWILC_MsgQueueAttrs *pstrAttrs)
+WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle)
{
spin_lock_init(&pHandle->strCriticalSection);
sema_init(&pHandle->hSem, 0);
@@ -25,8 +24,7 @@ WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
* @note copied from FLO glue implementation
* @version 1.0
*/
-WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
- tstrWILC_MsgQueueAttrs *pstrAttrs)
+WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle)
{
pHandle->bExiting = true;
@@ -39,7 +37,7 @@ WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
while (pHandle->pstrMessageList != NULL) {
Message *pstrMessge = pHandle->pstrMessageList->pstrNext;
- WILC_FREE(pHandle->pstrMessageList);
+ kfree(pHandle->pstrMessageList);
pHandle->pstrMessageList = pstrMessge;
}
@@ -53,8 +51,7 @@ WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
* @version 1.0
*/
WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
- const void *pvSendBuffer, u32 u32SendBufferSize,
- tstrWILC_MsgQueueAttrs *pstrAttrs)
+ const void *pvSendBuffer, u32 u32SendBufferSize)
{
WILC_ErrNo s32RetStatus = WILC_SUCCESS;
unsigned long flags;
@@ -71,13 +68,13 @@ WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
spin_lock_irqsave(&pHandle->strCriticalSection, flags);
/* construct a new message */
- pstrMessage = WILC_NEW(Message, 1);
+ pstrMessage = kmalloc(sizeof(Message), GFP_ATOMIC);
WILC_NULLCHECK(s32RetStatus, pstrMessage);
pstrMessage->u32Length = u32SendBufferSize;
pstrMessage->pstrNext = NULL;
pstrMessage->pvBuffer = WILC_MALLOC(u32SendBufferSize);
WILC_NULLCHECK(s32RetStatus, pstrMessage->pvBuffer);
- WILC_memcpy(pstrMessage->pvBuffer, pvSendBuffer, u32SendBufferSize);
+ memcpy(pstrMessage->pvBuffer, pvSendBuffer, u32SendBufferSize);
/* add it to the message queue */
@@ -100,9 +97,9 @@ WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
/* error occurred, free any allocations */
if (pstrMessage != NULL) {
if (pstrMessage->pvBuffer != NULL) {
- WILC_FREE(pstrMessage->pvBuffer);
+ kfree(pstrMessage->pvBuffer);
}
- WILC_FREE(pstrMessage);
+ kfree(pstrMessage);
}
}
@@ -119,8 +116,7 @@ WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
*/
WILC_ErrNo WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
void *pvRecvBuffer, u32 u32RecvBufferSize,
- u32 *pu32ReceivedLength,
- tstrWILC_MsgQueueAttrs *pstrAttrs)
+ u32 *pu32ReceivedLength)
{
Message *pstrMessage;
@@ -170,13 +166,13 @@ WILC_ErrNo WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
/* consume the message */
pHandle->u32ReceiversCount--;
- WILC_memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
+ memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
*pu32ReceivedLength = pstrMessage->u32Length;
pHandle->pstrMessageList = pstrMessage->pstrNext;
- WILC_FREE(pstrMessage->pvBuffer);
- WILC_FREE(pstrMessage);
+ kfree(pstrMessage->pvBuffer);
+ kfree(pstrMessage);
spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
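
Note the allocation flags in the converted send path: the Message node is
built while strCriticalSection is held with interrupts off, so it uses
GFP_ATOMIC; GFP_KERNEL allocations may sleep and must not run under a
spinlock. A self-contained sketch of the pattern (struct msg_node and
enqueue_atomic are illustrative names, not driver code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct msg_node {
        void *buf;
        u32 len;
        struct msg_node *next;
};

/* Enqueue a copy of @src with @lock held and IRQs off: every
 * allocation inside this window must use GFP_ATOMIC. */
static int enqueue_atomic(spinlock_t *lock, struct msg_node **head,
                          const void *src, u32 len)
{
        struct msg_node *m;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        m = kmalloc(sizeof(*m), GFP_ATOMIC);
        if (!m)
                goto fail;
        m->buf = kmemdup(src, len, GFP_ATOMIC);
        if (!m->buf) {
                kfree(m);
                goto fail;
        }
        m->len = len;
        m->next = *head;
        *head = m;
        spin_unlock_irqrestore(lock, flags);
        return 0;
fail:
        spin_unlock_irqrestore(lock, flags);
        return -ENOMEM;
}

By the same reasoning, the WILC_MALLOC() call that still allocates pvBuffer
inside the locked region is suspect if it maps to kmalloc(GFP_KERNEL);
kmemdup(..., GFP_ATOMIC) as above would be the safer form.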
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
index 35b10019eebd..ef1d2fa20c50 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ b/drivers/staging/wilc1000/wilc_msgqueue.h
@@ -13,20 +13,6 @@
#include "wilc_platform.h"
#include "wilc_errorsupport.h"
#include "wilc_memory.h"
-#include "wilc_strutils.h"
-
-/*!
- * @struct tstrWILC_MsgQueueAttrs
- * @brief Message Queue API options
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-typedef struct {
- /* a dummy member to avoid compiler errors*/
- u8 dummy;
-
-} tstrWILC_MsgQueueAttrs;
/*!
* @brief Creates a new Message queue
@@ -37,14 +23,11 @@ typedef struct {
* @param[in,out] pHandle handle to the message queue object
* @param[in] pstrAttrs Optional attributes, NULL for default
* @return Error code indicating success/failure
- * @sa tstrWILC_MsgQueueAttrs
* @author syounan
* @date 30 Aug 2010
* @version 1.0
*/
-WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
- tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle);
/*!
* @brief Sends a message
@@ -57,15 +40,12 @@ WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
* @param[in] u32SendBufferSize the size of the data to send
* @param[in] pstrAttrs Optional attributes, NULL for default
* @return Error code indicating success/failure
- * @sa tstrWILC_MsgQueueAttrs
* @author syounan
* @date 30 Aug 2010
* @version 1.0
*/
WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
- const void *pvSendBuffer, u32 u32SendBufferSize,
- tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+ const void *pvSendBuffer, u32 u32SendBufferSize);
/*!
* @brief Receives a message
@@ -79,30 +59,23 @@ WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
* @param[out] pu32ReceivedLength the length of received data
* @param[in] pstrAttrs Optional attributes, NULL for default
* @return Error code indicating success/failure
- * @sa tstrWILC_MsgQueueAttrs
* @author syounan
* @date 30 Aug 2010
* @version 1.0
*/
WILC_ErrNo WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
void *pvRecvBuffer, u32 u32RecvBufferSize,
- u32 *pu32ReceivedLength,
- tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+ u32 *pu32ReceivedLength);
/*!
* @brief Destroys an existing Message queue
* @param[in] pHandle handle to the message queue object
* @param[in] pstrAttrs Optional attributes, NULL for default
* @return Error code indicating success/failure
- * @sa tstrWILC_MsgQueueAttrs
* @author syounan
* @date 30 Aug 2010
* @version 1.0
*/
-WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
- tstrWILC_MsgQueueAttrs *pstrAttrs);
-
-
+WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle);
#endif
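
With the write-only attribute parameters gone, the queue API reduces to four
calls. A call-site sketch against the declarations above (msgq_demo is an
illustrative name; error handling elided, payload arbitrary):

#include "wilc_msgqueue.h"

static void msgq_demo(void)
{
        WILC_MsgQueueHandle q;
        u8 out[64];
        u32 len;

        WILC_MsgQueueCreate(&q);
        WILC_MsgQueueSend(&q, "ping", 5);
        WILC_MsgQueueRecv(&q, out, sizeof(out), &len);
        WILC_MsgQueueDestroy(&q);
}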
diff --git a/drivers/staging/wilc1000/wilc_osconfig.h b/drivers/staging/wilc1000/wilc_osconfig.h
deleted file mode 100644
index f9c25140393e..000000000000
--- a/drivers/staging/wilc1000/wilc_osconfig.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Logs options */
-#define WILC_LOGS_NOTHING 0
-#define WILC_LOGS_WARN 1
-#define WILC_LOGS_WARN_INFO 2
-#define WILC_LOGS_WARN_INFO_DBG 3
-#define WILC_LOGS_WARN_INFO_DBG_FN 4
-#define WILC_LOGS_ALL 5
-
-#define WILC_LOG_VERBOSITY_LEVEL WILC_LOGS_ALL
diff --git a/drivers/staging/wilc1000/wilc_oswrapper.h b/drivers/staging/wilc1000/wilc_oswrapper.h
index e97aa96006e0..cb483253e788 100644
--- a/drivers/staging/wilc1000/wilc_oswrapper.h
+++ b/drivers/staging/wilc1000/wilc_oswrapper.h
@@ -14,26 +14,14 @@
#define WILC_OSW_INTERFACE_VER 2
/* Os Configuration File */
-#include "wilc_osconfig.h"
#include "wilc_platform.h"
-/* Logging Functions */
-#include "wilc_log.h"
-
/* Error reporting and handling support */
#include "wilc_errorsupport.h"
-/* Sleep support */
-#include "wilc_sleep.h"
-
-/* Timer support */
-#include "wilc_timer.h"
-
/* Memory support */
#include "wilc_memory.h"
-/* String Utilities */
-#include "wilc_strutils.h"
/* Message Queue */
#include "wilc_msgqueue.h"
diff --git a/drivers/staging/wilc1000/wilc_platform.h b/drivers/staging/wilc1000/wilc_platform.h
index d03532cc3af2..1e56973f2f93 100644
--- a/drivers/staging/wilc1000/wilc_platform.h
+++ b/drivers/staging/wilc1000/wilc_platform.h
@@ -1,5 +1,5 @@
-#ifndef __WILC_platfrom_H__
-#define __WILC_platfrom_H__
+#ifndef __WILC_platform_H__
+#define __WILC_platform_H__
#include <linux/kthread.h>
#include <linux/semaphore.h>
@@ -16,10 +16,6 @@
* OS specific types
*******************************************************************/
-typedef struct timer_list WILC_TimerHandle;
-
-
-
/* Message Queue type is a structure */
typedef struct __Message_struct {
void *pvBuffer;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 897e47e317ff..5a18148a593e 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -10,17 +10,7 @@
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
-
-#ifdef WILC1000_SINGLE_TRANSFER
-#define WILC_SDIO_BLOCK_SIZE 256
-#else
- #if defined(PLAT_AML8726_M3) /* johnny */
- #define WILC_SDIO_BLOCK_SIZE 512
- #define MAX_SEG_SIZE (1 << 12) /* 4096 */
- #else
- #define WILC_SDIO_BLOCK_SIZE 512
- #endif
-#endif
+#define WILC_SDIO_BLOCK_SIZE 512
typedef struct {
void *os_context;
@@ -90,7 +80,6 @@ static int sdio_set_func0_csa_address_byte0(uint32_t adr)
{
sdio_cmd52_t cmd;
-
/**
* Review: BIG ENDIAN
**/
@@ -108,6 +97,7 @@ static int sdio_set_func0_csa_address_byte0(uint32_t adr)
_fail_:
return 0;
}
+
static int sdio_set_func0_block_size(uint32_t block_size)
{
sdio_cmd52_t cmd;
@@ -170,6 +160,7 @@ static int sdio_clear_int(void)
#ifndef WILC_SDIO_IRQ_GPIO
/* uint32_t sts; */
sdio_cmd52_t cmd;
+
cmd.read_write = 0;
cmd.function = 1;
cmd.raw = 0;
@@ -181,6 +172,7 @@ static int sdio_clear_int(void)
return cmd.data;
#else
uint32_t reg;
+
if (!sdio_read_reg(WILC_HOST_RX_CTRL_0, &reg)) {
g_sdio.dPrint(N_ERR, "[wilc spi]: Failed read reg (%08x)...\n", WILC_HOST_RX_CTRL_0);
return 0;
@@ -197,6 +189,7 @@ uint32_t sdio_xfer_cnt(void)
{
uint32_t cnt = 0;
sdio_cmd52_t cmd;
+
cmd.read_write = 0;
cmd.function = 1;
cmd.raw = 0;
@@ -222,8 +215,6 @@ uint32_t sdio_xfer_cnt(void)
cnt |= (cmd.data << 16);
return cnt;
-
-
}
/********************************************
@@ -263,6 +254,7 @@ static int sdio_write_reg(uint32_t addr, uint32_t data)
if ((addr >= 0xf0) && (addr <= 0xff)) {
sdio_cmd52_t cmd;
+
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
@@ -325,16 +317,6 @@ static int sdio_write(uint32_t addr, uint8_t *buf, uint32_t size)
cmd.function = 0;
cmd.address = 0x10f;
} else {
-#ifdef WILC1000_SINGLE_TRANSFER
- /**
- * has to be block aligned...
- **/
- nleft = size % block_size;
- if (nleft > 0) {
- size += block_size;
- size &= ~(block_size - 1);
- }
-#else
/**
* has to be word aligned...
**/
@@ -342,7 +324,6 @@ static int sdio_write(uint32_t addr, uint8_t *buf, uint32_t size)
size += 4;
size &= ~0x3;
}
-#endif
/**
* func 1 access
@@ -355,89 +336,6 @@ static int sdio_write(uint32_t addr, uint8_t *buf, uint32_t size)
nleft = size % block_size;
if (nblk > 0) {
-
-#if defined(PLAT_AML8726_M3_BACKUP) /* johnny */
- int i;
-
- for (i = 0; i < nblk; i++) {
- cmd.block_mode = 0; /* 1; */
- cmd.increment = 1;
- cmd.count = block_size; /* nblk; */
- cmd.buffer = buf;
- cmd.block_size = block_size;
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block send...\n", addr);
- goto _fail_;
- }
-
- if (addr > 0)
- addr += block_size; /* addr += nblk*block_size; */
-
- buf += block_size; /* buf += nblk*block_size; */
- }
-
-#elif defined(PLAT_AML8726_M3) /* johnny */
-
- int i;
- int rest;
- int seg_cnt;
-
- seg_cnt = (nblk * block_size) / MAX_SEG_SIZE;
- rest = (nblk * block_size) & (MAX_SEG_SIZE - 1);
-
- for (i = 0; i < seg_cnt; i++) {
- cmd.block_mode = 1;
- cmd.increment = 1;
- cmd.count = MAX_SEG_SIZE / block_size;
- cmd.buffer = buf;
- cmd.block_size = block_size;
-
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block send...\n", addr);
- goto _fail_;
- }
-
- if (addr > 0)
- addr += MAX_SEG_SIZE;
-
- buf += MAX_SEG_SIZE;
-
- }
-
-
- if (rest > 0) {
- cmd.block_mode = 1;
- cmd.increment = 1;
- cmd.count = rest / block_size;
- cmd.buffer = buf;
- cmd.block_size = block_size; /* johnny : prevent it from setting unexpected value */
-
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], bytes send...\n", addr);
- goto _fail_;
- }
-
- if (addr > 0)
- addr += rest;
-
- buf += rest;
-
- }
-
-#else
-
cmd.block_mode = 1;
cmd.increment = 1;
cmd.count = nblk;
@@ -454,11 +352,8 @@ static int sdio_write(uint32_t addr, uint8_t *buf, uint32_t size)
if (addr > 0)
addr += nblk * block_size;
buf += nblk * block_size;
-
-#endif /* platform */
}
-
if (nleft > 0) {
cmd.block_mode = 0;
cmd.increment = 1;
@@ -488,6 +383,7 @@ static int sdio_read_reg(uint32_t addr, uint32_t *data)
{
if ((addr >= 0xf0) && (addr <= 0xff)) {
sdio_cmd52_t cmd;
+
cmd.read_write = 0;
cmd.function = 0;
cmd.raw = 0;
@@ -552,16 +448,6 @@ static int sdio_read(uint32_t addr, uint8_t *buf, uint32_t size)
cmd.function = 0;
cmd.address = 0x10f;
} else {
-#ifdef WILC1000_SINGLE_TRANSFER
- /**
- * has to be block aligned...
- **/
- nleft = size % block_size;
- if (nleft > 0) {
- size += block_size;
- size &= ~(block_size - 1);
- }
-#else
/**
* has to be word aligned...
**/
@@ -569,7 +455,6 @@ static int sdio_read(uint32_t addr, uint8_t *buf, uint32_t size)
size += 4;
size &= ~0x3;
}
-#endif
/**
* func 1 access
@@ -582,89 +467,6 @@ static int sdio_read(uint32_t addr, uint8_t *buf, uint32_t size)
nleft = size % block_size;
if (nblk > 0) {
-
-#if defined(PLAT_AML8726_M3_BACKUP) /* johnny */
-
- int i;
-
- for (i = 0; i < nblk; i++) {
- cmd.block_mode = 0; /* 1; */
- cmd.increment = 1;
- cmd.count = block_size; /* nblk; */
- cmd.buffer = buf;
- cmd.block_size = block_size;
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
- goto _fail_;
- }
- if (addr > 0)
- addr += block_size; /* addr += nblk*block_size; */
- buf += block_size; /* buf += nblk*block_size; */
- }
-
-#elif defined(PLAT_AML8726_M3) /* johnny */
-
- int i;
- int rest;
- int seg_cnt;
-
- seg_cnt = (nblk * block_size) / MAX_SEG_SIZE;
- rest = (nblk * block_size) & (MAX_SEG_SIZE - 1);
-
- for (i = 0; i < seg_cnt; i++) {
- cmd.block_mode = 1;
- cmd.increment = 1;
- cmd.count = MAX_SEG_SIZE / block_size;
- cmd.buffer = buf;
- cmd.block_size = block_size;
-
-
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
- goto _fail_;
- }
-
- if (addr > 0)
- addr += MAX_SEG_SIZE;
-
- buf += MAX_SEG_SIZE;
-
- }
-
-
- if (rest > 0) {
- cmd.block_mode = 1;
- cmd.increment = 1;
- cmd.count = rest / block_size;
- cmd.buffer = buf;
- cmd.block_size = block_size; /* johnny : prevent it from setting unexpected value */
-
- if (addr > 0) {
- if (!sdio_set_func0_csa_address(addr))
- goto _fail_;
- }
- if (!g_sdio.sdio_cmd53(&cmd)) {
- g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
- goto _fail_;
- }
-
- if (addr > 0)
- addr += rest;
-
- buf += rest;
-
- }
-
-#else
-
cmd.block_mode = 1;
cmd.increment = 1;
cmd.count = nblk;
@@ -681,8 +483,6 @@ static int sdio_read(uint32_t addr, uint8_t *buf, uint32_t size)
if (addr > 0)
addr += nblk * block_size;
buf += nblk * block_size;
-
-#endif /* platform */
} /* if (nblk > 0) */
if (nleft > 0) {
@@ -784,6 +584,7 @@ static int sdio_init(wilc_wlan_inp_t *inp, wilc_debug_func func)
sdio_cmd52_t cmd;
int loop;
uint32_t chipid;
+
memset(&g_sdio, 0, sizeof(wilc_sdio_t));
g_sdio.dPrint = func;
@@ -891,14 +692,12 @@ static int sdio_init(wilc_wlan_inp_t *inp, wilc_debug_func func)
goto _fail_;
}
g_sdio.dPrint(N_ERR, "[wilc sdio]: chipid (%08x)\n", chipid);
- if ((chipid & 0xfff) > 0x2a0) {
+ if ((chipid & 0xfff) > 0x2a0)
g_sdio.has_thrpt_enh3 = 1;
- } else {
+ else
g_sdio.has_thrpt_enh3 = 0;
- }
g_sdio.dPrint(N_ERR, "[wilc sdio]: has_thrpt_enh3 = %d...\n", g_sdio.has_thrpt_enh3);
-
return 1;
_fail_:
@@ -925,23 +724,21 @@ static int sdio_read_size(uint32_t *size)
/**
* Read DMA count in words
**/
- {
- cmd.read_write = 0;
- cmd.function = 0;
- cmd.raw = 0;
- cmd.address = 0xf2;
- cmd.data = 0;
- g_sdio.sdio_cmd52(&cmd);
- tmp = cmd.data;
+ cmd.read_write = 0;
+ cmd.function = 0;
+ cmd.raw = 0;
+ cmd.address = 0xf2;
+ cmd.data = 0;
+ g_sdio.sdio_cmd52(&cmd);
+ tmp = cmd.data;
- /* cmd.read_write = 0; */
- /* cmd.function = 0; */
- /* cmd.raw = 0; */
- cmd.address = 0xf3;
- cmd.data = 0;
- g_sdio.sdio_cmd52(&cmd);
- tmp |= (cmd.data << 8);
- }
+ /* cmd.read_write = 0; */
+ /* cmd.function = 0; */
+ /* cmd.raw = 0; */
+ cmd.address = 0xf3;
+ cmd.data = 0;
+ g_sdio.sdio_cmd52(&cmd);
+ tmp |= (cmd.data << 8);
*size = tmp;
return 1;
@@ -966,26 +763,21 @@ static int sdio_read_int(uint32_t *int_status)
cmd.data = 0;
g_sdio.sdio_cmd52(&cmd);
- if (cmd.data & (1 << 0)) {
+ if (cmd.data & (1 << 0))
tmp |= INT_0;
- }
- if (cmd.data & (1 << 2)) {
+ if (cmd.data & (1 << 2))
tmp |= INT_1;
- }
- if (cmd.data & (1 << 3)) {
+ if (cmd.data & (1 << 3))
tmp |= INT_2;
- }
- if (cmd.data & (1 << 4)) {
+ if (cmd.data & (1 << 4))
tmp |= INT_3;
- }
- if (cmd.data & (1 << 5)) {
+ if (cmd.data & (1 << 5))
tmp |= INT_4;
- }
- if (cmd.data & (1 << 6)) {
+ if (cmd.data & (1 << 6))
tmp |= INT_5;
- }
{
int i;
+
for (i = g_sdio.nint; i < MAX_NUM_INT; i++) {
if ((tmp >> (IRG_FLAGS_OFFSET + i)) & 0x1) {
g_sdio.dPrint(N_ERR, "[wilc sdio]: Unexpected interrupt (1) : tmp=%x, data=%x\n", tmp, cmd.data);
@@ -1024,6 +816,7 @@ static int sdio_clear_int_ext(uint32_t val)
#ifdef WILC_SDIO_IRQ_GPIO
{
uint32_t flags;
+
flags = val & ((1 << MAX_NUN_INT_THRPT_ENH2) - 1);
reg = flags;
}
@@ -1041,6 +834,7 @@ static int sdio_clear_int_ext(uint32_t val)
reg |= (1 << 7);
if (reg) {
sdio_cmd52_t cmd;
+
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
@@ -1060,6 +854,7 @@ static int sdio_clear_int_ext(uint32_t val)
/* see below. has_thrpt_enh2 uses register 0xf8 to clear interrupts. */
/* Cannot clear multiple interrupts. Must clear each interrupt individually */
uint32_t flags;
+
flags = val & ((1 << MAX_NUM_INT) - 1);
if (flags) {
int i;
@@ -1068,6 +863,7 @@ static int sdio_clear_int_ext(uint32_t val)
for (i = 0; i < g_sdio.nint; i++) {
if (flags & 1) {
sdio_cmd52_t cmd;
+
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
@@ -1085,9 +881,8 @@ static int sdio_clear_int_ext(uint32_t val)
break;
flags >>= 1;
}
- if (!ret) {
+ if (!ret)
goto _fail_;
- }
for (i = g_sdio.nint; i < MAX_NUM_INT; i++) {
if (flags & 1)
g_sdio.dPrint(N_ERR, "[wilc sdio]: Unexpected interrupt cleared %d...\n", i);
@@ -1097,7 +892,6 @@ static int sdio_clear_int_ext(uint32_t val)
}
#endif /* WILC_SDIO_IRQ_GPIO */
-
{
uint32_t vmm_ctl;
@@ -1138,7 +932,6 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
{
uint32_t reg;
-
if (nint > MAX_NUM_INT) {
g_sdio.dPrint(N_ERR, "[wilc sdio]: Too many interupts (%d)...\n", nint);
return 0;
@@ -1148,7 +941,6 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
return 0;
}
-
g_sdio.nint = nint;
/**
@@ -1170,7 +962,6 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
uint32_t reg;
int ret, i;
-
/**
* interrupt pin mux select
**/
@@ -1195,9 +986,8 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
return 0;
}
- for (i = 0; (i < 5) && (nint > 0); i++, nint--) {
+ for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= (1 << (27 + i));
- }
ret = sdio_write_reg(WILC_INTR_ENABLE, reg);
if (!ret) {
g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed write reg (%08x)...\n", WILC_INTR_ENABLE);
@@ -1210,9 +1000,8 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
return 0;
}
- for (i = 0; (i < 3) && (nint > 0); i++, nint--) {
+ for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= (1 << i);
- }
ret = sdio_read_reg(WILC_INTR2_ENABLE, &reg);
if (!ret) {
@@ -1225,7 +1014,6 @@ static int sdio_sync_ext(int nint /* how many interrupts to enable. */)
return 1;
}
-
/********************************************
*
* Global sdio HIF function table
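
Stripped of the PLAT_AML8726_M3 special cases, both transfer paths reduce to
one multi-block CMD53 followed by one byte-mode CMD53 for the tail. A sketch
of that split using the cmd53 fields visible in the hunks above; issue_cmd53
is a hypothetical stand-in for g_sdio.sdio_cmd53() plus its error logging:

static int issue_cmd53(sdio_cmd53_t *cmd); /* definition elided */

/* Split a word-aligned transfer into block-mode + byte-mode CMD53s. */
static int xfer_split(sdio_cmd53_t *cmd, uint8_t *buf, uint32_t size,
                      uint32_t block_size)
{
        uint32_t nblk = size / block_size;
        uint32_t nleft = size % block_size;

        if (nblk > 0) {
                cmd->block_mode = 1;    /* one CMD53 covering nblk blocks */
                cmd->increment = 1;
                cmd->count = nblk;
                cmd->buffer = buf;
                cmd->block_size = block_size;
                if (!issue_cmd53(cmd))
                        return 0;
                buf += nblk * block_size;
        }
        if (nleft > 0) {
                cmd->block_mode = 0;    /* byte mode for the remainder */
                cmd->increment = 1;
                cmd->count = nleft;
                cmd->buffer = buf;
                if (!issue_cmd53(cmd))
                        return 0;
        }
        return 1;
}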
diff --git a/drivers/staging/wilc1000/wilc_sleep.c b/drivers/staging/wilc1000/wilc_sleep.c
deleted file mode 100644
index adab3cac64f9..000000000000
--- a/drivers/staging/wilc1000/wilc_sleep.c
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#include "wilc_sleep.h"
-
-/*
- * @author mdaftedar
- * @date 10 Aug 2010
- * @version 1.0
- */
-void WILC_Sleep(u32 u32TimeMilliSec)
-{
- if (u32TimeMilliSec <= 4000000) {
- u32 u32Temp = u32TimeMilliSec * 1000;
- usleep_range(u32Temp, u32Temp);
- } else {
- msleep(u32TimeMilliSec);
- }
-
-}
diff --git a/drivers/staging/wilc1000/wilc_sleep.h b/drivers/staging/wilc1000/wilc_sleep.h
deleted file mode 100644
index cf9047f707a7..000000000000
--- a/drivers/staging/wilc1000/wilc_sleep.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __WILC_SLEEP_H__
-#define __WILC_SLEEP_H__
-
-#include <linux/types.h>
-#include <linux/delay.h>
-
-/*!
- * @brief forces the current thread to sleep until the given time has elapsed
- * @param[in] u32TimeMilliSec Time to sleep in milliseconds
- * @sa WILC_SleepMicrosec
- * @author syounan
- * @date 10 Aug 2010
- * @version 1.0
- * @note This function offers a relatively inaccurate and low resolution
- * sleep; for accurate high resolution sleep use a microsecond-based API
- */
-/* TODO: remove and open-code in callers */
-void WILC_Sleep(u32 u32TimeMilliSec);
-
-#endif
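
The sleep wrapper goes away in line with its own TODO: callers open-code the
delay. The usual kernel guidance (Documentation/timers/timers-howto.txt) is
usleep_range() for short waits, since it is hrtimer-based and the min/max
window lets wakeups coalesce, and msleep() for long ones where jiffy
resolution is fine. A sketch with arbitrary example durations:

#include <linux/delay.h>

static void delay_demo(void)
{
        usleep_range(2000, 2500); /* was WILC_Sleep(2): short, hrtimer-based */
        msleep(100);              /* was WILC_Sleep(100): jiffies are fine */
}

Note that the deleted WILC_Sleep() passed identical min and max to
usleep_range(), which forfeits exactly that coalescing benefit.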
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index abea5df656d7..1bf7d314ae34 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -374,11 +374,10 @@ static int spi_cmd_complete(uint8_t cmd, uint32_t adr, uint8_t *b, uint32_t sz,
return result;
}
- if (!g_spi.crc_off) {
+ if (!g_spi.crc_off)
wb[len - 1] = (crc7(0x7f, (const uint8_t *)&wb[0], len - 1)) << 1;
- } else {
+ else
len -= 1;
- }
#define NUM_SKIP_BYTES (1)
#define NUM_RSP_BYTES (2)
@@ -522,11 +521,10 @@ static int spi_cmd_complete(uint8_t cmd, uint32_t adr, uint8_t *b, uint32_t sz,
if (sz > 0) {
int nbytes;
- if (sz <= (DATA_PKT_SZ - ix)) {
+ if (sz <= (DATA_PKT_SZ - ix))
nbytes = sz;
- } else {
+ else
nbytes = DATA_PKT_SZ - ix;
- }
/**
* Read bytes
@@ -557,11 +555,10 @@ static int spi_cmd_complete(uint8_t cmd, uint32_t adr, uint8_t *b, uint32_t sz,
while (sz > 0) {
int nbytes;
- if (sz <= DATA_PKT_SZ) {
+ if (sz <= DATA_PKT_SZ)
nbytes = sz;
- } else {
+ else
nbytes = DATA_PKT_SZ;
- }
/**
* read data response only on the next DMA cycles not
diff --git a/drivers/staging/wilc1000/wilc_strutils.c b/drivers/staging/wilc1000/wilc_strutils.c
deleted file mode 100644
index e0145953ceef..000000000000
--- a/drivers/staging/wilc1000/wilc_strutils.c
+++ /dev/null
@@ -1,80 +0,0 @@
-
-#define _CRT_SECURE_NO_DEPRECATE
-
-#include "wilc_strutils.h"
-
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-s32 WILC_memcmp(const void *pvArg1, const void *pvArg2, u32 u32Count)
-{
- return memcmp(pvArg1, pvArg2, u32Count);
-}
-
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void WILC_memcpy_INTERNAL(void *pvTarget, const void *pvSource, u32 u32Count)
-{
- memcpy(pvTarget, pvSource, u32Count);
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void *WILC_memset(void *pvTarget, u8 u8SetValue, u32 u32Count)
-{
- return memset(pvTarget, u8SetValue, u32Count);
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-char *WILC_strncpy(char *pcTarget, const char *pcSource,
- u32 u32Count)
-{
- return strncpy(pcTarget, pcSource, u32Count);
-}
-
-s32 WILC_strncmp(const char *pcStr1, const char *pcStr2,
- u32 u32Count)
-{
- s32 s32Result;
-
- if (pcStr1 == NULL && pcStr2 == NULL) {
- s32Result = 0;
- } else if (pcStr1 == NULL) {
- s32Result = -1;
- } else if (pcStr2 == NULL) {
- s32Result = 1;
- } else {
- s32Result = strncmp(pcStr1, pcStr2, u32Count);
- if (s32Result < 0) {
- s32Result = -1;
- } else if (s32Result > 0) {
- s32Result = 1;
- }
- }
-
- return s32Result;
-}
-
-/*!
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-u32 WILC_strlen(const char *pcStr)
-{
- return (u32)strlen(pcStr);
-}
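
Unlike strncmp(), the WILC_strncmp() deleted here tolerated NULL (sorting it
lowest) and clamped its result to -1/0/1. The conversions in this series only
ever test the result against zero, so plain strncmp() is safe there; a caller
that relied on the old semantics would need a small shim along these lines
(strncmp_null_low is an illustrative name):

#include <linux/string.h>

/* NULL sorts lowest; result clamped to -1/0/1, as WILC_strncmp() did. */
static int strncmp_null_low(const char *a, const char *b, size_t n)
{
        int r;

        if (!a || !b)
                return (a ? 1 : 0) - (b ? 1 : 0);
        r = strncmp(a, b, n);
        return (r > 0) - (r < 0);
}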
diff --git a/drivers/staging/wilc1000/wilc_strutils.h b/drivers/staging/wilc1000/wilc_strutils.h
deleted file mode 100644
index d1445575a25e..000000000000
--- a/drivers/staging/wilc1000/wilc_strutils.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __WILC_STRUTILS_H__
-#define __WILC_STRUTILS_H__
-
-/*!
- * @file wilc_strutils.h
- * @brief Basic string utilities
- * @author syounan
- * @sa wilc_oswrapper.h top level OS wrapper file
- * @date 16 Aug 2010
- * @version 1.0
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include "wilc_errorsupport.h"
-
-/*!
- * @brief Compares two memory buffers
- * @param[in] pvArg1 pointer to the first memory location
- * @param[in] pvArg2 pointer to the second memory location
- * @param[in] u32Count the size of the memory buffers
- * @return 0 if the 2 buffers are equal, 1 if pvArg1 is bigger than pvArg2,
- * -1 if pvArg1 smaller than pvArg2
- * @note this function repeats the functionality of standard memcmp
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-s32 WILC_memcmp(const void *pvArg1, const void *pvArg2, u32 u32Count);
-
-/*!
- * @brief Internal implementation for memory copy
- * @param[in] pvTarget the target buffer to which the data is copied into
- * @param[in] pvSource pointer to the second memory location
- * @param[in] u32Count the size of the data to copy
- * @note this function should not be used directly, use WILC_memcpy instead
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void WILC_memcpy_INTERNAL(void *pvTarget, const void *pvSource, u32 u32Count);
-
-/*!
- * @brief Copies the contents of a memory buffer into another
- * @param[in] pvTarget the target buffer to which the data is copied into
- * @param[in] pvSource pointer to the second memory location
- * @param[in] u32Count the size of the data to copy
- * @return WILC_SUCCESS if copy is successfully handled
- * WILC_FAIL if copy failed
- * @note this function repeats the functionality of standard memcpy,
- * however memcpy is undefined if the two buffers overlap but this
- * implementation will check for overlap and report error
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-static WILC_ErrNo WILC_memcpy(void *pvTarget, const void *pvSource, u32 u32Count)
-{
- if (
- (((u8 *)pvTarget <= (u8 *)pvSource)
- && (((u8 *)pvTarget + u32Count) > (u8 *)pvSource))
-
- || (((u8 *)pvSource <= (u8 *)pvTarget)
- && (((u8 *)pvSource + u32Count) > (u8 *)pvTarget))
- ) {
- /* overlapped memory, return Error */
- return WILC_FAIL;
- } else {
- WILC_memcpy_INTERNAL(pvTarget, pvSource, u32Count);
- return WILC_SUCCESS;
- }
-}
-
-/*!
- * @brief Sets the contents of a memory buffer with the given value
- * @param[in] pvTarget the target buffer whose contents will be set
- * @param[in] u8SetValue the value to be used
- * @param[in] u32Count the size of the memory buffer
- * @return value of pvTarget
- * @note this function repeats the functionality of standard memset
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-void *WILC_memset(void *pvTarget, u8 u8SetValue, u32 u32Count);
-
-/*!
- * @brief copies the contents of source string into the target string
- * @param[in] pcTarget the target string buffer
- * @param[in] pcSource the source string the will be copied
- * @param[in] u32Count copying will proceed until a null character in pcSource
- * is encountered or u32Count bytes have been copied
- * @return value of pcTarget
- * @note this function repeats the functionality of standard strncpy
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-char *WILC_strncpy(char *pcTarget, const char *pcSource,
- u32 u32Count);
-
-/*!
- * @brief Compares two strings up to u32Count characters
- * @details Compares 2 strings reporting which is bigger, NULL is considered
- * the smallest string, then a zero length string, then all other
- * strings depending on their ASCII character order with lower case
- * converted to upper case
- * @param[in] pcStr1 the first string, NULL is valid and considered smaller
- * than any other non-NULL string (including zero length strings)
- * @param[in] pcStr2 the second string, NULL is valid and considered smaller
- * than any other non-NULL string (including zero length strings)
- * @param[in] u32Count comparison will proceed until a null character in pcStr1 or
- * pcStr2 is encountered or u32Count bytes have been compared
- * @return 0 if the 2 strings are equal, 1 if pcStr1 is bigger than pcStr2,
- * -1 if pcStr1 smaller than pcStr2
- * @author aabozaeid
- * @date 7 Dec 2010
- * @version 1.0
- */
-s32 WILC_strncmp(const char *pcStr1, const char *pcStr2,
- u32 u32Count);
-
-/*!
- * @brief gets the length of a string
- * @param[in] pcStr the string
- * @return the length
- * @note this function repeats the functionality of standard strlen
- * @author syounan
- * @date 18 Aug 2010
- * @version 1.0
- */
-u32 WILC_strlen(const char *pcStr);
-
-#endif
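
One behavioural detail disappears with this header: WILC_memcpy() rejected
overlapping ranges with WILC_FAIL, while the memcpy() calls replacing it have
undefined behaviour on overlap. None of the converted call sites copy within
a single buffer, but where overlap is possible the standard answer is
memmove():

#include <linux/string.h>

/* memmove() copies correctly even when source and destination
 * overlap; memcpy() may not. */
static void copy_possibly_overlapping(void *dst, const void *src, size_t n)
{
        memmove(dst, src, n);
}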
diff --git a/drivers/staging/wilc1000/wilc_timer.c b/drivers/staging/wilc1000/wilc_timer.c
deleted file mode 100644
index dc71157f9c3e..000000000000
--- a/drivers/staging/wilc1000/wilc_timer.c
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#include "wilc_timer.h"
-
-WILC_ErrNo WILC_TimerCreate(WILC_TimerHandle *pHandle,
- tpfWILC_TimerFunction pfCallback, tstrWILC_TimerAttrs *pstrAttrs)
-{
- WILC_ErrNo s32RetStatus = WILC_SUCCESS;
- setup_timer(pHandle, (void(*)(unsigned long))pfCallback, 0);
-
- return s32RetStatus;
-}
-
-WILC_ErrNo WILC_TimerDestroy(WILC_TimerHandle *pHandle,
- tstrWILC_TimerAttrs *pstrAttrs)
-{
- WILC_ErrNo s32RetStatus = WILC_FAIL;
- if (pHandle != NULL) {
- s32RetStatus = del_timer_sync(pHandle);
- pHandle = NULL;
- }
-
- return s32RetStatus;
-}
-
-
-WILC_ErrNo WILC_TimerStart(WILC_TimerHandle *pHandle, u32 u32Timeout,
- void *pvArg, tstrWILC_TimerAttrs *pstrAttrs)
-{
- WILC_ErrNo s32RetStatus = WILC_FAIL;
- if (pHandle != NULL) {
- pHandle->data = (unsigned long)pvArg;
- s32RetStatus = mod_timer(pHandle, (jiffies + msecs_to_jiffies(u32Timeout)));
- }
- return s32RetStatus;
-}
-
-WILC_ErrNo WILC_TimerStop(WILC_TimerHandle *pHandle,
- tstrWILC_TimerAttrs *pstrAttrs)
-{
- WILC_ErrNo s32RetStatus = WILC_FAIL;
- if (pHandle != NULL)
- s32RetStatus = del_timer(pHandle);
-
- return s32RetStatus;
-}
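
The timer wrapper was a thin veneer over struct timer_list, so callers now
drive the (4.3-era) timer API directly. A minimal lifecycle sketch; the
demo_timer names are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

/* Pre-4.15 timer callbacks take an unsigned long, normally a context
 * pointer stashed in timer->data. */
static void demo_timer_fn(unsigned long data)
{
        /* flag work for a thread here; never block in a timer callback */
}

static void demo_timer_lifecycle(void *ctx, unsigned int ms)
{
        setup_timer(&demo_timer, demo_timer_fn, 0);   /* WILC_TimerCreate() */
        demo_timer.data = (unsigned long)ctx;
        mod_timer(&demo_timer,
                  jiffies + msecs_to_jiffies(ms));    /* WILC_TimerStart() */
        del_timer_sync(&demo_timer);                  /* WILC_TimerDestroy() */
}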
diff --git a/drivers/staging/wilc1000/wilc_timer.h b/drivers/staging/wilc1000/wilc_timer.h
deleted file mode 100644
index 931269db3194..000000000000
--- a/drivers/staging/wilc1000/wilc_timer.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef __WILC_TIMER_H__
-#define __WILC_TIMER_H__
-
-/*!
- * @file wilc_timer.h
- * @brief Timer (One Shot and Periodic) OS wrapper functionality
- * @author syounan
- * @sa wilc_oswrapper.h top level OS wrapper file
- * @date 16 Aug 2010
- * @version 1.0
- */
-
-#include "wilc_platform.h"
-#include "wilc_errorsupport.h"
-
-typedef void (*tpfWILC_TimerFunction)(void *);
-
-/*!
- * @struct tstrWILC_TimerAttrs
- * @brief Timer API options
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-typedef struct {
- /* a dummy member to avoid compiler errors*/
- u8 dummy;
-} tstrWILC_TimerAttrs;
-
-/*!
- * @brief Creates a new timer
- * @details Timers are a useful utility to execute some callback function
- * in the future.
- * A timer object has 3 states : IDLE, PENDING and EXECUTING
- * IDLE : initial timer state after creation, no execution for the
- * callback function is planned
- * PENDING : a request to execute the callback function is made
- * using WILC_TimerStart.
- * EXECUTING : the timer has expired and its callback is now
- * executing, when execution is done the timer returns to PENDING
- * if the feature CONFIG_WILC_TIMER_PERIODIC is enabled and
- * the flag tstrWILC_TimerAttrs.bPeriodicTimer is set. otherwise the
- * timer will return to IDLE
- * @param[out] pHandle handle to the newly created timer object
- * @param[in] pfEntry pointer to the callback function to be called when the
- * timer expires
- * the underlying OS may put many restrictions on what can be
- * called inside a timer's callback, as a general rule no blocking
- * operations (IO or semaphore acquisition) should be performed
- * It is recommended that the callback be as short as possible
- * and only flag other threads to do the actual work
- * also it should be noted that the underlying OS may not give any
- * guarantees on which context this callback will execute in
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @sa WILC_TimerAttrs
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-WILC_ErrNo WILC_TimerCreate(WILC_TimerHandle *pHandle,
- tpfWILC_TimerFunction pfCallback, tstrWILC_TimerAttrs *pstrAttrs);
-
-
-/*!
- * @brief Destroys a given timer
- * @details This will destroy a given timer freeing any resources used by it
- * if the timer was PENDING Then must be cancelled as well(i.e.
- * goes to IDLE, same effect as calling WILC_TimerCancel first)
- * if the timer was EXECUTING then the callback will be allowed to
- * finish first then all resources are freed
- * @param[in] pHandle handle to the timer object
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @sa WILC_TimerAttrs
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-WILC_ErrNo WILC_TimerDestroy(WILC_TimerHandle *pHandle,
- tstrWILC_TimerAttrs *pstrAttrs);
-
-/*!
- * @brief Starts a given timer
- * @details This function will move the timer to the PENDING state until the
- * given time expires (in msec) then the callback function will be
- * executed (timer in EXECUTING state) after execution is done the
- * timer either goes to IDLE (if bPeriodicTimer==false) or
- * PENDING with same timeout value (if bPeriodicTimer==true)
- * @param[in] pHandle handle to the timer object
- * @param[in] u32Timeout timeout value in msec after which the callback
- * function will be executed. Timeout value of 0 is not allowed for
- * periodic timers
- * @param[in] pstrAttrs Optional attributes, NULL for default,
- * set bPeriodicTimer to run this timer as a periodic timer
- * @return Error code indicating success/failure
- * @sa WILC_TimerAttrs
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-WILC_ErrNo WILC_TimerStart(WILC_TimerHandle *pHandle, u32 u32Timeout, void *pvArg,
- tstrWILC_TimerAttrs *pstrAttrs);
-
-
-/*!
- * @brief Stops a given timer
- * @details This function will move the timer to the IDLE state cancelling
- * any scheduled callback execution.
- * if this function is called on a timer already in the IDLE state
- * it will have no effect.
- * if this function is called on a timer in EXECUTING state
- * (callback has already started) it will wait until executing is
- * done then move the timer to the IDLE state (which is trivial
- * work if the timer is non periodic)
- * @param[in] pHandle handle to the timer object
- * @param[in] pstrAttrs Optional attributes, NULL for default,
- * @return Error code indicating success/failure
- * @sa WILC_TimerAttrs
- * @author syounan
- * @date 16 Aug 2010
- * @version 1.0
- */
-WILC_ErrNo WILC_TimerStop(WILC_TimerHandle *pHandle,
- tstrWILC_TimerAttrs *pstrAttrs);
-
-
-
-#endif
diff --git a/drivers/staging/wilc1000/wilc_type.h b/drivers/staging/wilc1000/wilc_type.h
deleted file mode 100644
index 5f36e7f92cd1..000000000000
--- a/drivers/staging/wilc1000/wilc_type.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* ////////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_type.h */
-/* */
-/* */
-/* //////////////////////////////////////////////////////////////////////////// */
-#ifndef WILC_TYPE_H
-#define WILC_TYPE_H
-
-/********************************************
- *
- * Type Defines
- *
- ********************************************/
-#ifdef WIN32
-typedef char int8_t;
-typedef short int16_t;
-typedef long int32_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned long uint32_t;
-#else
-#ifdef _linux_
-/*typedef unsigned char uint8_t;
- * typedef unsigned short uint16_t;
- * typedef unsigned long uint32_t;*/
-#include <stdint.h>
-#else
-#include "wilc_oswrapper.h"
-#endif
-#endif
-#endif
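
The deleted header re-created <stdint.h> per platform, including a WIN32
branch where long stood in for 32 bits. In-kernel code simply takes its
fixed-width types from <linux/types.h>; a trivial illustration:

#include <linux/types.h>

/* Kernel-native equivalents of the deleted typedefs. */
struct demo_fields {
        u8 byte;   /* was uint8_t */
        u16 half;  /* was uint16_t */
        u32 word;  /* was uint32_t */
};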
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 92064db9eb05..a6edc973f636 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -22,7 +22,6 @@
#define IS_MGMT_STATUS_SUCCES 0x040
#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-extern void linux_wlan_free(void *vp);
extern int linux_wlan_get_firmware(perInterface_wlan_t *p_nic);
extern void linux_wlan_unlock(void *vp);
extern u16 Set_machw_change_vir_if(bool bValue);
@@ -33,9 +32,9 @@ extern int mac_close(struct net_device *ndev);
tstrNetworkInfo astrLastScannedNtwrksShadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
u32 u32LastScannedNtwrksCountShadow;
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-WILC_TimerHandle hDuringIpTimer;
+struct timer_list hDuringIpTimer;
#endif
-WILC_TimerHandle hAgingTimer;
+struct timer_list hAgingTimer;
static u8 op_ifcs;
extern u8 u8ConnectedSSID[6];
@@ -112,7 +111,7 @@ u8 u8P2P_oui[] = {0x50, 0x6f, 0x9A, 0x09};
u8 u8P2Plocalrandom = 0x01;
u8 u8P2Precvrandom = 0x00;
u8 u8P2P_vendorspec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
-bool bWilc_ie = false;
+bool bWilc_ie;
#endif
static struct ieee80211_supported_band WILC_WFI_band_2ghz = {
@@ -135,25 +134,23 @@ struct add_key_params g_add_ptk_key_params;
struct wilc_wfi_key g_key_ptk_params;
struct wilc_wfi_wep_key g_key_wep_params;
u8 g_flushing_in_progress;
-bool g_ptk_keys_saved = false;
-bool g_gtk_keys_saved = false;
-bool g_wep_keys_saved = false;
+bool g_ptk_keys_saved;
+bool g_gtk_keys_saved;
+bool g_wep_keys_saved;
#define AGING_TIME (9 * 1000)
#define duringIP_TIME 15000
void clear_shadow_scan(void *pUserVoid)
{
- struct WILC_WFI_priv *priv;
int i;
- priv = (struct WILC_WFI_priv *)pUserVoid;
if (op_ifcs == 0) {
- WILC_TimerDestroy(&hAgingTimer, NULL);
+ del_timer_sync(&hAgingTimer);
PRINT_INFO(CORECONFIG_DBG, "destroy aging timer\n");
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
if (astrLastScannedNtwrksShadow[u32LastScannedNtwrksCountShadow].pu8IEs != NULL) {
- WILC_FREE(astrLastScannedNtwrksShadow[i].pu8IEs);
+ kfree(astrLastScannedNtwrksShadow[i].pu8IEs);
astrLastScannedNtwrksShadow[u32LastScannedNtwrksCountShadow].pu8IEs = NULL;
}
@@ -204,7 +201,7 @@ void refresh_scan(void *pUserVoid, uint8_t all, bool bDirectScan)
channel = ieee80211_get_channel(wiphy, s32Freq);
rssi = get_rssi_avg(pstrNetworkInfo);
- if (WILC_memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7) || bDirectScan) {
+ if (memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7) || bDirectScan) {
bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, pstrNetworkInfo->au8bssid, pstrNetworkInfo->u64Tsf, pstrNetworkInfo->u16CapInfo,
pstrNetworkInfo->u16BeaconPeriod, (const u8 *)pstrNetworkInfo->pu8IEs,
(size_t)pstrNetworkInfo->u16IEsLen, (((s32)rssi) * 100), GFP_KERNEL);
@@ -219,9 +216,7 @@ void refresh_scan(void *pUserVoid, uint8_t all, bool bDirectScan)
void reset_shadow_found(void *pUserVoid)
{
- struct WILC_WFI_priv *priv;
int i;
- priv = (struct WILC_WFI_priv *)pUserVoid;
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
astrLastScannedNtwrksShadow[i].u8Found = 0;
@@ -230,28 +225,24 @@ void reset_shadow_found(void *pUserVoid)
void update_scan_time(void *pUserVoid)
{
- struct WILC_WFI_priv *priv;
int i;
- priv = (struct WILC_WFI_priv *)pUserVoid;
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan = jiffies;
}
}
-void remove_network_from_shadow(void *pUserVoid)
+static void remove_network_from_shadow(unsigned long arg)
{
- struct WILC_WFI_priv *priv;
unsigned long now = jiffies;
int i, j;
- priv = (struct WILC_WFI_priv *)pUserVoid;
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
if (time_after(now, astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan + (unsigned long)(SCAN_RESULT_EXPIRE))) {
- PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s \n", astrLastScannedNtwrksShadow[i].au8ssid);
+ PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s\n", astrLastScannedNtwrksShadow[i].au8ssid);
if (astrLastScannedNtwrksShadow[i].pu8IEs != NULL) {
- WILC_FREE(astrLastScannedNtwrksShadow[i].pu8IEs);
+ kfree(astrLastScannedNtwrksShadow[i].pu8IEs);
astrLastScannedNtwrksShadow[i].pu8IEs = NULL;
}
@@ -265,14 +256,16 @@ void remove_network_from_shadow(void *pUserVoid)
}
PRINT_D(CFG80211_DBG, "Number of cached networks: %d\n", u32LastScannedNtwrksCountShadow);
- if (u32LastScannedNtwrksCountShadow != 0)
- WILC_TimerStart(&(hAgingTimer), AGING_TIME, pUserVoid, NULL);
- else
+ if (u32LastScannedNtwrksCountShadow != 0) {
+ hAgingTimer.data = arg;
+ mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
+ } else {
PRINT_D(CFG80211_DBG, "No need to restart Aging timer\n");
+ }
}
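
remove_network_from_shadow() is now a timer handler itself: it takes the
unsigned long argument the pre-4.15 timer core passes and re-arms the aging
timer, forwarding its context through timer->data. The pattern in isolation,
reusing the hAgingTimer/AGING_TIME names from this file (the real handler
re-arms only while cached entries remain):

/* Sketch of a self-rearming timer handler. */
static void aging_handler(unsigned long arg)
{
        /* ... drop expired scan entries ... */
        hAgingTimer.data = arg; /* keep the context for the next run */
        mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
}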
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-void clear_duringIP(void *pUserVoid)
+static void clear_duringIP(unsigned long arg)
{
PRINT_D(GENERIC_DBG, "GO:IP Obtained , enable scan\n");
g_obtainingIP = false;
@@ -281,19 +274,18 @@ void clear_duringIP(void *pUserVoid)
int8_t is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid)
{
- struct WILC_WFI_priv *priv;
int8_t state = -1;
int i;
- priv = (struct WILC_WFI_priv *)pUserVoid;
if (u32LastScannedNtwrksCountShadow == 0) {
PRINT_D(CFG80211_DBG, "Starting Aging timer\n");
- WILC_TimerStart(&(hAgingTimer), AGING_TIME, pUserVoid, NULL);
+ hAgingTimer.data = (unsigned long)pUserVoid;
+ mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
state = -1;
} else {
/* Linear search for now */
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
- if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+ if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
pstrNetworkInfo->au8bssid, 6) == 0) {
state = i;
break;
@@ -305,11 +297,9 @@ int8_t is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid)
void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid, void *pJoinParams)
{
- struct WILC_WFI_priv *priv;
int8_t ap_found = is_network_in_shadow(pstrNetworkInfo, pUserVoid);
uint32_t ap_index = 0;
uint8_t rssi_index = 0;
- priv = (struct WILC_WFI_priv *)pUserVoid;
if (u32LastScannedNtwrksCountShadow >= MAX_NUM_SCANNED_NETWORKS_SHADOW) {
PRINT_D(CFG80211_DBG, "Shadow network reached its maximum limit\n");
@@ -334,10 +324,10 @@ void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid, vo
astrLastScannedNtwrksShadow[ap_index].u16CapInfo = pstrNetworkInfo->u16CapInfo;
astrLastScannedNtwrksShadow[ap_index].u8SsidLen = pstrNetworkInfo->u8SsidLen;
- WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].au8ssid,
+ memcpy(astrLastScannedNtwrksShadow[ap_index].au8ssid,
pstrNetworkInfo->au8ssid, pstrNetworkInfo->u8SsidLen);
- WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].au8bssid,
+ memcpy(astrLastScannedNtwrksShadow[ap_index].au8bssid,
pstrNetworkInfo->au8bssid, ETH_ALEN);
astrLastScannedNtwrksShadow[ap_index].u16BeaconPeriod = pstrNetworkInfo->u16BeaconPeriod;
@@ -347,10 +337,10 @@ void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid, vo
astrLastScannedNtwrksShadow[ap_index].u16IEsLen = pstrNetworkInfo->u16IEsLen;
astrLastScannedNtwrksShadow[ap_index].u64Tsf = pstrNetworkInfo->u64Tsf;
if (ap_found != -1)
- WILC_FREE(astrLastScannedNtwrksShadow[ap_index].pu8IEs);
+ kfree(astrLastScannedNtwrksShadow[ap_index].pu8IEs);
astrLastScannedNtwrksShadow[ap_index].pu8IEs =
- (u8 *)WILC_MALLOC(pstrNetworkInfo->u16IEsLen); /* will be deallocated by the WILC_WFI_CfgScan() function */
- WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].pu8IEs,
+ WILC_MALLOC(pstrNetworkInfo->u16IEsLen); /* will be deallocated by the WILC_WFI_CfgScan() function */
+ memcpy(astrLastScannedNtwrksShadow[ap_index].pu8IEs,
pstrNetworkInfo->pu8IEs, pstrNetworkInfo->u16IEsLen);
astrLastScannedNtwrksShadow[ap_index].u32TimeRcvdInScan = jiffies;
@@ -406,7 +396,7 @@ static void CfgScanResult(tenuScanEvent enuScanEvent, tstrNetworkInfo *pstrNetwo
WILC_NULLCHECK(s32Error, channel);
PRINT_INFO(CFG80211_DBG, "Network Info:: CHANNEL Frequency: %d, RSSI: %d, CapabilityInfo: %d,"
- "BeaconPeriod: %d \n", channel->center_freq, (((s32)pstrNetworkInfo->s8rssi) * 100),
+ "BeaconPeriod: %d\n", channel->center_freq, (((s32)pstrNetworkInfo->s8rssi) * 100),
pstrNetworkInfo->u16CapInfo, pstrNetworkInfo->u16BeaconPeriod);
if (pstrNetworkInfo->bNewNetwork == true) {
@@ -426,7 +416,7 @@ static void CfgScanResult(tenuScanEvent enuScanEvent, tstrNetworkInfo *pstrNetwo
/*P2P peers are sent to WPA supplicant and added to shadow table*/
- if (!(WILC_memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7))) {
+ if (!(memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7))) {
bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, pstrNetworkInfo->au8bssid, pstrNetworkInfo->u64Tsf, pstrNetworkInfo->u16CapInfo,
pstrNetworkInfo->u16BeaconPeriod, (const u8 *)pstrNetworkInfo->pu8IEs,
(size_t)pstrNetworkInfo->u16IEsLen, (((s32)pstrNetworkInfo->s8rssi) * 100), GFP_KERNEL);
@@ -441,8 +431,8 @@ static void CfgScanResult(tenuScanEvent enuScanEvent, tstrNetworkInfo *pstrNetwo
u32 i;
/* So this network is discovered before, we'll just update its RSSI */
for (i = 0; i < priv->u32RcvdChCount; i++) {
- if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid, pstrNetworkInfo->au8bssid, 6) == 0) {
- PRINT_D(CFG80211_DBG, "Update RSSI of %s \n", astrLastScannedNtwrksShadow[i].au8ssid);
+ if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid, pstrNetworkInfo->au8bssid, 6) == 0) {
+ PRINT_D(CFG80211_DBG, "Update RSSI of %s\n", astrLastScannedNtwrksShadow[i].au8ssid);
astrLastScannedNtwrksShadow[i].s8rssi = pstrNetworkInfo->s8rssi;
astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan = jiffies;
@@ -452,15 +442,14 @@ static void CfgScanResult(tenuScanEvent enuScanEvent, tstrNetworkInfo *pstrNetwo
}
}
} else if (enuScanEvent == SCAN_EVENT_DONE) {
- PRINT_D(CFG80211_DBG, "Scan Done[%p] \n", priv->dev);
- PRINT_D(CFG80211_DBG, "Refreshing Scan ... \n");
+ PRINT_D(CFG80211_DBG, "Scan Done[%p]\n", priv->dev);
+ PRINT_D(CFG80211_DBG, "Refreshing Scan ...\n");
refresh_scan(priv, 1, false);
- if (priv->u32RcvdChCount > 0) {
- PRINT_D(CFG80211_DBG, "%d Network(s) found \n", priv->u32RcvdChCount);
- } else {
- PRINT_D(CFG80211_DBG, "No networks found \n");
- }
+ if (priv->u32RcvdChCount > 0)
+ PRINT_D(CFG80211_DBG, "%d Network(s) found\n", priv->u32RcvdChCount);
+ else
+ PRINT_D(CFG80211_DBG, "No networks found\n");
down(&(priv->hSemScanReq));
@@ -477,7 +466,7 @@ static void CfgScanResult(tenuScanEvent enuScanEvent, tstrNetworkInfo *pstrNetwo
else if (enuScanEvent == SCAN_EVENT_ABORTED) {
down(&(priv->hSemScanReq));
- PRINT_D(CFG80211_DBG, "Scan Aborted \n");
+ PRINT_D(CFG80211_DBG, "Scan Aborted\n");
if (priv->pstrScanReq != NULL) {
update_scan_time(priv);
@@ -515,7 +504,7 @@ int WILC_WFI_Set_PMKSA(u8 *bssid, struct WILC_WFI_priv *priv)
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
- if (!WILC_memcmp(bssid, priv->pmkid_list.pmkidlist[i].bssid,
+ if (!memcmp(bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
PRINT_D(CFG80211_DBG, "PMKID successful comparison");
@@ -586,7 +575,7 @@ static void CfgConnectResult(tenuConnDisconnEvent enuConnDisconnEvent,
* = SUCCESSFUL_STATUSCODE, while mac status is MAC_DISCONNECTED (which means something wrong happened) */
u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE;
linux_wlan_set_bssid(priv->dev, NullBssid);
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
/*BugID_5457*/
/*Invalidate u8WLANChannel value on wlan0 disconnect*/
@@ -595,7 +584,7 @@ static void CfgConnectResult(tenuConnDisconnEvent enuConnDisconnEvent,
u8WLANChannel = INVALID_CHANNEL;
#endif
- PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d \n", u16ConnectStatus, u8MacStatus);
+ PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d\n", u16ConnectStatus, u8MacStatus);
}
if (u16ConnectStatus == WLAN_STATUS_SUCCESS) {
@@ -604,14 +593,14 @@ static void CfgConnectResult(tenuConnDisconnEvent enuConnDisconnEvent,
PRINT_INFO(CFG80211_DBG, "Connection Successful:: BSSID: %x%x%x%x%x%x\n", pstrConnectInfo->au8bssid[0],
pstrConnectInfo->au8bssid[1], pstrConnectInfo->au8bssid[2], pstrConnectInfo->au8bssid[3], pstrConnectInfo->au8bssid[4], pstrConnectInfo->au8bssid[5]);
- WILC_memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
+ memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
/* BugID_4209: if this network has expired in the scan results in the above nl80211 layer, refresh them here by calling
* cfg80211_inform_bss() with the last Scan results before calling cfg80211_connect_result() to avoid
* Linux kernel warning generated at the nl80211 layer */
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
- if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+ if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
pstrConnectInfo->au8bssid, ETH_ALEN) == 0) {
unsigned long now = jiffies;
@@ -652,9 +641,9 @@ static void CfgConnectResult(tenuConnDisconnEvent enuConnDisconnEvent,
u8P2Plocalrandom = 0x01;
u8P2Precvrandom = 0x00;
bWilc_ie = false;
- WILC_memset(priv->au8AssociatedBss, 0, ETH_ALEN);
+ memset(priv->au8AssociatedBss, 0, ETH_ALEN);
linux_wlan_set_bssid(priv->dev, NullBssid);
- WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+ memset(u8ConnectedSSID, 0, ETH_ALEN);
/*BugID_5457*/
/*Invalidate u8WLANChannel value on wlan0 disconnect*/
@@ -675,7 +664,8 @@ static void CfgConnectResult(tenuConnDisconnEvent enuConnDisconnEvent,
pstrDisconnectNotifInfo->u16reason = 1;
}
cfg80211_disconnected(dev, pstrDisconnectNotifInfo->u16reason, pstrDisconnectNotifInfo->ie,
- pstrDisconnectNotifInfo->ie_len, GFP_KERNEL);
+ pstrDisconnectNotifInfo->ie_len, false,
+ GFP_KERNEL);
}
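
The extra false argument tracks a cfg80211 API change: cfg80211_disconnected()
gained a locally_generated flag, and the driver reports these events as
initiated by the peer or firmware rather than by a local request. Call-site
sketch (report_disconnect is an illustrative wrapper):

#include <net/cfg80211.h>

static void report_disconnect(struct net_device *dev, u16 reason,
                              const u8 *ie, size_t ie_len)
{
        cfg80211_disconnected(dev, reason, ie, ie_len,
                              false /* locally_generated */, GFP_KERNEL);
}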
@@ -747,7 +737,7 @@ static int WILC_WFI_CfgScan(struct wiphy *wiphy, struct cfg80211_scan_request *r
priv->u32RcvdChCount = 0;
- host_int_set_wfi_drv_handler((u32)priv->hWILCWFIDrv);
+ host_int_set_wfi_drv_handler(priv->hWILCWFIDrv);
reset_shadow_found(priv);
@@ -777,20 +767,20 @@ static int WILC_WFI_CfgScan(struct wiphy *wiphy, struct cfg80211_scan_request *r
if (request->ssids[i].ssid != NULL && request->ssids[i].ssid_len != 0) {
strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid = WILC_MALLOC(request->ssids[i].ssid_len);
- WILC_memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
+ memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen = request->ssids[i].ssid_len;
} else {
- PRINT_D(CFG80211_DBG, "Received one NULL SSID \n");
+ PRINT_D(CFG80211_DBG, "Received one NULL SSID\n");
strHiddenNetwork.u8ssidnum -= 1;
}
}
- PRINT_D(CFG80211_DBG, "Trigger Scan Request \n");
+ PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
s32Error = host_int_scan(priv->hWILCWFIDrv, USER_SCAN, ACTIVE_SCAN,
au8ScanChanList, request->n_channels,
(const u8 *)request->ie, request->ie_len,
CfgScanResult, (void *)priv, &strHiddenNetwork);
} else {
- PRINT_D(CFG80211_DBG, "Trigger Scan Request \n");
+ PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
s32Error = host_int_scan(priv->hWILCWFIDrv, USER_SCAN, ACTIVE_SCAN,
au8ScanChanList, request->n_channels,
(const u8 *)request->ie, request->ie_len,
@@ -799,7 +789,7 @@ static int WILC_WFI_CfgScan(struct wiphy *wiphy, struct cfg80211_scan_request *r
} else {
PRINT_ER("Requested num of scanned channels is greater than the max, supported"
- " channels \n");
+ " channels\n");
}
if (s32Error != WILC_SUCCESS) {
@@ -842,21 +832,21 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
priv = wiphy_priv(wiphy);
pstrWFIDrv = (tstrWILC_WFIDrv *)(priv->hWILCWFIDrv);
- host_int_set_wfi_drv_handler((u32)priv->hWILCWFIDrv);
+ host_int_set_wfi_drv_handler(priv->hWILCWFIDrv);
PRINT_D(CFG80211_DBG, "Connecting to SSID [%s] on netdev [%p] host if [%p]\n", sme->ssid, dev, priv->hWILCWFIDrv);
#ifdef WILC_P2P
- if (!(WILC_strncmp(sme->ssid, "DIRECT-", 7))) {
+ if (!(strncmp(sme->ssid, "DIRECT-", 7))) {
PRINT_D(CFG80211_DBG, "Connected to Direct network,OBSS disabled\n");
pstrWFIDrv->u8P2PConnect = 1;
} else
pstrWFIDrv->u8P2PConnect = 0;
#endif
- PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d \n", sme->ssid, sme->auth_type);
+ PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d\n", sme->ssid, sme->auth_type);
for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
if ((sme->ssid_len == astrLastScannedNtwrksShadow[i].u8SsidLen) &&
- WILC_memcmp(astrLastScannedNtwrksShadow[i].au8ssid,
+ memcmp(astrLastScannedNtwrksShadow[i].au8ssid,
sme->ssid,
sme->ssid_len) == 0) {
PRINT_INFO(CFG80211_DBG, "Network with required SSID is found %s\n", sme->ssid);
@@ -868,7 +858,7 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
} else {
/* BSSID is also passed from the user, so decision of matching
* should consider also this passed BSSID */
- if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+ if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
sme->bssid,
ETH_ALEN) == 0) {
PRINT_INFO(CFG80211_DBG, "BSSID is passed from the user and matched\n");
@@ -898,8 +888,8 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
}
priv->WILC_WFI_wep_default = 0;
- WILC_memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
- WILC_memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
+ memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
+ memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
PRINT_INFO(CFG80211_DBG, "sme->crypto.wpa_versions=%x\n", sme->crypto.wpa_versions);
PRINT_INFO(CFG80211_DBG, "sme->crypto.cipher_group=%x\n", sme->crypto.cipher_group);
@@ -928,7 +918,7 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
}
priv->WILC_WFI_wep_default = sme->key_idx;
priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
- WILC_memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
+ memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
/*BugID_5137*/
g_key_wep_params.key_len = sme->key_len;
@@ -946,7 +936,7 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
priv->WILC_WFI_wep_default = sme->key_idx;
priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
- WILC_memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
+ memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
/*BugID_5137*/
g_key_wep_params.key_len = sme->key_len;
@@ -1057,7 +1047,7 @@ static int WILC_WFI_CfgConnect(struct wiphy *wiphy, struct net_device *dev,
tenuAuth_type, pstrNetworkInfo->u8channel,
pstrNetworkInfo->pJoinParams);
if (s32Error != WILC_SUCCESS) {
- PRINT_ER("host_int_set_join_req(): Error(%d) \n", s32Error);
+ PRINT_ER("host_int_set_join_req(): Error(%d)\n", s32Error);
s32Error = -ENOENT;
goto done;
}
@@ -1162,7 +1152,7 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
priv->WILC_WFI_wep_default = key_index;
priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
- WILC_memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
+ memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
PRINT_D(CFG80211_DBG, "Adding AP WEP Default key Idx = %d\n", key_index);
PRINT_D(CFG80211_DBG, "Adding AP WEP Key len= %d\n", params->key_len);
@@ -1181,10 +1171,10 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
break;
}
#endif
- if (WILC_memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
+ if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
priv->WILC_WFI_wep_default = key_index;
priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
- WILC_memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
+ memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
PRINT_D(CFG80211_DBG, "Adding WEP Default key Idx = %d\n", key_index);
PRINT_D(CFG80211_DBG, "Adding WEP Key length = %d\n", params->key_len);
@@ -1203,21 +1193,20 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
if (priv->wdev->iftype == NL80211_IFTYPE_AP || priv->wdev->iftype == NL80211_IFTYPE_P2P_GO) {
if (priv->wilc_gtk[key_index] == NULL) {
- priv->wilc_gtk[key_index] = (struct wilc_wfi_key *)WILC_MALLOC(sizeof(struct wilc_wfi_key));
+ priv->wilc_gtk[key_index] = WILC_MALLOC(sizeof(struct wilc_wfi_key));
priv->wilc_gtk[key_index]->key = NULL;
priv->wilc_gtk[key_index]->seq = NULL;
}
if (priv->wilc_ptk[key_index] == NULL) {
- priv->wilc_ptk[key_index] = (struct wilc_wfi_key *)WILC_MALLOC(sizeof(struct wilc_wfi_key));
+ priv->wilc_ptk[key_index] = WILC_MALLOC(sizeof(struct wilc_wfi_key));
priv->wilc_ptk[key_index]->key = NULL;
priv->wilc_ptk[key_index]->seq = NULL;
}
- if (!pairwise)
- {
+ if (!pairwise) {
if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
u8gmode = ENCRYPT_ENABLED | WPA | TKIP;
else
@@ -1233,18 +1222,18 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
}
/* if there was a previous allocation for this index's key, free it and allocate again */
if (priv->wilc_gtk[key_index]->key)
- WILC_FREE(priv->wilc_gtk[key_index]->key);
+ kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = (u8 *)WILC_MALLOC(params->key_len);
- WILC_memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
+ priv->wilc_gtk[key_index]->key = WILC_MALLOC(params->key_len);
+ memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
/* if there was a previous allocation for this index's seq, free it and allocate again */
if (priv->wilc_gtk[key_index]->seq)
- WILC_FREE(priv->wilc_gtk[key_index]->seq);
+ kfree(priv->wilc_gtk[key_index]->seq);
if ((params->seq_len) > 0) {
- priv->wilc_gtk[key_index]->seq = (u8 *)WILC_MALLOC(params->seq_len);
- WILC_memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
+ priv->wilc_gtk[key_index]->seq = WILC_MALLOC(params->seq_len);
+ memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
}
priv->wilc_gtk[key_index]->cipher = params->cipher;
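The free-then-WILC_MALLOC()-then-memcpy() sequences above could be collapsed with kmemdup(), which allocates and copies in one step. A sketch only, not what this patch does; it assumes the driver's struct wilc_wfi_key layout and adds the NULL check the original omits:

#include <linux/slab.h>

static int store_gtk_key(struct wilc_wfi_key *gtk, const u8 *key,
			 size_t key_len)
{
	kfree(gtk->key);                 /* kfree(NULL) is a no-op */
	gtk->key = kmemdup(key, key_len, GFP_KERNEL);
	return gtk->key ? 0 : -ENOMEM;   /* the original never checks */
}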
@@ -1279,15 +1268,15 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
}
if (priv->wilc_ptk[key_index]->key)
- WILC_FREE(priv->wilc_ptk[key_index]->key);
+ kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = (u8 *)WILC_MALLOC(params->key_len);
+ priv->wilc_ptk[key_index]->key = WILC_MALLOC(params->key_len);
if (priv->wilc_ptk[key_index]->seq)
- WILC_FREE(priv->wilc_ptk[key_index]->seq);
+ kfree(priv->wilc_ptk[key_index]->seq);
if ((params->seq_len) > 0)
- priv->wilc_ptk[key_index]->seq = (u8 *)WILC_MALLOC(params->seq_len);
+ priv->wilc_ptk[key_index]->seq = WILC_MALLOC(params->seq_len);
if (INFO) {
for (i = 0; i < params->key_len; i++)
@@ -1297,10 +1286,10 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
}
- WILC_memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
+ memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
if ((params->seq_len) > 0)
- WILC_memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
+ memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
priv->wilc_ptk[key_index]->cipher = params->cipher;
priv->wilc_ptk[key_index]->key_len = params->key_len;
@@ -1315,8 +1304,7 @@ static int WILC_WFI_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 k
{
u8mode = 0;
- if (!pairwise)
- {
+ if (!pairwise) {
if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
/* swap the tx mic by rx mic */
pu8RxMic = params->key + 24;
@@ -1437,7 +1425,7 @@ static int WILC_WFI_del_key(struct wiphy *wiphy, struct net_device *netdev,
/*Delete saved WEP keys params, if any*/
if (g_key_wep_params.key != NULL) {
- WILC_FREE(g_key_wep_params.key);
+ kfree(g_key_wep_params.key);
g_key_wep_params.key = NULL;
}
@@ -1448,16 +1436,16 @@ static int WILC_WFI_del_key(struct wiphy *wiphy, struct net_device *netdev,
if (priv->wilc_gtk[key_index]->key != NULL) {
- WILC_FREE(priv->wilc_gtk[key_index]->key);
+ kfree(priv->wilc_gtk[key_index]->key);
priv->wilc_gtk[key_index]->key = NULL;
}
if (priv->wilc_gtk[key_index]->seq) {
- WILC_FREE(priv->wilc_gtk[key_index]->seq);
+ kfree(priv->wilc_gtk[key_index]->seq);
priv->wilc_gtk[key_index]->seq = NULL;
}
- WILC_FREE(priv->wilc_gtk[key_index]);
+ kfree(priv->wilc_gtk[key_index]);
priv->wilc_gtk[key_index] = NULL;
}
@@ -1466,35 +1454,35 @@ static int WILC_WFI_del_key(struct wiphy *wiphy, struct net_device *netdev,
if (priv->wilc_ptk[key_index]->key) {
- WILC_FREE(priv->wilc_ptk[key_index]->key);
+ kfree(priv->wilc_ptk[key_index]->key);
priv->wilc_ptk[key_index]->key = NULL;
}
if (priv->wilc_ptk[key_index]->seq) {
- WILC_FREE(priv->wilc_ptk[key_index]->seq);
+ kfree(priv->wilc_ptk[key_index]->seq);
priv->wilc_ptk[key_index]->seq = NULL;
}
- WILC_FREE(priv->wilc_ptk[key_index]);
+ kfree(priv->wilc_ptk[key_index]);
priv->wilc_ptk[key_index] = NULL;
}
#endif
/*Delete saved PTK and GTK keys params, if any*/
if (g_key_ptk_params.key != NULL) {
- WILC_FREE(g_key_ptk_params.key);
+ kfree(g_key_ptk_params.key);
g_key_ptk_params.key = NULL;
}
if (g_key_ptk_params.seq != NULL) {
- WILC_FREE(g_key_ptk_params.seq);
+ kfree(g_key_ptk_params.seq);
g_key_ptk_params.seq = NULL;
}
if (g_key_gtk_params.key != NULL) {
- WILC_FREE(g_key_gtk_params.key);
+ kfree(g_key_gtk_params.key);
g_key_gtk_params.key = NULL;
}
if (g_key_gtk_params.seq != NULL) {
- WILC_FREE(g_key_gtk_params.seq);
+ kfree(g_key_gtk_params.seq);
g_key_gtk_params.seq = NULL;
}
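Since kfree(NULL) is guaranteed to be a no-op, every "if (ptr != NULL)" guard in the cleanup above is redundant. A condensed sketch (free_key_material() is a hypothetical helper over the driver's struct wilc_wfi_key):

static void free_key_material(struct wilc_wfi_key *k)
{
	kfree(k->key);    /* safe even when k->key is NULL */
	kfree(k->seq);
	k->key = NULL;
	k->seq = NULL;
}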
@@ -1503,7 +1491,7 @@ static int WILC_WFI_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
if (key_index >= 0 && key_index <= 3) {
- WILC_memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
+ memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
priv->WILC_WFI_wep_key_len[key_index] = 0;
PRINT_D(CFG80211_DBG, "Removing WEP key with index = %d\n", key_index);
@@ -1588,7 +1576,7 @@ static int WILC_WFI_set_default_key(struct wiphy *wiphy, struct net_device *netd
priv = wiphy_priv(wiphy);
- PRINT_D(CFG80211_DBG, "Setting default key with idx = %d \n", key_index);
+ PRINT_D(CFG80211_DBG, "Setting default key with idx = %d\n", key_index);
if (key_index != priv->WILC_WFI_wep_default) {
@@ -1633,7 +1621,6 @@ static int WILC_WFI_dump_survey(struct wiphy *wiphy, struct net_device *netdev,
* @version 1.0
*/
-extern uint32_t Statisitcs_totalAcks, Statisitcs_DroppedAcks;
static int WILC_WFI_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo)
{
@@ -1689,7 +1676,7 @@ static int WILC_WFI_get_station(struct wiphy *wiphy, struct net_device *dev,
* kernel version 3.0.0
*/
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
- BIT( NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_RX_PACKETS) |
BIT(NL80211_STA_INFO_TX_PACKETS) |
BIT(NL80211_STA_INFO_TX_FAILED) |
BIT(NL80211_STA_INFO_TX_BITRATE);
@@ -1701,11 +1688,10 @@ static int WILC_WFI_get_station(struct wiphy *wiphy, struct net_device *dev,
sinfo->txrate.legacy = strStatistics.u8LinkSpeed * 10;
#ifdef TCP_ENHANCEMENTS
- if ((strStatistics.u8LinkSpeed > TCP_ACK_FILTER_LINK_SPEED_THRESH) && (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED)) {
+ if ((strStatistics.u8LinkSpeed > TCP_ACK_FILTER_LINK_SPEED_THRESH) && (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED))
Enable_TCP_ACK_Filter(true);
- } else if (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED) {
+ else if (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED)
Enable_TCP_ACK_Filter(false);
- }
#endif
PRINT_D(CORECONFIG_DBG, "*** stats[%d][%d][%d][%d][%d]\n", sinfo->signal, sinfo->rx_packets, sinfo->tx_packets,
@@ -1826,7 +1812,7 @@ static int WILC_WFI_set_wiphy_params(struct wiphy *wiphy, u32 changed)
priv = wiphy_priv(wiphy);
pstrCfgParamVal.u32SetCfgFlag = 0;
- PRINT_D(CFG80211_DBG, "Setting Wiphy params \n");
+ PRINT_D(CFG80211_DBG, "Setting Wiphy params\n");
if (changed & WIPHY_PARAM_RETRY_SHORT) {
PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_SHORT %d\n",
@@ -1909,7 +1895,7 @@ static int WILC_WFI_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
- if (!WILC_memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
+ if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
/*If bssid already exists and pmkid value needs to reset*/
flag = PMKID_FOUND;
@@ -1919,9 +1905,9 @@ static int WILC_WFI_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
}
if (i < WILC_MAX_NUM_PMKIDS) {
PRINT_D(CFG80211_DBG, "Setting PMKID in private structure\n");
- WILC_memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
+ memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
ETH_ALEN);
- WILC_memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
+ memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
PMKID_LEN);
if (!(flag == PMKID_FOUND))
priv->pmkid_list.numpmkid++;
@@ -1959,11 +1945,11 @@ static int WILC_WFI_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
PRINT_D(CFG80211_DBG, "Deleting PMKSA keys\n");
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
- if (!WILC_memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
+ if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
/*If bssid is found, reset the values*/
PRINT_D(CFG80211_DBG, "Reseting PMKID values\n");
- WILC_memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(tstrHostIFpmkid));
+ memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(tstrHostIFpmkid));
flag = PMKID_FOUND;
break;
}
@@ -1971,10 +1957,10 @@ static int WILC_WFI_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
if (i < priv->pmkid_list.numpmkid && priv->pmkid_list.numpmkid > 0) {
for (; i < (priv->pmkid_list.numpmkid - 1); i++) {
- WILC_memcpy(priv->pmkid_list.pmkidlist[i].bssid,
+ memcpy(priv->pmkid_list.pmkidlist[i].bssid,
priv->pmkid_list.pmkidlist[i + 1].bssid,
ETH_ALEN);
- WILC_memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
+ memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
priv->pmkid_list.pmkidlist[i].pmkid,
PMKID_LEN);
}
@@ -2002,7 +1988,7 @@ static int WILC_WFI_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
PRINT_D(CFG80211_DBG, "Flushing PMKID key values\n");
/*Get cached PMKIDs and set all to zero*/
- WILC_memset(&priv->pmkid_list, 0, sizeof(tstrHostIFpmkidAttr));
+ memset(&priv->pmkid_list, 0, sizeof(tstrHostIFpmkidAttr));
return 0;
}
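One caveat in the del_pmksa compaction loop above: the bssid is copied from entry i + 1, but the pmkid is copied from entry i, i.e. onto itself, which looks like a latent bug rather than intent. A hedged sketch of the presumably intended compaction (pmkid_compact() is a hypothetical helper):

static void pmkid_compact(struct WILC_WFI_priv *priv, u32 i)
{
	for (; i < priv->pmkid_list.numpmkid - 1; i++) {
		memcpy(priv->pmkid_list.pmkidlist[i].bssid,
		       priv->pmkid_list.pmkidlist[i + 1].bssid, ETH_ALEN);
		memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
		       priv->pmkid_list.pmkidlist[i + 1].pmkid, PMKID_LEN);
	}
	priv->pmkid_list.numpmkid--;
}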
@@ -2074,11 +2060,10 @@ void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
}
#endif /* USE_SUPPLICANT_GO_INTENT */
- if (buf[index] == CHANLIST_ATTR_ID) {
+ if (buf[index] == CHANLIST_ATTR_ID)
channel_list_attr_index = index;
- } else if (buf[index] == OPERCHAN_ATTR_ID) {
+ else if (buf[index] == OPERCHAN_ATTR_ID)
op_channel_attr_index = index;
- }
index += buf[index + 1] + 3; /* ID,Length byte */
}
@@ -2160,11 +2145,10 @@ void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftype)
}
#endif
- if (buf[index] == CHANLIST_ATTR_ID) {
+ if (buf[index] == CHANLIST_ATTR_ID)
channel_list_attr_index = index;
- } else if (buf[index] == OPERCHAN_ATTR_ID) {
+ else if (buf[index] == OPERCHAN_ATTR_ID)
op_channel_attr_index = index;
- }
index += buf[index + 1] + 3; /* ID,Length byte */
}
@@ -2218,7 +2202,7 @@ void WILC_WFI_p2p_rx (struct net_device *dev, uint8_t *buff, uint32_t size)
pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
/* Get WILC header */
- WILC_memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
+ memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
/* The packet offset field contains info about what type of management */
/* frame we are dealing with and the ack status */
@@ -2270,11 +2254,11 @@ void WILC_WFI_p2p_rx (struct net_device *dev, uint8_t *buff, uint32_t size)
case PUBLIC_ACT_VENDORSPEC:
/*Now we have a public action vendor specific action frame, check if it's a p2p public action frame
* based on the standard it should have the p2p_oui attribute with the following values 50 6f 9A 09*/
- if (!WILC_memcmp(u8P2P_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) {
+ if (!memcmp(u8P2P_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) {
if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP)) {
if (!bWilc_ie) {
for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++) {
- if (!WILC_memcmp(u8P2P_vendorspec, &buff[i], 6)) {
+ if (!memcmp(u8P2P_vendorspec, &buff[i], 6)) {
u8P2Precvrandom = buff[i + 6];
bWilc_ie = true;
PRINT_D(GENERIC_DBG, "WILC Vendor specific IE:%02x\n", u8P2Precvrandom);
@@ -2287,7 +2271,7 @@ void WILC_WFI_p2p_rx (struct net_device *dev, uint8_t *buff, uint32_t size)
if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
|| buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
- if (buff[i] == P2PELEM_ATTR_ID && !(WILC_memcmp(u8P2P_oui, &buff[i + 2], 4))) {
+ if (buff[i] == P2PELEM_ATTR_ID && !(memcmp(u8P2P_oui, &buff[i + 2], 4))) {
WILC_WFI_CfgParseRxAction(&buff[i + 6], size - (i + 6));
break;
}
@@ -2351,7 +2335,7 @@ static void WILC_WFI_RemainOnChannelReady(void *pUserVoid)
struct WILC_WFI_priv *priv;
priv = (struct WILC_WFI_priv *)pUserVoid;
- PRINT_D(HOSTINF_DBG, "Remain on channel ready \n");
+ PRINT_D(HOSTINF_DBG, "Remain on channel ready\n");
priv->bInP2PlistenState = true;
@@ -2379,7 +2363,7 @@ static void WILC_WFI_RemainOnChannelExpired(void *pUserVoid, u32 u32SessionID)
/*BugID_5477*/
if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) {
- PRINT_D(GENERIC_DBG, "Remain on channel expired \n");
+ PRINT_D(GENERIC_DBG, "Remain on channel expired\n");
priv->bInP2PlistenState = false;
@@ -2485,7 +2469,7 @@ static int WILC_WFI_cancel_remain_on_channel(struct wiphy *wiphy,
*/
void WILC_WFI_add_wilcvendorspec(u8 *buff)
{
- WILC_memcpy(buff, u8P2P_vendorspec, sizeof(u8P2P_vendorspec));
+ memcpy(buff, u8P2P_vendorspec, sizeof(u8P2P_vendorspec));
}
/**
* @brief WILC_WFI_mgmt_tx_frame
@@ -2528,17 +2512,17 @@ int WILC_WFI_mgmt_tx(struct wiphy *wiphy,
if (ieee80211_is_mgmt(mgmt->frame_control)) {
/*mgmt frame allocation*/
- mgmt_tx = (struct p2p_mgmt_data *)WILC_MALLOC(sizeof(struct p2p_mgmt_data));
+ mgmt_tx = WILC_MALLOC(sizeof(struct p2p_mgmt_data));
if (mgmt_tx == NULL) {
PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
return WILC_FAIL;
}
- mgmt_tx->buff = (char *)WILC_MALLOC(buf_len);
+ mgmt_tx->buff = WILC_MALLOC(buf_len);
if (mgmt_tx->buff == NULL) {
PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
return WILC_FAIL;
}
- WILC_memcpy(mgmt_tx->buff, buf, len);
+ memcpy(mgmt_tx->buff, buf, len);
mgmt_tx->size = len;
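Note that the error path above returns WILC_FAIL without releasing the already-allocated mgmt_tx, leaking it. A leak-free sketch of the same step:

	mgmt_tx->buff = WILC_MALLOC(buf_len);
	if (mgmt_tx->buff == NULL) {
		PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
		kfree(mgmt_tx);          /* don't leak the containing struct */
		return WILC_FAIL;
	}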
@@ -2583,7 +2567,7 @@ int WILC_WFI_mgmt_tx(struct wiphy *wiphy,
{
/*Now we have a public action vendor specific action frame, check if it's a p2p public action frame
* based on the standard it should have the p2p_oui attribute with the following values 50 6f 9A 09*/
- if (!WILC_memcmp(u8P2P_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) {
+ if (!memcmp(u8P2P_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) {
/*For a connection between two WILCs, generate a random number to determine which will be the GO*/
if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP)) {
if (u8P2Plocalrandom == 1 && u8P2Precvrandom < u8P2Plocalrandom) {
@@ -2600,7 +2584,7 @@ int WILC_WFI_mgmt_tx(struct wiphy *wiphy,
/*Search for the P2P information element; after the public action subtype there's a byte for the dialog token, skip that*/
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
- if (buf[i] == P2PELEM_ATTR_ID && !(WILC_memcmp(u8P2P_oui, &buf[i + 2], 4))) {
+ if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(u8P2P_oui, &buf[i + 2], 4))) {
if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
WILC_WFI_CfgParseTxAction(&mgmt_tx->buff[i + 6], len - (i + 6), true, nic->iftype);
@@ -2862,7 +2846,7 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
g_obtainingIP = false;
- WILC_TimerStop(&hDuringIpTimer, NULL);
+ del_timer(&hDuringIpTimer);
PRINT_D(GENERIC_DBG, "Changing virtual interface, enable scan\n");
#endif
/*BugID_5137*/
@@ -2886,7 +2870,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
/*Remove the entries of the previously connected clients*/
memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN);
- #ifndef SIMULATION
#ifdef WILC_P2P
interface_type = nic->iftype;
nic->iftype = STATION_MODE;
@@ -2907,15 +2890,15 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
/*Setting interface 1 drv handler and mac address in newly downloaded FW*/
host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
- host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
host_int_set_operation_mode(priv->hWILCWFIDrv, STATION_MODE);
/*Add saved WEP keys, if any*/
if (g_wep_keys_saved) {
- host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key_idx);
- host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key,
g_key_wep_params.key_len,
g_key_wep_params.key_idx);
@@ -2964,7 +2947,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
host_int_set_power_mgmt(priv->hWILCWFIDrv, 1, 0);
}
#endif
- #endif
break;
case NL80211_IFTYPE_P2P_CLIENT:
@@ -2979,7 +2961,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
priv->wdev->iftype = type;
nic->monitor_flag = 0;
- #ifndef SIMULATION
#ifdef WILC_P2P
PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
@@ -2995,15 +2976,15 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
g_wilc_initialized = 1;
host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
- host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
host_int_set_operation_mode(priv->hWILCWFIDrv, STATION_MODE);
/*Add saved WEP keys, if any*/
if (g_wep_keys_saved) {
- host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key_idx);
- host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key,
g_key_wep_params.key_len,
g_key_wep_params.key_idx);
@@ -3053,7 +3034,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
}
}
#endif
- #endif
break;
case NL80211_IFTYPE_AP:
@@ -3064,7 +3044,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
nic->iftype = AP_MODE;
PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
- #ifndef SIMULATION
PRINT_D(HOSTAPD_DBG, "Downloading AP firmware\n");
linux_wlan_get_firmware(nic);
#ifdef WILC_P2P
@@ -3086,7 +3065,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
}
}
#endif
- #endif
break;
case NL80211_IFTYPE_P2P_GO:
@@ -3094,7 +3072,7 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
g_obtainingIP = true;
- WILC_TimerStart(&hDuringIpTimer, duringIP_TIME, NULL, NULL);
+ mod_timer(&hDuringIpTimer, jiffies + msecs_to_jiffies(duringIP_TIME));
#endif
host_int_set_power_mgmt(priv->hWILCWFIDrv, 0, 0);
/*BugID_5222*/
@@ -3110,7 +3088,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
- #ifndef SIMULATION
#ifdef WILC_P2P
PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
@@ -3127,15 +3104,15 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
/*Setting interface 1 drv handler and mac address in newly downloaded FW*/
host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
- host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
host_int_set_operation_mode(priv->hWILCWFIDrv, AP_MODE);
/*Add saved WEP keys, if any*/
if (g_wep_keys_saved) {
- host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key_idx);
- host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+ host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
g_key_wep_params.key,
g_key_wep_params.key_len,
g_key_wep_params.key_idx);
@@ -3185,7 +3162,6 @@ static int WILC_WFI_change_virt_intf(struct wiphy *wiphy, struct net_device *dev
}
}
#endif
- #endif
break;
default:
@@ -3234,7 +3210,7 @@ static int WILC_WFI_start_ap(struct wiphy *wiphy, struct net_device *dev,
priv = wiphy_priv(wiphy);
PRINT_D(HOSTAPD_DBG, "Starting ap\n");
- PRINT_D(HOSTAPD_DBG, "Interval = %d \n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
+ PRINT_D(HOSTAPD_DBG, "Interval = %d\n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
settings->beacon_interval, settings->dtim_period, beacon->head_len, beacon->tail_len);
s32Error = WILC_WFI_CfgSetChannel(wiphy, &settings->chandef);
@@ -3353,7 +3329,7 @@ static int WILC_WFI_add_station(struct wiphy *wiphy, struct net_device *dev,
{
s32 s32Error = WILC_SUCCESS;
struct WILC_WFI_priv *priv;
- tstrWILC_AddStaParam strStaParams = {{0}};
+ tstrWILC_AddStaParam strStaParams = { {0} };
perInterface_wlan_t *nic;
@@ -3365,8 +3341,8 @@ static int WILC_WFI_add_station(struct wiphy *wiphy, struct net_device *dev,
if (nic->iftype == AP_MODE || nic->iftype == GO_MODE) {
#ifndef WILC_FULLY_HOSTING_AP
- WILC_memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
- WILC_memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
+ memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
+ memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
strStaParams.u16AssocID = params->aid;
strStaParams.u8NumRates = params->supported_rates_len;
strStaParams.pu8Rates = params->supported_rates;
@@ -3384,7 +3360,7 @@ static int WILC_WFI_add_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.bIsHTSupported = true;
strStaParams.u16HTCapInfo = params->ht_capa->cap_info;
strStaParams.u8AmpduParams = params->ht_capa->ampdu_params_info;
- WILC_memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
+ memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
strStaParams.u16HTExtParams = params->ht_capa->extended_ht_cap_info;
strStaParams.u32TxBeamformingCap = params->ht_capa->tx_BF_cap_info;
strStaParams.u8ASELCap = params->ht_capa->antenna_selection_info;
@@ -3407,7 +3383,7 @@ static int WILC_WFI_add_station(struct wiphy *wiphy, struct net_device *dev,
#else
PRINT_D(CFG80211_DBG, "Adding station parameters %d\n", params->aid);
- WILC_memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
+ memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][0], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][1], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][2], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][3], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][4],
priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][5]);
@@ -3450,7 +3426,7 @@ static int WILC_WFI_del_station(struct wiphy *wiphy, struct net_device *dev,
if (mac == NULL) {
- PRINT_D(HOSTAPD_DBG, "All associated stations \n");
+ PRINT_D(HOSTAPD_DBG, "All associated stations\n");
s32Error = host_int_del_allstation(priv->hWILCWFIDrv, priv->assoc_stainfo.au8Sta_AssociatedBss);
} else {
PRINT_D(HOSTAPD_DBG, "With mac address: %x%x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -3484,7 +3460,7 @@ static int WILC_WFI_change_station(struct wiphy *wiphy, struct net_device *dev,
{
s32 s32Error = WILC_SUCCESS;
struct WILC_WFI_priv *priv;
- tstrWILC_AddStaParam strStaParams = {{0}};
+ tstrWILC_AddStaParam strStaParams = { {0} };
perInterface_wlan_t *nic;
@@ -3498,7 +3474,7 @@ static int WILC_WFI_change_station(struct wiphy *wiphy, struct net_device *dev,
if (nic->iftype == AP_MODE || nic->iftype == GO_MODE) {
#ifndef WILC_FULLY_HOSTING_AP
- WILC_memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
+ memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
strStaParams.u16AssocID = params->aid;
strStaParams.u8NumRates = params->supported_rates_len;
strStaParams.pu8Rates = params->supported_rates;
@@ -3514,7 +3490,7 @@ static int WILC_WFI_change_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.bIsHTSupported = true;
strStaParams.u16HTCapInfo = params->ht_capa->cap_info;
strStaParams.u8AmpduParams = params->ht_capa->ampdu_params_info;
- WILC_memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
+ memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
strStaParams.u16HTExtParams = params->ht_capa->extended_ht_cap_info;
strStaParams.u32TxBeamformingCap = params->ht_capa->tx_BF_cap_info;
strStaParams.u8ASELCap = params->ht_capa->antenna_selection_info;
@@ -3581,13 +3557,8 @@ struct wireless_dev *WILC_WFI_add_virt_intf(struct wiphy *wiphy, const char *nam
new_ifc = WILC_WFI_init_mon_interface(name, nic->wilc_netdev);
if (new_ifc != NULL) {
PRINT_D(HOSTAPD_DBG, "Setting monitor flag in private structure\n");
- #ifdef SIMULATION
- priv = netdev_priv(priv->wdev->netdev);
- priv->monitor_flag = 1;
- #else
nic = netdev_priv(priv->wdev->netdev);
nic->monitor_flag = 1;
- #endif
} else
PRINT_ER("Error in initializing monitor interface\n ");
}
@@ -3856,9 +3827,9 @@ int WILC_WFI_InitHostInt(struct net_device *net)
PRINT_D(INIT_DBG, "Host[%p][%p]\n", net, net->ieee80211_ptr);
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- s32Error = WILC_TimerCreate(&(hAgingTimer), remove_network_from_shadow, NULL);
+ setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
- s32Error = WILC_TimerCreate(&(hDuringIpTimer), clear_duringIP, NULL);
+ setup_timer(&hDuringIpTimer, clear_duringIP, 0);
#endif
}
op_ifcs++;
@@ -3908,7 +3879,7 @@ int WILC_WFI_DeInitHostInt(struct net_device *net)
#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
if (op_ifcs == 0) {
PRINT_D(CORECONFIG_DBG, "destroy during ip\n");
- WILC_TimerDestroy(&hDuringIpTimer, NULL);
+ del_timer_sync(&hDuringIpTimer);
}
#endif
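Taken together, the timer changes in this file map the driver's old OS-wrapper calls onto the stock 4.x-era kernel timer API (setup_timer() and the unsigned-long callback form were later superseded by timer_setup()). A rough mapping, sketched:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

static void example_timer_cb(unsigned long data)
{
	/* deferred work runs here, in softirq context */
}

static void timer_mapping_sketch(void)
{
	setup_timer(&example_timer, example_timer_cb, 0);   /* WILC_TimerCreate  */
	mod_timer(&example_timer,
		  jiffies + msecs_to_jiffies(100));         /* WILC_TimerStart   */
	del_timer(&example_timer);      /* WILC_TimerStop: callback may still run */
	del_timer_sync(&example_timer); /* WILC_TimerDestroy: waits for callback  */
}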
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index c25350cb58c8..97b663b7fd14 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -123,7 +123,7 @@ struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_devi
#ifdef TCP_ENHANCEMENTS
#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
#define DEFAULT_LINK_SPEED 72
-extern void Enable_TCP_ACK_Filter(bool value);
+void Enable_TCP_ACK_Filter(bool value);
#endif
#endif
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.c b/drivers/staging/wilc1000/wilc_wfi_netdevice.c
deleted file mode 100644
index ab66ce4bd790..000000000000
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.c
+++ /dev/null
@@ -1,951 +0,0 @@
-/*!
- * @file wilc_wfi_netdevice.c
- * @brief File Operations OS wrapper functionality
- * @author mdaftedar
- * @sa wilc_wfi_netdevice.h
- * @date 01 MAR 2012
- * @version 1.0
- */
-
-#ifdef SIMULATION
-
-#include "wilc_wfi_cfgoperations.h"
-#include "host_interface.h"
-
-
-MODULE_AUTHOR("Mai Daftedar");
-MODULE_LICENSE("Dual BSD/GPL");
-
-
-struct net_device *WILC_WFI_devs[2];
-
-/*
- * Transmitter lockup simulation, normally disabled.
- */
-static int lockup;
-module_param(lockup, int, 0);
-
-static int timeout = WILC_WFI_TIMEOUT;
-module_param(timeout, int, 0);
-
-/*
- * Do we run in NAPI mode?
- */
-static int use_napi ;
-module_param(use_napi, int, 0);
-
-
-/*
- * A structure representing an in-flight packet.
- */
-struct WILC_WFI_packet {
- struct WILC_WFI_packet *next;
- struct net_device *dev;
- int datalen;
- u8 data[ETH_DATA_LEN];
-};
-
-
-
-int pool_size = 8;
-module_param(pool_size, int, 0);
-
-
-static void WILC_WFI_TxTimeout(struct net_device *dev);
-static void (*WILC_WFI_Interrupt)(int, void *, struct pt_regs *);
-
-/**
- * @brief WILC_WFI_SetupPool
- * @details Set up a device's packet pool.
- * @param[in] struct net_device *dev : Network Device Pointer
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_SetupPool(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- int i;
- struct WILC_WFI_packet *pkt;
-
- priv->ppool = NULL;
- for (i = 0; i < pool_size; i++) {
- pkt = kmalloc (sizeof (struct WILC_WFI_packet), GFP_KERNEL);
- if (pkt == NULL) {
- PRINT_D(RX_DBG, "Ran out of memory allocating packet pool\n");
- return;
- }
- pkt->dev = dev;
- pkt->next = priv->ppool;
- priv->ppool = pkt;
- }
-}
-
-/**
- * @brief WILC_WFI_TearDownPool
- * @details Internal cleanup function that's called after the network device
- * driver is unregistered
- * @param[in] struct net_device *dev : Network Device Driver
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_TearDownPool(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- struct WILC_WFI_packet *pkt;
-
- while ((pkt = priv->ppool)) {
- priv->ppool = pkt->next;
- kfree (pkt);
- /* FIXME - in-flight packets ? */
- }
-}
-
-/**
- * @brief WILC_WFI_GetTxBuffer
- * @details Buffer/pool management
- * @param[in] net_device *dev : Network Device Driver Structure
- * @return struct WILC_WFI_packet
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-struct WILC_WFI_packet *WILC_WFI_GetTxBuffer(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- unsigned long flags;
- struct WILC_WFI_packet *pkt;
-
- spin_lock_irqsave(&priv->lock, flags);
- pkt = priv->ppool;
- priv->ppool = pkt->next;
- if (priv->ppool == NULL) {
- PRINT_INFO(RX_DBG, "Pool empty\n");
- netif_stop_queue(dev);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
- return pkt;
-}
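One hazard in the function above: pkt->next is dereferenced even when the pool is already empty. A NULL-safe sketch of the same routine (hypothetical variant, not part of the original file):

struct WILC_WFI_packet *wilc_wfi_get_tx_buffer_safe(struct net_device *dev)
{
	struct WILC_WFI_priv *priv = netdev_priv(dev);
	struct WILC_WFI_packet *pkt;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	pkt = priv->ppool;
	if (pkt)
		priv->ppool = pkt->next;
	if (!priv->ppool) {
		PRINT_INFO(RX_DBG, "Pool empty\n");
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return pkt;                 /* callers must tolerate NULL */
}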
-/**
- * @brief WILC_WFI_ReleaseBuffer
- * @details Buffer/pool management
- * @param[in] WILC_WFI_packet *pkt : Structure holding in-flight packet
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_ReleaseBuffer(struct WILC_WFI_packet *pkt)
-{
- unsigned long flags;
- struct WILC_WFI_priv *priv = netdev_priv(pkt->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- pkt->next = priv->ppool;
- priv->ppool = pkt;
- spin_unlock_irqrestore(&priv->lock, flags);
- if (netif_queue_stopped(pkt->dev) && pkt->next == NULL)
- netif_wake_queue(pkt->dev);
-}
-
-/**
- * @brief WILC_WFI_EnqueueBuf
- * @details Enqueuing packets in an RX buffer queue
- * @param[in] WILC_WFI_packet *pkt : Structure holding in-flight packet
- * @param[in] net_device *dev : Network Device Driver Structure
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_EnqueueBuf(struct net_device *dev, struct WILC_WFI_packet *pkt)
-{
- unsigned long flags;
- struct WILC_WFI_priv *priv = netdev_priv(dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- pkt->next = priv->rx_queue; /* FIXME - misorders packets */
- priv->rx_queue = pkt;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/**
- * @brief WILC_WFI_DequeueBuf
- * @details Dequeuing packets from the RX buffer queue
- * @param[in] net_device *dev : Network Device Driver Structure
- * @return WILC_WFI_packet *pkt : Structure holding in-flight packet
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-struct WILC_WFI_packet *WILC_WFI_DequeueBuf(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- struct WILC_WFI_packet *pkt;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- pkt = priv->rx_queue;
- if (pkt != NULL)
- priv->rx_queue = pkt->next;
- spin_unlock_irqrestore(&priv->lock, flags);
- return pkt;
-}
-/**
- * @brief WILC_WFI_RxInts
- * @details Enable and disable receive interrupts.
- * @param[in] net_device *dev : Network Device Driver Structure
- * @param[in] enable : Enable/Disable flag
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-static void WILC_WFI_RxInts(struct net_device *dev, int enable)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- priv->rx_int_enabled = enable;
-}
-
-/**
- * @brief WILC_WFI_Open
- * @details Open Network Device Driver, called when the network
- * interface is opened. It starts the interface's transmit queue.
- * @param[in] net_device *dev : Network Device Driver Structure
- * @param[in] enable : Enable/Disable flag
- * @return int : Returns 0 upon success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Open(struct net_device *dev)
-{
- /* request_region(), request_irq(), .... (like fops->open) */
- /*
- * Assign the hardware address of the board: use "\0WLANx", where
- * x is 0 or 1. The first byte is '\0' to avoid being a multicast
- * address (the first byte of multicast addrs is odd).
- */
- memcpy(dev->dev_addr, "\0WLAN0", ETH_ALEN);
- if (dev == WILC_WFI_devs[1])
- dev->dev_addr[ETH_ALEN - 1]++; /* \0WLAN1 */
-
- WILC_WFI_InitHostInt(dev);
- netif_start_queue(dev);
- return 0;
-}
-/**
- * @brief WILC_WFI_Release
- * @details Release Network Device Driver, called when the network
- * interface is stopped or brought down. This function marks
- * the network driver as not being able to transmit
- * @param[in] net_device *dev : Network Device Driver Structure
- * @return int : Return 0 on Success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Release(struct net_device *dev)
-{
- /* release ports, irq and such -- like fops->close */
-
- netif_stop_queue(dev); /* can't transmit any more */
-
- return 0;
-}
-/**
- * @brief WILC_WFI_Config
- * @details Configuration changes (passed on by ifconfig)
- * @param[in] net_device *dev : Network Device Driver Structure
- * @param[in] struct ifmap *map : Contains the ioctl implementation for the
- * network driver.
- * @return int : Return 0 on Success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr) {
- PRINT_D(RX_DBG, KERN_WARNING "WILC_WFI: Can't change I/O address\n");
- return -EOPNOTSUPP;
- }
-
- /* Allow changing the IRQ */
- if (map->irq != dev->irq) {
- dev->irq = map->irq;
- /* request_irq() is delayed to open-time */
- }
-
- /* ignore other fields */
- return 0;
-}
-/**
- * @brief WILC_WFI_Rx
- * @details Receive a packet: retrieve, encapsulate and pass over to upper
- * levels
- * @param[in] net_device *dev : Network Device Driver Structure
- * @param[in] WILC_WFI_packet :
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_Rx(struct net_device *dev, struct WILC_WFI_packet *pkt)
-{
- int i;
- struct sk_buff *skb;
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- s8 rssi;
- /*
- * The packet has been retrieved from the transmission
- * medium. Build an skb around it, so upper layers can handle it
- */
-
-
- skb = dev_alloc_skb(pkt->datalen + 2);
- if (!skb) {
- if (printk_ratelimit())
- PRINT_D(RX_DBG, "WILC_WFI rx: low on mem - packet dropped\n");
- priv->stats.rx_dropped++;
- goto out;
- }
- skb_reserve(skb, 2); /* align IP on 16B boundary */
- memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
-
- if (priv->monitor_flag) {
- PRINT_INFO(RX_DBG, "In monitor device name %s\n", dev->name);
- priv = wiphy_priv(priv->dev->ieee80211_ptr->wiphy);
- PRINT_D(RX_DBG, "VALUE PASSED IN OF HRWD %p\n", priv->hWILCWFIDrv);
- /* host_int_get_rssi(priv->hWILCWFIDrv, &(rssi)); */
- if (INFO) {
- for (i = 14; i < skb->len; i++)
- PRINT_INFO(RX_DBG, "RXdata[%d] %02x\n", i, skb->data[i]);
- }
- WILC_WFI_monitor_rx(dev, skb);
- return;
- }
-out:
- return;
-}
-
-/**
- * @brief WILC_WFI_Poll
- * @details The poll implementation
- * @param[in] struct napi_struct *napi :
- * @param[in] int budget :
- * @return int : Return 0 on Success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-static int WILC_WFI_Poll(struct napi_struct *napi, int budget)
-{
- int npackets = 0;
- struct sk_buff *skb;
- struct WILC_WFI_priv *priv = container_of(napi, struct WILC_WFI_priv, napi);
- struct net_device *dev = priv->dev;
- struct WILC_WFI_packet *pkt;
-
- while (npackets < budget && priv->rx_queue) {
- pkt = WILC_WFI_DequeueBuf(dev);
- skb = dev_alloc_skb(pkt->datalen + 2);
- if (!skb) {
- if (printk_ratelimit())
- PRINT_D(RX_DBG, "WILC_WFI: packet dropped\n");
- priv->stats.rx_dropped++;
- WILC_WFI_ReleaseBuffer(pkt);
- continue;
- }
- skb_reserve(skb, 2); /* align IP on 16B boundary */
- memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
- netif_receive_skb(skb);
- /* Maintain stats */
- npackets++;
- WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, pkt->datalen, WILC_WFI_RX_PKT);
- WILC_WFI_ReleaseBuffer(pkt);
- }
- /* If we processed all packets, we're done; tell the kernel and re-enable ints */
- if (npackets < budget) {
- napi_complete(napi);
- WILC_WFI_RxInts(dev, 1);
- }
- return npackets;
-}
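The function above follows the standard NAPI contract: process at most budget packets, and complete NAPI only when the queue drains early so interrupts are re-armed exactly once. A minimal skeleton of that contract (rx_work_pending(), process_one_packet() and enable_rx_interrupts() are hypothetical helpers; later kernels prefer napi_complete_done()):

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget && rx_work_pending())
		done += process_one_packet();

	if (done < budget) {
		napi_complete(napi);       /* drained early: stop polling */
		enable_rx_interrupts();    /* re-arm the device interrupt */
	}
	return done;                       /* returning budget keeps polling */
}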
-
-/**
- * @brief WILC_WFI_RegularInterrupt
- * @details The typical interrupt entry point
- * @param[in] struct napi_struct *napi :
- * @param[in] int budget :
- * @return int : Return 0 on Success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-static void WILC_WFI_RegularInterrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- int statusword;
- struct WILC_WFI_priv *priv;
- struct WILC_WFI_packet *pkt = NULL;
- /*
- * As usual, check the "device" pointer to be sure it is
- * really interrupting.
- * Then assign "struct device *dev"
- */
- struct net_device *dev = (struct net_device *)dev_id;
- /* ... and check with hw if it's really ours */
-
- /* paranoid */
- if (!dev)
- return;
-
- /* Lock the device */
- priv = netdev_priv(dev);
- spin_lock(&priv->lock);
-
- /* retrieve statusword: real netdevices use I/O instructions */
- statusword = priv->status;
- priv->status = 0;
- if (statusword & WILC_WFI_RX_INTR) {
- /* send it to WILC_WFI_rx for handling */
- pkt = priv->rx_queue;
- if (pkt) {
- priv->rx_queue = pkt->next;
- WILC_WFI_Rx(dev, pkt);
- }
- }
- if (statusword & WILC_WFI_TX_INTR) {
- /* a transmission is over: free the skb */
- WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, priv->tx_packetlen, WILC_WFI_TX_PKT);
- dev_kfree_skb(priv->skb);
- }
-
- /* Unlock the device and we are done */
- spin_unlock(&priv->lock);
- if (pkt)
- WILC_WFI_ReleaseBuffer(pkt); /* Do this outside the lock! */
- return;
-}
-/**
- * @brief WILC_WFI_NapiInterrupt
- * @details A NAPI interrupt handler
- * @param[in] irq:
- * @param[in] dev_id:
- * @param[in] pt_regs:
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-static void WILC_WFI_NapiInterrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- int statusword;
- struct WILC_WFI_priv *priv;
-
- /*
- * As usual, check the "device" pointer for shared handlers.
- * Then assign "struct device *dev"
- */
- struct net_device *dev = (struct net_device *)dev_id;
- /* ... and check with hw if it's really ours */
-
- /* paranoid */
- if (!dev)
- return;
-
- /* Lock the device */
- priv = netdev_priv(dev);
- spin_lock(&priv->lock);
-
- /* retrieve statusword: real netdevices use I/O instructions */
- statusword = priv->status;
- priv->status = 0;
- if (statusword & WILC_WFI_RX_INTR) {
- WILC_WFI_RxInts(dev, 0); /* Disable further interrupts */
- napi_schedule(&priv->napi);
- }
- if (statusword & WILC_WFI_TX_INTR) {
- /* a transmission is over: free the skb */
-
- WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, priv->tx_packetlen, WILC_WFI_TX_PKT);
- dev_kfree_skb(priv->skb);
- }
-
- /* Unlock the device and we are done */
- spin_unlock(&priv->lock);
- return;
-}
-
-/**
- * @brief WILC_WFI_HwTx
- * @details Transmit a packet (low level interface)
- * @param[in] buf:
- * @param[in] len:
- * @param[in] net_device *dev:
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_HwTx(char *buf, int len, struct net_device *dev)
-{
- /*
- * This function deals with hw details. This interface loops
- * back the packet to the other WILC_WFI interface (if any).
- * In other words, this function implements the WILC_WFI behaviour,
- * while all other procedures are rather device-independent
- */
- struct iphdr *ih;
- struct net_device *dest;
- struct WILC_WFI_priv *priv;
- u32 *saddr, *daddr;
- struct WILC_WFI_packet *tx_buffer;
-
-
- /* I am paranoid. Ain't I? */
- if (len < sizeof(struct ethhdr) + sizeof(struct iphdr)) {
- PRINT_D(RX_DBG, "WILC_WFI: Hmm... packet too short (%i octets)\n",
- len);
- return;
- }
-
- if (0) { /* enable this conditional to look at the data */
- int i;
- PRINT_D(RX_DBG, "len is %i", len);
- for (i = 14; i < len; i++)
- PRINT_D(RX_DBG, "TXdata[%d] %02x\n", i, buf[i] & 0xff);
- /* PRINT_D(RX_DBG, "\n"); */
- }
- /*
- * Ethhdr is 14 bytes, but the kernel arranges for iphdr
- * to be aligned (i.e., ethhdr is unaligned)
- */
- ih = (struct iphdr *)(buf + sizeof(struct ethhdr));
- saddr = &ih->saddr;
- daddr = &ih->daddr;
-
- ((u8 *)saddr)[2] ^= 1; /* change the third octet (class C) */
- ((u8 *)daddr)[2] ^= 1;
-
- ih->check = 0; /* and rebuild the checksum (ip needs it) */
- ih->check = ip_fast_csum((unsigned char *)ih, ih->ihl);
-
-
- if (dev == WILC_WFI_devs[0])
- PRINT_D(RX_DBG, "%08x:%05i --> %08x:%05i\n",
- ntohl(ih->saddr), ntohs(((struct tcphdr *)(ih + 1))->source),
- ntohl(ih->daddr), ntohs(((struct tcphdr *)(ih + 1))->dest));
- else
- PRINT_D(RX_DBG, "%08x:%05i <-- %08x:%05i\n",
- ntohl(ih->daddr), ntohs(((struct tcphdr *)(ih + 1))->dest),
- ntohl(ih->saddr), ntohs(((struct tcphdr *)(ih + 1))->source));
-
- /*
- * Ok, now the packet is ready for transmission: first simulate a
- * receive interrupt on the twin device, then a
- * transmission-done on the transmitting device
- */
- dest = WILC_WFI_devs[dev == WILC_WFI_devs[0] ? 1 : 0];
- priv = netdev_priv(dest);
-
- tx_buffer = WILC_WFI_GetTxBuffer(dev);
- tx_buffer->datalen = len;
- memcpy(tx_buffer->data, buf, len);
- WILC_WFI_EnqueueBuf(dest, tx_buffer);
- if (priv->rx_int_enabled) {
- priv->status |= WILC_WFI_RX_INTR;
- WILC_WFI_Interrupt(0, dest, NULL);
- }
-
- priv = netdev_priv(dev);
- priv->tx_packetlen = len;
- priv->tx_packetdata = buf;
- priv->status |= WILC_WFI_TX_INTR;
- if (lockup && ((priv->stats.tx_packets + 1) % lockup) == 0) {
- /* Simulate a dropped transmit interrupt */
- netif_stop_queue(dev);
- PRINT_D(RX_DBG, "Simulate lockup at %ld, txp %ld\n", jiffies,
- (unsigned long) priv->stats.tx_packets);
- } else
- WILC_WFI_Interrupt(0, dev, NULL);
-
-}
-
-/**
- * @brief WILC_WFI_Tx
- * @details Transmit a packet (called by the kernel)
- * @param[in] sk_buff *skb:
- * @param[in] net_device *dev:
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Tx(struct sk_buff *skb, struct net_device *dev)
-{
- int len;
- char *data, shortpkt[ETH_ZLEN];
- struct WILC_WFI_priv *priv = netdev_priv(dev);
-
- /* priv = wiphy_priv(priv->dev->ieee80211_ptr->wiphy); */
-
- /* if(priv->monitor_flag) */
- /* mac80211_hwsim_monitor_rx(skb); */
-
-
- data = skb->data;
- len = skb->len;
-
- if (len < ETH_ZLEN) {
- memset(shortpkt, 0, ETH_ZLEN);
- memcpy(shortpkt, skb->data, skb->len);
- len = ETH_ZLEN;
- data = shortpkt;
- }
- dev->trans_start = jiffies; /* save the timestamp */
-
- /* Remember the skb, so we can free it at interrupt time */
- priv->skb = skb;
-
- /* actual deliver of data is device-specific, and not shown here */
- WILC_WFI_HwTx(data, len, dev);
-
- return 0; /* Our simple device can not fail */
-}
-
-/**
- * @brief WILC_WFI_TxTimeout
- * @details Deal with a transmit timeout.
- * @param[in] net_device *dev:
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_TxTimeout(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
-
- PRINT_D(RX_DBG, "Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev->trans_start);
- /* Simulate a transmission interrupt to get things moving */
- priv->status = WILC_WFI_TX_INTR;
- WILC_WFI_Interrupt(0, dev, NULL);
- priv->stats.tx_errors++;
- netif_wake_queue(dev);
- return;
-}
-
-/**
- * @brief WILC_WFI_Ioctl
- * @details Ioctl commands
- * @param[in] net_device *dev:
- * @param[in] ifreq *rq
- * @param[in] cmd:
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- PRINT_D(RX_DBG, "ioctl\n");
- return 0;
-}
-
-/**
- * @brief WILC_WFI_Stat
- * @details Return statistics to the caller
- * @param[in] net_device *dev:
- * @return WILC_WFI_Stats : Return net_device_stats structure with the
- * network device driver private data contents.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-struct net_device_stats *WILC_WFI_Stats(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- return &priv->stats;
-}
-
-/**
- * @brief WILC_WFI_RebuildHeader
- * @details This function is called to fill up an eth header, since arp is not
- * available on the interface
- * @param[in] sk_buff *skb:
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_RebuildHeader(struct sk_buff *skb)
-{
- struct ethhdr *eth = (struct ethhdr *) skb->data;
- struct net_device *dev = skb->dev;
-
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
- eth->h_dest[ETH_ALEN - 1] ^= 0x01; /* dest is us xor 1 */
- return 0;
-}
-/**
- * @brief WILC_WFI_Header
- * @details This function is called to fill up an eth header, since arp is not
- * available on the interface
- * @param[in] sk_buff *skb:
- * @param[in] struct net_device *dev:
- * @param[in] unsigned short type:
- * @param[in] const void *saddr,
- * @param[in] const void *daddr:
- * @param[in] unsigned int len
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_Header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr, const void *saddr,
- unsigned int len)
-{
- struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
-
- eth->h_proto = htons(type);
- memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
- eth->h_dest[ETH_ALEN - 1] ^= 0x01; /* dest is us xor 1 */
- return dev->hard_header_len;
-}
-
-/**
- * @brief WILC_WFI_ChangeMtu
- * @details The "change_mtu" method is usually not needed.
- * If you need it, it must be like this.
- * @param[in] net_device *dev : Network Device Driver Structure
- * @param[in] new_mtu :
- * @return int : Returns 0 on Success.
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_ChangeMtu(struct net_device *dev, int new_mtu)
-{
- unsigned long flags;
- struct WILC_WFI_priv *priv = netdev_priv(dev);
- spinlock_t *lock = &priv->lock;
-
- /* check ranges */
- if ((new_mtu < 68) || (new_mtu > 1500))
- return -EINVAL;
- /*
- * Do anything you need, and then accept the value
- */
- spin_lock_irqsave(lock, flags);
- dev->mtu = new_mtu;
- spin_unlock_irqrestore(lock, flags);
- return 0; /* success */
-}
-
-static const struct header_ops WILC_WFI_header_ops = {
- .create = WILC_WFI_Header,
- .rebuild = WILC_WFI_RebuildHeader,
- .cache = NULL, /* disable caching */
-};
-
-
-static const struct net_device_ops WILC_WFI_netdev_ops = {
- .ndo_open = WILC_WFI_Open,
- .ndo_stop = WILC_WFI_Release,
- .ndo_set_config = WILC_WFI_Config,
- .ndo_start_xmit = WILC_WFI_Tx,
- .ndo_do_ioctl = WILC_WFI_Ioctl,
- .ndo_get_stats = WILC_WFI_Stats,
- .ndo_change_mtu = WILC_WFI_ChangeMtu,
- .ndo_tx_timeout = WILC_WFI_TxTimeout,
-};
-
-/**
- * @brief WILC_WFI_Init
- * @details The init function (sometimes called probe).
- * It is invoked by register_netdev()
- * @param[in] net_device *dev:
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-void WILC_WFI_Init(struct net_device *dev)
-{
- struct WILC_WFI_priv *priv;
-
-
- /*
- * Then, assign other fields in dev, using ether_setup() and some
- * hand assignments
- */
- ether_setup(dev); /* assign some of the fields */
- /* 1- Allocate space */
-
- dev->netdev_ops = &WILC_WFI_netdev_ops;
- dev->header_ops = &WILC_WFI_header_ops;
- dev->watchdog_timeo = timeout;
- /* keep the default flags, just add NOARP */
- dev->flags |= IFF_NOARP;
- dev->features |= NETIF_F_NO_CSUM;
- /*
- * Then, initialize the priv field. This encloses the statistics
- * and a few private fields.
- */
- priv = netdev_priv(dev);
- memset(priv, 0, sizeof(struct WILC_WFI_priv));
- priv->dev = dev;
- netif_napi_add(dev, &priv->napi, WILC_WFI_Poll, 2);
- /* The last parameter above is the NAPI "weight". */
- spin_lock_init(&priv->lock);
- WILC_WFI_RxInts(dev, 1); /* enable receive interrupts */
- WILC_WFI_SetupPool(dev);
-}
-
-/**
- * @brief WILC_WFI_Cleanup
- * @details Unregister the network devices and free the packet pools
- * @return NONE
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-
-void WILC_WFI_Cleanup(void)
-{
- int i;
- struct WILC_WFI_priv *priv[2];
-
- /*if(hwsim_mon!=NULL)
- * {
- * PRINT_D(RX_DBG, "Freeing monitor interface\n");
- * unregister_netdev(hwsim_mon);
- * free_netdev(hwsim_mon);
- * }*/
- for (i = 0; i < 2; i++) {
- priv[i] = netdev_priv(WILC_WFI_devs[i]);
-
- if (WILC_WFI_devs[i]) {
- PRINT_D(RX_DBG, "Unregistering\n");
- unregister_netdev(WILC_WFI_devs[i]);
- WILC_WFI_TearDownPool(WILC_WFI_devs[i]);
- free_netdev(WILC_WFI_devs[i]);
- PRINT_D(RX_DBG, "[NETDEV]Stopping interface\n");
- WILC_WFI_DeInitHostInt(WILC_WFI_devs[i]);
- WILC_WFI_WiphyFree(WILC_WFI_devs[i]);
- }
-
- }
- /* unregister_netdev(hwsim_mon); */
- WILC_WFI_deinit_mon_interface();
- return;
-}
-
-
-void StartConfigSim(void);
-
-
-
-
-
-
-
-/**
- * @brief WILC_WFI_InitModule
- * @details The module init function: allocate and register the net devices
- * @return int : Return 0 on Success
- * @author mdaftedar
- * @date 01 MAR 2012
- * @version 1.0
- */
-int WILC_WFI_InitModule(void)
-{
-
- int result, i, ret = -ENOMEM;
- struct WILC_WFI_priv *priv[2], *netpriv;
- struct wireless_dev *wdev;
- WILC_WFI_Interrupt = use_napi ? WILC_WFI_NapiInterrupt : WILC_WFI_RegularInterrupt;
- char buf[IFNAMSIZ];
-
- for (i = 0; i < 2; i++) {
-
- /* Allocate the net devices */
- WILC_WFI_devs[i] = alloc_netdev(sizeof(struct WILC_WFI_priv), "wlan%d",
- WILC_WFI_Init);
- if (WILC_WFI_devs[i] == NULL)
- goto out;
- /* priv[i] = netdev_priv(WILC_WFI_devs[i]); */
-
- wdev = WILC_WFI_WiphyRegister(WILC_WFI_devs[i]);
- WILC_WFI_devs[i]->ieee80211_ptr = wdev;
- netpriv = netdev_priv(WILC_WFI_devs[i]);
- netpriv->dev->ieee80211_ptr = wdev;
- netpriv->dev->ml_priv = netpriv;
- wdev->netdev = netpriv->dev;
-
- /*Registering the net device*/
- result = register_netdev(WILC_WFI_devs[i]);
- if (result)
- PRINT_D(RX_DBG, "WILC_WFI: error %i registering device \"%s\"\n",
- result, WILC_WFI_devs[i]->name);
- else
- ret = 0;
- }
-
-
- /*init atmel driver */
- priv[0] = netdev_priv(WILC_WFI_devs[0]);
- priv[1] = netdev_priv(WILC_WFI_devs[1]);
-
- if (priv[1]->dev->ieee80211_ptr->wiphy->interface_modes && BIT(NL80211_IFTYPE_MONITOR)) {
- /* snprintf(buf, IFNAMSIZ, "mon.%s", priv[1]->dev->name); */
- /* WILC_WFI_init_mon_interface(); */
- /* priv[1]->monitor_flag = 1; */
-
- }
- priv[0]->bCfgScanning = false;
- priv[0]->u32RcvdChCount = 0;
-
- WILC_memset(priv[0]->au8AssociatedBss, 0xFF, ETH_ALEN);
-
-
- /* ret = host_int_init(&priv[0]->hWILCWFIDrv); */
- /*copy handle to the other driver*/
- /* priv[1]->hWILCWFIDrv = priv[0]->hWILCWFIDrv; */
- if (ret) {
- PRINT_ER("Error Init Driver\n");
- }
-
-
-out:
- if (ret)
- WILC_WFI_Cleanup();
- return ret;
-
-
-}
-
-
-module_init(WILC_WFI_InitModule);
-module_exit(WILC_WFI_Cleanup);
-
-#endif
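A historical footnote on the file just removed: it still used the three-argument alloc_netdev(). Since kernel 3.17 the call also takes a name-assignment type, so a revived version would read roughly as below (sketch, assuming NET_NAME_UNKNOWN is acceptable):

#include <linux/netdevice.h>

static struct net_device *alloc_sim_netdev(void)
{
	return alloc_netdev(sizeof(struct WILC_WFI_priv), "wlan%d",
			    NET_NAME_UNKNOWN, WILC_WFI_Init);
}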
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index d413fa3861c0..77f320d125e8 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -77,12 +77,12 @@ struct WILC_WFI_stats {
#define num_reg_frame 2
/*
* If you use RX_BH_WORK_QUEUE on LPC3131: You may lose the first interrupt on
- * LPC3131 which is important to get the MAC start status when you are blocked inside
- * linux_wlan_firmware_download() which blocks mac_open().
+ * LPC3131 which is important to get the MAC start status when you are blocked
+ * inside linux_wlan_firmware_download() which blocks mac_open().
*/
-#if defined (NM73131_0_BOARD)
+#if defined(NM73131_0_BOARD)
#define RX_BH_TYPE RX_BH_KTHREAD
-#elif defined (PANDA_BOARD)
+#elif defined(PANDA_BOARD)
#define RX_BH_TYPE RX_BH_THREADED_IRQ
#else
#define RX_BH_TYPE RX_BH_KTHREAD
@@ -95,6 +95,7 @@ struct wilc_wfi_key {
int seq_len;
u32 cipher;
};
+
struct wilc_wfi_wep_key {
u8 *key;
u8 key_len;
@@ -143,14 +144,15 @@ struct WILC_WFI_priv {
spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
- WILC_WFIDrvHandle hWILCWFIDrv;
+ tstrWILC_WFIDrv *hWILCWFIDrv;
WILC_WFIDrvHandle hWILCWFIDrv_2;
tstrHostIFpmkidAttr pmkid_list;
struct WILC_WFI_stats netstats;
u8 WILC_WFI_wep_default;
u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104];
u8 WILC_WFI_wep_key_len[4];
- struct net_device *real_ndev; /* The real interface that the monitor is on */
+ /* The real interface that the monitor is on */
+ struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
u8 wilc_groupkey;
@@ -174,7 +176,7 @@ typedef struct {
typedef struct {
uint8_t aSrcAddress[ETH_ALEN];
uint8_t aBSSID[ETH_ALEN];
- uint32_t drvHandler;
+ tstrWILC_WFIDrv *drvHandler;
struct net_device *wilc_netdev;
} tstrInterfaceInfo;
typedef struct {
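
The two wilc_wfi_netdevice.h hunks above replace integer-style handles (the typedef'd WILC_WFIDrvHandle and the uint32_t drvHandler) with real struct pointers. A minimal sketch of the pattern, using hypothetical names, shows the benefit: a forward declaration is all a header needs to carry a typed pointer, and every caller is then type-checked by the compiler instead of smuggling a handle through an integer.

struct wilc_drv;                        /* forward declaration suffices */

struct demo_priv {
	struct wilc_drv *drv;           /* was: uint32_t drvHandler */
};

/* callers must now pass the right pointer type; anything else is a
 * compile-time error rather than a silently mis-cast handle */
int demo_scan(struct wilc_drv *drv);
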
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3af91f770485..7c53a2bd0381 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -16,7 +16,6 @@
* Global
*
********************************************/
-extern unsigned int int_clrd;
extern wilc_hif_func_t hif_sdio;
extern wilc_hif_func_t hif_spi;
extern wilc_cfg_func_t mac_cfg;
@@ -24,8 +23,6 @@ extern wilc_cfg_func_t mac_cfg;
extern u8 g_wilc_initialized; /* AMR : 0422 RK3026 Crash issue */
#endif
extern void WILC_WFI_mgmt_rx(uint8_t *buff, uint32_t size);
-extern void frmw_to_linux(uint8_t *buff, uint32_t size);
-int sdio_xfer_cnt(void);
uint32_t wilc_get_chipid(uint8_t update);
u16 Set_machw_change_vir_if(bool bValue);
@@ -118,18 +115,15 @@ static void wilc_debug(uint32_t flag, char *fmt, ...)
{
char buf[256];
va_list args;
- int len;
if (flag & dbgflag) {
va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
+ vsprintf(buf, fmt, args);
va_end(args);
if (g_wlan.os_func.os_debug)
g_wlan.os_func.os_debug(buf);
}
-
- return;
}
static CHIP_PS_STATE_T genuChipPSstate = CHIP_WAKEDUP;
@@ -292,13 +286,13 @@ uint32_t Statisitcs_totalAcks = 0, Statisitcs_DroppedAcks = 0;
#ifdef TCP_ACK_FILTER
struct Ack_session_info;
-typedef struct Ack_session_info {
+struct Ack_session_info {
uint32_t Ack_seq_num;
uint32_t Bigger_Ack_num;
uint16_t src_port;
uint16_t dst_port;
uint16_t status;
-} Ack_session_info_t;
+};
typedef struct {
uint32_t ack_num;
@@ -319,7 +313,7 @@ struct Ack_session_info *Alloc_head;
#define MAX_TCP_SESSION 25
#define MAX_PENDING_ACKS 256
-Ack_session_info_t Acks_keep_track_info[2 * MAX_TCP_SESSION];
+struct Ack_session_info Acks_keep_track_info[2 * MAX_TCP_SESSION];
Pending_Acks_info_t Pending_Acks_info[MAX_PENDING_ACKS];
uint32_t PendingAcks_arrBase;
@@ -451,7 +445,7 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(void)
for (i = PendingAcks_arrBase; i < (PendingAcks_arrBase + Pending_Acks); i++) {
if (Pending_Acks_info[i].ack_num < Acks_keep_track_info[Pending_Acks_info[i].Session_index].Bigger_Ack_num) {
struct txq_entry_t *tqe;
- PRINT_D(TCP_ENH, "DROP ACK: %u \n", Pending_Acks_info[i].ack_num);
+ PRINT_D(TCP_ENH, "DROP ACK: %u\n", Pending_Acks_info[i].ack_num);
tqe = Pending_Acks_info[i].txqe;
if (tqe) {
wilc_wlan_txq_remove(tqe);
@@ -467,11 +461,10 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(void)
Pending_Acks = 0;
Opened_TCP_session = 0;
- if (PendingAcks_arrBase == 0) {
+ if (PendingAcks_arrBase == 0)
PendingAcks_arrBase = MAX_TCP_SESSION;
- } else {
+ else
PendingAcks_arrBase = 0;
- }
p->os_func.os_spin_unlock(p->txq_spinlock, &p->txq_spinlock_flags);
@@ -735,7 +728,7 @@ INLINE void chip_wakeup(void)
do {
/* Wait for the chip to stabilize*/
- WILC_Sleep(2);
+ usleep_range(2 * 1000, 2 * 1000);
/* Make sure chip is awake. This is an extra step that can be removed */
/* later to avoid the bus access overhead */
if ((wilc_get_chipid(true) == 0)) {
@@ -758,7 +751,7 @@ INLINE void chip_wakeup(void)
/* If still off, redo the wake up sequence */
while (((clk_status_reg & 0x1) == 0) && (((++trials) % 3) == 0)) {
/* Wait for the chip to stabilize*/
- WILC_Sleep(2);
+ usleep_range(2 * 1000, 2 * 1000);
/* Make sure chip is awake. This is an extra step that can be removed */
/* later to avoid the bus access overhead */
@@ -996,7 +989,7 @@ static int wilc_wlan_handle_txq(uint32_t *pu32TxqCount)
/**
* wait for vmm table is ready
**/
- PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait... \n");
+ PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait...\n");
release_bus(RELEASE_ALLOW_SLEEP);
p->os_func.os_sleep(3); /* wait 3 ms */
acquire_bus(ACQUIRE_AND_WAKEUP);
@@ -1063,7 +1056,7 @@ static int wilc_wlan_handle_txq(uint32_t *pu32TxqCount)
}
if (entries == 0) {
- PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]] \n", reg, i, vmm_table[i - 1]);
+ PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]]\n", reg, i, vmm_table[i - 1]);
/* undo the transaction. */
ret = p->hif_func.hif_read_reg(WILC_HOST_TX_CTRL, &reg);
@@ -1114,11 +1107,10 @@ static int wilc_wlan_handle_txq(uint32_t *pu32TxqCount)
/*Bug3959: transmitting mgmt frames received from host*/
/*setting bit 30 in the host header to indicate mgmt frame*/
#ifdef WILC_AP_EXTERNAL_MLME
- if (tqe->type == WILC_MGMT_PKT) {
+ if (tqe->type == WILC_MGMT_PKT)
header |= (1 << 30);
- } else {
+ else
header &= ~(1 << 30);
- }
#endif
#ifdef BIG_ENDIAN
@@ -1213,7 +1205,7 @@ static void wilc_wlan_handle_rxq(void)
do {
if (p->quit) {
- PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function \n");
+ PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function\n");
p->os_func.os_signal(p->cfg_wait);
break;
}
@@ -1330,8 +1322,7 @@ static void wilc_wlan_handle_rxq(void)
} while (1);
p->rxq_exit = 1;
- PRINT_D(RX_DBG, "THREAD: Exiting RX thread \n");
- return;
+ PRINT_D(RX_DBG, "THREAD: Exiting RX thread\n");
}
/********************************************
@@ -1414,7 +1405,7 @@ static void wilc_wlan_handle_isr_ext(uint32_t int_status)
buffer = p->os_func.os_malloc(size);
if (buffer == NULL) {
wilc_debug(N_ERR, "[wilc isr]: fail alloc host memory...drop the packets (%d)\n", size);
- WILC_Sleep(100);
+ usleep_range(100 * 1000, 100 * 1000);
goto _end_;
}
#endif
@@ -1545,11 +1536,10 @@ static int wilc_wlan_firmware_download(const uint8_t *buffer, uint32_t buffer_si
acquire_bus(ACQUIRE_ONLY);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
- if (size <= blksz) {
+ if (size <= blksz)
size2 = size;
- } else {
+ else
size2 = blksz;
- }
/* Copy firmware into a DMA coherent buffer */
memcpy(dma_buffer, &buffer[offset], size2);
ret = p->hif_func.hif_block_tx(addr, dma_buffer, size2);
@@ -1782,7 +1772,7 @@ static int wilc_wlan_stop(void)
/******************************************************************************/
reg = ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 8) | (1 << 9) | (1 << 26) | (1 << 29) | (1 << 30) | (1 << 31)); /**/
/**/
- ret = p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg); /**/
+ p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg); /**/
reg = ~(1 << 10); /**/
/**/
ret = p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg); /**/
@@ -2306,11 +2296,10 @@ u16 Set_machw_change_vir_if(bool bValue)
PRINT_ER("Error while Reading reg WILC_CHANGING_VIR_IF\n");
}
- if (bValue) {
+ if (bValue)
reg |= (BIT31);
- } else {
+ else
reg &= ~(BIT31);
- }
ret = (&g_wlan)->hif_func.hif_write_reg(WILC_CHANGING_VIR_IF, reg);
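
Several hunks above swap the driver's WILC_Sleep() wrapper for usleep_range(). For short millisecond-scale waits like these, Documentation/timers/timers-howto.txt steers away from msleep(), which can overshoot badly on small delays; usleep_range() takes an explicit min/max window, and passing a real range (unlike the patch's identical bounds) lets the kernel coalesce the wakeup with other timers. A minimal sketch:

#include <linux/delay.h>

/* sketch: at least 2 ms of settle time, with 1 ms of slack so the
 * hrtimer wakeup can be batched with others */
static void chip_settle(void)
{
	usleep_range(2000, 3000);
}
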
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 0ba7ec69e2b4..244f7108ae92 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -1,7 +1,7 @@
#ifndef WILC_WLAN_H
#define WILC_WLAN_H
-#include "wilc_type.h"
+#include "wilc_oswrapper.h"
#define ISWILC1000(id) (((id & 0xfffff000) == 0x100000) ? 1 : 0)
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 3cffe55b3a93..e2842d37b078 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -363,8 +363,6 @@ static void wilc_wlan_parse_response_frame(uint8_t *info, int size)
size -= (2 + len);
info += (2 + len);
}
-
- return;
}
static int wilc_wlan_parse_info_frame(uint8_t *info, int size)
@@ -513,7 +511,6 @@ static int wilc_wlan_cfg_indicate_rx(uint8_t *frame, int size, wilc_cfg_rsp_t *r
int ret = 1;
uint8_t msg_type;
uint8_t msg_id;
- uint16_t msg_len;
#ifdef WILC_FULLY_HOSTING_AP
u32 *ptru32Frame;
bool bStatus = frame[2];
@@ -528,11 +525,6 @@ static int wilc_wlan_cfg_indicate_rx(uint8_t *frame, int size, wilc_cfg_rsp_t *r
msg_type = frame[0];
msg_id = frame[1]; /* seq no */
-#ifdef BIG_ENDIAN
- msg_len = (frame[2] << 8) | frame[3];
-#else
- msg_len = (frame[3] << 8) | frame[2];
-#endif
frame += 4;
size -= 4;
@@ -557,7 +549,7 @@ static int wilc_wlan_cfg_indicate_rx(uint8_t *frame, int size, wilc_cfg_rsp_t *r
case 'L':
#ifndef SWITCH_LOG_TERMINAL
- PRINT_ER("Unexpected firmware log message received \n");
+ PRINT_ER("Unexpected firmware log message received\n");
#else
PRINT_D(FIRM_DBG, "\nFIRMWARE LOGS :\n<<\n%s\n>>\n", frame);
break;
@@ -572,18 +564,18 @@ static int wilc_wlan_cfg_indicate_rx(uint8_t *frame, int size, wilc_cfg_rsp_t *r
#endif
/*bug3819:*/
case 'S':
- PRINT_INFO(RX_DBG, "Scan Notification Received \n");
+ PRINT_INFO(RX_DBG, "Scan Notification Received\n");
host_int_ScanCompleteReceived(frame - 4, size + 4);
break;
#ifdef WILC_FULLY_HOSTING_AP
case 'T':
- PRINT_INFO(RX_DBG, "TBTT Notification Received \n");
+ PRINT_INFO(RX_DBG, "TBTT Notification Received\n");
process_tbtt_isr();
break;
case 'A':
- PRINT_INFO(RX_DBG, "HOSTAPD ACK Notification Received \n");
+ PRINT_INFO(RX_DBG, "HOSTAPD ACK Notification Received\n");
WILC_mgm_HOSTAPD_ACK(ptru32Frame, bStatus);
break;
#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 8ed51e385118..5cf74e4c4a70 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -20,7 +20,7 @@
/* #define USE_OLD_SPI_SW */
-#include "wilc_type.h"
+#include "wilc_oswrapper.h"
#include "linux_wlan_common.h"
@@ -43,8 +43,8 @@
********************************************/
#define HIF_SDIO (0)
-#define HIF_SPI (1 << 0)
-#define HIF_SDIO_GPIO_IRQ (1 << 2)
+#define HIF_SPI BIT(0)
+#define HIF_SDIO_GPIO_IRQ BIT(2)
/********************************************
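
The HIF_* definitions here, like the GPIO masks in XGI_main_26.c below, move from open-coded shifts to the kernel's BIT() macro from <linux/bitops.h>, which expands to (1UL << (nr)). A small userspace analog confirms the values are unchanged:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))          /* as in <linux/bitops.h> */

#define HIF_SDIO           (0)
#define HIF_SPI            BIT(0)
#define HIF_SDIO_GPIO_IRQ  BIT(2)

int main(void)
{
	/* prints: 0x0 0x1 0x4 */
	printf("0x%x 0x%lx 0x%lx\n", HIF_SDIO, HIF_SPI, HIF_SDIO_GPIO_IRQ);
	return 0;
}
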
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
index 55e519905346..964a843c4521 100644
--- a/drivers/staging/xgifb/Makefile
+++ b/drivers/staging/xgifb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_XGI) += xgifb.o
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o
+xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 943d463cf193..5a6251a45112 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -18,8 +18,8 @@
#define Index_CR_GPIO_Reg1 0x48
#define Index_CR_GPIO_Reg3 0x4a
-#define GPIOG_EN (1<<6)
-#define GPIOG_READ (1<<1)
+#define GPIOG_EN BIT(6)
+#define GPIOG_READ BIT(1)
static char *forcecrt2type;
static char *mode;
@@ -29,7 +29,7 @@ static unsigned int refresh_rate;
/* -------------------- Macro definitions ---------------------------- */
#ifdef DEBUG
-static void dumpVGAReg(void)
+static void dumpVGAReg(struct xgifb_video_info *xgifb_info)
{
u8 i, reg;
@@ -48,7 +48,7 @@ static void dumpVGAReg(void)
}
}
#else
-static inline void dumpVGAReg(void)
+static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
{
}
#endif
@@ -1073,7 +1073,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
}
XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
- dumpVGAReg();
+ dumpVGAReg(xgifb_info);
return 0;
}
@@ -2019,7 +2019,7 @@ static int xgifb_probe(struct pci_dev *pdev,
goto error_mtrr;
}
- dumpVGAReg();
+ dumpVGAReg(xgifb_info);
return 0;
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index 24573026a7c0..500cabe41a3c 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,6 +1,6 @@
#ifndef _VBINIT_
#define _VBINIT_
-extern unsigned char XGIInitNew(struct pci_dev *pdev);
-extern void XGIRegInit(struct vb_device_info *, unsigned long);
+unsigned char XGIInitNew(struct pci_dev *pdev);
+void XGIRegInit(struct vb_device_info *, unsigned long);
#endif
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 5301bec6440d..6f082a7a5a4a 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -1,23 +1,23 @@
#ifndef _VBSETMODE_
#define _VBSETMODE_
-extern void InitTo330Pointer(unsigned char, struct vb_device_info *);
-extern void XGI_UnLockCRT2(struct vb_device_info *);
-extern void XGI_LockCRT2(struct vb_device_info *);
-extern void XGI_DisplayOff(struct xgifb_video_info *,
- struct xgi_hw_device_info *,
- struct vb_device_info *);
-extern void XGI_GetVBType(struct vb_device_info *);
-extern void XGI_SenseCRT1(struct vb_device_info *);
-extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo);
+void InitTo330Pointer(unsigned char, struct vb_device_info *);
+void XGI_UnLockCRT2(struct vb_device_info *);
+void XGI_LockCRT2(struct vb_device_info *);
+void XGI_DisplayOff(struct xgifb_video_info *,
+ struct xgi_hw_device_info *,
+ struct vb_device_info *);
+void XGI_GetVBType(struct vb_device_info *);
+void XGI_SenseCRT1(struct vb_device_info *);
+unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ unsigned short ModeNo);
-extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex);
-extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
- unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
+unsigned char XGI_SearchModeID(unsigned short ModeNo,
+ unsigned short *ModeIdIndex);
+unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
+ unsigned short ModeNo,
+ unsigned short ModeIdIndex,
+ struct vb_device_info *);
#endif
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
deleted file mode 100644
index be3437ca339e..000000000000
--- a/drivers/staging/xgifb/vb_util.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "vgatypes.h"
-#include "vb_util.h"
-
-void xgifb_reg_set(unsigned long port, u8 index, u8 data)
-{
- outb(index, port);
- outb(data, port + 1);
-}
-
-u8 xgifb_reg_get(unsigned long port, u8 index)
-{
- outb(index, port);
- return inb(port + 1);
-}
-
-void xgifb_reg_and_or(unsigned long port, u8 index,
- unsigned data_and, unsigned data_or)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
- temp = (temp & data_and) | data_or;
- xgifb_reg_set(port, index, temp);
-}
-
-void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
- temp &= data_and;
- xgifb_reg_set(port, index, temp);
-}
-
-void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
- temp |= data_or;
- xgifb_reg_set(port, index, temp);
-}
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 9161de1d37dd..7bd395fb31b2 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -1,9 +1,43 @@
#ifndef _VBUTIL_
#define _VBUTIL_
-extern void xgifb_reg_set(unsigned long, u8, u8);
-extern u8 xgifb_reg_get(unsigned long, u8);
-extern void xgifb_reg_or(unsigned long, u8, unsigned);
-extern void xgifb_reg_and(unsigned long, u8, unsigned);
-extern void xgifb_reg_and_or(unsigned long, u8, unsigned, unsigned);
+static inline void xgifb_reg_set(unsigned long port, u8 index, u8 data)
+{
+ outb(index, port);
+ outb(data, port + 1);
+}
+
+static inline u8 xgifb_reg_get(unsigned long port, u8 index)
+{
+ outb(index, port);
+ return inb(port + 1);
+}
+
+static inline void xgifb_reg_and_or(unsigned long port, u8 index,
+ unsigned data_and, unsigned data_or)
+{
+ u8 temp;
+
+ temp = xgifb_reg_get(port, index);
+ temp = (temp & data_and) | data_or;
+ xgifb_reg_set(port, index, temp);
+}
+
+static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
+{
+ u8 temp;
+
+ temp = xgifb_reg_get(port, index);
+ temp &= data_and;
+ xgifb_reg_set(port, index, temp);
+}
+
+static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
+{
+ u8 temp;
+
+ temp = xgifb_reg_get(port, index);
+ temp |= data_or;
+ xgifb_reg_set(port, index, temp);
+}
#endif
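
With all five register helpers now defined static inline in vb_util.h, vb_util.c and its object in the Makefile can be dropped: each translation unit compiles its own copy and the short read-modify-write bodies become inlining candidates. A hypothetical caller (port and register values are illustrative, matching the Index_CR_GPIO_Reg1/GPIOG_EN definitions earlier in this patch) is unchanged at the call site:

#include "vb_util.h"

/* set GPIOG_EN (bit 6) in CR 0x48 while preserving the other bits */
static void demo_enable_gpiog(unsigned long crtc_port)
{
	xgifb_reg_and_or(crtc_port, 0x48, 0xFF, 1 << 6);
}
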
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..342a07c58d89 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -269,14 +269,14 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
}
bool iscsit_check_np_match(
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
bool ip_match = false;
- u16 port;
+ u16 port, port_e;
if (sockaddr->ss_family == AF_INET6) {
sock_in6 = (struct sockaddr_in6 *)sockaddr;
@@ -288,6 +288,7 @@ bool iscsit_check_np_match(
ip_match = true;
port = ntohs(sock_in6->sin6_port);
+ port_e = ntohs(sock_in6_e->sin6_port);
} else {
sock_in = (struct sockaddr_in *)sockaddr;
sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
@@ -296,9 +297,10 @@ bool iscsit_check_np_match(
ip_match = true;
port = ntohs(sock_in->sin_port);
+ port_e = ntohs(sock_in_e->sin_port);
}
- if (ip_match && (np->np_port == port) &&
+ if (ip_match && (port_e == port) &&
(np->np_network_transport == network_transport))
return true;
@@ -309,7 +311,7 @@ bool iscsit_check_np_match(
* Called with mutex np_lock held
*/
static struct iscsi_np *iscsit_get_np(
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_np *np;
@@ -340,12 +342,9 @@ static struct iscsi_np *iscsit_get_np(
}
struct iscsi_np *iscsit_add_np(
- struct __kernel_sockaddr_storage *sockaddr,
- char *ip_str,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
- struct sockaddr_in *sock_in;
- struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
@@ -368,16 +367,6 @@ struct iscsi_np *iscsit_add_np(
}
np->np_flags |= NPF_IP_NETWORK;
- if (sockaddr->ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)sockaddr;
- snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
- np->np_port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)sockaddr;
- sprintf(np->np_ip, "%s", ip_str);
- np->np_port = ntohs(sock_in->sin_port);
- }
-
np->np_network_transport = network_transport;
spin_lock_init(&np->np_thread_lock);
init_completion(&np->np_restart_comp);
@@ -411,8 +400,8 @@ struct iscsi_np *iscsit_add_np(
list_add_tail(&np->np_list, &g_np_list);
mutex_unlock(&np_lock);
- pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, np->np_transport->name);
+ pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
+ &np->np_sockaddr, np->np_transport->name);
return np;
}
@@ -481,8 +470,8 @@ int iscsit_del_np(struct iscsi_np *np)
list_del(&np->np_list);
mutex_unlock(&np_lock);
- pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, np->np_transport->name);
+ pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
+ &np->np_sockaddr, np->np_transport->name);
iscsit_put_transport(np->np_transport);
kfree(np);
@@ -968,9 +957,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -1209,7 +1198,6 @@ static u32 iscsit_do_crypto_hash_sg(
u8 *pad_bytes)
{
u32 data_crc;
- u32 i;
struct scatterlist *sg;
unsigned int page_off;
@@ -1218,15 +1206,15 @@ static u32 iscsit_do_crypto_hash_sg(
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
- i = 0;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+ u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
- crypto_hash_update(hash, &sg[i], cur_len);
+ crypto_hash_update(hash, sg, cur_len);
data_length -= cur_len;
page_off = 0;
- i++;
+ /* iscsit_map_iovec has already checked for invalid sg pointers */
+ sg = sg_next(sg);
}
if (padding) {
@@ -2556,7 +2544,7 @@ static int iscsit_send_conn_drop_async_message(
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
hdr->param1 = cpu_to_be16(cmd->logout_cid);
hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
@@ -2628,7 +2616,7 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
hdr->statsn = cpu_to_be32(0xFFFFFFFF);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->datasn = cpu_to_be32(datain->data_sn);
hdr->offset = cpu_to_be32(datain->offset);
@@ -2839,7 +2827,7 @@ iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
@@ -2902,7 +2890,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
@@ -3049,7 +3037,7 @@ static int iscsit_send_r2t(
hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
hdr->statsn = cpu_to_be32(conn->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
hdr->data_offset = cpu_to_be32(r2t->offset);
hdr->data_length = cpu_to_be32(r2t->xfer_len);
@@ -3202,7 +3190,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
@@ -3321,7 +3309,7 @@ iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Task Management Response ITT: 0x%08x,"
" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
@@ -3399,6 +3387,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
int target_name_printed;
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
+ bool active;
buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
SENDTARGETS_BUF_LIMIT);
@@ -3452,19 +3441,18 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
}
spin_lock(&tpg->tpg_state_lock);
- if ((tpg->tpg_state == TPG_STATE_FREE) ||
- (tpg->tpg_state == TPG_STATE_INACTIVE)) {
- spin_unlock(&tpg->tpg_state_lock);
- continue;
- }
+ active = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock);
+ if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
+ continue;
+
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
tpg_np_list) {
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
- char *fmt_str;
+ struct sockaddr_storage *sockaddr;
if (np->np_network_transport != network_transport)
continue;
@@ -3492,15 +3480,15 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
}
}
- if (np->np_sockaddr.ss_family == AF_INET6)
- fmt_str = "TargetAddress=[%s]:%hu,%hu";
+ if (inaddr_any)
+ sockaddr = &conn->local_sockaddr;
else
- fmt_str = "TargetAddress=%s:%hu,%hu";
+ sockaddr = &np->np_sockaddr;
- len = sprintf(buf, fmt_str,
- inaddr_any ? conn->local_ip : np->np_ip,
- np->np_port,
- tpg->tpgt);
+ len = sprintf(buf, "TargetAddress="
+ "%pISpc,%hu",
+ sockaddr,
+ tpg->tpgt);
len += 1;
if ((len + payload_len) > buffer_len) {
@@ -3576,7 +3564,7 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
*/
cmd->maxcmdsn_inc = 0;
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
@@ -3654,7 +3642,7 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
}
EXPORT_SYMBOL(iscsit_build_reject);
@@ -3998,7 +3986,13 @@ get_immediate:
}
transport_err:
- iscsit_take_action_for_connection_exit(conn);
+ /*
+ * Avoid the normal connection failure code-path if this connection
+ * is still within LOGIN mode, and iscsi_np process context is
+ * responsible for cleaning up the early connection failure.
+ */
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+ iscsit_take_action_for_connection_exit(conn);
out:
return 0;
}
@@ -4082,7 +4076,7 @@ reject:
int iscsi_target_rx_thread(void *arg)
{
- int ret;
+ int ret, rc;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = arg;
@@ -4092,10 +4086,16 @@ int iscsi_target_rx_thread(void *arg)
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
+ /*
+ * Wait for iscsi_post_login_handler() to complete before allowing
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0)
+ return 0;
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
- int rc;
init_completion(&comp);
rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4532,18 @@ static void iscsit_logout_post_handler_closesession(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
- int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ int sleep = 1;
+ /*
+ * Traditional iscsi/tcp will invoke this logic from TX thread
+ * context during session logout, so clear tx_thread_active and
+ * sleep if iscsit_close_connection() has not already occurred.
+ *
+ * Since iser-target invokes this logic from its own workqueue,
+ * always sleep waiting for RX/TX thread shutdown to complete
+ * within iscsit_close_connection().
+ */
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4546,7 +4557,10 @@ static void iscsit_logout_post_handler_closesession(
static void iscsit_logout_post_handler_samecid(
struct iscsi_conn *conn)
{
- int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ int sleep = 1;
+
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4765,6 +4779,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
struct iscsi_session *sess;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
+ LIST_HEAD(free_list);
int session_count = 0;
spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4801,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
- spin_unlock_bh(&se_tpg->session_lock);
- iscsit_free_session(sess);
- spin_lock_bh(&se_tpg->session_lock);
+ list_move_tail(&se_sess->sess_list, &free_list);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ iscsit_free_session(sess);
session_count++;
}
- spin_unlock_bh(&se_tpg->session_lock);
pr_debug("Released %d iSCSI Session(s) from Target Portal"
" Group: %hu\n", session_count, tpg->tpgt);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 7d0f9c00d9c2..4cf2c0f2ba2f 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -10,10 +10,10 @@ extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
extern void iscsit_login_kref_put(struct kref *);
extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
struct iscsi_tpg_np *);
-extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+extern bool iscsit_check_np_match(struct sockaddr_storage *,
struct iscsi_np *, int);
-extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
- char *, int);
+extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
+ int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index c1898c84b3d2..c7461d770d3a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -99,7 +99,7 @@ static ssize_t lio_target_np_store_sctp(
* Use existing np->np_sockaddr for SCTP network portal reference
*/
tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+ tpg_np, ISCSI_SCTP_TCP);
if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
goto out;
} else {
@@ -177,7 +177,7 @@ static ssize_t lio_target_np_store_iser(
}
tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- np->np_ip, tpg_np, ISCSI_INFINIBAND);
+ tpg_np, ISCSI_INFINIBAND);
if (IS_ERR(tpg_np_iser)) {
rc = PTR_ERR(tpg_np_iser);
goto out;
@@ -220,7 +220,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
char *str, *str2, *ip_str, *port_str;
- struct __kernel_sockaddr_storage sockaddr;
+ struct sockaddr_storage sockaddr;
struct sockaddr_in *sock_in;
struct sockaddr_in6 *sock_in6;
unsigned long port;
@@ -235,7 +235,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
memset(buf, 0, MAX_PORTAL_LEN + 1);
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
- memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+ memset(&sockaddr, 0, sizeof(struct sockaddr_storage));
str = strstr(buf, "[");
if (str) {
@@ -248,8 +248,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
return ERR_PTR(-EINVAL);
}
str++; /* Skip over leading "[" */
- *str2 = '\0'; /* Terminate the IPv6 address */
- str2++; /* Skip over the "]" */
+ *str2 = '\0'; /* Terminate the unbracketed IPv6 address */
+ str2++; /* Skip over the \0 */
port_str = strstr(str2, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
@@ -267,7 +267,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
sock_in6 = (struct sockaddr_in6 *)&sockaddr;
sock_in6->sin6_family = AF_INET6;
sock_in6->sin6_port = htons((unsigned short)port);
- ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+ ret = in6_pton(str, -1,
(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
if (ret <= 0) {
pr_err("in6_pton returned: %d\n", ret);
@@ -316,7 +316,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
* sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
- tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+ tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
ISCSI_TCP);
if (IS_ERR(tpg_np)) {
iscsit_put_tpg(tpg);
@@ -344,8 +344,8 @@ static void lio_target_call_delnpfromtpg(
se_tpg = &tpg->tpg_se_tpg;
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
- " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
- tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+ " PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
if (ret < 0)
@@ -656,6 +656,7 @@ static ssize_t lio_target_nacl_show_info(
struct iscsi_conn *conn;
struct se_session *se_sess;
ssize_t rb = 0;
+ u32 max_cmd_sn;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
@@ -703,11 +704,12 @@ static ssize_t lio_target_nacl_show_info(
" Values]-----------------------\n");
rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
+ max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
- (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
- sess->exp_cmd_sn, sess->max_cmd_sn,
+ (max_cmd_sn - sess->exp_cmd_sn) + 1,
+ sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
rb += sprintf(page+rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
@@ -751,7 +753,7 @@ static ssize_t lio_target_nacl_show_info(
break;
}
- rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
+ rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
rb += sprintf(page+rb, " StatSN: 0x%08x\n",
@@ -1010,6 +1012,11 @@ TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(fabric_prot_type);
TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_tpg_enabled_sendtargets
+ */
+DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+TPG_ATTR(tpg_enabled_sendtargets, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1024,6 +1031,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_default_erl.attr,
&iscsi_tpg_attrib_t10_pi.attr,
&iscsi_tpg_attrib_fabric_prot_type.attr,
+ &iscsi_tpg_attrib_tpg_enabled_sendtargets.attr,
NULL,
};
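
The in6_pton() change above relies on the parser's documented convention that a negative srclen means scan to the terminating NUL (or the delimiter), making the hard-coded IPV6_ADDRESS_SPACE bound unnecessary. A minimal sketch of the call, assuming a NUL-terminated portal string:

#include <linux/errno.h>
#include <linux/in6.h>
#include <linux/inet.h>

static int parse_portal_ip6(const char *str, struct sockaddr_in6 *sin6)
{
	const char *end;

	/* srclen == -1: parse to the terminator; delim == -1: none */
	if (in6_pton(str, -1, (u8 *)&sin6->sin6_addr, -1, &end) <= 0)
		return -EINVAL;
	sin6->sin6_family = AF_INET6;
	return 0;
}
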
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 5fabcd3d623f..0382fa24b53b 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -47,19 +47,19 @@ void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
* core_set_queue_depth_for_node().
*/
sess->cmdsn_window = se_nacl->queue_depth;
- sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+ atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
}
void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
{
+ u32 max_cmd_sn;
+
if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
return;
cmd->maxcmdsn_inc = 1;
- mutex_lock(&sess->cmdsn_mutex);
- sess->max_cmd_sn += 1;
- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
- mutex_unlock(&sess->cmdsn_mutex);
+ max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
}
EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
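
The conversion of sess->max_cmd_sn from a mutex-protected u32 to an atomic_t (initialized via atomic_set() in the login path below, read with atomic_read() at every PDU-build site above) drops cmdsn_mutex from the increment path. The detail worth copying is that the new value comes from atomic_inc_return(), so the advance and the value being logged can never disagree. A minimal sketch:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

static u32 advance_maxcmdsn(atomic_t *max_cmd_sn)
{
	/* increment and capture one consistent snapshot */
	u32 v = (u32) atomic_inc_return(max_cmd_sn);

	pr_debug("Updated MaxCmdSN to 0x%08x\n", v);
	return v;
}
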
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..96e78c823d13 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
init_completion(&conn->conn_logout_comp);
init_completion(&conn->rx_half_close_comp);
init_completion(&conn->tx_half_close_comp);
+ init_completion(&conn->rx_login_comp);
spin_lock_init(&conn->cmd_lock);
spin_lock_init(&conn->conn_usage_lock);
spin_lock_init(&conn->immed_queue_lock);
@@ -330,7 +331,7 @@ static int iscsi_login_zero_tsih_s1(
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
*/
- sess->max_cmd_sn = be32_to_cpu(pdu->cmdsn);
+ atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
if (!sess->sess_ops) {
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
-static int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsi_conn *conn)
{
int ret = 0;
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
return 0;
out_tx:
+ send_sig(SIGINT, conn->tx_thread, 1);
kthread_stop(conn->tx_thread);
conn->tx_thread_active = false;
out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
return ret;
}
-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- int rc;
iscsit_inc_conn_usage_count(conn);
@@ -728,9 +729,9 @@ int iscsi_post_login_handler(
stop_timer = 1;
}
- pr_debug("iSCSI Login successful on CID: %hu from %s to"
- " %s:%hu,%hu\n", conn->cid, conn->login_ip,
- conn->local_ip, conn->local_port, tpg->tpgt);
+ pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
+ " %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
+ &conn->local_sockaddr, tpg->tpgt);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
- rc = iscsit_start_kthreads(conn);
- if (rc)
- return rc;
-
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
+ /*
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+ */
+ complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
+
if (stop_timer) {
spin_lock_bh(&se_tpg->session_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
}
iscsit_dec_session_usage_count(sess);
- return 0;
+ return;
}
iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -774,8 +776,8 @@ int iscsi_post_login_handler(
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
- pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
- conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+ pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
+ conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
tpg->tpgt);
spin_lock_bh(&sess->conn_lock);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);
- rc = iscsit_start_kthreads(conn);
- if (rc)
- return rc;
-
iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
+ /*
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+ */
+ complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
-
- return 0;
}
static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -823,8 +823,8 @@ static void iscsi_handle_login_thread_timeout(unsigned long data)
struct iscsi_np *np = (struct iscsi_np *) data;
spin_lock_bh(&np->np_thread_lock);
- pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
- np->np_ip, np->np_port);
+ pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
+ &np->np_sockaddr);
if (np->np_login_timer_flags & ISCSI_TF_STOP) {
spin_unlock_bh(&np->np_thread_lock);
@@ -877,7 +877,7 @@ static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
int iscsit_setup_np(
struct iscsi_np *np,
- struct __kernel_sockaddr_storage *sockaddr)
+ struct sockaddr_storage *sockaddr)
{
struct socket *sock = NULL;
int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
@@ -916,7 +916,7 @@ int iscsit_setup_np(
* in iscsi_target_configfs.c code..
*/
memcpy(&np->np_sockaddr, sockaddr,
- sizeof(struct __kernel_sockaddr_storage));
+ sizeof(struct sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
len = sizeof(struct sockaddr_in6);
@@ -975,7 +975,7 @@ fail:
int iscsi_target_setup_login_socket(
struct iscsi_np *np,
- struct __kernel_sockaddr_storage *sockaddr)
+ struct sockaddr_storage *sockaddr)
{
struct iscsit_transport *t;
int rc;
@@ -1015,44 +1015,42 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, &err, 1);
if (!rc) {
- if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
- snprintf(conn->login_ip, sizeof(conn->login_ip), "[%pI6c]",
- &sock_in6.sin6_addr.in6_u);
- else
- snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
- &sock_in6.sin6_addr.s6_addr32[3]);
- conn->login_port = ntohs(sock_in6.sin6_port);
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+ memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
+ } else {
+ /* Pretend to be an ipv4 socket */
+ sock_in.sin_family = AF_INET;
+ sock_in.sin_port = sock_in6.sin6_port;
+ memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+ memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
+ }
}
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in6, &err, 0);
if (!rc) {
- if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr))
- snprintf(conn->local_ip, sizeof(conn->local_ip), "[%pI6c]",
- &sock_in6.sin6_addr.in6_u);
- else
- snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
- &sock_in6.sin6_addr.s6_addr32[3]);
- conn->local_port = ntohs(sock_in6.sin6_port);
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+ memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
+ } else {
+ /* Pretend to be an ipv4 socket */
+ sock_in.sin_family = AF_INET;
+ sock_in.sin_port = sock_in6.sin6_port;
+ memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+ memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
+ }
}
} else {
memset(&sock_in, 0, sizeof(struct sockaddr_in));
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in, &err, 1);
- if (!rc) {
- sprintf(conn->login_ip, "%pI4",
- &sock_in.sin_addr.s_addr);
- conn->login_port = ntohs(sock_in.sin_port);
- }
+ if (!rc)
+ memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
rc = conn->sock->ops->getname(conn->sock,
(struct sockaddr *)&sock_in, &err, 0);
- if (!rc) {
- sprintf(conn->local_ip, "%pI4",
- &sock_in.sin_addr.s_addr);
- conn->local_port = ntohs(sock_in.sin_port);
- }
+ if (!rc)
+ memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
}
return 0;
@@ -1302,8 +1300,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
- pr_err("iSCSI Network Portal on %s:%hu currently not"
- " active.\n", np->np_ip, np->np_port);
+ pr_err("iSCSI Network Portal on %pISpc currently not"
+ " active.\n", &np->np_sockaddr);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
goto new_sess_out;
@@ -1312,9 +1310,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
conn->network_transport = np->np_network_transport;
- pr_debug("Received iSCSI login request from %s on %s Network"
- " Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
- conn->local_ip, conn->local_port);
+ pr_debug("Received iSCSI login request from %pISpc on %s Network"
+ " Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
+ &conn->local_sockaddr);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
@@ -1380,23 +1378,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (ret < 0)
goto new_sess_out;
- if (!conn->sess) {
- pr_err("struct iscsi_conn session pointer is NULL!\n");
- goto new_sess_out;
- }
-
iscsi_stop_login_thread_timer(np);
- if (signal_pending(current))
- goto new_sess_out;
-
if (ret == 1) {
tpg_np = conn->tpg_np;
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
- if (ret < 0)
- goto new_sess_out;
-
+ iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
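
The rx_login_comp handshake added above closes a startup race: the RX kthread is now created early (from the login I/O path in iscsi_target_nego.c below) but must not touch the socket until iscsi_post_login_handler() has moved the connection to TARG_CONN_STATE_LOGGED_IN. A minimal sketch of the pattern:

#include <linux/completion.h>

struct demo_conn {
	struct completion rx_login_comp;	/* init_completion() at
						 * connection setup */
};

static int demo_rx_thread(void *arg)
{
	struct demo_conn *conn = arg;

	/* a SIGINT during early teardown interrupts the wait: just exit */
	if (wait_for_completion_interruptible(&conn->rx_login_comp) < 0)
		return 0;

	/* ...socket I/O is safe from here on... */
	return 0;
}

static void demo_login_done(struct demo_conn *conn)
{
	complete(&conn->rx_login_comp);		/* release the RX thread */
}
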
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..b597aa2c61a1 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -5,14 +5,15 @@ extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
- struct __kernel_sockaddr_storage *);
+ struct sockaddr_storage *);
extern int iscsi_target_setup_login_socket(struct iscsi_np *,
- struct __kernel_sockaddr_storage *);
+ struct sockaddr_storage *);
extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
bool, bool);
extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..5c964c09c89f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <linux/ctype.h>
+#include <linux/kthread.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -340,7 +341,6 @@ static int iscsi_target_check_first_request(
static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
{
u32 padding = 0;
- struct iscsi_session *sess = conn->sess;
struct iscsi_login_rsp *login_rsp;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
@@ -352,7 +352,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
login_rsp->itt = login->init_task_tag;
login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
@@ -361,18 +361,45 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
ntohl(login_rsp->statsn), login->rsp_length);
padding = ((-login->rsp_length) & 3);
+ /*
+ * Before sending the last login response containing the transition
+ * bit for full-feature-phase, go ahead and start up TX/RX threads
+ * now to avoid potential resource allocation failures after the
+ * final login response has been sent.
+ */
+ if (login->login_complete) {
+ int rc = iscsit_start_kthreads(conn);
+ if (rc) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ }
if (conn->conn_transport->iscsit_put_login_tx(conn, login,
login->rsp_length + padding) < 0)
- return -1;
+ goto err;
login->rsp_length = 0;
- mutex_lock(&sess->cmdsn_mutex);
- login_rsp->exp_cmdsn = cpu_to_be32(sess->exp_cmd_sn);
- login_rsp->max_cmdsn = cpu_to_be32(sess->max_cmd_sn);
- mutex_unlock(&sess->cmdsn_mutex);
return 0;
+
+err:
+ if (login->login_complete) {
+ if (conn->rx_thread && conn->rx_thread_active) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ if (conn->tx_thread && conn->tx_thread_active) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ }
+ return -1;
}
static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 5e1349a3b143..9dd94ff0b62c 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -430,7 +430,7 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
int ret;
spin_lock(&lstat->lock);
- ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
+ ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
spin_unlock(&lstat->lock);
return ret;
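
This hunk, like the portal messages throughout the patch, leans on the %pIS printk extension (Documentation/printk-formats.txt): handed a struct sockaddr pointer, the 'c' flag prints compressed IPv6 and 'p' appends the port, so one format string covers both address families and the fixed-size login_ip/local_ip string buffers can go away. A minimal sketch:

#include <linux/printk.h>
#include <linux/socket.h>

static void demo_log_portal(struct sockaddr_storage *ss)
{
	pr_debug("portal %pISpc\n", ss);	/* e.g. 192.0.2.1:3260 or
						 * [2001:db8::1]:3260 */
}
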
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index cf59c397007b..11320df939f7 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,7 +50,7 @@ u8 iscsit_tmr_abort_task(
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
- iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
+ iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 968068ffcb1c..23c95cd14167 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -226,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->default_erl = TA_DEFAULT_ERL;
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
+ a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -430,7 +431,7 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
static bool iscsit_tpg_check_network_portal(
struct iscsi_tiqn *tiqn,
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_portal_group *tpg;
@@ -459,8 +460,7 @@ static bool iscsit_tpg_check_network_portal(
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
- struct __kernel_sockaddr_storage *sockaddr,
- char *ip_str,
+ struct sockaddr_storage *sockaddr,
struct iscsi_tpg_np *tpg_np_parent,
int network_transport)
{
@@ -470,8 +470,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
network_transport)) {
- pr_err("Network Portal: %s already exists on a"
- " different TPG on %s\n", ip_str,
+ pr_err("Network Portal: %pISc already exists on a"
+ " different TPG on %s\n", sockaddr,
tpg->tpg_tiqn->tiqn);
return ERR_PTR(-EEXIST);
}
@@ -484,7 +484,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
return ERR_PTR(-ENOMEM);
}
- np = iscsit_add_np(sockaddr, ip_str, network_transport);
+ np = iscsit_add_np(sockaddr, network_transport);
if (IS_ERR(np)) {
kfree(tpg_np);
return ERR_CAST(np);
@@ -514,8 +514,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
}
- pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
- tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
return tpg_np;
@@ -528,8 +528,8 @@ static int iscsit_tpg_release_np(
{
iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
- pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
- tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
tpg_np->tpg_np = NULL;
@@ -892,3 +892,21 @@ int iscsit_ta_fabric_prot_type(
return 0;
}
+
+int iscsit_ta_tpg_enabled_sendtargets(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->tpg_enabled_sendtargets = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
+ " %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 95ff5bdecd71..9db32bd24cd4 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -22,7 +22,7 @@ extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session
extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
- struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+ struct sockaddr_storage *, struct iscsi_tpg_np *,
int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
@@ -40,5 +40,6 @@ extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a2bff0702eb2..428b0d9e3dba 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -233,6 +233,7 @@ struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
+ u32 max_cmdsn;
int ret;
/*
@@ -241,10 +242,10 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* or order CmdSNs due to multiple connection sessions and/or
* CRC failures.
*/
- if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+ max_cmdsn = atomic_read(&sess->max_cmd_sn);
+ if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
- " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
- sess->max_cmd_sn);
+ " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
@@ -1371,6 +1372,33 @@ int tx_data(
return iscsit_do_tx_data(conn, &c);
}
+static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
+{
+ switch (x->ss_family) {
+ case AF_INET: {
+ struct sockaddr_in *sinx = (struct sockaddr_in *)x;
+ struct sockaddr_in *siny = (struct sockaddr_in *)y;
+ if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
+ return false;
+ if (sinx->sin_port != siny->sin_port)
+ return false;
+ break;
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
+ struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
+ if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
+ return false;
+ if (sinx->sin6_port != siny->sin6_port)
+ return false;
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
void iscsit_collect_login_stats(
struct iscsi_conn *conn,
u8 status_class,
@@ -1387,7 +1415,7 @@ void iscsit_collect_login_stats(
ls = &tiqn->login_stats;
spin_lock(&ls->lock);
- if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+ if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
((get_jiffies_64() - ls->last_fail_time) < 10)) {
/* We already have the failure info for this login */
spin_unlock(&ls->lock);
@@ -1427,8 +1455,7 @@ void iscsit_collect_login_stats(
ls->last_intr_fail_ip_family = conn->login_family;
- snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
- "%s", conn->login_ip);
+ ls->last_intr_fail_sockaddr = conn->login_sockaddr;
ls->last_fail_time = get_jiffies_64();
}
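The CmdSN hunk earlier in this file reads the now-atomic MaxCmdSN exactly once into a local, so the window check and the error message cannot observe two different values if another connection advances the window concurrently. The pattern, sketched against the assumed atomic_t field:

/* Snapshot once, use twice: the check and the log stay consistent. */
u32 max_cmdsn = atomic_read(&sess->max_cmd_sn);

if (iscsi_sna_gt(cmdsn, max_cmdsn))
	pr_err("CmdSN 0x%08x exceeds MaxCmdSN 0x%08x, ignoring\n",
	       cmdsn, max_cmdsn);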
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a556bdebd775..5bc85ffed720 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -526,7 +526,7 @@ static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
/*
- * Return the passed NAA identifier for the SAS Target Port
+ * Return the passed NAA identifier for the Target Port
*/
return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}
@@ -845,7 +845,7 @@ static int tcm_loop_make_nexus(
transport_free_session(tl_nexus->se_sess);
goto out;
}
- /* Now, register the SAS I_T Nexus as active. */
+ /* Now, register the I_T Nexus as active. */
transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_nexus = tl_nexus;
@@ -884,7 +884,7 @@ static int tcm_loop_drop_nexus(
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
- * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ * Release the SCSI I_T Nexus to the emulated Target Port
*/
transport_deregister_session(tl_nexus->se_sess);
tpg->tl_nexus = NULL;
@@ -1034,6 +1034,11 @@ static ssize_t tcm_loop_tpg_store_transport_status(
}
if (!strncmp(page, "offline", 7)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+ if (tl_tpg->tl_nexus) {
+ struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
+
+ core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
+ }
return count;
}
return -EINVAL;
@@ -1077,7 +1082,7 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
tl_tpg->tl_hba = tl_hba;
tl_tpg->tl_tpgt = tpgt;
/*
- * Register the tl_tpg as a emulated SAS TCM Target Endpoint
+ * Register the tl_tpg as an emulated TCM Target Endpoint
*/
ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
if (ret < 0)
@@ -1102,11 +1107,11 @@ static void tcm_loop_drop_naa_tpg(
tl_hba = tl_tpg->tl_hba;
tpgt = tl_tpg->tl_tpgt;
/*
- * Release the I_T Nexus for the Virtual SAS link if present
+ * Release the I_T Nexus for the Virtual target link if present
*/
tcm_loop_drop_nexus(tl_tpg);
/*
- * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint
+ * Deregister the tl_tpg as an emulated TCM Target Endpoint
*/
core_tpg_deregister(se_tpg);
@@ -1199,8 +1204,9 @@ static void tcm_loop_drop_scsi_hba(
struct tcm_loop_hba, tl_hba_wwn);
pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- tl_hba->tl_wwn_address, tl_hba->sh->host_no);
+ " %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
+ tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
kfree(t);
- break;
+ return;
}
}
mutex_unlock(&g_tf_lock);
@@ -747,7 +754,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
if (!dev->transport->init_prot || !dev->transport->free_prot) {
/* 0 is only allowed value for non-supporting backends */
if (flag == 0)
- return 0;
+ return count;
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
@@ -1590,9 +1597,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u8 type = 0;
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
- return 0;
+ return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
- return 0;
+ return count;
if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1665,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
* PR APTPL Metadata for Reservation
*/
case Opt_res_holder:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
res_holder = arg;
break;
case Opt_res_type:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
type = (u8)arg;
break;
case Opt_res_scope:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_res_all_tg_pt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
mapped_lun = (u64)arg;
break;
/*
@@ -1701,14 +1718,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
break;
case Opt_tpgt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
tpgt = (u16)arg;
break;
case Opt_port_rtpi:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_target_lun:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
target_lun = (u64)arg;
break;
default:
@@ -1985,7 +2008,7 @@ static ssize_t target_core_store_alua_lu_gp(
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
- return 0;
+ return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 09e682b1c549..dcc424ac35d4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -620,8 +620,6 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
- nacl->initiatorname);
return lacl;
}
@@ -656,7 +654,7 @@ int core_dev_add_initiator_node_lun_acl(
" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
- lacl->initiatorname);
+ nacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
@@ -688,7 +686,7 @@ int core_dev_del_initiator_node_lun_acl(
" InitiatorNode: %s Mapped LUN: %llu\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- lacl->initiatorname, lacl->mapped_lun);
+ nacl->initiatorname, lacl->mapped_lun);
return 0;
}
@@ -701,7 +699,7 @@ void core_dev_free_initiator_node_lun_acl(
" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->get_fabric_name(),
- lacl->initiatorname, lacl->mapped_lun);
+ lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
@@ -754,7 +752,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
dev->transport = hba->backend->ops;
- dev->prot_length = sizeof(struct se_dif_v1_tuple);
+ dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
INIT_LIST_HEAD(&dev->dev_list);
@@ -771,7 +769,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
- atomic_set(&dev->dev_ordered_id, 0);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 48a36989c1a6..be42429468e2 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %llu Write Protect bit to %s\n",
se_tpg->se_tpg_tfo->get_fabric_name(),
- lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+ se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..9522960c7fdd 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
kfree(tb);
- break;
+ return;
}
}
mutex_unlock(&backend_mutex);
@@ -176,3 +184,8 @@ core_delete_hba(struct se_hba *hba)
kfree(hba);
return 0;
}
+
+bool target_sense_desc_format(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) > U32_MAX;
+}
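Both unregister paths above (fabric templates in configfs and backend ops here) switch from break to an early return so they can drop the registry lock and call rcu_barrier() before kfree(): the barrier waits for every pending call_rcu()/kfree_rcu() callback that may still reference the module being unloaded. The shape, sketched with assumed names:

mutex_lock(&registry_lock);
list_del(&entry->list);
mutex_unlock(&registry_lock);	/* rcu_barrier() sleeps; no locks held */

rcu_barrier();			/* flush outstanding RCU callbacks */
kfree(entry);			/* now safe: no callback can touch it */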
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6d88d24e6cce..5a9982f5d5d6 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -306,20 +306,13 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
kfree(ibr);
}
-static void iblock_bio_done(struct bio *bio, int err)
+static void iblock_bio_done(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
- /*
- * Set -EIO if !BIO_UPTODATE and the passed is still err=0
- */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
- err = -EIO;
-
- if (err != 0) {
- pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
- " err: %d\n", bio, err);
+ if (bio->bi_error) {
+ pr_err("bio error: %p, err: %d\n", bio, bio->bi_error);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
@@ -370,15 +363,15 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug);
}
-static void iblock_end_io_flush(struct bio *bio, int err)
+static void iblock_end_io_flush(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
- if (err)
- pr_err("IBLOCK: cache flush failed: %d\n", err);
+ if (bio->bi_error)
+ pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
if (cmd) {
- if (err)
+ if (bio->bi_error)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
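The iblock conversion here (and the pscsi one below) track the v4.3 block-layer API: bi_end_io callbacks lose their int error argument and read completion status from bio->bi_error instead, and the BIO_UPTODATE flag test disappears. A completion callback in the new style, sketched (my_bio_done is illustrative):

static void my_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	/* bi_error is 0 on success, a negative errno on failure */
	if (bio->bi_error)
		pr_err("bio %p failed: %d\n", bio, bio->bi_error);

	target_complete_cmd(cmd, bio->bi_error ?
			    SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
	bio_put(bio);
}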
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
unsigned char *buf, *ptr, proto_ident;
- const unsigned char *i_str;
+ const unsigned char *i_str = NULL;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 08e9084ee615..de18790eb21c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -852,7 +852,7 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
return bl;
}
-static void pscsi_bi_endio(struct bio *bio, int error)
+static void pscsi_bi_endio(struct bio *bio)
{
bio_put(bio);
}
@@ -973,7 +973,7 @@ fail:
while (*hbio) {
bio = *hbio;
*hbio = (*hbio)->bi_next;
- bio_endio(bio, 0); /* XXX: should be error */
+ bio_endio(bio);
}
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -1061,7 +1061,7 @@ fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
- bio_endio(bio, 0); /* XXX: should be error */
+ bio_endio(bio);
}
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..47a833f3a145 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
/*
* Reserve extra element for chain entry
*/
if (sg_per_table < total_sg_needed)
chain_entry = 1;
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
if (!sg) {
@@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
sg_init_table(sg, sg_per_table + chain_entry);
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
if (i > 0) {
sg_chain(sg_table[i - 1].sg_table,
max_sg_per_table + 1, sg);
}
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
@@ -333,6 +325,7 @@ static int rd_configure_device(struct se_device *dev)
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+ dev->dev_attrib.is_nonrot = 1;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -429,42 +422,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
prot_sg = &prot_table->sg_table[prot_page -
prot_table->page_start_offset];
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
- prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
- PAGE_SIZE);
-
- /*
- * Allocate temporaly contiguous scatterlist entries if prot pages
- * straddles multiple scatterlist tables.
- */
- if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
- int i;
-
- prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
- if (!prot_sg)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- need_to_release = true;
- sg_init_table(prot_sg, prot_npages);
-
- for (i = 0; i < prot_npages; i++) {
- if (prot_page + i > prot_table->page_end_offset) {
- prot_table = rd_get_prot_table(dev,
- prot_page + i);
- if (!prot_table) {
- kfree(prot_sg);
- return rc;
- }
- sg_unmark_end(&prot_sg[i - 1]);
- }
- prot_sg[i] = prot_table->sg_table[prot_page + i -
- prot_table->page_start_offset];
- }
- }
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
if (is_read)
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
prot_sg, prot_offset);
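With the CONFIG_ARCH_HAS_SG_CHAIN guards removed, the ramdisk backend assumes scatterlist chaining is available everywhere: every table unconditionally reserves one trailing element, and sg_chain() links the previous table's spare slot to the next table. Sketched with assumed local names:

/* One extra slot per table holds the chain entry. */
sg = kcalloc(nents + 1, sizeof(*sg), GFP_KERNEL);
if (!sg)
	return -ENOMEM;
sg_init_table(sg, nents + 1);

if (prev_sg)				/* link tables end to end */
	sg_chain(prev_sg, prev_nents + 1, sg);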
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index e318ddbe15da..0b4b2a67d9f9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -154,6 +154,38 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
return 0;
}
+static sense_reason_t
+sbc_emulate_startstop(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+
+ /*
+ * See sbc3r36 section 5.25
+ * Immediate bit should be set since there is nothing to complete
+ * POWER CONDITION MODIFIER 0h
+ */
+ if (!(cdb[1] & 1) || cdb[2] || cdb[3])
+ return TCM_INVALID_CDB_FIELD;
+
+ /*
+ * See sbc3r36 section 5.25
+ * POWER CONDITION 0h START_VALID - process START and LOEJ
+ */
+ if (cdb[4] >> 4 & 0xf)
+ return TCM_INVALID_CDB_FIELD;
+
+ /*
+ * See sbc3r36 section 5.25
+ * LOEJ 0h - nothing to load or unload
+ * START 1h - we are ready
+ */
+ if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
+ return TCM_INVALID_CDB_FIELD;
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
@@ -960,6 +992,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
" than 1\n", sectors);
return TCM_INVALID_CDB_FIELD;
}
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
/*
* Double size because we have two buffers, note that
* zero is not an error..
@@ -1069,6 +1104,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = 0;
cmd->execute_cmd = sbc_emulate_noop;
break;
+ case START_STOP:
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_startstop;
+ break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
@@ -1191,7 +1230,7 @@ void
sbc_dif_generate(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_dif_v1_tuple *sdt;
+ struct t10_pi_tuple *sdt;
struct scatterlist *dsg = cmd->t_data_sg, *psg;
sector_t sector = cmd->t_task_lba;
void *daddr, *paddr;
@@ -1203,7 +1242,7 @@ sbc_dif_generate(struct se_cmd *cmd)
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
for (j = 0; j < psg->length;
- j += sizeof(struct se_dif_v1_tuple)) {
+ j += sizeof(*sdt)) {
__u16 crc;
unsigned int avail;
@@ -1256,7 +1295,7 @@ sbc_dif_generate(struct se_cmd *cmd)
}
static sense_reason_t
-sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
+sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
__u16 crc, sector_t sector, unsigned int ei_lba)
{
__be16 csum;
@@ -1346,7 +1385,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
struct se_device *dev = cmd->se_dev;
- struct se_dif_v1_tuple *sdt;
+ struct t10_pi_tuple *sdt;
struct scatterlist *dsg = cmd->t_data_sg;
sector_t sector = start;
void *daddr, *paddr;
@@ -1361,7 +1400,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
for (i = psg_off; i < psg->length &&
sector < start + sectors;
- i += sizeof(struct se_dif_v1_tuple)) {
+ i += sizeof(*sdt)) {
__u16 crc;
unsigned int avail;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..9413e1a949e5 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
buf[4] = 0x5;
else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
- cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
buf[4] = 0x4;
}
+ /* logical unit supports type 1 and type 3 protection */
+ if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+ (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+ (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+ buf[4] |= (0x3 << 3);
+ }
+
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -477,8 +484,8 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- int have_tp = 0;
- int opt, min;
+ u32 mtl = 0;
+ int have_tp = 0, opt, min;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -509,8 +516,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
+ *
+ * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+ * enforcing maximum HW scatter-gather-list entry limit
*/
- put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+ if (cmd->se_tfo->max_data_sg_nents) {
+ mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+ dev->dev_attrib.block_size;
+ }
+ put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
@@ -761,7 +775,12 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
if (pc == 1)
goto out;
- p[2] = 2;
+ /* GLTSD: No implicit save of log parameters */
+ p[2] = (1 << 1);
+ if (target_sense_desc_format(dev))
+ /* D_SENSE: Descriptor format sense data for 64bit sectors */
+ p[2] |= (1 << 2);
+
/*
* From spc4r23, 7.4.7 Control mode page
*
@@ -1144,6 +1163,7 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
unsigned char *rbuf;
u8 ua_asc = 0, ua_ascq = 0;
unsigned char buf[SE_SENSE_BUF];
+ bool desc_format = target_sense_desc_format(cmd->se_dev);
memset(buf, 0, SE_SENSE_BUF);
@@ -1157,32 +1177,11 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
- /*
- * CURRENT ERROR, UNIT ATTENTION
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-
- /*
- * The Additional Sense Code (ASC) from the UNIT ATTENTION
- */
- buf[SPC_ASC_KEY_OFFSET] = ua_asc;
- buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
- buf[7] = 0x0A;
- } else {
- /*
- * CURRENT ERROR, NO SENSE
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
-
- /*
- * NO ADDITIONAL SENSE INFORMATION
- */
- buf[SPC_ASC_KEY_OFFSET] = 0x00;
- buf[7] = 0x0A;
- }
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
+ scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
+ ua_asc, ua_ascq);
+ else
+ scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
@@ -1196,17 +1195,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1214,11 +1209,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
+
nacl = sess->se_node_acl;
rcu_read_lock();
@@ -1229,10 +1222,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@@ -1241,12 +1236,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
+
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
@@ -1405,9 +1410,6 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
}
break;
default:
- pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
- " 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(), cdb[0]);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
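The EVPD 0xb0 hunk derives MAXIMUM TRANSFER LENGTH from the fabric's scatter-gather limit under the stated one-page-per-entry assumption. With assumed numbers — max_data_sg_nents = 32, PAGE_SIZE = 4096, a 512-byte block size and hw_max_sectors = 1024 — mtl = 32 * 4096 / 512 = 256 blocks, and min_not_zero() reports 256 as the smaller nonzero value (a zero mtl, meaning no fabric limit, would fall back to hw_max_sectors). The same math as a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int sg_nents = 32, page_sz = 4096;	/* assumed fabric */
	unsigned int block_sz = 512, hw_max = 1024;	/* assumed device */
	unsigned int mtl = sg_nents * page_sz / block_sz;

	/* open-coded min_not_zero(mtl, hw_max) */
	unsigned int mtl_rep = (mtl && mtl < hw_max) ? mtl : hw_max;

	printf("reported MTL: %u blocks\n", mtl_rep);	/* prints 256 */
	return 0;
}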
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index babde4ad841f..2d0381dd105c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -41,6 +41,7 @@
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
+#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;
@@ -83,6 +84,22 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
+void core_allocate_nexus_loss_ua(
+ struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+
+ if (!nacl)
+ return;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_scsi3_ua_allocate(deve, 0x29,
+ ASCQ_29H_NEXUS_LOSS_OCCURRED);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
+
/* core_tpg_add_node_to_devs():
*
*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ce8574b7220c..5bacc7b5ed6d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -39,6 +39,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -1074,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+ unsigned int size)
+{
+ u32 mtl;
+
+ if (!cmd->se_tfo->max_data_sg_nents)
+ return TCM_NO_SENSE;
+ /*
+ * Check if fabric enforced maximum SGL entries per I/O descriptor
+ * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
+ * residual_count and reduce original cmd->data_length to maximum
+ * length based on single PAGE_SIZE entry scatter-lists.
+ */
+ mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+ if (cmd->data_length > mtl) {
+ /*
+ * If an existing CDB overflow is present, calculate new residual
+ * based on CDB size minus fabric maximum transfer length.
+ *
+ * If an existing CDB underflow is present, calculate new residual
+ * based on original cmd->data_length minus fabric maximum transfer
+ * length.
+ *
+ * Otherwise, set the underflow residual based on cmd->data_length
+ * minus fabric maximum transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ cmd->residual_count = (size - mtl);
+ } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ u32 orig_dl = size + cmd->residual_count;
+ cmd->residual_count = (orig_dl - mtl);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - mtl);
+ }
+ cmd->data_length = mtl;
+ /*
+ * Reset sbc_check_prot() calculated protection payload
+ * length based upon the new smaller MTL.
+ */
+ if (cmd->prot_length) {
+ u32 sectors = (mtl / dev->dev_attrib.block_size);
+ cmd->prot_length = dev->prot_length * sectors;
+ }
+ }
+ return TCM_NO_SENSE;
+}
+
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
@@ -1087,9 +1137,9 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);
- if (cmd->data_direction == DMA_TO_DEVICE) {
- pr_err("Rejecting underflow/overflow"
- " WRITE data\n");
+ if (cmd->data_direction == DMA_TO_DEVICE &&
+ cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ pr_err("Rejecting underflow/overflow WRITE data\n");
return TCM_INVALID_CDB_FIELD;
}
/*
@@ -1119,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
}
}
- return 0;
+ return target_check_max_data_sg_nents(cmd, dev, size);
}
@@ -1177,14 +1227,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
" emulation is not supported\n");
return TCM_INVALID_CDB_FIELD;
}
- /*
- * Used to determine when ORDERED commands should go from
- * Dormant to Active status.
- */
- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
- pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
- cmd->se_ordered_id, cmd->sam_task_attr,
- dev->transport->name);
+
return 0;
}
@@ -1246,6 +1289,11 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
}
ret = dev->transport->parse_cdb(cmd);
+ if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
+ pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->get_fabric_name(),
+ cmd->se_sess->se_node_acl->initiatorname,
+ cmd->t_task_cdb[0]);
if (ret)
return ret;
@@ -1693,8 +1741,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
check_stop:
transport_lun_remove_cmd(cmd);
- if (!transport_cmd_check_stop_to_fabric(cmd))
- ;
+ transport_cmd_check_stop_to_fabric(cmd);
return;
queue_full:
@@ -1767,16 +1814,14 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
*/
switch (cmd->sam_task_attr) {
case TCM_HEAD_TAG:
- pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
- "se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->se_ordered_id);
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+ cmd->t_task_cdb[0]);
return false;
case TCM_ORDERED_TAG:
atomic_inc_mb(&dev->dev_ordered_sync);
- pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
- " se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->se_ordered_id);
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+ cmd->t_task_cdb[0]);
/*
* Execute an ORDERED command if no other older commands
@@ -1800,10 +1845,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
spin_unlock(&dev->delayed_cmd_lock);
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
+ cmd->t_task_cdb[0], cmd->sam_task_attr);
return true;
}
@@ -1888,20 +1931,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
- " SIMPLE: %u\n", dev->dev_cur_ordered_id,
- cmd->se_ordered_id);
+ pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
+ dev->dev_cur_ordered_id);
} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for"
- " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
- cmd->se_ordered_id);
+ pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
+ dev->dev_cur_ordered_id);
} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
atomic_dec_mb(&dev->dev_ordered_sync);
dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
- " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+ pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
+ dev->dev_cur_ordered_id);
}
target_restart_delayed_cmds(dev);
@@ -2615,37 +2656,159 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_wait_for_tasks);
-static int transport_get_sense_codes(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+struct sense_info {
+ u8 key;
+ u8 asc;
+ u8 ascq;
+ bool add_sector_info;
+};
+
+static const struct sense_info sense_info_table[] = {
+ [TCM_NO_SENSE] = {
+ .key = NOT_READY
+ },
+ [TCM_NON_EXISTENT_LUN] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
+ },
+ [TCM_UNSUPPORTED_SCSI_OPCODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+ },
+ [TCM_SECTOR_COUNT_TOO_MANY] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+ },
+ [TCM_UNKNOWN_MODE_PAGE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x24, /* INVALID FIELD IN CDB */
+ },
+ [TCM_CHECK_CONDITION_ABORT_CMD] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
+ .ascq = 0x03,
+ },
+ [TCM_INCORRECT_AMOUNT_OF_DATA] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x0c, /* WRITE ERROR */
+ .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
+ },
+ [TCM_INVALID_CDB_FIELD] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x24, /* INVALID FIELD IN CDB */
+ },
+ [TCM_INVALID_PARAMETER_LIST] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
+ },
+ [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
+ },
+ [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x0c, /* WRITE ERROR */
+ .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
+ },
+ [TCM_SERVICE_CRC_ERROR] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
+ .ascq = 0x05, /* N/A */
+ },
+ [TCM_SNACK_REJECTED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x11, /* READ ERROR */
+ .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
+ },
+ [TCM_WRITE_PROTECTED] = {
+ .key = DATA_PROTECT,
+ .asc = 0x27, /* WRITE PROTECTED */
+ },
+ [TCM_ADDRESS_OUT_OF_RANGE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ },
+ [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
+ .key = UNIT_ATTENTION,
+ },
+ [TCM_CHECK_CONDITION_NOT_READY] = {
+ .key = NOT_READY,
+ },
+ [TCM_MISCOMPARE_VERIFY] = {
+ .key = MISCOMPARE,
+ .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
+ .ascq = 0x00,
+ },
+ [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
+ .add_sector_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ .add_sector_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ .add_sector_info = true,
+ },
+ [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
+ /*
+ * Returning ILLEGAL REQUEST would cause immediate IO errors on
+ * Solaris initiators. Returning NOT READY instead means the
+ * operations will be retried a finite number of times and we
+ * can survive intermittent errors.
+ */
+ .key = NOT_READY,
+ .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+ },
+};
+
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
- *asc = cmd->scsi_asc;
- *ascq = cmd->scsi_ascq;
+ const struct sense_info *si;
+ u8 *buffer = cmd->sense_buffer;
+ int r = (__force int)reason;
+ u8 asc, ascq;
+ bool desc_format = target_sense_desc_format(cmd->se_dev);
- return 0;
-}
+ if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
+ si = &sense_info_table[r];
+ else
+ si = &sense_info_table[(__force int)
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
-static
-void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
-{
- /* Place failed LBA in sense data information descriptor 0. */
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
- buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
- buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
- buffer[SPC_VALIDITY_OFFSET] = 0x80;
+ if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
+ core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+ WARN_ON_ONCE(asc == 0);
+ } else if (si->asc == 0) {
+ WARN_ON_ONCE(cmd->scsi_asc == 0);
+ asc = cmd->scsi_asc;
+ ascq = cmd->scsi_ascq;
+ } else {
+ asc = si->asc;
+ ascq = si->ascq;
+ }
+
+ scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+ if (si->add_sector_info)
+ return scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->bad_sector);
- /* Descriptor Information: failing sector */
- put_unaligned_be64(bad_sector, &buffer[12]);
+ return 0;
}
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
{
- unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;
- u8 asc = 0, ascq = 0;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
@@ -2655,243 +2818,17 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!reason && from_transport)
- goto after_reason;
+ if (!from_transport) {
+ int rc;
- if (!from_transport)
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
-
- /*
- * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
- * SENSE KEY values from include/scsi/scsi.h
- */
- switch (reason) {
- case TCM_NO_SENSE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* Not Ready */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- /* NO ADDITIONAL SENSE INFORMATION */
- buffer[SPC_ASC_KEY_OFFSET] = 0;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0;
- break;
- case TCM_NON_EXISTENT_LUN:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL UNIT NOT SUPPORTED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x25;
- break;
- case TCM_UNSUPPORTED_SCSI_OPCODE:
- case TCM_SECTOR_COUNT_TOO_MANY:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID COMMAND OPERATION CODE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x20;
- break;
- case TCM_UNKNOWN_MODE_PAGE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN CDB */
- buffer[SPC_ASC_KEY_OFFSET] = 0x24;
- break;
- case TCM_CHECK_CONDITION_ABORT_CMD:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* BUS DEVICE RESET FUNCTION OCCURRED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x29;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
- break;
- case TCM_INCORRECT_AMOUNT_OF_DATA:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* WRITE ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
- /* NOT ENOUGH UNSOLICITED DATA */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
- break;
- case TCM_INVALID_CDB_FIELD:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN CDB */
- buffer[SPC_ASC_KEY_OFFSET] = 0x24;
- break;
- case TCM_INVALID_PARAMETER_LIST:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN PARAMETER LIST */
- buffer[SPC_ASC_KEY_OFFSET] = 0x26;
- break;
- case TCM_PARAMETER_LIST_LENGTH_ERROR:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* PARAMETER LIST LENGTH ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
- break;
- case TCM_UNEXPECTED_UNSOLICITED_DATA:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* WRITE ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
- /* UNEXPECTED_UNSOLICITED_DATA */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
- break;
- case TCM_SERVICE_CRC_ERROR:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* PROTOCOL SERVICE CRC ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x47;
- /* N/A */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
- break;
- case TCM_SNACK_REJECTED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* READ ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x11;
- /* FAILED RETRANSMISSION REQUEST */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
- break;
- case TCM_WRITE_PROTECTED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* DATA PROTECT */
- buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
- /* WRITE PROTECTED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x27;
- break;
- case TCM_ADDRESS_OUT_OF_RANGE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x21;
- break;
- case TCM_CHECK_CONDITION_UNIT_ATTENTION:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* UNIT ATTENTION */
- buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- buffer[SPC_ASC_KEY_OFFSET] = asc;
- buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
- break;
- case TCM_CHECK_CONDITION_NOT_READY:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* Not Ready */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- transport_get_sense_codes(cmd, &asc, &ascq);
- buffer[SPC_ASC_KEY_OFFSET] = asc;
- buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
- break;
- case TCM_MISCOMPARE_VERIFY:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
- /* MISCOMPARE DURING VERIFY OPERATION */
- buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
- break;
- case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL BLOCK GUARD CHECK FAILED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x10;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
- transport_err_sector_info(buffer, cmd->bad_sector);
- break;
- case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x10;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
- transport_err_sector_info(buffer, cmd->bad_sector);
- break;
- case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x10;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
- transport_err_sector_info(buffer, cmd->bad_sector);
- break;
- case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
- default:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /*
- * Returning ILLEGAL REQUEST would cause immediate IO errors on
- * Solaris initiators. Returning NOT READY instead means the
- * operations will be retried a finite number of times and we
- * can survive intermittent errors.
- */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- /* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x08;
- break;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ rc = translate_sense_reason(cmd, reason);
+ if (rc)
+ return rc;
}
- /*
- * This code uses linux/include/scsi/scsi.h SAM status codes!
- */
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- /*
- * Automatically padded, this value is encoded in the fabric's
- * data_length response PDU containing the SCSI defined sense data.
- */
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
-after_reason:
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
}
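target_check_max_data_sg_nents() above clips an oversized transfer to the fabric's single-page SGL budget and books the difference as residual. Worked through with an assumed limit of max_data_sg_nents = 16 and 4 KiB pages: mtl = 64 KiB, so an 80 KiB READ completes with data_length reduced to 64 KiB, SCF_UNDERFLOW_BIT set and residual_count = 16 KiB; a CDB that already carried an underflow instead re-derives the residual from the original length (orig_dl = size + residual_count), keeping it relative to what the initiator asked for. The no-prior-flags case, sketched with those numbers:

/* Assumed: 80 KiB READ against a 16-entry x 4 KiB fabric SGL limit. */
u32 mtl = 16 * 4096;			/* 65536 */

cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = 81920 - mtl;	/* 16384 bytes short */
cmd->data_length = mtl;			/* initiator sees 64 KiB + residual */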
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c448ef421ce7..937cebf76633 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -25,6 +25,7 @@
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
+#include <linux/stringify.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -538,14 +539,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
- transport_generic_request_failure(cmd->se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
- cmd->se_cmd = NULL;
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return;
- }
-
- if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+ entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
se_cmd->scsi_sense_length);
@@ -577,7 +572,6 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
- LIST_HEAD(cpl_cmds);
unsigned long flags;
int handled = 0;
@@ -905,7 +899,7 @@ static int tcmu_configure_device(struct se_device *dev)
WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(udev->data_size % PAGE_SIZE);
- info->version = xstr(TCMU_MAILBOX_VERSION);
+ info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t) udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4515f52546f8..47fe94ee10b8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -450,6 +450,8 @@ int target_xcopy_setup_pt(void)
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
+ INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
+ spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
@@ -644,7 +646,7 @@ static int target_xcopy_read_source(
pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)src_lba, src_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->src_pt_cmd = xpt_cmd;
@@ -704,7 +706,7 @@ static int target_xcopy_write_destination(
pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)dst_lba, dst_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
+ transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->dst_pt_cmd = xpt_cmd;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 68031723e5be..aa3caca8bace 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -255,7 +255,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
struct ft_cmd *cmd = arg;
struct fc_frame_header *fh;
- if (unlikely(IS_ERR(fp))) {
+ if (IS_ERR(fp)) {
/* XXX need to find cmd if queued */
cmd->seq = NULL;
cmd->aborted = true;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 118938ee8552..039004400987 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -340,6 +340,14 @@ config ACPI_THERMAL_REL
tristate
depends on ACPI
+config INTEL_PCH_THERMAL
+ tristate "Intel PCH Thermal Reporting Driver"
+ depends on X86 && PCI
+ help
+ Enable this to support thermal reporting on certain Intel PCHs.
+ The thermal reporting device will provide temperature readings,
+ programmable trip points and other information.
+
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 535dfee1496f..26f160809959 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
+obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 01255fd65135..26b8d326546a 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -155,7 +155,7 @@ static bool armada_is_valid(struct armada_thermal_priv *priv)
}
static int armada_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
struct armada_thermal_priv *priv = thermal->devdata;
unsigned long reg;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
- switch (event) {
+ if (event != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
- case CPUFREQ_ADJUST:
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
- continue;
+ mutex_lock(&cooling_list_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ continue;
- max_freq = cpufreq_dev->cpufreq_val;
+ /*
+ * policy->max is the maximum allowed frequency defined by user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0,
- max_freq);
- }
- mutex_unlock(&cooling_cpufreq_lock);
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
break;
- default:
- return NOTIFY_DONE;
}
+ mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
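The cpufreq cooling rework splits one big lock in two: cooling_cpufreq_lock now only guards notifier registration, driven by the new cpufreq_dev_count, while cooling_list_lock covers cpufreq_dev_list walks, so the hot policy-notifier path never contends with device registration. The first-in/last-out notifier pattern, sketched from the hunks above:

mutex_lock(&cooling_cpufreq_lock);
if (!cpufreq_dev_count++)		/* first device: hook notifier */
	cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
				  CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);

/* ... and on unregister ... */
mutex_lock(&cooling_cpufreq_lock);
if (!--cpufreq_dev_count)		/* last device: unhook notifier */
	cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
				    CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);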
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 2fb273c4baa9..652acd8fbe48 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -107,8 +107,7 @@ static int db8500_cdev_unbind(struct thermal_zone_device *thermal,
}
/* Callback to get current temperature */
-static int db8500_sys_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+static int db8500_sys_get_temp(struct thermal_zone_device *thermal, int *temp)
{
struct db8500_thermal_zone *pzone = thermal->devdata;
@@ -180,7 +179,7 @@ static int db8500_sys_get_trip_type(struct thermal_zone_device *thermal,
/* Callback to get trip point temperature */
static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct db8500_thermal_zone *pzone = thermal->devdata;
struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
@@ -195,7 +194,7 @@ static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
/* Callback to get critical trip point temperature */
static int db8500_sys_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
struct db8500_thermal_zone *pzone = thermal->devdata;
struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 09f6e304c274..a0bc9de42553 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -93,7 +93,7 @@ static int dove_init_sensor(const struct dove_thermal_priv *priv)
}
static int dove_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
unsigned long reg;
struct dove_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index c2c10bbe24d6..34fe36504a55 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -34,7 +34,7 @@
static int get_trip_level(struct thermal_zone_device *tz)
{
int count = 0;
- unsigned long trip_temp;
+ int trip_temp;
enum thermal_trip_type trip_type;
if (tz->trips == 0 || !tz->ops->get_trip_temp)
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index c5dd76b2ee74..70836c5b89bc 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -25,14 +25,13 @@
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
- long trip_temp;
- unsigned long trip_hyst;
+ int trip_temp, trip_hyst;
struct thermal_instance *instance;
tz->ops->get_trip_temp(tz, trip, &trip_temp);
tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
- dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n",
+ dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
trip, trip_temp, tz->temperature,
trip_hyst);
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..36d07295f8e3 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -155,7 +155,7 @@ static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
mutex_unlock(&data->thermal_lock);
}
-static int hisi_thermal_get_temp(void *_sensor, long *temp)
+static int hisi_thermal_get_temp(void *_sensor, int *temp)
{
struct hisi_thermal_sensor *sensor = _sensor;
struct hisi_thermal_data *data = sensor->thermal;
@@ -178,7 +178,7 @@ static int hisi_thermal_get_temp(void *_sensor, long *temp)
data->irq_bind_sensor = sensor_id;
mutex_unlock(&data->thermal_lock);
- dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n",
+ dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n",
sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
/*
* Bind irq to sensor for two cases:
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
static struct platform_driver hisi_thermal_driver = {
.driver = {
.name = "hisi_thermal",
- .owner = THIS_MODULE,
.pm = &hisi_thermal_pm_ops,
.of_match_table = of_hisi_thermal_match,
},
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index fde4c2876d14..4bec1d3c3d27 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -98,10 +98,10 @@ struct imx_thermal_data {
enum thermal_device_mode mode;
struct regmap *tempmon;
u32 c1, c2; /* See formula in imx_get_sensor_data() */
- unsigned long temp_passive;
- unsigned long temp_critical;
- unsigned long alarm_temp;
- unsigned long last_temp;
+ int temp_passive;
+ int temp_critical;
+ int alarm_temp;
+ int last_temp;
bool irq_enabled;
int irq;
struct clk *thermal_clk;
@@ -109,7 +109,7 @@ struct imx_thermal_data {
};
static void imx_set_panic_temp(struct imx_thermal_data *data,
- signed long panic_temp)
+ int panic_temp)
{
struct regmap *map = data->tempmon;
int critical_value;
@@ -121,7 +121,7 @@ static void imx_set_panic_temp(struct imx_thermal_data *data,
}
static void imx_set_alarm_temp(struct imx_thermal_data *data,
- signed long alarm_temp)
+ int alarm_temp)
{
struct regmap *map = data->tempmon;
int alarm_value;
@@ -133,7 +133,7 @@ static void imx_set_alarm_temp(struct imx_thermal_data *data,
TEMPSENSE0_ALARM_VALUE_SHIFT);
}
-static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct imx_thermal_data *data = tz->devdata;
struct regmap *map = data->tempmon;
@@ -189,13 +189,13 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
if (data->alarm_temp == data->temp_critical &&
*temp < data->temp_passive) {
imx_set_alarm_temp(data, data->temp_passive);
- dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+ dev_dbg(&tz->device, "thermal alarm off: T < %d\n",
data->alarm_temp / 1000);
}
}
if (*temp != data->last_temp) {
- dev_dbg(&tz->device, "millicelsius: %ld\n", *temp);
+ dev_dbg(&tz->device, "millicelsius: %d\n", *temp);
data->last_temp = *temp;
}
@@ -262,8 +262,7 @@ static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
return 0;
}
-static int imx_get_crit_temp(struct thermal_zone_device *tz,
- unsigned long *temp)
+static int imx_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
struct imx_thermal_data *data = tz->devdata;
@@ -272,7 +271,7 @@ static int imx_get_crit_temp(struct thermal_zone_device *tz,
}
static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
- unsigned long *temp)
+ int *temp)
{
struct imx_thermal_data *data = tz->devdata;
@@ -282,7 +281,7 @@ static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
}
static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
- unsigned long temp)
+ int temp)
{
struct imx_thermal_data *data = tz->devdata;
@@ -434,7 +433,7 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
{
struct imx_thermal_data *data = dev;
- dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n",
+ dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n",
data->alarm_temp / 1000);
thermal_zone_device_update(data->tz);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 031018e7a65b..5836e5554433 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -186,7 +186,7 @@ static int int3400_thermal_run_osc(acpi_handle handle,
}
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
*temp = 20 * 1000; /* faked temp sensor with 20C */
return 0;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index 1e25133d35e2..b9b2666aa94c 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -20,7 +20,7 @@
#include "int340x_thermal_zone.h"
static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+ int *temp)
{
struct int34x_thermal_zone *d = zone->devdata;
unsigned long long tmp;
@@ -49,7 +49,7 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
}
static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct int34x_thermal_zone *d = zone->devdata;
int i;
@@ -114,7 +114,7 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
}
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
- int trip, unsigned long temp)
+ int trip, int temp)
{
struct int34x_thermal_zone *d = zone->devdata;
acpi_status status;
@@ -136,7 +136,7 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct int34x_thermal_zone *d = zone->devdata;
acpi_status status;
@@ -163,7 +163,7 @@ static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
};
static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
- unsigned long *temp)
+ int *temp)
{
unsigned long long r;
acpi_status status;
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
index 9f38ab72c4bf..aaadf724ff2e 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -21,7 +21,7 @@
#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10
struct active_trip {
- unsigned long temp;
+ int temp;
int id;
bool valid;
};
@@ -31,11 +31,11 @@ struct int34x_thermal_zone {
struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
unsigned long *aux_trips;
int aux_trip_nr;
- unsigned long psv_temp;
+ int psv_temp;
int psv_trip_id;
- unsigned long crt_temp;
+ int crt_temp;
int crt_trip_id;
- unsigned long hot_temp;
+ int hot_temp;
int hot_trip_id;
struct thermal_zone_device *zone;
struct thermal_zone_device_ops *override_ops;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 3df3dc34b124..ccc0ad02d066 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -145,7 +145,7 @@ static int get_tjmax(void)
return -EINVAL;
}
-static int read_temp_msr(unsigned long *temp)
+static int read_temp_msr(int *temp)
{
int cpu;
u32 eax, edx;
@@ -177,7 +177,7 @@ err_ret:
}
static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+ int *temp)
{
int ret;
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
new file mode 100644
index 000000000000..50c7da79be83
--- /dev/null
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -0,0 +1,283 @@
+/* intel_pch_thermal.c - Intel PCH Thermal driver
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * Authors:
+ * Tushar Dave <tushar.n.dave@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/thermal.h>
+
+/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
+
+/* Wildcat Point-LP PCH Thermal registers */
+#define WPT_TEMP 0x0000 /* Temperature */
+#define WPT_TSC 0x04 /* Thermal Sensor Control */
+#define WPT_TSS 0x06 /* Thermal Sensor Status */
+#define WPT_TSEL 0x08 /* Thermal Sensor Enable and Lock */
+#define WPT_TSREL 0x0A /* Thermal Sensor Report Enable and Lock */
+#define WPT_TSMIC 0x0C /* Thermal Sensor SMI Control */
+#define WPT_CTT 0x0010 /* Catastrophic Trip Point */
+#define WPT_TAHV 0x0014 /* Thermal Alert High Value */
+#define WPT_TALV 0x0018 /* Thermal Alert Low Value */
+#define WPT_TL 0x00000040 /* Throttle Value */
+#define WPT_PHL 0x0060 /* PCH Hot Level */
+#define WPT_PHLC 0x62 /* PHL Control */
+#define WPT_TAS 0x80 /* Thermal Alert Status */
+#define WPT_TSPIEN 0x82 /* PCI Interrupt Event Enables */
+#define WPT_TSGPEN 0x84 /* General Purpose Event Enables */
+
+/* Wildcat Point-LP PCH Thermal Register bit definitions */
+#define WPT_TEMP_TSR 0x00ff /* Temp TS Reading */
+#define WPT_TSC_CPDE 0x01 /* Catastrophic Power-Down Enable */
+#define WPT_TSS_TSDSS 0x10 /* Thermal Sensor Dynamic Shutdown Status */
+#define WPT_TSS_GPES 0x08 /* GPE status */
+#define WPT_TSEL_ETS 0x01 /* Enable TS */
+#define WPT_TSEL_PLDB 0x80 /* TSEL Policy Lock-Down Bit */
+#define WPT_TL_TOL 0x000001FF /* T0 Level */
+#define WPT_TL_T1L 0x1ff00000 /* T1 Level */
+#define WPT_TL_TTEN 0x20000000 /* TT Enable */
+
+static char driver_name[] = "Intel PCH thermal driver";
+
+struct pch_thermal_device {
+ void __iomem *hw_base;
+ const struct pch_dev_ops *ops;
+ struct pci_dev *pdev;
+ struct thermal_zone_device *tzd;
+ int crt_trip_id;
+ unsigned long crt_temp;
+ int hot_trip_id;
+ unsigned long hot_temp;
+};
+
+static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
+{
+ u8 tsel;
+ u16 trip_temp;
+
+ *nr_trips = 0;
+
+ /* Check if BIOS has already enabled thermal sensor */
+ if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+ goto read_trips;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+ /*
+	 * When TSEL's Policy Lock-Down bit is 1, TSEL becomes read-only.
+	 * If so, the thermal sensor cannot be enabled. Bail out.
+ */
+ if (tsel & WPT_TSEL_PLDB) {
+ dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+ return -ENODEV;
+ }
+
+ writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+ if (!(WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))) {
+ dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+ return -ENODEV;
+ }
+
+read_trips:
+ ptd->crt_trip_id = -1;
+ trip_temp = readw(ptd->hw_base + WPT_CTT);
+ trip_temp &= 0x1FF;
+ if (trip_temp) {
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ ptd->crt_temp = trip_temp * 1000 / 2 - 50000;
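+		/*
+		 * Illustrative conversion: a 9-bit raw CTT reading of
+		 * 0x190 (400 counts) maps to 400 * 1000 / 2 - 50000 =
+		 * 150000 millicelsius, i.e. 150 degrees C.
+		 */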
+ ptd->crt_trip_id = 0;
+ ++(*nr_trips);
+ }
+
+ ptd->hot_trip_id = -1;
+ trip_temp = readw(ptd->hw_base + WPT_PHL);
+ trip_temp &= 0x1FF;
+ if (trip_temp) {
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ ptd->hot_temp = trip_temp * 1000 / 2 - 50000;
+ ptd->hot_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
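+	/*
+	 * Either trip is reported only when the BIOS programmed a nonzero
+	 * CTT/PHL value; otherwise its trip id stays -1 and nr_trips is
+	 * left unchanged.
+	 */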
+
+ return 0;
+}
+
+static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
+{
+ u8 wpt_temp;
+
+ wpt_temp = WPT_TEMP_TSR & readl(ptd->hw_base + WPT_TEMP);
+
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ *temp = (wpt_temp * 1000 / 2 - 50000);
+
+ return 0;
+}
+
+struct pch_dev_ops {
+ int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
+ int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+};
+
+
+/* dev ops for Wildcat Point */
+static struct pch_dev_ops pch_dev_ops_wpt = {
+ .hw_init = pch_wpt_init,
+ .get_temp = pch_wpt_get_temp,
+};
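+
+/*
+ * The ops indirection keeps the probe path generic: a future PCH
+ * generation would presumably only need its own hw_init/get_temp
+ * pair plus a new device ID case in intel_pch_thermal_probe().
+ */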
+
+static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ return ptd->ops->get_temp(ptd, temp);
+}
+
+static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
+ enum thermal_trip_type *type)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ if (ptd->crt_trip_id == trip)
+ *type = THERMAL_TRIP_CRITICAL;
+ else if (ptd->hot_trip_id == trip)
+ *type = THERMAL_TRIP_HOT;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *temp)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ if (ptd->crt_trip_id == trip)
+ *temp = ptd->crt_temp;
+ else if (ptd->hot_trip_id == trip)
+ *temp = ptd->hot_temp;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+ .get_temp = pch_thermal_get_temp,
+ .get_trip_type = pch_get_trip_type,
+ .get_trip_temp = pch_get_trip_temp,
+};
+
+
+static int intel_pch_thermal_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pch_thermal_device *ptd;
+ int err;
+ int nr_trips;
+ char *dev_name;
+
+ ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
+ if (!ptd)
+ return -ENOMEM;
+
+ switch (pdev->device) {
+ case PCH_THERMAL_DID_WPT:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_wildcat_point";
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown pch thermal device\n");
+ return -ENODEV;
+ }
+
+ pci_set_drvdata(pdev, ptd);
+ ptd->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request pci region\n");
+ goto error_disable;
+ }
+
+ ptd->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!ptd->hw_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "failed to map mem base\n");
+ goto error_release;
+ }
+
+ err = ptd->ops->hw_init(ptd, &nr_trips);
+ if (err)
+ goto error_cleanup;
+
+ ptd->tzd = thermal_zone_device_register(dev_name, nr_trips, 0, ptd,
+ &tzd_ops, NULL, 0, 0);
+ if (IS_ERR(ptd->tzd)) {
+ dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
+ dev_name);
+ err = PTR_ERR(ptd->tzd);
+ goto error_cleanup;
+ }
+
+ return 0;
+
+error_cleanup:
+ iounmap(ptd->hw_base);
+error_release:
+ pci_release_regions(pdev);
+error_disable:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "pci device failed to probe\n");
+ return err;
+}
+
+static void intel_pch_thermal_remove(struct pci_dev *pdev)
+{
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(ptd->tzd);
+ iounmap(ptd->hw_base);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id intel_pch_thermal_id[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+
+static struct pci_driver intel_pch_thermal_driver = {
+ .name = "intel_pch_thermal",
+ .id_table = intel_pch_thermal_id,
+ .probe = intel_pch_thermal_probe,
+ .remove = intel_pch_thermal_remove,
+};
+
+module_pci_driver(intel_pch_thermal_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PCH Thermal driver");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 5820e8513927..6c79588251d5 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
/* check result for the last window */
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = rdtsc();
/* calculate pkg cstate vs tsc ratio */
if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
u64 val64;
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = rdtsc();
jiffies_now = jiffies;
/* calculate pkg cstate vs tsc ratio */
@@ -693,11 +693,14 @@ static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
{ X86_VENDOR_INTEL, 6, 0x3f},
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
+ { X86_VENDOR_INTEL, 6, 0x47},
{ X86_VENDOR_INTEL, 6, 0x4c},
{ X86_VENDOR_INTEL, 6, 0x4d},
+ { X86_VENDOR_INTEL, 6, 0x4e},
{ X86_VENDOR_INTEL, 6, 0x4f},
{ X86_VENDOR_INTEL, 6, 0x56},
{ X86_VENDOR_INTEL, 6, 0x57},
+ { X86_VENDOR_INTEL, 6, 0x5e},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c
index 4434ec812cb7..5ed90e6c8a64 100644
--- a/drivers/thermal/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel_quark_dts_thermal.c
@@ -186,7 +186,7 @@ static int soc_dts_disable(struct thermal_zone_device *tzd)
return ret;
}
-static int _get_trip_temp(int trip, unsigned long *temp)
+static int _get_trip_temp(int trip, int *temp)
{
int status;
u32 out;
@@ -212,19 +212,18 @@ static int _get_trip_temp(int trip, unsigned long *temp)
}
static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
return _get_trip_temp(trip, temp);
}
-static inline int sys_get_crit_temp(struct thermal_zone_device *tzd,
- unsigned long *temp)
+static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, int *temp)
{
return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
}
static int update_trip_temp(struct soc_sensor_entry *aux_entry,
- int trip, unsigned long temp)
+ int trip, int temp)
{
u32 out;
u32 temp_out;
@@ -272,7 +271,7 @@ failed:
}
static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- unsigned long temp)
+ int temp)
{
return update_trip_temp(tzd->devdata, trip, temp);
}
@@ -289,7 +288,7 @@ static int sys_get_trip_type(struct thermal_zone_device *thermal,
}
static int sys_get_curr_temp(struct thermal_zone_device *tzd,
- unsigned long *temp)
+ int *temp)
{
u32 out;
int ret;
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
index 42e4b6ac3875..5841d1d72996 100644
--- a/drivers/thermal/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -80,7 +80,7 @@ err_ret:
}
static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
- unsigned long *temp)
+ int *temp)
{
int status;
u32 out;
@@ -106,7 +106,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
}
static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
- int thres_index, unsigned long temp,
+ int thres_index, int temp,
enum thermal_trip_type trip_type)
{
int status;
@@ -196,7 +196,7 @@ err_restore_ptps:
}
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- unsigned long temp)
+ int temp)
{
struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
struct intel_soc_dts_sensors *sensors = dts->sensors;
@@ -226,7 +226,7 @@ static int sys_get_trip_type(struct thermal_zone_device *tzd,
}
static int sys_get_curr_temp(struct thermal_zone_device *tzd,
- unsigned long *temp)
+ int *temp)
{
int status;
u32 out;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 11041fe63dc2..892236621767 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -33,7 +33,7 @@ struct kirkwood_thermal_priv {
};
static int kirkwood_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
unsigned long reg;
struct kirkwood_thermal_priv *priv = thermal->devdata;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b295b2b6c191..42b7d4253b94 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -91,7 +91,7 @@ struct __thermal_zone {
/*** DT thermal zone device callbacks ***/
static int of_thermal_get_temp(struct thermal_zone_device *tz,
- unsigned long *temp)
+ int *temp)
{
struct __thermal_zone *data = tz->devdata;
@@ -177,7 +177,7 @@ EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
* Return: zero on success, error code otherwise
*/
static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
- unsigned long temp)
+ int temp)
{
struct __thermal_zone *data = tz->devdata;
@@ -311,7 +311,7 @@ static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
}
static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
- unsigned long *temp)
+ int *temp)
{
struct __thermal_zone *data = tz->devdata;
@@ -324,7 +324,7 @@ static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
}
static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
- unsigned long temp)
+ int temp)
{
struct __thermal_zone *data = tz->devdata;
@@ -338,7 +338,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
}
static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
- unsigned long *hyst)
+ int *hyst)
{
struct __thermal_zone *data = tz->devdata;
@@ -351,7 +351,7 @@ static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
}
static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
- unsigned long hyst)
+ int hyst)
{
struct __thermal_zone *data = tz->devdata;
@@ -365,7 +365,7 @@ static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
}
static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
- unsigned long *temp)
+ int *temp)
{
struct __thermal_zone *data = tz->devdata;
int i;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..9c8a7aad0252 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -92,8 +92,8 @@ struct power_allocator_params {
* Return: The power budget for the next period.
*/
static u32 pid_controller(struct thermal_zone_device *tz,
- unsigned long current_temp,
- unsigned long control_temp,
+ int current_temp,
+ int control_temp,
u32 max_allocatable_power)
{
s64 p, i, d, power_range;
@@ -102,7 +102,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
max_power_frac = int_to_frac(max_allocatable_power);
- err = ((s32)control_temp - (s32)current_temp);
+ err = control_temp - current_temp;
err = int_to_frac(err);
/* Calculate the proportional term */
@@ -223,13 +223,14 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
}
static int allocate_power(struct thermal_zone_device *tz,
- unsigned long current_temp,
- unsigned long control_temp)
+ int current_temp,
+ int control_temp)
{
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
u32 *req_power, *max_power, *granted_power, *extra_actor_power;
- u32 total_req_power, max_allocatable_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
u32 total_granted_power, power_range;
int i, num_actors, total_weight, ret = 0;
int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,17 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
}
/*
- * We need to allocate three arrays of the same size:
- * req_power, max_power and granted_power. They are going to
- * be needed until this function returns. Allocate them all
- * in one go to simplify the allocation and deallocation
- * logic.
+ * We need to allocate five arrays of the same size:
+ * req_power, max_power, granted_power, extra_actor_power and
+ * weighted_req_power. They are going to be needed until this
+ * function returns. Allocate them all in one go to simplify
+ * the allocation and deallocation logic.
*/
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
- req_power = devm_kcalloc(&tz->device, num_actors * 4,
- sizeof(*req_power), GFP_KERNEL);
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+ req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
if (!req_power) {
ret = -ENOMEM;
goto unlock;
@@ -266,8 +267,10 @@ static int allocate_power(struct thermal_zone_device *tz,
max_power = &req_power[num_actors];
granted_power = &req_power[2 * num_actors];
extra_actor_power = &req_power[3 * num_actors];
+ weighted_req_power = &req_power[4 * num_actors];
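+	/* all five arrays live back to back inside the single allocation */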
i = 0;
+ total_weighted_req_power = 0;
total_req_power = 0;
max_allocatable_power = 0;
@@ -289,13 +292,14 @@ static int allocate_power(struct thermal_zone_device *tz,
else
weight = instance->weight;
- req_power[i] = frac_to_int(weight * req_power[i]);
+ weighted_req_power[i] = frac_to_int(weight * req_power[i]);
if (power_actor_get_max_power(cdev, tz, &max_power[i]))
continue;
total_req_power += req_power[i];
max_allocatable_power += max_power[i];
+ total_weighted_req_power += weighted_req_power[i];
i++;
}
@@ -303,8 +307,9 @@ static int allocate_power(struct thermal_zone_device *tz,
power_range = pid_controller(tz, current_temp, control_temp,
max_allocatable_power);
- divvy_up_power(req_power, max_power, num_actors, total_req_power,
- power_range, granted_power, extra_actor_power);
+ divvy_up_power(weighted_req_power, max_power, num_actors,
+ total_weighted_req_power, power_range, granted_power,
+ extra_actor_power);
total_granted_power = 0;
i = 0;
@@ -326,9 +331,9 @@ static int allocate_power(struct thermal_zone_device *tz,
granted_power, total_granted_power,
num_actors, power_range,
max_allocatable_power, current_temp,
- (s32)control_temp - (s32)current_temp);
+ control_temp - current_temp);
- devm_kfree(&tz->device, req_power);
+ kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@@ -411,7 +416,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
{
int ret;
struct power_allocator_params *params;
- unsigned long switch_on_temp, control_temp;
+ int switch_on_temp, control_temp;
u32 temperature_threshold;
if (!tz->tzp || !tz->tzp->sustainable_power) {
@@ -420,7 +425,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return -EINVAL;
}
- params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -462,21 +467,21 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free:
- devm_kfree(&tz->device, params);
+ kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
- devm_kfree(&tz->device, tz->governor_data);
+ kfree(tz->governor_data);
tz->governor_data = NULL;
}
static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
{
int ret;
- unsigned long switch_on_temp, control_temp, current_temp;
+ int switch_on_temp, control_temp, current_temp;
struct power_allocator_params *params = tz->governor_data;
/*
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index c8d27b8fb9ec..b677aada5b52 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -117,7 +117,7 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
return 0;
}
-static int qpnp_tm_get_temp(void *data, long *temp)
+static int qpnp_tm_get_temp(void *data, int *temp)
{
struct qpnp_tm_chip *chip = data;
int ret, mili_celsius;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index fe4e767018c4..5d4ae7d705e0 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -200,8 +200,7 @@ err_out_unlock:
return ret;
}
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
{
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
@@ -235,7 +234,7 @@ static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
}
static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
struct device *dev = rcar_priv_to_dev(priv);
@@ -299,7 +298,7 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
static void rcar_thermal_work(struct work_struct *work)
{
struct rcar_thermal_priv *priv;
- unsigned long cctemp, nctemp;
+ int cctemp, nctemp;
priv = container_of(work, struct rcar_thermal_priv, work.work);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index cd8f5f93b42c..c89ffb26a354 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -64,7 +64,7 @@ struct rockchip_tsadc_chip {
void (*control)(void __iomem *reg, bool on);
/* Per-sensor methods */
- int (*get_temp)(int chn, void __iomem *reg, long *temp);
+ int (*get_temp)(int chn, void __iomem *reg, int *temp);
void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
};
@@ -191,7 +191,7 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
return 0;
}
-static long rk_tsadcv2_code_to_temp(u32 code)
+static int rk_tsadcv2_code_to_temp(u32 code)
{
unsigned int low = 0;
unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
@@ -277,7 +277,7 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, long *temp)
+static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
{
u32 val;
@@ -366,7 +366,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
+static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
{
struct rockchip_thermal_sensor *sensor = _sensor;
struct rockchip_thermal_data *thermal = sensor->thermal;
@@ -374,7 +374,7 @@ static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
int retval;
retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
- dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %ld, retval: %d\n",
+ dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
sensor->id, *out_temp, retval);
return retval;
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on OF
+ depends on THERMAL_OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..0bae8cc6c23a 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -207,8 +207,7 @@ struct exynos_tmu_data {
int (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
int (*tmu_read)(struct exynos_tmu_data *data);
- void (*tmu_set_emulation)(struct exynos_tmu_data *data,
- unsigned long temp);
+ void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};
@@ -216,7 +215,7 @@ static void exynos_report_trigger(struct exynos_tmu_data *p)
{
char data[10], *envp[] = { data, NULL };
struct thermal_zone_device *tz = p->tzd;
- unsigned long temp;
+ int temp;
unsigned int i;
if (!tz) {
@@ -517,7 +516,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev)
struct thermal_zone_device *tz = data->tzd;
unsigned int status, trim_info;
unsigned int rising_threshold = 0, falling_threshold = 0;
- unsigned long temp, temp_hist;
+ int temp, temp_hist;
int ret = 0, threshold_code, i, sensor_id, cal_type;
status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -610,7 +609,7 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
unsigned int trim_info = 0, con, rising_threshold;
int ret = 0, threshold_code;
- unsigned long crit_temp = 0;
+ int crit_temp = 0;
/*
* For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -663,7 +662,7 @@ static int exynos7_tmu_initialize(struct platform_device *pdev)
unsigned int status, trim_info;
unsigned int rising_threshold = 0, falling_threshold = 0;
int ret = 0, threshold_code, i;
- unsigned long temp, temp_hist;
+ int temp, temp_hist;
unsigned int reg_off, bit_off;
status = readb(data->base + EXYNOS_TMU_REG_STATUS);
@@ -876,7 +875,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
-static int exynos_get_temp(void *p, long *temp)
+static int exynos_get_temp(void *p, int *temp)
{
struct exynos_tmu_data *data = p;
@@ -896,7 +895,7 @@ static int exynos_get_temp(void *p, long *temp)
#ifdef CONFIG_THERMAL_EMULATION
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
- unsigned long temp)
+ int temp)
{
if (temp) {
temp /= MCELSIUS;
@@ -926,7 +925,7 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
}
static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
- unsigned long temp)
+ int temp)
{
unsigned int val;
u32 emul_con;
@@ -946,7 +945,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
}
static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
- unsigned long temp)
+ int temp)
{
unsigned int val;
@@ -955,7 +954,7 @@ static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
struct exynos_tmu_data *data = drv_data;
int ret = -EINVAL;
@@ -978,7 +977,7 @@ out:
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+static int exynos_tmu_set_emulation(void *drv_data, int temp)
{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
@@ -1296,7 +1295,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
static int exynos_tmu_probe(struct platform_device *pdev)
{
- struct exynos_tmu_platform_data *pdata;
struct exynos_tmu_data *data;
int ret;
@@ -1318,8 +1316,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
goto err_sensor;
- pdata = data->pdata;
-
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1388,8 @@ err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
err_sensor:
+ if (!IS_ERR_OR_NULL(data->regulator))
+ regulator_disable(data->regulator);
thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
return ret;
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index bddb71744a6c..534dd9136662 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -38,7 +38,7 @@ struct spear_thermal_dev {
};
static inline int thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
struct spear_thermal_dev *stdev = thermal->devdata;
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 76c515dd802b..be637e6b01d2 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -111,8 +111,7 @@ static int st_thermal_calibration(struct st_thermal_sensor *sensor)
}
/* Callback to get temperature from HW*/
-static int st_thermal_get_temp(struct thermal_zone_device *th,
- unsigned long *temperature)
+static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
{
struct st_thermal_sensor *sensor = th->devdata;
struct device *dev = sensor->dev;
@@ -159,7 +158,7 @@ static int st_thermal_get_trip_type(struct thermal_zone_device *th,
}
static int st_thermal_get_trip_temp(struct thermal_zone_device *th,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
struct st_thermal_sensor *sensor = th->devdata;
struct device *dev = sensor->dev;
@@ -214,7 +213,7 @@ int st_thermal_register(struct platform_device *pdev,
sensor->ops = sensor->cdata->ops;
- ret = sensor->ops->regmap_init(sensor);
+ ret = (sensor->ops->regmap_init)(sensor);
if (ret)
return ret;
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 5a0f12d08e8b..2f9f7086ac3d 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -113,7 +113,7 @@ static void update_passive_instance(struct thermal_zone_device *tz,
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
- long trip_temp;
+ int trip_temp;
enum thermal_trip_type trip_type;
enum thermal_trend trend;
struct thermal_instance *instance;
@@ -135,7 +135,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trace_thermal_zone_trip(tz, trip, trip_type);
}
- dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
trip, trip_type, trip_temp, trend, throttle);
mutex_lock(&tz->lock);
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
index 9197fc05c5cc..74ea5765938b 100644
--- a/drivers/thermal/tegra_soctherm.c
+++ b/drivers/thermal/tegra_soctherm.c
@@ -293,7 +293,7 @@ static int enable_tsensor(struct tegra_soctherm *tegra,
* H denotes an addition of 0.5 Celsius and N denotes negation
* of the final value.
*/
-static long translate_temp(u16 val)
+static int translate_temp(u16 val)
{
long t;
@@ -306,7 +306,7 @@ static long translate_temp(u16 val)
return t;
}
-static int tegra_thermctl_get_temp(void *data, long *out_temp)
+static int tegra_thermctl_get_temp(void *data, int *out_temp)
{
struct tegra_thermctl_zone *zone = data;
u32 val;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..5e5fc7015c7f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -426,7 +426,7 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz,
static void handle_critical_trips(struct thermal_zone_device *tz,
int trip, enum thermal_trip_type trip_type)
{
- long trip_temp;
+ int trip_temp;
tz->ops->get_trip_temp(tz, trip, &trip_temp);
@@ -465,7 +465,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
}
/**
- * thermal_zone_get_temp() - returns its the temperature of thermal zone
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
* @tz: a valid pointer to a struct thermal_zone_device
* @temp: a valid pointer to where to store the resulting temperature.
*
@@ -474,14 +474,12 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
*
* Return: On success returns 0, an error code otherwise
*/
-int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
int ret = -EINVAL;
-#ifdef CONFIG_THERMAL_EMULATION
int count;
- unsigned long crit_temp = -1UL;
+ int crit_temp = INT_MAX;
enum thermal_trip_type type;
-#endif
if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
goto exit;
@@ -489,25 +487,26 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
mutex_lock(&tz->lock);
ret = tz->ops->get_temp(tz, temp);
-#ifdef CONFIG_THERMAL_EMULATION
- if (!tz->emul_temperature)
- goto skip_emul;
-
- for (count = 0; count < tz->trips; count++) {
- ret = tz->ops->get_trip_type(tz, count, &type);
- if (!ret && type == THERMAL_TRIP_CRITICAL) {
- ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
- break;
- }
- }
- if (ret)
- goto skip_emul;
+ if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
+ for (count = 0; count < tz->trips; count++) {
+ ret = tz->ops->get_trip_type(tz, count, &type);
+ if (!ret && type == THERMAL_TRIP_CRITICAL) {
+ ret = tz->ops->get_trip_temp(tz, count,
+ &crit_temp);
+ break;
+ }
+ }
- if (*temp < crit_temp)
- *temp = tz->emul_temperature;
-skip_emul:
-#endif
+ /*
+ * Only allow emulating a temperature when the real temperature
+ * is below the critical temperature so that the emulation code
+ * cannot hide critical conditions.
+ */
+ if (!ret && *temp < crit_temp)
+ *temp = tz->emul_temperature;
+ }
+
mutex_unlock(&tz->lock);
exit:
return ret;
@@ -516,8 +515,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
static void update_temperature(struct thermal_zone_device *tz)
{
- long temp;
- int ret;
+ int temp, ret;
ret = thermal_zone_get_temp(tz, &temp);
if (ret) {
@@ -577,15 +575,14 @@ static ssize_t
temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- long temperature;
- int ret;
+ int temperature, ret;
ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
- return sprintf(buf, "%ld\n", temperature);
+ return sprintf(buf, "%d\n", temperature);
}
static ssize_t
@@ -689,7 +686,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- long temperature;
+ int temperature;
if (!tz->ops->get_trip_temp)
return -EPERM;
@@ -702,7 +699,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%ld\n", temperature);
+ return sprintf(buf, "%d\n", temperature);
}
static ssize_t
@@ -711,7 +708,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- unsigned long temperature;
+ int temperature;
if (!tz->ops->set_trip_hyst)
return -EPERM;
@@ -719,7 +716,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
return -EINVAL;
- if (kstrtoul(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
/*
@@ -738,7 +735,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- unsigned long temperature;
+ int temperature;
if (!tz->ops->get_trip_hyst)
return -EPERM;
@@ -748,7 +745,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
- return ret ? ret : sprintf(buf, "%ld\n", temperature);
+ return ret ? ret : sprintf(buf, "%d\n", temperature);
}
static ssize_t
@@ -847,7 +844,27 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%s\n", tz->governor->name);
}
-#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+available_policies_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct thermal_governor *pos;
+ ssize_t count = 0;
+ ssize_t size = PAGE_SIZE;
+
+ mutex_lock(&thermal_governor_lock);
+
+ list_for_each_entry(pos, &thermal_governor_list, governor_list) {
+ size = PAGE_SIZE - count;
+ count += scnprintf(buf + count, size, "%s ", pos->name);
+ }
+ count += scnprintf(buf + count, size, "\n");
+
+ mutex_unlock(&thermal_governor_lock);
+
+ return count;
+}
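+
+/*
+ * Reading the new attribute should yield a space-separated list,
+ * e.g. something like "step_wise fair_share user_space" depending
+ * on which governors are built in (example output, not from the
+ * patch itself).
+ */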
+
static ssize_t
emul_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -873,7 +890,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
-#endif/*CONFIG_THERMAL_EMULATION*/
static ssize_t
sustainable_power_show(struct device *dev, struct device_attribute *devattr,
@@ -1032,6 +1048,7 @@ static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
+static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
/* sys I/F for cooling device */
#define to_cooling_device(_dev) \
@@ -1333,6 +1350,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
return -ENODEV;
unbind:
+ device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
release_idr(&tz->idr, &tz->lock, pos->id);
@@ -1802,11 +1820,12 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
}
-#ifdef CONFIG_THERMAL_EMULATION
- result = device_create_file(&tz->device, &dev_attr_emul_temp);
- if (result)
- goto unregister;
-#endif
+ if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) {
+ result = device_create_file(&tz->device, &dev_attr_emul_temp);
+ if (result)
+ goto unregister;
+ }
+
/* Create policy attribute */
result = device_create_file(&tz->device, &dev_attr_policy);
if (result)
@@ -1817,6 +1836,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (result)
goto unregister;
+ /* Create available_policies attribute */
+ result = device_create_file(&tz->device, &dev_attr_available_policies);
+ if (result)
+ goto unregister;
+
/* Update 'this' zone's governor information */
mutex_lock(&thermal_governor_lock);
@@ -1848,9 +1872,6 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
- if (!tz->ops->get_temp)
- thermal_zone_device_set_polling(tz, 0);
-
thermal_zone_device_update(tz);
return tz;
@@ -1917,6 +1938,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
if (tz->ops->get_mode)
device_remove_file(&tz->device, &dev_attr_mode);
device_remove_file(&tz->device, &dev_attr_policy);
+ device_remove_file(&tz->device, &dev_attr_available_policies);
remove_trip_attrs(tz);
thermal_set_governor(tz, NULL);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 1967bee4f076..06fd2ed9ef9d 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -69,7 +69,7 @@ static DEVICE_ATTR(name, 0444, name_show, NULL);
static ssize_t
temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- long temperature;
+ int temperature;
int ret;
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
@@ -83,7 +83,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
if (ret)
return ret;
- return sprintf(buf, "%ld\n", temperature);
+ return sprintf(buf, "%d\n", temperature);
}
static ssize_t
@@ -95,14 +95,14 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
= container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_crit);
struct thermal_zone_device *tz = temp->tz;
- long temperature;
+ int temperature;
int ret;
ret = tz->ops->get_trip_temp(tz, 0, &temperature);
if (ret)
return ret;
- return sprintf(buf, "%ld\n", temperature);
+ return sprintf(buf, "%d\n", temperature);
}
@@ -142,7 +142,7 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
{
- unsigned long temp;
+ int temp;
return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index c7c5b3779dac..b213a1222295 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -76,14 +76,14 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
/* thermal zone ops */
/* Get temperature callback function for thermal zone */
-static inline int __ti_thermal_get_temp(void *devdata, long *temp)
+static inline int __ti_thermal_get_temp(void *devdata, int *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
struct ti_thermal_data *data = devdata;
struct ti_bandgap *bgp;
const struct ti_temp_sensor *s;
int ret, tmp, slope, constant;
- unsigned long pcb_temp;
+ int pcb_temp;
if (!data)
return 0;
@@ -119,7 +119,7 @@ static inline int __ti_thermal_get_temp(void *devdata, long *temp)
}
static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
struct ti_thermal_data *data = thermal->devdata;
@@ -229,7 +229,7 @@ static int ti_thermal_get_trip_type(struct thermal_zone_device *thermal,
/* Get trip temperature callback functions for thermal zone */
static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
if (!ti_thermal_is_valid_trip(trip))
return -EINVAL;
@@ -280,7 +280,7 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
/* Get critical temperature callback functions for thermal zone */
static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+ int *temp)
{
/* shutdown zone */
return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 50d1d2cb091a..7fc919f7da4d 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -164,7 +164,7 @@ err_ret:
return err;
}
-static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
+static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
u32 eax, edx;
struct phy_dev_entry *phy_dev_entry;
@@ -175,7 +175,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *tem
if (eax & 0x80000000) {
*temp = phy_dev_entry->tj_max -
((eax >> 16) & 0x7f) * 1000;
- pr_debug("sys_get_curr_temp %ld\n", *temp);
+ pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
@@ -183,7 +183,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *tem
}
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, unsigned long *temp)
+ int trip, int *temp)
{
u32 eax, edx;
struct phy_dev_entry *phy_dev_entry;
@@ -214,13 +214,13 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
*temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
else
*temp = 0;
- pr_debug("sys_get_trip_temp %ld\n", *temp);
+ pr_debug("sys_get_trip_temp %d\n", *temp);
return 0;
}
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- unsigned long temp)
+ int temp)
{
u32 l, h;
struct phy_dev_entry *phy_dev_entry;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index a9d837f83ce8..10beb1589d83 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -200,7 +200,7 @@ static int xen_hvm_console_init(void)
{
int r;
uint64_t v = 0;
- unsigned long mfn;
+ unsigned long gfn;
struct xencons_info *info;
if (!xen_hvm_domain())
@@ -217,7 +217,7 @@ static int xen_hvm_console_init(void)
}
/*
* If the toolstack (or the hypervisor) hasn't set these values, the
- * default value is 0. Even though mfn = 0 and evtchn = 0 are
+ * default value is 0. Even though gfn = 0 and evtchn = 0 are
* theoretically correct values, in practice they never are and they
* mean that a legacy toolstack hasn't initialized the pv console correctly.
*/
@@ -229,8 +229,8 @@ static int xen_hvm_console_init(void)
r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
if (r < 0 || v == 0)
goto err;
- mfn = v;
- info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
+ gfn = v;
+ info->intf = xen_remap(gfn << PAGE_SHIFT, PAGE_SIZE);
if (info->intf == NULL)
goto err;
info->vtermno = HVC_COOKIE;
@@ -265,7 +265,8 @@ static int xen_pv_console_init(void)
return 0;
}
info->evtchn = xen_start_info->console.domU.evtchn;
- info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
+ /* GFN == MFN for PV guest */
+ info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
info->vtermno = HVC_COOKIE;
spin_lock(&xencons_lock);
@@ -374,7 +375,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
int ret, evtchn, devid, ref, irq;
struct xenbus_transaction xbt;
grant_ref_t gref_head;
- unsigned long mfn;
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
@@ -389,10 +389,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
irq, &domU_hvc_ops, 256);
if (IS_ERR(info->hvc))
return PTR_ERR(info->hvc);
- if (xen_pv_domain())
- mfn = virt_to_mfn(info->intf);
- else
- mfn = __pa(info->intf) >> PAGE_SHIFT;
ret = gnttab_alloc_grant_references(1, &gref_head);
if (ret < 0)
return ret;
@@ -401,7 +397,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
if (ref < 0)
return ref;
gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
- mfn, 0);
+ virt_to_gfn(info->intf), 0);
again:
ret = xenbus_transaction_start(&xbt);
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 41901997c0d6..a75146f600cb 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -240,9 +240,9 @@ static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
{
struct hvsi_control *header = (struct hvsi_control *)packet;
- switch (header->verb) {
+ switch (be16_to_cpu(header->verb)) {
case VSV_MODEM_CTL_UPDATE:
- if ((header->word & HVSI_TSCD) == 0) {
+ if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
/* CD went away; no more connection */
pr_debug("hvsi%i: CD dropped\n", hp->index);
hp->mctrl &= TIOCM_CD;
@@ -267,6 +267,7 @@ static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
+ uint32_t mctrl_word;
switch (hp->state) {
case HVSI_WAIT_FOR_VER_RESPONSE:
@@ -274,9 +275,10 @@ static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
break;
case HVSI_WAIT_FOR_MCTRL_RESPONSE:
hp->mctrl = 0;
- if (resp->u.mctrl_word & HVSI_TSDTR)
+ mctrl_word = be32_to_cpu(resp->u.mctrl_word);
+ if (mctrl_word & HVSI_TSDTR)
hp->mctrl |= TIOCM_DTR;
- if (resp->u.mctrl_word & HVSI_TSCD)
+ if (mctrl_word & HVSI_TSCD)
hp->mctrl |= TIOCM_CD;
__set_state(hp, HVSI_OPEN);
break;
@@ -295,10 +297,10 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
packet.hdr.len = sizeof(struct hvsi_query_response);
- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
- packet.verb = VSV_SEND_VERSION_NUMBER;
+ packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+ packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
packet.u.version = HVSI_VERSION;
- packet.query_seqno = query_seqno+1;
+ packet.query_seqno = cpu_to_be16(query_seqno+1);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -319,7 +321,7 @@ static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
switch (hp->state) {
case HVSI_WAIT_FOR_VER_QUERY:
- hvsi_version_respond(hp, query->hdr.seqno);
+ hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
__set_state(hp, HVSI_OPEN);
break;
default:
@@ -555,8 +557,8 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
packet.hdr.type = VS_QUERY_PACKET_HEADER;
packet.hdr.len = sizeof(struct hvsi_query);
- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
- packet.verb = verb;
+ packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+ packet.verb = cpu_to_be16(verb);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -596,14 +598,14 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
struct hvsi_control packet __ALIGNED__;
int wrote;
- packet.hdr.type = VS_CONTROL_PACKET_HEADER,
- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+ packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = sizeof(struct hvsi_control);
- packet.verb = VSV_SET_MODEM_CTL;
- packet.mask = HVSI_TSDTR;
+ packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+ packet.mask = cpu_to_be32(HVSI_TSDTR);
if (mctrl & TIOCM_DTR)
- packet.word = HVSI_TSDTR;
+ packet.word = cpu_to_be32(HVSI_TSDTR);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -680,7 +682,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
packet.hdr.type = VS_DATA_PACKET_HEADER;
- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = count + sizeof(struct hvsi_header);
memcpy(&packet.data, buf, count);
@@ -697,9 +699,9 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
struct hvsi_control packet __ALIGNED__;
packet.hdr.type = VS_CONTROL_PACKET_HEADER;
- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
packet.hdr.len = 6;
- packet.verb = VSV_CLOSE_PROTOCOL;
+ packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -1180,7 +1182,7 @@ static int __init hvsi_console_init(void)
/* search device tree for vty nodes */
for_each_compatible_node(vty, "serial", "hvterm-protocol") {
struct hvsi_struct *hp;
- const uint32_t *vtermno, *irq;
+ const __be32 *vtermno, *irq;
vtermno = of_get_property(vty, "reg", NULL);
irq = of_get_property(vty, "interrupts", NULL);
@@ -1202,11 +1204,11 @@ static int __init hvsi_console_init(void)
hp->index = hvsi_count;
hp->inbuf_end = hp->inbuf;
hp->state = HVSI_CLOSED;
- hp->vtermno = *vtermno;
- hp->virq = irq_create_mapping(NULL, irq[0]);
+ hp->vtermno = be32_to_cpup(vtermno);
+ hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
if (hp->virq == 0) {
printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
- __func__, irq[0]);
+ __func__, be32_to_cpup(irq));
tty_port_destroy(&hp->port);
continue;
}
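The hvsi changes above all follow one pattern: HVSI protocol fields are big-endian on the wire, so packet builders convert with cpu_to_be*() and parsers convert back with be*_to_cpu(). A minimal sketch of that pattern, with illustrative struct and function names:

/* Illustrative wire header; not the driver's real layout. */
struct example_wire_header {
	uint8_t type;
	uint8_t len;
	__be16  seqno;				/* big-endian on the wire */
};

static void example_fill(struct example_wire_header *hdr, uint16_t seqno)
{
	hdr->seqno = cpu_to_be16(seqno);	/* CPU order -> wire order */
}

static uint16_t example_parse(const struct example_wire_header *hdr)
{
	return be16_to_cpu(hdr->seqno);		/* wire order -> CPU order */
}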
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 358323c83b4f..a8c8cfd52a23 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -879,6 +879,11 @@ static const struct tty_operations mips_ejtag_fdc_tty_ops = {
.chars_in_buffer = mips_ejtag_fdc_tty_chars_in_buffer,
};
+int __weak get_c0_fdc_int(void)
+{
+ return -1;
+}
+
static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
{
int ret, nport;
@@ -967,9 +972,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
wake_up_process(priv->thread);
/* Look for an FDC IRQ */
- priv->irq = -1;
- if (get_c0_fdc_int)
- priv->irq = get_c0_fdc_int();
+ priv->irq = get_c0_fdc_int();
/* Try requesting the IRQ */
if (priv->irq >= 0) {
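The new __weak definition above is what lets the probe path drop its test on get_c0_fdc_int: the weak stub guarantees the symbol always exists and reports "no IRQ", and a platform that routes the FDC interrupt overrides it simply by providing a strong definition of the same symbol. A sketch of the idiom, under an illustrative name:

int __weak example_get_fdc_irq(void)
{
	/* default: no interrupt; a platform's strong definition wins */
	return -1;
}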
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 382d3fcba6cc..c3fe026d3168 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2712,7 +2712,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
memcpy(skb_put(skb, size), in_buf, size);
skb->dev = net;
- skb->protocol = __constant_htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IP);
/* Ship it off to the kernel */
netif_rx(skb);
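A hedged note on the one-line change above: in the generic byteorder headers, htons() already detects compile-time-constant arguments via __builtin_constant_p and folds the byte swap at build time, so the explicit __constant_htons() spelling buys nothing. For example:

static __be16 example_ip_proto(void)
{
	return htons(ETH_P_IP);	/* resolves to a constant at compile time */
}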
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..20932cc9c8f7 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* Locking: ctrl_lock
*/
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
{
- struct n_tty_data *ldata = tty->disc_data;
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, sig, 1);
put_pid(tty_pgrp);
}
+}
+
+static void isig(int sig, struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (L_NOFLSH(tty)) {
+ /* signal only */
+ __isig(sig, tty);
- if (!L_NOFLSH(tty)) {
+ } else { /* signal and flush */
up_read(&tty->termios_rwsem);
down_write(&tty->termios_rwsem);
+ __isig(sig, tty);
+
/* clear echo buffer */
mutex_lock(&ldata->output_lock);
ldata->echo_head = ldata->echo_tail = 0;
@@ -2137,6 +2147,8 @@ extern ssize_t redirected_tty_write(struct file *, const char __user *,
static int job_control(struct tty_struct *tty, struct file *file)
{
+ struct pid *pgrp;
+
/* Job control check -- must be done at start and after
every sleep (POSIX.1 7.1.1.4). */
/* NOTE: not yet done after every sleep pending a thorough
@@ -2146,18 +2158,25 @@ static int job_control(struct tty_struct *tty, struct file *file)
current->signal->tty != tty)
return 0;
+ rcu_read_lock();
+ pgrp = task_pgrp(current);
+
spin_lock_irq(&tty->ctrl_lock);
if (!tty->pgrp)
printk(KERN_ERR "n_tty_read: no tty->pgrp!\n");
- else if (task_pgrp(current) != tty->pgrp) {
+ else if (pgrp != tty->pgrp) {
spin_unlock_irq(&tty->ctrl_lock);
- if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned())
+ if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned()) {
+ rcu_read_unlock();
return -EIO;
- kill_pgrp(task_pgrp(current), SIGTTIN, 1);
+ }
+ kill_pgrp(pgrp, SIGTTIN, 1);
+ rcu_read_unlock();
set_thread_flag(TIF_SIGPENDING);
return -ERESTARTSYS;
}
spin_unlock_irq(&tty->ctrl_lock);
+ rcu_read_unlock();
return 0;
}
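The job_control() change above pins the process group with RCU so that the pgrp pointer tested against tty->pgrp is the same one that gets signalled. A minimal sketch of the pattern, with the error paths omitted and an illustrative function name:

static void example_signal_pgrp(struct tty_struct *tty)
{
	struct pid *pgrp;

	rcu_read_lock();
	pgrp = task_pgrp(current);	/* stable while the RCU lock is held */
	if (pgrp != tty->pgrp)
		kill_pgrp(pgrp, SIGTTIN, 1);
	rcu_read_unlock();
}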
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 4d5e8409769c..4d5937c185c1 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -26,6 +26,12 @@
#include <linux/mutex.h>
#include <linux/poll.h>
+#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...) do {} while (0)
+#endif
#ifdef CONFIG_UNIX98_PTYS
static struct tty_driver *ptm_driver;
@@ -779,6 +785,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
goto err_release;
+ tty_debug_hangup(tty, "(tty count=%d)\n", tty->count);
+
tty_unlock(tty);
return 0;
err_release:
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index c43f74c53cd9..d54dcd87c67e 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -42,9 +42,9 @@ struct uart_8250_dma {
size_t rx_size;
size_t tx_size;
- unsigned char tx_running:1;
- unsigned char tx_err: 1;
- unsigned char rx_running:1;
+ unsigned char tx_running;
+ unsigned char tx_err;
+ unsigned char rx_running;
};
struct old_serial_port {
@@ -211,3 +211,14 @@ static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
}
return 1;
}
+
+static inline int serial_index(struct uart_port *port)
+{
+ return port->minor - 64;
+}
+
+#if 0
+#define DEBUG_INTR(fmt...) printk(fmt)
+#else
+#define DEBUG_INTR(fmt...) do { } while (0)
+#endif
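Moving serial_index() into 8250.h makes the minor-number convention visible to all 8250 code: these ports traditionally start at character-device minor 64. A made-up usage example, not driver code:

static int example_index(void)
{
	struct uart_port port = { .minor = 65 };

	return serial_index(&port);	/* 65 - 64 == 1, i.e. ttyS1 */
}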
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 37fff12dd4d0..271d12137649 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1,25 +1,23 @@
/*
- * Driver for 8250/16550-type serial ports
+ * Universal/legacy driver for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King.
*
+ * Supports: ISA-compatible 8250/16550 ports
+ * PNP 8250/16550 ports
+ * early_serial_setup() ports
+ * userspace-configurable "phantom" ports
+ * "serial8250" platform devices
+ * serial8250_register_8250_port() ports
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * A note about mapbase / membase
- *
- * mapbase is the physical address of the IO port.
- * membase is an 'ioremapped' cookie.
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -38,11 +36,11 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
+#include <linux/io.h>
#ifdef CONFIG_SPARC
#include <linux/sunserialcore.h>
#endif
-#include <asm/io.h>
#include <asm/irq.h>
#include "8250.h"
@@ -58,33 +56,10 @@ static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
static struct uart_driver serial8250_reg;
-static int serial_index(struct uart_port *port)
-{
- return port->minor - 64;
-}
-
static unsigned int skip_txen_test; /* force skip of txen test at init time */
-/*
- * Debugging.
- */
-#if 0
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#if 0
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
-
#define PASS_LIMIT 512
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-
#include <asm/serial.h>
/*
* SERIAL_PORT_DFNS tells us about built-in ports that have no
@@ -120,1563 +95,6 @@ static struct hlist_head irq_lists[NR_IRQ_HASH];
static DEFINE_MUTEX(hash_mutex); /* Used to walk the hash */
/*
- * Here we define the default xmit fifo size used for each type of UART.
- */
-static const struct serial8250_config uart_config[] = {
- [PORT_UNKNOWN] = {
- .name = "unknown",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_8250] = {
- .name = "8250",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_16450] = {
- .name = "16450",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_16550] = {
- .name = "16550",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_16550A] = {
- .name = "16550A",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .rxtrig_bytes = {1, 4, 8, 14},
- .flags = UART_CAP_FIFO,
- },
- [PORT_CIRRUS] = {
- .name = "Cirrus",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_16650] = {
- .name = "ST16650",
- .fifo_size = 1,
- .tx_loadsz = 1,
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
- },
- [PORT_16650V2] = {
- .name = "ST16650V2",
- .fifo_size = 32,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
- UART_FCR_T_TRIG_00,
- .rxtrig_bytes = {8, 16, 24, 28},
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
- },
- [PORT_16750] = {
- .name = "TI16750",
- .fifo_size = 64,
- .tx_loadsz = 64,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
- UART_FCR7_64BYTE,
- .rxtrig_bytes = {1, 16, 32, 56},
- .flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
- },
- [PORT_STARTECH] = {
- .name = "Startech",
- .fifo_size = 1,
- .tx_loadsz = 1,
- },
- [PORT_16C950] = {
- .name = "16C950/954",
- .fifo_size = 128,
- .tx_loadsz = 128,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- /* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
- .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
- },
- [PORT_16654] = {
- .name = "ST16654",
- .fifo_size = 64,
- .tx_loadsz = 32,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
- UART_FCR_T_TRIG_10,
- .rxtrig_bytes = {8, 16, 56, 60},
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
- },
- [PORT_16850] = {
- .name = "XR16850",
- .fifo_size = 128,
- .tx_loadsz = 128,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
- },
- [PORT_RSA] = {
- .name = "RSA",
- .fifo_size = 2048,
- .tx_loadsz = 2048,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
- .flags = UART_CAP_FIFO,
- },
- [PORT_NS16550A] = {
- .name = "NS16550A",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_NATSEMI,
- },
- [PORT_XSCALE] = {
- .name = "XScale",
- .fifo_size = 32,
- .tx_loadsz = 32,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
- },
- [PORT_OCTEON] = {
- .name = "OCTEON",
- .fifo_size = 64,
- .tx_loadsz = 64,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO,
- },
- [PORT_AR7] = {
- .name = "AR7",
- .fifo_size = 16,
- .tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
- },
- [PORT_U6_16550A] = {
- .name = "U6_16550A",
- .fifo_size = 64,
- .tx_loadsz = 64,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
- },
- [PORT_TEGRA] = {
- .name = "Tegra",
- .fifo_size = 32,
- .tx_loadsz = 8,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
- UART_FCR_T_TRIG_01,
- .rxtrig_bytes = {1, 4, 8, 14},
- .flags = UART_CAP_FIFO | UART_CAP_RTOIE,
- },
- [PORT_XR17D15X] = {
- .name = "XR17D15X",
- .fifo_size = 64,
- .tx_loadsz = 64,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP,
- },
- [PORT_XR17V35X] = {
- .name = "XR17V35X",
- .fifo_size = 256,
- .tx_loadsz = 256,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
- UART_FCR_T_TRIG_11,
- .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP,
- },
- [PORT_LPC3220] = {
- .name = "LPC3220",
- .fifo_size = 64,
- .tx_loadsz = 32,
- .fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
- UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
- .flags = UART_CAP_FIFO,
- },
- [PORT_BRCM_TRUMANAGE] = {
- .name = "TruManage",
- .fifo_size = 1,
- .tx_loadsz = 1024,
- .flags = UART_CAP_HFIFO,
- },
- [PORT_8250_CIR] = {
- .name = "CIR port"
- },
- [PORT_ALTR_16550_F32] = {
- .name = "Altera 16550 FIFO32",
- .fifo_size = 32,
- .tx_loadsz = 32,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
- },
- [PORT_ALTR_16550_F64] = {
- .name = "Altera 16550 FIFO64",
- .fifo_size = 64,
- .tx_loadsz = 64,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
- },
- [PORT_ALTR_16550_F128] = {
- .name = "Altera 16550 FIFO128",
- .fifo_size = 128,
- .tx_loadsz = 128,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
- },
-/* tx_loadsz is set to 63 bytes instead of 64 bytes to implement a
-workaround for erratum A-008006, which states that tx_loadsz should be
-configured to less than the maximum supported FIFO bytes */
- [PORT_16550A_FSL64] = {
- .name = "16550A_FSL64",
- .fifo_size = 64,
- .tx_loadsz = 63,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
- UART_FCR7_64BYTE,
- .flags = UART_CAP_FIFO,
- },
-};
-
-/* Uart divisor latch read */
-static int default_serial_dl_read(struct uart_8250_port *up)
-{
- return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
-}
-
-/* Uart divisor latch write */
-static void default_serial_dl_write(struct uart_8250_port *up, int value)
-{
- serial_out(up, UART_DLL, value & 0xff);
- serial_out(up, UART_DLM, value >> 8 & 0xff);
-}
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-
-/* Au1x00/RT288x UART hardware has a weird register layout */
-static const s8 au_io_in_map[8] = {
- 0, /* UART_RX */
- 2, /* UART_IER */
- 3, /* UART_IIR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- 7, /* UART_LSR */
- 8, /* UART_MSR */
- -1, /* UART_SCR (unmapped) */
-};
-
-static const s8 au_io_out_map[8] = {
- 1, /* UART_TX */
- 2, /* UART_IER */
- 4, /* UART_FCR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- -1, /* UART_LSR (unmapped) */
- -1, /* UART_MSR (unmapped) */
- -1, /* UART_SCR (unmapped) */
-};
-
-static unsigned int au_serial_in(struct uart_port *p, int offset)
-{
- if (offset >= ARRAY_SIZE(au_io_in_map))
- return UINT_MAX;
- offset = au_io_in_map[offset];
- if (offset < 0)
- return UINT_MAX;
- return __raw_readl(p->membase + (offset << p->regshift));
-}
-
-static void au_serial_out(struct uart_port *p, int offset, int value)
-{
- if (offset >= ARRAY_SIZE(au_io_out_map))
- return;
- offset = au_io_out_map[offset];
- if (offset < 0)
- return;
- __raw_writel(value, p->membase + (offset << p->regshift));
-}
-
-/* The Au1x00 hasn't got a standard divisor latch */
-static int au_serial_dl_read(struct uart_8250_port *up)
-{
- return __raw_readl(up->port.membase + 0x28);
-}
-
-static void au_serial_dl_write(struct uart_8250_port *up, int value)
-{
- __raw_writel(value, up->port.membase + 0x28);
-}
-
-#endif
-
-static unsigned int hub6_serial_in(struct uart_port *p, int offset)
-{
- offset = offset << p->regshift;
- outb(p->hub6 - 1 + offset, p->iobase);
- return inb(p->iobase + 1);
-}
-
-static void hub6_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = offset << p->regshift;
- outb(p->hub6 - 1 + offset, p->iobase);
- outb(value, p->iobase + 1);
-}
-
-static unsigned int mem_serial_in(struct uart_port *p, int offset)
-{
- offset = offset << p->regshift;
- return readb(p->membase + offset);
-}
-
-static void mem_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = offset << p->regshift;
- writeb(value, p->membase + offset);
-}
-
-static void mem32_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = offset << p->regshift;
- writel(value, p->membase + offset);
-}
-
-static unsigned int mem32_serial_in(struct uart_port *p, int offset)
-{
- offset = offset << p->regshift;
- return readl(p->membase + offset);
-}
-
-static void mem32be_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = offset << p->regshift;
- iowrite32be(value, p->membase + offset);
-}
-
-static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
-{
- offset = offset << p->regshift;
- return ioread32be(p->membase + offset);
-}
-
-static unsigned int io_serial_in(struct uart_port *p, int offset)
-{
- offset = offset << p->regshift;
- return inb(p->iobase + offset);
-}
-
-static void io_serial_out(struct uart_port *p, int offset, int value)
-{
- offset = offset << p->regshift;
- outb(value, p->iobase + offset);
-}
-
-static int serial8250_default_handle_irq(struct uart_port *port);
-static int exar_handle_irq(struct uart_port *port);
-
-static void set_io_from_upio(struct uart_port *p)
-{
- struct uart_8250_port *up = up_to_u8250p(p);
-
- up->dl_read = default_serial_dl_read;
- up->dl_write = default_serial_dl_write;
-
- switch (p->iotype) {
- case UPIO_HUB6:
- p->serial_in = hub6_serial_in;
- p->serial_out = hub6_serial_out;
- break;
-
- case UPIO_MEM:
- p->serial_in = mem_serial_in;
- p->serial_out = mem_serial_out;
- break;
-
- case UPIO_MEM32:
- p->serial_in = mem32_serial_in;
- p->serial_out = mem32_serial_out;
- break;
-
- case UPIO_MEM32BE:
- p->serial_in = mem32be_serial_in;
- p->serial_out = mem32be_serial_out;
- break;
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
- case UPIO_AU:
- p->serial_in = au_serial_in;
- p->serial_out = au_serial_out;
- up->dl_read = au_serial_dl_read;
- up->dl_write = au_serial_dl_write;
- break;
-#endif
-
- default:
- p->serial_in = io_serial_in;
- p->serial_out = io_serial_out;
- break;
- }
- /* Remember loaded iotype */
- up->cur_iotype = p->iotype;
- p->handle_irq = serial8250_default_handle_irq;
-}
-
-static void
-serial_port_out_sync(struct uart_port *p, int offset, int value)
-{
- switch (p->iotype) {
- case UPIO_MEM:
- case UPIO_MEM32:
- case UPIO_MEM32BE:
- case UPIO_AU:
- p->serial_out(p, offset, value);
- p->serial_in(p, UART_LCR); /* safe, no side-effects */
- break;
- default:
- p->serial_out(p, offset, value);
- }
-}
-
-/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
- serial_out(up, UART_SCR, offset);
- serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-
-/*
- * FIFO support.
- */
-static void serial8250_clear_fifos(struct uart_8250_port *p)
-{
- if (p->capabilities & UART_CAP_FIFO) {
- serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(p, UART_FCR, 0);
- }
-}
-
-void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
-{
- serial8250_clear_fifos(p);
- serial_out(p, UART_FCR, p->fcr);
-}
-EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
-
-void serial8250_rpm_get(struct uart_8250_port *p)
-{
- if (!(p->capabilities & UART_CAP_RPM))
- return;
- pm_runtime_get_sync(p->port.dev);
-}
-EXPORT_SYMBOL_GPL(serial8250_rpm_get);
-
-void serial8250_rpm_put(struct uart_8250_port *p)
-{
- if (!(p->capabilities & UART_CAP_RPM))
- return;
- pm_runtime_mark_last_busy(p->port.dev);
- pm_runtime_put_autosuspend(p->port.dev);
-}
-EXPORT_SYMBOL_GPL(serial8250_rpm_put);
-
-/*
- * These two wrappers ensure that serial8250_rpm_get_tx() can be called more
- * than once and serial8250_rpm_put_tx() will still disable RPM once the FIFO
- * is empty and the HW can idle again.
- */
-static void serial8250_rpm_get_tx(struct uart_8250_port *p)
-{
- unsigned char rpm_active;
-
- if (!(p->capabilities & UART_CAP_RPM))
- return;
-
- rpm_active = xchg(&p->rpm_tx_active, 1);
- if (rpm_active)
- return;
- pm_runtime_get_sync(p->port.dev);
-}
-
-static void serial8250_rpm_put_tx(struct uart_8250_port *p)
-{
- unsigned char rpm_active;
-
- if (!(p->capabilities & UART_CAP_RPM))
- return;
-
- rpm_active = xchg(&p->rpm_tx_active, 0);
- if (!rpm_active)
- return;
- pm_runtime_mark_last_busy(p->port.dev);
- pm_runtime_put_autosuspend(p->port.dev);
-}
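A hedged sketch of the xchg() idiom the two wrappers above rely on: xchg() atomically stores the new flag value and returns the previous one, so only the 0 -> 1 transition takes the runtime-PM reference and only the 1 -> 0 transition drops it, however often either wrapper runs. Illustrative name:

static void example_rpm_get_once(struct uart_8250_port *p)
{
	if (xchg(&p->rpm_tx_active, 1) == 0)	/* first caller wins */
		pm_runtime_get_sync(p->port.dev);
}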
-
-/*
- * IER sleep support. UARTs which have EFRs need the "extended
- * capability" bit enabled. Note that on XR16C850s, we need to
- * reset LCR to write to IER.
- */
-static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
-{
- unsigned char lcr = 0, efr = 0;
- /*
- * Exar UARTs have a SLEEP register that enables or disables
- * each UART to enter sleep mode separately. On the XR17V35x the
- * register is accessible to each UART at the UART_EXAR_SLEEP
- * offset but the UART channel may only write to the corresponding
- * bit.
- */
- serial8250_rpm_get(p);
- if ((p->port.type == PORT_XR17V35X) ||
- (p->port.type == PORT_XR17D15X)) {
- serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
- goto out;
- }
-
- if (p->capabilities & UART_CAP_SLEEP) {
- if (p->capabilities & UART_CAP_EFR) {
- lcr = serial_in(p, UART_LCR);
- efr = serial_in(p, UART_EFR);
- serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(p, UART_EFR, UART_EFR_ECB);
- serial_out(p, UART_LCR, 0);
- }
- serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
- if (p->capabilities & UART_CAP_EFR) {
- serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(p, UART_EFR, efr);
- serial_out(p, UART_LCR, lcr);
- }
- }
-out:
- serial8250_rpm_put(p);
-}
-
-#ifdef CONFIG_SERIAL_8250_RSA
-/*
- * Attempts to turn on the RSA FIFO. Returns zero on failure.
- * We set the port uart clock rate if we succeed.
- */
-static int __enable_rsa(struct uart_8250_port *up)
-{
- unsigned char mode;
- int result;
-
- mode = serial_in(up, UART_RSA_MSR);
- result = mode & UART_RSA_MSR_FIFO;
-
- if (!result) {
- serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
- mode = serial_in(up, UART_RSA_MSR);
- result = mode & UART_RSA_MSR_FIFO;
- }
-
- if (result)
- up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
-
- return result;
-}
-
-static void enable_rsa(struct uart_8250_port *up)
-{
- if (up->port.type == PORT_RSA) {
- if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
- __enable_rsa(up);
- spin_unlock_irq(&up->port.lock);
- }
- if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
- serial_out(up, UART_RSA_FRR, 0);
- }
-}
-
-/*
- * Attempts to turn off the RSA FIFO. Returns zero on failure.
- * It is unknown why interrupts were disabled here. However,
- * the caller is expected to preserve this behaviour by grabbing
- * the spinlock before calling this function.
- */
-static void disable_rsa(struct uart_8250_port *up)
-{
- unsigned char mode;
- int result;
-
- if (up->port.type == PORT_RSA &&
- up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
- spin_lock_irq(&up->port.lock);
-
- mode = serial_in(up, UART_RSA_MSR);
- result = !(mode & UART_RSA_MSR_FIFO);
-
- if (!result) {
- serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
- mode = serial_in(up, UART_RSA_MSR);
- result = !(mode & UART_RSA_MSR_FIFO);
- }
-
- if (result)
- up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
- spin_unlock_irq(&up->port.lock);
- }
-}
-#endif /* CONFIG_SERIAL_8250_RSA */
-
-/*
- * This is a quickie test to see how big the FIFO is.
- * It doesn't work all the time, more's the pity.
- */
-static int size_fifo(struct uart_8250_port *up)
-{
- unsigned char old_fcr, old_mcr, old_lcr;
- unsigned short old_dl;
- int count;
-
- old_lcr = serial_in(up, UART_LCR);
- serial_out(up, UART_LCR, 0);
- old_fcr = serial_in(up, UART_FCR);
- old_mcr = serial_in(up, UART_MCR);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_MCR, UART_MCR_LOOP);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- old_dl = serial_dl_read(up);
- serial_dl_write(up, 0x0001);
- serial_out(up, UART_LCR, 0x03);
- for (count = 0; count < 256; count++)
- serial_out(up, UART_TX, count);
- mdelay(20);/* FIXME - schedule_timeout */
- for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
- (count < 256); count++)
- serial_in(up, UART_RX);
- serial_out(up, UART_FCR, old_fcr);
- serial_out(up, UART_MCR, old_mcr);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_dl_write(up, old_dl);
- serial_out(up, UART_LCR, old_lcr);
-
- return count;
-}
-
-/*
- * Read UART ID using the divisor method - set DLL and DLM to zero
- * and the revision will be in DLL and device type in DLM. We
- * preserve the device state across this.
- */
-static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
-{
- unsigned char old_dll, old_dlm, old_lcr;
- unsigned int id;
-
- old_lcr = serial_in(p, UART_LCR);
- serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
-
- old_dll = serial_in(p, UART_DLL);
- old_dlm = serial_in(p, UART_DLM);
-
- serial_out(p, UART_DLL, 0);
- serial_out(p, UART_DLM, 0);
-
- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
- serial_out(p, UART_DLL, old_dll);
- serial_out(p, UART_DLM, old_dlm);
- serial_out(p, UART_LCR, old_lcr);
-
- return id;
-}
-
-/*
- * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's.
- * When this function is called we know it is at least a StarTech
- * 16650 V2, but it might be one of several StarTech UARTs, or one of
- * its clones. (We treat the broken original StarTech 16650 V1 as a
- * 16550, and why not? Startech doesn't seem to even acknowledge its
- * existence.)
- *
- * What evil have men's minds wrought...
- */
-static void autoconfig_has_efr(struct uart_8250_port *up)
-{
- unsigned int id1, id2, id3, rev;
-
- /*
- * Everything with an EFR has SLEEP
- */
- up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
-
- /*
- * First we check to see if it's an Oxford Semiconductor UART.
- *
- * We have to do this here because some non-National
- * Semiconductor clone chips lock up if you try writing to the
- * LSR register (which serial_icr_read does)
- */
-
- /*
- * Check for Oxford Semiconductor 16C950.
- *
- * EFR [4] must be set else this test fails.
- *
- * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
- * claims that it's needed for 952 dual UARTs (which are not
- * recommended for new designs).
- */
- up->acr = 0;
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
- serial_out(up, UART_LCR, 0x00);
- id1 = serial_icr_read(up, UART_ID1);
- id2 = serial_icr_read(up, UART_ID2);
- id3 = serial_icr_read(up, UART_ID3);
- rev = serial_icr_read(up, UART_REV);
-
- DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
-
- if (id1 == 0x16 && id2 == 0xC9 &&
- (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
- up->port.type = PORT_16C950;
-
- /*
- * Enable work around for the Oxford Semiconductor 952 rev B
- * chip which causes it to seriously miscalculate baud rates
- * when DLL is 0.
- */
- if (id3 == 0x52 && rev == 0x01)
- up->bugs |= UART_BUG_QUOT;
- return;
- }
-
- /*
- * We check for a XR16C850 by setting DLL and DLM to 0, and then
- * reading back DLL and DLM. The chip type depends on the DLM
- * value read back:
- * 0x10 - XR16C850 and the DLL contains the chip revision.
- * 0x12 - XR16C2850.
- * 0x14 - XR16C854.
- */
- id1 = autoconfig_read_divisor_id(up);
- DEBUG_AUTOCONF("850id=%04x ", id1);
-
- id2 = id1 >> 8;
- if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
- up->port.type = PORT_16850;
- return;
- }
-
- /*
- * It wasn't an XR16C850.
- *
- * We distinguish between the '654 and the '650 by counting
- * how many bytes are in the FIFO. I'm using this for now,
- * since that's the technique that was sent to me in the
- * serial driver update, but I'm not convinced this works.
- * I've had problems doing this in the past. -TYT
- */
- if (size_fifo(up) == 64)
- up->port.type = PORT_16654;
- else
- up->port.type = PORT_16650V2;
-}
-
-/*
- * We detected a chip without a FIFO. Only two fall into
- * this category - the original 8250 and the 16450. The
- * 16450 has a scratch register (accessible with LCR=0)
- */
-static void autoconfig_8250(struct uart_8250_port *up)
-{
- unsigned char scratch, status1, status2;
-
- up->port.type = PORT_8250;
-
- scratch = serial_in(up, UART_SCR);
- serial_out(up, UART_SCR, 0xa5);
- status1 = serial_in(up, UART_SCR);
- serial_out(up, UART_SCR, 0x5a);
- status2 = serial_in(up, UART_SCR);
- serial_out(up, UART_SCR, scratch);
-
- if (status1 == 0xa5 && status2 == 0x5a)
- up->port.type = PORT_16450;
-}
-
-static int broken_efr(struct uart_8250_port *up)
-{
- /*
- * Exar ST16C2550 "A2" devices incorrectly detect as
- * having an EFR, and report an ID of 0x0201. See
- * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
- */
- if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
- return 1;
-
- return 0;
-}
-
-/*
- * We know that the chip has FIFOs. Does it have an EFR? The
- * EFR is located in the same register position as the IIR and
- * we know the top two bits of the IIR are currently set. The
- * EFR should contain zero. Try to read the EFR.
- */
-static void autoconfig_16550a(struct uart_8250_port *up)
-{
- unsigned char status1, status2;
- unsigned int iersave;
-
- up->port.type = PORT_16550A;
- up->capabilities |= UART_CAP_FIFO;
-
- /*
- * XR17V35x UARTs have an extra divisor register, DLD, that
- * gets enabled when DLAB is set and will cause the device to
- * incorrectly match and assign the port type to PORT_16650.
- * The EFR for this UART is found at offset 0x09. Instead,
- * check the Device ID (DVID)
- * register for a 2, 4 or 8 port UART.
- */
- if (up->port.flags & UPF_EXAR_EFR) {
- status1 = serial_in(up, UART_EXAR_DVID);
- if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
- DEBUG_AUTOCONF("Exar XR17V35x ");
- up->port.type = PORT_XR17V35X;
- up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP;
-
- return;
- }
-
- }
-
- /*
- * Check for presence of the EFR when DLAB is set.
- * Only ST16C650V1 UARTs pass this test.
- */
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- if (serial_in(up, UART_EFR) == 0) {
- serial_out(up, UART_EFR, 0xA8);
- if (serial_in(up, UART_EFR) != 0) {
- DEBUG_AUTOCONF("EFRv1 ");
- up->port.type = PORT_16650;
- up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
- } else {
- serial_out(up, UART_LCR, 0);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) >> 5;
- serial_out(up, UART_FCR, 0);
- serial_out(up, UART_LCR, 0);
-
- if (status1 == 7)
- up->port.type = PORT_16550A_FSL64;
- else
- DEBUG_AUTOCONF("Motorola 8xxx DUART ");
- }
- serial_out(up, UART_EFR, 0);
- return;
- }
-
- /*
- * Maybe it requires 0xbf to be written to the LCR.
- * (other ST16C650V2 UARTs, TI16C752A, etc)
- */
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
- DEBUG_AUTOCONF("EFRv2 ");
- autoconfig_has_efr(up);
- return;
- }
-
- /*
- * Check for a National Semiconductor SuperIO chip.
- * Attempt to switch to bank 2, read the value of the LOOP bit
- * from EXCR1. Switch back to bank 0, change it in MCR. Then
- * switch back to bank 2, read it from EXCR1 again and check
- * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
- */
- serial_out(up, UART_LCR, 0);
- status1 = serial_in(up, UART_MCR);
- serial_out(up, UART_LCR, 0xE0);
- status2 = serial_in(up, 0x02); /* EXCR1 */
-
- if (!((status2 ^ status1) & UART_MCR_LOOP)) {
- serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
- serial_out(up, UART_LCR, 0xE0);
- status2 = serial_in(up, 0x02); /* EXCR1 */
- serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1);
-
- if ((status2 ^ status1) & UART_MCR_LOOP) {
- unsigned short quot;
-
- serial_out(up, UART_LCR, 0xE0);
-
- quot = serial_dl_read(up);
- quot <<= 3;
-
- if (ns16550a_goto_highspeed(up))
- serial_dl_write(up, quot);
-
- serial_out(up, UART_LCR, 0);
-
- up->port.uartclk = 921600*16;
- up->port.type = PORT_NS16550A;
- up->capabilities |= UART_NATSEMI;
- return;
- }
- }
-
- /*
- * No EFR. Try to detect a TI16750, which only sets bit 5 of
- * the IIR when 64-byte FIFO mode is enabled while DLAB is set.
- * Try setting it with and without DLAB set. Cheap clones
- * set bit 5 without DLAB set.
- */
- serial_out(up, UART_LCR, 0);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) >> 5;
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status2 = serial_in(up, UART_IIR) >> 5;
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_out(up, UART_LCR, 0);
-
- DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
-
- if (status1 == 6 && status2 == 7) {
- up->port.type = PORT_16750;
- up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
- return;
- }
-
- /*
- * Try writing and reading the UART_IER_UUE bit (b6).
- * If it works, this is probably one of the Xscale platform's
- * internal UARTs.
- * We're going to explicitly set the UUE bit to 0 before
- * trying to write and read a 1 just to make sure it's not
- * already a 1 and maybe locked there before we even start.
- */
- iersave = serial_in(up, UART_IER);
- serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
- if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
- /*
- * OK it's in a known zero state, try writing and reading
- * without disturbing the current state of the other bits.
- */
- serial_out(up, UART_IER, iersave | UART_IER_UUE);
- if (serial_in(up, UART_IER) & UART_IER_UUE) {
- /*
- * It's an Xscale.
- * We'll leave the UART_IER_UUE bit set to 1 (enabled).
- */
- DEBUG_AUTOCONF("Xscale ");
- up->port.type = PORT_XSCALE;
- up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
- return;
- }
- } else {
- /*
- * If we got here we couldn't force the IER_UUE bit to 0.
- * Log it and continue.
- */
- DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
- }
- serial_out(up, UART_IER, iersave);
-
- /*
- * Exar uarts have EFR in a weird location
- */
- if (up->port.flags & UPF_EXAR_EFR) {
- DEBUG_AUTOCONF("Exar XR17D15x ");
- up->port.type = PORT_XR17D15X;
- up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP;
-
- return;
- }
-
- /*
- * We distinguish between 16550A and U6 16550A by counting
- * how many bytes are in the FIFO.
- */
- if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
- up->port.type = PORT_U6_16550A;
- up->capabilities |= UART_CAP_AFE;
- }
-}
-
-/*
- * This routine is called by rs_init() to initialize a specific serial
- * port. It determines what type of UART chip this serial port is
- * using: 8250, 16450, 16550, 16550A. The important question is
- * whether or not this UART is a 16550A or not, since this will
- * determine whether or not we can use its FIFO features or not.
- */
-static void autoconfig(struct uart_8250_port *up)
-{
- unsigned char status1, scratch, scratch2, scratch3;
- unsigned char save_lcr, save_mcr;
- struct uart_port *port = &up->port;
- unsigned long flags;
- unsigned int old_capabilities;
-
- if (!port->iobase && !port->mapbase && !port->membase)
- return;
-
- DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
- serial_index(port), port->iobase, port->membase);
-
- /*
- * We really do need global IRQs disabled here - we're going to
- * be frobbing the chip's IRQ enable register to see if it exists.
- */
- spin_lock_irqsave(&port->lock, flags);
-
- up->capabilities = 0;
- up->bugs = 0;
-
- if (!(port->flags & UPF_BUGGY_UART)) {
- /*
- * Do a simple existence test first; if we fail this,
- * there's no point trying anything else.
- *
- * 0x80 is used as a nonsense port to guard against
- * false positives due to ISA bus float. The
- * assumption is that 0x80 is a non-existent port;
- * which should be safe since include/asm/io.h also
- * makes this assumption.
- *
- * Note: this is safe as long as MCR bit 4 is clear
- * and the device is in "PC" mode.
- */
- scratch = serial_in(up, UART_IER);
- serial_out(up, UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- /*
- * Mask out IER[7:4] bits for test as some UARTs (e.g. TL
- * 16C754B) allow only to modify them if an EFR bit is set.
- */
- scratch2 = serial_in(up, UART_IER) & 0x0f;
- serial_out(up, UART_IER, 0x0F);
-#ifdef __i386__
- outb(0, 0x080);
-#endif
- scratch3 = serial_in(up, UART_IER) & 0x0f;
- serial_out(up, UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0F) {
- /*
- * We failed; there's nothing here
- */
- spin_unlock_irqrestore(&port->lock, flags);
- DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
- scratch2, scratch3);
- goto out;
- }
- }
-
- save_mcr = serial_in(up, UART_MCR);
- save_lcr = serial_in(up, UART_LCR);
-
- /*
- * Check to see if a UART is really there. Certain broken
- * internal modems based on the Rockwell chipset fail this
- * test, because they apparently don't implement the loopback
- * test mode. So this test is skipped on the COM 1 through
- * COM 4 ports. This *should* be safe, since no board
- * manufacturer would be stupid enough to design a board
- * that conflicts with COM 1-4 --- we hope!
- */
- if (!(port->flags & UPF_SKIP_TEST)) {
- serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
- status1 = serial_in(up, UART_MSR) & 0xF0;
- serial_out(up, UART_MCR, save_mcr);
- if (status1 != 0x90) {
- spin_unlock_irqrestore(&port->lock, flags);
- DEBUG_AUTOCONF("LOOP test failed (%02x) ",
- status1);
- goto out;
- }
- }
-
- /*
- * We're pretty sure there's a port here. Let's find out what
- * type of port it is. The IIR top two bits allow us to find
- * out if it's 8250 or 16450, 16550, 16550A or later. This
- * determines what we test for next.
- *
- * We also initialise the EFR (if any) to zero for later. The
- * EFR occupies the same register location as the FCR and IIR.
- */
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, 0);
- serial_out(up, UART_LCR, 0);
-
- serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- scratch = serial_in(up, UART_IIR) >> 6;
-
- switch (scratch) {
- case 0:
- autoconfig_8250(up);
- break;
- case 1:
- port->type = PORT_UNKNOWN;
- break;
- case 2:
- port->type = PORT_16550;
- break;
- case 3:
- autoconfig_16550a(up);
- break;
- }
-
-#ifdef CONFIG_SERIAL_8250_RSA
- /*
- * Only probe for RSA ports if we got the region.
- */
- if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA &&
- __enable_rsa(up))
- port->type = PORT_RSA;
-#endif
-
- serial_out(up, UART_LCR, save_lcr);
-
- port->fifosize = uart_config[up->port.type].fifo_size;
- old_capabilities = up->capabilities;
- up->capabilities = uart_config[port->type].flags;
- up->tx_loadsz = uart_config[port->type].tx_loadsz;
-
- if (port->type == PORT_UNKNOWN)
- goto out_lock;
-
- /*
- * Reset the UART.
- */
-#ifdef CONFIG_SERIAL_8250_RSA
- if (port->type == PORT_RSA)
- serial_out(up, UART_RSA_FRR, 0);
-#endif
- serial_out(up, UART_MCR, save_mcr);
- serial8250_clear_fifos(up);
- serial_in(up, UART_RX);
- if (up->capabilities & UART_CAP_UUE)
- serial_out(up, UART_IER, UART_IER_UUE);
- else
- serial_out(up, UART_IER, 0);
-
-out_lock:
- spin_unlock_irqrestore(&port->lock, flags);
- if (up->capabilities != old_capabilities) {
- printk(KERN_WARNING
- "ttyS%d: detected caps %08x should be %08x\n",
- serial_index(port), old_capabilities,
- up->capabilities);
- }
-out:
- DEBUG_AUTOCONF("iir=%d ", scratch);
- DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
-}
-
-static void autoconfig_irq(struct uart_8250_port *up)
-{
- struct uart_port *port = &up->port;
- unsigned char save_mcr, save_ier;
- unsigned char save_ICP = 0;
- unsigned int ICP = 0;
- unsigned long irqs;
- int irq;
-
- if (port->flags & UPF_FOURPORT) {
- ICP = (port->iobase & 0xfe0) | 0x1f;
- save_ICP = inb_p(ICP);
- outb_p(0x80, ICP);
- inb_p(ICP);
- }
-
- /* forget possible initially masked and pending IRQ */
- probe_irq_off(probe_irq_on());
- save_mcr = serial_in(up, UART_MCR);
- save_ier = serial_in(up, UART_IER);
- serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
-
- irqs = probe_irq_on();
- serial_out(up, UART_MCR, 0);
- udelay(10);
- if (port->flags & UPF_FOURPORT) {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS);
- } else {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
- }
- serial_out(up, UART_IER, 0x0f); /* enable all intrs */
- serial_in(up, UART_LSR);
- serial_in(up, UART_RX);
- serial_in(up, UART_IIR);
- serial_in(up, UART_MSR);
- serial_out(up, UART_TX, 0xFF);
- udelay(20);
- irq = probe_irq_off(irqs);
-
- serial_out(up, UART_MCR, save_mcr);
- serial_out(up, UART_IER, save_ier);
-
- if (port->flags & UPF_FOURPORT)
- outb_p(save_ICP, ICP);
-
- port->irq = (irq > 0) ? irq : 0;
-}
-
-static inline void __stop_tx(struct uart_8250_port *p)
-{
- if (p->ier & UART_IER_THRI) {
- p->ier &= ~UART_IER_THRI;
- serial_out(p, UART_IER, p->ier);
- serial8250_rpm_put_tx(p);
- }
-}
-
-static void serial8250_stop_tx(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get(up);
- __stop_tx(up);
-
- /*
- * We really want to stop the transmitter from sending.
- */
- if (port->type == PORT_16C950) {
- up->acr |= UART_ACR_TXDIS;
- serial_icr_write(up, UART_ACR, up->acr);
- }
- serial8250_rpm_put(up);
-}
-
-static void serial8250_start_tx(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get_tx(up);
-
- if (up->dma && !up->dma->tx_dma(up))
- return;
-
- if (!(up->ier & UART_IER_THRI)) {
- up->ier |= UART_IER_THRI;
- serial_port_out(port, UART_IER, up->ier);
-
- if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- if (lsr & UART_LSR_THRE)
- serial8250_tx_chars(up);
- }
- }
-
- /*
- * Re-enable the transmitter if we disabled it.
- */
- if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
- up->acr &= ~UART_ACR_TXDIS;
- serial_icr_write(up, UART_ACR, up->acr);
- }
-}
-
-static void serial8250_throttle(struct uart_port *port)
-{
- port->throttle(port);
-}
-
-static void serial8250_unthrottle(struct uart_port *port)
-{
- port->unthrottle(port);
-}
-
-static void serial8250_stop_rx(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get(up);
-
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
- serial_port_out(port, UART_IER, up->ier);
-
- serial8250_rpm_put(up);
-}
-
-static void serial8250_disable_ms(struct uart_port *port)
-{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
-
- /* no MSR capabilities */
- if (up->bugs & UART_BUG_NOMSR)
- return;
-
- up->ier &= ~UART_IER_MSI;
- serial_port_out(port, UART_IER, up->ier);
-}
-
-static void serial8250_enable_ms(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- /* no MSR capabilities */
- if (up->bugs & UART_BUG_NOMSR)
- return;
-
- up->ier |= UART_IER_MSI;
-
- serial8250_rpm_get(up);
- serial_port_out(port, UART_IER, up->ier);
- serial8250_rpm_put(up);
-}
-
-/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
- */
-unsigned char
-serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
-{
- struct uart_port *port = &up->port;
- unsigned char ch;
- int max_count = 256;
- char flag;
-
- do {
- if (likely(lsr & UART_LSR_DR))
- ch = serial_in(up, UART_RX);
- else
- /*
- * Intel 82571 has a Serial Over LAN device that will
- * set UART_LSR_BI without setting UART_LSR_DR when
- * it receives a break. To avoid reading from the
- * receive buffer without the UART_LSR_DR bit set, we
- * just force the read character to be 0.
- */
- ch = 0;
-
- flag = TTY_NORMAL;
- port->icount.rx++;
-
- lsr |= up->lsr_saved_flags;
- up->lsr_saved_flags = 0;
-
- if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
- /*
- * We do the SysRQ and SAK checking
- * here because otherwise the break
- * may get masked by ignore_status_mask
- * or read_status_mask.
- */
- if (uart_handle_break(port))
- goto ignore_char;
- } else if (lsr & UART_LSR_PE)
- port->icount.parity++;
- else if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- /*
- * Mask off conditions which should be ignored.
- */
- lsr &= port->read_status_mask;
-
- if (lsr & UART_LSR_BI) {
- DEBUG_INTR("handling break....");
- flag = TTY_BREAK;
- } else if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- else if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- }
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
-ignore_char:
- lsr = serial_in(up, UART_LSR);
- } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
- spin_unlock(&port->lock);
- tty_flip_buffer_push(&port->state->port);
- spin_lock(&port->lock);
- return lsr;
-}
-EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-
-void serial8250_tx_chars(struct uart_8250_port *up)
-{
- struct uart_port *port = &up->port;
- struct circ_buf *xmit = &port->state->xmit;
- int count;
-
- if (port->x_char) {
- serial_out(up, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
- if (uart_tx_stopped(port)) {
- serial8250_stop_tx(port);
- return;
- }
- if (uart_circ_empty(xmit)) {
- __stop_tx(up);
- return;
- }
-
- count = up->tx_loadsz;
- do {
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- if (up->capabilities & UART_CAP_HFIFO) {
- if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
- BOTH_EMPTY)
- break;
- }
- } while (--count > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- DEBUG_INTR("THRE...");
-
- /*
- * With RPM enabled, we have to wait until the FIFO is empty before the
- * HW can go idle. So we get here once again with an empty FIFO and disable
- * the interrupt and RPM in __stop_tx().
- */
- if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
- __stop_tx(up);
-}
-EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-
-/* Caller holds uart port lock */
-unsigned int serial8250_modem_status(struct uart_8250_port *up)
-{
- struct uart_port *port = &up->port;
- unsigned int status = serial_in(up, UART_MSR);
-
- status |= up->msr_saved_flags;
- up->msr_saved_flags = 0;
- if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
- port->state != NULL) {
- if (status & UART_MSR_TERI)
- port->icount.rng++;
- if (status & UART_MSR_DDSR)
- port->icount.dsr++;
- if (status & UART_MSR_DDCD)
- uart_handle_dcd_change(port, status & UART_MSR_DCD);
- if (status & UART_MSR_DCTS)
- uart_handle_cts_change(port, status & UART_MSR_CTS);
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
- }
-
- return status;
-}
-EXPORT_SYMBOL_GPL(serial8250_modem_status);
-
-/*
- * This handles the interrupt from one port.
- */
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
- unsigned char status;
- unsigned long flags;
- struct uart_8250_port *up = up_to_u8250p(port);
- int dma_err = 0;
-
- if (iir & UART_IIR_NO_INT)
- return 0;
-
- spin_lock_irqsave(&port->lock, flags);
-
- status = serial_port_in(port, UART_LSR);
-
- DEBUG_INTR("status = %x...", status);
-
- if (status & (UART_LSR_DR | UART_LSR_BI)) {
- if (up->dma)
- dma_err = up->dma->rx_dma(up, iir);
-
- if (!up->dma || dma_err)
- status = serial8250_rx_chars(up, status);
- }
- serial8250_modem_status(up);
- if ((!up->dma || (up->dma && up->dma->tx_err)) &&
- (status & UART_LSR_THRE))
- serial8250_tx_chars(up);
-
- spin_unlock_irqrestore(&port->lock, flags);
- return 1;
-}
-EXPORT_SYMBOL_GPL(serial8250_handle_irq);
-
-static int serial8250_default_handle_irq(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned int iir;
- int ret;
-
- serial8250_rpm_get(up);
-
- iir = serial_port_in(port, UART_IIR);
- ret = serial8250_handle_irq(port, iir);
-
- serial8250_rpm_put(up);
- return ret;
-}
-
-/*
- * These Exar UARTs have an extra interrupt indicator that could
- * fire for a few unimplemented interrupts, one of which is a
- * wakeup event when coming out of sleep. This is here just to be
- * on the safe side so that these interrupts don't go unhandled.
- */
-static int exar_handle_irq(struct uart_port *port)
-{
- unsigned char int0, int1, int2, int3;
- unsigned int iir = serial_port_in(port, UART_IIR);
- int ret;
-
- ret = serial8250_handle_irq(port, iir);
-
- if ((port->type == PORT_XR17V35X) ||
- (port->type == PORT_XR17D15X)) {
- int0 = serial_port_in(port, 0x80);
- int1 = serial_port_in(port, 0x81);
- int2 = serial_port_in(port, 0x82);
- int3 = serial_port_in(port, 0x83);
- }
-
- return ret;
-}
-
-/*
* This is the serial driver's interrupt routine.
*
* Arjan thinks the old way was overly complex, so it got simplified.
@@ -1941,876 +359,6 @@ static void univ8250_release_irq(struct uart_8250_port *up)
serial_unlink_irq_chain(up);
}
-static unsigned int serial8250_tx_empty(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned long flags;
- unsigned int lsr;
-
- serial8250_rpm_get(up);
-
- spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- spin_unlock_irqrestore(&port->lock, flags);
-
- serial8250_rpm_put(up);
-
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int serial8250_get_mctrl(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned int status;
- unsigned int ret;
-
- serial8250_rpm_get(up);
- status = serial8250_modem_status(up);
- serial8250_rpm_put(up);
-
- ret = 0;
- if (status & UART_MSR_DCD)
- ret |= TIOCM_CAR;
- if (status & UART_MSR_RI)
- ret |= TIOCM_RNG;
- if (status & UART_MSR_DSR)
- ret |= TIOCM_DSR;
- if (status & UART_MSR_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char mcr = 0;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
-
- serial_port_out(port, UART_MCR, mcr);
-}
-EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
-
-static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- if (port->set_mctrl)
- port->set_mctrl(port, mctrl);
- else
- serial8250_do_set_mctrl(port, mctrl);
-}
-
-static void serial8250_break_ctl(struct uart_port *port, int break_state)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned long flags;
-
- serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
- if (break_state == -1)
- up->lcr |= UART_LCR_SBC;
- else
- up->lcr &= ~UART_LCR_SBC;
- serial_port_out(port, UART_LCR, up->lcr);
- spin_unlock_irqrestore(&port->lock, flags);
- serial8250_rpm_put(up);
-}
-
-/*
- * Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
-{
- unsigned int status, tmout = 10000;
-
- /* Wait up to 10ms for the character(s) to be sent. */
- for (;;) {
- status = serial_in(up, UART_LSR);
-
- up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
-
- if ((status & bits) == bits)
- break;
- if (--tmout == 0)
- break;
- udelay(1);
- }
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- unsigned int tmout;
- for (tmout = 1000000; tmout; tmout--) {
- unsigned int msr = serial_in(up, UART_MSR);
- up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
- if (msr & UART_MSR_CTS)
- break;
- udelay(1);
- touch_nmi_watchdog();
- }
- }
-}
-
-#ifdef CONFIG_CONSOLE_POLL
-/*
- * Console polling routines for writing and reading from the uart while
- * in an interrupt or debug context.
- */
-
-static int serial8250_get_poll_char(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char lsr;
- int status;
-
- serial8250_rpm_get(up);
-
- lsr = serial_port_in(port, UART_LSR);
-
- if (!(lsr & UART_LSR_DR)) {
- status = NO_POLL_CHAR;
- goto out;
- }
-
- status = serial_port_in(port, UART_RX);
-out:
- serial8250_rpm_put(up);
- return status;
-}
-
-
-static void serial8250_put_poll_char(struct uart_port *port,
- unsigned char c)
-{
- unsigned int ier;
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get(up);
- /*
- * First save the IER then disable the interrupts
- */
- ier = serial_port_in(port, UART_IER);
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
-
- wait_for_xmitr(up, BOTH_EMPTY);
- /*
- * Send the character out.
- */
- serial_port_out(port, UART_TX, c);
-
- /*
- * Finally, wait for transmitter to become empty
- * and restore the IER
- */
- wait_for_xmitr(up, BOTH_EMPTY);
- serial_port_out(port, UART_IER, ier);
- serial8250_rpm_put(up);
-}
-
-#endif /* CONFIG_CONSOLE_POLL */
-
-int serial8250_do_startup(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned long flags;
- unsigned char lsr, iir;
- int retval;
-
- if (port->type == PORT_8250_CIR)
- return -ENODEV;
-
- if (!port->fifosize)
- port->fifosize = uart_config[port->type].fifo_size;
- if (!up->tx_loadsz)
- up->tx_loadsz = uart_config[port->type].tx_loadsz;
- if (!up->capabilities)
- up->capabilities = uart_config[port->type].flags;
- up->mcr = 0;
-
- if (port->iotype != up->cur_iotype)
- set_io_from_upio(port);
-
- serial8250_rpm_get(up);
- if (port->type == PORT_16C950) {
- /* Wake up and initialize UART */
- up->acr = 0;
- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_port_out(port, UART_EFR, UART_EFR_ECB);
- serial_port_out(port, UART_IER, 0);
- serial_port_out(port, UART_LCR, 0);
- serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_port_out(port, UART_EFR, UART_EFR_ECB);
- serial_port_out(port, UART_LCR, 0);
- }
-
-#ifdef CONFIG_SERIAL_8250_RSA
- /*
- * If this is an RSA port, see if we can kick it up to the
- * higher speed clock.
- */
- enable_rsa(up);
-#endif
- /*
- * Clear the FIFO buffers and disable them.
- * (they will be reenabled in set_termios())
- */
- serial8250_clear_fifos(up);
-
- /*
- * Clear the interrupt registers.
- */
- serial_port_in(port, UART_LSR);
- serial_port_in(port, UART_RX);
- serial_port_in(port, UART_IIR);
- serial_port_in(port, UART_MSR);
-
- /*
- * At this point, there's no way the LSR could still be 0xff;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (!(port->flags & UPF_BUGGY_UART) &&
- (serial_port_in(port, UART_LSR) == 0xff)) {
- printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
- serial_index(port));
- retval = -ENODEV;
- goto out;
- }
-
- /*
- * For an XR16C850, we need to set the trigger levels
- */
- if (port->type == PORT_16850) {
- unsigned char fctr;
-
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-
- fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
- serial_port_out(port, UART_FCTR,
- fctr | UART_FCTR_TRGD | UART_FCTR_RX);
- serial_port_out(port, UART_TRG, UART_TRG_96);
- serial_port_out(port, UART_FCTR,
- fctr | UART_FCTR_TRGD | UART_FCTR_TX);
- serial_port_out(port, UART_TRG, UART_TRG_96);
-
- serial_port_out(port, UART_LCR, 0);
- }
-
- if (port->irq) {
- unsigned char iir1;
- /*
- * Test for UARTs that do not reassert THRE when the
- * transmitter is idle and the interrupt has already
- * been cleared. Real 16550s should always reassert
- * this interrupt whenever the transmitter is idle and
- * the interrupt is enabled. Delays are necessary to
- * allow register changes to become visible.
- */
- spin_lock_irqsave(&port->lock, flags);
- if (up->port.irqflags & IRQF_SHARED)
- disable_irq_nosync(port->irq);
-
- wait_for_xmitr(up, UART_LSR_THRE);
- serial_port_out_sync(port, UART_IER, UART_IER_THRI);
- udelay(1); /* allow THRE to set */
- iir1 = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
- serial_port_out_sync(port, UART_IER, UART_IER_THRI);
- udelay(1); /* allow a working UART time to re-assert THRE */
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
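-
- /*
- * iir1 records the response to the first THRI enable and iir the
- * response to the second; a UART that interrupts the first time
- * but not the second is failing to re-assert THRE.
- */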
-
- if (port->irqflags & IRQF_SHARED)
- enable_irq(port->irq);
- spin_unlock_irqrestore(&port->lock, flags);
-
- /*
- * If the interrupt is not reasserted, or we otherwise
- * don't trust the IIR, set up a timer to kick the UART
- * on a regular basis.
- */
- if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
- up->port.flags & UPF_BUG_THRE) {
- up->bugs |= UART_BUG_THRE;
- }
- }
-
- retval = up->ops->setup_irq(up);
- if (retval)
- goto out;
-
- /*
- * Now, initialize the UART
- */
- serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irqsave(&port->lock, flags);
- if (up->port.flags & UPF_FOURPORT) {
- if (!up->port.irq)
- up->port.mctrl |= TIOCM_OUT1;
- } else
- /*
- * Most PC uarts need OUT2 raised to enable interrupts.
- */
- if (port->irq)
- up->port.mctrl |= TIOCM_OUT2;
-
- serial8250_set_mctrl(port, port->mctrl);
-
- /*
- * Serial over LAN (SoL) hack:
- * Intel 8257x gigabit ethernet chips have a 16550 emulation
- * that is used for Serial over LAN. Those chips take longer
- * than a normal serial device to signal that transmit data
- * has been queued. Because of that, the TX irq test below
- * generally fails. Delaying the read of the IIR would not be
- * reliable either, since the timeout is variable, so simply
- * skip the test on these chips; UART_BUG_TXEN will then never
- * be enabled for them.
- */
- if (up->port.flags & UPF_NO_TXEN_TEST)
- goto dont_test_tx_en;
-
- /*
- * Do a quick test to see if we receive an
- * interrupt when we enable the TX irq.
- */
- serial_port_out(port, UART_IER, UART_IER_THRI);
- lsr = serial_port_in(port, UART_LSR);
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
-
- if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
- if (!(up->bugs & UART_BUG_TXEN)) {
- up->bugs |= UART_BUG_TXEN;
- pr_debug("ttyS%d - enabling bad tx status workarounds\n",
- serial_index(port));
- }
- } else {
- up->bugs &= ~UART_BUG_TXEN;
- }
-
-dont_test_tx_en:
- spin_unlock_irqrestore(&port->lock, flags);
-
- /*
- * Clear the interrupt registers again for luck, and clear the
- * saved flags to avoid getting false values from polling
- * routines or the previous session.
- */
- serial_port_in(port, UART_LSR);
- serial_port_in(port, UART_RX);
- serial_port_in(port, UART_IIR);
- serial_port_in(port, UART_MSR);
- up->lsr_saved_flags = 0;
- up->msr_saved_flags = 0;
-
- /*
- * Request DMA channels for both RX and TX.
- */
- if (up->dma) {
- retval = serial8250_request_dma(up);
- if (retval) {
- pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
- serial_index(port));
- up->dma = NULL;
- }
- }
-
- /*
- * Finally, enable interrupts. Note: Modem status interrupts
- * are set via set_termios(), which will occur shortly
- * anyway, so we don't enable them here.
- */
- up->ier = UART_IER_RLSI | UART_IER_RDI;
- serial_port_out(port, UART_IER, up->ier);
-
- if (port->flags & UPF_FOURPORT) {
- unsigned int icp;
- /*
- * Enable interrupts on the AST Fourport board
- */
- icp = (port->iobase & 0xfe0) | 0x01f;
- outb_p(0x80, icp);
- inb_p(icp);
- }
- retval = 0;
-out:
- serial8250_rpm_put(up);
- return retval;
-}
-EXPORT_SYMBOL_GPL(serial8250_do_startup);
-
-static int serial8250_startup(struct uart_port *port)
-{
- if (port->startup)
- return port->startup(port);
- return serial8250_do_startup(port);
-}
-
-void serial8250_do_shutdown(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned long flags;
-
- serial8250_rpm_get(up);
- /*
- * Disable interrupts from this port
- */
- up->ier = 0;
- serial_port_out(port, UART_IER, 0);
-
- if (up->dma)
- serial8250_release_dma(up);
-
- spin_lock_irqsave(&port->lock, flags);
- if (port->flags & UPF_FOURPORT) {
- /* reset interrupts on the AST Fourport board */
- inb((port->iobase & 0xfe0) | 0x1f);
- port->mctrl |= TIOCM_OUT1;
- } else
- port->mctrl &= ~TIOCM_OUT2;
-
- serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
-
- /*
- * Disable break condition and FIFOs
- */
- serial_port_out(port, UART_LCR,
- serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
- serial8250_clear_fifos(up);
-
-#ifdef CONFIG_SERIAL_8250_RSA
- /*
- * Reset the RSA board back to 115kbps compat mode.
- */
- disable_rsa(up);
-#endif
-
- /*
- * Read data port to reset things, and then unlink from
- * the IRQ chain.
- */
- serial_port_in(port, UART_RX);
- serial8250_rpm_put(up);
-
- up->ops->release_irq(up);
-}
-EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
-
-static void serial8250_shutdown(struct uart_port *port)
-{
- if (port->shutdown)
- port->shutdown(port);
- else
- serial8250_do_shutdown(port);
-}
-
-/*
- * XR17V35x UARTs have an extra fractional divisor register (DLD)
- * Calculate divisor with extra 4-bit fractional portion
- */
-static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
- unsigned int baud,
- unsigned int *frac)
-{
- struct uart_port *port = &up->port;
- unsigned int quot_16;
-
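- /*
- * uartclk / baud is sixteen times the classic divisor; the low
- * nibble supplies the 4-bit fractional value for the DLD
- * register and the remaining bits the integer divisor.
- */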
- quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
- *frac = quot_16 & 0x0f;
-
- return quot_16 >> 4;
-}
-
-static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
- unsigned int baud,
- unsigned int *frac)
-{
- struct uart_port *port = &up->port;
- unsigned int quot;
-
- /*
- * Handle magic divisors for baud rates above baud_base on
- * SMSC SuperIO chips: divisor 0x8001 selects four times
- * baud_base and 0x8002 selects twice baud_base.
- */
- if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/4))
- quot = 0x8001;
- else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/8))
- quot = 0x8002;
- else if (up->port.type == PORT_XR17V35X)
- quot = xr17v35x_get_divisor(up, baud, frac);
- else
- quot = uart_get_divisor(port, baud);
-
- /*
- * Oxford Semi 952 rev B workaround
- */
- if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
- quot++;
-
- return quot;
-}
-
-static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
- tcflag_t c_cflag)
-{
- unsigned char cval;
-
- switch (c_cflag & CSIZE) {
- case CS5:
- cval = UART_LCR_WLEN5;
- break;
- case CS6:
- cval = UART_LCR_WLEN6;
- break;
- case CS7:
- cval = UART_LCR_WLEN7;
- break;
- default:
- case CS8:
- cval = UART_LCR_WLEN8;
- break;
- }
-
- if (c_cflag & CSTOPB)
- cval |= UART_LCR_STOP;
- if (c_cflag & PARENB) {
- cval |= UART_LCR_PARITY;
- if (up->bugs & UART_BUG_PARITY)
- up->fifo_bug = true;
- }
- if (!(c_cflag & PARODD))
- cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
- if (c_cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-#endif
-
- return cval;
-}
-
-static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- /* Workaround to enable 115200 baud on OMAP1510 internal ports */
- if (is_omap1510_8250(up)) {
- if (baud == 115200) {
- quot = 1;
- serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
- } else
- serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
- }
-
- /*
- * On NatSemi parts, switch to bank 2 rather than bank 1 to
- * avoid resetting EXCR2; otherwise just set DLAB.
- */
- if (up->capabilities & UART_NATSEMI)
- serial_port_out(port, UART_LCR, 0xe0);
- else
- serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
-
- serial_dl_write(up, quot);
-
- /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
- if (up->port.type == PORT_XR17V35X)
- serial_port_out(port, 0x2, quot_frac);
-}
-
-void
-serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char cval;
- unsigned long flags;
- unsigned int baud, quot, frac = 0;
-
- cval = serial8250_compute_lcr(up, termios->c_cflag);
-
- /*
- * Ask the core to calculate the divisor for us.
- */
- baud = uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
- port->uartclk / 16);
- quot = serial8250_get_divisor(up, baud, &frac);
-
- /*
- * Ok, we're now changing the port state. Do it with
- * interrupts disabled.
- */
- serial8250_rpm_get(up);
- spin_lock_irqsave(&port->lock, flags);
-
- up->lcr = cval; /* Save computed LCR */
-
- if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
- /*
- * NOTE: unless fifo_bug is set, the user may still change the
- * RX trigger. At baud rates below 2400 without DMA, or with a
- * broken FIFO, fall back to a one-byte RX trigger.
- */
- if ((baud < 2400 && !up->dma) || up->fifo_bug) {
- up->fcr &= ~UART_FCR_TRIGGER_MASK;
- up->fcr |= UART_FCR_TRIGGER_1;
- }
- }
-
- /*
- * MCR-based auto flow control. When AFE is enabled, RTS will be
- * deasserted when the receive FIFO contains more characters than
- * the trigger, or the MCR RTS bit is cleared. In the case where
- * the remote UART is not using CTS auto flow control, we must
- * have sufficient FIFO entries for the latency of the remote
- * UART to respond. IOW, at least 32 bytes of FIFO.
- */
- if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
- up->mcr &= ~UART_MCR_AFE;
- if (termios->c_cflag & CRTSCTS)
- up->mcr |= UART_MCR_AFE;
- }
-
- /*
- * Update the per-port timeout.
- */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_LSR_BI;
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_LSR_BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_OE;
- }
-
- /*
- * ignore all characters if CREAD is not set
- */
- if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_LSR_DR;
-
- /*
- * CTS flow control flag and modem status interrupts
- */
- up->ier &= ~UART_IER_MSI;
- if (!(up->bugs & UART_BUG_NOMSR) &&
- UART_ENABLE_MS(&up->port, termios->c_cflag))
- up->ier |= UART_IER_MSI;
- if (up->capabilities & UART_CAP_UUE)
- up->ier |= UART_IER_UUE;
- if (up->capabilities & UART_CAP_RTOIE)
- up->ier |= UART_IER_RTOIE;
-
- serial_port_out(port, UART_IER, up->ier);
-
- if (up->capabilities & UART_CAP_EFR) {
- unsigned char efr = 0;
- /*
- * TI16C752/Startech hardware flow control. FIXME:
- * - TI16C752 requires control thresholds to be set.
- * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
- */
- if (termios->c_cflag & CRTSCTS)
- efr |= UART_EFR_CTS;
-
- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
- if (port->flags & UPF_EXAR_EFR)
- serial_port_out(port, UART_XR_EFR, efr);
- else
- serial_port_out(port, UART_EFR, efr);
- }
-
- serial8250_set_divisor(port, baud, quot, frac);
-
- /*
- * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
- * is written without DLAB set, this mode will be disabled.
- */
- if (port->type == PORT_16750)
- serial_port_out(port, UART_FCR, up->fcr);
-
- serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */
- if (port->type != PORT_16750) {
- /* emulated UARTs (Lucent Venus 167x) need two steps */
- if (up->fcr & UART_FCR_ENABLE_FIFO)
- serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
- }
- serial8250_set_mctrl(port, port->mctrl);
- spin_unlock_irqrestore(&port->lock, flags);
- serial8250_rpm_put(up);
-
- /* Don't rewrite B0 */
- if (tty_termios_baud_rate(termios))
- tty_termios_encode_baud_rate(termios, baud, baud);
-}
-EXPORT_SYMBOL(serial8250_do_set_termios);
-
-static void
-serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- if (port->set_termios)
- port->set_termios(port, termios, old);
- else
- serial8250_do_set_termios(port, termios, old);
-}
-
-static void
-serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
-{
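- /*
- * The PPS line discipline timestamps carrier (DCD) transitions,
- * so keep modem status interrupts enabled while it is active.
- */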
- if (termios->c_line == N_PPS) {
- port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
- serial8250_enable_ms(port);
- spin_unlock_irq(&port->lock);
- } else {
- port->flags &= ~UPF_HARDPPS_CD;
- if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
- serial8250_disable_ms(port);
- spin_unlock_irq(&port->lock);
- }
- }
-}
-
-
-void serial8250_do_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
- struct uart_8250_port *p = up_to_u8250p(port);
-
- serial8250_set_sleep(p, state != 0);
-}
-EXPORT_SYMBOL(serial8250_do_pm);
-
-static void
-serial8250_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
- if (port->pm)
- port->pm(port, state, oldstate);
- else
- serial8250_do_pm(port, state, oldstate);
-}
-
-static unsigned int serial8250_port_size(struct uart_8250_port *pt)
-{
- if (pt->port.mapsize)
- return pt->port.mapsize;
- if (pt->port.iotype == UPIO_AU) {
- if (pt->port.type == PORT_RT2880)
- return 0x100;
- return 0x1000;
- }
- if (is_omap1_8250(pt))
- return 0x16 << pt->port.regshift;
-
- return 8 << pt->port.regshift;
-}
-
-/*
- * Resource handling.
- */
-static int serial8250_request_std_resource(struct uart_8250_port *up)
-{
- unsigned int size = serial8250_port_size(up);
- struct uart_port *port = &up->port;
- int ret = 0;
-
- switch (port->iotype) {
- case UPIO_AU:
- case UPIO_TSI:
- case UPIO_MEM32:
- case UPIO_MEM32BE:
- case UPIO_MEM:
- if (!port->mapbase)
- break;
-
- if (!request_mem_region(port->mapbase, size, "serial")) {
- ret = -EBUSY;
- break;
- }
-
- if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, size);
- if (!port->membase) {
- release_mem_region(port->mapbase, size);
- ret = -ENOMEM;
- }
- }
- break;
-
- case UPIO_HUB6:
- case UPIO_PORT:
- if (!request_region(port->iobase, size, "serial"))
- ret = -EBUSY;
- break;
- }
- return ret;
-}
-
-static void serial8250_release_std_resource(struct uart_8250_port *up)
-{
- unsigned int size = serial8250_port_size(up);
- struct uart_port *port = &up->port;
-
- switch (port->iotype) {
- case UPIO_AU:
- case UPIO_TSI:
- case UPIO_MEM32:
- case UPIO_MEM32BE:
- case UPIO_MEM:
- if (!port->mapbase)
- break;
-
- if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
- port->membase = NULL;
- }
-
- release_mem_region(port->mapbase, size);
- break;
-
- case UPIO_HUB6:
- case UPIO_PORT:
- release_region(port->iobase, size);
- break;
- }
-}
-
#ifdef CONFIG_SERIAL_8250_RSA
static int serial8250_request_rsa_resource(struct uart_8250_port *up)
{
@@ -2848,259 +396,6 @@ static void serial8250_release_rsa_resource(struct uart_8250_port *up)
}
#endif
-static void serial8250_release_port(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_release_std_resource(up);
-}
-
-static int serial8250_request_port(struct uart_port *port)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- int ret;
-
- if (port->type == PORT_8250_CIR)
- return -ENODEV;
-
- ret = serial8250_request_std_resource(up);
-
- return ret;
-}
-
-static int fcr_get_rxtrig_bytes(struct uart_8250_port *up)
-{
- const struct serial8250_config *conf_type = &uart_config[up->port.type];
- unsigned char bytes;
-
- bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)];
-
- return bytes ? bytes : -EOPNOTSUPP;
-}
-
-static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes)
-{
- const struct serial8250_config *conf_type = &uart_config[up->port.type];
- int i;
-
- if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)])
- return -EOPNOTSUPP;
-
- for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) {
- if (bytes < conf_type->rxtrig_bytes[i])
- /* Use the nearest lower value */
- return (--i) << UART_FCR_R_TRIG_SHIFT;
- }
-
- return UART_FCR_R_TRIG_11;
-}
-
-static int do_get_rxtrig(struct tty_port *port)
-{
- struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
- struct uart_8250_port *up =
- container_of(uport, struct uart_8250_port, port);
-
- if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
- return -EINVAL;
-
- return fcr_get_rxtrig_bytes(up);
-}
-
-static int do_serial8250_get_rxtrig(struct tty_port *port)
-{
- int rxtrig_bytes;
-
- mutex_lock(&port->mutex);
- rxtrig_bytes = do_get_rxtrig(port);
- mutex_unlock(&port->mutex);
-
- return rxtrig_bytes;
-}
-
-static ssize_t serial8250_get_attr_rx_trig_bytes(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tty_port *port = dev_get_drvdata(dev);
- int rxtrig_bytes;
-
- rxtrig_bytes = do_serial8250_get_rxtrig(port);
- if (rxtrig_bytes < 0)
- return rxtrig_bytes;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
-}
-
-static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
-{
- struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
- struct uart_8250_port *up =
- container_of(uport, struct uart_8250_port, port);
- int rxtrig;
-
- if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
- up->fifo_bug)
- return -EINVAL;
-
- rxtrig = bytes_to_fcr_rxtrig(up, bytes);
- if (rxtrig < 0)
- return rxtrig;
-
- serial8250_clear_fifos(up);
- up->fcr &= ~UART_FCR_TRIGGER_MASK;
- up->fcr |= (unsigned char)rxtrig;
- serial_out(up, UART_FCR, up->fcr);
- return 0;
-}
-
-static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
-{
- int ret;
-
- mutex_lock(&port->mutex);
- ret = do_set_rxtrig(port, bytes);
- mutex_unlock(&port->mutex);
-
- return ret;
-}
-
-static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct tty_port *port = dev_get_drvdata(dev);
- unsigned char bytes;
- int ret;
-
- if (!count)
- return -EINVAL;
-
- ret = kstrtou8(buf, 10, &bytes);
- if (ret < 0)
- return ret;
-
- ret = do_serial8250_set_rxtrig(port, bytes);
- if (ret < 0)
- return ret;
-
- return count;
-}
-
-static DEVICE_ATTR(rx_trig_bytes, S_IRUSR | S_IWUSR | S_IRGRP,
- serial8250_get_attr_rx_trig_bytes,
- serial8250_set_attr_rx_trig_bytes);
-
-static struct attribute *serial8250_dev_attrs[] = {
- &dev_attr_rx_trig_bytes.attr,
- NULL,
- };
-
-static struct attribute_group serial8250_dev_attr_group = {
- .attrs = serial8250_dev_attrs,
- };
-
-static void register_dev_spec_attr_grp(struct uart_8250_port *up)
-{
- const struct serial8250_config *conf_type = &uart_config[up->port.type];
-
- if (conf_type->rxtrig_bytes[0])
- up->port.attr_group = &serial8250_dev_attr_group;
-}
-
-static void serial8250_config_port(struct uart_port *port, int flags)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
- int ret;
-
- if (port->type == PORT_8250_CIR)
- return;
-
- /*
- * Find the region that we can probe for. This in turn
- * tells us whether we can probe for the type of port.
- */
- ret = serial8250_request_std_resource(up);
- if (ret < 0)
- return;
-
- if (port->iotype != up->cur_iotype)
- set_io_from_upio(port);
-
- if (flags & UART_CONFIG_TYPE)
- autoconfig(up);
-
- /* if access method is AU, it is a 16550 with a quirk */
- if (port->type == PORT_16550A && port->iotype == UPIO_AU)
- up->bugs |= UART_BUG_NOMSR;
-
- /* HW bugs may trigger IRQ while IIR == NO_INT */
- if (port->type == PORT_TEGRA)
- up->bugs |= UART_BUG_NOMSR;
-
- if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
- autoconfig_irq(up);
-
- if (port->type == PORT_UNKNOWN)
- serial8250_release_std_resource(up);
-
- /* FIXME: probably not the best place for this */
- if ((port->type == PORT_XR17V35X) ||
- (port->type == PORT_XR17D15X))
- port->handle_irq = exar_handle_irq;
-
- register_dev_spec_attr_grp(up);
- up->fcr = uart_config[up->port.type].fcr;
-}
-
-static int
-serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- if (ser->irq >= nr_irqs || ser->irq < 0 ||
- ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
- ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
- ser->type == PORT_STARTECH)
- return -EINVAL;
- return 0;
-}
-
-static const char *
-serial8250_type(struct uart_port *port)
-{
- int type = port->type;
-
- if (type >= ARRAY_SIZE(uart_config))
- type = 0;
- return uart_config[type].name;
-}
-
-static const struct uart_ops serial8250_pops = {
- .tx_empty = serial8250_tx_empty,
- .set_mctrl = serial8250_set_mctrl,
- .get_mctrl = serial8250_get_mctrl,
- .stop_tx = serial8250_stop_tx,
- .start_tx = serial8250_start_tx,
- .throttle = serial8250_throttle,
- .unthrottle = serial8250_unthrottle,
- .stop_rx = serial8250_stop_rx,
- .enable_ms = serial8250_enable_ms,
- .break_ctl = serial8250_break_ctl,
- .startup = serial8250_startup,
- .shutdown = serial8250_shutdown,
- .set_termios = serial8250_set_termios,
- .set_ldisc = serial8250_set_ldisc,
- .pm = serial8250_pm,
- .type = serial8250_type,
- .release_port = serial8250_release_port,
- .request_port = serial8250_request_port,
- .config_port = serial8250_config_port,
- .verify_port = serial8250_verify_port,
-#ifdef CONFIG_CONSOLE_POLL
- .poll_get_char = serial8250_get_poll_char,
- .poll_put_char = serial8250_put_poll_char,
-#endif
-};
-
static const struct uart_ops *base_ops;
static struct uart_ops univ8250_port_ops;
@@ -3139,42 +434,6 @@ void serial8250_set_isa_configurator(
}
EXPORT_SYMBOL(serial8250_set_isa_configurator);
-static void serial8250_init_port(struct uart_8250_port *up)
-{
- struct uart_port *port = &up->port;
-
- spin_lock_init(&port->lock);
- port->ops = &serial8250_pops;
-
- up->cur_iotype = 0xFF;
-}
-
-static void serial8250_set_defaults(struct uart_8250_port *up)
-{
- struct uart_port *port = &up->port;
-
- if (up->port.flags & UPF_FIXED_TYPE) {
- unsigned int type = up->port.type;
-
- if (!up->port.fifosize)
- up->port.fifosize = uart_config[type].fifo_size;
- if (!up->tx_loadsz)
- up->tx_loadsz = uart_config[type].tx_loadsz;
- if (!up->capabilities)
- up->capabilities = uart_config[type].flags;
- }
-
- set_io_from_upio(port);
-
- /* default dma handlers */
- if (up->dma) {
- if (!up->dma->tx_dma)
- up->dma->tx_dma = serial8250_tx_dma;
- if (!up->dma->rx_dma)
- up->dma->rx_dma = serial8250_rx_dma;
- }
-}
-
#ifdef CONFIG_SERIAL_8250_RSA
static void univ8250_config_port(struct uart_port *port, int flags)
@@ -3324,94 +583,6 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
#ifdef CONFIG_SERIAL_8250_CONSOLE
-static void serial8250_console_putchar(struct uart_port *port, int ch)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- wait_for_xmitr(up, UART_LSR_THRE);
- serial_port_out(port, UART_TX, ch);
-}
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- *
- * The console_lock must be held when we get here.
- */
-static void serial8250_console_write(struct uart_8250_port *up, const char *s,
- unsigned int count)
-{
- struct uart_port *port = &up->port;
- unsigned long flags;
- unsigned int ier;
- int locked = 1;
-
- touch_nmi_watchdog();
-
- serial8250_rpm_get(up);
-
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
-
- /*
- * First save the IER, then disable the interrupts
- */
- ier = serial_port_in(port, UART_IER);
-
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
-
- /* check scratch reg to see if port powered off during system sleep */
- if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
- struct ktermios termios;
- unsigned int baud, quot, frac = 0;
-
- termios.c_cflag = port->cons->cflag;
- if (port->state->port.tty && termios.c_cflag == 0)
- termios.c_cflag = port->state->port.tty->termios.c_cflag;
-
- baud = uart_get_baud_rate(port, &termios, NULL,
- port->uartclk / 16 / 0xffff,
- port->uartclk / 16);
- quot = serial8250_get_divisor(up, baud, &frac);
-
- serial8250_set_divisor(port, baud, quot, frac);
- serial_port_out(port, UART_LCR, up->lcr);
- serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
-
- up->canary = 0;
- }
-
- uart_console_write(port, s, count, serial8250_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty
- * and restore the IER
- */
- wait_for_xmitr(up, BOTH_EMPTY);
- serial_port_out(port, UART_IER, ier);
-
- /*
- * The receive handling will happen properly because the
- * receive-ready bit will still be set; it is not cleared
- * on read. Modem status handling, however, is not automatic,
- * so call it ourselves if any flags were saved while
- * processing with interrupts off.
- */
- if (up->msr_saved_flags)
- serial8250_modem_status(up);
-
- if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
- serial8250_rpm_put(up);
-}
-
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
@@ -3420,39 +591,6 @@ static void univ8250_console_write(struct console *co, const char *s,
serial8250_console_write(up, s, count);
}
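-/*
- * Read back the divisor latch and convert it to a baud rate, so the
- * console can adopt whatever rate an earlier boot stage programmed.
- */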
-static unsigned int probe_baud(struct uart_port *port)
-{
- unsigned char lcr, dll, dlm;
- unsigned int quot;
-
- lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
- dll = serial_port_in(port, UART_DLL);
- dlm = serial_port_in(port, UART_DLM);
- serial_port_out(port, UART_LCR, lcr);
-
- quot = (dlm << 8) | dll;
- return (port->uartclk / 16) / quot;
-}
-
-static int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
-{
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (!port->iobase && !port->membase)
- return -ENODEV;
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else if (probe)
- baud = probe_baud(port);
-
- return uart_set_options(port, port->cons, baud, parity, bits, flow);
-}
-
static int univ8250_console_setup(struct console *co, char *options)
{
struct uart_port *port;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index d48b50641e9a..06324f17a0cb 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -56,7 +56,6 @@
struct dw8250_data {
u8 usr_reg;
- int last_mcr;
int line;
int msr_mask_on;
int msr_mask_off;
@@ -76,12 +75,6 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
- /* If reading MSR, report CTS asserted when auto-CTS/RTS enabled */
- if (offset == UART_MSR && d->last_mcr & UART_MCR_AFE) {
- value |= UART_MSR_CTS;
- value &= ~UART_MSR_DCTS;
- }
-
/* Override any modem control signals if needed */
if (offset == UART_MSR) {
value |= d->msr_mask_on;
@@ -101,11 +94,6 @@ static void dw8250_force_idle(struct uart_port *p)
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
-
- if (offset == UART_MCR)
- d->last_mcr = value;
-
writeb(value, p->membase + (offset << p->regshift));
/* Make sure LCR write wasn't ignored */
@@ -144,11 +132,6 @@ static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
-
- if (offset == UART_MCR)
- d->last_mcr = value;
-
value &= 0xff;
__raw_writeq(value, p->membase + (offset << p->regshift));
/* Read back to ensure register write ordering. */
@@ -175,11 +158,6 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
-
- if (offset == UART_MCR)
- d->last_mcr = value;
-
writel(value, p->membase + (offset << p->regshift));
/* Make sure LCR write wasn't ignored */
@@ -257,6 +235,11 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
if (!ret)
p->uartclk = rate;
+
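+ /*
+ * UPSTAT_AUTOCTS tells the serial core that the UART itself
+ * handles CTS flow control, so the core skips software
+ * handshaking for it.
+ */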
+ p->status &= ~UPSTAT_AUTOCTS;
+ if (termios->c_cflag & CRTSCTS)
+ p->status |= UPSTAT_AUTOCTS;
+
out:
serial8250_do_set_termios(p, termios, old);
}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 771dda29a0f8..faed05f25bc2 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -35,7 +35,7 @@
#include <asm/io.h>
#include <asm/serial.h>
-unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offset)
+static unsigned int __init serial8250_early_in(struct uart_port *port, int offset)
{
switch (port->iotype) {
case UPIO_MEM:
@@ -51,7 +51,7 @@ unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offse
}
}
-void __weak __init serial8250_early_out(struct uart_port *port, int offset, int value)
+static void __init serial8250_early_out(struct uart_port *port, int offset, int value)
{
switch (port->iotype) {
case UPIO_MEM:
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 5815e81b5fc6..89474399ab89 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -17,18 +17,19 @@
#include <linux/serial_core.h>
#include "8250.h"
-#define ADDR_PORT 0x4E
-#define DATA_PORT 0x4F
-#define ENTRY_KEY 0x77
+#define ADDR_PORT 0
+#define DATA_PORT 1
#define EXIT_KEY 0xAA
#define CHIP_ID1 0x20
-#define CHIP_ID1_VAL 0x02
#define CHIP_ID2 0x21
-#define CHIP_ID2_VAL 0x16
+#define CHIP_ID_0 0x1602
+#define CHIP_ID_1 0x0501
#define VENDOR_ID1 0x23
#define VENDOR_ID1_VAL 0x19
#define VENDOR_ID2 0x24
#define VENDOR_ID2_VAL 0x34
+#define IO_ADDR1 0x61
+#define IO_ADDR2 0x60
#define LDN 0x7
#define RS485 0xF0
@@ -39,51 +40,49 @@
#define DRIVER_NAME "8250_fintek"
-static int fintek_8250_enter_key(void){
+struct fintek_8250 {
+ u16 base_port;
+ u8 index;
+ u8 key;
+ long line;
+};
+
+static int fintek_8250_enter_key(u16 base_port, u8 key)
+{
- if (!request_muxed_region(ADDR_PORT, 2, DRIVER_NAME))
+ if (!request_muxed_region(base_port, 2, DRIVER_NAME))
return -EBUSY;
- outb(ENTRY_KEY, ADDR_PORT);
- outb(ENTRY_KEY, ADDR_PORT);
+ outb(key, base_port + ADDR_PORT);
+ outb(key, base_port + ADDR_PORT);
return 0;
}
-static void fintek_8250_exit_key(void){
-
- outb(EXIT_KEY, ADDR_PORT);
- release_region(ADDR_PORT, 2);
-}
-
-static int fintek_8250_get_index(resource_size_t base_addr)
+static void fintek_8250_exit_key(u16 base_port)
{
- resource_size_t base[] = {0x3f8, 0x2f8, 0x3e8, 0x2e8};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(base); i++)
- if (base_addr == base[i])
- return i;
- return -ENODEV;
+ outb(EXIT_KEY, base_port + ADDR_PORT);
+ release_region(base_port + ADDR_PORT, 2);
}
-static int fintek_8250_check_id(void)
+static int fintek_8250_check_id(u16 base_port)
{
+ u16 chip;
- outb(CHIP_ID1, ADDR_PORT);
- if (inb(DATA_PORT) != CHIP_ID1_VAL)
+ outb(VENDOR_ID1, base_port + ADDR_PORT);
+ if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL)
return -ENODEV;
- outb(CHIP_ID2, ADDR_PORT);
- if (inb(DATA_PORT) != CHIP_ID2_VAL)
+ outb(VENDOR_ID2, base_port + ADDR_PORT);
+ if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL)
return -ENODEV;
- outb(VENDOR_ID1, ADDR_PORT);
- if (inb(DATA_PORT) != VENDOR_ID1_VAL)
- return -ENODEV;
+ outb(CHIP_ID1, base_port + ADDR_PORT);
+ chip = inb(base_port + DATA_PORT);
+ outb(CHIP_ID2, base_port + ADDR_PORT);
+ chip |= inb(base_port + DATA_PORT) << 8;
- outb(VENDOR_ID2, ADDR_PORT);
- if (inb(DATA_PORT) != VENDOR_ID2_VAL)
+ if (chip != CHIP_ID_0 && chip != CHIP_ID_1)
return -ENODEV;
return 0;
@@ -93,9 +92,9 @@ static int fintek_8250_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
- int index = fintek_8250_get_index(port->iobase);
+ struct fintek_8250 *pdata = port->private_data;
- if (index < 0)
+ if (!pdata)
return -EINVAL;
if (rs485->flags & SER_RS485_ENABLED)
@@ -125,44 +124,84 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (rs485->flags & SER_RS485_RTS_ON_SEND)
config |= RTS_INVERT;
- if (fintek_8250_enter_key())
+ if (fintek_8250_enter_key(pdata->base_port, pdata->key))
return -EBUSY;
- outb(LDN, ADDR_PORT);
- outb(index, DATA_PORT);
- outb(RS485, ADDR_PORT);
- outb(config, DATA_PORT);
- fintek_8250_exit_key();
+ outb(LDN, pdata->base_port + ADDR_PORT);
+ outb(pdata->index, pdata->base_port + DATA_PORT);
+ outb(RS485, pdata->base_port + ADDR_PORT);
+ outb(config, pdata->base_port + DATA_PORT);
+ fintek_8250_exit_key(pdata->base_port);
port->rs485 = *rs485;
return 0;
}
+static int fintek_8250_base_port(u16 io_address, u8 *key, u8 *index)
+{
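+ /*
+ * Try both Super I/O config addresses with each known entry key,
+ * then walk logical devices 0-3 looking for one whose programmed
+ * I/O base matches the port being registered.
+ */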
+ static const u16 addr[] = {0x4e, 0x2e};
+ static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(addr); i++) {
+ for (j = 0; j < ARRAY_SIZE(keys); j++) {
+
+ if (fintek_8250_enter_key(addr[i], keys[j]))
+ continue;
+ if (fintek_8250_check_id(addr[i])) {
+ fintek_8250_exit_key(addr[i]);
+ continue;
+ }
+
+ for (k = 0; k < 4; k++) {
+ u16 aux;
+
+ outb(LDN, addr[i] + ADDR_PORT);
+ outb(k, addr[i] + DATA_PORT);
+
+ outb(IO_ADDR1, addr[i] + ADDR_PORT);
+ aux = inb(addr[i] + DATA_PORT);
+ outb(IO_ADDR2, addr[i] + ADDR_PORT);
+ aux |= inb(addr[i] + DATA_PORT) << 8;
+ if (aux != io_address)
+ continue;
+
+ fintek_8250_exit_key(addr[i]);
+ *key = keys[j];
+ *index = k;
+ return addr[i];
+ }
+ fintek_8250_exit_key(addr[i]);
+ }
+ }
+
+ return -ENODEV;
+}
+
static int
fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
- int line;
struct uart_8250_port uart;
- int ret;
+ struct fintek_8250 *pdata;
+ int base_port;
+ u8 key;
+ u8 index;
if (!pnp_port_valid(dev, 0))
return -ENODEV;
- if (fintek_8250_get_index(pnp_port_start(dev, 0)) < 0)
+ base_port = fintek_8250_base_port(pnp_port_start(dev, 0), &key, &index);
+ if (base_port < 0)
return -ENODEV;
- /* Enable configuration registers*/
- if (fintek_8250_enter_key())
- return -EBUSY;
+ memset(&uart, 0, sizeof(uart));
- /*Check ID*/
- ret = fintek_8250_check_id();
- fintek_8250_exit_key();
- if (ret)
- return ret;
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ uart.port.private_data = pdata;
- memset(&uart, 0, sizeof(uart));
if (!pnp_irq_valid(dev, 0))
return -ENODEV;
uart.port.irq = pnp_irq(dev, 0);
@@ -176,40 +215,43 @@ fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
uart.port.uartclk = 1843200;
uart.port.dev = &dev->dev;
- line = serial8250_register_8250_port(&uart);
- if (line < 0)
+ pdata->key = key;
+ pdata->base_port = base_port;
+ pdata->index = index;
+ pdata->line = serial8250_register_8250_port(&uart);
+ if (pdata->line < 0)
return -ENODEV;
- pnp_set_drvdata(dev, (void *)((long)line + 1));
+ pnp_set_drvdata(dev, pdata);
return 0;
}
static void fintek_8250_remove(struct pnp_dev *dev)
{
- long line = (long)pnp_get_drvdata(dev);
+ struct fintek_8250 *pdata = pnp_get_drvdata(dev);
- if (line)
- serial8250_unregister_port(line - 1);
+ if (pdata)
+ serial8250_unregister_port(pdata->line);
}
#ifdef CONFIG_PM
static int fintek_8250_suspend(struct pnp_dev *dev, pm_message_t state)
{
- long line = (long)pnp_get_drvdata(dev);
+ struct fintek_8250 *pdata = pnp_get_drvdata(dev);
- if (!line)
+ if (!pdata)
return -ENODEV;
- serial8250_suspend_port(line - 1);
+ serial8250_suspend_port(pdata->line);
return 0;
}
static int fintek_8250_resume(struct pnp_dev *dev)
{
- long line = (long)pnp_get_drvdata(dev);
+ struct fintek_8250 *pdata = pnp_get_drvdata(dev);
- if (!line)
+ if (!pdata)
return -ENODEV;
- serial8250_resume_port(line - 1);
+ serial8250_resume_port(pdata->line);
return 0;
}
#else
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 21bf81fe794f..7c1e4be48e7b 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -252,7 +252,6 @@ MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver ingenic_uart_platform_driver = {
.driver = {
.name = "ingenic-uart",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
.probe = ingenic_uart_probe,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index d75a66c72750..826c5c4a2103 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
@@ -32,6 +33,11 @@
#define UART_ERRATA_i202_MDR1_ACCESS (1 << 0)
#define OMAP_UART_WER_HAS_TX_WAKEUP (1 << 1)
#define OMAP_DMA_TX_KICK (1 << 2)
+/*
+ * See Advisory 21 in AM437x errata SPRZ408B, updated April 2015.
+ * The same erratum also applies to AM335x and DRA7x processors.
+ */
+#define UART_ERRATA_CLOCK_DISABLE (1 << 3)
#define OMAP_UART_FCR_RX_TRIG 6
#define OMAP_UART_FCR_TX_TRIG 4
@@ -53,6 +59,12 @@
#define OMAP_UART_MVR_MAJ_SHIFT 8
#define OMAP_UART_MVR_MIN_MASK 0x3f
+/* SYSC register bitmasks */
+#define OMAP_UART_SYSC_SOFTRESET (1 << 1)
+
+/* SYSS register bitmasks */
+#define OMAP_UART_SYSS_RESETDONE (1 << 0)
+
#define UART_TI752_TLR_TX 0
#define UART_TI752_TLR_RX 4
@@ -100,6 +112,7 @@ struct omap8250_priv {
struct work_struct qos_work;
struct uart_8250_dma omap8250_dma;
spinlock_t rx_dma_lock;
+ bool rx_dma_broken;
};
static u32 uart_read(struct uart_8250_port *up, u32 reg)
@@ -232,6 +245,15 @@ static void omap8250_update_scr(struct uart_8250_port *up,
serial_out(up, UART_OMAP_SCR, priv->scr);
}
+static void omap8250_update_mdr1(struct uart_8250_port *up,
+ struct omap8250_priv *priv)
+{
+ if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_8250_mdr1_errataset(up, priv);
+ else
+ serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+}
+
static void omap8250_restore_regs(struct uart_8250_port *up)
{
struct omap8250_priv *priv = up->port.private_data;
@@ -282,11 +304,9 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_XOFF1, priv->xoff);
serial_out(up, UART_LCR, up->lcr);
- /* need mode A for FCR */
- if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
- omap_8250_mdr1_errataset(up, priv);
- else
- serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+
+ omap8250_update_mdr1(up, priv);
+
up->port.ops->set_mctrl(&up->port, up->port.mctrl);
}
@@ -428,12 +448,9 @@ static void omap_8250_set_termios(struct uart_port *port,
priv->efr |= UART_EFR_CTS;
} else if (up->port.flags & UPF_SOFT_FLOW) {
/*
- * IXON Flag:
- * Enable XON/XOFF flow control on input.
- * Receiver compares XON1, XOFF1.
+ * OMAP rx s/w flow control is borked; the transmitter remains
+ * stuck off even if rx flow control is subsequently disabled
*/
- if (termios->c_iflag & IXON)
- priv->efr |= OMAP_UART_SW_RX;
/*
* IXOFF Flag:
@@ -444,15 +461,6 @@ static void omap_8250_set_termios(struct uart_port *port,
up->port.status |= UPSTAT_AUTOXOFF;
priv->efr |= OMAP_UART_SW_TX;
}
-
- /*
- * IXANY Flag:
- * Enable any character to restart output.
- * Operation resumes after receiving any
- * character after recognition of the XOFF character
- */
- if (termios->c_iflag & IXANY)
- up->mcr |= UART_MCR_XONANY;
}
omap8250_restore_regs(up);
@@ -530,14 +538,14 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
switch (revision) {
case OMAP_UART_REV_46:
- priv->habit = UART_ERRATA_i202_MDR1_ACCESS;
+ priv->habit |= UART_ERRATA_i202_MDR1_ACCESS;
break;
case OMAP_UART_REV_52:
- priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+ priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
OMAP_UART_WER_HAS_TX_WAKEUP;
break;
case OMAP_UART_REV_63:
- priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+ priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
OMAP_UART_WER_HAS_TX_WAKEUP;
break;
default:
@@ -754,6 +762,7 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
+ int ret;
spin_lock_irqsave(&priv->rx_dma_lock, flags);
@@ -762,7 +771,9 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
return;
}
- dmaengine_pause(dma->rxchan);
+ ret = dmaengine_pause(dma->rxchan);
+ if (WARN_ON_ONCE(ret))
+ priv->rx_dma_broken = true;
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
@@ -806,6 +817,9 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
break;
}
+ if (priv->rx_dma_broken)
+ return -EINVAL;
+
spin_lock_irqsave(&priv->rx_dma_lock, flags);
if (dma->rx_running)
@@ -1054,6 +1068,20 @@ static int omap8250_no_handle_irq(struct uart_port *port)
return 0;
}
+static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+static const u8 am4372_habit = UART_ERRATA_CLOCK_DISABLE;
+
+static const struct of_device_id omap8250_dt_ids[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+ { .compatible = "ti,am4372-uart", .data = &am4372_habit, },
+ { .compatible = "ti,dra742-uart", .data = &am4372_habit, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
+
static int omap8250_probe(struct platform_device *pdev)
{
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1118,11 +1146,17 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.unthrottle = omap_8250_unthrottle;
if (pdev->dev.of_node) {
+ const struct of_device_id *id;
+
ret = of_alias_get_id(pdev->dev.of_node, "serial");
of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&up.port.uartclk);
priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+
+ id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
+ if (id && id->data)
+ priv->habit |= *(u8 *)id->data;
} else {
ret = pdev->id;
}
@@ -1180,6 +1214,11 @@ static int omap8250_probe(struct platform_device *pdev)
if (of_machine_is_compatible("ti,am33xx"))
priv->habit |= OMAP_DMA_TX_KICK;
+ /*
+ * pause is currently not supported atleast on omap-sdma
+ * and edma on most earlier kernels.
+ */
+ priv->rx_dma_broken = true;
}
}
#endif
@@ -1257,17 +1296,46 @@ static int omap8250_lost_context(struct uart_8250_port *up)
{
u32 val;
- val = serial_in(up, UART_OMAP_MDR1);
+ val = serial_in(up, UART_OMAP_SCR);
/*
- * If we lose context, then MDR1 is set to its reset value which is
- * UART_OMAP_MDR1_DISABLE. After set_termios() we set it either to 13x
- * or 16x but never to disable again.
+ * If we lose context, then SCR is set to its reset value of zero.
+ * After set_termios() we set bit 3 of SCR (TX_EMPTY_CTL_IT) to 1,
+ * among other bits, to never set the register back to zero again.
*/
- if (val == UART_OMAP_MDR1_DISABLE)
+ if (!val)
return 1;
return 0;
}
+/* TODO: in future, this should happen via an API in drivers/reset/ */
+static int omap8250_soft_reset(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(priv->line);
+ int timeout = 100;
+ int sysc;
+ int syss;
+
+ sysc = serial_in(up, UART_OMAP_SYSC);
+
+ /* softreset the UART */
+ sysc |= OMAP_UART_SYSC_SOFTRESET;
+ serial_out(up, UART_OMAP_SYSC, sysc);
+
+ /* Experiments show 1us is enough for the reset to complete on AM335x */
+ do {
+ udelay(1);
+ syss = serial_in(up, UART_OMAP_SYSS);
+ } while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE));
+
+ if (!timeout) {
+ dev_err(dev, "timed out waiting for reset done\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int omap8250_runtime_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
@@ -1285,7 +1353,18 @@ static int omap8250_runtime_suspend(struct device *dev)
return -EBUSY;
}
- if (up->dma)
+ if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
+ int ret;
+
+ ret = omap8250_soft_reset(dev);
+ if (ret)
+ return ret;
+
+ /* Restore to UART mode after reset (for wakeup) */
+ omap8250_update_mdr1(up, priv);
+ }
+
+ if (up->dma && up->dma->rxchan)
omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
@@ -1310,7 +1389,7 @@ static int omap8250_runtime_resume(struct device *dev)
if (loss_cntx)
omap8250_restore_regs(up);
- if (up->dma)
+ if (up->dma && up->dma->rxchan)
omap_8250_rx_dma(up, 0);
priv->latency = priv->calc_latency;
@@ -1367,14 +1446,6 @@ static const struct dev_pm_ops omap8250_dev_pm_ops = {
.complete = omap8250_complete,
};
-static const struct of_device_id omap8250_dt_ids[] = {
- { .compatible = "ti,omap2-uart" },
- { .compatible = "ti,omap3-uart" },
- { .compatible = "ti,omap4-uart" },
- {},
-};
-MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
-
static struct platform_driver omap8250_platform_driver = {
.driver = {
.name = "omap8250",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e55f18b93fe7..68042dd1c525 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1417,6 +1417,10 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
writel(reg, p->membase + BYT_PRV_CLK);
+ p->status &= ~UPSTAT_AUTOCTS;
+ if (termios->c_cflag & CRTSCTS)
+ p->status |= UPSTAT_AUTOCTS;
+
serial8250_do_set_termios(p, termios, old);
}
@@ -1685,11 +1689,65 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
return ret;
}
+/* RTS is controlled by MCR if this bit is 0 */
+#define FINTEK_RTS_CONTROL_BY_HW BIT(4)
+/* only effective when FINTEK_RTS_CONTROL_BY_HW is set */
+#define FINTEK_RTS_INVERT BIT(5)
+
+/* Proper H/W transceiver setup is needed before changing to RS485 mode */
+static int pci_fintek_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ u8 setting;
+ u8 *index = (u8 *) port->private_data;
+ struct pci_dev *pci_dev = container_of(port->dev, struct pci_dev,
+ dev);
+
+ pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
+
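+ /* A NULL rs485 means "re-apply the stored port->rs485 state" (resume path) */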
+ if (!rs485)
+ rs485 = &port->rs485;
+ else if (rs485->flags & SER_RS485_ENABLED)
+ memset(rs485->padding, 0, sizeof(rs485->padding));
+ else
+ memset(rs485, 0, sizeof(*rs485));
+
+ /* F81504/508/512 do not support RTS delay before or after send */
+ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Enable RTS H/W control mode */
+ setting |= FINTEK_RTS_CONTROL_BY_HW;
+
+ if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ /* RTS driving high on TX */
+ setting &= ~FINTEK_RTS_INVERT;
+ } else {
+ /* RTS driving low on TX */
+ setting |= FINTEK_RTS_INVERT;
+ }
+
+ rs485->delay_rts_after_send = 0;
+ rs485->delay_rts_before_send = 0;
+ } else {
+ /* Disable RTS H/W control mode */
+ setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
+ }
+
+ pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
+
+ if (rs485 != &port->rs485)
+ port->rs485 = *rs485;
+
+ return 0;
+}
+
static int pci_fintek_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
struct pci_dev *pdev = priv->dev;
+ u8 *data;
u8 config_base;
u16 iobase;
@@ -1702,6 +1760,15 @@ static int pci_fintek_setup(struct serial_private *priv,
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
+ port->port.rs485_config = pci_fintek_rs485_config;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* remember this port's index for later PCI config-space accesses */
+ *data = idx;
+ port->port.private_data = data;
return 0;
}
@@ -1712,6 +1779,8 @@ static int pci_fintek_init(struct pci_dev *dev)
u32 max_port, i;
u32 bar_data[3];
u8 config_base;
+ struct serial_private *priv = pci_get_drvdata(dev);
+ struct uart_8250_port *port;
switch (dev->device) {
case 0x1104: /* 4 ports */
@@ -1752,6 +1821,19 @@ static int pci_fintek_init(struct pci_dev *dev)
(u8)((iobase & 0xff00) >> 8));
pci_write_config_byte(dev, config_base + 0x06, dev->irq);
+
+ if (priv) {
+ /*
+ * Re-apply RS232/RS485 mode when called from
+ * pciserial_resume_ports().
+ */
+ port = serial8250_get_port(priv->line[i]);
+ pci_fintek_rs485_config(&port->port, NULL);
+ } else {
+ /*
+ * First init, before any port data exists: force the
+ * port into RS232 mode.
+ */
+ pci_write_config_byte(dev, config_base + 0x07, 0x01);
+ }
}
return max_port;
@@ -2017,6 +2099,12 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -2331,27 +2419,12 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
* Pericom
*/
{
- .vendor = 0x12d8,
- .device = 0x7952,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
- },
- {
- .vendor = 0x12d8,
- .device = 0x7954,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
- },
- {
- .vendor = 0x12d8,
- .device = 0x7958,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
+ .vendor = PCI_VENDOR_ID_PERICOM,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
},
-
/*
* PLX
*/
@@ -3056,6 +3129,10 @@ enum pci_board_num_t {
pbn_fintek_8,
pbn_fintek_12,
pbn_wch384_4,
+ pbn_pericom_PI7C9X7951,
+ pbn_pericom_PI7C9X7952,
+ pbn_pericom_PI7C9X7954,
+ pbn_pericom_PI7C9X7958,
};
/*
@@ -3881,7 +3958,6 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
-
[pbn_wch384_4] = {
.flags = FL_BASE0,
.num_ports = 4,
@@ -3889,6 +3965,33 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
.first_offset = 0xC0,
},
+ /*
+ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+ */
+ [pbn_pericom_PI7C9X7951] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_pericom_PI7C9X7952] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_pericom_PI7C9X7954] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_pericom_PI7C9X7958] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -5154,6 +5257,25 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_exar_XR17V8358 },
/*
+ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+ */
+ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_pericom_PI7C9X7951 },
+ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_pericom_PI7C9X7958 },
+ /*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
{ PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 50a09cd76d50..658b392d1170 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -41,6 +41,12 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "AEI1240", 0 },
/* Rockwell 56K ACF II Fax+Data+Voice Modem */
{ "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ },
+ /*
+ * ALi Fast Infrared Controller
+ * The native driver (ali-ircc) is broken, so register the
+ * port here so it can at least be used with irtty-sir.
+ */
+ { "ALI5123", 0 },
/* AZT3005 PnP SOUND DEVICE */
{ "AZT4001", 0 },
/* Best Data Products Inc. Smart One 336F PnP Modem */
@@ -364,6 +370,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
/* Winbond CIR port, should not be probed. We should keep track
of it to prevent the legacy serial driver from probing it */
{ "WEC1022", CIR_PORT },
+ /*
+ * SMSC IrCC SIR/FIR port; should not be probed by the serial
+ * driver either, so that its own driver can bind to it.
+ */
+ { "SMCF010", CIR_PORT },
{ "", 0 }
};
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
new file mode 100644
index 000000000000..54e6c8ddef5d
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -0,0 +1,2912 @@
+/*
+ * Base port operations for 8250/16550-type serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Split from 8250_core.c, Copyright (C) 2001 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * A note about mapbase / membase
+ *
+ * mapbase is the physical address of the IO port.
+ * membase is an 'ioremapped' cookie.
+ */
+
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/ratelimit.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/nmi.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8250.h"
+
+/*
+ * Debugging.
+ */
+#if 0
+#define DEBUG_AUTOCONF(fmt...) printk(fmt)
+#else
+#define DEBUG_AUTOCONF(fmt...) do { } while (0)
+#endif
+
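+/* LSR bits that are both set once the transmitter is completely idle */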
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial8250_config uart_config[] = {
+ [PORT_UNKNOWN] = {
+ .name = "unknown",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_8250] = {
+ .name = "8250",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16450] = {
+ .name = "16450",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16550] = {
+ .name = "16550",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16550A] = {
+ .name = "16550A",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .rxtrig_bytes = {1, 4, 8, 14},
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_CIRRUS] = {
+ .name = "Cirrus",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16650] = {
+ .name = "ST16650",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16650V2] = {
+ .name = "ST16650V2",
+ .fifo_size = 32,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_00,
+ .rxtrig_bytes = {8, 16, 24, 28},
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16750] = {
+ .name = "TI16750",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR7_64BYTE,
+ .rxtrig_bytes = {1, 16, 32, 56},
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
+ },
+ [PORT_STARTECH] = {
+ .name = "Startech",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16C950] = {
+ .name = "16C950/954",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ /* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
+ },
+ [PORT_16654] = {
+ .name = "ST16654",
+ .fifo_size = 64,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_10,
+ .rxtrig_bytes = {8, 16, 56, 60},
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16850] = {
+ .name = "XR16850",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_RSA] = {
+ .name = "RSA",
+ .fifo_size = 2048,
+ .tx_loadsz = 2048,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_NS16550A] = {
+ .name = "NS16550A",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_NATSEMI,
+ },
+ [PORT_XSCALE] = {
+ .name = "XScale",
+ .fifo_size = 32,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
+ },
+ [PORT_OCTEON] = {
+ .name = "OCTEON",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_AR7] = {
+ .name = "AR7",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_U6_16550A] = {
+ .name = "U6_16550A",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_TEGRA] = {
+ .name = "Tegra",
+ .fifo_size = 32,
+ .tx_loadsz = 8,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_01,
+ .rxtrig_bytes = {1, 4, 8, 14},
+ .flags = UART_CAP_FIFO | UART_CAP_RTOIE,
+ },
+ [PORT_XR17D15X] = {
+ .name = "XR17D15X",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+ UART_CAP_SLEEP,
+ },
+ [PORT_XR17V35X] = {
+ .name = "XR17V35X",
+ .fifo_size = 256,
+ .tx_loadsz = 256,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
+ UART_FCR_T_TRIG_11,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+ UART_CAP_SLEEP,
+ },
+ [PORT_LPC3220] = {
+ .name = "LPC3220",
+ .fifo_size = 64,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
+ UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_BRCM_TRUMANAGE] = {
+ .name = "TruManage",
+ .fifo_size = 1,
+ .tx_loadsz = 1024,
+ .flags = UART_CAP_HFIFO,
+ },
+ [PORT_8250_CIR] = {
+ .name = "CIR port"
+ },
+ [PORT_ALTR_16550_F32] = {
+ .name = "Altera 16550 FIFO32",
+ .fifo_size = 32,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_ALTR_16550_F64] = {
+ .name = "Altera 16550 FIFO64",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_ALTR_16550_F128] = {
+ .name = "Altera 16550 FIFO128",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+/*
+ * tx_loadsz is set to 63 bytes instead of 64 to implement a
+ * workaround for erratum A-008006, which states that tx_loadsz
+ * should be configured to less than the maximum supported FIFO
+ * size.
+ */
+ [PORT_16550A_FSL64] = {
+ .name = "16550A_FSL64",
+ .fifo_size = 64,
+ .tx_loadsz = 63,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR7_64BYTE,
+ .flags = UART_CAP_FIFO,
+ },
+};
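+
+/*
+ * Once autoconfig() has identified the port type it copies fifo_size,
+ * tx_loadsz and flags from this table into the port; for ports
+ * registered with a preset type, serial8250_do_startup() fills in
+ * whatever was left at zero.
+ */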
+
+/* UART divisor latch read */
+static int default_serial_dl_read(struct uart_8250_port *up)
+{
+ return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+}
+
+/* UART divisor latch write */
+static void default_serial_dl_write(struct uart_8250_port *up, int value)
+{
+ serial_out(up, UART_DLL, value & 0xff);
+ serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
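+
+/*
+ * Worked example: with the classic 1.8432 MHz clock, a divisor of 12
+ * (DLL = 0x0c, DLM = 0x00) gives 1843200 / (16 * 12) = 9600 baud.
+ */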
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+
+/* Au1x00/RT288x UART hardware has a weird register layout */
+static const s8 au_io_in_map[8] = {
+ 0, /* UART_RX */
+ 2, /* UART_IER */
+ 3, /* UART_IIR */
+ 5, /* UART_LCR */
+ 6, /* UART_MCR */
+ 7, /* UART_LSR */
+ 8, /* UART_MSR */
+ -1, /* UART_SCR (unmapped) */
+};
+
+static const s8 au_io_out_map[8] = {
+ 1, /* UART_TX */
+ 2, /* UART_IER */
+ 4, /* UART_FCR */
+ 5, /* UART_LCR */
+ 6, /* UART_MCR */
+ -1, /* UART_LSR (unmapped) */
+ -1, /* UART_MSR (unmapped) */
+ -1, /* UART_SCR (unmapped) */
+};
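+
+/*
+ * For example, a UART_LSR access (offset 5) is redirected through
+ * au_io_in_map to native register 7 before regshift is applied.
+ */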
+
+static unsigned int au_serial_in(struct uart_port *p, int offset)
+{
+ if (offset >= ARRAY_SIZE(au_io_in_map))
+ return UINT_MAX;
+ offset = au_io_in_map[offset];
+ if (offset < 0)
+ return UINT_MAX;
+ return __raw_readl(p->membase + (offset << p->regshift));
+}
+
+static void au_serial_out(struct uart_port *p, int offset, int value)
+{
+ if (offset >= ARRAY_SIZE(au_io_out_map))
+ return;
+ offset = au_io_out_map[offset];
+ if (offset < 0)
+ return;
+ __raw_writel(value, p->membase + (offset << p->regshift));
+}
+
+/* The Au1x00 doesn't have a standard divisor latch */
+static int au_serial_dl_read(struct uart_8250_port *up)
+{
+ return __raw_readl(up->port.membase + 0x28);
+}
+
+static void au_serial_dl_write(struct uart_8250_port *up, int value)
+{
+ __raw_writel(value, up->port.membase + 0x28);
+}
+
+#endif
+
+static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ outb(p->hub6 - 1 + offset, p->iobase);
+ return inb(p->iobase + 1);
+}
+
+static void hub6_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ outb(p->hub6 - 1 + offset, p->iobase);
+ outb(value, p->iobase + 1);
+}
+
+static unsigned int mem_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return readb(p->membase + offset);
+}
+
+static void mem_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ writeb(value, p->membase + offset);
+}
+
+static void mem32_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ writel(value, p->membase + offset);
+}
+
+static unsigned int mem32_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return readl(p->membase + offset);
+}
+
+static void mem32be_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ iowrite32be(value, p->membase + offset);
+}
+
+static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return ioread32be(p->membase + offset);
+}
+
+static unsigned int io_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return inb(p->iobase + offset);
+}
+
+static void io_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ outb(value, p->iobase + offset);
+}
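+
+/*
+ * Note on regshift in the accessors above: a port with regshift = 2
+ * has its registers on 4-byte boundaries, so e.g. UART_LSR (offset 5)
+ * is accessed at membase/iobase + (5 << 2) = +0x14.
+ */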
+
+static int serial8250_default_handle_irq(struct uart_port *port);
+static int exar_handle_irq(struct uart_port *port);
+
+static void set_io_from_upio(struct uart_port *p)
+{
+ struct uart_8250_port *up = up_to_u8250p(p);
+
+ up->dl_read = default_serial_dl_read;
+ up->dl_write = default_serial_dl_write;
+
+ switch (p->iotype) {
+ case UPIO_HUB6:
+ p->serial_in = hub6_serial_in;
+ p->serial_out = hub6_serial_out;
+ break;
+
+ case UPIO_MEM:
+ p->serial_in = mem_serial_in;
+ p->serial_out = mem_serial_out;
+ break;
+
+ case UPIO_MEM32:
+ p->serial_in = mem32_serial_in;
+ p->serial_out = mem32_serial_out;
+ break;
+
+ case UPIO_MEM32BE:
+ p->serial_in = mem32be_serial_in;
+ p->serial_out = mem32be_serial_out;
+ break;
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+ case UPIO_AU:
+ p->serial_in = au_serial_in;
+ p->serial_out = au_serial_out;
+ up->dl_read = au_serial_dl_read;
+ up->dl_write = au_serial_dl_write;
+ break;
+#endif
+
+ default:
+ p->serial_in = io_serial_in;
+ p->serial_out = io_serial_out;
+ break;
+ }
+ /* Remember loaded iotype */
+ up->cur_iotype = p->iotype;
+ p->handle_irq = serial8250_default_handle_irq;
+}
+
+static void
+serial_port_out_sync(struct uart_port *p, int offset, int value)
+{
+ switch (p->iotype) {
+ case UPIO_MEM:
+ case UPIO_MEM32:
+ case UPIO_MEM32BE:
+ case UPIO_AU:
+ p->serial_out(p, offset, value);
+ p->serial_in(p, UART_LCR); /* safe, no side-effects */
+ break;
+ default:
+ p->serial_out(p, offset, value);
+ }
+}
+
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+
+/*
+ * FIFO support.
+ */
+static void serial8250_clear_fifos(struct uart_8250_port *p)
+{
+ if (p->capabilities & UART_CAP_FIFO) {
+ serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(p, UART_FCR, 0);
+ }
+}
+
+void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
+{
+ serial8250_clear_fifos(p);
+ serial_out(p, UART_FCR, p->fcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
+
+void serial8250_rpm_get(struct uart_8250_port *p)
+{
+ if (!(p->capabilities & UART_CAP_RPM))
+ return;
+ pm_runtime_get_sync(p->port.dev);
+}
+EXPORT_SYMBOL_GPL(serial8250_rpm_get);
+
+void serial8250_rpm_put(struct uart_8250_port *p)
+{
+ if (!(p->capabilities & UART_CAP_RPM))
+ return;
+ pm_runtime_mark_last_busy(p->port.dev);
+ pm_runtime_put_autosuspend(p->port.dev);
+}
+EXPORT_SYMBOL_GPL(serial8250_rpm_put);
+
+/*
+ * These two wrappers ensure that serial8250_rpm_get_tx() can be called
+ * more than once while serial8250_rpm_put_tx() still disables RPM once
+ * the FIFO is empty and the HW can idle again.
+ */
+static void serial8250_rpm_get_tx(struct uart_8250_port *p)
+{
+ unsigned char rpm_active;
+
+ if (!(p->capabilities & UART_CAP_RPM))
+ return;
+
+ rpm_active = xchg(&p->rpm_tx_active, 1);
+ if (rpm_active)
+ return;
+ pm_runtime_get_sync(p->port.dev);
+}
+
+static void serial8250_rpm_put_tx(struct uart_8250_port *p)
+{
+ unsigned char rpm_active;
+
+ if (!(p->capabilities & UART_CAP_RPM))
+ return;
+
+ rpm_active = xchg(&p->rpm_tx_active, 0);
+ if (!rpm_active)
+ return;
+ pm_runtime_mark_last_busy(p->port.dev);
+ pm_runtime_put_autosuspend(p->port.dev);
+}
+
+/*
+ * IER sleep support. UARTs which have EFRs need the "extended
+ * capability" bit enabled. Note that on XR16C850s, we need to
+ * reset LCR to write to IER.
+ */
+static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+{
+ unsigned char lcr = 0, efr = 0;
+ /*
+	 * Exar UARTs have a SLEEP register that allows each UART to
+	 * enter sleep mode separately. On the XR17V35x the
+ * register is accessible to each UART at the UART_EXAR_SLEEP
+ * offset but the UART channel may only write to the corresponding
+ * bit.
+ */
+ serial8250_rpm_get(p);
+ if ((p->port.type == PORT_XR17V35X) ||
+ (p->port.type == PORT_XR17D15X)) {
+ serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
+ goto out;
+ }
+
+ if (p->capabilities & UART_CAP_SLEEP) {
+ if (p->capabilities & UART_CAP_EFR) {
+ lcr = serial_in(p, UART_LCR);
+ efr = serial_in(p, UART_EFR);
+ serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(p, UART_EFR, UART_EFR_ECB);
+ serial_out(p, UART_LCR, 0);
+ }
+ serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
+ if (p->capabilities & UART_CAP_EFR) {
+ serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(p, UART_EFR, efr);
+ serial_out(p, UART_LCR, lcr);
+ }
+ }
+out:
+ serial8250_rpm_put(p);
+}
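+
+/*
+ * The UART_LCR_CONF_MODE_B / UART_EFR_ECB dance above is the usual
+ * 16650-style unlock sequence: writing 0xbf to the LCR exposes the
+ * EFR, and setting ECB in it makes the enhanced IER bits, including
+ * the sleep bit, writable.
+ */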
+
+#ifdef CONFIG_SERIAL_8250_RSA
+/*
+ * Attempts to turn on the RSA FIFO. Returns zero on failure.
+ * We set the port uart clock rate if we succeed.
+ */
+static int __enable_rsa(struct uart_8250_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ mode = serial_in(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+
+ if (!result) {
+ serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+
+ return result;
+}
+
+static void enable_rsa(struct uart_8250_port *up)
+{
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+ __enable_rsa(up);
+ spin_unlock_irq(&up->port.lock);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_out(up, UART_RSA_FRR, 0);
+ }
+}
+
+/*
+ * Attempts to turn off the RSA FIFO and, on success, restores the
+ * lower clock rate. It is unknown why interrupts were disabled in
+ * here originally; that behaviour is preserved by taking the port
+ * spinlock with interrupts disabled.
+ */
+static void disable_rsa(struct uart_8250_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+
+ mode = serial_in(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+
+ if (!result) {
+ serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+ spin_unlock_irq(&up->port.lock);
+ }
+}
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+/*
+ * This is a quickie test to see how big the FIFO is.
+ * It doesn't work all the time, more's the pity.
+ */
+static int size_fifo(struct uart_8250_port *up)
+{
+ unsigned char old_fcr, old_mcr, old_lcr;
+ unsigned short old_dl;
+ int count;
+
+ old_lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, 0);
+ old_fcr = serial_in(up, UART_FCR);
+ old_mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_MCR, UART_MCR_LOOP);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(up);
+ serial_dl_write(up, 0x0001);
+ serial_out(up, UART_LCR, 0x03);
+ for (count = 0; count < 256; count++)
+ serial_out(up, UART_TX, count);
+ mdelay(20);/* FIXME - schedule_timeout */
+ for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
+ (count < 256); count++)
+ serial_in(up, UART_RX);
+ serial_out(up, UART_FCR, old_fcr);
+ serial_out(up, UART_MCR, old_mcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_dl_write(up, old_dl);
+ serial_out(up, UART_LCR, old_lcr);
+
+ return count;
+}
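+
+/*
+ * Illustrative results: a plain 16550A should count 16 bytes and a
+ * TI16750 64. autoconfig_has_efr() uses a count of 64 to tell a '654
+ * from a '650, and autoconfig_16550a() uses it to spot the U6 variant.
+ */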
+
+/*
+ * Read UART ID using the divisor method - set DLL and DLM to zero
+ * and the revision will be in DLL and device type in DLM. We
+ * preserve the device state across this.
+ */
+static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+{
+ unsigned char old_dll, old_dlm, old_lcr;
+ unsigned int id;
+
+ old_lcr = serial_in(p, UART_LCR);
+ serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ old_dll = serial_in(p, UART_DLL);
+ old_dlm = serial_in(p, UART_DLM);
+
+ serial_out(p, UART_DLL, 0);
+ serial_out(p, UART_DLM, 0);
+
+ id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+
+ serial_out(p, UART_DLL, old_dll);
+ serial_out(p, UART_DLM, old_dlm);
+ serial_out(p, UART_LCR, old_lcr);
+
+ return id;
+}
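+
+/*
+ * For instance, an XR16C2850 answers with DLM = 0x12 (an XR16C850
+ * answers 0x10 with its chip revision in DLL); see the 850-family
+ * check in autoconfig_has_efr() below.
+ */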
+
+/*
+ * This is a helper routine to autodetect StarTech/Exar/Oxsemi UARTs.
+ * When this function is called we know it is at least a StarTech
+ * 16650 V2, but it might be one of several StarTech UARTs, or one of
+ * its clones. (We treat the broken original StarTech 16650 V1 as a
+ * 16550, and why not? Startech doesn't seem to even acknowledge its
+ * existence.)
+ *
+ * What evil have men's minds wrought...
+ */
+static void autoconfig_has_efr(struct uart_8250_port *up)
+{
+ unsigned int id1, id2, id3, rev;
+
+ /*
+ * Everything with an EFR has SLEEP
+ */
+ up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+
+ /*
+ * First we check to see if it's an Oxford Semiconductor UART.
+ *
+ * We have to do this here because some non-National
+ * Semiconductor clone chips lock up if you try writing to the
+ * LSR register (which serial_icr_read does).
+ */
+
+ /*
+ * Check for Oxford Semiconductor 16C950.
+ *
+ * EFR [4] must be set else this test fails.
+ *
+ * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
+ * claims that it's needed for 952 dual UARTs (which are not
+ * recommended for new designs).
+ */
+ up->acr = 0;
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x00);
+ id1 = serial_icr_read(up, UART_ID1);
+ id2 = serial_icr_read(up, UART_ID2);
+ id3 = serial_icr_read(up, UART_ID3);
+ rev = serial_icr_read(up, UART_REV);
+
+ DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
+
+ if (id1 == 0x16 && id2 == 0xC9 &&
+ (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
+ up->port.type = PORT_16C950;
+
+ /*
+ * Enable work around for the Oxford Semiconductor 952 rev B
+ * chip which causes it to seriously miscalculate baud rates
+ * when DLL is 0.
+ */
+ if (id3 == 0x52 && rev == 0x01)
+ up->bugs |= UART_BUG_QUOT;
+ return;
+ }
+
+ /*
+ * We check for an XR16C850 by setting DLL and DLM to 0, and then
+ * reading back DLL and DLM. The chip type depends on the DLM
+ * value read back:
+ * 0x10 - XR16C850 and the DLL contains the chip revision.
+ * 0x12 - XR16C2850.
+ * 0x14 - XR16C854.
+ */
+ id1 = autoconfig_read_divisor_id(up);
+ DEBUG_AUTOCONF("850id=%04x ", id1);
+
+ id2 = id1 >> 8;
+ if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
+ up->port.type = PORT_16850;
+ return;
+ }
+
+ /*
+ * It wasn't an XR16C850.
+ *
+ * We distinguish between the '654 and the '650 by counting
+ * how many bytes are in the FIFO. I'm using this for now,
+ * since that's the technique that was sent to me in the
+ * serial driver update, but I'm not convinced this works.
+ * I've had problems doing this in the past. -TYT
+ */
+ if (size_fifo(up) == 64)
+ up->port.type = PORT_16654;
+ else
+ up->port.type = PORT_16650V2;
+}
+
+/*
+ * We detected a chip without a FIFO. Only two fall into
+ * this category - the original 8250 and the 16450. The
+ * 16450 has a scratch register (accessible with LCR=0)
+ */
+static void autoconfig_8250(struct uart_8250_port *up)
+{
+ unsigned char scratch, status1, status2;
+
+ up->port.type = PORT_8250;
+
+ scratch = serial_in(up, UART_SCR);
+ serial_out(up, UART_SCR, 0xa5);
+ status1 = serial_in(up, UART_SCR);
+ serial_out(up, UART_SCR, 0x5a);
+ status2 = serial_in(up, UART_SCR);
+ serial_out(up, UART_SCR, scratch);
+
+ if (status1 == 0xa5 && status2 == 0x5a)
+ up->port.type = PORT_16450;
+}
+
+static int broken_efr(struct uart_8250_port *up)
+{
+ /*
+ * Exar ST16C2550 "A2" devices incorrectly detect as
+ * having an EFR, and report an ID of 0x0201. See
+ * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
+ */
+ if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * We know that the chip has FIFOs. Does it have an EFR? The
+ * EFR is located in the same register position as the IIR and
+ * we know the top two bits of the IIR are currently set. The
+ * EFR should contain zero. Try to read the EFR.
+ */
+static void autoconfig_16550a(struct uart_8250_port *up)
+{
+ unsigned char status1, status2;
+ unsigned int iersave;
+
+ up->port.type = PORT_16550A;
+ up->capabilities |= UART_CAP_FIFO;
+
+ /*
+	 * XR17V35x UARTs have an extra divisor register, DLD,
+	 * that gets enabled when DLAB is set; this would cause
+	 * the device to incorrectly match and be assigned the
+	 * port type PORT_16650. The EFR for this UART is found
+	 * at offset 0x09. Instead, check the Device ID (DVID)
+	 * register for a 2-, 4- or 8-port UART.
+ */
+ if (up->port.flags & UPF_EXAR_EFR) {
+ status1 = serial_in(up, UART_EXAR_DVID);
+ if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
+ DEBUG_AUTOCONF("Exar XR17V35x ");
+ up->port.type = PORT_XR17V35X;
+ up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+ UART_CAP_SLEEP;
+
+ return;
+ }
+
+ }
+
+ /*
+ * Check for presence of the EFR when DLAB is set.
+ * Only ST16C650V1 UARTs pass this test.
+ */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ if (serial_in(up, UART_EFR) == 0) {
+ serial_out(up, UART_EFR, 0xA8);
+ if (serial_in(up, UART_EFR) != 0) {
+ DEBUG_AUTOCONF("EFRv1 ");
+ up->port.type = PORT_16650;
+ up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+ } else {
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR7_64BYTE);
+ status1 = serial_in(up, UART_IIR) >> 5;
+ serial_out(up, UART_FCR, 0);
+ serial_out(up, UART_LCR, 0);
+
+ if (status1 == 7)
+ up->port.type = PORT_16550A_FSL64;
+ else
+ DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+ }
+ serial_out(up, UART_EFR, 0);
+ return;
+ }
+
+ /*
+ * Maybe it requires 0xbf to be written to the LCR.
+ * (other ST16C650V2 UARTs, TI16C752A, etc)
+ */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
+ DEBUG_AUTOCONF("EFRv2 ");
+ autoconfig_has_efr(up);
+ return;
+ }
+
+ /*
+ * Check for a National Semiconductor SuperIO chip.
+ * Attempt to switch to bank 2, read the value of the LOOP bit
+ * from EXCR1. Switch back to bank 0, change it in MCR. Then
+ * switch back to bank 2, read it from EXCR1 again and check
+ * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
+ */
+ serial_out(up, UART_LCR, 0);
+ status1 = serial_in(up, UART_MCR);
+ serial_out(up, UART_LCR, 0xE0);
+ status2 = serial_in(up, 0x02); /* EXCR1 */
+
+ if (!((status2 ^ status1) & UART_MCR_LOOP)) {
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+ serial_out(up, UART_LCR, 0xE0);
+ status2 = serial_in(up, 0x02); /* EXCR1 */
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_MCR, status1);
+
+ if ((status2 ^ status1) & UART_MCR_LOOP) {
+ unsigned short quot;
+
+ serial_out(up, UART_LCR, 0xE0);
+
+ quot = serial_dl_read(up);
+ quot <<= 3;
+
+ if (ns16550a_goto_highspeed(up))
+ serial_dl_write(up, quot);
+
+ serial_out(up, UART_LCR, 0);
+
+ up->port.uartclk = 921600*16;
+ up->port.type = PORT_NS16550A;
+ up->capabilities |= UART_NATSEMI;
+ return;
+ }
+ }
+
+ /*
+ * No EFR. Try to detect a TI16750, which only sets bit 5 of
+ * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
+ * Try setting it with and without DLAB set. Cheap clones
+ * set bit 5 without DLAB set.
+ */
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ status1 = serial_in(up, UART_IIR) >> 5;
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ status2 = serial_in(up, UART_IIR) >> 5;
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, 0);
+
+ DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
+
+ if (status1 == 6 && status2 == 7) {
+ up->port.type = PORT_16750;
+ up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
+ return;
+ }
+
+ /*
+ * Try writing and reading the UART_IER_UUE bit (b6).
+ * If it works, this is probably one of the Xscale platform's
+ * internal UARTs.
+ * We're going to explicitly set the UUE bit to 0 before
+ * trying to write and read a 1 just to make sure it's not
+	 * already a 1 and maybe locked there before we even start.
+ */
+ iersave = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
+ if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
+ /*
+ * OK it's in a known zero state, try writing and reading
+ * without disturbing the current state of the other bits.
+ */
+ serial_out(up, UART_IER, iersave | UART_IER_UUE);
+ if (serial_in(up, UART_IER) & UART_IER_UUE) {
+ /*
+ * It's an Xscale.
+ * We'll leave the UART_IER_UUE bit set to 1 (enabled).
+ */
+ DEBUG_AUTOCONF("Xscale ");
+ up->port.type = PORT_XSCALE;
+ up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
+ return;
+ }
+ } else {
+ /*
+ * If we got here we couldn't force the IER_UUE bit to 0.
+ * Log it and continue.
+ */
+ DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
+ }
+ serial_out(up, UART_IER, iersave);
+
+ /*
+	 * Exar UARTs have EFR in a weird location
+ */
+ if (up->port.flags & UPF_EXAR_EFR) {
+ DEBUG_AUTOCONF("Exar XR17D15x ");
+ up->port.type = PORT_XR17D15X;
+ up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+ UART_CAP_SLEEP;
+
+ return;
+ }
+
+ /*
+ * We distinguish between 16550A and U6 16550A by counting
+ * how many bytes are in the FIFO.
+ */
+ if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+ up->port.type = PORT_U6_16550A;
+ up->capabilities |= UART_CAP_AFE;
+ }
+}
+
+/*
+ * This routine is called by rs_init() to initialize a specific serial
+ * port. It determines what type of UART chip this serial port is
+ * using: 8250, 16450, 16550, 16550A. The important question is
+ * whether this UART is a 16550A, since that determines whether
+ * we can use its FIFO features.
+ */
+static void autoconfig(struct uart_8250_port *up)
+{
+ unsigned char status1, scratch, scratch2, scratch3;
+ unsigned char save_lcr, save_mcr;
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+ unsigned int old_capabilities;
+
+ if (!port->iobase && !port->mapbase && !port->membase)
+ return;
+
+ DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+ serial_index(port), port->iobase, port->membase);
+
+ /*
+ * We really do need global IRQs disabled here - we're going to
+	 * be frobbing the chip's IRQ enable register to see if it exists.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+
+ up->capabilities = 0;
+ up->bugs = 0;
+
+ if (!(port->flags & UPF_BUGGY_UART)) {
+ /*
+ * Do a simple existence test first; if we fail this,
+ * there's no point trying anything else.
+ *
+		 * 0x80 is used as a nonsense port to guard against
+		 * false positives due to ISA bus float. The
+		 * assumption is that 0x80 is a non-existent port,
+		 * which should be safe since include/asm/io.h also
+		 * makes this assumption.
+ *
+ * Note: this is safe as long as MCR bit 4 is clear
+ * and the device is in "PC" mode.
+ */
+ scratch = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ /*
+		 * Mask out the IER[7:4] bits for the test, as some UARTs
+		 * (e.g. TL16C754B) only allow them to be modified when an
+		 * EFR bit is set.
+ */
+ scratch2 = serial_in(up, UART_IER) & 0x0f;
+ serial_out(up, UART_IER, 0x0F);
+#ifdef __i386__
+ outb(0, 0x080);
+#endif
+ scratch3 = serial_in(up, UART_IER) & 0x0f;
+ serial_out(up, UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0F) {
+ /*
+ * We failed; there's nothing here
+ */
+ spin_unlock_irqrestore(&port->lock, flags);
+ DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+ scratch2, scratch3);
+ goto out;
+ }
+ }
+
+ save_mcr = serial_in(up, UART_MCR);
+ save_lcr = serial_in(up, UART_LCR);
+
+ /*
+ * Check to see if a UART is really there. Certain broken
+ * internal modems based on the Rockwell chipset fail this
+ * test, because they apparently don't implement the loopback
+ * test mode. So this test is skipped on the COM 1 through
+ * COM 4 ports. This *should* be safe, since no board
+ * manufacturer would be stupid enough to design a board
+ * that conflicts with COM 1-4 --- we hope!
+ */
+ if (!(port->flags & UPF_SKIP_TEST)) {
+ serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ status1 = serial_in(up, UART_MSR) & 0xF0;
+ serial_out(up, UART_MCR, save_mcr);
+ if (status1 != 0x90) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+ status1);
+ goto out;
+ }
+ }
+
+ /*
+	 * We're pretty sure there's a port here. Let's find out what
+	 * type of port it is. The IIR top two bits allow us to find
+	 * out whether it's an 8250 or 16450, 16550, 16550A or later. This
+ * determines what we test for next.
+ *
+ * We also initialise the EFR (if any) to zero for later. The
+ * EFR occupies the same register location as the FCR and IIR.
+ */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, 0);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ scratch = serial_in(up, UART_IIR) >> 6;
+
+ switch (scratch) {
+ case 0:
+ autoconfig_8250(up);
+ break;
+ case 1:
+ port->type = PORT_UNKNOWN;
+ break;
+ case 2:
+ port->type = PORT_16550;
+ break;
+ case 3:
+ autoconfig_16550a(up);
+ break;
+ }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Only probe for RSA ports if we got the region.
+ */
+ if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA &&
+ __enable_rsa(up))
+ port->type = PORT_RSA;
+#endif
+
+ serial_out(up, UART_LCR, save_lcr);
+
+ port->fifosize = uart_config[up->port.type].fifo_size;
+ old_capabilities = up->capabilities;
+ up->capabilities = uart_config[port->type].flags;
+ up->tx_loadsz = uart_config[port->type].tx_loadsz;
+
+ if (port->type == PORT_UNKNOWN)
+ goto out_lock;
+
+ /*
+ * Reset the UART.
+ */
+#ifdef CONFIG_SERIAL_8250_RSA
+ if (port->type == PORT_RSA)
+ serial_out(up, UART_RSA_FRR, 0);
+#endif
+ serial_out(up, UART_MCR, save_mcr);
+ serial8250_clear_fifos(up);
+ serial_in(up, UART_RX);
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+
+out_lock:
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (up->capabilities != old_capabilities) {
+ printk(KERN_WARNING
+ "ttyS%d: detected caps %08x should be %08x\n",
+ serial_index(port), old_capabilities,
+ up->capabilities);
+ }
+out:
+ DEBUG_AUTOCONF("iir=%d ", scratch);
+ DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
+}
+
+static void autoconfig_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+ unsigned char save_mcr, save_ier;
+ unsigned char save_ICP = 0;
+ unsigned int ICP = 0;
+ unsigned long irqs;
+ int irq;
+
+ if (port->flags & UPF_FOURPORT) {
+ ICP = (port->iobase & 0xfe0) | 0x1f;
+ save_ICP = inb_p(ICP);
+ outb_p(0x80, ICP);
+ inb_p(ICP);
+ }
+
+ /* forget possible initially masked and pending IRQ */
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial_in(up, UART_MCR);
+ save_ier = serial_in(up, UART_IER);
+ serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+
+ irqs = probe_irq_on();
+ serial_out(up, UART_MCR, 0);
+ udelay(10);
+ if (port->flags & UPF_FOURPORT) {
+ serial_out(up, UART_MCR,
+ UART_MCR_DTR | UART_MCR_RTS);
+ } else {
+ serial_out(up, UART_MCR,
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ }
+ serial_out(up, UART_IER, 0x0f); /* enable all intrs */
+ serial_in(up, UART_LSR);
+ serial_in(up, UART_RX);
+ serial_in(up, UART_IIR);
+ serial_in(up, UART_MSR);
+ serial_out(up, UART_TX, 0xFF);
+ udelay(20);
+ irq = probe_irq_off(irqs);
+
+ serial_out(up, UART_MCR, save_mcr);
+ serial_out(up, UART_IER, save_ier);
+
+ if (port->flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+
+ port->irq = (irq > 0) ? irq : 0;
+}
+
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ serial8250_rpm_put_tx(p);
+ }
+}
+
+static void serial8250_stop_tx(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get(up);
+ __stop_tx(up);
+
+ /*
+ * We really want to stop the transmitter from sending.
+ */
+ if (port->type == PORT_16C950) {
+ up->acr |= UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+ serial8250_rpm_put(up);
+}
+
+static void serial8250_start_tx(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get_tx(up);
+
+ if (up->dma && !up->dma->tx_dma(up))
+ return;
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_port_out(port, UART_IER, up->ier);
+
+ if (up->bugs & UART_BUG_TXEN) {
+ unsigned char lsr;
+ lsr = serial_in(up, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ if (lsr & UART_LSR_THRE)
+ serial8250_tx_chars(up);
+ }
+ }
+
+ /*
+ * Re-enable the transmitter if we disabled it.
+ */
+ if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+ up->acr &= ~UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+}
+
+static void serial8250_throttle(struct uart_port *port)
+{
+ port->throttle(port);
+}
+
+static void serial8250_unthrottle(struct uart_port *port)
+{
+ port->unthrottle(port);
+}
+
+static void serial8250_stop_rx(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get(up);
+
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_port_out(port, UART_IER, up->ier);
+
+ serial8250_rpm_put(up);
+}
+
+static void serial8250_disable_ms(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ /* no MSR capabilities */
+ if (up->bugs & UART_BUG_NOMSR)
+ return;
+
+ up->ier &= ~UART_IER_MSI;
+ serial_port_out(port, UART_IER, up->ier);
+}
+
+static void serial8250_enable_ms(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* no MSR capabilities */
+ if (up->bugs & UART_BUG_NOMSR)
+ return;
+
+ up->ier |= UART_IER_MSI;
+
+ serial8250_rpm_get(up);
+ serial_port_out(port, UART_IER, up->ier);
+ serial8250_rpm_put(up);
+}
+
+/*
+ * serial8250_rx_chars: processes received characters according to the
+ * passed-in LSR value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+{
+ struct uart_port *port = &up->port;
+ unsigned char ch;
+ int max_count = 256;
+ char flag;
+
+ do {
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_in(up, UART_RX);
+ else
+ /*
+			 * The Intel 82571 has a Serial Over LAN device that will
+			 * set UART_LSR_BI without setting UART_LSR_DR when
+			 * it receives a break. To avoid reading from the
+			 * receive buffer without the UART_LSR_DR bit set, we
+			 * just force the read character to be 0.
+ */
+ ch = 0;
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ lsr |= up->lsr_saved_flags;
+ up->lsr_saved_flags = 0;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ port->icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(port))
+ goto ignore_char;
+ } else if (lsr & UART_LSR_PE)
+ port->icount.parity++;
+ else if (lsr & UART_LSR_FE)
+ port->icount.frame++;
+ if (lsr & UART_LSR_OE)
+ port->icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= port->read_status_mask;
+
+ if (lsr & UART_LSR_BI) {
+ DEBUG_INTR("handling break....");
+ flag = TTY_BREAK;
+ } else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+
+ignore_char:
+ lsr = serial_in(up, UART_LSR);
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
+ return lsr;
+}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
+
+void serial8250_tx_chars(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+ if (port->x_char) {
+ serial_out(up, UART_TX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (uart_tx_stopped(port)) {
+ serial8250_stop_tx(port);
+ return;
+ }
+ if (uart_circ_empty(xmit)) {
+ __stop_tx(up);
+ return;
+ }
+
+ count = up->tx_loadsz;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ if (up->capabilities & UART_CAP_HFIFO) {
+ if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+ BOTH_EMPTY)
+ break;
+ }
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ DEBUG_INTR("THRE...");
+
+ /*
+ * With RPM enabled, we have to wait until the FIFO is empty before the
+	 * HW can go idle. So we get here once again with an empty FIFO and
+	 * disable the interrupt and RPM in __stop_tx().
+ */
+ if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
+ __stop_tx(up);
+}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
+
+/* Caller holds uart port lock */
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+ unsigned int status = serial_in(up, UART_MSR);
+
+ status |= up->msr_saved_flags;
+ up->msr_saved_flags = 0;
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+ port->state != NULL) {
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
+
+/*
+ * This handles the interrupt from one port.
+ */
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+{
+ unsigned char status;
+ unsigned long flags;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int dma_err = 0;
+
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = serial_port_in(port, UART_LSR);
+
+ DEBUG_INTR("status = %x...", status);
+
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ if (up->dma)
+ dma_err = up->dma->rx_dma(up, iir);
+
+ if (!up->dma || dma_err)
+ status = serial8250_rx_chars(up, status);
+ }
+ serial8250_modem_status(up);
+ if ((!up->dma || (up->dma && up->dma->tx_err)) &&
+ (status & UART_LSR_THRE))
+ serial8250_tx_chars(up);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(serial8250_handle_irq);
+
+static int serial8250_default_handle_irq(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int iir;
+ int ret;
+
+ serial8250_rpm_get(up);
+
+ iir = serial_port_in(port, UART_IIR);
+ ret = serial8250_handle_irq(port, iir);
+
+ serial8250_rpm_put(up);
+ return ret;
+}
+
+/*
+ * These Exar UARTs have an extra interrupt indicator that could
+ * fire for a few unimplemented interrupts, one of which is a
+ * wakeup event when coming out of sleep. This handler is here
+ * just to be on the safe side, so that these interrupts don't
+ * go unhandled.
+ */
+static int exar_handle_irq(struct uart_port *port)
+{
+ unsigned char int0, int1, int2, int3;
+ unsigned int iir = serial_port_in(port, UART_IIR);
+ int ret;
+
+ ret = serial8250_handle_irq(port, iir);
+
+ if ((port->type == PORT_XR17V35X) ||
+ (port->type == PORT_XR17D15X)) {
+ int0 = serial_port_in(port, 0x80);
+ int1 = serial_port_in(port, 0x81);
+ int2 = serial_port_in(port, 0x82);
+ int3 = serial_port_in(port, 0x83);
+ }
+
+ return ret;
+}
+
+static unsigned int serial8250_tx_empty(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+ unsigned int lsr;
+
+ serial8250_rpm_get(up);
+
+ spin_lock_irqsave(&port->lock, flags);
+ lsr = serial_port_in(port, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ serial8250_rpm_put(up);
+
+ return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int serial8250_get_mctrl(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int status;
+ unsigned int ret;
+
+ serial8250_rpm_get(up);
+ status = serial8250_modem_status(up);
+ serial8250_rpm_put(up);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+
+ serial_port_out(port, UART_MCR, mcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
+
+static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ if (port->set_mctrl)
+ port->set_mctrl(port, mctrl);
+ else
+ serial8250_do_set_mctrl(port, mctrl);
+}
+
+static void serial8250_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+
+ serial8250_rpm_get(up);
+ spin_lock_irqsave(&port->lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_port_out(port, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&port->lock, flags);
+ serial8250_rpm_put(up);
+}
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ for (;;) {
+ status = serial_in(up, UART_LSR);
+
+ up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+
+ if ((status & bits) == bits)
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ unsigned int tmout;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = serial_in(up, UART_MSR);
+ up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the UART while
+ * in an interrupt or debug context.
+ */
+
+static int serial8250_get_poll_char(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned char lsr;
+ int status;
+
+ serial8250_rpm_get(up);
+
+ lsr = serial_port_in(port, UART_LSR);
+
+ if (!(lsr & UART_LSR_DR)) {
+ status = NO_POLL_CHAR;
+ goto out;
+ }
+
+ status = serial_port_in(port, UART_RX);
+out:
+ serial8250_rpm_put(up);
+ return status;
+}
+
+
+static void serial8250_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ unsigned int ier;
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get(up);
+ /*
+	 * First save the IER, then disable the interrupts
+ */
+ ier = serial_port_in(port, UART_IER);
+ if (up->capabilities & UART_CAP_UUE)
+ serial_port_out(port, UART_IER, UART_IER_UUE);
+ else
+ serial_port_out(port, UART_IER, 0);
+
+ wait_for_xmitr(up, BOTH_EMPTY);
+ /*
+ * Send the character out.
+ */
+ serial_port_out(port, UART_TX, c);
+
+ /*
+	 * Finally, wait for the transmitter to become empty and
+	 * restore the IER.
+ */
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial_port_out(port, UART_IER, ier);
+ serial8250_rpm_put(up);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+int serial8250_do_startup(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+ unsigned char lsr, iir;
+ int retval;
+
+ if (port->type == PORT_8250_CIR)
+ return -ENODEV;
+
+ if (!port->fifosize)
+ port->fifosize = uart_config[port->type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[port->type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[port->type].flags;
+ up->mcr = 0;
+
+ if (port->iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
+ serial8250_rpm_get(up);
+ if (port->type == PORT_16C950) {
+ /* Wake up and initialize UART */
+ up->acr = 0;
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+ serial_port_out(port, UART_IER, 0);
+ serial_port_out(port, UART_LCR, 0);
+ serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+ serial_port_out(port, UART_LCR, 0);
+ }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * If this is an RSA port, see if we can kick it up to the
+ * higher speed clock.
+ */
+ enable_rsa(up);
+#endif
+
+ if (port->type == PORT_XR17V35X) {
+ /*
+ * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
+ * MCR [7:5] and MSR [7:0]
+ */
+ serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
+
+ /*
+		 * Make sure all interrupts are masked until initialization is
+ * complete and the FIFOs are cleared
+ */
+ serial_port_out(port, UART_IER, 0);
+ }
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial8250_clear_fifos(up);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ serial_port_in(port, UART_LSR);
+ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
+
+ /*
+ * At this point, there's no way the LSR could still be 0xff;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (!(port->flags & UPF_BUGGY_UART) &&
+ (serial_port_in(port, UART_LSR) == 0xff)) {
+ printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+ serial_index(port));
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
+	 * For an XR16C850, we need to set the trigger levels
+ */
+ if (port->type == PORT_16850) {
+ unsigned char fctr;
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+ serial_port_out(port, UART_FCTR,
+ fctr | UART_FCTR_TRGD | UART_FCTR_RX);
+ serial_port_out(port, UART_TRG, UART_TRG_96);
+ serial_port_out(port, UART_FCTR,
+ fctr | UART_FCTR_TRGD | UART_FCTR_TX);
+ serial_port_out(port, UART_TRG, UART_TRG_96);
+
+ serial_port_out(port, UART_LCR, 0);
+ }
+
+ if (port->irq) {
+ unsigned char iir1;
+ /*
+ * Test for UARTs that do not reassert THRE when the
+ * transmitter is idle and the interrupt has already
+ * been cleared. Real 16550s should always reassert
+ * this interrupt whenever the transmitter is idle and
+ * the interrupt is enabled. Delays are necessary to
+ * allow register changes to become visible.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ if (up->port.irqflags & IRQF_SHARED)
+ disable_irq_nosync(port->irq);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+ udelay(1); /* allow THRE to set */
+ iir1 = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+ serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+ udelay(1); /* allow a working UART time to re-assert THRE */
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+
+ if (port->irqflags & IRQF_SHARED)
+ enable_irq(port->irq);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * If the interrupt is not reasserted, or we otherwise
+		 * don't trust the IIR, set up a timer to kick the UART
+ * on a regular basis.
+ */
+ if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+ up->port.flags & UPF_BUG_THRE) {
+ up->bugs |= UART_BUG_THRE;
+ }
+ }
+
+ retval = up->ops->setup_irq(up);
+ if (retval)
+ goto out;
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ if (!up->port.irq)
+ up->port.mctrl |= TIOCM_OUT1;
+ } else
+ /*
+		 * Most PC UARTs need OUT2 raised to enable interrupts.
+ */
+ if (port->irq)
+ up->port.mctrl |= TIOCM_OUT2;
+
+ serial8250_set_mctrl(port, port->mctrl);
+
+	/*
+	 * Serial over LAN (SoL) hack:
+	 * Intel 8257x Gigabit ethernet chips have a 16550 emulation
+	 * to be used for Serial Over LAN. Those chips take a longer
+	 * time than a normal serial device to signal that transmit
+	 * data has been queued, so the TX-enable test below generally
+	 * fails for them. One solution would be to delay reading the
+	 * IIR, but that is not reliable since the required timeout is
+	 * variable. So just skip the test for these chips; that way
+	 * we never enable UART_BUG_TXEN for them.
+	 */
+ if (up->port.flags & UPF_NO_TXEN_TEST)
+ goto dont_test_tx_en;
+
+ /*
+ * Do a quick test to see if we receive an
+ * interrupt when we enable the TX irq.
+ */
+ serial_port_out(port, UART_IER, UART_IER_THRI);
+ lsr = serial_port_in(port, UART_LSR);
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+
+ if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+ if (!(up->bugs & UART_BUG_TXEN)) {
+ up->bugs |= UART_BUG_TXEN;
+ pr_debug("ttyS%d - enabling bad tx status workarounds\n",
+ serial_index(port));
+ }
+ } else {
+ up->bugs &= ~UART_BUG_TXEN;
+ }
+
+dont_test_tx_en:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * Clear the interrupt registers again for luck, and clear the
+ * saved flags to avoid getting false values from polling
+ * routines or the previous session.
+ */
+ serial_port_in(port, UART_LSR);
+ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
+ up->lsr_saved_flags = 0;
+ up->msr_saved_flags = 0;
+
+ /*
+ * Request DMA channels for both RX and TX.
+ */
+ if (up->dma) {
+ retval = serial8250_request_dma(up);
+ if (retval) {
+ pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
+ serial_index(port));
+ up->dma = NULL;
+ }
+ }
+
+ /*
+ * Set the IER shadow for rx interrupts but defer actual interrupt
+ * enable until after the FIFOs are enabled; otherwise, an already-
+ * active sender can swamp the interrupt handler with "too much work".
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+
+ if (port->flags & UPF_FOURPORT) {
+ unsigned int icp;
+ /*
+ * Enable interrupts on the AST Fourport board
+ */
+ icp = (port->iobase & 0xfe0) | 0x01f;
+ outb_p(0x80, icp);
+ inb_p(icp);
+ }
+ retval = 0;
+out:
+ serial8250_rpm_put(up);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(serial8250_do_startup);
+
+static int serial8250_startup(struct uart_port *port)
+{
+ if (port->startup)
+ return port->startup(port);
+ return serial8250_do_startup(port);
+}
+
+void serial8250_do_shutdown(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+
+ serial8250_rpm_get(up);
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_port_out(port, UART_IER, 0);
+
+ if (up->dma)
+ serial8250_release_dma(up);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((port->iobase & 0xfe0) | 0x1f);
+ port->mctrl |= TIOCM_OUT1;
+ } else
+ port->mctrl &= ~TIOCM_OUT2;
+
+ serial8250_set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_port_out(port, UART_LCR,
+ serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
+ serial8250_clear_fifos(up);
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Reset the RSA board back to 115kbps compat mode.
+ */
+ disable_rsa(up);
+#endif
+
+ /*
+ * Read data port to reset things, and then unlink from
+ * the IRQ chain.
+ */
+ serial_port_in(port, UART_RX);
+ serial8250_rpm_put(up);
+
+ up->ops->release_irq(up);
+}
+EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
+
+static void serial8250_shutdown(struct uart_port *port)
+{
+ if (port->shutdown)
+ port->shutdown(port);
+ else
+ serial8250_do_shutdown(port);
+}
+
+/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD).
+ * Calculate the divisor with the extra 4-bit fractional portion.
+ */
+static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ struct uart_port *port = &up->port;
+ unsigned int quot_16;
+
+ quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ *frac = quot_16 & 0x0f;
+
+ return quot_16 >> 4;
+}
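+
+/*
+ * Worked example: uartclk = 1843200 and baud = 115200 give quot_16 =
+ * 16, i.e. quot = 1 with frac = 0. In general the effective divisor
+ * is quot + frac/16, since baud = uartclk / (16 * quot + frac).
+ */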
+
+static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ struct uart_port *port = &up->port;
+ unsigned int quot;
+
+ /*
+ * Handle magic divisors for baud rates above baud_base on
+ * SMSC SuperIO chips.
+	 */
+ if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+ baud == (port->uartclk/4))
+ quot = 0x8001;
+ else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+ baud == (port->uartclk/8))
+ quot = 0x8002;
+ else if (up->port.type == PORT_XR17V35X)
+ quot = xr17v35x_get_divisor(up, baud, frac);
+ else
+ quot = uart_get_divisor(port, baud);
+
+ /*
+ * Oxford Semi 952 rev B workaround
+ */
+ if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+ quot++;
+
+ return quot;
+}
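+
+/*
+ * Worked example: with uartclk = 1843200, a request for 9600 baud goes
+ * through uart_get_divisor() and yields quot = 1843200 / (16 * 9600) =
+ * 12, while on a UPF_MAGIC_MULTIPLIER port a request for uartclk / 4 =
+ * 460800 baud returns the magic divisor 0x8001.
+ */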
+
+static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
+ tcflag_t c_cflag)
+{
+ unsigned char cval;
+
+ switch (c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (c_cflag & PARENB) {
+ cval |= UART_LCR_PARITY;
+ if (up->bugs & UART_BUG_PARITY)
+ up->fifo_bug = true;
+ }
+ if (!(c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ return cval;
+}
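+
+/*
+ * For example, 7E1 (CS7 | PARENB, PARODD clear) computes to
+ * UART_LCR_WLEN7 | UART_LCR_PARITY | UART_LCR_EPAR. Note that
+ * UART_LCR_EPAR is set whenever PARODD is clear, even without
+ * PARENB; that is harmless since EPAR only matters when
+ * UART_LCR_PARITY is set.
+ */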
+
+static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Workaround to enable 115200 baud on OMAP1510 internal ports */
+ if (is_omap1510_8250(up)) {
+ if (baud == 115200) {
+ quot = 1;
+ serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
+ } else
+ serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
+ }
+
+ /*
+	 * For NatSemi, switch to bank 2 (not bank 1) to avoid resetting
+	 * EXCR2; otherwise just set DLAB.
+ */
+ if (up->capabilities & UART_NATSEMI)
+ serial_port_out(port, UART_LCR, 0xe0);
+ else
+ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
+
+ serial_dl_write(up, quot);
+
+ /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
+ if (up->port.type == PORT_XR17V35X)
+ serial_port_out(port, 0x2, quot_frac);
+}
+
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned char cval;
+ unsigned long flags;
+ unsigned int baud, quot, frac = 0;
+
+ cval = serial8250_compute_lcr(up, termios->c_cflag);
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / 16 / 0xffff,
+ port->uartclk / 16);
+ quot = serial8250_get_divisor(up, baud, &frac);
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ serial8250_rpm_get(up);
+ spin_lock_irqsave(&port->lock, flags);
+
+ up->lcr = cval; /* Save computed LCR */
+
+ if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
+ /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
+ if ((baud < 2400 && !up->dma) || up->fifo_bug) {
+ up->fcr &= ~UART_FCR_TRIGGER_MASK;
+ up->fcr |= UART_FCR_TRIGGER_1;
+ }
+ }
+
+ /*
+ * MCR-based auto flow control. When AFE is enabled, RTS will be
+ * deasserted when the receive FIFO contains more characters than
+ * the trigger, or the MCR RTS bit is cleared. In the case where
+ * the remote UART is not using CTS auto flow control, we must
+ * have sufficient FIFO entries for the latency of the remote
+ * UART to respond. IOW, at least 32 bytes of FIFO.
+ */
+ if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
+ up->mcr &= ~UART_MCR_AFE;
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE;
+ }
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ /*
+	 * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+	 * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (!(up->bugs & UART_BUG_NOMSR) &&
+ UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+ if (up->capabilities & UART_CAP_UUE)
+ up->ier |= UART_IER_UUE;
+ if (up->capabilities & UART_CAP_RTOIE)
+ up->ier |= UART_IER_RTOIE;
+
+ serial_port_out(port, UART_IER, up->ier);
+
+ if (up->capabilities & UART_CAP_EFR) {
+ unsigned char efr = 0;
+ /*
+ * TI16C752/Startech hardware flow control. FIXME:
+ * - TI16C752 requires control thresholds to be set.
+ * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
+ */
+ if (termios->c_cflag & CRTSCTS)
+ efr |= UART_EFR_CTS;
+
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ if (port->flags & UPF_EXAR_EFR)
+ serial_port_out(port, UART_XR_EFR, efr);
+ else
+ serial_port_out(port, UART_EFR, efr);
+ }
+
+ serial8250_set_divisor(port, baud, quot, frac);
+
+ /*
+ * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
+ * is written without DLAB set, this mode will be disabled.
+ */
+ if (port->type == PORT_16750)
+ serial_port_out(port, UART_FCR, up->fcr);
+
+ serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */
+ if (port->type != PORT_16750) {
+ /* emulated UARTs (Lucent Venus 167x) need two steps */
+ if (up->fcr & UART_FCR_ENABLE_FIFO)
+ serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
+ }
+ serial8250_set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ serial8250_rpm_put(up);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+}
+EXPORT_SYMBOL(serial8250_do_set_termios);
+
+static void
+serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ if (port->set_termios)
+ port->set_termios(port, termios, old);
+ else
+ serial8250_do_set_termios(port, termios, old);
+}
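[Editor's note] serial8250_do_set_termios() is exported precisely so that a port-specific hook installed in port->set_termios can wrap it, as the dispatcher above shows. A hedged sketch of that pattern — the driver name and fixup are hypothetical:

	/* Hypothetical driver hook: apply port-specific fixups, then
	 * delegate to the exported generic implementation. */
	static void mydrv_set_termios(struct uart_port *port,
				      struct ktermios *termios,
				      struct ktermios *old)
	{
		/* driver-specific work would go here, e.g. reprogramming
		 * port->uartclk for the requested baud rate */
		serial8250_do_set_termios(port, termios, old);
	}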
+
+static void
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+{
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+ spin_lock_irq(&port->lock);
+ serial8250_enable_ms(port);
+ spin_unlock_irq(&port->lock);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+ spin_lock_irq(&port->lock);
+ serial8250_disable_ms(port);
+ spin_unlock_irq(&port->lock);
+ }
+ }
+}
+
+
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_8250_port *p = up_to_u8250p(port);
+
+ serial8250_set_sleep(p, state != 0);
+}
+EXPORT_SYMBOL(serial8250_do_pm);
+
+static void
+serial8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ if (port->pm)
+ port->pm(port, state, oldstate);
+ else
+ serial8250_do_pm(port, state, oldstate);
+}
+
+static unsigned int serial8250_port_size(struct uart_8250_port *pt)
+{
+ if (pt->port.mapsize)
+ return pt->port.mapsize;
+ if (pt->port.iotype == UPIO_AU) {
+ if (pt->port.type == PORT_RT2880)
+ return 0x100;
+ return 0x1000;
+ }
+ if (is_omap1_8250(pt))
+ return 0x16 << pt->port.regshift;
+
+ return 8 << pt->port.regshift;
+}
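[Editor's note] For the default layout this is just the eight standard UART registers scaled by the register stride: regshift = 0 maps 8 bytes, regshift = 2 (32-bit-spaced registers) maps 8 << 2 = 32 bytes, while the OMAP1 layout needs 0x16 << regshift bytes to cover its extra registers.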
+
+/*
+ * Resource handling.
+ */
+static int serial8250_request_std_resource(struct uart_8250_port *up)
+{
+ unsigned int size = serial8250_port_size(up);
+ struct uart_port *port = &up->port;
+ int ret = 0;
+
+ switch (port->iotype) {
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_MEM32:
+ case UPIO_MEM32BE:
+ case UPIO_MEM:
+ if (!port->mapbase)
+ break;
+
+ if (!request_mem_region(port->mapbase, size, "serial")) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap_nocache(port->mapbase, size);
+ if (!port->membase) {
+ release_mem_region(port->mapbase, size);
+ ret = -ENOMEM;
+ }
+ }
+ break;
+
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ if (!request_region(port->iobase, size, "serial"))
+ ret = -EBUSY;
+ break;
+ }
+ return ret;
+}
+
+static void serial8250_release_std_resource(struct uart_8250_port *up)
+{
+ unsigned int size = serial8250_port_size(up);
+ struct uart_port *port = &up->port;
+
+ switch (port->iotype) {
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_MEM32:
+ case UPIO_MEM32BE:
+ case UPIO_MEM:
+ if (!port->mapbase)
+ break;
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, size);
+ break;
+
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ release_region(port->iobase, size);
+ break;
+ }
+}
+
+static void serial8250_release_port(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_release_std_resource(up);
+}
+
+static int serial8250_request_port(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int ret;
+
+ if (port->type == PORT_8250_CIR)
+ return -ENODEV;
+
+ ret = serial8250_request_std_resource(up);
+
+ return ret;
+}
+
+static int fcr_get_rxtrig_bytes(struct uart_8250_port *up)
+{
+ const struct serial8250_config *conf_type = &uart_config[up->port.type];
+ unsigned char bytes;
+
+ bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)];
+
+ return bytes ? bytes : -EOPNOTSUPP;
+}
+
+static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes)
+{
+ const struct serial8250_config *conf_type = &uart_config[up->port.type];
+ int i;
+
+ if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)])
+ return -EOPNOTSUPP;
+
+ for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) {
+ if (bytes < conf_type->rxtrig_bytes[i])
+ /* Use the nearest lower value */
+ return (--i) << UART_FCR_R_TRIG_SHIFT;
+ }
+
+ return UART_FCR_R_TRIG_11;
+}
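[Editor's note] Worked example of the mapping above: the PORT_16550A entry in uart_config lists rxtrig_bytes of {1, 4, 8, 14}. A request for 10 bytes scans upward until 10 < 14 at index 3, then returns index 2 shifted into the FCR field, i.e. UART_FCR_R_TRIG_10 (the 8-byte level); a request of 14 or more falls through to UART_FCR_R_TRIG_11.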
+
+static int do_get_rxtrig(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+ struct uart_8250_port *up =
+ container_of(uport, struct uart_8250_port, port);
+
+ if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
+ return -EINVAL;
+
+ return fcr_get_rxtrig_bytes(up);
+}
+
+static int do_serial8250_get_rxtrig(struct tty_port *port)
+{
+ int rxtrig_bytes;
+
+ mutex_lock(&port->mutex);
+ rxtrig_bytes = do_get_rxtrig(port);
+ mutex_unlock(&port->mutex);
+
+ return rxtrig_bytes;
+}
+
+static ssize_t serial8250_get_attr_rx_trig_bytes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tty_port *port = dev_get_drvdata(dev);
+ int rxtrig_bytes;
+
+ rxtrig_bytes = do_serial8250_get_rxtrig(port);
+ if (rxtrig_bytes < 0)
+ return rxtrig_bytes;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
+}
+
+static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+ struct uart_8250_port *up =
+ container_of(uport, struct uart_8250_port, port);
+ int rxtrig;
+
+ if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
+ up->fifo_bug)
+ return -EINVAL;
+
+ rxtrig = bytes_to_fcr_rxtrig(up, bytes);
+ if (rxtrig < 0)
+ return rxtrig;
+
+ serial8250_clear_fifos(up);
+ up->fcr &= ~UART_FCR_TRIGGER_MASK;
+ up->fcr |= (unsigned char)rxtrig;
+ serial_out(up, UART_FCR, up->fcr);
+ return 0;
+}
+
+static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
+{
+ int ret;
+
+ mutex_lock(&port->mutex);
+ ret = do_set_rxtrig(port, bytes);
+ mutex_unlock(&port->mutex);
+
+ return ret;
+}
+
+static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tty_port *port = dev_get_drvdata(dev);
+ unsigned char bytes;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ ret = kstrtou8(buf, 10, &bytes);
+ if (ret < 0)
+ return ret;
+
+ ret = do_serial8250_set_rxtrig(port, bytes);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(rx_trig_bytes, S_IRUSR | S_IWUSR | S_IRGRP,
+ serial8250_get_attr_rx_trig_bytes,
+ serial8250_set_attr_rx_trig_bytes);
+
+static struct attribute *serial8250_dev_attrs[] = {
+ &dev_attr_rx_trig_bytes.attr,
+ NULL,
+ };
+
+static struct attribute_group serial8250_dev_attr_group = {
+ .attrs = serial8250_dev_attrs,
+ };
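[Editor's note] The resulting rx_trig_bytes file appears under the port's tty device in sysfs. A minimal userspace sketch — the path shown is illustrative and depends on the port:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path; adjust for the port in question */
		int fd = open("/sys/class/tty/ttyS0/rx_trig_bytes", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* request an 8-byte RX trigger; the driver rounds down
		 * to the nearest supported level */
		if (write(fd, "8", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}

Reading the same file back reports the trigger level the driver actually selected.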
+
+static void register_dev_spec_attr_grp(struct uart_8250_port *up)
+{
+ const struct serial8250_config *conf_type = &uart_config[up->port.type];
+
+ if (conf_type->rxtrig_bytes[0])
+ up->port.attr_group = &serial8250_dev_attr_group;
+}
+
+static void serial8250_config_port(struct uart_port *port, int flags)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int ret;
+
+ if (port->type == PORT_8250_CIR)
+ return;
+
+ /*
+ * Find the region that we can probe for. This in turn
+ * tells us whether we can probe for the type of port.
+ */
+ ret = serial8250_request_std_resource(up);
+ if (ret < 0)
+ return;
+
+ if (port->iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
+ if (flags & UART_CONFIG_TYPE)
+ autoconfig(up);
+
+ /* if access method is AU, it is a 16550 with a quirk */
+ if (port->type == PORT_16550A && port->iotype == UPIO_AU)
+ up->bugs |= UART_BUG_NOMSR;
+
+ /* HW bugs may trigger IRQ while IIR == NO_INT */
+ if (port->type == PORT_TEGRA)
+ up->bugs |= UART_BUG_NOMSR;
+
+ if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+ autoconfig_irq(up);
+
+ if (port->type == PORT_UNKNOWN)
+ serial8250_release_std_resource(up);
+
+ /* FIXME: probably not the best place for this */
+ if ((port->type == PORT_XR17V35X) ||
+ (port->type == PORT_XR17D15X))
+ port->handle_irq = exar_handle_irq;
+
+ register_dev_spec_attr_grp(up);
+ up->fcr = uart_config[up->port.type].fcr;
+}
+
+static int
+serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+ ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+ ser->type == PORT_STARTECH)
+ return -EINVAL;
+ return 0;
+}
+
+static const char *
+serial8250_type(struct uart_port *port)
+{
+ int type = port->type;
+
+ if (type >= ARRAY_SIZE(uart_config))
+ type = 0;
+ return uart_config[type].name;
+}
+
+static const struct uart_ops serial8250_pops = {
+ .tx_empty = serial8250_tx_empty,
+ .set_mctrl = serial8250_set_mctrl,
+ .get_mctrl = serial8250_get_mctrl,
+ .stop_tx = serial8250_stop_tx,
+ .start_tx = serial8250_start_tx,
+ .throttle = serial8250_throttle,
+ .unthrottle = serial8250_unthrottle,
+ .stop_rx = serial8250_stop_rx,
+ .enable_ms = serial8250_enable_ms,
+ .break_ctl = serial8250_break_ctl,
+ .startup = serial8250_startup,
+ .shutdown = serial8250_shutdown,
+ .set_termios = serial8250_set_termios,
+ .set_ldisc = serial8250_set_ldisc,
+ .pm = serial8250_pm,
+ .type = serial8250_type,
+ .release_port = serial8250_release_port,
+ .request_port = serial8250_request_port,
+ .config_port = serial8250_config_port,
+ .verify_port = serial8250_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial8250_get_poll_char,
+ .poll_put_char = serial8250_put_poll_char,
+#endif
+};
+
+void serial8250_init_port(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
+ port->ops = &serial8250_pops;
+
+ up->cur_iotype = 0xFF;
+}
+EXPORT_SYMBOL_GPL(serial8250_init_port);
+
+void serial8250_set_defaults(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ if (up->port.flags & UPF_FIXED_TYPE) {
+ unsigned int type = up->port.type;
+
+ if (!up->port.fifosize)
+ up->port.fifosize = uart_config[type].fifo_size;
+ if (!up->tx_loadsz)
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+ if (!up->capabilities)
+ up->capabilities = uart_config[type].flags;
+ }
+
+ set_io_from_upio(port);
+
+ /* default dma handlers */
+ if (up->dma) {
+ if (!up->dma->tx_dma)
+ up->dma->tx_dma = serial8250_tx_dma;
+ if (!up->dma->rx_dma)
+ up->dma->rx_dma = serial8250_rx_dma;
+ }
+}
+EXPORT_SYMBOL_GPL(serial8250_set_defaults);
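[Editor's note] These defaults matter mostly to drivers registering fixed-type ports. A hedged sketch of the usual pattern, assuming the standard serial8250_register_8250_port() entry point; the address and IRQ are placeholders, and registration is what causes the uart_config defaults (FIFO size, tx_loadsz, capabilities) to be filled in:

	/* Hypothetical probe fragment: declare a fixed-type MMIO port
	 * and let the 8250 core apply the PORT_16550A defaults. */
	static int mydrv_probe_example(void)
	{
		struct uart_8250_port up = {
			.port = {
				.type	 = PORT_16550A,
				.iotype	 = UPIO_MEM,
				.flags	 = UPF_FIXED_TYPE | UPF_IOREMAP,
				.mapbase = 0x10000000,	/* example address */
				.uartclk = 1843200,
				.irq	 = 42,		/* example IRQ */
			},
		};

		return serial8250_register_8250_port(&up);
	}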
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+static void serial8250_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out(port, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ touch_nmi_watchdog();
+
+ serial8250_rpm_get(up);
+
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_port_in(port, UART_IER);
+
+ if (up->capabilities & UART_CAP_UUE)
+ serial_port_out(port, UART_IER, UART_IER_UUE);
+ else
+ serial_port_out(port, UART_IER, 0);
+
+ /* check scratch reg to see if port powered off during system sleep */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+ struct ktermios termios;
+ unsigned int baud, quot, frac = 0;
+
+ termios.c_cflag = port->cons->cflag;
+ if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_cflag = port->state->port.tty->termios.c_cflag;
+
+ baud = uart_get_baud_rate(port, &termios, NULL,
+ port->uartclk / 16 / 0xffff,
+ port->uartclk / 16);
+ quot = serial8250_get_divisor(up, baud, &frac);
+
+ serial8250_set_divisor(port, baud, quot, frac);
+ serial_port_out(port, UART_LCR, up->lcr);
+ serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+
+ up->canary = 0;
+ }
+
+ uart_console_write(port, s, count, serial8250_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial_port_out(port, UART_IER, ier);
+
+ /*
+ * The receive handling will happen properly because the
+ * receive ready bit will still be set; it is not cleared
+ * on read. However, the modem status handling will not run on
+ * its own; we must call it explicitly if anything was saved in
+ * msr_saved_flags while processing with interrupts off.
+ */
+ if (up->msr_saved_flags)
+ serial8250_modem_status(up);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+ serial8250_rpm_put(up);
+}
+
+static unsigned int probe_baud(struct uart_port *port)
+{
+ unsigned char lcr, dll, dlm;
+ unsigned int quot;
+
+ lcr = serial_port_in(port, UART_LCR);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
+ dll = serial_port_in(port, UART_DLL);
+ dlm = serial_port_in(port, UART_DLM);
+ serial_port_out(port, UART_LCR, lcr);
+
+ quot = (dlm << 8) | dll;
+ return (port->uartclk / 16) / quot;
+}
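[Editor's note] Worked example: with the standard 1.8432 MHz input clock and a latched divisor of dll = 12, dlm = 0, this reads back baud = 1843200 / 16 / 12 = 9600.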
+
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+{
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else if (probe)
+ baud = probe_baud(port);
+
+ return uart_set_options(port, port->cons, baud, parity, bits, flow);
+}
+
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
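[Editor's note] As a concrete example of the options path above: booting with console=ttyS0,115200n8 hands the string "115200n8" to uart_parse_options(), which yields baud = 115200, parity = 'n', bits = 8. With no options and probe set, the baud rate is instead read back from the divisor latch via probe_baud().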
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 7d79425c2b09..d11621e2cf1d 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -218,6 +218,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(dev, "failed to register 8250 port\n");
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 706295913c34..39c6d2277570 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -2,10 +2,11 @@
# Makefile for the 8250 serial device drivers.
#
-obj-$(CONFIG_SERIAL_8250) += 8250.o
+obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-y := 8250_core.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
-8250-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
+8250_base-y := 8250_port.o
+8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..687b1ea294b7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -594,7 +594,7 @@ config SERIAL_IMX_CONSOLE
config SERIAL_UARTLITE
tristate "Xilinx uartlite serial port support"
- depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE || ARCH_ZYNQ
+ depends on HAS_IOMEM
select SERIAL_CORE
help
Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1067,6 +1067,7 @@ config SERIAL_ETRAXFS
bool "ETRAX FS serial port support"
depends on ETRAX_ARCH_V32 && OF
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
config SERIAL_ETRAXFS_CONSOLE
bool "ETRAX FS serial console support"
@@ -1185,7 +1186,7 @@ config SERIAL_SC16IS7XX_CORE
config SERIAL_SC16IS7XX
tristate "SC16IS7xx serial support"
select SERIAL_CORE
- depends on I2C || SPI_MASTER
+ depends on (SPI_MASTER && !I2C) || I2C
help
This selects support for SC16IS7xx serial ports.
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
@@ -1376,7 +1377,8 @@ config SERIAL_ALTERA_UART_CONSOLE
config SERIAL_IFX6X60
tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
- depends on GPIOLIB && SPI && HAS_DMA
+ depends on GPIOLIB || COMPILE_TEST
+ depends on SPI && HAS_DMA
help
Support for the IFX6x60 modem devices on Intel MID platforms.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
void __iomem *base;
base = devm_ioremap_resource(dev, mmiobase);
- if (!base)
- return -ENOMEM;
+ if (IS_ERR(base))
+ return PTR_ERR(base);
index = pl011_probe_dt_alias(index, dev);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2a8f528153e7..5ca5cf3e9359 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -56,6 +56,15 @@
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
+/* The minimum number of data the FIFOs should be able to contain */
+#define ATMEL_MIN_FIFO_SIZE 8
+/*
+ * These two offsets are subtracted from the RX FIFO size to define the RTS
+ * high and low thresholds
+ */
+#define ATMEL_RTS_HIGH_OFFSET 16
+#define ATMEL_RTS_LOW_OFFSET 20
+
#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
@@ -88,37 +97,6 @@ static void atmel_stop_rx(struct uart_port *port);
#define ATMEL_ISR_PASS_LIMIT 256
-/* UART registers. CR is write-only, hence no GET macro */
-#define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR)
-#define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR)
-#define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR)
-#define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER)
-#define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR)
-#define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR)
-#define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR)
-#define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR)
-#define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR)
-#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
-#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
-#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
-#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
-#define UART_GET_IP_NAME(port) __raw_readl((port)->membase + ATMEL_US_NAME)
-#define UART_GET_IP_VERSION(port) __raw_readl((port)->membase + ATMEL_US_VERSION)
-
- /* PDC registers */
-#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
-#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
-
-#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
-#define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR)
-#define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
-#define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
-#define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
-
-#define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
-#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
-#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
-
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
@@ -166,12 +144,16 @@ struct atmel_uart_port {
unsigned int irq_status;
unsigned int irq_status_prev;
unsigned int status_change;
+ unsigned int tx_len;
struct circ_buf rx_ring;
struct mctrl_gpios *gpios;
int gpio_irq[UART_GPIO_MAX];
unsigned int tx_done_mask;
+ u32 fifo_size;
+ u32 rts_high;
+ u32 rts_low;
bool ms_irq_enabled;
bool is_usart; /* usart or uart */
struct timer_list uart_timer; /* uart timer */
@@ -212,6 +194,43 @@ to_atmel_uart_port(struct uart_port *uart)
return container_of(uart, struct atmel_uart_port, uart);
}
+static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
+{
+ return __raw_readl(port->membase + reg);
+}
+
+static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
+{
+ __raw_writel(value, port->membase + reg);
+}
+
+#ifdef CONFIG_AVR32
+
+/* AVR32 cannot handle 8-bit or 16-bit I/O accesses, only 32-bit ones */
+static inline u8 atmel_uart_read_char(struct uart_port *port)
+{
+ return __raw_readl(port->membase + ATMEL_US_RHR);
+}
+
+static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
+{
+ __raw_writel(value, port->membase + ATMEL_US_THR);
+}
+
+#else
+
+static inline u8 atmel_uart_read_char(struct uart_port *port)
+{
+ return __raw_readb(port->membase + ATMEL_US_RHR);
+}
+
+static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
+{
+ __raw_writeb(value, port->membase + ATMEL_US_THR);
+}
+
+#endif
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -257,7 +276,7 @@ static unsigned int atmel_get_lines_status(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, ret = 0;
- status = UART_GET_CSR(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
mctrl_gpio_get(atmel_port->gpios, &ret);
@@ -304,9 +323,9 @@ static int atmel_config_rs485(struct uart_port *port,
unsigned int mode;
/* Disable interrupts */
- UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
- mode = UART_GET_MR(port);
+ mode = atmel_uart_readl(port, ATMEL_US_MR);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
@@ -316,7 +335,8 @@ static int atmel_config_rs485(struct uart_port *port,
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
- UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+ atmel_uart_writel(port, ATMEL_US_TTGR,
+ rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -326,10 +346,10 @@ static int atmel_config_rs485(struct uart_port *port,
else
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}
- UART_PUT_MR(port, mode);
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
/* Enable interrupts */
- UART_PUT_IER(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
return 0;
}
@@ -339,7 +359,9 @@ static int atmel_config_rs485(struct uart_port *port,
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
- return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
+ return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
+ TIOCSER_TEMT :
+ 0;
}
/*
@@ -348,13 +370,14 @@ static u_int atmel_tx_empty(struct uart_port *port)
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
- unsigned int mode = UART_GET_MR(port);
+ unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
unsigned int rts_paused, rts_ready;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* override mode to RS485 if needed, otherwise keep the current mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
- UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ atmel_uart_writel(port, ATMEL_US_TTGR,
+ port->rs485.delay_rts_after_send);
mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
}
@@ -384,7 +407,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
else
control |= ATMEL_US_DTRDIS;
- UART_PUT_CR(port, control);
+ atmel_uart_writel(port, ATMEL_US_CR, control);
mctrl_gpio_set(atmel_port->gpios, mctrl);
@@ -395,7 +418,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
else
mode |= ATMEL_US_CHMODE_NORMAL;
- UART_PUT_MR(port, mode);
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
}
/*
@@ -406,7 +429,7 @@ static u_int atmel_get_mctrl(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int ret = 0, status;
- status = UART_GET_CSR(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
/*
* The control signals are active low.
@@ -432,10 +455,10 @@ static void atmel_stop_tx(struct uart_port *port)
if (atmel_use_pdc_tx(port)) {
/* disable PDC transmit */
- UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
/* Disable interrupts */
- UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
@@ -450,7 +473,7 @@ static void atmel_start_tx(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_tx(port)) {
- if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
+ if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this. */
return;
@@ -460,10 +483,10 @@ static void atmel_start_tx(struct uart_port *port)
atmel_stop_rx(port);
/* re-enable PDC transmit */
- UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}
/* Enable interrupts */
- UART_PUT_IER(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
}
/*
@@ -471,17 +494,19 @@ static void atmel_start_tx(struct uart_port *port)
*/
static void atmel_start_rx(struct uart_port *port)
{
- UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
+ /* reset status and receiver */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
- UART_PUT_CR(port, ATMEL_US_RXEN);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
if (atmel_use_pdc_rx(port)) {
/* enable PDC controller */
- UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
- port->read_status_mask);
- UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
} else {
- UART_PUT_IER(port, ATMEL_US_RXRDY);
+ atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
}
}
@@ -490,15 +515,16 @@ static void atmel_start_rx(struct uart_port *port)
*/
static void atmel_stop_rx(struct uart_port *port)
{
- UART_PUT_CR(port, ATMEL_US_RXDIS);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
if (atmel_use_pdc_rx(port)) {
/* disable PDC receive */
- UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
- UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
- port->read_status_mask);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
} else {
- UART_PUT_IDR(port, ATMEL_US_RXRDY);
+ atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
}
}
@@ -538,7 +564,7 @@ static void atmel_enable_ms(struct uart_port *port)
else
ier |= ATMEL_US_DCDIC;
- UART_PUT_IER(port, ier);
+ atmel_uart_writel(port, ATMEL_US_IER, ier);
}
/*
@@ -577,7 +603,7 @@ static void atmel_disable_ms(struct uart_port *port)
else
idr |= ATMEL_US_DCDIC;
- UART_PUT_IDR(port, idr);
+ atmel_uart_writel(port, ATMEL_US_IDR, idr);
}
/*
@@ -586,9 +612,11 @@ static void atmel_disable_ms(struct uart_port *port)
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
if (break_state != 0)
- UART_PUT_CR(port, ATMEL_US_STTBRK); /* start break */
+ /* start break */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
else
- UART_PUT_CR(port, ATMEL_US_STPBRK); /* stop break */
+ /* stop break */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
}
/*
@@ -622,7 +650,7 @@ atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
/* clear error */
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
@@ -645,9 +673,9 @@ static void atmel_rx_chars(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, ch;
- status = UART_GET_CSR(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
while (status & ATMEL_US_RXRDY) {
- ch = UART_GET_CHAR(port);
+ ch = atmel_uart_read_char(port);
/*
* note that the error handling code is
@@ -658,12 +686,13 @@ static void atmel_rx_chars(struct uart_port *port)
|| atmel_port->break_active)) {
/* clear error */
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
if (status & ATMEL_US_RXBRK
&& !atmel_port->break_active) {
atmel_port->break_active = 1;
- UART_PUT_IER(port, ATMEL_US_RXBRK);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ ATMEL_US_RXBRK);
} else {
/*
* This is either the end-of-break
@@ -672,14 +701,15 @@ static void atmel_rx_chars(struct uart_port *port)
* being set. In both cases, the next
* RXBRK will indicate start-of-break.
*/
- UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ ATMEL_US_RXBRK);
status &= ~ATMEL_US_RXBRK;
atmel_port->break_active = 0;
}
}
atmel_buffer_rx_char(port, status, ch);
- status = UART_GET_CSR(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
}
tasklet_schedule(&atmel_port->tasklet);
@@ -694,16 +724,18 @@ static void atmel_tx_chars(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
- UART_PUT_CHAR(port, port->x_char);
+ if (port->x_char &&
+ (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+ atmel_uart_write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
- UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+ while (atmel_uart_readl(port, ATMEL_US_CSR) &
+ atmel_port->tx_done_mask) {
+ atmel_uart_write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
@@ -715,7 +747,8 @@ static void atmel_tx_chars(struct uart_port *port)
if (!uart_circ_empty(xmit))
/* Enable interrupts */
- UART_PUT_IER(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
}
static void atmel_complete_tx_dma(void *arg)
@@ -730,10 +763,10 @@ static void atmel_complete_tx_dma(void *arg)
if (chan)
dmaengine_terminate_all(chan);
- xmit->tail += sg_dma_len(&atmel_port->sg_tx);
+ xmit->tail += atmel_port->tx_len;
xmit->tail &= UART_XMIT_SIZE - 1;
- port->icount.tx += sg_dma_len(&atmel_port->sg_tx);
+ port->icount.tx += atmel_port->tx_len;
spin_lock_irq(&atmel_port->lock_tx);
async_tx_ack(atmel_port->desc_tx);
@@ -781,7 +814,9 @@ static void atmel_tx_dma(struct uart_port *port)
struct circ_buf *xmit = &port->state->xmit;
struct dma_chan *chan = atmel_port->chan_tx;
struct dma_async_tx_descriptor *desc;
- struct scatterlist *sg = &atmel_port->sg_tx;
+ struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
+ unsigned int tx_len, part1_len, part2_len, sg_len;
+ dma_addr_t phys_addr;
/* Make sure we have an idle channel */
if (atmel_port->desc_tx != NULL)
@@ -797,18 +832,46 @@ static void atmel_tx_dma(struct uart_port *port)
* Take the port lock to get a
* consistent xmit buffer state.
*/
- sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
- sg_dma_address(sg) = (sg_dma_address(sg) &
- ~(UART_XMIT_SIZE - 1))
- + sg->offset;
- sg_dma_len(sg) = CIRC_CNT_TO_END(xmit->head,
- xmit->tail,
- UART_XMIT_SIZE);
- BUG_ON(!sg_dma_len(sg));
+ tx_len = CIRC_CNT_TO_END(xmit->head,
+ xmit->tail,
+ UART_XMIT_SIZE);
+
+ if (atmel_port->fifo_size) {
+ /* multi data mode */
+ part1_len = (tx_len & ~0x3); /* DWORD access */
+ part2_len = (tx_len & 0x3); /* BYTE access */
+ } else {
+ /* single data (legacy) mode */
+ part1_len = 0;
+ part2_len = tx_len; /* BYTE access only */
+ }
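[Editor's note] For instance, with a FIFO present and tx_len = 13, the masks above yield part1_len = 13 & ~0x3 = 12 (transferred as three DWORDs) and part2_len = 13 & 0x3 = 1 (transferred as a single byte), so the scatterlist built next carries two entries.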
+
+ sg_init_table(sgl, 2);
+ sg_len = 0;
+ phys_addr = sg_dma_address(sg_tx) + xmit->tail;
+ if (part1_len) {
+ sg = &sgl[sg_len++];
+ sg_dma_address(sg) = phys_addr;
+ sg_dma_len(sg) = part1_len;
+
+ phys_addr += part1_len;
+ }
+
+ if (part2_len) {
+ sg = &sgl[sg_len++];
+ sg_dma_address(sg) = phys_addr;
+ sg_dma_len(sg) = part2_len;
+ }
+
+ /*
+ * save tx_len so atmel_complete_tx_dma() will increase
+ * xmit->tail correctly
+ */
+ atmel_port->tx_len = tx_len;
desc = dmaengine_prep_slave_sg(chan,
- sg,
- 1,
+ sgl,
+ sg_len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
@@ -817,7 +880,7 @@ static void atmel_tx_dma(struct uart_port *port)
return;
}
- dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
atmel_port->desc_tx = desc;
desc->callback = atmel_complete_tx_dma;
@@ -877,7 +940,9 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
/* Configure the slave DMA */
memset(&config, 0, sizeof(config));
config.direction = DMA_MEM_TO_DEV;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr_width = (atmel_port->fifo_size) ?
+ DMA_SLAVE_BUSWIDTH_4_BYTES :
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
config.dst_addr = port->mapbase + ATMEL_US_THR;
config.dst_maxburst = 1;
@@ -935,14 +1000,14 @@ static void atmel_rx_from_dma(struct uart_port *port)
/* Reset the UART timeout early so that we don't miss one */
- UART_PUT_CR(port, ATMEL_US_STTTO);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
dmastat = dmaengine_tx_status(chan,
atmel_port->cookie_rx,
&state);
/* Restart a new tasklet if DMA status is error */
if (dmastat == DMA_ERROR) {
dev_dbg(port->dev, "Get residue error, restart tasklet\n");
- UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
tasklet_schedule(&atmel_port->tasklet);
return;
}
@@ -1008,7 +1073,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
- UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}
static int atmel_prepare_rx_dma(struct uart_port *port)
@@ -1118,8 +1183,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
* the moment.
*/
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
- UART_PUT_IDR(port, (ATMEL_US_ENDRX
- | ATMEL_US_TIMEOUT));
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
tasklet_schedule(&atmel_port->tasklet);
}
@@ -1130,7 +1195,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
if (atmel_use_dma_rx(port)) {
if (pending & ATMEL_US_TIMEOUT) {
- UART_PUT_IDR(port, ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ ATMEL_US_TIMEOUT);
tasklet_schedule(&atmel_port->tasklet);
}
}
@@ -1143,8 +1209,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
* End of break detected. If it came along with a
* character, atmel_rx_chars will handle it.
*/
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
- UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+ atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
atmel_port->break_active = 0;
}
}
@@ -1159,7 +1225,8 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
- UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet);
}
}
@@ -1197,7 +1264,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
do {
status = atmel_get_lines_status(port);
- mask = UART_GET_IMR(port);
+ mask = atmel_uart_readl(port, ATMEL_US_IMR);
pending = status & mask;
if (!gpio_handled) {
/*
@@ -1223,7 +1290,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
if (atmel_port->suspended) {
atmel_port->pending |= pending;
atmel_port->pending_status = status;
- UART_PUT_IDR(port, mask);
+ atmel_uart_writel(port, ATMEL_US_IDR, mask);
pm_system_wakeup();
break;
}
@@ -1260,7 +1327,7 @@ static void atmel_tx_pdc(struct uart_port *port)
int count;
/* nothing left to transmit? */
- if (UART_GET_TCR(port))
+ if (atmel_uart_readl(port, ATMEL_PDC_TCR))
return;
xmit->tail += pdc->ofs;
@@ -1272,7 +1339,7 @@ static void atmel_tx_pdc(struct uart_port *port)
/* more to transmit - setup next transfer */
/* disable PDC transmit */
- UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
@@ -1283,12 +1350,14 @@ static void atmel_tx_pdc(struct uart_port *port)
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
- UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
- UART_PUT_TCR(port, count);
+ atmel_uart_writel(port, ATMEL_PDC_TPR,
+ pdc->dma_addr + xmit->tail);
+ atmel_uart_writel(port, ATMEL_PDC_TCR, count);
/* re-enable PDC transmit */
- UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
/* Enable interrupts */
- UART_PUT_IER(port, atmel_port->tx_done_mask);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
} else {
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
@@ -1414,10 +1483,10 @@ static void atmel_rx_from_pdc(struct uart_port *port)
do {
/* Reset the UART timeout early so that we don't miss one */
- UART_PUT_CR(port, ATMEL_US_STTTO);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
- head = UART_GET_RPR(port) - pdc->dma_addr;
+ head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
@@ -1460,8 +1529,8 @@ static void atmel_rx_from_pdc(struct uart_port *port)
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
- UART_PUT_RNPR(port, pdc->dma_addr);
- UART_PUT_RNCR(port, pdc->dma_size);
+ atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
+ atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
@@ -1476,7 +1545,8 @@ static void atmel_rx_from_pdc(struct uart_port *port)
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
- UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
static int atmel_prepare_rx_pdc(struct uart_port *port)
@@ -1509,11 +1579,12 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
atmel_port->pdc_rx_idx = 0;
- UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
- UART_PUT_RCR(port, PDC_BUFFER_SIZE);
+ atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
+ atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
- UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
- UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
+ atmel_uart_writel(port, ATMEL_PDC_RNPR,
+ atmel_port->pdc_rx[1].dma_addr);
+ atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
return 0;
}
@@ -1667,7 +1738,7 @@ static void atmel_set_ops(struct uart_port *port)
static void atmel_get_ip_name(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- int name = UART_GET_IP_NAME(port);
+ int name = atmel_uart_readl(port, ATMEL_US_NAME);
u32 version;
int usart, uart;
/* usart and uart ascii */
@@ -1684,7 +1755,7 @@ static void atmel_get_ip_name(struct uart_port *port)
atmel_port->is_usart = false;
} else {
/* fallback for older SoCs: use version field */
- version = UART_GET_IP_VERSION(port);
+ version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
case 0x302:
case 0x10213:
@@ -1756,7 +1827,7 @@ static int atmel_startup(struct uart_port *port)
* request_irq() is called we could get stuck trying to
* handle an unexpected interrupt
*/
- UART_PUT_IDR(port, -1);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_port->ms_irq_enabled = false;
/*
@@ -1797,6 +1868,32 @@ static int atmel_startup(struct uart_port *port)
atmel_set_ops(port);
}
+ /*
+ * Enable FIFO when available
+ */
+ if (atmel_port->fifo_size) {
+ unsigned int txrdym = ATMEL_US_ONE_DATA;
+ unsigned int rxrdym = ATMEL_US_ONE_DATA;
+ unsigned int fmr;
+
+ atmel_uart_writel(port, ATMEL_US_CR,
+ ATMEL_US_FIFOEN |
+ ATMEL_US_RXFCLR |
+ ATMEL_US_TXFLCLR);
+
+ if (atmel_use_dma_tx(port))
+ txrdym = ATMEL_US_FOUR_DATA;
+
+ fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
+ if (atmel_port->rts_high &&
+ atmel_port->rts_low)
+ fmr |= ATMEL_US_FRTSC |
+ ATMEL_US_RXFTHRES(atmel_port->rts_high) |
+ ATMEL_US_RXFTHRES2(atmel_port->rts_low);
+
+ atmel_uart_writel(port, ATMEL_US_FMR, fmr);
+ }
+
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = atmel_get_lines_status(port);
atmel_port->irq_status = atmel_port->irq_status_prev;
@@ -1804,9 +1901,9 @@ static int atmel_startup(struct uart_port *port)
/*
* Finally, enable the serial port
*/
- UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
- UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
setup_timer(&atmel_port->uart_timer,
atmel_uart_timer_callback,
@@ -1819,13 +1916,14 @@ static int atmel_startup(struct uart_port *port)
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
- UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
- UART_PUT_CR(port, ATMEL_US_STTTO);
+ atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
- UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/* enable PDC controller */
- UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
@@ -1833,14 +1931,15 @@ static int atmel_startup(struct uart_port *port)
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
- UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
- UART_PUT_CR(port, ATMEL_US_STTTO);
+ atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
- UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+ atmel_uart_writel(port, ATMEL_US_IER,
+ ATMEL_US_TIMEOUT);
}
} else {
/* enable receive only */
- UART_PUT_IER(port, ATMEL_US_RXRDY);
+ atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
}
return 0;
@@ -1860,7 +1959,7 @@ static void atmel_flush_buffer(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_tx(port)) {
- UART_PUT_TCR(port, 0);
+ atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
@@ -1892,8 +1991,8 @@ static void atmel_shutdown(struct uart_port *port)
atmel_stop_rx(port);
atmel_stop_tx(port);
- UART_PUT_CR(port, ATMEL_US_RSTSTA);
- UART_PUT_IDR(port, -1);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
/*
@@ -1938,12 +2037,12 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
clk_prepare_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
- UART_PUT_IER(port, atmel_port->backup_imr);
+ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
- atmel_port->backup_imr = UART_GET_IMR(port);
- UART_PUT_IDR(port, -1);
+ atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
/*
* Disable the peripheral clock for this serial port.
@@ -1966,7 +2065,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned int old_mode, mode, imr, quot, baud;
/* save the current mode register */
- mode = old_mode = UART_GET_MR(port);
+ mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
/* reset the mode, clock divisor, parity, stop bits and data size */
mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
@@ -2025,7 +2124,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (atmel_use_pdc_rx(port))
/* need to enable error interrupts */
- UART_PUT_IER(port, port->read_status_mask);
+ atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
/*
* Characters to ignore
@@ -2052,15 +2151,16 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
* transmitter is empty if requested by the caller, so there's
* no need to wait for it here.
*/
- imr = UART_GET_IMR(port);
- UART_PUT_IDR(port, -1);
+ imr = atmel_uart_readl(port, ATMEL_US_IMR);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
/* disable receiver and transmitter */
- UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
/* mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
- UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ atmel_uart_writel(port, ATMEL_US_TTGR,
+ port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {
/* RS232 with hardware handshake (RTS/CTS) */
@@ -2071,7 +2171,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* set the mode, clock divisor, parity, stop bits and data size */
- UART_PUT_MR(port, mode);
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
/*
* when switching the mode, set the RTS line state according to the
@@ -2088,16 +2188,16 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
rts_state = ATMEL_US_RTSEN;
}
- UART_PUT_CR(port, rts_state);
+ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
}
/* set the baud rate */
- UART_PUT_BRGR(port, quot);
- UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
- UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
/* restore interrupts */
- UART_PUT_IER(port, imr);
+ atmel_uart_writel(port, ATMEL_US_IER, imr);
/* CTS flow-control and modem-status interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -2208,18 +2308,18 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
#ifdef CONFIG_CONSOLE_POLL
static int atmel_poll_get_char(struct uart_port *port)
{
- while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+ while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
cpu_relax();
- return UART_GET_CHAR(port);
+ return atmel_uart_read_char(port);
}
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
- while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
cpu_relax();
- UART_PUT_CHAR(port, ch);
+ atmel_uart_write_char(port, ch);
}
#endif
@@ -2324,9 +2424,9 @@ struct platform_device *atmel_default_console_device; /* the serial console devi
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
static void atmel_console_putchar(struct uart_port *port, int ch)
{
- while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
cpu_relax();
- UART_PUT_CHAR(port, ch);
+ atmel_uart_write_char(port, ch);
}
/*
@@ -2342,12 +2442,13 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
/*
* First, save IMR and then disable interrupts
*/
- imr = UART_GET_IMR(port);
- UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
+ imr = atmel_uart_readl(port, ATMEL_US_IMR);
+ atmel_uart_writel(port, ATMEL_US_IDR,
+ ATMEL_US_RXRDY | atmel_port->tx_done_mask);
/* Store PDC transmit status and disable it */
- pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
- UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+ pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
uart_console_write(port, s, count, atmel_console_putchar);
@@ -2356,15 +2457,15 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
* and restore IMR
*/
do {
- status = UART_GET_CSR(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
} while (!(status & ATMEL_US_TXRDY));
/* Restore PDC transmit status */
if (pdc_tx)
- UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
/* set interrupts back the way they were */
- UART_PUT_IER(port, imr);
+ atmel_uart_writel(port, ATMEL_US_IER, imr);
}
/*
@@ -2380,17 +2481,17 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
* If the baud rate generator isn't running, the port wasn't
* initialized by the boot loader.
*/
- quot = UART_GET_BRGR(port) & ATMEL_US_CD;
+ quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
if (!quot)
return;
- mr = UART_GET_MR(port) & ATMEL_US_CHRL;
+ mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
if (mr == ATMEL_US_CHRL_8)
*bits = 8;
else
*bits = 7;
- mr = UART_GET_MR(port) & ATMEL_US_PAR;
+ mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
if (mr == ATMEL_US_PAR_EVEN)
*parity = 'e';
else if (mr == ATMEL_US_PAR_ODD)
@@ -2423,9 +2524,9 @@ static int __init atmel_console_setup(struct console *co, char *options)
if (ret)
return ret;
- UART_PUT_IDR(port, -1);
- UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
- UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -2532,7 +2633,8 @@ static int atmel_serial_suspend(struct platform_device *pdev,
if (atmel_is_console_port(port) && console_suspend_enabled) {
/* Drain the TX shifter */
- while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
+ while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
+ ATMEL_US_TXEMPTY))
cpu_relax();
}
@@ -2599,6 +2701,48 @@ static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev)
return 0;
}
+static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
+ struct platform_device *pdev)
+{
+ port->fifo_size = 0;
+ port->rts_low = 0;
+ port->rts_high = 0;
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "atmel,fifo-size",
+ &port->fifo_size))
+ return;
+
+ if (!port->fifo_size)
+ return;
+
+ if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
+ port->fifo_size = 0;
+ dev_err(&pdev->dev, "Invalid FIFO size\n");
+ return;
+ }
+
+ /*
+ * 0 <= rts_low <= rts_high <= fifo_size
+ * Once their CTS line is asserted by the remote peer, some x86 UARTs
+ * tend to flush their internal TX FIFO, commonly up to 16 data, before
+ * actually stopping to send new data. So we try to set the RTS High
+ * Threshold to a reasonably high value, respecting this 16-data
+ * empirical rule when possible.
+ */
+ port->rts_high = max_t(int, port->fifo_size >> 1,
+ port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
+ port->rts_low = max_t(int, port->fifo_size >> 2,
+ port->fifo_size - ATMEL_RTS_LOW_OFFSET);
+
+ dev_info(&pdev->dev, "Using FIFO (%u data)\n",
+ port->fifo_size);
+ dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
+ port->rts_high);
+ dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
+ port->rts_low);
+}
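[Editor's note] Worked example: for the common fifo_size = 32, rts_high = max(32 >> 1, 32 - 16) = 16 and rts_low = max(32 >> 2, 32 - 20) = 12, satisfying 0 <= rts_low <= rts_high <= fifo_size. The max_t(int, ...) form keeps the subtraction from going negative on small FIFOs, e.g. fifo_size = 16 yields rts_high = 8, rts_low = 4.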
+
static int atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
@@ -2635,6 +2779,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
port = &atmel_ports[ret];
port->backup_imr = 0;
port->uart.line = ret;
+ atmel_serial_probe_fifos(port, pdev);
spin_lock_init(&port->lock_suspended);
@@ -2684,8 +2829,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
clk_prepare_enable(port->clk);
if (rs485_enabled) {
- UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
- UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+ atmel_uart_writel(&port->uart, ATMEL_US_MR,
+ ATMEL_US_USMODE_NORMAL);
+ atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
}
/*
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..6813e316e9ff 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -10,6 +10,8 @@
#include <linux/of_address.h>
#include <hwregs/ser_defs.h>
+#include "serial_mctrl_gpio.h"
+
#define DRV_NAME "etraxfs-uart"
#define UART_NR CONFIG_ETRAX_SERIAL_PORTS
@@ -28,10 +30,7 @@ struct uart_cris_port {
void __iomem *regi_ser;
- struct gpio_desc *dtr_pin;
- struct gpio_desc *dsr_pin;
- struct gpio_desc *ri_pin;
- struct gpio_desc *cd_pin;
+ struct mctrl_gpios *gpios;
int write_ongoing;
};
@@ -112,17 +111,10 @@ cris_console_setup(struct console *co, char *options)
return 0;
}
-static struct tty_driver *cris_console_device(struct console *co, int *index)
-{
- struct uart_driver *p = co->data;
- *index = co->index;
- return p->tty_driver;
-}
-
static struct console cris_console = {
.name = "ttyS",
.write = cris_console_write,
- .device = cris_console_device,
+ .device = uart_console_device,
.setup = cris_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
@@ -373,14 +365,6 @@ static void etraxfs_uart_stop_rx(struct uart_port *port)
REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
}
-static void etraxfs_uart_enable_ms(struct uart_port *port)
-{
-}
-
-static void check_modem_status(struct uart_cris_port *up)
-{
-}
-
static unsigned int etraxfs_uart_tx_empty(struct uart_port *port)
{
struct uart_cris_port *up = (struct uart_cris_port *)port;
@@ -404,21 +388,9 @@ static unsigned int etraxfs_uart_get_mctrl(struct uart_port *port)
ret = 0;
if (crisv32_serial_get_rts(up))
ret |= TIOCM_RTS;
- /* DTR is active low */
- if (up->dtr_pin && !gpiod_get_raw_value(up->dtr_pin))
- ret |= TIOCM_DTR;
- /* CD is active low */
- if (up->cd_pin && !gpiod_get_raw_value(up->cd_pin))
- ret |= TIOCM_CD;
- /* RI is active low */
- if (up->ri_pin && !gpiod_get_raw_value(up->ri_pin))
- ret |= TIOCM_RI;
- /* DSR is active low */
- if (up->dsr_pin && !gpiod_get_raw_value(up->dsr_pin))
- ret |= TIOCM_DSR;
if (crisv32_serial_get_cts(up))
ret |= TIOCM_CTS;
- return ret;
+ return mctrl_gpio_get(up->gpios, &ret);
}
static void etraxfs_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -426,15 +398,7 @@ static void etraxfs_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_cris_port *up = (struct uart_cris_port *)port;
crisv32_serial_set_rts(up, mctrl & TIOCM_RTS ? 1 : 0, 0);
- /* DTR is active low */
- if (up->dtr_pin)
- gpiod_set_raw_value(up->dtr_pin, mctrl & TIOCM_DTR ? 0 : 1);
- /* RI is active low */
- if (up->ri_pin)
- gpiod_set_raw_value(up->ri_pin, mctrl & TIOCM_RNG ? 0 : 1);
- /* CD is active low */
- if (up->cd_pin)
- gpiod_set_raw_value(up->cd_pin, mctrl & TIOCM_CD ? 0 : 1);
+ mctrl_gpio_set(up->gpios, mctrl);
}
static void etraxfs_uart_break_ctl(struct uart_port *port, int break_state)
@@ -598,7 +562,6 @@ ser_interrupt(int irq, void *dev_id)
receive_chars_no_dma(up);
handled = 1;
}
- check_modem_status(up);
if (masked_intr.tr_rdy) {
transmit_chars_no_dma(up);
@@ -862,7 +825,6 @@ static const struct uart_ops etraxfs_uart_pops = {
.start_tx = etraxfs_uart_start_tx,
.send_xchar = etraxfs_uart_send_xchar,
.stop_rx = etraxfs_uart_stop_rx,
- .enable_ms = etraxfs_uart_enable_ms,
.break_ctl = etraxfs_uart_break_ctl,
.startup = etraxfs_uart_startup,
.shutdown = etraxfs_uart_shutdown,
@@ -930,11 +892,12 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
up->irq = irq_of_parse_and_map(np, 0);
up->regi_ser = of_iomap(np, 0);
- up->dtr_pin = devm_gpiod_get_optional(&pdev->dev, "dtr");
- up->dsr_pin = devm_gpiod_get_optional(&pdev->dev, "dsr");
- up->ri_pin = devm_gpiod_get_optional(&pdev->dev, "ri");
- up->cd_pin = devm_gpiod_get_optional(&pdev->dev, "cd");
up->port.dev = &pdev->dev;
+
+ up->gpios = mctrl_gpio_init(&pdev->dev, 0);
+ if (IS_ERR(up->gpios))
+ return PTR_ERR(up->gpios);
+
cris_serial_port_init(&up->port, dev_id);
etraxfs_uart_ports[dev_id] = up;
@@ -950,7 +913,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
port = platform_get_drvdata(pdev);
uart_remove_one_port(&etraxfs_uart_driver, port);
- etraxfs_uart_ports[pdev->id] = NULL;
+ etraxfs_uart_ports[port->line] = NULL;
return 0;
}
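The conversion above replaces four hand-rolled modem-control GPIOs with the shared serial_mctrl_gpio helpers. A minimal sketch of how a driver consumes that API, assuming a hypothetical my_port wrapper struct and my_hw_cts_asserted() helper (the mctrl_gpio_* calls themselves are the ones used in the patch):

static unsigned int my_get_mctrl(struct uart_port *port)
{
	struct my_port *up = container_of(port, struct my_port, port);
	unsigned int ret = 0;

	/* bits the hardware tracks itself go in first */
	if (my_hw_cts_asserted(up))	/* hypothetical helper */
		ret |= TIOCM_CTS;

	/* fills in DTR/DSR/RI/CD from GPIOs, honouring the active-low
	 * flags declared in the device tree */
	return mctrl_gpio_get(up->gpios, &ret);
}

static void my_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct my_port *up = container_of(port, struct my_port, port);

	mctrl_gpio_set(up->gpios, mctrl);	/* drives the output GPIOs */
}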
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..fe3d41cc8416 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -216,6 +216,8 @@ struct imx_port {
unsigned int tx_bytes;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ unsigned int saved_reg[10];
+ bool context_saved;
};
struct imx_port_ucrs {
@@ -700,7 +702,8 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
goto out;
- tty_insert_flip_char(port, rx, flg);
+ if (tty_insert_flip_char(port, rx, flg) == 0)
+ sport->port.icount.buf_overrun++;
}
out:
@@ -766,7 +769,6 @@ static irqreturn_t imx_int(int irq, void *dev_id)
writel(USR1_AWAKE, sport->port.membase + USR1);
if (sts2 & USR2_ORE) {
- dev_err(sport->port.dev, "Rx FIFO overrun\n");
sport->port.icount.overrun++;
writel(USR2_ORE, sport->port.membase + USR2);
}
@@ -921,8 +923,13 @@ static void dma_rx_callback(void *data)
dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
if (count) {
- if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ))
- tty_insert_flip_string(port, sport->rx_buf, count);
+ if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
+ int bytes = tty_insert_flip_string(port, sport->rx_buf,
+ count);
+
+ if (bytes != count)
+ sport->port.icount.buf_overrun++;
+ }
tty_flip_buffer_push(port);
start_rx_dma(sport);
@@ -1121,11 +1128,6 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
- /* Can we enable the DMA support? */
- if (is_imx6q_uart(sport) && !uart_console(port) &&
- !sport->dma_is_inited)
- imx_uart_dma_init(sport);
-
spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
i = 100;
@@ -1143,9 +1145,6 @@ static int imx_startup(struct uart_port *port)
writel(USR1_RTSD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
- if (sport->dma_is_inited && !sport->dma_is_enabled)
- imx_enable_dma(sport);
-
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
@@ -1316,6 +1315,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
} else {
ucr2 |= UCR2_CTSC;
}
+
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port)
+ && !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
} else {
termios->c_cflag &= ~CRTSCTS;
}
@@ -1432,6 +1436,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -1625,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
int retval;
- retval = clk_enable(sport->clk_per);
+ retval = clk_prepare_enable(sport->clk_per);
if (retval)
return;
- retval = clk_enable(sport->clk_ipg);
+ retval = clk_prepare_enable(sport->clk_ipg);
if (retval) {
- clk_disable(sport->clk_per);
+ clk_disable_unprepare(sport->clk_per);
return;
}
@@ -1669,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
- clk_disable(sport->clk_ipg);
- clk_disable(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_per);
}
/*
@@ -1771,15 +1777,7 @@ imx_console_setup(struct console *co, char *options)
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
- clk_disable(sport->clk_ipg);
- if (retval) {
- clk_unprepare(sport->clk_ipg);
- goto error_console;
- }
-
- retval = clk_prepare(sport->clk_per);
- if (retval)
- clk_disable_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
error_console:
return retval;
@@ -1811,36 +1809,6 @@ static struct uart_driver imx_reg = {
.cons = IMX_CONSOLE,
};
-static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct imx_port *sport = platform_get_drvdata(dev);
- unsigned int val;
-
- /* enable wakeup from i.MX UART */
- val = readl(sport->port.membase + UCR3);
- val |= UCR3_AWAKEN;
- writel(val, sport->port.membase + UCR3);
-
- uart_suspend_port(&imx_reg, &sport->port);
-
- return 0;
-}
-
-static int serial_imx_resume(struct platform_device *dev)
-{
- struct imx_port *sport = platform_get_drvdata(dev);
- unsigned int val;
-
- /* disable wakeup from i.MX UART */
- val = readl(sport->port.membase + UCR3);
- val &= ~UCR3_AWAKEN;
- writel(val, sport->port.membase + UCR3);
-
- uart_resume_port(&imx_reg, &sport->port);
-
- return 0;
-}
-
#ifdef CONFIG_OF
/*
* This function returns 1 iff pdev isn't a device instantiated by dt, 0 iff it
@@ -1902,7 +1870,7 @@ static int serial_imx_probe(struct platform_device *pdev)
{
struct imx_port *sport;
void __iomem *base;
- int ret = 0;
+ int ret = 0, reg;
struct resource *res;
int txirq, rxirq, rtsirq;
@@ -1957,6 +1925,19 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.uartclk = clk_get_rate(sport->clk_per);
+ /* For register access, we only need to enable the ipg clock. */
+ ret = clk_prepare_enable(sport->clk_ipg);
+ if (ret)
+ return ret;
+
+ /* Disable interrupts before requesting them */
+ reg = readl_relaxed(sport->port.membase + UCR1);
+ reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
+ UCR1_TXMPTYEN | UCR1_RTSDEN);
+ writel_relaxed(reg, sport->port.membase + UCR1);
+
+ clk_disable_unprepare(sport->clk_ipg);
+
/*
* Allocate the IRQ(s). i.MX1 has three interrupts whereas later
* chips only have one interrupt.
@@ -1992,16 +1973,135 @@ static int serial_imx_remove(struct platform_device *pdev)
return uart_remove_one_port(&imx_reg, &sport->port);
}
+static void serial_imx_restore_context(struct imx_port *sport)
+{
+ if (!sport->context_saved)
+ return;
+
+ writel(sport->saved_reg[4], sport->port.membase + UFCR);
+ writel(sport->saved_reg[5], sport->port.membase + UESC);
+ writel(sport->saved_reg[6], sport->port.membase + UTIM);
+ writel(sport->saved_reg[7], sport->port.membase + UBIR);
+ writel(sport->saved_reg[8], sport->port.membase + UBMR);
+ writel(sport->saved_reg[9], sport->port.membase + IMX21_UTS);
+ writel(sport->saved_reg[0], sport->port.membase + UCR1);
+ writel(sport->saved_reg[1] | UCR2_SRST, sport->port.membase + UCR2);
+ writel(sport->saved_reg[2], sport->port.membase + UCR3);
+ writel(sport->saved_reg[3], sport->port.membase + UCR4);
+ sport->context_saved = false;
+}
+
+static void serial_imx_save_context(struct imx_port *sport)
+{
+ /* Save necessary regs */
+ sport->saved_reg[0] = readl(sport->port.membase + UCR1);
+ sport->saved_reg[1] = readl(sport->port.membase + UCR2);
+ sport->saved_reg[2] = readl(sport->port.membase + UCR3);
+ sport->saved_reg[3] = readl(sport->port.membase + UCR4);
+ sport->saved_reg[4] = readl(sport->port.membase + UFCR);
+ sport->saved_reg[5] = readl(sport->port.membase + UESC);
+ sport->saved_reg[6] = readl(sport->port.membase + UTIM);
+ sport->saved_reg[7] = readl(sport->port.membase + UBIR);
+ sport->saved_reg[8] = readl(sport->port.membase + UBMR);
+ sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
+ sport->context_saved = true;
+}
+
+static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
+{
+ unsigned int val;
+
+ val = readl(sport->port.membase + UCR3);
+ if (on)
+ val |= UCR3_AWAKEN;
+ else
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
+
+ val = readl(sport->port.membase + UCR1);
+ if (on)
+ val |= UCR1_RTSDEN;
+ else
+ val &= ~UCR1_RTSDEN;
+ writel(val, sport->port.membase + UCR1);
+}
+
+static int imx_serial_port_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(sport->clk_ipg);
+ if (ret)
+ return ret;
+
+ serial_imx_save_context(sport);
+
+ clk_disable(sport->clk_ipg);
+
+ return 0;
+}
+
+static int imx_serial_port_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(sport->clk_ipg);
+ if (ret)
+ return ret;
+
+ serial_imx_restore_context(sport);
+
+ clk_disable(sport->clk_ipg);
+
+ return 0;
+}
+
+static int imx_serial_port_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ /* enable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, true);
+
+ uart_suspend_port(&imx_reg, &sport->port);
+
+ return 0;
+}
+
+static int imx_serial_port_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ /* disable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, false);
+
+ uart_resume_port(&imx_reg, &sport->port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_serial_port_pm_ops = {
+ .suspend_noirq = imx_serial_port_suspend_noirq,
+ .resume_noirq = imx_serial_port_resume_noirq,
+ .suspend = imx_serial_port_suspend,
+ .resume = imx_serial_port_resume,
+};
+
static struct platform_driver serial_imx_driver = {
.probe = serial_imx_probe,
.remove = serial_imx_remove,
- .suspend = serial_imx_suspend,
- .resume = serial_imx_resume,
.id_table = imx_uart_devtype,
.driver = {
.name = "imx-uart",
.of_match_table = imx_uart_dt_ids,
+ .pm = &imx_serial_port_pm_ops,
},
};
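Moving from the platform_driver suspend/resume hooks to dev_pm_ops lets the driver split its work across suspend phases: wakeup configuration runs in the ordinary .suspend phase, while register save/restore runs in .suspend_noirq/.resume_noirq once device interrupts are off. A hedged sketch of the wiring, with foo_* names standing in for a real driver:

static const struct dev_pm_ops foo_pm_ops = {
	/* run late/early, with device interrupts disabled */
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
	/* run in the ordinary suspend/resume phase */
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* replaces .suspend/.resume */
	},
};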
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 4ccc0397664c..b88832e8ee82 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -21,7 +21,6 @@
*/
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -740,7 +739,6 @@ static const struct of_device_id ltq_asc_match[] = {
{ .compatible = DRVNAME },
{},
};
-MODULE_DEVICE_TABLE(of, ltq_asc_match);
static struct platform_driver lqasc_driver = {
.driver = {
@@ -764,8 +762,4 @@ init_lqasc(void)
return ret;
}
-
-module_init(init_lqasc);
-
-MODULE_DESCRIPTION("Lantiq serial port driver");
-MODULE_LICENSE("GPL");
+device_initcall(init_lqasc);
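The Lantiq diff drops module-only boilerplate from a driver that is never built as a module and switches to device_initcall(). The pattern, sketched for a hypothetical built-in-only driver:

static int __init foo_init(void)
{
	/* no __exit counterpart: a built-in-only driver can never
	 * be unloaded, so module_exit() would be dead code */
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);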
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 35c55505b3eb..b90e7b30468b 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -392,7 +392,6 @@ static irqreturn_t men_z135_intr(int irq, void *data)
struct men_z135_port *uart = (struct men_z135_port *)data;
struct uart_port *port = &uart->port;
bool handled = false;
- unsigned long flags;
int irq_id;
uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
@@ -401,7 +400,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
if (!irq_id)
goto out;
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->lock);
/* It's safe to write to IIR[7:6] RXC[9:8] */
iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
@@ -427,7 +426,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
handled = true;
}
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock(&port->lock);
out:
return IRQ_RETVAL(handled);
}
@@ -717,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
- spin_lock(&port->lock);
+ spin_lock_irq(&port->lock);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -725,7 +724,7 @@ static void men_z135_set_termios(struct uart_port *port,
iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
uart_update_timeout(port, termios->c_cflag, baud);
- spin_unlock(&port->lock);
+ spin_unlock_irq(&port->lock);
}
static const char *men_z135_type(struct uart_port *port)
@@ -840,7 +839,6 @@ static int men_z135_probe(struct mcb_device *mdev,
uart->port.membase = NULL;
uart->mdev = mdev;
- spin_lock_init(&uart->port.lock);
spin_lock_init(&uart->lock);
err = uart_add_one_port(&men_z135_driver, &uart->port);
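Two locking fixes travel together in the men_z135 hunks: inside the hard-IRQ handler, interrupts are already disabled on the local CPU, so the irqsave/irqrestore pair is pure overhead; in set_termios(), which runs in process context, the plain spin_lock() was insufficient and becomes spin_lock_irq(). A minimal sketch of the convention, assuming a hypothetical foo driver:

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_port *fp = data;

	spin_lock(&fp->port.lock);	/* hardirq context: IRQs already off */
	/* ... service the interrupt ... */
	spin_unlock(&fp->port.lock);
	return IRQ_HANDLED;
}

static void foo_set_termios(struct uart_port *port, struct ktermios *new,
			    struct ktermios *old)
{
	spin_lock_irq(&port->lock);	/* process context: must mask IRQs */
	/* ... program the divisor ... */
	spin_unlock_irq(&port->lock);
}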
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 6fc07eb9d74e..41de374d9784 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -239,8 +239,9 @@ static int mpc52xx_psc_tx_rdy(struct uart_port *port)
static int mpc52xx_psc_tx_empty(struct uart_port *port)
{
- return in_be16(&PSC(port)->mpc52xx_psc_status)
- & MPC52xx_PSC_SR_TXEMP;
+ u16 sts = in_be16(&PSC(port)->mpc52xx_psc_status);
+
+ return (sts & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
}
static void mpc52xx_psc_start_tx(struct uart_port *port)
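The tx_empty() fix above is about the contract with serial core: the method must return TIOCSER_TEMT when the transmitter has drained and 0 otherwise, not the raw status bit, whose numeric value the core does not interpret. Sketch, with FOO_* register names as placeholders:

static unsigned int foo_tx_empty(struct uart_port *port)
{
	u16 sts = readw(port->membase + FOO_STATUS);	/* hypothetical reg */

	return (sts & FOO_SR_TXEMP) ? TIOCSER_TEMT : 0;
}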
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 13cf7738fbdc..7c7f30809849 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -100,6 +100,8 @@
#define AUART_CTRL2_TXE (1 << 8)
#define AUART_CTRL2_UARTEN (1 << 0)
+#define AUART_LINECTRL_BAUD_DIV_MAX 0x003fffc0
+#define AUART_LINECTRL_BAUD_DIV_MIN 0x000000ec
#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
@@ -659,7 +661,7 @@ static void mxs_auart_settermios(struct uart_port *u,
{
struct mxs_auart_port *s = to_auart_port(u);
u32 bm, ctrl, ctrl2, div;
- unsigned int cflag, baud;
+ unsigned int cflag, baud, baud_min, baud_max;
cflag = termios->c_cflag;
@@ -752,7 +754,9 @@ static void mxs_auart_settermios(struct uart_port *u,
}
/* set baud rate */
- baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
+ baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX);
+ baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
+ baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
div = u->uartclk * 32 / baud;
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
@@ -842,7 +846,7 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
-static void mxs_auart_reset(struct uart_port *u)
+static void mxs_auart_reset_deassert(struct uart_port *u)
{
int i;
unsigned int reg;
@@ -858,6 +862,30 @@ static void mxs_auart_reset(struct uart_port *u)
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
}
+static void mxs_auart_reset_assert(struct uart_port *u)
+{
+ int i;
+ u32 reg;
+
+ reg = readl(u->membase + AUART_CTRL0);
+ /* if already in reset state, keep it untouched */
+ if (reg & AUART_CTRL0_SFTRST)
+ return;
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+ writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_SET);
+
+ for (i = 0; i < 1000; i++) {
+ reg = readl(u->membase + AUART_CTRL0);
+ /* reset is finished when the clock is gated */
+ if (reg & AUART_CTRL0_CLKGATE)
+ return;
+ udelay(10);
+ }
+
+ dev_err(u->dev, "Failed to reset the unit\n");
+}
+
static int mxs_auart_startup(struct uart_port *u)
{
int ret;
@@ -867,7 +895,13 @@ static int mxs_auart_startup(struct uart_port *u)
if (ret)
return ret;
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+ if (uart_console(u)) {
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+ } else {
+ /* reset the unit to a well known state */
+ mxs_auart_reset_assert(u);
+ mxs_auart_reset_deassert(u);
+ }
writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
@@ -899,12 +933,14 @@ static void mxs_auart_shutdown(struct uart_port *u)
if (auart_dma_enabled(s))
mxs_auart_dma_exit(s);
- writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
-
- writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
- u->membase + AUART_INTR_CLR);
-
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+ if (uart_console(u)) {
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR_CLR);
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+ } else {
+ mxs_auart_reset_assert(u);
+ }
clk_disable_unprepare(s->clk);
}
@@ -1291,7 +1327,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
auart_port[s->port.line] = s;
- mxs_auart_reset(&s->port);
+ mxs_auart_reset_deassert(&s->port);
ret = uart_add_one_port(&auart_driver, &s->port);
if (ret)
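For reference, the MXS block reset protocol behind mxs_auart_reset_assert(), condensed from the patch (register names as used above): clear the clock gate, set SFTRST, then poll until the block re-gates its own clock to signal the reset latched.

writel(AUART_CTRL0_CLKGATE, base + AUART_CTRL0_CLR);	/* ungate clock */
writel(AUART_CTRL0_SFTRST,  base + AUART_CTRL0_SET);	/* assert reset */

for (i = 0; i < 1000; i++) {
	/* the block gates its own clock once reset has taken hold */
	if (readl(base + AUART_CTRL0) & AUART_CTRL0_CLKGATE)
		break;
	udelay(10);
}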
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 67d0c213b1c7..856686d6dcdb 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -53,7 +53,6 @@
#include "samsung.h"
#if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \
- defined(CONFIG_DEBUG_LL) && \
!defined(MODULE)
extern void printascii(const char *);
@@ -295,15 +294,6 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
if (ourport->tx_mode != S3C24XX_TX_DMA)
enable_tx_dma(ourport);
- while (xmit->tail & (dma_get_cache_alignment() - 1)) {
- if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
- return 0;
- wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- count--;
- }
-
dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
@@ -342,7 +332,9 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
return;
}
- if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
+ if (!ourport->dma || !ourport->dma->tx_chan ||
+ count < ourport->min_dma_size ||
+ xmit->tail & (dma_get_cache_alignment() - 1))
s3c24xx_serial_start_tx_pio(ourport);
else
s3c24xx_serial_start_tx_dma(ourport, count);
@@ -736,15 +728,20 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- int count;
+ int count, dma_count = 0;
spin_lock_irqsave(&port->lock, flags);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
- if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
- s3c24xx_serial_start_tx_dma(ourport, count);
- goto out;
+ if (ourport->dma && ourport->dma->tx_chan &&
+ count >= ourport->min_dma_size) {
+ int align = dma_get_cache_alignment() -
+ (xmit->tail & (dma_get_cache_alignment() - 1));
+ if (count-align >= ourport->min_dma_size) {
+ dma_count = count-align;
+ count = align;
+ }
}
if (port->x_char) {
@@ -765,14 +762,24 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
/* try and drain the buffer... */
- count = port->fifosize;
- while (!uart_circ_empty(xmit) && count-- > 0) {
+ if (count > port->fifosize) {
+ count = port->fifosize;
+ dma_count = 0;
+ }
+
+ while (!uart_circ_empty(xmit) && count > 0) {
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
+ count--;
+ }
+
+ if (!count && dma_count) {
+ s3c24xx_serial_start_tx_dma(ourport, dma_count);
+ goto out;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
@@ -1838,6 +1845,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
else if (ourport->info->fifosize)
ourport->port.fifosize = ourport->info->fifosize;
+ /*
+ * DMA transfers must be aligned at least to cache line size,
+ * so find minimal transfer size suitable for DMA mode
+ */
+ ourport->min_dma_size = max_t(int, ourport->port.fifosize,
+ dma_get_cache_alignment());
+
probe_index++;
dbg("%s: initialising port %p...\n", __func__, ourport);
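The Samsung changes hinge on one invariant: a TX DMA transfer must start on a cache-line-aligned address and be at least a cache line long, otherwise the unaligned head is drained by PIO first. A sketch of the split computation used above (pio_count/dma_count are illustrative names):

unsigned int mask = dma_get_cache_alignment() - 1;

/* smallest transfer worth handing to the DMA engine */
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
			      dma_get_cache_alignment());

/* bytes to drain by PIO so the DMA start address becomes aligned */
unsigned int head = dma_get_cache_alignment() - (xmit->tail & mask);

if (count - head >= ourport->min_dma_size) {
	pio_count = head;		/* drained in the IRQ loop first */
	dma_count = count - head;	/* then handed to the DMA path */
}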
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index d275032aa68d..fc5deaa4f382 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -82,6 +82,7 @@ struct s3c24xx_uart_port {
unsigned char tx_claimed;
unsigned int pm_level;
unsigned long baudclk_rate;
+ unsigned int min_dma_size;
unsigned int rx_irq;
unsigned int tx_irq;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..72ffd0dcab78 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -29,6 +31,7 @@
#include <linux/uaccess.h>
#define SC16IS7XX_NAME "sc16is7xx"
+#define SC16IS7XX_MAX_DEVS 8
/* SC16IS7XX register definitions */
#define SC16IS7XX_RHR_REG (0x00) /* RX FIFO */
@@ -312,14 +315,14 @@ struct sc16is7xx_one_config {
struct sc16is7xx_one {
struct uart_port port;
+ u8 line;
struct kthread_work tx_work;
struct kthread_work reg_work;
struct sc16is7xx_one_config config;
};
struct sc16is7xx_port {
- struct uart_driver uart;
- struct sc16is7xx_devtype *devtype;
+ const struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -332,16 +335,31 @@ struct sc16is7xx_port {
struct sc16is7xx_one p[0];
};
+static unsigned long sc16is7xx_lines;
+
+static struct uart_driver sc16is7xx_uart = {
+ .owner = THIS_MODULE,
+ .dev_name = "ttySC",
+ .nr = SC16IS7XX_MAX_DEVS,
+};
+
#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
+static int sc16is7xx_line(struct uart_port *port)
+{
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ return one->line;
+}
+
static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val = 0;
+ const u8 line = sc16is7xx_line(port);
- regmap_read(s->regmap,
- (reg << SC16IS7XX_REG_SHIFT) | port->line, &val);
+ regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
return val;
}
@@ -349,21 +367,55 @@ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ const u8 line = sc16is7xx_line(port);
+
+ regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
+}
+
+static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ const u8 line = sc16is7xx_line(port);
+ u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
+
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+ regcache_cache_bypass(s->regmap, false);
+}
+
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ const u8 line = sc16is7xx_line(port);
+ u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
- regmap_write(s->regmap,
- (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
+ regcache_cache_bypass(s->regmap, true);
+ regmap_raw_write(s->regmap, addr, s->buf, to_send);
+ regcache_cache_bypass(s->regmap, false);
}
static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
u8 mask, u8 val)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ const u8 line = sc16is7xx_line(port);
- regmap_update_bits(s->regmap,
- (reg << SC16IS7XX_REG_SHIFT) | port->line,
+ regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
mask, val);
}
+static int sc16is7xx_alloc_line(void)
+{
+ int i;
+
+ BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+
+ for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
+ if (!test_and_set_bit(i, &sc16is7xx_lines))
+ break;
+
+ return i;
+}
static void sc16is7xx_power(struct uart_port *port, int on)
{
@@ -488,7 +540,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
if (unlikely(rxlen >= sizeof(s->buf))) {
dev_warn_ratelimited(port->dev,
- "Port %i: Possible RX FIFO overrun: %d\n",
+ "ttySC%i: Possible RX FIFO overrun: %d\n",
port->line, rxlen);
port->icount.buf_overrun++;
/* Ensure sanity of RX level */
@@ -508,10 +560,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
bytes_read = 1;
} else {
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
- s->buf, rxlen);
- regcache_cache_bypass(s->regmap, false);
+ sc16is7xx_fifo_read(port, rxlen);
bytes_read = rxlen;
}
@@ -591,9 +640,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
s->buf[i] = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
}
- regcache_cache_bypass(s->regmap, true);
- regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
- regcache_cache_bypass(s->regmap, false);
+
+ sc16is7xx_fifo_write(port, to_send);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -633,7 +681,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
break;
default:
dev_err_ratelimited(port->dev,
- "Port %i: Unexpected interrupt: %x",
+ "ttySC%i: Unexpected interrupt: %x",
port->line, iir);
break;
}
@@ -645,7 +693,7 @@ static void sc16is7xx_ist(struct kthread_work *ws)
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
int i;
- for (i = 0; i < s->uart.nr; ++i)
+ for (i = 0; i < s->devtype->nr_uart; ++i)
sc16is7xx_port_irq(s, i);
}
@@ -1083,7 +1131,7 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
#endif
static int sc16is7xx_probe(struct device *dev,
- struct sc16is7xx_devtype *devtype,
+ const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq, unsigned long flags)
{
struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
@@ -1118,23 +1166,13 @@ static int sc16is7xx_probe(struct device *dev,
s->devtype = devtype;
dev_set_drvdata(dev, s);
- /* Register UART driver */
- s->uart.owner = THIS_MODULE;
- s->uart.dev_name = "ttySC";
- s->uart.nr = devtype->nr_uart;
- ret = uart_register_driver(&s->uart);
- if (ret) {
- dev_err(dev, "Registering UART driver failed\n");
- goto out_clk;
- }
-
init_kthread_worker(&s->kworker);
init_kthread_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
ret = PTR_ERR(s->kworker_task);
- goto out_uart;
+ goto out_clk;
}
sched_setscheduler(s->kworker_task, SCHED_FIFO, &sched_param);
@@ -1158,8 +1196,8 @@ static int sc16is7xx_probe(struct device *dev,
#endif
for (i = 0; i < devtype->nr_uart; ++i) {
+ s->p[i].line = i;
/* Initialize port data */
- s->p[i].port.line = i;
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_SC16IS7XX;
@@ -1169,6 +1207,12 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
s->p[i].port.ops = &sc16is7xx_ops;
+ s->p[i].port.line = sc16is7xx_alloc_line();
+ if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+ ret = -ENOMEM;
+ goto out_ports;
+ }
+
/* Disable all interrupts */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
@@ -1179,7 +1223,7 @@ static int sc16is7xx_probe(struct device *dev,
init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
- uart_add_one_port(&s->uart, &s->p[i].port);
+ uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
/* Go to suspend mode */
sc16is7xx_power(&s->p[i].port, 0);
}
@@ -1190,8 +1234,11 @@ static int sc16is7xx_probe(struct device *dev,
if (!ret)
return 0;
- for (i = 0; i < s->uart.nr; i++)
- uart_remove_one_port(&s->uart, &s->p[i].port);
+out_ports:
+ for (i--; i >= 0; i--) {
+ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+ }
#ifdef CONFIG_GPIOLIB
if (devtype->nr_gpio)
@@ -1201,9 +1248,6 @@ out_thread:
#endif
kthread_stop(s->kworker_task);
-out_uart:
- uart_unregister_driver(&s->uart);
-
out_clk:
if (!IS_ERR(s->clk))
clk_disable_unprepare(s->clk);
@@ -1221,15 +1265,15 @@ static int sc16is7xx_remove(struct device *dev)
gpiochip_remove(&s->gpio);
#endif
- for (i = 0; i < s->uart.nr; i++) {
- uart_remove_one_port(&s->uart, &s->p[i].port);
+ for (i = 0; i < s->devtype->nr_uart; i++) {
+ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, &sc16is7xx_lines);
sc16is7xx_power(&s->p[i].port, 0);
}
flush_kthread_worker(&s->kworker);
kthread_stop(s->kworker_task);
- uart_unregister_driver(&s->uart);
if (!IS_ERR(s->clk))
clk_disable_unprepare(s->clk);
@@ -1259,7 +1303,7 @@ static struct regmap_config regcfg = {
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
- struct sc16is7xx_devtype *devtype;
+ const struct sc16is7xx_devtype *devtype;
unsigned long flags = 0;
struct regmap *regmap;
int ret;
@@ -1328,7 +1372,7 @@ MODULE_ALIAS("spi:sc16is7xx");
static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct sc16is7xx_devtype *devtype;
+ const struct sc16is7xx_devtype *devtype;
unsigned long flags = 0;
struct regmap *regmap;
@@ -1369,7 +1413,6 @@ MODULE_DEVICE_TABLE(i2c, sc16is7xx_i2c_id_table);
static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sc16is7xx_dt_ids),
},
.probe = sc16is7xx_i2c_probe,
@@ -1382,7 +1425,14 @@ MODULE_ALIAS("i2c:sc16is7xx");
static int __init sc16is7xx_init(void)
{
- int ret = 0;
+ int ret;
+
+ ret = uart_register_driver(&sc16is7xx_uart);
+ if (ret) {
+ pr_err("Registering UART driver failed\n");
+ return ret;
+ }
+
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
@@ -1411,6 +1461,7 @@ static void __exit sc16is7xx_exit(void)
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
spi_unregister_driver(&sc16is7xx_spi_uart_driver);
#endif
+ uart_unregister_driver(&sc16is7xx_uart);
}
module_exit(sc16is7xx_exit);
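With the uart_driver now global and sized for SC16IS7XX_MAX_DEVS, line numbers must be allocated across all probed chips; the patch does this lock-free with an atomic bitmap (the BUILD_BUG_ON guarantees the bitmap fits in one long). The core of that allocator, reduced to its two operations:

static unsigned long lines;	/* one bit per allocated tty line */

static int alloc_line(void)
{
	int i;

	for (i = 0; i < MAX_DEVS; i++)
		if (!test_and_set_bit(i, &lines))	/* atomic claim */
			break;

	return i;	/* i == MAX_DEVS means the table is full */
}

static void free_line(int line)
{
	clear_bit(line, &lines);
}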
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..603d2cc3f424 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1377,7 +1377,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
struct uart_state *state = tty->driver_data;
struct tty_port *port;
struct uart_port *uport;
- unsigned long flags;
if (!state) {
struct uart_driver *drv = tty->driver->driver_state;
@@ -1403,10 +1402,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* disable the receive line status interrupts.
*/
if (port->flags & ASYNC_INITIALIZED) {
- unsigned long flags;
- spin_lock_irqsave(&uport->lock, flags);
+ spin_lock_irq(&uport->lock);
uport->ops->stop_rx(uport);
- spin_unlock_irqrestore(&uport->lock, flags);
+ spin_unlock_irq(&uport->lock);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
@@ -1418,18 +1416,18 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&port->mutex);
uart_shutdown(tty, state);
tty_port_tty_set(port, NULL);
- tty->closing = 0;
- spin_lock_irqsave(&port->lock, flags);
+
+ spin_lock_irq(&port->lock);
if (port->blocked_open) {
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irq(&port->lock);
if (port->close_delay)
msleep_interruptible(jiffies_to_msecs(port->close_delay));
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irq(&port->lock);
} else if (!uart_console(uport)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irq(&port->lock);
uart_change_pm(state, UART_PM_STATE_OFF);
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irq(&port->lock);
}
/*
@@ -1437,13 +1435,14 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
clear_bit(ASYNCB_CLOSING, &port->flags);
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irq(&port->lock);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->close_wait);
mutex_unlock(&port->mutex);
tty_ldisc_flush(tty);
+ tty->closing = 0;
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
@@ -1531,11 +1530,6 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
-static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
-{
- return 0;
-}
-
static void uart_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
@@ -2378,8 +2372,6 @@ static const struct tty_operations uart_ops = {
};
static const struct tty_port_operations uart_port_ops = {
- .activate = uart_port_activate,
- .shutdown = uart_port_shutdown,
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
};
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 653cdd5fb508..c6657de78997 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -413,7 +413,6 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
break;
}
- sirfport->rx_io_count += rx_count;
port->icount.rx += rx_count;
return rx_count;
@@ -600,7 +599,6 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_IO_MODE);
@@ -632,31 +630,6 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
sirfport->uart_reg->uart_type));
}
-static void sirfsoc_uart_start_rx(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
- struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
- sirfport->rx_io_count = 0;
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
- if (sirfport->rx_dma_chan)
- sirfsoc_uart_start_next_rx_dma(port);
- else {
- if (!sirfport->is_atlas7)
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- rd_regl(port, ureg->sirfsoc_int_en_reg) |
- SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- else
- wr_regl(port, ureg->sirfsoc_int_en_reg,
- SIRFUART_RX_IO_INT_EN(uint_en,
- sirfport->uart_reg->uart_type));
- }
-}
-
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
unsigned long ioclk_rate, unsigned long *sample_reg)
@@ -850,7 +823,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
wr_regl(port, ureg->sirfsoc_tx_fifo_op,
(txfifo_op_reg & ~SIRFUART_FIFO_START));
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
@@ -886,9 +858,13 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
else
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
if (sirfport->rx_dma_chan)
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+ ~SIRFUART_IO_MODE);
else
- wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ SIRFUART_IO_MODE);
sirfport->rx_period_time = 20000000;
/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
if (set_baud < 1000000)
@@ -902,7 +878,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
txfifo_op_reg |= SIRFUART_FIFO_START;
wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
uart_update_timeout(port, termios->c_cflag, set_baud);
- sirfsoc_uart_start_rx(port);
wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -921,6 +896,7 @@ static int sirfsoc_uart_startup(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned int index = port->line;
int ret;
irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
@@ -958,9 +934,9 @@ static int sirfsoc_uart_startup(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
if (sirfport->rx_dma_chan)
wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
- SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
- SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
- SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+ SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
+ SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
+ SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
if (sirfport->tx_dma_chan) {
sirfport->tx_dma_state = TX_DMA_IDLE;
wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
@@ -981,16 +957,41 @@ static int sirfsoc_uart_startup(struct uart_port *port)
goto init_rx_err;
}
}
- enable_irq(port->irq);
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
+ sirfport->rx_dma_chan)
+ wr_regl(port, ureg->sirfsoc_swh_dma_io,
+ SIRFUART_CLEAR_RX_ADDR_EN);
+ if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
+ sirfport->rx_dma_chan)
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ SIRFSOC_USP_FRADDR_CLR_EN);
if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
sirfport->is_hrt_enabled = true;
sirfport->rx_period_time = 20000000;
+ sirfport->rx_last_pos = -1;
+ sirfport->pio_fetch_cnt = 0;
sirfport->rx_dma_items.xmit.tail =
sirfport->rx_dma_items.xmit.head = 0;
hrtimer_start(&sirfport->hrt,
ns_to_ktime(sirfport->rx_period_time),
HRTIMER_MODE_REL);
}
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
+ if (sirfport->rx_dma_chan)
+ sirfsoc_uart_start_next_rx_dma(port);
+ else {
+ if (!sirfport->is_atlas7)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) |
+ SIRFUART_RX_IO_INT_EN(uint_en,
+ sirfport->uart_reg->uart_type));
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ SIRFUART_RX_IO_INT_EN(uint_en,
+ sirfport->uart_reg->uart_type));
+ }
+ enable_irq(port->irq);
return 0;
init_rx_err:
@@ -1003,6 +1004,9 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct circ_buf *xmit;
+
+ xmit = &sirfport->rx_dma_items.xmit;
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
else
@@ -1019,8 +1023,10 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
if (sirfport->tx_dma_chan)
sirfport->tx_dma_state = TX_DMA_IDLE;
if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
- while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- SIRFUART_RX_FIFO_MASK) > 0)
+ while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+ SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
+ !CIRC_CNT(xmit->head, xmit->tail,
+ SIRFSOC_RX_DMA_BUF_SIZE))
;
sirfport->is_hrt_enabled = false;
hrtimer_cancel(&sirfport->hrt);
@@ -1169,6 +1175,8 @@ static enum hrtimer_restart
struct tty_struct *tty;
struct sirfsoc_register *ureg;
struct circ_buf *xmit;
+ struct sirfsoc_fifo_status *ufifo_st;
+ int max_pio_cnt;
sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
port = &sirfport->port;
@@ -1176,9 +1184,16 @@ static enum hrtimer_restart
tty = port->state->port.tty;
ureg = &sirfport->uart_reg->uart_reg;
xmit = &sirfport->rx_dma_items.xmit;
+ ufifo_st = &sirfport->uart_reg->fifo_status;
+
dmaengine_tx_status(sirfport->rx_dma_chan,
- sirfport->rx_dma_items.cookie, &tx_state);
- xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+ sirfport->rx_dma_items.cookie, &tx_state);
+ if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
+ sirfport->rx_last_pos) {
+ xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+ sirfport->rx_last_pos = xmit->head;
+ sirfport->pio_fetch_cnt = 0;
+ }
count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
SIRFSOC_RX_DMA_BUF_SIZE);
while (count > 0) {
@@ -1200,23 +1215,38 @@ static enum hrtimer_restart
*/
if (!inserted && !count &&
((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- SIRFUART_RX_FIFO_MASK) > 0)) {
+ SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
+ dmaengine_pause(sirfport->rx_dma_chan);
/* switch to pio mode */
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
- SIRFUART_RX_FIFO_MASK) > 0) {
- if (sirfsoc_uart_pio_rx_chars(port, 16) > 0)
- tty_flip_buffer_push(tty->port);
+ /*
+ * The UART controller's SWH_DMA_IO register has a CLEAR_RX_ADDR_EN
+ * bit: when the port switches from I/O to DMA mode, the hardware
+ * clears the two low bits of its read pointer. The USP has a
+ * similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL. Data fetched
+ * from the RX FIFO into the DMA buffer while in PIO mode would be
+ * overwritten by DMA after switching back, because the hardware
+ * then reports the FIFO level as the number of bytes PIO already
+ * fetched. Record that count to avoid fetching the same data twice.
+ */
+ max_pio_cnt = 3;
+ while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+ ufifo_st->ff_empty(port)) && max_pio_cnt--) {
+ xmit->buf[xmit->head] =
+ rd_regl(port, ureg->sirfsoc_rx_fifo_data);
+ xmit->head = (xmit->head + 1) &
+ (SIRFSOC_RX_DMA_BUF_SIZE - 1);
+ sirfport->pio_fetch_cnt++;
}
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
/* switch back to dma mode */
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_IO_MODE);
+ dmaengine_resume(sirfport->rx_dma_chan);
}
next_hrt:
hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
@@ -1239,7 +1269,7 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
struct resource *res;
int ret;
struct dma_slave_config slv_cfg = {
- .src_maxburst = 2,
+ .src_maxburst = 1,
};
struct dma_slave_config tx_slv_cfg = {
.dst_maxburst = 2,
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index eb162b012eec..c3a885b4d76a 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -296,6 +296,7 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFUART_DMA_MODE 0x0
#define SIRFUART_RX_DMA_FLUSH 0x4
+#define SIRFUART_CLEAR_RX_ADDR_EN 0x2
/* Baud Rate Calculation */
#define SIRF_USP_MIN_SAMPLE_DIV 0x1
#define SIRF_MIN_SAMPLE_DIV 0xf
@@ -325,6 +326,7 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFSOC_USP_ASYNC_DIV2_MASK 0x3f
#define SIRFSOC_USP_ASYNC_DIV2_OFFSET 16
#define SIRFSOC_USP_LOOP_BACK_CTRL BIT(2)
+#define SIRFSOC_USP_FRADDR_CLR_EN BIT(1)
/* USP-UART Common */
#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
#define SIRFUART_RECV_TIMEOUT_VALUE(x) \
@@ -421,7 +423,6 @@ struct sirfsoc_uart_port {
struct dma_chan *tx_dma_chan;
dma_addr_t tx_dma_addr;
struct dma_async_tx_descriptor *tx_dma_desc;
- unsigned int rx_io_count;
unsigned long transfer_size;
enum sirfsoc_tx_state tx_dma_state;
unsigned int cts_gpio;
@@ -431,6 +432,8 @@ struct sirfsoc_uart_port {
struct hrtimer hrt;
bool is_hrt_enabled;
unsigned long rx_period_time;
+ unsigned long rx_last_pos;
+ unsigned long pio_fetch_cnt;
};
/* Register Access Control */
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 33e94e56dcdb..d4692d888e9d 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -42,7 +42,7 @@
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/console.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/circ_buf.h>
#include <linux/serial_reg.h>
@@ -659,7 +659,7 @@ static void sn_sal_timer_poll(unsigned long data)
* @port: Our sn_cons_port (which contains the uart port)
*
* So this is used by sn_sal_serial_console_init (early on, before we're
- * registered with serial core). It's also used by sn_sal_module_init
+ * registered with serial core). It's also used by sn_sal_init
* right after we've registered with serial core. The latter only happens
* if we didn't already come through here via sn_sal_serial_console_init.
*
@@ -709,7 +709,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
* sn_sal_switch_to_interrupts - Switch to interrupt driven mode
* @port: Our sn_cons_port (which contains the uart port)
*
- * In sn_sal_module_init, after we're registered with serial core and
+ * In sn_sal_init, after we're registered with serial core and
* the port is added, this function is called to switch us to interrupt
* mode. We were previously in asynch/polling mode (using init_timer).
*
@@ -773,7 +773,7 @@ static struct uart_driver sal_console_uart = {
};
/**
- * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
+ * sn_sal_init - When the kernel loads us, get us rolling w/ serial core
*
* Before this is called, we've been printing kernel messages in a special
* early mode not making use of the serial core infrastructure. When our
@@ -781,7 +781,7 @@ static struct uart_driver sal_console_uart = {
* core and try to enable interrupt driven mode.
*
*/
-static int __init sn_sal_module_init(void)
+static int __init sn_sal_init(void)
{
int retval;
@@ -811,7 +811,7 @@ static int __init sn_sal_module_init(void)
if (uart_register_driver(&sal_console_uart) < 0) {
printk
- ("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
+ ("ERROR sn_sal_init failed uart_register_driver, line %d\n",
__LINE__);
return -ENODEV;
}
@@ -832,33 +832,19 @@ static int __init sn_sal_module_init(void)
/* when this driver is compiled in, the console initialization
* will have already switched us into asynchronous operation
- * before we get here through the module initcalls */
+ * before we get here through the initcalls */
if (!sal_console_port.sc_is_asynch) {
sn_sal_switch_to_asynch(&sal_console_port);
}
- /* at this point (module_init) we can try to turn on interrupts */
+ /* at this point (device_init) we can try to turn on interrupts */
if (!IS_RUNNING_ON_SIMULATOR()) {
sn_sal_switch_to_interrupts(&sal_console_port);
}
sn_process_input = 1;
return 0;
}
-
-/**
- * sn_sal_module_exit - When we're unloaded, remove the driver/port
- *
- */
-static void __exit sn_sal_module_exit(void)
-{
- del_timer_sync(&sal_console_port.sc_timer);
- uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
- uart_unregister_driver(&sal_console_uart);
- misc_deregister(&misc);
-}
-
-module_init(sn_sal_module_init);
-module_exit(sn_sal_module_exit);
+device_initcall(sn_sal_init);
/**
* puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 4a6eab6da63e..e3de9c6d2226 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -6,7 +6,7 @@
* Inspired by st-asc.c from STMicroelectronics (c)
*/
-#if defined(CONFIG_SERIAL_STM32_USART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 6e4ac8db2d79..127472bd6a7c 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -10,7 +10,6 @@
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/tty.h>
@@ -234,14 +233,10 @@ static int __init suncore_init(void)
{
return 0;
}
+device_initcall(suncore_init);
-static void __exit suncore_exit(void)
-{
-}
-
-module_init(suncore_init);
-module_exit(suncore_exit);
-
+#if 0 /* ..def MODULE ; never supported as such */
MODULE_AUTHOR("Eddie C. Dost, David S. Miller");
MODULE_DESCRIPTION("Sun serial common layer");
MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 534754440fa8..064031870ba0 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -3,7 +3,6 @@
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
@@ -621,7 +620,6 @@ static const struct of_device_id hv_match[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, hv_match);
static struct platform_driver hv_driver = {
.driver = {
@@ -639,16 +637,11 @@ static int __init sunhv_init(void)
return platform_driver_register(&hv_driver);
}
+device_initcall(sunhv_init);
-static void __exit sunhv_exit(void)
-{
- platform_driver_unregister(&hv_driver);
-}
-
-module_init(sunhv_init);
-module_exit(sunhv_exit);
-
+#if 0 /* ...def MODULE ; never supported as such */
MODULE_AUTHOR("David S. Miller");
MODULE_DESCRIPTION("SUN4V Hypervisor console driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 7d2532b23969..73190f5d2832 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -950,7 +950,7 @@ static void qe_uart_set_termios(struct uart_port *port,
if ((termios->c_cflag & CREAD) == 0)
port->read_status_mask &= ~BD_SC_EMPTY;
- baud = uart_get_baud_rate(port, termios, old, 0, 115200);
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Do we really need a spinlock here? */
spin_lock_irqsave(&port->lock, flags);
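The ucc_uart change caps the requested baud rate at what the hardware can actually generate: with 16x oversampling, the divisor path tops out at uartclk / 16 rather than a hard-coded 115200. The general shape of the clamp in set_termios():

/* let serial core pick a rate the clocking can really hit;
 * 16x oversampling means the ceiling is uartclk / 16 */
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);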
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index b5b427888b24..95b330a9ea98 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -353,9 +353,16 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
+ const gfp_t gfp_mask = GFP_KERNEL;
+ struct oom_control oc = {
+ .zonelist = node_zonelist(first_memory_node, gfp_mask),
+ .nodemask = NULL,
+ .gfp_mask = gfp_mask,
+ .order = -1,
+ };
+
mutex_lock(&oom_lock);
- if (!out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL),
- GFP_KERNEL, 0, NULL, true))
+ if (!out_of_memory(&oc))
pr_info("OOM request ignored because killer is disabled\n");
mutex_unlock(&oom_lock);
}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 4cf263d7dffc..5a3fa8913880 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -291,12 +291,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
n->flags = flags;
buf->tail = n;
b->commit = b->used;
- /* paired w/ barrier in flush_to_ldisc(); ensures the
+ /* paired w/ acquire in flush_to_ldisc(); ensures the
* latest commit value can be read before the head is
* advanced to the next buffer
*/
- smp_wmb();
- b->next = n;
+ smp_store_release(&b->next, n);
} else if (change)
size = 0;
else
@@ -445,7 +444,6 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
if (count)
disc->ops->receive_buf(tty, p, f, count);
}
- head->read += count;
return count;
}
@@ -488,12 +486,11 @@ static void flush_to_ldisc(struct work_struct *work)
if (atomic_read(&buf->priority))
break;
- next = head->next;
- /* paired w/ barrier in __tty_buffer_request_room();
+ /* paired w/ release in __tty_buffer_request_room();
* ensures commit value read is not stale if the head
* is advancing to the next buffer
*/
- smp_rmb();
+ next = smp_load_acquire(&head->next);
count = head->commit - head->read;
if (!count) {
if (next == NULL) {
@@ -508,6 +505,7 @@ static void flush_to_ldisc(struct work_struct *work)
count = receive_buf(tty, head, count);
if (!count)
break;
+ head->read += count;
}
mutex_unlock(&buf->lock);
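The tty_buffer change upgrades a wmb/rmb pair to release/acquire on the pointer itself, which both documents the pairing and is cheaper on some architectures. The idiom in isolation, assuming producer and consumer share a singly linked buffer list as above:

/* producer: make all prior writes to *b visible before publishing */
b->commit = b->used;
smp_store_release(&b->next, n);

/* consumer: the acquire load pairs with the release above, so any
 * read through 'head' after it (e.g. head->commit) is not stale */
next = smp_load_acquire(&head->next);
count = head->commit - head->read;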
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 57fc6ee12332..02785d844354 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -106,6 +106,11 @@
#include <linux/nsproxy.h>
#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...) do { } while (0)
+#endif
#define TTY_PARANOIA_CHECK 1
#define CHECK_TTY_COUNT 1
@@ -388,33 +393,40 @@ EXPORT_SYMBOL_GPL(tty_find_polling_driver);
int tty_check_change(struct tty_struct *tty)
{
unsigned long flags;
+ struct pid *pgrp;
int ret = 0;
if (current->signal->tty != tty)
return 0;
+ rcu_read_lock();
+ pgrp = task_pgrp(current);
+
spin_lock_irqsave(&tty->ctrl_lock, flags);
if (!tty->pgrp) {
printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
goto out_unlock;
}
- if (task_pgrp(current) == tty->pgrp)
+ if (pgrp == tty->pgrp)
goto out_unlock;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
if (is_ignored(SIGTTOU))
- goto out;
+ goto out_rcuunlock;
if (is_current_pgrp_orphaned()) {
ret = -EIO;
- goto out;
+ goto out_rcuunlock;
}
- kill_pgrp(task_pgrp(current), SIGTTOU, 1);
+ kill_pgrp(pgrp, SIGTTOU, 1);
+ rcu_read_unlock();
set_thread_flag(TIF_SIGPENDING);
ret = -ERESTARTSYS;
-out:
return ret;
out_unlock:
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+out_rcuunlock:
+ rcu_read_unlock();
return ret;
}
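tty_check_change() now pins the process group with RCU: task_pgrp() is only stable while rcu_read_lock() is held, so the reference is taken once and held across the signal delivery. The shape of the fix, reduced to its essentials:

struct pid *pgrp;

rcu_read_lock();
pgrp = task_pgrp(current);	/* valid only under rcu_read_lock() */

/* ... compare against tty->pgrp, bail out via out_rcuunlock ... */

kill_pgrp(pgrp, SIGTTOU, 1);	/* safe: pgrp cannot be freed yet */
rcu_read_unlock();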
@@ -524,7 +536,8 @@ static void __proc_set_tty(struct tty_struct *tty)
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty->session = get_pid(task_session(current));
if (current->signal->tty) {
- printk(KERN_DEBUG "tty not NULL!!\n");
+ tty_debug(tty, "current tty %s not NULL!!\n",
+ current->signal->tty->name);
tty_kref_put(current->signal->tty);
}
put_pid(current->signal->tty_old_pgrp);
@@ -766,9 +779,7 @@ static void do_tty_hangup(struct work_struct *work)
void tty_hangup(struct tty_struct *tty)
{
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s hangup...\n", tty_name(tty));
-#endif
+ tty_debug_hangup(tty, "\n");
schedule_work(&tty->hangup_work);
}
@@ -785,9 +796,7 @@ EXPORT_SYMBOL(tty_hangup);
void tty_vhangup(struct tty_struct *tty)
{
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty));
-#endif
+ tty_debug_hangup(tty, "\n");
__tty_hangup(tty, 0);
}
@@ -824,9 +833,7 @@ void tty_vhangup_self(void)
static void tty_vhangup_session(struct tty_struct *tty)
{
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s vhangup session...\n", tty_name(tty));
-#endif
+ tty_debug_hangup(tty, "\n");
__tty_hangup(tty, 1);
}
@@ -920,12 +927,8 @@ void disassociate_ctty(int on_exit)
tty->pgrp = NULL;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty_kref_put(tty);
- } else {
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
- " = NULL", tty);
-#endif
- }
+ } else
+ tty_debug_hangup(tty, "no current tty\n");
spin_unlock_irq(&current->sighand->siglock);
/* Now clear signal->tty under the lock */
@@ -1705,8 +1708,7 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
{
#ifdef TTY_PARANOIA_CHECK
if (idx < 0 || idx >= tty->driver->num) {
- printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
- __func__, tty->name);
+ tty_debug(tty, "bad idx %d\n", idx);
return -1;
}
@@ -1715,20 +1717,20 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
return 0;
if (tty != tty->driver->ttys[idx]) {
- printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
- __func__, idx, tty->name);
+ tty_debug(tty, "bad driver table[%d] = %p\n",
+ idx, tty->driver->ttys[idx]);
return -1;
}
if (tty->driver->other) {
struct tty_struct *o_tty = tty->link;
if (o_tty != tty->driver->other->ttys[idx]) {
- printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
- __func__, idx, tty->name);
+ tty_debug(tty, "bad other table[%d] = %p\n",
+ idx, tty->driver->other->ttys[idx]);
return -1;
}
if (o_tty->link != tty) {
- printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+ tty_debug(tty, "bad link = %p\n", o_tty->link);
return -1;
}
}
@@ -1782,10 +1784,7 @@ int tty_release(struct inode *inode, struct file *filp)
return 0;
}
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
- tty_name(tty), tty->count);
-#endif
+ tty_debug_hangup(tty, "(tty count=%d)...\n", tty->count);
if (tty->ops->close)
tty->ops->close(tty, filp);
@@ -1895,9 +1894,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (!final)
return 0;
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s: %s: final close\n", __func__, tty_name(tty));
-#endif
+ tty_debug_hangup(tty, "final close\n");
/*
* Ask the line discipline code to release its structures
*/
@@ -1906,10 +1903,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s: %s: freeing structure...\n", __func__,
- tty_name(tty));
-#endif
+ tty_debug_hangup(tty, "freeing structure...\n");
/*
* The release_tty function takes care of the details of clearing
* the slots and preserving the termios structure. The tty_unlock_pair
@@ -2098,9 +2092,9 @@ retry_open:
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
noctty = 1;
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
-#endif
+
+ tty_debug_hangup(tty, "(tty count=%d)\n", tty->count);
+
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
else
@@ -2108,10 +2102,8 @@ retry_open:
filp->f_flags = saved_flags;
if (retval) {
-#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
- retval, tty->name);
-#endif
+ tty_debug_hangup(tty, "error %d, releasing...\n", retval);
+
tty_unlock(tty); /* need to call tty_release without BTM */
tty_release(inode, filp);
if (retval != -ERESTARTSYS)
@@ -3160,9 +3152,12 @@ static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
unsigned int index, unsigned int count)
{
/* init here, since reused cdevs cause crashes */
- cdev_init(&driver->cdevs[index], &tty_fops);
- driver->cdevs[index].owner = driver->owner;
- return cdev_add(&driver->cdevs[index], dev, count);
+ driver->cdevs[index] = cdev_alloc();
+ if (!driver->cdevs[index])
+ return -ENOMEM;
+ cdev_init(driver->cdevs[index], &tty_fops);
+ driver->cdevs[index]->owner = driver->owner;
+ return cdev_add(driver->cdevs[index], dev, count);
}
/**
@@ -3268,8 +3263,10 @@ struct device *tty_register_device_attr(struct tty_driver *driver,
error:
put_device(dev);
- if (cdev)
- cdev_del(&driver->cdevs[index]);
+ if (cdev) {
+ cdev_del(driver->cdevs[index]);
+ driver->cdevs[index] = NULL;
+ }
return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(tty_register_device_attr);
@@ -3289,8 +3286,10 @@ void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
device_destroy(tty_class,
MKDEV(driver->major, driver->minor_start) + index);
- if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC))
- cdev_del(&driver->cdevs[index]);
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+ cdev_del(driver->cdevs[index]);
+ driver->cdevs[index] = NULL;
+ }
}
EXPORT_SYMBOL(tty_unregister_device);
@@ -3355,6 +3354,7 @@ err_free_all:
kfree(driver->ports);
kfree(driver->ttys);
kfree(driver->termios);
+ kfree(driver->cdevs);
kfree(driver);
return ERR_PTR(err);
}
@@ -3383,7 +3383,7 @@ static void destruct_tty_driver(struct kref *kref)
}
proc_tty_unregister_driver(driver);
if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
- cdev_del(&driver->cdevs[0]);
+ cdev_del(driver->cdevs[0]);
}
kfree(driver->cdevs);
kfree(driver->ports);
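The tty_cdev_add() and tty_unregister_device() hunks above move driver->cdevs[] from embedded struct cdev to pointers returned by cdev_alloc(), so a reused slot never carries a stale kobject. Below is a minimal sketch of that dynamic-cdev lifecycle in a standalone module; demo_cdev, demo_fops and the "demo" region name are illustrative, and the sketch sets ->ops directly rather than re-running cdev_init() on the freshly allocated cdev:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct cdev *demo_cdev;
static dev_t demo_devt;

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static int __init demo_init(void)
{
	int ret = alloc_chrdev_region(&demo_devt, 0, 1, "demo");

	if (ret)
		return ret;

	demo_cdev = cdev_alloc();	/* kobject-backed, kfreed on final put */
	if (!demo_cdev) {
		unregister_chrdev_region(demo_devt, 1);
		return -ENOMEM;
	}
	demo_cdev->ops = &demo_fops;
	demo_cdev->owner = THIS_MODULE;

	ret = cdev_add(demo_cdev, demo_devt, 1);
	if (ret) {
		cdev_del(demo_cdev);	/* releases the cdev_alloc() memory */
		unregister_chrdev_region(demo_devt, 1);
	}
	return ret;
}

static void __exit demo_exit(void)
{
	cdev_del(demo_cdev);
	demo_cdev = NULL;		/* mirrors the NULLing in the hunks above */
	unregister_chrdev_region(demo_devt, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");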
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 5232fb60b0b1..9c5aebfe7053 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -26,6 +26,12 @@
#undef TTY_DEBUG_WAIT_UNTIL_SENT
+#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
+# define tty_debug_wait_until_sent(tty, f, args...) tty_debug(tty, f, ##args)
+#else
+# define tty_debug_wait_until_sent(tty, f, args...) do {} while (0)
+#endif
+
#undef DEBUG
/*
@@ -210,9 +216,8 @@ int tty_unthrottle_safe(struct tty_struct *tty)
void tty_wait_until_sent(struct tty_struct *tty, long timeout)
{
-#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty));
-#endif
+ tty_debug_wait_until_sent(tty, "\n");
+
if (!timeout)
timeout = MAX_SCHEDULE_TIMEOUT;
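tty_debug_wait_until_sent() above follows the same compile-time pattern as tty_debug_hangup() in tty_io.c: the macro forwards to tty_debug() when the symbol is defined and collapses to a no-op statement otherwise. A sketch of the idiom with the illustrative names MY_DEBUG and my_debug():

#include <linux/device.h>

/* Flip the #undef to #define to enable the messages. */
#undef MY_DEBUG

#ifdef MY_DEBUG
# define my_debug(dev, f, args...) dev_dbg(dev, f, ##args)
#else
# define my_debug(dev, f, args...) do {} while (0)
#endif

/*
 * The do {} while (0) stub keeps the disabled macro a single
 * statement, so a caller such as
 *
 *	if (cond)
 *		my_debug(dev, "x\n");
 *	else
 *		other();
 *
 * still parses when the call compiles away.
 */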
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index c07fb5d9bcf9..71750cbac31f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -22,9 +22,7 @@
#undef LDISC_DEBUG_HANGUP
#ifdef LDISC_DEBUG_HANGUP
-#define tty_ldisc_debug(tty, f, args...) ({ \
- printk(KERN_DEBUG "%s: %s: " f, __func__, tty_name(tty), ##args); \
-})
+#define tty_ldisc_debug(tty, f, args...) tty_debug(tty, f, ##args)
#else
#define tty_ldisc_debug(tty, f, args...)
#endif
@@ -449,6 +447,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
ret = ld->ops->open(tty);
if (ret)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
+
+ tty_ldisc_debug(tty, "%p: opened\n", tty->ldisc);
return ret;
}
return 0;
@@ -469,6 +469,7 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
if (ld->ops->close)
ld->ops->close(tty);
+ tty_ldisc_debug(tty, "%p: closed\n", tty->ldisc);
}
/**
@@ -662,7 +663,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
int err = 0;
- tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
+ tty_ldisc_debug(tty, "%p: closing\n", tty->ldisc);
ld = tty_ldisc_ref(tty);
if (ld != NULL) {
@@ -712,7 +713,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
if (reset)
tty_reset_termios(tty);
- tty_ldisc_debug(tty, "re-opened ldisc: %p\n", tty->ldisc);
+ tty_ldisc_debug(tty, "%p: re-opened\n", tty->ldisc);
}
/**
@@ -776,8 +777,6 @@ void tty_ldisc_release(struct tty_struct *tty)
* it does not race with the set_ldisc code path.
*/
- tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
-
tty_ldisc_lock_pair(tty, o_tty);
tty_ldisc_kill(tty);
if (o_tty)
@@ -787,7 +786,7 @@ void tty_ldisc_release(struct tty_struct *tty)
/* And the memory resources remaining (buffers, termios) will be
disposed of when the kref hits zero */
- tty_ldisc_debug(tty, "ldisc closed\n");
+ tty_ldisc_debug(tty, "released\n");
}
/**
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
schedule();
continue;
}
+ __set_current_state(TASK_RUNNING);
count = sel_buffer_lth - pasted;
count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
count);
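The paste_selection() fix above completes the classic open-coded wait loop: set the task state, re-check the condition, schedule, and put the task back to TASK_RUNNING before touching data, otherwise the pasted bytes would be processed while the task is nominally asleep. A hedged sketch of the full idiom; demo_waitq and demo_ready are placeholders:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_ready;

static void demo_wait_for_ready(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&demo_waitq, &wait);
	while (!demo_ready) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (demo_ready)		/* re-check after setting the state */
			break;
		schedule();
	}
	/* back to TASK_RUNNING before doing work, as the fix above does */
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&demo_waitq, &wait);
}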
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
+ if (vc->vc_uni_pagedir_loc)
+ con_free_unimap(vc);
vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
vc->vc_uni_pagedir = NULL;
vc->vc_hi_font_mask = 0;
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 48fb1d983f6c..52c98ce1b6fe 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -127,7 +127,7 @@ config UIO_FSL_ELBC_GPCM_NETX5152
config UIO_PRUSS
tristate "Texas Instruments PRUSS driver"
select GENERIC_ALLOCATOR
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
PRUSS driver for OMAPL138/DA850/AM18XX devices
PRUSS driver requires user space components, examples and user space
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 3257d4220d01..8196581f54c2 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -896,6 +896,7 @@ static int __init uio_init(void)
static void __exit uio_exit(void)
{
release_uio_class();
+ idr_destroy(&uio_idr);
}
module_init(uio_init)
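uio_exit() above now also releases the idr's internal node cache. A sketch of the complete idr lifecycle that fix assumes, with demo_idr as a placeholder; all ids should be removed before idr_destroy():

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(demo_idr);

static int demo_track(void *obj)
{
	/* allocate a new id, starting at 0, mapping to obj */
	return idr_alloc(&demo_idr, obj, 0, 0, GFP_KERNEL);
}

static void demo_untrack(int id)
{
	idr_remove(&demo_idr, id);
}

static void demo_teardown(void)
{
	/* frees the idr's cached internal nodes; ids must already be gone */
	idr_destroy(&demo_idr);
}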
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index b6cac91c2ced..2bcf80c159c1 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -480,19 +480,7 @@ static struct platform_driver uio_fsl_elbc_gpcm_driver = {
.probe = uio_fsl_elbc_gpcm_probe,
.remove = uio_fsl_elbc_gpcm_remove,
};
-
-static int __init uio_fsl_elbc_gpcm_init(void)
-{
- return platform_driver_register(&uio_fsl_elbc_gpcm_driver);
-}
-
-static void __exit uio_fsl_elbc_gpcm_exit(void)
-{
- platform_driver_unregister(&uio_fsl_elbc_gpcm_driver);
-}
-
-module_init(uio_fsl_elbc_gpcm_init);
-module_exit(uio_fsl_elbc_gpcm_exit);
+module_platform_driver(uio_fsl_elbc_gpcm_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>");
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 813d4d3a51c6..1173f9cbc137 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -270,6 +270,7 @@ static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
{
static char *str[] = { "no", "yes" };
+
if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -278,6 +279,7 @@ static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
{
static char *str[] = { NULL, "not connected", "connected", "lost" };
+
if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -702,6 +704,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
len = ret / 4;
for (offb = 0; offb < len; ) {
int l = le32_to_cpu(buf[offb++]);
+
if (l < 0 || l > stride || l > (len - offb) / 2) {
if (printk_ratelimit())
usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
@@ -732,6 +735,7 @@ cleanup:
static int cxacru_card_status(struct cxacru_data *instance)
{
int ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
+
if (ret < 0) { /* firmware not loaded */
usb_dbg(instance->usbatm, "cxacru_adsl_start: CARD_GET_STATUS returned %d\n", ret);
return ret;
@@ -945,6 +949,7 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
offb = offd = 0;
do {
int l = min_t(int, stride, size - offd);
+
buf[offb++] = fw;
buf[offb++] = l;
buf[offb++] = code1;
@@ -1091,8 +1096,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
{
const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
-
int ret = cxacru_find_firmware(instance, "fw", &fw);
+
if (ret) {
usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
return ret;
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index 3cb9bda51ddf..e462f55c8b99 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -25,6 +25,9 @@
#define VERSION (0xF << 25)
#define CIVERSION (0x7 << 29)
+/* SBUSCFG */
+#define AHBBRST_MASK 0x7
+
/* HCCPARAMS */
#define HCCPARAMS_LEN BIT(17)
@@ -53,6 +56,15 @@
#define DEVICEADDR_USBADRA BIT(24)
#define DEVICEADDR_USBADR (0x7FUL << 25)
+/* TTCTRL */
+#define TTCTRL_TTHA_MASK (0x7fUL << 24)
+/* Set non-zero value for internal TT Hub address representation */
+#define TTCTRL_TTHA (0x7fUL << 24)
+
+/* BURSTSIZE */
+#define RX_BURST_MASK 0xff
+#define TX_BURST_MASK 0xff00
+
/* PORTSC */
#define PORTSC_CCS BIT(0)
#define PORTSC_CSC BIT(1)
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6d6200e37b71..41d7cf6d63ba 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -50,6 +50,8 @@ enum ci_hw_regs {
OP_USBINTR,
OP_DEVICEADDR,
OP_ENDPTLISTADDR,
+ OP_TTCTRL,
+ OP_BURSTSIZE,
OP_PORTSC,
OP_DEVLC,
OP_OTGSC,
@@ -406,8 +408,11 @@ static inline u32 hw_test_and_write(struct ci_hdrc *ci, enum ci_hw_regs reg,
static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
{
#ifdef CONFIG_USB_OTG_FSM
+ struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
+
return ci->is_otg && ci->roles[CI_ROLE_HOST] &&
- ci->roles[CI_ROLE_GADGET];
+ ci->roles[CI_ROLE_GADGET] && (otg_caps->srp_support ||
+ otg_caps->hnp_support || otg_caps->adp_support);
#else
return false;
#endif
@@ -426,4 +431,6 @@ u8 hw_port_test_get(struct ci_hdrc *ci);
int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
u32 value, unsigned int timeout_ms);
+void ci_platform_configure(struct ci_hdrc *ci);
+
#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 389f0e034259..867e9f3f3859 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -29,26 +29,31 @@ struct ci_hdrc_imx_platform_flag {
};
static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
+ CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
.flags = CI_HDRC_IMX28_WRITE_FIX |
- CI_HDRC_TURN_VBUS_EARLY_ON,
+ CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
- CI_HDRC_TURN_VBUS_EARLY_ON,
+ CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
- CI_HDRC_TURN_VBUS_EARLY_ON,
+ CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_HOST_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
- CI_HDRC_TURN_VBUS_EARLY_ON,
+ CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_HOST_STREAMING,
};
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
@@ -104,7 +109,7 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev)
+ if (!misc_pdev || !platform_get_drvdata(misc_pdev))
return ERR_PTR(-EPROBE_DEFER);
data->dev = &misc_pdev->dev;
@@ -126,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
struct ci_hdrc_platform_data pdata = {
.name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
- .flags = CI_HDRC_DISABLE_STREAMING,
+ .flags = CI_HDRC_SET_NON_ZERO_TTHA,
};
int ret;
const struct of_device_id *of_id =
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3feebf7f31f0 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -64,6 +64,7 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/usb/ehci_def.h>
#include "ci.h"
#include "udc.h"
@@ -84,6 +85,8 @@ static const u8 ci_regs_nolpm[] = {
[OP_USBINTR] = 0x08U,
[OP_DEVICEADDR] = 0x14U,
[OP_ENDPTLISTADDR] = 0x18U,
+ [OP_TTCTRL] = 0x1CU,
+ [OP_BURSTSIZE] = 0x20U,
[OP_PORTSC] = 0x44U,
[OP_DEVLC] = 0x84U,
[OP_OTGSC] = 0x64U,
@@ -106,6 +109,8 @@ static const u8 ci_regs_lpm[] = {
[OP_USBINTR] = 0x08U,
[OP_DEVICEADDR] = 0x14U,
[OP_ENDPTLISTADDR] = 0x18U,
+ [OP_TTCTRL] = 0x1CU,
+ [OP_BURSTSIZE] = 0x20U,
[OP_PORTSC] = 0x44U,
[OP_DEVLC] = 0x84U,
[OP_OTGSC] = 0xC4U,
@@ -118,7 +123,7 @@ static const u8 ci_regs_lpm[] = {
[OP_ENDPTCTRL] = 0xECU,
};
-static int hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
+static void hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
{
int i;
@@ -134,7 +139,6 @@ static int hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
? ci_regs_lpm[OP_ENDPTCTRL]
: ci_regs_nolpm[OP_ENDPTCTRL]);
- return 0;
}
static enum ci_revision ci_get_revision(struct ci_hdrc *ci)
@@ -403,6 +407,55 @@ static int ci_usb_phy_init(struct ci_hdrc *ci)
return ret;
}
+
+/**
+ * ci_platform_configure: apply platform-specific controller configuration
+ * @ci: the controller
+ */
+void ci_platform_configure(struct ci_hdrc *ci)
+{
+ bool is_device_mode, is_host_mode;
+
+ is_device_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_DC;
+ is_host_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_HC;
+
+ if (is_device_mode &&
+ (ci->platdata->flags & CI_HDRC_DISABLE_DEVICE_STREAMING))
+ hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
+ if (is_host_mode &&
+ (ci->platdata->flags & CI_HDRC_DISABLE_HOST_STREAMING))
+ hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
+ if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
+ if (ci->hw_bank.lpm)
+ hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
+ else
+ hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
+ }
+
+ if (ci->platdata->flags & CI_HDRC_SET_NON_ZERO_TTHA)
+ hw_write(ci, OP_TTCTRL, TTCTRL_TTHA_MASK, TTCTRL_TTHA);
+
+ hw_write(ci, OP_USBCMD, 0xff0000, ci->platdata->itc_setting << 16);
+
+ if (ci->platdata->flags & CI_HDRC_OVERRIDE_AHB_BURST)
+ hw_write_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK,
+ ci->platdata->ahb_burst_config);
+
+ /* override burst size, takes effect only when ahb_burst_config is 0 */
+ if (!hw_read_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK)) {
+ if (ci->platdata->flags & CI_HDRC_OVERRIDE_TX_BURST)
+ hw_write(ci, OP_BURSTSIZE, TX_BURST_MASK,
+ ci->platdata->tx_burst_size << __ffs(TX_BURST_MASK));
+
+ if (ci->platdata->flags & CI_HDRC_OVERRIDE_RX_BURST)
+ hw_write(ci, OP_BURSTSIZE, RX_BURST_MASK,
+ ci->platdata->rx_burst_size);
+ }
+}
+
/**
* hw_controller_reset: do controller reset
* @ci: the controller
@@ -447,16 +500,6 @@ int hw_device_reset(struct ci_hdrc *ci)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_RESET_EVENT);
- if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
- hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
-
- if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
- if (ci->hw_bank.lpm)
- hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
- else
- hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
- }
-
/* USBMODE should be configured step by step */
hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_DC);
@@ -469,6 +512,8 @@ int hw_device_reset(struct ci_hdrc *ci)
return -ENODEV;
}
+ ci_platform_configure(ci);
+
return 0;
}
@@ -560,6 +605,8 @@ static irqreturn_t ci_irq(int irq, void *data)
static int ci_get_platdata(struct device *dev,
struct ci_hdrc_platform_data *platdata)
{
+ int ret;
+
if (!platdata->phy_mode)
platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
@@ -588,9 +635,66 @@ static int ci_get_platdata(struct device *dev,
of_usb_host_tpl_support(dev->of_node);
}
+ if (platdata->dr_mode == USB_DR_MODE_OTG) {
+ /* We can support HNP and SRP of OTG 2.0 */
+ platdata->ci_otg_caps.otg_rev = 0x0200;
+ platdata->ci_otg_caps.hnp_support = true;
+ platdata->ci_otg_caps.srp_support = true;
+
+ /* Update otg capabilities by DT properties */
+ ret = of_usb_update_otg_caps(dev->of_node,
+ &platdata->ci_otg_caps);
+ if (ret)
+ return ret;
+ }
+
if (of_usb_get_maximum_speed(dev->of_node) == USB_SPEED_FULL)
platdata->flags |= CI_HDRC_FORCE_FULLSPEED;
+ platdata->itc_setting = 1;
+ if (of_find_property(dev->of_node, "itc-setting", NULL)) {
+ ret = of_property_read_u32(dev->of_node, "itc-setting",
+ &platdata->itc_setting);
+ if (ret) {
+ dev_err(dev,
+ "failed to get itc-setting\n");
+ return ret;
+ }
+ }
+
+ if (of_find_property(dev->of_node, "ahb-burst-config", NULL)) {
+ ret = of_property_read_u32(dev->of_node, "ahb-burst-config",
+ &platdata->ahb_burst_config);
+ if (ret) {
+ dev_err(dev,
+ "failed to get ahb-burst-config\n");
+ return ret;
+ }
+ platdata->flags |= CI_HDRC_OVERRIDE_AHB_BURST;
+ }
+
+ if (of_find_property(dev->of_node, "tx-burst-size-dword", NULL)) {
+ ret = of_property_read_u32(dev->of_node, "tx-burst-size-dword",
+ &platdata->tx_burst_size);
+ if (ret) {
+ dev_err(dev,
+ "failed to get tx-burst-size-dword\n");
+ return ret;
+ }
+ platdata->flags |= CI_HDRC_OVERRIDE_TX_BURST;
+ }
+
+ if (of_find_property(dev->of_node, "rx-burst-size-dword", NULL)) {
+ ret = of_property_read_u32(dev->of_node, "rx-burst-size-dword",
+ &platdata->rx_burst_size);
+ if (ret) {
+ dev_err(dev,
+ "failed to get rx-burst-size-dword\n");
+ return ret;
+ }
+ platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST;
+ }
+
return 0;
}
@@ -1024,7 +1128,18 @@ static struct platform_driver ci_hdrc_driver = {
},
};
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+ ci_hdrc_host_driver_init();
+ return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+ platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
MODULE_ALIAS("platform:ci_hdrc");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 5b7061a33103..080b7be3daf0 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -10,6 +10,7 @@
#include <linux/usb/phy.h>
#include <linux/usb/otg.h>
#include <linux/usb/otg-fsm.h>
+#include <linux/usb/chipidea.h>
#include "ci.h"
#include "udc.h"
@@ -66,9 +67,11 @@ static int ci_port_test_show(struct seq_file *s, void *data)
unsigned long flags;
unsigned mode;
+ pm_runtime_get_sync(ci->dev);
spin_lock_irqsave(&ci->lock, flags);
mode = hw_port_test_get(ci);
spin_unlock_irqrestore(&ci->lock, flags);
+ pm_runtime_put_sync(ci->dev);
seq_printf(s, "mode = %u\n", mode);
@@ -98,9 +101,11 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
if (sscanf(buf, "%u", &mode) != 1)
return -EINVAL;
+ pm_runtime_get_sync(ci->dev);
spin_lock_irqsave(&ci->lock, flags);
ret = hw_port_test_set(ci, mode);
spin_unlock_irqrestore(&ci->lock, flags);
+ pm_runtime_put_sync(ci->dev);
return ret ? ret : count;
}
@@ -316,8 +321,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
if (role == CI_ROLE_END || role == ci->role)
return -EINVAL;
+ pm_runtime_get_sync(ci->dev);
ci_role_stop(ci);
ret = ci_role_start(ci, role);
+ pm_runtime_put_sync(ci->dev);
return ret ? ret : count;
}
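The debugfs handlers above bracket register access with runtime-PM gets and puts so the controller is guaranteed awake while its registers are read. A minimal sketch, assuming a driver-private struct; demo_ctrl, DEMO_REG and the lock are illustrative:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#define DEMO_REG	0x0

struct demo_ctrl {
	struct device *dev;
	void __iomem *regs;
	spinlock_t lock;
};

static u32 demo_read_reg(struct demo_ctrl *c)
{
	unsigned long flags;
	u32 val;

	pm_runtime_get_sync(c->dev);		/* controller must be awake */
	spin_lock_irqsave(&c->lock, flags);
	val = readl(c->regs + DEMO_REG);
	spin_unlock_irqrestore(&c->lock, flags);
	pm_runtime_put_sync(c->dev);		/* may suspend again now */

	return val;
}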
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..3d24304405b3 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -37,15 +37,14 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
struct ehci_ci_priv {
struct regulator *reg_vbus;
- struct ci_hdrc *ci;
};
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_ci_priv *priv = (struct ehci_ci_priv *)ehci->priv;
- struct ci_hdrc *ci = priv->ci;
struct device *dev = hcd->self.controller;
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
@@ -78,9 +77,25 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
return 0;
};
+static int ehci_ci_reset(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ehci_setup(hcd);
+ if (ret)
+ return ret;
+
+ ci_platform_configure(ci);
+
+ return ret;
+}
+
static const struct ehci_driver_overrides ehci_ci_overrides = {
.extra_priv_size = sizeof(struct ehci_ci_priv),
.port_power = ehci_ci_portpower,
+ .reset = ehci_ci_reset,
};
static irqreturn_t host_irq(struct ci_hdrc *ci)
@@ -123,7 +138,6 @@ static int host_start(struct ci_hdrc *ci)
priv = (struct ehci_ci_priv *)ehci->priv;
priv->reg_vbus = NULL;
- priv->ci = ci;
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci)) {
if (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON) {
@@ -153,12 +167,6 @@ static int host_start(struct ci_hdrc *ci)
}
}
- if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
- hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
-
- if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED)
- hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
-
return ret;
disable_reg:
@@ -249,9 +257,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
+ return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
- return 0;
}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
int ci_hdrc_host_init(struct ci_hdrc *ci);
void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
#else
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
+static inline void ci_hdrc_host_driver_init(void)
+{
+}
+
#endif
#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
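Config-gated stubs like ci_hdrc_host_driver_init() above are conventionally static inline, so each includer compiles them away instead of warning about a defined-but-unused function. A sketch of the idiom with an invented CONFIG_DEMO:

#ifdef CONFIG_DEMO
int demo_init(void);
#else
static inline int demo_init(void)
{
	return 0;	/* compiled away when the feature is off */
}
#endif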
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 19d655a743b5..00ab59d45da1 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -525,7 +525,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
ci_role_start(ci, CI_ROLE_HOST);
} else {
ci_role_stop(ci);
- hw_device_reset(ci);
ci_role_start(ci, CI_ROLE_GADGET);
}
return 0;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 764f668d45a9..a637da25dda0 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -445,7 +445,7 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
rest -= count;
}
- if (hwreq->req.zero && hwreq->req.length
+ if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
&& (hwreq->req.length % hwep->ep.maxpacket == 0))
add_td_to_list(hwep, hwreq, 0);
@@ -1090,6 +1090,13 @@ __acquires(ci->lock)
if (ci_otg_is_fsm_mode(ci))
err = otg_a_alt_hnp_support(ci);
break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (ci_otg_is_fsm_mode(ci)) {
+ ci->gadget.a_hnp_support = 1;
+ err = isr_setup_status_phase(
+ ci);
+ }
+ break;
default:
goto delegate;
}
@@ -1624,6 +1631,20 @@ static int init_eps(struct ci_hdrc *ci)
hwep->ep.name = hwep->name;
hwep->ep.ops = &usb_ep_ops;
+
+ if (i == 0) {
+ hwep->ep.caps.type_control = true;
+ } else {
+ hwep->ep.caps.type_iso = true;
+ hwep->ep.caps.type_bulk = true;
+ hwep->ep.caps.type_int = true;
+ }
+
+ if (j == TX)
+ hwep->ep.caps.dir_in = true;
+ else
+ hwep->ep.caps.dir_out = true;
+
/*
* for ep0: maxP defined in desc, for other
* eps, maxP is set by epautoconfig() called
@@ -1827,6 +1848,7 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
static int udc_start(struct ci_hdrc *ci)
{
struct device *dev = ci->dev;
+ struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
int retval = 0;
spin_lock_init(&ci->lock);
@@ -1834,8 +1856,12 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.ops = &usb_gadget_ops;
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->gadget.max_speed = USB_SPEED_HIGH;
- ci->gadget.is_otg = ci->is_otg ? 1 : 0;
ci->gadget.name = ci->platdata->name;
+ ci->gadget.otg_caps = otg_caps;
+
+ if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
+ otg_caps->adp_support))
+ ci->gadget.is_otg = 1;
INIT_LIST_HEAD(&ci->gadget.ep_list);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 3cefd49ddb00..5ddab30ee240 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -54,6 +54,7 @@
#define MX53_USB_PHYCTRL1_PLLDIV_MASK 0x3
#define MX53_USB_PLL_DIV_24_MHZ 0x01
+#define MX6_BM_NON_BURST_SETTING BIT(1)
#define MX6_BM_OVER_CUR_DIS BIT(7)
#define MX6_BM_WAKEUP_ENABLE BIT(10)
#define MX6_BM_ID_WAKEUP BIT(16)
@@ -255,14 +256,21 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
if (data->index > 3)
return -EINVAL;
+ spin_lock_irqsave(&usbmisc->lock, flags);
+
if (data->disable_oc) {
- spin_lock_irqsave(&usbmisc->lock, flags);
reg = readl(usbmisc->base + data->index * 4);
writel(reg | MX6_BM_OVER_CUR_DIS,
usbmisc->base + data->index * 4);
- spin_unlock_irqrestore(&usbmisc->lock, flags);
}
+ /* SoC non-burst setting */
+ reg = readl(usbmisc->base + data->index * 4);
+ writel(reg | MX6_BM_NON_BURST_SETTING,
+ usbmisc->base + data->index * 4);
+
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
usbmisc_imx6q_set_wakeup(data, false);
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
usb_deregister(&acm_driver);
tty_unregister_driver(acm_tty_driver);
put_tty_driver(acm_tty_driver);
+ idr_destroy(&acm_minors);
}
module_init(acm_init);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index f38e875a3fb1..433bbc34a8a4 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -57,6 +57,7 @@
#include <linux/mutex.h>
#undef DEBUG
#include <linux/usb.h>
+#include <linux/usb/ch9.h>
#include <linux/ratelimit.h>
/*
@@ -79,12 +80,20 @@
#define IOCNR_SOFT_RESET 7
/* Get device_id string: */
#define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len)
-/* The following ioctls were added for http://hpoj.sourceforge.net: */
-/* Get two-int array:
- * [0]=current protocol (1=7/1/1, 2=7/1/2, 3=7/1/3),
- * [1]=supported protocol mask (mask&(1<<n)!=0 means 7/1/n supported): */
+/* The following ioctls were added for http://hpoj.sourceforge.net:
+ * Get two-int array:
+ * [0]=current protocol
+ * (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ * 3=USB_CLASS_PRINTER/1/3),
+ * [1]=supported protocol mask (mask&(1<<n)!=0 means
+ * USB_CLASS_PRINTER/1/n supported):
+ */
#define LPIOC_GET_PROTOCOLS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_PROTOCOLS, len)
-/* Set protocol (arg: 1=7/1/1, 2=7/1/2, 3=7/1/3): */
+/*
+ * Set protocol
+ * (arg: 1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ * 3=USB_CLASS_PRINTER/1/3):
+ */
#define LPIOC_SET_PROTOCOL _IOC(_IOC_WRITE, 'P', IOCNR_SET_PROTOCOL, 0)
/* Set channel number (HP Vendor-specific command): */
#define LPIOC_HP_SET_CHANNEL _IOC(_IOC_WRITE, 'P', IOCNR_HP_SET_CHANNEL, 0)
@@ -146,8 +155,10 @@ struct usblp {
int readcount; /* Counter for reads */
int ifnum; /* Interface number */
struct usb_interface *intf; /* The interface */
- /* Alternate-setting numbers and endpoints for each protocol
- * (7/1/{index=1,2,3}) that the device supports: */
+ /*
+ * Alternate-setting numbers and endpoints for each protocol
+ * (USB_CLASS_PRINTER/1/{index=1,2,3}) that the device supports:
+ */
struct {
int alt_setting;
struct usb_endpoint_descriptor *epwrite;
@@ -1206,19 +1217,23 @@ abort_ret:
* but our requirements are too intricate for simple match to handle.
*
* The "proto_bias" option may be used to specify the preferred protocol
- * for all USB printers (1=7/1/1, 2=7/1/2, 3=7/1/3). If the device
- * supports the preferred protocol, then we bind to it.
+ * for all USB printers (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ * 3=USB_CLASS_PRINTER/1/3). If the device supports the preferred protocol,
+ * then we bind to it.
*
- * The best interface for us is 7/1/2, because it is compatible
- * with a stream of characters. If we find it, we bind to it.
+ * The best interface for us is USB_CLASS_PRINTER/1/2, because it
+ * is compatible with a stream of characters. If we find it, we bind to it.
*
* Note that the people from hpoj.sourceforge.net need to be able to
- * bind to 7/1/3 (MLC/1284.4), so we provide them ioctls for this purpose.
+ * bind to USB_CLASS_PRINTER/1/3 (MLC/1284.4), so we provide them ioctls
+ * for this purpose.
*
- * Failing 7/1/2, we look for 7/1/3, even though it's probably not
- * stream-compatible, because this matches the behaviour of the old code.
+ * Failing USB_CLASS_PRINTER/1/2, we look for USB_CLASS_PRINTER/1/3,
+ * even though it's probably not stream-compatible, because this matches
+ * the behaviour of the old code.
*
- * If nothing else, we bind to 7/1/1 - the unidirectional interface.
+ * If nothing else, we bind to USB_CLASS_PRINTER/1/1
+ * - the unidirectional interface.
*/
static int usblp_select_alts(struct usblp *usblp)
{
@@ -1236,7 +1251,8 @@ static int usblp_select_alts(struct usblp *usblp)
for (i = 0; i < if_alt->num_altsetting; i++) {
ifd = &if_alt->altsetting[i];
- if (ifd->desc.bInterfaceClass != 7 || ifd->desc.bInterfaceSubClass != 1)
+ if (ifd->desc.bInterfaceClass != USB_CLASS_PRINTER ||
+ ifd->desc.bInterfaceSubClass != 1)
if (!(usblp->quirks & USBLP_QUIRK_BAD_CLASS))
continue;
@@ -1262,8 +1278,10 @@ static int usblp_select_alts(struct usblp *usblp)
if (!epwrite || (ifd->desc.bInterfaceProtocol > 1 && !epread))
continue;
- /* Turn off reads for 7/1/1 (unidirectional) interfaces
- * and buggy bidirectional printers. */
+ /*
+ * Turn off reads for USB_CLASS_PRINTER/1/1 (unidirectional)
+ * interfaces and buggy bidirectional printers.
+ */
if (ifd->desc.bInterfaceProtocol == 1) {
epread = NULL;
} else if (usblp->quirks & USBLP_QUIRK_BIDIR) {
@@ -1406,12 +1424,12 @@ static int usblp_resume(struct usb_interface *intf)
}
static const struct usb_device_id usblp_ids[] = {
- { USB_DEVICE_INFO(7, 1, 1) },
- { USB_DEVICE_INFO(7, 1, 2) },
- { USB_DEVICE_INFO(7, 1, 3) },
- { USB_INTERFACE_INFO(7, 1, 1) },
- { USB_INTERFACE_INFO(7, 1, 2) },
- { USB_INTERFACE_INFO(7, 1, 3) },
+ { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 1) },
+ { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 2) },
+ { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 3) },
+ { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 1) },
+ { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 2) },
+ { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 3) },
{ USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */
{ } /* Terminating entry */
};
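The id table above switches from the bare class number 7 to the symbolic USB_CLASS_PRINTER. A sketch of how the two matching macros differ; demo_ids is illustrative:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_ids[] = {
	/* matches when the whole device declares printer/1/2 */
	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 2) },
	/* matches when any single interface declares printer/1/2 */
	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 2) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_ids);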
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index b530fd403ffb..9e39286a4e5a 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -154,6 +154,62 @@ bool of_usb_host_tpl_support(struct device_node *np)
return false;
}
EXPORT_SYMBOL_GPL(of_usb_host_tpl_support);
+
+/**
+ * of_usb_update_otg_caps - update usb otg capabilities according to
+ * the properties passed in DT.
+ * @np: Pointer to the given device_node
+ * @otg_caps: Pointer to the target usb_otg_caps to be set
+ *
+ * The function updates the otg capabilities.
+ */
+int of_usb_update_otg_caps(struct device_node *np,
+ struct usb_otg_caps *otg_caps)
+{
+ u32 otg_rev;
+
+ if (!otg_caps)
+ return -EINVAL;
+
+ if (!of_property_read_u32(np, "otg-rev", &otg_rev)) {
+ switch (otg_rev) {
+ case 0x0100:
+ case 0x0120:
+ case 0x0130:
+ case 0x0200:
+ /* Choose the lesser one if it's already been set */
+ if (otg_caps->otg_rev)
+ otg_caps->otg_rev = min_t(u16, otg_rev,
+ otg_caps->otg_rev);
+ else
+ otg_caps->otg_rev = otg_rev;
+ break;
+ default:
+ pr_err("%s: unsupported otg-rev: 0x%x\n",
+ np->full_name, otg_rev);
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * otg-rev is mandatory for otg properties; if it is not passed,
+ * we set it to 0 and assume a legacy otg device.
+ * Non-dt platform can set it afterwards.
+ */
+ otg_caps->otg_rev = 0;
+ }
+
+ if (of_find_property(np, "hnp-disable", NULL))
+ otg_caps->hnp_support = false;
+ if (of_find_property(np, "srp-disable", NULL))
+ otg_caps->srp_support = false;
+ if (of_find_property(np, "adp-disable", NULL) ||
+ (otg_caps->otg_rev < 0x0200))
+ otg_caps->adp_support = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
+
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
{
return bus_register(&ulpi_bus);
}
-module_init(ulpi_init);
+subsys_initcall(ulpi_init);
static void __exit ulpi_exit(void)
{
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 986abde07683..38ae877c46e3 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
#define snoop(dev, format, arg...) \
do { \
if (usbfs_snoop) \
- dev_info(dev , format , ## arg); \
+ dev_info(dev, format, ## arg); \
} while (0)
enum snoop_when {
@@ -1082,7 +1082,8 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
return ret;
- if (!(tbuf = kmalloc(len1, GFP_KERNEL))) {
+ tbuf = kmalloc(len1, GFP_KERNEL);
+ if (!tbuf) {
ret = -ENOMEM;
goto done;
}
@@ -1224,7 +1225,8 @@ static int proc_setintf(struct usb_dev_state *ps, void __user *arg)
if (copy_from_user(&setintf, arg, sizeof(setintf)))
return -EFAULT;
- if ((ret = checkintf(ps, setintf.interface)))
+ ret = checkintf(ps, setintf.interface);
+ if (ret)
return ret;
destroy_async_on_interface(ps, setintf.interface);
@@ -1319,7 +1321,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
- switch(uurb->type) {
+ switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
@@ -1393,7 +1395,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
number_of_packets;
- if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
+ isopkt = kmalloc(isofrmlen, GFP_KERNEL);
+ if (!isopkt)
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
ret = -EFAULT;
@@ -1904,7 +1907,8 @@ static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
if (get_user(ifnum, (unsigned int __user *)arg))
return -EFAULT;
- if ((ret = releaseintf(ps, ifnum)) < 0)
+ ret = releaseintf(ps, ifnum);
+ if (ret < 0)
return ret;
destroy_async_on_interface (ps, ifnum);
return 0;
@@ -1919,7 +1923,8 @@ static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
struct usb_driver *driver = NULL;
/* alloc buffer */
- if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) {
+ size = _IOC_SIZE(ctl->ioctl_code);
+ if (size > 0) {
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 818369afff63..6b5063e7943f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -160,6 +160,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
spin_lock(&usb_driver->dynids.lock);
list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
struct usb_device_id *id = &dynid->id;
+
if ((id->idVendor == idVendor) &&
(id->idProduct == idProduct)) {
list_del(&dynid->node);
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 39a24021fe4d..101983b7e8d2 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -51,7 +51,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev,
{
struct ep_device *ep = to_ep_device(dev);
return sprintf(buf, "%04x\n",
- usb_endpoint_maxp(ep->desc) & 0x07ff);
+ usb_endpoint_maxp(ep->desc) & 0x07ff);
}
static DEVICE_ATTR_RO(wMaxPacketSize);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..4d64e5c499e1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return (retval < 0) ? retval : -EMSGSIZE;
}
- if (usb_dev->speed == USB_SPEED_SUPER) {
+
+ if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(usb_dev);
- if (retval < 0) {
+ if (!retval) {
+ usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+ } else if (usb_dev->speed == USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_list_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
@@ -2683,12 +2686,14 @@ int usb_add_hcd(struct usb_hcd *hcd,
* bottom up so that hcds can customize the root hubs before hub_wq
* starts talking to them. (Note, bus id is assigned early too.)
*/
- if ((retval = hcd_buffer_create(hcd)) != 0) {
+ retval = hcd_buffer_create(hcd);
+ if (retval != 0) {
dev_dbg(hcd->self.controller, "pool alloc failed\n");
goto err_create_buf;
}
- if ((retval = usb_register_bus(&hcd->self)) < 0)
+ retval = usb_register_bus(&hcd->self);
+ if (retval < 0)
goto err_register_bus;
rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
@@ -2734,9 +2739,13 @@ int usb_add_hcd(struct usb_hcd *hcd,
/* "reset" is misnamed; its role is now one-time init. the controller
* should already have been reset (and boot firmware kicked off etc).
*/
- if (hcd->driver->reset && (retval = hcd->driver->reset(hcd)) < 0) {
- dev_err(hcd->self.controller, "can't setup: %d\n", retval);
- goto err_hcd_driver_setup;
+ if (hcd->driver->reset) {
+ retval = hcd->driver->reset(hcd);
+ if (retval < 0) {
+ dev_err(hcd->self.controller, "can't setup: %d\n",
+ retval);
+ goto err_hcd_driver_setup;
+ }
}
hcd->rh_pollable = 1;
@@ -2766,7 +2775,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
/* starting here, usbcore will pay attention to this root hub */
- if ((retval = register_root_hub(hcd)) != 0)
+ retval = register_root_hub(hcd);
+ if (retval != 0)
goto err_register_root_hub;
retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..431839bd291f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -50,8 +50,8 @@ DEFINE_MUTEX(usb_port_peer_mutex);
/* cycle leds on hubs that aren't blinking for attention */
static bool blinkenlights = 0;
-module_param (blinkenlights, bool, S_IRUGO);
-MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
+module_param(blinkenlights, bool, S_IRUGO);
+MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs");
/*
* Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -439,7 +439,7 @@ static void set_port_led(struct usb_hub *hub, int port1, int selector)
#define LED_CYCLE_PERIOD ((2*HZ)/3)
-static void led_work (struct work_struct *work)
+static void led_work(struct work_struct *work)
{
struct usb_hub *hub =
container_of(work, struct usb_hub, leds.work);
@@ -646,7 +646,7 @@ static void hub_irq(struct urb *urb)
default: /* presumably an error */
/* Cause a hub reset after 10 consecutive errors */
- dev_dbg (hub->intfdev, "transfer --> %d\n", status);
+ dev_dbg(hub->intfdev, "transfer --> %d\n", status);
if ((++hub->nerrors < 10) || hub->error)
goto resubmit;
hub->error = status;
@@ -671,14 +671,14 @@ resubmit:
if (hub->quiescing)
return;
- if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0
- && status != -ENODEV && status != -EPERM)
- dev_err (hub->intfdev, "resubmit --> %d\n", status);
+ status = usb_submit_urb(hub->urb, GFP_ATOMIC);
+ if (status != 0 && status != -ENODEV && status != -EPERM)
+ dev_err(hub->intfdev, "resubmit --> %d\n", status);
}
/* USB 2.0 spec Section 11.24.2.3 */
static inline int
-hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+hub_clear_tt_buffer(struct usb_device *hdev, u16 devinfo, u16 tt)
{
/* Need to clear both directions for control ep */
if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
@@ -706,7 +706,7 @@ static void hub_tt_work(struct work_struct *work)
container_of(work, struct usb_hub, tt.clear_work);
unsigned long flags;
- spin_lock_irqsave (&hub->tt.lock, flags);
+ spin_lock_irqsave(&hub->tt.lock, flags);
while (!list_empty(&hub->tt.clear_list)) {
struct list_head *next;
struct usb_tt_clear *clear;
@@ -715,14 +715,14 @@ static void hub_tt_work(struct work_struct *work)
int status;
next = hub->tt.clear_list.next;
- clear = list_entry (next, struct usb_tt_clear, clear_list);
- list_del (&clear->clear_list);
+ clear = list_entry(next, struct usb_tt_clear, clear_list);
+ list_del(&clear->clear_list);
/* drop lock so HCD can concurrently report other TT errors */
- spin_unlock_irqrestore (&hub->tt.lock, flags);
- status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
+ spin_unlock_irqrestore(&hub->tt.lock, flags);
+ status = hub_clear_tt_buffer(hdev, clear->devinfo, clear->tt);
if (status && status != -ENODEV)
- dev_err (&hdev->dev,
+ dev_err(&hdev->dev,
"clear tt %d (%04x) error %d\n",
clear->tt, clear->devinfo, status);
@@ -734,7 +734,7 @@ static void hub_tt_work(struct work_struct *work)
kfree(clear);
spin_lock_irqsave(&hub->tt.lock, flags);
}
- spin_unlock_irqrestore (&hub->tt.lock, flags);
+ spin_unlock_irqrestore(&hub->tt.lock, flags);
}
/**
@@ -797,7 +797,7 @@ int usb_hub_clear_tt_buffer(struct urb *urb)
*/
clear = kmalloc(sizeof *clear, GFP_ATOMIC);
if (clear == NULL) {
- dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
+ dev_err(&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
/* FIXME recover somehow ... RESET_TT? */
return -ENOMEM;
}
@@ -806,10 +806,10 @@ int usb_hub_clear_tt_buffer(struct urb *urb)
clear->tt = tt->multi ? udev->ttport : 1;
clear->devinfo = usb_pipeendpoint (pipe);
clear->devinfo |= udev->devnum << 4;
- clear->devinfo |= usb_pipecontrol (pipe)
+ clear->devinfo |= usb_pipecontrol(pipe)
? (USB_ENDPOINT_XFER_CONTROL << 11)
: (USB_ENDPOINT_XFER_BULK << 11);
- if (usb_pipein (pipe))
+ if (usb_pipein(pipe))
clear->devinfo |= 1 << 15;
/* info for completion callback */
@@ -817,10 +817,10 @@ int usb_hub_clear_tt_buffer(struct urb *urb)
clear->ep = urb->ep;
/* tell keventd to clear state for this TT */
- spin_lock_irqsave (&tt->lock, flags);
- list_add_tail (&clear->clear_list, &tt->clear_list);
+ spin_lock_irqsave(&tt->lock, flags);
+ list_add_tail(&clear->clear_list, &tt->clear_list);
schedule_work(&tt->clear_work);
- spin_unlock_irqrestore (&tt->lock, flags);
+ spin_unlock_irqrestore(&tt->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
@@ -1442,8 +1442,8 @@ static int hub_configure(struct usb_hub *hub,
break;
}
- spin_lock_init (&hub->tt.lock);
- INIT_LIST_HEAD (&hub->tt.clear_list);
+ spin_lock_init(&hub->tt.lock);
+ INIT_LIST_HEAD(&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
case USB_HUB_PR_FS:
@@ -1632,7 +1632,7 @@ static int hub_configure(struct usb_hub *hub,
return 0;
fail:
- dev_err (hub_dev, "config failed, %s (err %d)\n",
+ dev_err(hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
return ret;
@@ -1775,7 +1775,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if ((desc->desc.bInterfaceSubClass != 0) &&
(desc->desc.bInterfaceSubClass != 1)) {
descriptor_error:
- dev_err (&intf->dev, "bad descriptor, ignoring hub\n");
+ dev_err(&intf->dev, "bad descriptor, ignoring hub\n");
return -EIO;
}
@@ -1790,11 +1790,11 @@ descriptor_error:
goto descriptor_error;
/* We found a hub */
- dev_info (&intf->dev, "USB hub found\n");
+ dev_info(&intf->dev, "USB hub found\n");
hub = kzalloc(sizeof(*hub), GFP_KERNEL);
if (!hub) {
- dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n");
+ dev_dbg(&intf->dev, "couldn't kmalloc hub struct\n");
return -ENOMEM;
}
@@ -1807,7 +1807,7 @@ descriptor_error:
usb_get_intf(intf);
usb_get_dev(hdev);
- usb_set_intfdata (intf, hub);
+ usb_set_intfdata(intf, hub);
intf->needs_remote_wakeup = 1;
pm_suspend_ignore_children(&intf->dev, true);
@@ -1820,14 +1820,14 @@ descriptor_error:
if (hub_configure(hub, endpoint) >= 0)
return 0;
- hub_disconnect (intf);
+ hub_disconnect(intf);
return -ENODEV;
}
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
- struct usb_device *hdev = interface_to_usbdev (intf);
+ struct usb_device *hdev = interface_to_usbdev(intf);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
/* assert ifno == 0 (part of hub spec) */
@@ -2143,7 +2143,7 @@ void usb_disconnect(struct usb_device **pdev)
* cleaning up all state associated with the current configuration
* so that the hardware is now fully quiesced.
*/
- dev_dbg (&udev->dev, "unregistering device\n");
+ dev_dbg(&udev->dev, "unregistering device\n");
usb_disable_device(udev, 0);
usb_hcd_synchronize_unlinks(udev);
@@ -2242,7 +2242,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
struct usb_bus *bus = udev->bus;
/* descriptor may appear anywhere in config */
- if (__usb_get_extra_descriptor (udev->rawdescriptors[0],
+ if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength),
USB_DT_OTG, (void **) &desc) == 0) {
if (desc->bmAttributes & USB_OTG_HNP) {
@@ -3526,7 +3526,7 @@ static int check_ports_changed(struct usb_hub *hub)
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
{
- struct usb_hub *hub = usb_get_intfdata (intf);
+ struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = hub->hdev;
unsigned port1;
int status;
@@ -3950,6 +3950,8 @@ int usb_disable_lpm(struct usb_device *udev)
if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
goto enable_lpm;
+ udev->usb3_lpm_enabled = 0;
+
return 0;
enable_lpm:
@@ -4007,6 +4009,8 @@ void usb_enable_lpm(struct usb_device *udev)
usb_enable_link_state(hcd, udev, USB3_LPM_U1);
usb_enable_link_state(hcd, udev, USB3_LPM_U2);
+
+ udev->usb3_lpm_enabled = 1;
}
EXPORT_SYMBOL_GPL(usb_enable_lpm);
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index a6315abe7b7c..a95b0c989c21 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -16,7 +16,7 @@
* YOU _SHOULD_ CHANGE THIS LIST TO MATCH YOUR PRODUCT AND ITS TESTING!
*/
-static struct usb_device_id whitelist_table [] = {
+static struct usb_device_id whitelist_table[] = {
/* hubs are optional in OTG, but very handy ... */
{ USB_DEVICE_INFO(USB_CLASS_HUB, 0, 0), },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d26973844a4d..cfc68c11c3f5 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -531,6 +531,25 @@ static ssize_t usb2_lpm_besl_store(struct device *dev,
}
static DEVICE_ATTR_RW(usb2_lpm_besl);
+static ssize_t usb3_hardware_lpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ const char *p;
+
+ usb_lock_device(udev);
+
+ if (udev->usb3_lpm_enabled)
+ p = "enabled";
+ else
+ p = "disabled";
+
+ usb_unlock_device(udev);
+
+ return sprintf(buf, "%s\n", p);
+}
+static DEVICE_ATTR_RO(usb3_hardware_lpm);
+
static struct attribute *usb2_hardware_lpm_attr[] = {
&dev_attr_usb2_hardware_lpm.attr,
&dev_attr_usb2_lpm_l1_timeout.attr,
@@ -542,6 +561,15 @@ static struct attribute_group usb2_hardware_lpm_attr_group = {
.attrs = usb2_hardware_lpm_attr,
};
+static struct attribute *usb3_hardware_lpm_attr[] = {
+ &dev_attr_usb3_hardware_lpm.attr,
+ NULL,
+};
+static struct attribute_group usb3_hardware_lpm_attr_group = {
+ .name = power_group_name,
+ .attrs = usb3_hardware_lpm_attr,
+};
+
static struct attribute *power_attrs[] = {
&dev_attr_autosuspend.attr,
&dev_attr_level.attr,
@@ -564,6 +592,9 @@ static int add_power_attributes(struct device *dev)
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
+ if (udev->lpm_capable == 1)
+ rc = sysfs_merge_group(&dev->kobj,
+ &usb3_hardware_lpm_attr_group);
}
return rc;
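usb3_hardware_lpm_show() above is the standard read-only sysfs attribute shape: take the device lock, snapshot the state, drop the lock, then format. A generic sketch with invented demo_dev types; DEVICE_ATTR_RO() binds the attribute to the <name>_show function by naming convention, and the group can then be merged into the device with sysfs_merge_group() as done above:

#include <linux/device.h>
#include <linux/mutex.h>

struct demo_dev {
	struct device dev;
	struct mutex lock;
	bool enabled;
};

#define to_demo_dev(d) container_of(d, struct demo_dev, dev)

static ssize_t demo_state_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct demo_dev *d = to_demo_dev(dev);
	const char *p;

	mutex_lock(&d->lock);
	p = d->enabled ? "enabled" : "disabled";
	mutex_unlock(&d->lock);

	return sprintf(buf, "%s\n", p);
}
static DEVICE_ATTR_RO(demo_state);	/* binds to demo_state_show */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_state.attr,
	NULL,
};
static const struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
};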
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e5b546f1152e..b00fe9539184 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup Host regs */
- hr = hsotg->hr_backup;
- if (!hr) {
- hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
- if (!hr) {
- dev_err(hsotg->dev, "%s: can't allocate host regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->hr_backup = hr;
- }
+ hr = &hsotg->hr_backup;
hr->hcfg = readl(hsotg->regs + HCFG);
hr->haintmsk = readl(hsotg->regs + HAINTMSK);
for (i = 0; i < hsotg->core_params->host_channels; ++i)
@@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
hr->hprt0 = readl(hsotg->regs + HPRT0);
hr->hfir = readl(hsotg->regs + HFIR);
+ hr->valid = true;
return 0;
}
@@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore host regs */
- hr = hsotg->hr_backup;
- if (!hr) {
+ hr = &hsotg->hr_backup;
+ if (!hr->valid) {
dev_err(hsotg->dev, "%s: no host registers to restore\n",
__func__);
return -EINVAL;
}
+ hr->valid = false;
writel(hr->hcfg, hsotg->regs + HCFG);
writel(hr->haintmsk, hsotg->regs + HAINTMSK);
@@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Backup dev regs */
- dr = hsotg->dr_backup;
- if (!dr) {
- dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
- if (!dr) {
- dev_err(hsotg->dev, "%s: can't allocate device regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->dr_backup = dr;
- }
+ dr = &hsotg->dr_backup;
dr->dcfg = readl(hsotg->regs + DCFG);
dr->dctl = readl(hsotg->regs + DCTL);
@@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
}
-
+ dr->valid = true;
return 0;
}
@@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore dev regs */
- dr = hsotg->dr_backup;
- if (!dr) {
+ dr = &hsotg->dr_backup;
+ if (!dr->valid) {
dev_err(hsotg->dev, "%s: no device registers to restore\n",
__func__);
return -EINVAL;
}
+ dr->valid = false;
writel(dr->dcfg, hsotg->regs + DCFG);
writel(dr->dctl, hsotg->regs + DCTL);
@@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
int i;
/* Backup global regs */
- gr = hsotg->gr_backup;
- if (!gr) {
- gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
- if (!gr) {
- dev_err(hsotg->dev, "%s: can't allocate global regs\n",
- __func__);
- return -ENOMEM;
- }
-
- hsotg->gr_backup = gr;
- }
+ gr = &hsotg->gr_backup;
gr->gotgctl = readl(hsotg->regs + GOTGCTL);
gr->gintmsk = readl(hsotg->regs + GINTMSK);
@@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
for (i = 0; i < MAX_EPS_CHANNELS; i++)
gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
+ gr->valid = true;
return 0;
}
@@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s\n", __func__);
/* Restore global regs */
- gr = hsotg->gr_backup;
- if (!gr) {
+ gr = &hsotg->gr_backup;
+ if (!gr->valid) {
dev_err(hsotg->dev, "%s: no global registers to restore\n",
__func__);
return -EINVAL;
}
+ gr->valid = false;
writel(0xffffffff, hsotg->regs + GINTSTS);
writel(gr->gotgctl, hsotg->regs + GOTGCTL);
@@ -3200,7 +3175,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->hs_phy_type);
dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
hw->fs_phy_type);
- dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
+ dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
hw->utmi_phy_data_width);
dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
hw->num_dev_ep);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 53b8de03f102..0ed87620941b 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -492,6 +492,7 @@ struct dwc2_gregs_backup {
u32 gdfifocfg;
u32 dtxfsiz[MAX_EPS_CHANNELS];
u32 gpwrdn;
+ bool valid;
};
/**
@@ -521,6 +522,7 @@ struct dwc2_dregs_backup {
u32 doepctl[MAX_EPS_CHANNELS];
u32 doeptsiz[MAX_EPS_CHANNELS];
u32 doepdma[MAX_EPS_CHANNELS];
+ bool valid;
};
/**
@@ -538,6 +540,7 @@ struct dwc2_hregs_backup {
u32 hcintmsk[MAX_EPS_CHANNELS];
u32 hprt0;
u32 hfir;
+ bool valid;
};
/**
@@ -705,9 +708,9 @@ struct dwc2_hsotg {
struct work_struct wf_otg;
struct timer_list wkp_timer;
enum dwc2_lx_state lx_state;
- struct dwc2_gregs_backup *gr_backup;
- struct dwc2_dregs_backup *dr_backup;
- struct dwc2_hregs_backup *hr_backup;
+ struct dwc2_gregs_backup gr_backup;
+ struct dwc2_dregs_backup dr_backup;
+ struct dwc2_hregs_backup hr_backup;
struct dentry *debug_root;
struct debugfs_regset32 *regset;
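The dwc2 hunks above replace lazily allocated backup structs with ones embedded in dwc2_hsotg plus a valid flag, so the save path cannot fail with -ENOMEM and a restore without a prior save is detected. A minimal sketch of the pattern with invented demo types:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_CFG	0x0

struct demo_regs_backup {
	u32 cfg;
	bool valid;
};

struct demo_hsotg {
	void __iomem *regs;
	struct demo_regs_backup backup;	/* embedded: saving cannot fail */
};

static void demo_backup(struct demo_hsotg *hs)
{
	hs->backup.cfg = readl(hs->regs + DEMO_CFG);
	hs->backup.valid = true;
}

static int demo_restore(struct demo_hsotg *hs)
{
	if (!hs->backup.valid)
		return -EINVAL;		/* nothing was saved */
	hs->backup.valid = false;	/* each snapshot restores once */

	writel(hs->backup.cfg, hs->regs + DEMO_CFG);
	return 0;
}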
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4d47b7c09238..3ee5b4c77a1f 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2880,7 +2880,7 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
epctl = readl(hs->regs + epreg);
if (value) {
- epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
+ epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
if (epctl & DXEPCTL_EPENA)
epctl |= DXEPCTL_EPDIS;
} else {
@@ -3289,6 +3289,19 @@ static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
hs_ep->ep.ops = &s3c_hsotg_ep_ops;
+ if (epnum == 0) {
+ hs_ep->ep.caps.type_control = true;
+ } else {
+ hs_ep->ep.caps.type_iso = true;
+ hs_ep->ep.caps.type_bulk = true;
+ hs_ep->ep.caps.type_int = true;
+ }
+
+ if (dir_in)
+ hs_ep->ep.caps.dir_in = true;
+ else
+ hs_ep->ep.caps.dir_out = true;
+
/*
* if we're using dma, we need to set the next-endpoint pointer
* to be something valid.
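
These caps assignments wire the s3c-hsotg endpoints into the capability-flag matching that epautoconf adopts later in this series: ep0 advertises control only, every other endpoint advertises iso/bulk/int, and exactly one direction bit is set per (unidirectional) endpoint. The same initialization in isolation, assuming a UDC whose control endpoint is numbered 0:

    #include <linux/usb/gadget.h>

    static void init_ep_caps(struct usb_ep *ep, int epnum, bool dir_in)
    {
        if (epnum == 0) {
            ep->caps.type_control = true;   /* ep0 handles control only */
        } else {
            ep->caps.type_iso = true;       /* everything else is generic */
            ep->caps.type_bulk = true;
            ep->caps.type_int = true;
        }

        /* each endpoint is unidirectional: exactly one direction bit */
        ep->caps.dir_in = dir_in;
        ep->caps.dir_out = !dir_in;
    }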
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b10377c65064..f845c41fe9e5 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
/* Caller must hold driver lock */
static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
- struct dwc2_hcd_urb *urb, void **ep_handle,
- gfp_t mem_flags)
+ struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+ struct dwc2_qtd *qtd)
{
- struct dwc2_qtd *qtd;
u32 intr_mask;
int retval;
int dev_speed;
@@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
return -ENODEV;
}
- qtd = kzalloc(sizeof(*qtd), mem_flags);
if (!qtd)
- return -ENOMEM;
+ return -EINVAL;
dwc2_hcd_qtd_init(qtd, urb);
- retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
- mem_flags);
+ retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
if (retval) {
dev_err(hsotg->dev,
"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
retval);
- kfree(qtd);
return retval;
}
@@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
u32 tflags = 0;
void *buf;
unsigned long flags;
+ struct dwc2_qh *qh;
+ bool qh_allocated = false;
+ struct dwc2_qtd *qtd;
if (dbg_urb(urb)) {
dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
@@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
urb->iso_frame_desc[i].length);
urb->hcpriv = dwc2_urb;
+ qh = (struct dwc2_qh *) ep->hcpriv;
+ /* Create QH for the endpoint if it doesn't exist */
+ if (!qh) {
+ qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
+ if (!qh) {
+ retval = -ENOMEM;
+ goto fail0;
+ }
+ ep->hcpriv = qh;
+ qh_allocated = true;
+ }
+
+ qtd = kzalloc(sizeof(*qtd), mem_flags);
+ if (!qtd) {
+ retval = -ENOMEM;
+ goto fail1;
+ }
spin_lock_irqsave(&hsotg->lock, flags);
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
- goto fail1;
+ goto fail2;
- retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
if (retval)
- goto fail2;
+ goto fail3;
if (alloc_bandwidth) {
dwc2_allocate_bus_bandwidth(hcd,
@@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return 0;
-fail2:
+fail3:
dwc2_urb->priv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
-fail1:
+fail2:
spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
+ kfree(qtd);
+fail1:
+ if (qh_allocated) {
+ struct dwc2_qtd *qtd2, *qtd2_tmp;
+
+ ep->hcpriv = NULL;
+ dwc2_hcd_qh_unlink(hsotg, qh);
+ /* Free each QTD in the QH's QTD list */
+ list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
+ qtd_list_entry)
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
+ dwc2_hcd_qh_free(hsotg, qh);
+ }
fail0:
kfree(dwc2_urb);
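
The enqueue rework above pulls both allocations (QH and QTD) out of dwc2_hcd_urb_enqueue() and in front of spin_lock_irqsave(), so the mem_flags the USB core passes in can be honored without sleeping allocations ever happening under the lock, and it unwinds with numbered labels in strict reverse order of acquisition. The label discipline, reduced to a sketch with invented resources and the locked step abstracted into a callback:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct ctx {                /* hypothetical example context */
        spinlock_t lock;        /* assumed initialized at probe time */
        void *qh;
        void *qtd;
    };

    static int enqueue(struct ctx *c, gfp_t mem_flags,
                       int (*link)(struct ctx *))  /* the locked step */
    {
        unsigned long flags;
        int ret;

        /* allocate everything before taking the lock */
        c->qh = kzalloc(64, mem_flags);
        if (!c->qh)
            return -ENOMEM;

        c->qtd = kzalloc(64, mem_flags);
        if (!c->qtd) {
            ret = -ENOMEM;
            goto fail1;
        }

        spin_lock_irqsave(&c->lock, flags);
        ret = link(c);
        if (ret)
            goto fail2;
        spin_unlock_irqrestore(&c->lock, flags);
        return 0;

    fail2:
        spin_unlock_irqrestore(&c->lock, flags);
        kfree(c->qtd);
    fail1:
        kfree(c->qh);
        return ret;
    }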
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7b5841c40033..fc1054965552 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
+extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags);
extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
@@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, gfp_t mem_flags);
+ struct dwc2_qh *qh);
/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 9b5c36256627..3ad63d392e13 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
*
* Return: Pointer to the newly allocated QH, or NULL on error
*/
-static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb,
gfp_t mem_flags)
{
@@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
*
* @hsotg: The DWC HCD structure
* @qtd: The QTD to add
- * @qh: Out parameter to return queue head
- * @atomic_alloc: Flag to do atomic alloc if needed
+ * @qh: Queue head to add qtd to
*
* Return: 0 if successful, negative error code otherwise
*
- * Finds the correct QH to place the QTD into. If it does not find a QH, it
- * will create a new QH. If the QH to which the QTD is added is not currently
- * scheduled, it is placed into the proper schedule based on its EP type.
+ * If the QH to which the QTD is added is not currently scheduled, it is placed
+ * into the proper schedule based on its EP type.
*/
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
- struct dwc2_qh **qh, gfp_t mem_flags)
+ struct dwc2_qh *qh)
{
- struct dwc2_hcd_urb *urb = qtd->urb;
- int allocated = 0;
int retval;
- /*
- * Get the QH which holds the QTD-list to insert to. Create QH if it
- * doesn't exist.
- */
- if (*qh == NULL) {
- *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
- if (*qh == NULL)
- return -ENOMEM;
- allocated = 1;
+ if (unlikely(!qh)) {
+ dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
+ retval = -EINVAL;
+ goto fail;
}
- retval = dwc2_hcd_qh_add(hsotg, *qh);
+ retval = dwc2_hcd_qh_add(hsotg, qh);
if (retval)
goto fail;
- qtd->qh = *qh;
- list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+ qtd->qh = qh;
+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
return 0;
-
fail:
- if (allocated) {
- struct dwc2_qtd *qtd2, *qtd2_tmp;
- struct dwc2_qh *qh_tmp = *qh;
-
- *qh = NULL;
- dwc2_hcd_qh_unlink(hsotg, qh_tmp);
-
- /* Free each QTD in the QH's QTD list */
- list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
- qtd_list_entry)
- dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
-
- dwc2_hcd_qh_free(hsotg, qh_tmp);
- }
-
return retval;
}
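
With QH creation moved to the caller, the failure cleanup in _dwc2_hcd_urb_enqueue() above drains the QH's QTD list with list_for_each_entry_safe() before freeing the QH. The _safe variant caches each entry's successor before the loop body runs, which is what makes unlinking and freeing the current entry legal mid-walk. The idiom on its own, with an illustrative stand-in type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct qtd_like {               /* illustrative stand-in */
        struct list_head entry;
    };

    /* drain and free a list; safe to delete entries while iterating */
    static void drain(struct list_head *head)
    {
        struct qtd_like *it, *tmp;

        list_for_each_entry_safe(it, tmp, head, entry) {
            list_del(&it->entry);
            kfree(it);
        }
    }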
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index dede32e809b6..5a42c4590402 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -104,11 +104,4 @@ config USB_DWC3_QCOM
Recent Qualcomm SoCs ship with one DesignWare Core USB3 IP inside,
say 'Y' or 'M' if you have one such device.
-comment "Debugging features"
-
-config USB_DWC3_DEBUG
- bool "Enable Debugging Messages"
- help
- Say Y here to enable debugging messages on DWC3 Driver.
-
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index c7076e37c4ed..acc951d46c27 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,8 +1,6 @@
# define_trace.h needs to know how to find our header
CFLAGS_trace.o := -I$(src)
-ccflags-$(CONFIG_USB_DWC3_DEBUG) := -DDEBUG
-
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o debug.o trace.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5c110d8e293b..064123e44566 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -446,15 +446,15 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
/* Select the HS PHY interface */
switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
- if (!strncmp(dwc->hsphy_interface, "utmi", 4)) {
+ if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "utmi", 4)) {
reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
break;
- } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) {
+ } else if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
} else {
- dev_warn(dwc->dev, "HSPHY Interface not defined\n");
-
/* Relying on default value. */
if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
break;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 7bd0a95b2815..dd5cb5577dca 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -145,7 +145,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
if (IS_ERR(exynos->susp_clk)) {
- dev_dbg(dev, "no suspend clk specified\n");
+ dev_info(dev, "no suspend clk specified\n");
exynos->susp_clk = NULL;
}
clk_prepare_enable(exynos->susp_clk);
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index fe3b9335a74e..2be268d2423d 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -115,7 +115,7 @@ static int kdwc3_probe(struct platform_device *pdev)
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
- dev_dbg(kdwc->dev, "unable to enable usb clock, err %d\n",
+ dev_err(kdwc->dev, "unable to enable usb clock, error %d\n",
error);
return error;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 6b486a36863c..a5a1b7c45743 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -128,8 +128,7 @@ struct dwc3_omap {
u32 dma_status:1;
- struct extcon_specific_cable_nb extcon_vbus_dev;
- struct extcon_specific_cable_nb extcon_id_dev;
+ struct extcon_dev *edev;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
@@ -225,12 +224,10 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
switch (status) {
case OMAP_DWC3_ID_GROUND:
- dev_dbg(omap->dev, "ID GND\n");
-
if (omap->vbus_reg) {
ret = regulator_enable(omap->vbus_reg);
if (ret) {
- dev_dbg(omap->dev, "regulator enable failed\n");
+ dev_err(omap->dev, "regulator enable failed\n");
return;
}
}
@@ -245,8 +242,6 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
break;
case OMAP_DWC3_VBUS_VALID:
- dev_dbg(omap->dev, "VBUS Connect\n");
-
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
@@ -261,8 +256,6 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
regulator_disable(omap->vbus_reg);
case OMAP_DWC3_VBUS_OFF:
- dev_dbg(omap->dev, "VBUS Disconnect\n");
-
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
@@ -273,7 +266,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
break;
default:
- dev_dbg(omap->dev, "invalid state\n");
+ dev_WARN(omap->dev, "invalid state\n");
}
}
@@ -284,37 +277,8 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
reg = dwc3_omap_read_irqmisc_status(omap);
- if (reg & USBOTGSS_IRQMISC_DMADISABLECLR) {
- dev_dbg(omap->dev, "DMA Disable was Cleared\n");
+ if (reg & USBOTGSS_IRQMISC_DMADISABLECLR)
omap->dma_status = false;
- }
-
- if (reg & USBOTGSS_IRQMISC_OEVT)
- dev_dbg(omap->dev, "OTG Event\n");
-
- if (reg & USBOTGSS_IRQMISC_DRVVBUS_RISE)
- dev_dbg(omap->dev, "DRVVBUS Rise\n");
-
- if (reg & USBOTGSS_IRQMISC_CHRGVBUS_RISE)
- dev_dbg(omap->dev, "CHRGVBUS Rise\n");
-
- if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_RISE)
- dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
-
- if (reg & USBOTGSS_IRQMISC_IDPULLUP_RISE)
- dev_dbg(omap->dev, "IDPULLUP Rise\n");
-
- if (reg & USBOTGSS_IRQMISC_DRVVBUS_FALL)
- dev_dbg(omap->dev, "DRVVBUS Fall\n");
-
- if (reg & USBOTGSS_IRQMISC_CHRGVBUS_FALL)
- dev_dbg(omap->dev, "CHRGVBUS Fall\n");
-
- if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_FALL)
- dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
-
- if (reg & USBOTGSS_IRQMISC_IDPULLUP_FALL)
- dev_dbg(omap->dev, "IDPULLUP Fall\n");
dwc3_omap_write_irqmisc_status(omap, reg);
@@ -434,7 +398,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
break;
default:
- dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
+ dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
}
dwc3_omap_write_utmi_ctrl(omap, reg);
@@ -454,23 +418,23 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
}
omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier;
- ret = extcon_register_interest(&omap->extcon_vbus_dev,
- edev->name, "USB",
- &omap->vbus_nb);
+ ret = extcon_register_notifier(edev, EXTCON_USB,
+ &omap->vbus_nb);
if (ret < 0)
dev_vdbg(omap->dev, "failed to register notifier for USB\n");
omap->id_nb.notifier_call = dwc3_omap_id_notifier;
- ret = extcon_register_interest(&omap->extcon_id_dev,
- edev->name, "USB-HOST",
- &omap->id_nb);
+ ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+ &omap->id_nb);
if (ret < 0)
dev_vdbg(omap->dev, "failed to register notifier for USB-HOST\n");
- if (extcon_get_cable_state(edev, "USB") == true)
+ if (extcon_get_cable_state_(edev, EXTCON_USB) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
- if (extcon_get_cable_state(edev, "USB-HOST") == true)
+ if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+
+ omap->edev = edev;
}
return 0;
@@ -565,11 +529,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
return 0;
err3:
- if (omap->extcon_vbus_dev.edev)
- extcon_unregister_interest(&omap->extcon_vbus_dev);
- if (omap->extcon_id_dev.edev)
- extcon_unregister_interest(&omap->extcon_id_dev);
-
+ extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
+ extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
err2:
dwc3_omap_disable_irqs(omap);
@@ -586,10 +547,8 @@ static int dwc3_omap_remove(struct platform_device *pdev)
{
struct dwc3_omap *omap = platform_get_drvdata(pdev);
- if (omap->extcon_vbus_dev.edev)
- extcon_unregister_interest(&omap->extcon_vbus_dev);
- if (omap->extcon_id_dev.edev)
- extcon_unregister_interest(&omap->extcon_id_dev);
+ extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
+ extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
dwc3_omap_disable_irqs(omap);
of_platform_depopulate(omap->dev);
pm_runtime_put_sync(&pdev->dev);
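
The omap glue switches from the string-keyed extcon_register_interest() to the id-based extcon_register_notifier(), keeping the extcon_dev pointer in the driver state so the error and remove paths can unregister the same (edev, id, notifier) triple. A sketch of the id-based flow, with an assumed callback body:

    #include <linux/extcon.h>
    #include <linux/notifier.h>

    static int vbus_event(struct notifier_block *nb, unsigned long on, void *ptr)
    {
        /* 'on' is the new cable state: 0 = detached, nonzero = attached */
        return NOTIFY_DONE;
    }

    static struct notifier_block vbus_nb = { .notifier_call = vbus_event };

    static int watch_vbus(struct extcon_dev *edev)
    {
        int ret;

        ret = extcon_register_notifier(edev, EXTCON_USB, &vbus_nb);
        if (ret < 0)
            return ret;

        /* notifiers only report changes; poll the initial state once */
        if (extcon_get_cable_state_(edev, EXTCON_USB))
            vbus_event(&vbus_nb, 1, NULL);

        return 0;
    }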
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 27e4fc896e9d..f62617999f3c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -83,17 +83,23 @@ static int dwc3_pci_quirks(struct pci_dev *pdev)
acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
acpi_dwc3_byt_gpios);
- /* These GPIOs will turn on the USB2 PHY */
- gpio = gpiod_get(&pdev->dev, "cs");
- if (!IS_ERR(gpio)) {
- gpiod_direction_output(gpio, 0);
- gpiod_set_value_cansleep(gpio, 1);
- gpiod_put(gpio);
- }
+ /*
+ * These GPIOs will turn on the USB2 PHY. Note that we have to
+ * put the gpio descriptors again here because the phy driver
+ * might want to grab them, too.
+ */
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
+
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
- gpio = gpiod_get(&pdev->dev, "reset");
- if (!IS_ERR(gpio)) {
- gpiod_direction_output(gpio, 0);
+ if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
gpiod_put(gpio);
usleep_range(10000, 11000);
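
gpiod_get_optional() folds three outcomes into one call: a descriptor when the line exists, NULL when firmware simply doesn't describe it, and ERR_PTR() for genuine failures such as -EPROBE_DEFER. Passing GPIOD_OUT_LOW also subsumes the separate gpiod_direction_output() call the old code needed. The resulting idiom, as a sketch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int pulse_line(struct device *dev)
    {
        struct gpio_desc *gpio;

        /* request "reset" driven low; NULL means the line isn't described */
        gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(gpio))
            return PTR_ERR(gpio);   /* real failure, e.g. -EPROBE_DEFER */

        if (gpio) {
            gpiod_set_value_cansleep(gpio, 1);
            gpiod_put(gpio);
        }
        return 0;
    }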
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 8c2e8eec80c2..088026048f49 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -48,13 +48,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qdwc->iface_clk = devm_clk_get(qdwc->dev, "iface");
if (IS_ERR(qdwc->iface_clk)) {
- dev_dbg(qdwc->dev, "failed to get optional iface clock\n");
+ dev_info(qdwc->dev, "failed to get optional iface clock\n");
qdwc->iface_clk = NULL;
}
qdwc->sleep_clk = devm_clk_get(qdwc->dev, "sleep");
if (IS_ERR(qdwc->sleep_clk)) {
- dev_dbg(qdwc->dev, "failed to get optional sleep clock\n");
+ dev_info(qdwc->dev, "failed to get optional sleep clock\n");
qdwc->sleep_clk = NULL;
}
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 4a1a543deeda..de4d52f62517 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -135,8 +135,6 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
val |= USB3_DEVICE_NOT_HOST;
-
- dev_dbg(dwc3_data->dev, "Configuring as Device\n");
break;
case USB_DR_MODE_HOST:
@@ -154,8 +152,6 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
*/
val |= USB3_DELAY_VBUSVALID;
-
- dev_dbg(dwc3_data->dev, "Configuring as Host\n");
break;
default:
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..5320e939e090 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -56,7 +56,7 @@ static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
}
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
- u32 len, u32 type)
+ u32 len, u32 type, bool chain)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
@@ -70,7 +70,10 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
return 0;
}
- trb = dwc->ep0_trb;
+ trb = &dwc->ep0_trb[dep->free_slot];
+
+ if (chain)
+ dep->free_slot++;
trb->bpl = lower_32_bits(buf_dma);
trb->bph = upper_32_bits(buf_dma);
@@ -78,10 +81,17 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trb->ctrl = type;
trb->ctrl |= (DWC3_TRB_CTRL_HWO
- | DWC3_TRB_CTRL_LST
- | DWC3_TRB_CTRL_IOC
| DWC3_TRB_CTRL_ISP_IMI);
+ if (chain)
+ trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ else
+ trb->ctrl |= (DWC3_TRB_CTRL_IOC
+ | DWC3_TRB_CTRL_LST);
+
+ if (chain)
+ return 0;
+
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
@@ -302,7 +312,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
int ret;
ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
- DWC3_TRBCTL_CONTROL_SETUP);
+ DWC3_TRBCTL_CONTROL_SETUP, false);
WARN_ON(ret < 0);
}
@@ -727,6 +737,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
+ case USB_REQ_SET_INTERFACE:
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+ dwc->start_config_issued = false;
+ /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
@@ -779,7 +793,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct usb_request *ur;
struct dwc3_trb *trb;
struct dwc3_ep *ep0;
- u32 transferred;
+ unsigned transfer_size = 0;
+ unsigned maxp;
+ unsigned remaining_ur_length;
+ void *buf;
+ u32 transferred = 0;
u32 status;
u32 length;
u8 epnum;
@@ -808,17 +826,37 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
}
ur = &r->request;
+ buf = ur->buf;
+ remaining_ur_length = ur->length;
length = trb->size & DWC3_TRB_SIZE_MASK;
+ maxp = ep0->endpoint.maxpacket;
+
if (dwc->ep0_bounced) {
- unsigned transfer_size = ur->length;
- unsigned maxp = ep0->endpoint.maxpacket;
+ /*
+ * Handle the first TRB before handling the bounce buffer if
+ * the request length is greater than the bounce buffer size
+ */
+ if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
+ transfer_size = ALIGN(ur->length - maxp, maxp);
+ transferred = transfer_size - length;
+ buf = (u8 *)buf + transferred;
+ ur->actual += transferred;
+ remaining_ur_length -= transferred;
+
+ trb++;
+ length = trb->size & DWC3_TRB_SIZE_MASK;
- transfer_size += (maxp - (transfer_size % maxp));
- transferred = min_t(u32, ur->length,
- transfer_size - length);
- memcpy(ur->buf, dwc->ep0_bounce, transferred);
+ ep0->free_slot = 0;
+ }
+
+ transfer_size = roundup((ur->length - transfer_size),
+ maxp);
+
+ transferred = min_t(u32, remaining_ur_length,
+ transfer_size - length);
+ memcpy(buf, dwc->ep0_bounce, transferred);
} else {
transferred = ur->length - length;
}
@@ -840,7 +878,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
ret = dwc3_ep0_start_trans(dwc, epnum,
dwc->ctrl_req_addr, 0,
- DWC3_TRBCTL_CONTROL_DATA);
+ DWC3_TRBCTL_CONTROL_DATA, false);
WARN_ON(ret < 0);
}
}
@@ -924,10 +962,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
if (req->request.length == 0) {
ret = dwc3_ep0_start_trans(dwc, dep->number,
dwc->ctrl_req_addr, 0,
- DWC3_TRBCTL_CONTROL_DATA);
+ DWC3_TRBCTL_CONTROL_DATA, false);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
&& (dep->number == 0)) {
- u32 transfer_size;
+ u32 transfer_size = 0;
u32 maxpacket;
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
@@ -937,21 +975,26 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
return;
}
- WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
-
maxpacket = dep->endpoint.maxpacket;
- transfer_size = roundup(req->request.length, maxpacket);
+
+ if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
+ transfer_size = ALIGN(req->request.length - maxpacket,
+ maxpacket);
+ ret = dwc3_ep0_start_trans(dwc, dep->number,
+ req->request.dma,
+ transfer_size,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+ }
+
+ transfer_size = roundup((req->request.length - transfer_size),
+ maxpacket);
dwc->ep0_bounced = true;
- /*
- * REVISIT in case request length is bigger than
- * DWC3_EP0_BOUNCE_SIZE we will need two chained
- * TRBs to handle the transfer.
- */
ret = dwc3_ep0_start_trans(dwc, dep->number,
dwc->ep0_bounce_addr, transfer_size,
- DWC3_TRBCTL_CONTROL_DATA);
+ DWC3_TRBCTL_CONTROL_DATA, false);
} else {
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->number);
@@ -961,7 +1004,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
}
ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
- req->request.length, DWC3_TRBCTL_CONTROL_DATA);
+ req->request.length, DWC3_TRBCTL_CONTROL_DATA,
+ false);
}
WARN_ON(ret < 0);
@@ -976,7 +1020,7 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
: DWC3_TRBCTL_CONTROL_STATUS2;
return dwc3_ep0_start_trans(dwc, dep->number,
- dwc->ctrl_req_addr, 0, type);
+ dwc->ctrl_req_addr, 0, type, false);
}
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
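
The arithmetic behind the new two-TRB split is easiest to check with numbers. A worked case, assuming maxp = 512 and DWC3_EP0_BOUNCE_SIZE = 512 (the values this series appears to use; verify against core.h), for a 4500-byte unaligned control transfer:

    first TRB  (direct from req->request.dma, DWC3_TRB_CTRL_CHN set):
        transfer_size = ALIGN(4500 - 512, 512) = 4096 bytes
    second TRB (staged via the bounce buffer, IOC | LST set):
        roundup(4500 - 4096, 512) = 512 bytes

dwc3_ep0_complete_data() reruns the same arithmetic on completion, so only the unaligned tail is memcpy'd out of the bounce buffer and ur->actual advances by the amount actually transferred across both TRBs.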
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 333a7c0078fc..0c25704dcb6b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -547,6 +547,23 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ strlcat(dep->name, "-control", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ strlcat(dep->name, "-isoc", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ strlcat(dep->name, "-bulk", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ strlcat(dep->name, "-int", sizeof(dep->name));
+ break;
+ default:
+ dev_err(dwc->dev, "invalid endpoint transfer type\n");
+ }
+
return 0;
}
@@ -586,6 +603,8 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
struct dwc3 *dwc = dep->dwc;
u32 reg;
+ dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+
dwc3_remove_requests(dwc, dep);
/* make sure HW endpoint isn't stalled */
@@ -602,6 +621,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->type = 0;
dep->flags = 0;
+ snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+ dep->number >> 1,
+ (dep->number & 1) ? "in" : "out");
+
return 0;
}
@@ -647,23 +670,6 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
return 0;
}
- switch (usb_endpoint_type(desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- strlcat(dep->name, "-control", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_ISOC:
- strlcat(dep->name, "-isoc", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_BULK:
- strlcat(dep->name, "-bulk", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_INT:
- strlcat(dep->name, "-int", sizeof(dep->name));
- break;
- default:
- dev_err(dwc->dev, "invalid endpoint transfer type\n");
- }
-
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -692,10 +698,6 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep)
return 0;
}
- snprintf(dep->name, sizeof(dep->name), "ep%d%s",
- dep->number >> 1,
- (dep->number & 1) ? "in" : "out");
-
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_disable(dep);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1713,6 +1715,17 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
return ret;
}
+ if (epnum == 0 || epnum == 1) {
+ dep->endpoint.caps.type_control = true;
+ } else {
+ dep->endpoint.caps.type_iso = true;
+ dep->endpoint.caps.type_bulk = true;
+ dep->endpoint.caps.type_int = true;
+ }
+
+ dep->endpoint.caps.dir_in = !!direction;
+ dep->endpoint.caps.dir_out = !direction;
+
INIT_LIST_HEAD(&dep->request_list);
INIT_LIST_HEAD(&dep->req_queued);
}
@@ -2685,7 +2698,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err0;
}
- dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
&dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 4e3447bbd097..b474499839d3 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -19,6 +19,7 @@
#include <linux/utsname.h>
#include <linux/usb/composite.h>
+#include <linux/usb/otg.h>
#include <asm/unaligned.h>
#include "u_os_desc.h"
@@ -209,6 +210,12 @@ int usb_add_function(struct usb_configuration *config,
function->config = config;
list_add_tail(&function->list, &config->functions);
+ if (function->bind_deactivated) {
+ value = usb_function_deactivate(function);
+ if (value)
+ goto done;
+ }
+
/* REVISIT *require* function->bind? */
if (function->bind) {
value = function->bind(config, function);
@@ -279,7 +286,7 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
if (cdev->deactivations == 0)
- status = usb_gadget_disconnect(cdev->gadget);
+ status = usb_gadget_deactivate(cdev->gadget);
if (status == 0)
cdev->deactivations++;
@@ -311,7 +318,7 @@ int usb_function_activate(struct usb_function *function)
else {
cdev->deactivations--;
if (cdev->deactivations == 0)
- status = usb_gadget_connect(cdev->gadget);
+ status = usb_gadget_activate(cdev->gadget);
}
spin_unlock_irqrestore(&cdev->lock, flags);
@@ -896,7 +903,7 @@ void usb_remove_config(struct usb_composite_dev *cdev,
/* We support strings in multiple languages ... string descriptor zero
* says which languages are supported. The typical case will be that
- * only one language (probably English) is used, with I18N handled on
+ * only one language (probably English) is used, with i18n handled on
* the host side.
*/
@@ -949,7 +956,7 @@ static int get_string(struct usb_composite_dev *cdev,
struct usb_function *f;
int len;
- /* Yes, not only is USB's I18N support probably more than most
+ /* Yes, not only is USB's i18n support probably more than most
* folk will ever care about ... also, it's all supported here.
* (Except for UTF8 support for Unicode's "Astral Planes".)
*/
@@ -1534,6 +1541,32 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
value = min(w_length, (u16) value);
}
break;
+ case USB_DT_OTG:
+ if (gadget_is_otg(gadget)) {
+ struct usb_configuration *config;
+ int otg_desc_len = 0;
+
+ if (cdev->config)
+ config = cdev->config;
+ else
+ config = list_first_entry_or_null(
+ &cdev->configs,
+ struct usb_configuration, list);
+ if (!config)
+ goto done;
+
+ if (gadget->otg_caps &&
+ (gadget->otg_caps->otg_rev >= 0x0200))
+ otg_desc_len += sizeof(
+ struct usb_otg20_descriptor);
+ else
+ otg_desc_len += sizeof(
+ struct usb_otg_descriptor);
+
+ value = min_t(int, w_length, otg_desc_len);
+ memcpy(req->buf, config->descriptors[0], value);
+ }
+ break;
}
break;
@@ -1758,10 +1791,13 @@ unknown:
* take such requests too, if that's ever needed: to work
* in config 0, etc.
*/
- list_for_each_entry(f, &cdev->config->functions, list)
- if (f->req_match && f->req_match(f, ctrl))
- goto try_fun_setup;
- f = NULL;
+ if (cdev->config) {
+ list_for_each_entry(f, &cdev->config->functions, list)
+ if (f->req_match && f->req_match(f, ctrl))
+ goto try_fun_setup;
+ f = NULL;
+ }
+
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
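
usb_function_deactivate()/usb_function_activate() now map onto usb_gadget_deactivate()/usb_gadget_activate() rather than raw disconnect/connect, letting the UDC core remember a pending connect, and the new bind_deactivated flag lets a function start life deactivated until, say, userspace opens its device node. The refcount that makes nested deactivations safe, reduced to its core (locking elided; the real code holds cdev->lock around both paths):

    #include <linux/errno.h>
    #include <linux/usb/gadget.h>

    static int deactivations;       /* cdev->deactivations in the real code */

    static int function_deactivate(struct usb_gadget *g)
    {
        int status = 0;

        if (deactivations == 0)     /* first deactivation drops pullup */
            status = usb_gadget_deactivate(g);
        if (status == 0)
            deactivations++;
        return status;
    }

    static int function_activate(struct usb_gadget *g)
    {
        if (deactivations == 0)
            return -EINVAL;         /* unbalanced activate */
        if (--deactivations == 0)   /* last activation restores pullup */
            return usb_gadget_activate(g);
        return 0;
    }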
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 34e12fc52c23..0fafa7a1b6f6 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -20,6 +20,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
+#include <linux/usb/otg.h>
/**
* usb_descriptor_fillbuf - fill buffer with descriptors
@@ -195,3 +196,58 @@ void usb_free_all_descriptors(struct usb_function *f)
usb_free_descriptors(f->ss_descriptors);
}
EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
+
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+ struct usb_gadget *gadget)
+{
+ struct usb_descriptor_header *otg_desc;
+ unsigned length = 0;
+
+ if (gadget->otg_caps && (gadget->otg_caps->otg_rev >= 0x0200))
+ length = sizeof(struct usb_otg20_descriptor);
+ else
+ length = sizeof(struct usb_otg_descriptor);
+
+ otg_desc = kzalloc(length, GFP_KERNEL);
+ return otg_desc;
+}
+EXPORT_SYMBOL_GPL(usb_otg_descriptor_alloc);
+
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+ struct usb_descriptor_header *otg_desc)
+{
+ struct usb_otg_descriptor *otg1x_desc;
+ struct usb_otg20_descriptor *otg20_desc;
+ struct usb_otg_caps *otg_caps = gadget->otg_caps;
+ u8 otg_attributes = 0;
+
+ if (!otg_desc)
+ return -EINVAL;
+
+ if (otg_caps && otg_caps->otg_rev) {
+ if (otg_caps->hnp_support)
+ otg_attributes |= USB_OTG_HNP;
+ if (otg_caps->srp_support)
+ otg_attributes |= USB_OTG_SRP;
+ if (otg_caps->adp_support && (otg_caps->otg_rev >= 0x0200))
+ otg_attributes |= USB_OTG_ADP;
+ } else {
+ otg_attributes = USB_OTG_SRP | USB_OTG_HNP;
+ }
+
+ if (otg_caps && (otg_caps->otg_rev >= 0x0200)) {
+ otg20_desc = (struct usb_otg20_descriptor *)otg_desc;
+ otg20_desc->bLength = sizeof(struct usb_otg20_descriptor);
+ otg20_desc->bDescriptorType = USB_DT_OTG;
+ otg20_desc->bmAttributes = otg_attributes;
+ otg20_desc->bcdOTG = cpu_to_le16(otg_caps->otg_rev);
+ } else {
+ otg1x_desc = (struct usb_otg_descriptor *)otg_desc;
+ otg1x_desc->bLength = sizeof(struct usb_otg_descriptor);
+ otg1x_desc->bDescriptorType = USB_DT_OTG;
+ otg1x_desc->bmAttributes = otg_attributes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_otg_descriptor_init);
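
Typical consumption of this pair, mirrored by the configfs change below: allocate a descriptor sized for the controller's OTG revision, initialize it, and publish it through a NULL-terminated descriptor list hung off each configuration. A sketch:

    #include <linux/usb/composite.h>
    #include <linux/usb/otg.h>

    static const struct usb_descriptor_header *otg_desc[2];

    static int bind_otg(struct usb_gadget *gadget)
    {
        struct usb_descriptor_header *d;

        if (!gadget_is_otg(gadget) || otg_desc[0])
            return 0;

        d = usb_otg_descriptor_alloc(gadget);  /* sized for OTG 1.x or 2.0 */
        if (!d)
            return -ENOMEM;
        usb_otg_descriptor_init(gadget, d);    /* fills bmAttributes/bcdOTG */
        otg_desc[0] = d;
        otg_desc[1] = NULL;                    /* NULL-terminated list */
        return 0;
    }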
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0495c94a23d7..294eb74fb078 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -41,6 +41,8 @@ int check_user_usb_string(const char *name,
#define MAX_NAME_LEN 40
#define MAX_USB_STRING_LANGS 2
+static const struct usb_descriptor_header *otg_desc[2];
+
struct gadget_info {
struct config_group group;
struct config_group functions_group;
@@ -55,9 +57,6 @@ struct gadget_info {
struct list_head available_func;
const char *udc_name;
-#ifdef CONFIG_USB_OTG
- struct usb_otg_descriptor otg;
-#endif
struct usb_composite_driver composite;
struct usb_composite_dev cdev;
bool use_os_desc;
@@ -571,7 +570,7 @@ static struct config_group *function_make(
if (IS_ERR(fi))
return ERR_CAST(fi);
- ret = config_item_set_name(&fi->group.cg_item, name);
+ ret = config_item_set_name(&fi->group.cg_item, "%s", name);
if (ret) {
usb_put_function_instance(fi);
return ERR_PTR(ret);
@@ -1376,6 +1375,19 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
}
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc) {
+ ret = -ENOMEM;
+ goto err_comp_cleanup;
+ }
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* Go through all configs, attach all functions */
list_for_each_entry(c, &gi->cdev.configs, list) {
struct config_usb_cfg *cfg;
@@ -1383,6 +1395,9 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
struct usb_function *tmp;
struct gadget_config_name *cn;
+ if (gadget_is_otg(gadget))
+ c->descriptors = otg_desc;
+
cfg = container_of(c, struct config_usb_cfg, c);
if (!list_empty(&cfg->string_list)) {
i = 0;
@@ -1437,6 +1452,8 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
cdev = get_gadget_data(gadget);
gi = container_of(cdev, struct gadget_info, cdev);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
purge_configs_funcs(gi);
composite_dev_cleanup(cdev);
usb_ep_autoconfig_reset(cdev->gadget);
@@ -1510,12 +1527,6 @@ static struct config_group *gadgets_make(
if (!gi->composite.gadget_driver.function)
goto err;
-#ifdef CONFIG_USB_OTG
- gi->otg.bLength = sizeof(struct usb_otg_descriptor);
- gi->otg.bDescriptorType = USB_DT_OTG;
- gi->otg.bmAttributes = USB_OTG_SRP | USB_OTG_HNP;
-#endif
-
config_group_init_type_name(&gi->group, name,
&gadget_root_type);
return &gi->group;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 919cdfdda78b..978435a51038 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -20,186 +20,6 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include "gadget_chips.h"
-
-/*
- * This should work with endpoints from controller drivers sharing the
- * same endpoint naming convention. By example:
- *
- * - ep1, ep2, ... address is fixed, not direction or type
- * - ep1in, ep2out, ... address and direction are fixed, not type
- * - ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
- * - ep1in-bulk, ep2out-iso, ... all three are fixed
- * - ep-* ... no functionality restrictions
- *
- * Type suffixes are "-bulk", "-iso", or "-int". Numbers are decimal.
- * Less common restrictions are implied by gadget_is_*().
- *
- * NOTE: each endpoint is unidirectional, as specified by its USB
- * descriptor; and isn't specific to a configuration or altsetting.
- */
-static int
-ep_matches (
- struct usb_gadget *gadget,
- struct usb_ep *ep,
- struct usb_endpoint_descriptor *desc,
- struct usb_ss_ep_comp_descriptor *ep_comp
-)
-{
- u8 type;
- const char *tmp;
- u16 max;
-
- int num_req_streams = 0;
-
- /* endpoint already claimed? */
- if (NULL != ep->driver_data)
- return 0;
-
- /* only support ep0 for portable CONTROL traffic */
- type = usb_endpoint_type(desc);
- if (USB_ENDPOINT_XFER_CONTROL == type)
- return 0;
-
- /* some other naming convention */
- if ('e' != ep->name[0])
- return 0;
-
- /* type-restriction: "-iso", "-bulk", or "-int".
- * direction-restriction: "in", "out".
- */
- if ('-' != ep->name[2]) {
- tmp = strrchr (ep->name, '-');
- if (tmp) {
- switch (type) {
- case USB_ENDPOINT_XFER_INT:
- /* bulk endpoints handle interrupt transfers,
- * except the toggle-quirky iso-synch kind
- */
- if ('s' == tmp[2]) // == "-iso"
- return 0;
- /* for now, avoid PXA "interrupt-in";
- * it's documented as never using DATA1.
- */
- if (gadget_is_pxa (gadget)
- && 'i' == tmp [1])
- return 0;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if ('b' != tmp[1]) // != "-bulk"
- return 0;
- break;
- case USB_ENDPOINT_XFER_ISOC:
- if ('s' != tmp[2]) // != "-iso"
- return 0;
- }
- } else {
- tmp = ep->name + strlen (ep->name);
- }
-
- /* direction-restriction: "..in-..", "out-.." */
- tmp--;
- if (!isdigit (*tmp)) {
- if (desc->bEndpointAddress & USB_DIR_IN) {
- if ('n' != *tmp)
- return 0;
- } else {
- if ('t' != *tmp)
- return 0;
- }
- }
- }
-
- /*
- * Get the number of required streams from the EP companion
- * descriptor and see if the EP matches it
- */
- if (usb_endpoint_xfer_bulk(desc)) {
- if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
- num_req_streams = ep_comp->bmAttributes & 0x1f;
- if (num_req_streams > ep->max_streams)
- return 0;
- }
-
- }
-
- /*
- * If the protocol driver hasn't yet decided on wMaxPacketSize
- * and wants to know the maximum possible, provide the info.
- */
- if (desc->wMaxPacketSize == 0)
- desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);
-
- /* endpoint maxpacket size is an input parameter, except for bulk
- * where it's an output parameter representing the full speed limit.
- * the usb spec fixes high speed bulk maxpacket at 512 bytes.
- */
- max = 0x7ff & usb_endpoint_maxp(desc);
- switch (type) {
- case USB_ENDPOINT_XFER_INT:
- /* INT: limit 64 bytes full speed, 1024 high/super speed */
- if (!gadget_is_dualspeed(gadget) && max > 64)
- return 0;
- /* FALLTHROUGH */
-
- case USB_ENDPOINT_XFER_ISOC:
- /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
- if (ep->maxpacket_limit < max)
- return 0;
- if (!gadget_is_dualspeed(gadget) && max > 1023)
- return 0;
-
- /* BOTH: "high bandwidth" works only at high speed */
- if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
- if (!gadget_is_dualspeed(gadget))
- return 0;
- /* configure your hardware with enough buffering!! */
- }
- break;
- }
-
- /* MATCH!! */
-
- /* report address */
- desc->bEndpointAddress &= USB_DIR_IN;
- if (isdigit (ep->name [2])) {
- u8 num = simple_strtoul (&ep->name [2], NULL, 10);
- desc->bEndpointAddress |= num;
- } else if (desc->bEndpointAddress & USB_DIR_IN) {
- if (++gadget->in_epnum > 15)
- return 0;
- desc->bEndpointAddress = USB_DIR_IN | gadget->in_epnum;
- } else {
- if (++gadget->out_epnum > 15)
- return 0;
- desc->bEndpointAddress |= gadget->out_epnum;
- }
-
- /* report (variable) full speed bulk maxpacket */
- if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
- int size = ep->maxpacket_limit;
-
- /* min() doesn't work on bitfields with gcc-3.5 */
- if (size > 64)
- size = 64;
- desc->wMaxPacketSize = cpu_to_le16(size);
- }
- ep->address = desc->bEndpointAddress;
- return 1;
-}
-
-static struct usb_ep *
-find_ep (struct usb_gadget *gadget, const char *name)
-{
- struct usb_ep *ep;
-
- list_for_each_entry (ep, &gadget->ep_list, ep_list) {
- if (0 == strcmp (ep->name, name))
- return ep;
- }
- return NULL;
-}
-
/**
* usb_ep_autoconfig_ss() - choose an endpoint matching the ep
* descriptor and ep companion descriptor
@@ -240,7 +60,7 @@ find_ep (struct usb_gadget *gadget, const char *name)
* updated with the assigned number of streams if it is
* different from the original value. To prevent the endpoint
* from being returned by a later autoconfig call, claim it by
- * assigning ep->driver_data to some non-null value.
+ * assigning ep->claimed to true.
*
* On failure, this returns a null endpoint descriptor.
*/
@@ -255,74 +75,58 @@ struct usb_ep *usb_ep_autoconfig_ss(
type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
- /* First, apply chip-specific "best usage" knowledge.
- * This might make a good usb_gadget_ops hook ...
- */
- if (gadget_is_net2280(gadget)) {
- char name[8];
-
- if (type == USB_ENDPOINT_XFER_INT) {
- /* ep-e, ep-f are PIO with only 64 byte fifos */
- ep = find_ep(gadget, "ep-e");
- if (ep && ep_matches(gadget, ep, desc, ep_comp))
- goto found_ep;
- ep = find_ep(gadget, "ep-f");
- if (ep && ep_matches(gadget, ep, desc, ep_comp))
- goto found_ep;
- }
-
- /* USB3380: use same address for usb and hardware endpoints */
- snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
- usb_endpoint_dir_in(desc) ? "in" : "out");
- ep = find_ep(gadget, name);
- if (ep && ep_matches(gadget, ep, desc, ep_comp))
+ if (gadget->ops->match_ep) {
+ ep = gadget->ops->match_ep(gadget, desc, ep_comp);
+ if (ep)
goto found_ep;
- } else if (gadget_is_goku (gadget)) {
- if (USB_ENDPOINT_XFER_INT == type) {
- /* single buffering is enough */
- ep = find_ep(gadget, "ep3-bulk");
- if (ep && ep_matches(gadget, ep, desc, ep_comp))
- goto found_ep;
- } else if (USB_ENDPOINT_XFER_BULK == type
- && (USB_DIR_IN & desc->bEndpointAddress)) {
- /* DMA may be available */
- ep = find_ep(gadget, "ep2-bulk");
- if (ep && ep_matches(gadget, ep, desc,
- ep_comp))
- goto found_ep;
- }
-
-#ifdef CONFIG_BLACKFIN
- } else if (gadget_is_musbhdrc(gadget)) {
- if ((USB_ENDPOINT_XFER_BULK == type) ||
- (USB_ENDPOINT_XFER_ISOC == type)) {
- if (USB_DIR_IN & desc->bEndpointAddress)
- ep = find_ep (gadget, "ep5in");
- else
- ep = find_ep (gadget, "ep6out");
- } else if (USB_ENDPOINT_XFER_INT == type) {
- if (USB_DIR_IN & desc->bEndpointAddress)
- ep = find_ep(gadget, "ep1in");
- else
- ep = find_ep(gadget, "ep2out");
- } else
- ep = NULL;
- if (ep && ep_matches(gadget, ep, desc, ep_comp))
- goto found_ep;
-#endif
}
/* Second, look at endpoints until an unclaimed one looks usable */
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
- if (ep_matches(gadget, ep, desc, ep_comp))
+ if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp))
goto found_ep;
}
/* Fail */
return NULL;
found_ep:
+
+ /*
+ * If the protocol driver hasn't yet decided on wMaxPacketSize
+ * and wants to know the maximum possible, provide the info.
+ */
+ if (desc->wMaxPacketSize == 0)
+ desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);
+
+ /* report address */
+ desc->bEndpointAddress &= USB_DIR_IN;
+ if (isdigit(ep->name[2])) {
+ u8 num = simple_strtoul(&ep->name[2], NULL, 10);
+ desc->bEndpointAddress |= num;
+ } else if (desc->bEndpointAddress & USB_DIR_IN) {
+ if (++gadget->in_epnum > 15)
+ return NULL;
+ desc->bEndpointAddress = USB_DIR_IN | gadget->in_epnum;
+ } else {
+ if (++gadget->out_epnum > 15)
+ return NULL;
+ desc->bEndpointAddress |= gadget->out_epnum;
+ }
+
+ /* report (variable) full speed bulk maxpacket */
+ if ((type == USB_ENDPOINT_XFER_BULK) && !ep_comp) {
+ int size = ep->maxpacket_limit;
+
+ /* min() doesn't work on bitfields with gcc-3.5 */
+ if (size > 64)
+ size = 64;
+ desc->wMaxPacketSize = cpu_to_le16(size);
+ }
+
+ ep->address = desc->bEndpointAddress;
ep->desc = NULL;
ep->comp_desc = NULL;
+ ep->claimed = true;
return ep;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
@@ -354,7 +158,7 @@ EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
* descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
* is initialized as if the endpoint were used at full speed. To prevent
* the endpoint from being returned by a later autoconfig call, claim it
- * by assigning ep->driver_data to some non-null value.
+ * by assigning ep->claimed to true.
*
* On failure, this returns a null endpoint descriptor.
*/
@@ -373,7 +177,7 @@ EXPORT_SYMBOL_GPL(usb_ep_autoconfig);
*
* Use this for devices where one configuration may need to assign
* endpoint resources very differently from the next one. It clears
- * state such as ep->driver_data and the record of assigned endpoints
+ * state such as ep->claimed and the record of assigned endpoints
* used by usb_ep_autoconfig().
*/
void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
@@ -381,7 +185,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
struct usb_ep *ep;
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
- ep->driver_data = NULL;
+ ep->claimed = false;
}
gadget->in_epnum = 0;
gadget->out_epnum = 0;
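
All the chip-specific knowledge deleted above now lives behind a new gadget op: usb_ep_autoconfig_ss() calls gadget->ops->match_ep() first and only then walks ep_list with the generic usb_gadget_ep_match_desc(). A sketch of what a controller-side hook can look like; the quirk and the gadget_find_ep_by_name() helper are invented for illustration:

    #include <linux/usb/gadget.h>

    static struct usb_ep *my_udc_match_ep(struct usb_gadget *g,
            struct usb_endpoint_descriptor *desc,
            struct usb_ss_ep_comp_descriptor *ep_comp)
    {
        struct usb_ep *ep;

        /* hypothetical quirk: steer interrupt traffic to a PIO endpoint */
        if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
            ep = gadget_find_ep_by_name(g, "ep-e"); /* invented helper */
            if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
                return ep;
        }

        return NULL;    /* NULL defers to the core's generic list walk */
    }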
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index aad8165e98ef..be9df09fde26 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include "u_serial.h"
-#include "gadget_chips.h"
/*
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 798760fa7e70..7b7424f10ddd 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -585,8 +585,8 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* Enable zlps by default for ECM conformance;
* override for musb_hdrc (avoids txdma ovhead).
*/
- ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
- );
+ ecm->port.is_zlp_ok =
+ gadget_is_zlp_supported(cdev->gadget);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
net = gether_connect(&ecm->port);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 45b8c8b338df..adc6d52efa46 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kiocb->private = p;
- kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ if (p->aio)
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
kiocb->private = p;
- kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ if (p->aio)
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
@@ -2895,11 +2897,17 @@ static int ffs_func_bind(struct usb_configuration *c,
struct usb_function *f)
{
struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
+ struct ffs_function *func = ffs_func_from_usb(f);
+ int ret;
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
- return _ffs_func_bind(c, f);
+ ret = _ffs_func_bind(c, f);
+ if (ret && !--ffs_opts->refcnt)
+ functionfs_unbind(func->ffs);
+
+ return ret;
}
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
int ret;
ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+ if (ret >= HIDG_MINORS) {
+ ida_simple_remove(&hidg_ida, ret);
+ ret = -ENODEV;
+ }
return ret;
}
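
ida_simple_get()'s third argument is an exclusive upper bound, with 0 meaning "no limit", which is why the fix above has to undo an out-of-range allocation by hand. An equivalent sketch that pushes the bound into the allocator itself, so exhaustion comes back as -ENOSPC with nothing to roll back (the HIDG_MINORS value is assumed here):

    #include <linux/gfp.h>
    #include <linux/idr.h>

    #define HIDG_MINORS 4   /* value assumed for illustration */

    static DEFINE_IDA(hidg_ida);

    static int hidg_get_minor_bounded(void)
    {
        /* allocates in [0, HIDG_MINORS); returns -ENOSPC when full */
        return ida_simple_get(&hidg_ida, 0, HIDG_MINORS, GFP_KERNEL);
    }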
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 39f49f1ad22f..6e2fe63b9267 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -28,11 +28,6 @@
* This takes messages of various sizes written OUT to a device, and loops
* them back so they can be read IN from it. It has been used by certain
* test applications. It supports limited testing of data queueing logic.
- *
- *
- * This is currently packaged as a configuration driver, which can't be
- * combined with other functions to make composite devices. However, it
- * can be combined with other independent configurations.
*/
struct f_loopback {
struct usb_function function;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d2259c663996..a6eb537d7768 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -54,7 +54,7 @@
* following fields:
*
* nluns Number of LUNs function have (anywhere from 1
- * to FSG_MAX_LUNS which is 8).
+ * to FSG_MAX_LUNS).
* luns An array of LUN configuration values. This
* should be filled for each LUN that
* function will include (ie. for "nluns"
@@ -214,12 +214,12 @@
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
-#include "gadget_chips.h"
#include "configfs.h"
@@ -279,9 +279,8 @@ struct fsg_common {
int cmnd_size;
u8 cmnd[MAX_COMMAND_SIZE];
- unsigned int nluns;
unsigned int lun;
- struct fsg_lun **luns;
+ struct fsg_lun *luns[FSG_MAX_LUNS];
struct fsg_lun *curlun;
unsigned int bulk_out_maxpacket;
@@ -490,6 +489,16 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
spin_unlock(&common->lock);
}
+static int _fsg_common_get_max_lun(struct fsg_common *common)
+{
+ int i = ARRAY_SIZE(common->luns) - 1;
+
+ while (i >= 0 && !common->luns[i])
+ --i;
+
+ return i;
+}
+
static int fsg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -533,7 +542,7 @@ static int fsg_setup(struct usb_function *f,
w_length != 1)
return -EDOM;
VDBG(fsg, "get max LUN\n");
- *(u8 *)req->buf = fsg->common->nluns - 1;
+ *(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
/* Respond with data/status */
req->length = min((u16)1, w_length);
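
With luns[] now a fixed-size member there is no nluns counter to keep consistent; the highest populated index is recomputed on demand, which also matches GET_MAX_LUN's semantics of reporting the highest addressable LUN index rather than a count. The scan-down idiom in isolation, with assumed array contents:

    #include <linux/kernel.h>

    static void *slots[8] = { [0] = (void *)1, [2] = (void *)1 };

    static int highest_used_slot(void)
    {
        int i = ARRAY_SIZE(slots) - 1;

        while (i >= 0 && !slots[i])
            --i;

        return i;   /* 2 for the contents above; -1 when all empty */
    }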
@@ -2131,8 +2140,9 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
}
/* Is the CBW meaningful? */
- if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
- cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+ if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
+ cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
+ cbw->Length > MAX_COMMAND_SIZE) {
DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
"cmdlen %u\n",
cbw->Lun, cbw->Flags, cbw->Length);
@@ -2159,7 +2169,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
- if (common->lun < common->nluns)
+ if (common->lun < ARRAY_SIZE(common->luns))
common->curlun = common->luns[common->lun];
else
common->curlun = NULL;
@@ -2307,7 +2317,7 @@ reset:
}
common->running = 1;
- for (i = 0; i < common->nluns; ++i)
+ for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
if (common->luns[i])
common->luns[i]->unit_attention_data =
SS_RESET_OCCURRED;
@@ -2409,7 +2419,7 @@ static void handle_exception(struct fsg_common *common)
if (old_state == FSG_STATE_ABORT_BULK_OUT)
common->state = FSG_STATE_STATUS_PHASE;
else {
- for (i = 0; i < common->nluns; ++i) {
+ for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
curlun = common->luns[i];
if (!curlun)
continue;
@@ -2453,7 +2463,7 @@ static void handle_exception(struct fsg_common *common)
* a waste of time. Ditto for the INTERFACE_CHANGE and
* CONFIG_CHANGE cases.
*/
- /* for (i = 0; i < common->nluns; ++i) */
+ /* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
/* if (common->luns[i]) */
/* common->luns[i]->unit_attention_data = */
/* SS_RESET_OCCURRED; */
@@ -2552,12 +2562,11 @@ static int fsg_main_thread(void *common_)
if (!common->ops || !common->ops->thread_exits
|| common->ops->thread_exits(common) < 0) {
- struct fsg_lun **curlun_it = common->luns;
- unsigned i = common->nluns;
+ int i;
down_write(&common->filesem);
- for (; i--; ++curlun_it) {
- struct fsg_lun *curlun = *curlun_it;
+ for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+ struct fsg_lun *curlun = common->luns[i];
if (!curlun || !fsg_lun_is_open(curlun))
continue;
@@ -2676,6 +2685,7 @@ static struct fsg_common *fsg_common_setup(struct fsg_common *common)
init_completion(&common->thread_notifier);
init_waitqueue_head(&common->fsg_wait);
common->state = FSG_STATE_TERMINATED;
+ memset(common->luns, 0, sizeof(common->luns));
return common;
}
@@ -2742,9 +2752,9 @@ error_release:
}
EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs)
+void fsg_common_remove_lun(struct fsg_lun *lun)
{
- if (sysfs)
+ if (device_is_registered(&lun->dev))
device_unregister(&lun->dev);
fsg_lun_close(lun);
kfree(lun);
@@ -2757,50 +2767,16 @@ static void _fsg_common_remove_luns(struct fsg_common *common, int n)
for (i = 0; i < n; ++i)
if (common->luns[i]) {
- fsg_common_remove_lun(common->luns[i], common->sysfs);
+ fsg_common_remove_lun(common->luns[i]);
common->luns[i] = NULL;
}
}
-EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
void fsg_common_remove_luns(struct fsg_common *common)
{
- _fsg_common_remove_luns(common, common->nluns);
-}
-
-void fsg_common_free_luns(struct fsg_common *common)
-{
- fsg_common_remove_luns(common);
- kfree(common->luns);
- common->luns = NULL;
-}
-EXPORT_SYMBOL_GPL(fsg_common_free_luns);
-
-int fsg_common_set_nluns(struct fsg_common *common, int nluns)
-{
- struct fsg_lun **curlun;
-
- /* Find out how many LUNs there should be */
- if (nluns < 1 || nluns > FSG_MAX_LUNS) {
- pr_err("invalid number of LUNs: %u\n", nluns);
- return -EINVAL;
- }
-
- curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
- if (unlikely(!curlun))
- return -ENOMEM;
-
- if (common->luns)
- fsg_common_free_luns(common);
-
- common->luns = curlun;
- common->nluns = nluns;
-
- pr_info("Number of LUNs=%d\n", common->nluns);
-
- return 0;
+ _fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
}
-EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
+EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
void fsg_common_set_ops(struct fsg_common *common,
const struct fsg_operations *ops)
@@ -2838,7 +2814,8 @@ int fsg_common_set_cdev(struct fsg_common *common,
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
- common->can_stall = can_stall && !(gadget_is_at91(common->gadget));
+ common->can_stall = can_stall &&
+ gadget_is_stall_supported(common->gadget);
return 0;
}
@@ -2882,7 +2859,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
char *pathbuf, *p;
int rc = -ENOMEM;
- if (!common->nluns || !common->luns)
+ if (id >= ARRAY_SIZE(common->luns))
return -ENODEV;
if (common->luns[id])
@@ -2951,7 +2928,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
return 0;
error_lun:
- if (common->sysfs)
+ if (device_is_registered(&lun->dev))
device_unregister(&lun->dev);
fsg_lun_close(lun);
common->luns[id] = NULL;
@@ -2966,14 +2943,16 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
char buf[8]; /* "lun" + up to 4 decimal digits + NUL */
int i, rc;
- for (i = 0; i < common->nluns; ++i) {
+ fsg_common_remove_luns(common);
+
+ for (i = 0; i < cfg->nluns; ++i) {
snprintf(buf, sizeof(buf), "lun%d", i);
rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
if (rc)
goto fail;
}
- pr_info("Number of LUNs=%d\n", common->nluns);
+ pr_info("Number of LUNs=%d\n", cfg->nluns);
return 0;
@@ -3022,6 +3001,7 @@ EXPORT_SYMBOL_GPL(fsg_common_run_thread);
static void fsg_common_release(struct kref *ref)
{
struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+ int i;
/* If the thread isn't already dead, tell it to exit now */
if (common->state != FSG_STATE_TERMINATED) {
@@ -3029,22 +3009,14 @@ static void fsg_common_release(struct kref *ref)
wait_for_completion(&common->thread_notifier);
}
- if (likely(common->luns)) {
- struct fsg_lun **lun_it = common->luns;
- unsigned i = common->nluns;
-
- /* In error recovery common->nluns may be zero. */
- for (; i; --i, ++lun_it) {
- struct fsg_lun *lun = *lun_it;
- if (!lun)
- continue;
- fsg_lun_close(lun);
- if (common->sysfs)
- device_unregister(&lun->dev);
- kfree(lun);
- }
-
- kfree(common->luns);
+ for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+ struct fsg_lun *lun = common->luns[i];
+ if (!lun)
+ continue;
+ fsg_lun_close(lun);
+ if (device_is_registered(&lun->dev))
+ device_unregister(&lun->dev);
+ kfree(lun);
}
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
@@ -3058,6 +3030,7 @@ static void fsg_common_release(struct kref *ref)
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
+ struct fsg_common *common = fsg->common;
struct usb_gadget *gadget = c->cdev->gadget;
int i;
struct usb_ep *ep;
@@ -3065,6 +3038,13 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
int ret;
struct fsg_opts *opts;
+ /* Don't allow to bind if we don't have at least one LUN */
+ ret = _fsg_common_get_max_lun(common);
+ if (ret < 0) {
+ pr_err("There should be at least one LUN.\n");
+ return -EINVAL;
+ }
+
opts = fsg_opts_from_func_inst(f->fi);
if (!opts->no_configfs) {
ret = fsg_common_set_cdev(fsg->common, c->cdev,
@@ -3082,7 +3062,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
/* New interface */
i = usb_interface_id(c, f);
if (i < 0)
- return i;
+ goto fail;
fsg_intf_desc.bInterfaceNumber = i;
fsg->interface_number = i;
@@ -3125,7 +3105,14 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
autoconf_fail:
ERROR(fsg, "unable to autoconfigure all endpoints\n");
- return -ENOTSUPP;
+ i = -ENOTSUPP;
+fail:
+ /* terminate the thread */
+ if (fsg->common->state != FSG_STATE_TERMINATED) {
+ raise_exception(fsg->common, FSG_STATE_EXIT);
+ wait_for_completion(&fsg->common->thread_notifier);
+ }
+ return i;
}
/****************************** ALLOCATE FUNCTION *************************/
@@ -3357,7 +3344,7 @@ static void fsg_lun_drop(struct config_group *group, struct config_item *item)
unregister_gadget_item(gadget);
}
- fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs);
+ fsg_common_remove_lun(lun_opts->lun);
fsg_opts->common->luns[lun_opts->lun_id] = NULL;
lun_opts->lun_id = 0;
mutex_unlock(&fsg_opts->lock);
@@ -3511,14 +3498,11 @@ static struct usb_function_instance *fsg_alloc_inst(void)
rc = PTR_ERR(opts->common);
goto release_opts;
}
- rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
- if (rc)
- goto release_opts;
rc = fsg_common_set_num_buffers(opts->common,
CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
if (rc)
- goto release_luns;
+ goto release_opts;
pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
@@ -3526,6 +3510,9 @@ static struct usb_function_instance *fsg_alloc_inst(void)
config.removable = true;
rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
(const char **)&opts->func_inst.group.cg_item.ci_name);
+ if (rc)
+ goto release_buffers;
+
opts->lun0.lun = opts->common->luns[0];
opts->lun0.lun_id = 0;
config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
@@ -3536,8 +3523,8 @@ static struct usb_function_instance *fsg_alloc_inst(void)
return &opts->func_inst;
-release_luns:
- kfree(opts->common->luns);
+release_buffers:
+ fsg_common_free_buffers(opts->common);
release_opts:
kfree(opts);
return ERR_PTR(rc);
@@ -3571,6 +3558,7 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
mutex_lock(&opts->lock);
opts->refcnt++;
mutex_unlock(&opts->lock);
+
fsg->function.name = FSG_DRIVER_DESC;
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index b4866fcef30b..445df6775609 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -137,14 +137,10 @@ void fsg_common_free_buffers(struct fsg_common *common);
int fsg_common_set_cdev(struct fsg_common *common,
struct usb_composite_dev *cdev, bool can_stall);
-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs);
+void fsg_common_remove_lun(struct fsg_lun *lun);
void fsg_common_remove_luns(struct fsg_common *common);
-void fsg_common_free_luns(struct fsg_common *common);
-
-int fsg_common_set_nluns(struct fsg_common *common, int nluns);
-
void fsg_common_set_ops(struct fsg_common *common,
const struct fsg_operations *ops);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 6316aa5b1c49..a287a4829273 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -329,6 +329,10 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
unsigned i;
int err;
+ /* Nothing to do for the Control Device interface */
+ if (intf == 0)
+ return 0;
+
err = f_midi_start_ep(midi, f, midi->in_ep);
if (err)
return err;
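set_alt() is invoked once per interface, and only the MIDIStreaming data interface owns the endpoints started below; the control interface carries none, hence the early return. A minimal analog (the interface numbering is an assumption for the demo):

#include <stdio.h>

/* Minimal analog of the guard added above: alt-setting requests
 * arrive per interface, and only the streaming interface (index 1
 * here, by assumption) has endpoints worth (re)starting. */
static int set_alt(unsigned int intf, unsigned int alt)
{
	if (intf == 0)	/* control interface: nothing to configure */
		return 0;
	printf("starting endpoints for intf %u alt %u\n", intf, alt);
	return 0;
}

int main(void)
{
	set_alt(0, 0);
	set_alt(1, 0);
	return 0;
}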
@@ -1145,7 +1149,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
if (opts->id && !midi->id) {
status = -ENOMEM;
mutex_unlock(&opts->lock);
- goto kstrdup_fail;
+ goto setup_fail;
}
midi->in_ports = opts->in_ports;
midi->out_ports = opts->out_ports;
@@ -1164,8 +1168,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
return &midi->func;
-kstrdup_fail:
- f_midi_unregister_card(midi);
setup_fail:
for (--i; i >= 0; i--)
kfree(midi->in_port[i]);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index bdcda9f5148e..3f05c6bd57f0 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -853,9 +853,8 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* Enable zlps by default for NCM conformance;
* override for musb_hdrc (avoids txdma ovhead)
*/
- ncm->port.is_zlp_ok = !(
- gadget_is_musbhdrc(cdev->gadget)
- );
+ ncm->port.is_zlp_ok =
+ gadget_is_zlp_supported(cdev->gadget);
ncm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ncm\n");
net = gether_connect(&ncm->port);
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index a1b79c53499c..5460426057eb 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include "u_serial.h"
-#include "gadget_chips.h"
/*
@@ -37,7 +36,6 @@ struct f_obex {
u8 data_id;
u8 cur_alt;
u8 port_num;
- u8 can_activate;
};
static inline struct f_obex *func_to_obex(struct usb_function *f)
@@ -268,9 +266,6 @@ static void obex_connect(struct gserial *g)
struct usb_composite_dev *cdev = g->func.config->cdev;
int status;
- if (!obex->can_activate)
- return;
-
status = usb_function_activate(&g->func);
if (status)
dev_dbg(&cdev->gadget->dev,
@@ -284,9 +279,6 @@ static void obex_disconnect(struct gserial *g)
struct usb_composite_dev *cdev = g->func.config->cdev;
int status;
- if (!obex->can_activate)
- return;
-
status = usb_function_deactivate(&g->func);
if (status)
dev_dbg(&cdev->gadget->dev,
@@ -304,7 +296,7 @@ static inline bool can_support_obex(struct usb_configuration *c)
*
* Altsettings are mandatory, however...
*/
- if (!gadget_supports_altsettings(c->cdev->gadget))
+ if (!gadget_is_altset_supported(c->cdev->gadget))
return false;
/* everything else is *probably* fine ... */
@@ -378,17 +370,6 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail;
- /* Avoid letting this gadget enumerate until the userspace
- * OBEX server is active.
- */
- status = usb_function_deactivate(f);
- if (status < 0)
- WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n",
- obex->port_num, status);
- else
- obex->can_activate = true;
-
-
dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n",
obex->port_num,
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -529,6 +510,7 @@ static struct usb_function *obex_alloc(struct usb_function_instance *fi)
obex->port.func.get_alt = obex_get_alt;
obex->port.func.disable = obex_disable;
obex->port.func.free_func = obex_free;
+ obex->port.func.bind_deactivated = true;
return &obex->port.func;
}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..8e2b6bea07bc 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -804,6 +804,8 @@ done:
static void printer_reset_interface(struct printer_dev *dev)
{
+ unsigned long flags;
+
if (dev->interface < 0)
return;
@@ -815,9 +817,11 @@ static void printer_reset_interface(struct printer_dev *dev)
if (dev->out_ep->desc)
usb_ep_disable(dev->out_ep);
+ spin_lock_irqsave(&dev->lock, flags);
dev->in_ep->desc = NULL;
dev->out_ep->desc = NULL;
dev->interface = -1;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
/* Change our operational Interface. */
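Taking the irqsave lock inside printer_reset_interface() itself (the following hunk drops it from printer_func_disable()) means every caller clears the descriptor and interface fields under the lock without having to remember it. A user-space analog of the idiom:

#include <pthread.h>
#include <stdio.h>

/* Analog of pushing the lock into printer_reset_interface(): the
 * helper clears the shared fields under its own lock, so callers
 * such as the disable path need no locking of their own. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int interface = 3;

static void reset_interface(void)
{
	if (interface < 0)
		return;
	pthread_mutex_lock(&lock);
	interface = -1;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reset_interface();	/* safe from any caller */
	printf("interface = %d\n", interface);
	return 0;
}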
@@ -1131,13 +1135,10 @@ static int printer_func_set_alt(struct usb_function *f,
static void printer_func_disable(struct usb_function *f)
{
struct printer_dev *dev = func_to_printer(f);
- unsigned long flags;
DBG(dev, "%s\n", __func__);
- spin_lock_irqsave(&dev->lock, flags);
printer_reset_interface(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
}
static inline struct f_printer_opts
@@ -1248,7 +1249,15 @@ static struct config_item_type printer_func_type = {
static inline int gprinter_get_minor(void)
{
- return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ int ret;
+
+ ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ if (ret >= PRINTER_MINORS) {
+ ida_simple_remove(&printer_ida, ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
}
static inline void gprinter_put_minor(int minor)
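ida_simple_get() with an upper bound of 0 hands out the smallest free id without limit, so the new check releases any id at or beyond the PRINTER_MINORS device-node window instead of letting char-device registration fail later. A toy model of the same bound (PRINTER_MINORS value assumed for the demo):

#include <stdio.h>

#define PRINTER_MINORS 4	/* assumed value for the demo */

/* Toy stand-in for the ida: a bitmap handing out the smallest free
 * id. get_minor() mirrors the patched gprinter_get_minor(): ids at
 * or beyond PRINTER_MINORS are given back immediately. */
static int used[PRINTER_MINORS + 2];

static int toy_ida_get(void)
{
	int i;

	for (i = 0; i < (int)(sizeof(used) / sizeof(used[0])); i++)
		if (!used[i]) {
			used[i] = 1;
			return i;
		}
	return -1;
}

static void toy_ida_remove(int id)
{
	used[id] = 0;
}

static int get_minor(void)
{
	int ret = toy_ida_get();

	if (ret >= PRINTER_MINORS) {
		toy_ida_remove(ret);
		ret = -1;	/* stands in for -ENODEV */
	}
	return ret;
}

int main(void)
{
	int i;

	for (i = 0; i < PRINTER_MINORS + 2; i++)
		printf("get_minor() -> %d\n", get_minor());
	return 0;
}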
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 2e02dfabc7ae..1d162e200e83 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -16,7 +16,6 @@
#include <linux/device.h>
#include "u_serial.h"
-#include "gadget_chips.h"
/*
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 3a5ae9900b1e..cbfaf86fe456 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include "g_zero.h"
-#include "gadget_chips.h"
#include "u_f.h"
/*
@@ -42,11 +41,6 @@
* queues are relatively independent, will receive a range of packet sizes,
* and can often be made to run out completely. Those issues are important
* when stress testing peripheral controller drivers.
- *
- *
- * This is currently packaged as a configuration driver, which can't be
- * combined with other functions to make composite devices. However, it
- * can be combined with other independent configurations.
*/
struct f_sourcesink {
struct usb_function function;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..f8de7ea2a0c1 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -975,6 +975,29 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
"%s:%d Error!\n", __func__, __LINE__);
}
+static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+ struct usb_endpoint_descriptor *ep_desc,
+ unsigned int factor, bool is_playback)
+{
+ int chmask, srate, ssize;
+ u16 max_packet_size;
+
+ if (is_playback) {
+ chmask = uac2_opts->p_chmask;
+ srate = uac2_opts->p_srate;
+ ssize = uac2_opts->p_ssize;
+ } else {
+ chmask = uac2_opts->c_chmask;
+ srate = uac2_opts->c_srate;
+ ssize = uac2_opts->c_ssize;
+ }
+
+ max_packet_size = num_channels(chmask) * ssize *
+ DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
+ le16_to_cpu(ep_desc->wMaxPacketSize)));
+}
+
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
@@ -1070,10 +1093,14 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->p_prm.uac2 = uac2;
uac2->c_prm.uac2 = uac2;
+ /* Calculate wMaxPacketSize according to audio bandwidth */
+ set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
+ set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
+ set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
+ set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
- hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
- hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize;
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
if (ret)
@@ -1162,14 +1189,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
factor = 1000;
} else {
ep_desc = &hs_epin_desc;
- factor = 125;
+ factor = 8000;
}
/* pre-compute some values for iso_complete() */
uac2->p_framesize = opts->p_ssize *
num_channels(opts->p_chmask);
rate = opts->p_srate * uac2->p_framesize;
- uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+ uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
prm->max_psize);
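The new helper and the corrected p_interval both use the same quantity: packets per second = factor / 2^(bInterval-1), with factor 1000 for full speed (1 ms frames) and 8000 for high speed (125 us microframes). Plugging in assumed example parameters -- stereo, 48 kHz, 16-bit samples, high-speed bInterval of 4 -- gives a worked check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int num_channels(unsigned int chmask)
{
	unsigned int n = 0;

	while (chmask) {
		n += chmask & 1;
		chmask >>= 1;
	}
	return n;
}

int main(void)
{
	/* Assumed example: stereo (chmask 0x3), 48 kHz, 2-byte samples,
	 * high-speed endpoint with bInterval = 4. */
	unsigned int chmask = 0x3, srate = 48000, ssize = 2;
	unsigned int factor = 8000, bInterval = 4;

	unsigned int interval = factor / (1u << (bInterval - 1));
	unsigned int mps = num_channels(chmask) * ssize *
			   DIV_ROUND_UP(srate, interval);

	/* prints: 1000 packets/s, wMaxPacketSize = 192 */
	printf("%u packets/s, wMaxPacketSize = %u\n", interval, mps);
	return 0;
}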
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index cf0df8fbba89..743be34605dc 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -733,12 +733,6 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc->control_req->complete = uvc_function_ep0_complete;
uvc->control_req->context = uvc;
- /* Avoid letting this gadget enumerate until the userspace server is
- * active.
- */
- if ((ret = usb_function_deactivate(f)) < 0)
- goto error;
-
if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
printk(KERN_INFO "v4l2_device_register failed\n");
goto error;
@@ -949,6 +943,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->func.disable = uvc_function_disable;
uvc->func.setup = uvc_function_setup;
uvc->func.free_func = uvc_free;
+ uvc->func.bind_deactivated = true;
return &uvc->func;
}
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index 70c891469f57..c3544e61da66 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -123,7 +123,7 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
#define FSG_BUFLEN ((u32)16384)
/* Maximal number of LUNs supported in mass storage function */
-#define FSG_MAX_LUNS 8
+#define FSG_MAX_LUNS 16
enum fsg_buffer_state {
BUF_STATE_EMPTY = 0,
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 334b38947916..c77145bd6b5b 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -20,8 +20,6 @@
#include <linux/usb/cdc.h>
#include <linux/netdevice.h>
-#include "gadget_chips.h"
-
#define QMULT_DEFAULT 5
/*
@@ -259,7 +257,7 @@ void gether_disconnect(struct gether *);
/* Some controllers can't support CDC Ethernet (ECM) ... */
static inline bool can_support_ecm(struct usb_gadget *gadget)
{
- if (!gadget_supports_altsettings(gadget))
+ if (!gadget_is_altset_supported(gadget))
return false;
/* Everything else is *presumably* fine ... but this is a bit
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index fe386df6dd3e..5c2ac8e8456d 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -21,8 +21,6 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
-#include "gadget_chips.h"
-
#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
#define FILE_CONTROL "/dev/snd/controlC0"
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index d5a7102de696..4d682ad7bf23 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -339,6 +339,7 @@ config USB_CDC_COMPOSITE
config USB_G_NOKIA
tristate "Nokia composite gadget"
depends on PHONET
+ depends on BLOCK
select USB_LIBCOMPOSITE
select USB_U_SERIAL
select USB_U_ETHER
@@ -346,6 +347,7 @@ config USB_G_NOKIA
select USB_F_OBEX
select USB_F_PHONET
select USB_F_ECM
+ select USB_F_MASS_STORAGE
help
The Nokia composite gadget provides support for acm, obex
and phonet in only one composite gadget driver.
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 1194b09ae746..4b158e2d1e57 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -58,21 +58,7 @@ static struct usb_device_descriptor device_desc = {
/*.bNumConfigurations = DYNAMIC*/
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /*
- * REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
@@ -200,10 +186,6 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
- status = fsg_common_set_nluns(opts->common, config.nluns);
- if (status)
- goto fail_set_nluns;
-
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@@ -225,10 +207,21 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto fail_string_ids;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* register our configuration */
status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
if (status < 0)
- goto fail_string_ids;
+ goto fail_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -236,11 +229,12 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
return 0;
/* error recovery */
+fail_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail_string_ids:
fsg_common_remove_luns(opts->common);
fail_set_cdev:
- fsg_common_free_luns(opts->common);
-fail_set_nluns:
fsg_common_free_buffers(opts->common);
fail:
usb_put_function_instance(fi_msg);
@@ -255,6 +249,9 @@ static int acm_ms_unbind(struct usb_composite_dev *cdev)
usb_put_function_instance(fi_msg);
usb_put_function(f_acm);
usb_put_function_instance(f_acm_inst);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
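acm_ms is the first of many legacy gadgets in this series converted from a static usb_otg_descriptor to a two-slot array filled lazily via usb_otg_descriptor_alloc()/usb_otg_descriptor_init() on OTG hardware and freed on unbind; the same hunks repeat below for audio, cdc2, ether, g_ffs, hid, mass_storage, multi, ncm, printer, serial and zero. A user-space analog of the lifecycle (malloc() stands in for the allocator):

#include <stdio.h>
#include <stdlib.h>

/* User-space analog of the lazily filled otg_desc[2]: slot 0 is
 * allocated on first bind, slot 1 stays NULL as the list terminator,
 * and unbind frees slot 0 and resets it so a later rebind allocates
 * afresh. */
static const void *otg_desc[2];

static int bind(void)
{
	if (!otg_desc[0]) {
		void *d = malloc(16);

		if (!d)
			return -1;
		otg_desc[0] = d;
		otg_desc[1] = NULL;
	}
	return 0;
}

static void unbind(void)
{
	free((void *)otg_desc[0]);
	otg_desc[0] = NULL;
}

int main(void)
{
	if (bind())
		return 1;
	printf("bound, otg_desc[0] = %p\n", (void *)otg_desc[0]);
	unbind();
	printf("unbound, otg_desc[0] = %p\n", (void *)otg_desc[0]);
	return 0;
}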
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index f289caf18a45..685cf3b4b78f 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/usb/composite.h>
-#include "gadget_chips.h"
#define DRIVER_DESC "Linux USB Audio Gadget"
#define DRIVER_VERSION "Feb 2, 2012"
@@ -124,7 +123,7 @@ static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = __constant_cpu_to_le16(0x200),
+ .bcdUSB = cpu_to_le16(0x200),
#ifdef CONFIG_GADGET_UAC1
.bDeviceClass = USB_CLASS_PER_INTERFACE,
@@ -141,8 +140,8 @@ static struct usb_device_descriptor device_desc = {
* we support. (As does bNumConfigurations.) These values can
* also be overridden by module parameters.
*/
- .idVendor = __constant_cpu_to_le16(AUDIO_VENDOR_NUM),
- .idProduct = __constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+ .idVendor = cpu_to_le16(AUDIO_VENDOR_NUM),
+ .idProduct = cpu_to_le16(AUDIO_PRODUCT_NUM),
/* .bcdDevice = f(hardware) */
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
@@ -150,20 +149,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
@@ -259,14 +245,28 @@ static int audio_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc)
+ goto fail;
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
status = usb_add_config(cdev, &audio_config_driver, audio_do_config);
if (status < 0)
- goto fail;
+ goto fail_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
return 0;
+fail_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail:
#ifndef CONFIG_GADGET_UAC1
usb_put_function_instance(fi_uac2);
@@ -289,6 +289,9 @@ static int audio_unbind(struct usb_composite_dev *cdev)
if (!IS_ERR_OR_NULL(fi_uac2))
usb_put_function_instance(fi_uac2);
#endif
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index afd3e37921a7..ecd8c8d62f2e 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -60,21 +60,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
@@ -193,10 +179,21 @@ static int cdc_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto fail1;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* register our configuration */
status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
if (status < 0)
- goto fail1;
+ goto fail2;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -204,6 +201,9 @@ static int cdc_bind(struct usb_composite_dev *cdev)
return 0;
+fail2:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail1:
usb_put_function_instance(fi_serial);
fail:
@@ -219,6 +219,9 @@ static int cdc_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_ecm);
if (!IS_ERR_OR_NULL(fi_ecm))
usb_put_function_instance(fi_ecm);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 204b10b1a7e7..5231a32aef55 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -35,10 +35,10 @@ static struct dbgp {
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_VENDOR_SPEC,
- .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
- .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
+ .idVendor = cpu_to_le16(DRIVER_VENDOR_ID),
+ .idProduct = cpu_to_le16(DRIVER_PRODUCT_ID),
.bNumConfigurations = 1,
};
@@ -251,7 +251,7 @@ static int dbgp_configure_endpoints(struct usb_gadget *gadget)
dbgp.i_ep->driver_data = gadget;
i_desc.wMaxPacketSize =
- __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+ cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc);
if (!dbgp.o_ep) {
@@ -262,7 +262,7 @@ static int dbgp_configure_endpoints(struct usb_gadget *gadget)
dbgp.o_ep->driver_data = gadget;
o_desc.wMaxPacketSize =
- __constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+ cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress;
dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress;
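The __constant_cpu_to_le16() spellings dropped here (and in audio, gmidi and nokia elsewhere in this series) are redundant: cpu_to_le16() has long been a constant expression when its argument is one, which is all a static initializer needs. A self-contained illustration of that property (the swap direction is assumed purely for the demo):

#include <stdint.h>
#include <stdio.h>

/* A byte-swap macro built only from arithmetic on its argument is a
 * constant expression whenever the argument is, so it is legal in a
 * static initializer -- the property cpu_to_le16() provides, which
 * makes a separate __constant_ variant unnecessary. */
#define demo_swab16(x) \
	((uint16_t)((((x) & 0x00ffu) << 8) | (((x) & 0xff00u) >> 8)))

static const uint16_t ids[] = {
	demo_swab16(0x0200),	/* bcdUSB-style constant */
	demo_swab16(0x0421),	/* idVendor-style constant */
};

int main(void)
{
	printf("0x%04x 0x%04x\n", ids[0], ids[1]);
	return 0;
}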
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index a3323dca218f..31e9160223e9 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -171,20 +171,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
@@ -416,17 +403,28 @@ static int eth_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto fail1;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* register our configuration(s); RNDIS first, if it's used */
if (has_rndis()) {
status = usb_add_config(cdev, &rndis_config_driver,
rndis_do_config);
if (status < 0)
- goto fail1;
+ goto fail2;
}
status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
if (status < 0)
- goto fail1;
+ goto fail2;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -434,6 +432,9 @@ static int eth_bind(struct usb_composite_dev *cdev)
return 0;
+fail2:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail1:
if (has_rndis())
usb_put_function_instance(fi_rndis);
@@ -463,6 +464,9 @@ static int eth_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_geth);
usb_put_function_instance(fi_geth);
}
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index e821931c965c..320a81b2baa6 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -88,21 +88,7 @@ MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
module_param_array_named(functions, func_names, charp, &func_num, 0);
MODULE_PARM_DESC(functions, "USB Functions list");
-static const struct usb_descriptor_header *gfs_otg_desc[] = {
- (const struct usb_descriptor_header *)
- &(const struct usb_otg_descriptor) {
- .bLength = sizeof(struct usb_otg_descriptor),
- .bDescriptorType = USB_DT_OTG,
-
- /*
- * REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
- },
-
- NULL
-};
+static const struct usb_descriptor_header *gfs_otg_desc[2];
/* String IDs are assigned dynamically */
static struct usb_string gfs_strings[] = {
@@ -412,6 +398,17 @@ static int gfs_bind(struct usb_composite_dev *cdev)
goto error_rndis;
gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(cdev->gadget) && !gfs_otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc)
+ goto error_rndis;
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ gfs_otg_desc[0] = usb_desc;
+ gfs_otg_desc[1] = NULL;
+ }
+
for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
struct gfs_configuration *c = gfs_configurations + i;
int sid = USB_GADGET_FIRST_AVAIL_IDX + i;
@@ -432,6 +429,8 @@ static int gfs_bind(struct usb_composite_dev *cdev)
/* TODO */
error_unbind:
+ kfree(gfs_otg_desc[0]);
+ gfs_otg_desc[0] = NULL;
error_rndis:
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
usb_put_function_instance(fi_rndis);
@@ -473,6 +472,9 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
for (i = 0; i < N_CONF * func_num; ++i)
usb_put_function(*(f_ffs[0] + i));
+ kfree(gfs_otg_desc[0]);
+ gfs_otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index da19c486b61e..8a18348ae86e 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -35,8 +35,6 @@
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
-#include "gadget_chips.h"
-
#include "u_midi.h"
/*-------------------------------------------------------------------------*/
@@ -88,10 +86,10 @@ MODULE_PARM_DESC(out_ports, "Number of MIDI output ports");
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_PER_INTERFACE,
- .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_NUM),
- .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_NUM),
+ .idVendor = cpu_to_le16(DRIVER_VENDOR_NUM),
+ .idProduct = cpu_to_le16(DRIVER_PRODUCT_NUM),
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
.bNumConfigurations = 1,
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 2baa572686c6..7e5d2c48476e 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -19,7 +19,6 @@
#include <linux/usb/composite.h>
#include <linux/usb/g_hid.h>
-#include "gadget_chips.h"
#define DRIVER_DESC "HID Gadget"
#define DRIVER_VERSION "2010/03/16"
@@ -68,21 +67,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
@@ -186,16 +171,30 @@ static int hid_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto put;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* register our configuration */
status = usb_add_config(cdev, &config_driver, do_config);
if (status < 0)
- goto put;
+ goto free_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
return 0;
+free_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
put:
list_for_each_entry(m, &hidg_func_list, node) {
if (m == n)
@@ -213,6 +212,10 @@ static int hid_unbind(struct usb_composite_dev *cdev)
usb_put_function(n->f);
usb_put_function_instance(n->fi);
}
+
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index e7bfb081f111..bda3c519110f 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -64,21 +64,7 @@ static struct usb_device_descriptor msg_device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /*
- * REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
static struct usb_string strings_dev[] = {
[USB_GADGET_MANUFACTURER_IDX].s = "",
@@ -191,10 +177,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
- status = fsg_common_set_nluns(opts->common, config.nluns);
- if (status)
- goto fail_set_nluns;
-
fsg_common_set_ops(opts->common, &ops);
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
@@ -214,9 +196,20 @@ static int msg_bind(struct usb_composite_dev *cdev)
goto fail_string_ids;
msg_device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc)
+ goto fail_string_ids;
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
if (status < 0)
- goto fail_string_ids;
+ goto fail_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
@@ -224,11 +217,12 @@ static int msg_bind(struct usb_composite_dev *cdev)
set_bit(0, &msg_registered);
return 0;
+fail_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail_string_ids:
fsg_common_remove_luns(opts->common);
fail_set_cdev:
- fsg_common_free_luns(opts->common);
-fail_set_nluns:
fsg_common_free_buffers(opts->common);
fail:
usb_put_function_instance(fi_msg);
@@ -243,6 +237,9 @@ static int msg_unbind(struct usb_composite_dev *cdev)
if (!IS_ERR(fi_msg))
usb_put_function_instance(fi_msg);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index b21b51f0c9fa..4fe794ddcd49 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -78,21 +78,7 @@ static struct usb_device_descriptor device_desc = {
.idProduct = cpu_to_le16(MULTI_PRODUCT_NUM),
};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &(struct usb_otg_descriptor){
- .bLength = sizeof(struct usb_otg_descriptor),
- .bDescriptorType = USB_DT_OTG,
-
- /*
- * REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
- },
- NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
enum {
MULTI_STRING_RNDIS_CONFIG_IDX = USB_GADGET_FIRST_AVAIL_IDX,
@@ -407,10 +393,6 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
if (status)
goto fail2;
- status = fsg_common_set_nluns(fsg_opts->common, config.nluns);
- if (status)
- goto fail_set_nluns;
-
status = fsg_common_set_cdev(fsg_opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@@ -429,14 +411,25 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
goto fail_string_ids;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto fail_string_ids;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
/* register configurations */
status = rndis_config_register(cdev);
if (unlikely(status < 0))
- goto fail_string_ids;
+ goto fail_otg_desc;
status = cdc_config_register(cdev);
if (unlikely(status < 0))
- goto fail_string_ids;
+ goto fail_otg_desc;
usb_composite_overwrite_options(cdev, &coverwrite);
/* we're done */
@@ -445,11 +438,12 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
/* error recovery */
+fail_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail_string_ids:
fsg_common_remove_luns(fsg_opts->common);
fail_set_cdev:
- fsg_common_free_luns(fsg_opts->common);
-fail_set_nluns:
fsg_common_free_buffers(fsg_opts->common);
fail2:
usb_put_function_instance(fi_msg);
@@ -490,6 +484,9 @@ static int multi_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_ecm);
usb_put_function_instance(fi_ecm);
#endif
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 6ce7421412e9..2bae4381332d 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -69,20 +69,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
static struct usb_string strings_dev[] = {
@@ -171,16 +158,30 @@ static int gncm_bind(struct usb_composite_dev *cdev)
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
+ if (gadget_is_otg(gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+ if (!usb_desc)
+ goto fail;
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
+
status = usb_add_config(cdev, &ncm_config_driver,
ncm_do_config);
if (status < 0)
- goto fail;
+ goto fail1;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, "%s\n", DRIVER_DESC);
return 0;
+fail1:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail:
usb_put_function_instance(f_ncm_inst);
return status;
@@ -192,6 +193,9 @@ static int gncm_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_ncm);
if (!IS_ERR_OR_NULL(f_ncm_inst))
usb_put_function_instance(f_ncm_inst);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index 4bb498a38a1c..8b3f6fb1825d 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -23,7 +23,7 @@
#include "u_ether.h"
#include "u_phonet.h"
#include "u_ecm.h"
-#include "gadget_chips.h"
+#include "f_mass_storage.h"
/* Defines */
@@ -34,6 +34,29 @@ USB_GADGET_COMPOSITE_OPTIONS();
USB_ETHERNET_MODULE_PARAMETERS();
+static struct fsg_module_parameters fsg_mod_data = {
+ .stall = 0,
+ .luns = 2,
+ .removable_count = 2,
+ .removable = { 1, 1, },
+};
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for a good buffering pipeline
+ */
+#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+
#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
@@ -66,10 +89,10 @@ static struct usb_gadget_strings *dev_strings[] = {
static struct usb_device_descriptor device_desc = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
- .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_COMM,
- .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
- .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+ .idVendor = cpu_to_le16(NOKIA_VENDOR_ID),
+ .idProduct = cpu_to_le16(NOKIA_PRODUCT_ID),
.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM),
/* .iManufacturer = DYNAMIC */
/* .iProduct = DYNAMIC */
@@ -94,6 +117,8 @@ static struct usb_function *f_obex1_cfg2;
static struct usb_function *f_obex2_cfg2;
static struct usb_function *f_phonet_cfg1;
static struct usb_function *f_phonet_cfg2;
+static struct usb_function *f_msg_cfg1;
+static struct usb_function *f_msg_cfg2;
static struct usb_configuration nokia_config_500ma_driver = {
@@ -117,6 +142,7 @@ static struct usb_function_instance *fi_ecm;
static struct usb_function_instance *fi_obex1;
static struct usb_function_instance *fi_obex2;
static struct usb_function_instance *fi_phonet;
+static struct usb_function_instance *fi_msg;
static int nokia_bind_config(struct usb_configuration *c)
{
@@ -125,6 +151,8 @@ static int nokia_bind_config(struct usb_configuration *c)
struct usb_function *f_obex1 = NULL;
struct usb_function *f_ecm;
struct usb_function *f_obex2 = NULL;
+ struct usb_function *f_msg;
+ struct fsg_opts *fsg_opts;
int status = 0;
int obex1_stat = -1;
int obex2_stat = -1;
@@ -160,6 +188,12 @@ static int nokia_bind_config(struct usb_configuration *c)
goto err_get_ecm;
}
+ f_msg = usb_get_function(fi_msg);
+ if (IS_ERR(f_msg)) {
+ status = PTR_ERR(f_msg);
+ goto err_get_msg;
+ }
+
if (!IS_ERR_OR_NULL(f_phonet)) {
phonet_stat = usb_add_function(c, f_phonet);
if (phonet_stat)
@@ -187,21 +221,36 @@ static int nokia_bind_config(struct usb_configuration *c)
pr_debug("could not bind ecm config %d\n", status);
goto err_ecm;
}
+
+ fsg_opts = fsg_opts_from_func_inst(fi_msg);
+
+ status = fsg_common_run_thread(fsg_opts->common);
+ if (status)
+ goto err_msg;
+
+ status = usb_add_function(c, f_msg);
+ if (status)
+ goto err_msg;
+
if (c == &nokia_config_500ma_driver) {
f_acm_cfg1 = f_acm;
f_ecm_cfg1 = f_ecm;
f_phonet_cfg1 = f_phonet;
f_obex1_cfg1 = f_obex1;
f_obex2_cfg1 = f_obex2;
+ f_msg_cfg1 = f_msg;
} else {
f_acm_cfg2 = f_acm;
f_ecm_cfg2 = f_ecm;
f_phonet_cfg2 = f_phonet;
f_obex1_cfg2 = f_obex1;
f_obex2_cfg2 = f_obex2;
+ f_msg_cfg2 = f_msg;
}
return status;
+err_msg:
+ usb_remove_function(c, f_ecm);
err_ecm:
usb_remove_function(c, f_acm);
err_conf:
@@ -211,6 +260,8 @@ err_conf:
usb_remove_function(c, f_obex1);
if (!phonet_stat)
usb_remove_function(c, f_phonet);
+ usb_put_function(f_msg);
+err_get_msg:
usb_put_function(f_ecm);
err_get_ecm:
usb_put_function(f_acm);
@@ -227,6 +278,8 @@ err_get_acm:
static int nokia_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
+ struct fsg_opts *fsg_opts;
+ struct fsg_config fsg_config;
int status;
status = usb_string_ids_tab(cdev, strings_dev);
@@ -238,7 +291,7 @@ static int nokia_bind(struct usb_composite_dev *cdev)
nokia_config_500ma_driver.iConfiguration = status;
nokia_config_100ma_driver.iConfiguration = status;
- if (!gadget_supports_altsettings(gadget)) {
+ if (!gadget_is_altset_supported(gadget)) {
status = -ENODEV;
goto err_usb;
}
@@ -267,11 +320,42 @@ static int nokia_bind(struct usb_composite_dev *cdev)
goto err_acm_inst;
}
+ fi_msg = usb_get_function_instance("mass_storage");
+ if (IS_ERR(fi_msg)) {
+ status = PTR_ERR(fi_msg);
+ goto err_ecm_inst;
+ }
+
+ /* set up mass storage function */
+ fsg_config_from_params(&fsg_config, &fsg_mod_data, fsg_num_buffers);
+ fsg_config.vendor_name = "Nokia";
+ fsg_config.product_name = "N900";
+
+ fsg_opts = fsg_opts_from_func_inst(fi_msg);
+ fsg_opts->no_configfs = true;
+
+ status = fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
+ if (status)
+ goto err_msg_inst;
+
+ status = fsg_common_set_cdev(fsg_opts->common, cdev, fsg_config.can_stall);
+ if (status)
+ goto err_msg_buf;
+
+ fsg_common_set_sysfs(fsg_opts->common, true);
+
+ status = fsg_common_create_luns(fsg_opts->common, &fsg_config);
+ if (status)
+ goto err_msg_buf;
+
+ fsg_common_set_inquiry_string(fsg_opts->common, fsg_config.vendor_name,
+ fsg_config.product_name);
+
/* finally register the configuration */
status = usb_add_config(cdev, &nokia_config_500ma_driver,
nokia_bind_config);
if (status < 0)
- goto err_ecm_inst;
+ goto err_msg_luns;
status = usb_add_config(cdev, &nokia_config_100ma_driver,
nokia_bind_config);
@@ -292,6 +376,12 @@ err_put_cfg1:
if (!IS_ERR_OR_NULL(f_phonet_cfg1))
usb_put_function(f_phonet_cfg1);
usb_put_function(f_ecm_cfg1);
+err_msg_luns:
+ fsg_common_remove_luns(fsg_opts->common);
+err_msg_buf:
+ fsg_common_free_buffers(fsg_opts->common);
+err_msg_inst:
+ usb_put_function_instance(fi_msg);
err_ecm_inst:
usb_put_function_instance(fi_ecm);
err_acm_inst:
@@ -325,7 +415,10 @@ static int nokia_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_acm_cfg2);
usb_put_function(f_ecm_cfg1);
usb_put_function(f_ecm_cfg2);
+ usb_put_function(f_msg_cfg1);
+ usb_put_function(f_msg_cfg2);
+ usb_put_function_instance(fi_msg);
usb_put_function_instance(fi_ecm);
if (!IS_ERR(fi_obex2))
usb_put_function_instance(fi_obex2);
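The mass-storage function instance is wired into the nokia gadget in a fixed order -- num_buffers, cdev, sysfs, LUNs, inquiry string -- and the new error labels unwind it in reverse. A stub program showing the shape of that unwind (step names follow the diff above; the bodies are placeholders, not the real calls):

#include <stdio.h>

/* Stubs named after the calls in the diff; each returns 0 on
 * success. The point is the reverse-order unwind in bind(). */
static int set_num_buffers(void) { puts("fsg_common_set_num_buffers"); return 0; }
static int set_cdev(void)        { puts("fsg_common_set_cdev");        return 0; }
static int create_luns(void)     { puts("fsg_common_create_luns");     return 0; }

static int bind(void)
{
	if (set_num_buffers())
		goto err_inst;		/* err_msg_inst in the diff */
	if (set_cdev())
		goto err_buf;		/* err_msg_buf */
	if (create_luns())
		goto err_buf;		/* err_msg_buf */
	puts("fsg_common_set_inquiry_string");
	return 0;

err_buf:
	puts("fsg_common_free_buffers");
err_inst:
	puts("usb_put_function_instance");
	return -1;
}

int main(void)
{
	return bind() ? 1 : 0;
}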
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 1ce7df1060a5..a22d30a4def1 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -19,8 +19,6 @@
#include <linux/usb/gadget.h>
#include <linux/usb/g_printer.h>
-#include "gadget_chips.h"
-
USB_GADGET_COMPOSITE_OPTIONS();
#define DRIVER_DESC "Printer Gadget"
@@ -82,16 +80,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
- .bmAttributes = USB_OTG_SRP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
@@ -136,7 +125,6 @@ static int printer_do_config(struct usb_configuration *c)
usb_gadget_set_selfpowered(gadget);
if (gadget_is_otg(gadget)) {
- otg_descriptor.bmAttributes |= USB_OTG_HNP;
printer_cfg_driver.descriptors = otg_desc;
printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
@@ -174,21 +162,39 @@ static int printer_bind(struct usb_composite_dev *cdev)
opts->q_len = QLEN;
ret = usb_string_ids_tab(cdev, strings);
- if (ret < 0) {
- usb_put_function_instance(fi_printer);
- return ret;
- }
+ if (ret < 0)
+ goto fail_put_func_inst;
+
device_desc.iManufacturer = strings[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings[USB_GADGET_PRODUCT_IDX].id;
device_desc.iSerialNumber = strings[USB_GADGET_SERIAL_IDX].id;
- ret = usb_add_config(cdev, &printer_cfg_driver, printer_do_config);
- if (ret) {
- usb_put_function_instance(fi_printer);
- return ret;
+ if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc) {
+ ret = -ENOMEM;
+ goto fail_put_func_inst;
+ }
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
}
+
+ ret = usb_add_config(cdev, &printer_cfg_driver, printer_do_config);
+ if (ret)
+ goto fail_free_otg_desc;
+
usb_composite_overwrite_options(cdev, &coverwrite);
return ret;
+
+fail_free_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+fail_put_func_inst:
+ usb_put_function_instance(fi_printer);
+ return ret;
}
static int printer_unbind(struct usb_composite_dev *cdev)
@@ -196,6 +202,9 @@ static int printer_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_printer);
usb_put_function_instance(fi_printer);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 8b7528f9b78e..c5d42e0347a9 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -17,7 +17,6 @@
#include <linux/tty_flip.h>
#include "u_serial.h"
-#include "gadget_chips.h"
/* Defines */
@@ -79,20 +78,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 1,
};
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
/*-------------------------------------------------------------------------*/
@@ -191,6 +177,18 @@ static int gs_bind(struct usb_composite_dev *cdev)
serial_config_driver.iConfiguration = status;
if (gadget_is_otg(cdev->gadget)) {
+ if (!otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc) {
+ status = -ENOMEM;
+ goto fail;
+ }
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
serial_config_driver.descriptors = otg_desc;
serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
@@ -208,13 +206,15 @@ static int gs_bind(struct usb_composite_dev *cdev)
"gser");
}
if (status < 0)
- goto fail;
+ goto fail1;
usb_composite_overwrite_options(cdev, &coverwrite);
INFO(cdev, "%s\n", GS_VERSION_NAME);
return 0;
-
+fail1:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
fail:
return status;
}
@@ -227,6 +227,10 @@ static int gs_unbind(struct usb_composite_dev *cdev)
usb_put_function(f_serial[i]);
usb_put_function_instance(fi_serial[i]);
}
+
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index c986e8addb90..37a410056fed 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -121,24 +121,7 @@ static struct usb_device_descriptor device_desc = {
.bNumConfigurations = 2,
};
-#ifdef CONFIG_USB_OTG
-static struct usb_otg_descriptor otg_descriptor = {
- .bLength = sizeof otg_descriptor,
- .bDescriptorType = USB_DT_OTG,
-
- /* REVISIT SRP-only hardware is possible, although
- * it would not be called "OTG" ...
- */
- .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
- (struct usb_descriptor_header *) &otg_descriptor,
- NULL,
-};
-#else
-#define otg_desc NULL
-#endif
+static const struct usb_descriptor_header *otg_desc[2];
/* string IDs are assigned dynamically */
/* default serial number takes at least two packets */
@@ -341,6 +324,18 @@ static int zero_bind(struct usb_composite_dev *cdev)
/* support OTG systems */
if (gadget_is_otg(cdev->gadget)) {
+ if (!otg_desc[0]) {
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+ if (!usb_desc) {
+ status = -ENOMEM;
+ goto err_conf_flb;
+ }
+ usb_otg_descriptor_init(cdev->gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+ }
sourcesink_driver.descriptors = otg_desc;
sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
loopback_driver.descriptors = otg_desc;
@@ -359,12 +354,12 @@ static int zero_bind(struct usb_composite_dev *cdev)
}
status = usb_add_function(&sourcesink_driver, func_ss);
if (status)
- goto err_conf_flb;
+ goto err_free_otg_desc;
usb_ep_autoconfig_reset(cdev->gadget);
status = usb_add_function(&loopback_driver, func_lb);
if (status)
- goto err_conf_flb;
+ goto err_free_otg_desc;
usb_ep_autoconfig_reset(cdev->gadget);
usb_composite_overwrite_options(cdev, &coverwrite);
@@ -373,6 +368,9 @@ static int zero_bind(struct usb_composite_dev *cdev)
return 0;
+err_free_otg_desc:
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
err_conf_flb:
usb_put_function(func_lb);
func_lb = NULL;
@@ -397,6 +395,9 @@ static int zero_unbind(struct usb_composite_dev *cdev)
if (!IS_ERR_OR_NULL(func_lb))
usb_put_function(func_lb);
usb_put_function_instance(func_inst_lb);
+ kfree(otg_desc[0]);
+ otg_desc[0] = NULL;
+
return 0;
}
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index de7e5e2ccf1c..fdacddb18c00 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -138,15 +138,82 @@ static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
-static const char *const ep_string[] = {
- ep0_string,
- "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
- "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
- "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
- "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
- "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
- "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
- "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO(ep0_string,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep1in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep3in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep5in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep6in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep7in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep8in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep9in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep10in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep11in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep12in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep13in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep14in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep15in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep0out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep1out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep4out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep5out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep6out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep7out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep8out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep9out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep10out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep11out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep12out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep13out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep14out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep15out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
};
/* DMA usage flag */
@@ -1517,7 +1584,8 @@ static void udc_setup_endpoints(struct udc *dev)
for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
ep = &dev->ep[tmp];
ep->dev = dev;
- ep->ep.name = ep_string[tmp];
+ ep->ep.name = ep_info[tmp].name;
+ ep->ep.caps = ep_info[tmp].caps;
ep->num = tmp;
/* txfifo size is calculated at enable time */
ep->txfifo = dev->txfifo;
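amd5536udc, at91_udc and bcm63xx (below) all get the same treatment: the flat name array becomes an EP_INFO() table pairing each name with a struct usb_ep_caps, and the probe loop assigns both by index, while atmel_usba and bdc set the caps fields directly. A user-space analog of the table idiom -- the caps bitfield names mirror the diff but the struct here is local to the demo:

#include <stdio.h>

/* Local stand-in for struct usb_ep_caps; field names mirror the diff. */
struct demo_ep_caps {
	unsigned int type_control:1;
	unsigned int type_bulk:1;
	unsigned int dir_in:1;
	unsigned int dir_out:1;
};

static const struct {
	const char *name;
	struct demo_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, ...) { .name = _name, .caps = { __VA_ARGS__ } }
	EP_INFO("ep0in",       .type_control = 1, .dir_in = 1),
	EP_INFO("ep1in-int",   .type_bulk = 1, .dir_in = 1),
	EP_INFO("ep1out-bulk", .type_bulk = 1, .dir_out = 1),
#undef EP_INFO
};

int main(void)
{
	unsigned int i;

	/* Mirrors the probe loop: name and caps come from one table. */
	for (i = 0; i < sizeof(ep_info) / sizeof(ep_info[0]); i++)
		printf("%-12s control=%u bulk=%u in=%u out=%u\n",
		       ep_info[i].name,
		       ep_info[i].caps.type_control,
		       ep_info[i].caps.type_bulk,
		       ep_info[i].caps.dir_in,
		       ep_info[i].caps.dir_out);
	return 0;
}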
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index fc4226462f8f..d0d18947f58b 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -59,15 +59,34 @@
#define DRIVER_VERSION "3 May 2006"
static const char driver_name [] = "at91_udc";
-static const char * const ep_names[] = {
- "ep0",
- "ep1",
- "ep2",
- "ep3-int",
- "ep4",
- "ep5",
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO("ep0",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep2",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep3-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep4",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep5",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+
+#undef EP_INFO
};
-#define ep0name ep_names[0]
+
+#define ep0name ep_info[0].name
#define VBUS_POLL_TIMEOUT msecs_to_jiffies(1000)
@@ -825,6 +844,7 @@ static void udc_reinit(struct at91_udc *udc)
INIT_LIST_HEAD(&udc->gadget.ep_list);
INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+ udc->gadget.quirk_stall_not_supp = 1;
for (i = 0; i < NUM_ENDPOINTS; i++) {
struct at91_ep *ep = &udc->ep[i];
@@ -1830,7 +1850,8 @@ static int at91udc_probe(struct platform_device *pdev)
for (i = 0; i < NUM_ENDPOINTS; i++) {
ep = &udc->ep[i];
- ep->ep.name = ep_names[i];
+ ep->ep.name = ep_info[i].name;
+ ep->ep.caps = ep_info[i].caps;
ep->ep.ops = &at91_ep_ops;
ep->udc = udc;
ep->int_mask = BIT(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 4095cce05e6a..3dfada8d6061 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -22,7 +22,6 @@
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
#include <linux/delay.h>
-#include <linux/platform_data/atmel.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -1989,6 +1988,10 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
ret = of_property_read_string(pp, "name", &name);
+ if (ret) {
+ dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
+ goto err;
+ }
ep->ep.name = name;
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
@@ -2063,6 +2066,17 @@ static struct usba_ep * usba_udc_pdata(struct platform_device *pdev,
ep->can_dma = pdata->ep[i].can_dma;
ep->can_isoc = pdata->ep[i].can_isoc;
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = ep->can_isoc;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
if (i)
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
}
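In the atmel_usba board-data path the capability flags are derived from per-endpoint platform data rather than a static table: only ep0 is a control endpoint, and isochronous capability follows can_isoc. The same logic as a hypothetical standalone helper, assuming the driver's struct usba_ep layout:

static void usba_ep_fill_caps(struct usba_ep *ep, int index)
{
	if (index == 0) {
		ep->ep.caps.type_control = true;
	} else {
		ep->ep.caps.type_iso = ep->can_isoc;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}

	/* USBA endpoints are bidirectional at the hardware level */
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;
}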
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 9db968ba39f5..8cbb00325824 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -44,9 +44,29 @@
#define DRV_MODULE_NAME "bcm63xx_udc"
static const char bcm63xx_ep0name[] = "ep0";
-static const char *const bcm63xx_ep_name[] = {
- bcm63xx_ep0name,
- "ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} bcm63xx_ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO(bcm63xx_ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
};
static bool use_fullspeed;
@@ -943,7 +963,8 @@ static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
for (i = 0; i < BCM63XX_NUM_EP; i++) {
struct bcm63xx_ep *bep = &udc->bep[i];
- bep->ep.name = bcm63xx_ep_name[i];
+ bep->ep.name = bcm63xx_ep_info[i].name;
+ bep->ep.caps = bcm63xx_ep_info[i].caps;
bep->ep_num = i;
bep->ep.ops = &bcm63xx_udc_ep_ops;
list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index dc18a20bf040..916d47135cac 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -290,7 +290,7 @@ struct bdc_sr {
__le32 offset[4];
};
-/* bd_table: contigous bd's in a table */
+/* bd_table: contiguous bd's in a table */
struct bd_table {
struct bdc_bd *start_bd;
/* dma address of start bd of table*/
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..d1b81539d632 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
/* The current hw dequeue pointer */
tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
deq_ptr_64 = tmp_32;
- tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
deq_ptr_64 |= ((u64)tmp_32 << 32);
/* we have the dma addr of next bd that will be fetched by hardware */
@@ -1952,12 +1952,18 @@ static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
ep->bdc = bdc;
ep->dir = dir;
+ if (dir)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+
/* ep->ep_num is the index inside bdc_ep */
if (epnum == 1) {
ep->ep_num = 1;
bdc->bdc_ep_array[ep->ep_num] = ep;
snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
+ ep->usb_ep.caps.type_control = true;
ep->comp_desc = NULL;
bdc->gadget.ep0 = &ep->usb_ep;
} else {
@@ -1971,6 +1977,9 @@ static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
dir & 1 ? "in" : "out");
usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.caps.type_iso = true;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
ep->usb_ep.max_streams = 0;
list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
}
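The BDC fix above is worth calling out: the 64-bit hardware dequeue pointer spans two 32-bit status registers, and the old code read EPSTS0 twice instead of EPSTS0 then EPSTS1. A minimal sketch of the corrected read, assuming the driver's bdc_readl()/BDC_EPSTS* helpers:

static u64 bdc_read_deq_ptr(struct bdc *bdc)
{
	u64 deq_ptr_64;

	deq_ptr_64 = bdc_readl(bdc->regs, BDC_EPSTS0(0));	/* low 32 bits */
	deq_ptr_64 |= (u64)bdc_readl(bdc->regs, BDC_EPSTS1(0)) << 32;

	return deq_ptr_64;
}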
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 181112c88f43..1379ad40d864 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -127,23 +127,87 @@ static inline struct dummy_request *usb_request_to_dummy_request
static const char ep0name[] = "ep0";
-static const char *const ep_name[] = {
- ep0name, /* everyone has ep0 */
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+ /* everyone has ep0 */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
/* act like a pxa250: fifteen fixed function endpoints */
- "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
- "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
- "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
- "ep15in-int",
-
+ EP_INFO("ep1in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep5in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep6in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep7out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep8in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep9out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep10in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep11in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep12out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep13in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep14out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep15in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
/* or like sa1100: two fixed function endpoints */
- "ep1out-bulk", "ep2in-bulk",
-
+ EP_INFO("ep1out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
/* and now some generic EPs so we have enough in multi config */
- "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
- "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+ EP_INFO("ep3out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep4in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep5out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep6out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep7in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep8out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep9in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep10out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep11out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep12in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep13out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep14in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep15out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
};
-#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
+
+#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_info)
/*-------------------------------------------------------------------------*/
@@ -938,9 +1002,10 @@ static void init_dummy_udc_hw(struct dummy *dum)
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
struct dummy_ep *ep = &dum->ep[i];
- if (!ep_name[i])
+ if (!ep_info[i].name)
break;
- ep->ep.name = ep_name[i];
+ ep->ep.name = ep_info[i].name;
+ ep->ep.caps = ep_info[i].caps;
ep->ep.ops = &dummy_ep_ops;
list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
ep->halted = ep->wedged = ep->already_seen =
@@ -1684,7 +1749,7 @@ static void dummy_timer(unsigned long _dum_hcd)
}
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
- if (!ep_name[i])
+ if (!ep_info[i].name)
break;
dum->ep[i].already_seen = 0;
}
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index e547ea7f56b1..6ba122cc7490 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -384,25 +384,15 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
return;
}
if (ep->dir_in) { /* if IN */
- if (req->req.length) {
- fotg210_start_dma(ep, req);
- } else {
- pr_err("%s : req->req.length = 0x%x\n",
- __func__, req->req.length);
- }
+ fotg210_start_dma(ep, req);
if ((req->req.length == req->req.actual) ||
(req->req.actual < ep->ep.maxpacket))
fotg210_done(ep, req, 0);
} else { /* OUT */
- if (!req->req.length) {
- fotg210_done(ep, req, 0);
- } else {
- u32 value = ioread32(ep->fotg210->reg +
- FOTG210_DMISGR0);
- value &= ~DMISGR0_MCX_OUT_INT;
- iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
- }
+ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
+ value &= ~DMISGR0_MCX_OUT_INT;
+ iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
}
}
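Dropping the zero-length special cases appears safe on both branches: an IN request with req.length == 0 already satisfies the completion test that follows (actual == length), so fotg210_done() still runs right after the DMA start, while a zero-length OUT is handled by the normal unmask-the-interrupt path. A hypothetical predicate mirroring the IN-side test:

static bool ep0_in_req_complete(const struct usb_request *req,
				const struct usb_ep *ep)
{
	/* a zero-length request trivially has length == actual */
	return req->length == req->actual || req->actual < ep->maxpacket;
}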
@@ -1153,6 +1143,17 @@ static int fotg210_udc_probe(struct platform_device *pdev)
ep->ep.name = fotg210_ep_name[i];
ep->ep.ops = &fotg210_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
@@ -1171,7 +1172,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
udc_name, fotg210);
if (ret < 0) {
pr_err("request_irq error (%d)\n", ret);
- goto err_irq;
+ goto err_req;
}
ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
@@ -1183,7 +1184,6 @@ static int fotg210_udc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
-err_irq:
free_irq(ires->start, fotg210);
err_req:
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index e0822f1b6639..5fb6f8b4f0b4 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2417,6 +2417,17 @@ static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
strcpy(ep->name, ep_name[pipe_num]);
ep->ep.name = ep_name[pipe_num];
+ if (pipe_num == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
ep->ep.ops = &qe_ep_ops;
ep->stopped = 1;
usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index c60022b46a48..aab5221d6c2e 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2313,6 +2313,19 @@ static int struct_ep_setup(struct fsl_udc *udc, unsigned char index,
ep->ep.ops = &fsl_ep_ops;
ep->stopped = 0;
+ if (index == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ if (index & 1)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
/* for ep0: maxP defined in desc
* for other eps, maxP is set by epautoconfig() called by gadget layer
*/
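fsl_udc_core lays out its endpoint structures so that odd indexes are IN and even indexes are OUT, which is why a parity test is enough to set the direction flags above. The same test as a hypothetical helper:

static void fsl_ep_fill_dir_caps(struct usb_ep *ep, unsigned char index)
{
	if (index & 1)
		ep->caps.dir_in = true;	/* odd index: IN endpoint */
	else
		ep->caps.dir_out = true;	/* even index: OUT endpoint */
}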
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 3970f453de49..948845c90e47 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1450,6 +1450,17 @@ static int fusb300_probe(struct platform_device *pdev)
ep->ep.name = fusb300_ep_name[i];
ep->ep.ops = &fusb300_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
fusb300->ep[0]->epnum = 0;
diff --git a/drivers/usb/gadget/udc/gadget_chips.h b/drivers/usb/gadget/udc/gadget_chips.h
deleted file mode 100644
index bcd04bc66b98..000000000000
--- a/drivers/usb/gadget/udc/gadget_chips.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * USB device controllers have lots of quirks. Use these macros in
- * gadget drivers or other code that needs to deal with them, and which
- * autoconfigures instead of using early binding to the hardware.
- *
- * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by
- * some config file that gets updated as new hardware is supported.
- * (And avoiding all runtime comparisons in typical one-choice configs!)
- *
- * NOTE: some of these controller drivers may not be available yet.
- * Some are available on 2.4 kernels; several are available, but not
- * yet pushed in the 2.6 mainline tree.
- */
-
-#ifndef __GADGET_CHIPS_H
-#define __GADGET_CHIPS_H
-
-#include <linux/usb/gadget.h>
-
-/*
- * NOTICE: the entries below are alphabetical and should be kept
- * that way.
- *
- * Always be sure to add new entries to the correct position or
- * accept the bashing later.
- *
- * If you have forgotten the alphabetical order let VIM/EMACS
- * do that for you.
- */
-#define gadget_is_at91(g) (!strcmp("at91_udc", (g)->name))
-#define gadget_is_goku(g) (!strcmp("goku_udc", (g)->name))
-#define gadget_is_musbhdrc(g) (!strcmp("musb-hdrc", (g)->name))
-#define gadget_is_net2280(g) (!strcmp("net2280", (g)->name))
-#define gadget_is_pxa(g) (!strcmp("pxa25x_udc", (g)->name))
-#define gadget_is_pxa27x(g) (!strcmp("pxa27x_udc", (g)->name))
-
-/**
- * gadget_supports_altsettings - return true if altsettings work
- * @gadget: the gadget in question
- */
-static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
-{
- /* PXA 21x/25x/26x has no altsettings at all */
- if (gadget_is_pxa(gadget))
- return false;
-
- /* PXA 27x and 3xx have *broken* altsetting support */
- if (gadget_is_pxa27x(gadget))
- return false;
-
- /* Everything else is *presumably* fine ... */
- return true;
-}
-
-#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 9e8d842e8c08..1fdfec14a3ba 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -990,6 +990,35 @@ static int goku_get_frame(struct usb_gadget *_gadget)
return -EOPNOTSUPP;
}
+static struct usb_ep *goku_match_ep(struct usb_gadget *g,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ struct goku_udc *dev = to_goku_udc(g);
+ struct usb_ep *ep;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_INT:
+ /* single buffering is enough */
+ ep = &dev->ep[3].ep;
+ if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+ return ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(desc)) {
+ /* DMA may be available */
+ ep = &dev->ep[2].ep;
+ if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+ return ep;
+ }
+ break;
+ default:
+ /* nothing */ ;
+ }
+
+ return NULL;
+}
+
static int goku_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int goku_udc_stop(struct usb_gadget *g);
@@ -998,6 +1027,7 @@ static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
.udc_start = goku_udc_start,
.udc_stop = goku_udc_stop,
+ .match_ep = goku_match_ep,
// no remote wakeup
// not selfpowered
};
@@ -1257,6 +1287,14 @@ static void udc_reinit (struct goku_udc *dev)
INIT_LIST_HEAD (&ep->queue);
ep_reset(NULL, ep);
+
+ if (i == 0)
+ ep->ep.caps.type_control = true;
+ else
+ ep->ep.caps.type_bulk = true;
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
dev->ep[0].reg_mode = NULL;
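goku_udc is one of the first users of the new ->match_ep gadget operation: instead of leaving placement to generic name matching, the controller steers interrupt transfers to the single-buffered ep3 and IN bulk to the DMA-capable ep2. A simplified sketch of how the autoconfig core consults the callback, under the assumption that it falls back to a generic list scan when the hook returns NULL; this is not a verbatim copy of usb_ep_autoconfig_ss():

static struct usb_ep *pick_ep(struct usb_gadget *g,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct usb_ep *ep;

	/* let the controller apply its hardware-specific preferences */
	if (g->ops->match_ep) {
		ep = g->ops->match_ep(g, desc, ep_comp);
		if (ep)
			return ep;
	}

	/* otherwise the core scans g->ep_list generically */
	return NULL;
}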
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index c8868870e217..8aa2593c2c36 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2018,12 +2018,23 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
+
+ ep->ep.caps.type_control = true;
} else {
usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
}
list_add_tail(&ep->ep_list, &dev->ep_list);
+ if (is_in)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
&ep->tailbuf_paddr, GFP_ATOMIC);
if (!ep->tailbuf)
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 3b6a7852822d..00b5006baf15 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -2575,6 +2575,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep0",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 0,
@@ -2586,6 +2588,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep1-int",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 2,
@@ -2597,6 +2601,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep2-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 4,
@@ -2608,6 +2614,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep3-iso",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 1023,
.hwep_num_base = 6,
@@ -2619,6 +2627,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep4-int",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 8,
@@ -2630,6 +2640,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep5-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 10,
@@ -2641,6 +2653,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep6-iso",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 1023,
.hwep_num_base = 12,
@@ -2652,6 +2666,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep7-int",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 14,
@@ -2663,6 +2679,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep8-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 16,
@@ -2674,6 +2692,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep9-iso",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 1023,
.hwep_num_base = 18,
@@ -2685,6 +2705,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep10-int",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 20,
@@ -2696,6 +2718,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep11-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 22,
@@ -2707,6 +2731,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep12-iso",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 1023,
.hwep_num_base = 24,
@@ -2718,6 +2744,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep13-int",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 26,
@@ -2729,6 +2757,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep14-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 64,
.hwep_num_base = 28,
@@ -2740,6 +2770,8 @@ static const struct lpc32xx_udc controller_template = {
.ep = {
.name = "ep15-bulk",
.ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.maxpacket = 1023,
.hwep_num_base = 30,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 309706fe4bf0..b1cfa96cc88f 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1052,7 +1052,7 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
tmp = m66592_read(m66592, M66592_INTSTS0) &
M66592_CTSQ;
udelay(1);
- } while (tmp != M66592_CS_IDST || timeout-- > 0);
+ } while (tmp != M66592_CS_IDST && timeout-- > 0);
if (tmp == M66592_CS_IDST)
m66592_bset(m66592,
@@ -1644,6 +1644,17 @@ static int m66592_probe(struct platform_device *pdev)
ep->ep.name = m66592_ep_name[i];
ep->ep.ops = &m66592_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64);
m66592->ep[0].pipenum = 0;
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index ea35a248c898..4c489692745e 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1324,6 +1324,9 @@ static int mv_u3d_eps_init(struct mv_u3d *u3d)
ep->ep.ops = &mv_u3d_ep_ops;
ep->wedge = 0;
usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
ep->ep_num = 0;
ep->ep.desc = &mv_u3d_ep0_desc;
INIT_LIST_HEAD(&ep->queue);
@@ -1339,14 +1342,20 @@ static int mv_u3d_eps_init(struct mv_u3d *u3d)
if (i & 1) {
snprintf(name, sizeof(name), "ep%din", i >> 1);
ep->direction = MV_U3D_EP_DIR_IN;
+ ep->ep.caps.dir_in = true;
} else {
snprintf(name, sizeof(name), "ep%dout", i >> 1);
ep->direction = MV_U3D_EP_DIR_OUT;
+ ep->ep.caps.dir_out = true;
}
ep->u3d = u3d;
strncpy(ep->name, name, sizeof(ep->name));
ep->ep.name = ep->name;
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+
ep->ep.ops = &mv_u3d_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
ep->ep_num = i / 2;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..339af51df57d 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1257,6 +1257,9 @@ static int eps_init(struct mv_udc *udc)
ep->wedge = 0;
ep->stopped = 0;
usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
ep->ep_num = 0;
ep->ep.desc = &mv_ep0_desc;
INIT_LIST_HEAD(&ep->queue);
@@ -1269,14 +1272,20 @@ static int eps_init(struct mv_udc *udc)
if (i % 2) {
snprintf(name, sizeof(name), "ep%din", i / 2);
ep->direction = EP_DIR_IN;
+ ep->ep.caps.dir_in = true;
} else {
snprintf(name, sizeof(name), "ep%dout", i / 2);
ep->direction = EP_DIR_OUT;
+ ep->ep.caps.dir_out = true;
}
ep->udc = udc;
strncpy(ep->name, name, sizeof(ep->name));
ep->ep.name = ep->name;
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+
ep->ep.ops = &mv_ep_ops;
ep->stopped = 0;
usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
@@ -2167,7 +2176,7 @@ static int mv_udc_probe(struct platform_device *pdev)
return -ENODEV;
}
- udc->phy_regs = ioremap(r->start, resource_size(r));
+ udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (udc->phy_regs == NULL) {
dev_err(&pdev->dev, "failed to map phy I/O memory\n");
return -EBUSY;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 195baf3e1fcd..18f5ebd447b8 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1404,6 +1404,17 @@ net2272_usb_reinit(struct net2272 *dev)
else
ep->fifo_size = 64;
net2272_ep_reset(ep);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
@@ -1826,9 +1837,9 @@ net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
if (!e || u.r.wLength > 2)
goto do_stall;
if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
- status = __constant_cpu_to_le16(1);
+ status = cpu_to_le16(1);
else
- status = __constant_cpu_to_le16(0);
+ status = cpu_to_le16(0);
/* don't bother with a request object! */
net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 2bee912ca65b..cf0ed42f5591 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -74,19 +74,58 @@ static const char driver_desc[] = DRIVER_DESC;
static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
-static const char *const ep_name[] = {
- ep0name,
- "ep-a", "ep-b", "ep-c", "ep-d",
- "ep-e", "ep-f", "ep-g", "ep-h",
-};
-/* Endpoint names for usb3380 advance mode */
-static const char *const ep_name_adv[] = {
- ep0name,
- "ep1in", "ep2out", "ep3in", "ep4out",
- "ep1out", "ep2in", "ep3out", "ep4in",
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info_dft[] = { /* Default endpoint configuration */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-a",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-b",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-c",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-d",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-e",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-f",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-g",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-h",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+}, ep_info_adv[] = { /* Endpoints for usb3380 advanced mode */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep1out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep3out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep4in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
};
+#undef EP_INFO
+
/* mode 0 == ep-{a,b,c,d} 1K fifo each
* mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
* mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
@@ -1511,6 +1550,33 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
+static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ char name[8];
+ struct usb_ep *ep;
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
+ /* ep-e, ep-f are PIO with only 64 byte fifos */
+ ep = gadget_find_ep_by_name(_gadget, "ep-e");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep-f");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
+ /* USB3380: use same address for usb and hardware endpoints */
+ snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
+ usb_endpoint_dir_in(desc) ? "in" : "out");
+ ep = gadget_find_ep_by_name(_gadget, name);
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+
+ return NULL;
+}
+
static int net2280_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);
@@ -1522,6 +1588,7 @@ static const struct usb_gadget_ops net2280_ops = {
.pullup = net2280_pullup,
.udc_start = net2280_start,
.udc_stop = net2280_stop,
+ .match_ep = net2280_match_ep,
};
/*-------------------------------------------------------------------------*/
@@ -2055,7 +2122,8 @@ static void usb_reinit_228x(struct net2280 *dev)
for (tmp = 0; tmp < 7; tmp++) {
struct net2280_ep *ep = &dev->ep[tmp];
- ep->ep.name = ep_name[tmp];
+ ep->ep.name = ep_info_dft[tmp].name;
+ ep->ep.caps = ep_info_dft[tmp].caps;
ep->dev = dev;
ep->num = tmp;
@@ -2095,7 +2163,10 @@ static void usb_reinit_338x(struct net2280 *dev)
for (i = 0; i < dev->n_ep; i++) {
struct net2280_ep *ep = &dev->ep[i];
- ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i];
+ ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
+ ep_info_dft[i].name;
+ ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
+ ep_info_dft[i].caps;
ep->dev = dev;
ep->num = i;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index e2fcdb8e5596..9b7d39484ed3 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2579,6 +2579,28 @@ omap_ep_setup(char *name, u8 addr, u8 type,
ep->double_buf = dbuf;
ep->udc = udc;
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ep->ep.caps.type_iso = true;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep->ep.caps.type_bulk = true;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ ep->ep.caps.type_int = true;
+ break;
+ }
+
+ if (addr & USB_DIR_IN)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
ep->ep.name = ep->name;
ep->ep.ops = &omap_ep_ops;
ep->maxpacket = maxp;
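omap_udc derives the capability flags from the descriptor transfer type it is handed at setup time. The same mapping as a hypothetical standalone helper:

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static void ep_caps_from_type(struct usb_ep_caps *caps, u8 type)
{
	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		caps->type_control = true;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		caps->type_iso = true;
		break;
	case USB_ENDPOINT_XFER_BULK:
		caps->type_bulk = true;
		break;
	case USB_ENDPOINT_XFER_INT:
		caps->type_int = true;
		break;
	}
}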
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 613547f07828..e5f4c5274298 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -620,9 +620,9 @@ static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
dev->vbus_session = 1;
} else {
if (dev->driver && dev->driver->disconnect) {
- spin_unlock(&dev->lock);
- dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_unlock(&dev->lock);
}
pch_udc_set_disconnect(dev);
dev->vbus_session = 0;
@@ -1191,9 +1191,9 @@ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
pch_udc_reconnect(dev);
} else {
if (dev->driver && dev->driver->disconnect) {
- spin_unlock(&dev->lock);
- dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_unlock(&dev->lock);
}
pch_udc_set_disconnect(dev);
}
@@ -1488,11 +1488,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
req->dma_mapped = 0;
}
ep->halted = 1;
- spin_unlock(&dev->lock);
+ spin_lock(&dev->lock);
if (!ep->in)
pch_udc_ep_clear_rrdy(ep);
usb_gadget_giveback_request(&ep->ep, &req->req);
- spin_lock(&dev->lock);
+ spin_unlock(&dev->lock);
ep->halted = halted;
}
@@ -1793,7 +1793,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
}
/* prevent from using desc. - set HOST BUSY */
dma_desc->status |= PCH_UDC_BS_HST_BSY;
- dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
+ dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
req->td_data = dma_desc;
req->td_data_last = dma_desc;
req->chain_len = 1;
@@ -2414,7 +2414,7 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
else /* OUT */
dev->gadget.ep0 = &ep->ep;
- spin_unlock(&dev->lock);
+ spin_lock(&dev->lock);
/* If Mass storage Reset */
if ((dev->setup_data.bRequestType == 0x21) &&
(dev->setup_data.bRequest == 0xFF))
@@ -2422,7 +2422,7 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
/* call gadget with setup data received */
setup_supported = dev->driver->setup(&dev->gadget,
&dev->setup_data);
- spin_lock(&dev->lock);
+ spin_unlock(&dev->lock);
if (dev->setup_data.bRequestType & USB_DIR_IN) {
ep->td_data->status = (ep->td_data->status &
@@ -2594,9 +2594,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
empty_req_queue(ep);
}
if (dev->driver) {
- spin_unlock(&dev->lock);
- usb_gadget_udc_reset(&dev->gadget, dev->driver);
spin_lock(&dev->lock);
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ spin_unlock(&dev->lock);
}
}
@@ -2675,9 +2675,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
dev->ep[i].halted = 0;
}
dev->stall = 0;
- spin_unlock(&dev->lock);
- ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_lock(&dev->lock);
+ ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_unlock(&dev->lock);
}
/**
@@ -2712,9 +2712,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
dev->stall = 0;
/* call gadget zero with setup data received */
- spin_unlock(&dev->lock);
- ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_lock(&dev->lock);
+ ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_unlock(&dev->lock);
}
/**
@@ -2747,18 +2747,18 @@ static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
if (dev_intr & UDC_DEVINT_US) {
if (dev->driver
&& dev->driver->suspend) {
- spin_unlock(&dev->lock);
- dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
+ dev->driver->suspend(&dev->gadget);
+ spin_unlock(&dev->lock);
}
vbus = pch_vbus_gpio_get_value(dev);
if ((dev->vbus_session == 0)
&& (vbus != 1)) {
if (dev->driver && dev->driver->disconnect) {
- spin_unlock(&dev->lock);
- dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_unlock(&dev->lock);
}
pch_udc_reconnect(dev);
} else if ((dev->vbus_session == 0)
@@ -2895,11 +2895,21 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
ep->in = ~i & 1;
ep->ep.name = ep_string[i];
ep->ep.ops = &pch_udc_ep_ops;
- if (ep->in)
+ if (ep->in) {
ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
- else
+ ep->ep.caps.dir_in = true;
+ } else {
ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
UDC_EP_REG_SHIFT;
+ ep->ep.caps.dir_out = true;
+ }
+ if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
/* need to set ep->ep.maxpacket and set Default Configuration?*/
usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
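The pch_udc hunks invert the locking discipline around gadget-driver callbacks: the old code dropped dev->lock (taken further up the call chain) before calling into the gadget driver and re-acquired it afterwards, while the new code assumes these paths run unlocked and holds the lock only for the duration of the callback. The resulting pattern, sketched against the driver's struct pch_udc_dev:

static void notify_disconnect(struct pch_udc_dev *dev)
{
	if (dev->driver && dev->driver->disconnect) {
		spin_lock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_unlock(&dev->lock);
	}
}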
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index f6cbe667ce39..b82cb14850b6 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1176,6 +1176,7 @@ static void udc_reinit(struct pxa25x_udc *dev)
INIT_LIST_HEAD (&dev->gadget.ep_list);
INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
dev->ep0state = EP0_IDLE;
+ dev->gadget.quirk_altset_not_supp = 1;
/* basic endpoint records init */
for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
@@ -1821,6 +1822,8 @@ static struct pxa25x_udc memory = {
.name = ep0name,
.ops = &pxa25x_ep_ops,
.maxpacket = EP0_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
.reg_udccs = &UDCCS0,
@@ -1833,6 +1836,8 @@ static struct pxa25x_udc memory = {
.name = "ep1in-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1846,6 +1851,8 @@ static struct pxa25x_udc memory = {
.name = "ep2out-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1861,6 +1868,8 @@ static struct pxa25x_udc memory = {
.name = "ep3in-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -1874,6 +1883,8 @@ static struct pxa25x_udc memory = {
.name = "ep4out-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -1888,6 +1899,7 @@ static struct pxa25x_udc memory = {
.name = "ep5in-int",
.ops = &pxa25x_ep_ops,
.maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(0, 0),
},
.dev = &memory,
.fifo_size = INT_FIFO_SIZE,
@@ -1903,6 +1915,8 @@ static struct pxa25x_udc memory = {
.name = "ep6in-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1916,6 +1930,8 @@ static struct pxa25x_udc memory = {
.name = "ep7out-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1930,6 +1946,8 @@ static struct pxa25x_udc memory = {
.name = "ep8in-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -1943,6 +1961,8 @@ static struct pxa25x_udc memory = {
.name = "ep9out-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -1957,6 +1977,7 @@ static struct pxa25x_udc memory = {
.name = "ep10in-int",
.ops = &pxa25x_ep_ops,
.maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(0, 0),
},
.dev = &memory,
.fifo_size = INT_FIFO_SIZE,
@@ -1972,6 +1993,8 @@ static struct pxa25x_udc memory = {
.name = "ep11in-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1985,6 +2008,8 @@ static struct pxa25x_udc memory = {
.name = "ep12out-bulk",
.ops = &pxa25x_ep_ops,
.maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = BULK_FIFO_SIZE,
@@ -1999,6 +2024,8 @@ static struct pxa25x_udc memory = {
.name = "ep13in-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -2012,6 +2039,8 @@ static struct pxa25x_udc memory = {
.name = "ep14out-iso",
.ops = &pxa25x_ep_ops,
.maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
},
.dev = &memory,
.fifo_size = ISO_FIFO_SIZE,
@@ -2026,6 +2055,7 @@ static struct pxa25x_udc memory = {
.name = "ep15in-int",
.ops = &pxa25x_ep_ops,
.maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(0, 0),
},
.dev = &memory,
.fifo_size = INT_FIFO_SIZE,
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index b51226abade6..670ac0b12f00 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1710,6 +1710,7 @@ static void udc_init_data(struct pxa_udc *dev)
INIT_LIST_HEAD(&dev->gadget.ep_list);
INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
+ dev->gadget.quirk_altset_not_supp = 1;
ep0_idle(dev);
/* PXA endpoints init */
@@ -2422,7 +2423,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
}
udc->udc_command = mach->udc_command;
} else {
- udc->gpiod = devm_gpiod_get(&pdev->dev, NULL);
+ udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 11e14232794b..cea2cb79b30c 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -234,25 +234,35 @@
/*
* Endpoint definition helpers
*/
-#define USB_EP_DEF(addr, bname, dir, type, maxpkt) \
-{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, }, \
+#define USB_EP_DEF(addr, bname, dir, type, maxpkt, ctype, cdir) \
+{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, \
+ .caps = USB_EP_CAPS(ctype, cdir), }, \
.desc = { .bEndpointAddress = addr | (dir ? USB_DIR_IN : 0), \
- .bmAttributes = type, \
+ .bmAttributes = USB_ENDPOINT_XFER_ ## type, \
.wMaxPacketSize = maxpkt, }, \
.dev = &memory \
}
-#define USB_EP_BULK(addr, bname, dir) \
- USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE)
-#define USB_EP_ISO(addr, bname, dir) \
- USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE)
-#define USB_EP_INT(addr, bname, dir) \
- USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE)
-#define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1)
-#define USB_EP_OUT_BULK(n) USB_EP_BULK(n, "ep" #n "out-bulk", 0)
-#define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1)
-#define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0)
-#define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1)
-#define USB_EP_CTRL USB_EP_DEF(0, "ep0", 0, 0, EP0_FIFO_SIZE)
+#define USB_EP_BULK(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, BULK, BULK_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_BULK, cdir)
+#define USB_EP_ISO(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, ISOC, ISO_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_ISO, cdir)
+#define USB_EP_INT(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, INT, INT_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_INT, cdir)
+#define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_BULK(n) USB_EP_BULK(n, "ep" #n "out-bulk", 0, \
+ USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0, \
+ USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_CTRL USB_EP_DEF(0, "ep0", 0, CONTROL, EP0_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)
#define PXA_EP_DEF(_idx, _addr, dir, _type, maxpkt, _config, iface, altset) \
{ \
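For reference, a worked expansion of USB_EP_IN_BULK(1) under the new macros (whitespace rearranged for readability; memory is the driver's static pxa_udc object):

	{
		.usb_ep = {
			.name = "ep1in-bulk",
			.ops = &pxa_ep_ops,
			.maxpacket = BULK_FIFO_SIZE,
			.caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
					    USB_EP_CAPS_DIR_IN),
		},
		.desc = {
			.bEndpointAddress = 1 | USB_DIR_IN,
			.bmAttributes = USB_ENDPOINT_XFER_BULK,
			.wMaxPacketSize = BULK_FIFO_SIZE,
		},
		.dev = &memory,
	}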
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 0293f7169dee..baa0609a429d 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1935,6 +1935,16 @@ static int r8a66597_probe(struct platform_device *pdev)
ep->ep.name = r8a66597_ep_name[i];
ep->ep.ops = &r8a66597_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
}
usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
r8a66597->ep[0].pipenum = 0;
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 85a712a03343..e9def42ce50d 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1005,6 +1005,21 @@ static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
hsep->stopped = 0;
hsep->wedge = 0;
+ if (epnum == 0) {
+ hsep->ep.caps.type_control = true;
+ hsep->ep.caps.dir_in = true;
+ hsep->ep.caps.dir_out = true;
+ } else {
+ hsep->ep.caps.type_iso = true;
+ hsep->ep.caps.type_bulk = true;
+ hsep->ep.caps.type_int = true;
+ }
+
+ if (epnum & 1)
+ hsep->ep.caps.dir_in = true;
+ else
+ hsep->ep.caps.dir_out = true;
+
set_index(hsudc, epnum);
writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 5d9aa81969b4..eb3571ee59e3 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1691,6 +1691,8 @@ static struct s3c2410_udc memory = {
.name = ep0name,
.ops = &s3c2410_ep_ops,
.maxpacket = EP0_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
},
@@ -1702,6 +1704,8 @@ static struct s3c2410_udc memory = {
.name = "ep1-bulk",
.ops = &s3c2410_ep_ops,
.maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
.fifo_size = EP_FIFO_SIZE,
@@ -1714,6 +1718,8 @@ static struct s3c2410_udc memory = {
.name = "ep2-bulk",
.ops = &s3c2410_ep_ops,
.maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
.fifo_size = EP_FIFO_SIZE,
@@ -1726,6 +1732,8 @@ static struct s3c2410_udc memory = {
.name = "ep3-bulk",
.ops = &s3c2410_ep_ops,
.maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
.fifo_size = EP_FIFO_SIZE,
@@ -1738,6 +1746,8 @@ static struct s3c2410_udc memory = {
.name = "ep4-bulk",
.ops = &s3c2410_ep_ops,
.maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
.fifo_size = EP_FIFO_SIZE,
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..f660afba715d 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in)
{
+ struct device *dev = gadget->dev.parent;
+
if (req->length == 0)
return 0;
if (req->num_sgs) {
int mapped;
- mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (mapped == 0) {
dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
req->num_mapped_sgs = mapped;
} else {
- req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+ req->dma = dma_map_single(dev, req->buf, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (dma_mapping_error(&gadget->dev, req->dma)) {
- dev_err(&gadget->dev, "failed to map buffer\n");
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
return -EFAULT;
}
}
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
} else {
- dma_unmap_single(&gadget->dev, req->dma, req->length,
+ dma_unmap_single(gadget->dev.parent, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
}
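The udc-core change above maps requests against the controller's parent device (the platform or PCI device that actually owns the DMA ops) rather than the logical gadget device. A minimal sketch of the corrected single-buffer path; map_req() is a hypothetical condensation of usb_gadget_map_request():

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>

static int map_req(struct usb_gadget *gadget, struct usb_request *req,
		   int is_in)
{
	struct device *dev = gadget->dev.parent;	/* DMA-capable device */

	req->dma = dma_map_single(dev, req->buf, req->length,
				  is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return dma_mapping_error(dev, req->dma) ? -EFAULT : 0;
}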
@@ -129,6 +131,96 @@ EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
/* ------------------------------------------------------------------------- */
+/**
+ * gadget_find_ep_by_name - returns the endpoint whose name matches the
+ * string passed as the second parameter, or NULL if none is found
+ * @g: gadget whose endpoint list is searched
+ * @name: name of the endpoint to look for
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+ struct usb_ep *ep;
+
+ gadget_for_each_ep(ep, g) {
+ if (!strcmp(ep->name, name))
+ return ep;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ u8 type;
+ u16 max;
+ int num_req_streams = 0;
+
+ /* endpoint already claimed? */
+ if (ep->claimed)
+ return 0;
+
+ type = usb_endpoint_type(desc);
+ max = 0x7ff & usb_endpoint_maxp(desc);
+
+ if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+ return 0;
+ if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+ return 0;
+
+ if (max > ep->maxpacket_limit)
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+ return 0;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* only support ep0 for portable CONTROL traffic */
+ return 0;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->caps.type_iso)
+ return 0;
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
+ return 0;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (!ep->caps.type_bulk)
+ return 0;
+ if (ep_comp && gadget_is_superspeed(gadget)) {
+ /* Get the number of required streams from the
+ * EP companion descriptor and see if the EP
+ * matches it
+ */
+ num_req_streams = ep_comp->bmAttributes & 0x1f;
+ if (num_req_streams > ep->max_streams)
+ return 0;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Bulk endpoints handle interrupt transfers,
+ * except the toggle-quirky iso-synch kind
+ */
+ if (!ep->caps.type_int && !ep->caps.type_bulk)
+ return 0;
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 64)
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
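A worked example of the maxp handling in usb_gadget_ep_match_desc(): the low 11 bits of wMaxPacketSize carry the payload size and bits 12:11 the extra transactions per microframe, which is what the (3<<11) high-bandwidth check rejects on non-dualspeed gadgets. The 0x1400 value below is an illustrative descriptor value, not taken from any driver here:

static void decode_maxp_example(void)
{
	u16 maxp = 0x1400;		/* example wMaxPacketSize, host order */
	u16 size = maxp & 0x7ff;	/* 0x400: 1024-byte payload */
	u16 mult = (maxp >> 11) & 3;	/* 2: three transactions/microframe */

	/* the match above rejects mult != 0 unless the gadget is dualspeed */
	(void)size;
	(void)mult;
}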
+
+/* ------------------------------------------------------------------------- */
+
static void usb_gadget_state_work(struct work_struct *work)
{
struct usb_gadget *gadget = work_to_gadget(work);
@@ -321,6 +413,7 @@ err4:
err3:
put_device(&udc->dev);
+ device_del(&gadget->dev);
err2:
put_device(&gadget->dev);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 1f24274477ab..1cbb0ac6b182 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1317,12 +1317,21 @@ static void xudc_eps_init(struct xusb_udc *udc)
snprintf(ep->name, EPNAME_SIZE, "ep%d", ep_number);
ep->ep_usb.name = ep->name;
ep->ep_usb.ops = &xusb_ep_ops;
+
+ ep->ep_usb.caps.type_iso = true;
+ ep->ep_usb.caps.type_bulk = true;
+ ep->ep_usb.caps.type_int = true;
} else {
ep->ep_usb.name = ep0name;
usb_ep_set_maxpacket_limit(&ep->ep_usb, EP0_MAX_PACKET);
ep->ep_usb.ops = &xusb_ep0_ops;
+
+ ep->ep_usb.caps.type_control = true;
}
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = true;
+
ep->udc = udc;
ep->epnumber = ep_number;
ep->desc = NULL;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8afc3c1efdab..079991e283e9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -32,7 +32,14 @@ config USB_XHCI_PCI
default y
config USB_XHCI_PLATFORM
- tristate
+ tristate "Generic xHCI driver for a platform device"
+ ---help---
+ Adds an xHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
+ It is also a prerequisite for platform specific drivers that
+ implement some extra quirks.
+
+ If unsure, say N.
config USB_XHCI_MVEBU
tristate "xHCI support for Marvell Armada 375/38x"
@@ -441,10 +448,10 @@ config USB_OHCI_HCD_PXA27X
PXA27x/PXA3xx chips.
config USB_OHCI_HCD_AT91
- tristate "Support for Atmel on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && ARCH_AT91
- default y
- ---help---
+ tristate "Support for Atmel on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_AT91 && OF
+ default y
+ ---help---
Enables support for the on-chip OHCI controller on
Atmel chips.
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 526cfab41d5f..5398e3d42822 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -2,7 +2,8 @@
* Broadcom specific Advanced Microcontroller Bus
* Broadcom USB-core driver (BCMA bus glue)
*
- * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2011-2015 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2015 Felix Fietkau <nbd@openwrt.org>
*
* Based on ssb-ohci driver
* Copyright 2007 Michael Buesch <m@bues.ch>
@@ -23,6 +24,8 @@
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
@@ -88,7 +91,7 @@ static void bcma_hcd_4716wa(struct bcma_device *dev)
}
/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
-static void bcma_hcd_init_chip(struct bcma_device *dev)
+static void bcma_hcd_init_chip_mips(struct bcma_device *dev)
{
u32 tmp;
@@ -159,6 +162,87 @@ static void bcma_hcd_init_chip(struct bcma_device *dev)
}
}
+static void bcma_hcd_init_chip_arm_phy(struct bcma_device *dev)
+{
+ struct bcma_device *arm_core;
+ void __iomem *dmu;
+
+ arm_core = bcma_find_core(dev->bus, BCMA_CORE_ARMCA9);
+ if (!arm_core) {
+ dev_err(&dev->dev, "can not find ARM Cortex A9 ihost core\n");
+ return;
+ }
+
+ dmu = ioremap_nocache(arm_core->addr_s[0], 0x1000);
+ if (!dmu) {
+ dev_err(&dev->dev, "can not map ARM Cortex A9 ihost core\n");
+ return;
+ }
+
+ /* Unlock DMU PLL settings */
+ iowrite32(0x0000ea68, dmu + 0x180);
+
+ /* Write USB 2.0 PLL control setting */
+ iowrite32(0x00dd10c3, dmu + 0x164);
+
+ /* Lock DMU PLL settings */
+ iowrite32(0x00000000, dmu + 0x180);
+
+ iounmap(dmu);
+}
+
+static void bcma_hcd_init_chip_arm_hc(struct bcma_device *dev)
+{
+ u32 val;
+
+ /*
+ * Delay after PHY initialized to ensure HC is ready to be configured
+ */
+ usleep_range(1000, 2000);
+
+ /* Set packet buffer OUT threshold */
+ val = bcma_read32(dev, 0x94);
+ val &= 0xffff;
+ val |= 0x80 << 16;
+ bcma_write32(dev, 0x94, val);
+
+ /* Enable break memory transfer */
+ val = bcma_read32(dev, 0x9c);
+ val |= 1;
+ bcma_write32(dev, 0x9c, val);
+}
+
+static void bcma_hcd_init_chip_arm(struct bcma_device *dev)
+{
+ bcma_core_enable(dev, 0);
+
+ if (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4707 ||
+ dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM53018) {
+ if (dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4707 ||
+ dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4708)
+ bcma_hcd_init_chip_arm_phy(dev);
+
+ bcma_hcd_init_chip_arm_hc(dev);
+ }
+}
+
+static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
+{
+ int gpio;
+
+ gpio = of_get_named_gpio(dev->dev.of_node, "vcc-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return;
+
+ if (val) {
+ gpio_request(gpio, "bcma-hcd-gpio");
+ gpio_set_value(gpio, 1);
+ } else {
+ gpio_set_value(gpio, 0);
+ gpio_free(gpio);
+ }
+}
+
static const struct usb_ehci_pdata ehci_pdata = {
};
@@ -169,7 +253,7 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
{
struct platform_device *hci_dev;
struct resource hci_res[2];
- int ret = -ENOMEM;
+ int ret;
memset(hci_res, 0, sizeof(hci_res));
@@ -183,7 +267,7 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
"ehci-platform" , 0);
if (!hci_dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
hci_dev->dev.parent = &dev->dev;
hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
@@ -214,39 +298,45 @@ err_alloc:
static int bcma_hcd_probe(struct bcma_device *dev)
{
int err;
- u16 chipid_top;
u32 ohci_addr;
struct bcma_hcd_device *usb_dev;
struct bcma_chipinfo *chipinfo;
chipinfo = &dev->bus->chipinfo;
- /* USBcores are only connected on embedded devices. */
- chipid_top = (chipinfo->id & 0xFF00);
- if (chipid_top != 0x4700 && chipid_top != 0x5300)
- return -ENODEV;
/* TODO: Probably need checks here; is the core connected? */
if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
- usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
+ usb_dev = devm_kzalloc(&dev->dev, sizeof(struct bcma_hcd_device),
+ GFP_KERNEL);
if (!usb_dev)
return -ENOMEM;
- bcma_hcd_init_chip(dev);
+ bcma_hci_platform_power_gpio(dev, true);
+
+ switch (dev->id.id) {
+ case BCMA_CORE_NS_USB20:
+ bcma_hcd_init_chip_arm(dev);
+ break;
+ case BCMA_CORE_USB20_HOST:
+ bcma_hcd_init_chip_mips(dev);
+ break;
+ default:
+ return -ENODEV;
+ }
/* In AI chips EHCI is addrspace 0, OHCI is 1 */
ohci_addr = dev->addr_s[0];
- if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749)
+ if ((chipinfo->id == BCMA_CHIP_ID_BCM5357 ||
+ chipinfo->id == BCMA_CHIP_ID_BCM4749)
&& chipinfo->rev == 0)
ohci_addr = 0x18009000;
usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
- if (IS_ERR(usb_dev->ohci_dev)) {
- err = PTR_ERR(usb_dev->ohci_dev);
- goto err_free_usb_dev;
- }
+ if (IS_ERR(usb_dev->ohci_dev))
+ return PTR_ERR(usb_dev->ohci_dev);
usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
if (IS_ERR(usb_dev->ehci_dev)) {
@@ -259,8 +349,6 @@ static int bcma_hcd_probe(struct bcma_device *dev)
err_unregister_ohci_dev:
platform_device_unregister(usb_dev->ohci_dev);
-err_free_usb_dev:
- kfree(usb_dev);
return err;
}
@@ -280,6 +368,7 @@ static void bcma_hcd_remove(struct bcma_device *dev)
static void bcma_hcd_shutdown(struct bcma_device *dev)
{
+ bcma_hci_platform_power_gpio(dev, false);
bcma_core_disable(dev, 0);
}
@@ -287,6 +376,7 @@ static void bcma_hcd_shutdown(struct bcma_device *dev)
static int bcma_hcd_suspend(struct bcma_device *dev)
{
+ bcma_hci_platform_power_gpio(dev, false);
bcma_core_disable(dev, 0);
return 0;
@@ -294,6 +384,7 @@ static int bcma_hcd_suspend(struct bcma_device *dev)
static int bcma_hcd_resume(struct bcma_device *dev)
{
+ bcma_hci_platform_power_gpio(dev, true);
bcma_core_enable(dev, 0);
return 0;
@@ -306,6 +397,7 @@ static int bcma_hcd_resume(struct bcma_device *dev)
static const struct bcma_device_id bcma_hcd_table[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB20, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
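
The vcc-gpio helper above uses the legacy integer-GPIO API: look the pin up
in the device tree, claim it only while driving it high, and release it when
powering off. A minimal standalone sketch of that pattern, assuming a DT node
with a "vcc-gpio" property (the function name is illustrative, not part of
the patch):

	#include <linux/gpio.h>
	#include <linux/of_gpio.h>

	/* Sketch: drive an optional DT-described supply GPIO. */
	static void example_set_vcc(struct device_node *np, bool on)
	{
		int gpio = of_get_named_gpio(np, "vcc-gpio", 0);

		if (!gpio_is_valid(gpio))
			return;	/* property absent: nothing to power */

		if (on) {
			/* claim the line only while we drive it */
			if (gpio_request(gpio, "example-vcc"))
				return;
			gpio_set_value(gpio, 1);
		} else {
			gpio_set_value(gpio, 0);
			gpio_free(gpio);
		}
	}
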
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5352e74b92e2..3b6eb219de1a 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -127,7 +127,18 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
/* Enable USB controller, 83xx or 8536 */
if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
- setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
+ clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, 0x4);
+
+ /*
+ * Enable UTMI phy and program PTS field in UTMI mode before asserting
+ * controller reset for USB Controller version 2.5
+ */
+ if (pdata->has_fsl_erratum_a007792) {
+ clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, CTRL_UTMI_PHY_EN);
+ writel(PORT_PTS_UTMI, hcd->regs + FSL_SOC_USB_PORTSC1);
+ }
/* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -191,9 +202,11 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
case FSL_USB2_PHY_ULPI:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
- clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
- setbits32(non_ehci + FSL_SOC_USB_CTRL,
- ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+ clrbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
+ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK,
+ ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
}
portsc |= PORT_PTS_ULPI;
break;
@@ -204,30 +217,33 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
portsc |= PORT_PTS_PTW;
/* fall through */
case FSL_USB2_PHY_UTMI:
+ case FSL_USB2_PHY_UTMI_DUAL:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
- setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, UTMI_PHY_EN);
mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI PHY CLK to
become stable - 10ms*/
}
/* enable UTMI PHY */
if (pdata->have_sysif_regs)
- setbits32(non_ehci + FSL_SOC_USB_CTRL,
- CTRL_UTMI_PHY_EN);
+ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK,
+ CTRL_UTMI_PHY_EN);
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
break;
}
- if (pdata->have_sysif_regs &&
- pdata->controller_ver > FSL_USB_VER_1_6 &&
- (phy_mode == FSL_USB2_PHY_ULPI)) {
- /* check PHY_CLK_VALID to get phy clk valid */
- if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
- PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
- in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
- dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
+ /*
+ * check PHY_CLK_VALID to determine phy clock presence before writing
+ * to portsc
+ */
+ if (pdata->check_phy_clk_valid) {
+ if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+ dev_warn(hcd->self.controller,
+ "USB PHY clock invalid\n");
return -EINVAL;
}
}
@@ -235,7 +251,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
- setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
+ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+ CONTROL_REGISTER_W1C_MASK, USB_CTRL_USB_EN);
return 0;
}
@@ -261,6 +278,10 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
}
+ /* Deal with USB erratum A-005275 */
+ if (pdata->has_fsl_erratum_a005275 == 1)
+ ehci->has_fsl_hs_errata = 1;
+
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
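
The repeated clrsetbits_be32(..., CONTROL_REGISTER_W1C_MASK, ...) pattern in
this file exists because the FSL USB control register contains a
write-one-to-clear status flag (PHY_CLK_VALID, masked by the define added to
ehci-fsl.h below). A plain read-modify-write such as setbits32() reads that
flag back as 1 and writes it back, unintentionally clearing it. A hedged
sketch of the safe update, equivalent in spirit to clrsetbits_be32():

	#include <linux/io.h>

	/*
	 * Sketch: set bits in a big-endian register that also holds a
	 * write-1-to-clear (W1C) status flag. The flag is masked out of
	 * the written value so it is never cleared by accident.
	 */
	static inline void example_set_ctrl(void __iomem *reg,
					    u32 w1c_mask, u32 bits)
	{
		u32 val = ioread32be(reg);

		val &= ~w1c_mask;	/* never write 1 to the W1C flag */
		val |= bits;
		iowrite32be(val, reg);
	}
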
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index dbd292e9f0a7..1a8a60a57cf2 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -52,6 +52,7 @@
#define SNOOP_SIZE_2GB 0x1e
/* control Register Bit Masks */
+#define CONTROL_REGISTER_W1C_MASK 0x00020000 /* W1C: PHY_CLK_VALID */
#define ULPI_INT_EN (1<<0)
#define WU_INT_EN (1<<1)
#define USB_CTRL_USB_EN (1<<2)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 22abb6830dfa..086a7115d263 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1221,6 +1221,13 @@ int ehci_hub_control(
*/
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (50);
+
+ /*
+ * Force full-speed connect for FSL high-speed
+ * erratum; disable HS Chirp by setting PFSC bit
+ */
+ if (ehci_has_fsl_hs_errata(ehci))
+ temp |= (1 << PORTSC_FSL_PFSC);
}
ehci_writel(ehci, temp, status_reg);
break;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 2593def13cea..5c3c08598682 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -45,6 +45,7 @@ struct ehci_platform_priv {
struct reset_control *rst;
struct phy **phys;
int num_phys;
+ bool reset_on_resume;
};
static const char hcd_name[] = "ehci-platform";
@@ -56,7 +57,6 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
- hcd->has_tt = pdata->has_tt;
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
if (pdata->pre_setup) {
@@ -193,11 +193,11 @@ static int ehci_platform_probe(struct platform_device *dev)
if (of_property_read_bool(dev->dev.of_node,
"needs-reset-on-resume"))
- pdata->reset_on_resume = 1;
+ priv->reset_on_resume = true;
if (of_property_read_bool(dev->dev.of_node,
"has-transaction-translator"))
- pdata->has_tt = 1;
+ hcd->has_tt = 1;
priv->num_phys = of_count_phandle_with_args(dev->dev.of_node,
"phys", "#phy-cells");
@@ -247,6 +247,10 @@ static int ehci_platform_probe(struct platform_device *dev)
ehci->big_endian_desc = 1;
if (pdata->big_endian_mmio)
ehci->big_endian_mmio = 1;
+ if (pdata->has_tt)
+ hcd->has_tt = 1;
+ if (pdata->reset_on_resume)
+ priv->reset_on_resume = true;
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
if (ehci->big_endian_mmio) {
@@ -359,6 +363,7 @@ static int ehci_platform_resume(struct device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
if (pdata->power_on) {
int err = pdata->power_on(pdev);
@@ -366,7 +371,7 @@ static int ehci_platform_resume(struct device *dev)
return err;
}
- ehci_resume(hcd, pdata->reset_on_resume);
+ ehci_resume(hcd, priv->reset_on_resume);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
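
The reset_on_resume move replaces mutable platform-data state with per-HCD
private state. ehci-platform keeps its private struct in the extra space the
EHCI core reserves behind struct ehci_hcd, which is what hcd_to_ehci_priv()
resolves to. A hedged sketch of that accessor pattern, assuming the driver
overrides set .extra_priv_size = sizeof(struct ehci_platform_priv):

	/* Sketch: per-controller private data appended to struct ehci_hcd,
	 * as reached by hcd_to_ehci_priv() in the hunks above. */
	static inline struct ehci_platform_priv *
	example_hcd_to_priv(struct usb_hcd *hcd)
	{
		return (struct ehci_platform_priv *)hcd_to_ehci(hcd)->priv;
	}
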
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index 7e4bd39cf757..b7c5cfa37a83 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -54,7 +54,6 @@ static int st_ehci_platform_reset(struct usb_hcd *hcd)
struct platform_device *pdev = to_platform_device(hcd->self.controller);
struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
u32 threshold;
/* Set EHCI packet buffer IN/OUT threshold to 128 bytes */
@@ -62,11 +61,7 @@ static int st_ehci_platform_reset(struct usb_hcd *hcd)
writel(threshold, hcd->regs + AHB2STBUS_INSREG01);
ehci->caps = hcd->regs + pdata->caps_offset;
- retval = ehci_setup(hcd);
- if (retval)
- return retval;
-
- return 0;
+ return ehci_setup(hcd);
}
static int st_ehci_platform_power_on(struct platform_device *dev)
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 5e44407aa099..5216f2b09d63 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
int count = PAGE_SIZE;
char *ptr = buf;
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
nports = HCS_N_PORTS(ehci->hcs_params);
for (index = 0; index < nports; ++index) {
@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
struct ehci_hcd *ehci;
int portnum, new_owner;
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
new_owner = PORT_OWNER; /* Owned by companion */
if (sscanf(buf, "%d", &portnum) != 1)
return -EINVAL;
@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
struct ehci_hcd *ehci;
int n;
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
return n;
}
@@ -101,7 +101,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
unsigned long flags;
ssize_t ret;
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
return -EINVAL;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index f700157cd084..46f62e41bcde 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -215,6 +215,7 @@ struct ehci_hcd { /* one per controller */
/* SILICON QUIRKS */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
+ unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned big_endian_capbase:1;
@@ -686,6 +687,17 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#define ehci_has_fsl_portno_bug(e) (0)
#endif
+#define PORTSC_FSL_PFSC 24 /* Port Force Full-Speed Connect */
+
+#if defined(CONFIG_PPC_85xx)
+/* Some Freescale processors have an erratum (USB A-005275) in which
+ * incoming packets get corrupted in HS mode
+ */
+#define ehci_has_fsl_hs_errata(e) ((e)->has_fsl_hs_errata)
+#else
+#define ehci_has_fsl_hs_errata(e) (0)
+#endif
+
/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 5e0d60035216..534c4c5d278a 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -69,6 +69,8 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
return FSL_USB2_PHY_UTMI;
if (!strcasecmp(phy_type, "utmi_wide"))
return FSL_USB2_PHY_UTMI_WIDE;
+ if (!strcasecmp(phy_type, "utmi_dual"))
+ return FSL_USB2_PHY_UTMI_DUAL;
if (!strcasecmp(phy_type, "serial"))
return FSL_USB2_PHY_SERIAL;
@@ -119,9 +121,9 @@ error:
static const struct of_device_id fsl_usb2_mph_dr_of_match[];
-static int usb_get_ver_info(struct device_node *np)
+static enum fsl_usb2_controller_ver usb_get_ver_info(struct device_node *np)
{
- int ver = -1;
+ enum fsl_usb2_controller_ver ver = FSL_USB_VER_NONE;
/*
* returns 1 for usb controller version 1.6
@@ -142,7 +144,7 @@ static int usb_get_ver_info(struct device_node *np)
else /* for previous controller versions */
ver = FSL_USB_VER_OLD;
- if (ver > -1)
+ if (ver > FSL_USB_VER_NONE)
return ver;
}
@@ -214,8 +216,27 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
pdata->phy_mode = determine_usb_phy(prop);
pdata->controller_ver = usb_get_ver_info(np);
+ /* Activate erratum workaround if the property is present in the device tree */
+ if (of_get_property(np, "fsl,usb-erratum-a007792", NULL))
+ pdata->has_fsl_erratum_a007792 = 1;
+ else
+ pdata->has_fsl_erratum_a007792 = 0;
+ if (of_get_property(np, "fsl,usb-erratum-a005275", NULL))
+ pdata->has_fsl_erratum_a005275 = 1;
+ else
+ pdata->has_fsl_erratum_a005275 = 0;
+
+ /*
+ * Determine whether phy_clk_valid needs to be checked
+ * by reading the property from the device tree
+ */
+ if (of_get_property(np, "phy-clk-valid", NULL))
+ pdata->check_phy_clk_valid = 1;
+ else
+ pdata->check_phy_clk_valid = 0;
+
if (pdata->have_sysif_regs) {
- if (pdata->controller_ver < 0) {
+ if (pdata->controller_ver == FSL_USB_VER_NONE) {
dev_warn(&ofdev->dev, "Could not get controller version\n");
return -ENODEV;
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 15df00cceed9..342ffd140122 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -36,6 +36,17 @@
#define hcd_to_ohci_at91_priv(h) \
((struct ohci_at91_priv *)hcd_to_ohci(h)->priv)
+#define AT91_MAX_USBH_PORTS 3
+struct at91_usbh_data {
+ int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
+ int overcurrent_pin[AT91_MAX_USBH_PORTS];
+ u8 ports; /* number of ports on root hub */
+ u8 overcurrent_supported;
+ u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
+ u8 overcurrent_status[AT91_MAX_USBH_PORTS];
+ u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
+};
+
struct ohci_at91_priv {
struct clk *iclk;
struct clk *fclk;
@@ -431,7 +442,6 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
static const struct of_device_id at91_ohci_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-ohci" },
{ /* sentinel */ }
@@ -439,16 +449,17 @@ static const struct of_device_id at91_ohci_dt_ids[] = {
MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
-static int ohci_at91_of_init(struct platform_device *pdev)
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int i, gpio, ret;
- enum of_gpio_flags flags;
struct at91_usbh_data *pdata;
- u32 ports;
-
- if (!np)
- return 0;
+ int i;
+ int gpio;
+ int ret;
+ enum of_gpio_flags flags;
+ u32 ports;
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
@@ -466,111 +477,83 @@ static int ohci_at91_of_init(struct platform_device *pdev)
pdata->ports = ports;
at91_for_each_port(i) {
- gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
+ /*
+ * do not configure PIO if it is not related to a
+ * real USB port on the board
+ */
+ if (i >= pdata->ports) {
+ pdata->vbus_pin[i] = -EINVAL;
+ continue;
+ }
+
+ gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i,
+ &flags);
pdata->vbus_pin[i] = gpio;
if (!gpio_is_valid(gpio))
continue;
pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
- }
-
- at91_for_each_port(i)
- pdata->overcurrent_pin[i] =
- of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-#else
-static int ohci_at91_of_init(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
-{
- struct at91_usbh_data *pdata;
- int i;
- int gpio;
- int ret;
-
- ret = ohci_at91_of_init(pdev);
- if (ret)
- return ret;
+ ret = gpio_request(gpio, "ohci_vbus");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request vbus gpio %d\n", gpio);
+ continue;
+ }
+ ret = gpio_direction_output(gpio,
+ !pdata->vbus_pin_active_low[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't put vbus gpio %d as output %d\n",
+ gpio, !pdata->vbus_pin_active_low[i]);
+ gpio_free(gpio);
+ continue;
+ }
- pdata = dev_get_platdata(&pdev->dev);
+ ohci_at91_usb_set_power(pdata, i, 1);
+ }
- if (pdata) {
- at91_for_each_port(i) {
- /*
- * do not configure PIO if not in relation with
- * real USB port on board
- */
- if (i >= pdata->ports) {
- pdata->vbus_pin[i] = -EINVAL;
- pdata->overcurrent_pin[i] = -EINVAL;
- break;
- }
+ at91_for_each_port(i) {
+ if (i >= pdata->ports) {
+ pdata->overcurrent_pin[i] = -EINVAL;
+ continue;
+ }
- if (!gpio_is_valid(pdata->vbus_pin[i]))
- continue;
- gpio = pdata->vbus_pin[i];
+ pdata->overcurrent_pin[i] =
+ of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
- ret = gpio_request(gpio, "ohci_vbus");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request vbus gpio %d\n", gpio);
- continue;
- }
- ret = gpio_direction_output(gpio,
- !pdata->vbus_pin_active_low[i]);
- if (ret) {
- dev_err(&pdev->dev,
- "can't put vbus gpio %d as output %d\n",
- gpio, !pdata->vbus_pin_active_low[i]);
- gpio_free(gpio);
- continue;
- }
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+ continue;
+ gpio = pdata->overcurrent_pin[i];
- ohci_at91_usb_set_power(pdata, i, 1);
+ ret = gpio_request(gpio, "ohci_overcurrent");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request overcurrent gpio %d\n",
+ gpio);
+ continue;
}
- at91_for_each_port(i) {
- if (!gpio_is_valid(pdata->overcurrent_pin[i]))
- continue;
- gpio = pdata->overcurrent_pin[i];
-
- ret = gpio_request(gpio, "ohci_overcurrent");
- if (ret) {
- dev_err(&pdev->dev,
- "can't request overcurrent gpio %d\n",
- gpio);
- continue;
- }
-
- ret = gpio_direction_input(gpio);
- if (ret) {
- dev_err(&pdev->dev,
- "can't configure overcurrent gpio %d as input\n",
- gpio);
- gpio_free(gpio);
- continue;
- }
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't configure overcurrent gpio %d as input\n",
+ gpio);
+ gpio_free(gpio);
+ continue;
+ }
- ret = request_irq(gpio_to_irq(gpio),
- ohci_hcd_at91_overcurrent_irq,
- IRQF_SHARED, "ohci_overcurrent", pdev);
- if (ret) {
- gpio_free(gpio);
- dev_err(&pdev->dev,
- "can't get gpio IRQ for overcurrent\n");
- }
+ ret = request_irq(gpio_to_irq(gpio),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED, "ohci_overcurrent", pdev);
+ if (ret) {
+ gpio_free(gpio);
+ dev_err(&pdev->dev,
+ "can't get gpio IRQ for overcurrent\n");
}
}
+ pdev->dev.platform_data = pdata;
+
device_init_wakeup(&pdev->dev, 1);
return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
}
@@ -673,7 +656,7 @@ static struct platform_driver ohci_hcd_at91_driver = {
.driver = {
.name = "at91_ohci",
.pm = &ohci_hcd_at91_pm_ops,
- .of_match_table = of_match_ptr(at91_ohci_dt_ids),
+ .of_match_table = at91_ohci_dt_ids,
},
};
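
Dropping of_match_ptr() here follows from the driver becoming DT-only: with
the #ifdef CONFIG_OF fallback removed earlier in this hunk, at91_ohci_dt_ids
is always built, so wrapping it would only turn the match table into NULL on
!CONFIG_OF kernels that can no longer probe this driver anyway. As a
reminder, of_match_ptr() expands roughly as follows (illustrative sketch of
the upstream definition):

	/* Sketch: of_match_ptr() hides the table when CONFIG_OF is off. */
	#ifdef CONFIG_OF
	#define example_of_match_ptr(ptr)	(ptr)
	#else
	#define example_of_match_ptr(ptr)	NULL
	#endif
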
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
int completed, modified;
__hc32 *prev;
- /* Is this ED already invisible to the hardware? */
- if (ed->state == ED_IDLE)
- goto ed_idle;
-
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
@@ -1012,12 +1008,10 @@ skip_ed:
}
/* ED's now officially unlinked, hc doesn't see */
- ed->state = ED_IDLE;
ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
ed->hwNextED = 0;
wmb();
ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
if (list_empty(&ed->td_list)) {
*last = ed->ed_next;
ed->ed_next = NULL;
+ ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
*last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
#define CCR_PM_CKRNEN 0x0002
#define CCR_PM_USBPW1 0x0004
#define CCR_PM_USBPW2 0x0008
-#define CCR_PM_USBPW3 0x0008
+#define CCR_PM_USBPW3 0x0010
#define CCR_PM_PMEE 0x0100
#define CCR_PM_PMES 0x8000
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 6352f54e65a1..fe3bd1cb8b6b 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2670,7 +2670,6 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
static int oxu_reset(struct usb_hcd *hcd)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
- int ret;
spin_lock_init(&oxu->mem_lock);
INIT_LIST_HEAD(&oxu->urb_list);
@@ -2696,11 +2695,7 @@ static int oxu_reset(struct usb_hcd *hcd)
oxu->hcs_params = readl(&oxu->caps->hcs_params);
oxu->sbrn = 0x20;
- ret = oxu_hcd_init(hcd);
- if (ret)
- return ret;
-
- return 0;
+ return oxu_hcd_init(hcd);
}
static int oxu_run(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index d51687780b61..a67bd5090330 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1542,11 +1542,8 @@ static int u132_periodic_reinit(struct u132 *u132)
(fit ^ FIT) | u132->hc_fminterval);
if (retval)
return retval;
- retval = u132_write_pcimem(u132, periodicstart,
- ((9 * fi) / 10) & 0x3fff);
- if (retval)
- return retval;
- return 0;
+ return u132_write_pcimem(u132, periodicstart,
+ ((9 * fi) / 10) & 0x3fff);
}
static char *hcfs2string(int state)
@@ -2701,28 +2698,18 @@ static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
if (wIndex == 0 || wIndex > u132->num_ports) {
return -EINVAL;
} else {
- int retval;
int port_index = wIndex - 1;
struct u132_port *port = &u132->port[port_index];
port->Status &= ~(1 << wValue);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- retval = u132_write_pcimem(u132,
- roothub.portstatus[port_index], RH_PS_PSS);
- if (retval)
- return retval;
- return 0;
+ return u132_write_pcimem(u132,
+ roothub.portstatus[port_index], RH_PS_PSS);
case USB_PORT_FEAT_POWER:
- retval = u132_write_pcimem(u132,
- roothub.portstatus[port_index], RH_PS_PPS);
- if (retval)
- return retval;
- return 0;
+ return u132_write_pcimem(u132,
+ roothub.portstatus[port_index], RH_PS_PPS);
case USB_PORT_FEAT_RESET:
- retval = u132_roothub_portreset(u132, port_index);
- if (retval)
- return retval;
- return 0;
+ return u132_roothub_portreset(u132, port_index);
default:
return -EPIPE;
}
@@ -2737,7 +2724,6 @@ static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
} else {
int port_index = wIndex - 1;
u32 temp;
- int retval;
struct u132_port *port = &u132->port[port_index];
port->Status &= ~(1 << wValue);
switch (wValue) {
@@ -2773,11 +2759,8 @@ static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
default:
return -EPIPE;
}
- retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
- temp);
- if (retval)
- return retval;
- return 0;
+ return u132_write_pcimem(u132, roothub.portstatus[port_index],
+ temp);
}
}
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 745717ec9c89..2d16faefb429 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -99,6 +99,10 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC generates %s bit addresses\n",
HCC_64BIT_ADDR(temp) ? "64" : "32");
+ xhci_dbg(xhci, " HC %s Contiguous Frame ID Capability\n",
+ HCC_CFC(temp) ? "has" : "hasn't");
+ xhci_dbg(xhci, " HC %s generate Stopped - Short Package event\n",
+ HCC_SPC(temp) ? "can" : "can't");
/* FIXME */
xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
u32 pls = status_reg & PORT_PLS_MASK;
/* resume state is a xHCI internal state.
- * Do not report it to usb core.
+ * Do not report it to usb core, instead, pretend to be U3,
+ * thus usb core knows it's not ready for transfer
*/
- if (pls == XDEV_RESUME)
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
return;
+ }
/* When the CAS bit is set then warm reset
* should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_RESET << 16;
/* USB3.0 only */
if (hcd->speed == HCD_USB3) {
- if ((raw_port_status & PORT_PLC))
+ /* Port link change with port in resume state should not be
+ * reported to usbcore, as this is an internal state to be
+ * handled by the xhci driver. Reporting PLC to usbcore may
+ * cause usbcore to clear PLC first, in which case the port
+ * change event irq won't be generated.
+ */
+ if ((raw_port_status & PORT_PLC) &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
status |= USB_PORT_STAT_C_LINK_STATE << 16;
if ((raw_port_status & PORT_WRC))
status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
spin_lock_irqsave(&xhci->lock, flags);
if (hcd->self.root_hub->do_remote_wakeup) {
- if (bus_state->resuming_ports) {
+ if (bus_state->resuming_ports || /* USB2 */
+ bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "suspend failed because "
- "a port is resuming\n");
+ xhci_dbg(xhci, "suspend failed because a port is resuming\n");
return -EBUSY;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
return -ENOMEM;
+ virt_dev->num_rings_cached--;
virt_dev->eps[ep_index].new_ring =
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ if (timer_pending(&xhci->cmd_timer))
+ del_timer_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
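
The xhci_endpoint_init() reordering above is a pop-from-counted-array fix:
with num_rings_cached entries occupying indices 0..num_rings_cached-1, the
count must be decremented before being used as an index, otherwise the code
reads the slot one past the last cached ring. A hedged sketch of the
corrected pop, with the cache reduced to an opaque pointer array:

	#include <linux/stddef.h>

	/* Sketch: pop the last entry of a counted cache array. Decrement
	 * first so the index lands on the last valid slot. */
	static void *example_cache_pop(void **cache, int *n)
	{
		void *entry;

		if (*n == 0)
			return NULL;
		(*n)--;
		entry = cache[*n];
		cache[*n] = NULL;
		return entry;
	}
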
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/acpi.h>
#include "xhci.h"
#include "xhci-trace.h"
+#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define PROG_DONE (1 << 30)
+#define SSIC_PORT_UNUSED (1 << 31)
+
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
/*
+ * In some Intel xHCI controllers, in order to get D3 working, the
+ * SSIC port needs to be marked as "unused" through a vendor specific
+ * SSIC CONFIG register at offset 0x883c before putting xHCI into D3.
+ * After D3 exit, the SSIC port needs to be marked as "used" again.
+ * Without this change, xHCI might not enter D3 state.
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear
* the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
*/
-static void xhci_pme_quirk(struct xhci_hcd *xhci)
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+ reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+ /* Notify SSIC that SSIC profile programming is not done */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused (suspend) or used (resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
+
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
val = readl(reg);
writel(val | BIT(28), reg);
readl(reg);
}
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+ static const u8 intel_dsm_uuid[] = {
+ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
+ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
+ };
+ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+}
+#else
+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+ xhci_pme_acpi_rtd3_enable(dev);
+
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
pdev->no_d3cold = true;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, true);
return xhci_suspend(xhci, do_wakeup);
}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
usb_enable_intel_xhci_ports(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(xhci);
+ xhci_pme_quirk(hcd, false);
retval = xhci_resume(xhci, hibernated);
return retval;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..a47a1e897086 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
- if (segment_offset > TRBS_PER_SEGMENT)
+ if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
usb_hcd_resume_root_hub(hcd);
}
+ if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+ bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
@@ -1809,7 +1812,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (skip)
goto td_cleanup;
- if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_SHORT) {
/* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
@@ -1916,8 +1921,22 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
else
*status = 0;
break;
- case COMP_STOP_INVAL:
+ case COMP_STOP_SHORT:
+ if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
+ xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+ else
+ td->urb->actual_length =
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
case COMP_STOP:
+ /* Did we stop at data stage? */
+ if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ /* fall through */
+ case COMP_STOP_INVAL:
return finish_td(xhci, td, event_trb, event, ep, status, false);
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
@@ -2011,6 +2030,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
trb_comp_code = COMP_SHORT_TX;
+ /* fallthrough */
+ case COMP_STOP_SHORT:
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
-EREMOTEIO : 0;
@@ -2046,6 +2067,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (trb_comp_code == COMP_SUCCESS || skip_td) {
frame->actual_length = frame->length;
td->urb->actual_length += frame->length;
+ } else if (trb_comp_code == COMP_STOP_SHORT) {
+ frame->actual_length =
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ td->urb->actual_length += frame->actual_length;
} else {
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -2126,6 +2151,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
*status = 0;
}
break;
+ case COMP_STOP_SHORT:
case COMP_SHORT_TX:
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
@@ -2142,8 +2168,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->ep->desc.bEndpointAddress,
td->urb->transfer_buffer_length,
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ /* Stopped - short packet completion */
+ if (trb_comp_code == COMP_STOP_SHORT) {
+ td->urb->actual_length =
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
+ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ td->urb->actual_length = 0;
+ /* status will be set by usb core for canceled urbs */
+ }
/* Fast path - was this the last TRB in the TD for this URB? */
- if (event_trb == td->last_trb) {
+ } else if (event_trb == td->last_trb) {
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
@@ -2297,6 +2335,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_STOP_INVAL:
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
+ case COMP_STOP_SHORT:
+ xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
+ break;
case COMP_STALL:
xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
@@ -3038,9 +3079,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_td *td;
struct scatterlist *sg;
int num_sgs;
- int trb_buff_len, this_sg_len, running_total;
+ int trb_buff_len, this_sg_len, running_total, ret;
unsigned int total_packet_count;
+ bool zero_length_needed;
bool first_trb;
+ int last_trb_num;
u64 addr;
bool more_trbs_coming;
@@ -3056,13 +3099,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
- trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
- if (trb_buff_len < 0)
- return trb_buff_len;
+ if (ret < 0)
+ return ret;
urb_priv = urb->hcpriv;
+
+ /* Deal with URB_ZERO_PACKET - need one more td/trb */
+ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+ urb_priv->length == 2;
+ if (zero_length_needed) {
+ num_trbs++;
+ xhci_dbg(xhci, "Creating zero length td.\n");
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ if (ret < 0)
+ return ret;
+ }
+
td = urb_priv->td[0];
/*
@@ -3092,6 +3149,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
+ last_trb_num = zero_length_needed ? 2 : 1;
/* Queue the first TRB, even if it's zero-length */
do {
u32 field = 0;
@@ -3109,12 +3167,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (num_trbs > 1) {
+ if (num_trbs > last_trb_num) {
field |= TRB_CHAIN;
- } else {
- /* FIXME - add check for ZERO_PACKET flag before this */
+ } else if (num_trbs == last_trb_num) {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ } else if (zero_length_needed && num_trbs == 1) {
+ trb_buff_len = 0;
+ urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
}
/* Only set interrupt on short packet for IN endpoints */
@@ -3176,7 +3237,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
urb->transfer_buffer_length - running_total;
- } while (running_total < urb->transfer_buffer_length);
+ } while (num_trbs > 0);
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3194,7 +3255,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int num_trbs;
struct xhci_generic_trb *start_trb;
bool first_trb;
+ int last_trb_num;
bool more_trbs_coming;
+ bool zero_length_needed;
int start_cycle;
u32 field, length_field;
@@ -3225,7 +3288,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
@@ -3234,6 +3296,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return ret;
urb_priv = urb->hcpriv;
+
+ /* Deal with URB_ZERO_PACKET - need one more td/trb */
+ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+ urb_priv->length == 2;
+ if (zero_length_needed) {
+ num_trbs++;
+ xhci_dbg(xhci, "Creating zero length td.\n");
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ if (ret < 0)
+ return ret;
+ }
+
td = urb_priv->td[0];
/*
@@ -3255,7 +3331,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
-
+ last_trb_num = zero_length_needed ? 2 : 1;
/* Queue the first TRB, even if it's zero-length */
do {
u32 remainder = 0;
@@ -3272,12 +3348,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (num_trbs > 1) {
+ if (num_trbs > last_trb_num) {
field |= TRB_CHAIN;
- } else {
- /* FIXME - add check for ZERO_PACKET flag before this */
+ } else if (num_trbs == last_trb_num) {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ } else if (zero_length_needed && num_trbs == 1) {
+ trb_buff_len = 0;
+ urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
}
/* Only set interrupt on short packet for IN endpoints */
@@ -3315,7 +3394,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length - running_total;
if (trb_buff_len > TRB_MAX_BUFF_SIZE)
trb_buff_len = TRB_MAX_BUFF_SIZE;
- } while (running_total < urb->transfer_buffer_length);
+ } while (num_trbs > 0);
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3514,6 +3593,97 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
}
}
+/*
+ * Calculates the Frame ID field of an isochronous TRB, which identifies
+ * the target frame on which the Interval associated with this Isochronous
+ * Transfer Descriptor will start. Refer to 4.11.2.5 in the 1.1 spec.
+ *
+ * Returns actual frame id on success, negative value on error.
+ */
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
+ struct urb *urb, int index)
+{
+ int start_frame, ist, ret = 0;
+ int start_frame_id, end_frame_id, current_frame_id;
+
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ start_frame = urb->start_frame + index * urb->interval;
+ else
+ start_frame = (urb->start_frame + index * urb->interval) >> 3;
+
+ /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
+ *
+ * If bit [3] of IST is cleared to '0', software can add a TRB no
+ * later than IST[2:0] Microframes before that TRB is scheduled to
+ * be executed.
+ * If bit [3] of IST is set to '1', software can add a TRB no later
+ * than IST[2:0] Frames before that TRB is scheduled to be executed.
+ */
+ ist = HCS_IST(xhci->hcs_params2) & 0x7;
+ if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+ ist <<= 3;
+
+ /* Software shall not schedule an Isoch TD with a Frame ID value that
+ * is less than the Start Frame ID or greater than the End Frame ID,
+ * where:
+ *
+ * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
+ * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
+ *
+ * Both the End Frame ID and Start Frame ID values are calculated
+ * in microframes. When software determines the valid Frame ID value,
+ * the End Frame ID value should be rounded down to the nearest Frame
+ * boundary, and the Start Frame ID value should be rounded up to the
+ * nearest Frame boundary.
+ */
+ current_frame_id = readl(&xhci->run_regs->microframe_index);
+ start_frame_id = roundup(current_frame_id + ist + 1, 8);
+ end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
+
+ start_frame &= 0x7ff;
+ start_frame_id = (start_frame_id >> 3) & 0x7ff;
+ end_frame_id = (end_frame_id >> 3) & 0x7ff;
+
+ xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
+ __func__, index, readl(&xhci->run_regs->microframe_index),
+ start_frame_id, end_frame_id, start_frame);
+
+ if (start_frame_id < end_frame_id) {
+ if (start_frame > end_frame_id ||
+ start_frame < start_frame_id)
+ ret = -EINVAL;
+ } else if (start_frame_id > end_frame_id) {
+ if ((start_frame > end_frame_id &&
+ start_frame < start_frame_id))
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (index == 0) {
+ if (ret == -EINVAL || start_frame == start_frame_id) {
+ start_frame = start_frame_id + 1;
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->start_frame = start_frame;
+ else
+ urb->start_frame = start_frame << 3;
+ ret = 0;
+ }
+ }
+
+ if (ret) {
+ xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
+ start_frame, current_frame_id, index,
+ start_frame_id, end_frame_id);
+ xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
+ return ret;
+ }
+
+ return start_frame;
+}
+
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3530,7 +3700,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
+ struct xhci_virt_ep *xep;
+ xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_tds = urb->number_of_packets;
@@ -3578,6 +3750,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td = urb_priv->td[i];
for (j = 0; j < trbs_per_td; j++) {
+ int frame_id = 0;
u32 remainder = 0;
field = 0;
@@ -3586,8 +3759,20 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
TRB_TLBPC(residue);
/* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC);
- /* Assume URB_ISO_ASAP is set */
- field |= TRB_SIA;
+
+ /* Calculate Frame ID and SIA fields */
+ if (!(urb->transfer_flags & URB_ISO_ASAP) &&
+ HCC_CFC(xhci->hcc_params)) {
+ frame_id = xhci_get_isoc_frame_id(xhci,
+ urb,
+ i);
+ if (frame_id >= 0)
+ field |= TRB_FRAME_ID(frame_id);
+ else
+ field |= TRB_SIA;
+ } else
+ field |= TRB_SIA;
+
if (i == 0) {
if (start_cycle == 0)
field |= 0x1;
@@ -3663,6 +3848,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
+ /* store the next frame id */
+ if (HCC_CFC(xhci->hcc_params))
+ xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
+
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
@@ -3696,12 +3885,34 @@ cleanup:
return ret;
}
+static int ep_ring_is_processing(struct xhci_hcd *xhci,
+ int slot_id, unsigned int ep_index)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_virt_ep *xep;
+ dma_addr_t hw_deq;
+
+ xdev = xhci->devs[slot_id];
+ xep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_ring = xep->ring;
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+ if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
+ return 0;
+
+ hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ return (hw_deq !=
+ xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
+}
+
/*
* Check transfer ring to guarantee there is enough room for the urb.
* Update ISO URB start_frame and interval.
- * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
- * update the urb->start_frame by now.
- * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
+ * Contiguous Frame ID is not supported by HC.
*/
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3714,8 +3925,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int ep_interval;
int num_tds, num_trbs, i;
int ret;
+ struct xhci_virt_ep *xep;
+ int ist;
xdev = xhci->devs[slot_id];
+ xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xdev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -3732,14 +3946,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
if (ret)
return ret;
- start_frame = readl(&xhci->run_regs->microframe_index);
- start_frame &= 0x3fff;
-
- urb->start_frame = start_frame;
- if (urb->dev->speed == USB_SPEED_LOW ||
- urb->dev->speed == USB_SPEED_FULL)
- urb->start_frame >>= 3;
-
+ /*
+ * Check interval value. This should be done before we start to
+ * calculate the start frame value.
+ */
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
@@ -3760,6 +3970,40 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
+
+ /* Calculate the start frame and put it in urb->start_frame. */
+ if (HCC_CFC(xhci->hcc_params) &&
+ ep_ring_is_processing(xhci, slot_id, ep_index)) {
+ urb->start_frame = xep->next_frame_id;
+ goto skip_start_over;
+ }
+
+ start_frame = readl(&xhci->run_regs->microframe_index);
+ start_frame &= 0x3fff;
+ /*
+ * Round up to the next frame and consider the time before trb really
+ * gets scheduled by hardware.
+ */
+ ist = HCS_IST(xhci->hcs_params2) & 0x7;
+ if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+ ist <<= 3;
+ start_frame += ist + XHCI_CFC_DELAY;
+ start_frame = roundup(start_frame, 8);
+
+ /*
+ * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
+ * is greater than 8 microframes.
+ */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL) {
+ start_frame = roundup(start_frame, urb->interval << 3);
+ urb->start_frame = start_frame >> 3;
+ } else {
+ start_frame = roundup(start_frame, urb->interval);
+ urb->start_frame = start_frame;
+ }
+
+skip_start_over:
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
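
The new start-frame computation can be followed with concrete numbers.
Assuming MFINDEX reads 0x100 (256 microframes), HCSPARAMS2 reports an IST of
2 microframes, XHCI_CFC_DELAY is the constant 10 added to xhci.h below, and
a full-speed urb->interval of 1 frame: 256 + 2 + 10 = 268 microframes,
rounded up to the next frame boundary gives 272, i.e. urb->start_frame = 34.
A hedged arithmetic sketch with those illustrative values:

	#include <linux/kernel.h>	/* roundup() */

	/* Sketch: the start-frame arithmetic above with example numbers. */
	static unsigned int example_start_frame(void)
	{
		unsigned int mfindex = 0x100;	/* illustrative MFINDEX */
		unsigned int ist = 2;		/* illustrative IST */
		unsigned int start;

		start = mfindex + ist + 10;	/* 10 == XHCI_CFC_DELAY */
		start = roundup(start, 8);	/* frame boundary: 272 */
		return start >> 3;		/* frame number: 34 */
	}
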
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..6b0f4a47e402 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1340,6 +1340,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
size = urb->number_of_packets;
+ else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
+ urb->transfer_buffer_length > 0 &&
+ urb->transfer_flags & URB_ZERO_PACKET &&
+ !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
+ size = 2;
else
size = 1;
@@ -3117,7 +3122,7 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
}
/*
- * The USB device drivers use this function (though the HCD interface in USB
+ * The USB device drivers use this function (through the HCD interface in USB
* core) to prepare a set of bulk endpoints to use streams. Streams are used to
* coordinate mass storage command queueing across multiple endpoints (basically
* a stream ID == a task ID).
@@ -3453,6 +3458,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
return -EINVAL;
}
+ if (virt_dev->tt_info)
+ old_active_eps = virt_dev->tt_info->active_eps;
+
if (virt_dev->udev != udev) {
/* If the virt_dev and the udev does not match, this virt_dev
* may belong to another udev.
@@ -4675,7 +4683,6 @@ int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
{
struct xhci_hcd *xhci;
u16 mel;
- int ret;
xhci = hcd_to_xhci(hcd);
if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
@@ -4683,10 +4690,7 @@ int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
return 0;
mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
- ret = xhci_change_max_exit_latency(xhci, udev, mel);
- if (ret)
- return ret;
- return 0;
+ return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */
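
The size computation added to xhci_urb_enqueue() encodes when a bulk OUT
transfer needs a trailing zero-length TD: URB_ZERO_PACKET is requested, the
buffer is non-empty, and its length is an exact multiple of the endpoint's
maxpacket (otherwise the final short packet already terminates the
transfer). A hedged restatement of the same condition as a predicate:

	#include <linux/usb.h>	/* URB_ZERO_PACKET */

	/* Sketch: does a bulk OUT URB need an explicit zero-length packet?
	 * Mirrors the size == 2 condition in xhci_urb_enqueue() above. */
	static bool example_needs_zlp(unsigned int len, unsigned int maxp,
				      unsigned int transfer_flags)
	{
		return (transfer_flags & URB_ZERO_PACKET) &&
		       len > 0 && (len % maxp) == 0;
	}
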
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..dbda41e91c84 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -119,6 +119,10 @@ struct xhci_cap_regs {
#define HCC_LTC(p) ((p) & (1 << 6))
/* true: no secondary Stream ID Support */
#define HCC_NSS(p) ((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p) ((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p) ((p) & (1 << 11))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
@@ -285,6 +289,7 @@ struct xhci_op_regs {
#define XDEV_U0 (0x0 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -890,6 +895,8 @@ struct xhci_virt_ep {
/* Bandwidth checking storage */
struct xhci_bw_info bw_info;
struct list_head bw_endpoint_list;
+ /* Isoch Frame ID checking storage */
+ int next_frame_id;
};
enum xhci_overhead_type {
@@ -1058,8 +1065,8 @@ struct xhci_transfer_event {
#define COMP_STOP 26
/* Same as COMP_EP_STOPPED, but the transferred length in the event is invalid */
#define COMP_STOP_INVAL 27
-/* Control Abort Error - Debug Capability - control pipe aborted */
-#define COMP_DBG_ABORT 28
+/* Same as COMP_EP_STOPPED, but a short packet was detected */
+#define COMP_STOP_SHORT 28
/* Max Exit Latency Too Large Error */
#define COMP_MEL_ERR 29
/* TRB type 30 reserved */
@@ -1164,6 +1171,7 @@ enum xhci_setup_dev {
/* Isochronous TRB specific fields */
#define TRB_SIA (1<<31)
+#define TRB_FRAME_ID(p) (((p) & 0x7ff) << 20)
struct xhci_generic_trb {
__le32 field[4];
@@ -1600,6 +1608,8 @@ struct xhci_driver_overrides {
int (*start)(struct usb_hcd *hcd);
};
+#define XHCI_CFC_DELAY 10
+
/* convert between an HCD pointer and the corresponding EHCI_HCD */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 18ebf5b1f256..1c3d0fd658fa 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1382,14 +1382,25 @@ static void isp1760_udc_init_eps(struct isp1760_udc *udc)
* This fits in the 8kB FIFO without double-buffering.
*/
if (ep_num == 0) {
- ep->ep.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&ep->ep, 64);
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
ep->maxpacket = 64;
udc->gadget.ep0 = &ep->ep;
} else {
- ep->ep.maxpacket = 512;
+ usb_ep_set_maxpacket_limit(&ep->ep, 512);
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
ep->maxpacket = 0;
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
}
+
+ if (is_in)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
}
}
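
The isp1760 change replaces direct ep.maxpacket assignment with
usb_ep_set_maxpacket_limit() and fills in the new ep.caps flags that the
gadget core uses to match endpoints to function requirements. A hedged
sketch of the same initialization for a generic bulk/interrupt-capable
endpoint (assuming an already-allocated struct usb_ep):

	#include <linux/usb/gadget.h>

	/* Sketch: describe a gadget endpoint's capabilities to the core,
	 * following the pattern used above. */
	static void example_init_ep(struct usb_ep *ep, bool is_in)
	{
		usb_ep_set_maxpacket_limit(ep, 512);
		ep->caps.type_bulk = true;
		ep->caps.type_int = true;
		if (is_in)
			ep->caps.dir_in = true;
		else
			ep->caps.dir_out = true;
	}
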
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 8ab1f8f3c26e..52c27cab78c3 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2568,11 +2568,7 @@ static int ftdi_elan_close_controller(struct usb_ftdi *ftdi, int fn)
0x00);
if (UxxxStatus)
return UxxxStatus;
- UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0,
- &pcidata);
- if (UxxxStatus)
- return UxxxStatus;
- return 0;
+ return ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata);
}
static int ftdi_elan_found_controller(struct usb_ftdi *ftdi, int fn, int quirk)
@@ -2695,11 +2691,7 @@ static int ftdi_elan_setupOHCI(struct usb_ftdi *ftdi)
}
}
if (ftdi->function > 0) {
- UxxxStatus = ftdi_elan_setup_controller(ftdi,
- ftdi->function - 1);
- if (UxxxStatus)
- return UxxxStatus;
- return 0;
+ return ftdi_elan_setup_controller(ftdi, ftdi->function - 1);
} else if (controllers > 0) {
return -ENXIO;
} else if (unrecognized > 0) {
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 0bbafe795a72..9517812a50e2 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1925,10 +1925,11 @@ test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
memset(urbs, 0, sizeof(urbs));
udev = testdev_to_usbdev(dev);
dev_info(&dev->intf->dev,
- "... iso period %d %sframes, wMaxPacket %04x\n",
+ "iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
- usb_endpoint_maxp(desc));
+ usb_endpoint_maxp(desc) & 0x7ff,
+ 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
for (i = 0; i < param->sglen; i++) {
urbs[i] = iso_alloc_urb(udev, pipe, desc,
@@ -1942,7 +1943,7 @@ test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
}
packets *= param->iterations;
dev_info(&dev->intf->dev,
- "... total %lu msec (%lu packets)\n",
+ "total %lu msec (%lu packets)\n",
(packets * (1 << (desc->bInterval - 1)))
/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
packets);
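
The reworked dev_info() splits a high-speed wMaxPacketSize into its two
fields: bits 10:0 give the packet size and bits 12:11 encode the number of
additional transactions per microframe. For example, 0x1400 decodes to a
1024-byte maxpacket with 1 + ((0x1400 >> 11) & 0x3) = 3 transactions. A
hedged sketch of the decode:

	#include <linux/printk.h>

	/* Sketch: split wMaxPacketSize the way the dev_info() above does,
	 * e.g. 0x1400 -> 1024 bytes, 3 transactions per microframe. */
	static void example_decode_maxp(u16 wMaxPacketSize)
	{
		unsigned int maxp = wMaxPacketSize & 0x7ff;
		unsigned int xact = 1 + ((wMaxPacketSize >> 11) & 0x3);

		pr_info("maxpacket %u, transactions %u\n", maxp, xact);
	}
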
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 39db8b603627..1f2037bbeb0d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -5,7 +5,7 @@
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, AW, ...)'
depends on (USB || USB_GADGET)
help
Say Y here if your system has a dual role high speed USB
@@ -20,6 +20,8 @@ config USB_MUSB_HDRC
Analog Devices parts using this IP include Blackfin BF54x,
BF525 and BF527.
+ Allwinner SoCs using this IP include A10, A13, A20, ...
+
If you do not know what this is, please say N.
To compile this driver as a module, choose M here; the
@@ -60,6 +62,15 @@ endchoice
comment "Platform Glue Layer"
+config USB_MUSB_SUNXI
+ tristate "Allwinner (sunxi)"
+ depends on ARCH_SUNXI
+ depends on NOP_USB_XCEIV
+ depends on PHY_SUN4I_USB
+ depends on EXTCON
+ depends on GENERIC_PHY
+ select SUNXI_SRAM
+
config USB_MUSB_DAVINCI
tristate "DaVinci"
depends on ARCH_DAVINCI_DMx
@@ -113,19 +124,20 @@ config USB_MUSB_JZ4740
config USB_MUSB_AM335X_CHILD
tristate
-choice
- prompt 'MUSB DMA mode'
- default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM || USB_MUSB_JZ4740
- default USB_UX500_DMA if USB_MUSB_UX500
- default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
- default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
- default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
- default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X \
- || USB_MUSB_DSPS
+comment "MUSB DMA mode"
+
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
help
- Unfortunately, only one option can be enabled here. Ideally one
- should be able to build all these drivers into one kernel to
- allow using DMA on multiplatform kernels.
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+if !MUSB_PIO_ONLY
config USB_UX500_DMA
bool 'ST Ericsson Ux500'
@@ -157,17 +169,6 @@ config USB_TUSB_OMAP_DMA
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
-config MUSB_PIO_ONLY
- bool 'Disable DMA (always use PIO)'
- help
- All data is copied between memory and FIFO by the CPU.
- DMA controllers are ignored.
-
- Do not choose this unless DMA support for your SOC or board
- is unavailable (or unstable). When DMA is enabled at compile time,
- you can still disable it at run time using the "use_dma=n" module
- parameter.
-
-endchoice
+endif # !MUSB_PIO_ONLY
endif # USB_MUSB_HDRC
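
The relocated MUSB_PIO_ONLY help text leans on a runtime escape hatch: even with DMA built in, it can be disabled at load time. A minimal kernel-style sketch of such a module parameter, modeled on the use_dma flag in musb_core.c (the permissions value here is illustrative):

#include <linux/module.h>

/* DMA support stays compiled in; this turns it off at load time,
 * e.g. "modprobe musb_hdrc use_dma=n". */
static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
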
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index ba495018b416..f95befe18cc1 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
+obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 4d1b44c232ee..d07cafb7d5f5 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -614,7 +614,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
struct musb *musb = controller->musb;
struct device *dev = musb->controller;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->parent->of_node;
struct cppi41_dma_channel *cppi41_channel;
int count;
int i;
@@ -664,7 +664,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
musb_dma->status = MUSB_DMA_STATUS_FREE;
musb_dma->max_len = SZ_4M;
- dc = dma_request_slave_channel(dev, str);
+ dc = dma_request_slave_channel(dev->parent, str);
if (!dc) {
dev_err(dev, "Failed to request %s.\n", str);
ret = -EPROBE_DEFER;
@@ -695,7 +695,7 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
struct cppi41_dma_controller *controller;
int ret = 0;
- if (!musb->controller->of_node) {
+ if (!musb->controller->parent->of_node) {
dev_err(musb->controller, "Need DT for the DMA engine.\n");
return NULL;
}
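
All three musb_cppi41.c hunks move the DT lookups one level up: the "dmas"/"dma-names" properties sit in the glue device's node, while the musb core device is a child created by platform_device_register_full(). A kernel-style sketch of the resulting lookup (compiles in-tree; the "rx1" channel name is illustrative):

#include <linux/dmaengine.h>
#include <linux/of.h>

static struct dma_chan *demo_request_rx(struct device *dev)
{
	/* dev is the musb child; its parent is the glue with the DT node */
	if (!dev->parent->of_node)
		return NULL;	/* the DMA engine needs DT */

	return dma_request_slave_channel(dev->parent, "rx1");
}
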
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 1334a3de31b8..a0cfead6150f 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -482,11 +482,7 @@ static int dsps_musb_init(struct musb *musb)
dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
}
- ret = dsps_musb_dbg_init(musb, glue);
- if (ret)
- return ret;
-
- return 0;
+ return dsps_musb_dbg_init(musb, glue);
}
static int dsps_musb_exit(struct musb *musb)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 625d482f1a97..67ad630c86c9 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -313,8 +313,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
/* MUSB_TXCSR_P_ISO is still set correctly */
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
- {
+ if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
@@ -365,7 +364,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
}
}
-#endif
if (is_cppi_enabled(musb)) {
/* program endpoint CSR first, then setup DMA */
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
@@ -641,8 +639,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
use_mode_1 = 0;
if (request->actual < request->length) {
-#ifdef CONFIG_USB_INVENTRA_DMA
- if (is_buffer_mapped(req)) {
+ if (!is_buffer_mapped(req))
+ goto buffer_aint_mapped;
+
+ if (musb_dma_inventra(musb)) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
@@ -716,8 +716,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (use_dma)
return;
}
-#elif defined(CONFIG_USB_UX500_DMA)
- if ((is_buffer_mapped(req)) &&
+
+ if ((musb_dma_ux500(musb)) &&
(request->actual < request->length)) {
struct dma_controller *c;
@@ -765,7 +765,6 @@ static void rxstate(struct musb *musb, struct musb_request *req)
return;
}
-#endif /* Mentor's DMA */
len = request->length - request->actual;
dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
@@ -775,8 +774,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
fifo_count = min_t(unsigned, len, fifo_count);
-#ifdef CONFIG_USB_TUSB_OMAP_DMA
- if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
+ if (tusb_dma_omap(musb)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
u32 dma_addr = request->dma + request->actual;
@@ -790,23 +788,22 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (ret)
return;
}
-#endif
+
/*
* Unmap the dma buffer back to cpu if dma channel
* programming fails. This buffer is mapped if the
* channel allocation is successful
*/
- if (is_buffer_mapped(req)) {
- unmap_dma_buffer(req, musb);
-
- /*
- * Clear DMAENAB and AUTOCLEAR for the
- * PIO mode transfer
- */
- csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
- musb_writew(epio, MUSB_RXCSR, csr);
- }
+ unmap_dma_buffer(req, musb);
+ /*
+ * Clear DMAENAB and AUTOCLEAR for the
+ * PIO mode transfer
+ */
+ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+buffer_aint_mapped:
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
@@ -1684,6 +1681,40 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
+#ifdef CONFIG_BLACKFIN
+static struct usb_ep *musb_match_ep(struct usb_gadget *g,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ struct usb_ep *ep = NULL;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(desc))
+ ep = gadget_find_ep_by_name(g, "ep5in");
+ else
+ ep = gadget_find_ep_by_name(g, "ep6out");
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(desc))
+ ep = gadget_find_ep_by_name(g, "ep1in");
+ else
+ ep = gadget_find_ep_by_name(g, "ep2out");
+ break;
+ default:
+ break;
+ }
+
+ if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+ return ep;
+
+ return NULL;
+}
+#else
+#define musb_match_ep NULL
+#endif
+
static int musb_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);
@@ -1697,6 +1728,7 @@ static const struct usb_gadget_ops musb_gadget_operations = {
.pullup = musb_gadget_pullup,
.udc_start = musb_gadget_start,
.udc_stop = musb_gadget_stop,
+ .match_ep = musb_match_ep,
};
/* ----------------------------------------------------------------------- */
@@ -1729,6 +1761,7 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
INIT_LIST_HEAD(&ep->end_point.ep_list);
if (!epnum) {
usb_ep_set_maxpacket_limit(&ep->end_point, 64);
+ ep->end_point.caps.type_control = true;
ep->end_point.ops = &musb_g_ep0_ops;
musb->g.ep0 = &ep->end_point;
} else {
@@ -1736,9 +1769,20 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
else
usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
+ ep->end_point.caps.type_iso = true;
+ ep->end_point.caps.type_bulk = true;
+ ep->end_point.caps.type_int = true;
ep->end_point.ops = &musb_ep_ops;
list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
}
+
+ if (!epnum || hw_ep->is_shared_fifo) {
+ ep->end_point.caps.dir_in = true;
+ ep->end_point.caps.dir_out = true;
+ } else if (is_in)
+ ep->end_point.caps.dir_in = true;
+ else
+ ep->end_point.caps.dir_out = true;
}
/*
@@ -2075,6 +2119,7 @@ __acquires(musb->lock)
musb->g.b_hnp_enable = 0;
musb->g.a_alt_hnp_support = 0;
musb->g.a_hnp_support = 0;
+ musb->g.quirk_zlp_not_supp = 1;
/* Normal reset, as B-Device;
* or else after HNP, as A-Device
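
The txstate()/rxstate() hunks above swap compile-time #ifdef dispatch for runtime checks such as musb_dma_inventra() and musb_dma_ux500(), so one kernel image can carry several DMA engines. A standalone mock of the pattern, with hypothetical quirk flags standing in for the real MUSB_DMA_* plumbing:

#include <stdio.h>

/* hypothetical quirk flags, standing in for the real MUSB_DMA_* checks */
#define DMA_INVENTRA	(1 << 0)
#define DMA_UX500	(1 << 1)

struct ctrl {
	unsigned int quirks;
};

static int dma_inventra(const struct ctrl *c) { return c->quirks & DMA_INVENTRA; }
static int dma_ux500(const struct ctrl *c)    { return c->quirks & DMA_UX500; }

int main(void)
{
	struct ctrl c = { .quirks = DMA_UX500 };

	/* one image handles both engines; no #ifdef needed */
	if (dma_inventra(&c) || dma_ux500(&c))
		printf("program Mentor-style DMA\n");
	else
		printf("fall back to PIO\n");
	return 0;
}
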
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 30842bc195f5..92d5f718659b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb)
#ifdef CONFIG_USB_MUSB_HOST
return 1;
#else
- if (musb->port_mode == MUSB_PORT_MODE_HOST)
- return 1;
- return musb->g.dev.driver != NULL;
+ return musb->port_mode == MUSB_PORT_MODE_HOST;
#endif
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
new file mode 100644
index 000000000000..f9f6304ad854
--- /dev/null
+++ b/drivers/usb/musb/sunxi.c
@@ -0,0 +1,756 @@
+/*
+ * Allwinner sun4i MUSB Glue Layer
+ *
+ * Copyright (C) 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy-sun4i-usb.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/usb/musb.h>
+#include <linux/usb/of.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/workqueue.h>
+#include "musb_core.h"
+
+/*
+ * Register offsets. Note that the sunxi musb has a different register
+ * layout than most musb implementations; we translate the layout in
+ * musb_readb & friends.
+ */
+#define SUNXI_MUSB_POWER 0x0040
+#define SUNXI_MUSB_DEVCTL 0x0041
+#define SUNXI_MUSB_INDEX 0x0042
+#define SUNXI_MUSB_VEND0 0x0043
+#define SUNXI_MUSB_INTRTX 0x0044
+#define SUNXI_MUSB_INTRRX 0x0046
+#define SUNXI_MUSB_INTRTXE 0x0048
+#define SUNXI_MUSB_INTRRXE 0x004a
+#define SUNXI_MUSB_INTRUSB 0x004c
+#define SUNXI_MUSB_INTRUSBE 0x0050
+#define SUNXI_MUSB_FRAME 0x0054
+#define SUNXI_MUSB_TXFIFOSZ 0x0090
+#define SUNXI_MUSB_TXFIFOADD 0x0092
+#define SUNXI_MUSB_RXFIFOSZ 0x0094
+#define SUNXI_MUSB_RXFIFOADD 0x0096
+#define SUNXI_MUSB_FADDR 0x0098
+#define SUNXI_MUSB_TXFUNCADDR 0x0098
+#define SUNXI_MUSB_TXHUBADDR 0x009a
+#define SUNXI_MUSB_TXHUBPORT 0x009b
+#define SUNXI_MUSB_RXFUNCADDR 0x009c
+#define SUNXI_MUSB_RXHUBADDR 0x009e
+#define SUNXI_MUSB_RXHUBPORT 0x009f
+#define SUNXI_MUSB_CONFIGDATA 0x00c0
+
+/* VEND0 bits */
+#define SUNXI_MUSB_VEND0_PIO_MODE 0
+
+/* flags */
+#define SUNXI_MUSB_FL_ENABLED 0
+#define SUNXI_MUSB_FL_HOSTMODE 1
+#define SUNXI_MUSB_FL_HOSTMODE_PEND 2
+#define SUNXI_MUSB_FL_VBUS_ON 3
+#define SUNXI_MUSB_FL_PHY_ON 4
+#define SUNXI_MUSB_FL_HAS_SRAM 5
+#define SUNXI_MUSB_FL_HAS_RESET 6
+#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
+
+/* Our read/write methods need a musb ref, but do not get passed one :| */
+static struct musb *sunxi_musb;
+
+struct sunxi_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct platform_device *usb_phy;
+ struct usb_phy *xceiv;
+ unsigned long flags;
+ struct work_struct work;
+ struct extcon_dev *extcon;
+ struct notifier_block host_nb;
+};
+
+/* phy_power_on / off may sleep, so we use a workqueue */
+static void sunxi_musb_work(struct work_struct *work)
+{
+ struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work);
+ bool vbus_on, phy_on;
+
+ if (!test_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+ return;
+
+ if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ unsigned long flags;
+ u8 devctl;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ devctl = readb(musb->mregs + SUNXI_MUSB_DEVCTL);
+ if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ devctl |= MUSB_DEVCTL_SESSION;
+ } else {
+ clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->default_a = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ }
+ writeb(devctl, musb->mregs + SUNXI_MUSB_DEVCTL);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+
+ vbus_on = test_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ phy_on = test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+
+ if (phy_on != vbus_on) {
+ if (vbus_on) {
+ phy_power_on(glue->phy);
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ } else {
+ phy_power_off(glue->phy);
+ clear_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ }
+ }
+}
+
+static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ if (is_on)
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ else
+ clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+
+ schedule_work(&glue->work);
+}
+
+static void sunxi_musb_pre_root_reset_end(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ sun4i_usb_phy_set_squelch_detect(glue->phy, false);
+}
+
+static void sunxi_musb_post_root_reset_end(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ sun4i_usb_phy_set_squelch_detect(glue->phy, true);
+}
+
+static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = readb(musb->mregs + SUNXI_MUSB_INTRUSB);
+ if (musb->int_usb)
+ writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
+
+ /*
+	 * The sunxi musb often signals babble on low / full speed device
+	 * disconnect without ever raising MUSB_INTR_DISCONNECT; since
+	 * babble normally never happens, treat it as a disconnect.
+ */
+ if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
+ musb->int_usb &= ~MUSB_INTR_BABBLE;
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ }
+
+ if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+ /* ep0 FADDR must be 0 when (re)entering peripheral mode */
+ musb_ep_select(musb->mregs, 0);
+ musb_writeb(musb->mregs, MUSB_FADDR, 0);
+ }
+
+ musb->int_tx = readw(musb->mregs + SUNXI_MUSB_INTRTX);
+ if (musb->int_tx)
+ writew(musb->int_tx, musb->mregs + SUNXI_MUSB_INTRTX);
+
+ musb->int_rx = readw(musb->mregs + SUNXI_MUSB_INTRRX);
+ if (musb->int_rx)
+ writew(musb->int_rx, musb->mregs + SUNXI_MUSB_INTRRX);
+
+ musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_musb_host_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct sunxi_glue *glue = container_of(nb, struct sunxi_glue, host_nb);
+
+ if (event)
+ set_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+ else
+ clear_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+
+ set_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return NOTIFY_DONE;
+}
+
+static int sunxi_musb_init(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ int ret;
+
+ sunxi_musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) {
+ ret = sunxi_sram_claim(musb->controller->parent);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret)
+ goto error_sram_release;
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+ ret = reset_control_deassert(glue->rst);
+ if (ret)
+ goto error_clk_disable;
+ }
+
+ writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
+
+ /* Register notifier before calling phy_init() */
+ if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) {
+ ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
+ if (ret)
+ goto error_reset_assert;
+ }
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto error_unregister_notifier;
+
+ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ goto error_phy_exit;
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ /* Stop musb work from turning vbus off again */
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ }
+
+ musb->isr = sunxi_musb_interrupt;
+
+ /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
+ pm_runtime_get(musb->controller);
+
+ return 0;
+
+error_phy_exit:
+ phy_exit(glue->phy);
+error_unregister_notifier:
+ if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
+error_reset_assert:
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+ reset_control_assert(glue->rst);
+error_clk_disable:
+ clk_disable_unprepare(glue->clk);
+error_sram_release:
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ sunxi_sram_release(musb->controller->parent);
+ return ret;
+}
+
+static int sunxi_musb_exit(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ pm_runtime_put(musb->controller);
+
+ cancel_work_sync(&glue->work);
+ if (test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags))
+ phy_power_off(glue->phy);
+
+ phy_exit(glue->phy);
+
+ if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+ reset_control_assert(glue->rst);
+
+ clk_disable_unprepare(glue->clk);
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ sunxi_sram_release(musb->controller->parent);
+
+ return 0;
+}
+
+static void sunxi_musb_enable(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /* musb_core does not call us in a balanced manner */
+ if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+ return;
+
+ schedule_work(&glue->work);
+}
+
+static void sunxi_musb_disable(struct musb *musb)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
+}
+
+/*
+ * sunxi musb register layout
+ * 0x00 - 0x17 fifo regs, 1 long per fifo
+ * 0x40 - 0x57 generic control regs (power - frame)
+ * 0x80 - 0x8f ep control regs (addressed through hw_ep->regs, indexed)
+ * 0x90 - 0x97 fifo control regs (indexed)
+ * 0x98 - 0x9f multipoint / busctl regs (indexed)
+ * 0xc0 configdata reg
+ */
+
+static u32 sunxi_musb_fifo_offset(u8 epnum)
+{
+ return (epnum * 4);
+}
+
+static u32 sunxi_musb_ep_offset(u8 epnum, u16 offset)
+{
+ WARN_ONCE(offset != 0,
+ "sunxi_musb_ep_offset called with non 0 offset\n");
+
+ return 0x80; /* indexed, so ignore epnum */
+}
+
+static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return SUNXI_MUSB_TXFUNCADDR + offset;
+}
+
+static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+{
+ struct sunxi_glue *glue;
+
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_FADDR:
+ return readb(addr + SUNXI_MUSB_FADDR);
+ case MUSB_POWER:
+ return readb(addr + SUNXI_MUSB_POWER);
+ case MUSB_INTRUSB:
+ return readb(addr + SUNXI_MUSB_INTRUSB);
+ case MUSB_INTRUSBE:
+ return readb(addr + SUNXI_MUSB_INTRUSBE);
+ case MUSB_INDEX:
+ return readb(addr + SUNXI_MUSB_INDEX);
+ case MUSB_TESTMODE:
+ return 0; /* No testmode on sunxi */
+ case MUSB_DEVCTL:
+ return readb(addr + SUNXI_MUSB_DEVCTL);
+ case MUSB_TXFIFOSZ:
+ return readb(addr + SUNXI_MUSB_TXFIFOSZ);
+ case MUSB_RXFIFOSZ:
+ return readb(addr + SUNXI_MUSB_RXFIFOSZ);
+ case MUSB_CONFIGDATA + 0x10: /* See musb_read_configdata() */
+ glue = dev_get_drvdata(sunxi_musb->controller->parent);
+ /* A33 saves a reg, and we get to hardcode this */
+ if (test_bit(SUNXI_MUSB_FL_NO_CONFIGDATA,
+ &glue->flags))
+ return 0xde;
+
+ return readb(addr + SUNXI_MUSB_CONFIGDATA);
+ /* Offset for these is fixed by sunxi_musb_busctl_offset() */
+ case SUNXI_MUSB_TXFUNCADDR:
+ case SUNXI_MUSB_TXHUBADDR:
+ case SUNXI_MUSB_TXHUBPORT:
+ case SUNXI_MUSB_RXFUNCADDR:
+ case SUNXI_MUSB_RXHUBADDR:
+ case SUNXI_MUSB_RXHUBPORT:
+ /* multipoint / busctl reg access */
+ return readb(addr + offset);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readb offset %u\n", offset);
+ return 0;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ /* sunxi has a 2 byte hole before the txtype register */
+ if (offset >= MUSB_TXTYPE)
+ offset += 2;
+ return readb(addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readb at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+ return 0;
+}
+
+static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_FADDR:
+ return writeb(data, addr + SUNXI_MUSB_FADDR);
+ case MUSB_POWER:
+ return writeb(data, addr + SUNXI_MUSB_POWER);
+ case MUSB_INTRUSB:
+ return writeb(data, addr + SUNXI_MUSB_INTRUSB);
+ case MUSB_INTRUSBE:
+ return writeb(data, addr + SUNXI_MUSB_INTRUSBE);
+ case MUSB_INDEX:
+ return writeb(data, addr + SUNXI_MUSB_INDEX);
+ case MUSB_TESTMODE:
+ if (data)
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have testmode\n");
+ return;
+ case MUSB_DEVCTL:
+ return writeb(data, addr + SUNXI_MUSB_DEVCTL);
+ case MUSB_TXFIFOSZ:
+ return writeb(data, addr + SUNXI_MUSB_TXFIFOSZ);
+ case MUSB_RXFIFOSZ:
+ return writeb(data, addr + SUNXI_MUSB_RXFIFOSZ);
+ /* Offset for these is fixed by sunxi_musb_busctl_offset() */
+ case SUNXI_MUSB_TXFUNCADDR:
+ case SUNXI_MUSB_TXHUBADDR:
+ case SUNXI_MUSB_TXHUBPORT:
+ case SUNXI_MUSB_RXFUNCADDR:
+ case SUNXI_MUSB_RXHUBADDR:
+ case SUNXI_MUSB_RXHUBPORT:
+ /* multipoint / busctl reg access */
+ return writeb(data, addr + offset);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writeb offset %u\n", offset);
+ return;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ if (offset >= MUSB_TXTYPE)
+ offset += 2;
+ return writeb(data, addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writeb at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+}
+
+static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_INTRTX:
+ return readw(addr + SUNXI_MUSB_INTRTX);
+ case MUSB_INTRRX:
+ return readw(addr + SUNXI_MUSB_INTRRX);
+ case MUSB_INTRTXE:
+ return readw(addr + SUNXI_MUSB_INTRTXE);
+ case MUSB_INTRRXE:
+ return readw(addr + SUNXI_MUSB_INTRRXE);
+ case MUSB_FRAME:
+ return readw(addr + SUNXI_MUSB_FRAME);
+ case MUSB_TXFIFOADD:
+ return readw(addr + SUNXI_MUSB_TXFIFOADD);
+ case MUSB_RXFIFOADD:
+ return readw(addr + SUNXI_MUSB_RXFIFOADD);
+ case MUSB_HWVERS:
+ return 0; /* sunxi musb version is not known */
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readw offset %u\n", offset);
+ return 0;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ return readw(addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown readw at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+ return 0;
+}
+
+static void sunxi_musb_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+ if (addr == sunxi_musb->mregs) {
+ /* generic control or fifo control reg access */
+ switch (offset) {
+ case MUSB_INTRTX:
+ return writew(data, addr + SUNXI_MUSB_INTRTX);
+ case MUSB_INTRRX:
+ return writew(data, addr + SUNXI_MUSB_INTRRX);
+ case MUSB_INTRTXE:
+ return writew(data, addr + SUNXI_MUSB_INTRTXE);
+ case MUSB_INTRRXE:
+ return writew(data, addr + SUNXI_MUSB_INTRRXE);
+ case MUSB_FRAME:
+ return writew(data, addr + SUNXI_MUSB_FRAME);
+ case MUSB_TXFIFOADD:
+ return writew(data, addr + SUNXI_MUSB_TXFIFOADD);
+ case MUSB_RXFIFOADD:
+ return writew(data, addr + SUNXI_MUSB_RXFIFOADD);
+ default:
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writew offset %u\n", offset);
+ return;
+ }
+ } else if (addr == (sunxi_musb->mregs + 0x80)) {
+ /* ep control reg access */
+ return writew(data, addr + offset);
+ }
+
+ dev_err(sunxi_musb->controller->parent,
+ "Error unknown writew at 0x%x bytes offset\n",
+ (int)(addr - sunxi_musb->mregs));
+}
+
+static const struct musb_platform_ops sunxi_musb_ops = {
+ .quirks = MUSB_INDEXED_EP,
+ .init = sunxi_musb_init,
+ .exit = sunxi_musb_exit,
+ .enable = sunxi_musb_enable,
+ .disable = sunxi_musb_disable,
+ .fifo_offset = sunxi_musb_fifo_offset,
+ .ep_offset = sunxi_musb_ep_offset,
+ .busctl_offset = sunxi_musb_busctl_offset,
+ .readb = sunxi_musb_readb,
+ .writeb = sunxi_musb_writeb,
+ .readw = sunxi_musb_readw,
+ .writew = sunxi_musb_writew,
+ .set_vbus = sunxi_musb_set_vbus,
+ .pre_root_reset_end = sunxi_musb_pre_root_reset_end,
+ .post_root_reset_end = sunxi_musb_post_root_reset_end,
+};
+
+/* Allwinner OTG supports up to 5 endpoints, plus ep0, hence num_eps = 6 */
+#define SUNXI_MUSB_MAX_EP_NUM 6
+#define SUNXI_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
+ MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
+ MUSB_EP_FIFO_SINGLE(5, FIFO_TX, 512),
+ MUSB_EP_FIFO_SINGLE(5, FIFO_RX, 512),
+};
+
+static struct musb_hdrc_config sunxi_musb_hdrc_config = {
+ .fifo_cfg = sunxi_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .soft_con = true,
+ .num_eps = SUNXI_MUSB_MAX_EP_NUM,
+ .ram_bits = SUNXI_MUSB_RAM_BITS,
+ .dma = 0,
+};
+
+static int sunxi_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data pdata;
+ struct platform_device_info pinfo;
+ struct sunxi_glue *glue;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Error no device tree node found\n");
+ return -EINVAL;
+ }
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ memset(&pdata, 0, sizeof(pdata));
+ switch (of_usb_get_dr_mode(np)) {
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
+ case USB_DR_MODE_HOST:
+ pdata.mode = MUSB_PORT_MODE_HOST;
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_DUAL_ROLE
+ case USB_DR_MODE_OTG:
+ glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(glue->extcon)) {
+ if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(glue->extcon);
+ }
+ pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
+ break;
+#endif
+ default:
+ dev_err(&pdev->dev, "Invalid or missing 'dr_mode' property\n");
+ return -EINVAL;
+ }
+ pdata.platform_ops = &sunxi_musb_ops;
+ pdata.config = &sunxi_musb_hdrc_config;
+
+ glue->dev = &pdev->dev;
+ INIT_WORK(&glue->work, sunxi_musb_work);
+ glue->host_nb.notifier_call = sunxi_musb_host_notifier;
+
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-musb"))
+ set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
+
+ if (of_device_is_compatible(np, "allwinner,sun6i-a31-musb"))
+ set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+
+ if (of_device_is_compatible(np, "allwinner,sun8i-a33-musb")) {
+ set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+ set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
+ }
+
+ glue->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(glue->clk)) {
+ dev_err(&pdev->dev, "Error getting clock: %ld\n",
+ PTR_ERR(glue->clk));
+ return PTR_ERR(glue->clk);
+ }
+
+ if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+ glue->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(glue->rst)) {
+ if (PTR_ERR(glue->rst) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Error getting reset %ld\n",
+ PTR_ERR(glue->rst));
+ return PTR_ERR(glue->rst);
+ }
+ }
+
+ glue->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(glue->phy)) {
+ if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Error getting phy %ld\n",
+ PTR_ERR(glue->phy));
+ return PTR_ERR(glue->phy);
+ }
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+ dev_err(&pdev->dev, "Error registering usb-phy %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ ret = PTR_ERR(glue->xceiv);
+ dev_err(&pdev->dev, "Error getting usb-phy %d\n", ret);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+
+ memset(&pinfo, 0, sizeof(pinfo));
+ pinfo.name = "musb-hdrc";
+ pinfo.id = PLATFORM_DEVID_AUTO;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = &pdata;
+ pinfo.size_data = sizeof(pdata);
+
+ glue->musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb)) {
+ ret = PTR_ERR(glue->musb);
+ dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
+ goto err_unregister_usb_phy;
+ }
+
+ return 0;
+
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int sunxi_musb_remove(struct platform_device *pdev)
+{
+ struct sunxi_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb); /* Frees glue ! */
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_musb_match[] = {
+ { .compatible = "allwinner,sun4i-a10-musb", },
+ { .compatible = "allwinner,sun6i-a31-musb", },
+ { .compatible = "allwinner,sun8i-a33-musb", },
+ {}
+};
+
+static struct platform_driver sunxi_musb_driver = {
+ .probe = sunxi_musb_probe,
+ .remove = sunxi_musb_remove,
+ .driver = {
+ .name = "musb-sunxi",
+ .of_match_table = sunxi_musb_match,
+ },
+};
+module_platform_driver(sunxi_musb_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi MUSB Glue Layer");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
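
The sunxi_musb_readb()/readw() wrappers above boil down to a table lookup from the standard musb offsets to the sunxi layout. A standalone mock of that translation idea; only the SUNXI_MUSB_POWER value is taken from the driver, the standard-side offset is assumed here:

#include <stdio.h>

#define MUSB_POWER		0x01	/* assumed standard musb offset */
#define SUNXI_MUSB_POWER	0x40	/* sunxi layout, from the driver above */

/* analogue of the switch in sunxi_musb_readb() */
static unsigned int sunxi_remap(unsigned int offset)
{
	switch (offset) {
	case MUSB_POWER:
		return SUNXI_MUSB_POWER;
	default:
		return offset;	/* busctl regs pass straight through */
	}
}

int main(void)
{
	printf("POWER maps to 0x%02x\n", sunxi_remap(MUSB_POWER));
	return 0;
}
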
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 869c0cfcad98..7d3beee2a587 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -152,6 +152,20 @@ config USB_MSM_OTG
This driver is not supported on boards like trout which
has an external PHY.
+config USB_QCOM_8X16_PHY
+ tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ select USB_PHY
+ select USB_ULPI_VIEWPORT
+ help
+ Enable this to support the USB transceiver on Qualcomm 8x16 chipsets.
+ It handles PHY initialization, clock management, power management,
+ and workarounds required after resetting the hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called phy-qcom-8x16-usb.
+
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index e36ab1d46d8b..19c0dccbb116 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
+obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index deee68eafb72..ec6ecd03269c 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -218,11 +218,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
- nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset");
+ nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
if (!err) {
nop->gpiod_vbus = devm_gpiod_get_optional(dev,
- "vbus-detect");
+ "vbus-detect",
+ GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
} else if (pdata) {
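
The phy-generic change tracks a gpiod API update: devm_gpiod_get_optional() now takes a mandatory flags argument, where GPIOD_ASIS requests the line without touching its state. A kernel-style sketch of the three-argument form (compiles in-tree; the probe function and "reset" consumer name are illustrative):

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* GPIOD_ASIS: take the line as the bootloader left it */
	reset = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	return 0;	/* NULL reset simply means no line was described */
}
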
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index e0556f7832b5..01d4e4cdbc79 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -96,11 +96,7 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, k_phy);
- ret = usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
- if (ret)
- return ret;
-
- return 0;
+ return usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
}
static int keystone_usbphy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 00c49bb1bd29..c58c3c0dbe35 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
@@ -32,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reboot.h>
#include <linux/reset.h>
#include <linux/usb.h>
@@ -1471,6 +1473,14 @@ static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
else
clear_bit(B_SESS_VLD, &motg->inputs);
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ /* Switch D+/D- lines to Device connector */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ } else {
+ /* Switch D+/D- lines to Hub */
+ gpiod_set_value_cansleep(motg->switch_gpio, 1);
+ }
+
schedule_work(&motg->sm_work);
return NOTIFY_DONE;
@@ -1546,6 +1556,11 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
+ motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(motg->switch_gpio))
+ return PTR_ERR(motg->switch_gpio);
+
ext_id = ERR_PTR(-ENODEV);
ext_vbus = ERR_PTR(-ENODEV);
if (of_property_read_bool(node, "extcon")) {
@@ -1561,15 +1576,16 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
}
if (!IS_ERR(ext_vbus)) {
+ motg->vbus.extcon = ext_vbus;
motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
- ret = extcon_register_interest(&motg->vbus.conn, ext_vbus->name,
- "USB", &motg->vbus.nb);
+ ret = extcon_register_notifier(ext_vbus, EXTCON_USB,
+ &motg->vbus.nb);
if (ret < 0) {
dev_err(&pdev->dev, "register VBUS notifier failed\n");
return ret;
}
- ret = extcon_get_cable_state(ext_vbus, "USB");
+ ret = extcon_get_cable_state_(ext_vbus, EXTCON_USB);
if (ret)
set_bit(B_SESS_VLD, &motg->inputs);
else
@@ -1577,15 +1593,16 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
}
if (!IS_ERR(ext_id)) {
+ motg->id.extcon = ext_id;
motg->id.nb.notifier_call = msm_otg_id_notifier;
- ret = extcon_register_interest(&motg->id.conn, ext_id->name,
- "USB-HOST", &motg->id.nb);
+ ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
+ &motg->id.nb);
if (ret < 0) {
dev_err(&pdev->dev, "register ID notifier failed\n");
return ret;
}
- ret = extcon_get_cable_state(ext_id, "USB-HOST");
+ ret = extcon_get_cable_state_(ext_id, EXTCON_USB_HOST);
if (ret)
clear_bit(ID, &motg->inputs);
else
@@ -1615,6 +1632,19 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
return 0;
}
+static int msm_otg_reboot_notify(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
+
+ /*
+	 * Ensure that the D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+ */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ return NOTIFY_DONE;
+}
+
static int msm_otg_probe(struct platform_device *pdev)
{
struct regulator_bulk_data regs[3];
@@ -1779,6 +1809,17 @@ static int msm_otg_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Can not create mode change file\n");
}
+ if (test_bit(B_SESS_VLD, &motg->inputs)) {
+ /* Switch D+/D- lines to Device connector */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+ } else {
+ /* Switch D+/D- lines to Hub */
+ gpiod_set_value_cansleep(motg->switch_gpio, 1);
+ }
+
+ motg->reboot.notifier_call = msm_otg_reboot_notify;
+ register_reboot_notifier(&motg->reboot);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1805,10 +1846,16 @@ static int msm_otg_remove(struct platform_device *pdev)
if (phy->otg->host || phy->otg->gadget)
return -EBUSY;
- if (motg->id.conn.edev)
- extcon_unregister_interest(&motg->id.conn);
- if (motg->vbus.conn.edev)
- extcon_unregister_interest(&motg->vbus.conn);
+ unregister_reboot_notifier(&motg->reboot);
+
+ /*
+	 * Ensure that the D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+ */
+ gpiod_set_value_cansleep(motg->switch_gpio, 0);
+
+ extcon_unregister_notifier(motg->id.extcon, EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon, EXTCON_USB, &motg->vbus.nb);
msm_otg_debugfs_cleanup();
cancel_delayed_work_sync(&motg->chg_work);
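
The msm hunks migrate from the deprecated extcon_register_interest()/string-name API to notifiers keyed on cable ids. A kernel-style sketch of the new pattern (compiles in-tree; the demo_* names are illustrative, the extcon calls are the real 4.3 API):

#include <linux/extcon.h>
#include <linux/notifier.h>

static int demo_vbus_notify(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	/* event carries the new cable state (attached/detached) */
	return NOTIFY_DONE;
}

static int demo_register(struct extcon_dev *edev, struct notifier_block *nb)
{
	int ret;

	nb->notifier_call = demo_vbus_notify;
	ret = extcon_register_notifier(edev, EXTCON_USB, nb);
	if (ret < 0)
		return ret;

	/* the _-suffixed getter takes a cable id, not a name string */
	return extcon_get_cable_state_(edev, EXTCON_USB);
}
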
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8f7cb068d29b..4d863ebc117c 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
{
unsigned int vbus_value;
+ if (!mxs_phy->regmap_anatop)
+ return false;
+
if (mxs_phy->port_id == 0)
regmap_read(mxs_phy->regmap_anatop,
ANADIG_USB1_VBUS_DET_STAT,
@@ -503,11 +506,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
- ret = usb_add_phy_dev(&mxs_phy->phy);
- if (ret)
- return ret;
-
- return 0;
+ return usb_add_phy_dev(&mxs_phy->phy);
}
static int mxs_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 56ee7603034b..1270906ccb95 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -30,8 +30,7 @@ struct otg_device {
void __iomem *base;
bool id;
bool vbus;
- struct extcon_specific_cable_nb vbus_dev;
- struct extcon_specific_cable_nb id_dev;
+ struct extcon_dev *extcon;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
};
@@ -106,6 +105,7 @@ static int omap_otg_probe(struct platform_device *pdev)
extcon = extcon_get_extcon_dev(config->extcon);
if (!extcon)
return -EPROBE_DEFER;
	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
	if (!otg_dev)
		return -ENOMEM;
+	otg_dev->extcon = extcon;
@@ -118,20 +118,19 @@ static int omap_otg_probe(struct platform_device *pdev)
otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
- ret = extcon_register_interest(&otg_dev->id_dev, config->extcon,
- "USB-HOST", &otg_dev->id_nb);
+ ret = extcon_register_notifier(extcon, EXTCON_USB_HOST, &otg_dev->id_nb);
if (ret)
return ret;
- ret = extcon_register_interest(&otg_dev->vbus_dev, config->extcon,
- "USB", &otg_dev->vbus_nb);
+ ret = extcon_register_notifier(extcon, EXTCON_USB, &otg_dev->vbus_nb);
if (ret) {
- extcon_unregister_interest(&otg_dev->id_dev);
+ extcon_unregister_notifier(extcon, EXTCON_USB_HOST,
+ &otg_dev->id_nb);
return ret;
}
- otg_dev->id = extcon_get_cable_state(extcon, "USB-HOST");
- otg_dev->vbus = extcon_get_cable_state(extcon, "USB");
+ otg_dev->id = extcon_get_cable_state_(extcon, EXTCON_USB_HOST);
+ otg_dev->vbus = extcon_get_cable_state_(extcon, EXTCON_USB);
omap_otg_set_mode(otg_dev);
rev = readl(otg_dev->base);
@@ -147,9 +146,10 @@ static int omap_otg_probe(struct platform_device *pdev)
static int omap_otg_remove(struct platform_device *pdev)
{
struct otg_device *otg_dev = platform_get_drvdata(pdev);
+ struct extcon_dev *edev = otg_dev->extcon;
- extcon_unregister_interest(&otg_dev->id_dev);
- extcon_unregister_interest(&otg_dev->vbus_dev);
+	extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb);
+ extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb);
return 0;
}
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
new file mode 100644
index 000000000000..5d357a94599e
--- /dev/null
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/ulpi.h>
+
+#define HSPHY_AHBBURST 0x0090
+#define HSPHY_AHBMODE 0x0098
+#define HSPHY_GENCONFIG 0x009c
+#define HSPHY_GENCONFIG_2 0x00a0
+
+#define HSPHY_USBCMD 0x0140
+#define HSPHY_ULPI_VIEWPORT 0x0170
+#define HSPHY_CTRL 0x0240
+
+#define HSPHY_TXFIFO_IDLE_FORCE_DIS BIT(4)
+#define HSPHY_SESS_VLD_CTRL_EN BIT(7)
+#define HSPHY_POR_ASSERT BIT(0)
+#define HSPHY_RETEN BIT(1)
+
+#define HSPHY_SESS_VLD_CTRL BIT(25)
+
+#define ULPI_PWR_CLK_MNG_REG 0x88
+#define ULPI_PWR_OTG_COMP_DISABLE BIT(0)
+
+#define ULPI_MISC_A 0x96
+#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
+#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
+
+#define HSPHY_3P3_MIN 3050000 /* uV */
+#define HSPHY_3P3_MAX 3300000 /* uV */
+
+#define HSPHY_1P8_MIN 1800000 /* uV */
+#define HSPHY_1P8_MAX 1800000 /* uV */
+
+#define HSPHY_VDD_MIN 5
+#define HSPHY_VDD_MAX 7
+
+struct phy_8x16 {
+ struct usb_phy phy;
+ void __iomem *regs;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct regulator *v3p3;
+ struct regulator *v1p8;
+ struct regulator *vdd;
+
+ struct reset_control *phy_reset;
+
+ struct extcon_specific_cable_nb vbus_cable;
+ struct notifier_block vbus_notify;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot_notify;
+};
+
+static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
+{
+ int ret;
+
+ ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
+ if (ret)
+ goto off_vdd;
+
+ ret = regulator_enable(qphy->v3p3);
+ if (ret)
+ goto off_vdd;
+
+ ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
+ if (ret)
+ goto off_3p3;
+
+ ret = regulator_enable(qphy->v1p8);
+ if (ret)
+ goto off_3p3;
+
+ return 0;
+
+off_3p3:
+ regulator_disable(qphy->v3p3);
+off_vdd:
+ regulator_disable(qphy->vdd);
+
+ return ret;
+}
+
+static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
+{
+ regulator_disable(qphy->v1p8);
+ regulator_disable(qphy->v3p3);
+ regulator_disable(qphy->vdd);
+}
+
+static int phy_8x16_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+ u32 val;
+
+ val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
+ usb_phy_io_write(&qphy->phy, val, ULPI_SET(ULPI_MISC_A));
+
+ val = readl(qphy->regs + HSPHY_USBCMD);
+ val |= HSPHY_SESS_VLD_CTRL;
+ writel(val, qphy->regs + HSPHY_USBCMD);
+
+ return 0;
+}
+
+static int phy_8x16_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+ u32 val;
+
+ val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
+ usb_phy_io_write(&qphy->phy, val, ULPI_CLR(ULPI_MISC_A));
+
+ val = readl(qphy->regs + HSPHY_USBCMD);
+ val &= ~HSPHY_SESS_VLD_CTRL;
+ writel(val, qphy->regs + HSPHY_USBCMD);
+
+ return 0;
+}
+
+static int phy_8x16_vbus_on(struct phy_8x16 *qphy)
+{
+ phy_8x16_notify_connect(&qphy->phy, USB_SPEED_UNKNOWN);
+
+ /* Switch D+/D- lines to Device connector */
+ gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+
+ return 0;
+}
+
+static int phy_8x16_vbus_off(struct phy_8x16 *qphy)
+{
+ phy_8x16_notify_disconnect(&qphy->phy, USB_SPEED_UNKNOWN);
+
+ /* Switch D+/D- lines to USB HUB */
+ gpiod_set_value_cansleep(qphy->switch_gpio, 1);
+
+ return 0;
+}
+
+static int phy_8x16_vbus_notify(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct phy_8x16 *qphy = container_of(nb, struct phy_8x16, vbus_notify);
+
+ if (event)
+ phy_8x16_vbus_on(qphy);
+ else
+ phy_8x16_vbus_off(qphy);
+
+ return NOTIFY_DONE;
+}
+
+static int phy_8x16_init(struct usb_phy *phy)
+{
+ struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+ u32 val, init[] = {0x44, 0x6B, 0x24, 0x13};
+ u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
+ int idx, state;
+
+ for (idx = 0; idx < ARRAY_SIZE(init); idx++)
+ usb_phy_io_write(phy, init[idx], addr + idx);
+
+ reset_control_reset(qphy->phy_reset);
+
+ /* Assert USB HSPHY_POR */
+ val = readl(qphy->regs + HSPHY_CTRL);
+ val |= HSPHY_POR_ASSERT;
+ writel(val, qphy->regs + HSPHY_CTRL);
+
+ /*
+	 * Wait for a minimum of 10 microseconds, as suggested by the HPG.
+ * Use a slightly larger value since the exact value didn't
+ * work 100% of the time.
+ */
+ usleep_range(12, 15);
+
+ /* Deassert USB HSPHY_POR */
+ val = readl(qphy->regs + HSPHY_CTRL);
+ val &= ~HSPHY_POR_ASSERT;
+ writel(val, qphy->regs + HSPHY_CTRL);
+
+ usleep_range(10, 15);
+
+ writel(0x00, qphy->regs + HSPHY_AHBBURST);
+ writel(0x08, qphy->regs + HSPHY_AHBMODE);
+
+ /* workaround for rx buffer collision issue */
+ val = readl(qphy->regs + HSPHY_GENCONFIG);
+ val &= ~HSPHY_TXFIFO_IDLE_FORCE_DIS;
+ writel(val, qphy->regs + HSPHY_GENCONFIG);
+
+ val = readl(qphy->regs + HSPHY_GENCONFIG_2);
+ val |= HSPHY_SESS_VLD_CTRL_EN;
+ writel(val, qphy->regs + HSPHY_GENCONFIG_2);
+
+ val = ULPI_PWR_OTG_COMP_DISABLE;
+ usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+
+ state = extcon_get_cable_state(qphy->vbus_cable.edev, "USB");
+ if (state)
+ phy_8x16_vbus_on(qphy);
+ else
+ phy_8x16_vbus_off(qphy);
+
+ val = usb_phy_io_read(&qphy->phy, ULPI_FUNC_CTRL);
+ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+ usb_phy_io_write(&qphy->phy, val, ULPI_FUNC_CTRL);
+
+ return 0;
+}
+
+static void phy_8x16_shutdown(struct usb_phy *phy)
+{
+ u32 val;
+
+ /* Put the controller in non-driving mode */
+ val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);
+ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ usb_phy_io_write(phy, val, ULPI_FUNC_CTRL);
+}
+
+static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
+{
+ struct regulator_bulk_data regs[3];
+ struct device *dev = qphy->phy.dev;
+ int ret;
+
+ qphy->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(qphy->core_clk))
+ return PTR_ERR(qphy->core_clk);
+
+ qphy->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(qphy->iface_clk))
+ return PTR_ERR(qphy->iface_clk);
+
+ regs[0].supply = "v3p3";
+ regs[1].supply = "v1p8";
+ regs[2].supply = "vddcx";
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+ if (ret)
+ return ret;
+
+ qphy->v3p3 = regs[0].consumer;
+ qphy->v1p8 = regs[1].consumer;
+ qphy->vdd = regs[2].consumer;
+
+ qphy->phy_reset = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(qphy->phy_reset))
+ return PTR_ERR(qphy->phy_reset);
+
+ qphy->switch_gpio = devm_gpiod_get_optional(dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(qphy->switch_gpio))
+ return PTR_ERR(qphy->switch_gpio);
+
+ return 0;
+}
+
+static int phy_8x16_reboot_notify(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ struct phy_8x16 *qphy;
+
+ qphy = container_of(this, struct phy_8x16, reboot_notify);
+
+ /*
+	 * Ensure that the D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+ */
+ gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+ return NOTIFY_DONE;
+}
+
+static int phy_8x16_probe(struct platform_device *pdev)
+{
+ struct extcon_dev *edev;
+ struct phy_8x16 *qphy;
+ struct resource *res;
+ struct usb_phy *phy;
+ int ret;
+
+ qphy = devm_kzalloc(&pdev->dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qphy);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ qphy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!qphy->regs)
+ return -ENOMEM;
+
+ phy = &qphy->phy;
+ phy->dev = &pdev->dev;
+ phy->label = dev_name(&pdev->dev);
+ phy->init = phy_8x16_init;
+ phy->shutdown = phy_8x16_shutdown;
+ phy->notify_connect = phy_8x16_notify_connect;
+ phy->notify_disconnect = phy_8x16_notify_disconnect;
+ phy->io_priv = qphy->regs + HSPHY_ULPI_VIEWPORT;
+ phy->io_ops = &ulpi_viewport_access_ops;
+ phy->type = USB_PHY_TYPE_USB2;
+
+ ret = phy_8x16_read_devicetree(qphy);
+ if (ret < 0)
+ return ret;
+
+ edev = extcon_get_edev_by_phandle(phy->dev, 0);
+ if (IS_ERR(edev))
+ return PTR_ERR(edev);
+
+ ret = clk_set_rate(qphy->core_clk, INT_MAX);
+ if (ret < 0)
+ dev_dbg(phy->dev, "Can't boost core clock\n");
+
+ ret = clk_prepare_enable(qphy->core_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(qphy->iface_clk);
+ if (ret < 0)
+ goto off_core;
+
+ ret = phy_8x16_regulators_enable(qphy);
+	if (ret)
+ goto off_clks;
+
+ qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
+ ret = extcon_register_interest(&qphy->vbus_cable, edev->name,
+ "USB", &qphy->vbus_notify);
+ if (ret < 0)
+ goto off_power;
+
+ ret = usb_add_phy_dev(&qphy->phy);
+ if (ret)
+ goto off_extcon;
+
+ qphy->reboot_notify.notifier_call = phy_8x16_reboot_notify;
+ register_reboot_notifier(&qphy->reboot_notify);
+
+ return 0;
+
+off_extcon:
+ extcon_unregister_interest(&qphy->vbus_cable);
+off_power:
+ phy_8x16_regulators_disable(qphy);
+off_clks:
+ clk_disable_unprepare(qphy->iface_clk);
+off_core:
+ clk_disable_unprepare(qphy->core_clk);
+ return ret;
+}
+
+static int phy_8x16_remove(struct platform_device *pdev)
+{
+ struct phy_8x16 *qphy = platform_get_drvdata(pdev);
+
+ unregister_reboot_notifier(&qphy->reboot_notify);
+ extcon_unregister_interest(&qphy->vbus_cable);
+
+ /*
+	 * Ensure that the D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+ */
+ gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+
+ usb_remove_phy(&qphy->phy);
+
+ clk_disable_unprepare(qphy->iface_clk);
+ clk_disable_unprepare(qphy->core_clk);
+ phy_8x16_regulators_disable(qphy);
+ return 0;
+}
+
+static const struct of_device_id phy_8x16_dt_match[] = {
+ { .compatible = "qcom,usb-8x16-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, phy_8x16_dt_match);
+
+static struct platform_driver phy_8x16_driver = {
+ .probe = phy_8x16_probe,
+ .remove = phy_8x16_remove,
+ .driver = {
+ .name = "phy-qcom-8x16-usb",
+ .of_match_table = phy_8x16_dt_match,
+ },
+};
+module_platform_driver(phy_8x16_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APQ8016/MSM8916 chipsets USB transceiver driver");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index b40d6a87d694..ab5d364f6e8c 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -57,7 +57,7 @@ struct tahvo_usb {
struct clk *ick;
int irq;
int tahvo_mode;
- struct extcon_dev extcon;
+ struct extcon_dev *extcon;
};
static const unsigned int tahvo_cable[] = {
@@ -121,7 +121,7 @@ static void check_vbus_state(struct tahvo_usb *tu)
prev_state = tu->vbus_state;
tu->vbus_state = reg & TAHVO_STAT_VBUS;
if (prev_state != tu->vbus_state) {
- extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+ extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state);
sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
}
}
@@ -130,7 +130,7 @@ static void tahvo_usb_become_host(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
- extcon_set_cable_state(&tu->extcon, "USB-HOST", true);
+ extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, true);
/* Power up the transceiver in USB host mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
@@ -149,7 +149,7 @@ static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
{
struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
- extcon_set_cable_state(&tu->extcon, "USB-HOST", false);
+ extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, false);
/* Power up transceiver and set it in USB peripheral mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
@@ -365,11 +365,13 @@ static int tahvo_usb_probe(struct platform_device *pdev)
*/
tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;
- tu->extcon.name = DRIVER_NAME;
- tu->extcon.supported_cable = tahvo_cable;
- tu->extcon.dev.parent = &pdev->dev;
+ tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
+ if (IS_ERR(tu->extcon)) {
+ dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+ return -ENOMEM;
+ }
- ret = extcon_dev_register(&tu->extcon);
+ ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
if (ret) {
dev_err(&pdev->dev, "could not register extcon device: %d\n",
ret);
@@ -377,9 +379,9 @@ static int tahvo_usb_probe(struct platform_device *pdev)
}
/* Set the initial cable state. */
- extcon_set_cable_state(&tu->extcon, "USB-HOST",
+ extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST,
tu->tahvo_mode == TAHVO_MODE_HOST);
- extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+ extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state);
/* Create OTG interface */
tahvo_usb_power_off(tu);
@@ -396,7 +398,7 @@ static int tahvo_usb_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
ret);
- goto err_extcon_unreg;
+ goto err_disable_clk;
}
dev_set_drvdata(&pdev->dev, tu);
@@ -424,8 +426,6 @@ err_free_irq:
free_irq(tu->irq, tu);
err_remove_phy:
usb_remove_phy(&tu->phy);
-err_extcon_unreg:
- extcon_dev_unregister(&tu->extcon);
err_disable_clk:
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
@@ -440,7 +440,6 @@ static int tahvo_usb_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
free_irq(tu->irq, tu);
usb_remove_phy(&tu->phy);
- extcon_dev_unregister(&tu->extcon);
if (!IS_ERR(tu->ick))
clk_disable(tu->ick);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index e8bf40808b39..7b98e1d9194c 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -388,7 +388,7 @@ static void usbhsc_hotplug(struct usbhs_priv *priv)
if (enable && !mod) {
if (priv->edev) {
- cable = extcon_get_cable_state(priv->edev, "USB-HOST");
+ cable = extcon_get_cable_state_(priv->edev, EXTCON_USB_HOST);
if ((cable > 0 && id != USBHS_HOST) ||
(!cable && id != USBHS_GADGET)) {
dev_info(&pdev->dev,
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index dc2aa3261202..de4f97d84a82 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
#include "common.h"
/*
@@ -50,6 +51,8 @@ struct usbhsg_gpriv {
int uep_size;
struct usb_gadget_driver *driver;
+ struct usb_phy *transceiver;
+ bool vbus_active;
u32 status;
#define USBHSG_STATUS_STARTED (1 << 0)
@@ -873,6 +876,27 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
}
/*
+ * VBUS provided by the PHY
+ */
+static int usbhsm_phy_get_vbus(struct platform_device *pdev)
+{
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ return gpriv->vbus_active;
+}
+
+static void usbhs_mod_phy_mode(struct usbhs_priv *priv)
+{
+ struct usbhs_mod_info *info = &priv->mod_info;
+
+ info->irq_vbus = NULL;
+ priv->pfunc.get_vbus = usbhsm_phy_get_vbus;
+
+ usbhs_irq_callback_update(priv, NULL);
+}
+
+/*
*
* linux usb function
*
@@ -882,12 +906,28 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
if (!driver ||
!driver->setup ||
driver->max_speed < USB_SPEED_FULL)
return -EINVAL;
+ /* connect to bus through transceiver */
+ if (!IS_ERR_OR_NULL(gpriv->transceiver)) {
+ ret = otg_set_peripheral(gpriv->transceiver->otg,
+ &gpriv->gadget);
+ if (ret) {
+ dev_err(dev, "%s: can't bind to transceiver\n",
+ gpriv->gadget.name);
+ return ret;
+ }
+
+ /* get vbus using phy versions */
+ usbhs_mod_phy_mode(priv);
+ }
+
/* first hook up the driver ... */
gpriv->driver = driver;
@@ -900,6 +940,10 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget)
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
+
+ if (!IS_ERR_OR_NULL(gpriv->transceiver))
+ otg_set_peripheral(gpriv->transceiver->otg, NULL);
+
gpriv->driver = NULL;
return 0;
@@ -947,12 +991,26 @@ static int usbhsg_set_selfpowered(struct usb_gadget *gadget, int is_self)
return 0;
}
+static int usbhsg_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+ gpriv->vbus_active = !!is_active;
+
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ return 0;
+}
+
static const struct usb_gadget_ops usbhsg_gadget_ops = {
.get_frame = usbhsg_get_frame,
.set_selfpowered = usbhsg_set_selfpowered,
.udc_start = usbhsg_gadget_start,
.udc_stop = usbhsg_gadget_stop,
.pullup = usbhsg_pullup,
+ .vbus_session = usbhsg_vbus_session,
};
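The new .vbus_session hook is how an external transceiver reports VBUS changes: the gadget core's usb_gadget_vbus_connect()/usb_gadget_vbus_disconnect() helpers end up calling it. A sketch of the producer side, assuming a hypothetical IRQ handler and read_vbus_level() board hook:

	/* Forward VBUS events into the gadget core, which invokes
	 * .vbus_session (usbhsg_vbus_session here). */
	static irqreturn_t phy_vbus_irq(int irq, void *data)
	{
		struct usb_gadget *gadget = data;

		if (read_vbus_level())	/* hypothetical board hook */
			usb_gadget_vbus_connect(gadget);
		else
			usb_gadget_vbus_disconnect(gadget);

		return IRQ_HANDLED;
	}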
static int usbhsg_start(struct usbhs_priv *priv)
@@ -994,6 +1052,10 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+ dev_info(dev, "%stransceiver found\n",
+ gpriv->transceiver ? "" : "no ");
+
/*
* CAUTION
*
@@ -1041,12 +1103,18 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
if (usbhsg_is_dcp(uep)) {
gpriv->gadget.ep0 = &uep->ep;
usb_ep_set_maxpacket_limit(&uep->ep, 64);
+ uep->ep.caps.type_control = true;
}
/* init normal pipe */
else {
usb_ep_set_maxpacket_limit(&uep->ep, 512);
+ uep->ep.caps.type_iso = true;
+ uep->ep.caps.type_bulk = true;
+ uep->ep.caps.type_int = true;
list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list);
}
+ uep->ep.caps.dir_in = true;
+ uep->ep.caps.dir_out = true;
}
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ffd739e31bfc..eac7ccaa3c85 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4c8b3b82103d..a5a0376bbd48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -605,6 +605,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
/*
* ELV devices:
*/
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 792e054126de..67c6d4469730 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -568,6 +568,14 @@
*/
#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+/*
+ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
+ */
+#define FTDI_CUSTOMWARE_MINIPLEX_PID 0xfd48 /* MiniPlex first generation NMEA Multiplexer */
+#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49 /* MiniPlex-USB and MiniPlex-2 series */
+#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID 0xfd4a /* MiniPlex-2Wi */
+#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b /* MiniPlex-3 series */
+
/********************************/
/** third-party VID/PID combos **/
@@ -1365,7 +1373,7 @@
#define FTDI_CTI_NANO_PID 0xF60B
/*
- * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
+ * ZeitControl cardsystems GmbH rfid-readers http://zeitcontrol.de
*/
/* TagTracer MIFARE*/
#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ddbb8fe1046d..0ac1b10be1f7 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -71,6 +71,25 @@ struct product_info {
__u8 hardware_type; /* Type of hardware */
} __attribute__((packed));
+/*
+ * Edgeport firmware header
+ *
+ * "build_number" has been set to 0 in all three of the images I have
+ * seen, and Digi Tech Support suggests that it is safe to ignore it.
+ *
+ * "length" is the number of bytes of actual data following the header.
+ *
+ * "checksum" is the low order byte resulting from adding the values of
+ * all the data bytes.
+ */
+struct edgeport_fw_hdr {
+ u8 major_version;
+ u8 minor_version;
+ __le16 build_number;
+ __le16 length;
+ u8 checksum;
+} __packed;
+
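Given that layout, validating a loaded image reduces to a size check plus recomputing the byte-wide sum, as the check_fw_sanity() hunk further below does. A condensed sketch of the same logic:

	static int edgeport_fw_check(const u8 *data, size_t size)
	{
		const struct edgeport_fw_hdr *hdr = (const void *)data;
		u8 cs = 0;
		size_t i;

		if (size < sizeof(*hdr) ||
		    size != sizeof(*hdr) + le16_to_cpu(hdr->length))
			return -EINVAL;

		/* u8 arithmetic keeps only the low-order byte of the sum */
		for (i = sizeof(*hdr); i < size; i++)
			cs += data[i];

		return cs == hdr->checksum ? 0 : -EINVAL;
	}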
struct edgeport_port {
__u16 uart_base;
__u16 dma_address;
@@ -101,6 +120,9 @@ struct edgeport_serial {
struct mutex es_lock;
int num_ports_open;
struct usb_serial *serial;
+ struct delayed_work heartbeat_work;
+ int fw_version;
+ bool use_heartbeat;
};
@@ -187,10 +209,6 @@ static const struct usb_device_id id_table_combined[] = {
MODULE_DEVICE_TABLE(usb, id_table_combined);
-static unsigned char OperationalMajorVersion;
-static unsigned char OperationalMinorVersion;
-static unsigned short OperationalBuildNumber;
-
static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
@@ -209,6 +227,26 @@ static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
+/*
+ * Some releases of Edgeport firmware "down3.bin" after version 4.80
+ * introduced code that automatically disconnects idle devices on some
+ * Edgeport models after periods of inactivity, typically ~60 seconds,
+ * regardless of whether any ports on the device are open. Digi
+ * International Tech Support suggested:
+ *
+ * 1. Adding driver "heartbeat" code that resets the firmware timer by
+ * requesting a descriptor record every 15 seconds. This should be
+ * effective with newer firmware versions that require it and benign
+ * with older versions that do not; in practice an interval of 40
+ * seconds has proven sufficient.
+ * 2. Limiting the heartbeat to Edgeport/416 models, currently the only
+ * ones that require it.
+ */
+#define FW_HEARTBEAT_VERSION_CUTOFF ((4 << 8) + 80)
+#define FW_HEARTBEAT_SECS 40
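FW_HEARTBEAT_VERSION_CUTOFF packs major.minor into a single integer the same way download_fw() later fills serial->fw_version, so firmware 4.80 encodes as (4 << 8) + 80 = 1104 and any newer release compares greater, assuming minor version numbers stay below 256.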
+
+/* Timeouts in msecs: firmware downloads take longer */
+#define TI_VSEND_TIMEOUT_DEFAULT 1000
+#define TI_VSEND_TIMEOUT_FW_DOWNLOAD 10000
static int ti_vread_sync(struct usb_device *dev, __u8 request,
__u16 value, __u16 index, u8 *data, int size)
@@ -228,14 +266,14 @@ static int ti_vread_sync(struct usb_device *dev, __u8 request,
return 0;
}
-static int ti_vsend_sync(struct usb_device *dev, __u8 request,
- __u16 value, __u16 index, u8 *data, int size)
+static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value,
+ u16 index, u8 *data, int size, int timeout)
{
int status;
status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
- value, index, data, size, 1000);
+ value, index, data, size, timeout);
if (status < 0)
return status;
if (status != size) {
@@ -250,7 +288,8 @@ static int send_cmd(struct usb_device *dev, __u8 command,
__u8 moduleid, __u16 value, u8 *data,
int size)
{
- return ti_vsend_sync(dev, command, value, moduleid, data, size);
+ return ti_vsend_sync(dev, command, value, moduleid, data, size,
+ TI_VSEND_TIMEOUT_DEFAULT);
}
/* clear tx/rx buffers and fifo in TI UMP */
@@ -378,9 +417,9 @@ static int write_boot_mem(struct edgeport_serial *serial,
}
for (i = 0; i < length; ++i) {
- status = ti_vsend_sync(serial->serial->dev,
- UMPC_MEMORY_WRITE, buffer[i],
- (__u16)(i + start_address), NULL, 0);
+ status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+ buffer[i], (u16)(i + start_address), NULL,
+ 0, TI_VSEND_TIMEOUT_DEFAULT);
if (status)
return status;
}
@@ -421,10 +460,9 @@ static int write_i2c_mem(struct edgeport_serial *serial,
* regardless of host byte order.
*/
be_start_address = swab16((u16)start_address);
- status = ti_vsend_sync(serial->serial->dev,
- UMPC_MEMORY_WRITE, (__u16)address_type,
- be_start_address,
- buffer, write_length);
+ status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+ (u16)address_type, be_start_address,
+ buffer, write_length, TI_VSEND_TIMEOUT_DEFAULT);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
return status;
@@ -454,9 +492,8 @@ static int write_i2c_mem(struct edgeport_serial *serial,
*/
be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
- (__u16)address_type,
- be_start_address,
- buffer, write_length);
+ (u16)address_type, be_start_address, buffer,
+ write_length, TI_VSEND_TIMEOUT_DEFAULT);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);
return status;
@@ -748,18 +785,17 @@ exit:
}
/* Build firmware header used for firmware update */
-static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
+static int build_i2c_fw_hdr(u8 *header, struct device *dev,
+ const struct firmware *fw)
{
__u8 *buffer;
int buffer_size;
int i;
- int err;
__u8 cs = 0;
struct ti_i2c_desc *i2c_header;
struct ti_i2c_image_header *img_header;
struct ti_i2c_firmware_rec *firmware_rec;
- const struct firmware *fw;
- const char *fw_name = "edgeport/down3.bin";
+ struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
/* In order to update the I2C firmware we must change the type 2 record
* to type 0xF2. This will force the UMP to come up in Boot Mode.
@@ -782,24 +818,11 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
// Set entire image of 0xffs
memset(buffer, 0xff, buffer_size);
- err = request_firmware(&fw, fw_name, dev);
- if (err) {
- dev_err(dev, "Failed to load image \"%s\" err %d\n",
- fw_name, err);
- kfree(buffer);
- return err;
- }
-
- /* Save Download Version Number */
- OperationalMajorVersion = fw->data[0];
- OperationalMinorVersion = fw->data[1];
- OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);
-
/* Copy version number into firmware record */
firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
- firmware_rec->Ver_Major = OperationalMajorVersion;
- firmware_rec->Ver_Minor = OperationalMinorVersion;
+ firmware_rec->Ver_Major = fw_hdr->major_version;
+ firmware_rec->Ver_Minor = fw_hdr->minor_version;
/* Pointer to fw_down memory image */
img_header = (struct ti_i2c_image_header *)&fw->data[4];
@@ -808,8 +831,6 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
&fw->data[4 + sizeof(struct ti_i2c_image_header)],
le16_to_cpu(img_header->Length));
- release_firmware(fw);
-
for (i=0; i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
@@ -823,8 +844,8 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
i2c_header->Size = cpu_to_le16(buffer_size);
i2c_header->CheckSum = cs;
- firmware_rec->Ver_Major = OperationalMajorVersion;
- firmware_rec->Ver_Minor = OperationalMinorVersion;
+ firmware_rec->Ver_Major = fw_hdr->major_version;
+ firmware_rec->Ver_Minor = fw_hdr->minor_version;
return 0;
}
@@ -925,13 +946,49 @@ static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}
+static int check_fw_sanity(struct edgeport_serial *serial,
+ const struct firmware *fw)
+{
+ u16 length_total;
+ u8 checksum = 0;
+ int pos;
+ struct device *dev = &serial->serial->interface->dev;
+ struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
+
+ if (fw->size < sizeof(struct edgeport_fw_hdr)) {
+ dev_err(dev, "incomplete fw header\n");
+ return -EINVAL;
+ }
+
+ length_total = le16_to_cpu(fw_hdr->length) +
+ sizeof(struct edgeport_fw_hdr);
+
+ if (fw->size != length_total) {
+ dev_err(dev, "bad fw size (expected: %u, got: %zu)\n",
+ length_total, fw->size);
+ return -EINVAL;
+ }
+
+ for (pos = sizeof(struct edgeport_fw_hdr); pos < fw->size; ++pos)
+ checksum += fw->data[pos];
+
+ if (checksum != fw_hdr->checksum) {
+ dev_err(dev, "bad fw checksum (expected: 0x%x, got: 0x%x)\n",
+ fw_hdr->checksum, checksum);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* DownloadTIFirmware - Download run-time operating firmware to the TI5052
*
* This routine downloads the main operating code into the TI5052, using the
* boot code already burned into E2PROM or ROM.
*/
-static int download_fw(struct edgeport_serial *serial)
+static int download_fw(struct edgeport_serial *serial,
+ const struct firmware *fw)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
@@ -940,6 +997,14 @@ static int download_fw(struct edgeport_serial *serial)
struct usb_interface_descriptor *interface;
int download_cur_ver;
int download_new_ver;
+ struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
+
+ if (check_fw_sanity(serial, fw))
+ return -EINVAL;
+
+ /* If on-board version is newer, "fw_version" will be updated below. */
+ serial->fw_version = (fw_hdr->major_version << 8) +
+ fw_hdr->minor_version;
/* This routine is entered in both BOOT mode and Download mode.
* We can determine which code is running by reading the config
@@ -1047,14 +1112,13 @@ static int download_fw(struct edgeport_serial *serial)
version in I2C */
download_cur_ver = (firmware_version->Ver_Major << 8) +
(firmware_version->Ver_Minor);
- download_new_ver = (OperationalMajorVersion << 8) +
- (OperationalMinorVersion);
+ download_new_ver = (fw_hdr->major_version << 8) +
+ (fw_hdr->minor_version);
dev_dbg(dev, "%s - >> FW Versions Device %d.%d Driver %d.%d\n",
__func__, firmware_version->Ver_Major,
firmware_version->Ver_Minor,
- OperationalMajorVersion,
- OperationalMinorVersion);
+ fw_hdr->major_version, fw_hdr->minor_version);
/* Check if we have an old version in the I2C and
update if necessary */
@@ -1063,8 +1127,8 @@ static int download_fw(struct edgeport_serial *serial)
__func__,
firmware_version->Ver_Major,
firmware_version->Ver_Minor,
- OperationalMajorVersion,
- OperationalMinorVersion);
+ fw_hdr->major_version,
+ fw_hdr->minor_version);
record = kmalloc(1, GFP_KERNEL);
if (!record) {
@@ -1129,7 +1193,8 @@ static int download_fw(struct edgeport_serial *serial)
/* Reset UMP -- Back to BOOT MODE */
status = ti_vsend_sync(serial->serial->dev,
UMPC_HARDWARE_RESET,
- 0, 0, NULL, 0);
+ 0, 0, NULL, 0,
+ TI_VSEND_TIMEOUT_DEFAULT);
dev_dbg(dev, "%s - HARDWARE RESET return %d\n", __func__, status);
@@ -1139,6 +1204,9 @@ static int download_fw(struct edgeport_serial *serial)
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENODEV;
+ } else {
+ /* Same or newer fw version is already loaded */
+ serial->fw_version = download_cur_ver;
}
kfree(firmware_version);
}
@@ -1177,7 +1245,7 @@ static int download_fw(struct edgeport_serial *serial)
* UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*/
- status = build_i2c_fw_hdr(header, dev);
+ status = build_i2c_fw_hdr(header, dev, fw);
if (status) {
kfree(vheader);
kfree(header);
@@ -1229,7 +1297,9 @@ static int download_fw(struct edgeport_serial *serial)
/* Tell firmware to copy download image into I2C */
status = ti_vsend_sync(serial->serial->dev,
- UMPC_COPY_DNLD_TO_I2C, 0, 0, NULL, 0);
+ UMPC_COPY_DNLD_TO_I2C,
+ 0, 0, NULL, 0,
+ TI_VSEND_TIMEOUT_FW_DOWNLOAD);
dev_dbg(dev, "%s - Update complete 0x%x\n", __func__, status);
if (status) {
@@ -1278,9 +1348,6 @@ static int download_fw(struct edgeport_serial *serial)
__u8 cs = 0;
__u8 *buffer;
int buffer_size;
- int err;
- const struct firmware *fw;
- const char *fw_name = "edgeport/down3.bin";
/* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
@@ -1328,16 +1395,7 @@ static int download_fw(struct edgeport_serial *serial)
/* Initialize the buffer to 0xff (pad the buffer) */
memset(buffer, 0xff, buffer_size);
-
- err = request_firmware(&fw, fw_name, dev);
- if (err) {
- dev_err(dev, "Failed to load image \"%s\" err %d\n",
- fw_name, err);
- kfree(buffer);
- return err;
- }
memcpy(buffer, &fw->data[4], fw->size - 4);
- release_firmware(fw);
for (i = sizeof(struct ti_i2c_image_header);
i < buffer_size; i++) {
@@ -1352,7 +1410,9 @@ static int download_fw(struct edgeport_serial *serial)
header->CheckSum = cs;
/* Download the operational code */
- dev_dbg(dev, "%s - Downloading operational code image (TI UMP)\n", __func__);
+ dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
+ __func__,
+ fw_hdr->major_version, fw_hdr->minor_version);
status = download_code(serial, buffer, buffer_size);
kfree(buffer);
@@ -2373,10 +2433,44 @@ static void edge_break(struct tty_struct *tty, int break_state)
__func__, status);
}
+static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial)
+{
+ if (!edge_serial->use_heartbeat)
+ return;
+
+ schedule_delayed_work(&edge_serial->heartbeat_work,
+ FW_HEARTBEAT_SECS * HZ);
+}
+
+static void edge_heartbeat_work(struct work_struct *work)
+{
+ struct edgeport_serial *serial;
+ struct ti_i2c_desc *rom_desc;
+
+ serial = container_of(work, struct edgeport_serial,
+ heartbeat_work.work);
+
+ rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
+
+ /* Descriptor address request is enough to reset the firmware timer */
+ if (!rom_desc || !get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
+ rom_desc)) {
+ dev_err(&serial->serial->interface->dev,
+ "%s - Incomplete heartbeat\n", __func__);
+ }
+ kfree(rom_desc);
+
+ edge_heartbeat_schedule(serial);
+}
+
static int edge_startup(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial;
int status;
+ const struct firmware *fw;
+ const char *fw_name = "edgeport/down3.bin";
+ struct device *dev = &serial->interface->dev;
+ u16 product_id;
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
@@ -2387,12 +2481,35 @@ static int edge_startup(struct usb_serial *serial)
edge_serial->serial = serial;
usb_set_serial_data(serial, edge_serial);
- status = download_fw(edge_serial);
+ status = request_firmware(&fw, fw_name, dev);
if (status) {
+ dev_err(dev, "Failed to load image \"%s\" err %d\n",
+ fw_name, status);
kfree(edge_serial);
return status;
}
+ status = download_fw(edge_serial, fw);
+ release_firmware(fw);
+ if (status) {
+ kfree(edge_serial);
+ return status;
+ }
+
+ product_id = le16_to_cpu(
+ edge_serial->serial->dev->descriptor.idProduct);
+
+ /* Currently only the EP/416 models require heartbeat support */
+ if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) {
+ if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 ||
+ product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) {
+ edge_serial->use_heartbeat = true;
+ }
+ }
+
+ INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
+ edge_heartbeat_schedule(edge_serial);
+
return 0;
}
@@ -2402,7 +2519,10 @@ static void edge_disconnect(struct usb_serial *serial)
static void edge_release(struct usb_serial *serial)
{
- kfree(usb_get_serial_data(serial));
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+ cancel_delayed_work_sync(&edge_serial->heartbeat_work);
+ kfree(edge_serial);
}
static int edge_port_probe(struct usb_serial_port *port)
@@ -2506,6 +2626,25 @@ static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
return 0;
}
+#ifdef CONFIG_PM
+static int edge_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+ cancel_delayed_work_sync(&edge_serial->heartbeat_work);
+
+ return 0;
+}
+
+static int edge_resume(struct usb_serial *serial)
+{
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+ edge_heartbeat_schedule(edge_serial);
+
+ return 0;
+}
+#endif
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
@@ -2538,6 +2677,10 @@ static struct usb_serial_driver edgeport_1port_device = {
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
+#ifdef CONFIG_PM
+ .suspend = edge_suspend,
+ .resume = edge_resume,
+#endif
};
static struct usb_serial_driver edgeport_2port_device = {
@@ -2571,6 +2714,10 @@ static struct usb_serial_driver edgeport_2port_device = {
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
+#ifdef CONFIG_PM
+ .suspend = edge_suspend,
+ .resume = edge_resume,
+#endif
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4f70df33975a..78b4f64c6b00 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock);
static const unsigned int dummy; /* for clarity in register access fns */
enum mos_regs {
- THR, /* serial port regs */
- RHR,
- IER,
- FCR,
- ISR,
- LCR,
- MCR,
- LSR,
- MSR,
- SPR,
- DLL,
- DLM,
- DPR, /* parallel port regs */
- DSR,
- DCR,
- ECR,
- SP1_REG, /* device control regs */
- SP2_REG, /* serial port 2 (7720 only) */
- PP_REG,
- SP_CONTROL_REG,
+ MOS7720_THR, /* serial port regs */
+ MOS7720_RHR,
+ MOS7720_IER,
+ MOS7720_FCR,
+ MOS7720_ISR,
+ MOS7720_LCR,
+ MOS7720_MCR,
+ MOS7720_LSR,
+ MOS7720_MSR,
+ MOS7720_SPR,
+ MOS7720_DLL,
+ MOS7720_DLM,
+ MOS7720_DPR, /* parallel port regs */
+ MOS7720_DSR,
+ MOS7720_DCR,
+ MOS7720_ECR,
+ MOS7720_SP1_REG, /* device control regs */
+ MOS7720_SP2_REG, /* serial port 2 (7720 only) */
+ MOS7720_PP_REG,
+ MOS7720_SP_CONTROL_REG,
};
/*
@@ -150,26 +150,26 @@ enum mos_regs {
static inline __u16 get_reg_index(enum mos_regs reg)
{
static const __u16 mos7715_index_lookup_table[] = {
- 0x00, /* THR */
- 0x00, /* RHR */
- 0x01, /* IER */
- 0x02, /* FCR */
- 0x02, /* ISR */
- 0x03, /* LCR */
- 0x04, /* MCR */
- 0x05, /* LSR */
- 0x06, /* MSR */
- 0x07, /* SPR */
- 0x00, /* DLL */
- 0x01, /* DLM */
- 0x00, /* DPR */
- 0x01, /* DSR */
- 0x02, /* DCR */
- 0x0a, /* ECR */
- 0x01, /* SP1_REG */
- 0x02, /* SP2_REG (7720 only) */
- 0x04, /* PP_REG (7715 only) */
- 0x08, /* SP_CONTROL_REG */
+ 0x00, /* MOS7720_THR */
+ 0x00, /* MOS7720_RHR */
+ 0x01, /* MOS7720_IER */
+ 0x02, /* MOS7720_FCR */
+ 0x02, /* MOS7720_ISR */
+ 0x03, /* MOS7720_LCR */
+ 0x04, /* MOS7720_MCR */
+ 0x05, /* MOS7720_LSR */
+ 0x06, /* MOS7720_MSR */
+ 0x07, /* MOS7720_SPR */
+ 0x00, /* MOS7720_DLL */
+ 0x01, /* MOS7720_DLM */
+ 0x00, /* MOS7720_DPR */
+ 0x01, /* MOS7720_DSR */
+ 0x02, /* MOS7720_DCR */
+ 0x0a, /* MOS7720_ECR */
+ 0x01, /* MOS7720_SP1_REG */
+ 0x02, /* MOS7720_SP2_REG (7720 only) */
+ 0x04, /* MOS7720_PP_REG (7715 only) */
+ 0x08, /* MOS7720_SP_CONTROL_REG */
};
return mos7715_index_lookup_table[reg];
}
@@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg)
static inline __u16 get_reg_value(enum mos_regs reg,
unsigned int serial_portnum)
{
- if (reg >= SP1_REG) /* control reg */
+ if (reg >= MOS7720_SP1_REG) /* control reg */
return 0x0000;
- else if (reg >= DPR) /* parallel port reg (7715 only) */
+ else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */
return 0x0100;
else /* serial port reg */
@@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
enum mos7715_pp_modes mode)
{
mos_parport->shadowECR = mode;
- write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+ mos_parport->shadowECR);
return 0;
}
@@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
if (parport_prologue(pp) < 0)
return;
mos7715_change_mode(mos_parport, SPP);
- write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
parport_epilogue(pp);
}
@@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp)
if (parport_prologue(pp) < 0)
return 0;
- read_mos_reg(mos_parport->serial, dummy, DPR, &d);
+ read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
parport_epilogue(pp);
return d;
}
@@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
if (parport_prologue(pp) < 0)
return;
data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
- write_mos_reg(mos_parport->serial, dummy, DCR, data);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
mos_parport->shadowDCR = data;
parport_epilogue(pp);
}
@@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
if (parport_prologue(pp) < 0)
return 0;
mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
dcr = mos_parport->shadowDCR & 0x0f;
parport_epilogue(pp);
return dcr;
@@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR &= ~0x20;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
parport_epilogue(pp);
}
@@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp)
return;
mos7715_change_mode(mos_parport, PS2);
mos_parport->shadowDCR |= 0x20;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
parport_epilogue(pp);
}
@@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp,
spin_unlock(&release_lock);
return;
}
- write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR);
- write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR);
+ write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
+ mos_parport->shadowDCR);
+ write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
+ mos_parport->shadowECR);
spin_unlock(&release_lock);
}
@@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial)
init_completion(&mos_parport->syncmsg_compl);
/* cycle parallel port reset bit */
- write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80);
- write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
/* initialize device registers */
mos_parport->shadowDCR = DCR_INIT_VAL;
- write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+ mos_parport->shadowDCR);
mos_parport->shadowECR = ECR_INIT_VAL;
- write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+ mos_parport->shadowECR);
/* register with parport core */
mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
@@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Initialize MCS7720 -- Write Init values to corresponding Registers
*
* Register Index
- * 0 : THR/RHR
- * 1 : IER
- * 2 : FCR
- * 3 : LCR
- * 4 : MCR
- * 5 : LSR
- * 6 : MSR
- * 7 : SPR
+ * 0 : MOS7720_THR/MOS7720_RHR
+ * 1 : MOS7720_IER
+ * 2 : MOS7720_FCR
+ * 3 : MOS7720_LCR
+ * 4 : MOS7720_MCR
+ * 5 : MOS7720_LSR
+ * 6 : MOS7720_MSR
+ * 7 : MOS7720_SPR
*
* 0x08 : SP1/2 Control Reg
*/
port_number = port->port_number;
- read_mos_reg(serial, port_number, LSR, &data);
+ read_mos_reg(serial, port_number, MOS7720_LSR, &data);
dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
- write_mos_reg(serial, dummy, SP1_REG, 0x02);
- write_mos_reg(serial, dummy, SP2_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowLCR = 0x03;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
- write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
- read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
+ write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
+ read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
data = data | (port->port_number + 1);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
mos7720_port->shadowLCR = 0x83;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, THR, 0x0c);
- write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
mos7720_port->shadowLCR = 0x03;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
response = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (response)
@@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port)
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
- write_mos_reg(serial, port->port_number, MCR, 0x00);
- write_mos_reg(serial, port->port_number, IER, 0x00);
+ write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
+ write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
mos7720_port->open = 0;
}
@@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
- write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port->port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
}
/*
@@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
@@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
}
}
@@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
port_number = port->port_number;
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
/***********************************************
* Set for higher rates *
***********************************************/
/* writing the baud rate verbatim into the uart clock field is clearly not right */
if (port_number == 0)
- sp_reg = SP1_REG;
+ sp_reg = MOS7720_SP1_REG;
else
- sp_reg = SP2_REG;
+ sp_reg = MOS7720_SP2_REG;
write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
mos7720_port->shadowMCR = 0x2b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/***********************************************
* Set DLL/DLM
***********************************************/
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
- write_mos_reg(serial, port_number, DLL, 0x01);
- write_mos_reg(serial, port_number, DLM, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
+ write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
return 0;
}
@@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
/* Enable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
- write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
/* Write the divisor */
- write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff));
- write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8));
+ write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
+ write_mos_reg(serial, number, MOS7720_DLM,
+ (__u8)((divisor & 0xff00) >> 8));
/* Disable access to divisor latch */
mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
return status;
}
@@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty,
/* Disable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x00);
- write_mos_reg(serial, port_number, FCR, 0x00);
- write_mos_reg(serial, port_number, FCR, 0xcf);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+ write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
/* Send the updated LCR value to the mos7720 */
- write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, MOS7720_LCR,
+ mos7720_port->shadowLCR);
mos7720_port->shadowMCR = 0x0b;
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/* set up the MCR register and send it to the mos7720 */
mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty,
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
if (port_number)
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+ 0x01);
else
- write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+ write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+ 0x02);
} else
mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
- write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, MOS7720_MCR,
+ mos7720_port->shadowMCR);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
@@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty,
if (baud >= 230400) {
set_higher_rates(mos7720_port, baud);
/* Enable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
return;
}
@@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty,
if (cflag & CBAUD)
tty_encode_baud_rate(tty, baud, baud);
/* Enable Interrupts */
- write_mos_reg(serial, port_number, IER, 0x0c);
+ write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
@@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty,
count = mos7720_chars_in_buffer(tty);
if (count == 0) {
- read_mos_reg(port->serial, port_number, LSR, &data);
+ read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
== (UART_LSR_TEMT | UART_LSR_THRE)) {
dev_dbg(&port->dev, "%s -- Empty\n", __func__);
@@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty,
mcr &= ~UART_MCR_LOOP;
mos7720_port->shadowMCR = mcr;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
return 0;
@@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
}
mos7720_port->shadowMCR = mcr;
- write_mos_reg(port->serial, port->port_number, MCR,
+ write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
return 0;
@@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial)
}
#endif
/* LSR For Port 1 */
- read_mos_reg(serial, 0, LSR, &data);
+ read_mos_reg(serial, 0, MOS7720_LSR, &data);
dev_dbg(&dev->dev, "LSR:%x\n", data);
return 0;
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 460a40669967..31a8b47f1ac6 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1137,13 +1137,9 @@ static int mxuport_port_probe(struct usb_serial_port *port)
return err;
/* Set interface (RS-232) */
- err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
- MX_INT_RS232,
- port->port_number);
- if (err)
- return err;
-
- return 0;
+ return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
+ MX_INT_RS232,
+ port->port_number);
}
static int mxuport_alloc_write_urb(struct usb_serial *serial,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f0c0c53359ad..6d1941a2396a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
.driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1765,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ } /* Terminating entry */
@@ -1938,7 +1941,7 @@ static void option_instat_callback(struct urb *urb)
} else if (status == -ENOENT || status == -ESHUTDOWN) {
dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
} else
- dev_err(dev, "%s: error %d\n", __func__, status);
+ dev_dbg(dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f5257af33ecf..ae682e4eeaef 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -362,21 +362,38 @@ static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4],
static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
speed_t baud)
{
- unsigned int tmp;
+ unsigned int baseline, mantissa, exponent;
/*
* Apparently the formula is:
- * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
+ * baudrate = 12M * 32 / (mantissa * 4^exponent)
+ * where
+ * mantissa = buf[8:0]
+ * exponent = buf[11:9]
*/
- tmp = 12000000 * 32 / baud;
+ baseline = 12000000 * 32;
+ mantissa = baseline / baud;
+ if (mantissa == 0)
+ mantissa = 1; /* Avoid dividing by zero if baud > 32*12M. */
+ exponent = 0;
+ while (mantissa >= 512) {
+ if (exponent < 7) {
+ mantissa >>= 2; /* divide by 4 */
+ exponent++;
+ } else {
+ /* Exponent is maxed. Trim mantissa and leave. */
+ mantissa = 511;
+ break;
+ }
+ }
+
buf[3] = 0x80;
buf[2] = 0;
- buf[1] = (tmp >= 256);
- while (tmp >= 256) {
- tmp >>= 2;
- buf[1] <<= 1;
- }
- buf[0] = tmp;
+ buf[1] = exponent << 1 | mantissa >> 8;
+ buf[0] = mantissa & 0xff;
+
+ /* Calculate and return the exact baud rate. */
+ baud = (baseline / mantissa) >> (exponent << 1);
return baud;
}
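A worked example of the rewritten encoding, for baud = 9600: mantissa starts at 384000000 / 9600 = 40000 and is shifted right by two bits four times until it fits in 9 bits, giving mantissa = 156 and exponent = 4, so buf[1] = 4 << 1 | 156 >> 8 = 0x08 and buf[0] = 156 = 0x9c. The function then reports the rate actually programmed, (384000000 / 156) >> 8 = 9615 baud, within 0.2% of the request.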
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..ebcec8cda858 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,13 +139,13 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
/* non-Gobi Sierra Wireless devices */
+ {DEVICE_SWI(0x03f0, 0x4e1d)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
{DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
{DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
{DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 8fceec7298e0..37f3ad15ed06 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -60,17 +60,15 @@ static void symbol_int_callback(struct urb *urb)
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
+ /*
+ * Data from the device comes with a 1 byte header:
+ *
+ * <size of data> <data>...
+ */
if (urb->actual_length > 1) {
- data_length = urb->actual_length - 1;
-
- /*
- * Data from the device comes with a 1 byte header:
- *
- * <size of data>data...
- * This is real data to be sent to the tty layer
- * we pretty much just ignore the size and send everything
- * else to the tty layer.
- */
+ data_length = data[0];
+ if (data_length > (urb->actual_length - 1))
+ data_length = urb->actual_length - 1;
tty_insert_flip_string(&port->port, &data[1], data_length);
tty_flip_buffer_push(&port->port);
} else {
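The clamp matters because data[0] is device-controlled: with urb->actual_length == 4 there are only three payload bytes, so a bogus header claiming 200 bytes is truncated to 3 rather than letting tty_insert_flip_string() read past the end of the URB buffer.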
@@ -94,7 +92,7 @@ exit:
static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct symbol_private *priv = usb_get_serial_data(port->serial);
+ struct symbol_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result = 0;
@@ -120,7 +118,7 @@ static void symbol_close(struct usb_serial_port *port)
static void symbol_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct symbol_private *priv = usb_get_serial_data(port->serial);
+ struct symbol_private *priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
priv->throttled = true;
@@ -130,7 +128,7 @@ static void symbol_throttle(struct tty_struct *tty)
static void symbol_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct symbol_private *priv = usb_get_serial_data(port->serial);
+ struct symbol_private *priv = usb_get_serial_port_data(port);
int result;
bool was_throttled;
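Judging by these fixes, the driver's private struct is stored per-port (via usb_set_serial_port_data()) rather than per-interface, so the old usb_get_serial_data() calls were retrieving a different, likely NULL, pointer.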
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 529066bbc7e8..46f1f13b41f1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
tty_unregister_driver(usb_serial_tty_driver);
put_tty_driver(usb_serial_tty_driver);
bus_unregister(&usb_serial_bus_type);
+ idr_destroy(&serial_minors);
}
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 2f805cb386a5..825305cb71d9 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -282,7 +282,7 @@ static void usb_wwan_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- if (err != -EPERM) {
+ if (err != -EPERM && err != -ENODEV) {
dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
__func__, err);
/* busy also in error unless we are killed */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 540add24a12f..5e67f63b2e46 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1111,7 +1111,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
* command phase and the data phase. Some devices need a little
* more than that, probably because of clock rate inaccuracies. */
if (unlikely(us->fflags & US_FL_GO_SLOW))
- udelay(125);
+ usleep_range(125, 150);
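Switching from udelay() to usleep_range() replaces a 125 us busy-wait with a sleep; the 125 to 150 us window still guarantees the minimum gap the comment above calls for while giving the timer subsystem room to coalesce wakeups.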
if (transfer_length) {
unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Writable media would thus be left
+ * dirty if the initial instance were used, so the device is limited to
+ * its virtual CD.
+ * And yes, the concept that BCD only goes up to 9 is not heeded. */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+ "ZTE,Incorporated",
+ "ZTE WCDMA Technologies MSM",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
+
/* Reported by Sven Geggus <sven-usbst@geggus.net>
* This encrypted pen drive returns bogus data for the initial READ(10).
*/
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These are mini projectors using USB for both power and video data transport.
+ * The usb-storage interface is a virtual Windows driver CD, which the gm12u320
+ * driver automatically converts into framebuffer & KMS DRI device nodes.
+ */
+UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
+ "Grain-media Technology Corp.",
+ "USB3.0 Device GM12U320",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
+
/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+ char *buf)
+{
+ struct vfio_device *it, *device = NULL;
+
+ mutex_lock(&group->device_lock);
+ list_for_each_entry(it, &group->device_list, group_next) {
+ if (!strcmp(dev_name(it->dev), buf)) {
+ device = it;
+ vfio_device_get(device);
+ break;
+ }
+ }
+ mutex_unlock(&group->device_lock);
+
+ /* device stays NULL when nothing matches; a bare list_for_each_entry
+ * cursor would be a bogus non-NULL pointer here */
+ return device;
+}
+
/*
* Caller must hold a reference to the vfio_device
*/
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
struct vfio_device *device;
struct file *filep;
- int ret = -ENODEV;
+ int ret;
if (0 == atomic_read(&group->container_users) ||
!group->container->iommu_driver || !vfio_group_viable(group))
return -EINVAL;
- mutex_lock(&group->device_lock);
- list_for_each_entry(device, &group->device_list, group_next) {
- if (strcmp(dev_name(device->dev), buf))
- continue;
+ device = vfio_device_get_from_name(group, buf);
+ if (!device)
+ return -ENODEV;
- ret = device->ops->open(device->device_data);
- if (ret)
- break;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0) {
- device->ops->release(device->device_data);
- break;
- }
+ ret = device->ops->open(device->device_data);
+ if (ret) {
+ vfio_device_put(device);
+ return ret;
+ }
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- device, O_RDWR);
- if (IS_ERR(filep)) {
- put_unused_fd(ret);
- ret = PTR_ERR(filep);
- device->ops->release(device->device_data);
- break;
- }
+ /*
+ * We can't use anon_inode_getfd() because we need to modify
+ * the f_mode flags directly to allow more than just ioctls
+ */
+ ret = get_unused_fd_flags(O_CLOEXEC);
+ if (ret < 0) {
+ device->ops->release(device->device_data);
+ vfio_device_put(device);
+ return ret;
+ }
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+ device, O_RDWR);
+ if (IS_ERR(filep)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(filep);
+ device->ops->release(device->device_data);
+ vfio_device_put(device);
+ return ret;
+ }
+
+ /*
+ * TODO: add an anon_inode interface to do this.
+ * Appears to be missing by lack of need rather than
+ * explicitly prevented. Now there's need.
+ */
+ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- vfio_device_get(device);
- atomic_inc(&group->container_users);
+ atomic_inc(&group->container_users);
- fd_install(ret, filep);
- break;
- }
- mutex_unlock(&group->device_lock);
+ fd_install(ret, filep);
return ret;
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dfcc02c93648..f114a9dbb48f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1573,9 +1573,9 @@ static int __init vhost_scsi_register(void)
return misc_register(&vhost_scsi_misc);
}
-static int vhost_scsi_deregister(void)
+static void vhost_scsi_deregister(void)
{
- return misc_deregister(&vhost_scsi_misc);
+ misc_deregister(&vhost_scsi_misc);
}
static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include "vhost.h"
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+ "Maximum number of memory regions in memory map. (default: 64)");
+
enum {
- VHOST_MEMORY_MAX_NREGIONS = 64,
VHOST_MEMORY_F_LOG = 0x1,
};
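With the compile-time constant replaced by a module parameter, the region limit can now be raised at load time without rebuilding, for example (value is illustrative):

	modprobe vhost max_mem_regions=509

The 0444 permission makes the parameter visible in sysfs but settable only at module load.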
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
+ kvfree(dev->memory);
dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+ const struct vhost_memory_region *r1 = p1, *r2 = p2;
+ if (r1->guest_phys_addr < r2->guest_phys_addr)
+ return 1;
+ if (r1->guest_phys_addr > r2->guest_phys_addr)
+ return -1;
+ return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n)
+ n = vzalloc(size);
+ return n;
+}
+
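Because vhost_kvzalloc() may satisfy a request from either kzalloc() or vzalloc(), callers can no longer assume which allocator owns the memory; that is why the matching kfree() calls in this file become kvfree(), which dispatches to kfree() or vfree() based on the address.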
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
- if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
+ sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+ vhost_memory_reg_sort_cmp, NULL);
if (!memory_access_ok(d, newmem, 0)) {
- kfree(newmem);
+ kvfree(newmem);
return -EFAULT;
}
oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
d->vqs[i]->memory = newmem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kfree(oldmem);
+ kvfree(oldmem);
return 0;
}
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
}
if (eventfp != d->log_file) {
filep = d->log_file;
+ d->log_file = eventfp;
ctx = d->log_ctx;
d->log_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
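The added assignment records the new eventfd file in d->log_file; previously the pointer was never updated, so later invocations compared against a stale value and the reference to the old log file could be leaked.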
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
__u64 addr, __u32 len)
{
- struct vhost_memory_region *reg;
- int i;
+ const struct vhost_memory_region *reg;
+ int start = 0, end = mem->nregions;
- /* linear search is not brilliant, but we really have on the order of 6
- * regions in practice */
- for (i = 0; i < mem->nregions; ++i) {
- reg = mem->regions + i;
- if (reg->guest_phys_addr <= addr &&
- reg->guest_phys_addr + reg->memory_size - 1 >= addr)
- return reg;
+ while (start < end) {
+ int slot = start + (end - start) / 2;
+ reg = mem->regions + slot;
+ if (addr >= reg->guest_phys_addr)
+ end = slot;
+ else
+ start = slot + 1;
}
+
+ reg = mem->regions + start;
+ if (addr >= reg->guest_phys_addr &&
+ reg->guest_phys_addr + reg->memory_size > addr)
+ return reg;
return NULL;
}
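Note the comparator above returns 1 when r1 sorts below r2, so sort() leaves the table in descending guest_phys_addr order, and find_region()'s binary search then locates the first region whose base is at or below the address. A standalone model of the same logic (the demo adds a start < n guard before the final dereference):

/* userspace model of the descending-order region lookup */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	uint64_t base;
	uint64_t size;
};

static int cmp_desc(const void *p1, const void *p2)
{
	const struct region *r1 = p1, *r2 = p2;

	if (r1->base < r2->base)
		return 1;	/* inverted returns: descending order */
	if (r1->base > r2->base)
		return -1;
	return 0;
}

static const struct region *find(const struct region *r, int n, uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {
		int slot = start + (end - start) / 2;

		if (addr >= r[slot].base)
			end = slot;	/* answer is at slot or earlier */
		else
			start = slot + 1;
	}
	if (start < n && addr >= r[start].base &&
	    r[start].base + r[start].size > addr)
		return &r[start];
	return NULL;
}

int main(void)
{
	struct region r[] = { { 0x1000, 0x1000 }, { 0x4000, 0x2000 } };

	qsort(r, 2, sizeof(r[0]), cmp_desc);
	printf("%s\n", find(r, 2, 0x4800) ? "hit" : "miss");	/* hit */
	printf("%s\n", find(r, 2, 0x0800) ? "hit" : "miss");	/* miss */
	return 0;
}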
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bf495ffb020..e0606c01e8ac 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,9 +22,7 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
-menu "Direct Rendering Manager"
source "drivers/gpu/drm/Kconfig"
-endmenu
menu "Frame buffer Devices"
source "drivers/video/fbdev/Kconfig"
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0505b796d743..5ffa4b4e26c0 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -299,6 +299,13 @@ config BACKLIGHT_TOSA
If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
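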
for its backlight
+config BACKLIGHT_PM8941_WLED
+ tristate "Qualcomm PM8941 WLED Driver"
+ select REGMAP
+ help
+ If you have the Qualcomm PM8941, say Y to enable a driver for the
+ WLED block.
+
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
depends on X86
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index d67073f9d421..16ec534cff30 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_PM8941_WLED) += pm8941-wled.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 88116b493f3b..f88df9ec08d0 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -73,6 +73,7 @@ struct lp855x {
struct device *dev;
struct lp855x_platform_data *pdata;
struct pwm_device *pwm;
+ struct regulator *supply; /* regulator for VDD input */
};
static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -378,13 +379,6 @@ static int lp855x_parse_dt(struct lp855x *lp)
pdata->rom_data = &rom[0];
}
- pdata->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(pdata->supply)) {
- if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- pdata->supply = NULL;
- }
-
lp->pdata = pdata;
return 0;
@@ -425,8 +419,15 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
else
lp->mode = REGISTER_BASED;
- if (lp->pdata->supply) {
- ret = regulator_enable(lp->pdata->supply);
+ lp->supply = devm_regulator_get(lp->dev, "power");
+ if (IS_ERR(lp->supply)) {
+ if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ lp->supply = NULL;
+ }
+
+ if (lp->supply) {
+ ret = regulator_enable(lp->supply);
if (ret < 0) {
dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
return ret;
@@ -464,8 +465,8 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
- if (lp->pdata->supply)
- regulator_disable(lp->pdata->supply);
+ if (lp->supply)
+ regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
return 0;
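Moving the regulator lookup from the DT parser into probe makes the supply available on non-DT platforms too. The pattern used above treats -EPROBE_DEFER as "try again later" and any other lookup failure as "supply absent"; a sketch with a hypothetical demo_get_supply() (the stricter devm_regulator_get_optional() exists for drivers wanting a hard distinction):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_get_supply(struct device *dev, struct regulator **supply)
{
	int ret;

	*supply = devm_regulator_get(dev, "power");
	if (IS_ERR(*supply)) {
		if (PTR_ERR(*supply) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* regulator may show up later */
		*supply = NULL;			/* treat the supply as optional */
	}

	if (*supply) {
		ret = regulator_enable(*supply);
		if (ret < 0) {
			dev_err(dev, "failed to enable supply: %d\n", ret);
			return ret;
		}
	}
	return 0;
}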
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e418d5b1aa55..5d583d7a517b 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -221,8 +221,7 @@ static void lp8788_backlight_unregister(struct lp8788_bl *bl)
{
struct backlight_device *bl_dev = bl->bl_dev;
- if (bl_dev)
- backlight_device_unregister(bl_dev);
+ backlight_device_unregister(bl_dev);
}
static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
diff --git a/drivers/leds/leds-pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
index bf64a593fbf1..c704c3236034 100644
--- a/drivers/leds/leds-pm8941-wled.c
+++ b/drivers/video/backlight/pm8941-wled.c
@@ -11,7 +11,7 @@
*/
#include <linux/kernel.h>
-#include <linux/leds.h>
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -76,30 +76,29 @@ struct pm8941_wled_config {
};
struct pm8941_wled {
+ const char *name;
struct regmap *regmap;
u16 addr;
- struct led_classdev cdev;
-
struct pm8941_wled_config cfg;
};
-static int pm8941_wled_set(struct led_classdev *cdev,
- enum led_brightness value)
+static int pm8941_wled_update_status(struct backlight_device *bl)
{
- struct pm8941_wled *wled;
+ struct pm8941_wled *wled = bl_get_data(bl);
+ u16 val = bl->props.brightness;
u8 ctrl = 0;
- u16 val;
int rc;
int i;
- wled = container_of(cdev, struct pm8941_wled, cdev);
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
+ val = 0;
- if (value != 0)
+ if (val != 0)
ctrl = PM8941_WLED_REG_MOD_EN_BIT;
- val = value * PM8941_WLED_REG_VAL_MAX / LED_FULL;
-
rc = regmap_update_bits(wled->regmap,
wled->addr + PM8941_WLED_REG_MOD_EN,
PM8941_WLED_REG_MOD_EN_MASK, ctrl);
@@ -128,16 +127,6 @@ static int pm8941_wled_set(struct led_classdev *cdev,
return rc;
}
-static void pm8941_wled_set_brightness(struct led_classdev *cdev,
- enum led_brightness value)
-{
- if (pm8941_wled_set(cdev, value)) {
- dev_err(cdev->dev, "Unable to set brightness\n");
- return;
- }
- cdev->brightness = value;
-}
-
static int pm8941_wled_setup(struct pm8941_wled *wled)
{
int rc;
@@ -336,12 +325,9 @@ static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
}
wled->addr = val;
- rc = of_property_read_string(dev->of_node, "label", &wled->cdev.name);
+ rc = of_property_read_string(dev->of_node, "label", &wled->name);
if (rc)
- wled->cdev.name = dev->of_node->name;
-
- wled->cdev.default_trigger = of_get_property(dev->of_node,
- "linux,default-trigger", NULL);
+ wled->name = dev->of_node->name;
*cfg = pm8941_wled_config_defaults;
for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
@@ -377,8 +363,14 @@ static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
return 0;
}
+static const struct backlight_ops pm8941_wled_ops = {
+ .update_status = pm8941_wled_update_status,
+};
+
static int pm8941_wled_probe(struct platform_device *pdev)
{
+ struct backlight_properties props;
+ struct backlight_device *bl;
struct pm8941_wled *wled;
struct regmap *regmap;
int rc;
@@ -403,13 +395,14 @@ static int pm8941_wled_probe(struct platform_device *pdev)
if (rc)
return rc;
- wled->cdev.brightness_set = pm8941_wled_set_brightness;
-
- rc = devm_led_classdev_register(&pdev->dev, &wled->cdev);
- if (rc)
- return rc;
-
- platform_set_drvdata(pdev, wled);
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = PM8941_WLED_REG_VAL_MAX;
+ bl = devm_backlight_device_register(&pdev->dev, wled->name,
+ &pdev->dev, wled,
+ &pm8941_wled_ops, &props);
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
return 0;
};
@@ -432,4 +425,3 @@ module_platform_driver(pm8941_wled_driver);
MODULE_DESCRIPTION("pm8941 wled driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pm8941-wled");
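The conversion above swaps a LED classdev for a backlight device: brightness arrives in bl->props.brightness already scaled to the max_brightness the driver registered, so the old value * PM8941_WLED_REG_VAL_MAX / LED_FULL arithmetic disappears, and blanking state is folded into update_status(). A skeleton of the registration under hypothetical demo_* names (4095 stands in for PM8941_WLED_REG_VAL_MAX):

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_wled {
	struct device *dev;
	/* regmap handle etc. would live here */
};

static int demo_update_status(struct backlight_device *bl)
{
	struct demo_wled *wled = bl_get_data(bl);
	int val = bl->props.brightness;

	/* any blanked state forces brightness to zero, as above */
	if (bl->props.power != FB_BLANK_UNBLANK ||
	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
	    bl->props.state & BL_CORE_FBBLANK)
		val = 0;

	dev_dbg(wled->dev, "brightness %d\n", val);
	return 0;	/* write val to the hardware here */
}

static const struct backlight_ops demo_ops = {
	.update_status = demo_update_status,
};

static int demo_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	struct backlight_device *bl;
	struct demo_wled *wled;

	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
	if (!wled)
		return -ENOMEM;
	wled->dev = &pdev->dev;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 4095;
	bl = devm_backlight_device_register(&pdev->dev, "demo-wled",
					    &pdev->dev, wled,
					    &demo_ops, &props);
	return PTR_ERR_OR_ZERO(bl);
}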
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 052fa1bac03d..d414c7a3acf5 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -65,7 +65,7 @@ static int sky81452_bl_update_status(struct backlight_device *bd)
if (brightness > 0) {
ret = regmap_write(regmap, SKY81452_REG0, brightness - 1);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
@@ -87,12 +87,12 @@ static ssize_t sky81452_bl_store_enable(struct device *dev,
int ret;
ret = kstrtoul(buf, 16, &value);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
ret = regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
value << CTZ(SKY81452_EN));
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
return count;
@@ -108,7 +108,7 @@ static ssize_t sky81452_bl_show_open_short(struct device *dev,
reg = !strcmp(attr->attr.name, "open") ? SKY81452_REG5 : SKY81452_REG4;
ret = regmap_read(regmap, reg, &value);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
if (value & SKY81452_SHRT) {
@@ -136,7 +136,7 @@ static ssize_t sky81452_bl_show_fault(struct device *dev,
int ret;
ret = regmap_read(regmap, SKY81452_REG4, &value);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
*buf = 0;
@@ -196,7 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
pdata->gpio_enable = of_get_gpio(np, 0);
ret = of_property_count_u32_elems(np, "led-sources");
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
pdata->enable = SKY81452_EN >> CTZ(SKY81452_EN);
} else {
num_entry = ret;
@@ -205,7 +205,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
ret = of_property_read_u32_array(np, "led-sources", sources,
num_entry);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(dev, "led-sources node is invalid.\n");
return ERR_PTR(-EINVAL);
}
@@ -218,12 +218,12 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
ret = of_property_read_u32(np,
"skyworks,short-detection-threshold-volt",
&pdata->short_detection_threshold);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
pdata->short_detection_threshold = 7;
ret = of_property_read_u32(np, "skyworks,current-limit-mA",
&pdata->boost_current_limit);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
pdata->boost_current_limit = 2750;
of_node_put(np);
@@ -278,14 +278,14 @@ static int sky81452_bl_probe(struct platform_device *pdev)
if (gpio_is_valid(pdata->gpio_enable)) {
ret = devm_gpio_request_one(dev, pdata->gpio_enable,
GPIOF_OUT_INIT_HIGH, "sky81452-en");
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(dev, "failed to request GPIO. err=%d\n", ret);
return ret;
}
}
ret = sky81452_bl_init_device(regmap, pdata);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(dev, "failed to initialize. err=%d\n", ret);
return ret;
}
@@ -302,8 +302,8 @@ static int sky81452_bl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bd);
- ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
- if (IS_ERR_VALUE(ret)) {
+ ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+ if (ret < 0) {
dev_err(dev, "failed to create attribute. err=%d\n", ret);
return ret;
}
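The sky81452 conversions above replace IS_ERR_VALUE() with plain ret < 0: the macro is meant for unsigned long values carrying an error in the top 4095 addresses, and wrapping an int return in it only works by accident of sign extension, failing entirely for unsigned types. A userspace model of the trap (MAX_ERRNO and the macro are simplified copies of the kernel's):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int ret = -22;			/* -EINVAL, as drivers return it */
	unsigned int uret = -22;	/* same bits in an unsigned variable */

	printf("int:      %d\n", IS_ERR_VALUE(ret));	/* 1 on LP64: sign-extended */
	printf("unsigned: %d\n", IS_ERR_VALUE(uret));	/* 0 on LP64: zero-extended */
	printf("ret < 0:  %d\n", ret < 0);		/* always the right test */
	return 0;
}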
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 3ad676558c80..83742d806391 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -158,6 +158,7 @@ static const struct i2c_device_id tosa_bl_id[] = {
{ "tosa-bl", 0 },
{ },
};
+MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
static struct i2c_driver tosa_bl_driver = {
.driver = {
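MODULE_DEVICE_TABLE(i2c, ...) is what lets udev autoload the module when a matching device is instantiated; without it the driver only binds if someone loaded it by hand. The added line emits an "i2c:tosa-bl" alias into the module. The minimal shape, with hypothetical names:

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id demo_bl_id[] = {
	{ "demo-bl", 0 },
	{ }
};
/* exports the table so modprobe can resolve the "i2c:demo-bl" alias */
MODULE_DEVICE_TABLE(i2c, demo_bl_id);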
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index ba97efc3bf70..38da6e299149 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@ config VGA_CONSOLE
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64
+ !ARM64 && !ARC && !MICROBLAZE
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
int y;
int c = scr_readw((u16 *) vc->vc_pos);
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (vc->vc_cursor_type & 0x10)
fbcon_del_cursor_timer(info);
else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..8b1d371b5404 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
# Helper logic selected only by the ARM Versatile platform family.
config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
depends on ARM
depends on FB_ARMCLCD && FB=y
@@ -2464,7 +2464,7 @@ config FB_SSD1307
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
depends on OF
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FOPS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -2475,3 +2475,17 @@ config FB_SSD1307
help
This driver implements support for the Solomon SSD1307
OLED controller over I2C.
+
+config FB_SM712
+ tristate "Silicon Motion SM712 framebuffer support"
+ depends on FB && PCI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for the Silicon Motion SM710, SM712, SM721
+ and SM722 chips.
+
+ This driver is also available as a module. The module will be
+ called sm712fb. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index cecea5063a80..50ed1b4fc2bf 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
obj-$(CONFIG_FB_OPENCORES) += ocfb.o
+obj-$(CONFIG_FB_SM712) += sm712fb.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index b305a1e7cc76..6a317de7082c 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -26,13 +26,9 @@
#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
struct arkfb_info {
int mclk_freq;
- int mtrr_reg;
+ int wc_cookie;
struct dac_info *dac;
struct vgastate state;
@@ -102,10 +98,6 @@ static const struct svga_timing_regs ark_timing_regs = {
static char *mode_option = "640x480-8@60";
-#ifdef CONFIG_MTRR
-static int mtrr = 1;
-#endif
-
MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
@@ -115,11 +107,6 @@ MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0444);
MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
-#ifdef CONFIG_MTRR
-module_param(mtrr, int, 0444);
-MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
-
static int threshold = 4;
module_param(threshold, int, 0644);
@@ -1002,7 +989,7 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
info->fix.smem_len = pci_resource_len(dev, 0);
/* Map physical IO memory address into kernel space */
- info->screen_base = pci_iomap(dev, 0, 0);
+ info->screen_base = pci_iomap_wc(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
dev_err(info->device, "iomap for framebuffer failed\n");
@@ -1057,14 +1044,8 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
-
-#ifdef CONFIG_MTRR
- if (mtrr) {
- par->mtrr_reg = -1;
- par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
- }
-#endif
-
+ par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+ info->fix.smem_len);
return 0;
/* Error handling */
@@ -1092,14 +1073,7 @@ static void ark_pci_remove(struct pci_dev *dev)
if (info) {
struct arkfb_info *par = info->par;
-
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
-#endif
-
+ arch_phys_wc_del(par->wc_cookie);
dac_release(par->dac);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
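arkfb is the first of several drivers in this series converted from open-coded MTRR bookkeeping to arch_phys_wc_add()/arch_phys_wc_del(). The helpers are no-ops on non-x86 and on PAT systems, which is why every CONFIG_MTRR #ifdef block collapses to two unconditional calls around one opaque cookie. The recurring shape, under hypothetical demo_* names:

#include <linux/io.h>

struct demo_par {
	int wc_cookie;	/* replaces the raw mtrr_reg number */
};

static void demo_add_wc(struct demo_par *par, unsigned long start,
			unsigned long len)
{
	/* returns <= 0 where MTRRs are unavailable; that is fine */
	par->wc_cookie = arch_phys_wc_add(start, len);
}

static void demo_del_wc(struct demo_par *par)
{
	arch_phys_wc_del(par->wc_cookie);	/* safe for cookie <= 0 */
}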
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index abadc490fa1f..19eb42b57d87 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -19,7 +19,6 @@
#include <linux/backlight.h>
#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/platform_data/atmel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -999,7 +998,7 @@ static const char *atmel_lcdfb_wiring_modes[] = {
[ATMEL_LCDC_WIRING_RGB] = "RGB",
};
-const int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+static int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
{
const char *mode;
int err, i;
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 1f39a62f899b..63c4842eb224 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -182,10 +182,7 @@ struct atyfb_par {
unsigned long irq_flags;
unsigned int irq;
spinlock_t int_lock;
-#ifdef CONFIG_MTRR
- int mtrr_aper;
- int mtrr_reg;
-#endif
+ int wc_cookie;
u32 mem_cntl;
struct crtc saved_crtc;
union aty_pll saved_pll;
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 8789e487b96e..f34ed47fcaf8 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -98,9 +98,6 @@
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
/*
* Debug flags.
@@ -303,9 +300,7 @@ static struct fb_ops atyfb_ops = {
};
static bool noaccel;
-#ifdef CONFIG_MTRR
static bool nomtrr;
-#endif
static int vram;
static int pll;
static int mclk;
@@ -427,6 +422,20 @@ static struct {
#endif /* CONFIG_FB_ATY_CT */
};
+/*
+ * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
+ * unless the auxiliary register aperture is used.
+ */
+static void aty_fudge_framebuffer_len(struct fb_info *info)
+{
+ struct atyfb_par *par = (struct atyfb_par *) info->par;
+
+ if (!par->aux_start &&
+ (info->fix.smem_len == 0x800000 ||
+ (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
+ info->fix.smem_len -= GUI_RESERVE;
+}
+
static int correct_chipset(struct atyfb_par *par)
{
u8 rev;
@@ -2603,14 +2612,7 @@ static int aty_init(struct fb_info *info)
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
- /*
- * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
- * unless the auxiliary register aperture is used.
- */
- if (!par->aux_start &&
- (info->fix.smem_len == 0x800000 ||
- (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
- info->fix.smem_len -= GUI_RESERVE;
+ aty_fudge_framebuffer_len(info);
/*
* Disable register access through the linear aperture
@@ -2621,25 +2623,13 @@ static int aty_init(struct fb_info *info)
aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) |
BUS_APER_REG_DIS, par);
-#ifdef CONFIG_MTRR
- par->mtrr_aper = -1;
- par->mtrr_reg = -1;
- if (!nomtrr) {
- /* Cover the whole resource. */
- par->mtrr_aper = mtrr_add(par->res_start, par->res_size,
- MTRR_TYPE_WRCOMB, 1);
- if (par->mtrr_aper >= 0 && !par->aux_start) {
- /* Make a hole for mmio. */
- par->mtrr_reg = mtrr_add(par->res_start + 0x800000 -
- GUI_RESERVE, GUI_RESERVE,
- MTRR_TYPE_UNCACHABLE, 1);
- if (par->mtrr_reg < 0) {
- mtrr_del(par->mtrr_aper, 0, 0);
- par->mtrr_aper = -1;
- }
- }
- }
-#endif
+ if (!nomtrr)
+ /*
+ * Only the ioremap_wc()'d area will get WC here
+ * since ioremap_uc() was used on the entire PCI BAR.
+ */
+ par->wc_cookie = arch_phys_wc_add(par->res_start,
+ par->res_size);
info->fbops = &atyfb_ops;
info->pseudo_palette = par->pseudo_palette;
@@ -2767,17 +2757,8 @@ aty_init_exit:
/* restore video mode */
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(info, &par->saved_pll);
+ arch_phys_wc_del(par->wc_cookie);
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
- if (par->mtrr_aper >= 0) {
- mtrr_del(par->mtrr_aper, 0, 0);
- par->mtrr_aper = -1;
- }
-#endif
return ret;
}
@@ -3459,7 +3440,11 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
- par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+ /*
+ * By using strong UC we force the MTRR to never have an
+ * effect on the MMIO region on both non-PAT and PAT systems.
+ */
+ par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
if (par->ati_regbase == NULL)
return -ENOMEM;
@@ -3482,7 +3467,24 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
/* Map in frame buffer */
info->fix.smem_start = addr;
- info->screen_base = ioremap(addr, 0x800000);
+
+ /*
+ * The framebuffer is not always 8 MiB, that's just the size of the
+ * PCI BAR. We temporarily abuse smem_len here to store the size
+ * of the BAR. aty_init() will later correct it to match the actual
+ * framebuffer size.
+ *
+ * On devices that don't have the auxiliary register aperture, the
+ * registers are housed at the top end of the framebuffer PCI BAR.
+ * aty_fudge_framebuffer_len() is used to reduce smem_len to not
+ * overlap with the registers.
+ */
+ info->fix.smem_len = 0x800000;
+
+ aty_fudge_framebuffer_len(info);
+
+ info->screen_base = ioremap_wc(info->fix.smem_start,
+ info->fix.smem_len);
if (info->screen_base == NULL) {
ret = -ENOMEM;
goto atyfb_setup_generic_fail;
@@ -3554,6 +3556,7 @@ static int atyfb_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
par = info->par;
+ par->bus_type = PCI;
info->fix = atyfb_fix;
info->device = &pdev->dev;
par->pci_id = pdev->device;
@@ -3655,7 +3658,8 @@ static int __init atyfb_atari_probe(void)
* Map the video memory (physical address given)
* to somewhere in the kernel address space.
*/
- info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
+ info->screen_base = ioremap_wc(phys_vmembase[m64_num],
+ phys_size[m64_num]);
info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) +
0xFC00ul;
@@ -3721,17 +3725,8 @@ static void atyfb_remove(struct fb_info *info)
if (M64_HAS(MOBIL_BUS))
aty_bl_exit(info->bl_dev);
#endif
+ arch_phys_wc_del(par->wc_cookie);
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
- if (par->mtrr_aper >= 0) {
- mtrr_del(par->mtrr_aper, 0, 0);
- par->mtrr_aper = -1;
- }
-#endif
#ifndef __sparc__
if (par->ati_regbase)
iounmap(par->ati_regbase);
@@ -3847,10 +3842,8 @@ static int __init atyfb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
-#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
-#endif
} else if (!strncmp(this_opt, "vram:", 5))
vram = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "pll:", 4))
@@ -4020,7 +4013,5 @@ module_param(comp_sync, int, 0);
MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
-#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
-#endif
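The atyfb rework above splits the BAR into a write-combined framebuffer mapping (ioremap_wc()) and a strongly uncached register page (ioremap_uc()), so the WC MTRR that arch_phys_wc_add() lays over the whole resource can never reach the MMIO page. A condensed sketch of that layout, assuming the Mach64 convention that the registers occupy the last 4 KiB of the BAR:

#include <linux/io.h>
#include <linux/pci.h>

static int demo_map_bar(struct pci_dev *pdev, void __iomem **regs,
			void __iomem **fb, int *wc_cookie)
{
	resource_size_t base = pci_resource_start(pdev, 0);
	resource_size_t size = pci_resource_len(pdev, 0);

	/* strong UC: immune to any covering MTRR, PAT or not */
	*regs = ioremap_uc(base + size - 0x1000, 0x1000);
	if (!*regs)
		return -ENOMEM;

	*fb = ioremap_wc(base, size - 0x1000);	/* framebuffer only */
	if (!*fb) {
		iounmap(*regs);
		return -ENOMEM;
	}

	/* WC MTRR over the whole BAR; the UC page above wins for MMIO */
	*wc_cookie = arch_phys_wc_add(base, size);
	return 0;
}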
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index d787533d9c8b..47c3191ec313 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1072,9 +1072,9 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
int idx = svd[i - specs->modedb_len - num];
- if (!idx || idx > 63) {
+ if (!idx || idx >= ARRAY_SIZE(cea_modes)) {
pr_warning("Reserved SVD code %d\n", idx);
- } else if (idx > ARRAY_SIZE(cea_modes) || !cea_modes[idx].xres) {
+ } else if (!cea_modes[idx].xres) {
pr_warning("Unimplemented SVD code %d\n", idx);
} else {
memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
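The fbmon fix above is a double off-by-one: the literal 63 quietly excluded the highest CEA mode, while idx > ARRAY_SIZE(cea_modes) still let idx == ARRAY_SIZE through to an out-of-bounds read. Checking idx >= ARRAY_SIZE() before indexing closes both, and cea_modes grows to 65 entries so index 64 becomes real. The corrected check as a runnable model:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const int modes[65] = { [64] = 1 };	/* valid indexes 0..64 */

static int lookup(unsigned int idx)
{
	if (!idx || idx >= ARRAY_SIZE(modes))	/* was: idx > ARRAY_SIZE */
		return -1;			/* reserved code */
	return modes[idx];
}

int main(void)
{
	printf("%d\n", lookup(64));	/* 1: the last entry is reachable */
	printf("%d\n", lookup(65));	/* -1: rejected, no OOB read */
	return 0;
}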
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 60c3f0a16341..15755ce1d26c 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -485,7 +485,7 @@ static ssize_t show_bl_curve(struct device *device,
mutex_lock(&fb_info->bl_curve_mutex);
for (i = 0; i < FB_BACKLIGHT_LEVELS; i += 8)
- len += snprintf(&buf[len], PAGE_SIZE, "%8ph\n",
+ len += scnprintf(&buf[len], PAGE_SIZE - len, "%8ph\n",
fb_info->bl_curve + i);
mutex_unlock(&fb_info->bl_curve_mutex);
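Two bugs in one line there: snprintf() returns the length the output would have had, so the running len could pass PAGE_SIZE, and the size argument never shrank as the buffer filled. scnprintf() returns the bytes actually stored and is paired with PAGE_SIZE - len for the remaining room. The difference, with a userspace stand-in for scnprintf():

#include <stdio.h>

/* models the kernel's scnprintf(): returns bytes actually stored */
static int scnprintf_demo(char *buf, size_t size, const char *s)
{
	int n = snprintf(buf, size, "%s", s);

	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[8];

	/* 10: the would-be length, larger than the buffer */
	printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "0123456789"));
	/* 7: what actually landed in buf */
	printf("scnprintf: %d\n", scnprintf_demo(buf, sizeof(buf), "0123456789"));
	return 0;
}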
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 7d07cf824b64..2510fa728d77 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -289,7 +289,7 @@ static const struct fb_videomode modedb[] = {
};
#ifdef CONFIG_FB_MODE_HELPERS
-const struct fb_videomode cea_modes[64] = {
+const struct fb_videomode cea_modes[65] = {
/* #1: 640x480p@59.94/60Hz */
[1] = {
NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 7ec251cc9c03..5b1081030cbb 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -419,36 +419,15 @@ static struct fb_ops ep93xxfb_ops = {
.fb_mmap = ep93xxfb_mmap,
};
-static int ep93xxfb_calc_fbsize(struct ep93xxfb_mach_info *mach_info)
-{
- int i, fb_size = 0;
-
- if (mach_info->num_modes == EP93XXFB_USE_MODEDB) {
- fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES *
- mach_info->bpp / 8;
- } else {
- for (i = 0; i < mach_info->num_modes; i++) {
- const struct fb_videomode *mode;
- int size;
-
- mode = &mach_info->modes[i];
- size = mode->xres * mode->yres * mach_info->bpp / 8;
- if (size > fb_size)
- fb_size = size;
- }
- }
-
- return fb_size;
-}
-
static int ep93xxfb_alloc_videomem(struct fb_info *info)
{
- struct ep93xx_fbi *fbi = info->par;
char __iomem *virt_addr;
dma_addr_t phys_addr;
unsigned int fb_size;
- fb_size = ep93xxfb_calc_fbsize(fbi->mach_info);
+ /* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
+ fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
+
virt_addr = dma_alloc_writecombine(info->dev, fb_size,
&phys_addr, GFP_KERNEL);
if (!virt_addr)
@@ -550,8 +529,7 @@ static int ep93xxfb_probe(struct platform_device *pdev)
fb_get_options("ep93xx-fb", &video_mode);
err = fb_find_mode(&info->var, info, video_mode,
- fbi->mach_info->modes, fbi->mach_info->num_modes,
- fbi->mach_info->default_mode, fbi->mach_info->bpp);
+ NULL, 0, NULL, 16);
if (err == 0) {
dev_err(info->dev, "No suitable video mode found\n");
err = -EINVAL;
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 135d78a02588..f19133a80e8c 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -662,7 +662,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
info->fix.smem_start = fb_phys;
info->fix.smem_len = pci_resource_len(pdev, 1);
- info->screen_base = pci_ioremap_bar(pdev, 1);
+ info->screen_base = pci_ioremap_wc_bar(pdev, 1);
if (!info->screen_base) {
dev_err(&pdev->dev, "gxt4500: cannot map framebuffer\n");
goto err_unmap_regs;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 807ee22ef229..e2451bdb4525 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -213,7 +213,7 @@ struct synthvid_msg {
struct hvfb_par {
struct fb_info *info;
- struct resource mem;
+ struct resource *mem;
bool fb_ready; /* fb device is ready */
struct completion wait;
u32 synthvid_version;
@@ -677,26 +677,18 @@ static void hvfb_get_option(struct fb_info *info)
/* Get framebuffer memory from Hyper-V video pci space */
-static int hvfb_getmem(struct fb_info *info)
+static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
{
struct hvfb_par *par = info->par;
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
+ resource_size_t pot_start, pot_end;
int ret;
- par->mem.name = KBUILD_MODNAME;
- par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (gen2vm) {
- ret = allocate_resource(&hyperv_mmio, &par->mem,
- screen_fb_size,
- 0, -1,
- screen_fb_size,
- NULL, NULL);
- if (ret != 0) {
- pr_err("Unable to allocate framebuffer memory\n");
- return -ENODEV;
- }
+ pot_start = 0;
+ pot_end = -1;
} else {
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
@@ -709,16 +701,18 @@ static int hvfb_getmem(struct fb_info *info)
pci_resource_len(pdev, 0) < screen_fb_size)
goto err1;
- par->mem.end = pci_resource_end(pdev, 0);
- par->mem.start = par->mem.end - screen_fb_size + 1;
- ret = request_resource(&pdev->resource[0], &par->mem);
- if (ret != 0) {
- pr_err("Unable to request framebuffer memory\n");
- goto err1;
- }
+ pot_end = pci_resource_end(pdev, 0);
+ pot_start = pot_end - screen_fb_size + 1;
+ }
+
+ ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+ screen_fb_size, 0x100000, true);
+ if (ret != 0) {
+ pr_err("Unable to allocate framebuffer memory\n");
+ goto err1;
}
- fb_virt = ioremap(par->mem.start, screen_fb_size);
+ fb_virt = ioremap(par->mem->start, screen_fb_size);
if (!fb_virt)
goto err2;
@@ -736,7 +730,7 @@ static int hvfb_getmem(struct fb_info *info)
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
}
- info->fix.smem_start = par->mem.start;
+ info->fix.smem_start = par->mem->start;
info->fix.smem_len = screen_fb_size;
info->screen_base = fb_virt;
info->screen_size = screen_fb_size;
@@ -749,7 +743,8 @@ static int hvfb_getmem(struct fb_info *info)
err3:
iounmap(fb_virt);
err2:
- release_resource(&par->mem);
+ release_mem_region(par->mem->start, screen_fb_size);
+ par->mem = NULL;
err1:
if (!gen2vm)
pci_dev_put(pdev);
@@ -763,7 +758,8 @@ static void hvfb_putmem(struct fb_info *info)
struct hvfb_par *par = info->par;
iounmap(info->screen_base);
- release_resource(&par->mem);
+ release_mem_region(par->mem->start, screen_fb_size);
+ par->mem = NULL;
}
@@ -794,7 +790,7 @@ static int hvfb_probe(struct hv_device *hdev,
goto error1;
}
- ret = hvfb_getmem(info);
+ ret = hvfb_getmem(hdev, info);
if (ret) {
pr_err("No memory for framebuffer\n");
goto error2;
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index a2b4204b42bb..452e1163ad02 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -27,24 +27,15 @@
#include <linux/console.h>
#include <video/vga.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
#include "i740_reg.h"
static char *mode_option;
-
-#ifdef CONFIG_MTRR
static int mtrr = 1;
-#endif
struct i740fb_par {
unsigned char __iomem *regs;
bool has_sgram;
-#ifdef CONFIG_MTRR
- int mtrr_reg;
-#endif
+ int wc_cookie;
bool ddc_registered;
struct i2c_adapter ddc_adapter;
struct i2c_algo_bit_data ddc_algo;
@@ -1040,7 +1031,7 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_request_regions;
}
- info->screen_base = pci_ioremap_bar(dev, 0);
+ info->screen_base = pci_ioremap_wc_bar(dev, 0);
if (!info->screen_base) {
dev_err(info->device, "error remapping base\n");
ret = -ENOMEM;
@@ -1144,13 +1135,9 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
fb_info(info, "%s frame buffer device\n", info->fix.id);
pci_set_drvdata(dev, info);
-#ifdef CONFIG_MTRR
- if (mtrr) {
- par->mtrr_reg = -1;
- par->mtrr_reg = mtrr_add(info->fix.smem_start,
- info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
- }
-#endif
+ if (mtrr)
+ par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+ info->fix.smem_len);
return 0;
err_reg_framebuffer:
@@ -1177,13 +1164,7 @@ static void i740fb_remove(struct pci_dev *dev)
if (info) {
struct i740fb_par *par = info->par;
-
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
-#endif
+ arch_phys_wc_del(par->wc_cookie);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
if (par->ddc_registered)
@@ -1287,10 +1268,8 @@ static int __init i740fb_setup(char *options)
while ((opt = strsep(&options, ",")) != NULL) {
if (!*opt)
continue;
-#ifdef CONFIG_MTRR
else if (!strncmp(opt, "mtrr:", 5))
mtrr = simple_strtoul(opt + 5, NULL, 0);
-#endif
else
mode_option = opt;
}
@@ -1327,7 +1306,5 @@ MODULE_DESCRIPTION("fbdev driver for Intel740");
module_param(mode_option, charp, 0444);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
-#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 65041e15fd59..5bb01533271e 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -22,9 +22,6 @@
#include <linux/pci.h>
#include <asm/io.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
#include <video/kyro.h>
@@ -84,9 +81,7 @@ static device_info_t deviceInfo;
static char *mode_option = NULL;
static int nopan = 0;
static int nowrap = 1;
-#ifdef CONFIG_MTRR
static int nomtrr = 0;
-#endif
/* PCI driver prototypes */
static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -570,10 +565,8 @@ static int __init kyrofb_setup(char *options)
nopan = 1;
} else if (strcmp(this_opt, "nowrap") == 0) {
nowrap = 1;
-#ifdef CONFIG_MTRR
} else if (strcmp(this_opt, "nomtrr") == 0) {
nomtrr = 1;
-#endif
} else {
mode_option = this_opt;
}
@@ -691,17 +684,16 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
currentpar->regbase = deviceInfo.pSTGReg =
ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+ if (!currentpar->regbase)
+ goto out_free_fb;
- info->screen_base = ioremap_nocache(kyro_fix.smem_start,
- kyro_fix.smem_len);
+ info->screen_base = pci_ioremap_wc_bar(pdev, 0);
+ if (!info->screen_base)
+ goto out_unmap_regs;
-#ifdef CONFIG_MTRR
if (!nomtrr)
- currentpar->mtrr_handle =
- mtrr_add(kyro_fix.smem_start,
- kyro_fix.smem_len,
- MTRR_TYPE_WRCOMB, 1);
-#endif
+ currentpar->wc_cookie = arch_phys_wc_add(kyro_fix.smem_start,
+ kyro_fix.smem_len);
kyro_fix.ypanstep = nopan ? 0 : 1;
kyro_fix.ywrapstep = nowrap ? 0 : 1;
@@ -745,8 +737,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_unmap:
- iounmap(currentpar->regbase);
iounmap(info->screen_base);
+out_unmap_regs:
+ iounmap(currentpar->regbase);
+out_free_fb:
framebuffer_release(info);
return -EINVAL;
@@ -770,12 +764,7 @@ static void kyrofb_remove(struct pci_dev *pdev)
iounmap(info->screen_base);
iounmap(par->regbase);
-#ifdef CONFIG_MTRR
- if (par->mtrr_handle)
- mtrr_del(par->mtrr_handle,
- info->fix.smem_start,
- info->fix.smem_len);
-#endif
+ arch_phys_wc_del(par->wc_cookie);
unregister_framebuffer(info);
framebuffer_release(info);
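The kyrofb hunk adds the missing ioremap failure checks and restructures the error path into the usual kernel ladder: each label releases exactly what was acquired before the jump, in reverse order. The idiom in miniature (all step/undo functions are stubs):

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* simulate the late failure */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int demo_probe(void)
{
	if (step_a() < 0)
		goto out;		/* nothing acquired yet */
	if (step_b() < 0)
		goto out_undo_a;	/* release A only */
	if (step_c() < 0)
		goto out_undo_b;	/* release B, then A */
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return -1;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}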
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index de9819660ca0..c9293aea8ec3 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -325,7 +325,6 @@ static int ocfb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "I/O resource request failed\n");
return -ENXIO;
}
- res->flags &= ~IORESOURCE_CACHEABLE;
fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fbdev->regs))
return PTR_ERR(fbdev->regs);
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
index a14d993f719d..8c246c213e06 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-opa362.c
@@ -266,7 +266,6 @@ static struct platform_driver opa362_driver = {
.remove = __exit_p(opa362_remove),
.driver = {
.name = "amplifier-opa362",
- .owner = THIS_MODULE,
.of_match_table = opa362_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
}
prev = port;
} while (of_node_cmp(port->name, "port") != 0);
+
+ of_node_put(ports);
}
return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
if (!port)
return NULL;
- np = of_get_next_parent(port);
+ np = of_get_parent(port);
for (i = 0; i < 2 && np; ++i) {
struct property *prop;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 4f0cbb54d4db..d3af01c94a58 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1091,7 +1091,7 @@ static void mmap_user_close(struct vm_area_struct *vma)
omapfb_put_mem_region(rg);
}
-static struct vm_operations_struct mmap_user_ops = {
+static const struct vm_operations_struct mmap_user_ops = {
.open = mmap_user_open,
.close = mmap_user_close,
};
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e209b039f553..efb57c059997 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -615,7 +615,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- clk = clk_get(&pdev->dev, "LCDCLK");
+ clk = devm_clk_get(&pdev->dev, "LCDCLK");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "unable to get LCDCLK");
return PTR_ERR(clk);
@@ -624,21 +624,18 @@ static int pxa168fb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no IO memory defined\n");
- ret = -ENOENT;
- goto failed_put_clk;
+ return -ENOENT;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ defined\n");
- ret = -ENOENT;
- goto failed_put_clk;
+ return -ENOENT;
}
info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
if (info == NULL) {
- ret = -ENOMEM;
- goto failed_put_clk;
+ return -ENOMEM;
}
/* Initialize private data */
@@ -776,8 +773,6 @@ failed_free_fbmem:
info->screen_base, fbi->fb_start_dma);
failed_free_info:
kfree(info);
-failed_put_clk:
- clk_put(clk);
dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
return ret;
@@ -813,7 +808,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
info->screen_base, info->fix.smem_start);
clk_disable(fbi->clk);
- clk_put(fbi->clk);
framebuffer_release(info);
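Switching pxa168fb to devm_clk_get() ties the clock reference to the device lifetime, which is why the failed_put_clk label and the clk_put() in remove can simply vanish. The reduced probe shape (clock name per the driver, everything else hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "LCDCLK");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* later failures just return: the devm core drops the reference */
	return 0;
}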
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
goto err_free_dma;
}
- ret = clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
dev_err(dev, "failed to enable clock\n");
goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
misc_deregister(&priv->misc_dev);
err_disable_clk:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
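clk_enable() alone is only half of the common clock framework contract: clk_prepare() does the part that may sleep, clk_enable() the atomic part. clk_prepare_enable() and clk_disable_unprepare() bundle the two, which is what pxa3xx-gcu now calls from probe context. A sketch, assuming process context at both call sites:

#include <linux/clk.h>

static int demo_clk_on(struct clk *clk)
{
	return clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
}

static void demo_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* disable + unprepare */
}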
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 7245611ec963..94813af97f09 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
switch (val) {
case CPUFREQ_ADJUST:
- case CPUFREQ_INCOMPATIBLE:
pr_debug("min dma period: %d ps, "
"new clock %d kHz\n", pxafb_display_dma_period(var),
policy->max);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 83433cb0dfba..96aa46dc696c 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -32,8 +32,7 @@
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <video/s1d13xxxfb.h>
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 7e3a05fc47aa..f72dd12456f9 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1938,7 +1938,7 @@ static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
},
};
-static struct platform_device_id s3c_fb_driver_ids[] = {
+static const struct platform_device_id s3c_fb_driver_ids[] = {
{
.name = "s3c-fb",
.driver_data = (unsigned long)&s3c_fb_data_64xx,
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index f0ae61a37f04..13b109073c63 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -28,13 +28,9 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
struct s3fb_info {
int chip, rev, mclk_freq;
- int mtrr_reg;
+ int wc_cookie;
struct vgastate state;
struct mutex open_lock;
unsigned int ref_count;
@@ -154,11 +150,7 @@ static const struct svga_timing_regs s3_timing_regs = {
static char *mode_option;
-
-#ifdef CONFIG_MTRR
static int mtrr = 1;
-#endif
-
static int fasttext = 1;
@@ -170,11 +162,8 @@ module_param(mode_option, charp, 0444);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0444);
MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
-
-#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
module_param(fasttext, int, 0644);
MODULE_PARM_DESC(fasttext, "Enable S3 fast text mode (1=enable, 0=disable, default=1)");
@@ -1168,7 +1157,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
info->fix.smem_len = pci_resource_len(dev, 0);
/* Map physical IO memory address into kernel space */
- info->screen_base = pci_iomap(dev, 0, 0);
+ info->screen_base = pci_iomap_wc(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
dev_err(info->device, "iomap for framebuffer failed\n");
@@ -1365,12 +1354,9 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
-#ifdef CONFIG_MTRR
- if (mtrr) {
- par->mtrr_reg = -1;
- par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
- }
-#endif
+ if (mtrr)
+ par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+ info->fix.smem_len);
return 0;
@@ -1405,14 +1391,7 @@ static void s3_pci_remove(struct pci_dev *dev)
if (info) {
par = info->par;
-
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
-#endif
-
+ arch_phys_wc_del(par->wc_cookie);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
@@ -1551,10 +1530,8 @@ static int __init s3fb_setup(char *options)
if (!*opt)
continue;
-#ifdef CONFIG_MTRR
else if (!strncmp(opt, "mtrr:", 5))
mtrr = simple_strtoul(opt + 5, NULL, 0);
-#endif
else if (!strncmp(opt, "fasttext:", 9))
fasttext = simple_strtoul(opt + 9, NULL, 0);
else
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 89dd7e02197f..dcf774c15889 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
switch (val) {
case CPUFREQ_ADJUST:
- case CPUFREQ_INCOMPATIBLE:
dev_dbg(fbi->dev, "min dma period: %d ps, "
"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
policy->max);
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 1085c0432158..52c5c7e63b52 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_platform.h>
diff --git a/drivers/staging/sm7xxfb/sm7xx.h b/drivers/video/fbdev/sm712.h
index 4bed0946c1b1..aad1cc4be34a 100644
--- a/drivers/staging/sm7xxfb/sm7xx.h
+++ b/drivers/video/fbdev/sm712.h
@@ -13,8 +13,6 @@
* more details.
*/
-#define NR_PALETTE 256
-
#define FB_ACCEL_SMI_LYNX 88
#define SCREEN_X_RES 1024
@@ -31,12 +29,8 @@
extern void __iomem *smtc_regbaseaddress;
#define smtc_mmiowb(dat, reg) writeb(dat, smtc_regbaseaddress + reg)
-#define smtc_mmioww(dat, reg) writew(dat, smtc_regbaseaddress + reg)
-#define smtc_mmiowl(dat, reg) writel(dat, smtc_regbaseaddress + reg)
#define smtc_mmiorb(reg) readb(smtc_regbaseaddress + reg)
-#define smtc_mmiorw(reg) readw(smtc_regbaseaddress + reg)
-#define smtc_mmiorl(reg) readl(smtc_regbaseaddress + reg)
#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
@@ -48,8 +42,6 @@ extern void __iomem *smtc_regbaseaddress;
#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
-#define SIZE_VPR (0x6C + 1)
-#define SIZE_DPR (0x44 + 1)
static inline void smtc_crtcw(int reg, int val)
{
@@ -57,24 +49,12 @@ static inline void smtc_crtcw(int reg, int val)
smtc_mmiowb(val, 0x3d5);
}
-static inline unsigned int smtc_crtcr(int reg)
-{
- smtc_mmiowb(reg, 0x3d4);
- return smtc_mmiorb(0x3d5);
-}
-
static inline void smtc_grphw(int reg, int val)
{
smtc_mmiowb(reg, 0x3ce);
smtc_mmiowb(val, 0x3cf);
}
-static inline unsigned int smtc_grphr(int reg)
-{
- smtc_mmiowb(reg, 0x3ce);
- return smtc_mmiorb(0x3cf);
-}
-
static inline void smtc_attrw(int reg, int val)
{
smtc_mmiorb(0x3da);
@@ -115,3 +95,22 @@ struct modeinit {
unsigned char init_cr30_cr4d[SIZE_CR30_CR4D];
unsigned char init_cr90_cra7[SIZE_CR90_CRA7];
};
+
+#ifdef __BIG_ENDIAN
+#define pal_rgb(r, g, b, val) (((r & 0xf800) >> 8) | \
+ ((g & 0xe000) >> 13) | \
+ ((g & 0x1c00) << 3) | \
+ ((b & 0xf800) >> 3))
+#define big_addr 0x800000
+#define mmio_addr 0x00800000
+#define seqw17() smtc_seqw(0x17, 0x30)
+#define big_pixel_depth(p, d) {if (p == 24) {p = 32; d = 32; } }
+#define big_swap(p) ((p & 0xff00ff00 >> 8) | (p & 0x00ff00ff << 8))
+#else
+#define pal_rgb(r, g, b, val) val
+#define big_addr 0
+#define mmio_addr 0x00c00000
+#define seqw17() do { } while (0)
+#define big_pixel_depth(p, d) do { } while (0)
+#define big_swap(p) p
+#endif
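One caution on big_swap() above: >> and << bind tighter than &, so the expression parses as (p & (0xff00ff00 >> 8)) | (p & (0x00ff00ff << 8)), which returns p unchanged. The macro faithfully preserves the expression from the -lines it replaces in sm712fb.c below, so this is an inherited no-op rather than a regression, but a real byte swap would need the parenthesized form. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

#define big_swap(p)	  ((p & 0xff00ff00 >> 8) | (p & 0x00ff00ff << 8))
#define big_swap_fixed(p) (((p & 0xff00ff00) >> 8) | ((p & 0x00ff00ff) << 8))

int main(void)
{
	uint32_t p = 0x11223344;

	printf("as written: %08x\n", big_swap(p));	/* 11223344 */
	printf("intended:   %08x\n", big_swap_fixed(p));	/* 22114433 */
	return 0;
}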
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/video/fbdev/sm712fb.c
index 2ff4fe73d148..629bfa2d2f51 100644
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -32,7 +32,7 @@
#include <linux/pm.h>
#endif
-#include "sm7xx.h"
+#include "sm712.h"
/*
* Private structure
@@ -923,25 +923,14 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
val = chan_to_field(red, &sfb->fb->var.red);
val |= chan_to_field(green, &sfb->fb->var.green);
val |= chan_to_field(blue, &sfb->fb->var.blue);
-#ifdef __BIG_ENDIAN
- pal[regno] = ((red & 0xf800) >> 8) |
- ((green & 0xe000) >> 13) |
- ((green & 0x1c00) << 3) |
- ((blue & 0xf800) >> 3);
-#else
- pal[regno] = val;
-#endif
+ pal[regno] = pal_rgb(red, green, blue, val);
} else {
u32 *pal = sfb->fb->pseudo_palette;
val = chan_to_field(red, &sfb->fb->var.red);
val |= chan_to_field(green, &sfb->fb->var.green);
val |= chan_to_field(blue, &sfb->fb->var.blue);
-#ifdef __BIG_ENDIAN
- val = (val & 0xff00ff00 >> 8) |
- (val & 0x00ff00ff << 8);
-#endif
- pal[regno] = val;
+ pal[regno] = big_swap(val);
}
break;
@@ -957,7 +946,6 @@ static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-#ifdef __BIG_ENDIAN
static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1002,8 +990,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
dst = buffer;
for (i = c >> 2; i--;) {
*dst = fb_readl(src++);
- *dst = (*dst & 0xff00ff00 >> 8) |
- (*dst & 0x00ff00ff << 8);
+ *dst = big_swap(*dst);
dst++;
}
if (c & 3) {
@@ -1091,8 +1078,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
}
for (i = c >> 2; i--;) {
- fb_writel((*src & 0xff00ff00 >> 8) |
- (*src & 0x00ff00ff << 8), dst++);
+ fb_writel(big_swap(*src), dst++);
src++;
}
if (c & 3) {
@@ -1120,7 +1106,6 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
return (cnt) ? cnt : err;
}
-#endif /* ! __BIG_ENDIAN */
static void sm7xx_set_timing(struct smtcfb_info *sfb)
{
@@ -1316,10 +1301,8 @@ static struct fb_ops smtcfb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_imageblit = cfb_imageblit,
.fb_copyarea = cfb_copyarea,
-#ifdef __BIG_ENDIAN
.fb_read = smtcfb_read,
.fb_write = smtcfb_write,
-#endif
};
/*
@@ -1341,10 +1324,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
{
sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
-#ifdef __BIG_ENDIAN
if (sfb->fb->var.bits_per_pixel == 32)
- sfb->fb->fix.smem_start += 0x800000;
-#endif
+ sfb->fb->fix.smem_start += big_addr;
sfb->fb->fix.smem_len = smem_len;
@@ -1437,10 +1418,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sfb->fb->var.bits_per_pixel = SCREEN_BPP;
}
-#ifdef __BIG_ENDIAN
- if (sfb->fb->var.bits_per_pixel == 24)
- sfb->fb->var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32);
-#endif
+ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
/* Map address and memory detection */
mmio_base = pci_resource_start(pdev, 0);
pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
@@ -1451,28 +1429,23 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
sfb->fb->fix.mmio_len = 0x00400000;
smem_size = SM712_VIDEOMEMORYSIZE;
-#ifdef __BIG_ENDIAN
- sfb->lfb = ioremap(mmio_base, 0x00c00000);
-#else
- sfb->lfb = ioremap(mmio_base, 0x00800000);
-#endif
+ sfb->lfb = ioremap(mmio_base, mmio_addr);
+ if (!sfb->lfb) {
+ dev_err(&pdev->dev,
+ "%s: unable to map memory mapped IO!\n",
+ sfb->fb->fix.id);
+ err = -ENOMEM;
+ goto failed_fb;
+ }
+
sfb->mmio = (smtc_regbaseaddress =
sfb->lfb + 0x00700000);
sfb->dp_regs = sfb->lfb + 0x00408000;
sfb->vp_regs = sfb->lfb + 0x0040c000;
-#ifdef __BIG_ENDIAN
if (sfb->fb->var.bits_per_pixel == 32) {
- sfb->lfb += 0x800000;
+ sfb->lfb += big_addr;
dev_info(&pdev->dev, "sfb->lfb=%p\n", sfb->lfb);
}
-#endif
- if (!smtc_regbaseaddress) {
- dev_err(&pdev->dev,
- "%s: unable to map memory mapped IO!\n",
- sfb->fb->fix.id);
- err = -ENOMEM;
- goto failed_fb;
- }
/* set MCLK = 14.31818 * (0x16 / 0x2) */
smtc_seqw(0x6a, 0x16);
@@ -1481,10 +1454,8 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
/* enable PCI burst */
smtc_seqw(0x17, 0x20);
/* enable word swap */
-#ifdef __BIG_ENDIAN
if (sfb->fb->var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
+ seqw17();
break;
case 0x720:
sfb->fb->fix.mmio_start = mmio_base;
@@ -1616,10 +1587,8 @@ static int smtcfb_pci_resume(struct device *device)
smtc_seqw(0x62, 0x3e);
/* enable PCI burst */
smtc_seqw(0x17, 0x20);
-#ifdef __BIG_ENDIAN
if (sfb->fb->var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
+ seqw17();
break;
case 0x720:
smtc_seqw(0x62, 0xff);
@@ -1659,14 +1628,12 @@ static struct pci_driver smtcfb_driver = {
static int __init sm712fb_init(void)
{
-#ifndef MODULE
char *option = NULL;
if (fb_get_options("sm712fb", &option))
return -ENODEV;
if (option && *option)
mode_option = option;
-#endif
sm7xx_vga_setup(mode_option);
return pci_register_driver(&smtcfb_driver);
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 3e153c06131a..93f4c902d0f9 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -656,8 +656,9 @@ static int ssd1307fb_probe(struct i2c_client *client,
bl = backlight_device_register(bl_name, &client->dev, par,
&ssd1307fb_bl_ops, NULL);
if (IS_ERR(bl)) {
- dev_err(&client->dev, "unable to register backlight device: %ld\n",
- PTR_ERR(bl));
+ ret = PTR_ERR(bl);
+ dev_err(&client->dev, "unable to register backlight device: %d\n",
+ ret);
goto bl_init_error;
}
@@ -719,7 +720,6 @@ static struct i2c_driver ssd1307fb_driver = {
.driver = {
.name = "ssd1307fb",
.of_match_table = ssd1307fb_of_match,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 86621fabbb8b..7df4228e25f0 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -64,6 +64,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <asm/grfioctl.h> /* for HP-UX compatibility */
#include <asm/uaccess.h>
@@ -121,6 +122,7 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#define REG_3 0x0004a0
#define REG_4 0x000600
#define REG_6 0x000800
+#define REG_7 0x000804
#define REG_8 0x000820
#define REG_9 0x000a04
#define REG_10 0x018000
@@ -135,6 +137,8 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#define REG_21 0x200218
#define REG_22 0x0005a0
#define REG_23 0x0005c0
+#define REG_24 0x000808
+#define REG_25 0x000b00
#define REG_26 0x200118
#define REG_27 0x200308
#define REG_32 0x21003c
@@ -429,6 +433,9 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
#define SET_LENXY_START_RECFILL(fb, lenxy) \
WRITE_WORD(lenxy, fb, REG_9)
+#define SETUP_COPYAREA(fb) \
+ WRITE_BYTE(0, fb, REG_16b1)
+
static void
HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
{
@@ -1004,6 +1011,36 @@ stifb_blank(int blank_mode, struct fb_info *info)
return 0;
}
+static void
+stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ SETUP_COPYAREA(fb);
+
+ SETUP_HW(fb);
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
+ IBOvals(RopSrc, MaskAddrOffset(0),
+ BitmapExtent08, StaticReg(1),
+ DataDynamic, MaskOtc, BGx(0), FGx(0)));
+
+ WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
+ WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
+ WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
@@ -1069,7 +1106,7 @@ static struct fb_ops stifb_ops = {
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1258,7 +1295,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
info->fbops = &stifb_ops;
info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
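stifb previously punted copyarea to the software cfb_copyarea() helper; the new stifb_copyarea() programs the NGLE blitter instead, and FBINFO_HWACCEL_COPYAREA advertises the acceleration to the console layer. The general shape of such a hook (demo_copyarea is a stub; real code writes src/size/dst to hardware as in the hunk above):

#include <linux/fb.h>
#include <linux/module.h>

static void demo_copyarea(struct fb_info *info,
			  const struct fb_copyarea *area)
{
	/* program (sx,sy), (width,height), (dx,dy) into the blitter */
}

static struct fb_ops demo_ops = {
	.owner		= THIS_MODULE,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= demo_copyarea,	/* was cfb_copyarea */
	.fb_imageblit	= cfb_imageblit,
};

/* and at init time: info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; */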
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index ff2b8731a2dc..e9c2f7ba3c8e 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -279,7 +279,7 @@ static int dlfb_set_video_mode(struct dlfb_data *dev,
{
char *buf;
char *wrptr;
- int retval = 0;
+ int retval;
int writesize;
struct urb *urb;
@@ -1505,8 +1505,7 @@ static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
char *desc;
char *buf;
char *desc_end;
-
- int total_len = 0;
+ int total_len;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
@@ -1582,7 +1581,7 @@ static int dlfb_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev;
- struct dlfb_data *dev = NULL;
+ struct dlfb_data *dev;
int retval = -ENOMEM;
/* usb initialization */
@@ -1665,7 +1664,6 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, dev->gdev);
if (!info) {
- retval = -ENOMEM;
pr_err("framebuffer_alloc failed\n");
goto error;
}
@@ -1912,7 +1910,7 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
- int ret = 0;
+ int ret;
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 70a897b1e458..b9c2f81fb6b9 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -51,7 +51,14 @@ static void *rvmalloc(unsigned long size)
if (!mem)
return NULL;
- memset(mem, 0, size); /* Clear the ram out, no junk to the user */
+	/*
+	 * VFB must clear memory to prevent kernel info
+	 * leakage into userspace.
+	 * VGA-based drivers MUST NOT clear memory if
+	 * they want to be able to take over vgacon.
+	 */
+
+ memset(mem, 0, size);
adr = (unsigned long) mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
@@ -490,14 +497,6 @@ static int vfb_probe(struct platform_device *dev)
if (!(videomemory = rvmalloc(videomemorysize)))
return retval;
- /*
- * VFB must clear memory to prevent kernel info
- * leakage into userspace
- * VGA-based drivers MUST NOT clear memory if
- * they want to be able to take over vgacon
- */
- memset(videomemory, 0, videomemorysize);
-
info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
if (!info)
goto err;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 8bac309c24b9..dd0f18e42d3e 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -26,13 +26,9 @@
#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
struct vt8623fb_info {
char __iomem *mmio_base;
- int mtrr_reg;
+ int wc_cookie;
struct vgastate state;
struct mutex open_lock;
unsigned int ref_count;
@@ -99,10 +95,7 @@ static struct svga_timing_regs vt8623_timing_regs = {
/* Module parameters */
static char *mode_option = "640x480-8@60";
-
-#ifdef CONFIG_MTRR
static int mtrr = 1;
-#endif
MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
@@ -112,11 +105,8 @@ module_param(mode_option, charp, 0644);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0);
MODULE_PARM_DESC(mode, "Default video mode e.g. '640x480-8@60' (deprecated)");
-
-#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
/* ------------------------------------------------------------------------- */
@@ -710,7 +700,7 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
info->fix.mmio_len = pci_resource_len(dev, 1);
/* Map physical IO memory address into kernel space */
- info->screen_base = pci_iomap(dev, 0, 0);
+ info->screen_base = pci_iomap_wc(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
dev_err(info->device, "iomap for framebuffer failed\n");
@@ -781,12 +771,9 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
-#ifdef CONFIG_MTRR
- if (mtrr) {
- par->mtrr_reg = -1;
- par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
- }
-#endif
+ if (mtrr)
+ par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+ info->fix.smem_len);
return 0;
@@ -816,13 +803,7 @@ static void vt8623_pci_remove(struct pci_dev *dev)
if (info) {
struct vt8623fb_info *par = info->par;
-#ifdef CONFIG_MTRR
- if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
- }
-#endif
-
+ arch_phys_wc_del(par->wc_cookie);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 09dc44736c1a..0567d517eed3 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -46,7 +46,7 @@ struct xenfb_info {
int nr_pages;
int irq;
struct xenfb_page *page;
- unsigned long *mfns;
+ unsigned long *gfns;
int update_wanted; /* XENFB_TYPE_UPDATE wanted */
int feature_resize; /* XENFB_TYPE_RESIZE ok */
struct xenfb_resize resize; /* protected by resize_lock */
@@ -402,8 +402,8 @@ static int xenfb_probe(struct xenbus_device *dev,
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
- if (!info->mfns)
+ info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+ if (!info->gfns)
goto error_nomem;
/* set up shared page */
@@ -530,29 +530,29 @@ static int xenfb_remove(struct xenbus_device *dev)
framebuffer_release(info->fb_info);
}
free_page((unsigned long)info->page);
- vfree(info->mfns);
+ vfree(info->gfns);
vfree(info->fb);
kfree(info);
return 0;
}
-static unsigned long vmalloc_to_mfn(void *address)
+static unsigned long vmalloc_to_gfn(void *address)
{
- return pfn_to_mfn(vmalloc_to_pfn(address));
+ return xen_page_to_gfn(vmalloc_to_page(address));
}
static void xenfb_init_shared_page(struct xenfb_info *info,
struct fb_info *fb_info)
{
int i;
- int epd = PAGE_SIZE / sizeof(info->mfns[0]);
+ int epd = PAGE_SIZE / sizeof(info->gfns[0]);
for (i = 0; i < info->nr_pages; i++)
- info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+ info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
for (i = 0; i * epd < info->nr_pages; i++)
- info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
+ info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
info->page->width = fb_info->var.xres;
info->page->height = fb_info->var.yres;
@@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
goto unbind_irq;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
- virt_to_mfn(info->page));
+ virt_to_gfn(info->page));
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
index = disp->native_mode;
ret = videomode_from_timings(disp, vm, index);
- if (ret)
- return ret;
display_timings_release(disp);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 82e80e034f25..7efc32945810 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -157,7 +157,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- adjust_managed_page_count(page, -1);
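+		/*
+		 * With DEFLATE_ON_OOM the host may get these pages back under
+		 * memory pressure, so they presumably stay accounted as
+		 * managed memory rather than being subtracted here.
+		 */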
+ if (!virtio_has_feature(vb->vdev,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ adjust_managed_page_count(page, -1);
}
/* Did we get any? */
@@ -166,14 +168,16 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
mutex_unlock(&vb->balloon_lock);
}
-static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
+static void release_pages_balloon(struct virtio_balloon *vb)
{
unsigned int i;
/* Find pfns pointing at start of each page, get pages and free them. */
- for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_pfn_to_page(pfns[i]);
- adjust_managed_page_count(page, 1);
+ for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+ if (!virtio_has_feature(vb->vdev,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ adjust_managed_page_count(page, 1);
put_page(page); /* balloon reference */
}
}
@@ -206,7 +210,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ release_pages_balloon(vb);
return num_freed_pages;
}
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
static void virtinput_remove(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
+ void *buf;
unsigned long flags;
spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
+ vdev->config->reset(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
kfree(vi);
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 10189b5b627f..f499d9da7237 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -58,6 +58,7 @@
#define pr_fmt(fmt) "virtio-mmio: " fmt
+#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -732,12 +733,21 @@ static struct of_device_id virtio_mmio_match[] = {
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
.remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
+ .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
},
};
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index a674409edfb3..b05e8fefbabd 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -97,7 +97,6 @@ MODULE_DEVICE_TABLE(i2c, ds2482_id);
static struct i2c_driver ds2482_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ds2482",
},
.probe = ds2482_probe,
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index d8667b0212d7..3749db8b4396 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -40,7 +40,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
+MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
static struct pci_device_id matrox_w1_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
@@ -232,16 +232,4 @@ static void matrox_w1_remove(struct pci_dev *pdev)
}
kfree(dev);
}
-
-static int __init matrox_w1_init(void)
-{
- return pci_register_driver(&matrox_w1_pci_driver);
-}
-
-static void __exit matrox_w1_fini(void)
-{
- pci_unregister_driver(&matrox_w1_pci_driver);
-}
-
-module_init(matrox_w1_init);
-module_exit(matrox_w1_fini);
+module_pci_driver(matrox_w1_pci_driver);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 241fafde42cb..c68edc16aa54 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -188,6 +188,15 @@ config AT91SAM9X_WATCHDOG
Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
reboot your system when the timeout is reached.
+config SAMA5D4_WATCHDOG
+ tristate "Atmel SAMA5D4 Watchdog Timer"
+ depends on ARCH_AT91
+ select WATCHDOG_CORE
+ help
+	  The Atmel SAMA5D4 watchdog timer is embedded in SAMA5D4 chips.
+	  Its Watchdog Timer Mode Register can be written more than once.
+	  The watchdog will reboot your system when the timeout is reached.
+
config CADENCE_WATCHDOG
tristate "Cadence Watchdog Timer"
depends on HAS_IOMEM
@@ -558,6 +567,17 @@ config DIGICOLOR_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called digicolor_wdt.
+config LPC18XX_WATCHDOG
+ tristate "LPC18xx/43xx Watchdog"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+	  Say Y here to include support for the watchdog timer
+	  in the NXP LPC SoC family, which includes LPC18xx/LPC43xx
+	  processors.
+ To compile this driver as a module, choose M here: the
+ module will be called lpc18xx_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -797,7 +817,8 @@ config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
select WATCHDOG_CORE
- select LPC_ICH
+ select LPC_ICH if !EXPERT
+ select I2C_I801 if !EXPERT
---help---
Hardware driver for the Intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
@@ -1333,7 +1354,7 @@ config MPC5200_WDT
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"
- depends on PPC_8xx || PPC_83xx || PPC_86xx
+ depends on PPC_8xx || PPC_83xx || PPC_86xx || PPC_MPC512x
select WATCHDOG_CORE
help
This driver is for a SoC level watchdog that exists on some
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 59ea9a1b8e76..0c616e3f67bb 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
+obj-$(CONFIG_SAMA5D4_WATCHDOG) += sama5d4_wdt.o
obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -66,6 +67,7 @@ obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
+obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 41cecb55766c..e12a797cb820 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -244,7 +244,7 @@ static int at91wdt_probe(struct platform_device *pdev)
}
regmap_st = syscon_node_to_regmap(parent->of_node);
- if (!regmap_st)
+ if (IS_ERR(regmap_st))
return -ENODEV;
res = misc_register(&at91wdt_miscdev);
@@ -269,9 +269,8 @@ static int at91wdt_remove(struct platform_device *pdev)
if (res)
dev_warn(dev, "failed to unregister restart handler\n");
- res = misc_deregister(&at91wdt_miscdev);
- if (!res)
- at91wdt_miscdev.parent = NULL;
+ misc_deregister(&at91wdt_miscdev);
+ at91wdt_miscdev.parent = NULL;
return res;
}
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index e4698f7c5f93..7e6acaf3ece4 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -90,6 +91,7 @@ struct at91wdt {
unsigned long heartbeat; /* WDT heartbeat in jiffies */
bool nowayout;
unsigned int irq;
+ struct clk *sclk;
};
/* ......................................................................... */
@@ -352,15 +354,25 @@ static int __init at91wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
+ wdt->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(wdt->sclk))
+ return PTR_ERR(wdt->sclk);
+
+ err = clk_prepare_enable(wdt->sclk);
+ if (err) {
+ dev_err(&pdev->dev, "Could not enable slow clock\n");
+ return err;
+ }
+
if (pdev->dev.of_node) {
err = of_at91wdt_init(pdev->dev.of_node, wdt);
if (err)
- return err;
+ goto err_clk;
}
err = at91_wdt_init(pdev, wdt);
if (err)
- return err;
+ goto err_clk;
platform_set_drvdata(pdev, wdt);
@@ -368,6 +380,11 @@ static int __init at91wdt_probe(struct platform_device *pdev)
wdt->wdd.timeout, wdt->nowayout);
return 0;
+
+err_clk:
+ clk_disable_unprepare(wdt->sclk);
+
+ return err;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
@@ -377,6 +394,7 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer(&wdt->timer);
+ clk_disable_unprepare(wdt->sclk);
return 0;
}
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index c6fbb2e6c41b..b79a83b467ce 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -22,11 +22,13 @@
#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
+#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
+#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 7116968dee12..66c3e656a616 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -182,6 +182,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout);
+ bcm2835_wdt_wdd.parent = &pdev->dev;
err = watchdog_register_device(&bcm2835_wdt_wdd);
if (err) {
dev_err(dev, "Failed to register watchdog device");
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b28a072abf78..4064a43f1360 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -209,6 +209,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
wdt->wdd.info = &bcm47xx_wdt_info;
wdt->wdd.timeout = WDT_DEFAULT_TIME;
+ wdt->wdd.parent = &pdev->dev;
ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
if (ret)
goto err_timer;
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 22d8ae65772a..e0c98423f2c9 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -319,6 +319,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+ bcm_kona_wdt_wdd.parent = &pdev->dev;
ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
if (ret) {
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index e96b09b135c8..04da4b66c75e 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -186,8 +186,6 @@ static int booke_wdt_stop(struct watchdog_device *wdog)
static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int timeout)
{
- if (timeout > MAX_WDT_TIMEOUT)
- return -EINVAL;
wdt_dev->timeout = timeout;
booke_wdt_set(wdt_dev);
@@ -211,7 +209,6 @@ static struct watchdog_device booke_wdt_dev = {
.info = &booke_wdt_info,
.ops = &booke_wdt_ops,
.min_timeout = 1,
- .max_timeout = 0xFFFF
};
static void __exit booke_wdt_exit(void)
@@ -229,6 +226,7 @@ static int __init booke_wdt_init(void)
booke_wdt_set_timeout(&booke_wdt_dev,
period_to_sec(booke_wdt_period));
watchdog_set_nowayout(&booke_wdt_dev, nowayout);
+ booke_wdt_dev.max_timeout = MAX_WDT_TIMEOUT;
if (booke_wdt_enabled)
booke_wdt_start(&booke_wdt_dev);
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index ce12f437f195..a099b77fc0b9 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -358,6 +358,7 @@ static int __init coh901327_probe(struct platform_device *pdev)
if (ret < 0)
coh901327_wdt.timeout = 60;
+ coh901327_wdt.parent = &pdev->dev;
ret = watchdog_register_device(&coh901327_wdt);
if (ret == 0)
dev_info(&pdev->dev,
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 2e9589652e1e..67e67977bd29 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -195,6 +195,7 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
+ da9052_wdt->parent = &pdev->dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
kref_init(&driver_data->kref);
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 495089d8dbfe..04d1430d93d2 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -161,6 +161,7 @@ static int da9055_wdt_probe(struct platform_device *pdev)
da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
da9055_wdt->info = &da9055_wdt_info;
da9055_wdt->ops = &da9055_wdt_ops;
+ da9055_wdt->parent = &pdev->dev;
watchdog_set_nowayout(da9055_wdt, nowayout);
watchdog_set_drvdata(da9055_wdt, driver_data);
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index b3a870ce85be..7386111220d5 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -210,6 +210,7 @@ static int da9062_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT;
wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdt->wdtdev.parent = &pdev->dev;
watchdog_set_drvdata(&wdt->wdtdev, wdt);
dev_set_drvdata(&pdev->dev, wdt);
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index e2fe2ebdebd4..6bf130bd863d 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -175,6 +175,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
+ wdt->wdtdev.parent = &pdev->dev;
wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index cfdf8a408aea..17454ca653f4 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -179,6 +179,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = 1;
wdd->max_timeout = MAX_HEARTBEAT;
wdd->timeout = DEFAULT_HEARTBEAT;
+ wdd->parent = &pdev->dev;
watchdog_init_timeout(wdd, heartbeat, dev);
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 31d8e4936611..50abe1bf62a5 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -143,6 +143,7 @@ static int dc_wdt_probe(struct platform_device *pdev)
}
dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk);
dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout;
+ dc_wdt_wdd.parent = &pdev->dev;
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 7a2cc7191c58..0a4d7cc05d54 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -132,6 +132,7 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
val = readl(mmio_base + EP93XX_WATCHDOG);
ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0;
ep93xx_wdt_wdd.timeout = timeout;
+ ep93xx_wdt_wdd.parent = &pdev->dev;
watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout);
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 1687cc2d7122..90d59d3f38a3 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -50,12 +50,41 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
gpio_direction_input(priv->gpio);
}
+static void gpio_wdt_hwping(unsigned long data)
+{
+ struct watchdog_device *wdd = (struct watchdog_device *)data;
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ if (priv->armed && time_after(jiffies, priv->last_jiffies +
+ msecs_to_jiffies(wdd->timeout * 1000))) {
+ dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+ return;
+ }
+
+ /* Restart timer */
+ mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+ switch (priv->hw_algo) {
+ case HW_ALGO_TOGGLE:
+ /* Toggle output pin */
+ priv->state = !priv->state;
+ gpio_set_value_cansleep(priv->gpio, priv->state);
+ break;
+ case HW_ALGO_LEVEL:
+ /* Pulse */
+ gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+ udelay(1);
+ gpio_set_value_cansleep(priv->gpio, priv->active_low);
+ break;
+ }
+}
+
static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv)
{
priv->state = priv->active_low;
gpio_direction_output(priv->gpio, priv->state);
priv->last_jiffies = jiffies;
- mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+ gpio_wdt_hwping((unsigned long)&priv->wdd);
}
static int gpio_wdt_start(struct watchdog_device *wdd)
@@ -97,35 +126,6 @@ static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
return gpio_wdt_ping(wdd);
}
-static void gpio_wdt_hwping(unsigned long data)
-{
- struct watchdog_device *wdd = (struct watchdog_device *)data;
- struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
-
- if (priv->armed && time_after(jiffies, priv->last_jiffies +
- msecs_to_jiffies(wdd->timeout * 1000))) {
- dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
- return;
- }
-
- /* Restart timer */
- mod_timer(&priv->timer, jiffies + priv->hw_margin);
-
- switch (priv->hw_algo) {
- case HW_ALGO_TOGGLE:
- /* Toggle output pin */
- priv->state = !priv->state;
- gpio_set_value_cansleep(priv->gpio, priv->state);
- break;
- case HW_ALGO_LEVEL:
- /* Pulse */
- gpio_set_value_cansleep(priv->gpio, !priv->active_low);
- udelay(1);
- gpio_set_value_cansleep(priv->gpio, priv->active_low);
- break;
- }
-}
-
static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
void *unused)
{
@@ -182,10 +182,10 @@ static int gpio_wdt_probe(struct platform_device *pdev)
ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
if (ret)
return ret;
- if (!strncmp(algo, "toggle", 6)) {
+ if (!strcmp(algo, "toggle")) {
priv->hw_algo = HW_ALGO_TOGGLE;
f = GPIOF_IN;
- } else if (!strncmp(algo, "level", 5)) {
+ } else if (!strcmp(algo, "level")) {
priv->hw_algo = HW_ALGO_LEVEL;
f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
} else {
@@ -217,6 +217,7 @@ static int gpio_wdt_probe(struct platform_device *pdev)
priv->wdd.ops = &gpio_wdt_ops;
priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
priv->wdd.max_timeout = SOFT_TIMEOUT_MAX;
+ priv->wdd.parent = &pdev->dev;
if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
priv->wdd.timeout = SOFT_TIMEOUT_DEF;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3c3fd417ddeb..0acc6c5f729d 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,8 +66,7 @@
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
-#include <linux/mfd/core.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
#include "iTCO_vendor.h"
@@ -146,59 +145,67 @@ static inline unsigned int ticks_to_seconds(int ticks)
return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10;
}
+static inline u32 no_reboot_bit(void)
+{
+ u32 enable_bit;
+
+ switch (iTCO_wdt_private.iTCO_version) {
+ case 3:
+ enable_bit = 0x00000010;
+ break;
+ case 2:
+ enable_bit = 0x00000020;
+ break;
+ case 4:
+ case 1:
+ default:
+ enable_bit = 0x00000002;
+ break;
+ }
+
+ return enable_bit;
+}
+
static void iTCO_wdt_set_NO_REBOOT_bit(void)
{
u32 val32;
/* Set the NO_REBOOT bit: this disables reboots */
- if (iTCO_wdt_private.iTCO_version == 3) {
- val32 = readl(iTCO_wdt_private.gcs_pmc);
- val32 |= 0x00000010;
- writel(val32, iTCO_wdt_private.gcs_pmc);
- } else if (iTCO_wdt_private.iTCO_version == 2) {
+ if (iTCO_wdt_private.iTCO_version >= 2) {
val32 = readl(iTCO_wdt_private.gcs_pmc);
- val32 |= 0x00000020;
+ val32 |= no_reboot_bit();
writel(val32, iTCO_wdt_private.gcs_pmc);
} else if (iTCO_wdt_private.iTCO_version == 1) {
pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
- val32 |= 0x00000002;
+ val32 |= no_reboot_bit();
pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
}
}
static int iTCO_wdt_unset_NO_REBOOT_bit(void)
{
- int ret = 0;
- u32 val32;
+ u32 enable_bit = no_reboot_bit();
+ u32 val32 = 0;
/* Unset the NO_REBOOT bit: this enables reboots */
- if (iTCO_wdt_private.iTCO_version == 3) {
- val32 = readl(iTCO_wdt_private.gcs_pmc);
- val32 &= 0xffffffef;
- writel(val32, iTCO_wdt_private.gcs_pmc);
-
- val32 = readl(iTCO_wdt_private.gcs_pmc);
- if (val32 & 0x00000010)
- ret = -EIO;
- } else if (iTCO_wdt_private.iTCO_version == 2) {
+ if (iTCO_wdt_private.iTCO_version >= 2) {
val32 = readl(iTCO_wdt_private.gcs_pmc);
- val32 &= 0xffffffdf;
+ val32 &= ~enable_bit;
writel(val32, iTCO_wdt_private.gcs_pmc);
val32 = readl(iTCO_wdt_private.gcs_pmc);
- if (val32 & 0x00000020)
- ret = -EIO;
} else if (iTCO_wdt_private.iTCO_version == 1) {
pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
- val32 &= 0xfffffffd;
+ val32 &= ~enable_bit;
pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
- if (val32 & 0x00000002)
- ret = -EIO;
}
- return ret; /* returns: 0 = OK, -EIO = Error */
+ if (val32 & enable_bit)
+ return -EIO;
+
+ return 0;
}
static int iTCO_wdt_start(struct watchdog_device *wd_dev)
@@ -418,9 +425,9 @@ static int iTCO_wdt_probe(struct platform_device *dev)
{
int ret = -ENODEV;
unsigned long val32;
- struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
+ struct itco_wdt_platform_data *pdata = dev_get_platdata(&dev->dev);
- if (!ich_info)
+ if (!pdata)
goto out;
spin_lock_init(&iTCO_wdt_private.io_lock);
@@ -435,7 +442,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
if (!iTCO_wdt_private.smi_res)
goto out;
- iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+ iTCO_wdt_private.iTCO_version = pdata->version;
iTCO_wdt_private.dev = dev;
iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
@@ -501,15 +508,24 @@ static int iTCO_wdt_probe(struct platform_device *dev)
}
pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
- ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
+ pdata->name, pdata->version, (u64)TCOBASE);
/* Clear out the (probably old) status */
- if (iTCO_wdt_private.iTCO_version == 3) {
+ switch (iTCO_wdt_private.iTCO_version) {
+ case 4:
+ outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
+ outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
+ break;
+ case 3:
outl(0x20008, TCO1_STS);
- } else {
+ break;
+ case 2:
+ case 1:
+ default:
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
+ break;
}
iTCO_wdt_watchdog_dev.bootstatus = 0;
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 9bc39ae51624..78c2541f5d52 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -267,6 +267,7 @@ static int ie6xx_wdt_probe(struct platform_device *pdev)
ie6xx_wdt_dev.timeout = timeout;
watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout);
+ ie6xx_wdt_dev.parent = &pdev->dev;
spin_lock_init(&ie6xx_wdt_data.unlock_sequence);
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 0f73621827ab..15ab07230960 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -316,6 +316,7 @@ static int pdc_wdt_remove(struct platform_device *pdev)
{
struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
+ unregister_restart_handler(&pdc_wdt->restart_handler);
pdc_wdt_stop(&pdc_wdt->wdt_dev);
watchdog_unregister_device(&pdc_wdt->wdt_dev);
clk_disable_unprepare(pdc_wdt->wdt_clk);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 84f6701c391f..0a436b5d1e84 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -137,6 +137,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
+ wdt_dev->parent = &pdev->dev;
watchdog_set_drvdata(wdt_dev, &pdev->dev);
platform_set_drvdata(pdev, wdt_dev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 4c2cc09c0c57..6a7d5c365438 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -174,6 +174,7 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
jz4740_wdt->timeout = heartbeat;
jz4740_wdt->min_timeout = 1;
jz4740_wdt->max_timeout = MAX_HEARTBEAT;
+ jz4740_wdt->parent = &pdev->dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index b7ea39b455c8..1e41818a44bc 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -254,13 +254,10 @@ static int ks8695wdt_probe(struct platform_device *pdev)
static int ks8695wdt_remove(struct platform_device *pdev)
{
- int res;
-
- res = misc_deregister(&ks8695wdt_miscdev);
- if (!res)
- ks8695wdt_miscdev.parent = NULL;
+ misc_deregister(&ks8695wdt_miscdev);
+ ks8695wdt_miscdev.parent = NULL;
- return res;
+ return 0;
}
static void ks8695wdt_shutdown(struct platform_device *pdev)
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
new file mode 100644
index 000000000000..ab7b8b185d99
--- /dev/null
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -0,0 +1,340 @@
+/*
+ * NXP LPC18xx Watchdog Timer (WDT)
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Notes
+ * -----
+ * The Watchdog consists of a fixed divide-by-4 clock pre-scaler and a 24-bit
+ * counter which decrements on every clock cycle.
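+ *
+ * A timeout of t seconds therefore needs a count of t * clk_rate / 4 ticks.
+ * As an illustration (the clock figure is hypothetical, not from this
+ * driver): with a 12 MHz watchdog clock, the maximum 24-bit count of
+ * 0xffffff gives roughly (0xffffff * 4) / 12000000, i.e. about 5.6 seconds.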
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+/* Registers */
+#define LPC18XX_WDT_MOD 0x00
+#define LPC18XX_WDT_MOD_WDEN BIT(0)
+#define LPC18XX_WDT_MOD_WDRESET BIT(1)
+
+#define LPC18XX_WDT_TC 0x04
+#define LPC18XX_WDT_TC_MIN 0xff
+#define LPC18XX_WDT_TC_MAX 0xffffff
+
+#define LPC18XX_WDT_FEED 0x08
+#define LPC18XX_WDT_FEED_MAGIC1 0xaa
+#define LPC18XX_WDT_FEED_MAGIC2 0x55
+
+#define LPC18XX_WDT_TV 0x0c
+
+/* Clock pre-scaler */
+#define LPC18XX_WDT_CLK_DIV 4
+
+/* Timeout values in seconds */
+#define LPC18XX_WDT_DEF_TIMEOUT 30U
+
+static int heartbeat;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds (default="
+ __MODULE_STRING(LPC18XX_WDT_DEF_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct lpc18xx_wdt_dev {
+ struct watchdog_device wdt_dev;
+ struct clk *reg_clk;
+ struct clk *wdt_clk;
+ unsigned long clk_rate;
+ void __iomem *base;
+ struct timer_list timer;
+ struct notifier_block restart_handler;
+ spinlock_t lock;
+};
+
+static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+ unsigned long flags;
+
+ /*
+ * An abort condition will occur if an interrupt happens during the feed
+ * sequence.
+ */
+ spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
+ writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+ writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+ spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
+
+ return 0;
+}
+
+static void lpc18xx_wdt_timer_feed(unsigned long data)
+{
+ struct watchdog_device *wdt_dev = (struct watchdog_device *)data;
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+
+ lpc18xx_wdt_feed(wdt_dev);
+
+ /* Use safe value (1/2 of real timeout) */
+ mod_timer(&lpc18xx_wdt->timer, jiffies +
+ msecs_to_jiffies((wdt_dev->timeout * MSEC_PER_SEC) / 2));
+}
+
+/*
+ * Since LPC18xx Watchdog cannot be disabled in hardware, we must keep feeding
+ * it with a timer until userspace watchdog software takes over.
+ */
+static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ lpc18xx_wdt_timer_feed((unsigned long)wdt_dev);
+
+ return 0;
+}
+
+static void __lpc18xx_wdt_set_timeout(struct lpc18xx_wdt_dev *lpc18xx_wdt)
+{
+ unsigned int val;
+
+ val = DIV_ROUND_UP(lpc18xx_wdt->wdt_dev.timeout * lpc18xx_wdt->clk_rate,
+ LPC18XX_WDT_CLK_DIV);
+ writel(val, lpc18xx_wdt->base + LPC18XX_WDT_TC);
+}
+
+static int lpc18xx_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int new_timeout)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+
+ lpc18xx_wdt->wdt_dev.timeout = new_timeout;
+ __lpc18xx_wdt_set_timeout(lpc18xx_wdt);
+
+ return 0;
+}
+
+static unsigned int lpc18xx_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+ unsigned int val;
+
+ val = readl(lpc18xx_wdt->base + LPC18XX_WDT_TV);
+ return (val * LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
+}
+
+static int lpc18xx_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+ unsigned int val;
+
+ if (timer_pending(&lpc18xx_wdt->timer))
+ del_timer(&lpc18xx_wdt->timer);
+
+ val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+ val |= LPC18XX_WDT_MOD_WDEN;
+ val |= LPC18XX_WDT_MOD_WDRESET;
+ writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+
+ /*
+ * Setting the WDEN bit in the WDMOD register is not sufficient to
+ * enable the Watchdog. A valid feed sequence must be completed after
+ * setting WDEN before the Watchdog is capable of generating a reset.
+ */
+ lpc18xx_wdt_feed(wdt_dev);
+
+ return 0;
+}
+
+static struct watchdog_info lpc18xx_wdt_info = {
+ .identity = "NXP LPC18xx Watchdog",
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops lpc18xx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = lpc18xx_wdt_start,
+ .stop = lpc18xx_wdt_stop,
+ .ping = lpc18xx_wdt_feed,
+ .set_timeout = lpc18xx_wdt_set_timeout,
+ .get_timeleft = lpc18xx_wdt_get_timeleft,
+};
+
+static int lpc18xx_wdt_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = container_of(this,
+ struct lpc18xx_wdt_dev, restart_handler);
+ unsigned long flags;
+ int val;
+
+ /*
+ * Incorrect feed sequence causes immediate watchdog reset if enabled.
+ */
+ spin_lock_irqsave(&lpc18xx_wdt->lock, flags);
+
+ val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+ val |= LPC18XX_WDT_MOD_WDEN;
+ val |= LPC18XX_WDT_MOD_WDRESET;
+ writel(val, lpc18xx_wdt->base + LPC18XX_WDT_MOD);
+
+ writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+ writel(LPC18XX_WDT_FEED_MAGIC2, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+
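+	/* Feeding MAGIC1 twice is a deliberately broken sequence: it forces the reset. */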
+ writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+ writel(LPC18XX_WDT_FEED_MAGIC1, lpc18xx_wdt->base + LPC18XX_WDT_FEED);
+
+ spin_unlock_irqrestore(&lpc18xx_wdt->lock, flags);
+
+ return NOTIFY_OK;
+}
+
+static int lpc18xx_wdt_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
+ if (!lpc18xx_wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpc18xx_wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(lpc18xx_wdt->base))
+ return PTR_ERR(lpc18xx_wdt->base);
+
+ lpc18xx_wdt->reg_clk = devm_clk_get(dev, "reg");
+ if (IS_ERR(lpc18xx_wdt->reg_clk)) {
+ dev_err(dev, "failed to get the reg clock\n");
+ return PTR_ERR(lpc18xx_wdt->reg_clk);
+ }
+
+ lpc18xx_wdt->wdt_clk = devm_clk_get(dev, "wdtclk");
+ if (IS_ERR(lpc18xx_wdt->wdt_clk)) {
+ dev_err(dev, "failed to get the wdt clock\n");
+ return PTR_ERR(lpc18xx_wdt->wdt_clk);
+ }
+
+ ret = clk_prepare_enable(lpc18xx_wdt->reg_clk);
+ if (ret) {
+ dev_err(dev, "could not prepare or enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
+ if (ret) {
+ dev_err(dev, "could not prepare or enable wdt clock\n");
+ goto disable_reg_clk;
+ }
+
+ /* We use the clock rate to calculate timeouts */
+ lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
+ if (lpc18xx_wdt->clk_rate == 0) {
+ dev_err(dev, "failed to get clock rate\n");
+ ret = -EINVAL;
+ goto disable_wdt_clk;
+ }
+
+ lpc18xx_wdt->wdt_dev.info = &lpc18xx_wdt_info;
+ lpc18xx_wdt->wdt_dev.ops = &lpc18xx_wdt_ops;
+
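+	/* Round the minimum up and truncate the maximum so both fit the 24-bit counter. */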
+ lpc18xx_wdt->wdt_dev.min_timeout = DIV_ROUND_UP(LPC18XX_WDT_TC_MIN *
+ LPC18XX_WDT_CLK_DIV, lpc18xx_wdt->clk_rate);
+
+ lpc18xx_wdt->wdt_dev.max_timeout = (LPC18XX_WDT_TC_MAX *
+ LPC18XX_WDT_CLK_DIV) / lpc18xx_wdt->clk_rate;
+
+ lpc18xx_wdt->wdt_dev.timeout = min(lpc18xx_wdt->wdt_dev.max_timeout,
+ LPC18XX_WDT_DEF_TIMEOUT);
+
+ spin_lock_init(&lpc18xx_wdt->lock);
+
+ lpc18xx_wdt->wdt_dev.parent = dev;
+ watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
+
+ ret = watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
+
+ __lpc18xx_wdt_set_timeout(lpc18xx_wdt);
+
+ setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed,
+ (unsigned long)&lpc18xx_wdt->wdt_dev);
+
+ watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout);
+
+ platform_set_drvdata(pdev, lpc18xx_wdt);
+
+ ret = watchdog_register_device(&lpc18xx_wdt->wdt_dev);
+ if (ret)
+ goto disable_wdt_clk;
+
+ lpc18xx_wdt->restart_handler.notifier_call = lpc18xx_wdt_restart;
+ lpc18xx_wdt->restart_handler.priority = 128;
+ ret = register_restart_handler(&lpc18xx_wdt->restart_handler);
+ if (ret)
+ dev_warn(dev, "failed to register restart handler: %d\n", ret);
+
+ return 0;
+
+disable_wdt_clk:
+ clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
+disable_reg_clk:
+ clk_disable_unprepare(lpc18xx_wdt->reg_clk);
+ return ret;
+}
+
+static void lpc18xx_wdt_shutdown(struct platform_device *pdev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
+
+ lpc18xx_wdt_stop(&lpc18xx_wdt->wdt_dev);
+}
+
+static int lpc18xx_wdt_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&lpc18xx_wdt->restart_handler);
+
+ dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n");
+ del_timer(&lpc18xx_wdt->timer);
+
+ watchdog_unregister_device(&lpc18xx_wdt->wdt_dev);
+ clk_disable_unprepare(lpc18xx_wdt->wdt_clk);
+ clk_disable_unprepare(lpc18xx_wdt->reg_clk);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_wdt_match[] = {
+ { .compatible = "nxp,lpc1850-wwdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_wdt_match);
+
+static struct platform_driver lpc18xx_wdt_driver = {
+ .driver = {
+ .name = "lpc18xx-wdt",
+ .of_match_table = lpc18xx_wdt_match,
+ },
+ .probe = lpc18xx_wdt_probe,
+ .remove = lpc18xx_wdt_remove,
+ .shutdown = lpc18xx_wdt_shutdown,
+};
+module_platform_driver(lpc18xx_wdt_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx Watchdog Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index d193a5e79c38..69013007dc47 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -197,6 +197,7 @@ static int a21_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(&a21_wdt, 30, &pdev->dev);
watchdog_set_nowayout(&a21_wdt, nowayout);
watchdog_set_drvdata(&a21_wdt, drv);
+ a21_wdt.parent = &pdev->dev;
reset = a21_wdt_get_bootstatus(drv);
if (reset == 2)
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 59f0913c7341..3aefddebb386 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -130,6 +130,7 @@ static int menf21bmc_wdt_probe(struct platform_device *pdev)
drv_data->wdt.info = &menf21bmc_wdt_info;
drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
+ drv_data->wdt.parent = &pdev->dev;
drv_data->i2c_client = i2c_client;
/*
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 689381a24887..5f2273aac37d 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -50,8 +50,12 @@ struct mpc8xxx_wdt_type {
bool hw_enabled;
};
-static struct mpc8xxx_wdt __iomem *wd_base;
-static int mpc8xxx_wdt_init_late(void);
+struct mpc8xxx_wdt_ddata {
+ struct mpc8xxx_wdt __iomem *base;
+ struct watchdog_device wdd;
+ struct timer_list timer;
+ spinlock_t lock;
+};
static u16 timeout = 0xffff;
module_param(timeout, ushort, 0);
@@ -68,65 +72,59 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-/*
- * We always prescale, but if someone really doesn't want to they can set this
- * to 0
- */
-static int prescale = 1;
-
-static DEFINE_SPINLOCK(wdt_spinlock);
-
-static void mpc8xxx_wdt_keepalive(void)
+static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
{
/* Ping the WDT */
- spin_lock(&wdt_spinlock);
- out_be16(&wd_base->swsrr, 0x556c);
- out_be16(&wd_base->swsrr, 0xaa39);
- spin_unlock(&wdt_spinlock);
+ spin_lock(&ddata->lock);
+ out_be16(&ddata->base->swsrr, 0x556c);
+ out_be16(&ddata->base->swsrr, 0xaa39);
+ spin_unlock(&ddata->lock);
}
-static struct watchdog_device mpc8xxx_wdt_dev;
-static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
- (unsigned long)&mpc8xxx_wdt_dev);
-
static void mpc8xxx_wdt_timer_ping(unsigned long arg)
{
- struct watchdog_device *w = (struct watchdog_device *)arg;
+ struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
- mpc8xxx_wdt_keepalive();
+ mpc8xxx_wdt_keepalive(ddata);
/* We're pinging it twice as often as needed, just to be sure. */
- mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
+ mod_timer(&ddata->timer, jiffies + HZ * ddata->wdd.timeout / 2);
}
static int mpc8xxx_wdt_start(struct watchdog_device *w)
{
- u32 tmp = SWCRR_SWEN;
+ struct mpc8xxx_wdt_ddata *ddata =
+ container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+ u32 tmp = SWCRR_SWEN | SWCRR_SWPR;
/* Good, fire up the show */
- if (prescale)
- tmp |= SWCRR_SWPR;
if (reset)
tmp |= SWCRR_SWRI;
tmp |= timeout << 16;
- out_be32(&wd_base->swcrr, tmp);
+ out_be32(&ddata->base->swcrr, tmp);
- del_timer_sync(&wdt_timer);
+ del_timer_sync(&ddata->timer);
return 0;
}
static int mpc8xxx_wdt_ping(struct watchdog_device *w)
{
- mpc8xxx_wdt_keepalive();
+ struct mpc8xxx_wdt_ddata *ddata =
+ container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+ mpc8xxx_wdt_keepalive(ddata);
return 0;
}
static int mpc8xxx_wdt_stop(struct watchdog_device *w)
{
- mod_timer(&wdt_timer, jiffies);
+ struct mpc8xxx_wdt_ddata *ddata =
+ container_of(w, struct mpc8xxx_wdt_ddata, wdd);
+
+ mod_timer(&ddata->timer, jiffies);
return 0;
}
@@ -143,53 +141,57 @@ static struct watchdog_ops mpc8xxx_wdt_ops = {
.stop = mpc8xxx_wdt_stop,
};
-static struct watchdog_device mpc8xxx_wdt_dev = {
- .info = &mpc8xxx_wdt_info,
- .ops = &mpc8xxx_wdt_ops,
-};
-
-static const struct of_device_id mpc8xxx_wdt_match[];
static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
{
int ret;
- const struct of_device_id *match;
- struct device_node *np = ofdev->dev.of_node;
+ struct resource *res;
const struct mpc8xxx_wdt_type *wdt_type;
+ struct mpc8xxx_wdt_ddata *ddata;
u32 freq = fsl_get_sys_freq();
bool enabled;
unsigned int timeout_sec;
- match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
- if (!match)
+ wdt_type = of_device_get_match_data(&ofdev->dev);
+ if (!wdt_type)
return -EINVAL;
- wdt_type = match->data;
if (!freq || freq == -1)
return -EINVAL;
- wd_base = of_iomap(np, 0);
- if (!wd_base)
+ ddata = devm_kzalloc(&ofdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
return -ENOMEM;
- enabled = in_be32(&wd_base->swcrr) & SWCRR_SWEN;
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ ddata->base = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ enabled = in_be32(&ddata->base->swcrr) & SWCRR_SWEN;
if (!enabled && wdt_type->hw_enabled) {
pr_info("could not be enabled in software\n");
- ret = -ENOSYS;
- goto err_unmap;
+ return -ENODEV;
}
+ spin_lock_init(&ddata->lock);
+ setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
+ (unsigned long)ddata);
+
+	ddata->wdd.info = &mpc8xxx_wdt_info;
+	ddata->wdd.ops = &mpc8xxx_wdt_ops;
+
/* Calculate the timeout in seconds */
- if (prescale)
- timeout_sec = (timeout * wdt_type->prescaler) / freq;
- else
- timeout_sec = timeout / freq;
-
- mpc8xxx_wdt_dev.timeout = timeout_sec;
-#ifdef MODULE
- ret = mpc8xxx_wdt_init_late();
- if (ret)
- goto err_unmap;
-#endif
+ timeout_sec = (timeout * wdt_type->prescaler) / freq;
+
+ ddata->wdd.timeout = timeout_sec;
+
+ watchdog_set_nowayout(&ddata->wdd, nowayout);
+
+ ret = watchdog_register_device(&ddata->wdd);
+ if (ret) {
+ pr_err("cannot register watchdog device (err=%d)\n", ret);
+ return ret;
+ }
pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d (%d seconds)\n",
reset ? "reset" : "interrupt", timeout, timeout_sec);
@@ -200,21 +202,20 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
* userspace handles it.
*/
if (enabled)
- mod_timer(&wdt_timer, jiffies);
+ mod_timer(&ddata->timer, jiffies);
+
+ platform_set_drvdata(ofdev, ddata);
return 0;
-err_unmap:
- iounmap(wd_base);
- wd_base = NULL;
- return ret;
}
static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
{
+ struct mpc8xxx_wdt_ddata *ddata = platform_get_drvdata(ofdev);
+
pr_crit("Watchdog removed, expect the %s soon!\n",
reset ? "reset" : "machine check exception");
- del_timer_sync(&wdt_timer);
- watchdog_unregister_device(&mpc8xxx_wdt_dev);
- iounmap(wd_base);
+ del_timer_sync(&ddata->timer);
+ watchdog_unregister_device(&ddata->wdd);
return 0;
}
@@ -253,31 +254,6 @@ static struct platform_driver mpc8xxx_wdt_driver = {
},
};
-/*
- * We do wdt initialization in two steps: arch_initcall probes the wdt
- * very early to start pinging the watchdog (misc devices are not yet
- * available), and later module_init() just registers the misc device.
- */
-static int mpc8xxx_wdt_init_late(void)
-{
- int ret;
-
- if (!wd_base)
- return -ENODEV;
-
- watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
-
- ret = watchdog_register_device(&mpc8xxx_wdt_dev);
- if (ret) {
- pr_err("cannot register watchdog device (err=%d)\n", ret);
- return ret;
- }
- return 0;
-}
-#ifndef MODULE
-module_init(mpc8xxx_wdt_init_late);
-#endif
-
static int __init mpc8xxx_wdt_init(void)
{
return platform_driver_register(&mpc8xxx_wdt_driver);
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 938b987de551..6ad9df948711 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -210,6 +210,14 @@ static int mtk_wdt_probe(struct platform_device *pdev)
return 0;
}
+static void mtk_wdt_shutdown(struct platform_device *pdev)
+{
+ struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
+
+ if (watchdog_active(&mtk_wdt->wdt_dev))
+ mtk_wdt_stop(&mtk_wdt->wdt_dev);
+}
+
static int mtk_wdt_remove(struct platform_device *pdev)
{
struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
@@ -221,17 +229,48 @@ static int mtk_wdt_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mtk_wdt_suspend(struct device *dev)
+{
+ struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&mtk_wdt->wdt_dev))
+ mtk_wdt_stop(&mtk_wdt->wdt_dev);
+
+ return 0;
+}
+
+static int mtk_wdt_resume(struct device *dev)
+{
+ struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&mtk_wdt->wdt_dev)) {
+ mtk_wdt_start(&mtk_wdt->wdt_dev);
+ mtk_wdt_ping(&mtk_wdt->wdt_dev);
+ }
+
+ return 0;
+}
+#endif
+
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt6589-wdt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
+static const struct dev_pm_ops mtk_wdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
+ mtk_wdt_resume)
+};
+
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
.remove = mtk_wdt_remove,
+ .shutdown = mtk_wdt_shutdown,
.driver = {
.name = DRV_NAME,
+ .pm = &mtk_wdt_pm_ops,
.of_match_table = mtk_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index c028454be66c..bd917bb757b8 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -294,6 +294,8 @@ static const struct pci_device_id tco_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index de911c7e477c..d96bee017fd3 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -253,6 +253,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.ops = &omap_wdt_ops;
wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
+ wdev->wdog.parent = &pdev->dev;
if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index ef0c628d5037..c6b8f4a43bde 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -567,6 +567,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
dev->wdt.timeout = wdt_max_duration;
dev->wdt.max_timeout = wdt_max_duration;
+ dev->wdt.parent = &pdev->dev;
watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
platform_set_drvdata(pdev, &dev->wdt);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index b9c6049c3e78..4224b3ec83a5 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -167,6 +167,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
+ pnx4008_wdd.parent = &pdev->dev;
watchdog_set_nowayout(&pnx4008_wdd, nowayout);
pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index aa03ca8f2d9b..773dcfaee7b2 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -171,6 +171,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.ops = &qcom_wdt_ops;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
+ wdt->wdd.parent = &pdev->dev;
/*
* If 'timeout-sec' unspecified in devicetree, assume a 30 second
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index b7c68e275aeb..39cd51df2ffc 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -127,6 +127,7 @@ static int retu_wdt_probe(struct platform_device *pdev)
retu_wdt->timeout = RETU_WDT_MAX_TIMER;
retu_wdt->min_timeout = 0;
retu_wdt->max_timeout = RETU_WDT_MAX_TIMER;
+ retu_wdt->parent = &pdev->dev;
watchdog_set_drvdata(retu_wdt, wdev);
watchdog_set_nowayout(retu_wdt, nowayout);
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index a6f7e2e29beb..1967919ae743 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -161,6 +161,7 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
rt288x_wdt_dev.dev = &pdev->dev;
rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
+ rt288x_wdt_dev.parent = &pdev->dev;
watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout,
&pdev->dev);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e89ae027c91d..d781000c7825 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -607,6 +607,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&wdt->wdt_device, nowayout);
wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+ wdt->wdt_device.parent = &pdev->dev;
ret = watchdog_register_device(&wdt->wdt_device);
if (ret) {
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
new file mode 100644
index 000000000000..a49634cdc1cc
--- /dev/null
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -0,0 +1,280 @@
+/*
+ * Driver for Atmel SAMA5D4 Watchdog Timer
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#include "at91sam9_wdt.h"
+
+/* minimum and maximum watchdog timeout, in seconds */
+#define MIN_WDT_TIMEOUT 1
+#define MAX_WDT_TIMEOUT 16
+#define WDT_DEFAULT_TIMEOUT MAX_WDT_TIMEOUT
+
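+/*
+ * The counter presumably ticks at the 32.768 kHz slow clock divided by 128,
+ * i.e. 256 times per second, hence the << 8 when converting seconds to ticks.
+ */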
+#define WDT_SEC2TICKS(s) ((s) ? (((s) << 8) - 1) : 0)
+
+struct sama5d4_wdt {
+ struct watchdog_device wdd;
+ void __iomem *reg_base;
+ u32 config;
+};
+
+static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(wdt_timeout, int, 0);
+MODULE_PARM_DESC(wdt_timeout,
+ "Watchdog timeout in seconds. (default = "
+ __MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define wdt_read(wdt, field) \
+ readl_relaxed((wdt)->reg_base + (field))
+
+#define wdt_write(wdt, field, val) \
+ writel_relaxed((val), (wdt)->reg_base + (field))
+
+static int sama5d4_wdt_start(struct watchdog_device *wdd)
+{
+ struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 reg;
+
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ reg &= ~AT91_WDT_WDDIS;
+ wdt_write(wdt, AT91_WDT_MR, reg);
+
+ return 0;
+}
+
+static int sama5d4_wdt_stop(struct watchdog_device *wdd)
+{
+ struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 reg;
+
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ reg |= AT91_WDT_WDDIS;
+ wdt_write(wdt, AT91_WDT_MR, reg);
+
+ return 0;
+}
+
+static int sama5d4_wdt_ping(struct watchdog_device *wdd)
+{
+ struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+
+ return 0;
+}
+
+static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 value = WDT_SEC2TICKS(timeout);
+ u32 reg;
+
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ reg &= ~AT91_WDT_WDV;
+ reg &= ~AT91_WDT_WDD;
+ reg |= AT91_WDT_SET_WDV(value);
+ reg |= AT91_WDT_SET_WDD(value);
+ wdt_write(wdt, AT91_WDT_MR, reg);
+
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info sama5d4_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "Atmel SAMA5D4 Watchdog",
+};
+
+static const struct watchdog_ops sama5d4_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sama5d4_wdt_start,
+ .stop = sama5d4_wdt_stop,
+ .ping = sama5d4_wdt_ping,
+ .set_timeout = sama5d4_wdt_set_timeout,
+};
+
+static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
+{
+ struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
+
+ if (wdt_read(wdt, AT91_WDT_SR)) {
+ pr_crit("Atmel Watchdog Software Reset\n");
+ emergency_restart();
+ pr_crit("Reboot didn't succeed\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
+{
+ const char *tmp;
+
+ wdt->config = AT91_WDT_WDDIS;
+
+ if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+ !strcmp(tmp, "software"))
+ wdt->config |= AT91_WDT_WDFIEN;
+ else
+ wdt->config |= AT91_WDT_WDRSTEN;
+
+ if (of_property_read_bool(np, "atmel,idle-halt"))
+ wdt->config |= AT91_WDT_WDIDLEHLT;
+
+ if (of_property_read_bool(np, "atmel,dbg-halt"))
+ wdt->config |= AT91_WDT_WDDBGHLT;
+
+ return 0;
+}
+
+static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
+{
+ struct watchdog_device *wdd = &wdt->wdd;
+ u32 value = WDT_SEC2TICKS(wdd->timeout);
+ u32 reg;
+
+ /*
+ * The WDV and WDD fields must not be modified while the WDDIS bit is
+ * set, so clear the WDDIS bit before writing the WDT_MR.
+ */
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ reg &= ~AT91_WDT_WDDIS;
+ wdt_write(wdt, AT91_WDT_MR, reg);
+
+ reg = wdt->config;
+ reg |= AT91_WDT_SET_WDD(value);
+ reg |= AT91_WDT_SET_WDV(value);
+
+ wdt_write(wdt, AT91_WDT_MR, reg);
+
+ return 0;
+}
+
+static int sama5d4_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ struct sama5d4_wdt *wdt;
+ struct resource *res;
+ void __iomem *regs;
+ u32 irq = 0;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdd = &wdt->wdd;
+ wdd->timeout = wdt_timeout;
+ wdd->info = &sama5d4_wdt_info;
+ wdd->ops = &sama5d4_wdt_ops;
+ wdd->min_timeout = MIN_WDT_TIMEOUT;
+ wdd->max_timeout = MAX_WDT_TIMEOUT;
+
+ watchdog_set_drvdata(wdd, wdt);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ wdt->reg_base = regs;
+
+ if (pdev->dev.of_node) {
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
+
+ ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
+ if (ret)
+ return ret;
+ }
+
+ if ((wdt->config & AT91_WDT_WDFIEN) && irq) {
+ ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
+ IRQF_SHARED | IRQF_IRQPOLL |
+ IRQF_NO_SUSPEND, pdev->name, pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "cannot register interrupt handler\n");
+ return ret;
+ }
+ }
+
+ ret = watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set timeout value\n");
+ return ret;
+ }
+
+ ret = sama5d4_wdt_init(wdt);
+ if (ret)
+ return ret;
+
+ watchdog_set_nowayout(wdd, nowayout);
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register watchdog device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
+ wdt_timeout, nowayout);
+
+ return 0;
+}
+
+static int sama5d4_wdt_remove(struct platform_device *pdev)
+{
+ struct sama5d4_wdt *wdt = platform_get_drvdata(pdev);
+
+ sama5d4_wdt_stop(&wdt->wdd);
+
+ watchdog_unregister_device(&wdt->wdd);
+
+ return 0;
+}
+
+static const struct of_device_id sama5d4_wdt_of_match[] = {
+ { .compatible = "atmel,sama5d4-wdt", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
+
+static struct platform_driver sama5d4_wdt_driver = {
+ .probe = sama5d4_wdt_probe,
+ .remove = sama5d4_wdt_remove,
+ .driver = {
+ .name = "sama5d4_wdt",
+ .of_match_table = sama5d4_wdt_of_match,
+ }
+};
+module_platform_driver(sama5d4_wdt_driver);
+
+MODULE_AUTHOR("Atmel Corporation");
+MODULE_DESCRIPTION("Atmel SAMA5D4 Watchdog Timer driver");
+MODULE_LICENSE("GPL v2");
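Editor's note: the new driver registers with the watchdog core, so userspace drives it through the standard /dev/watchdog character device rather than a driver-specific interface. A minimal, hedged sketch of such a client (not part of the patch; the device path and loop are illustrative, the ioctls are the standard ones from linux/watchdog.h):

/*
 * Hypothetical userspace client for a framework-registered watchdog.
 * Error handling is abbreviated for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 16;	/* matches MAX_WDT_TIMEOUT in the driver above */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* reaches sama5d4_wdt_set_timeout() */
	for (int i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* reaches sama5d4_wdt_ping() */
		sleep(timeout / 2);
	}
	write(fd, "V", 1);	/* magic close; honoured unless nowayout is set */
	close(fd);
	return 0;
}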
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 567458b137a6..f90812170657 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -252,6 +252,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&sh_wdt_dev, nowayout);
watchdog_set_drvdata(&sh_wdt_dev, wdt);
+ sh_wdt_dev.parent = &pdev->dev;
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 42fa5c0c518a..d0578ab2e636 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -154,6 +154,7 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
+ sirfsoc_wdd.parent = &pdev->dev;
ret = watchdog_register_device(&sirfsoc_wdd);
if (ret)
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index c1b03f4235b9..01d816251302 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for ARM SP805 watchdog module
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2 or later. This program is licensed "as is" without any
@@ -226,6 +226,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
wdt->adev = adev;
wdt->wdd.info = &wdt_info;
wdt->wdd.ops = &wdt_ops;
+ wdt->wdd.parent = &adev->dev;
spin_lock_init(&wdt->lock);
watchdog_set_nowayout(&wdt->wdd, nowayout);
@@ -303,6 +304,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 6785afdc0fca..14e9badf2bfa 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -241,6 +241,7 @@ static int st_wdog_probe(struct platform_device *pdev)
return -EINVAL;
}
st_wdog_dev.max_timeout = 0xFFFFFFFF / st_wdog->clkrate;
+ st_wdog_dev.parent = &pdev->dev;
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index e7f0d5b60d3d..3ee6128a540e 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -76,6 +76,7 @@ static int stmp3xxx_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
+ stmp3xxx_wdd.parent = &pdev->dev;
ret = watchdog_register_device(&stmp3xxx_wdd);
if (ret < 0) {
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index a29afb37c48c..47bd8a14d01f 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -184,7 +184,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
/* Set system reset function */
reg = readl(wdt_base + regs->wdt_cfg);
reg &= ~(regs->wdt_reset_mask);
- reg |= ~(regs->wdt_reset_val);
+ reg |= regs->wdt_reset_val;
writel(reg, wdt_base + regs->wdt_cfg);
/* Enable watchdog */
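Editor's note: the one-character fix above matters because OR-ing the complement (reg |= ~val) sets every bit except the intended reset value, clobbering the rest of the configuration register. A standalone illustration of the arithmetic, with hypothetical mask/value constants:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0xffffffff;
	const uint32_t wdt_reset_mask = 0x03;	/* hypothetical */
	const uint32_t wdt_reset_val  = 0x01;	/* hypothetical */

	reg &= ~wdt_reset_mask;
	reg |= ~wdt_reset_val;		/* old code: 0xfffffffe, clobbers cfg bits */
	assert(reg == 0xfffffffe);

	reg = 0xffffffff;
	reg &= ~wdt_reset_mask;
	reg |= wdt_reset_val;		/* fixed: only the reset field changes */
	assert(reg == 0xfffffffd);
	return 0;
}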
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 30451ea46902..7f97cdd53f29 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -218,6 +218,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
wdd->ops = &tegra_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
+ wdd->parent = &pdev->dev;
watchdog_set_drvdata(wdd, wdt);
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 119beb7f6017..4b541934b6c5 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -428,7 +428,8 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
static int ts72xx_wdt_remove(struct platform_device *pdev)
{
- return misc_deregister(&ts72xx_wdt_miscdev);
+ misc_deregister(&ts72xx_wdt_miscdev);
+ return 0;
}
static struct platform_driver ts72xx_wdt_driver = {
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 2c1db6fa9a27..9bf3cc0f3961 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -83,6 +83,7 @@ static int twl4030_wdt_probe(struct platform_device *pdev)
wdt->timeout = 30;
wdt->min_timeout = 1;
wdt->max_timeout = 30;
+ wdt->parent = &pdev->dev;
watchdog_set_nowayout(wdt, nowayout);
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 7f615933d31a..c2da880292bc 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -131,6 +131,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
txx9wdt.timeout = timeout;
txx9wdt.min_timeout = 1;
txx9wdt.max_timeout = WD_MAX_TIMEOUT;
+ txx9wdt.parent = &dev->dev;
watchdog_set_nowayout(&txx9wdt, nowayout);
ret = watchdog_register_device(&txx9wdt);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 9de09ab00838..37c084353cce 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -96,6 +96,7 @@ static int ux500_wdt_probe(struct platform_device *pdev)
ux500_wdt.max_timeout = WATCHDOG_MAX28;
}
+ ux500_wdt.parent = &pdev->dev;
watchdog_set_nowayout(&ux500_wdt, nowayout);
/* disable auto off on sleep */
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 56369c4f1961..5f9cbc37520d 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -206,6 +206,7 @@ static int wdt_probe(struct pci_dev *pdev,
timeout = WDT_TIMEOUT;
wdt_dev.timeout = timeout;
+ wdt_dev.parent = &pdev->dev;
watchdog_set_nowayout(&wdt_dev, nowayout);
if (readl(wdt_mem) & VIA_WDT_FIRED)
wdt_dev.bootstatus |= WDIOF_CARDRESET;
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 2fa17e746ff6..8d1184aee932 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -215,6 +215,7 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
+ wm831x_wdt->parent = &pdev->dev;
watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 34d272ada23d..4ab4b8347d45 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -151,6 +151,7 @@ static int wm8350_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&wm8350_wdt, nowayout);
watchdog_set_drvdata(&wm8350_wdt, wm8350);
+ wm8350_wdt.parent = &pdev->dev;
/* Default to 4s timeout */
wm8350_wdt_set_timeout(&wm8350_wdt, 4);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 7cd226da15fe..73708acce3ca 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -280,4 +280,15 @@ config XEN_ACPI
def_bool y
depends on X86 && ACPI
+config XEN_SYMS
+ bool "Xen symbols"
+ depends on X86 && XEN_DOM0 && XENFS
+ default y if KALLSYMS
+ help
+ Exports hypervisor symbols (along with their types and addresses) via
+ the /proc/xen/xensyms file, similar to /proc/kallsyms.
+
+config XEN_HAVE_VPMU
+ bool
+
endmenu
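Editor's note: a hedged sketch of consuming the file this option exposes; like /proc/kallsyms, each line carries an address, a type character, and a symbol name (the backing implementation, xenfs/xensyms.c, appears later in this diff):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/xen/xensyms", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "ffff82d080123456 t do_domctl" */
	fclose(f);
	return 0;
}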
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..c79329fcfa78 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
/* Update direct mapping, invalidate P2M, and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = frame_list[i];
- frame_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = pfn_to_gfn(pfn);
page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item, it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
enum bp_state state = BP_DONE;
long credit;
- mutex_lock(&balloon_mutex);
do {
+ mutex_lock(&balloon_mutex);
+
credit = current_credit();
if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
state = update_schedule(state);
-#ifndef CONFIG_PREEMPT
- if (need_resched())
- schedule();
-#endif
+ mutex_unlock(&balloon_mutex);
+
+ cond_resched();
+
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
- mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -639,9 +638,9 @@ static int __init balloon_init(void)
* regions (see arch/x86/xen/setup.c).
*/
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
- if (xen_extra_mem[i].size)
- balloon_add_region(PFN_UP(xen_extra_mem[i].start),
- PFN_DOWN(xen_extra_mem[i].size));
+ if (xen_extra_mem[i].n_pfns)
+ balloon_add_region(xen_extra_mem[i].start_pfn,
+ xen_extra_mem[i].n_pfns);
return 0;
}
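Editor's note: the balloon_process() rework above narrows the critical section to one loop iteration and yields with cond_resched() between iterations, replacing the open-coded !CONFIG_PREEMPT need_resched()/schedule() dance. The shape of the pattern, reduced to its essentials (names from the driver, body elided):

static void balloon_process_sketch(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	do {
		mutex_lock(&balloon_mutex);	/* held for one pass only */
		credit = current_credit();
		/* ... increase/decrease reservation ... */
		state = update_schedule(state);
		mutex_unlock(&balloon_mutex);

		cond_resched();			/* explicit preemption point */
	} while (credit && state == BP_DONE);
}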
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 0edb91c0de6b..8ae2fc90e1ea 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -6,10 +6,10 @@
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2)
{
- unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
- unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
- ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+ ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
}
EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 96093ae369a5..6cd5e65c4aff 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -336,7 +336,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
xen_evtchn_port_bind_to_cpu(info, cpu);
@@ -373,7 +373,7 @@ static void xen_irq_init(unsigned irq)
struct irq_info *info;
#ifdef CONFIG_SMP
/* By default all event channels notify CPU#0. */
- cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
+ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -1301,11 +1301,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
if (!VALID_EVTCHN(evtchn))
return -1;
- /*
- * Events delivered via platform PCI interrupts are always
- * routed to vcpu 0 and hence cannot be rebound.
- */
- if (xen_hvm_domain() && !xen_have_vector_callback)
+ if (!xen_support_evtchn_rebind())
return -1;
/* Send future instances of this interrupt to other vcpu. */
@@ -1692,7 +1688,7 @@ void __init xen_init_IRQ(void)
struct physdev_pirq_eoi_gmfn eoi_gmfn;
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+ eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
/* TODO: No PVH support for PIRQ EOI */
if (rc != 0) {
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index ed673e1acd61..1d4baf56c36b 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
q->head[i] = 0;
- init_control.control_gfn = virt_to_mfn(control_block);
+ init_control.control_gfn = virt_to_gfn(control_block);
init_control.offset = 0;
init_control.vcpu = cpu;
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
/* Mask all events in this page before adding it. */
init_array_page(array_page);
- expand_array.array_gfn = virt_to_mfn(array_page);
+ expand_array.array_gfn = virt_to_gfn(array_page);
ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
if (ret < 0)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e53fe191738c..4547a91bca67 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
/* Grant foreign access to the page. */
rc = gnttab_grant_foreign_access(op->domid,
- pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ xen_page_to_gfn(gref->page),
+ readonly);
if (rc < 0)
goto undo;
gref_ids[i] = gref->gref_id = rc;
@@ -493,7 +494,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma)
mutex_unlock(&gref_mutex);
}
-static struct vm_operations_struct gntalloc_vmops = {
+static const struct vm_operations_struct gntalloc_vmops = {
.open = gntalloc_vma_open,
.close = gntalloc_vma_close,
};
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..2ea0b3b2a91d 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -433,7 +433,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}
-static struct vm_operations_struct gntdev_vmops = {
+static const struct vm_operations_struct gntdev_vmops = {
.open = gntdev_vma_open,
.close = gntdev_vma_close,
.find_special_page = gntdev_vma_find_special_page,
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
+ mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
+ mutex_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d10effee9b9e..e12bd3635f83 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
* is resuming in a new domain.
*/
si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
- ? virt_to_mfn(xen_start_info)
+ ? virt_to_gfn(xen_start_info)
: 0);
xen_arch_post_suspend(si->cancelled);
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index a1800c150839..08cb419eb4e6 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
asmlinkage __visible void xen_maybe_preempt_hcall(void)
{
if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
- && should_resched())) {
+ && need_resched())) {
/*
* Clear flag as we may be rescheduled on a different
* cpu.
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..5e9adac928e6 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
return ret;
}
-struct mmap_mfn_state {
+struct mmap_gfn_state {
unsigned long va;
struct vm_area_struct *vma;
domid_t domain;
};
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
{
struct privcmd_mmap_entry *msg = data;
- struct mmap_mfn_state *st = state;
+ struct mmap_gfn_state *st = state;
struct vm_area_struct *vma = st->vma;
int rc;
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
- rc = xen_remap_domain_mfn_range(vma,
+ rc = xen_remap_domain_gfn_range(vma,
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
struct vm_area_struct *vma;
int rc;
LIST_HEAD(pagelist);
- struct mmap_mfn_state state;
+ struct mmap_gfn_state state;
/* We only support privcmd_ioctl_mmap_batch for auto translated. */
if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist,
- mmap_mfn_range, &state);
+ mmap_gfn_range, &state);
out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
int global_error;
int version;
- /* User-space mfn array to store errors in the second pass for V1. */
- xen_pfn_t __user *user_mfn;
+ /* User-space gfn array to store errors in the second pass for V1. */
+ xen_pfn_t __user *user_gfn;
/* User-space int array to store errors in the second pass for V2. */
int __user *user_err;
};
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
*/
static int mmap_batch_fn(void *data, int nr, void *state)
{
- xen_pfn_t *mfnp = data;
+ xen_pfn_t *gfnp = data;
struct mmap_batch_state *st = state;
struct vm_area_struct *vma = st->vma;
struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
cur_pages = &pages[st->index];
BUG_ON(nr < 0);
- ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
- (int *)mfnp, st->vma->vm_page_prot,
+ ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+ (int *)gfnp, st->vma->vm_page_prot,
st->domain, cur_pages);
/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
if (st->version == 1) {
if (err) {
- xen_pfn_t mfn;
+ xen_pfn_t gfn;
- ret = get_user(mfn, st->user_mfn);
+ ret = get_user(gfn, st->user_gfn);
if (ret < 0)
return ret;
/*
* V1 encodes the error codes in the 32bit top
- * nibble of the mfn (with its known
+ * nibble of the gfn (with its known
* limitations vis-a-vis 64 bit callers).
*/
- mfn |= (err == -ENOENT) ?
+ gfn |= (err == -ENOENT) ?
PRIVCMD_MMAPBATCH_PAGED_ERROR :
PRIVCMD_MMAPBATCH_MFN_ERROR;
- return __put_user(mfn, st->user_mfn++);
+ return __put_user(gfn, st->user_gfn++);
} else
- st->user_mfn++;
+ st->user_gfn++;
} else { /* st->version == 2 */
if (err)
return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
return 0;
}
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
* the vma with the page info to use later.
* Returns: 0 if success, otherwise -errno
*/
@@ -414,7 +414,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
return 0;
}
-static struct vm_operations_struct privcmd_vm_ops;
+static const struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
if (state.global_error) {
/* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
+ state.user_gfn = (xen_pfn_t *)m.arr;
state.user_err = m.err;
ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
if (rc == 0)
free_xenballooned_pages(numpgs, pages);
else
@@ -605,7 +605,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-static struct vm_operations_struct privcmd_vm_ops = {
+static const struct vm_operations_struct privcmd_vm_ops = {
.close = privcmd_close,
.fault = privcmd_fault
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4c549323c605..79bc4933b13e 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
*/
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
- unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
- dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+ unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+ dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
dma |= paddr & ~PAGE_MASK;
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
- unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+ unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
phys_addr_t paddr = dma;
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
unsigned int offset,
size_t length)
{
- unsigned long next_mfn;
+ unsigned long next_bfn;
int i;
int nr_pages;
- next_mfn = pfn_to_mfn(pfn);
+ next_bfn = pfn_to_bfn(pfn);
nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
for (i = 1; i < nr_pages; i++) {
- if (pfn_to_mfn(++pfn) != ++next_mfn)
+ if (pfn_to_bfn(++pfn) != ++next_bfn)
return 0;
}
return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
- unsigned long mfn = PFN_DOWN(dma_addr);
- unsigned long pfn = mfn_to_local_pfn(mfn);
+ unsigned long bfn = PFN_DOWN(dma_addr);
+ unsigned long pfn = bfn_to_local_pfn(bfn);
phys_addr_t paddr;
/* If the address is outside our domain, it CAN
@@ -311,9 +311,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
- if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
- return ret;
-
/* On ARM this function returns an ioremap'ped virtual address for
* which virt_to_phys doesn't return the corresponding physical
* address. In fact on ARM virt_to_phys only works for kernel direct
@@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
phys_addr_t phys;
u64 dma_mask = DMA_BIT_MASK(32);
- if (dma_release_from_coherent(hwdev, order, vaddr))
- return;
-
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 96453f8a85c5..b5a7342e0ba5 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -20,6 +20,9 @@
#include <xen/xenbus.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
+#ifdef CONFIG_XEN_HAVE_VPMU
+#include <xen/interface/xenpmu.h>
+#endif
#define HYPERVISOR_ATTR_RO(_name) \
static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
@@ -368,6 +371,126 @@ static void xen_properties_destroy(void)
sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
}
+#ifdef CONFIG_XEN_HAVE_VPMU
+struct pmu_mode {
+ const char *name;
+ uint32_t mode;
+};
+
+static const struct pmu_mode pmu_modes[] = {
+ {"off", XENPMU_MODE_OFF},
+ {"self", XENPMU_MODE_SELF},
+ {"hv", XENPMU_MODE_HV},
+ {"all", XENPMU_MODE_ALL}
+};
+
+static ssize_t pmu_mode_store(struct hyp_sysfs_attr *attr,
+ const char *buffer, size_t len)
+{
+ int ret;
+ struct xen_pmu_params xp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) {
+ if (strncmp(buffer, pmu_modes[i].name, len - 1) == 0) {
+ xp.val = pmu_modes[i].mode;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(pmu_modes))
+ return -EINVAL;
+
+ xp.version.maj = XENPMU_VER_MAJ;
+ xp.version.min = XENPMU_VER_MIN;
+ ret = HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t pmu_mode_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret;
+ struct xen_pmu_params xp;
+ int i;
+ uint32_t mode;
+
+ xp.version.maj = XENPMU_VER_MAJ;
+ xp.version.min = XENPMU_VER_MIN;
+ ret = HYPERVISOR_xenpmu_op(XENPMU_mode_get, &xp);
+ if (ret)
+ return ret;
+
+ mode = (uint32_t)xp.val;
+ for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) {
+ if (mode == pmu_modes[i].mode)
+ return sprintf(buffer, "%s\n", pmu_modes[i].name);
+ }
+
+ return -EINVAL;
+}
+HYPERVISOR_ATTR_RW(pmu_mode);
+
+static ssize_t pmu_features_store(struct hyp_sysfs_attr *attr,
+ const char *buffer, size_t len)
+{
+ int ret;
+ uint32_t features;
+ struct xen_pmu_params xp;
+
+ ret = kstrtou32(buffer, 0, &features);
+ if (ret)
+ return ret;
+
+ xp.val = features;
+ xp.version.maj = XENPMU_VER_MAJ;
+ xp.version.min = XENPMU_VER_MIN;
+ ret = HYPERVISOR_xenpmu_op(XENPMU_feature_set, &xp);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t pmu_features_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ int ret;
+ struct xen_pmu_params xp;
+
+ xp.version.maj = XENPMU_VER_MAJ;
+ xp.version.min = XENPMU_VER_MIN;
+ ret = HYPERVISOR_xenpmu_op(XENPMU_feature_get, &xp);
+ if (ret)
+ return ret;
+
+ return sprintf(buffer, "0x%x\n", (uint32_t)xp.val);
+}
+HYPERVISOR_ATTR_RW(pmu_features);
+
+static struct attribute *xen_pmu_attrs[] = {
+ &pmu_mode_attr.attr,
+ &pmu_features_attr.attr,
+ NULL
+};
+
+static const struct attribute_group xen_pmu_group = {
+ .name = "pmu",
+ .attrs = xen_pmu_attrs,
+};
+
+static int __init xen_pmu_init(void)
+{
+ return sysfs_create_group(hypervisor_kobj, &xen_pmu_group);
+}
+
+static void xen_pmu_destroy(void)
+{
+ sysfs_remove_group(hypervisor_kobj, &xen_pmu_group);
+}
+#endif
+
static int __init hyper_sysfs_init(void)
{
int ret;
@@ -390,7 +513,15 @@ static int __init hyper_sysfs_init(void)
ret = xen_properties_init();
if (ret)
goto prop_out;
-
+#ifdef CONFIG_XEN_HAVE_VPMU
+ if (xen_initial_domain()) {
+ ret = xen_pmu_init();
+ if (ret) {
+ xen_properties_destroy();
+ goto prop_out;
+ }
+ }
+#endif
goto out;
prop_out:
@@ -407,6 +538,9 @@ out:
static void __exit hyper_sysfs_exit(void)
{
+#ifdef CONFIG_XEN_HAVE_VPMU
+ xen_pmu_destroy();
+#endif
xen_properties_destroy();
xen_compilation_destroy();
xen_sysfs_uuid_destroy();
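Editor's note: a hedged sketch of driving the new vPMU attributes from dom0 userspace; the path follows the "pmu" attribute group registered above, and the written string is matched against the pmu_modes[] table by pmu_mode_store():

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/hypervisor/pmu/pmu_mode", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "self", 4);	/* one of: off, self, hv, all */
	close(fd);
	return 0;
}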
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 239738f944ba..945fc4327201 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
- u32 index, unsigned long pfn)
+ u32 index, struct page *page)
{
- unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
- gmfn, 0, 0, 0);
+ xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
- u32 index, unsigned long pfn)
+ u32 index, struct page *page)
{
- unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
- gmfn, 0, 0, 0);
+ xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
- unsigned long pfn = page_to_pfn(page);
if (pool < 0)
return;
if (ind != index)
return;
mb(); /* ensure page is quiescent; tmem may address it with an alias */
- (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+ (void)xen_tmem_put_page((u32)pool, oid, ind, page);
}
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
{
u32 ind = (u32) index;
struct tmem_oid oid = *(struct tmem_oid *)&key;
- unsigned long pfn = page_to_pfn(page);
int ret;
/* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
return -1;
if (ind != index)
return -1;
- ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+ ret = xen_tmem_get_page((u32)pool, oid, ind, page);
if (ret == 1)
return 0;
else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
- unsigned long pfn = page_to_pfn(page);
int pool = tmem_frontswap_poolid;
int ret;
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
if (ind64 != ind)
return -1;
mb(); /* ensure page is quiescent; tmem may address it with an alias */
- ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
/* translate Xen tmem return values to linux semantics */
if (ret == 1)
return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
{
u64 ind64 = (u64)offset;
u32 ind = (u32)offset;
- unsigned long pfn = page_to_pfn(page);
int pool = tmem_frontswap_poolid;
int ret;
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
return -1;
if (ind64 != ind)
return -1;
- ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
/* translate Xen tmem return values to linux semantics */
if (ret == 1)
return 0;
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 59fc190f1e92..70fa438000af 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
return 0;
err_unregister:
- for_each_possible_cpu(i) {
- struct acpi_processor_performance *perf;
- perf = per_cpu_ptr(acpi_perf_data, i);
- acpi_processor_unregister_performance(perf, i);
- }
+ for_each_possible_cpu(i)
+ acpi_processor_unregister_performance(i);
+
err_out:
/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
free_acpi_perf_data();
@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
- for_each_possible_cpu(i) {
- struct acpi_processor_performance *perf;
- perf = per_cpu_ptr(acpi_perf_data, i);
- acpi_processor_unregister_performance(perf, i);
- }
+ for_each_possible_cpu(i)
+ acpi_processor_unregister_performance(i);
+
free_acpi_perf_data();
}
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..2ba09c1195c8 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
for (i = 0; i < nr_pages; i++) {
err = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_mfn(vaddr), 0);
+ virt_to_gfn(vaddr), 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
addrs);
- if (!rv)
+ if (!rv) {
vunmap(vaddr);
+ free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+ }
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
node->nr_handles);
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index b17707ee07d4..ee6d9efd7b76 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
goto out_err;
gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
- virt_to_mfn(xen_store_interface), 0 /* writable */);
+ virt_to_gfn(xen_store_interface), 0 /* writable */);
arg.dom = DOMID_SELF;
arg.remote_dom = domid;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 4308fb3cf7c2..3cbe0556de26 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
if (!page)
goto out_err;
- xen_store_mfn = xen_start_info->store_mfn =
- pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
+ xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
/* Next allocate a local port which xenstored can bind to */
alloc_unbound.dom = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
err = xenstored_local_init();
if (err)
goto out_error;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_PV:
xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xen_store_gfn = xen_start_info->store_mfn;
+ xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_HVM:
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
@@ -802,9 +800,9 @@ static int __init xenbus_init(void)
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
- xen_store_mfn = (unsigned long)v;
+ xen_store_gfn = (unsigned long)v;
xen_store_interface =
- xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+ xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
break;
default:
pr_warn("Xenstore state unknown\n");
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index b019865fcc56..1a83010ddffa 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_XENFS) += xenfs.o
xenfs-y = super.o
xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
+xenfs-$(CONFIG_XEN_SYMS) += xensyms.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 06092e0fe8ce..8559a71f36b1 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -57,6 +57,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{ "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR},
{ "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR},
+#ifdef CONFIG_XEN_SYMS
+ { "xensyms", &xensyms_ops, S_IRUSR},
+#endif
{""},
};
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index 6b80c7779c02..2c5934ea9b1e 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -3,5 +3,6 @@
extern const struct file_operations xsd_kva_file_ops;
extern const struct file_operations xsd_port_file_ops;
+extern const struct file_operations xensyms_ops;
#endif /* _XENFS_XENBUS_H */
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
new file mode 100644
index 000000000000..f8b12856753f
--- /dev/null
+++ b/drivers/xen/xenfs/xensyms.c
@@ -0,0 +1,152 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <xen/xen-ops.h>
+#include "xenfs.h"
+
+
+#define XEN_KSYM_NAME_LEN 127 /* The hypervisor may use a different name length */
+
+struct xensyms {
+ struct xen_platform_op op;
+ char *name;
+ uint32_t namelen;
+};
+
+/* Grab the next symbol from the hypervisor */
+static int xensyms_next_sym(struct xensyms *xs)
+{
+ int ret;
+ struct xenpf_symdata *symdata = &xs->op.u.symdata;
+ uint64_t symnum;
+
+ memset(xs->name, 0, xs->namelen);
+ symdata->namelen = xs->namelen;
+
+ symnum = symdata->symnum;
+
+ ret = HYPERVISOR_dom0_op(&xs->op);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the hypervisor's symbol didn't fit into the buffer, allocate
+ * a larger buffer and try again.
+ */
+ if (unlikely(symdata->namelen > xs->namelen)) {
+ kfree(xs->name);
+
+ xs->namelen = symdata->namelen;
+ xs->name = kzalloc(xs->namelen, GFP_KERNEL);
+ if (!xs->name)
+ return -ENOMEM;
+
+ set_xen_guest_handle(symdata->name, xs->name);
+ symdata->symnum--; /* Rewind */
+
+ ret = HYPERVISOR_dom0_op(&xs->op);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (symdata->symnum == symnum)
+ /* End of symbols */
+ return 1;
+
+ return 0;
+}
+
+static void *xensyms_start(struct seq_file *m, loff_t *pos)
+{
+ struct xensyms *xs = (struct xensyms *)m->private;
+
+ xs->op.u.symdata.symnum = *pos;
+
+ if (xensyms_next_sym(xs))
+ return NULL;
+
+ return m->private;
+}
+
+static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct xensyms *xs = (struct xensyms *)m->private;
+
+ xs->op.u.symdata.symnum = ++(*pos);
+
+ if (xensyms_next_sym(xs))
+ return NULL;
+
+ return p;
+}
+
+static int xensyms_show(struct seq_file *m, void *p)
+{
+ struct xensyms *xs = (struct xensyms *)m->private;
+ struct xenpf_symdata *symdata = &xs->op.u.symdata;
+
+ seq_printf(m, "%016llx %c %s\n", symdata->address,
+ symdata->type, xs->name);
+
+ return 0;
+}
+
+static void xensyms_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations xensyms_seq_ops = {
+ .start = xensyms_start,
+ .next = xensyms_next,
+ .show = xensyms_show,
+ .stop = xensyms_stop,
+};
+
+static int xensyms_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ struct xensyms *xs;
+ int ret;
+
+ ret = seq_open_private(file, &xensyms_seq_ops,
+ sizeof(struct xensyms));
+ if (ret)
+ return ret;
+
+ m = file->private_data;
+ xs = (struct xensyms *)m->private;
+
+ xs->namelen = XEN_KSYM_NAME_LEN + 1;
+ xs->name = kzalloc(xs->namelen, GFP_KERNEL);
+ if (!xs->name) {
+ seq_release_private(inode, file);
+ return -ENOMEM;
+ }
+ set_xen_guest_handle(xs->op.u.symdata.name, xs->name);
+ xs->op.cmd = XENPF_get_symbol;
+ xs->op.u.symdata.namelen = xs->namelen;
+
+ return 0;
+}
+
+static int xensyms_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct xensyms *xs = (struct xensyms *)m->private;
+
+ kfree(xs->name);
+ return seq_release_private(inode, file);
+}
+
+const struct file_operations xensyms_ops = {
+ .open = xensyms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = xensyms_release
+};
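Editor's note: the file above follows the standard seq_file iterator contract, with the twist that xensyms_start()/xensyms_next() delegate position tracking to the hypervisor via symdata->symnum. For reference, a minimal generic sketch of that contract (demo_nr_items is hypothetical): ->start() returns an iterator or NULL at EOF, ->next() advances it, ->show() emits one record, ->stop() releases per-read state.

#include <linux/seq_file.h>

static const loff_t demo_nr_items = 4;	/* hypothetical record count */

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < demo_nr_items) ? pos : NULL;
}

static void *demo_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return (*pos < demo_nr_items) ? pos : NULL;
}

static int demo_show(struct seq_file *m, void *p)
{
	seq_printf(m, "record %lld\n", *(loff_t *)p);
	return 0;
}

static void demo_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.show  = demo_show,
	.stop  = demo_stop,
};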
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 58a5389aec89..cff23872c5a9 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -38,8 +38,8 @@
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
unsigned int domid)
{
int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
.size = 1,
.space = XENMAPSPACE_gmfn_foreign,
};
- xen_ulong_t idx = fgmfn;
+ xen_ulong_t idx = fgfn;
xen_pfn_t gpfn = lpfn;
int err = 0;
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
}
struct remap_data {
- xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+ xen_pfn_t *fgfn; /* foreign domain's gfn */
pgprot_t prot;
domid_t domid;
struct vm_area_struct *vma;
int index;
struct page **pages;
- struct xen_remap_mfn_info *info;
+ struct xen_remap_gfn_info *info;
int *err_ptr;
int mapped;
};
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
int rc;
- rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+ rc = map_foreign_page(pfn, *info->fgfn, info->domid);
*info->err_ptr++ = rc;
if (!rc) {
set_pte_at(info->vma->vm_mm, addr, ptep, pte);
info->mapped++;
}
- info->fgmfn++;
+ info->fgfn++;
return 0;
}
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
x86 PVOPS */
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
- data.fgmfn = mfn;
+ data.fgfn = gfn;
data.prot = prot;
data.domid = domid;
data.vma = vma;